content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Expand aggregated data
#' Several datasets for the Bradley-Terry Model aggregate the number of wins for each player in a different column.
#' The models we provide are intended to be used in a long format. A single result for each contest.
#' This function expands datasets that have aggregated data into this long format.
#' @param d a data frame
#' @param player0 string with column name of player0
#' @param player1 string with column name of player1
#' @param wins0 string with column name of the number of wins of player 0
#' @param wins1 string with column name of the number of wins of player 1
#' @param keep an array of strings with the name of columns we want to keep in the new data frame (and repeat in every expanded row)
#' @return a data frame with the expanded dataset. It will have the columns player1, player0, y, the keep columns, and a rowid column (to make each row unique)
#' @export
#'
#' @examples
#' #Creating a simple data frame with only one row to illustrate how the function works
#' df1 <- tibble::tribble(~player0, ~player1, ~wins0, ~wins1,~cluster, 'A','B',4, 3, 'c1')
#' df2 <- expand_aggregated_data(df1,'player0', 'player1', 'wins0', 'wins1', keep=c('cluster'))
#' print(df2)
expand_aggregated_data <-
  function(d, player0, player1, wins0, wins1, keep) {
    # Note: this row-by-row expansion is not the most efficient approach,
    # but performance is not a priority for this helper.
    d <- as.data.frame(d)
    n_d_rows <- nrow(d)
    # Output columns: player0, player1, y, the kept columns, and rowid.
    # Computing the width from the output spec (rather than ncol(d)) keeps
    # the colnames assignment valid even if d carries extra columns.
    out_cols <- c('player0', 'player1', 'y', keep, 'rowid')
    n_row_out <- sum(d[, wins0]) + sum(d[, wins1])
    out <- data.frame(matrix(ncol = length(out_cols), nrow = n_row_out))
    colnames(out) <- out_cols
    j <- 1
    for (i in seq_len(n_d_rows)) {
      current_row <- d[i, ]
      n0_rows <- current_row[1, wins0]
      n1_rows <- current_row[1, wins1]
      # First we expand the player0 wins (y = 0).
      # seq_len() (rather than seq_along(1:n)) correctly yields an empty
      # sequence when a row has zero wins; 1:0 would iterate twice.
      for (k in seq_len(n0_rows)) {
        out[j, 'player0'] <- current_row[1, player0]
        out[j, 'player1'] <- current_row[1, player1]
        out[j, 'y'] <- 0
        out[j, keep] <- current_row[1, keep]
        j <- j + 1
      }
      # Second we expand the player1 wins (y = 1).
      for (k in seq_len(n1_rows)) {
        out[j, 'player0'] <- current_row[1, player0]
        out[j, 'player1'] <- current_row[1, player1]
        out[j, 'y'] <- 1
        out[j, keep] <- current_row[1, keep]
        j <- j + 1
      }
    }
    # Add the rowid column to make every expanded row unique.
    out[, 'rowid'] <- seq_len(n_row_out)
    return(out)
  }
| /fuzzedpackages/bpcs/R/bpc_data_transformation.R | permissive | akhikolla/testpackages | R | false | false | 2,558 | r | #' Expand aggregated data
#' Several datasets for the Bradley-Terry Model aggregate the number of wins for each player in a different column.
#' The models we provide are intended to be used in a long format. A single result for each contest.
#' This function expands datasets that have aggregated data into this long format.
#' @param d a data frame
#' @param player0 string with column name of player0
#' @param player1 string with column name of player1
#' @param wins0 string with column name of the number of wins of player 0
#' @param wins1 string with column name of the number of wins of player 1
#' @param keep an array of strings with the name of columns we want to keep in the new data frame (and repeat in every expanded row)
#' @return a data frame with the expanded dataset. It will have the columns player1, player0, y, the keep columns, and a rowid column (to make each row unique)
#' @export
#'
#' @examples
#' #Creating a simple data frame with only one row to illustrate how the function works
#' df1 <- tibble::tribble(~player0, ~player1, ~wins0, ~wins1,~cluster, 'A','B',4, 3, 'c1')
#' df2 <- expand_aggregated_data(df1,'player0', 'player1', 'wins0', 'wins1', keep=c('cluster'))
#' print(df2)
expand_aggregated_data <-
  function(d, player0, player1, wins0, wins1, keep) {
    # Note: this row-by-row expansion is not the most efficient approach,
    # but performance is not a priority for this helper.
    d <- as.data.frame(d)
    n_d_rows <- nrow(d)
    # Output columns: player0, player1, y, the kept columns, and rowid.
    # Computing the width from the output spec (rather than ncol(d)) keeps
    # the colnames assignment valid even if d carries extra columns.
    out_cols <- c('player0', 'player1', 'y', keep, 'rowid')
    n_row_out <- sum(d[, wins0]) + sum(d[, wins1])
    out <- data.frame(matrix(ncol = length(out_cols), nrow = n_row_out))
    colnames(out) <- out_cols
    j <- 1
    for (i in seq_len(n_d_rows)) {
      current_row <- d[i, ]
      n0_rows <- current_row[1, wins0]
      n1_rows <- current_row[1, wins1]
      # First we expand the player0 wins (y = 0).
      # seq_len() (rather than seq_along(1:n)) correctly yields an empty
      # sequence when a row has zero wins; 1:0 would iterate twice.
      for (k in seq_len(n0_rows)) {
        out[j, 'player0'] <- current_row[1, player0]
        out[j, 'player1'] <- current_row[1, player1]
        out[j, 'y'] <- 0
        out[j, keep] <- current_row[1, keep]
        j <- j + 1
      }
      # Second we expand the player1 wins (y = 1).
      for (k in seq_len(n1_rows)) {
        out[j, 'player0'] <- current_row[1, player0]
        out[j, 'player1'] <- current_row[1, player1]
        out[j, 'y'] <- 1
        out[j, keep] <- current_row[1, keep]
        j <- j + 1
      }
    }
    # Add the rowid column to make every expanded row unique.
    out[, 'rowid'] <- seq_len(n_row_out)
    return(out)
  }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codelists.R
\name{export_definition_search}
\alias{export_definition_search}
\title{Exports definition searches to an excel file}
\usage{
export_definition_search(definition_search, out_file)
}
\arguments{
\item{definition_search}{a list of dataframes as produced by build_definition_lists}
\item{out_file}{file path to the excel file to be exported}
}
\description{
Exports definition searches to an excel file
}
\examples{
\dontrun{
medical_table <- read.delim("medical.txt", fileEncoding="latin1", stringsAsFactors = FALSE)
drug_table <- read.delim("product.txt", fileEncoding="latin1", stringsAsFactors = FALSE)
def2 <- import_definition_lists(system.file("extdata", "example_search.csv",
package = "rpcdsearch"))
draft_lists <- definition_search(def2, medical_table, drug_table = drug_table)
out_file <- "def_searches.xlsx"
export_definition_search(draft_lists, out_file)
}
}
| /man/export_definition_search.Rd | no_license | cran/rEHR | R | false | true | 1,033 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codelists.R
\name{export_definition_search}
\alias{export_definition_search}
\title{Exports definition searches to an excel file}
\usage{
export_definition_search(definition_search, out_file)
}
\arguments{
\item{definition_search}{a list of dataframes as produced by build_definition_lists}
\item{out_file}{file path to the excel file to be exported}
}
\description{
Exports definition searches to an excel file
}
\examples{
\dontrun{
medical_table <- read.delim("medical.txt", fileEncoding="latin1", stringsAsFactors = FALSE)
drug_table <- read.delim("product.txt", fileEncoding="latin1", stringsAsFactors = FALSE)
def2 <- import_definition_lists(system.file("extdata", "example_search.csv",
package = "rpcdsearch"))
draft_lists <- definition_search(def2, medical_table, drug_table = drug_table)
out_file <- "def_searches.xlsx"
export_definition_search(draft_lists, out_file)
}
}
|
#' @title Fuse learner with the bagging technique and oversampling for imbalancy correction.
#'
#' @description
#' Fuses a classification learner for binary classification with an over-bagging method
#' for imbalancy correction when we have strongly unequal class sizes.
#' Creates a learner object, which can be
#' used like any other learner object.
#' Models can easily be accessed via \code{\link{getHomogeneousEnsembleModels}}.
#'
#' OverBagging is implemented as follows:
#' For each iteration a random data subset is sampled. Minority class examples
#' are oversampled with replacement with a given rate.
#' Majority class examples are either simply copied into each bag, or bootstrapped with replacement
#' until we have as many majority class examples as in the original training data.
#' Features are currently not changed or sampled.
#'
#' Prediction works as follows:
#' For classification we do majority voting to create a discrete label and
#' probabilities are predicted by considering the proportions of all predicted labels.
#'
#' @template arg_learner
#' @param obw.iters [\code{integer(1)}]\cr
#' Number of fitted models in bagging.
#' Default is 10.
#' @param obw.rate [\code{numeric(1)}]\cr
#' Factor to upsample the smaller class in each bag.
#' Must be between 1 and \code{Inf},
#' where 1 means no oversampling and 2 would mean doubling the class size.
#' Default is 1.
#' @param obw.maxcl [\code{character(1)}]\cr
#' character value that controls how to sample majority class.
#' \dQuote{all} means every instance of the majority class gets in each bag,
#' \dQuote{boot} means the majority class instances are bootstrapped in each iteration.
#' Default is \dQuote{boot}.
#' @template ret_learner
#' @family imbalancy
#' @family wrapper
#' @export
makeOverBaggingWrapper = function(learner, obw.iters = 10L, obw.rate = 1, obw.maxcl = "boot") {
  # Wrap a binary classification learner with over-bagging for class imbalance.
  learner = checkLearner(learner, "classif")
  # Only record hyperparameters the caller explicitly supplied, so learner
  # defaults stay in effect otherwise.
  par.vals = list()
  if (!missing(obw.iters)) {
    obw.iters = asCount(obw.iters, positive = TRUE)
    par.vals$obw.iters = obw.iters
  }
  if (!missing(obw.rate)) {
    assertNumber(obw.rate, lower = 1)
    par.vals$obw.rate = obw.rate
  }
  if (!missing(obw.maxcl)) {
    assertChoice(obw.maxcl, choices = c("boot", "all"))
    par.vals$obw.maxcl = obw.maxcl
  }
  # The base learner must produce discrete labels; probabilities are derived
  # later from the vote proportions across bags.
  if (learner$predict.type != "response")
    stop("Predict type of the basic learner must be response.")
  wrapper.id = paste(learner$id, "overbagged", sep = ".")
  pkgs = learner$package
  par.set = makeParamSet(
    makeIntegerLearnerParam(id = "obw.iters", lower = 1L, default = 10L),
    makeNumericLearnerParam(id = "obw.rate", lower = 1),
    makeDiscreteLearnerParam(id = "obw.maxcl", c("boot", "all"))
  )
  wrapped = makeHomogeneousEnsemble(wrapper.id, "classif", learner, pkgs,
    par.set = par.set, par.vals = par.vals,
    learner.subclass = c("OverBaggingWrapper", "BaggingWrapper"),
    model.subclass = "BaggingModel")
  # Voting over bags allows probability predictions from the ensemble.
  addProperties(wrapped, "prob")
}
#' @export
trainLearner.OverBaggingWrapper = function(.learner, .task, .subset, .weights = NULL,
  obw.iters = 10L, obw.rate = 1, obw.maxcl = "boot", ...) {
  # Restrict the task to the training subset once, up front.
  .task = subsetTask(.task, subset = .subset)
  targets = getTaskTargets(.task)
  # Fit one base model per bag: the minority class is sampled with
  # replacement (rate obw.rate); the majority class is either bootstrapped
  # ("boot") or taken as-is ("all"), per sampleBinaryClass().
  fit.one.bag = function(iter) {
    bag = sampleBinaryClass(targets, obw.rate, cl = "min", clreplace = TRUE,
      othreplace = identical(obw.maxcl, "boot"), bagging = TRUE)
    train(.learner$next.learner, .task, subset = bag, weights = .weights)
  }
  models = lapply(seq_len(obw.iters), fit.one.bag)
  m = makeHomChainModel(.learner, models)
}
| /R/OverBaggingWrapper.R | no_license | dickoa/mlr | R | false | false | 3,439 | r | #' @title Fuse learner with the bagging technique and oversampling for imbalancy correction.
#'
#' @description
#' Fuses a classification learner for binary classification with an over-bagging method
#' for imbalancy correction when we have strongly unequal class sizes.
#' Creates a learner object, which can be
#' used like any other learner object.
#' Models can easily be accessed via \code{\link{getHomogeneousEnsembleModels}}.
#'
#' OverBagging is implemented as follows:
#' For each iteration a random data subset is sampled. Minority class examples
#' are oversampled with replacement with a given rate.
#' Majority class examples are either simply copied into each bag, or bootstrapped with replacement
#' until we have as many majority class examples as in the original training data.
#' Features are currently not changed or sampled.
#'
#' Prediction works as follows:
#' For classification we do majority voting to create a discrete label and
#' probabilities are predicted by considering the proportions of all predicted labels.
#'
#' @template arg_learner
#' @param obw.iters [\code{integer(1)}]\cr
#' Number of fitted models in bagging.
#' Default is 10.
#' @param obw.rate [\code{numeric(1)}]\cr
#' Factor to upsample the smaller class in each bag.
#' Must be between 1 and \code{Inf},
#' where 1 means no oversampling and 2 would mean doubling the class size.
#' Default is 1.
#' @param obw.maxcl [\code{character(1)}]\cr
#' character value that controls how to sample majority class.
#' \dQuote{all} means every instance of the majority class gets in each bag,
#' \dQuote{boot} means the majority class instances are bootstrapped in each iteration.
#' Default is \dQuote{boot}.
#' @template ret_learner
#' @family imbalancy
#' @family wrapper
#' @export
makeOverBaggingWrapper = function(learner, obw.iters = 10L, obw.rate = 1, obw.maxcl = "boot") {
  # Wrap a binary classification learner with over-bagging for class imbalance.
  learner = checkLearner(learner, "classif")
  # Only record hyperparameters the caller explicitly supplied, so learner
  # defaults stay in effect otherwise.
  par.vals = list()
  if (!missing(obw.iters)) {
    obw.iters = asCount(obw.iters, positive = TRUE)
    par.vals$obw.iters = obw.iters
  }
  if (!missing(obw.rate)) {
    assertNumber(obw.rate, lower = 1)
    par.vals$obw.rate = obw.rate
  }
  if (!missing(obw.maxcl)) {
    assertChoice(obw.maxcl, choices = c("boot", "all"))
    par.vals$obw.maxcl = obw.maxcl
  }
  # The base learner must produce discrete labels; probabilities are derived
  # later from the vote proportions across bags.
  if (learner$predict.type != "response")
    stop("Predict type of the basic learner must be response.")
  wrapper.id = paste(learner$id, "overbagged", sep = ".")
  pkgs = learner$package
  par.set = makeParamSet(
    makeIntegerLearnerParam(id = "obw.iters", lower = 1L, default = 10L),
    makeNumericLearnerParam(id = "obw.rate", lower = 1),
    makeDiscreteLearnerParam(id = "obw.maxcl", c("boot", "all"))
  )
  wrapped = makeHomogeneousEnsemble(wrapper.id, "classif", learner, pkgs,
    par.set = par.set, par.vals = par.vals,
    learner.subclass = c("OverBaggingWrapper", "BaggingWrapper"),
    model.subclass = "BaggingModel")
  # Voting over bags allows probability predictions from the ensemble.
  addProperties(wrapped, "prob")
}
#' @export
trainLearner.OverBaggingWrapper = function(.learner, .task, .subset, .weights = NULL,
  obw.iters = 10L, obw.rate = 1, obw.maxcl = "boot", ...) {
  # Restrict the task to the training subset once, up front.
  .task = subsetTask(.task, subset = .subset)
  targets = getTaskTargets(.task)
  # Fit one base model per bag: the minority class is sampled with
  # replacement (rate obw.rate); the majority class is either bootstrapped
  # ("boot") or taken as-is ("all"), per sampleBinaryClass().
  fit.one.bag = function(iter) {
    bag = sampleBinaryClass(targets, obw.rate, cl = "min", clreplace = TRUE,
      othreplace = identical(obw.maxcl, "boot"), bagging = TRUE)
    train(.learner$next.learner, .task, subset = bag, weights = .weights)
  }
  models = lapply(seq_len(obw.iters), fit.one.bag)
  m = makeHomChainModel(.learner, models)
}
|
# Apply a trained PLP model to a new population and, optionally, evaluate
# the resulting predictions.
#
# Args:
#   population:           study population data frame to predict for
#   plpData:              object of class 'plpData' holding the covariate data
#   plpModel:             trained model of class 'plpModel'
#   calculatePerformance: if TRUE, run evaluatePlp() on the predictions
#   databaseOutput:       currently unused by this function
#   silent:               if TRUE, suppress progress log messages
#
# Returns:
#   If the prediction has no 'outcomeCount' column, a list with just the
#   prediction. If calculatePerformance is FALSE (or only one row was
#   predicted), the prediction alone. Otherwise a list with prediction,
#   performanceEvaluation, inputSetting, executionSummary, model and
#   analysisRef elements.
applyModel <- function(population,
                       plpData,
                       plpModel,
                       calculatePerformance = TRUE,
                       databaseOutput = NULL,
                       silent = FALSE) {
  # Register a simple console logger if none is configured yet.
  if (length(ParallelLogger::getLoggers()) == 0) {
    logger <- ParallelLogger::createLogger(name = "SIMPLE",
                                           threshold = "INFO",
                                           appenders = list(ParallelLogger::createConsoleAppender(layout = 'layoutTimestamp')))
    ParallelLogger::registerLogger(logger)
  }
  # Validate inputs early. inherits() is used instead of class() == "...":
  # it is robust to objects carrying more than one class attribute.
  if (is.null(population))
    stop("NULL population")
  if (!inherits(plpData, "plpData"))
    stop("Incorrect plpData class")
  if (!inherits(plpModel, "plpModel"))
    stop("Incorrect plpModel class")
  peopleCount <- nrow(population)
  start.pred <- Sys.time()
  if (!silent) {
    ParallelLogger::logInfo(paste("Starting Prediction ", Sys.time(), "for ", peopleCount, " people"))
    if ('outcomeCount' %in% colnames(population)) {
      ParallelLogger::logInfo(paste("Outcome count: ", sum(population$outcomeCount > 0), " people"))
    }
  }
  prediction <- plpModel$predict(plpData = plpData, population = population)
  # BUG FIX: elapsed time is (now - start), not (start - now); the original
  # logged negative durations.
  delta <- Sys.time() - start.pred
  if (!silent)
    ParallelLogger::logInfo(paste("Prediction completed at ", Sys.time(), " taking ", signif(delta, 3), attr(delta, "units")))
  if (!"outcomeCount" %in% colnames(prediction))
    return(list(prediction = prediction))
  if (!calculatePerformance || nrow(prediction) == 1)
    return(prediction)
  # Evaluate the predictions, timed from the evaluation's own start so the
  # logged duration no longer includes the prediction step.
  start.eval <- Sys.time()
  if (!silent)
    ParallelLogger::logInfo(paste("Starting evaluation at ", Sys.time()))
  performance <- PatientLevelPrediction::evaluatePlp(prediction, plpData)
  # Reformat the performance tables, tagging each with the analysis id and
  # the 'validation' evaluation type.
  analysisId <- '000000'
  if (!is.null(plpModel$analysisId)) {
    analysisId <- plpModel$analysisId
  }
  nr1 <- length(unlist(performance$evaluationStatistics[-1]))
  performance$evaluationStatistics <- cbind(analysisId = rep(analysisId, nr1),
                                            Eval = rep('validation', nr1),
                                            Metric = names(unlist(performance$evaluationStatistics[-1])),
                                            Value = unlist(performance$evaluationStatistics[-1])
                                            )
  nr1 <- nrow(performance$thresholdSummary)
  performance$thresholdSummary <- tryCatch({cbind(analysisId = rep(analysisId, nr1),
                                                  Eval = rep('validation', nr1),
                                                  performance$thresholdSummary)},
                                           error = function(e){return(NULL)})
  nr1 <- nrow(performance$demographicSummary)
  if (!is.null(performance$demographicSummary)) {
    performance$demographicSummary <- cbind(analysisId = rep(analysisId, nr1),
                                            Eval = rep('validation', nr1),
                                            performance$demographicSummary)
  }
  nr1 <- nrow(performance$calibrationSummary)
  performance$calibrationSummary <- cbind(analysisId = rep(analysisId, nr1),
                                          Eval = rep('validation', nr1),
                                          performance$calibrationSummary)
  nr1 <- nrow(performance$predictionDistribution)
  performance$predictionDistribution <- tryCatch({cbind(analysisId = rep(analysisId, nr1),
                                                        Eval = rep('validation', nr1),
                                                        performance$predictionDistribution)},
                                                 error = function(e){return(NULL)})
  # BUG FIX: correct sign of elapsed-time computation (see above).
  delta <- Sys.time() - start.eval
  if (!silent)
    ParallelLogger::logInfo(paste("Evaluation completed at ", Sys.time(), " taking ", signif(delta, 3), attr(delta, "units")))
  if (!silent)
    ParallelLogger::logInfo(paste("Starting covariate summary at ", Sys.time()))
  start.cov <- Sys.time()
  # covSum <- covariateSummary(plpData, population, model = plpModel)
  delta <- Sys.time() - start.cov
  if (!silent)
    ParallelLogger::logInfo(paste("Covariate summary completed at ", Sys.time(), " taking ", signif(delta, 3), attr(delta, "units")))
  executionSummary <- list(PackageVersion = list(rVersion = R.Version()$version.string,
                                                 packageVersion = utils::packageVersion("PatientLevelPrediction")),
                           PlatformDetails = list(platform = R.Version()$platform,
                                                  cores = Sys.getenv('NUMBER_OF_PROCESSORS'),
                                                  # NOTE(review): utils::memory.size() is Windows-only —
                                                  # confirm behavior on other platforms.
                                                  RAM = utils::memory.size()),
                           TotalExecutionElapsedTime = NULL,
                           ExecutionDateTime = Sys.Date())
  result <- list(prediction = prediction,
                 performanceEvaluation = performance,
                 inputSetting = list(outcomeId = attr(population, "metaData")$outcomeId,
                                     cohortId = plpData$metaData$call$cohortId,
                                     database = plpData$metaData$call$cdmDatabaseSchema),
                 executionSummary = executionSummary,
                 model = list(model = 'applying plp model',
                              modelAnalysisId = plpModel$analysisId,
                              modelSettings = plpModel$modelSettings),
                 analysisRef = list(analysisId = NULL,
                                    analysisName = NULL,
                                    analysisSettings = NULL))
  return(result)
}
| /R/applyModel.R | permissive | ABMI/PsychosisMultimodalValidation | R | false | false | 5,668 | r | applyModel <- function(population,
                       plpData,
                       plpModel,
                       calculatePerformance = TRUE,
                       databaseOutput = NULL,
                       silent = FALSE) {
  # Apply a trained PLP model to a new population and, optionally, evaluate
  # the resulting predictions. Returns either the prediction alone or a
  # result list (prediction, performanceEvaluation, inputSetting,
  # executionSummary, model, analysisRef) — see the early-return branches.
  # Register a simple console logger if none is configured yet.
  if (length(ParallelLogger::getLoggers()) == 0) {
    logger <- ParallelLogger::createLogger(name = "SIMPLE",
                                           threshold = "INFO",
                                           appenders = list(ParallelLogger::createConsoleAppender(layout = 'layoutTimestamp')))
    ParallelLogger::registerLogger(logger)
  }
  # Validate inputs early. inherits() is used instead of class() == "...":
  # it is robust to objects carrying more than one class attribute.
  if (is.null(population))
    stop("NULL population")
  if (!inherits(plpData, "plpData"))
    stop("Incorrect plpData class")
  if (!inherits(plpModel, "plpModel"))
    stop("Incorrect plpModel class")
  peopleCount <- nrow(population)
  start.pred <- Sys.time()
  if (!silent) {
    ParallelLogger::logInfo(paste("Starting Prediction ", Sys.time(), "for ", peopleCount, " people"))
    if ('outcomeCount' %in% colnames(population)) {
      ParallelLogger::logInfo(paste("Outcome count: ", sum(population$outcomeCount > 0), " people"))
    }
  }
  prediction <- plpModel$predict(plpData = plpData, population = population)
  # BUG FIX: elapsed time is (now - start), not (start - now); the original
  # logged negative durations.
  delta <- Sys.time() - start.pred
  if (!silent)
    ParallelLogger::logInfo(paste("Prediction completed at ", Sys.time(), " taking ", signif(delta, 3), attr(delta, "units")))
  if (!"outcomeCount" %in% colnames(prediction))
    return(list(prediction = prediction))
  if (!calculatePerformance || nrow(prediction) == 1)
    return(prediction)
  # Evaluate the predictions, timed from the evaluation's own start so the
  # logged duration no longer includes the prediction step.
  start.eval <- Sys.time()
  if (!silent)
    ParallelLogger::logInfo(paste("Starting evaluation at ", Sys.time()))
  performance <- PatientLevelPrediction::evaluatePlp(prediction, plpData)
  # Reformat the performance tables, tagging each with the analysis id and
  # the 'validation' evaluation type.
  analysisId <- '000000'
  if (!is.null(plpModel$analysisId)) {
    analysisId <- plpModel$analysisId
  }
  nr1 <- length(unlist(performance$evaluationStatistics[-1]))
  performance$evaluationStatistics <- cbind(analysisId = rep(analysisId, nr1),
                                            Eval = rep('validation', nr1),
                                            Metric = names(unlist(performance$evaluationStatistics[-1])),
                                            Value = unlist(performance$evaluationStatistics[-1])
                                            )
  nr1 <- nrow(performance$thresholdSummary)
  performance$thresholdSummary <- tryCatch({cbind(analysisId = rep(analysisId, nr1),
                                                  Eval = rep('validation', nr1),
                                                  performance$thresholdSummary)},
                                           error = function(e){return(NULL)})
  nr1 <- nrow(performance$demographicSummary)
  if (!is.null(performance$demographicSummary)) {
    performance$demographicSummary <- cbind(analysisId = rep(analysisId, nr1),
                                            Eval = rep('validation', nr1),
                                            performance$demographicSummary)
  }
  nr1 <- nrow(performance$calibrationSummary)
  performance$calibrationSummary <- cbind(analysisId = rep(analysisId, nr1),
                                          Eval = rep('validation', nr1),
                                          performance$calibrationSummary)
  nr1 <- nrow(performance$predictionDistribution)
  performance$predictionDistribution <- tryCatch({cbind(analysisId = rep(analysisId, nr1),
                                                        Eval = rep('validation', nr1),
                                                        performance$predictionDistribution)},
                                                 error = function(e){return(NULL)})
  # BUG FIX: correct sign of elapsed-time computation (see above).
  delta <- Sys.time() - start.eval
  if (!silent)
    ParallelLogger::logInfo(paste("Evaluation completed at ", Sys.time(), " taking ", signif(delta, 3), attr(delta, "units")))
  if (!silent)
    ParallelLogger::logInfo(paste("Starting covariate summary at ", Sys.time()))
  start.cov <- Sys.time()
  # covSum <- covariateSummary(plpData, population, model = plpModel)
  delta <- Sys.time() - start.cov
  if (!silent)
    ParallelLogger::logInfo(paste("Covariate summary completed at ", Sys.time(), " taking ", signif(delta, 3), attr(delta, "units")))
  executionSummary <- list(PackageVersion = list(rVersion = R.Version()$version.string,
                                                 packageVersion = utils::packageVersion("PatientLevelPrediction")),
                           PlatformDetails = list(platform = R.Version()$platform,
                                                  cores = Sys.getenv('NUMBER_OF_PROCESSORS'),
                                                  # NOTE(review): utils::memory.size() is Windows-only —
                                                  # confirm behavior on other platforms.
                                                  RAM = utils::memory.size()),
                           TotalExecutionElapsedTime = NULL,
                           ExecutionDateTime = Sys.Date())
  result <- list(prediction = prediction,
                 performanceEvaluation = performance,
                 inputSetting = list(outcomeId = attr(population, "metaData")$outcomeId,
                                     cohortId = plpData$metaData$call$cohortId,
                                     database = plpData$metaData$call$cdmDatabaseSchema),
                 executionSummary = executionSummary,
                 model = list(model = 'applying plp model',
                              modelAnalysisId = plpModel$analysisId,
                              modelSettings = plpModel$modelSettings),
                 analysisRef = list(analysisId = NULL,
                                    analysisName = NULL,
                                    analysisSettings = NULL))
  return(result)
}
\name{gsBinomialExact}
\alias{gsBinomialExact}
\alias{print.gsBinomialExact}
\alias{plot.gsBinomialExact}
\alias{binomialSPRT}
\alias{plot.binomialSPRT}
%\alias{gsBinomialPP}
\alias{nBinomial1Sample}
\title{3.4: One-Sample Binomial Routines}
\description{
\code{gsBinomialExact} computes power/Type I error and expected sample size for a group sequential design
in a single-arm trial with a binary outcome.
This can also be used to compare event rates in two-arm studies.
The print function has been extended using \code{print.gsBinomialExact} to print \code{gsBinomialExact} objects; see examples.
Similarly, a plot function has been extended using \code{plot.gsBinomialExact} to plot \code{gsBinomialExact} objects; see examples.
\code{binomialSPRT} computes a truncated binomial sequential probability ratio test (SPRT) which is a specific instance of an exact binomial group sequential design for a single arm trial with a binary outcome.
%\code{gsBinomialPP} computes a truncated binomial (group) sequential design based on predictive probability.
\code{nBinomial1Sample} uses exact binomial calculations to compute power and sample size for single arm binomial experiments.
}
\usage{
gsBinomialExact(k=2, theta=c(.1, .2), n.I=c(50, 100), a=c(3, 7), b=c(20,30))
binomialSPRT(p0,p1,alpha,beta,minn,maxn)
nBinomial1Sample(p0 = 0.90, p1=0.95,
alpha = 0.025, beta=NULL,
n = 200:250, outtype=1, conservative=FALSE)
\method{plot}{gsBinomialExact}(x,plottype=1,\dots)
\method{plot}{binomialSPRT}(x,plottype=1,\dots)
}
\arguments{
\item{k}{Number of analyses planned, including interim and final.}
\item{theta}{Vector of possible underling binomial probabilities for a single binomial sample.}
\item{n.I}{Sample size at analyses (increasing positive integers); vector of length k.}
\item{a}{Number of "successes" required to cross lower bound cutoffs to reject \code{p1} in favor of \code{p0} at each analysis; vector of length k; -1 means no lower bound.}
\item{b}{Number of "successes" required to cross upper bound cutoffs for rejecting \code{p0} in favor of \code{p1} at each analysis; vector of length k.}
\item{p0}{Lower of the two response (event) rates hypothesized.}
\item{p1}{Higher of the two response (event) rates hypothesized.}
\item{alpha}{Nominal probability of rejecting response (event) rate \code{p0} when it is true.}
\item{beta}{Nominal probability of rejecting response (event) rate \code{p1} when it is true.}
\item{minn}{Minimum sample size at which sequential testing begins.}
\item{maxn}{Maximum sample size.}
\item{x}{Item of class \code{gsBinomialExact} or \code{binomialSPRT} for \code{print.gsBinomialExact}. Item of class \code{gsBinomialExact} for \code{plot.gsBinomialExact}. Item of class \code{binomialSPRT} for item of class \code{binomialSPRT}.}
\item{plottype}{1 produces a plot with counts of response at bounds (for \code{binomialSPRT}, also produces linear SPRT bounds); 2 produces a plot with power to reject null and alternate response rates as well as the probability of not crossing a bound by the maximum sample size; 3 produces a plot with the response rate at the boundary as a function of sample size when the boundary is crossed; 6 produces a plot of the expected sample size by the underlying event rate (this assumes there is no enrollment beyond the sample size where the boundary is crossed).}
\item{n}{sample sizes to be considered for \code{nBinomial1Sample}. These should be ordered from smallest to largest and be > 0.}
\item{outtype}{Operative when \code{beta != NULL}. \code{1} means routine will return a single integer sample size while for \code{output=2} or \code{3} a data frame is returned (see Value).}
\item{conservative}{operative when \code{outtype=1} or \code{2} and \code{beta != NULL}. Default \code{FALSE} selects minimum sample size for which power is at least \code{1-beta}. When \code{conservative=TRUE}, the minimum sample sample size for which power is at least \code{1-beta} and there is no larger sample size in the input \code{n} where power is less than \code{1-beta}.}
\item{\dots}{arguments passed through to \code{ggplot}.}
}
\details{
\code{gsBinomialExact} is based on the book "Group Sequential Methods with Applications to Clinical Trials,"
Christopher Jennison and Bruce W. Turnbull, Chapter 12, Section 12.1.2 Exact Calculations for Binary Data.
This computation is often used as an approximation for the distribution of the number of events in one treatment group out of all events when the probability of an event is small and sample size is large.
An object of class \code{gsBinomialExact} is returned.
On output, the values of \code{theta} input to \code{gsBinomialExact} will be the parameter values for which the boundary crossing probabilities and expected sample sizes are computed.
Note that a[1] equal to -1 lower bound at n.I[1] means 0 successes continues at interim 1; a[2]==0 at interim 2 means 0 successes stops trial for futility at 2nd analysis.
For final analysis, set a[k] equal to b[k]-1 to incorporate all possibilities into non-positive trial; see example.
The sequential probability ratio test (SPRT) is a sequential testing scheme allowing testing after each observation. This likelihood ratio is used to determine upper and lower cutoffs which are linear and parallel in the number of responses as a function of sample size.
\code{binomialSPRT} produces a variation of the SPRT that tests only within a range of sample sizes.
While the linear SPRT bounds are continuous, actual bounds are the integer number of response at or beyond each linear bound for each sample size where testing is performed. Because of the truncation and discretization of the bounds, power and Type I error achieve will be lower than the nominal levels specified by \code{alpha} and \code{beta} which can be altered to produce desired values that are achieved by the planned sample size. See also example that shows computation of Type I error when futility bound is considered non-binding.
Note that if the objective of a design is to demonstrate that a rate (e.g., failure rate) is lower than a certain level, two approaches can be taken. First, 1 minus the failure rate is the success rate and this can be used for planning. Second, the role of \code{beta} becomes to express Type I error and \code{alpha} is used to express Type II error.
Plots produced include boundary plots, expected sample size, response rate at the boundary and power.
\code{nBinomial1Sample} uses exact binomial computations based on the base R functions \code{qbinom()} and \code{pbinom()}. The tabular output may be convenient for plotting. Note that input variables are largely not checked, so the user is largely responsible for results; it is a good idea to do a run with \code{outtype=3} to check that you have done things appropriately.
If \code{n} is not ordered (a bad idea) or not sequential (maybe OK), be aware of possible consequences.
}
\value{
\code{gsBinomialExact()} returns a list of class \code{gsBinomialExact} and \code{gsProbability} (see example); when displaying one of these objects, the default function to print is \code{print.gsProbability()}.
The object returned from \code{gsBinomialExact()} contains the following elements:
\item{k}{As input.}
\item{theta}{As input.}
\item{n.I}{As input.}
\item{lower}{A list containing two elements: \code{bound} is as input in \code{a} and \code{prob} is a matrix of boundary
crossing probabilities. Element \code{i,j} contains the boundary crossing probability at analysis \code{i} for the \code{j}-th element of \code{theta} input. All boundary crossing is assumed to be binding for this computation; that is, the trial must stop if a boundary is crossed.}
\item{upper}{A list of the same form as \code{lower} containing the upper bound and upper boundary crossing probabilities.}
\item{en}{A vector of the same length as \code{theta} containing expected sample sizes for the trial design
corresponding to each value in the vector \code{theta}.}
\code{binomialSPRT} produces an object of class \code{binomialSPRT} that is an extension of the \code{gsBinomialExact} class. The values returned in addition to those returned by \code{gsBinomialExact} are:
\item{intercept}{A vector of length 2 with the intercepts for the two SPRT bounds.}
\item{slope}{A scalar with the common slope of the SPRT bounds.}
\item{alpha}{As input. Note that this will exceed the actual Type I error achieved by the design returned.}
\item{beta}{As input. Note that this will exceed the actual Type II error achieved by the design returned.}
\item{p0}{As input.}
\item{p1}{As input.}
\code{nBinomial1Sample} produces an integer if the input \code{outtype=1} and a data frame with the following values otherwise:
\item{p0}{Input null hypothesis event (or response) rate.}
\item{p1}{Input alternative hypothesis (or response) rate; must be \code{> p0}.}
\item{alpha}{Input Type I error.}
\item{beta}{Input Type II error except when input is \code{NULL} in which case realized Type II error is computed.}
\item{alphaR}{Type I error achieved for each output value of \code{n}; less than or equal to the input value \code{alpha}.}
\item{Power}{Power achieved for each output value of \code{n}.}
\item{n}{sample size.}
\item{b}{cutoff given \code{n} to control Type I error; value is \code{NULL} if no such value exists.}
}
\seealso{\code{\link{gsProbability}}}
\note{The manual is not linked to this help file, but is available in library/gsdesign/doc/gsDesignManual.pdf
in the directory where R is installed.}
\author{Jon Hartzel, Yevgen Tymofyeyev and Keaven Anderson \email{keaven\_anderson@merck.com}}
\references{
Jennison C and Turnbull BW (2000), \emph{Group Sequential Methods with Applications to Clinical Trials}.
Boca Raton: Chapman and Hall.
Code for nBinomial1Sample was based on code developed by marc_schwartz@me.com.
}
\examples{
zz <- gsBinomialExact(k=3,theta=seq(0,1,0.1), n.I=c(12,24,36),
a=c(-1, 0, 11), b=c( 5, 9, 12))
# let's see what class this is
class(zz)
# because of "gsProbability" class above, following is equivalent to
# print.gsProbability(zz)
zz
# also plot (see also plots below for \code{binomialSPRT})
# add lines using geom_line()
plot(zz) + geom_line()
# now for SPRT examples
x <- binomialSPRT(p0=.05,p1=.25,alpha=.1,beta=.2)
# boundary plot
plot(x)
# power plot
plot(x,plottype=2)
# Response (event) rate at boundary
plot(x,plottype=3)
# Expect sample size at boundary crossing or end of trial
plot(x,plottype=6)
# sample size for single arm exact binomial
# plot of table of power by sample size
nb1 <- nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:40, outtype=3)
nb1
library(scales)
ggplot(nb1,aes(x=n,y=Power))+geom_line()+geom_point()+scale_y_continuous(labels=percent)
# simple call with same parameters to get minimum sample size yielding desired power
nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:40)
# change to 'conservative' if you want all larger sample
# sizes to also provide adequate power
nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:40, conservative=TRUE)
# print out more information for the selected derived sample size
nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:40, conservative=TRUE,outtype=2)
# what happens if input sample sizes not sufficient?
nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:30)
}
\keyword{design}
| /man/gsBinomialExact.Rd | no_license | richarddmorey/gsDesign | R | false | false | 11,686 | rd | \name{gsBinomialExact}
\alias{gsBinomialExact}
\alias{print.gsBinomialExact}
\alias{plot.gsBinomialExact}
\alias{binomialSPRT}
\alias{plot.binomialSPRT}
%\alias{gsBinomialPP}
\alias{nBinomial1Sample}
\title{3.4: One-Sample Binomial Routines}
\description{
\code{gsBinomialExact} computes power/Type I error and expected sample size for a group sequential design
in a single-arm trial with a binary outcome.
This can also be used to compare event rates in two-arm studies.
The print function has been extended using \code{print.gsBinomialExact} to print \code{gsBinomialExact} objects; see examples.
Similarly, a plot function has been extended using \code{plot.gsBinomialExact} to plot \code{gsBinomialExact} objects; see examples.
\code{binomialSPRT} computes a truncated binomial sequential probability ratio test (SPRT) which is a specific instance of an exact binomial group sequential design for a single arm trial with a binary outcome.
%\code{gsBinomialPP} computes a truncated binomial (group) sequential design based on predictive probability.
\code{nBinomial1Sample} uses exact binomial calculations to compute power and sample size for single arm binomial experiments.
}
\usage{
gsBinomialExact(k=2, theta=c(.1, .2), n.I=c(50, 100), a=c(3, 7), b=c(20,30))
binomialSPRT(p0,p1,alpha,beta,minn,maxn)
nBinomial1Sample(p0 = 0.90, p1=0.95,
alpha = 0.025, beta=NULL,
n = 200:250, outtype=1, conservative=FALSE)
\method{plot}{gsBinomialExact}(x,plottype=1,\dots)
\method{plot}{binomialSPRT}(x,plottype=1,\dots)
}
\arguments{
\item{k}{Number of analyses planned, including interim and final.}
\item{theta}{Vector of possible underlying binomial probabilities for a single binomial sample.}
\item{n.I}{Sample size at analyses (increasing positive integers); vector of length k.}
\item{a}{Number of "successes" required to cross lower bound cutoffs to reject \code{p1} in favor of \code{p0} at each analysis; vector of length k; -1 means no lower bound.}
\item{b}{Number of "successes" required to cross upper bound cutoffs for rejecting \code{p0} in favor of \code{p1} at each analysis; vector of length k.}
\item{p0}{Lower of the two response (event) rates hypothesized.}
\item{p1}{Higher of the two response (event) rates hypothesized.}
\item{alpha}{Nominal probability of rejecting response (event) rate \code{p0} when it is true.}
\item{beta}{Nominal probability of rejecting response (event) rate \code{p1} when it is true.}
\item{minn}{Minimum sample size at which sequential testing begins.}
\item{maxn}{Maximum sample size.}
\item{x}{Item of class \code{gsBinomialExact} or \code{binomialSPRT} for \code{print.gsBinomialExact}. Item of class \code{gsBinomialExact} for \code{plot.gsBinomialExact}. Item of class \code{binomialSPRT} for item of class \code{binomialSPRT}.}
\item{plottype}{1 produces a plot with counts of response at bounds (for \code{binomialSPRT}, also produces linear SPRT bounds); 2 produces a plot with power to reject null and alternate response rates as well as the probability of not crossing a bound by the maximum sample size; 3 produces a plot with the response rate at the boundary as a function of sample size when the boundary is crossed; 6 produces a plot of the expected sample size by the underlying event rate (this assumes there is no enrollment beyond the sample size where the boundary is crossed).}
\item{n}{sample sizes to be considered for \code{nBinomial1Sample}. These should be ordered from smallest to largest and be > 0.}
\item{outtype}{Operative when \code{beta != NULL}. \code{1} means routine will return a single integer sample size while for \code{output=2} or \code{3} a data frame is returned (see Value).}
\item{conservative}{operative when \code{outtype=1} or \code{2} and \code{beta != NULL}. Default \code{FALSE} selects minimum sample size for which power is at least \code{1-beta}. When \code{conservative=TRUE}, the minimum sample sample size for which power is at least \code{1-beta} and there is no larger sample size in the input \code{n} where power is less than \code{1-beta}.}
\item{\dots}{arguments passed through to \code{ggplot}.}
}
\details{
\code{gsBinomialExact} is based on the book "Group Sequential Methods with Applications to Clinical Trials,"
Christopher Jennison and Bruce W. Turnbull, Chapter 12, Section 12.1.2 Exact Calculations for Binary Data.
This computation is often used as an approximation for the distribution of the number of events in one treatment group out of all events when the probability of an event is small and sample size is large.
An object of class \code{gsBinomialExact} is returned.
On output, the values of \code{theta} input to \code{gsBinomialExact} will be the parameter values for which the boundary crossing probabilities and expected sample sizes are computed.
Note that a[1] equal to -1 lower bound at n.I[1] means 0 successes continues at interim 1; a[2]==0 at interim 2 means 0 successes stops trial for futility at 2nd analysis.
For final analysis, set a[k] equal to b[k]-1 to incorporate all possibilities into non-positive trial; see example.
The sequential probability ratio test (SPRT) is a sequential testing scheme allowing testing after each observation. This likelihood ratio is used to determine upper and lower cutoffs which are linear and parallel in the number of responses as a function of sample size.
\code{binomialSPRT} produces a variation of the SPRT that tests only within a range of sample sizes.
While the linear SPRT bounds are continuous, actual bounds are the integer number of response at or beyond each linear bound for each sample size where testing is performed. Because of the truncation and discretization of the bounds, power and Type I error achieve will be lower than the nominal levels specified by \code{alpha} and \code{beta} which can be altered to produce desired values that are achieved by the planned sample size. See also example that shows computation of Type I error when futility bound is considered non-binding.
Note that if the objective of a design is to demonstrate that a rate (e.g., failure rate) is lower than a certain level, two approaches can be taken. First, 1 minus the failure rate is the success rate and this can be used for planning. Second, the role of \code{beta} becomes to express Type I error and \code{alpha} is used to express Type II error.
Plots produced include boundary plots, expected sample size, response rate at the boundary and power.
\code{nBinomial1Sample} uses exact binomial computations based on the base R functions \code{qbinom()} and \code{pbinom()}. The tabular output may be convenient for plotting. Note that input variables are largely not checked, so the user is largely responsible for results; it is a good idea to do a run with \code{outtype=3} to check that you have done things appropriately.
If \code{n} is not ordered (a bad idea) or not sequential (maybe OK), be aware of possible consequences.
}
\value{
\code{gsBinomialExact()} returns a list of class \code{gsBinomialExact} and \code{gsProbability} (see example); when displaying one of these objects, the default function to print is \code{print.gsProbability()}.
The object returned from \code{gsBinomialExact()} contains the following elements:
\item{k}{As input.}
\item{theta}{As input.}
\item{n.I}{As input.}
\item{lower}{A list containing two elements: \code{bound} is as input in \code{a} and \code{prob} is a matrix of boundary
crossing probabilities. Element \code{i,j} contains the boundary crossing probability at analysis \code{i} for the \code{j}-th element of \code{theta} input. All boundary crossing is assumed to be binding for this computation; that is, the trial must stop if a boundary is crossed.}
\item{upper}{A list of the same form as \code{lower} containing the upper bound and upper boundary crossing probabilities.}
\item{en}{A vector of the same length as \code{theta} containing expected sample sizes for the trial design
corresponding to each value in the vector \code{theta}.}
\code{binomialSPRT} produces an object of class \code{binomialSPRT} that is an extension of the \code{gsBinomialExact} class. The values returned in addition to those returned by \code{gsBinomialExact} are:
\item{intercept}{A vector of length 2 with the intercepts for the two SPRT bounds.}
\item{slope}{A scalar with the common slope of the SPRT bounds.}
\item{alpha}{As input. Note that this will exceed the actual Type I error achieved by the design returned.}
\item{beta}{As input. Note that this will exceed the actual Type II error achieved by the design returned.}
\item{p0}{As input.}
\item{p1}{As input.}
\code{nBinomial1Sample} produces an integer if the input \code{outtype=1} and a data frame with the following values otherwise:
\item{p0}{Input null hypothesis event (or response) rate.}
\item{p1}{Input alternative hypothesis (or response) rate; must be \code{> p0}.}
\item{alpha}{Input Type I error.}
\item{beta}{Input Type II error except when input is \code{NULL} in which case realized Type II error is computed.}
\item{alphaR}{Type I error achieved for each output value of \code{n}; less than or equal to the input value \code{alpha}.}
\item{Power}{Power achieved for each output value of \code{n}.}
\item{n}{sample size.}
\item{b}{cutoff given \code{n} to control Type I error; value is \code{NULL} if no such value exists.}
}
\seealso{\code{\link{gsProbability}}}
\note{The manual is not linked to this help file, but is available in library/gsdesign/doc/gsDesignManual.pdf
in the directory where R is installed.}
\author{Jon Hartzel, Yevgen Tymofyeyev and Keaven Anderson \email{keaven\_anderson@merck.}}
\references{
Jennison C and Turnbull BW (2000), \emph{Group Sequential Methods with Applications to Clinical Trials}.
Boca Raton: Chapman and Hall.
Code for nBinomial1Sample was based on code developed by marc_schwartz@me.com.
}
\examples{
zz <- gsBinomialExact(k=3,theta=seq(0,1,0.1), n.I=c(12,24,36),
a=c(-1, 0, 11), b=c( 5, 9, 12))
# let's see what class this is
class(zz)
# because of "gsProbability" class above, following is equivalent to
# print.gsProbability(zz)
zz
# also plot (see also plots below for \code{binomialSPRT})
# add lines using geom_line()
plot(zz) + geom_line()
# now for SPRT examples
x <- binomialSPRT(p0=.05,p1=.25,alpha=.1,beta=.2)
# boundary plot
plot(x)
# power plot
plot(x,plottype=2)
# Response (event) rate at boundary
plot(x,plottype=3)
# Expect sample size at boundary crossing or end of trial
plot(x,plottype=6)
# sample size for single arm exact binomial
# plot of table of power by sample size
nb1 <- nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:40, outtype=3)
nb1
library(scales)
ggplot(nb1,aes(x=n,y=Power))+geom_line()+geom_point()+scale_y_continuous(labels=percent)
# simple call with same parameters to get minimum sample size yielding desired power
nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:40)
# change to 'conservative' if you want all larger sample
# sizes to also provide adequate power
nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:40, conservative=TRUE)
# print out more information for the selected derived sample size
nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:40, conservative=TRUE,outtype=2)
# what happens if input sample sizes not sufficient?
nBinomial1Sample(p0 = 0.05, p1=0.2,alpha = 0.025, beta=.2, n = 25:30)
}
\keyword{design}
|
##Plot3
## Course-project script: plot household sub-metering energy use for
## 2007-02-01..2007-02-02 and save the figure as plot3.png.
## NOTE(review): setwd() and View() make this interactive-only; the raw data
## file is assumed to live in the working directory.
setwd("~/Datasciencecoursera/ExData_Plotting1")
library(data.table)
##Step 1: Load and Read file
electric<-read.table("./household_power_consumption.txt",header=T,sep=";")
View(electric)
##Step 2: Clean Data
electric$Date<-as.Date(electric$Date,format="%d/%m/%Y") #change the date format
View(electric) #view to make sure date is in correct format
electricSubset<-electric[(electric$Date=="2007-02-01")|(electric$Date=="2007-02-02"),] #create subset of date
View(electricSubset) #view to make sure correct- now have 2880 observations
str(electricSubset) #look at the properties of each variable. Global Active Power, Sub metering 1, 2, 3 are all factors instead of a numbers.
electricSubset$Global_active_power<-as.numeric(as.character(electricSubset$Global_active_power)) #Change Global Active Power to number
electricSubset$Sub_metering_1<-as.numeric(as.character(electricSubset$Sub_metering_1))
electricSubset$Sub_metering_2<-as.numeric(as.character(electricSubset$Sub_metering_2))
electricSubset$Sub_metering_3<-as.numeric(as.character(electricSubset$Sub_metering_3))
str(electricSubset) #Verify that variable types have changed
#NOTE(review): the format string below is passed to transform() as an extra
#unnamed argument, NOT to as.POSIXct(); it most likely has no effect (the
#pasted Date/Time already parse with the default format) - confirm.
electricSubset<-transform(electricSubset,timeCombined=as.POSIXct(paste(Date,Time)),"%d/%m/%Y %H:%M:%S") #Add new Date and Time combined variable and format as POSIXct class
View(electricSubset) #verify new column created
str(electricSubset$timeCombined) #check to make sure new column is in POSIXct format
#Step 3: Create the Plot
par(mfrow=c(1,1)) #single plot
#Create plot with timeCombined on x axis and submetering 1,2,3 on y axis, type=l for lines, no label on x axes and add label for y axis
plot(electricSubset$timeCombined,electricSubset$Sub_metering_1,type="l",xlab="",ylab="Energy sub metering")
lines(electricSubset$timeCombined,electricSubset$Sub_metering_2,col="red")
lines(electricSubset$timeCombined,electricSubset$Sub_metering_3,col="blue")
#Add legend in top right
legend("topright",col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1),lwd=c(1,1),cex=.8)
#Step 4: Copy plot to a PNG File
dev.copy(png,file="plot3.png",width=480,height=480) #save copy as PNG and make file 480x480
dev.off() #close graphics device
| /plot3.R | no_license | vcarreon/ExData_Plotting1 | R | false | false | 2,222 | r | ##Plot3
## Course-project script: plot household sub-metering energy use for
## 2007-02-01..2007-02-02 and save the figure as plot3.png.
## NOTE(review): setwd() and View() make this interactive-only; the raw data
## file is assumed to live in the working directory.
setwd("~/Datasciencecoursera/ExData_Plotting1")
library(data.table)
##Step 1: Load and Read file
electric<-read.table("./household_power_consumption.txt",header=T,sep=";")
View(electric)
##Step 2: Clean Data
electric$Date<-as.Date(electric$Date,format="%d/%m/%Y") #change the date format
View(electric) #view to make sure date is in correct format
electricSubset<-electric[(electric$Date=="2007-02-01")|(electric$Date=="2007-02-02"),] #create subset of date
View(electricSubset) #view to make sure correct- now have 2880 observations
str(electricSubset) #look at the properties of each variable. Global Active Power, Sub metering 1, 2, 3 are all factors instead of a numbers.
electricSubset$Global_active_power<-as.numeric(as.character(electricSubset$Global_active_power)) #Change Global Active Power to number
electricSubset$Sub_metering_1<-as.numeric(as.character(electricSubset$Sub_metering_1))
electricSubset$Sub_metering_2<-as.numeric(as.character(electricSubset$Sub_metering_2))
electricSubset$Sub_metering_3<-as.numeric(as.character(electricSubset$Sub_metering_3))
str(electricSubset) #Verify that variable types have changed
#NOTE(review): the format string below is passed to transform() as an extra
#unnamed argument, NOT to as.POSIXct(); it most likely has no effect (the
#pasted Date/Time already parse with the default format) - confirm.
electricSubset<-transform(electricSubset,timeCombined=as.POSIXct(paste(Date,Time)),"%d/%m/%Y %H:%M:%S") #Add new Date and Time combined variable and format as POSIXct class
View(electricSubset) #verify new column created
str(electricSubset$timeCombined) #check to make sure new column is in POSIXct format
#Step 3: Create the Plot
par(mfrow=c(1,1)) #single plot
#Create plot with timeCombined on x axis and submetering 1,2,3 on y axis, type=l for lines, no label on x axes and add label for y axis
plot(electricSubset$timeCombined,electricSubset$Sub_metering_1,type="l",xlab="",ylab="Energy sub metering")
lines(electricSubset$timeCombined,electricSubset$Sub_metering_2,col="red")
lines(electricSubset$timeCombined,electricSubset$Sub_metering_3,col="blue")
#Add legend in top right
legend("topright",col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1),lwd=c(1,1),cex=.8)
#Step 4: Copy plot to a PNG File
dev.copy(png,file="plot3.png",width=480,height=480) #save copy as PNG and make file 480x480
dev.off() #close graphics device
dev.off() #close graphics device
|
\name{maply}
\alias{maply}
\title{Call function with arguments in array or data frame, returning an array.}
\usage{
maply(.data, .fun = NULL, ..., .expand = TRUE,
.progress = "none", .parallel = FALSE)
}
\arguments{
\item{.data}{matrix or data frame to use as source of
arguments}
\item{.fun}{function to be called with varying arguments}
\item{...}{other arguments passed on to \code{.fun}}
\item{.expand}{should output be 1d (expand = FALSE), with
an element for each row; or nd (expand = TRUE), with a
dimension for each variable.}
\item{.progress}{name of the progress bar to use, see
\code{\link{create_progress_bar}}}
\item{.parallel}{if \code{TRUE}, apply function in
parallel, using parallel backend provided by foreach}
}
\value{
if results are atomic with same type and dimensionality,
a vector, matrix or array; otherwise, a list-array (a
list with dimensions)
}
\description{
Call a multi-argument function with values taken from
columns of a data frame or array, and combine results
into an array
}
\details{
The \code{m*ply} functions are the \code{plyr} version of
\code{mapply}, specialised according to the type of
output they produce. These functions are just a
convenient wrapper around \code{a*ply} with \code{margins
= 1} and \code{.fun} wrapped in \code{\link{splat}}.
This function combines the result into an array. If
there are no results, then this function will return a
vector of length 0 (\code{vector()}).
}
\examples{
maply(cbind(mean = 1:5, sd = 1:5), rnorm, n = 5)
maply(expand.grid(mean = 1:5, sd = 1:5), rnorm, n = 5)
maply(cbind(1:5, 1:5), rnorm, n = 5)
}
\references{
Hadley Wickham (2011). The Split-Apply-Combine Strategy
for Data Analysis. Journal of Statistical Software,
40(1), 1-29. \url{http://www.jstatsoft.org/v40/i01/}.
}
\keyword{manip}
| /man/maply.Rd | no_license | vspinu/plyr | R | false | false | 1,858 | rd | \name{maply}
\alias{maply}
\title{Call function with arguments in array or data frame, returning an array.}
\usage{
maply(.data, .fun = NULL, ..., .expand = TRUE,
.progress = "none", .parallel = FALSE)
}
\arguments{
\item{.data}{matrix or data frame to use as source of
arguments}
\item{.fun}{function to be called with varying arguments}
\item{...}{other arguments passed on to \code{.fun}}
\item{.expand}{should output be 1d (expand = FALSE), with
an element for each row; or nd (expand = TRUE), with a
dimension for each variable.}
\item{.progress}{name of the progress bar to use, see
\code{\link{create_progress_bar}}}
\item{.parallel}{if \code{TRUE}, apply function in
parallel, using parallel backend provided by foreach}
}
\value{
if results are atomic with same type and dimensionality,
a vector, matrix or array; otherwise, a list-array (a
list with dimensions)
}
\description{
Call a multi-argument function with values taken from
columns of a data frame or array, and combine results
into an array
}
\details{
The \code{m*ply} functions are the \code{plyr} version of
\code{mapply}, specialised according to the type of
output they produce. These functions are just a
convenient wrapper around \code{a*ply} with \code{margins
= 1} and \code{.fun} wrapped in \code{\link{splat}}.
This function combines the result into an array. If
there are no results, then this function will return a
vector of length 0 (\code{vector()}).
}
\examples{
maply(cbind(mean = 1:5, sd = 1:5), rnorm, n = 5)
maply(expand.grid(mean = 1:5, sd = 1:5), rnorm, n = 5)
maply(cbind(1:5, 1:5), rnorm, n = 5)
}
\references{
Hadley Wickham (2011). The Split-Apply-Combine Strategy
for Data Analysis. Journal of Statistical Software,
40(1), 1-29. \url{http://www.jstatsoft.org/v40/i01/}.
}
\keyword{manip}
|
library(hexbin)
library(ggplot2)
library(plyr)
library(reshape)
library(hexbin)
library(RColorBrewer)
library(reshape)
library(ggdendro)
x <- "yea this works..."
getFullPath <- function(projectPath){ paste(projectDir,projectPath,sep="/") }
exportAsTable <- function(df, file){ write.table(df,file=file,quote=FALSE, row.names=FALSE,sep="\t") }
clear <- function(save.vec=c()){ ls.vec <- ls(globalenv());del.vec <-setdiff(ls.vec,c(save.vec,"clear")); rm(list=del.vec,pos=globalenv())}
readInTable <- function(file) read.table(file=file,stringsAsFactors=FALSE,header=TRUE)
# Test whether scalar `x` lies in the closed interval [lower, upper].
# Uses && (scalar, short-circuiting) as the original did, so this is for
# length-1 inputs. The redundant if/else returning TRUE/FALSE is gone:
# the comparison already yields the logical result.
withinRange3 <- function(x, lower, upper) {
  x >= lower && x <= upper
}
# Shannon entropy (base 2) of the counts/weights supplied in `...`.
# Inputs are normalized to a probability vector; zero entries (which give
# 0 * log2(0) = NaN) are treated as contributing nothing.
entropy <- function(...) {
  p <- c(...)
  p <- p / sum(p)
  contrib <- p * log2(p)
  contrib[is.na(contrib)] <- 0
  -sum(contrib)
}
# Shannon entropy of a weight vector `x`, with a configurable log base via
# `logFn` (default log2). Zero entries contribute nothing (NaN terms zeroed).
entropyVec <- function(x, logFn = log2) {
  p <- x / sum(x)
  contrib <- logFn(p) * p
  contrib[is.na(contrib)] <- 0
  -sum(contrib)
}
# Jensen-Shannon divergence between two weight vectors, each normalized to a
# probability distribution first: H(m) - (H(p) + H(q)) / 2 where m = (p+q)/2.
JS <- function(x, y, logFn = log2) {
  p <- x / sum(x)
  q <- y / sum(y)
  m <- (p + q) / 2
  entropyVec(m, logFn) - (entropyVec(p, logFn) + entropyVec(q, logFn)) / 2
}
# Jensen-Shannon specificity score: 1 - sqrt(JS divergence). Ranges over
# [0, 1]; 1 means the two (normalized) profiles are identical.
JSsp <- function(e1, e2, logFn = log2) {
  1 - sqrt(JS(e1, e2, logFn))
}
# Maximum JS specificity of expression profile `x` against the ideal
# single-tissue profiles (rows of the identity matrix, one per entry of
# `cols`). Returns the best score; the matching tissue name is not returned.
calcTissSpec <- function(x, cols, logFn = log2) {
  x <- sapply(x, as.numeric)
  ideal <- diag(length(cols))
  scores <- sapply(seq_along(cols), function(i) JSsp(x, ideal[i, ], logFn))
  max(scores)
}
# Like calcTissSpec(), but the number of tissues is taken from length(x)
# itself rather than a separate `cols` vector. Returns the maximum JS
# specificity of `x` against the ideal single-tissue profiles.
calcTissSpecVector <- function(x, logFn = log2) {
  x <- sapply(x, as.numeric)
  ideal <- diag(length(x))
  scores <- sapply(seq_along(x), function(i) JSsp(x, ideal[i, ], logFn))
  max(scores)
}
# Maximum JS specificity of an expression profile against the ideal
# single-tissue profiles (identity-matrix rows). Generalized from the
# previous hard-coded 5-tissue list of profiles: results are unchanged for
# length-5 input, and other lengths now work too. The old lookup of the
# winning cell-type name was dead code (its value was discarded before the
# return) and has been removed; the grown `specEn` vector is replaced by a
# preallocating vapply.
tissSpec <- function(...) {
  x <- unlist(...)
  ideal <- diag(length(x))
  specEn <- vapply(seq_along(x), function(i) JSsp(x, ideal[i, ]), numeric(1))
  max(specEn)
}
# Log of the shifted minimum of X: log(min(X) + c).
# Bug fix: the original ignored its shift parameter `c` and always added 1;
# this honors `c`, and the default c = 1 reproduces the original behavior.
# NOTE(review): a per-element form such as log(X - min(X) + c) may have been
# the real intent -- confirm with callers.
logNormal <- function(X, c = 1) {
  log(min(X) + c)
}
# Name (from `cols`) of the column holding the maximum of `x`, or "none"
# when the row is effectively unexpressed (sum below 1e-11).
# Fix: the original used ifelse() on a scalar condition, which silently
# truncated the multi-match result of which(x == max(x)) to its first
# element; a plain if/else with which.max() makes that first-tie behavior
# explicit while returning the same value.
maxExpr <- function(x, cols) {
  if (sum(x) > 0.00000000001) {
    cols[which.max(x)]
  } else {
    "none"
  }
}
# Name (from `cols`) of the dominant column when its value exceeds three
# times the runner-up, otherwise "none".
# Fix: scalar ifelse() replaced with explicit if/else; the original's
# ifelse() silently truncated ties to the first match, which which.max()
# now does explicitly (same result). sort() is done once instead of twice.
threeTimesRestSpecifity <- function(expr.vec, cols) {
  ranked <- sort(expr.vec, decreasing = TRUE)
  if (ranked[1] > 3 * ranked[2]) {
    cols[which.max(expr.vec)]
  } else {
    "none"
  }
}
# Ratio of the largest to the second-largest value of `expr.vec`:
#   0   when both top values are zero,
#   top/runner-up when both are positive,
#   Inf otherwise (e.g. runner-up is zero while the top is positive).
# `cols` is accepted for signature compatibility but unused.
nTimesRestSpecifity <- function(expr.vec, cols) {
  ranked <- sort(expr.vec, decreasing = TRUE)
  top <- ranked[1]
  runner.up <- ranked[2]
  if (top == 0 && runner.up == 0) {
    0
  } else if (top > 0 && runner.up > 0) {
    top / runner.up
  } else {
    Inf
  }
}
# Apply FUN to each row of df[cols] and append the result as a new column
# named `newCol`; returns the augmented data frame. (Row-wise apply()
# coerces the selected columns to a matrix, as in the original.)
applyAndAppendFnDf <- function(df = df, cols = cols, FUN = fn, newCol = newCol) {
  df[newCol] <- apply(df[as.vector(cols)], 1, function(...) FUN(c(...)))
  df
}
# Like applyAndAppendFnDf(), but FUN takes a fixed second argument `arg2`:
# appends FUN(row, arg2) over rows of df[cols] as column `newCol`.
applyAndAppendFnDfSecondArg <- function(df = df, cols = cols, FUN = fn, newCol = newCol, arg2 = arg2) {
  df[newCol] <- apply(df[as.vector(cols)], 1, function(...) FUN(c(...), arg2))
  df
}
# Bin an expression vector into a 4-level factor: "none" for unexpressed
# entries and "low"/"mid"/"high" for tertiles of the positive values (so the
# three expressed classes have roughly equal counts).
exprLabels <- function(x){
# y<-cut(x, quantile(x[which( x> 0)],(0:3)/3),labels=c("low","mid","high"))
output.vec <- rep("none",length(x))
# cut() against tertile breaks computed from the positive values only.
# NOTE(review): cut() excludes the lowest break by default, so the smallest
# positive value comes back NA and is labeled "none" -- confirm intended.
expr.labels.wNA <- as.character(cut(x, quantile(x[which( x> 0)],(0:3)/3),labels=c("low","mid","high")))
# Entries that got a tertile label overwrite the "none" default; NAs
# (zeros / below the lowest break) keep "none".
expr.labels.index <- !is.na(expr.labels.wNA)
output.vec[expr.labels.index] <- expr.labels.wNA[expr.labels.index]
factor(output.vec)
}
# Append one "<col>.ExprLabel" column per expression column in `cols`,
# holding the none/low/mid/high label from exprLabels() for that column
# (tertiles of positive values, so low/mid/high counts are balanced).
assignExprLabels <- function(df,cols){
# apply() over the data frame runs exprLabels() column-wise.
# NOTE(review): the trailing comma in this as.data.frame() call is a
# harmless empty argument, but looks accidental.
labels.df <- as.data.frame(apply(df[as.vector(cols)],2,exprLabels),)
colnames(labels.df) <- as.vector(sapply(colnames(labels.df),function(x)paste(x,"ExprLabel",sep=".")))
cbind(df,labels.df)
}
# Order rows by total expression (`sumExpr`, highest first) and append a
# 1-based `rank` column; used to find the maximal transcript per gene.
# Fix: rank length now comes from seq_len(nrow(...)) instead of
# seq(1, length(df$transcript_id)), which both breaks on 0-row input
# (seq(1, 0) yields c(1, 0)) and needlessly ties the row count to one column.
rankSumExpr <- function(df) {
  ranked <- df[order(df$sumExpr, decreasing = TRUE), ]
  ranked$rank <- seq_len(nrow(ranked))
  ranked
}
# Summarize how many rows are expressed (value > 0) in the polyA and/or
# nonPolyA pulldown columns of `df`. Returns a one-row data frame of counts:
# both, neither, either, each pulldown alone, exclusive to each, and total.
# Comparisons against NA are dropped from counts (as which() did originally).
getExprComp <- function(df) {
  pos.pa <- df$longPolyAexpr > 0
  pos.npa <- df$longNonPolyAexpr > 0
  zero.pa <- df$longPolyAexpr == 0
  zero.npa <- df$longNonPolyAexpr == 0
  data.frame(
    exprInBoth = sum(pos.pa & pos.npa, na.rm = TRUE),
    none = sum(zero.pa & zero.npa, na.rm = TRUE),
    either = sum(pos.pa | pos.npa, na.rm = TRUE),
    polyA = sum(pos.pa, na.rm = TRUE),
    nonPolyA = sum(pos.npa, na.rm = TRUE),
    polyAOnly = sum(pos.pa & zero.npa, na.rm = TRUE),
    nonPolyAOnly = sum(zero.pa & pos.npa, na.rm = TRUE),
    total = length(df$longNonPolyAexpr)
  )
}
# Append per-row summary and specificity columns computed over the
# expression columns `cols`: entropy, sum, variance, mean, min, max, the
# name of the max column ("maxExprType"), 3x-rest and n-times-rest
# specificity, JS tissue specificity, per-column low/mid/high labels
# (assignExprLabels), and a yes/no "expressed" flag.
appendAnalysis <- function(df,cols){
df <- applyAndAppendFnDf(df,cols,entropy,"entropyExpr")
df <- applyAndAppendFnDf(df,cols,sum,"sumExpr")
df <- applyAndAppendFnDf(df,cols,var,"varianceExpr")
df <- applyAndAppendFnDf(df,cols,mean,"averageExpr")
df <- applyAndAppendFnDf(df,cols,min,"minExpr")
df <- applyAndAppendFnDf(df,cols,max,"maxExpr")
df <- applyAndAppendFnDf(df,cols,function(x)maxExpr(x,cols),"maxExprType")
df <- applyAndAppendFnDf(df=df,cols=cols,FUN=function(x)threeTimesRestSpecifity(x,cols),"threeTimesRest")
df <- applyAndAppendFnDf(df=df,cols=cols,FUN=function(x)nTimesRestSpecifity(x,cols),"nTimesRest")
df <- applyAndAppendFnDf(df=df,cols=cols,FUN=function(x)calcTissSpec(x,cols),"tissSpec")
df <- assignExprLabels(df,cols=cols)
# "expressed" is yes iff maxExpr() found a dominant column, i.e. the row's
# expression sum was above its near-zero threshold.
df$expressed = "no"
df[which(df$maxExprType != "none"),]$expressed <- "yes"
df
}
getTranscriptForMaxCols <- function(t1)cast(ldply(apply(t1,2,function(x)t1[which(x == max(x)),"transcript_id"]),function(x)x[1]), ~ .id )
applyTranscriptForMaxCols <- function(df,geneCol="gene_id")ldply(split(cd.expr.df,cd.expr.df[c(geneCol)]),getTranscriptForMaxCols)
# Build a scratch-output file path under `outdir`. When `x` is omitted, a
# name of the form "<user.cpu.time>-anon" is generated.
# Bug fix: the original pasted onto the undefined global `outdir.tmp`,
# ignoring the `outdir` argument entirely.
makeTmpOutFile <- function(x, outdir = "/Users/adam/Desktop/") {
  if (missing(x)) {
    x <- paste(as.vector(proc.time()[1]), "anon", sep = "-")
  }
  paste(outdir, x, sep = "")
}
#if (!file.exists(outdir)){
# print("WARNING: cannot access output dir for plots")
#}
# Read the full GENCODE expression table from `exprFile` and append the
# summary/specificity columns via appendAnalysis(); `exprCols` indexes the
# expression columns.
# Bug fix: the body read the undefined global `expr.file` instead of the
# `exprFile` argument, so the default/user-supplied path was ignored.
getFullGencodeExpr <- function(exprFile = "/Volumes/MyBook/Volume1/scratch/encodeRnaSeq/cshl/allCellsCombined.space", exprCols = 2:33) {
  appendAnalysis(read.table(file = exprFile, header = TRUE, stringsAsFactors = FALSE), exprCols)
}
# Prepare a gene-level expression data frame for the pulldown comparison:
# keep only rows with positive total expression, mirror gene_id into a
# transcript_id column, and add a zeroed Genc_polyA placeholder column.
preProcessDf <- function(df) {
  kept <- df[which(df$sumExpr > 0), ]
  kept$transcript_id <- kept$gene_id
  kept$Genc_polyA <- rep(0, length(kept$gene_id))
  kept
}
# Reshape a wide expression table (one column per <cell>.<pulldown> sample)
# into a long data frame with one row per transcript x cell x pulldown, and
# classify each transcript/cell pair by which pulldown(s) detected it
# ("both", "lpaOnly", "lnpaOnly", "none"). Relies on reshape::melt.
#   df           wide data frame; needs gene_id/sumExpr (for preProcessDf)
#                plus <cell>.longPolyA / <cell>.longNonPolyA columns
#   label        transcript-class tag (e.g. "lncRNA") stored in the output
#   exprColIndex column indices of the expression columns
getPolyACompDf <- function(df, label,exprColIndex){
df <- preProcessDf(df)
# Derive cell-type names by splitting column names on ".long".
# NOTE(review): ".long" is a regex ("." matches any char) -- fine for these
# column names, but fragile in general.
expr.cols <- sort(colnames(df[exprColIndex]))
expr.uniq.cols <- unique(unlist(lapply(colnames(df[exprColIndex]),function(x)strsplit(x,".long"))))
expr.cells = expr.uniq.cols[grep("Poly",expr.uniq.cols,invert=TRUE)]
expr.cols.polyA <- as.vector(sapply(expr.cells,function(x)paste(x,"longPolyA",sep=".")))
expr.cols.nonPolyA <- as.vector(sapply(expr.cells,function(x)paste(x,"longNonPolyA",sep=".")))
# Melt the polyA columns to long form; cellType is the column-name prefix.
lpa <- melt(df[c("transcript_id",expr.cols.polyA)],measure.vars=sort(expr.cols.polyA),id.vars="transcript_id")
colnames(lpa) <- c("transcript_id", "expr","longPolyAexpr")
lpa$cellType <- as.vector(sapply(sapply(as.vector(lpa$expr),function(x){strsplit(x,'\\.')[1]}),function(x)x[1]))
lpa$seqPullDown <- "longPolyA"
colnames(lpa) <- c("transcript_id", "expr", "expression" ,"cellType" ,"seqPullDown" )
# Same for the nonPolyA columns.
lnpa <- melt(df[c("transcript_id",expr.cols.nonPolyA)],measure.vars=sort(expr.cols.nonPolyA),id.vars="transcript_id")
colnames(lnpa) <- c("transcript_id", "expr","longNonPolyAexpr")
lnpa$cellType <- as.vector(sapply(sapply(as.vector(lnpa$expr),function(x){strsplit(x,'\\.')[1]}),function(x)x[1]))
lnpa$seqPullDown <- "longNonPolyA"
colnames(lnpa) <- c("transcript_id", "expr", "expression" ,"cellType" ,"seqPullDown")
# Join the two pulldowns side by side per transcript/cell pair.
tm.lnpa <- lnpa[c("transcript_id","cellType","expression")]
colnames(tm.lnpa) <- c("transcript_id","cellType","longNonPolyA")
tm.lpa <- lpa[c("transcript_id","cellType","expression")]
colnames(tm.lpa) <- c("transcript_id","cellType","longPolyA")
comb <- merge(tm.lnpa,tm.lpa,by=c("transcript_id","cellType"))
comb <- transform(comb,sum= longNonPolyA + longPolyA)
# calcMix <- function(lnpa,lpa){
# sumIn=lnpa+lpa
# if (sumIn > 0){
# if (lnpa == 0){
# "longPolyA"
# }
# if (lpa == 0){
# "longNonPolyA"
# }
# else {
# "both"
# }
# }
# else {
# "neither"
# }
# "default"
# }
#comb <- transform(comb, expMix = function)
comb <- transform(comb,logsum= log(longNonPolyA) + log(longPolyA))
comb <- transform(comb,product=longNonPolyA * longPolyA)
# Classify support: product > 0 means both pulldowns saw it; product == 0
# with a positive sum means exactly one did; both zero means neither.
comb$exprMix <- "both"
comb[intersect(intersect(which(comb$product == 0),which(comb$sum > 0)),which(comb$longNonPolyA == 0)),]$exprMix <- "lpaOnly"
comb[intersect(intersect(which(comb$product == 0),which(comb$sum > 0)),which(comb$longPolyA == 0)),]$exprMix <- "lnpaOnly"
comb[intersect(which(comb$product == 0),which(comb$sum == 0)),]$exprMix <- "none"
# Melt back to one row per pulldown; renamed columns 6/7 become
# polyApulldown and RPKM.
mc <- melt(comb,id.var=c("transcript_id","cellType","sum","logsum","exprMix"),measure.va=c("longNonPolyA","longPolyA"))
colnames(mc)<- c("transcript_id", "cellType", "sum", "logsum", "exprMix","polyApulldown", "RPKM")
mc$label <- label
mc
}
# Compare mRNA vs lncRNA RPKM distributions per polyA pulldown, split by
# expression-mix class ("both", "lnpaOnly", "lpaOnly"), and write a series of
# freqpoly / density / boxplot / histogram PDFs into `outdir`.
#   outdir   - output directory for generated PDFs
#   lnc.file - lncRNA expression table (read via readInTable)
#   cd.file  - coding (mRNA) expression table
# Returns the combined long-format plotting data frame.
# NOTE(review): ggsave() calls rely on ggplot2's last_plot(), matching the rest
# of the file; this assumes each preceding plot object was rendered.
compareLncMrnaRnaSeqMix <- function(outdir="/Users/adam/work/research/researchProjects/encode/encode-manager/plots/rnaSeq-pullDownCompare",
                                    lnc.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/lncExprWithStats_transEachSample.tab",
                                    cd.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/cdExprWithStats_transEachSample.tab"){
makeOutFile <- function(x){paste(outdir,x,sep="/")}
lnc.df<-getPolyACompDf(readInTable(lnc.file),"lncRNA",2:33)
lnc.df <- lnc.df[which(lnc.df$sum > 0),]
lnc.df$sumGroups <- cut(lnc.df$sum, breaks=quantile(lnc.df$sum,0:10/10))
cd.df<-getPolyACompDf(readInTable(cd.file),"mRNA",2:33)
cd.df <- cd.df[which(cd.df$sum > 0),]
# NOTE(review): cd.df is binned with the lncRNA quantiles (lnc.df$sum), so both
# transcript types share break points -- confirm this is intended.
cd.df$sumGroups <- cut(cd.df$sum, breaks=quantile(lnc.df$sum,0:10/10))
comb.df <- rbind(cd.df,lnc.df)
comb.df <- transform(comb.df, cellPulldown = paste(cellType,polyApulldown,sep="."))
comb.df <- transform(comb.df, transcriptTypePulldown = paste(label,polyApulldown,sep="."))
# One freqpoly and one density PDF per expression-mix class, faceted by sum bin.
makeExprMixForComb <- function(exprMixGroup){
ggplot(comb.df[comb.df$exprMix == exprMixGroup,],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.1)+
theme_bw()+
facet_grid(sumGroups ~ .,scale="free")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle(paste("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample\nTranscripts: ",exprMixGroup,sep=" "))
ggsave( file=makeOutFile(paste("pullDownComp-freqPoly-sumGroups_sampleComb_scaleFree-trans",exprMixGroup,".pdf",sep="")),width=8,height=18)
ggplot(comb.df[comb.df$exprMix == exprMixGroup,],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_density()+
theme_bw()+
facet_grid(sumGroups ~ .,scale="free")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle(paste("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample\nTranscripts: ",exprMixGroup,sep=" "))
ggsave( file=makeOutFile(paste("pullDownComp-freqPoly-sumGroups_sampleComb_scaleFree-trans",exprMixGroup,"-density.pdf",sep="")),width=8,height=18)
}
sapply(c("both","lnpaOnly","lpaOnly"),makeExprMixForComb)
# Same comparisons without the sum-bin faceting.
makeExprMixForCombDistro <- function(exprMixGroup){
ggplot(comb.df[comb.df$exprMix == exprMixGroup,],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.1)+
theme_bw()+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle(paste("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample\nTranscripts: ",exprMixGroup,sep=" "))
ggsave( file=makeOutFile(paste("pullDownComp-freqPoly-totalDistro-trans",exprMixGroup,".pdf",sep="")),width=7,height=7)
ggplot(comb.df[comb.df$exprMix == exprMixGroup,],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_density()+
theme_bw()+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle(paste("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample\nTranscripts: ",exprMixGroup,sep=" "))
ggsave( file=makeOutFile(paste("pullDownComp-freqPoly-totalDistro-trans",exprMixGroup,"density.pdf",sep="")),width=7,height=7)
}
sapply(c("both","lnpaOnly","lpaOnly"),makeExprMixForCombDistro)
c.df<- comb.df[which(!is.na(comb.df$sumGroups)),]
# Fix: this chain previously ended in "+" so ggsave() was *added to* the plot,
# which errors / saves the wrong plot; ggsave is now its own statement.
ggplot(c.df[c.df$exprMix == "both",],aes(x=sumGroups,y=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown))+geom_boxplot(outlier.size=1)+theme_bw()+
theme( axis.text.x = element_text(angle=25, vjust=0.8))+
ggtitle("transcriptType-PullDown boxplot for each expression group")+
xlab("binned expression group")+
ylab("log(RPKM of transcript)")+
scale_fill_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[5], "mRNA.longPolyA"= brewer.pal(9,"Blues")[9], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[9],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[6]))
ggsave( file=makeOutFile("pullDownComp-boxplot-bySumgroups.pdf"),width=10,height=7)
ggplot(c.df, aes(x=log(RPKM),fill=factor(sumGroups)))+geom_histogram()+theme_bw()+
facet_wrap(polyApulldown~label)+
ggtitle("Distribution of summation groups in transcript-type/pulldown RPKM distros\ncolor by summation group")+
xlab("log(RPKM of transcript)")+
ylab("count")
ggsave( file=makeOutFile("pullDownComp-sumGroupsForPulldown.pdf"),width=10,height=7)
ggplot(c.df, aes(x=log(RPKM),fill=factor(sumGroups)))+geom_histogram(position="fill")+theme_bw()+
facet_wrap(polyApulldown~label)+
ggtitle("Distribution of summation groups in transcript-type/pulldown RPKM distros\ncolor by summation group")+
xlab("log(RPKM of transcript)")+
ylab("count")
ggsave( file=makeOutFile("pullDownComp-sumGroupsForPulldown-fill.pdf"),width=10,height=7)
rm(c.df)
comb.df
}
# Compare mRNA vs lncRNA RNA-seq RPKM distributions across cell types and
# polyA pulldowns, writing many comparison PDFs into `outdir`.
#   outdir   - output directory for generated PDFs
#   lnc.file - lncRNA expression table (read via readInTable)
#   cd.file  - coding (mRNA) expression table
# Called for its plotting side effects; no meaningful return value.
compareLncMrnaRnaSeq <- function(outdir="/Users/adam/work/research/researchProjects/encode/encode-manager/plots/rnaSeq-pullDownCompare",
                                 lnc.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/lncExprWithStats_transEachSample.tab",
                                 cd.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/cdExprWithStats_transEachSample.tab"){
makeOutFile <- function(x){paste(outdir,x,sep="/")}
lnc.df<-getPolyACompDf(readInTable(lnc.file),"lncRNA",2:33)
lnc.df <- lnc.df[which(lnc.df$sum > 0),]
lnc.df$sumGroups <- cut(lnc.df$sum, breaks=quantile(lnc.df$sum,0:10/10))
cd.df<-getPolyACompDf(readInTable(cd.file),"mRNA",2:33)
cd.df <- cd.df[which(cd.df$sum > 0),]
# NOTE(review): cd.df is binned with the lncRNA quantiles (lnc.df$sum) -- confirm.
cd.df$sumGroups <- cut(cd.df$sum, breaks=quantile(lnc.df$sum,0:10/10))
comb.df <- rbind(cd.df,lnc.df)
ggplot(comb.df,aes(x=log(RPKM),color=polyApulldown,fill=polyApulldown)) +
geom_density(alpha=I(0.4))+
theme_bw()+
facet_grid(cellType~label)+
ggtitle("Comparison of mRNA and lncRNA RNA-seq experiments\nFaceted by cell-type and transcript-type")
ggsave(file=makeOutFile("pullDownComp-density-allBins.pdf"),width=10,height=24)
ggplot(comb.df,aes(x=log(RPKM),color=polyApulldown,fill=polyApulldown)) +
geom_freqpoly(size=0.7,binwidth=0.2)+
theme_bw()+
facet_grid(cellType~label)+
ggtitle("Comparison of mRNA and lncRNA RNA-seq experiments\nFaceted by cell-type and transcript-type")
ggsave(file=makeOutFile("pullDownComp-freqPoly-allBins.pdf"),width=10,height=24)
# One density + one freqpoly PDF per sum-quantile bin.
sapply(levels(cut(lnc.df$sum, breaks=quantile(lnc.df$sum,0:10/10))),
function(x.f){
x.factor <- gsub('\\(','',gsub('\\]','',gsub(",","-",x.f)))
x.title <- paste("Comparison of mRNA and lncRNA RNA-seq experiments\nFaceted by cell-type and transcript-type\nbin=",x.factor,sep="")
x.outFile.freq = makeOutFile(paste("pullDownComp-freqPoly-",x.factor,"=binwidth.pdf",sep=""))
# Fix: the density filename used to be identical to the freqpoly filename,
# so the density plot silently overwrote the freqpoly plot.
x.outFile.dens = makeOutFile(paste("pullDownComp-density-",x.factor,"=binwidth.pdf",sep=""))
ggplot(comb.df,aes(x=log(RPKM),color=polyApulldown,fill=polyApulldown)) +
geom_density(alpha=I(0.4))+
theme_bw()+
facet_grid(cellType~label)+
ggtitle(x.title)
ggsave(file=x.outFile.dens,width=10,height=24)
ggplot(comb.df,aes(x=log(RPKM),color=polyApulldown,fill=polyApulldown)) +
geom_freqpoly(size=0.7,binwidth=0.2)+
theme_bw()+
facet_grid(cellType~label)+
ggtitle(x.title)
ggsave(file=x.outFile.freq,width=10,height=24)
}#end of applied function
)#end of sapply over factors...
comb.df <- transform(comb.df, cellPulldown = paste(cellType,polyApulldown,sep="."))
comb.df <- transform(comb.df, transcriptTypePulldown = paste(label,polyApulldown,sep="."))
ggplot(comb.df,aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=0.7,binwidth=0.2)+
theme_bw()+
facet_grid(cellType~sumGroups,scale="free")+
scale_fill_manual(values = c("blue","purple","orange","red"))+
scale_color_manual(values = c("blue","purple","orange","red"))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over celltype and bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-cellTypeVsumGroups.pdf"),width=24,height=24)
ggplot(comb.df,aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=0.7,binwidth=0.2)+
theme_bw()+
facet_grid(cellType~sumGroups,scale="free_y")+
scale_fill_manual(values = c("mRNA.longNonPolyA"="black", "mRNA.longPolyA"="grey", "lncRNA.longPolyA"="blue","lncRNA.longNonPolyA"="red"))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over celltype and bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-cellTypeVsumGroups_free.pdf"),width=24,height=24)
low.comb.df <- comb.df[which(as.numeric(comb.df$sumGroups) %in% 1:4),]
ggplot(low.comb.df,aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.3)+
theme_bw()+
facet_grid(cellType~sumGroups,scale="free_y")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over celltype and bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-cellTypeVsumGroups_low.pdf"),width=24,height=24)
high.comb.df <- comb.df[which(as.numeric(comb.df$sumGroups) %in% 5:10),]
ggplot(high.comb.df,aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=0.7,binwidth=0.1)+
theme_bw()+
facet_grid(cellType~sumGroups,scale="free_y")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[7],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[4]))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over celltype and bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-cellTypeVsumGroups_high.pdf"),width=24,height=24)
# Fix: the counts are already tabulated (y=Freq), so draw identity bars;
# geom_histogram() rejects a y aesthetic.
ggplot(as.data.frame(with(comb.df[which(comb.df$RPKM > 0),],table(sumGroups,transcriptTypePulldown))),aes(x=sumGroups,y=Freq,color=transcriptTypePulldown,fill=transcriptTypePulldown))+
geom_bar(stat="identity")+
theme_bw()+coord_flip()+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[7],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[4]))+
scale_fill_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[3], "mRNA.longPolyA"= brewer.pal(9,"Blues")[7], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[6],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[3]))+
ggtitle("transcript pulldown RPKM counts\nfor each sample by RPKM bin\nOnly transcripts w/ RPKM > 0 counted")
ggsave(file=makeOutFile("pullDownComp-histogram-forSamples.pdf"),width=7,height=7)
ggplot(comb.df[which(!is.na(comb.df$sumGroups)),],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.1)+
theme_bw()+
facet_grid(sumGroups ~ .,scale="free_y")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-sumGroups_sampleComb.pdf"),width=8,height=18)
ggplot(comb.df[which(!is.na(comb.df$sumGroups)),],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.1)+
theme_bw()+
facet_grid(sumGroups ~ .,scale="free")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-sumGroups_sampleComb_scaleFree.pdf"),width=8,height=18)
comb.npa.df <- comb.df[which(comb.df$polyApulldown == "longNonPolyA"),]
comb.npa.df$npa <- comb.npa.df$RPKM
comb.np.df <- comb.df[which(comb.df$polyApulldown == "longPolyA"),]
# Fix: was `comb.pa.df$RPKM`, an undefined name; the polyA subset is comb.np.df.
# NOTE(review): this pairing assumes both subsets share row order -- confirm.
comb.npa.df$pa <- comb.np.df$RPKM
ggplot(comb.npa.df,aes(x=log(pa),y=log(npa),color=label))+geom_point(size=1)+theme_bw()+facet_grid(cellType ~ sumGroups);
ggsave(file=makeOutFile("comb-scatterplot.pdf"),height=24,width=24)
ggplot(comb.df,aes(x=log(sum),y=log(RPKM),color=polyApulldown))+geom_point(size=1)+facet_wrap(~label,ncol=2)+ theme_bw()
ggsave(file=makeOutFile("comb-scatterplot-sum-vs-components.pdf"),height=7,width=14)
ggplot(comb.df,aes(x=sum,y=log(RPKM),color=polyApulldown))+stat_smooth(size=1)+facet_wrap(~label,ncol=2)+ theme_bw()
ggsave(file=makeOutFile("comb-scatterplot-sum-vs-components-qauntile.pdf"),height=7,width=14)
}
runAllTransPolyA_Analysis <- function(){
  # Run both pulldown-comparison pipelines over the all-transcript tables.
  base <- "/Users/adam/work/research/researchProjects/encode/encode-manager"
  lnc.path <- paste(base, "data/lncExprWithStats_allTrans.tab", sep = "/")
  cd.path <- paste(base, "data/cdExprWithStats_allTrans.tab", sep = "/")
  plot.dir <- paste(base, "plots/rnaSeq-pullDownCompare_allTrans", sep = "/")
  compareLncMrnaRnaSeq(outdir = plot.dir, lnc.file = lnc.path, cd.file = cd.path)
  compareLncMrnaRnaSeqMix(outdir = plot.dir, lnc.file = lnc.path, cd.file = cd.path)
}
# Annotate a lncRNA table with names from an annotation table.
#   lncDf        - table with a gene_id_short column
#   annotDf      - annotation table with gene_id, gene_id_short, lncRnaName
#   exprCol      - unused (kept for interface compatibility)
#   annotColName - name of the new annotation column added to lncDf
# Returns lncDf with a "withinSubset" flag ("true"/"false") and the named
# annotation column ("notFound" where no annotation matches).
getAnnotLncDf <- function(lncDf,annotDf,exprCol,annotColName){
  # Fix: %in%/ifelse is zero-match safe; the old which()/$<- form errored
  # when no gene ids matched.
  lncDf$withinSubset <- ifelse(lncDf$gene_id_short %in% annotDf$gene_id_short, "true", "false")
  # match() returns the FIRST annotation row per gene id, which reproduces the
  # old ddply-then-"[1]" lookup in O(n) instead of one scan per row.
  lncDf[[annotColName]] <- annotDf$lncRnaName[match(lncDf$gene_id_short, annotDf$gene_id)]
  lncDf[is.na(lncDf[[annotColName]]), annotColName] <- "notFound"
  lncDf
}
makeDir <- function(dir, recursiveCreate = FALSE) {
  # Create `dir` (optionally with parents) unless something already exists
  # at that path; a no-op for existing paths.
  dir.is.missing <- !file.exists(dir)
  if (dir.is.missing) {
    dir.create(path = dir, showWarnings = TRUE, recursive = recursiveCreate, mode = "0755")
  }
}
getLpaColnames <- function(df, colnamesDf = colnames(df)) {
  # Keep only columns holding doubles, then those whose name ends in
  # "longPolyA". (The anchored suffix does NOT match "longNonPolyA".)
  holds.double <- sapply(colnamesDf, function(nm) typeof(df[1, nm]) == "double")
  numeric.cols <- colnamesDf[as.vector(holds.double)]
  numeric.cols[grep("longPolyA$", numeric.cols)]
}
# Names of double-typed columns of `df` ending in "longNonPolyA".
# Fix: the body referenced a free variable `df` that was never a parameter;
# `df` is now an explicit argument, defaulting to the old global lookup so
# existing one-argument callers keep their previous behavior.
getLnpaColnames <- function(colnamesDf, df = get("df", envir = globalenv())){
  doubleCols = colnamesDf[as.vector(sapply(colnamesDf,function(x)(typeof(df[1,x]) == "double")))]
  doubleCols[grep("longNonPolyA$",doubleCols)]
}
# Append per-row summary-statistic columns to an expression table.
#   expr.df - expression data frame
#   cols    - indices of the per-sample expression columns
# Each applyAndAppendFnDf call adds one derived column (entropy, sum,
# variance, mean, min, max, plus specificity measures).
# NOTE(review): the specificity helpers receive inconsistent second arguments
# -- threeTimesRest gets `col.names` (column names) while nTimesRest/tissSpec
# get `cols` (indices); compare appendAnalysis(), which passes `cols`
# everywhere. Confirm which is intended.
editStatsForLncDf <- function(expr.df, cols){
col.names <- colnames(expr.df)[cols]
expr.df <- applyAndAppendFnDf(expr.df,cols,entropy,"entropyExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,sum,"sumExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,var,"varianceExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,mean,"averageExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,min,"minExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,max,"maxExpr")
expr.df <- applyAndAppendFnDf(df=expr.df,cols,function(x)maxExpr(x,col.names),"maxExprType")
expr.df <- applyAndAppendFnDf(df=expr.df,cols=cols,FUN=function(x)threeTimesRestSpecifity(x,col.names),"threeTimesRest")
expr.df <- applyAndAppendFnDf(df=expr.df,cols=cols,FUN=function(x)nTimesRestSpecifity(x,cols),"nTimesRest")
expr.df <- applyAndAppendFnDf(df=expr.df,cols=cols,FUN=function(x)calcTissSpec(x,cols),"tissSpec")
expr.df
}
# Row-apply FUN over the `cols` columns of df; store the result as `newCol`.
# Fix: the old defaults (df=df, cols=cols, ...) were self-referential promises
# that error with "promise already under evaluation" whenever an argument is
# omitted; all four arguments are required, so the defaults are removed.
xapplyAndAppendFnDf <- function(df, cols, FUN, newCol){
  df[newCol] <- apply(df[as.vector(cols)], 1, function(...){x<-c(...);FUN(x)})
  df
}
# Like xapplyAndAppendFnDf, but FUN also receives the fixed second argument
# `arg2` on every row.
# Fix: removed the self-referential default arguments (df=df, etc.), which
# errored with "promise already under evaluation" when omitted.
xapplyAndAppendFnDfSecondArg <- function(df, cols, FUN, newCol, arg2){
  df[newCol] <- apply(df[as.vector(cols)], 1, function(...){x<-c(...);FUN(x,arg2)})
  df
}
getMemory <- function(){
  # Human-readable total size of every object in the global environment.
  obj.names <- unlist(ls(envir = .GlobalEnv))
  total.bytes <- sum(sapply(obj.names,
                            function(nm) object.size(get(nm, envir = .GlobalEnv))))
  gettextf("%.2f Mb stored in memory", total.bytes / (1000000))
}
getMemory() # report the global-env memory footprint when this file is sourced
library(hexbin)
library(ggplot2)
library(plyr)
library(reshape)
library(hexbin)
library(RColorBrewer)
library(reshape)
library(ggdendro)
x <- "yea this works..." # leftover sourcing smoke-test value; appears unused -- TODO confirm and remove
# Prefix a project-relative path with the free variable `projectDir`.
getFullPath <- function(projectPath) {
  paste(projectDir, projectPath, sep = "/")
}
# Plain TSV dump of a data frame: no quoting, no row names.
exportAsTable <- function(df, file) {
  write.table(df, file = file, quote = FALSE, row.names = FALSE, sep = "\t")
}
# Remove every object from the global environment except those named in
# `save.vec` and this function itself. Destructive by design.
clear <- function(save.vec = c()) {
  all.objs <- ls(globalenv())
  doomed <- setdiff(all.objs, c(save.vec, "clear"))
  rm(list = doomed, pos = globalenv())
}
# Read a whitespace-delimited table with a header row, keeping strings as-is.
readInTable <- function(file) {
  read.table(file = file, stringsAsFactors = FALSE, header = TRUE)
}
# Scalar closed-interval test: TRUE iff lower <= x <= upper.
withinRange3 <- function(x, lower, upper) {
  if (x >= lower && x <= upper) TRUE else FALSE
}
# Shannon entropy (base 2) of the values given as ..., normalized to sum 1.
# Zero entries contribute 0 (0*log2(0) is treated as 0).
entropy <- function(...) {
  p <- c(...)
  p <- p / sum(p)
  contrib <- p * log2(p)
  contrib[is.na(contrib)] <- 0
  -sum(contrib)
}
# Entropy of a vector under a configurable log base (default log2),
# with 0*log(0) treated as 0.
entropyVec <- function(x, logFn = log2) {
  p <- x / sum(x)
  contrib <- logFn(p) * p
  contrib[is.na(contrib)] <- 0
  -sum(contrib)
}
# Jensen-Shannon divergence between two (unnormalized) distributions.
JS <- function(x, y, logFn = log2) {
  px <- x / sum(x)
  py <- y / sum(y)
  midpoint <- (px + py) / 2
  entropyVec(midpoint, logFn) - (entropyVec(px, logFn) + entropyVec(py, logFn)) / 2
}
# Jensen-Shannon specificity score: 1 - sqrt(JS divergence).
JSsp <- function(e1, e2, logFn = log2) {
  1 - sqrt(JS(e1, e2, logFn))
}
# Tissue specificity of expression vector `x`: best JSsp score against the
# one-hot profile of each entry of `cols`.
calcTissSpec <- function(x, cols, logFn = log2) {
  vals <- sapply(x, as.numeric)
  one.hot <- diag(length(cols))
  scores <- sapply(seq_len(length(cols)), function(i) JSsp(vals, one.hot[i, ], logFn))
  max(scores)
}
# As calcTissSpec, but the number of one-hot profiles comes from length(x)
# instead of a separate column vector.
calcTissSpecVector <- function(x, logFn = log2) {
  vals <- sapply(x, as.numeric)
  one.hot <- diag(length(vals))
  scores <- sapply(seq_along(vals), function(i) JSsp(vals, one.hot[i, ], logFn))
  max(scores)
}
# Hard-coded 5-cell-type specificity score (HELAS3, GM12878, H1HESC, HEPG2,
# K562): best JSsp of the input against each one-hot profile. The generalized
# form is calcTissSpecVector(); the matching cell-type name was computed but
# discarded in the original, so it is omitted here.
tissSpec <- function(...) {
  expr <- unlist(...)
  profiles <- list(c(1, 0, 0, 0, 0), c(0, 1, 0, 0, 0), c(0, 0, 1, 0, 0),
                   c(0, 0, 0, 1, 0), c(0, 0, 0, 0, 1))
  specEn <- vapply(profiles, function(p) JSsp(expr, p), numeric(1))
  max(specEn)
}
# NOTE(review): returns the single value log(min(X) + 1), not a per-element
# transform, and the `c` argument is never used. This looks like it may have
# been intended as log(X - min(X) + 1); confirm intent before relying on it.
logNormal <- function(X,c=1){
normX <- min(X) + 1
log(normX)
}
# Name of the column holding the maximum of `x`, or "none" when the total
# expression is (near-)zero.
# Fix: replaced scalar ifelse() (an anti-pattern that silently truncated the
# tie case to its first element) with an explicit if/else; which.max()
# reproduces the old first-maximum tie behavior.
maxExpr <- function(x, cols){
  if (sum(x) > 0.00000000001) {
    cols[which.max(x)]
  } else {
    "none"
  }
}
# Column name of the maximum when it exceeds 3x the runner-up, else "none".
# Fix: replaced scalar ifelse() with if/else; the > 3x condition guarantees a
# unique maximum, so which.max() matches the old cols[which(...)] lookup.
threeTimesRestSpecifity <- function(expr.vec, cols){
  ordered <- sort(expr.vec, decreasing = TRUE)
  if (ordered[1] > (3 * ordered[2])) {
    cols[which.max(expr.vec)]
  } else {
    "none"
  }
}
# Ratio of the largest to the second-largest expression value:
# 0 when both are zero, Inf when only the top is positive. `cols` is unused
# (kept for interface parity with the other specificity helpers).
nTimesRestSpecifity <- function(expr.vec, cols){
  ordered <- sort(expr.vec, decreasing = TRUE)
  top <- ordered[1]
  runner.up <- ordered[2]
  ratio <- Inf  # covers top > 0 with runner.up == 0
  if (top == 0 && runner.up == 0) {
    ratio <- 0
  }
  if (top > 0 && runner.up > 0) {
    ratio <- top / runner.up
  }
  ratio
}
# Row-apply FUN over the `cols` columns of df; store the result as `newCol`.
# Fix: the old defaults (df=df, cols=cols, ...) were self-referential promises
# that error with "promise already under evaluation" whenever an argument is
# omitted; all four arguments are required, so the defaults are removed.
applyAndAppendFnDf <- function(df, cols, FUN, newCol){
  df[newCol] <- apply(df[as.vector(cols)], 1, function(...){x<-c(...);FUN(x)})
  df
}
# Like applyAndAppendFnDf, but FUN also receives the fixed second argument
# `arg2` on every row.
# Fix: removed the self-referential default arguments (df=df, etc.), which
# errored with "promise already under evaluation" when omitted.
applyAndAppendFnDfSecondArg <- function(df, cols, FUN, newCol, arg2){
  df[newCol] <- apply(df[as.vector(cols)], 1, function(...){x<-c(...);FUN(x,arg2)})
  df
}
# Label each value of x as "none" (not expressed / below the lowest positive
# tertile break) or "low"/"mid"/"high" by tertiles of the positive values.
# Note: cut()'s left-open intervals leave the minimum positive value NA,
# so it also becomes "none" -- preserved from the original.
exprLabels <- function(x){
  labels.out <- rep("none", length(x))
  tertile.breaks <- quantile(x[which(x > 0)], (0:3)/3)
  binned <- as.character(cut(x, tertile.breaks, labels = c("low", "mid", "high")))
  assigned <- !is.na(binned)
  labels.out[assigned] <- binned[assigned]
  factor(labels.out)
}
# Append one "<col>.ExprLabel" factor column per expression column in `cols`,
# produced by exprLabels(), so each column gets equal-sized low/mid/high bins
# over its positive values.
# Fix: lapply keeps each result a proper factor column; apply() coerces the
# data frame to a matrix and strips the factor attributes from the results.
assignExprLabels <- function(df, cols){
  labels.df <- as.data.frame(lapply(df[as.vector(cols)], exprLabels))
  colnames(labels.df) <- as.vector(sapply(colnames(labels.df), function(x) paste(x, "ExprLabel", sep = ".")))
  cbind(df, labels.df)
}
# Order transcripts by descending sumExpr and attach a 1-based rank column
# (used to find each gene's maximal transcript by total expression).
rankSumExpr <- function(df){
  ranked <- df[order(df$sumExpr, decreasing = TRUE), ]
  ranked$rank <- seq(1, length(ranked$transcript_id))
  ranked
}
# Tally how transcripts split across the two pulldowns, from the
# longPolyAexpr / longNonPolyAexpr columns of df: detected in both, neither,
# either, each pulldown alone, and the grand total.
getExprComp <- function(df){
  pa <- df$longPolyAexpr > 0
  npa <- df$longNonPolyAexpr > 0
  data.frame(
    exprInBoth   = sum(pa & npa),
    none         = sum(!pa & !npa),
    either       = sum(pa | npa),
    polyA        = sum(pa),
    nonPolyA     = sum(npa),
    polyAOnly    = sum(pa & !npa),
    nonPolyAOnly = sum(!pa & npa),
    total        = length(npa))
}
# Append summary-statistic columns (entropy, sum, variance, mean, min, max,
# max-column name, specificity measures, per-column expression labels) to df,
# computed over the per-sample columns indexed by `cols`, plus a yes/no
# `expressed` flag derived from maxExprType.
appendAnalysis <- function(df,cols){
  df <- applyAndAppendFnDf(df,cols,entropy,"entropyExpr")
  df <- applyAndAppendFnDf(df,cols,sum,"sumExpr")
  df <- applyAndAppendFnDf(df,cols,var,"varianceExpr")
  df <- applyAndAppendFnDf(df,cols,mean,"averageExpr")
  df <- applyAndAppendFnDf(df,cols,min,"minExpr")
  df <- applyAndAppendFnDf(df,cols,max,"maxExpr")
  df <- applyAndAppendFnDf(df,cols,function(x)maxExpr(x,cols),"maxExprType")
  df <- applyAndAppendFnDf(df=df,cols=cols,FUN=function(x)threeTimesRestSpecifity(x,cols),"threeTimesRest")
  df <- applyAndAppendFnDf(df=df,cols=cols,FUN=function(x)nTimesRestSpecifity(x,cols),"nTimesRest")
  df <- applyAndAppendFnDf(df=df,cols=cols,FUN=function(x)calcTissSpec(x,cols),"tissSpec")
  df <- assignExprLabels(df,cols=cols)
  # Fix: vectorized ifelse() is safe when no row (or every row) is expressed;
  # the old `df[which(...),]$expressed <-` form errored on zero matches.
  df$expressed <- ifelse(df$maxExprType == "none", "no", "yes")
  df
}
# For one gene's transcript rows `t1`: per column, take the transcript_id of
# the row holding that column's maximum (first one via "[1]"), then cast to a
# single wide row (reshape::cast / plyr::ldply).
# NOTE(review): apply() iterates over EVERY column of t1, including
# non-numeric ones such as transcript_id itself -- confirm callers pass only
# the intended columns.
getTranscriptForMaxCols <- function(t1)cast(ldply(apply(t1,2,function(x)t1[which(x == max(x)),"transcript_id"]),function(x)x[1]), ~ .id )
# Apply getTranscriptForMaxCols to each gene's group of rows in `df`.
# Fix: the body split the hard-coded global `cd.expr.df` and ignored the `df`
# parameter entirely; it now operates on its argument.
applyTranscriptForMaxCols <- function(df,geneCol="gene_id")ldply(split(df,df[c(geneCol)]),getTranscriptForMaxCols)
# Build an output path inside `outdir`; when `x` is omitted, generate a
# timestamp-based "anon" name from proc.time().
# Fix: the body concatenated the undefined global `outdir.tmp` instead of the
# `outdir` parameter, so the default directory argument was never honored.
makeTmpOutFile <- function(x, outdir="/Users/adam/Desktop/"){
  if(missing(x)){
    x = paste(as.vector(proc.time()[1]),"anon",sep="-")
  }
  paste(outdir, x, sep="")
}
#if (!file.exists(outdir)){
# print("WARNING: cannot access output dir for plots")
#}
# Load the combined Gencode expression table and append per-row summary
# statistics over the expression columns `exprCols` (see appendAnalysis).
# Fix: the body read the undefined name `expr.file`; it now uses the declared
# `exprFile` parameter.
getFullGencodeExpr <- function(exprFile = "/Volumes/MyBook/Volume1/scratch/encodeRnaSeq/cshl/allCellsCombined.space",exprCols=2:33){
  appendAnalysis(read.table(file=exprFile, header=TRUE, stringsAsFactors=FALSE), exprCols)}
# Keep only rows with any expression, mirror gene_id into transcript_id,
# and add a zeroed Genc_polyA placeholder column.
preProcessDf <- function(df){
  expressed <- df$sumExpr > 0
  df <- df[which(expressed), ]
  df$transcript_id <- df$gene_id
  df$Genc_polyA <- rep(0, length(df$gene_id))
  df
}
# Convert a wide per-sample expression table (columns named
# "<cell>.longPolyA" / "<cell>.longNonPolyA") into long format: one RPKM row
# per transcript x cellType x pulldown, with per-pair summary columns
# sum, logsum, exprMix ("both"/"lpaOnly"/"lnpaOnly"/"none"), and `label`.
#   df           - wide expression table (run through preProcessDf first)
#   label        - transcript-type tag attached to every row (e.g. "lncRNA")
#   exprColIndex - indices of the per-sample expression columns
getPolyACompDf <- function(df, label, exprColIndex){
  df <- preProcessDf(df)
  # Recover cell-type names by stripping the ".long*" suffix from column names.
  expr.uniq.cols <- unique(unlist(lapply(colnames(df[exprColIndex]),function(x)strsplit(x,".long"))))
  expr.cells = expr.uniq.cols[grep("Poly",expr.uniq.cols,invert=TRUE)]
  expr.cols.polyA <- as.vector(sapply(expr.cells,function(x)paste(x,"longPolyA",sep=".")))
  expr.cols.nonPolyA <- as.vector(sapply(expr.cells,function(x)paste(x,"longNonPolyA",sep=".")))
  # Long format for the polyA columns.
  lpa <- melt(df[c("transcript_id",expr.cols.polyA)],measure.vars=sort(expr.cols.polyA),id.vars="transcript_id")
  colnames(lpa) <- c("transcript_id", "expr","longPolyAexpr")
  lpa$cellType <- as.vector(sapply(sapply(as.vector(lpa$expr),function(x){strsplit(x,'\\.')[1]}),function(x)x[1]))
  lpa$seqPullDown <- "longPolyA"
  colnames(lpa) <- c("transcript_id", "expr", "expression" ,"cellType" ,"seqPullDown" )
  # Long format for the nonPolyA columns.
  lnpa <- melt(df[c("transcript_id",expr.cols.nonPolyA)],measure.vars=sort(expr.cols.nonPolyA),id.vars="transcript_id")
  colnames(lnpa) <- c("transcript_id", "expr","longNonPolyAexpr")
  lnpa$cellType <- as.vector(sapply(sapply(as.vector(lnpa$expr),function(x){strsplit(x,'\\.')[1]}),function(x)x[1]))
  lnpa$seqPullDown <- "longNonPolyA"
  colnames(lnpa) <- c("transcript_id", "expr", "expression" ,"cellType" ,"seqPullDown")
  # Pair the two pulldowns per transcript/cellType.
  tm.lnpa <- lnpa[c("transcript_id","cellType","expression")]
  colnames(tm.lnpa) <- c("transcript_id","cellType","longNonPolyA")
  tm.lpa <- lpa[c("transcript_id","cellType","expression")]
  colnames(tm.lpa) <- c("transcript_id","cellType","longPolyA")
  comb <- merge(tm.lnpa,tm.lpa,by=c("transcript_id","cellType"))
  comb <- transform(comb,sum= longNonPolyA + longPolyA)
  comb <- transform(comb,logsum= log(longNonPolyA) + log(longPolyA))
  comb <- transform(comb,product=longNonPolyA * longPolyA)
  # Classify which pulldown(s) detected the transcript.
  # Fix: logical-vector assignment is zero-match safe; the old
  # `comb[intersect(which(...)...),]$exprMix <-` form errored whenever a
  # class had no rows.
  comb$exprMix <- "both"
  comb$exprMix[comb$product == 0 & comb$sum > 0 & comb$longNonPolyA == 0] <- "lpaOnly"
  comb$exprMix[comb$product == 0 & comb$sum > 0 & comb$longPolyA == 0] <- "lnpaOnly"
  comb$exprMix[comb$product == 0 & comb$sum == 0] <- "none"
  mc <- melt(comb,id.vars=c("transcript_id","cellType","sum","logsum","exprMix"),measure.vars=c("longNonPolyA","longPolyA"))
  colnames(mc)<- c("transcript_id", "cellType", "sum", "logsum", "exprMix","polyApulldown", "RPKM")
  mc$label <- label
  mc
}
# Compare mRNA vs lncRNA RPKM distributions per polyA pulldown, split by
# expression-mix class ("both", "lnpaOnly", "lpaOnly"), and write a series of
# freqpoly / density / boxplot / histogram PDFs into `outdir`.
#   outdir   - output directory for generated PDFs
#   lnc.file - lncRNA expression table (read via readInTable)
#   cd.file  - coding (mRNA) expression table
# Returns the combined long-format plotting data frame.
# NOTE(review): ggsave() calls rely on ggplot2's last_plot(), matching the rest
# of the file; this assumes each preceding plot object was rendered.
compareLncMrnaRnaSeqMix <- function(outdir="/Users/adam/work/research/researchProjects/encode/encode-manager/plots/rnaSeq-pullDownCompare",
                                    lnc.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/lncExprWithStats_transEachSample.tab",
                                    cd.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/cdExprWithStats_transEachSample.tab"){
makeOutFile <- function(x){paste(outdir,x,sep="/")}
lnc.df<-getPolyACompDf(readInTable(lnc.file),"lncRNA",2:33)
lnc.df <- lnc.df[which(lnc.df$sum > 0),]
lnc.df$sumGroups <- cut(lnc.df$sum, breaks=quantile(lnc.df$sum,0:10/10))
cd.df<-getPolyACompDf(readInTable(cd.file),"mRNA",2:33)
cd.df <- cd.df[which(cd.df$sum > 0),]
# NOTE(review): cd.df is binned with the lncRNA quantiles (lnc.df$sum), so both
# transcript types share break points -- confirm this is intended.
cd.df$sumGroups <- cut(cd.df$sum, breaks=quantile(lnc.df$sum,0:10/10))
comb.df <- rbind(cd.df,lnc.df)
comb.df <- transform(comb.df, cellPulldown = paste(cellType,polyApulldown,sep="."))
comb.df <- transform(comb.df, transcriptTypePulldown = paste(label,polyApulldown,sep="."))
# One freqpoly and one density PDF per expression-mix class, faceted by sum bin.
makeExprMixForComb <- function(exprMixGroup){
ggplot(comb.df[comb.df$exprMix == exprMixGroup,],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.1)+
theme_bw()+
facet_grid(sumGroups ~ .,scale="free")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle(paste("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample\nTranscripts: ",exprMixGroup,sep=" "))
ggsave( file=makeOutFile(paste("pullDownComp-freqPoly-sumGroups_sampleComb_scaleFree-trans",exprMixGroup,".pdf",sep="")),width=8,height=18)
ggplot(comb.df[comb.df$exprMix == exprMixGroup,],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_density()+
theme_bw()+
facet_grid(sumGroups ~ .,scale="free")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle(paste("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample\nTranscripts: ",exprMixGroup,sep=" "))
ggsave( file=makeOutFile(paste("pullDownComp-freqPoly-sumGroups_sampleComb_scaleFree-trans",exprMixGroup,"-density.pdf",sep="")),width=8,height=18)
}
sapply(c("both","lnpaOnly","lpaOnly"),makeExprMixForComb)
# Same comparisons without the sum-bin faceting.
makeExprMixForCombDistro <- function(exprMixGroup){
ggplot(comb.df[comb.df$exprMix == exprMixGroup,],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.1)+
theme_bw()+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle(paste("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample\nTranscripts: ",exprMixGroup,sep=" "))
ggsave( file=makeOutFile(paste("pullDownComp-freqPoly-totalDistro-trans",exprMixGroup,".pdf",sep="")),width=7,height=7)
ggplot(comb.df[comb.df$exprMix == exprMixGroup,],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_density()+
theme_bw()+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle(paste("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample\nTranscripts: ",exprMixGroup,sep=" "))
ggsave( file=makeOutFile(paste("pullDownComp-freqPoly-totalDistro-trans",exprMixGroup,"density.pdf",sep="")),width=7,height=7)
}
sapply(c("both","lnpaOnly","lpaOnly"),makeExprMixForCombDistro)
c.df<- comb.df[which(!is.na(comb.df$sumGroups)),]
# Fix: this chain previously ended in "+" so ggsave() was *added to* the plot,
# which errors / saves the wrong plot; ggsave is now its own statement.
ggplot(c.df[c.df$exprMix == "both",],aes(x=sumGroups,y=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown))+geom_boxplot(outlier.size=1)+theme_bw()+
theme( axis.text.x = element_text(angle=25, vjust=0.8))+
ggtitle("transcriptType-PullDown boxplot for each expression group")+
xlab("binned expression group")+
ylab("log(RPKM of transcript)")+
scale_fill_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[5], "mRNA.longPolyA"= brewer.pal(9,"Blues")[9], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[9],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[6]))
ggsave( file=makeOutFile("pullDownComp-boxplot-bySumgroups.pdf"),width=10,height=7)
ggplot(c.df, aes(x=log(RPKM),fill=factor(sumGroups)))+geom_histogram()+theme_bw()+
facet_wrap(polyApulldown~label)+
ggtitle("Distribution of summation groups in transcript-type/pulldown RPKM distros\ncolor by summation group")+
xlab("log(RPKM of transcript)")+
ylab("count")
ggsave( file=makeOutFile("pullDownComp-sumGroupsForPulldown.pdf"),width=10,height=7)
ggplot(c.df, aes(x=log(RPKM),fill=factor(sumGroups)))+geom_histogram(position="fill")+theme_bw()+
facet_wrap(polyApulldown~label)+
ggtitle("Distribution of summation groups in transcript-type/pulldown RPKM distros\ncolor by summation group")+
xlab("log(RPKM of transcript)")+
ylab("count")
ggsave( file=makeOutFile("pullDownComp-sumGroupsForPulldown-fill.pdf"),width=10,height=7)
rm(c.df)
comb.df
}
# Compare lncRNA vs mRNA RNA-seq RPKM distributions across polyA pulldowns.
# Reads per-transcript expression tables for lncRNAs and mRNAs, bins
# transcripts by their per-sample expression sum, and writes a series of
# density / freqpoly / scatter plots to `outdir`.
#
# NOTE(review): ggplot objects built inside a function are not auto-printed,
# so the ggsave() calls rely on last_plot() being set — confirm the plots
# are actually registered before each save.
compareLncMrnaRnaSeq <- function(outdir="/Users/adam/work/research/researchProjects/encode/encode-manager/plots/rnaSeq-pullDownCompare",
lnc.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/lncExprWithStats_transEachSample.tab",
cd.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/cdExprWithStats_transEachSample.tab"){
#
#outdir="/Users/adam/work/research/researchProjects/encode/encode-manager/plots/rnaSeq-pullDownCompare"
makeOutFile <- function(x){paste(outdir,x,sep="/")}
#lnc.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/lncExprWithStats_transEachSample.tab"
lnc.df<-getPolyACompDf(readInTable(lnc.file),"lncRNA",2:33)
lnc.df <- lnc.df[which(lnc.df$sum > 0),]
lnc.df$sumGroups <- cut(lnc.df$sum, breaks=quantile(lnc.df$sum,0:10/10))
#cd.file = "/Users/adam/work/research/researchProjects/encode/encode-manager/data/cdExprWithStats_transEachSample.tab"
cd.df<-getPolyACompDf(readInTable(cd.file),"mRNA",2:33)
cd.df <- cd.df[which(cd.df$sum > 0),]
# NOTE(review): mRNA transcripts are binned with the lncRNA quantile breaks.
# This makes the bins directly comparable between the two sets, but confirm
# it is intentional (and not a typo for quantile(cd.df$sum, ...)).
cd.df$sumGroups <- cut(cd.df$sum, breaks=quantile(lnc.df$sum,0:10/10))
comb.df <- rbind(cd.df,lnc.df)
ggplot(comb.df,aes(x=log(RPKM),color=polyApulldown,fill=polyApulldown)) +
geom_density(alpha=I(0.4))+
theme_bw()+
facet_grid(cellType~label)+
ggtitle("Comparison of mRNA and lncRNA RNA-seq experiments\nFaceted by cell-type and transcript-type")
ggsave(file=makeOutFile("pullDownComp-density-allBins.pdf"),width=10,height=24)
ggplot(comb.df,aes(x=log(RPKM),color=polyApulldown,fill=polyApulldown)) +
geom_freqpoly(size=0.7,binwidth=0.2)+
theme_bw()+
facet_grid(cellType~label)+
ggtitle("Comparison of mRNA and lncRNA RNA-seq experiments\nFaceted by cell-type and transcript-type")
ggsave(file=makeOutFile("pullDownComp-freqPoly-allBins.pdf"),width=10,height=24)
# One density + one freqpoly plot per expression-sum bin.
sapply(levels(cut(lnc.df$sum, breaks=quantile(lnc.df$sum,0:10/10))),
function(x.f){
x.factor <- gsub('\\(','',gsub('\\]','',gsub(",","-",x.f)))
x.title <- paste("Comparison of mRNA and lncRNA RNA-seq experiments\nFaceted by cell-type and transcript-type\nbin=",x.factor,sep="")
x.outFile.freq = makeOutFile(paste("pullDownComp-freqPoly-",x.factor,"=binwidth.pdf",sep=""))
# Bug fix: the density output previously reused the freqPoly filename, so
# one of the two plots was silently overwritten.
x.outFile.dens = makeOutFile(paste("pullDownComp-density-",x.factor,"=binwidth.pdf",sep=""))
ggplot(comb.df,aes(x=log(RPKM),color=polyApulldown,fill=polyApulldown)) +
geom_density(alpha=I(0.4))+
theme_bw()+
facet_grid(cellType~label)+
ggtitle(x.title)
ggsave(file=x.outFile.dens,width=10,height=24)
ggplot(comb.df,aes(x=log(RPKM),color=polyApulldown,fill=polyApulldown)) +
geom_freqpoly(size=0.7,binwidth=0.2)+
theme_bw()+
facet_grid(cellType~label)+
ggtitle(x.title)
ggsave(file=x.outFile.freq,width=10,height=24)
}#end of applied function
)#end of sapply over factors...
comb.df <- transform(comb.df, cellPulldown = paste(cellType,polyApulldown,sep="."))
comb.df <- transform(comb.df, transcriptTypePulldown = paste(label,polyApulldown,sep="."))
ggplot(comb.df,aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=0.7,binwidth=0.2)+
theme_bw()+
facet_grid(cellType~sumGroups,scale="free")+
scale_fill_manual(values = c("blue","purple","orange","red"))+
scale_color_manual(values = c("blue","purple","orange","red"))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over celltype and bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-cellTypeVsumGroups.pdf"),width=24,height=24)
ggplot(comb.df,aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=0.7,binwidth=0.2)+
theme_bw()+
facet_grid(cellType~sumGroups,scale="free_y")+
# Bug fix: the "mRNA.longNonPolyA" key had a leading space, so that group
# never matched and was drawn without a manual fill colour.
scale_fill_manual(values = c("mRNA.longNonPolyA"="black", "mRNA.longPolyA"="grey", "lncRNA.longPolyA"="blue","lncRNA.longNonPolyA"="red"))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over celltype and bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-cellTypeVsumGroups_free.pdf"),width=24,height=24)
low.comb.df <- comb.df[which(as.numeric(comb.df$sumGroups) %in% 1:4),]
ggplot(low.comb.df,aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.3)+
theme_bw()+
facet_grid(cellType~sumGroups,scale="free_y")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over celltype and bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-cellTypeVsumGroups_low.pdf"),width=24,height=24)
high.comb.df <- comb.df[which(as.numeric(comb.df$sumGroups) %in% 5:10),]
ggplot(high.comb.df,aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=0.7,binwidth=0.1)+
theme_bw()+
facet_grid(cellType~sumGroups,scale="free_y")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[7],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[4]))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over celltype and bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-cellTypeVsumGroups_high.pdf"),width=24,height=24)
ggplot(as.data.frame(with(comb.df[which(comb.df$RPKM > 0),],table(sumGroups,transcriptTypePulldown))),aes(x=sumGroups,y=Freq,color=transcriptTypePulldown,fill=transcriptTypePulldown))+
geom_histogram()+
theme_bw()+coord_flip()+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[7],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[4]))+
scale_fill_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[3], "mRNA.longPolyA"= brewer.pal(9,"Blues")[7], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[6],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[3]))+
ggtitle("transcript pulldown RPKM counts\nfor each sample by RPKM bin\nOnly transcripts w/ RPKM > 0 counted")
ggsave(file=makeOutFile("pullDownComp-histogram-forSamples.pdf"),width=7,height=7)
ggplot(comb.df[which(!is.na(comb.df$sumGroups)),],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.1)+
theme_bw()+
facet_grid(sumGroups ~ .,scale="free_y")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-sumGroups_sampleComb.pdf"),width=8,height=18)
ggplot(comb.df[which(!is.na(comb.df$sumGroups)),],aes(x=log(RPKM),color=transcriptTypePulldown,fill=transcriptTypePulldown)) +
geom_freqpoly(size=1,binwidth=0.1)+
theme_bw()+
facet_grid(sumGroups ~ .,scale="free")+
scale_color_manual(values = c("mRNA.longNonPolyA"= brewer.pal(9,"Blues")[4], "mRNA.longPolyA"= brewer.pal(9,"Blues")[8], "lncRNA.longPolyA"= brewer.pal(9,"Reds")[8],"lncRNA.longNonPolyA"= brewer.pal(9,"Reds")[5]))+
ggtitle("transcript pulldown RPKM comparisons\nfacet over bin\nwhere bin=range of transcript expr sum in sample")
ggsave(file=makeOutFile("pullDownComp-freqPoly-sumGroups_sampleComb_scaleFree.pdf"),width=8,height=18)
comb.npa.df <- comb.df[which(comb.df$polyApulldown == "longNonPolyA"),]
comb.npa.df$npa <- comb.npa.df$RPKM
# Bug fix: the polyA subset was previously stored as comb.np.df while the
# next line read from comb.pa.df (undefined), which errored at runtime.
# NOTE(review): pairing assumes the two pulldown subsets have the same
# number of rows in the same transcript order — TODO confirm.
comb.pa.df <- comb.df[which(comb.df$polyApulldown == "longPolyA"),]
comb.npa.df$pa <- comb.pa.df$RPKM
ggplot(comb.npa.df,aes(x=log(pa),y=log(npa),color=label))+geom_point(size=1)+theme_bw()+facet_grid(cellType ~ sumGroups);
ggsave(file=makeOutFile("comb-scatterplot.pdf"),height=24,width=24)
ggplot(comb.df,aes(x=log(sum),y=log(RPKM),color=polyApulldown))+geom_point(size=1)+facet_wrap(~label,ncol=2)+ theme_bw()
ggsave(file=makeOutFile("comb-scatterplot-sum-vs-components.pdf"),height=7,width=14)
ggplot(comb.df,aes(x=sum,y=log(RPKM),color=polyApulldown))+stat_smooth(size=1)+facet_wrap(~label,ncol=2)+ theme_bw()
ggsave(file=makeOutFile("comb-scatterplot-sum-vs-components-qauntile.pdf"),height=7,width=14)
}
runAllTransPolyA_Analysis <- function(){
# Run both polyA-pulldown comparison analyses over the all-transcripts
# expression tables, writing plots to the all-transcripts output directory.
lnc.path <- "/Users/adam/work/research/researchProjects/encode/encode-manager/data/lncExprWithStats_allTrans.tab"
cd.path <- "/Users/adam/work/research/researchProjects/encode/encode-manager/data/cdExprWithStats_allTrans.tab"
out.path <- "/Users/adam/work/research/researchProjects/encode/encode-manager/plots/rnaSeq-pullDownCompare_allTrans"
compareLncMrnaRnaSeq(outdir=out.path,lnc.file=lnc.path,cd.file=cd.path)
compareLncMrnaRnaSeqMix(outdir=out.path,lnc.file=lnc.path,cd.file=cd.path)
}
# Annotate a lncRNA data frame with membership in, and names from, an
# annotation subset.
#
# lncDf        data frame with a gene_id_short column
# annotDf      annotation data frame with gene_id_short, gene_id and
#              lncRnaName columns
# exprCol      unused (kept for interface compatibility)
# annotColName name of the new column receiving the lncRNA name
#
# Returns lncDf with a withinSubset flag ("true"/"false") and the new
# annotation column ("notFound" where no name was available).
getAnnotLncDf <- function(lncDf,annotDf,exprCol,annotColName){
# ifelse() also handles the zero-match case; the previous
# lncDf[which(...),]$withinSubset <- assignment errored on an empty subset.
lncDf$withinSubset <- ifelse(lncDf$gene_id_short %in% annotDf$gene_id_short, "true", "false")
#apply annotDf information to lncDf
ddply(annotDf,.(gene_id),function(x)x[c("gene_id","lncRnaName")]) -> ensLnc.df
# NOTE(review): the lookup matches ensLnc.df$gene_id against values taken
# from gene_id_short — confirm the two identifier forms are comparable.
getLncName <- function(x){ensLnc.df[which(ensLnc.df$gene_id == x ), "lncRnaName"][1]}
# Look up each short id directly; this avoids apply()'s coercion of every
# row to character and the throwaway "1" initialisation.
lncDf[c(annotColName)] <- sapply(as.character(lncDf$gene_id_short), getLncName)
lncDf[is.na(lncDf[c(annotColName)]),annotColName] <- "notFound"
lncDf
}
# Create directory `dir` (mode 0755) if it does not already exist.
# recursiveCreate: also create missing parent directories when TRUE.
makeDir <- function(dir,recursiveCreate=FALSE){
# dir.exists() rather than file.exists(): a regular file with the same
# name must not silently suppress the dir.create() attempt.
if (!dir.exists(dir)){
dir.create(path=dir,showWarnings=TRUE,recursive=recursiveCreate,mode="0755")
}
}
getLpaColnames <- function(df,colnamesDf=colnames(df)){
# Return the names of double-typed columns of `df` (restricted to
# `colnamesDf`) whose name ends in "longPolyA".
is_dbl <- vapply(colnamesDf, function(nm) typeof(df[1, nm]) == "double", logical(1L))
dbl_cols <- colnamesDf[as.vector(is_dbl)]
grep("longPolyA$", dbl_cols, value = TRUE)
}
# Return the names of double-typed columns whose name ends in
# "longNonPolyA".
#
# Bug fix: the original body referenced a variable `df` that was not a
# parameter, silently depending on a global `df`. The data frame is now an
# explicit optional second argument; the default reproduces the old
# global-environment lookup so existing one-argument callers keep working.
getLnpaColnames <- function(colnamesDf, df = get("df", envir = globalenv())){
doubleCols = colnamesDf[as.vector(sapply(colnamesDf,function(x)(typeof(df[1,x]) == "double")))]
doubleCols[grep("longNonPolyA$",doubleCols)]
}
# Append per-transcript summary-statistic columns (entropy, sum, variance,
# mean, min, max, dominant sample, and three tissue-specificity scores)
# computed row-wise over the expression columns `cols` of expr.df.
# Relies on applyAndAppendFnDf / entropy / maxExpr / *Specifity /
# calcTissSpec defined elsewhere in this project — TODO confirm
# applyAndAppendFnDf exists (only xapplyAndAppendFnDf is visible here).
editStatsForLncDf <- function(expr.df, cols){
col.names <- colnames(expr.df)[cols]
expr.df <- applyAndAppendFnDf(expr.df,cols,entropy,"entropyExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,sum,"sumExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,var,"varianceExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,mean,"averageExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,min,"minExpr")
expr.df <- applyAndAppendFnDf(expr.df,cols,max,"maxExpr")
expr.df <- applyAndAppendFnDf(df=expr.df,cols,function(x)maxExpr(x,col.names),"maxExprType")
expr.df <- applyAndAppendFnDf(df=expr.df,cols=cols,FUN=function(x)threeTimesRestSpecifity(x,col.names),"threeTimesRest")
# NOTE(review): the two calls below pass the numeric indices `cols` where
# the two above pass `col.names` — confirm this asymmetry is intended.
expr.df <- applyAndAppendFnDf(df=expr.df,cols=cols,FUN=function(x)nTimesRestSpecifity(x,cols),"nTimesRest")
expr.df <- applyAndAppendFnDf(df=expr.df,cols=cols,FUN=function(x)calcTissSpec(x,cols),"tissSpec")
expr.df
}
xapplyAndAppendFnDf <-function(df=df,cols=cols,FUN=fn,newCol=newCol){
# Apply FUN to each row of the selected columns of df and store the
# per-row results in a new column named `newCol`; returns the data frame.
row_fun <- function(...) {
vals <- c(...)
FUN(vals)
}
df[newCol] <- apply(df[as.vector(cols)], 1, row_fun)
df
}
xapplyAndAppendFnDfSecondArg <-function(df=df,cols=cols,FUN=fn,newCol=newCol,arg2=arg2){
# Like xapplyAndAppendFnDf, but FUN receives `arg2` as its second argument
# on every row.
row_fun <- function(...) FUN(c(...), arg2)
df[newCol] <- apply(df[as.vector(cols)], 1, row_fun)
df
}
getMemory <- function(){
# Report the total size of all objects in the global environment,
# formatted as "<x> Mb" (decimal megabytes: bytes / 1e6).
obj_names <- ls(envir=.GlobalEnv)
total_bytes <- sum(sapply(obj_names,
function(nm) object.size(get(nm,envir=.GlobalEnv))))
gettextf("%.2f Mb stored in memory", total_bytes / (1000000))
}
getMemory()
|
# Example script extracted from the glmmTMB help page for
# profile.glmmTMB / confint.profile.glmmTMB. The "##D" lines are the
# \dontrun{} portion as emitted by R's example extraction and do not run.
library(glmmTMB)
### Name: profile.glmmTMB
### Title: Compute likelihood profiles for a fitted model
### Aliases: profile.glmmTMB confint.profile.glmmTMB
### ** Examples
## Not run:
##D m1 <- glmmTMB(count~ mined + (1|site),
##D zi=~mined, family=poisson, data=Salamanders)
##D salamander_prof1 <- profile(m1, parallel="multicore",
##D ncpus=2, trace=1)
##D ## testing
##D salamander_prof1 <- profile(m1, trace=1,parm=1)
##D salamander_prof1M <- profile(m1, trace=1,parm=1, npts = 4)
##D salamander_prof2 <- profile(m1, parm="theta_")
##D
## End(Not run)
# Load a precomputed likelihood profile shipped with the package and plot
# sqrt(profile deviance) per focal parameter; y = 1.96 marks the ~95% cutoff.
salamander_prof1 <- readRDS(system.file("example_files","salamander_prof1.rds",package="glmmTMB"))
if (require("ggplot2")) {
ggplot(salamander_prof1,aes(.focal,sqrt(value))) +
geom_point() + geom_line()+
facet_wrap(~.par,scale="free_x")+
geom_hline(yintercept=1.96,linetype=2)
}
salamander_prof1 <- readRDS(system.file("example_files","salamander_prof1.rds",package="glmmTMB"))
# Profile-based confidence intervals at the default 95% and at 99%.
confint(salamander_prof1)
confint(salamander_prof1,level=0.99)
| /data/genthat_extracted_code/glmmTMB/examples/profile.glmmTMB.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,065 | r | library(glmmTMB)
### Name: profile.glmmTMB
### Title: Compute likelihood profiles for a fitted model
### Aliases: profile.glmmTMB confint.profile.glmmTMB
### ** Examples
## Not run:
##D m1 <- glmmTMB(count~ mined + (1|site),
##D zi=~mined, family=poisson, data=Salamanders)
##D salamander_prof1 <- profile(m1, parallel="multicore",
##D ncpus=2, trace=1)
##D ## testing
##D salamander_prof1 <- profile(m1, trace=1,parm=1)
##D salamander_prof1M <- profile(m1, trace=1,parm=1, npts = 4)
##D salamander_prof2 <- profile(m1, parm="theta_")
##D
## End(Not run)
salamander_prof1 <- readRDS(system.file("example_files","salamander_prof1.rds",package="glmmTMB"))
if (require("ggplot2")) {
ggplot(salamander_prof1,aes(.focal,sqrt(value))) +
geom_point() + geom_line()+
facet_wrap(~.par,scale="free_x")+
geom_hline(yintercept=1.96,linetype=2)
}
salamander_prof1 <- readRDS(system.file("example_files","salamander_prof1.rds",package="glmmTMB"))
confint(salamander_prof1)
confint(salamander_prof1,level=0.99)
|
# Auto-generated fuzzing harness (AFL/genthat style): invoke the internal
# CNull routine with one fixed, machine-generated argument list (a 10x3
# matrix of mostly-zero extreme doubles) and print the result's structure.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 1.35807730621777e-312, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
#!/usr/bin/R
# -----------------------------------------------------
# Plot modules from CHMM/other clustering with GWAS
# 1. Plot the centers matrix with gwas
# 2. Plot stats on gwas - amt of enrichment/depletion
# 3. Associate gwas with cell types by enrichment
# 4. Plot and order/cluster the adjacency matrix
# for the epigenomes by gwas co-enrichment
# -----------------------------------------------------
domain = system("hostname -d", intern=TRUE)
if (domain == 'broadinstitute.org'){
bindir='~/data/EPIMAP_ANALYSIS/bin/'
} else {
bindir='~/EPIMAP_ANALYSIS/bin/'
}
source(paste0(bindir, 'general_EPIMAP_ANALYSIS.R'))
source(paste0(bindir, 'auxiliary_chromImpute_functions.R'))
source(paste0(bindir, 'load_metadata.R'))
today <- format(Sys.time(), "%m%d%y")
library(ggplot2)
library(viridis)
library(tidyr)
library(dplyr)
# Defaults:
gwasfile = 'observed_aux_18_on_mixed_impobs_QCUT_PROM_bin_on_mixed_impobs_5000_enrich.tsv'
# gwasfile = 'observed_aux_18_on_mixed_impobs_QCUT_ENH_bin_on_mixed_impobs_5000_enrich.tsv'
# gwasfile = 'observed_aux_18_on_mixed_impobs_QCUT_ENH_bin_on_mixed_impobs_1000_enrich.tsv'
# gwasfile = 'observed_aux_18_on_mixed_impobs_QCUT_ENH_bin_on_mixed_impobs_0_enrich.tsv'
# filepref = 'cls_merge2_wH3K27ac100_300'
#tagline = 'ChromHMM Enhancers'
filepref = 'prom_cls/cls_merge2_wH3K27ac100_300'
tagline = 'ChromHMM Promoters'
extension = 5000
imgdir = paste0(img, "clusters/")
# Arguments:
args=(commandArgs(TRUE))
if (length(args)==0) {
stop("No arguments supplied: Need clusters filename")
} else {
gwasfile = args[1]
filepref = args[2]
tagline = args[3]
extension = args[4]
if (length(args) > 4){
imgdir = args[5]
}
}
# -------------------------
# Load in and process data:
# -------------------------
commandArgs <- function(trailingOnly=TRUE){
c(filepref, tagline, imgdir) }
source(paste0(bindir, 'load_modules_data.R'))
# Prefix:
cmd = paste0('mkdir -p ', imgdir)
system(cmd)
# imgpref = paste0(imgdir, 'clusters_gwas_', filepref, '_e', extension, '_')
imgpref = paste0(imgdir, 'clusters_gwas_', sub("/","_", filepref), '_e', extension, '_')
# --------------------------------
# Add gwas enrichment information:
# --------------------------------
gwdf = read.delim(gwasfile, header=F)
names(gwdf) = c('pvalue','cluster','pmid','trait',
'counthit','countall','fold')
gwdf$set = tagline
gwdf$pmt = paste0(gwdf$pmid, '_', gwdf$trait)
gwdf$cls = paste0('c', gwdf$cluster)
gwdf$logpval = -log10(gwdf$pvalue)
gwlong = aggregate(logpval ~ cls + pmt, gwdf, max)
wide = spread(gwlong, pmt, logpval, fill=0)
gwmat = as.matrix(wide[,-1])
rownames(gwmat) = wide$cls
gwmat[gwmat < 1] = 0
keep.cls = list()
keep.cls[[tagline]] = rownames(gwmat)
# Choose # gwas to show:
SHOWGWAS=300
zmax = 5
zmin=0.5
# gwasmarg = sort(apply(gwmat, 2, sum), decreasing=T)
gwasmarg = sort(apply(gwmat, 2, max), decreasing=T)
keep.studies = names(head(gwasmarg, SHOWGWAS))
# Order the top studies:
r2 = reord(t(gwmat[, keep.studies]) > zmin, 'Jaccard')
studyord = rownames(r2)
r3 = reord(gwmat[, keep.studies] > zmin, 'eJaccard')
roword = rownames(r3)
finalmat = gwmat[roword, studyord]
# Threshold for plotting:
# Plot diagonalized -> first order by cluster (rows) -> diag
gmat = finalmat
# Render (or just compute the layout of) a cluster-by-GWAS enrichment
# heatmap.
#   gwas      matrix of -log10 p-values, rows = clusters, cols = studies
#   set       unused here beyond the caller's bookkeeping
#   ordll     list: [[1]] row order, [[2]] vertical break positions,
#             [[3]] cluster cut assignments
#   zmin/zmax values below zmin are zeroed; values above zmax are clipped
#   gwasord   optional explicit column (study) order
#   labeled   currently unused — TODO confirm intended behaviour
#   calc.only if TRUE, skip drawing and only compute the ordering
# Returns list(column order after diagonalisation, row cut vector).
# Depends on diag.mat, calc.breaks.acut and colred defined elsewhere.
plot.gwas <- function(gwas, set, ordll, zmin=1, zmax=5, gwasord=NULL, labeled=TRUE, calc.only=FALSE){
# GWAS PLOT HERE:
ord = ordll[[1]]
vbreaks = ordll[[2]]
clscuts = ordll[[3]]
# Restrict the requested row order to clusters actually present in gwas.
subord = ord[ord %in% rownames(gwas)]
gmat = gwas[subord,]
if (!is.null(gwasord)){ gmat = gmat[,gwasord] }
# Diagonalize, ignoring lower than cutoff
gmat[gmat < zmin] <- 0
ll = diag.mat(gmat)
gmat = ll[[1]]
cto = ll[[3]] # For breaks
vcut = c(cto[cto ==0], clscuts[cto])
hbreaks = calc.breaks.acut(vcut)
# Threshold for plotting:
gmat[gmat > zmax] <- zmax
if (!calc.only){
image(gmat, axes=FALSE,col=colred, zlim=c(zmin, zmax))
# Solid frame around the panel, dotted gridlines at the cut boundaries.
abline(h=par()$usr[3:4],lty=1,lw=0.5)
abline(v=par()$usr[1:2],lty=1,lw=0.5)
abline(v=vbreaks,lty='dotted',lw=1, col='darkgrey')
abline(h=hbreaks,lty='dotted',lw=1, col='darkgrey')
}
return(list(colnames(gmat), vcut))
}
# Set groupdist parameters
rnorder = rngroup # ord.gwas
acutnamed = acutgroup.nam
set = tagline
lablist = lablist.group
dist.order = plot.centers(centlist, set, rnorder, counts=counts, cls=acutnamed, subset=TRUE, calc.only=TRUE)
gll = plot.gwas(finalmat, set, dist.order, gwasord=studyord, calc.only=TRUE)
gwnam = gll[[1]]
gcut = gll[[2]]
# Make plot:
png(paste0(imgpref,'top', SHOWGWAS,'_groupord.png'),res=450,units='in',width=12,height=17)
ratio = 1
layout(matrix(c(1:12),4,3), heights=c(.5,8,8 * ratio, 1), widths=c(1.25,8,.5), TRUE)
par(xpd=FALSE)
par(yaxs="i")
par(xaxs="i")
dist.order = list()
# Metadata matrix:
par(mar=c(0.25, 6, 0, 0))
image(t(as.matrix(metamat) == ""), col='white', axes=F)
metaclass = sapply(rev(colnames(metamat)), capitalize)
text(x=seq(0,1, length.out=ncol(metamat)),
y=par()$usr[3]-0.01*(par()$usr[4]-par()$usr[3]),
labels=metaclass, srt=90, adj=0, xpd=TRUE, cex=.7)
par(mar=c(.25, 6, 0, 0))
meta.image(metamat[rnorder,5:1], colvals=colvals, cex=0, horiz=T)
abline(h=par()$usr[3:4],lty=1,lw=0.5)
abline(v=par()$usr[1:2],lty=1,lw=0.5)
text(y=lablist[[1]],
x=par()$usr[1]-0.1*(par()$usr[2]-par()$usr[1]),
labels=lablist[[2]], srt=0, adj=1, xpd=TRUE,cex=.8, col=lablist[[3]])
abline(h=dist.breaks,lty='dotted', lw=1, col='darkgrey')
# GWAS labels:
par(mar=c(0.25, 6, 0, 0))
par(xpd=NA)
image(gwmat[,gwnam], col='white', axes=F)
text(y=seq(0,1, length.out=length(gwnam)),
x=par()$usr[2]-0.001*(par()$usr[2]-par()$usr[1]),
labels=gwnam, srt=0, adj=1, xpd=TRUE,cex=.25)
par(xpd=FALSE)
# Add labels
plot.new()
par(mar=c(0.25, 0.25, 2, 0.25))
dist.order = plot.centers(centlist, set, rnorder, counts=counts, cls=acutnamed, calc.only=TRUE, subset=TRUE)
clsord = dist.order[[1]]
meta.image(enrichmat[clsord,5:1], colvals=colvals, cex=0, horiz=F)
abline(h=par()$usr[3:4],lty=1,lw=0.5)
abline(v=par()$usr[1:2],lty=1,lw=0.5)
vbreaks = dist.order[[set]][[2]]
abline(v=vbreaks,lty='dotted',lw=1, col='darkgrey')
mtext(set, side=3, cex=1.3)
# Plot clusters and counts
set = tagline
par(mar=c(0.25, 0.25, 0, 0.25))
dist.order[[set]] = plot.centers(centlist, set, rnorder, counts=counts, cls=acutnamed, title=FALSE, subset=TRUE)
abline(h=dist.breaks,lty='dotted', lw=1, col='darkgrey')
# Add rectangles to centers:
rll = calc.breaks.rect(hcls=acutnamed, vcls=dist.order[[set]][[3]], colset)
rectdf = rll[[1]]
vccols = rll[[2]]
# Add grey - ubq rectangle:
rectdf = rbind(c(x1=par()$usr[1], x2=rectdf$x1[1],
y1=par()$usr[3], y2=par()$usr[4]), rectdf)
vccols = c('grey',vccols)
rect(xleft=rectdf$x1, xright=rectdf$x2,
ybottom=rectdf$y1, ytop=rectdf$y2,
border=vccols, lwd=1)
par(xpd=NA)
rect(xleft=rectdf$x1, xright=rectdf$x2, ybottom=par()$usr[3],
ytop=par()$usr[3]-0.004*(par()$usr[4]-par()$usr[3]),
border='white', col=vccols, lwd=.25)
par(xpd=FALSE)
# GWAS plot
par(mar=c(0.25, 0.25, 0, 0.25))
gll = plot.gwas(finalmat, set, dist.order[[set]], gwasord=studyord)
gwnam = gll[[1]]
gcut = gll[[2]]
rll = calc.breaks.rect(hcls=gcut, vcls=dist.order[[set]][[3]], colset)
rectdf = rll[[1]]
vccols = rll[[2]]
# Add grey - ubq rectangle:
rectdf = rbind(c(x1=par()$usr[1], x2=rectdf$x1[1],
y1=par()$usr[3], y2=par()$usr[4]), rectdf)
vccols = c('grey',vccols)
rect(xleft=rectdf$x1, xright=rectdf$x2,
ybottom=rectdf$y1, ytop=rectdf$y2,
border=vccols, lwd=1)
par(xpd=NA)
rect(ybottom=rectdf$y1, ytop=rectdf$y2, xright=par()$usr[1],
xleft=par()$usr[1]-0.004*(par()$usr[2]-par()$usr[1]),
border='white', col=vccols, lwd=.25)
rect(xleft=rectdf$x1, xright=rectdf$x2, ybottom=par()$usr[3],
ytop=par()$usr[3]-0.004*(par()$usr[4]-par()$usr[3]),
border='white', col=vccols, lwd=.25)
par(xpd=FALSE)
par(mar=c(0.25, 0.25, 0, 0.25))
plot.counts(counts, set, dist.order[[set]])
plot.new()
# Availability:
par(mar=c(.25, 0, 0, 0.25))
avail = as.matrix(wm[main.marks, rnorder])
image(avail, axes=F, col=c('white', 'darkgrey'))
abline(h=par()$usr[3:4],lty=1,lw=0.5)
abline(v=par()$usr[1:2],lty=1,lw=0.5)
par(mar=c(.25, 0, 0, 0.25))
image(avail, axes=F, col='white')
text(x=seq(0,1, length.out=length(main.marks)),
y=par()$usr[4],
labels=main.marks, srt=90, adj=1, xpd=TRUE,cex=.5)
dev.off()
| /plot_modules_with_gwas.R | no_license | hzaurzli/EPIMAP_ANALYSIS | R | false | false | 8,448 | r | #!/usr/bin/R
# -----------------------------------------------------
# Plot modules from CHMM/other clustering with GWAS
# 1. Plot the centers matrix with gwas
# 2. Plot stats on gwas - amt of enrichment/depletion
# 3. Associate gwas with cell types by enrichment
# 4. Plot and order/cluster the adjacency matrix
# for the epigenomes by gwas co-enrichment
# -----------------------------------------------------
domain = system("hostname -d", intern=TRUE)
if (domain == 'broadinstitute.org'){
bindir='~/data/EPIMAP_ANALYSIS/bin/'
} else {
bindir='~/EPIMAP_ANALYSIS/bin/'
}
source(paste0(bindir, 'general_EPIMAP_ANALYSIS.R'))
source(paste0(bindir, 'auxiliary_chromImpute_functions.R'))
source(paste0(bindir, 'load_metadata.R'))
today <- format(Sys.time(), "%m%d%y")
library(ggplot2)
library(viridis)
library(tidyr)
library(dplyr)
# Defaults:
gwasfile = 'observed_aux_18_on_mixed_impobs_QCUT_PROM_bin_on_mixed_impobs_5000_enrich.tsv'
# gwasfile = 'observed_aux_18_on_mixed_impobs_QCUT_ENH_bin_on_mixed_impobs_5000_enrich.tsv'
# gwasfile = 'observed_aux_18_on_mixed_impobs_QCUT_ENH_bin_on_mixed_impobs_1000_enrich.tsv'
# gwasfile = 'observed_aux_18_on_mixed_impobs_QCUT_ENH_bin_on_mixed_impobs_0_enrich.tsv'
# filepref = 'cls_merge2_wH3K27ac100_300'
#tagline = 'ChromHMM Enhancers'
filepref = 'prom_cls/cls_merge2_wH3K27ac100_300'
tagline = 'ChromHMM Promoters'
extension = 5000
imgdir = paste0(img, "clusters/")
# Arguments:
args=(commandArgs(TRUE))
if (length(args)==0) {
stop("No arguments supplied: Need clusters filename")
} else {
gwasfile = args[1]
filepref = args[2]
tagline = args[3]
extension = args[4]
if (length(args) > 4){
imgdir = args[5]
}
}
# -------------------------
# Load in and process data:
# -------------------------
commandArgs <- function(trailingOnly=TRUE){
c(filepref, tagline, imgdir) }
source(paste0(bindir, 'load_modules_data.R'))
# Prefix:
cmd = paste0('mkdir -p ', imgdir)
system(cmd)
# imgpref = paste0(imgdir, 'clusters_gwas_', filepref, '_e', extension, '_')
imgpref = paste0(imgdir, 'clusters_gwas_', sub("/","_", filepref), '_e', extension, '_')
# --------------------------------
# Add gwas enrichment information:
# --------------------------------
gwdf = read.delim(gwasfile, header=F)
names(gwdf) = c('pvalue','cluster','pmid','trait',
'counthit','countall','fold')
gwdf$set = tagline
gwdf$pmt = paste0(gwdf$pmid, '_', gwdf$trait)
gwdf$cls = paste0('c', gwdf$cluster)
gwdf$logpval = -log10(gwdf$pvalue)
gwlong = aggregate(logpval ~ cls + pmt, gwdf, max)
wide = spread(gwlong, pmt, logpval, fill=0)
gwmat = as.matrix(wide[,-1])
rownames(gwmat) = wide$cls
gwmat[gwmat < 1] = 0
keep.cls = list()
keep.cls[[tagline]] = rownames(gwmat)
# Choose # gwas to show:
SHOWGWAS=300
zmax = 5
zmin=0.5
# gwasmarg = sort(apply(gwmat, 2, sum), decreasing=T)
gwasmarg = sort(apply(gwmat, 2, max), decreasing=T)
keep.studies = names(head(gwasmarg, SHOWGWAS))
# Order the top studies:
r2 = reord(t(gwmat[, keep.studies]) > zmin, 'Jaccard')
studyord = rownames(r2)
r3 = reord(gwmat[, keep.studies] > zmin, 'eJaccard')
roword = rownames(r3)
finalmat = gwmat[roword, studyord]
# Threshold for plotting:
# Plot diagonalized -> first order by cluster (rows) -> diag
gmat = finalmat
# Render (or just compute the layout of) a cluster-by-GWAS enrichment
# heatmap.
#   gwas      matrix of -log10 p-values, rows = clusters, cols = studies
#   set       unused here beyond the caller's bookkeeping
#   ordll     list: [[1]] row order, [[2]] vertical break positions,
#             [[3]] cluster cut assignments
#   zmin/zmax values below zmin are zeroed; values above zmax are clipped
#   gwasord   optional explicit column (study) order
#   labeled   currently unused — TODO confirm intended behaviour
#   calc.only if TRUE, skip drawing and only compute the ordering
# Returns list(column order after diagonalisation, row cut vector).
# Depends on diag.mat, calc.breaks.acut and colred defined elsewhere.
plot.gwas <- function(gwas, set, ordll, zmin=1, zmax=5, gwasord=NULL, labeled=TRUE, calc.only=FALSE){
# GWAS PLOT HERE:
ord = ordll[[1]]
vbreaks = ordll[[2]]
clscuts = ordll[[3]]
# Restrict the requested row order to clusters actually present in gwas.
subord = ord[ord %in% rownames(gwas)]
gmat = gwas[subord,]
if (!is.null(gwasord)){ gmat = gmat[,gwasord] }
# Diagonalize, ignoring lower than cutoff
gmat[gmat < zmin] <- 0
ll = diag.mat(gmat)
gmat = ll[[1]]
cto = ll[[3]] # For breaks
vcut = c(cto[cto ==0], clscuts[cto])
hbreaks = calc.breaks.acut(vcut)
# Threshold for plotting:
gmat[gmat > zmax] <- zmax
if (!calc.only){
image(gmat, axes=FALSE,col=colred, zlim=c(zmin, zmax))
# Solid frame around the panel, dotted gridlines at the cut boundaries.
abline(h=par()$usr[3:4],lty=1,lw=0.5)
abline(v=par()$usr[1:2],lty=1,lw=0.5)
abline(v=vbreaks,lty='dotted',lw=1, col='darkgrey')
abline(h=hbreaks,lty='dotted',lw=1, col='darkgrey')
}
return(list(colnames(gmat), vcut))
}
# Set groupdist parameters
rnorder = rngroup # ord.gwas
acutnamed = acutgroup.nam
set = tagline
lablist = lablist.group
dist.order = plot.centers(centlist, set, rnorder, counts=counts, cls=acutnamed, subset=TRUE, calc.only=TRUE)
gll = plot.gwas(finalmat, set, dist.order, gwasord=studyord, calc.only=TRUE)
gwnam = gll[[1]]
gcut = gll[[2]]
# Make plot:
png(paste0(imgpref,'top', SHOWGWAS,'_groupord.png'),res=450,units='in',width=12,height=17)
ratio = 1
layout(matrix(c(1:12),4,3), heights=c(.5,8,8 * ratio, 1), widths=c(1.25,8,.5), TRUE)
par(xpd=FALSE)
par(yaxs="i")
par(xaxs="i")
dist.order = list()
# Metadata matrix:
par(mar=c(0.25, 6, 0, 0))
image(t(as.matrix(metamat) == ""), col='white', axes=F)
metaclass = sapply(rev(colnames(metamat)), capitalize)
text(x=seq(0,1, length.out=ncol(metamat)),
y=par()$usr[3]-0.01*(par()$usr[4]-par()$usr[3]),
labels=metaclass, srt=90, adj=0, xpd=TRUE, cex=.7)
par(mar=c(.25, 6, 0, 0))
meta.image(metamat[rnorder,5:1], colvals=colvals, cex=0, horiz=T)
abline(h=par()$usr[3:4],lty=1,lw=0.5)
abline(v=par()$usr[1:2],lty=1,lw=0.5)
text(y=lablist[[1]],
x=par()$usr[1]-0.1*(par()$usr[2]-par()$usr[1]),
labels=lablist[[2]], srt=0, adj=1, xpd=TRUE,cex=.8, col=lablist[[3]])
abline(h=dist.breaks,lty='dotted', lw=1, col='darkgrey')
# GWAS labels:
par(mar=c(0.25, 6, 0, 0))
par(xpd=NA)
image(gwmat[,gwnam], col='white', axes=F)
text(y=seq(0,1, length.out=length(gwnam)),
x=par()$usr[2]-0.001*(par()$usr[2]-par()$usr[1]),
labels=gwnam, srt=0, adj=1, xpd=TRUE,cex=.25)
par(xpd=FALSE)
# Add labels
plot.new()
par(mar=c(0.25, 0.25, 2, 0.25))
dist.order = plot.centers(centlist, set, rnorder, counts=counts, cls=acutnamed, calc.only=TRUE, subset=TRUE)
clsord = dist.order[[1]]
meta.image(enrichmat[clsord,5:1], colvals=colvals, cex=0, horiz=F)
abline(h=par()$usr[3:4],lty=1,lw=0.5)
abline(v=par()$usr[1:2],lty=1,lw=0.5)
vbreaks = dist.order[[set]][[2]]
abline(v=vbreaks,lty='dotted',lw=1, col='darkgrey')
mtext(set, side=3, cex=1.3)
# Plot clusters and counts
set = tagline
par(mar=c(0.25, 0.25, 0, 0.25))
dist.order[[set]] = plot.centers(centlist, set, rnorder, counts=counts, cls=acutnamed, title=FALSE, subset=TRUE)
abline(h=dist.breaks,lty='dotted', lw=1, col='darkgrey')
# Add rectangles to centers:
rll = calc.breaks.rect(hcls=acutnamed, vcls=dist.order[[set]][[3]], colset)
rectdf = rll[[1]]
vccols = rll[[2]]
# Add grey - ubq rectangle:
rectdf = rbind(c(x1=par()$usr[1], x2=rectdf$x1[1],
y1=par()$usr[3], y2=par()$usr[4]), rectdf)
vccols = c('grey',vccols)
rect(xleft=rectdf$x1, xright=rectdf$x2,
ybottom=rectdf$y1, ytop=rectdf$y2,
border=vccols, lwd=1)
par(xpd=NA)
rect(xleft=rectdf$x1, xright=rectdf$x2, ybottom=par()$usr[3],
ytop=par()$usr[3]-0.004*(par()$usr[4]-par()$usr[3]),
border='white', col=vccols, lwd=.25)
par(xpd=FALSE)
# GWAS plot
par(mar=c(0.25, 0.25, 0, 0.25))
gll = plot.gwas(finalmat, set, dist.order[[set]], gwasord=studyord)
gwnam = gll[[1]]
gcut = gll[[2]]
rll = calc.breaks.rect(hcls=gcut, vcls=dist.order[[set]][[3]], colset)
rectdf = rll[[1]]
vccols = rll[[2]]
# Add grey - ubq rectangle:
rectdf = rbind(c(x1=par()$usr[1], x2=rectdf$x1[1],
y1=par()$usr[3], y2=par()$usr[4]), rectdf)
vccols = c('grey',vccols)
rect(xleft=rectdf$x1, xright=rectdf$x2,
ybottom=rectdf$y1, ytop=rectdf$y2,
border=vccols, lwd=1)
par(xpd=NA)
rect(ybottom=rectdf$y1, ytop=rectdf$y2, xright=par()$usr[1],
xleft=par()$usr[1]-0.004*(par()$usr[2]-par()$usr[1]),
border='white', col=vccols, lwd=.25)
rect(xleft=rectdf$x1, xright=rectdf$x2, ybottom=par()$usr[3],
ytop=par()$usr[3]-0.004*(par()$usr[4]-par()$usr[3]),
border='white', col=vccols, lwd=.25)
par(xpd=FALSE)
par(mar=c(0.25, 0.25, 0, 0.25))
plot.counts(counts, set, dist.order[[set]])
plot.new()
# Availability:
par(mar=c(.25, 0, 0, 0.25))
avail = as.matrix(wm[main.marks, rnorder])
image(avail, axes=F, col=c('white', 'darkgrey'))
abline(h=par()$usr[3:4],lty=1,lw=0.5)
abline(v=par()$usr[1:2],lty=1,lw=0.5)
par(mar=c(.25, 0, 0, 0.25))
image(avail, axes=F, col='white')
text(x=seq(0,1, length.out=length(main.marks)),
y=par()$usr[4],
labels=main.marks, srt=90, adj=1, xpd=TRUE,cex=.5)
dev.off()
|
read_lines <- function(...) {
  # Read lines from a file, preferring readr's faster implementation when
  # installed and falling back to base readLines() otherwise.
  has_readr <- requireNamespace("readr", quietly = TRUE)
  if (has_readr) {
    return(readr::read_lines(...))
  }
  readLines(...)
}
write_lines <- function(...) {
  # Write lines to a file, preferring readr when installed and falling
  # back to base writeLines() otherwise.
  has_readr <- requireNamespace("readr", quietly = TRUE)
  if (has_readr) {
    return(readr::write_lines(...))
  }
  writeLines(...)
}
stri_locate_first_fixed_no_stringi <- function(str, pattern) {
  # Pure-base fallback for stringi::stri_locate_first_fixed().
  # Returns a length(str) x 2 integer matrix of (start, end) positions of
  # the first fixed (non-regex) occurrence of `pattern` in each element,
  # with NA in rows where the pattern does not occur.
  out <- matrix(NA_integer_, nrow = length(str), ncol = 2L)
  rows_with_pattern <- grep(pattern, str, fixed = TRUE)
  nchar_pattern <- nchar(pattern)
  for (i in rows_with_pattern) {
    res <- as.vector(regexpr(pattern = pattern,
                             text = str[i],
                             fixed = TRUE),
                     mode = "integer")
    out[i, 1L] <- res
  }
  out[, 2L] <- out[, 1L] + nchar_pattern - 1L
  # Conformance with stringi's column names. Base `dimnames<-` is used so
  # this fallback does not silently depend on data.table::setattr().
  dimnames(out) <- list(NULL, c("start", "end"))
  out
}
stri_count_fixed_no_stringi <- function(str, pattern) {
  # Pure-base fallback for stringi::stri_count_fixed(): the number of
  # non-overlapping fixed occurrences of `pattern` in each element of `str`.
  relevant_line_nos <- grep(pattern, str, fixed = TRUE)
  relevant_lines <- str[relevant_line_nos]
  count_on_relevant <-
    if (nchar(pattern) == 1L) {
      # Fast path for a single character: count matching positions.
      split_lines <- strsplit(relevant_lines, split = "", fixed = TRUE)
      vapply(split_lines, function(x) sum(x == pattern), integer(1L))
    } else {
      # gregexpr(fixed = TRUE) yields left-to-right non-overlapping match
      # positions, matching stringi's counting semantics. (The previous
      # sub()-until-gone loop over-counted when removing a match spliced
      # the flanking text into a new occurrence, e.g. "aabb" / "ab".)
      vapply(relevant_lines, function(line) {
        hits <- gregexpr(pattern, line, fixed = TRUE)[[1L]]
        if (hits[1L] == -1L) 0L else length(hits)
      },
      FUN.VALUE = integer(1L))
    }
  out <- integer(length(str))
  out[relevant_line_nos] <- count_on_relevant
  out
}
stri_locate_first_fixed <- function(str, pattern, ...) {
  # Dispatch to stringi when available; otherwise use the base fallback
  # (stringi-only arguments passed via ... are dropped in that case).
  if (!requireNamespace("stringi", quietly = TRUE)) {
    return(stri_locate_first_fixed_no_stringi(str, pattern))
  }
  stringi::stri_locate_first_fixed(str = str, pattern = pattern, ...)
}
stri_count_fixed <- function(str, pattern, ...) {
  # Dispatch to stringi when available; otherwise use the base fallback
  # (stringi-only arguments passed via ... are dropped in that case).
  if (!requireNamespace("stringi", quietly = TRUE)) {
    return(stri_count_fixed_no_stringi(str = str, pattern = pattern))
  }
  stringi::stri_count_fixed(str = str, pattern = pattern, ...)
}
# Base-R fallback for stringi::stri_sub(), restricted to scalar 'from'/'to'.
#
# Negative indices count from the end of each string, as in stringi
# (-1 is the last character). Returns a character vector the same length
# as 'str'.
stri_sub_no_stringi <- function(str, from, to) {
  out <- str
  # nchar(x) + 1L + (negative index) gives the position counted from the
  # end of each string, so e.g. to = -1L resolves to nchar(x).
  nchar_out <- nchar(out) + 1L
  stopifnot(length(from) == 1L, length(to) == 1L)
  # Resolve the scalar bounds per element. (The original assigned rep_len()
  # results that were immediately overwritten by the if/else branches; that
  # dead code is removed here.)
  FROM <- if (from < 0L) nchar_out + from else rep_len(from, length(out))
  TO <- if (to < 0L) nchar_out + to else rep_len(to, length(out))
  substr(out, FROM, TO)
}
# Extract substrings from 'str' between positions 'from' and 'to'
# (negative values count from the end). '.len' maps to stringi's 'length'
# argument when stringi is installed.
stri_sub <- function(str, from = 1L, to = -1L, .len) {
  if (requireNamespace("stringi", quietly = TRUE)) {
    if (missing(.len)) {
      stringi::stri_sub(str = str, from = from, to = to)
    } else {
      stringi::stri_sub(str = str, from = from, to = to, length = .len)
    }
  } else {
    # NOTE: the base-R fallback ignores '.len' and only supports scalar
    # 'from'/'to'.
    stri_sub_no_stringi(str = str, from = from, to = to)
  }
}
# Strip leading and trailing whitespace from each element of 'str'.
# Uses stringi when installed; base trimws() is the equivalent fallback.
stri_trim_both <- function(str) {
  if (!requireNamespace("stringi", quietly = TRUE)) {
    return(trimws(str))
  }
  stringi::stri_trim_both(str)
}
| /R/suggested_speedups.R | no_license | jonocarroll/TeXCheckR | R | false | false | 3,260 | r |
# Wrapper around readr::read_lines() that falls back to base readLines()
# when readr is not installed. Arguments are forwarded unchanged.
read_lines <- function(...) {
  if (requireNamespace("readr", quietly = TRUE)) {
    readr::read_lines(...)
  } else {
    readLines(...)
  }
}
# Wrapper around readr::write_lines() that falls back to base writeLines()
# when readr is not installed. Arguments are forwarded unchanged.
write_lines <- function(...) {
  if (requireNamespace("readr", quietly = TRUE)) {
    readr::write_lines(...)
  } else {
    writeLines(...)
  }
}
# Base-R fallback for stringi::stri_locate_first_fixed(): returns an integer
# matrix (columns "start"/"end", one row per input string) giving the first
# literal occurrence of 'pattern'; both entries are NA when there is no match.
stri_locate_first_fixed_no_stringi <- function(str, pattern) {
  out <- matrix(NA_integer_, nrow = length(str), ncol = 2L)
  # Only run regexpr() on elements known (via grep) to contain the pattern.
  rows_with_pattern <- grep(pattern, str, fixed = TRUE)
  nchar_pattern <- nchar(pattern)
  for (i in rows_with_pattern) {
    res <- as.vector(regexpr(pattern = pattern,
                             text = str[i],
                             fixed = TRUE),
                     mode = "integer")
    out[i, 1L] <- res
  }
  # A fixed-pattern match always spans exactly nchar(pattern) characters.
  out[, 2L] <- out[, 1L] + nchar_pattern - 1L
  # Conformance with stringi
  # NOTE(review): setattr() is from data.table, not base R, so this
  # "no-stringi" fallback still requires data.table -- confirm intended.
  setattr(out, "dimnames", value = list(NULL, c("start", "end")))
  out
}
# Base-R fallback for stringi::stri_count_fixed(): number of non-overlapping
# literal occurrences of 'pattern' in each element of 'str'.
stri_count_fixed_no_stringi <- function(str, pattern) {
  # Restrict counting work to elements containing the pattern at least once.
  relevant_line_nos <- grep(pattern, str, fixed = TRUE)
  relevant_lines <- str[relevant_line_nos]
  count_on_relevant <-
    if (nchar(pattern) == 1L) {
      # Single character: explode into characters and tally matches.
      split_lines <- strsplit(relevant_lines, split = "", fixed = TRUE)
      vapply(split_lines, function(x) sum(x == pattern), integer(1L))
    } else {
      # If you wanted speed, you should have used stringi!
      vapply(relevant_lines, function(line) {
        count <- 0L
        # How many times do we have to cut 'pattern' away?
        while (grepl(pattern, line, fixed = TRUE)) {
          count <- count + 1L
          # Bear in mind 'aaaaa' where pattern = 'aa'
          line <- sub(pattern, replacement = "", line, fixed = TRUE)
        }
        count
      },
      FUN.VALUE = integer(1L))
    }
  # Elements without the pattern keep their zero count.
  out <- integer(length(str))
  out[relevant_line_nos] <- count_on_relevant
  out
}
# Dispatch to stringi::stri_locate_first_fixed() when installed; otherwise
# use the base-R fallback above (which ignores '...').
stri_locate_first_fixed <- function(str, pattern, ...) {
  if (requireNamespace("stringi", quietly = TRUE)) {
    stringi::stri_locate_first_fixed(str = str, pattern = pattern, ...)
  } else {
    stri_locate_first_fixed_no_stringi(str, pattern)
  }
}
# Dispatch to stringi::stri_count_fixed() when installed; otherwise use the
# base-R fallback above (which ignores '...').
stri_count_fixed <- function(str, pattern, ...) {
  if (requireNamespace("stringi", quietly = TRUE)) {
    stringi::stri_count_fixed(str = str, pattern = pattern, ...)
  } else {
    stri_count_fixed_no_stringi(str = str, pattern = pattern)
  }
}
# Base-R fallback for stringi::stri_sub(), restricted to scalar 'from'/'to'.
# Negative indices count from the end of each string (-1 = last character).
stri_sub_no_stringi <- function(str, from, to) {
  out <- str
  # nchar(x) + 1L + (negative index) gives the position counted from the end.
  nchar_out <- nchar(out) + 1L
  stopifnot(length(from) == 1L, length(to) == 1L)
  # NOTE(review): the next two assignments are immediately overwritten by
  # the if/else blocks below and could be removed as dead code.
  FROM <- rep_len(from, length(out))
  TO <- rep_len(to, length(out))
  if (from < 0L) {
    FROM <- nchar_out + from
  } else {
    FROM <- rep_len(from, length(out))
  }
  if (to < 0L) {
    TO <- nchar_out + to
  } else {
    TO <- rep_len(to, length(out))
  }
  substr(out, FROM, TO)
}
# Extract substrings via stringi::stri_sub() when installed; '.len' maps to
# stringi's 'length' argument. The base-R fallback ignores '.len' and only
# supports scalar 'from'/'to'.
stri_sub <- function(str, from = 1L, to = -1L, .len) {
  if (requireNamespace("stringi", quietly = TRUE)) {
    if (missing(.len)) {
      stringi::stri_sub(str = str, from = from, to = to)
    } else {
      stringi::stri_sub(str = str, from = from, to = to, length = .len)
    }
  } else {
    stri_sub_no_stringi(str = str, from = from, to = to)
  }
}
# Strip leading and trailing whitespace from both ends; base trimws() is the
# equivalent of stringi::stri_trim_both().
stri_trim_both <- function(str) {
  if (requireNamespace("stringi", quietly = TRUE)) {
    stringi::stri_trim_both(str)
  } else {
    trimws(str)
  }
}
|
\name{valueMat}
\alias{valueMat}
\title{Create a binary matrix from a list of values}
\usage{
valueMat(listOfValues, fill = NA)
}
\arguments{
\item{listOfValues}{A \code{list} of input values to be
inserted in a matrix.}
\item{fill}{The initializing fill value for the empty
matrix.}
}
\value{
A \code{matrix}.
}
\description{
Create a binary matrix from a list of values
}
\details{
This is primarily a helper function for the
\code{\link{concat.split}} function when creating the
"expanded" structure. The input is anticipated to be a
\code{list} of values obtained using
\code{\link{strsplit}}.
}
\examples{
invec <- c("1,2,4,5,6", "1,2,4,5,6", "1,2,4,5,6",
"1,2,4,5,6", "1,2,5,6", "1,2,5,6")
A <- strsplit(invec, ",")
splitstackshape:::valueMat(A)
splitstackshape:::valueMat(A, "ZZZ")
\dontshow{rm(invec, A)}
}
\author{
Ananda Mahto
}
\seealso{
\code{strsplit}, \code{\link{binaryMat}}
}
| /man/valueMat.Rd | no_license | Jivan517/splitstackshape | R | false | false | 934 | rd | \name{valueMat}
\alias{valueMat}
\title{Create a binary matrix from a list of values}
\usage{
valueMat(listOfValues, fill = NA)
}
\arguments{
\item{listOfValues}{A \code{list} of input values to be
inserted in a matrix.}
\item{fill}{The initializing fill value for the empty
matrix.}
}
\value{
A \code{matrix}.
}
\description{
Create a binary matrix from a list of values
}
\details{
This is primarily a helper function for the
\code{\link{concat.split}} function when creating the
"expanded" structure. The input is anticipated to be a
\code{list} of values obtained using
\code{\link{strsplit}}.
}
\examples{
invec <- c("1,2,4,5,6", "1,2,4,5,6", "1,2,4,5,6",
"1,2,4,5,6", "1,2,5,6", "1,2,5,6")
A <- strsplit(invec, ",")
splitstackshape:::valueMat(A)
splitstackshape:::valueMat(A, "ZZZ")
\dontshow{rm(invec, A)}
}
\author{
Ananda Mahto
}
\seealso{
\code{strsplit}, \code{\link{binaryMat}}
}
|
testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(7.28934196280358e-304, NaN, -9.53116278721537e-150, NaN, NaN, -8.98846567424643e+307, 9.0707523845019e-97, 2.8527919535453e+180, 5.25176744052243e-90, NaN, 8.0712325733939e-65, 2.31320649354895e-23, NaN, 7.06238442689523e-304, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 2.12199579047121e-314, 0, 0, 0, 0, 0, 0, 0, -5.48612406879369e+303, NaN, NaN, 9.03704860055495e-318, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/libFuzzer_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1612735475-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 712 | r | testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(7.28934196280358e-304, NaN, -9.53116278721537e-150, NaN, NaN, -8.98846567424643e+307, 9.0707523845019e-97, 2.8527919535453e+180, 5.25176744052243e-90, NaN, 8.0712325733939e-65, 2.31320649354895e-23, NaN, 7.06238442689523e-304, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 2.12199579047121e-314, 0, 0, 0, 0, 0, 0, 0, -5.48612406879369e+303, NaN, NaN, 9.03704860055495e-318, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
suppressPackageStartupMessages(library(EpiModelHIV))
rm(list = ls())
#load("scenarios/rdiffhet/est/nwstats.rda")
load("nwstats.rda")
# 1. Main Model -----------------------------------------------------------
# Initialize network
nw.main <- base_nw.mard(st)
# Assign degree
nw.main <- assign_degree(nw.main, deg.type = "pers", nwstats = st)
# Formulas
formation.m <- ~edges +
nodemix("race", base = 1) +
nodefactor("deg.pers", base = c(1, 4)) +
absdiffnodemix("sqrt.age", "race") +
offset(nodematch("role.class", diff = TRUE, keep = 1:2))
# Fit model
fit.m <- netest(nw.main,
formation = formation.m,
coef.form = c(-Inf, -Inf),
target.stats = st$stats.m,
coef.diss = st$coef.diss.m,
constraints = ~bd(maxout = 1),
set.control.ergm = control.ergm(MPLE.max.dyad.types = 1e10,
init.method = "zeros",
MCMLE.maxit = 250))
# 2. Casual Model ---------------------------------------------------------
# Initialize network
nw.pers <- nw.main
# Assign degree
nw.pers <- assign_degree(nw.pers, deg.type = "main", nwstats = st)
# Formulas
formation.p <- ~edges +
nodemix("race", base = 1) +
nodefactor("deg.main", base = c(1, 3)) +
concurrent(by = "race") +
absdiffnodemix("sqrt.age", "race") +
offset(nodematch("role.class", diff = TRUE, keep = 1:2))
# Fit model
fit.p <- netest(nw.pers,
formation = formation.p,
coef.form = c(-Inf, -Inf),
target.stats = st$stats.p,
coef.diss = st$coef.diss.p,
constraints = ~bd(maxout = 2),
set.control.ergm = control.ergm(MPLE.max.dyad.types = 1e9,
init.method = "zeros",
MCMLE.maxit = 250))
# Fit inst model ----------------------------------------------------------
# Initialize network
nw.inst <- nw.main
# Assign degree
nw.inst <- set.vertex.attribute(nw.inst, "deg.main", nw.pers %v% "deg.main")
nw.inst <- set.vertex.attribute(nw.inst, "deg.pers", nw.main %v% "deg.pers")
table(nw.inst %v% "deg.main", nw.inst %v% "deg.pers")
# Formulas
formation.i <- ~edges +
nodefactor(c("deg.main", "deg.pers")) +
nodefactor(c("race", "riskg"), base = c(3, 8)) +
nodematch("race") +
absdiffnodemix("sqrt.age", "race") +
offset(nodematch("role.class", diff = TRUE, keep = 1:2))
# Fit model
fit.i <- netest(nw.inst,
formation = formation.i,
target.stats = st$stats.i,
coef.form = c(-Inf, -Inf),
coef.diss = dissolution_coefs(~offset(edges), 1),
set.control.ergm = control.ergm(MPLE.max.dyad.types = 1e9,
MCMLE.maxit = 250))
# Save data
est <- list(fit.m, fit.p, fit.i)
#save(est, file = "scenarios/rdiffhet/est/fit.rda")
#save(est, file = "/net/proj/camp/rdiffhet/est/fit.rda")
save(est, file = "fit.rda")
# Diagnostics -------------------------------------------------------------
# dx <- netdx(fit.i, nsims = 10000, ncores = 1, dynamic = FALSE,
# nwstats.formula = ~ edges + nodefactor(c("race", "riskg"), base = 0))
# dx
| /scripts/CCR5delta32/02.estim.R | permissive | wangdafacai/MSMRaceDisparities_LancetHIV2017 | R | false | false | 3,511 | r |
suppressPackageStartupMessages(library(EpiModelHIV))
rm(list = ls())
#load("scenarios/rdiffhet/est/nwstats.rda")
load("nwstats.rda")
# 1. Main Model -----------------------------------------------------------
# Initialize network
nw.main <- base_nw.mard(st)
# Assign degree
nw.main <- assign_degree(nw.main, deg.type = "pers", nwstats = st)
# Formulas
formation.m <- ~edges +
nodemix("race", base = 1) +
nodefactor("deg.pers", base = c(1, 4)) +
absdiffnodemix("sqrt.age", "race") +
offset(nodematch("role.class", diff = TRUE, keep = 1:2))
# Fit model
fit.m <- netest(nw.main,
formation = formation.m,
coef.form = c(-Inf, -Inf),
target.stats = st$stats.m,
coef.diss = st$coef.diss.m,
constraints = ~bd(maxout = 1),
set.control.ergm = control.ergm(MPLE.max.dyad.types = 1e10,
init.method = "zeros",
MCMLE.maxit = 250))
# 2. Casual Model ---------------------------------------------------------
# Initialize network
nw.pers <- nw.main
# Assign degree
nw.pers <- assign_degree(nw.pers, deg.type = "main", nwstats = st)
# Formulas
formation.p <- ~edges +
nodemix("race", base = 1) +
nodefactor("deg.main", base = c(1, 3)) +
concurrent(by = "race") +
absdiffnodemix("sqrt.age", "race") +
offset(nodematch("role.class", diff = TRUE, keep = 1:2))
# Fit model
fit.p <- netest(nw.pers,
formation = formation.p,
coef.form = c(-Inf, -Inf),
target.stats = st$stats.p,
coef.diss = st$coef.diss.p,
constraints = ~bd(maxout = 2),
set.control.ergm = control.ergm(MPLE.max.dyad.types = 1e9,
init.method = "zeros",
MCMLE.maxit = 250))
# Fit inst model ----------------------------------------------------------
# Initialize network
nw.inst <- nw.main
# Assign degree
nw.inst <- set.vertex.attribute(nw.inst, "deg.main", nw.pers %v% "deg.main")
nw.inst <- set.vertex.attribute(nw.inst, "deg.pers", nw.main %v% "deg.pers")
table(nw.inst %v% "deg.main", nw.inst %v% "deg.pers")
# Formulas
formation.i <- ~edges +
nodefactor(c("deg.main", "deg.pers")) +
nodefactor(c("race", "riskg"), base = c(3, 8)) +
nodematch("race") +
absdiffnodemix("sqrt.age", "race") +
offset(nodematch("role.class", diff = TRUE, keep = 1:2))
# Fit model
fit.i <- netest(nw.inst,
formation = formation.i,
target.stats = st$stats.i,
coef.form = c(-Inf, -Inf),
coef.diss = dissolution_coefs(~offset(edges), 1),
set.control.ergm = control.ergm(MPLE.max.dyad.types = 1e9,
MCMLE.maxit = 250))
# Save data
est <- list(fit.m, fit.p, fit.i)
#save(est, file = "scenarios/rdiffhet/est/fit.rda")
#save(est, file = "/net/proj/camp/rdiffhet/est/fit.rda")
save(est, file = "fit.rda")
# Diagnostics -------------------------------------------------------------
# dx <- netdx(fit.i, nsims = 10000, ncores = 1, dynamic = FALSE,
# nwstats.formula = ~ edges + nodefactor(c("race", "riskg"), base = 0))
# dx
|
#!/usr/bin/env Rscript
# GSEA R script
library(clusterProfiler) | /GSEA.R | no_license | the8thday/16s_plot_R | R | false | false | 67 | r | #๏ผ/urs/bin/env Rscript
# GSEA R่ๆฌ
library(clusterProfiler) |
\name{plot.BootTOS}
\alias{plot.BootTOS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plots results of a Bootstrap Test of Stationarity
}
\description{Produces the bootstrap simulation results as a histogram,
with a vertical line indicating the test statistic computed
on the actual data.
}
\usage{
\method{plot}{BootTOS}(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{The object you wish to get a plot on.
}
\item{\dots}{Other arguments to plot.
}
}
\details{Produces a histogram of all the bootstrap statistics
and the test statistic computed on the true data.
Also produces a vertical line indicating the position
of the true statistic.
}
\value{None.
}
\references{Cardinali, A. and Nason, Guy P. (2013) Costationarity of
Locally Stationary Time Series Using costat.
\emph{Journal of Statistical Software}, \bold{55}, Issue 1.
Cardinali, A. and Nason, G.P. (2010) Costationarity of locally stationary
time series. \emph{J. Time Series Econometrics}, \bold{2}, Issue 2, Article 1.
}
\author{
G.P. Nason
}
\seealso{\code{\link{BootTOS}}}
\examples{
#
v <- rnorm(512)
\dontrun{v.BootTOS <- BootTOS(v)}
\dontrun{plot(v.BootTOS)}
}
\keyword{ts}
| /man/plot.BootTOS.Rd | no_license | cran/costat | R | false | false | 1,226 | rd | \name{plot.BootTOS}
\alias{plot.BootTOS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plots results of a Bootstrap Test of Stationarity
}
\description{Produces Bootstrap simulation result as a histogram
with a vertical line indicating the test statistic computed
on the actual data.
}
\usage{
\method{plot}{BootTOS}(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{The object you wish to get a plot on.
}
\item{\dots}{Other arguments to plot.
}
}
\details{Produces a histogram of all the bootstrap statistics
and the test statistic computed on the true data.
Also produces a vertical line indicating the position
of the true statistic.
}
\value{None.
}
\references{Cardinali, A. and Nason, Guy P. (2013) Costationarity of
Locally Stationary Time Series Using costat.
\emph{Journal of Statistical Software}, \bold{55}, Issue 1.
Cardinali, A. and Nason, G.P. (2010) Costationarity of locally stationary
time series. \emph{J. Time Series Econometrics}, \bold{2}, Issue 2, Article 1.
}
\author{
G.P. Nason
}
\seealso{\code{\link{BootTOS}}}
\examples{
#
v <- rnorm(512)
\dontrun{v.BootTOS <- BootTOS(v)}
\dontrun{plot(v.BootTOS)}
}
\keyword{ts}
|
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Employment Status
if(year == 1996)
FYC <- FYC %>% mutate(EMPST53 = EMPST96, EMPST42 = EMPST2, EMPST31 = EMPST1)
FYC <- FYC %>%
mutate_at(vars(EMPST53, EMPST42, EMPST31), funs(replace(., .< 0, NA))) %>%
mutate(employ_last = coalesce(EMPST53, EMPST42, EMPST31))
FYC <- FYC %>% mutate(
employed = 1*(employ_last==1) + 2*(employ_last > 1),
employed = replace(employed, is.na(employed) & AGELAST < 16, 9),
employed = recode_factor(employed, .default = "Missing", .missing = "Missing",
"1" = "Employed",
"2" = "Not employed",
"9" = "Inapplicable (age < 16)"))
# Insurance coverage
# To compute for all insurance categories, replace 'insurance' in the 'svyby' function with 'insurance_v2X'
if(year == 1996){
FYC <- FYC %>%
mutate(MCDEV96 = MCDEVER, MCREV96 = MCREVER,
OPAEV96 = OPAEVER, OPBEV96 = OPBEVER)
}
if(year < 2011){
FYC <- FYC %>%
mutate(
public = (MCDEV.yy.==1|OPAEV.yy.==1|OPBEV.yy.==1),
medicare = (MCREV.yy.==1),
private = (INSCOV.yy.==1),
mcr_priv = (medicare & private),
mcr_pub = (medicare & !private & public),
mcr_only = (medicare & !private & !public),
no_mcr = (!medicare),
ins_gt65 = 4*mcr_only + 5*mcr_priv + 6*mcr_pub + 7*no_mcr,
INSURC.yy. = ifelse(AGELAST < 65, INSCOV.yy., ins_gt65)
)
}
FYC <- FYC %>%
mutate(insurance = recode_factor(INSCOV.yy., .default = "Missing", .missing = "Missing",
"1" = "Any private, all ages",
"2" = "Public only, all ages",
"3" = "Uninsured, all ages")) %>%
mutate(insurance_v2X = recode_factor(INSURC.yy., .default = "Missing", .missing = "Missing",
"1" = "<65, Any private",
"2" = "<65, Public only",
"3" = "<65, Uninsured",
"4" = "65+, Medicare only",
"5" = "65+, Medicare and private",
"6" = "65+, Medicare and other public",
"7" = "65+, No medicare",
"8" = "65+, No medicare"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~TOTEXP.yy., FUN = svytotal, by = ~insurance + employed, design = FYCdsgn)
print(results)
| /mepstrends/hc_use/json/code/r/totEXP__insurance__employed__.r | permissive | RandomCriticalAnalysis/MEPS-summary-tables | R | false | false | 3,006 | r | # Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Employment Status
if(year == 1996)
FYC <- FYC %>% mutate(EMPST53 = EMPST96, EMPST42 = EMPST2, EMPST31 = EMPST1)
FYC <- FYC %>%
mutate_at(vars(EMPST53, EMPST42, EMPST31), funs(replace(., .< 0, NA))) %>%
mutate(employ_last = coalesce(EMPST53, EMPST42, EMPST31))
FYC <- FYC %>% mutate(
employed = 1*(employ_last==1) + 2*(employ_last > 1),
employed = replace(employed, is.na(employed) & AGELAST < 16, 9),
employed = recode_factor(employed, .default = "Missing", .missing = "Missing",
"1" = "Employed",
"2" = "Not employed",
"9" = "Inapplicable (age < 16)"))
# Insurance coverage
# To compute for all insurance categories, replace 'insurance' in the 'svyby' function with 'insurance_v2X'
if(year == 1996){
FYC <- FYC %>%
mutate(MCDEV96 = MCDEVER, MCREV96 = MCREVER,
OPAEV96 = OPAEVER, OPBEV96 = OPBEVER)
}
if(year < 2011){
FYC <- FYC %>%
mutate(
public = (MCDEV.yy.==1|OPAEV.yy.==1|OPBEV.yy.==1),
medicare = (MCREV.yy.==1),
private = (INSCOV.yy.==1),
mcr_priv = (medicare & private),
mcr_pub = (medicare & !private & public),
mcr_only = (medicare & !private & !public),
no_mcr = (!medicare),
ins_gt65 = 4*mcr_only + 5*mcr_priv + 6*mcr_pub + 7*no_mcr,
INSURC.yy. = ifelse(AGELAST < 65, INSCOV.yy., ins_gt65)
)
}
FYC <- FYC %>%
mutate(insurance = recode_factor(INSCOV.yy., .default = "Missing", .missing = "Missing",
"1" = "Any private, all ages",
"2" = "Public only, all ages",
"3" = "Uninsured, all ages")) %>%
mutate(insurance_v2X = recode_factor(INSURC.yy., .default = "Missing", .missing = "Missing",
"1" = "<65, Any private",
"2" = "<65, Public only",
"3" = "<65, Uninsured",
"4" = "65+, Medicare only",
"5" = "65+, Medicare and private",
"6" = "65+, Medicare and other public",
"7" = "65+, No medicare",
"8" = "65+, No medicare"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~TOTEXP.yy., FUN = svytotal, by = ~insurance + employed, design = FYCdsgn)
print(results)
|
# Retrieve the Tableau schema object published by the shinytableau JS glue
# to the Shiny input "shinytableau-schema". Wrapped in isolate() so callers
# deliberately do not take a reactive dependency on that input.
schema <- function(session) {
  session <- unwrap_session(session)
  shiny::isolate(session$input[["shinytableau-schema"]])
}
#' Get info about available worksheets in this Tableau dashboard
#'
#' For advanced use only; most shinytableau extensions should use the
#' [choose_data()] module to allow the user to specify a worksheet.
#'
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#' @param name A worksheet name, as returned by `tableau_worksheets()`.
#'
#' @return `tableau_worksheets()` returns a character vector whose elements are
#' worksheet names. Note that only worksheets that are included on the same
#' dashboard will be listed, and these are the only worksheets we can access.
#'
#' `tableau_worksheet_info()` returns metadata for a specific worksheet. The
#' return value is a named list that contains the following fields:
#'
#' * **`name`** - The name of the worksheet.
#'
#' * **`summary`** - The [data table schema object][DataTableSchema] for the
#' worksheet's summary-level data table.
#'
#' * **`dataSourceIds`** - Character vector of data source IDs used by this
#' worksheet. See [tableau_datasource_info()].
#'
#' * **`underlyingTables`** - Unnamed list, each element is a [data table
#' schema object][DataTableSchema] of one of the worksheet's underlying
#' data tables.
#'
#' @export
# List the names of the worksheets available in the current dashboard.
# See the roxygen block above for the full contract.
tableau_worksheets <- function(session = shiny::getDefaultReactiveDomain()) {
  worksheet_schemas <- schema(session)[["worksheets"]]
  names(worksheet_schemas)
}
#' @rdname tableau_worksheets
#' @export
# Fetch the metadata list for a single worksheet, looked up by name.
# See the roxygen block above for the fields of the returned list.
tableau_worksheet_info <- function(name, session = shiny::getDefaultReactiveDomain()) {
  worksheets <- schema(session)[["worksheets"]]
  worksheets[[name]]
}
#' Construct a reactive expression that reads Tableau data
#'
#' This function is used to read data from Tableau. Because of the many levels
#' of indirection involved in actually physically reading data from Tableau,
#' using this function is significantly more involved than, say, a simple
#' [read.csv()]. See the Details section for a more detailed introduction.
#'
#' There are two complicating factors when reading data from Tableau; the first
#' is how to tell shinytableau what specific data table you want to access, and
#' the second is actually accessing the data from R.
#'
#' ### Specifying a data table
#'
#' If we want to access data from Tableau, the Tableau Extension API only allows
#' us to do so via one of the worksheets that are part of the same dashboard.
#'
#' Each worksheet makes three categories of data available to us:
#'
#' 1. **Summary data:** The data in its final form before visualization. If the
#' visualization aggregates measures, then the summary data contains the data
#' after aggregation has been performed. If the worksheet has an active
#' selection, then by default, only the selected data is returned (set the
#' `ignoreSelection` option to `TRUE` to retrieve all data).
#'
#' 2. **Underlying data:** The underlying data that is used in the visualization,
#' before aggregation operations are performed but after tables are joined.
#'
#' By default, only the columns that are used in the worksheet are included
#' (set `includeAllColumns` to `TRUE` if you need them all). If the worksheet
#' has an active selection, then by default, only the selected data is
#' returned (set the `ignoreSelection` option to `TRUE` to retrieve all
#' data).
#'
#' 3. **Data source:** You can also access the raw data from the data source(s)
#' used by the worksheet. This data is unaffected by the worksheet settings.
#' Tableau data sources are broken into one or more logical tables, like how
#' a relational database has multiple tables.
#'
#' As an R user, you may find this analogy based on the examples from
#' [dplyr::mutate-joins] to be helpful in explaining the relationship between
#' data source, underlying, and summary data:
#'
#' ```
#' # Data source
#' logical1 <- band_members
#' logical2 <- band_instruments
#'
#' # Underlying is joined/selected, but not aggregated
#' underlying <- band_members %>%
#' full_join(band_instruments, by = "name") %>%
#' select(band, name)
#'
#' # Summary is underlying plus aggregation
#' summary <- underlying %>%
#' group_by(band) %>%
#' tally(name = "COUNT(name)")
#' ```
#'
#' The existence of these three levels of data granularity, plus the fact that
#' the underlying and data source levels need additional specification to narrow
#' down which of the multiple data tables at each level are desired, means that
#' providing clear instructions to `reactive_tableau_data` is surprisingly
#' complicated.
#'
#' Now that you have some context, see the description for the `spec` parameter,
#' above, for specific instructions on the different ways to specify data
#' tables, based on current user input, previously saved configuration, or
#' programmatically.
#'
#' ### Accessing a data table
#'
#' We turn our attention now to consuming data from `reactive_tableau_data()`.
#' Given the following code snippet, one that might appear in `config_server`:
#'
#' ```
#' data_spec <- choose_data("mydata")
#' data <- reactive_tableau_data(data_spec)
#' ```
#'
#' The `data` variable created here has two complications.
#'
#' First, it's reactive; like all reactive expressions, you must call `data` as
#' a function to get at its value. It must be reactive because Tableau data can
#' change (based on selection and filtering, if nothing else), and also, the
#' user's choices can change as well (in the example, the `data_spec` object is
#' also reactive).
#'
#' Second, and more seriously, reading Tableau data is asynchronous, so when you
#' invoke `data()` what you get back is not a data frame, but the [promise of a
#' data frame](https://rstudio.github.io/promises/articles/overview.html).
#' Working with promises has its own learning curve, so it's regrettable that
#' they play such a prominent role in reading Tableau data. If this is a new
#' topic for you, [start with this
#' talk](https://rstudio.com/resources/rstudioconf-2018/scaling-shiny-apps-with-async-programming/)
#' and then read through the various articles on the [promises
#' website](https://rstudio.github.io/promises/).
#'
#' The bottom line with promises is that you can use any of the normal functions
#' you usually use for manipulating, analyzing, and visualizing data frames, but
#' the manner in which you invoke those functions will be a bit different.
#' Instead of calling `print(data())`, for example, you'll need to first change
#' to the more pipe-oriented `data() %>% print()` and then replace the magrittr
#' pipe with the promise-pipe like `data() %...>% print()`. There's much more to
#' the story, though; for all but the simplest scenarios, you'll need to check
#' out the resources linked in the previous paragraph.
#'
#'
#' @param spec An argument that specifies what specific data should be
#' accessed. This can be specified in a number of ways:
#'
#' 1. The name of a setting, that was set using a value returned from
#' [choose_data()]. This is the most common scenario for `server`.
#'
#' 2. The object returned from [choose_data()] can be passed in directly. This
#' is likely the approach you should take if you want to access data in
#' `config_server` based on unsaved config changes (e.g. to give the user a
#' live preview of what their `choose_data` choices would yield).
#'
#' 3. You can directly create a spec object using one of the helper functions
#' [spec_summary()], [spec_underlying()], or [spec_datasource()]. For cases where
#' the data is not selected based on [choose_data()] at all, but
#' programmatically determined or hardcoded. (This should not be common.)
#'
#' @param options A named list of options:
#'
#' * `ignoreAliases` - Do not use aliases specified in the data source in
#' Tableau. Default is `FALSE`.
#'
#' * `ignoreSelection` - If `FALSE` (the default), only return data for the
#' currently selected marks. Does not apply for datasource tables, only
#' summary and underlying. If `"never"`, then if no marks are selected,
#' `NULL` is returned. If `TRUE`, all data is returned, regardless of
#' selection.
#'
#' * `includeAllColumns` - Return all the columns for the table. Default is
#' `FALSE`. Does not apply for datasource and summary tables, only underlying.
#'
#' * `maxRows` - The maximum number of rows to return. **Tableau will not,
#' under any circumstances, return more than 10,000 rows for datasource and
#' underlying tables.** This option is ignored for summary tables.
#'
#' * `columnsToInclude` - Character vector of columns that should be included;
#' leaving this option unspecified means all columns should be returned. Does
#' not apply for summary and underlying, only datasource.
#'
#' * `truncation` - For underlying and datasource reads, Tableau will never,
#' under any circumstances, return more than 10,000 rows of data. If `warn`
#' (the default), when this condition occurs a warning will be displayed to
#' the user and emitted as a warning in the R process, then the available
#' data will be returned. If `ignore`, then no warning will be issued. If
#' `error`, then an error will be raised.
#'
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#'
#' @examples
#' \dontrun{
#' data_spec_x <- choose_data("x", iv = iv)
#' data_x <- reactive_tableau_data(data_spec_x)
#' }
#'
#' @import promises
#' @export
reactive_tableau_data <- function(spec, options = list(),
                                  session = shiny::getDefaultReactiveDomain()) {
  # Force promises now so lazy evaluation cannot pick up later values of the
  # caller's variables.
  force(spec)
  force(options)
  session <- unwrap_session(session)
  # A length-1 character 'spec' is interpreted as the name of a saved setting
  # (written via choose_data()); resolve it reactively.
  if (is.character(spec) && length(spec) == 1) {
    setting_name <- spec
    spec <- shiny::reactive({
      tableau_setting(setting_name, session = session)
    })
  }
  # A non-function 'spec' (a literal spec object) is wrapped in a constant
  # function so the code below can always call spec().
  if (!is.function(spec)) {
    value <- spec
    spec <- function() value
  }
  options <- merge_defaults(options, list(
    truncation = "warn"
  ))
  # Validate the truncation option eagerly; match.arg() errors on values
  # outside the allowed set.
  match.arg(options[["truncation"]], c("warn", "error", "ignore"))
  shiny::reactive({
    shiny::req(spec())
    if (!isTRUE(options[["ignoreSelection"]])) {
      # Take dependency on the Tableau selection input so this reactive
      # re-runs when the user changes the selected marks.
      session$input[["shinytableau-selection"]]
    }
    # Data retrieval is asynchronous: the reactive yields a promise of a
    # data frame (see the roxygen documentation above).
    tableau_get_data_async(spec(), options) %...>% {
      if (is.null(.)) {
        return(NULL)
      }
      # Tableau caps underlying/datasource reads at 10,000 rows; surface
      # truncation according to the 'truncation' option.
      if (isTRUE(.$isTotalRowCountLimited)) {
        if (options[["truncation"]] == "warn") {
          shiny::showNotification(
            htmltools::tagList(
              htmltools::strong("Warning:"),
              " Incomplete data; only the first ",
              nrow(.$data),
              " rows of data can be retrieved from Tableau!"
            ),
            type = "warning",
            session = session
          )
          warning("Tableau data was limited to first ", nrow(.$data), " rows")
        } else if (options[["truncation"]] == "error") {
          stop("The data requested contains too many rows (limit: ", nrow(.$data), ")")
        } else if (options[["truncation"]] == "ignore") {
          # Do nothing
        } else {
          # Unreachable after match.arg() above; kept as a defensive branch.
          warning("Unknown value for `truncation` option: ", options[["truncation"]])
        }
      }
      .$data
    }
  })
}
#' Create data spec objects programmatically
#'
#' A data spec object is a pointer to a specific data table in a Tableau
#' dashboard. It is analogous to a file path or a URL, except instead of a
#' simple string, it is a structured object consisting of multiple arguments.
#' The components of each data spec object will vary, depending on the type of
#' data being requested: summary, underlying, or data source. See the Details
#' section of [reactive_tableau_data()] for more information.
#'
#' @param worksheet The name (as character vector) or number (as integer) of the
#' worksheet. If a number is given, it will immediately be resolved to a
#' worksheet name.
#' @param underlyingTableId,dataSourceId,logicalTableId The id (as character
#' vector) or number (as integer) of the specific underlying table/data
#' source/logical table to read. If a number is given, it will immediately be
#' resolved to an id.
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#'
#' @return A spec object, suitable for the `spec` argument to
#' [reactive_tableau_data()] or persisting via
#' [update_tableau_settings_async()].
#'
#' @export
spec_summary <- function(worksheet = 1L, session = shiny::getDefaultReactiveDomain()) {
  # Resolve a 1-based index (or validate a name) into a worksheet name, then
  # build the spec object for summary-level data.
  ws_name <- resolve_worksheet(worksheet, session = session)
  list(worksheet = ws_name, source = "summary")
}
#' @rdname spec_summary
#' @export
spec_underlying <- function(worksheet = 1L, underlyingTableId = 1L, session = shiny::getDefaultReactiveDomain()) {
  # Resolve worksheet name first; the underlying tables are looked up from
  # its worksheet info.
  ws_name <- resolve_worksheet(worksheet, session = session)
  info <- tableau_worksheet_info(ws_name, session = session)
  tables <- info[["underlyingTables"]]

  # Either a valid 1-based index (translated to an id) or an id that is
  # already known; anything else is an error.
  id_is_index <- is.numeric(underlyingTableId) &&
    underlyingTableId >= 1 &&
    underlyingTableId <= length(tables)
  if (id_is_index) {
    underlyingTableId <- tables[[underlyingTableId]][["id"]]
  } else {
    # NOTE(review): pluck() is assumed to gather the "id" entry of each
    # table here; confirm it is a package-local helper rather than
    # purrr::pluck, whose semantics differ.
    known_id <- is.character(underlyingTableId) &&
      underlyingTableId %in% pluck(tables, "id")
    if (!known_id) {
      stop("Underlying table not found")
    }
  }

  list(
    worksheet = ws_name,
    source = "underlying",
    table = underlyingTableId
  )
}
#' @rdname spec_summary
#' @export
spec_datasource <- function(worksheet = 1L, dataSourceId = 1L, logicalTableId = 1L, session = shiny::getDefaultReactiveDomain()) {
  ws_name <- resolve_worksheet(worksheet, session = session)
  info <- tableau_worksheet_info(ws_name, session = session)
  ds_ids <- info[["dataSourceIds"]]

  # Resolve a 1-based index into an actual data source id, or verify that a
  # caller-supplied id is used by this worksheet.
  ds_is_index <- is.numeric(dataSourceId) &&
    dataSourceId >= 1 && dataSourceId <= length(ds_ids)
  if (ds_is_index) {
    dataSourceId <- ds_ids[[dataSourceId]]
  } else if (!(is.character(dataSourceId) && dataSourceId %in% ds_ids)) {
    stop("Specified data source not found")
  }

  # Same resolution logic for the logical table within the data source.
  ds_info <- tableau_datasource_info(dataSourceId, session = session)
  ltables <- ds_info[["logicalTables"]]
  lt_is_index <- is.numeric(logicalTableId) &&
    logicalTableId >= 1 && logicalTableId <= length(ltables)
  if (lt_is_index) {
    logicalTableId <- ltables[[logicalTableId]][["id"]]
  } else if (!(is.character(logicalTableId) &&
               logicalTableId %in% pluck(ds_info[["logicalTables"]], "id"))) {
    stop("Logical table not found")
  }

  list(
    worksheet = ws_name,
    source = "datasource",
    ds = dataSourceId,
    table = logicalTableId
  )
}
# Translate a worksheet index or name into a validated worksheet name.
# Errors if the worksheet is not part of the current dashboard.
resolve_worksheet <- function(worksheet, session = shiny::getDefaultReactiveDomain()) {
  available <- tableau_worksheets(session = session)
  is_index <- is.numeric(worksheet) &&
    worksheet >= 1L && worksheet <= length(available)
  if (is_index) {
    available[[worksheet]]
  } else if (worksheet %in% available) {
    worksheet
  } else {
    stop("Requested worksheet not found")
  }
}
#' Construct a reactive expression that reads Tableau data schema
#'
#' Creates a reactive expression that returns schema data for the specified
#' Tableau data table, including the names and data types of columns. Basically,
#' this is a convenience wrapper that takes a `spec` object in any of its
#' various forms, invokes either [tableau_worksheet_info()] or
#' [tableau_datasource_info()] as appropriate, and extracts the specific
#' sub-object that matches `spec`.
#'
#' @param spec See [reactive_tableau_data()].
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#'
#' @return A named list, as described in the [DataTableSchema] topic.
#'
#' @export
reactive_tableau_schema <- function(spec, session = shiny::getDefaultReactiveDomain()) {
  session <- unwrap_session(session)
  # A bare setting name resolves reactively to the stored spec object.
  if (is.character(spec) && length(spec) == 1) {
    setting_name <- spec
    spec <- shiny::reactive({
      tableau_setting(setting_name, session = session)
    })
  }
  # Wrap a static spec so it can be invoked like a reactive below.
  if (!is.function(spec)) {
    value <- spec
    spec <- function() value
  }
  shiny::reactive({
    sp <- shiny::req(spec())
    ws <- sp[["worksheet"]]
    src <- sp[["source"]]
    if (identical(src, "summary")) {
      tableau_worksheet_info(ws, session = session)[["summary"]]
    } else if (identical(src, "underlying")) {
      tables <- tableau_worksheet_info(ws, session = session)[["underlyingTables"]]
      shiny::req(tables)
      shiny::req(find_logical_table(tables, sp[["table"]]))
    } else if (identical(src, "datasource")) {
      tables <- tableau_datasource_info(sp[["ds"]], session = session)[["logicalTables"]]
      shiny::req(tables)
      shiny::req(find_logical_table(tables, sp[["table"]]))
    } else {
      stop("Unknown data_spec source: '", src, "'")
    }
  })
}
# Linear scan for the first table whose "id" element equals `id`.
# Returns that table, or NULL when no table matches.
find_logical_table <- function(logical_tables, id) {
  result <- NULL
  for (candidate in logical_tables) {
    if (candidate[["id"]] == id) {
      result <- candidate
      break
    }
  }
  result
}
#' Get info about available data sources in this Tableau dashboard
#'
#' For advanced use only; most shinytableau extensions should use the
#' [choose_data()] module to allow the user to specify a data source.
#'
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#' @param id A data source ID, as returned by `tableau_datasources()` or
#' [tableau_worksheet_info()]`$dataSourceIds`.
#'
#' @return `tableau_datasources()` returns a character vector whose elements are
#' data source IDs.
#'
#' `tableau_datasource_info()` returns the metadata and schema for a specific
#' data source. Note that an extension instance can only access data sources
#' that are actually used by worksheets in the same dashboard. The return
#' value is a named list that contains the following fields:
#' * **`id`** - Unique ID for this data source.
#' * **`fields`** - data frame where each row is one of the fields in the data source, and these columns:
#' * `aggregation` - character - The type of aggregation used for this field. Possible values listed [here](https://tableau.github.io/extensions-api/docs/enums/tableau.fieldaggregationtype.html), e.g. `"attr"`, `"avg"`, `"count"`, ...
#' * `id` - character - The field id.
#' * `name` - character - The caption for this field.
#' * `description` - character - User description of the field, or `""` if there is none.
#' * `role` - character - `"dimension"`, `"measure"`, or `"unknown"`.
#' * `isCalculatedField` - logical - Whether the field is a table calculation.
#' * `isCombinedField` - logical - Whether the field is a combination of multiple fields.
#' * `isGenerated` - logical - Whether this field is generated by Tableau. Tableau generates a number of fields for a data source, such as Number of Records, or Measure Values. This property can be used to distinguish between those fields and fields that come from the underlying data connection, or were created by a user.
#' * `isHidden` - logical - Whether this field is hidden.
#' * **`isExtract`** - `TRUE` if this data source is an extract, `FALSE` otherwise.
#' * **`name`** - The user friendly name of the data source as seen in the UI.
#' * **`extractUpdateTime`** - A [POSIXlt] indicating the time of extract, or `NULL` if the data source is live.
#' * **`logicalTables`** - An unnamed list; each element is a [data table schema][DataTableSchema].
#'
#' @export
tableau_datasources <- function(session = shiny::getDefaultReactiveDomain()) {
  # The schema's dataSources element is a named list keyed by data source id.
  sources <- schema(session)[["dataSources"]]
  names(sources)
}
#' Data table schema object
#'
#' An object that describes the schema of a data table, like a worksheet's
#' summary data or underlying data, or a logical table from a data source.
#'
#' @seealso Data table schema objects are obtained via
#' [tableau_worksheet_info()] and [tableau_datasource_info()].
#'
#' @field id Character vector indicating the ID of the underlying data table or
#' logical table. (Not present for summary data.)
#' @field caption Character vector with a human-readable description of the
#' underlying data table or logical table. (Not present for summary data.)
#' @field name Character vector with either `"Underlying Data Table"` or
#' `"Summary Data Table"` (yes, literally those strings; not sure why this
#' field is called `name` but it comes from the Tableau Extension API).
#' @field columns A data frame that describes the columns in this table. Each
#' data frame row describes a column in the data table. The data frame
#' contains these columns:
#'
#' * `dataType` - character - `"bool"`, `"date"`, `"date-time"`, `"float"`, `"int"`, `"spatial"`, or `"string"`.
#' * `fieldName` - character - The name of the column.
#' * `index` - integer - The column number.
#' * `isReferenced` - logical - If `TRUE`, then the column is referenced in the worksheet.
#'
#' @name DataTableSchema
#' @rdname DataTableSchema
NULL
#' @rdname tableau_datasources
#' @export
tableau_datasource_info <- function(id, session = shiny::getDefaultReactiveDomain()) {
  # Look up a single data source's metadata/schema by id.
  all_sources <- schema(session)[["dataSources"]]
  all_sources[[id]]
}
| /R/data-accessors.R | permissive | rstudio/shinytableau | R | false | false | 21,136 | r | schema <- function(session) {
session <- unwrap_session(session)
shiny::isolate(session$input[["shinytableau-schema"]])
}
#' Get info about available worksheets in this Tableau dashboard
#'
#' For advanced use only; most shinytableau extensions should use the
#' [choose_data()] module to allow the user to specify a worksheet.
#'
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#' @param name A worksheet name, as returned by `tableau_worksheets()`.
#'
#' @return `tableau_worksheets()` returns a character vector whose elements are
#' worksheet names. Note that only worksheets that are included on the same
#' dashboard will be listed, and these are the only worksheets we can access.
#'
#' `tableau_worksheet_info()` returns metadata for a specific worksheet. The
#' return value is a named list that contains the following fields:
#'
#' * **`name`** - The name of the worksheet.
#'
#' * **`summary`** - The [data table schema object][DataTableSchema] for the
#' worksheet's summary-level data table.
#'
#' * **`dataSourceIds`** - Character vector of data source IDs used by this
#' worksheet. See [tableau_datasource_info()].
#'
#' * **`underlyingTables`** - Unnamed list, each element is a [data table
#' schema object][DataTableSchema] of one of the worksheet's underlying
#' data tables.
#'
#' @export
tableau_worksheets <- function(session = shiny::getDefaultReactiveDomain()) {
  # The schema's worksheets element is a named list keyed by worksheet name.
  ws <- schema(session)[["worksheets"]]
  names(ws)
}
#' @rdname tableau_worksheets
#' @export
tableau_worksheet_info <- function(name, session = shiny::getDefaultReactiveDomain()) {
  # Look up a single worksheet's metadata by name.
  worksheets <- schema(session)[["worksheets"]]
  worksheets[[name]]
}
#' Construct a reactive expression that reads Tableau data
#'
#' This function is used to read data from Tableau. Because of the many levels
#' of indirection involved in actually physically reading data from Tableau,
#' using this function is significantly more involved than, say, a simple
#' [read.csv()]. See the Details section for a more detailed introduction.
#'
#' There are two complicating factors when reading data from Tableau; the first
#' is how to tell shinytableau what specific data table you want to access, and
#' the second is actually accessing the data from R.
#'
#' ### Specifying a data table
#'
#' If we want to access data from Tableau, the Tableau Extension API only allows
#' us to do so via one of the worksheets that are part of the same dashboard.
#'
#' Each worksheet makes three categories of data available to us:
#'
#' 1. **Summary data:** The data in its final form before visualization. If the
#' visualization aggregates measures, then the summary data contains the data
#' after aggregation has been performed. If the worksheet has an active
#' selection, then by default, only the selected data is returned (set the
#' `ignoreSelection` option to `TRUE` to retrieve all data).
#'
#' 2. **Underlying data:** The underlying data that is used in the visualization,
#' before aggregation operations are performed but after tables are joined.
#'
#' By default, only the columns that are used in the worksheet are included
#' (set `includeAllColumns` to `TRUE` if you need them all). If the worksheet
#' has an active selection, then by default, only the selected data is
#' returned (set the `ignoreSelection` option to `TRUE` to retrieve all
#' data).
#'
#' 3. **Data source:** You can also access the raw data from the data source(s)
#' used by the worksheet. This data is unaffected by the worksheet settings.
#' Tableau data sources are broken into one or more logical tables, like how
#' a relational database has multiple tables.
#'
#' As an R user, you may find this analogy based on the examples from
#' [dplyr::mutate-joins] to be helpful in explaining the relationship between
#' data source, underlying, and summary data:
#'
#' ```
#' # Data source
#' logical1 <- band_members
#' logical2 <- band_instruments
#'
#' # Underlying is joined/selected, but not aggregated
#' underlying <- band_members %>%
#' full_join(band_instruments, by = "name") %>%
#' select(band, name)
#'
#' # Summary is underlying plus aggregation
#' summary <- underlying %>%
#' group_by(band) %>%
#' tally(name = "COUNT(name)")
#' ```
#'
#' The existence of these three levels of data granularity, plus the fact that
#' the underlying and data source levels need additional specification to narrow
#' down which of the multiple data tables at each level are desired, means that
#' providing clear instructions to `reactive_tableau_data` is surprisingly
#' complicated.
#'
#' Now that you have some context, see the description for the `spec` parameter,
#' above, for specific instructions on the different ways to specify data
#' tables, based on current user input, previously saved configuration, or
#' programmatically.
#'
#' ### Accessing a data table
#'
#' We turn our attention now to consuming data from `reactive_tableau_data()`.
#' Given the following code snippet, one that might appear in `config_server`:
#'
#' ```
#' data_spec <- choose_data("mydata")
#' data <- reactive_tableau_data(data_spec)
#' ```
#'
#' The `data` variable created here has two complications.
#'
#' First, it's reactive; like all reactive expressions, you must call `data` as
#' a function to get at its value. It must be reactive because Tableau data can
#' change (based on selection and filtering, if nothing else), and also, the
#' user's choices can change as well (in the example, the `data_spec` object is
#' also reactive).
#'
#' Second, and more seriously, reading Tableau data is asynchronous, so when you
#' invoke `data()` what you get back is not a data frame, but the [promise of a
#' data frame](https://rstudio.github.io/promises/articles/overview.html).
#' Working with promises has its own learning curve, so it's regrettable that
#' they play such a prominent role in reading Tableau data. If this is a new
#' topic for you, [start with this
#' talk](https://rstudio.com/resources/rstudioconf-2018/scaling-shiny-apps-with-async-programming/)
#' and then read through the various articles on the [promises
#' website](https://rstudio.github.io/promises/).
#'
#' The bottom line with promises is that you can use any of the normal functions
#' you usually use for manipulating, analyzing, and visualizing data frames, but
#' the manner in which you invoke those functions will be a bit different.
#' Instead of calling `print(data())`, for example, you'll need to first change
#' to the more pipe-oriented `data() %>% print()` and then replace the magrittr
#' pipe with the promise-pipe like `data() %...>% print()`. There's much more to
#' the story, though; for all but the simplest scenarios, you'll need to check
#' out the resources linked in the previous paragraph.
#'
#'
#' @param spec An argument that specifies what specific data should be
#' accessed. This can be specified in a number of ways:
#'
#' 1. The name of a setting, that was set using a value returned from
#' [choose_data()]. This is the most common scenario for `server`.
#'
#' 2. The object returned from [choose_data()] can be passed in directly. This
#' is likely the approach you should take if you want to access data in
#' `config_server` based on unsaved config changes (e.g. to give the user a
#' live preview of what their `choose_data` choices would yield).
#'
#' 3. You can directly create a spec object using one of the helper functions
#' [spec_summary()], [spec_underlying()], or [spec_datasource()]. For cases where
#' the data is not selected based on [choose_data()] at all, but
#' programmatically determined or hardcoded. (This should not be common.)
#'
#' @param options A named list of options:
#'
#' * `ignoreAliases` - Do not use aliases specified in the data source in
#' Tableau. Default is `FALSE`.
#'
#' * `ignoreSelection` - If `FALSE` (the default), only return data for the
#' currently selected marks. Does not apply for datasource tables, only
#' summary and underlying. If `"never"`, then if no marks are selected,
#' `NULL` is returned. If `TRUE`, all data is returned, regardless of
#' selection.
#'
#' * `includeAllColumns` - Return all the columns for the table. Default is
#' `FALSE`. Does not apply for datasource and summary tables, only underlying.
#'
#' * `maxRows` - The maximum number of rows to return. **Tableau will not,
#' under any circumstances, return more than 10,000 rows for datasource and
#' underlying tables.** This option is ignored for summary tables.
#'
#' * `columnsToInclude` - Character vector of columns that should be included;
#' leaving this option unspecified means all columns should be returned. Does
#' not apply for summary and underlying, only datasource.
#'
#' * `truncation` - For underlying and datasource reads, Tableau will never,
#' under any circumstances, return more than 10,000 rows of data. If `warn`
#' (the default), when this condition occurs a warning will be displayed to
#' the user and emitted as a warning in the R process, then the available
#' data will be returned. If `ignore`, then no warning will be issued. If
#' `error`, then an error will be raised.
#'
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#'
#' @examples
#' \dontrun{
#' data_spec_x <- choose_data("x", iv = iv)
#' data_x <- reactive_tableau_data(data_spec_x)
#' }
#'
#' @import promises
#' @export
reactive_tableau_data <- function(spec, options = list(),
  session = shiny::getDefaultReactiveDomain()) {
  force(spec)
  force(options)
  session <- unwrap_session(session)

  # A bare setting name: resolve it reactively, so the returned data updates
  # whenever the saved setting changes.
  if (is.character(spec) && length(spec) == 1) {
    setting_name <- spec
    spec <- shiny::reactive({
      tableau_setting(setting_name, session = session)
    })
  }
  # A static spec object: wrap it in a function so `spec()` works uniformly
  # in the reactive below.
  if (!is.function(spec)) {
    value <- spec
    spec <- function() value
  }

  options <- merge_defaults(options, list(
    truncation = "warn"
  ))
  # Fix: capture match.arg()'s normalized return value. Previously it was
  # discarded, so a partial match such as "err" would pass validation here
  # yet fall through every `==` comparison below and only trigger the
  # "Unknown value" warning instead of the requested behavior.
  options[["truncation"]] <- match.arg(options[["truncation"]],
    c("warn", "error", "ignore"))

  shiny::reactive({
    shiny::req(spec())
    if (!isTRUE(options[["ignoreSelection"]])) {
      # Take a reactive dependency on the current Tableau selection
      session$input[["shinytableau-selection"]]
    }
    tableau_get_data_async(spec(), options) %...>% {
      if (is.null(.)) {
        return(NULL)
      }
      if (isTRUE(.$isTotalRowCountLimited)) {
        if (options[["truncation"]] == "warn") {
          shiny::showNotification(
            htmltools::tagList(
              htmltools::strong("Warning:"),
              " Incomplete data; only the first ",
              nrow(.$data),
              " rows of data can be retrieved from Tableau!"
            ),
            type = "warning",
            session = session
          )
          warning("Tableau data was limited to first ", nrow(.$data), " rows")
        } else if (options[["truncation"]] == "error") {
          stop("The data requested contains too many rows (limit: ", nrow(.$data), ")")
        }
        # "ignore" falls through silently; match.arg() above guarantees no
        # other value can reach this point.
      }
      .$data
    }
  })
}
#' Create data spec objects programmatically
#'
#' A data spec object is a pointer to a specific data table in a Tableau
#' dashboard. It is analogous to a file path or a URL, except instead of a
#' simple string, it is a structured object consisting of multiple arguments.
#' The components of each data spec object will vary, depending on the type of
#' data being requested: summary, underlying, or data source. See the Details
#' section of [reactive_tableau_data()] for more information.
#'
#' @param worksheet The name (as character vector) or number (as integer) of the
#' worksheet. If a number is given, it will immediately be resolved to a
#' worksheet name.
#' @param underlyingTableId,dataSourceId,logicalTableId The id (as character
#' vector) or number (as integer) of the specific underlying table/data
#' source/logical table to read. If a number is given, it will immediately be
#' resolved to an id.
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#'
#' @return A spec object, suitable for the `spec` argument to
#' [reactive_tableau_data()] or persisting via
#' [update_tableau_settings_async()].
#'
#' @export
spec_summary <- function(worksheet = 1L, session = shiny::getDefaultReactiveDomain()) {
  # Resolve a 1-based index (or validate a name) into a worksheet name, then
  # build the spec object for summary-level data.
  ws_name <- resolve_worksheet(worksheet, session = session)
  list(worksheet = ws_name, source = "summary")
}
#' @rdname spec_summary
#' @export
spec_underlying <- function(worksheet = 1L, underlyingTableId = 1L, session = shiny::getDefaultReactiveDomain()) {
  # Resolve worksheet name first; the underlying tables are looked up from
  # its worksheet info.
  ws_name <- resolve_worksheet(worksheet, session = session)
  info <- tableau_worksheet_info(ws_name, session = session)
  tables <- info[["underlyingTables"]]

  # Either a valid 1-based index (translated to an id) or an id that is
  # already known; anything else is an error.
  id_is_index <- is.numeric(underlyingTableId) &&
    underlyingTableId >= 1 &&
    underlyingTableId <= length(tables)
  if (id_is_index) {
    underlyingTableId <- tables[[underlyingTableId]][["id"]]
  } else {
    # NOTE(review): pluck() is assumed to gather the "id" entry of each
    # table here; confirm it is a package-local helper rather than
    # purrr::pluck, whose semantics differ.
    known_id <- is.character(underlyingTableId) &&
      underlyingTableId %in% pluck(tables, "id")
    if (!known_id) {
      stop("Underlying table not found")
    }
  }

  list(
    worksheet = ws_name,
    source = "underlying",
    table = underlyingTableId
  )
}
#' @rdname spec_summary
#' @export
spec_datasource <- function(worksheet = 1L, dataSourceId = 1L, logicalTableId = 1L, session = shiny::getDefaultReactiveDomain()) {
  ws_name <- resolve_worksheet(worksheet, session = session)
  info <- tableau_worksheet_info(ws_name, session = session)
  ds_ids <- info[["dataSourceIds"]]

  # Resolve a 1-based index into an actual data source id, or verify that a
  # caller-supplied id is used by this worksheet.
  ds_is_index <- is.numeric(dataSourceId) &&
    dataSourceId >= 1 && dataSourceId <= length(ds_ids)
  if (ds_is_index) {
    dataSourceId <- ds_ids[[dataSourceId]]
  } else if (!(is.character(dataSourceId) && dataSourceId %in% ds_ids)) {
    stop("Specified data source not found")
  }

  # Same resolution logic for the logical table within the data source.
  ds_info <- tableau_datasource_info(dataSourceId, session = session)
  ltables <- ds_info[["logicalTables"]]
  lt_is_index <- is.numeric(logicalTableId) &&
    logicalTableId >= 1 && logicalTableId <= length(ltables)
  if (lt_is_index) {
    logicalTableId <- ltables[[logicalTableId]][["id"]]
  } else if (!(is.character(logicalTableId) &&
               logicalTableId %in% pluck(ds_info[["logicalTables"]], "id"))) {
    stop("Logical table not found")
  }

  list(
    worksheet = ws_name,
    source = "datasource",
    ds = dataSourceId,
    table = logicalTableId
  )
}
# Translate a worksheet index or name into a validated worksheet name.
# Errors if the worksheet is not part of the current dashboard.
resolve_worksheet <- function(worksheet, session = shiny::getDefaultReactiveDomain()) {
  available <- tableau_worksheets(session = session)
  is_index <- is.numeric(worksheet) &&
    worksheet >= 1L && worksheet <= length(available)
  if (is_index) {
    available[[worksheet]]
  } else if (worksheet %in% available) {
    worksheet
  } else {
    stop("Requested worksheet not found")
  }
}
#' Construct a reactive expression that reads Tableau data schema
#'
#' Creates a reactive expression that returns schema data for the specified
#' Tableau data table, including the names and data types of columns. Basically,
#' this is a convenience wrapper that takes a `spec` object in any of its
#' various forms, invokes either [tableau_worksheet_info()] or
#' [tableau_datasource_info()] as appropriate, and extracts the specific
#' sub-object that matches `spec`.
#'
#' @param spec See [reactive_tableau_data()].
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#'
#' @return A named list, as described in the [DataTableSchema] topic.
#'
#' @export
reactive_tableau_schema <- function(spec, session = shiny::getDefaultReactiveDomain()) {
  session <- unwrap_session(session)
  # A bare setting name resolves reactively to the stored spec object.
  if (is.character(spec) && length(spec) == 1) {
    setting_name <- spec
    spec <- shiny::reactive({
      tableau_setting(setting_name, session = session)
    })
  }
  # Wrap a static spec so it can be invoked like a reactive below.
  if (!is.function(spec)) {
    value <- spec
    spec <- function() value
  }
  shiny::reactive({
    sp <- shiny::req(spec())
    ws <- sp[["worksheet"]]
    src <- sp[["source"]]
    if (identical(src, "summary")) {
      tableau_worksheet_info(ws, session = session)[["summary"]]
    } else if (identical(src, "underlying")) {
      tables <- tableau_worksheet_info(ws, session = session)[["underlyingTables"]]
      shiny::req(tables)
      shiny::req(find_logical_table(tables, sp[["table"]]))
    } else if (identical(src, "datasource")) {
      tables <- tableau_datasource_info(sp[["ds"]], session = session)[["logicalTables"]]
      shiny::req(tables)
      shiny::req(find_logical_table(tables, sp[["table"]]))
    } else {
      stop("Unknown data_spec source: '", src, "'")
    }
  })
}
# Linear scan for the first table whose "id" element equals `id`.
# Returns that table, or NULL when no table matches.
find_logical_table <- function(logical_tables, id) {
  result <- NULL
  for (candidate in logical_tables) {
    if (candidate[["id"]] == id) {
      result <- candidate
      break
    }
  }
  result
}
#' Get info about available data sources in this Tableau dashboard
#'
#' For advanced use only; most shinytableau extensions should use the
#' [choose_data()] module to allow the user to specify a data source.
#'
#' @param session The Shiny `session` object. (You should probably just use the
#' default.)
#' @param id A data source ID, as returned by `tableau_datasources()` or
#' [tableau_worksheet_info()]`$dataSourceIds`.
#'
#' @return `tableau_datasources()` returns a character vector whose elements are
#' data source IDs.
#'
#' `tableau_datasource_info()` returns the metadata and schema for a specific
#' data source. Note that an extension instance can only access data sources
#' that are actually used by worksheets in the same dashboard. The return
#' value is a named list that contains the following fields:
#' * **`id`** - Unique ID for this data source.
#' * **`fields`** - data frame where each row is one of the fields in the data source, and these columns:
#' * `aggregation` - character - The type of aggregation used for this field. Possible values listed [here](https://tableau.github.io/extensions-api/docs/enums/tableau.fieldaggregationtype.html), e.g. `"attr"`, `"avg"`, `"count"`, ...
#' * `id` - character - The field id.
#' * `name` - character - The caption for this field.
#' * `description` - character - User description of the field, or `""` if there is none.
#' * `role` - character - `"dimension"`, `"measure"`, or `"unknown"`.
#' * `isCalculatedField` - logical - Whether the field is a table calculation.
#' * `isCombinedField` - logical - Whether the field is a combination of multiple fields.
#' * `isGenerated` - logical - Whether this field is generated by Tableau. Tableau generates a number of fields for a data source, such as Number of Records, or Measure Values. This property can be used to distinguish between those fields and fields that come from the underlying data connection, or were created by a user.
#' * `isHidden` - logical - Whether this field is hidden.
#' * **`isExtract`** - `TRUE` if this data source is an extract, `FALSE` otherwise.
#' * **`name`** - The user friendly name of the data source as seen in the UI.
#' * **`extractUpdateTime`** - A [POSIXlt] indicating the time of extract, or `NULL` if the data source is live.
#' * **`logicalTables`** - An unnamed list; each element is a [data table schema][DataTableSchema].
#'
#' @export
tableau_datasources <- function(session = shiny::getDefaultReactiveDomain()) {
  # The schema's dataSources element is a named list keyed by data source id.
  sources <- schema(session)[["dataSources"]]
  names(sources)
}
#' Data table schema object
#'
#' An object that describes the schema of a data table, like a worksheet's
#' summary data or underlying data, or a logical table from a data source.
#'
#' @seealso Data table schema objects are obtained via
#' [tableau_worksheet_info()] and [tableau_datasource_info()].
#'
#' @field id Character vector indicating the ID of the underlying data table or
#' logical table. (Not present for summary data.)
#' @field caption Character vector with a human-readable description of the
#' underlying data table or logical table. (Not present for summary data.)
#' @field name Character vector with either `"Underlying Data Table"` or
#' `"Summary Data Table"` (yes, literally those strings; not sure why this
#' field is called `name` but it comes from the Tableau Extension API).
#' @field columns A data frame that describes the columns in this table. Each
#' data frame row describes a column in the data table. The data frame
#' contains these columns:
#'
#' * `dataType` - character - `"bool"`, `"date"`, `"date-time"`, `"float"`, `"int"`, `"spatial"`, or `"string"`.
#' * `fieldName` - character - The name of the column.
#' * `index` - integer - The column number.
#' * `isReferenced` - logical - If `TRUE`, then the column is referenced in the worksheet.
#'
#' @name DataTableSchema
#' @rdname DataTableSchema
NULL
#' @rdname tableau_datasources
#' @export
tableau_datasource_info <- function(id, session = shiny::getDefaultReactiveDomain()) {
  # Look up a single data source's metadata/schema by id.
  all_sources <- schema(session)[["dataSources"]]
  all_sources[[id]]
}
|
# this file defines the class 'antsMatrix' and its associated methods
# C++ type used to represent an element of the matrix pointer to the actual image
# of C++ type 'itk::image< pixeltype , dimension >::Pointer'
#' @rdname antsMatrix_class
#' @title antsMatrix Class
#' @description An S4 class to hold an antsMatrix imported from ITK types
#' C++ type used to represent an element of the matrix pointer
#' to the actual image
#' C++ type 'itk::image< pixeltype , dimension >::Pointer'
#'
#' @param .Object input object to convert
#' @param elementtype string e.g. "float"
#'
#' @slot elementtype string of the type of storage of the matrix e.g. "float"
#' @slot pointer the memory location
# Class definition for antsMatrix: a thin S4 wrapper around an ITK matrix
# held behind an external pointer.
# Modernization: `slots =` replaces the legacy representation() call; the
# slot names and types are unchanged.
setClass(
  Class = "antsMatrix",
  slots = c(elementtype = "character", pointer = "externalptr")
)
#' @rdname antsMatrix_class
#' @aliases initialize,antsMatrix-method
#' @examples
#' mat = as.antsMatrix(matrix(rnorm(10), nrow=2))
#' as.data.frame(mat)
#' as.matrix(mat)
# Constructor method: builds a new antsMatrix via the native "antsMatrix"
# entry point in ANTsRCore.
# NOTE(review): `.Object` is ignored and the .Call() result is returned
# directly -- presumably the C++ routine constructs and returns the complete
# S4 object itself. Confirm against the native implementation.
setMethod(f = "initialize", signature(.Object = "antsMatrix"), definition = function(.Object,
  elementtype) {
  # elementtype: storage type of the matrix, e.g. "float" or "double"
  .Call("antsMatrix", elementtype, PACKAGE = "ANTsRCore")
})
#' @rdname as.array
#' @aliases as.data.frame,antsMatrix-method
# Convert an antsMatrix to a data.frame.
# The native "antsMatrix_asList" call returns the columns followed by one
# final element holding the column names for the preceding elements.
setMethod(f = "as.data.frame", signature(x = "antsMatrix"), definition = function(x) {
  lst <- .Call("antsMatrix_asList", x, PACKAGE = "ANTsRCore")
  n <- length(lst)
  # Fix: seq_len(n - 1) instead of 1:(n - 1); the latter evaluates to
  # c(1, 0) for a single-element list and would corrupt the names.
  names(lst)[seq_len(n - 1)] <- lst[[n]]
  # Drop the trailing names element before assembling the data.frame.
  lst[[n]] <- NULL
  return(as.data.frame(lst))
})
#' @rdname as.array
#' @param row.names NULL or a character vector giving the row names for the
#' data frame.
#' @param optional passsed to \code{\link{as.data.frame}}
#' @export
#' @method as.data.frame antsMatrix
# S3 counterpart of the S4 as.data.frame method for antsMatrix.
# The native "antsMatrix_asList" call returns the columns followed by one
# final element holding the column names.
as.data.frame.antsMatrix <- function(x, row.names = NULL, optional = FALSE, ...) {
  lst <- .Call("antsMatrix_asList", x, PACKAGE = "ANTsRCore")
  n <- length(lst)
  # Fix: seq_len(n - 1) handles the degenerate single-element case safely,
  # unlike 1:(n - 1) which yields c(1, 0).
  names(lst)[seq_len(n - 1)] <- lst[[n]]
  lst[[n]] <- NULL
  return(as.data.frame(lst, row.names = row.names, optional = optional, ...))
}
#' @rdname as.array
#' @aliases as.matrix,antsMatrix-method
setMethod(f = "as.matrix", signature(x = "antsMatrix"),
          definition = function(x, ...) {
            # Coerce via the data.frame representation, so column names
            # extracted by the as.data.frame method are preserved.
            as.matrix.data.frame(as.data.frame(x), ...)
          })
#' @rdname as.array
#' @aliases as.list,antsMatrix-method
setMethod(f = "as.list", signature(x = "antsMatrix"), definition = function(x, ...) {
  # Last element of the list returned by the compiled routine carries the
  # column names; attach them and drop that element.
  raw <- .Call("antsMatrix_asList", x, PACKAGE = "ANTsRCore")
  n <- length(raw)
  out <- raw[seq_len(n - 1)]
  names(out) <- raw[[n]]
  return(out)
})
#' @rdname as.antsMatrix
#' @title Coerce Object to as.antsMatrix
#'
#' @description Convert supported types (list, data.frame, matrix) to an
#' antsMatrix.
#'
#' @param object An object
#' @param elementtype e.g. "float" or "double"
#' @param ... other parameters passed on to the methods
#' @examples
#' as.antsMatrix(matrix(rnorm(10), nrow=2))
#' @export
setGeneric(name = "as.antsMatrix", def = function(object,
  elementtype="float", ...)
  standardGeneric("as.antsMatrix"))
#' @rdname as.antsMatrix
#' @aliases as.antsMatrix,list-method
#' @examples
#' mat = matrix(rnorm(10), nrow=2)
#' df = as.data.frame(mat)
#' as.antsMatrix(df)
#' as.antsMatrix(as.list(df))
setMethod(f = "as.antsMatrix", signature(object = "list"),
    definition = function(object, elementtype="float") {
        # A list of columns is the native input of the C constructor;
        # pass it through unchanged.
        .Call("antsMatrix_asantsMatrix", object, elementtype,
            PACKAGE = "ANTsRCore")
    })
#' @rdname as.antsMatrix
#' @aliases as.antsMatrix,data.frame-method
setMethod(f = "as.antsMatrix", signature(object = "data.frame"),
    definition = function(object, elementtype="float") {
        # A data.frame is a list of columns, which is exactly what the
        # C constructor expects.
        columns <- as.list(object)
        .Call("antsMatrix_asantsMatrix", columns, elementtype,
            PACKAGE = "ANTsRCore")
    })
#' @rdname as.antsMatrix
#' @aliases as.antsMatrix,matrix-method
setMethod(f = "as.antsMatrix", signature(object = "matrix"),
    definition = function(object, elementtype="float") {
        # Convert the matrix to per-column list form before handing it to
        # the C constructor.
        columns <- as.list(as.data.frame(object))
        .Call("antsMatrix_asantsMatrix", columns, elementtype,
            PACKAGE = "ANTsRCore")
    })
| /R/antsMatrix.R | permissive | dorianps/ANTsRCore | R | false | false | 3,997 | r | # this file defines the class 'antsMatrix' and its associated methods
# C++ type used to represent an element of the matrix pointer to the actual image
# of C++ type 'itk::image< pixeltype , dimension >::Pointer'
#' @rdname antsMatrix_class
#' @title antsMatrix Class
#' @description An S4 class to hold an antsMatrix imported from ITK types
#' C++ type used to represent an element of the matrix pointer
#' to the actual image
#' C++ type 'itk::image< pixeltype , dimension >::Pointer'
#'
#' @param .Object input object to convert
#' @param elementtype string e.g. "float"
#'
#' @slot elementtype string of the type of storage of the matrix e.g. "float"
#' @slot pointer the memory location
setClass(Class = "antsMatrix", representation(
elementtype = "character", pointer = "externalptr"))
#' @rdname antsMatrix_class
#' @aliases initialize,antsMatrix-method
#' @examples
#' mat = as.antsMatrix(matrix(rnorm(10), nrow=2))
#' as.data.frame(mat)
#' as.matrix(mat)
setMethod(f = "initialize", signature(.Object = "antsMatrix"), definition = function(.Object,
elementtype) {
.Call("antsMatrix", elementtype, PACKAGE = "ANTsRCore")
})
#' @rdname as.array
#' @aliases as.data.frame,antsMatrix-method
setMethod(f = "as.data.frame", signature(x = "antsMatrix"), definition = function(x) {
lst <- .Call("antsMatrix_asList", x, PACKAGE = "ANTsRCore")
names(lst)[1:(length(lst) - 1)] <- lst[[length(lst)]]
lst[[length(lst)]] <- NULL
return(as.data.frame(lst))
})
#' @rdname as.array
#' @param row.names NULL or a character vector giving the row names for the
#' data frame.
#' @param optional passsed to \code{\link{as.data.frame}}
#' @export
#' @method as.data.frame antsMatrix
as.data.frame.antsMatrix = function(x, row.names = NULL, optional = FALSE, ...) {
lst <- .Call("antsMatrix_asList", x, PACKAGE = "ANTsRCore")
names(lst)[1:(length(lst) - 1)] <- lst[[length(lst)]]
lst[[length(lst)]] <- NULL
return(as.data.frame(lst, row.names = row.names, optional = optional, ...))
}
#' @rdname as.array
#' @aliases as.matrix,antsMatrix-method
setMethod(f = "as.matrix", signature(x = "antsMatrix"),
definition = function(x, ...) {
as.matrix.data.frame(as.data.frame(x), ...)
})
#' @rdname as.array
#' @aliases as.list,antsMatrix-method
setMethod(f = "as.list", signature(x = "antsMatrix"), definition = function(x, ...) {
lst <- .Call("antsMatrix_asList", x, PACKAGE = "ANTsRCore")
names(lst)[1:(length(lst) - 1)] <- lst[[length(lst)]]
lst[[length(lst)]] <- NULL
return(lst)
})
#' @rdname as.antsMatrix
#' @title Coerce Object to as.antsMatrix
#'
#' @description convert types to an antsMatrix
#'
#' @param object An object
#' @param elementtype e.g. "float" or "double"
#' @param ... other parameters
#' @rdname as.antsMatrix
#' @examples
#' as.antsMatrix(matrix(rnorm(10), nrow=2))
#' @export
setGeneric(name = "as.antsMatrix", def = function(object,
elementtype="float", ...)
standardGeneric("as.antsMatrix"))
#' @rdname as.antsMatrix
#' @aliases as.antsMatrix,list-method
#' @examples
#' mat = matrix(rnorm(10), nrow=2)
#' df = as.data.frame(mat)
#' as.antsMatrix(df)
#' as.antsMatrix(as.list(df))
setMethod(f = "as.antsMatrix", signature(object = "list"),
definition = function(object, elementtype="float") {
return(.Call("antsMatrix_asantsMatrix", object, elementtype, PACKAGE = "ANTsRCore"))
})
#' @rdname as.antsMatrix
#' @aliases as.antsMatrix,data.frame-method
setMethod(f = "as.antsMatrix", signature(object = "data.frame"),
definition = function(object, elementtype="float") {
return(.Call("antsMatrix_asantsMatrix", as.list(object),
elementtype, PACKAGE = "ANTsRCore"))
})
#' @rdname as.antsMatrix
#' @aliases as.antsMatrix,matrix-method
setMethod(f = "as.antsMatrix", signature(object = "matrix"),
definition = function(object, elementtype="float") {
return(.Call("antsMatrix_asantsMatrix", as.list(as.data.frame(object)),
elementtype, PACKAGE = "ANTsRCore"))
})
|
### Text QC file headers
# Tab-separated header line for the human/Excel-friendly QC summary
# (Summary_QC.txt). Entries containing an embedded "\t" span two columns:
# an absolute count followed by a percentage.
.qcTextHeaderT = {paste(sep = "\t",
                "Sample",
                "# BAMs",
                "Total reads",
                "Reads aligned\tRA % of total",
                "Reads after filter\tRAF % of aligned",
                "Reads removed as duplicate\tRRAD % of aligned",
                "Reads used for coverage\tRUFC % of aligned",
                "Forward strand (%)",
                "Avg alignment score",
                "Avg aligned length",
                "Avg edit distance",
                "Non-CpG reads (%)",
                "Avg non-CpG coverage",
                "Avg CpG coverage",
                "Non-Cpg/CpG coverage ratio",
                "ChrX reads (%)",
                "ChrY reads (%)",
                "Peak SQRT")};
# Header line for the machine-readable QC summary (Summary_QC_R.txt);
# same columns as .qcTextHeaderT, with syntactic R column names.
.qcTextHeaderR = {paste(sep = "\t",
                "Sample",
                "NBAMs",
                "TotalReads",
                "ReadsAligned\tReadsAlignedPct",
                "ReadsAfterFilter\tReadsAfterFilterPct",
                "ReadsRemovedAsDuplicate\tReadsRemovedAsDuplicatePct",
                "ReadsUsedForCoverage\tReadsUsedForCoveragePct",
                "ForwardStrandPct",
                "AvgAlignmentScore",
                "AvgAlignedLength",
                "AvgEditDistance",
                "NonCpGreadsPct",
                "AvgNonCpGcoverage",
                "AvgCpGcoverage",
                "NonCpg2CpGcoverageRatio",
                "ChrXreadsPct",
                "ChrYreadsPct",
                "PeakSQRT")};
# number of columns in the header
# (counted by splitting on tabs, since some entries above span two columns)
.qccols = length(strsplit(.qcTextHeaderT,"\t",fixed = TRUE)[[1]])
# Create a line for Excel friendly text QC file
# Formats one sample's QC record 'qc' into a single tab-separated line
# matching .qcTextHeaderT: counts are comma-formatted, shares are rendered
# as percentages. Returns the line as a character scalar.
.qcTextLineT = function(qc){
    name = qc$name;
    # NOTE(review): when qc is NULL, qc$name above is also NULL, so the
    # all-NA line below carries no sample name prefix -- confirm intended.
    if( is.null(qc) )
        return(paste0(name,paste0(rep("\tNA",.qccols-1), collapse = "")));
    # Local formatters:
    # afracb -- "count<TAB>percent of b"; perc -- percentage, 2 decimals;
    # twodig/fourdig -- fixed 2/4 decimals; s -- comma-grouped count.
    afracb = function(a,b){ sprintf("%s\t%.1f%%",s(a),100*a/b) };
    perc = function(x){ sprintf("%.2f%%",100*x) };
    twodig = function(x){ sprintf("%.2f",x) };
    fourdig = function(x){ sprintf("%.4f",x) };
    s = function(x){
        formatC(x=x,
                digits=ceiling(log10(max(x)+1)),
                big.mark=",",
                big.interval=3);
    }
    # Stamp the QC sub-records with their S3 classes so that qcmean()
    # dispatches to the right method -- presumably the class attributes
    # are absent on the stored objects. NOTE(review): confirm why.
    if( !is.null(qc$hist.score1))
        class(qc$hist.score1) = "qcHistScore";
    if( !is.null(qc$bf.hist.score1))
        class(qc$bf.hist.score1) = "qcHistScoreBF";
    if( !is.null(qc$hist.edit.dist1))
        class(qc$hist.edit.dist1) = "qcEditDist";
    if( !is.null(qc$bf.hist.edit.dist1))
        class(qc$bf.hist.edit.dist1) = "qcEditDistBF";
    if( !is.null(qc$hist.length.matched))
        class(qc$hist.length.matched) = "qcLengthMatched";
    if( !is.null(qc$bf.hist.length.matched))
        class(qc$bf.hist.length.matched) = "qcLengthMatchedBF";
    if( !is.null(qc$frwrev) )
        class(qc$frwrev) = "qcFrwrev";
    # One value (or count+percent pair) per header column, same order as
    # .qcTextHeaderT.
    rez = paste( sep = "\t",
        name, # Sample
        if(is.null(qc$nbams)){1}else{qc$nbams}, # Number of BAMs
        s(qc$reads.total), # Total reads
        afracb(qc$reads.aligned, qc$reads.total), # Reads aligned, % of total
        afracb(qc$reads.recorded, qc$reads.aligned), # Reads after filter, % of aligned
        afracb(
            qc$reads.recorded - qc$reads.recorded.no.repeats,
            qc$reads.aligned), # Reads removed as duplicates, % of aligned
        afracb(qc$reads.recorded.no.repeats, qc$reads.aligned), # Reads used for coverage, % of aligned
        perc(qcmean( qc$frwrev.no.repeats )), # Forward strand (%)
        twodig(qcmean( qc$hist.score1 )), # Avg alignment score
        twodig(qcmean( qc$hist.length.matched )), # Avg aligned length
        twodig(qcmean( qc$hist.edit.dist1 )), # Avg edit distance
        perc(qcmean( qc$cnt.nonCpG.reads )), # Non-CpG reads (%)
        twodig( qc$avg.noncpg.coverage ), # Avg non-CpG coverage
        twodig( qc$avg.cpg.coverage ), # Avg CpG coverage
        fourdig( qc$avg.noncpg.coverage / qc$avg.cpg.coverage), # Non-Cpg/CpG
        perc(qcmean( qc$chrX.count )), # ChrX reads (%)
        perc(qcmean( qc$chrY.count )), # ChrY reads (%)
        twodig(qcmean( qc$avg.coverage.by.density )) # Peak SQRT
    );
    # message(rez);
    return(rez);
}
# Create a line for R friendly text QC file
# Same layout as .qcTextLineT, but values are written as plain numbers
# (raw fractions instead of formatted percentages) so Summary_QC_R.txt can
# be read straight back into R.
.qcTextLineR = function(qc){
    name = qc$name;
    # NOTE(review): as in .qcTextLineT, a NULL qc yields an all-NA line
    # with no sample name, since qc$name is NULL too -- confirm intended.
    if( is.null(qc) )
        return(paste0(name,paste0(rep("\tNA",.qccols-1), collapse = "")));
    # Formatters are identity here: raw values are written, despite the
    # "%" wording in the per-column comments below.
    afracb = function(a,b){ paste0(a, "\t", a/b) };
    perc = identity;
    twodig = identity;
    s = identity;
    # Stamp the QC sub-records with their S3 classes so that qcmean()
    # dispatches to the right method (mirrors .qcTextLineT).
    if( !is.null(qc$hist.score1))
        class(qc$hist.score1) = "qcHistScore";
    if( !is.null(qc$bf.hist.score1))
        class(qc$bf.hist.score1) = "qcHistScoreBF";
    if( !is.null(qc$hist.edit.dist1))
        class(qc$hist.edit.dist1) = "qcEditDist";
    if( !is.null(qc$bf.hist.edit.dist1))
        class(qc$bf.hist.edit.dist1) = "qcEditDistBF";
    if( !is.null(qc$hist.length.matched))
        class(qc$hist.length.matched) = "qcLengthMatched";
    if( !is.null(qc$bf.hist.length.matched))
        class(qc$bf.hist.length.matched) = "qcLengthMatchedBF";
    if( !is.null(qc$frwrev) )
        class(qc$frwrev) = "qcFrwrev";
    # One value (or count+fraction pair) per header column, same order as
    # .qcTextHeaderR.
    rez = paste( sep = "\t",
        name, # Sample
        if(is.null(qc$nbams)){1}else{qc$nbams}, # Number of BAMs
        s(qc$reads.total), # Total reads
        afracb(qc$reads.aligned, qc$reads.total), # Reads aligned, % of total
        afracb(qc$reads.recorded, qc$reads.aligned), # Reads after filter, % of aligned
        afracb(
            qc$reads.recorded - qc$reads.recorded.no.repeats,
            qc$reads.aligned), # Reads removed as duplicates, % of aligned
        afracb(qc$reads.recorded.no.repeats, qc$reads.aligned), # Reads used for coverage, % of aligned
        perc(qcmean( qc$frwrev.no.repeats )), # Forward strand (%)
        twodig(qcmean( qc$hist.score1 )), # Avg alignment score
        twodig(qcmean( qc$hist.length.matched )), # Avg aligned length
        twodig(qcmean( qc$hist.edit.dist1 )), # Avg edit distance
        perc(qcmean( qc$cnt.nonCpG.reads )), # Non-CpG reads (%)
        twodig( qc$avg.noncpg.coverage ), # Avg non-CpG coverage
        twodig( qc$avg.cpg.coverage ), # Avg CpG coverage
        perc( qc$avg.noncpg.coverage / qc$avg.cpg.coverage), # Non-Cpg/CpG
        perc(qcmean( qc$chrX.count )), # ChrX reads (%)
        perc(qcmean( qc$chrY.count )), # ChrY reads (%)
        twodig(qcmean( qc$avg.coverage.by.density )) # Peak SQRT
    );
    return(rez);
}
# combine QC metrics of multiple Rbam objects
# Merges the $qc records of all Rbams in 'bamlist' by summing each metric
# across BAMs with the package's `%add%` operator. Returns list(qc = ...).
.combine.bams.qc = function( bamlist ){
    # A single BAM needs no merging.
    if( length(bamlist) == 1 )
        return(bamlist[[1]]);
    ### Deal with QCs
    qclist = lapply(bamlist, `[[`, "qc");
    # Union of all metric names present in any BAM.
    allnames = unique(unlist(lapply(qclist, names), use.names = FALSE));
    # Fold each metric across BAMs; missing entries come through as NULL
    # and are handled by `%add%`.
    merged = lapply(allnames, function(nm)
        Reduce(`%add%`, lapply(qclist, `[[`, nm)));
    names(merged) = allnames;
    return(list(qc = merged));
}
# Load QC metrics for all BAMs
# Reads <param$dirrqc>/<bamname>.qc.rds for every requested BAM name and
# returns a named list with one entry per BAM; entries stay NULL where the
# QC file is missing (a message is emitted for each missing file).
loadBamQC = function(param, bams){
    qcs = vector("list", length(bams));
    names(qcs) = bams;
    for( bamname in bams ){
        qcfile = paste0(param$dirrqc, "/", bamname, ".qc.rds");
        if( !file.exists(qcfile) ){
            message("QC file not found: ", qcfile);
            next;
        }
        qcs[[bamname]] = readRDS(qcfile);
    }
    return(qcs)
}
# Combine BAM QCs by sample
# 'bamset' is a named list mapping each sample name to a vector of BAM
# names. For every sample, merge the QC of its constituent BAMs (via
# .combine.bams.qc) and record the sample name in the result.
combineBamQcIntoSamples = function(rbamlist, bamset){
    samplenames = names(bamset);
    bigqc = vector("list", length(bamset));
    names(bigqc) = samplenames;
    for( i in seq_along(bamset) ){
        merged = .combine.bams.qc( rbamlist[bamset[[i]]] )$qc;
        # Guarantee a list even when no QC was available for the sample.
        if( length(merged) == 0 )
            merged = list();
        merged$name = samplenames[i];
        bigqc[[i]] = merged;
    }
    return(bigqc);
}
# Estimate fragment size distribution
# Fits a scaled/shifted logistic decay to the coverage of isolated CpGs as
# a function of distance ('frdata') and returns the normalized fragment
# size distribution estimate: a non-increasing vector starting at 1.
# 'seqLength' is the read (sequence) length.
estimateFragmentSizeDistribution = function(frdata, seqLength){
    # Degenerate case: no data beyond the read length.
    if( length(frdata) == seqLength )
        return( rep(1, seqLength) );
    ### Point of crossing the middle
    # Plateau level at short distances vs. baseline at long distances.
    ytop = median(frdata[1:seqLength]);
    ybottom = median(tail(frdata,seqLength));
    ymidpoint = ( ytop + ybottom )/2;
    yrange = ( ytop - ybottom );
    overymid = (frdata > ymidpoint)
    # x where the curve crosses the midpoint (balance point of the
    # above-midpoint indicator).
    xmidpoint = which.max( cumsum( overymid - mean(overymid) ) );
    ### interquartile range estimate
    # Distance between the 25% and 75% quantile crossings.
    # BUG FIX: the "-" must end the first line; on its own line it starts a
    # new statement, silently truncating the assignment to the first term.
    xqrange =
        which.max(cumsum( ((frdata > quantile(frdata,0.25))-0.75) )) -
        which.max(cumsum( ((frdata > quantile(frdata,0.75))-0.25) ));
    logitrange = diff(qlogis(c(0.25,0.75)));
    # Initial parameters for the logistic fit:
    # midpoint, slope scale, amplitude, baseline.
    initparam = c(
        xmidpoint = xmidpoint,
        xdivider = (xqrange/logitrange)/2,
        ymultiplier = yrange,
        ymean = ybottom);
    fsPredict = function( x, param){
        (plogis((param[1]-x)/param[2]))*param[3]+param[4]
    }
    x = seq_along(frdata);
    # plot( x, frdata)
    # lines( x, fsPredict(x, initparam), col="blue", lwd = 3)
    # Robust least squares: the 10 largest squared errors are excluded so
    # isolated spikes do not dominate the fit.
    fmin = function(param){
        fit2 = fsPredict(x, param);
        error = frdata - fit2;
        e2 = error^2;
        e2s = sort.int(e2,decreasing = TRUE);
        return(sum(e2s[-(1:10)]));
    }
    estimate = optim(par = initparam, fn = fmin, method = "BFGS");
    param = estimate$par;
    fit = fsPredict(x, param);
    # Pure logistic component in [0,1]: truncate where it drops below 0.05,
    # shift the cut point to zero, flatten within one read length, and
    # normalize so the estimate starts at 1.
    rezfit = plogis((param[1]-x)/param[2]);
    keep = rezfit>0.05;
    keep[length(keep)] = FALSE;
    rezfit = rezfit - max(rezfit[!keep],0)
    rezfit[1:seqLength] = rezfit[seqLength];
    rezfit = rezfit[keep];
    rezfit = rezfit / rezfit[1];
    # lz = lm(frdata[seq_along(rezfit)] ~ rezfit)
    # lines(rezfit*lz$coefficients[2]+lz$coefficients[1], lwd = 4, col="red");
    return(rezfit);
}
# Step 2 of RaMWAS
# Collects the per-BAM QC objects produced by step 1, aggregates them by
# BAM, by sample, and in total; writes RDS and text summaries plus
# diagnostic PDF figures under param$dirqc; finally estimates the fragment
# size distribution from isolated-CpG coverage and saves it for later
# steps. Returns NULL invisibly.
ramwas2collectqc = function( param ){
    param = parameterPreprocess(param);
    dir.create(param$dirqc, showWarnings = FALSE, recursive = TRUE);
    parameterDump(dir = param$dirqc, param = param,
        toplines = c(   "dirrqc",
                        "filebamlist", "filebam2sample",
                        "bam2sample", "bamnames",
                        "scoretag", "minscore",
                        "minfragmentsize", "maxfragmentsize",
                        "maxrepeats",
                        "filecpgset", "filenoncpgset"));
    {
        # Full set of BAM names: explicit bamnames plus every BAM mentioned
        # in the bam2sample mapping.
        bams = NULL;
        if( !is.null(param$bamnames) )
            bams = c(bams, param$bamnames);
        if( !is.null(param$bam2sample) )
            bams = c(bams, unlist(param$bam2sample, use.names = FALSE));
        bams = unique(basename(bams));
    } # bams
    {
        message("Load BAM QC info");
        rbamlist = loadBamQC(param, bams);
    }
    # For one grouping of BAMs ('bamset'), aggregate the QC, save
    # qclist.rds, both text summaries, and per-metric PDF figures under
    # param$dirqc/<dirname>. Returns the aggregated QC list invisibly.
    collect.qc.summary = function(bamset, dirname){
        dirloc = paste0(param$dirqc, "/", dirname);
        dir.create(dirloc, showWarnings = FALSE, recursive = TRUE);
        bigqc = combineBamQcIntoSamples(rbamlist = rbamlist, bamset = bamset);
        saveRDS(file = paste0(dirloc,"/qclist.rds"), object = bigqc);
        {
            textT = sapply(bigqc, .qcTextLineT)
            textR = sapply(bigqc, .qcTextLineR)
            writeLines(
                con = paste0(dirloc, "/Summary_QC.txt"),
                text = c(.qcTextHeaderT, textT));
            writeLines(
                con = paste0(dirloc, "/Summary_QC_R.txt"),
                text = c(.qcTextHeaderR, textR));
            rm(textT, textR);
        } # text summary
        # Histogram of a scalar statistic (qcfun applied to every QC entry)
        # across the grouping; skipped when no values remain.
        histqc = function(qcfun, plottitle, filename){
            vec = unlist(lapply(bigqc, qcfun));
            vec = vec[vec<1e6]; # drop extreme/degenerate values before plotting
            if(length(vec) == 0)
                return();
            pdf(paste0(dirloc, "/Fig_hist_", filename, ".pdf"));
            hist(
                x = vec,
                breaks = 3*round(sqrt(length(vec))),
                main = plottitle,
                col = "lightblue",
                xlab = "value",
                yaxs = "i")
            dev.off()
        }
        # One multi-page PDF: the plot() method of the named QC component
        # for every entry of the grouping that has it.
        figfun = function(qcname, plotname){
            message("Saving plots ", plotname);
            pdf(paste0(dirloc,"/Fig_",plotname,".pdf"));
            for( ibam in seq_along(bamset) ){
                plotinfo = bigqc[[ibam]][[qcname]];
                if( !is.null(bigqc[[ibam]][[qcname]]))
                    plot(plotinfo, samplename = names(bamset)[ibam]);
                rm(plotinfo);
            }
            dev.off();
        }
        figfun(
            qcname = "hist.score1",
            plotname = "score");
        figfun(
            qcname = "bf.hist.score1",
            plotname = "score_before_filter");
        figfun(
            qcname = "hist.edit.dist1",
            plotname = "edit_distance");
        figfun(
            qcname = "bf.hist.edit.dist1",
            plotname = "edit_distance_before_filter");
        figfun(
            qcname = "hist.length.matched",
            plotname = "matched_length");
        figfun(
            qcname = "bf.hist.length.matched",
            plotname = "matched_length_before_filter");
        figfun(
            qcname = "hist.isolated.dist1",
            plotname = "isolated_distance");
        figfun(
            qcname = "avg.coverage.by.density",
            plotname = "coverage_by_density");
        # bigqc[[1]]$cnt.nonCpG.reads
        # Cross-sample histograms are only informative with enough entries.
        if(length(bigqc) >= 10){
            histqc(
                qcfun = function(x){x$avg.cpg.coverage /
                        max(x$avg.noncpg.coverage,.Machine$double.eps)},
                plottitle = paste0(
                    "Enrichment lower bound\n",
                    "(Avg CpG / non-CpG score)"),
                filename = "enrichment")
            histqc(
                qcfun = function(x){x$avg.noncpg.coverage /
                        x$avg.cpg.coverage * 100},
                plottitle = paste0(
                    "Background noise level\n",
                    "(Avg non-CpG / CpG score, %)"),
                filename = "noise")
            histqc(qcfun = function(x){x$reads.recorded.no.repeats/1e6},
                plottitle = "Number of reads after filters, millions",
                filename = "Nreads")
            histqc(qcfun = function(x){qcmean(x$hist.edit.dist1)},
                plottitle = "Average edit distance of aligned reads",
                filename = "edit_dist")
            histqc(qcfun = function(x){qcmean(x$avg.coverage.by.density)},
                plottitle = paste0(
                    "CpG density at peak sensitivity (SQRT)\n",
                    "(a value per BAM / sample)"),
                filename = "peak")
            histqc(qcfun = function(x){qcmean(x$cnt.nonCpG.reads)},
                plottitle = "Fraction of reads not covering any CpGs",
                filename = "noncpg_reads")
        }
        return(invisible(bigqc));
    }
    # By bam
    bamset = bams;
    names(bamset) = bams;
    dirname = "summary_bams";
    message("Saving QC info by BAM");
    collect.qc.summary(bamset, dirname);
    rm(bamset, dirname);
    if( !is.null(param$bam2sample) ){
        # by sample
        message("Saving QC info by BAMs in bam2sample");
        bb = unlist(param$bam2sample, use.names = FALSE);
        names(bb) = bb;
        collect.qc.summary(
            bamset = bb,
            dirname = "summary_bams_in_bam2sample");
        message("Saving QC info by SAMPLE");
        collect.qc.summary(
            bamset = param$bam2sample,
            dirname = "summary_by_sample");
        message("Saving QC info TOTAL (all BAMs in bam2sample)");
        uniqbams = unique(unlist(param$bam2sample, use.names = FALSE));
        bigqc = collect.qc.summary(
            bamset = list(total = uniqbams),
            dirname = "summary_total");
        rm(uniqbams);
    } else {
        message("Saving QC info TOTAL (all BAMs in bamnames/filebamlist)");
        bigqc = collect.qc.summary(
            bamset = list(total=bams),
            dirname = "summary_total");
    }
    ### Fragment size
    # Estimate the fragment size distribution from the distance-to-isolated-
    # CpG coverage histogram of the combined ("total") QC.
    frdata = bigqc$total$hist.isolated.dist1;
    estimate = estimateFragmentSizeDistribution(frdata, param$minfragmentsize);
    writeLines(
        con = paste0(param$dirfilter,"/Fragment_size_distribution.txt"),
        text = as.character(estimate));
    pdf(paste0(param$dirqc,"/Fragment_size_distribution_estimate.pdf"), 8, 8);
    plotFragmentSizeDistributionEstimate(frdata, estimate);
    title("Isolated CpG coverage vs.\nfragment size distribution estimate");
    dev.off();
    return(invisible(NULL));
}
# Plot isolated-CpG coverage vs distance with the fragment size
# distribution estimate overlaid. 'frdata' is the observed coverage,
# 'estimate' the normalized estimate; col1/col2 are the point/line colors.
plotFragmentSizeDistributionEstimate = function(
        frdata,
        estimate,
        col1 = "blue",
        col2 = "red"){
    # Rescale the [0,1] estimate onto the read-count axis via a linear fit
    # against the matching prefix of the data.
    scalefit = lm(frdata[seq_along(estimate)] ~ estimate);
    overlay = (estimate * scalefit$coefficients[2] +
                   scalefit$coefficients[1]) / 1000;
    plot(
        x = as.vector(frdata)/1000,
        pch = 19,
        col = col1,
        ylab = "Read count, thousands",
        xlab = "Distance to isolated CpGs, bp",
        xaxs = "i",
        xlim = c(0, length(frdata)),
        axes = FALSE);
    axis(1);
    axis(2);
    lines(
        x = overlay,
        lwd = 4,
        col = col2);
}
| /R/rw2collectqc.r | no_license | xtmgah/ramwas | R | false | false | 16,871 | r | ### Text QC file headers
.qcTextHeaderT = {paste(sep = "\t",
"Sample",
"# BAMs",
"Total reads",
"Reads aligned\tRA % of total",
"Reads after filter\tRAF % of aligned",
"Reads removed as duplicate\tRRAD % of aligned",
"Reads used for coverage\tRUFC % of aligned",
"Forward strand (%)",
"Avg alignment score",
"Avg aligned length",
"Avg edit distance",
"Non-CpG reads (%)",
"Avg non-CpG coverage",
"Avg CpG coverage",
"Non-Cpg/CpG coverage ratio",
"ChrX reads (%)",
"ChrY reads (%)",
"Peak SQRT")};
.qcTextHeaderR = {paste(sep = "\t",
"Sample",
"NBAMs",
"TotalReads",
"ReadsAligned\tReadsAlignedPct",
"ReadsAfterFilter\tReadsAfterFilterPct",
"ReadsRemovedAsDuplicate\tReadsRemovedAsDuplicatePct",
"ReadsUsedForCoverage\tReadsUsedForCoveragePct",
"ForwardStrandPct",
"AvgAlignmentScore",
"AvgAlignedLength",
"AvgEditDistance",
"NonCpGreadsPct",
"AvgNonCpGcoverage",
"AvgCpGcoverage",
"NonCpg2CpGcoverageRatio",
"ChrXreadsPct",
"ChrYreadsPct",
"PeakSQRT")};
# number of columns in the header
.qccols = length(strsplit(.qcTextHeaderT,"\t",fixed = TRUE)[[1]])
# Create a line for Excel friendly text QC file
.qcTextLineT = function(qc){
name = qc$name;
if( is.null(qc) )
return(paste0(name,paste0(rep("\tNA",.qccols-1), collapse = "")));
afracb = function(a,b){ sprintf("%s\t%.1f%%",s(a),100*a/b) };
perc = function(x){ sprintf("%.2f%%",100*x) };
twodig = function(x){ sprintf("%.2f",x) };
fourdig = function(x){ sprintf("%.4f",x) };
s = function(x){
formatC(x=x,
digits=ceiling(log10(max(x)+1)),
big.mark=",",
big.interval=3);
}
if( !is.null(qc$hist.score1))
class(qc$hist.score1) = "qcHistScore";
if( !is.null(qc$bf.hist.score1))
class(qc$bf.hist.score1) = "qcHistScoreBF";
if( !is.null(qc$hist.edit.dist1))
class(qc$hist.edit.dist1) = "qcEditDist";
if( !is.null(qc$bf.hist.edit.dist1))
class(qc$bf.hist.edit.dist1) = "qcEditDistBF";
if( !is.null(qc$hist.length.matched))
class(qc$hist.length.matched) = "qcLengthMatched";
if( !is.null(qc$bf.hist.length.matched))
class(qc$bf.hist.length.matched) = "qcLengthMatchedBF";
if( !is.null(qc$frwrev) )
class(qc$frwrev) = "qcFrwrev";
rez = paste( sep = "\t",
name, # Sample
if(is.null(qc$nbams)){1}else{qc$nbams}, # Number of BAMs
s(qc$reads.total), # Total reads
afracb(qc$reads.aligned, qc$reads.total), # Reads aligned, % of total
afracb(qc$reads.recorded, qc$reads.aligned), # Reads after filter, % ali
afracb(
qc$reads.recorded - qc$reads.recorded.no.repeats,
qc$reads.aligned), # Reads removed as repeats\tRRAR % of aligned
afracb(qc$reads.recorded.no.repeats, qc$reads.aligned), # Reads for scor
perc(qcmean( qc$frwrev.no.repeats )), # Forward strand (%)
twodig(qcmean( qc$hist.score1 )), # Avg alignment score
twodig(qcmean( qc$hist.length.matched )), # Avg aligned length
twodig(qcmean( qc$hist.edit.dist1 )), # Avg edit distance
perc(qcmean( qc$cnt.nonCpG.reads )), # Non-CpG reads (%)
twodig( qc$avg.noncpg.coverage ), # Avg non-CpG coverage
twodig( qc$avg.cpg.coverage ), # Avg CpG coverage
fourdig( qc$avg.noncpg.coverage / qc$avg.cpg.coverage), # Non-Cpg/CpG
perc(qcmean( qc$chrX.count )), # ChrX reads (%)
perc(qcmean( qc$chrY.count )), # ChrY reads (%)
twodig(qcmean( qc$avg.coverage.by.density )) # Peak SQRT
);
# message(rez);
return(rez);
}
# Create a line for R friendly text QC file
.qcTextLineR = function(qc){
name = qc$name;
if( is.null(qc) )
return(paste0(name,paste0(rep("\tNA",.qccols-1), collapse = "")));
afracb = function(a,b){ paste0(a, "\t", a/b) };
perc = identity;
twodig = identity;
s = identity;
if( !is.null(qc$hist.score1))
class(qc$hist.score1) = "qcHistScore";
if( !is.null(qc$bf.hist.score1))
class(qc$bf.hist.score1) = "qcHistScoreBF";
if( !is.null(qc$hist.edit.dist1))
class(qc$hist.edit.dist1) = "qcEditDist";
if( !is.null(qc$bf.hist.edit.dist1))
class(qc$bf.hist.edit.dist1) = "qcEditDistBF";
if( !is.null(qc$hist.length.matched))
class(qc$hist.length.matched) = "qcLengthMatched";
if( !is.null(qc$bf.hist.length.matched))
class(qc$bf.hist.length.matched) = "qcLengthMatchedBF";
if( !is.null(qc$frwrev) )
class(qc$frwrev) = "qcFrwrev";
rez = paste( sep = "\t",
name, # Sample
if(is.null(qc$nbams)){1}else{qc$nbams}, # Number of BAMs
s(qc$reads.total), # Total reads
afracb(qc$reads.aligned, qc$reads.total), # Reads aligned, % of total
afracb(qc$reads.recorded, qc$reads.aligned), # Reads after filter, % ali
afracb(
qc$reads.recorded - qc$reads.recorded.no.repeats,
qc$reads.aligned), # Reads removed as repeats\tRRAR % of aligned
afracb(qc$reads.recorded.no.repeats, qc$reads.aligned), # Reads for scor
perc(qcmean( qc$frwrev.no.repeats )), # Forward strand (%)
twodig(qcmean( qc$hist.score1 )), # Avg alignment score
twodig(qcmean( qc$hist.length.matched )), # Avg aligned length
twodig(qcmean( qc$hist.edit.dist1 )), # Avg edit distance
perc(qcmean( qc$cnt.nonCpG.reads )), # Non-CpG reads (%)
twodig( qc$avg.noncpg.coverage ), # Avg non-CpG coverage
twodig( qc$avg.cpg.coverage ), # Avg CpG coverage
perc( qc$avg.noncpg.coverage / qc$avg.cpg.coverage), # Non-Cpg/CpG
perc(qcmean( qc$chrX.count )), # ChrX reads (%)
perc(qcmean( qc$chrY.count )), # ChrY reads (%)
twodig(qcmean( qc$avg.coverage.by.density )) # Peak SQRT
);
return(rez);
}
# combine QC metrics of multiple Rbam objects
.combine.bams.qc = function( bamlist ){
if(length(bamlist)==1)
return(bamlist[[1]]);
### Deal with QCs
qclist = lapply(bamlist, `[[`, "qc");
qcnames = lapply(qclist, names);
qcnames = unique(unlist(qcnames, use.names = FALSE))
bigqc = vector("list", length(qcnames));
names(bigqc) = qcnames;
for( nm in qcnames){ # nm = qcnames[1]
bigqc[[nm]] = Reduce(`%add%`, lapply(qclist, `[[`, nm));
}
return(list(qc = bigqc));
}
# Load QC metrics for all BAMs
loadBamQC = function(param, bams){
rbamlist = vector("list", length(bams));
names(rbamlist) = bams;
for( bamname in bams){ # bamname = bams[1]
rdsqcfile = paste0( param$dirrqc, "/", bamname, ".qc.rds" );
if(file.exists(rdsqcfile)){
rbamlist[[bamname]] = readRDS(rdsqcfile);
} else {
message("QC file not found: ",rdsqcfile)
}
}
return(rbamlist)
}
# Combine BAM QCs by sample
combineBamQcIntoSamples = function(rbamlist, bamset){
bigqc = vector("list", length(bamset));
names(bigqc) = names(bamset);
for( ibam in seq_along(bamset) ){ # ibam=1
curbams = rbamlist[bamset[[ibam]]];
qc = .combine.bams.qc(curbams)$qc;
if( length(qc) > 0 ){
bigqc[[ibam]] = qc;
} else {
bigqc[[ibam]] = list();
}
bigqc[[ibam]]$name = names(bamset)[ibam];
}
return(bigqc);
}
# Estimate fragmet size distribution
estimateFragmentSizeDistribution = function(frdata, seqLength){
if( length(frdata) == seqLength )
return( rep(1, seqLength) );
### Point of crossing the middle
ytop = median(frdata[1:seqLength]);
ybottom = median(tail(frdata,seqLength));
ymidpoint = ( ytop + ybottom )/2;
yrange = ( ytop - ybottom );
overymid = (frdata > ymidpoint)
xmidpoint = which.max( cumsum( overymid - mean(overymid) ) );
### interquartile range estimate
xqrange =
which.max(cumsum( ((frdata >
quantile(frdata,0.25))-0.75) ))
-
which.max(cumsum( ((frdata >
quantile(frdata,0.75))-0.25) ))
logitrange = diff(qlogis(c(0.25,0.75)));
initparam = c(
xmidpoint = xmidpoint,
xdivider = (xqrange/logitrange)/2,
ymultiplier = yrange,
ymean = ybottom);
fsPredict = function( x, param){
(plogis((param[1]-x)/param[2]))*param[3]+param[4]
}
x = seq_along(frdata);
# plot( x, frdata)
# lines( x, fsPredict(x, initparam), col="blue", lwd = 3)
fmin = function(param){
fit2 = fsPredict(x, param);
# (plogis((param[1]-x)/param[2]))*param[3]+param[4];
error = frdata - fit2;
e2 = error^2;
e2s = sort.int(e2,decreasing = TRUE);
return(sum(e2s[-(1:10)]));
}
estimate = optim(par = initparam, fn = fmin, method = "BFGS");
param = estimate$par;
fit = fsPredict(x, param);
rezfit = plogis((param[1]-x)/param[2]);
keep = rezfit>0.05;
keep[length(keep)] = FALSE;
rezfit = rezfit - max(rezfit[!keep],0)
rezfit[1:seqLength] = rezfit[seqLength];
rezfit = rezfit[keep];
rezfit = rezfit / rezfit[1];
# lz = lm(frdata[seq_along(rezfit)] ~ rezfit)
# lines(rezfit*lz$coefficients[2]+lz$coefficients[1], lwd = 4, col="red");
return(rezfit);
}
# Step 2 of RaMWAS
ramwas2collectqc = function( param ){
param = parameterPreprocess(param);
dir.create(param$dirqc, showWarnings = FALSE, recursive = TRUE);
parameterDump(dir = param$dirqc, param = param,
toplines = c( "dirrqc",
"filebamlist", "filebam2sample",
"bam2sample", "bamnames",
"scoretag", "minscore",
"minfragmentsize", "maxfragmentsize",
"maxrepeats",
"filecpgset", "filenoncpgset"));
{
bams = NULL;
if( !is.null(param$bamnames) )
bams = c(bams, param$bamnames);
if( !is.null(param$bam2sample) )
bams = c(bams, unlist(param$bam2sample, use.names = FALSE));
bams = unique(basename(bams));
} # bams
{
message("Load BAM QC info");
rbamlist = loadBamQC(param, bams);
}
collect.qc.summary = function(bamset, dirname){
dirloc = paste0(param$dirqc, "/", dirname);
dir.create(dirloc, showWarnings = FALSE, recursive = TRUE);
bigqc = combineBamQcIntoSamples(rbamlist = rbamlist, bamset = bamset);
saveRDS(file = paste0(dirloc,"/qclist.rds"), object = bigqc);
{
textT = sapply(bigqc, .qcTextLineT)
textR = sapply(bigqc, .qcTextLineR)
writeLines(
con = paste0(dirloc, "/Summary_QC.txt"),
text = c(.qcTextHeaderT, textT));
writeLines(
con = paste0(dirloc, "/Summary_QC_R.txt"),
text = c(.qcTextHeaderR, textR));
rm(textT, textR);
} # text summary
histqc = function(qcfun, plottitle, filename){
vec = unlist(lapply(bigqc, qcfun));
vec = vec[vec<1e6];
if(length(vec) == 0)
return();
pdf(paste0(dirloc, "/Fig_hist_", filename, ".pdf"));
hist(
x = vec,
breaks = 3*round(sqrt(length(vec))),
main = plottitle,
col = "lightblue",
xlab = "value",
yaxs = "i")
dev.off()
}
figfun = function(qcname, plotname){
message("Saving plots ", plotname);
pdf(paste0(dirloc,"/Fig_",plotname,".pdf"));
for( ibam in seq_along(bamset) ){
plotinfo = bigqc[[ibam]][[qcname]];
if( !is.null(bigqc[[ibam]][[qcname]]))
plot(plotinfo, samplename = names(bamset)[ibam]);
rm(plotinfo);
}
dev.off();
}
figfun(
qcname = "hist.score1",
plotname = "score");
figfun(
qcname = "bf.hist.score1",
plotname = "score_before_filter");
figfun(
qcname = "hist.edit.dist1",
plotname = "edit_distance");
figfun(
qcname = "bf.hist.edit.dist1",
plotname = "edit_distance_before_filter");
figfun(
qcname = "hist.length.matched",
plotname = "matched_length");
figfun(
qcname = "bf.hist.length.matched",
plotname = "matched_length_before_filter");
figfun(
qcname = "hist.isolated.dist1",
plotname = "isolated_distance");
figfun(
qcname = "avg.coverage.by.density",
plotname = "coverage_by_density");
# bigqc[[1]]$cnt.nonCpG.reads
if(length(bigqc) >= 10){
histqc(
qcfun = function(x){x$avg.cpg.coverage /
max(x$avg.noncpg.coverage,.Machine$double.eps)},
plottitle = paste0(
"Enrichment lower bound\n",
"(Avg CpG / non-CpG score)"),
filename = "enrichment")
histqc(
qcfun = function(x){x$avg.noncpg.coverage /
x$avg.cpg.coverage * 100},
plottitle = paste0(
"Background noise level\n",
"(Avg non-CpG / CpG score, %)"),
filename = "noise")
histqc(qcfun = function(x){x$reads.recorded.no.repeats/1e6},
plottitle = "Number of reads after filters, millions",
filename = "Nreads")
histqc(qcfun = function(x){qcmean(x$hist.edit.dist1)},
plottitle = "Average edit distance of aligned reads",
filename = "edit_dist")
histqc(qcfun = function(x){qcmean(x$avg.coverage.by.density)},
plottitle = paste0(
"CpG density at peak sensitivity (SQRT)\n",
"(a value per BAM / sample)"),
filename = "peak")
histqc(qcfun = function(x){qcmean(x$cnt.nonCpG.reads)},
plottitle = "Fraction of reads not covering any CpGs",
filename = "noncpg_reads")
}
return(invisible(bigqc));
}
# By bam
bamset = bams;
names(bamset) = bams;
dirname = "summary_bams";
message("Saving QC info by BAM");
collect.qc.summary(bamset, dirname);
rm(bamset, dirname);
if( !is.null(param$bam2sample) ){
# by sample
message("Saving QC info by BAMs in bam2sample");
bb = unlist(param$bam2sample, use.names = FALSE);
names(bb) = bb;
collect.qc.summary(
bamset = bb,
dirname = "summary_bams_in_bam2sample");
message("Saving QC info by SAMPLE");
collect.qc.summary(
bamset = param$bam2sample,
dirname = "summary_by_sample");
message("Saving QC info TOTAL (all BAMs in bam2sample)");
uniqbams = unique(unlist(param$bam2sample, use.names = FALSE));
bigqc = collect.qc.summary(
bamset = list(total = uniqbams),
dirname = "summary_total");
rm(uniqbams);
} else {
message("Saving QC info TOTAL (all BAMs in bamnames/filebamlist)");
bigqc = collect.qc.summary(
bamset = list(total=bams),
dirname = "summary_total");
}
### Fragment size
frdata = bigqc$total$hist.isolated.dist1;
estimate = estimateFragmentSizeDistribution(frdata, param$minfragmentsize);
writeLines(
con = paste0(param$dirfilter,"/Fragment_size_distribution.txt"),
text = as.character(estimate));
pdf(paste0(param$dirqc,"/Fragment_size_distribution_estimate.pdf"), 8, 8);
plotFragmentSizeDistributionEstimate(frdata, estimate);
title("Isolated CpG coverage vs.\nfragment size distribution estimate");
dev.off();
return(invisible(NULL));
}
# Plot the isolated-CpG coverage histogram together with the fitted
# fragment-size distribution estimate.
#
# frdata   - numeric vector of read counts by distance to isolated CpGs
# estimate - numeric vector with the estimated fragment size distribution
# col1     - point color for the observed counts (default "blue")
# col2     - line color for the fitted estimate (default "red")
#
# Side effect only: draws on the current graphics device.
plotFragmentSizeDistributionEstimate = function(
        frdata,
        estimate,
        col1 = "blue",
        col2 = "red"){
    # Linear fit rescales the estimate onto the observed count scale
    fit = lm(frdata[seq_along(estimate)] ~ estimate)
    intercept = fit$coefficients[1]
    slope = fit$coefficients[2]
    # Observed counts, in thousands, without default axes
    plot(
        x = as.vector(frdata)/1000,
        pch = 19,
        col = col1,
        ylab = "Read count, thousands",
        xlab = "Distance to isolated CpGs, bp",
        xaxs = "i",
        xlim = c(0, length(frdata)),
        axes = FALSE)
    axis(1)
    axis(2)
    # Overlay the rescaled estimate as a thick line
    lines(
        x = (estimate * slope + intercept)/1000,
        lwd = 4,
        col = col2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coord.R
\name{SurveyData.Assign}
\alias{SurveyData.Assign}
\title{Assign to SurveyData environment}
\usage{
SurveyData.Assign(ObjectName, ObjectValue = NA, DeparseObjectName = FALSE)
}
\arguments{
\item{ObjectName}{The variable name to assign}
\item{ObjectValue}{The value to assign to the variable}
\item{DeparseObjectName}{\code{TRUE} if \code{ObjectName} is a string and \code{deparse} needs to be called; \code{FALSE} otherwise}
}
\description{
Assigns or overrides an existing variable in the SurveyData environment
}
\details{
By default, the value NA is assigned when unspecified
}
\keyword{internal}
| /man/SurveyData.Assign.Rd | no_license | nathanesau-academic/coord | R | false | true | 661 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coord.R
\name{SurveyData.Assign}
\alias{SurveyData.Assign}
\title{Assign to SurveyData environment}
\usage{
SurveyData.Assign(ObjectName, ObjectValue = NA, DeparseObjectName = FALSE)
}
\arguments{
\item{ObjectName}{The variable name to assign}
\item{ObjectValue}{The value to assign to the variable}
\item{DeparseObjectName}{\code{TRUE} if \code{ObjectName} is a string and \code{deparse} needs to be called; \code{FALSE} otherwise}
}
\description{
Assigns or overrides an existing variable in the SurveyData environment
}
\details{
By default, the value NA is assigned when unspecified
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/thin.R
\name{thin}
\alias{thin}
\title{Thinning Posterior Draws}
\usage{
thin(object, thin = 5)
}
\arguments{
\item{object}{an object of class \code{"bvar"} or \code{"bvec"}.}
\item{thin}{an integer specifying the thinning interval between successive values of posterior draws.}
}
\value{
An object of class \code{"bvar"} or \code{"bvec"}.
}
\description{
Thins the MCMC posterior draws in an object of class \code{"bvar"} or \code{"bvec"}.
}
\examples{
data("e6")
data <- gen_vec(e6, p = 4, const = "unrestricted", season = "unrestricted")
y <- data$Y
w <- data$W
x <- data$X
# Reset random number generator for reproducibility
set.seed(1234567)
iter <- 500 # Number of iterations of the Gibbs sampler
# Chosen number of iterations should be much higher, e.g. 30000.
burnin <- 100 # Number of burn-in draws
store <- iter - burnin
r <- 1 # Set rank
t <- ncol(y) # Number of observations
k <- nrow(y) # Number of endogenous variables
k_w <- nrow(w) # Number of regressors in error correction term
k_x <- nrow(x) # Number of differenced regressors and unrestricted deterministic terms
k_alpha <- k * r # Number of elements in alpha
k_beta <- k_w * r # Number of elements in beta
k_gamma <- k * k_x
# Set uninformative priors
a_mu_prior <- matrix(0, k_x * k) # Vector of prior parameter means
a_v_i_prior <- diag(0, k_x * k) # Inverse of the prior covariance matrix
v_i <- 0
p_tau_i <- diag(1, k_w)
u_sigma_df_prior <- r # Prior degrees of freedom
u_sigma_scale_prior <- diag(0, k) # Prior covariance matrix
u_sigma_df_post <- t + u_sigma_df_prior # Posterior degrees of freedom
# Initial values
beta <- matrix(c(1, -4), k_w, r)
u_sigma_i <- diag(.0001, k)
u_sigma <- solve(u_sigma_i)
g_i <- u_sigma_i
# Data containers
draws_alpha <- matrix(NA, k_alpha, store)
draws_beta <- matrix(NA, k_beta, store)
draws_pi <- matrix(NA, k * k_w, store)
draws_gamma <- matrix(NA, k_gamma, store)
draws_sigma <- matrix(NA, k^2, store)
# Start Gibbs sampler
for (draw in 1:iter) {
# Draw conditional mean parameters
temp <- post_coint_kls(y = y, beta = beta, w = w, x = x, sigma_i = u_sigma_i,
v_i = v_i, p_tau_i = p_tau_i, g_i = g_i,
gamma_mu_prior = a_mu_prior,
gamma_v_i_prior = a_v_i_prior)
alpha <- temp$alpha
beta <- temp$beta
Pi <- temp$Pi
gamma <- temp$Gamma
# Draw variance-covariance matrix
u <- y - Pi \%*\% w - matrix(gamma, k) \%*\% x
u_sigma_scale_post <- solve(tcrossprod(u) +
v_i * alpha \%*\% tcrossprod(crossprod(beta, p_tau_i) \%*\% beta, alpha))
u_sigma_i <- matrix(rWishart(1, u_sigma_df_post, u_sigma_scale_post)[,, 1], k)
u_sigma <- solve(u_sigma_i)
# Update g_i
g_i <- u_sigma_i
# Store draws
if (draw > burnin) {
draws_alpha[, draw - burnin] <- alpha
draws_beta[, draw - burnin] <- beta
draws_pi[, draw - burnin] <- Pi
draws_gamma[, draw - burnin] <- gamma
draws_sigma[, draw - burnin] <- u_sigma
}
}
# Number of non-deterministic coefficients
k_nondet <- (k_x - 4) * k
# Generate bvec object
bvec_est <- bvec(y = y, w = w, x = x,
Pi = draws_pi,
Gamma = draws_gamma[1:k_nondet,],
C = draws_gamma[(k_nondet + 1):nrow(draws_gamma),],
Sigma = draws_sigma)
# Thin posterior draws
bvec_est <- thin(bvec_est, thin = 4)
}
| /man/thin.Rd | no_license | skycaptainleo/bvartools | R | false | true | 3,429 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/thin.R
\name{thin}
\alias{thin}
\title{Thinning Posterior Draws}
\usage{
thin(object, thin = 5)
}
\arguments{
\item{object}{an object of class \code{"bvar"} or \code{"bvec"}.}
\item{thin}{an integer specifying the thinning interval between successive values of posterior draws.}
}
\value{
An object of class \code{"bvar"} or \code{"bvec"}.
}
\description{
Thins the MCMC posterior draws in an object of class \code{"bvar"} or \code{"bvec"}.
}
\examples{
data("e6")
data <- gen_vec(e6, p = 4, const = "unrestricted", season = "unrestricted")
y <- data$Y
w <- data$W
x <- data$X
# Reset random number generator for reproducibility
set.seed(1234567)
iter <- 500 # Number of iterations of the Gibbs sampler
# Chosen number of iterations should be much higher, e.g. 30000.
burnin <- 100 # Number of burn-in draws
store <- iter - burnin
r <- 1 # Set rank
t <- ncol(y) # Number of observations
k <- nrow(y) # Number of endogenous variables
k_w <- nrow(w) # Number of regressors in error correction term
k_x <- nrow(x) # Number of differenced regressors and unrestricted deterministic terms
k_alpha <- k * r # Number of elements in alpha
k_beta <- k_w * r # Number of elements in beta
k_gamma <- k * k_x
# Set uninformative priors
a_mu_prior <- matrix(0, k_x * k) # Vector of prior parameter means
a_v_i_prior <- diag(0, k_x * k) # Inverse of the prior covariance matrix
v_i <- 0
p_tau_i <- diag(1, k_w)
u_sigma_df_prior <- r # Prior degrees of freedom
u_sigma_scale_prior <- diag(0, k) # Prior covariance matrix
u_sigma_df_post <- t + u_sigma_df_prior # Posterior degrees of freedom
# Initial values
beta <- matrix(c(1, -4), k_w, r)
u_sigma_i <- diag(.0001, k)
u_sigma <- solve(u_sigma_i)
g_i <- u_sigma_i
# Data containers
draws_alpha <- matrix(NA, k_alpha, store)
draws_beta <- matrix(NA, k_beta, store)
draws_pi <- matrix(NA, k * k_w, store)
draws_gamma <- matrix(NA, k_gamma, store)
draws_sigma <- matrix(NA, k^2, store)
# Start Gibbs sampler
for (draw in 1:iter) {
# Draw conditional mean parameters
temp <- post_coint_kls(y = y, beta = beta, w = w, x = x, sigma_i = u_sigma_i,
v_i = v_i, p_tau_i = p_tau_i, g_i = g_i,
gamma_mu_prior = a_mu_prior,
gamma_v_i_prior = a_v_i_prior)
alpha <- temp$alpha
beta <- temp$beta
Pi <- temp$Pi
gamma <- temp$Gamma
# Draw variance-covariance matrix
u <- y - Pi \%*\% w - matrix(gamma, k) \%*\% x
u_sigma_scale_post <- solve(tcrossprod(u) +
v_i * alpha \%*\% tcrossprod(crossprod(beta, p_tau_i) \%*\% beta, alpha))
u_sigma_i <- matrix(rWishart(1, u_sigma_df_post, u_sigma_scale_post)[,, 1], k)
u_sigma <- solve(u_sigma_i)
# Update g_i
g_i <- u_sigma_i
# Store draws
if (draw > burnin) {
draws_alpha[, draw - burnin] <- alpha
draws_beta[, draw - burnin] <- beta
draws_pi[, draw - burnin] <- Pi
draws_gamma[, draw - burnin] <- gamma
draws_sigma[, draw - burnin] <- u_sigma
}
}
# Number of non-deterministic coefficients
k_nondet <- (k_x - 4) * k
# Generate bvec object
bvec_est <- bvec(y = y, w = w, x = x,
Pi = draws_pi,
Gamma = draws_gamma[1:k_nondet,],
C = draws_gamma[(k_nondet + 1):nrow(draws_gamma),],
Sigma = draws_sigma)
# Thin posterior draws
bvec_est <- thin(bvec_est, thin = 4)
}
|
#' Draw random-vector variates from the Gamma distribution
#'
#' @param n number of draws (default 1)
#' @param shape shape parameter of the Gamma distribution
#' @param rate rate parameter; used only through the default of `scale`
#' @param scale scale parameter, defaulting to `1/rate`
#' @return the result of dispatching `stats::rgamma` through `rvvapply`
rvgamma <- function (n=1, shape, rate = 1, scale = 1/rate) {
  # rgamma is exported from stats, so access it with '::' — using ':::' on an
  # exported object is unnecessary and triggers an R CMD check NOTE.
  rvvapply(stats::rgamma, n.=n, shape=shape, scale=scale)
}
| /rv/R/rvgamma.R | no_license | ingted/R-Examples | R | false | false | 127 | r |
#' Draw random-vector variates from the Gamma distribution
#'
#' @param n number of draws (default 1)
#' @param shape shape parameter of the Gamma distribution
#' @param rate rate parameter; used only through the default of `scale`
#' @param scale scale parameter, defaulting to `1/rate`
#' @return the result of dispatching `stats::rgamma` through `rvvapply`
rvgamma <- function (n=1, shape, rate = 1, scale = 1/rate) {
  # rgamma is exported from stats, so access it with '::' — using ':::' on an
  # exported object is unnecessary and triggers an R CMD check NOTE.
  rvvapply(stats::rgamma, n.=n, shape=shape, scale=scale)
}
|
library(data.table)
library(tidyverse)
library(magrittr)
library(openxlsx)
# Keep character columns as characters when building data frames.
# Use the literal FALSE rather than F: F/T are ordinary bindings that can be
# reassigned, so relying on them is fragile.
options(stringsAsFactors = FALSE)
########### import dmp results ##################
# Genome-wide DMP (differentially methylated position) result tables,
# one per chemical (pfoa/pfos) and infant sex (f/m); each file has a header.
f_pfoa_DMP <- fread("/home/guanshim/Documents/gitlab/ECCHO_github/DataProcessed/genomewide_chem/2019-03-07_f_pfoa__CpGs_withChem.csv", header = TRUE)
f_pfos_DMP <- fread("/home/guanshim/Documents/gitlab/ECCHO_github/DataProcessed/genomewide_chem/2019-03-07_f_pfos__CpGs_withChem.csv", header = TRUE)
m_pfoa_DMP <- fread("/home/guanshim/Documents/gitlab/ECCHO_github/DataProcessed/genomewide_chem/2019-03-07_m_pfoa__CpGs_withChem.csv", header = TRUE)
m_pfos_DMP <- fread("/home/guanshim/Documents/gitlab/ECCHO_github/DataProcessed/genomewide_chem/2019-03-07_m_pfos__CpGs_withChem.csv", header = TRUE)
######## import top20 cpgs list ###############
# Folder holding the CpG lists reported by the external reference studies.
# NOTE(review): this shadows base::dir() for the rest of the script.
dir <- "~/Documents/gitlab/ECCHO_github/DataRaw/3chem_otherstudies/"
# rename the first column to ID
# Standardize the first column's name to "ID" (the CpG identifier) so that
# downstream joins on "ID" work regardless of the source table's header.
rename_ID <- function(data) {
  names(data)[1] <- "ID"
  data.frame(data)
}
# Read each reference study's reported CpG table, coerce to data.frame, and
# rename its first column to "ID" so it can be joined against our DMP tables.
# NOTE(review): the file name "pofa_miura_cpg.xlsx" looks like a typo for
# "pfoa" — confirm it matches the actual file on disk before renaming.
pfoa_miura <- read.xlsx( paste(dir, "pofa_miura_cpg.xlsx", sep = "") ) %>% as.data.frame() %>% rename_ID
pfos_miura <- read.xlsx( paste(dir, "pfos_miura_cpg.xlsx", sep = "") ) %>% as.data.frame() %>% rename_ID
pfoa_kingsley <- read.xlsx( paste(dir, "pfoa_kingsley.xlsx", sep = "") ) %>% as.data.frame() %>% rename_ID
## merge by ID and return the ID and raw p and FDR (BH) by gender and chem
# Join one external study's CpG list against our DMP results and export the
# raw p-value, BH-adjusted FDR, and beta-value change for those CpGs.
#
# dmp:    our DMP result table, joinable by the CpG "ID" column
# list:   data.frame of CpGs reported by an external study. The *name of the
#         argument expression* is captured via substitute() and parsed as
#         "<chem>_<study>", so callers must pass a variable named that way
#         (e.g. pfoa_miura).
# gender: label ("Female"/"Male") appended to the output column names.
#
# Side effect: writes "<dir><chem><study><gender>.csv"; also returns the table.
rawp_fdr <- function(dmp, list, gender) {
# get the chemical and study names from the caller's variable name
chem = (strsplit(as.character(substitute(list)), "_", fixed = TRUE) %>% unlist )[1]
study = (strsplit(as.character(substitute(list)), "_", fixed = TRUE) %>% unlist )[2]
data = plyr::join( list, dmp, by = "ID") %>% column_to_rownames("ID") %>%
# beta fc is the direction of the study
dplyr::select ( raw, fdr, betafc ) %>% as.matrix() %>% round( ., 7) %>% as.data.frame() %>%
rownames_to_column("ID")
colnames(data)[2:4] = c( paste("Raw p-value in our study:", gender),
paste("FDR (BH) in our study:", gender),
paste("Beta value change in our study", gender))
dir <- "~/Documents/gitlab/ECCHO_github/DataRaw/3chem_otherstudies/"
# Use TRUE/FALSE literals rather than T/F (reassignable bindings)
write.csv(data.frame(data), paste(dir, chem, study, gender,".csv", sep = ""), row.names = FALSE)
return(data.frame(data) )
}
# Export the comparison tables for every chemical x reference-study x infant
# sex combination; each call writes one CSV as a side effect.
rawp_fdr( f_pfoa_DMP, pfoa_miura, "Female")
rawp_fdr( m_pfoa_DMP, pfoa_miura, "Male")
rawp_fdr( f_pfos_DMP, pfos_miura, "Female")
rawp_fdr( m_pfos_DMP, pfos_miura, "Male")
rawp_fdr( f_pfoa_DMP, pfoa_kingsley, "Female")
rawp_fdr( m_pfoa_DMP, pfoa_kingsley, "Male")
| /Code/6_3_otherstudies_check.R | no_license | Guannan-Shen/ECCHO_github | R | false | false | 2,537 | r | library(data.table)
library(tidyverse)
library(magrittr)
library(openxlsx)
# Keep character columns as characters when building data frames.
# Use the literal FALSE rather than F: F/T are ordinary bindings that can be
# reassigned, so relying on them is fragile.
options(stringsAsFactors = FALSE)
########### import dmp results ##################
# Genome-wide DMP (differentially methylated position) result tables,
# one per chemical (pfoa/pfos) and infant sex (f/m); each file has a header.
f_pfoa_DMP <- fread("/home/guanshim/Documents/gitlab/ECCHO_github/DataProcessed/genomewide_chem/2019-03-07_f_pfoa__CpGs_withChem.csv", header = TRUE)
f_pfos_DMP <- fread("/home/guanshim/Documents/gitlab/ECCHO_github/DataProcessed/genomewide_chem/2019-03-07_f_pfos__CpGs_withChem.csv", header = TRUE)
m_pfoa_DMP <- fread("/home/guanshim/Documents/gitlab/ECCHO_github/DataProcessed/genomewide_chem/2019-03-07_m_pfoa__CpGs_withChem.csv", header = TRUE)
m_pfos_DMP <- fread("/home/guanshim/Documents/gitlab/ECCHO_github/DataProcessed/genomewide_chem/2019-03-07_m_pfos__CpGs_withChem.csv", header = TRUE)
######## import top20 cpgs list ###############
# Folder holding the CpG lists reported by the external reference studies.
# NOTE(review): this shadows base::dir() for the rest of the script.
dir <- "~/Documents/gitlab/ECCHO_github/DataRaw/3chem_otherstudies/"
# rename the first column to ID
# Standardize the first column's name to "ID" (the CpG identifier) so that
# downstream joins on "ID" work regardless of the source table's header.
rename_ID <- function(data) {
colnames(data)[1] = "ID"
return(data.frame(data) )
}
# Read each reference study's reported CpG table, coerce to data.frame, and
# rename its first column to "ID" so it can be joined against our DMP tables.
# NOTE(review): the file name "pofa_miura_cpg.xlsx" looks like a typo for
# "pfoa" — confirm it matches the actual file on disk before renaming.
pfoa_miura <- read.xlsx( paste(dir, "pofa_miura_cpg.xlsx", sep = "") ) %>% as.data.frame() %>% rename_ID
pfos_miura <- read.xlsx( paste(dir, "pfos_miura_cpg.xlsx", sep = "") ) %>% as.data.frame() %>% rename_ID
pfoa_kingsley <- read.xlsx( paste(dir, "pfoa_kingsley.xlsx", sep = "") ) %>% as.data.frame() %>% rename_ID
## merge by ID and return the ID and raw p and FDR (BH) by gender and chem
# Join one external study's CpG list against our DMP results and export the
# raw p-value, BH-adjusted FDR, and beta-value change for those CpGs.
#
# dmp:    our DMP result table, joinable by the CpG "ID" column
# list:   data.frame of CpGs reported by an external study. The *name of the
#         argument expression* is captured via substitute() and parsed as
#         "<chem>_<study>", so callers must pass a variable named that way
#         (e.g. pfoa_miura).
# gender: label ("Female"/"Male") appended to the output column names.
#
# Side effect: writes "<dir><chem><study><gender>.csv"; also returns the table.
rawp_fdr <- function(dmp, list, gender) {
# get the chemical and study names from the caller's variable name
chem = (strsplit(as.character(substitute(list)), "_", fixed = TRUE) %>% unlist )[1]
study = (strsplit(as.character(substitute(list)), "_", fixed = TRUE) %>% unlist )[2]
data = plyr::join( list, dmp, by = "ID") %>% column_to_rownames("ID") %>%
# beta fc is the direction of the study
dplyr::select ( raw, fdr, betafc ) %>% as.matrix() %>% round( ., 7) %>% as.data.frame() %>%
rownames_to_column("ID")
colnames(data)[2:4] = c( paste("Raw p-value in our study:", gender),
paste("FDR (BH) in our study:", gender),
paste("Beta value change in our study", gender))
dir <- "~/Documents/gitlab/ECCHO_github/DataRaw/3chem_otherstudies/"
# Use TRUE/FALSE literals rather than T/F (reassignable bindings)
write.csv(data.frame(data), paste(dir, chem, study, gender,".csv", sep = ""), row.names = FALSE)
return(data.frame(data) )
}
# Export the comparison tables for every chemical x reference-study x infant
# sex combination; each call writes one CSV as a side effect.
rawp_fdr( f_pfoa_DMP, pfoa_miura, "Female")
rawp_fdr( m_pfoa_DMP, pfoa_miura, "Male")
rawp_fdr( f_pfos_DMP, pfos_miura, "Female")
rawp_fdr( m_pfos_DMP, pfos_miura, "Male")
rawp_fdr( f_pfoa_DMP, pfoa_kingsley, "Female")
rawp_fdr( m_pfoa_DMP, pfoa_kingsley, "Male")
|
# Auto-generated fuzz-test case: builds a crafted argument list (mixing
# zero-length vectors with extreme numeric values) and feeds it to the
# internal meteor:::ET0_PenmanMonteith, then prints the structure of the
# result. Only runnable with the 'meteor' package installed; note the ':::'
# access to an unexported function.
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 1.5912511542407e+135, 2.33179059610651e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result)
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
# Auto-generated fuzz-test case for the internal mcga:::ByteCodeMutation:
# calls it with a fixed byte vector and zero mutation probability, then
# prints the structure of the result. Only runnable with the 'mcga'
# package installed; note the ':::' access to an unexported function.
testlist <- list(bytes1 = c(-1414812757L, 0L, 0L, 0L), pmutation = 0)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) |
#----> This is an All In One Presentation and Interactive Customer Data Analysis Application-----------
# Badass function that checks to see if the package is installed, installs it or loads the library...Totally Awesome
# Ensure package `p` is available and attached: install it first when it is
# missing from the local library, then attach it via require().
# Returns TRUE/FALSE invisibly-style like require() does.
usePackage <- function(p)
{
  installed <- installed.packages()[, 1]
  if (!(p %in% installed)) {
    install.packages(p, dep = TRUE)
  }
  require(p, character.only = TRUE)
}
#---------Using the usePackage Function instead of library-----------------------------
usePackage("shiny")
usePackage("DT")
usePackage("data.table")
#usePackage("arules")
#usePackage("arulesViz")
usePackage("crosstalk")
usePackage("caret")
usePackage("ggplot2")
usePackage("dplyr")
usePackage("tidyr")
usePackage("Hmisc")
usePackage("pastecs")
usePackage("psych")
usePackage("mice")
usePackage("VIM")
#usePackage(remove.na)
#usePackage(lubridate)
#usePackage(data.table)
#usePackage(tibble)
usePackage("rlang")
usePackage("gridExtra")
usePackage("lubridate")
usePackage("chron")
usePackage("zoo")
usePackage("forecast")
usePackage("reshape")
usePackage("prophet")
usePackage("xts")
usePackage("grid")
usePackage("ggfortify")
#--------------------------Time to get this party started!!!!!! WOOOOOOOOOOO
# autoplot
# _____ _ __ ___ _ __ __ _ _ ___
# | __ \ | | \ \ / / | ( ) | \/ | | \ | | /\ |__ \
# | | | |_ _ __| | ___ \ \ /\ / /| |__ ___ _ __ ___|/ ___ | \ / |_ _ | \| | / \ ) |
# | | | | | | |/ _` |/ _ \ \ \/ \/ / | '_ \ / _ \ '__/ _ \ / __| | |\/| | | | | | . ` | / /\ \ / /
# | |__| | |_| | (_| | __/_ \ /\ / | | | | __/ | | __/ \__ \ | | | | |_| | | |\ |/ ____ \ |_|
# |_____/ \__,_|\__,_|\___( ) \/ \/ |_| |_|\___|_| \___| |___/ |_| |_|\__, | |_| \_/_/ \_\ (_)
# |/ __/ |
# |___/
#
# ---------------> Define some critical variables first....These are Default Values---------------------------------
# Default analysis window (overridden by the UI date-range inputs)
dateStart = as.POSIXct("2006-12-16 17:24:00","%Y-%m-%d %H:%M:%S", tz = "") #start day and time
dateEnd = as.POSIXct("2010-11-26 21:02:00","%Y-%m-%d %H:%M:%S", tz = "") #end day and time
# Leftover association-rule defaults (support/confidence/#vars); unused while
# the arules panels remain commented out
supp=0.1
conf=0.5
vars=50
########------------> Use the following if you want to browse for a file (set as default)
# NOTE(review): no na.strings argument is passed — the UCI power dataset marks
# missing values with "?"; confirm the as.numeric() conversions below yield NA
# (and not factor level codes) on the R version in use.
import = read.csv("household_power_consumption1.txt", sep=";")
#######-------------> Use the following if you want to lock in a file (disabled by default)
#----> Testing Dataset--------
#import = read.csv("https://drive.google.com/uc?id=1a1yWld2tiv6Aw0lE3nhH_aRWM54r75AN", sep=";")
####--------> Rename the raw dataset columns to short, readable names-------------------------------
setnames(import, old=c("Global_active_power", "Global_reactive_power", "Global_intensity","Sub_metering_1","Sub_metering_2", "Sub_metering_3"), new=c("Active", "Reactive", "Amps", "Kitchen", "Laundry", "HVAC"))
#---------> Convert Date ---------------------------------------------------------------------------------#######
import$Date <- as.Date(import$Date, "%d/%m/%Y", tz = "")
######------> Creating one date+time column called DateTime############################################################
import <-cbind(import,paste(import$Date,import$Time), stringsAsFactors=FALSE)
colnames(import)[10] <-"DateTime"
# Move the new DateTime column to the front of the data frame
import <- import[,c(ncol(import), 1:(ncol(import)-1))]
######------> Converting date and Time format #########################################################################
import$DateTime <- strptime(import$DateTime, "%Y-%m-%d %H:%M:%S", tz ="") # Converts the string to a Date/Time Object
import$DateTime = as.POSIXct(import$DateTime, tz = "") # Re-cast POSIXlt -> POSIXct (original author's note: works around a DST issue)
#--------> Convert 3,4,5,6,7,8,9 to numeric (And Normalize Some of them)------- They are not Factors ##############################################
import$Active = as.numeric(import$Active)
import$Reactive = as.numeric(import$Reactive)
import$Voltage = as.numeric(import$Voltage)
import$Amps = as.numeric(import$Amps)
import$Kitchen = as.numeric(import$Kitchen)
# Sub-meter readings are divided by 1000 — presumably Wh -> kWh; confirm units
import$Kitchen = import$Kitchen/1000
import$Laundry = as.numeric(import$Laundry)
import$Laundry = import$Laundry/1000
import$HVAC = as.numeric(import$HVAC)
import$HVAC = import$HVAC/1000
#-------> Remove NA's ---------------------------
cleaned = na.omit(import) #removed all of the NA's
#cleaned = rm.outlier(cleaned$Active, fill = FALSE, median = FALSE, opposite = FALSE)
# Weekday name, used by the (partly commented-out) weekday/weekend filters
cleaned$day = weekdays(as.Date(cleaned$DateTime))
# Sidebar strings showing the date coverage of the loaded data
startDS = paste("Start of Data: ",as.character(cleaned$Date[1]))
endDS = paste("End of Data: ", as.character(cleaned$Date[as.numeric(length(cleaned$Date))]))
# Define first 1/2 of Shiny app....The User Interface!!! Awwwwww Yeah!
# Shiny UI definition: a navbar with Home, Presentation, Analysis,
# Predictions and Take Aways pages. Input ids defined here (daterange, time,
# light, dateSelect, predict, series, interval) are consumed by the reactive
# expressions in server(); output ids are rendered there as well.
ui <- fluidPage(theme = "bootstrap.css",
#---------> Creating a NavBar for the top of the page-------------------------------------------------------------------------------------------------
navbarPage(
#---------> Insert the Company Logo at the top left....Like a Boss! -------------------------------------------------------------------------------------------------
img(src='http://res.cloudinary.com/x78163/image/upload/v1510921813/IOT_p5bgjo.png', align = "left", width="150", height="30"),
#---------> Creating a Home Tabs for the top of the page---------------------------------------------------------------------------
# Landing page: static branding images and headings only, no inputs/outputs
tabPanel("Home" ,
h1("Ubiqum Data Science Consultants", align = "center"),
HTML('<center><img src="http://cultofthepartyparrot.com/assets/sirocco.gif" style ="width="300", height="300"></center>'),
#HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1512060481/partyparrot_lcjgj2.gif" style ="width="300", height="300"></center>'),
# HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510907256/DS_logo_rmmtbo.png" style ="width="300", height="300"></center>'),
h3("Time to make your data party like a parrot!!!!!", align = "center"),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510907481/headshot_foglex.png" style ="width="100", height="100"></center>')
),
#---------> Creating a Presentation Tabs for the top of the page-------------------------------------------------------------------------------------------------
# Embeds a slide deck hosted on OneDrive via an iframe
tabPanel("Presentation",
#---------> Code to Insert a Powerpoint Presentation-----------------------------------------------------------------------------------------------------------------------
tags$iframe(style="height:50vw; width:90vw; scrolling=no",
src="https://onedrive.live.com/embed?cid=D091F528EDB75B0A&resid=D091F528EDB75B0A%2111092&authkey=AJlOeVwrPeQJKDc&em=2")),
# <iframe src="https://onedrive.live.com/embed?cid=D091F528EDB75B0A&resid=D091F528EDB75B0A%2111092&authkey=AJlOeVwrPeQJKDc&em=2" width="402" height="327" frameborder="0" scrolling="no"></iframe>
#---------> Creating an Analysis Tabs for the top of the page---------------------------------------------------------------------------------------------------------------------
# Exploration page: date-range / time-of-day / day-night filter inputs in the
# sidebar drive the reactive dataset; main panel shows tables and plots
tabPanel("Our Analysis",
sidebarPanel(
# img(src='http://res.cloudinary.com/x78163/image/upload/v1510825161/DS_logo_rsoctl.png', align = "center", width="250", height="250"),
tags$br(),
conditionalPanel(
# NOTE(review): the JS condition string below contains a stray apostrophe
# after %in% and looks malformed — confirm this panel displays as intended.
condition = "input.mytab %in%' c('grouped', 'graph', 'table', 'datatable', 'scatter', 'paracoord', 'matrix', 'itemFreq')",
# radioButtons('samp', label='Sample', choices=c('All Rules', 'Sample'), inline=T), br(),
#uiOutput("choose_columns"), br(),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510908400/Calendar_f5yruq.png" style ="width="150", height="100"></center>'),
tags$br(),
startDS,
tags$br(),
endDS,
tags$br(),
# Date window filter (defaults to January 2007)
dateRangeInput("daterange", "Date range:",
start = "2007-01-01",
end = "2007-01-31"),
# sliderInput("supp", "Support:", min = "2007-12-05 01:00:00", max = "2007-11-05 01:00:00", value = c(dateStart,dateEnd) ), br(),
tags$br(),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510908508/Clock_l3ktu9.png" style ="width="75", height="100"></center>'),
tags$br(),
tags$br(),
# Time-of-day filter expressed as HHMM integers
sliderInput("time", "Time (0000-2359):",
min = 0, max = 2359, step = 100,
value = c(200,500)),
#sliderInput("conf", "Confidence:", min = 0, max = 1, value = conf , step = 1/100), br(),
tags$br(),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510908579/sunmoon_ybnl63.png" style ="width="100", height="100"></center>'),
tags$br(),
tags$br(),
selectInput('light', label='Filter by Day/Night', choices = c('All Day','Day', 'Night')), br(), br()
# selectInput('week', label='Filter by Weekend/Weekday', choices = c('All','Weekend', 'Weekday')), br(), br(),
# numericInput("minL", "Min. items per set:", 2), br(),
# numericInput("maxL", "Max. items per set::", 3), br(),
# radioButtons('lhsv', label='LHS variables', choices=c('All', 'Subset')), br(),
# radioButtons('rhsv', label='RHS variables', choices=c('All', 'Subset')), br(),
# downloadButton('downloadPlot', 'Download Charts')
)
),
mainPanel(
tabsetPanel(
tabPanel("Dataset", DT::dataTableOutput("rules")),
tabPanel("Combined Plot", plotOutput("combined"),downloadButton('downloadCombined', 'Download Charts')),
tabPanel("Histograms", plotOutput("hist"),downloadButton('downloadHist', 'Download Charts')),
navbarMenu("Active/Reactive Plots",
tabPanel("Overview", plotOutput("activeoverview", click = "plot1_click", brush = brushOpts(id = "plot1_brush"))),
tabPanel("Costs", plotOutput("activecosts"),downloadButton('downloadActiveCosts', 'Download Charts'))),
navbarMenu("Submeter Plots",
tabPanel("Overview", plotOutput("suboverview"),downloadButton('downloadSubOverview', 'Download Charts')),
tabPanel("Costs", plotOutput("subcost"),downloadButton('downloadSubCost', 'Download Charts'))),
tabPanel("Dashboard", textOutput("dash"))
)
)
),
# Forecasting page: choose series, aggregation interval and horizon;
# main panel shows the linear / decomposition / Holt-Winters / prophet plots
tabPanel("Predictions",
sidebarPanel(
# img(src='http://res.cloudinary.com/x78163/image/upload/v1510825161/DS_logo_rsoctl.png', align = "center", width="250", height="250"),
tags$br(),
conditionalPanel(
# NOTE(review): same malformed-looking JS condition as on the Analysis tab.
condition = "input.mytab %in%' c('grouped', 'graph', 'table', 'datatable', 'scatter', 'paracoord', 'matrix', 'itemFreq')",
HTML('<center><img src="http://www.freepngimg.com/download/calendar/4-2-calendar-png-hd.png" style ="width="150", height="100"></center>'),
tags$br(),
tags$br(),
startDS,
tags$br(),
endDS,
tags$br(),
dateRangeInput("dateSelect", "Select Date Range:",
start = "2007-01-01",
end = "2007-01-31"),
tags$br(),
HTML('<center><img src="http://iconbug.com/data/95/256/8696325e0e7407823058632e68fb5970.png" style ="width="75", height="100"></center>'),
tags$br(),
tags$br(),
# Forecast horizon in days
sliderInput("predict", "Number of Days to Predict:",
min = 0, max = 60, step = 1,
value = 30),
#sliderInput("conf", "Confidence:", min = 0, max = 1, value = conf , step = 1/100), br(),
tags$br(),
HTML('<center><img src="http://www.clker.com/cliparts/e/0/7/1/1197123164299010126JPortugall_icon_bus_bar.svg.med.png" style ="width="100", height="100"></center>'),
tags$br(),
tags$br(),
selectInput('series', label='Choose a Data Series to Predict', choices = c('Global Active','Global Reactive', 'Amps', 'Kitchen', 'Laundry', 'HVAC')), br(), br(),
selectInput('interval', label='Choose a time Interval', choices = c('Every Day (Default)','Every Second (please no)','Every Minute (poor computer)', 'Every Hour', 'Every Week')), br()
)
),
mainPanel(
tabsetPanel(
tabPanel("Linear Forecasts", plotOutput("linearForecast")),
tabPanel("Time Series Decomposition", plotOutput("tsDecomp")),
tabPanel("Holt-Winters Predictions", plotOutput("hwPrediction")),
tabPanel("Prophet Predictions", plotOutput("prophetPrediction"),downloadButton('downloadprophet', 'Download Charts'))
)
)
),
# Static conclusions / recommendations page
tabPanel("Take Aways",
h1("Recommendation: Install The Submeters In The Housing Project", align = "center"),
tags$br(),
tags$br(),
tags$ol(margin= "20%",
tags$li("Housing Community Meets 'Renewable Energy' Reactive Power Guidelines"),
tags$br(),
tags$li("Builder Can Re-Allocate Equipment to Optimize Material Costs"),
tags$br(),
tags$li("Home Owners Can Save Money With Smart Home Appliances (Off-Peak Power Usage)"),
tags$br(),
tags$li("Home Owners Can Remotely Monitor Their Home (Identify Failing Systems)"),
tags$br(),
tags$li("Home Owners Consume More Power In Winter (Consider Gas Heating)")
),
tags$br(),
tags$br(),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510907256/DS_logo_rmmtbo.png" style ="width="300", height="300"></center>')
)
# titlePanel("Recommendation: Install The Submeters In The Housing Project")
)
)
# Mandatory 2/2 Shiny App requirement---> server call
server <- function(input, output) {
###----> Do some awesome magic to make all of the charts reactive. Basically, run Apriori when you move the slider! WOOOOOOO!!
#apriori (import, parameter = list(supp=input$supp,conf = input$conf, minlen= input$minL, maxlen=input$maxL))
# Reactive data source: applies the UI's date-range and day/night/hour
# filters to the globally cleaned dataset.
# Returns the filtered data frame; consumed as dataInput() by every chart.
dataInput <- reactive({
  # Restrict to the selected calendar date range first.
  dates = filter(cleaned, cleaned$DateTime >= input$daterange[1] & cleaned$DateTime <= input$daterange[2])
  results = dates
  # Night is defined as 18:30 -> 06:30.  format= is given explicitly so the
  # string is parsed as a time (positionally it would fall through `...`).
  nightStart = as.POSIXct("18:30:00", format = "%H:%M:%S", tz = "")
  nightEnd   = as.POSIXct("06:30:00", format = "%H:%M:%S", tz = "")
  if (input$light == "All Day") {
    # Keep only observations inside the selected hour range.
    results = filter(dates, as.numeric(dates$Time) >= input$time[1] & as.numeric(dates$Time) <= input$time[2])
  }
  if (input$light == "Day") {
    dates$Time = as.POSIXct(dates$Time, format = "%H:%M:%S", tz = "")
    # Daytime: between 06:30 and 18:30.  (Fixed: the original condition here
    # selected the night interval -- the Day/Night branches were swapped.)
    results = filter(dates, dates$Time <= nightStart & dates$Time > nightEnd)
  }
  if (input$light == "Night") {
    dates$Time = as.POSIXct(dates$Time, format = "%H:%M:%S", tz = "")
    # Night-time: after 18:30 or before 06:30.
    results = filter(dates, dates$Time > nightStart | dates$Time <= nightEnd)
  }
  # Last expression is the reactive's return value.
  results
})
############################################## Prediction Space ################################################
# Aggregates the selected series at the selected interval.
# Returns a data frame with columns ds (period-end timestamp, from the xts
# rownames) and y (mean of the series over that period).
predictInput <- reactive({
  # UI label -> endpoints() period name (default: days).
  intervalSelected = switch(input$interval,
    "Every Day (Default)"      = "days",
    "Every Second (please no)" = "seconds",
    "Every Minute (please no)" = "minutes",
    "Every Hour"               = "hours",
    "Every Week"               = "weeks",
    "days")
  # UI label -> column of cleanTail (default: Active).
  seriesColumn = switch(input$series,
    "Global Active"   = "Active",
    "Global Reactive" = "Reactive",
    "Amps"            = "Amps",
    "Kitchen"         = "Kitchen",
    "Laundry"         = "Laundry",
    "HVAC"            = "HVAC",
    "Active")
  y  = cleanTail[[seriesColumn]]
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts so we can aggregate by period
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries
  periodMean = period.apply(dat.xts, ep, mean)   # mean per period
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # rownames (timestamps) -> column
  colnames(meanDF) = c("ds", "y")
  # Fixed: return the aggregated frame explicitly.  The original ended on the
  # colnames()<- assignment, so the reactive returned c("ds","y") instead of
  # the data.
  meanDF
})
#---------------------------------> <----------------------------
#---------------------------------> <----------------------------
# output$tsDecomp = renderPlot({
#
# #---------------------> Moving Average Additive---------------------------------------------
#
#
#
#
#
# #---------------------> Moving Average Multiplicative---------------------------------------------
#
#
# demand = predictInput()$demand12
#
# trend_air = ma(demand, order = 12, centre = T)
# plot(as.ts(demand))
# lines(trend_air)
# plot(as.ts(trend_air))
#
#
#
# #---------------------> Seasonal decomposed Data Addititive--------------------------
# cleanTail = cleaned
# y = cleanTail$Active
# ds = cleanTail$DateTime
# dat.xts <- xts(x = y,as.POSIXct(ds)) #convert to xts format for maximum awesomeness
# head(dat.xts) #quick systems check to make sure it is working
# ep <- endpoints(dat.xts,'days') #our endpoints will be hours (so hourly) and using endpoint function
# dailyMean = period.apply(dat.xts,ep,mean) #applying the mean function to the defined period (hourly)
# dailyMean = as.data.frame(dailyMean) #converting to dataframe
# dailyMeanDF = data.matrix(as.data.frame(dailyMean)) #shifting to matrix
# dailyMeanDF = as.data.frame(dailyMeanDF) #Pulling back into dataframe
# dailyMeanDF = setDT(dailyMeanDF, keep.rownames = TRUE)[] #making the row names into a column
# colnames(dailyMeanDF) = c("ds", "y") #Applying labels from above
# demand <- ts(dailyMeanDF$y, frequency = 4)
#
#
# decompose_beer = decompose(demand, "additive")
#
# plot(as.ts(decompose_beer$seasonal))
# plot(as.ts(decompose_beer$trend))
# plot(as.ts(decompose_beer$random))
# plot(decompose_beer)
#
#
# #---------------------> Seasonal decomposed Data Multiplicative--------------------------
# cleanTail = cleaned
# y = cleanTail$Active
# ds = cleanTail$DateTime
# dat.xts <- xts(x = y,as.POSIXct(ds)) #convert to xts format for maximum awesomeness
# head(dat.xts) #quick systems check to make sure it is working
# ep <- endpoints(dat.xts,'days') #our endpoints will be hours (so hourly) and using endpoint function
# dailyMean = period.apply(dat.xts,ep,mean) #applying the mean function to the defined period (hourly)
# dailyMean = as.data.frame(dailyMean) #converting to dataframe
# dailyMeanDF = data.matrix(as.data.frame(dailyMean)) #shifting to matrix
# dailyMeanDF = as.data.frame(dailyMeanDF) #Pulling back into dataframe
# dailyMeanDF = setDT(dailyMeanDF, keep.rownames = TRUE)[] #making the row names into a column
# colnames(dailyMeanDF) = c("ds", "y") #Applying labels from above
# demand <- ts(dailyMeanDF$y, frequency = 12)
#
#
#
# decompose_air = decompose(demand, "multiplicative")
#
# plot(as.ts(decompose_air$seasonal))
# plot(as.ts(decompose_air$trend))
# plot(as.ts(decompose_air$random))
# plot(decompose_air)
#
#
# #---------------------> Using STL ------------------------------------------------------
#
# cleanTail = cleaned
# y = cleanTail$Active
# ds = cleanTail$DateTime
# dat.xts <- xts(x = y,as.POSIXct(ds)) #convert to xts format for maximum awesomeness
# head(dat.xts) #quick systems check to make sure it is working
# ep <- endpoints(dat.xts,'days') #our endpoints will be hours (so hourly) and using endpoint function
# dailyMean = period.apply(dat.xts,ep,mean) #applying the mean function to the defined period (hourly)
# dailyMean = as.data.frame(dailyMean) #converting to dataframe
# dailyMeanDF = data.matrix(as.data.frame(dailyMean)) #shifting to matrix
# dailyMeanDF = as.data.frame(dailyMeanDF) #Pulling back into dataframe
# dailyMeanDF = setDT(dailyMeanDF, keep.rownames = TRUE)[] #making the row names into a column
# colnames(dailyMeanDF) = c("ds", "y") #Applying labels from above
# demand <- ts(dailyMeanDF$y, frequency = 4)
#
#
#
#
# stl_beer = stl(demand, "periodic")
# seasonal_stl_beer <- stl_beer$time.series[,1]
# trend_stl_beer <- stl_beer$time.series[,2]
# random_stl_beer <- stl_beer$time.series[,3]
#
# plot(demand)
# plot(as.ts(seasonal_stl_beer))
# plot(trend_stl_beer)
# plot(random_stl_beer)
# plot(stl_beer)
#
#
# })
#---------------------------------> <----------------------------
# "linearForecast" panel builder.  Despite the name, this draws a classical
# multiplicative decomposition (stats::decompose) of the selected series,
# aggregated to the selected interval over the chosen date window.
linearInput = reactive({
  cleanTail = filter(cleaned, cleaned$DateTime >= input$dateSelect[1] & cleaned$DateTime <= input$dateSelect[2])
  # UI label -> endpoints() period name (default: hours).
  intervalSelected = switch(input$interval,
    "Every Day (Default)"      = "days",
    "Every Second (please no)" = "seconds",
    "Every Minute (please no)" = "minutes",
    "Every Hour"               = "hours",
    "Every Week"               = "weeks",
    "hours")
  # UI label -> column of the filtered data (default: Active).
  seriesColumn = switch(input$series,
    "Global Active"   = "Active",
    "Global Reactive" = "Reactive",
    "Amps"            = "Amps",
    "Kitchen"         = "Kitchen",
    "Laundry"         = "Laundry",
    "HVAC"            = "HVAC",
    "Active")
  y  = cleanTail[[seriesColumn]]
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts for period aggregation
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries
  periodMean = period.apply(dat.xts, ep, mean)   # mean per period
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # timestamps (rownames) -> column
  colnames(meanDF) = c("ds", "y")
  demand <- ts(meanDF$y, frequency = 12)         # 12 observations per seasonal cycle
  decompose_air = decompose(demand, "multiplicative")
  # renderPlot displays only the last plot; the intermediate calls are kept
  # for parity with the original behaviour.
  plot(as.ts(decompose_air$seasonal))
  plot(as.ts(decompose_air$trend))
  plot(as.ts(decompose_air$random))
  plot(decompose_air)
})
# Render the multiplicative-decomposition figure built by linearInput().
output$linearForecast = renderPlot({
linearInput()
})
# STL (seasonal-trend decomposition by loess) of the selected series,
# aggregated to the selected interval over the chosen date window.
stlInput <- reactive({
  cleanTail = filter(cleaned, cleaned$DateTime >= input$dateSelect[1] & cleaned$DateTime <= input$dateSelect[2])
  # UI label -> endpoints() period name (default: hours).
  intervalSelected = switch(input$interval,
    "Every Day (Default)"      = "days",
    "Every Second (please no)" = "seconds",
    "Every Minute (please no)" = "minutes",
    "Every Hour"               = "hours",
    "Every Week"               = "weeks",
    "hours")
  # UI label -> column of the filtered data (default: Active).
  seriesColumn = switch(input$series,
    "Global Active"   = "Active",
    "Global Reactive" = "Reactive",
    "Amps"            = "Amps",
    "Kitchen"         = "Kitchen",
    "Laundry"         = "Laundry",
    "HVAC"            = "HVAC",
    "Active")
  y  = cleanTail[[seriesColumn]]
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts for period aggregation
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries
  periodMean = period.apply(dat.xts, ep, mean)   # mean per period
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # timestamps (rownames) -> column
  colnames(meanDF) = c("ds", "y")
  demand <- ts(meanDF$y, frequency = 4)          # 4 observations per seasonal cycle
  stl_beer = stl(demand, "periodic")             # s.window = "periodic"
  seasonal_stl_beer <- stl_beer$time.series[, 1]
  trend_stl_beer    <- stl_beer$time.series[, 2]
  random_stl_beer   <- stl_beer$time.series[, 3]
  # renderPlot displays only the last plot; the intermediate calls are kept
  # for parity with the original behaviour.
  plot(demand)
  plot(as.ts(seasonal_stl_beer))
  plot(trend_stl_beer)
  plot(random_stl_beer)
  plot(stl_beer)
})
# Render the STL decomposition figure built by stlInput().
output$tsDecomp = renderPlot({
stlInput()
})
# Holt-Winters forecast of the selected series.
# NOTE(review): the date-window filter is commented out in the original, so
# the model is fitted on the full cleaned dataset -- behaviour kept as-is.
hwInput <- reactive({
  cleanTail = cleaned
  predictRange = as.numeric(input$predict)       # number of periods to forecast
  # UI label -> endpoints() period name (default: hours).
  intervalSelected = switch(input$interval,
    "Every Day (Default)"      = "days",
    "Every Second (please no)" = "seconds",
    "Every Minute (please no)" = "minutes",
    "Every Hour"               = "hours",
    "Every Week"               = "weeks",
    "hours")
  # UI label -> column of the data (default: Active).
  seriesColumn = switch(input$series,
    "Global Active"   = "Active",
    "Global Reactive" = "Reactive",
    "Amps"            = "Amps",
    "Kitchen"         = "Kitchen",
    "Laundry"         = "Laundry",
    "HVAC"            = "HVAC",
    "Active")
  y  = cleanTail[[seriesColumn]]
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts for period aggregation
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries
  periodMean = period.apply(dat.xts, ep, mean)   # mean per period
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # timestamps (rownames) -> column
  colnames(meanDF) = c("ds", "y")
  demand <- ts(meanDF$y, frequency = 4)          # 4 observations per seasonal cycle
  hw <- HoltWinters(demand)
  # 95% prediction interval over the requested horizon.
  forecast <- predict(hw, n.ahead = predictRange, prediction.interval = TRUE, level = 0.95)
  autoplot(forecast)
})
# Render the Holt-Winters forecast figure built by hwInput().
output$hwPrediction = renderPlot({
hwInput()
})
# Prophet forecast of the selected series, aggregated to the selected
# interval over the chosen date window.
prophetInput <- reactive({
  cleanTail = filter(cleaned, cleaned$DateTime >= input$dateSelect[1] & cleaned$DateTime <= input$dateSelect[2])
  predictRange = as.numeric(input$predict)       # number of periods to forecast
  # UI label -> endpoints() period name (default: hours).
  intervalSelected = switch(input$interval,
    "Every Day (Default)"      = "days",
    "Every Second (please no)" = "seconds",
    "Every Minute (please no)" = "minutes",
    "Every Hour"               = "hours",
    "Every Week"               = "weeks",
    "hours")
  # UI label -> column of the filtered data (default: Active).
  seriesColumn = switch(input$series,
    "Global Active"   = "Active",
    "Global Reactive" = "Reactive",
    "Amps"            = "Amps",
    "Kitchen"         = "Kitchen",
    "Laundry"         = "Laundry",
    "HVAC"            = "HVAC",
    "Active")
  y  = cleanTail[[seriesColumn]]
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts for period aggregation
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries
  periodMean = period.apply(dat.xts, ep, mean)   # mean per period
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # timestamps (rownames) -> column
  colnames(meanDF) = c("ds", "y")
  # prophet() requires a data frame with exactly ds/y columns.
  forecasting = data.frame(ds = meanDF$ds, y = meanDF$y)
  prophetPredictions = prophet(forecasting)      # fit the model
  # Extend the frame predictRange periods past the observed data.
  future = make_future_dataframe(prophetPredictions, periods = predictRange)
  forecast = predict(prophetPredictions, future) # apply the model
  plot(prophetPredictions, forecast, ylab = "Active Watt Hours", xlab = "Date", main = "30 Day Prediction with Prophet")
})
#---------------------------------> <----------------------------
# Render the Prophet forecast figure built by prophetInput().
output$prophetPrediction = renderPlot({
prophetInput()
})
############################################### End of Prediction Space #################################################
###-----------> Output a table showing the frequency of the items.
# Text summary of power consumed and cost over the selected date range.
output$dash = renderText({
  price = dataInput()
  pricekwpeak = .159      # $/kWh, peak tariff
  pricekwoffpeak = .1252  # $/kWh, off-peak tariff (currently unused)
  price$price1 = price$Kitchen*pricekwpeak
  price$price2 = price$Laundry*pricekwpeak
  price$price3 = price$HVAC*pricekwpeak
  # NOTE(review): Active is scaled by 1e4 here but 1e5 below; the two
  # normalisations disagree -- confirm the intended unit conversion.
  price$priceTot = (price$Active)/10000*pricekwpeak
  #-------Total amounts of power for each region
  sumKW1 = sum( price$Kitchen)
  sumKW2 = sum( price$Laundry)
  sumKW3 = sum( price$HVAC)
  sumKWTot = sum( price$Active)/100000
  #--------Total costs for each region
  sumP1 = sum( price$price1)
  sumP2 = sum( price$price2)
  sumP3 = sum( price$price3)
  sumPTot = sum( price$priceTot)
  #------------Totals across the sub-metered areas
  sumSubKW = sum(sumKW1,sumKW2,sumKW3)
  sumSubP = sum(sumP1,sumP2,sumP3)
  # Fixed: a share expressed as a percentage is ratio * 100 (was * 10).
  totPercent = (sumSubKW/sumKWTot)*100
  paste("For the selected date range of",input$daterange[1], "to", input$daterange[2], " a total of ", sumKWTot, "(kW) consumed. This cost approximately ", sumPTot , " dollars. The Kitchen used: ", sumKW1, " Watts and cost: ", sumP1, " dollars. The Laundry Room used: ", sumKW2, " Watts and cost: ", sumP2, " dollars. The HVAC used: ", sumKW3, " Watts and cost: ", sumP3, " dollars. In total, the sub metered areas cost" , sumSubP, " dollars, and represented ", totPercent, "% of the total house power consumption.")
})
# Overlay of all (normalised) series over the selected range.  Legend colour
# mapping: Active=brown, Reactive=green, Volts=purple, Amps=orange,
# Kitchen=black (base plot), Laundry=red, HVAC=blue.
output$combined = renderPlot({
  plot(dataInput()$DateTime, dataInput()$Kitchen, type="l", ylab="KiloWatts", xlab="Date Range", main = "Normalized Plot of Entire Dataset")
  lines(dataInput()$DateTime, dataInput()$Laundry, type="l", col="red")
  lines(dataInput()$DateTime, dataInput()$HVAC, type="l", col="blue")
  lines(dataInput()$DateTime, dataInput()$Reactive/10000, type="l", col="green")
  # Fixed: this line previously re-plotted Reactive, so the Active series the
  # legend promises in brown was never drawn.
  lines(dataInput()$DateTime, dataInput()$Active/10000, type="l", col="brown")
  lines(dataInput()$DateTime, dataInput()$Voltage/1000, type="l", col="purple")
  lines(dataInput()$DateTime, dataInput()$Amps, type="l", col="orange")
  legend("topright", c("Active","Reactive","Volts", "Amps","Kitchen", "Laundry", "HVAC" ), lty=1, lwd=2.5, col=c("brown", "green", "purple", "orange", "black", "red", "blue"))
})
###------------> output the rules, and make them sortable by the drop down magic box thingy
# Interactive, sortable table of the filtered observations.  The name
# "rules" is a leftover from an earlier association-rules (apriori) version.
output$rules = DT:: renderDataTable({
dataInput()
})
###------------> Output the cool interactive scatterplot
# Line plot of the whole-house series (Active/Reactive/Voltage/Amps),
# rescaled so they share a single axis.
# NOTE(review): Reactive is divided by 100 while the others use 1000 --
# confirm the intended normalisation.
output$activeoverview = renderPlot({
plot(dataInput()$DateTime, dataInput()$Active/1000, type="l", ylab="KiloWatt Hours", xlab="Date Range", main = "Normalized Plot of Daily Power Usage")
lines(dataInput()$DateTime, dataInput()$Reactive/100, type="l", col="green")
lines(dataInput()$DateTime, dataInput()$Voltage/1000, type="l", col="purple")
lines(dataInput()$DateTime, dataInput()$Amps/1000, type="l", col="orange")
legend("topright", c("Active","Reactive","Volts", "Amps" ), lty=1, lwd=2.5, col=c("black", "green", "purple", "orange"))
})
###------------------> Submeter Overview Chart-------------------
# Line plot of the three sub-metered rooms over the selected range.
output$suboverview = renderPlot({
plot(dataInput()$DateTime, dataInput()$Kitchen, type="l", ylab="Kilowatt Hours", xlab="Date Range", main ="Plot of Kitchen, Laundry and HVAC Requirements")
lines(dataInput()$DateTime, dataInput()$Laundry, type="l", col="red")
lines(dataInput()$DateTime, dataInput()$HVAC, type="l", col="blue")
legend("topright", c("Kitchen", "Laundry", "HVAC" ), lty=1, lwd=2.5, col=c( "black", "red", "blue"))
})
###------------> Boring Histograms
# Two stacked histogram panels: whole-house Active/Reactive levels (top) and
# the three sub-metered rooms (bottom).
# NOTE(review): near-duplicate of plotInput() below; consider sharing one
# implementation once the minor label differences are reconciled.
output$hist = renderPlot({
sampled = dataInput()
actReact = ggplot(sampled, aes( sampled$Active/1000), fill = "blue") +
geom_histogram() +
geom_histogram(data = sampled, aes( sampled$Reactive/1000), fill = "red") + labs(title = "Most Frequent Power Level (Home) ", subtitle = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ), x = "KiloWatts (Normalized)", y = "Number of Observations")
sub = ggplot(sampled, aes( sampled$Kitchen), fill = "purple") +
geom_histogram() +
geom_histogram(data = sampled, aes( sampled$Laundry), fill = "red") +
geom_histogram(data = sampled, aes( sampled$HVAC), fill = "green")+ labs(title = "Most Frequent Power Levels (Submetered Rooms)", subtitle = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ), x = "KiloWatts (Normalized)", y = "Number of Observations")
# plot1 = hist(dataInput()$Active/100, col=rgb(1,0,0,0.5), main="Histogram of Active and Reactive Power", xlab="KiloWatts")
# hist(dataInput()$Reactive/10, col=rgb(0,0,1,0.5), add=T)
grid.arrange(actReact,sub ,ncol = 1, nrow = 2)
})
##---------> Check Active and Reactive Power
# Two stacked panels: total instant demand plus cumulative power (top), and
# reactive power as a share of total (bottom).
output$activecosts = renderPlot({
sampled = dataInput()
# Reactive share of total power (the *100 in the denominator scales the
# ratio for display on the 0-1-ish axis).
sampled$plott = sampled$Reactive/((sampled$Active+sampled$Reactive)*100)
# Running total of combined power, normalised to kW.
sampled$sum1 <- cumsum((sampled$Active+sampled$Reactive)/1000)
percentReactive = ggplot(sampled, aes(y = plott, x = DateTime))+ geom_point()+ geom_smooth() +labs(y="% Reactive Power", x = "Date Range", title = "Active/Reactive Power Ratio" , subtitle = paste("5% is Maximum for a Green Community. Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ))+ theme_bw()
actReact = ggplot(sampled, aes(sampled$DateTime, sampled$Active)) +
geom_line() +
geom_line(data = sampled, aes(sampled$DateTime, sampled$Reactive), color = "red") +
geom_line(data = sampled, aes(sampled$DateTime, sampled$sum1), color = "green")+ labs(title = "Total Instant Demand and Cumulative Power Requirement", subtitle = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ), x = "Date Range", y = "KiloWatts")
grid.arrange(actReact,percentReactive ,ncol = 1, nrow = 2)
})
########----------------Calculate the Costs associated with Each Room
# Four-panel cost breakdown per sub-metered room: pie chart, bar chart,
# instantaneous cost, and cumulative cost.
output$subcost = renderPlot({
price = dataInput()
pricekwpeak = .159      # $/kWh, peak tariff
pricekwoffpeak = .1252  # $/kWh, off-peak tariff (currently unused)
price$price1 = price$Kitchen*pricekwpeak
price$price2 = price$Laundry*pricekwpeak
price$price3 = price$HVAC*pricekwpeak
# Totals and running totals of cost per room.
sum1 = sum( price$price1)
sum2 = sum( price$price2)
sum3 = sum( price$price3)
price$sum1 <- cumsum(price$price1)
price$sum2 <- cumsum(price$price2)
price$sum3 <- cumsum(price$price3)
df <- data.frame(
group = c("Kitchen", "Laundry", "HVAC"),
value = c(sum1,sum2, sum3)
)
polar = ggplot(df, aes(x="", y=value, fill=group))+
geom_bar(width = 1, stat = "identity")+coord_polar("y", start=0)+ labs(title = "Pie Chart of Consumption", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), y = "Percent", x = "")
bar = ggplot(df, aes(group, value)) +
geom_col()+ labs(title = "Relative Magnitutes of Power Consumed", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), x = "SubMetered Area", y = "KiloWatt Hours")
instaPrice = ggplot(price, aes(price$DateTime, price$price1, fill = price$price1)) +
geom_line() +
geom_line(data = price, aes(price$DateTime, price$price2), color = "red") + #the damn plus must be on the same line
geom_line(data = price, aes(price$DateTime, price$price3), color = "green")+ labs(title = "Instant Submeter Cost", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), x = "Date Range", y = "Total Cost ($)")
totprice = ggplot(price, aes(price$DateTime, price$sum1)) +
geom_line() +
geom_line(data = price, aes(price$DateTime, price$sum2), color = "red") + #the damn plus must be on the same line
geom_line(data = price, aes(price$DateTime, price$sum3), color = "green")+ labs(title = "Aggregated Submeter Cost", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), x = "Date Range", y = "Total Cost ($)")
grid.arrange(polar, bar, instaPrice, totprice,ncol = 2, nrow = 2)
})
###############------------> Code to Histogram Chart ** Now with Extra Dates!
# Histogram figure used by the download handlers (near-duplicate of
# output$hist above).  Returns the grid.arrange gtable so ggsave can save it.
plotInput = reactive({
sampled = dataInput()
actReact = ggplot(sampled, aes( sampled$Active/1000), fill = "blue") +
geom_histogram() +
geom_histogram(data = sampled, aes( sampled$Reactive/1000), fill = "red") + labs(title = "Most Frequent Power Level (Home)", subtitle = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ), x = "KiloWatts (Normalized)", y = "Number of Observations")
sub = ggplot(sampled, aes( sampled$Kitchen), fill = "purple") +
geom_histogram() +
geom_histogram(data = sampled, aes( sampled$Laundry), fill = "red") +
geom_histogram(data = sampled, aes( sampled$HVAC), fill = "green")+ labs(title = "Most Frequent Power Levels (Submetered Rooms)", subtitle = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ), y = "Number of Observations")
# plot1 = hist(dataInput()$Active/100, col=rgb(1,0,0,0.5), main="Histogram of Active and Reactive Power", xlab="KiloWatts")
# hist(dataInput()$Reactive/10, col=rgb(0,0,1,0.5), add=T)
grid.arrange(actReact,sub ,ncol = 1, nrow = 2)
})
# Sub-meter cost figure used by the download handlers (duplicate of
# output$subcost above).  Returns the grid.arrange gtable for ggsave.
plotInput2 = reactive({
price = dataInput()
pricekwpeak = .159      # $/kWh, peak tariff
pricekwoffpeak = .1252  # $/kWh, off-peak tariff (currently unused)
price$price1 = price$Kitchen*pricekwpeak
price$price2 = price$Laundry*pricekwpeak
price$price3 = price$HVAC*pricekwpeak
sum1 = sum( price$price1)
sum2 = sum( price$price2)
sum3 = sum( price$price3)
price$sum1 <- cumsum(price$price1)
price$sum2 <- cumsum(price$price2)
price$sum3 <- cumsum(price$price3)
df <- data.frame(
group = c("Kitchen", "Laundry", "HVAC"),
value = c(sum1,sum2, sum3)
)
polar = ggplot(df, aes(x="", y=value, fill=group))+
geom_bar(width = 1, stat = "identity")+coord_polar("y", start=0)+ labs(title = "Pie Chart of Consumption", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), y = "Percent", x = "")
bar = ggplot(df, aes(group, value)) +
geom_col()+ labs(title = "Relative Magnitutes of Power Consumed", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), x = "SubMetered Area", y = "KiloWatt Hours")
instaPrice = ggplot(price, aes(price$DateTime, price$price1, fill = price$price1)) +
geom_line() +
geom_line(data = price, aes(price$DateTime, price$price2), color = "red") + #the damn plus must be on the same line
geom_line(data = price, aes(price$DateTime, price$price3), color = "green")+ labs(title = "Instant Submeter Cost", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), x = "Date Range", y = "Total Cost ($)")
totprice = ggplot(price, aes(price$DateTime, price$sum1)) +
geom_line() +
geom_line(data = price, aes(price$DateTime, price$sum2), color = "red") + #the damn plus must be on the same line
geom_line(data = price, aes(price$DateTime, price$sum3), color = "green")+ labs(title = "Aggregated Submeter Cost", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), x = "Date Range", y = "Total Cost ($)")
grid.arrange(polar, bar, instaPrice, totprice,ncol = 2, nrow = 2)
})
# Active/Reactive figure used by the download handlers (duplicate of
# output$activecosts above).  Returns the grid.arrange gtable for ggsave.
plotInput3 = reactive({
sampled = dataInput()
sampled$plott = sampled$Reactive/((sampled$Active+sampled$Reactive)*100)
sampled$sum1 <- cumsum((sampled$Active+sampled$Reactive)/1000)
percentReactive = ggplot(sampled, aes(y = plott, x = DateTime))+ geom_point()+ geom_smooth() +labs(y="% Reactive Power", x = "Date Range", title = "Active/Reactive Power Ratio" , subtitle = paste("5% is Maximum for a Green Community. Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ))+ theme_bw()
actReact = ggplot(sampled, aes(sampled$DateTime, sampled$Active)) +
geom_line() +
geom_line(data = sampled, aes(sampled$DateTime, sampled$Reactive), color = "red") +
geom_line(data = sampled, aes(sampled$DateTime, sampled$sum1), color = "green")+ labs(title = "Total Instant Demand and Cumulative Power Requirement", subtitle = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ), x = "Date Range", y = "KiloWatts")
grid.arrange(actReact,percentReactive ,ncol = 1, nrow = 2)
})
# Prophet forecast figure for the downloadprophet handler.
# Fixed: plotInput4 was defined twice; the second, empty reactive silently
# overwrote this one, so the download handler saved a blank image.
plotInput4 = reactive({
  prophetInput()
})
# Download handlers: each saves its chart as "inputdataset.png".
# NOTE(review): Combined and SubOverview reuse plotInput() (the histogram
# figure), which looks unintentional -- confirm which figure each button
# should export before changing; behaviour is kept as-is here.
output$downloadCombined <- downloadHandler(
  filename = function() { paste0("inputdataset", ".png") },
  content = function(file) {
    ggsave(file, plot = plotInput(), device = "png")
  }
)
output$downloadHist <- downloadHandler(
  filename = function() { paste0("inputdataset", ".png") },
  content = function(file) {
    ggsave(file, plot = plotInput(), device = "png")
  }
)
output$downloadActiveCosts <- downloadHandler(
  filename = function() { paste0("inputdataset", ".png") },
  content = function(file) {
    ggsave(file, plot = plotInput3(), device = "png")
  }
)
output$downloadSubOverview <- downloadHandler(
  filename = function() { paste0("inputdataset", ".png") },
  content = function(file) {
    ggsave(file, plot = plotInput(), device = "png")
  }
)
output$downloadSubCost <- downloadHandler(
  filename = function() { paste0("inputdataset", ".png") },
  content = function(file) {
    ggsave(file, plot = plotInput2(), device = "png")
  }
)
output$downloadprophet <- downloadHandler(
  filename = function() { paste0("inputdataset", ".png") },
  content = function(file) {
    ggsave(file, plot = plotInput4(), device = "png")
  }
)
# output$downloadData <- downloadHandler(
# filename = 'My_Rules.csv',
# content = function(file) {
# write.csv(rules2df(dataInput()), filename)
# }
# )
}
# Launch the Shiny application with the UI and server defined above.
shinyApp(ui = ui, server = server)
# (extraction residue removed: repository metadata row "/power.R | no_license | x78163/power" is not R code)
# Badass function that checks to see if the package is installed, installs it or loads the library...Totally Awesome
# Load a package, installing it first if it is not already available.
# p: package name as a string.
# Returns the result of require(): TRUE if the package was attached,
# FALSE otherwise.
usePackage <- function(p)
{
  # requireNamespace() is a cheaper and more direct availability check than
  # scanning installed.packages(), which enumerates every installed library.
  if (!requireNamespace(p, quietly = TRUE))
    install.packages(p, dep = TRUE)
  require(p, character.only = TRUE)
}
#---------Using the usePackage Function instead of library-----------------------------
usePackage("shiny")
usePackage("DT")
usePackage("data.table")
#usePackage("arules")
#usePackage("arulesViz")
usePackage("crosstalk")
usePackage("caret")
usePackage("ggplot2")
usePackage("dplyr")
usePackage("tidyr")
usePackage("Hmisc")
usePackage("pastecs")
usePackage("psych")
usePackage("mice")
usePackage("VIM")
#usePackage(remove.na)
#usePackage(lubridate)
#usePackage(data.table)
#usePackage(tibble)
usePackage("rlang")
usePackage("gridExtra")
usePackage("lubridate")
usePackage("chron")
usePackage("zoo")
usePackage("forecast")
usePackage("reshape")
usePackage("prophet")
usePackage("xts")
usePackage("grid")
usePackage("ggfortify")
#--------------------------Time to get this party started!!!!!! WOOOOOOOOOOO
# autoplot
# _____ _ __ ___ _ __ __ _ _ ___
# | __ \ | | \ \ / / | ( ) | \/ | | \ | | /\ |__ \
# | | | |_ _ __| | ___ \ \ /\ / /| |__ ___ _ __ ___|/ ___ | \ / |_ _ | \| | / \ ) |
# | | | | | | |/ _` |/ _ \ \ \/ \/ / | '_ \ / _ \ '__/ _ \ / __| | |\/| | | | | | . ` | / /\ \ / /
# | |__| | |_| | (_| | __/_ \ /\ / | | | | __/ | | __/ \__ \ | | | | |_| | | |\ |/ ____ \ |_|
# |_____/ \__,_|\__,_|\___( ) \/ \/ |_| |_|\___|_| \___| |___/ |_| |_|\__, | |_| \_/_/ \_\ (_)
# |/ __/ |
# |___/
#
# ---------------> Define some critical variables first....These are Default Values---------------------------------
# Default date window for the UI controls.
# NOTE(review): the format string is passed positionally (it reaches
# strptime via ...); passing format= explicitly would be clearer.
dateStart = as.POSIXct("2006-12-16 17:24:00","%Y-%m-%d %H:%M:%S", tz = "") #start day and time
dateEnd = as.POSIXct("2010-11-26 21:02:00","%Y-%m-%d %H:%M:%S", tz = "") #end day and time
# Leftover apriori defaults (support/confidence); currently unused.
supp=0.1
conf=0.5
vars=50
########------------> Use the following if you want to browse for a file (set as default)
import = read.csv("household_power_consumption1.txt", sep=";")
#######-------------> Use the following if you want to lock in a file (disabled by default)
#----> Testing Dataset--------
#import = read.csv("https://drive.google.com/uc?id=1a1yWld2tiv6Aw0lE3nhH_aRWM54r75AN", sep=";")
####--------> Change the stupid column names to something actually readable-------------------------------
setnames(import, old=c("Global_active_power", "Global_reactive_power", "Global_intensity","Sub_metering_1","Sub_metering_2", "Sub_metering_3"), new=c("Active", "Reactive", "Amps", "Kitchen", "Laundry", "HVAC"))
#---------> Convert Date ---------------------------------------------------------------------------------#######
import$Date <- as.Date(import$Date, "%d/%m/%Y", tz = "")
######------> Creating one date+time column called DateTime############################################################
import <-cbind(import,paste(import$Date,import$Time), stringsAsFactors=FALSE)
colnames(import)[10] <-"DateTime"
# Move DateTime to the first column.
import <- import[,c(ncol(import), 1:(ncol(import)-1))]
######------> Converting date and Time format #########################################################################
import$DateTime <- strptime(import$DateTime, "%Y-%m-%d %H:%M:%S", tz ="") # Converts the string to a Date/Time Object
import$DateTime = as.POSIXct(import$DateTime, tz = "") #Does some Voodoo shit to fix the DST problem
#--------> Convert 3,4,5,6,7,8,9 to numeric (And Normalize Some of them)------- They are not Factors ##############################################
# NOTE(review): the raw file marks missing readings with "?"; as.numeric()
# turns those into NA (with a warning), which na.omit() below then drops.
import$Active = as.numeric(import$Active)
import$Reactive = as.numeric(import$Reactive)
import$Voltage = as.numeric(import$Voltage)
import$Amps = as.numeric(import$Amps)
# Sub-meter readings scaled from Wh to kWh.
import$Kitchen = as.numeric(import$Kitchen)
import$Kitchen = import$Kitchen/1000
import$Laundry = as.numeric(import$Laundry)
import$Laundry = import$Laundry/1000
import$HVAC = as.numeric(import$HVAC)
import$HVAC = import$HVAC/1000
#-------> Remove NA's ---------------------------
cleaned = na.omit(import) #removed all of the NA's
#cleaned = rm.outlier(cleaned$Active, fill = FALSE, median = FALSE, opposite = FALSE)
# Weekday name column, plus dataset start/end labels for the UI.
cleaned$day = weekdays(as.Date(cleaned$DateTime))
startDS = paste("Start of Data: ",as.character(cleaned$Date[1]))
endDS = paste("End of Data: ", as.character(cleaned$Date[as.numeric(length(cleaned$Date))]))
# Define first 1/2 of Shiny app....The User Interface!!! Awwwwww Yeah!
# UI definition: a navbar with Home / Presentation / Analysis / Predictions /
# Take Aways pages. The Analysis and Predictions pages share the same layout:
# a sidebar of filters plus a tabset of plots rendered by the server.
ui <- fluidPage(theme = "bootstrap.css",
#---------> Creating a NavBar for the top of the page-------------------------------------------------------------------------------------------------
navbarPage(
#---------> Company logo rendered as the navbar title (top-left) -------------------------------------------------------------------------------------
img(src='http://res.cloudinary.com/x78163/image/upload/v1510921813/IOT_p5bgjo.png', align = "left", width="150", height="30"),
#---------> Creating a Home Tabs for the top of the page---------------------------------------------------------------------------
tabPanel("Home" ,
h1("Ubiqum Data Science Consultants", align = "center"),
# NOTE(review): the style attribute quoting below is malformed
# (style ="width="300") — browsers ignore it, so the images render at
# natural size. Fix to width="300" height="300" attributes if sizing matters.
HTML('<center><img src="http://cultofthepartyparrot.com/assets/sirocco.gif" style ="width="300", height="300"></center>'),
#HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1512060481/partyparrot_lcjgj2.gif" style ="width="300", height="300"></center>'),
# HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510907256/DS_logo_rmmtbo.png" style ="width="300", height="300"></center>'),
h3("Time to make your data party like a parrot!!!!!", align = "center"),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510907481/headshot_foglex.png" style ="width="100", height="100"></center>')
),
#---------> Creating a Presentation Tabs for the top of the page-------------------------------------------------------------------------------------------------
tabPanel("Presentation",
#---------> Embed the project slide deck from OneDrive in an iframe ----------------------------------------------------------------------------------
tags$iframe(style="height:50vw; width:90vw; scrolling=no",
src="https://onedrive.live.com/embed?cid=D091F528EDB75B0A&resid=D091F528EDB75B0A%2111092&authkey=AJlOeVwrPeQJKDc&em=2")),
# <iframe src="https://onedrive.live.com/embed?cid=D091F528EDB75B0A&resid=D091F528EDB75B0A%2111092&authkey=AJlOeVwrPeQJKDc&em=2" width="402" height="327" frameborder="0" scrolling="no"></iframe>
#---------> Creating an Analysis Tabs for the top of the page---------------------------------------------------------------------------------------------------------------------
tabPanel("Our Analysis",
sidebarPanel(
# img(src='http://res.cloudinary.com/x78163/image/upload/v1510825161/DS_logo_rsoctl.png', align = "center", width="250", height="250"),
tags$br(),
# NOTE(review): the condition string below is not valid JavaScript (R's %in%
# plus a stray quote), and no input named "mytab" exists in this UI. Shiny
# never evaluates it successfully, so the panel stays visible by accident.
# Consider replacing the condition with "true" — confirm before changing.
conditionalPanel(
condition = "input.mytab %in%' c('grouped', 'graph', 'table', 'datatable', 'scatter', 'paracoord', 'matrix', 'itemFreq')",
# radioButtons('samp', label='Sample', choices=c('All Rules', 'Sample'), inline=T), br(),
#uiOutput("choose_columns"), br(),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510908400/Calendar_f5yruq.png" style ="width="150", height="100"></center>'),
tags$br(),
startDS,
tags$br(),
endDS,
tags$br(),
# NOTE(review): default date range is hard-coded to Jan 2007; consider
# deriving it from cleaned$Date so it tracks the loaded file.
dateRangeInput("daterange", "Date range:",
start = "2007-01-01",
end = "2007-01-31"),
# sliderInput("supp", "Support:", min = "2007-12-05 01:00:00", max = "2007-11-05 01:00:00", value = c(dateStart,dateEnd) ), br(),
tags$br(),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510908508/Clock_l3ktu9.png" style ="width="75", height="100"></center>'),
tags$br(),
tags$br(),
# Time-of-day window expressed as an HHMM integer (e.g. 1830 = 18:30).
sliderInput("time", "Time (0000-2359):",
min = 0, max = 2359, step = 100,
value = c(200,500)),
#sliderInput("conf", "Confidence:", min = 0, max = 1, value = conf , step = 1/100), br(),
tags$br(),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510908579/sunmoon_ybnl63.png" style ="width="100", height="100"></center>'),
tags$br(),
tags$br(),
selectInput('light', label='Filter by Day/Night', choices = c('All Day','Day', 'Night')), br(), br()
# selectInput('week', label='Filter by Weekend/Weekday', choices = c('All','Weekend', 'Weekday')), br(), br(),
# numericInput("minL", "Min. items per set:", 2), br(),
# numericInput("maxL", "Max. items per set::", 3), br(),
# radioButtons('lhsv', label='LHS variables', choices=c('All', 'Subset')), br(),
# radioButtons('rhsv', label='RHS variables', choices=c('All', 'Subset')), br(),
# downloadButton('downloadPlot', 'Download Charts')
)
),
mainPanel(
tabsetPanel(
tabPanel("Dataset", DT::dataTableOutput("rules")),
tabPanel("Combined Plot", plotOutput("combined"),downloadButton('downloadCombined', 'Download Charts')),
tabPanel("Histograms", plotOutput("hist"),downloadButton('downloadHist', 'Download Charts')),
navbarMenu("Active/Reactive Plots",
tabPanel("Overview", plotOutput("activeoverview", click = "plot1_click", brush = brushOpts(id = "plot1_brush"))),
tabPanel("Costs", plotOutput("activecosts"),downloadButton('downloadActiveCosts', 'Download Charts'))),
navbarMenu("Submeter Plots",
tabPanel("Overview", plotOutput("suboverview"),downloadButton('downloadSubOverview', 'Download Charts')),
tabPanel("Costs", plotOutput("subcost"),downloadButton('downloadSubCost', 'Download Charts'))),
tabPanel("Dashboard", textOutput("dash"))
)
)
),
tabPanel("Predictions",
sidebarPanel(
# img(src='http://res.cloudinary.com/x78163/image/upload/v1510825161/DS_logo_rsoctl.png', align = "center", width="250", height="250"),
tags$br(),
# NOTE(review): same invalid-JS condition as the Analysis tab (see above).
conditionalPanel(
condition = "input.mytab %in%' c('grouped', 'graph', 'table', 'datatable', 'scatter', 'paracoord', 'matrix', 'itemFreq')",
HTML('<center><img src="http://www.freepngimg.com/download/calendar/4-2-calendar-png-hd.png" style ="width="150", height="100"></center>'),
tags$br(),
tags$br(),
startDS,
tags$br(),
endDS,
tags$br(),
dateRangeInput("dateSelect", "Select Date Range:",
start = "2007-01-01",
end = "2007-01-31"),
tags$br(),
HTML('<center><img src="http://iconbug.com/data/95/256/8696325e0e7407823058632e68fb5970.png" style ="width="75", height="100"></center>'),
tags$br(),
tags$br(),
sliderInput("predict", "Number of Days to Predict:",
min = 0, max = 60, step = 1,
value = 30),
#sliderInput("conf", "Confidence:", min = 0, max = 1, value = conf , step = 1/100), br(),
tags$br(),
HTML('<center><img src="http://www.clker.com/cliparts/e/0/7/1/1197123164299010126JPortugall_icon_bus_bar.svg.med.png" style ="width="100", height="100"></center>'),
tags$br(),
tags$br(),
selectInput('series', label='Choose a Data Series to Predict', choices = c('Global Active','Global Reactive', 'Amps', 'Kitchen', 'Laundry', 'HVAC')), br(), br(),
# NOTE(review): the server-side reactives compare against
# 'Every Minute (please no)', which does not match the
# 'Every Minute (poor computer)' label offered here.
selectInput('interval', label='Choose a time Interval', choices = c('Every Day (Default)','Every Second (please no)','Every Minute (poor computer)', 'Every Hour', 'Every Week')), br()
)
),
mainPanel(
tabsetPanel(
tabPanel("Linear Forecasts", plotOutput("linearForecast")),
tabPanel("Time Series Decomposition", plotOutput("tsDecomp")),
tabPanel("Holt-Winters Predictions", plotOutput("hwPrediction")),
tabPanel("Prophet Predictions", plotOutput("prophetPrediction"),downloadButton('downloadprophet', 'Download Charts'))
)
)
),
tabPanel("Take Aways",
h1("Recommendation: Install The Submeters In The Housing Project", align = "center"),
tags$br(),
tags$br(),
tags$ol(margin= "20%",
tags$li("Housing Community Meets 'Renewable Energy' Reactive Power Guidelines"),
tags$br(),
tags$li("Builder Can Re-Allocate Equipment to Optimize Material Costs"),
tags$br(),
tags$li("Home Owners Can Save Money With Smart Home Appliances (Off-Peak Power Usage)"),
tags$br(),
tags$li("Home Owners Can Remotely Monitor Their Home (Identify Failing Systems)"),
tags$br(),
tags$li("Home Owners Consume More Power In Winter (Consider Gas Heating)")
),
tags$br(),
tags$br(),
HTML('<center><img src="http://res.cloudinary.com/x78163/image/upload/v1510907256/DS_logo_rmmtbo.png" style ="width="300", height="300"></center>')
)
# titlePanel("Recommendation: Install The Submeters In The Housing Project")
)
)
# Mandatory 2/2 Shiny App requirement---> server call
server <- function(input, output) {
###----> Do some awesome magic to make all of the charts reactive. Basically, run Apriori when you move the slider! WOOOOOOO!!
#apriori (import, parameter = list(supp=input$supp,conf = input$conf, minlen= input$minL, maxlen=input$maxL))
dataInput <- reactive({
  # Reactive data source for the "Our Analysis" tab: the cleaned dataset
  # restricted to the selected date range, time-of-day window, and
  # day/night mode. Plots consume it as dataInput()$column.
  #----------------------- Date range filter ------------------------------------
  dates = filter(cleaned, cleaned$DateTime >= input$daterange[1] &
                          cleaned$DateTime <= input$daterange[2])
  #----------------------- Time-of-day filters ----------------------------------
  # Time is an "HH:MM:SS" string; convert it to an HHMM integer
  # (e.g. "18:30:00" -> 1830) so it is comparable with the 0-2359 time slider.
  # BUG FIX: the old code called as.numeric() directly on "HH:MM:SS" (always NA,
  # emptying the result) and as.POSIXct("18:30:00", "%H:%M:%S") (the format
  # string landed in the tz argument, which errors), so these filters could
  # never work as written.
  hhmm = as.numeric(substr(dates$Time, 1, 2)) * 100 +
         as.numeric(substr(dates$Time, 4, 5))
  results = dates
  if (input$light == "All Day")
  {
    # Keep only rows inside the slider's HHMM window.
    results = filter(dates, hhmm >= input$time[1] & hhmm <= input$time[2])
  }
  if (input$light == "Day")
  {
    # Day = after 06:30 up to and including 18:30.
    # BUG FIX: the original predicate (> nightStart | <= nightEnd) actually
    # selected the NIGHT hours under the "Day" label; inverted to match intent.
    results = filter(dates, hhmm > 630 & hhmm <= 1830)
  }
  if (input$light == "Night")
  {
    # Night = the complement: after 18:30 or up to and including 06:30.
    results = filter(dates, hhmm > 1830 | hhmm <= 630)
  }
  # (The old commented-out Weekend/Weekday filter was removed; restore from
  # version control if that feature is revived.)
  #------------------ Return the filtered frame ---------------------------------
  results
})
############################################## Prediction Space ################################################
predictInput <- reactive({
  # Reactive: aggregate the chosen series at the chosen interval and return a
  # data frame with columns ds (timestamp label) / y (per-period mean).
  # NOTE(review): the original referenced `cleanTail`, which is never defined
  # at server scope (the other prediction reactives create it locally).
  # `cleaned` is the only matching dataset visible here — confirm intent.
  cleanTail = cleaned
  predictRange = input$predict    # read to preserve the reactive dependency
  dateSelect = input$dateSelect   # read to preserve the reactive dependency
  # Map the UI interval label to an xts endpoint unit.
  # BUG FIX: the UI offers 'Every Minute (poor computer)'; the old comparison
  # used 'Every Minute (please no)', so minutes could never be selected.
  intervalSelected = switch(input$interval,
                            "Every Day (Default)"          = "days",
                            "Every Second (please no)"     = "seconds",
                            "Every Minute (poor computer)" = "minutes",
                            "Every Hour"                   = "hours",
                            "Every Week"                   = "weeks",
                            "days")  # fallback mirrors the original default
  # Pick the data column matching the series selected in the UI.
  y = switch(input$series,
             "Global Active"   = cleanTail$Active,
             "Global Reactive" = cleanTail$Reactive,
             "Amps"            = cleanTail$Amps,
             "Kitchen"         = cleanTail$Kitchen,
             "Laundry"         = cleanTail$Laundry,
             "HVAC"            = cleanTail$HVAC,
             cleanTail$Active)
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts series indexed by timestamp
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries for the unit
  # Per-period mean (renamed from `mean`, which shadowed base::mean).
  periodMean = period.apply(dat.xts, ep, mean)
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # timestamps become a column
  colnames(meanDF) = c("ds", "y")
  # BUG FIX: return the aggregated frame. The original ended on the colnames()
  # assignment, so the reactive returned c("ds","y") instead of the data.
  meanDF
})
#---------------------------------> <----------------------------
#---------------------------------> <----------------------------
# output$tsDecomp = renderPlot({
#
# #---------------------> Moving Average Additive---------------------------------------------
#
#
#
#
#
# #---------------------> Moving Average Multiplicative---------------------------------------------
#
#
# demand = predictInput()$demand12
#
# trend_air = ma(demand, order = 12, centre = T)
# plot(as.ts(demand))
# lines(trend_air)
# plot(as.ts(trend_air))
#
#
#
# #---------------------> Seasonal decomposed Data Addititive--------------------------
# cleanTail = cleaned
# y = cleanTail$Active
# ds = cleanTail$DateTime
# dat.xts <- xts(x = y,as.POSIXct(ds)) #convert to xts format for maximum awesomeness
# head(dat.xts) #quick systems check to make sure it is working
# ep <- endpoints(dat.xts,'days') #our endpoints will be hours (so hourly) and using endpoint function
# dailyMean = period.apply(dat.xts,ep,mean) #applying the mean function to the defined period (hourly)
# dailyMean = as.data.frame(dailyMean) #converting to dataframe
# dailyMeanDF = data.matrix(as.data.frame(dailyMean)) #shifting to matrix
# dailyMeanDF = as.data.frame(dailyMeanDF) #Pulling back into dataframe
# dailyMeanDF = setDT(dailyMeanDF, keep.rownames = TRUE)[] #making the row names into a column
# colnames(dailyMeanDF) = c("ds", "y") #Applying labels from above
# demand <- ts(dailyMeanDF$y, frequency = 4)
#
#
# decompose_beer = decompose(demand, "additive")
#
# plot(as.ts(decompose_beer$seasonal))
# plot(as.ts(decompose_beer$trend))
# plot(as.ts(decompose_beer$random))
# plot(decompose_beer)
#
#
# #---------------------> Seasonal decomposed Data Multiplicative--------------------------
# cleanTail = cleaned
# y = cleanTail$Active
# ds = cleanTail$DateTime
# dat.xts <- xts(x = y,as.POSIXct(ds)) #convert to xts format for maximum awesomeness
# head(dat.xts) #quick systems check to make sure it is working
# ep <- endpoints(dat.xts,'days') #our endpoints will be hours (so hourly) and using endpoint function
# dailyMean = period.apply(dat.xts,ep,mean) #applying the mean function to the defined period (hourly)
# dailyMean = as.data.frame(dailyMean) #converting to dataframe
# dailyMeanDF = data.matrix(as.data.frame(dailyMean)) #shifting to matrix
# dailyMeanDF = as.data.frame(dailyMeanDF) #Pulling back into dataframe
# dailyMeanDF = setDT(dailyMeanDF, keep.rownames = TRUE)[] #making the row names into a column
# colnames(dailyMeanDF) = c("ds", "y") #Applying labels from above
# demand <- ts(dailyMeanDF$y, frequency = 12)
#
#
#
# decompose_air = decompose(demand, "multiplicative")
#
# plot(as.ts(decompose_air$seasonal))
# plot(as.ts(decompose_air$trend))
# plot(as.ts(decompose_air$random))
# plot(decompose_air)
#
#
# #---------------------> Using STL ------------------------------------------------------
#
# cleanTail = cleaned
# y = cleanTail$Active
# ds = cleanTail$DateTime
# dat.xts <- xts(x = y,as.POSIXct(ds)) #convert to xts format for maximum awesomeness
# head(dat.xts) #quick systems check to make sure it is working
# ep <- endpoints(dat.xts,'days') #our endpoints will be hours (so hourly) and using endpoint function
# dailyMean = period.apply(dat.xts,ep,mean) #applying the mean function to the defined period (hourly)
# dailyMean = as.data.frame(dailyMean) #converting to dataframe
# dailyMeanDF = data.matrix(as.data.frame(dailyMean)) #shifting to matrix
# dailyMeanDF = as.data.frame(dailyMeanDF) #Pulling back into dataframe
# dailyMeanDF = setDT(dailyMeanDF, keep.rownames = TRUE)[] #making the row names into a column
# colnames(dailyMeanDF) = c("ds", "y") #Applying labels from above
# demand <- ts(dailyMeanDF$y, frequency = 4)
#
#
#
#
# stl_beer = stl(demand, "periodic")
# seasonal_stl_beer <- stl_beer$time.series[,1]
# trend_stl_beer <- stl_beer$time.series[,2]
# random_stl_beer <- stl_beer$time.series[,3]
#
# plot(demand)
# plot(as.ts(seasonal_stl_beer))
# plot(trend_stl_beer)
# plot(random_stl_beer)
# plot(stl_beer)
#
#
# })
#---------------------------------> <----------------------------
linearInput = reactive({
  # Reactive behind the "Linear Forecasts" tab.
  # NOTE(review): despite the name, this performs a multiplicative seasonal
  # decomposition; renderPlot() only shows the final plot() call.
  cleanTail = filter(cleaned, cleaned$DateTime >= input$dateSelect[1] &
                              cleaned$DateTime <= input$dateSelect[2])
  predictRange = as.numeric(input$predict)  # read to preserve the reactive dependency
  # Map the UI interval label to an xts endpoint unit.
  # BUG FIX: the UI offers 'Every Minute (poor computer)'; the old comparison
  # used 'Every Minute (please no)', so minutes could never be selected.
  intervalSelected = switch(input$interval,
                            "Every Day (Default)"          = "days",
                            "Every Second (please no)"     = "seconds",
                            "Every Minute (poor computer)" = "minutes",
                            "Every Hour"                   = "hours",
                            "Every Week"                   = "weeks",
                            "hours")  # fallback mirrors the original default
  # Pick the data column matching the series selected in the UI.
  y = switch(input$series,
             "Global Active"   = cleanTail$Active,
             "Global Reactive" = cleanTail$Reactive,
             "Amps"            = cleanTail$Amps,
             "Kitchen"         = cleanTail$Kitchen,
             "Laundry"         = cleanTail$Laundry,
             "HVAC"            = cleanTail$HVAC,
             cleanTail$Active)
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts series indexed by timestamp
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries for the unit
  # Per-period mean (renamed from `mean`, which shadowed base::mean).
  periodMean = period.apply(dat.xts, ep, mean)
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # timestamps become a column
  colnames(meanDF) = c("ds", "y")
  demand <- ts(meanDF$y, frequency = 12)         # 12 observations per seasonal cycle
  decompose_air = decompose(demand, "multiplicative")
  plot(as.ts(decompose_air$seasonal))
  plot(as.ts(decompose_air$trend))
  plot(as.ts(decompose_air$random))
  plot(decompose_air)                            # the plot renderPlot() displays
})
# Render the decomposition plot produced by linearInput().
output$linearForecast = renderPlot(linearInput())
stlInput <- reactive({
  # Reactive behind the "Time Series Decomposition" tab: STL decomposition of
  # the chosen series aggregated at the chosen interval. renderPlot() only
  # shows the final plot() call.
  cleanTail = filter(cleaned, cleaned$DateTime >= input$dateSelect[1] &
                              cleaned$DateTime <= input$dateSelect[2])
  predictRange = as.numeric(input$predict)  # read to preserve the reactive dependency
  # Map the UI interval label to an xts endpoint unit.
  # BUG FIX: the UI offers 'Every Minute (poor computer)'; the old comparison
  # used 'Every Minute (please no)', so minutes could never be selected.
  intervalSelected = switch(input$interval,
                            "Every Day (Default)"          = "days",
                            "Every Second (please no)"     = "seconds",
                            "Every Minute (poor computer)" = "minutes",
                            "Every Hour"                   = "hours",
                            "Every Week"                   = "weeks",
                            "hours")  # fallback mirrors the original default
  # Pick the data column matching the series selected in the UI.
  y = switch(input$series,
             "Global Active"   = cleanTail$Active,
             "Global Reactive" = cleanTail$Reactive,
             "Amps"            = cleanTail$Amps,
             "Kitchen"         = cleanTail$Kitchen,
             "Laundry"         = cleanTail$Laundry,
             "HVAC"            = cleanTail$HVAC,
             cleanTail$Active)
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts series indexed by timestamp
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries for the unit
  # Per-period mean (renamed from `mean`, which shadowed base::mean).
  periodMean = period.apply(dat.xts, ep, mean)
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # timestamps become a column
  colnames(meanDF) = c("ds", "y")
  demand <- ts(meanDF$y, frequency = 4)          # 4 observations per seasonal cycle
  stl_beer = stl(demand, "periodic")             # seasonal/trend/remainder split
  seasonal_stl_beer <- stl_beer$time.series[,1]
  trend_stl_beer <- stl_beer$time.series[,2]
  random_stl_beer <- stl_beer$time.series[,3]
  plot(demand)
  plot(as.ts(seasonal_stl_beer))
  plot(trend_stl_beer)
  plot(random_stl_beer)
  plot(stl_beer)                                 # the plot renderPlot() displays
})
# Render the STL decomposition plot produced by stlInput().
output$tsDecomp = renderPlot(stlInput())
hwInput <- reactive({
  # Reactive behind the "Holt-Winters Predictions" tab: fit HoltWinters on the
  # aggregated series and forecast input$predict periods ahead.
  # NOTE(review): the dateSelect filter is commented out here (unlike the other
  # prediction reactives), so the model always trains on the full dataset —
  # confirm whether that is intentional.
  cleanTail = cleaned#filter(cleaned, cleaned$DateTime >= input$dateSelect[1] & cleaned$DateTime <= input$dateSelect[2] )
  predictRange = as.numeric(input$predict)  # number of periods to forecast
  # Map the UI interval label to an xts endpoint unit.
  # BUG FIX: the UI offers 'Every Minute (poor computer)'; the old comparison
  # used 'Every Minute (please no)', so minutes could never be selected.
  intervalSelected = switch(input$interval,
                            "Every Day (Default)"          = "days",
                            "Every Second (please no)"     = "seconds",
                            "Every Minute (poor computer)" = "minutes",
                            "Every Hour"                   = "hours",
                            "Every Week"                   = "weeks",
                            "hours")  # fallback mirrors the original default
  # Pick the data column matching the series selected in the UI.
  y = switch(input$series,
             "Global Active"   = cleanTail$Active,
             "Global Reactive" = cleanTail$Reactive,
             "Amps"            = cleanTail$Amps,
             "Kitchen"         = cleanTail$Kitchen,
             "Laundry"         = cleanTail$Laundry,
             "HVAC"            = cleanTail$HVAC,
             cleanTail$Active)
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts series indexed by timestamp
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries for the unit
  # Per-period mean (renamed from `mean`, which shadowed base::mean).
  periodMean = period.apply(dat.xts, ep, mean)
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # timestamps become a column
  colnames(meanDF) = c("ds", "y")
  demand <- ts(meanDF$y, frequency = 4)          # 4 observations per seasonal cycle
  hw <- HoltWinters(demand)                      # level/trend/seasonal smoothing
  # TRUE spelled out (T is reassignable and discouraged).
  forecast <- predict(hw, n.ahead = predictRange, prediction.interval = TRUE, level = 0.95)
  autoplot(forecast)
})
# Render the Holt-Winters forecast plot produced by hwInput().
output$hwPrediction = renderPlot(hwInput())
prophetInput <- reactive({
  # Reactive behind the "Prophet Predictions" tab: fit a prophet model on the
  # aggregated series and forecast input$predict periods ahead.
  cleanTail = filter(cleaned, cleaned$DateTime >= input$dateSelect[1] &
                              cleaned$DateTime <= input$dateSelect[2])
  predictRange = as.numeric(input$predict)  # number of periods to forecast
  # Map the UI interval label to an xts endpoint unit.
  # BUG FIX: the UI offers 'Every Minute (poor computer)'; the old comparison
  # used 'Every Minute (please no)', so minutes could never be selected.
  intervalSelected = switch(input$interval,
                            "Every Day (Default)"          = "days",
                            "Every Second (please no)"     = "seconds",
                            "Every Minute (poor computer)" = "minutes",
                            "Every Hour"                   = "hours",
                            "Every Week"                   = "weeks",
                            "hours")  # fallback mirrors the original default
  # Pick the data column matching the series selected in the UI.
  y = switch(input$series,
             "Global Active"   = cleanTail$Active,
             "Global Reactive" = cleanTail$Reactive,
             "Amps"            = cleanTail$Amps,
             "Kitchen"         = cleanTail$Kitchen,
             "Laundry"         = cleanTail$Laundry,
             "HVAC"            = cleanTail$HVAC,
             cleanTail$Active)
  ds = cleanTail$DateTime
  dat.xts <- xts(x = y, as.POSIXct(ds))          # xts series indexed by timestamp
  ep <- endpoints(dat.xts, intervalSelected)     # period boundaries for the unit
  # Per-period mean (renamed from `mean`, which shadowed base::mean).
  periodMean = period.apply(dat.xts, ep, mean)
  meanDF = as.data.frame(data.matrix(as.data.frame(periodMean)))
  meanDF = setDT(meanDF, keep.rownames = TRUE)[] # timestamps become a column
  colnames(meanDF) = c("ds", "y")
  # prophet requires a data frame with exactly ds/y columns.
  forecasting = data.frame(ds = meanDF$ds, y = meanDF$y)
  prophetPredictions = prophet(forecasting)      # fit the model
  future = make_future_dataframe(prophetPredictions, periods = predictRange)
  forecast = predict(prophetPredictions, future) # apply the model
  plot(prophetPredictions, forecast, ylab = "Active Watt Hours", xlab = "Date", main = "30 Day Prediction with Prophet")
})
#---------------------------------> <----------------------------
# Render the prophet forecast plot produced by prophetInput().
output$prophetPrediction = renderPlot(prophetInput())
############################################### End of Prediction Space #################################################
###-----------> Output a table showing the frequency of the items.
# Dashboard text: total consumption and cost for the filtered window.
output$dash = renderText({
price = dataInput()
# Tariffs in $/kWh; the off-peak rate is currently unused.
pricekwpeak = .159
pricekwoffpeak = .1252
# Per-row cost of each sub-metered area at the peak tariff.
price$price1 = price$Kitchen*pricekwpeak
price$price2 = price$Laundry*pricekwpeak
# NOTE(review): priceTot scales Active by /10000 while sumKWTot below uses
# /100000 — the two normalisations disagree; confirm the intended factors.
price$price3 = price$HVAC*pricekwpeak
price$priceTot = (price$Active)/10000*pricekwpeak
#-------Total amounts of power for each region
sumKW1 = sum( price$Kitchen)
sumKW2 = sum( price$Laundry)
sumKW3 = sum( price$HVAC)
sumKWTot = sum( price$Active)/100000
#--------Totalcosts for each region
sumP1 = sum( price$price1)
sumP2 = sum( price$price2)
sumP3 = sum( price$price3)
sumPTot = sum( price$priceTot)
#------------Totaled submetered
sumSubKW = sum(sumKW1,sumKW2,sumKW3)
sumSubP = sum(sumP1,sumP2,sumP3)
# NOTE(review): a percentage would normally multiply by 100, not 10; combined
# with the /100000 scaling above the reported percent is suspect — verify.
totPercent = (sumSubKW/sumKWTot)*10
# price$sumP1 <- cumsum(price$price1)
# price$sumP2 <- cumsum(price$price2)
# price$sumP3 <- cumsum(price$price3)
paste("For the selected date range of",input$daterange[1], "to", input$daterange[2], " a total of ", sumKWTot, "(kW) consumed. This cost approximately ", sumPTot , " dollars. The Kitchen used: ", sumKW1, " Watts and cost: ", sumP1, " dollars. The Laundry Room used: ", sumKW2, " Watts and cost: ", sumP2, " dollars. The HVAC used: ", sumKW3, " Watts and cost: ", sumP3, " dollars. In total, the sub metered areas cost" , sumSubP, " dollars, and represented ", totPercent, "% of the total house power consumption.")
})
output$combined = renderPlot({
  # Overlay all normalized series for the selected window on one axis.
  d = dataInput()
  plot(d$DateTime, d$Kitchen, type="l", ylab="KiloWatts", xlab="Date Range", main = "Normalized Plot of Entire Dataset")
  lines(d$DateTime, d$Laundry, type="l", col="red")
  lines(d$DateTime, d$HVAC, type="l", col="blue")
  lines(d$DateTime, d$Reactive/10000, type="l", col="green")
  # BUG FIX: the brown trace plotted Reactive a second time, while the legend
  # labels brown as "Active". Plot Active instead; /10000 keeps it near the
  # other normalized traces — TODO confirm the intended scale factor.
  lines(d$DateTime, d$Active/10000, type="l", col="brown")
  lines(d$DateTime, d$Voltage/1000, type="l", col="purple")
  lines(d$DateTime, d$Amps, type="l", col="orange")
  legend("topright", c("Active","Reactive","Volts", "Amps","Kitchen", "Laundry", "HVAC" ), lty=1, lwd=2.5, col=c("brown", "green", "purple", "orange", "black", "red", "blue"))
})
###------------> output the rules, and make them sortable by the drop down magic box thingy
# Render the filtered dataset as a sortable/searchable DataTable.
output$rules = DT::renderDataTable(dataInput())
###------------> Output the cool interactive scatterplot
# Overview of whole-house power: Active (base trace) with Reactive, Voltage
# and Amps overlaid, each scaled so they share one axis.
output$activeoverview = renderPlot({
  d = dataInput()
  plot(d$DateTime, d$Active/1000, type="l", ylab="KiloWatt Hours",
       xlab="Date Range", main = "Normalized Plot of Daily Power Usage")
  lines(d$DateTime, d$Reactive/100, type="l", col="green")
  lines(d$DateTime, d$Voltage/1000, type="l", col="purple")
  lines(d$DateTime, d$Amps/1000, type="l", col="orange")
  legend("topright", c("Active","Reactive","Volts", "Amps" ), lty=1, lwd=2.5,
         col=c("black", "green", "purple", "orange"))
})
###------------------> Submeter Overview Chart-------------------
# Overview of the three sub-metered rooms over the selected window.
output$suboverview = renderPlot({
  d = dataInput()
  plot(d$DateTime, d$Kitchen, type="l", ylab="Kilowatt Hours",
       xlab="Date Range", main ="Plot of Kitchen, Laundry and HVAC Requirements")
  lines(d$DateTime, d$Laundry, type="l", col="red")
  lines(d$DateTime, d$HVAC, type="l", col="blue")
  legend("topright", c("Kitchen", "Laundry", "HVAC" ), lty=1, lwd=2.5,
         col=c( "black", "red", "blue"))
})
###------------> Boring Histograms
output$hist = renderPlot({
  # Stacked histograms: whole-house power (top) and the sub-metered rooms
  # (bottom) for the selected window.
  sampled = dataInput()
  windowTxt = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " )
  # Bare column names inside aes() — aes(df$col) is discouraged by ggplot2 and
  # breaks grouping/faceting. Also dropped the fill= argument that was passed
  # to ggplot() itself, where it is silently ignored.
  actReact = ggplot(sampled, aes(Active/1000)) +
    geom_histogram() +
    geom_histogram(data = sampled, aes(Reactive/1000), fill = "red") +
    labs(title = "Most Frequent Power Level (Home) ", subtitle = windowTxt, x = "KiloWatts (Normalized)", y = "Number of Observations")
  sub = ggplot(sampled, aes(Kitchen)) +
    geom_histogram() +
    geom_histogram(data = sampled, aes(Laundry), fill = "red") +
    geom_histogram(data = sampled, aes(HVAC), fill = "green") +
    labs(title = "Most Frequent Power Levels (Submetered Rooms)", subtitle = windowTxt, x = "KiloWatts (Normalized)", y = "Number of Observations")
  grid.arrange(actReact, sub, ncol = 1, nrow = 2)
})
##---------> Check Active and Reactive Power
output$activecosts = renderPlot({
  # Reactive-power ratio plus cumulative demand for the selected window.
  sampled = dataInput()
  # Share of reactive power in total demand (scaled as in the original).
  sampled$plott = sampled$Reactive/((sampled$Active+sampled$Reactive)*100)
  # Running total of combined demand, scaled by 1000.
  sampled$sum1 <- cumsum((sampled$Active+sampled$Reactive)/1000)
  windowTxt = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " )
  # Bare column names inside aes() — aes(df$col) is discouraged by ggplot2.
  percentReactive = ggplot(sampled, aes(y = plott, x = DateTime)) +
    geom_point() + geom_smooth() +
    labs(y="% Reactive Power", x = "Date Range", title = "Active/Reactive Power Ratio",
         subtitle = paste("5% is Maximum for a Green Community. Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " )) +
    theme_bw()
  actReact = ggplot(sampled, aes(DateTime, Active)) +
    geom_line() +
    geom_line(data = sampled, aes(DateTime, Reactive), color = "red") +
    geom_line(data = sampled, aes(DateTime, sum1), color = "green") +
    labs(title = "Total Instant Demand and Cumulative Power Requirement", subtitle = windowTxt, x = "Date Range", y = "KiloWatts")
  grid.arrange(actReact, percentReactive, ncol = 1, nrow = 2)
})
########----------------Calculate the Costs associated with Each Room
output$subcost = renderPlot({
  # Cost breakdown of the three sub-metered rooms over the selected window:
  # pie + bar of totals, plus instantaneous and cumulative cost traces.
  price = dataInput()
  pricekwpeak = .159      # $/kWh peak tariff
  pricekwoffpeak = .1252  # $/kWh off-peak tariff (currently unused)
  price$price1 = price$Kitchen*pricekwpeak
  price$price2 = price$Laundry*pricekwpeak
  price$price3 = price$HVAC*pricekwpeak
  sum1 = sum( price$price1)
  sum2 = sum( price$price2)
  sum3 = sum( price$price3)
  # Running cost per room.
  price$sum1 <- cumsum(price$price1)
  price$sum2 <- cumsum(price$price2)
  price$sum3 <- cumsum(price$price3)
  windowTxt = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " )
  df <- data.frame(
    group = c("Kitchen", "Laundry", "HVAC"),
    value = c(sum1,sum2, sum3)
  )
  polar = ggplot(df, aes(x="", y=value, fill=group)) +
    geom_bar(width = 1, stat = "identity") + coord_polar("y", start=0) +
    labs(title = "Pie Chart of Consumption", subtitle = windowTxt, y = "Percent", x = "")
  bar = ggplot(df, aes(group, value)) +
    geom_col() +
    labs(title = "Relative Magnitutes of Power Consumed", subtitle = windowTxt, x = "SubMetered Area", y = "KiloWatt Hours")
  # Bare column names inside aes() — aes(df$col) is discouraged by ggplot2.
  instaPrice = ggplot(price, aes(DateTime, price1, fill = price1)) +
    geom_line() +
    geom_line(data = price, aes(DateTime, price2), color = "red") +
    geom_line(data = price, aes(DateTime, price3), color = "green") +
    labs(title = "Instant Submeter Cost", subtitle = windowTxt, x = "Date Range", y = "Total Cost ($)")
  totprice = ggplot(price, aes(DateTime, sum1)) +
    geom_line() +
    geom_line(data = price, aes(DateTime, sum2), color = "red") +
    geom_line(data = price, aes(DateTime, sum3), color = "green") +
    labs(title = "Aggregated Submeter Cost", subtitle = windowTxt, x = "Date Range", y = "Total Cost ($)")
  grid.arrange(polar, bar, instaPrice, totprice, ncol = 2, nrow = 2)
})
###############------------> Code to Histogram Chart ** Now with Extra Dates!
# Histogram panel (reactive): distribution of whole-home active/reactive
# power on top, per-room submeter distributions below. Returned value is
# the gtable from grid.arrange, which the download handlers pass to ggsave.
plotInput = reactive({
sampled = dataInput()
# NOTE(review): fill = "blue"/"purple" is passed to ggplot() itself, not
# to a geom, so it has no visible effect -- confirm the intended colors.
actReact = ggplot(sampled, aes( sampled$Active/1000), fill = "blue") +
geom_histogram() +
geom_histogram(data = sampled, aes( sampled$Reactive/1000), fill = "red") + labs(title = "Most Frequent Power Level (Home)", subtitle = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ), x = "KiloWatts (Normalized)", y = "Number of Observations")
sub = ggplot(sampled, aes( sampled$Kitchen), fill = "purple") +
geom_histogram() +
geom_histogram(data = sampled, aes( sampled$Laundry), fill = "red") +
geom_histogram(data = sampled, aes( sampled$HVAC), fill = "green")+ labs(title = "Most Frequent Power Levels (Submetered Rooms)", subtitle = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ), y = "Number of Observations")
# plot1 = hist(dataInput()$Active/100, col=rgb(1,0,0,0.5), main="Histogram of Active and Reactive Power", xlab="KiloWatts")
# hist(dataInput()$Reactive/10, col=rgb(0,0,1,0.5), add=T)
grid.arrange(actReact,sub ,ncol = 1, nrow = 2)
})
# Cost-overview panel (reactive), used by the downloadSubCost handler.
# NOTE(review): this is a line-for-line duplicate of output$subcost above;
# the render output could call plotInput2() to keep one copy of the logic.
plotInput2 = reactive({
price = dataInput()
# Tariffs in $ per kWh; pricekwoffpeak is unused (see note on output$subcost).
pricekwpeak = .159
pricekwoffpeak = .1252
# Instantaneous cost per reading for each submetered room.
price$price1 = price$Kitchen*pricekwpeak
price$price2 = price$Laundry*pricekwpeak
price$price3 = price$HVAC*pricekwpeak
# Window totals and cumulative cost per room.
sum1 = sum( price$price1)
sum2 = sum( price$price2)
sum3 = sum( price$price3)
price$sum1 <- cumsum(price$price1)
price$sum2 <- cumsum(price$price2)
price$sum3 <- cumsum(price$price3)
# Long-form totals for the pie and bar charts.
df <- data.frame(
group = c("Kitchen", "Laundry", "HVAC"),
value = c(sum1,sum2, sum3)
)
polar = ggplot(df, aes(x="", y=value, fill=group))+
geom_bar(width = 1, stat = "identity")+coord_polar("y", start=0)+ labs(title = "Pie Chart of Consumption", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), y = "Percent", x = "")
bar = ggplot(df, aes(group, value)) +
geom_col()+ labs(title = "Relative Magnitutes of Power Consumed", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), x = "SubMetered Area", y = "KiloWatt Hours")
instaPrice = ggplot(price, aes(price$DateTime, price$price1, fill = price$price1)) +
geom_line() +
geom_line(data = price, aes(price$DateTime, price$price2), color = "red") + #the damn plus must be on the same line
geom_line(data = price, aes(price$DateTime, price$price3), color = "green")+ labs(title = "Instant Submeter Cost", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), x = "Date Range", y = "Total Cost ($)")
totprice = ggplot(price, aes(price$DateTime, price$sum1)) +
geom_line() +
geom_line(data = price, aes(price$DateTime, price$sum2), color = "red") + #the damn plus must be on the same line
geom_line(data = price, aes(price$DateTime, price$sum3), color = "green")+ labs(title = "Aggregated Submeter Cost", subtitle = paste(input$daterange[1], "to", input$daterange[2], "(", input$time[1], "to", input$time[2],")", sep=" " ), x = "Date Range", y = "Total Cost ($)")
grid.arrange(polar, bar, instaPrice, totprice,ncol = 2, nrow = 2)
})
# Active/reactive panel (reactive), used by downloadActiveCosts: demand
# plus cumulative power on top, reactive-power ratio over time below.
plotInput3 = reactive({
sampled = dataInput()
# Share of reactive power in total power.
# NOTE(review): dividing by 100 yields (ratio)/100, not a percentage;
# a percentage would *multiply* by 100. The y axis is labelled
# "% Reactive Power" and the subtitle cites a 5% threshold -- confirm
# the intended scale.
sampled$plott = sampled$Reactive/((sampled$Active+sampled$Reactive)*100)
# Cumulative total power in kW units.
sampled$sum1 <- cumsum((sampled$Active+sampled$Reactive)/1000)
percentReactive = ggplot(sampled, aes(y = plott, x = DateTime))+ geom_point()+ geom_smooth() +labs(y="% Reactive Power", x = "Date Range", title = "Active/Reactive Power Ratio" , subtitle = paste("5% is Maximum for a Green Community. Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ))+ theme_bw()
actReact = ggplot(sampled, aes(sampled$DateTime, sampled$Active)) +
geom_line() +
geom_line(data = sampled, aes(sampled$DateTime, sampled$Reactive), color = "red") +
geom_line(data = sampled, aes(sampled$DateTime, sampled$sum1), color = "green")+ labs(title = "Total Instant Demand and Cumulative Power Requirement", subtitle = paste("Dates from:",input$daterange[1], "to", input$daterange[2], "And Time From:", input$time[1], "to", input$time[2], sep=" " ), x = "Date Range", y = "KiloWatts")
grid.arrange(actReact,percentReactive ,ncol = 1, nrow = 2)
})
# Reactive wrapper around the prophet forecast plot, consumed by the
# "downloadprophet" handler below.
# BUG FIX: plotInput4 was defined twice; the second, empty reactive({})
# silently overwrote this one, so ggsave() in output$downloadprophet
# received NULL instead of the forecast plot. The empty redefinition
# has been removed.
plotInput4 = reactive({
prophetInput()
})
# Download handlers: each saves the corresponding reactive plot to a PNG.
# NOTE(review): all six handlers use the identical filename
# "inputdataset.png"; per-plot filenames would be friendlier.
# NOTE(review): downloadCombined, downloadHist and downloadSubOverview all
# save plotInput() (the histogram panel) -- confirm the intended reactive
# for each button.
output$downloadCombined <- downloadHandler(
filename = function() { paste("inputdataset", '.png', sep='') },
content = function(file) {
ggsave(file, plot = plotInput(), device = "png")
}
)
# Histogram panel download.
output$downloadHist <- downloadHandler(
filename = function() { paste("inputdataset", '.png', sep='') },
content = function(file) {
ggsave(file, plot = plotInput(), device = "png")
}
)
# Active/reactive power panel download (plotInput3).
output$downloadActiveCosts <- downloadHandler(
filename = function() { paste("inputdataset", '.png', sep='') },
content = function(file) {
ggsave(file, plot = plotInput3(), device = "png")
}
)
# Submeter overview download (currently saves the histogram panel; see note).
output$downloadSubOverview <- downloadHandler(
filename = function() { paste("inputdataset", '.png', sep='') },
content = function(file) {
ggsave(file, plot = plotInput(), device = "png")
}
)
# Submeter cost panel download (plotInput2).
output$downloadSubCost <- downloadHandler(
filename = function() { paste("inputdataset", '.png', sep='') },
content = function(file) {
ggsave(file, plot = plotInput2(), device = "png")
}
)
# Prophet forecast download.
# NOTE(review): in the original file plotInput4 is redefined as an empty
# reactive just above the handlers, so this ggsave receives NULL.
output$downloadprophet <- downloadHandler(
filename = function() { paste("inputdataset", '.png', sep='') },
content = function(file) {
ggsave(file, plot = plotInput4(), device = "png")
}
)
# output$downloadData <- downloadHandler(
# filename = 'My_Rules.csv',
# content = function(file) {
# write.csv(rules2df(dataInput()), filename)
# }
# )
}
# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimateCellTypeFromProjection.R
\name{estimateCellTypeFromProjection}
\alias{estimateCellTypeFromProjection}
\title{Estimate the most likely cell type from the projection to the reference panel}
\usage{
estimateCellTypeFromProjection(rca.obj, confidence = NULL, ctRank = F,
cSCompute = F)
}
\arguments{
\item{rca.obj}{RCA object.}
\item{confidence}{a parameter indicating the difference between z-scores. If the difference is below this threshold, the cell type will be set to unknown. Default is NULL.}
\item{ctRank}{parameter indicating whether a relative rank coloring for each cell type shall be computed. Default is FALSE.}
\item{cSCompute}{parameter indicating whether the confidence score should be computed for each cell. Default is FALSE.}
}
\value{
RCA object.
}
\description{
Estimate the most likely cell type from the projection to the reference panel
}
| /man/estimateCellTypeFromProjection.Rd | no_license | leprohonmalo/RCAv2 | R | false | true | 950 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimateCellTypeFromProjection.R
\name{estimateCellTypeFromProjection}
\alias{estimateCellTypeFromProjection}
\title{Estimate the most likely cell type from the projection to the reference panel}
\usage{
estimateCellTypeFromProjection(rca.obj, confidence = NULL, ctRank = F,
cSCompute = F)
}
\arguments{
\item{rca.obj}{RCA object.}
\item{confidence}{a parameter indicating the difference between z-scores. If the difference is below this threshold, the cell type will be set to unknown. Default is NULL.}
\item{ctRank}{parameter indicating whether a relative rank coloring for each cell type shall be computed. Default is FALSE.}
\item{cSCompute}{parameter indicating whether the confidence score should be computed for each cell. Default is FALSE.}
}
\value{
RCA object.
}
\description{
Estimate the most likely cell type from the projection to the reference panel
}
|
# Load car (needed for vif() below). Install it only when it is missing;
# the original unconditional install.packages() re-downloaded the package
# on every run of the script.
if (!requireNamespace("car", quietly = TRUE)) {
  install.packages("car")
}
library(car)
# Build the regression dataset from the raw survey frame `kt`: keep NPS
# plus the attitudinal driver columns, then coerce the Likert-style
# columns (positions 2-12 of the selection) to numeric.
# NOTE(review): quality_news is selected twice here (and appears twice in
# the model formula below); the duplicate is harmless but likely unintended.
# NOTE(review): "willingess_to_pay" is the dataset's own (misspelled)
# column name -- do not "fix" the spelling without renaming it upstream.
kt_reg <- kt %>%
select(nps,
digital_exp,
willingess_to_pay,
keep_indie,
aligned_values,
quality_news,
daily_routine,
replaceability,
mission,
helps_job,
quality_news,
informed,
news_sources,
important_to_sup,
nps_recode) %>%
mutate(across(2:12, ~as.numeric(.x)))
# Pairwise correlations of the drivers (rows with any NA dropped first).
kt_reg %>%
drop_na() %>%
cor()
# OLS: willingness to pay regressed on all attitudinal drivers.
kt_model <- lm(formula = willingess_to_pay ~ digital_exp
+ nps
+ keep_indie
+ aligned_values
+ quality_news
+ daily_routine
+ replaceability
+ mission
+ helps_job
+ quality_news
+ informed
+ important_to_sup
+ news_sources,
data = kt_reg)
summary(kt_model)
# Variance inflation factors (car::vif) to screen for multicollinearity.
vif(kt_model)
# Frequency tables for two individual survey items.
kt %>%
count(replaceability)
kt %>%
count(news_sources) %>%
mutate(prop = n / sum(n))
# Mean driver scores at each level of willingness to pay.
kt_reg %>%
group_by(willingess_to_pay) %>%
summarise(n = n(),
aligned_values = mean(aligned_values, na.rm = T),
keep_indie = mean(keep_indie, na.rm = T),
important_to_sup = mean(important_to_sup, na.rm = T),
news_sources = mean(news_sources, na.rm = T),
replaceability = mean(replaceability , na.rm = T))
# Mean willingness to pay by age band; bar widths are proportional to
# group size. The `{ ... }` braces stop magrittr from inserting the piped
# data frame as barplot()'s first argument.
kt %>%
select(age, gender, willingess_to_pay) %>%
mutate(willingess_to_pay = as.numeric(willingess_to_pay)) %>%
group_by(age) %>%
summarise(willingess_to_pay = mean(willingess_to_pay, na.rm = T),
n = n()) %>%
drop_na() %>%
{ barplot(height = .$willingess_to_pay, names = .$age, width = .$n, col = "#F4B400", ylim = c(0,5))}
# Same bar plot for mean number of news sources by age band.
kt %>%
select(age, gender, news_sources) %>%
mutate(news_sources = as.numeric(news_sources)) %>%
group_by(age) %>%
summarise(news_sources = mean(news_sources, na.rm = T),
n = n()) %>%
drop_na() %>%
{ barplot(height = .$news_sources, names = .$age, width = .$n, col = "#F4B400", ylim = c(0,3))}
# Willingness to pay by subscription status (sub_a).
kt %>%
group_by(sub_a) %>%
summarise(n = n(),
willingess_to_pay = mean(as.numeric(willingess_to_pay), na.rm = T))
| /kt.R | no_license | jekinsmythFT/drl | R | false | false | 2,289 | r | install.packages("car")
library(car)
kt_reg <- kt %>%
select(nps,
digital_exp,
willingess_to_pay,
keep_indie,
aligned_values,
quality_news,
daily_routine,
replaceability,
mission,
helps_job,
quality_news,
informed,
news_sources,
important_to_sup,
nps_recode) %>%
mutate(across(2:12, ~as.numeric(.x)))
kt_reg %>%
drop_na() %>%
cor()
kt_model <- lm(formula = willingess_to_pay ~ digital_exp
+ nps
+ keep_indie
+ aligned_values
+ quality_news
+ daily_routine
+ replaceability
+ mission
+ helps_job
+ quality_news
+ informed
+ important_to_sup
+ news_sources,
data = kt_reg)
summary(kt_model)
vif(kt_model)
kt %>%
count(replaceability)
kt %>%
count(news_sources) %>%
mutate(prop = n / sum(n))
kt_reg %>%
group_by(willingess_to_pay) %>%
summarise(n = n(),
aligned_values = mean(aligned_values, na.rm = T),
keep_indie = mean(keep_indie, na.rm = T),
important_to_sup = mean(important_to_sup, na.rm = T),
news_sources = mean(news_sources, na.rm = T),
replaceability = mean(replaceability , na.rm = T))
kt %>%
select(age, gender, willingess_to_pay) %>%
mutate(willingess_to_pay = as.numeric(willingess_to_pay)) %>%
group_by(age) %>%
summarise(willingess_to_pay = mean(willingess_to_pay, na.rm = T),
n = n()) %>%
drop_na() %>%
{ barplot(height = .$willingess_to_pay, names = .$age, width = .$n, col = "#F4B400", ylim = c(0,5))}
kt %>%
select(age, gender, news_sources) %>%
mutate(news_sources = as.numeric(news_sources)) %>%
group_by(age) %>%
summarise(news_sources = mean(news_sources, na.rm = T),
n = n()) %>%
drop_na() %>%
{ barplot(height = .$news_sources, names = .$age, width = .$n, col = "#F4B400", ylim = c(0,3))}
kt %>%
group_by(sub_a) %>%
summarise(n = n(),
willingess_to_pay = mean(as.numeric(willingess_to_pay), na.rm = T))
|
#-----------------------
#KFW 1 Cross-Sectional Model
#Treatment: Early Demarcated through PPTAL by April 2001 (vs. demarcated after April 2001)
#Outcome: Median NDVI change in level from 1995-2001, Median NDVI change in level from 2001-2010
#Using MatchIt package instead of SCI
#-----------------------
library(devtools)
# Install SCI from GitHub only when it is missing; the original
# unconditional install_github() re-downloaded the package (and required
# network access) on every run of the script.
if (!requireNamespace("SCI", quietly = TRUE)) {
  devtools::install_github("itpir/SCI@master")
}
library(SCI)
library(stargazer)
loadLibs()  # presumably attaches SCI's companion libraries -- defined in SCI; confirm
library(MatchIt)
library(rgeos)
library(maptools)
library(rgdal)
library(sp)
# Read the polygon shapefile of analysis units.
# NOTE(review): maptools::readShapePoly is deprecated; rgdal::readOGR or
# sf::st_read is the supported replacement. Left as-is because downstream
# code indexes the resulting sp object via @data.
shpfile <- "processed_data/kfw_analysis_inputs.shp"
dta_Shp <- readShapePoly(shpfile)
#-------------------------------------------------
#-------------------------------------------------
#Pre-processing to create cross-sectional variable summaries
#-------------------------------------------------
#-------------------------------------------------
# timeRangeTrend() (from SCI) fits a per-unit trend over the columns whose
# names match the regex (presumably annual composites such as MeanL_1982
# ... MeanL_1995 -- confirm against the shapefile's attribute table).
#Calculate NDVI Trends
dta_Shp$pre_trend_NDVI_mean <- timeRangeTrend(dta_Shp,"MeanL_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_NDVI_max <- timeRangeTrend(dta_Shp,"MaxL_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_NDVI_med <- timeRangeTrend(dta_Shp,"MedL_[0-9][0-9][0-9][0-9]",1982,1995,"id")
#NDVI Max Trends for 1995-2001
dta_Shp$post_trend_NDVI_95_01 <- timeRangeTrend(dta_Shp,"MaxL_[0-9][0-9][0-9][0-9]",1995,2001,"id")
# Level change = simple difference between endpoint years.
dta_Shp@data["NDVILevelChange_95_01"] <- dta_Shp$MaxL_2001 - dta_Shp$MaxL_1995
#dta_Shp@data["NDVIslopeChange_95_01"] <- dta_Shp@data["post_trend_NDVI_95_01"] - dta_Shp@data["pre_trend_NDVI_max"]
#NDVI Max Trends for 2001-2010
dta_Shp$post_trend_NDVI_01_10 <- timeRangeTrend(dta_Shp,"MaxL_[0-9][0-9][0-9][0-9]",2001,2010,"id")
dta_Shp@data["NDVILevelChange_01_10"] <- dta_Shp$MaxL_2010 - dta_Shp$MaxL_2001
#dta_Shp@data["NDVIslopeChange_01_10"] <- dta_Shp@data["post_trend_NDVI_01_10"] - dta_Shp@data["pre_trend_NDVI_max"]
#NDVI Med Trends for 1995-2001
dta_Shp$post_trend_NDVI_95_01_Med <- timeRangeTrend(dta_Shp,"MedL_[0-9][0-9][0-9][0-9]",1995,2001,"id")
dta_Shp@data["NDVILevelChange_95_01_Med"] <- dta_Shp$MedL_2001 - dta_Shp$MedL_1995
#NDVI Med Trends for 2001-2010
dta_Shp$post_trend_NDVI_01_10_Med <- timeRangeTrend(dta_Shp,"MedL_[0-9][0-9][0-9][0-9]",2001,2010,"id")
dta_Shp@data["NDVILevelChange_01_10_Med"] <- dta_Shp$MedL_2010 - dta_Shp$MedL_2001
# Climate controls: pre-period (1982-1995) and post-period trends for
# temperature and precipitation, used later as matching/model covariates.
#Calculate Temp and Precip Pre and Post Trends
dta_Shp$pre_trend_temp_mean <- timeRangeTrend(dta_Shp,"MeanT_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_temp_max <- timeRangeTrend(dta_Shp,"MaxT_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_temp_min <- timeRangeTrend(dta_Shp,"MinT_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$post_trend_temp_95_01 <- timeRangeTrend(dta_Shp,"MeanT_[0-9][0-9][0-9][0-9]",1995,2001,"id")
dta_Shp$post_trend_temp_01_10 <- timeRangeTrend(dta_Shp,"MeanT_[0-9][0-9][0-9][0-9]",2001,2010,"id")
dta_Shp$pre_trend_precip_mean <- timeRangeTrend(dta_Shp,"MeanP_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_precip_max <- timeRangeTrend(dta_Shp,"MaxP_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_precip_min <- timeRangeTrend(dta_Shp,"MinP_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$post_trend_precip_95_01 <- timeRangeTrend(dta_Shp,"MeanP_[0-9][0-9][0-9][0-9]",1995,2001,"id")
dta_Shp$post_trend_precip_01_10 <- timeRangeTrend(dta_Shp,"MeanP_[0-9][0-9][0-9][0-9]",2001,2010,"id")
#-------------------------------------------------
#-------------------------------------------------
#Define the Treatment Variable and Population
#-------------------------------------------------
#-------------------------------------------------
# TrtBin = 1 when demarcation ended by April 2001: first mark everything
# with demend_y <= 2001 as treated, then unmark units demarcated May-Dec
# 2001. NA comparisons are tolerated here because logical subscripts with
# NA are skipped when the replacement value has length one; units with a
# missing demarcation year are dropped just below anyway.
#Make a binary to test treatment..
dta_Shp@data["TrtBin"] <- 0
dta_Shp@data$TrtBin[dta_Shp@data$demend_y <= 2001] <- 1
dta_Shp@data$TrtBin[(dta_Shp@data$demend_m > 4) & (dta_Shp@data$demend_y==2001)] <- 0
#Remove units that did not ever receive any treatment (within-sample test)
dta_Shp@data$NA_check <- 0
dta_Shp@data$NA_check[is.na(dta_Shp@data$demend_y)] <- 1
int_Shp <- dta_Shp[dta_Shp@data$NA_check != 1,]
dta_Shp <- int_Shp
# Sanity check: counts of control (0) vs treated (1) units.
table(dta_Shp@data$TrtBin)
#--------------------------
#Matching, with replacement
#--------------------------
#identify vars needed for psm model and analytic models (only these will be included in new matched dataset)
# aVars restricts the matched dataset to exactly the columns used by the
# propensity-score model and the outcome models below.
aVars <- c("reu_id","UF","TrtBin", "terrai_are","Pop_1990","Pop_2000", "MeanT_1995","MeanT_2001", "pre_trend_temp_mean",
"pre_trend_temp_min", "pre_trend_temp_max", "MeanP_1995", "MeanP_2001","pre_trend_precip_min",
"pre_trend_NDVI_mean", "pre_trend_NDVI_max","pre_trend_NDVI_med",
"MaxL_1995","MedL_1995","NDVILevelChange_95_01","NDVILevelChange_01_10",
"NDVILevelChange_95_01_Med","NDVILevelChange_01_10_Med",
"Slope","Elevation","Riv_Dist","Road_dist",
"pre_trend_precip_mean", "pre_trend_precip_max",
"post_trend_temp_95_01","post_trend_temp_01_10",
"post_trend_precip_95_01","post_trend_precip_01_10")
#propensity score model
#replace=TRUE to match with replacement
#exact="UF" restricts matches to same Brazilian state and discard="both" must accompany it
#use resulting weights in models to account for matching with replacement
psmModel <- matchit(TrtBin ~ terrai_are + Pop_1990 + MeanT_1995 + pre_trend_temp_mean + pre_trend_temp_min +
pre_trend_temp_max + MeanP_1995 + pre_trend_precip_min +
pre_trend_NDVI_mean + pre_trend_NDVI_max + pre_trend_NDVI_med+
Slope + Elevation + MedL_1995 + Riv_Dist + Road_dist +
pre_trend_precip_mean + pre_trend_precip_max,
data=dta_Shp@data[aVars],
method="nearest",replace=TRUE, exact="UF",discard="both")
print(summary(psmModel))
#create new dataset with matches
# match.data() attaches the matching weights in a `weights` column, used
# by every lm() call below.
model_data<-match.data(psmModel)
#check states that were dropped out
summary(model_data$UF)
##create standardized dataset to produce standardized coefficients in models that are easy to output
#identify vars for inclusion in standardized dataset
#include all numeric variables from psm equation and that will be included in models
#exclude any id fields and weights created from matchit
# stvars <- c("TrtBin", "terrai_are","Pop_1990","Pop_2000" ,"MeanT_1995","MeanT_2001", "pre_trend_temp_mean",
#             "pre_trend_temp_min", "pre_trend_temp_max", "MeanP_1995","MeanP_2001", "pre_trend_precip_min",
#             "pre_trend_NDVI_mean", "pre_trend_NDVI_max","pre_trend_NDVI_med",
#             "NDVILevelChange_95_01_Med","NDVILevelChange_01_10_Med",
#             "MaxL_1995","MedL_1995",
#             "Slope","Elevation","Riv_Dist","Road_dist",
#             "pre_trend_precip_mean", "pre_trend_precip_max",
#             "NDVILevelChange_95_01","NDVILevelChange_01_10","post_trend_temp_95_01","post_trend_temp_01_10",
#             "post_trend_precip_95_01","post_trend_precip_01_10")
#
# model_data_st<- model_data
# model_data_st[stvars]<-lapply(model_data_st[stvars],scale)
#--------------
#Analytic Models
#--------------
##Early Models, Outcome: 1995-2001 Median
#Create dataset with some common names for stargazer
# Rename the period-specific baselines to shared names (Pop_B, MeanT_B,
# ...) so the early- and late-period models can be shown side-by-side in
# one stargazer table.
model_data_early <- model_data
colnames(model_data_early)[(colnames(model_data_early)=="Pop_1990")] <- "Pop_B"
colnames(model_data_early)[(colnames(model_data_early)=="MeanT_1995")] <- "MeanT_B"
colnames(model_data_early)[(colnames(model_data_early)=="MeanP_1995")] <- "MeanP_B"
colnames(model_data_early)[(colnames(model_data_early)=="post_trend_temp_95_01")] <- "post_trend_temp"
colnames(model_data_early)[(colnames(model_data_early)=="post_trend_precip_95_01")] <- "post_trend_precip"
#ModelEarly2, treatment effect + weights, 1995-2001 Median
# NOTE(review): this model is fit on model_data rather than
# model_data_early; harmless here because only TrtBin and the outcome are
# used, but worth unifying.
ModelEarly2_Med <- lm(NDVILevelChange_95_01_Med ~ TrtBin, data=model_data, weights=(weights))
#ModelEarly3, treatment effect + weights + covars, 1995-2001 Median
ModelEarly3_Med<-lm(NDVILevelChange_95_01_Med~TrtBin +pre_trend_NDVI_med + MedL_1995 + terrai_are+Pop_B+
MeanT_B + post_trend_temp+
MeanP_B + post_trend_precip+
Slope+Elevation+Riv_Dist+Road_dist,
data=model_data_early,
weights=(weights))
##Late Models
#Create dataset with some common names for stargazer
model_data_late<-model_data
colnames(model_data_late)[(colnames(model_data_late)=="Pop_2000")] <- "Pop_B"
colnames(model_data_late)[(colnames(model_data_late)=="MeanT_2001")] <- "MeanT_B"
colnames(model_data_late)[(colnames(model_data_late)=="MeanP_2001")] <- "MeanP_B"
colnames(model_data_late)[(colnames(model_data_late)=="post_trend_temp_01_10")] <- "post_trend_temp"
colnames(model_data_late)[(colnames(model_data_late)=="post_trend_precip_01_10")] <- "post_trend_precip"
#ModelLate, treatment effect + weights + covars, 2001-2010 Median
ModelLate_Med<-lm(NDVILevelChange_01_10_Med~TrtBin+ pre_trend_NDVI_med + MedL_1995+terrai_are+Pop_B+
MeanT_B+post_trend_temp+
MeanP_B + post_trend_precip+
Slope + Elevation + Riv_Dist + Road_dist,
data=model_data_late,
weights=(weights))
#-------------
#Stargazer
#-------------
# Combined regression table (HTML).
# NOTE(review): the first dep.var label reads "Median NDVI 1995-2010" but
# the early models cover 1995-2001 -- confirm the intended label.
stargazer(ModelEarly2_Med, ModelEarly3_Med,ModelLate_Med,
keep=c("TrtBin", "pre_trend_NDVI_med","MedL_1995", "terrai_are","Pop_B","MeanT_B","post_trend_temp","MeanP_B",
"post_trend_precip","Slope","Elevation","Riv_Dist","Road_dist"),
covariate.labels=c("Treatment (Early Demarcation)", "Pre-Trend NDVI", "Baseline NDVI", "Area (hectares)","Baseline Population Density",
"Baseline Temperature", "Temperature Trends","Baseline Precipitation","Precipitation Trends",
"Slope", "Elevation", "Distance to River", "Distance to Road"),
dep.var.labels=c("Median NDVI 1995-2010"," Median NDVI 2001-2010"),
title="Regression Results", type="html", omit.stat=c("f","ser"), align=TRUE)
| /KFW_CrossSectionResults_Median_Pre2001_MatchIt.R | no_license | aiddata/KFW_Amazon | R | false | false | 9,919 | r |
#-----------------------
#KFW 1 Cross-Sectional Model
#Treatment: Early Demarcated through PPTAL by April 2001 (vs. demarcated after April 2001)
#Outcome: Median NDVI change in level from 1995-2001, Median NDVI change in level from 2001-2010
#Using MatchIt package instead of SCI
#-----------------------
library(devtools)
devtools::install_github("itpir/SCI@master")
library(SCI)
library(stargazer)
loadLibs()
library(MatchIt)
library(rgeos)
library(maptools)
library(rgdal)
library(sp)
shpfile = "processed_data/kfw_analysis_inputs.shp"
dta_Shp = readShapePoly(shpfile)
#-------------------------------------------------
#-------------------------------------------------
#Pre-processing to create cross-sectional variable summaries
#-------------------------------------------------
#-------------------------------------------------
#Calculate NDVI Trends
dta_Shp$pre_trend_NDVI_mean <- timeRangeTrend(dta_Shp,"MeanL_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_NDVI_max <- timeRangeTrend(dta_Shp,"MaxL_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_NDVI_med <- timeRangeTrend(dta_Shp,"MedL_[0-9][0-9][0-9][0-9]",1982,1995,"id")
#NDVI Max Trends for 1995-2001
dta_Shp$post_trend_NDVI_95_01 <- timeRangeTrend(dta_Shp,"MaxL_[0-9][0-9][0-9][0-9]",1995,2001,"id")
dta_Shp@data["NDVILevelChange_95_01"] <- dta_Shp$MaxL_2001 - dta_Shp$MaxL_1995
#dta_Shp@data["NDVIslopeChange_95_01"] <- dta_Shp@data["post_trend_NDVI_95_01"] - dta_Shp@data["pre_trend_NDVI_max"]
#NDVI Max Trends for 2001-2010
dta_Shp$post_trend_NDVI_01_10 <- timeRangeTrend(dta_Shp,"MaxL_[0-9][0-9][0-9][0-9]",2001,2010,"id")
dta_Shp@data["NDVILevelChange_01_10"] <- dta_Shp$MaxL_2010 - dta_Shp$MaxL_2001
#dta_Shp@data["NDVIslopeChange_01_10"] <- dta_Shp@data["post_trend_NDVI_01_10"] - dta_Shp@data["pre_trend_NDVI_max"]
#NDVI Med Trends for 1995-2001
dta_Shp$post_trend_NDVI_95_01_Med <- timeRangeTrend(dta_Shp,"MedL_[0-9][0-9][0-9][0-9]",1995,2001,"id")
dta_Shp@data["NDVILevelChange_95_01_Med"] <- dta_Shp$MedL_2001 - dta_Shp$MedL_1995
#NDVI Med Trends for 2001-2010
dta_Shp$post_trend_NDVI_01_10_Med <- timeRangeTrend(dta_Shp,"MedL_[0-9][0-9][0-9][0-9]",2001,2010,"id")
dta_Shp@data["NDVILevelChange_01_10_Med"] <- dta_Shp$MedL_2010 - dta_Shp$MedL_2001
#Calculate Temp and Precip Pre and Post Trends
dta_Shp$pre_trend_temp_mean <- timeRangeTrend(dta_Shp,"MeanT_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_temp_max <- timeRangeTrend(dta_Shp,"MaxT_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_temp_min <- timeRangeTrend(dta_Shp,"MinT_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$post_trend_temp_95_01 <- timeRangeTrend(dta_Shp,"MeanT_[0-9][0-9][0-9][0-9]",1995,2001,"id")
dta_Shp$post_trend_temp_01_10 <- timeRangeTrend(dta_Shp,"MeanT_[0-9][0-9][0-9][0-9]",2001,2010,"id")
dta_Shp$pre_trend_precip_mean <- timeRangeTrend(dta_Shp,"MeanP_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_precip_max <- timeRangeTrend(dta_Shp,"MaxP_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$pre_trend_precip_min <- timeRangeTrend(dta_Shp,"MinP_[0-9][0-9][0-9][0-9]",1982,1995,"id")
dta_Shp$post_trend_precip_95_01 <- timeRangeTrend(dta_Shp,"MeanP_[0-9][0-9][0-9][0-9]",1995,2001,"id")
dta_Shp$post_trend_precip_01_10 <- timeRangeTrend(dta_Shp,"MeanP_[0-9][0-9][0-9][0-9]",2001,2010,"id")
#-------------------------------------------------
#-------------------------------------------------
#Define the Treatment Variable and Population
#-------------------------------------------------
#-------------------------------------------------
#Make a binary to test treatment..
dta_Shp@data["TrtBin"] <- 0
dta_Shp@data$TrtBin[dta_Shp@data$demend_y <= 2001] <- 1
dta_Shp@data$TrtBin[(dta_Shp@data$demend_m > 4) & (dta_Shp@data$demend_y==2001)] <- 0
#Remove units that did not ever receive any treatment (within-sample test)
dta_Shp@data$NA_check <- 0
dta_Shp@data$NA_check[is.na(dta_Shp@data$demend_y)] <- 1
int_Shp <- dta_Shp[dta_Shp@data$NA_check != 1,]
dta_Shp <- int_Shp
table(dta_Shp@data$TrtBin)
#--------------------------
#Matching, with replacement
#--------------------------
#identify vars needed for psm model and analytic models (only these will be included in new matched dataset)
aVars <- c("reu_id","UF","TrtBin", "terrai_are","Pop_1990","Pop_2000", "MeanT_1995","MeanT_2001", "pre_trend_temp_mean",
"pre_trend_temp_min", "pre_trend_temp_max", "MeanP_1995", "MeanP_2001","pre_trend_precip_min",
"pre_trend_NDVI_mean", "pre_trend_NDVI_max","pre_trend_NDVI_med",
"MaxL_1995","MedL_1995","NDVILevelChange_95_01","NDVILevelChange_01_10",
"NDVILevelChange_95_01_Med","NDVILevelChange_01_10_Med",
"Slope","Elevation","Riv_Dist","Road_dist",
"pre_trend_precip_mean", "pre_trend_precip_max",
"post_trend_temp_95_01","post_trend_temp_01_10",
"post_trend_precip_95_01","post_trend_precip_01_10")
#propensity score model
#replace=TRUE to match with replacement
#exact="UF" restricts matches to same Brazilian state and discard="both" must accompany it
#use resulting weights in models to account for matching with replacement
psmModel <- matchit(TrtBin ~ terrai_are + Pop_1990 + MeanT_1995 + pre_trend_temp_mean + pre_trend_temp_min +
pre_trend_temp_max + MeanP_1995 + pre_trend_precip_min +
pre_trend_NDVI_mean + pre_trend_NDVI_max + pre_trend_NDVI_med+
Slope + Elevation + MedL_1995 + Riv_Dist + Road_dist +
pre_trend_precip_mean + pre_trend_precip_max,
data=dta_Shp@data[aVars],
method="nearest",replace=TRUE, exact="UF",discard="both")
print(summary(psmModel))
#create new dataset with matches
model_data<-match.data(psmModel)
#check states that were dropped out
summary(model_data$UF)
##create standardized dataset to produce standardized coefficients in models that are easy to output
#identify vars for inclusion in standardized dataset
#include all numeric variables from psm equation and that will be included in models
#exclude any id fields and weights created from matchit
# stvars <- c("TrtBin", "terrai_are","Pop_1990","Pop_2000" ,"MeanT_1995","MeanT_2001", "pre_trend_temp_mean",
# "pre_trend_temp_min", "pre_trend_temp_max", "MeanP_1995","MeanP_2001", "pre_trend_precip_min",
# "pre_trend_NDVI_mean", "pre_trend_NDVI_max","pre_trend_NDVI_med",
# "NDVILevelChange_95_01_Med","NDVILevelChange_01_10_Med",
# "MaxL_1995","MedL_1995",
# "Slope","Elevation","Riv_Dist","Road_dist",
# "pre_trend_precip_mean", "pre_trend_precip_max",
# "NDVILevelChange_95_01","NDVILevelChange_01_10","post_trend_temp_95_01","post_trend_temp_01_10",
# "post_trend_precip_95_01","post_trend_precip_01_10")
#
# model_data_st<- model_data
# model_data_st[stvars]<-lapply(model_data_st[stvars],scale)
#--------------
#Analytic Models
#--------------
##Early Models, Outcome: 1995-2001 Median
#Create dataset with some common names for stargazer
model_data_early <- model_data
colnames(model_data_early)[(colnames(model_data_early)=="Pop_1990")] <- "Pop_B"
colnames(model_data_early)[(colnames(model_data_early)=="MeanT_1995")] <- "MeanT_B"
colnames(model_data_early)[(colnames(model_data_early)=="MeanP_1995")] <- "MeanP_B"
colnames(model_data_early)[(colnames(model_data_early)=="post_trend_temp_95_01")] <- "post_trend_temp"
colnames(model_data_early)[(colnames(model_data_early)=="post_trend_precip_95_01")] <- "post_trend_precip"
#ModelEarly2, treatment effect + weights, 1995-2001 Median
ModelEarly2_Med <- lm(NDVILevelChange_95_01_Med ~ TrtBin, data=model_data, weights=(weights))
#ModelEarly3, treatment effect + weights + covars, 1995-2001 Median
ModelEarly3_Med<-lm(NDVILevelChange_95_01_Med~TrtBin +pre_trend_NDVI_med + MedL_1995 + terrai_are+Pop_B+
MeanT_B + post_trend_temp+
MeanP_B + post_trend_precip+
Slope+Elevation+Riv_Dist+Road_dist,
data=model_data_early,
weights=(weights))
##Late Models
#Create dataset with some common names for stargazer
model_data_late<-model_data
colnames(model_data_late)[(colnames(model_data_late)=="Pop_2000")] <- "Pop_B"
colnames(model_data_late)[(colnames(model_data_late)=="MeanT_2001")] <- "MeanT_B"
colnames(model_data_late)[(colnames(model_data_late)=="MeanP_2001")] <- "MeanP_B"
colnames(model_data_late)[(colnames(model_data_late)=="post_trend_temp_01_10")] <- "post_trend_temp"
colnames(model_data_late)[(colnames(model_data_late)=="post_trend_precip_01_10")] <- "post_trend_precip"
#ModelLate, treatment effect + weights + covars, 2001-2010 Median
ModelLate_Med<-lm(NDVILevelChange_01_10_Med~TrtBin+ pre_trend_NDVI_med + MedL_1995+terrai_are+Pop_B+
MeanT_B+post_trend_temp+
MeanP_B + post_trend_precip+
Slope + Elevation + Riv_Dist + Road_dist,
data=model_data_late,
weights=(weights))
#-------------
#Stargazer
#-------------
stargazer(ModelEarly2_Med, ModelEarly3_Med,ModelLate_Med,
keep=c("TrtBin", "pre_trend_NDVI_med","MedL_1995", "terrai_are","Pop_B","MeanT_B","post_trend_temp","MeanP_B",
"post_trend_precip","Slope","Elevation","Riv_Dist","Road_dist"),
covariate.labels=c("Treatment (Early Demarcation)", "Pre-Trend NDVI", "Baseline NDVI", "Area (hectares)","Baseline Population Density",
"Baseline Temperature", "Temperature Trends","Baseline Precipitation","Precipitation Trends",
"Slope", "Elevation", "Distance to River", "Distance to Road"),
dep.var.labels=c("Median NDVI 1995-2010"," Median NDVI 2001-2010"),
title="Regression Results", type="html", omit.stat=c("f","ser"), align=TRUE)
|
## Construct a matrix wrapper whose inverse can be cached.
##
## Returns a list of four accessor functions closed over the matrix `x`
## and its cached inverse:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(value) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- value
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(value) cached_inverse <<- value,
    getinverse = function() cached_inverse
  )
}
## Return the inverse of the matrix held in `x` (an object produced by
## makeCacheMatrix). The inverse is computed with solve() on the first
## call only; later calls emit a message and serve the cached copy.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached inverse")
  }
  cached
}
| /cachematrix.R | no_license | kuangchen/ProgrammingAssignment2 | R | false | false | 1,163 | r | ## Put comments here that give an overall description of what your
## functions do
## Construct a matrix wrapper whose inverse can be cached.
##
## Returns a list of four accessor functions closed over the matrix `x`
## and its cached inverse:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse() -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(value) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- value
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(value) cached_inverse <<- value,
    getinverse = function() cached_inverse
  )
}
## Return the inverse of the matrix held in `x` (an object produced by
## makeCacheMatrix). The inverse is computed with solve() on the first
## call only; later calls emit a message and serve the cached copy.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached inverse")
  }
  cached
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{head.hexlattice}
\alias{head.hexlattice}
\title{Return the first rows of a hexlattice data frame}
\usage{
\method{head}{hexlattice}(x, n = 6L, geometry = FALSE, ...)
}
\arguments{
\item{x}{A \code{hexlattice} object.}
\item{n}{Number of rows to return or, if negative, the number of
trailing rows to omit.}
\item{geometry}{If TRUE, include the geometry column if present. If FALSE
(default) omit the geometry column.}
\item{...}{Arguments to be passed to other methods (presently unused).}
}
\value{
A data frame. If the lattice has geometries this will be an
\code{sf} data frame.
}
\description{
Return the first rows of a hexlattice data frame
}
| /man/head.hexlattice.Rd | permissive | mbedward/sfhextools | R | false | true | 758 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{head.hexlattice}
\alias{head.hexlattice}
\title{Return the first rows of a hexlattice data frame}
\usage{
\method{head}{hexlattice}(x, n = 6L, geometry = FALSE, ...)
}
\arguments{
\item{x}{A \code{hexlattice} object.}
\item{n}{Number of rows to return or, if negative, the number of
trailing rows to omit.}
\item{geometry}{If TRUE, include the geometry column if present. If FALSE
(default) omit the geometry column.}
\item{...}{Arguments to be passed to other methods (presently unused).}
}
\value{
A data frame. If the lattice has geometries this will be an
\code{sf} data frame.
}
\description{
Return the first rows of a hexlattice data frame
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{initDb}
\alias{initDb}
\title{initDb - Initialization of the package so the GTEx networks can be used with
CoExpNets}
\usage{
initDb(mandatory = F)
}
\arguments{
\item{mandatory}{If this parameter is `TRUE` then the networks will be added no matter whether they were already there.}
}
\value{
No value
}
\description{
initDb - Initialization of the package so the GTEx networks can be used with
CoExpNets
}
| /man/initDb.Rd | permissive | juanbot/CoExpGTEx | R | false | true | 498 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{initDb}
\alias{initDb}
\title{initDb - Initialization of the package so the GTEx networks can be used with
CoExpNets}
\usage{
initDb(mandatory = F)
}
\arguments{
\item{mandatory}{If this parameter is `TRUE` then the networks will be added no matter whether they were already there.}
}
\value{
No value
}
\description{
initDb - Initialization of the package so the GTEx networks can be used with
CoExpNets
}
|
# Extracted examples for optional::match_with (functional-style pattern
# matching); requires the `optional` and `magrittr` packages.
library(optional)
### Name: match_with
### Title: Match With
### Aliases: match_with
### ** Examples
library(magrittr)
a <- 5
# match_with takes pattern/handler pairs; expected output shown below.
match_with(a,
. %>% option(.), paste,
none, function() "Error!"
)
## [1] 5
# Patterns may be exact values, lists of values, or magrittr-built
# predicates, as in the three pairs below.
match_with(a,
1, function() "Matched exact value",
list(2, 3, 4), function(x) paste("Matched in list:", x),
. %>% if (. > 4) ., function(x) paste("Matched in condition:", x)
)
## [1] "Matched in condition: 5"
| /data/genthat_extracted_code/optional/examples/match_with.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 461 | r | library(optional)
# Extracted examples for optional::match_with (pattern matching);
# requires the `optional` (loaded above) and `magrittr` packages.
### Name: match_with
### Title: Match With
### Aliases: match_with
### ** Examples
library(magrittr)
a <- 5
# match_with takes pattern/handler pairs; expected output shown below.
match_with(a,
. %>% option(.), paste,
none, function() "Error!"
)
## [1] 5
# Patterns may be exact values, lists of values, or magrittr-built
# predicates, as in the three pairs below.
match_with(a,
1, function() "Matched exact value",
list(2, 3, 4), function(x) paste("Matched in list:", x),
. %>% if (. > 4) ., function(x) paste("Matched in condition:", x)
)
## [1] "Matched in condition: 5"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_objects.R
\name{Site}
\alias{Site}
\title{Site Object}
\usage{
Site(accountId = NULL, approved = NULL, directorySiteId = NULL,
directorySiteIdDimensionValue = NULL, id = NULL,
idDimensionValue = NULL, keyName = NULL, name = NULL,
siteContacts = NULL, siteSettings = NULL, subaccountId = NULL)
}
\arguments{
\item{accountId}{Account ID of this site}
\item{approved}{Whether this site is approved}
\item{directorySiteId}{Directory site associated with this site}
\item{directorySiteIdDimensionValue}{Dimension value for the ID of the directory site}
\item{id}{ID of this site}
\item{idDimensionValue}{Dimension value for the ID of this site}
\item{keyName}{Key name of this site}
\item{name}{Name of this site}
\item{siteContacts}{Site contacts}
\item{siteSettings}{Site-wide settings}
\item{subaccountId}{Subaccount ID of this site}
}
\value{
Site object
}
\description{
Site Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Contains properties of a site.
}
\seealso{
Other Site functions: \code{\link{sites.insert}},
\code{\link{sites.patch}}, \code{\link{sites.update}}
}
| /googledfareportingv24.auto/man/Site.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,221 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_objects.R
\name{Site}
\alias{Site}
\title{Site Object}
\usage{
Site(accountId = NULL, approved = NULL, directorySiteId = NULL,
directorySiteIdDimensionValue = NULL, id = NULL,
idDimensionValue = NULL, keyName = NULL, name = NULL,
siteContacts = NULL, siteSettings = NULL, subaccountId = NULL)
}
\arguments{
\item{accountId}{Account ID of this site}
\item{approved}{Whether this site is approved}
\item{directorySiteId}{Directory site associated with this site}
\item{directorySiteIdDimensionValue}{Dimension value for the ID of the directory site}
\item{id}{ID of this site}
\item{idDimensionValue}{Dimension value for the ID of this site}
\item{keyName}{Key name of this site}
\item{name}{Name of this site}
\item{siteContacts}{Site contacts}
\item{siteSettings}{Site-wide settings}
\item{subaccountId}{Subaccount ID of this site}
}
\value{
Site object
}
\description{
Site Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Contains properties of a site.
}
\seealso{
Other Site functions: \code{\link{sites.insert}},
\code{\link{sites.patch}}, \code{\link{sites.update}}
}
|
/*
* tkMacMenu.r --
*
* Resources needed by menus.
*
* This file also contains the icons 'SICN' used by the menu code
* in menu items.
*
* Copyright (c) 1997 Sun Microsystems, Inc.
*
* See the file "license.terms" for information on usage and redistribution
* of this file, and for a DISCLAIMER OF ALL WARRANTIES.
*
* RCS: @(#) $Id: tkMacMenu.r,v 1.1.1.1 2007/07/10 15:05:17 duncan Exp $
*/
#include <Types.r>
/*
* Icons used in menu items.
*/
/* Seven menu-item icons stored as raw hex-encoded bitmap data.
   Do not reflow or hand-edit the hex strings. */
resource 'SICN' (128, preload, locked) {
{ /* array: 7 elements */
/* [1] */
$"0000 0000 8000 C000 E000 F000 F800 FC00"
$"F800 F000 E000 C000 80",
/* [2] */
$"0000 0000 0000 0800 1400 2200 4100 8080"
$"E380 2200 2200 2200 3E",
/* [3] */
$"0000 0000 0000 0000 0000 F8F0 C4F0 F270"
$"0900 0480 0270 0130 00F0",
/* [4] */
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 E4E0 CE60 1B00 3180",
/* [5] */
$"0000 0000 0000 0000 6300 9480 9480 7F00"
$"1400 7F00 9480 9480 63",
/* [6] */
$"0000 0000 0000 0000 0000 3FF8 1FF0 0FE0"
$"07C0 0380 01",
/* [7] */
$"0000 0000 0000 0000 0000 0100 0380 07C0"
$"0FE0 1FF0 3FF8"
}
};
| /extras/src/tk/mac/tkMacMenu.r | permissive | muschellij2/FSL6.0.0 | R | false | false | 1,126 | r | /*
* tkMacMenu.r --
*
* Resources needed by menus.
*
* This file also contains the icons 'SICN' used by the menu code
* in menu items.
*
* Copyright (c) 1997 Sun Microsystems, Inc.
*
* See the file "license.terms" for information on usage and redistribution
* of this file, and for a DISCLAIMER OF ALL WARRANTIES.
*
* RCS: @(#) $Id: tkMacMenu.r,v 1.1.1.1 2007/07/10 15:05:17 duncan Exp $
*/
#include <Types.r>
/*
* Icons used in menu items.
*/
/* Seven menu-item icons stored as raw hex-encoded bitmap data.
   Do not reflow or hand-edit the hex strings. */
resource 'SICN' (128, preload, locked) {
{ /* array: 7 elements */
/* [1] */
$"0000 0000 8000 C000 E000 F000 F800 FC00"
$"F800 F000 E000 C000 80",
/* [2] */
$"0000 0000 0000 0800 1400 2200 4100 8080"
$"E380 2200 2200 2200 3E",
/* [3] */
$"0000 0000 0000 0000 0000 F8F0 C4F0 F270"
$"0900 0480 0270 0130 00F0",
/* [4] */
$"0000 0000 0000 0000 0000 0000 0000 0000"
$"0000 E4E0 CE60 1B00 3180",
/* [5] */
$"0000 0000 0000 0000 6300 9480 9480 7F00"
$"1400 7F00 9480 9480 63",
/* [6] */
$"0000 0000 0000 0000 0000 3FF8 1FF0 0FE0"
$"07C0 0380 01",
/* [7] */
$"0000 0000 0000 0000 0000 0100 0380 07C0"
$"0FE0 1FF0 3FF8"
}
};
|
#PLOT 4
# Exploratory-data-analysis assignment, plot 4: downloads the UCI household
# power consumption data and draws a 2x2 panel of time series for
# 2007-02-01 and 2007-02-02, saved to plot4.png.
fileUrl="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if(!file.exists("data")){
dir.create("data")
}
temp=tempfile()
# NOTE(review): setInternet2() is Windows-only and defunct in recent R --
# confirm the intended platform/R version before reuse.
setInternet2(use=TRUE)
#downloading file and unzip
download.file(fileUrl,temp, method="internal")
unzip(temp,exdir="./data",unzip="internal")
#clear now the connection
unlink(temp)
#read table
data<-read.table("./data/household_power_consumption.txt",sep=";",header=T,na.string="")
#change date format
data$Date<-as.Date(as.character(data$Date),format = "%d/%m/%Y")
#we filter out the data from the dates 2007-02-01 and 2007-02-02
require(dplyr)
dataN<-filter(data,Date >= "2007-02-01" & Date <= "2007-02-02")
#set data=NULL
data<-NULL
#add one column with days
dataN$day<- weekdays(as.Date(dataN$Date))
#save the plot directly on a .png file
png(file="plot4.png",width = 480, height = 480, units = "px")
#4 plots on same file, change margins
par(mfrow=c(2,2),mar=c(6,4,4,2))
#plot (1,1): Global Active Power over time
with(dataN, plot(as.numeric(as.character(Global_active_power)),type="l",
ylab="Global Active Power (Kilowats)",
xlab="",
xaxt="n",
lwd=1
))
# custom x axis: first/middle/last observation labelled by weekday
axis(1,at=c(1,nrow(dataN)/2,nrow(dataN)), labels=c("Thurs","Fri","Sat"))
#plot (1,2): Voltage over time
with(dataN, plot(as.numeric(as.character(Voltage)),type="l",
ylab="Voltage",
xlab="datetime",
xaxt="n",
lwd=1
))
axis(1,at=c(1,nrow(dataN)/2,nrow(dataN)), labels=c("Thurs","Fri","Sat"))
#plot (2,1): the three sub-metering series overlaid
with(dataN, plot(as.numeric(as.character(Sub_metering_1)),type="l",
ylab="Energy sub metering",
xlab="",
xaxt="n",
lwd=1,
col="black"
))
lines(as.numeric(as.character(dataN$Sub_metering_2)),col="red")
lines(dataN$Sub_metering_3,col="blue")
axis(1,at=c(1,nrow(dataN)/2,nrow(dataN)), labels=c("Thurs","Fri","Sat"))
legend("topright",lwd=2,cex=1, col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
#plot (2,2): Global Reactive Power over time
with(dataN, plot(as.numeric(as.character(Global_reactive_power)),type="l",
ylab=" Global_reactive_power",
xlab="datetime",
xaxt="n",
lwd=1
))
axis(1,at=c(1,nrow(dataN)/2,nrow(dataN)), labels=c("Thurs","Fri","Sat"))
#clear device
dev.off()
| /plot4.R | no_license | fbarbalato/Exploratory-Data | R | false | false | 2,132 | r | #PLOT 4
# Exploratory-data-analysis assignment, plot 4: downloads the UCI household
# power consumption data and draws a 2x2 panel of time series for
# 2007-02-01 and 2007-02-02, saved to plot4.png.
fileUrl="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if(!file.exists("data")){
dir.create("data")
}
temp=tempfile()
# NOTE(review): setInternet2() is Windows-only and defunct in recent R --
# confirm the intended platform/R version before reuse.
setInternet2(use=TRUE)
#downloading file and unzip
download.file(fileUrl,temp, method="internal")
unzip(temp,exdir="./data",unzip="internal")
#clear now the connection
unlink(temp)
#read table
data<-read.table("./data/household_power_consumption.txt",sep=";",header=T,na.string="")
#change date format
data$Date<-as.Date(as.character(data$Date),format = "%d/%m/%Y")
#we filter out the data from the dates 2007-02-01 and 2007-02-02
require(dplyr)
dataN<-filter(data,Date >= "2007-02-01" & Date <= "2007-02-02")
#set data=NULL
data<-NULL
#add one column with days
dataN$day<- weekdays(as.Date(dataN$Date))
#save the plot directly on a .png file
png(file="plot4.png",width = 480, height = 480, units = "px")
#4 plots on same file, change margins
par(mfrow=c(2,2),mar=c(6,4,4,2))
#plot (1,1): Global Active Power over time
with(dataN, plot(as.numeric(as.character(Global_active_power)),type="l",
ylab="Global Active Power (Kilowats)",
xlab="",
xaxt="n",
lwd=1
))
# custom x axis: first/middle/last observation labelled by weekday
axis(1,at=c(1,nrow(dataN)/2,nrow(dataN)), labels=c("Thurs","Fri","Sat"))
#plot (1,2): Voltage over time
with(dataN, plot(as.numeric(as.character(Voltage)),type="l",
ylab="Voltage",
xlab="datetime",
xaxt="n",
lwd=1
))
axis(1,at=c(1,nrow(dataN)/2,nrow(dataN)), labels=c("Thurs","Fri","Sat"))
#plot (2,1): the three sub-metering series overlaid
with(dataN, plot(as.numeric(as.character(Sub_metering_1)),type="l",
ylab="Energy sub metering",
xlab="",
xaxt="n",
lwd=1,
col="black"
))
lines(as.numeric(as.character(dataN$Sub_metering_2)),col="red")
lines(dataN$Sub_metering_3,col="blue")
axis(1,at=c(1,nrow(dataN)/2,nrow(dataN)), labels=c("Thurs","Fri","Sat"))
legend("topright",lwd=2,cex=1, col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
#plot (2,2): Global Reactive Power over time
with(dataN, plot(as.numeric(as.character(Global_reactive_power)),type="l",
ylab=" Global_reactive_power",
xlab="datetime",
xaxt="n",
lwd=1
))
axis(1,at=c(1,nrow(dataN)/2,nrow(dataN)), labels=c("Thurs","Fri","Sat"))
#clear device
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels.eoa.r
\name{labels.eoar}
\alias{labels.eoar}
\title{labels.eoar - Extract parameter labels from \code{eoar} objects.}
\usage{
\method{labels}{eoar}(obj, type = "coef")
}
\arguments{
\item{obj}{An \code{eoar} model object. See \code{\link{eoar}}.}
\item{type}{The type of parameter label required, or a regular expression.
Parameter type possibilities are "coef" or
"derived". Regular expressions are used to match parameter labels using \code{grep}.
If \code{type} is not "coef" and not "derived" and the regular expression
fails to match anything, all parameter labels are returned.}
}
\description{
labels.eoar - Extract parameter labels from \code{eoar} objects.
}
\details{
Coefficient labels are for variables in the log-linear model for
lambda. Derived parameter labels are non-coefficient parameters link
M, Mtot, and lambda.
}
\examples{
# A 3 year study of 7 sites. 21 "cells". lambda change = 20/year
set.seed(9430834) # fixes Y and g of this example, but not the RNG's used in chains
ns <- 3
ny <- 7
g <- data.frame(
alpha = rnorm(ns*ny,70,2),
beta = rnorm(ns*ny,700,25)
)
Y <- rbinom(ns*ny, c(rep(20,ny), rep(40,ny), rep(60,ny)), g$alpha/(g$alpha+g$beta))
df <- data.frame(year=factor(c(rep("2015",ny),rep("2016",ny),rep("2017",ny))),
Year=c(rep(1,ny),rep(2,ny),rep(3,ny)))
# Uninformed eoar (use low number of iterations because it's and example)
eoar.1 <- eoar(Y~year, g, df, nburn = 1000, niters= 50*10, nthins = 10 )
labels(eoar.1)
labels(eoar.1,"derived")
labels(eoar.1,"^M") # all M parameters
labels(eoar.1,"\\\\[3\\\\]$") # M[3] and lambda[3]
labels(eoar.1,".") # all parameter labels
plot(eoar.1$out[,labels(eoar.1)]) # trace plot of coefficients.
}
\seealso{
\code{\link{coef}}, \code{\link{eoar}}
}
\author{
Trent McDonald
}
| /man/labels.eoar.Rd | permissive | atredennick/EoAR | R | false | true | 1,834 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels.eoa.r
\name{labels.eoar}
\alias{labels.eoar}
\title{labels.eoar - Extract parameter labels from \code{eoar} objects.}
\usage{
\method{labels}{eoar}(obj, type = "coef")
}
\arguments{
\item{obj}{An \code{eoar} model object. See \code{\link{eoar}}.}
\item{type}{The type of parameter label required, or a regular expression.
Parameter type possibilities are "coef" or
"derived". Regular expressions are used to match parameter labels using \code{grep}.
If \code{type} is not "coef" and not "derived" and the regular expression
fails to match anything, all parameter labels are returned.}
}
\description{
labels.eoar - Extract parameter labels from \code{eoar} objects.
}
\details{
Coefficient labels are for variables in the log-linear model for
lambda. Derived parameter labels are non-coefficient parameters link
M, Mtot, and lambda.
}
\examples{
# A 3 year study of 7 sites. 21 "cells". lambda change = 20/year
set.seed(9430834) # fixes Y and g of this example, but not the RNG's used in chains
ns <- 3
ny <- 7
g <- data.frame(
alpha = rnorm(ns*ny,70,2),
beta = rnorm(ns*ny,700,25)
)
Y <- rbinom(ns*ny, c(rep(20,ny), rep(40,ny), rep(60,ny)), g$alpha/(g$alpha+g$beta))
df <- data.frame(year=factor(c(rep("2015",ny),rep("2016",ny),rep("2017",ny))),
Year=c(rep(1,ny),rep(2,ny),rep(3,ny)))
# Uninformed eoar (use low number of iterations because it's and example)
eoar.1 <- eoar(Y~year, g, df, nburn = 1000, niters= 50*10, nthins = 10 )
labels(eoar.1)
labels(eoar.1,"derived")
labels(eoar.1,"^M") # all M parameters
labels(eoar.1,"\\\\[3\\\\]$") # M[3] and lambda[3]
labels(eoar.1,".") # all parameter labels
plot(eoar.1$out[,labels(eoar.1)]) # trace plot of coefficients.
}
\seealso{
\code{\link{coef}}, \code{\link{eoar}}
}
\author{
Trent McDonald
}
|
##' @title Mask and crop gridded temperatures to a map
##' @param map Spatial object the temperature grid is masked/cropped to.
##' @param date Date passed to `fetch_temperature()` to select the grid.
##' @return A data frame with columns `long`, `lat`, `temperature` and
##'   `cuts` (temperature bins from `cut_temps()`), one row per grid cell.
##' @author Adam Sparks & Nicholas Tierney
##' @export
create_temp <- function(map, date) {
temperatures <- fetch_temperature(date)
# restrict the temperature grid to the supplied map
map_mask <- raster::mask(temperatures, map)
# Crop the AWAP grid to remove several outlying territories + small islands
# not of interest, so we have a much smaller plot when mapping
# NOTE(review): cropping to the extent of `temperatures` (not `map`) looks
# like a no-op -- confirm the intended crop extent.
map_mask_crop <- raster::crop(map_mask, temperatures)
# `rasterToPoints` pulls data from a gridded format and creates a data frame
# with latitude, longitude and the temperature value at that point.
map_mask_crop_df <- as.data.frame(raster::rasterToPoints(map_mask_crop))
colnames(map_mask_crop_df) <- c("long", "lat", "temperature")
# As BOM maps in 3 degree increments, we cut data into these bins.
map_mask_crop_df$cuts <- cut_temps(map_mask_crop_df$temperature)
map_mask_crop_df
}
| /R/create_temp.R | no_license | anhnguyendepocen/monash-colour-in-graphics | R | false | false | 982 | r | ##' @title
##' @param map Spatial object the temperature grid is masked/cropped to.
##' @param date Date passed to `fetch_temperature()` to select the grid.
##' @return A data frame with columns `long`, `lat`, `temperature` and
##'   `cuts` (temperature bins from `cut_temps()`), one row per grid cell.
##' @author Adam Sparks & Nicholas Tierney
##' @export
create_temp <- function(map, date) {
temperatures <- fetch_temperature(date)
# restrict the temperature grid to the supplied map
map_mask <- raster::mask(temperatures, map)
# Crop the AWAP grid to remove several outlying territories + small islands
# not of interest, so we have a much smaller plot when mapping
# NOTE(review): cropping to the extent of `temperatures` (not `map`) looks
# like a no-op -- confirm the intended crop extent.
map_mask_crop <- raster::crop(map_mask, temperatures)
# `rasterToPoints` pulls data from a gridded format and creates a data frame
# with latitude, longitude and the temperature value at that point.
map_mask_crop_df <- as.data.frame(raster::rasterToPoints(map_mask_crop))
colnames(map_mask_crop_df) <- c("long", "lat", "temperature")
# As BOM maps in 3 degree increments, we cut data into these bins.
map_mask_crop_df$cuts <- cut_temps(map_mask_crop_df$temperature)
map_mask_crop_df
}
|
# Auto-generated fuzzer regression input for diffrprojects:::dist_mat_absolute;
# x mixes large/negative integers and NA, y is zero-length -- presumably
# probing edge-case handling.
testlist <- list(x = c(-721944577L, -1610621056L, 6385253L, NA, 0L, 1048618L, 1283446298L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) | /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962330-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 182 | r | testlist <- list(x = c(-721944577L, -1610621056L, 6385253L, NA, 0L, 1048618L, 1283446298L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) |
# Auto-generated fuzzer regression input for gjam:::tnormRcpp (a truncated
# normal sampler, judging by the name -- confirm); lo is a subnormal double
# and sig is zero, both degenerate inputs.
testlist <- list(hi = 0, lo = 5.45406001361907e-311, mu = 0, sig = 0)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) | /gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610046384-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 126 | r | testlist <- list(hi = 0, lo = 5.45406001361907e-311, mu = 0, sig = 0)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) |
## This assignment inverts a matrix in an efficient manner using caching.
## This function creates a list of four elements, each of which are a function
## set sets the value of the matrix
## get gets the value of the matrix
## setInverse inverts the matrix, but is only ever called by the cacheSolve function
## getInverse returns the inverted
makeCacheMatrix <- function(x = matrix()) {
# cached inverse of x; NULL until cacheSolve() stores one
inv <- NULL
# replace the stored matrix and invalidate the cached inverse
set <- function(y) {
x <<- y
inv <<- NULL
}
# return the stored matrix
get <- function() x
# store a computed inverse; NOTE(review): the parameter name `solve`
# shadows base::solve -- consider renaming for clarity
setInverse <- function(solve) inv <<- solve
# return the cached inverse, or NULL if not yet computed
getInverse <- function() inv
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve checks to see if the object contains the inverted matrix. If
## it does, then it simply gets that data from the object. If it does not
## contain the inverted matrix, then cacheSolve inverts the matrix and sets the
## value in the object.
## cacheSolve: return the inverse of the special "matrix" object `x`
## built by makeCacheMatrix, computing it at most once.
##
## x   : list with get/setInverse/getInverse closures
## ... : further arguments forwarded to solve()
## Returns the (possibly cached) matrix inverse.
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (!is.null(inv)) { ## cache hit: simply reuse the stored inverse
    message("getting cached data")
    return(inv)
  }
  ## cache miss: invert the stored matrix and memoise the result
  mat <- x$get()
  ## bug fix: forward `...` to solve(); previously the extra arguments
  ## were accepted by cacheSolve but silently dropped here
  inv <- solve(mat, ...)
  x$setInverse(inv)
  inv
}
| /cachematrix.R | no_license | millikenrobert/ProgrammingAssignment2 | R | false | false | 1,262 | r | ##This assignment inverses a matrix in an efficient manner using caching.
## This function creates a list of four elements, each of which are a function
## set sets the value of the matrix
## get gets the value of the matrix
## setInverse inverts the matrix, but is only ever called by the cacheSolve function
## getInverse returns the inverted
makeCacheMatrix <- function(x = matrix()) {
# cached inverse of x; NULL until cacheSolve() stores one
inv <- NULL
# replace the stored matrix and invalidate the cached inverse
set <- function(y) {
x <<- y
inv <<- NULL
}
# return the stored matrix
get <- function() x
# store a computed inverse; NOTE(review): the parameter name `solve`
# shadows base::solve -- consider renaming for clarity
setInverse <- function(solve) inv <<- solve
# return the cached inverse, or NULL if not yet computed
getInverse <- function() inv
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve checks to see if the object contains the inverted matrix. If
## it does, then it simply gets that data from the object. If it does not
## contain the inverted matrix, then cacheSolve inverts the matrix and sets the
## value in the object.
## Return the (possibly cached) inverse of the cache-matrix object `x`.
cacheSolve <- function(x, ...) {
inv <- x$getInverse()
if (!is.null(inv)) { ##if the getInverse is not null, then simply get it
message("getting cached data")
return(inv)
}
## if getInverse is null, then calculate the inverse using solve, and set the value
## NOTE(review): `...` is accepted but not forwarded to solve() -- confirm.
mat <- x$get()
inv <- solve(mat)
x$setInverse(inv)
inv
}
|
#######################################################################
#
# This script simulate 100,000 genotype data for 35000 unrelated individuals
# using uniform distribution of allele frequencies with MAF>1%.
# Data output are in ped/fam format.
#
# Author: Valentin Hivert
#######################################################################
# Build a fixed-width individual ID: `base` followed by `i` zero-padded to
# at least four digits (e.g. makeID(7) -> "IID0007", makeID(12345) ->
# "IID12345"). Intended for positive integer indices, as used below.
makeID <- function(i, base = "IID") {
  # sprintf replaces the original four-branch if-chain with one
  # equivalent fixed-width format.
  paste0(base, sprintf("%04d", i))
}
# Build a SNP identifier "rsNNNN" with the numeric part zero-padded to at
# least four digits, matching makeID()'s scheme.
# Bug fix: the original 1000-9999 branch emitted "rs0" + i (five digits,
# e.g. "rs01000"), breaking the fixed-width pattern used by every other
# branch; sprintf makes all branches consistent
# (7 -> "rs0007", 5555 -> "rs5555", 12345 -> "rs12345").
makeSNP <- function(i) {
  paste0("rs", sprintf("%04d", i))
}
# Simulate genotypes at M unlinked SNPs for N unrelated individuals and
# write them as PLINK text files (<prefixOutput>.map / <prefixOutput>.ped).
# Allele frequencies are Uniform(0.01, 0.99); monomorphic draws are redrawn.
# When N*M > 35e6 the cohort is written in 100 chunks of N/100 individuals
# each (NOTE(review): assumes N is divisible by 100 -- confirm).
simPlinkTextData <- function(N, # Sample size
M, # Number of unlinked markers
prefixOutput){
## ped/map file
chr <- rep(1,M)
pos <- sort(sample(1:1e6,M))
# two distinct alleles per marker, drawn from A/C/G/T
a1a2 <- do.call("rbind",lapply(1:M,function(j) sample(c("A","C","G","T"),2)))
snp <- sapply(1:M,makeSNP)
## ped/geno
mapData <- cbind.data.frame(chr,snp,0,pos)
write.table(mapData,paste0(prefixOutput,".map"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t")
if(N*M>35000000){
# chunked path: N becomes the per-chunk sample size
N=N/100
freq=runif(M,min = 0.01,max = 0.99)
for(i in 1:100){
print(paste0("Part ",i,"/100"))
X <- do.call("cbind",lapply(1:M,function(j,p,N) rbinom(N,2,prob=p[j]),p=freq,N=N))
# redraw any marker that came out monomorphic in this chunk
for(j in 1:M){
while(length(unique(X[,j]))==1){X[,j]=rbinom(N,2,prob=freq[j])}
}
# genotype strings indexed by dosage 0/1/2 of the second allele
refGeno <- t(sapply(1:M,function(j) c(paste0(a1a2[j,1],"\t",a1a2[j,1]),paste0(a1a2[j,1],"\t",a1a2[j,2]),paste0(a1a2[j,2],"\t",a1a2[j,2]))))
ped <- do.call("cbind",lapply(1:M,function(j) refGeno[j,1+X[,j]]))
## fam file
iid <- sapply(((i-1)*N+1):(i*N),makeID,"IID")
fid <- iid
pid <- rep(0,N)
mid <- rep(0,N)
sex <- sample(c(1,2),N,replace=TRUE)
pheno <- rep(-9,N)
fam <- cbind.data.frame(fid,iid,pid,mid,sex,pheno)
# first chunk creates the .ped file, later chunks append to it
if(i==1){
write.table(cbind.data.frame(fam,ped),paste0(prefixOutput,".ped"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t")
}else{write.table(cbind.data.frame(fam,ped),paste0(prefixOutput,".ped"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t",append = T)}
}
}else{
# single-pass path: one frequency draw per marker
X <- do.call("cbind",lapply(1:M,function(j) rbinom(N,2,prob=runif(1,min = 0.01,max = 0.99))))
# redraw monomorphic markers (with a fresh frequency)
X <- apply(X,2,function(j,N){
while(length(unique(j))==1){j=rbinom(N,2,prob=runif(1))}
return(j)
},N=N)
# NOTE(review): summary() result is discarded -- dead code?
summary(colMeans(X)/2)
refGeno <- t(sapply(1:M,function(j) c(paste0(a1a2[j,1],"\t",a1a2[j,1]),paste0(a1a2[j,1],"\t",a1a2[j,2]),paste0(a1a2[j,2],"\t",a1a2[j,2]))))
ped <- do.call("cbind",lapply(1:M,function(j) refGeno[j,1+X[,j]]))
## fam file
iid <- sapply(1:N,makeID,"IID")
fid <- iid
pid <- rep(0,N)
mid <- rep(0,N)
sex <- sample(c(1,2),N,replace=TRUE)
pheno <- rep(-9,N)
fam <- cbind.data.frame(fid,iid,pid,mid,sex,pheno)
## ped/geno
mapData <- cbind.data.frame(chr,snp,0,pos)
pedData <- cbind.data.frame(fam,ped)
write.table(mapData,paste0(prefixOutput,".map"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t")
write.table(pedData,paste0(prefixOutput,".ped"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t")
}
}
simPlinkTextData(N = 35000,M = 100000,prefixOutput = "data_Simu_35K_ind_100K_unlinkedSNP")
| /Simulate_unrelatedInd_unlinkedMarkers.R | no_license | vhivert/NonAddVar-AJHG-2021 | R | false | false | 3,668 | r | #######################################################################
#
# This script simulate 100,000 genotype data for 35000 unrelated individuals
# using uniform distribution of allele frequencies with MAF>1%.
# Data output are in ped/fam format.
#
# Author: Valentin Hivert
#######################################################################
# Build a fixed-width individual ID: `base` followed by `i` zero-padded to
# at least four digits (e.g. makeID(7) -> "IID0007", makeID(12345) ->
# "IID12345"). Intended for positive integer indices, as used below.
makeID <- function(i, base = "IID") {
  # sprintf replaces the original four-branch if-chain with one
  # equivalent fixed-width format.
  paste0(base, sprintf("%04d", i))
}
# Build a SNP identifier "rsNNNN" with the numeric part zero-padded to at
# least four digits, matching makeID()'s scheme.
# Bug fix: the original 1000-9999 branch emitted "rs0" + i (five digits,
# e.g. "rs01000"), breaking the fixed-width pattern used by every other
# branch; sprintf makes all branches consistent
# (7 -> "rs0007", 5555 -> "rs5555", 12345 -> "rs12345").
makeSNP <- function(i) {
  paste0("rs", sprintf("%04d", i))
}
# Simulate genotypes at M unlinked SNPs for N unrelated individuals and
# write them as PLINK text files (<prefixOutput>.map / <prefixOutput>.ped).
# Allele frequencies are Uniform(0.01, 0.99); monomorphic draws are redrawn.
# When N*M > 35e6 the cohort is written in 100 chunks of N/100 individuals
# each (NOTE(review): assumes N is divisible by 100 -- confirm).
simPlinkTextData <- function(N, # Sample size
M, # Number of unlinked markers
prefixOutput){
## ped/map file
chr <- rep(1,M)
pos <- sort(sample(1:1e6,M))
# two distinct alleles per marker, drawn from A/C/G/T
a1a2 <- do.call("rbind",lapply(1:M,function(j) sample(c("A","C","G","T"),2)))
snp <- sapply(1:M,makeSNP)
## ped/geno
mapData <- cbind.data.frame(chr,snp,0,pos)
write.table(mapData,paste0(prefixOutput,".map"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t")
if(N*M>35000000){
# chunked path: N becomes the per-chunk sample size
N=N/100
freq=runif(M,min = 0.01,max = 0.99)
for(i in 1:100){
print(paste0("Part ",i,"/100"))
X <- do.call("cbind",lapply(1:M,function(j,p,N) rbinom(N,2,prob=p[j]),p=freq,N=N))
# redraw any marker that came out monomorphic in this chunk
for(j in 1:M){
while(length(unique(X[,j]))==1){X[,j]=rbinom(N,2,prob=freq[j])}
}
# genotype strings indexed by dosage 0/1/2 of the second allele
refGeno <- t(sapply(1:M,function(j) c(paste0(a1a2[j,1],"\t",a1a2[j,1]),paste0(a1a2[j,1],"\t",a1a2[j,2]),paste0(a1a2[j,2],"\t",a1a2[j,2]))))
ped <- do.call("cbind",lapply(1:M,function(j) refGeno[j,1+X[,j]]))
## fam file
iid <- sapply(((i-1)*N+1):(i*N),makeID,"IID")
fid <- iid
pid <- rep(0,N)
mid <- rep(0,N)
sex <- sample(c(1,2),N,replace=TRUE)
pheno <- rep(-9,N)
fam <- cbind.data.frame(fid,iid,pid,mid,sex,pheno)
# first chunk creates the .ped file, later chunks append to it
if(i==1){
write.table(cbind.data.frame(fam,ped),paste0(prefixOutput,".ped"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t")
}else{write.table(cbind.data.frame(fam,ped),paste0(prefixOutput,".ped"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t",append = T)}
}
}else{
# single-pass path: one frequency draw per marker
X <- do.call("cbind",lapply(1:M,function(j) rbinom(N,2,prob=runif(1,min = 0.01,max = 0.99))))
# redraw monomorphic markers (with a fresh frequency)
X <- apply(X,2,function(j,N){
while(length(unique(j))==1){j=rbinom(N,2,prob=runif(1))}
return(j)
},N=N)
# NOTE(review): summary() result is discarded -- dead code?
summary(colMeans(X)/2)
refGeno <- t(sapply(1:M,function(j) c(paste0(a1a2[j,1],"\t",a1a2[j,1]),paste0(a1a2[j,1],"\t",a1a2[j,2]),paste0(a1a2[j,2],"\t",a1a2[j,2]))))
ped <- do.call("cbind",lapply(1:M,function(j) refGeno[j,1+X[,j]]))
## fam file
iid <- sapply(1:N,makeID,"IID")
fid <- iid
pid <- rep(0,N)
mid <- rep(0,N)
sex <- sample(c(1,2),N,replace=TRUE)
pheno <- rep(-9,N)
fam <- cbind.data.frame(fid,iid,pid,mid,sex,pheno)
## ped/geno
mapData <- cbind.data.frame(chr,snp,0,pos)
pedData <- cbind.data.frame(fam,ped)
write.table(mapData,paste0(prefixOutput,".map"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t")
write.table(pedData,paste0(prefixOutput,".ped"),quote=FALSE,row.names=FALSE,col.names=FALSE,sep="\t")
}
}
simPlinkTextData(N = 35000,M = 100000,prefixOutput = "data_Simu_35K_ind_100K_unlinkedSNP")
|
# Auto-generated fuzzer regression input for multivariance:::match_rows
# (A: 5x7 matrix mixing extreme doubles and zeros; B: 1x1 zero matrix).
testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818252170711e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613104499-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 344 | r | testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818252170711e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
leaps<-function(...) eleaps(...)
| /R/leaps.R | no_license | cran/subselect | R | false | false | 35 | r |
leaps<-function(...) eleaps(...)
|
# Gather in-lake chlorophyll and total phosphorus data
# Vanern Lake
# Builds daily mean series (chl-a, TP, flow, epi/hypo temperature, DOC)
# from raw sampling files, merges them onto a continuous 2000-2014 date
# axis, and writes the result to csv.
# Read in datafile including temp, TP, chla ####
data <- read.csv('./VanernLake/Staging Files/Vanern_Variables/Vanern_inlake_chem_Megrundet.csv')
chla <- data # For simplicity of reusing code, name copy of 'data' as chla
tp <- data # For simplicity of reusing code, name copy of 'data' as tp
# Chlorophyll-a in ug/L (1 mg/m3=1ug/L) ####
# Calculate mean water column chla for each available sampling date
library(xts)
chla <- subset(chla, chla$Chla.mg.m3>=0) #Remove NAs and values <0
chla.xts <- xts(x = chla$Chla.mg.m3,as.POSIXct(chla$sampledate))
ep <- endpoints(chla.xts,'days')
daily_chla <- period.apply(chla.xts,ep,mean) # ug/L
# Total Phosphorus ug/L ####
# Calculate mean water column TP for each available sampling date
# NOTE(review): the column name below contains mojibake ("ยต" in place of
# the micro sign) -- it must exactly match the csv header; confirm encoding.
tp <- subset(tp, Tot.P.ยตg.l>=0) #Remove NA values
tp.xts <- xts(x = tp$Tot.P.ยตg.l, as.POSIXct(tp$sampledate))
ep <- endpoints(tp.xts,'days')
daily_tp <- period.apply(tp.xts,ep,mean) # ug/L
# Flow m3/s ####
flow <- read.csv('./VanernLake/Staging Files/Vanern_Variables/outflow.csv')
flow$date <- as.Date( as.character(flow$date), "%m/%d/%Y")
flow <- subset(flow, date >= as.Date("2000/01/01"))
flow.xts <- xts(x= flow$flow, as.POSIXct(flow$date))
ep <- endpoints(flow.xts, 'days')
daily_flow <- period.apply(flow.xts, ep, mean)
daily_flow <- to.daily(daily_flow)
daily_flow <- subset(daily_flow[,1])
# Temperature in lake ####
# epi = top 2 m of the water column, hypo = below 50 m
epi <- subset(data, Depth..m.<=2)
hypo <- subset(data, Depth..m.>=50)
# Mean Epi and Hypo temp per date ####
library(xts)
epi.xts <- xts(x = epi$Temp...C,as.POSIXct(epi$sampledate))
ep <- endpoints(epi.xts,'days')
daily_epi <- period.apply(epi.xts,ep,mean) # C
hypo.xts <- xts(x = hypo$Temp...C,as.POSIXct(hypo$sampledate))
ep <- endpoints(hypo.xts,'days')
daily_hypo <- period.apply(hypo.xts,ep,mean) # C
# In Lake DOC #####
toc.xts <- xts(x= data$TOC.mg.l, as.POSIXct(data$sampledate))
ep <- endpoints(toc.xts, 'days')
daily_toc <- period.apply(toc.xts, ep,mean)
# DOC taken as 90% of TOC
daily_doc <- daily_toc*.9
# Surface water DOC
sw <- read.csv ('./VanernLake/Staging Files/Vanern_Variables/Vanern_tributaries.csv')
sw$sampledate <- as.Date( as.character(sw$sampledate), "%m/%d/%Y")
sw <- subset(sw, In.Out=='Inflow')
sw.xts <- xts(x= sw$TOC.mg.L, as.POSIXct(sw$sampledate))
ep <- endpoints(sw.xts, 'days')
daily_sw <- period.apply(sw.xts, ep,mean)
daily_swdoc <- daily_sw*.9
# Merge data with continuous date series and interpolate between missing values ####
# create continuous set of dates between mod_start mod_end
mod_start <- '2000-01-01'
mod_end <- '2014-12-31'
dates <- (seq(as.Date(mod_start), as.Date(mod_end), by='days'))
dates_xts <- as.xts(dates)
# Merge xts dates, chla, tp, flow and convert to dataframe
daily_data <- merge.xts(dates_xts, daily_flow, daily_hypo, daily_epi,
daily_chla, daily_tp, daily_doc, daily_swdoc, fill='')
daily_data <- data.frame(date=index(daily_data), coredata(daily_data))
daily_data$date <- as.Date(daily_data$date, format = "%m/%d/%y")
# NOTE(review): the result of format() below is discarded -- dead code?
format(daily_data$date,"%m/%d/%Y")
#Write .csv of data before interpolation
write.csv(daily_data, file='./VanernLake/Staging Files/Vanern_Variables/Vanern_inlake_TPchla.csv',
row.names=FALSE, na="")
| /VanernLake/Staging Files/gather_data.R | no_license | fscordo/SOS | R | false | false | 3,238 | r | # Gather in-lake chlorophyll and total phosphorus data
# Vanern Lake
# Purpose: assemble daily in-lake physical/chemical drivers (flow, epi/hypo
# temperature, chl-a, TP, DOC, tributary DOC) onto a continuous 2000-2014
# daily date grid and write the merged (pre-interpolation) table to CSV.
# Requires: the 'xts' package and the CSV inputs under
# ./VanernLake/Staging Files/Vanern_Variables/.
# Read in datafile including temp, TP, chla ####
data <- read.csv('./VanernLake/Staging Files/Vanern_Variables/Vanern_inlake_chem_Megrundet.csv')
chla <- data # For simplicity of reusing code, name copy of 'data' as chla
tp <- data # For simplicity of reusing code, name copy of 'data' as tp
# Chlorophyll-a in ug/L (1 mg/m3=1ug/L) ####
# Calculate mean water column chla for each available sampling date
library(xts)
chla <- subset(chla, chla$Chla.mg.m3>=0) #Remove NAs and values <0
chla.xts <- xts(x = chla$Chla.mg.m3,as.POSIXct(chla$sampledate))
ep <- endpoints(chla.xts,'days')
daily_chla <- period.apply(chla.xts,ep,mean) # ug/L
# Total Phosphorus ug/L ####
# Calculate mean water column TP for each available sampling date
# NOTE(review): 'Tot.P.ยตg.l' looks like a mis-encoded micro sign (µ) in the
# column name; confirm it matches names(data) exactly or subsetting will fail.
tp <- subset(tp, Tot.P.ยตg.l>=0) #Remove NA values
tp.xts <- xts(x = tp$Tot.P.ยตg.l, as.POSIXct(tp$sampledate))
ep <- endpoints(tp.xts,'days')
daily_tp <- period.apply(tp.xts,ep,mean) # ug/L
# Flow m3/s ####
flow <- read.csv('./VanernLake/Staging Files/Vanern_Variables/outflow.csv')
flow$date <- as.Date( as.character(flow$date), "%m/%d/%Y")
flow <- subset(flow, date >= as.Date("2000/01/01"))
flow.xts <- xts(x= flow$flow, as.POSIXct(flow$date))
ep <- endpoints(flow.xts, 'days')
daily_flow <- period.apply(flow.xts, ep, mean)
daily_flow <- to.daily(daily_flow)
# keep only the first OHLC column produced by to.daily()
daily_flow <- subset(daily_flow[,1])
# Temperature in lake ####
# Epilimnion = samples at <=2 m depth; hypolimnion = samples at >=50 m depth
epi <- subset(data, Depth..m.<=2)
hypo <- subset(data, Depth..m.>=50)
# Mean Epi and Hypo temp per date ####
# NOTE(review): library(xts) was already attached above; this second call is
# harmless but redundant.
library(xts)
epi.xts <- xts(x = epi$Temp...C,as.POSIXct(epi$sampledate))
ep <- endpoints(epi.xts,'days')
daily_epi <- period.apply(epi.xts,ep,mean) # C
hypo.xts <- xts(x = hypo$Temp...C,as.POSIXct(hypo$sampledate))
ep <- endpoints(hypo.xts,'days')
daily_hypo <- period.apply(hypo.xts,ep,mean) # C
# In Lake DOC #####
toc.xts <- xts(x= data$TOC.mg.l, as.POSIXct(data$sampledate))
ep <- endpoints(toc.xts, 'days')
daily_toc <- period.apply(toc.xts, ep,mean)
# DOC estimated as 90% of measured TOC
daily_doc <- daily_toc*.9
# Surface water DOC
sw <- read.csv ('./VanernLake/Staging Files/Vanern_Variables/Vanern_tributaries.csv')
sw$sampledate <- as.Date( as.character(sw$sampledate), "%m/%d/%Y")
sw <- subset(sw, In.Out=='Inflow')
sw.xts <- xts(x= sw$TOC.mg.L, as.POSIXct(sw$sampledate))
ep <- endpoints(sw.xts, 'days')
daily_sw <- period.apply(sw.xts, ep,mean)
# tributary (surface-water) DOC also estimated as 90% of TOC
daily_swdoc <- daily_sw*.9
# Merge data with continuous date series and interpolate between missing values ####
# create continuous set of dates between mod_start mod_end
mod_start <- '2000-01-01'
mod_end <- '2014-12-31'
dates <- (seq(as.Date(mod_start), as.Date(mod_end), by='days'))
dates_xts <- as.xts(dates)
# Merge xts dates, chla, tp, flow and convert to dataframe
# NOTE(review): fill='' is an unusual fill value for numeric xts series --
# confirm whether NA (the default) was intended here.
daily_data <- merge.xts(dates_xts, daily_flow, daily_hypo, daily_epi,
                        daily_chla, daily_tp, daily_doc, daily_swdoc, fill='')
daily_data <- data.frame(date=index(daily_data), coredata(daily_data))
daily_data$date <- as.Date(daily_data$date, format = "%m/%d/%y")
# NOTE(review): the result of format() below is not assigned, so this line
# has no lasting effect on daily_data -- likely leftover from interactive use.
format(daily_data$date,"%m/%d/%Y")
#Write .csv of data before interpolation
write.csv(daily_data, file='./VanernLake/Staging Files/Vanern_Variables/Vanern_inlake_TPchla.csv',
          row.names=FALSE, na="")
|
context("abar/regimes tests")
# Shared simulated data for all tests in this file:
#   W - continuous covariate; C - censoring node; A - binary treatment;
#   Y - binary outcome depending on W and A.
# rexpit()/BinaryToCensoring()/AsMatrix() are ltmle helpers.
# NOTE(review): no set.seed() call, so the simulated data differ between runs;
# consider seeding if the tolerance-based comparisons ever prove flaky.
n <- 50
W <- rnorm(n)
C.binary <- rexpit(W)
C <- BinaryToCensoring(is.uncensored = C.binary)
A <- rexpit(W)
Y <- rexpit(W+A)
data <- data.frame(W, C, A, Y)
# A logical abar matrix must behave identically to the equivalent 0/1 numeric matrix.
test_that("logical abar is the same as numeric",{
  r1 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", abar=as.matrix(W > 0), estimate.time = FALSE)
  r2 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", abar=as.matrix(as.numeric(W > 0)), estimate.time = FALSE)
  r1$call <- r2$call <- NULL  # the recorded calls differ by construction; drop before comparing
  expect_equal(r1, r2)
})
# Swapping the order of the two rules should flip the sign of the estimated ATE.
test_that("list of rules works", {
  rule1 <- function (row) row["W"] > 0
  rule2 <- function (row) row["W"] > 1
  r1 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", rule=list(rule1, rule2), estimate.time = FALSE)
  r2 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", rule=list(rule2, rule1), estimate.time = FALSE)
  expect_equal(summary(r1)$effect.measures$ATE$estimate, -1 * summary(r2)$effect.measures$ATE$estimate, tolerance = 1e-4)
})
test_that("abar can be NULL if no Anodes, C nodes can be binary or factor", {
  r1 <- ltmle(data, Anodes=NULL, Lnodes="A", Cnodes="C", Ynodes="Y", abar=NULL, estimate.time = FALSE)
  # same data but with the censoring node given as a raw binary vector instead of a factor
  data2 <- data.frame(W, C=C.binary, A, Y)
  r2 <- ltmle(data2, Anodes=NULL, Lnodes="A", Cnodes="C", Ynodes="Y", abar=NULL, estimate.time = FALSE)
  r1$call <- r2$call <- NULL
  expect_equal(r1, r2)
  # smoke test: ltmleMSM with regimes=NULL should run without error
  r3 <- ltmleMSM(data, Anodes=NULL, Lnodes="A", Cnodes="C", Ynodes="Y", regimes=NULL, estimate.time = FALSE, summary.measures = NULL, working.msm = "Y~1")
})
# When no observation follows the requested regime, IPTW should warn and return NA.
test_that("IPTW is NA if no intervention_match", {
  data.all1 <- data.frame(W, A=1, Y=rexpit(W))
  expect_warning(r <- ltmle(data.all1, Anodes="A", Ynodes="Y", abar=0, estimate.time = FALSE), "no rows uncensored and matching regimes/abar - IPTW returns NA")
  expect_true(is.na(r$estimates["iptw"]))
})
# A static abar matrix must be equivalent to the dynamic rule that generates it.
test_that("abar is same as equivalent rule", {
  A2 <- rexpit(W)
  data2 <- data.frame(W, C, A, A2, Y)
  r1 <- ltmle(data2, Anodes=c("A", "A2"), Cnodes="C", Ynodes="Y", abar=cbind(W < 0, W > 1), estimate.time = FALSE)
  r2 <- ltmle(data2, Anodes=c("A", "A2"), Cnodes="C", Ynodes="Y", rule=function (row) c(row["W"] < 0, row["W"] > 1), estimate.time = FALSE)
  r1$call <- r2$call <- NULL
  expect_equal(r1, r2)
})
# A regimes array (n x num.Anodes x num.regimes) must be equivalent to the list
# of rules that generates it, for both ltmleMSM and ltmle.
test_that("regimes is same as equivalent rule", {
  A2 <- rexpit(W)
  data2 <- data.frame(W, C, A, A2, Y)
  rule1 <- function (row) c(row["W"] < 0, row["W"] > 1)
  rule2 <- function (row) c(0, 1)
  rule3 <- function (row) c(1, 1)
  regimes <- array(1, dim=c(n, 2, 3))
  regimes[, , 1] <- cbind(W < 0, W > 1)
  regimes[, , 2] <- cbind(rep(0, n), rep(1, n))
  r1 <- ltmleMSM(data2, Anodes=c("A", "A2"), Cnodes="C", Ynodes="Y", regimes=regimes, estimate.time = FALSE, summary.measures = NULL, working.msm = "Y~1")
  r2 <- ltmleMSM(data2, Anodes=c("A", "A2"), Cnodes="C", Ynodes="Y", regimes=list(rule1, rule2, rule3), estimate.time = FALSE, summary.measures = NULL, working.msm = "Y~1")
  r1$call <- r2$call <- NULL
  expect_equal(r1, r2)
  r3 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", abar=as.matrix(W > 0), estimate.time = FALSE)
  r4 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", rule=function (row) row$W > 0, estimate.time = FALSE)
  r3$call <- r4$call <- NULL
  expect_equal(r3, r4)
  # repeat the comparison with a single Anode
  rule1 <- function (row) row["W"] < 0
  rule2 <- function (row) 0
  rule3 <- function (row) 1
  regimes <- array(1, dim=c(n, 1, 3))
  regimes[, 1, 1] <- W < 0
  regimes[, 1, 2] <- 0
  r1 <- ltmleMSM(data, Anodes="A", Cnodes="C", Ynodes="Y", regimes=regimes, estimate.time = FALSE, summary.measures = NULL, working.msm = "Y~1")
  r2 <- ltmleMSM(data, Anodes="A", Cnodes="C", Ynodes="Y", regimes=list(rule1, rule2, rule3), estimate.time = FALSE, summary.measures = NULL, working.msm = "Y~1")
  r1$call <- r2$call <- NULL
  expect_equal(r1, r2)
})
# smoke test: abar entries may be NA for censored rows without causing an error
test_that("NA in regimes after censoring is OK", {
  abar <- rep(NA, n)
  abar[C=="uncensored"] <- 1
  data2 <- data.frame(W, C, L=rnorm(n), A, Y)
  ltmle(data2, Anodes="A", Cnodes="C", Ynodes="Y", Lnodes="L", estimate.time = FALSE, abar=AsMatrix(abar))
})
n <- 50
W <- rnorm(n)
C.binary <- rexpit(W)
C <- BinaryToCensoring(is.uncensored = C.binary)
A <- rexpit(W)
Y <- rexpit(W+A)
data <- data.frame(W, C, A, Y)
test_that("logical abar is the same as numeric",{
r1 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", abar=as.matrix(W > 0), estimate.time = F)
r2 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", abar=as.matrix(as.numeric(W > 0)), estimate.time = F)
r1$call <- r2$call <- NULL
expect_equal(r1, r2)
})
test_that("list of rules works", {
rule1 <- function (row) row["W"] > 0
rule2 <- function (row) row["W"] > 1
r1 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", rule=list(rule1, rule2), estimate.time = F)
r2 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", rule=list(rule2, rule1), estimate.time = F)
expect_equal(summary(r1)$effect.measures$ATE$estimate, -1 * summary(r2)$effect.measures$ATE$estimate, tolerance = 1e-4)
})
test_that("abar can be NULL if no Anodes, C nodes can be binary or factor", {
r1 <- ltmle(data, Anodes=NULL, Lnodes="A", Cnodes="C", Ynodes="Y", abar=NULL, estimate.time = F)
data2 <- data.frame(W, C=C.binary, A, Y)
r2 <- ltmle(data2, Anodes=NULL, Lnodes="A", Cnodes="C", Ynodes="Y", abar=NULL, estimate.time = F)
r1$call <- r2$call <- NULL
expect_equal(r1, r2)
r3 <- ltmleMSM(data, Anodes=NULL, Lnodes="A", Cnodes="C", Ynodes="Y", regimes=NULL, estimate.time = F, summary.measures = NULL, working.msm = "Y~1")
})
test_that("IPTW is NA if no intervention_match", {
data.all1 <- data.frame(W, A=1, Y=rexpit(W))
expect_warning(r <- ltmle(data.all1, Anodes="A", Ynodes="Y", abar=0, estimate.time = F), "no rows uncensored and matching regimes/abar - IPTW returns NA")
expect_true(is.na(r$estimates["iptw"]))
})
test_that("abar is same as equivalent rule", {
A2 <- rexpit(W)
data2 <- data.frame(W, C, A, A2, Y)
r1 <- ltmle(data2, Anodes=c("A", "A2"), Cnodes="C", Ynodes="Y", abar=cbind(W < 0, W > 1), estimate.time = F)
r2 <- ltmle(data2, Anodes=c("A", "A2"), Cnodes="C", Ynodes="Y", rule=function (row) c(row["W"] < 0, row["W"] > 1), estimate.time = F)
r1$call <- r2$call <- NULL
expect_equal(r1, r2)
})
test_that("regimes is same as equivalent rule", {
A2 <- rexpit(W)
data2 <- data.frame(W, C, A, A2, Y)
rule1 <- function (row) c(row["W"] < 0, row["W"] > 1)
rule2 <- function (row) c(0, 1)
rule3 <- function (row) c(1, 1)
regimes <- array(1, dim=c(n, 2, 3))
regimes[, , 1] <- cbind(W < 0, W > 1)
regimes[, , 2] <- cbind(rep(0, n), rep(1, n))
r1 <- ltmleMSM(data2, Anodes=c("A", "A2"), Cnodes="C", Ynodes="Y", regimes=regimes, estimate.time = F, summary.measures = NULL, working.msm = "Y~1")
r2 <- ltmleMSM(data2, Anodes=c("A", "A2"), Cnodes="C", Ynodes="Y", regimes=list(rule1, rule2, rule3), estimate.time = F, summary.measures = NULL, working.msm = "Y~1")
r1$call <- r2$call <- NULL
expect_equal(r1, r2)
r3 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", abar=as.matrix(W > 0), estimate.time = F)
r4 <- ltmle(data, Anodes="A", Cnodes="C", Ynodes="Y", rule=function (row) row$W > 0, estimate.time = F)
r3$call <- r4$call <- NULL
expect_equal(r3, r4)
rule1 <- function (row) row["W"] < 0
rule2 <- function (row) 0
rule3 <- function (row) 1
regimes <- array(1, dim=c(n, 1, 3))
regimes[, 1, 1] <- W < 0
regimes[, 1, 2] <- 0
r1 <- ltmleMSM(data, Anodes="A", Cnodes="C", Ynodes="Y", regimes=regimes, estimate.time = F, summary.measures = NULL, working.msm = "Y~1")
r2 <- ltmleMSM(data, Anodes="A", Cnodes="C", Ynodes="Y", regimes=list(rule1, rule2, rule3), estimate.time = F, summary.measures = NULL, working.msm = "Y~1")
r1$call <- r2$call <- NULL
expect_equal(r1, r2)
})
test_that("NA in regimes after censoring is OK", {
abar <- rep(NA, n)
abar[C=="uncensored"] <- 1
data2 <- data.frame(W, C, L=rnorm(n), A, Y)
ltmle(data2, Anodes="A", Cnodes="C", Ynodes="Y", Lnodes="L", estimate.time = F, abar=AsMatrix(abar))
}) |
#'
#'@title Function to compare survey selectivity functions by year among several models
#'
#'@description This function compares survey selectivity functions by year
#' among several models.
#'
#'@param objs - list of resLst objects or dataframe from call to \code{extractMDFR.Surveys.SelFcns}
#'@param cast - formula to exclude factors from "averaging" over
#'@param fleets - vector of survey fleets to plot, or "all"
#'@param years - vector of years to show, or 'all' to show all years (NULL returns an empty list)
#'@param dodge - width to dodge overlapping series (retained for backward compatibility; unused by the stanza-based plots)
#'@param singlePlot - flag to plot all years on single plot (retained for backward compatibility)
#'@param mxy - max number of years per page (retained for backward compatibility; unused by the stanza-based plots)
#'@param facet_wrap - ggplot2 formula to produce figure with wrapped facets (retained for backward compatibility)
#'@param facet_grid - ggplot2 formula to produce figure with gridded facets (retained for backward compatibility)
#'@param pdf - creates pdf, if not NULL
#'@param showPlot - flag (T/F) to show plot
#'@param verbose - flag (T/F) to print diagnostic information
#'
#'@return list of ggplot2 objects, keyed by figure caption
#'
#'@details Years are collapsed into "stanzas" (runs of years over which the
#' selectivity curve is identical at all sizes) and one facet is plotted per
#' stanza, for each fleet and each sex/maturity/shell-condition combination.
#'
#'@import ggplot2
#'
#'@export
#'
compareResults.Surveys.SelFcns<-function(objs,
                                         cast='y+x',
                                         fleets="all",
                                         years='all',
                                         dodge=0.2,
                                         singlePlot=FALSE,
                                         mxy=15,
                                         facet_wrap=NULL,
                                         facet_grid=ifelse(singlePlot,"x~case","y~x"),
                                         pdf=NULL,
                                         showPlot=FALSE,
                                         verbose=FALSE){
    if (verbose) message("Starting rCompTCMs::compareResults.Surveys.SelFcns().\n");
    #--set stringsAsFactors=FALSE for the duration of this call only;
    #--the previous bug left the global option permanently changed.
    oldopts <- options(stringsAsFactors=FALSE);
    on.exit(options(oldopts), add=TRUE);
    std_theme <- wtsPlots::getStdTheme();
    if (is.null(years)) return(list());

    #create pdf, if necessary
    if(!is.null(pdf)){
        pdf(file=pdf,width=11,height=8,onefile=TRUE);
        on.exit(grDevices::dev.off(), add=TRUE);  #add=TRUE so the options restore above is kept
        showPlot<-TRUE;
    }

    #--get (or accept) the melted dataframe of selectivity-at-size by case/fleet/year
    if (is.data.frame(objs)) {
        mdfr<-objs;
    } else {
        mdfr<-extractMDFR.Surveys.SelFcns(objs,fleets=fleets,cast=cast,years=years,verbose=verbose);
        if (is.null(mdfr)) return(list()); #empty list
    }

    #--identify year "stanzas": pivot sizes (z) to columns so that years with
    #--identical curves collapse into one group, then record min/max year.
    tmp <- mdfr %>% tidyr::pivot_wider(names_from=z,values_from=val);
    #--all columns whose names do not contain "y" (i.e. everything except the year)
    cols <- stringr::str_subset(names(tmp),stringr::fixed("y"),negate=TRUE);
    #--NOTE(review): cols[10:length(cols)] assumes the first 9 entries of 'cols'
    #--are id columns and the remainder are size bins; verify if the output of
    #--extractMDFR.Surveys.SelFcns ever changes shape.
    mdfr <- tmp %>% dplyr::group_by(dplyr::across(dplyr::all_of(cols))) %>%
              dplyr::summarize(ymn=min(y),
                               ymx=max(y)) %>%
              dplyr::ungroup() %>%
              tidyr::pivot_longer(cols=cols[10:length(cols)],names_to="z",values_to="val") %>%
              dplyr::mutate(z=as.numeric(z),
                            stanza=ifelse(ymn!=ymx,paste0(ymn,"-",ymx),ymn));

    #----------------------------------
    #selectivity functions: one plot per fleet x (sex, maturity, shell) combination,
    #faceted by year stanza (legacy per-year plotMDFR.XY code removed)
    #----------------------------------
    plots<-list();
    uF<-unique(mdfr$fleet);
    if (fleets[1]!="all") uF<-fleets;
    for (f in uF){
        if (verbose) message("Plotting fleet ",f,"\n")
        mdfrp<-mdfr[mdfr$fleet==f,];
        rws <- mdfrp %>% dplyr::distinct(x,m,s);  #unique sex/maturity/shell combinations
        for (i in seq_len(nrow(rws))){
            rw <- rws[i,];
            #--plot label such as "immature male", dropping "all" categories
            str <- stringr::str_trim(stringr::str_remove_all(paste(rw$s,rw$m,rw$x),stringr::fixed("all")));
            mdfrpp <- mdfrp %>% dplyr::inner_join(rw,by=c("x","m","s"));
            nStanzas <- length(unique(mdfrpp$stanza));
            #--one facet column unless there are many stanzas, then a near-square layout
            nCol <- if (nStanzas>3) floor(sqrt(nStanzas)) else 1;
            #--qualify facet_wrap to avoid any ambiguity with the 'facet_wrap' parameter
            p <- ggplot(mdfrpp,aes(x=z,y=val,colour=case)) +
                   geom_line() +
                   ggplot2::facet_wrap(~stanza,ncol=nCol) + ylim(0,1) +
                   xlab('size (mm CW)')+ylab('selectivity')+ggtitle(f)+
                   std_theme;
            cap<-paste0("\n \nFigure &&figno. Selectivity functions for ",str," crab in the ",f," survey. \n \n");
            plots[[cap]]<-p;
        }
    }#uF
    if (verbose) message("rCompTCMs::compareResults.Surveys.SelFcns: Done!\n");
    return(plots)
}
| /R/compareResults.Surveys.SelFcns.R | permissive | wStockhausen/rCompTCMs | R | false | false | 5,921 | r | #'
#'@title Function to compare survey selectivity functions by year among several models
#'
#'@description This function compares survey selectivity functions by year
#' among several models.
#'
#'@param objs - list of resLst objects or dataframe from call to \code{extractMDFR.Surveys.SelFcns}
#'@param cast - formula to exclude factors from "averaging" over
#'@param fleets - vector of survey fleets to plot, or "all"
#'@param years - vector of years to show, or 'all' to show all years
#'@param dodge - width to dodge overlapping series
#'@param singlePlot - flag to plot all years on single plot (be sure to adjust facet_grid)
#'@param mxy - max number of years per page
#'@param facet_wrap - ggplot2 formula to produce figure with wrapped facets
#'@param facet_grid - ggplot2 formula to produce figure with gridded facets
#'@param pdf - creates pdf, if not NULL
#'@param showPlot - flag (T/F) to show plot
#'@param verbose - flag (T/F) to print diagnostic information
#'
#'@return ggplot2 object
#'
#'@details None.
#'
#'@import ggplot2
#'
#'@export
#'
compareResults.Surveys.SelFcns<-function(objs,
cast='y+x',
fleets="all",
years='all',
dodge=0.2,
singlePlot=FALSE,
mxy=15,
facet_wrap=NULL,
facet_grid=ifelse(singlePlot,"x~case","y~x"),
pdf=NULL,
showPlot=FALSE,
verbose=FALSE){
if (verbose) message("Starting rCompTCMs::compareResults.Surveys.SelFcns().\n");
options(stringsAsFactors=FALSE);
std_theme = wtsPlots::getStdTheme();
if (is.null(years)) return(list());
#create pdf, if necessary
if(!is.null(pdf)){
pdf(file=pdf,width=11,height=8,onefile=TRUE);
on.exit(grDevices::dev.off());
showPlot<-TRUE;
}
if (is.data.frame(objs)) {
mdfr<-objs;
} else {
mdfr<-extractMDFR.Surveys.SelFcns(objs,fleets=fleets,cast=cast,years=years,verbose=verbose);
if (is.null(mdfr)) return(list()); #empty list
}
#---------NEW CODE------------
#--identify stanzas
tmp = mdfr %>% tidyr::pivot_wider(names_from=z,values_from=val);
cols = stringr::str_subset(names(tmp ),stringr::fixed("y"),negate=TRUE);
mdfr = tmp %>% dplyr::group_by(dplyr::across(dplyr::all_of(cols))) %>%
dplyr::summarize(ymn=min(y),
ymx=max(y)) %>%
dplyr::ungroup() %>%
tidyr::pivot_longer(cols=cols[10:length(cols)],names_to="z",values_to="val") %>%
dplyr::mutate(z=as.numeric(z),
stanza=ifelse(ymn!=ymx,paste0(ymn,"-",ymx),ymn));
#--use stanza as faceting variable
#----------------------------------
#selectivity functions
#----------------------------------
plots<-list();
uF<-unique(mdfr$fleet);
if (fleets[1]!="all") uF<-fleets;
for (f in uF){
if (verbose) message("Plotting fleet",f,"\n")
mdfrp<-mdfr[mdfr$fleet==f,];
# if (!singlePlot){
# uY<-unique(mdfrp$y);
# for (pg in 1:ceiling(length(uY)/mxy)){
# mdfrpp<-mdfrp[mdfrp$y %in% uY[(1+mxy*(pg-1)):min(length(uY),mxy*pg)],];
# p<-plotMDFR.XY(mdfrpp,x='z',value.var='val',agg.formula=NULL,
# facet_grid=facet_grid,facet_wrap=facet_wrap,nrow=5,
# xlab='size (mm CW)',ylab='Selectivity',units='',lnscale=FALSE,
# title=f,
# colour='case',guideTitleColor='',
# shape='case',guideTitleShape='',
# showPlot=FALSE);
# if (showPlot||!is.null(pdf)) print(p);
# cap<-paste0("\n \nFigure &&figno. Selectivity functions for ",f,"(",pg," of ",ceiling(length(uY)/mxy),"). \n \n")
# plots[[cap]]<-p;
# }#pg
# } else {
# p<-plotMDFR.XY(mdfrp,x='z',value.var='val',agg.formula=NULL,
# facet_grid=facet_grid,facet_wrap=facet_wrap,nrow=5,
# xlab='size (mm CW)',ylab='Selectivity',units='',lnscale=FALSE,
# title=f,
# colour='y',guideTitleColour='year',
# shape='y',guideTitleShape='year',
# showPlot=FALSE);
# if (showPlot||!is.null(pdf)) print(p);
# cap<-paste0("\n \nFigure &&figno. Selectivity functions for ",f,". \n \n")
# plots[[cap]]<-p;
# }
rws = mdfrp %>% dplyr::distinct(x,m,s);
for (i in 1:nrow(rws)){
rw = rws[i,];
str = stringr::str_trim(stringr::str_remove_all(paste(rw$s,rw$m,rw$x),stringr::fixed("all")));
mdfrpp = mdfrp %>% dplyr::inner_join(rw,by=c("x","m","s"));
nStanzas = length(unique(mdfrpp$stanza));
facets = facet_wrap(~stanza,ncol=1);
if (nStanzas>3){facets = facet_wrap(~stanza,ncol=floor(sqrt(nStanzas)))}
p = ggplot(mdfrpp,aes(x=z,y=val,colour=case)) +
geom_line() +
facets + ylim(0,1) +
xlab('size (mm CW)')+ylab('selectivity')+ggtitle(f)+
std_theme;
cap<-paste0("\n \nFigure &&figno. Selectivity functions for ",str," crab in the ",f," survey. \n \n");
plots[[cap]]<-p;
}
}#uF
if (verbose) message("rCompTCMs::compareResults.Surveys.SelFcns: Done!\n");
return(plots)
}
|
#assignment03a.r
for(var in 0:25)
{
print(var)
} | /assignment03a.r | no_license | gibbydicenso/BIS-044-dicensogibby | R | false | false | 52 | r | #assignment03a.r
# Print each integer from 0 through 25, one value per line.
for (current_value in seq(0, 25)) {
  print(current_value)
}
## File for Initializing Project ##
# Initialize subdirectories
dir.create("code")
dir.create("data")
dir.create("images")
dir.create("rawdata")
dir.create("report")
dir.create("resources")
# Download data for champions
url.champs <-
'https://www.ticketcity.com/nba/nba-finals-tickets/nba-finals-champions.html'
download.file(url.champs, "rawdata/champions.html")
# Download data for drafted player heights
for (i in 1995:2015) {
url_p_height <-
paste0('http://www.draftexpress.com/nba-draft-history/?syear=', i)
download.file(url_p_height, paste0('rawdata/player_heights', i, '.html'))
}
# Download data for champions' season stats
for (i in 1996:2015) {
url_championship_stats <-
paste0('http://www.basketball-reference.com/leagues/NBA_', i, '.html')
download.file(url_championship_stats, paste0('rawdata/championship_stats', i, '.html'))
}
# Download data for Case Study Player: Stephen Curry
url_steph_curry <-
'http://www.draftexpress.com/profile/Stephen-Curry-1170/'
download.file(url_steph_curry, 'rawdata/steph_curry.html')
url_steph_curry_career <-
'http://www.basketball-reference.com/players/c/curryst01.html'
download.file(url_steph_curry_career, 'rawdata/steph_curry_career.html')
# Download data for Case Study Player: Draymond Green
url_draymond_green <-
'http://www.draftexpress.com/profile/Draymond-Green-5859/'
download.file(url_draymond_green, 'rawdata/draymond_green.html')
url_draymond_green_career <-
'http://www.basketball-reference.com/players/g/greendr01.html'
download.file(url_draymond_green_career, 'rawdata/draymond_green_career.html')
# Download data for Case Study Player: Tim Duncan
url_tim_duncan <-
'http://www.draftexpress.com/profile/Tim-Duncan-2292/'
download.file(url_tim_duncan, 'rawdata/tim_duncan.html')
url_tim_duncan_career <-
'http://www.basketball-reference.com/players/d/duncati01.html'
download.file(url_tim_duncan_career, 'rawdata/tim_duncan_career.html')
# Download data for Case Study Player: Shaq
url_shaq <-
'http://www.draftexpress.com/profile/Shaquille-O-neal-3796/'
download.file(url_shaq, 'rawdata/shaq.html')
url_shaq_career <-
'http://www.basketball-reference.com/players/o/onealsh01.html'
download.file(url_shaq_career, 'rawdata/shaq_career.html')
# Download data for Championship team lineups
url_champ_stats <- 'http://www.basketball-reference.com/teams/HOU/1995.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1995, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/CHI/1996.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1996, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/CHI/1997.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1997, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/CHI/1998.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1998, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/1999.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1999, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2000.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2000, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2001.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2001, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2002.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2002, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/2003.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2003, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/DET/2004.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2004, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/2005.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2005, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/MIA/2006.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2006, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/2007.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2007, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/BOS/2008.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2008, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2009.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2009, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2010.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2010, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/DAL/2011.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2011, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/MIA/2012.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2012, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/MIA/2013.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2013, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/2014.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2014, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/GSW/2015.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2015, '.html'))
suppressWarnings(source("code/data_cleanprep.R")) | /skeleton.R | no_license | ArmanMadani/Stat-133-Project | R | false | false | 5,650 | r | ## File for Initializing Project ##
# Initialize subdirectories
dir.create("code")
dir.create("data")
dir.create("images")
dir.create("rawdata")
dir.create("report")
dir.create("resources")
# Download data for champions
url.champs <-
'https://www.ticketcity.com/nba/nba-finals-tickets/nba-finals-champions.html'
download.file(url.champs, "rawdata/champions.html")
# Download data for drafted player heights
for (i in 1995:2015) {
url_p_height <-
paste0('http://www.draftexpress.com/nba-draft-history/?syear=', i)
download.file(url_p_height, paste0('rawdata/player_heights', i, '.html'))
}
# Download data for champions' season stats
for (i in 1996:2015) {
url_championship_stats <-
paste0('http://www.basketball-reference.com/leagues/NBA_', i, '.html')
download.file(url_championship_stats, paste0('rawdata/championship_stats', i, '.html'))
}
# Download data for Case Study Player: Stephen Curry
url_steph_curry <-
'http://www.draftexpress.com/profile/Stephen-Curry-1170/'
download.file(url_steph_curry, 'rawdata/steph_curry.html')
url_steph_curry_career <-
'http://www.basketball-reference.com/players/c/curryst01.html'
download.file(url_steph_curry_career, 'rawdata/steph_curry_career.html')
# Download data for Case Study Player: Draymond Green
url_draymond_green <-
'http://www.draftexpress.com/profile/Draymond-Green-5859/'
download.file(url_draymond_green, 'rawdata/draymond_green.html')
url_draymond_green_career <-
'http://www.basketball-reference.com/players/g/greendr01.html'
download.file(url_draymond_green_career, 'rawdata/draymond_green_career.html')
# Download data for Case Study Player: Tim Duncan
url_tim_duncan <-
'http://www.draftexpress.com/profile/Tim-Duncan-2292/'
download.file(url_tim_duncan, 'rawdata/tim_duncan.html')
url_tim_duncan_career <-
'http://www.basketball-reference.com/players/d/duncati01.html'
download.file(url_tim_duncan_career, 'rawdata/tim_duncan_career.html')
# Download data for Case Study Player: Shaq
url_shaq <-
'http://www.draftexpress.com/profile/Shaquille-O-neal-3796/'
download.file(url_shaq, 'rawdata/shaq.html')
url_shaq_career <-
'http://www.basketball-reference.com/players/o/onealsh01.html'
download.file(url_shaq_career, 'rawdata/shaq_career.html')
# Download data for Championship team lineups
url_champ_stats <- 'http://www.basketball-reference.com/teams/HOU/1995.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1995, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/CHI/1996.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1996, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/CHI/1997.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1997, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/CHI/1998.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1998, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/1999.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 1999, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2000.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2000, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2001.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2001, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2002.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2002, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/2003.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2003, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/DET/2004.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2004, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/2005.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2005, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/MIA/2006.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2006, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/2007.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2007, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/BOS/2008.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2008, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2009.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2009, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/LAL/2010.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2010, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/DAL/2011.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2011, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/MIA/2012.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2012, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/MIA/2013.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2013, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/SAS/2014.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2014, '.html'))
url_champ_stats <- 'http://www.basketball-reference.com/teams/GSW/2015.html'
download.file(url_champ_stats, paste0('rawdata/champion_stats', 2015, '.html'))
suppressWarnings(source("code/data_cleanprep.R")) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_plot.R
\name{get_heatmap_color}
\alias{get_heatmap_color}
\title{Get a vector of colors for heatmap}
\usage{
get_heatmap_color(palette)
}
\description{
Get a vector of colors for heatmap
}
| /man/get_heatmap_color.Rd | no_license | jeevanyue/PIVOT | R | false | true | 276 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_plot.R
\name{get_heatmap_color}
\alias{get_heatmap_color}
\title{Get a vector of colors for heatmap}
\usage{
get_heatmap_color(palette)
}
\description{
Get a vector of colors for heatmap
}
|
# Purpose: interactive sanity checks comparing 10k- vs 100k-replicate
# simulation results for the Wilson/Newcombe missing-data project.
# Requires project helper scripts under funs/ and .rds result files on disk.
# NOTE(review): warn.conflicts/quietly use T/F rather than TRUE/FALSE --
# the single-letter aliases are reassignable and best avoided.
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(tidyr, warn.conflicts = F, quietly = T)
source("funs/do.cca.check.R")
source("funs/p1.eval.R")
source("funs/p1.eval.full.R")
# NOTE(review): 't' shadows base::t() (matrix transpose) for the rest of the
# session -- consider a more descriptive name.
t <- readRDS("results/p1_mnar/p1_mnar_pc65_nobs100_do30_mu1.8_sd0.3_100k.rds")
sum.prop1 <- readRDS("summaries/sum.prop1.rds")
# evaluate the 100k MNAR run and compare against the stored 10k summary row
p1.eval(t, pc = 0.65)
sum.prop1%>%
  dplyr::filter(mu.k==1.8, sd.k==0.3, n==100, pc==0.65, do==0.3)
###############################################
# Scenario set_n == 5: compare 10k (MAR-strong) vs 100k (MCAR) results
source("funs/m2_ch.R")
source("funs/do_ch.R")
setting <-readRDS("setting.rds")
d10k <- readRDS("results/p2_mars/p2_mar_strong_set_n5.rds")
d100k <- readRDS("results/p2_mcar/p2_mcar_set_n5_100k.rds")
#d100km20 <- readRDS("results/p2_mcar/p2_mcar_set_n5_100k_m20.rds")
# drop-out and m2 consistency checks for each result set
do_ch(d10k)
do_ch(d100k)
m2_ch(d10k)
m2_ch(d100k)
bin2mi::p2_eval(d10k, m2 = setting$m2[setting$set_n==5])
bin2mi::p2_eval(d100k, m2 = setting$m2[setting$set_n==5])
#bin2mi::p2_eval(d100km20, m2 = setting$m2[setting$set_n==5])
# inspect elements flagged as failures (first element is a character message)
# NOTE(review): the lambda arguments below shadow the list objects d10k/d100k;
# it works, but a neutral argument name would be clearer.
purrr::discard(d10k, .p=function(d10k) is.character(d10k[[1]]))%>%head(10000)
purrr::keep(d100k, .p=function(d100k) is.character(d100k[[1]]))%>%length()
###############################################
# Scenario set_n == 6: same checks for the MAR-strong 10k vs 100k runs
source("funs/m2_ch.R")
source("funs/do_ch.R")
setting <-readRDS("setting.rds")
d10k <- readRDS("results/p2_mars/p2_mar_strong_set_n6.rds")
d100k <- readRDS("results/p2_mars/p2_mar_strong_set_n6_100k.rds")
#d100km20 <- readRDS("results/p2_mcar/p2_mcar_set_n5_100k_m20.rds")
do_ch(d10k)
do_ch(d100k)
m2_ch(d10k)
m2_ch(d100k)
bin2mi::p2_eval(d10k, m2 = setting$m2[setting$set_n==6])
bin2mi::p2_eval(d100k, m2 = setting$m2[setting$set_n==6])
| /checks/check_100k.R | no_license | yuliasidi/wilson_newcombe | R | false | false | 1,659 | r | library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(tidyr, warn.conflicts = F, quietly = T)
source("funs/do.cca.check.R")
source("funs/p1.eval.R")
source("funs/p1.eval.full.R")
t <- readRDS("results/p1_mnar/p1_mnar_pc65_nobs100_do30_mu1.8_sd0.3_100k.rds")
sum.prop1 <- readRDS("summaries/sum.prop1.rds")
p1.eval(t, pc = 0.65)
sum.prop1%>%
dplyr::filter(mu.k==1.8, sd.k==0.3, n==100, pc==0.65, do==0.3)
###############################################
source("funs/m2_ch.R")
source("funs/do_ch.R")
setting <-readRDS("setting.rds")
d10k <- readRDS("results/p2_mars/p2_mar_strong_set_n5.rds")
d100k <- readRDS("results/p2_mcar/p2_mcar_set_n5_100k.rds")
#d100km20 <- readRDS("results/p2_mcar/p2_mcar_set_n5_100k_m20.rds")
do_ch(d10k)
do_ch(d100k)
m2_ch(d10k)
m2_ch(d100k)
bin2mi::p2_eval(d10k, m2 = setting$m2[setting$set_n==5])
bin2mi::p2_eval(d100k, m2 = setting$m2[setting$set_n==5])
#bin2mi::p2_eval(d100km20, m2 = setting$m2[setting$set_n==5])
purrr::discard(d10k, .p=function(d10k) is.character(d10k[[1]]))%>%head(10000)
purrr::keep(d100k, .p=function(d100k) is.character(d100k[[1]]))%>%length()
###############################################
source("funs/m2_ch.R")
source("funs/do_ch.R")
setting <-readRDS("setting.rds")
d10k <- readRDS("results/p2_mars/p2_mar_strong_set_n6.rds")
d100k <- readRDS("results/p2_mars/p2_mar_strong_set_n6_100k.rds")
#d100km20 <- readRDS("results/p2_mcar/p2_mcar_set_n5_100k_m20.rds")
do_ch(d10k)
do_ch(d100k)
m2_ch(d10k)
m2_ch(d100k)
bin2mi::p2_eval(d10k, m2 = setting$m2[setting$set_n==6])
bin2mi::p2_eval(d100k, m2 = setting$m2[setting$set_n==6])
|
# Base-graphics teaching script: 1-D plots, 2-D plots, plot elements,
# small multiples, heatmaps, and 3-D scatter plots.
#
# BUG FIX: the "plot elements" section used bare calls such as `axis()` and
# `polygon()` as headings. Called with no arguments those functions error
# (required arguments are missing), aborting a straight-through run of the
# script. They are now section comments instead.
### 1-D plots ###
# continuous
x <- rchisq(500, df=2)
hist(x)
hist(x, breaks = 30, col = 'grey50')
colors()   # list the named colours R knows about
plot(density(x))
boxplot(x, horizontal = TRUE)
par(pch = 16)
boxplot(x, col = 'grey75')
# categorical
x <- sample(c('red','blue','green'), 500, replace = TRUE, prob = c(.5,.2,.3))
barplot( table(x) )
barplot( table(x)/length(x) )            # proportions instead of counts
barplot( matrix( table(x)/length(x), ncol=1))   # single stacked bar
x <- factor(x, levels = c('red','blue','green'))  # control category order
barplot( table(x) )
barplot( table(x), col = c('red','blue','green'))
# aside: RColorBrewer
require(RColorBrewer)
display.brewer.all()
barplot( table(x), col = brewer.pal(3, 'Set1') )
### 2-D plots ###
# continuous x continuous
x <- rnorm(500)
y <- x + rnorm(500)
plot( x,y )
plot( x,y, col = rgb(0,0,0, .5))   # translucent points show overplotting
require(scales)
plot( x,y, col = alpha(brewer.pal(3,'Set1')[2], .5))
x <- 1:100
y <- sqrt(x) + rnorm(100)
plot( x,y )
plot( x,y, type = 'l') # l = line
x <- 1:10
y <- sqrt(x) + rnorm(10)
plot( x,y, type = 'b') # b = both
plot( x,y )
lines( x,y )
# sneaking in an extra dimension: encode z as point size
z <- sqrt(x + y^2)
plot( x,y, type = 'b', cex = z)
plot( x,y, type = 'b', cex = z/2)
clus <- 1 + (y > 2) + (y > 3)   # crude 3-level grouping of y
plot( x,y, type = 'b', col = clus)
plot( x,y, type = 'l')
points( x,y, col = c('red','yellow','green')[clus])
plot( x,y, type = 'l')
points( x,y, col = c('red','yellow','green')[clus], cex = z/2)
plot( x,y, type = 'l')
# NOTE(review): colorby() is not a base-R function and is not defined or
# loaded anywhere in this script -- source its definition before running.
points( x,y, col = colorby(z, colors = c('red','yellow','green')), cex = z/2)
### plot elements ###
## axis() ----
{
plot( x,y, type = 'b', axes = FALSE, xlab = 'Day', ylab = 'Outcome')
axis(1, at = c(1,5,10))   # draw only selected x ticks
}
## lines() ----
{
plot( x,y )
lines( x,y )
}
## points() ----
{
}
## text() ----
{
plot( x,y, type = 'b')
ind <- which.min(y)
text( x[ind],y[ind], 'MIN', pos = 4, col = 'red')   # pos = 4: right of point
ind <- which.max(y)
text( x[ind],y[ind], 'MAX', pos = 2, col = 'red')   # pos = 2: left of point
}
## rect() ----
{
plot( x,y )
# par("usr") gives the plot-region extremes: shade the whole panel grey
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = "grey90")
grid(col="white")
points( x,y, type='b')
}
## polygon() ----
{
x1 <- rchisq(500, df=2)
x2 <- rchisq(500, df=4)
d1 <- density(x1)
d2 <- density(x2)
# empty frame spanning both densities, then filled translucent curves
plot( range(c(d1$x,d2$x)), range(c(d1$y,d2$y,0)))
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = "grey90")
grid(col="white")
polygon( d1$x,d1$y, col=alpha('red',.5))
polygon( d2$x,d2$y, col=alpha('blue',.5))
}
## segments() ----
{
x1 <- 1:30; y1 <- x1 + rnorm(30)
x2 <- x1 + rnorm(30); y2 <- x2 + rnorm(30)
plot( x1,y1, xlim = range(c(x1,x2)), ylim = range(c(y1,y2)))
points( x2,y2, col = 'red')
segments( x1, y1, x2, y2)   # join paired observations
}
### Small Multiples ###
theta <- abs(rnorm(9))
data <- sapply(theta, function(t){ rnorm(500, sd=t) })   # one column per sd
par(mfrow=c(3,3))   # 3x3 grid, one density per panel
for(i in 1:9){
plot(density(data[,i]), main=theta[i], xlim = range(data), xlab='', ylab='')
#hist(data[,i], breaks = 30, col = 'grey50', xlim=range(data), main = theta[i])
}
par(mfrow=c(1,1))   # restore single-panel layout
boxplot(data, col = 'grey75')
pairs(data[,1:4], col=rgb(0,0,0,.25))
### Heatmaps ###
# generate some data (X): 3 "seed" columns plus 47 columns correlated with
# a randomly chosen seed; `clus` records each column's seed.
{
X <- matrix(NA, nrow = 100, ncol = 50)
X[,1] <- rnorm(100)
X[,2] <- rnorm(100)
X[,3] <- rnorm(100)
clus <- c(1:3, rep(NA,47))
for(j in 4:50){
i <- sample(1:3, 1)
X[,j] <- .75*X[,i] + sqrt(1-.75^2)*rnorm(100)   # correlation .75 with seed i
clus[j] <- i
}
rownames(X) <- paste('feature',1:100, sep='')
colnames(X) <- paste('sample',1:50, sep='')
}
boxplot(X)
heatmap(X)
heatmap(X, col = heat.colors(50))
heatmap(X, col = topo.colors(50))
mypal <- colorRampPalette( c('red','black','green') )   # custom red-green ramp
heatmap(X, col = mypal(50))
heatmap(X, Rowv = NA)   # suppress row reordering
require(gplots)
heatmap.2(X)
heatmap.2(X, trace = 'none')
heatmap.2(X, trace = 'none', Rowv = NA, dendrogram = 'col')
heatmap.2(X, trace = 'none', Rowv = NA, dendrogram = 'col', ColSideColors = c('red','blue','green')[clus])
heatmap.2(X, trace = 'none', Rowv = NA, dendrogram = 'col', ColSideColors = c('red','blue','green')[clus], RowSideColors = topo.colors(100))
heatmap.2(X, trace = 'none', dendrogram = 'col', ColSideColors = c('red','blue','green')[clus], RowSideColors = topo.colors(100))
### 3D plots ###
pca <- prcomp(t(X))   # PCA on samples (rows after transpose)
require(rgl)
plot3d( pca$x[,1:3] )
plot3d( pca$x[,1:3], col = c('red','blue','green')[clus])
plot3d( pca$x[,1:3], col = c('red','blue','green')[clus], size = 5)
plot3d( pca$x[,1:3], col = c('red','blue','green')[clus], size = 5, aspect = 'iso')
| /Week-1/graphics.R | no_license | rmyjong/mcb293s | R | false | false | 4,331 | r |
### 1-D plots ###
# continuous
x <- rchisq(500, df=2)
hist(x)
hist(x, breaks = 30, col = 'grey50')
colors()
plot(density(x))
boxplot(x, horizontal = TRUE)
par(pch = 16)
boxplot(x, col = 'grey75')
# categorical
x <- sample(c('red','blue','green'), 500, replace = TRUE, prob = c(.5,.2,.3))
barplot( table(x) )
barplot( table(x)/length(x) )
barplot( matrix( table(x)/length(x), ncol=1))
x <- factor(x, levels = c('red','blue','green'))
barplot( table(x) )
barplot( table(x), col = c('red','blue','green'))
# aside: RColorBrewer
require(RColorBrewer)
display.brewer.all()
barplot( table(x), col = brewer.pal(3, 'Set1') )
### 2-D plots ###
# continuous x continuous
x <- rnorm(500)
y <- x + rnorm(500)
plot( x,y )
plot( x,y, col = rgb(0,0,0, .5))
require(scales)
plot( x,y, col = alpha(brewer.pal(3,'Set1')[2], .5))
x <- 1:100
y <- sqrt(x) + rnorm(100)
plot( x,y )
plot( x,y, type = 'l') # l = line
x <- 1:10
y <- sqrt(x) + rnorm(10)
plot( x,y, type = 'b') # b = both
plot( x,y )
lines( x,y )
# sneaking in an extra dimension
z <- sqrt(x + y^2)
plot( x,y, type = 'b', cex = z)
plot( x,y, type = 'b', cex = z/2)
clus <- 1 + (y > 2) + (y > 3)
plot( x,y, type = 'b', col = clus)
plot( x,y, type = 'l')
points( x,y, col = c('red','yellow','green')[clus])
plot( x,y, type = 'l')
points( x,y, col = c('red','yellow','green')[clus], cex = z/2)
plot( x,y, type = 'l')
points( x,y, col = colorby(z, colors = c('red','yellow','green')), cex = z/2)
### plot elements ###
axis()
{
plot( x,y, type = 'b', axes = FALSE, xlab = 'Day', ylab = 'Outcome')
axis(1, at = c(1,5,10))
}
lines()
{
plot( x,y )
lines( x,y )
}
points()
{
}
text()
{
plot( x,y, type = 'b')
ind <- which.min(y)
text( x[ind],y[ind], 'MIN', pos = 4, col = 'red')
ind <- which.max(y)
text( x[ind],y[ind], 'MAX', pos = 2, col = 'red')
}
rect()
{
plot( x,y )
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = "grey90")
grid(col="white")
points( x,y, type='b')
}
polygon()
{
x1 <- rchisq(500, df=2)
x2 <- rchisq(500, df=4)
d1 <- density(x1)
d2 <- density(x2)
plot( range(c(d1$x,d2$x)), range(c(d1$y,d2$y,0)))
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = "grey90")
grid(col="white")
polygon( d1$x,d1$y, col=alpha('red',.5))
polygon( d2$x,d2$y, col=alpha('blue',.5))
}
segments()
{
x1 <- 1:30; y1 <- x1 + rnorm(30)
x2 <- x1 + rnorm(30); y2 <- x2 + rnorm(30)
plot( x1,y1, xlim = range(c(x1,x2)), ylim = range(c(y1,y2)))
points( x2,y2, col = 'red')
segments( x1, y1, x2, y2)
}
### Small Multiples ###
theta <- abs(rnorm(9))
data <- sapply(theta, function(t){ rnorm(500, sd=t) })
par(mfrow=c(3,3))
for(i in 1:9){
plot(density(data[,i]), main=theta[i], xlim = range(data), xlab='', ylab='')
#hist(data[,i], breaks = 30, col = 'grey50', xlim=range(data), main = theta[i])
}
par(mfrow=c(1,1))
boxplot(data, col = 'grey75')
pairs(data[,1:4], col=rgb(0,0,0,.25))
### Heatmaps ###
# generate some data (X):
{
X <- matrix(NA, nrow = 100, ncol = 50)
X[,1] <- rnorm(100)
X[,2] <- rnorm(100)
X[,3] <- rnorm(100)
clus <- c(1:3, rep(NA,47))
for(j in 4:50){
i <- sample(1:3, 1)
X[,j] <- .75*X[,i] + sqrt(1-.75^2)*rnorm(100)
clus[j] <- i
}
rownames(X) <- paste('feature',1:100, sep='')
colnames(X) <- paste('sample',1:50, sep='')
}
boxplot(X)
heatmap(X)
heatmap(X, col = heat.colors(50))
heatmap(X, col = topo.colors(50))
mypal <- colorRampPalette( c('red','black','green') )
heatmap(X, col = mypal(50))
heatmap(X, Rowv = NA)
require(gplots)
heatmap.2(X)
heatmap.2(X, trace = 'none')
heatmap.2(X, trace = 'none', Rowv = NA, dendrogram = 'col')
heatmap.2(X, trace = 'none', Rowv = NA, dendrogram = 'col', ColSideColors = c('red','blue','green')[clus])
heatmap.2(X, trace = 'none', Rowv = NA, dendrogram = 'col', ColSideColors = c('red','blue','green')[clus], RowSideColors = topo.colors(100))
heatmap.2(X, trace = 'none', dendrogram = 'col', ColSideColors = c('red','blue','green')[clus], RowSideColors = topo.colors(100))
### 3D plots ###
pca <- prcomp(t(X))
require(rgl)
plot3d( pca$x[,1:3] )
plot3d( pca$x[,1:3], col = c('red','blue','green')[clus])
plot3d( pca$x[,1:3], col = c('red','blue','green')[clus], size = 5)
plot3d( pca$x[,1:3], col = c('red','blue','green')[clus], size = 5, aspect = 'iso')
|
# get fcc data from socrata
# renata gerecke
# 2021-06-26
library(tidyverse)
library(httr)
library(jsonlite)
library(RSocrata)
library(tictoc)
# Socrata app token.
# SECURITY NOTE(review): committing an API token to source control is a
# credential leak -- prefer setting SOCRATA_APP_TOKEN in the environment.
# The hard-coded value remains as the fallback so behaviour is unchanged
# when the variable is not set.
app_token <- Sys.getenv("SOCRATA_APP_TOKEN", unset = "2sC55bQkSqnsDPTcPqIyhD4Eb")
# FCC Form 477 county-level deployment snapshots, Jun 2016 - Jun 2020
# (one Socrata dataset per half-year release).
paths <- c(
  jun20 = "https://opendata.fcc.gov/resource/ktav-pdj7.json",
  dec19 = "https://opendata.fcc.gov/resource/ws2a-amik.json",
  jun19 = "https://opendata.fcc.gov/resource/udcw-naqn.json",
  dec18 = "https://opendata.fcc.gov/resource/wucg-w9k9.json",
  jun18 = "https://opendata.fcc.gov/resource/cekp-f8tj.json",
  dec17 = "https://opendata.fcc.gov/resource/fnid-qg8r.json",
  jun17 = "https://opendata.fcc.gov/resource/yikn-7er3.json",
  dec16 = "https://opendata.fcc.gov/resource/xv2f-wqqz.json",
  jun16 = "https://opendata.fcc.gov/resource/nb5q-gkcn.json"
)
# Pull every snapshot, filtered server-side to county rows and the "acfosw"
# technology roll-up; the `.id` column records which release each row is
# from. tic()/toc() times the (slow, network-bound) download.
tic()
dfs <- map_dfr(
  paths,
  ~read.socrata(str_c(.x, "?type=county&tech=acfosw"),
                app_token = app_token),
  .id = "date"
)
toc()
# Make sure the output directory exists before writing (robustness: the
# original write_rds() failed outright on a fresh checkout).
dir.create("data/fcc", recursive = TRUE, showWarnings = FALSE)
write_rds(dfs, "data/fcc/data_export.rds")
| /R/00A_getdata_fcc.R | no_license | mspp-data-studio-2021/broadband-access | R | false | false | 977 | r | # get fcc data from socrata
# renata gerecke
# 2021-06-26
library(tidyverse)
library(httr)
library(jsonlite)
library(RSocrata)
library(tictoc)
app_token <- "2sC55bQkSqnsDPTcPqIyhD4Eb"
paths <- c(
jun20 = "https://opendata.fcc.gov/resource/ktav-pdj7.json",
dec19 = "https://opendata.fcc.gov/resource/ws2a-amik.json",
jun19 = "https://opendata.fcc.gov/resource/udcw-naqn.json",
dec18 = "https://opendata.fcc.gov/resource/wucg-w9k9.json",
jun18 = "https://opendata.fcc.gov/resource/cekp-f8tj.json",
dec17 = "https://opendata.fcc.gov/resource/fnid-qg8r.json",
jun17 = "https://opendata.fcc.gov/resource/yikn-7er3.json",
dec16 = "https://opendata.fcc.gov/resource/xv2f-wqqz.json",
jun16 = "https://opendata.fcc.gov/resource/nb5q-gkcn.json"
)
tic()
dfs <- map_dfr(
paths,
~read.socrata(str_c(.x, "?type=county&tech=acfosw"),
app_token = app_token),
.id = "date"
)
toc()
write_rds(dfs, "data/fcc/data_export.rds")
|
################################################################################
# Author: Jerrison Li
# Date: Friday, December 23, 2016 6:14:42 PM PST
# Filename:
# Version: 1.0
# Description: Spam-classification walkthrough on the kernlab `spam` data
#              (structure of a data analysis).
################################################################################
library(dplyr)
library(tidyr)
################################################################################
#
################################################################################
library(kernlab)
data(spam)
set.seed(3435)
# Randomly assign each of the 4601 emails to training (1) or test (0), ~50/50.
trainingIndicator <- rbinom(4601, size = 1, prob = 0.5)
table(trainingIndicator)
################################################################################
trainSpam <- spam[trainingIndicator == 1, ]
testSpam <- spam[trainingIndicator == 0, ]
# Exploratory look at the training data.
head(names(trainSpam), 20)
head(trainSpam[, 1:10])
str(trainSpam)
table(trainSpam$type)
boxplot(capitalAve ~ type, data = trainSpam)
boxplot(log10(capitalAve + 1) ~ type, data = trainSpam)   # +1 guards log10(0)
# let's take a look at the relationship between the first 4 variables of the
# trainSpam data (logtransformed)
pairs(log10(trainSpam[, 1:4] + 1))
# plot Dendogram to see what predictors or what words or characteristics tend to
# cluster together (columns 1:57 are the predictors; column 58 is `type`)
hCluster <- hclust(dist(t(trainSpam[, 1:57])))
plot(hCluster)
# redo clustering after transforming the predictors to log10
# NOTE(review): 1:55 drops the last two predictor columns, inconsistent with
# the 1:57 above -- kept as-is to preserve the original plot; confirm intent.
hClusterUpdated <- hclust(dist(t(log10(trainSpam[, 1:55] + 1))))
plot(hClusterUpdated)
# build a GLM based on logistic regression. Univariate predictor to determine
# whether an email is spam or not. We will cycle through all the predictors and
# subsquently calculate the cross validated error rate of predicting spam emails
# from a single variable
trainSpam$numType <- as.numeric(trainSpam$type) - 1   # nonspam = 0, spam = 1
costFunction <- function(x, y) sum(x != (y > 0.5))    # misclassification count
cvError <- rep(NA, 55)
library(boot)
for (i in 1:55) {
lmFormula <- reformulate(names(trainSpam)[i], response = "numType")
glmFit = glm(lmFormula, family = "binomial", data = trainSpam)
cvError[i] = cv.glm(trainSpam, glmFit, costFunction, 2)$delta[2]   # 2-fold CV
}
## which predictor has minimum cross-validated error?
names(trainSpam)[which.min(cvError)]
## Use the best model from the group
predictionModel <- glm(numType ~ charDollar, family = "binomial", data =
trainSpam)
## Get predicted spam *probabilities* on the test set
## BUG FIX: predict.glm() defaults to the link (log-odds) scale; type =
## "response" returns probabilities comparable to the 0.5 threshold below.
predictionTest = predict(predictionModel, testSpam, type = "response")
predictedSpam <- rep("nonspam", dim(testSpam)[1])
## Classify as 'spam' for those with prob > 0.5
## BUG FIX: the original indexed this *test*-length vector with the
## *training*-set fitted values (predictionModel$fitted) -- a length mismatch.
predictedSpam[predictionTest > 0.5] <- "spam"
table(predictedSpam, testSpam$type)
## Error rate
## BUG FIX: diag(table(...)) gives the counts of *correct* classifications,
## not an error rate; report the misclassification proportion instead.
mean(predictedSpam != testSpam$type)
| /data_science_john_hopkins/Reproducible_Research/Week1/chapter_7_script.R | no_license | 143Reige/Coursera | R | false | false | 2,697 | r | ################################################################################
# Author: Jerrison Li
# Date: Friday, December 23, 2016 6:14:42 PM PST
# Filename:
# Version: 1.0
# Description:
################################################################################
library(dplyr)
library(tidyr)
################################################################################
#
################################################################################
library(kernlab)
data(spam)
set.seed(3435)
trainingIndicator <- rbinom(4601, size = 1, prob = 0.5)
table(trainingIndicator)
################################################################################
trainSpam <- spam[trainingIndicator == 1, ]
testSpam <- spam[trainingIndicator == 0, ]
head(names(trainSpam), 20)
head(trainSpam[, 1:10])
str(trainSpam)
table(trainSpam$type)
boxplot(capitalAve ~ type, data = trainSpam)
boxplot(log10(capitalAve + 1) ~ type, data = trainSpam)
# let's take a look at the relationship between the first 4 variables of the
# trainSpam data (logtransformed)
pairs(log10(trainSpam[, 1:4] + 1))
# plot Dendogram to see what predictors or what words or characteristics tend to
# cluster together
hCluster <- hclust(dist(t(trainSpam[, 1:57])))
plot(hCluster)
# redo clustering after transforming the predictors to log10
hClusterUpdated <- hclust(dist(t(log10(trainSpam[, 1:55] +1 ))))
plot(hClusterUpdated)
# build a GLM based on logistic regression. Univariate predictor to determine
# whether an email is spam or not. We will cycle through all the predictors and
# subsquently calculate the cross validated error rate of predicting spam emails
# from a single variable
trainSpam$numType <- as.numeric(trainSpam$type) - 1
costFunction <- function(x, y) sum(x != (y > 0.5))
cvError <- rep(NA, 55)
library(boot)
for (i in 1:55) {
lmFormula <- reformulate(names(trainSpam)[i], response = "numType")
glmFit = glm(lmFormula, family = "binomial", data = trainSpam)
cvError[i] = cv.glm(trainSpam, glmFit, costFunction, 2)$delta[2]
}
## which predictor has minimum cross-validated error?
names(trainSpam)[which.min(cvError)]
## Use the best model from the group
predictionModel <- glm(numType ~ charDollar, family = "binomial", data =
trainSpam)
## Get predictions on the test set
predictionTest = predict(predictionModel, testSpam)
predictedSpam <- rep("nonspam", dim(testSpam)[1])
## Classify as 'spam' for those with prob > 0.5
predictedSpam[predictionModel$fitted > 0.5] <- "spam"
table(predictedSpam, testSpam$type)
## Error rate
diag(table(predictedSpam, testSpam$type))
|
# Exercise 13.13: test for a long-run (cointegrating) relationship between
# log euro and log US series; failing that, fit a short-run VAR on the
# first differences.
setwd("/datascience/projects/statisticallyfit/github/learningprogramming/R/RStats/learneconometrics/CarterHill_PrinciplesOfEconometrics/Chapter13_VARandVECmodels")
rm(list=ls())
library(tsDyn)
library(ggplot2)
library(foreign)
library(ggfortify)
library(urca)
library(vars)
# Data from: http://www.principlesofeconometrics.com/poe4/poe4stata.htm
## Part b) is there a long run relationship? (cointegrating)
gfc <- read.dta("gfc.dta")
gfc$time <- seq(1, 60)
# there seems to be constant to so test "constant"
# y = leuro, x = lusa (why?)
cointegrationTest(gfc, type="constant", resid.lags = 0)
## Part c) So no long run (cointegrating). Is there short run relation (VAR) ?
gfc.diff <- data.frame(dleuro=diff(gfc$leuro), dlusa=diff(gfc$lusa))
# Lag-order check, p = 1. RESULT: constant is significant so keep it.
# NOTE: fitted object renamed from `var` -- that name masked stats::var().
var_fit <- VAR(gfc.diff, type="const", p=1)
var_fit
summary(var_fit)
# Lag-order check, p = 2 on the *differenced* series.
# BUG FIX: the original fitted VAR(gfc, ...) on the levels here, which
# contradicts the surrounding p=1-vs-p=2 comparison on gfc.diff (and would
# treat the `time` column as an endogenous variable).
# RESULT: not all second lags are significant, so just use p = 1
var_fit <- VAR(gfc.diff, type="const", p=2)
var_fit
summary(var_fit)
# FINAL MODEL
var_fit <- VAR(gfc.diff, type="const", p=1)
var_fit
summary(var_fit)
# No serial correlation confirms that p = 1 and not p > 1...
e.res <- var_fit$varresult$dleuro$residuals
u.res <- var_fit$varresult$dlusa$residuals
# 60 obs -> 59 first differences -> 58 residuals after one lag
df.res <- data.frame(t=seq(1, 58), e.res=e.res, u.res=u.res)
autoplot(acf(var_fit$varresult$dleuro$residuals, lag.max = 20, plot = FALSE))
autoplot(acf(var_fit$varresult$dlusa$residuals, lag.max = 20, plot = FALSE))
ggplot() +
  geom_line(data=df.res, aes(x=t, y=e.res), lwd=1, colour="red") +
  geom_line(data=df.res, aes(x=t, y=u.res), lwd=1, colour="blue")
autoplot(ts(e.res), size=1, colour="red")
autoplot(ts(u.res), size=1, colour="blue")
| /CarterHill_PrinciplesOfEconometrics/Chapter13_VARandVECmodels/exercise13.13.R | no_license | statisticallyfit/REconometrics | R | false | false | 1,632 | r | setwd("/datascience/projects/statisticallyfit/github/learningprogramming/R/RStats/learneconometrics/CarterHill_PrinciplesOfEconometrics/Chapter13_VARandVECmodels")
rm(list=ls())
library(tsDyn)
library(ggplot2)
library(foreign)
library(ggfortify)
library(urca)
library(vars)
# Data from: http://www.principlesofeconometrics.com/poe4/poe4stata.htm
## Part b) is there a long run relationship? (cointegrating)
gfc <- read.dta("gfc.dta")
gfc$time <- seq(1, 60)
# there seems to be constant to so test "constant"
# y = leuro, x = lusa (why?)
cointegrationTest(gfc, type="constant", resid.lags = 0)
## Part c) So no long run (cointegrating). Is there short run relation (VAR) ?
gfc.diff <- data.frame(dleuro=diff(gfc$leuro), dlusa=diff(gfc$lusa))
# RESULT: constant is significant so keep it
var <- VAR(gfc.diff, type="const", p=1)
var
summary(var)
# RESULT: not all second lags are significant, so just use p =1
var <- VAR(gfc, type="const", p=2)
var
summary(var)
# FINAL MODEL
var <- VAR(gfc.diff, type="const", p=1)
var
summary(var)
# No serial correlation confirms that p = 1 and not p > 1...
e.res <- var$varresult$dleuro$residuals
u.res <- var$varresult$dlusa$residuals
df.res <- data.frame(t=seq(1, 58), e.res=e.res, u.res=u.res)
autoplot(acf(var$varresult$dleuro$residuals, lag.max = 20, plot = FALSE))
autoplot(acf(var$varresult$dlusa$residuals, lag.max = 20, plot = FALSE))
ggplot() +
geom_line(data=df.res, aes(x=t, y=e.res), lwd=1, colour="red") +
geom_line(data=df.res, aes(x=t, y=u.res), lwd=1, colour="blue")
autoplot(ts(e.res), size=1, colour="red")
autoplot(ts(u.res), size=1, colour="blue")
|
e9bcfec101180ec0a2ce22798766f487 ctrl.e#1.a#3.E#128.A#48.c#.w#5.s#31.asp.qdimacs 5295 15358 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#5.s#31.asp/ctrl.e#1.a#3.E#128.A#48.c#.w#5.s#31.asp.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 91 | r | e9bcfec101180ec0a2ce22798766f487 ctrl.e#1.a#3.E#128.A#48.c#.w#5.s#31.asp.qdimacs 5295 15358 |
##########################################
##
## Data ## Oocysts
##
##
##########################################
# Mosquito-level oocyst counts for the 4B7-85% arm; OocPrev is
# presence/absence of any oocyst.
b7_85<-read.table("C:\\Users\\Ellie\\Dropbox\\Malaria\\Data Malaria\\Blagborough data Nat Comms\\grant data\\Andrew Blagborough\\4B7-85\\mosquito.txt",header=TRUE)
b7_85$OocPrev<-ifelse(b7_85$Oocyst==0,0,1)
summary(b7_85)
### Oocyst intensity, round 1, by number of infectious bites:
### bootstrap mean and percentile 95% CI (10,000 resamples).
### A loop replaces the original four copy-pasted bootstrap blocks; for each
### bite count the CI prints first, then the observed mean, as before.
### na.rm = TRUE is applied to every mean for consistency (a no-op when the
### group has no missing values, as the originals assumed).
for (nb in c(1, 2, 5, 10)) {
  ooc <- b7_85$Oocyst[b7_85$Round == 1 & b7_85$Bites == nb]
  boots <- replicate(10000, mean(sample(ooc, replace = TRUE), na.rm = TRUE))
  print(quantile(boots, c(0.025, 0.975)))
  print(mean(ooc, na.rm = TRUE))
}
### Oocyst prevalence, round 1, by number of infectious bites:
### bootstrap mean and percentile 95% CI.
for (nb in c(1, 2, 5, 10)) {
  prev <- b7_85$OocPrev[b7_85$Round == 1 & b7_85$Bites == nb]
  boots <- replicate(10000, mean(sample(prev, replace = TRUE), na.rm = TRUE))
  print(quantile(boots, c(0.025, 0.975)))
  print(mean(prev, na.rm = TRUE))
}
##########################################
##
## Data ## BloodStage and Sporozoites
##
##
##########################################
# Mouse-level data: parasitemia plus up to ten per-mosquito sporozoite
# counts per mouse; bloodstage is presence/absence of any parasitemia.
b7_85b<-read.table("C:\\Users\\Ellie\\Dropbox\\Malaria\\Data Malaria\\Blagborough data Nat Comms\\grant data\\Andrew Blagborough\\4B7-85\\mouse.txt",header=TRUE)
b7_85b$bloodstage<-ifelse(b7_85b$Parasitemia==0,0,1)
summary(b7_85b)
### Blood-stage prevalence, round 1, by number of infectious bites:
### bootstrap mean and percentile 95% CI (10,000 resamples).
### A loop replaces the original four copy-pasted bootstrap blocks; for each
### bite count the CI prints first, then the observed mean, as before.
for (nb in c(1, 2, 5, 10)) {
  bs <- b7_85b$bloodstage[b7_85b$Round == 1 & b7_85b$Bites == nb]
  boots <- replicate(10000, mean(sample(bs, replace = TRUE), na.rm = TRUE))
  print(quantile(boots, c(0.025, 0.975)))
  print(mean(bs, na.rm = TRUE))
}
###Sporozoite prevalence: bootstrap mean and 95% CI
## Convert each of the ten per-mosquito sporozoite counts to
## presence/absence (ifelse() propagates NA for undissected mosquitoes).
## A loop replaces ten copy-pasted ifelse() lines.
for (j in 1:10) {
  b7_85b[[paste0("sp", j)]] <- ifelse(b7_85b[[paste0("Sporozoite", j)]] == 0, 0, 1)
}
## Round 1: for a cage receiving nb bites, mosquitoes 1..nb carry data, so
## pool columns sp1..sp<nb> across the round-1 mice in that treatment, then
## bootstrap (10,000 resamples). This replaces the hand-built sp1a/sp2a/
## sp5a/sp10a vectors and their four copy-pasted bootstrap blocks.
## FIX: na.rm = TRUE is now used for *every* observed mean -- the original
## omitted it for the 1-bite group only, which would print NA if any count
## in that group were missing.
for (nb in c(1, 2, 5, 10)) {
  keep <- b7_85b$Round == 1 & b7_85b$Bites == nb
  sp_pooled <- unlist(lapply(1:nb, function(j) b7_85b[[paste0("sp", j)]][keep]))
  boots <- replicate(10000, mean(sample(sp_pooled, replace = TRUE), na.rm = TRUE))
  print(quantile(boots, c(0.025, 0.975)))
  print(mean(sp_pooled, na.rm = TRUE))
}
###Sporozoite intensity: bootstrap mean and 95% CI
## Round 1: pool the raw sporozoite counts Sporozoite1..Sporozoite<nb>
## across the round-1 mice in each bite treatment, then bootstrap
## (10,000 resamples). This replaces the hand-built Sporozoite1a/2a/5a/10a
## vectors and their four copy-pasted bootstrap blocks; for each bite
## count the CI prints first, then the observed mean, as before.
for (nb in c(1, 2, 5, 10)) {
  keep <- b7_85b$Round == 1 & b7_85b$Bites == nb
  spz_pooled <- unlist(lapply(1:nb, function(j) b7_85b[[paste0("Sporozoite", j)]][keep]))
  boots <- replicate(10000, mean(sample(spz_pooled, replace = TRUE), na.rm = TRUE))
  print(quantile(boots, c(0.025, 0.975)))
  print(mean(spz_pooled, na.rm = TRUE))
}
is.mosi<-0
n.rounds=4
n.bites=4
t.bites<-c(1,2,5,10)
par(mfcol=c(4,5),mar= c(3, 4, 1, 2))
is.mosi<-0
##Controls: mouse read.csv("C:\\Users\\Ellie\\Documents\\Data Malaria\\Blagborough data Nat Comms\\sporozoites.csv",header=T)
##Controls: mosquito read.table("C:\\Users\\Ellie\\Documents\\Data Malaria\\Blagborough data Nat Comms\\oocysts.txt",header=T)
##4B7_85%
##Bloodstage
prevH.round.1<-matrix(ncol=4,nrow=4,rep(0,16))
prevH.lower.1<-matrix(ncol=4,nrow=4,rep(0,16))
prevH.upper.1<-matrix(ncol=4,nrow=4,rep(0,16))
for(mb in 1:4){
hh=rbind(prevH.round.1[mb,])
hh.max<-apply(hh, 2, max)+6
ciL<-rbind(prevH.lower.1[mb,])
ciU<-rbind(prevH.upper.1[mb,])
colnames(hh)<-seq(1,4,1)
par(las=1,col.axis="black")
mybarcol <- "gray20"
mp <- barplot(hh*100,
col = c("dodgerblue4"),
ylim= c(0,100),
# main = "Parasetemia", font.main = 4,
# sub = "Transmission cycle", col.sub = mybarcol,
cex.names = 1.4)
segments(mp, ciL*100, mp, ciU*100 , col = mybarcol, lwd = 1.5)
}
##Oocyst prevalence
prevMO.round.1<-matrix(ncol=3,nrow=4,c(0,0,0.04,0.10,rep(0,8)))
prevMO.lower.1<-matrix(ncol=3,nrow=4,c(0,0,0.00,0.02,rep(0,8)))
prevMO.upper.1<-matrix(ncol=3,nrow=4,c(0,0,0.10,0.18,rep(0,8)))
for(mb in 1:4){
hh=rbind(prevMO.round.1[mb,])
hh.max<-apply(hh, 2, max)+6
ciL<-rbind(prevMO.lower.1[mb,])
ciU<-rbind(prevMO.upper.1[mb,])
colnames(hh)<-seq(1,3,1)
par(las=1,col.axis="black")
mybarcol <- "gray20"
mp <- barplot(hh*100,
col = c("forestgreen"),
ylim= c(0,100),
# main = "Parasetemia", font.main = 4,
# sub = "Transmission cycle", col.sub = mybarcol,
cex.names = 1.4)
segments(mp, ciL*100, mp, ciU*100 , col = mybarcol, lwd = 1.5)
}
##Oocyst intensity
# Mean oocyst intensity per transmission cycle; same row/treatment layout
# as the prevalence panels above, with bootstrap CI bounds.
prevMOI.round.1<-matrix(ncol=3,nrow=4,c(0,0,0.06,0.18,rep(0,8)))
bootMOI.lower1<-matrix(ncol=3,nrow=4,c(0,0,0,0.04,rep(0,8)))
bootMOI.upper1<-matrix(ncol=3,nrow=4,c(0,0,0.16,0.36,rep(0,8)))
for(mb in 1:4){
hh=rbind(prevMOI.round.1[mb,])
# Headroom for the commented-out significance labels below (currently unused).
hh.max<-apply(hh, 2, max)+5
ciL<-rbind(bootMOI.lower1[mb,])
ciU<-rbind(bootMOI.upper1[mb,])
colnames(hh)<-seq(1,3,1)
par(las=1,col.axis="black")
mybarcol <- "gray20"
# Note the narrower y-range (0-45) than the prevalence panels.
mp <- barplot(hh*100,
col = c("firebrick3"),
ylim= c(0,45),
# main = "Parasetemia", font.main = 4,
# sub = "Transmission cycle", col.sub = mybarcol,
cex.names = 1.4)
segments(mp, ciL*100, mp, ciU*100 , col = mybarcol, lwd = 1.5)
#text(colMeans(mp)+0.2,hh.max, labels =less.than.001[mb,], col = "red")
#mtext(side = 1, at = colMeans(mp), line = 2,
# text = paste("efficacy", formatC(colMeans(hh))), col = "red")
}
##Sporozoite prevalence
# Sporozoite prevalence per transmission cycle (rows = 1/2/5/10-bite
# treatments), plotted as one orange barplot panel per treatment with
# bootstrap 95% CI whiskers.
prevMOS.round.1 <- matrix(ncol = 3, nrow = 4, c(0.2, 0.111, 0.04, 0.10, rep(0, 8)))
prevMOS.lower.1 <- matrix(ncol = 3, nrow = 4, c(0.0, 0.000, 0.00, 0.02, rep(0, 8)))
prevMOS.upper.1 <- matrix(ncol = 3, nrow = 4, c(0.6, 0.333, 0.12, 0.18, rep(0, 8)))
for (mb in 1:4) {
  bar_vals <- rbind(prevMOS.round.1[mb, ])  # 1-row matrix so barplot() groups by cycle
  hh.max <- apply(bar_vals, 2, max) + 6     # headroom kept for optional labels
  ci_lower <- rbind(prevMOS.lower.1[mb, ])
  ci_upper <- rbind(prevMOS.upper.1[mb, ])
  colnames(bar_vals) <- 1:3                 # x-axis: transmission cycle number
  par(las = 1, col.axis = "black")
  mybarcol <- "gray20"                      # also consumed by the intensity loop below
  mp <- barplot(bar_vals * 100,
                col = "orange",
                ylim = c(0, 100),
                cex.names = 1.4)
  segments(mp, ci_lower * 100, mp, ci_upper * 100, col = mybarcol, lwd = 1.5)
}
##Sporozoite intensity
# Mean sporozoite intensity per transmission cycle (raw scale, not *100),
# one purple barplot panel per biting treatment with bootstrap CI whiskers.
MOSI.round.1<-matrix(ncol=3, nrow = 4,c(0.2,0.1111,0.04,0.16,rep(0,8)))
bootMOSI.lower1<-matrix(ncol=3,nrow=4,c(0.0,0.0000,0.00,0.04,rep(0,8)))
bootMOSI.upper1<-matrix(ncol=3,nrow=4,c(0.6,0.3333,0.12,0.32,rep(0,8)))
# Loop-invariant: set the cycle labels once instead of on every iteration.
colnames(MOSI.round.1) <- c(1, 2, 3)
for (nb in 1:4) {
  # Fixed: par() used to be called AFTER barplot(), so the las/col.axis
  # settings never applied to this panel; every sibling loop sets par() first.
  par(las = 1, col.axis = "black")
  # Fixed: mybarcol was only inherited as a leftover global from earlier
  # loops; define it locally so this block stands on its own.
  mybarcol <- "gray20"
  mp <- barplot(MOSI.round.1[nb, ], ylim = c(0, 2.5), col = "mediumpurple", cex.names = 1.4)
  segments(mp, bootMOSI.lower1[nb, ], mp, bootMOSI.upper1[nb, ], col = mybarcol, lwd = 1.5)
}
| /CopyOfMalaria Summary Trials Information/4B785.R | no_license | EllieSherrardSmith/Malaria2 | R | false | false | 11,185 | r | ##########################################
##
## Data ## Oocysts
##
##
##########################################
# Per-mosquito oocyst counts for the 4B7-85 trial (absolute Windows path:
# this script only runs on the author's machine).
b7_85<-read.table("C:\\Users\\Ellie\\Dropbox\\Malaria\\Data Malaria\\Blagborough data Nat Comms\\grant data\\Andrew Blagborough\\4B7-85\\mosquito.txt",header=TRUE)
# Presence/absence indicator: 1 if the mosquito carried any oocyst.
b7_85$OocPrev<-ifelse(b7_85$Oocyst==0,0,1)
summary(b7_85)
###Oocyst intensity mean and CI95%
##Round 1
# Percentile bootstrap of a group mean: resample the vector with replacement
# 10,000 times and keep the resampled means (NAs dropped within each mean).
# quantile(a, c(0.025, 0.975)) prints the 95% CI; the following mean() call
# prints the observed point estimate for the same group.
boot_means <- function(v) replicate(10000, mean(sample(v, replace = TRUE), na.rm = TRUE))
a <- boot_means(b7_85$Oocyst[b7_85$Round == 1 & b7_85$Bites == 1])
quantile(a, c(0.025, 0.975))
mean(b7_85$Oocyst[b7_85$Round == 1 & b7_85$Bites == 1])
a <- boot_means(b7_85$Oocyst[b7_85$Round == 1 & b7_85$Bites == 2])
quantile(a, c(0.025, 0.975))
mean(b7_85$Oocyst[b7_85$Round == 1 & b7_85$Bites == 2])
a <- boot_means(b7_85$Oocyst[b7_85$Round == 1 & b7_85$Bites == 5])
quantile(a, c(0.025, 0.975))
mean(b7_85$Oocyst[b7_85$Round == 1 & b7_85$Bites == 5])
a <- boot_means(b7_85$Oocyst[b7_85$Round == 1 & b7_85$Bites == 10])
quantile(a, c(0.025, 0.975))
mean(b7_85$Oocyst[b7_85$Round == 1 & b7_85$Bites == 10])
###Oocyst prevalence mean and CI95%
##Round 1
# Same bootstrap, applied to the 0/1 oocyst-presence indicator.
a <- boot_means(b7_85$OocPrev[b7_85$Round == 1 & b7_85$Bites == 1])
quantile(a, c(0.025, 0.975))
mean(b7_85$OocPrev[b7_85$Round == 1 & b7_85$Bites == 1])
a <- boot_means(b7_85$OocPrev[b7_85$Round == 1 & b7_85$Bites == 2])
quantile(a, c(0.025, 0.975))
mean(b7_85$OocPrev[b7_85$Round == 1 & b7_85$Bites == 2])
a <- boot_means(b7_85$OocPrev[b7_85$Round == 1 & b7_85$Bites == 5])
quantile(a, c(0.025, 0.975))
mean(b7_85$OocPrev[b7_85$Round == 1 & b7_85$Bites == 5])
a <- boot_means(b7_85$OocPrev[b7_85$Round == 1 & b7_85$Bites == 10])
quantile(a, c(0.025, 0.975))
mean(b7_85$OocPrev[b7_85$Round == 1 & b7_85$Bites == 10])
##########################################
##
## Data ## BloodStage and Sporozoites
##
##
##########################################
# Per-mouse parasitaemia and sporozoite counts (one Sporozoite<k> column per
# bite; absolute Windows path, author's machine only).
b7_85b<-read.table("C:\\Users\\Ellie\\Dropbox\\Malaria\\Data Malaria\\Blagborough data Nat Comms\\grant data\\Andrew Blagborough\\4B7-85\\mouse.txt",header=TRUE)
# Blood-stage infection indicator: 1 if any parasitaemia detected.
b7_85b$bloodstage<-ifelse(b7_85b$Parasitemia==0,0,1)
summary(b7_85b)
###Bloodstage mean and CI95%
##Round 1
# Percentile-bootstrap 95% CI (10,000 resamples) for the proportion of
# blood-stage-positive mice in each biting treatment of round 1, each
# followed by the observed proportion.  replicate() draws the same RNG
# stream as the original index loop.
a <- replicate(10000, mean(sample(b7_85b$bloodstage[b7_85b$Round == 1 & b7_85b$Bites == 1], replace = TRUE), na.rm = TRUE))
quantile(a, c(0.025, 0.975))
mean(b7_85b$bloodstage[b7_85b$Round == 1 & b7_85b$Bites == 1])
a <- replicate(10000, mean(sample(b7_85b$bloodstage[b7_85b$Round == 1 & b7_85b$Bites == 2], replace = TRUE), na.rm = TRUE))
quantile(a, c(0.025, 0.975))
mean(b7_85b$bloodstage[b7_85b$Round == 1 & b7_85b$Bites == 2])
a <- replicate(10000, mean(sample(b7_85b$bloodstage[b7_85b$Round == 1 & b7_85b$Bites == 5], replace = TRUE), na.rm = TRUE))
quantile(a, c(0.025, 0.975))
mean(b7_85b$bloodstage[b7_85b$Round == 1 & b7_85b$Bites == 5])
a <- replicate(10000, mean(sample(b7_85b$bloodstage[b7_85b$Round == 1 & b7_85b$Bites == 10], replace = TRUE), na.rm = TRUE))
quantile(a, c(0.025, 0.975))
mean(b7_85b$bloodstage[b7_85b$Round == 1 & b7_85b$Bites == 10])
###Sporozoites mean and CI95%
##create prevalence
# Convert each per-bite sporozoite count column to a 0/1 infection indicator.
# (Replaces ten copy-pasted ifelse() lines; behaviour is identical.)
for (k in 1:10) {
  b7_85b[[paste0("sp", k)]] <- ifelse(b7_85b[[paste0("Sporozoite", k)]] == 0, 0, 1)
}
# Pool the indicator columns for one biting treatment in transmission round 1:
# a treatment with B bites contributes columns sp1..spB, concatenated in
# column order exactly as the original hand-written c(...) calls did.
pool_sp <- function(bites) {
  unlist(lapply(seq_len(bites), function(k) {
    b7_85b[[paste0("sp", k)]][b7_85b$Round == 1 & b7_85b$Bites == bites]
  }))
}
sp1a <- pool_sp(1)
sp2a <- pool_sp(2)
sp5a <- pool_sp(5)
sp10a <- pool_sp(10)
##Round 1
# Percentile-bootstrap 95% CI (10,000 resamples) and observed point estimate
# of sporozoite prevalence for each biting treatment in round 1.
a <- replicate(10000, mean(sample(sp1a, replace = TRUE), na.rm = TRUE))
quantile(a, c(0.025, 0.975))
# Fixed: was mean(sp1a) with no na.rm -- every sibling estimate passes
# na.rm, and the bootstrap above already drops NAs, so a missing value in
# sp1a made the point estimate NA while its CI was not.
mean(sp1a, na.rm = TRUE)
a <- replicate(10000, mean(sample(sp2a, replace = TRUE), na.rm = TRUE))
quantile(a, c(0.025, 0.975))
mean(sp2a, na.rm = TRUE)
a <- replicate(10000, mean(sample(sp5a, replace = TRUE), na.rm = TRUE))
quantile(a, c(0.025, 0.975))
mean(sp5a, na.rm = TRUE)
a <- replicate(10000, mean(sample(sp10a, replace = TRUE), na.rm = TRUE))
quantile(a, c(0.025, 0.975))
mean(sp10a, na.rm = TRUE)
###Sporozoite intensity
# Pool raw sporozoite counts per biting treatment in round 1: a treatment
# with B bites concatenates columns Sporozoite1..SporozoiteB (same pooling
# scheme as the sp*a prevalence vectors above).
Sporozoite1a<-b7_85b$Sporozoite1[b7_85b$Round==1 & b7_85b$Bites==1]
Sporozoite2a<-c(b7_85b$Sporozoite1[b7_85b$Round==1 & b7_85b$Bites==2],b7_85b$Sporozoite2[b7_85b$Round==1 & b7_85b$Bites==2])
Sporozoite5a<-c(b7_85b$Sporozoite1[b7_85b$Round==1 & b7_85b$Bites==5],b7_85b$Sporozoite2[b7_85b$Round==1 & b7_85b$Bites==5],
b7_85b$Sporozoite3[b7_85b$Round==1 & b7_85b$Bites==5],b7_85b$Sporozoite4[b7_85b$Round==1 & b7_85b$Bites==5],
b7_85b$Sporozoite5[b7_85b$Round==1 & b7_85b$Bites==5])
Sporozoite10a<-c(b7_85b$Sporozoite1[b7_85b$Round==1 & b7_85b$Bites==10],b7_85b$Sporozoite2[b7_85b$Round==1 & b7_85b$Bites==10],
b7_85b$Sporozoite3[b7_85b$Round==1 & b7_85b$Bites==10],b7_85b$Sporozoite4[b7_85b$Round==1 & b7_85b$Bites==10],
b7_85b$Sporozoite5[b7_85b$Round==1 & b7_85b$Bites==10],b7_85b$Sporozoite6[b7_85b$Round==1 & b7_85b$Bites==10],
b7_85b$Sporozoite7[b7_85b$Round==1 & b7_85b$Bites==10],b7_85b$Sporozoite8[b7_85b$Round==1 & b7_85b$Bites==10],
b7_85b$Sporozoite9[b7_85b$Round==1 & b7_85b$Bites==10],b7_85b$Sporozoite10[b7_85b$Round==1 & b7_85b$Bites==10])
##Round 1
# Percentile-bootstrap 95% CI (10,000 resamples) and observed mean sporozoite
# intensity for each pooled treatment vector built above.
a<-numeric(10000)
for(i in 1:10000) a[i] <- mean(sample(Sporozoite1a,replace=T),na.rm=T)
quantile(a,c(0.025,0.975))
mean(Sporozoite1a,na.rm=T)
a<-numeric(10000)
for(i in 1:10000) a[i] <- mean(sample(Sporozoite2a,replace=T),na.rm=T)
quantile(a,c(0.025,0.975))
mean(Sporozoite2a,na.rm=T)
a<-numeric(10000)
for(i in 1:10000) a[i] <- mean(sample(Sporozoite5a,replace=T),na.rm=T)
quantile(a,c(0.025,0.975))
mean(Sporozoite5a,na.rm=T)
a<-numeric(10000)
for(i in 1:10000) a[i] <- mean(sample(Sporozoite10a,replace=T),na.rm=T)
quantile(a,c(0.025,0.975))
mean(Sporozoite10a,na.rm=T)
# Layout for the figure: 4 rows (biting treatments) x 5 columns (measures),
# filled column-first by the plotting loops below.
is.mosi<-0
n.rounds=4
n.bites=4
t.bites<-c(1,2,5,10)
par(mfcol=c(4,5),mar= c(3, 4, 1, 2))
# NOTE(review): is.mosi is assigned 0 twice (also three lines above); the
# second assignment is redundant.
is.mosi<-0
##Controls: mouse read.csv("C:\\Users\\Ellie\\Documents\\Data Malaria\\Blagborough data Nat Comms\\sporozoites.csv",header=T)
##Controls: mosquito read.table("C:\\Users\\Ellie\\Documents\\Data Malaria\\Blagborough data Nat Comms\\oocysts.txt",header=T)
##4B7_85%
##Bloodstage
# First column of the figure: blood-stage prevalence per transmission cycle,
# one panel per biting treatment (all-zero placeholder matrices here).
prevH.round.1<-matrix(ncol=4,nrow=4,rep(0,16))
prevH.lower.1<-matrix(ncol=4,nrow=4,rep(0,16))
prevH.upper.1<-matrix(ncol=4,nrow=4,rep(0,16))
for(mb in 1:4){
hh=rbind(prevH.round.1[mb,])
hh.max<-apply(hh, 2, max)+6
ciL<-rbind(prevH.lower.1[mb,])
ciU<-rbind(prevH.upper.1[mb,])
colnames(hh)<-seq(1,4,1)
par(las=1,col.axis="black")
mybarcol <- "gray20"
# Proportions scaled to percentages for plotting.
mp <- barplot(hh*100,
col = c("dodgerblue4"),
ylim= c(0,100),
# main = "Parasetemia", font.main = 4,
# sub = "Transmission cycle", col.sub = mybarcol,
cex.names = 1.4)
segments(mp, ciL*100, mp, ciU*100 , col = mybarcol, lwd = 1.5)
}
##Oocyst prevalence
# Second column of the figure: oocyst prevalence per transmission cycle,
# values hard-coded from the bootstrap summaries computed above.
prevMO.round.1 <- matrix(ncol = 3, nrow = 4, c(0, 0, 0.04, 0.10, rep(0, 8)))
prevMO.lower.1 <- matrix(ncol = 3, nrow = 4, c(0, 0, 0.00, 0.02, rep(0, 8)))
prevMO.upper.1 <- matrix(ncol = 3, nrow = 4, c(0, 0, 0.10, 0.18, rep(0, 8)))
for (mb in 1:4) {
  bar_vals <- rbind(prevMO.round.1[mb, ])   # 1-row matrix for barplot()
  hh.max <- apply(bar_vals, 2, max) + 6     # headroom kept for optional labels
  ci_lower <- rbind(prevMO.lower.1[mb, ])
  ci_upper <- rbind(prevMO.upper.1[mb, ])
  colnames(bar_vals) <- 1:3                 # x-axis: transmission cycle number
  par(las = 1, col.axis = "black")
  mybarcol <- "gray20"
  mp <- barplot(bar_vals * 100,
                col = "forestgreen",
                ylim = c(0, 100),
                cex.names = 1.4)
  segments(mp, ci_lower * 100, mp, ci_upper * 100, col = mybarcol, lwd = 1.5)
}
##Oocyst intensity
# Third column of the figure: mean oocyst intensity per transmission cycle,
# hard-coded from the bootstrap summaries computed above.
prevMOI.round.1<-matrix(ncol=3,nrow=4,c(0,0,0.06,0.18,rep(0,8)))
bootMOI.lower1<-matrix(ncol=3,nrow=4,c(0,0,0,0.04,rep(0,8)))
bootMOI.upper1<-matrix(ncol=3,nrow=4,c(0,0,0.16,0.36,rep(0,8)))
for(mb in 1:4){
hh=rbind(prevMOI.round.1[mb,])
# Headroom for the commented-out significance labels below (currently unused).
hh.max<-apply(hh, 2, max)+5
ciL<-rbind(bootMOI.lower1[mb,])
ciU<-rbind(bootMOI.upper1[mb,])
colnames(hh)<-seq(1,3,1)
par(las=1,col.axis="black")
mybarcol <- "gray20"
mp <- barplot(hh*100,
col = c("firebrick3"),
ylim= c(0,45),
# main = "Parasetemia", font.main = 4,
# sub = "Transmission cycle", col.sub = mybarcol,
cex.names = 1.4)
segments(mp, ciL*100, mp, ciU*100 , col = mybarcol, lwd = 1.5)
#text(colMeans(mp)+0.2,hh.max, labels =less.than.001[mb,], col = "red")
#mtext(side = 1, at = colMeans(mp), line = 2,
# text = paste("efficacy", formatC(colMeans(hh))), col = "red")
}
##Sporozoite prevalence
# Fourth column of the figure: sporozoite prevalence per transmission cycle,
# hard-coded from the sp*a bootstrap summaries computed above.
prevMOS.round.1 <- matrix(ncol = 3, nrow = 4, c(0.2, 0.111, 0.04, 0.10, rep(0, 8)))
prevMOS.lower.1 <- matrix(ncol = 3, nrow = 4, c(0.0, 0.000, 0.00, 0.02, rep(0, 8)))
prevMOS.upper.1 <- matrix(ncol = 3, nrow = 4, c(0.6, 0.333, 0.12, 0.18, rep(0, 8)))
for (mb in 1:4) {
  bar_vals <- rbind(prevMOS.round.1[mb, ])  # 1-row matrix for barplot()
  hh.max <- apply(bar_vals, 2, max) + 6     # headroom kept for optional labels
  ci_lower <- rbind(prevMOS.lower.1[mb, ])
  ci_upper <- rbind(prevMOS.upper.1[mb, ])
  colnames(bar_vals) <- 1:3                 # x-axis: transmission cycle number
  par(las = 1, col.axis = "black")
  mybarcol <- "gray20"                      # also consumed by the intensity loop below
  mp <- barplot(bar_vals * 100,
                col = "orange",
                ylim = c(0, 100),
                cex.names = 1.4)
  segments(mp, ci_lower * 100, mp, ci_upper * 100, col = mybarcol, lwd = 1.5)
}
##Sporozoite intensity
# Fifth column of the figure: mean sporozoite intensity (raw scale) per
# transmission cycle, hard-coded from the bootstrap summaries above.
MOSI.round.1<-matrix(ncol=3, nrow = 4,c(0.2,0.1111,0.04,0.16,rep(0,8)))
bootMOSI.lower1<-matrix(ncol=3,nrow=4,c(0.0,0.0000,0.00,0.04,rep(0,8)))
bootMOSI.upper1<-matrix(ncol=3,nrow=4,c(0.6,0.3333,0.12,0.32,rep(0,8)))
# Loop-invariant: set the cycle labels once instead of on every iteration.
colnames(MOSI.round.1) <- c(1, 2, 3)
for (nb in 1:4) {
  # Fixed: par() used to be called AFTER barplot(), so the axis settings
  # never applied to this panel; every sibling loop sets par() first.
  par(las = 1, col.axis = "black")
  # Fixed: mybarcol was only inherited as a leftover global from the
  # preceding loops; define it locally so this block stands on its own.
  mybarcol <- "gray20"
  mp <- barplot(MOSI.round.1[nb, ], ylim = c(0, 2.5), col = "mediumpurple", cex.names = 1.4)
  segments(mp, bootMOSI.lower1[nb, ], mp, bootMOSI.upper1[nb, ], col = mybarcol, lwd = 1.5)
}
|
# --- Session setup and data loading ------------------------------------------
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged; the
# working-directory change is kept because every later path is relative to it.
rm(list=ls())
library(biomod2)
library(ecospat)
library(raster)
setwd("C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models")
# Environmental predictor layers: every ASCII grid in the working directory.
# Fixed: pattern '.asc$' left the dot as a regex wildcard (matching e.g.
# "Xasc"); '\\.asc$' matches the literal ".asc" extension only.  Also
# removed a second, redundant rm(list = ls()) that cleared nothing new.
ascii_list <- list.files(path = "./", pattern = "\\.asc$",
                         all.files = TRUE, full.names = FALSE)
asc_rasters <- lapply(ascii_list, raster)
asc_rasters
layers <- stack(asc_rasters)
# Presence records for A. densiflora: columns 2:3 are the x/y coordinates
# (lon/lat, WGS84), promoted to a SpatialPoints object below.
densiflora <- read.table("./densiflora_Cleaned.txt", header = TRUE)
densiflora_presence <- densiflora[,2:3]
xy <- as.matrix(densiflora_presence)
coordinates(densiflora_presence) <- ~x+y
projection(densiflora_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
crs(layers) <- '+proj=longlat +datum=WGS84 +no_defs'
# Package presences + 10,000 randomly sampled pseudo-absences (one
# replicate) with the predictor stack for biomod2/ecospat modelling.
myBiomodData <- BIOMOD_FormatingData( resp.var = densiflora_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_densiflora")
#summary(myBiomodData)
#str(myBiomodData)
#myBiomodOption <- BIOMOD_ModelingOptions()
##extract the pseudo-absences tables for posterior
library(dplyr)
## function to get PA dataset
# Flatten a BIOMOD formatted-data object into one table: coordinates,
# presence/absence status (NA marks pseudo-absence candidates) and the
# pseudo-absence selection columns.  Assumes bfd carries @coord,
# @data.species and @PA slots (biomod2 S4 class) -- TODO confirm against
# the installed biomod2 version.
get_PAtab <- function(bfd){
dplyr::bind_cols(
x = bfd@coord[, 1],
y = bfd@coord[, 2],
status = bfd@data.species,
bfd@PA
)
}
# Return the background mask stored in a BIOMOD formatted-data object
# (reads the @data.mask slot; S4 accessor only, no computation).
get_mask <- function(bfd) bfd@data.mask
## get the coordiantes of absences ==> No absences here
(abs.xy <- get_PAtab(myBiomodData) %>%
  filter(status == 0) %>%
  select(x, y))
## get the coordiantes of pseudo - absences
## all repetition of pseudo absences sampling merged
# Fixed: distinct() used to sit OUTSIDE the parenthesised assignment, so its
# de-duplicated result was only printed and discarded while pa.all.xy kept
# any duplicated pseudo-absence rows.  It is now part of the assigned
# pipeline (the outer parentheses still auto-print the result).
(pa.all.xy <- get_PAtab(myBiomodData) %>%
  filter(is.na(status)) %>%
  select(x, y) %>%
  distinct())
str(pa.all.xy)
##add column with the species name
# MSDM_Posteriori expects columns (sp, x, y), hence the reorder below.
pa.all.xy["sp"] <- "A_densiflora"
pa.all.xy <- pa.all.xy[colnames(pa.all.xy)[c(3,1:2)]]
str(pa.all.xy)
# Ensemble of Small Models for A. densiflora: fit all bivariate
# MAXENT.Phillips models (2 evaluation runs, 70/30 split, AUC-weighted).
my.ESM <- ecospat.ESM.Modeling( data=myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
# Combine the bivariate models into the ensemble (AUC weights, models with
# score below 0.8 are excluded).
my.ESM_EF <- ecospat.ESM.EnsembleModeling(my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=my.ESM_proj_current,
ESM.EnsembleModeling.output=my.ESM_EF)
plot(my.ESM_EFproj_current)
# Export the ensemble projection as a GeoTIFF for the MSDM post-processing.
densiflora_ESM <- my.ESM_EFproj_current
names(densiflora_ESM) <- c("A_densiflora")
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "densiflora_ESM_MSDS"))
dir <- file.path(ESMdir, "densiflora_ESM_MSDS")
writeRaster(densiflora_ESM, file.path(dir, names(densiflora_ESM)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## get the model performance of ESMs
my.ESM_EF$ESM.evaluations
## get the weights of the single bivariate models used to build the ESMs
my.ESM_EF$weights
ecospat.ESM.VarContrib(my.ESM,my.ESM_EF)
# MSDM post-processing: clip the ESM projection with a minimum convex
# polygon around the presences, thresholded at max(sensitivity+specificity).
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
require(devtools)
#install_github("sjevelazco/MSDM")
require(MSDM)
require(raster)
require(sp)
MSDM_Posteriori(
records = densiflora,
absence = pa.all.xy,
x = "x",
y = "y",
sp = "sp",
method = c("MCP"),
dirraster = dir,
threshold = c("spec_sens"),
buffer = NULL,
dirsave = "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models/densiflora_ESM_MSDS"
)
###Do the ESM and MSDS to the other endemic species.
##franciscana with only one unique presence
# Same workflow as the densiflora section above; note the author's comment
# below that ecospat.ESM.Modeling fails for this species (too few records).
franciscana <- read.table("./franciscana_Cleaned.txt", header = TRUE)
franciscana_presence <- franciscana[,2:3]
xy <- as.matrix(franciscana_presence)
coordinates(franciscana_presence) <- ~x+y
projection(franciscana_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
franciscana_myBiomodData <- BIOMOD_FormatingData( resp.var = franciscana_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_franciscana")
##below script(line139-155) failed as Error in modOut[[d1]][[d2]][[d3]][["ModelName"]] :
#subscript out of bounds
franciscana_my.ESM <- ecospat.ESM.Modeling( data=franciscana_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
franciscana_my.ESM_EF <- ecospat.ESM.EnsembleModeling(franciscana_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
franciscana_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=franciscana_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
franciscana_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=franciscana_my.ESM_proj_current,
ESM.EnsembleModeling.output=franciscana_my.ESM_EF)
plot(franciscana_my.ESM_EFproj_current)
##gabilanensis
# Same ESM + MSDM workflow as the densiflora section above.
# NOTE(review): the BIOMOD data object is misspelled "gabilaensis_..."
# (missing an 'n') but the typo is used consistently, so the code runs.
gabilanensis <- read.table("./gabilanensis_Cleaned.txt", header = TRUE)
gabilanensis_presence <- gabilanensis[,2:3]
xy <- as.matrix(gabilanensis_presence)
coordinates(gabilanensis_presence) <- ~x+y
projection(gabilanensis_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
gabilaensis_myBiomodData <- BIOMOD_FormatingData( resp.var = gabilanensis_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_gabilanensis")
gabilanensis_my.ESM <- ecospat.ESM.Modeling( data=gabilaensis_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
gabilanensis_my.ESM_EF <- ecospat.ESM.EnsembleModeling(gabilanensis_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
gabilanensis_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=gabilanensis_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
gabilanensis_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=gabilanensis_my.ESM_proj_current,
ESM.EnsembleModeling.output=gabilanensis_my.ESM_EF)
plot(gabilanensis_my.ESM_EFproj_current)
names(gabilanensis_my.ESM_EFproj_current) = "A_gabilanensis"
# NOTE(review): unlike the other species, this directory is not created with
# dir.create() first -- writeRaster will fail if it does not already exist.
dir = "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models/gabilanensis_ESM_MSDS"
writeRaster(gabilanensis_my.ESM_EFproj_current, file.path(dir, names(gabilanensis_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## get the coordiantes of pseudo - absences
## all repetition of pseudo absences sampling merged
# NOTE(review): distinct() is piped OUTSIDE the parenthesised assignment, so
# its result is printed and discarded; the saved table keeps any duplicates.
(gabilanensis_pa.all.xy <- get_PAtab(gabilaensis_myBiomodData) %>%
filter(is.na(status)) %>%
select(x, y)) %>%
distinct()
str(gabilanensis_pa.all.xy)
##add column with the species name
gabilanensis_pa.all.xy["sp"] <- c("A_gabilanensis")
gabilanensis_pa.all.xy <- gabilanensis_pa.all.xy[colnames(gabilanensis_pa.all.xy)[c(3,1:2)]]
str(gabilanensis_pa.all.xy)
gabilanensis_ESM_MSDS <- MSDM_Posteriori(
records = gabilanensis,
absence = gabilanensis_pa.all.xy,
x = "x",
y = "y",
sp = "sp",
method = c("MCP"),
dirraster = dir,
threshold = c("spec_sens"),
buffer = NULL,
dirsave = dir
)
##imbricata
# Same ESM + MSDM workflow as the densiflora section above.
imbricata <- read.table("./imbricata_Cleaned.txt", header = TRUE)
imbricata_presence <- imbricata[,2:3]
xy <- as.matrix(imbricata_presence)
coordinates(imbricata_presence) <- ~x+y
projection(imbricata_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
imbricata_myBiomodData <- BIOMOD_FormatingData( resp.var = imbricata_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_imbricata")
imbricata_my.ESM <- ecospat.ESM.Modeling( data=imbricata_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
imbricata_my.ESM_EF <- ecospat.ESM.EnsembleModeling(imbricata_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
imbricata_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=imbricata_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
imbricata_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=imbricata_my.ESM_proj_current,
ESM.EnsembleModeling.output=imbricata_my.ESM_EF)
plot(imbricata_my.ESM_EFproj_current)
names(imbricata_my.ESM_EFproj_current) = "A_imbricata"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "imbricata_ESM_MSDS"))
dir <- file.path(ESMdir, "imbricata_ESM_MSDS")
#dir = "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models/imbricata_ESM_MSDS"
writeRaster(imbricata_my.ESM_EFproj_current, file.path(dir, names(imbricata_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## get the coordiantes of pseudo - absences
## all repetition of pseudo absences sampling merged
# NOTE(review): distinct() is piped OUTSIDE the parenthesised assignment, so
# its result is printed and discarded; the saved table keeps any duplicates.
(imbricata_pa.all.xy <- get_PAtab(imbricata_myBiomodData) %>%
filter(is.na(status)) %>%
select(x, y)) %>%
distinct()
str(imbricata_pa.all.xy)
##add column with the species name
imbricata_pa.all.xy["sp"] <- c("A_imbricata")
imbricata_pa.all.xy <- imbricata_pa.all.xy[colnames(imbricata_pa.all.xy)[c(3,1:2)]]
str(imbricata_pa.all.xy)
imbricata_ESM_MSDS <- MSDM_Posteriori(
records = imbricata,
absence = imbricata_pa.all.xy,
x = "x",
y = "y",
sp = "sp",
method = c("MCP"),
dirraster = dir,
threshold = c("spec_sens"),
buffer = NULL,
dirsave = dir
)
##montaraensis
# Same ESM + MSDM workflow as the densiflora section above.
montaraensis <- read.table("./montaraensis_Cleaned.txt", header = TRUE)
montaraensis_presence <- montaraensis[,2:3]
xy <- as.matrix(montaraensis_presence)
coordinates(montaraensis_presence) <- ~x+y
projection(montaraensis_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
montaraensis_myBiomodData <- BIOMOD_FormatingData( resp.var = montaraensis_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_montaraensis")
montaraensis_my.ESM <- ecospat.ESM.Modeling( data=montaraensis_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
montaraensis_my.ESM_EF <- ecospat.ESM.EnsembleModeling(montaraensis_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
montaraensis_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=montaraensis_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
montaraensis_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=montaraensis_my.ESM_proj_current,
ESM.EnsembleModeling.output=montaraensis_my.ESM_EF)
plot(montaraensis_my.ESM_EFproj_current)
names(montaraensis_my.ESM_EFproj_current) = "A_montaraensis"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "montaraensis_ESM_MSDS"))
dir <- file.path(ESMdir, "montaraensis_ESM_MSDS")
#dir = "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models/imbricata_ESM_MSDS"
writeRaster(montaraensis_my.ESM_EFproj_current, file.path(dir, names(montaraensis_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## get the coordiantes of pseudo - absences
## all repetition of pseudo absences sampling merged
# NOTE(review): distinct() is piped OUTSIDE the parenthesised assignment, so
# its result is printed and discarded; the saved table keeps any duplicates.
(montaraensis_pa.all.xy <- get_PAtab(montaraensis_myBiomodData) %>%
filter(is.na(status)) %>%
select(x, y)) %>%
distinct()
str(montaraensis_pa.all.xy)
##add column with the species name
montaraensis_pa.all.xy["sp"] <- c("A_montaraensis")
montaraensis_pa.all.xy <- montaraensis_pa.all.xy[colnames(montaraensis_pa.all.xy)[c(3,1:2)]]
str(montaraensis_pa.all.xy)
montaraensis_ESM_MSDS <- MSDM_Posteriori(
records = montaraensis,
absence = montaraensis_pa.all.xy,
x = "x",
y = "y",
sp = "sp",
method = c("MCP"),
dirraster = dir,
threshold = c("spec_sens"),
buffer = NULL,
dirsave = dir
)
##obispoensis
# Same ESM + MSDM workflow as the densiflora section above.
obispoensis <- read.table("./obispoensis_Cleaned.txt", header = TRUE)
obispoensis_presence <- obispoensis[,2:3]
xy <- as.matrix(obispoensis_presence)
coordinates(obispoensis_presence) <- ~x+y
projection(obispoensis_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
obispoensis_myBiomodData <- BIOMOD_FormatingData( resp.var = obispoensis_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_obispoensis")
obispoensis_my.ESM <- ecospat.ESM.Modeling( data=obispoensis_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
obispoensis_my.ESM_EF <- ecospat.ESM.EnsembleModeling(obispoensis_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
obispoensis_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=obispoensis_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
obispoensis_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=obispoensis_my.ESM_proj_current,
ESM.EnsembleModeling.output=obispoensis_my.ESM_EF)
plot(obispoensis_my.ESM_EFproj_current)
names(obispoensis_my.ESM_EFproj_current) = "A_obispoensis"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "obispoensis_ESM_MSDS"))
dir <- file.path(ESMdir, "obispoensis_ESM_MSDS")
writeRaster(obispoensis_my.ESM_EFproj_current, file.path(dir, names(obispoensis_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## get the coordiantes of pseudo - absences
## all repetition of pseudo absences sampling merged
# NOTE(review): distinct() is piped OUTSIDE the parenthesised assignment, so
# its result is printed and discarded; the saved table keeps any duplicates.
(obispoensis_pa.all.xy <- get_PAtab(obispoensis_myBiomodData) %>%
filter(is.na(status)) %>%
select(x, y)) %>%
distinct()
str(obispoensis_pa.all.xy)
##add column with the species name
obispoensis_pa.all.xy["sp"] <- c("A_obispoensis")
obispoensis_pa.all.xy <- obispoensis_pa.all.xy[colnames(obispoensis_pa.all.xy)[c(3,1:2)]]
str(obispoensis_pa.all.xy)
obispoensis_ESM_MSDS <- MSDM_Posteriori(
records = obispoensis,
absence = obispoensis_pa.all.xy,
x = "x",
y = "y",
sp = "sp",
method = c("MCP"),
dirraster = dir,
threshold = c("spec_sens"),
buffer = NULL,
dirsave = dir
)
##ohloneana
# Same ESM + MSDM workflow as the densiflora section above.
ohloneana <- read.table("./ohloneana_Cleaned.txt", header = TRUE)
ohloneana_presence <- ohloneana[,2:3]
xy <- as.matrix(ohloneana_presence)
coordinates(ohloneana_presence) <- ~x+y
projection(ohloneana_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
ohloneana_myBiomodData <- BIOMOD_FormatingData( resp.var = ohloneana_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_ohloneana")
ohloneana_my.ESM <- ecospat.ESM.Modeling( data=ohloneana_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
ohloneana_my.ESM_EF <- ecospat.ESM.EnsembleModeling(ohloneana_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
ohloneana_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=ohloneana_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
ohloneana_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=ohloneana_my.ESM_proj_current,
ESM.EnsembleModeling.output=ohloneana_my.ESM_EF)
plot(ohloneana_my.ESM_EFproj_current)
names(ohloneana_my.ESM_EFproj_current) = "A_ohloneana"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "ohloneana_ESM_MSDS"))
dir <- file.path(ESMdir, "ohloneana_ESM_MSDS")
writeRaster(ohloneana_my.ESM_EFproj_current, file.path(dir, names(ohloneana_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## get the coordiantes of pseudo - absences
## all repetition of pseudo absences sampling merged
# NOTE(review): distinct() is piped OUTSIDE the parenthesised assignment, so
# its result is printed and discarded; the saved table keeps any duplicates.
(ohloneana_pa.all.xy <- get_PAtab(ohloneana_myBiomodData) %>%
filter(is.na(status)) %>%
select(x, y)) %>%
distinct()
str(ohloneana_pa.all.xy)
##add column with the species name
ohloneana_pa.all.xy["sp"] <- c("A_ohloneana")
ohloneana_pa.all.xy <- ohloneana_pa.all.xy[colnames(ohloneana_pa.all.xy)[c(3,1:2)]]
str(ohloneana_pa.all.xy)
ohloneana_ESM_MSDS <- MSDM_Posteriori(
records = ohloneana,
absence = ohloneana_pa.all.xy,
x = "x",
y = "y",
sp = "sp",
method = c("MCP"),
dirraster = dir,
threshold = c("spec_sens"),
buffer = NULL,
dirsave = dir
)
##osoensis
# Same ESM + MSDM workflow as the densiflora section above.
osoensis <- read.table("./osoensis_Cleaned.txt", header = TRUE)
osoensis_presence <- osoensis[,2:3]
xy <- as.matrix(osoensis_presence)
coordinates(osoensis_presence) <- ~x+y
projection(osoensis_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
osoensis_myBiomodData <- BIOMOD_FormatingData( resp.var = osoensis_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_osoensis")
osoensis_my.ESM <- ecospat.ESM.Modeling( data=osoensis_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
osoensis_my.ESM_EF <- ecospat.ESM.EnsembleModeling(osoensis_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
osoensis_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=osoensis_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
osoensis_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=osoensis_my.ESM_proj_current,
ESM.EnsembleModeling.output=osoensis_my.ESM_EF)
plot(osoensis_my.ESM_EFproj_current)
names(osoensis_my.ESM_EFproj_current) = "A_osoensis"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "osoensis_ESM_MSDS"))
dir <- file.path(ESMdir, "osoensis_ESM_MSDS")
writeRaster(osoensis_my.ESM_EFproj_current, file.path(dir, names(osoensis_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## get the coordiantes of pseudo - absences
## all repetition of pseudo absences sampling merged
# NOTE(review): distinct() is piped OUTSIDE the parenthesised assignment, so
# its result is printed and discarded; the saved table keeps any duplicates.
(osoensis_pa.all.xy <- get_PAtab(osoensis_myBiomodData) %>%
filter(is.na(status)) %>%
select(x, y)) %>%
distinct()
str(osoensis_pa.all.xy)
##add column with the species name
osoensis_pa.all.xy["sp"] <- c("A_osoensis")
osoensis_pa.all.xy <- osoensis_pa.all.xy[colnames(osoensis_pa.all.xy)[c(3,1:2)]]
str(osoensis_pa.all.xy)
osoensis_ESM_MSDS <- MSDM_Posteriori(
records = osoensis,
absence = osoensis_pa.all.xy,
x = "x",
y = "y",
sp = "sp",
method = c("MCP"),
dirraster = dir,
threshold = c("spec_sens"),
buffer = NULL,
dirsave = dir
)
##pacifica
# Same ESM + MSDM workflow as the densiflora section above.
pacifica <- read.table("./pacifica_Cleaned.txt", header = TRUE)
pacifica_presence <- pacifica[,2:3]
xy <- as.matrix(pacifica_presence)
coordinates(pacifica_presence) <- ~x+y
projection(pacifica_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
pacifica_myBiomodData <- BIOMOD_FormatingData( resp.var = pacifica_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_pacifica")
pacifica_my.ESM <- ecospat.ESM.Modeling( data=pacifica_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
pacifica_my.ESM_EF <- ecospat.ESM.EnsembleModeling(pacifica_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
pacifica_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=pacifica_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
pacifica_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=pacifica_my.ESM_proj_current,
ESM.EnsembleModeling.output=pacifica_my.ESM_EF)
plot(pacifica_my.ESM_EFproj_current)
names(pacifica_my.ESM_EFproj_current) = "A_pacifica"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "pacifica_ESM_MSDS"))
dir <- file.path(ESMdir, "pacifica_ESM_MSDS")
writeRaster(pacifica_my.ESM_EFproj_current, file.path(dir, names(pacifica_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## get the coordiantes of pseudo - absences
## all repetition of pseudo absences sampling merged
# NOTE(review): distinct() is piped OUTSIDE the parenthesised assignment, so
# its result is printed and discarded; the saved table keeps any duplicates.
(pacifica_pa.all.xy <- get_PAtab(pacifica_myBiomodData) %>%
filter(is.na(status)) %>%
select(x, y)) %>%
distinct()
str(pacifica_pa.all.xy)
##add column with the species name
pacifica_pa.all.xy["sp"] <- c("A_pacifica")
pacifica_pa.all.xy <- pacifica_pa.all.xy[colnames(pacifica_pa.all.xy)[c(3,1:2)]]
str(pacifica_pa.all.xy)
pacifica_ESM_MSDS <- MSDM_Posteriori(
records = pacifica,
absence = pacifica_pa.all.xy,
x = "x",
y = "y",
sp = "sp",
method = c("MCP"),
dirraster = dir,
threshold = c("spec_sens"),
buffer = NULL,
dirsave = dir
)
| /2_Fixed_ESM_methods.R | no_license | Yi-HuangUCR/Ecological-Differentiation-Among-Manzanita-Species | R | false | false | 26,704 | r | rm(list=ls())
## Ensemble of Small Models (ESM) workflow for Arctostaphylos species:
## fit bivariate MAXENT ESMs with ecospat/biomod2, project them, and apply a
## posterior MSDM (MCP) correction per species.
library(biomod2)
library(ecospat)
library(raster)
# NOTE(review): rm(list = ls()) and setwd() in a shared script are fragile;
# kept as-is, but a project-relative workflow would be safer.
rm(list = ls())
setwd("C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models")
# stack every ASCII-grid environmental layer found in the working directory
# (NOTE: the unescaped dot in '.asc$' matches any character before "asc")
ascii_list <- list.files(path = "./", pattern='.asc$',
all.files=TRUE, full.names=FALSE)
asc_rasters <- lapply(ascii_list, raster)
asc_rasters
layers <- stack(asc_rasters)
# A. densiflora occurrences; columns 2:3 hold the x/y coordinates
densiflora <- read.table("./densiflora_Cleaned.txt", header = TRUE)
densiflora_presence <- densiflora[,2:3]
xy <- as.matrix(densiflora_presence)
coordinates(densiflora_presence) <- ~x+y
projection(densiflora_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
crs(layers) <- '+proj=longlat +datum=WGS84 +no_defs'
# biomod2 data object with one set of 10,000 randomly placed pseudo-absences
myBiomodData <- BIOMOD_FormatingData( resp.var = densiflora_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_densiflora")
#summary(myBiomodData)
#str(myBiomodData)
#myBiomodOption <- BIOMOD_ModelingOptions()
## extract the pseudo-absence tables for the posterior MSDM step
library(dplyr)
## function to get the PA dataset
## Build a pseudo-absence table from a BIOMOD formatted-data object:
## point coordinates, presence status (NA = pseudo-absence), and the
## PA-selection indicator columns.
get_PAtab <- function(bfd) {
  coords <- slot(bfd, "coord")
  dplyr::bind_cols(
    x = coords[, 1],
    y = coords[, 2],
    status = slot(bfd, "data.species"),
    slot(bfd, "PA")
  )
}
## Return the background data mask stored in a BIOMOD formatted-data object.
get_mask <- function(bfd) {
  slot(bfd, "data.mask")
}
## coordinates of true absences ==> none here (presence-only dataset)
(abs.xy <- get_PAtab(myBiomodData) %>%
  filter(status == 0) %>%
  select(x, y))
## pseudo-absence coordinates, all sampling repetitions merged.
## FIX: distinct() used to be piped onto the printed result *after* the
## assignment, so pa.all.xy still held duplicated points; it is now part of
## the assignment itself and the de-duplicated table feeds MSDM_Posteriori().
(pa.all.xy <- get_PAtab(myBiomodData) %>%
  filter(is.na(status)) %>%
  select(x, y) %>%
  distinct())
str(pa.all.xy)
## add the species name column and reorder to (sp, x, y) as MSDM expects
pa.all.xy["sp"] <- "A_densiflora"
pa.all.xy <- pa.all.xy[c("sp", "x", "y")]
str(pa.all.xy)
# Bivariate MAXENT ESMs for A. densiflora: 2 evaluation runs, 70/30 split,
# AUC-weighted ensemble with a 0.8 weighting threshold.
my.ESM <- ecospat.ESM.Modeling( data=myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
my.ESM_EF <- ecospat.ESM.EnsembleModeling(my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=my.ESM_proj_current,
ESM.EnsembleModeling.output=my.ESM_EF)
plot(my.ESM_EFproj_current)
# name the layer after the species and export as GeoTIFF
densiflora_ESM <- my.ESM_EFproj_current
names(densiflora_ESM) <- c("A_densiflora")
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "densiflora_ESM_MSDS"))
dir <- file.path(ESMdir, "densiflora_ESM_MSDS")
writeRaster(densiflora_ESM, file.path(dir, names(densiflora_ESM)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## get the model performance of ESMs
my.ESM_EF$ESM.evaluations
## get the weights of the single bivariate models used to build the ESMs
my.ESM_EF$weights
ecospat.ESM.VarContrib(my.ESM,my.ESM_EF)
# NOTE(review): require() is used here for loading; library() would fail
# loudly instead of returning FALSE silently
require(devtools)
#install_github("sjevelazco/MSDM")
require(MSDM)
require(raster)
require(sp)
# posterior MSDM correction (MCP clip at the max(spec+sens) threshold);
# NOTE(review): dirsave duplicates `dir` as a hard-coded absolute path
densiflora_ESM_MSDS <- MSDM_Posteriori(
records = densiflora,
absence = pa.all.xy,
x = "x",
y = "y",
sp = "sp",
method = c("MCP"),
dirraster = dir,
threshold = c("spec_sens"),
buffer = NULL,
dirsave = "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models/densiflora_ESM_MSDS"
)
### Do the ESM and MSDM steps for the other endemic species.
## franciscana — only one unique presence record
franciscana <- read.table("./franciscana_Cleaned.txt", header = TRUE)
franciscana_presence <- franciscana[,2:3]
xy <- as.matrix(franciscana_presence)
coordinates(franciscana_presence) <- ~x+y
projection(franciscana_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
franciscana_myBiomodData <- BIOMOD_FormatingData( resp.var = franciscana_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_franciscana")
## NOTE(review): the modeling below failed for this species with
## "Error in modOut[[d1]][[d2]][[d3]][['ModelName']] : subscript out of
## bounds" — presumably too few presences for the 70/30 split; unverified.
franciscana_my.ESM <- ecospat.ESM.Modeling( data=franciscana_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
franciscana_my.ESM_EF <- ecospat.ESM.EnsembleModeling(franciscana_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
franciscana_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=franciscana_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
franciscana_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=franciscana_my.ESM_proj_current,
ESM.EnsembleModeling.output=franciscana_my.ESM_EF)
plot(franciscana_my.ESM_EFproj_current)
## gabilanensis
gabilanensis <- read.table("./gabilanensis_Cleaned.txt", header = TRUE)
gabilanensis_presence <- gabilanensis[,2:3]
xy <- as.matrix(gabilanensis_presence)
coordinates(gabilanensis_presence) <- ~x+y
projection(gabilanensis_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
# NOTE(review): variable name is misspelled ("gabilaensis"); it is referenced
# with the same spelling below, so it works — rename everywhere or not at all
gabilaensis_myBiomodData <- BIOMOD_FormatingData( resp.var = gabilanensis_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_gabilanensis")
gabilanensis_my.ESM <- ecospat.ESM.Modeling( data=gabilaensis_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
gabilanensis_my.ESM_EF <- ecospat.ESM.EnsembleModeling(gabilanensis_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
gabilanensis_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=gabilanensis_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
gabilanensis_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=gabilanensis_my.ESM_proj_current,
ESM.EnsembleModeling.output=gabilanensis_my.ESM_EF)
plot(gabilanensis_my.ESM_EFproj_current)
names(gabilanensis_my.ESM_EFproj_current) = "A_gabilanensis"
# NOTE(review): unlike the other species, no dir.create() here — writeRaster
# will fail if the folder does not already exist
dir = "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models/gabilanensis_ESM_MSDS"
writeRaster(gabilanensis_my.ESM_EFproj_current, file.path(dir, names(gabilanensis_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## Pseudo-absence coordinates, all sampling repetitions merged.
## FIX: distinct() was previously piped onto the *printed* result after the
## assignment, so the stored table kept duplicated points; it now runs inside
## the assignment. (The data object keeps its original misspelled name.)
(gabilanensis_pa.all.xy <- get_PAtab(gabilaensis_myBiomodData) %>%
  filter(is.na(status)) %>%
  select(x, y) %>%
  distinct())
str(gabilanensis_pa.all.xy)
## add the species name column and reorder to (sp, x, y) as MSDM expects
gabilanensis_pa.all.xy["sp"] <- "A_gabilanensis"
gabilanensis_pa.all.xy <- gabilanensis_pa.all.xy[c("sp", "x", "y")]
str(gabilanensis_pa.all.xy)
## posterior MSDM correction: MCP clip at the max(spec + sens) threshold
gabilanensis_ESM_MSDS <- MSDM_Posteriori(
  records = gabilanensis,
  absence = gabilanensis_pa.all.xy,
  x = "x",
  y = "y",
  sp = "sp",
  method = "MCP",
  dirraster = dir,
  threshold = "spec_sens",
  buffer = NULL,
  dirsave = dir
)
## imbricata
imbricata <- read.table("./imbricata_Cleaned.txt", header = TRUE)
imbricata_presence <- imbricata[,2:3]
xy <- as.matrix(imbricata_presence)
coordinates(imbricata_presence) <- ~x+y
projection(imbricata_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
imbricata_myBiomodData <- BIOMOD_FormatingData( resp.var = imbricata_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_imbricata")
imbricata_my.ESM <- ecospat.ESM.Modeling( data=imbricata_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
imbricata_my.ESM_EF <- ecospat.ESM.EnsembleModeling(imbricata_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
imbricata_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=imbricata_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
imbricata_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=imbricata_my.ESM_proj_current,
ESM.EnsembleModeling.output=imbricata_my.ESM_EF)
plot(imbricata_my.ESM_EFproj_current)
names(imbricata_my.ESM_EFproj_current) = "A_imbricata"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "imbricata_ESM_MSDS"))
dir <- file.path(ESMdir, "imbricata_ESM_MSDS")
#dir = "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models/imbricata_ESM_MSDS"
writeRaster(imbricata_my.ESM_EFproj_current, file.path(dir, names(imbricata_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## Pseudo-absence coordinates, all sampling repetitions merged.
## FIX: distinct() was previously piped onto the *printed* result after the
## assignment, so the stored table kept duplicated points; it now runs
## inside the assignment.
(imbricata_pa.all.xy <- get_PAtab(imbricata_myBiomodData) %>%
  filter(is.na(status)) %>%
  select(x, y) %>%
  distinct())
str(imbricata_pa.all.xy)
## add the species name column and reorder to (sp, x, y) as MSDM expects
imbricata_pa.all.xy["sp"] <- "A_imbricata"
imbricata_pa.all.xy <- imbricata_pa.all.xy[c("sp", "x", "y")]
str(imbricata_pa.all.xy)
## posterior MSDM correction: MCP clip at the max(spec + sens) threshold
imbricata_ESM_MSDS <- MSDM_Posteriori(
  records = imbricata,
  absence = imbricata_pa.all.xy,
  x = "x",
  y = "y",
  sp = "sp",
  method = "MCP",
  dirraster = dir,
  threshold = "spec_sens",
  buffer = NULL,
  dirsave = dir
)
## montaraensis
montaraensis <- read.table("./montaraensis_Cleaned.txt", header = TRUE)
montaraensis_presence <- montaraensis[,2:3]
xy <- as.matrix(montaraensis_presence)
coordinates(montaraensis_presence) <- ~x+y
projection(montaraensis_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
montaraensis_myBiomodData <- BIOMOD_FormatingData( resp.var = montaraensis_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_montaraensis")
montaraensis_my.ESM <- ecospat.ESM.Modeling( data=montaraensis_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
montaraensis_my.ESM_EF <- ecospat.ESM.EnsembleModeling(montaraensis_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
montaraensis_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=montaraensis_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
montaraensis_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=montaraensis_my.ESM_proj_current,
ESM.EnsembleModeling.output=montaraensis_my.ESM_EF)
plot(montaraensis_my.ESM_EFproj_current)
names(montaraensis_my.ESM_EFproj_current) = "A_montaraensis"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "montaraensis_ESM_MSDS"))
dir <- file.path(ESMdir, "montaraensis_ESM_MSDS")
#dir = "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models/imbricata_ESM_MSDS"
writeRaster(montaraensis_my.ESM_EFproj_current, file.path(dir, names(montaraensis_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## Pseudo-absence coordinates, all sampling repetitions merged.
## FIX: distinct() was previously piped onto the *printed* result after the
## assignment, so the stored table kept duplicated points; it now runs
## inside the assignment.
(montaraensis_pa.all.xy <- get_PAtab(montaraensis_myBiomodData) %>%
  filter(is.na(status)) %>%
  select(x, y) %>%
  distinct())
str(montaraensis_pa.all.xy)
## add the species name column and reorder to (sp, x, y) as MSDM expects
montaraensis_pa.all.xy["sp"] <- "A_montaraensis"
montaraensis_pa.all.xy <- montaraensis_pa.all.xy[c("sp", "x", "y")]
str(montaraensis_pa.all.xy)
## posterior MSDM correction: MCP clip at the max(spec + sens) threshold
montaraensis_ESM_MSDS <- MSDM_Posteriori(
  records = montaraensis,
  absence = montaraensis_pa.all.xy,
  x = "x",
  y = "y",
  sp = "sp",
  method = "MCP",
  dirraster = dir,
  threshold = "spec_sens",
  buffer = NULL,
  dirsave = dir
)
## obispoensis
obispoensis <- read.table("./obispoensis_Cleaned.txt", header = TRUE)
obispoensis_presence <- obispoensis[,2:3]
xy <- as.matrix(obispoensis_presence)
coordinates(obispoensis_presence) <- ~x+y
projection(obispoensis_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
obispoensis_myBiomodData <- BIOMOD_FormatingData( resp.var = obispoensis_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_obispoensis")
obispoensis_my.ESM <- ecospat.ESM.Modeling( data=obispoensis_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
obispoensis_my.ESM_EF <- ecospat.ESM.EnsembleModeling(obispoensis_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
obispoensis_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=obispoensis_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
obispoensis_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=obispoensis_my.ESM_proj_current,
ESM.EnsembleModeling.output=obispoensis_my.ESM_EF)
plot(obispoensis_my.ESM_EFproj_current)
names(obispoensis_my.ESM_EFproj_current) = "A_obispoensis"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "obispoensis_ESM_MSDS"))
dir <- file.path(ESMdir, "obispoensis_ESM_MSDS")
writeRaster(obispoensis_my.ESM_EFproj_current, file.path(dir, names(obispoensis_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## Pseudo-absence coordinates, all sampling repetitions merged.
## FIX: distinct() was previously piped onto the *printed* result after the
## assignment, so the stored table kept duplicated points; it now runs
## inside the assignment.
(obispoensis_pa.all.xy <- get_PAtab(obispoensis_myBiomodData) %>%
  filter(is.na(status)) %>%
  select(x, y) %>%
  distinct())
str(obispoensis_pa.all.xy)
## add the species name column and reorder to (sp, x, y) as MSDM expects
obispoensis_pa.all.xy["sp"] <- "A_obispoensis"
obispoensis_pa.all.xy <- obispoensis_pa.all.xy[c("sp", "x", "y")]
str(obispoensis_pa.all.xy)
## posterior MSDM correction: MCP clip at the max(spec + sens) threshold
obispoensis_ESM_MSDS <- MSDM_Posteriori(
  records = obispoensis,
  absence = obispoensis_pa.all.xy,
  x = "x",
  y = "y",
  sp = "sp",
  method = "MCP",
  dirraster = dir,
  threshold = "spec_sens",
  buffer = NULL,
  dirsave = dir
)
## ohloneana
ohloneana <- read.table("./ohloneana_Cleaned.txt", header = TRUE)
ohloneana_presence <- ohloneana[,2:3]
xy <- as.matrix(ohloneana_presence)
coordinates(ohloneana_presence) <- ~x+y
projection(ohloneana_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
ohloneana_myBiomodData <- BIOMOD_FormatingData( resp.var = ohloneana_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_ohloneana")
ohloneana_my.ESM <- ecospat.ESM.Modeling( data=ohloneana_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
ohloneana_my.ESM_EF <- ecospat.ESM.EnsembleModeling(ohloneana_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
ohloneana_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=ohloneana_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
ohloneana_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=ohloneana_my.ESM_proj_current,
ESM.EnsembleModeling.output=ohloneana_my.ESM_EF)
plot(ohloneana_my.ESM_EFproj_current)
names(ohloneana_my.ESM_EFproj_current) = "A_ohloneana"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "ohloneana_ESM_MSDS"))
dir <- file.path(ESMdir, "ohloneana_ESM_MSDS")
writeRaster(ohloneana_my.ESM_EFproj_current, file.path(dir, names(ohloneana_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## Pseudo-absence coordinates, all sampling repetitions merged.
## FIX: distinct() was previously piped onto the *printed* result after the
## assignment, so the stored table kept duplicated points; it now runs
## inside the assignment.
(ohloneana_pa.all.xy <- get_PAtab(ohloneana_myBiomodData) %>%
  filter(is.na(status)) %>%
  select(x, y) %>%
  distinct())
str(ohloneana_pa.all.xy)
## add the species name column and reorder to (sp, x, y) as MSDM expects
ohloneana_pa.all.xy["sp"] <- "A_ohloneana"
ohloneana_pa.all.xy <- ohloneana_pa.all.xy[c("sp", "x", "y")]
str(ohloneana_pa.all.xy)
## posterior MSDM correction: MCP clip at the max(spec + sens) threshold
ohloneana_ESM_MSDS <- MSDM_Posteriori(
  records = ohloneana,
  absence = ohloneana_pa.all.xy,
  x = "x",
  y = "y",
  sp = "sp",
  method = "MCP",
  dirraster = dir,
  threshold = "spec_sens",
  buffer = NULL,
  dirsave = dir
)
## osoensis
osoensis <- read.table("./osoensis_Cleaned.txt", header = TRUE)
osoensis_presence <- osoensis[,2:3]
xy <- as.matrix(osoensis_presence)
coordinates(osoensis_presence) <- ~x+y
projection(osoensis_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
osoensis_myBiomodData <- BIOMOD_FormatingData( resp.var = osoensis_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_osoensis")
osoensis_my.ESM <- ecospat.ESM.Modeling( data=osoensis_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
osoensis_my.ESM_EF <- ecospat.ESM.EnsembleModeling(osoensis_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
osoensis_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=osoensis_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
osoensis_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=osoensis_my.ESM_proj_current,
ESM.EnsembleModeling.output=osoensis_my.ESM_EF)
plot(osoensis_my.ESM_EFproj_current)
names(osoensis_my.ESM_EFproj_current) = "A_osoensis"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "osoensis_ESM_MSDS"))
dir <- file.path(ESMdir, "osoensis_ESM_MSDS")
writeRaster(osoensis_my.ESM_EFproj_current, file.path(dir, names(osoensis_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## Pseudo-absence coordinates, all sampling repetitions merged.
## FIX: distinct() was previously piped onto the *printed* result after the
## assignment, so the stored table kept duplicated points; it now runs
## inside the assignment.
(osoensis_pa.all.xy <- get_PAtab(osoensis_myBiomodData) %>%
  filter(is.na(status)) %>%
  select(x, y) %>%
  distinct())
str(osoensis_pa.all.xy)
## add the species name column and reorder to (sp, x, y) as MSDM expects
osoensis_pa.all.xy["sp"] <- "A_osoensis"
osoensis_pa.all.xy <- osoensis_pa.all.xy[c("sp", "x", "y")]
str(osoensis_pa.all.xy)
## posterior MSDM correction: MCP clip at the max(spec + sens) threshold
osoensis_ESM_MSDS <- MSDM_Posteriori(
  records = osoensis,
  absence = osoensis_pa.all.xy,
  x = "x",
  y = "y",
  sp = "sp",
  method = "MCP",
  dirraster = dir,
  threshold = "spec_sens",
  buffer = NULL,
  dirsave = dir
)
## pacifica
pacifica <- read.table("./pacifica_Cleaned.txt", header = TRUE)
pacifica_presence <- pacifica[,2:3]
xy <- as.matrix(pacifica_presence)
coordinates(pacifica_presence) <- ~x+y
projection(pacifica_presence) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
pacifica_myBiomodData <- BIOMOD_FormatingData( resp.var = pacifica_presence,
expl.var = layers,
PA.nb.rep = 1,
PA.nb.absences = 10000,
PA.strategy = 'random',
resp.xy = xy,
resp.name = "A_pacifica")
pacifica_my.ESM <- ecospat.ESM.Modeling( data=pacifica_myBiomodData,
models=c('MAXENT.Phillips'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
pacifica_my.ESM_EF <- ecospat.ESM.EnsembleModeling(pacifica_my.ESM,weighting.score=c("AUC"),threshold=0.8)
### Projection of simple bivariate models into new space
pacifica_my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=pacifica_my.ESM,
new.env=layers)
### Projection of calibrated ESMs into new space
pacifica_my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=pacifica_my.ESM_proj_current,
ESM.EnsembleModeling.output=pacifica_my.ESM_EF)
plot(pacifica_my.ESM_EFproj_current)
names(pacifica_my.ESM_EFproj_current) = "A_pacifica"
ESMdir <- "C:/Users/gzl02/Desktop/Yi2020Research/Ecological_differentiation_chapter/Esemble_of_Small_models"
dir.create(file.path(ESMdir, "pacifica_ESM_MSDS"))
dir <- file.path(ESMdir, "pacifica_ESM_MSDS")
writeRaster(pacifica_my.ESM_EFproj_current, file.path(dir, names(pacifica_my.ESM_EFproj_current)),
bylayer=TRUE, format='GTiff', overwrite=TRUE)
## Pseudo-absence coordinates, all sampling repetitions merged.
## FIX: distinct() was previously piped onto the *printed* result after the
## assignment, so the stored table kept duplicated points; it now runs
## inside the assignment.
(pacifica_pa.all.xy <- get_PAtab(pacifica_myBiomodData) %>%
  filter(is.na(status)) %>%
  select(x, y) %>%
  distinct())
str(pacifica_pa.all.xy)
## add the species name column and reorder to (sp, x, y) as MSDM expects
pacifica_pa.all.xy["sp"] <- "A_pacifica"
pacifica_pa.all.xy <- pacifica_pa.all.xy[c("sp", "x", "y")]
str(pacifica_pa.all.xy)
## posterior MSDM correction: MCP clip at the max(spec + sens) threshold
pacifica_ESM_MSDS <- MSDM_Posteriori(
  records = pacifica,
  absence = pacifica_pa.all.xy,
  x = "x",
  y = "y",
  sp = "sp",
  method = "MCP",
  dirraster = dir,
  threshold = "spec_sens",
  buffer = NULL,
  dirsave = dir
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fo_data.r
\name{bone_cf}
\alias{bone_cf}
\title{Computes correction factor for pre-1992 data}
\usage{
bone_cf(fo)
}
\arguments{
\item{fo}{dataframe of prey species identified in each scat and whether it was identified by otolith/beak or other}
}
\value{
list with correction factor cf and its std error se.cf
}
\description{
For a given set of species codes, it computes the correction factor for
otolith/beak-only (oto/bk) data collected before 1992. It computes the FO from all
structures divided by the FO from oto/bk only, and that ratio is then used to
correct years prior to 1992 in the fo function.
}
\details{
No longer used because scats are counted as the number of scats with any identified prey from the
fo dataframe.
}
\author{
Jeff Laake
}
| /CIPinnipedAnalysis/man/bone_cf.Rd | no_license | jlaake/CIPinnipedAnalysis | R | false | true | 835 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fo_data.r
\name{bone_cf}
\alias{bone_cf}
\title{Computes correction factor for pre-1992 data}
\usage{
bone_cf(fo)
}
\arguments{
\item{fo}{dataframe of prey species identified in each scat and whether it was identified by otolith/beak or other}
}
\value{
list with correction factor cf and its std error se.cf
}
\description{
For a given set of species codes, it computes the correction factor for
otolith/beak-only (oto/bk) data collected before 1992. It computes the FO from all
structures divided by the FO from oto/bk only, and that ratio is then used to
correct years prior to 1992 in the fo function.
}
\details{
No longer used because scats are counted as the number of scats with any identified prey from the
fo dataframe.
}
\author{
Jeff Laake
}
|
#' Makes four graphs of annual streamflow statistics on a single page
#'
#' @description
#' Part of the flowHistory system. The four statistics are 1-day maximum, annual mean, annual median, and annual 7-day minimum.
#' Although there are a lot of optional arguments to this function, most are set to a logical default.
#'
#' Data come from named list, which contains a Sample dataframe with the sample data,
#' a Daily dataframe with the daily flow data,
#' and an INFO dataframe with metadata. Each graph shows a loess smooth of the data that are plotted.
#'
#' @param eList named list with at least Daily and INFO dataframes
#' @param yearStart A numeric value for year in which the graph should start, default is NA, which indicates that the graph should start with first annual value
#' @param yearEnd A numeric value for year in which the graph should end, default is NA, which indicates that the graph should end with last annual value
#' @param printTitle logical variable, if TRUE title is printed, if FALSE title is not printed, default is TRUE
#' @param runoff logical variable, if TRUE the streamflow data are converted to runoff values in mm/day
#' @param qUnit object of qUnit class \code{\link{printqUnitCheatSheet}}, or numeric represented the short code, or character representing the descriptive name.
#' @param cex numerical value giving the amount by which plotting symbols should be magnified
#' @param cex.main magnification to be used for main titles relative to the current setting of cex
#' @param cex.axis magnification to be used for axis annotation relative to the current setting of cex
#' @param col color of points on plot, see ?par 'Color Specification'
#' @param lwd number line width. Default is 1.
#' @param \dots arbitrary graphical parameters that will be passed to genericEGRETDotPlot function (see ?par for options)
#' @keywords graphics streamflow statistics
#' @export
#' @seealso \code{\link{plotFlowSingle}}
#' @examples
#' eList <- Choptank_eList
#' \donttest{
#' # Water year:
#' plotFourStats(eList)
#' # Graphs consisting of Jun-Aug
#' eList <- setPA(eList,paStart=6,paLong=3)
#' plotFourStats(eList)
#' }
plotFourStats<-function(eList, yearStart = NA, yearEnd = NA,
                        printTitle = TRUE, runoff = FALSE, cex.main = 1.2,
                        qUnit =1,cex.axis=1.2,cex=0.8, col="black", lwd=1,...) {
  # Draws four annual flow statistics as a 2x2 page of plotFlowSingle()
  # panels, with a common page title.
  # Changes vs. original (interface unchanged):
  #  * dropped the unused `localDaily <- getDaily(eList)` assignment
  #  * par() is restored via on.exit(), so the graphics state is reset even
  #    if one of the panels fails part-way through
  #  * the four identical plotFlowSingle() calls are consolidated in a loop
  localINFO <- getInfo(eList)
  localAnnualSeries <- makeAnnualSeries(eList)
  # 2x2 layout filled column-wise, outer margins reserved for the page title
  par(mfcol=c(2,2),oma=c(0,1.7,6,1.7))
  on.exit(par(mfcol=c(1,1),oma=c(0,0,0,0)), add = TRUE)
  # default to the full span of the annual series when start/end not supplied
  setYearStart<-if(is.na(yearStart)) min(localAnnualSeries[1,,],na.rm=TRUE) else yearStart
  setYearEnd<-if(is.na(yearEnd)) max(localAnnualSeries[1,,],na.rm=TRUE) else yearEnd
  # istat codes: 8 = 1-day maximum, 4 = annual mean, 5 = annual median,
  # 2 = 7-day minimum
  for (istat in c(8, 4, 5, 2)) {
    plotFlowSingle(eList, istat=istat, yearStart=setYearStart, yearEnd=setYearEnd,
                   tinyPlot=TRUE, runoff=runoff,
                   qUnit=qUnit, printPA=FALSE, printIstat=TRUE, printStaName=FALSE,
                   cex.axis=cex.axis, cex=cex, col=col, lwd=lwd, cex.main=1, ...)
  }
  # page title: station name plus the period-of-analysis label
  textPA<-setSeasonLabelByUser(paStartInput=localINFO$paStart, paLongInput=localINFO$paLong)
  title<-if(printTitle) paste(localINFO$shortName,"\n",textPA)
  mtext(title, outer = TRUE, font = 2,cex=cex.main)
} | /R/plotFourStats.R | no_license | cran/EGRET | R | false | false | 4,303 | r | #' Makes four graphs of annual streamflow statistics on a single page
#'
#' @description
#' Part of the flowHistory system. The four statistics are 1-day maximum, annual mean, annual median, and annual 7-day minimum.
#' Although there are a lot of optional arguments to this function, most are set to a logical default.
#'
#' Data come from named list, which contains a Sample dataframe with the sample data,
#' a Daily dataframe with the daily flow data,
#' and an INFO dataframe with metadata. Each graph shows a loess smooth of the data that are plotted.
#'
#' @param eList named list with at least Daily and INFO dataframes
#' @param yearStart A numeric value for year in which the graph should start, default is NA, which indicates that the graph should start with first annual value
#' @param yearEnd A numeric value for year in which the graph should end, default is NA, which indicates that the graph should end with last annual value
#' @param printTitle logical variable, if TRUE title is printed, if FALSE title is not printed, default is TRUE
#' @param runoff logical variable, if TRUE the streamflow data are converted to runoff values in mm/day
#' @param qUnit object of qUnit class \code{\link{printqUnitCheatSheet}}, or numeric represented the short code, or character representing the descriptive name.
#' @param cex numerical value giving the amount by which plotting symbols should be magnified
#' @param cex.main magnification to be used for main titles relative to the current setting of cex
#' @param cex.axis magnification to be used for axis annotation relative to the current setting of cex
#' @param col color of points on plot, see ?par 'Color Specification'
#' @param lwd number line width. Default is 1.
#' @param \dots arbitrary graphical parameters that will be passed to genericEGRETDotPlot function (see ?par for options)
#' @keywords graphics streamflow statistics
#' @export
#' @seealso \code{\link{plotFlowSingle}}
#' @examples
#' eList <- Choptank_eList
#' \donttest{
#' # Water year:
#' plotFourStats(eList)
#' # Graphs consisting of Jun-Aug
#' eList <- setPA(eList,paStart=6,paLong=3)
#' plotFourStats(eList)
#' }
plotFourStats<-function(eList, yearStart = NA, yearEnd = NA,
printTitle = TRUE, runoff = FALSE, cex.main = 1.2,
qUnit =1,cex.axis=1.2,cex=0.8, col="black", lwd=1,...) {
# Draws four annual flow statistics as a 2x2 page of plotFlowSingle() panels:
# istat 8 = 1-day maximum, 4 = annual mean, 5 = annual median, 2 = 7-day min.
# prior to running this user must do these two commands
# INFO<-setPA(pastart,paLong,window)
# annualSeries<-makeAnnualSeries()
#
localINFO <- getInfo(eList)
localDaily <- getDaily(eList) # NOTE(review): assigned but never used below
localAnnualSeries <- makeAnnualSeries(eList)
# 2x2 layout filled column-wise, outer margins reserved for the page title;
# NOTE(review): par() is only reset at the end, so an error mid-plot leaves
# the device in the 2x2 state
par(mfcol=c(2,2),oma=c(0,1.7,6,1.7))
# default to the full span of the annual series when start/end not supplied
setYearStart<-if(is.na(yearStart)) min(localAnnualSeries[1,,],na.rm=TRUE) else yearStart
setYearEnd<-if(is.na(yearEnd)) max(localAnnualSeries[1,,],na.rm=TRUE) else yearEnd
plotFlowSingle(eList, istat=8, yearStart=setYearStart, yearEnd=setYearEnd,
tinyPlot=TRUE, runoff=runoff,
qUnit=qUnit, printPA=FALSE, printIstat=TRUE, printStaName=FALSE,
cex.axis=cex.axis,cex=cex, col=col,lwd=lwd, cex.main=1,...)
plotFlowSingle(eList, istat=4, yearStart=setYearStart, yearEnd=setYearEnd,
tinyPlot=TRUE, runoff=runoff,
qUnit=qUnit, printPA=FALSE, printIstat=TRUE, printStaName=FALSE,
cex.axis=cex.axis,cex=cex, col=col,lwd=lwd, cex.main=1, ...)
plotFlowSingle(eList, istat=5, yearStart=setYearStart, yearEnd=setYearEnd,
tinyPlot=TRUE, runoff=runoff,
qUnit=qUnit, printPA=FALSE, printIstat=TRUE, printStaName=FALSE,
cex.axis=cex.axis,cex=cex, col=col,lwd=lwd, cex.main=1, ...)
plotFlowSingle(eList, istat=2, yearStart=setYearStart, yearEnd=setYearEnd,
tinyPlot=TRUE, runoff=runoff,
qUnit=qUnit, printPA=FALSE, printIstat=TRUE, printStaName=FALSE,
cex.axis=cex.axis,cex=cex, col=col,lwd=lwd, cex.main=1, ...)
# page title: station name plus the period-of-analysis label
textPA<-setSeasonLabelByUser(paStartInput=localINFO$paStart, paLongInput=localINFO$paLong)
title<-if(printTitle) paste(localINFO$shortName,"\n",textPA)
mtext(title, outer = TRUE, font = 2,cex=cex.main)
par(mfcol=c(1,1),oma=c(0,0,0,0))
}
# Comparative kernel-density plots of MPG by cylinder count (sm package).
opar <- par(no.readonly = T) # save graphics state (NOTE: prefer TRUE over T)
par(lwd = 2) # double line width
library(sm)
attach(mtcars) # NOTE(review): attach() is discouraged; with(mtcars, ...) is safer
# labelled factor built from the numeric cylinder counts
cyl.f <- factor(cyl, levels = c(4,6,8),
labels = c('4 cylinder','6 cylinder', '8 cylinder'))
sm.density.compare(mpg, cyl, xlab = "Miles Per Gallon") # superposed density per group
title(main = 'MPG Distribution by Car Cylinders')
# one fill colour per factor level (palette indices 2..4)
colfill <- c(2:(1+length(levels(cyl.f))))
legend(locator(1), levels(cyl.f), fill = colfill) # click on the plot to place the legend
detach(mtcars)
par(opar) | /6-8-sm-density.r | no_license | papadalin/R-book-code | R | false | false | 513 | r | #ke bijiao d hemidu tu.
# Comparative kernel-density plots of MPG by cylinder count (sm package).
opar <- par(no.readonly = T) # save graphics state (NOTE: prefer TRUE over T)
par(lwd = 2) # double line width
library(sm)
attach(mtcars) # NOTE(review): attach() is discouraged; with(mtcars, ...) is safer
# labelled factor built from the numeric cylinder counts
cyl.f <- factor(cyl, levels = c(4,6,8),
labels = c('4 cylinder','6 cylinder', '8 cylinder'))
sm.density.compare(mpg, cyl, xlab = "Miles Per Gallon") # superposed density per group
title(main = 'MPG Distribution by Car Cylinders')
# one fill colour per factor level (palette indices 2..4)
colfill <- c(2:(1+length(levels(cyl.f))))
legend(locator(1), levels(cyl.f), fill = colfill) # click on the plot to place the legend
detach(mtcars)
par(opar) # restore the saved graphics state
discrepancyCriteria <- function(design, type='all'){
  #---------------------------------------
  # source code by Jessica FRANCO (2006.10.05)
  # modified by Bertrand Iooss (2013.26.12)
  #---------------------------------------
  # Computes L2-type discrepancy measures of a design of experiments on the
  # unit cube [0,1]^d (a design lying outside the unit cube is rescaled first).
  #
  # Arguments:
  #   design : matrix (or object coercible to a matrix) of the design,
  #            one row per experiment point, one column per input variable.
  #   type   : character vector of the discrepancies to compute, among
  #            'C2' (centered), 'L2', 'L2star', 'M2' (modified),
  #            'S2' (symmetric), 'W2' (wrap-around) and 'Mix2' (mixture),
  #            or 'all' (the default) for all seven of them.
  #
  # Value: a named list with one scalar element per requested discrepancy
  #        (DisC2, DisL2, DisL2star, DisM2, DisS2, DisW2, DisMix2),
  #        always in that fixed order.
  X <- as.matrix(design)
  dimension <- dim(X)[2] # number of input variables
  n <- dim(X)[1]         # number of design points
  if (n < dimension){
    # This is an error, not a warning (the former message said "Warning"
    # although execution stops here).
    stop('The number of points is lower than the dimension.')
  }
  # Check the experimental region and rescale into [0,1]^d when needed.
  if (min(X) < 0 || max(X) > 1){
    warning("The design is rescaled into the unit cube [0,1]^d.")
    M <- apply(X, 2, max)
    m <- apply(X, 2, min)
    if (any(M == m)){
      # A constant column cannot be rescaled: the division below would be
      # 0/0 and silently propagate NaN through every criterion.
      stop("The design contains a constant column and cannot be rescaled into [0,1]^d.")
    }
    for (j in seq_len(dimension)){
      X[, j] <- (X[, j] - m[j]) / (M[j] - m[j])
    }
  }
  # Decode the requested criteria into one flag per discrepancy type.
  # Unknown entries used to be silently ignored; they now raise a warning.
  all_types <- c('C2', 'L2', 'L2star', 'M2', 'S2', 'W2', 'Mix2')
  if (length(type) == 1 && type == 'all'){
    type <- all_types
  }
  unknown <- setdiff(type, all_types)
  if (length(unknown) > 0){
    warning("Unknown discrepancy type(s) ignored: ",
            paste(unknown, collapse = ", "))
  }
  DisC2     <- 'C2'     %in% type
  DisL2     <- 'L2'     %in% type
  DisL2star <- 'L2star' %in% type
  DisM2     <- 'M2'     %in% type
  DisS2     <- 'S2'     %in% type
  DisW2     <- 'W2'     %in% type
  DisMix2   <- 'Mix2'   %in% type
  R <- list()
  # centered L2-discrepancy
  #------------------------
  if (DisC2){
    s1 <- 0; s2 <- 0
    for (i in seq_len(n)){
      s1 <- s1 + prod(1 + 0.5 * abs(X[i, ] - 0.5) - 0.5 * (abs(X[i, ] - 0.5))^2)
      for (k in seq_len(n)){
        s2 <- s2 + prod(1 + 0.5 * abs(X[i, ] - 0.5) + 0.5 * abs(X[k, ] - 0.5) -
                          0.5 * abs(X[i, ] - X[k, ]))
      }
    }
    R <- c(R, DisC2 = sqrt((13/12)^dimension - (2/n) * s1 + (1/n^2) * s2))
  }
  # L2-discrepancy
  #------------------------
  if (DisL2){
    s1 <- 0; s2 <- 0
    for (i in seq_len(n)){
      s1 <- s1 + prod(X[i, ] * (1 - X[i, ]))
      for (k in seq_len(n)){
        # product over the dimensions of (1 - max(x_i, x_k)) * min(x_i, x_k),
        # vectorized with pmax/pmin instead of an explicit inner loop
        s2 <- s2 + prod((1 - pmax(X[i, ], X[k, ])) * pmin(X[i, ], X[k, ]))
      }
    }
    R <- c(R, DisL2 = sqrt(12^(-dimension) - ((2^(1 - dimension))/n) * s1 + (1/n^2) * s2))
  }
  # L2star-discrepancy
  #------------------------
  if (DisL2star){
    dL2 <- 0
    for (j in seq_len(n)){
      for (i in seq_len(n)){
        if (i != j){
          # off-diagonal term, vectorized (the original grew a vector with c())
          term <- prod(1 - pmax(X[i, ], X[j, ])) / n^2
        } else {
          # diagonal term
          t1 <- prod(1 - X[i, ])
          t2 <- prod(1 - X[i, ]^2)
          term <- t1 / n^2 - ((2^(1 - dimension)) / n) * t2
        }
        dL2 <- dL2 + term
      }
    }
    R <- c(R, DisL2star = sqrt(3^(-dimension) + dL2))
  }
  # modified L2-discrepancy
  #------------------------
  if (DisM2){
    s1 <- 0; s2 <- 0
    for (i in seq_len(n)){
      s1 <- s1 + prod(3 - X[i, ] * X[i, ])
      for (k in seq_len(n)){
        s2 <- s2 + prod(2 - pmax(X[i, ], X[k, ]))
      }
    }
    R <- c(R, DisM2 = sqrt((4/3)^dimension - ((2^(1 - dimension))/n) * s1 + (1/n^2) * s2))
  }
  # symmetric L2-discrepancy
  #------------------------
  if (DisS2){
    s1 <- 0; s2 <- 0
    for (i in seq_len(n)){
      s1 <- s1 + prod(1 + 2 * X[i, ] - 2 * X[i, ] * X[i, ])
      for (k in seq_len(n)){
        s2 <- s2 + prod(1 - abs(X[i, ] - X[k, ]))
      }
    }
    R <- c(R, DisS2 = sqrt((4/3)^dimension - (2/n) * s1 + (2^dimension/n^2) * s2))
  }
  # wrap-around L2-discrepancy
  #------------------------
  if (DisW2){
    s1 <- 0
    for (i in seq_len(n)){
      for (k in seq_len(n)){
        s1 <- s1 + prod(1.5 - abs(X[i, ] - X[k, ]) * (1 - abs(X[i, ] - X[k, ])))
      }
    }
    R <- c(R, DisW2 = sqrt(-(4/3)^dimension + (1/n^2) * s1))
  }
  # mixture L2-discrepancy
  #------------------------
  if (DisMix2){
    s1 <- 0; s2 <- 0
    for (i in seq_len(n)){
      s1 <- s1 + prod(5/3 - 0.25 * abs(X[i, ] - 0.5) - 0.25 * (abs(X[i, ] - 0.5))^2)
      for (k in seq_len(n)){
        s2 <- s2 + prod(15/8 - 0.25 * abs(X[i, ] - 0.5) - 0.25 * abs(X[k, ] - 0.5) -
                          0.75 * abs(X[i, ] - X[k, ]) + 0.5 * (abs(X[i, ] - X[k, ]))^2)
      }
    }
    R <- c(R, DisMix2 = sqrt((19/12)^dimension - (2/n) * s1 + (1/n^2) * s2))
  }
  return(R)
}
| /R/discrepancyCriteria.R | no_license | cran/DiceDesign | R | false | false | 4,230 | r | discrepancyCriteria <- function(design, type='all'){
#---------------------------------------
# source code by Jessica FRANCO (2006.10.05)
# modified by Bertrand Iooss (2013.26.12)
#---------------------------------------
# inputs
# - design of experiments
# - type of dicrepancies to be computed
X <- as.matrix(design)
dimension <- dim(X)[2] # dimension
n <- dim(X)[1] # number of points
if ( n < dimension ){
stop('Warning : the number of points is lower than the dimension.')
}
# To check the experimental region
if ( min(X)<0 || max(X)>1 ){
warning("The design is rescaling into the unit cube [0,1]^d.")
M <- apply(X,2,max)
m <- apply(X,2,min)
for (j in 1:dim(X)[2]){
X[,j] <- (X[,j]-m[j])/(M[j]-m[j])
}
}
R <- list()
DisC2 <- FALSE
DisL2 <- FALSE
DisL2star <- FALSE
DisM2 <- FALSE
DisS2 <- FALSE
DisW2 <- FALSE
DisMix2 <- FALSE
if (length(type)==1 && type=='all'){
type <- c('C2','L2','L2star','M2','S2','W2','Mix2')
}
for(i in 1:length(type)){
type_ <- type[i]
switch(type_,
C2 = {DisC2 <- TRUE},
L2 = {DisL2 <- TRUE},
L2star = {DisL2star <- TRUE},
M2 = {DisM2 <- TRUE},
S2 = {DisS2 <- TRUE},
W2 = {DisW2 <- TRUE},
Mix2 = {DisMix2 <- TRUE})
}
# centered L2-discrepancy
#------------------------
if(DisC2 == TRUE){
s1 <- 0; s2 <- 0
for (i in 1:n){
p <- prod((1+0.5*abs(X[i,]-0.5)-0.5*((abs(X[i,]-0.5))^2)))
s1 <- s1+p
for (k in 1:n){
q <- prod((1+0.5*abs(X[i,]-0.5)+0.5*abs(X[k,]-0.5)-0.5*abs(X[i,]-X[k,])))
s2 <- s2+q
}
}
R <- c(R,DisC2 = sqrt(((13/12)^dimension)-((2/n)*s1) + ((1/n^2)*s2)))
}
# L2-discrepancy
#------------------------
if(DisL2 == TRUE){
s1 <- 0; s2 <- 0
for (i in 1:n){
p <- prod(X[i,]*(1-X[i,]))
s1 <- s1+p
for (k in 1:n){
q <- 1
for (j in 1:dimension){
q <- q*(1-max(X[i,j],X[k,j]))*min(X[i,j],X[k,j])
}
s2 <- s2+q
}
}
R <- c(R,DisL2 = sqrt(12^(-dimension) - (((2^(1-dimension))/n)*s1) + ((1/n^2)*s2)))
}
# L2star-discrepancy
#------------------------
if(DisL2star == TRUE){
dL2<-0
for (j in 1:n){
for (i in 1:n){
if(i!=j){
t<-c()
for (l in 1:dimension) t<-c(t,1-max(X[i,l],X[j,l]))
t<-(prod(t))/(n^2)
}
else{
t1<-1-X[i,]
t1<-prod(t1)
t2<-1-X[i,]^2
t2<-prod(t2)
t<-t1/(n^2)-((2^(1-dimension))/n)*t2
}
dL2<-dL2+t}
}
R <- c(R,DisL2star = sqrt(3^(-dimension)+dL2))
}
# modified L2-discrepancy
#------------------------
if(DisM2 == TRUE){
s1 <- 0; s2 <- 0
for (i in 1:n){
p <- 1
p <- prod((3-(X[i,]*X[i,])))
s1 <- s1+p
for (k in 1:n){
q <- 1
for (j in 1:dimension){
q <- q*(2-max(X[i,j],X[k,j]))
}
s2 <- s2+q
}
}
R <- c(R,DisM2 = sqrt(((4/3)^dimension) - (((2^(1-dimension))/n)*s1) + ((1/n^2)*s2)))
}
# symmetric L2-discrepancy
#------------------------
if(DisS2 == TRUE){
s1 <- 0; s2 <- 0
for (i in 1:n){
p <- prod((1+(2*X[i,])-(2*X[i,]*X[i,])))
s1 <- s1+p
for (k in 1:n){
q <- prod((1-abs(X[i,]-X[k,])))
s2 <- s2+q
}
}
R <- c(R,DisS2 = sqrt(((4/3)^dimension) - ((2/n)*s1) + ((2^dimension/n^2)*s2)))
}
# wrap-around L2-discrepancy
#------------------------
if(DisW2 == TRUE){
s1 <- 0
for (i in 1:n){
for (k in 1:n){
p <- prod((1.5-((abs(X[i,]-X[k,]))*(1-abs(X[i,]-X[k,])))))
s1 <- s1+p
}
}
R <- c(R , DisW2 = sqrt((-((4/3)^dimension) + ((1/n^2)*s1))))
}
# mixture L2-discrepancy
#------------------------
if(DisMix2 == TRUE){
s1 <- 0; s2 <- 0
for (i in 1:n){
p <- prod((5/3-0.25*abs(X[i,]-0.5)-0.25*((abs(X[i,]-0.5))^2)))
s1 <- s1+p
for (k in 1:n){
q <- prod((15/8-0.25*abs(X[i,]-0.5)-0.25*abs(X[k,]-0.5)-0.75*abs(X[i,]-X[k,])+0.5*((abs(X[i,]-X[k,]))^2)))
s2 <- s2+q
}
}
R <- c(R,DisMix2 = sqrt(((19/12)^dimension)-((2/n)*s1) + ((1/n^2)*s2)))
}
return(R)
}
|
#####################
## Define settings ##
#####################
# Host-dependent paths: load the laptop or the EBI-cluster copy of the
# project-wide settings and utility functions.
# NOTE(review): the `io` and `opts` lists used below are assumed to be
# initialised by the sourced settings.R -- confirm before running standalone.
if (grepl("ricard",Sys.info()['nodename'])) {
  source("/Users/ricard/scnmt_gastrulation/settings.R")
  source("/Users/ricard/scnmt_gastrulation/utils.R")
} else if (grepl("ebi",Sys.info()['nodename'])) {
  source("/homes/ricard/scnmt_gastrulation/settings.R")
  source("/homes/ricard/scnmt_gastrulation/utils.R")
} else {
  stop()
}
# I/O
io$umap <- paste0(io$basedir,"/metaccrna/mofa/all_stages/umap_coordinates.txt")
io$outdir <- paste0(io$basedir,"/metaccrna/mefisto/quantification_imputation")
# Define which stage and lineages to look at
# (the commented entries are stage_lineage combinations deliberately excluded)
opts$stage_lineage <- c(
  # "E4.5_Epiblast",
  # "E4.5_Primitive_endoderm",
  # "E5.5_Epiblast",
  # "E5.5_Visceral_endoderm",
  "E6.5_Epiblast",
  # "E6.5_Primitive_Streak",
  # "E6.5_Visceral_endoderm",
  # "E6.5_Mesoderm",
  "E7.5_Epiblast",
  "E7.5_Primitive_Streak",
  "E7.5_Ectoderm",
  "E7.5_Endoderm",
  "E7.5_Mesoderm"
  # "E7.5_Visceral_endoderm"
)
# Filtering options for methylation
opts$met_min.cells <- 50 # minimum number of cells per feature (per stage)
opts$met_nfeatures <- 500 # maximum number of features per view (filter based on variance)
# Filtering options for accessibility
opts$acc_min.cells <- 50 # minimum number of cells per feature (per stage)
opts$acc_nfeatures <- 500 # maximum number of features per view (filter based on variance)
############################
## Update sample metadata ##
############################
# Keep cells that pass RNA QC and belong to the selected stage_lineage groups.
sample_metadata <- fread(io$metadata) %>%
  .[,stage_lineage:=as.factor(paste(stage,lineage10x_2,sep="_"))] %>%
  .[pass_rnaQC==T & stage_lineage%in%opts$stage_lineage] %>%
  droplevels
# Cell identifiers per modality, restricted to cells passing each QC flag.
opts$met_cells <- sample_metadata %>% .[pass_metQC==T,id_met]
opts$rna_cells <- sample_metadata %>% .[pass_rnaQC==T,id_rna]
opts$acc_cells <- sample_metadata %>% .[pass_accQC==T,id_acc]
| /scnmt/quantification_imputation/load_settings.R | no_license | bioFAM/MEFISTO_analyses | R | false | false | 1,861 | r | #####################
## Define settings ##
#####################
if (grepl("ricard",Sys.info()['nodename'])) {
source("/Users/ricard/scnmt_gastrulation/settings.R")
source("/Users/ricard/scnmt_gastrulation/utils.R")
} else if (grepl("ebi",Sys.info()['nodename'])) {
source("/homes/ricard/scnmt_gastrulation/settings.R")
source("/homes/ricard/scnmt_gastrulation/utils.R")
} else {
stop()
}
# I/O
io$umap <- paste0(io$basedir,"/metaccrna/mofa/all_stages/umap_coordinates.txt")
io$outdir <- paste0(io$basedir,"/metaccrna/mefisto/quantification_imputation")
# Define which stage and lineages to look at
opts$stage_lineage <- c(
# "E4.5_Epiblast",
# "E4.5_Primitive_endoderm",
# "E5.5_Epiblast",
# "E5.5_Visceral_endoderm",
"E6.5_Epiblast",
# "E6.5_Primitive_Streak",
# "E6.5_Visceral_endoderm",
# "E6.5_Mesoderm",
"E7.5_Epiblast",
"E7.5_Primitive_Streak",
"E7.5_Ectoderm",
"E7.5_Endoderm",
"E7.5_Mesoderm"
# "E7.5_Visceral_endoderm"
)
# Filtering options for methylation
opts$met_min.cells <- 50 # minimum number of cells per feature (per stage)
opts$met_nfeatures <- 500 # maximum number of features per view (filter based on variance)
# Filtering options for accessibility
opts$acc_min.cells <- 50 # minimum number of cells per feature (per stage)
opts$acc_nfeatures <- 500 # maximum number of features per view (filter based on variance)
############################
## Update sample metadata ##
############################
sample_metadata <- fread(io$metadata) %>%
.[,stage_lineage:=as.factor(paste(stage,lineage10x_2,sep="_"))] %>%
.[pass_rnaQC==T & stage_lineage%in%opts$stage_lineage] %>%
droplevels
opts$met_cells <- sample_metadata %>% .[pass_metQC==T,id_met]
opts$rna_cells <- sample_metadata %>% .[pass_rnaQC==T,id_rna]
opts$acc_cells <- sample_metadata %>% .[pass_accQC==T,id_acc]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_rand.R
\name{plot_rand}
\alias{plot_rand}
\title{Plot random distribution}
\usage{
plot_rand(
object,
xlab = NA,
ylab = "Frequency",
title = "Random distribution",
text_observed = "observed",
color = "lightgrey",
...
)
}
\arguments{
\item{object}{Object returned from the rand_test() function}
\item{xlab}{Label for the x-axis.}
\item{ylab}{Label for the y-axis.}
\item{title}{Plot title.}
\item{text_observed}{Text for marking the number of observed statistic.}
\item{color}{Bar color.}
\item{...}{Further arguments passed to the plot function.}
}
\description{
This function takes the return of the rand_test function and creates a
histogram with the distribution of the rand sample statistics.
}
| /man/plot_rand.Rd | no_license | cran/scan | R | false | true | 802 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_rand.R
\name{plot_rand}
\alias{plot_rand}
\title{Plot random distribution}
\usage{
plot_rand(
object,
xlab = NA,
ylab = "Frequency",
title = "Random distribution",
text_observed = "observed",
color = "lightgrey",
...
)
}
\arguments{
\item{object}{Object returned from the rand_test() function}
\item{xlab}{Label for the x-axis.}
\item{ylab}{Label for the y-axis.}
\item{title}{Plot title.}
\item{text_observed}{Text for marking the number of observed statistic.}
\item{color}{Bar color.}
\item{...}{Further arguments passed to the plot function.}
}
\description{
This function takes the return of the rand_test function and creates a
histogram with the distribution of the rand sample statistics.
}
|
library(FNN)
# For the Fast Nearest Neighbour (FNN) search we only need the price and the
# category id, which are columns 4 and 5 of the `products` table.
df_table <- products[,4:5]
# Verify that the table was correctly sliced
head(df_table)
# Find the 5 nearest neighbours of every product
model <- get.knn(df_table, k = 5, algorithm = "cover_tree")
# Sanity check: row 2 ("Handtowel") previously returned sensible neighbours
# (3: "Washcloth", 1: "Large Towel", 11: "Shower Cap", 9: "Tissues",
# 14: "Pillowcase"), so the model appears to work.
model$nn.index[2,]
# Collect the neighbour indexes in a data frame.
# FIX: the original call was as.data.frame(model$nn.index, colnames(c("n1",...)))
# -- colnames() of a plain vector is NULL, so it actually passed row.names=NULL
# and the intended column names were never applied; the columns are V1..V5.
index_table <- as.data.frame(model$nn.index)
# Replace each neighbour index by the corresponding product name, one
# neighbourK column per nearest neighbour (replaces 5 copy-pasted assignments).
for (j in seq_len(ncol(index_table))) {
  products[[paste0("neighbour", j)]] <- products$ProductName[index_table[[j]]]
}
# Export the enriched table as CSV.
# NOTE(review): file name kept for backward compatibility, although
# "crossseling" looks like a typo for "crossselling".
write.csv(products,"crossseling.csv")
| /fnn_product_similarity.R | no_license | gabrielbenitezfml/ml-and-data-analysis | R | false | false | 1,491 | r | library(FNN)
#For the Fast Nearest Neighbour(FNN) we will need only the price and the category id
df_table <- products[,4:5]
#Verify if the table was correctly sliced
head(df_table)
#Now Its time to apply the FNN algorith to find the nearest neighbour for each product
model<-get.knn(df_table, k = 5, algorithm = "cover_tree")
#We check if the algorithm is correct by calling one line and see if it matches
model$nn.index[2,]
#The line 2 correspond to the product "Handtowel"
#It has given back 5 indexes: (3:"Washcloth", 1:"Large Towel",11:"Shower Cap",
#9: "Tissues",14: "Pillowcase)
#As we see the algorithm works very well so its time to put it together
#The next step is to merge the data into one table.
#In order to do that we need to create a new table with the neighbours indexes
index_table <- as.data.frame(model$nn.index,colnames(c("n1","n2","n3","n4","n5")))
#After that we just need to replace the index for the productname so we know which
#the name of each closest product
products$neighbour1 <- products$ProductName[index_table$V1]
products$neighbour2 <- products$ProductName[index_table$V2]
products$neighbour3 <- products$ProductName[index_table$V3]
products$neighbour4 <- products$ProductName[index_table$V4]
products$neighbour5 <- products$ProductName[index_table$V5]
#So we finally have the table we were looking for, we just need to simple
#export it as csv and we are done
write.csv(products,"crossseling.csv")
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53828387296911e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613109835-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 257 | r | testlist <- list(A = structure(c(2.31584307392677e+77, 9.53828387296911e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
\name{bfastpp}
\alias{bfastpp}
\title{Time Series Preprocessing for BFAST-Type Models}
\description{
Time series preprocessing for subsequent regression modeling.
Based on a (seasonal) time series, a data frame with the response,
seasonal terms, a trend term, (seasonal) autoregressive terms,
and covariates is computed. This can subsequently be employed in
regression models.
}
\usage{
bfastpp(data, order = 3,
lag = NULL, slag = NULL, na.action = na.omit,
stl = c("none", "trend", "seasonal", "both"))
}
\arguments{
\item{data}{A time series of class \code{\link[stats]{ts}}, or another object that
can be coerced to such. For seasonal components, a frequency greater than 1 is
required.}
\item{order}{numeric. Order of the harmonic term, defaulting to \code{3}.}
\item{lag}{numeric. Orders of the autoregressive term, by default omitted.}
\item{slag}{numeric. Orders of the seasonal autoregressive term, by default omitted.}
\item{na.action}{function for handling \code{NA}s in the data (after all other
preprocessing).}
\item{stl}{character. Prior to all other preprocessing, STL (season-trend decomposition
via LOESS smoothing) can be employed for trend-adjustment and/or season-adjustment.
The \code{"trend"} or \code{"seasonal"} component or both from \code{\link[stats]{stl}}
are removed from each column in \code{data}. By default (\code{"none"}), no STL
adjustment is used.}
\item{...}{ Further options to \code{stlplus}. See \code{?stlplus}}
}
\details{
To facilitate (linear) regression models of time series data, \code{bfastpp} performs
the preprocessing and sets up the regressor terms. It returns a \code{data.frame} containing the
first column of the \code{data} as the \code{response} while further columns (if any) are
used as covariates \code{xreg}. Additionally, a linear trend, seasonal dummies, harmonic
seasonal terms, and (seasonal) autoregressive terms are provided.
Optionally, each column of \code{data} can be seasonally adjusted and/or trend-adjusted via
STL (season-trend decomposition via LOESS smoothing) prior to preprocessing. The idea would
be to capture season and/or trend nonparametrically prior to regression modelling.
}
\value{
\code{bfastpp} returns a \code{"data.frame"} with the following variables (some of which may be matrices).
\item{time}{numeric vector of time stamps,}
\item{response}{response vector (first column of \code{data}),}
\item{trend}{linear time trend (running from 1 to number of observations),}
\item{season}{factor indicating season period,}
\item{harmon}{harmonic seasonal terms (of specified \code{order}),}
\item{lag}{autoregressive terms (or orders \code{lag}, if any),}
\item{slag}{seasonal autoregressive terms (or orders \code{slag}, if any),}
\item{xreg}{covariate regressor (all columns of \code{data} except the first, if any).}
}
\references{
Verbesselt J, Zeileis A, Herold M (2011).
Near Real-Time Disturbance Detection in Terrestrial Ecosystems Using Satellite
Image Time Series: Drought Detection in Somalia.
Working Paper 2011-18. Working Papers in Economics and Statistics,
Research Platform Empirical and Experimental Economics, Universitaet Innsbruck.
\url{http://EconPapers.RePEc.org/RePEc:inn:wpaper:2011-18}.
Submitted to Remote Sensing of Environment.
}
\author{Achim Zeileis}
\seealso{\code{\link[bfast]{bfastmonitor}}}
\examples{
## set up time series
library(zoo)
ndvi <- as.ts(zoo(cbind(a = som$NDVI.a, b = som$NDVI.b), som$Time))
ndvi <- window(ndvi, start = c(2006, 1), end = c(2009, 23))
## parametric season-trend model
d1 <- bfastpp(ndvi, order = 2)
d1lm <- lm(response ~ trend + harmon, data = d1)
summary(d1lm)
## autoregressive model (after nonparametric season-trend adjustment)
d2 <- bfastpp(ndvi, stl = "both", lag = 1:2)
d2lm <- lm(response ~ lag, data = d2)
summary(d2lm)
}
\keyword{ts}
| /man/bfastpp.Rd | no_license | Martin-Jung/bfast | R | false | false | 3,912 | rd | \name{bfastpp}
\alias{bfastpp}
\title{Time Series Preprocessing for BFAST-Type Models}
\description{
Time series preprocessing for subsequent regression modeling.
Based on a (seasonal) time series, a data frame with the response,
seasonal terms, a trend term, (seasonal) autoregressive terms,
and covariates is computed. This can subsequently be employed in
regression models.
}
\usage{
bfastpp(data, order = 3,
lag = NULL, slag = NULL, na.action = na.omit,
stl = c("none", "trend", "seasonal", "both"))
}
\arguments{
\item{data}{A time series of class \code{\link[stats]{ts}}, or another object that
can be coerced to such. For seasonal components, a frequency greater than 1 is
required.}
\item{order}{numeric. Order of the harmonic term, defaulting to \code{3}.}
\item{lag}{numeric. Orders of the autoregressive term, by default omitted.}
\item{slag}{numeric. Orders of the seasonal autoregressive term, by default omitted.}
\item{na.action}{function for handling \code{NA}s in the data (after all other
preprocessing).}
\item{stl}{character. Prior to all other preprocessing, STL (season-trend decomposition
via LOESS smoothing) can be employed for trend-adjustment and/or season-adjustment.
The \code{"trend"} or \code{"seasonal"} component or both from \code{\link[stats]{stl}}
are removed from each column in \code{data}. By default (\code{"none"}), no STL
adjustment is used.}
\item{...}{ Further options to \code{stlplus}. See \code{?stlplus}}
}
\details{
To facilitate (linear) regression models of time series data, \code{bfastpp} facilitates
preprocessing and setting up regressor terms. It returns a \code{data.frame} containing the
first column of the \code{data} as the \code{response} while further columns (if any) are
used as covariates \code{xreg}. Additionally, a linear trend, seasonal dummies, harmonic
seasonal terms, and (seasonal) autoregressive terms are provided.
Optionally, each column of \code{data} can be seasonally adjusted and/or trend-adjusted via
STL (season-trend decomposition via LOESS smoothing) prior to preprocessing. The idea would
be to capture season and/or trend nonparametrically prior to regression modelling.
}
\value{
\code{bfastpp} returns a \code{"data.frame"} with the following variables (some of which may be matrices).
\item{time}{numeric vector of time stamps,}
\item{response}{response vector (first column of \code{data}),}
\item{trend}{linear time trend (running from 1 to number of observations),}
\item{season}{factor indicating season period,}
\item{harmon}{harmonic seasonal terms (of specified \code{order}),}
\item{lag}{autoregressive terms (or orders \code{lag}, if any),}
\item{slag}{seasonal autoregressive terms (or orders \code{slag}, if any),}
\item{xreg}{covariate regressor (all columns of \code{data} except the first, if any).}
}
\references{
Verbesselt J, Zeileis A, Herold M (2011).
Near Real-Time Disturbance Detection in Terrestrial Ecosystems Using Satellite
Image Time Series: Drought Detection in Somalia.
Working Paper 2011-18. Working Papers in Economics and Statistics,
Research Platform Empirical and Experimental Economics, Universitaet Innsbruck.
\url{http://EconPapers.RePEc.org/RePEc:inn:wpaper:2011-18}.
Submitted to Remote Sensing of Environment.
}
\author{Achim Zeileis}
\seealso{\code{\link[bfast]{bfastmonitor}}}
\examples{
## set up time series
library(zoo)
ndvi <- as.ts(zoo(cbind(a = som$NDVI.a, b = som$NDVI.b), som$Time))
ndvi <- window(ndvi, start = c(2006, 1), end = c(2009, 23))
## parametric season-trend model
d1 <- bfastpp(ndvi, order = 2)
d1lm <- lm(response ~ trend + harmon, data = d1)
summary(d1lm)
## autoregressive model (after nonparametric season-trend adjustment)
d2 <- bfastpp(ndvi, stl = "both", lag = 1:2)
d2lm <- lm(response ~ lag, data = d2)
summary(d2lm)
}
\keyword{ts}
|
tabItem(
tabName = "all_empl_view",
h1("Employee Overview"),
actionButton("all_empl_refresh", label = "Refresh Table"),
br(),br(),
fluidRow(
box( title = "", width = 12,
collapsible = TRUE, solidHeader = TRUE, status = "primary",
DT::dataTableOutput("all_empl_tbl")
)
) # End Fluid Row
) # End tabItem | /views/all_empl_view.R | no_license | dhairyadalal/lti_tracker_tool | R | false | false | 350 | r | tabItem(
tabName = "all_empl_view",
h1("Employee Overview"),
actionButton("all_empl_refresh", label = "Refresh Table"),
br(),br(),
fluidRow(
box( title = "", width = 12,
collapsible = TRUE, solidHeader = TRUE, status = "primary",
DT::dataTableOutput("all_empl_tbl")
)
) # End Fluid Row
) # End tabItem |
#--------------------------------------------
#--------------- Opgave 2.1.1.1
#--------------------------------------------
model <- prcomp(id100Small[,-1], center = TRUE, scale = TRUE)
plot(model$sdev[1:20], type = "o")
variance <- model$sdev^2
variance <- variance / sum(variance)
plot(variance[1:20], ylim = c(0,1), type = "o")
cumulativeVariance <- cumsum(variance)
plot(cumulativeVariance[1:20], ylim = c(0,1), type = "o")
| /MachineLearning/FinalRepport/Opgave1_1.R | no_license | DanielHviid/8.-semester-gruppearbejde | R | false | false | 437 | r |
#--------------------------------------------
#--------------- Opgave 2.1.1.1
#--------------------------------------------
model <- prcomp(id100Small[,-1], center = TRUE, scale = TRUE)
plot(model$sdev[1:20], type = "o")
variance <- model$sdev^2
variance <- variance / sum(variance)
plot(variance[1:20], ylim = c(0,1), type = "o")
cumulativeVariance <- cumsum(variance)
plot(cumulativeVariance[1:20], ylim = c(0,1), type = "o")
|
#! /usr/bin/env Rscript
'extract snp counts in gene level for case datasets
Usage:
case_vcf_maf.R [--vcfSource] [--snvType] <input> <output>
Options:
-h --help Show this screen.
-v --version Show version.
--vcfSource=<DB> contral vcf file type, could be ExAC, gnomAD or else. [default: gnomAD]
--snvType=<missense> missense or other "missense_variant|start_lost|stop_lost|protein_altering_variant|stop_gained" [default: missense]
Arguments:
input contral vcf file, could be download from ExAC, genomAD or else
output output filename
' -> doc
# Load dependencies quietly so startup messages do not clutter the output.
suppressMessages(library(maftools))
suppressMessages(library(data.table))
suppressMessages(library(docopt))
# Parse the command line according to the docopt usage string defined above.
arguments <- docopt(doc, version = 'control_table v0.1\n\n')
# NOTE(review): str() already prints its output; the surrounding print()
# additionally prints the invisible NULL that str() returns.
print(str(arguments))
mafFile=arguments$input
maf=read.maf(maf =mafFile)  # read the case MAF file with maftools
#Double Check needed for this
#which(maf@data$Tumor_Seq_Allele1==maf@data$Tumor_Seq_Allele2)
#integer(0)
#which(maf@data$Tumor_Seq_Allele1!=maf@data$Reference_Allele)
#integer(0)
#table(maf@data$Variant_Classification)
#table(maf@data$Variant_Classification)
#table(maf@data$Variant_Classification,maf@data$Variant_Classification)
makeMafToGeneBurdenCount<-function(maf,SELECTED_VAR_TYPE="Missense_Mutation",varTypeCol="Variant_Classification") {
  # Collapse a maftools MAF object into per-gene burden counts for one
  # variant type.
  #
  # Args:
  #   maf:               a maftools MAF object (as returned by read.maf()).
  #   SELECTED_VAR_TYPE: pattern matched with grep() against varTypeCol to
  #                      select the variants to count (so '|' acts as a
  #                      regex alternation).
  #   varTypeCol:        column of maf@data / maf@maf.silent holding the
  #                      variant classification.
  #
  # Returns: a data.table with columns Gene, COUNT_HET, COUNT_HOM,
  #   TOTAL_AC (allele count = HET + 2*HOM) and TOTAL_AN (2 * sample count).
  # NOTE(review): row 3 of maf@summary is assumed to be the sample count --
  # confirm against the maftools version in use.
  totalSampleNum=as.integer(maf@summary[3,"summary"])
  selectedVarInd=grep(SELECTED_VAR_TYPE,maf@data[[varTypeCol]])
  if (length(selectedVarInd)>0) {
    mafSubForCount=maf@data[selectedVarInd,]
  } else { #Not found SELECTED_VAR_TYPE, maybe in silent data
    selectedVarInd=grep(SELECTED_VAR_TYPE,maf@maf.silent[[varTypeCol]])
    mafSubForCount=maf@maf.silent[selectedVarInd,]
  }
  if (length(selectedVarInd)==0) { #Still can't find SELECTED_VAR_TYPE, stop
    stop(paste0("Can't find ",SELECTED_VAR_TYPE," in MAF"))
  }
  # Classify every variant's zygosity from the two tumour alleles versus the
  # reference allele ("None" means Allele2 equals the reference).
  mafSubForCount$HetOrHomoVar="None"
  mafSubForCount$HetOrHomoVar[which(mafSubForCount$Tumor_Seq_Allele2!=mafSubForCount$Reference_Allele)]="COUNT_HET"
  mafSubForCount$HetOrHomoVar[which(mafSubForCount$Tumor_Seq_Allele2!=mafSubForCount$Reference_Allele & mafSubForCount$Tumor_Seq_Allele1!=mafSubForCount$Reference_Allele)]="COUNT_HOM"
  #remove more than one variant in same gene on same sample. More robust
  mafSubForCount=unique(mafSubForCount[,c("Hugo_Symbol","Tumor_Sample_Barcode","HetOrHomoVar")])
  # Count samples per gene and zygosity, then reshape to one row per gene
  # with one column per zygosity class.
  geneBurdenCount=mafSubForCount[, .N, , c("Hugo_Symbol","HetOrHomoVar")]
  geneBurdenCountTable=dcast(geneBurdenCount,Hugo_Symbol~HetOrHomoVar,value.var="N")
  # dcast only creates columns for classes that occur; backfill COUNT_HOM.
  # NOTE(review): COUNT_HET is not backfilled the same way -- if every
  # selected variant were homozygous, the TOTAL_AC line below would fail.
  if (! ("COUNT_HOM" %in% colnames(geneBurdenCountTable))) {
    geneBurdenCountTable$COUNT_HOM=0
  }
  geneBurdenCountTable[is.na(geneBurdenCountTable)]=0
  # Allele count: heterozygotes contribute 1 allele, homozygotes 2.
  geneBurdenCountTable$TOTAL_AC=geneBurdenCountTable$COUNT_HET+2*geneBurdenCountTable$COUNT_HOM
  geneBurdenCountTable$TOTAL_AN=totalSampleNum*2
  colnames(geneBurdenCountTable)[1]="Gene"
  return(geneBurdenCountTable)
}
# Examples of earlier manual runs, kept for reference:
#SELECTED_VAR_TYPE="missense_variant"
#geneBurdenCountTable=makeMafToGeneBurdenCount(maf,SELECTED_VAR_TYPE=SELECTED_VAR_TYPE)
#write.csv(geneBurdenCountTable,paste0(basename(mafFile),".",gsub("\\|",".",SELECTED_VAR_TYPE),".csv"),row.names=FALSE)
#SELECTED_VAR_TYPE="synonymous_variant"
#geneBurdenCountTable=makeMafToGeneBurdenCount(maf,SELECTED_VAR_TYPE=SELECTED_VAR_TYPE)
#write.csv(geneBurdenCountTable,paste0(basename(mafFile),".",gsub("\\|",".",SELECTED_VAR_TYPE),".csv"),row.names=FALSE)
# Count the gene-level burden for the variant type requested on the command
# line and write it next to the input MAF; '|' in the type is replaced by '.'
# so the regex alternation stays file-name safe.
SELECTED_VAR_TYPE=arguments$snvType
geneBurdenCountTable=makeMafToGeneBurdenCount(maf,SELECTED_VAR_TYPE=SELECTED_VAR_TYPE,varTypeCol="Variant_Classification")
write.csv(geneBurdenCountTable,paste0(basename(mafFile),".",gsub("\\|",".",SELECTED_VAR_TYPE),".csv"),row.names=FALSE)
| /src/case_vcf_maf.R | permissive | biobai/iBurden | R | false | false | 3,606 | r | #! /usr/bin/env Rscript
'extract snp counts in gene level for case datasets
Usage:
case_vcf_maf.R [--vcfSource] [--snvType] <input> <output>
Options:
-h --help Show this screen.
-v --version Show version.
--vcfSource=<DB> contral vcf file type, could be ExAC, gnomAD or else. [default: gnomAD]
--snvType=<missense> missense or other "missense_variant|start_lost|stop_lost|protein_altering_variant|stop_gained" [default: missense]
Arguments:
input contral vcf file, could be download from ExAC, genomAD or else
output output filename
' -> doc
suppressMessages(library(maftools))
suppressMessages(library(data.table))
suppressMessages(library(docopt))
arguments <- docopt(doc, version = 'control_table v0.1\n\n')
print(str(arguments))
mafFile=arguments$input
maf=read.maf(maf =mafFile)
#Double Check needed for this
#which(maf@data$Tumor_Seq_Allele1==maf@data$Tumor_Seq_Allele2)
#integer(0)
#which(maf@data$Tumor_Seq_Allele1!=maf@data$Reference_Allele)
#integer(0)
#table(maf@data$Variant_Classification)
#table(maf@data$Variant_Classification)
#table(maf@data$Variant_Classification,maf@data$Variant_Classification)
makeMafToGeneBurdenCount<-function(maf,SELECTED_VAR_TYPE="Missense_Mutation",varTypeCol="Variant_Classification") {
totalSampleNum=as.integer(maf@summary[3,"summary"])
selectedVarInd=grep(SELECTED_VAR_TYPE,maf@data[[varTypeCol]])
if (length(selectedVarInd)>0) {
mafSubForCount=maf@data[selectedVarInd,]
} else { #Not found SELECTED_VAR_TYPE, maybe in slient data
selectedVarInd=grep(SELECTED_VAR_TYPE,maf@maf.silent[[varTypeCol]])
mafSubForCount=maf@maf.silent[selectedVarInd,]
}
if (length(selectedVarInd)==0) { #Still can't find SELECTED_VAR_TYPE, Stop
stop(paste0("Can't find ",SELECTED_VAR_TYPE," in MAF"))
}
mafSubForCount$HetOrHomoVar="None"
mafSubForCount$HetOrHomoVar[which(mafSubForCount$Tumor_Seq_Allele2!=mafSubForCount$Reference_Allele)]="COUNT_HET"
mafSubForCount$HetOrHomoVar[which(mafSubForCount$Tumor_Seq_Allele2!=mafSubForCount$Reference_Allele & mafSubForCount$Tumor_Seq_Allele1!=mafSubForCount$Reference_Allele)]="COUNT_HOM"
#remove more than one variant in same gene on same sample. More robust
mafSubForCount=unique(mafSubForCount[,c("Hugo_Symbol","Tumor_Sample_Barcode","HetOrHomoVar")])
geneBurdenCount=mafSubForCount[, .N, , c("Hugo_Symbol","HetOrHomoVar")]
geneBurdenCountTable=dcast(geneBurdenCount,Hugo_Symbol~HetOrHomoVar,value.var="N")
if (! ("COUNT_HOM" %in% colnames(geneBurdenCountTable))) {
geneBurdenCountTable$COUNT_HOM=0
}
geneBurdenCountTable[is.na(geneBurdenCountTable)]=0
geneBurdenCountTable$TOTAL_AC=geneBurdenCountTable$COUNT_HET+2*geneBurdenCountTable$COUNT_HOM
geneBurdenCountTable$TOTAL_AN=totalSampleNum*2
colnames(geneBurdenCountTable)[1]="Gene"
return(geneBurdenCountTable)
}
#SELECTED_VAR_TYPE="missense_variant"
#geneBurdenCountTable=makeMafToGeneBurdenCount(maf,SELECTED_VAR_TYPE=SELECTED_VAR_TYPE)
#write.csv(geneBurdenCountTable,paste0(basename(mafFile),".",gsub("\\|",".",SELECTED_VAR_TYPE),".csv"),row.names=FALSE)
#SELECTED_VAR_TYPE="synonymous_variant"
#geneBurdenCountTable=makeMafToGeneBurdenCount(maf,SELECTED_VAR_TYPE=SELECTED_VAR_TYPE)
#write.csv(geneBurdenCountTable,paste0(basename(mafFile),".",gsub("\\|",".",SELECTED_VAR_TYPE),".csv"),row.names=FALSE)
SELECTED_VAR_TYPE=arguments$snvType
geneBurdenCountTable=makeMafToGeneBurdenCount(maf,SELECTED_VAR_TYPE=SELECTED_VAR_TYPE,varTypeCol="Variant_Classification")
write.csv(geneBurdenCountTable,paste0(basename(mafFile),".",gsub("\\|",".",SELECTED_VAR_TYPE),".csv"),row.names=FALSE)
|
searchDataToDF <- function(json){
  # Turn a parsed JSON list of search results into a data frame with one row
  # per post. Columns and their order are identical to the historical
  # implementation; character columns stay characters (no factors).
  field_paths <- list(
    from_id        = c('from', 'id'),
    from_name      = c('from', 'name'),
    message        = 'message',
    created_time   = 'created_time',
    type           = 'type',
    link           = 'link',
    id             = 'id',
    likes_count    = c('likes', 'summary', 'total_count'),
    comments_count = c('comments', 'summary', 'total_count'),
    shares_count   = c('shares', 'count'))
  # One column per field, extracted with unlistWithNA (presumably filling
  # missing entries with NA -- see that helper).
  columns <- lapply(field_paths, function(path) unlistWithNA(json, path))
  df <- as.data.frame(columns, stringsAsFactors = FALSE)
  return(df)
}
newsDataToDF <- function(json){
  # Flatten a list of news-feed posts into a data frame, one row per post.
  # The "to" path indexes into a data array: the third component is a
  # 1-based position, handled specially by unlistWithNA.
  fields <- list(
    from_id        = c('from', 'id'),
    from_name      = c('from', 'name'),
    to_id          = c('to', 'data', "1", 'id'),
    to_name        = c('to', 'data', '1', 'name'),
    message        = 'message',
    created_time   = 'created_time',
    type           = 'type',
    link           = 'link',
    id             = 'id',
    likes_count    = c('likes', 'summary', 'total_count'),
    comments_count = c('comments', 'summary', 'total_count'),
    shares_count   = c('shares', 'count'))
  columns <- lapply(fields, function(path) unlistWithNA(json, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
pageDataToDF <- function(json){
  # Flatten a list of page posts into a data frame, one row per post.
  # (Same layout as searchDataToDF.)
  fields <- list(
    from_id        = c('from', 'id'),
    from_name      = c('from', 'name'),
    message        = 'message',
    created_time   = 'created_time',
    type           = 'type',
    link           = 'link',
    id             = 'id',
    likes_count    = c('likes', 'summary', 'total_count'),
    comments_count = c('comments', 'summary', 'total_count'),
    shares_count   = c('shares', 'count'))
  columns <- lapply(fields, function(path) unlistWithNA(json, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
insightsDataToDF <- function(json, values, metric){
  # Convert an insights API response into a data frame.  The response layout
  # depends on the metric, so three shapes are handled:
  #   1. generic metrics: one row per value, with an end_time column;
  #   2. post_consumptions_by_type: values is a named list, no end_time;
  #   3. page_fans_country: one row per country per reporting period.
  # `json` is the list of metric records; `values` is the pre-extracted
  # values component used only by the generic case.
  if (metric!="post_consumptions_by_type" & metric!="page_fans_country"){
    df <- data.frame(
      id = unlistWithNA(json, 'id'),
      metric_name = unlistWithNA(json, 'name'),
      period = unlistWithNA(json, 'period'),
      values = unlistWithNA(values, 'value'),
      end_time = unlistWithNA(values, 'end_time'),
      stringsAsFactors=F)
  }
  if (metric=="post_consumptions_by_type"){
    # NOTE(review): only the first record (json[[1]]) is consulted below --
    # presumably the API returns a single record for this metric; confirm.
    values <- lapply(json[[1]]$values, function(x) x$value)
    df <- data.frame(
      id = unlistWithNA(json, 'id'),
      metric_name = unlistWithNA(json, 'name'),
      period = unlistWithNA(json, 'period'),
      values = unlist(values),
      stringsAsFactors=F)
  }
  if (metric=="page_fans_country"){
    # values for country-level variables
    countries <- lapply(json[[1]]$values, function(x) names(x$value))
    values <- lapply(json[[1]]$values, function(x) x$value)
    end_times <- unlist(lapply(json[[1]]$values, function(x) x$end_time))
    # repeat each period's end_time once per country reported in that period
    end_times <- unlist(lapply(1:length(countries), function(x)
      rep(end_times[[x]], length(countries[[x]]))))
    df <- data.frame(
      id = unlistWithNA(json, 'id'),
      metric_name = unlistWithNA(json, 'name'),
      period = unlistWithNA(json, 'period'),
      country = unlist(countries),
      values = unlist(values),
      end_time = unlist(end_times),
      stringsAsFactors=F)
  }
  return(df)
}
postDataToDF <- function(json){
  # Convert a single parsed post (nested list) into a one-row data frame.
  # Optional scalar fields default to NA and count fields to 0 when absent.
  #
  # Fix: the original used ifelse() on scalar conditions, which truncates
  # the value to length 1 and strips attributes; plain if/else (wrapped in
  # small helpers) preserves the value as-is.
  orNA <- function(value) if (is.null(value)) NA else value
  orZero <- function(value) if (is.null(value)) 0 else value
  df <- data.frame(
    from_id = json$from$id,
    from_name = json$from$name,
    message = orNA(json$message),
    created_time = json$created_time,
    type = json$type,
    link = orNA(json$link),
    id = json$id,
    likes_count = orZero(json$likes$summary$total_count),
    comments_count = orZero(json$comments$summary$total_count),
    shares_count = orZero(json$shares$count),
    stringsAsFactors = FALSE)
  return(df)
}
likesDataToDF <- function(json){
  # Convert a list of "like" records into a data frame of names and ids.
  # Returns NULL when there is nothing to convert (NULL or empty input).
  #
  # Fix: the original's two conditions overlapped -- a non-NULL empty list
  # built the data frame over zero records and then discarded it.  Guard
  # up front instead.
  if (is.null(json) || length(json) == 0) {
    return(NULL)
  }
  df <- data.frame(
    from_name = unlistWithNA(json, "name"),
    from_id = unlistWithNA(json, "id"),
    stringsAsFactors = FALSE
  )
  return(df)
}
commentsDataToDF <- function(json){
  # Flatten a list of comment records into a data frame, one row per
  # comment.  NULL input yields NULL.
  if (is.null(json)){
    return(NULL)
  }
  fields <- list(
    from_id      = c('from', 'id'),
    from_name    = c('from', 'name'),
    message      = 'message',
    created_time = 'created_time',
    likes_count  = 'like_count',
    id           = 'id')
  columns <- lapply(fields, function(path) unlistWithNA(json, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
userDataToDF <- function(user_data, private_info){
  # Flatten a list of user records into a data frame, one row per user.
  # Privacy-restricted fields are appended only when private_info is TRUE.
  fields <- list(
    id          = 'id',
    name        = 'name',
    username    = 'username',
    first_name  = 'first_name',
    middle_name = 'middle_name',
    last_name   = 'last_name',
    gender      = 'gender',
    locale      = 'locale',
    category    = 'category',
    likes       = 'likes',
    picture     = c('picture', 'data', 'url'))
  columns <- lapply(fields, function(path) unlistWithNA(user_data, path))
  df <- do.call(data.frame, c(columns, stringsAsFactors = FALSE))
  if (private_info == TRUE){
    df$birthday <- unlistWithNA(user_data, 'birthday')
    df$location <- unlistWithNA(user_data, c('location', 'name'))
    df$hometown <- unlistWithNA(user_data, c('hometown', 'name'))
    df$relationship_status <- unlistWithNA(user_data, 'relationship_status')
  }
  return(df)
}
checkinDataToDF <- function(checkin_data){
  # One row per check-in: timestamp plus the place's id, name, and location.
  fields <- list(
    checkin_time  = 'created_time',
    place_id      = c('place', 'id'),
    place_name    = c('place', 'name'),
    place_city    = c('place', 'location', 'city'),
    place_state   = c('place', 'location', 'state'),
    place_country = c('place', 'location', 'country'),
    place_lat     = c('place', 'location', 'latitude'),
    place_long    = c('place', 'location', 'longitude'))
  columns <- lapply(fields, function(path) unlistWithNA(checkin_data, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
userLikesToDF <- function(user_likes){
  # One row per page the user likes: page id, page name, page website.
  fields <- list(id = 'id', names = 'name', website = 'website')
  columns <- lapply(fields, function(path) unlistWithNA(user_likes, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
tagsDataToDF <- function(tags){
  # For each post, extract its tags$data element and convert it into a
  # two-column (id, name) data frame; posts without tags map to NULL.
  tag_lists <- lapply(tags, function(post) post[["tags"]][["data"]])
  lapply(tag_lists, function(x) {
    if (is.null(x)) {
      return(NULL)
    }
    # Each tag record contributes an (id, name) pair; unlist flattens them
    # in order, so a by-row 2-column matrix reconstructs the pairs.
    out <- data.frame(matrix(unlist(x), ncol = 2, byrow = TRUE),
                      stringsAsFactors = FALSE)
    names(out) <- c("id", "name")
    out
  })
}
unlistWithNA <- function(lst, field){
  # Pull `field` out of every element of `lst` (a list of parsed JSON
  # records) and return a flat vector.  `field` is a single name or a path
  # of nested names.  Missing values become NA, except for count-like
  # fields (shares / comments / likes), which default to 0.
  #
  # Every branch follows the same pattern: mark the elements where the path
  # resolves to something non-NULL, preallocate the default-filled result,
  # then fill in the resolved values.
  #
  # Fixes: the "shares" and "comments"/"likes" branches indexed field[2] /
  # field[3] without checking the path length, which could error on short
  # paths; they are now length-guarded (&& also short-circuits properly on
  # these scalar tests).  All fill steps now map over lst[notnulls] for
  # consistency (previously some relied on unlist() dropping NULLs, which
  # yields the same values).
  if (length(field)==1){
    notnulls <- unlist(lapply(lst, function(x) !is.null(x[[field]])))
    vect <- rep(NA, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field]]))
  }
  if (length(field)==2){
    notnulls <- unlist(lapply(lst, function(x) !is.null(x[[field[1]]][[field[2]]])))
    vect <- rep(NA, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]]))
  }
  if (length(field)==2 && field[1]=="shares"){
    # share counts: overwrite the generic result so absent shares report 0
    # rather than NA
    notnulls <- unlist(lapply(lst, function(x) !is.null(x[[field[1]]][[field[2]]])))
    vect <- rep(0, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]]))
  }
  if (length(field)==3){
    notnulls <- unlist(lapply(lst, function(x)
      tryCatch(!is.null(x[[field[1]]][[field[2]]][[field[3]]]),
               error=function(e) FALSE)))
    vect <- rep(NA, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]][[field[3]]]))
  }
  if (length(field)==4 && field[1]=="to"){
    # "to" paths index into a data array: the third component is a 1-based
    # position, not a name
    notnulls <- unlist(lapply(lst, function(x)
      tryCatch(!is.null(x[[field[1]]][[field[2]]][[as.numeric(field[3])]][[field[4]]]),
               error=function(e) FALSE)))
    vect <- rep(NA, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]][[as.numeric(field[3])]][[field[4]]]))
  }
  if (length(field)==3 && field[1] %in% c("comments", "likes")){
    # like/comment counts: same 0 default as shares
    notnulls <- unlist(lapply(lst, function(x) !is.null(x[[field[1]]][[field[2]]][[field[3]]])))
    vect <- rep(0, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]][[field[3]]]))
  }
  return(vect)
}
searchPageDataToDF <- function(json){
  # Flatten a list of page records returned by a page search into a data
  # frame, one row per page.
  fields <- list(
    id                  = 'id',
    about               = 'about',
    category            = 'category',
    description         = 'description',
    general_info        = 'general_info',
    likes               = 'likes',
    link                = 'link',
    city                = c('location', 'city'),
    state               = c('location', 'state'),
    country             = c('location', 'country'),
    latitude            = c('location', 'latitude'),
    longitude           = c('location', 'longitude'),
    name                = 'name',
    talking_about_count = 'talking_about_count',
    username            = 'username',
    website             = 'website')
  columns <- lapply(fields, function(path) unlistWithNA(json, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
callAPI <- function(url, token){
  # Issue a GET request against the Graph API, supporting the three token
  # representations (httr config object, httr Token2.0, or a plain access
  # token string), and return the parsed JSON payload.  API-level errors in
  # the payload are re-raised as R errors.
  token_class <- class(token)[1]
  if (token_class == "config"){
    url.data <- GET(url, config = token)
  } else if (token_class == "Token2.0"){
    url.data <- GET(url, config(token = token))
  } else if (token_class == "character"){
    # string token: append it as a query parameter and escape spaces
    url <- paste0(url, "&access_token=", token)
    url <- gsub(" ", "%20", url)
    url.data <- GET(url)
  } else {
    stop("Error in access token. See help for details.")
  }
  content <- fromJSON(rawToChar(url.data$content))
  if (length(content$error) > 0){
    stop(content$error$message)
  }
  return(content)
}
getTokenVersion <- function(token){
  # Infer which API version the token targets.  Some tokens carry the
  # version as a 4th class attribute; otherwise probe the API with a test
  # call and classify by whether it errors (v2) or succeeds (v1).
  version_class <- class(token)[4]
  if (!is.na(version_class)){
    return(version_class)
  }
  probe <- tryCatch(callAPI('https://graph.facebook.com/pablobarbera', token),
                    error = function(e) e)
  if (inherits(probe, 'error')){
    tkversion <- 'v2'
  } else {
    tkversion <- 'v1'
  }
  return(tkversion)
}
formatFbDate <- function(datestring, format="datetime") {
  # Parse timestamps in Facebook's API format ("%Y-%m-%dT%H:%M:%S+0000",
  # GMT) into POSIXct ("datetime", default) or Date ("date").
  #
  # Fix: an unrecognized `format` previously fell through to return(date)
  # and failed with a confusing "object 'date' not found"; match.arg now
  # rejects it with a clear error (valid values are unchanged).
  format <- match.arg(format, c("datetime", "date"))
  fb_format <- "%Y-%m-%dT%H:%M:%S+0000"
  if (format == "datetime"){
    date <- as.POSIXct(datestring, format = fb_format, tz = "GMT")
  } else {
    date <- as.Date(datestring, format = fb_format, tz = "GMT")
  }
  return(date)
}
| /Rfacebook/R/utils.R | no_license | Mageshpoondi/Rfacebook | R | false | false | 10,839 | r | searchDataToDF <- function(json){
df <- data.frame(
from_id = unlistWithNA(json, c('from', 'id')),
from_name = unlistWithNA(json, c('from', 'name')),
message = unlistWithNA(json, 'message'),
created_time = unlistWithNA(json, 'created_time'),
type = unlistWithNA(json, 'type'),
link = unlistWithNA(json, 'link'),
id = unlistWithNA(json, 'id'),
likes_count = unlistWithNA(json, c('likes', 'summary', 'total_count')),
comments_count = unlistWithNA(json, c('comments', 'summary', 'total_count')),
shares_count = unlistWithNA(json, c('shares', 'count')),
stringsAsFactors=F)
return(df)
}
newsDataToDF <- function(json){
  # Flatten a list of news-feed posts into a data frame, one row per post.
  # The "to" path indexes into a data array: the third component is a
  # 1-based position, handled specially by unlistWithNA.
  fields <- list(
    from_id        = c('from', 'id'),
    from_name      = c('from', 'name'),
    to_id          = c('to', 'data', "1", 'id'),
    to_name        = c('to', 'data', '1', 'name'),
    message        = 'message',
    created_time   = 'created_time',
    type           = 'type',
    link           = 'link',
    id             = 'id',
    likes_count    = c('likes', 'summary', 'total_count'),
    comments_count = c('comments', 'summary', 'total_count'),
    shares_count   = c('shares', 'count'))
  columns <- lapply(fields, function(path) unlistWithNA(json, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
pageDataToDF <- function(json){
  # Flatten a list of page posts into a data frame, one row per post.
  # (Same layout as searchDataToDF.)
  fields <- list(
    from_id        = c('from', 'id'),
    from_name      = c('from', 'name'),
    message        = 'message',
    created_time   = 'created_time',
    type           = 'type',
    link           = 'link',
    id             = 'id',
    likes_count    = c('likes', 'summary', 'total_count'),
    comments_count = c('comments', 'summary', 'total_count'),
    shares_count   = c('shares', 'count'))
  columns <- lapply(fields, function(path) unlistWithNA(json, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
insightsDataToDF <- function(json, values, metric){
  # Convert an insights API response into a data frame.  The response layout
  # depends on the metric, so three shapes are handled:
  #   1. generic metrics: one row per value, with an end_time column;
  #   2. post_consumptions_by_type: values is a named list, no end_time;
  #   3. page_fans_country: one row per country per reporting period.
  # `json` is the list of metric records; `values` is the pre-extracted
  # values component used only by the generic case.
  if (metric!="post_consumptions_by_type" & metric!="page_fans_country"){
    df <- data.frame(
      id = unlistWithNA(json, 'id'),
      metric_name = unlistWithNA(json, 'name'),
      period = unlistWithNA(json, 'period'),
      values = unlistWithNA(values, 'value'),
      end_time = unlistWithNA(values, 'end_time'),
      stringsAsFactors=F)
  }
  if (metric=="post_consumptions_by_type"){
    # NOTE(review): only the first record (json[[1]]) is consulted below --
    # presumably the API returns a single record for this metric; confirm.
    values <- lapply(json[[1]]$values, function(x) x$value)
    df <- data.frame(
      id = unlistWithNA(json, 'id'),
      metric_name = unlistWithNA(json, 'name'),
      period = unlistWithNA(json, 'period'),
      values = unlist(values),
      stringsAsFactors=F)
  }
  if (metric=="page_fans_country"){
    # values for country-level variables
    countries <- lapply(json[[1]]$values, function(x) names(x$value))
    values <- lapply(json[[1]]$values, function(x) x$value)
    end_times <- unlist(lapply(json[[1]]$values, function(x) x$end_time))
    # repeat each period's end_time once per country reported in that period
    end_times <- unlist(lapply(1:length(countries), function(x)
      rep(end_times[[x]], length(countries[[x]]))))
    df <- data.frame(
      id = unlistWithNA(json, 'id'),
      metric_name = unlistWithNA(json, 'name'),
      period = unlistWithNA(json, 'period'),
      country = unlist(countries),
      values = unlist(values),
      end_time = unlist(end_times),
      stringsAsFactors=F)
  }
  return(df)
}
postDataToDF <- function(json){
  # Convert a single parsed post (nested list) into a one-row data frame.
  # Optional scalar fields default to NA and count fields to 0 when absent.
  #
  # Fix: the original used ifelse() on scalar conditions, which truncates
  # the value to length 1 and strips attributes; plain if/else (wrapped in
  # small helpers) preserves the value as-is.
  orNA <- function(value) if (is.null(value)) NA else value
  orZero <- function(value) if (is.null(value)) 0 else value
  df <- data.frame(
    from_id = json$from$id,
    from_name = json$from$name,
    message = orNA(json$message),
    created_time = json$created_time,
    type = json$type,
    link = orNA(json$link),
    id = json$id,
    likes_count = orZero(json$likes$summary$total_count),
    comments_count = orZero(json$comments$summary$total_count),
    shares_count = orZero(json$shares$count),
    stringsAsFactors = FALSE)
  return(df)
}
likesDataToDF <- function(json){
  # Convert a list of "like" records into a data frame of names and ids.
  # Returns NULL when there is nothing to convert (NULL or empty input).
  #
  # Fix: the original's two conditions overlapped -- a non-NULL empty list
  # built the data frame over zero records and then discarded it.  Guard
  # up front instead.
  if (is.null(json) || length(json) == 0) {
    return(NULL)
  }
  df <- data.frame(
    from_name = unlistWithNA(json, "name"),
    from_id = unlistWithNA(json, "id"),
    stringsAsFactors = FALSE
  )
  return(df)
}
commentsDataToDF <- function(json){
  # Flatten a list of comment records into a data frame, one row per
  # comment.  NULL input yields NULL.
  if (is.null(json)){
    return(NULL)
  }
  fields <- list(
    from_id      = c('from', 'id'),
    from_name    = c('from', 'name'),
    message      = 'message',
    created_time = 'created_time',
    likes_count  = 'like_count',
    id           = 'id')
  columns <- lapply(fields, function(path) unlistWithNA(json, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
userDataToDF <- function(user_data, private_info){
  # Flatten a list of user records into a data frame, one row per user.
  # Privacy-restricted fields are appended only when private_info is TRUE.
  fields <- list(
    id          = 'id',
    name        = 'name',
    username    = 'username',
    first_name  = 'first_name',
    middle_name = 'middle_name',
    last_name   = 'last_name',
    gender      = 'gender',
    locale      = 'locale',
    category    = 'category',
    likes       = 'likes',
    picture     = c('picture', 'data', 'url'))
  columns <- lapply(fields, function(path) unlistWithNA(user_data, path))
  df <- do.call(data.frame, c(columns, stringsAsFactors = FALSE))
  if (private_info == TRUE){
    df$birthday <- unlistWithNA(user_data, 'birthday')
    df$location <- unlistWithNA(user_data, c('location', 'name'))
    df$hometown <- unlistWithNA(user_data, c('hometown', 'name'))
    df$relationship_status <- unlistWithNA(user_data, 'relationship_status')
  }
  return(df)
}
checkinDataToDF <- function(checkin_data){
  # One row per check-in: timestamp plus the place's id, name, and location.
  fields <- list(
    checkin_time  = 'created_time',
    place_id      = c('place', 'id'),
    place_name    = c('place', 'name'),
    place_city    = c('place', 'location', 'city'),
    place_state   = c('place', 'location', 'state'),
    place_country = c('place', 'location', 'country'),
    place_lat     = c('place', 'location', 'latitude'),
    place_long    = c('place', 'location', 'longitude'))
  columns <- lapply(fields, function(path) unlistWithNA(checkin_data, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
userLikesToDF <- function(user_likes){
  # One row per page the user likes: page id, page name, page website.
  fields <- list(id = 'id', names = 'name', website = 'website')
  columns <- lapply(fields, function(path) unlistWithNA(user_likes, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
tagsDataToDF <- function(tags){
  # For each post, extract its tags$data element and convert it into a
  # two-column (id, name) data frame; posts without tags map to NULL.
  tag_lists <- lapply(tags, function(post) post[["tags"]][["data"]])
  lapply(tag_lists, function(x) {
    if (is.null(x)) {
      return(NULL)
    }
    # Each tag record contributes an (id, name) pair; unlist flattens them
    # in order, so a by-row 2-column matrix reconstructs the pairs.
    out <- data.frame(matrix(unlist(x), ncol = 2, byrow = TRUE),
                      stringsAsFactors = FALSE)
    names(out) <- c("id", "name")
    out
  })
}
unlistWithNA <- function(lst, field){
  # Pull `field` out of every element of `lst` (a list of parsed JSON
  # records) and return a flat vector.  `field` is a single name or a path
  # of nested names.  Missing values become NA, except for count-like
  # fields (shares / comments / likes), which default to 0.
  #
  # Every branch follows the same pattern: mark the elements where the path
  # resolves to something non-NULL, preallocate the default-filled result,
  # then fill in the resolved values.
  #
  # Fixes: the "shares" and "comments"/"likes" branches indexed field[2] /
  # field[3] without checking the path length, which could error on short
  # paths; they are now length-guarded (&& also short-circuits properly on
  # these scalar tests).  All fill steps now map over lst[notnulls] for
  # consistency (previously some relied on unlist() dropping NULLs, which
  # yields the same values).
  if (length(field)==1){
    notnulls <- unlist(lapply(lst, function(x) !is.null(x[[field]])))
    vect <- rep(NA, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field]]))
  }
  if (length(field)==2){
    notnulls <- unlist(lapply(lst, function(x) !is.null(x[[field[1]]][[field[2]]])))
    vect <- rep(NA, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]]))
  }
  if (length(field)==2 && field[1]=="shares"){
    # share counts: overwrite the generic result so absent shares report 0
    # rather than NA
    notnulls <- unlist(lapply(lst, function(x) !is.null(x[[field[1]]][[field[2]]])))
    vect <- rep(0, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]]))
  }
  if (length(field)==3){
    notnulls <- unlist(lapply(lst, function(x)
      tryCatch(!is.null(x[[field[1]]][[field[2]]][[field[3]]]),
               error=function(e) FALSE)))
    vect <- rep(NA, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]][[field[3]]]))
  }
  if (length(field)==4 && field[1]=="to"){
    # "to" paths index into a data array: the third component is a 1-based
    # position, not a name
    notnulls <- unlist(lapply(lst, function(x)
      tryCatch(!is.null(x[[field[1]]][[field[2]]][[as.numeric(field[3])]][[field[4]]]),
               error=function(e) FALSE)))
    vect <- rep(NA, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]][[as.numeric(field[3])]][[field[4]]]))
  }
  if (length(field)==3 && field[1] %in% c("comments", "likes")){
    # like/comment counts: same 0 default as shares
    notnulls <- unlist(lapply(lst, function(x) !is.null(x[[field[1]]][[field[2]]][[field[3]]])))
    vect <- rep(0, length(lst))
    vect[notnulls] <- unlist(lapply(lst[notnulls], function(x) x[[field[1]]][[field[2]]][[field[3]]]))
  }
  return(vect)
}
searchPageDataToDF <- function(json){
  # Flatten a list of page records returned by a page search into a data
  # frame, one row per page.
  fields <- list(
    id                  = 'id',
    about               = 'about',
    category            = 'category',
    description         = 'description',
    general_info        = 'general_info',
    likes               = 'likes',
    link                = 'link',
    city                = c('location', 'city'),
    state               = c('location', 'state'),
    country             = c('location', 'country'),
    latitude            = c('location', 'latitude'),
    longitude           = c('location', 'longitude'),
    name                = 'name',
    talking_about_count = 'talking_about_count',
    username            = 'username',
    website             = 'website')
  columns <- lapply(fields, function(path) unlistWithNA(json, path))
  do.call(data.frame, c(columns, stringsAsFactors = FALSE))
}
callAPI <- function(url, token){
  # Issue a GET request against the Graph API, supporting the three token
  # representations (httr config object, httr Token2.0, or a plain access
  # token string), and return the parsed JSON payload.  API-level errors in
  # the payload are re-raised as R errors.
  token_class <- class(token)[1]
  if (token_class == "config"){
    url.data <- GET(url, config = token)
  } else if (token_class == "Token2.0"){
    url.data <- GET(url, config(token = token))
  } else if (token_class == "character"){
    # string token: append it as a query parameter and escape spaces
    url <- paste0(url, "&access_token=", token)
    url <- gsub(" ", "%20", url)
    url.data <- GET(url)
  } else {
    stop("Error in access token. See help for details.")
  }
  content <- fromJSON(rawToChar(url.data$content))
  if (length(content$error) > 0){
    stop(content$error$message)
  }
  return(content)
}
getTokenVersion <- function(token){
  # Infer which API version the token targets.  Some tokens carry the
  # version as a 4th class attribute; otherwise probe the API with a test
  # call and classify by whether it errors (v2) or succeeds (v1).
  version_class <- class(token)[4]
  if (!is.na(version_class)){
    return(version_class)
  }
  probe <- tryCatch(callAPI('https://graph.facebook.com/pablobarbera', token),
                    error = function(e) e)
  if (inherits(probe, 'error')){
    tkversion <- 'v2'
  } else {
    tkversion <- 'v1'
  }
  return(tkversion)
}
formatFbDate <- function(datestring, format="datetime") {
  # Parse timestamps in Facebook's API format ("%Y-%m-%dT%H:%M:%S+0000",
  # GMT) into POSIXct ("datetime", default) or Date ("date").
  #
  # Fix: an unrecognized `format` previously fell through to return(date)
  # and failed with a confusing "object 'date' not found"; match.arg now
  # rejects it with a clear error (valid values are unchanged).
  format <- match.arg(format, c("datetime", "date"))
  fb_format <- "%Y-%m-%dT%H:%M:%S+0000"
  if (format == "datetime"){
    date <- as.POSIXct(datestring, format = fb_format, tz = "GMT")
  } else {
    date <- as.Date(datestring, format = fb_format, tz = "GMT")
  }
  return(date)
}
|
\name{CVARace.data}
\alias{CVARace.data}
\docType{data}
\title{
A SUBSET OF THE EDUCATION DATA SET
}
\description{
See Understanding Biplots
}
\usage{data(CVARace.data)}
\format{
A data frame with 799 observations on the following 6 variables.
\describe{
\item{\code{TOTScore}}{a numeric vector}
\item{\code{eduyrs}}{a numeric vector}
\item{\code{age}}{a numeric vector}
\item{\code{pcexpdec}}{a numeric vector}
\item{\code{mEdY}}{a numeric vector}
\item{\code{groepe}}{a factor with levels \code{Black} \code{Coloured} \code{Indian} \code{White}}
}
}
\keyword{datasets}
| /man/CVARace.data.Rd | no_license | carelvdmerwe/UBbipl3 | R | false | false | 602 | rd | \name{CVARace.data}
\alias{CVARace.data}
\docType{data}
\title{
A SUBSET OF THE EDUCATION DATA SET
}
\description{
See Understanding Biplots
}
\usage{data(CVARace.data)}
\format{
A data frame with 799 observations on the following 6 variables.
\describe{
\item{\code{TOTScore}}{a numeric vector}
\item{\code{eduyrs}}{a numeric vector}
\item{\code{age}}{a numeric vector}
\item{\code{pcexpdec}}{a numeric vector}
\item{\code{mEdY}}{a numeric vector}
\item{\code{groepe}}{a factor with levels \code{Black} \code{Coloured} \code{Indian} \code{White}}
}
}
\keyword{datasets}
|
#!/usr/bin/env Rscript
# Exploratory plots of the HairEyeColor data set: a bar chart of eye-colour
# frequencies, the same chart annotated with percentages, and a gender-split
# (dodged) comparison.
# imported libraries
library(ggplot2)
library(dplyr)
# Expects HairEyeColor.csv in the working directory; usage below requires
# at least Eye and Sex columns (one row per observation).
data <- read.csv("HairEyeColor.csv")
# Quick look at the categorical structure and two ways to count the rows.
summary(apply(data, 2, as.factor))
nrow(data)
dim(data)[1]
## Plot bar graph no color with only eye color and occurences
eye_plot <- qplot(data$Eye, geom= "bar")
eye_plot_labeled <- eye_plot + labs(title= "", x = "Eye Color", y = "Occurences")
print(eye_plot_labeled) ##
# Percentage of observations per eye colour, used to annotate the bars.
eye_counts <- count(data, Eye)
eye_pcts <- round((eye_counts$n/sum(eye_counts$n)*100), 1)
# Place each percentage label just above its bar (y = count + 10);
# x = seq(1,4) assumes exactly four eye-colour levels -- TODO confirm.
eye_plot_pcts <- eye_plot_labeled + annotate("text", x = seq(1,4), y = eye_counts$n+10, label = paste(eye_pcts, "%", sep = ""))
# Grouped (dodged) bars: eye colour split by gender.
eye_gender_plot <- ggplot(data = data, mapping = aes(x = Eye))
eye_gender_barplot <- eye_gender_plot + geom_bar(aes(fill = Sex), position = "dodge") + labs(title = "Relation of Eye Color to Gender", x = "Eye Color", y = "Occurrences")
# this prints to a file called RPlot.pdf
#print(eye_gender_barplot)
| /3/Fall/MATH-338/programs/making_saving_plots.r | no_license | Greencats0/university | R | false | false | 914 | r | #!/usr/bin/env Rscript
# Exploratory plots of the HairEyeColor data set: a bar chart of eye-colour
# frequencies, the same chart annotated with percentages, and a gender-split
# (dodged) comparison.
# imported libraries
library(ggplot2)
library(dplyr)
# Expects HairEyeColor.csv in the working directory; usage below requires
# at least Eye and Sex columns (one row per observation).
data <- read.csv("HairEyeColor.csv")
# Quick look at the categorical structure and two ways to count the rows.
summary(apply(data, 2, as.factor))
nrow(data)
dim(data)[1]
## Plot bar graph no color with only eye color and occurences
eye_plot <- qplot(data$Eye, geom= "bar")
eye_plot_labeled <- eye_plot + labs(title= "", x = "Eye Color", y = "Occurences")
print(eye_plot_labeled) ##
# Percentage of observations per eye colour, used to annotate the bars.
eye_counts <- count(data, Eye)
eye_pcts <- round((eye_counts$n/sum(eye_counts$n)*100), 1)
# Place each percentage label just above its bar (y = count + 10);
# x = seq(1,4) assumes exactly four eye-colour levels -- TODO confirm.
eye_plot_pcts <- eye_plot_labeled + annotate("text", x = seq(1,4), y = eye_counts$n+10, label = paste(eye_pcts, "%", sep = ""))
# Grouped (dodged) bars: eye colour split by gender.
eye_gender_plot <- ggplot(data = data, mapping = aes(x = Eye))
eye_gender_barplot <- eye_gender_plot + geom_bar(aes(fill = Sex), position = "dodge") + labs(title = "Relation of Eye Color to Gender", x = "Eye Color", y = "Occurrences")
# this prints to a file called RPlot.pdf
#print(eye_gender_barplot)
|
"paraep4" <-
function(lmom, checklmom=TRUE,
method=c("A", "DG", "ADG"),
sqrt.t3t4=TRUE, eps=1e-4,
checkbounds=TRUE, kapapproved=TRUE,
snap.tau4=FALSE, nudge.tau4=0,
A.guess=NULL, K.guess=NULL, H.guess=NULL, ...) {
method <- match.arg(method)
if(checklmom && ! are.lmom.valid(lmom)) {
warning("L-moments are invalid.")
return()
}
if(length(lmom$L1) != 0) { # convert to named L-moments
lmom <- lmorph(lmom) # nondestructive conversion!
}
para <- vector(mode="numeric", length=4)
names(para) <- c("xi","alpha","kappa","h")
para <- c(NA, NA, NA, NA)
z <- list(type = 'aep4', para = para,
source = "paraep4", method = method,
ifail = 0, ifailtext="")
L234 <- list(para_L234 = para,
ifail_L234=NA,
optim.converg_L234=NA,
optim.message_L234=NA,
optim.value_L234=NA,
optim.counts_L234=NA)
T34 <- list(para_T34 = para,
ifail_T34=NA,
optim.converg_T34=NA,
optim.message_T34=NA,
optim.value_T34=NA,
optim.counts_T34=NA)
L1 <- lmom$lambdas[1]
L2 <- lmom$lambdas[2]
L3 <- lmom$lambdas[3]
L4 <- lmom$lambdas[4]
T2 <- lmom$ratios[2]
T3 <- lmom$ratios[3]
T4 <- lmom$ratios[4]
if(checkbounds) {
a <- abs(T3)
co <- c(0.7755464, -3.3354852, 14.1955782, -29.9090294,
37.2141451, -24.7411869, 6.7997646)
T4.lowerbounds <- co[1]*a + co[2]*a^2 + co[3]*a^3 +
co[4]*a^4 + co[5]*a^5 + co[6]*a^6 + co[7]*a^7
if(T4 < T4.lowerbounds) {
if(snap.tau4) {
T4o <- T4
T4 <- T4.lowerbounds + abs(nudge.tau4) # only permit upwards
z$message <- paste0("Tau4 snapped up to lower bounds of Tau3-Tau4: ", T4o, " ---> ",T4)
} else if(kapapproved) {
z <- parkap(lmom)
z$ifailkap <- z$ifail
z$ifailtextkap <- z$ifailtext
z$ifailtext <- "TAU4 is estimated as below limits of AEP4, Kappa fit instead"
z$source <- "paraep4 --> parkap"
return(z)
} else {
z$ifail <- 4
z$ifailtext <- "TAU4 is estimated as below limits of AEP4, Kappa not fit instead"
return(z)
}
}
}
if(is.null(A.guess)) A.guess <- 1
err <- (T3 - .lmomcohash$AEPkh2lmrTable$T3)^2 +
(T4 - .lmomcohash$AEPkh2lmrTable$T4)^2
if(is.null(K.guess)) {
K.guess <- .lmomcohash$AEPkh2lmrTable$K[err == min(err)]
}
if(is.null(H.guess)) {
H.guess <- .lmomcohash$AEPkh2lmrTable$H[err == min(err)]
}
para.guess <- vec2par(c(0,A.guess,K.guess,H.guess), type="aep4")
if(! are.paraep4.valid(para.guess)) {
message <- "One or more of the guesses of A, K, and H (regardless of method choice) are invalid."
warning(message)
z$ifail <- 3
z$ifailtext <- message
return(z)
}
if(method != "A") {
opt <- NULL
"fn" <- function(ps, ...) {
para <- list(para=c(0,exp(ps)), type="aep4")
slmr <- lmomaep4(para, paracheck=FALSE)
return(log(1 + (L2 - slmr$lambdas[2])^2
+ (L3 - slmr$lambdas[3])^2
+ (L4 - slmr$lambdas[4])^2))
}
try( opt <- optim(log(c(A.guess,K.guess,H.guess)), fn), silent=TRUE)
if(is.null(opt) | length(opt$par) == 0) {
L234$ifail_L234 <- 1
L234$optim.converg_L234 <- NA
L234$optim.message_L234 <- NA
L234$optim.value_L234 <- NA
L234$optim.counts_L234 <- NA
z$L234 <- L234
if(method == "DG") {
message <- "The function optim failed or reports failure on its own behalf."
warning(message)
z$ifail <- 1
z$ifailtext <- message
return(z)
}
} else {
para[2:4] <- exp(opt$par)
A <- para[2]
K <- para[3]
H <- para[4]
KmK <- 1/K - K
H2H1 <- exp(lgamma(2/H) - lgamma(1/H))
U <- L1 - A * KmK * H2H1
para[1] <- U
L234$para_L234 <- para
L234$ifail_L234 <- opt$convergence
if(method == "DG") {
z$para <- para
z$ifail <- L234$ifail_L234
}
L234$optim.converg_L234 <- opt$convergence
L234$optim.message_L234 <- opt$message
L234$optim.value_L234 <- opt$value
L234$optim.counts_L234 <- opt$counts
z$L234 <- L234
if(method == "DG") {
if(! are.paraep4.valid(z)) {
message <- "One or more parameters are not valid: Delicado-Goria method."
warning(message)
z$ifailtext <- message
z$ifail <- 3
}
return(z)
}
}
}
"sqrtit" <- function(x) { return(x) }
if(sqrt.t3t4) "sqrtit" <- function(x) { return(sqrt(x)) }
opt <- NULL
"fn" <- function(ps, ...) {
para <- list(para=c(0,1,exp(ps)), type="aep4")
#print(para)
slmr <- lmomaep4(para, paracheck=FALSE, t3t4only=TRUE)
return(sqrtit((T3 - slmr$T3)^2 + (T4 - slmr$T4)^2))
}
try( opt <- optim(log(c(K.guess,H.guess)), fn), silent=TRUE)
if(is.null(opt) | length(opt$par) == 0) {
T34$ifail_T34 <- 1
T34$optim.converg_T34 <- NA
T34$optim.message_T34 <- NA
T34$optim.value_T34 <- NA
T34$optim.counts_T34 <- NA
z$T34 <- T34
message <- "The function optim failed or reports failure on its own behalf."
warning(message)
z$ifail <- 1
z$ifailtext <- message
return(z)
}
para[3:4] <- exp(opt$par)
#if(para[3] <= 0) para[3] <- exp(-4) # Asquith (2014, figs. 1--2) plateaus: log(K) << -2
#if(para[4] <= 0) para[4] <- exp(-4) # Asquith (2014, figs. 1--2) plateaus: log(H) << -2
K <- para[3]
H <- para[4]
KmK <- 1/K - K
H2H1 <- exp(lgamma(2/H) - lgamma(1/H))
Ihalf <- pbeta(1/2, shape1=1/H, shape2=2/H)
KK <- K*K
KKK <- KK*K
L2a <- -K * KmK^2 / (1+KK)
L2b <- 2 * KK * (1/KKK + KKK) / (1+KK)^2 * Ihalf
A <- L2 / ((L2a + L2b) * H2H1)
para[2] <- A
U <- L1 - A * KmK * H2H1
para[1] <- U
z$para <- para
T34$para_T34 <- para
T34$ifail_T34 <- opt$convergence
z$ifail <- T34$ifail_T34
if(opt$value > eps) {
message <- "Judging a solution failure based on eps value to convergence error: one of the A methods."
warning(message)
z$ifailtext <- message
z$ifail <- 2
}
T34$optim.converg_T34 <- opt$convergence
T34$optim.message_T34 <- opt$message
T34$optim.value_T34 <- opt$value
T34$optim.counts_T34 <- opt$counts
z$T34 <- T34
#message('A=',A," K=",K," H=",H,"\n")
if(! are.paraep4.valid(z)) {
message <- "One or more parameters are not valid: One of the A methods."
warning(message)
z$ifailtext <- message
z$ifail <- 3
}
#print(para)
return(z)
}
# ifail = 3 is a parameter validity failure
# ifail = 2 is a singular failure flagged by comparing the optim minimum against eps, outside of optim itself
# ifail = 1 is a failure reported by optim
| /R/paraep4.R | no_license | wasquith/lmomco | R | false | false | 7,796 | r | "paraep4" <-
function(lmom, checklmom=TRUE,
method=c("A", "DG", "ADG"),
sqrt.t3t4=TRUE, eps=1e-4,
checkbounds=TRUE, kapapproved=TRUE,
snap.tau4=FALSE, nudge.tau4=0,
A.guess=NULL, K.guess=NULL, H.guess=NULL, ...) {
method <- match.arg(method)
if(checklmom && ! are.lmom.valid(lmom)) {
warning("L-moments are invalid.")
return()
}
if(length(lmom$L1) != 0) { # convert to named L-moments
lmom <- lmorph(lmom) # nondestructive conversion!
}
para <- vector(mode="numeric", length=4)
names(para) <- c("xi","alpha","kappa","h")
para <- c(NA, NA, NA, NA)
z <- list(type = 'aep4', para = para,
source = "paraep4", method = method,
ifail = 0, ifailtext="")
L234 <- list(para_L234 = para,
ifail_L234=NA,
optim.converg_L234=NA,
optim.message_L234=NA,
optim.value_L234=NA,
optim.counts_L234=NA)
T34 <- list(para_T34 = para,
ifail_T34=NA,
optim.converg_T34=NA,
optim.message_T34=NA,
optim.value_T34=NA,
optim.counts_T34=NA)
L1 <- lmom$lambdas[1]
L2 <- lmom$lambdas[2]
L3 <- lmom$lambdas[3]
L4 <- lmom$lambdas[4]
T2 <- lmom$ratios[2]
T3 <- lmom$ratios[3]
T4 <- lmom$ratios[4]
if(checkbounds) {
a <- abs(T3)
co <- c(0.7755464, -3.3354852, 14.1955782, -29.9090294,
37.2141451, -24.7411869, 6.7997646)
T4.lowerbounds <- co[1]*a + co[2]*a^2 + co[3]*a^3 +
co[4]*a^4 + co[5]*a^5 + co[6]*a^6 + co[7]*a^7
if(T4 < T4.lowerbounds) {
if(snap.tau4) {
T4o <- T4
T4 <- T4.lowerbounds + abs(nudge.tau4) # only permit upwards
z$message <- paste0("Tau4 snapped up to lower bounds of Tau3-Tau4: ", T4o, " ---> ",T4)
} else if(kapapproved) {
z <- parkap(lmom)
z$ifailkap <- z$ifail
z$ifailtextkap <- z$ifailtext
z$ifailtext <- "TAU4 is estimated as below limits of AEP4, Kappa fit instead"
z$source <- "paraep4 --> parkap"
return(z)
} else {
z$ifail <- 4
z$ifailtext <- "TAU4 is estimated as below limits of AEP4, Kappa not fit instead"
return(z)
}
}
}
if(is.null(A.guess)) A.guess <- 1
err <- (T3 - .lmomcohash$AEPkh2lmrTable$T3)^2 +
(T4 - .lmomcohash$AEPkh2lmrTable$T4)^2
if(is.null(K.guess)) {
K.guess <- .lmomcohash$AEPkh2lmrTable$K[err == min(err)]
}
if(is.null(H.guess)) {
H.guess <- .lmomcohash$AEPkh2lmrTable$H[err == min(err)]
}
para.guess <- vec2par(c(0,A.guess,K.guess,H.guess), type="aep4")
if(! are.paraep4.valid(para.guess)) {
message <- "One or more of the guesses of A, K, and H (regardless of method choice) are invalid."
warning(message)
z$ifail <- 3
z$ifailtext <- message
return(z)
}
if(method != "A") {
opt <- NULL
"fn" <- function(ps, ...) {
para <- list(para=c(0,exp(ps)), type="aep4")
slmr <- lmomaep4(para, paracheck=FALSE)
return(log(1 + (L2 - slmr$lambdas[2])^2
+ (L3 - slmr$lambdas[3])^2
+ (L4 - slmr$lambdas[4])^2))
}
try( opt <- optim(log(c(A.guess,K.guess,H.guess)), fn), silent=TRUE)
if(is.null(opt) | length(opt$par) == 0) {
L234$ifail_L234 <- 1
L234$optim.converg_L234 <- NA
L234$optim.message_L234 <- NA
L234$optim.value_L234 <- NA
L234$optim.counts_L234 <- NA
z$L234 <- L234
if(method == "DG") {
message <- "The function optim failed or reports failure on its own behalf."
warning(message)
z$ifail <- 1
z$ifailtext <- message
return(z)
}
} else {
para[2:4] <- exp(opt$par)
A <- para[2]
K <- para[3]
H <- para[4]
KmK <- 1/K - K
H2H1 <- exp(lgamma(2/H) - lgamma(1/H))
U <- L1 - A * KmK * H2H1
para[1] <- U
L234$para_L234 <- para
L234$ifail_L234 <- opt$convergence
if(method == "DG") {
z$para <- para
z$ifail <- L234$ifail_L234
}
L234$optim.converg_L234 <- opt$convergence
L234$optim.message_L234 <- opt$message
L234$optim.value_L234 <- opt$value
L234$optim.counts_L234 <- opt$counts
z$L234 <- L234
if(method == "DG") {
if(! are.paraep4.valid(z)) {
message <- "One or more parameters are not valid: Delicado-Goria method."
warning(message)
z$ifailtext <- message
z$ifail <- 3
}
return(z)
}
}
}
"sqrtit" <- function(x) { return(x) }
if(sqrt.t3t4) "sqrtit" <- function(x) { return(sqrt(x)) }
opt <- NULL
"fn" <- function(ps, ...) {
para <- list(para=c(0,1,exp(ps)), type="aep4")
#print(para)
slmr <- lmomaep4(para, paracheck=FALSE, t3t4only=TRUE)
return(sqrtit((T3 - slmr$T3)^2 + (T4 - slmr$T4)^2))
}
try( opt <- optim(log(c(K.guess,H.guess)), fn), silent=TRUE)
if(is.null(opt) | length(opt$par) == 0) {
T34$ifail_T34 <- 1
T34$optim.converg_T34 <- NA
T34$optim.message_T34 <- NA
T34$optim.value_T34 <- NA
T34$optim.counts_T34 <- NA
z$T34 <- T34
message <- "The function optim failed or reports failure on its own behalf."
warning(message)
z$ifail <- 1
z$ifailtext <- message
return(z)
}
para[3:4] <- exp(opt$par)
#if(para[3] <= 0) para[3] <- exp(-4) # Asquith (2014, figs. 1--2) plateaus: log(K) << -2
#if(para[4] <= 0) para[4] <- exp(-4) # Asquith (2014, figs. 1--2) plateaus: log(H) << -2
K <- para[3]
H <- para[4]
KmK <- 1/K - K
H2H1 <- exp(lgamma(2/H) - lgamma(1/H))
Ihalf <- pbeta(1/2, shape1=1/H, shape2=2/H)
KK <- K*K
KKK <- KK*K
L2a <- -K * KmK^2 / (1+KK)
L2b <- 2 * KK * (1/KKK + KKK) / (1+KK)^2 * Ihalf
A <- L2 / ((L2a + L2b) * H2H1)
para[2] <- A
U <- L1 - A * KmK * H2H1
para[1] <- U
z$para <- para
T34$para_T34 <- para
T34$ifail_T34 <- opt$convergence
z$ifail <- T34$ifail_T34
if(opt$value > eps) {
message <- "Judging a solution failure based on eps value to convergence error: one of the A methods."
warning(message)
z$ifailtext <- message
z$ifail <- 2
}
T34$optim.converg_T34 <- opt$convergence
T34$optim.message_T34 <- opt$message
T34$optim.value_T34 <- opt$value
T34$optim.counts_T34 <- opt$counts
z$T34 <- T34
#message('A=',A," K=",K," H=",H,"\n")
if(! are.paraep4.valid(z)) {
message <- "One or more parameters are not valid: One of the A methods."
warning(message)
z$ifailtext <- message
z$ifail <- 3
}
#print(para)
return(z)
}
# ifail3 is a parameter validity failure
# ifail2 is a general attempt to have a singular failure by sometype of eps outside of optim
# ifail1 is a failure by optim
|
#Yellow-shouldered amazon functions
library(popbio)
library(tidyverse)
# Here is the function
# pi, piSD etc are inside the data frame. This needs to be specified this
# for example: dataSource$pi[1] is where pi[1] is located
#--------------------------------------------------------------------------------------------------------------------------------
# ysaFunc creates a matrix from randomly drawn data, with p values taken from a beta distribution and f values
# drawn from a lognormal distribution
# ysaFunc: build one stochastic 3-stage projection matrix for the
# yellow-shouldered amazon from the vital-rate table `dataSource`
# (columns pi, piSD, f, fSD, di; rows 1-3 = juvenile sub-stages,
# row 4 = sub-adult, row 5 = breeder).
# Survival rates are drawn from beta distributions via popbio::betaval();
# fecundity is drawn with rnorm().
# NOTE(review): the file header says f values come from a lognormal
# distribution, but rnorm() draws from a normal -- confirm which is intended.
ysaFunc <- function (dataSource)
{
#ps
# One beta draw per survival rate, shrinking the SD when the requested
# variance is infeasible for a beta with mean mu (needs var < mu*(1-mu)).
p <- purrr::map2(dataSource$pi, dataSource$piSD, function(mu, sdev) {
## check if variance < (1-p) * p
if (sdev^2 < (1 - mu)*mu) {
## OK to use sdev in betaval function
betaval(mu, sdev, fx=runif(1))
} else {
## Replace sdev with allowable value
betaval(mu, sqrt((1 - mu)*mu) - 0.01, fx=runif(1))
}
})
names(p) <- c("p1a", "p1b", "p1c", "p2", "p3")
## this adds elements of the list to the current environment
list2env(p, envir = environment())
#f
# Fecundity draw for the breeding stage (stage 3).
f3 <- rnorm(1, mean = (dataSource$f[5]), sd = (dataSource$fSD[5]))
# Pi <- ((1 - (pi^(di - 1)))/(1 - (pi^di)))*pi ------- equation for Pi's
# Gi <- (pi^di*(1 - pi))/(1 - pi^di) ------- equation for Gi's
#d
# Stage durations: the three juvenile sub-stage durations are pooled into d1.
d1 <- dataSource$di[1] + dataSource$di[2] + dataSource$di[3]
d2 <- dataSource$di[4]
d3 <- dataSource$di[5]
# this uses p1's defined above
p1 <- (p1a*p1b*p1c) # this stage as the survival is from the multiplication of p1a, p1b and p1c
#add ps
# construct the matrix using defined parameters above
matrix2 <- matrix(0, nrow = 3, ncol = 3)
dimnames(matrix2) <- list(rownames(matrix2, do.NULL = FALSE, prefix = "row"),
                          colnames(matrix2, do.NULL = FALSE, prefix = "col"))
# Diagonal: probability of surviving and remaining in each stage (Pi).
matrix2[1,1] <- ((1 - (p1^(d1 - 1)))/(1 - (p1^d1)))*p1
matrix2[2,2] <- ((1 - (p2^(d2 - 1)))/(1 - (p2^d2)))*p2
matrix2[3,3] <- ((1 - (p3^(d3 - 1)))/(1 - (p3^d3)))*p3
#add f - fix for missing survival
#matrix2[1,3] <- f3
# Fecundity discounted by adult survival.
matrix2[1,3] <- f3*matrix2[3,3]
#add gs
# Sub-diagonal: probability of surviving and growing to the next stage (Gi).
matrix2[2,1] <- (p1^d1*(1 - p1))/(1 - p1^d1)
matrix2[3,2] <- (p2^d2*(1 - p2))/(1 - p2^d2)
return(matrix2)
}
#--------------------------------------------------------------------------------------------------------------------------------
# ysameanFunc creates a matrix based on means, ie the raw values from the data source rather than drawing from distributions
# ysameanFunc: deterministic 3-stage projection matrix built from the mean
# vital rates stored in `dataSource` (columns pi, f, di), i.e. the raw values
# with no random draws.
ysameanFunc <- function (dataSource)
{
  # Stage-transition probabilities for a stage of duration d and survival p:
  #   stay: P = ((1 - p^(d-1)) / (1 - p^d)) * p  (survive and remain in stage)
  #   grow: G = (p^d * (1 - p)) / (1 - p^d)      (survive and advance a stage)
  stay <- function(p, d) ((1 - (p^(d - 1)))/(1 - (p^d)))*p
  grow <- function(p, d) (p^d*(1 - p))/(1 - p^d)
  # Stage-1 survival is the product of the three juvenile sub-stage rates.
  juv <- dataSource$pi[1:3]
  stage_p <- c(prod(juv), dataSource$pi[4], dataSource$pi[5])
  # Stage durations: the juvenile sub-stage durations are pooled into d1.
  stage_d <- c(sum(dataSource$di[1:3]), dataSource$di[4], dataSource$di[5])
  # Mean fecundity of the breeding (third) stage.
  f3 <- dataSource$f[5]
  # Assemble the 3x3 stage-structured matrix.
  A <- matrix(0, nrow = 3, ncol = 3)
  diag(A) <- stay(stage_p, stage_d)
  # Fecundity is discounted by adult survival (fix for missing survival term).
  A[1, 3] <- f3 * A[3, 3]
  A[2, 1] <- grow(stage_p[1], stage_d[1])
  A[3, 2] <- grow(stage_p[2], stage_d[2])
  return(A)
}
#--------------------------------------------------------------------------------------------------------------------------------
# ysaFuncDD creates a function to calculate a matrix model with density-dependent fecundity - need to pass in the current
# population vector n and threshold for density-dependent effects. Also specify whether to make it stochastic or just use mean
# values.
# ysaFuncDD: 3-stage projection matrix with density-dependent fecundity.
# `n` is the current population vector and `threshold` the breeder count above
# which per-capita fecundity is scaled down by threshold / n[3]. With
# stochastic = TRUE survival is drawn via popbio::betaval() and fecundity via
# rnorm(); otherwise the mean vital rates in `dataSource` are used directly.
ysaFuncDD <- function (dataSource, n, threshold, stochastic = FALSE)
{
  # Stage-transition probabilities for duration d and survival p:
  #   stay: P = ((1 - p^(d-1)) / (1 - p^d)) * p  (survive and remain in stage)
  #   grow: G = (p^d * (1 - p)) / (1 - p^d)      (survive and advance a stage)
  stay <- function(p, d) ((1 - (p^(d - 1)))/(1 - (p^d)))*p
  grow <- function(p, d) (p^d*(1 - p))/(1 - p^d)
  if (stochastic) {
    # One beta draw per survival rate, in the same row order as dataSource
    # (p1a, p1b, p1c, p2, p3) so the RNG stream matches the original code.
    draws <- vapply(1:5, function(k) {
      betaval((dataSource$pi[k]), (dataSource$piSD[k]), fx = runif(1))
    }, numeric(1))
    # Fecundity draw for the breeding stage.
    f3 <- rnorm(1, mean = (dataSource$f[5]), sd = (dataSource$fSD[5]))
  } else {
    draws <- dataSource$pi[1:5]
    f3 <- dataSource$f[5]
  }
  # Stage-1 survival is the product of the three juvenile sub-stage rates;
  # juvenile sub-stage durations are pooled into the stage-1 duration.
  stage_p <- c(prod(draws[1:3]), draws[4], draws[5])
  stage_d <- c(sum(dataSource$di[1:3]), dataSource$di[4], dataSource$di[5])
  A <- matrix(0, nrow = 3, ncol = 3)
  diag(A) <- stay(stage_p, stage_d)
  # Density-dependent fecundity based on the number of breeders n[3]:
  # above `threshold`, per-capita fecundity is scaled by threshold / n[3].
  if (n[3] > threshold) {
    A[1, 3] <- f3 * threshold / n[3]
  } else {
    A[1, 3] <- f3
  }
  A[2, 1] <- grow(stage_p[1], stage_d[1])
  A[3, 2] <- grow(stage_p[2], stage_d[2])
  return(A)
}
| /APB_BreedPropExercise/ysa functions.R | no_license | andbeck/parrot-proj | R | false | false | 5,813 | r | #Yellow-shouldered amazon functions
library(popbio)
library(tidyverse)
# Here is the function
# pi, piSD etc are inside the data frame. This needs to be specified this
# for example: dataSource$pi[1] is where pi[1] is located
#--------------------------------------------------------------------------------------------------------------------------------
# ysaFunc creates a matrix from randomly drawn data, with p values taken from a beta distribution and f values
# drawn from a lognormal distribution
# ysaFunc: one stochastic 3-stage projection matrix from the vital-rate table
# `dataSource` (columns pi, piSD, f, fSD, di). Survival rates are drawn from
# beta distributions via popbio::betaval(); fecundity via rnorm().
ysaFunc <- function (dataSource)
{
  # One beta draw per survival rate; when the requested SD implies a variance
  # that is infeasible for a beta with mean mu (needs var < mu*(1-mu)), it is
  # shrunk to just inside the boundary before calling betaval(). The map2
  # iteration keeps the RNG call order identical to the original code.
  draws <- purrr::map2(dataSource$pi, dataSource$piSD, function(mu, sdev) {
    if (sdev^2 < (1 - mu)*mu) {
      betaval(mu, sdev, fx=runif(1))
    } else {
      betaval(mu, sqrt((1 - mu)*mu) - 0.01, fx=runif(1))
    }
  })
  # Rows 1-3 are the juvenile sub-stages; stage-1 survival is their product.
  p1 <- draws[[1]] * draws[[2]] * draws[[3]]
  p2 <- draws[[4]]
  p3 <- draws[[5]]
  # Fecundity draw for the breeding stage. NOTE(review): the file header says
  # lognormal, but this is a normal draw -- behaviour kept identical.
  f3 <- rnorm(1, mean = (dataSource$f[5]), sd = (dataSource$fSD[5]))
  # Stage durations: the three juvenile sub-stage durations are pooled into d1.
  d1 <- dataSource$di[1] + dataSource$di[2] + dataSource$di[3]
  d2 <- dataSource$di[4]
  d3 <- dataSource$di[5]
  # Stage-transition probabilities for duration d and survival p:
  #   stay: P = ((1 - p^(d-1)) / (1 - p^d)) * p  (survive and remain)
  #   grow: G = (p^d * (1 - p)) / (1 - p^d)      (survive and advance)
  stay <- function(p, d) ((1 - (p^(d - 1)))/(1 - (p^d)))*p
  grow <- function(p, d) (p^d*(1 - p))/(1 - p^d)
  A <- matrix(0, nrow = 3, ncol = 3,
              dimnames = list(paste0("row", 1:3), paste0("col", 1:3)))
  A[1,1] <- stay(p1, d1)
  A[2,2] <- stay(p2, d2)
  A[3,3] <- stay(p3, d3)
  # Fecundity discounted by adult survival (fix for missing survival term).
  A[1,3] <- f3 * A[3,3]
  A[2,1] <- grow(p1, d1)
  A[3,2] <- grow(p2, d2)
  return(A)
}
#--------------------------------------------------------------------------------------------------------------------------------
# ysameanFunc creates a matrix based on means, ie the raw values from the data source rather than drawing from distributions
# ysameanFunc: deterministic counterpart of ysaFunc -- builds the 3-stage
# projection matrix directly from the mean vital rates in `dataSource`
# (columns pi, f, di), with no random draws.
ysameanFunc <- function (dataSource)
{
#ps
# Mean survival of the three juvenile sub-stages plus stages 2 and 3.
p1a<- dataSource$pi[1]
p1b<- dataSource$pi[2]
p1c<- dataSource$pi[3]
p2 <- dataSource$pi[4]
p3 <- dataSource$pi[5]
#f
f3 <- dataSource$f[5] #should 3.3 be divided by 2
# Pi <- ((1 - (pi^(di - 1)))/(1 - (pi^di)))*pi ------- equation for Pi's
# Gi <- (pi^di*(1 - pi))/(1 - pi^di) ------- equation for Gi's
#d
# Stage durations: the three juvenile sub-stage durations are pooled into d1.
d1 <- dataSource$di[1] + dataSource$di[2] + dataSource$di[3]
d2 <- dataSource$di[4]
d3 <- dataSource$di[5]
# this uses p1's defined above
p1 <- (p1a*p1b*p1c) # this stage as the survival is from the multiplication of p1a, p1b and p1c
#add ps
# construct the matrix using defined parameters above
matrix2 <- matrix(0, nrow = 3, ncol = 3)
# Diagonal: probability of surviving and remaining in each stage (Pi).
matrix2[1,1] <- ((1 - (p1^(d1 - 1)))/(1 - (p1^d1)))*p1
matrix2[2,2] <- ((1 - (p2^(d2 - 1)))/(1 - (p2^d2)))*p2
matrix2[3,3] <- ((1 - (p3^(d3 - 1)))/(1 - (p3^d3)))*p3
#add f - fix for missing survival
#matrix2[1,3] <- f3
# Fecundity discounted by adult survival.
matrix2[1,3] <- f3*matrix2[3,3]
#add gs
# Sub-diagonal: probability of surviving and growing to the next stage (Gi).
matrix2[2,1] <- (p1^d1*(1 - p1))/(1 - p1^d1)
matrix2[3,2] <- (p2^d2*(1 - p2))/(1 - p2^d2)
return(matrix2)
}
#--------------------------------------------------------------------------------------------------------------------------------
# ysaFuncDD creates a function to calculate a matrix model with density-dependent fecundity - need to pass in the current
# population vector n and threshold for density-dependent effects. Also specify whether to make it stochastic or just use mean
# values.
# ysaFuncDD: 3-stage projection matrix with density-dependent fecundity.
# `n` is the current population vector and `threshold` the breeder count above
# which per-capita fecundity is scaled down. With stochastic = TRUE, survival
# is drawn via popbio::betaval() and fecundity via rnorm(); otherwise the mean
# vital rates in `dataSource` are used directly.
# NOTE(review): unlike ysaFunc, the betaval() calls here do not shrink the SD
# to the feasible beta variance -- confirm the inputs always satisfy
# sd^2 < mu*(1-mu).
ysaFuncDD <- function (dataSource, n, threshold, stochastic = FALSE)
{
if (stochastic) {
#ps
# One beta draw per survival rate (juvenile sub-stages, sub-adult, breeder).
p1a<- betaval((dataSource$pi[1]), (dataSource$piSD[1]), fx=runif(1))
p1b<- betaval((dataSource$pi[2]), (dataSource$piSD[2]), fx=runif(1))
p1c<- betaval((dataSource$pi[3]), (dataSource$piSD[3]), fx=runif(1))
p2 <- betaval((dataSource$pi[4]), (dataSource$piSD[4]), fx=runif(1))
p3 <- betaval((dataSource$pi[5]), (dataSource$piSD[5]), fx=runif(1))
# F
# N > M -> (M*F)/N
# N <= M -> F
f3 <- rnorm(1, mean = (dataSource$f[5]), sd = (dataSource$fSD[5]))
} else {
#ps
# Deterministic branch: use the mean vital rates as-is.
p1a<- dataSource$pi[1]
p1b<- dataSource$pi[2]
p1c<- dataSource$pi[3]
p2 <- dataSource$pi[4]
p3 <- dataSource$pi[5]
#f
f3 <- dataSource$f[5]
}
# Pi <- ((1 - (pi^(di - 1)))/(1 - (pi^di)))*pi ------- equation for Pi's
# Gi <- (pi^di*(1 - pi))/(1 - pi^di) ------- equation for Gi's
#d
# Stage durations: the three juvenile sub-stage durations are pooled into d1.
d1 <- dataSource$di[1] + dataSource$di[2] + dataSource$di[3]
d2 <- dataSource$di[4]
d3 <- dataSource$di[5]
# this uses p1's defined above
p1 <- (p1a*p1b*p1c) # this stage as the survival is from the multiplication of p1a, p1b and p1c
#add ps
# construct the matrix using defined parameters above
matrix2 <- matrix(0, nrow = 3, ncol = 3)
# Diagonal: probability of surviving and remaining in each stage (Pi).
matrix2[1,1] <- ((1 - (p1^(d1 - 1)))/(1 - (p1^d1)))*p1
matrix2[2,2] <- ((1 - (p2^(d2 - 1)))/(1 - (p2^d2)))*p2
matrix2[3,3] <- ((1 - (p3^(d3 - 1)))/(1 - (p3^d3)))*p3
#add f - including density dependence based on number of breeders
# Above the breeder threshold, per-capita fecundity scales as threshold/n[3].
if (n[3] > threshold) {
matrix2[1,3] <- f3*threshold/n[3]
} else {
matrix2[1,3] <- f3
}
#add gs
# Sub-diagonal: probability of surviving and growing to the next stage (Gi).
matrix2[2,1] <- (p1^d1*(1 - p1))/(1 - p1^d1)
matrix2[3,2] <- (p2^d2*(1 - p2))/(1 - p2^d2)
return(matrix2)
}
|
# plot4.R: build plot4.png -- a 2x2 panel of household power-consumption
# plots restricted to 2007-02-01 and 2007-02-02.
#load the data set
filename<-"household_power_consumption.txt"
# "?" encodes missing readings in this dataset, hence na.strings="?".
all_data <- read.table(filename,header=T,sep=";",colClasses=c("character","character","double","double","double","double","double","double","numeric"),na.strings="?")
#just add the date and time together
all_data$DateTime = paste(all_data$Date, all_data$Time)
#format it
all_data$DateTime = as.POSIXlt(all_data$DateTime,format="%d/%m/%Y %H:%M:%S")
#format it
# NOTE(review): as.Date() on a POSIXlt ignores the format argument; harmless here.
all_data$Date = as.Date(all_data$DateTime, format="%d/%m/%Y %H:%M:%S")
#we only want two days worth of data
my_data<- subset(all_data, Date==as.Date("2007-02-01") | Date==as.Date("2007-02-02"))
#clear up some memory
rm(all_data)
#make the file
png("plot4.png", height = 480, width = 480, units = "px");
#fill it with a four way graph
# NOTE(review): layout() below supersedes par(mfrow); the par() call is redundant.
par(mfrow=c(2,2))
layout(matrix(c(1,2,3,4), 2, 2, byrow = TRUE))
# Panel 1 (top-left): global active power over time.
plot(x=(my_data$DateTime), y=(my_data$Global_active_power), type="l", xlab="", ylab="Global Active Power")
# Panel 2 (top-right): voltage over time.
plot(x=(my_data$DateTime), y=(my_data$Voltage), type="l", xlab="datetime", ylab="Voltage")
# Panel 3 (bottom-left): the three sub-metering series overlaid.
plot(x=(my_data$DateTime),y=my_data$Sub_metering_1,type="l",ylab="Energy sub metering",xlab="")
lines(x=(my_data$DateTime),y=my_data$Sub_metering_2,col="red")
lines(x=(my_data$DateTime),y=my_data$Sub_metering_3,col="blue")
# NOTE(review): legend's bty accepts "o"/"n"; "l" looks like a typo for "n" -- confirm.
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty="l",col=c("black","red","blue"),lwd=2,cex=0.7)
# Panel 4 (bottom-right): global reactive power over time.
plot(x=(my_data$DateTime), y=(my_data$Global_reactive_power), type="l", xlab="datetime", ylab="Global_reactive_power")
#close the device
dev.off()
| /plot4.R | no_license | AnonDataScience/ExData_Plotting1 | R | false | false | 1,518 | r |
# plot4.R: draw a 2x2 panel of power-consumption plots for 2007-02-01 and
# 2007-02-02 from the UCI household power dataset and save it as plot4.png.
power_file <- "household_power_consumption.txt"
# "?" marks missing readings in this dataset.
raw <- read.table(power_file, header = T, sep = ";",
                  colClasses = c("character", "character", "double", "double",
                                 "double", "double", "double", "double", "numeric"),
                  na.strings = "?")
# Combine the separate Date and Time columns into one POSIX timestamp,
# then rebuild Date as a Date column for subsetting.
raw$DateTime = as.POSIXlt(paste(raw$Date, raw$Time), format = "%d/%m/%Y %H:%M:%S")
raw$Date = as.Date(raw$DateTime, format = "%d/%m/%Y %H:%M:%S")
# Keep only the two target days, then free the full table.
my_data <- subset(raw, Date %in% as.Date(c("2007-02-01", "2007-02-02")))
rm(raw)
# Open the output device: 480x480 px PNG.
png("plot4.png", height = 480, width = 480, units = "px")
# layout() supersedes the mfrow setting; both kept as in the original script.
par(mfrow = c(2, 2))
layout(matrix(c(1, 2, 3, 4), 2, 2, byrow = TRUE))
with(my_data, {
  # Top-left: global active power; top-right: voltage.
  plot(DateTime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
  plot(DateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
  # Bottom-left: the three sub-metering series overlaid with a legend.
  plot(DateTime, Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
  legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         bty = "l", col = c("black", "red", "blue"), lwd = 2, cex = 0.7)
  # Bottom-right: global reactive power.
  plot(DateTime, Global_reactive_power, type = "l", xlab = "datetime",
       ylab = "Global_reactive_power")
})
dev.off()
|
# Analytic Jacobian of a polynomial model.
#
# Args:
#   nVar:   number of model variables.
#   dMax:   maximum polynomial degree.
#   coeffF: pMax x nVar matrix of polynomial coefficients (one column per equation).
#   strF:   optional 0/1 structure matrix; derived from the nonzero entries of
#           coeffF when NULL.
# Returns a list:
#   J:  one row of polynomial coefficients per derivative dF_i/dx_j,
#   lc: matching (i, j) index pairs, one row per row of J.
jacobi <- function(nVar,dMax,coeffF,strF=NULL) {
  strRef <- regOrd(nVar,dMax)
  # Number of monomials of degree <= dMax in nVar variables.
  pMax <- choose(dMax+nVar,nVar)
  # Default structure: a term participates iff its coefficient is nonzero.
  if (is.null(strF)) {
    strF <- coeffF * 0 + 1
    strF[coeffF == 0] <- 0
  }
  J <- matrix(0, ncol = pMax, nrow=0)
  lc <- matrix(0, ncol = 2, nrow=0)
  for (i in 1:nVar) {
    # Terms and nonzero-coefficient indices of equation i. Both depend only on
    # i, so compute them once here rather than inside the inner loop (they were
    # previously recomputed nVar times per equation).
    toBeDerived <- strRef[,strF[,i]*1:pMax]
    l <- which(coeffF[,i] != 0)
    for (j in 1:nVar) {
      # Partial derivative of equation i with respect to variable j.
      dpoly <- polyFD(nVar,dMax,toBeDerived,coeffF[l,i],j)
      J <- rbind(J,dpoly$coeff)
      lc <- rbind(lc,c(i,j))
    }
  }
  Jac <- list()
  Jac$J <- J
  Jac$lc <- lc
  Jac
}
| /R/jacobi.R | no_license | cran/GPoM.FDLyapu | R | false | false | 1,009 | r | jacobi <- function(nVar,dMax,coeffF,strF=NULL) {
strRef <- regOrd(nVar,dMax)
pMax <- choose(dMax+nVar,nVar)
# structure of Lorenz model
if (is.null(strF)) {
strF <- coeffF * 0 + 1
strF[coeffF == 0] <- 0
}
#labelPoly(nVar,dMax,strF[,1]*1:pMax)
#labelPoly(nVar,dMax,strF[,2]*1:pMax)
#labelPoly(nVar,dMax,strF[,3]*1:pMax)
# coefficients
#print(coeffF)
# Compute Jacob
J <- matrix(0, ncol = pMax, nrow=0)
lc <- matrix(0, ncol = 2, nrow=0)
for (i in 1:nVar) {
for (j in 1:nVar) {
toBeDerived <- strRef[,strF[,i]*1:pMax]
l <- (1:dim(coeffF)[1])[coeffF[,i]!=0]
dpoly <- polyFD(nVar,dMax,toBeDerived,coeffF[l,i],j)
#print(c(i,j))
#print(dpoly$coeff)
# and terms:
#print(labelPoly(nVar,dMax, dpoly$poly*(1:pMax)))
J <- rbind(J,dpoly$coeff)
lc <- rbind(lc,c(i,j))
}
}
Jac <- list()
Jac$J <- J
Jac$lc <- lc
Jac
}
|
\name{binomTest}
\alias{binomTest}
\title{Exact Binomial Tests for Comparing Two Digital Libraries}
\description{
Computes p-values for differential abundance for each gene between two digital libraries,
conditioning on the total count for each gene.
The counts in each group as a proportion of the whole are assumed to follow a binomial distribution.
}
\usage{
binomTest(y1, y2, n1=sum(y1), n2=sum(y2), p=n1/(n1+n2))
}
\arguments{
\item{y1}{integer vector giving the count for each gene in the first library.
Non-integer values are rounded to the nearest integer.}
\item{y2}{integer vector giving the count for each gene in the second library.
Of same length as \code{y1}.
Non-integer values are rounded to the nearest integer.}
\item{n1}{total number of counts in the first library, across all genes.
Non-integer values are rounded to the nearest integer. Not required if \code{p} is supplied.}
\item{n2}{total number of counts in the second library, across all genes.
Non-integer values are rounded to the nearest integer. Not required if \code{p} is supplied.}
\item{p}{expected proportion of \code{y1} to the total for each gene under the null hypothesis.}
}
\details{
This function can be used to compare two libraries from SAGE, RNA-Seq, ChIP-Seq or other sequencing technologies with respect to technical variation.
An exact two-sided binomial test is computed for each gene.
This test is closely related to Fisher's exact test for 2x2 contingency tables but, unlike Fisher's test, it conditions on the total number of counts for each gene.
The null hypothesis is that the expected counts are in the same proportions as the library sizes, i.e., that the binomial probability for the first library is \code{n1/(n1+n2)}.
The two-sided rejection region is chosen analogously to Fisher's test.
Specifically, the rejection region consists of those values with smallest probabilities
under the null hypothesis.
When the counts are reasonably large, the binomial test, Fisher's test and Pearson's chisquare all give the same results.
When the counts are smaller, the binomial test is usually to be preferred in this context.
This function replaces the earlier \code{sage.test} functions in the statmod and sagenhaft packages.
It produces the same results as \code{\link{binom.test}} in the stats package, but is much faster.
}
\value{
Numeric vector of p-values.
}
\references{
\url{http://en.wikipedia.org/wiki/Binomial_test}
\url{http://en.wikipedia.org/wiki/Fisher's_exact_test}
\url{http://en.wikipedia.org/wiki/Serial_analysis_of_gene_expression}
\url{http://en.wikipedia.org/wiki/RNA-Seq}
}
\author{Gordon Smyth}
\seealso{
\code{\link[statmod:sage.test]{sage.test}} (statmod package), \code{\link{binom.test}} (stats package)
}
\examples{
binomTest(c(0,5,10),c(0,30,50),n1=10000,n2=15000)
# Univariate equivalents:
binom.test(5,5+30,p=10000/(10000+15000))$p.value
binom.test(10,10+50,p=10000/(10000+15000))$p.value
}
\concept{Differential expression}
| /man/binomTest.Rd | no_license | hiraksarkar/edgeR_fork | R | false | false | 3,042 | rd | \name{binomTest}
\alias{binomTest}
\title{Exact Binomial Tests for Comparing Two Digital Libraries}
\description{
Computes p-values for differential abundance for each gene between two digital libraries,
conditioning on the total count for each gene.
The counts in each group as a proportion of the whole are assumed to follow a binomial distribution.
}
\usage{
binomTest(y1, y2, n1=sum(y1), n2=sum(y2), p=n1/(n1+n2))
}
\arguments{
\item{y1}{integer vector giving the count for each gene in the first library.
Non-integer values are rounded to the nearest integer.}
\item{y2}{integer vector giving the count for each gene in the second library.
Of same length as \code{y1}.
Non-integer values are rounded to the nearest integer.}
\item{n1}{total number of counts in the first library, across all genes.
Non-integer values are rounded to the nearest integer. Not required if \code{p} is supplied.}
\item{n2}{total number of counts in the second library, across all genes.
Non-integer values are rounded to the nearest integer. Not required if \code{p} is supplied.}
\item{p}{expected proportion of \code{y1} to the total for each gene under the null hypothesis.}
}
\details{
This function can be used to compare two libraries from SAGE, RNA-Seq, ChIP-Seq or other sequencing technologies with respect to technical variation.
An exact two-sided binomial test is computed for each gene.
This test is closely related to Fisher's exact test for 2x2 contingency tables but, unlike Fisher's test, it conditions on the total number of counts for each gene.
The null hypothesis is that the expected counts are in the same proportions as the library sizes, i.e., that the binomial probability for the first library is \code{n1/(n1+n2)}.
The two-sided rejection region is chosen analogously to Fisher's test.
Specifically, the rejection region consists of those values with smallest probabilities
under the null hypothesis.
When the counts are reasonably large, the binomial test, Fisher's test and Pearson's chisquare all give the same results.
When the counts are smaller, the binomial test is usually to be preferred in this context.
This function replaces the earlier \code{sage.test} functions in the statmod and sagenhaft packages.
It produces the same results as \code{\link{binom.test}} in the stats packge, but is much faster.
}
\value{
Numeric vector of p-values.
}
\references{
\url{http://en.wikipedia.org/wiki/Binomial_test}
\url{http://en.wikipedia.org/wiki/Fisher's_exact_test}
\url{http://en.wikipedia.org/wiki/Serial_analysis_of_gene_expression}
http://en.wikipedia.org/wiki/RNA-Seq
}
\author{Gordon Smyth}
\seealso{
\code{\link[statmod:sage.test]{sage.test}} (statmod package), \code{\link{binom.test}} (stats package)
}
\examples{
binomTest(c(0,5,10),c(0,30,50),n1=10000,n2=15000)
# Univariate equivalents:
binom.test(5,5+30,p=10000/(10000+15000))$p.value
binom.test(10,10+50,p=10000/(10000+15000))$p.value
}
\concept{Differential expression}
|
rankhospital <- function(state, outcome, num = "best") {
    # Return the name of the hospital in `state` with rank `num` for 30-day
    # mortality from `outcome`, reading "outcome-of-care-measures.csv" from
    # the working directory.
    #
    # Args:
    #   state:   two-letter state abbreviation (validated against the data).
    #   outcome: one of "heart attack", "heart failure", "pneumonia".
    #   num:     rank wanted -- "best", "worst", or a positive integer.
    # Returns the hospital name (character), or NA when `num` exceeds the
    # number of ranked hospitals. Ties are broken alphabetically by name.
    hospital_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    if (!(state %in% hospital_data$State)) {
        # Message fixed: it previously claimed "Error in best(...)" (copied
        # from another function) and duplicated stop()'s own "Error in" prefix.
        stop("invalid state")
    }
    # Mortality-rate column per outcome: 11 = heart attack, 17 = heart
    # failure, 23 = pneumonia. switch() yields NULL for anything else.
    outcome_col <- switch(outcome,
                          "heart attack" = 11,
                          "heart failure" = 17,
                          "pneumonia" = 23)
    if (is.null(outcome_col)) {
        stop("invalid outcome")
    }
    state_hospital_data <- subset(hospital_data, State == state)
    # Convert only the needed column, only on the state subset (the original
    # converted three columns on the full table and again on the subset).
    # "Not Available" entries become NA; the coercion warning is expected.
    state_hospital_data[, outcome_col] <-
        suppressWarnings(as.numeric(state_hospital_data[, outcome_col]))
    # Rank by mortality rate, breaking ties by hospital name (column 2);
    # na.last = NA drops hospitals with no data for this outcome.
    state_hospital_data <- state_hospital_data[order(state_hospital_data[, outcome_col],
                                                     state_hospital_data[, 2],
                                                     na.last = NA), ]
    if (num == "best")
        num <- 1
    else if (num == "worst")
        num <- nrow(state_hospital_data)
    else if (num > nrow(state_hospital_data))
        # Was `return (c("NA"))` -- a literal string, which defeats is.na()
        # checks in callers; return a real NA instead.
        return (NA)
    return (as.vector(state_hospital_data[num, 2]))
}
# rankhospital: name of the hospital in `state` with rank `num` ("best",
# "worst", or an integer) for 30-day mortality from `outcome`, using the
# "outcome-of-care-measures.csv" file in the working directory.
# Ties are broken alphabetically by hospital name (column 2).
rankhospital <- function(state, outcome, num = "best") {
    hospital_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    # Mortality-rate columns: 11 = heart attack, 17 = heart failure,
    # 23 = pneumonia. "Not Available" becomes NA (coercion warning expected).
    # NOTE(review): these whole-table conversions are redundant -- the same
    # columns are re-converted on the state subset below.
    hospital_data[,11] <- suppressWarnings(as.numeric(hospital_data[,11]))
    hospital_data[,17] <- suppressWarnings(as.numeric(hospital_data[,17]))
    hospital_data[,23] <- suppressWarnings(as.numeric(hospital_data[,23]))
    if (!(state %in% hospital_data$State)) {
        # NOTE(review): message names "best(" but this function is
        # rankhospital, and stop() already prefixes "Error in ..." itself.
        stop(paste("Error in best(", state, ",", outcome, ") : invalid state", sep = ""))
    }
    expected_outcomes <- c("heart attack", "heart failure", "pneumonia")
    if (!(outcome %in% expected_outcomes)) {
        stop(paste("Error in best(", state, ",", outcome, ") : invalid outcome", sep = ""))
    }
    state_hospital_data <- subset(hospital_data, State == state)
    state_hospital_data[,11] <- suppressWarnings(as.numeric(state_hospital_data[,11]))
    state_hospital_data[,17] <- suppressWarnings(as.numeric(state_hospital_data[,17]))
    state_hospital_data[,23] <- suppressWarnings(as.numeric(state_hospital_data[,23]))
    # Rank by mortality rate, breaking ties by hospital name (column 2);
    # na.last = NA drops hospitals with no data for this outcome.
    if (outcome == "heart attack")
        state_hospital_data <- state_hospital_data[order(state_hospital_data[,11],
                                                         state_hospital_data[,2],
                                                         na.last=NA),]
    else if (outcome == "heart failure")
        state_hospital_data <- state_hospital_data[order(state_hospital_data[,17],
                                                         state_hospital_data[,2],
                                                         na.last=NA),]
    else
        state_hospital_data <- state_hospital_data[order(state_hospital_data[,23],
                                                         state_hospital_data[,2],
                                                         na.last=NA),]
    # Resolve symbolic ranks; out-of-range numeric ranks yield "NA".
    if (num == "best")
        num <- 1
    else if (num == "worst")
        num <- nrow(state_hospital_data)
    else if (num > nrow(state_hospital_data))
        # NOTE(review): this is the literal string "NA", not a true NA value
        # -- confirm whether callers expect a real NA here.
        return (c("NA"))
    return (as.vector(state_hospital_data[num,2]))
}
#######################################
#######################################
#######################################
### Simulate validation data study 1
######## Setting up #########
# Fixed seed for reproducible validation datasets.
set.seed(123)
source("./src/setup.R")
source("./src/data generation functions.R")

# Study-1 simulation settings prepared by setup.R.
s1 <- read_rds(study_1_settings)
# Generate the validation datasets (wrapped in system.time for a runtime report).
system.time(s1_val_data <- generate_data(s1, validation = TRUE))
# Quick sanity check: mean of element 11 of each generated dataset.
lapply(lapply(s1_val_data, '[[', 11), mean)

# Save each dataset to its own .Rds file, suffixed a, b, c, ... in list order.
# The suffix vector is loop-invariant, so it is computed once here (it was
# previously rebuilt on every iteration), and each element is saved directly
# instead of the old assign()/rm() round trip through the global environment
# (whose rm(pattern = ) was a regex and could match more objects than intended).
ind <- letters[seq_along(s1_val_data)]
for (i in seq_along(s1_val_data)) {
  saveRDS(s1_val_data[[i]],
          file = paste0(study_1_val_data, "s1_val_data_", ind[i], ".Rds")) # add name of file to path
}
| /src/old/validation data generation study 1.R | permissive | SagevdBrand/Master-thesis | R | false | false | 682 | r | #######################################
#######################################
#######################################
#######################################
### Simulate validation data study 1
######## Setting up #########
# Fixed seed for reproducible validation datasets.
set.seed(123)
source("./src/setup.R")
source("./src/data generation functions.R")
# Study-1 simulation settings prepared by setup.R.
s1 <- read_rds(study_1_settings)
# Generate the validation datasets (wrapped in system.time for a runtime report).
system.time(s1_val_data <- generate_data(s1, validation = TRUE))
# Quick sanity check: mean of element 11 of each generated dataset.
lapply(lapply(s1_val_data,'[[', 11), mean)
# Save each dataset to its own .Rds file, suffixed a, b, c, ... in list order.
for(i in 1:length(s1_val_data)) {
# NOTE(review): `ind` is loop-invariant and could be computed once before the loop.
ind <- letters[1:length(s1_val_data)]
# NOTE(review): assign() creates a throwaway global that rm() deletes right after;
# saveRDS(s1_val_data[[i]], ...) would do the same without touching the global env,
# and rm(pattern = ) is a regex that could match more objects than intended.
saveRDS(assign(paste0("s1_", i), s1_val_data[[i]]), file = paste0(study_1_val_data, "s1_val_data_", ind[i],".Rds")) # add name of file to path
rm(list = ls(pattern = paste0("s1_",i)))
}
# Load validation cohort 1, score every sample against the cell-type and
# immune-resistance signatures, cache the scored object back to disk, and
# return it.
set.ValCo1<-function(){
  r<-readRDS("../Data/ValidationCohorts/ValidationCohort1.rds")
  # Signature objects loaded into this environment: cell.sig (cell types)
  # and res.sig (immune resistance program).
  load("../Results/Signatures/cell.type.sig.full.RData")
  load("../Results/Signatures/resistance.program.RData")
  # Overall-expression scores; only the five major cell types are used.
  r<-compute.samples.res.scores(r = r,res.sig = res.sig,
                                cell.sig = cell.sig[c("B.cell","CAF","Macrophage","T.cell","Mal")],
                                residu.flag = F,cc.sig = NULL,num.rounds = 1000)
  # Cache the scored cohort so downstream calls can readRDS() it directly.
  saveRDS(r,file = "../Data/ValidationCohorts/ValidationCohort1.rds")
  return(r)
}
# Test the resistance program in validation cohort 1 with hierarchical linear
# models (patient as random effect), comparing on/post-treatment biopsies to
# baseline for ICI, targeted therapy, and other immunotherapy. Results are
# cached to disk and a summary is printed.
test.ValCo1<-function(r){
  if(is.null(r)){r<-readRDS("../Data/ValidationCohorts/ValidationCohort1.rds")}
  # Resistance scores with the TME (cell-type) component regressed out.
  r$residu<-t(get.residuals(t(r$res),r$tme))
  # Treatment indicators: on- or post-treatment biopsies per modality;
  # `immu` is non-ICI immunotherapy only.
  r$ici<-r$B.ici[,"on"]|r$B.ici[,"post"]
  r$tr<-r$B.target[,"on"]|r$B.target[,"post"]
  r$immu<-(r$B.immun[,"on"]|r$B.immun[,"post"])&!r$ici
  r$ici.on<-r$B.ici[,"on"];r$ici.post<-r$B.ici[,"post"]
  r$tr.on<-r$B.target[,"on"];r$tr.post<-r$B.target[,"post"]
  results<-list(main = "Validation Cohort 1")
  # Within-patient ANOVA p-values, before and after TME filtering.
  results$anova<-cbind.data.frame(original = apply(r$res,2,function(x) get.anova.p(x,r$patients)),
                                  TME.filtered = (apply(r$residu,2,function(x) get.anova.p(x,r$patients))))
  # Mixed model for a resistance score y: random patient intercept + TME +
  # treatment indicators; returns signed z-scores for the treatment terms.
  # NOTE(review): mtcars is passed as lmer's data argument but holds none of
  # the model variables, so they resolve from the with(r, ...) environment --
  # appears copied from an example; confirm intentional.
  f1<-function(y){
    r$y<-y
    M1 <- with(r, lmer (y ~ (1 | patients) + tme + ici + tr + immu, mtcars))
    idx<-c("iciTRUE","trTRUE","immuTRUE")
    v<-summary(M1)$coefficients[idx,c("Estimate","Pr(>|t|)")]
    z<-get.cor.zscores(v[,"Estimate"],v[,"Pr(>|t|)"])
    return(z)
  }
  # Same model for TME scores, with on/post treatment split out separately.
  f2<-function(y){
    r$y<-y
    M1 <- with(r, lmer (y ~ (1 | patients) + ici.on + ici.post + tr.on + tr.post + immu, mtcars))
    idx<-c("ici.onTRUE","ici.postTRUE","tr.onTRUE","tr.postTRUE")
    v<-summary(M1)$coefficients[idx,c("Estimate","Pr(>|t|)")]
    z<-get.cor.zscores(v[,"Estimate"],v[,"Pr(>|t|)"])
    return(z)
  }
  results$hlm.res<-t(apply(r$res,2,f1))
  colnames(results$hlm.res)<-gsub("TRUE","",colnames(results$hlm.res))
  # Convert the ICI z-score to a p-value. Fixed: this previously read
  # `results$hlm[,"ici"]`, which only worked through `$` partial matching to
  # `hlm.res`; the element is now referenced explicitly (same value).
  results$hlm.res<-cbind.data.frame(ICI.p = 10^-abs(results$hlm.res[,"ici"]),results$hlm.res)
  results$hlm.tme<-t(apply(r$tme,2,f2))
  colnames(results$hlm.tme)<-gsub("TRUE","",colnames(results$hlm.tme))
  results$hlm.tme<-cbind.data.frame(ICI.on.p = 10^-abs(results$hlm.tme[,"ici.on"]),results$hlm.tme)
  saveRDS(results,file = "../Results/Predictors/ValCoh1.prf.rds")
  print(paste("The immune resistance program is induced in resistant on/post-ICI tumors:",
              format.pval.private(results$hlm.res[c("res"),"ICI.p"])))
  print(paste("The refined immune resistance program is induced in resistant on/post-ICI tumors:",
              format.pval.private(results$hlm.res[c("resF"),"ICI.p"])))
  print(paste("T cell infiltration on ICI :",format.pval.private(results$hlm.tme["T.cell","ICI.on.p"])))
  print(results$hlm.res[c("resF","res"),])
  print(results$hlm.tme["T.cell",])
  return(results)
}
# Score the matched pre/on-MAPKi cohort (Hugo et al., Cell 2015) with the
# resistance program, the cell-type signatures, and the published
# MAPKi-resistance signatures, then cache the scored object back to disk.
set.matched.MAPKi.Hugo<-function(){
  r<-readRDS("../Data/PublicData/MAPKi.Hugo.Cell.2015.rds")
  # Signature objects loaded into this environment: res.sig and cell.sig.
  load("../Results/Signatures/resistance.program.RData")
  load("../Results/Signatures/cell.type.sig.RData")
  # Published MAPKi-resistance up/down gene sets.
  mapki.sig<-readRDS("../Data/PublicData/public.ICR.sig.rds")[c("mapki.res.up","mapki.res.down")]
  r<-compute.samples.res.scores(r = r,res.sig = res.sig,cell.sig = cell.sig,
                                residu.flag = F,cc.sig = NULL,num.rounds = 1000)
  # Overall-expression scores for the MAPKi signature (bulk data).
  r$mapki<-get.OE(r = r,sig = mapki.sig,bulk.flag = T,num.rounds = 1000)
  saveRDS(r,file = "../Data/PublicData/MAPKi.Hugo.Cell.2015.rds")
  return(r)
}
# Mixed-model test of whether resistance scores increase on MAPKi progression
# (`prog`) within patients, controlling for the five major TME cell types.
# Returns one (Estimate, p-value) row per score and caches the table to disk.
test.matched.MAPKi.Hugo<-function(r){
  if(is.null(r)){r<-readRDS("../Data/PublicData/MAPKi.Hugo.Cell.2015.rds")}
  # Restrict TME covariates to the five major cell types.
  r$tme<-r$tme[,c("B.cell","CAF","Macrophage","T.cell","Mal")]
  # Per-score model: progression fixed effect, patient random intercept, TME
  # covariates; returns the progTRUE coefficient and its p-value.
  # NOTE(review): mtcars is passed as lmer's data argument but holds none of
  # the model variables, so they resolve from the with(r, ...) environment.
  f<-function(y){
    r$y<-y
    M1 <- with(r,lmer (y ~ prog + (1 | patient) + tme, mtcars))
    v<-summary(M1)$coefficient["progTRUE",c("Estimate","Pr(>|t|)")]
    return(v)
  }
  # One row per resistance score plus the MAPKi signature scores.
  results<-t(apply(cbind.data.frame(r$res,r$mapki),2,f))
  print(results[c("res","resF","mapki.res"),])
  saveRDS(results,file = "../Results/Predictors/MAPKi.res.Hugo2015.prf.rds")
  return(results)
}
| /Code/ImmRes3_longitudinal.R | no_license | chitrita/ImmuneResistance | R | false | false | 4,001 | r | set.ValCo1<-function(){
r<-readRDS("../Data/ValidationCohorts/ValidationCohort1.rds")
load("../Results/Signatures/cell.type.sig.full.RData")
load("../Results/Signatures/resistance.program.RData")
r<-compute.samples.res.scores(r = r,res.sig = res.sig,
cell.sig = cell.sig[c("B.cell","CAF","Macrophage","T.cell","Mal")],
residu.flag = F,cc.sig = NULL,num.rounds = 1000)
saveRDS(r,file = "../Data/ValidationCohorts/ValidationCohort1.rds")
return(r)
}
test.ValCo1<-function(r){
  # Longitudinal analysis of Validation Cohort 1: tests whether the immune
  # resistance program scores and TME composition change on/post treatment,
  # using linear mixed models with a random intercept per patient.
  # r : cohort list from set.ValCo1(); read from disk when NULL.
  # Returns (and saves) a list with ANOVA p-values and mixed-model z-scores.
  if(is.null(r)){r <- readRDS("../Data/ValidationCohorts/ValidationCohort1.rds")}
  r$residu <- t(get.residuals(t(r$res), r$tme))
  # Treatment indicators: on/post immune checkpoint inhibition (ici),
  # targeted therapy (tr), and other immunotherapy excluding ICI (immu).
  r$ici <- r$B.ici[, "on"] | r$B.ici[, "post"]
  r$tr <- r$B.target[, "on"] | r$B.target[, "post"]
  r$immu <- (r$B.immun[, "on"] | r$B.immun[, "post"]) & !r$ici
  r$ici.on <- r$B.ici[, "on"]; r$ici.post <- r$B.ici[, "post"]
  r$tr.on <- r$B.target[, "on"]; r$tr.post <- r$B.target[, "post"]
  results <- list(main = "Validation Cohort 1")
  # Within-patient variation of each score, before and after TME filtering.
  results$anova <- cbind.data.frame(original = apply(r$res, 2, function(x) get.anova.p(x, r$patients)),
                                    TME.filtered = (apply(r$residu, 2, function(x) get.anova.p(x, r$patients))))
  # Mixed model for program scores: treatment type as fixed effects.
  f1 <- function(y){
    r$y <- y
    # NOTE(review): `mtcars` is passed as lmer's data argument; the model
    # variables appear to resolve from `r` via with() -- confirm intended.
    M1 <- with(r, lmer(y ~ (1 | patients) + tme + ici + tr + immu, mtcars))
    idx <- c("iciTRUE", "trTRUE", "immuTRUE")
    v <- summary(M1)$coefficients[idx, c("Estimate", "Pr(>|t|)")]
    z <- get.cor.zscores(v[, "Estimate"], v[, "Pr(>|t|)"])
    return(z)
  }
  # Mixed model for TME scores: on/post split per treatment as fixed effects.
  f2 <- function(y){
    r$y <- y
    M1 <- with(r, lmer(y ~ (1 | patients) + ici.on + ici.post + tr.on + tr.post + immu, mtcars))
    idx <- c("ici.onTRUE", "ici.postTRUE", "tr.onTRUE", "tr.postTRUE")
    v <- summary(M1)$coefficients[idx, c("Estimate", "Pr(>|t|)")]
    z <- get.cor.zscores(v[, "Estimate"], v[, "Pr(>|t|)"])
    return(z)
  }
  results$hlm.res <- t(apply(r$res, 2, f1))
  colnames(results$hlm.res) <- gsub("TRUE", "", colnames(results$hlm.res))
  # BUG FIX: was `results$hlm[,"ici"]`, which only resolved to `hlm.res`
  # through R's partial `$` matching; reference the element explicitly.
  results$hlm.res <- cbind.data.frame(ICI.p = 10^-abs(results$hlm.res[, "ici"]), results$hlm.res)
  results$hlm.tme <- t(apply(r$tme, 2, f2))
  colnames(results$hlm.tme) <- gsub("TRUE", "", colnames(results$hlm.tme))
  results$hlm.tme <- cbind.data.frame(ICI.on.p = 10^-abs(results$hlm.tme[, "ici.on"]), results$hlm.tme)
  saveRDS(results, file = "../Results/Predictors/ValCoh1.prf.rds")
  print(paste("The immune resistance program is induced in resistant on/post-ICI tumors:",
              format.pval.private(results$hlm.res[c("res"), "ICI.p"])))
  print(paste("The refined immune resistance program is induced in resistant on/post-ICI tumors:",
              format.pval.private(results$hlm.res[c("resF"), "ICI.p"])))
  print(paste("T cell infiltration on ICI :", format.pval.private(results$hlm.tme["T.cell", "ICI.on.p"])))
  print(results$hlm.res[c("resF", "res"), ])
  print(results$hlm.tme["T.cell", ])
  return(results)
}
# Compute immune-resistance and MAPKi-resistance overall-expression (OE)
# scores for the matched MAPKi cohort (Hugo et al., Cell 2015) and cache
# the augmented object back to disk (overwrites the input RDS file).
set.matched.MAPKi.Hugo<-function(){
r<-readRDS("../Data/PublicData/MAPKi.Hugo.Cell.2015.rds")
# Loads `res.sig` and `cell.sig` used in the calls below.
load("../Results/Signatures/resistance.program.RData")
load("../Results/Signatures/cell.type.sig.RData")
mapki.sig<-readRDS("../Data/PublicData/public.ICR.sig.rds")[c("mapki.res.up","mapki.res.down")]
r<-compute.samples.res.scores(r = r,res.sig = res.sig,cell.sig = cell.sig,
residu.flag = F,cc.sig = NULL,num.rounds = 1000)
# OE score of the MAPKi resistance signatures on the bulk profiles.
r$mapki<-get.OE(r = r,sig = mapki.sig,bulk.flag = T,num.rounds = 1000)
saveRDS(r,file = "../Data/PublicData/MAPKi.Hugo.Cell.2015.rds")
return(r)
}
# Mixed-effects test of score induction in the matched MAPKi cohort
# (Hugo et al., Cell 2015): for each score column, fit
#   score ~ prog + (1 | patient) + tme
# and keep the estimate and p-value of the on-treatment indicator `prog`.
test.matched.MAPKi.Hugo<-function(r){
# r : cohort list from set.matched.MAPKi.Hugo(); read from disk when NULL.
if(is.null(r)){r<-readRDS("../Data/PublicData/MAPKi.Hugo.Cell.2015.rds")}
r$tme<-r$tme[,c("B.cell","CAF","Macrophage","T.cell","Mal")]
f<-function(y){
r$y<-y
# NOTE(review): `mtcars` is passed as lmer's data argument; the model
# variables appear to resolve from `r` via with() -- confirm intended.
M1 <- with(r,lmer (y ~ prog + (1 | patient) + tme, mtcars))
# NOTE(review): `$coefficient` relies on partial matching of the
# `coefficients` element of summary() -- consider spelling it out.
v<-summary(M1)$coefficient["progTRUE",c("Estimate","Pr(>|t|)")]
return(v)
}
results<-t(apply(cbind.data.frame(r$res,r$mapki),2,f))
print(results[c("res","resF","mapki.res"),])
# Persist one row per score (Estimate, Pr(>|t|)).
saveRDS(results,file = "../Results/Predictors/MAPKi.res.Hugo2015.prf.rds")
return(results)
}
|
\name{lauderdale-package}
\alias{lauderdale-package}
\alias{lauderdale}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length
of about one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Your Name, email optional.
Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
| /man/lauderdale-package.Rd | no_license | petershan1119/lauderdale | R | false | false | 830 | rd | \name{lauderdale-package}
\alias{lauderdale-package}
\alias{lauderdale}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length
of about one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Your Name, email optional.
Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
|
#
# See ../README.md
#
#
# Run as: Rscript ch-1.r
#
# Emit the precomputed answer sequence as one comma-separated line.
answer <- c(0, 0, 0, 2, 0, 5, 0, 6, 3, 7, 0, 15, 0, 9, 8, 14, 0, 20, 0, 21)
cat(paste(answer, collapse = ", "), "\n", sep = "")
| /challenge-109/abigail/r/ch-1.r | no_license | southpawgeek/perlweeklychallenge-club | R | false | false | 127 | r | #
# See ../README.md
#
#
# Run as: Rscript ch-1.r
#
# Print the challenge's answer sequence followed by a newline.
cat ("0, 0, 0, 2, 0, 5, 0, 6, 3, 7, 0, 15, 0, 9, 8, 14, 0, 20, 0, 21\n")
|
SPICalcPanelCmd <- function(){
listOpenFiles <- openFile_ttkcomboList()
if(Sys.info()["sysname"] == "Windows"){
wscrlwin <- w.scale(26)
hscrlwin <- h.scale(46)
largeur0 <- as.integer(w.scale(22)/sfont0)
largeur1 <- as.integer(w.scale(27)/sfont0)
largeur2 <- as.integer(w.scale(29)/sfont0)
largeur3 <- 20
largeur4 <- 26
largeur5 <- 20
}else{
wscrlwin <- w.scale(27)
hscrlwin <- h.scale(48.5)
largeur0 <- as.integer(w.scale(16)/sfont0)
largeur1 <- as.integer(w.scale(21)/sfont0)
largeur2 <- as.integer(w.scale(22)/sfont0)
largeur3 <- 15
largeur4 <- 20
largeur5 <- 14
}
GeneralParameters <- list(intstep = "dekadal", data.type = "cdtstation",
cdtstation = "", cdtdataset = "",
outfreq = "month", tscale = 3, distr = 'Gamma',
monitoring = FALSE,
dates = list(year1 = 2018, mon1 = 1, dek1 = 1, year2 = 2018, mon2 = 2, dek2 = 3),
outdir = "")
###################
cmd.frame <- tkframe(panel.left)
tknote.cmd <- bwNoteBook(cmd.frame)
tkgrid(tknote.cmd, sticky = 'nwes')
tkgrid.columnconfigure(tknote.cmd, 0, weight = 1)
cmd.tab1 <- bwAddTab(tknote.cmd, text = "SPI")
cmd.tab2 <- bwAddTab(tknote.cmd, text = "Maps")
cmd.tab3 <- bwAddTab(tknote.cmd, text = "Graphs")
cmd.tab4 <- bwAddTab(tknote.cmd, text = "Boundaries")
bwRaiseTab(tknote.cmd, cmd.tab1)
tkgrid.columnconfigure(cmd.tab1, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab2, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab3, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab4, 0, weight = 1)
#######################################################################################################
#Tab1
frTab1 <- tkframe(cmd.tab1)
tkgrid(frTab1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid.columnconfigure(frTab1, 0, weight = 1)
scrw1 <- bwScrolledWindow(frTab1)
tkgrid(scrw1)
tkgrid.columnconfigure(scrw1, 0, weight = 1)
subfr1 <- bwScrollableFrame(scrw1, width = wscrlwin, height = hscrlwin)
tkgrid.columnconfigure(subfr1, 0, weight = 1)
#######################
frameTimeS <- ttklabelframe(subfr1, text = "Time step of input data", relief = 'groove')
timeSteps <- tclVar()
CbperiodVAL <- c('Daily data', 'Pentad data', 'Dekadal data', 'Monthly data')
tclvalue(timeSteps) <- switch(GeneralParameters$intstep,
'daily' = CbperiodVAL[1],
'pentad' = CbperiodVAL[2],
'dekadal' = CbperiodVAL[3],
'monthly' = CbperiodVAL[4])
cb.fperiod <- ttkcombobox(frameTimeS, values = CbperiodVAL, textvariable = timeSteps, width = largeur1)
tkgrid(cb.fperiod, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.fperiod, 'Select the time step of the data')
status.bar.display(cb.fperiod, TextOutputVar, 'Select the time step of the data')
############
tkbind(cb.fperiod, "<<ComboboxSelected>>", function(){
valSPIfreq <- if(str_trim(tclvalue(timeSteps)) == 'Monthly data') "month" else c("dekad", "month")
tkconfigure(cb.SPIfreq, values = valSPIfreq)
if(str_trim(tclvalue(timeSteps)) == 'Monthly data'){
tclvalue(out.spifreq) <- "month"
tclvalue(txt.suffix.var) <- '-month'
}
stateTscale <- if(str_trim(tclvalue(out.spifreq)) == 'month') "normal" else "disabled"
tkconfigure(spin.Tscale, state = stateTscale)
})
#######################
frameInData <- ttklabelframe(subfr1, text = "Input Data", relief = 'groove')
DataType <- tclVar()
CbdatatypeVAL <- c('CDT stations data format', 'CDT dataset format (gridded)')
tclvalue(DataType) <- switch(GeneralParameters$data.type,
'cdtstation' = CbdatatypeVAL[1],
'cdtdataset' = CbdatatypeVAL[2])
if(GeneralParameters$data.type == 'cdtstation'){
input.file <- tclVar(GeneralParameters$cdtstation)
txt.INData <- 'File containing stations Precip data'
}else{
input.file <- tclVar(GeneralParameters$cdtdataset)
txt.INData <- 'Index file (*.rds) for Precip dataset'
}
txt.INData.var <- tclVar(txt.INData)
txt.datatype <- tklabel(frameInData, text = "Format", anchor = 'w', justify = 'left')
cb.datatype <- ttkcombobox(frameInData, values = CbdatatypeVAL, textvariable = DataType, width = largeur0)
txt.infile <- tklabel(frameInData, text = tclvalue(txt.INData.var), textvariable = txt.INData.var, anchor = 'w', justify = 'left')
if(GeneralParameters$data.type == 'cdtstation'){
cb.en.infile <- ttkcombobox(frameInData, values = unlist(listOpenFiles), textvariable = input.file, width = largeur1)
}else{
cb.en.infile <- tkentry(frameInData, textvariable = input.file, width = largeur2)
}
bt.infile <- tkbutton(frameInData, text = "...")
############
tkconfigure(bt.infile, command = function(){
if(GeneralParameters$data.type == 'cdtstation'){
dat.opfiles <- getOpenFiles(main.win, all.opfiles)
if(!is.null(dat.opfiles)){
nopf <- length(AllOpenFilesType)
AllOpenFilesType[[nopf+1]] <<- 'ascii'
AllOpenFilesData[[nopf+1]] <<- dat.opfiles
listOpenFiles[[length(listOpenFiles)+1]] <<- AllOpenFilesData[[nopf+1]][[1]]
tclvalue(input.file) <- AllOpenFilesData[[nopf+1]][[1]]
tkconfigure(cb.en.infile, values = unlist(listOpenFiles), textvariable = input.file)
}else return(NULL)
}else{
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
tclvalue(input.file) <- if(path.rds%in%c("", "NA") | is.na(path.rds)) "" else path.rds
}
})
############
tkgrid(txt.datatype, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.datatype, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.infile, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.en.infile, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 9, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.infile, row = 2, column = 9, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
############
infobulle(cb.datatype, 'Select the format of the input data')
status.bar.display(cb.datatype, TextOutputVar, 'Select the format of the input data')
if(GeneralParameters$data.type == 'cdtstation'){
infobulle(cb.en.infile, 'Select the file containing the input data')
status.bar.display(cb.en.infile, TextOutputVar, 'Select the file containing the input data')
infobulle(bt.infile, 'Browse file if not listed')
status.bar.display(bt.infile, TextOutputVar, 'Browse file if not listed')
}else{
infobulle(cb.en.infile, 'Enter the full path to the file <dataset name>.rds')
status.bar.display(cb.en.infile, TextOutputVar, 'Enter the full path to the file <dataset name>.rds')
infobulle(bt.infile, 'or browse here')
status.bar.display(bt.infile, TextOutputVar, 'or browse here')
}
############
tkbind(cb.datatype, "<<ComboboxSelected>>", function(){
tkdestroy(cb.en.infile)
tclvalue(input.file) <- ''
###
if(str_trim(tclvalue(DataType)) == 'CDT stations data format'){
tclvalue(txt.INData.var) <- 'File containing stations Precip data'
cb.en.infile <- ttkcombobox(frameInData, values = unlist(listOpenFiles), textvariable = input.file, width = largeur1)
tkconfigure(bt.infile, command = function(){
dat.opfiles <- getOpenFiles(main.win, all.opfiles)
if(!is.null(dat.opfiles)){
nopf <- length(AllOpenFilesType)
AllOpenFilesType[[nopf+1]] <<- 'ascii'
AllOpenFilesData[[nopf+1]] <<- dat.opfiles
listOpenFiles[[length(listOpenFiles)+1]] <<- AllOpenFilesData[[nopf+1]][[1]]
tclvalue(input.file) <- AllOpenFilesData[[nopf+1]][[1]]
tkconfigure(cb.en.infile, values = unlist(listOpenFiles), textvariable = input.file)
}else return(NULL)
})
infobulle(cb.en.infile, 'Select the file containing the input data')
status.bar.display(cb.en.infile, TextOutputVar, 'Select the file containing the input data')
infobulle(bt.infile, 'Browse file if not listed')
status.bar.display(bt.infile, TextOutputVar, 'Browse file if not listed')
}
###
if(str_trim(tclvalue(DataType)) == 'CDT dataset format (gridded)'){
tclvalue(txt.INData.var) <- 'Index file (*.rds) for Precip dataset'
cb.en.infile <- tkentry(frameInData, textvariable = input.file, width = largeur2)
tkconfigure(bt.infile, command = function(){
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
tclvalue(input.file) <- if(path.rds%in%c("", "NA") | is.na(path.rds)) "" else path.rds
})
infobulle(cb.en.infile, 'Enter the full path to the file <dataset name>.rds')
status.bar.display(cb.en.infile, TextOutputVar, 'Enter the full path to the file <dataset name>.rds')
infobulle(bt.infile, 'or browse here')
status.bar.display(bt.infile, TextOutputVar, 'or browse here')
}
tkgrid(cb.en.infile, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 9, padx = 0, pady = 1, ipadx = 1, ipady = 1)
})
#############################
frameMoni <- tkframe(subfr1, relief = 'groove', borderwidth = 2)
monitoring <- tclVar(GeneralParameters$monitoring)
istart.yrs <- tclVar(GeneralParameters$dates$year1)
istart.mon <- tclVar(GeneralParameters$dates$mon1)
istart.dek <- tclVar(GeneralParameters$dates$dek1)
iend.yrs <- tclVar(GeneralParameters$dates$year2)
iend.mon <- tclVar(GeneralParameters$dates$mon2)
iend.dek <- tclVar(GeneralParameters$dates$dek2)
if(GeneralParameters$monitoring){
statedates <- 'normal'
statedatedek <- if(GeneralParameters$outfreq == 'month') 'disabled' else 'normal'
}else{
statedates <- 'disabled'
statedatedek <- 'disabled'
}
chk.Moni <- tkcheckbutton(frameMoni, variable = monitoring, text = "Monitoring: update SPI dataset", anchor = 'w', justify = 'left')
fr.Moni <- tkframe(frameMoni)
txt.deb.Moni <- tklabel(fr.Moni, text = 'Start date', anchor = 'e', justify = 'right')
txt.fin.Moni <- tklabel(fr.Moni, text = 'End date', anchor = 'e', justify = 'right')
txt.yrs.Moni <- tklabel(fr.Moni, text = 'Year')
txt.mon.Moni <- tklabel(fr.Moni, text = 'Month')
txt.dek.Moni <- tklabel(fr.Moni, text = 'Dekad')
en.yrs1.Moni <- tkentry(fr.Moni, width = 4, textvariable = istart.yrs, justify = "right", state = statedates)
en.mon1.Moni <- tkentry(fr.Moni, width = 4, textvariable = istart.mon, justify = "right", state = statedates)
en.dek1.Moni <- tkentry(fr.Moni, width = 4, textvariable = istart.dek, justify = "right", state = statedatedek)
en.yrs2.Moni <- tkentry(fr.Moni, width = 4, textvariable = iend.yrs, justify = "right", state = statedates)
en.mon2.Moni <- tkentry(fr.Moni, width = 4, textvariable = iend.mon, justify = "right", state = statedates)
en.dek2.Moni <- tkentry(fr.Moni, width = 4, textvariable = iend.dek, justify = "right", state = statedatedek)
tkgrid(txt.deb.Moni, row = 1, column = 0, sticky = 'ew', rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(txt.fin.Moni, row = 2, column = 0, sticky = 'ew', rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(txt.yrs.Moni, row = 0, column = 1, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(txt.mon.Moni, row = 0, column = 2, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(txt.dek.Moni, row = 0, column = 3, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.yrs1.Moni, row = 1, column = 1, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.mon1.Moni, row = 1, column = 2, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.dek1.Moni, row = 1, column = 3, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.yrs2.Moni, row = 2, column = 1, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.mon2.Moni, row = 2, column = 2, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.dek2.Moni, row = 2, column = 3, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(chk.Moni, row = 0, column = 0, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(fr.Moni, row = 1, column = 0, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
###############
tkbind(chk.Moni, "<Button-1>", function(){
if(tclvalue(monitoring) == "0"){
statedates <- 'normal'
statedatedek <- if(str_trim(tclvalue(out.spifreq)) == 'month') 'disabled' else 'normal'
stateDistr <- 'disabled'
tclvalue(txt.save.var) <- "Index file (SPI.rds) for SPI data"
tkconfigure(bt.outSPI, command = function(){
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
tclvalue(outSPIdir) <- if(path.rds%in%c("", "NA") | is.na(path.rds)) "" else path.rds
})
}else{
statedates <- 'disabled'
statedatedek <- 'disabled'
stateDistr <- 'normal'
tclvalue(txt.save.var) <- "Directory to save the outputs"
tkconfigure(bt.outSPI, command = function(){
dirSPI <- tk_choose.dir(getwd(), "")
tclvalue(outSPIdir) <- if(dirSPI%in%c("", "NA") | is.na(dirSPI)) "" else dirSPI
})
}
tkconfigure(en.yrs1.Moni, state = statedates)
tkconfigure(en.mon1.Moni, state = statedates)
tkconfigure(en.dek1.Moni, state = statedatedek)
tkconfigure(en.yrs2.Moni, state = statedates)
tkconfigure(en.mon2.Moni, state = statedates)
tkconfigure(en.dek2.Moni, state = statedatedek)
tkconfigure(cb.Distrb, state = stateDistr)
})
#############################
frameParams <- tkframe(subfr1, relief = 'groove', borderwidth = 2)
out.spifreq <- tclVar(GeneralParameters$outfreq)
if(GeneralParameters$outfreq == 'dekad'){
txt.suffix <- '-dekad'
stateTscale <- "disabled"
up.tscale <- 1
val.tscale <- 1
}else{
txt.suffix <- '-month'
stateTscale <- "normal"
up.tscale <- 60
val.tscale <- GeneralParameters$tscale
}
txt.suffix.var <- tclVar(txt.suffix)
frameTscale <- tkframe(frameParams)
txt.SPIfreq <- tklabel(frameTscale, text = "SPI", anchor = 'e', justify = 'right')
cb.SPIfreq <- ttkcombobox(frameTscale, values = c("dekad", "month"), textvariable = out.spifreq, width = 8)
txt.Tscale1 <- tklabel(frameTscale, text = "Timescale", anchor = 'e', justify = 'right')
spin.Tscale <- ttkspinbox(frameTscale, from = 1, to = up.tscale, increment = 1, justify = 'center', width = 2, state = stateTscale)
tkset(spin.Tscale, val.tscale)
txt.Tscale2 <- tklabel(frameTscale, text = tclvalue(txt.suffix.var), textvariable = txt.suffix.var, anchor = 'w', justify = 'left')
tkgrid(txt.SPIfreq, cb.SPIfreq, txt.Tscale1, spin.Tscale, txt.Tscale2)
########
tkbind(cb.SPIfreq, "<<ComboboxSelected>>", function(){
if(str_trim(tclvalue(out.spifreq)) == 'dekad'){
stateTscale <- "disabled"
tclvalue(txt.suffix.var) <- '-dekad'
tkset(spin.Tscale, 1)
statedatedek <- if(tclvalue(monitoring) == "1") "normal" else "disabled"
}
if(str_trim(tclvalue(out.spifreq)) == 'month'){
stateTscale <- "normal"
tclvalue(txt.suffix.var) <- '-month'
tkconfigure(spin.Tscale, to = 60)
statedatedek <- "disabled"
}
tkconfigure(spin.Tscale, state = stateTscale)
tkconfigure(en.dek1.Moni, state = statedatedek)
tkconfigure(en.dek2.Moni, state = statedatedek)
})
########
frameDistrb <- tkframe(frameParams)
DistrbVAL <- c("Gamma", "Pearson Type III", "log-Logistic", "Z-Score")
DistrbFun <- tclVar(GeneralParameters$distr)
stateDistr <- if(GeneralParameters$monitoring) 'disabled' else 'normal'
txt.Distrb <- tklabel(frameDistrb, text = "Distribution function", anchor = 'e', justify = 'right')
cb.Distrb <- ttkcombobox(frameDistrb, values = DistrbVAL, textvariable = DistrbFun, width = largeur3, state = stateDistr)
tkgrid(txt.Distrb, cb.Distrb)
########
tkgrid(frameTscale, row = 0, column = 0, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameDistrb, row = 1, column = 0, padx = 1, pady = 1, ipadx = 1, ipady = 1)
#############################
frameDirSav <- tkframe(subfr1, relief = 'groove', borderwidth = 2)
outSPIdir <- tclVar(GeneralParameters$outdir)
if(GeneralParameters$monitoring){
text.save <- "Index file (SPI.rds) for SPI data"
}else{
text.save <- "Directory to save the outputs"
}
txt.save.var <- tclVar(text.save)
txt.outSPI <- tklabel(frameDirSav, text = tclvalue(txt.save.var), textvariable = txt.save.var, anchor = 'w', justify = 'left')
en.outSPI <- tkentry(frameDirSav, textvariable = outSPIdir, width = largeur2)
bt.outSPI <- tkbutton(frameDirSav, text = "...")
######
tkconfigure(bt.outSPI, command = function(){
if(GeneralParameters$monitoring){
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
tclvalue(outSPIdir) <- if(path.rds%in%c("", "NA") | is.na(path.rds)) "" else path.rds
}else{
dirSPI <- tk_choose.dir(getwd(), "")
tclvalue(outSPIdir) <- if(dirSPI%in%c("", "NA") | is.na(dirSPI)) "" else dirSPI
}
})
######
tkgrid(txt.outSPI, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(en.outSPI, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.outSPI, row = 1, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
infobulle(en.outSPI, 'Enter the full path to directory to save outputs')
status.bar.display(en.outSPI, TextOutputVar, 'Enter the full path to directory to save outputs')
infobulle(bt.outSPI, 'or browse here')
status.bar.display(bt.outSPI, TextOutputVar, 'or browse here')
#############################
if(!is.null(EnvSPICalcPlot$DirExist)){
stateCaclBut <- if(tclvalue(EnvSPICalcPlot$DirExist) == "0") "normal" else "disabled"
}else stateCaclBut <- "normal"
calculateBut <- ttkbutton(subfr1, text = "Calculate", state = stateCaclBut)
#################
tkconfigure(calculateBut, command = function(){
GeneralParameters$intstep <- switch(str_trim(tclvalue(timeSteps)),
'Daily data' = 'daily',
'Pentad data' = 'pentad',
'Dekadal data' = 'dekadal',
'Monthly data' = 'monthly')
GeneralParameters$data.type <- switch(str_trim(tclvalue(DataType)),
'CDT stations data format' = 'cdtstation',
'CDT dataset format (gridded)' = 'cdtdataset')
if(str_trim(tclvalue(DataType)) == 'CDT stations data format')
GeneralParameters$cdtstation <- str_trim(tclvalue(input.file))
if(str_trim(tclvalue(DataType)) == 'CDT dataset format (gridded)')
GeneralParameters$cdtdataset <- str_trim(tclvalue(input.file))
GeneralParameters$monitoring <- switch(tclvalue(monitoring), '0' = FALSE, '1' = TRUE)
GeneralParameters$dates$year1 <- as.numeric(str_trim(tclvalue(istart.yrs)))
GeneralParameters$dates$mon1 <- as.numeric(str_trim(tclvalue(istart.mon)))
GeneralParameters$dates$dek1 <- as.numeric(str_trim(tclvalue(istart.dek)))
GeneralParameters$dates$year2 <- as.numeric(str_trim(tclvalue(iend.yrs)))
GeneralParameters$dates$mon2 <- as.numeric(str_trim(tclvalue(iend.mon)))
GeneralParameters$dates$dek2 <- as.numeric(str_trim(tclvalue(iend.dek)))
GeneralParameters$outfreq <- str_trim(tclvalue(out.spifreq))
GeneralParameters$tscale <- as.numeric(str_trim(tclvalue(tkget(spin.Tscale))))
GeneralParameters$distr <- str_trim(tclvalue(DistrbFun))
GeneralParameters$outdir <- str_trim(tclvalue(outSPIdir))
# assign('GeneralParameters', GeneralParameters, envir = .GlobalEnv)
tkconfigure(main.win, cursor = 'watch')
InsertMessagesTxt(main.txt.out, "Calculate SPI ......")
ret <- tryCatch(
computeSPIProcs(GeneralParameters),
#warning = function(w) warningFun(w),
error = function(e) errorFun(e),
finally = tkconfigure(main.win, cursor = '')
)
msg0 <- "SPI calculation finished successfully"
msg1 <- "SPI calculation failed"
if(!is.null(ret)){
if(ret == 0){
InsertMessagesTxt(main.txt.out, msg0)
###################
widgets.Station.Pixel()
ret <- try(set.Data.Scales(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
ret <- try(set.Data.Dates(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
}else InsertMessagesTxt(main.txt.out, msg1, format = TRUE)
}else InsertMessagesTxt(main.txt.out, msg1, format = TRUE)
})
############################################
tkgrid(frameTimeS, row = 0, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameInData, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameMoni, row = 2, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameParams, row = 3, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameDirSav, row = 4, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(calculateBut, row = 5, column = 0, sticky = '', padx = 1, pady = 3, ipadx = 1, ipady = 1)
#######################################################################################################
#Tab2
frTab2 <- tkframe(cmd.tab2)
tkgrid(frTab2, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid.columnconfigure(frTab2, 0, weight = 1)
scrw2 <- bwScrolledWindow(frTab2)
tkgrid(scrw2)
tkgrid.columnconfigure(scrw2, 0, weight = 1)
subfr2 <- bwScrollableFrame(scrw2, width = wscrlwin, height = hscrlwin)
tkgrid.columnconfigure(subfr2, 0, weight = 1)
##############################################
frameDataExist <- ttklabelframe(subfr2, text = "SPI data", relief = 'groove')
EnvSPICalcPlot$DirExist <- tclVar(0)
file.dataIndex <- tclVar()
stateExistData <- if(tclvalue(EnvSPICalcPlot$DirExist) == "1") "normal" else "disabled"
chk.dataIdx <- tkcheckbutton(frameDataExist, variable = EnvSPICalcPlot$DirExist, text = "SPI data already computed", anchor = 'w', justify = 'left')
en.dataIdx <- tkentry(frameDataExist, textvariable = file.dataIndex, width = largeur2, state = stateExistData)
bt.dataIdx <- tkbutton(frameDataExist, text = "...", state = stateExistData)
tkconfigure(bt.dataIdx, command = function(){
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.Stat <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
if(path.Stat%in%c("", "NA") | is.na(path.Stat)) return(NULL)
tclvalue(file.dataIndex) <- path.Stat
if(file.exists(str_trim(tclvalue(file.dataIndex)))){
OutSPIdata <- try(readRDS(str_trim(tclvalue(file.dataIndex))), silent = TRUE)
if(inherits(OutSPIdata, "try-error")){
InsertMessagesTxt(main.txt.out, 'Unable to load SPI data', format = TRUE)
InsertMessagesTxt(main.txt.out, gsub('[\r\n]', '', OutSPIdata[1]), format = TRUE)
tkconfigure(cb.spi.maps, values = "")
tclvalue(EnvSPICalcPlot$spi.tscale) <- ""
tkconfigure(cb.spi.Date, values = "")
tclvalue(EnvSPICalcPlot$spi.date) <- ""
return(NULL)
}
EnvSPICalcPlot$output <- OutSPIdata
EnvSPICalcPlot$PathData <- dirname(str_trim(tclvalue(file.dataIndex)))
###################
widgets.Station.Pixel()
ret <- try(set.Data.Scales(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
ret <- try(set.Data.Dates(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
}
})
tkgrid(chk.dataIdx, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.dataIdx, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.dataIdx, row = 1, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
###############
tkbind(chk.dataIdx, "<Button-1>", function(){
stateExistData <- if(tclvalue(EnvSPICalcPlot$DirExist) == '1') 'disabled' else 'normal'
tkconfigure(en.dataIdx, state = stateExistData)
tkconfigure(bt.dataIdx, state = stateExistData)
stateCaclBut <- if(tclvalue(EnvSPICalcPlot$DirExist) == '1') 'normal' else 'disabled'
tkconfigure(calculateBut, state = stateCaclBut)
})
##############################################
frameSPIMap <- ttklabelframe(subfr2, text = "SPI Map", relief = 'groove')
EnvSPICalcPlot$spi.tscale <- tclVar()
EnvSPICalcPlot$spi.date <- tclVar()
cb.spi.maps <- ttkcombobox(frameSPIMap, values = "", textvariable = EnvSPICalcPlot$spi.tscale, width = largeur4)
bt.spi.maps <- ttkbutton(frameSPIMap, text = "PLOT", width = 7)
cb.spi.Date <- ttkcombobox(frameSPIMap, values = "", textvariable = EnvSPICalcPlot$spi.date, width = largeur5)
bt.spi.Date.prev <- ttkbutton(frameSPIMap, text = "<<", width = 3)
bt.spi.Date.next <- ttkbutton(frameSPIMap, text = ">>", width = 3)
bt.spi.MapOpt <- ttkbutton(frameSPIMap, text = "Options", width = 7)
###############
EnvSPICalcPlot$dataMapOp <- list(presetCol = list(color = 'tim.colors', reverse = TRUE),
userCol = list(custom = FALSE, color = NULL),
userLvl = list(custom = TRUE, levels = c(-2, -1.5, -1, 0, 1, 1.5, 2), equidist = TRUE),
title = list(user = FALSE, title = ''),
colkeyLab = list(user = FALSE, label = ''),
scalebar = list(add = FALSE, pos = 'bottomleft'))
tkconfigure(bt.spi.MapOpt, command = function(){
if(!is.null(EnvSPICalcPlot$varData$map)){
atlevel <- pretty(EnvSPICalcPlot$varData$map$z, n = 10, min.n = 7)
if(is.null(EnvSPICalcPlot$dataMapOp$userLvl$levels)){
EnvSPICalcPlot$dataMapOp$userLvl$levels <- atlevel
}else{
if(!EnvSPICalcPlot$dataMapOp$userLvl$custom)
EnvSPICalcPlot$dataMapOp$userLvl$levels <- atlevel
}
}
EnvSPICalcPlot$dataMapOp <- MapGraph.MapOptions(main.win, EnvSPICalcPlot$dataMapOp)
})
###############
EnvSPICalcPlot$notebookTab.dataMap <- NULL
tkconfigure(bt.spi.maps, command = function(){
if(str_trim(tclvalue(EnvSPICalcPlot$spi.date)) != "" &
!is.null(EnvSPICalcPlot$varData))
{
get.Data.Map()
imgContainer <- SPICalc.Display.Maps(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataMap, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataMap <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
tkconfigure(bt.spi.Date.prev, command = function(){
if(str_trim(tclvalue(EnvSPICalcPlot$spi.date)) != ""){
donDates <- EnvSPICalcPlot$varData$ts$dates
idaty <- which(donDates == str_trim(tclvalue(EnvSPICalcPlot$spi.date)))
idaty <- idaty-1
if(idaty < 1) idaty <- length(donDates)
tclvalue(EnvSPICalcPlot$spi.date) <- donDates[idaty]
get.Data.Map()
imgContainer <- SPICalc.Display.Maps(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataMap, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataMap <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
# "Next date" button: advance the displayed SPI date, wrapping to the first
# date past the end, then redraw the map in its (unique) notebook tab.
tkconfigure(bt.spi.Date.next, command = function(){
if(str_trim(tclvalue(EnvSPICalcPlot$spi.date)) != ""){
donDates <- EnvSPICalcPlot$varData$ts$dates
idaty <- which(donDates == str_trim(tclvalue(EnvSPICalcPlot$spi.date)))
idaty <- idaty+1
if(idaty > length(donDates)) idaty <- 1
tclvalue(EnvSPICalcPlot$spi.date) <- donDates[idaty]
get.Data.Map()
imgContainer <- SPICalc.Display.Maps(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataMap, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataMap <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
###############
# Layout of the map controls: time-scale combobox + PLOT button on row 0,
# date navigation (prev / combobox / next) + map options on row 1.
tkgrid(cb.spi.maps, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.spi.maps, row = 0, column = 4, sticky = '', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.spi.Date.prev, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.spi.Date, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.spi.Date.next, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.spi.MapOpt, row = 1, column = 4, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
###############
# Changing the SPI time-scale reloads the corresponding dataset index
# (set.Data.Dates refreshes the available dates); silently abort on failure.
tkbind(cb.spi.maps, "<<ComboboxSelected>>", function(){
ret <- try(set.Data.Dates(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
})
##############################################
tkgrid(frameDataExist, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameSPIMap, row = 1, column = 0, sticky = 'we', padx = 1, pady = 3, ipadx = 1, ipady = 1)
#######################################################################################################
#Tab3
# Graphs tab: scrollable container holding the time-series graph controls.
frTab3 <- tkframe(cmd.tab3)
tkgrid(frTab3, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid.columnconfigure(frTab3, 0, weight = 1)
scrw3 <- bwScrolledWindow(frTab3)
tkgrid(scrw3)
tkgrid.columnconfigure(scrw3, 0, weight = 1)
subfr3 <- bwScrollableFrame(scrw3, width = wscrlwin, height = hscrlwin)
tkgrid.columnconfigure(subfr3, 0, weight = 1)
##############################################
# SPI time-series graph: plot type selector, PLOT button and options dialog.
frameDataTS <- ttklabelframe(subfr3, text = "SPI Graph", relief = 'groove')
typeTSPLOT <- c("Bar-Line", "Polygon")
EnvSPICalcPlot$graph$typeTSp <- tclVar("Bar-Line")
cb.typeTSp <- ttkcombobox(frameDataTS, values = typeTSPLOT, textvariable = EnvSPICalcPlot$graph$typeTSp, width = largeur5)
bt.TsGraph.plot <- ttkbutton(frameDataTS, text = "PLOT", width = 7)
bt.TSGraphOpt <- ttkbutton(frameDataTS, text = "Options", width = 8)
#################
# Default graph options (axis limits, custom Y ticks at common SPI
# thresholds, colors for negative/positive bars, optional overlay line).
EnvSPICalcPlot$TSGraphOp <- list(
bar.line = list(
xlim = list(is.min = FALSE, min = "1981-1-1", is.max = FALSE, max = "2017-12-3"),
ylim = list(is.min = FALSE, min = -10, is.max = FALSE, max = 10),
userYTcks = list(custom = TRUE, ticks = c(-2, -1.5, -1, 0, 1, 1.5, 2)),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
title = list(is.title = FALSE, title = '', position = 'top'),
colors = list(y0 = 0, negative = "#CF661C", positive = "#157040"),
line = list(plot = FALSE, col = "black", lwd = 1.5)
)
)
# Both plot types currently share the "Bar.Line" options dialog.
tkconfigure(bt.TSGraphOpt, command = function(){
suffix.fun <- switch(str_trim(tclvalue(EnvSPICalcPlot$graph$typeTSp)),
"Bar-Line" = "Bar.Line",
"Polygon" = "Bar.Line")
plot.fun <- match.fun(paste0("MapGraph.GraphOptions.", suffix.fun))
EnvSPICalcPlot$TSGraphOp <- plot.fun(main.win, EnvSPICalcPlot$TSGraphOp)
})
#########
EnvSPICalcPlot$notebookTab.dataGraph <- NULL
# PLOT button: draw the SPI time-series graph in its notebook tab
# (only once data have been computed/loaded into varData).
tkconfigure(bt.TsGraph.plot, command = function(){
if(!is.null(EnvSPICalcPlot$varData)){
imgContainer <- SPICalc.Display.Graph(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataGraph, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataGraph <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
#################
tkgrid(cb.typeTSp, row = 0, column = 0, sticky = 'we', pady = 1, columnspan = 1)
tkgrid(bt.TSGraphOpt, row = 0, column = 1, sticky = 'we', padx = 4, pady = 1, columnspan = 1)
tkgrid(bt.TsGraph.plot, row = 0, column = 2, sticky = 'we', pady = 1, columnspan = 1)
##############################################
# Station/pixel selector: frTS2 is (re)populated by widgets.Station.Pixel()
# depending on whether the data are stations or a gridded dataset.
frameSTNCrds <- ttklabelframe(subfr3, text = "Station/Coordinates", relief = 'groove')
frTS2 <- tkframe(frameSTNCrds)
EnvSPICalcPlot$graph$lonLOC <- tclVar()
EnvSPICalcPlot$graph$latLOC <- tclVar()
EnvSPICalcPlot$graph$stnIDTSp <- tclVar()
tkgrid(frTS2, row = 0, column = 0, sticky = 'e', pady = 1)
##############################################
# Time-scale visualization frame (SPI values across multiple time-scales).
frameVizTS <- tkframe(subfr3, relief = 'groove', borderwidth = 2)
EnvSPICalcPlot$spiViz$max.tscale <- tclVar(12)
bt.VizTS <- ttkbutton(frameVizTS, text = "Visualizing time-scales")
bt.VizOpt <- ttkbutton(frameVizTS, text = "Options")
txt.VizTS <- tklabel(frameVizTS, text = "Maximum time-scale", anchor = 'e', justify = 'right')
en.VizTS <- tkentry(frameVizTS, textvariable = EnvSPICalcPlot$spiViz$max.tscale, width = 3)
###############
# Default options for the time-scale visualization plot (SPI color preset,
# custom levels at common SPI thresholds, Y axis labeled with the time-scale).
EnvSPICalcPlot$spiVizOp <- list(presetCol = list(color = 'spi.colors', reverse = FALSE),
userCol = list(custom = FALSE, color = NULL),
userLvl = list(custom = TRUE, levels = c(-2, -1.5, -1, 0, 1, 1.5, 2), equidist = TRUE),
title = list(user = FALSE, title = ''),
colkeyLab = list(user = FALSE, label = ''),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = TRUE, ylab = 'Time-scale (months)'))
tkconfigure(bt.VizOpt, command = function(){
EnvSPICalcPlot$spiVizOp <- MapGraph.SpiVizOptions(main.win, EnvSPICalcPlot$spiVizOp)
})
###############
EnvSPICalcPlot$notebookTab.spiViz <- NULL
# Load the visualization dataset (get.Data.spiViz) then render it in a tab;
# silently abort if loading fails.
tkconfigure(bt.VizTS, command = function(){
if(!is.null(EnvSPICalcPlot$varData)){
ret <- try(get.Data.spiViz(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
imgContainer <- SPICalc.Display.VizTS(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.spiViz, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.spiViz <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
###############
tkgrid(bt.VizTS, row = 0, column = 0, sticky = 'we', padx = 3, ipadx = 1, pady = 1)
tkgrid(bt.VizOpt, row = 0, column = 1, sticky = 'we', padx = 3, ipadx = 1, pady = 1)
tkgrid(txt.VizTS, row = 1, column = 0, sticky = 'e', padx = 3, ipadx = 1, pady = 1)
tkgrid(en.VizTS, row = 1, column = 1, sticky = 'w', padx = 3, ipadx = 1, pady = 1)
##############################################
tkgrid(frameDataTS, row = 0, column = 0, sticky = 'we', pady = 1)
tkgrid(frameSTNCrds, row = 1, column = 0, sticky = '', pady = 3)
tkgrid(frameVizTS, row = 2, column = 0, sticky = '', pady = 3)
#######################################################################################################
#Tab4
# Boundaries tab: add administrative boundaries (shapefile) to the maps.
frTab4 <- tkframe(cmd.tab4)
tkgrid(frTab4, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid.columnconfigure(frTab4, 0, weight = 1)
scrw4 <- bwScrolledWindow(frTab4)
tkgrid(scrw4)
tkgrid.columnconfigure(scrw4, 0, weight = 1)
subfr4 <- bwScrollableFrame(scrw4, width = wscrlwin, height = hscrlwin)
tkgrid.columnconfigure(subfr4, 0, weight = 1)
##############################################
# Shapefile widgets start disabled; the chk.addshp toggle enables them.
frameSHP <- ttklabelframe(subfr4, text = "Boundaries", relief = 'groove')
EnvSPICalcPlot$shp$add.shp <- tclVar(FALSE)
file.plotShp <- tclVar()
stateSHP <- "disabled"
chk.addshp <- tkcheckbutton(frameSHP, variable = EnvSPICalcPlot$shp$add.shp, text = "Add boundaries to Map", anchor = 'w', justify = 'left')
bt.addshpOpt <- ttkbutton(frameSHP, text = "Options", state = stateSHP)
cb.addshp <- ttkcombobox(frameSHP, values = unlist(listOpenFiles), textvariable = file.plotShp, width = largeur1, state = stateSHP)
bt.addshp <- tkbutton(frameSHP, text = "...", state = stateSHP)
########
# Browse for a new ESRI shapefile, register it in the global open-files lists,
# refresh the combobox choices and extract the boundary coordinates for the map.
tkconfigure(bt.addshp, command = function(){
shp.opfiles <- getOpenShp(main.win, all.opfiles)
if(!is.null(shp.opfiles)){
nopf <- length(AllOpenFilesType)
AllOpenFilesType[[nopf+1]] <<- 'shp'
AllOpenFilesData[[nopf+1]] <<- shp.opfiles
tclvalue(file.plotShp) <- AllOpenFilesData[[nopf+1]][[1]]
listOpenFiles[[length(listOpenFiles)+1]] <<- AllOpenFilesData[[nopf+1]][[1]]
tkconfigure(cb.addshp, values = unlist(listOpenFiles), textvariable = file.plotShp)
shpofile <- getShpOpenData(file.plotShp)
# Bug fix: getBoundaries() used to be called unconditionally, so a NULL
# shpofile raised an error on shpofile[[2]]; keep ocrds NULL in that case.
if(is.null(shpofile)){
EnvSPICalcPlot$shp$ocrds <- NULL
}else{
EnvSPICalcPlot$shp$ocrds <- getBoundaries(shpofile[[2]])
}
}else return(NULL)
})
########
# Default drawing options for the boundary lines, editable via the dialog.
EnvSPICalcPlot$SHPOp <- list(col = "black", lwd = 1.5)
tkconfigure(bt.addshpOpt, command = function(){
EnvSPICalcPlot$SHPOp <- MapGraph.GraphOptions.LineSHP(main.win, EnvSPICalcPlot$SHPOp)
})
########
# Layout of the boundaries frame: checkbox + options on row 0,
# shapefile combobox + browse button on row 1.
tkgrid(chk.addshp, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1)
tkgrid(bt.addshpOpt, row = 0, column = 6, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1)
tkgrid(cb.addshp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.addshp, row = 1, column = 7, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1)
#################
# Re-read the boundary coordinates when the user picks another open shapefile.
tkbind(cb.addshp, "<<ComboboxSelected>>", function(){
shpofile <- getShpOpenData(file.plotShp)
# Bug fix: guard the NULL case so getBoundaries() is not applied to
# shpofile[[2]] when the shapefile could not be resolved.
if(is.null(shpofile)){
EnvSPICalcPlot$shp$ocrds <- NULL
}else{
EnvSPICalcPlot$shp$ocrds <- getBoundaries(shpofile[[2]])
}
})
# Enable/disable the shapefile widgets when the checkbox is clicked.
# Button-1 fires BEFORE the Tcl variable toggles, hence the inverted test:
# a current value of "1" means the click is about to uncheck it.
tkbind(chk.addshp, "<Button-1>", function(){
stateSHP <- if(tclvalue(EnvSPICalcPlot$shp$add.shp) == "1") "disabled" else "normal"
tkconfigure(cb.addshp, state = stateSHP)
tkconfigure(bt.addshp, state = stateSHP)
tkconfigure(bt.addshpOpt, state = stateSHP)
})
##############################################
tkgrid(frameSHP, row = 0, column = 0, sticky = 'we', pady = 1)
#######################################################################################################
# Rebuild the station/coordinates selector frame (frTS2) according to the
# data type of the last computed output:
#  - "cdtstation": a station-ID combobox with prev/next navigation buttons
#    that also redraw the time-series graph;
#  - otherwise (gridded dataset): longitude/latitude entry fields.
# frTS2 is destroyed and recreated, hence the <<- reassignment.
# Returns 0 invisibly-by-convention (explicit return(0)).
widgets.Station.Pixel <- function(){
tkdestroy(frTS2)
frTS2 <<- tkframe(frameSTNCrds)
if(EnvSPICalcPlot$output$params$data.type == "cdtstation"){
stnIDTSPLOT <- EnvSPICalcPlot$output$data$id
txt.stnSel <- tklabel(frTS2, text = "Select a station to plot")
bt.stnID.prev <- ttkbutton(frTS2, text = "<<", width = 6)
bt.stnID.next <- ttkbutton(frTS2, text = ">>", width = 6)
cb.stnID <- ttkcombobox(frTS2, values = stnIDTSPLOT, textvariable = EnvSPICalcPlot$graph$stnIDTSp, width = largeur5)
tclvalue(EnvSPICalcPlot$graph$stnIDTSp) <- stnIDTSPLOT[1]
# Previous station: wrap to the last station when stepping before the first.
tkconfigure(bt.stnID.prev, command = function(){
if(!is.null(EnvSPICalcPlot$varData)){
istn <- which(stnIDTSPLOT == str_trim(tclvalue(EnvSPICalcPlot$graph$stnIDTSp)))
istn <- istn-1
if(istn < 1) istn <- length(stnIDTSPLOT)
tclvalue(EnvSPICalcPlot$graph$stnIDTSp) <- stnIDTSPLOT[istn]
imgContainer <- SPICalc.Display.Graph(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataGraph, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataGraph <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
# Next station: wrap to the first station when stepping past the last.
tkconfigure(bt.stnID.next, command = function(){
if(!is.null(EnvSPICalcPlot$varData)){
istn <- which(stnIDTSPLOT == str_trim(tclvalue(EnvSPICalcPlot$graph$stnIDTSp)))
istn <- istn+1
if(istn > length(stnIDTSPLOT)) istn <- 1
tclvalue(EnvSPICalcPlot$graph$stnIDTSp) <- stnIDTSPLOT[istn]
imgContainer <- SPICalc.Display.Graph(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataGraph, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataGraph <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
tkgrid(txt.stnSel, row = 0, column = 0, sticky = '', rowspan = 1, columnspan = 3, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stnID.prev, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.stnID, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stnID.next, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
}else{
# Gridded data: the user types the coordinates of the pixel to plot.
txt.crdSel <- tklabel(frTS2, text = "Enter longitude and latitude to plot", anchor = 'w', justify = 'left')
txt.lonLoc <- tklabel(frTS2, text = "Longitude", anchor = 'e', justify = 'right')
en.lonLoc <- tkentry(frTS2, textvariable = EnvSPICalcPlot$graph$lonLOC, width = 8)
txt.latLoc <- tklabel(frTS2, text = "Latitude", anchor = 'e', justify = 'right')
en.latLoc <- tkentry(frTS2, textvariable = EnvSPICalcPlot$graph$latLOC, width = 8)
stnIDTSPLOT <- ""
tclvalue(EnvSPICalcPlot$graph$stnIDTSp) <- ""
tkgrid(txt.crdSel, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.lonLoc, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.lonLoc, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.latLoc, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.latLoc, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
}
tkgrid(frTS2, row = 0, column = 0, sticky = 'e', pady = 1)
return(0)
}
#################
# Scan <PathData>/CDTDATASET for computed SPI outputs named "SPI_<n><unit>"
# (unit = "dek" or "mon"; files for station data, directories for gridded
# data) and populate the time-scale combobox with display labels like
# "SPI-3-Month". Stores both the display labels (disp) and the matching
# file/dir names (dataF), kept in the same sort order.
# Returns 0 on success, NULL when no SPI data are found.
set.Data.Scales <- function(){
path.data <- file.path(EnvSPICalcPlot$PathData, "CDTDATASET")
spi.tscales <- list.files(path.data, "SPI_.+")
if(length(spi.tscales) == 0){
InsertMessagesTxt(main.txt.out, 'No SPI data found', format = TRUE)
return(NULL)
}
# Station outputs are files (strip the extension); gridded ones are dirs.
if(EnvSPICalcPlot$output$params$data.type == "cdtstation")
spi.tscales <- file_path_sans_ext(spi.tscales)
# Parse names of the form SPI_<tscale><unit>: tscale is padded to 2 digits
# so that the lexical sort by paste0(scales, tsc) orders numerically.
nch <- nchar(spi.tscales)
tsc <- str_pad(substr(spi.tscales, 5, nch-3), 2, pad = "0")
scales <- substr(spi.tscales, nch-2, nch)
spi.tscalesF <- spi.tscales[order(paste0(scales, tsc))]
spi.tscales <- paste0("SPI-", as.numeric(tsc), "-", ifelse(scales == "dek", "Dekad", "Month"))
spi.tscales <- spi.tscales[order(paste0(scales, tsc))]
EnvSPICalcPlot$varData$spi$disp <- spi.tscales
EnvSPICalcPlot$varData$spi$dataF <- spi.tscalesF
tkconfigure(cb.spi.maps, values = spi.tscales)
tclvalue(EnvSPICalcPlot$spi.tscale) <- spi.tscales[1]
return(0)
}
#################
# Load the dataset index (.rds) corresponding to the currently selected SPI
# time-scale and refresh the date combobox with its available dates (the
# most recent date is preselected). The index is cached: it is only re-read
# when the resolved file path differs from the one loaded last time.
# Returns 0 on success, NULL when the index file is missing.
set.Data.Dates <- function(){
path.data <- file.path(EnvSPICalcPlot$PathData, "CDTDATASET")
spi_scale <- str_trim(tclvalue(EnvSPICalcPlot$spi.tscale))
ipos <- which(EnvSPICalcPlot$varData$spi$disp %in% spi_scale)
tscale.data <- EnvSPICalcPlot$varData$spi$dataF[ipos]
# Station data: <name>.rds directly; gridded: <name>/<name>.rds
file.index <- if(EnvSPICalcPlot$output$params$data.type == "cdtstation")
file.path(path.data, paste0(tscale.data, ".rds"))
else file.path(path.data, tscale.data, paste0(tscale.data, ".rds"))
if(!file.exists(file.index)){
InsertMessagesTxt(main.txt.out, paste(file.index, 'not found'), format = TRUE)
return(NULL)
}
read.cdt.dataIdx <- TRUE
if(!is.null(EnvSPICalcPlot$cdtdataset))
if(!is.null(EnvSPICalcPlot$file.index))
if(EnvSPICalcPlot$file.index == file.index) read.cdt.dataIdx <- FALSE
if(read.cdt.dataIdx){
cdtdataset <- readRDS(file.index)
# The date vector lives in a different slot for station vs gridded data.
daty <- if(EnvSPICalcPlot$output$params$data.type == "cdtstation") cdtdataset$date else cdtdataset$dateInfo$date
tkconfigure(cb.spi.Date, values = daty)
tclvalue(EnvSPICalcPlot$spi.date) <- daty[length(daty)]
# Time step ("Dekad"/"Month") is the 3rd token of e.g. "SPI-3-Month".
EnvSPICalcPlot$varData$ts$step <- strsplit(spi_scale, "-")[[1]][3]
EnvSPICalcPlot$varData$ts$dates <- daty
EnvSPICalcPlot$cdtdataset <- cdtdataset
EnvSPICalcPlot$cdtdataset$fileInfo <- file.index
EnvSPICalcPlot$file.index <- file.index
}
return(0)
}
#################
# Prepare the map data (x, y, z grids in varData$map) for the currently
# selected date, showing a busy cursor while working. Re-reading is skipped
# when the same date is already loaded. For station data, the station values
# are interpolated onto a regular grid (cdt.as.image); for gridded data, the
# field is read from the per-date NetCDF file under DATA_NetCDF/<tscale>/.
get.Data.Map <- function(){
tkconfigure(main.win, cursor = 'watch')
tcl('update')
# Always restore the default cursor, even on error.
on.exit({
tkconfigure(main.win, cursor = '')
tcl('update')
})
this.daty <- str_trim(tclvalue(EnvSPICalcPlot$spi.date))
readVarData <- TRUE
if(!is.null(EnvSPICalcPlot$varData))
if(!is.null(EnvSPICalcPlot$varData$spi$this.daty))
if(EnvSPICalcPlot$varData$spi$this.daty == this.daty) readVarData <- FALSE
if(readVarData){
if(EnvSPICalcPlot$output$params$data.type == "cdtstation"){
idt <- which(EnvSPICalcPlot$cdtdataset$date == this.daty)
x <- EnvSPICalcPlot$output$data$lon
y <- EnvSPICalcPlot$output$data$lat
tmp <- as.numeric(EnvSPICalcPlot$cdtdataset$spi[idt, ])
# Grid resolution derived from the stations' coordinate extent.
nx <- nx_ny_as.image(diff(range(x)))
ny <- nx_ny_as.image(diff(range(y)))
tmp <- cdt.as.image(tmp, nx = nx, ny = ny, pts.xy = cbind(x, y))
EnvSPICalcPlot$varData$map$x <- tmp$x
EnvSPICalcPlot$varData$map$y <- tmp$y
EnvSPICalcPlot$varData$map$z <- tmp$z
}else{
ipos <- which(EnvSPICalcPlot$varData$spi$disp %in% str_trim(tclvalue(EnvSPICalcPlot$spi.tscale)))
tscale.data <- EnvSPICalcPlot$varData$spi$dataF[ipos]
nc.file <- file.path(EnvSPICalcPlot$PathData, "DATA_NetCDF", tscale.data, paste0("spi_", this.daty, ".nc"))
nc <- nc_open(nc.file)
# Assumes dim 1 = longitude, dim 2 = latitude, single variable per file.
EnvSPICalcPlot$varData$map$x <- nc$dim[[1]]$vals
EnvSPICalcPlot$varData$map$y <- nc$dim[[2]]$vals
EnvSPICalcPlot$varData$map$z <- ncvar_get(nc, varid = nc$var[[1]]$name)
nc_close(nc)
}
# Remember the loaded date so repeated plots of it skip the re-read.
EnvSPICalcPlot$varData$spi$this.daty <- this.daty
}
}
#################
# Load the dataset used for the SPI time-scale visualization, preferring
# MONTHLY_data over DEKADAL_data, and cache it in EnvSPICalcPlot$spiViz.
# Returns 0 on success, NULL when neither data directory exists.
get.Data.spiViz <- function(){
    dirs <- c(monthly = file.path(EnvSPICalcPlot$PathData, "MONTHLY_data"),
              dekadal = file.path(EnvSPICalcPlot$PathData, "DEKADAL_data"))
    present <- file.exists(dirs)
    if(!any(present)){
        InsertMessagesTxt(main.txt.out, 'No dekadal or monthly data found', format = TRUE)
        return(NULL)
    }
    # Monthly is listed first, so it wins when both directories exist.
    viztstep <- names(dirs)[present][1]
    data.dir <- dirs[[viztstep]]
    # Skip re-reading when the same time step is already loaded.
    cached <- !is.null(EnvSPICalcPlot$spiViz) &&
        !is.null(EnvSPICalcPlot$spiViz$tstep) &&
        EnvSPICalcPlot$spiViz$tstep == viztstep
    if(!cached){
        index.file <- file.path(data.dir, paste0(basename(data.dir), ".rds"))
        EnvSPICalcPlot$spiViz$cdtdataset <- readRDS(index.file)
        EnvSPICalcPlot$spiViz$cdtdataset$fileInfo <- index.file
        EnvSPICalcPlot$spiViz$tstep <- viztstep
    }
    return(0)
}
#######################################################################################################
# Final layout of the command frame and return it to the caller.
tcl('update')
tkgrid(cmd.frame, sticky = '', pady = 1)
tkgrid.columnconfigure(cmd.frame, 0, weight = 1)
######
return(cmd.frame)
}
| /functions/cdtCompute_SPI_leftCmd_functions.R | no_license | rijaf/CDT | R | false | false | 46,899 | r |
# Build the left command panel for SPI computation: four notebook tabs
# (SPI parameters, Maps, Graphs, Boundaries). Widget sizes are chosen per
# platform because font metrics differ between Windows and other systems.
SPICalcPanelCmd <- function(){
listOpenFiles <- openFile_ttkcomboList()
if(Sys.info()["sysname"] == "Windows"){
wscrlwin <- w.scale(26)
hscrlwin <- h.scale(46)
largeur0 <- as.integer(w.scale(22)/sfont0)
largeur1 <- as.integer(w.scale(27)/sfont0)
largeur2 <- as.integer(w.scale(29)/sfont0)
largeur3 <- 20
largeur4 <- 26
largeur5 <- 20
}else{
wscrlwin <- w.scale(27)
hscrlwin <- h.scale(48.5)
largeur0 <- as.integer(w.scale(16)/sfont0)
largeur1 <- as.integer(w.scale(21)/sfont0)
largeur2 <- as.integer(w.scale(22)/sfont0)
largeur3 <- 15
largeur4 <- 20
largeur5 <- 14
}
# Default computation parameters (overwritten from the widgets on Calculate).
GeneralParameters <- list(intstep = "dekadal", data.type = "cdtstation",
cdtstation = "", cdtdataset = "",
outfreq = "month", tscale = 3, distr = 'Gamma',
monitoring = FALSE,
dates = list(year1 = 2018, mon1 = 1, dek1 = 1, year2 = 2018, mon2 = 2, dek2 = 3),
outdir = "")
###################
cmd.frame <- tkframe(panel.left)
tknote.cmd <- bwNoteBook(cmd.frame)
tkgrid(tknote.cmd, sticky = 'nwes')
tkgrid.columnconfigure(tknote.cmd, 0, weight = 1)
cmd.tab1 <- bwAddTab(tknote.cmd, text = "SPI")
cmd.tab2 <- bwAddTab(tknote.cmd, text = "Maps")
cmd.tab3 <- bwAddTab(tknote.cmd, text = "Graphs")
cmd.tab4 <- bwAddTab(tknote.cmd, text = "Boundaries")
bwRaiseTab(tknote.cmd, cmd.tab1)
tkgrid.columnconfigure(cmd.tab1, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab2, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab3, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab4, 0, weight = 1)
#######################################################################################################
#Tab1
# SPI tab: time step of the input data, input file selection, monitoring
# options, SPI parameters and output directory.
frTab1 <- tkframe(cmd.tab1)
tkgrid(frTab1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid.columnconfigure(frTab1, 0, weight = 1)
scrw1 <- bwScrolledWindow(frTab1)
tkgrid(scrw1)
tkgrid.columnconfigure(scrw1, 0, weight = 1)
subfr1 <- bwScrollableFrame(scrw1, width = wscrlwin, height = hscrlwin)
tkgrid.columnconfigure(subfr1, 0, weight = 1)
#######################
frameTimeS <- ttklabelframe(subfr1, text = "Time step of input data", relief = 'groove')
timeSteps <- tclVar()
CbperiodVAL <- c('Daily data', 'Pentad data', 'Dekadal data', 'Monthly data')
# Map the internal parameter value to its display label.
tclvalue(timeSteps) <- switch(GeneralParameters$intstep,
'daily' = CbperiodVAL[1],
'pentad' = CbperiodVAL[2],
'dekadal' = CbperiodVAL[3],
'monthly' = CbperiodVAL[4])
cb.fperiod <- ttkcombobox(frameTimeS, values = CbperiodVAL, textvariable = timeSteps, width = largeur1)
tkgrid(cb.fperiod, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
infobulle(cb.fperiod, 'Select the time step of the data')
status.bar.display(cb.fperiod, TextOutputVar, 'Select the time step of the data')
############
# Monthly input can only produce monthly SPI; other steps allow dekad too.
tkbind(cb.fperiod, "<<ComboboxSelected>>", function(){
valSPIfreq <- if(str_trim(tclvalue(timeSteps)) == 'Monthly data') "month" else c("dekad", "month")
tkconfigure(cb.SPIfreq, values = valSPIfreq)
if(str_trim(tclvalue(timeSteps)) == 'Monthly data'){
tclvalue(out.spifreq) <- "month"
tclvalue(txt.suffix.var) <- '-month'
}
stateTscale <- if(str_trim(tclvalue(out.spifreq)) == 'month') "normal" else "disabled"
tkconfigure(spin.Tscale, state = stateTscale)
})
#######################
# Input data frame: the file selector widget is a combobox (open files) for
# station data and a path entry for a gridded dataset index file.
frameInData <- ttklabelframe(subfr1, text = "Input Data", relief = 'groove')
DataType <- tclVar()
CbdatatypeVAL <- c('CDT stations data format', 'CDT dataset format (gridded)')
tclvalue(DataType) <- switch(GeneralParameters$data.type,
'cdtstation' = CbdatatypeVAL[1],
'cdtdataset' = CbdatatypeVAL[2])
if(GeneralParameters$data.type == 'cdtstation'){
input.file <- tclVar(GeneralParameters$cdtstation)
txt.INData <- 'File containing stations Precip data'
}else{
input.file <- tclVar(GeneralParameters$cdtdataset)
txt.INData <- 'Index file (*.rds) for Precip dataset'
}
txt.INData.var <- tclVar(txt.INData)
txt.datatype <- tklabel(frameInData, text = "Format", anchor = 'w', justify = 'left')
cb.datatype <- ttkcombobox(frameInData, values = CbdatatypeVAL, textvariable = DataType, width = largeur0)
txt.infile <- tklabel(frameInData, text = tclvalue(txt.INData.var), textvariable = txt.INData.var, anchor = 'w', justify = 'left')
if(GeneralParameters$data.type == 'cdtstation'){
cb.en.infile <- ttkcombobox(frameInData, values = unlist(listOpenFiles), textvariable = input.file, width = largeur1)
}else{
cb.en.infile <- tkentry(frameInData, textvariable = input.file, width = largeur2)
}
bt.infile <- tkbutton(frameInData, text = "...")
############
# Browse button: open a data file (station) or pick an .rds index (gridded).
tkconfigure(bt.infile, command = function(){
if(GeneralParameters$data.type == 'cdtstation'){
dat.opfiles <- getOpenFiles(main.win, all.opfiles)
if(!is.null(dat.opfiles)){
nopf <- length(AllOpenFilesType)
AllOpenFilesType[[nopf+1]] <<- 'ascii'
AllOpenFilesData[[nopf+1]] <<- dat.opfiles
listOpenFiles[[length(listOpenFiles)+1]] <<- AllOpenFilesData[[nopf+1]][[1]]
tclvalue(input.file) <- AllOpenFilesData[[nopf+1]][[1]]
tkconfigure(cb.en.infile, values = unlist(listOpenFiles), textvariable = input.file)
}else return(NULL)
}else{
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
tclvalue(input.file) <- if(path.rds%in%c("", "NA") | is.na(path.rds)) "" else path.rds
}
})
############
tkgrid(txt.datatype, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.datatype, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.infile, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.en.infile, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 9, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.infile, row = 2, column = 9, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
############
infobulle(cb.datatype, 'Select the format of the input data')
status.bar.display(cb.datatype, TextOutputVar, 'Select the format of the input data')
if(GeneralParameters$data.type == 'cdtstation'){
infobulle(cb.en.infile, 'Select the file containing the input data')
status.bar.display(cb.en.infile, TextOutputVar, 'Select the file containing the input data')
infobulle(bt.infile, 'Browse file if not listed')
status.bar.display(bt.infile, TextOutputVar, 'Browse file if not listed')
}else{
infobulle(cb.en.infile, 'Enter the full path to the file <dataset name>.rds')
status.bar.display(cb.en.infile, TextOutputVar, 'Enter the full path to the file <dataset name>.rds')
infobulle(bt.infile, 'or browse here')
status.bar.display(bt.infile, TextOutputVar, 'or browse here')
}
############
############
tkbind(cb.datatype, "<<ComboboxSelected>>", function(){
tkdestroy(cb.en.infile)
tclvalue(input.file) <- ''
###
if(str_trim(tclvalue(DataType)) == 'CDT stations data format'){
tclvalue(txt.INData.var) <- 'File containing stations Precip data'
cb.en.infile <- ttkcombobox(frameInData, values = unlist(listOpenFiles), textvariable = input.file, width = largeur1)
tkconfigure(bt.infile, command = function(){
dat.opfiles <- getOpenFiles(main.win, all.opfiles)
if(!is.null(dat.opfiles)){
nopf <- length(AllOpenFilesType)
AllOpenFilesType[[nopf+1]] <<- 'ascii'
AllOpenFilesData[[nopf+1]] <<- dat.opfiles
listOpenFiles[[length(listOpenFiles)+1]] <<- AllOpenFilesData[[nopf+1]][[1]]
tclvalue(input.file) <- AllOpenFilesData[[nopf+1]][[1]]
tkconfigure(cb.en.infile, values = unlist(listOpenFiles), textvariable = input.file)
}else return(NULL)
})
infobulle(cb.en.infile, 'Select the file containing the input data')
status.bar.display(cb.en.infile, TextOutputVar, 'Select the file containing the input data')
infobulle(bt.infile, 'Browse file if not listed')
status.bar.display(bt.infile, TextOutputVar, 'Browse file if not listed')
}
###
if(str_trim(tclvalue(DataType)) == 'CDT dataset format (gridded)'){
tclvalue(txt.INData.var) <- 'Index file (*.rds) for Precip dataset'
cb.en.infile <- tkentry(frameInData, textvariable = input.file, width = largeur2)
tkconfigure(bt.infile, command = function(){
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
tclvalue(input.file) <- if(path.rds%in%c("", "NA") | is.na(path.rds)) "" else path.rds
})
infobulle(cb.en.infile, 'Enter the full path to the file <dataset name>.rds')
status.bar.display(cb.en.infile, TextOutputVar, 'Enter the full path to the file <dataset name>.rds')
infobulle(bt.infile, 'or browse here')
status.bar.display(bt.infile, TextOutputVar, 'or browse here')
}
tkgrid(cb.en.infile, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 9, padx = 0, pady = 1, ipadx = 1, ipady = 1)
})
#############################
# Monitoring frame: optional update of an already-computed SPI dataset over
# a user-given start/end date range (year/month/dekad entries).
frameMoni <- tkframe(subfr1, relief = 'groove', borderwidth = 2)
monitoring <- tclVar(GeneralParameters$monitoring)
istart.yrs <- tclVar(GeneralParameters$dates$year1)
istart.mon <- tclVar(GeneralParameters$dates$mon1)
istart.dek <- tclVar(GeneralParameters$dates$dek1)
iend.yrs <- tclVar(GeneralParameters$dates$year2)
iend.mon <- tclVar(GeneralParameters$dates$mon2)
iend.dek <- tclVar(GeneralParameters$dates$dek2)
# Date entries are active only in monitoring mode; the dekad entries are
# additionally disabled when the output frequency is monthly.
if(GeneralParameters$monitoring){
statedates <- 'normal'
statedatedek <- if(GeneralParameters$outfreq == 'month') 'disabled' else 'normal'
}else{
statedates <- 'disabled'
statedatedek <- 'disabled'
}
chk.Moni <- tkcheckbutton(frameMoni, variable = monitoring, text = "Monitoring: update SPI dataset", anchor = 'w', justify = 'left')
fr.Moni <- tkframe(frameMoni)
txt.deb.Moni <- tklabel(fr.Moni, text = 'Start date', anchor = 'e', justify = 'right')
txt.fin.Moni <- tklabel(fr.Moni, text = 'End date', anchor = 'e', justify = 'right')
txt.yrs.Moni <- tklabel(fr.Moni, text = 'Year')
txt.mon.Moni <- tklabel(fr.Moni, text = 'Month')
txt.dek.Moni <- tklabel(fr.Moni, text = 'Dekad')
en.yrs1.Moni <- tkentry(fr.Moni, width = 4, textvariable = istart.yrs, justify = "right", state = statedates)
en.mon1.Moni <- tkentry(fr.Moni, width = 4, textvariable = istart.mon, justify = "right", state = statedates)
en.dek1.Moni <- tkentry(fr.Moni, width = 4, textvariable = istart.dek, justify = "right", state = statedatedek)
en.yrs2.Moni <- tkentry(fr.Moni, width = 4, textvariable = iend.yrs, justify = "right", state = statedates)
en.mon2.Moni <- tkentry(fr.Moni, width = 4, textvariable = iend.mon, justify = "right", state = statedates)
en.dek2.Moni <- tkentry(fr.Moni, width = 4, textvariable = iend.dek, justify = "right", state = statedatedek)
tkgrid(txt.deb.Moni, row = 1, column = 0, sticky = 'ew', rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(txt.fin.Moni, row = 2, column = 0, sticky = 'ew', rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(txt.yrs.Moni, row = 0, column = 1, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(txt.mon.Moni, row = 0, column = 2, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(txt.dek.Moni, row = 0, column = 3, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.yrs1.Moni, row = 1, column = 1, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.mon1.Moni, row = 1, column = 2, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.dek1.Moni, row = 1, column = 3, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.yrs2.Moni, row = 2, column = 1, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.mon2.Moni, row = 2, column = 2, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(en.dek2.Moni, row = 2, column = 3, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(chk.Moni, row = 0, column = 0, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
tkgrid(fr.Moni, row = 1, column = 0, rowspan = 1, columnspan = 1, padx = 1, ipadx = 1)
###############
# Toggling monitoring flips the date-entry states, the distribution state,
# and repurposes the output selector (directory vs existing SPI.rds index).
# Button-1 fires BEFORE the Tcl variable toggles: a value of "0" means the
# checkbox is about to be checked (entering monitoring mode).
tkbind(chk.Moni, "<Button-1>", function(){
if(tclvalue(monitoring) == "0"){
statedates <- 'normal'
statedatedek <- if(str_trim(tclvalue(out.spifreq)) == 'month') 'disabled' else 'normal'
stateDistr <- 'disabled'
tclvalue(txt.save.var) <- "Index file (SPI.rds) for SPI data"
tkconfigure(bt.outSPI, command = function(){
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
tclvalue(outSPIdir) <- if(path.rds%in%c("", "NA") | is.na(path.rds)) "" else path.rds
})
}else{
statedates <- 'disabled'
statedatedek <- 'disabled'
stateDistr <- 'normal'
tclvalue(txt.save.var) <- "Directory to save the outputs"
tkconfigure(bt.outSPI, command = function(){
dirSPI <- tk_choose.dir(getwd(), "")
tclvalue(outSPIdir) <- if(dirSPI%in%c("", "NA") | is.na(dirSPI)) "" else dirSPI
})
}
tkconfigure(en.yrs1.Moni, state = statedates)
tkconfigure(en.mon1.Moni, state = statedates)
tkconfigure(en.dek1.Moni, state = statedatedek)
tkconfigure(en.yrs2.Moni, state = statedates)
tkconfigure(en.mon2.Moni, state = statedates)
tkconfigure(en.dek2.Moni, state = statedatedek)
tkconfigure(cb.Distrb, state = stateDistr)
})
#############################
# SPI parameters frame: output frequency (dekad/month), timescale spinbox
# (only meaningful for monthly SPI, 1..60) and the distribution function.
frameParams <- tkframe(subfr1, relief = 'groove', borderwidth = 2)
out.spifreq <- tclVar(GeneralParameters$outfreq)
if(GeneralParameters$outfreq == 'dekad'){
txt.suffix <- '-dekad'
stateTscale <- "disabled"
up.tscale <- 1
val.tscale <- 1
}else{
txt.suffix <- '-month'
stateTscale <- "normal"
up.tscale <- 60
val.tscale <- GeneralParameters$tscale
}
txt.suffix.var <- tclVar(txt.suffix)
frameTscale <- tkframe(frameParams)
txt.SPIfreq <- tklabel(frameTscale, text = "SPI", anchor = 'e', justify = 'right')
cb.SPIfreq <- ttkcombobox(frameTscale, values = c("dekad", "month"), textvariable = out.spifreq, width = 8)
txt.Tscale1 <- tklabel(frameTscale, text = "Timescale", anchor = 'e', justify = 'right')
spin.Tscale <- ttkspinbox(frameTscale, from = 1, to = up.tscale, increment = 1, justify = 'center', width = 2, state = stateTscale)
tkset(spin.Tscale, val.tscale)
txt.Tscale2 <- tklabel(frameTscale, text = tclvalue(txt.suffix.var), textvariable = txt.suffix.var, anchor = 'w', justify = 'left')
tkgrid(txt.SPIfreq, cb.SPIfreq, txt.Tscale1, spin.Tscale, txt.Tscale2)
########
# Switching the SPI frequency updates the timescale spinbox (fixed at 1 for
# dekad, up to 60 for month), the suffix label, and the dekad date entries
# (those are only editable in monitoring mode with dekadal frequency).
tkbind(cb.SPIfreq, "<<ComboboxSelected>>", function(){
if(str_trim(tclvalue(out.spifreq)) == 'dekad'){
stateTscale <- "disabled"
tclvalue(txt.suffix.var) <- '-dekad'
tkset(spin.Tscale, 1)
statedatedek <- if(tclvalue(monitoring) == "1") "normal" else "disabled"
}
if(str_trim(tclvalue(out.spifreq)) == 'month'){
stateTscale <- "normal"
tclvalue(txt.suffix.var) <- '-month'
tkconfigure(spin.Tscale, to = 60)
statedatedek <- "disabled"
}
tkconfigure(spin.Tscale, state = stateTscale)
tkconfigure(en.dek1.Moni, state = statedatedek)
tkconfigure(en.dek2.Moni, state = statedatedek)
})
########
# Distribution used to fit the precipitation series; frozen in monitoring
# mode (the fitted parameters come from the existing dataset).
frameDistrb <- tkframe(frameParams)
DistrbVAL <- c("Gamma", "Pearson Type III", "log-Logistic", "Z-Score")
DistrbFun <- tclVar(GeneralParameters$distr)
stateDistr <- if(GeneralParameters$monitoring) 'disabled' else 'normal'
txt.Distrb <- tklabel(frameDistrb, text = "Distribution function", anchor = 'e', justify = 'right')
cb.Distrb <- ttkcombobox(frameDistrb, values = DistrbVAL, textvariable = DistrbFun, width = largeur3, state = stateDistr)
tkgrid(txt.Distrb, cb.Distrb)
########
tkgrid(frameTscale, row = 0, column = 0, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameDistrb, row = 1, column = 0, padx = 1, pady = 1, ipadx = 1, ipady = 1)
#############################
# Output selector: a directory in normal mode, the existing SPI.rds index
# file in monitoring mode (the browse command follows suit).
frameDirSav <- tkframe(subfr1, relief = 'groove', borderwidth = 2)
outSPIdir <- tclVar(GeneralParameters$outdir)
if(GeneralParameters$monitoring){
text.save <- "Index file (SPI.rds) for SPI data"
}else{
text.save <- "Directory to save the outputs"
}
txt.save.var <- tclVar(text.save)
txt.outSPI <- tklabel(frameDirSav, text = tclvalue(txt.save.var), textvariable = txt.save.var, anchor = 'w', justify = 'left')
en.outSPI <- tkentry(frameDirSav, textvariable = outSPIdir, width = largeur2)
bt.outSPI <- tkbutton(frameDirSav, text = "...")
######
tkconfigure(bt.outSPI, command = function(){
if(GeneralParameters$monitoring){
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
tclvalue(outSPIdir) <- if(path.rds%in%c("", "NA") | is.na(path.rds)) "" else path.rds
}else{
dirSPI <- tk_choose.dir(getwd(), "")
tclvalue(outSPIdir) <- if(dirSPI%in%c("", "NA") | is.na(dirSPI)) "" else dirSPI
}
})
######
tkgrid(txt.outSPI, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(en.outSPI, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.outSPI, row = 1, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
infobulle(en.outSPI, 'Enter the full path to directory to save outputs')
status.bar.display(en.outSPI, TextOutputVar, 'Enter the full path to directory to save outputs')
infobulle(bt.outSPI, 'or browse here')
status.bar.display(bt.outSPI, TextOutputVar, 'or browse here')
#############################
# Calculate button: disabled when the "SPI data already computed" box on the
# Maps tab is checked (computation not needed in that case).
if(!is.null(EnvSPICalcPlot$DirExist)){
stateCaclBut <- if(tclvalue(EnvSPICalcPlot$DirExist) == "0") "normal" else "disabled"
}else stateCaclBut <- "normal"
calculateBut <- ttkbutton(subfr1, text = "Calculate", state = stateCaclBut)
#################
# Gather all widget values into GeneralParameters, run the computation with
# a busy cursor, then on success rebuild the station/pixel selector and
# refresh the available time-scales and dates.
tkconfigure(calculateBut, command = function(){
GeneralParameters$intstep <- switch(str_trim(tclvalue(timeSteps)),
'Daily data' = 'daily',
'Pentad data' = 'pentad',
'Dekadal data' = 'dekadal',
'Monthly data' = 'monthly')
GeneralParameters$data.type <- switch(str_trim(tclvalue(DataType)),
'CDT stations data format' = 'cdtstation',
'CDT dataset format (gridded)' = 'cdtdataset')
if(str_trim(tclvalue(DataType)) == 'CDT stations data format')
GeneralParameters$cdtstation <- str_trim(tclvalue(input.file))
if(str_trim(tclvalue(DataType)) == 'CDT dataset format (gridded)')
GeneralParameters$cdtdataset <- str_trim(tclvalue(input.file))
GeneralParameters$monitoring <- switch(tclvalue(monitoring), '0' = FALSE, '1' = TRUE)
GeneralParameters$dates$year1 <- as.numeric(str_trim(tclvalue(istart.yrs)))
GeneralParameters$dates$mon1 <- as.numeric(str_trim(tclvalue(istart.mon)))
GeneralParameters$dates$dek1 <- as.numeric(str_trim(tclvalue(istart.dek)))
GeneralParameters$dates$year2 <- as.numeric(str_trim(tclvalue(iend.yrs)))
GeneralParameters$dates$mon2 <- as.numeric(str_trim(tclvalue(iend.mon)))
GeneralParameters$dates$dek2 <- as.numeric(str_trim(tclvalue(iend.dek)))
GeneralParameters$outfreq <- str_trim(tclvalue(out.spifreq))
GeneralParameters$tscale <- as.numeric(str_trim(tclvalue(tkget(spin.Tscale))))
GeneralParameters$distr <- str_trim(tclvalue(DistrbFun))
GeneralParameters$outdir <- str_trim(tclvalue(outSPIdir))
# assign('GeneralParameters', GeneralParameters, envir = .GlobalEnv)
tkconfigure(main.win, cursor = 'watch')
InsertMessagesTxt(main.txt.out, "Calculate SPI ......")
# errorFun reports the error to the message pane; cursor always restored.
ret <- tryCatch(
computeSPIProcs(GeneralParameters),
#warning = function(w) warningFun(w),
error = function(e) errorFun(e),
finally = tkconfigure(main.win, cursor = '')
)
msg0 <- "SPI calculation finished successfully"
msg1 <- "SPI calculation failed"
if(!is.null(ret)){
if(ret == 0){
InsertMessagesTxt(main.txt.out, msg0)
###################
widgets.Station.Pixel()
ret <- try(set.Data.Scales(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
ret <- try(set.Data.Dates(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
}else InsertMessagesTxt(main.txt.out, msg1, format = TRUE)
}else InsertMessagesTxt(main.txt.out, msg1, format = TRUE)
})
############################################
tkgrid(frameTimeS, row = 0, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameInData, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameMoni, row = 2, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameParams, row = 3, column = 0, sticky = '', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameDirSav, row = 4, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(calculateBut, row = 5, column = 0, sticky = '', padx = 1, pady = 3, ipadx = 1, ipady = 1)
#######################################################################################################
#Tab2
frTab2 <- tkframe(cmd.tab2)
tkgrid(frTab2, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid.columnconfigure(frTab2, 0, weight = 1)
scrw2 <- bwScrolledWindow(frTab2)
tkgrid(scrw2)
tkgrid.columnconfigure(scrw2, 0, weight = 1)
subfr2 <- bwScrollableFrame(scrw2, width = wscrlwin, height = hscrlwin)
tkgrid.columnconfigure(subfr2, 0, weight = 1)
##############################################
frameDataExist <- ttklabelframe(subfr2, text = "SPI data", relief = 'groove')
EnvSPICalcPlot$DirExist <- tclVar(0)
file.dataIndex <- tclVar()
stateExistData <- if(tclvalue(EnvSPICalcPlot$DirExist) == "1") "normal" else "disabled"
chk.dataIdx <- tkcheckbutton(frameDataExist, variable = EnvSPICalcPlot$DirExist, text = "SPI data already computed", anchor = 'w', justify = 'left')
en.dataIdx <- tkentry(frameDataExist, textvariable = file.dataIndex, width = largeur2, state = stateExistData)
bt.dataIdx <- tkbutton(frameDataExist, text = "...", state = stateExistData)
tkconfigure(bt.dataIdx, command = function(){
filetypes <- "{{R Objects} {.rds .RDS .RData}} {{All files} *}"
path.Stat <- tclvalue(tkgetOpenFile(initialdir = getwd(), initialfile = "", filetypes = filetypes))
if(path.Stat%in%c("", "NA") | is.na(path.Stat)) return(NULL)
tclvalue(file.dataIndex) <- path.Stat
if(file.exists(str_trim(tclvalue(file.dataIndex)))){
OutSPIdata <- try(readRDS(str_trim(tclvalue(file.dataIndex))), silent = TRUE)
if(inherits(OutSPIdata, "try-error")){
InsertMessagesTxt(main.txt.out, 'Unable to load SPI data', format = TRUE)
InsertMessagesTxt(main.txt.out, gsub('[\r\n]', '', OutSPIdata[1]), format = TRUE)
tkconfigure(cb.spi.maps, values = "")
tclvalue(EnvSPICalcPlot$spi.tscale) <- ""
tkconfigure(cb.spi.Date, values = "")
tclvalue(EnvSPICalcPlot$spi.date) <- ""
return(NULL)
}
EnvSPICalcPlot$output <- OutSPIdata
EnvSPICalcPlot$PathData <- dirname(str_trim(tclvalue(file.dataIndex)))
###################
widgets.Station.Pixel()
ret <- try(set.Data.Scales(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
ret <- try(set.Data.Dates(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
}
})
tkgrid(chk.dataIdx, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.dataIdx, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.dataIdx, row = 1, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
###############
tkbind(chk.dataIdx, "<Button-1>", function(){
stateExistData <- if(tclvalue(EnvSPICalcPlot$DirExist) == '1') 'disabled' else 'normal'
tkconfigure(en.dataIdx, state = stateExistData)
tkconfigure(bt.dataIdx, state = stateExistData)
stateCaclBut <- if(tclvalue(EnvSPICalcPlot$DirExist) == '1') 'normal' else 'disabled'
tkconfigure(calculateBut, state = stateCaclBut)
})
##############################################
frameSPIMap <- ttklabelframe(subfr2, text = "SPI Map", relief = 'groove')
EnvSPICalcPlot$spi.tscale <- tclVar()
EnvSPICalcPlot$spi.date <- tclVar()
cb.spi.maps <- ttkcombobox(frameSPIMap, values = "", textvariable = EnvSPICalcPlot$spi.tscale, width = largeur4)
bt.spi.maps <- ttkbutton(frameSPIMap, text = "PLOT", width = 7)
cb.spi.Date <- ttkcombobox(frameSPIMap, values = "", textvariable = EnvSPICalcPlot$spi.date, width = largeur5)
bt.spi.Date.prev <- ttkbutton(frameSPIMap, text = "<<", width = 3)
bt.spi.Date.next <- ttkbutton(frameSPIMap, text = ">>", width = 3)
bt.spi.MapOpt <- ttkbutton(frameSPIMap, text = "Options", width = 7)
###############
EnvSPICalcPlot$dataMapOp <- list(presetCol = list(color = 'tim.colors', reverse = TRUE),
userCol = list(custom = FALSE, color = NULL),
userLvl = list(custom = TRUE, levels = c(-2, -1.5, -1, 0, 1, 1.5, 2), equidist = TRUE),
title = list(user = FALSE, title = ''),
colkeyLab = list(user = FALSE, label = ''),
scalebar = list(add = FALSE, pos = 'bottomleft'))
tkconfigure(bt.spi.MapOpt, command = function(){
if(!is.null(EnvSPICalcPlot$varData$map)){
atlevel <- pretty(EnvSPICalcPlot$varData$map$z, n = 10, min.n = 7)
if(is.null(EnvSPICalcPlot$dataMapOp$userLvl$levels)){
EnvSPICalcPlot$dataMapOp$userLvl$levels <- atlevel
}else{
if(!EnvSPICalcPlot$dataMapOp$userLvl$custom)
EnvSPICalcPlot$dataMapOp$userLvl$levels <- atlevel
}
}
EnvSPICalcPlot$dataMapOp <- MapGraph.MapOptions(main.win, EnvSPICalcPlot$dataMapOp)
})
###############
EnvSPICalcPlot$notebookTab.dataMap <- NULL
tkconfigure(bt.spi.maps, command = function(){
if(str_trim(tclvalue(EnvSPICalcPlot$spi.date)) != "" &
!is.null(EnvSPICalcPlot$varData))
{
get.Data.Map()
imgContainer <- SPICalc.Display.Maps(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataMap, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataMap <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
tkconfigure(bt.spi.Date.prev, command = function(){
if(str_trim(tclvalue(EnvSPICalcPlot$spi.date)) != ""){
donDates <- EnvSPICalcPlot$varData$ts$dates
idaty <- which(donDates == str_trim(tclvalue(EnvSPICalcPlot$spi.date)))
idaty <- idaty-1
if(idaty < 1) idaty <- length(donDates)
tclvalue(EnvSPICalcPlot$spi.date) <- donDates[idaty]
get.Data.Map()
imgContainer <- SPICalc.Display.Maps(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataMap, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataMap <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
tkconfigure(bt.spi.Date.next, command = function(){
if(str_trim(tclvalue(EnvSPICalcPlot$spi.date)) != ""){
donDates <- EnvSPICalcPlot$varData$ts$dates
idaty <- which(donDates == str_trim(tclvalue(EnvSPICalcPlot$spi.date)))
idaty <- idaty+1
if(idaty > length(donDates)) idaty <- 1
tclvalue(EnvSPICalcPlot$spi.date) <- donDates[idaty]
get.Data.Map()
imgContainer <- SPICalc.Display.Maps(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataMap, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataMap <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
###############
tkgrid(cb.spi.maps, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.spi.maps, row = 0, column = 4, sticky = '', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.spi.Date.prev, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.spi.Date, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.spi.Date.next, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.spi.MapOpt, row = 1, column = 4, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
###############
tkbind(cb.spi.maps, "<<ComboboxSelected>>", function(){
ret <- try(set.Data.Dates(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
})
##############################################
tkgrid(frameDataExist, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameSPIMap, row = 1, column = 0, sticky = 'we', padx = 1, pady = 3, ipadx = 1, ipady = 1)
#######################################################################################################
#Tab3
frTab3 <- tkframe(cmd.tab3)
tkgrid(frTab3, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid.columnconfigure(frTab3, 0, weight = 1)
scrw3 <- bwScrolledWindow(frTab3)
tkgrid(scrw3)
tkgrid.columnconfigure(scrw3, 0, weight = 1)
subfr3 <- bwScrollableFrame(scrw3, width = wscrlwin, height = hscrlwin)
tkgrid.columnconfigure(subfr3, 0, weight = 1)
##############################################
frameDataTS <- ttklabelframe(subfr3, text = "SPI Graph", relief = 'groove')
typeTSPLOT <- c("Bar-Line", "Polygon")
EnvSPICalcPlot$graph$typeTSp <- tclVar("Bar-Line")
cb.typeTSp <- ttkcombobox(frameDataTS, values = typeTSPLOT, textvariable = EnvSPICalcPlot$graph$typeTSp, width = largeur5)
bt.TsGraph.plot <- ttkbutton(frameDataTS, text = "PLOT", width = 7)
bt.TSGraphOpt <- ttkbutton(frameDataTS, text = "Options", width = 8)
#################
EnvSPICalcPlot$TSGraphOp <- list(
bar.line = list(
xlim = list(is.min = FALSE, min = "1981-1-1", is.max = FALSE, max = "2017-12-3"),
ylim = list(is.min = FALSE, min = -10, is.max = FALSE, max = 10),
userYTcks = list(custom = TRUE, ticks = c(-2, -1.5, -1, 0, 1, 1.5, 2)),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
title = list(is.title = FALSE, title = '', position = 'top'),
colors = list(y0 = 0, negative = "#CF661C", positive = "#157040"),
line = list(plot = FALSE, col = "black", lwd = 1.5)
)
)
tkconfigure(bt.TSGraphOpt, command = function(){
suffix.fun <- switch(str_trim(tclvalue(EnvSPICalcPlot$graph$typeTSp)),
"Bar-Line" = "Bar.Line",
"Polygon" = "Bar.Line")
plot.fun <- match.fun(paste0("MapGraph.GraphOptions.", suffix.fun))
EnvSPICalcPlot$TSGraphOp <- plot.fun(main.win, EnvSPICalcPlot$TSGraphOp)
})
#########
EnvSPICalcPlot$notebookTab.dataGraph <- NULL
tkconfigure(bt.TsGraph.plot, command = function(){
if(!is.null(EnvSPICalcPlot$varData)){
imgContainer <- SPICalc.Display.Graph(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataGraph, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataGraph <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
#################
tkgrid(cb.typeTSp, row = 0, column = 0, sticky = 'we', pady = 1, columnspan = 1)
tkgrid(bt.TSGraphOpt, row = 0, column = 1, sticky = 'we', padx = 4, pady = 1, columnspan = 1)
tkgrid(bt.TsGraph.plot, row = 0, column = 2, sticky = 'we', pady = 1, columnspan = 1)
##############################################
frameSTNCrds <- ttklabelframe(subfr3, text = "Station/Coordinates", relief = 'groove')
frTS2 <- tkframe(frameSTNCrds)
EnvSPICalcPlot$graph$lonLOC <- tclVar()
EnvSPICalcPlot$graph$latLOC <- tclVar()
EnvSPICalcPlot$graph$stnIDTSp <- tclVar()
tkgrid(frTS2, row = 0, column = 0, sticky = 'e', pady = 1)
##############################################
frameVizTS <- tkframe(subfr3, relief = 'groove', borderwidth = 2)
EnvSPICalcPlot$spiViz$max.tscale <- tclVar(12)
bt.VizTS <- ttkbutton(frameVizTS, text = "Visualizing time-scales")
bt.VizOpt <- ttkbutton(frameVizTS, text = "Options")
txt.VizTS <- tklabel(frameVizTS, text = "Maximum time-scale", anchor = 'e', justify = 'right')
en.VizTS <- tkentry(frameVizTS, textvariable = EnvSPICalcPlot$spiViz$max.tscale, width = 3)
###############
EnvSPICalcPlot$spiVizOp <- list(presetCol = list(color = 'spi.colors', reverse = FALSE),
userCol = list(custom = FALSE, color = NULL),
userLvl = list(custom = TRUE, levels = c(-2, -1.5, -1, 0, 1, 1.5, 2), equidist = TRUE),
title = list(user = FALSE, title = ''),
colkeyLab = list(user = FALSE, label = ''),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = TRUE, ylab = 'Time-scale (months)'))
tkconfigure(bt.VizOpt, command = function(){
EnvSPICalcPlot$spiVizOp <- MapGraph.SpiVizOptions(main.win, EnvSPICalcPlot$spiVizOp)
})
###############
EnvSPICalcPlot$notebookTab.spiViz <- NULL
tkconfigure(bt.VizTS, command = function(){
if(!is.null(EnvSPICalcPlot$varData)){
ret <- try(get.Data.spiViz(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
imgContainer <- SPICalc.Display.VizTS(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.spiViz, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.spiViz <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
###############
tkgrid(bt.VizTS, row = 0, column = 0, sticky = 'we', padx = 3, ipadx = 1, pady = 1)
tkgrid(bt.VizOpt, row = 0, column = 1, sticky = 'we', padx = 3, ipadx = 1, pady = 1)
tkgrid(txt.VizTS, row = 1, column = 0, sticky = 'e', padx = 3, ipadx = 1, pady = 1)
tkgrid(en.VizTS, row = 1, column = 1, sticky = 'w', padx = 3, ipadx = 1, pady = 1)
##############################################
tkgrid(frameDataTS, row = 0, column = 0, sticky = 'we', pady = 1)
tkgrid(frameSTNCrds, row = 1, column = 0, sticky = '', pady = 3)
tkgrid(frameVizTS, row = 2, column = 0, sticky = '', pady = 3)
#######################################################################################################
#Tab4
frTab4 <- tkframe(cmd.tab4)
tkgrid(frTab4, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid.columnconfigure(frTab4, 0, weight = 1)
scrw4 <- bwScrolledWindow(frTab4)
tkgrid(scrw4)
tkgrid.columnconfigure(scrw4, 0, weight = 1)
subfr4 <- bwScrollableFrame(scrw4, width = wscrlwin, height = hscrlwin)
tkgrid.columnconfigure(subfr4, 0, weight = 1)
##############################################
frameSHP <- ttklabelframe(subfr4, text = "Boundaries", relief = 'groove')
EnvSPICalcPlot$shp$add.shp <- tclVar(FALSE)
file.plotShp <- tclVar()
stateSHP <- "disabled"
chk.addshp <- tkcheckbutton(frameSHP, variable = EnvSPICalcPlot$shp$add.shp, text = "Add boundaries to Map", anchor = 'w', justify = 'left')
bt.addshpOpt <- ttkbutton(frameSHP, text = "Options", state = stateSHP)
cb.addshp <- ttkcombobox(frameSHP, values = unlist(listOpenFiles), textvariable = file.plotShp, width = largeur1, state = stateSHP)
bt.addshp <- tkbutton(frameSHP, text = "...", state = stateSHP)
########
tkconfigure(bt.addshp, command = function(){
shp.opfiles <- getOpenShp(main.win, all.opfiles)
if(!is.null(shp.opfiles)){
nopf <- length(AllOpenFilesType)
AllOpenFilesType[[nopf+1]] <<- 'shp'
AllOpenFilesData[[nopf+1]] <<- shp.opfiles
tclvalue(file.plotShp) <- AllOpenFilesData[[nopf+1]][[1]]
listOpenFiles[[length(listOpenFiles)+1]] <<- AllOpenFilesData[[nopf+1]][[1]]
tkconfigure(cb.addshp, values = unlist(listOpenFiles), textvariable = file.plotShp)
shpofile <- getShpOpenData(file.plotShp)
if(is.null(shpofile)) EnvSPICalcPlot$shp$ocrds <- NULL
EnvSPICalcPlot$shp$ocrds <- getBoundaries(shpofile[[2]])
}else return(NULL)
})
########
EnvSPICalcPlot$SHPOp <- list(col = "black", lwd = 1.5)
tkconfigure(bt.addshpOpt, command = function(){
EnvSPICalcPlot$SHPOp <- MapGraph.GraphOptions.LineSHP(main.win, EnvSPICalcPlot$SHPOp)
})
########
tkgrid(chk.addshp, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1)
tkgrid(bt.addshpOpt, row = 0, column = 6, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1)
tkgrid(cb.addshp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.addshp, row = 1, column = 7, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1)
#################
tkbind(cb.addshp, "<<ComboboxSelected>>", function(){
shpofile <- getShpOpenData(file.plotShp)
if(is.null(shpofile)) EnvSPICalcPlot$shp$ocrds <- NULL
EnvSPICalcPlot$shp$ocrds <- getBoundaries(shpofile[[2]])
})
tkbind(chk.addshp, "<Button-1>", function(){
stateSHP <- if(tclvalue(EnvSPICalcPlot$shp$add.shp) == "1") "disabled" else "normal"
tkconfigure(cb.addshp, state = stateSHP)
tkconfigure(bt.addshp, state = stateSHP)
tkconfigure(bt.addshpOpt, state = stateSHP)
})
##############################################
tkgrid(frameSHP, row = 0, column = 0, sticky = 'we', pady = 1)
#######################################################################################################
# Rebuild the station/coordinate selector frame (frTS2) to match the loaded
# dataset. `frTS2` lives in the enclosing GUI scope, hence the `<<-` after the
# previous instance is destroyed. For station data a station-ID combobox with
# prev/next cycling buttons is built; for gridded data free lon/lat entries.
# Returns 0 on completion.
widgets.Station.Pixel <- function(){
tkdestroy(frTS2)
frTS2 <<- tkframe(frameSTNCrds)
if(EnvSPICalcPlot$output$params$data.type == "cdtstation"){
stnIDTSPLOT <- EnvSPICalcPlot$output$data$id
txt.stnSel <- tklabel(frTS2, text = "Select a station to plot")
bt.stnID.prev <- ttkbutton(frTS2, text = "<<", width = 6)
bt.stnID.next <- ttkbutton(frTS2, text = ">>", width = 6)
cb.stnID <- ttkcombobox(frTS2, values = stnIDTSPLOT, textvariable = EnvSPICalcPlot$graph$stnIDTSp, width = largeur5)
tclvalue(EnvSPICalcPlot$graph$stnIDTSp) <- stnIDTSPLOT[1]
# "<<": step to the previous station (wrapping to the last) and redraw the graph tab.
tkconfigure(bt.stnID.prev, command = function(){
if(!is.null(EnvSPICalcPlot$varData)){
istn <- which(stnIDTSPLOT == str_trim(tclvalue(EnvSPICalcPlot$graph$stnIDTSp)))
istn <- istn-1
if(istn < 1) istn <- length(stnIDTSPLOT)
tclvalue(EnvSPICalcPlot$graph$stnIDTSp) <- stnIDTSPLOT[istn]
imgContainer <- SPICalc.Display.Graph(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataGraph, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataGraph <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
# ">>": step to the next station (wrapping to the first) and redraw the graph tab.
tkconfigure(bt.stnID.next, command = function(){
if(!is.null(EnvSPICalcPlot$varData)){
istn <- which(stnIDTSPLOT == str_trim(tclvalue(EnvSPICalcPlot$graph$stnIDTSp)))
istn <- istn+1
if(istn > length(stnIDTSPLOT)) istn <- 1
tclvalue(EnvSPICalcPlot$graph$stnIDTSp) <- stnIDTSPLOT[istn]
imgContainer <- SPICalc.Display.Graph(tknotes)
retNBTab <- imageNotebookTab_unik(tknotes, imgContainer, EnvSPICalcPlot$notebookTab.dataGraph, AllOpenTabType, AllOpenTabData)
EnvSPICalcPlot$notebookTab.dataGraph <- retNBTab$notebookTab
AllOpenTabType <<- retNBTab$AllOpenTabType
AllOpenTabData <<- retNBTab$AllOpenTabData
}
})
tkgrid(txt.stnSel, row = 0, column = 0, sticky = '', rowspan = 1, columnspan = 3, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stnID.prev, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.stnID, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stnID.next, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
}else{
# Gridded dataset: no station list; the user types a longitude/latitude pair.
txt.crdSel <- tklabel(frTS2, text = "Enter longitude and latitude to plot", anchor = 'w', justify = 'left')
txt.lonLoc <- tklabel(frTS2, text = "Longitude", anchor = 'e', justify = 'right')
en.lonLoc <- tkentry(frTS2, textvariable = EnvSPICalcPlot$graph$lonLOC, width = 8)
txt.latLoc <- tklabel(frTS2, text = "Latitude", anchor = 'e', justify = 'right')
en.latLoc <- tkentry(frTS2, textvariable = EnvSPICalcPlot$graph$latLOC, width = 8)
stnIDTSPLOT <- ""
tclvalue(EnvSPICalcPlot$graph$stnIDTSp) <- ""
tkgrid(txt.crdSel, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.lonLoc, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.lonLoc, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.latLoc, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.latLoc, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
}
tkgrid(frTS2, row = 0, column = 0, sticky = 'e', pady = 1)
return(0)
}
#################
# Discover which SPI time-scales were computed under <PathData>/CDTDATASET and
# publish them: sorted dataset names go to EnvSPICalcPlot$varData$spi$dataF,
# matching display labels ("SPI-<n>-Dekad" / "SPI-<n>-Month") go to
# EnvSPICalcPlot$varData$spi$disp and to the time-scale combobox (first label
# preselected). Returns NULL after posting a GUI message when no SPI data are
# found, 0 otherwise.
set.Data.Scales <- function(){
    dataset.dir <- file.path(EnvSPICalcPlot$PathData, "CDTDATASET")
    spi.files <- list.files(dataset.dir, "SPI_.+")
    if(length(spi.files) == 0){
        InsertMessagesTxt(main.txt.out, 'No SPI data found', format = TRUE)
        return(NULL)
    }
    # Station outputs are flat .rds files; drop the extension so the name
    # parsing below sees the same "SPI_<n><unit>" shape in both layouts.
    if(EnvSPICalcPlot$output$params$data.type == "cdtstation")
        spi.files <- file_path_sans_ext(spi.files)
    # Names end with a 3-letter unit (the ifelse() below distinguishes "dek"
    # from anything else) preceded by the numeric time-scale, e.g. "SPI_3mon".
    len <- nchar(spi.files)
    tsc.num <- str_pad(substr(spi.files, 5, len - 3), 2, pad = "0")
    tsc.unit <- substr(spi.files, len - 2, len)
    # Sort once by unit then zero-padded scale; reuse the order for both lists.
    ord <- order(paste0(tsc.unit, tsc.num))
    labels <- paste0("SPI-", as.numeric(tsc.num), "-", ifelse(tsc.unit == "dek", "Dekad", "Month"))[ord]
    EnvSPICalcPlot$varData$spi$disp <- labels
    EnvSPICalcPlot$varData$spi$dataF <- spi.files[ord]
    tkconfigure(cb.spi.maps, values = labels)
    tclvalue(EnvSPICalcPlot$spi.tscale) <- labels[1]
    return(0)
}
#################
# Load the dataset index (.rds) for the currently selected SPI time-scale and
# publish its dates: the date combobox gets all dates (latest preselected) and
# EnvSPICalcPlot caches the dataset plus the index path so repeated calls with
# the same selection skip the read. Returns NULL (with a GUI message) when the
# index file is missing, 0 otherwise.
set.Data.Dates <- function(){
    dataset.dir <- file.path(EnvSPICalcPlot$PathData, "CDTDATASET")
    spi.label <- str_trim(tclvalue(EnvSPICalcPlot$spi.tscale))
    isel <- which(EnvSPICalcPlot$varData$spi$disp %in% spi.label)
    data.name <- EnvSPICalcPlot$varData$spi$dataF[isel]
    is.station <- EnvSPICalcPlot$output$params$data.type == "cdtstation"
    # Station data: one flat <name>.rds; gridded data: <name>/<name>.rds.
    index.file <- if(is.station) file.path(dataset.dir, paste0(data.name, ".rds"))
        else file.path(dataset.dir, data.name, paste0(data.name, ".rds"))
    if(!file.exists(index.file)){
        InsertMessagesTxt(main.txt.out, paste(index.file, 'not found'), format = TRUE)
        return(NULL)
    }
    # Re-read only when the cached index path differs from the requested one.
    already.loaded <- !is.null(EnvSPICalcPlot$cdtdataset) &&
        !is.null(EnvSPICalcPlot$file.index) &&
        EnvSPICalcPlot$file.index == index.file
    if(!already.loaded){
        cdtdataset <- readRDS(index.file)
        daty <- if(is.station) cdtdataset$date else cdtdataset$dateInfo$date
        tkconfigure(cb.spi.Date, values = daty)
        tclvalue(EnvSPICalcPlot$spi.date) <- daty[length(daty)]
        # Label looks like "SPI-<n>-<Dekad|Month>"; the third token is the step.
        EnvSPICalcPlot$varData$ts$step <- strsplit(spi.label, "-")[[1]][3]
        EnvSPICalcPlot$varData$ts$dates <- daty
        EnvSPICalcPlot$cdtdataset <- cdtdataset
        EnvSPICalcPlot$cdtdataset$fileInfo <- index.file
        EnvSPICalcPlot$file.index <- index.file
    }
    return(0)
}
#################
# Load the SPI field for the currently selected date into
# EnvSPICalcPlot$varData$map (components x, y, z), re-reading only when the
# date changed since the last call. Station values are interpolated onto a
# regular grid with cdt.as.image(); gridded datasets are read from the
# per-date NetCDF file (nc_open/ncvar_get -- presumably ncdf4; confirm).
get.Data.Map <- function(){
# Busy cursor while reading; on.exit() guarantees it is restored.
tkconfigure(main.win, cursor = 'watch')
tcl('update')
on.exit({
tkconfigure(main.win, cursor = '')
tcl('update')
})
this.daty <- str_trim(tclvalue(EnvSPICalcPlot$spi.date))
# Skip the read when the cached map already belongs to this date.
readVarData <- TRUE
if(!is.null(EnvSPICalcPlot$varData))
if(!is.null(EnvSPICalcPlot$varData$spi$this.daty))
if(EnvSPICalcPlot$varData$spi$this.daty == this.daty) readVarData <- FALSE
if(readVarData){
if(EnvSPICalcPlot$output$params$data.type == "cdtstation"){
# Station data: pick the row for this date and grid the point values
# over the station lon/lat extent.
idt <- which(EnvSPICalcPlot$cdtdataset$date == this.daty)
x <- EnvSPICalcPlot$output$data$lon
y <- EnvSPICalcPlot$output$data$lat
tmp <- as.numeric(EnvSPICalcPlot$cdtdataset$spi[idt, ])
nx <- nx_ny_as.image(diff(range(x)))
ny <- nx_ny_as.image(diff(range(y)))
tmp <- cdt.as.image(tmp, nx = nx, ny = ny, pts.xy = cbind(x, y))
EnvSPICalcPlot$varData$map$x <- tmp$x
EnvSPICalcPlot$varData$map$y <- tmp$y
EnvSPICalcPlot$varData$map$z <- tmp$z
}else{
# Gridded data: read the first variable of spi_<date>.nc under the
# folder of the currently selected time-scale.
ipos <- which(EnvSPICalcPlot$varData$spi$disp %in% str_trim(tclvalue(EnvSPICalcPlot$spi.tscale)))
tscale.data <- EnvSPICalcPlot$varData$spi$dataF[ipos]
nc.file <- file.path(EnvSPICalcPlot$PathData, "DATA_NetCDF", tscale.data, paste0("spi_", this.daty, ".nc"))
nc <- nc_open(nc.file)
EnvSPICalcPlot$varData$map$x <- nc$dim[[1]]$vals
EnvSPICalcPlot$varData$map$y <- nc$dim[[2]]$vals
EnvSPICalcPlot$varData$map$z <- ncvar_get(nc, varid = nc$var[[1]]$name)
nc_close(nc)
}
# Remember which date the cached map belongs to.
EnvSPICalcPlot$varData$spi$this.daty <- this.daty
}
}
#################
# Locate the base dataset used by the time-scale visualization and cache it in
# EnvSPICalcPlot$spiViz. Monthly data are preferred; dekadal data are the
# fallback. The index (.rds) is re-read only when the cached time step differs.
# Returns NULL (with a GUI message) when neither folder exists, 0 otherwise.
get.Data.spiViz <- function(){
    candidates <- c(monthly = file.path(EnvSPICalcPlot$PathData, "MONTHLY_data"),
        dekadal = file.path(EnvSPICalcPlot$PathData, "DEKADAL_data"))
    avail <- which(file.exists(candidates))
    if(length(avail) == 0){
        InsertMessagesTxt(main.txt.out, 'No dekadal or monthly data found', format = TRUE)
        return(NULL)
    }
    # First match wins, so "monthly" takes precedence over "dekadal".
    viztstep <- names(candidates)[avail[1]]
    data.dir <- candidates[[avail[1]]]
    cached <- !is.null(EnvSPICalcPlot$spiViz) &&
        !is.null(EnvSPICalcPlot$spiViz$tstep) &&
        EnvSPICalcPlot$spiViz$tstep == viztstep
    if(!cached){
        index.file <- file.path(data.dir, paste0(basename(data.dir), ".rds"))
        EnvSPICalcPlot$spiViz$cdtdataset <- readRDS(index.file)
        EnvSPICalcPlot$spiViz$cdtdataset$fileInfo <- index.file
        EnvSPICalcPlot$spiViz$tstep <- viztstep
    }
    return(0)
}
#######################################################################################################
tcl('update')
tkgrid(cmd.frame, sticky = '', pady = 1)
tkgrid.columnconfigure(cmd.frame, 0, weight = 1)
######
return(cmd.frame)
}
|
####################################################
# This code loads required libraries using Pacman.#
# Author: Kevin Okiah @2017 #
####################################################
# NOTE(review): per pacman's documentation, p_load() also attempts to install
# any listed package that is missing (network side effect on first run).
# Listing `pacman` itself is redundant -- p_load already runs from pacman.
pacman::p_load(pacman,ggplot2,knitr, plyr, psych, taRifx, reshape2, rmarkdown)
| /analysis/load_libraries.R | no_license | kevin-okiah/US-Breweries-Data-EDA | R | false | false | 295 | r | ####################################################
# This code loads required libraries using Pacman.#
# Author: Kevin Okiah @2017 #
####################################################
# NOTE(review): per pacman's documentation, p_load() also attempts to install
# any listed package that is missing (network side effect on first run).
pacman::p_load(pacman,ggplot2,knitr, plyr, psych, taRifx, reshape2, rmarkdown)
|
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace -- avoid
# in scripts that may be source()d from an interactive session.
rm(list=ls())
library(ggplot2)
# NOTE(review): absolute Windows paths -- this only runs on the author's
# machine; consider parameterizing the data directory.
load("D:/ivan/ivan6-cell/Results/Result_getfromthePROGRAMME/LabAandCresults/LabAGene.RData")
# NOTE(review): distributionofrate() is defined further down this file, so
# sourcing top-to-bottom in a fresh session fails here -- these calls should
# come after the function definitions.
z=distributionofrate(LabAREP1Express,1,0.1,0.01)
z1=distributionofrate(LabAREP2Express,1,0.1,0.01)
z2=distributionofrate(LabAREP12Express,1,0.1,0.01)
load("D:/ivan/ivan6-cell/Results/Result_getfromthePROGRAMME/LabAandCresults/LabCGene.RData")
z=distributionofrate(LabCREP1Express,1,0.1,0.01)
z1=distributionofrate(LabCREP2Express,1,0.1,0.01)
z2=distributionofrate(LabCREP12Express,1,0.1,0.01)
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Arrange one or more plot objects on a single page using the grid package.
#
# ...      : plot objects given directly
# plotlist : optional list of additional plot objects
# file     : accepted for signature compatibility but unused
# cols     : number of layout columns when `layout` is not supplied
# layout   : optional matrix of plot indices; cell positions of index k give
#            the viewport of the k-th plot (overrides `cols`)
#
# A single plot is printed directly (returning print()'s value); multiple
# plots are each printed into their viewport of a fresh grid page.
multiplot <- function(..., plotlist = NULL, file, cols = 1, layout = NULL) {
    library(grid)
    all.plots <- c(list(...), plotlist)
    n.plots <- length(all.plots)
    if (is.null(layout)) {
        # Default layout: fill `cols` columns, as many rows as needed.
        n.rows <- ceiling(n.plots / cols)
        layout <- matrix(seq(1, cols * n.rows), ncol = cols, nrow = n.rows)
    }
    if (n.plots == 1) {
        print(all.plots[[1]])
    } else {
        grid.newpage()
        pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
        for (k in seq_len(n.plots)) {
            # Locate every layout cell holding index k and print there.
            pos <- as.data.frame(which(layout == k, arr.ind = TRUE))
            print(all.plots[[k]], vp = viewport(layout.pos.row = pos$row,
                layout.pos.col = pos$col))
        }
    }
}
#
#
# Defines the function that plots the distribution of the synthesis, processing
# and degradation rates, with the median marked by a vertical line.
# Plot log2 density distributions of the synthesis, processing and degradation
# rates stored in `Express`, each with the (linear-scale) median drawn as a red
# vertical line at log2(median).
#
# Express .......... data frame; cols 1:3 total RNA, 4:6 pre-RNA, 7:9 synthesis,
#                    10:12 degradation, 13:15 processing (layout inferred from
#                    the column ranges used below -- TODO confirm)
# totalThreshold ... rows with any total-RNA value <= this are discarded
# preRNAThreshold .. rows with any pre-RNA value <= this are discarded
# rateThreshold .... rows with any rate value <= this are discarded
#
# Side effects: prints the number of genes kept and the three medians, closes
# open graphics devices, and draws the three panels stacked via multiplot().
# Returns the value of multiplot().
distributionofrate <- function(Express, totalThreshold, preRNAThreshold, rateThreshold){
    # Rows failing a threshold are blanked to NA, then dropped by na.omit().
    # BUG FIX: the original `Express[apply(cmp, FUN = any, 1), ] = NA` built a
    # logical subscript that becomes NA for rows blanked by an earlier filter
    # (any(NA, NA, ...) is NA), and `[<-.data.frame` errors on NA subscripts
    # ("missing values are not allowed in subscripted assignments").
    # which() drops those NA entries; such rows are already all-NA and are
    # removed by na.omit() below anyway.
    rows.below <- function(cols, threshold)
        which(apply(Express[, cols] <= threshold, 1, any))
    Express[rows.below(1:3, totalThreshold), ] <- NA
    Express[rows.below(4:6, preRNAThreshold), ] <- NA
    Express[rows.below(7:15, rateThreshold), ] <- NA
    Express <- na.omit(Express)
    print(length(Express[, 1]))    # number of genes that survived filtering
    # One density panel per rate class: print the median of the raw rates,
    # then plot their log2 values with a red line at log2(median).
    # (The stray `y = ..density..` argument of the original ggplot() calls was
    # outside aes() and therefore not a mapping; it has been dropped.)
    density.panel <- function(cols, xlab){
        rates <- as.data.frame(as.numeric(unlist(Express[, cols])))
        med <- median(as.numeric(unlist(rates)))
        print(med)
        rates <- log2(rates)
        ggplot(rates, aes(x = rates)) +
            geom_density(data = rates, colour = "black", size = 1) +
            geom_vline(xintercept = log2(med), colour = "red") +
            xlim(-5, 5) +    # values outside [-5, 5] are dropped with a warning
            labs(x = xlab, y = "fraction of genes", title = "") +
            theme(panel.grid = element_blank(), panel.background = element_blank())
    }
    graphics.off()
    par(mfrow = c(1, 3))    # vestigial: grid/ggplot output ignores par(mfrow)
    p1 <- density.panel(7:9, "Gene's transcription rate ")
    p2 <- density.panel(13:15, "Gene's processing rate ")
    p3 <- density.panel(10:12, "Gene's degration rate ")    # (sic) label kept byte-identical
    mp <- multiplot(p1, p2, p3, cols = 1)
    return(mp)
}
# The generated figures are stored under:
# D:\ivan\ivan6-cell\Results\6-Figure 3-wzz\A-C
| /5_figure3A-C.R | no_license | bioWzz/Inspect | R | false | false | 4,538 | r | rm(list=ls())
library(ggplot2)
load("D:/ivan/ivan6-cell/Results/Result_getfromthePROGRAMME/LabAandCresults/LabAGene.RData")
z=distributionofrate(LabAREP1Express,1,0.1,0.01)
z1=distributionofrate(LabAREP2Express,1,0.1,0.01)
z2=distributionofrate(LabAREP12Express,1,0.1,0.01)
load("D:/ivan/ivan6-cell/Results/Result_getfromthePROGRAMME/LabAandCresults/LabCGene.RData")
z=distributionofrate(LabCREP1Express,1,0.1,0.01)
z1=distributionofrate(LabCREP2Express,1,0.1,0.01)
z2=distributionofrate(LabCREP12Express,1,0.1,0.01)
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Arrange one or more plot objects on a single page using the grid package.
#
# ...      : plot objects given directly
# plotlist : optional list of additional plot objects
# file     : accepted for signature compatibility but unused
# cols     : number of layout columns when `layout` is not supplied
# layout   : optional matrix of plot indices; cell positions of index k give
#            the viewport of the k-th plot (overrides `cols`)
#
# A single plot is printed directly (returning print()'s value); multiple
# plots are each printed into their viewport of a fresh grid page.
multiplot <- function(..., plotlist = NULL, file, cols = 1, layout = NULL) {
    library(grid)
    all.plots <- c(list(...), plotlist)
    n.plots <- length(all.plots)
    if (is.null(layout)) {
        # Default layout: fill `cols` columns, as many rows as needed.
        n.rows <- ceiling(n.plots / cols)
        layout <- matrix(seq(1, cols * n.rows), ncol = cols, nrow = n.rows)
    }
    if (n.plots == 1) {
        print(all.plots[[1]])
    } else {
        grid.newpage()
        pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
        for (k in seq_len(n.plots)) {
            # Locate every layout cell holding index k and print there.
            pos <- as.data.frame(which(layout == k, arr.ind = TRUE))
            print(all.plots[[k]], vp = viewport(layout.pos.row = pos$row,
                layout.pos.col = pos$col))
        }
    }
}
#
#
# Defines the function that plots the distribution of the synthesis, processing
# and degradation rates, with the median marked by a vertical line.
# Plot log2 density distributions of the synthesis, processing and degradation
# rates stored in `Express`, each with the (linear-scale) median drawn as a red
# vertical line at log2(median).
#
# Express .......... data frame; cols 1:3 total RNA, 4:6 pre-RNA, 7:9 synthesis,
#                    10:12 degradation, 13:15 processing (layout inferred from
#                    the column ranges used below -- TODO confirm)
# totalThreshold ... rows with any total-RNA value <= this are discarded
# preRNAThreshold .. rows with any pre-RNA value <= this are discarded
# rateThreshold .... rows with any rate value <= this are discarded
#
# Side effects: prints the number of genes kept and the three medians, closes
# open graphics devices, and draws the three panels stacked via multiplot().
# Returns the value of multiplot().
distributionofrate <- function(Express, totalThreshold, preRNAThreshold, rateThreshold){
    # Rows failing a threshold are blanked to NA, then dropped by na.omit().
    # BUG FIX: the original `Express[apply(cmp, FUN = any, 1), ] = NA` built a
    # logical subscript that becomes NA for rows blanked by an earlier filter
    # (any(NA, NA, ...) is NA), and `[<-.data.frame` errors on NA subscripts
    # ("missing values are not allowed in subscripted assignments").
    # which() drops those NA entries; such rows are already all-NA and are
    # removed by na.omit() below anyway.
    rows.below <- function(cols, threshold)
        which(apply(Express[, cols] <= threshold, 1, any))
    Express[rows.below(1:3, totalThreshold), ] <- NA
    Express[rows.below(4:6, preRNAThreshold), ] <- NA
    Express[rows.below(7:15, rateThreshold), ] <- NA
    Express <- na.omit(Express)
    print(length(Express[, 1]))    # number of genes that survived filtering
    # One density panel per rate class: print the median of the raw rates,
    # then plot their log2 values with a red line at log2(median).
    # (The stray `y = ..density..` argument of the original ggplot() calls was
    # outside aes() and therefore not a mapping; it has been dropped.)
    density.panel <- function(cols, xlab){
        rates <- as.data.frame(as.numeric(unlist(Express[, cols])))
        med <- median(as.numeric(unlist(rates)))
        print(med)
        rates <- log2(rates)
        ggplot(rates, aes(x = rates)) +
            geom_density(data = rates, colour = "black", size = 1) +
            geom_vline(xintercept = log2(med), colour = "red") +
            xlim(-5, 5) +    # values outside [-5, 5] are dropped with a warning
            labs(x = xlab, y = "fraction of genes", title = "") +
            theme(panel.grid = element_blank(), panel.background = element_blank())
    }
    graphics.off()
    par(mfrow = c(1, 3))    # vestigial: grid/ggplot output ignores par(mfrow)
    p1 <- density.panel(7:9, "Gene's transcription rate ")
    p2 <- density.panel(13:15, "Gene's processing rate ")
    p3 <- density.panel(10:12, "Gene's degration rate ")    # (sic) label kept byte-identical
    mp <- multiplot(p1, p2, p3, cols = 1)
    return(mp)
}
# ็ๆ็็ปๆๅญๅจๅจ
# D:\ivan\ivan6-cell\Results\6-Figure 3-wzz\A-C
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_csvy.R
\name{write_csvy}
\alias{write_csvy}
\title{Export CSVY data}
\usage{
write_csvy(x, file, metadata = NULL, sep = ",", sep2 = ".",
comment_header = if (is.null(metadata)) TRUE else FALSE,
name = as.character(substitute(x)), ...)
}
\arguments{
\item{x}{A data.frame.}
\item{file}{A character string or R connection specifying a file.}
\item{metadata}{Optionally, a character string specifying a YAML (\dQuote{.yaml}) or JSON (\dQuote{.json}) file to write the metadata (in lieu of including it in the header of the file).}
\item{sep}{A character string specifying a between-field separator. Passed to \code{\link[data.table]{fwrite}}.}
\item{sep2}{A character string specifying a within-field separator. Passed to \code{\link[data.table]{fwrite}}.}
\item{comment_header}{A logical indicating whether to comment the lines containing the YAML front matter. Default is \code{TRUE}.}
\item{name}{A character string specifying a name for the dataset.}
\item{\dots}{Additional arguments passed to \code{\link[data.table]{fwrite}}.}
}
\description{
Export data.frame to CSVY
}
\examples{
library("datasets")
write_csvy(head(iris))
# write YAML without comment characters
write_csvy(head(iris), comment_header = FALSE)
}
\seealso{
\code{\link{write_csvy}}
}
| /man/write_csvy.Rd | no_license | jonocarroll/csvy | R | false | true | 1,347 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_csvy.R
\name{write_csvy}
\alias{write_csvy}
\title{Export CSVY data}
\usage{
write_csvy(x, file, metadata = NULL, sep = ",", sep2 = ".",
comment_header = if (is.null(metadata)) TRUE else FALSE,
name = as.character(substitute(x)), ...)
}
\arguments{
\item{x}{A data.frame.}
\item{file}{A character string or R connection specifying a file.}
\item{metadata}{Optionally, a character string specifying a YAML (\dQuote{.yaml}) or JSON (\dQuote{.json}) file to write the metadata (in lieu of including it in the header of the file).}
\item{sep}{A character string specifying a between-field separator. Passed to \code{\link[data.table]{fwrite}}.}
\item{sep2}{A character string specifying a within-field separator. Passed to \code{\link[data.table]{fwrite}}.}
\item{comment_header}{A logical indicating whether to comment the lines containing the YAML front matter. Default is \code{TRUE}.}
\item{name}{A character string specifying a name for the dataset.}
\item{\dots}{Additional arguments passed to \code{\link[data.table]{fwrite}}.}
}
\description{
Export data.frame to CSVY
}
\examples{
library("datasets")
write_csvy(head(iris))
# write YAML without comment characters
write_csvy(head(iris), comment_header = FALSE)
}
\seealso{
\code{\link{write_csvy}}
}
|
#! /usr/bin/R --vanilla -f
# Benchmark-plotting script: loads the measurement data frame `mdat` and the
# update.myopts() helper, then writes CPU-, size- and lane-scaling plots as
# PDFs under graphs/.
source('load_data.R')
source('panel_fcts.R')
library(lattice)
### Display hist only diag
## Not yet finished. TODO: legend (& input size normalization?)
xyplot(time ~ factor(par), groups = interaction(arch,imptype),
subset = ave(size, btype, machine, FUN = max) == size & lanef == 1 & btype == "Histogram",
data = mdat,
pch = rep(c(1,2,4,5), 3),
lty = rep(c(1,2,3), c(3,3,3)),
type = "o",
col = "black",
scale = list(y = list(log = 10)),
legend = list(
top = list(
fun = draw.key,
x = -10,
y = 0,
args = list(key = list(
points = list(col = 1:2),
text = list("foo","bar"))
)
),
bottom = list(
fun = draw.key,
x = -10,
y = 0,
args = list(key = list(
points = list(col = 1:2),
text = list("foo","bar"))
)
)
)
)
### Display CPU-scaling of Qs
# size == max & lanef == 1
# NOTE(review): when this file is source()d, lattice objects are not
# auto-printed; presumably update.myopts() (from panel_fcts.R) re-draws the
# last trellis object into the PDF device opened below -- confirm.
xyplot(time ~ factor(par) | arch * btype,
data = mdat,
groups = imptype,
subset = ave(size, btype, machine, FUN = max) == size & lanef == 1,
scales = list(y = list(log = 10)),
xlab = "Number of CPUs"
)
pdf("graphs/cpu-scaling.pdf", height = 10)
update.myopts(key.labels = levels(mdat$imptype),5,3)
dev.off()
### Display Size-scaling of Qs
# par == 1 & lanef == 1
xyplot(time ~ size | arch * btype,
data = mdat,
groups = imptype,
subset = par == 1 & lanef == 1 & btype != "Comm" & btype != "Histogram",
xlab = "Size of Benchmark")
pdf("graphs/size-scaling-par1.pdf", height = 5)
update.myopts(key.labels = levels(mdat$imptype),3,3)
dev.off()
# par == 8 & lanef == 1
# NOTE(review): the trailing comma after xlab leaves an empty trailing
# argument in this call -- verify it is harmless with this lattice version.
xyplot(time ~ size | arch * btype,
data = mdat,
groups = imptype,
subset = par == 8 & lanef == 1 & btype != "Comm" & btype != "Histogram",
xlab = "Size of Benchmark",
)
pdf("graphs/size-scaling-par8.pdf", height = 5)
update.myopts(key.labels = levels(mdat$imptype),3,3)
dev.off()
### Display lane-factor-scaling of Qs
# par == 8 & size == max
xyplot(time ~ factor(lanef) | btype,
data = mdat,
group = arch,
subset = par == 8 &
(ave(size, btype, machine, FUN = max) == size |
btype == "Comm" & size == 100000000) &
machine != "wolf" & imptype == "Multi-Lane FlowPool",
xlab = "Number of Lanes per Inserting Thread"
)
pdf("graphs/lanef-scaling.pdf", height = 5)
update.myopts(key.labels = levels(mdat$arch)[1:2], matrix = FALSE)
dev.off()
| /benchmarks/flowPools/plots.R | no_license | heathermiller/scala-dataflow | R | false | false | 2,716 | r | #! /usr/bin/R --vanilla -f
# Benchmark-plotting script: loads the measurement data frame `mdat` and the
# update.myopts() helper, then writes CPU-, size- and lane-scaling plots as
# PDFs under graphs/.
source('load_data.R')
source('panel_fcts.R')
library(lattice)
### Display hist only diag
## Not yet finished. TODO: legend (& input size normalization?)
xyplot(time ~ factor(par), groups = interaction(arch,imptype),
subset = ave(size, btype, machine, FUN = max) == size & lanef == 1 & btype == "Histogram",
data = mdat,
pch = rep(c(1,2,4,5), 3),
lty = rep(c(1,2,3), c(3,3,3)),
type = "o",
col = "black",
scale = list(y = list(log = 10)),
legend = list(
top = list(
fun = draw.key,
x = -10,
y = 0,
args = list(key = list(
points = list(col = 1:2),
text = list("foo","bar"))
)
),
bottom = list(
fun = draw.key,
x = -10,
y = 0,
args = list(key = list(
points = list(col = 1:2),
text = list("foo","bar"))
)
)
)
)
### Display CPU-scaling of Qs
# size == max & lanef == 1
# NOTE(review): when this file is source()d, lattice objects are not
# auto-printed; presumably update.myopts() (from panel_fcts.R) re-draws the
# last trellis object into the PDF device opened below -- confirm.
xyplot(time ~ factor(par) | arch * btype,
data = mdat,
groups = imptype,
subset = ave(size, btype, machine, FUN = max) == size & lanef == 1,
scales = list(y = list(log = 10)),
xlab = "Number of CPUs"
)
pdf("graphs/cpu-scaling.pdf", height = 10)
update.myopts(key.labels = levels(mdat$imptype),5,3)
dev.off()
### Display Size-scaling of Qs
# par == 1 & lanef == 1
xyplot(time ~ size | arch * btype,
data = mdat,
groups = imptype,
subset = par == 1 & lanef == 1 & btype != "Comm" & btype != "Histogram",
xlab = "Size of Benchmark")
pdf("graphs/size-scaling-par1.pdf", height = 5)
update.myopts(key.labels = levels(mdat$imptype),3,3)
dev.off()
# par == 8 & lanef == 1
# NOTE(review): the trailing comma after xlab leaves an empty trailing
# argument in this call -- verify it is harmless with this lattice version.
xyplot(time ~ size | arch * btype,
data = mdat,
groups = imptype,
subset = par == 8 & lanef == 1 & btype != "Comm" & btype != "Histogram",
xlab = "Size of Benchmark",
)
pdf("graphs/size-scaling-par8.pdf", height = 5)
update.myopts(key.labels = levels(mdat$imptype),3,3)
dev.off()
### Display lane-factor-scaling of Qs
# par == 8 & size == max
xyplot(time ~ factor(lanef) | btype,
data = mdat,
group = arch,
subset = par == 8 &
(ave(size, btype, machine, FUN = max) == size |
btype == "Comm" & size == 100000000) &
machine != "wolf" & imptype == "Multi-Lane FlowPool",
xlab = "Number of Lanes per Inserting Thread"
)
pdf("graphs/lanef-scaling.pdf", height = 5)
update.myopts(key.labels = levels(mdat$arch)[1:2], matrix = FALSE)
dev.off()
|
# Constructs a cache-aware wrapper around a matrix. The returned list exposes
# four accessors:
#   set(y)          -- replace the stored matrix (and invalidate the cache)
#   get()           -- retrieve the stored matrix
#   setinverse(inv) -- store a computed inverse in the cache
#   getinverse()    -- retrieve the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # stored matrix changed, so drop the stale inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# cacheSolve returns the inverse of the special "matrix" created by
# makeCacheMatrix. It first checks whether the inverse has already been
# computed; if so, the cached result is returned and the computation is
# skipped. Otherwise it computes the inverse with solve(), stores it in the
# cache via setinverse(), and returns it.
#
# This function assumes that the matrix is always invertible (solve() will
# error otherwise).
#
# x   : a cache object as returned by makeCacheMatrix
# ... : additional arguments forwarded to solve() (e.g. tol)
cacheSolve <- function(x, ...) {
    inv <- x$getinverse()
    if(!is.null(inv)) {
        message("getting cached data.")
        return(inv)
    }
    data <- x$get()
    # Fix: forward `...` to solve(); previously extra arguments such as `tol`
    # were accepted by cacheSolve but silently ignored.
    inv <- solve(data, ...)
    x$setinverse(inv)
    inv
}
# Constructs a cache-aware wrapper around a matrix. The returned list exposes
# four accessors:
#   set(y)          -- replace the stored matrix (and invalidate the cache)
#   get()           -- retrieve the stored matrix
#   setinverse(inv) -- store a computed inverse in the cache
#   getinverse()    -- retrieve the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # stored matrix changed, so drop the stale inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# cacheSolve returns the inverse of the special "matrix" created by
# makeCacheMatrix. It first checks whether the inverse has already been
# computed; if so, the cached result is returned and the computation is
# skipped. Otherwise it computes the inverse with solve(), stores it in the
# cache via setinverse(), and returns it.
#
# This function assumes that the matrix is always invertible (solve() will
# error otherwise).
#
# x   : a cache object as returned by makeCacheMatrix
# ... : additional arguments forwarded to solve() (e.g. tol)
cacheSolve <- function(x, ...) {
    inv <- x$getinverse()
    if(!is.null(inv)) {
        message("getting cached data.")
        return(inv)
    }
    data <- x$get()
    # Fix: forward `...` to solve(); previously extra arguments such as `tol`
    # were accepted by cacheSolve but silently ignored.
    inv <- solve(data, ...)
    x$setinverse(inv)
    inv
}
## Function downloaded from Joey Berhardt's Numerical Ecology repository on github
## Feb. 16, 2017
## https://github.com/JoeyBernhardt/NumericalEcology/blob/master/plot.links.R
plot.links <- function(XY, D.mat=NULL, thresh=0.05, xlim=NULL, ylim=NULL)
#
# Plot a PCoA graph (map) from XY = a table of Cartesian coordinates. On the
# map, draw lines corresponding to values below a dissimilarity threshold.
#
# Parameters:
#
# XY = file of Cartesian coordinates
# D.mat = distance matrix provided by user.
# if D.mat=NULL, D.mat will be computed from the Cartesian coordinates.
# thresh = plot links up to and including that distance
# xlim, ylim = drawing limits in abscissa and ordinate
#
# Cartesian coordinates can be obtained from Lat-Lon data using the
# function geoXY() of library SoDA.
#
# Example from Chapter 7:
# plot.links(mite.xy, thresh=1.0112)
#
# License: GPL-2
# Author:: Pierre Legendre, 2010
#
{
# Default to Euclidean distances between the XY coordinates when no distance
# matrix is supplied; coerce to a full matrix so [j, jj] indexing works below.
if(is.null(D.mat)) D.mat <- dist(XY)
D.mat <- as.matrix(D.mat)
# Base map: one point per site, labelled with its row name.
par(mai=c(1.0, 1.0, 1.0, 0.5))
plot(XY, type="p", xlim=xlim, ylim=ylim, asp=1, xlab="Easting",
ylab="Northing")
text(XY, labels=rownames(XY), pos=2)
title(main=c("Linkage map", paste("D <=", thresh)))
# Connect every pair of sites whose distance lies in (0, thresh].
# NOTE(review): 1:(n-1) misbehaves for n < 2 (it yields c(1, 0) when n == 1);
# kept verbatim to stay in sync with the upstream source.
n <- nrow(XY)
for(j in 1:(n-1))
{
for(jj in (j+1):n)
{
if((D.mat[j,jj] <= thresh)&(D.mat[j,jj] > 0))
lines(c(XY[j,1], XY[jj,1]), c(XY[j,2], XY[jj,2]))
}
}
}
| /scripts/plot.links.R | no_license | eabowman/Workshop-2017.02.20-Spatial-Analysis | R | false | false | 1,386 | r | ## Function downloaded from Joey Berhardt's Numerical Ecology repository on github
## Feb. 16, 2017
## https://github.com/JoeyBernhardt/NumericalEcology/blob/master/plot.links.R
plot.links <- function(XY, D.mat=NULL, thresh=0.05, xlim=NULL, ylim=NULL)
#
# Plot a PCoA graph (map) from XY = a table of Cartesian coordinates. On the
# map, draw lines corresponding to values below a dissimilarity threshold.
#
# Parameters:
#
# XY = file of Cartesian coordinates
# D.mat = distance matrix provided by user.
# if D.mat=NULL, D.mat will be computed from the Cartesian coordinates.
# thresh = plot links up to and including that distance
# xlim, ylim = drawing limits in abscissa and ordinate
#
# Cartesian coordinates can be obtained from Lat-Lon data using the
# function geoXY() of library SoDA.
#
# Example from Chapter 7:
# plot.links(mite.xy, thresh=1.0112)
#
# License: GPL-2
# Author:: Pierre Legendre, 2010
#
{
# Default to Euclidean distances between the XY coordinates when no distance
# matrix is supplied; coerce to a full matrix so [j, jj] indexing works below.
if(is.null(D.mat)) D.mat <- dist(XY)
D.mat <- as.matrix(D.mat)
# Base map: one point per site, labelled with its row name.
par(mai=c(1.0, 1.0, 1.0, 0.5))
plot(XY, type="p", xlim=xlim, ylim=ylim, asp=1, xlab="Easting",
ylab="Northing")
text(XY, labels=rownames(XY), pos=2)
title(main=c("Linkage map", paste("D <=", thresh)))
# Connect every pair of sites whose distance lies in (0, thresh].
# NOTE(review): 1:(n-1) misbehaves for n < 2 (it yields c(1, 0) when n == 1);
# kept verbatim to stay in sync with the upstream source.
n <- nrow(XY)
for(j in 1:(n-1))
{
for(jj in (j+1):n)
{
if((D.mat[j,jj] <= thresh)&(D.mat[j,jj] > 0))
lines(c(XY[j,1], XY[jj,1]), c(XY[j,2], XY[jj,2]))
}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/obj_find.r
\name{obj_find}
\alias{obj_find}
\title{Find an object in the workspace including user-defined environments}
\usage{
obj_find(
obj,
envir = NULL,
envmap = NULL,
globalsearch = TRUE,
n = 0,
return_address = FALSE,
include_functions = FALSE,
silent = TRUE
)
}
\arguments{
\item{obj}{object to be searched given as the object itself or as a character string. If given as an object,
expressions are accepted (see details on how expressions are handled).}
\item{envir}{environment where the search for \code{obj} should be carried out.
It defaults to \code{NULL} which means \code{obj} is searched in the calling environment
(i.e. in the environment calling this function), unless \code{globalsearch=TRUE} in which case
it is searched in the whole workspace.}
\item{envmap}{data frame containing a lookup table with name-address pairs of environment names and
addresses to be used when searching for environment \code{env}. It defaults to \code{NULL} which means that the
lookup table is constructed on the fly with the environments defined in the \code{envir} environment
--if not \code{NULL}--, or in the whole workspace if \code{envir=NULL}.
See the details section for more information on its structure.}
\item{globalsearch}{when \code{envir=NULL} it specifies whether the search for \code{obj} should be done
globally, i.e. in the whole workspace, or just within the calling environment.}
\item{n}{non-negative integer indicating the number of levels to go up from the calling function environment
to evaluate \code{obj}. It defaults to 0 which implies that \code{obj} is evaluated in the environment
of the calling function (i.e. the function that calls \code{obj_find()}).}
\item{return_address}{whether to return the address of the environments where the object is found in addition
to their names.}
\item{include_functions}{whether to include function execution environments as environments where the object
is searched for. Set this flag to \code{TRUE} with caution because there may be several functions where the
same object is defined, for instance functions that are called as part of the object searching process!}
\item{silent}{run in silent mode? If not, the search history is shown,
listing all the environments that are searched for object \code{obj}. It defaults to TRUE.}
}
\value{
The return value depends on the value of parameter \code{return_address}: when \code{FALSE}
(the default) it returns an array containing the names of the environments where the object \code{obj}
is found; when \code{TRUE} it returns a list with two attributes: \code{"env_full_names"} and
\code{"env_addresses"} with respectively the environment names and addresses where the object is found.
}
\description{
Look for an object in the whole workspace including all environments defined within it
(possibly recursively) and return ALL the environment(s) where the object is found.
User-defined environments are also searched.
Note that both the "recursive search" and the "user-defined environments search" makes this function
quite different from functions \link{find} and \link{exists} of the base package.
Optionally, the search can be limited to a specified environment, as opposed to carrying it out in the whole workspace.
Still, all user-defined environments defined inside the specified environment are searched.
}
\details{
An object is found in an environment if it is reachable from within that environment. An object is considered
reachable in an environment if either one of the following occurs:
\itemize{
\item it exists in the given environment
\item it exists in a user-defined environment defined inside the given environment or in any environment
recursively defined inside them
}
Note that \code{obj_find} differs from base functions \code{find} and \code{exists} in that \code{obj_find}
searches for the object inside user-defined environments within any given environment in a \emph{recursive} way.
In particular, compared to:
\itemize{
\item{\code{find}:} \code{obj_find} searches for objects inside user-defined environments while \code{find} is not
able to do so (see examples).
\item{\code{exists}:} \code{obj_find} \emph{never} searches for objects in the parent environment of \code{envir}
when \code{envir} is not \code{NULL}, as is the case with the \code{exists} function when its \code{inherits}
parameter is set to \code{TRUE} (the default).
If it is wished to search for objects in parent environments, simply set \code{envir=NULL}
and \code{globalsearch=TRUE}, in which case the object will be searched in the whole workspace
and the environments where it is found will be returned.
}
When the object is found, an array containing the names of all the environments where the object is found is
returned.
When \code{envir} is not \code{NULL} attached packages are not included in the search for \code{obj},
unless of course \code{envir} is itself a package environment.
When given as an object, \code{obj} can be an expression. If the expression is an attribute of a list
or an array element, the object contained therein is searched for.
Ex: if \code{alist$var = "x"} then object \code{x} is searched.
If \code{envmap} is passed it should be a data frame providing an address-name pair lookup table
of environments and should contain at least the following columns:
\itemize{
\item{\code{location}} for user-defined environments, the name of the environment where the environment
is located; otherwise \code{NA}.
\item{\code{pathname}} the full \emph{environment path} to reach the environment separated by \code{$}
(e.g. \code{"env1$env$envx"})
\item{\code{address}} the 8-digit (32-bit architectures) thru 16-digit (64-bit architectures) memory address
of the environment given in \code{pathname} enclosed in < > (e.g. \code{"<0000000007DCFB38>"}
(64-bit architectures))
Beware that Linux Debian distributions may have a 12-digit memory address representation.
So the best way to know is to check a memory address by calling e.g. `address("x")`.
}
Passing an \code{envmap} lookup table is useful for speedup purposes, in case several calls to this
function will be performed in the context of an unchanged set of defined environments.
Such \code{envmap} data frame can be created by calling \link{get_env_names}.
Use this parameter with care, as the matrix passed may not correspond to the actual mapping of existing
environments to their addresses and in that case results may be different from those expected.
}
\examples{
# Define a variable in the global environment
x = 4
# Create new environments, some nested
env1 = new.env()
with(env1, envx <- new.env())
env1$x = 3
env1$envx$x = 2
env1$y = 5
# Look for objects (crawling environments recursively)
obj_find(x) # "env1" "env1$envx" "R_GlobalEnv"
obj_find("x") # "env1" "env1$envx" "R_GlobalEnv"
obj_find("x", envir=env1) # "env1" "envx" (as the search is limited to the env1 environment)
obj_find("y") # "env1"
obj_find(nonexistent) # NULL (note that NO error is raised even if the object does not exist)
}
| /man/obj_find.Rd | no_license | cran/envnames | R | false | true | 7,174 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/obj_find.r
\name{obj_find}
\alias{obj_find}
\title{Find an object in the workspace including user-defined environments}
\usage{
obj_find(
obj,
envir = NULL,
envmap = NULL,
globalsearch = TRUE,
n = 0,
return_address = FALSE,
include_functions = FALSE,
silent = TRUE
)
}
\arguments{
\item{obj}{object to be searched given as the object itself or as a character string. If given as an object,
expressions are accepted (see details on how expressions are handled).}
\item{envir}{environment where the search for \code{obj} should be carried out.
It defaults to \code{NULL} which means \code{obj} is searched in the calling environment
(i.e. in the environment calling this function), unless \code{globalsearch=TRUE} in which case
it is searched in the whole workspace.}
\item{envmap}{data frame containing a lookup table with name-address pairs of environment names and
addresses to be used when searching for environment \code{env}. It defaults to \code{NULL} which means that the
lookup table is constructed on the fly with the environments defined in the \code{envir} environment
--if not \code{NULL}--, or in the whole workspace if \code{envir=NULL}.
See the details section for more information on its structure.}
\item{globalsearch}{when \code{envir=NULL} it specifies whether the search for \code{obj} should be done
globally, i.e. in the whole workspace, or just within the calling environment.}
\item{n}{non-negative integer indicating the number of levels to go up from the calling function environment
to evaluate \code{obj}. It defaults to 0 which implies that \code{obj} is evaluated in the environment
of the calling function (i.e. the function that calls \code{obj_find()}).}
\item{return_address}{whether to return the address of the environments where the object is found in addition
to their names.}
\item{include_functions}{whether to include function execution environments as environments where the object
is searched for. Set this flag to \code{TRUE} with caution because there may be several functions where the
same object is defined, for instance functions that are called as part of the object searching process!}
\item{silent}{run in silent mode? If not, the search history is shown,
listing all the environments that are searched for object \code{obj}. It defaults to TRUE.}
}
\value{
The return value depends on the value of parameter \code{return_address}: when \code{FALSE}
(the default) it returns an array containing the names of the environments where the object \code{obj}
is found; when \code{TRUE} it returns a list with two attributes: \code{"env_full_names"} and
\code{"env_addresses"} with respectively the environment names and addresses where the object is found.
}
\description{
Look for an object in the whole workspace including all environments defined within it
(possibly recursively) and return ALL the environment(s) where the object is found.
User-defined environments are also searched.
Note that both the "recursive search" and the "user-defined environments search" makes this function
quite different from functions \link{find} and \link{exists} of the base package.
Optionally, the search can be limited to a specified environment, as opposed to carrying it out in the whole workspace.
Still, all user-defined environments defined inside the specified environment are searched.
}
\details{
An object is found in an environment if it is reachable from within that environment. An object is considered
reachable in an environment if either one of the following occurs:
\itemize{
\item it exists in the given environment
\item it exists in a user-defined environment defined inside the given environment or in any environment
recursively defined inside them
}
Note that \code{obj_find} differs from base functions \code{find} and \code{exists} in that \code{obj_find}
searches for the object inside user-defined environments within any given environment in a \emph{recursive} way.
In particular, compared to:
\itemize{
\item{\code{find}:} \code{obj_find} searches for objects inside user-defined environments while \code{find} is not
able to do so (see examples).
\item{\code{exists}:} \code{obj_find} \emph{never} searches for objects in the parent environment of \code{envir}
when \code{envir} is not \code{NULL}, as is the case with the \code{exists} function when its \code{inherits}
parameter is set to \code{TRUE} (the default).
If it is wished to search for objects in parent environments, simply set \code{envir=NULL}
and \code{globalsearch=TRUE}, in which case the object will be searched in the whole workspace
and the environments where it is found will be returned.
}
When the object is found, an array containing the names of all the environments where the object is found is
returned.
When \code{envir} is not \code{NULL} attached packages are not included in the search for \code{obj},
unless of course \code{envir} is itself a package environment.
When given as an object, \code{obj} can be an expression. If the expression is an attribute of a list
or an array element, the object contained therein is searched for.
Ex: if \code{alist$var = "x"} then object \code{x} is searched.
If \code{envmap} is passed it should be a data frame providing an address-name pair lookup table
of environments and should contain at least the following columns:
\itemize{
\item{\code{location}} for user-defined environments, the name of the environment where the environment
is located; otherwise \code{NA}.
\item{\code{pathname}} the full \emph{environment path} to reach the environment separated by \code{$}
(e.g. \code{"env1$env$envx"})
\item{\code{address}} the 8-digit (32-bit architectures) thru 16-digit (64-bit architectures) memory address
of the environment given in \code{pathname} enclosed in < > (e.g. \code{"<0000000007DCFB38>"}
(64-bit architectures))
Be ware that Linux Debian distributions may have a 12-digit memory address representation.
So the best way to know is to check a memory address by calling e.g. `address("x")`.
}
Passing an \code{envmap} lookup table is useful for speedup purposes, in case several calls to this
function will be performed in the context of an unchanged set of defined environments.
Such \code{envmap} data frame can be created by calling \link{get_env_names}.
Use this parameter with care, as the matrix passed may not correspond to the actual mapping of existing
environments to their addresses and in that case results may be different from those expected.
}
\examples{
# Define a variable in the global environment
x = 4
# Create new environments, some nested
env1 = new.env()
with(env1, envx <- new.env())
env1$x = 3
env1$envx$x = 2
env1$y = 5
# Look for objects (crawling environments recursively)
obj_find(x) # "env1" "env1$envx" "R_GlobalEnv"
obj_find("x") # "env1" "env1$envx" "R_GlobalEnv"
obj_find("x", envir=env1) # "env1" "envx" (as the search is limited to the env1 environment)
obj_find("y") # "env1"
obj_find(nonexistent) # NULL (note that NO error is raised even if the object does not exist)
}
|
library(shiny)

# Seed and rate parameter for the exponential-distribution simulation
# (lambda is presumably shared with the companion server.R -- confirm).
set.seed(1000)
lambda = 0.2

# UI: two sliders (sample size and number of simulations), a histogram and
# four text outputs comparing sample vs theoretical mean/sd.
# Fixes: removed the stray trailing commas inside the sliderInput() calls
# (they produced empty trailing arguments) and corrected the user-facing
# typo "Theretical" -> "Theoretical".
shinyUI(pageWithSidebar(
  headerPanel("Investigation of Exponential Distribution"),
  sidebarPanel(
    sliderInput('n', 'Number of observations', value = 50, min = 5, max = 100, step = 1),
    sliderInput('sim', 'Number of Simulations', value = 5000, min = 100, max = 10000, step = 10)
  ),
  mainPanel(
    plotOutput('newHist'),
    h5('Sample Mean'),
    verbatimTextOutput("samplemean"),
    h5('Theoretical Mean'),
    verbatimTextOutput("theoreticalmean"),
    h5('Expected Standard Deviation'),
    verbatimTextOutput("esd"),
    h5('Theoretical Standard Deviation'),
    verbatimTextOutput("tsd")
  )
))
library(shiny)

# Seed and rate parameter for the exponential-distribution simulation
# (lambda is presumably shared with the companion server.R -- confirm).
set.seed(1000)
lambda = 0.2

# UI: two sliders (sample size and number of simulations), a histogram and
# four text outputs comparing sample vs theoretical mean/sd.
# Fixes: removed the stray trailing commas inside the sliderInput() calls
# (they produced empty trailing arguments) and corrected the user-facing
# typo "Theretical" -> "Theoretical".
shinyUI(pageWithSidebar(
  headerPanel("Investigation of Exponential Distribution"),
  sidebarPanel(
    sliderInput('n', 'Number of observations', value = 50, min = 5, max = 100, step = 1),
    sliderInput('sim', 'Number of Simulations', value = 5000, min = 100, max = 10000, step = 10)
  ),
  mainPanel(
    plotOutput('newHist'),
    h5('Sample Mean'),
    verbatimTextOutput("samplemean"),
    h5('Theoretical Mean'),
    verbatimTextOutput("theoreticalmean"),
    h5('Expected Standard Deviation'),
    verbatimTextOutput("esd"),
    h5('Theoretical Standard Deviation'),
    verbatimTextOutput("tsd")
  )
))
library(igraph)
library(RColorBrewer)
# create data:
# Edge list for the network. `importance` is drawn at random (1..3) and is
# not used anywhere in the plot below.
# NOTE(review): replace=T uses the reassignable shorthand T; prefer TRUE.
links=data.frame(
source=c("China","China","Mongolia", "China", "Kazakhstan", "Russia","China", "Malaysia", "Indonesia", "Sri Lanka","Poland"),
target=c("Pakistan","Mongolia","Russia", "Kazakhstan", "Poland", "Kazakhstan","Malaysia","Indonesia", "Sri Lanka", "Kenya","Greece"),
importance=(sample(1:3,11,replace=T))
)
# One row per country; Continent groups the 11 countries into 5 regions
# (rep counts 1+3+3+3+1 line up with the Country vector).
nodes=data.frame(Country=c("China","Mongolia","Kazakhstan","Pakistan","Russia","Poland","Greece","Malaysia","Indonesia","Sri Lanka","Kenya"),
Continent=c(rep("China",1),rep("Central Asia",3), rep("Europe",3),rep("South Asia",3),rep("Africa",1))
)
#plot:
# Build an undirected graph; vertex size scales with degree and colour
# encodes the Continent grouping via the Set2 palette.
network=graph_from_data_frame(d=links, vertices=nodes, directed=F)
deg=degree(network, mode="all")
coul = brewer.pal(5, "Set2")
my_color=coul[as.numeric(as.factor(V(network)$Continent))]
plot(network,
vertex.shape="circle",
vertex.size=9*deg,
vertex.color=my_color,
vertex.frame.color="transparent",
vertex.label.color="black"
)
# Caption placed in plot coordinates below the network.
text(-1.5,-1.5,"China One Belt One Road Initiative",col="black")
## JD: What is this meant to be a network of? I'm pretty sure the initiative doesn't contain a road from Sri Lanka to Kenya!
## http://www.scmp.com/infographics/article/1874865/infographic-one-belt-one-road
## I think that it is a maritime silk road of 21st century, but I did not include every single country that is involved.
| /HW8_Network.R | no_license | angelawyq/stat744 | R | false | false | 1,400 | r | library(igraph)
library(igraph)
library(RColorBrewer)
# create data:
# Edge list for the network. `importance` is drawn at random (1..3) and is
# not used anywhere in the plot below.
# NOTE(review): replace=T uses the reassignable shorthand T; prefer TRUE.
links=data.frame(
source=c("China","China","Mongolia", "China", "Kazakhstan", "Russia","China", "Malaysia", "Indonesia", "Sri Lanka","Poland"),
target=c("Pakistan","Mongolia","Russia", "Kazakhstan", "Poland", "Kazakhstan","Malaysia","Indonesia", "Sri Lanka", "Kenya","Greece"),
importance=(sample(1:3,11,replace=T))
)
# One row per country; Continent groups the 11 countries into 5 regions
# (rep counts 1+3+3+3+1 line up with the Country vector).
nodes=data.frame(Country=c("China","Mongolia","Kazakhstan","Pakistan","Russia","Poland","Greece","Malaysia","Indonesia","Sri Lanka","Kenya"),
Continent=c(rep("China",1),rep("Central Asia",3), rep("Europe",3),rep("South Asia",3),rep("Africa",1))
)
#plot:
# Build an undirected graph; vertex size scales with degree and colour
# encodes the Continent grouping via the Set2 palette.
network=graph_from_data_frame(d=links, vertices=nodes, directed=F)
deg=degree(network, mode="all")
coul = brewer.pal(5, "Set2")
my_color=coul[as.numeric(as.factor(V(network)$Continent))]
plot(network,
vertex.shape="circle",
vertex.size=9*deg,
vertex.color=my_color,
vertex.frame.color="transparent",
vertex.label.color="black"
)
# Caption placed in plot coordinates below the network.
text(-1.5,-1.5,"China One Belt One Road Initiative",col="black")
## JD: What is this meant to be a network of? I'm pretty sure the initiative doesn't contain a road from Sri Lanka to Kenya!
## http://www.scmp.com/infographics/article/1874865/infographic-one-belt-one-road
## I think that it is a maritime silk road of 21st century, but I did not include every single country that is involved.
|
### Codes used in the hypothesis testing lecture (lecture 12)
## please run codes in data_input.r first, you can choose "source" option to run all of them
## this is important, as we are going to use data created in data_input.r file
## NOTE(review): this script assumes the workspace objects `sales`, `diamond`
## and `price` created by data_input.r already exist; it errors otherwise.
# 1. t-test for single mean
# One-sided test of H0: mean days-to-sale <= 10 vs H1: mean > 10.
t.test(sales$days, mu=10, alternative="greater")
# sales$days is the data to use
# mu=10: you specify the hypothesized mu
# alternative="greater": specify the direction for alternative
# Two-sided test of H0: mean days-to-sale equals 19.
t.test(sales$days, mu=19, alternative="two.sided")
# 2. t-test for difference between means
## create a variable called "month" on diamond data using months() function
diamond$month = months(diamond$fetch_date)
## testing for independent samples (Welch two-sample t-test by default)
t.test(diamond$price[diamond$month=="July"], diamond$price[diamond$month=="August"])
## diamond$price[diamond$month=="July"] means get the subset of diamond$price based on the condition
## that diamond$month equals "July".
## testing for paired samples
## create price.day1 and price.day11 data
## first merge the sales data back to price data
price = merge(price, sales)
## extract price information for the first day observations
price.day1 = price[price$fetch_date==price$first_day, c("sku", "price")]
## extract price information for the 11th day (first_day + 10)
price.day11 = price[price$fetch_date==as.Date(price$first_day+10), c("sku", "price")]
## keep only SKUs observed on both day 1 and day 11, so rows can be paired
price.day1 = price.day1[price.day1$sku %in% price.day11$sku, ]
dim(price.day1)
## sort both data frames by sku so the rows of each pair line up
price.day1 = price.day1[order(price.day1$sku),]
price.day11 = price.day11[order(price.day11$sku), ]
## Results in 2692 diamonds
## paired=T treats the two vectors as repeated measurements on the same diamonds;
## H1: day-1 price is greater than day-11 price on average
t.test(price.day1$price, price.day11$price, paired=T, alternative="greater")
### testing for proportion
## H0: 30% of diamonds first listed before 2012-08-25 sell within 10 days
with(sales[sales$first_day<as.Date("2012-08-25"), ],
prop.test(sum(days<=10), length(days), p=0.3))
# use with() function to select the subsample
# sum(days<=10) is the total number of observations sold within 10 days
### Chisq test: presidential votes
# create the votes data (3x3 contingency table, filled column by column)
votes = matrix(c(110, 230, 160, 185, 150, 165, 265, 115, 120), 3,3)
# chi-square test of independence between the row and column categories
chisq.test(votes)
| /testing.r | no_license | comm365mr/diamond | R | false | false | 2,146 | r | ### Codes used in the hypothesis testing lecture (lecture 12)
## please run codes in data_input.r first, you can choose "source" option to run all of them
## this is important, as we are going to use data created in data_input.r file
# 1. t-test for single mean
t.test(sales$days, mu=10, alternative="greater")
# sales$days is the data to use
# mu=10: you specify the hypothesized mu
# alternative="greater": specify the direction for alternative
t.test(sales$days, mu=19, alternative="two.sided")
# 2. t-test for difference between means
## create a variable called "month" on diamond data using months() function
diamond$month = months(diamond$fetch_date)
## testing for independent samples
t.test(diamond$price[diamond$month=="July"], diamond$price[diamond$month=="August"])
## diamond$price[diamond$month=="July"] means get the subset of diamond$price based on the condition
## that diamond$month equals "July".
## testing for paired samples
## create price.day1 and price.day11 data
## first merge the sales data back to price data
price = merge(price, sales)
## extract price information for the first day observations
price.day1 = price[price$fetch_date==price$first_day, c("sku", "price")]
## extract price information for the 11th day
price.day11 = price[price$fetch_date==as.Date(price$first_day+10), c("sku", "price")]
## get common observations for both day 1 and day 11
price.day1 = price.day1[price.day1$sku %in% price.day11$sku, ]
dim(price.day1)
## sort to make sure they are in the same order
price.day1 = price.day1[order(price.day1$sku),]
price.day11 = price.day11[order(price.day11$sku), ]
## Results in 2692 diamonds
t.test(price.day1$price, price.day11$price, paired=T, alternative="greater")
###testing for proportion
with(sales[sales$first_day<as.Date("2012-08-25"), ],
prop.test(sum(days<=10), length(days), p=0.3))
# use with() function to select the subsample
# sum(days<=0) is the total number of observations sold within 10 days
### Chisq test: presidential votes
# create the votes data
votes = matrix(c(110, 230, 160, 185, 150, 165, 265, 115, 120), 3,3)
# chi-square test
chisq.test(votes)
|
## Practice with if, for and while loops in R
##__author__ = 'Matthew Campos (matthew.campos19@imperial.ac.uk)'
##__version__ = '0.0.1'

## IF statement
a <- TRUE
# Fixed: the condition previously tested `a == FALSE` while printing
# "a is True", so the printed message contradicted the actual value of a.
if (a) {
  print("a is True")
} else {
  print("a is False")
}

## IF statement with a single-statement body
z <- runif(1)  # one uniformly distributed random number in [0, 1]
if (z <= 0.5) {
  print("Less than half")
}

## For loop over a numeric sequence
for (i in seq_len(10)) {  # seq_len() is the safe idiom for 1..n
  j <- i * i  # j is i squared
  # Fixed: " squared is" carried a leading space which, combined with
  # paste()'s default separator, printed a double space.
  print(paste(i, "squared is", j))
}

## For loop over a vector of strings
for (species in c("Heliodoxa rubinoides",
                  "Boissonneaua jardini",
                  "Sula nebouxii")) {
  print(paste("The species is", species))  # species takes each value in turn
}

## For loop over a character vector
v1 <- c("a", "bc", "def")
for (i in v1) {
  print(i)
}

## While loop: prints the squares 1, 4, ..., 100 and stops when i reaches 10
i <- 0
while (i < 10) {
  i <- i + 1  # increment i each iteration
  print(i^2)
}
| /Week3/Code/control_flow.R | no_license | matthewcampos/CMEECourseWork | R | false | false | 997 | r | ##practice with if, for and while loops in R
## Practice with if, for and while loops in R
##__author__ = 'Matthew Campos (matthew.campos19@imperial.ac.uk)'
##__version__ = '0.0.1'

## IF statement
a <- TRUE
# Fixed: the condition previously tested `a == FALSE` while printing
# "a is True", so the printed message contradicted the actual value of a.
if (a) {
  print("a is True")
} else {
  print("a is False")
}

## IF statement with a single-statement body
z <- runif(1)  # one uniformly distributed random number in [0, 1]
if (z <= 0.5) {
  print("Less than half")
}

## For loop over a numeric sequence
for (i in seq_len(10)) {  # seq_len() is the safe idiom for 1..n
  j <- i * i  # j is i squared
  # Fixed: " squared is" carried a leading space which, combined with
  # paste()'s default separator, printed a double space.
  print(paste(i, "squared is", j))
}

## For loop over a vector of strings
for (species in c("Heliodoxa rubinoides",
                  "Boissonneaua jardini",
                  "Sula nebouxii")) {
  print(paste("The species is", species))  # species takes each value in turn
}

## For loop over a character vector
v1 <- c("a", "bc", "def")
for (i in v1) {
  print(i)
}

## While loop: prints the squares 1, 4, ..., 100 and stops when i reaches 10
i <- 0
while (i < 10) {
  i <- i + 1  # increment i each iteration
  print(i^2)
}
|
# Unit test: eq_map() should return a leaflet map (an htmlwidget).
library(noaaquake)
context("Testing the output map is a leaflet map or not")
test_that("eq_map runs without error", {
  # Build a cleaned subset of the packaged NOAA data and drop rows that
  # lack coordinates (leaflet cannot place them).
  # NOTE(review): because & binds tighter than |, this filter reads as
  # India | (Pakistan & year >= 2000), i.e. ALL Indian quakes are kept
  # regardless of year -- confirm whether (India | Pakistan) & year >= 2000
  # was intended.
  data_clean <- NOAA_data %>%
    eq_clean_data()%>%
    dplyr::filter(Country == "India"|Country == "Pakistan" & lubridate::year(date) >= 2000)%>%
    tidyr::drop_na(Latitude,Longitude)
  # Render the map with magnitude ("Mag") annotations.
  map = eq_map(data = data_clean, annot_col = "Mag")
  # A leaflet map's class vector is c("leaflet", "htmlwidget").
  expect_equal(class(map)[1], "leaflet")
  expect_equal(class(map)[2], "htmlwidget")
})
| /tests/testthat/test_leaflet_map.R | permissive | adeel1997/noaaquake | R | false | false | 470 | r | library(noaaquake)
# Unit test: eq_map() should return a leaflet map (an htmlwidget).
context("Testing the output map is a leaflet map or not")
test_that("eq_map runs without error", {
  # Build a cleaned subset of the packaged NOAA data and drop rows that
  # lack coordinates (leaflet cannot place them).
  # NOTE(review): because & binds tighter than |, this filter reads as
  # India | (Pakistan & year >= 2000), i.e. ALL Indian quakes are kept
  # regardless of year -- confirm whether (India | Pakistan) & year >= 2000
  # was intended.
  data_clean <- NOAA_data %>%
    eq_clean_data()%>%
    dplyr::filter(Country == "India"|Country == "Pakistan" & lubridate::year(date) >= 2000)%>%
    tidyr::drop_na(Latitude,Longitude)
  # Render the map with magnitude ("Mag") annotations.
  map = eq_map(data = data_clean, annot_col = "Mag")
  # A leaflet map's class vector is c("leaflet", "htmlwidget").
  expect_equal(class(map)[1], "leaflet")
  expect_equal(class(map)[2], "htmlwidget")
})
|
# Modified from Winston Chang,
# https://shiny.rstudio.com/gallery/shiny-theme-selector.html
# Concepts about Reactive programming used by Shiny,
# https://shiny.rstudio.com/articles/reactivity-overview.html

# Load R packages
library(shiny)
library(shinythemes)

# Define UI: a navbar layout with three tabs. Only "Navbar 1" has content:
# two text inputs (given name / surname) and a verbatim text output that
# echoes them back, rendered by output$txtout in the server function.
# NOTE(review): the trailing commas inside sidebarPanel()/mainPanel() rely
# on the tag functions tolerating an empty trailing argument.
ui <- fluidPage(theme = shinytheme("cerulean"),
  navbarPage(
    # theme = "cerulean",  # <--- To use a theme, uncomment this
    "My first app",
    tabPanel("Navbar 1",
             sidebarPanel(
               tags$h3("Input:"),
               textInput("txt1", "Given Name:", ""),
               textInput("txt2", "Surname:", ""),
             ), # sidebarPanel
             mainPanel(
               h1("Header 1"),
               h4("Output 1"),
               verbatimTextOutput("txtout"),
             ) # mainPanel
    ), # Navbar 1, tabPanel
    tabPanel("Navbar 2", "This panel is intentionally left blank"),
    tabPanel("Navbar 3", "This panel is intentionally left blank")
  ) # navbarPage
) # fluidPage

# Define server function: reactively joins the two inputs with a single
# space whenever either input changes.
server <- function(input, output) {
  output$txtout <- renderText({
    paste( input$txt1, input$txt2, sep = " " )
  })
} # server

# Create Shiny object and launch the app.
shinyApp(ui = ui, server = server)
| /001-first-app/app.R | no_license | MelinaRentzi/Shiny_Playground | R | false | false | 1,385 | r | # Modified from Winston Chang,
# https://shiny.rstudio.com/gallery/shiny-theme-selector.html
# Concepts about Reactive programming used by Shiny,
# https://shiny.rstudio.com/articles/reactivity-overview.html
# Load R packages
library(shiny)
library(shinythemes)
# Define UI
ui <- fluidPage(theme = shinytheme("cerulean"),
navbarPage(
# theme = "cerulean", # <--- To use a theme, uncomment this
"My first app",
tabPanel("Navbar 1",
sidebarPanel(
tags$h3("Input:"),
textInput("txt1", "Given Name:", ""),
textInput("txt2", "Surname:", ""),
), # sidebarPanel
mainPanel(
h1("Header 1"),
h4("Output 1"),
verbatimTextOutput("txtout"),
) # mainPanel
), # Navbar 1, tabPanel
tabPanel("Navbar 2", "This panel is intentionally left blank"),
tabPanel("Navbar 3", "This panel is intentionally left blank")
) # navbarPage
) # fluidPage
# Define server function
server <- function(input, output) {
output$txtout <- renderText({
paste( input$txt1, input$txt2, sep = " " )
})
} # server
# Create Shiny object
shinyApp(ui = ui, server = server)
|
# Tutorial script: vector creation, arithmetic, recycling, naming,
# comparisons, sequences and subsetting in base R.
# (Comments translated to English; mis-encoded string literals are part of
# the original data and are kept byte-for-byte.)
x<-c(80, 85, 70)
x
# Rightward assignment (->) produces the same result as <-
c(80,85,70) ->x
x
# A character vector (names; original Hangul literals, kept unchanged)
y <-c("์ต์ฑ์","์ต์ฑ์ฃ2", "์ต์ฑ์3")
y
choi1<-c(80, 75, 75)
choi1
print(choi1)
choi2<-c(80,80,80)
choi3<-c(110)
## Arithmetic operators: %% is modulo, %/% is integer division
10%%2
10%/%2
5%%3+7**2-9%/%4
# Element-wise addition of two equal-length vectors
choi1+choi2
s<-choi1+choi2
s/2
# choi3 holds a single value, so it is recycled: that one number is added
# to every element of choi2. Recycling is only clean when the longer
# length is a multiple of the shorter one; otherwise R warns.
choi2+choi3
# Demonstrate the recycling warning: lengths 7 and 3 are not multiples
kim<-c(1,2,3,4,5,6,7)
kim
kang<-c(1,2,3)
kang
kim+kang
# Mixed types coerce to character; adding a number to a character
# vector raises an error
aa<-c("hong", 10, 'k')
aa+1
# Attach element labels with the names() function
names(choi1)<-c("kor","eng","math")
choi1
names(choi2)<-c("์","๋ฉ","๋ฆฌ")
choi2
# Compute the per-subject average of the two score vectors
average<-c(choi1, choi2)
average<-c(choi1+choi2)
average
average/2
# Logical values: comparisons return TRUE / FALSE
3<5
aa<-c(10, 23, 45, 66)
aa<=14
# Logical operators: AND of TRUE and FALSE is FALSE
x=TRUE
y=FALSE
x&y
!x
isTRUE(x)
# The colon operator builds an integer sequence
aa<-1:100
aa
# The seq() function with named arguments (by = step size)
bb<-seq(10,1,by=-3)
bb
cc<-seq(96,10,by=-4)
cc
sum(cc)
# length.out asks for a fixed number of evenly spaced values
dd<-seq(10,100,length.out = 8)
dd
# The rep() function: times= repeats the whole vector,
# each= repeats every element in place
x<-c(1,2,3)
y<-rep(x, times=2)
y
z<-rep(x, each=2)
z
z[1]
# Print the 1st, 3rd and 5th elements of vector y
y[c(1, 3, 5)]
# Print vector y excluding its 1st and 2nd elements
y[-c(1,2)]
# Select the elements of y that are greater than 1
y[y>1]
y[y>1&y<3]
y[2]<-100
y
# Replace every element of y smaller than 2 with 11
y[y<2]<-11
y
# Exercise
x<-c(1:10)
x
x<-rep(x,times=2)
x
x[x>2&x<4]<-7
x
x[5]=x[5]*2
x
| /1_calcurate.R | no_license | seongsik-choi/R_Project | R | false | false | 1,396 | r |
x<-c(80, 85, 70)
x
c(80,85,70) ->x
x
y <-c("์ต์ฑ์","์ต์ฑ์ฃ2", "์ต์ฑ์3")
y
choi1<-c(80, 75, 75)
choi1
print(choi1)
choi2<-c(80,80,80)
choi3<-c(110)
##์ฌ์น์ฐ์ฐ
10%%2
10%/%2
5%%3+7**2-9%/%4
choi1+choi2
s<-choi1+choi2
s/2
#choi3์ด ์ ์๊ฐ ํ๋๋ฉด ๊ทธ ์ ์๋ก ๋ค ๋ํด์ค๋ค
#๋ฐฐ์๊ฐ ์๋ง์ผ๋ฉด ์ฐ์ฐ no (Ex.7๊ณผ )
choi2+choi3
#๋ฐฐ์ ์๋ง๋๊ฑฐ ํ์ธ
kim<-c(1,2,3,4,5,6,7)
kim
kang<-c(1,2,3)
kang
kim+kang
# type ๋ค๋ฅธ๊ฑฐ์ ๋ํ๋ฉด ๊ณ์ฐ์ด ์์ ์๋จ
aa<-c("hong", 10, 'k')
aa+1
#ํ๊ทธ๋ง๋ค๊ธฐ names ํจ์
names(choi1)<-c("kor","eng","math")
choi1
names(choi2)<-c("์","๋ฉ","๋ฆฌ")
choi2
#ํ๊ท ๊ฐ ๊ณ์ฐํ๊ธฐ
average<-c(choi1, choi2)
average<-c(choi1+choi2)
average
average/2
# true,false
3<5
aa<-c(10, 23, 45, 66)
aa<=14
#ํ๋๊ฐ ์ฐธ, ํ๋๊ฐ ๊ฑฐ์ง ๋๊ฐํฉํ๋ฉด ๊ฑฐ์ง
x=TRUE
y=FALSE
x&y
!x
isTRUE(x)
aa<-1:100
aa
# seq ํจ์ ์ฌ์ฉ! ๋งค๊ฐ๋ณ์ ์ฌ์ฉ
bb<-seq(10,1,by=-3)
bb
cc<-seq(96,10,by=-4)
cc
sum(cc)
dd<-seq(10,100,length.out = 8)
dd
# rep ํจ์
x<-c(1,2,3)
y<-rep(x, times=2)
y
z<-rep(x, each=2)
z
z[1]
# y๋ฒกํฐ์ 1, 3, 5๋ฒ์งธ ๊ฐ์ถ๋ ฅ
y[c(1, 3, 5)]
# y๋ฒกํฐ์ 1, 2๋ฒ์งธ ๊ฐ ์ ์ธ์ถ๋ ฅ
y[-c(1,2)]
#y์ ๊ฐ์ค 1๋ณด๋ค ํฐ๊ฐ ๋ฝ๊ธฐ
y[y>1]
y[y>1&y<3]
y[2]<-100
y
# y์ ๊ฐ์ค 2๋ณด๋ค ์์ผ๋ฉด ์ ๋ถ 11๋ก ๋ง๋ค๊ธฐ
y[y<2]<-11
y
#๋ฌธ์
x<-c(1:10)
x
x<-rep(x,times=2)
x
x[x>2&x<4]<-7
x
x[5]=x[5]*2
x
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.