content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
\name{sourceInfo.TSPADIdata} \alias{sourceInfo.TSPADIdata} \alias{sourcedb.TSPADIdata} \alias{sourceserver.TSPADIdata} \alias{availability.TSPADIdata} \alias{identifiers.TSPADIdata} \alias{periods.TSPADIdata} \title{TSPADIdata Specific Methods} \description{See the generic function description.} \usage{ \method{sourceInfo}{TSPADIdata}(obj) \method{sourcedb}{TSPADIdata}(obj) \method{sourceserver}{TSPADIdata}(obj) \method{availability}{TSPADIdata}(obj, verbose=TRUE, timeout=60, ...) \method{identifiers}{TSPADIdata}(obj) \method{periods}{TSPADIdata}(x) } \arguments{ \item{obj}{a TSPADIdata object.} \item{x}{a TSPADIdata object.} \item{verbose}{a logical indicating if extra information should be printed.} \item{timeout}{an integer indicating the number of seconds to wait before concluding that the server is not available.} \item{...}{(further arguments, currently disregarded).} } \seealso{ \code{\link{sourceInfo}} } %\keyword{DSE} \keyword{ts}
/dsepadi/man/sourceInfo.TSPADIdata.Rd
no_license
cran/dseplus
R
false
false
1,013
rd
\name{sourceInfo.TSPADIdata} \alias{sourceInfo.TSPADIdata} \alias{sourcedb.TSPADIdata} \alias{sourceserver.TSPADIdata} \alias{availability.TSPADIdata} \alias{identifiers.TSPADIdata} \alias{periods.TSPADIdata} \title{TSPADIdata Specific Methods} \description{See the generic function description.} \usage{ \method{sourceInfo}{TSPADIdata}(obj) \method{sourcedb}{TSPADIdata}(obj) \method{sourceserver}{TSPADIdata}(obj) \method{availability}{TSPADIdata}(obj, verbose=TRUE, timeout=60, ...) \method{identifiers}{TSPADIdata}(obj) \method{periods}{TSPADIdata}(x) } \arguments{ \item{obj}{a TSPADIdata object.} \item{x}{a TSPADIdata object.} \item{verbose}{a logical indicating if extra information should be printed.} \item{timeout}{an integer indicating the number of seconds to wait before concluding that the server is not available.} \item{...}{(further arguments, currently disregarded).} } \seealso{ \code{\link{sourceInfo}} } %\keyword{DSE} \keyword{ts}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/show_any_rounds.R \name{show_theme_rounds} \alias{show_theme_rounds} \title{Return available rounds for a theme in the European Social Survey} \usage{ show_theme_rounds(theme) } \arguments{ \item{theme}{A character of length 1 with the full name of the theme. Use \code{\link{show_themes}}for a list of available themes.} } \value{ character vector with available rounds for \code{country} } \description{ This function returns the available rounds for any theme from \code{\link{show_themes}}. However, contrary to \code{\link{show_country_rounds}} themes can not be downloaded as separate datasets. This and the \code{\link{show_themes}} function serve purely for informative purposes. } \examples{ chosen_theme <- show_themes()[3] # In which rounds was the topic of 'Democracy' asked? show_theme_rounds(chosen_theme) # And politics? show_theme_rounds("Politics") }
/man/show_theme_rounds.Rd
permissive
jlopezper/essurvey
R
false
true
983
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/show_any_rounds.R \name{show_theme_rounds} \alias{show_theme_rounds} \title{Return available rounds for a theme in the European Social Survey} \usage{ show_theme_rounds(theme) } \arguments{ \item{theme}{A character of length 1 with the full name of the theme. Use \code{\link{show_themes}}for a list of available themes.} } \value{ character vector with available rounds for \code{country} } \description{ This function returns the available rounds for any theme from \code{\link{show_themes}}. However, contrary to \code{\link{show_country_rounds}} themes can not be downloaded as separate datasets. This and the \code{\link{show_themes}} function serve purely for informative purposes. } \examples{ chosen_theme <- show_themes()[3] # In which rounds was the topic of 'Democracy' asked? show_theme_rounds(chosen_theme) # And politics? show_theme_rounds("Politics") }
\name{ASparameter-classes} \docType{class} \alias{ASparameter-class} \alias{ASparameter} \alias{APparameter-class} \alias{APparameter} \alias{ECparameter-class} \alias{ECparameter} \alias{initialize,ASparameter-method} \alias{initialize,APparameter-method} \alias{initialize,ECparameter-method} \alias{show,ASparameter-method} \alias{show,APparameter-method} \alias{coerce,NULL,APparameter-method} \alias{coerce,list,APparameter-method} \alias{coerce,NULL,ECparameter-method} \alias{coerce,list,ECparameter-method} \title{Classes ``ASparameter'', ``APparameter'', ``ECparameter'' --- Specifying the `parameter' Argument of apriori() and eclat()} \description{ The \code{ASparameter} class holds the mining parameters (e.g., minimum support) for the used mining algorithms. \code{APparameter} and \code{ECparameter} directly extend \code{ASparameter} with additional slots for parameters only suitable for the Apriori (\code{APparameter}) or the Eclat algorithms (\code{ECparameter}). } \section{Objects from the Class}{ A suitable default parameter object will be automatically created by the \code{\link{apriori}} or the \code{\link{eclat}} function. By specifying a named list (names equal to slots) as \code{parameter} argument for the \code{\link{apriori}} or the \code{\link{eclat}} function, default values can be replaced by the values in the list. Objects can be created by calls of the form \code{new("APparameter", ...)} or \code{new("ECparameter", ...)}. } \section{Slots}{ Common slots defined in \code{ASparameter}: \describe{ \item{\code{support}:}{a numeric value for the minimal support of an item set (default: \eqn{0.1})} \item{\code{minlen}:}{an integer value for the minimal number of items per item set (default: 1)} \item{\code{maxlen}:}{an integer value for the maximal number of items per item set (default: 10)} \item{\code{target}:}{a character string indicating the type of association mined. 
One of \itemize{ \item \code{"frequent itemsets"} \item \code{"maximally frequent itemsets"} \item \code{"closed frequent itemsets"} \item \code{"rules"} (only available for Apriori) \item \code{"hyperedgesets"} (only available for Apriori; see references for the definition of association hyperedgesets) } } \item{\code{ext}:}{a logical indicating whether to produce extended information on quality measures (e.g., lhs.support) (default: \code{FALSE})} } Additional slots for Apriori in \code{APparameter}: \describe{ \item{\code{confidence}:}{a numeric value for the minimal confidence of rules/association hyperedges (default: \eqn{0.8}). For frequent itemsets it is set to \code{NA}.} \item{\code{smax}:}{a numeric value for the maximal support of itemsets/rules/hyperedgesets (default: 1)} \item{\code{arem}:}{a character string indicating the used additional rule evaluation measure (default: \code{"none"}) given by one of \describe{ \item{\code{"none"}:}{no additional evaluation measure} \item{\code{"diff"}:}{absolute confidence difference} \item{\code{"quot"}:}{difference of confidence quotient to 1} \item{\code{"aimp"}:}{absolute difference of improvement to 1} \item{\code{"info"}:}{information difference to prior} \item{\code{"chi2"}:}{normalized \eqn{\chi^2} measure} } } \item{\code{aval}:}{a logical indicating whether to return the additional rule evaluation measure selected with \code{arem}.} \item{\code{minval}:}{a numeric value for the minimal value of additional evaluation measure selected with \code{arem} (default: \eqn{0.1})} \item{\code{originalSupport}:}{a logical indicating whether to use for minimum support the original definition of the support of a rule (lhs and rhs) instead of lhs support. 
Make sure to use \code{ext = TRUE} if \code{originalSupport} is set to \code{FALSE} (default: \code{TRUE})} } Additional slots for Eclat in \code{ECparameter}: \describe{ \item{\code{tidLists}:}{a logical indicating whether to return also a list of supporting transactions (transaction IDs) (default: \code{FALSE})} } } \section{Methods}{ \describe{ \item{coerce}{\code{signature(from = "NULL", to = "APparameter")}} \item{coerce}{\code{signature(from = "list", to = "APparameter")}} \item{coerce}{\code{signature(from = "NULL", to = "ECparameter")}} \item{coerce}{\code{signature(from = "list", to = "ECparameter")}} \item{show}{\code{signature(object = "ASparameter")}} } } \references{ Christian Borgelt (2004) \emph{Apriori --- Finding Association Rules/Hyperedges with the Apriori Algorithm}. \url{www.borgelt.net/apriori.html} } \seealso{ \code{\link{apriori}}, \code{\link{eclat}} } \author{Michael Hahsler and Bettina Gruen} \keyword{classes}
/man/ASparameter-class.Rd
no_license
matmo/arules
R
false
false
4,928
rd
\name{ASparameter-classes} \docType{class} \alias{ASparameter-class} \alias{ASparameter} \alias{APparameter-class} \alias{APparameter} \alias{ECparameter-class} \alias{ECparameter} \alias{initialize,ASparameter-method} \alias{initialize,APparameter-method} \alias{initialize,ECparameter-method} \alias{show,ASparameter-method} \alias{show,APparameter-method} \alias{coerce,NULL,APparameter-method} \alias{coerce,list,APparameter-method} \alias{coerce,NULL,ECparameter-method} \alias{coerce,list,ECparameter-method} \title{Classes ``ASparameter'', ``APparameter'', ``ECparameter'' --- Specifying the `parameter' Argument of apriori() and eclat()} \description{ The \code{ASparameter} class holds the mining parameters (e.g., minimum support) for the used mining algorithms. \code{APparameter} and \code{ECparameter} directly extend \code{ASparameter} with additional slots for parameters only suitable for the Apriori (\code{APparameter}) or the Eclat algorithms (\code{ECparameter}). } \section{Objects from the Class}{ A suitable default parameter object will be automatically created by the \code{\link{apriori}} or the \code{\link{eclat}} function. By specifying a named list (names equal to slots) as \code{parameter} argument for the \code{\link{apriori}} or the \code{\link{eclat}} function, default values can be replaced by the values in the list. Objects can be created by calls of the form \code{new("APparameter", ...)} or \code{new("ECparameter", ...)}. } \section{Slots}{ Common slots defined in \code{ASparameter}: \describe{ \item{\code{support}:}{a numeric value for the minimal support of an item set (default: \eqn{0.1})} \item{\code{minlen}:}{an integer value for the minimal number of items per item set (default: 1)} \item{\code{maxlen}:}{an integer value for the maximal number of items per item set (default: 10)} \item{\code{target}:}{a character string indicating the type of association mined. 
One of \itemize{ \item \code{"frequent itemsets"} \item \code{"maximally frequent itemsets"} \item \code{"closed frequent itemsets"} \item \code{"rules"} (only available for Apriori) \item \code{"hyperedgesets"} (only available for Apriori; see references for the definition of association hyperedgesets) } } \item{\code{ext}:}{a logical indicating whether to produce extended information on quality measures (e.g., lhs.support) (default: \code{FALSE})} } Additional slots for Apriori in \code{APparameter}: \describe{ \item{\code{confidence}:}{a numeric value for the minimal confidence of rules/association hyperedges (default: \eqn{0.8}). For frequent itemsets it is set to \code{NA}.} \item{\code{smax}:}{a numeric value for the maximal support of itemsets/rules/hyperedgesets (default: 1)} \item{\code{arem}:}{a character string indicating the used additional rule evaluation measure (default: \code{"none"}) given by one of \describe{ \item{\code{"none"}:}{no additional evaluation measure} \item{\code{"diff"}:}{absolute confidence difference} \item{\code{"quot"}:}{difference of confidence quotient to 1} \item{\code{"aimp"}:}{absolute difference of improvement to 1} \item{\code{"info"}:}{information difference to prior} \item{\code{"chi2"}:}{normalized \eqn{\chi^2} measure} } } \item{\code{aval}:}{a logical indicating whether to return the additional rule evaluation measure selected with \code{arem}.} \item{\code{minval}:}{a numeric value for the minimal value of additional evaluation measure selected with \code{arem} (default: \eqn{0.1})} \item{\code{originalSupport}:}{a logical indicating whether to use for minimum support the original definition of the support of a rule (lhs and rhs) instead of lhs support. 
Make sure to use \code{ext = TRUE} if \code{originalSupport} is set to \code{FALSE} (default: \code{TRUE})} } Additional slots for Eclat in \code{ECparameter}: \describe{ \item{\code{tidLists}:}{a logical indicating whether to return also a list of supporting transactions (transaction IDs) (default: \code{FALSE})} } } \section{Methods}{ \describe{ \item{coerce}{\code{signature(from = "NULL", to = "APparameter")}} \item{coerce}{\code{signature(from = "list", to = "APparameter")}} \item{coerce}{\code{signature(from = "NULL", to = "ECparameter")}} \item{coerce}{\code{signature(from = "list", to = "ECparameter")}} \item{show}{\code{signature(object = "ASparameter")}} } } \references{ Christian Borgelt (2004) \emph{Apriori --- Finding Association Rules/Hyperedges with the Apriori Algorithm}. \url{www.borgelt.net/apriori.html} } \seealso{ \code{\link{apriori}}, \code{\link{eclat}} } \author{Michael Hahsler and Bettina Gruen} \keyword{classes}
## ## create data on presidential proclamations ## require(readtext) ## read the texts data_corpus_pp <- corpus(readtext(paste0(getOption("ROOT_DROPBOX"), "data_text/presidential_proclamations/procl_texts/*"), verbosity = 0)) ## set the document variable Year from the directory name docvars(data_corpus_pp, "Year") <- as.integer(substring(docnames(data_corpus_pp), 1, 4)) ## remove any texts of zero length data_corpus_pp <- corpus_subset(data_corpus_pp, ntoken(data_corpus_pp) > 0) ## clean up things that will mess up the sentence segmentation texts(data_corpus_pp) <- stringi::stri_replace_all_regex(texts(data_corpus_pp), "A.\\w{0,1}D.", "AD") # for things like "GO.", "TH.", "J." texts(data_corpus_pp) <- stringi::stri_replace_all_regex(texts(data_corpus_pp), "(\\p{Lu}{1,2})\\.", "$1") # for enumerations followed by ., such as 2. 3. etc texts(data_corpus_pp) <- stringi::stri_replace_all_regex(texts(data_corpus_pp), "(\\d{1,2})\\.", "$1) ") ## save the corpus devtools::use_data(data_corpus_pp, overwrite = TRUE)
/package_analysis/data_creation/data_corpus_pp/data_corpus_pp.R
no_license
kmunger/sophistication
R
false
false
1,029
r
## ## create data on presidential proclamations ## require(readtext) ## read the texts data_corpus_pp <- corpus(readtext(paste0(getOption("ROOT_DROPBOX"), "data_text/presidential_proclamations/procl_texts/*"), verbosity = 0)) ## set the document variable Year from the directory name docvars(data_corpus_pp, "Year") <- as.integer(substring(docnames(data_corpus_pp), 1, 4)) ## remove any texts of zero length data_corpus_pp <- corpus_subset(data_corpus_pp, ntoken(data_corpus_pp) > 0) ## clean up things that will mess up the sentence segmentation texts(data_corpus_pp) <- stringi::stri_replace_all_regex(texts(data_corpus_pp), "A.\\w{0,1}D.", "AD") # for things like "GO.", "TH.", "J." texts(data_corpus_pp) <- stringi::stri_replace_all_regex(texts(data_corpus_pp), "(\\p{Lu}{1,2})\\.", "$1") # for enumerations followed by ., such as 2. 3. etc texts(data_corpus_pp) <- stringi::stri_replace_all_regex(texts(data_corpus_pp), "(\\d{1,2})\\.", "$1) ") ## save the corpus devtools::use_data(data_corpus_pp, overwrite = TRUE)
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f"))) source('../h2o-runit.R') test.pub.859 <- function(conn) { a_initial = as.data.frame(cbind( c(0,0,1,0,0,1,0,0,0,0), c(1,1,1,0,1,0,1,0,1,0), c(1,0,1,0,1,0,1,0,0,1), c(1,1,0,0,0,1,0,0,0,1), c(1,1,1,0,1,0,0,0,1,1), c(1,0,1,0,0,0,0,0,1,1), c(1,1,1,0,0,0,1,1,1,0), c(0,0,1,1,1,0,0,1,1,0), c(0,1,1,1,1,0,0,1,1,0), c(0,0,0,0,0,1,1,0,0,0) )) a = a_initial a.h2o <- as.h2o(conn, a_initial, key="r.hex") d = ifelse(F, a.h2o[1,] , apply(a.h2o, 2, sum)) dd = ifelse(F, a[1,] , apply(a, 2, sum)) d.h2o = as.data.frame(d) dd d.h2o expect_that(all(d.h2o == dd), equals(T)) testEnd() } doTest("Test pub 859", test.pub.859)
/h2o-r/tests/testdir_jira/runit_pub_859_ifelse.R
permissive
StephaneFeniar/h2o-dev
R
false
false
758
r
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f"))) source('../h2o-runit.R') test.pub.859 <- function(conn) { a_initial = as.data.frame(cbind( c(0,0,1,0,0,1,0,0,0,0), c(1,1,1,0,1,0,1,0,1,0), c(1,0,1,0,1,0,1,0,0,1), c(1,1,0,0,0,1,0,0,0,1), c(1,1,1,0,1,0,0,0,1,1), c(1,0,1,0,0,0,0,0,1,1), c(1,1,1,0,0,0,1,1,1,0), c(0,0,1,1,1,0,0,1,1,0), c(0,1,1,1,1,0,0,1,1,0), c(0,0,0,0,0,1,1,0,0,0) )) a = a_initial a.h2o <- as.h2o(conn, a_initial, key="r.hex") d = ifelse(F, a.h2o[1,] , apply(a.h2o, 2, sum)) dd = ifelse(F, a[1,] , apply(a, 2, sum)) d.h2o = as.data.frame(d) dd d.h2o expect_that(all(d.h2o == dd), equals(T)) testEnd() } doTest("Test pub 859", test.pub.859)
#' A Correlation Function #' #' This function allows you to check the correlation of all numeric continuous variables in one specific year #' @param Year Which year data do you want to check? Defaults to Null #' @keywards numeric correlation #' @export #' @example #' correlation_numeric_var() correlation_numeric_var <- function(year){ function_dataset_3 <- purrr::keep(dplyr::filter(Seasons_Stats_NBA, Year==year), is.numeric) utils::cor(function_dataset_3) }
/R/correlation_numeric_var.R
no_license
GYang777/HW5
R
false
false
488
r
#' A Correlation Function #' #' This function allows you to check the correlation of all numeric continuous variables in one specific year #' @param Year Which year data do you want to check? Defaults to Null #' @keywards numeric correlation #' @export #' @example #' correlation_numeric_var() correlation_numeric_var <- function(year){ function_dataset_3 <- purrr::keep(dplyr::filter(Seasons_Stats_NBA, Year==year), is.numeric) utils::cor(function_dataset_3) }
library(gkmSVM) suppressMessages(suppressWarnings(library(tools))) do_gkmSVM <- function(pop){ genNullSeqs(paste0('../../processed/gkmerpeaks/topPeaks-',pop,'.bed'), nMaxTrials=10,xfold=1, genomeVersion='hg19', outputPosFastaFN=paste0('../fasta/',pop,'-positive.fa'), outputBedFN=paste0('../fasta/',pop,'-negative.bed'), outputNegFastaFN=paste0('../fasta/',pop,'-negative.fa')) gkmsvm_kernel(paste0('../fasta/',pop,'-positive.fa'), paste0('../fasta/',pop,'-negative.fa'), paste0('../kernel/', pop, ".kernel.out")) gkmsvm_trainCV(kernelfn = paste0('../kernel/', pop, ".kernel.out"), posfn = paste0('../fasta/',pop,'-positive.fa'), negfn = paste0('../fasta/',pop,'-negative.fa'), svmfnprfx=paste0('../kernel/', pop), outputCVpredfn=paste0('../kernel/', pop, ".cvPred.out"), outputROCfn=paste0('../kernel/', pop, ".roc.out")) gkmsvm_classify('../fasta/nr10mers.fa', svmfnprfx=paste0('../kernel/', pop), paste0('../kernel/', pop, ".weights.10mer.out")) pop } args <- commandArgs(trailingOnly = TRUE) if(file_path_sans_ext(basename(args[1])) == "R"){ i <- 2 } else { # Rscript i <- 0 } pops <- paste0("P", as.character(1:8)) idx <- as.numeric(args[i+1]) do_gkmSVM(pops[idx])
/gkmsvm/code/01_train_pops.R
no_license
sankaranlab/erythroid-profiling
R
false
false
1,424
r
library(gkmSVM) suppressMessages(suppressWarnings(library(tools))) do_gkmSVM <- function(pop){ genNullSeqs(paste0('../../processed/gkmerpeaks/topPeaks-',pop,'.bed'), nMaxTrials=10,xfold=1, genomeVersion='hg19', outputPosFastaFN=paste0('../fasta/',pop,'-positive.fa'), outputBedFN=paste0('../fasta/',pop,'-negative.bed'), outputNegFastaFN=paste0('../fasta/',pop,'-negative.fa')) gkmsvm_kernel(paste0('../fasta/',pop,'-positive.fa'), paste0('../fasta/',pop,'-negative.fa'), paste0('../kernel/', pop, ".kernel.out")) gkmsvm_trainCV(kernelfn = paste0('../kernel/', pop, ".kernel.out"), posfn = paste0('../fasta/',pop,'-positive.fa'), negfn = paste0('../fasta/',pop,'-negative.fa'), svmfnprfx=paste0('../kernel/', pop), outputCVpredfn=paste0('../kernel/', pop, ".cvPred.out"), outputROCfn=paste0('../kernel/', pop, ".roc.out")) gkmsvm_classify('../fasta/nr10mers.fa', svmfnprfx=paste0('../kernel/', pop), paste0('../kernel/', pop, ".weights.10mer.out")) pop } args <- commandArgs(trailingOnly = TRUE) if(file_path_sans_ext(basename(args[1])) == "R"){ i <- 2 } else { # Rscript i <- 0 } pops <- paste0("P", as.character(1:8)) idx <- as.numeric(args[i+1]) do_gkmSVM(pops[idx])
\name{Lab4-package} \alias{Lab4-package} \alias{Lab4} \docType{package} \title{ \packageTitle{Lab4} } \description{ \packageDescription{Lab4} } \author{ \packageAuthor{Lab4} Maintainer: \packageMaintainer{Lab4} } \references{ https://en.wikipedia.org/wiki/Linear_regression \keyword{ package } \seealso{ } \examples{ > linreg(Sepal.Width~Sepal.Length,iris)->x \\ > x \\ Call: linreg(Sepal.Width ~ Sepal.Length) \\ Coefficients: \\ (Intercept) Sepal.Length \\ 3.418947 -0.0618848\\ }
/Lab4/man/Lab4-package.Rd
no_license
Kresch/Lab4
R
false
false
488
rd
\name{Lab4-package} \alias{Lab4-package} \alias{Lab4} \docType{package} \title{ \packageTitle{Lab4} } \description{ \packageDescription{Lab4} } \author{ \packageAuthor{Lab4} Maintainer: \packageMaintainer{Lab4} } \references{ https://en.wikipedia.org/wiki/Linear_regression \keyword{ package } \seealso{ } \examples{ > linreg(Sepal.Width~Sepal.Length,iris)->x \\ > x \\ Call: linreg(Sepal.Width ~ Sepal.Length) \\ Coefficients: \\ (Intercept) Sepal.Length \\ 3.418947 -0.0618848\\ }
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/iplant_resolve.R \name{iplant_resolve} \alias{iplant_resolve} \title{iPlant name resolution} \usage{ iplant_resolve(query, retrieve = "all", callopts = list()) } \arguments{ \item{query}{Vector of one or more taxonomic names. (no common names)} \item{retrieve}{Specifies whether to retrieve all matches for the names submitted. One of 'best' (retrieves only the single best match for each name submitted) or 'all' (retrieves all matches)} \item{callopts}{Curl options passed on to \code{httr::GET}} } \value{ A data frame } \description{ iPlant name resolution } \examples{ \dontrun{ iplant_resolve(query=c("Helianthus annuus", "Homo sapiens")) iplant_resolve("Helianthusss") iplant_resolve("Pooa") library("httr") iplant_resolve("Helianthusss", callopts=verbose()) } }
/man/iplant_resolve.Rd
permissive
MadeleineMcGreer/taxize
R
false
false
860
rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/iplant_resolve.R \name{iplant_resolve} \alias{iplant_resolve} \title{iPlant name resolution} \usage{ iplant_resolve(query, retrieve = "all", callopts = list()) } \arguments{ \item{query}{Vector of one or more taxonomic names. (no common names)} \item{retrieve}{Specifies whether to retrieve all matches for the names submitted. One of 'best' (retrieves only the single best match for each name submitted) or 'all' (retrieves all matches)} \item{callopts}{Curl options passed on to \code{httr::GET}} } \value{ A data frame } \description{ iPlant name resolution } \examples{ \dontrun{ iplant_resolve(query=c("Helianthus annuus", "Homo sapiens")) iplant_resolve("Helianthusss") iplant_resolve("Pooa") library("httr") iplant_resolve("Helianthusss", callopts=verbose()) } }
#### Variables #### source("./source/variables/general/VARgeneral.R",local=TRUE) source("./source/variables/fullmodel/VARfullmodel.R",local=TRUE) source("./source/variables/modules/VARmodules.R",local=TRUE) #### Text #### source("./source/text/portal/TXTportal.R",local=TRUE) source("./source/text/modules/TXTmodule1.R",local=TRUE) source("./source/text/modules/TXTmodule3.R",local=TRUE) source("./source/text/modules/TXTmodule6.R",local=TRUE) source("./source/text/fullmodel/TXTfullmodel.R",local=TRUE) #### Inputs #### source("./source/inputs/getSliderInput.R",local=TRUE) source("./source/inputs/getEnvironmentInput.R",local=TRUE) source("./source/inputs/getCheckboxInput.R",local=TRUE) source("./source/inputs/getSelectInput.R",local=TRUE) source("./source/inputs/getNumericInput.R",local=TRUE) #### Utilities #### source("./source/utilities/getIcon.R",local=TRUE) source("./source/utilities/runningIndicator.R",local=TRUE) source("./source/utilities/getTable.R",local=TRUE) source("./source/utilities/info_msg.R",local=TRUE) source("./source/utilities/getLabel.R",local=TRUE) ######## source("./source/pages/fullModel/UIenvironment.R",local=TRUE) source("./source/pages/fullModel/UIspecificEnvironment.R",local=TRUE) ##### c()
/inst/shiny-squid/source/UIsource.R
no_license
alrutten/SQUID
R
false
false
1,237
r
#### Variables #### source("./source/variables/general/VARgeneral.R",local=TRUE) source("./source/variables/fullmodel/VARfullmodel.R",local=TRUE) source("./source/variables/modules/VARmodules.R",local=TRUE) #### Text #### source("./source/text/portal/TXTportal.R",local=TRUE) source("./source/text/modules/TXTmodule1.R",local=TRUE) source("./source/text/modules/TXTmodule3.R",local=TRUE) source("./source/text/modules/TXTmodule6.R",local=TRUE) source("./source/text/fullmodel/TXTfullmodel.R",local=TRUE) #### Inputs #### source("./source/inputs/getSliderInput.R",local=TRUE) source("./source/inputs/getEnvironmentInput.R",local=TRUE) source("./source/inputs/getCheckboxInput.R",local=TRUE) source("./source/inputs/getSelectInput.R",local=TRUE) source("./source/inputs/getNumericInput.R",local=TRUE) #### Utilities #### source("./source/utilities/getIcon.R",local=TRUE) source("./source/utilities/runningIndicator.R",local=TRUE) source("./source/utilities/getTable.R",local=TRUE) source("./source/utilities/info_msg.R",local=TRUE) source("./source/utilities/getLabel.R",local=TRUE) ######## source("./source/pages/fullModel/UIenvironment.R",local=TRUE) source("./source/pages/fullModel/UIspecificEnvironment.R",local=TRUE) ##### c()
library(purrr) library(rvest) library(tidyverse) get_product_data <- function(file_to_look_at){ # file_to_look_at <- c("inputs/2018/products/pearl-earrings-diamond-touchstone-earrings-12mm-oval-yellow-gold.html") # file_to_look_at <- c("inputs/2018/products/pearl-earrings-monsoon-earring-enhancer-l-r-white-gold.html") # file_to_look_at <- c("inputs/2018/products/pearl-necklaces-monsoon-petal-pendant-necklace-yellow-gold.html") content <- read_html(file_to_look_at) %>% html_nodes(".product-info-main") name <- content %>% html_nodes(".base") %>% html_text(trim = TRUE) description <- content %>% html_nodes(".short-description") %>% html_text(trim = TRUE) description <- ifelse(is_empty(description), "none", description) flowerydescription <- content %>% html_nodes(".description") %>% html_text(trim = TRUE) flowerydescription <- ifelse(is_empty(flowerydescription), "none", flowerydescription) availability <- content %>% html_nodes(".stock") %>% html_text(trim = TRUE) sku <- content %>% html_nodes(".sku") %>% html_text(trim = TRUE) price <- content %>% html_nodes(".price") %>% html_text(trim = TRUE) return(tibble( name = name, description = description, flowerydescription = flowerydescription, availability = availability, sku = sku, price = price )) } products <- list.files("inputs/2018/products", full.names = TRUE) names(products) <- list.files("inputs/2018/products") %>% gsub(pattern = ".html$", replacement = "") dataset <- purrr::map_df(products, get_product_data, .id = "product") write_csv(dataset, "outputs/2018_dataset.csv")
/scripts/make_dataframe_from_2018_products.R
no_license
RohanAlexander/paspaley
R
false
false
1,813
r
library(purrr) library(rvest) library(tidyverse) get_product_data <- function(file_to_look_at){ # file_to_look_at <- c("inputs/2018/products/pearl-earrings-diamond-touchstone-earrings-12mm-oval-yellow-gold.html") # file_to_look_at <- c("inputs/2018/products/pearl-earrings-monsoon-earring-enhancer-l-r-white-gold.html") # file_to_look_at <- c("inputs/2018/products/pearl-necklaces-monsoon-petal-pendant-necklace-yellow-gold.html") content <- read_html(file_to_look_at) %>% html_nodes(".product-info-main") name <- content %>% html_nodes(".base") %>% html_text(trim = TRUE) description <- content %>% html_nodes(".short-description") %>% html_text(trim = TRUE) description <- ifelse(is_empty(description), "none", description) flowerydescription <- content %>% html_nodes(".description") %>% html_text(trim = TRUE) flowerydescription <- ifelse(is_empty(flowerydescription), "none", flowerydescription) availability <- content %>% html_nodes(".stock") %>% html_text(trim = TRUE) sku <- content %>% html_nodes(".sku") %>% html_text(trim = TRUE) price <- content %>% html_nodes(".price") %>% html_text(trim = TRUE) return(tibble( name = name, description = description, flowerydescription = flowerydescription, availability = availability, sku = sku, price = price )) } products <- list.files("inputs/2018/products", full.names = TRUE) names(products) <- list.files("inputs/2018/products") %>% gsub(pattern = ".html$", replacement = "") dataset <- purrr::map_df(products, get_product_data, .id = "product") write_csv(dataset, "outputs/2018_dataset.csv")
### Priors_plot library(ggplot2) library(dplyr) library(magrittr) library("truncnorm") theme_set(theme_bw(base_size = 24)) # Country list and WHO data who0 <- as.data.frame(read.csv("~/Dropbox/MDR/new_who_edited_sub.csv")[,-1]) who0$year <- who0$year_new who0.r.95 <- who0 %>% group_by(year) %>% dplyr::summarise(median=median(av_mdr_new_pcnt), min=quantile(av_mdr_new_pcnt, 0.025), max=quantile(av_mdr_new_pcnt, 0.975)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup who0.r.80 <- who0 %>% group_by(year) %>% dplyr::summarise(median=median(av_mdr_new_pcnt), min=quantile(av_mdr_new_pcnt, 0.1), max=quantile(av_mdr_new_pcnt, 0.9)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup who0.r.50 <- who0 %>% group_by(year) %>% dplyr::summarise(median=median(av_mdr_new_pcnt), min=quantile(av_mdr_new_pcnt, 0.25), max=quantile(av_mdr_new_pcnt, 0.75)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup ####### Priors: ### tm ****###################################################################### # original: tm <- normal(1980,5) #tseq <- rnorm(100,mean = 1980, sd = 5) tprop <- rnorm(10000,mean = 1985, sd = 9) plot(density(tprop), col = "red") quantile(tprop,probs=c(0.05, 0.95)) tseq <- 1980 ### b ****###################################################################### # original: b <- uniform(0,0.05) bseq <- runif(30,min = 0, max = 0.05) # mean = exp(mu + sd^2/2): want it at 1980 => log(1980) - sd^2 / 2 = mu bseq <- rlnorm(10000000, meanlog = -5.5, sdlog = 0.7) hist(bseq) mean(bseq) plot(density(bseq)) quantile(bseq) # DATA: #25-75: 0.0003 - 0.001: this is close: 0.0004 - 0.001 quantile(bseq, prob = c(0.05, 0.5,0.95)) # DATA: #25-75: 0.0003 - 0.001: this is close: 0.0004 - 0.001 WANT -15\% to 21\% to match Cohen 2014 plot(quantile(bseq)[1:4]) max(bseq) # hits 0.05 ### rho 
****###################################################################### # original: rho <- uniform(0,1) rhoseq <- rnorm(100000,mean = 5, sd = 15) plot(density(rhoseq)) quantile(rhoseq, prob = c(0.05, 0.95)) # DATA: #25-75: 0.0003 - 0.001: this is close: 0.0004 - 0.001 max(rhoseq) ###*** PLOT ***###################################################################################################################################################################### ###### UNIVARIATE *****##################################################################################################################################### # Variation in t tseq <- seq(1970,2014,5) bseq <- 0.01 #seq(0,0.05,0.01) rhoseq <- 5 #seq(-10,36,3) curves <- c() rep <- 0 for(i in 1:length(tseq)){for(j in 1:length(bseq)){for(k in 1:length(rhoseq)){ t_mdr <- tseq[i] b <- bseq[j] rho <- rhoseq[k] c <- rho * b / t_mdr x <- seq(1960, 2020,1) x <- x - t_mdr y <- b*(x) - c*x^2 rep = rep + 1 print(rep) year = seq(1960, 2020,1) curves <- rbind(curves, cbind(rep,x, y, t_mdr,b,rho,c,year)) }}} curves <- as.data.frame(curves) ggplot(curves, aes(x=year, y = y, group = rep, colour = factor(t_mdr))) + geom_line(lwd = 2) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB",limits = c(0,0.6)) + geom_vline(xintercept = 2014) + scale_color_discrete("Time") ggsave("~/Dropbox/MDR/output/prior_curves_t.pdf") # Variation in b tseq <- 1970 #seq(1970,2014,5) bseq <- seq(0,0.01,0.001)#,seq(0.01,0.2,0.05)) rhoseq <- 5 #seq(-10,36,3) curves <- c() rep <- 0 for(i in 1:length(tseq)){for(j in 1:length(bseq)){for(k in 1:length(rhoseq)){ t_mdr <- tseq[i] b <- bseq[j] rho <- rhoseq[k] c <- rho * b / t_mdr x <- seq(1960, 2020,1) x <- x - t_mdr y <- b*(x) - c*x^2 rep = rep + 1 print(rep) year = seq(1960, 2020,1) curves <- rbind(curves, cbind(rep,x, y, t_mdr,b,rho,c,year)) }}} curves <- as.data.frame(curves) ggplot(curves, aes(x=year, y = y, group = rep, colour = factor(b))) + geom_line(lwd = 2) + 
scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB",limits = c(0,0.6)) + geom_vline(xintercept = 2014) + scale_color_discrete("b") ggsave("~/Dropbox/MDR/output/prior_curves_b.pdf") # Variation in rho tseq <- 1970 #seq(1970,2014,5) bseq <- 0.01 #seq(0,0.05,0.01) rhoseq <- seq(-20,36,3) curves <- c() rep <- 0 for(i in 1:length(tseq)){for(j in 1:length(bseq)){for(k in 1:length(rhoseq)){ t_mdr <- tseq[i] b <- bseq[j] rho <- rhoseq[k] c <- rho * b / t_mdr x <- seq(1960, 2020,1) x <- x - t_mdr y <- b*(x) - c*x^2 rep = rep + 1 print(rep) year = seq(1960, 2020,1) curves <- rbind(curves, cbind(rep,x, y, t_mdr,b,rho,c,year)) }}} curves <- as.data.frame(curves) ggplot(curves, aes(x=year, y = y, group = rep, colour = factor(rho))) + geom_line(lwd = 2) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB",limits = c(0,0.6)) + geom_vline(xintercept = 2014) + scale_color_discrete("r") ggsave("~/Dropbox/MDR/output/prior_curves_r.pdf", width = 12, height = 10) ###### MULTIVARIATE *****##################################################################################################################################### nsamples <- 100000 years = seq(1950,2020,1) ny <- length(years) rhoseq <- rtruncnorm(nsamples, a=-Inf, b=36, mean = 5, sd = 15) #rnorm(nsamples,mean = 5, sd = 15) bseq <- rlnorm(nsamples,-5.5, 0.7); mm <- cbind(rep(rhoseq, each = ny),rep(bseq, each = ny)) colnames(mm)<- c("r","b") mm <- as.data.frame(mm) mm$year <- years mm$years <- years - tseq mm$rep <- rep(1:nsamples,each = ny) ###**** different t = 1975 **################################################################################################################################## tseq <- 1975 mm$c <- mm$r*mm$b / tseq mm$y <- mm$b * mm$years - mm$c * mm$years * mm$years curves <- as.data.frame(mm) curves.r.95 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.025), max=quantile(y, 0.975)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), 
min=max(min, 0), max=max(max, 0)) %>% ungroup curves.r.80 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.1), max=quantile(y, 0.9)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup curves.r.50 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.25), max=quantile(y, 0.75)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup ggplot(curves.r.95, aes(x=year)) + geom_line(aes(y=median)) + geom_ribbon(aes(ymin=min, ymax=max), alpha=0.3, fill = "red") + geom_vline(xintercept=2014, linetype="dashed") + geom_ribbon(data = curves.r.50,aes(ymin=min, ymax=max), alpha=0.3, fill = "blue") + geom_ribbon(data = curves.r.80,aes(ymin=min, ymax=max), alpha=0.3, fill = "green") + expand_limits(x = 1950, y = 0.8) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB") ggsave("~/Dropbox/MDR/output/priors_rhotrunc5_1975.pdf") ggplot(curves[1:(50*ny),], aes(x = year, y = y, group = rep)) + geom_line() + geom_vline(xintercept=2014, linetype="dashed") + expand_limits(x = 1950, y = 0) + scale_y_continuous(limits = c(0,1),"Proportion of new TB cases\nthat are MDR-TB") ggsave("~/Dropbox/MDR/output/priors_examples_rhotrunc5_1975.pdf") ###**** different t = 1985 **################################################################################################################################## mm <- as.data.frame(mm) tseq <- 1985 mm$years <- mm$year - tseq rhoseq <- rtruncnorm(nsamples, a=-Inf, b=36, mean = 5, sd = 25) #rnorm(nsamples,mean = 5, sd = 15) bseq <- rlnorm(nsamples,-6.5, 0.6); mm$r <- rep(rhoseq, each = ny) mm$b <- rep(bseq, each = ny) mm$c <- mm$r*mm$b / tseq mm$y <- mm$b * mm$years - mm$c * mm$years * mm$years curves <- as.data.frame(mm) curves.r.95 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.025), max=quantile(y, 0.975)) %>% group_by(n=1:n()) 
%>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup curves.r.80 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.1), max=quantile(y, 0.9)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup curves.r.50 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.25), max=quantile(y, 0.75)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup ggplot(curves.r.95, aes(x=year)) + geom_line(aes(y=median)) + geom_ribbon(aes(ymin=min, ymax=max), alpha=0.3, fill = "red") + geom_vline(xintercept=2014, linetype="dashed") + geom_ribbon(data = curves.r.50,aes(ymin=min, ymax=max), alpha=0.3, fill = "blue") + geom_ribbon(data = curves.r.80,aes(ymin=min, ymax=max), alpha=0.3, fill = "green") + expand_limits(x = 1950, y = 0.8) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB") ggsave("~/Dropbox/MDR/output/priors_rhotrunc5_1985.pdf") # DATA ggplot(who0.r.95, aes(x=year)) + geom_line(aes(y=median/100)) + geom_ribbon(aes(ymin=min/100, ymax=max/100), alpha=0.3, fill = "red") + geom_vline(xintercept=2014, linetype="dashed") + geom_ribbon(data = who0.r.50,aes(ymin=min/100, ymax=max/100), alpha=0.3, fill = "blue") + geom_ribbon(data = who0.r.80,aes(ymin=min/100, ymax=max/100), alpha=0.3, fill = "green") + expand_limits(x = 1950, y = 0.8) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB") ggsave("~/Dropbox/MDR/output/WHO_data_ranges.pdf")
/code/priors_plot.R
permissive
gwenknight/LTBI_MDR
R
false
false
9,838
r
### Priors_plot library(ggplot2) library(dplyr) library(magrittr) library("truncnorm") theme_set(theme_bw(base_size = 24)) # Country list and WHO data who0 <- as.data.frame(read.csv("~/Dropbox/MDR/new_who_edited_sub.csv")[,-1]) who0$year <- who0$year_new who0.r.95 <- who0 %>% group_by(year) %>% dplyr::summarise(median=median(av_mdr_new_pcnt), min=quantile(av_mdr_new_pcnt, 0.025), max=quantile(av_mdr_new_pcnt, 0.975)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup who0.r.80 <- who0 %>% group_by(year) %>% dplyr::summarise(median=median(av_mdr_new_pcnt), min=quantile(av_mdr_new_pcnt, 0.1), max=quantile(av_mdr_new_pcnt, 0.9)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup who0.r.50 <- who0 %>% group_by(year) %>% dplyr::summarise(median=median(av_mdr_new_pcnt), min=quantile(av_mdr_new_pcnt, 0.25), max=quantile(av_mdr_new_pcnt, 0.75)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup ####### Priors: ### tm ****###################################################################### # original: tm <- normal(1980,5) #tseq <- rnorm(100,mean = 1980, sd = 5) tprop <- rnorm(10000,mean = 1985, sd = 9) plot(density(tprop), col = "red") quantile(tprop,probs=c(0.05, 0.95)) tseq <- 1980 ### b ****###################################################################### # original: b <- uniform(0,0.05) bseq <- runif(30,min = 0, max = 0.05) # mean = exp(mu + sd^2/2): want it at 1980 => log(1980) - sd^2 / 2 = mu bseq <- rlnorm(10000000, meanlog = -5.5, sdlog = 0.7) hist(bseq) mean(bseq) plot(density(bseq)) quantile(bseq) # DATA: #25-75: 0.0003 - 0.001: this is close: 0.0004 - 0.001 quantile(bseq, prob = c(0.05, 0.5,0.95)) # DATA: #25-75: 0.0003 - 0.001: this is close: 0.0004 - 0.001 WANT -15\% to 21\% to match Cohen 2014 plot(quantile(bseq)[1:4]) max(bseq) # hits 0.05 ### rho 
****###################################################################### # original: rho <- uniform(0,1) rhoseq <- rnorm(100000,mean = 5, sd = 15) plot(density(rhoseq)) quantile(rhoseq, prob = c(0.05, 0.95)) # DATA: #25-75: 0.0003 - 0.001: this is close: 0.0004 - 0.001 max(rhoseq) ###*** PLOT ***###################################################################################################################################################################### ###### UNIVARIATE *****##################################################################################################################################### # Variation in t tseq <- seq(1970,2014,5) bseq <- 0.01 #seq(0,0.05,0.01) rhoseq <- 5 #seq(-10,36,3) curves <- c() rep <- 0 for(i in 1:length(tseq)){for(j in 1:length(bseq)){for(k in 1:length(rhoseq)){ t_mdr <- tseq[i] b <- bseq[j] rho <- rhoseq[k] c <- rho * b / t_mdr x <- seq(1960, 2020,1) x <- x - t_mdr y <- b*(x) - c*x^2 rep = rep + 1 print(rep) year = seq(1960, 2020,1) curves <- rbind(curves, cbind(rep,x, y, t_mdr,b,rho,c,year)) }}} curves <- as.data.frame(curves) ggplot(curves, aes(x=year, y = y, group = rep, colour = factor(t_mdr))) + geom_line(lwd = 2) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB",limits = c(0,0.6)) + geom_vline(xintercept = 2014) + scale_color_discrete("Time") ggsave("~/Dropbox/MDR/output/prior_curves_t.pdf") # Variation in b tseq <- 1970 #seq(1970,2014,5) bseq <- seq(0,0.01,0.001)#,seq(0.01,0.2,0.05)) rhoseq <- 5 #seq(-10,36,3) curves <- c() rep <- 0 for(i in 1:length(tseq)){for(j in 1:length(bseq)){for(k in 1:length(rhoseq)){ t_mdr <- tseq[i] b <- bseq[j] rho <- rhoseq[k] c <- rho * b / t_mdr x <- seq(1960, 2020,1) x <- x - t_mdr y <- b*(x) - c*x^2 rep = rep + 1 print(rep) year = seq(1960, 2020,1) curves <- rbind(curves, cbind(rep,x, y, t_mdr,b,rho,c,year)) }}} curves <- as.data.frame(curves) ggplot(curves, aes(x=year, y = y, group = rep, colour = factor(b))) + geom_line(lwd = 2) + 
scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB",limits = c(0,0.6)) + geom_vline(xintercept = 2014) + scale_color_discrete("b") ggsave("~/Dropbox/MDR/output/prior_curves_b.pdf") # Variation in rho tseq <- 1970 #seq(1970,2014,5) bseq <- 0.01 #seq(0,0.05,0.01) rhoseq <- seq(-20,36,3) curves <- c() rep <- 0 for(i in 1:length(tseq)){for(j in 1:length(bseq)){for(k in 1:length(rhoseq)){ t_mdr <- tseq[i] b <- bseq[j] rho <- rhoseq[k] c <- rho * b / t_mdr x <- seq(1960, 2020,1) x <- x - t_mdr y <- b*(x) - c*x^2 rep = rep + 1 print(rep) year = seq(1960, 2020,1) curves <- rbind(curves, cbind(rep,x, y, t_mdr,b,rho,c,year)) }}} curves <- as.data.frame(curves) ggplot(curves, aes(x=year, y = y, group = rep, colour = factor(rho))) + geom_line(lwd = 2) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB",limits = c(0,0.6)) + geom_vline(xintercept = 2014) + scale_color_discrete("r") ggsave("~/Dropbox/MDR/output/prior_curves_r.pdf", width = 12, height = 10) ###### MULTIVARIATE *****##################################################################################################################################### nsamples <- 100000 years = seq(1950,2020,1) ny <- length(years) rhoseq <- rtruncnorm(nsamples, a=-Inf, b=36, mean = 5, sd = 15) #rnorm(nsamples,mean = 5, sd = 15) bseq <- rlnorm(nsamples,-5.5, 0.7); mm <- cbind(rep(rhoseq, each = ny),rep(bseq, each = ny)) colnames(mm)<- c("r","b") mm <- as.data.frame(mm) mm$year <- years mm$years <- years - tseq mm$rep <- rep(1:nsamples,each = ny) ###**** different t = 1975 **################################################################################################################################## tseq <- 1975 mm$c <- mm$r*mm$b / tseq mm$y <- mm$b * mm$years - mm$c * mm$years * mm$years curves <- as.data.frame(mm) curves.r.95 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.025), max=quantile(y, 0.975)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), 
min=max(min, 0), max=max(max, 0)) %>% ungroup curves.r.80 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.1), max=quantile(y, 0.9)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup curves.r.50 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.25), max=quantile(y, 0.75)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup ggplot(curves.r.95, aes(x=year)) + geom_line(aes(y=median)) + geom_ribbon(aes(ymin=min, ymax=max), alpha=0.3, fill = "red") + geom_vline(xintercept=2014, linetype="dashed") + geom_ribbon(data = curves.r.50,aes(ymin=min, ymax=max), alpha=0.3, fill = "blue") + geom_ribbon(data = curves.r.80,aes(ymin=min, ymax=max), alpha=0.3, fill = "green") + expand_limits(x = 1950, y = 0.8) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB") ggsave("~/Dropbox/MDR/output/priors_rhotrunc5_1975.pdf") ggplot(curves[1:(50*ny),], aes(x = year, y = y, group = rep)) + geom_line() + geom_vline(xintercept=2014, linetype="dashed") + expand_limits(x = 1950, y = 0) + scale_y_continuous(limits = c(0,1),"Proportion of new TB cases\nthat are MDR-TB") ggsave("~/Dropbox/MDR/output/priors_examples_rhotrunc5_1975.pdf") ###**** different t = 1985 **################################################################################################################################## mm <- as.data.frame(mm) tseq <- 1985 mm$years <- mm$year - tseq rhoseq <- rtruncnorm(nsamples, a=-Inf, b=36, mean = 5, sd = 25) #rnorm(nsamples,mean = 5, sd = 15) bseq <- rlnorm(nsamples,-6.5, 0.6); mm$r <- rep(rhoseq, each = ny) mm$b <- rep(bseq, each = ny) mm$c <- mm$r*mm$b / tseq mm$y <- mm$b * mm$years - mm$c * mm$years * mm$years curves <- as.data.frame(mm) curves.r.95 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.025), max=quantile(y, 0.975)) %>% group_by(n=1:n()) 
%>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup curves.r.80 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.1), max=quantile(y, 0.9)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup curves.r.50 <- curves %>% group_by(year) %>% dplyr::summarise(median=median(y), min=quantile(y, 0.25), max=quantile(y, 0.75)) %>% group_by(n=1:n()) %>% dplyr::mutate(median=max(median, 0), min=max(min, 0), max=max(max, 0)) %>% ungroup ggplot(curves.r.95, aes(x=year)) + geom_line(aes(y=median)) + geom_ribbon(aes(ymin=min, ymax=max), alpha=0.3, fill = "red") + geom_vline(xintercept=2014, linetype="dashed") + geom_ribbon(data = curves.r.50,aes(ymin=min, ymax=max), alpha=0.3, fill = "blue") + geom_ribbon(data = curves.r.80,aes(ymin=min, ymax=max), alpha=0.3, fill = "green") + expand_limits(x = 1950, y = 0.8) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB") ggsave("~/Dropbox/MDR/output/priors_rhotrunc5_1985.pdf") # DATA ggplot(who0.r.95, aes(x=year)) + geom_line(aes(y=median/100)) + geom_ribbon(aes(ymin=min/100, ymax=max/100), alpha=0.3, fill = "red") + geom_vline(xintercept=2014, linetype="dashed") + geom_ribbon(data = who0.r.50,aes(ymin=min/100, ymax=max/100), alpha=0.3, fill = "blue") + geom_ribbon(data = who0.r.80,aes(ymin=min/100, ymax=max/100), alpha=0.3, fill = "green") + expand_limits(x = 1950, y = 0.8) + scale_y_continuous("Proportion of new TB cases\nthat are MDR-TB") ggsave("~/Dropbox/MDR/output/WHO_data_ranges.pdf")
library(readxl) setwd('C:\\Users\\fou-f\\Desktop\\MCE\\Second\\EstadisticaMultivariada\\ProyectoFinal\\CONOCER_Data_Docs\\CONOCER_Data_Docs') encuesta <- read_excel(path = 'CONOCER.xlsx', sheet = 'CONOCER FINAL' ) summary(encuesta) nulos <- is.na(encuesta) sum(nulos) encuesta.completa <- na.omit(encuesta)
/Second/EstadisticaMultivariada/ProyectoFinal/Conocer.R
no_license
fou-foo/MCE_CIMAT
R
false
false
314
r
library(readxl) setwd('C:\\Users\\fou-f\\Desktop\\MCE\\Second\\EstadisticaMultivariada\\ProyectoFinal\\CONOCER_Data_Docs\\CONOCER_Data_Docs') encuesta <- read_excel(path = 'CONOCER.xlsx', sheet = 'CONOCER FINAL' ) summary(encuesta) nulos <- is.na(encuesta) sum(nulos) encuesta.completa <- na.omit(encuesta)
require(shiny) require(shinyjs) require(r4ss) require(plyr) require(dplyr) require(ggplot2) require(reshape2) require(data.table) require(tidyr) require(rlist) require(viridis) require(sss) require(shinyWidgets) require(shinyFiles) require(HandyCode) require(nwfscDiag) require(shinybusy) require(truncnorm) require(flextable) require(officer) require(gridExtra) require(ggpubr) require(grid) require(wesanderson) require(adnuts) require(shinystan) require(geomtextpath) #require(paletteer) #require(RColorBrewer) #require(ggthemes) #devtools::load_all("C:/Users/Jason.Cope/Documents/Github/nwfscDiag") source('Functions.r',local = FALSE) theme_report <- function(base_size = 11) { half_line <- base_size/2 theme_light(base_size = base_size) + theme( panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks.length = unit(half_line / 2.2, "pt"), strip.background = element_rect(fill = NA, colour = NA), strip.text.x = element_text(colour = "black"), strip.text.y = element_text(colour = "black"), panel.border = element_rect(fill = NA), legend.key.size = unit(0.9, "lines"), legend.key = element_rect(colour = NA, fill = NA), legend.background = element_rect(colour = NA, fill = NA) ) } theme_set(theme_report()) shinyServer(function(input, output,session) { useShinyjs() theme_report <- function(base_size = 11) { half_line <- base_size/2 theme_light(base_size = base_size) + theme( panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks.length = unit(half_line / 2.2, "pt"), strip.background = element_rect(fill = NA, colour = NA), strip.text.x = element_text(colour = "black"), strip.text.y = element_text(colour = "black"), panel.border = element_rect(fill = NA), legend.key.size = unit(0.9, "lines"), legend.key = element_rect(colour = NA, fill = NA), legend.background = element_rect(colour = NA, fill = NA) ) } theme_set(theme_report()) ################# ### FUNCTIONS ### ################# VBGF<-function(Linf, k, t0, ages){ Linf * (1 - 
exp(-k * (ages - t0))) } VBGF.age<-function(Linf,k,t0,lt){ t0 - (log(1 - (lt / Linf)) / k) } RUN.SS<-function(path,ss.cmd=" -nohess -nox",OS.in="Windows"){ navigate <- paste("cd ", path, sep="") if(OS.in=="Windows") { #command <- paste0(navigate," & ", "ss", ss.cmd) #shell(command, invisible=TRUE, translate=TRUE) run(path,exe="ss",extras=ss.cmd,skipfinished=FALSE,show_in_console = TRUE) } if(OS.in=="Mac") { command <- c(paste("cd", path), "chmod +x ./ss_osx",paste("./ss_osx", ss.cmd)) system(paste(command, collapse=";"),invisible=TRUE) #command <- paste0(path,"/./ss_mac", ss.cmd) #system(command, invisible=TRUE) } if(OS.in=="Linux") { command <- c(paste("cd", path), "chmod +x ./ss_linux",paste("./ss_linux", ss.cmd)) system(paste(command, collapse=";"), invisible=TRUE) } } pngfun <- function(wd, file,w=7,h=7,pt=12){ file <- file.path(wd, file) cat('writing PNG to',file,'\n') png(filename=file, width=w,height=h, units='in',res=300,pointsize=pt) } rc <- function(n,alpha=1){ # a subset of rich.colors by Arni Magnusson from the gregmisc package # a.k.a. 
rich.colors.short, but put directly in this function # to try to diagnose problem with transparency on one computer x <- seq(0, 1, length = n) r <- 1/(1 + exp(20 - 35 * x)) g <- pmin(pmax(0, -0.8 + 6 * x - 5 * x^2), 1) b <- dnorm(x, 0.25, 0.15)/max(dnorm(x, 0.25, 0.15)) rgb.m <- matrix(c(r, g, b), ncol = 3) rich.vector <- apply(rgb.m, 1, function(v) rgb(v[1], v[2], v[3], alpha=alpha)) } doubleNorm24.sel <- function(Sel50,Selpeak,PeakDesc,LtPeakFinal,FinalSel) { #UPDATED: - input e and f on 0 to 1 scal and transfrom to logit scale # - changed bin width in peak2 calculation # - updated index of sel when j2 < length(x) # - renamed input parameters, cannot have same names as the logitstic function # - function not handling f < -1000 correctly x<-seq(1,Selpeak+Selpeak,1) bin_width <- x[2] - x[1] a<- Selpeak b<- -log((max(x)-Selpeak-bin_width)/(PeakDesc-Selpeak-bin_width)) c<- log(-((Sel50-Selpeak)^2/log(0.5))) d<- log(LtPeakFinal) e<- -15 f<- -log((1/(FinalSel+0.000000001)-1)) sel <- rep(NA, length(x)) startbin <- 1 peak <- a upselex <- exp(c) downselex <- exp(d) final <- f if (e < -1000) { j1 <- -1001 - round(e) sel[1:j1] <- 1e-06 } if (e >= -1000) { j1 <- startbin - 1 if (e > -999) { point1 <- 1/(1 + exp(-e)) t1min <- exp(-(x[startbin] - peak)^2/upselex) } } if (f < -1000) j2 <- -1000 - round(f) if (f >= -1000) j2 <- length(x) peak2 <- peak + bin_width + (0.99 * x[j2] - peak - bin_width)/(1 + exp(-b)) if (f > -999) { point2 <- 1/(1 + exp(-final)) t2min <- exp(-(x[j2] - peak2)^2/downselex) } t1 <- x - peak t2 <- x - peak2 join1 <- 1/(1 + exp(-(20/(1 + abs(t1))) * t1)) join2 <- 1/(1 + exp(-(20/(1 + abs(t2))) * t2)) if (e > -999) asc <- point1 + (1 - point1) * (exp(-t1^2/upselex) - t1min)/(1 - t1min) if (e <= -999) asc <- exp(-t1^2/upselex) if (f > -999) dsc <- 1 + (point2 - 1) * (exp(-t2^2/downselex) - 1)/(t2min - 1) if (f <= -999) dsc <- exp(-(t2)^2/downselex) idx.seq <- (j1 + 1):j2 sel[idx.seq] <- asc[idx.seq] * (1 - join1[idx.seq]) + join1[idx.seq] * (1 - 
join2[idx.seq] + dsc[idx.seq] * join2[idx.seq]) if (startbin > 1 && e >= -1000) { sel[1:startbin] <- (x[1:startbin]/x[startbin])^2 * sel[startbin] } if (j2 < length(x)) sel[(j2 + 1):length(x)] <- sel[j2] return(cbind(x,sel)) } ########## Clear data files and plots ############ rv.Lt <- reactiveValues(data = NULL,clear = FALSE) rv.Age <- reactiveValues(data = NULL,clear = FALSE) rv.Ct <- reactiveValues(data = NULL,clear = FALSE) rv.Index <- reactiveValues(data = NULL,clear = FALSE) rv.AgeErr <- reactiveValues(data = NULL,clear = FALSE) ######## #Reset catches observe({ req(input$file2) req(!rv.Ct$clear) rv.Ct$data <- fread(input$file2$datapath,check.names=FALSE,data.table=FALSE) #L <- readLines(input$file2$datapath, n = 1) #if(grepl(";", L)) {rv.Ct$data <- read.csv2(input$file2$datapath,check.names=FALSE)} }) observeEvent(input$file2, { rv.Ct$clear <- FALSE }, priority = 1000) observeEvent(input$reset_ct, { rv.Ct$data <- NULL rv.Ct$clear <- TRUE reset('file2') }, priority = 1000) #Reset lengths observe({ req(input$file1) req(!rv.Lt$clear) rv.Lt$data <- fread(input$file1$datapath,check.names=FALSE,data.table=FALSE) #L <- readLines(input$file1$datapath, n = 1) #rv.Lt$data <- read.csv(input$file1$datapath,check.names=FALSE) #if(grepl(";", L)) {rv.Lt$data <- read.csv2(input$file1$datapath,check.names=FALSE)} }) observeEvent(input$file1, { rv.Lt$clear <- FALSE }, priority = 1000) observeEvent(input$reset_lt, { rv.Lt$data <- NULL rv.Lt$clear <- TRUE reset('file1') }, priority = 1000) #Reset ages observe({ req(input$file3) req(!rv.Age$clear) rv.Age$data <- fread(input$file3$datapath,check.names=FALSE,data.table=FALSE) #L <- readLines(input$file3$datapath, n = 1) #if(grepl(";", L)) {rv.Age$data <- read.csv2(input$file3$datapath,check.names=FALSE)} }) observeEvent(input$file3, { rv.Age$clear <- FALSE }, priority = 1000) observeEvent(input$reset_age, { rv.Age$data <- NULL rv.Age$clear <- TRUE reset('file3') }, priority = 1000) #Reset ageing error observe({ req(input$file33) 
req(!rv.AgeErr$clear) rv.AgeErr$data <- fread(input$file33$datapath,check.names=FALSE,header=FALSE,data.table=FALSE) #L <- readLines(input$file33$datapath, n = 1) #if(grepl(";", L)) {rv.AgeErr$data <- read.csv2(input$file33$datapath,check.names=FALSE,header=FALSE)} }) observeEvent(input$file33, { rv.AgeErr$clear <- FALSE if(!input$Ageing_error_choice){ rv.AgeErr$data <- NULL rv.AgeErr$clear <- TRUE reset('file33')} }, priority = 1000) # # if(!is.null(input$Ageing_error_choice)){ # observeEvent(input$file33, { # if(!input$Ageing_error_choice){ # rv.AgeErr$data <- NULL # rv.AgeErr$clear <- TRUE # reset('file33') #} # }, priority = 1000) # } #Reset index observe({ req(input$file4) req(!rv.Index$clear) rv.Index$data <- fread(input$file4$datapath,check.names=FALSE,data.table=FALSE) #L <- readLines(input$file4$datapath, n = 1) #rv.Index$data <- read.csv(input$file4$datapath,check.names=FALSE) #if(grepl(";", L)) {rv.Index$data <- read.csv2(input$file4$datapath,check.names=FALSE,header=FALSE)} }) observeEvent(input$file4, { rv.Index$clear <- FALSE }, priority = 1000) observeEvent(input$reset_index, { rv.Index$data <- NULL rv.Index$clear <- TRUE reset('file4') }, priority = 1000) #Throw an error if fleets are not consecutively represented in all loaded data sets. observeEvent(req(any(!is.null(rv.Ct$data),!is.null(rv.Lt$data),!is.null(rv.Age$data),!is.null(rv.Index$data))),{ ct.flt<-lt.flt<-age.flt<-index.flt<-NA if(!is.null(rv.Ct$data)){ct.flt<-c(1:(ncol(rv.Ct$data)))} if(!is.null(rv.Lt$data)){lt.flt<-rv.Lt$data[,3]} if(!is.null(rv.Age$data)){age.flt<-rv.Age$data[,3]} if(!is.null(rv.Index$data)){index.flt<-rv.Index$data[,3]} fleets.no.negs<-unique(na.omit(c(ct.flt,lt.flt,age.flt,index.flt)))[unique(na.omit(c(ct.flt,lt.flt,age.flt,index.flt)))>0] #remove any negative fleets if(length(fleets.no.negs)!=length(seq(1:max(fleets.no.negs)))) { sendSweetAlert( session = session, title = "Model Warning", text = "Non-consecutive fleet numbering. 
Check all data sets (e.g., catch, lengths, ages, indices) to make sure all fleets from 1 to the maximum fleet number are found when considered across all data sets. For instance, if you have 3 total fleets, there should not be a fleet number > 3 (e.g., 1,2,4). All fleets are not expected in each data file, just across all data files.", type = "warning") } }) ####### # observeEvent(input$reset_lt, { # rv.Lt$data <- NULL # shinyjs::reset('file1') # }) # # observeEvent(input$reset_lt, { # # output$Ltplot<-renderPlot({ # # rv.Lt$data <- NULL # # if (is.null(rv.Lt$data)) return(NULL) # # }) # # }) # observeEvent(input$reset_age, { # rv.Age$data <- NULL # shinyjs::reset('file3') # }) # observeEvent(input$reset_ct, { # rv.Ct$data <- NULL # shinyjs::reset('file2') # }) ##################################################### onclick("est_LHparms",id="panel_SS_est") observe({ shinyjs::show("Data_panel") hideTab(inputId = "tabs", target = "11") #shinyjs::hide("OS_choice") #shinyjs::hide("run_SS") #shinyjs::hide("run_SSS") }) #To get the ObserveEvent to work, each statement in req needs to be unique. #This explains the workaround of ((as.numeric(input$tabs)*x)/x)<4, where x is the unique type of assessment being run #This input allows other tabs to have different side panels. 
#Switch back to data from different tabs observeEvent(req(((as.numeric(input$tabs)*99)/99)<4), { shinyjs::show("Data_panel") shinyjs::show("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::hide("panel_RPs") shinyjs::hide("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::hide("panel_SSS_reps") shinyjs::hide("OS_choice") shinyjs::hide("Scenario_panel") shinyjs::hide("run_SSS") shinyjs::hide("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") hideTab(inputId = "tabs", target = "11") showTab(inputId = "tabs", target = "2") # hideTab(inputId = "tabs", target = "3") # hideTab(inputId = "tabs", target = "4") # hideTab(inputId = "tabs", target = "5") # hideTab(inputId = "tabs", target = "6") }) #Reset when all things are clicked off observeEvent(req(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)), { shinyjs::show("Data_panel") shinyjs::show("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") 
shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::hide("panel_RPs") shinyjs::hide("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::hide("panel_SSS_reps") shinyjs::hide("OS_choice") shinyjs::hide("Scenario_panel") shinyjs::hide("run_SSS") shinyjs::hide("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") hideTab(inputId = "tabs", target = "11") showTab(inputId = "tabs", target = "2") }) #User chosen model observeEvent(req(!is.null(input$user_model)&input$user_model), { shinyjs::show("Data_panel") shinyjs::show("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::show("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::show("panel_SS_jitter") 
shinyjs::show("panel_RPs") shinyjs::show("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_SSS_reps") shinyjs::hide("panel_advanced_SS") shinyjs::show("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::show("OS_choice") shinyjs::show("Scenario_panel") shinyjs::hide("run_SSS") shinyjs::show("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") #shinyjs::show("tab_sss") showTab(inputId = "tabs", target = "11") hideTab(inputId = "tabs", target = "2") }) #SSS panels observeEvent(req(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)), { shinyjs::show("Data_panel") shinyjs::hide("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") shinyjs::show("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::show("panel_SS_stock_status") shinyjs::show("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::show("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::show("panel_RPs") shinyjs::show("panel_Forecasts") shinyjs::show("panel_Mod_dims") shinyjs::show("panel_SSS_reps") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::show("panel_advanced_SSS") shinyjs::show("OS_choice") shinyjs::show("Scenario_panel") shinyjs::show("run_SSS") shinyjs::hide("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") 
  # (tail of the SSS-panel observer started in the previous chunk)
  shinyjs::hide("Sensi_Comparison_panel")
  shinyjs::hide("Ensemble_panel")
  #shinyjs::show("tab_sss")
  showTab(inputId = "tabs", target = "11")
  hideTab(inputId = "tabs", target = "2")
})

#SS-LO panels
# Length- and/or age-only data (no catch): Stock Synthesis length-only setup.
# NOTE(review): the nested if()s index rv.Lt$data[,3] / rv.Age$data[,3] even
# though only one of the two may be loaded; subsetting a NULL there would
# error -- confirm both are guaranteed non-NULL when this fires.
observeEvent(req(((as.numeric(input$tabs)*2)/2)<4&all(!is.null(c(rv.Lt$data,rv.Age$data)),is.null(rv.Ct$data))&any(is.null(input$user_model),!input$user_model)), {
  shinyjs::show("Data_panel")
  shinyjs::show("Existing_files")
  shinyjs::show("panel_Ct_F_LO")
  shinyjs::show("panel_data_wt_lt")
  # Catch-weighting panel only applies when more than one fleet is present.
  if(length(unique(rv.Lt$data[,3]))>1|length(unique(rv.Age$data[,3]))>1){shinyjs::show("panel_ct_wt_LO")}
  if(length(unique(rv.Lt$data[,3]))==1|length(unique(rv.Age$data[,3]))==1){shinyjs::hide("panel_ct_wt_LO")}
  #if(input$Ct_F_LO_select){shinyjs::show("panel_ct_wt_LO")}
  #if(input$Ct_F_LO_select==NULL){shinyjs::hide("panel_ct_wt_LO")}
  shinyjs::hide("panel_SSS")
  shinyjs::show("panel_SSLO_LH")
  shinyjs::show("panel_SSLO_fixed")
  shinyjs::hide("panel_SS_LH_fixed_est_tog")
  shinyjs::hide("panel_SS_LH_fixed")
  shinyjs::hide("panel_SS_fixed")
  shinyjs::hide("panel_SS_LH_est")
  shinyjs::hide("panel_SS_est")
  shinyjs::hide("panel_SS_stock_status")
  shinyjs::hide("panel_SSS_prod")
  shinyjs::show("panel_SS_LO_prod")
  shinyjs::hide("panel_SS_prod_fixed")
  shinyjs::hide("panel_SS_prod_est")
  shinyjs::show("panel_selectivity")
  shinyjs::hide("panel_selectivity_sss")
  shinyjs::show("panel_SS_recdevs")
  shinyjs::show("panel_SS_jitter")
  shinyjs::show("panel_RPs")
  shinyjs::show("panel_Forecasts")
  shinyjs::show("panel_Mod_dims")
  shinyjs::show("panel_advanced_SS")
  shinyjs::hide("panel_advanced_user_SS")
  shinyjs::hide("panel_advanced_SSS")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::show("OS_choice")
  shinyjs::show("Scenario_panel")
  shinyjs::hide("run_SSS")
  shinyjs::show("run_SS")
  shinyjs::hide("Modeff_panel")
  shinyjs::hide("Profile_panel")
  shinyjs::hide("Retro_panel")
  shinyjs::hide("Sensi_Comparison_panel")
  shinyjs::hide("Ensemble_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # hideTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

#SS-CL fixed parameters
# Catch plus at least one composition/index data set, with life-history
# parameters held fixed (either estimation toggle off).
observeEvent(req(((as.numeric(input$tabs)*3)/3)<4&all(any(input$est_parms==FALSE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))&any(is.null(input$user_model),!input$user_model)), {
  shinyjs::show("Data_panel")
  shinyjs::show("Existing_files")
  shinyjs::hide("panel_Ct_F_LO")
  # Data-weighting panel is only relevant when composition data are loaded.
  if(any(!is.null(rv.Lt$data),!is.null(rv.Age$data))){shinyjs::show("panel_data_wt_lt")} else (shinyjs::hide("panel_data_wt_lt"))
  shinyjs::hide("panel_ct_wt_LO")
  shinyjs::hide("panel_SSS")
  shinyjs::hide("panel_SSLO_LH")
  shinyjs::hide("panel_SSLO_fixed")
  shinyjs::show("panel_SS_LH_fixed_est_tog")
  shinyjs::show("panel_SS_LH_fixed")
  shinyjs::show("panel_SS_fixed")
  shinyjs::hide("panel_SS_LH_est")
  shinyjs::hide("panel_SS_est")
  shinyjs::hide("panel_SS_stock_status")
  shinyjs::hide("panel_SSS_prod")
  shinyjs::hide("panel_SS_LO_prod")
  shinyjs::show("panel_SS_prod_fixed")
  shinyjs::hide("panel_SS_prod_est")
  shinyjs::show("panel_selectivity")
  shinyjs::hide("panel_selectivity_sss")
  shinyjs::show("panel_SS_recdevs")
  shinyjs::show("panel_SS_jitter")
  shinyjs::show("panel_RPs")
  shinyjs::show("panel_Forecasts")
  shinyjs::show("panel_Mod_dims")
  shinyjs::show("panel_advanced_SS")
  shinyjs::hide("panel_advanced_user_SS")
  shinyjs::hide("panel_advanced_SSS")
  shinyjs::show("OS_choice")
  shinyjs::show("Scenario_panel")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::hide("run_SSS")
  shinyjs::show("run_SS")
  shinyjs::hide("Modeff_panel")
  shinyjs::hide("Profile_panel")
  shinyjs::hide("Retro_panel")
  shinyjs::hide("Sensi_Comparison_panel")
  shinyjs::hide("Ensemble_panel")
  #shinyjs::hide(selector = "#navbar li a[data-value=11]")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # show(selector = '#hello li a[data-value="2"]')
  #show(selector = '#hello li a[data-value="2"]')
  # showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # showTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

#SS-CL with parameter estimates
# Same data combinations as above but with life-history estimation enabled
# (est_parms == TRUE): swap the fixed-parameter panels for estimation panels.
observeEvent(req(((as.numeric(input$tabs)*4)/4)<4&all(input$est_parms==TRUE,any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))&any(is.null(input$user_model),!input$user_model)), {
  shinyjs::show("Data_panel")
  shinyjs::show("Existing_files")
  shinyjs::hide("panel_Ct_F_LO")
  if(any(!is.null(rv.Lt$data),!is.null(rv.Age$data))){shinyjs::show("panel_data_wt_lt")} else (shinyjs::hide("panel_data_wt_lt"))
  shinyjs::hide("panel_ct_wt_LO")
  shinyjs::hide("panel_SSS")
  shinyjs::hide("panel_SSLO_LH")
  shinyjs::hide("panel_SSLO_fixed")
  shinyjs::show("panel_SS_LH_fixed_est_tog")
  shinyjs::hide("panel_SS_LH_fixed")
  shinyjs::hide("panel_SS_fixed")
  shinyjs::show("panel_SS_LH_est")
  shinyjs::show("panel_SS_est")
  shinyjs::hide("panel_SS_stock_status")
  shinyjs::hide("panel_SSS_prod")
  shinyjs::hide("panel_SS_LO_prod")
  shinyjs::hide("panel_SS_prod_fixed")
  shinyjs::show("panel_SS_prod_est")
  shinyjs::show("panel_selectivity")
  shinyjs::hide("panel_selectivity_sss")
  shinyjs::show("panel_SS_recdevs")
  shinyjs::show("panel_SS_jitter")
  shinyjs::show("panel_RPs")
  shinyjs::show("panel_Forecasts")
  shinyjs::show("panel_Mod_dims")
  shinyjs::show("panel_advanced_SS")
  shinyjs::hide("panel_advanced_user_SS")
  shinyjs::hide("panel_advanced_SSS")
  shinyjs::show("OS_choice")
  shinyjs::show("Scenario_panel")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::hide("run_SSS")
  shinyjs::show("run_SS")
  shinyjs::hide("Modeff_panel")
  shinyjs::hide("Profile_panel")
  shinyjs::hide("Retro_panel")
  shinyjs::hide("Sensi_Comparison_panel")
  shinyjs::hide("Ensemble_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # showTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

#Model Efficiency
# Tab 12 (model-efficiency tools): hide everything except the Modeff panel.
observeEvent(req((as.numeric(input$tabs)*12/12)==12), {
  shinyjs::hide("Data_panel")
  shinyjs::hide("Existing_files")
  shinyjs::hide("panel_Ct_F_LO")
  shinyjs::hide("panel_data_wt_lt")
  shinyjs::hide("panel_ct_wt_LO")
  shinyjs::hide("panel_SSS")
  shinyjs::hide("panel_SSLO_LH")
  shinyjs::hide("panel_SSLO_fixed")
  shinyjs::hide("panel_SS_LH_fixed_est_tog")
  shinyjs::hide("panel_SS_LH_fixed")
  shinyjs::hide("panel_SS_fixed")
  shinyjs::hide("panel_SS_LH_est")
  shinyjs::hide("panel_SS_est")
  shinyjs::hide("panel_SS_stock_status")
  shinyjs::hide("panel_SSS_prod")
  shinyjs::hide("panel_SS_LO_prod")
  shinyjs::hide("panel_SS_prod_fixed")
  shinyjs::hide("panel_SS_prod_est")
  shinyjs::hide("panel_selectivity")
  shinyjs::hide("panel_selectivity_sss")
  shinyjs::hide("panel_SS_recdevs")
  shinyjs::hide("panel_SS_jitter")
  shinyjs::hide("panel_RPs")
  shinyjs::hide("panel_Forecasts")
  shinyjs::hide("panel_Mod_dims")
  shinyjs::hide("panel_advanced_SS")
  shinyjs::hide("panel_advanced_user_SS")
  shinyjs::hide("panel_advanced_SSS")
  shinyjs::hide("OS_choice")
  shinyjs::hide("Scenario_panel")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::hide("run_SSS")
  shinyjs::hide("run_SS")
  shinyjs::show("Modeff_panel")
  shinyjs::hide("Profile_panel")
  shinyjs::hide("Retro_panel")
  shinyjs::hide("Sensi_Comparison_panel")
  shinyjs::hide("Ensemble_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # showTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

#Profiles
# Tab 4 (likelihood profiles): show only the Profile panel.
# (Observer body continues past this chunk.)
observeEvent(req((as.numeric(input$tabs)*4/4)==4), {
  shinyjs::hide("Data_panel")
  shinyjs::hide("Existing_files")
  shinyjs::hide("panel_Ct_F_LO")
  shinyjs::hide("panel_data_wt_lt")
  shinyjs::hide("panel_ct_wt_LO")
  # (tail of the Profiles observer started in the previous chunk)
  shinyjs::hide("panel_SSS")
  shinyjs::hide("panel_SSLO_LH")
  shinyjs::hide("panel_SSLO_fixed")
  shinyjs::hide("panel_SS_LH_fixed_est_tog")
  shinyjs::hide("panel_SS_LH_fixed")
  shinyjs::hide("panel_SS_fixed")
  shinyjs::hide("panel_SS_LH_est")
  shinyjs::hide("panel_SS_est")
  shinyjs::hide("panel_SS_stock_status")
  shinyjs::hide("panel_SSS_prod")
  shinyjs::hide("panel_SS_LO_prod")
  shinyjs::hide("panel_SS_prod_fixed")
  shinyjs::hide("panel_SS_prod_est")
  shinyjs::hide("panel_selectivity")
  shinyjs::hide("panel_selectivity_sss")
  shinyjs::hide("panel_SS_recdevs")
  shinyjs::hide("panel_SS_jitter")
  shinyjs::hide("panel_RPs")
  shinyjs::hide("panel_Forecasts")
  shinyjs::hide("panel_Mod_dims")
  shinyjs::hide("panel_advanced_SS")
  shinyjs::hide("panel_advanced_user_SS")
  shinyjs::hide("panel_advanced_SSS")
  shinyjs::hide("OS_choice")
  shinyjs::hide("Scenario_panel")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::hide("run_SSS")
  shinyjs::hide("run_SS")
  shinyjs::hide("Modeff_panel")
  shinyjs::show("Profile_panel")
  shinyjs::hide("Retro_panel")
  shinyjs::hide("Sensi_Comparison_panel")
  shinyjs::hide("Ensemble_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # showTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

#Retrospecitves
# Tab 5 (retrospective analyses): show only the Retro panel.
observeEvent(req((as.numeric(input$tabs)*5/5)==5), {
  shinyjs::hide("Data_panel")
  shinyjs::hide("Existing_files")
  shinyjs::hide("panel_Ct_F_LO")
  shinyjs::hide("panel_data_wt_lt")
  shinyjs::hide("panel_ct_wt_LO")
  shinyjs::hide("panel_SSS")
  shinyjs::hide("panel_SSLO_LH")
  shinyjs::hide("panel_SSLO_fixed")
  shinyjs::hide("panel_SS_LH_fixed_est_tog")
  shinyjs::hide("panel_SS_LH_fixed")
  shinyjs::hide("panel_SS_fixed")
  shinyjs::hide("panel_SS_LH_est")
  shinyjs::hide("panel_SS_est")
  shinyjs::hide("panel_SS_stock_status")
  shinyjs::hide("panel_SSS_prod")
  shinyjs::hide("panel_SS_LO_prod")
  shinyjs::hide("panel_SS_prod_fixed")
  shinyjs::hide("panel_SS_prod_est")
  shinyjs::hide("panel_selectivity")
  shinyjs::hide("panel_selectivity_sss")
  shinyjs::hide("panel_SS_recdevs")
  shinyjs::hide("panel_SS_jitter")
  shinyjs::hide("panel_RPs")
  shinyjs::hide("panel_Forecasts")
  shinyjs::hide("panel_Mod_dims")
  shinyjs::hide("panel_advanced_SS")
  shinyjs::hide("panel_advanced_user_SS")
  shinyjs::hide("panel_advanced_SSS")
  shinyjs::hide("OS_choice")
  shinyjs::hide("Scenario_panel")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::hide("run_SSS")
  shinyjs::hide("run_SS")
  shinyjs::hide("Modeff_panel")
  shinyjs::hide("Profile_panel")
  shinyjs::show("Retro_panel")
  shinyjs::hide("Sensi_Comparison_panel")
  shinyjs::hide("Ensemble_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # showTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

#Sensitivities
# Tab 6 (sensitivity comparisons): show only the Sensi comparison panel.
# (Observer body continues past this chunk.)
observeEvent(req((as.numeric(input$tabs)*6/6)==6), {
  shinyjs::hide("Data_panel")
  shinyjs::hide("Existing_files")
  shinyjs::hide("panel_Ct_F_LO")
  shinyjs::hide("panel_data_wt_lt")
  shinyjs::hide("panel_ct_wt_LO")
  shinyjs::hide("panel_SSS")
  shinyjs::hide("panel_SSLO_LH")
  shinyjs::hide("panel_SSLO_fixed")
  shinyjs::hide("panel_SS_LH_fixed_est_tog")
  shinyjs::hide("panel_SS_LH_fixed")
  shinyjs::hide("panel_SS_fixed")
  shinyjs::hide("panel_SS_LH_est")
  shinyjs::hide("panel_SS_est")
  shinyjs::hide("panel_SS_stock_status")
  shinyjs::hide("panel_SSS_prod")
  shinyjs::hide("panel_SS_LO_prod")
  shinyjs::hide("panel_SS_prod_fixed")
  shinyjs::hide("panel_SS_prod_est")
  shinyjs::hide("panel_selectivity")
  shinyjs::hide("panel_selectivity_sss")
  shinyjs::hide("panel_SS_recdevs")
  shinyjs::hide("panel_SS_jitter")
  shinyjs::hide("panel_RPs")
  shinyjs::hide("panel_Forecasts")
  shinyjs::hide("panel_Mod_dims")
  shinyjs::hide("panel_advanced_SS")
  shinyjs::hide("panel_advanced_user_SS")
  shinyjs::hide("panel_advanced_SSS")
  shinyjs::hide("OS_choice")
  # (tail of the Sensitivities observer started in the previous chunk)
  shinyjs::hide("Scenario_panel")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::hide("run_SSS")
  shinyjs::hide("run_SS")
  shinyjs::hide("Modeff_panel")
  shinyjs::hide("Profile_panel")
  shinyjs::hide("Retro_panel")
  shinyjs::show("Sensi_Comparison_panel")
  shinyjs::hide("Ensemble_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # showTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

#Ensembles
# Tab 7 (ensemble modelling): show only the Ensemble panel.
observeEvent(req((as.numeric(input$tabs)*7/7)==7), {
  shinyjs::hide("Data_panel")
  shinyjs::hide("Existing_files")
  shinyjs::hide("panel_Ct_F_LO")
  shinyjs::hide("panel_data_wt_lt")
  shinyjs::hide("panel_ct_wt_LO")
  shinyjs::hide("panel_SSS")
  shinyjs::hide("panel_SSLO_LH")
  shinyjs::hide("panel_SSLO_fixed")
  shinyjs::hide("panel_SS_LH_fixed_est_tog")
  shinyjs::hide("panel_SS_LH_fixed")
  shinyjs::hide("panel_SS_fixed")
  shinyjs::hide("panel_SS_LH_est")
  shinyjs::hide("panel_SS_est")
  shinyjs::hide("panel_SS_stock_status")
  shinyjs::hide("panel_SSS_prod")
  shinyjs::hide("panel_SS_LO_prod")
  shinyjs::hide("panel_SS_prod_fixed")
  shinyjs::hide("panel_SS_prod_est")
  shinyjs::hide("panel_selectivity")
  shinyjs::hide("panel_selectivity_sss")
  shinyjs::hide("panel_SS_recdevs")
  shinyjs::hide("panel_SS_jitter")
  shinyjs::hide("panel_RPs")
  shinyjs::hide("panel_Forecasts")
  shinyjs::hide("panel_Mod_dims")
  shinyjs::hide("panel_advanced_SS")
  shinyjs::hide("panel_advanced_user_SS")
  shinyjs::hide("panel_advanced_SSS")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::hide("OS_choice")
  shinyjs::hide("Scenario_panel")
  shinyjs::hide("run_SSS")
  shinyjs::hide("run_SS")
  shinyjs::hide("Modeff_panel")
  shinyjs::hide("Profile_panel")
  shinyjs::hide("Retro_panel")
  shinyjs::hide("Sensi_Comparison_panel")
  shinyjs::show("Ensemble_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # showTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

########################################

#############################
######### UI INPUTS #########
#############################

# User activated pop-up parameter values ---------------

#Model dimensions
# Derive default start/end model years from whichever data files are loaded.
# NOTE(review): when only one of lengths/ages is loaded, min/max subset the
# other (NULL) object with [,1], which would error -- confirm both are
# always loaded together when the first branch fires.
output$Model_dims1 <- renderUI({
  inFile1 = rv.Lt$data
  inFile2 = rv.Ct$data
  inFile3 = rv.Age$data
  #No file inputs
  if (is.null(inFile1) & is.null(inFile2) & is.null(inFile3)) return(NULL)
  #If have lengths and/or ages, but no catches
  if (any(!is.null(inFile1), !is.null(inFile3))& is.null(inFile2)){
    styr.in = min(inFile1[,1],inFile3[,1])
    endyr.in = max(inFile1[,1],inFile3[,1])
    # if(!(anyNA(c(Linf(), k_vbgf(),t0_vbgf())))& input$Ct_F_LO_select=="Constant Catch"){
    # styr.in = min(inFile1[,1],inFile3[,1])-round(VBGF.age(Linf(), k_vbgf(), t0_vbgf(), Linf()*0.95))
    # }
  }
  #If have catches
  if (!is.null(inFile2)){
    styr.in<-min(inFile2[,1])
    endyr.in<-max(inFile2[,1])
  }
  #If lengths or ages with catches
  if (!is.null(inFile1) &!is.null(inFile2)|!is.null(inFile3) &!is.null(inFile2)){
    styr.in<-min(inFile1[,1],inFile2[,1],inFile3[,1])
    endyr.in<-max(inFile1[,1],inFile2[,1],inFile3[,1])
  }
  fluidRow(column(width=4, numericInput("styr", "Starting year", value=styr.in, min=1, max=10000, step=1)),
           column(width=4, numericInput("endyr","Ending year", value=endyr.in, min=1, max=10000, step=1)))
  # if (!is.null(inFile2)){
  # fluidRow(column(width=4, numericInput("styr", "Starting year",
  # value=min(inFile2[,1]), min=1, max=10000, step=1)),
  # column(width=4, numericInput("endyr", "Ending year",
  # value=max(inFile2[,1]), min=1, max=10000, step=1)))
  # }
  # print(styr.in)
  # print(endyr.in)
})

# Catch-only variant of the model-dimension inputs (same input IDs).
output$Model_dims2 <- renderUI({
  Ct.data = rv.Ct$data
  # if (is.null(Ct.data)) return(NULL)
  if (!is.null(Ct.data)){
    fluidRow(column(width=4, numericInput("styr", "Starting year", value=min(Ct.data[,1]), min=1, max=10000, step=1)),
             column(width=4, numericInput("endyr", "Ending year", value=max(Ct.data[,1]), min=1,
                                          max=10000, step=1)))
  }
})

# output$Female_parms_inputs_label <- reactive({
# if(!is.null(input$file1))
# {
# (output$Female_parms_inputs_label<- renderUI({
# fluidRow(column(width=6,numericInput("Nages","Max. age", value=NA,min=1, max=1000, step=1)),
# column(width=6,numericInput("M_f", "Natural mortality", value=NA,min=0, max=10000, step=0.01)))
# }))
# }
# })

#Male life history parameters
# The male inputs only appear when the "male parameters" box is ticked.
output$Male_parms_inputs_label <- renderUI({
  if(input$male_parms){
    h5(em("Male"))
  }
})
output$Male_parms_inputs1 <- renderUI({
  if(input$male_parms){
    fluidRow(column(width=6, numericInput("M_m", "Natural mortality", value=NA, min=0, max=10000, step=0.01)),
             column(width=6, numericInput("Linf_m", "Asymptotic size (Linf)", value=NA, min=0, max=10000, step=0.01)))
  }
})
output$Male_parms_inputs2 <- renderUI({
  if(input$male_parms){
    fluidRow(column(width=6, numericInput("k_m", "Growth coefficient k", value=NA, min=0, max=10000, step=0.01)),
             column(width=6, numericInput("t0_m", "Age at length 0 (t0)", value=NA, min=0, max=10000, step=0.01)))
  }
})
output$Male_parms_inputs3 <- renderUI({
  if(input$male_parms){
    # Two comma-separated CVs: young then old fish.
    fluidRow(column(width=6, textInput("CV_lt_m", "CV at length (young then old)", value="0.1,0.1")))
  }
})
output$Male_parms_inputs4 <- renderUI({
  if(input$male_parms){
    fluidRow(column(width=6, numericInput("WLa_m", "Weight-length alpha", value=0.00001, min=0, max=10000, step=0.000000001)),
             column(width=6, numericInput("WLb_m", "Weight-length beta", value=3, min=0, max=10000, step=0.01)))
  }
})

# "Fixed" variants of the male inputs (male_parms_fix toggle).
output$Male_parms_inputs_label_fix <- renderUI({
  if(input$male_parms_fix){
    h5(em("Male"))
  }
})
output$Male_parms_inputs1_fix <- renderUI({
  if(input$male_parms_fix){
    fluidRow(column(width=6, numericInput("M_m_fix", "Natural mortality", value=NA, min=0, max=10000, step=0.01)),
             column(width=6, numericInput("Linf_m_fix", "Asymptotic size (Linf)", value=NA, min=0, max=10000, step=0.01)))
  }
})
output$Male_parms_inputs2_fix <- renderUI({
  if(input$male_parms_fix){
    fluidRow(column(width=6, 
                    numericInput("k_m_fix", "Growth coefficient k", value=NA, min=0, max=10000, step=0.01)),
             column(width=6, numericInput("t0_m_fix", "Age at length 0 (t0)", value=NA, min=0, max=10000, step=0.01)))
  }
})
output$Male_parms_inputs3_fix <- renderUI({
  if(input$male_parms_fix){
    fluidRow(column(width=6, textInput("CV_lt_m_fix", "CV at length (young then old)", value="0.1,0.1")))
  }
})
output$Male_parms_inputs4_fix <- renderUI({
  if(input$male_parms_fix){
    fluidRow(column(width=6, numericInput("WLa_m_fix", "Weight-Length alpha", value=0.00001, min=0, max=10000, step=0.000000001)),
             column(width=6, numericInput("WLb_m_fix", "Weight-length beta", value=3, min=0, max=10000, step=0.01)))
  }
})

# "Estimated" variants: each parameter gets a dropdown with a prior type,
# mean, SD and estimation phase (negative phase = not estimated).
output$Male_parms_inputs_label_est <- renderUI({
  if(input$male_parms_est){
    h4(em("Male"))
  }
})
output$Male_parms_inputs_M_est <- renderUI({
  if(input$male_parms_est){
    dropdownButton(
      selectInput("M_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("M_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("M_m_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("M_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("skull-crossbones"), width = "300px",label="Natural mortality"
    )
  }
})
# Layout spacers shown only while the estimation inputs are visible.
output$Male_parms_inputs_space1 <- renderUI({
  if(input$male_parms_est){
    br()
  }
})
output$Male_parms_inputs_space2 <- renderUI({
  if(input$male_parms_est){
    br()
  }
})
output$Male_parms_inputs_space3 <- renderUI({
  if(input$male_parms_est){
    br()
  }
})
output$Male_parms_inputs_space4 <- renderUI({
  if(input$male_parms_est){
    br()
  }
})
output$Male_parms_inputs_space5 <- renderUI({
  if(input$male_parms_est){
    br()
  }
})
output$Male_parms_inputs_Growth_label <- renderUI({
  if(input$male_parms_est){
    h5(strong("Growth"))
  }
})
output$Male_parms_inputs_Linf_est <- renderUI({
  if(input$male_parms_est){
    dropdownButton(
      selectInput("Linf_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("Linf_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("Linf_m_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("Linf_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("infinity"), width = "300px",label="Linf: Asymptotic size"
    )
  }
})
output$Male_parms_inputs_k_est <- renderUI({
  if(input$male_parms_est){
    dropdownButton(
      selectInput("k_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("k_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("k_m_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("k_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("ruler-horizontal"), width = "300px",label="k: VB growth coefficient"
    )
  }
})
output$Male_parms_inputs_t0_est <- renderUI({
  if(input$male_parms_est){
    dropdownButton(
      selectInput("t0_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("t0_m_mean", "Mean", value=0,min=-100, max=100, step=0.001),
      numericInput("t0_m_SD", "SD", value=0,min=0, max=100, step=0.001),
      numericInput("t0_m_phase", "Phase", value=-1,min=-999, max=100, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("baby-carriage"), width = "300px",label="t0: Age at size 0"
    )
  }
})
output$Male_parms_inputs_CV_est_young <- renderUI({
  if(input$male_parms_est){
    dropdownButton(
      selectInput("CV_lt_m_young_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("CV_lt_m_young_mean", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_young_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_young_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length (young)"
    )
  }
})
output$Male_parms_inputs_CV_est_old <- renderUI({
  if(input$male_parms_est){
    dropdownButton(
      selectInput("CV_lt_m_old_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("CV_lt_m_old_mean", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_old_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_old_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length (old)"
    )
  }
})
output$Male_parms_inputs_WL_est <- renderUI({
  if(input$male_parms_est){
    fluidRow(column(width=6, numericInput("WLa_m_est", "Weight-length alpha", value=0.00001, min=0, max=10000, step=0.000000001)),
             column(width=6, numericInput("WLb_m_est", "Weight-length beta", value=3, min=0, max=10000, step=0.01)))
  }
})
# (Commented-out earlier fluidRow-based layout of the same estimation inputs.)
#h5(strong("M")),
# fluidRow(column(width=4,style='padding:1px;',align="center", selectInput("M_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',align="center",numericInput("M_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("M_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("M_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_Linf_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("Linf")),
# fluidRow(column(width=4,style='padding:1px;',align="center",selectInput("Linf_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("Linf_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_k_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("k")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("k_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("k_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("k_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("k_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_t0_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("t0")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("t0_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("t0_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("t0_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("t0_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_CV_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("Length CV")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("CV_lt_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("CV_lt_m_mean", "Mean", value=0.1,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("CV_lt_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("CV_lt_m_phase", "Phase", 
# value=-1,min=-999, max=10, step=0.001)))
# }
# })

#Male life history parameters
# SSS variants of the male life-history inputs (male_parms_SSS toggle):
# prior mean/SD only -- SSS draws parameters rather than estimating phases.
output$Male_parms_inputs_label_SSS<- renderUI({
  if(input$male_parms_SSS){
    h5(em("Male"))
  }
})
output$Male_parms_inputs_M_SSS<- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("M_m_prior_sss","Prior type",c("lognormal","normal","uniform","no prior")),
      numericInput("M_m_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
      # 0.44 default SD follows the app's natural-mortality prior default.
      numericInput("M_m_SD_sss", "SD", value=0.44,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("skull-crossbones"), width = "300px",label="Natural mortality"
    )
  }
})
# Layout spacers shown only while the SSS male inputs are visible.
output$Male_parms_inputs_space1_SSS <- renderUI({
  if(input$male_parms_SSS){
    br()
  }
})
output$Male_parms_inputs_space2_SSS <- renderUI({
  if(input$male_parms_SSS){
    br()
  }
})
output$Male_parms_inputs_space3_SSS <- renderUI({
  if(input$male_parms_SSS){
    br()
  }
})
output$Male_parms_inputs_space4_SSS <- renderUI({
  if(input$male_parms_SSS){
    br()
  }
})
output$Male_parms_inputs_space5_SSS <- renderUI({
  if(input$male_parms_SSS){
    br()
  }
})
output$Male_parms_inputs_Growth_label_SSS <- renderUI({
  if(input$male_parms_SSS){
    h5(strong("Growth"))
  }
})
output$Male_parms_inputs_Linf_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("Linf_m_prior_sss","Prior type",c("no prior","normal")),
      numericInput("Linf_m_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("Linf_m_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("infinity"), width = "300px",label="Linf: Asymptotic size"
    )
  }
})
output$Male_parms_inputs_k_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("k_m_prior_sss","Prior type",c("no prior","normal")),
      numericInput("k_m_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("k_m_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("ruler-horizontal"), width = "300px",label="k: VB growth coefficient"
    )
  }
})
output$Male_parms_inputs_t0_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("t0_m_prior_sss","Prior type",c("no prior","normal")),
      numericInput("t0_m_mean_sss", "Mean", value=0,min=-100, max=100, step=0.001),
      numericInput("t0_m_SD_sss", "SD", value=0,min=0, max=1000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("baby-carriage"), width = "300px",label="t0: Age at size 0"
    )
  }
})
output$Male_parms_inputs_CV_young_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("CV_lt_m_young_prior_sss","Prior type",c("no prior")),
      numericInput("CV_lt_m_young_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_young_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length"
    )
  }
})
output$Male_parms_inputs_CV_old_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("CV_lt_m_old_prior_sss","Prior type",c("no prior")),
      numericInput("CV_lt_m_old_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_old_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length"
    )
  }
})
output$Male_parms_inputs_WL_SSS<- renderUI({
  if(input$male_parms_SSS){
    fluidRow(column(width=6,numericInput("WLa_m_sss", "Weight-Length alpha", value=0.00001,min=0, max=10000, step=0.000000001)),
             column(width=6,numericInput("WLb_m_sss","Weight-length beta", value=3,min=0, max=10000, step=0.01)))
  }
})

#Selectivity paramters
# Fishery selectivity inputs: value + estimation phase per parameter.
# The dome-shaped extras (parms 3-5) only render for that selectivity choice.
# NOTE(review): "Selectvity" below is a typo in a runtime UI label; left
# untouched here because string literals are program behavior.
output$Sel_parms1 <- renderUI({
  fluidRow(column(width=8, textInput("Sel50", "Length at 50% Selectivity",value="")),
           column(width=4, textInput("Sel50_phase", "Est. 
phase", value="")))
})
output$Sel_parms2<- renderUI({
  fluidRow(column(width=8, textInput("Selpeak", "Length at Peak Selectvity", value="")),
           column(width=4, textInput("Selpeak_phase", "Est. phase", value="")))
})
output$Sel_parms3 <- renderUI({
  if(input$Sel_choice=="Dome-shaped"){
    fluidRow(column(width=8, textInput("PeakDesc", "Length at 1st declining selectivity",value="10000")),
             column(width=4, textInput("PeakDesc_phase", "Est. phase",value="")))
  }
})
output$Sel_parms4 <- renderUI({
  if(input$Sel_choice=="Dome-shaped"){
    fluidRow(column(width=8, textInput("LtPeakFinal", "Width of declining selectivity",value="0.0001")),
             column(width=4, textInput("LtPeakFinal_phase", "Est. phase",value="")))
  }
})
output$Sel_parms5 <- renderUI({
  if(input$Sel_choice=="Dome-shaped"){
    fluidRow(column(width=8, textInput("FinalSel", "Selectivity at max bin size",value="0.99999")),
             column(width=4, textInput("FinalSel_phase", "Est. phase",value="")))
  }
})
# SSS selectivity inputs: values only (no estimation phases).
output$Sel_parms1_sss <- renderUI({
  fluidRow(column(width=6, textInput("Sel50_sss", "Length at 50% Selectivity",value="")),
           column(width=6, textInput("Selpeak_sss", "Length at Peak Selectvity", value="")))
})
output$Sel_parms2_sss <- renderUI({
  if(input$Sel_choice_sss=="Dome-shaped"){
    fluidRow(column(width=6, textInput("PeakDesc_sss", "Length at 1st declining selectivity",value="10000")),
             column(width=6, textInput("LtPeakFinal_sss", "Width of declining selectivity",value="0.0001")))
  }
})
output$Sel_parms3_sss <- renderUI({
  if(input$Sel_choice_sss=="Dome-shaped"){
    fluidRow(column(width=8, textInput("FinalSel_sss", "Selectivity at max bin size",value="0.99999")))
  }
})

#Recruitment parameter inputs
# Shown only when recruitment-deviation estimation is enabled.
output$Rec_options1 <- renderUI({
  if(input$rec_choice){
    fluidRow(column(width=6, numericInput("sigmaR", "Rec. varaibility (sR)", value=0.5, min=0, max=10, step=0.01)))
  }
})
output$Rec_options2 <- renderUI({
  if(input$rec_choice){
    fluidRow(column(width=6, numericInput("Rdev_startyr", "Rec. devs. 
start year", value=input$styr, min=1, max=10000, step=1)), column(width=6, numericInput("Rdev_endyr", "Rec. devs. end year", value=input$endyr, min=1, max=10000, step=1))) } }) output$Rec_options3 <- renderUI({ if(input$biasC_choice){ fluidRow(column(width=6, numericInput("NobiasC_early", "Early last year", value=input$styr, min=1, max=10000, step=1)), column(width=6, numericInput("NobiasC_recent", "1st recent year", value=input$endyr, min=1, max=10000, step=1))) } }) output$Rec_options4 <- renderUI({ if(input$biasC_choice){ fluidRow(column(width=6, numericInput("BiasC_startyr", "Start year", value=input$styr, min=1, max=10000, step=1)), column(width=6, numericInput("BiasC_endyr", "End year", value=input$endyr, min=1, max=10000, step=1))) } }) output$Rec_options5 <- renderUI({ if(input$biasC_choice){ fluidRow(column(width=6, numericInput("BiasC","Maximum bias adjustment", value=1,min=0, max=1, step=0.001))) } }) output$Rec_options6 <- renderUI({ if(input$rec_choice){ fluidRow(column(width=6, selectInput("RecDevChoice","Recruit deviation option",c("1: Devs sum to zero","2: Simple deviations","3: deviation vector","4: option 3 plus penalties"),selected="1: Devs sum to zero"))) } }) #Jitter value output$Jitter_value <- renderUI({ if(input$jitter_choice){ fluidRow(column(width=6, numericInput("jitter_fraction", "Jitter value", value=0.01, min=0, max=10, step=0.001)), column(width=6, numericInput("Njitter", "# of jitters", value=0, min=1, max=10000, step=1))) } }) #Choose reference points output$RP_selection1<- renderUI({ if(input$RP_choices){ fluidRow(column(width=6, numericInput("SPR_target", "SPR target", value=0.5, min=0, max=1, step=0.001)), column(width=6, numericInput("B_target", "Biomass target", value=0.4, min=0, max=1, step=0.001))) } }) output$RP_selection2<- renderUI({ if(input$RP_choices){ fluidRow(column(width=6,selectInput("CR_Ct_F","Control rule type", c("1: Catch fxn of SSB, buffer on F", "2: F fxn of SSB, buffer on F", "3: Catch fxn of SSB, buffer on 
catch", "4: F fxn of SSB, buffer on catch"))), #column(width=4, numericInput("CR_Ct_F", "Control rule type", # value=1, min=0, max=1, step=0.001)), column(width=3, numericInput("slope_hi", "Upper ratio value", value=0.4, min=0, max=1, step=0.001)), column(width=3, numericInput("slope_low", "Lower ratio value", value=0.1, min=0, max=1, step=0.001))) } }) output$Forecasts<- renderUI({ if(input$Forecast_choice){ fluidRow(column(width=6, numericInput("forecast_num", "# of forecast years", value=2, min=1, max=1000, step=1)), column(width=6, textInput("forecast_buffer", "Control rule buffer", value="1"))) } }) output$AdvancedSS_nohess<- renderUI({ # if(input$advance_ss_click){ fluidRow(column(width=6, prettyCheckbox( inputId = "no_hess", label = "Turn off Hessian (speeds up runs, but no variance estimation)", shape = "round", outline = TRUE, status = "info"))) # } }) output$AdvancedSS_nohess_user<- renderUI({ # if(input$advance_ss_click){ fluidRow(column(width=6, prettyCheckbox( inputId = "no_hess", label = "Turn off Hessian (speeds up runs, but no variance estimation)", shape = "round", outline = TRUE, status = "info"))) # } }) output$AdvancedSS_addcomms<- renderUI({ fluidRow(column(width=6, prettyCheckbox( inputId = "add_comms", label = "Add additional SS run commands", shape = "round", outline = TRUE, status = "info"))) }) output$AdvancedSS_addcomms_comms <- renderUI({ if(!is.null(input$add_comms)){ if(input$add_comms){ fluidRow(column(width=12, textInput("add_comms_in", "Enter additional run commands", value=""))) } } }) output$AdvancedSS_addcomms_user<- renderUI({ fluidRow(column(width=6, prettyCheckbox( inputId = "add_comms", label = "Add additional SS run commands", shape = "round", outline = TRUE, status = "info"))) }) output$AdvancedSS_addcomms_comms_user <- renderUI({ if(!is.null(input$add_comms_user)){ if(input$add_comms_user){ fluidRow(column(width=12, textInput("add_comms_in", "Enter additional run commands", value=""))) } } }) output$AdvancedSS_noplots<- 
# --- Advanced SS run options ----------------------------------------------
# Almost every option here is the same one-checkbox row; factor it out.
# NOTE(review): several "_user" outputs reuse the SAME inputId as their
# non-user counterpart (e.g. "no_hess", "add_comms", "no_plots_tables",
# "no_tables", "use_par", "use_phase0", "use_datanew", "use_controlnew",
# "use_forecastnew", "Retro_choice"). Shiny input IDs must be unique if both
# outputs are ever rendered together -- confirm only one set is shown at a time.
adv_checkbox_row <- function(input_id, label_text, col_width = 6) {
  fluidRow(column(width = col_width, prettyCheckbox(
    inputId = input_id,
    label = label_text,
    shape = "round", outline = TRUE, status = "info"
  )))
}

output$AdvancedSS_nohess <- renderUI({
  adv_checkbox_row("no_hess", "Turn off Hessian (speeds up runs, but no variance estimation)")
})
output$AdvancedSS_nohess_user <- renderUI({
  adv_checkbox_row("no_hess", "Turn off Hessian (speeds up runs, but no variance estimation)")
})

output$AdvancedSS_addcomms <- renderUI({
  adv_checkbox_row("add_comms", "Add additional SS run commands")
})
output$AdvancedSS_addcomms_comms <- renderUI({
  if (!is.null(input$add_comms)) {
    if (input$add_comms) {
      fluidRow(column(width = 12, textInput("add_comms_in", "Enter additional run commands", value = "")))
    }
  }
})
output$AdvancedSS_addcomms_user <- renderUI({
  adv_checkbox_row("add_comms", "Add additional SS run commands")
})
# NOTE(review): this checks input$add_comms_user, but no widget with that ID is
# created anywhere in this chunk (the "_user" checkbox above uses "add_comms")
# -- TODO confirm the intended ID; left unchanged to avoid breaking the UI.
output$AdvancedSS_addcomms_comms_user <- renderUI({
  if (!is.null(input$add_comms_user)) {
    if (input$add_comms_user) {
      fluidRow(column(width = 12, textInput("add_comms_in", "Enter additional run commands", value = "")))
    }
  }
})

output$AdvancedSS_noplots <- renderUI({
  adv_checkbox_row("no_plots_tables", "Turn off plots")
})
output$AdvancedSS_noplots_user <- renderUI({
  adv_checkbox_row("no_plots_tables", "Turn off plots")
})

# TYPO FIX: "exectutive" -> "executive".
output$AdvancedSS_noestabs <- renderUI({
  adv_checkbox_row("no_tables", "No executive summary tables")
})
output$AdvancedSS_noestabs_user <- renderUI({
  adv_checkbox_row("no_tables", "No executive summary tables")
})

output$AdvancedSS_par <- renderUI({
  adv_checkbox_row("use_par", "Use par file (i.e., parameter file from previous run)?")
})
output$AdvancedSS_par_user <- renderUI({
  adv_checkbox_row("use_par", "Use par file (i.e., parameter file from previous run)?")
})

output$AdvancedSS_phase0 <- renderUI({
  adv_checkbox_row("use_phase0", "Turn off estimation of all parameters (phase = 0)?")
})
output$AdvancedSS_phase0_user <- renderUI({
  adv_checkbox_row("use_phase0", "Turn off estimation of all parameters (phase = 0)?")
})

output$AdvancedSS_datanew <- renderUI({
  adv_checkbox_row("use_datanew", "Use the data_echo.ss_new file?")
})
output$AdvancedSS_datanew_user <- renderUI({
  adv_checkbox_row("use_datanew", "Use the data_echo.ss_new file?")
})

output$AdvancedSS_controlnew <- renderUI({
  adv_checkbox_row("use_controlnew", "Use the control.ss_new file?")
})
output$AdvancedSS_controlnew_user <- renderUI({
  adv_checkbox_row("use_controlnew", "Use the control.ss_new file?")
})

output$AdvancedSS_forecastnew <- renderUI({
  adv_checkbox_row("use_forecastnew", "Use the forecast.ss_new file?")
})
output$AdvancedSS_forecastnew_user <- renderUI({
  adv_checkbox_row("use_forecastnew", "Use the forecast.ss_new file?")
})

output$AdvancedSS_GT1 <- renderUI({
  adv_checkbox_row("GT1", "Use only one growth type (default is 5)")
})
output$AdvancedSS_GT5_SSS <- renderUI({
  adv_checkbox_row("GT5", "Use 5 growth types (default is 1)")
})
output$AdvancedSS_Sex3 <- renderUI({
  adv_checkbox_row("Sex3", "Retain sex ratio in length compositions (Sex option = 3)")
})
output$AdvancedSS_Indexvar <- renderUI({
  adv_checkbox_row("Indexvar", "Estimate additional variance on each abundance index?")
})

# Optional custom ageing-error matrices, supplied as an uploaded file.
output$AdvancedSS_ageerror <- renderUI({
  adv_checkbox_row("Ageing_error_choice", "Add custom ageing error matrices?", col_width = 12)
})
output$AdvancedSS_ageerror_in <- renderUI({
  if (!is.null(input$Ageing_error_choice)) {
    if (input$Ageing_error_choice) {
      fluidRow(column(width = 12, fileInput('file33', 'Ageing error file',
                                            accept = c(
                                              'text/csv',
                                              'text/comma-separated-values',
                                              'text/tab-separated-values',
                                              'text/plain',
                                              '.csv'
                                            ))))
    }
  }
})

# Per-fleet catch units (biomass vs numbers), SS and SSS variants.
output$AdvancedSS_Ctunits <- renderUI({
  adv_checkbox_row("Ct_units_choice",
                   "Specify catch units (1=biomass (default); 2=numbers) for each fleet?",
                   col_width = 12)
})
output$AdvancedSS_Ctunitsfleets <- renderUI({
  if (!is.null(input$Ct_units_choice)) {
    if (input$Ct_units_choice) {
      fluidRow(column(width = 12, textInput("fleet_ct_units", "Enter catch units for each fleet", value = "")))
    }
  }
})
output$AdvancedSS_Ctunits_SSS <- renderUI({
  adv_checkbox_row("Ct_units_choice_SSS", "Specify catch units for each fleet?", col_width = 12)
})
output$AdvancedSS_Ctunitsfleets_SSS <- renderUI({
  if (!is.null(input$Ct_units_choice_SSS)) {
    if (input$Ct_units_choice_SSS) {
      fluidRow(column(width = 12, textInput("fleet_ct_units_SSS",
                                            "Enter catch units for each fleet (1=biomass; 2=numbers)",
                                            value = "")))
    }
  }
})

# Retrospective runs: years are entered as negative offsets from the final year.
output$AdvancedSS_retro_choice <- renderUI({
  adv_checkbox_row("Retro_choice", "Do retrospective runs? Input minus from current year")
})
# BUG FIX: min/max were inverted (min=-1, max=-500) and step was negative;
# valid numeric ranges require min <= max and a positive step.
output$AdvancedSS_retro_years <- renderUI({
  if (!is.null(input$Retro_choice)) {
    if (input$Retro_choice) {
      fluidRow(
        column(width = 6, numericInput("first_retro_year", "1st retro year",
                                       value = -1, min = -500, max = -1, step = 1)),
        column(width = 6, numericInput("final_retro_year", "Last retro year",
                                       value = -10, min = -500, max = -1, step = 1))
      )
    }
  }
})
output$AdvancedSS_retro_choice_user <- renderUI({
  adv_checkbox_row("Retro_choice", "Do retrospective runs? Input minus from current year")
})
output$AdvancedSS_retro_years_user <- renderUI({
  if (!is.null(input$Retro_choice)) {
    if (input$Retro_choice) {
      fluidRow(
        column(width = 6, numericInput("first_retro_year", "1st retro year",
                                       value = -1, min = -500, max = -1, step = 1)),
        column(width = 6, numericInput("final_retro_year", "Last retro year",
                                       value = -10, min = -500, max = -1, step = 1))
      )
    }
  }
})

# Length-bin structure: bin width is inferred from the uploaded length comps
# when available; the max bin default expands Linf by ~23% then rounds to an
# even bin edge.
output$AdvancedSS_Ltbin <- renderUI({
  if (!is.null(rv.Lt$data)) {
    bin.step <- as.numeric(colnames(rv.Lt$data)[7]) - as.numeric(colnames(rv.Lt$data)[6])
  }
  if (is.null(rv.Lt$data)) {
    bin.step <- 2
  }
  fluidRow(
    column(width = 4, numericInput("lt_bin_size", "bin size",
                                   value = bin.step, min = 0, max = 10000, step = 1)),
    column(width = 4, numericInput("lt_min_bin", "minimum bin",
                                   value = 4, min = 0, max = 10000, step = 0.01)),
    column(width = 4, numericInput("lt_max_bin", "maximum bin",
                                   value = 2 * (round((Linf() + (Linf() * 0.2326)) / 2)) + 2,
                                   min = 0, max = 10000, step = 0.01))
  )
})

# Upload of likelihood-profile input values.
output$Profile_multi_values <- renderUI({
  fluidRow(column(width = 12, fileInput('file_multi_profile', 'Profile input values',
                                        accept = c(
                                          'text/csv',
                                          'text/comma-separated-values',
                                          'text/tab-separated-values',
                                          'text/plain',
                                          '.csv'
                                        ))))
})
###############################################
###############################################
################# PARAMETERS ##################
###############################################

# Return the last non-NA value among the supplied candidate inputs, or NA.
# Returns NULL when every candidate is NULL (input widgets not yet rendered).
# isTRUE() guards against length-0 results from NULL inputs, which would
# otherwise crash the scalar if().
pick_input <- function(...) {
  vals <- list(...)
  if (all(vapply(vals, is.null, logical(1)))) return(NULL)
  out <- NA
  for (v in vals) {
    if (isTRUE(!is.na(v))) out <- v  # later (more specific) inputs win, matching original order
  }
  out
}

# Same as pick_input(), but each candidate value is only used when its paired
# male-parameters flag is checked.
pick_male_input <- function(flags, vals) {
  if (all(vapply(vals, is.null, logical(1)))) return(NULL)
  out <- NA
  for (i in seq_along(vals)) {
    if (isTRUE(flags[[i]]) && isTRUE(!is.na(vals[[i]]))) out <- vals[[i]]
  }
  out
}

# Comma-separated "1" per fleet, sized by the largest fleet number found in
# the length, age, or index data.
# BUG FIX: the original called is.null() with four arguments (is.null takes
# exactly one) and indexed columns of possibly-NULL data frames, both of
# which error at run time.
FleetNs <- reactive({
  fleet_cols <- c(
    if (!is.null(rv.Lt$data)) rv.Lt$data[, 3],
    if (!is.null(rv.Age$data)) rv.Age$data[, 3],
    if (!is.null(rv.Index$data)) rv.Index$data[, 3]
  )
  if (is.null(rv.Ct$data) && length(fleet_cols) == 0) return(NULL)
  if (length(fleet_cols) == 0) return(NULL)
  fleetnum <- rep(1, max(fleet_cols))
  paste(as.character(fleetnum), collapse = ",")
})

# Maximum age: rule-of-thumb 5.4/M from female natural mortality, extended by
# the oldest age bin in the uploaded age data when that is larger.
Nages <- reactive({
  if (all(is.null(input$M_f), is.null(input$M_f_fix), is.null(input$M_f_mean),
          is.null(input$M_f_mean_sss), is.null(rv.Age$data))) return(NULL)
  Nages <- NA
  m_val <- pick_input(input$M_f, input$M_f_fix, input$M_f_mean, input$M_f_mean_sss)
  if (isTRUE(!is.na(m_val))) Nages <- ceiling(5.4 / m_val)
  if (!is.null(rv.Age$data)) {
    # Age bins start in column 9 of the uploaded age-composition file.
    Nages_in <- max(as.numeric(colnames(rv.Age$data[, 9:ncol(rv.Age$data)])))
    if (is.na(Nages) || Nages_in > Nages) Nages <- Nages_in
  }
  Nages
})

# Female natural mortality, whichever entry mode is in use (SS estimated,
# SS fixed, SS prior mean, or SSS prior mean).
M_f_in <- reactive({
  pick_input(input$M_f, input$M_f_fix, input$M_f_mean, input$M_f_mean_sss)
})

# Male natural mortality; only used when the matching male-parameters box is checked.
M_m_in <- reactive({
  pick_male_input(
    list(input$male_parms, input$male_parms_fix, input$male_parms_est, input$male_parms_SSS),
    list(input$M_m, input$M_m_fix, input$M_m_mean, input$M_m_mean_sss)
  )
})

# Female VBGF asymptotic length.
Linf <- reactive({
  pick_input(input$Linf_f, input$Linf_f_fix, input$Linf_f_mean, input$Linf_f_mean_sss)
})

# Male VBGF asymptotic length.
Linf_m_in <- reactive({
  pick_male_input(
    list(input$male_parms, input$male_parms_fix, input$male_parms_est, input$male_parms_SSS),
    list(input$Linf_m, input$Linf_m_fix, input$Linf_m_mean, input$Linf_m_mean_sss)
  )
})

# Female VBGF growth coefficient.
k_vbgf <- reactive({
  pick_input(input$k_f, input$k_f_fix, input$k_f_mean, input$k_f_mean_sss)
})

# Male VBGF growth coefficient.
k_vbgf_m_in <- reactive({
  pick_male_input(
    list(input$male_parms, input$male_parms_fix, input$male_parms_est, input$male_parms_SSS),
    list(input$k_m, input$k_m_fix, input$k_m_mean, input$k_m_mean_sss)
  )
})

# Female VBGF age at length zero.
t0_vbgf <- reactive({
  pick_input(input$t0_f, input$t0_f_fix, input$t0_f_mean, input$t0_f_mean_sss)
})

# Male VBGF age at length zero.
t0_vbgf_m_in <- reactive({
  pick_male_input(
    list(input$male_parms, input$male_parms_fix, input$male_parms_est, input$male_parms_SSS),
    list(input$t0_m, input$t0_m_fix, input$t0_m_mean, input$t0_m_mean_sss)
  )
})

# Female length at 50% maturity.
L50 <- reactive({
  pick_input(input$L50_f, input$L50_f_fix, input$L50_f_est, input$L50_f_sss)
})

# Female length at 95% maturity.
L95 <- reactive({
  pick_input(input$L95_f, input$L95_f_fix, input$L95_f_est, input$L95_f_sss)
})

#############
### PLOTS ###
#############

##################
### CATCH PLOT ###
##################
observeEvent(req(!is.null(rv.Ct$data)), {
  shinyjs::show(output$catch_plots_label <- renderText({"Removal history"}))
})

observeEvent(req(!is.null(rv.Ct$data)), {
  output$Ctplot_it <- renderUI({
    if (!is.null(rv.Ct$data)) {
      output$Ctplot <- renderPlot({
        if (is.null(rv.Ct$data)) return(NULL)
        # First column is the year; remaining columns are per-fleet removals.
        # NOTE(review): aes_string() is deprecated in current ggplot2 -- kept
        # for compatibility with the file's ggplot2 version.
        rv.Ct$data %>%
          pivot_longer(-1, names_to = "Fleet", values_to = "catch") %>%
          ggplot(aes_string(names(.)[1], "catch", color = "Fleet")) +
          geom_point() +
          geom_line(lwd = 1.5) +
          ylab("Removals") +
          xlab("Year") +
          scale_color_viridis_d()
      })
      plotOutput("Ctplot")
    }
  })
})
##########################
### LENGTH COMPS PLOTS ###
##########################
observeEvent(req(!is.null(rv.Lt$data)), {
  shinyjs::show(output$lt_comp_plots_label <- renderText({"Length compositions"}))
})

observeEvent(req(!is.null(rv.Lt$data)), {
  output$Ltplot_it <- renderUI({
    if (!is.null(rv.Lt$data)) {
      output$Ltplot <- renderPlot({
        if (is.null(rv.Lt$data)) return(NULL)
        rv.Lt$data %>%
          rename_all(tolower) %>%
          dplyr::select(-nsamps) %>%
          pivot_longer(c(-year, -fleet, -sex)) %>%
          # Strip non-numeric characters from the bin labels to recover the
          # numeric length-bin midpoints.
          mutate(Year = factor(year),
                 name = as.numeric(gsub("[^0-9.-]", "", name))) %>%
          ggplot(aes(name, value, color = Year)) +
          geom_line() +
          facet_grid(sex ~ fleet, scales = "free_y", labeller = label_both) +
          xlab("Length bin") +
          ylab("Frequency") +
          scale_fill_viridis_d() +
          # Reference lines: x = -1 anchor, length at 50% maturity (solid),
          # and Linf (dashed blue).
          # NOTE(review): if L50() or Linf() is NULL the xintercept vector is
          # shorter than the 3 linetype/colour values -- confirm both inputs
          # are populated before this plot renders.
          geom_vline(xintercept = c(-1, L50(), Linf()),
                     linetype = c("solid", "solid", "dashed"),
                     colour = c("black", "black", "blue"),
                     na.rm = TRUE, show.legend = TRUE) +
          xlim(0, NA)
      })
      plotOutput("Ltplot")
    }
  })
})

# (legacy commented-out length-comp plotting code removed for clarity)

#################
### AGE PLOTS ###
#################
observeEvent(req(!is.null(rv.Age$data)), {
  shinyjs::show(output$marginal_age_comp_plots_label <- renderText({"Marginal age compositions"}))
})

observeEvent(req(!is.null(rv.Age$data)), {
  shinyjs::show(output$conditional_age_comp_plots_label <- renderText({"Conditional age at length"}))
})

observeEvent(req(!is.null(rv.Age$data)), {
  # Negative Lbin_hi flags marginal age comps; non-negative values are
  # conditional age-at-length observations.
  marginal_ages <- subset(rv.Age$data, Lbin_hi < 0)
  Cond_ages <- subset(rv.Age$data, Lbin_hi >= 0)

  output$Ageplot_it_marginal <- renderUI({
    if (!is.null(rv.Age$data)) {
      output$Ageplot_marginal <- renderPlot({
        if (nrow(marginal_ages) == 0) return(NULL)
        marginal_ages %>%
          rename_all(tolower) %>%
          dplyr::select(-nsamps, -lbin_hi) %>%
          pivot_longer(c(-year, -fleet, -sex, -lbin_low)) %>%
          mutate(Year = factor(year),
                 name = as.numeric(gsub("[^0-9.-]", "", name))) %>%
          ggplot(aes(name, value, color = Year)) +
          geom_line() +
          facet_grid(sex ~ fleet, scales = "free_y", labeller = label_both) +
          xlab("Age bin") +
          ylab("Frequency") +
          scale_fill_viridis_d()
      })
      plotOutput("Ageplot_marginal")
    }
  })

  output$Ageplot_it_cond <- renderUI({
    if (!is.null(rv.Age$data)) {
      output$Ageplot_conditional <- renderPlot({
        if (nrow(Cond_ages) == 0) return(NULL)
        Cond_ages_plots <- melt(Cond_ages[, c(1, 3, 4, 7, 9:ncol(Cond_ages))],
                                id.vars = c("Year", "Fleet", "Sex", "Lbin_hi"))
        Cond_ages_plots_pos <- subset(Cond_ages_plots, value > 0)
        # BUG FIX: 'variable' is a factor of age-bin labels; as.numeric() on a
        # factor returns the level indices, not the age values. Convert via
        # as.character() first (R FAQ 7.10).
        ggplot(Cond_ages_plots_pos,
               aes(x = as.numeric(as.character(variable)),
                   y = as.numeric(Lbin_hi), color = Year)) +
          geom_point() +
          facet_grid(vars(Sex), vars(Fleet), labeller = label_both) +
          xlab("Age bin") +
          ylab("Length bin")
      })
      plotOutput("Ageplot_conditional")
    }
  })
})
##################
### INDEX PLOT ###
##################
observeEvent(req(!is.null(rv.Index$data)), {
  shinyjs::show(output$index_plots_label <- renderText({"Indices of Abundance"}))
})

observeEvent(req(!is.null(rv.Index$data)), {
  output$Indexplot_it <- renderUI({
    if (!is.null(rv.Index$data)) {
      output$Indexplot <- renderPlot({
        if (is.null(rv.Index$data)) return(NULL)
        plot.Index <- rv.Index$data
        plot.Index[, 3] <- as.factor(plot.Index[, 3])
        # Z-score each fleet's index separately so all fleets share one axis.
        plot.Index.zscore <- list()
        for (i in 1:length(unique(plot.Index$Fleet))) {
          plot.Index.temp <- plot.Index[plot.Index$Fleet %in% unique(plot.Index$Fleet)[i], ]
          plot.Index.temp$Index <- (plot.Index.temp$Index - mean(plot.Index.temp$Index)) / sd(plot.Index.temp$Index)
          plot.Index.zscore[[i]] <- plot.Index.temp
        }
        plot.Index.zs <- do.call("rbind", plot.Index.zscore)
        ggplot(plot.Index.zs, aes(x = Year, y = Index, group = Fleet, colour = Fleet)) +
          geom_line(lwd = 1.1) +
          # BUG FIX: lower quantile was 0.0275; 0.025 pairs with the 0.975
          # upper bound to give a symmetric 95% lognormal interval.
          geom_errorbar(aes(ymin = qlnorm(0.025, log(Index), CV),
                            ymax = qlnorm(0.975, log(Index), CV), group = Fleet),
                        width = 0, size = 1) +
          geom_point(aes(colour = Fleet), size = 4) +
          ylab("Z-score") +
          xlab("Year") +
          scale_color_viridis_d()
      })
      plotOutput("Indexplot")
    }
  })
})

#####################
### Plot M by age ###
#####################
# Cohort decline exp(-M * age) for each sex; males default to the female M
# unless any of the male-parameters boxes is checked.
output$Mplot <- renderPlot({
  # Tiny epsilon keeps the curve defined when M is entered as exactly 0.
  mf.in <- M_f_in() + 0.000000000000001
  mm.in <- M_f_in() + 0.000000000000001
  # isTRUE() guards: the checkboxes may be NULL before their UI renders.
  if (any(isTRUE(input$male_parms), isTRUE(input$male_parms_SSS),
          isTRUE(input$male_parms_fix), isTRUE(input$male_parms_est))) {
    mm.in <- M_m_in() + 0.000000000000001
  }
  if (any(is.na(c(mf.in, mm.in))) | any(is.null(c(mf.in, mm.in))) |
      length(mf.in) == 0 | length(mm.in) == 0) return(NULL)
  Female_M <- data.frame(Ages = 0:Nages(), PopN = exp(-mf.in * 0:Nages()), Sex = "Female")
  Male_M <- data.frame(Ages = 0:Nages(), PopN = exp(-mm.in * 0:Nages()), Sex = "Male")
  M_sexes <- rbind(Female_M, Male_M)
  Nage_4_plot <- grobTree(textGrob(paste0("Max age =", Nages()),
                                   x = 0.1, y = 0.95, hjust = 0,
                                   gp = gpar(col = "darkblue", fontsize = 12,
                                             fontface = "italic")))
  ggplot(M_sexes, aes(Ages, PopN, color = Sex)) +
    geom_line(aes(linetype = Sex), lwd = 2) +
    ylab("Cohort decline by M") +
    annotation_custom(Nage_4_plot)
})

##############################
### Plot VBGF and maturity ###
##############################
# Female/male growth curves, with maturity (L50/L95) marked on the female curve.
output$VBGFplot <- renderPlot({
  f_Linf <- m_Linf <- Linf()
  f_k <- m_k <- k_vbgf()
  f_t0 <- m_t0 <- t0_vbgf()
  f_L50 <- L50()
  f_L95 <- L95()
  maxage <- Nages()
  if (any(input$male_parms, input$male_parms_SSS, input$male_parms_fix, input$male_parms_est)) {
    m_Linf <- Linf_m_in()
    m_k <- k_vbgf_m_in()
    m_t0 <- t0_vbgf_m_in()
  }
  if (!any(is.na(c(f_Linf, f_k, f_t0)))) {
    # NOTE(review): the male curve is evaluated over ages starting at the
    # FEMALE t0 -- presumably intentional so both curves share an x range;
    # confirm.
    vbgf_female <- data.frame(Age = c(f_t0:Nages()),
                              Length = VBGF(f_Linf, f_k, f_t0, c(f_t0:Nages())),
                              Sex = "Female")
    vbgf_male <- data.frame(Age = f_t0:Nages(),
                            Length = VBGF(m_Linf, m_k, m_t0, c(f_t0:Nages())),
                            Sex = "Male")
    rbind(vbgf_female, vbgf_male) %>%
      ggplot(aes(Age, Length, color = Sex)) +
      geom_line(aes(linetype = Sex), lwd = 2) -> vbgf.plot
    if (!any(is.na(c(f_L50, f_L95)))) {
      age.mat <- data.frame(Age = VBGF.age(f_Linf, f_k, f_t0, c(f_L50, f_L95)),
                            Length = c(f_L50, f_L95),
                            Sex = "Female")
      vbgf.plot +
        geom_point(data = age.mat, aes(Age, Length), color = "darkorange", size = 6) +
        geom_text(data = age.mat, label = c("Lmat50%", "Lmat95%"),
                  nudge_x = -0.1 * Nages(), color = "black") -> vbgf.plot
    }
    vbgf.plot
  }
})
###################
### Selectivity ###
###################

# Relative-stock-status prior panel: shown only in SSS-style runs (catch data
# present, no comps or indices, not a user-supplied model, and before tab 4).
output$Dep_plot_title <- renderUI({
  if (as.numeric(input$tabs) < 4 & is.null(rv.Lt$data) & !is.null(rv.Ct$data) &
      is.null(rv.Age$data) & is.null(rv.Index$data) &
      any(is.null(input$user_model), !input$user_model)) {
    h4("Relative Stock Status Prior")
  }
})

output$Dep_plot_it <- renderUI({
  if (as.numeric(input$tabs) < 4 & is.null(rv.Lt$data) & !is.null(rv.Ct$data) &
      is.null(rv.Age$data) & is.null(rv.Index$data) &
      any(is.null(input$user_model), !input$user_model)) {
    output$Depletion_plot <- renderPlot({
      if (!is.na(input$status_year) & !is.na(input$Depl_mean_sss)) {
        # Draw 100k samples from the chosen depletion prior for display.
        # NOTE(review): the "uniform" branch treats mean/SD as min/max bounds
        # -- confirm that is the intended parameterization.
        if (input$Depl_prior_sss == "beta") {
          dep.hist.sss <- data.frame(draws = rbeta.ab(100000, input$Depl_mean_sss, input$Depl_SD_sss, 0, 1))
        }
        if (input$Depl_prior_sss == "lognormal") {
          dep.hist.sss <- data.frame(draws = rlnorm(100000, log(input$Depl_mean_sss), input$Depl_SD_sss))
        }
        if (input$Depl_prior_sss == "truncated normal") {
          dep.hist.sss <- data.frame(draws = rtruncnorm(100000, 0, 1, input$Depl_mean_sss, input$Depl_SD_sss))
        }
        if (input$Depl_prior_sss == "uniform") {
          dep.hist.sss <- data.frame(draws = runif(100000, input$Depl_mean_sss, input$Depl_SD_sss))
        }
        if (input$Depl_prior_sss == "no prior") {
          NULL
        }
        Depletion_plot <- gghistogram(dep.hist.sss, x = "draws", fill = "purple")
        Depletion_plot
      }
    })
    plotOutput("Depletion_plot")
  }
})

# Selectivity preview: parses comma-separated per-fleet entries and draws the
# double-normal (pattern 24) curve for each fleet.
output$Selplot <- renderPlot({
  if (input$Sel_choice == "Logistic" &
      any(any(input$Sel50[1] == "", is.null(input$Sel50)),
          any(input$Selpeak[1] == "", is.null(input$Selpeak)))) return(NULL)

  # Split a comma-separated text input into a numeric vector.
  num_in <- function(x) as.numeric(trimws(unlist(strsplit(x, ","))))

  # Build the per-fleet selectivity curves and return the ggplot object.
  build_sel_plot <- function(Sel50, Selpeak, PeakDesc, LtPeakFinal, FinalSel) {
    Sel.out <- NULL
    for (ii in seq_along(Sel50)) {
      tmp <- doubleNorm24.sel(Sel50 = Sel50[ii], Selpeak = Selpeak[ii],
                              PeakDesc = PeakDesc[ii], LtPeakFinal = LtPeakFinal[ii],
                              FinalSel = FinalSel[ii])
      Sel.out <- rbind(Sel.out,
                       data.frame(Bin = tmp[, 1], Sel = tmp[, 2],
                                  Fleet = paste0("Fleet ", ii)))
    }
    ggplot(Sel.out, aes(Bin, Sel, colour = Fleet)) +
      geom_line(lwd = 1.5) +
      # BUG FIX: axis labels were swapped -- x is the length bin, y the selectivity.
      ylab("Selectivity") +
      xlab("Length Bins") +
      scale_color_viridis_d()
  }

  selplot.out <- NULL
  if (input$Sel_choice == "Logistic") {
    if (all(length(num_in(input$Sel50)) == length(num_in(input$Selpeak)),
            all(input$Sel50 != ""), all(!is.null(input$Sel50)),
            all(input$Selpeak != ""), all(!is.null(input$Selpeak)))) {
      Sel50 <- num_in(input$Sel50)
      Selpeak <- num_in(input$Selpeak)
      # Logistic is drawn as a double-normal with a flat, non-descending limb.
      selplot.out <- build_sel_plot(Sel50, Selpeak,
                                    PeakDesc = rep(10000, length(Selpeak)),
                                    LtPeakFinal = rep(0.0001, length(Selpeak)),
                                    FinalSel = rep(0.999, length(Selpeak)))
    }
  }
  if (input$Sel_choice == "Dome-shaped") {
    if (all(length(num_in(input$Sel50)) == length(num_in(input$Selpeak)),
            length(num_in(input$Sel50)) == length(num_in(input$PeakDesc)),
            length(num_in(input$Sel50)) == length(num_in(input$LtPeakFinal)),
            length(num_in(input$Sel50)) == length(num_in(input$FinalSel)),
            all(input$Sel50 != ""), all(!is.null(input$Sel50)),
            all(input$Selpeak != ""), all(!is.null(input$Selpeak)))) {
      selplot.out <- build_sel_plot(num_in(input$Sel50), num_in(input$Selpeak),
                                    num_in(input$PeakDesc), num_in(input$LtPeakFinal),
                                    num_in(input$FinalSel))
    }
  }
  selplot.out
})
Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii]) Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii)) Sel.out<-rbind(Sel.out,Sel.out.temp) } } selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) + geom_line(lwd=1.5) + ylab("Length Bins") + xlab("Selectivity") + scale_color_viridis_d() } } if(input$Sel_choice_sss=="Dome-shaped") { if(all(length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))), length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,","))))), length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,","))))), length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,","))))), all(input$Sel50_sss!=""), all(!is.null(input$Sel50_sss)), all(input$Selpeak_sss!=""), all(!is.null(input$Selpeak_sss)))) { Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))) Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,",")))) PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,",")))) LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,",")))) FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,",")))) Sel.out<-doubleNorm24.sel(Sel50=Sel50[1],Selpeak=Selpeak[1],PeakDesc=PeakDesc[1],LtPeakFinal=LtPeakFinal[1],FinalSel=FinalSel[1]) Sel.out<-data.frame(Bin=Sel.out[,1],Sel=Sel.out[,2],Fleet="Fleet 1") if(length(Sel50)>1) { for(ii in 2:length(Sel50)) { Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii]) Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii)) 
Sel.out<-rbind(Sel.out,Sel.out.temp) } } selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) + geom_line(lwd=1.5) + ylab("Length Bins") + xlab("Selectivity") + scale_color_viridis_d() } } if(!is.null(get0("selplot.out"))){return(selplot.out)} else(return(NULL)) }) ############################################# ### END PLOTS ### ############################################# ############################################# ######## PREPARE FILES andD RUN SSS ######### ############################################# SSS.run<-observeEvent(input$run_SSS,{ show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[1],text="Create model files") print(1) # progress <- shiny::Progress$new(session, min=1, max=2) # on.exit(progress$close()) # progress$set(message = 'Model run in progress', # detail = '') # for (i in 1:2) { # progress$set(value = i) # Sys.sleep(0.5) # } #Copy and move files if(file.exists(paste0("Scenarios/",input$Scenario_name))) { unlink(paste0("Scenarios/",input$Scenario_name),recursive=TRUE) # file.remove(paste0(getwd(),"/Scenarios/",input$Scenario_name)) } #if(input$) { file.copy(paste0("SSS_files/sssexample_BH"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE) file.rename(paste0("Scenarios/sssexample_BH"), paste0("Scenarios/",input$Scenario_name)) } #if() # { # file.copy(paste0(getwd(),"/SSS_files/sssexample_RickPow"),paste0(getwd(),"/Scenarios"),recursive=TRUE,overwrite=TRUE) # file.rename(paste0(getwd(),"/Scenarios/sssexample_RickPow"), paste0(getwd(),"/Scenarios/",input$Scenario_name)) # } #Read data and control files data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/sss_example.dat")) ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/sss_example.ctl"),use_datlist = TRUE, datlist=data.file) #Read, edit then write new DATA file data.file$styr<-input$styr data.file$endyr<-input$endyr data.file$Nages<-Nages() #Catches Catch.data<-rv.Ct$data catch.dep.fleets<-ncol(Catch.data) data.file$Nfleets<-catch.dep.fleets 
# --- Reconcile fleet count: catch/depletion fleets vs. any index fleets ---
if(!is.null(rv.Index$data)) {
  index.fleets<-max(rv.Index$data$Fleet)
  # Index fleets may exceed, match, or fall short of the catch+depletion fleets
  if(index.fleets>catch.dep.fleets) {data.file$Nfleets<-index.fleets}
  if(index.fleets==catch.dep.fleets) {data.file$Nfleets<-index.fleets+1}
  if(index.fleets<catch.dep.fleets) {data.file$Nfleets<-catch.dep.fleets}
}
# With more than one fishing fleet, replicate the template fleetinfo/CPUEinfo rows
if((data.file$Nfleets-1)>1){
  for(i in 1:(data.file$Nfleets-2))
  {
    data.file$fleetinfo<-rbind(data.file$fleetinfo,data.file$fleetinfo[1,])
    data.file$CPUEinfo<-rbind(data.file$CPUEinfo,data.file$CPUEinfo[1,])
  }
  # Last fleet is the depletion "survey"; earlier fleets are fisheries
  data.file$fleetinfo$fleetname<-c(paste0("Fishery",1:(catch.dep.fleets-1)),"Depl")
  data.file$fleetinfo$type[c(2,data.file$Nfleets)]<-c(1,3)
  data.file$fleetinfo$surveytiming[c(2,data.file$Nfleets)]<-c(-1,0.1)
  data.file$CPUEinfo[,1]<-1:data.file$Nfleets
  # NOTE(review): 34 presumably the SS units code for depletion -- confirm
  data.file$CPUEinfo[c(2,data.file$Nfleets),2]<-c(1,34)
  # The depletion "index" observations are attached to the last fleet
  data.file$CPUE$index<-data.file$Nfleets
}
# Build the SS catch table: one block per fishing fleet, each with a leading
# equilibrium-catch row (year -999, catch 0, small SE)
year.in<-Catch.data[,1]
catch.cols<-colnames(data.file$catch)
catch_temp<-list()
for(i in 1:(data.file$Nfleets-1))
{
  catch_temp[[i]]<-data.frame(
    c(-999,year.in),
    rep(1,length(year.in)+1),
    rep(i,length(year.in)+1),
    c(0,Catch.data[,i+1]),
    rep(0.01,length(year.in)+1)
  )
}
data.file$catch<-list.rbind(catch_temp)
colnames(data.file$catch)<-catch.cols
#Relative stock status: the two "survey" years bracket start year and status year
data.file$CPUE$year<-c(input$styr,input$status_year)
#Length composition data: derive population length bins from female Linf
if(input$Linf_f_mean_sss>30){data.file$binwidth<-2}
data.file$minimum_size<-floor(input$Linf_f_mean_sss/10)
data.file$maximum_size<-ceiling(input$Linf_f_mean_sss+(input$Linf_f_mean_sss*0.1))
data.file$lbin_vector<-seq(data.file$minimum_size,data.file$maximum_size,data.file$binwidth)
data.file$N_lbinspop<-length(data.file$lbin_vector)
#Age composition data
# if (is.null(inFile_age)){
#   data.file$N_agebins<-Nages()
#   data.file$agebin_vector<-1:Nages()
#   data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
#   colnames(data.file$ageerror)<-paste0("age",1:Nages())
# }
#Catch units: optionally mark fleets whose catches are in numbers rather than biomass
if(input$Ct_units_choice_SSS)
{
  ct.units<-as.numeric(trimws(unlist(strsplit(input$fleet_ct_units_SSS,","))))
  #data.file$fleetinfo[ct.units,4]<-2 #use this when just specifying which are fleets are numbers
  data.file$fleetinfo[,4]<-c(ct.units,1)
}
# Write the completed data file into this scenario's folder
SS_writedat(data.file,paste0("Scenarios/",input$Scenario_name,"/sss_example.dat"),overwrite=TRUE)
####################### END DATA FILE #####################################

####################### START SSS CTL FILE #####################################
# Growth-type-group (platoon) option: 5 platoons when requested
if(!is.null(input$GT5)){if(input$GT5)
  {
    ctl.file$N_platoon<-5
    ctl.file$sd_ratio<-0.7
    ctl.file$submorphdist<-c(-1,0.25,0.5,0.25,0.125)
  }
}
#if(all(any(input$est_parms==TRUE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data))))==TRUE)
#{
# Female von Bertalanffy length-at-age over ages 0..Nages (reference values)
fem_vbgf<-VBGF(input$Linf_f_mean_sss,input$k_f_mean_sss,input$t0_f_mean_sss,c(0:Nages()))
#c("lognormal","truncated normal","uniform","beta")
prior.name<-c("no prior","symmetric beta", "beta","lognormal","gamma","normal")
prior.type<-c(0:3,5,6)
#Females
#M: lognormal priors store log(mean) in the prior column of MG_parms
if(input$M_f_prior=="lognormal"){ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean_sss,log(input$M_f_mean_sss))} else {ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean_sss,input$M_f_mean_sss)}
#L0
# NOTE(review): first assignment is immediately overwritten by 0 on the next line
ctl.file$Growth_Age_for_L1<-input$t0_f_mean_sss
ctl.file$Growth_Age_for_L1<-0
#if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(fem_vbgf[1],log(fem_vbgf[1]))}
#else {ctl.file$MG_parms[2,3:4]<-fem_vbgf[1]}
if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(0,log(0.0000001))} else {ctl.file$MG_parms[2,3:4]<-0}
#Linf
if(input$Linf_f_prior=="lognormal"){ctl.file$MG_parms[3,3:4]<-c(input$Linf_f_mean_sss,log(input$Linf_f_mean_sss))} else{ctl.file$MG_parms[3,3:4]<-input$Linf_f_mean_sss}
#k
if(input$k_f_prior=="lognormal"){ctl.file$MG_parms[4,3:4]<-c(input$k_f_mean_sss,log(input$k_f_mean_sss))} else {ctl.file$MG_parms[4,3:4]<-input$k_f_mean_sss}
#CV young
# ---- Female growth variability, weight-length, maturity, fecundity ----
# MG_parms cols 3:4 hold (init, prior); lognormal priors store log(mean) in col 4.
#CV young
if(input$CV_lt_f_young_prior=="lognormal"){ctl.file$MG_parms[5,3:4]<-c(input$CV_lt_f_young_mean_sss,log(input$CV_lt_f_young_mean_sss))} else{ctl.file$MG_parms[5,3:4]<-input$CV_lt_f_young_mean_sss}
#CV old
if(input$CV_lt_f_old_prior=="lognormal"){ctl.file$MG_parms[6,3:4]<-c(input$CV_lt_f_old_mean_sss,log(input$CV_lt_f_old_mean_sss))} else{ctl.file$MG_parms[6,3:4]<-input$CV_lt_f_old_mean_sss}
#Weight-length
ctl.file$MG_parms[7,3:4]<-input$WLa_f_sss #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_sss #exponent
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f_sss #Lmat50%
# Logistic maturity slope derived from L50 and L95
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_sss-input$L50_f_sss) #Maturity slope
#Males: default the male rows to female values (overridden below when
#offsets or explicit male parameters are selected)
ctl.file$MG_parms[13,3:4]<-c(input$M_f_mean_sss,log(input$M_f_mean_sss)) #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f_mean_sss #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f_mean_sss #k
ctl.file$MG_parms[17,3:4]<-input$CV_lt_f_young_mean_sss #CV
ctl.file$MG_parms[18,3:4]<-input$CV_lt_f_old_mean_sss #CV
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_f_sss #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_f_sss #exponent
# Female fecundity relationship
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_sss #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_sss #exponent
# Male parameters expressed as offsets from females: zero the male rows
if(input$male_offset_SSS)
{
  ctl.file$parameter_offset_approach<-2 #Change to offset approach
  ctl.file$MG_parms[13,c(1,3:4)]<-0 #M
  ctl.file$MG_parms[14,c(1,3:4)]<-0 #L0
  ctl.file$MG_parms[15,c(1,3:4)]<-0 #Linf
  ctl.file$MG_parms[16,c(1,3:4)]<-0 #k
  ctl.file$MG_parms[17,c(1,3:4)]<-0 #CV
  ctl.file$MG_parms[18,c(1,3:4)]<-0 #CV
  #Weight-length stays at female values (not offset)
  ctl.file$MG_parms[19,c(1,3:4)]<-input$WLa_f_sss #coefficient
  ctl.file$MG_parms[20,c(1,3:4)]<-input$WLb_f_sss #exponent
}
# Explicit male parameter values
if(input$male_parms_SSS)
{
  male_vbgf_sss<-VBGF(input$Linf_m_mean_sss,input$k_m_mean_sss,input$t0_m_mean_sss,c(input$t0_f_mean_sss:Nages())) #M
  # NOTE(review): the FEMALE prior-choice inputs (*_f_prior_sss) gate the male
  # t0/Linf/k/CV rows below -- confirm male-specific inputs were not intended.
  if(input$M_m_prior_sss=="lognormal"){ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean_sss,log(input$M_m_mean_sss))} else {ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean_sss,input$M_m_mean_sss)}
  #L0
  if(input$t0_f_prior_sss=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_sss[1],log(male_vbgf_sss[1]))} else {ctl.file$MG_parms[14,3:4]<-c(male_vbgf_sss[1],male_vbgf_sss[1])}
  # if(input$t0_f_prior_sss=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(0,log(0.0000001))}
  #else {ctl.file$MG_parms[14,3:4]<-c(0,0)}
  #Linf
  if(input$Linf_f_prior_sss=="lognormal"){ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean_sss,log(input$Linf_m_mean_sss))} else{ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean_sss,input$Linf_m_mean_sss)}
  #k
  if(input$k_f_prior_sss=="lognormal"){ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean_sss,log(input$k_m_mean_sss))} else {ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean_sss,input$k_m_mean_sss)}
  #CV young
  if(input$CV_lt_f_young_prior_sss=="lognormal"){ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean_sss,log(input$CV_lt_m_young_mean_sss))} else{ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean_sss,input$CV_lt_m_young_mean_sss)}
  #CV old
  if(input$CV_lt_f_old_prior_sss=="lognormal"){ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean_sss,log(input$CV_lt_m_old_mean_sss))} else{ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean_sss,input$CV_lt_m_old_mean_sss)}
  #Weight-length
  ctl.file$MG_parms[19,3:4]<-input$WLa_m_sss #coefficient
  ctl.file$MG_parms[20,3:4]<- input$WLb_m_sss #exponent
}
#S-R
#ctl.file$SR_parms[1,3:4]<-input$lnR0 #lnR0
# BUGFIX: original read log(h_mean_ss), an undefined object, so a lognormal
# steepness prior crashed at runtime; it must use the Shiny input value.
# NOTE(review): input ids "h_ss_prior"/"h_mean_ss" differ from the
# "h_prior_sss"/"h_mean_sss" ids used later when calling SSS() -- confirm.
if(input$h_ss_prior=="lognormal"){ctl.file$SR_parms[2,3:4]<-c(input$h_mean_ss,log(input$h_mean_ss))} else{ctl.file$SR_parms[2,3:4]<-input$h_mean_ss}
#}
# ctl.file$Q_options[1]<-data.file$Nfleets
#Selectivity: parse comma-separated per-fleet inputs
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50_sss,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1]
if(input$Sel_choice_sss=="Logistic") {
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width) ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector)) ctl.file$size_selex_parms[1,3:4]<- Selpeak[1] ctl.file$size_selex_parms[2,3:4]<- 15 ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5))) ctl.file$size_selex_parms[4,3:4]<- -15 ctl.file$size_selex_parms[6,3:4]<- 15 } if(input$Sel_choice_sss=="Dome-shaped") { PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,",")))) LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,",")))) FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,",")))) #ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width) ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector)) ctl.file$size_selex_parms[1,3:4]<- Selpeak[1] ctl.file$size_selex_parms[2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[1]-bin.width)/(PeakDesc[1]-Selpeak[1]-bin.width)) ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5))) ctl.file$size_selex_parms[4,3:4]<- log(LtPeakFinal[1]) ctl.file$size_selex_parms[6,3:4]<- -log((1/(FinalSel[1]+0.000000001)-1)) } #Add other fleets if((data.file$Nfleets-1)>1){ for(i in 1:(data.file$Nfleets-2)) { #ctl.file$init_F<-rbind(ctl.file$init_F,ctl.file$init_F[1,]) ctl.file$size_selex_types<-rbind(ctl.file$size_selex_types,ctl.file$size_selex_types[1,]) ctl.file$age_selex_types<-rbind(ctl.file$age_selex_types,ctl.file$age_selex_types[1,]) ctl.file$size_selex_parms<-rbind(ctl.file$size_selex_parms,ctl.file$size_selex_parms[1:6,]) if(input$Sel_choice_sss=="Logistic") { #ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width) ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector)) ctl.file$size_selex_parms[6*i+1,3:4]<- 
Selpeak[i+1] ctl.file$size_selex_parms[6*i+2,3:4]<- 15 ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5))) ctl.file$size_selex_parms[6*i+4,3:4]<- -15 ctl.file$size_selex_parms[6*i+6,3:4]<- 15 } if(input$Sel_choice_sss=="Dome-shaped") { ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector)) ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1] ctl.file$size_selex_parms[6*i+2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[i+1]-bin.width)/(PeakDesc[i+1]-Selpeak[i+1]-bin.width)) ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5))) ctl.file$size_selex_parms[6*i+4,3:4]<- log(LtPeakFinal[i+1]) ctl.file$size_selex_parms[6*i+6,3:4]<- -log((1/(FinalSel[i+1]+0.000000001)-1)) } } ctl.file$size_selex_types[,1]<-c(rep(24,data.file$Nfleets-1),0) ctl.file$age_selex_types[,1]<-10 #Re-label so r4ss can interpret these new entries #rownames(ctl.file$init_F)<-paste0("InitF_seas_1_flt_",1:data.file$Nfleets,"Fishery",1:data.file$Nfleets) rownames(ctl.file$age_selex_types)<-rownames(ctl.file$size_selex_types)<-c(paste0("Fishery",1:(data.file$Nfleets-1)),"Depl") size_selex_parms_rownames<-list() for(f_i in 1:(data.file$Nfleets-1)) { size_selex_parms_rownames[[f_i]]<-c(paste0("SizeSel_P_1_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_2_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_3_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_4_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_5_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_6_Fishery",f_i,"(",f_i,")")) } size_selex_parms_rownames<-unlist(size_selex_parms_rownames) rownames(ctl.file$size_selex_parms)<-size_selex_parms_rownames } SS_writectl(ctl.file,paste0("Scenarios/",input$Scenario_name,"/sss_example.ctl"),overwrite=TRUE) #Forecast file modfications #Reference points #if(!input$use_forecastnew) #{ forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss")) if(input$RP_choices){ 
forecast.file$SPRtarget<-input$SPR_target forecast.file$Btarget<-input$B_target CR_choices<-c("1: Catch fxn of SSB, buffer on F", "2: F fxn of SSB, buffer on F", "3: Catch fxn of SSB, buffer on catch", "4: F fxn of SSB, buffer on catch") CR_choices_num.vec<-c(1:4) forecast.file$ControlRuleMethod<-CR_choices_num.vec[CR_choices==input$CR_Ct_F] forecast.file$SBforconstantF<-input$slope_hi forecast.file$BfornoF<-input$slope_low } if(input$Forecast_choice) { forecast.file$Nforecastyrs<-input$forecast_num buffer.in<-as.numeric(trimws(unlist(strsplit(input$forecast_buffer,",")))) if(length(buffer.in)==1){forecast.file$Flimitfraction<-buffer.in} if(length(buffer.in)>1) { forecast.file$Flimitfraction<--1 buffer.datafr<-data.frame(Year=c((data.file$endyr+1):(data.file$endyr+input$forecast_num)),Fraction=buffer.in) rownames(buffer.datafr)<-paste0("#_Flimitfraction_m",1:input$forecast_num) forecast.file$Flimitfraction_m<-buffer.datafr } } SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE) #} #if(input$use_forecastnew) # { # forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss_new")) # SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE) # } #Set prior inputs #0 = normal #10 = truncated normal #1 = symmetric beta (rbeta) #2 = beta #3 = lognormal #30 = truncated lognormal #4 = uniform #99 = used only for the steepness parameter. 
Indicates h will come from FMSY/M prior sss.prior.name<-c("no prior","symmetric beta","beta","normal","truncated normal","lognormal","truncated lognormal","uniform") sss.prior.type<-c(-1,1,2,0,10,3,30,4) Dep.in_sss<-c(sss.prior.type[sss.prior.name==input$Depl_prior_sss],input$Depl_mean_sss,input$Depl_SD_sss) h.in_sss<-c(sss.prior.type[sss.prior.name==input$h_prior_sss],input$h_mean_sss,input$h_SD_sss) if(!input$male_offset_SSS) { M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss) Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss) k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss) t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss) } if(input$male_offset_SSS) { M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_prior_sss],0,0) Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],0,0) k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_f_prior_sss],0,0) t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_f_prior_sss],0,0) } if(input$male_parms_SSS) { 
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_m_prior_sss],input$M_m_mean_sss,input$M_m_SD_sss) Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_m_prior_sss],input$Linf_m_mean_sss,input$Linf_f_SD_sss) k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_m_prior_sss],input$k_m_mean_sss,input$k_m_SD_sss) t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_m_prior_sss],input$t0_m_mean_sss,input$t0_m_SD_sss) } show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress") #Run SSS SSS.out<-SSS(paste0("Scenarios/",input$Scenario_name), file.name=c("sss_example.dat","sss_example.ctl"), reps=input$SSS_reps, seed.in=input$SSS_seed, Dep.in=Dep.in_sss, M.in=M.in_sss, SR_type=3, h.in=h.in_sss, FMSY_M.in=c(-1,0.5,0.1), BMSY_B0.in=c(-1,0.5,0.1), Linf.k.cor=input$Linf_k_cor_sss, Linf.in=Linf.in_sss, k.in=k.in_sss, t0.in=t0.in_sss, Zfrac.Beta.in=c(-99,0.2,0.6,-99,0.5,2), R_start=c(0,input$lnR0_sss), doR0.loop=c(1,round(input$lnR0_sss*0.5),round(input$lnR0_sss*1.5),(round(input$lnR0_sss*1.3)-round(input$lnR0_sss*0.5))/10), sum_age=0, ts_yrs=c(input$styr,input$endyr), pop.ltbins=NA, #ofl_yrs=c(input$endyr+1,input$endyr+2), sexes=T, BH_FMSY_comp=F, OStype=input$OS_choice) #save(SSS.out) show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[3],text="Process model output") if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP")))) { output$SSS_priors_post<-renderPlot({ if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP")))) { load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP")) 
sss.M.f<-rbind(data.frame(value=SSS.out$Prior$M_f,type="prior",metric="Female M"),data.frame(value=SSS.out$Post$M_f,type="post",metric="Female M")) sss.M.m<-rbind(data.frame(value=SSS.out$Prior$M_m,type="prior",metric="Male M"),data.frame(value=SSS.out$Post$M_m,type="post",metric="Male M")) sss.h<-rbind(data.frame(value=SSS.out$Prior$h,type="prior",metric="h"),data.frame(value=SSS.out$Post$h,type="post",metric="h")) sss.Dep<-rbind(data.frame(value=SSS.out$Prior$Dep,type="prior",metric="Dep"),data.frame(value=SSS.out$Post$Dep.Obs,type="post",metric="Dep")) sss.vals.out<-rbind(sss.M.f,sss.M.m,sss.h,sss.Dep) ggplot(sss.vals.out,aes(x=value,color=type,fill=type))+ geom_histogram(position="dodge",alpha=0.5)+ theme(legend.position="bottom")+ theme(legend.title=element_blank())+ facet_grid(~metric,scales = "free") # Mf.plot<-ggplot(sss.M.f,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white") # Mm.plot<-ggplot(sss.M.m,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white") # h.plot<-ggplot(sss.h,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white") # Dep.plot<-ggplot(sss.Dep,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white") } else{return(NULL)} }) output$SSS_growth_priors_post<-renderPlot({ if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP")))) { load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP")) sss.L1_f<-rbind(data.frame(value=SSS.out$Prior$L1_f,type="prior",metric="Female L1"),data.frame(value=SSS.out$Post$L1_f,type="post",metric="Female L1")) sss.Linf_f<-rbind(data.frame(value=SSS.out$Prior$Linf_f,type="prior",metric="Female Linf"),data.frame(value=SSS.out$Post$Linf_f,type="post",metric="Female Linf")) sss.k_f<-rbind(data.frame(value=SSS.out$Prior$k_f,type="prior",metric="Female k"),data.frame(value=SSS.out$Post$k_f,type="post",metric="Female k")) sss.L1_m<-rbind(data.frame(value=SSS.out$Prior$L1_m,type="prior",metric="Male 
L1"),data.frame(value=SSS.out$Post$L1_m,type="post",metric="Male L1")) sss.Linf_m<-rbind(data.frame(value=SSS.out$Prior$Linf_m,type="prior",metric="Male Linf"),data.frame(value=SSS.out$Post$Linf_m,type="post",metric="Male Linf")) sss.k_m<-rbind(data.frame(value=SSS.out$Prior$k_m,type="prior",metric="Male k"),data.frame(value=SSS.out$Post$k_m,type="post",metric="Male k")) sss.vals.growth.out<-rbind(sss.L1_f,sss.Linf_f,sss.k_f,sss.L1_m,sss.Linf_m,sss.k_m) ggplot(sss.vals.growth.out,aes(x=value,color=type,fill=type))+ geom_histogram(position="dodge",alpha=0.5)+ theme(legend.position="bottom")+ theme(legend.title=element_blank())+ facet_wrap(~metric,scales = "free") } else{return(NULL)} }) output$SSS_OFL_plot<-renderPlot({ if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP")))) { load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP")) ofl.years<-as.numeric(unique(melt(SSS.out$OFL)$Var2)) ggplot(melt(SSS.out$OFL),aes(Var2,value,group=Var2))+ geom_boxplot(fill="#236192")+ scale_x_continuous(breaks=ofl.years,labels=as.character(ofl.years))+ ylab("OFL (mt)")+ xlab("Year") } else{return(NULL)} }) output$SSS_ABC_plot<-renderPlot({ if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP")))) { load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP")) abc.years<-as.numeric(unique(melt(SSS.out$ABC)$Var2)) ggplot(melt(SSS.out$ABC),aes(Var2,value,group=Var2))+ geom_boxplot(fill="#658D1B")+ scale_x_continuous(breaks=abc.years,labels=as.character(abc.years))+ ylab("ABC (mt)")+ xlab("Year") } else{return(NULL)} }) } remove_modal_spinner() }) ############### ### END SSS ### ############### ################################################################## ### PREPARE FILES and RUN Length and Age-based Stock Synthsis ### ################################################################## SS.file.update<-observeEvent(input$run_SS,{ # if(is.null(inFile) | !anyNA(inp$ # styr,ndyr, # input$Nages, # input$M_f, # input$k_f, # input$Linf_f, # 
input$t0_f, # input$L50_f, # input$L95_f, # input$M_m, # input$k_m, # input$Linf_m, # input$t0_m, # input$L50_m, # input$L95_m, # )) # { updateTabsetPanel(session, "tabs", selected = '1') # progress <- shiny::Progress$new(session, min=1, max=2) # on.exit(progress$close()) # progress$set(message = 'Model run in progress', # detail = '') # for (i in 1:2) { # progress$set(value = i) # Sys.sleep(0.5) # } if(!any(input$use_par,input$use_datanew,input$use_controlnew,input$user_model)) #if(which(c(input$use_par,input$use_datanew,input$use_datanew_user,input$use_controlnew,input$use_controlnew_user,input$user_model))!=0) { #Copy and move files if(file.exists(paste0("Scenarios/",input$Scenario_name))) { unlink(paste0("Scenarios/",input$Scenario_name),recursive=TRUE) #Deletes previous run # file.remove(paste0(getwd(),"/Scenarios/",input$Scenario_name)) } if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){ file.copy(paste0("SS_LO_F_files"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE) file.rename(paste0("Scenarios/SS_LO_F_files"), paste0("Scenarios/",input$Scenario_name)) } else{ file.copy(paste0("SS_LB_files"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE) file.rename(paste0("Scenarios/SS_LB_files"), paste0("Scenarios/",input$Scenario_name)) } } # if(!input$use_customfile) # { # } #Read data and control files if(!input$user_model) { data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/datafile.dat")) ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/controlfile.ctl"),use_datlist = TRUE, datlist=data.file) } if(input$use_datanew) { data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")) } if(input$use_controlnew) { data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")) ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/control.ss_new"),use_datlist = TRUE, datlist=data.file) } # data.file<-SS_readdat(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.dat")) # 
ctl.file<-SS_readctl(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.ctl"),use_datlist = TRUE, datlist=data.file) #if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)) # { # data.file<-SS_readdat(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.dat")) # ctl.file<-SS_readctl(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.ctl"),use_datlist = TRUE, datlist=data.file) # } if(!input$user_model) { #Prepare inputs to evaluate any errors Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,",")))) Sel50_phase<-as.numeric(trimws(unlist(strsplit(input$Sel50_phase,",")))) Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,",")))) Selpeak_phase<-as.numeric(trimws(unlist(strsplit(input$Selpeak_phase,",")))) bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1] minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1]) #sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase) sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase)) Nfleets<-max(ncol(rv.Ct$data)-1,rv.Lt$data[,3],rv.Age$data[,3],rv.Index$data[,3]) if(input$Sel_choice=="Dome-shaped") { PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,",")))) PeakDesc_phase<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_phase,",")))) LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,",")))) LtPeakFinal_phase<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_phase,",")))) FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,",")))) FinalSel_phase<-as.numeric(trimws(unlist(strsplit(input$FinalSel_phase,",")))) minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1]) sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase),length(PeakDesc),length(PeakDesc_phase),length(LtPeakFinal),length(LtPeakFinal_phase),length(FinalSel),length(FinalSel_phase)) } #Search for errors in inputs #Throw 
warning if not enough selectivity inputs if(!all(Nfleets==sel.inputs.lts)) { #Throw warning if not enough selectivity inputs sendSweetAlert( session = session, title = "Selectivity input warning", text = "Please check to see if you have provided filled in the inputs correctly. Especially check selectivity for missing fleets (both in parameter and phases). Total fleets includes fishing fleets and surveys.", type = "error") remove_modal_spinner() } if(all(Nfleets==sel.inputs.lts)) { checkmod<-1 #add object to verify no errors in inputs and model can be run show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress") if(!input$use_par) { if(all(!input$use_datanew,!input$user_model)) { #Read, edit then write new DATA file data.file$styr<-input$styr data.file$endyr<-input$endyr data.file$Nages<-Nages() if(!is.null(rv.Ct$data)){catch.fleets<-max(ncol(rv.Ct$data)-1)} if(all(!is.null(rv.Lt$data),is.null(rv.Ct$data))){catch.fleets<-max(rv.Lt$data[,3])} data.file$Nfleets<-max(catch.fleets,rv.Lt$data[,3],rv.Age$data[,3],rv.Index$data[,3]) ######### #Catches# ######### if (is.null(rv.Ct$data)) { #inFile<- rv.Lt$data Lt.comp.data<-rv.Lt$data Age.comp.data<- rv.Age$data #data.file$Nfleets<-max(Lt.comp.data[,2],Age.comp.data[,2]) if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info[4:5]<-c(input$styr,input$endyr)} if(data.file$Nfleets>1){ for(i in 1:(data.file$Nfleets-1)) { if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info<-rbind(data.file$bycatch_fleet_info,data.file$bycatch_fleet_info[1,])} } if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info[,1]<-c(1:data.file$Nfleets)} } year.in<-input$styr:input$endyr catch.cols<-colnames(data.file$catch) catch_temp<-list() if(catch.fleets==1){catch.level<-1000} if(catch.fleets>1){ catch.level<-as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,",")))) catch.level<-catch.level/sum(catch.level)*1000 } for(i in 1:catch.fleets) { catch_temp[[i]]<-data.frame( 
c(-999,year.in), rep(1,length(year.in)+1), rep(i,length(year.in)+1), c(catch.level[i],rep(catch.level[i],length(year.in))), c(0.01,rep(1000,length(year.in))) ) } data.file$catch<-list.rbind(catch_temp) colnames(data.file$catch)<-catch.cols } if(!is.null(rv.Ct$data)) { Catch.data<-rv.Ct$data #data.file$Nfleets<-max(ncol(Catch.data)-1,data.file$Nfleets) year.in<-Catch.data[,1] catch.cols<-colnames(data.file$catch) catch_temp<-list() for(i in 1:catch.fleets) { catch_temp[[i]]<-data.frame( c(-999,year.in), rep(1,length(year.in)+1), rep(i,length(year.in)+1), c(0.00000000000000000001,Catch.data[,i+1]), rep(0.01,length(year.in)+1) ) } data.file$catch<-list.rbind(catch_temp) colnames(data.file$catch)<-catch.cols } #Index data if (!is.null(rv.Index$data)) { Index.data<-rv.Index$data data.file$N_cpue<-unique(rv.Index$data[,3]) data.file$CPUE<-data.frame(year=rv.Index$data[,1],seas=rv.Index$data[,2],index=rv.Index$data[,3],obs=rv.Index$data[,4],se_log=rv.Index$data[,5]) } ######################### #Length composition data# ######################### #Population length data bins data.file$binwidth<-2 if(!is.null(rv.Lt$data)){data.file$binwidth<-as.numeric(colnames(rv.Lt$data)[7])-as.numeric(colnames(rv.Lt$data)[6])} data.file$minimum_size<-2 if(!is.null(rv.Lt$data)){data.file$minimum_size<-as.numeric(colnames(rv.Lt$data)[6])} max.bin.in<-2*(round((Linf()+(Linf()*0.25))/2))+2 #0.2326 data.file$maximum_size<-max.bin.in # if(input$advance_ss_click) # { data.file$binwidth<-input$lt_bin_size data.file$minimum_size<-input$lt_min_bin data.file$maximum_size<-input$lt_max_bin # } #inFile<- rv.Lt$data if (is.null(rv.Lt$data)) { if(input$est_parms==FALSE){Linf_bins<-input$Linf_f_fix} if(input$est_parms==TRUE){Linf_bins<-input$Linf_f_mean} data.file$binwidth<-2 data.file$minimum_size<-2 max.bin.in<-2*(round((Linf()+(Linf()*0.25))/2))+2 #0.2326 data.file$maximum_size<-max.bin.in data.file$lbin_vector<-seq(data.file$minimum_size,data.file$maximum_size,data.file$binwidth) 
data.file$N_lbins<-length(data.file$lbin_vector) data.file$lencomp<-NULL } if (!is.null(rv.Lt$data)) { Lt.comp.data<-rv.Lt$data data.file$N_lbins<-ncol(Lt.comp.data)-5 data.file$lbin_vector<-as.numeric(colnames(rv.Lt$data)[6:ncol(rv.Lt$data)]) #as.numeric(colnames(Lt.comp.data[,5:ncol(Lt.comp.data)])) if(data.file$maximum_size<max(data.file$lbin_vector)){data.file$maximum_size<-(2*round(max(data.file$lbin_vector)/2))+2} lt.data.names<-c(colnames(data.file$lencomp[,1:6]),paste0("f",data.file$lbin_vector),paste0("m",data.file$lbin_vector)) lt.data.females<-lt.data.males<-lt.data.unknowns<-lt.data.sex3<-data.frame(matrix(rep(NA,length(lt.data.names)),nrow=1)) colnames(Lt.comp.data)[1:5]<-c("Year","Month","Fleet","Sex","Nsamps") #female lengths if(nrow(subset(Lt.comp.data,Sex==1))>0){ Lt.comp.data_female<-subset(Lt.comp.data,Sex==1 & Nsamps>0) samp.yrs<-Lt.comp.data_female[,1] lt.data.females<-data.frame(cbind(samp.yrs, Lt.comp.data_female[,2], Lt.comp.data_female[,3], Lt.comp.data_female[,4], rep(0,length(samp.yrs)), Lt.comp.data_female[,5], Lt.comp.data_female[,6:ncol(Lt.comp.data_female)], Lt.comp.data_female[,6:ncol(Lt.comp.data_female)]*0) ) } #male lengths if(nrow(subset(Lt.comp.data,Sex==2))>0){ Lt.comp.data_male<-subset(Lt.comp.data,Sex==2 & Nsamps>0) samp.yrs_males<-Lt.comp.data_male[,1] lt.data.males<-data.frame(cbind(samp.yrs_males, Lt.comp.data_male[,2], Lt.comp.data_male[,3], Lt.comp.data_male[,4], rep(0,length(samp.yrs_males)), Lt.comp.data_male[,5], Lt.comp.data_male[,6:ncol(Lt.comp.data_male)]*0, Lt.comp.data_male[,6:ncol(Lt.comp.data_male)]) ) } #unknown sex lengths if(nrow(subset(Lt.comp.data,Sex==0))>0){ Lt.comp.data_unknown<-subset(Lt.comp.data,Sex==0 & Nsamps>0) samp.yrs_unknown<-Lt.comp.data_unknown[,1] lt.data.unknowns<-data.frame(cbind(samp.yrs_unknown, Lt.comp.data_unknown[,2], Lt.comp.data_unknown[,3], Lt.comp.data_unknown[,4], rep(0,length(samp.yrs_unknown)), Lt.comp.data_unknown[,5], Lt.comp.data_unknown[,6:ncol(Lt.comp.data_unknown)], 
Lt.comp.data_unknown[,6:ncol(Lt.comp.data_unknown)]*0) ) } #Maintain sample sex ratio if(input$Sex3){ yrsfleet_females<-paste0(Lt.comp.data_female[,1],Lt.comp.data_female[,3]) yrsfleet_males<-paste0(Lt.comp.data_male[,1],Lt.comp.data_male[,3]) #Match years #samp.yrs_sex3<-samp.yrs_females[match(samp.yrs_males,samp.yrs_females)] sex3_match_female<-yrsfleet_females%in%yrsfleet_males sex3_match_male<-yrsfleet_males%in%yrsfleet_females #Subset years Lt.comp.data_female_sex3<-Lt.comp.data_female[sex3_match_female,] Lt.comp.data_male_sex3<-Lt.comp.data_male[sex3_match_male,] lt.data.sex3<-data.frame(cbind(Lt.comp.data_female_sex3[,1], Lt.comp.data_female_sex3[,2], Lt.comp.data_female_sex3[,3], rep(3,nrow(Lt.comp.data_female_sex3)), rep(0,nrow(Lt.comp.data_female_sex3)), Lt.comp.data_female_sex3[,5]+Lt.comp.data_male_sex3[,4], Lt.comp.data_female_sex3[,6:ncol(Lt.comp.data_female_sex3)], Lt.comp.data_male_sex3[,6:ncol(Lt.comp.data_male_sex3)]) ) lt.data.females<-lt.data.females[!sex3_match_female,] lt.data.males<-lt.data.males[!sex3_match_male,] } colnames(lt.data.females)<-colnames(lt.data.males)<-colnames(lt.data.unknowns)<-colnames(lt.data.sex3)<-lt.data.names data.file$lencomp<-na.omit(rbind(lt.data.unknowns,lt.data.females,lt.data.males,lt.data.sex3)) } #} #else{ # data.file$lencomp<-data.frame(matrix(cbind(samp.yrs, # rep(1,length(samp.yrs)), # rep(1,length(samp.yrs)), # rep(1,length(samp.yrs)), # rep(0,length(samp.yrs)), # colSums(Lt.comp.data[-1]), # t(Lt.comp.data)[-1,], # t(Lt.comp.data)[-1,]*0), # nrow=length(samp.yrs), # ncol=6+length(Lt.comp.data[,1])*2, # byrow=FALSE))[,,drop=FALSE] # } # colnames(data.file$lencomp)<-lt.data.names ###################### #Age composition data# ###################### Age.comp.data<-rv.Age$data if (is.null(Age.comp.data)) { data.file$N_agebins<-Nages() data.file$agebin_vector<-0:(Nages()-1) data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE)) 
colnames(data.file$ageerror)<-paste0("age",0:Nages()) } if (!is.null(Age.comp.data)) { data.file$N_agebins<-ncol(Age.comp.data)-8 data.file$agebin_vector<-as.numeric(colnames(Age.comp.data[,9:ncol(Age.comp.data)])) data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE)) if(!is.null(input$Ageing_error_choice)){ if(input$Ageing_error_choice) { data.file$ageerror<-data.frame((rv.AgeErr$data)) data.file$N_ageerror_definitions<-nrow(rv.AgeErr$data)/2 } } #Label object for r4ss colnames(data.file$ageerror)<-paste0("age",0:Nages()) rownames(data.file$ageerror)<-c(1:nrow(data.file$ageerror)) # data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE)) # colnames(data.file$ageerror)<-paste0("age",1:Nages()) age.data.names<-c(c("Yr","Month","Fleet","Sex","Part","Ageerr","Lbin_lo","Lbin_hi","Nsamp"),paste0("f",data.file$agebin_vector),paste0("m",data.file$agebin_vector)) age.data.females<-age.data.males<-age.data.unknowns<-data.frame(matrix(rep(NA,length(age.data.names)),nrow=1)) colnames(Age.comp.data)[1:8]<-c("Year","Month","Fleet","Sex","AgeErr","Lbin_low","Lbin_hi","Nsamps") #female ages if(nrow(subset(Age.comp.data,Sex==1))>0){ Age.comp.data_female<-subset(Age.comp.data,Sex==1 & Nsamps>0) samp.yrs_females<-Age.comp.data_female[,1] age.data.females<-data.frame(cbind(samp.yrs_females, Age.comp.data_female[,2], Age.comp.data_female[,3], Age.comp.data_female[,4], rep(0,length(samp.yrs_females)), Age.comp.data_female[,5], Age.comp.data_female[,6], Age.comp.data_female[,7], Age.comp.data_female[,8], Age.comp.data_female[,9:ncol(Age.comp.data_female)], Age.comp.data_female[,9:ncol(Age.comp.data_female)]*0) ) } #male ages if(nrow(subset(Age.comp.data,Sex==2))>0){ Age.comp.data_male<-subset(Age.comp.data,Sex==2 & Nsamps>0) samp.yrs_males<-Age.comp.data_male[,1] age.data.males<-data.frame(cbind(samp.yrs_males, Age.comp.data_male[,2], Age.comp.data_male[,3], 
Age.comp.data_male[,4], rep(0,length(samp.yrs_males)), Age.comp.data_male[,5], Age.comp.data_male[,6], Age.comp.data_male[,7], Age.comp.data_male[,8], Age.comp.data_male[,9:ncol(Age.comp.data_male)]*0, Age.comp.data_male[,9:ncol(Age.comp.data_male)]) ) } #unknown sex ages if(nrow(subset(Age.comp.data,Sex==0))>0){ Age.comp.data_unknown<-subset(Age.comp.data,Sex==0 & Nsamps>0) samp.yrs_unknown<-Age.comp.data_unknown[,1] age.data.unknowns<-data.frame(cbind(samp.yrs_unknown, Age.comp.data_unknown[,2], Age.comp.data_unknown[,3], Age.comp.data_unknown[,4], rep(0,length(samp.yrs_unknown)), Age.comp.data_unknown[,5], Age.comp.data_unknown[,6], Age.comp.data_unknown[,7], Age.comp.data_unknown[,8], Age.comp.data_unknown[,9:ncol(Age.comp.data_unknown)], Age.comp.data_unknown[,9:ncol(Age.comp.data_unknown)]*0) ) } #if(nrow(subset(Age.comp.data,Sex==0))>0){age.data.unknowns<-data.frame(cbind( # age.data.unknowns, # Age.comp.data[1,7:ncol(Age.comp.data_unknown)], # Age.comp.data[1,7:ncol(Age.comp.data_unknown)]*0)) # } colnames(age.data.females)<-colnames(age.data.males)<-colnames(age.data.unknowns)<-age.data.names data.file$agecomp<-na.omit(rbind(age.data.females,age.data.males,age.data.unknowns)) } # inFile_age<- rv.Age$data # if (is.null(inFile_age)){ # data.file$N_agebins<-Nages() # data.file$agebin_vector<-1:Nages() # data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE)) # colnames(data.file$ageerror)<-paste0("age",1:Nages()) # } # if (!is.null(inFile_age)){ # Age.comp.data<-rv.Age$data # age.classes<-nrow(Age.comp.data) # data.file$N_agebins<-age.classes # data.file$agebin_vector<-Age.comp.data[,1] # data.file$ageerror<-data.frame(matrix(c(rep(-1,(age.classes+1)),rep(0.001,(age.classes+1))),2,(age.classes+1),byrow=TRUE)) # colnames(data.file$ageerror)<-paste0("age",1:Nages()) # age.samp.yrs<-as.numeric(colnames(Age.comp.data)[-1]) # 
age.data.names<-c(c("Yr","Seas","FltSvy","Gender","Part","Ageerr","Lbin_lo","Lbin_hi","Nsamp"),paste0("f",Age.comp.data[,1]),paste0("m",Age.comp.data[,1])) # if(length(age.samp.yrs)==1){ # data.file$agecomp<-data.frame(matrix(c(samp.yrs, # rep(1,length(age.samp.yrs)), # rep(1,length(age.samp.yrs)), # rep(1,length(age.samp.yrs)), # rep(0,length(age.samp.yrs)), # rep(-1,length(age.samp.yrs)), # rep(-1,length(age.samp.yrs)), # rep(-1,length(age.samp.yrs)), # colSums(Age.comp.data[-1]), # t(Age.comp.data)[-1,], # t(Age.comp.data)[-1,]*0), # nrow=length(age.samp.yrs), # ncol=9+length(Age.comp.data[,1])*2, # byrow=FALSE))[,,drop=FALSE] # } # else{ # data.file$agecomp<-data.frame(matrix(cbind(samp.yrs, # rep(1,length(age.samp.yrs)), # rep(1,length(age.samp.yrs)), # rep(1,length(age.samp.yrs)), # rep(0,length(age.samp.yrs)), # rep(-1,length(age.samp.yrs)), # rep(-1,length(age.samp.yrs)), # rep(-1,length(age.samp.yrs)), # colSums(Age.comp.data[-1]), # t(Age.comp.data)[-1,], # t(Age.comp.data)[-1,]*0), # nrow=length(age.samp.yrs), # ncol=9+length(Age.comp.data[,1])*2, # byrow=FALSE))[,,drop=FALSE] # } # colnames(data.file$agecomp)<-age.data.names # } #Create data info if(data.file$Nfleets>1){ for(i in 1:(data.file$Nfleets-1)) { data.file$fleetinfo<-rbind(data.file$fleetinfo,data.file$fleetinfo[1,]) data.file$CPUEinfo<-rbind(data.file$CPUEinfo,data.file$CPUEinfo[1,]) data.file$len_info<-rbind(data.file$len_info,data.file$len_info[1,]) data.file$age_info<-rbind(data.file$age_info,data.file$age_info[1,]) } #Set Dirichlet on # data.file$age_info[,5]<-data.file$len_info[,5]<-1 #Set up the correct fleet enumeration # data.file$len_info[,6]<-1:data.file$Nfleets #Used for Dirichlet set-up # data.file$age_info[,6]<-(data.file$Nfleets+1):(2*data.file$Nfleets) #Used for Dirichlet set-up #Survey names if(is.null(rv.Ct$data)){data.file$fleetinfo$fleetname<-paste0("Fishery",1:data.file$Nfleets)} if(!is.null(rv.Ct$data)) { fishery.names<-gsub(" ","",colnames(rv.Ct$data)[-1]) 
if(!is.null(rv.Index$data)&data.file$Nfleets>catch.fleets) { Surveyonly<-subset(rv.Index$data,Fleet>catch.fleets) fleet.survey.names<-unique(c(fishery.names,unique(Surveyonly[,6]))) survey.fleets<-unique(Surveyonly[,3]) data.file$fleetinfo$fleetname<-fleet.survey.names } if(is.null(rv.Index$data)|all(!is.null(rv.Index$data)&data.file$Nfleets==catch.fleets)){data.file$fleetinfo$fleetname<-fishery.names} if(!is.null(rv.Index$data)& max(rv.Index$data[,3])>length(fishery.names)){data.file$fleetinfo[survey.fleets,1]<-3} } data.file$CPUEinfo[,1]<-1:data.file$Nfleets } if(!is.null(rv.Index$data)&data.file$Nfleets>catch.fleets) { if(any(fleet.survey.names=="RSS")) { data.file$CPUEinfo[grep("RSS",fleet.survey.names),2]<-34 } } #Change survey timing to 1 data.file$fleetinfo$surveytiming[data.file$fleetinfo$type%in%3]<-1 #Catch units if(input$Ct_units_choice) { ct.units<-as.numeric(trimws(unlist(strsplit(input$fleet_ct_units,",")))) #data.file$fleetinfo[ct.units,4]<-2 #use this when just specifying which are fleets are numbers data.file$fleetinfo[,4]<-ct.units } SS_writedat(data.file,paste0("Scenarios/",input$Scenario_name,"/datafile.dat"),overwrite=TRUE) } ####################### END DATA FILE ##################################### ################################################################################## ####################### START CTL FILE #################################### #Read, edit then write new CONTROL file if(all(!input$use_controlnew,!input$user_model)) { #Change to 1 platoon if(!is.null(input$GT1)){if(input$GT1){ctl.file$N_platoon<-1}} #LENGTH or AGE-ONLY if(all(!is.null(c(rv.Lt$data,rv.Age$data,rv.Index$data)),is.null(rv.Ct$data))==TRUE) { fem_vbgf<-VBGF(input$Linf_f,input$k_f,input$t0_f,c(0:Nages())) #Females ctl.file$MG_parms[1,3]<-input$M_f #M #ctl.file$MG_parms[2,3:4]<-fem_vbgf[1] #L0 ctl.file$Growth_Age_for_L1<-input$t0_f ctl.file$MG_parms[2,3:4]<-0 #L0 ctl.file$MG_parms[3,3:4]<-input$Linf_f #Linf ctl.file$MG_parms[4,3:4]<-input$k_f #k 
ctl.file$MG_parms[5,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[1] #CV ctl.file$MG_parms[6,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[2] #CV #Maturity ctl.file$MG_parms[9,3:4]<-input$L50_f #Lmat50% ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f-input$L50_f) #Maturity slope #ctl.file$MG_parms[11,3:4]<-input$Fec_a_f #coefficient #ctl.file$MG_parms[12,3:4]<- input$Fec_b_f #exponent #Males ctl.file$MG_parms[13,3]<-input$M_f #M #ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0 ctl.file$MG_parms[14,3:4]<-0 #L0 ctl.file$MG_parms[15,3:4]<-input$Linf_f #Linf ctl.file$MG_parms[16,3:4]<-input$k_f #k ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[1] #CV ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[2] #CV #ctl.file$MG_parms[19,3:4]<-input$WLa_f #coefficient #ctl.file$MG_parms[20,3:4]<-input$WLb_f #exponent if(input$male_offset) { ctl.file$parameter_offset_approach<-2 #Change to offset approach ctl.file$MG_parms[13,3:4]<-0 #M ctl.file$MG_parms[14,3:4]<-0 #L0 ctl.file$MG_parms[15,3:4]<-0 #Linf ctl.file$MG_parms[16,3:4]<-0 #k ctl.file$MG_parms[17,3:4]<-0 #CV ctl.file$MG_parms[18,3:4]<-0 #CV #Weight-length ctl.file$MG_parms[19,3:4]<-0 #coefficient ctl.file$MG_parms[20,3:4]<-0 #exponent } if(input$male_parms) { male_vbgf<-VBGF(input$Linf_m,input$k_m,input$t0_m,c(input$t0_f:Nages())) ctl.file$MG_parms[13,3]<-input$M_m #M ctl.file$MG_parms[14,3:4]<-male_vbgf[1] #L0 ctl.file$MG_parms[15,3:4]<-input$Linf_m #Linf ctl.file$MG_parms[16,3:4]<-input$k_m #k ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m,","))))[1] #CV ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m,","))))[2] #CV # ctl.file$MG_parms[19,3:4]<-input$WLa_m #coefficient # ctl.file$MG_parms[20,3:4]<-input$WLb_m #exponent } if(input$Ct_F_LO_select=="Estimate F"){ctl.file$SR_parms[1,7]=-1} #lnR0 if(input$Ct_F_LO_select=="Constant Catch"){ctl.file$SR_parms[1,7]=1} 
#lnR0 ctl.file$SR_parms[2,3:4]<-input$h_LO #steepnes } #LENGTH and CATCH with fixed parameters if(all(any(input$est_parms==FALSE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))==TRUE) { fem_vbgf<-VBGF(input$Linf_f_fix,input$k_f_fix,input$t0_f_fix,c(0:Nages())) #Females ctl.file$MG_parms[1,3]<-input$M_f_fix #M #ctl.file$MG_parms[2,3:4]<-fem_vbgf[1] #L0 ctl.file$Growth_Age_for_L1<-input$t0_f_fix ctl.file$MG_parms[2,3:4]<-0 #L0 ctl.file$MG_parms[3,3:4]<-input$Linf_f_fix #Linf ctl.file$MG_parms[4,3:4]<-input$k_f_fix #k ctl.file$MG_parms[5,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[1] #CV ctl.file$MG_parms[6,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[2] #CV #Weight-length ctl.file$MG_parms[7,3:4]<-input$WLa_f_fix #coefficient ctl.file$MG_parms[8,3:4]<- input$WLb_f_fix #exponent #Maturity ctl.file$MG_parms[9,3:4]<-input$L50_f_fix #Lmat50% ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_fix-input$L50_f_fix) #Maturity slope ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_fix #coefficient ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_fix #exponent #Males ctl.file$MG_parms[13,3]<-input$M_f_fix #M #ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0 ctl.file$MG_parms[14,3:4]<-0 #L0 ctl.file$MG_parms[15,3:4]<-input$Linf_f_fix #Linf ctl.file$MG_parms[16,3:4]<-input$k_f_fix #k ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[1] #CV ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[2] #CV ctl.file$MG_parms[19,3:4]<-input$WLa_f_fix #coefficient ctl.file$MG_parms[20,3:4]<- input$WLb_f_fix #exponent if(input$male_offset_fix) { ctl.file$parameter_offset_approach<-2 #Change to offset approach ctl.file$MG_parms[13,3:4]<-0 #M ctl.file$MG_parms[14,3:4]<-0 #L0 ctl.file$MG_parms[15,3:4]<-0 #Linf ctl.file$MG_parms[16,3:4]<-0 #k ctl.file$MG_parms[17,3:4]<-0 
#CV ctl.file$MG_parms[18,3:4]<-0 #CV #Weight-length ctl.file$MG_parms[19,3:4]<-0 #coefficient ctl.file$MG_parms[20,3:4]<-0 #exponent } if(input$male_parms_fix) { male_vbgf<-VBGF(input$Linf_m_fix,input$k_m_fix,input$t0_m_fix,c(input$t0_f_fix:Nages())) ctl.file$MG_parms[13,3]<-input$M_m_fix #M ctl.file$MG_parms[14,3:4]<-male_vbgf[1] #L0 ctl.file$MG_parms[15,3:4]<-input$Linf_m_fix #Linf ctl.file$MG_parms[16,3:4]<-input$k_m_fix #k ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m_fix,","))))[1] #CV ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m_fix,","))))[2] #CV #Weight-length ctl.file$MG_parms[19,3:4]<-input$WLa_m_fix #coefficient ctl.file$MG_parms[20,3:4]<-input$WLb_m_fix #exponent } #S-R ctl.file$SR_parms[1,3:4]<-input$lnR0 #lnR0 ctl.file$SR_parms[2,3:4]<-input$h #steepnes } #LENGTH and CATCH with estimated parameters if(all(any(input$est_parms==TRUE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))==TRUE) { fem_vbgf<-VBGF(input$Linf_f_mean,input$k_f_mean,input$t0_f_mean,c(0:Nages())) #c("lognormal","truncated normal","uniform","beta") prior.name<-c("no prior","symmetric beta", "beta","lognormal","gamma","normal") prior.type<-c(0:3,5,6) #Females #M if(input$M_f_prior=="lognormal"){ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean,log(input$M_f_mean))} else {ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean,input$M_f_mean)} ctl.file$MG_parms[1,5]<-input$M_f_SD ctl.file$MG_parms[1,6]<-prior.type[prior.name==input$M_f_prior] ctl.file$MG_parms[1,7]<-input$M_f_phase #L0 ctl.file$Growth_Age_for_L1<-input$t0_f_mean # if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(fem_vbgf[1],log(fem_vbgf[1]))} # else {ctl.file$MG_parms[2,3:4]<-fem_vbgf[1]} if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(0,log(0.0000001))} else {ctl.file$MG_parms[2,3:4]<-0} ctl.file$MG_parms[2,5]<-input$t0_f_SD 
# Female growth priors (estimated-parameters branch, continued): finish the
# age-at-L1 row, then Linf, k, and the young/old growth-CV rows. For each
# parameter, columns 3:4 are INIT/PRIOR (PRIOR in log space for lognormal
# priors), column 5 the prior SD, column 6 the SS3 prior-type code, and
# column 7 the estimation phase.
ctl.file$MG_parms[2,6]<-prior.type[prior.name==input$t0_f_prior]
ctl.file$MG_parms[2,7]<-input$t0_f_phase
#Linf
if(input$Linf_f_prior=="lognormal"){ctl.file$MG_parms[3,3:4]<-c(input$Linf_f_mean,log(input$Linf_f_mean))} else{ctl.file$MG_parms[3,3:4]<-input$Linf_f_mean}
ctl.file$MG_parms[3,5]<-input$Linf_f_SD
ctl.file$MG_parms[3,6]<-prior.type[prior.name==input$Linf_f_prior]
ctl.file$MG_parms[3,7]<-input$Linf_f_phase
#k
if(input$k_f_prior=="lognormal"){ctl.file$MG_parms[4,3:4]<-c(input$k_f_mean,log(input$k_f_mean))} else {ctl.file$MG_parms[4,3:4]<-input$k_f_mean}
ctl.file$MG_parms[4,5]<-input$k_f_SD
ctl.file$MG_parms[4,6]<-prior.type[prior.name==input$k_f_prior]
ctl.file$MG_parms[4,7]<-input$k_f_phase
#CV young
if(input$CV_lt_f_young_prior=="lognormal"){ctl.file$MG_parms[5,3:4]<-c(input$CV_lt_f_young_mean,log(input$CV_lt_f_young_mean))} else{ctl.file$MG_parms[5,3:4]<-input$CV_lt_f_young_mean}
ctl.file$MG_parms[5,5]<-input$CV_lt_f_young_SD
ctl.file$MG_parms[5,6]<-prior.type[prior.name==input$CV_lt_f_young_prior]
ctl.file$MG_parms[5,7]<-input$CV_lt_f_young_phase
#CV old
if(input$CV_lt_f_old_prior=="lognormal"){ctl.file$MG_parms[6,3:4]<-c(input$CV_lt_f_old_mean,log(input$CV_lt_f_old_mean))} else{ctl.file$MG_parms[6,3:4]<-input$CV_lt_f_old_mean}
# BUG FIX: removed a stray `ctl.file$MG_parms[6,3:4]<-input$CV_lt_f_old_mean`
# that followed the if/else above and unconditionally overwrote row 6,
# silently discarding the log-scale prior mean whenever a lognormal prior
# was selected for the old-growth CV.
ctl.file$MG_parms[6,5]<-input$CV_lt_f_old_SD
ctl.file$MG_parms[6,6]<-prior.type[prior.name==input$CV_lt_f_old_prior]
ctl.file$MG_parms[6,7]<-input$CV_lt_f_old_phase
#Weight-length
ctl.file$MG_parms[7,3:4]<-input$WLa_f_est #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_est #exponent
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f_est #Lmat50%
# Logistic maturity slope derived from L50/L95: log(0.05/0.95)/(L95-L50).
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_est-input$L50_f_est) #Maturity slope
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_est #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_est #exponent
#Males
# Male M defaults to the female mean (log-space prior column as for a
# lognormal prior); overridden below if male-specific inputs are supplied.
ctl.file$MG_parms[13,3:4]<-c(input$M_f_mean,log(input$M_f_mean)) #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0 ctl.file$MG_parms[15,3:4]<-input$Linf_f_mean #Linf ctl.file$MG_parms[16,3:4]<-input$k_f_mean #k ctl.file$MG_parms[17,3:4]<-input$CV_lt_f_old_mean #CV ctl.file$MG_parms[18,3:4]<-input$CV_lt_f_old_mean #CV #Weight-length ctl.file$MG_parms[19,3:4]<-input$WLa_f_est #coefficient ctl.file$MG_parms[20,3:4]<- input$WLb_f_est #exponent if(input$male_offset_est) { ctl.file$parameter_offset_approach<-2 #Change to offset approach ctl.file$MG_parms[13,3:4]<-0 #M ctl.file$MG_parms[14,3:4]<-0 #L0 ctl.file$MG_parms[15,3:4]<-0 #Linf ctl.file$MG_parms[16,3:4]<-0 #k ctl.file$MG_parms[17,3:4]<-0 #CV ctl.file$MG_parms[18,3:4]<-0 #CV #Weight-length ctl.file$MG_parms[19,3:4]<-0 #coefficient ctl.file$MG_parms[20,3:4]<-0 #exponent } if(input$male_parms_est) { male_vbgf_est<-VBGF(input$Linf_m_mean,input$k_m_mean,input$t0_m_mean,c(input$t0_f_mean:Nages())) # ctl.file$MG_parms[13,3]<-input$M_m_mean #M # ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1] #L0 # ctl.file$MG_parms[15,3:4]<-input$Linf_m_mean #Linf # ctl.file$MG_parms[16,3:4]<-input$k_m_mean #k # ctl.file$MG_parms[17,3:4]<-input$CV_lt_m_mean #CV # ctl.file$MG_parms[18,3:4]<-input$CV_lt_m_mean #CV #M if(input$M_m_prior=="lognormal"){ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean,log(input$M_m_mean))} else {ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean,input$M_m_mean)} ctl.file$MG_parms[13,5]<-input$M_m_SD ctl.file$MG_parms[13,6]<-prior.type[prior.name==input$M_m_prior] ctl.file$MG_parms[13,7]<-input$M_m_phase #L0 #if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_est[1],log(male_vbgf_est[1]+0.000000001))} #else {ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1]} if(input$t0_m_prior=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_est[1],log(male_vbgf_est[1]+0.000000001))} else {ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1]} ctl.file$MG_parms[14,5]<-input$t0_m_SD ctl.file$MG_parms[14,6]<-prior.type[prior.name==input$t0_m_prior] ctl.file$MG_parms[14,7]<-input$t0_m_phase #Linf 
# Male growth priors (estimated-parameters branch, continued): Linf, k and
# the young/old growth-CV rows, then the male weight-length pair. As for
# females, lognormal priors carry their prior mean (column 4) in log space;
# column 5 is the prior SD, 6 the SS3 prior-type code, 7 the phase.
if(input$Linf_m_prior=="lognormal"){ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean,log(input$Linf_m_mean))} else{ctl.file$MG_parms[15,3:4]<-input$Linf_m_mean}
ctl.file$MG_parms[15,5]<-input$Linf_m_SD
ctl.file$MG_parms[15,6]<-prior.type[prior.name==input$Linf_m_prior]
ctl.file$MG_parms[15,7]<-input$Linf_m_phase
#k
if(input$k_m_prior=="lognormal"){ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean,log(input$k_m_mean))} else {ctl.file$MG_parms[16,3:4]<-input$k_m_mean}
ctl.file$MG_parms[16,5]<-input$k_m_SD
ctl.file$MG_parms[16,6]<-prior.type[prior.name==input$k_m_prior]
ctl.file$MG_parms[16,7]<-input$k_m_phase
#CV young
if(input$CV_lt_m_young_prior=="lognormal"){ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean,log(input$CV_lt_m_young_mean))} else{ctl.file$MG_parms[17,3:4]<-input$CV_lt_m_young_mean}
ctl.file$MG_parms[17,5]<-input$CV_lt_m_young_SD
ctl.file$MG_parms[17,6]<-prior.type[prior.name==input$CV_lt_m_young_prior]
ctl.file$MG_parms[17,7]<-input$CV_lt_m_young_phase
#CV old
if(input$CV_lt_m_old_prior=="lognormal"){ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean,log(input$CV_lt_m_old_mean))} else{ctl.file$MG_parms[18,3:4]<-input$CV_lt_m_old_mean}
ctl.file$MG_parms[18,5]<-input$CV_lt_m_old_SD
ctl.file$MG_parms[18,6]<-prior.type[prior.name==input$CV_lt_m_old_prior]
ctl.file$MG_parms[18,7]<-input$CV_lt_m_old_phase
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_m_est #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_m_est #exponent
} # end if(input$male_parms_est)
#S-R
ctl.file$SR_parms[1,3:4]<-input$lnR0_est #lnR0
# BUG FIX: the lognormal branch previously used `log(h_mean_ss)` (missing
# `input$`), which raised "object 'h_mean_ss' not found" at run time
# whenever a lognormal steepness prior was selected.
if(input$h_ss_prior=="lognormal"){ctl.file$SR_parms[2,3:4]<-c(input$h_mean_ss,log(input$h_mean_ss))} else{ctl.file$SR_parms[2,3:4]<-input$h_mean_ss}
ctl.file$SR_parms[2,5]<-input$h_SD_ss
ctl.file$SR_parms[2,6]<-prior.type[prior.name==input$h_ss_prior]
ctl.file$SR_parms[2,7]<-input$h_phase
} # end estimated-parameters (length + catch) branch
#Recruitment estimation
# Defaults: recruitment deviations off, main dev period styr:endyr, no bias
# adjustment ramp. All of these are overridden below when the user enables
# recruitment estimation (input$rec_choice).
ctl.file$do_recdev<-0
ctl.file$recdev_phase<- -1
ctl.file$MainRdevYrFirst<-input$styr #Start year of recruitment estimation
ctl.file$MainRdevYrLast<-input$endyr #Last year of recruitment estimation
ctl.file$last_early_yr_nobias_adj<-input$styr #End year of early rec devs (no bias)
ctl.file$first_yr_fullbias_adj<-input$styr #First year full bias
ctl.file$last_yr_fullbias_adj<-input$endyr #Last year full bias
ctl.file$first_recent_yr_nobias_adj<-input$endyr #First year recent no bias
if(input$rec_choice)
{
  ctl.file$SR_parms[3,3:4]<-input$sigmaR #sigma R
  # Map the UI label to the SS3 do_recdev code (1-4).
  if(input$RecDevChoice=="1: Devs sum to zero"){ctl.file$do_recdev<-1}
  if(input$RecDevChoice=="2: Simple deviations"){ctl.file$do_recdev<-2}
  if(input$RecDevChoice=="3: deviation vector"){ctl.file$do_recdev<-3}
  if(input$RecDevChoice=="4: option 3 plus penalties"){ctl.file$do_recdev<-4}
  ctl.file$MainRdevYrFirst<-input$Rdev_startyr #Start year of recruitment estimation
  ctl.file$MainRdevYrLast<-input$Rdev_endyr #Last year of recruitment estimation
  ctl.file$recdev_phase<- 1
  if(input$biasC_choice)
  {
    #With bias correction
    ctl.file$recdev_early_start<- -1 #Year early rec dev phase starts
    ctl.file$recdev_early_phase<-3 #Early rec dev phase
    ctl.file$Fcast_recr_phase<-0 #Forecast rec dev phase
    ctl.file$last_early_yr_nobias_adj<-input$NobiasC_early #End year of early rec devs (no bias)
    ctl.file$first_yr_fullbias_adj<-input$BiasC_startyr #First year full bias
    ctl.file$last_yr_fullbias_adj<-input$BiasC_endyr #Last year full bias
    ctl.file$first_recent_yr_nobias_adj<-input$NobiasC_recent #First year recent no bias
    ctl.file$max_bias_adj<-input$BiasC #Max bias adjustment
  }
}
#SELECTIVITY
#Length Selectivity
# When F is estimated without a catch series, mark fleet selectivity as a
# mirrored (type 3) pattern so SS3 treats it as a discard fishery.
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){ctl.file$size_selex_types[2]<-3} #Change to recognize discard fishery
# Parse the comma-separated per-fleet selectivity inputs into numeric vectors
# (one entry per fleet: value and estimation phase for L50 and peak).
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
Sel50_phase<-as.numeric(trimws(unlist(strsplit(input$Sel50_phase,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
Selpeak_phase<-as.numeric(trimws(unlist(strsplit(input$Selpeak_phase,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1]
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1]) sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase) sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase)) if(input$Sel_choice=="Logistic") { #Throw warning if not enough selectivity inputs if(!all(data.file$Nfleets==sel.inputs.lts)) { sendSweetAlert( session = session, title = "Selectivity input warning", text = "Please check to see if you have provided selectivity inputs (both parameter and phases) for all fleets in the model. This includes fishing fleets and surverys.", type = "error") remove_modal_spinner() stopApp() } #ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width) #ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector)) ctl.file$size_selex_parms[1,1:2]<-c(Selpeak[1]-minmaxbin,Selpeak[1]+minmaxbin) ctl.file$size_selex_parms[1,3:4]<- Selpeak[1] ctl.file$size_selex_parms[2,3:4]<- 15 ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5))) ctl.file$size_selex_parms[4,3:4]<- -15 ctl.file$size_selex_parms[6,3:4]<- 15 #phases ctl.file$size_selex_parms[1,7]<- Selpeak_phase[1] ctl.file$size_selex_parms[2,7]<- -1 ctl.file$size_selex_parms[3,7]<- Sel50_phase[1] ctl.file$size_selex_parms[4,7]<- -1 ctl.file$size_selex_parms[6,7]<- -1 } if(input$Sel_choice=="Dome-shaped") { #Throw warning if not enough selectivity inputs sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase)-length(PeakDesc)-length(PeakDesc_phase)-length(LtPeakFinal)-length(LtPeakFinal_phase)-length(FinalSel)-length(FinalSel_phase) sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase),length(PeakDesc),length(PeakDesc_phase),length(LtPeakFinal),length(LtPeakFinal_phase),length(FinalSel),length(FinalSel_phase)) if(!all(data.file$Nfleets==sel.inputs.lts)) { 
sendSweetAlert( session = session, title = "Selectivity input warning", text = "Please check to see if you have provided selectivity inputs (both parameter and phases) for all fleets in the model. This includes fishing fleets and surverys.", type = "error") remove_modal_spinner() break } browser() PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,",")))) PeakDesc_phase<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_phase,",")))) LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,",")))) LtPeakFinal_phase<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_phase,",")))) FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,",")))) FinalSel_phase<-as.numeric(trimws(unlist(strsplit(input$FinalSel_phase,",")))) minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1]) #ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width) #ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector)) ctl.file$size_selex_parms[1,1:2]<-c(Selpeak[1]-minmaxbin,Selpeak[1]+minmaxbin) ctl.file$size_selex_parms[1,3:4]<- Selpeak[1] ctl.file$size_selex_parms[2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[1]-bin.width)/(PeakDesc[1]-Selpeak[1]-bin.width)) ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5))) ctl.file$size_selex_parms[4,3:4]<- log(LtPeakFinal[1]) ctl.file$size_selex_parms[6,3:4]<- -log((1/(FinalSel[1]+0.000000001)-1)) #phases ctl.file$size_selex_parms[1,7]<- Selpeak_phase[1] ctl.file$size_selex_parms[2,7]<- PeakDesc_phase[1] ctl.file$size_selex_parms[3,7]<- Sel50_phase[1] ctl.file$size_selex_parms[4,7]<- LtPeakFinal_phase[1] ctl.file$size_selex_parms[6,7]<- FinalSel_phase[1] } # if(input$dirichlet) # { # dirichlet.index<-c(unique(data.file$lencomp[,3]),(unique(data.file$agecomp[,3])+3)) # ctl.file$dirichlet_parms[dirichlet.index,3:4]<-0 # ctl.file$dirichlet_parms[dirichlet.index,7]<-2 # } #Add other 
fleets if(data.file$Nfleets>1){ for(i in 1:(data.file$Nfleets-1)) { ctl.file$init_F<-rbind(ctl.file$init_F,ctl.file$init_F[1,]) ctl.file$size_selex_types<-rbind(ctl.file$size_selex_types,ctl.file$size_selex_types[1,]) if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){ctl.file$size_selex_types[,2]<-3} ctl.file$age_selex_types<-rbind(ctl.file$age_selex_types,ctl.file$age_selex_types[1,]) ctl.file$size_selex_parms<-rbind(ctl.file$size_selex_parms,ctl.file$size_selex_parms[1:6,]) minmaxbin<-min(Selpeak[i+1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[i+1]) if(input$Sel_choice=="Logistic") { ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5))) ctl.file$size_selex_parms[6*i+3,7]<- Sel50_phase[i+1] #ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width) # ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector)) ctl.file$size_selex_parms[6*i+1,1:2]<-c(Selpeak[i+1]-minmaxbin,Selpeak[1]+minmaxbin) ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1] ctl.file$size_selex_parms[6*i+1,7]<- Selpeak_phase[i+1] ctl.file$size_selex_parms[6*i+2,3:4]<- 15 ctl.file$size_selex_parms[6*i+2,7]<- -1 ctl.file$size_selex_parms[6*i+4,3:4]<- -15 ctl.file$size_selex_parms[6*i+4,7]<- -1 ctl.file$size_selex_parms[6*i+6,3:4]<- 15 ctl.file$size_selex_parms[6*i+6,7]<- -1 } if(input$Sel_choice=="Dome-shaped") { # ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector)) ctl.file$size_selex_parms[6*i+1,1:2]<-c(Selpeak[i+1]-minmaxbin,Selpeak[1]+minmaxbin) ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1] ctl.file$size_selex_parms[6*i+1,7]<- Selpeak_phase[i+1] ctl.file$size_selex_parms[6*i+2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[i+1]-bin.width)/(PeakDesc[i+1]-Selpeak[i+1]-bin.width)) ctl.file$size_selex_parms[6*i+2,7]<- PeakDesc_phase[i+1] ctl.file$size_selex_parms[6*i+3,3:4]<- 
log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5))) ctl.file$size_selex_parms[6*i+3,7]<- Sel50_phase[i+1] ctl.file$size_selex_parms[6*i+4,3:4]<- log(LtPeakFinal[i+1]) ctl.file$size_selex_parms[6*i+4,7]<- LtPeakFinal_phase[i+1] ctl.file$size_selex_parms[6*i+6,3:4]<- -log((1/(FinalSel[i+1]+0.000000001)-1)) ctl.file$size_selex_parms[6*i+6,7]<- FinalSel_phase[i+1] } #Dirichlet data-weighting # ctl.file$dirichlet_parms<-rbind(ctl.file$dirichlet_parms,ctl.file$dirichlet_parms[1:2,]) } #Re-label so r4ss can interpret these new entries rownames(ctl.file$init_F)<-paste0("InitF_seas_1_flt_",1:data.file$Nfleets,"Fishery",1:data.file$Nfleets) rownames(ctl.file$age_selex_types)<-rownames(ctl.file$size_selex_types)<-paste0("Fishery",1:data.file$Nfleets) size_selex_parms_rownames<-list() for(f_i in 1:data.file$Nfleets) { size_selex_parms_rownames[[f_i]]<-c(paste0("SizeSel_P_1_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_2_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_3_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_4_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_5_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_6_Fishery",f_i,"(",f_i,")")) } size_selex_parms_rownames<-unlist(size_selex_parms_rownames) rownames(ctl.file$size_selex_parms)<-size_selex_parms_rownames } #Remove surveys from initial F lines and add q and xtra variance lines if(!is.null(rv.Index$data)) { if(data.file$Nfleets>catch.fleets){ctl.file$init_F<-ctl.file$init_F[-survey.fleets,]} q.setup.names<-c("fleet","link","link_info","extra_se","biasadj", "float") q.setup.lines<-data.frame(t(c(unique(rv.Index$data[,3])[1],1,0,0,0,1))) if(input$Indexvar){q.setup.lines<-data.frame(t(c(unique(rv.Index$data[,3])[1],1,0,1,0,1)))} qnames<-c("LO","HI","INIT","PRIOR","PR_SD","PR_type","PHASE","env_var&link","dev_link","dev_minyr","dev_maxyr","dev_PH","Block","Block_Fxn") q.lines<-data.frame(t(c(-15,15,1,0,1,0,-1,rep(0,7)))) if(input$Indexvar){q.lines<-data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,3,0,0,0,0,0,0,0)))} 
if(length(unique(rv.Index$data[,3]))>1) { for(q in 2:length(unique(rv.Index$data[,3]))) { if(!input$Indexvar) { q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,0,0,1)) q.lines<-rbind(q.lines,c(-15,15,1,0,1,0,-1,rep(0,7))) } if(input$Indexvar) { q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,1,0,1)) #if(unique(rv.Index$data[,6])[q]!="RSS"){q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,1,0,1))} #if(unique(rv.Index$data[,6])[q]=="RSS"){q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,0,0,1))} if(unique(rv.Index$data[,6])[q]!="RSS"){q.lines<-rbind(q.lines,data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,3,0,0,0,0,0,0,0))))} if(unique(rv.Index$data[,6])[q]=="RSS"){q.lines<-rbind(q.lines,data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,-3,0,0,0,0,0,0,0))))} } } } names(q.setup.lines)<-q.setup.names rownames(q.setup.lines)<-unique(rv.Index$data[,6]) ctl.file$Q_options<-q.setup.lines names(q.lines)<-qnames if(!input$Indexvar){rownames(q.lines)<-paste0("LnQ_base_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")")} #rnames.temp<-c(paste0("LnQ_base_",unique(rv.Index$data[,5]),"(",unique(rv.Index$data[,2]),")"),paste0("Q_extraSD_",unique(rv.Index$data[,5]),"(",unique(rv.Index$data[,2]),")")) #rnames.temp[1:length(rnames.temp)%%2 != 0] if(input$Indexvar) { qnames.temp1<-paste0("LnQ_base_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")") qnames.temp2<-paste0("Q_extraSD_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")") qnames.temp<-as.vector(rbind(qnames.temp1,qnames.temp2)) # if(length(rnames.temp1)>1) # { # for(xx in 2:length(rnames.temp1)) # { # rnames.temp<-c(rnames.temp1[x],rnames.temp2[x]) # } # } rownames(q.lines)<-qnames.temp } ctl.file$Q_parms<-q.lines if(data.file$Nfleets>catch.fleets) { if(any(fleet.survey.names=="RSS")) { RSS.index<-grep("RSS",fleet.survey.names) #ctl.file$Q_parms<-ctl.file$Q_parms 
ctl.file$size_selex_types[RSS.index,1]<-0 #Rename RSS selectivity types ctl.file$size_selex_parms<-ctl.file$size_selex_parms[-c((RSS.index*6-5):(RSS.index*6)),] #Remove selectivity related to RSS } } } # if(input$Data_wt=="Dirichlet") # { # Dirichlet.fleets<-c(unique(data.file$lencomp[,3]),(unique(data.file$agecomp[,3])+data.file$Nfleets)) # # if(Dirichlet.fleets>1) # # { # # for(i in 1:length(Dirichlet.fleets)){ctl.file$dirichlet_parms<-rbind(ctl.file$dirichlet_parms,ctl.file$dirichlet_parms[1,])} # # } # ctl.file$dirichlet_parms[Dirichlet.fleets,3:4]<-0.5 # ctl.file$dirichlet_parms[Dirichlet.fleets,7]<-2 # } #Change data weights # Lt_dat_wts<-as.numeric(trimws(unlist(strsplit(input$Lt_datawts,",")))) # ctl.file$Variance_adjustments[1,]<-Lt_dat_wts #Change likelihood component weight of catch if (is.null(rv.Ct$data)) { lts.lambdas<-ctl.file$lambdas[1,] ct.lambdas<-ctl.file$lambdas[2,] init.ct.lambdas<-ctl.file$lambdas[3,] if(data.file$Nfleets>1) { for(i_lam in 2:data.file$Nfleets) { lts.lambdas_temp<-ctl.file$lambdas[1,] ct.lambdas_temp<-ct.lambdas[1,] init.ct.lambdas_temp<-init.ct.lambdas[1,] lts.lambdas_temp[1,2]<-ct.lambdas_temp[1,2]<-init.ct.lambdas_temp[1,2]<-i_lam lts.lambdas<-rbind(lts.lambdas,lts.lambdas_temp) ct.lambdas<-rbind(ct.lambdas,ct.lambdas_temp) init.ct.lambdas<-rbind(init.ct.lambdas,init.ct.lambdas_temp) } } if(input$Ct_F_LO_select=="Estimate F") { if(data.file$Nfleets>1) { lt.lam.in<-as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,","))))/sum(as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,","))))) lt.lam<-lt.lam.in/max(lt.lam.in) lts.lambdas[,4]<-lt.lam } if(data.file$Nfleets==1) { lts.lambdas[,4]<-1 } } rownames(lts.lambdas)<-paste0("length_Fishery",c(1:data.file$Nfleets),"_sizefreq_method_1_Phz1") ct.lambdas[,4]<-0 rownames(ct.lambdas)<-paste0("catch_Fishery",c(1:data.file$Nfleets),"_Phz1") init.ct.lambdas[,4]<-0 
rownames(init.ct.lambdas)<-paste0("init_equ_catch_Fishery",c(1:data.file$Nfleets),"_lambda_for_init_equ_catch_can_only_enable/disable for_all_fleets_Phz1") ctl.file$lambdas<-rbind(lts.lambdas,ct.lambdas,init.ct.lambdas) ctl.file$N_lambdas<-nrow(ctl.file$lambdas) # ctl.file$lambdas[1,4]<-0 } if(!is.null(rv.Ct$data)) { ct.lambdas<-ctl.file$lambdas[2,] init.ct.lambdas<-ctl.file$lambdas[3,] if(data.file$Nfleets>1) { for(i_lam in 2:data.file$Nfleets) { ct.lambdas_temp<-ct.lambdas[1,] init.ct.lambdas_temp<-init.ct.lambdas[1,] ct.lambdas_temp[1,2]<-init.ct.lambdas_temp[1,2]<-i_lam ct.lambdas<-rbind(ct.lambdas,ct.lambdas_temp) init.ct.lambdas<-rbind(init.ct.lambdas,init.ct.lambdas_temp) } } ct.lambdas[,4]<-1 rownames(ct.lambdas)<-paste0("catch_Fishery",c(1:data.file$Nfleets),"_Phz1") init.ct.lambdas[,4]<-0 ctl.file$lambdas<-rbind(ct.lambdas,init.ct.lambdas) rownames(init.ct.lambdas)<-paste0("init_equ_catch_Fishery",c(1:data.file$Nfleets),"_lambda_for_init_equ_catch_can_only_enable/disable for_all_fleets_Phz1") ctl.file$N_lambdas<-data.file$Nfleets*2 #ctl.file$lambdas[1,4]<-1 # ctl.file$lambdas[2,4]<-0 ctl.file$init_F[,3]<-0.00000000000000000001 ctl.file$init_F[,7]<--1 } SS_writectl(ctl.file,paste0("Scenarios/",input$Scenario_name,"/controlfile.ctl"),overwrite=TRUE) } } } } ####################### END CTL FILE #################################### if(exists("checkmod")|input$user_model) { starter.file<-SS_readstarter(paste0("Scenarios/",input$Scenario_name,"/starter.ss")) #Use par file if(input$use_par) { starter.file$init_values_src<-1 } if(!input$use_par|is.null(input$use_par)) { starter.file$init_values_src<-0 } #Use datanew file if(input$use_datanew) { starter.file$datfile<-"data_echo.ss_new" } if(!input$use_datanew|is.null(input$use_datanew)) { if(!input$user_model|is.null(input$use_datanew)){starter.file$datfile<-"datafile.dat"} } #Use controlnew file if(input$use_controlnew) { starter.file$ctlfile<-"control.ss_new" } 
if(!input$use_controlnew|is.null(input$use_controlnew)) { if(!input$user_model|is.null(input$use_controlnew)){starter.file$ctlfile<-"controlfile.ctl"} } #Phase 0 if(input$use_phase0) { starter.file$last_estimation_phase<-0 } if(!input$use_par|is.null(input$use_par)) { starter.file$last_estimation_phase<-6 } #Jitter selection starter.file$jitter_fraction<-0 # if(input$jitter_choice) # { # starter.file$jitter_fraction<-input$jitter_fraction # starter.file$init_values_src<-0 # } SS_writestarter(starter.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE) #Forecast file modfications #Reference points if(!input$use_forecastnew) { forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss")) if(input$RP_choices){ forecast.file$SPRtarget<-input$SPR_target forecast.file$Btarget<-input$B_target CR_choices<-c("1: Catch fxn of SSB, buffer on F", "2: F fxn of SSB, buffer on F", "3: Catch fxn of SSB, buffer on catch", "4: F fxn of SSB, buffer on catch") CR_choices_num.vec<-c(1:4) forecast.file$ControlRuleMethod<-CR_choices_num.vec[CR_choices==input$CR_Ct_F] forecast.file$SBforconstantF<-input$slope_hi forecast.file$BfornoF<-input$slope_low } if(input$Forecast_choice) { forecast.file$Nforecastyrs<-input$forecast_num buffer.in<-as.numeric(trimws(unlist(strsplit(input$forecast_buffer,",")))) if(length(buffer.in)==1){forecast.file$Flimitfraction<-buffer.in} if(length(buffer.in)>1) { forecast.file$Flimitfraction<--1 buffer.datafr<-data.frame(Year=c((data.file$endyr+1):(data.file$endyr+input$forecast_num)),Fraction=buffer.in) #rownames(buffer.datafr)<-paste0("#_Flimitfraction_m",1:input$forecast_num) forecast.file$Flimitfraction_m<-buffer.datafr } } SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE) } if(input$use_forecastnew) { forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss_new")) SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE) } 
######## #Run Stock Synthesis and plot output show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress") if(input$Data_wt=="None"){DataWT_opt<-"none"} if(input$Data_wt=="Dirichlet"){DataWT_opt<-"DM"} if(input$Data_wt=="Francis"){DataWT_opt<-"Francis"} if(input$Data_wt=="McAllister-Ianelli"){DataWT_opt<-"MI"} if(is.null(input$no_hess)){ cmd.in<-"" if(input$add_comms==TRUE){cmd.in=paste0(" ",input$add_comms_in)} RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice) if(!file.exists(paste0("Scenarios/",input$Scenario_name,"data_echo.ss_new"))) { cmd.in<-" -nohess" if(input$add_comms==TRUE){cmd.in=paste0(" -nohess ",input$add_comms_in)} RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice) } } if(!is.null(input$no_hess)) { if(input$no_hess) { cmd.in<-" -nohess" if(input$add_comms==TRUE){cmd.in=paste0(" -nohess ",input$add_comms_in)} RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice) } if(!input$no_hess) { cmd.in<-"" if(input$add_comms==TRUE){cmd.in=paste0(" ",input$add_comms_in)} RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice) } } if(file.exists(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))) { Model.output<-try(SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE)) if(class(Model.output)=="try-error") { Model.output<-SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE) } if(input$Data_wt!="None") { if(Model.output$inputs$covar==TRUE) { tune_comps(Model.output,dir=paste0("Scenarios/",input$Scenario_name),niters_tuning=3,option=DataWT_opt,show_in_console = TRUE,verbose=FALSE) Model.output<-try(SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE)) } if(Model.output$inputs$covar==FALSE) { 
tune_comps(Model.output,dir=paste0("Scenarios/",input$Scenario_name),option=DataWT_opt,niters_tuning=3,extras = " -nohess",show_in_console = TRUE,verbose=FALSE) Model.output<-SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE) } } data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")) #No plots or figures if(is.null(input$no_plots_tables)) { show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[4],text="Making plots") SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE) } if(is.null(input$no_tables)) { show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[5],text="Making tables") try(SSexecutivesummary(Model.output)) } if(!is.null(input$no_plots_tables)){ if(input$no_plots_tables==FALSE) { #Make SS plots show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[4],text="Making plots") SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE) } } if(!is.null(input$no_tables)){ if(input$no_tables==FALSE) { #Make SS tables show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[5],text="Making tables") try(SSexecutivesummary(Model.output)) } } #Run multiple jitters if(input$jitter_choice) { if(input$Njitter>0) { show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[1],text="Run jitters") #file.copy(paste0("Scenarios/",input$Scenario_name,"/ss.exe"),paste0("Scenarios/",input$Scenario_name,"/ss_copy.exe"),overwrite = FALSE) jits<-jitter( dir=paste0(getwd(),"/Scenarios/",input$Scenario_name), Njitter=input$Njitter, printlikes = TRUE, jitter_fraction=input$jitter_fraction, init_values_src=0, verbose=FALSE, extras = "-nohess" ) profilemodels <- SSgetoutput(dirvec=paste0("Scenarios/",input$Scenario_name), keyvec=0:input$Njitter, getcovar=FALSE) profilesummary <- SSsummarize(profilemodels) minlikes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]==min(profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]) #Find best fit model 
index.minlikes<-c(1:length(minlikes))[minlikes] jitter.likes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)] ref.like<-min(jitter.likes,na.rm = TRUE) #Make plot and save to folder main.dir<-getwd() if(!file.exists(paste0("Scenarios/",input$Scenario_name,"/Jitter Results"))) { dir.create(paste0("Scenarios/",input$Scenario_name,"/Jitter Results")) } setwd(paste0("Scenarios/",input$Scenario_name,"/Jitter Results")) png("jitterplot.png") jitterplot<-plot(c(1:length(jitter.likes)),jitter.likes,type="p",col="black",bg="blue",pch=21,xlab="Jitter run",ylab="-log likelihood value",cex=1.25) points(c(1:length(jitter.likes))[jitter.likes>ref.like],jitter.likes[jitter.likes>ref.like],type="p",col="black",bg="red",pch=21,cex=1.25) abline(h=ref.like) # likebc<-round((length(jitter.likes[ref.like==jitter.likes])/(input$Njitter+1))*100,0) # likelessbc<-round((length(jitter.likes[ref.like>jitter.likes])/(input$Njitter+1))*100,0) # like10<-round((length(jitter.likes[(ref.like+10)<jitter.likes])/(input$Njitter+1))*100,0) # like2<-round(((length(jitter.likes[(ref.like+2)>jitter.likes])-(length(jitter.likes[ref.like==jitter.likes])))/(input$Njitter+1))*100,0) # like_2_10<-round(100-(likebc+like10+like2),0) # legend("topright",c(paste(" ",likelessbc,"% < BC",sep=""),paste(likebc,"% = BC",sep=""),paste(like2,"% < BC+2",sep=""),paste(like_2_10,"% > BC+2 & < BC+10",sep=""),paste(like10,"% > BC+10",sep="")),bty="n") dev.off() save(profilesummary,file=paste0("jitter_summary.DMP")) SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(1), new = FALSE,print=TRUE,plotdir=getwd()) SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(3), new = FALSE,print=TRUE,plotdir=getwd()) output$Jitterplot<-renderPlot({ # if(input$Njitter==1){return(NULL)} # if(input$Njitter>1) # { #jitter.likes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)] #ref.like<-min(jitter.likes) 
jitterplot<-plot(c(1:length(jitter.likes)),jitter.likes,type="p",col="black",bg="blue",pch=21,xlab="Jitter run",ylab="-log likelihood value",cex=1.25) points(c(1:length(jitter.likes))[jitter.likes>ref.like],jitter.likes[jitter.likes>ref.like],type="p",col="black",bg="red",pch=21,cex=1.25) abline(h=ref.like) # likebc<-round((length(jitter.likes[ref.like==jitter.likes])/(input$Njitter+1))*100,0) # likelessbc<-round((length(jitter.likes[ref.like>jitter.likes])/(input$Njitter+1))*100,0) # like10<-round((length(jitter.likes[(ref.like+10)<jitter.likes])/(input$Njitter+1))*100,0) # like2<-round(((length(jitter.likes[(ref.like+2)>jitter.likes])-(length(jitter.likes[ref.like==jitter.likes])))/(input$Njitter+1))*100,0) # like_2_10<-round(100-(likebc+like10+like2),0) # legend("topright",c(paste(" ",likelessbc,"% < BC",sep=""),paste(likebc,"% = BC",sep=""),paste(like2,"% < BC+2",sep=""),paste(like_2_10,"% > BC+2 & < BC+10",sep=""),paste(like10,"% > BC+10",sep="")),bty="n") # } }) #Spawning output comp output$Jittercompplot1<-renderPlot({ SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(1), new = FALSE) }) #Relative stock status comp output$Jittercompplot2<-renderPlot({ SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(3), new = FALSE) }) #R-run to get new best fit model show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[2],text="Re-run best model post-jitters") file.copy(paste0(main.dir,"/Scenarios/",input$Scenario_name,"/ss.par_",(index.minlikes[1]-1),".sso"),paste0(main.dir,"/Scenarios/",input$Scenario_name,"/ss.par"),overwrite = TRUE) #file.rename(paste0("Scenarios/",input$Scenario_name,"/ss_copy.exe"),paste0("Scenarios/",input$Scenario_name,"/ss.exe"),overwrite = FALSE) starter.file$init_values_src<-1 starter.file$jitter_fraction<-0 SS_writestarter(starter.file,paste0(main.dir,"/Scenarios/",input$Scenario_name),overwrite=TRUE) 
RUN.SS(paste0(main.dir,"/Scenarios/",input$Scenario_name),ss.cmd="",OS.in=input$OS_choice) Model.output<-try(SS_output(paste0(main.dir,"/Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE)) if(class(Model.output)=="try-error") { Model.output<-SS_output(paste0(main.dir,"/Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE) } show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[3],text="Making plots") SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE) show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[4],text="Making tables") try(SSexecutivesummary(Model.output)) } setwd(main.dir) } #Add retro runs # if(input$Retro_choice){ # mydir<-paste0(getwd(),"/Scenarios/") # model_settings = get_settings(settings = list(base_name = input$Scenario_name, # run = "retro", # retro_yrs = input$first_retro_year:input$final_retro_year)) # # tryCatch({ # run_diagnostics(mydir = mydir, model_settings = model_settings) # # }, # # warning = function(warn){ # # showNotification(paste0(warn), type = 'warning') # # }, # # error = function(err){ # # showNotification(paste0(err), type = 'err') # # }) # } #Convergence diagnostics output$converge.grad <- renderText({ max.grad<-paste0("Maximum gradient: ",Model.output$maximum_gradient_component) }) output$converge.covar <- renderText({ covar<-paste0("Was covariance file created? ",Model.output$inputs$covar) }) output$converge.dec <- renderText({ if(Model.output$maximum_gradient_component<0.1 & Model.output$inputs$covar==TRUE) {converge.dec<-"Model appears converged. Please check outputs for nonsense."} else{converge.dec<-"Model may not have converged or inputs are missing. 
Please use the Jitter option or check/change starting values before re-running model."} }) #Relative biomass output$SSout_relSB_table <- renderTable({ SB_indices<-c(which(rownames(Model.output$derived_quants)==paste0("Bratio_",input$endyr)), which(rownames(Model.output$derived_quants)=="B_MSY/SSB_unfished"), which(rownames(Model.output$derived_quants)==paste0("SPRratio_",input$endyr)), which(rownames(Model.output$derived_quants)==paste0("OFLCatch_",(input$endyr+1))), which(rownames(Model.output$derived_quants)==paste0("ForeCatch_",(input$endyr+1))) ) Output_relSB_table<-data.frame(Model.output$derived_quants[SB_indices,1:3]) # Label=c(paste0("SO",input$endyr+1,"/SO_0"), # "SO_MSY/SO_0", # paste0("SPR",input$endyr+1), # paste0("OFL",(input$endyr+1)), # paste0("ABC",(input$endyr+1)) # )) Output_relSB_table[,1]<-c(paste0("SO",input$endyr,"/SO_0"), "SO_MSY/SO_0", paste0("1-SPR",input$endyr), paste0("OFL",(input$endyr+1)), paste0("ABC",(input$endyr+1)) ) Output_relSB_table # rownames=c(expression(SO[input$endyr]/SO[0]), # expression(SO[MSY]/SO[0]), # expression(SPR[input$endyr]), # expression(OFL[input$endyr]), # expression(ABC[input$endyr]) # )) # Output_relSB_table[,1]<-c(expression('B',[input$endyr],'/B',[0]), # expression('B'[MSY]/'B'[0]), # expression('SPR'[input$endyr]), # expression('OFL'[input$endyr]), # expression('ABC'[input$endyr]) # ) }) #F estimate and relative to FMSY and proxies output$SSout_F_table <- renderTable({ F_indices<-c(which(rownames(Model.output$derived_quants)==paste0("F_",input$endyr)), which(rownames(Model.output$derived_quants)=="annF_Btgt"), which(rownames(Model.output$derived_quants)=="annF_SPR"), which(rownames(Model.output$derived_quants)=="annF_MSY") ) F_values<-Model.output$derived_quants[F_indices,1:3] }) #Time series output output$SSout_table <- renderTable({ # Output_table<-Model.output$sprseries[-nrow(Model.output$sprseries),c(1,5,6,7,8,9,11,12,13,25,37)] Output_table<-Model.output$sprseries[,c(1,5,6,7,8,9,11,12,13,25,37)] }) 
#Paramters output$Parameters_table <- renderTable({ cbind(rownames(Model.output$estimated_non_dev_parameters),Model.output$estimated_non_dev_parameters) }) } if(!file.exists(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))) { sendSweetAlert( session = session, title = "Model Warning", text = "Model did not run or Hessian did not invert. Double check data files for errors and each input for missing values (or for 0 SD for lognormal priors) and/or re-run model using a different model specification (e.g., starting values).", type = "warning") } remove_modal_spinner() observeEvent(exists("Model.output"), { updateTabsetPanel(session, "tabs", selected = '2') }) } }) ############################################################### ### Likelihood profiles, Sensitivities, and Ensemble models ### ############################################################### roots <- getVolumes()() # pathModelout <- reactive({ shinyDirChoose(input, "Modelout_dir", roots= roots,session=session, filetypes=c('', 'txt')) return(parseDirPath(roots, input$Modelout_dir)) }) observeEvent(as.numeric(input$tabs)==2,{ #observeEvent(exists("Model.output"),{ pathModelout.dir <-pathModelout() if(!identical(pathModelout.dir, character(0))) { #dir.create(paste0(pathModelout.dir,"/Scenarios")) file.copy(paste0("Scenarios/",input$Scenario_name), pathModelout.dir,recursive=TRUE,overwrite=TRUE) if(input$Retro_choice){file.copy(paste0("Scenarios/",input$Scenario_name,"_retro"), pathModelout.dir,recursive=TRUE,overwrite=TRUE)} } }) ######################## ### Model efficiency ### ######################## shinyDirChoose(input,"ModEff_dir", roots=roots,session=session, filetypes=c('', 'txt')) pathRetro <- reactive({ return(parseDirPath(roots, input$ModEff_dir)) }) # if(exists("ModEff_dir")){print(ModEff_dir)} # observeEvent(as.numeric(input$tabs)==12,{ # output$ModEff_model_pick<-renderUI({ # pickerInput( # inputId = "myModEff", # label = "Choose model to evaluate", # choices = 
# (continuation of the commented-out renderUI above: a model picker for the
#  efficiency tab, disabled in favor of calling parseDirPath() directly below)
#        list.files(pathModEff()),
#      options = list(
#        `actions-box` = TRUE,
#        size = 12,
#        `selected-text-format` = "count > 3"
#      ),
#      multiple = TRUE
#    )
#  })
# })

# Run an MCMC model-efficiency check (adnuts) on the directory chosen in the
# "ModEff_dir" picker: copy the model into a "<model>_<RWM|Nuts>" sibling
# folder, optionally re-optimize, sample with either random-walk Metropolis or
# NUTS, then publish the sampler summary and pairs plots to the UI.
observeEvent(req(input$run_adnuts),{
  modeff.mod.dir<-parseDirPath(roots, input$ModEff_dir) #pathModEff()
  modeff.dir<-dirname(modeff.mod.dir)
  # Working copy is named after the chosen sampler so RWM and NUTS runs coexist
  modeff.name<-paste0(basename(modeff.mod.dir),"_",input$ModEff_choice)
  if(dir.exists(file.path(modeff.dir,modeff.name))==FALSE)
  {
    dir.create(file.path(modeff.dir,modeff.name))
    file.copy(list.files(modeff.mod.dir,full.names=TRUE),to=file.path(modeff.dir,modeff.name),recursive=TRUE,overwrite=TRUE)
  }
  #optimize model: short MLE + mcmc run to refresh par/hessian before sampling
  if(input$Opt_mod==TRUE)
  {
    show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text=paste0("Run initial optimization?"))
    RUN.SS(file.path(modeff.dir,modeff.name),ss.cmd="/ss -nox -mcmc 100 -hbf",OS.in=input$OS_choice)
    remove_modal_spinner()
  }
  #Set mcmc model
  show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text=paste0("Run ",input$ModEff_choice," model"))
  chains <- parallel::detectCores()-1
  m<-"ss"
  p<-file.path(modeff.dir,modeff.name)
  #Run MCMC model with either rwm or nuts
  if(input$ModEff_choice=="RWM")
  {
    fit_model<- sample_rwm(model=m, path=p, iter=input$iter, warmup=0.25*input$iter, chains=chains, thin=input$thin, duration=NULL)
  }
  if (input$ModEff_choice=="Nuts")
  {
    # NOTE(review): this branch hard-codes chains=4/cores=4 rather than using
    # the detected `chains` from above (as the RWM branch does) -- confirm
    # whether that is intentional.
    fit_model <- sample_nuts(model=m, path=p, iter=input$iter, warmup=0.25*input$iter, chains=4, cores=4,control=list(metric='mle', max_treedepth=5),mceval=TRUE)
  }
  # Capture the printed sampler summary as text for display in the UI
  fit.mod.summary<-utils::capture.output(summary(fit_model), file=NULL)
  output$fit.model.summary <- renderText({
    #paste0(fit.mod.summary[1],fit.mod.summary[2],fit.mod.summary[3])
    fit.mod.summary
  })
  # Pairs plots for (up to) the first 10 parameters, ordered by mixing speed
  parmax<-10
  if(length(fit_model$par_names)<10){parmax<-length(fit_model$par_names)}
  png(paste0(p,"/pairs_plot_slow.png"),width=600, height=350)
  pairs_admb(fit_model, pars=1:parmax, order='slow')
  dev.off()
  png(paste0(p,"/pairs_plot_fast.png"),width=600, height=350)
  pairs_admb(fit_model, pars=1:parmax, order='fast')
  dev.off()
  # Serve the slowest-mixing-parameters pairs plot back to the UI
  output$pairs_slow <- renderImage({
    #region image.path1<-normalizePath(paste0(p,"/pairs_plot_fast.png"),mustWork=FALSE)
    return(list(
      src = paste0(p,"/pairs_plot_slow.png"),
      contentType = "image/png",
      # width = 400,
      # height = 300,
      style='height:60vh'))
  },deleteFile=FALSE)
  # Serve the fastest-mixing-parameters pairs plot back to the UI
  output$pairs_fast <- renderImage({
    #region image.path1<-normalizePath(paste0(p,"/pairs_plot_fast.png"),mustWork=FALSE)
    return(list(
      src = paste0(p,"/pairs_plot_fast.png"),
      contentType = "image/png",
      # width = 400,
      # height = 300,
      style='height:60vh'))
  },deleteFile=FALSE)
  # Persist the fitted object alongside the model for later inspection
  save(fit_model,file=paste0(p,"/fit_model.RData"))
  remove_modal_spinner()
  #if(input$run_stanout==TRUE){launch_shinyadmb(fit_model)}
})

###########################
### Likelihood profiles ###
###########################
# Directory picker for the model to profile over
pathLP <- reactive({
  shinyDirChoose(input, "LP_dir", roots=roots,session=session, filetypes=c('', 'txt'))
  return(parseDirPath(roots, input$LP_dir))
})

# Populate the parameter picker when the likelihood-profile tab (4) is opened
observeEvent(as.numeric(input$tabs)==4,{
  pathLP.dir <-pathLP()
  output$LikeProf_model_picks<-renderUI({
    pickerInput(
      inputId = "myPicker_LP",
      label = "Choose parameters to profile over",
      choices = c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male"),
      options = list(
        `actions-box` = TRUE,
        size = 12,
        `selected-text-format` = "count > 3"
      ),
      multiple = TRUE
    )
  })
})

# Run single-parameter likelihood profiles (via nwfscDiag) for each parameter
# selected in "myPicker_LP".
observeEvent(input$run_Profiles,{
  show_modal_spinner(spin="flower",color=wes_palettes$Darjeeling1[1],text="Profiles running")
  starter.file<-SS_readstarter(paste0(pathLP(),"/starter.ss"))
  #data.file<-SS_readdat(paste0(pathLP(),"/data_echo.ss_new"))
  #ctl.file<-SS_readctl(paste0(pathLP(),"/control.ss_new"),use_datlist = TRUE, datlist=data.file)
  # Parameter labels are read from the report file so indices match this model
  rep.parms<-SS_output(pathLP(),covar=FALSE,verbose=FALSE)
  rep.parms.names<-rownames(rep.parms$parameters)
  # SS_parm_names<-c("SR_BH_steep",
# (tail of the commented-out hard-coded SS label vector above)
#   "SR_LN(R0)","NatM_p_1_Fem_GP_1","L_at_Amax_Fem_GP_1","VonBert_K_Fem_GP_1","CV_young_Fem_GP_1","CV_old_Fem_GP_1","NatM_p_1_Mal_GP_1","L_at_Amax_Mal_GP_1","VonBert_K_Mal_GP_1","CV_young_Mal_GP_1","CV_old_Mal_GP_1")
  #SS_parm_names<-c(rownames(ctl.file$SR_parms)[2], rownames(ctl.file$SR_parms)[1],rownames(ctl.file$MG_parms)[1],rownames(ctl.file$MG_parms)[3],rownames(ctl.file$MG_parms)[4],rownames(ctl.file$MG_parms)[5],rownames(ctl.file$MG_parms)[6],rownames(ctl.file$MG_parms)[13],rownames(ctl.file$MG_parms)[15],rownames(ctl.file$MG_parms)[16],rownames(ctl.file$MG_parms)[17],rownames(ctl.file$MG_parms)[18])
  # SS parameter labels pulled by fixed row position from the report file.
  # NOTE(review): positions (24, 23, 1, 3:6, 13, 15:18) appear to assume a
  # two-sex, single-growth-pattern model layout -- confirm against
  # rownames(rep.parms$parameters) before reusing with other configurations.
  SS_parm_names<-c(rep.parms.names[24], rep.parms.names[23],rep.parms.names[1],rep.parms.names[3],rep.parms.names[4],rep.parms.names[5],rep.parms.names[6],rep.parms.names[13],rep.parms.names[15],rep.parms.names[16],rep.parms.names[17],rep.parms.names[18])
  parmnames<-input$myPicker_LP
  # Display labels, in the same order as SS_parm_names above
  parmnames_vec<-c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male")
  # Map the user's picks to SS labels.  NOTE(review): the result follows the
  # order of parmnames_vec, not the order the user picked; the comma-separated
  # low/high/step inputs below must therefore be entered in parmnames_vec order.
  prof_parms_names<-SS_parm_names[parmnames_vec%in%parmnames]
  # Honor the prior-likelihood switch from the starter file for every parameter
  prior_like<-starter.file$prior_like
  use_prior_like_in<-rep(0,length(prof_parms_names))
  if(prior_like==1){use_prior_like_in = rep(1,length(prof_parms_names))}
  mydir = dirname(pathLP())
  # Profile grid: comma-separated bounds/steps, one entry per profiled parameter
  get = get_settings_profile( parameters = prof_parms_names,
    low = as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))),
    high = as.numeric(trimws(unlist(strsplit(input$Prof_Hi_val,",")))),
    step_size = as.numeric(trimws(unlist(strsplit(input$Prof_step,",")))),
    param_space = rep('real',length(as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))))),
    use_prior_like = use_prior_like_in
  )
  model_settings = get_settings(settings = list(base_name = basename(pathLP()),
    run = "profile",
    profile_details = get))
  # try(): keep the app alive if a profile run fails part-way through
  try(run_diagnostics(mydir = mydir, model_settings = model_settings))
  # Remove the warning file nwfscDiag leaves behind so later runs start clean
  file.remove(paste0(dirname(mydir),"/run_diag_warning.txt"))
output$LikeProf_plot_modout <- renderImage({ image.path1<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/parameter_panel_",prof_parms_names[1],".png")),mustWork=FALSE) return(list( src = image.path1, contentType = "image/png", # width = 400, # height = 300, style='height:60vh')) },deleteFile=FALSE) output$LikeProf_plot_Piner <- renderImage({ image.path2<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/piner_panel_",prof_parms_names[1],".png")),mustWork=FALSE) return(list( src = image.path2, contentType = "image/png", # width = 400, # height = 300, style='height:60vh')) },deleteFile=FALSE) output$LikeProf_plot_SO <- renderImage({ image.path3<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/",prof_parms_names[1],"_trajectories_compare1_spawnbio.png")),mustWork=FALSE) return(list( src = image.path3, contentType = "image/png", # width = 400, # height = 300, style='height:60vh')) },deleteFile=FALSE) output$LikeProf_plot_SOt_SO0 <- renderImage({ image.path4<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/",prof_parms_names[1],"_trajectories_compare3_Bratio.png")),mustWork=FALSE) return(list( src = image.path4, contentType = "image/png", # width = 400, # height = 300, style='height:60vh')) },deleteFile=FALSE) remove_modal_spinner() }) observeEvent(input$run_MultiProfiles,{ show_modal_spinner(spin="flower",color=wes_palettes$Darjeeling1[2],text="Multi-profiles running") refdir<-pathLP() mydir <- dirname(refdir) #Read in reference model ref.model<-SS_output(refdir) #Read in parameter files par.df <- fread(input$file_multi_profile$datapath,check.names=FALSE,data.table=FALSE) L <- readLines(input$file_multi_profile$datapath, n = 1) if(grepl(";", L)) {par.df <- read.csv2(input$file_multi_profile$datapath,check.names=FALSE)} SS_parm_names<-rownames(ref.model$parameters)[c(23:24,1,3,4:6,13,15:18)] parmnames_vec<-c("Steepness","lnR0","Natural mortality female","Linf 
female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male") parmnames<-colnames(par.df) prof_parms_names<-SS_parm_names[parmnames_vec%in%parmnames] modelnames<-paste0(parmnames[1]," ",par.df[,1],";",parmnames[2]," ",par.df[,2]) #Make new folder #para = rownames(model_settings$profile_details)[aa] profile_dir <- paste0(refdir,"_profile_", paste(prof_parms_names,collapse="_")) dir.create(profile_dir, showWarnings = FALSE) if (length(list.files(profile_dir)) !=0) { remove <- list.files(profile_dir) file.remove(file.path(profile_dir, remove)) } all_files <- list.files(refdir) file.copy(from = file.path(refdir,all_files), to = profile_dir, overwrite = TRUE) #Set-up the starter file control file starter.file<-SS_readstarter(paste0(profile_dir,"/starter.ss")) starter.file$ctlfile<-"control_modified.ss" starter.file$init_values_src<-0 starter.file$prior_like<-1 SS_writestarter(starter.file,profile_dir,overwrite=TRUE) # low_in <- as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))), # high_in <- as.numeric(trimws(unlist(strsplit(input$Prof_Hi_val,",")))), # step_size_in <- as.numeric(trimws(unlist(strsplit(input$Prof_step,",")))) # par.df<-data.frame(mapply(function(x) seq(low[x],high[x],step_size[x]),x=1:length(low))) # colnames(par.df)<-prof_parms_names if(input$Hess_multi_like==FALSE) { profile <- profile( dir = profile_dir, # directory masterctlfile = "control.ss_new", newctlfile = "control_modified.ss", string = prof_parms_names, profilevec = par.df, extras = "-nohess", prior_check=TRUE, show_in_console = TRUE ) } if(input$Hess_multi_like==TRUE) { profile <- profile( dir = profile_dir, # directory masterctlfile = "control.ss_new", newctlfile = "control_modified.ss", string = prof_parms_names, profilevec = par.df, prior_check=TRUE, show_in_console = TRUE ) } # get model output profilemodels <- SSgetoutput(dirvec=profile_dir,keyvec=1:nrow(par.df), getcovar=FALSE) n <- 
length(profilemodels) profilesummary <- SSsummarize(profilemodels) try(SSplotComparisons(profilesummary, legendlabels = modelnames, ylimAdj = 1.30, new = FALSE,plot=FALSE,print=TRUE, legendloc = 'topleft',uncertainty=TRUE,plotdir=profile_dir,btarg=TRP_multi_like,minbthresh=LRP_multi_like)) save(profilesummary,file=paste0(profile_dir,"/multiprofile.DMP")) # add total likelihood (row 1) to table created above par.df$like <- as.numeric(profilesummary$likelihoods[1, 1:n]) par.df$likediff <- as.numeric(profilesummary$likelihoods[1, 1:n]-ref.model$likelihoods_used[1,1]) par.df$Bratio <- as.numeric(profilesummary$Bratio[grep((profilesummary$endyrs[1]),profilesummary$Bratio$Label), 1:n]) par.df$SB0 <- as.numeric(profilesummary$SpawnBio[1, 1:n]) par.df$SBcurrent <- as.numeric(profilesummary$SpawnBio[grep((profilesummary$endyrs[1]),profilesummary$SpawnBio$Label), 1:n]) SBcurrmax<-max(par.df$SBcurrent) colnames(par.df)<-c(parmnames,c("Likelihood","Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1]))) save(par.df,file=paste0(profile_dir,"/multiprofilelikelihoods.DMP")) write.csv(par.df,file=paste0(profile_dir,"/multiprofilelikelihoods.csv")) #This reactive object is needed to get the plots to work plot.dat<-reactive({ plot.dat<-melt(par.df,id.vars=c( colnames(par.df)[1:2]),measure.vars=c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1]))) plot.dat }) blank_data<- data.frame(variable = c("Likelihood_difference", "Likelihood_difference", paste0("SB",profilesummary$endyrs[1],"/SB0"), paste0("SB",profilesummary$endyrs[1],"/SB0"), "SB0", "SB0",paste0("SB",profilesummary$endyrs[1]),paste0("SB",profilesummary$endyrs[1])), x =min(par.df[,1]),y = c(min(par.df$Likelihood_difference),max(par.df$Likelihood_difference), 0, 1, 0, ceiling(max(par.df$SB0)),0,ceiling(SBcurrmax))) 
blank_data$variable<-factor(blank_data$variable,c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1]))) refmodel.dat<-data.frame(variable = c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])), x =ref.model$parameters[grep(prof_parms_names[1],ref.model$parameters$Label),3],y = c(0,ref.model$sprseries$Deplete[grep((profilesummary$endyrs[1]),profilesummary$Bratio$Label)+1],ref.model$SBzero,ref.model$derived_quants[grep((profilesummary$endyrs[1]),profilesummary$SpawnBio$Label),2])) #multiprofplotfun<-function(plot.dat) #{ output$LikeProf_multiplot <- renderPlot({ multiplot<-ggplot(plot.dat(),aes(plot.dat()[,1],value))+ geom_line(lwd=1.25)+ facet_wrap(~variable,scales="free_y")+ geom_blank(data = blank_data, aes(x = x, y = y,z="variable"))+ ylab("Difference in -log likelihood")+ scale_x_continuous(name = paste(parmnames[1],"and",parmnames[2]), breaks =par.df[,1], labels = paste0(par.df[,1],"\n",par.df[,2]))+ geom_hline(data = data.frame(yint=c(-1.96,0,1.96,0.4,0.25),variable=c("Likelihood_difference","Likelihood_difference","Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),paste0("SB",profilesummary$endyrs[1],"/SB0"))), aes(yintercept = yint), linetype = c("solid","dotted","solid","dotted","solid"),color=c("red","black","red","darkgreen","red"),lwd=1)+ geom_point(data=refmodel.dat,aes(x=x,y=y),color="blue",size=4)+ theme_bw() ggsave(paste0(profile_dir,"/","multilikelihood_profile.png"),width=10,height=10,units="in") multiplot }) #} # output$LikeProf_multiplot <- renderPlot({ # plotPNG(func=multiprofplotfun(plot.dat()),paste0(profile_dir,"/",paste(parmnames,collapse="_"),"_multilikelihood_profile.png")) # }) # plot.dat2<-reactive({ # plot.dat2<-melt(par.df,id.vars=c( colnames(par.df)[1:2]),measure.vars=c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1]-1,"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1]-1))) # 
plot.dat2 # }) # png(file = paste0(profile_dir,"/","multilikelihood_profile.png"),width = 10, height = 10, units = "in", res = 300, pointsize = pt) # # multiplot # ggplot(plot.dat2(),aes(plot.dat2()[,1],value))+ # geom_line(lwd=1.25)+ # facet_wrap(~variable,scales="free_y")+ # #geom_blank(data = blank_data, aes(x = x, y = y,z="variable"))+ # ylab("Difference in -log likelihood")+ # #scale_x_continuous(name = paste(parmnames[1],"and",parmnames[2]), # # breaks =par.df[,1], # # labels = paste0(par.df[,1],"\n",par.df[,2]))+ # geom_hline(data = data.frame(yint=c(-1.96,0,1.96,0.4,0.25),variable=c("Likelihood_difference","Likelihood_difference","Likelihood_difference",paste0("SB",profilesummary$endyrs[1]-1,"/SB0"),paste0("SB",profilesummary$endyrs[1]-1,"/SB0"))), # aes(yintercept = yint), linetype = c("solid","dotted","solid","dotted","solid"),color=c("red","black","red","darkgreen","red"),lwd=1)+ # #geom_point(data=refmodel.dat,aes(x=x,y=y),color="blue",size=3)+ # theme_bw() # multiprofplot #dev.off() # png(file = paste0(profile_dir,"/",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"),width = 10, height = 10, units = "in", res = 300, pointsize = pt) # output$LikeProf_multiplot <- renderImage({ # image.path<-normalizePath(file.path(paste0(profile_dir,paste0("\\",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"))),mustWork=FALSE) # return(list( # src = image.path, # contentType = "image/png", # # width = 400, # # height = 300, # style='height:60vh')) # },deleteFile=FALSE) # reshape data frame into a matrix for use with contour # pngfun(wd = mydir, file = paste0("contour_profile.png"), h = 7,w = 12) # contour(x = as.numeric(rownames(like_matrix)), # y = as.numeric(colnames(like_matrix)), # z = like_matrix) # dev.off() # make contour plot # output$LikeProf_multi_contour <- renderPlot({ # like_matrix <- reshape2::acast(par.df, colnames(par.df)[1]~colnames()[2], value.var="like") # pngfun(wd = mydir, file = paste0("contour_profile.png"), h = 7,w = 12) 
# (tail of the commented-out contour-plot experiment above)
#   contour(x = as.numeric(rownames(like_matrix)),
#           y = as.numeric(colnames(like_matrix)),
#           z = like_matrix)
#   dev.off()
# })
  remove_modal_spinner()
})
#################

###############################
####### Retrospectives ########
###############################
# Directory picker for the model to run retrospectives on
shinyDirChoose(input,"Retro_dir", roots=roots,session=session, filetypes=c('', 'txt'))
# NOTE(review): a reactive named pathRetro is also assigned earlier in this
# file (reading input$ModEff_dir); this later definition wins, making the
# earlier one dead code -- the earlier one was presumably meant to be
# pathModEff. Verify before refactoring.
pathRetro <- reactive({
  return(parseDirPath(roots, input$Retro_dir))
})
# Run retrospective peels via nwfscDiag and display the comparison figures it
# writes into the "<model>_retro" sibling directory.
observeEvent(input$run_Retro_comps,{
  #if(input$run_Retro_comps){
  show_modal_spinner(spin="flower",color=wes_palettes$Royal1[1],text="Running retrospectives")
  mydir_in<-dirname(pathRetro())
  scenario_in<-basename(pathRetro())
  model_settings = get_settings(settings = list(base_name = scenario_in,
    run = "retro",
    retro_yrs = input$first_retro_year_in:input$final_retro_year_in))
  run_diagnostics(mydir = mydir_in, model_settings = model_settings)
  # tryCatch({
  #   run_diagnostics(mydir = mydir_in, model_settings = model_settings)
  # },
  # warning = function(warn){
  #   showNotification(paste0(warn), type = 'warning')
  # },
  # error = function(err){
  #   showNotification(paste0(err), type = 'err')
  # })
  #}
  # Spawning-biomass retrospective comparison figure
  output$Retro_comp_plotSB <- renderImage({
    image.path<-normalizePath(file.path(paste0(pathRetro(),"_retro/compare2_spawnbio_uncertainty.png")),mustWork=FALSE)
    return(list(
      src = image.path,
      contentType = "image/png",
      # width = 400,
      # height = 300,
      style='height:60vh'))
  },deleteFile=FALSE)
  # Relative-stock-status retrospective comparison figure
  output$Retro_comp_plotBratio <- renderImage({
    image.path<-normalizePath(file.path(paste0(pathRetro(),"_retro/compare4_Bratio_uncertainty.png")),mustWork=FALSE)
    return(list(
      src = image.path,
      contentType = "image/png",
      # width = 400,
      # height = 300,
      style='height:60vh'))
  },deleteFile=FALSE)
  remove_modal_spinner()
})
##############################

###############################
### Sensitivity comparisons ###
###############################
# Directory picker containing the scenario folders to compare
pathSensi <- reactive({
  shinyDirChoose(input, "Sensi_dir", roots=roots,session=session, filetypes=c('', 'txt'))
  return(parseDirPath(roots,
input$Sensi_dir)) }) observeEvent(as.numeric(input$tabs)==6,{ output$Sensi_model_Ref<-renderUI({ #dirinfo <- parseDirPath(roots, input$Sensi_dir) pickerInput( inputId = "myPicker_Ref", label = "Choose reference model", #choices = list.files(dirinfo), choices = list.files(pathSensi()), options = list( `actions-box` = TRUE, size = 12, `selected-text-format` = "count > 3" ), multiple = FALSE ) }) }) observeEvent(!is.null(input$myPicker_Ref),{ # observeEvent(as.numeric(input$tabs)==6,{ output$Sensi_model_picks<-renderUI({ #dirinfo <- parseDirPath(roots, input$Sensi_dir) pickerInput( inputId = "myPicker", label = "Choose scenarios to compare to reference model", #choices = list.files(dirinfo), choices = list.files(pathSensi()), options = list( `actions-box` = TRUE, size = 12, `selected-text-format` = "count > 3" ), multiple = TRUE ) }) }) #SS.comparisons<-observeEvent(as.numeric(input$tabs)==5,{ Sensi_model_dir_out<-eventReactive(req(input$run_Sensi_comps&!is.null(input$myPicker)&as.numeric(input$tabs)==6),{ if(!file.exists(paste0(pathSensi(),"/Sensitivity Comparison Plots"))) { dir.create(paste0(pathSensi(),"/Sensitivity Comparison Plots")) } Sensi_model_dir_out_Ref<-paste0(pathSensi(),"/",input$myPicker_Ref) Sensi_model_dir_sensi<-paste0(pathSensi(),"/",input$myPicker) Sensi_model_dir<-c(Sensi_model_dir_out_Ref,Sensi_model_dir_sensi) Sensi_model_dir }) #&exists(Sensi_model_dir_out()) observeEvent(req(input$run_Sensi_comps),{ show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text="Comparisons running") modelnames<-c(input$myPicker_Ref,input$myPicker) zz<-list() Runs<-length(Sensi_model_dir_out()) for(i in 1:Runs) {zz[[i]]<-SS_output(paste0(Sensi_model_dir_out()[i]))} modsummary.sensi<- SSsummarize(zz) col.vec = rc(n=length(modelnames), alpha = 1) shade = adjustcolor(col.vec[1], alpha.f = 0.10) TRP.in<-input$Sensi_TRP LRP.in<-input$Sensi_LRP if(is.na(TRP.in)){TRP.in<-0} if(is.na(LRP.in)){LRP.in<-0} dir.create(paste0(pathSensi(),"/Sensitivity Comparison 
Plots/",input$Sensi_comp_file)) #Sensi_uncertainty_choice<-input$Sensi_uncertainty_choice #if (all(is.na(quantsSD[, i]) | quantsSD[, i] == 0)) Sensi_uncertainty_choice<-TRUE pngfun(wd = paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file), file = paste0(input$Sensi_comp_file,".png"), h = 7,w = 12) par(mfrow = c(1,3)) try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = c(2,4),col = col.vec, new = FALSE,btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice)) try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = 11,col = col.vec, new = FALSE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice)) dev.off() try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30,col = col.vec, new = FALSE,print=TRUE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice,plotdir=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file))) save(modsummary.sensi,file=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file,".DMP")) pngfun(wd = paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file), file = paste0(input$Sensi_comp_file,"_no_uncertainty.png"), h = 7,w = 12) par(mfrow = c(1,3)) try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = c(1,3),col = col.vec, new = FALSE,btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice)) try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = 11,col = col.vec, new = FALSE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice)) dev.off() output$Sensi_comp_plot <- renderImage({ if (all(is.na(modsummary.sensi$quantsSD[, 1]) | modsummary.sensi$quantsSD[, 1] == 0)) { image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity 
Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file, '_no_uncertainty.png')),mustWork=FALSE) return(list( src = image.path, contentType = "image/png", # width = 400, # height = 300, style='height:60vh')) } else { image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file, '.png')),mustWork=FALSE) return(list( src = image.path, contentType = "image/png", # width = 400, # height = 300, style='height:60vh')) } },deleteFile=FALSE) #Relative error sensitivity plots SensiRE_breaks_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_breaks,",")))) SensiRE_xcenter_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_xcenter,",")))) SensiRE_ycenter_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_ycenter,",")))) SensiRE_headers_in<-trimws(unlist(strsplit(input$SensiRE_headers,","))) yminmax_sensi<-rep(c(input$SensiRE_ymin,input$SensiRE_ymax),5) r4ss::SS_Sensi_plot(dir=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/"), model.summaries=modsummary.sensi, current.year=modsummary.sensi$endyrs[1]+1, mod.names=modelnames, #List the names of the sensitivity runs #likelihood.out=c(0,0,0), Sensi.RE.out="Sensi_RE_out.DMP", #Saved file of relative errors CI=0.95, #Confidence interval box based on the reference model TRP.in=input$Sensi_TRP, #Target relative abundance value LRP.in=input$Sensi_LRP, #Limit relative abundance value sensi_xlab="Sensitivity scenarios", #X-axis label ylims.in=yminmax_sensi, #Y-axis label plot.figs=c(1,1,1,1,1,1), #Which plots to make/save? 
sensi.type.breaks=SensiRE_breaks_in, #vertical breaks that can separate out types of sensitivities anno.x=SensiRE_xcenter_in, # Vertical positioning of the sensitivity types labels anno.y=SensiRE_ycenter_in, # Horizontal positioning of the sensitivity types labels anno.lab=SensiRE_headers_in #Sensitivity types labels ) output$SensiRE_comp_plot <- renderImage({ image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/Sensi_REplot_SB_Dep_F_MSY.png")),mustWork=FALSE) return(list( src = image.path, contentType = "image/png", width = 800, height = 1200, style='height:60vh')) },deleteFile=FALSE) output$SensiRElog_comp_plot <- renderImage({ image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/Sensi_logREplot_SB_Dep_F_MSY.png")),mustWork=FALSE) return(list( src = image.path, contentType = "image/png", width = 400, height = 300, style='height:60vh')) },deleteFile=FALSE) remove_modal_spinner() }) ############################# ############################# # image.path<-eventReactive(exists(file.path(paste0(path1(),"/Sensitivity Comparison Plots/", # input$Sensi_comp_file, '.png'))),{ # image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/", # input$Sensi_comp_file, '.png')),mustWork=FALSE) # }) # output$Sensi_comp_plot <- renderImage({ # image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/", # input$Sensi_comp_file, '.png')),mustWork=FALSE) # return(list( # src = image.path, # contentType = "image/png", # # width = 400, # # height = 300, # style='height:60vh')) # print(input$run_Sensi_comps[1]) # },deleteFile=FALSE) #################################### ########################## ### Ensemble modelling ### ########################## pathEnsemble <- reactive({ shinyDirChoose(input, "Ensemble_dir", roots=roots, filetypes=c('', 'txt')) return(parseDirPath(roots, input$Ensemble_dir)) }) #Used to have 
# as.numeric(input$tabs)==4   (left over from an earlier tab numbering)

# Populate the ensemble-model picker when the ensemble tab (7) is opened
observeEvent(as.numeric(input$tabs)==7,{
  output$Ensemble_model_picks<-renderUI({
    pickerInput(
      inputId = "myEnsemble",
      label = "Choose scenarios to ensemble",
      choices = list.files(pathEnsemble()),
      options = list(
        `actions-box` = TRUE,
        size = 12,
        `selected-text-format` = "count > 3"
      ),
      multiple = TRUE
    )
  })
})

# Build a weighted model ensemble: read each selected scenario, draw normal
# samples of the derived quantities (sample counts proportional to the user
# weights), and collect initial-year and terminal-year distributions for the
# comparison plots that follow.
observeEvent(req(input$run_Ensemble&!is.null(input$myEnsemble)&as.numeric(input$tabs)==7),{
  show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text="Prepare models to combine into ensembles")
  # Output folder "Ensemble outputs/<user label>" under the chosen directory
  if(!file.exists(paste0(pathEnsemble(),"/Ensemble outputs")))
  {
    dir.create(paste0(pathEnsemble(),"/Ensemble outputs"))
  }
  Ensemble_model_dir_out<-paste0(pathEnsemble(),"/Ensemble outputs/",input$Ensemble_file)
  dir.create(Ensemble_model_dir_out)
  # Read and summarize every selected model
  modelnames<-input$myEnsemble
  zz<-list()
  Runs<-length(input$myEnsemble)
  for(i in seq_len(Runs))
  {
    zz[[i]]<-SS_output(paste0(pathEnsemble(),"/",input$myEnsemble[i]))
  }
  modsummary.ensemble<- SSsummarize(zz)
  # User weights (comma-separated) -> number of draws allocated to each model
  Ensemble_wts<-as.numeric(trimws(unlist(strsplit(input$Ensemble_wts,","))))
  Stand_ensemble_wts<-Ensemble_wts/sum(Ensemble_wts)
  Nsamps_ensemble<-10000
  Nsamps_ensemble_wts<-round(Nsamps_ensemble*Stand_ensemble_wts)
  #Calculate weighted values
  # `numdraws` is found lexically from the loop below (per-model draw count)
  mean.fxn <- function(x, y) rnorm(numdraws, mean = x, sd = y)
  # Per-model draw containers and long-format collectors for the violin plots
  SpOt_en<-Bratio_en<-F_en<-SPR_en<-list()
  SO_0<-SO_t<-Bratio_t<-F_t<-SPR_t<-data.frame(Year=NA,Metric=NA,Model=NA)
  #Create weighted ensembles
  for (i in seq_along(Nsamps_ensemble_wts))
  {
    numdraws<-Nsamps_ensemble_wts[i]
    # Spawning output: one vector of draws per year, named by year
    SpOt_en[[i]]<-Map(mean.fxn,modsummary.ensemble$SpawnBio[,i],modsummary.ensemble$SpawnBioSD[,i])
    names(SpOt_en[[i]])<-modsummary.ensemble$SpawnBio$Yr
    SO_0<-rbind(SO_0,data.frame(Year=as.numeric(names(SpOt_en[[i]][1])),Metric=unlist(SpOt_en[[i]][1]),Model=input$myEnsemble[i]))
    # BUG FIX: the terminal-year draws were indexed with
    # length(Nsamps_ensemble_wts) (the number of models) instead of the final
    # year's position, mismatching the Year label on the same row and the
    # nrow(...) pattern used for Bratio/F/SPR below.
    SO_t<-rbind(SO_t,data.frame(Year=names(SpOt_en[[i]][nrow(modsummary.ensemble$SpawnBio)]),Metric=unlist(SpOt_en[[i]][nrow(modsummary.ensemble$SpawnBio)]),Model=input$myEnsemble[i]))
    # Relative stock status
    Bratio_en[[i]]<-Map(mean.fxn,modsummary.ensemble$Bratio[,i],modsummary.ensemble$BratioSD[,i])
    names(Bratio_en[[i]])<-modsummary.ensemble$Bratio$Yr
    Bratio_t<-rbind(Bratio_t,data.frame(Year=names(Bratio_en[[i]][nrow(modsummary.ensemble$Bratio)]),Metric=unlist(Bratio_en[[i]][nrow(modsummary.ensemble$Bratio)]),Model=input$myEnsemble[i]))
    # Fishing mortality
    F_en[[i]]<-Map(mean.fxn,modsummary.ensemble$Fvalue[,i],modsummary.ensemble$FvalueSD[,i])
    names(F_en[[i]])<-modsummary.ensemble$Fvalue$Yr
    F_t<-rbind(F_t,data.frame(Year=names(F_en[[i]][nrow(modsummary.ensemble$Fvalue)]),Metric=unlist(F_en[[i]][nrow(modsummary.ensemble$Fvalue)]),Model=input$myEnsemble[i]))
    # 1-SPR
    SPR_en[[i]]<-Map(mean.fxn,modsummary.ensemble$SPRratio[,i],modsummary.ensemble$SPRratioSD[,i])
    names(SPR_en[[i]])<-modsummary.ensemble$SPRratio$Yr
    SPR_t<-rbind(SPR_t,data.frame(Year=names(SPR_en[[i]][nrow(modsummary.ensemble$SPRratio)]),Metric=unlist(SPR_en[[i]][nrow(modsummary.ensemble$SPRratio)]),Model=input$myEnsemble[i]))
  }
  #Reduce(intersect,list(names(list1),names(list2),names(list3))) # Code to find matches in multiple vectors. For future option of mixing models with different dimensions.
#Assemble ensembles Ensemble_SO<-SpOt_en[[1]] Ensemble_Bratio<-Bratio_en[[1]] Ensemble_F<-F_en[[1]] Ensemble_SPR<-SPR_en[[1]] for(ii in 2:length(Nsamps_ensemble_wts)) { Ensemble_SO<-mapply(c,Ensemble_SO,SpOt_en[[ii]]) Ensemble_Bratio<-mapply(c,Ensemble_Bratio,Bratio_en[[ii]]) Ensemble_F<-mapply(c,Ensemble_F,F_en[[ii]]) Ensemble_SPR<-mapply(c,Ensemble_SPR,SPR_en[[ii]]) } SO_0<-rbind(SO_0[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SO)[1]),Metric=Ensemble_SO[,1],Model="Ensemble")) SO_t<-rbind(SO_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SO)[ncol(Ensemble_SO)]),Metric=Ensemble_SO[,ncol(Ensemble_SO)],Model="Ensemble")) Bratio_t<-rbind(Bratio_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_Bratio)[ncol(Ensemble_Bratio)]),Metric=Ensemble_Bratio[,ncol(Ensemble_Bratio)],Model="Ensemble")) F_t<-rbind(F_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_F)[ncol(Ensemble_F)]),Metric=Ensemble_F[,ncol(Ensemble_F)],Model="Ensemble")) SPR_t<-rbind(SPR_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SPR)[ncol(Ensemble_SPR)]),Metric=Ensemble_SPR[,ncol(Ensemble_SPR)],Model="Ensemble")) SO_0$Year<-as.factor(SO_0$Year) SO_t$Year<-as.factor(SO_t$Year) Bratio_t$Year<-as.factor(Bratio_t$Year) F_t$Year<-as.factor(F_t$Year) SPR_t$Year<-as.factor(SPR_t$Year) # mean_cl_quantile <- function(x, q = c(0.1, 0.9), na.rm = TRUE){ # dat <- data.frame(y = mean(x, na.rm = na.rm), # ymin = quantile(x, probs = q[1], na.rm = na.rm), # ymax = quantile(x, probs = q[2], na.rm = na.rm)) # return(dat) # } show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[2],text="Preparing ensemble plots") #Boxplots gg1<-ggplot(SO_0,aes(Model,Metric))+ geom_violin()+ ylab("Initial Spawning Output") gg2<-ggplot(SO_t,aes(Model,Metric))+ geom_violin()+ ylab("Terminal Year Spawning Output") gg3<-ggplot(Bratio_t,aes(Model,Metric))+ geom_violin()+ ylab("Relative stock status") gg4<-ggplot(F_t,aes(Model,Metric))+ geom_violin()+ ylab("Fishing mortality") gg5<-ggplot(SPR_t,aes(Model,Metric))+ 
geom_violin()+ ylab("1-SPR") ggarrange(gg1,gg2,gg3,gg4,gg5) ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_comp_plots.png")) output$Ensemble_plots <- renderPlot({ ggarrange(gg1,gg2,gg3,gg4,gg5)}) #Spawning Output plot Ensemble_SO_plot<-reshape2::melt(Ensemble_SO,value.name="SO") colnames(Ensemble_SO_plot)[2]<-"Year" Ensemble_SO_plot$Year<-as.factor(Ensemble_SO_plot$Year) ggplot(Ensemble_SO_plot,aes(Year,SO,fill=Year))+ geom_violin()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+ ylab("Spawning Output") ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_SO.png")) #Relative stock status plot Ensemble_Bratio_plot<-reshape2::melt(Ensemble_Bratio,value.name="Bratio") colnames(Ensemble_Bratio_plot)[2]<-"Year" Ensemble_Bratio_plot$Year<-as.factor(Ensemble_Bratio_plot$Year) ggplot(Ensemble_Bratio_plot,aes(Year,Bratio,fill=Year))+ geom_violin()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+ ylab("SBt/SO0") ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_Bratio.png")) #F plot Ensemble_F_plot<-reshape2::melt(Ensemble_F,value.name="F") colnames(Ensemble_F_plot)[2]<-"Year" Ensemble_F_plot$Year<-as.factor(Ensemble_F_plot$Year) ggplot(Ensemble_F_plot,aes(Year,F,fill=Year))+ geom_violin()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+ ylab("Fishing mortality") ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_F.png")) #1-SPR plot Ensemble_SPR_plot<-reshape2::melt(Ensemble_SO,value.name="SPR") colnames(Ensemble_SPR_plot)[2]<-"Year" Ensemble_SPR_plot$Year<-as.factor(Ensemble_SPR_plot$Year) ggplot(Ensemble_SPR_plot,aes(Year,SPR,fill=Year))+ geom_violin()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+ ylab("1-SPR") ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_SPR.png")) #Get simpler plots for SB0, SBcurrent, RSS, F, and SPR in terminal year # 
ggplot(reshape2::melt(Ensemble_Bratio,value.name="Bratio"),aes(Var2,Bratio))+ # stat_summary(geom = "line", fun = median)+ # ylim(0,1)+ # stat_summary(geom = "ribbon", fun.data = mean_cl_quantile, alpha = 0.3) #Make outputs show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[3],text="Saving ensemble objects") Model.outputs<-list("Spawning Output"=SpOt_en,"Relative Stock Status"=Bratio_en,"Fishing mortality"=F_en,"1-SPR"=SPR_en) Ensemble.outputs<-list("Spawning Output"=Ensemble_SO,"Relative Stock Status"=Ensemble_Bratio,"Fishing mortality"=Ensemble_F,"1-SPR"=Ensemble_SPR) Ensemble.outputs.plots<-list("Spawning Output"=Ensemble_SO_plot,"Relative Stock Status"=Ensemble_Bratio_plot,"Fishing mortality"=Ensemble_F_plot,"1-SPR"=Ensemble_SPR_plot) save(Model.outputs,file=paste0(Ensemble_model_dir_out,"/Model_results",".DMP")) save(Ensemble.outputs,file=paste0(Ensemble_model_dir_out,"/Ensemble_results",".DMP")) save(Ensemble.outputs.plots,file=paste0(Ensemble_model_dir_out,"/Ensemble_results_plots",".DMP")) remove_modal_spinner() # return(Ensemble.outputs) }) #}) #observeEvent(req(input$run_Ensemble&exists("Ensemble.outputs()")),{ # # }) #Create figures of weighted values # output$Sensi_comp_plot <- renderImage({ # image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/", # input$Sensi_comp_file, '.png')),mustWork=FALSE) # return(list( # src = image.path, # contentType = "image/png", # # width = 400, # # height = 300, # style='height:60vh')) # },deleteFile=FALSE) })
/.history/server_20230518121513.r
no_license
shcaba/SS-DL-tool
R
false
false
238,217
r
# ============================================================================
# SS-DL tool Shiny server: package loading, plotting theme, helper functions,
# and the shinyServer() entry point with the data-upload reactives.
# NOTE(review): require() returns FALSE on a missing package instead of
# erroring like library(); left as-is in this documentation-only pass.
# ============================================================================
require(shiny)
require(shinyjs)
require(r4ss)
require(plyr)
require(dplyr)
require(ggplot2)
require(reshape2)
require(data.table)
require(tidyr)
require(rlist)
require(viridis)
require(sss)
require(shinyWidgets)
require(shinyFiles)
require(HandyCode)
require(nwfscDiag)
require(shinybusy)
require(truncnorm)
require(flextable)
require(officer)
require(gridExtra)
require(ggpubr)
require(grid)
require(wesanderson)
require(adnuts)
require(shinystan)
require(geomtextpath)
#require(paletteer)
#require(RColorBrewer)
#require(ggthemes)
#devtools::load_all("C:/Users/Jason.Cope/Documents/Github/nwfscDiag")
# Shared helper functions for the app.
source('Functions.r',local = FALSE)
# Light ggplot2 theme used for all report figures.
theme_report <- function(base_size = 11) {
  half_line <- base_size/2
  theme_light(base_size = base_size) +
    theme(
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      axis.ticks.length = unit(half_line / 2.2, "pt"),
      strip.background = element_rect(fill = NA, colour = NA),
      strip.text.x = element_text(colour = "black"),
      strip.text.y = element_text(colour = "black"),
      panel.border = element_rect(fill = NA),
      legend.key.size = unit(0.9, "lines"),
      legend.key = element_rect(colour = NA, fill = NA),
      legend.background = element_rect(colour = NA, fill = NA)
    )
}
theme_set(theme_report())
# Server entry point; the matching closer is at the end of the file.
shinyServer(function(input, output,session) {
useShinyjs()
# NOTE(review): theme_report is re-defined here identically to the top-level
# definition above; the duplication looks redundant — confirm before removing.
theme_report <- function(base_size = 11) {
  half_line <- base_size/2
  theme_light(base_size = base_size) +
    theme(
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      axis.ticks.length = unit(half_line / 2.2, "pt"),
      strip.background = element_rect(fill = NA, colour = NA),
      strip.text.x = element_text(colour = "black"),
      strip.text.y = element_text(colour = "black"),
      panel.border = element_rect(fill = NA),
      legend.key.size = unit(0.9, "lines"),
      legend.key = element_rect(colour = NA, fill = NA),
      legend.background = element_rect(colour = NA, fill = NA)
    )
}
theme_set(theme_report())
#################
### FUNCTIONS ###
#################
# von Bertalanffy growth: length at age given Linf, k, and t0.
VBGF<-function(Linf, k, t0, ages){
  Linf * (1 - exp(-k * (ages - t0)))
}
# Inverse von Bertalanffy: age at a given length lt.
VBGF.age<-function(Linf,k,t0,lt){
  t0 - (log(1 - (lt / Linf)) / k)
}
# Run the Stock Synthesis executable found in `path`, dispatching on the
# operating system. ss.cmd holds extra command-line flags.
RUN.SS<-function(path,ss.cmd=" -nohess -nox",OS.in="Windows"){
  navigate <- paste("cd ", path, sep="")
  if(OS.in=="Windows") {
    #command <- paste0(navigate," & ", "ss", ss.cmd)
    #shell(command, invisible=TRUE, translate=TRUE)
    run(path,exe="ss",extras=ss.cmd,skipfinished=FALSE,show_in_console = TRUE)
  }
  if(OS.in=="Mac") {
    # Make the bundled binary executable, then run it from `path`.
    command <- c(paste("cd", path), "chmod +x ./ss_osx",paste("./ss_osx", ss.cmd))
    system(paste(command, collapse=";"),invisible=TRUE)
    #command <- paste0(path,"/./ss_mac", ss.cmd)
    #system(command, invisible=TRUE)
  }
  if(OS.in=="Linux") {
    command <- c(paste("cd", path), "chmod +x ./ss_linux",paste("./ss_linux", ss.cmd))
    system(paste(command, collapse=";"), invisible=TRUE)
  }
}
# Open a 300-dpi PNG device at wd/file; caller is responsible for dev.off().
pngfun <- function(wd, file,w=7,h=7,pt=12){
  file <- file.path(wd, file)
  cat('writing PNG to',file,'\n')
  png(filename=file, width=w,height=h, units='in',res=300,pointsize=pt)
}
rc <- function(n,alpha=1){
  # a subset of rich.colors by Arni Magnusson from the gregmisc package
  # a.k.a. rich.colors.short, but put directly in this function
  # to try to diagnose problem with transparency on one computer
  x <- seq(0, 1, length = n)
  r <- 1/(1 + exp(20 - 35 * x))
  g <- pmin(pmax(0, -0.8 + 6 * x - 5 * x^2), 1)
  b <- dnorm(x, 0.25, 0.15)/max(dnorm(x, 0.25, 0.15))
  rgb.m <- matrix(c(r, g, b), ncol = 3)
  rich.vector <- apply(rgb.m, 1, function(v) rgb(v[1], v[2], v[3], alpha=alpha))
}
# Double-normal (SS pattern 24) selectivity curve evaluated on lengths
# 1..2*Selpeak; returns a two-column matrix of length and selectivity.
doubleNorm24.sel <- function(Sel50,Selpeak,PeakDesc,LtPeakFinal,FinalSel) {
  #UPDATED: - input e and f on 0 to 1 scal and transfrom to logit scale
  # - changed bin width in peak2 calculation
  # - updated index of sel when j2 < length(x)
  # - renamed input parameters, cannot have same names as the logitstic function
  # - function not handling f < -1000 correctly
  x<-seq(1,Selpeak+Selpeak,1)
  bin_width <- x[2] - x[1]
  # Map user inputs onto the six SS double-normal parameters (a-f).
  a<- Selpeak
  b<- -log((max(x)-Selpeak-bin_width)/(PeakDesc-Selpeak-bin_width))
  c<- log(-((Sel50-Selpeak)^2/log(0.5)))
  d<- log(LtPeakFinal)
  e<- -15
  f<- -log((1/(FinalSel+0.000000001)-1))
  sel <- rep(NA, length(x))
  startbin <- 1
  peak <- a
  upselex <- exp(c)
  downselex <- exp(d)
  final <- f
  if (e < -1000) {
    j1 <- -1001 - round(e)
    sel[1:j1] <- 1e-06
  }
  if (e >= -1000) {
    j1 <- startbin - 1
    if (e > -999) {
      point1 <- 1/(1 + exp(-e))
      t1min <- exp(-(x[startbin] - peak)^2/upselex)
    }
  }
  if (f < -1000) j2 <- -1000 - round(f)
  if (f >= -1000) j2 <- length(x)
  peak2 <- peak + bin_width + (0.99 * x[j2] - peak - bin_width)/(1 + exp(-b))
  if (f > -999) {
    point2 <- 1/(1 + exp(-final))
    t2min <- exp(-(x[j2] - peak2)^2/downselex)
  }
  t1 <- x - peak
  t2 <- x - peak2
  # Logistic joins blend the ascending and descending limbs.
  join1 <- 1/(1 + exp(-(20/(1 + abs(t1))) * t1))
  join2 <- 1/(1 + exp(-(20/(1 + abs(t2))) * t2))
  if (e > -999) asc <- point1 + (1 - point1) * (exp(-t1^2/upselex) - t1min)/(1 - t1min)
  if (e <= -999) asc <- exp(-t1^2/upselex)
  if (f > -999) dsc <- 1 + (point2 - 1) * (exp(-t2^2/downselex) - 1)/(t2min - 1)
  if (f <= -999) dsc <- exp(-(t2)^2/downselex)
  idx.seq <- (j1 + 1):j2
  sel[idx.seq] <- asc[idx.seq] * (1 - join1[idx.seq]) + join1[idx.seq] * (1 - join2[idx.seq] + dsc[idx.seq] * join2[idx.seq])
  if (startbin > 1 && e >= -1000) {
    sel[1:startbin] <- (x[1:startbin]/x[startbin])^2 * sel[startbin]
  }
  if (j2 < length(x)) sel[(j2 + 1):length(x)] <- sel[j2]
  return(cbind(x,sel))
}
########## Clear data files and plots ############
# One reactiveValues holder per uploaded data type; `clear` marks a pending
# reset so the observe() readers skip re-loading a just-cleared file.
rv.Lt <- reactiveValues(data = NULL,clear = FALSE)
rv.Age <- reactiveValues(data = NULL,clear = FALSE)
rv.Ct <- reactiveValues(data = NULL,clear = FALSE)
rv.Index <- reactiveValues(data = NULL,clear = FALSE)
rv.AgeErr <- reactiveValues(data = NULL,clear = FALSE)
########
# Catch file: load on upload, reset on button press.
observe({
  req(input$file2)
  req(!rv.Ct$clear)
  rv.Ct$data <- fread(input$file2$datapath,check.names=FALSE,data.table=FALSE)
  # (dead code removed from view: read.csv2 fallback for ';'-separated files)
})
observeEvent(input$file2, {
  rv.Ct$clear <- FALSE
}, priority = 1000)
observeEvent(input$reset_ct, {
  rv.Ct$data <- NULL
  rv.Ct$clear <- TRUE
  reset('file2')
}, priority = 1000)
# Length-composition file.
observe({
  req(input$file1)
  req(!rv.Lt$clear)
  rv.Lt$data <- fread(input$file1$datapath,check.names=FALSE,data.table=FALSE)
})
observeEvent(input$file1, {
  rv.Lt$clear <- FALSE
}, priority = 1000)
observeEvent(input$reset_lt, {
  rv.Lt$data <- NULL
  rv.Lt$clear <- TRUE
  reset('file1')
}, priority = 1000)
# Age-composition file.
observe({
  req(input$file3)
  req(!rv.Age$clear)
  rv.Age$data <- fread(input$file3$datapath,check.names=FALSE,data.table=FALSE)
})
observeEvent(input$file3, {
  rv.Age$clear <- FALSE
}, priority = 1000)
observeEvent(input$reset_age, {
  rv.Age$data <- NULL
  rv.Age$clear <- TRUE
  reset('file3')
}, priority = 1000)
# Ageing-error file (no header row); cleared automatically when the user
# switches the ageing-error option off.
observe({
  req(input$file33)
  req(!rv.AgeErr$clear)
  rv.AgeErr$data <- fread(input$file33$datapath,check.names=FALSE,header=FALSE,data.table=FALSE)
})
observeEvent(input$file33, {
  rv.AgeErr$clear <- FALSE
  if(!input$Ageing_error_choice){
    rv.AgeErr$data <- NULL
    rv.AgeErr$clear <- TRUE
    reset('file33')}
}, priority = 1000)
# (dead commented-out variant of the file33 observer removed from view)
# Abundance-index file.
observe({
  req(input$file4)
  req(!rv.Index$clear)
  rv.Index$data <- fread(input$file4$datapath,check.names=FALSE,data.table=FALSE)
})
observeEvent(input$file4, {
  rv.Index$clear <- FALSE
}, priority = 1000)
observeEvent(input$reset_index, {
  rv.Index$data <- NULL
  rv.Index$clear <- TRUE
  reset('file4')
}, priority = 1000)
# Warn if fleet numbers are not consecutive (1..max) when considered across
# every loaded data set (catch, lengths, ages, indices).
observeEvent(req(any(!is.null(rv.Ct$data),!is.null(rv.Lt$data),!is.null(rv.Age$data),!is.null(rv.Index$data))),{
  ct.flt<-lt.flt<-age.flt<-index.flt<-NA
  # Catch fleets are catch-file columns; comps/index carry fleet in column 3.
  if(!is.null(rv.Ct$data)){ct.flt<-c(1:(ncol(rv.Ct$data)))}
  if(!is.null(rv.Lt$data)){lt.flt<-rv.Lt$data[,3]}
  if(!is.null(rv.Age$data)){age.flt<-rv.Age$data[,3]}
  if(!is.null(rv.Index$data)){index.flt<-rv.Index$data[,3]}
  fleets.no.negs<-unique(na.omit(c(ct.flt,lt.flt,age.flt,index.flt)))[unique(na.omit(c(ct.flt,lt.flt,age.flt,index.flt)))>0] #remove any negative fleets
  if(length(fleets.no.negs)!=length(seq(1:max(fleets.no.negs))))
  {
    sendSweetAlert(
      session = session,
      title = "Model Warning",
      text = "Non-consecutive fleet numbering.
Check all data sets (e.g., catch, lengths, ages, indices) to make sure all fleets from 1 to the maximum fleet number are found when considered across all data sets. For instance, if you have 3 total fleets, there should not be a fleet number > 3 (e.g., 1,2,4). All fleets are not expected in each data file, just across all data files.",
      type = "warning")
  }
})
#######
# (dead commented-out reset observers removed from view)
#####################################################
onclick("est_LHparms",id="panel_SS_est")
# Default visible state: data panel shown, user-model tab (11) hidden.
observe({
  shinyjs::show("Data_panel")
  hideTab(inputId = "tabs", target = "11")
  #shinyjs::hide("OS_choice")
  #shinyjs::hide("run_SS")
  #shinyjs::hide("run_SSS")
})
#To get the ObserveEvent to work, each statement in req needs to be unique.
#This explains the workaround of ((as.numeric(input$tabs)*x)/x)<4, where x is the unique type of assessment being run
#This input allows other tabs to have different side panels.
#Switch back to data from different tabs observeEvent(req(((as.numeric(input$tabs)*99)/99)<4), { shinyjs::show("Data_panel") shinyjs::show("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::hide("panel_RPs") shinyjs::hide("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::hide("panel_SSS_reps") shinyjs::hide("OS_choice") shinyjs::hide("Scenario_panel") shinyjs::hide("run_SSS") shinyjs::hide("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") hideTab(inputId = "tabs", target = "11") showTab(inputId = "tabs", target = "2") # hideTab(inputId = "tabs", target = "3") # hideTab(inputId = "tabs", target = "4") # hideTab(inputId = "tabs", target = "5") # hideTab(inputId = "tabs", target = "6") }) #Reset when all things are clicked off observeEvent(req(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)), { shinyjs::show("Data_panel") shinyjs::show("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") 
shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::hide("panel_RPs") shinyjs::hide("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::hide("panel_SSS_reps") shinyjs::hide("OS_choice") shinyjs::hide("Scenario_panel") shinyjs::hide("run_SSS") shinyjs::hide("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") hideTab(inputId = "tabs", target = "11") showTab(inputId = "tabs", target = "2") }) #User chosen model observeEvent(req(!is.null(input$user_model)&input$user_model), { shinyjs::show("Data_panel") shinyjs::show("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::show("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::show("panel_SS_jitter") 
shinyjs::show("panel_RPs") shinyjs::show("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_SSS_reps") shinyjs::hide("panel_advanced_SS") shinyjs::show("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::show("OS_choice") shinyjs::show("Scenario_panel") shinyjs::hide("run_SSS") shinyjs::show("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") #shinyjs::show("tab_sss") showTab(inputId = "tabs", target = "11") hideTab(inputId = "tabs", target = "2") }) #SSS panels observeEvent(req(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)), { shinyjs::show("Data_panel") shinyjs::hide("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") shinyjs::show("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::show("panel_SS_stock_status") shinyjs::show("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::show("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::show("panel_RPs") shinyjs::show("panel_Forecasts") shinyjs::show("panel_Mod_dims") shinyjs::show("panel_SSS_reps") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::show("panel_advanced_SSS") shinyjs::show("OS_choice") shinyjs::show("Scenario_panel") shinyjs::show("run_SSS") shinyjs::hide("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") 
shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") #shinyjs::show("tab_sss") showTab(inputId = "tabs", target = "11") hideTab(inputId = "tabs", target = "2") }) #SS-LO panels observeEvent(req(((as.numeric(input$tabs)*2)/2)<4&all(!is.null(c(rv.Lt$data,rv.Age$data)),is.null(rv.Ct$data))&any(is.null(input$user_model),!input$user_model)), { shinyjs::show("Data_panel") shinyjs::show("Existing_files") shinyjs::show("panel_Ct_F_LO") shinyjs::show("panel_data_wt_lt") if(length(unique(rv.Lt$data[,3]))>1|length(unique(rv.Age$data[,3]))>1){shinyjs::show("panel_ct_wt_LO")} if(length(unique(rv.Lt$data[,3]))==1|length(unique(rv.Age$data[,3]))==1){shinyjs::hide("panel_ct_wt_LO")} #if(input$Ct_F_LO_select){shinyjs::show("panel_ct_wt_LO")} #if(input$Ct_F_LO_select==NULL){shinyjs::hide("panel_ct_wt_LO")} shinyjs::hide("panel_SSS") shinyjs::show("panel_SSLO_LH") shinyjs::show("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::show("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::show("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::show("panel_SS_recdevs") shinyjs::show("panel_SS_jitter") shinyjs::show("panel_RPs") shinyjs::show("panel_Forecasts") shinyjs::show("panel_Mod_dims") shinyjs::show("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::hide("panel_SSS_reps") shinyjs::show("OS_choice") shinyjs::show("Scenario_panel") shinyjs::hide("run_SSS") shinyjs::show("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") hideTab(inputId = "tabs", target = "11") showTab(inputId = "tabs", target = "2") # 
showTab(inputId = "tabs", target = "3") # showTab(inputId = "tabs", target = "4") # hideTab(inputId = "tabs", target = "5") # showTab(inputId = "tabs", target = "6") }) #SS-CL fixed parameters observeEvent(req(((as.numeric(input$tabs)*3)/3)<4&all(any(input$est_parms==FALSE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))&any(is.null(input$user_model),!input$user_model)), { shinyjs::show("Data_panel") shinyjs::show("Existing_files") shinyjs::hide("panel_Ct_F_LO") if(any(!is.null(rv.Lt$data),!is.null(rv.Age$data))){shinyjs::show("panel_data_wt_lt")} else (shinyjs::hide("panel_data_wt_lt")) shinyjs::hide("panel_ct_wt_LO") shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::show("panel_SS_LH_fixed_est_tog") shinyjs::show("panel_SS_LH_fixed") shinyjs::show("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::show("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::show("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::show("panel_SS_recdevs") shinyjs::show("panel_SS_jitter") shinyjs::show("panel_RPs") shinyjs::show("panel_Forecasts") shinyjs::show("panel_Mod_dims") shinyjs::show("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::show("OS_choice") shinyjs::show("Scenario_panel") shinyjs::hide("panel_SSS_reps") shinyjs::hide("run_SSS") shinyjs::show("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") #shinyjs::hide(selector = "#navbar li a[data-value=11]") hideTab(inputId = "tabs", target = "11") showTab(inputId = "tabs", target = "2") # show(selector = 
'#hello li a[data-value="2"]') #show(selector = '#hello li a[data-value="2"]') # showTab(inputId = "tabs", target = "2") # showTab(inputId = "tabs", target = "3") # showTab(inputId = "tabs", target = "4") # showTab(inputId = "tabs", target = "5") # showTab(inputId = "tabs", target = "6") }) #SS-CL with parameter estimates observeEvent(req(((as.numeric(input$tabs)*4)/4)<4&all(input$est_parms==TRUE,any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))&any(is.null(input$user_model),!input$user_model)), { shinyjs::show("Data_panel") shinyjs::show("Existing_files") shinyjs::hide("panel_Ct_F_LO") if(any(!is.null(rv.Lt$data),!is.null(rv.Age$data))){shinyjs::show("panel_data_wt_lt")} else (shinyjs::hide("panel_data_wt_lt")) shinyjs::hide("panel_ct_wt_LO") shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::show("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::show("panel_SS_LH_est") shinyjs::show("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::show("panel_SS_prod_est") shinyjs::show("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::show("panel_SS_recdevs") shinyjs::show("panel_SS_jitter") shinyjs::show("panel_RPs") shinyjs::show("panel_Forecasts") shinyjs::show("panel_Mod_dims") shinyjs::show("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::show("OS_choice") shinyjs::show("Scenario_panel") shinyjs::hide("panel_SSS_reps") shinyjs::hide("run_SSS") shinyjs::show("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") hideTab(inputId = "tabs", target = "11") showTab(inputId 
= "tabs", target = "2") # showTab(inputId = "tabs", target = "3") # showTab(inputId = "tabs", target = "4") # showTab(inputId = "tabs", target = "5") # showTab(inputId = "tabs", target = "6") }) #Model Efficiency observeEvent(req((as.numeric(input$tabs)*12/12)==12), { shinyjs::hide("Data_panel") shinyjs::hide("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::hide("panel_RPs") shinyjs::hide("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::hide("OS_choice") shinyjs::hide("Scenario_panel") shinyjs::hide("panel_SSS_reps") shinyjs::hide("run_SSS") shinyjs::hide("run_SS") shinyjs::show("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") hideTab(inputId = "tabs", target = "11") showTab(inputId = "tabs", target = "2") # showTab(inputId = "tabs", target = "3") # showTab(inputId = "tabs", target = "4") # showTab(inputId = "tabs", target = "5") # showTab(inputId = "tabs", target = "6") }) #Profiles observeEvent(req((as.numeric(input$tabs)*4/4)==4), { shinyjs::hide("Data_panel") shinyjs::hide("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") 
shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::hide("panel_RPs") shinyjs::hide("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::hide("OS_choice") shinyjs::hide("Scenario_panel") shinyjs::hide("panel_SSS_reps") shinyjs::hide("run_SSS") shinyjs::hide("run_SS") shinyjs::hide("Modeff_panel") shinyjs::show("Profile_panel") shinyjs::hide("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") hideTab(inputId = "tabs", target = "11") showTab(inputId = "tabs", target = "2") # showTab(inputId = "tabs", target = "3") # showTab(inputId = "tabs", target = "4") # showTab(inputId = "tabs", target = "5") # showTab(inputId = "tabs", target = "6") }) #Retrospecitves observeEvent(req((as.numeric(input$tabs)*5/5)==5), { shinyjs::hide("Data_panel") shinyjs::hide("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") 
shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::hide("panel_RPs") shinyjs::hide("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::hide("OS_choice") shinyjs::hide("Scenario_panel") shinyjs::hide("panel_SSS_reps") shinyjs::hide("run_SSS") shinyjs::hide("run_SS") shinyjs::hide("Modeff_panel") shinyjs::hide("Profile_panel") shinyjs::show("Retro_panel") shinyjs::hide("Sensi_Comparison_panel") shinyjs::hide("Ensemble_panel") hideTab(inputId = "tabs", target = "11") showTab(inputId = "tabs", target = "2") # showTab(inputId = "tabs", target = "3") # showTab(inputId = "tabs", target = "4") # showTab(inputId = "tabs", target = "5") # showTab(inputId = "tabs", target = "6") }) #Sensitivities observeEvent(req((as.numeric(input$tabs)*6/6)==6), { shinyjs::hide("Data_panel") shinyjs::hide("Existing_files") shinyjs::hide("panel_Ct_F_LO") shinyjs::hide("panel_data_wt_lt") shinyjs::hide("panel_ct_wt_LO") shinyjs::hide("panel_SSS") shinyjs::hide("panel_SSLO_LH") shinyjs::hide("panel_SSLO_fixed") shinyjs::hide("panel_SS_LH_fixed_est_tog") shinyjs::hide("panel_SS_LH_fixed") shinyjs::hide("panel_SS_fixed") shinyjs::hide("panel_SS_LH_est") shinyjs::hide("panel_SS_est") shinyjs::hide("panel_SS_stock_status") shinyjs::hide("panel_SSS_prod") shinyjs::hide("panel_SS_LO_prod") shinyjs::hide("panel_SS_prod_fixed") shinyjs::hide("panel_SS_prod_est") shinyjs::hide("panel_selectivity") shinyjs::hide("panel_selectivity_sss") shinyjs::hide("panel_SS_recdevs") shinyjs::hide("panel_SS_jitter") shinyjs::hide("panel_RPs") shinyjs::hide("panel_Forecasts") shinyjs::hide("panel_Mod_dims") shinyjs::hide("panel_advanced_SS") shinyjs::hide("panel_advanced_user_SS") shinyjs::hide("panel_advanced_SSS") shinyjs::hide("OS_choice") 
# Tail of the Sensitivities-tab handler (opened in the previous chunk).
  shinyjs::hide("Scenario_panel")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::hide("run_SSS")
  shinyjs::hide("run_SS")
  shinyjs::hide("Modeff_panel")
  shinyjs::hide("Profile_panel")
  shinyjs::hide("Retro_panel")
  shinyjs::show("Sensi_Comparison_panel")  # the one panel this tab exposes
  shinyjs::hide("Ensemble_panel")
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # showTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

# --- Tab navigation: Ensembles tab (tab id "7") ---------------------------
observeEvent(req((as.numeric(input$tabs)*7/7)==7), {
  shinyjs::hide("Data_panel")
  shinyjs::hide("Existing_files")
  shinyjs::hide("panel_Ct_F_LO")
  shinyjs::hide("panel_data_wt_lt")
  shinyjs::hide("panel_ct_wt_LO")
  shinyjs::hide("panel_SSS")
  shinyjs::hide("panel_SSLO_LH")
  shinyjs::hide("panel_SSLO_fixed")
  shinyjs::hide("panel_SS_LH_fixed_est_tog")
  shinyjs::hide("panel_SS_LH_fixed")
  shinyjs::hide("panel_SS_fixed")
  shinyjs::hide("panel_SS_LH_est")
  shinyjs::hide("panel_SS_est")
  shinyjs::hide("panel_SS_stock_status")
  shinyjs::hide("panel_SSS_prod")
  shinyjs::hide("panel_SS_LO_prod")
  shinyjs::hide("panel_SS_prod_fixed")
  shinyjs::hide("panel_SS_prod_est")
  shinyjs::hide("panel_selectivity")
  shinyjs::hide("panel_selectivity_sss")
  shinyjs::hide("panel_SS_recdevs")
  shinyjs::hide("panel_SS_jitter")
  shinyjs::hide("panel_RPs")
  shinyjs::hide("panel_Forecasts")
  shinyjs::hide("panel_Mod_dims")
  shinyjs::hide("panel_advanced_SS")
  shinyjs::hide("panel_advanced_user_SS")
  shinyjs::hide("panel_advanced_SSS")
  shinyjs::hide("panel_SSS_reps")
  shinyjs::hide("OS_choice")
  shinyjs::hide("Scenario_panel")
  shinyjs::hide("run_SSS")
  shinyjs::hide("run_SS")
  shinyjs::hide("Modeff_panel")
  shinyjs::hide("Profile_panel")
  shinyjs::hide("Retro_panel")
  shinyjs::hide("Sensi_Comparison_panel")
  shinyjs::show("Ensemble_panel")  # the one panel this tab exposes
  hideTab(inputId = "tabs", target = "11")
  showTab(inputId = "tabs", target = "2")
  # showTab(inputId = "tabs", target = "3")
  # showTab(inputId = "tabs", target = "4")
  # showTab(inputId = "tabs", target = "5")
  # showTab(inputId = "tabs", target = "6")
})

########################################
#############################
######### UI INPUTS #########
#############################
# User activated pop-up parameter values ---------------

#Model dimensions
# Start/end model-year inputs derived from whichever data files are loaded
# (lengths = rv.Lt$data, catches = rv.Ct$data, ages = rv.Age$data).
output$Model_dims1 <- renderUI({
  inFile1 = rv.Lt$data
  inFile2 = rv.Ct$data
  inFile3 = rv.Age$data
  #No file inputs
  if (is.null(inFile1) & is.null(inFile2) & is.null(inFile3)) return(NULL)
  #If have lengths and/or ages, but no catches
  # NOTE(review): inFile1[,1] errors when only ages are loaded (inFile1 is
  # NULL); confirm lengths are always present when this branch is reached.
  if (any(!is.null(inFile1), !is.null(inFile3))& is.null(inFile2)){
    styr.in = min(inFile1[,1],inFile3[,1])
    endyr.in = max(inFile1[,1],inFile3[,1])
    # if(!(anyNA(c(Linf(), k_vbgf(),t0_vbgf())))& input$Ct_F_LO_select=="Constant Catch"){
    # styr.in = min(inFile1[,1],inFile3[,1])-round(VBGF.age(Linf(), k_vbgf(), t0_vbgf(), Linf()*0.95))
    # }
  }
  #If have catches
  if (!is.null(inFile2)){
    styr.in<-min(inFile2[,1])
    endyr.in<-max(inFile2[,1])
  }
  #If lengths or ages with catches
  if (!is.null(inFile1) &!is.null(inFile2)|!is.null(inFile3) &!is.null(inFile2)){
    styr.in<-min(inFile1[,1],inFile2[,1],inFile3[,1])
    endyr.in<-max(inFile1[,1],inFile2[,1],inFile3[,1])
  }
  fluidRow(column(width=4, numericInput("styr", "Starting year",
                  value=styr.in, min=1, max=10000, step=1)),
           column(width=4, numericInput("endyr","Ending year",
                  value=endyr.in, min=1, max=10000, step=1)))
  # if (!is.null(inFile2)){
  # fluidRow(column(width=4, numericInput("styr", "Starting year",
  # value=min(inFile2[,1]), min=1, max=10000, step=1)),
  # column(width=4, numericInput("endyr", "Ending year",
  # value=max(inFile2[,1]), min=1, max=10000, step=1)))
  # }
  # print(styr.in)
  # print(endyr.in)
})

# Start/end-year inputs when the catch file alone defines the model years.
output$Model_dims2 <- renderUI({
  Ct.data = rv.Ct$data
  # if (is.null(Ct.data)) return(NULL)
  if (!is.null(Ct.data)){
    fluidRow(column(width=4, numericInput("styr", "Starting year",
                    value=min(Ct.data[,1]), min=1, max=10000, step=1)),
             column(width=4, numericInput("endyr", "Ending year",
                    value=max(Ct.data[,1]), min=1,
max=10000, step=1))) } }) # output$Female_parms_inputs_label <- reactive({ # if(!is.null(input$file1)) # { # (output$Female_parms_inputs_label<- renderUI({ # fluidRow(column(width=6,numericInput("Nages","Max. age", value=NA,min=1, max=1000, step=1)), # column(width=6,numericInput("M_f", "Natural mortality", value=NA,min=0, max=10000, step=0.01))) # })) # } # }) #Male life history parameters output$Male_parms_inputs_label <- renderUI({ if(input$male_parms){ h5(em("Male")) } }) output$Male_parms_inputs1 <- renderUI({ if(input$male_parms){ fluidRow(column(width=6, numericInput("M_m", "Natural mortality", value=NA, min=0, max=10000, step=0.01)), column(width=6, numericInput("Linf_m", "Asymptotic size (Linf)", value=NA, min=0, max=10000, step=0.01))) } }) output$Male_parms_inputs2 <- renderUI({ if(input$male_parms){ fluidRow(column(width=6, numericInput("k_m", "Growth coefficient k", value=NA, min=0, max=10000, step=0.01)), column(width=6, numericInput("t0_m", "Age at length 0 (t0)", value=NA, min=0, max=10000, step=0.01))) } }) output$Male_parms_inputs3 <- renderUI({ if(input$male_parms){ fluidRow(column(width=6, textInput("CV_lt_m", "CV at length (young then old)", value="0.1,0.1"))) } }) output$Male_parms_inputs4 <- renderUI({ if(input$male_parms){ fluidRow(column(width=6, numericInput("WLa_m", "Weight-length alpha", value=0.00001, min=0, max=10000, step=0.000000001)), column(width=6, numericInput("WLb_m", "Weight-length beta", value=3, min=0, max=10000, step=0.01))) } }) output$Male_parms_inputs_label_fix <- renderUI({ if(input$male_parms_fix){ h5(em("Male")) } }) output$Male_parms_inputs1_fix <- renderUI({ if(input$male_parms_fix){ fluidRow(column(width=6, numericInput("M_m_fix", "Natural mortality", value=NA, min=0, max=10000, step=0.01)), column(width=6, numericInput("Linf_m_fix", "Asymptotic size (Linf)", value=NA, min=0, max=10000, step=0.01))) } }) output$Male_parms_inputs2_fix <- renderUI({ if(input$male_parms_fix){ fluidRow(column(width=6, 
numericInput("k_m_fix", "Growth coefficient k", value=NA, min=0, max=10000, step=0.01)), column(width=6, numericInput("t0_m_fix", "Age at length 0 (t0)", value=NA, min=0, max=10000, step=0.01))) } }) output$Male_parms_inputs3_fix <- renderUI({ if(input$male_parms_fix){ fluidRow(column(width=6, textInput("CV_lt_m_fix", "CV at length (young then old)", value="0.1,0.1"))) } }) output$Male_parms_inputs4_fix <- renderUI({ if(input$male_parms_fix){ fluidRow(column(width=6, numericInput("WLa_m_fix", "Weight-Length alpha", value=0.00001, min=0, max=10000, step=0.000000001)), column(width=6, numericInput("WLb_m_fix", "Weight-length beta", value=3, min=0, max=10000, step=0.01))) } }) output$Male_parms_inputs_label_est <- renderUI({ if(input$male_parms_est){ h4(em("Male")) } }) output$Male_parms_inputs_M_est <- renderUI({ if(input$male_parms_est){ dropdownButton( selectInput("M_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")), numericInput("M_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001), numericInput("M_m_SD", "SD", value=0,min=0, max=10000, step=0.001), numericInput("M_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001), circle = FALSE, right=TRUE, status = "danger", icon = icon("skull-crossbones"), width = "300px",label="Natural mortality" ) } }) output$Male_parms_inputs_space1 <- renderUI({ if(input$male_parms_est){ br() } }) output$Male_parms_inputs_space2 <- renderUI({ if(input$male_parms_est){ br() } }) output$Male_parms_inputs_space3 <- renderUI({ if(input$male_parms_est){ br() } }) output$Male_parms_inputs_space4 <- renderUI({ if(input$male_parms_est){ br() } }) output$Male_parms_inputs_space5 <- renderUI({ if(input$male_parms_est){ br() } }) output$Male_parms_inputs_Growth_label <- renderUI({ if(input$male_parms_est){ h5(strong("Growth")) } }) output$Male_parms_inputs_Linf_est <- renderUI({ if(input$male_parms_est){ dropdownButton( selectInput("Linf_m_prior","Prior type",c("no prior","symmetric beta", 
"beta","lognormal","gamma","normal")), numericInput("Linf_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001), numericInput("Linf_m_SD", "SD", value=0,min=0, max=10000, step=0.001), numericInput("Linf_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001), circle = FALSE, right=TRUE, status = "danger", icon = icon("infinity"), width = "300px",label="Linf: Asymptotic size" ) } }) output$Male_parms_inputs_k_est <- renderUI({ if(input$male_parms_est){ dropdownButton( selectInput("k_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")), numericInput("k_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001), numericInput("k_m_SD", "SD", value=0,min=0, max=10000, step=0.001), numericInput("k_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001), circle = FALSE, right=TRUE, status = "danger", icon = icon("ruler-horizontal"), width = "300px",label="k: VB growth coefficient" ) } }) output$Male_parms_inputs_t0_est <- renderUI({ if(input$male_parms_est){ dropdownButton( selectInput("t0_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")), numericInput("t0_m_mean", "Mean", value=0,min=-100, max=100, step=0.001), numericInput("t0_m_SD", "SD", value=0,min=0, max=100, step=0.001), numericInput("t0_m_phase", "Phase", value=-1,min=-999, max=100, step=0.001), circle = FALSE, right=TRUE, status = "danger", icon = icon("baby-carriage"), width = "300px",label="t0: Age at size 0" ) } }) output$Male_parms_inputs_CV_est_young <- renderUI({ if(input$male_parms_est){ dropdownButton( selectInput("CV_lt_m_young_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")), numericInput("CV_lt_m_young_mean", "Mean", value=0.1,min=0, max=10000, step=0.001), numericInput("CV_lt_m_young_SD", "SD", value=0,min=0, max=10000, step=0.001), numericInput("CV_lt_m_young_phase", "Phase", value=-1,min=-999, max=10, step=0.001), circle = FALSE, right=TRUE, status = "danger", icon = 
icon("dice"), width = "300px",label="CV at length (young)"
    )
  }
})
# CV of length-at-age (old fish) prior/phase dropdown.
output$Male_parms_inputs_CV_est_old <- renderUI({
  if(input$male_parms_est){
    dropdownButton(
      selectInput("CV_lt_m_old_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
      numericInput("CV_lt_m_old_mean", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_old_SD", "SD", value=0,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_old_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length (old)"
    )
  }
})
# Weight-length relationship inputs (fixed, not estimated).
output$Male_parms_inputs_WL_est <- renderUI({
  if(input$male_parms_est){
    fluidRow(column(width=6, numericInput("WLa_m_est", "Weight-length alpha", value=0.00001, min=0, max=10000, step=0.000000001)),
             column(width=6, numericInput("WLb_m_est", "Weight-length beta", value=3, min=0, max=10000, step=0.01)))
  }
})

# Legacy fluidRow-based layout for the estimated male parameters, kept
# commented out for reference.
#h5(strong("M")),
# fluidRow(column(width=4,style='padding:1px;',align="center", selectInput("M_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',align="center",numericInput("M_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("M_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("M_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_Linf_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("Linf")),
# fluidRow(column(width=4,style='padding:1px;',align="center",selectInput("Linf_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("Linf_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_k_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("k")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("k_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("k_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("k_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("k_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_t0_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("t0")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("t0_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("t0_m_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("t0_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("t0_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })
# output$Male_parms_inputs_CV_est <- renderUI({
# if(input$male_parms_est){
# #h5(strong("Length CV")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("CV_lt_m_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal"))),
# column(width=3,style='padding:2px;',numericInput("CV_lt_m_mean", "Mean", value=0.1,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("CV_lt_m_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("CV_lt_m_phase", "Phase", value=-1,min=-999, max=10, step=0.001)))
# }
# })

#Male life history parameters
# "_sss" variants for the SSS (Simple Stock Synthesis) panel, shown when
# input$male_parms_SSS is checked; priors only, no estimation phases.
output$Male_parms_inputs_label_SSS<- renderUI({
  if(input$male_parms_SSS){
    h5(em("Male"))
  }
})
output$Male_parms_inputs_M_SSS<- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("M_m_prior_sss","Prior type",c("lognormal","normal","uniform","no prior")),
      numericInput("M_m_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("M_m_SD_sss", "SD", value=0.44,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("skull-crossbones"), width = "300px",label="Natural mortality"
    )
  }
})
# Vertical spacers for the SSS male-parameter layout.
output$Male_parms_inputs_space1_SSS <- renderUI({ if(input$male_parms_SSS){ br() } })
output$Male_parms_inputs_space2_SSS <- renderUI({ if(input$male_parms_SSS){ br() } })
output$Male_parms_inputs_space3_SSS <- renderUI({ if(input$male_parms_SSS){ br() } })
output$Male_parms_inputs_space4_SSS <- renderUI({ if(input$male_parms_SSS){ br() } })
output$Male_parms_inputs_space5_SSS <- renderUI({ if(input$male_parms_SSS){ br() } })
output$Male_parms_inputs_Growth_label_SSS <- renderUI({
  if(input$male_parms_SSS){
    h5(strong("Growth"))
  }
})
output$Male_parms_inputs_Linf_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("Linf_m_prior_sss","Prior type",c("no prior","normal")),
      numericInput("Linf_m_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("Linf_m_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("infinity"), width = "300px",label="Linf: Asymptotic size"
    )
  }
})
output$Male_parms_inputs_k_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("k_m_prior_sss","Prior type",c("no prior","normal")),
      numericInput("k_m_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
      numericInput("k_m_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("ruler-horizontal"), width =
"300px",label="k: VB growth coefficient"
    )
  }
})
# t0 prior dropdown for the SSS male panel.
output$Male_parms_inputs_t0_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("t0_m_prior_sss","Prior type",c("no prior","normal")),
      numericInput("t0_m_mean_sss", "Mean", value=0,min=-100, max=100, step=0.001),
      numericInput("t0_m_SD_sss", "SD", value=0,min=0, max=1000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("baby-carriage"), width = "300px",label="t0: Age at size 0"
    )
  }
})
# CV of length-at-age (young) for the SSS male panel; no prior choices.
output$Male_parms_inputs_CV_young_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("CV_lt_m_young_prior_sss","Prior type",c("no prior")),
      numericInput("CV_lt_m_young_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_young_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length"
    )
  }
})
# CV of length-at-age (old) for the SSS male panel.
output$Male_parms_inputs_CV_old_SSS <- renderUI({
  if(input$male_parms_SSS){
    dropdownButton(
      selectInput("CV_lt_m_old_prior_sss","Prior type",c("no prior")),
      numericInput("CV_lt_m_old_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.001),
      numericInput("CV_lt_m_old_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
      circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length"
    )
  }
})
# Weight-length relationship for the SSS male panel.
output$Male_parms_inputs_WL_SSS<- renderUI({
  if(input$male_parms_SSS){
    fluidRow(column(width=6,numericInput("WLa_m_sss", "Weight-Length alpha", value=0.00001,min=0, max=10000, step=0.000000001)),
             column(width=6,numericInput("WLb_m_sss","Weight-length beta", value=3,min=0, max=10000, step=0.01)))
  }
})

#Selectivity paramters
# Length-based selectivity inputs (entered as text so multiple fleets can
# be comma-separated); "Est. phase" is the SS estimation phase.
output$Sel_parms1 <- renderUI({
  fluidRow(column(width=8, textInput("Sel50", "Length at 50% Selectivity",value="")),
           column(width=4, textInput("Sel50_phase", "Est. phase", value="")))
})
# FIX(review): label typo "Selectvity" -> "Selectivity".
output$Sel_parms2<- renderUI({
  fluidRow(column(width=8, textInput("Selpeak", "Length at Peak Selectivity", value="")),
           column(width=4, textInput("Selpeak_phase", "Est. phase", value="")))
})
# Dome-shaped descending-limb parameters, shown only for that choice.
output$Sel_parms3 <- renderUI({
  if(input$Sel_choice=="Dome-shaped"){
    fluidRow(column(width=8, textInput("PeakDesc", "Length at 1st declining selectivity",value="10000")),
             column(width=4, textInput("PeakDesc_phase", "Est. phase",value="")))
  }
})
output$Sel_parms4 <- renderUI({
  if(input$Sel_choice=="Dome-shaped"){
    fluidRow(column(width=8, textInput("LtPeakFinal", "Width of declining selectivity",value="0.0001")),
             column(width=4, textInput("LtPeakFinal_phase", "Est. phase",value="")))
  }
})
output$Sel_parms5 <- renderUI({
  if(input$Sel_choice=="Dome-shaped"){
    fluidRow(column(width=8, textInput("FinalSel", "Selectivity at max bin size",value="0.99999")),
             column(width=4, textInput("FinalSel_phase", "Est. phase",value="")))
  }
})
# SSS-panel selectivity inputs (no estimation phases).
# FIX(review): label typo "Selectvity" -> "Selectivity".
output$Sel_parms1_sss <- renderUI({
  fluidRow(column(width=6, textInput("Sel50_sss", "Length at 50% Selectivity",value="")),
           column(width=6, textInput("Selpeak_sss", "Length at Peak Selectivity", value="")))
})
output$Sel_parms2_sss <- renderUI({
  if(input$Sel_choice_sss=="Dome-shaped"){
    fluidRow(column(width=6, textInput("PeakDesc_sss", "Length at 1st declining selectivity",value="10000")),
             column(width=6, textInput("LtPeakFinal_sss", "Width of declining selectivity",value="0.0001")))
  }
})
output$Sel_parms3_sss <- renderUI({
  if(input$Sel_choice_sss=="Dome-shaped"){
    fluidRow(column(width=8, textInput("FinalSel_sss", "Selectivity at max bin size",value="0.99999")))
  }
})

#Recruitment parameter inputs
# FIX(review): label typo "varaibility" -> "variability".
output$Rec_options1 <- renderUI({
  if(input$rec_choice){
    fluidRow(column(width=6, numericInput("sigmaR", "Rec. variability (sR)", value=0.5, min=0, max=10, step=0.01)))
  }
})
# Recruitment-deviation year range; defaults to the model year range.
output$Rec_options2 <- renderUI({
  if(input$rec_choice){
    fluidRow(column(width=6, numericInput("Rdev_startyr", "Rec. devs. start year", value=input$styr, min=1, max=10000, step=1)),
             column(width=6, numericInput("Rdev_endyr", "Rec. devs. end year", value=input$endyr, min=1, max=10000, step=1)))
  }
})
# Bias-correction ramp years (SS bias-adjustment controls).
output$Rec_options3 <- renderUI({
  if(input$biasC_choice){
    fluidRow(column(width=6, numericInput("NobiasC_early", "Early last year", value=input$styr, min=1, max=10000, step=1)),
             column(width=6, numericInput("NobiasC_recent", "1st recent year", value=input$endyr, min=1, max=10000, step=1)))
  }
})
output$Rec_options4 <- renderUI({
  if(input$biasC_choice){
    fluidRow(column(width=6, numericInput("BiasC_startyr", "Start year", value=input$styr, min=1, max=10000, step=1)),
             column(width=6, numericInput("BiasC_endyr", "End year", value=input$endyr, min=1, max=10000, step=1)))
  }
})
output$Rec_options5 <- renderUI({
  if(input$biasC_choice){
    fluidRow(column(width=6, numericInput("BiasC","Maximum bias adjustment", value=1,min=0, max=1, step=0.001)))
  }
})
output$Rec_options6 <- renderUI({
  if(input$rec_choice){
    fluidRow(column(width=6, selectInput("RecDevChoice","Recruit deviation option",c("1: Devs sum to zero","2: Simple deviations","3: deviation vector","4: option 3 plus penalties"),selected="1: Devs sum to zero")))
  }
})

#Jitter value
# NOTE(review): Njitter default (0) is below its stated min (1) — confirm
# whether 0 is meant to signal "no jitter runs".
output$Jitter_value <- renderUI({
  if(input$jitter_choice){
    fluidRow(column(width=6, numericInput("jitter_fraction", "Jitter value", value=0.01, min=0, max=10, step=0.001)),
             column(width=6, numericInput("Njitter", "# of jitters", value=0, min=1, max=10000, step=1)))
  }
})

#Choose reference points
output$RP_selection1<- renderUI({
  if(input$RP_choices){
    fluidRow(column(width=6, numericInput("SPR_target", "SPR target", value=0.5, min=0, max=1, step=0.001)),
             column(width=6, numericInput("B_target", "Biomass target", value=0.4, min=0, max=1, step=0.001)))
  }
})
# Harvest control rule type plus the upper/lower relative-biomass
# breakpoints of the ramp.
output$RP_selection2<- renderUI({
  if(input$RP_choices){
    fluidRow(column(width=6,selectInput("CR_Ct_F","Control rule type", c("1: Catch fxn of SSB, buffer on F", "2: F fxn of SSB, buffer on F", "3: Catch fxn of SSB, buffer on catch", "4: F fxn of SSB, buffer on catch"))),
             #column(width=4, numericInput("CR_Ct_F", "Control rule type",
             # value=1, min=0, max=1, step=0.001)),
             column(width=3, numericInput("slope_hi", "Upper ratio value", value=0.4, min=0, max=1, step=0.001)),
             column(width=3, numericInput("slope_low", "Lower ratio value", value=0.1, min=0, max=1, step=0.001)))
  }
})
output$Forecasts<- renderUI({
  if(input$Forecast_choice){
    fluidRow(column(width=6, numericInput("forecast_num", "# of forecast years", value=2, min=1, max=1000, step=1)),
             column(width=6, textInput("forecast_buffer", "Control rule buffer", value="1")))
  }
})

# Advanced SS options: paired outputs ("..." and "..._user") reuse the same
# inputId because only one variant is rendered at a time.
output$AdvancedSS_nohess<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "no_hess", label = "Turn off Hessian (speeds up runs, but no variance estimation)", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_nohess_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "no_hess", label = "Turn off Hessian (speeds up runs, but no variance estimation)", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_addcomms<- renderUI({
  fluidRow(column(width=6, prettyCheckbox( inputId = "add_comms", label = "Add additional SS run commands", shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_addcomms_comms <- renderUI({
  if(!is.null(input$add_comms)){
    if(input$add_comms){
      fluidRow(column(width=12, textInput("add_comms_in", "Enter additional run commands", value="")))
    }
  }
})
output$AdvancedSS_addcomms_user<- renderUI({
  fluidRow(column(width=6, prettyCheckbox( inputId = "add_comms", label = "Add additional SS run commands", shape = "round", outline = TRUE, status = "info")))
})
# NOTE(review): this checks input$add_comms_user, but no widget with id
# "add_comms_user" is created in this file chunk (the "_user" checkbox
# above reuses id "add_comms"); confirm the intended input id, otherwise
# this UI can never appear.
output$AdvancedSS_addcomms_comms_user <- renderUI({
  if(!is.null(input$add_comms_user)){
    if(input$add_comms_user){
      fluidRow(column(width=12, textInput("add_comms_in", "Enter additional run commands", value="")))
    }
  }
})
output$AdvancedSS_noplots<-
renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "no_plots_tables", label = "Turn off plots", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_noplots_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "no_plots_tables", label = "Turn off plots", shape = "round", outline = TRUE, status = "info")))
  # }
})
# FIX(review): label typo "exectutive" -> "executive" (both variants).
output$AdvancedSS_noestabs<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "no_tables", label = "No executive summary tables", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_noestabs_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "no_tables", label = "No executive summary tables", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_par<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_par", label = "Use par file (i.e., parameter file from previous run)?", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_par_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_par", label = "Use par file (i.e., parameter file from previous run)?", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_phase0<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_phase0", label = "Turn off estimation of all parameters (phase = 0)?", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_phase0_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_phase0", label = "Turn off estimation of all parameters (phase = 0)?", shape = "round", outline = TRUE, status = "info")))
  # }
})
# Checkboxes controlling which *.ss_new files seed the next run.
output$AdvancedSS_datanew<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_datanew", label = "Use the data_echo.ss_new file?", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_datanew_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_datanew", label = "Use the data_echo.ss_new file?", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_controlnew<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_controlnew", label = "Use the control.ss_new file?", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_controlnew_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_controlnew", label = "Use the control.ss_new file?", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_forecastnew<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_forecastnew", label = "Use the forecast.ss_new file?", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_forecastnew_user<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "use_forecastnew", label = "Use the forecast.ss_new file?", shape = "round", outline = TRUE, status = "info")))
  # }
})
# Growth-type platoon options (SS "growth patterns").
output$AdvancedSS_GT1<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "GT1", label = "Use only one growth type (default is 5)", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_GT5_SSS<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "GT5", label = "Use 5 growth types (default is 1)", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_Sex3<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "Sex3", label = "Retain sex ratio in length compositions (Sex option = 3)", shape = "round", outline = TRUE, status = "info")))
  # }
})
output$AdvancedSS_Indexvar<- renderUI({
  # if(input$advance_ss_click){
  fluidRow(column(width=6, prettyCheckbox( inputId = "Indexvar", label = "Estimate additional variance on each abundance index?", shape = "round", outline = TRUE, status = "info")))
  # }
})
# Optional user-supplied ageing-error matrix file.
output$AdvancedSS_ageerror<- renderUI({
  fluidRow(column(width=12, prettyCheckbox( inputId = "Ageing_error_choice", label = "Add custom ageing error matrices?", shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_ageerror_in <- renderUI({
  if(!is.null(input$Ageing_error_choice)){
    if(input$Ageing_error_choice){
      #h4(strong("Choose data file")),
      fluidRow(column(width=12,fileInput('file33', 'Ageing error file',
                      accept = c(
                        'text/csv',
                        'text/comma-separated-values',
                        'text/tab-separated-values',
                        'text/plain',
                        '.csv'
                      )
      )))
    }
  }
})
# Per-fleet catch units (biomass vs numbers) for SS and SSS panels.
output$AdvancedSS_Ctunits<- renderUI({
  fluidRow(column(width=12, prettyCheckbox( inputId = "Ct_units_choice", label = "Specify catch units (1=biomass (default); 2=numbers) for each fleet?", shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_Ctunitsfleets <- renderUI({
  if(!is.null(input$Ct_units_choice)){
    if(input$Ct_units_choice){
      fluidRow(column(width=12, textInput("fleet_ct_units", "Enter catch units for each fleet", value="")))
    }
  }
})
output$AdvancedSS_Ctunits_SSS<- renderUI({
  fluidRow(column(width=12, prettyCheckbox( inputId = "Ct_units_choice_SSS", label = "Specify catch units for each fleet?", shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_Ctunitsfleets_SSS<- renderUI({
  if(!is.null(input$Ct_units_choice_SSS)){
    if(input$Ct_units_choice_SSS){
      fluidRow(column(width=12, textInput("fleet_ct_units_SSS", "Enter catch units for each fleet (1=biomass; 2=numbers)", value="")))
    }
  }
})
# Retrospective-run controls: years are entered as negative offsets from
# the current (final) model year.
output$AdvancedSS_retro_choice<- renderUI({
  fluidRow(column(width=6, prettyCheckbox( inputId = "Retro_choice", label = "Do retrospective runs? Input minus from current year", shape = "round", outline = TRUE, status = "info")))
})
# FIX(review): min/max were reversed (min=-1, max=-500); for negative
# offsets the valid range is [-500, -1].  step left as in the original.
output$AdvancedSS_retro_years <- renderUI({
  if(!is.null(input$Retro_choice)){
    if(input$Retro_choice){
      fluidRow(column(width=6, numericInput("first_retro_year", "1st retro year", value=-1, min=-500, max=-1, step=-1)),
               column(width=6, numericInput("final_retro_year", "Last retro year", value=-10, min=-500, max=-1, step=-1)))
    }
  }
})
output$AdvancedSS_retro_choice_user <- renderUI({
  fluidRow(column(width=6, prettyCheckbox( inputId = "Retro_choice", label = "Do retrospective runs? Input minus from current year", shape = "round", outline = TRUE, status = "info")))
})
output$AdvancedSS_retro_years_user <- renderUI({
  if(!is.null(input$Retro_choice)){
    if(input$Retro_choice){
      fluidRow(column(width=6, numericInput("first_retro_year", "1st retro year", value=-1, min=-500, max=-1, step=-1)),
               column(width=6, numericInput("final_retro_year", "Last retro year", value=-10, min=-500, max=-1, step=-1)))
    }
  }
})
# Length-composition bin setup; bin width defaults to the spacing of the
# uploaded length-comp columns, max bin to ~Linf * 1.2326 rounded to an
# even number.
output$AdvancedSS_Ltbin <- renderUI({
  # if(input$advance_ss_click){
  if(!is.null(rv.Lt$data)){bin.step<-as.numeric(colnames(rv.Lt$data)[7])-as.numeric(colnames(rv.Lt$data)[6])}
  if(is.null(rv.Lt$data)){bin.step<-2}
  fluidRow(column(width=4, numericInput("lt_bin_size", "bin size", value=bin.step, min=0, max=10000, step=1)),
           column(width=4, numericInput("lt_min_bin", "minimum bin", value=4, min=0, max=10000, step=0.01)),
           column(width=4, numericInput("lt_max_bin", "maximum bin", value=2*(round((Linf()+(Linf()*0.2326))/2))+2, min=0, max=10000, step=0.01)))
  # }
})
# File chooser for multi-parameter profile input values.
output$Profile_multi_values <- renderUI({
  #if(!is.null(input$multi_profile)){
  # if(input$multi_profile){
  #h4(strong("Choose data file")),
  fluidRow(column(width=12,fileInput('file_multi_profile', 'Profile input values',
                  accept = c(
                    'text/csv',
                    'text/comma-separated-values',
                    'text/tab-separated-values',
                    'text/plain',
                    '.csv'
                  )
  )))
  # }
  # }
})
# roots <- getVolumes()()
###############################################
###############################################
###############################################
################# PARAMETERS ##################
###############################################

# Comma-separated string of 1s, one entry per fleet present in the length,
# age, or index data (e.g. "1,1,1"). NULL until at least one of those
# data sets has been loaded.
FleetNs<-reactive({
  # BUG FIX: the original guard called is.null() with four arguments
  # (is.null(a,b,c,d) is a runtime error in R) and indexed reactive data
  # objects that may still be NULL. Guard on the three objects actually
  # used to count fleets below.
  if(is.null(rv.Lt$data) && is.null(rv.Age$data) && is.null(rv.Index$data)) return(NULL)
  # Collect the fleet columns (column 3) from whichever data sets exist;
  # NULL entries drop out of c() automatically.
  fleet.cols<-c(if(!is.null(rv.Lt$data)) rv.Lt$data[,3],
                if(!is.null(rv.Age$data)) rv.Age$data[,3],
                if(!is.null(rv.Index$data)) rv.Index$data[,3])
  fleetnum<-rep(1,max(fleet.cols))
  paste(as.character(fleetnum), collapse=",")
})

# Maximum population age: rule-of-thumb 5.4/M from whichever natural
# mortality input is active, or the oldest age bin in the age-comp data,
# whichever is larger. NULL until at least one source is available.
Nages<-reactive({
  Nages<-NA
  if(all(c(is.null(input$M_f),is.null(input$M_f_fix),is.null(input$M_f_mean),is.null(input$M_f_mean_sss),is.null(rv.Age$data)))) return(NULL)
  # isTRUE() guards: a not-yet-rendered shiny input is NULL, and
  # !is.na(NULL) is logical(0), which would crash a bare if().
  if(isTRUE(!is.na(input$M_f))) {Nages<-ceiling(5.4/input$M_f)}
  if(isTRUE(!is.na(input$M_f_fix))) {Nages<-ceiling(5.4/input$M_f_fix)}
  if(isTRUE(!is.na(input$M_f_mean))) {Nages<-ceiling(5.4/input$M_f_mean)}
  if(isTRUE(!is.na(input$M_f_mean_sss))) {Nages<-ceiling(5.4/input$M_f_mean_sss)}
  if(!is.null(rv.Age$data))
  {
    # Age bins are the column names from column 9 onward.
    Nages_in<-max(as.numeric(colnames(rv.Age$data[,9:ncol(rv.Age$data)])))
    if(!is.na(Nages)&Nages_in>Nages){Nages<-Nages_in}
    if(is.na(Nages)){Nages<-Nages_in}
  }
  Nages
})

# Female natural mortality, taken from whichever M input the active UI
# panel (estimate/fix/prior/SSS) provides. Later assignments win if
# several inputs are simultaneously non-NA.
M_f_in<-reactive({
  M_f_in<-NA
  if(all(c(is.null(input$M_f),is.null(input$M_f_fix),is.null(input$M_f_mean),is.null(input$M_f_mean_sss)))) return(NULL)
  if(isTRUE(!is.na(input$M_f))) {M_f_in<-input$M_f}
  if(isTRUE(!is.na(input$M_f_fix))) {M_f_in<-input$M_f_fix}
  if(isTRUE(!is.na(input$M_f_mean))) {M_f_in<-input$M_f_mean}
  if(isTRUE(!is.na(input$M_f_mean_sss))) {M_f_in<-input$M_f_mean_sss}
  M_f_in
})

# Male natural mortality; only picked up when the matching "male parms"
# checkbox is ticked. any() tolerates NULL inputs (NULL & x is logical(0)
# and any(logical(0)) is FALSE), so no isTRUE() wrapper is needed here.
M_m_in<-reactive({
  M_m_in<-NA
  if(all(c(is.null(input$M_m),is.null(input$M_m_fix),is.null(input$M_m_mean),is.null(input$M_m_mean_sss)))) return(NULL)
  if(any(input$male_parms&!is.na(input$M_m))) {M_m_in<-input$M_m}
  if(any(input$male_parms_fix&!is.na(input$M_m_fix))) {M_m_in<-input$M_m_fix}
  if(any(input$male_parms_est&!is.na(input$M_m_mean))) {M_m_in<-input$M_m_mean}
  if(any(input$male_parms_SSS&!is.na(input$M_m_mean_sss))) {M_m_in<-input$M_m_mean_sss}
  M_m_in
})

# Female VBGF asymptotic length (Linf) from the active input panel.
Linf<-reactive({
  Linf<-NA
  if(all(c(is.null(input$Linf_f),is.null(input$Linf_f_fix),is.null(input$Linf_f_mean),is.null(input$Linf_f_mean_sss)))) return(NULL)
  if(isTRUE(!is.na(input$Linf_f))) {Linf<-input$Linf_f}
  if(isTRUE(!is.na(input$Linf_f_fix))) {Linf<-input$Linf_f_fix}
  if(isTRUE(!is.na(input$Linf_f_mean))) {Linf<-input$Linf_f_mean}
  if(isTRUE(!is.na(input$Linf_f_mean_sss))) {Linf<-input$Linf_f_mean_sss}
  Linf
})

# Male Linf; gated on the male-parameters checkboxes, as for M_m_in.
Linf_m_in<-reactive({
  Linf_m_in<-NA
  if(all(c(is.null(input$Linf_m),is.null(input$Linf_m_fix),is.null(input$Linf_m_mean),is.null(input$Linf_m_mean_sss)))) return(NULL)
  if(any(input$male_parms&!is.na(input$Linf_m))) {Linf_m_in<-input$Linf_m}
  if(any(input$male_parms_fix&!is.na(input$Linf_m_fix))) {Linf_m_in<-input$Linf_m_fix}
  if(any(input$male_parms_est&!is.na(input$Linf_m_mean))) {Linf_m_in<-input$Linf_m_mean}
  if(any(input$male_parms_SSS&!is.na(input$Linf_m_mean_sss))) {Linf_m_in<-input$Linf_m_mean_sss}
  Linf_m_in
})

# Female VBGF growth coefficient k from the active input panel.
k_vbgf<-reactive({
  k_vbgf<-NA
  if(all(c(is.null(input$k_f),is.null(input$k_f_fix),is.null(input$k_f_mean),is.null(input$k_f_mean_sss)))) return(NULL)
  if(isTRUE(!is.na(input$k_f))) {k_vbgf<-input$k_f}
  if(isTRUE(!is.na(input$k_f_fix))) {k_vbgf<-input$k_f_fix}
  if(isTRUE(!is.na(input$k_f_mean))) {k_vbgf<-input$k_f_mean}
  if(isTRUE(!is.na(input$k_f_mean_sss))) {k_vbgf<-input$k_f_mean_sss}
  k_vbgf
})

#Process life history input for plots
# Male VBGF k; gated on the male-parameters checkboxes.
k_vbgf_m_in<-reactive({
  k_vbgf_m_in<-NA
  if(all(c(is.null(input$k_m),is.null(input$k_m_fix),is.null(input$k_m_mean),is.null(input$k_m_mean_sss)))) return(NULL)
  if(any(input$male_parms&!is.na(input$k_m))) {k_vbgf_m_in<-input$k_m}
  if(any(input$male_parms_fix&!is.na(input$k_m_fix))) {k_vbgf_m_in<-input$k_m_fix}
  if(any(input$male_parms_est&!is.na(input$k_m_mean))) {k_vbgf_m_in<-input$k_m_mean}
  if(any(input$male_parms_SSS&!is.na(input$k_m_mean_sss))) {k_vbgf_m_in<-input$k_m_mean_sss}
  k_vbgf_m_in
})

# Female VBGF t0 (theoretical age at length zero) from the active panel.
t0_vbgf<-reactive({
  t0_vbgf<-NA
  if(all(c(is.null(input$t0_f),is.null(input$t0_f_fix),is.null(input$t0_f_mean),is.null(input$t0_f_mean_sss)))) return(NULL)
  if(isTRUE(!is.na(input$t0_f))) {t0_vbgf<-input$t0_f}
  if(isTRUE(!is.na(input$t0_f_fix))) {t0_vbgf<-input$t0_f_fix}
  if(isTRUE(!is.na(input$t0_f_mean))) {t0_vbgf<-input$t0_f_mean}
  if(isTRUE(!is.na(input$t0_f_mean_sss))) {t0_vbgf<-input$t0_f_mean_sss}
  t0_vbgf
})

# Male t0; gated on the male-parameters checkboxes.
t0_vbgf_m_in<-reactive({
  t0_vbgf_m_in<-NA
  if(all(c(is.null(input$t0_m),is.null(input$t0_m_fix),is.null(input$t0_m_mean),is.null(input$t0_m_mean_sss)))) return(NULL)
  if(any(input$male_parms&!is.na(input$t0_m))) {t0_vbgf_m_in<-input$t0_m}
  if(any(input$male_parms_fix&!is.na(input$t0_m_fix))) {t0_vbgf_m_in<-input$t0_m_fix}
  if(any(input$male_parms_est&!is.na(input$t0_m_mean))) {t0_vbgf_m_in<-input$t0_m_mean}
  if(any(input$male_parms_SSS&!is.na(input$t0_m_mean_sss))) {t0_vbgf_m_in<-input$t0_m_mean_sss}
  t0_vbgf_m_in
})

# Female length at 50% maturity from the active input panel.
L50<-reactive({
  L50<-NA
  if(all(c(is.null(input$L50_f),is.null(input$L50_f_fix),is.null(input$L50_f_est),is.null(input$L50_f_sss)))) return(NULL)
  if(isTRUE(!is.na(input$L50_f))) {L50<-input$L50_f}
  if(isTRUE(!is.na(input$L50_f_fix))) {L50<-input$L50_f_fix}
  if(isTRUE(!is.na(input$L50_f_est))) {L50<-input$L50_f_est}
  if(isTRUE(!is.na(input$L50_f_sss))) {L50<-input$L50_f_sss}
  L50
})

# Female length at 95% maturity from the active input panel.
L95<-reactive({
  L95<-NA
  if(all(c(is.null(input$L95_f),is.null(input$L95_f_fix),is.null(input$L95_f_est),is.null(input$L95_f_sss)))) return(NULL)
  if(isTRUE(!is.na(input$L95_f))) {L95<-input$L95_f}
  if(isTRUE(!is.na(input$L95_f_fix))) {L95<-input$L95_f_fix}
  if(isTRUE(!is.na(input$L95_f_est))) {L95<-input$L95_f_est}
  if(isTRUE(!is.na(input$L95_f_sss))) {L95<-input$L95_f_sss}
  L95
})

#############
### PLOTS ###
#############

##################
### CATCH PLOT ###
##################
# Reveal the removal-history section header once catch data exist.
observeEvent(req(!is.null(rv.Ct$data)), {
  shinyjs::show(output$catch_plots_label<-renderText({"Removal history"}))
})

# Render the catch (removals) time series, one line per fleet.
observeEvent(req(!is.null(rv.Ct$data)), {
  output$Ctplot_it<-renderUI({
    if(!is.null(rv.Ct$data))
    {
      output$Ctplot <- renderPlot({
        if (is.null(rv.Ct$data)) return(NULL)
        # First column is the year; remaining columns are one catch
        # series per fleet. aes_string() is deprecated in recent ggplot2
        # but retained here for compatibility with the pipe's `.`.
        rv.Ct$data %>%
          pivot_longer(-1, names_to = "Fleet", values_to = "catch") %>%
          ggplot(aes_string(names(.)[1], "catch", color = "Fleet")) +
          geom_point() +
          geom_line(lwd=1.5) +
          ylab("Removals") +
          xlab("Year") +
          scale_color_viridis_d()
      })
      plotOutput("Ctplot")
    }
  })
})

##########################
### LENGTH COMPS PLOTS ###
##########################
# Reveal the length-composition section header once length data exist.
observeEvent(req(!is.null(rv.Lt$data)), {
  shinyjs::show(output$lt_comp_plots_label<-renderText({"Length compositions"}))
})

# Length-composition frequencies by sex and fleet, with reference lines
# at L50 (solid) and Linf (dashed blue) where available.
observeEvent(req(!is.null(rv.Lt$data)), {
  output$Ltplot_it<-renderUI({
    if(!is.null(rv.Lt$data))
    {
      # L50.plot<-Linf.plot<--1
      # if(L50()!=NA){L50.plot=L50()}
      #if(Linf()!=NA){Linf.plot=Linf()}
      output$Ltplot<-renderPlot({
        if (is.null(rv.Lt$data)) return(NULL)
        # Columns 1-3 are year/fleet/sex; bin columns carry the bin value
        # in their names, recovered with the numeric-only gsub below.
        rv.Lt$data %>%
          rename_all(tolower) %>%
          dplyr::select(-nsamps) %>%
          pivot_longer(c(-year, -fleet, -sex)) %>%
          mutate(Year = factor(year), name = as.numeric(gsub("[^0-9.-]", "", name))) %>%
          ggplot(aes(name, value, color=Year)) +
          geom_line() +
          #geom_col(position="dodge") +
          facet_grid(sex~fleet, scales="free_y",labeller = label_both) +
          # facet_wrap(sex~year, scales="free_y",ncol=5) +
          xlab("Length bin") +
          ylab("Frequency") +
          scale_fill_viridis_d()+
          #geom_vline(xintercept = -1)+
          #geom_textvline(label = "L50", xintercept = L50(), vjust = 1.3) +
          #geom_textvline(label="Linf", xintercept = Linf(), vjust = -0.7,hjust=2) +
          #,lty=c(1,1,2),color=c("black","purple","blue")
          geom_vline(xintercept = c(-1,L50(),Linf()),linetype=c("solid","solid","dashed"),colour = c("black", "black", "blue"),na.rm = TRUE,show.legend = TRUE)+
          xlim(0,NA)
      })
      plotOutput("Ltplot")
    }
  })
})

# Superseded file-upload version of the length-comp plot, kept for reference.
# observeEvent(req(!is.null(input$file1)), {
#   output$Ltplot<-renderPlot({
#     inFile<- input$file1
#     # if (is.null(inFile)) {
#     #   return(NULL)
#     #   shinyjs::hide("Ltplot")}
#     # else{
#     Lt.comp.data<-read.csv(inFile$datapath,check.names=FALSE)
#     lt.dat.plot<-(Lt.comp.data)[,c(-4)]
#     dat.gg<-melt(lt.dat.plot,id=colnames(lt.dat.plot)[1:3])
#     colnames(dat.gg)<-c("year","fleet","sex","bin","ltnum")
#     ggplot(dat.gg,aes(bin,ltnum,fill=factor(fleet)))+
#       geom_col(color="white",position="dodge")+
#       #geom_col(fill="#236192",color="white")+
#       facet_wrap(~year,scales="free_y")+
#       xlab("Length bin")+
#       ylab("Frequency")+
#       labs(fill="Fleet")+
#       scale_fill_viridis(discrete=TRUE, option="viridis")
#     #scale_x_discrete(breaks=c(1,5,10,20),labels=as.character(levels(dat.gg$bin))[c(1,5,10,20)])
#     #scale_fill_brewer(palette = "BuPu")
#     # }
#   })
# })

#################
### AGE PLOTS ###
#################
# Reveal the age-composition section headers once age data exist.
observeEvent(req(!is.null(rv.Age$data)), {
  shinyjs::show(output$marginal_age_comp_plots_label<-renderText({"Marginal age compositions"}))
})
observeEvent(req(!is.null(rv.Age$data)), {
  shinyjs::show(output$conditional_age_comp_plots_label<-renderText({"Conditional age at length"}))
})

# Marginal (Lbin_hi < 0) and conditional age-at-length (Lbin_hi >= 0)
# composition plots; the two subsets follow the Stock Synthesis convention
# of flagging marginal comps with negative length bins.
observeEvent(req(!is.null(rv.Age$data)), {
  marginal_ages<-subset(rv.Age$data,Lbin_hi<0)
  Cond_ages<-subset(rv.Age$data,Lbin_hi>=0)
  output$Ageplot_it_marginal<-renderUI({
    if(!is.null(rv.Age$data))
    {
      output$Ageplot_marginal<-renderPlot({
        #inFile_age <- rv.Age$data
        # if (is.null(rv.Age$data)) return(NULL)
        if (nrow(marginal_ages)==0) return(NULL)
        # rv.Age$data %>%
        marginal_ages %>%
          rename_all(tolower) %>%
          dplyr::select(-nsamps,-lbin_hi) %>%
          pivot_longer(c(-year, -fleet, -sex, -lbin_low)) %>%
          mutate(Year = factor(year), name = as.numeric(gsub("[^0-9.-]", "", name))) %>%
          ggplot(aes(name, value, color=Year)) +
          geom_line() +
          # geom_col(position="dodge") +
          #facet_wrap(sex~year, scales="free_y",ncol=5) +
          facet_grid(sex~fleet, scales="free_y",labeller = label_both) +
          #scale_y_continuous(limits=c(0,max(colSums(rv.Age$data[-1,7:ncol(rv.Age$data)]))))+
          #scale_y_continuous(limits=c(0,20))+
          xlab("Age bin") +
          ylab("Frequency") +
          scale_fill_viridis_d()
      })
      plotOutput("Ageplot_marginal")
    }
  })
  output$Ageplot_it_cond<-renderUI({
    if(!is.null(rv.Age$data))
    {
      output$Ageplot_conditional<-renderPlot({
        # if (is.null(rv.Age$data)) return(NULL)
        if (nrow(Cond_ages)==0) return(NULL)
        # Columns 1,3,4,7 are Year/Fleet/Sex/Lbin_hi; 9+ are age bins.
        Cond_ages_plots<-melt(Cond_ages[,c(1,3,4,7,9:ncol(Cond_ages))],id.vars=c("Year","Fleet","Sex","Lbin_hi"))
        Cond_ages_plots_pos<-subset(Cond_ages_plots,value>0)
        ggplot(Cond_ages_plots_pos,aes(x=as.numeric(variable),y=as.numeric(Lbin_hi),color=Year))+
          geom_point()+
facet_grid(vars(Sex),vars(Fleet),labeller = label_both)+
          xlab("Age bin")+
          ylab("Length bin")
      })
      plotOutput("Ageplot_conditional")
    }
  })
})

# Superseded single-panel age plot, kept for reference.
# output$Ageplot <- renderPlot({
#   inFile_age <- rv.Age$data
#   if (is.null(inFile_age)) return(NULL)
#   rv.Age$data %>%
#     pivot_longer(-1, names_to = "year", values_to = "ltnum") %>%
#     rename(bin = Bins) %>%
#     ggplot(aes(bin, ltnum)) +
#     geom_col(fill="#1D252D", color="white") +
#     facet_wrap(~year) +
#     xlab("Age bin") +
#     ylab("Frequency")
# })

##################
### INDEX PLOT ###
##################
# Reveal the abundance-index section header once index data exist.
observeEvent(req(!is.null(rv.Index$data)), {
  shinyjs::show(output$index_plots_label<-renderText({"Indices of Abundance"}))
})

# Z-scored abundance indices by fleet with lognormal 95% error bars.
observeEvent(req(!is.null(rv.Index$data)), {
  output$Indexplot_it<-renderUI({
    if(!is.null(rv.Index$data))
    {
      output$Indexplot <- renderPlot({
        if (is.null(rv.Index$data)) return(NULL)
        plot.Index<-rv.Index$data
        plot.Index[,3]<-as.factor(plot.Index[,3])
        # Standardize each fleet's index to a z-score so fleets with
        # different scales can share one axis.
        plot.Index.zscore<-list()
        for(i in seq_along(unique(plot.Index$Fleet)))
        {
          plot.Index.temp<-plot.Index[plot.Index$Fleet %in% unique(plot.Index$Fleet)[i],]
          plot.Index.temp$Index<-(plot.Index.temp$Index-mean(plot.Index.temp$Index))/sd(plot.Index.temp$Index)
          plot.Index.zscore[[i]]<-plot.Index.temp
        }
        plot.Index.zs<-do.call("rbind", plot.Index.zscore)
        # BUG FIX: lower quantile was 0.0275 (typo); a symmetric 95%
        # interval pairs 0.025 with the 0.975 used for ymax.
        ggplot(plot.Index.zs,aes(x=Year,y=Index,group=Fleet, colour=Fleet)) +
          geom_line(lwd=1.1) +
          geom_errorbar(aes(ymin=qlnorm(0.025,log(Index),CV),ymax=qlnorm(0.975,log(Index),CV),group=Fleet),width=0,size=1)+
          geom_point(aes(colour=Fleet),size=4) +
          ylab("Z-score") +
          xlab("Year") +
          scale_color_viridis_d()
      })
      plotOutput("Indexplot")
    }
  })
})

#####################
### Plot M by age ###
#####################
# Cohort decline curves exp(-M*age) for females and males; male M defaults
# to the female value unless a male-parameters panel is active. The tiny
# additive constant keeps a zero M from producing a degenerate curve.
output$Mplot<-renderPlot({
  mf.in = M_f_in()+0.000000000000001
  mm.in = M_f_in()+0.000000000000001
  # if(input$male_parms|input$male_parms_fix)
  if(input$male_parms|input$male_parms_SSS|input$male_parms_fix|input$male_parms_est)
  {
    mm.in = M_m_in()+0.000000000000001
  }
  if(any(is.na(c(mf.in, mm.in)))|any(is.null(c(mf.in, mm.in))))
    return(NULL)
  Female_M = data.frame(Ages = 0:Nages(), PopN = exp(-mf.in * 0:Nages()), Sex="Female")
  Male_M = data.frame(Ages = 0:Nages(), PopN=exp(-mm.in * 0:Nages()), Sex="Male")
  M_sexes <- rbind(Female_M, Male_M)
  # Annotate the derived maximum age in the plot corner.
  Nage_4_plot <- grobTree(textGrob(paste0("Max age =", Nages()), x=0.1, y=0.95, hjust=0, gp=gpar(col="darkblue", fontsize=12, fontface="italic")))
  ggplot(M_sexes,aes(Ages, PopN, color=Sex))+
    geom_line(aes(linetype=Sex), lwd=2)+
    ylab("Cohort decline by M")+
    annotation_custom(Nage_4_plot)
})

##############################
### Plot VBGF and maturity ###
##############################
# Von Bertalanffy growth curves for both sexes, with the female maturity
# lengths (L50/L95) marked on the female curve when available.
output$VBGFplot<-renderPlot({
  f_Linf = m_Linf = Linf()
  f_k = m_k = k_vbgf()
  f_t0 = m_t0 = t0_vbgf()
  f_L50 = L50()
  f_L95 = L95()
  maxage = Nages()
  if(any(input$male_parms,input$male_parms_SSS,input$male_parms_fix,input$male_parms_est))
  {
    m_Linf = Linf_m_in()
    m_k = k_vbgf_m_in()
    m_t0 = t0_vbgf_m_in()
  }
  if(!any(is.na(c(f_Linf, f_k, f_t0)))){
    vbgf_female = data.frame(Age = c(f_t0:Nages()), Length = VBGF(f_Linf, f_k, f_t0, c(f_t0:Nages())), Sex="Female")
    vbgf_male = data.frame(Age = f_t0:Nages(), Length=VBGF(m_Linf, m_k, m_t0, c(f_t0:Nages())), Sex="Male")
    vbgf.plot <- rbind(vbgf_female,vbgf_male) %>%
      ggplot(aes(Age, Length, color=Sex)) +
      geom_line(aes(linetype=Sex), lwd=2)
    if(!any(is.na(c(f_L50, f_L95)))){
      # Invert the growth curve to place the maturity lengths at their ages.
      age.mat = data.frame(Age = VBGF.age(f_Linf, f_k, f_t0, c(f_L50, f_L95)), Length = c(f_L50, f_L95), Sex="Female")
      vbgf.plot <- vbgf.plot +
        geom_point(data = age.mat, aes(Age, Length), color = "darkorange", size=6) +
        geom_text(data = age.mat,label=c("Lmat50%", "Lmat95%"), nudge_x = -0.1 * Nages(), color="black")
    }
    vbgf.plot
  }
})

###################
### Selectivity ###
###################
# observeEvent(req(input$Sel50,input$Selpeak), {
#   shinyjs::show(output$Sel_plots_label<-renderText({"Selectivity"}))
# })
#h4("Selectivity")
output$Dep_plot_title<-renderUI({
if(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)){
    h4("Relative Stock Status Prior")
  }
})

# Prior-predictive histogram of relative stock status (depletion); shown
# only in catch-only (SSS) mode, i.e. when catch is the sole data source.
output$Dep_plot_it<-renderUI({
  if(((as.numeric(input$tabs)*1)/1)<4&is.null(rv.Lt$data)&!is.null(rv.Ct$data)&is.null(rv.Age$data)&is.null(rv.Index$data)&any(is.null(input$user_model),!input$user_model)){
    output$Depletion_plot <- renderPlot({
      if(!is.na(input$status_year)&!is.na(input$Depl_mean_sss))
      {
        # Draw 100k samples from the chosen depletion prior.
        if(input$Depl_prior_sss=="beta"){dep.hist.sss<-data.frame(draws=rbeta.ab(100000,input$Depl_mean_sss,input$Depl_SD_sss,0,1))}
        if(input$Depl_prior_sss=="lognormal"){dep.hist.sss<-data.frame(draws=rlnorm(100000,log(input$Depl_mean_sss),input$Depl_SD_sss))}
        if(input$Depl_prior_sss=="truncated normal"){dep.hist.sss<-data.frame(draws=rtruncnorm(100000,0,1,input$Depl_mean_sss,input$Depl_SD_sss))}
        # For a uniform prior the "mean"/"SD" inputs are the bounds.
        if(input$Depl_prior_sss=="uniform"){dep.hist.sss<-data.frame(draws=runif(100000,input$Depl_mean_sss,input$Depl_SD_sss))}
        if(input$Depl_prior_sss=="no prior"){NULL}
        Depletion_plot<-gghistogram(dep.hist.sss, x = "draws", fill = "purple")
        Depletion_plot
      }
    })
    plotOutput("Depletion_plot")
  }
})

# Length-based selectivity preview for the estimation (SS) workflow.
# Comma-separated per-fleet inputs are parsed; logistic curves are built
# as a double-normal with fixed non-descending parameters.
output$Selplot <- renderPlot({
  if(input$Sel_choice=="Logistic"&any(any(input$Sel50[1]=="",is.null(input$Sel50)),any(input$Selpeak[1]=="",is.null(input$Selpeak))))
    return(NULL)
  if(input$Sel_choice=="Logistic")
  {
    # Only draw once every fleet has a matching Sel50/Selpeak pair.
    if(all(length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))),
           all(input$Sel50!=""),
           all(!is.null(input$Sel50)),
           all(input$Selpeak!=""),
           all(!is.null(input$Selpeak))))
    {
      Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
      Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
      # Fixed descending-limb values that force an asymptotic (logistic) shape.
      PeakDesc<-rep(10000,length(Selpeak))
      LtPeakFinal<-rep(0.0001,length(Selpeak))
      FinalSel<-rep(0.999,length(Selpeak))
      # if(input$Sel_choice=="Logistic")
      # {
      # }
      # if(input$Sel_choice=="Dome-shaped")
      # {
      #   PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))
      #   LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))
      #   FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))
      # }
      Sel.out<-doubleNorm24.sel(Sel50=Sel50[1],Selpeak=Selpeak[1],PeakDesc=PeakDesc[1],LtPeakFinal=LtPeakFinal[1],FinalSel=FinalSel[1])
      Sel.out<-data.frame(Bin=Sel.out[,1],Sel=Sel.out[,2],Fleet="Fleet 1")
      if(length(Sel50)>1)
      {
        for(ii in 2:length(Sel50))
        {
          Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
          Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii))
          Sel.out<-rbind(Sel.out,Sel.out.temp)
        }
      }
      # BUG FIX: axis labels were swapped (x is the length bin, y is selectivity).
      selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
        geom_line(lwd=1.5) +
        xlab("Length Bins") +
        ylab("Selectivity") +
        scale_color_viridis_d()
    }
  }
  if(input$Sel_choice=="Dome-shaped")
  {
    # Dome-shaped needs all five per-fleet vectors to be the same length.
    if(all(length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))),
           length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))),
           length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))),
           length(as.numeric(trimws(unlist(strsplit(input$Sel50,",")))))==length(as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))),
           all(input$Sel50!=""),
           all(!is.null(input$Sel50)),
           all(input$Selpeak!=""),
           all(!is.null(input$Selpeak))))
    {
      Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
      Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
      PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))
      LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))
      FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))
      # if(input$Sel_choice=="Logistic")
      # {
      #   PeakDesc<-rep(10000,length(Selpeak))
      #   LtPeakFinal<-rep(0.0001,length(Selpeak))
      #   FinalSel<-rep(0.999,length(Selpeak))
      # }
      # if(input$Sel_choice=="Dome-shaped")
      # {
      # }
      Sel.out<-doubleNorm24.sel(Sel50=Sel50[1],Selpeak=Selpeak[1],PeakDesc=PeakDesc[1],LtPeakFinal=LtPeakFinal[1],FinalSel=FinalSel[1])
      Sel.out<-data.frame(Bin=Sel.out[,1],Sel=Sel.out[,2],Fleet="Fleet 1")
      if(length(Sel50)>1)
      {
        for(ii in 2:length(Sel50))
        {
          Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
          Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii))
          Sel.out<-rbind(Sel.out,Sel.out.temp)
        }
      }
      # BUG FIX: axis labels were swapped (x is the length bin, y is selectivity).
      selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
        geom_line(lwd=1.5) +
        xlab("Length Bins") +
        ylab("Selectivity") +
        scale_color_viridis_d()
    }
  }
  # get0() returns NULL if neither branch above assigned selplot.out.
  if(!is.null(get0("selplot.out"))){return(selplot.out)} else {return(NULL)}
})

# Same selectivity preview for the catch-only (SSS) workflow, driven by
# the *_sss inputs. Kept structurally parallel to output$Selplot above.
output$Selplot_SSS <- renderPlot({
  if(input$Sel_choice_sss=="Logistic"&any(any(input$Sel50_sss[1]=="",is.null(input$Sel50_sss)),any(input$Selpeak_sss[1]=="",is.null(input$Selpeak_sss))))
    return(NULL)
  if(input$Sel_choice_sss=="Logistic")
  {
    if(all(length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))),
           all(input$Sel50_sss!=""),
           all(!is.null(input$Sel50_sss)),
           all(input$Selpeak_sss!=""),
           all(!is.null(input$Selpeak_sss))))
    {
      Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50_sss,","))))
      Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))
      PeakDesc<-rep(10000,length(Selpeak))
      LtPeakFinal<-rep(0.0001,length(Selpeak))
      FinalSel<-rep(0.999,length(Selpeak))
      Sel.out<-doubleNorm24.sel(Sel50=Sel50[1],Selpeak=Selpeak[1],PeakDesc=PeakDesc[1],LtPeakFinal=LtPeakFinal[1],FinalSel=FinalSel[1])
      Sel.out<-data.frame(Bin=Sel.out[,1],Sel=Sel.out[,2],Fleet="Fleet 1")
      if(length(Sel50)>1)
      {
        for(ii in 2:length(Sel50))
        {
          Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
          Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii))
          Sel.out<-rbind(Sel.out,Sel.out.temp)
        }
      }
      # BUG FIX: axis labels were swapped (x is the length bin, y is selectivity).
      selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
        geom_line(lwd=1.5) +
        xlab("Length Bins") +
        ylab("Selectivity") +
        scale_color_viridis_d()
    }
  }
  if(input$Sel_choice_sss=="Dome-shaped")
  {
    if(all(length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))),
           length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,","))))),
           length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,","))))),
           length(as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))))==length(as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,","))))),
           all(input$Sel50_sss!=""),
           all(!is.null(input$Sel50_sss)),
           all(input$Selpeak_sss!=""),
           all(!is.null(input$Selpeak_sss))))
    {
      Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50_sss,","))))
      Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,","))))
      PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,","))))
      LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,","))))
      FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,","))))
      Sel.out<-doubleNorm24.sel(Sel50=Sel50[1],Selpeak=Selpeak[1],PeakDesc=PeakDesc[1],LtPeakFinal=LtPeakFinal[1],FinalSel=FinalSel[1])
      Sel.out<-data.frame(Bin=Sel.out[,1],Sel=Sel.out[,2],Fleet="Fleet 1")
      if(length(Sel50)>1)
      {
        for(ii in 2:length(Sel50))
        {
          Sel.out.temp<-doubleNorm24.sel(Sel50=Sel50[ii],Selpeak=Selpeak[ii],PeakDesc=PeakDesc[ii],LtPeakFinal=LtPeakFinal[ii],FinalSel=FinalSel[ii])
          Sel.out.temp<-data.frame(Bin=Sel.out.temp[,1],Sel=Sel.out.temp[,2],Fleet=paste0("Fleet ",ii))
          Sel.out<-rbind(Sel.out,Sel.out.temp)
        }
      }
      # BUG FIX: axis labels were swapped (x is the length bin, y is selectivity).
      selplot.out<-ggplot(Sel.out,aes(Bin,Sel,colour=Fleet)) +
        geom_line(lwd=1.5) +
        xlab("Length Bins") +
        ylab("Selectivity") +
        scale_color_viridis_d()
    }
  }
  if(!is.null(get0("selplot.out"))){return(selplot.out)} else {return(NULL)}
})

#############################################
### END PLOTS ###
#############################################

#############################################
######## PREPARE FILES AND RUN SSS ##########
#############################################
# Build the SSS scenario folder from the bundled template, rewrite the SS
# data/control/forecast files from the UI inputs, and launch the run.
SSS.run<-observeEvent(input$run_SSS,{
  show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[1],text="Create model files")
  print(1)
  # progress <- shiny::Progress$new(session, min=1, max=2)
  # on.exit(progress$close())
  # progress$set(message = 'Model run in progress',
  #              detail = '')
  # for (i in 1:2) {
  #   progress$set(value = i)
  #   Sys.sleep(0.5)
  # }
  # Copy and move files: a pre-existing scenario of the same name is wiped
  # and replaced by a fresh copy of the Beverton-Holt template.
  if(file.exists(paste0("Scenarios/",input$Scenario_name)))
  {
    unlink(paste0("Scenarios/",input$Scenario_name),recursive=TRUE)
    # file.remove(paste0(getwd(),"/Scenarios/",input$Scenario_name))
  }
  #if(input$)
  {
    file.copy(paste0("SSS_files/sssexample_BH"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE)
    file.rename(paste0("Scenarios/sssexample_BH"), paste0("Scenarios/",input$Scenario_name))
  }
  #if()
  # {
  #   file.copy(paste0(getwd(),"/SSS_files/sssexample_RickPow"),paste0(getwd(),"/Scenarios"),recursive=TRUE,overwrite=TRUE)
  #   file.rename(paste0(getwd(),"/Scenarios/sssexample_RickPow"), paste0(getwd(),"/Scenarios/",input$Scenario_name))
  # }
  # Read data and control files with r4ss.
  data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/sss_example.dat"))
  ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/sss_example.ctl"),use_datlist = TRUE, datlist=data.file)
  # Read, edit then write new DATA file.
  data.file$styr<-input$styr
  data.file$endyr<-input$endyr
  data.file$Nages<-Nages()
  # Catches: one fleet per catch column plus the depletion "fleet".
  Catch.data<-rv.Ct$data
  catch.dep.fleets<-ncol(Catch.data)
  data.file$Nfleets<-catch.dep.fleets
if(!is.null(rv.Index$data)) { index.fleets<-max(rv.Index$data$Fleet) if(index.fleets>catch.dep.fleets) {data.file$Nfleets<-index.fleets} if(index.fleets==catch.dep.fleets) {data.file$Nfleets<-index.fleets+1} if(index.fleets<catch.dep.fleets) {data.file$Nfleets<-catch.dep.fleets} } if((data.file$Nfleets-1)>1){ for(i in 1:(data.file$Nfleets-2)) { data.file$fleetinfo<-rbind(data.file$fleetinfo,data.file$fleetinfo[1,]) data.file$CPUEinfo<-rbind(data.file$CPUEinfo,data.file$CPUEinfo[1,]) } data.file$fleetinfo$fleetname<-c(paste0("Fishery",1:(catch.dep.fleets-1)),"Depl") data.file$fleetinfo$type[c(2,data.file$Nfleets)]<-c(1,3) data.file$fleetinfo$surveytiming[c(2,data.file$Nfleets)]<-c(-1,0.1) data.file$CPUEinfo[,1]<-1:data.file$Nfleets data.file$CPUEinfo[c(2,data.file$Nfleets),2]<-c(1,34) data.file$CPUE$index<-data.file$Nfleets } year.in<-Catch.data[,1] catch.cols<-colnames(data.file$catch) catch_temp<-list() for(i in 1:(data.file$Nfleets-1)) { catch_temp[[i]]<-data.frame( c(-999,year.in), rep(1,length(year.in)+1), rep(i,length(year.in)+1), c(0,Catch.data[,i+1]), rep(0.01,length(year.in)+1) ) } data.file$catch<-list.rbind(catch_temp) colnames(data.file$catch)<-catch.cols #Relative stock status data.file$CPUE$year<-c(input$styr,input$status_year) #Length composition data if(input$Linf_f_mean_sss>30){data.file$binwidth<-2} data.file$minimum_size<-floor(input$Linf_f_mean_sss/10) data.file$maximum_size<-ceiling(input$Linf_f_mean_sss+(input$Linf_f_mean_sss*0.1)) data.file$lbin_vector<-seq(data.file$minimum_size,data.file$maximum_size,data.file$binwidth) data.file$N_lbinspop<-length(data.file$lbin_vector) #Age composition data # if (is.null(inFile_age)){ # data.file$N_agebins<-Nages() # data.file$agebin_vector<-1:Nages() # data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE)) # colnames(data.file$ageerror)<-paste0("age",1:Nages()) # } #Catch units if(input$Ct_units_choice_SSS) { 
ct.units<-as.numeric(trimws(unlist(strsplit(input$fleet_ct_units_SSS,",")))) #data.file$fleetinfo[ct.units,4]<-2 #use this when just specifying which are fleets are numbers data.file$fleetinfo[,4]<-c(ct.units,1) } SS_writedat(data.file,paste0("Scenarios/",input$Scenario_name,"/sss_example.dat"),overwrite=TRUE) ####################### END DATA FILE ##################################### ####################### START SSS CTL FILE ##################################### if(!is.null(input$GT5)){if(input$GT5) { ctl.file$N_platoon<-5 ctl.file$sd_ratio<-0.7 ctl.file$submorphdist<-c(-1,0.25,0.5,0.25,0.125) } } #if(all(any(input$est_parms==TRUE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data))))==TRUE) #{ fem_vbgf<-VBGF(input$Linf_f_mean_sss,input$k_f_mean_sss,input$t0_f_mean_sss,c(0:Nages())) #c("lognormal","truncated normal","uniform","beta") prior.name<-c("no prior","symmetric beta", "beta","lognormal","gamma","normal") prior.type<-c(0:3,5,6) #Females #M if(input$M_f_prior=="lognormal"){ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean_sss,log(input$M_f_mean_sss))} else {ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean_sss,input$M_f_mean_sss)} #L0 ctl.file$Growth_Age_for_L1<-input$t0_f_mean_sss ctl.file$Growth_Age_for_L1<-0 #if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(fem_vbgf[1],log(fem_vbgf[1]))} #else {ctl.file$MG_parms[2,3:4]<-fem_vbgf[1]} if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(0,log(0.0000001))} else {ctl.file$MG_parms[2,3:4]<-0} #Linf if(input$Linf_f_prior=="lognormal"){ctl.file$MG_parms[3,3:4]<-c(input$Linf_f_mean_sss,log(input$Linf_f_mean_sss))} else{ctl.file$MG_parms[3,3:4]<-input$Linf_f_mean_sss} #k if(input$k_f_prior=="lognormal"){ctl.file$MG_parms[4,3:4]<-c(input$k_f_mean_sss,log(input$k_f_mean_sss))} else {ctl.file$MG_parms[4,3:4]<-input$k_f_mean_sss} #CV young 
if(input$CV_lt_f_young_prior=="lognormal"){ctl.file$MG_parms[5,3:4]<-c(input$CV_lt_f_young_mean_sss,log(input$CV_lt_f_young_mean_sss))} else{ctl.file$MG_parms[5,3:4]<-input$CV_lt_f_young_mean_sss} #CV old if(input$CV_lt_f_old_prior=="lognormal"){ctl.file$MG_parms[6,3:4]<-c(input$CV_lt_f_old_mean_sss,log(input$CV_lt_f_old_mean_sss))} else{ctl.file$MG_parms[6,3:4]<-input$CV_lt_f_old_mean_sss} #Weight-length ctl.file$MG_parms[7,3:4]<-input$WLa_f_sss #coefficient ctl.file$MG_parms[8,3:4]<- input$WLb_f_sss #exponent #Maturity ctl.file$MG_parms[9,3:4]<-input$L50_f_sss #Lmat50% ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_sss-input$L50_f_sss) #Maturity slope #Males ctl.file$MG_parms[13,3:4]<-c(input$M_f_mean_sss,log(input$M_f_mean_sss)) #M #ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0 ctl.file$MG_parms[14,3:4]<-0 #L0 ctl.file$MG_parms[15,3:4]<-input$Linf_f_mean_sss #Linf ctl.file$MG_parms[16,3:4]<-input$k_f_mean_sss #k ctl.file$MG_parms[17,3:4]<-input$CV_lt_f_young_mean_sss #CV ctl.file$MG_parms[18,3:4]<-input$CV_lt_f_old_mean_sss #CV #Weight-length ctl.file$MG_parms[19,3:4]<-input$WLa_f_sss #coefficient ctl.file$MG_parms[20,3:4]<- input$WLb_f_sss #exponent ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_sss #coefficient ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_sss #exponent if(input$male_offset_SSS) { ctl.file$parameter_offset_approach<-2 #Change to offset approach ctl.file$MG_parms[13,c(1,3:4)]<-0 #M ctl.file$MG_parms[14,c(1,3:4)]<-0 #L0 ctl.file$MG_parms[15,c(1,3:4)]<-0 #Linf ctl.file$MG_parms[16,c(1,3:4)]<-0 #k ctl.file$MG_parms[17,c(1,3:4)]<-0 #CV ctl.file$MG_parms[18,c(1,3:4)]<-0 #CV #Weight-length ctl.file$MG_parms[19,c(1,3:4)]<-input$WLa_f_sss #coefficient ctl.file$MG_parms[20,c(1,3:4)]<-input$WLb_f_sss #exponent } if(input$male_parms_SSS) { male_vbgf_sss<-VBGF(input$Linf_m_mean_sss,input$k_m_mean_sss,input$t0_m_mean_sss,c(input$t0_f_mean_sss:Nages())) #M 
if(input$M_m_prior_sss=="lognormal"){ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean_sss,log(input$M_m_mean_sss))} else {ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean_sss,input$M_m_mean_sss)} #L0 if(input$t0_f_prior_sss=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_sss[1],log(male_vbgf_sss[1]))} else {ctl.file$MG_parms[14,3:4]<-c(male_vbgf_sss[1],male_vbgf_sss[1])} # if(input$t0_f_prior_sss=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(0,log(0.0000001))} #else {ctl.file$MG_parms[14,3:4]<-c(0,0)} #Linf if(input$Linf_f_prior_sss=="lognormal"){ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean_sss,log(input$Linf_m_mean_sss))} else{ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean_sss,input$Linf_m_mean_sss)} #k if(input$k_f_prior_sss=="lognormal"){ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean_sss,log(input$k_m_mean_sss))} else {ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean_sss,input$k_m_mean_sss)} #CV young if(input$CV_lt_f_young_prior_sss=="lognormal"){ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean_sss,log(input$CV_lt_m_young_mean_sss))} else{ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean_sss,input$CV_lt_m_young_mean_sss)} #CV old if(input$CV_lt_f_old_prior_sss=="lognormal"){ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean_sss,log(input$CV_lt_m_old_mean_sss))} else{ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean_sss,input$CV_lt_m_old_mean_sss)} #Weight-length ctl.file$MG_parms[19,3:4]<-input$WLa_m_sss #coefficient ctl.file$MG_parms[20,3:4]<- input$WLb_m_sss #exponent } #S-R #ctl.file$SR_parms[1,3:4]<-input$lnR0 #lnR0 if(input$h_ss_prior=="lognormal"){ctl.file$SR_parms[2,3:4]<-c(input$h_mean_ss,log(h_mean_ss))} else{ctl.file$SR_parms[2,3:4]<-input$h_mean_ss} #} # ctl.file$Q_options[1]<-data.file$Nfleets #Selectivity Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50_sss,",")))) Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak_sss,",")))) bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1] if(input$Sel_choice_sss=="Logistic") { 
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
# Fleet 1 logistic selectivity (SS pattern 24 parameterization):
# P1 = peak, P3 = log width from Sel50 to peak; P2/P4/P6 pinned to make the curve asymptotic.
ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- 15
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- -15
ctl.file$size_selex_parms[6,3:4]<- 15
}
if(input$Sel_choice_sss=="Dome-shaped")
{
# Additional comma-separated per-fleet dome inputs.
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_sss,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_sss,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel_sss,","))))
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[1]-bin.width)/(PeakDesc[1]-Selpeak[1]-bin.width))
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- log(LtPeakFinal[1])
ctl.file$size_selex_parms[6,3:4]<- -log((1/(FinalSel[1]+0.000000001)-1))
}
#Add other fleets: replicate fleet 1's selectivity rows, then overwrite per fleet.
if((data.file$Nfleets-1)>1){
for(i in 1:(data.file$Nfleets-2))
{
#ctl.file$init_F<-rbind(ctl.file$init_F,ctl.file$init_F[1,])
ctl.file$size_selex_types<-rbind(ctl.file$size_selex_types,ctl.file$size_selex_types[1,])
ctl.file$age_selex_types<-rbind(ctl.file$age_selex_types,ctl.file$age_selex_types[1,])
ctl.file$size_selex_parms<-rbind(ctl.file$size_selex_parms,ctl.file$size_selex_parms[1:6,])
if(input$Sel_choice_sss=="Logistic")
{
#ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- 15
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+4,3:4]<- -15
ctl.file$size_selex_parms[6*i+6,3:4]<- 15
}
if(input$Sel_choice_sss=="Dome-shaped")
{
ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[i+1]-bin.width)/(PeakDesc[i+1]-Selpeak[i+1]-bin.width))
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+4,3:4]<- log(LtPeakFinal[i+1])
ctl.file$size_selex_parms[6*i+6,3:4]<- -log((1/(FinalSel[i+1]+0.000000001)-1))
}
}
# Pattern 24 (double normal) for all fishing fleets, 0 for the depletion fleet; age pattern 10.
ctl.file$size_selex_types[,1]<-c(rep(24,data.file$Nfleets-1),0)
ctl.file$age_selex_types[,1]<-10
#Re-label so r4ss can interpret these new entries
#rownames(ctl.file$init_F)<-paste0("InitF_seas_1_flt_",1:data.file$Nfleets,"Fishery",1:data.file$Nfleets)
rownames(ctl.file$age_selex_types)<-rownames(ctl.file$size_selex_types)<-c(paste0("Fishery",1:(data.file$Nfleets-1)),"Depl")
size_selex_parms_rownames<-list()
for(f_i in 1:(data.file$Nfleets-1))
{
size_selex_parms_rownames[[f_i]]<-c(paste0("SizeSel_P_1_Fishery",f_i,"(",f_i,")"),
                                    paste0("SizeSel_P_2_Fishery",f_i,"(",f_i,")"),
                                    paste0("SizeSel_P_3_Fishery",f_i,"(",f_i,")"),
                                    paste0("SizeSel_P_4_Fishery",f_i,"(",f_i,")"),
                                    paste0("SizeSel_P_5_Fishery",f_i,"(",f_i,")"),
                                    paste0("SizeSel_P_6_Fishery",f_i,"(",f_i,")"))
}
size_selex_parms_rownames<-unlist(size_selex_parms_rownames)
rownames(ctl.file$size_selex_parms)<-size_selex_parms_rownames
}
SS_writectl(ctl.file,paste0("Scenarios/",input$Scenario_name,"/sss_example.ctl"),overwrite=TRUE)
#Forecast file modifications
#Reference points
#if(!input$use_forecastnew)
#{
forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss"))
if(input$RP_choices){
forecast.file$SPRtarget<-input$SPR_target
forecast.file$Btarget<-input$B_target
CR_choices<-c("1: Catch fxn of SSB, buffer on F",
              "2: F fxn of SSB, buffer on F",
              "3: Catch fxn of SSB, buffer on catch",
              "4: F fxn of SSB, buffer on catch")
CR_choices_num.vec<-c(1:4)
forecast.file$ControlRuleMethod<-CR_choices_num.vec[CR_choices==input$CR_Ct_F]
forecast.file$SBforconstantF<-input$slope_hi
forecast.file$BfornoF<-input$slope_low
}
if(input$Forecast_choice)
{
forecast.file$Nforecastyrs<-input$forecast_num
# One buffer value applies to all years; a comma-separated list becomes annual fractions.
buffer.in<-as.numeric(trimws(unlist(strsplit(input$forecast_buffer,","))))
if(length(buffer.in)==1){forecast.file$Flimitfraction<-buffer.in}
if(length(buffer.in)>1)
{
forecast.file$Flimitfraction<- -1 # -1 flags SS to read the year-specific table below
buffer.datafr<-data.frame(Year=c((data.file$endyr+1):(data.file$endyr+input$forecast_num)),Fraction=buffer.in)
rownames(buffer.datafr)<-paste0("#_Flimitfraction_m",1:input$forecast_num)
forecast.file$Flimitfraction_m<-buffer.datafr
}
}
SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
#}
#if(input$use_forecastnew)
# {
# forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss_new"))
# SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE)
# }
#Set prior inputs. SSS prior type codes:
#0 = normal
#10 = truncated normal
#1 = symmetric beta (rbeta)
#2 = beta
#3 = lognormal
#30 = truncated lognormal
#4 = uniform
#99 = used only for the steepness parameter. Indicates h will come from FMSY/M prior
sss.prior.name<-c("no prior","symmetric beta","beta","normal","truncated normal","lognormal","truncated lognormal","uniform")
sss.prior.type<-c(-1,1,2,0,10,3,30,4)
# Each *.in_sss vector is: prior type, mean, SD for females, then the same triplet for males.
Dep.in_sss<-c(sss.prior.type[sss.prior.name==input$Depl_prior_sss],input$Depl_mean_sss,input$Depl_SD_sss)
h.in_sss<-c(sss.prior.type[sss.prior.name==input$h_prior_sss],input$h_mean_sss,input$h_SD_sss)
# No male handling requested: males duplicate the female prior.
if(!input$male_offset_SSS)
{
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss)
Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss)
k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss)
t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss)
}
# Offset approach: male mean/SD entries are zeroed.
if(input$male_offset_SSS)
{
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_prior_sss],0,0)
Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],0,0)
k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_f_prior_sss],0,0)
t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_f_prior_sss],0,0)
}
# Explicit male parameters requested: fill male triplets from the "_m_" inputs.
if(input$male_parms_SSS)
{
# Male triplets from the explicit male inputs.
M.in_sss<-c(sss.prior.type[sss.prior.name==input$M_prior_sss],input$M_f_mean_sss,input$M_f_SD_sss,sss.prior.type[sss.prior.name==input$M_m_prior_sss],input$M_m_mean_sss,input$M_m_SD_sss)
# NOTE(review): the male Linf entry below reuses input$Linf_f_SD_sss (female SD);
# input$Linf_m_SD_sss may have been intended -- confirm.
Linf.in_sss<-c(sss.prior.type[sss.prior.name==input$Linf_f_prior_sss],input$Linf_f_mean_sss,input$Linf_f_SD_sss,sss.prior.type[sss.prior.name==input$Linf_m_prior_sss],input$Linf_m_mean_sss,input$Linf_f_SD_sss)
k.in_sss<-c(sss.prior.type[sss.prior.name==input$k_f_prior_sss],input$k_f_mean_sss,input$k_f_SD_sss,sss.prior.type[sss.prior.name==input$k_m_prior_sss],input$k_m_mean_sss,input$k_m_SD_sss)
t0.in_sss<-c(sss.prior.type[sss.prior.name==input$t0_f_prior_sss],input$t0_f_mean_sss,input$t0_f_SD_sss,sss.prior.type[sss.prior.name==input$t0_m_prior_sss],input$t0_m_mean_sss,input$t0_m_SD_sss)
}
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress")
#Run SSS
SSS.out<-SSS(paste0("Scenarios/",input$Scenario_name),
             file.name=c("sss_example.dat","sss_example.ctl"),
             reps=input$SSS_reps,
             seed.in=input$SSS_seed,
             Dep.in=Dep.in_sss,
             M.in=M.in_sss,
             SR_type=3,
             h.in=h.in_sss,
             FMSY_M.in=c(-1,0.5,0.1),
             BMSY_B0.in=c(-1,0.5,0.1),
             Linf.k.cor=input$Linf_k_cor_sss,
             Linf.in=Linf.in_sss,
             k.in=k.in_sss,
             t0.in=t0.in_sss,
             Zfrac.Beta.in=c(-99,0.2,0.6,-99,0.5,2),
             R_start=c(0,input$lnR0_sss),
             doR0.loop=c(1,round(input$lnR0_sss*0.5),round(input$lnR0_sss*1.5),(round(input$lnR0_sss*1.3)-round(input$lnR0_sss*0.5))/10),
             sum_age=0,
             ts_yrs=c(input$styr,input$endyr),
             pop.ltbins=NA,
             #ofl_yrs=c(input$endyr+1,input$endyr+2),
             sexes=T,
             BH_FMSY_comp=F,
             OStype=input$OS_choice)
#save(SSS.out)
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[3],text="Process model output")
# exists(load(...)) loads SSS_out.DMP into this environment and checks that the
# stored object name exists, i.e. the run produced usable output.
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
# Prior vs. posterior histograms for M (both sexes), steepness, and depletion.
output$SSS_priors_post<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
sss.M.f<-rbind(data.frame(value=SSS.out$Prior$M_f,type="prior",metric="Female M"),data.frame(value=SSS.out$Post$M_f,type="post",metric="Female M"))
sss.M.m<-rbind(data.frame(value=SSS.out$Prior$M_m,type="prior",metric="Male M"),data.frame(value=SSS.out$Post$M_m,type="post",metric="Male M"))
sss.h<-rbind(data.frame(value=SSS.out$Prior$h,type="prior",metric="h"),data.frame(value=SSS.out$Post$h,type="post",metric="h"))
sss.Dep<-rbind(data.frame(value=SSS.out$Prior$Dep,type="prior",metric="Dep"),data.frame(value=SSS.out$Post$Dep.Obs,type="post",metric="Dep"))
sss.vals.out<-rbind(sss.M.f,sss.M.m,sss.h,sss.Dep)
ggplot(sss.vals.out,aes(x=value,color=type,fill=type))+
  geom_histogram(position="dodge",alpha=0.5)+
  theme(legend.position="bottom")+
  theme(legend.title=element_blank())+
  facet_grid(~metric,scales = "free")
# Mf.plot<-ggplot(sss.M.f,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
# Mm.plot<-ggplot(sss.M.m,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
# h.plot<-ggplot(sss.h,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
# Dep.plot<-ggplot(sss.Dep,aes(x=value,color=type))+geom_histogram(position="dodge",alpha=0.5,fill="white")
} else{return(NULL)}
})
# Prior vs. posterior histograms for growth parameters, by sex.
output$SSS_growth_priors_post<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
sss.L1_f<-rbind(data.frame(value=SSS.out$Prior$L1_f,type="prior",metric="Female L1"),data.frame(value=SSS.out$Post$L1_f,type="post",metric="Female L1"))
sss.Linf_f<-rbind(data.frame(value=SSS.out$Prior$Linf_f,type="prior",metric="Female Linf"),data.frame(value=SSS.out$Post$Linf_f,type="post",metric="Female Linf"))
sss.k_f<-rbind(data.frame(value=SSS.out$Prior$k_f,type="prior",metric="Female k"),data.frame(value=SSS.out$Post$k_f,type="post",metric="Female k"))
sss.L1_m<-rbind(data.frame(value=SSS.out$Prior$L1_m,type="prior",metric="Male L1"),data.frame(value=SSS.out$Post$L1_m,type="post",metric="Male L1"))
sss.Linf_m<-rbind(data.frame(value=SSS.out$Prior$Linf_m,type="prior",metric="Male Linf"),data.frame(value=SSS.out$Post$Linf_m,type="post",metric="Male Linf"))
sss.k_m<-rbind(data.frame(value=SSS.out$Prior$k_m,type="prior",metric="Male k"),data.frame(value=SSS.out$Post$k_m,type="post",metric="Male k"))
sss.vals.growth.out<-rbind(sss.L1_f,sss.Linf_f,sss.k_f,sss.L1_m,sss.Linf_m,sss.k_m)
ggplot(sss.vals.growth.out,aes(x=value,color=type,fill=type))+
  geom_histogram(position="dodge",alpha=0.5)+
  theme(legend.position="bottom")+
  theme(legend.title=element_blank())+
  facet_wrap(~metric,scales = "free")
} else{return(NULL)}
})
# OFL boxplots by forecast year.
output$SSS_OFL_plot<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
ofl.years<-as.numeric(unique(melt(SSS.out$OFL)$Var2))
ggplot(melt(SSS.out$OFL),aes(Var2,value,group=Var2))+
  geom_boxplot(fill="#236192")+
  scale_x_continuous(breaks=ofl.years,labels=as.character(ofl.years))+
  ylab("OFL (mt)")+
  xlab("Year")
} else{return(NULL)}
})
# ABC boxplots by forecast year.
output$SSS_ABC_plot<-renderPlot({
if(exists(load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))))
{
load(paste0("Scenarios/",input$Scenario_name,"/SSS_out.DMP"))
abc.years<-as.numeric(unique(melt(SSS.out$ABC)$Var2))
ggplot(melt(SSS.out$ABC),aes(Var2,value,group=Var2))+
  geom_boxplot(fill="#658D1B")+
  scale_x_continuous(breaks=abc.years,labels=as.character(abc.years))+
  ylab("ABC (mt)")+
  xlab("Year")
} else{return(NULL)}
})
}
remove_modal_spinner()
})
###############
### END SSS ###
###############
##################################################################
### PREPARE FILES and RUN Length and Age-based Stock Synthesis ###
##################################################################
SS.file.update<-observeEvent(input$run_SS,{
# if(is.null(inFile) | !anyNA(inp$
# styr,ndyr,
# input$Nages,
# input$M_f,
# input$k_f,
# input$Linf_f,
# input$t0_f,
# input$L50_f,
# input$L95_f,
# input$M_m,
# input$k_m,
# input$Linf_m,
# input$t0_m,
# input$L50_m,
# input$L95_m,
# ))
# {
updateTabsetPanel(session, "tabs", selected = '1')
# progress <- shiny::Progress$new(session, min=1, max=2)
# on.exit(progress$close())
# progress$set(message = 'Model run in progress',
# detail = '')
# for (i in 1:2) {
# progress$set(value = i)
# Sys.sleep(0.5)
# }
# Fresh (non-user) model: rebuild the scenario folder from the packaged template files.
if(!any(input$use_par,input$use_datanew,input$use_controlnew,input$user_model))
#if(which(c(input$use_par,input$use_datanew,input$use_datanew_user,input$use_controlnew,input$use_controlnew_user,input$user_model))!=0)
{
#Copy and move files
if(file.exists(paste0("Scenarios/",input$Scenario_name)))
{
unlink(paste0("Scenarios/",input$Scenario_name),recursive=TRUE) #Deletes previous run
# file.remove(paste0(getwd(),"/Scenarios/",input$Scenario_name))
}
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){
file.copy(paste0("SS_LO_F_files"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE)
file.rename(paste0("Scenarios/SS_LO_F_files"), paste0("Scenarios/",input$Scenario_name))
} else{
file.copy(paste0("SS_LB_files"),paste0("Scenarios"),recursive=TRUE,overwrite=TRUE)
file.rename(paste0("Scenarios/SS_LB_files"), paste0("Scenarios/",input$Scenario_name))
}
}
# if(!input$use_customfile)
# {
# }
#Read data and control files
if(!input$user_model)
{
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/datafile.dat"))
ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/controlfile.ctl"),use_datlist = TRUE, datlist=data.file)
}
if(input$use_datanew)
{
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))
}
if(input$use_controlnew)
{
data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new"))
ctl.file<-SS_readctl(paste0("Scenarios/",input$Scenario_name,"/control.ss_new"),use_datlist = TRUE, datlist=data.file)
}
# data.file<-SS_readdat(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.dat"))
# ctl.file<-SS_readctl(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.ctl"),use_datlist = TRUE, datlist=data.file)
#if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data))
# {
# data.file<-SS_readdat(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.dat"))
# ctl.file<-SS_readctl(paste0(getwd(),"/Scenarios/",input$Scenario_name,"/SS_LB.ctl"),use_datlist = TRUE, datlist=data.file)
# }
if(!input$user_model)
{
#Prepare inputs to evaluate any errors
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
Sel50_phase<-as.numeric(trimws(unlist(strsplit(input$Sel50_phase,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
Selpeak_phase<-as.numeric(trimws(unlist(strsplit(input$Selpeak_phase,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1]
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
#sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase)
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase))
Nfleets<-max(ncol(rv.Ct$data)-1,rv.Lt$data[,3],rv.Age$data[,3],rv.Index$data[,3])
if(input$Sel_choice=="Dome-shaped")
{
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))
PeakDesc_phase<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_phase,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))
LtPeakFinal_phase<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_phase,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))
FinalSel_phase<-as.numeric(trimws(unlist(strsplit(input$FinalSel_phase,","))))
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase),length(PeakDesc),length(PeakDesc_phase),length(LtPeakFinal),length(LtPeakFinal_phase),length(FinalSel),length(FinalSel_phase))
}
#Search for errors in inputs
#Throw warning if not enough selectivity inputs: every per-fleet list must have Nfleets entries.
if(!all(Nfleets==sel.inputs.lts))
{
sendSweetAlert(
session = session,
title = "Selectivity input warning",
text = "Please check to see if you have provided filled in the inputs correctly. Especially check selectivity for missing fleets (both in parameter and phases). Total fleets includes fishing fleets and surveys.",
type = "error")
remove_modal_spinner()
}
if(all(Nfleets==sel.inputs.lts))
{
checkmod<-1 #add object to verify no errors in inputs and model can be run
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress")
if(!input$use_par)
{
if(all(!input$use_datanew,!input$user_model))
{
#Read, edit then write new DATA file
data.file$styr<-input$styr
data.file$endyr<-input$endyr
data.file$Nages<-Nages()
if(!is.null(rv.Ct$data)){catch.fleets<-max(ncol(rv.Ct$data)-1)}
if(all(!is.null(rv.Lt$data),is.null(rv.Ct$data))){catch.fleets<-max(rv.Lt$data[,3])}
data.file$Nfleets<-max(catch.fleets,rv.Lt$data[,3],rv.Age$data[,3],rv.Index$data[,3])
#########
#Catches#
#########
# No catch data: build a placeholder constant-catch series (length-only mode).
if (is.null(rv.Ct$data))
{
#inFile<- rv.Lt$data
Lt.comp.data<-rv.Lt$data
Age.comp.data<- rv.Age$data
#data.file$Nfleets<-max(Lt.comp.data[,2],Age.comp.data[,2])
if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info[4:5]<-c(input$styr,input$endyr)}
if(data.file$Nfleets>1){
for(i in 1:(data.file$Nfleets-1))
{
if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info<-rbind(data.file$bycatch_fleet_info,data.file$bycatch_fleet_info[1,])}
}
if(input$Ct_F_LO_select=="Estimate F"){data.file$bycatch_fleet_info[,1]<-c(1:data.file$Nfleets)}
}
year.in<-input$styr:input$endyr
catch.cols<-colnames(data.file$catch)
catch_temp<-list()
# Split a nominal total of 1000 across fleets by the user weights.
if(catch.fleets==1){catch.level<-1000}
if(catch.fleets>1){
catch.level<-as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,","))))
catch.level<-catch.level/sum(catch.level)*1000
}
for(i in 1:catch.fleets)
{
catch_temp[[i]]<-data.frame(
c(-999,year.in),
rep(1,length(year.in)+1),
rep(i,length(year.in)+1),
c(catch.level[i],rep(catch.level[i],length(year.in))),
c(0.01,rep(1000,length(year.in)))
)
}
data.file$catch<-list.rbind(catch_temp)
colnames(data.file$catch)<-catch.cols
}
# Catch data supplied by the user: one column per fleet after the year column.
if(!is.null(rv.Ct$data))
{
Catch.data<-rv.Ct$data
#data.file$Nfleets<-max(ncol(Catch.data)-1,data.file$Nfleets)
year.in<-Catch.data[,1]
catch.cols<-colnames(data.file$catch)
catch_temp<-list()
for(i in 1:catch.fleets)
{
catch_temp[[i]]<-data.frame(
c(-999,year.in),
rep(1,length(year.in)+1),
rep(i,length(year.in)+1),
c(0.00000000000000000001,Catch.data[,i+1]),
rep(0.01,length(year.in)+1)
)
}
data.file$catch<-list.rbind(catch_temp)
colnames(data.file$catch)<-catch.cols
}
#Index data
if (!is.null(rv.Index$data))
{
Index.data<-rv.Index$data
data.file$N_cpue<-unique(rv.Index$data[,3])
data.file$CPUE<-data.frame(year=rv.Index$data[,1],seas=rv.Index$data[,2],index=rv.Index$data[,3],obs=rv.Index$data[,4],se_log=rv.Index$data[,5])
}
#########################
#Length composition data#
#########################
#Population length data bins
data.file$binwidth<-2
if(!is.null(rv.Lt$data)){data.file$binwidth<-as.numeric(colnames(rv.Lt$data)[7])-as.numeric(colnames(rv.Lt$data)[6])}
data.file$minimum_size<-2
if(!is.null(rv.Lt$data)){data.file$minimum_size<-as.numeric(colnames(rv.Lt$data)[6])}
max.bin.in<-2*(round((Linf()+(Linf()*0.25))/2))+2 #0.2326
data.file$maximum_size<-max.bin.in
# NOTE(review): the advance-options guard below is commented out, so these three
# inputs unconditionally overwrite the defaults computed above -- confirm intended.
# if(input$advance_ss_click)
# {
data.file$binwidth<-input$lt_bin_size
data.file$minimum_size<-input$lt_min_bin
data.file$maximum_size<-input$lt_max_bin
# }
#inFile<- rv.Lt$data
# No length data: derive bins from Linf and clear the length comps.
if (is.null(rv.Lt$data))
{
if(input$est_parms==FALSE){Linf_bins<-input$Linf_f_fix}
if(input$est_parms==TRUE){Linf_bins<-input$Linf_f_mean}
data.file$binwidth<-2
data.file$minimum_size<-2
max.bin.in<-2*(round((Linf()+(Linf()*0.25))/2))+2 #0.2326
data.file$maximum_size<-max.bin.in
data.file$lbin_vector<-seq(data.file$minimum_size,data.file$maximum_size,data.file$binwidth)
data.file$N_lbins<-length(data.file$lbin_vector)
data.file$lencomp<-NULL
}
# Length data supplied: columns 1-5 are Year/Month/Fleet/Sex/Nsamps; remaining
# columns are length bins. Build SS lencomp rows with female bins first, male second.
if (!is.null(rv.Lt$data))
{
Lt.comp.data<-rv.Lt$data
data.file$N_lbins<-ncol(Lt.comp.data)-5
data.file$lbin_vector<-as.numeric(colnames(rv.Lt$data)[6:ncol(rv.Lt$data)]) #as.numeric(colnames(Lt.comp.data[,5:ncol(Lt.comp.data)]))
if(data.file$maximum_size<max(data.file$lbin_vector)){data.file$maximum_size<-(2*round(max(data.file$lbin_vector)/2))+2}
lt.data.names<-c(colnames(data.file$lencomp[,1:6]),paste0("f",data.file$lbin_vector),paste0("m",data.file$lbin_vector))
lt.data.females<-lt.data.males<-lt.data.unknowns<-lt.data.sex3<-data.frame(matrix(rep(NA,length(lt.data.names)),nrow=1))
colnames(Lt.comp.data)[1:5]<-c("Year","Month","Fleet","Sex","Nsamps")
#female lengths: observed comps in the female bins, zeros in the male bins
if(nrow(subset(Lt.comp.data,Sex==1))>0){
Lt.comp.data_female<-subset(Lt.comp.data,Sex==1 & Nsamps>0)
samp.yrs<-Lt.comp.data_female[,1]
lt.data.females<-data.frame(cbind(samp.yrs,
Lt.comp.data_female[,2],
Lt.comp.data_female[,3],
Lt.comp.data_female[,4],
rep(0,length(samp.yrs)),
Lt.comp.data_female[,5],
Lt.comp.data_female[,6:ncol(Lt.comp.data_female)],
Lt.comp.data_female[,6:ncol(Lt.comp.data_female)]*0)
)
}
#male lengths: zeros in the female bins, observed comps in the male bins
if(nrow(subset(Lt.comp.data,Sex==2))>0){
Lt.comp.data_male<-subset(Lt.comp.data,Sex==2 & Nsamps>0)
samp.yrs_males<-Lt.comp.data_male[,1]
lt.data.males<-data.frame(cbind(samp.yrs_males,
Lt.comp.data_male[,2],
Lt.comp.data_male[,3],
Lt.comp.data_male[,4],
rep(0,length(samp.yrs_males)),
Lt.comp.data_male[,5],
Lt.comp.data_male[,6:ncol(Lt.comp.data_male)]*0,
Lt.comp.data_male[,6:ncol(Lt.comp.data_male)])
)
}
#unknown sex lengths
if(nrow(subset(Lt.comp.data,Sex==0))>0){
Lt.comp.data_unknown<-subset(Lt.comp.data,Sex==0 & Nsamps>0)
samp.yrs_unknown<-Lt.comp.data_unknown[,1]
lt.data.unknowns<-data.frame(cbind(samp.yrs_unknown,
Lt.comp.data_unknown[,2],
Lt.comp.data_unknown[,3],
Lt.comp.data_unknown[,4],
rep(0,length(samp.yrs_unknown)),
Lt.comp.data_unknown[,5],
Lt.comp.data_unknown[,6:ncol(Lt.comp.data_unknown)],
Lt.comp.data_unknown[,6:ncol(Lt.comp.data_unknown)]*0)
)
}
#Maintain sample sex ratio: merge year/fleet-matched female and male rows into sex=3 rows.
if(input$Sex3){
yrsfleet_females<-paste0(Lt.comp.data_female[,1],Lt.comp.data_female[,3])
yrsfleet_males<-paste0(Lt.comp.data_male[,1],Lt.comp.data_male[,3])
#Match years
#samp.yrs_sex3<-samp.yrs_females[match(samp.yrs_males,samp.yrs_females)]
sex3_match_female<-yrsfleet_females%in%yrsfleet_males
sex3_match_male<-yrsfleet_males%in%yrsfleet_females
#Subset years
Lt.comp.data_female_sex3<-Lt.comp.data_female[sex3_match_female,]
Lt.comp.data_male_sex3<-Lt.comp.data_male[sex3_match_male,]
# NOTE(review): the combined sample size below adds female column 5 to male column 4;
# if Nsamps is column 5 in both, the male term should likely be [,5] -- confirm.
lt.data.sex3<-data.frame(cbind(Lt.comp.data_female_sex3[,1],
Lt.comp.data_female_sex3[,2],
Lt.comp.data_female_sex3[,3],
rep(3,nrow(Lt.comp.data_female_sex3)),
rep(0,nrow(Lt.comp.data_female_sex3)),
Lt.comp.data_female_sex3[,5]+Lt.comp.data_male_sex3[,4],
Lt.comp.data_female_sex3[,6:ncol(Lt.comp.data_female_sex3)],
Lt.comp.data_male_sex3[,6:ncol(Lt.comp.data_male_sex3)])
)
lt.data.females<-lt.data.females[!sex3_match_female,]
lt.data.males<-lt.data.males[!sex3_match_male,]
}
colnames(lt.data.females)<-colnames(lt.data.males)<-colnames(lt.data.unknowns)<-colnames(lt.data.sex3)<-lt.data.names
data.file$lencomp<-na.omit(rbind(lt.data.unknowns,lt.data.females,lt.data.males,lt.data.sex3))
}
#}
#else{
# data.file$lencomp<-data.frame(matrix(cbind(samp.yrs,
# rep(1,length(samp.yrs)),
# rep(1,length(samp.yrs)),
# rep(1,length(samp.yrs)),
# rep(0,length(samp.yrs)),
# colSums(Lt.comp.data[-1]),
# t(Lt.comp.data)[-1,],
# t(Lt.comp.data)[-1,]*0),
# nrow=length(samp.yrs),
# ncol=6+length(Lt.comp.data[,1])*2,
# byrow=FALSE))[,,drop=FALSE]
# }
# colnames(data.file$lencomp)<-lt.data.names
######################
#Age composition data#
######################
Age.comp.data<-rv.Age$data
# No age data: default age bins 0..Nages-1 and a near-zero ageing-error matrix.
if (is.null(Age.comp.data))
{
data.file$N_agebins<-Nages()
data.file$agebin_vector<-0:(Nages()-1)
data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
colnames(data.file$ageerror)<-paste0("age",0:Nages())
}
# Age data supplied: columns 1-8 are Year/Month/Fleet/Sex/AgeErr/Lbin_low/Lbin_hi/Nsamps.
if (!is.null(Age.comp.data))
{
data.file$N_agebins<-ncol(Age.comp.data)-8
data.file$agebin_vector<-as.numeric(colnames(Age.comp.data[,9:ncol(Age.comp.data)]))
data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
if(!is.null(input$Ageing_error_choice)){
if(input$Ageing_error_choice)
{
data.file$ageerror<-data.frame((rv.AgeErr$data))
data.file$N_ageerror_definitions<-nrow(rv.AgeErr$data)/2
}
}
#Label object for r4ss
colnames(data.file$ageerror)<-paste0("age",0:Nages())
rownames(data.file$ageerror)<-c(1:nrow(data.file$ageerror))
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
age.data.names<-c(c("Yr","Month","Fleet","Sex","Part","Ageerr","Lbin_lo","Lbin_hi","Nsamp"),paste0("f",data.file$agebin_vector),paste0("m",data.file$agebin_vector))
age.data.females<-age.data.males<-age.data.unknowns<-data.frame(matrix(rep(NA,length(age.data.names)),nrow=1))
colnames(Age.comp.data)[1:8]<-c("Year","Month","Fleet","Sex","AgeErr","Lbin_low","Lbin_hi","Nsamps")
#female ages
if(nrow(subset(Age.comp.data,Sex==1))>0){
Age.comp.data_female<-subset(Age.comp.data,Sex==1 & Nsamps>0)
samp.yrs_females<-Age.comp.data_female[,1]
age.data.females<-data.frame(cbind(samp.yrs_females,
Age.comp.data_female[,2],
Age.comp.data_female[,3],
Age.comp.data_female[,4],
rep(0,length(samp.yrs_females)),
Age.comp.data_female[,5],
Age.comp.data_female[,6],
Age.comp.data_female[,7],
Age.comp.data_female[,8],
Age.comp.data_female[,9:ncol(Age.comp.data_female)],
Age.comp.data_female[,9:ncol(Age.comp.data_female)]*0)
)
}
#male ages
if(nrow(subset(Age.comp.data,Sex==2))>0){
Age.comp.data_male<-subset(Age.comp.data,Sex==2 & Nsamps>0)
samp.yrs_males<-Age.comp.data_male[,1]
age.data.males<-data.frame(cbind(samp.yrs_males,
Age.comp.data_male[,2],
Age.comp.data_male[,3],
Age.comp.data_male[,4],
rep(0,length(samp.yrs_males)),
Age.comp.data_male[,5],
Age.comp.data_male[,6],
Age.comp.data_male[,7],
Age.comp.data_male[,8],
Age.comp.data_male[,9:ncol(Age.comp.data_male)]*0,
Age.comp.data_male[,9:ncol(Age.comp.data_male)])
)
}
#unknown sex ages
if(nrow(subset(Age.comp.data,Sex==0))>0){
Age.comp.data_unknown<-subset(Age.comp.data,Sex==0 & Nsamps>0)
samp.yrs_unknown<-Age.comp.data_unknown[,1]
age.data.unknowns<-data.frame(cbind(samp.yrs_unknown,
Age.comp.data_unknown[,2],
Age.comp.data_unknown[,3],
Age.comp.data_unknown[,4],
rep(0,length(samp.yrs_unknown)),
Age.comp.data_unknown[,5],
Age.comp.data_unknown[,6],
Age.comp.data_unknown[,7],
Age.comp.data_unknown[,8],
Age.comp.data_unknown[,9:ncol(Age.comp.data_unknown)],
Age.comp.data_unknown[,9:ncol(Age.comp.data_unknown)]*0)
)
}
#if(nrow(subset(Age.comp.data,Sex==0))>0){age.data.unknowns<-data.frame(cbind(
# age.data.unknowns,
# Age.comp.data[1,7:ncol(Age.comp.data_unknown)],
# Age.comp.data[1,7:ncol(Age.comp.data_unknown)]*0))
# }
colnames(age.data.females)<-colnames(age.data.males)<-colnames(age.data.unknowns)<-age.data.names
data.file$agecomp<-na.omit(rbind(age.data.females,age.data.males,age.data.unknowns))
}
# inFile_age<- rv.Age$data
# if (is.null(inFile_age)){
# data.file$N_agebins<-Nages()
# data.file$agebin_vector<-1:Nages()
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(Nages()+1)),rep(0.001,(Nages()+1))),2,(Nages()+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
# }
# if (!is.null(inFile_age)){
# Age.comp.data<-rv.Age$data
# age.classes<-nrow(Age.comp.data)
# data.file$N_agebins<-age.classes
# data.file$agebin_vector<-Age.comp.data[,1]
# data.file$ageerror<-data.frame(matrix(c(rep(-1,(age.classes+1)),rep(0.001,(age.classes+1))),2,(age.classes+1),byrow=TRUE))
# colnames(data.file$ageerror)<-paste0("age",1:Nages())
# age.samp.yrs<-as.numeric(colnames(Age.comp.data)[-1])
# age.data.names<-c(c("Yr","Seas","FltSvy","Gender","Part","Ageerr","Lbin_lo","Lbin_hi","Nsamp"),paste0("f",Age.comp.data[,1]),paste0("m",Age.comp.data[,1]))
# if(length(age.samp.yrs)==1){
# data.file$agecomp<-data.frame(matrix(c(samp.yrs,
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(0,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# colSums(Age.comp.data[-1]),
# t(Age.comp.data)[-1,],
# t(Age.comp.data)[-1,]*0),
# nrow=length(age.samp.yrs),
# ncol=9+length(Age.comp.data[,1])*2,
# byrow=FALSE))[,,drop=FALSE]
# }
# else{
# data.file$agecomp<-data.frame(matrix(cbind(samp.yrs,
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(1,length(age.samp.yrs)),
# rep(0,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# rep(-1,length(age.samp.yrs)),
# colSums(Age.comp.data[-1]),
# t(Age.comp.data)[-1,],
# t(Age.comp.data)[-1,]*0),
# nrow=length(age.samp.yrs),
# ncol=9+length(Age.comp.data[,1])*2,
# byrow=FALSE))[,,drop=FALSE]
# }
# colnames(data.file$agecomp)<-age.data.names
# }
#Create data info: replicate the template fleet rows so each fleet has an entry.
if(data.file$Nfleets>1){
for(i in 1:(data.file$Nfleets-1))
{
data.file$fleetinfo<-rbind(data.file$fleetinfo,data.file$fleetinfo[1,])
data.file$CPUEinfo<-rbind(data.file$CPUEinfo,data.file$CPUEinfo[1,])
data.file$len_info<-rbind(data.file$len_info,data.file$len_info[1,])
data.file$age_info<-rbind(data.file$age_info,data.file$age_info[1,])
}
#Set Dirichlet on
# data.file$age_info[,5]<-data.file$len_info[,5]<-1
#Set up the correct fleet enumeration
# data.file$len_info[,6]<-1:data.file$Nfleets #Used for Dirichlet set-up
# data.file$age_info[,6]<-(data.file$Nfleets+1):(2*data.file$Nfleets) #Used for Dirichlet set-up
#Survey names
if(is.null(rv.Ct$data)){data.file$fleetinfo$fleetname<-paste0("Fishery",1:data.file$Nfleets)}
if(!is.null(rv.Ct$data))
{
# Fishery fleet names come from the catch-file column headers (spaces stripped).
fishery.names<-gsub(" ","",colnames(rv.Ct$data)[-1])
# Surveys beyond the catch fleets: append their names and mark them type 3 (survey).
if(!is.null(rv.Index$data)&data.file$Nfleets>catch.fleets)
{
Surveyonly<-subset(rv.Index$data,Fleet>catch.fleets)
fleet.survey.names<-unique(c(fishery.names,unique(Surveyonly[,6])))
survey.fleets<-unique(Surveyonly[,3])
data.file$fleetinfo$fleetname<-fleet.survey.names
}
if(is.null(rv.Index$data)|all(!is.null(rv.Index$data)&data.file$Nfleets==catch.fleets)){data.file$fleetinfo$fleetname<-fishery.names}
# NOTE(review): survey.fleets is only defined inside the branch above; confirm this
# condition cannot be reached without it.
if(!is.null(rv.Index$data)& max(rv.Index$data[,3])>length(fishery.names)){data.file$fleetinfo[survey.fleets,1]<-3}
}
data.file$CPUEinfo[,1]<-1:data.file$Nfleets
}
# Recruitment survey special case: CPUE units code 34 for a fleet named "RSS".
# NOTE(review): fleet.survey.names is defined only in the rv.Ct branch above -- confirm.
if(!is.null(rv.Index$data)&data.file$Nfleets>catch.fleets)
{
if(any(fleet.survey.names=="RSS"))
{
data.file$CPUEinfo[grep("RSS",fleet.survey.names),2]<-34
}
}
#Change survey timing to 1
data.file$fleetinfo$surveytiming[data.file$fleetinfo$type%in%3]<-1
#Catch units
if(input$Ct_units_choice)
{
ct.units<-as.numeric(trimws(unlist(strsplit(input$fleet_ct_units,","))))
#data.file$fleetinfo[ct.units,4]<-2 #use this when just specifying which are fleets are numbers
data.file$fleetinfo[,4]<-ct.units
}
SS_writedat(data.file,paste0("Scenarios/",input$Scenario_name,"/datafile.dat"),overwrite=TRUE)
}
####################### END DATA FILE #####################################
##################################################################################
####################### START CTL FILE ####################################
#Read, edit then write new CONTROL file
if(all(!input$use_controlnew,!input$user_model))
{
#Change to 1 platoon
if(!is.null(input$GT1)){if(input$GT1){ctl.file$N_platoon<-1}}
#LENGTH or AGE-ONLY: composition/index data present but no catches.
if(all(!is.null(c(rv.Lt$data,rv.Age$data,rv.Index$data)),is.null(rv.Ct$data))==TRUE)
{
fem_vbgf<-VBGF(input$Linf_f,input$k_f,input$t0_f,c(0:Nages()))
#Females (MG_parms columns: 3 = INIT, 4 = PRIOR)
ctl.file$MG_parms[1,3]<-input$M_f #M
#ctl.file$MG_parms[2,3:4]<-fem_vbgf[1] #L0
ctl.file$Growth_Age_for_L1<-input$t0_f
ctl.file$MG_parms[2,3:4]<-0 #L0
ctl.file$MG_parms[3,3:4]<-input$Linf_f #Linf
ctl.file$MG_parms[4,3:4]<-input$k_f #k
ctl.file$MG_parms[5,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[1] #CV young
ctl.file$MG_parms[6,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[2] #CV old
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f-input$L50_f) #Maturity slope
#ctl.file$MG_parms[11,3:4]<-input$Fec_a_f #coefficient
#ctl.file$MG_parms[12,3:4]<- input$Fec_b_f #exponent
#Males (default: mirror females; overridden below when requested)
ctl.file$MG_parms[13,3]<-input$M_f #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[1] #CV young
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f,","))))[2] #CV old
#ctl.file$MG_parms[19,3:4]<-input$WLa_f #coefficient
#ctl.file$MG_parms[20,3:4]<-input$WLb_f #exponent
if(input$male_offset)
{
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13,3:4]<-0 #M
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-0 #Linf
ctl.file$MG_parms[16,3:4]<-0 #k
ctl.file$MG_parms[17,3:4]<-0 #CV young
ctl.file$MG_parms[18,3:4]<-0 #CV old
#Weight-length
ctl.file$MG_parms[19,3:4]<-0 #coefficient
ctl.file$MG_parms[20,3:4]<-0 #exponent
}
if(input$male_parms)
{
male_vbgf<-VBGF(input$Linf_m,input$k_m,input$t0_m,c(input$t0_f:Nages()))
ctl.file$MG_parms[13,3]<-input$M_m #M
ctl.file$MG_parms[14,3:4]<-male_vbgf[1] #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_m #Linf
ctl.file$MG_parms[16,3:4]<-input$k_m #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m,","))))[1] #CV young
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m,","))))[2] #CV old
# ctl.file$MG_parms[19,3:4]<-input$WLa_m #coefficient
# ctl.file$MG_parms[20,3:4]<-input$WLb_m #exponent
}
# lnR0 estimation phase: -1 fixes lnR0 when estimating F; 1 estimates it under constant catch.
if(input$Ct_F_LO_select=="Estimate F"){ctl.file$SR_parms[1,7]=-1} #lnR0
if(input$Ct_F_LO_select=="Constant Catch"){ctl.file$SR_parms[1,7]=1}
#lnR0
ctl.file$SR_parms[2,3:4]<-input$h_LO #steepnes
}
#LENGTH and CATCH with fixed parameters
if(all(any(input$est_parms==FALSE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))==TRUE)
{
fem_vbgf<-VBGF(input$Linf_f_fix,input$k_f_fix,input$t0_f_fix,c(0:Nages()))
#Females
ctl.file$MG_parms[1,3]<-input$M_f_fix #M
#ctl.file$MG_parms[2,3:4]<-fem_vbgf[1] #L0
ctl.file$Growth_Age_for_L1<-input$t0_f_fix
ctl.file$MG_parms[2,3:4]<-0 #L0
ctl.file$MG_parms[3,3:4]<-input$Linf_f_fix #Linf
ctl.file$MG_parms[4,3:4]<-input$k_f_fix #k
ctl.file$MG_parms[5,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[1] #CV young
ctl.file$MG_parms[6,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[2] #CV old
#Weight-length
ctl.file$MG_parms[7,3:4]<-input$WLa_f_fix #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_fix #exponent
#Maturity
ctl.file$MG_parms[9,3:4]<-input$L50_f_fix #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_fix-input$L50_f_fix) #Maturity slope
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_fix #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_fix #exponent
#Males (default: mirror females)
ctl.file$MG_parms[13,3]<-input$M_f_fix #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_f_fix #Linf
ctl.file$MG_parms[16,3:4]<-input$k_f_fix #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[1] #CV young
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_f_fix,","))))[2] #CV old
ctl.file$MG_parms[19,3:4]<-input$WLa_f_fix #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_f_fix #exponent
if(input$male_offset_fix)
{
ctl.file$parameter_offset_approach<-2 #Change to offset approach
ctl.file$MG_parms[13,3:4]<-0 #M
ctl.file$MG_parms[14,3:4]<-0 #L0
ctl.file$MG_parms[15,3:4]<-0 #Linf
ctl.file$MG_parms[16,3:4]<-0 #k
ctl.file$MG_parms[17,3:4]<-0 #CV young
ctl.file$MG_parms[18,3:4]<-0 #CV old
#Weight-length
ctl.file$MG_parms[19,3:4]<-0 #coefficient
ctl.file$MG_parms[20,3:4]<-0 #exponent
}
if(input$male_parms_fix)
{
male_vbgf<-VBGF(input$Linf_m_fix,input$k_m_fix,input$t0_m_fix,c(input$t0_f_fix:Nages()))
ctl.file$MG_parms[13,3]<-input$M_m_fix #M
ctl.file$MG_parms[14,3:4]<-male_vbgf[1] #L0
ctl.file$MG_parms[15,3:4]<-input$Linf_m_fix #Linf
ctl.file$MG_parms[16,3:4]<-input$k_m_fix #k
ctl.file$MG_parms[17,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m_fix,","))))[1] #CV young
ctl.file$MG_parms[18,3:4]<-as.numeric(trimws(unlist(strsplit(input$CV_lt_m_fix,","))))[2] #CV old
#Weight-length
ctl.file$MG_parms[19,3:4]<-input$WLa_m_fix #coefficient
ctl.file$MG_parms[20,3:4]<-input$WLb_m_fix #exponent
}
#S-R
ctl.file$SR_parms[1,3:4]<-input$lnR0 #lnR0
ctl.file$SR_parms[2,3:4]<-input$h #steepnes
}
#LENGTH and CATCH with estimated parameters
if(all(any(input$est_parms==TRUE,input$est_parms2==FALSE),any(all(!is.null(rv.Lt$data),!is.null(rv.Ct$data)),all(!is.null(rv.Age$data),!is.null(rv.Ct$data)),all(!is.null(rv.Index$data),!is.null(rv.Ct$data))))==TRUE)
{
fem_vbgf<-VBGF(input$Linf_f_mean,input$k_f_mean,input$t0_f_mean,c(0:Nages()))
#c("lognormal","truncated normal","uniform","beta")
# SS3 prior type codes used in MG_parms column 6.
prior.name<-c("no prior","symmetric beta", "beta","lognormal","gamma","normal")
prior.type<-c(0:3,5,6)
#Females
#M: columns 3:4 = INIT/PRIOR, 5 = prior SD, 6 = prior type, 7 = estimation phase.
if(input$M_f_prior=="lognormal"){ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean,log(input$M_f_mean))} else {ctl.file$MG_parms[1,3:4]<-c(input$M_f_mean,input$M_f_mean)}
ctl.file$MG_parms[1,5]<-input$M_f_SD
ctl.file$MG_parms[1,6]<-prior.type[prior.name==input$M_f_prior]
ctl.file$MG_parms[1,7]<-input$M_f_phase
#L0
ctl.file$Growth_Age_for_L1<-input$t0_f_mean
# if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(fem_vbgf[1],log(fem_vbgf[1]))}
# else {ctl.file$MG_parms[2,3:4]<-fem_vbgf[1]}
if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[2,3:4]<-c(0,log(0.0000001))} else {ctl.file$MG_parms[2,3:4]<-0}
ctl.file$MG_parms[2,5]<-input$t0_f_SD
#t0/L1 prior type and phase (females)
ctl.file$MG_parms[2,6]<-prior.type[prior.name==input$t0_f_prior]
ctl.file$MG_parms[2,7]<-input$t0_f_phase
#Linf: columns 3:4 are (INIT, PRIOR); a lognormal prior stores the prior mean on the log scale
if(input$Linf_f_prior=="lognormal"){ctl.file$MG_parms[3,3:4]<-c(input$Linf_f_mean,log(input$Linf_f_mean))} else{ctl.file$MG_parms[3,3:4]<-input$Linf_f_mean}
ctl.file$MG_parms[3,5]<-input$Linf_f_SD
ctl.file$MG_parms[3,6]<-prior.type[prior.name==input$Linf_f_prior]
ctl.file$MG_parms[3,7]<-input$Linf_f_phase
#k
if(input$k_f_prior=="lognormal"){ctl.file$MG_parms[4,3:4]<-c(input$k_f_mean,log(input$k_f_mean))} else {ctl.file$MG_parms[4,3:4]<-input$k_f_mean}
ctl.file$MG_parms[4,5]<-input$k_f_SD
ctl.file$MG_parms[4,6]<-prior.type[prior.name==input$k_f_prior]
ctl.file$MG_parms[4,7]<-input$k_f_phase
#CV of length at young ages
if(input$CV_lt_f_young_prior=="lognormal"){ctl.file$MG_parms[5,3:4]<-c(input$CV_lt_f_young_mean,log(input$CV_lt_f_young_mean))} else{ctl.file$MG_parms[5,3:4]<-input$CV_lt_f_young_mean}
ctl.file$MG_parms[5,5]<-input$CV_lt_f_young_SD
ctl.file$MG_parms[5,6]<-prior.type[prior.name==input$CV_lt_f_young_prior]
ctl.file$MG_parms[5,7]<-input$CV_lt_f_young_phase
#CV of length at old ages
if(input$CV_lt_f_old_prior=="lognormal"){ctl.file$MG_parms[6,3:4]<-c(input$CV_lt_f_old_mean,log(input$CV_lt_f_old_mean))} else{ctl.file$MG_parms[6,3:4]<-input$CV_lt_f_old_mean}
#BUG FIX(review): removed a stray duplicate assignment that unconditionally reset
#MG_parms[6,3:4] to the raw mean, clobbering the lognormal branch above
#(the CV-young block for row 5 has no such line).
ctl.file$MG_parms[6,5]<-input$CV_lt_f_old_SD
ctl.file$MG_parms[6,6]<-prior.type[prior.name==input$CV_lt_f_old_prior]
ctl.file$MG_parms[6,7]<-input$CV_lt_f_old_phase
#Weight-length (females)
ctl.file$MG_parms[7,3:4]<-input$WLa_f_est #coefficient
ctl.file$MG_parms[8,3:4]<- input$WLb_f_est #exponent
#Maturity (females): slope derived from L50/L95 logistic form
ctl.file$MG_parms[9,3:4]<-input$L50_f_est #Lmat50%
ctl.file$MG_parms[10,3:4]<- log(0.05/0.95)/(input$L95_f_est-input$L50_f_est) #Maturity slope
ctl.file$MG_parms[11,3:4]<-input$Fec_a_f_est #coefficient
ctl.file$MG_parms[12,3:4]<- input$Fec_b_f_est #exponent
#Males: default to the female values; overridden below when male-specific options are chosen
ctl.file$MG_parms[13,3:4]<-c(input$M_f_mean,log(input$M_f_mean)) #M
#ctl.file$MG_parms[14,3:4]<-fem_vbgf[1] #L0
ctl.file$MG_parms[14,3:4]<-0 #L0 ctl.file$MG_parms[15,3:4]<-input$Linf_f_mean #Linf ctl.file$MG_parms[16,3:4]<-input$k_f_mean #k ctl.file$MG_parms[17,3:4]<-input$CV_lt_f_old_mean #CV ctl.file$MG_parms[18,3:4]<-input$CV_lt_f_old_mean #CV #Weight-length ctl.file$MG_parms[19,3:4]<-input$WLa_f_est #coefficient ctl.file$MG_parms[20,3:4]<- input$WLb_f_est #exponent if(input$male_offset_est) { ctl.file$parameter_offset_approach<-2 #Change to offset approach ctl.file$MG_parms[13,3:4]<-0 #M ctl.file$MG_parms[14,3:4]<-0 #L0 ctl.file$MG_parms[15,3:4]<-0 #Linf ctl.file$MG_parms[16,3:4]<-0 #k ctl.file$MG_parms[17,3:4]<-0 #CV ctl.file$MG_parms[18,3:4]<-0 #CV #Weight-length ctl.file$MG_parms[19,3:4]<-0 #coefficient ctl.file$MG_parms[20,3:4]<-0 #exponent } if(input$male_parms_est) { male_vbgf_est<-VBGF(input$Linf_m_mean,input$k_m_mean,input$t0_m_mean,c(input$t0_f_mean:Nages())) # ctl.file$MG_parms[13,3]<-input$M_m_mean #M # ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1] #L0 # ctl.file$MG_parms[15,3:4]<-input$Linf_m_mean #Linf # ctl.file$MG_parms[16,3:4]<-input$k_m_mean #k # ctl.file$MG_parms[17,3:4]<-input$CV_lt_m_mean #CV # ctl.file$MG_parms[18,3:4]<-input$CV_lt_m_mean #CV #M if(input$M_m_prior=="lognormal"){ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean,log(input$M_m_mean))} else {ctl.file$MG_parms[13,3:4]<-c(input$M_m_mean,input$M_m_mean)} ctl.file$MG_parms[13,5]<-input$M_m_SD ctl.file$MG_parms[13,6]<-prior.type[prior.name==input$M_m_prior] ctl.file$MG_parms[13,7]<-input$M_m_phase #L0 #if(input$t0_f_prior=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_est[1],log(male_vbgf_est[1]+0.000000001))} #else {ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1]} if(input$t0_m_prior=="lognormal"){ctl.file$MG_parms[14,3:4]<-c(male_vbgf_est[1],log(male_vbgf_est[1]+0.000000001))} else {ctl.file$MG_parms[14,3:4]<-male_vbgf_est[1]} ctl.file$MG_parms[14,5]<-input$t0_m_SD ctl.file$MG_parms[14,6]<-prior.type[prior.name==input$t0_m_prior] ctl.file$MG_parms[14,7]<-input$t0_m_phase #Linf 
#Linf (males)
if(input$Linf_m_prior=="lognormal"){ctl.file$MG_parms[15,3:4]<-c(input$Linf_m_mean,log(input$Linf_m_mean))} else{ctl.file$MG_parms[15,3:4]<-input$Linf_m_mean}
ctl.file$MG_parms[15,5]<-input$Linf_m_SD
ctl.file$MG_parms[15,6]<-prior.type[prior.name==input$Linf_m_prior]
ctl.file$MG_parms[15,7]<-input$Linf_m_phase
#k (males)
if(input$k_m_prior=="lognormal"){ctl.file$MG_parms[16,3:4]<-c(input$k_m_mean,log(input$k_m_mean))} else {ctl.file$MG_parms[16,3:4]<-input$k_m_mean}
ctl.file$MG_parms[16,5]<-input$k_m_SD
ctl.file$MG_parms[16,6]<-prior.type[prior.name==input$k_m_prior]
ctl.file$MG_parms[16,7]<-input$k_m_phase
#CV young (males)
if(input$CV_lt_m_young_prior=="lognormal"){ctl.file$MG_parms[17,3:4]<-c(input$CV_lt_m_young_mean,log(input$CV_lt_m_young_mean))} else{ctl.file$MG_parms[17,3:4]<-input$CV_lt_m_young_mean}
ctl.file$MG_parms[17,5]<-input$CV_lt_m_young_SD
ctl.file$MG_parms[17,6]<-prior.type[prior.name==input$CV_lt_m_young_prior]
ctl.file$MG_parms[17,7]<-input$CV_lt_m_young_phase
#CV old (males)
if(input$CV_lt_m_old_prior=="lognormal"){ctl.file$MG_parms[18,3:4]<-c(input$CV_lt_m_old_mean,log(input$CV_lt_m_old_mean))} else{ctl.file$MG_parms[18,3:4]<-input$CV_lt_m_old_mean}
ctl.file$MG_parms[18,5]<-input$CV_lt_m_old_SD
ctl.file$MG_parms[18,6]<-prior.type[prior.name==input$CV_lt_m_old_prior]
ctl.file$MG_parms[18,7]<-input$CV_lt_m_old_phase
#Weight-length (males)
ctl.file$MG_parms[19,3:4]<-input$WLa_m_est #coefficient
ctl.file$MG_parms[20,3:4]<- input$WLb_m_est #exponent
} #end if(input$male_parms_est)
#S-R
ctl.file$SR_parms[1,3:4]<-input$lnR0_est #lnR0
#Steepness. BUG FIX(review): the lognormal branch previously called log(h_mean_ss)
#on a bare (non-existent) name instead of log(input$h_mean_ss), which errored
#whenever a lognormal steepness prior was selected.
if(input$h_ss_prior=="lognormal"){ctl.file$SR_parms[2,3:4]<-c(input$h_mean_ss,log(input$h_mean_ss))} else{ctl.file$SR_parms[2,3:4]<-input$h_mean_ss}
ctl.file$SR_parms[2,5]<-input$h_SD_ss
ctl.file$SR_parms[2,6]<-prior.type[prior.name==input$h_ss_prior]
ctl.file$SR_parms[2,7]<-input$h_phase
} #end estimated-parameters branch
#Recruitment estimation: default to no rec devs; overridden below when requested
ctl.file$do_recdev<-0
ctl.file$recdev_phase<- -1
ctl.file$MainRdevYrFirst<-input$styr #Start year of recruitment estimation
ctl.file$MainRdevYrLast<-input$endyr #Last year of recruitment estimation
ctl.file$last_early_yr_nobias_adj<-input$styr #End year of early rev devs (no bias)
ctl.file$first_yr_fullbias_adj<-input$styr #First year full bias
ctl.file$last_yr_fullbias_adj<-input$endyr #Last year full bias
ctl.file$first_recent_yr_nobias_adj<-input$endyr #First year recent no bias
if(input$rec_choice)
{
ctl.file$SR_parms[3,3:4]<-input$sigmaR #sigma R
#Map UI choice string to SS do_recdev code
if(input$RecDevChoice=="1: Devs sum to zero"){ctl.file$do_recdev<-1}
if(input$RecDevChoice=="2: Simple deviations"){ctl.file$do_recdev<-2}
if(input$RecDevChoice=="3: deviation vector"){ctl.file$do_recdev<-3}
if(input$RecDevChoice=="4: option 3 plus penalties"){ctl.file$do_recdev<-4}
ctl.file$MainRdevYrFirst<-input$Rdev_startyr #Start year of recruitment estimation
ctl.file$MainRdevYrLast<-input$Rdev_endyr #Last year of recruitment estimation
ctl.file$recdev_phase<- 1
if(input$biasC_choice)
{
#With bias correction
ctl.file$recdev_early_start<- -1 #Year early rec dev phase starts
ctl.file$recdev_early_phase<-3 #Early rec dev phase
ctl.file$Fcast_recr_phase<-0 #Forecast rec dev phase
ctl.file$last_early_yr_nobias_adj<-input$NobiasC_early #End year of early rev devs (no bias)
ctl.file$first_yr_fullbias_adj<-input$BiasC_startyr #First year full bias
ctl.file$last_yr_fullbias_adj<-input$BiasC_endyr #Last year full bias
ctl.file$first_recent_yr_nobias_adj<-input$NobiasC_recent #First year recent no bias
ctl.file$max_bias_adj<-input$BiasC #Max bias adjustment
}
}
#SELECTIVITY
#Length Selectivity
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){ctl.file$size_selex_types[2]<-3} #Change to recognize discard fishery
#Parse comma-separated per-fleet selectivity inputs from the UI
Sel50<-as.numeric(trimws(unlist(strsplit(input$Sel50,","))))
Sel50_phase<-as.numeric(trimws(unlist(strsplit(input$Sel50_phase,","))))
Selpeak<-as.numeric(trimws(unlist(strsplit(input$Selpeak,","))))
Selpeak_phase<-as.numeric(trimws(unlist(strsplit(input$Selpeak_phase,","))))
bin.width<-data.file$lbin_vector[2]-data.file$lbin_vector[1] #width of one length bin
#Symmetric bound half-width so LO/HI bracket the peak within the length-bin range
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase) #NOTE(review): computed but not used in this section — verify downstream use
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase))
if(input$Sel_choice=="Logistic")
{
#Throw warning if not enough selectivity inputs
if(!all(data.file$Nfleets==sel.inputs.lts))
{
sendSweetAlert(
session = session,
title = "Selectivity input warning",
text = "Please check to see if you have provided selectivity inputs (both parameter and phases) for all fleets in the model. This includes fishing fleets and surveys.", #typo fix: "surverys"
type = "error")
remove_modal_spinner()
stopApp()
}
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[1,1:2]<-c(Selpeak[1]-minmaxbin,Selpeak[1]+minmaxbin)
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- 15
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- -15
ctl.file$size_selex_parms[6,3:4]<- 15
#phases: fix the non-estimated double-normal parameters at -1
ctl.file$size_selex_parms[1,7]<- Selpeak_phase[1]
ctl.file$size_selex_parms[2,7]<- -1
ctl.file$size_selex_parms[3,7]<- Sel50_phase[1]
ctl.file$size_selex_parms[4,7]<- -1
ctl.file$size_selex_parms[6,7]<- -1
}
if(input$Sel_choice=="Dome-shaped")
{
#BUG FIX(review): parse the dome-shaped inputs BEFORE the input-count check.
#Previously sel.inputs.lts referenced PeakDesc/LtPeakFinal/FinalSel lengths
#before those objects were created, erroring whenever Dome-shaped was chosen.
PeakDesc<-as.numeric(trimws(unlist(strsplit(input$PeakDesc,","))))
PeakDesc_phase<-as.numeric(trimws(unlist(strsplit(input$PeakDesc_phase,","))))
LtPeakFinal<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal,","))))
LtPeakFinal_phase<-as.numeric(trimws(unlist(strsplit(input$LtPeakFinal_phase,","))))
FinalSel<-as.numeric(trimws(unlist(strsplit(input$FinalSel,","))))
FinalSel_phase<-as.numeric(trimws(unlist(strsplit(input$FinalSel_phase,","))))
#Throw warning if not enough selectivity inputs
sel.inputs.comps<-length(Sel50)-length(Sel50_phase)-length(Selpeak)-length(Selpeak_phase)-length(PeakDesc)-length(PeakDesc_phase)-length(LtPeakFinal)-length(LtPeakFinal_phase)-length(FinalSel)-length(FinalSel_phase)
sel.inputs.lts<-c(length(Sel50),length(Sel50_phase),length(Selpeak),length(Selpeak_phase),length(PeakDesc),length(PeakDesc_phase),length(LtPeakFinal),length(LtPeakFinal_phase),length(FinalSel),length(FinalSel_phase))
if(!all(data.file$Nfleets==sel.inputs.lts))
{
sendSweetAlert(
session = session,
title = "Selectivity input warning",
text = "Please check to see if you have provided selectivity inputs (both parameter and phases) for all fleets in the model. This includes fishing fleets and surveys.", #typo fix: "surverys"
type = "error")
remove_modal_spinner()
stopApp() #BUG FIX(review): was `break`, which is an error outside a loop; matches the Logistic branch
}
#NOTE(review): removed a leftover browser() debugging call here
minmaxbin<-min(Selpeak[1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[1])
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
#ctl.file$size_selex_parms[1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
ctl.file$size_selex_parms[1,1:2]<-c(Selpeak[1]-minmaxbin,Selpeak[1]+minmaxbin)
ctl.file$size_selex_parms[1,3:4]<- Selpeak[1]
ctl.file$size_selex_parms[2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[1]-bin.width)/(PeakDesc[1]-Selpeak[1]-bin.width))
ctl.file$size_selex_parms[3,3:4]<- log(-((Sel50[1]-Selpeak[1])^2/log(0.5)))
ctl.file$size_selex_parms[4,3:4]<- log(LtPeakFinal[1])
ctl.file$size_selex_parms[6,3:4]<- -log((1/(FinalSel[1]+0.000000001)-1))
#phases
ctl.file$size_selex_parms[1,7]<- Selpeak_phase[1]
ctl.file$size_selex_parms[2,7]<- PeakDesc_phase[1]
ctl.file$size_selex_parms[3,7]<- Sel50_phase[1]
ctl.file$size_selex_parms[4,7]<- LtPeakFinal_phase[1]
ctl.file$size_selex_parms[6,7]<- FinalSel_phase[1]
}
# if(input$dirichlet)
# {
# dirichlet.index<-c(unique(data.file$lencomp[,3]),(unique(data.file$agecomp[,3])+3))
# ctl.file$dirichlet_parms[dirichlet.index,3:4]<-0
# ctl.file$dirichlet_parms[dirichlet.index,7]<-2
# }
#Add other fleets: replicate the fleet-1 rows for each additional fleet, then
#overwrite the per-fleet selectivity values (fleet i+1 occupies rows 6*i+1..6*i+6)
if(data.file$Nfleets>1){
for(i in seq_len(data.file$Nfleets-1))
{
ctl.file$init_F<-rbind(ctl.file$init_F,ctl.file$init_F[1,])
ctl.file$size_selex_types<-rbind(ctl.file$size_selex_types,ctl.file$size_selex_types[1,])
if(input$Ct_F_LO_select=="Estimate F" & is.null(rv.Ct$data)){ctl.file$size_selex_types[,2]<-3}
ctl.file$age_selex_types<-rbind(ctl.file$age_selex_types,ctl.file$age_selex_types[1,])
ctl.file$size_selex_parms<-rbind(ctl.file$size_selex_parms,ctl.file$size_selex_parms[1:6,])
minmaxbin<-min(Selpeak[i+1]-min(data.file$lbin_vector),max(data.file$lbin_vector)-Selpeak[i+1])
if(input$Sel_choice=="Logistic")
{
ctl.file$size_selex_parms[6*i+3,3:4]<- log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5)))
ctl.file$size_selex_parms[6*i+3,7]<- Sel50_phase[i+1]
#ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector)+2*bin.width,max(data.file$lbin_vector)-2*bin.width)
# ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
#BUG FIX(review): upper bound previously used Selpeak[1] (copy-paste from fleet 1)
ctl.file$size_selex_parms[6*i+1,1:2]<-c(Selpeak[i+1]-minmaxbin,Selpeak[i+1]+minmaxbin)
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+1,7]<- Selpeak_phase[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- 15
ctl.file$size_selex_parms[6*i+2,7]<- -1
ctl.file$size_selex_parms[6*i+4,3:4]<- -15
ctl.file$size_selex_parms[6*i+4,7]<- -1
ctl.file$size_selex_parms[6*i+6,3:4]<- 15
ctl.file$size_selex_parms[6*i+6,7]<- -1
}
if(input$Sel_choice=="Dome-shaped")
{
# ctl.file$size_selex_parms[6*i+1,1:2]<-c(min(data.file$lbin_vector),max(data.file$lbin_vector))
#BUG FIX(review): upper bound previously used Selpeak[1] (copy-paste from fleet 1)
ctl.file$size_selex_parms[6*i+1,1:2]<-c(Selpeak[i+1]-minmaxbin,Selpeak[i+1]+minmaxbin)
ctl.file$size_selex_parms[6*i+1,3:4]<- Selpeak[i+1]
ctl.file$size_selex_parms[6*i+1,7]<- Selpeak_phase[i+1]
ctl.file$size_selex_parms[6*i+2,3:4]<- -log((max(data.file$lbin_vector)-Selpeak[i+1]-bin.width)/(PeakDesc[i+1]-Selpeak[i+1]-bin.width))
ctl.file$size_selex_parms[6*i+2,7]<- PeakDesc_phase[i+1]
ctl.file$size_selex_parms[6*i+3,3:4]<-
log(-((Sel50[i+1]-Selpeak[i+1])^2/log(0.5))) ctl.file$size_selex_parms[6*i+3,7]<- Sel50_phase[i+1] ctl.file$size_selex_parms[6*i+4,3:4]<- log(LtPeakFinal[i+1]) ctl.file$size_selex_parms[6*i+4,7]<- LtPeakFinal_phase[i+1] ctl.file$size_selex_parms[6*i+6,3:4]<- -log((1/(FinalSel[i+1]+0.000000001)-1)) ctl.file$size_selex_parms[6*i+6,7]<- FinalSel_phase[i+1] } #Dirichlet data-weighting # ctl.file$dirichlet_parms<-rbind(ctl.file$dirichlet_parms,ctl.file$dirichlet_parms[1:2,]) } #Re-label so r4ss can interpret these new entries rownames(ctl.file$init_F)<-paste0("InitF_seas_1_flt_",1:data.file$Nfleets,"Fishery",1:data.file$Nfleets) rownames(ctl.file$age_selex_types)<-rownames(ctl.file$size_selex_types)<-paste0("Fishery",1:data.file$Nfleets) size_selex_parms_rownames<-list() for(f_i in 1:data.file$Nfleets) { size_selex_parms_rownames[[f_i]]<-c(paste0("SizeSel_P_1_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_2_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_3_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_4_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_5_Fishery",f_i,"(",f_i,")"), paste0("SizeSel_P_6_Fishery",f_i,"(",f_i,")")) } size_selex_parms_rownames<-unlist(size_selex_parms_rownames) rownames(ctl.file$size_selex_parms)<-size_selex_parms_rownames } #Remove surveys from initial F lines and add q and xtra variance lines if(!is.null(rv.Index$data)) { if(data.file$Nfleets>catch.fleets){ctl.file$init_F<-ctl.file$init_F[-survey.fleets,]} q.setup.names<-c("fleet","link","link_info","extra_se","biasadj", "float") q.setup.lines<-data.frame(t(c(unique(rv.Index$data[,3])[1],1,0,0,0,1))) if(input$Indexvar){q.setup.lines<-data.frame(t(c(unique(rv.Index$data[,3])[1],1,0,1,0,1)))} qnames<-c("LO","HI","INIT","PRIOR","PR_SD","PR_type","PHASE","env_var&link","dev_link","dev_minyr","dev_maxyr","dev_PH","Block","Block_Fxn") q.lines<-data.frame(t(c(-15,15,1,0,1,0,-1,rep(0,7)))) if(input$Indexvar){q.lines<-data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,3,0,0,0,0,0,0,0)))} 
if(length(unique(rv.Index$data[,3]))>1) { for(q in 2:length(unique(rv.Index$data[,3]))) { if(!input$Indexvar) { q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,0,0,1)) q.lines<-rbind(q.lines,c(-15,15,1,0,1,0,-1,rep(0,7))) } if(input$Indexvar) { q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,1,0,1)) #if(unique(rv.Index$data[,6])[q]!="RSS"){q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,1,0,1))} #if(unique(rv.Index$data[,6])[q]=="RSS"){q.setup.lines<-rbind(q.setup.lines,c(unique(rv.Index$data[,3])[q],1,0,0,0,1))} if(unique(rv.Index$data[,6])[q]!="RSS"){q.lines<-rbind(q.lines,data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,3,0,0,0,0,0,0,0))))} if(unique(rv.Index$data[,6])[q]=="RSS"){q.lines<-rbind(q.lines,data.frame(rbind(c(-15,15,1,0,1,0,-1,rep(0,7)),c(0,5,0,0,99,0,-3,0,0,0,0,0,0,0))))} } } } names(q.setup.lines)<-q.setup.names rownames(q.setup.lines)<-unique(rv.Index$data[,6]) ctl.file$Q_options<-q.setup.lines names(q.lines)<-qnames if(!input$Indexvar){rownames(q.lines)<-paste0("LnQ_base_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")")} #rnames.temp<-c(paste0("LnQ_base_",unique(rv.Index$data[,5]),"(",unique(rv.Index$data[,2]),")"),paste0("Q_extraSD_",unique(rv.Index$data[,5]),"(",unique(rv.Index$data[,2]),")")) #rnames.temp[1:length(rnames.temp)%%2 != 0] if(input$Indexvar) { qnames.temp1<-paste0("LnQ_base_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")") qnames.temp2<-paste0("Q_extraSD_",unique(rv.Index$data[,6]),"(",unique(rv.Index$data[,3]),")") qnames.temp<-as.vector(rbind(qnames.temp1,qnames.temp2)) # if(length(rnames.temp1)>1) # { # for(xx in 2:length(rnames.temp1)) # { # rnames.temp<-c(rnames.temp1[x],rnames.temp2[x]) # } # } rownames(q.lines)<-qnames.temp } ctl.file$Q_parms<-q.lines if(data.file$Nfleets>catch.fleets) { if(any(fleet.survey.names=="RSS")) { RSS.index<-grep("RSS",fleet.survey.names) #ctl.file$Q_parms<-ctl.file$Q_parms 
ctl.file$size_selex_types[RSS.index,1]<-0 #Rename RSS selectivity types ctl.file$size_selex_parms<-ctl.file$size_selex_parms[-c((RSS.index*6-5):(RSS.index*6)),] #Remove selectivity related to RSS } } } # if(input$Data_wt=="Dirichlet") # { # Dirichlet.fleets<-c(unique(data.file$lencomp[,3]),(unique(data.file$agecomp[,3])+data.file$Nfleets)) # # if(Dirichlet.fleets>1) # # { # # for(i in 1:length(Dirichlet.fleets)){ctl.file$dirichlet_parms<-rbind(ctl.file$dirichlet_parms,ctl.file$dirichlet_parms[1,])} # # } # ctl.file$dirichlet_parms[Dirichlet.fleets,3:4]<-0.5 # ctl.file$dirichlet_parms[Dirichlet.fleets,7]<-2 # } #Change data weights # Lt_dat_wts<-as.numeric(trimws(unlist(strsplit(input$Lt_datawts,",")))) # ctl.file$Variance_adjustments[1,]<-Lt_dat_wts #Change likelihood component weight of catch if (is.null(rv.Ct$data)) { lts.lambdas<-ctl.file$lambdas[1,] ct.lambdas<-ctl.file$lambdas[2,] init.ct.lambdas<-ctl.file$lambdas[3,] if(data.file$Nfleets>1) { for(i_lam in 2:data.file$Nfleets) { lts.lambdas_temp<-ctl.file$lambdas[1,] ct.lambdas_temp<-ct.lambdas[1,] init.ct.lambdas_temp<-init.ct.lambdas[1,] lts.lambdas_temp[1,2]<-ct.lambdas_temp[1,2]<-init.ct.lambdas_temp[1,2]<-i_lam lts.lambdas<-rbind(lts.lambdas,lts.lambdas_temp) ct.lambdas<-rbind(ct.lambdas,ct.lambdas_temp) init.ct.lambdas<-rbind(init.ct.lambdas,init.ct.lambdas_temp) } } if(input$Ct_F_LO_select=="Estimate F") { if(data.file$Nfleets>1) { lt.lam.in<-as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,","))))/sum(as.numeric(trimws(unlist(strsplit(input$Wt_fleet_Ct,","))))) lt.lam<-lt.lam.in/max(lt.lam.in) lts.lambdas[,4]<-lt.lam } if(data.file$Nfleets==1) { lts.lambdas[,4]<-1 } } rownames(lts.lambdas)<-paste0("length_Fishery",c(1:data.file$Nfleets),"_sizefreq_method_1_Phz1") ct.lambdas[,4]<-0 rownames(ct.lambdas)<-paste0("catch_Fishery",c(1:data.file$Nfleets),"_Phz1") init.ct.lambdas[,4]<-0 
rownames(init.ct.lambdas)<-paste0("init_equ_catch_Fishery",c(1:data.file$Nfleets),"_lambda_for_init_equ_catch_can_only_enable/disable for_all_fleets_Phz1") ctl.file$lambdas<-rbind(lts.lambdas,ct.lambdas,init.ct.lambdas) ctl.file$N_lambdas<-nrow(ctl.file$lambdas) # ctl.file$lambdas[1,4]<-0 } if(!is.null(rv.Ct$data)) { ct.lambdas<-ctl.file$lambdas[2,] init.ct.lambdas<-ctl.file$lambdas[3,] if(data.file$Nfleets>1) { for(i_lam in 2:data.file$Nfleets) { ct.lambdas_temp<-ct.lambdas[1,] init.ct.lambdas_temp<-init.ct.lambdas[1,] ct.lambdas_temp[1,2]<-init.ct.lambdas_temp[1,2]<-i_lam ct.lambdas<-rbind(ct.lambdas,ct.lambdas_temp) init.ct.lambdas<-rbind(init.ct.lambdas,init.ct.lambdas_temp) } } ct.lambdas[,4]<-1 rownames(ct.lambdas)<-paste0("catch_Fishery",c(1:data.file$Nfleets),"_Phz1") init.ct.lambdas[,4]<-0 ctl.file$lambdas<-rbind(ct.lambdas,init.ct.lambdas) rownames(init.ct.lambdas)<-paste0("init_equ_catch_Fishery",c(1:data.file$Nfleets),"_lambda_for_init_equ_catch_can_only_enable/disable for_all_fleets_Phz1") ctl.file$N_lambdas<-data.file$Nfleets*2 #ctl.file$lambdas[1,4]<-1 # ctl.file$lambdas[2,4]<-0 ctl.file$init_F[,3]<-0.00000000000000000001 ctl.file$init_F[,7]<--1 } SS_writectl(ctl.file,paste0("Scenarios/",input$Scenario_name,"/controlfile.ctl"),overwrite=TRUE) } } } } ####################### END CTL FILE #################################### if(exists("checkmod")|input$user_model) { starter.file<-SS_readstarter(paste0("Scenarios/",input$Scenario_name,"/starter.ss")) #Use par file if(input$use_par) { starter.file$init_values_src<-1 } if(!input$use_par|is.null(input$use_par)) { starter.file$init_values_src<-0 } #Use datanew file if(input$use_datanew) { starter.file$datfile<-"data_echo.ss_new" } if(!input$use_datanew|is.null(input$use_datanew)) { if(!input$user_model|is.null(input$use_datanew)){starter.file$datfile<-"datafile.dat"} } #Use controlnew file if(input$use_controlnew) { starter.file$ctlfile<-"control.ss_new" } 
if(!input$use_controlnew|is.null(input$use_controlnew)) { if(!input$user_model|is.null(input$use_controlnew)){starter.file$ctlfile<-"controlfile.ctl"} } #Phase 0 if(input$use_phase0) { starter.file$last_estimation_phase<-0 } if(!input$use_par|is.null(input$use_par)) { starter.file$last_estimation_phase<-6 } #Jitter selection starter.file$jitter_fraction<-0 # if(input$jitter_choice) # { # starter.file$jitter_fraction<-input$jitter_fraction # starter.file$init_values_src<-0 # } SS_writestarter(starter.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE) #Forecast file modfications #Reference points if(!input$use_forecastnew) { forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss")) if(input$RP_choices){ forecast.file$SPRtarget<-input$SPR_target forecast.file$Btarget<-input$B_target CR_choices<-c("1: Catch fxn of SSB, buffer on F", "2: F fxn of SSB, buffer on F", "3: Catch fxn of SSB, buffer on catch", "4: F fxn of SSB, buffer on catch") CR_choices_num.vec<-c(1:4) forecast.file$ControlRuleMethod<-CR_choices_num.vec[CR_choices==input$CR_Ct_F] forecast.file$SBforconstantF<-input$slope_hi forecast.file$BfornoF<-input$slope_low } if(input$Forecast_choice) { forecast.file$Nforecastyrs<-input$forecast_num buffer.in<-as.numeric(trimws(unlist(strsplit(input$forecast_buffer,",")))) if(length(buffer.in)==1){forecast.file$Flimitfraction<-buffer.in} if(length(buffer.in)>1) { forecast.file$Flimitfraction<--1 buffer.datafr<-data.frame(Year=c((data.file$endyr+1):(data.file$endyr+input$forecast_num)),Fraction=buffer.in) #rownames(buffer.datafr)<-paste0("#_Flimitfraction_m",1:input$forecast_num) forecast.file$Flimitfraction_m<-buffer.datafr } } SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE) } if(input$use_forecastnew) { forecast.file<-SS_readforecast(paste0("Scenarios/",input$Scenario_name,"/forecast.ss_new")) SS_writeforecast(forecast.file,paste0("Scenarios/",input$Scenario_name),overwrite=TRUE) } 
########
#Run Stock Synthesis and plot output
show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[2],text="Model run in progress")
#Map the UI data-weighting choice onto the tune_comps() option codes
if(input$Data_wt=="None"){DataWT_opt<-"none"}
if(input$Data_wt=="Dirichlet"){DataWT_opt<-"DM"}
if(input$Data_wt=="Francis"){DataWT_opt<-"Francis"}
if(input$Data_wt=="McAllister-Ianelli"){DataWT_opt<-"MI"}
#When the UI does not expose the no-hessian toggle: try a full run first, then
#retry with -nohess if the run produced no data_echo.ss_new
if(is.null(input$no_hess)){
cmd.in<-""
if(input$add_comms==TRUE){cmd.in<-paste0(" ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
#BUG FIX(review): the path was missing the "/" separator ("...<name>data_echo.ss_new"),
#so this existence check always failed and the -nohess retry always ran
if(!file.exists(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")))
{
cmd.in<-" -nohess"
if(input$add_comms==TRUE){cmd.in<-paste0(" -nohess ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
}
}
if(!is.null(input$no_hess))
{
if(input$no_hess)
{
cmd.in<-" -nohess"
if(input$add_comms==TRUE){cmd.in<-paste0(" -nohess ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
}
if(!input$no_hess)
{
cmd.in<-""
if(input$add_comms==TRUE){cmd.in<-paste0(" ",input$add_comms_in)}
RUN.SS(paste0("Scenarios/",input$Scenario_name),ss.cmd=cmd.in,OS.in=input$OS_choice)
}
}
#Only read results if SS actually produced output
if(file.exists(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")))
{
Model.output<-try(SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE))
#Fall back to covar=FALSE when the full read fails (e.g. no Hessian was produced).
#Idiom fix: inherits() instead of class(x)=="try-error"
if(inherits(Model.output,"try-error"))
{
Model.output<-SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE)
}
if(input$Data_wt!="None")
{
if(Model.output$inputs$covar==TRUE)
{
tune_comps(Model.output,dir=paste0("Scenarios/",input$Scenario_name),niters_tuning=3,option=DataWT_opt,show_in_console = TRUE,verbose=FALSE)
Model.output<-try(SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE))
}
if(Model.output$inputs$covar==FALSE)
{
tune_comps(Model.output,dir=paste0("Scenarios/",input$Scenario_name),option=DataWT_opt,niters_tuning=3,extras = " -nohess",show_in_console = TRUE,verbose=FALSE) Model.output<-SS_output(paste0("Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE) } } data.file<-SS_readdat(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")) #No plots or figures if(is.null(input$no_plots_tables)) { show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[4],text="Making plots") SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE) } if(is.null(input$no_tables)) { show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[5],text="Making tables") try(SSexecutivesummary(Model.output)) } if(!is.null(input$no_plots_tables)){ if(input$no_plots_tables==FALSE) { #Make SS plots show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[4],text="Making plots") SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE) } } if(!is.null(input$no_tables)){ if(input$no_tables==FALSE) { #Make SS tables show_modal_spinner(spin="flower",color=wes_palettes$Zissou1[5],text="Making tables") try(SSexecutivesummary(Model.output)) } } #Run multiple jitters if(input$jitter_choice) { if(input$Njitter>0) { show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[1],text="Run jitters") #file.copy(paste0("Scenarios/",input$Scenario_name,"/ss.exe"),paste0("Scenarios/",input$Scenario_name,"/ss_copy.exe"),overwrite = FALSE) jits<-jitter( dir=paste0(getwd(),"/Scenarios/",input$Scenario_name), Njitter=input$Njitter, printlikes = TRUE, jitter_fraction=input$jitter_fraction, init_values_src=0, verbose=FALSE, extras = "-nohess" ) profilemodels <- SSgetoutput(dirvec=paste0("Scenarios/",input$Scenario_name), keyvec=0:input$Njitter, getcovar=FALSE) profilesummary <- SSsummarize(profilemodels) minlikes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]==min(profilesummary$likelihoods[1,-length(profilesummary$likelihoods)]) #Find best fit model 
index.minlikes<-c(1:length(minlikes))[minlikes] jitter.likes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)] ref.like<-min(jitter.likes,na.rm = TRUE) #Make plot and save to folder main.dir<-getwd() if(!file.exists(paste0("Scenarios/",input$Scenario_name,"/Jitter Results"))) { dir.create(paste0("Scenarios/",input$Scenario_name,"/Jitter Results")) } setwd(paste0("Scenarios/",input$Scenario_name,"/Jitter Results")) png("jitterplot.png") jitterplot<-plot(c(1:length(jitter.likes)),jitter.likes,type="p",col="black",bg="blue",pch=21,xlab="Jitter run",ylab="-log likelihood value",cex=1.25) points(c(1:length(jitter.likes))[jitter.likes>ref.like],jitter.likes[jitter.likes>ref.like],type="p",col="black",bg="red",pch=21,cex=1.25) abline(h=ref.like) # likebc<-round((length(jitter.likes[ref.like==jitter.likes])/(input$Njitter+1))*100,0) # likelessbc<-round((length(jitter.likes[ref.like>jitter.likes])/(input$Njitter+1))*100,0) # like10<-round((length(jitter.likes[(ref.like+10)<jitter.likes])/(input$Njitter+1))*100,0) # like2<-round(((length(jitter.likes[(ref.like+2)>jitter.likes])-(length(jitter.likes[ref.like==jitter.likes])))/(input$Njitter+1))*100,0) # like_2_10<-round(100-(likebc+like10+like2),0) # legend("topright",c(paste(" ",likelessbc,"% < BC",sep=""),paste(likebc,"% = BC",sep=""),paste(like2,"% < BC+2",sep=""),paste(like_2_10,"% > BC+2 & < BC+10",sep=""),paste(like10,"% > BC+10",sep="")),bty="n") dev.off() save(profilesummary,file=paste0("jitter_summary.DMP")) SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(1), new = FALSE,print=TRUE,plotdir=getwd()) SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(3), new = FALSE,print=TRUE,plotdir=getwd()) output$Jitterplot<-renderPlot({ # if(input$Njitter==1){return(NULL)} # if(input$Njitter>1) # { #jitter.likes<-profilesummary$likelihoods[1,-length(profilesummary$likelihoods)] #ref.like<-min(jitter.likes) 
# Interactive copy of the jitter likelihood plot (same styling as the PNG above).
jitterplot<-plot(c(1:length(jitter.likes)),jitter.likes,type="p",col="black",bg="blue",pch=21,xlab="Jitter run",ylab="-log likelihood value",cex=1.25)
points(c(1:length(jitter.likes))[jitter.likes>ref.like],jitter.likes[jitter.likes>ref.like],type="p",col="black",bg="red",pch=21,cex=1.25)
abline(h=ref.like)
# likebc<-round((length(jitter.likes[ref.like==jitter.likes])/(input$Njitter+1))*100,0)
# likelessbc<-round((length(jitter.likes[ref.like>jitter.likes])/(input$Njitter+1))*100,0)
# like10<-round((length(jitter.likes[(ref.like+10)<jitter.likes])/(input$Njitter+1))*100,0)
# like2<-round(((length(jitter.likes[(ref.like+2)>jitter.likes])-(length(jitter.likes[ref.like==jitter.likes])))/(input$Njitter+1))*100,0)
# like_2_10<-round(100-(likebc+like10+like2),0)
# legend("topright",c(paste(" ",likelessbc,"% < BC",sep=""),paste(likebc,"% = BC",sep=""),paste(like2,"% < BC+2",sep=""),paste(like_2_10,"% > BC+2 & < BC+10",sep=""),paste(like10,"% > BC+10",sep="")),bty="n")
# }
})
#Spawning output comp
output$Jittercompplot1<-renderPlot({
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(1), new = FALSE)
})
#Relative stock status comp
output$Jittercompplot2<-renderPlot({
SSplotComparisons(profilesummary, legendlabels = c(0:input$Njitter), ylimAdj = 1.30, subplot = c(3), new = FALSE)
})
#R-run to get new best fit model
# Seed the re-run from the best jitter's parameter file (keys are 0-based,
# hence index.minlikes[1]-1) and switch the starter to use ss.par with no jitter.
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[2],text="Re-run best model post-jitters")
file.copy(paste0(main.dir,"/Scenarios/",input$Scenario_name,"/ss.par_",(index.minlikes[1]-1),".sso"),paste0(main.dir,"/Scenarios/",input$Scenario_name,"/ss.par"),overwrite = TRUE)
#file.rename(paste0("Scenarios/",input$Scenario_name,"/ss_copy.exe"),paste0("Scenarios/",input$Scenario_name,"/ss.exe"),overwrite = FALSE)
# NOTE(review): starter.file is assumed to be read earlier in this observer (not visible here).
starter.file$init_values_src<-1
starter.file$jitter_fraction<-0
SS_writestarter(starter.file,paste0(main.dir,"/Scenarios/",input$Scenario_name),overwrite=TRUE)
# Re-run SS from the best-jitter parameter values and reload the output.
RUN.SS(paste0(main.dir,"/Scenarios/",input$Scenario_name),ss.cmd="",OS.in=input$OS_choice)
Model.output<-try(SS_output(paste0(main.dir,"/Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE))
# Fallback: if reading with covariance failed (e.g. Hessian issues), retry without it.
# NOTE(review): inherits(Model.output,"try-error") would be the safer idiom here.
if(class(Model.output)=="try-error")
{
Model.output<-SS_output(paste0(main.dir,"/Scenarios/",input$Scenario_name),verbose=FALSE,printstats = FALSE,covar=FALSE)
}
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[3],text="Making plots")
SS_plots(Model.output,maxyr=data.file$endyr+1,verbose=FALSE)
show_modal_spinner(spin="flower",color=wes_palettes$Moonrise1[4],text="Making tables")
try(SSexecutivesummary(Model.output))
}
# Restore the app's working directory after the jitter workflow.
setwd(main.dir)
}
#Add retro runs
# if(input$Retro_choice){
#   mydir<-paste0(getwd(),"/Scenarios/")
#   model_settings = get_settings(settings = list(base_name = input$Scenario_name,
#     run = "retro",
#     retro_yrs = input$first_retro_year:input$final_retro_year))
# # tryCatch({
#   run_diagnostics(mydir = mydir, model_settings = model_settings)
# # },
# # warning = function(warn){
# #   showNotification(paste0(warn), type = 'warning')
# # },
# # error = function(err){
# #   showNotification(paste0(err), type = 'err')
# # })
# }
#Convergence diagnostics
# Report maximum gradient and covariance status, plus a plain-language verdict.
output$converge.grad <- renderText({
max.grad<-paste0("Maximum gradient: ",Model.output$maximum_gradient_component)
})
output$converge.covar <- renderText({
covar<-paste0("Was covariance file created? ",Model.output$inputs$covar)
})
output$converge.dec <- renderText({
# Convergence heuristic: small gradient AND an invertible Hessian (covar written).
if(Model.output$maximum_gradient_component<0.1 & Model.output$inputs$covar==TRUE)
{converge.dec<-"Model appears converged. Please check outputs for nonsense."}
else{converge.dec<-"Model may not have converged or inputs are missing. Please use the Jitter option or check/change starting values before re-running model."}
})
#Relative biomass
# Management-quantity table: relative spawning output, MSY ratio, SPR ratio,
# and next-year OFL/ABC pulled from derived_quants by label.
output$SSout_relSB_table <- renderTable({
SB_indices<-c(which(rownames(Model.output$derived_quants)==paste0("Bratio_",input$endyr)),
which(rownames(Model.output$derived_quants)=="B_MSY/SSB_unfished"),
which(rownames(Model.output$derived_quants)==paste0("SPRratio_",input$endyr)),
which(rownames(Model.output$derived_quants)==paste0("OFLCatch_",(input$endyr+1))),
which(rownames(Model.output$derived_quants)==paste0("ForeCatch_",(input$endyr+1)))
)
Output_relSB_table<-data.frame(Model.output$derived_quants[SB_indices,1:3])
# Label=c(paste0("SO",input$endyr+1,"/SO_0"),
# "SO_MSY/SO_0",
# paste0("SPR",input$endyr+1),
# paste0("OFL",(input$endyr+1)),
# paste0("ABC",(input$endyr+1))
# ))
# Replace SS labels with reader-friendly ones (SO = spawning output).
Output_relSB_table[,1]<-c(paste0("SO",input$endyr,"/SO_0"),
"SO_MSY/SO_0",
paste0("1-SPR",input$endyr),
paste0("OFL",(input$endyr+1)),
paste0("ABC",(input$endyr+1))
)
Output_relSB_table
# rownames=c(expression(SO[input$endyr]/SO[0]),
# expression(SO[MSY]/SO[0]),
# expression(SPR[input$endyr]),
# expression(OFL[input$endyr]),
# expression(ABC[input$endyr])
# ))
# Output_relSB_table[,1]<-c(expression('B',[input$endyr],'/B',[0]),
# expression('B'[MSY]/'B'[0]),
# expression('SPR'[input$endyr]),
# expression('OFL'[input$endyr]),
# expression('ABC'[input$endyr])
# )
})
#F estimate and relative to FMSY and proxies
output$SSout_F_table <- renderTable({
F_indices<-c(which(rownames(Model.output$derived_quants)==paste0("F_",input$endyr)),
which(rownames(Model.output$derived_quants)=="annF_Btgt"),
which(rownames(Model.output$derived_quants)=="annF_SPR"),
which(rownames(Model.output$derived_quants)=="annF_MSY")
)
F_values<-Model.output$derived_quants[F_indices,1:3]
})
#Time series output
# Selected columns of the SPR time series (column positions are SS-version dependent).
output$SSout_table <- renderTable({
# Output_table<-Model.output$sprseries[-nrow(Model.output$sprseries),c(1,5,6,7,8,9,11,12,13,25,37)]
Output_table<-Model.output$sprseries[,c(1,5,6,7,8,9,11,12,13,25,37)]
})
#Paramters
# Estimated (non-deviation) parameters with their names as the first column.
output$Parameters_table <- renderTable({
cbind(rownames(Model.output$estimated_non_dev_parameters),Model.output$estimated_non_dev_parameters)
})
}
# If SS never produced data_echo.ss_new, the run failed: warn the user.
if(!file.exists(paste0("Scenarios/",input$Scenario_name,"/data_echo.ss_new")))
{
sendSweetAlert(
session = session,
title = "Model Warning",
text = "Model did not run or Hessian did not invert. Double check data files for errors and each input for missing values (or for 0 SD for lognormal priors) and/or re-run model using a different model specification (e.g., starting values).",
type = "warning")
}
remove_modal_spinner()
# Jump to the results tab once model output exists.
observeEvent(exists("Model.output"), {
updateTabsetPanel(session, "tabs", selected = '2')
})
}
})
###############################################################
### Likelihood profiles, Sensitivities, and Ensemble models ###
###############################################################
# Root volumes for all shinyFiles directory choosers below.
roots <- getVolumes()() #
# Reactive: user-chosen directory to copy model results into.
pathModelout <- reactive({
shinyDirChoose(input, "Modelout_dir", roots= roots,session=session, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$Modelout_dir))
})
# Copy the scenario folder (and retro folder, if run) out to the chosen directory.
observeEvent(as.numeric(input$tabs)==2,{
#observeEvent(exists("Model.output"),{
pathModelout.dir <-pathModelout()
# character(0) means no directory has been chosen yet.
if(!identical(pathModelout.dir, character(0)))
{
#dir.create(paste0(pathModelout.dir,"/Scenarios"))
file.copy(paste0("Scenarios/",input$Scenario_name), pathModelout.dir,recursive=TRUE,overwrite=TRUE)
if(input$Retro_choice){file.copy(paste0("Scenarios/",input$Scenario_name,"_retro"), pathModelout.dir,recursive=TRUE,overwrite=TRUE)}
}
})
########################
### Model efficiency ###
########################
shinyDirChoose(input,"ModEff_dir", roots=roots,session=session, filetypes=c('', 'txt'))
# NOTE(review): this pathRetro reads ModEff_dir and is later redefined in the
# retrospectives section (reading Retro_dir); this definition appears to be dead code.
pathRetro <- reactive({
return(parseDirPath(roots, input$ModEff_dir))
})
# if(exists("ModEff_dir")){print(ModEff_dir)}
# observeEvent(as.numeric(input$tabs)==12,{
# output$ModEff_model_pick<-renderUI({
# pickerInput(
# inputId = "myModEff",
# label = "Choose model to evaluate",
# choices = list.files(pathModEff()),
# options = list(
# `actions-box` = TRUE,
# size = 12,
# `selected-text-format` = "count > 3"
# ),
# multiple = TRUE
# )
# })
# })
# Run MCMC (adnuts) on the chosen model, in a sibling folder suffixed with the algorithm name.
observeEvent(req(input$run_adnuts),{
modeff.mod.dir<-parseDirPath(roots, input$ModEff_dir) #pathModEff()
modeff.dir<-dirname(modeff.mod.dir)
modeff.name<-paste0(basename(modeff.mod.dir),"_",input$ModEff_choice)
# Copy the model into its algorithm-specific working folder (once).
if(dir.exists(file.path(modeff.dir,modeff.name))==FALSE)
{
dir.create(file.path(modeff.dir,modeff.name))
file.copy(list.files(modeff.mod.dir,full.names=TRUE),to=file.path(modeff.dir,modeff.name),recursive=TRUE,overwrite=TRUE)
}
#optimize model
# Optional MLE optimization with -hbf so the hybrid bounded function is ready for MCMC.
if(input$Opt_mod==TRUE)
{
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text=paste0("Run initial optimization?"))
RUN.SS(file.path(modeff.dir,modeff.name),ss.cmd="/ss -nox -mcmc 100 -hbf",OS.in=input$OS_choice)
remove_modal_spinner()
}
#Set mcmc model
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text=paste0("Run ",input$ModEff_choice," model"))
# Leave one core free for the UI.
chains <- parallel::detectCores()-1
m<-"ss"
p<-file.path(modeff.dir,modeff.name)
#Run MCMC model with either rwm or nuts
if(input$ModEff_choice=="RWM")
{
fit_model<- sample_rwm(model=m,
path=p,
iter=input$iter,
warmup=0.25*input$iter,
chains=chains,
thin=input$thin,
duration=NULL)
}
if (input$ModEff_choice=="Nuts")
{
# NOTE(review): NUTS uses fixed chains=4/cores=4 rather than the detected count above.
fit_model <- sample_nuts(model=m,
path=p,
iter=input$iter,
warmup=0.25*input$iter,
chains=4,
cores=4,control=list(metric='mle', max_treedepth=5),mceval=TRUE)
}
# Capture the printed summary for display in the UI.
fit.mod.summary<-utils::capture.output(summary(fit_model), file=NULL)
output$fit.model.summary <- renderText({
#paste0(fit.mod.summary[1],fit.mod.summary[2],fit.mod.summary[3])
fit.mod.summary
})
# Pairs plots for (up to) the 10 slowest- and fastest-mixing parameters.
parmax<-10
if(length(fit_model$par_names)<10){parmax<-length(fit_model$par_names)}
png(paste0(p,"/pairs_plot_slow.png"),width=600, height=350)
pairs_admb(fit_model, pars=1:parmax, order='slow')
dev.off()
png(paste0(p,"/pairs_plot_fast.png"),width=600, height=350)
pairs_admb(fit_model, pars=1:parmax, order='fast')
dev.off()
# Display the saved pairs plots (files persist in the model folder; deleteFile=FALSE).
output$pairs_slow <- renderImage({
#region image.path1<-normalizePath(paste0(p,"/pairs_plot_fast.png"),mustWork=FALSE)
return(list(
src = paste0(p,"/pairs_plot_slow.png"),
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$pairs_fast <- renderImage({
#region image.path1<-normalizePath(paste0(p,"/pairs_plot_fast.png"),mustWork=FALSE)
return(list(
src = paste0(p,"/pairs_plot_fast.png"),
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
# Persist the fitted MCMC object for later inspection (e.g. shinyadmb).
save(fit_model,file=paste0(p,"/fit_model.RData"))
remove_modal_spinner()
#if(input$run_stanout==TRUE){launch_shinyadmb(fit_model)}
})
###########################
### Likelihood profiles ###
###########################
# Reactive: directory of the model to profile over.
pathLP <- reactive({
shinyDirChoose(input, "LP_dir", roots=roots,session=session, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$LP_dir))
})
# Populate the parameter picker when the likelihood-profile tab is visited.
observeEvent(as.numeric(input$tabs)==4,{
pathLP.dir <-pathLP()
output$LikeProf_model_picks<-renderUI({
pickerInput(
inputId = "myPicker_LP",
label = "Choose parameters to profile over",
choices = c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male"),
options = list(
`actions-box` = TRUE,
size = 12,
`selected-text-format` = "count > 3"
),
multiple = TRUE
)
})
})
# Run single-parameter likelihood profiles via nwfscDiag-style run_diagnostics().
observeEvent(input$run_Profiles,{
show_modal_spinner(spin="flower",color=wes_palettes$Darjeeling1[1],text="Profiles running")
starter.file<-SS_readstarter(paste0(pathLP(),"/starter.ss"))
#data.file<-SS_readdat(paste0(pathLP(),"/data_echo.ss_new"))
#ctl.file<-SS_readctl(paste0(pathLP(),"/control.ss_new"),use_datlist = TRUE, datlist=data.file)
rep.parms<-SS_output(pathLP(),covar=FALSE,verbose=FALSE)
rep.parms.names<-rownames(rep.parms$parameters)
# SS_parm_names<-c("SR_BH_steep", "SR_LN(R0)","NatM_p_1_Fem_GP_1","L_at_Amax_Fem_GP_1","VonBert_K_Fem_GP_1","CV_young_Fem_GP_1","CV_old_Fem_GP_1","NatM_p_1_Mal_GP_1","L_at_Amax_Mal_GP_1","VonBert_K_Mal_GP_1","CV_young_Mal_GP_1","CV_old_Mal_GP_1")
#SS_parm_names<-c(rownames(ctl.file$SR_parms)[2], rownames(ctl.file$SR_parms)[1],rownames(ctl.file$MG_parms)[1],rownames(ctl.file$MG_parms)[3],rownames(ctl.file$MG_parms)[4],rownames(ctl.file$MG_parms)[5],rownames(ctl.file$MG_parms)[6],rownames(ctl.file$MG_parms)[13],rownames(ctl.file$MG_parms)[15],rownames(ctl.file$MG_parms)[16],rownames(ctl.file$MG_parms)[17],rownames(ctl.file$MG_parms)[18])
# Map UI labels to SS parameter names by POSITION in the parameter list.
# NOTE(review): these hard-coded indices assume a fixed two-sex model parameter
# ordering; a different model structure would silently pick the wrong parameters.
SS_parm_names<-c(rep.parms.names[24], rep.parms.names[23],rep.parms.names[1],rep.parms.names[3],rep.parms.names[4],rep.parms.names[5],rep.parms.names[6],rep.parms.names[13],rep.parms.names[15],rep.parms.names[16],rep.parms.names[17],rep.parms.names[18])
parmnames<-input$myPicker_LP
parmnames_vec<-c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male")
prof_parms_names<-SS_parm_names[parmnames_vec%in%parmnames]
# Include prior likelihood in the profile only if the starter file does.
prior_like<-starter.file$prior_like
use_prior_like_in<-rep(0,length(prof_parms_names))
if(prior_like==1){use_prior_like_in = rep(1,length(prof_parms_names))}
mydir = dirname(pathLP())
# Profile grid: low/high/step are comma-separated per-parameter inputs.
# NOTE(review): `get` shadows base::get within this observer.
get = get_settings_profile( parameters = prof_parms_names,
low = as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))),
high = as.numeric(trimws(unlist(strsplit(input$Prof_Hi_val,",")))),
step_size = as.numeric(trimws(unlist(strsplit(input$Prof_step,",")))),
param_space = rep('real',length(as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))))),
use_prior_like = use_prior_like_in
)
model_settings = get_settings(settings = list(base_name = basename(pathLP()),
run = "profile",
profile_details = get))
try(run_diagnostics(mydir = mydir, model_settings = model_settings))
# Remove the warning file run_diagnostics leaves behind.
file.remove(paste0(dirname(mydir),"/run_diag_warning.txt"))
# Display the diagnostic figures for the FIRST profiled parameter only.
output$LikeProf_plot_modout <- renderImage({
image.path1<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/parameter_panel_",prof_parms_names[1],".png")),mustWork=FALSE)
return(list(
src = image.path1,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$LikeProf_plot_Piner <- renderImage({
image.path2<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/piner_panel_",prof_parms_names[1],".png")),mustWork=FALSE)
return(list(
src = image.path2,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$LikeProf_plot_SO <- renderImage({
image.path3<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/",prof_parms_names[1],"_trajectories_compare1_spawnbio.png")),mustWork=FALSE)
return(list(
src = image.path3,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$LikeProf_plot_SOt_SO0 <- renderImage({
image.path4<-normalizePath(file.path(paste0(pathLP(),"_profile_",prof_parms_names[1],"/",prof_parms_names[1],"_trajectories_compare3_Bratio.png")),mustWork=FALSE)
return(list(
src = image.path4,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
remove_modal_spinner()
})
# Multi-parameter (joint) likelihood profile: parameter value combinations are
# supplied by the user as a CSV, one column per parameter.
observeEvent(input$run_MultiProfiles,{
show_modal_spinner(spin="flower",color=wes_palettes$Darjeeling1[2],text="Multi-profiles running")
refdir<-pathLP()
mydir <- dirname(refdir)
#Read in reference model
ref.model<-SS_output(refdir)
#Read in parameter files
par.df <- fread(input$file_multi_profile$datapath,check.names=FALSE,data.table=FALSE)
# Fall back to semicolon-delimited parsing if the first line contains ';'.
L <- readLines(input$file_multi_profile$datapath, n = 1)
if(grepl(";", L)) {par.df <- read.csv2(input$file_multi_profile$datapath,check.names=FALSE)}
# NOTE(review): positional parameter-index assumption, same caveat as the
# single-parameter profiles above.
SS_parm_names<-rownames(ref.model$parameters)[c(23:24,1,3,4:6,13,15:18)]
parmnames_vec<-c("Steepness","lnR0","Natural mortality female","Linf female","k female", "CV@Lt young female","CV@Lt old female","Natural mortality male","Linf male","k male", "CV@Lt young male", "CV@Lt old male")
parmnames<-colnames(par.df)
prof_parms_names<-SS_parm_names[parmnames_vec%in%parmnames]
# Legend labels: "<parm1> <value1>;<parm2> <value2>" per row of the CSV.
modelnames<-paste0(parmnames[1]," ",par.df[,1],";",parmnames[2]," ",par.df[,2])
#Make new folder
#para = rownames(model_settings$profile_details)[aa]
profile_dir <- paste0(refdir,"_profile_", paste(prof_parms_names,collapse="_"))
dir.create(profile_dir, showWarnings = FALSE)
# Start from a clean copy of the reference model in the profile folder.
if (length(list.files(profile_dir)) !=0) {
remove <- list.files(profile_dir)
file.remove(file.path(profile_dir, remove))
}
all_files <- list.files(refdir)
file.copy(from = file.path(refdir,all_files), to = profile_dir, overwrite = TRUE)
#Set-up the starter file control file
starter.file<-SS_readstarter(paste0(profile_dir,"/starter.ss"))
starter.file$ctlfile<-"control_modified.ss"
starter.file$init_values_src<-0
starter.file$prior_like<-1
SS_writestarter(starter.file,profile_dir,overwrite=TRUE)
# low_in <- as.numeric(trimws(unlist(strsplit(input$Prof_Low_val,",")))),
# high_in <- as.numeric(trimws(unlist(strsplit(input$Prof_Hi_val,",")))),
# step_size_in <- as.numeric(trimws(unlist(strsplit(input$Prof_step,","))))
# par.df<-data.frame(mapply(function(x) seq(low[x],high[x],step_size[x]),x=1:length(low)))
# colnames(par.df)<-prof_parms_names
# Run r4ss::profile over every row of par.df, with or without the Hessian.
if(input$Hess_multi_like==FALSE)
{
profile <- profile(
dir = profile_dir, # directory
masterctlfile = "control.ss_new",
newctlfile = "control_modified.ss",
string = prof_parms_names,
profilevec = par.df,
extras = "-nohess",
prior_check=TRUE,
show_in_console = TRUE
)
}
if(input$Hess_multi_like==TRUE)
{
profile <- profile(
dir = profile_dir, # directory
masterctlfile = "control.ss_new",
newctlfile = "control_modified.ss",
string = prof_parms_names,
profilevec = par.df,
prior_check=TRUE,
show_in_console = TRUE
)
}
# get model output
profilemodels <- SSgetoutput(dirvec=profile_dir,keyvec=1:nrow(par.df), getcovar=FALSE)
n <- length(profilemodels)
profilesummary <- SSsummarize(profilemodels)
# NOTE(review): TRP_multi_like / LRP_multi_like appear unbound here (likely
# intended as input$TRP_multi_like etc.); the try() masks any resulting error.
try(SSplotComparisons(profilesummary, legendlabels = modelnames, ylimAdj = 1.30, new = FALSE,plot=FALSE,print=TRUE, legendloc = 'topleft',uncertainty=TRUE,plotdir=profile_dir,btarg=TRP_multi_like,minbthresh=LRP_multi_like))
save(profilesummary,file=paste0(profile_dir,"/multiprofile.DMP"))
# add total likelihood (row 1) to table created above
# Append summary columns: total likelihood, difference from the reference
# model, terminal-year relative biomass, and unfished/current spawning output.
par.df$like <- as.numeric(profilesummary$likelihoods[1, 1:n])
par.df$likediff <- as.numeric(profilesummary$likelihoods[1, 1:n]-ref.model$likelihoods_used[1,1])
par.df$Bratio <- as.numeric(profilesummary$Bratio[grep((profilesummary$endyrs[1]),profilesummary$Bratio$Label), 1:n])
par.df$SB0 <- as.numeric(profilesummary$SpawnBio[1, 1:n])
par.df$SBcurrent <- as.numeric(profilesummary$SpawnBio[grep((profilesummary$endyrs[1]),profilesummary$SpawnBio$Label), 1:n])
SBcurrmax<-max(par.df$SBcurrent)
colnames(par.df)<-c(parmnames,c("Likelihood","Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])))
save(par.df,file=paste0(profile_dir,"/multiprofilelikelihoods.DMP"))
write.csv(par.df,file=paste0(profile_dir,"/multiprofilelikelihoods.csv"))
#This reactive object is needed to get the plots to work
plot.dat<-reactive({
plot.dat<-melt(par.df,id.vars=c( colnames(par.df)[1:2]),measure.vars=c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])))
plot.dat
})
# Invisible points that force sensible y-axis limits per facet.
blank_data<- data.frame(variable = c("Likelihood_difference", "Likelihood_difference", paste0("SB",profilesummary$endyrs[1],"/SB0"), paste0("SB",profilesummary$endyrs[1],"/SB0"), "SB0", "SB0",paste0("SB",profilesummary$endyrs[1]),paste0("SB",profilesummary$endyrs[1])), x =min(par.df[,1]),y = c(min(par.df$Likelihood_difference),max(par.df$Likelihood_difference), 0, 1, 0, ceiling(max(par.df$SB0)),0,ceiling(SBcurrmax)))
# Fix facet order to match the melted data.
blank_data$variable<-factor(blank_data$variable,c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])))
# Reference-model values overlaid as blue points on each facet.
# NOTE(review): the Deplete index is derived from grep on Bratio labels + 1;
# verify this lines up with sprseries rows for the model at hand.
refmodel.dat<-data.frame(variable = c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1])), x =ref.model$parameters[grep(prof_parms_names[1],ref.model$parameters$Label),3],y = c(0,ref.model$sprseries$Deplete[grep((profilesummary$endyrs[1]),profilesummary$Bratio$Label)+1],ref.model$SBzero,ref.model$derived_quants[grep((profilesummary$endyrs[1]),profilesummary$SpawnBio$Label),2]))
#multiprofplotfun<-function(plot.dat)
#{
# Faceted joint-profile plot: likelihood difference and biomass quantities vs.
# the paired parameter values; horizontal lines mark +/-1.96 and the 0.4/0.25
# reference points. The figure is also written to the profile folder.
output$LikeProf_multiplot <- renderPlot({
multiplot<-ggplot(plot.dat(),aes(plot.dat()[,1],value))+
geom_line(lwd=1.25)+
facet_wrap(~variable,scales="free_y")+
geom_blank(data = blank_data, aes(x = x, y = y,z="variable"))+
ylab("Difference in -log likelihood")+
scale_x_continuous(name = paste(parmnames[1],"and",parmnames[2]),
breaks =par.df[,1],
labels = paste0(par.df[,1],"\n",par.df[,2]))+
geom_hline(data = data.frame(yint=c(-1.96,0,1.96,0.4,0.25),variable=c("Likelihood_difference","Likelihood_difference","Likelihood_difference",paste0("SB",profilesummary$endyrs[1],"/SB0"),paste0("SB",profilesummary$endyrs[1],"/SB0"))), aes(yintercept = yint), linetype = c("solid","dotted","solid","dotted","solid"),color=c("red","black","red","darkgreen","red"),lwd=1)+
geom_point(data=refmodel.dat,aes(x=x,y=y),color="blue",size=4)+
theme_bw()
ggsave(paste0(profile_dir,"/","multilikelihood_profile.png"),width=10,height=10,units="in")
multiplot
})
#}
# output$LikeProf_multiplot <- renderPlot({
# plotPNG(func=multiprofplotfun(plot.dat()),paste0(profile_dir,"/",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"))
# })
# plot.dat2<-reactive({
# plot.dat2<-melt(par.df,id.vars=c( colnames(par.df)[1:2]),measure.vars=c("Likelihood_difference",paste0("SB",profilesummary$endyrs[1]-1,"/SB0"),"SB0",paste0("SB",profilesummary$endyrs[1]-1)))
# plot.dat2
# })
# png(file = paste0(profile_dir,"/","multilikelihood_profile.png"),width = 10, height = 10, units = "in", res = 300, pointsize = pt)
# # multiplot
# ggplot(plot.dat2(),aes(plot.dat2()[,1],value))+
# geom_line(lwd=1.25)+
# facet_wrap(~variable,scales="free_y")+
# #geom_blank(data = blank_data, aes(x = x, y = y,z="variable"))+
# ylab("Difference in -log likelihood")+
# #scale_x_continuous(name = paste(parmnames[1],"and",parmnames[2]),
# # breaks =par.df[,1],
# # labels = paste0(par.df[,1],"\n",par.df[,2]))+
# geom_hline(data = data.frame(yint=c(-1.96,0,1.96,0.4,0.25),variable=c("Likelihood_difference","Likelihood_difference","Likelihood_difference",paste0("SB",profilesummary$endyrs[1]-1,"/SB0"),paste0("SB",profilesummary$endyrs[1]-1,"/SB0"))),
# aes(yintercept = yint), linetype = c("solid","dotted","solid","dotted","solid"),color=c("red","black","red","darkgreen","red"),lwd=1)+
# #geom_point(data=refmodel.dat,aes(x=x,y=y),color="blue",size=3)+
# theme_bw()
# multiprofplot
#dev.off()
# png(file = paste0(profile_dir,"/",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"),width = 10, height = 10, units = "in", res = 300, pointsize = pt)
# output$LikeProf_multiplot <- renderImage({
# image.path<-normalizePath(file.path(paste0(profile_dir,paste0("\\",paste(parmnames,collapse="_"),"_multilikelihood_profile.png"))),mustWork=FALSE)
# return(list(
# src = image.path,
# contentType = "image/png",
# # width = 400,
# # height = 300,
# style='height:60vh'))
# },deleteFile=FALSE)
# reshape data frame into a matrix for use with contour
# pngfun(wd = mydir, file = paste0("contour_profile.png"), h = 7,w = 12)
# contour(x = as.numeric(rownames(like_matrix)),
# y = as.numeric(colnames(like_matrix)),
# z = like_matrix)
# dev.off()
# make contour plot
# output$LikeProf_multi_contour <- renderPlot({
# like_matrix <- reshape2::acast(par.df, colnames(par.df)[1]~colnames()[2], value.var="like")
# pngfun(wd = mydir, file = paste0("contour_profile.png"), h = 7,w = 12)
# contour(x = as.numeric(rownames(like_matrix)),
# y = as.numeric(colnames(like_matrix)),
# z = like_matrix)
# dev.off()
# })
remove_modal_spinner()
})
#################
###############################
####### Retrospectives ########
###############################
shinyDirChoose(input,"Retro_dir", roots=roots,session=session, filetypes=c('', 'txt'))
# Reactive: directory of the model to run retrospectives on.
pathRetro <- reactive({
return(parseDirPath(roots, input$Retro_dir))
})
# Run retrospective peels via run_diagnostics and display the comparison figures.
observeEvent(input$run_Retro_comps,{
#if(input$run_Retro_comps){
show_modal_spinner(spin="flower",color=wes_palettes$Royal1[1],text="Running retrospectives")
mydir_in<-dirname(pathRetro())
scenario_in<-basename(pathRetro())
model_settings = get_settings(settings = list(base_name = scenario_in,
run = "retro",
retro_yrs = input$first_retro_year_in:input$final_retro_year_in))
run_diagnostics(mydir = mydir_in, model_settings = model_settings)
# tryCatch({
# run_diagnostics(mydir = mydir_in, model_settings = model_settings)
# },
# warning = function(warn){
# showNotification(paste0(warn), type = 'warning')
# },
# error = function(err){
# showNotification(paste0(err), type = 'err')
# })
#}
# Figures are written by run_diagnostics into "<model>_retro/".
output$Retro_comp_plotSB <- renderImage({
image.path<-normalizePath(file.path(paste0(pathRetro(),"_retro/compare2_spawnbio_uncertainty.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
output$Retro_comp_plotBratio <- renderImage({
image.path<-normalizePath(file.path(paste0(pathRetro(),"_retro/compare4_Bratio_uncertainty.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
},deleteFile=FALSE)
remove_modal_spinner()
})
##############################
###############################
### Sensitivity comparisons ###
###############################
# Reactive: parent directory containing the scenario folders to compare.
pathSensi <- reactive({
shinyDirChoose(input, "Sensi_dir", roots=roots,session=session, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$Sensi_dir))
})
# Populate the reference-model picker when the sensitivities tab is visited.
observeEvent(as.numeric(input$tabs)==6,{
output$Sensi_model_Ref<-renderUI({
#dirinfo <- parseDirPath(roots, input$Sensi_dir)
pickerInput(
inputId = "myPicker_Ref",
label = "Choose reference model",
#choices = list.files(dirinfo),
choices = list.files(pathSensi()),
options = list(
`actions-box` = TRUE,
size = 12,
`selected-text-format` = "count > 3"
),
multiple = FALSE
)
})
})
# Populate the comparison-scenarios picker once a reference model is chosen.
observeEvent(!is.null(input$myPicker_Ref),{
# observeEvent(as.numeric(input$tabs)==6,{
output$Sensi_model_picks<-renderUI({
#dirinfo <- parseDirPath(roots, input$Sensi_dir)
pickerInput(
inputId = "myPicker",
label = "Choose scenarios to compare to reference model",
#choices = list.files(dirinfo),
choices = list.files(pathSensi()),
options = list(
`actions-box` = TRUE,
size = 12,
`selected-text-format` = "count > 3"
),
multiple = TRUE
)
})
})
#SS.comparisons<-observeEvent(as.numeric(input$tabs)==5,{
# Vector of model directories: reference first, then the chosen scenarios.
Sensi_model_dir_out<-eventReactive(req(input$run_Sensi_comps&!is.null(input$myPicker)&as.numeric(input$tabs)==6),{
if(!file.exists(paste0(pathSensi(),"/Sensitivity Comparison Plots")))
{
dir.create(paste0(pathSensi(),"/Sensitivity Comparison Plots"))
}
Sensi_model_dir_out_Ref<-paste0(pathSensi(),"/",input$myPicker_Ref)
Sensi_model_dir_sensi<-paste0(pathSensi(),"/",input$myPicker)
Sensi_model_dir<-c(Sensi_model_dir_out_Ref,Sensi_model_dir_sensi)
Sensi_model_dir
})
#&exists(Sensi_model_dir_out())
# Build comparison figures and relative-error plots across the chosen models.
observeEvent(req(input$run_Sensi_comps),{
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text="Comparisons running")
modelnames<-c(input$myPicker_Ref,input$myPicker)
zz<-list()
Runs<-length(Sensi_model_dir_out())
for(i in 1:Runs)
{zz[[i]]<-SS_output(paste0(Sensi_model_dir_out()[i]))}
modsummary.sensi<- SSsummarize(zz)
col.vec = rc(n=length(modelnames), alpha = 1)
shade = adjustcolor(col.vec[1], alpha.f = 0.10)
# Target/limit reference points; treat missing inputs as 0 (no reference lines).
TRP.in<-input$Sensi_TRP
LRP.in<-input$Sensi_LRP
if(is.na(TRP.in)){TRP.in<-0}
if(is.na(LRP.in)){LRP.in<-0}
dir.create(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file))
#Sensi_uncertainty_choice<-input$Sensi_uncertainty_choice
#if (all(is.na(quantsSD[, i]) | quantsSD[, i] == 0)) Sensi_uncertainty_choice<-TRUE
# NOTE(review): Sensi_uncertainty_choice is used below but its assignment above is
# commented out - confirm it is defined elsewhere (e.g. earlier in the server).
# Composite figure: spawning output + status (subplots 2,4) and index fits (11).
pngfun(wd = paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file), file = paste0(input$Sensi_comp_file,".png"), h = 7,w = 12)
par(mfrow = c(1,3))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = c(2,4),col = col.vec, new = FALSE,btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = 11,col = col.vec, new = FALSE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
dev.off()
# Full comparison set saved to the output folder, plus the summary object.
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30,col = col.vec, new = FALSE,print=TRUE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice,plotdir=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file)))
save(modsummary.sensi,file=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file,".DMP"))
# Same composite without uncertainty intervals (subplots 1,3).
pngfun(wd = paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file), file = paste0(input$Sensi_comp_file,"_no_uncertainty.png"), h = 7,w = 12)
par(mfrow = c(1,3))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = c(1,3),col = col.vec, new = FALSE,btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
try(SSplotComparisons(modsummary.sensi, legendlabels = modelnames, ylimAdj = 1.30, subplot = 11,col = col.vec, new = FALSE, legendloc = 'topleft',btarg=TRP.in,minbthresh=LRP.in,uncertainty=Sensi_uncertainty_choice))
dev.off()
# Show the uncertainty version when SDs exist, otherwise the no-uncertainty one.
output$Sensi_comp_plot <- renderImage({
if (all(is.na(modsummary.sensi$quantsSD[, 1]) | modsummary.sensi$quantsSD[, 1] == 0)) {
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file, '_no_uncertainty.png')),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
}
else {
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/",input$Sensi_comp_file, '.png')),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
# width = 400,
# height = 300,
style='height:60vh'))
}
},deleteFile=FALSE)
#Relative error sensitivity plots
# Parse comma-separated annotation controls for the relative-error figure.
SensiRE_breaks_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_breaks,","))))
SensiRE_xcenter_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_xcenter,","))))
SensiRE_ycenter_in<-as.numeric(trimws(unlist(strsplit(input$SensiRE_ycenter,","))))
SensiRE_headers_in<-trimws(unlist(strsplit(input$SensiRE_headers,",")))
yminmax_sensi<-rep(c(input$SensiRE_ymin,input$SensiRE_ymax),5)
r4ss::SS_Sensi_plot(dir=paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/"),
model.summaries=modsummary.sensi,
current.year=modsummary.sensi$endyrs[1]+1,
mod.names=modelnames, #List the names of the sensitivity runs
#likelihood.out=c(0,0,0),
Sensi.RE.out="Sensi_RE_out.DMP", #Saved file of relative errors
CI=0.95, #Confidence interval box based on the reference model
TRP.in=input$Sensi_TRP, #Target relative abundance value
LRP.in=input$Sensi_LRP, #Limit relative abundance value
sensi_xlab="Sensitivity scenarios", #X-axis label
ylims.in=yminmax_sensi, #Y-axis label
plot.figs=c(1,1,1,1,1,1), #Which plots to make/save?
sensi.type.breaks=SensiRE_breaks_in, #vertical breaks that can separate out types of sensitivities
anno.x=SensiRE_xcenter_in, # Vertical positioning of the sensitivity types labels
anno.y=SensiRE_ycenter_in, # Horizontal positioning of the sensitivity types labels
anno.lab=SensiRE_headers_in #Sensitivity types labels
)
output$SensiRE_comp_plot <- renderImage({
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/Sensi_REplot_SB_Dep_F_MSY.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
width = 800,
height = 1200,
style='height:60vh'))
},deleteFile=FALSE)
output$SensiRElog_comp_plot <- renderImage({
image.path<-normalizePath(file.path(paste0(pathSensi(),"/Sensitivity Comparison Plots/",input$Sensi_comp_file,"/Sensi_logREplot_SB_Dep_F_MSY.png")),mustWork=FALSE)
return(list(
src = image.path,
contentType = "image/png",
width = 400,
height = 300,
style='height:60vh'))
},deleteFile=FALSE)
remove_modal_spinner()
})
#############################
#############################
# image.path<-eventReactive(exists(file.path(paste0(path1(),"/Sensitivity Comparison Plots/", input$Sensi_comp_file, '.png'))),{
# image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/", input$Sensi_comp_file, '.png')),mustWork=FALSE)
# })
# output$Sensi_comp_plot <- renderImage({
# image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/", input$Sensi_comp_file, '.png')),mustWork=FALSE)
# return(list(
# src = image.path,
# contentType = "image/png",
# # width = 400,
# # height = 300,
# style='height:60vh'))
# print(input$run_Sensi_comps[1])
# },deleteFile=FALSE)
####################################
##########################
### Ensemble modelling ###
##########################
# Reactive: parent directory containing the scenario folders to ensemble.
pathEnsemble <- reactive({
shinyDirChoose(input, "Ensemble_dir", roots=roots, filetypes=c('', 'txt'))
return(parseDirPath(roots, input$Ensemble_dir))
})
#Used to have as.numeric(input$tabs)==4
observeEvent(as.numeric(input$tabs)==7,{
output$Ensemble_model_picks<-renderUI({
pickerInput(
inputId = "myEnsemble",
label = "Choose scenarios to ensemble",
choices = list.files(pathEnsemble()),
options = list(
`actions-box` = TRUE,
size = 12,
`selected-text-format` = "count > 3"
),
multiple = TRUE
)
})
})
#Ensemble_model_dir_out<-eventReactive(req(input$run_Ensemble&!is.null(input$myEnsemble)&as.numeric(input$tabs)==6),{
# Build a weighted ensemble of the selected models by drawing normal samples of
# each derived quantity, with draw counts proportional to the user weights.
observeEvent(req(input$run_Ensemble&!is.null(input$myEnsemble)&as.numeric(input$tabs)==7),{
show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[1],text="Prepare models to combine into ensembles")
#Ensemble_model_dir_out<-eventReactive(input$run_Ensemble,{
#Ensemble.outputs<-eventReactive(input$run_Ensemble,{
if(!file.exists(paste0(pathEnsemble(),"/Ensemble outputs")))
{
dir.create(paste0(pathEnsemble(),"/Ensemble outputs"))
}
Ensemble_model_dir_out<-paste0(pathEnsemble(),"/Ensemble outputs/",input$Ensemble_file)
dir.create(Ensemble_model_dir_out)
# })
# print(Ensemble_model_dir_out())
# exists("Ensemble_model_dir_out()")
#Ensemble_model_dir_out
#})
#exists(Ensemble_model_dir_out())
# observeEvent(req(input$run_Ensemble&!is.null(input$myEnsemble)),{
# Ensemble.outputs<-eventReactive(input$run_Ensemble,{
modelnames<-input$myEnsemble
zz<-list()
Runs<-length(input$myEnsemble)
for(i in 1:Runs)
{zz[[i]]<-SS_output(paste0(pathEnsemble(),"/",input$myEnsemble[i]))}
modsummary.ensemble<- SSsummarize(zz)
# Normalize user weights, then allot draws out of 10000 per model.
Ensemble_wts<-as.numeric(trimws(unlist(strsplit(input$Ensemble_wts,","))))
Stand_ensemble_wts<-Ensemble_wts/sum(Ensemble_wts)
Nsamps_ensemble<-10000
Nsamps_ensemble_wts<-round(Nsamps_ensemble*Stand_ensemble_wts)
#Calculate weighted values
# Draw numdraws normal samples given a mean and SD (numdraws set inside the loop).
mean.fxn <- function(x, y) rnorm(numdraws, mean = x, sd = y)
#Spawning outputs
#Bratio
SpOt_en<-Bratio_en<-F_en<-SPR_en<-list()
SO_0<-SO_t<-Bratio_t<-F_t<-SPR_t<-data.frame(Year=NA,Metric=NA,Model=NA)
#Create weighted ensembles
for (i in 1:length(Nsamps_ensemble_wts))
{
numdraws<-Nsamps_ensemble_wts[i] SpOt_en[[i]]<-Map(mean.fxn,modsummary.ensemble$SpawnBio[,i],modsummary.ensemble$SpawnBioSD[,i]) names(SpOt_en[[i]])<-modsummary.ensemble$SpawnBio$Yr SO_0<-rbind(SO_0,data.frame(Year=as.numeric(names(SpOt_en[[i]][1])),Metric=unlist(SpOt_en[[i]][1]),Model=input$myEnsemble[i])) SO_t<-rbind(SO_t,data.frame(Year=names(SpOt_en[[i]][nrow(modsummary.ensemble$SpawnBio)]),Metric=unlist(SpOt_en[[i]][length(Nsamps_ensemble_wts)]),Model=input$myEnsemble[i])) Bratio_en[[i]]<-Map(mean.fxn,modsummary.ensemble$Bratio[,i],modsummary.ensemble$BratioSD[,i]) names(Bratio_en[[i]])<-modsummary.ensemble$Bratio$Yr Bratio_t<-rbind(Bratio_t,data.frame(Year=names(Bratio_en[[i]][nrow(modsummary.ensemble$Bratio)]),Metric=unlist(Bratio_en[[i]][nrow(modsummary.ensemble$Bratio)]),Model=input$myEnsemble[i])) F_en[[i]]<-Map(mean.fxn,modsummary.ensemble$Fvalue[,i],modsummary.ensemble$FvalueSD[,i]) names(F_en[[i]])<-modsummary.ensemble$Fvalue$Yr F_t<-rbind(F_t,data.frame(Year=names(F_en[[i]][nrow(modsummary.ensemble$Fvalue)]),Metric=unlist(F_en[[i]][nrow(modsummary.ensemble$Fvalue)]),Model=input$myEnsemble[i])) SPR_en[[i]]<-Map(mean.fxn,modsummary.ensemble$SPRratio[,i],modsummary.ensemble$SPRratioSD[,i]) names(SPR_en[[i]])<-modsummary.ensemble$SPRratio$Yr SPR_t<-rbind(SPR_t,data.frame(Year=names(SPR_en[[i]][nrow(modsummary.ensemble$SPRratio)]),Metric=unlist(SPR_en[[i]][nrow(modsummary.ensemble$SPRratio)]),Model=input$myEnsemble[i])) } #Reduce(intersect,list(names(list1),names(list2),names(list3))) # Code to find matches in multiple vectors. For future option of mixing models with different dimensions. 
#Assemble ensembles Ensemble_SO<-SpOt_en[[1]] Ensemble_Bratio<-Bratio_en[[1]] Ensemble_F<-F_en[[1]] Ensemble_SPR<-SPR_en[[1]] for(ii in 2:length(Nsamps_ensemble_wts)) { Ensemble_SO<-mapply(c,Ensemble_SO,SpOt_en[[ii]]) Ensemble_Bratio<-mapply(c,Ensemble_Bratio,Bratio_en[[ii]]) Ensemble_F<-mapply(c,Ensemble_F,F_en[[ii]]) Ensemble_SPR<-mapply(c,Ensemble_SPR,SPR_en[[ii]]) } SO_0<-rbind(SO_0[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SO)[1]),Metric=Ensemble_SO[,1],Model="Ensemble")) SO_t<-rbind(SO_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SO)[ncol(Ensemble_SO)]),Metric=Ensemble_SO[,ncol(Ensemble_SO)],Model="Ensemble")) Bratio_t<-rbind(Bratio_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_Bratio)[ncol(Ensemble_Bratio)]),Metric=Ensemble_Bratio[,ncol(Ensemble_Bratio)],Model="Ensemble")) F_t<-rbind(F_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_F)[ncol(Ensemble_F)]),Metric=Ensemble_F[,ncol(Ensemble_F)],Model="Ensemble")) SPR_t<-rbind(SPR_t[-1,],data.frame(Year=as.numeric(colnames(Ensemble_SPR)[ncol(Ensemble_SPR)]),Metric=Ensemble_SPR[,ncol(Ensemble_SPR)],Model="Ensemble")) SO_0$Year<-as.factor(SO_0$Year) SO_t$Year<-as.factor(SO_t$Year) Bratio_t$Year<-as.factor(Bratio_t$Year) F_t$Year<-as.factor(F_t$Year) SPR_t$Year<-as.factor(SPR_t$Year) # mean_cl_quantile <- function(x, q = c(0.1, 0.9), na.rm = TRUE){ # dat <- data.frame(y = mean(x, na.rm = na.rm), # ymin = quantile(x, probs = q[1], na.rm = na.rm), # ymax = quantile(x, probs = q[2], na.rm = na.rm)) # return(dat) # } show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[2],text="Preparing ensemble plots") #Boxplots gg1<-ggplot(SO_0,aes(Model,Metric))+ geom_violin()+ ylab("Initial Spawning Output") gg2<-ggplot(SO_t,aes(Model,Metric))+ geom_violin()+ ylab("Terminal Year Spawning Output") gg3<-ggplot(Bratio_t,aes(Model,Metric))+ geom_violin()+ ylab("Relative stock status") gg4<-ggplot(F_t,aes(Model,Metric))+ geom_violin()+ ylab("Fishing mortality") gg5<-ggplot(SPR_t,aes(Model,Metric))+ 
geom_violin()+ ylab("1-SPR") ggarrange(gg1,gg2,gg3,gg4,gg5) ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_comp_plots.png")) output$Ensemble_plots <- renderPlot({ ggarrange(gg1,gg2,gg3,gg4,gg5)}) #Spawning Output plot Ensemble_SO_plot<-reshape2::melt(Ensemble_SO,value.name="SO") colnames(Ensemble_SO_plot)[2]<-"Year" Ensemble_SO_plot$Year<-as.factor(Ensemble_SO_plot$Year) ggplot(Ensemble_SO_plot,aes(Year,SO,fill=Year))+ geom_violin()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+ ylab("Spawning Output") ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_SO.png")) #Relative stock status plot Ensemble_Bratio_plot<-reshape2::melt(Ensemble_Bratio,value.name="Bratio") colnames(Ensemble_Bratio_plot)[2]<-"Year" Ensemble_Bratio_plot$Year<-as.factor(Ensemble_Bratio_plot$Year) ggplot(Ensemble_Bratio_plot,aes(Year,Bratio,fill=Year))+ geom_violin()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+ ylab("SBt/SO0") ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_Bratio.png")) #F plot Ensemble_F_plot<-reshape2::melt(Ensemble_F,value.name="F") colnames(Ensemble_F_plot)[2]<-"Year" Ensemble_F_plot$Year<-as.factor(Ensemble_F_plot$Year) ggplot(Ensemble_F_plot,aes(Year,F,fill=Year))+ geom_violin()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+ ylab("Fishing mortality") ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_F.png")) #1-SPR plot Ensemble_SPR_plot<-reshape2::melt(Ensemble_SO,value.name="SPR") colnames(Ensemble_SPR_plot)[2]<-"Year" Ensemble_SPR_plot$Year<-as.factor(Ensemble_SPR_plot$Year) ggplot(Ensemble_SPR_plot,aes(Year,SPR,fill=Year))+ geom_violin()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust=0.5,size=10))+ ylab("1-SPR") ggsave(paste0(Ensemble_model_dir_out,"/Ensemble_SPR.png")) #Get simpler plots for SB0, SBcurrent, RSS, F, and SPR in terminal year # 
ggplot(reshape2::melt(Ensemble_Bratio,value.name="Bratio"),aes(Var2,Bratio))+ # stat_summary(geom = "line", fun = median)+ # ylim(0,1)+ # stat_summary(geom = "ribbon", fun.data = mean_cl_quantile, alpha = 0.3) #Make outputs show_modal_spinner(spin="flower",color=wes_palettes$Rushmore[3],text="Saving ensemble objects") Model.outputs<-list("Spawning Output"=SpOt_en,"Relative Stock Status"=Bratio_en,"Fishing mortality"=F_en,"1-SPR"=SPR_en) Ensemble.outputs<-list("Spawning Output"=Ensemble_SO,"Relative Stock Status"=Ensemble_Bratio,"Fishing mortality"=Ensemble_F,"1-SPR"=Ensemble_SPR) Ensemble.outputs.plots<-list("Spawning Output"=Ensemble_SO_plot,"Relative Stock Status"=Ensemble_Bratio_plot,"Fishing mortality"=Ensemble_F_plot,"1-SPR"=Ensemble_SPR_plot) save(Model.outputs,file=paste0(Ensemble_model_dir_out,"/Model_results",".DMP")) save(Ensemble.outputs,file=paste0(Ensemble_model_dir_out,"/Ensemble_results",".DMP")) save(Ensemble.outputs.plots,file=paste0(Ensemble_model_dir_out,"/Ensemble_results_plots",".DMP")) remove_modal_spinner() # return(Ensemble.outputs) }) #}) #observeEvent(req(input$run_Ensemble&exists("Ensemble.outputs()")),{ # # }) #Create figures of weighted values # output$Sensi_comp_plot <- renderImage({ # image.path<-normalizePath(file.path(paste0(path1(),"/Sensitivity Comparison Plots/", # input$Sensi_comp_file, '.png')),mustWork=FALSE) # return(list( # src = image.path, # contentType = "image/png", # # width = 400, # # height = 300, # style='height:60vh')) # },deleteFile=FALSE) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MphCalcLogL.R \name{MphCalcLogL} \alias{MphCalcLogL} \title{Calculate log likelihood} \usage{ MphCalcLogL(eval, D_l, Qi, UltVehiY, xHiy) } \arguments{ \item{eval}{eigenvalues vector from decomposition of relatedness matrix} \item{D_l}{vector of eigenvalues from decomposition of Ve matrix} \item{Qi}{inverse of Q matrix} \item{UltVehiY}{matrix of (transformed) Y values} \item{xHiy}{vector} } \description{ Calculate log likelihood }
/man/MphCalcLogL.Rd
no_license
fboehm/gemma2
R
false
true
516
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MphCalcLogL.R \name{MphCalcLogL} \alias{MphCalcLogL} \title{Calculate log likelihood} \usage{ MphCalcLogL(eval, D_l, Qi, UltVehiY, xHiy) } \arguments{ \item{eval}{eigenvalues vector from decomposition of relatedness matrix} \item{D_l}{vector of eigenvalues from decomposition of Ve matrix} \item{Qi}{inverse of Q matrix} \item{UltVehiY}{matrix of (transformed) Y values} \item{xHiy}{vector} } \description{ Calculate log likelihood }
#' Run ROI quantification over one or all spectra.
#'
#' For every selected spectrum, the region of interest described by
#' `ROI_profile` is either integrated ("Clean Sum" / "Baseline Sum") or
#' deconvoluted by line-shape fitting ("Clean Fitting" / "Baseline
#' Fitting"). Results and a ggplot of the fit are collected in a list;
#' in autorun mode they are additionally saved via save_roi_testing().
#'
#' @param autorun_data list with the dataset, ppm axis, signal names and
#'   codes, experiment names, export path, frequency and bucket step.
#' @param finaloutput accumulated output object, forwarded to
#'   save_roi_testing().
#' @param input shiny input object; `input$x1_rows_selected` gives the
#'   spectra to process when `is_autorun == 'N'`.
#' @param ROI_profile one ROI definition: ppm limits (cols 1:2), fitting
#'   type (col 3), signal names (col 4) and fitting parameters (cols 5:11).
#' @param is_autorun 'Y' to process every spectrum and save results,
#'   'N' to process only the rows selected in the UI.
#' @return A list ("blah") with the quantification results, fitted
#'   signals and diagnostic plot of the last processed spectrum.
autorun <- function(autorun_data, finaloutput, input, ROI_profile, is_autorun) {
  blah <- list()

  # Which spectra to process: the UI selection, or every row in autorun mode.
  if (is_autorun == 'N') {
    indexes <- input$x1_rows_selected
  } else {
    indexes <- seq_len(dim(autorun_data$dataset)[1])
  }

  for (spectrum_index in indexes) {
    # Dataset columns whose ppm value falls inside the ROI limits.
    ROI_buckets <- which(round(autorun_data$ppm, 6) == round(ROI_profile[1, 1], 6)):which(round(autorun_data$ppm, 6) == round(ROI_profile[1, 2], 6))
    Xdata <- as.numeric(autorun_data$ppm[ROI_buckets])
    Ydata <- as.numeric(autorun_data$dataset[spectrum_index, ROI_buckets])

    other_fit_parameters <- fitting_variables()
    other_fit_parameters$freq <- autorun_data$freq
    other_fit_parameters$ROI_buckets <- ROI_buckets
    other_fit_parameters$buck_step <- autorun_data$buck_step

    fitting_type <- as.character(ROI_profile[1, 3])

    # Signals flagged for quantification (column 7) and their codes/names.
    signals_to_quantify <- which(ROI_profile[, 7] == 1)
    signals_codes <- replicate(length(signals_to_quantify), NA)
    signals_names <- replicate(length(signals_to_quantify), NA)
    j <- 1
    for (i in signals_to_quantify) {
      k <- which(autorun_data$signals_names == ROI_profile[i, 4])
      signals_codes[j] <- autorun_data$signals_codes[k]
      signals_names[j] <- as.character(autorun_data$signals_names[k])
      j <- j + 1
    }

    # One output folder per quantified signal for this experiment.
    experiment_name <- autorun_data$Experiments[[spectrum_index]]
    plot_path <- file.path(autorun_data$export_path, experiment_name, signals_names)
    for (i in seq_along(plot_path)) {
      if (!dir.exists(plot_path[i])) dir.create(plot_path[i])
    }

    if (fitting_type == "Clean Sum" || fitting_type == "Baseline Sum") {
      # Quantification by plain integration, with or without baseline.
      is_roi_testing <- "N"
      clean_fit <- if (fitting_type == "Clean Sum") "Y" else "N"
      integration_parameters <- data.frame(plot_path, is_roi_testing, clean_fit)
      fa <- integration(integration_parameters, Xdata, Ydata)
      results_to_save <- fa$results_to_save
      p <- fa$p
      blah$integration_parameters <- integration_parameters
    } else if (fitting_type == "Clean Fitting" || fitting_type == "Baseline Fitting") {
      # Quantification by line-shape fitting.
      is_roi_testing <- "N"
      clean_fit <- 'N'
      # NOTE(review): the names/codes computed above are overwritten here
      # with the first two dataset signals — confirm this is intentional.
      signals_names <- autorun_data$signals_names[1:2]
      signals_codes <- autorun_data$signals_codes[1:2]

      # Per-signal fitting parameters (columns 5:11 of the ROI profile).
      initial_fit_parameters <- ROI_profile[, 5:11, drop = FALSE]
      colnames(initial_fit_parameters) <- c(
        "positions", "widths", "quantification_or_not", "multiplicities",
        "Jcoupling", "roof_effect", "shift_tolerance"
      )

      # Ydata is scaled to [0, 1] to improve the quality of the fitting.
      scaledYdata <- as.vector(Ydata / (max(Ydata)))
      other_fit_parameters$clean_fit <- clean_fit

      # Assemble the feature matrix (including baseline signals when
      # needed), optimise the parameters and rebuild the fitted signals.
      FeaturesMatrix <- fitting_prep(Xdata, scaledYdata, initial_fit_parameters, other_fit_parameters)
      signals_parameters <- fittingloop(FeaturesMatrix, Xdata, scaledYdata, other_fit_parameters)
      multiplicities <- FeaturesMatrix[, 11]
      roof_effect <- FeaturesMatrix[, 12]
      fitted_signals <- fitting_optimization(signals_parameters, Xdata, multiplicities, roof_effect)

      dim(signals_parameters) <- c(5, dim(FeaturesMatrix)[1])
      rownames(signals_parameters) <- c('intensity', 'shift', 'width', 'gaussian', 'J_coupling')
      other_fit_parameters$signals_to_quantify <- signals_to_quantify

      # Summary of the fit plus the data needed for the diagnostic figure.
      output_data <- output_generator(signals_to_quantify, fitted_signals, scaledYdata, Xdata, signals_parameters, multiplicities)
      output_data$intensity <- signals_parameters[1, signals_to_quantify] * max(Ydata)
      output_data$width <- signals_parameters[3, signals_to_quantify]

      # Final output variables, de-scaled back to the original Ydata units.
      # BUG FIX: Area was previously multiplied by max(Ydata) twice — once
      # in this data frame and once again immediately afterwards — which
      # inflated every fitted area by an extra factor of max(Ydata).
      results_to_save <- data.frame(
        shift = output_data$shift,
        Area = output_data$Area * max(Ydata),
        signal_area_ratio = output_data$signal_area_ratio,
        fitting_error = output_data$fitting_error,
        intensity = output_data$intensity,
        width = output_data$width
      )

      # Figure: original vs generated spectrum, generated background,
      # surrounding signals, and shaded quantified signals.
      plot_data <- rbind(
        output_data$signals_sum,
        output_data$baseline_sum,
        output_data$fitted_sum,
        output_data$signals
      )
      rownames(plot_data) <- c("signals_sum", "baseline_sum", "fitted_sum", as.character(ROI_profile[, 1]))
      plotdata2 <- data.frame(Xdata, Ydata, plot_data[3, ] * max(Ydata), plot_data[2, ] * max(Ydata))
      plotdata3 <- melt(plotdata2, id = "Xdata")
      plotdata3$variable <- c(
        rep('Original Spectrum', length(Ydata)),
        rep('Generated Spectrum', length(Ydata)),
        rep('Generated Background', length(Ydata))
      )
      plotdata4 <- data.frame(Xdata, (t(plot_data[-c(1, 2, 3), , drop = FALSE]) * max(Ydata)))
      plotdata5 <- melt(plotdata4, id = "Xdata")

      p <- ggplot() +
        geom_line(data = plotdata3, aes(x = Xdata, y = value, colour = variable, group = variable)) +
        geom_line(data = plotdata5, aes(x = Xdata, y = value, colour = 'Surrounding signals', group = variable)) +
        scale_x_reverse() +
        labs(x = 'ppm', y = 'Intensity') +
        expand_limits(y = 0)
      for (r in seq_along(other_fit_parameters$signals_to_quantify)) {
        plotdata <- data.frame(Xdata, signals = plot_data[3 + other_fit_parameters$signals_to_quantify[r], ] * max(Ydata))
        p <- p + geom_area(data = plotdata, aes(x = Xdata, y = signals, position = 'fill', fill = 'Quantified Signal'))
      }

      signals_parameters <- t(rbind(signals_parameters, multiplicities, roof_effect))
      print(signals_parameters)

      blah$signals_parameters <- signals_parameters
      blah$other_fit_parameters <- other_fit_parameters
      blah$import_excel_profile <- ROI_profile
      blah$Ydata <- Ydata
      blah$fitted_signals <- fitted_signals
      blah$plot_data <- plot_data
      blah$FeaturesMatrix <- FeaturesMatrix
      blah$Xdata <- Xdata
    }
    # NOTE(review): if fitting_type matches none of the four known types,
    # `p` and `results_to_save` are undefined at this point — confirm the
    # ROI profile is validated upstream.
    blah$p <- p
    blah$plot_path <- plot_path
    blah$results_to_save <- results_to_save
    blah$spectrum_index <- spectrum_index
    blah$signals_codes <- signals_codes
    blah$fitting_type <- fitting_type
    if (is_autorun == 'Y') {
      save_roi_testing(blah, autorun_data, finaloutput)
      print('New')
    }
  }
  return(blah)
}
/load.R
no_license
user05011988/userproject
R
false
false
9,552
r
autorun = function(autorun_data, finaloutput,input,ROI_profile,is_autorun) { blah=list() #Preparation of necessary variables and folders to store figures and information of the fitting # if (is_autorun=='N') {indexes=input$x1_select if (is_autorun=='N') {indexes=input$x1_rows_selected } else { indexes=1:dim(autorun_data$dataset)[1] } # print(ROI_profile) for (spectrum_index in indexes) { ROI_buckets=which(round(autorun_data$ppm,6)==round(ROI_profile[1,1],6)):which(round(autorun_data$ppm,6)==round(ROI_profile[1,2],6)) # print(ROI_buckets) Xdata= as.numeric(autorun_data$ppm[ROI_buckets]) Ydata = as.numeric(autorun_data$dataset[spectrum_index, ROI_buckets]) other_fit_parameters = fitting_variables() other_fit_parameters$freq = autorun_data$freq other_fit_parameters$ROI_buckets = ROI_buckets other_fit_parameters$buck_step = autorun_data$buck_step fitting_type = as.character(ROI_profile[1, 3]) signals_to_quantify = which(ROI_profile[, 7] == 1) signals_codes = replicate(length(signals_to_quantify), NA) signals_names = replicate(length(signals_to_quantify), NA) j = 1 for (i in signals_to_quantify) { k = which(autorun_data$signals_names == ROI_profile[i, 4]) signals_codes[j] = autorun_data$signals_codes[k] signals_names[j] = as.character(autorun_data$signals_names[k]) j = j + 1 } experiment_name = autorun_data$Experiments[[spectrum_index]] plot_path = file.path(autorun_data$export_path, experiment_name, signals_names) for (i in seq_along(plot_path)) if (!dir.exists(plot_path[i])) dir.create(plot_path[i]) # print(plot_path) # If the quantification is through integration with or without baseline if (fitting_type == "Clean Sum" || fitting_type == "Baseline Sum") { is_roi_testing = "N" clean_fit = ifelse(fitting_type == "Clean Sum", "Y", "N") integration_parameters = data.frame(plot_path, is_roi_testing, clean_fit) fa = integration(integration_parameters, Xdata, Ydata) results_to_save=fa$results_to_save p=fa$p blah$integration_parameters=integration_parameters #Generation of 
output variables specific of every quantification #If the quantification is through fitting with or without baseline } else if (fitting_type == "Clean Fitting" || fitting_type == "Baseline Fitting") { is_roi_testing = "N" clean_fit='N' signals_names=autorun_data$signals_names[1:2] signals_codes=autorun_data$signals_codes[1:2] # clean_fit = ifelse(fitting_type == "Clean Fitting", "Y", # "N") # print(ROI_profile) #Parameters of every signal necessary for the fitting initial_fit_parameters = ROI_profile[, 5:11,drop=F] # initial_fit_parameters=as.data.frame(apply(initial_fit_parameters,2,as.numeric)) # initial_fit_parameters = initial_fit_parameters[complete.cases(initial_fit_parameters),] colnames(initial_fit_parameters) = c( "positions", "widths", "quantification_or_not", "multiplicities", "Jcoupling", "roof_effect", "shift_tolerance" ) #Ydata is scaled to improve the quality of the fitting scaledYdata = as.vector(Ydata / (max(Ydata))) #Other parameters necessary for the fitting independent of the type of signal other_fit_parameters$clean_fit = clean_fit #Adaptation of the info of the parameters into a single matrix and preparation (if necessary) of the background signals that will conform the baseline FeaturesMatrix = fitting_prep(Xdata, scaledYdata, initial_fit_parameters, other_fit_parameters) #Calculation of the parameters that will achieve the best fitting signals_parameters = fittingloop(FeaturesMatrix, Xdata, scaledYdata, other_fit_parameters) #Fitting of the signals multiplicities=FeaturesMatrix[,11] roof_effect=FeaturesMatrix[,12] fitted_signals = fitting_optimization(signals_parameters, Xdata,multiplicities,roof_effect) # signals_parameters=as.matrix(signals_parameters) dim(signals_parameters) = c(5, dim(FeaturesMatrix)[1]) rownames(signals_parameters) = c( 'intensity', 'shift', 'width', 'gaussian', 'J_coupling' ) other_fit_parameters$signals_to_quantify=signals_to_quantify #Generation of output data about the fitting and of the necessary variables for the 
generation ofa figure output_data = output_generator( signals_to_quantify, fitted_signals, scaledYdata, Xdata, signals_parameters,multiplicities ) output_data$intensity=signals_parameters[1, signals_to_quantify] * max(Ydata) output_data$width=signals_parameters[3, signals_to_quantify] #Generation of the dataframe with the final output variables results_to_save = data.frame( shift = output_data$shift, Area = output_data$Area * max(Ydata), signal_area_ratio = output_data$signal_area_ratio, fitting_error = output_data$fitting_error, intensity = output_data$intensity, width = output_data$width ) #Adaptation of the quantification to de-scaled Ydata results_to_save$Area = results_to_save$Area * max(Ydata) #Generation of the figure when the conditions specified in the Parameters file are accomplished # r=1 plot_data = rbind( output_data$signals_sum, output_data$baseline_sum, output_data$fitted_sum, output_data$signals ) rownames(plot_data) = c("signals_sum", "baseline_sum", "fitted_sum", as.character(ROI_profile[,1])) # plotdata = data.frame(Xdata=autorun_data$ppm[ROI_buckets], t(dataset[input$x1_select,ROI_buckets,drop=F])) plotdata2 = data.frame(Xdata, Ydata, plot_data[3, ] * max(Ydata), plot_data[2, ] * max(Ydata)) plotdata3 <- melt(plotdata2, id = "Xdata") plotdata3$variable = c( rep('Original Spectrum', length(Ydata)), rep('Generated Spectrum', length(Ydata)), rep('Generated Background', length(Ydata)) ) plotdata4 = data.frame(Xdata, (t(plot_data[-c(1, 2, 3), , drop = F]) * max(Ydata))) plotdata5 = melt(plotdata4, id = "Xdata") # p=plot_ly(data=plotdata3,x=~Xdata,y=~value,color=~variable,type='scatter',mode='lines') %>% layout(xaxis = list(autorange = "reversed")) # p <- add_trace(p,data=plotdata5,x = ~Xdata, # y = ~value, # colour = 'Surrounding signals', # group = ~variable) # p <- add_trace(p,data=plotdata5,x = ~Xdata, # y = ~value, # colour = 'Surrounding signals', # group = ~variable) # 
plot_ly(data=plotdata3,x=~Xdata,y=~value,color=~variable,type='scatter',mode='lines') %>% layout(xaxis = list(autorange = "reversed")) p=ggplot() + geom_line(data = plotdata3, aes( x = Xdata, y = value, colour = variable, group = variable )) + geom_line(data = plotdata5, aes( x = Xdata, y = value, colour = 'Surrounding signals', group = variable )) + scale_x_reverse() + labs(x='ppm',y='Intensity') + expand_limits(y=0) for (r in 1:length(other_fit_parameters$signals_to_quantify)) { plotdata = data.frame(Xdata, signals = plot_data[3 + other_fit_parameters$signals_to_quantify[r], ] * max(Ydata)) p=p + geom_area( data = plotdata, aes( x = Xdata, y = signals, position = 'fill', fill = 'Quantified Signal' ) ) } signals_parameters=t(rbind(signals_parameters,multiplicities,roof_effect)) print(signals_parameters) blah$signals_parameters=signals_parameters blah$other_fit_parameters=other_fit_parameters blah$results_to_save=results_to_save blah$import_excel_profile=ROI_profile blah$Ydata=Ydata blah$fitted_signals=fitted_signals blah$plot_data=plot_data blah$FeaturesMatrix=FeaturesMatrix blah$signals_parameters=signals_parameters blah$Xdata=Xdata } blah$p=p blah$plot_path=plot_path blah$results_to_save=results_to_save blah$spectrum_index=spectrum_index blah$signals_codes=signals_codes blah$fitting_type=fitting_type if (is_autorun=='Y') { save_roi_testing(blah,autorun_data, finaloutput) print('New') } } # blah$finaloutput=finaloutput # blah$autorun_data=autorun_data return(blah) }
library("testthat")
library("GMSE")

context("Landscape initialisation")

# Every test builds a homogeneous landscape: one cell type whose value is
# pinned to 1 (mean 1, sd 0, min = max = 1) across 3 layers, varying only
# the ownership settings.

test_that("Landscape dimensions are initialised accurately", {
  skip_on_cran()
  land <- make_landscape(model = "IBM", rows = 10, cols = 10,
                         cell_types = 1, cell_val_mn = 1, cell_val_sd = 0,
                         cell_val_max = 1, cell_val_min = 1, layers = 3,
                         ownership = 1, owners = 1, public_land = 0)
  expect_equal(dim(land), c(10, 10, 3))
})

test_that("Landscape values are initialised accurately", {
  skip_on_cran()
  land <- make_landscape(model = "IBM", rows = 10, cols = 10,
                         cell_types = 1, cell_val_mn = 1, cell_val_sd = 0,
                         cell_val_max = 1, cell_val_min = 1, layers = 3,
                         ownership = 0, owners = 1, public_land = 1)
  # With sd = 0 and min = max = 1, every cell must equal 1 exactly.
  expect_equal(max(land), 1)
  expect_equal(min(land), 1)
})

test_that("Landscape values are reset when needed", {
  skip_on_cran()
  land <- make_landscape(model = "IBM", rows = 10, cols = 10,
                         cell_types = 1, cell_val_mn = 1, cell_val_sd = 0,
                         cell_val_max = 1, cell_val_min = 1, layers = 3,
                         ownership = 1, owners = 1, public_land = 0)
  # Ageing a perturbed landscape should restore layer 2 from the original.
  expect_equal(age_land(land + 1, land, 2)[, , 2], land[, , 2])
})

test_that("All users are given land evenly distributed", {
  skip_on_cran()
  land <- make_landscape(model = "IBM", rows = 64, cols = 64,
                         cell_types = 1, cell_val_mn = 1, cell_val_sd = 0,
                         cell_val_max = 1, cell_val_min = 1, layers = 3,
                         ownership = 1, owners = 16, public_land = 0)
  # 64 x 64 cells split evenly among 16 owners.
  expect_equal(unique(table(land[, , 3])), (64 * 64 / 16))
})

test_that("All users are given land when unequal", {
  skip_on_cran()
  land <- make_landscape(model = "IBM", rows = 10, cols = 10,
                         cell_types = 1, cell_val_mn = 1, cell_val_sd = 0,
                         cell_val_max = 1, cell_val_min = 1, layers = 3,
                         ownership = 1, owners = 7, public_land = 0)
  # 100 cells cannot be divided evenly by 7, but all 7 owners get land.
  expect_equal(length(unique(as.vector(land[, , 3]))), 7)
})

test_that("Public land is added appropriately", {
  skip_on_cran()
  land <- make_landscape(model = "IBM", rows = 10, cols = 10,
                         cell_types = 1, cell_val_mn = 1, cell_val_sd = 0,
                         cell_val_max = 1, cell_val_min = 1, layers = 3,
                         ownership = 1, owners = 4, public_land = 0.5)
  # Half of the ownership-layer cells carry value 1 (the public share),
  # and the 4 private owners add 4 more distinct ids.
  expect_equal(sum(land[, , 3] == 1) / (10 * 10), 0.5)
  expect_equal(length(unique(as.vector(land[, , 3]))), (4 + 1))
})
/tests/testthat/test-make_landscape.R
no_license
ConFooBio/gmse
R
false
false
4,474
r
library("testthat"); library("GMSE"); context("Landscape initialisation"); test_that("Landscape dimensions are initialised accurately", { skip_on_cran(); land <- make_landscape(model = "IBM", rows = 10, cols = 10, cell_types = 1, cell_val_mn = 1, cell_val_sd = 0, cell_val_max = 1, cell_val_min = 1, layers = 3, ownership = 1, owners = 1, public_land = 0 ); expect_equal(dim(land), c(10, 10, 3)); }) test_that("Landscape values are initialised accurately", { skip_on_cran(); land <- make_landscape(model = "IBM", rows = 10, cols = 10, cell_types = 1, cell_val_mn = 1, cell_val_sd = 0, cell_val_max = 1, cell_val_min = 1, layers = 3, ownership = 0, owners = 1, public_land = 1 ); expect_equal(max(land), 1); expect_equal(min(land), 1); }) test_that("Landscape values are reset when needed", { skip_on_cran(); land <- make_landscape(model = "IBM", rows = 10, cols = 10, cell_types = 1, cell_val_mn = 1, cell_val_sd = 0, cell_val_max = 1, cell_val_min = 1, layers = 3, ownership = 1, owners = 1, public_land = 0 ); expect_equal(age_land(land+1, land, 2)[,,2], land[,,2]); }) test_that("All users are given land evenly distributed", { skip_on_cran(); land <- make_landscape(model = "IBM", rows = 64, cols = 64, cell_types = 1, cell_val_mn = 1, cell_val_sd = 0, cell_val_max = 1, cell_val_min = 1, layers = 3, ownership = 1, owners = 16, public_land = 0 ); expect_equal(unique(table(land[,,3])), (64*64/16) ); }) test_that("All users are given land when unequal", { skip_on_cran(); land <- make_landscape(model = "IBM", rows = 10, cols = 10, cell_types = 1, cell_val_mn = 1, cell_val_sd = 0, cell_val_max = 1, cell_val_min = 1, layers = 3, ownership = 1, owners = 7, public_land = 0 ); expect_equal(length(unique(as.vector(land[,,3]))), 7 ); }) test_that("Public land is added appropriately", { skip_on_cran(); land <- make_landscape(model = "IBM", rows = 10, cols = 10, cell_types = 1, cell_val_mn = 1, cell_val_sd = 0, cell_val_max = 1, cell_val_min = 1, layers = 3, ownership = 1, owners = 4, 
public_land = 0.5 ); expect_equal(sum(land[,,3] == 1) / (10 * 10), 0.5 ); expect_equal(length(unique(as.vector(land[,,3]))), (4 + 1) ); })
NULL

#' Climate data.
#'
#' Daily climate records from the Global Historical Climatology Network
#' (GHCN-Daily) archive, as used in the blogpost examples.
#'
#' @usage data(dtClimate)
#'
#' @source
#' http://doi.org/10.7289/V5D21VHZ
#' Downloaded from ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/
#'
#' Menne, M.J., I. Durre, B. Korzeniewski, S. McNeal, K. Thomas, X. Yin,
#' S. Anthony, R. Ray, R.S. Vose, B.E.Gleason, and T.G. Houston,
#' 2012: Global Historical Climatology Network - Daily, Version 3.12
"dtClimate"
/R/Data.R
no_license
cran/ggTimeSeries
R
false
false
440
r
NULL #' Climate data. #' #' The climate data used in the blogpost. #' #' @usage data(dtClimate) #' #' @source #' http://doi.org/10.7289/V5D21VHZ #' Downloaded from ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/ #' #' Menne, M.J., I. Durre, B. Korzeniewski, S. McNeal, K. Thomas, X. Yin, #' S. Anthony, R. Ray, R.S. Vose, B.E.Gleason, and T.G. Houston, #' 2012: Global Historical Climatology Network - Daily, Version 3.12 "dtClimate"
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{extract_boards} \alias{extract_boards} \title{extract_boards} \usage{ extract_boards(lines) } \arguments{ \item{lines}{lines extracted from the file} } \value{ board class objects, if lines do not meet these specifications, return NA } \description{ extract_boards } \examples{ lines <- readLines(file) lines <- lines \%>\% trimws() \%>\% .[. != ""] \%>\% paste(collapse = ",") extract_boards(lines) }
/man/extract_boards.Rd
no_license
sthkindacrazy/percolate
R
false
true
495
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{extract_boards} \alias{extract_boards} \title{extract_boards} \usage{ extract_boards(lines) } \arguments{ \item{lines}{lines extracted from the file} } \value{ board class objects, if lines do not meet these specifications, return NA } \description{ extract_boards } \examples{ lines <- readLines(file) lines <- lines \%>\% trimws() \%>\% .[. != ""] \%>\% paste(collapse = ",") extract_boards(lines) }
# Draw a cobweb (web) diagram of the logistic map r * x * (1 - x) in the
# top panel, then the corresponding time series in the bottom panel.
#
#   x0   : starting value of the iteration
#   r    : growth-rate parameter of the logistic map
#   last : number of iterations to draw (default 20)
#   ...  : further graphical parameters forwarded to plot()
cobweb <- function(x0, r, last = 20, ...) {
  grid <- seq(0, 1, .01)
  par(mfrow = c(2, 1))
  # Parabola of the map (blue) over the identity line (red).
  plot(grid, r * grid * (1 - grid), type = "l", col = "blue", ...)
  points(grid, grid, type = "l", col = "red")
  current <- x0
  for (i in 1:last) {
    previous <- current
    current <- logistic(previous, r)
    snapshot <- c(previous, current)
    points(previous, current, col = "red")
    # Horizontal step to the identity line, then vertical step to the curve.
    segments(snapshot[1], snapshot[2], current, current, lwd = .1)
    segments(snapshot[1], snapshot[1], previous, current, lwd = .1)
  }
  timeSerieLogistic(x0, r, last)
}

# Plot the trajectory x_{n+1} = logistic(x_n, r) against iteration number,
# on an empty canvas spanning all iterations and the unit interval.
timeSerieLogistic <- function(x0, r, last) {
  plot(1, 1, xlim = c(0, last), ylim = c(0, 1), type = "n")
  current <- x0
  for (i in 1:last) {
    current <- logistic(current, r)
    points(i, current)
  }
}

# One step of the logistic map: f(x) = r * x * (1 - x).
logistic <- function(x, r) {
  r * x * (1 - x)
}
/doc/cours/summerSchool/2016CSSS/josh/class1.R
no_license
simoncarrignon/phd
R
false
false
597
r
cobweb<-function(x0,r,last=20,...){ x=seq(0,1,.01) par(mfrow=c(2,1)) plot(x,r*x*(1-x),type="l",col="blue",...) points(x,x,type="l",col="red") xn=x0 for( i in 1:last){ xp=xn xn=logistic(xp,r) prev=c(xp,xn) points(xp,xn,col="red") segments(prev[1],prev[2],xn,xn,lwd=.1) segments(prev[1],prev[1],xp,xn,lwd=.1) } timeSerieLogistic(x0,r,last) } timeSerieLogistic<-function(x0,r,last){ #par(mfrow=c(2,1) plot(1,1,xlim=c(0,last),ylim=c(0,1),type="n") xn=x0 for( i in 1:last){ xp=xn xn=logistic(xp,r) points(i,xn) } } logistic<-function(x,r){ return(r*x*(1-x)) }
######################################################## ######################################################## #Project Euler# ######################################################## ######################################################## #################################################################################################### #Problem 11: Largest Product in a grid # n the 20??20 grid below, four numbers along a diagonal line have been marked in red. # # 08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 # 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 # 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 # 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 # 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 # 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 # 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 # 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 # 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 # 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 # 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 # 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 # 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 # 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 # 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 # 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 # 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 # 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 # 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 # 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48 # # The product of these numbers is 26 ?? 63 ?? 78 ?? 14 = 1788696. # # What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20??20 grid? 
#################################################################################################### #Clear the environment rm(list= ls()) # Input the Matrix M <- matrix(c(08, 02, 22, 97, 38, 15, 00, 40, 00, 75, 04, 05, 07, 78, 52, 12, 50, 77, 91, 08, 49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 04, 56, 62, 00, 81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 03, 49, 13, 36, 65, 52, 70, 95, 23, 04, 60, 11, 42, 69, 24, 68, 56, 01, 32, 56, 71, 37, 02, 36, 91, 22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80, 24, 47, 32, 60, 99, 03, 45, 02, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50, 32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70, 67, 26, 20, 68, 02, 62, 12, 20, 95, 63, 94, 39, 63, 08, 40, 91, 66, 49, 94, 21, 24, 55, 58, 05, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72, 21, 36, 23, 09, 75, 00, 76, 44, 20, 45, 35, 14, 00, 61, 33, 97, 34, 31, 33, 95, 78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 03, 80, 04, 62, 16, 14, 09, 53, 56, 92, 16, 39, 05, 42, 96, 35, 31, 47, 55, 58, 88, 24, 00, 17, 54, 24, 36, 29, 85, 57, 86, 56, 00, 48, 35, 71, 89, 07, 05, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58, 19, 80, 81, 68, 05, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 04, 89, 55, 40, 04, 52, 08, 83, 97, 35, 99, 16, 07, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66, 88, 36, 68, 87, 57, 62, 20, 72, 03, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69, 04, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 08, 46, 29, 32, 40, 62, 76, 36, 20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 04, 36, 16, 20, 73, 35, 29, 78, 31, 90, 01, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 05, 54, 01, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 01, 89, 19, 67, 48), nrow = 20, ncol = 20, byrow = TRUE) #Horizontal product product_horizontal <- c() for(i in 1:20){ for(j in 1:17){ product_horizontal <- c(product_horizontal, M[i,j]*M[i,j+1]*M[i,j+2]*M[i,j+3]) } } max_product_horizontal <- 
max(product_horizontal) #Vertical product product_vertical <- c() for(j in 1:20){ for(i in 1:17){ product_vertical <- c(product_vertical, M[i,j]*M[i+1,j]*M[i+2,j]*M[i+3,j]) } } max_product_vertical <- max(product_vertical) #Forward Diagonal product product_diagonal <- c() for(j in 1:17){ for(i in 1:17){ product_diagonal <- c(product_diagonal, M[i,j]*M[i+1,j+1]*M[i+2,j+2]*M[i+3,j+3]) } } max_product_diagonal <- max(product_diagonal) #Backward Diagnal product product_diagonal2 <- c() for(i in 1:17){ for(j in 4:20){ product_diagonal2 <- c(product_diagonal2, M[i,j]*M[i+1,j-1]*M[i+2,j-2]*M[i+3,j-3]) } } max_product_diagonal2 <- max(product_diagonal2) max_product <- max(max_product_diagonal2, max_product_diagonal, max_product_vertical, max_product_horizontal) #################################################################################################### #Problem 12: Highly Divisible Triangular Number # The sequence of triangle numbers is generated by adding the natural numbers. #So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be: # 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ... # Let us list the factors of the first seven triangle numbers: # 1: 1 # 3: 1,3 # 6: 1,2,3,6 # 10: 1,2,5,10 # 15: 1,3,5,15 # 21: 1,3,7,21 # 28: 1,2,4,7,14,28 # We can see that 28 is the first triangle number to have over five divisors. # What is the value of the first triangle number to have over five hundred divisors? 
#################################################################################################### #Clear the environment rm(list= ls()) #Write a function to find number of factors num_fac <- function(x){ #A number has atleast 2 factors: 1 and itself output <- 2 #We only need to check the number of factors till sqrt of x as any number below that has a mirror number above that for(i in 2:floor(sqrt(x))) { #Add 2 the divisor and quotient to factor list output <- if (x %% i == 0) output + 2 else output } #If x is divisible by sqrt(x) then that should be counted only once; Hard code 1,2,3 output <- if(x==1) 1 else if(x == 2 | x==3) 2 else if(sqrt(x) == floor(sqrt(x))) output - 1 else output output } start_time <- Sys.time() #Start with traingular number i <- 2 triangular_num <- 1 num_factors <- 1 while(num_factors <= 500) { triangular_num <- triangular_num + i num_factors <- num_fac(triangular_num) i <- i + 1 } num_factors triangular_num end_time <- Sys.time() end_time - start_time #Takes a bit of time but does the job #################################################################################################### #Problem 17: Number Letter Count # If the numbers 1 to 5 are written out in words: one, two, three, four, five, # then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total. # If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used? # NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage. 
#################################################################################################### #Clear the environment rm(list= ls()) #one to nine x <- nchar("one") + nchar("two") + nchar("three") + nchar("four") + nchar("five") + nchar("six") + nchar("seven") + nchar("eight") + nchar("nine") #ten to nineteen y <- nchar("ten") + nchar("eleven") + nchar("twelve") + nchar("thirteen") + nchar("fourteen") + nchar("fifteen") + nchar("sixteen") + nchar("seventeen") + nchar("eighteen") + nchar("nineteen") #10s z <- nchar("twenty") + nchar("thirty") + nchar("forty") + nchar("fifty") + nchar("sixty") + nchar("seventy") + nchar("eighty") + nchar("ninety") # one to 99 p <- 9*x + y + 10*z #special chars q <- nchar("hundred") r <- nchar("and") s <- nchar("onethousand") #Total (formula worked out on paper) 10*p + 900*q + 9*99*r + 100*x + s #################################################################################################### # Problem 19: Counting Sundays # You are given the following information, but you may prefer to do some research for yourself. # 1 Jan 1900 was a Monday. # Thirty days has September, # April, June and November. # All the rest have thirty-one, # Saving February alone, # Which has twenty-eight, rain or shine. # And on leap years, twenty-nine. # A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400. # How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? 
#################################################################################################### #Clear the environment rm(list= ls()) #Find out the day for 1 Jan 1901 dow_1jan1900 <- 1 #365 as 1900 is not divisible by 400 dow_1jan1901 <- dow_1jan1900 + (365%%7) #Create a vector that stores the day on which the 1st of each month falls dow <- c(dow_1jan1901) #Object storing what was the day of the 1st of the current month dow_month <- dow #Month - start with Jan month <- 1 #Year - start with 1901 year <- 1901 #Go on till year is 2000 iclusive while(year < 2001) { #Months having 31 days if(month %in% c(1,3,5,7,8,10,12)) { dow_month <- dow_month + (31 %% 7) #Start again if the calculation goes above 7 dow_month <- if(dow_month < 8) dow_month else dow_month - 7 #Can also do: (dow_month + 31) %% 7 for the above two lines #Append to the vector dow <- c(dow, dow_month) } #Months having 30 days else if(month %in% c(4,6,9,11)) { dow_month <- dow_month + (30 %% 7) dow_month <- if(dow_month < 8) dow_month else dow_month - 7 dow <- c(dow, dow_month) } #February in leap years has 29 days else if(month == 2 & ((year %% 4 == 0 & year %% 100 != 0) | year %% 400 == 0)){ dow_month <- dow_month + (29 %% 7) dow_month <- if(dow_month < 8) dow_month else dow_month - 7 dow <- c(dow, dow_month) } #February in non-leap years has 28 days else { dow <- c(dow, dow_month) } #Increase month counter till 12 and then start again month <- if(month + 1 < 13) month + 1 else 1 #Increase year counter every 12 months year <- if(month + 1 == 13) year + 1 else year } #Number of months beginning on Sunday (7th day of week) length(dow[dow == 7]) #################################################################################################### # Problem 20: Factorial digit sum # n! means n ?? (n ??? 1) ?? ... ?? 3 ?? 2 ?? 1 # For example, 10! = 10 ?? 9 ?? ... ?? 3 ?? 2 ?? 1 = 3628800, # and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. 
# Find the sum of the digits in the number 100! #################################################################################################### #Clear the environment rm(list= ls()) #Load a package that makes things easier library(gmp) #Get factorial of 99 - use as.bigz to not lose precision num <- as.bigz(99) factorial_num <- factorial(num) #R cannot perform mathematical operations on such a big number, so I convert it to a string num_fac_char <- toString(factorial_num) #Split the string, store in a vector, convert individual component to the numbers vec_num_components <- as.numeric(unlist(strsplit(num_fac_char, ""))) #Sum of numbers sum(vec_num_components)
/Project_Euler_Codes_11.R
no_license
gunjitag/codes_for_fun
R
false
false
11,683
r
######################################################## ######################################################## #Project Euler# ######################################################## ######################################################## #################################################################################################### #Problem 11: Largest Product in a grid # n the 20??20 grid below, four numbers along a diagonal line have been marked in red. # # 08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 # 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 # 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 # 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 # 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 # 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 # 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 # 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 # 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 # 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 # 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 # 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 # 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 # 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 # 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 # 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 # 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 # 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 # 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 # 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48 # # The product of these numbers is 26 ?? 63 ?? 78 ?? 14 = 1788696. # # What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20??20 grid? 
#################################################################################################### #Clear the environment rm(list= ls()) # Input the Matrix M <- matrix(c(08, 02, 22, 97, 38, 15, 00, 40, 00, 75, 04, 05, 07, 78, 52, 12, 50, 77, 91, 08, 49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 04, 56, 62, 00, 81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 03, 49, 13, 36, 65, 52, 70, 95, 23, 04, 60, 11, 42, 69, 24, 68, 56, 01, 32, 56, 71, 37, 02, 36, 91, 22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80, 24, 47, 32, 60, 99, 03, 45, 02, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50, 32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70, 67, 26, 20, 68, 02, 62, 12, 20, 95, 63, 94, 39, 63, 08, 40, 91, 66, 49, 94, 21, 24, 55, 58, 05, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72, 21, 36, 23, 09, 75, 00, 76, 44, 20, 45, 35, 14, 00, 61, 33, 97, 34, 31, 33, 95, 78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 03, 80, 04, 62, 16, 14, 09, 53, 56, 92, 16, 39, 05, 42, 96, 35, 31, 47, 55, 58, 88, 24, 00, 17, 54, 24, 36, 29, 85, 57, 86, 56, 00, 48, 35, 71, 89, 07, 05, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58, 19, 80, 81, 68, 05, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 04, 89, 55, 40, 04, 52, 08, 83, 97, 35, 99, 16, 07, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66, 88, 36, 68, 87, 57, 62, 20, 72, 03, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69, 04, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 08, 46, 29, 32, 40, 62, 76, 36, 20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 04, 36, 16, 20, 73, 35, 29, 78, 31, 90, 01, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 05, 54, 01, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 01, 89, 19, 67, 48), nrow = 20, ncol = 20, byrow = TRUE) #Horizontal product product_horizontal <- c() for(i in 1:20){ for(j in 1:17){ product_horizontal <- c(product_horizontal, M[i,j]*M[i,j+1]*M[i,j+2]*M[i,j+3]) } } max_product_horizontal <- 
max(product_horizontal) #Vertical product product_vertical <- c() for(j in 1:20){ for(i in 1:17){ product_vertical <- c(product_vertical, M[i,j]*M[i+1,j]*M[i+2,j]*M[i+3,j]) } } max_product_vertical <- max(product_vertical) #Forward Diagonal product product_diagonal <- c() for(j in 1:17){ for(i in 1:17){ product_diagonal <- c(product_diagonal, M[i,j]*M[i+1,j+1]*M[i+2,j+2]*M[i+3,j+3]) } } max_product_diagonal <- max(product_diagonal) #Backward Diagnal product product_diagonal2 <- c() for(i in 1:17){ for(j in 4:20){ product_diagonal2 <- c(product_diagonal2, M[i,j]*M[i+1,j-1]*M[i+2,j-2]*M[i+3,j-3]) } } max_product_diagonal2 <- max(product_diagonal2) max_product <- max(max_product_diagonal2, max_product_diagonal, max_product_vertical, max_product_horizontal) #################################################################################################### #Problem 12: Highly Divisible Triangular Number # The sequence of triangle numbers is generated by adding the natural numbers. #So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be: # 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ... # Let us list the factors of the first seven triangle numbers: # 1: 1 # 3: 1,3 # 6: 1,2,3,6 # 10: 1,2,5,10 # 15: 1,3,5,15 # 21: 1,3,7,21 # 28: 1,2,4,7,14,28 # We can see that 28 is the first triangle number to have over five divisors. # What is the value of the first triangle number to have over five hundred divisors? 
#################################################################################################### #Clear the environment rm(list= ls()) #Write a function to find number of factors num_fac <- function(x){ #A number has atleast 2 factors: 1 and itself output <- 2 #We only need to check the number of factors till sqrt of x as any number below that has a mirror number above that for(i in 2:floor(sqrt(x))) { #Add 2 the divisor and quotient to factor list output <- if (x %% i == 0) output + 2 else output } #If x is divisible by sqrt(x) then that should be counted only once; Hard code 1,2,3 output <- if(x==1) 1 else if(x == 2 | x==3) 2 else if(sqrt(x) == floor(sqrt(x))) output - 1 else output output } start_time <- Sys.time() #Start with traingular number i <- 2 triangular_num <- 1 num_factors <- 1 while(num_factors <= 500) { triangular_num <- triangular_num + i num_factors <- num_fac(triangular_num) i <- i + 1 } num_factors triangular_num end_time <- Sys.time() end_time - start_time #Takes a bit of time but does the job #################################################################################################### #Problem 17: Number Letter Count # If the numbers 1 to 5 are written out in words: one, two, three, four, five, # then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total. # If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used? # NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage. 
#################################################################################################### #Clear the environment rm(list= ls()) #one to nine x <- nchar("one") + nchar("two") + nchar("three") + nchar("four") + nchar("five") + nchar("six") + nchar("seven") + nchar("eight") + nchar("nine") #ten to nineteen y <- nchar("ten") + nchar("eleven") + nchar("twelve") + nchar("thirteen") + nchar("fourteen") + nchar("fifteen") + nchar("sixteen") + nchar("seventeen") + nchar("eighteen") + nchar("nineteen") #10s z <- nchar("twenty") + nchar("thirty") + nchar("forty") + nchar("fifty") + nchar("sixty") + nchar("seventy") + nchar("eighty") + nchar("ninety") # one to 99 p <- 9*x + y + 10*z #special chars q <- nchar("hundred") r <- nchar("and") s <- nchar("onethousand") #Total (formula worked out on paper) 10*p + 900*q + 9*99*r + 100*x + s #################################################################################################### # Problem 19: Counting Sundays # You are given the following information, but you may prefer to do some research for yourself. # 1 Jan 1900 was a Monday. # Thirty days has September, # April, June and November. # All the rest have thirty-one, # Saving February alone, # Which has twenty-eight, rain or shine. # And on leap years, twenty-nine. # A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400. # How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? 
#################################################################################################### #Clear the environment rm(list= ls()) #Find out the day for 1 Jan 1901 dow_1jan1900 <- 1 #365 as 1900 is not divisible by 400 dow_1jan1901 <- dow_1jan1900 + (365%%7) #Create a vector that stores the day on which the 1st of each month falls dow <- c(dow_1jan1901) #Object storing what was the day of the 1st of the current month dow_month <- dow #Month - start with Jan month <- 1 #Year - start with 1901 year <- 1901 #Go on till year is 2000 iclusive while(year < 2001) { #Months having 31 days if(month %in% c(1,3,5,7,8,10,12)) { dow_month <- dow_month + (31 %% 7) #Start again if the calculation goes above 7 dow_month <- if(dow_month < 8) dow_month else dow_month - 7 #Can also do: (dow_month + 31) %% 7 for the above two lines #Append to the vector dow <- c(dow, dow_month) } #Months having 30 days else if(month %in% c(4,6,9,11)) { dow_month <- dow_month + (30 %% 7) dow_month <- if(dow_month < 8) dow_month else dow_month - 7 dow <- c(dow, dow_month) } #February in leap years has 29 days else if(month == 2 & ((year %% 4 == 0 & year %% 100 != 0) | year %% 400 == 0)){ dow_month <- dow_month + (29 %% 7) dow_month <- if(dow_month < 8) dow_month else dow_month - 7 dow <- c(dow, dow_month) } #February in non-leap years has 28 days else { dow <- c(dow, dow_month) } #Increase month counter till 12 and then start again month <- if(month + 1 < 13) month + 1 else 1 #Increase year counter every 12 months year <- if(month + 1 == 13) year + 1 else year } #Number of months beginning on Sunday (7th day of week) length(dow[dow == 7]) #################################################################################################### # Problem 20: Factorial digit sum # n! means n ?? (n ??? 1) ?? ... ?? 3 ?? 2 ?? 1 # For example, 10! = 10 ?? 9 ?? ... ?? 3 ?? 2 ?? 1 = 3628800, # and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. 
# Find the sum of the digits in the number 100! #################################################################################################### #Clear the environment rm(list= ls()) #Load a package that makes things easier library(gmp) #Get factorial of 99 - use as.bigz to not lose precision num <- as.bigz(99) factorial_num <- factorial(num) #R cannot perform mathematical operations on such a big number, so I convert it to a string num_fac_char <- toString(factorial_num) #Split the string, store in a vector, convert individual component to the numbers vec_num_components <- as.numeric(unlist(strsplit(num_fac_char, ""))) #Sum of numbers sum(vec_num_components)
#' Calculate hiearchical clustering and plot dendrogram. #' #' Args: #' m: Expression matrix (rows are features, columns are samples). #' method_distance: Distance metric used for clustering. See ?dist. #' Can be also correlation metrix ("pearson", "kendall", "spearman"). See ?cor. #' method_clustering: Clustering method. See ?hclust. #' color_by: Vector of discrete values to color samples by. #' Length must be the same as number of columns in 'm'. #' color_by_lab: Name of the color legend. #' title: Main plot title. #' #' Returns: #' dendrogram object plot_hc <- function( m, method_distance = "euclidean", method_clustering = "complete", color_by = NULL, color_by_lab = "Group", title = "Hierarchical Clustering" ) { m2 <- data.frame(t(m)) if(method_distance %in% c("pearson", "kendall", "spearman")){ d <- as.dist(1-cor(t(m2),method=method_distance)) } else{ d <- dist(m2, method = method_distance) } dend <- as.dendrogram(hclust(d, method_clustering)) cols <- unique(as.vector(color_by)) cols_vec <- rep(NA, length(as.vector(color_by))) j<-1 for (i in as.vector(color_by)) { cols_vec[j] <- match(i, cols) + 1 j <- j + 1 } labels_order <- order.dendrogram(dend) ordered_cols <- rep(NA, length(as.vector(color_by))) j<- 1 for (i in as.vector(labels_order)) { ordered_cols[j] <- cols_vec[i] j <- j + 1 } dend <- dend %>% set("labels_col", ordered_cols) plot(dend, main = title) legend( "topright", title = color_by_lab, legend = cols, pch = 19, col = 2:(length(cols)+1), bty = "n" ) } #' Calculate hiearchical clustering and plot dendrogram using the dendextend package. #' #' Args: #' m: Expression matrix (rows are features, columns are samples). #' method_distance: Distance metric used for clustering. See ?dist. #' Can be also correlation metrix ("pearson", "kendall", "spearman"). See ?cor. #' method_clustering: Clustering method. See ?hclust. #' color_by: Vector of discrete values to color samples by. #' Length must be the same as number of columns in 'm'. 
#' color_by_lab: Name of the color legend. #' title: Main plot title. #' #' Returns: #' dendrogram object plot_hc2 <- function( m, method_distance = "euclidean", method_clustering = "complete", color_by = NULL, color_by_lab = "Group", title = "Hierarchical Clustering" ) { } #' Select features (rows) with the highest variance across the samples (columns). #' This will be useful for visualization (mainly heatmaps) of large expression matrices. #' #' Args: #' m: Expression matrix (rows are features, columns are samples). #' n_top_features: Number of top features. #' #' Returns: #' Subset of 'm' with 'n_top_features' with the highest variance across the samples. select_var_features <- function(m, n_top_features) { variances <- apply(X=m, MARGIN=1, FUN=var) sorted <- sort(variances, decreasing=TRUE, index.return=TRUE)$ix[1:n_top_features] return(m[sorted, ]) } #' Using the ggplot2 package, plot the first PC or first to three PCs of samples in expression matrix. #' #' Args: #' m: Expression matrix (rows are features, columns are samples). #' sample_data: Dataframe describing samples. #' plot_type: "single" for PC1 vs. PC2, "multi" for combinations of PC1-3 and their cumulative explained variance. #' n_top_features: Number of top features with the highest variance across the samples. #' color_by: Column name in sample_data to use for point coloring. #' shape_by: Column noneame in sample_data to use for point shape. #' label_by: Column name in sample_data to use for point labels. #' point_size: Point size (numeric). #' text_size: Label text size (numeric). #' center: Whether to center PCA. See ?prcomp. #' scale.: Whether to scale PCA. See ?prcomp. 
#' #' Returns: #' list(pca = <prcomp object>, pca_df = <combined dataframe of sample_data and PCs>, plot = <ggplot2 or patchwork object (depends on plot_type)>) plot_pca <- function( m, sample_data, plot_type = c("single", "multi"), n_top_features = Inf, color_by = NULL, shape_by = NULL, label_by = NULL, point_size = 2, text_size = 2.5, center = TRUE, scale. = TRUE ) { if(!is.infinite(n_top_features)){ m <- select_var_features(m, n_top_features) } m_pca <- prcomp(as.data.frame(t(m)),scale = scale., center = center) df_pca <- as.data.frame(m_pca$x) df_combined <- cbind(sample_data, df_pca) var <- as.vector(summary(m_pca)$importance[2,]) var <- ceiling(var*100) if("single" %in% plot_type){ p <- ggplot(df_combined, aes(x = PC1, y = PC2)) + geom_point(aes_string(shape=shape_by, color=color_by), size=point_size) + xlab(paste("PC1 (", var[1], "%)", sep ="")) + ylab(paste("PC2 (", var[2], "%)", sep="")) if(!is.null(label_by)){ p <- p +geom_text(aes_string(label=label_by),hjust=0, vjust=0, size=text_size) } } else{ cum_var <- as.vector(summary(m_pca)$importance[3,]) df_cum <- data.frame(PC = 1:length(cum_var), Cum_var = cum_var) p1 <- ggplot(df_combined, aes(x = PC1, y = PC2)) + geom_point(aes_string(shape=shape_by, color=color_by), size=point_size) + xlab(paste("PC1 (", var[1], "%)", sep ="")) + ylab(paste("PC2 (", var[2], "%)", sep="")) p2 <- ggplot(df_combined, aes(x = PC1, y = PC3)) + geom_point(aes_string(shape=shape_by, color=color_by), size=point_size) + xlab(paste("PC1 (", var[1], "%)", sep ="")) + ylab(paste("PC3 (", var[3], "%)", sep="")) p3 <- ggplot(df_combined, aes(x = PC2, y = PC3)) + geom_point(aes_string(shape=shape_by, color=color_by), size=point_size) + xlab(paste("PC2 (", var[2], "%)", sep ="")) + ylab(paste("PC3 (", var[3], "%)", sep="")) p4 <- ggplot(data=df_cum, aes(x=PC, y=cum_var)) + geom_bar(stat="identity") + ylab("Cumulative % of var. 
explained") p <- ggarrange(p1, p2, p3, p4, ncol=2, nrow=2, common.legend = TRUE, legend="bottom") } res <- list("pca" = m_pca, "pca_df" = df_combined, "plot" = p) return(res) } #' Using the GGally::ggpairs() function, plot grid of PCA plots (PC1 vs. PC2, PC1 vs. PC3, PC2 vs. PC3, etc). #' When n_components == 2, use normal ggplot2. #' #' Args: #' m: Expression matrix (rows are features, columns are samples). #' sample_data: Dataframe describing samples. #' output_file: File to save plot in. #' n_components: Number of PCs to plot. #' n_top_features: Number of top features with the highest variance across the samples. #' color_by: Column name in sample_data to use for point coloring. #' color_legend_lab: Name of the color legend. #' shape_by: Column name in sample_data to use for point shape. #' shape_legend_lab: Name of the shape legend. #' label_by: Column name in sample_data to use for point labels. #' point_size: Point size (numeric). #' text_size: Label text size (numeric). #' title: Plot title. #' subtitle: Plot subtitle. #' center: Whether to center PCA. See ?prcomp. #' scale.: Whether to scale PCA. See ?prcomp. #' #' Returns: #' list(pca = <prcomp object>, pca_df = <combined dataframe of sample_data and PCs>, plot = <ggplot2 object>) plot_pca_ggpairs <- function( m, sample_data, n_components = 5, n_top_features = Inf, color_by = NULL, color_legend_lab = NULL, shape_by = NULL, shape_legend_lab = NULL, label_by = NULL, point_size = 2, text_size = 2.5, title = NULL, subtitle = NULL, center = TRUE, scale. = TRUE ) { m_pca <- prcomp(as.data.frame(t(m)),scale = scale., center = center) df_pca <- as.data.frame(m_pca$x) df_combined <- cbind(df_pca, sample_data) if(n_components == 2){ p <- plot_pca(m,sample_data, plot_type = "single", n_top_features = n_top_features, color_by = color_by, shape_by = shape_by, label_by = label_by, point_size = point_size, text_size = text_size, center = center, scale. 
= scale )$plot } else{ p <- GGally::ggpairs(df_combined, columns = 1:n_components, ggplot2::aes_string(colour=color_by, shape=shape_by), upper = list(continuous = "points", discrete = "points", na = "points"), diag=list(continuous = "blankDiag", discrete = "blankDiag", na = "blankDiag"), legend = 3, progress=FALSE) } return(list(pca = m_pca, pca_df = df_combined, plot = p + labs(title = title, subtitle = subtitle))) } #' Plot heatmap using the ComplexHeatmap package. #' #' Args: #' m: Expression matrix (rows are features, columns are samples). #' z_score: If TRUE, calculate row z-score. #' column_annotation: Dataframe used for annotation of columns. #' row_annotation: Dataframe used for annotation of rows. #' title: Heatmap title. #' legend_title: Heatmap color legend title. #' show_row_names: If TRUE, show rownames in the heatmap. #' show_col_names: If TRUE, show colnames in the heatmap. #' color_palette: Function to generate colors for annotations. #' color_mapping: Named list of named vectors to map colors to variable levels. 
#' See https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#simple-annotation #' #' Returns: #' ComplexHeatmap object plot_heatmap <- function( m, n_top_features = Inf, z_score = FALSE, column_annotation = NULL, row_annotation = NULL, title = "", legend_title = "Values", show_row_names = TRUE, show_column_names = TRUE, cluster_rows = TRUE, cluster_columns = TRUE, color_palette = scales::hue_pal(), color_mapping = NULL ) { if(!is.infinite(n_top_features)){ m <- select_var_features(m, n_top_features) } #if(z_score){ # m <- scale(as.matrix(m)) #} if(!(is.null(row_annotation))){ m <- as.data.frame(m) m$rowNames <- row.names(m) m <- as.data.frame(m) %>% dplyr::filter(., rowNames %in% row.names(as.data.frame(row_annotation))) m <- subset(m, select = -rowNames) p_heatmap <- Heatmap( as.matrix(m), name = legend_title, column_title = title, show_row_names = show_row_names, show_column_names = show_column_names, cluster_rows = cluster_rows, cluster_columns = cluster_columns, top_annotation = HeatmapAnnotation(df = as.data.frame(column_annotation)), right_annotation = rowAnnotation(df = as.data.frame(row_annotation)) ) } else{ p_heatmap <- Heatmap( as.matrix(m), name = legend_title, column_title = title, show_row_names = show_row_names, show_column_names = show_column_names, cluster_rows = cluster_rows, cluster_columns = cluster_columns, top_annotation = HeatmapAnnotation(df = column_annotation) ) } return(p_heatmap) } #' Create a heatmap using the heatmaply package. #' #' Args: #' m: Expression matrix (rows are features, columns are samples). #' z_score: If TRUE, calculate row z-score. #' column_annotation: Dataframe used for annotation of columns. #' row_annotation: Dataframe used for annotation of rows. #' title: Heatmap title. #' legend_title: Heatmap color legend title. 
#'
#' Returns:
#'   plotly object
plot_heatmaply <- function(
  m,
  # NOTE(review): z_score is accepted but currently unused -- confirm whether
  # a row z-score should be applied before plotting.
  z_score = FALSE,
  column_annotation = NULL,
  row_annotation = NULL,
  main = NULL,
  legend_title = NULL,
  showticklabels = c(TRUE, TRUE)
) {
  # Keep only the rows listed in the row annotation. A temporary rowNames
  # column is added because dplyr::filter() drops row names.
  m <- as.data.frame(m)
  m$rowNames <- row.names(m)
  m <- as.data.frame(m) %>% dplyr::filter(., rowNames %in% row.names(as.data.frame(row_annotation)))
  m <- subset(m, select = -rowNames)
  # Reorder the columns to match the row-annotation order. This assumes the
  # column names of 'm' coincide with the row names of 'row_annotation'
  # (e.g. a square correlation matrix) -- TODO confirm with callers.
  m <- m %>% dplyr::select(., row.names(row_annotation))
  # NOTE(review): heatmaply_cor() is used here, so the plot gets the
  # correlation-style diverging color scale -- verify this is intended for
  # general expression matrices.
  if(is.null(column_annotation)){
    p <- heatmaply_cor(
      m,
      main = main,
      showticklabels = showticklabels,
      key.title = legend_title,
      row_side_colors = row_annotation
    )
  } else{
    p <- heatmaply_cor(
      m,
      main = main,
      showticklabels = showticklabels,
      key.title = legend_title,
      row_side_colors = row_annotation,
      col_side_colors = column_annotation
    )
  }
  return(p)
}

#' Using the ggpubr::ggboxplot() function, plot boxplots of gene expression.
#'
#' Args:
#'   plot_data: data.frame (long format)
#'   x: Column to divide x-axis values to (e.g. sample groups).
#'   y: Column to compute boxplots on y-axis.
#'   facet_by: One or two columns used for facetting.
#'   feature: Name of a feature from which boxplots will be made.
#'     Data will be filtered based on facet_by.
#'     E.g. if facet_by = "gene" and feature = "CD24", only boxplots for "CD24" will be made.
#'   color_by: Column to use for boxplot and point coloring.
#'   x_lab: Name of x-axis.
#'   y_lab: Name of y-axis.
#'   main: Main plot title.
#'   add: Add something more to boxplots.
#'     Allowed values are one or the combination of:
#'     "none", "dotplot", "jitter", "boxplot", "point", "mean",
#'     "mean_se", "mean_sd", "mean_ci", "mean_range", "median",
#'     "median_iqr", "median_mad", "median_range".
#'     See ?ggpubr::ggboxplot
#'   point_size: Size of points inside boxplots.
#'   outlier_shape: Which point shape to use for outliers.
#'   do_t_test: Whether to do the t-test and display p-values inside the plot.
#'
#' Returns:
#'   ggplot2 object
plot_boxplots <- function(
  plot_data,
  x,
  y,
  facet_by,
  feature = NULL,
  color_by = NULL,
  x_lab = x,
  y_lab = y,
  main = NULL,
  # NOTE(review): 'add' is accepted for interface compatibility but unused;
  # jittered points are always overlaid.
  add = "jitter",
  point_size = 2,
  outlier_shape = 0,
  do_t_test = TRUE
) {
  # Optionally restrict the data to a single level of the facet column.
  if (!is.null(feature)) {
    f <- feature
    plot_data <- plot_data %>% dplyr::filter(!!sym(facet_by) == f)
  }
  p <- ggplot(plot_data, aes_string(x = x, y = y, color = color_by)) +
    geom_boxplot(outlier.shape = outlier_shape) +
    # Fixed point size: the original 'aes(size = point_size)' mapped a
    # constant as an aesthetic, which created a spurious "point_size" legend.
    geom_jitter(size = point_size) +
    facet_wrap(reformulate(facet_by, "."), nrow = 2, ncol = 2) +
    xlab(x_lab) +
    ylab(y_lab) +
    ggtitle(main) +
    theme(legend.position = "top")
  # Overlay t-test p-values per facet (ggpubr).
  if (do_t_test) {
    p <- p + stat_compare_means(method = "t.test")
  }
  return(p)
}

#' Compute the M value of CP values.
#'
#' Args:
#'   gene: Name of gene to compute the M value for.
#'   cp: Matrix or dataframe of CP values. Rows are genes and columns are samples.
#'
#' Returns:
#'   M value (numeric)
compute_m <- function(gene, cp) {
  # Ajk = {(aji/aki)}  (in log/CP space: pairwise differences)
  # Vjk = st.dev(Ajk)
  # Mj  = sum(Vjk)/(n-1)
  cp <- as.matrix(cp)
  others <- rownames(cp)[rownames(cp) != gene]
  # Standard deviation of the CP differences against every other gene;
  # type-stable vectorized replacement of the original double index loop.
  v_j <- vapply(
    others,
    function(k) sd(cp[gene, ] - cp[k, ]),
    numeric(1)
  )
  sum(v_j) / length(others)
}

#' For a single gene, test for statistical significance of difference in group means.
#'
#' Args:
#'   gene: Name of gene to test.
#'   gene_data: Dataframe in long format.
#'   gene_col: Column with genes.
#'   value_col: Column with values to test.
#'   group_col: Column with sample groups. There must be exactly two different groups.
#'   test: Statistical test to perform. It must have the same interface as t.test()
#'   verbose: Whether to print test results.
#'
#' Returns:
#'   htest object
test_gene <- function(gene, gene_data, gene_col, value_col, group_col, test = t.test, verbose = TRUE) {
  # Copy to a fresh name so a bare symbol can be used inside filter().
  g <- gene
  gene_df <- gene_data %>% dplyr::filter(!!sym(gene_col) == g)
  # Build the formula 'value_col ~ group_col' from the captured argument
  # symbols. (Two stray no-op statements that merely evaluated the arguments
  # were removed.)
  exp1 <- expr(!!ensym(value_col) ~ !!ensym(group_col))
  test_res <- test(formula = eval(exp1), data = gene_df)
  if (verbose) {
    print(test_res)
  }
  return(test_res)
}

#' For all genes in the input dataframe, test for statistical significance of difference in group means.
#'
#' Args:
#'   gene_data: Dataframe in long format.
#'   gene_col: Column with genes.
#'   value_col: Column with values to test.
#'   group_col: Column with sample groups. There must be exactly two different groups.
#'   test: Statistical test to perform. It must have the same interface as t.test()
#'
#' Returns:
#'   tibble object with columns gene, p_value, significance, test_res
test_gene_table <- function(gene_data, gene_col, value_col, group_col, test = t.test) {
  genes <- unique(as.matrix(gene_data)[, gene_col])
  htest_list <- lapply(
    genes,
    function(g) test_gene(g, gene_data, gene_col, value_col, group_col, test, verbose = FALSE)
  )
  # Type-stable extraction of p-values (vapply instead of sapply over
  # 1:length(...)).
  p_val <- vapply(htest_list, function(h) h$p.value, numeric(1))
  df <- data.frame(gene = genes, p_value = p_val)
  # Reuse the shared significance-star mapping.
  df$significance <- asterisk(df$p_value)
  df$test_res <- htest_list
  return(as_tibble(df))
}

#' Return asterisks according to p-values.
#'
#' Args:
#'   p_value: Vector of p-values. NA values yield NA.
#'
#' Returns:
#'   character vector ("***", "**", "*", "NS", or NA), same length as input.
asterisk <- function(p_value) {
  # Implemented: this was an empty stub. Thresholds match the mapping used
  # in test_gene_table().
  out <- rep(NA_character_, length(p_value))
  known <- !is.na(p_value)
  out[known] <- "NS"
  out[known & p_value < 0.05] <- "*"
  out[known & p_value < 0.01] <- "**"
  out[known & p_value < 0.001] <- "***"
  return(out)
}
/project/age_library.R
no_license
cervenku/Hodking_lymphoma_gene_expression_analysis
R
false
false
17,056
r
#' Calculate hierarchical clustering and plot dendrogram.
#'
#' Args:
#'   m: Expression matrix (rows are features, columns are samples).
#'   method_distance: Distance metric used for clustering. See ?dist.
#'     Can also be a correlation metric ("pearson", "kendall", "spearman"). See ?cor.
#'   method_clustering: Clustering method. See ?hclust.
#'   color_by: Vector of discrete values to color samples by.
#'     Length must be the same as number of columns in 'm'.
#'   color_by_lab: Name of the color legend.
#'   title: Main plot title.
#'
#' Returns:
#'   dendrogram object (invisibly)
plot_hc <- function(
  m,
  method_distance = "euclidean",
  method_clustering = "complete",
  color_by = NULL,
  color_by_lab = "Group",
  title = "Hierarchical Clustering"
) {
  # Transpose so samples become rows: dist()/cor() then compare samples.
  samples <- data.frame(t(m))
  if (method_distance %in% c("pearson", "kendall", "spearman")) {
    # Correlation-based dissimilarity between samples.
    d <- as.dist(1 - cor(t(samples), method = method_distance))
  } else {
    d <- dist(samples, method = method_distance)
  }
  dend <- as.dendrogram(hclust(d, method_clustering))
  # Map each group level to a palette index (2..k+1, skipping black), then
  # reorder to the dendrogram's leaf order. Replaces two manual index loops
  # with match() + vector indexing.
  groups <- as.vector(color_by)
  cols <- unique(groups)
  leaf_cols <- (match(groups, cols) + 1)[order.dendrogram(dend)]
  dend <- dend %>% set("labels_col", leaf_cols)
  plot(dend, main = title)
  legend(
    "topright",
    title = color_by_lab,
    legend = cols,
    pch = 19,
    col = 2:(length(cols) + 1),
    bty = "n"
  )
  # Return the dendrogram as documented (the original returned legend()'s
  # value instead).
  invisible(dend)
}

#' Calculate hierarchical clustering and plot dendrogram using the dendextend package.
#'
#' Args:
#'   m: Expression matrix (rows are features, columns are samples).
#'   method_distance: Distance metric used for clustering. See ?dist.
#'     Can also be a correlation metric ("pearson", "kendall", "spearman"). See ?cor.
#'   method_clustering: Clustering method. See ?hclust.
#'   color_by: Vector of discrete values to color samples by.
#'     Length must be the same as number of columns in 'm'.
#'   color_by_lab: Name of the color legend.
#'   title: Main plot title.
#'
#' Returns:
#'   dendrogram object (invisibly)
plot_hc2 <- function(
  m,
  method_distance = "euclidean",
  method_clustering = "complete",
  color_by = NULL,
  color_by_lab = "Group",
  title = "Hierarchical Clustering"
) {
  # Implemented: this was an empty stub. Mirrors plot_hc() but uses
  # dendextend idioms for coloring labels and branches.
  samples <- data.frame(t(m))
  if (method_distance %in% c("pearson", "kendall", "spearman")) {
    d <- as.dist(1 - cor(t(samples), method = method_distance))
  } else {
    d <- dist(samples, method = method_distance)
  }
  dend <- as.dendrogram(hclust(d, method = method_clustering))
  if (!is.null(color_by)) {
    groups <- as.vector(color_by)
    lvls <- unique(groups)
    # Color leaf labels by group (palette indices 2..k+1, skipping black) in
    # leaf order, and color branches into k clusters.
    leaf_cols <- (match(groups, lvls) + 1)[order.dendrogram(dend)]
    dend <- dendextend::set(dend, "labels_col", leaf_cols)
    dend <- dendextend::set(dend, "branches_k_color", k = length(lvls))
  }
  plot(dend, main = title)
  if (!is.null(color_by)) {
    legend("topright", title = color_by_lab, legend = lvls, pch = 19,
           col = 2:(length(lvls) + 1), bty = "n")
  }
  invisible(dend)
}

#' Select features (rows) with the highest variance across the samples (columns).
#' This will be useful for visualization (mainly heatmaps) of large expression matrices.
#'
#' Args:
#'   m: Expression matrix (rows are features, columns are samples).
#'   n_top_features: Number of top features (capped at nrow(m)).
#'
#' Returns:
#'   Subset of 'm' with 'n_top_features' with the highest variance across the samples.
select_var_features <- function(m, n_top_features) {
  # Per-feature variance across samples.
  variances <- apply(X = m, MARGIN = 1, FUN = var)
  # Cap at the number of available rows: the original indexed
  # sort(...)$ix[1:n], which produced NA rows when n_top_features > nrow(m).
  n_keep <- min(n_top_features, nrow(m))
  top <- order(variances, decreasing = TRUE)[seq_len(n_keep)]
  # drop = FALSE keeps the matrix shape even when a single row is selected.
  return(m[top, , drop = FALSE])
}

#' Using the ggplot2 package, plot the first PC or first to three PCs of samples in expression matrix.
#'
#' Args:
#'   m: Expression matrix (rows are features, columns are samples).
#'   sample_data: Dataframe describing samples.
#'   plot_type: "single" for PC1 vs. PC2, "multi" for combinations of PC1-3 and their cumulative explained variance.
#'   n_top_features: Number of top features with the highest variance across the samples.
#'   color_by: Column name in sample_data to use for point coloring.
#'   shape_by: Column name in sample_data to use for point shape.
#'   label_by: Column name in sample_data to use for point labels.
#'   point_size: Point size (numeric).
#'   text_size: Label text size (numeric).
#'   center: Whether to center PCA. See ?prcomp.
#'   scale.: Whether to scale PCA. See ?prcomp.
#'
#' Returns:
#'   list(pca = <prcomp object>, pca_df = <combined dataframe of sample_data and PCs>,
#'        plot = <ggplot2 or ggarrange object (depends on plot_type)>)
plot_pca <- function(
  m,
  sample_data,
  plot_type = c("single", "multi"),
  n_top_features = Inf,
  color_by = NULL,
  shape_by = NULL,
  label_by = NULL,
  point_size = 2,
  text_size = 2.5,
  center = TRUE,
  scale. = TRUE
) {
  # match.arg keeps the original default ("single") and now rejects invalid
  # values instead of silently falling through to the multi-panel branch.
  plot_type <- match.arg(plot_type)
  if (!is.infinite(n_top_features)) {
    m <- select_var_features(m, n_top_features)
  }
  # PCA on samples: transpose so samples are rows.
  m_pca <- prcomp(as.data.frame(t(m)), scale = scale., center = center)
  df_combined <- cbind(sample_data, as.data.frame(m_pca$x))
  # Percentage of variance explained per PC, for axis labels.
  pct_var <- ceiling(as.vector(summary(m_pca)$importance[2, ]) * 100)

  # Helper: scatter plot of PCi vs. PCj with the shared aesthetics.
  pc_scatter <- function(i, j) {
    ggplot(df_combined, aes_string(x = paste0("PC", i), y = paste0("PC", j))) +
      geom_point(aes_string(shape = shape_by, color = color_by), size = point_size) +
      xlab(paste0("PC", i, " (", pct_var[i], "%)")) +
      ylab(paste0("PC", j, " (", pct_var[j], "%)"))
  }

  if (plot_type == "single") {
    p <- pc_scatter(1, 2)
    if (!is.null(label_by)) {
      p <- p + geom_text(aes_string(label = label_by), hjust = 0, vjust = 0, size = text_size)
    }
  } else {
    # Cumulative variance explained, shown as a bar chart in the 4th panel.
    cum_var <- as.vector(summary(m_pca)$importance[3, ])
    df_cum <- data.frame(PC = seq_along(cum_var), Cum_var = cum_var)
    # Fixed: the original aes(y = cum_var) referenced the loose vector in the
    # function environment instead of the df_cum column.
    p4 <- ggplot(data = df_cum, aes(x = PC, y = Cum_var)) +
      geom_bar(stat = "identity") +
      ylab("Cumulative % of var. explained")
    p <- ggarrange(pc_scatter(1, 2), pc_scatter(1, 3), pc_scatter(2, 3), p4,
                   ncol = 2, nrow = 2, common.legend = TRUE, legend = "bottom")
  }
  res <- list("pca" = m_pca, "pca_df" = df_combined, "plot" = p)
  return(res)
}

#' Using the GGally::ggpairs() function, plot grid of PCA plots (PC1 vs. PC2, PC1 vs. PC3, PC2 vs. PC3, etc).
#' When n_components == 2, use normal ggplot2.
#'
#' Args:
#'   m: Expression matrix (rows are features, columns are samples).
#'   sample_data: Dataframe describing samples.
#'   n_components: Number of PCs to plot.
#'   n_top_features: Number of top features with the highest variance across the samples.
#'   color_by: Column name in sample_data to use for point coloring.
#'   color_legend_lab: Name of the color legend.
#'   shape_by: Column name in sample_data to use for point shape.
#'   shape_legend_lab: Name of the shape legend.
#'   label_by: Column name in sample_data to use for point labels.
#'   point_size: Point size (numeric).
#'   text_size: Label text size (numeric).
#'   title: Plot title.
#'   subtitle: Plot subtitle.
#'   center: Whether to center PCA. See ?prcomp.
#'   scale.: Whether to scale PCA. See ?prcomp.
#'
#' Returns:
#'   list(pca = <prcomp object>, pca_df = <combined dataframe of sample_data and PCs>, plot = <ggplot2 object>)
plot_pca_ggpairs <- function(
  m,
  sample_data,
  n_components = 5,
  n_top_features = Inf,
  color_by = NULL,
  color_legend_lab = NULL,
  shape_by = NULL,
  shape_legend_lab = NULL,
  label_by = NULL,
  point_size = 2,
  text_size = 2.5,
  title = NULL,
  subtitle = NULL,
  center = TRUE,
  scale. = TRUE
) {
  # Apply the variance filter before the PCA; previously n_top_features was
  # ignored unless n_components == 2.
  if (!is.infinite(n_top_features)) {
    m <- select_var_features(m, n_top_features)
  }
  m_pca <- prcomp(as.data.frame(t(m)), scale = scale., center = center)
  df_combined <- cbind(as.data.frame(m_pca$x), sample_data)
  if (n_components == 2) {
    # Delegate to plot_pca() for the plain PC1-vs-PC2 plot. Fixed: the
    # original passed 'scale. = scale' (the base function) to plot_pca(),
    # which would make prcomp() fail.
    p <- plot_pca(
      m, sample_data,
      plot_type = "single",
      n_top_features = Inf,  # already filtered above
      color_by = color_by, shape_by = shape_by, label_by = label_by,
      point_size = point_size, text_size = text_size,
      center = center, scale. = scale.
    )$plot
  } else {
    # NOTE(review): label_by, point_size, text_size and the *_legend_lab
    # arguments are not applied in the ggpairs branch.
    p <- GGally::ggpairs(
      df_combined,
      columns = 1:n_components,
      ggplot2::aes_string(colour = color_by, shape = shape_by),
      upper = list(continuous = "points", discrete = "points", na = "points"),
      diag = list(continuous = "blankDiag", discrete = "blankDiag", na = "blankDiag"),
      legend = 3,
      progress = FALSE
    )
  }
  return(list(pca = m_pca, pca_df = df_combined, plot = p + labs(title = title, subtitle = subtitle)))
}

#' Plot heatmap using the ComplexHeatmap package.
#'
#' Args:
#'   m: Expression matrix (rows are features, columns are samples).
#'   z_score: If TRUE, calculate row z-score.
#'   column_annotation: Dataframe used for annotation of columns.
#'   row_annotation: Dataframe used for annotation of rows.
#'   title: Heatmap title.
#'   legend_title: Heatmap color legend title.
#'   show_row_names: If TRUE, show rownames in the heatmap.
#'   show_col_names: If TRUE, show colnames in the heatmap.
#'   color_palette: Function to generate colors for annotations.
#'   color_mapping: Named list of named vectors to map colors to variable levels.
#' See https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#simple-annotation #' #' Returns: #' ComplexHeatmap object plot_heatmap <- function( m, n_top_features = Inf, z_score = FALSE, column_annotation = NULL, row_annotation = NULL, title = "", legend_title = "Values", show_row_names = TRUE, show_column_names = TRUE, cluster_rows = TRUE, cluster_columns = TRUE, color_palette = scales::hue_pal(), color_mapping = NULL ) { if(!is.infinite(n_top_features)){ m <- select_var_features(m, n_top_features) } #if(z_score){ # m <- scale(as.matrix(m)) #} if(!(is.null(row_annotation))){ m <- as.data.frame(m) m$rowNames <- row.names(m) m <- as.data.frame(m) %>% dplyr::filter(., rowNames %in% row.names(as.data.frame(row_annotation))) m <- subset(m, select = -rowNames) p_heatmap <- Heatmap( as.matrix(m), name = legend_title, column_title = title, show_row_names = show_row_names, show_column_names = show_column_names, cluster_rows = cluster_rows, cluster_columns = cluster_columns, top_annotation = HeatmapAnnotation(df = as.data.frame(column_annotation)), right_annotation = rowAnnotation(df = as.data.frame(row_annotation)) ) } else{ p_heatmap <- Heatmap( as.matrix(m), name = legend_title, column_title = title, show_row_names = show_row_names, show_column_names = show_column_names, cluster_rows = cluster_rows, cluster_columns = cluster_columns, top_annotation = HeatmapAnnotation(df = column_annotation) ) } return(p_heatmap) } #' Create a heatmap using the heatmaply package. #' #' Args: #' m: Expression matrix (rows are features, columns are samples). #' z_score: If TRUE, calculate row z-score. #' column_annotation: Dataframe used for annotation of columns. #' row_annotation: Dataframe used for annotation of rows. #' title: Heatmap title. #' legend_title: Heatmap color legend title. 
#' #' Returns: #' plotly object plot_heatmaply <- function( m, z_score = FALSE, column_annotation = NULL, row_annotation = NULL, main = NULL, legend_title = NULL, showticklabels = c(TRUE, TRUE) ) { m <- as.data.frame(m) m$rowNames <- row.names(m) m <- as.data.frame(m) %>% dplyr::filter(., rowNames %in% row.names(as.data.frame(row_annotation))) m <- subset(m, select = -rowNames) m <- m %>% dplyr::select(., row.names(row_annotation)) if(is.null(column_annotation)){ p <- heatmaply_cor( m, main = main, showticklabels = showticklabels, key.title = legend_title, row_side_colors = row_annotation ) } else{ p <- heatmaply_cor( m, main = main, showticklabels = showticklabels, key.title = legend_title, row_side_colors = row_annotation, col_side_colors = column_annotation ) } return(p) } #' Using the ggpubr::ggboxplot() function, plot boxplots of gene expression. #' #' Args: #' plot_data: data.frame (long format) #' x: Column to divide x-axis values to (e.g. sample groups). #' y: Column to compute boxplots on y-axis. #' facet_by: One or two columns used for facetting. #' feature: Name of a feature from which boxplots will be made. #' Data will be filtered based on facet_by. #' E.g. if facet_by = "gene" and feature = "CD24", only boxplots for "CD24" will be made. #' color_by: Column to use for boxplot and point coloring. #' x_lab: Name of x-axe. #' y_lab: Name of y-axe. #' main: Main plot title. #' add: Add something more to boxplots. #' Allowed values are one or the combination of: #' "none", "dotplot", "jitter", "boxplot", "point", "mean", #' "mean_se", "mean_sd", "mean_ci", "mean_range", "median", #' "median_iqr", "median_mad", "median_range". #' See ?ggpubr::ggboxplot #' point_size: Size of points inside boxplots. #' outlier_shape: Which point shape to use for outliers. #' do_t_test: Whether to do the t-test and display p-values inside the plot. 
#' #' Returns: #' ggplot2 object plot_boxplots <- function( plot_data, x, y, facet_by, feature = NULL, color_by = NULL, x_lab = x, y_lab = y, main = NULL, add = "jitter", point_size = 2, outlier_shape = 0, do_t_test = TRUE ) { if(!is.null(feature)){ f <- feature plot_data <- plot_data %>% dplyr::filter(!!sym(facet_by) == f) } p<-ggplot(plot_data, aes_string(x=x, y=y, color=color_by)) + geom_boxplot(outlier.shape = outlier_shape) + geom_jitter(aes(size = point_size)) + facet_wrap(reformulate(facet_by,"."), nrow = 2, ncol = 2) + xlab(x_lab) + ylab(y_lab) + ggtitle(main) + theme(legend.position="top") if(do_t_test){ p <- p + stat_compare_means(method = "t.test") } return(p) } #' Compute the M value of CP values. #' #' Args: #' gene: Name of gene to compute the M value for. #' cp: Matrix or dataframe of CP values. Rows are genes and columns are samples. #' #' Returns: #' M value (numeric) compute_m <- function(gene, cp) { # Ajk = {(aji/aki)} # Vjk = st.dev(Ajk) # Mj = sum(Vjk)/(n-1) others <- rownames(cp)[!(rownames(cp) == gene)] a_jk <- rep(NA, ncol(cp)) v_j <- rep(NA, length(others)) v_index <- 1 for(k in others){ in_A <- cp[c(gene, k),] index <- 1 for(i in colnames(in_A)){ a_jk[index] <- (in_A[gene, i]) - (in_A[k, i]) index <- index + 1 } v_j[v_index] <- sd(a_jk) v_index <- v_index + 1 } m_j <- sum(v_j)/length(others) return(m_j) } #' For a single gene, test for statistical significance of difference in group means. #' #' Args: #' gene: Name of gene to test. #' gene_data: Dataframe in long format. #' gene_col: Column with genes. #' value_col: Column with values to test. #' group_col: Column with sample groups. There must be exactly two different groups. #' test: Statistical test to perform. It must have the same interface as t.test() #' verbose: Whether to print test results. 
#' #' Returns: #' htest object test_gene <- function(gene, gene_data, gene_col, value_col, group_col, test = t.test, verbose = TRUE) { g <- gene gene_df <- gene_data %>% dplyr::filter(!!sym(gene_col) == g) group_col value_col exp1 <- expr(!!ensym(value_col) ~ !!ensym(group_col)) test_res <- test(formula = eval(exp1), data = gene_df) if(verbose){ print(test_res) } return(test_res) } #' For all genes in the input dataframe, test for statistical significance of difference in group means. #' #' Args: #' gene_data: Dataframe in long format. #' gene_col: Column with genes. #' value_col: Column with values to test. #' group_col: Column with sample groups. There must be exactly two different groups. #' test: Statistical test to perform. It must have the same interface as t.test() #' #' Returns: #' tibble object test_gene_table <- function(gene_data, gene_col, value_col, group_col, test = t.test) { genes <- unique(as.matrix(gene_data)[,gene_col]) htest_list <- lapply(genes, function(x) test_gene(x, gene_data, gene_col, value_col, group_col, test, verbose = FALSE)) p_val <- sapply(1:length(htest_list), function(x) htest_list[[x]]$p.value) df <- data.frame(gene = genes, p_value = p_val) df <- dplyr::mutate( df, significance = case_when( is.na(p_value) ~ NA_character_, p_value < 0.001 ~ "***", p_value < 0.01 ~ "**", p_value < 0.05 ~ "*", TRUE ~ "NS" ), test_res = htest_list ) return(as_tibble(df)) } #' Return asterisks according to p-values. #' #' Args: #' p_value: Vector of p-values. #' #' Returns: #' character vector asterisk <- function(p_value) { }
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 9214 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 9214 c c Input Parameter (command line, file): c input filename QBFLIB/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#9.asp.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 3247 c no.of clauses 9214 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 9214 c c QBFLIB/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#9.asp.qdimacs 3247 9214 E1 [] 0 128 3119 9214 NONE
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#9.asp/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#9.asp.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
712
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 9214 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 9214 c c Input Parameter (command line, file): c input filename QBFLIB/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#9.asp.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 3247 c no.of clauses 9214 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 9214 c c QBFLIB/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#3.s#9.asp.qdimacs 3247 9214 E1 [] 0 128 3119 9214 NONE
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/counting.R
\name{count_references}
\alias{count_references}
\title{Count alignment hits to a reference database.}
\usage{
count_references(object, ...)
}
\arguments{
\item{object}{An experiment data table as returned by any alignment method
like \code{\link{align_short_reads}} or \code{\link{align_long_reads}}.}

\item{...}{A configuration as generated by \code{\link{config_count}}.}
}
\value{
A list containing the used alignments and the transcript counts in `counts`.
}
\description{
This will correct for effective reference lengths, as almost any good tool
does these days, so the returned counts are not correlated with feature
lengths. By default an expectation maximization algorithm is used to resolve
multiple mappings of one read to many references, which happens in virtually
every metagenomics data set. The optimized likelihood function is very
similar to the one in kallisto (https://doi.org/10.1038/nbt.3519).
}
\details{
Note that for the EM method an NA reference will be reported, which
corresponds to the approximate abundance of references not contained in the
database.
}
/man/count_references.Rd
permissive
Gibbons-Lab/mbtools
R
false
true
1,185
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/counting.R \name{count_references} \alias{count_references} \title{Count alignment hits to a reference database.} \usage{ count_references(object, ...) } \arguments{ \item{object}{An experiment data table as returned by any alignment method like \code{\link{align_short_reads}} or \code{\link{align_long_reads}}.} \item{...}{A configuration as generated by \code{\link{config_count}}.} } \value{ A list containing the used alignments and the transcript counts in `counts`. } \description{ This will correct for effective reference lengths as done by almost any good tool those days. So the returned counts are not correlated with feature lengths. By default an expectation maximization algorithm is used to resolve multiple mappings of one read to many references which pretty much always happens in metagenomics data sets. The optimized likelihood function is very similar to the one in kallisto (https://doi.org/10.1038/nbt.3519). } \details{ Note that for the EM method there will be a NA reference reported which corresponds to the approximate abundance of references not contained in the database. }
###############################################################################
## Example from the EVT chapter: conditional risk measures estimated with a
## self-exciting marked point process.
## Goldman Sachs data from 1999-05-05 to 2016-10-04.
###############################################################################
library(xts)
library(PerformanceAnalytics)
library(xtable)
library(PtProcess)
library(fExtremes)
library(gPdtest)
library(plotly)
library(boot)

# NOTE(review): hard-coded absolute setwd() makes the script non-portable.
setwd("C:\\Users\\Rafael\\Documents\\UDESC\\TCC\\TCC Latex svn\\TCC-R-codes")

# Daily log returns; losses are the negated returns (positive value = loss).
prices <- as.xts(read.zoo(read.csv("evt-exemplo GS.csv"), format = "%Y-%m-%d", FUN = as.Date))
returns <- na.omit(Return.calculate(prices, method = "log"))
losses <- -1*returns
losses <- losses[,"Adj.Close"]
names(losses) <- "losses"
#############################################################################################################
## Either the PtProcess package (more general) or SAPP (etasap function) can be used;
## the PtProcess package is used here.
## Definition of the marked point process (mpp) model:
## the ground intensity is an ETAS model and the marks follow a generalized
## Pareto distribution with xi > 0.
#############################################################################################################
# Build a data frame of event times and losses above the threshold u
# (the 90% sample quantile of the losses).
u <- quantile(losses, 0.9)
Lu <- losses[which(losses[,"losses"]>u),"losses"]
Tj <- as.numeric(index(Lu)-index(losses[1,]))
Lj <- coredata(Lu) # original loss value, given that it exceeds u
Luj <- Lj - u # excess loss over u; input for the lambdag and dgP_mark densities
Datadf <- data.frame(Luj, time = Tj/365) # event times measured in years
# rename the excess-loss column to the name expected by PtProcess
colnames(Datadf)[1] <- "magnitude"
#Ht <- data.frame(time=as.numeric(index(losses)-index(losses[1,])), magnitude=coredata(losses[,"Adj.Close"]))# (unused) full history frame
#colnames(Ht)[2] <- "magnitude"
#evalpts <- Ht[,"time"] # points at which the intensity function would be evaluated
# Quick look at the exceedance events (time vs. magnitude).
plot(Datadf$time, Datadf$magnitude, type="h")
#############################################################################################################
## First estimate the GPD parameters by maximum likelihood.
gpd <- gpdFit(as.timeSeries(losses[,"losses"]), u=u, type = "mle")
xi <- gpd@fit$par.ests[1]
beta <- gpd@fit$par.ests[2]
#############################################################################################################
## With the data frame and evaluation points in place, the MPP model is
## specified. The ground intensity (lambdag) is an ETAS model; the mark
## density is a GPD with parameters xi and beta.
## The mark density and simulation functions dgP_mark and rgP_mark must be
## supplied. The parameter vector holds ALL parameters of both densities:
## the first five belong to the ETAS model, then the two GPD parameters.
## Hence params = mu, A, alpha, c, p, xi, beta.
#############################################################################################################
# NOTE(review): both mark functions ignore their 'params' argument and read
# the globals xi/beta estimated above, so the GPD parameters are NOT
# re-optimized together with the ETAS parameters -- confirm this is intended.
dgP_mark <- function(x, data, params){
  # Log-density of the GPD evaluated at the excess losses.
  g <- (1/beta)*(1+((xi*x[,"magnitude"])/beta))^(-1-(1/xi))
  return(log(g))
}
rgP_mark <- function(ti, data, params){
  # Simulate one GPD-distributed mark.
  y <- rgp(1, beta, xi)
  return(list(magnitude = y))
}
#############################################################################################################
## Parameter mapping: only the first five entries (ETAS) are optimized.
gmap <- expression(params[1:5])
#mmap <- expression(params)
## Mappings used by the optimizer to write parameters back into the model.
nlmmap <- function(y, p){
  y$params <- p
  return(y)
}
expmap <- function(y, p){
  # Exponentiates the working parameters (optimization is done in log space).
  y$params <- exp(p)
  return(y)
}
## The marked point process object itself.
Dados <- Datadf
parameters <- c(0.05, 3.1, 1.5, 0.1, 1.1) # initial values for the lambdag parameters
TT <- c(Dados$time[1], Dados$time[nrow(Dados)])
sepp <- mpp(data = Dados, gif = etas_gif, marks = list(dgP_mark, rgP_mark), params = parameters, gmap = gmap, mmap = NULL, TT = TT)
## Since all model parameters must be positive,
# we can optimize over their logarithms so the optimizer
## works on the whole real line.
initial <- log(parameters)
## The negative log-likelihood (neglogLik) is minimized in two steps:
# first optim(), then nlm() when optim() does not converge.
sepphat <- optim(initial, neglogLik, gr = NULL, object = sepp, pmap = expmap, control = list(trace = 0, maxit = 100))
if(sepphat$convergence == 0){
  # Convergence succeeded: map the parameters back into the model.
  # NOTE(review): on this path sepphat carries $par but no $hessian and no
  # $estimate, so the standard-error block below fails -- the SE computation
  # only works on the nlm() path. Confirm and fix upstream.
  seppmodel <- expmap(sepp, sepphat$par)
}else{
  # Use the estimates from optim() as starting values for a new optimization.
  initial <- sepphat$par
  sepphat <- nlm(neglogLik, initial, object = sepp, pmap = expmap, hessian = TRUE, print.level = 0, iterlim = 500, typsize = initial)
  seppmodel <- expmap(sepp, sepphat$estimate)
}
# Collect the fitted parameters, rescaling the time-based ones to a daily basis.
theta <- with(seppmodel, c(params[1]/365, params[2]/365, params[3], params[4], params[5], xi, beta))
names(theta) <- c("tau", "psi", "delta", "gamma", "rho", "xi", "beta")
## Standard errors of the lambdag parameter estimates.
H <- sepphat$hessian
CovM <- solve(H) # the covariance matrix is the inverse of the Hessian
# SE of e^params = sqrt(se^2 * e^(2*params))  (delta method)
setheta <- sqrt(exp(2*sepphat$estimate)*diag(CovM))
setheta <- c(setheta[1]/365, setheta[2]/365, setheta[3:5])
## Given convergence, evaluate the ground intensity over the sample period.
pts <- seq(0, as.numeric(index(losses[nrow(losses),])-index(losses[1,]))/365, length.out = nrow(losses))
lambdag <- etas_gif(Dados, evalpts = pts, params = seppmodel$params)/365
# Excess losses over u, floored at zero.
exc <- (losses[,"losses"]-u)-((losses[,"losses"]-u)<0)*(losses[,"losses"]-u)
############################################################################################################
## VaR and ES based on the conditional intensity lambdag.
# Conditional VaR/ES for a GPD tail with exceedance intensity lambda;
# returns a two-column matrix cbind(VaRpp, ESpp).
VaRESpp <- function(u, beta, xi, alpha=0.99, lambda){
  VaRpp <- u+(beta/xi)*(((1-alpha)/lambda)^(-xi)-1)
  ESpp <- ((VaRpp)/(1-xi))+((beta-xi*u)/(1-xi))
  return(cbind(VaRpp, ESpp))
}
vares <- VaRESpp(u, beta, xi, alpha=0.99, lambdag)
## Unconditional VaR and ES,
# based on the GPD alone (no conditioning on the intensity).
urisk <- gpdRiskMeasures(gpd, prob = 0.99)
## xts series with the losses, excess losses, intensity and risk measures.
# NOTE(review): the name 'ts' shadows stats::ts for the rest of the script.
ts <- cbind(losses, pts, exc, lambdag, vares)
names(ts) <- c("losses", "indextimes", "excess", "intensity", "VaR", "ES")
op <- par(mfrow=c(2,1))
plot(ts[,"intensity"], xlab = "", ylab = "Intensidade", main = "Processo Pontual de Auto-Excitação", major.format="%m/%Y")
plot(ts[,"excess"], type="h", xlab = "Data", ylab = "Perdas em excesso", main = "", major.format="%m/%Y")
par(op)
## VaR and ES overlaid on the realized losses.
plot(ts[-1,"losses"], type="h", xlab = "Data", ylab = "Perdas", main = "Medidas Condicionais de Risco", ylim = c(0, 0.22), major.format="%m/%Y") # the first loss has no prior VaR to compare against
lines(ts[-length(ts[,"VaR"]),"VaR"], col="red", lwd = 2) # the last VaR refers to the next, not yet observed, loss
lines(ts[-length(ts[,"ES"]),"ES"], col="blue", lwd = 2) # the last ES refers to the next, not yet observed, loss
abline(h=urisk[c(2,3)], col=c("red", "blue"), lty = c(2,4), lwd = 2)
legend("topright", legend = c("VaR incondicional", "ES incondicional"), col = c("red", "blue"), lty = c(2, 4), lwd = 2, cex = 1.2)
## Goodness-of-fit check: transformed event times should lie on the 45-degree line.
plot(residuals(seppmodel), xlab = "Evento Número", ylab = "Tempo Transformado", pty = "s")
abline(0, 1)
/evt-exemplo GS2.R
no_license
rfbressan/tcc-r
R
false
false
7,587
r
############################################################################### ## Exemplo do capítulo sobre EVT de medidas condicionais de risco utilizando ## o método processo pontual auto-excitavel. ## Dados da GS entre 05/05/1999 a 04/10/2016 ############################################################################### library(xts) library(PerformanceAnalytics) library(xtable) library(PtProcess) library(fExtremes) library(gPdtest) library(plotly) library(boot) setwd("C:\\Users\\Rafael\\Documents\\UDESC\\TCC\\TCC Latex svn\\TCC-R-codes") prices <- as.xts(read.zoo(read.csv("evt-exemplo GS.csv"), format = "%Y-%m-%d", FUN = as.Date)) returns <- na.omit(Return.calculate(prices, method = "log")) losses <- -1*returns losses <- losses[,"Adj.Close"] names(losses) <- "losses" ############################################################################################################# ## Pode-se usar os pacotes PtProcess (mais geral) ou SAPP (função etasap) ## Utilizando o pacote PtProcess ## Definindo o modelo de processo pontual marcado - mpp ## Nosso modelo sera um ETAS e a distribuição das marcas uma distribuicao de Pareto com qsi > 0 ############################################################################################################# # Quero um DF com tempos e valores de perdas acima de u=1.5% u <- quantile(losses, 0.9) Lu <- losses[which(losses[,"losses"]>u),"losses"] Tj <- as.numeric(index(Lu)-index(losses[1,])) Lj <- coredata(Lu) # Eh o valor original da perda, dado que ela seja maior que u Luj <- Lj - u # Eh a perda em excesso, que sera a entrada para as densidades lambdag e dgP_mark Datadf <- data.frame(Luj, time = Tj/365) # tem que ajustar o nome da coluna de Luj colnames(Datadf)[1] <- "magnitude" #Ht <- data.frame(time=as.numeric(index(losses)-index(losses[1,])), magnitude=coredata(losses[,"Adj.Close"]))# tem que ajustar o nome da coluna #colnames(Ht)[2] <- "magnitude" #evalpts <- Ht[,"time"] # valores onde sera avaliada a funcao intensidade 
plot(Datadf$time, Datadf$magnitude, type="h") ############################################################################################################# ## Vamos primeiro estimar os parametros da GPD gpd <- gpdFit(as.timeSeries(losses[,"losses"]), u=u, type = "mle") xi <- gpd@fit$par.ests[1] beta <- gpd@fit$par.ests[2] ############################################################################################################# ## Agora que ja temos o DF de dados e os pontos de avaliacao, temos de especificar o modelo MPP ## A funcao intensidade de base (lambdag) sabemos que será uma ETAS ## A densidade das marcas eh uma GPD com parametros qsi e beta ## Precisamos especificar as funcoes densidade e simulacao das marcas, dgP_mark e rgP_mark ## o vetor de parametros contem TODOS os parametros das duas densidades, sendo os primeiros ## pertencentes ao modelo ETAS (5), apos vem os parametros da GPD (2), Logo ## params = mu, A, alpha, c, p, xi, beta ############################################################################################################# dgP_mark <- function(x, data, params){ g <- (1/beta)*(1+((xi*x[,"magnitude"])/beta))^(-1-(1/xi)) return(log(g)) } rgP_mark <- function(ti, data, params){ y <- rgp(1, beta, xi) return(list(magnitude = y)) } ############################################################################################################# ## Mapeamento dos parametros gmap <- expression(params[1:5]) #mmap <- expression(params) ## Mapeamento para o otimizador nlmmap <- function(y, p){ y$params <- p return(y) } expmap <- function(y, p){ y$params <- exp(p) return(y) } ## O objeto MPP propriamente dito Dados <- Datadf parameters <- c(0.05, 3.1, 1.5, 0.1, 1.1) # Valores iniciais dos parametros para lambdag TT <- c(Dados$time[1], Dados$time[nrow(Dados)]) sepp <- mpp(data = Dados, gif = etas_gif, marks = list(dgP_mark, rgP_mark), params = parameters, gmap = gmap, mmap = NULL, TT = TT) ## Como os parametros do modelo devem ser todos positivos, 
podemos tirar o log deles e passar ## para o otimizador para que este trabalhe com todos os reais initial <- log(parameters) ## A funcao negativa logaritimica de verossimilhanca negloglik deve ser minimizada # Utilizar dois passos. Primeiro optim e depois nlm sepphat <- optim(initial, neglogLik, gr = NULL, object = sepp, pmap = expmap, control = list(trace = 0, maxit = 100)) if(sepphat$convergence == 0){ # Então a convergencia foi bem sucedida seppmodel <- expmap(sepp, sepphat$par) }else{ # Agora pode-se usar os parametros estimados para serem valores iniciais de uma nova otimizacao initial <- sepphat$par sepphat <- nlm(neglogLik, initial, object = sepp, pmap = expmap, hessian = TRUE, print.level = 0, iterlim = 500, typsize = initial) seppmodel <- expmap(sepp, sepphat$estimate) } theta <- with(seppmodel, c(params[1]/365, params[2]/365, params[3], params[4], params[5], xi, beta)) names(theta) <- c("tau", "psi", "delta", "gamma", "rho", "xi", "beta") ## Estimando os erros padroes dos parametros de lambdag H <- sepphat$hessian CovM <- solve(H) # Matriz de covariancias eh a inversa da Hessiana # SE e^params = sqrt(se^2*e^2*params) setheta <- sqrt(exp(2*sepphat$estimate)*diag(CovM)) setheta <- c(setheta[1]/365, setheta[2]/365, setheta[3:5]) ## Dado a convergencia do modelo podemos plotar a funcao intensidade de base pts <- seq(0, as.numeric(index(losses[nrow(losses),])-index(losses[1,]))/365, length.out = nrow(losses)) lambdag <- etas_gif(Dados, evalpts = pts, params = seppmodel$params)/365 exc <- (losses[,"losses"]-u)-((losses[,"losses"]-u)<0)*(losses[,"losses"]-u) ############################################################################################################ ## Calculo de VaR e ES com base em lambdag VaRESpp <- function(u, beta, xi, alpha=0.99, lambda){ VaRpp <- u+(beta/xi)*(((1-alpha)/lambda)^(-xi)-1) ESpp <- ((VaRpp)/(1-xi))+((beta-xi*u)/(1-xi)) return(cbind(VaRpp, ESpp)) } vares <- VaRESpp(u, beta, xi, alpha=0.99, lambdag) ## Calculo de VaR e ES não 
condicionais, com base na GPD apenas urisk <- gpdRiskMeasures(gpd, prob = 0.99) ## Serie xts com dados das perdas, perdas em excesso e lambdag ts <- cbind(losses, pts, exc, lambdag, vares) names(ts) <- c("losses", "indextimes", "excess", "intensity", "VaR", "ES") op <- par(mfrow=c(2,1)) plot(ts[,"intensity"], xlab = "", ylab = "Intensidade", main = "Processo Pontual de Auto-Excitação", major.format="%m/%Y") plot(ts[,"excess"], type="h", xlab = "Data", ylab = "Perdas em excesso", main = "", major.format="%m/%Y") par(op) ## Grafico do VaR e ES sobre as perdas efetivas plot(ts[-1,"losses"], type="h", xlab = "Data", ylab = "Perdas", main = "Medidas Condicionais de Risco", ylim = c(0, 0.22), major.format="%m/%Y") # A primeira perda nao tem VaR para comparar lines(ts[-length(ts[,"VaR"]),"VaR"], col="red", lwd = 2) # O ultimo VaR so sera comparado com a proxima perda que ainda nao ocorreu lines(ts[-length(ts[,"ES"]),"ES"], col="blue", lwd = 2) # O ultimo ES so sera comparado com a proxima perda que ainda nao ocorreu abline(h=urisk[c(2,3)], col=c("red", "blue"), lty = c(2,4), lwd = 2) legend("topright", legend = c("VaR incondicional", "ES incondicional"), col = c("red", "blue"), lty = c(2, 4), lwd = 2, cex = 1.2) ## Analise da adequacao do fit plot(residuals(seppmodel), xlab = "Evento Número", ylab = "Tempo Transformado", pty = "s") abline(0, 1)
#' @title Draw an XY plot comparing two series of p-values (or corrected p-values, or e-values). #' #' @author Jacques van Helden (\email{Jacques.van-Helden@@univ-amu.fr}) #' #' @description Draw an XY plot comparing two series of #' p-values (or corrected p-values, or e-values). #' #' @details #' First version: 2015-03 #' Last modification: 2015-03 #' #' @param frame.to.plot A data.frame comprizing two columns, with the respective p-values (corrected or not) to plot. #' @param xlab=names(frame.to.plot)[1] Passed to plot() #' @param ylab=names(frame.to.plot)[2] Passed to plot() #' @param alpha=0.05 Alpha threshold to select positives and compute the contingency table. #' @param score.name="p-value" Score name to display on the legend (e.g. "p-value", "e-value", "fdr"). #' @param plot.result=TRUE Draw a comparison plot #' @param plot.colors=TRUE Use colors for the plot #' @param plot.cex=0.7 Point size for the plot (passed to graphics::plot() function). #' @param legend.cex=0.9 Font size factor for legend. This allows to choose separate font sizes to draw the plot itself and the legend. #' @param legend.corner="bottomright" Position where legend should be placed. #' @param ... 
Additional parameters are passed to plot() #' #' @examples #' ################################################################ #' ## Load dataset from DenBoer, 2009 and define two groups of interest #' library(denboer2009) #' group1 <- "Bo" #' group2 <- "Bt" #' #' verbose(paste("Selecting samples for groups", group1, "and", group2), 1) #' selected.groups <- c(group1, group2) #' selected.samples <- denboer2009.pheno$sample.labels %in% selected.groups #' #' ## Selected expression profiles #' selected.probesets <- apply(denboer2009.amp == "P", 1, sum) >= 30 #' expr <- denboer2009.expr[selected.probesets, selected.samples] #' sample.labels <- denboer2009.pheno$sample.labels[selected.samples] #' verbose(paste("Expression profiles: ", nrow(expr), "probesets x", ncol(expr), "samples"), 1) #' table(sample.labels) #' #' ################################################################ #' ## Compare p-values returned by Student and Welch t-tests #' student.result <- tTestPerRow(x=expr,cl = sample.labels, var.equal = TRUE) #' welch.result <- tTestPerRow(x=expr,cl = sample.labels, var.equal = FALSE) #' plotPvalCompa(data.frame("Student.e.value"=student.result$table$e.value, #' "Welch.e.value"=welch.result$table$e.value), #' score.name = "e-value", #' legend.corner="bottomright") #' #' @export plotPvalCompa <- function (frame.to.plot, xlab=names(frame.to.plot)[1], ylab=names(frame.to.plot)[2], alpha=0.05, score.name="p-value", plot.result=TRUE, plot.colors=TRUE, plot.cex=0.7, legend.cex = 0.9, legend.corner="bottomright", ...) 
{ result <- list() ## Omit NA values from the input table frame.to.plot <- na.omit(frame.to.plot) score1 <- frame.to.plot[,1] score2 <- frame.to.plot[,2] result$score.table <- frame.to.plot ## Count the number of elements below the threshold in each col result$score.table$score1.signif <- score1 <= alpha result$score.table$score2.signif <- score2 <= alpha result$score.table$score2.or.score1.signif <- result$score.table$score1.signif | result$score.table$score2.signif result$score.table$score2.and.score1.signif <- result$score.table$score1.signif & result$score.table$score2.signif ## Count number of significant rows for each test result$nb.signif.score1 <- sum(result$score.table$score1.signif) result$nb.signif.score2 <- sum(result$score.table$score2.signif) result$nb.signif.both <- sum((result$score.table$score1.signif) & (result$score.table$score2.signif)) result$nb.signif.any <- sum((result$score.table$score1.signif) | (result$score.table$score2.signif)) result$nb.signif.none <- sum((!result$score.table$score1.signif) & (!result$score.table$score2.signif)) result$jaccard <- result$nb.signif.both / result$nb.signif.any result$contingency <- table(result$score.table$score1.signif, result$score.table$score2.signif) ################################################################ ## Comparison plot if (plot.result) { ## Define density colors to highlight places where many points overlap density.cols <- densCols(-log10(frame.to.plot), nbin=256, col=colorRampPalette(c("#BBBBBB", "#000000"))) ## Add colors to highlight consistencies and inconsistencies between significant features if (plot.colors) { density.cols[result$score.table$score1.signif] <- "blue" density.cols[result$score.table$score2.signif] <- "#00BB00" density.cols[result$score.table$score1.signif & result$score.table$score2.signif] <- "red" } else { density.cols[result$score.table$score1.signif | result$score.table$score2.signif] <- "#888888" density.cols[result$score.table$score1.signif & 
result$score.table$score2.signif] <- "#444444" } ## Define point characters to highlight consistencies and inconsistencies compa.pch <- rep(1, nrow(frame.to.plot)) compa.pch[result$score.table$score1.signif] <- 3 compa.pch[result$score.table$score2.signif] <- 4 compa.pch[result$score.table$score1.signif & result$score.table$score2.signif] <- 8 ## Plot the comparison plot(frame.to.plot, xlab=xlab, ylab=ylab, panel.first=grid(), col=density.cols, log="xy", pch=compa.pch, cex=plot.cex, ...) abline(a=0, b=1) ## Plot the diagonal ## Mark the alpha thresholds if (plot.colors) { abline(v=alpha, lty="solid", col="blue", lwd=1) abline(h=alpha, lty="solid", col="#00BB00", lwd=1) } else { abline(v=alpha, lty="dashed", col="black", lwd=1) abline(h=alpha, lty="dashed", col="black", lwd=1) } # Add a legend if (plot.colors) { legend.col=c("white", "white", "blue", "#00BB00", "red", "black", "white", "white") } else { legend.col="black" } legend(legend.corner, c(paste("Total:", nrow(frame.to.plot)), paste(score.name, "<=", alpha), paste(xlab, ":", result$nb.signif.score1), paste(ylab, ":", result$nb.signif.score2), paste("both:", result$nb.signif.both), paste("none:", result$nb.signif.none), paste("Jaccard:", signif(digits=2, result$jaccard)) ), bty="o",bg="white", pch=c(-1,-1,3,4,8,1,-1), cex=legend.cex, col=legend.col) } }
/R/plotPvalCompa.R
no_license
jvanheld/stats4bioinfo
R
false
false
6,945
r
#' @title Draw an XY plot comparing two series of p-values (or corrected p-values, or e-values). #' #' @author Jacques van Helden (\email{Jacques.van-Helden@@univ-amu.fr}) #' #' @description Draw an XY plot comparing two series of #' p-values (or corrected p-values, or e-values). #' #' @details #' First version: 2015-03 #' Last modification: 2015-03 #' #' @param frame.to.plot A data.frame comprizing two columns, with the respective p-values (corrected or not) to plot. #' @param xlab=names(frame.to.plot)[1] Passed to plot() #' @param ylab=names(frame.to.plot)[2] Passed to plot() #' @param alpha=0.05 Alpha threshold to select positives and compute the contingency table. #' @param score.name="p-value" Score name to display on the legend (e.g. "p-value", "e-value", "fdr"). #' @param plot.result=TRUE Draw a comparison plot #' @param plot.colors=TRUE Use colors for the plot #' @param plot.cex=0.7 Point size for the plot (passed to graphics::plot() function). #' @param legend.cex=0.9 Font size factor for legend. This allows to choose separate font sizes to draw the plot itself and the legend. #' @param legend.corner="bottomright" Position where legend should be placed. #' @param ... 
Additional parameters are passed to plot() #' #' @examples #' ################################################################ #' ## Load dataset from DenBoer, 2009 and define two groups of interest #' library(denboer2009) #' group1 <- "Bo" #' group2 <- "Bt" #' #' verbose(paste("Selecting samples for groups", group1, "and", group2), 1) #' selected.groups <- c(group1, group2) #' selected.samples <- denboer2009.pheno$sample.labels %in% selected.groups #' #' ## Selected expression profiles #' selected.probesets <- apply(denboer2009.amp == "P", 1, sum) >= 30 #' expr <- denboer2009.expr[selected.probesets, selected.samples] #' sample.labels <- denboer2009.pheno$sample.labels[selected.samples] #' verbose(paste("Expression profiles: ", nrow(expr), "probesets x", ncol(expr), "samples"), 1) #' table(sample.labels) #' #' ################################################################ #' ## Compare p-values returned by Student and Welch t-tests #' student.result <- tTestPerRow(x=expr,cl = sample.labels, var.equal = TRUE) #' welch.result <- tTestPerRow(x=expr,cl = sample.labels, var.equal = FALSE) #' plotPvalCompa(data.frame("Student.e.value"=student.result$table$e.value, #' "Welch.e.value"=welch.result$table$e.value), #' score.name = "e-value", #' legend.corner="bottomright") #' #' @export plotPvalCompa <- function (frame.to.plot, xlab=names(frame.to.plot)[1], ylab=names(frame.to.plot)[2], alpha=0.05, score.name="p-value", plot.result=TRUE, plot.colors=TRUE, plot.cex=0.7, legend.cex = 0.9, legend.corner="bottomright", ...) 
{ result <- list() ## Omit NA values from the input table frame.to.plot <- na.omit(frame.to.plot) score1 <- frame.to.plot[,1] score2 <- frame.to.plot[,2] result$score.table <- frame.to.plot ## Count the number of elements below the threshold in each col result$score.table$score1.signif <- score1 <= alpha result$score.table$score2.signif <- score2 <= alpha result$score.table$score2.or.score1.signif <- result$score.table$score1.signif | result$score.table$score2.signif result$score.table$score2.and.score1.signif <- result$score.table$score1.signif & result$score.table$score2.signif ## Count number of significant rows for each test result$nb.signif.score1 <- sum(result$score.table$score1.signif) result$nb.signif.score2 <- sum(result$score.table$score2.signif) result$nb.signif.both <- sum((result$score.table$score1.signif) & (result$score.table$score2.signif)) result$nb.signif.any <- sum((result$score.table$score1.signif) | (result$score.table$score2.signif)) result$nb.signif.none <- sum((!result$score.table$score1.signif) & (!result$score.table$score2.signif)) result$jaccard <- result$nb.signif.both / result$nb.signif.any result$contingency <- table(result$score.table$score1.signif, result$score.table$score2.signif) ################################################################ ## Comparison plot if (plot.result) { ## Define density colors to highlight places where many points overlap density.cols <- densCols(-log10(frame.to.plot), nbin=256, col=colorRampPalette(c("#BBBBBB", "#000000"))) ## Add colors to highlight consistencies and inconsistencies between significant features if (plot.colors) { density.cols[result$score.table$score1.signif] <- "blue" density.cols[result$score.table$score2.signif] <- "#00BB00" density.cols[result$score.table$score1.signif & result$score.table$score2.signif] <- "red" } else { density.cols[result$score.table$score1.signif | result$score.table$score2.signif] <- "#888888" density.cols[result$score.table$score1.signif & 
result$score.table$score2.signif] <- "#444444" } ## Define point characters to highlight consistencies and inconsistencies compa.pch <- rep(1, nrow(frame.to.plot)) compa.pch[result$score.table$score1.signif] <- 3 compa.pch[result$score.table$score2.signif] <- 4 compa.pch[result$score.table$score1.signif & result$score.table$score2.signif] <- 8 ## Plot the comparison plot(frame.to.plot, xlab=xlab, ylab=ylab, panel.first=grid(), col=density.cols, log="xy", pch=compa.pch, cex=plot.cex, ...) abline(a=0, b=1) ## Plot the diagonal ## Mark the alpha thresholds if (plot.colors) { abline(v=alpha, lty="solid", col="blue", lwd=1) abline(h=alpha, lty="solid", col="#00BB00", lwd=1) } else { abline(v=alpha, lty="dashed", col="black", lwd=1) abline(h=alpha, lty="dashed", col="black", lwd=1) } # Add a legend if (plot.colors) { legend.col=c("white", "white", "blue", "#00BB00", "red", "black", "white", "white") } else { legend.col="black" } legend(legend.corner, c(paste("Total:", nrow(frame.to.plot)), paste(score.name, "<=", alpha), paste(xlab, ":", result$nb.signif.score1), paste(ylab, ":", result$nb.signif.score2), paste("both:", result$nb.signif.both), paste("none:", result$nb.signif.none), paste("Jaccard:", signif(digits=2, result$jaccard)) ), bty="o",bg="white", pch=c(-1,-1,3,4,8,1,-1), cex=legend.cex, col=legend.col) } }
setwd("~/R/Course 4 (Exploratory Data Analysis)/C4W1/") library(data.table) if(!file.exists("household_power_consumption.txt")) { fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(fileURL, destfile = "./household_power_consumption.zip", method = "curl") unzip("./household_power_consumption.zip") } ## Load data into memory power <- read.csv2("household_power_consumption.txt", header = TRUE, comment.char = "", na.strings = "?", nrows = 2085259, stringsAsFactors = FALSE) dt <- power[power$Date %in% c("1/2/2007","2/2/2007"),] dt$datetime <- strptime(paste(dt$Date,dt$Time), "%d/%m/%Y %H:%M:%S") dt$Global_active_power <- as.numeric(dt$Global_active_power) ## First plot png(file="plot1.png",width=480,height=480) hist(dt$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red") dev.off()
/plot1.R
no_license
ujjawal2910/ExData_Plotting1
R
false
false
974
r
setwd("~/R/Course 4 (Exploratory Data Analysis)/C4W1/") library(data.table) if(!file.exists("household_power_consumption.txt")) { fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(fileURL, destfile = "./household_power_consumption.zip", method = "curl") unzip("./household_power_consumption.zip") } ## Load data into memory power <- read.csv2("household_power_consumption.txt", header = TRUE, comment.char = "", na.strings = "?", nrows = 2085259, stringsAsFactors = FALSE) dt <- power[power$Date %in% c("1/2/2007","2/2/2007"),] dt$datetime <- strptime(paste(dt$Date,dt$Time), "%d/%m/%Y %H:%M:%S") dt$Global_active_power <- as.numeric(dt$Global_active_power) ## First plot png(file="plot1.png",width=480,height=480) hist(dt$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red") dev.off()
# Startup message .onAttach <- function(libname, pkgname) { packageStartupMessage("\nTo cite electionsBR in publications, use: citation('electionsBR')") packageStartupMessage("To learn more, visit: http://electionsbr.com\n") } #' Returns a vector with the abbreviations of all Brazilian states #' #' @export uf_br <- function() { c("AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "MA", "MG", "MS", "MT", "PA", "PB", "PE", "PI", "PR", "RJ", "RN", "RO", "RR", "RS", "SC", "SE", "SP", "TO") } #' Returns a vector with the abbreviations of all Brazilian parties #' #' The character vector includes only parties that ran in elections in 2016. #' #' @export parties_br <- function() { c("AVANTE", "CIDADANIA", "DC", "DEM", "MDB", "NOVO", "PATRIOTA", "PC do B", "PCB", "PCO", "PDT", "PEN", "PHS", "PMB", "PMN", "PODE", "PP", "PPL", "PPS", "PR", "PRB", "PROS", "PRP", "PRTB", "PSB", "PSC", "PSD", "PSDB", "PSDC", "PSL", "PSOL", "PSTU", "PT", "PT do B", "PTB", "PTC", "PTN", "PV", "REDE", "REPUBLICANOS", "SD", "SOLIEDARIEDADE", "UP") } # Reads and rbinds multiple data.frames in the same directory #' @import dplyr juntaDados <- function(uf, encoding, br_archive){ archive <- Sys.glob("*")[grepl(".pdf", Sys.glob("*")) == FALSE] %>% .[grepl(uf, .)] %>% file.info() %>% .[.$size > 200, ] %>% row.names() if(!br_archive){ archive <- archive[grepl("BR", archive) == FALSE] } else { archive <- archive[grepl("BR", archive) == TRUE] } if(grepl(".csv", archive[1])){ test_col_names <- TRUE }else{ test_col_names <- FALSE } lapply(archive, function(x) tryCatch( suppressWarnings(readr::read_delim(x, col_names = test_col_names, delim = ";", locale = readr::locale(encoding = encoding), col_types = readr::cols(), progress = F, escape_double = F)), error = function(e) NULL)) %>% data.table::rbindlist() %>% dplyr::as_tibble() } # Converts electoral data from Latin-1 to ASCII #' @import dplyr to_ascii <- function(banco, encoding){ if(encoding == "Latin-1") encoding <- "latin1" dplyr::mutate_if(banco, 
is.character, dplyr::funs(iconv(., from = encoding, to = "ASCII//TRANSLIT"))) } # Tests federal election year inputs test_fed_year <- function(year){ if(!is.numeric(year) | length(year) != 1 | !year %in% seq(1994, 2018, 4)) stop("Invalid input. Please, check the documentation and try again.") } # Tests federal election year inputs test_local_year <- function(year){ if(!is.numeric(year) | length(year) != 1 | !year %in% seq(1996, 2020, 4)) stop("Invalid input. Please, check the documentation and try again.") } # Test federal positions #test_fed_position <- function(position){ # position <- tolower(position) # if(!is.character(position) | length(position) != 1 | !position %in% c("presidente", # "governador", # "senador", # "deputado federal", # "deputado estadual", # "deputado distrital")) stop("Invalid input. Please, check the documentation and try again.") #} # Test federal positions #test_local_position <- function(position){ # position <- tolower(position) # if(!is.character(position) | length(position) != 1 | !position %in% c("prefeito", # "vereador")) stop("Invalid input. Please, check the documentation and try again.") #} # Converts electoral data from Latin-1 to ASCII test_encoding <- function(encoding){ if(encoding == "Latin-1") encoding <- "latin1" if(!encoding %in% tolower(iconvlist())) stop("Invalid encoding. 
Check iconvlist() to view a list with all valid encodings.") } # Test br types test_br <- function(br_archive){ if(!is.logical(br_archive)) message("'br_archive' must be logical (TRUE or FALSE).") } # Tests state acronyms test_uf <- function(uf) { uf <- gsub(" ", "", uf) %>% toupper() uf <- match.arg(uf, c("AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "MA", "MG", "MS", "MT", "PA", "PB", "PE", "PI", "PR", "RJ", "RN", "RO", "RR", "RS", "SC", "SE", "SP", "TO", "ALL"), several.ok = T) if("ALL" %in% uf) return(".") else return(paste(uf, collapse = "|")) } # Replace position by cod position # replace_position_cod <- function(position){ # position <- tolower(position) # return(switch(position, "presidente" = 1, # "governador" = 3, # "senador" = 5, # "deputado federal" = 6, # "deputado estadual" = 7, # "deputado distrital" = 8, # "prefeito" = 11, # "vereador" = 13)) # } # Function to export data to .dta and .sav export_data <- function(df) { haven::write_dta(df, "electoral_data.dta") haven::write_sav(df, "electoral_data.sav") message(paste0("Electoral data files were saved on: ", getwd(), ".\n")) } # Data download download_unzip <- function(url, dados, filenames, year){ if(!file.exists(dados)){ sprintf(url, filenames) %>% download.file(dados) message("Processing the data...") unzip(dados, exdir = paste0("./", year)) } else{ message("Processing the data...") unzip(dados, exdir = paste0("./", year)) } } # Avoid the R CMD check note about magrittr's dot utils::globalVariables(".")
/R/utils.R
no_license
cran/electionsBR
R
false
false
6,065
r
# Startup message .onAttach <- function(libname, pkgname) { packageStartupMessage("\nTo cite electionsBR in publications, use: citation('electionsBR')") packageStartupMessage("To learn more, visit: http://electionsbr.com\n") } #' Returns a vector with the abbreviations of all Brazilian states #' #' @export uf_br <- function() { c("AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "MA", "MG", "MS", "MT", "PA", "PB", "PE", "PI", "PR", "RJ", "RN", "RO", "RR", "RS", "SC", "SE", "SP", "TO") } #' Returns a vector with the abbreviations of all Brazilian parties #' #' The character vector includes only parties that ran in elections in 2016. #' #' @export parties_br <- function() { c("AVANTE", "CIDADANIA", "DC", "DEM", "MDB", "NOVO", "PATRIOTA", "PC do B", "PCB", "PCO", "PDT", "PEN", "PHS", "PMB", "PMN", "PODE", "PP", "PPL", "PPS", "PR", "PRB", "PROS", "PRP", "PRTB", "PSB", "PSC", "PSD", "PSDB", "PSDC", "PSL", "PSOL", "PSTU", "PT", "PT do B", "PTB", "PTC", "PTN", "PV", "REDE", "REPUBLICANOS", "SD", "SOLIEDARIEDADE", "UP") } # Reads and rbinds multiple data.frames in the same directory #' @import dplyr juntaDados <- function(uf, encoding, br_archive){ archive <- Sys.glob("*")[grepl(".pdf", Sys.glob("*")) == FALSE] %>% .[grepl(uf, .)] %>% file.info() %>% .[.$size > 200, ] %>% row.names() if(!br_archive){ archive <- archive[grepl("BR", archive) == FALSE] } else { archive <- archive[grepl("BR", archive) == TRUE] } if(grepl(".csv", archive[1])){ test_col_names <- TRUE }else{ test_col_names <- FALSE } lapply(archive, function(x) tryCatch( suppressWarnings(readr::read_delim(x, col_names = test_col_names, delim = ";", locale = readr::locale(encoding = encoding), col_types = readr::cols(), progress = F, escape_double = F)), error = function(e) NULL)) %>% data.table::rbindlist() %>% dplyr::as_tibble() } # Converts electoral data from Latin-1 to ASCII #' @import dplyr to_ascii <- function(banco, encoding){ if(encoding == "Latin-1") encoding <- "latin1" dplyr::mutate_if(banco, 
is.character, dplyr::funs(iconv(., from = encoding, to = "ASCII//TRANSLIT"))) } # Tests federal election year inputs test_fed_year <- function(year){ if(!is.numeric(year) | length(year) != 1 | !year %in% seq(1994, 2018, 4)) stop("Invalid input. Please, check the documentation and try again.") } # Tests federal election year inputs test_local_year <- function(year){ if(!is.numeric(year) | length(year) != 1 | !year %in% seq(1996, 2020, 4)) stop("Invalid input. Please, check the documentation and try again.") } # Test federal positions #test_fed_position <- function(position){ # position <- tolower(position) # if(!is.character(position) | length(position) != 1 | !position %in% c("presidente", # "governador", # "senador", # "deputado federal", # "deputado estadual", # "deputado distrital")) stop("Invalid input. Please, check the documentation and try again.") #} # Test federal positions #test_local_position <- function(position){ # position <- tolower(position) # if(!is.character(position) | length(position) != 1 | !position %in% c("prefeito", # "vereador")) stop("Invalid input. Please, check the documentation and try again.") #} # Converts electoral data from Latin-1 to ASCII test_encoding <- function(encoding){ if(encoding == "Latin-1") encoding <- "latin1" if(!encoding %in% tolower(iconvlist())) stop("Invalid encoding. 
Check iconvlist() to view a list with all valid encodings.") } # Test br types test_br <- function(br_archive){ if(!is.logical(br_archive)) message("'br_archive' must be logical (TRUE or FALSE).") } # Tests state acronyms test_uf <- function(uf) { uf <- gsub(" ", "", uf) %>% toupper() uf <- match.arg(uf, c("AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "MA", "MG", "MS", "MT", "PA", "PB", "PE", "PI", "PR", "RJ", "RN", "RO", "RR", "RS", "SC", "SE", "SP", "TO", "ALL"), several.ok = T) if("ALL" %in% uf) return(".") else return(paste(uf, collapse = "|")) } # Replace position by cod position # replace_position_cod <- function(position){ # position <- tolower(position) # return(switch(position, "presidente" = 1, # "governador" = 3, # "senador" = 5, # "deputado federal" = 6, # "deputado estadual" = 7, # "deputado distrital" = 8, # "prefeito" = 11, # "vereador" = 13)) # } # Function to export data to .dta and .sav export_data <- function(df) { haven::write_dta(df, "electoral_data.dta") haven::write_sav(df, "electoral_data.sav") message(paste0("Electoral data files were saved on: ", getwd(), ".\n")) } # Data download download_unzip <- function(url, dados, filenames, year){ if(!file.exists(dados)){ sprintf(url, filenames) %>% download.file(dados) message("Processing the data...") unzip(dados, exdir = paste0("./", year)) } else{ message("Processing the data...") unzip(dados, exdir = paste0("./", year)) } } # Avoid the R CMD check note about magrittr's dot utils::globalVariables(".")
testlist <- list(mu = 3.22535934918083e-319, var = 0) result <- do.call(metafolio:::est_beta_params,testlist) str(result)
/metafolio/inst/testfiles/est_beta_params/libFuzzer_est_beta_params/est_beta_params_valgrind_files/1612988445-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
121
r
testlist <- list(mu = 3.22535934918083e-319, var = 0) result <- do.call(metafolio:::est_beta_params,testlist) str(result)
#
# ICPC15.R, 20 May 19
# Data from:
# License Usage and Changes: {A} Large-Scale Study on {GitHub}
# Christopher Vendome and Mario Linares-V\'{a}squez and Gabriele Bavota and Massimiliano {Di Penta} and Daniel German and Denys Poshyvanyk
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG Java licensing evolution

source("ESEUR_config.r")

library("plyr")


# Per (year, license) group: number of distinct programs and total file count.
cnt_licenses=function(df)
{
return(data.frame(program=length(unique(df$program)),
		files=sum(df$files)))
}

# Per program: number of distinct licenses and total file count.
cnt_prog_licenses=function(df)
{
return(data.frame(num_license=length(unique(df$license)),
		files=sum(df$files)))
}

# Yearly totals across all licenses.
sum_year=function(df)
{
return(data.frame(program=sum(df$program),
		files=sum(df$files)))
}

# Draw one license's cumulative file count, but only for licenses that
# appear in more than 1,000 files in total.
plot_lic_cnt=function(df)
{
if (sum(df$files) > 1000)
   lines(df$year, cumsum(df$files))
}

# Draw one license's cumulative file count as a percentage of the cumulative
# yearly total.  `df$year-1991` maps a year to its row in year_total (data
# start in 1992).
# NOTE(review): assumes year_total has one row per consecutive year -- confirm
# no years are missing from the input data.
plot_lic_perc=function(df)
{
lines(df$year, 100*cumsum(df$files)/cumsum(year_total$files[df$year-1991]), col=df$col_str)
}


lic=read.csv(paste0(ESEUR_dir, "economics/ICPC15.csv.xz"), as.is=TRUE)

# Reduce full dates to calendar years; drop the sparse pre-1992 data.
lic$year=as.integer(substr(lic$date, 1, 4))
lic=subset(lic, year >= 1992)

l_per_year=ddply(lic, .(year, license), cnt_licenses)
year_total=ddply(l_per_year, .(year), sum_year)

# Keep only licenses appearing in at least 1,000 files overall.
lic_1kfiles=ddply(l_per_year, .(license),
			function(df) if(sum(df$files) < 1000) NULL
					else df)

# One color per surviving license.
lic_str=unique(lic_1kfiles$license)
pal_col=rainbow(length(lic_str))
lic_1kfiles$col_str=mapvalues(lic_1kfiles$license, lic_str, pal_col)

# plot(1950, type="n", log="y",
#	xlim=c(1995, 2012), ylim=c(1, 1e6),
#	xlab="Year", ylab="Files")
#
# d_ply(l1k_per_year, .(license), plot_lic_cnt)

plot(1950, type="n", log="y",
	xlim=c(2000, 2012), ylim=c(1e-0, 75),
	xlab="Year", ylab="Files (cumulative percentage)")

d_ply(lic_1kfiles, .(license), plot_lic_perc)

legend(x="topright", legend=lic_str, bty="n", fill=pal_col, cex=0.9)

# Number of programs whose constituent files contain a given number
# of licenses (denoted by different colored lines; see legend).
#
# Disused alternative analysis (per-program license counts), kept for
# reference:
#
# pal_col=rainbow(10)
#
#
# cnt_prog_licenses=function(df)
# {
# return(data.frame(num_license=length(unique(df$license)),
#		files=sum(df$files)))
# }
#
#
# plot_prog_lic=function(df)
# {
# lines(df$year, df$freq, col=pal_col[df$x])
# }
#
#
# prog_lic=ddply(lic, .(year, program), cnt_prog_licenses)
# year_total=ddply(prog_lic, .(year),
#			function(df)
#			count(df$num_license))
#
# plot(1950, type="n", log="y",
#	xlim=c(2000, 2012), ylim=c(1, 620),
#	xlab="Year", ylab="Programs\n")
#
# d_ply(year_total, .(x), plot_prog_lic)
#
#
# legend(x="topleft", legend=1:10, bty="n", fill=pal_col, cex=1.2)
#
/economics/ICPC15.R
no_license
sebastianBIanalytics/ESEUR-code-data
R
false
false
2,624
r
# # ICPC15.R, 20 May 19 # Data from: # License Usage and Changes: {A} Large-Scale Study on {GitHub} # Christopher Vendome and Mario Linares-V\'{a}squez and Gabriele Bavota and Massimiliano {Di Penta} and Daniel German and Denys Poshyvanyk # # Example from: # Evidence-based Software Engineering: based on the publicly available data # Derek M. Jones # # TAG Java licensing evolution source("ESEUR_config.r") library("plyr") cnt_licenses=function(df) { return(data.frame(program=length(unique(df$program)), files=sum(df$files))) } cnt_prog_licenses=function(df) { return(data.frame(num_license=length(unique(df$license)), files=sum(df$files))) } sum_year=function(df) { return(data.frame(program=sum(df$program), files=sum(df$files))) } plot_lic_cnt=function(df) { if (sum(df$files) > 1000) lines(df$year, cumsum(df$files)) } plot_lic_perc=function(df) { lines(df$year, 100*cumsum(df$files)/cumsum(year_total$files[df$year-1991]), col=df$col_str) } lic=read.csv(paste0(ESEUR_dir, "economics/ICPC15.csv.xz"), as.is=TRUE) lic$year=as.integer(substr(lic$date, 1, 4)) lic=subset(lic, year >= 1992) l_per_year=ddply(lic, .(year, license), cnt_licenses) year_total=ddply(l_per_year, .(year), sum_year) lic_1kfiles=ddply(l_per_year, .(license), function(df) if(sum(df$files) < 1000) NULL else df) lic_str=unique(lic_1kfiles$license) pal_col=rainbow(length(lic_str)) lic_1kfiles$col_str=mapvalues(lic_1kfiles$license, lic_str, pal_col) # plot(1950, type="n", log="y", # xlim=c(1995, 2012), ylim=c(1, 1e6), # xlab="Year", ylab="Files") # # d_ply(l1k_per_year, .(license), plot_lic_cnt) plot(1950, type="n", log="y", xlim=c(2000, 2012), ylim=c(1e-0, 75), xlab="Year", ylab="Files (cumulative percentage)") d_ply(lic_1kfiles, .(license), plot_lic_perc) legend(x="topright", legend=lic_str, bty="n", fill=pal_col, cex=0.9) # Number of programs whose constituent files contain a given number # of licenses (denoted by different colored lines; see legend). 
# # pal_col=rainbow(10) # # # cnt_prog_licenses=function(df) # { # return(data.frame(num_license=length(unique(df$license)), # files=sum(df$files))) # } # # # plot_prog_lic=function(df) # { # lines(df$year, df$freq, col=pal_col[df$x]) # } # # # prog_lic=ddply(lic, .(year, program), cnt_prog_licenses) # year_total=ddply(prog_lic, .(year), # function(df) # count(df$num_license)) # # plot(1950, type="n", log="y", # xlim=c(2000, 2012), ylim=c(1, 620), # xlab="Year", ylab="Programs\n") # # d_ply(year_total, .(x), plot_prog_lic) # # # legend(x="topleft", legend=1:10, bty="n", fill=pal_col, cex=1.2) #
# Load each requested package, installing any that are missing first.
#
# Args:
#   myPackages: character vector of package names to attach.
#
# Side effects: attaches packages to the search path; may install missing
# packages from the default repository.  Returns NULL invisibly.
"load.library" <- function (myPackages) {
  # Hoist the installed-package lookup out of the loop: installed.packages()
  # scans every library path and is expensive to call once per package.
  installed <- rownames(installed.packages())

  for (package in myPackages) {
    # Install first when the package is not available locally.
    if (!(package %in% installed)) {
      install.packages(package)
    }
    # library() errors if the package still cannot be loaded, which is the
    # desired failure mode here (require() would only warn).
    do.call("library", list(package))
  }
  invisible(NULL)
}
/lib/load_library.R
no_license
pvangay/mwas
R
false
false
440
r
# Attach every package listed in myPackages, installing those that are not
# yet present in any local library before loading them.
"load.library" <- function (myPackages) {
  for (pkg in myPackages) {
    # Is this package already installed somewhere on .libPaths()?
    already_installed <- pkg %in% rownames(installed.packages())
    if (!already_installed) {
      install.packages(pkg)
    }
    # Attach by name held in a variable (character.only semantics).
    do.call("library", list(pkg))
  }
}
# Read every CSV in the working directory (one file per sample), assemble a
# barcode-by-sample table of read counts, store it in the global
# `BarcodeFrequencies`, and also write it to `name` as a CSV.
#
# NOTE(review): assumes each input CSV has exactly two columns
# (barcode, reads) and that every file lists the same barcodes in the same
# order -- confirm against the upstream pipeline.  Side effects: sets the
# globals `samplenames` and `BarcodeFrequencies` via `<<-`.
importfiles <- function(name) {
	namesoffiles <- list.files()
	importedlist <- lapply(namesoffiles, read.csv)
	# Flatten all files into one vector, then reshape into a matrix with one
	# (barcode, reads) column pair per sample.
	unlisted <- unlist(importedlist)
	numberofsamples <- as.numeric(length(namesoffiles))
	numberofrows <- numberofsamples * 2
	numberofbarcodes <- as.numeric(length(unlisted) / numberofsamples / 2)
	dim(unlisted) <- c(numberofbarcodes, numberofrows)
	# Even-numbered columns hold the read counts; odd columns repeat barcodes.
	columnswithreads <- seq(from = 2, to = dim(unlisted)[2], by = 2)
	tableofreads <- as.data.frame(unlisted[,columnswithreads])
	tableofreads <- apply(tableofreads, 2, as.numeric)
	barcodelist <- unlisted[,1]
	# Sample name = text before the first space in each file name.
	takename <- function(x) {
		y <- unlist(strsplit(x, " "))
		y[1]
	}
	samplenames <<- unlist(lapply(namesoffiles, takename))
	namedtable <- data.frame(barcodelist, tableofreads)
	colnames(namedtable) <- c("barcode", samplenames)
	BarcodeFrequencies <<- namedtable
	write.csv(BarcodeFrequencies, name, row.names = FALSE)
}

# Plot one sample's barcode frequencies (log10 of each barcode's share of the
# sample's total reads) and save the figure as "<name>.tiff".  Reads the
# global `BarcodeFrequencies` populated by importfiles().
plotSTAMP <- function(name) {
	tiffname <- paste(name, ".tiff", sep="")
	tiff(tiffname, width = 1000, height = 1000, points = 20)
	column <- which(name == colnames(BarcodeFrequencies))
	x <- 1:dim(BarcodeFrequencies)[1]
	y <- log10(BarcodeFrequencies[,column]/sum(BarcodeFrequencies[,column]))
	plot(x, y, ylim = c(-6, 0), xlab = "Barcode", ylab = "Log10 Frequency", main =name)
	dev.off()
}

# One frequency plot per sample (called for side effects only).
sapply(samplenames, plotSTAMP)

### merging all 5 pools together
firstmaster <- read.csv("FirstPoolFrequencies.csv", row.names = 1)
secondmaster <- read.csv("SecondPoolFrequencies.csv", row.names = 1)
thirdmaster <- read.csv("ThirdPoolFrequencies.csv", row.names = 1)
fourthmaster <- read.csv("FourthPoolFrequencies.csv", row.names = 1)
fifthmaster <- read.csv("FifthPoolFrequencies.csv", row.names = 1)

# Merge two frequency tables column-wise: columns present in both tables get
# their read counts summed; columns unique to either table are carried over
# unchanged.  The merged table is left in the global `combineddf` via `<<-`.
addcolumns <- function (df1, df2) {
	df1names <- colnames(df1)
	df2names <- colnames(df2)
	start <- df1$barcode
	# For each df2 column: append it, summing with df1's column when shared.
	addnewcolumns <- function(t) {
		if(length(which(df1names == t)) == 0)
			{start <- data.frame(start, as.numeric(df2[,df2names ==t]))}
		if(length(which(df1names == t)) == 1)
			{start <- data.frame(start, rowSums(cbind(as.numeric(df2[,df2names ==t]), as.numeric(df1[,df1names ==t]))))}
		colnames(start)[dim(start)[2]] <- t
		start <<- start
	}
	sapply(df2names, addnewcolumns)
	# Re-append df1 columns that df2 did not contain.
	reinsertcolumns <- function(t) {
		if(length(which(df2names == t)) == 0) {
			start <- data.frame(start, as.numeric(df1[,df1names ==t]))
			colnames(start)[dim(start)[2]] <- t
		}
		start <<- start
	}
	sapply(df1names, reinsertcolumns)
	combineddf <<- start
}

# Fold the five pools together pairwise, accumulating into `combineddf`.
addcolumns(firstmaster, secondmaster)
addcolumns(combineddf, thirdmaster)
addcolumns(combineddf, fourthmaster)
addcolumns(combineddf, fifthmaster)
combineddf <- data.frame(firstmaster$barcode, combineddf)
write.csv(combineddf, "FinalMaster.csv")
## now manually curate and reimported, removed in.p2
/STAMPR_Scripts/ImportSamples.R
permissive
hullahalli/stampr_rtisan
R
false
false
2,802
r
importfiles <- function(name) { namesoffiles <- list.files() importedlist <- lapply(namesoffiles, read.csv) unlisted <- unlist(importedlist) numberofsamples <- as.numeric(length(namesoffiles)) numberofrows <- numberofsamples * 2 numberofbarcodes <- as.numeric(length(unlisted) / numberofsamples / 2) dim(unlisted) <- c(numberofbarcodes, numberofrows) columnswithreads <- seq(from = 2, to = dim(unlisted)[2], by = 2) tableofreads <- as.data.frame(unlisted[,columnswithreads]) tableofreads <- apply(tableofreads, 2, as.numeric) barcodelist <- unlisted[,1] takename <- function(x) { y <- unlist(strsplit(x, " ")) y[1] } samplenames <<- unlist(lapply(namesoffiles, takename)) namedtable <- data.frame(barcodelist, tableofreads) colnames(namedtable) <- c("barcode", samplenames) BarcodeFrequencies <<- namedtable write.csv(BarcodeFrequencies, name, row.names = FALSE) } plotSTAMP <- function(name) {tiffname <- paste(name, ".tiff", sep="") tiff(tiffname, width = 1000, height = 1000, points = 20) column <- which(name == colnames(BarcodeFrequencies)) x <- 1:dim(BarcodeFrequencies)[1] y <- log10(BarcodeFrequencies[,column]/sum(BarcodeFrequencies[,column])) plot(x, y, ylim = c(-6, 0), xlab = "Barcode", ylab = "Log10 Frequency", main =name) dev.off()} sapply(samplenames, plotSTAMP) ###merging all 5 pools together firstmaster <- read.csv("FirstPoolFrequencies.csv", row.names = 1) secondmaster <- read.csv("SecondPoolFrequencies.csv", row.names = 1) thirdmaster <- read.csv("ThirdPoolFrequencies.csv", row.names = 1) fourthmaster <- read.csv("FourthPoolFrequencies.csv", row.names = 1) fifthmaster <- read.csv("FifthPoolFrequencies.csv", row.names = 1) addcolumns <- function (df1, df2) { df1names <- colnames(df1) df2names <- colnames(df2) start <- df1$barcode addnewcolumns <- function(t) { if(length(which(df1names == t)) == 0) {start <- data.frame(start, as.numeric(df2[,df2names ==t]))} if(length(which(df1names == t)) == 1) {start <- data.frame(start, rowSums(cbind(as.numeric(df2[,df2names 
==t]), as.numeric(df1[,df1names ==t]))))} colnames(start)[dim(start)[2]] <- t start <<- start } sapply(df2names, addnewcolumns) reinsertcolumns <- function(t) { if(length(which(df2names == t)) == 0) { start <- data.frame(start, as.numeric(df1[,df1names ==t])) colnames(start)[dim(start)[2]] <- t } start <<- start } sapply(df1names, reinsertcolumns) combineddf <<- start } addcolumns(firstmaster, secondmaster) addcolumns(combineddf, thirdmaster) addcolumns(combineddf, fourthmaster) addcolumns(combineddf, fifthmaster) combineddf <- data.frame(firstmaster$barcode, combineddf) write.csv(combineddf, "FinalMaster.csv") ##now manually curate and reimported, removed in.p2
# Yige Wu @WashU March 2020 ## for calculating the fraction of tumor cells with cnv in each chr region using the cnvs in representative genes # set up libraries and output directory ----------------------------------- ## set working directory dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/" setwd(dir_base) source("./ccRCC_snRNA_analysis/load_pkgs.R") source("./ccRCC_snRNA_analysis/functions.R") source("./ccRCC_snRNA_analysis/variables.R")## set run id version_tmp <- 1 run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp) ## set output directory dir_out <- paste0(makeOutDir(), run_id, "/") dir.create(dir_out) # input dependencies ------------------------------------------------------ ## input barcode to chr-region cnv state # infercnv_run_id <- "Individual.20200305.v1" # barcode2cnv_bychr_df <- fread(input = "./Resources/Analysis_Results/copy_number/annotate_barcode_with_cnv/annotate_barcode_with_chr_level_cnv_using_cnv_genes/20200318.v1/Expected_CNV_State_By_Chr_Region_By_Barcode.Using_Representative_Genes.20200318.v1.tsv", data.table = F) infercnv_run_id <- "Individual.20200207.v1" annotation_run_id <- "20200415.v1" file2read <- paste0("./Resources/Analysis_Results/copy_number/annotate_barcode_with_cnv/annotate_barcode_with_chr_level_cnv_using_cnv_genes/", annotation_run_id, "/", infercnv_run_id, ".CNV_State_By_Chr_Region_By_Barcode.Using_Representative_Genes.", annotation_run_id, ".tsv") barcode2cnv_bychr_df <- fread(input = file2read, data.table = F) ## input barcode to cell type info barcode2celltype_df <- fread(input = "./Resources/Analysis_Results/map_celltype_to_barcode/map_celltype_to_all_cells/20200410.v1/30_aliquot_integration.barcode2celltype.20200410.v1.tsv", data.table = F) ## input id meta data id_metadata_df <- fread(input = "./Resources/Analysis_Results/sample_info/make_meta_data/20191105.v1/meta_data.20191105.v1.tsv", data.table = F) ## chromosome regions from which the CNVs are annotated here chr_regions2process <- 
unique(ccrcc_cna_genes_df$chr_region) chr_regions2process <- as.vector(chr_regions2process) # filter to only tumor cells and only tumor sample ---------------------------------------------- ## add integrated barcode to the cnv table barcode2cnv_bychr_df <- merge(barcode2cnv_bychr_df %>% rename(individual_barcode = barcode), barcode2celltype_df %>% select(orig.ident, integrated_barcode, individual_barcode) %>% rename(aliquot = orig.ident), by = c("aliquot", "individual_barcode"), all.x = T) ## get the tumor aliquot ids tumor_aliquot_ids <- id_metadata_df$Aliquot.snRNA[id_metadata_df$Sample_Type == "Tumor"] ## get malignant nephron epithelium cell barcodes malignant_barcodes <- barcode2celltype_df$integrated_barcode[barcode2celltype_df$Cell_type.detailed == "Tumor cells"] ## filter by malignant cell barcodes and tumor samples only tumorcell2cnv_bychr_df <- barcode2cnv_bychr_df %>% filter(integrated_barcode %in% malignant_barcodes) %>% filter(aliquot %in% tumor_aliquot_ids) # calculate the fraction of tumor cells with expected cnv ----------------- ## group by sample and aggregate the number of barcodes with expected cnv tumorcell2cnv_bychr_mat <- tumorcell2cnv_bychr_df[, chr_regions2process] tumorcell2isexpectedcnv_bychr_mat <- (!is.na(tumorcell2cnv_bychr_mat) & tumorcell2cnv_bychr_mat == "Expected") + 0 count_expectedcnv_bychr_byaliquot_df <- aggregate(x = tumorcell2isexpectedcnv_bychr_mat, list(tumorcell2cnv_bychr_df$aliquot), sum) count_expectedcnv_bychr_byaliquot_df <- count_expectedcnv_bychr_byaliquot_df %>% rename(aliquot = Group.1) ## group by sample and aggregate the number of total barcodes tumorcell2iscnv_bychr_mat <- (!is.na(tumorcell2cnv_bychr_mat)) + 0 count_cnv_bychr_byaliquot_df <- aggregate(x = tumorcell2iscnv_bychr_mat, list(tumorcell2cnv_bychr_df$aliquot), sum) count_cnv_bychr_byaliquot_df <- count_cnv_bychr_byaliquot_df %>% rename(aliquot = Group.1) # write outputs ------------------------------------------- ## write the number cells with expected 
cnv file2write <- paste0(dir_out, infercnv_run_id, "number_of_tumorcells.expectedCNA.by_chr_region.", run_id, ".tsv") write.table(x = count_expectedcnv_bychr_byaliquot_df, file = file2write, quote = F, sep = "\t", row.names = F) ## write the number cells with cnv calls file2write <- paste0(dir_out, infercnv_run_id, "number_of_tumorcells.withCNAcalls.by_chr_region.", run_id, ".tsv") write.table(x = count_cnv_bychr_byaliquot_df, file = file2write, quote = F, sep = "\t", row.names = F) ## write the number cells with cnv calls frac_expectedcnv_bychr_byaliquot_df <- count_expectedcnv_bychr_byaliquot_df[, chr_regions2process]/count_cnv_bychr_byaliquot_df[,chr_regions2process] frac_expectedcnv_bychr_byaliquot_df <- cbind(data.frame(aliquot = count_expectedcnv_bychr_byaliquot_df$aliquot), frac_expectedcnv_bychr_byaliquot_df) file2write <- paste0(dir_out, infercnv_run_id, "fraction_of_tumorcells.expectedCNA.by_chr_region.", run_id, ".tsv") write.table(x = frac_expectedcnv_bychr_byaliquot_df, file = file2write, quote = F, sep = "\t", row.names = F)
/copy_number/summarize_cnv_fraction/estimate_fraction_of_tumorcells_with_expectedcnv_perchrregion_per_sample_using_cnvgenes.R
no_license
ding-lab/ccRCC_snRNA_analysis
R
false
false
5,331
r
# Yige Wu @WashU March 2020 ## for calculating the fraction of tumor cells with cnv in each chr region using the cnvs in representative genes # set up libraries and output directory ----------------------------------- ## set working directory dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/" setwd(dir_base) source("./ccRCC_snRNA_analysis/load_pkgs.R") source("./ccRCC_snRNA_analysis/functions.R") source("./ccRCC_snRNA_analysis/variables.R")## set run id version_tmp <- 1 run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp) ## set output directory dir_out <- paste0(makeOutDir(), run_id, "/") dir.create(dir_out) # input dependencies ------------------------------------------------------ ## input barcode to chr-region cnv state # infercnv_run_id <- "Individual.20200305.v1" # barcode2cnv_bychr_df <- fread(input = "./Resources/Analysis_Results/copy_number/annotate_barcode_with_cnv/annotate_barcode_with_chr_level_cnv_using_cnv_genes/20200318.v1/Expected_CNV_State_By_Chr_Region_By_Barcode.Using_Representative_Genes.20200318.v1.tsv", data.table = F) infercnv_run_id <- "Individual.20200207.v1" annotation_run_id <- "20200415.v1" file2read <- paste0("./Resources/Analysis_Results/copy_number/annotate_barcode_with_cnv/annotate_barcode_with_chr_level_cnv_using_cnv_genes/", annotation_run_id, "/", infercnv_run_id, ".CNV_State_By_Chr_Region_By_Barcode.Using_Representative_Genes.", annotation_run_id, ".tsv") barcode2cnv_bychr_df <- fread(input = file2read, data.table = F) ## input barcode to cell type info barcode2celltype_df <- fread(input = "./Resources/Analysis_Results/map_celltype_to_barcode/map_celltype_to_all_cells/20200410.v1/30_aliquot_integration.barcode2celltype.20200410.v1.tsv", data.table = F) ## input id meta data id_metadata_df <- fread(input = "./Resources/Analysis_Results/sample_info/make_meta_data/20191105.v1/meta_data.20191105.v1.tsv", data.table = F) ## chromosome regions from which the CNVs are annotated here chr_regions2process <- 
unique(ccrcc_cna_genes_df$chr_region) chr_regions2process <- as.vector(chr_regions2process) # filter to only tumor cells and only tumor sample ---------------------------------------------- ## add integrated barcode to the cnv table barcode2cnv_bychr_df <- merge(barcode2cnv_bychr_df %>% rename(individual_barcode = barcode), barcode2celltype_df %>% select(orig.ident, integrated_barcode, individual_barcode) %>% rename(aliquot = orig.ident), by = c("aliquot", "individual_barcode"), all.x = T) ## get the tumor aliquot ids tumor_aliquot_ids <- id_metadata_df$Aliquot.snRNA[id_metadata_df$Sample_Type == "Tumor"] ## get malignant nephron epithelium cell barcodes malignant_barcodes <- barcode2celltype_df$integrated_barcode[barcode2celltype_df$Cell_type.detailed == "Tumor cells"] ## filter by malignant cell barcodes and tumor samples only tumorcell2cnv_bychr_df <- barcode2cnv_bychr_df %>% filter(integrated_barcode %in% malignant_barcodes) %>% filter(aliquot %in% tumor_aliquot_ids) # calculate the fraction of tumor cells with expected cnv ----------------- ## group by sample and aggregate the number of barcodes with expected cnv tumorcell2cnv_bychr_mat <- tumorcell2cnv_bychr_df[, chr_regions2process] tumorcell2isexpectedcnv_bychr_mat <- (!is.na(tumorcell2cnv_bychr_mat) & tumorcell2cnv_bychr_mat == "Expected") + 0 count_expectedcnv_bychr_byaliquot_df <- aggregate(x = tumorcell2isexpectedcnv_bychr_mat, list(tumorcell2cnv_bychr_df$aliquot), sum) count_expectedcnv_bychr_byaliquot_df <- count_expectedcnv_bychr_byaliquot_df %>% rename(aliquot = Group.1) ## group by sample and aggregate the number of total barcodes tumorcell2iscnv_bychr_mat <- (!is.na(tumorcell2cnv_bychr_mat)) + 0 count_cnv_bychr_byaliquot_df <- aggregate(x = tumorcell2iscnv_bychr_mat, list(tumorcell2cnv_bychr_df$aliquot), sum) count_cnv_bychr_byaliquot_df <- count_cnv_bychr_byaliquot_df %>% rename(aliquot = Group.1) # write outputs ------------------------------------------- ## write the number cells with expected 
cnv file2write <- paste0(dir_out, infercnv_run_id, "number_of_tumorcells.expectedCNA.by_chr_region.", run_id, ".tsv") write.table(x = count_expectedcnv_bychr_byaliquot_df, file = file2write, quote = F, sep = "\t", row.names = F) ## write the number cells with cnv calls file2write <- paste0(dir_out, infercnv_run_id, "number_of_tumorcells.withCNAcalls.by_chr_region.", run_id, ".tsv") write.table(x = count_cnv_bychr_byaliquot_df, file = file2write, quote = F, sep = "\t", row.names = F) ## write the number cells with cnv calls frac_expectedcnv_bychr_byaliquot_df <- count_expectedcnv_bychr_byaliquot_df[, chr_regions2process]/count_cnv_bychr_byaliquot_df[,chr_regions2process] frac_expectedcnv_bychr_byaliquot_df <- cbind(data.frame(aliquot = count_expectedcnv_bychr_byaliquot_df$aliquot), frac_expectedcnv_bychr_byaliquot_df) file2write <- paste0(dir_out, infercnv_run_id, "fraction_of_tumorcells.expectedCNA.by_chr_region.", run_id, ".tsv") write.table(x = frac_expectedcnv_bychr_byaliquot_df, file = file2write, quote = F, sep = "\t", row.names = F)
# TODO: Add comment # # Author: tiffn_000 ############################################################################### ###################### ####################### ####################### year <- 1981 for(i in 1:length(american)){ MKTloop <- data.frame(period = rep(NA, 8), tau = rep(NA, 8), p2s = rep(NA, 8), station=rep(NA,8),start = rep(NA, 8), end_date = rep(NA, 8)) MK <- MannKendall(american[[i]]$HydroYear$All$Data$Discharge_acfte6_day[which(format(american[[i]]$HydroYear$All$Data$Date,"%Y-%m")==paste(year,"-10", sep=""))[[1]]:length(american[[i]]$HydroYear$All$Data$Discharge_acfte6_day)]) MKTloop$tau[[1]] <- MK$tau MKTloop$p2s[[1]] <- MK$sl MKTloop$station[[1]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[1]] <- as.character(american[[i]]$HydroYear$All$Data$Date[[which(format(american[[i]]$HydroYear$All$Data$Date,"%Y-%m")==paste(year,"-10", sep=""))[[1]]]]) MKTloop$end_date[[1]] <- as.character(tail(american[[i]]$HydroYear$All$Data$Date,1)) MKTloop$period[[1]] <- "HY" MK <- MannKendall(american[[i]]$Winter_3mon$All$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_3mon$All$Data$Date,"%Y-%m")==paste(year,"-12", sep=""))[[1]]:length(american[[i]]$Winter_3mon$All$Data$Discharge_acfte6_day)]) MKTloop$tau[[2]] <- MK$tau MKTloop$p2s[[2]] <- MK$sl MKTloop$station[[2]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[2]] <- as.character(american[[i]]$Winter_3mon$All$Data$Date[[which(format(american[[i]]$Winter_3mon$All$Data$Date,"%Y-%m")==paste(year,"-12", sep=""))[[1]]]]) MKTloop$end_date[[2]] <- as.character(tail(american[[i]]$Winter_3mon$All$Data$Date,1)) MKTloop$period[[2]] <- "3MON" MK <- MannKendall(american[[i]]$Winter_6mon$All$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_6mon$All$Data$Date,"%Y-%m")==paste(year,"-11", sep=""))[[1]]:length(american[[i]]$Winter_6mon$All$Data$Discharge_acfte6_day)]) MKTloop$tau[[3]] <- MK$tau MKTloop$p2s[[3]] <- MK$sl MKTloop$station[[3]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[3]] <- 
as.character(american[[i]]$Winter_6mon$All$Data$Date[[which(format(american[[i]]$Winter_6mon$All$Data$Date,"%Y-%m")==paste(year,"-11", sep=""))[[1]]]]) MKTloop$end_date[[3]] <- as.character(tail(american[[i]]$Winter_6mon$All$Data$Date,1)) MKTloop$period[[3]] <- "6MON" MK <- MannKendall(american[[i]]$Winter_monthly$All$DEC$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$DEC$Data$Date,"%Y")==year)[[1]]:length(american[[i]]$Winter_monthly$All$DEC$Data$Discharge_acfte6_day)]) MKTloop$tau[[4]] <- MK$tau MKTloop$p2s[[4]] <- MK$sl MKTloop$station[[4]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[4]] <- as.character(american[[i]]$Winter_monthly$All$DEC$Data$Date[[which(format(american[[i]]$Winter_monthly$All$DEC$Data$Date,"%Y")==year)[[1]]]]) MKTloop$end_date[[4]] <- as.character(tail(american[[i]]$Winter_monthly$All$DEC$Data$Date,1)) MKTloop$period[[4]] <- "DEC" MK <- MannKendall(american[[i]]$Winter_monthly$All$JAN$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$JAN$Data$Date,"%Y")==(year+1))[[1]]:length(american[[i]]$Winter_monthly$All$JAN$Data$Discharge_acfte6_day)]) MKTloop$tau[[5]] <- MK$tau MKTloop$p2s[[5]] <- MK$sl MKTloop$station[[5]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[5]] <- as.character(american[[i]]$Winter_monthly$All$JAN$Data$Date[[which(format(american[[i]]$Winter_monthly$All$JAN$Data$Date,"%Y")==(year+1))[[1]]]]) MKTloop$end_date[[5]] <- as.character(tail(american[[i]]$Winter_monthly$All$JAN$Data$Date,1)) MKTloop$period[[5]] <- "JAN" MK <- MannKendall(american[[i]]$Winter_monthly$All$FEB$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$FEB$Data$Date,"%Y")==(year+1))[[1]]:length(american[[i]]$Winter_monthly$All$FEB$Data$Discharge_acfte6_day)]) MKTloop$tau[[6]] <- MK$tau MKTloop$p2s[[6]] <- MK$sl MKTloop$station[[6]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[6]] <- 
as.character(american[[i]]$Winter_monthly$All$FEB$Data$Date[[which(format(american[[i]]$Winter_monthly$All$FEB$Data$Date,"%Y")==(year+1))[[1]]]]) MKTloop$end_date[[6]] <- as.character(tail(american[[i]]$Winter_monthly$All$FEB$Data$Date,1)) MKTloop$period[[6]] <- "FEB" MK <- MannKendall(american[[i]]$Winter_monthly$All$MAR$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$MAR$Data$Date,"%Y")==(year+1))[[1]]:length(american[[i]]$Winter_monthly$All$MAR$Data$Discharge_acfte6_day)]) MKTloop$tau[[7]] <- MK$tau MKTloop$p2s[[7]] <- MK$sl MKTloop$station[[7]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[7]] <- as.character(american[[i]]$Winter_monthly$All$MAR$Data$Date[[which(format(american[[i]]$Winter_monthly$All$MAR$Data$Date,"%Y")==(year+1))[[1]]]]) MKTloop$end_date[[7]] <- as.character(tail(american[[i]]$Winter_monthly$All$MAR$Data$Date,1)) MKTloop$period[[7]] <- "MAR" MK <- MannKendall(american[[i]]$Winter_monthly$All$APR$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$APR$Data$Date,"%Y")==(year+1))[[1]]:length(american[[i]]$Winter_monthly$All$APR$Data$Discharge_acfte6_day)]) MKTloop$tau[[8]] <- MK$tau MKTloop$p2s[[8]] <- MK$sl MKTloop$station[[8]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[8]] <- as.character(american[[i]]$Winter_monthly$All$APR$Data$Date[[which(format(american[[i]]$Winter_monthly$All$APR$Data$Date,"%Y")==(year+1))[[1]]]]) MKTloop$end_date[[8]] <- as.character(tail(american[[i]]$Winter_monthly$All$APR$Data$Date,1)) MKTloop$period[[8]] <- "APR" MKTloop$start <- as.Date(MKTloop$start) MKTloop$end_date <- as.Date(MKTloop$end_date) write.csv(MKTloop, file=paste("C:\\Users\\tiffn_000\\Documents\\Data\\American\\MKT_stream_sites\\",year,"\\MKT_", names(american)[[i]],"_",year,".csv", sep="")) }
/R/American_MKT.R
no_license
tnkocis/stReamflowstats
R
false
false
5,874
r
# TODO: Add comment # # Author: tiffn_000 ############################################################################### ###################### ####################### ####################### year <- 1981 for(i in 1:length(american)){ MKTloop <- data.frame(period = rep(NA, 8), tau = rep(NA, 8), p2s = rep(NA, 8), station=rep(NA,8),start = rep(NA, 8), end_date = rep(NA, 8)) MK <- MannKendall(american[[i]]$HydroYear$All$Data$Discharge_acfte6_day[which(format(american[[i]]$HydroYear$All$Data$Date,"%Y-%m")==paste(year,"-10", sep=""))[[1]]:length(american[[i]]$HydroYear$All$Data$Discharge_acfte6_day)]) MKTloop$tau[[1]] <- MK$tau MKTloop$p2s[[1]] <- MK$sl MKTloop$station[[1]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[1]] <- as.character(american[[i]]$HydroYear$All$Data$Date[[which(format(american[[i]]$HydroYear$All$Data$Date,"%Y-%m")==paste(year,"-10", sep=""))[[1]]]]) MKTloop$end_date[[1]] <- as.character(tail(american[[i]]$HydroYear$All$Data$Date,1)) MKTloop$period[[1]] <- "HY" MK <- MannKendall(american[[i]]$Winter_3mon$All$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_3mon$All$Data$Date,"%Y-%m")==paste(year,"-12", sep=""))[[1]]:length(american[[i]]$Winter_3mon$All$Data$Discharge_acfte6_day)]) MKTloop$tau[[2]] <- MK$tau MKTloop$p2s[[2]] <- MK$sl MKTloop$station[[2]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[2]] <- as.character(american[[i]]$Winter_3mon$All$Data$Date[[which(format(american[[i]]$Winter_3mon$All$Data$Date,"%Y-%m")==paste(year,"-12", sep=""))[[1]]]]) MKTloop$end_date[[2]] <- as.character(tail(american[[i]]$Winter_3mon$All$Data$Date,1)) MKTloop$period[[2]] <- "3MON" MK <- MannKendall(american[[i]]$Winter_6mon$All$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_6mon$All$Data$Date,"%Y-%m")==paste(year,"-11", sep=""))[[1]]:length(american[[i]]$Winter_6mon$All$Data$Discharge_acfte6_day)]) MKTloop$tau[[3]] <- MK$tau MKTloop$p2s[[3]] <- MK$sl MKTloop$station[[3]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[3]] <- 
as.character(american[[i]]$Winter_6mon$All$Data$Date[[which(format(american[[i]]$Winter_6mon$All$Data$Date,"%Y-%m")==paste(year,"-11", sep=""))[[1]]]]) MKTloop$end_date[[3]] <- as.character(tail(american[[i]]$Winter_6mon$All$Data$Date,1)) MKTloop$period[[3]] <- "6MON" MK <- MannKendall(american[[i]]$Winter_monthly$All$DEC$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$DEC$Data$Date,"%Y")==year)[[1]]:length(american[[i]]$Winter_monthly$All$DEC$Data$Discharge_acfte6_day)]) MKTloop$tau[[4]] <- MK$tau MKTloop$p2s[[4]] <- MK$sl MKTloop$station[[4]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[4]] <- as.character(american[[i]]$Winter_monthly$All$DEC$Data$Date[[which(format(american[[i]]$Winter_monthly$All$DEC$Data$Date,"%Y")==year)[[1]]]]) MKTloop$end_date[[4]] <- as.character(tail(american[[i]]$Winter_monthly$All$DEC$Data$Date,1)) MKTloop$period[[4]] <- "DEC" MK <- MannKendall(american[[i]]$Winter_monthly$All$JAN$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$JAN$Data$Date,"%Y")==(year+1))[[1]]:length(american[[i]]$Winter_monthly$All$JAN$Data$Discharge_acfte6_day)]) MKTloop$tau[[5]] <- MK$tau MKTloop$p2s[[5]] <- MK$sl MKTloop$station[[5]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[5]] <- as.character(american[[i]]$Winter_monthly$All$JAN$Data$Date[[which(format(american[[i]]$Winter_monthly$All$JAN$Data$Date,"%Y")==(year+1))[[1]]]]) MKTloop$end_date[[5]] <- as.character(tail(american[[i]]$Winter_monthly$All$JAN$Data$Date,1)) MKTloop$period[[5]] <- "JAN" MK <- MannKendall(american[[i]]$Winter_monthly$All$FEB$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$FEB$Data$Date,"%Y")==(year+1))[[1]]:length(american[[i]]$Winter_monthly$All$FEB$Data$Discharge_acfte6_day)]) MKTloop$tau[[6]] <- MK$tau MKTloop$p2s[[6]] <- MK$sl MKTloop$station[[6]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[6]] <- 
as.character(american[[i]]$Winter_monthly$All$FEB$Data$Date[[which(format(american[[i]]$Winter_monthly$All$FEB$Data$Date,"%Y")==(year+1))[[1]]]]) MKTloop$end_date[[6]] <- as.character(tail(american[[i]]$Winter_monthly$All$FEB$Data$Date,1)) MKTloop$period[[6]] <- "FEB" MK <- MannKendall(american[[i]]$Winter_monthly$All$MAR$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$MAR$Data$Date,"%Y")==(year+1))[[1]]:length(american[[i]]$Winter_monthly$All$MAR$Data$Discharge_acfte6_day)]) MKTloop$tau[[7]] <- MK$tau MKTloop$p2s[[7]] <- MK$sl MKTloop$station[[7]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[7]] <- as.character(american[[i]]$Winter_monthly$All$MAR$Data$Date[[which(format(american[[i]]$Winter_monthly$All$MAR$Data$Date,"%Y")==(year+1))[[1]]]]) MKTloop$end_date[[7]] <- as.character(tail(american[[i]]$Winter_monthly$All$MAR$Data$Date,1)) MKTloop$period[[7]] <- "MAR" MK <- MannKendall(american[[i]]$Winter_monthly$All$APR$Data$Discharge_acfte6_day[which(format(american[[i]]$Winter_monthly$All$APR$Data$Date,"%Y")==(year+1))[[1]]:length(american[[i]]$Winter_monthly$All$APR$Data$Discharge_acfte6_day)]) MKTloop$tau[[8]] <- MK$tau MKTloop$p2s[[8]] <- MK$sl MKTloop$station[[8]] <- american[[i]]$raw$site_no[[1]] MKTloop$start[[8]] <- as.character(american[[i]]$Winter_monthly$All$APR$Data$Date[[which(format(american[[i]]$Winter_monthly$All$APR$Data$Date,"%Y")==(year+1))[[1]]]]) MKTloop$end_date[[8]] <- as.character(tail(american[[i]]$Winter_monthly$All$APR$Data$Date,1)) MKTloop$period[[8]] <- "APR" MKTloop$start <- as.Date(MKTloop$start) MKTloop$end_date <- as.Date(MKTloop$end_date) write.csv(MKTloop, file=paste("C:\\Users\\tiffn_000\\Documents\\Data\\American\\MKT_stream_sites\\",year,"\\MKT_", names(american)[[i]],"_",year,".csv", sep="")) }
### How are alist(a) and alist(a = ) different? Think about both the input and
### the output.

# An easy way to see the difference is by making functions with those args.
pryr::make_function(alist(a), quote(a))
# Error: all_named(args) is not TRUE
pryr::make_function(alist(a = ), quote(a))
# function (a) { a }

# `alist(a)` returns an unnamed list of length 1 whose single element is the
# symbol `a` (an object of class "name" -- distinct from the *element* being
# named "a"), so it cannot serve as a formal-argument list.
# `alist(a = )` returns a named list whose first element has the name `a` and
# an empty (missing) value, which is exactly the shape of a formal argument
# with no default.
/13_expressions/05_pairlists/exercise1.r
no_license
Bohdan-Khomtchouk/adv-r-book-solutions
R
false
false
654
r
### How are alist(a) and alist(a = ) different? Think about both the input and
### the output.

# Building functions from each argument list makes the difference visible.
pryr::make_function(alist(a), quote(a))
# Error: all_named(args) is not TRUE
pryr::make_function(alist(a = ), quote(a))
# function (a) { a }

# With alist(a) the result is a one-element *unnamed* list holding the symbol
# `a`; pryr::make_function() rejects it because all formals must be named.
# With alist(a = ) the result is a list carrying the name "a" with a missing
# value -- the representation of a default-less formal argument -- so the
# function is created successfully.
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/results_access.R, R/results_directions.R, % R/results_distance.R, R/results_elevation.R, R/results_geocode.R, % R/results_places.R, R/results_roads.R \name{access_result} \alias{access_result} \alias{direction_instructions} \alias{direction_routes} \alias{direction_legs} \alias{direction_steps} \alias{direction_points} \alias{direction_polyline} \alias{distance_origins} \alias{distance_destinations} \alias{distance_elements} \alias{elevation} \alias{elevation_location} \alias{geocode_coordinates} \alias{geocode_address} \alias{geocode_address_components} \alias{geocode_place} \alias{geocode_type} \alias{place} \alias{place_next_page} \alias{place_name} \alias{place_location} \alias{place_type} \alias{place_hours} \alias{place_open} \alias{nearest_roads_coordinates} \title{Access Result} \usage{ access_result( res, result = c("instructions", "routes", "legs", "steps", "points", "polyline", "coordinates", "address", "address_components", "geo_place_id", "dist_origins", "dist_destinations", "elevation", "elev_location", "place", "place_name", "next_page", "place_location", "place_type", "place_hours", "place_open") ) direction_instructions(res) direction_routes(res) direction_legs(res) direction_steps(res) direction_points(res) direction_polyline(res) distance_origins(res) distance_destinations(res) distance_elements(res) elevation(res) elevation_location(res) geocode_coordinates(res) geocode_address(res) geocode_address_components(res) geocode_place(res) geocode_type(res) place(res) place_next_page(res) place_name(res) place_location(res) place_type(res) place_hours(res) place_open(res) nearest_roads_coordinates(res) } \arguments{ \item{res}{result from a Google API query} \item{result}{the specific field of the result you want to access} } \description{ Methods for accessing specific elements of a Google API query. 
} \section{Functions}{ \itemize{ \item \code{direction_instructions}: the instructions from a directions query \item \code{direction_routes}: the routes from a directions query \item \code{direction_legs}: the legs from a directions query \item \code{direction_steps}: the steps from a directions query \item \code{direction_points}: the points from a directions query \item \code{direction_polyline}: the encoded polyline from a direction query \item \code{distance_origins}: the origin addresses from a distance query \item \code{distance_destinations}: the destination addresses from a distance query \item \code{distance_elements}: the element results from a distance query \item \code{elevation}: the elevation from an elevation query \item \code{elevation_location}: the elevation from an elevation query \item \code{geocode_coordinates}: the coordinates from a geocode or reverse geocode query \item \code{geocode_address}: the formatted address from a geocode or reverse geocode query \item \code{geocode_address_components}: the address components from a geocode or reverse geocode query \item \code{geocode_place}: the place id from a geocode or reverse geocode query \item \code{geocode_type}: the geocoded place types from a geocode or reverse geocode query \item \code{place}: the place_id from a places query \item \code{place_next_page}: the next page token from a places query \item \code{place_name}: the place name from a places query \item \code{place_location}: the location from a places query \item \code{place_type}: the type of place from a places query \item \code{place_hours}: the opening hours from a place details query \item \code{place_open}: the open now result from a place details query \item \code{nearest_roads_coordinates}: the coordinates from a nearest roads query }} \examples{ \dontrun{ apiKey <- "your_api_key" ## results returned as a list (simplify == TRUE) lst <- google_directions(origin = c(-37.8179746, 144.9668636), destination = c(-37.81659, 
144.9841), mode = "walking", key = apiKey, simplify = TRUE) ## results returned as raw JSON character vector js <- google_directions(origin = c(-37.8179746, 144.9668636), destination = c(-37.81659, 144.9841), mode = "walking", key = apiKey, simplify = FALSE) access_result(js, "polyline") direction_polyline(js) } }
/man/access_result.Rd
permissive
SymbolixAU/googleway
R
false
true
4,502
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/results_access.R, R/results_directions.R, % R/results_distance.R, R/results_elevation.R, R/results_geocode.R, % R/results_places.R, R/results_roads.R \name{access_result} \alias{access_result} \alias{direction_instructions} \alias{direction_routes} \alias{direction_legs} \alias{direction_steps} \alias{direction_points} \alias{direction_polyline} \alias{distance_origins} \alias{distance_destinations} \alias{distance_elements} \alias{elevation} \alias{elevation_location} \alias{geocode_coordinates} \alias{geocode_address} \alias{geocode_address_components} \alias{geocode_place} \alias{geocode_type} \alias{place} \alias{place_next_page} \alias{place_name} \alias{place_location} \alias{place_type} \alias{place_hours} \alias{place_open} \alias{nearest_roads_coordinates} \title{Access Result} \usage{ access_result( res, result = c("instructions", "routes", "legs", "steps", "points", "polyline", "coordinates", "address", "address_components", "geo_place_id", "dist_origins", "dist_destinations", "elevation", "elev_location", "place", "place_name", "next_page", "place_location", "place_type", "place_hours", "place_open") ) direction_instructions(res) direction_routes(res) direction_legs(res) direction_steps(res) direction_points(res) direction_polyline(res) distance_origins(res) distance_destinations(res) distance_elements(res) elevation(res) elevation_location(res) geocode_coordinates(res) geocode_address(res) geocode_address_components(res) geocode_place(res) geocode_type(res) place(res) place_next_page(res) place_name(res) place_location(res) place_type(res) place_hours(res) place_open(res) nearest_roads_coordinates(res) } \arguments{ \item{res}{result from a Google API query} \item{result}{the specific field of the result you want to access} } \description{ Methods for accessing specific elements of a Google API query. 
} \section{Functions}{ \itemize{ \item \code{direction_instructions}: the instructions from a directions query \item \code{direction_routes}: the routes from a directions query \item \code{direction_legs}: the legs from a directions query \item \code{direction_steps}: the steps from a directions query \item \code{direction_points}: the points from a directions query \item \code{direction_polyline}: the encoded polyline from a direction query \item \code{distance_origins}: the origin addresses from a distance query \item \code{distance_destinations}: the destination addresses from a distance query \item \code{distance_elements}: the element results from a distance query \item \code{elevation}: the elevation from an elevation query \item \code{elevation_location}: the elevation from an elevation query \item \code{geocode_coordinates}: the coordinates from a geocode or reverse geocode query \item \code{geocode_address}: the formatted address from a geocode or reverse geocode query \item \code{geocode_address_components}: the address components from a geocode or reverse geocode query \item \code{geocode_place}: the place id from a geocode or reverse geocode query \item \code{geocode_type}: the geocoded place types from a geocode or reverse geocode query \item \code{place}: the place_id from a places query \item \code{place_next_page}: the next page token from a places query \item \code{place_name}: the place name from a places query \item \code{place_location}: the location from a places query \item \code{place_type}: the type of place from a places query \item \code{place_hours}: the opening hours from a place details query \item \code{place_open}: the open now result from a place details query \item \code{nearest_roads_coordinates}: the coordinates from a nearest roads query }} \examples{ \dontrun{ apiKey <- "your_api_key" ## results returned as a list (simplify == TRUE) lst <- google_directions(origin = c(-37.8179746, 144.9668636), destination = c(-37.81659, 
144.9841), mode = "walking", key = apiKey, simplify = TRUE) ## results returned as raw JSON character vector js <- google_directions(origin = c(-37.8179746, 144.9668636), destination = c(-37.81659, 144.9841), mode = "walking", key = apiKey, simplify = FALSE) access_result(js, "polyline") direction_polyline(js) } }
## S3 print method for objects of class "aidsConsist": reports whether the
## theoretical consistency conditions of an (LA-)AIDS demand system hold.
##
## Args:
##   x   : an "aidsConsist" object with elements $mono (itself containing
##         $priceIndex), $addingUp, $homogeneity, $symmetry, and optionally
##         $concav.
##   ... : further arguments (currently ignored; kept for S3 compatibility).
##
## Returns: x, invisibly (conventional for print methods).
print.aidsConsist <- function( x, ... ) {

   cat( "\nChecking theoretical consistency of an " )
   # a translog ("TL") price index implies the full AIDS; any other price
   # index means a linear approximation (LA-AIDS) was used
   if( x$mono$priceIndex == "TL" ) {
      cat( "Almost Ideal Demand System (AIDS):\n" )
   } else {
      cat( "Linear Approximate Almost Ideal Demand System (LA-AIDS):\n" )
   }

   # Adding-up
   cat( "The adding-up condition is" )
   if( !x$addingUp ) {
      cat( " NOT" )
   }
   cat( " fulfilled\n" )

   # homogeneity
   cat( "The homogeneity condition is" )
   if( !x$homogeneity ) {
      cat( " NOT" )
   }
   cat( " fulfilled\n" )

   # symmetry
   cat( "The symmetry condition is" )
   if( !x$symmetry ) {
      cat( " NOT" )
   }
   cat( " fulfilled\n" )

   # monotonicity -- delegated to the print method of x$mono
   print( x$mono, header = FALSE )

   # concavity -- only reported when it was checked
   if( !is.null( x$concav ) ) {
      print( x$concav, header = FALSE )
   }

   invisible( x )
}
/R/print.aidsConsist.R
no_license
cran/micEconAids
R
false
false
832
r
## S3 print method for objects of class "aidsConsist": reports whether the
## theoretical consistency conditions of an (LA-)AIDS demand system hold.
##
## Args:
##   x   : an "aidsConsist" object with elements $mono (itself containing
##         $priceIndex), $addingUp, $homogeneity, $symmetry, and optionally
##         $concav.
##   ... : further arguments (currently ignored; kept for S3 compatibility).
##
## Returns: x, invisibly (conventional for print methods).
print.aidsConsist <- function( x, ... ) {

   cat( "\nChecking theoretical consistency of an " )
   # a translog ("TL") price index implies the full AIDS; any other price
   # index means a linear approximation (LA-AIDS) was used
   if( x$mono$priceIndex == "TL" ) {
      cat( "Almost Ideal Demand System (AIDS):\n" )
   } else {
      cat( "Linear Approximate Almost Ideal Demand System (LA-AIDS):\n" )
   }

   # Adding-up
   cat( "The adding-up condition is" )
   if( !x$addingUp ) {
      cat( " NOT" )
   }
   cat( " fulfilled\n" )

   # homogeneity
   cat( "The homogeneity condition is" )
   if( !x$homogeneity ) {
      cat( " NOT" )
   }
   cat( " fulfilled\n" )

   # symmetry
   cat( "The symmetry condition is" )
   if( !x$symmetry ) {
      cat( " NOT" )
   }
   cat( " fulfilled\n" )

   # monotonicity -- delegated to the print method of x$mono
   print( x$mono, header = FALSE )

   # concavity -- only reported when it was checked
   if( !is.null( x$concav ) ) {
      print( x$concav, header = FALSE )
   }

   invisible( x )
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataset.R \docType{data} \name{TGGATESsmall} \alias{TGGATESsmall} \title{TGGATESsmall dataset} \format{ ToxicoSet object } \usage{ data(TGGATESsmall) } \description{ Documentation for this dataset will be added at a later date. For now I just need this package to pass the CRAN checks! This dataset powers the example usage in the roxygen2 documentation for ToxicoGx. } \references{ Lamb et al. The Connectivity Map: using gene-expression signatures to connect small molecules, genes, and disease. Science, 2006. } \keyword{datasets}
/man/TGGATESsmall.Rd
no_license
bbyun28/ToxicoGx
R
false
true
614
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataset.R \docType{data} \name{TGGATESsmall} \alias{TGGATESsmall} \title{TGGATESsmall dataset} \format{ ToxicoSet object } \usage{ data(TGGATESsmall) } \description{ Documentation for this dataset will be added at a later date. For now I just need this package to pass the CRAN checks! This dataset powers the example usage in the roxygen2 documentation for ToxicoGx. } \references{ Lamb et al. The Connectivity Map: using gene-expression signatures to connect small molecules, genes, and disease. Science, 2006. } \keyword{datasets}
% Generated by roxygen2 (4.0.1): do not edit by hand \name{list.do} \alias{list.do} \title{Call a function with a list of arguments as provided} \usage{ list.do(.data, fun, ...) } \arguments{ \item{.data}{\code{list}} \item{fun}{The \code{function} to call} \item{...}{The additional parameters passed to \code{do.call}} } \description{ Call a function with a list of arguments as provided } \examples{ \dontrun{ x <- lapply(1:3,function(i) { c(a=i,b=i^2)}) df <- lapply(1:3,function(i) { data.frame(a=i,b=i^2,c=letters[i])}) list.do(x,rbind) list.do(x,rbind) as.list(1:10) \%>>\% list.map(x -> list.do(x,rnorm)) } }
/man/list.do.Rd
permissive
timelyportfolio/rlist
R
false
false
620
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{list.do} \alias{list.do} \title{Call a function with a list of arguments as provided} \usage{ list.do(.data, fun, ...) } \arguments{ \item{.data}{\code{list}} \item{fun}{The \code{function} to call} \item{...}{The additional parameters passed to \code{do.call}} } \description{ Call a function with a list of arguments as provided } \examples{ \dontrun{ x <- lapply(1:3,function(i) { c(a=i,b=i^2)}) df <- lapply(1:3,function(i) { data.frame(a=i,b=i^2,c=letters[i])}) list.do(x,rbind) list.do(x,rbind) as.list(1:10) \%>>\% list.map(x -> list.do(x,rnorm)) } }
# Evaluate the squared correlation-weighted term at each new point.
#
# For each element of `x`, the baseline correlation terms `corMatrix` are
# scaled elementwise by the exponential correlation function
# exp(-betaj * |x[i] - xKnown|^aj.power) and the square of the resulting
# weighted sum (cor %*% W) is returned.
#
# Args:
#   x         : numeric vector of evaluation points.
#   xKnown    : numeric vector of known (design) points.
#   betaj     : correlation scale parameter.
#   aj.power  : correlation power parameter.
#   corMatrix : baseline correlation terms, conformable with `W` under %*%.
#   W         : weight vector.
#
# Returns: a numeric vector the same length as `x`.
`Integrate1DCorSquared` <- function(x, xKnown, betaj, aj.power, corMatrix, W) {
  # vapply replaces the original index loop (which repeatedly overwrote a
  # copy of `x`); `^` replaces the non-idiomatic `**` operator
  vapply(
    x,
    function(xi) {
      cor_i <- corMatrix * exp(-betaj * abs(xi - xKnown)^aj.power)
      as.numeric(cor_i %*% W)^2
    },
    numeric(1)
  )
}
/mlegpFULL/R/Integrate1DCorSquared.R
no_license
jackdawjackdaw/mlegp-custom-libs
R
false
false
318
r
# Evaluate the squared correlation-weighted term at each new point.
#
# For each element of `x`, the baseline correlation terms `corMatrix` are
# scaled elementwise by the exponential correlation function
# exp(-betaj * |x[i] - xKnown|^aj.power) and the square of the resulting
# weighted sum (cor %*% W) is returned.
#
# Args:
#   x         : numeric vector of evaluation points.
#   xKnown    : numeric vector of known (design) points.
#   betaj     : correlation scale parameter.
#   aj.power  : correlation power parameter.
#   corMatrix : baseline correlation terms, conformable with `W` under %*%.
#   W         : weight vector.
#
# Returns: a numeric vector the same length as `x`.
`Integrate1DCorSquared` <- function(x, xKnown, betaj, aj.power, corMatrix, W) {
  # vapply replaces the original index loop (which repeatedly overwrote a
  # copy of `x`); `^` replaces the non-idiomatic `**` operator
  vapply(
    x,
    function(xi) {
      cor_i <- corMatrix * exp(-betaj * abs(xi - xKnown)^aj.power)
      as.numeric(cor_i %*% W)^2
    },
    numeric(1)
  )
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutCovariate.indel.freq.table.muts.R
\name{mutCovariate.indel.freq.table.muts}
\alias{mutCovariate.indel.freq.table.muts}
\title{Prepare covariates matrix for mutated sites in a specific chromosome.}
\usage{
mutCovariate.indel.freq.table.muts(
  continuous.features,
  discrete.features,
  sample.specific.features,
  polyAT,
  polyCG,
  sites
)
}
\arguments{
\item{continuous.features}{Continuous epigenetic features selected for model fitting.}

\item{discrete.features}{Discrete epigenetic features selected for model fitting.}

\item{sample.specific.features}{Sample-specific features.}

\item{polyAT}{All polyA or polyT positions in whole genome.}

\item{polyCG}{All polyC or polyG positions in whole genome.}

\item{sites}{Mutated sites in a specific chromosome.}
}
\value{
Covariate matrix for mutated sites.
}
\description{
Prepare covariates matrix for mutated sites in a specific chromosome.
}
/MutSpot_Rpackage/man/mutCovariate.indel.freq.table.muts.Rd
no_license
skandlab/MutSpot
R
false
true
980
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutCovariate.indel.freq.table.muts.R
\name{mutCovariate.indel.freq.table.muts}
\alias{mutCovariate.indel.freq.table.muts}
\title{Prepare covariates matrix for mutated sites in a specific chromosome.}
\usage{
mutCovariate.indel.freq.table.muts(
  continuous.features,
  discrete.features,
  sample.specific.features,
  polyAT,
  polyCG,
  sites
)
}
\arguments{
\item{continuous.features}{Continuous epigenetic features selected for model fitting.}

\item{discrete.features}{Discrete epigenetic features selected for model fitting.}

\item{sample.specific.features}{Sample-specific features.}

\item{polyAT}{All polyA or polyT positions in whole genome.}

\item{polyCG}{All polyC or polyG positions in whole genome.}

\item{sites}{Mutated sites in a specific chromosome.}
}
\value{
Covariate matrix for mutated sites.
}
\description{
Prepare covariates matrix for mutated sites in a specific chromosome.
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/content_objects.R \name{TestOrder} \alias{TestOrder} \title{TestOrder Object} \usage{ TestOrder(customer = NULL, lineItems = NULL, paymentMethod = NULL, predefinedDeliveryAddress = NULL, promotions = NULL, shippingCost = NULL, shippingCostTax = NULL, shippingOption = NULL) } \arguments{ \item{customer}{The details of the customer who placed the order} \item{lineItems}{Line items that are ordered} \item{paymentMethod}{The details of the payment method} \item{predefinedDeliveryAddress}{Identifier of one of the predefined delivery addresses for the delivery} \item{promotions}{The details of the merchant provided promotions applied to the order} \item{shippingCost}{The total cost of shipping for all items} \item{shippingCostTax}{The tax for the total shipping cost} \item{shippingOption}{The requested shipping option} } \value{ TestOrder object } \description{ TestOrder Object } \details{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}} No description }
/googlecontentv2.auto/man/TestOrder.Rd
permissive
Phippsy/autoGoogleAPI
R
false
true
1,072
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/content_objects.R \name{TestOrder} \alias{TestOrder} \title{TestOrder Object} \usage{ TestOrder(customer = NULL, lineItems = NULL, paymentMethod = NULL, predefinedDeliveryAddress = NULL, promotions = NULL, shippingCost = NULL, shippingCostTax = NULL, shippingOption = NULL) } \arguments{ \item{customer}{The details of the customer who placed the order} \item{lineItems}{Line items that are ordered} \item{paymentMethod}{The details of the payment method} \item{predefinedDeliveryAddress}{Identifier of one of the predefined delivery addresses for the delivery} \item{promotions}{The details of the merchant provided promotions applied to the order} \item{shippingCost}{The total cost of shipping for all items} \item{shippingCostTax}{The tax for the total shipping cost} \item{shippingOption}{The requested shipping option} } \value{ TestOrder object } \description{ TestOrder Object } \details{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}} No description }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{bikes} \alias{bikes} \title{Bike Sharing Data} \format{A dataframe with 951 rows and 3 columns. \describe{ \item{\code{Day}}{The day of the week of the day.} \item{\code{Weather}}{The type of weather on that day. Coded with respect to the following values: \itemize{ \item{\code{1}}{clear, few clouds, partly cloudy} \item{\code{2}}{mist & cloudy, mist & broken clouds, mist & few clouds, mist} \item{\code{3}}{light snow, light rain & Thunderstorm & scattered clouds, light rain & scattered clouds} \item{\code{4}}{heavy rain & ice pellets & thunderstorm & mist, snow & fog} }} \item{\code{Casual}}{The number of casual bike users} }} \usage{ data("bikes") } \description{ A dataset information about bicycle rentals for days in the years 2011 and 2012. } \note{ The documentation does not auto-update. If one changes the base file, this documentation will no longer remain accurate. Project 2 data. } \references{ Fanaee-T, Hadi and Gamma, J "Event Labeling Combining Ensemble Detectors and background knowledge", Progress in Articial Intelligence (2013); \url{https://capitalbikeshare.com/system-data} Data Filtered by Philipp Burchhardt, Kayla Frisoli and Frank Kovacs. } \keyword{datasets}
/man/bikes.Rd
permissive
frank113/cmu202
R
false
true
1,350
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{bikes} \alias{bikes} \title{Bike Sharing Data} \format{A dataframe with 951 rows and 3 columns. \describe{ \item{\code{Day}}{The day of the week of the day.} \item{\code{Weather}}{The type of weather on that day. Coded with respect to the following values: \itemize{ \item{\code{1}}{clear, few clouds, partly cloudy} \item{\code{2}}{mist & cloudy, mist & broken clouds, mist & few clouds, mist} \item{\code{3}}{light snow, light rain & Thunderstorm & scattered clouds, light rain & scattered clouds} \item{\code{4}}{heavy rain & ice pellets & thunderstorm & mist, snow & fog} }} \item{\code{Casual}}{The number of casual bike users} }} \usage{ data("bikes") } \description{ A dataset information about bicycle rentals for days in the years 2011 and 2012. } \note{ The documentation does not auto-update. If one changes the base file, this documentation will no longer remain accurate. Project 2 data. } \references{ Fanaee-T, Hadi and Gamma, J "Event Labeling Combining Ensemble Detectors and background knowledge", Progress in Articial Intelligence (2013); \url{https://capitalbikeshare.com/system-data} Data Filtered by Philipp Burchhardt, Kayla Frisoli and Frank Kovacs. } \keyword{datasets}
# Build summary statistics for a series and dispatch to a changepoint search.
#
# Args:
#   data      : time-ordered data; coredata() is applied, so zoo/xts objects
#               are accepted.
#   method    : "PELT" or "BinSeg"; any other value is an error.
#   pen.value : penalty value forwarded to the search routine.
#   costfunc  : cost function forwarded to PELT()/BINSEG().
#   minseglen : minimum segment length.
#   Q         : maximum number of changepoints (used by BinSeg only).
#   var       : if non-zero, used as the fixed centering value `mu`;
#               otherwise the sample mean of the data is used.
#               NOTE(review): the name `var` suggests a variance, but it is
#               used here as a mean -- confirm intent with callers.
#   shape     : shape parameter forwarded to the cost function.
#
# Returns: whatever PELT()/BINSEG() return (defined elsewhere in the package).
data_input <- function(data, method, pen.value, costfunc, minseglen, Q, var=0, shape=1){
  if(var !=0){
    mu<-var
  }else{
    mu <- mean(data)
  }
  # columns: cumulative sum, cumulative sum of squares, and cumulative sum of
  # squared deviations from mu (each padded with a leading 0)
  sumstat=cbind(c(0,cumsum(coredata(data))),c(0,cumsum(coredata(data)^2)),cumsum(c(0,(coredata(data)-mu)^2)))
  if(method=="PELT"){
    #out=PELT.meanvar.norm(coredata(data),pen.value)
    out=PELT(sumstat,pen=pen.value,cost_func = costfunc,minseglen=minseglen, shape=shape) ## K NEW ##
    #cpts=out[[2]]
  } else if(method=="BinSeg"){
    out=BINSEG(sumstat,pen=pen.value,cost_func = costfunc,minseglen=minseglen,Q=Q, shape=shape) ## K NEW ##
    #cpts=out[[2]]
    # out=binseg.meanvar.norm(coredata(data),Q,pen.value)
    # if(out$op.cpts==0){cpts=n}
    # else{cpts=c(sort(out$cps[1,1:out$op.cpts]),n)}
    # the above is now inside the BINSEG function
  } else {
    stop('Unknown method, should be either PELT or BinSeg.')
  }
  return(out)
}
/R/data_input.R
no_license
diego-urgell/changepointNegbin
R
false
false
917
r
# Build summary statistics for a series and dispatch to a changepoint search.
#
# Args:
#   data      : time-ordered data; coredata() is applied, so zoo/xts objects
#               are accepted.
#   method    : "PELT" or "BinSeg"; any other value is an error.
#   pen.value : penalty value forwarded to the search routine.
#   costfunc  : cost function forwarded to PELT()/BINSEG().
#   minseglen : minimum segment length.
#   Q         : maximum number of changepoints (used by BinSeg only).
#   var       : if non-zero, used as the fixed centering value `mu`;
#               otherwise the sample mean of the data is used.
#               NOTE(review): the name `var` suggests a variance, but it is
#               used here as a mean -- confirm intent with callers.
#   shape     : shape parameter forwarded to the cost function.
#
# Returns: whatever PELT()/BINSEG() return (defined elsewhere in the package).
data_input <- function(data, method, pen.value, costfunc, minseglen, Q, var=0, shape=1){
  if(var !=0){
    mu<-var
  }else{
    mu <- mean(data)
  }
  # columns: cumulative sum, cumulative sum of squares, and cumulative sum of
  # squared deviations from mu (each padded with a leading 0)
  sumstat=cbind(c(0,cumsum(coredata(data))),c(0,cumsum(coredata(data)^2)),cumsum(c(0,(coredata(data)-mu)^2)))
  if(method=="PELT"){
    #out=PELT.meanvar.norm(coredata(data),pen.value)
    out=PELT(sumstat,pen=pen.value,cost_func = costfunc,minseglen=minseglen, shape=shape) ## K NEW ##
    #cpts=out[[2]]
  } else if(method=="BinSeg"){
    out=BINSEG(sumstat,pen=pen.value,cost_func = costfunc,minseglen=minseglen,Q=Q, shape=shape) ## K NEW ##
    #cpts=out[[2]]
    # out=binseg.meanvar.norm(coredata(data),Q,pen.value)
    # if(out$op.cpts==0){cpts=n}
    # else{cpts=c(sort(out$cps[1,1:out$op.cpts]),n)}
    # the above is now inside the BINSEG function
  } else {
    stop('Unknown method, should be either PELT or BinSeg.')
  }
  return(out)
}
# Load and tidy daily air-pollution / meteorology data and Korean holidays.
library(tidyverse)

# Daily mean air-pollution and meteorological measurements, with columns
# renamed to short analysis-friendly names, then to presentation names.
daily_air <- read_rds("./After wrangling/airpollution_meteor_mean.rds") %>%
  rename(Date = date,
         SO2 = SO2_Avg,
         CO = CO_Avg,
         O3 = O3_Avg,
         NO2 = NO2_Avg,
         PM10 = PM10_Avg,
         PM25 = PM25_Avg,
         Humid = Humidity_Avg,
         Prec = Precipitation,
         Temp = Temperature_Avg,
         Temp_range = Temperature_range) %>%
  rename(
    Temperature = Temp,
    `Temperature gap` = Temp_range,
    Precipitation = Prec,
    `Relative humidity` = Humid
  ) %>%
  mutate(
    # ppm to ppb. ppb = 1000*ppm
    SO2 = 1000*SO2, # sulfur dioxide
    CO = 1000*CO,   # carbon monoxide
    O3 = 1000*O3,   # ozone
    NO2 = 1000*NO2  # nitrogen dioxide
  )

# Korean public holidays (2004-2022), keeping only the date and holiday name
holidays <- read_csv("./After wrangling/korean_holiday_2004-2022_adjust.csv") %>%
  rename(holiday = dateName, Date = date) %>%
  select(Date, holiday)
/Source_useful_data.R
permissive
yangboyubyron/Data_Useful
R
false
false
951
r
# Load and tidy daily air-pollution / meteorology data and Korean holidays.
library(tidyverse)

# Daily mean air-pollution and meteorological measurements, with columns
# renamed to short analysis-friendly names, then to presentation names.
daily_air <- read_rds("./After wrangling/airpollution_meteor_mean.rds") %>%
  rename(Date = date,
         SO2 = SO2_Avg,
         CO = CO_Avg,
         O3 = O3_Avg,
         NO2 = NO2_Avg,
         PM10 = PM10_Avg,
         PM25 = PM25_Avg,
         Humid = Humidity_Avg,
         Prec = Precipitation,
         Temp = Temperature_Avg,
         Temp_range = Temperature_range) %>%
  rename(
    Temperature = Temp,
    `Temperature gap` = Temp_range,
    Precipitation = Prec,
    `Relative humidity` = Humid
  ) %>%
  mutate(
    # ppm to ppb. ppb = 1000*ppm
    SO2 = 1000*SO2, # sulfur dioxide
    CO = 1000*CO,   # carbon monoxide
    O3 = 1000*O3,   # ozone
    NO2 = 1000*NO2  # nitrogen dioxide
  )

# Korean public holidays (2004-2022), keeping only the date and holiday name
holidays <- read_csv("./After wrangling/korean_holiday_2004-2022_adjust.csv") %>%
  rename(holiday = dateName, Date = date) %>%
  select(Date, holiday)
# Analysis of the individualism (idv) dimension of Hofstede's cultural
# dimensions data: summary statistics plus the countries with the extreme
# scores.

#clears global environment
#rm(list =ls())
library("dplyr")

# Hofstede's cultural dimensions
# From: https://geerthofstede.com/research-and-vsm/dimension-data-matrix/
hofstede <- read.csv(
  "https://geerthofstede.com/wp-content/uploads/2016/08/6-dimensions-for-website-2015-08-16.csv",
  stringsAsFactors = FALSE,
  sep = ";" #the file isn't separated by commas, but instead semi-colons
)

# Individualism scores per country; non-numeric entries become NA and are
# dropped, then rows are sorted ascending by score
idv_data_df <- hofstede %>%
  select(country, idv) %>%
  mutate(idv = as.numeric(idv)) %>%
  filter(!is.na(idv)) %>%
  arrange(idv)

# Average / minimum / maximum individualism score across all countries
idv_summary_df <- idv_data_df %>%
  summarize(avg_idv = mean(idv),
            min_idv = min(idv),
            max_idv = max(idv))

# Countries with the highest and lowest individualism scores
max_country <- idv_data_df %>%
  filter(idv == max(idv)) %>%
  pull(country)
min_country <- idv_data_df %>%
  filter(idv == min(idv)) %>%
  pull(country)
/analysis.R
no_license
datru0ng/rmddemo
R
false
false
815
r
# Analysis of the individualism (idv) dimension of Hofstede's cultural
# dimensions data: summary statistics plus the countries with the extreme
# scores.

#clears global environment
#rm(list =ls())
library("dplyr")

# Hofstede's cultural dimensions
# From: https://geerthofstede.com/research-and-vsm/dimension-data-matrix/
hofstede <- read.csv(
  "https://geerthofstede.com/wp-content/uploads/2016/08/6-dimensions-for-website-2015-08-16.csv",
  stringsAsFactors = FALSE,
  sep = ";" #the file isn't separated by commas, but instead semi-colons
)

# Individualism scores per country; non-numeric entries become NA and are
# dropped, then rows are sorted ascending by score
idv_data_df <- hofstede %>%
  select(country, idv) %>%
  mutate(idv = as.numeric(idv)) %>%
  filter(!is.na(idv)) %>%
  arrange(idv)

# Average / minimum / maximum individualism score across all countries
idv_summary_df <- idv_data_df %>%
  summarize(avg_idv = mean(idv),
            min_idv = min(idv),
            max_idv = max(idv))

# Countries with the highest and lowest individualism scores
max_country <- idv_data_df %>%
  filter(idv == max(idv)) %>%
  pull(country)
min_country <- idv_data_df %>%
  filter(idv == min(idv)) %>%
  pull(country)
# Extract listing URLs from a parsed results page.
#
# Given an xml2 document (`a_html`), finds every listing-heading anchor and
# returns its href attribute as a character vector of URLs.
fn_extract_page_ref <- function(a_html) {
  href_xpath <- paste0(
    "//div[@class = 'ListCard-content group']",
    "//div[@class = 'ListCard-heading']/p/a/@href"
  )
  matched_nodes <- xml2::xml_find_all(a_html, href_xpath)
  xml2::xml_text(matched_nodes)
}
/r_trade_me/fn_extract_page_ref.R
no_license
thefactmachine/holiday_house
R
false
false
356
r
# Extract listing URLs from a parsed results page.
#
# Given an xml2 document (`a_html`), finds every listing-heading anchor and
# returns its href attribute as a character vector of URLs.
fn_extract_page_ref <- function(a_html) {
  href_xpath <- paste0(
    "//div[@class = 'ListCard-content group']",
    "//div[@class = 'ListCard-heading']/p/a/@href"
  )
  matched_nodes <- xml2::xml_find_all(a_html, href_xpath)
  xml2::xml_text(matched_nodes)
}
# server.R -- Shiny server for an electoral-college choropleth map.
library(dplyr)
library(plotly)
library(shiny)

# Read in data
#setwd('~/Documents/info-201/m14-shiny/exercise-3/')
source('./scripts/buildMap.R') # provides BuildMap()
df <- read.csv('./data/electoral_college.csv', stringsAsFactors = FALSE)
state.codes <- read.csv('./data/state_codes.csv', stringsAsFactors = FALSE)

# Join together state.codes and df
joined.data <- left_join(df, state.codes, by="state")

# Compute the electoral votes per 100K people in each state
joined.data <- joined.data %>% mutate(ratio = votes/population * 100000)

# Start shinyServer
shinyServer(function(input, output) {
  # Render a plotly object that returns your map
  output$map <- renderPlotly({
    return(BuildMap(joined.data, 'population'))
  })
})
/exercise-3/server.R
permissive
lleontan/m14-shiny
R
false
false
730
r
# server.R -- Shiny server for an electoral-college choropleth map.
library(dplyr)
library(plotly)
library(shiny)

# Read in data
#setwd('~/Documents/info-201/m14-shiny/exercise-3/')
source('./scripts/buildMap.R') # provides BuildMap()
df <- read.csv('./data/electoral_college.csv', stringsAsFactors = FALSE)
state.codes <- read.csv('./data/state_codes.csv', stringsAsFactors = FALSE)

# Join together state.codes and df
joined.data <- left_join(df, state.codes, by="state")

# Compute the electoral votes per 100K people in each state
joined.data <- joined.data %>% mutate(ratio = votes/population * 100000)

# Start shinyServer
shinyServer(function(input, output) {
  # Render a plotly object that returns your map
  output$map <- renderPlotly({
    return(BuildMap(joined.data, 'population'))
  })
})
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/additionalTable1.R \name{additionalResult} \alias{additionalResult} \title{Generate result for revision} \usage{ additionalResult( connectionDetails = connectionDetails, cdmDatabaseSchema = cdmDatabaseSchema, cohortDatabaseSchema = cohortDatabaseSchema, cohortTable = cohortTable, oracleTempSchema = oracleTempSchema, outputFolder = outputFolder, databaseId = databaseId, minCellCount = 5 ) } \arguments{ \item{connectionDetails}{An object of type \code{connectionDetails} as created using the \code{\link[DatabaseConnector]{createConnectionDetails}} function in the DatabaseConnector package.} \item{cdmDatabaseSchema}{Schema name where your patient-level data in OMOP CDM format resides. Note that for SQL Server, this should include both the database and schema name, for example 'cdm_data.dbo'.} \item{cohortDatabaseSchema}{Schema name where intermediate data can be stored. You will need to have write priviliges in this schema. Note that for SQL Server, this should include both the database and schema name, for example 'cdm_data.dbo'.} \item{cohortTable}{The name of the table that will be created in the work database schema. This table will hold the exposure and outcome cohorts used in this study.} \item{oracleTempSchema}{Should be used in Oracle to specify a schema where the user has write priviliges for storing temporary tables.} \item{outputFolder}{Name of local folder to place results; make sure to use forward slashes (/). Do not use a folder on a network drive since this greatly impacts performance.} \item{databaseId}{A short string for identifying the database (e.g. 
'Synpuf').} \item{minCellCount}{The minimum number of subjects contributing to a count before it can be included in packaged results.} } \description{ Generate result for revision } \details{ Generate result for revision The \code{createCohorts}, \code{synthesizePositiveControls}, \code{runAnalyses}, and \code{runDiagnostics} arguments are intended to be used to run parts of the full study at a time, but none of the parts are considerd to be optional. } \examples{ \dontrun{ connectionDetails <- createConnectionDetails(dbms = "postgresql", user = "joe", password = "secret", server = "myserver") execute(connectionDetails, cdmDatabaseSchema = "cdm_data", cohortDatabaseSchema = "study_results", cohortTable = "cohort", oracleTempSchema = NULL, outputFolder = "c:/temp/study_results", maxCores = 4) } }
/man/additionalResult.Rd
permissive
JaehyeongCho/TicagrelorVsClopidogrel
R
false
true
2,686
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/additionalTable1.R \name{additionalResult} \alias{additionalResult} \title{Generate result for revision} \usage{ additionalResult( connectionDetails = connectionDetails, cdmDatabaseSchema = cdmDatabaseSchema, cohortDatabaseSchema = cohortDatabaseSchema, cohortTable = cohortTable, oracleTempSchema = oracleTempSchema, outputFolder = outputFolder, databaseId = databaseId, minCellCount = 5 ) } \arguments{ \item{connectionDetails}{An object of type \code{connectionDetails} as created using the \code{\link[DatabaseConnector]{createConnectionDetails}} function in the DatabaseConnector package.} \item{cdmDatabaseSchema}{Schema name where your patient-level data in OMOP CDM format resides. Note that for SQL Server, this should include both the database and schema name, for example 'cdm_data.dbo'.} \item{cohortDatabaseSchema}{Schema name where intermediate data can be stored. You will need to have write priviliges in this schema. Note that for SQL Server, this should include both the database and schema name, for example 'cdm_data.dbo'.} \item{cohortTable}{The name of the table that will be created in the work database schema. This table will hold the exposure and outcome cohorts used in this study.} \item{oracleTempSchema}{Should be used in Oracle to specify a schema where the user has write priviliges for storing temporary tables.} \item{outputFolder}{Name of local folder to place results; make sure to use forward slashes (/). Do not use a folder on a network drive since this greatly impacts performance.} \item{databaseId}{A short string for identifying the database (e.g. 
'Synpuf').} \item{minCellCount}{The minimum number of subjects contributing to a count before it can be included in packaged results.} } \description{ Generate result for revision } \details{ Generate result for revision The \code{createCohorts}, \code{synthesizePositiveControls}, \code{runAnalyses}, and \code{runDiagnostics} arguments are intended to be used to run parts of the full study at a time, but none of the parts are considerd to be optional. } \examples{ \dontrun{ connectionDetails <- createConnectionDetails(dbms = "postgresql", user = "joe", password = "secret", server = "myserver") execute(connectionDetails, cdmDatabaseSchema = "cdm_data", cohortDatabaseSchema = "study_results", cohortTable = "cohort", oracleTempSchema = NULL, outputFolder = "c:/temp/study_results", maxCores = 4) } }
library(broman) ### Name: xlimlabel ### Title: Calulate horizontal limit in user coordinates for adding labels ### Aliases: xlimlabel ### ** Examples x <- runif(15, -1, 1)*10 xlabs <- sapply(sample(1:20, 15, replace=TRUE), function(a) paste(LETTERS[1:a], collapse="")) par(mfrow=c(2,1), las=1) ## Labels to the left ## xlims <- xlimlabel(x, xlabs, pos=2) plot(x, 1:length(x), xlim=xlims, ylab="Index") text(x, 1:length(x), xlabs, pos=2) ## Labels to the right ## xlims <- xlimlabel(x, xlabs, pos=4, cex=0.7) plot(x, 1:length(x), xlim=xlims, ylab="Index") text(x, 1:length(x), xlabs, pos=4, cex=0.7)
/data/genthat_extracted_code/broman/examples/xlimlabel.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
623
r
library(broman) ### Name: xlimlabel ### Title: Calulate horizontal limit in user coordinates for adding labels ### Aliases: xlimlabel ### ** Examples x <- runif(15, -1, 1)*10 xlabs <- sapply(sample(1:20, 15, replace=TRUE), function(a) paste(LETTERS[1:a], collapse="")) par(mfrow=c(2,1), las=1) ## Labels to the left ## xlims <- xlimlabel(x, xlabs, pos=2) plot(x, 1:length(x), xlim=xlims, ylab="Index") text(x, 1:length(x), xlabs, pos=2) ## Labels to the right ## xlims <- xlimlabel(x, xlabs, pos=4, cex=0.7) plot(x, 1:length(x), xlim=xlims, ylab="Index") text(x, 1:length(x), xlabs, pos=4, cex=0.7)
library(shiny) library(ggplot2) library(datasets) shinyServer(function(input, output, session) { inputData <- reactive({ trees[, c(input$xcol, input$ycol)] }) output$plot1 <- renderPlot({ plot(inputData(), pch = 20, cex = 2, xlab = paste("X Variable:", input$xcol, sep=" "), ylab = paste("Y Variable:", input$ycol, sep=" ")) }) })
/server.R
no_license
EdKerrie/ddp-shiny-app
R
false
false
375
r
library(shiny) library(ggplot2) library(datasets) shinyServer(function(input, output, session) { inputData <- reactive({ trees[, c(input$xcol, input$ycol)] }) output$plot1 <- renderPlot({ plot(inputData(), pch = 20, cex = 2, xlab = paste("X Variable:", input$xcol, sep=" "), ylab = paste("Y Variable:", input$ycol, sep=" ")) }) })
get_catalog_ipums <- function( data_name = "ipums" , output_dir , ... ){ if( !( 'project' %in% names(list(...)) ) || !( list(...)[["project"]] %in% c( "usa" , "cps" , "international" ) ) ) stop( "`project` parameter must be specified. choices are 'usa' , 'cps' , 'international'" ) if( !( 'your_email' %in% names(list(...)) ) ) stop( "`your_email` parameter must be specified. create an account at https://www.ipums.org/" ) if( !( 'your_password' %in% names(list(...)) ) ) stop( "`your_password` parameter must be specified. create an account at https://www.ipums.org/" ) project <- list(...)[["project"]] your_email <- list(...)[["your_email"]] your_password <- list(...)[["your_password"]] this_cookie <- authenticate_ipums( your_email = your_email , your_password = your_password , project = project ) this_download <- httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/extract_requests/download" ) , httr::set_cookies( .cookies = this_cookie ) ) catalog <- rvest::html_table( httr::content( this_download ) )[[2]] names( catalog ) <- stringr::str_trim( paste( names( catalog ) , catalog[ 1 , ] ) ) catalog <- catalog[ -1 , ] catalog[ , ] <- sapply( catalog[ , ] , function( z ) gsub( "( +)" , " " , gsub( "\n" , " " , z ) ) ) names( catalog ) <- gsub( " (click to edit)" , "" , names( catalog ) , fixed = TRUE ) catalog <- catalog[ , !( names( catalog ) %in% "Hide selections Show all" ) ] project_sub <- ifelse( project == 'international' , 'ipumsi' , project ) catalog$full_url <- ifelse( catalog[ , "Formatted Data" ] == "CSV" , paste0( "https://" , project , ".ipums.org/" , project , "-action/downloads/extract_files/" , project_sub , "_" , stringr::str_pad( catalog[ , "Extract Number" ] , 5 , pad = '0' ) , ".csv.gz" ) , NA ) catalog$xml_url <- gsub( "\\.csv\\.gz" , ".xml" , catalog$full_url ) catalog$db_tablename <- ifelse( is.na( catalog$full_url ) , NA , gsub( "( +)" , "_" , stringr::str_trim( gsub( "[^a-z0-9]" , " " , tolower( catalog$Description ) ) ) ) 
) catalog$output_filename <- ifelse( is.na( catalog$full_url ) , NA , paste0( output_dir , '/' , catalog$db_tablename , '.rds' ) ) catalog$dbfolder <- ifelse( is.na( catalog$full_url ) , NA , paste0( output_dir , "/MonetDB" ) ) httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/users/logout" ) , httr::set_cookies( .cookies = this_cookie ) ) catalog } lodown_ipums <- function( data_name = "ipums" , catalog , ... ){ if( !( 'project' %in% names(list(...)) ) || !( list(...)[["project"]] %in% c( "usa" , "cps" , "international" ) ) ) stop( "`project` parameter must be specified. choices are 'usa' , 'cps' , 'international'" ) if( !( 'your_email' %in% names(list(...)) ) ) stop( "`your_email` parameter must be specified. create an account at https://www.ipums.org/" ) if( !( 'your_password' %in% names(list(...)) ) ) stop( "`your_password` parameter must be specified. create an account at https://www.ipums.org/" ) project <- list(...)[["project"]] your_email <- list(...)[["your_email"]] your_password <- list(...)[["your_password"]] this_cookie <- authenticate_ipums( your_email = your_email , your_password = your_password , project = project ) tf <- tempfile() ; tf2 <- tempfile() for ( i in seq_len( nrow( catalog ) ) ){ if( !grepl( 'CSV' , catalog[ i , 'Formatted Data' ] ) ){ cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " skipped because no csv file available.\n\nif you intended to import this extract, please visit\n`https://" , project , ".ipums.org/" , project , "-action/extract_requests/download` and revise or resubmit with the csv option checked.\r\n\n" ) ) } else { csv_filename <- gsub( "\\.rds" , ".csv" , catalog[ i , 'output_filename' ] ) # download the actual file httr::GET( catalog[ i , 'full_url' ] , httr::write_disk( tf , overwrite = TRUE ) , httr::set_cookies( .cookies = this_cookie ) , httr::progress() ) # store the file to the local disk R.utils::gunzip( tf , csv_filename , overwrite = TRUE ) xml <- 
httr::GET( catalog[ i , 'xml_url' ] , httr::set_cookies( .cookies = this_cookie ) ) csv_file_structure <- unlist( XML::xpathSApply( XML::xmlParse( xml ) , "//*//*//*//*" , XML::xmlGetAttr , "type" ) ) csv_file_structure <- csv_file_structure[ csv_file_structure != 'rectangular' ] # simple check that the stored csv file matches the loaded structure if( !( length( csv_file_structure ) == ncol( read.csv( csv_filename , nrow = 10 ) ) ) ) stop( "number of columns in final csv file does not match ipums structure xml file" ) # decide whether column types should be character or numeric colTypes <- ifelse( csv_file_structure == 'character' , 'CLOB' , 'DOUBLE PRECISION' ) # determine the column names from the csv file cn <- toupper( names( read.csv( csv_filename , nrow = 1 ) ) ) # for any column names that conflict with a monetdb reserved word, add an underscore cn[ cn %in% getFromNamespace( "reserved_monetdb_keywords" , "MonetDBLite" ) ] <- paste0( cn[ cn %in% getFromNamespace( "reserved_monetdb_keywords" , "MonetDBLite" ) ] , "_" ) # force all column names to be lowercase, since MonetDB.R is now case-sensitive cn <- tolower( cn ) if( !is.na( catalog[ i , 'output_filename' ] ) ){ # read in as a data.frame x <- data.frame( readr::read_csv( csv_filename , col_names = cn , col_types = paste0( ifelse( csv_file_structure == 'character' , 'c' , 'd' ) , collapse = "" ) , skip = 1 ) ) saveRDS( x , file = catalog[ i , 'output_filename' ] ) catalog[ i , 'case_count' ] <- nrow( x ) cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " stored at '" , catalog[ i , 'output_filename' ] , "'\r\n\n" ) ) } if( !is.na( catalog[ i , 'dbfolder' ] ) ){ # open the connection to the monetdblite database db <- DBI::dbConnect( MonetDBLite::MonetDBLite() , catalog[ i , 'dbfolder' ] ) # paste column names and column types together sequentially colDecl <- paste( cn , colTypes ) # construct a character string containing the create table command sql_create_table <- sprintf( paste( 
"CREATE TABLE" , catalog[ i , 'db_tablename' ] , "(%s)" ) , paste( colDecl , collapse = ", " ) ) # construct the table in the database DBI::dbSendQuery( db , sql_create_table ) # import the csv file into the database. DBI::dbSendQuery( db , paste0( "COPY OFFSET 2 INTO " , catalog[ i , 'db_tablename' ] , " FROM '" , normalizePath( csv_filename ) , "' USING DELIMITERS ',','\\n','\"' NULL AS ''" # , " BEST EFFORT" # <-- if your import breaks for some reason, # you could try uncommenting the preceding line ) ) # count the number of lines in the csv file on your local disk csv_lines <- R.utils::countLines( csv_filename ) # count the number of records in the imported table dbtable_lines <- DBI::dbGetQuery( db , paste( 'SELECT COUNT(*) FROM' , catalog[ i , 'db_tablename' ] ) )[ 1 , 1 ] # the imported table should have one fewer line than the csv file, # because the csv file has headers stopifnot( csv_lines == dbtable_lines + 1 ) catalog[ i , 'case_count' ] <- dbtable_lines DBI::dbDisconnect( db , shutdown = TRUE ) cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " stored in '" , catalog[ i , 'dbfolder' ] , "'\r\n\n" ) ) } # delete the temporary files suppressWarnings( file.remove( tf ) ) } } httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/users/logout" ) , httr::set_cookies( .cookies = this_cookie ) ) catalog } # thanks to the amazing respondents on stackoverflow for this algorithm # http://stackoverflow.com/questions/34829920/how-to-authenticate-a-shibboleth-multi-hostname-website-with-httr-in-r authenticate_ipums <- function( your_email , your_password , project ){ tf <- tempfile() this_page <- httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/menu" ) ) writeBin( this_page$content , tf ) if( any( grepl( "Logout" , readLines( tf ) ) ) ) return( invisible( TRUE ) ) httr::set_config( httr::config( ssl_verifypeer = 0L ) ) # get first page # p1 <- httr::GET( paste0( "https://" , project , 
".ipums.org/" , project , "-action/users/login" ) , httr::verbose( info = TRUE ) ) p1 <- httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/users/login" ) ) # post login credentials b2 <- list( "j_username" = your_email , "j_password" = your_password ) c2 <- c( JSESSIONID = p1$cookies[ p1$cookies$domain=="#HttpOnly_live.identity.popdata.org" , ]$value , `_idp_authn_lc_key` = p1$cookies[ p1$cookies$domain == "live.identity.popdata.org" , ]$value ) p2 <- httr::POST( p1$url , body = b2 , httr::set_cookies( .cookies = c2 ) , encode = "form" ) # parse hidden fields h2 <- xml2::read_html( p2$content ) form <- rvest::html_form(h2) # post hidden fields b3 <- list( "RelayState" = form[[1]]$fields[[1]]$value , "SAMLResponse" = form[[1]]$fields[[2]]$value ) c3 <- c( JSESSIONID = p1$cookies[ p1$cookies$domain == "#HttpOnly_live.identity.popdata.org" , ]$value , `_idp_session` = p2$cookies[ p2$cookies$name == "_idp_session" , ]$value , `_idp_authn_lc_key` = p2$cookies[p2$cookies$name == "_idp_authn_lc_key" , ]$value ) p3 <- httr::POST( form[[1]]$url , body = b3 , httr::set_cookies( .cookies = c3 ) , encode = "form" ) # get interesting page c4 <- c( JSESSIONID = p3$cookies[p1$cookies$domain==paste0( project , ".ipums.org" ) && p3$cookies$name == "JSESSIONID" , ]$value , `_idp_session` = p3$cookies[ p3$cookies$name == "_idp_session" , ]$value , `_idp_authn_lc_key` = p3$cookies[ p3$cookies$name == "_idp_authn_lc_key" , ]$value ) p4 <- httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/menu" ) , httr::set_cookies( .cookies = c4 ) ) # return the appropriate cookies c4 }
/R/ipums.R
no_license
jjsjaime/lodown
R
false
false
10,375
r
get_catalog_ipums <- function( data_name = "ipums" , output_dir , ... ){ if( !( 'project' %in% names(list(...)) ) || !( list(...)[["project"]] %in% c( "usa" , "cps" , "international" ) ) ) stop( "`project` parameter must be specified. choices are 'usa' , 'cps' , 'international'" ) if( !( 'your_email' %in% names(list(...)) ) ) stop( "`your_email` parameter must be specified. create an account at https://www.ipums.org/" ) if( !( 'your_password' %in% names(list(...)) ) ) stop( "`your_password` parameter must be specified. create an account at https://www.ipums.org/" ) project <- list(...)[["project"]] your_email <- list(...)[["your_email"]] your_password <- list(...)[["your_password"]] this_cookie <- authenticate_ipums( your_email = your_email , your_password = your_password , project = project ) this_download <- httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/extract_requests/download" ) , httr::set_cookies( .cookies = this_cookie ) ) catalog <- rvest::html_table( httr::content( this_download ) )[[2]] names( catalog ) <- stringr::str_trim( paste( names( catalog ) , catalog[ 1 , ] ) ) catalog <- catalog[ -1 , ] catalog[ , ] <- sapply( catalog[ , ] , function( z ) gsub( "( +)" , " " , gsub( "\n" , " " , z ) ) ) names( catalog ) <- gsub( " (click to edit)" , "" , names( catalog ) , fixed = TRUE ) catalog <- catalog[ , !( names( catalog ) %in% "Hide selections Show all" ) ] project_sub <- ifelse( project == 'international' , 'ipumsi' , project ) catalog$full_url <- ifelse( catalog[ , "Formatted Data" ] == "CSV" , paste0( "https://" , project , ".ipums.org/" , project , "-action/downloads/extract_files/" , project_sub , "_" , stringr::str_pad( catalog[ , "Extract Number" ] , 5 , pad = '0' ) , ".csv.gz" ) , NA ) catalog$xml_url <- gsub( "\\.csv\\.gz" , ".xml" , catalog$full_url ) catalog$db_tablename <- ifelse( is.na( catalog$full_url ) , NA , gsub( "( +)" , "_" , stringr::str_trim( gsub( "[^a-z0-9]" , " " , tolower( catalog$Description ) ) ) ) 
) catalog$output_filename <- ifelse( is.na( catalog$full_url ) , NA , paste0( output_dir , '/' , catalog$db_tablename , '.rds' ) ) catalog$dbfolder <- ifelse( is.na( catalog$full_url ) , NA , paste0( output_dir , "/MonetDB" ) ) httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/users/logout" ) , httr::set_cookies( .cookies = this_cookie ) ) catalog } lodown_ipums <- function( data_name = "ipums" , catalog , ... ){ if( !( 'project' %in% names(list(...)) ) || !( list(...)[["project"]] %in% c( "usa" , "cps" , "international" ) ) ) stop( "`project` parameter must be specified. choices are 'usa' , 'cps' , 'international'" ) if( !( 'your_email' %in% names(list(...)) ) ) stop( "`your_email` parameter must be specified. create an account at https://www.ipums.org/" ) if( !( 'your_password' %in% names(list(...)) ) ) stop( "`your_password` parameter must be specified. create an account at https://www.ipums.org/" ) project <- list(...)[["project"]] your_email <- list(...)[["your_email"]] your_password <- list(...)[["your_password"]] this_cookie <- authenticate_ipums( your_email = your_email , your_password = your_password , project = project ) tf <- tempfile() ; tf2 <- tempfile() for ( i in seq_len( nrow( catalog ) ) ){ if( !grepl( 'CSV' , catalog[ i , 'Formatted Data' ] ) ){ cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " skipped because no csv file available.\n\nif you intended to import this extract, please visit\n`https://" , project , ".ipums.org/" , project , "-action/extract_requests/download` and revise or resubmit with the csv option checked.\r\n\n" ) ) } else { csv_filename <- gsub( "\\.rds" , ".csv" , catalog[ i , 'output_filename' ] ) # download the actual file httr::GET( catalog[ i , 'full_url' ] , httr::write_disk( tf , overwrite = TRUE ) , httr::set_cookies( .cookies = this_cookie ) , httr::progress() ) # store the file to the local disk R.utils::gunzip( tf , csv_filename , overwrite = TRUE ) xml <- 
httr::GET( catalog[ i , 'xml_url' ] , httr::set_cookies( .cookies = this_cookie ) ) csv_file_structure <- unlist( XML::xpathSApply( XML::xmlParse( xml ) , "//*//*//*//*" , XML::xmlGetAttr , "type" ) ) csv_file_structure <- csv_file_structure[ csv_file_structure != 'rectangular' ] # simple check that the stored csv file matches the loaded structure if( !( length( csv_file_structure ) == ncol( read.csv( csv_filename , nrow = 10 ) ) ) ) stop( "number of columns in final csv file does not match ipums structure xml file" ) # decide whether column types should be character or numeric colTypes <- ifelse( csv_file_structure == 'character' , 'CLOB' , 'DOUBLE PRECISION' ) # determine the column names from the csv file cn <- toupper( names( read.csv( csv_filename , nrow = 1 ) ) ) # for any column names that conflict with a monetdb reserved word, add an underscore cn[ cn %in% getFromNamespace( "reserved_monetdb_keywords" , "MonetDBLite" ) ] <- paste0( cn[ cn %in% getFromNamespace( "reserved_monetdb_keywords" , "MonetDBLite" ) ] , "_" ) # force all column names to be lowercase, since MonetDB.R is now case-sensitive cn <- tolower( cn ) if( !is.na( catalog[ i , 'output_filename' ] ) ){ # read in as a data.frame x <- data.frame( readr::read_csv( csv_filename , col_names = cn , col_types = paste0( ifelse( csv_file_structure == 'character' , 'c' , 'd' ) , collapse = "" ) , skip = 1 ) ) saveRDS( x , file = catalog[ i , 'output_filename' ] ) catalog[ i , 'case_count' ] <- nrow( x ) cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " stored at '" , catalog[ i , 'output_filename' ] , "'\r\n\n" ) ) } if( !is.na( catalog[ i , 'dbfolder' ] ) ){ # open the connection to the monetdblite database db <- DBI::dbConnect( MonetDBLite::MonetDBLite() , catalog[ i , 'dbfolder' ] ) # paste column names and column types together sequentially colDecl <- paste( cn , colTypes ) # construct a character string containing the create table command sql_create_table <- sprintf( paste( 
"CREATE TABLE" , catalog[ i , 'db_tablename' ] , "(%s)" ) , paste( colDecl , collapse = ", " ) ) # construct the table in the database DBI::dbSendQuery( db , sql_create_table ) # import the csv file into the database. DBI::dbSendQuery( db , paste0( "COPY OFFSET 2 INTO " , catalog[ i , 'db_tablename' ] , " FROM '" , normalizePath( csv_filename ) , "' USING DELIMITERS ',','\\n','\"' NULL AS ''" # , " BEST EFFORT" # <-- if your import breaks for some reason, # you could try uncommenting the preceding line ) ) # count the number of lines in the csv file on your local disk csv_lines <- R.utils::countLines( csv_filename ) # count the number of records in the imported table dbtable_lines <- DBI::dbGetQuery( db , paste( 'SELECT COUNT(*) FROM' , catalog[ i , 'db_tablename' ] ) )[ 1 , 1 ] # the imported table should have one fewer line than the csv file, # because the csv file has headers stopifnot( csv_lines == dbtable_lines + 1 ) catalog[ i , 'case_count' ] <- dbtable_lines DBI::dbDisconnect( db , shutdown = TRUE ) cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " stored in '" , catalog[ i , 'dbfolder' ] , "'\r\n\n" ) ) } # delete the temporary files suppressWarnings( file.remove( tf ) ) } } httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/users/logout" ) , httr::set_cookies( .cookies = this_cookie ) ) catalog } # thanks to the amazing respondents on stackoverflow for this algorithm # http://stackoverflow.com/questions/34829920/how-to-authenticate-a-shibboleth-multi-hostname-website-with-httr-in-r authenticate_ipums <- function( your_email , your_password , project ){ tf <- tempfile() this_page <- httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/menu" ) ) writeBin( this_page$content , tf ) if( any( grepl( "Logout" , readLines( tf ) ) ) ) return( invisible( TRUE ) ) httr::set_config( httr::config( ssl_verifypeer = 0L ) ) # get first page # p1 <- httr::GET( paste0( "https://" , project , 
".ipums.org/" , project , "-action/users/login" ) , httr::verbose( info = TRUE ) ) p1 <- httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/users/login" ) ) # post login credentials b2 <- list( "j_username" = your_email , "j_password" = your_password ) c2 <- c( JSESSIONID = p1$cookies[ p1$cookies$domain=="#HttpOnly_live.identity.popdata.org" , ]$value , `_idp_authn_lc_key` = p1$cookies[ p1$cookies$domain == "live.identity.popdata.org" , ]$value ) p2 <- httr::POST( p1$url , body = b2 , httr::set_cookies( .cookies = c2 ) , encode = "form" ) # parse hidden fields h2 <- xml2::read_html( p2$content ) form <- rvest::html_form(h2) # post hidden fields b3 <- list( "RelayState" = form[[1]]$fields[[1]]$value , "SAMLResponse" = form[[1]]$fields[[2]]$value ) c3 <- c( JSESSIONID = p1$cookies[ p1$cookies$domain == "#HttpOnly_live.identity.popdata.org" , ]$value , `_idp_session` = p2$cookies[ p2$cookies$name == "_idp_session" , ]$value , `_idp_authn_lc_key` = p2$cookies[p2$cookies$name == "_idp_authn_lc_key" , ]$value ) p3 <- httr::POST( form[[1]]$url , body = b3 , httr::set_cookies( .cookies = c3 ) , encode = "form" ) # get interesting page c4 <- c( JSESSIONID = p3$cookies[p1$cookies$domain==paste0( project , ".ipums.org" ) && p3$cookies$name == "JSESSIONID" , ]$value , `_idp_session` = p3$cookies[ p3$cookies$name == "_idp_session" , ]$value , `_idp_authn_lc_key` = p3$cookies[ p3$cookies$name == "_idp_authn_lc_key" , ]$value ) p4 <- httr::GET( paste0( "https://" , project , ".ipums.org/" , project , "-action/menu" ) , httr::set_cookies( .cookies = c4 ) ) # return the appropriate cookies c4 }
# Set working drive if not running on HPC if(.Platform$OS.type == "unix") { } else { setwd("C:/github/MIMICS_HiRes") } ######################################## # Load MIMICS data and ftns from Brute Forcing script ######################################## source("MIMICS_ftns/MIMICS_repeat_base.R") ######################################## # Load forcing data and parameter set ######################################## data <- read.csv("RCrk_Modelling_Data/RCrk_SOC_Cal+Val.csv", as.is=T) # Trim out data columns not required for the MC run ## Save on dataframe size and add any needed columns back after MIMICS run data <- data %>% select(Site, lat, long, SOC, CLAY, pGPP, TSOI, lig_N) # Load parameter set params_raw <- read.csv("MC/Output/RC_MIM_param_combos_RMSE_less2.csv") params_raw$run_num <- seq(1,nrow(params),1) # Select only the MIMICS parameter columns params <- params_raw %>% select(run_num, Vslope_x, Vint_x, Kslope_x, Kint_x, CUE_x, Tau_x, desorb_x, fPHYS_x) ####################### # Run MIMICS ####################### # Set number of cores to use no_cores <- availableCores() - 1 plan(multicore, gc = FALSE, workers = no_cores) # Run MIMICS! print(paste0("Start time: ", Sys.time())) start_time <- Sys.time() MIMruns <- params %>% split(1:nrow(params)) %>% future_map(~ MIMrepeat(forcing_df = data, rparams = ., output_type = "all"), .progress=TRUE) %>% bind_rows() print(paste0("Task time: ", Sys.time() - start_time)) # Release CPU cores plan(sequential) nbrOfWorkers() # Clean up memory gc() ## Join parameters to MIMICS output table MIMruns_full <- MIMruns %>% left_join(params_raw) ########################################## # Save MC output data ########################################## saveRDS(MIMruns_full, paste0("MIM_pset_runs_", format(Sys.time(), "%Y%m%d_%H%M%S"), ".rds"))
/Helpful scripts/MIM_param_set_run.R
permissive
piersond/MIMICS_HiRes
R
false
false
1,902
r
# Set working drive if not running on HPC if(.Platform$OS.type == "unix") { } else { setwd("C:/github/MIMICS_HiRes") } ######################################## # Load MIMICS data and ftns from Brute Forcing script ######################################## source("MIMICS_ftns/MIMICS_repeat_base.R") ######################################## # Load forcing data and parameter set ######################################## data <- read.csv("RCrk_Modelling_Data/RCrk_SOC_Cal+Val.csv", as.is=T) # Trim out data columns not required for the MC run ## Save on dataframe size and add any needed columns back after MIMICS run data <- data %>% select(Site, lat, long, SOC, CLAY, pGPP, TSOI, lig_N) # Load parameter set params_raw <- read.csv("MC/Output/RC_MIM_param_combos_RMSE_less2.csv") params_raw$run_num <- seq(1,nrow(params),1) # Select only the MIMICS parameter columns params <- params_raw %>% select(run_num, Vslope_x, Vint_x, Kslope_x, Kint_x, CUE_x, Tau_x, desorb_x, fPHYS_x) ####################### # Run MIMICS ####################### # Set number of cores to use no_cores <- availableCores() - 1 plan(multicore, gc = FALSE, workers = no_cores) # Run MIMICS! print(paste0("Start time: ", Sys.time())) start_time <- Sys.time() MIMruns <- params %>% split(1:nrow(params)) %>% future_map(~ MIMrepeat(forcing_df = data, rparams = ., output_type = "all"), .progress=TRUE) %>% bind_rows() print(paste0("Task time: ", Sys.time() - start_time)) # Release CPU cores plan(sequential) nbrOfWorkers() # Clean up memory gc() ## Join parameters to MIMICS output table MIMruns_full <- MIMruns %>% left_join(params_raw) ########################################## # Save MC output data ########################################## saveRDS(MIMruns_full, paste0("MIM_pset_runs_", format(Sys.time(), "%Y%m%d_%H%M%S"), ".rds"))
rm(list=ls()) library(ggplot2) library(reshape2) genecountDir <- "/data3/zhaochen/project/colon_cancer/colon_chip/chromHMM/genecount/" plotSaccter <- function (filename,rnafile,marker,region,time,cols) { # chip-seq rpm file <- read.delim(filename,sep = "\t",check.names=FALSE) # rna-seq fpkm rna <- read.delim(rnafile,sep = ",",check.names=FALSE) rna <- rna[,c(22,2:16)] #a <- basename(filename) #region <- paste(unlist(strsplit(a,split = "_"))[2],unlist(strsplit(a,split = "_"))[3],sep = "_") #marker <- unlist(strsplit(a,split = "_"))[4] chip_df <- melt(file[,cols],id.vars = "gene_name", variable.name ="rep",value.name = "rpm") rna_df <- melt(rna[,cols],id.vars = "gene_name",value.name = "fpkm") data <- merge(x = rna_df,y = chip_df) #write.table(data,file = "/data3/zhaochen/project/colon_cancer/colon_chip/chromHMM/genecount/chip/H3K27ac/mm10_genebody_0bp_chip_rna.txt",sep = "\t") correlation <- cor(data$fpkm,data$rpm,method = "spearman") correlation ggplot(data,aes(y = log2(fpkm+1),x = log2(rpm))) + geom_point(position = position_jitter(width = 0.3,height = 0.06),alpha=0.4,shape=19,size=1.5,colour="grey60") + # stat_density2d(aes(alpha=..density..),geom = "tile",contour = FALSE) + # stat_smooth(formula = data$rpm ~ data$fpkm,method = lm,se = FALSE,colour="black") + theme_classic(base_size = 18,base_family = "sans",base_line_size = 1.1) + #annotate("text",label=paste("cor:",round(correlation,4),sep = " "),x = 500,y = 1500,parse=TRUE,hjust=0.5,vjust=0.5,size=8) + scale_x_continuous(expand = c(0.03,0)) + scale_y_continuous(expand = c(0.03,0)) + labs(title = paste(time,region,"\ncor:",round(correlation,4),sep = " "),y="RNA-seq log2(FPKM+1)",x=paste(marker ,"log2(RPM)",sep = " ")) + theme(plot.title = element_text(size = 18,hjust = 0.5,vjust = 0.5), axis.title = element_text(size = 18,colour = "black"), axis.text = element_text(size = 18,colour = "black")) 
ggsave(paste("/data3/zhaochen/project/colon_cancer/colon_chip/chromHMM/genecount/chip/",marker,"/",marker,"_",time,"_",region,"_log2.png",sep = "")) } Markers <- c("H3K27ac","H3K4me1","H3K4me3","H3K27me3","H3K9me3") rnafiles <- list.files(path = genecountDir,pattern = "merge_rna.csv") # need change time and cols #### week0 for (marker in Markers) { for (rnafile in rnafiles) { a <- basename(rnafile) region <- paste(unlist(strsplit(a,split = "_"))[2],unlist(strsplit(a,split = "_"))[3],sep = "_") rnafile <- paste(genecountDir,rnafile,sep = "") filename <- paste(genecountDir,"chip/",marker,"/","mm10_",region,"_",marker,"_rpm.txt",sep = "") print(rnafile) print(filename) plotSaccter(filename = filename,rnafile = rnafile,marker = marker,region = region,time = "0week",cols=c(1,3,4)) } } ### week2 for (marker in Markers) { for (rnafile in rnafiles) { a <- basename(rnafile) region <- paste(unlist(strsplit(a,split = "_"))[2],unlist(strsplit(a,split = "_"))[3],sep = "_") rnafile <- paste(genecountDir,rnafile,sep = "") filename <- paste(genecountDir,"chip/",marker,"/","mm10_",region,"_",marker,"_rpm.txt",sep = "") plotSaccter(filename = filename,rnafile = rnafile,marker = marker,region = region,time = "2weeks",cols=c(1,5:7)) } } ### week4 for (marker in Markers) { for (rnafile in rnafiles) { a <- basename(rnafile) region <- paste(unlist(strsplit(a,split = "_"))[2],unlist(strsplit(a,split = "_"))[3],sep = "_") rnafile <- paste(genecountDir,rnafile,sep = "") filename <- paste(genecountDir,"chip/",marker,"/","mm10_",region,"_",marker,"_rpm.txt",sep = "") plotSaccter(filename = filename,rnafile = rnafile,marker = marker,region = region,time = "4weeks",cols=c(1,8:10)) } } #### week7 for (marker in Markers) { for (rnafile in rnafiles) { print(marker) a <- basename(rnafile) region <- paste(unlist(strsplit(a,split = "_"))[2],unlist(strsplit(a,split = "_"))[3],sep = "_") rnafile <- paste(genecountDir,rnafile,sep = "") filename <- 
paste(genecountDir,"chip/",marker,"/","mm10_",region,"_",marker,"_rpm.txt",sep = "") plotSaccter(filename = filename,rnafile = rnafile,marker = marker,region = region,time = "7weeks",cols=c(1,11:13)) } } #### week10 for (marker in Markers) { for (rnafile in rnafiles) { print(marker) a <- basename(rnafile) region <- paste(unlist(strsplit(a,split = "_"))[2],unlist(strsplit(a,split = "_"))[3],sep = "_") rnafile <- paste(genecountDir,rnafile,sep = "") filename <- paste(genecountDir,"chip/",marker,"/","mm10_",region,"_",marker,"_rpm.txt",sep = "") plotSaccter(filename = filename,rnafile = rnafile,marker = marker,region = region,time = "10weeks",cols=c(1,14:16)) } } # png("/data3/zhaochen/project/colon_cancer/colon_chip/chromHMM/genecount/chip/H3K27ac/H3K27ac_0week_genebody_log2_scatter.png") # smoothScatter(data$rpm, data$fpkm,pch = 19, # transformation = function(x) x ^ 0.5 # Scale # ) # dev.off()
/ChIP-seq/chrommHMM/step4_scatterplot.R
no_license
ZhaoChen96/CRC
R
false
false
5,000
r
# Scatter plots of ChIP-seq signal (RPM) vs RNA-seq expression (FPKM) per gene,
# for each histone marker, genomic region and time point of the colon cancer
# ChIP time course.

library(ggplot2)
library(reshape2)

genecountDir <- "/data3/zhaochen/project/colon_cancer/colon_chip/chromHMM/genecount/"

#' Draw and save one ChIP-vs-RNA scatter plot.
#'
#' @param filename Path to the tab-separated ChIP-seq RPM table (one row per
#'   gene, replicate columns).
#' @param rnafile Path to the comma-separated RNA-seq FPKM table.
#' @param marker Histone marker name (used for the x-axis label and file name).
#' @param region Genomic region label (used in the title and file name).
#' @param time Time-point label (used in the title and file name).
#' @param cols Integer column indices selecting gene_name plus the replicate
#'   columns of this time point; the same indices are applied to both tables.
#' @return Invisibly, the path of the saved PNG (side effect: writes the plot).
plotSaccter <- function(filename, rnafile, marker, region, time, cols) {
  # ChIP-seq RPM table (tab separated).
  chip <- read.delim(filename, sep = "\t", check.names = FALSE)
  # RNA-seq FPKM table (comma separated); reorder so gene_name (col 22) comes
  # first, matching the ChIP table layout so `cols` indexes both identically.
  rna <- read.delim(rnafile, sep = ",", check.names = FALSE)
  rna <- rna[, c(22, 2:16)]

  # Long format: one row per (gene, replicate).
  chip_df <- melt(chip[, cols], id.vars = "gene_name",
                  variable.name = "rep", value.name = "rpm")
  rna_df <- melt(rna[, cols], id.vars = "gene_name", value.name = "fpkm")
  data <- merge(x = rna_df, y = chip_df)

  correlation <- cor(data$fpkm, data$rpm, method = "spearman")

  p <- ggplot(data, aes(y = log2(fpkm + 1), x = log2(rpm))) +
    geom_point(position = position_jitter(width = 0.3, height = 0.06),
               alpha = 0.4, shape = 19, size = 1.5, colour = "grey60") +
    theme_classic(base_size = 18, base_family = "sans", base_line_size = 1.1) +
    scale_x_continuous(expand = c(0.03, 0)) +
    scale_y_continuous(expand = c(0.03, 0)) +
    labs(title = paste(time, region, "\ncor:", round(correlation, 4), sep = " "),
         y = "RNA-seq log2(FPKM+1)",
         x = paste(marker, "log2(RPM)", sep = " ")) +
    theme(plot.title = element_text(size = 18, hjust = 0.5, vjust = 0.5),
          axis.title = element_text(size = 18, colour = "black"),
          axis.text = element_text(size = 18, colour = "black"))

  # Pass the plot explicitly: inside a function the plot object is never
  # printed, so ggsave()'s default last_plot() may refer to a stale plot.
  outfile <- paste0(genecountDir, "chip/", marker, "/",
                    marker, "_", time, "_", region, "_log2.png")
  ggsave(outfile, plot = p)
  invisible(outfile)
}

Markers <- c("H3K27ac", "H3K4me1", "H3K4me3", "H3K27me3", "H3K9me3")
rnafiles <- list.files(path = genecountDir, pattern = "merge_rna.csv")

# Time point -> column indices (gene_name + replicate columns) in both tables.
# Replaces five copy-pasted per-week loops that differed only in these values.
timepoints <- list(
  "0week"   = c(1, 3, 4),
  "2weeks"  = c(1, 5:7),
  "4weeks"  = c(1, 8:10),
  "7weeks"  = c(1, 11:13),
  "10weeks" = c(1, 14:16)
)

for (marker in Markers) {
  for (rnafile in rnafiles) {
    # Region label is encoded in the RNA file name: <x>_<region1>_<region2>_...
    a <- basename(rnafile)
    region <- paste(unlist(strsplit(a, split = "_"))[2],
                    unlist(strsplit(a, split = "_"))[3], sep = "_")
    rnapath <- paste0(genecountDir, rnafile)
    filename <- paste0(genecountDir, "chip/", marker, "/",
                       "mm10_", region, "_", marker, "_rpm.txt")
    for (time in names(timepoints)) {
      plotSaccter(filename = filename, rnafile = rnapath, marker = marker,
                  region = region, time = time, cols = timepoints[[time]])
    }
  }
}
#' Calculate a vertical profile (`vp`) from a polar volume (`pvol`) file #' #' Calculates a vertical profile of biological scatterers (`vp`) from a polar #' volume (`pvol`) file using the algorithm #' [vol2bird](https://github.com/adokter/vol2bird/) ([Dokter et al. #' 2011](https://doi.org/10.1098/rsif.2010.0116)). Requires a running #' [Docker](https://www.docker.com/) daemon, unless a local installation of #' vol2bird is specified with `local_install`. #' #' @param file Character (vector). Either a path to a single radar polar volume #' (`pvol`) file containing multiple scans/sweeps, or multiple paths to scan #' files containing a single scan/sweep. Note that `pvol` objects are not #' supported. The file data format should be either 1) #' [ODIM](https://github.com/adokter/vol2bird/blob/master/doc/OPERA2014_O4_ODIM_H5-v2.2.pdf) #' format, which is the implementation of the OPERA data information model in #' the [HDF5](https://support.hdfgroup.org/HDF5/) format, 2) a format #' supported by the [RSL #' library](http://trmm-fc.gsfc.nasa.gov/trmm_gv/software/rsl/) or 3) Vaisala #' IRIS (IRIS RAW) format. #' @param vpfile Character. File name. When provided, writes a vertical profile #' file (`vpfile`) in the ODIM HDF5 format to disk. #' @param pvolfile_out Character. File name. When provided, writes a polar #' volume (`pvol`) file in the ODIM HDF5 format to disk. Useful for converting #' RSL formats to ODIM. #' @param autoconf Logical. When `TRUE`, default optimal configuration settings #' are selected automatically and other user settings are ignored. #' @param verbose Logical. When `TRUE`, Docker `stdout` is piped to the R #' console. Always `TRUE` on Windows. #' @param warnings Logical. When `TRUE`, vol2bird warnings are piped to the R #' console. #' @param mount Character. Directory path of the mount point for the Docker #' container. #' @param sd_vvp_threshold Numeric. 
Lower threshold for the radial velocity #' standard deviation (profile quantity `sd_vvp`) in m/s. Biological signals #' with `sd_vvp < sd_vvp_threshold` are set to zero. Defaults to 2 m/s for #' C-band radars and 1 m/s for S-band radars. #' @param rcs Numeric. Radar cross section per bird to use, in cm^2. #' @param dual_pol Logical. When `TRUE`, uses dual-pol mode, in which #' meteorological echoes are filtered using the correlation coefficient #' `rho_hv`. When `FALSE`, uses single polarization mode based only on #' reflectivity and radial velocity quantities. #' @param rho_hv Numeric. Lower threshold in correlation coefficient to use for #' filtering meteorological scattering. #' @param elev_min Numeric. Minimum elevation angle to include, in degrees. #' @param elev_max Numeric. Maximum elevation angle to include, in degrees. #' @param azim_min Numeric. Minimum azimuth to include, in degrees clockwise #' from north. #' @param azim_max Numeric. Maximum azimuth to include, in degrees clockwise #' from north. #' @param range_min Numeric. Minimum range to include, in m. #' @param range_max Numeric. Maximum range to include, in m. #' @param n_layer Numeric. Number of altitude layers to use in generated #' profile. #' @param h_layer Numeric. Width of altitude layers to use in generated profile, #' in m. #' @param nyquist_min Numeric. Minimum Nyquist velocity of scans to include, in #' m/s. #' @param dealias Logical. Whether to dealias radial velocities. This should #' typically be done when the scans in the polar volume have low Nyquist #' velocities (below 25 m/s). #' @param dbz_quantity Name of the available reflectivity factor to use if not #' `DBZH` (e.g. `DBZV`, `TH`, `TV`). #' @param mistnet Logical. Whether to use the MistNet segmentation model. #' @param mistnet_elevations Numeric vector of length 5. Elevation angles to #' feed to the MistNet segmentation model, which expects exactly 5 elevation #' scans at 0.5, 1.5, 2.5, 3.5 and 4.5 degrees. 
Specifying different elevation #' angles may compromise segmentation results. #' @param local_install Character. Path to local vol2bird installation (e.g. #' `your/vol2bird_install_directory/vol2bird/bin/vol2bird`). #' @param local_mistnet Character. Path to local MistNet segmentation model in #' PyTorch format (e.g. `/your/path/mistnet_nexrad.pt`). #' #' @return A vertical profile object of class `vp`. When defined, output files #' `vpfile` and `pvolfile_out` are saved to disk. #' #' @export #' #' @details #' ## Typical use #' #' Common arguments set by users are `file`, `vpfile`, `autoconf` and `mount`. #' Turn on `autoconf` to automatically select the optimal parameters for a given #' radar file. The default for C-band data is to apply rain-filtering in single #' polarization mode and dual polarization mode when available. The default for #' S-band data is to apply precipitation filtering in dual-polarization mode #' only. #' #' Arguments that sometimes require non-default values are: `rcs`, #' `sd_vvp_threshold`, `range_max`, `dual_pol`, `dealias`. Other arguments are #' typically left at their defaults. #' #' ## mount #' #' On repeated calls of [calculate_vp()], the Docker container mount can be #' recycled from one call to the next if subsequent calls share the same `mount` #' argument. Re-mounting a Docker container takes time, therefore it is advised #' to choose a mount point that is a parent directory of all volume files to be #' processed, such that [calculate_vp()] calls are as fast as possible. #' #' ## sd_vvp_threshold #' #' For altitude layers with a VVP-retrieved radial velocity standard deviation #' value below the threshold `sd_vvp_threshold`, the bird density `dens` is set #' to zero (see vertical profile [`vp`][summary.vp()] class). This threshold #' might be dependent on radar processing settings. 
Results from validation #' campaigns so far indicate that 2 m/s is the best choice for this parameter #' for most C-band weather radars, which is used as the C-band default. For #' S-band, the default threshold is 1 m/s. #' #' ## rcs #' #' The default radar cross section (`rcs`) (11 cm^2) corresponds to the average #' value found by Dokter et al. (2011) in a calibration campaign of a full #' migration autumn season in western Europe at C-band. Its value may depend on #' radar wavelength. `rcs` will scale approximately \eqn{M^{2/3}} with `M` the #' bird's mass. #' #' ## dual_pol #' #' For S-band (radar wavelength ~ 10 cm), currently only `dual_pol = TRUE` mode #' is recommended. #' #' ## azim_min / azim_max #' #' `azim_min` and `azim_max` only affects reflectivity-derived estimates in the #' profile (`DBZH`, `eta`, `dens`), not radial-velocity derived estimates (`u`, #' `v`, `w`, `ff`, `dd`, `sd_vvp`), which are estimated on all azimuths at all #' times. `azim_min`, `azim_max` may be set to exclude an angular sector with #' high ground clutter. #' #' ## range_min / range_max #' #' Using default values of `range_min` and `range_max` is recommended. Ranges #' closer than 5 km tend to be contaminated by ground clutter, while range gates #' beyond 35 km become too wide to resolve the default altitude layer width of #' 200 meter (see [beam_width()]). `range_max` may be extended up to 40 km #' (`40000`) for volumes with low elevations only, in order to extend coverage #' to higher altitudes. #' #' ## h_layer #' #' The algorithm has been tested and developed for altitude layers with `h_layer #' = 200`m. Smaller widths than 100 m are not recommended as they may cause #' instabilities of the volume velocity profiling (VVP) and dealiasing routines, #' and effectively lead to pseudo-replicated altitude data, since altitudinal #' patterns smaller than the beam width cannot be resolved. #' #' ## dealias #' #' Dealiasing uses the torus mapping method by Haase et al. (2004). 
#' #' ## Local installation #' #' You can bypass the Docker container and speed up processing by installing #' vol2bird locally (not on Windows). Point `local_install` to the path of your #' local vol2bird executable, e.g. #' `/your/vol2bird_install_directory/vol2bird/bin/vol2bird`. Your local vol2bird #' executable will be called through a bash login shell. `LD_LIBRARY_PATH` #' (Linux) or `DYLD_LIBRARY_PATH` (Mac) should be correctly specified in your #' `.bashrc` or `.bash_profile` file and contain all the required shared #' libraries by vol2bird. See vol2bird installation pages on #' [GitHub](https://github.com/adokter/vol2bird) for details. #' #' When using MistNet with a local vol2bird installation, also point parameter #' `local_mistnet` to your local download of the MistNet segmentation model in #' PyTorch format, e.g. `/your/path/mistnet_nexrad.pt`. The MistNet model can #' be downloaded at <https://s3.amazonaws.com/mistnet/mistnet_nexrad.pt>. #' #' @seealso #' * [summary.pvol()] #' * [summary.vp()] #' #' @references #' Dokter et al. (2011) is the main reference for the profiling algorithm #' (vol2bird) underlying this function. When using the `mistnet` option, please #' also cite Lin et al. (2019). When dealiasing data (`dealias`), please also #' cite Haase et al. (2004). #' #' * Dokter AM, Liechti F, Stark H, Delobbe L,Tabary P, Holleman I (2011) Bird #' migration flight altitudes studied by a network of operational weather #' radars, Journal of the Royal Society Interface 8 (54), pp. 30-43. #' <doi:10.1098/rsif.2010.0116> #' * Haase G & Landelius T (2004) #' Dealiasing of Doppler radar velocities using a torus mapping. Journal of #' Atmospheric and Oceanic Technology 21(10), pp. 1566-1573. 
#' <doi:10.1175/1520-0426(2004)021%3C1566:DODRVU%3E2.0.CO;2>
#' * Lin T-Y, Winner K, Bernstein G, Mittal A, Dokter AM, Horton KG, Nilsson C,
#' Van Doren BM, Farnsworth A, La Sorte FA, Maji S, Sheldon D (2019) MistNet:
#' Measuring historical bird migration in the US using archived weather radar
#' data and convolutional neural networks. Methods in Ecology and Evolution 10
#' (11), pp. 1908-22. <doi:10.1111/2041-210X.13280>
#'
#' @examples
#' \dontrun{
#' # Locate and read the polar volume example file
#' pvolfile <- system.file("extdata", "volume.h5", package = "bioRad")
#'
#' # Copy the file to a home directory with read/write permissions
#' file.copy(pvolfile, "~/volume.h5")
#'
#' # Calculate the profile
#' vp <- calculate_vp("~/volume.h5")
#'
#' # Get summary info
#' vp
#'
#' # Clean up
#' file.remove("~/volume.h5")
#' }
calculate_vp <- function(file, vpfile = "", pvolfile_out = "",
                         autoconf = FALSE, verbose = FALSE, warnings = TRUE,
                         mount = dirname(file[1]), sd_vvp_threshold,
                         rcs = 11, dual_pol = TRUE, rho_hv = 0.95,
                         elev_min = 0, elev_max = 90,
                         azim_min = 0, azim_max = 360,
                         range_min = 5000, range_max = 35000,
                         n_layer = 20, h_layer = 200,
                         dealias = TRUE,
                         nyquist_min = if (dealias) 5 else 25,
                         dbz_quantity = "DBZH", mistnet = FALSE,
                         mistnet_elevations = c(0.5, 1.5, 2.5, 3.5, 4.5),
                         local_install, local_mistnet) {
  # check input arguments
  assert_that(
    is.character(file),
    msg = "`file` must be a path to a file (or a vector of paths to files)."
  )
  for (filename in file) {
    assert_that(file.exists(filename))
  }
  if (!are_equal(vpfile, "")) {
    assert_that(is.writeable(dirname(vpfile)))
  }
  if (!are_equal(pvolfile_out, "")) {
    assert_that(is.writeable(dirname(pvolfile_out)))
  }
  if (!is.logical(mistnet)) {
    stop("`mistnet` must be a logical value.")
  }
  if (mistnet && !.pkgenv$mistnet) {
    stop("MistNet has not been installed, see update_docker() for install instructions.")
  }
  if (!is.logical(dealias)) {
    stop("`dealias` must be a logical value.")
  }
  if (file.access(mount, 0) == -1) {
    stop(glue("Can't find `mount` directory: {mount}"))
  }
  if (file.access(mount, 2) == -1) {
    stop(glue("No write permission to `mount` directory: {mount}"))
  }
  # local_install and local_mistnet must be given together (or not at all).
  if ((missing(local_install) && !missing(local_mistnet)) ||
    (!missing(local_install) && missing(local_mistnet))) {
    stop("To use local vol2bird and MistNet model, specify both `local_install` and `local_mistnet`.")
  }
  assert_that(is.numeric(mistnet_elevations))
  # MistNet expects exactly 5 elevation scans (see @param mistnet_elevations).
  assert_that(length(mistnet_elevations) == 5)
  if (!.pkgenv$docker && missing(local_install)) {
    stop(
      "Requires a running Docker daemon.\nTo enable calculate_vp(), start ",
      "your local Docker daemon, and run check_docker() in R."
    )
  }
  assert_that(is.flag(autoconf))
  assert_that(is.flag(verbose))
  assert_that(is.flag(warnings))
  assert_that(is.writeable(mount))
  if (!missing(sd_vvp_threshold)) {
    assert_that(is.number(sd_vvp_threshold))
    assert_that(sd_vvp_threshold >= 0)
  }
  assert_that(is.number(rcs))
  assert_that(rcs > 0)
  assert_that(is.flag(dual_pol))
  assert_that(is.number(rho_hv))
  assert_that(
    rho_hv >= 0 & rho_hv <= 1,
    msg = "`rho_hv` must be a number between 0 and 1."
  )
  assert_that(is.number(elev_min))
  assert_that(
    elev_min >= -90 & elev_min <= 90,
    msg = "`elev_min` must be a number between -90 and 90."
  )
  assert_that(is.number(elev_max))
  assert_that(
    elev_max >= -90 & elev_max <= 90,
    msg = "`elev_max` must be a number between -90 and 90."
  )
  assert_that(
    elev_max > elev_min,
    msg = "`elev_max` must be larger than `elev_min`."
  )
  assert_that(is.number(azim_min))
  assert_that(
    azim_min >= 0 & azim_min <= 360,
    msg = "`azim_min` must be a number between 0 and 360."
  )
  assert_that(is.number(azim_max))
  assert_that(
    azim_max >= 0 & azim_max <= 360,
    msg = "`azim_max` must be a number between 0 and 360."
  )
  assert_that(is.number(range_min))
  assert_that(
    range_min >= 0,
    msg = "`range_min` must be a positive number."
  )
  assert_that(is.number(range_max))
  assert_that(
    range_max > 0,
    msg = "`range_max` must be a positive number."
  )
  assert_that(
    range_max > range_min,
    msg = "`range_max` must be larger than `range_min`."
  )
  assert_that(is.count(n_layer))
  assert_that(is.number(h_layer))
  assert_that(
    h_layer > 0,
    msg = "`h_layer` must be a positive number."
  )
  assert_that(is.number(nyquist_min))
  assert_that(
    nyquist_min > 0,
    msg = "`nyquist_min` must be a positive number."
  )
  assert_that(
    dbz_quantity %in% c("DBZ", "DBZH", "DBZV", "TH", "TV"),
    msg = "`dbz_quantity` must be either `DBZ`, `DBZH`, `DBZV`, `TH` or `TV`."
  )
  assert_that(is.flag(mistnet))
  assert_that(
    !(mistnet && !.pkgenv$mistnet),
    msg = "Can't find MistNet installation, see update_docker() for install instructions.")
  assert_that(is.flag(dealias))
  assert_that(
    .pkgenv$docker | !missing(local_install),
    msg = glue(
      "Requires a running Docker daemon.\nTo enable calculate_vp(), start ",
      "your local Docker daemon, and run check_docker() in R."
    )
  )
  filedir <- dirname(normalizePath(file[1], winslash = "/"))
  assert_that(is.writeable(filedir))
  # The input file(s) must live under the Docker mount point so the container
  # can see them.
  assert_that(
    grepl(normalizePath(mount, winslash = "/"), filedir, fixed = TRUE),
    msg = "Mount point `mount` must be a parent directory of the input `file`."
  )

  # check whether vol2bird container supports multiple input files
  multi_file_support <- !is.null(.pkgenv$vol2bird_version) &&
    !is.na(.pkgenv$vol2bird_version) &&
    .pkgenv$vol2bird_version > numeric_version("0.3.20")
  if (!missing(local_install)) multi_file_support <- TRUE

  assert_that(!(length(file) > 1 && !multi_file_support),
    msg = glue(
      "Current vol2bird installation does not support multiple input files. ",
      "Provide a single input file containing a polar volume, or run ",
      "update_docker() to update."
    )
  )

  # temporary file the container writes the profile to, later renamed/removed
  profile.tmp <- tempfile(tmpdir = filedir)

  if (missing(local_install)) {
    assert_that(
      mount_docker_container(normalizePath(mount, winslash = "/")) == 0,
      msg = "Failed to start vol2bird Docker container, see check_docker()."
    )
  }

  # put options file in place, to be read by vol2bird container
  opt.values <- c(
    as.character(c(
      rcs, rho_hv, elev_min, elev_max, azim_min, azim_max, range_min,
      range_max, n_layer, h_layer, nyquist_min, dbz_quantity
    )),
    if (dual_pol) "TRUE" else "FALSE",
    if (dealias) "TRUE" else "FALSE"
  )
  opt.names <- c(
    "SIGMA_BIRD", "RHOHVMIN", "ELEVMIN", "ELEVMAX", "AZIMMIN", "AZIMMAX",
    "RANGEMIN", "RANGEMAX", "NLAYER", "HLAYER", "MIN_NYQUIST_VELOCITY",
    "DBZTYPE", "DUALPOL", "DEALIAS_VRAD"
  )
  # sd_vvp_threshold has no fixed default (it is wavelength dependent); only
  # write it when the user supplied one, otherwise vol2bird picks its default.
  if (!missing(sd_vvp_threshold)) {
    opt.values <- c(as.character(sd_vvp_threshold), opt.values)
    opt.names <- c("STDEV_BIRD", opt.names)
  }
  if (mistnet) {
    opt.values <- c(
      opt.values, "TRUE",
      # Format as "{0.5, 1.5, 2.5, 3.5, 4.5}". Fixed: the closing brace was
      # previously passed as a bogus named argument `paste = "}"`, which only
      # worked because it fell through into `...`.
      paste("{", paste(as.character(mistnet_elevations), collapse = ", "),
        "}", sep = ""),
      # Plain if/else instead of ifelse(): the condition is scalar, and
      # normalizePath(local_mistnet) must not be evaluated when the argument
      # is missing.
      if (missing(local_install)) {
        "/MistNet/mistnet_nexrad.pt"
      } else {
        normalizePath(local_mistnet)
      }
    )
    opt.names <- c(opt.names, "USE_MISTNET", "MISTNET_ELEVS", "MISTNET_PATH")
  }
  opt <- data.frame(
    "option" = opt.names,
    "is" = rep("=", length(opt.values)),
    "value" = opt.values
  )
  # options.conf goes at the mount root (Docker) or the working directory
  # (local install), where vol2bird looks for it.
  if (missing(local_install)) {
    optfile <- paste(normalizePath(mount, winslash = "/"),
      "/options.conf",
      sep = ""
    )
  } else {
    optfile <- paste(getwd(), "/options.conf", sep = "")
  }
  if (file.exists(optfile)) {
    optfile_save <- paste(optfile, ".", format(Sys.time(), "%Y%m%d%H%M%S"),
      sep = ""
    )
    warning(glue(
      "`options.conf` file found in directory {mount}. Renamed to ",
      "{basename(optfile_save)} to prevent overwrite."
    ))
    file.rename(optfile, optfile_save)
  }
  # only use user configuration when autoconfiguration is off.
  if (!autoconf) {
    write.table(opt,
      file = optfile, col.names = FALSE,
      row.names = FALSE, quote = FALSE
    )
  }

  # prepare docker input filenames relative to mountpoint
  prefixstart <- if (mount == "/") 1 else 2
  prefix <- substring(
    filedir,
    prefixstart + nchar(normalizePath(mount, winslash = "/"))
  )
  if (nchar(prefix) > 0) {
    prefix <- paste(prefix, "/", sep = "")
  }

  # we have a valid vol2bird version > 0.3.20, so we can use multiple file inputs
  if (multi_file_support) {
    pvolfile_docker <- paste("-i ", prefix, basename(file),
      sep = "", collapse = " "
    )
    profile.tmp.docker <- paste("-o ", prefix, basename(profile.tmp), sep = "")
    if (pvolfile_out != "") {
      pvolfile_out_docker <- paste("-p ", prefix, basename(pvolfile_out),
        sep = ""
      )
    } else {
      pvolfile_out_docker <- ""
    }
  } else {
    # only single polar volume file input supported
    pvolfile_docker <- paste(prefix, basename(file), sep = "")
    profile.tmp.docker <- paste(prefix, basename(profile.tmp), sep = "")
    if (pvolfile_out != "") {
      pvolfile_out_docker <- paste(prefix, basename(pvolfile_out), sep = "")
    } else {
      pvolfile_out_docker <- ""
    }
  }

  docker_command <- paste(
    "docker exec vol2bird bash -c \"cd data && vol2bird ",
    pvolfile_docker, profile.tmp.docker, pvolfile_out_docker, "\""
  )

  # run vol2bird container
  if (.Platform$OS.type == "unix") {
    # on mac and linux:
    if (missing(local_install)) {
      result <- system(docker_command,
        ignore.stdout = !verbose,
        ignore.stderr = !warnings
      )
    } else {
      # using a local install of vol2bird:
      # NOTE(review): paste() vectorizes over `file`, so with multiple input
      # files this produces multiple commands and system() would fail --
      # presumably the local-install path is only used with a single file;
      # confirm before relying on multi-file input here.
      result <- system(paste("bash -l -c \"", local_install, file,
        profile.tmp, pvolfile_out, "\""),
      ignore.stdout = !verbose,
      ignore.stderr = !warnings
      )
    }
  } else {
    # on Windows platforms:
    result <- suppressWarnings(system(docker_command))
  }

  if (result != 0) {
    # clean up the options file before aborting
    if (file.exists(optfile)) file.remove(optfile)
    stop("Failed to run vol2bird.")
  }

  # read output into a vp object
  output <- read_vpfiles(profile.tmp)

  # clean up: either discard the temporary profile or keep it under the
  # user-requested name
  if (vpfile == "") {
    file.remove(profile.tmp)
  } else {
    file.rename(profile.tmp, vpfile)
  }
  if (file.exists(optfile)) {
    file.remove(optfile)
  }
  output
}
/R/calculate_vp.R
permissive
nicobzz/bioRad
R
false
false
20,072
r
#' Calculate a vertical profile (`vp`) from a polar volume (`pvol`) file #' #' Calculates a vertical profile of biological scatterers (`vp`) from a polar #' volume (`pvol`) file using the algorithm #' [vol2bird](https://github.com/adokter/vol2bird/) ([Dokter et al. #' 2011](https://doi.org/10.1098/rsif.2010.0116)). Requires a running #' [Docker](https://www.docker.com/) daemon, unless a local installation of #' vol2bird is specified with `local_install`. #' #' @param file Character (vector). Either a path to a single radar polar volume #' (`pvol`) file containing multiple scans/sweeps, or multiple paths to scan #' files containing a single scan/sweep. Note that `pvol` objects are not #' supported. The file data format should be either 1) #' [ODIM](https://github.com/adokter/vol2bird/blob/master/doc/OPERA2014_O4_ODIM_H5-v2.2.pdf) #' format, which is the implementation of the OPERA data information model in #' the [HDF5](https://support.hdfgroup.org/HDF5/) format, 2) a format #' supported by the [RSL #' library](http://trmm-fc.gsfc.nasa.gov/trmm_gv/software/rsl/) or 3) Vaisala #' IRIS (IRIS RAW) format. #' @param vpfile Character. File name. When provided, writes a vertical profile #' file (`vpfile`) in the ODIM HDF5 format to disk. #' @param pvolfile_out Character. File name. When provided, writes a polar #' volume (`pvol`) file in the ODIM HDF5 format to disk. Useful for converting #' RSL formats to ODIM. #' @param autoconf Logical. When `TRUE`, default optimal configuration settings #' are selected automatically and other user settings are ignored. #' @param verbose Logical. When `TRUE`, Docker `stdout` is piped to the R #' console. Always `TRUE` on Windows. #' @param warnings Logical. When `TRUE`, vol2bird warnings are piped to the R #' console. #' @param mount Character. Directory path of the mount point for the Docker #' container. #' @param sd_vvp_threshold Numeric. 
Lower threshold for the radial velocity #' standard deviation (profile quantity `sd_vvp`) in m/s. Biological signals #' with `sd_vvp < sd_vvp_threshold` are set to zero. Defaults to 2 m/s for #' C-band radars and 1 m/s for S-band radars. #' @param rcs Numeric. Radar cross section per bird to use, in cm^2. #' @param dual_pol Logical. When `TRUE`, uses dual-pol mode, in which #' meteorological echoes are filtered using the correlation coefficient #' `rho_hv`. When `FALSE`, uses single polarization mode based only on #' reflectivity and radial velocity quantities. #' @param rho_hv Numeric. Lower threshold in correlation coefficient to use for #' filtering meteorological scattering. #' @param elev_min Numeric. Minimum elevation angle to include, in degrees. #' @param elev_max Numeric. Maximum elevation angle to include, in degrees. #' @param azim_min Numeric. Minimum azimuth to include, in degrees clockwise #' from north. #' @param azim_max Numeric. Maximum azimuth to include, in degrees clockwise #' from north. #' @param range_min Numeric. Minimum range to include, in m. #' @param range_max Numeric. Maximum range to include, in m. #' @param n_layer Numeric. Number of altitude layers to use in generated #' profile. #' @param h_layer Numeric. Width of altitude layers to use in generated profile, #' in m. #' @param nyquist_min Numeric. Minimum Nyquist velocity of scans to include, in #' m/s. #' @param dealias Logical. Whether to dealias radial velocities. This should #' typically be done when the scans in the polar volume have low Nyquist #' velocities (below 25 m/s). #' @param dbz_quantity Name of the available reflectivity factor to use if not #' `DBZH` (e.g. `DBZV`, `TH`, `TV`). #' @param mistnet Logical. Whether to use the MistNet segmentation model. #' @param mistnet_elevations Numeric vector of length 5. Elevation angles to #' feed to the MistNet segmentation model, which expects exactly 5 elevation #' scans at 0.5, 1.5, 2.5, 3.5 and 4.5 degrees. 
Specifying different elevation #' angles may compromise segmentation results. #' @param local_install Character. Path to local vol2bird installation (e.g. #' `your/vol2bird_install_directory/vol2bird/bin/vol2bird`). #' @param local_mistnet Character. Path to local MistNet segmentation model in #' PyTorch format (e.g. `/your/path/mistnet_nexrad.pt`). #' #' @return A vertical profile object of class `vp`. When defined, output files #' `vpfile` and `pvolfile_out` are saved to disk. #' #' @export #' #' @details #' ## Typical use #' #' Common arguments set by users are `file`, `vpfile`, `autoconf` and `mount`. #' Turn on `autoconf` to automatically select the optimal parameters for a given #' radar file. The default for C-band data is to apply rain-filtering in single #' polarization mode and dual polarization mode when available. The default for #' S-band data is to apply precipitation filtering in dual-polarization mode #' only. #' #' Arguments that sometimes require non-default values are: `rcs`, #' `sd_vvp_threshold`, `range_max`, `dual_pol`, `dealias`. Other arguments are #' typically left at their defaults. #' #' ## mount #' #' On repeated calls of [calculate_vp()], the Docker container mount can be #' recycled from one call to the next if subsequent calls share the same `mount` #' argument. Re-mounting a Docker container takes time, therefore it is advised #' to choose a mount point that is a parent directory of all volume files to be #' processed, such that [calculate_vp()] calls are as fast as possible. #' #' ## sd_vvp_threshold #' #' For altitude layers with a VVP-retrieved radial velocity standard deviation #' value below the threshold `sd_vvp_threshold`, the bird density `dens` is set #' to zero (see vertical profile [`vp`][summary.vp()] class). This threshold #' might be dependent on radar processing settings. 
Results from validation #' campaigns so far indicate that 2 m/s is the best choice for this parameter #' for most C-band weather radars, which is used as the C-band default. For #' S-band, the default threshold is 1 m/s. #' #' ## rcs #' #' The default radar cross section (`rcs`) (11 cm^2) corresponds to the average #' value found by Dokter et al. (2011) in a calibration campaign of a full #' migration autumn season in western Europe at C-band. Its value may depend on #' radar wavelength. `rcs` will scale approximately \eqn{M^{2/3}} with `M` the #' bird's mass. #' #' ## dual_pol #' #' For S-band (radar wavelength ~ 10 cm), currently only `dual_pol = TRUE` mode #' is recommended. #' #' ## azim_min / azim_max #' #' `azim_min` and `azim_max` only affects reflectivity-derived estimates in the #' profile (`DBZH`, `eta`, `dens`), not radial-velocity derived estimates (`u`, #' `v`, `w`, `ff`, `dd`, `sd_vvp`), which are estimated on all azimuths at all #' times. `azim_min`, `azim_max` may be set to exclude an angular sector with #' high ground clutter. #' #' ## range_min / range_max #' #' Using default values of `range_min` and `range_max` is recommended. Ranges #' closer than 5 km tend to be contaminated by ground clutter, while range gates #' beyond 35 km become too wide to resolve the default altitude layer width of #' 200 meter (see [beam_width()]). `range_max` may be extended up to 40 km #' (`40000`) for volumes with low elevations only, in order to extend coverage #' to higher altitudes. #' #' ## h_layer #' #' The algorithm has been tested and developed for altitude layers with `h_layer #' = 200`m. Smaller widths than 100 m are not recommended as they may cause #' instabilities of the volume velocity profiling (VVP) and dealiasing routines, #' and effectively lead to pseudo-replicated altitude data, since altitudinal #' patterns smaller than the beam width cannot be resolved. #' #' ## dealias #' #' Dealiasing uses the torus mapping method by Haase et al. (2004). 
#' #' ## Local installation #' #' You can bypass the Docker container and speed up processing by installing #' vol2bird locally (not on Windows). Point `local_install` to the path of your #' local vol2bird executable, e.g. #' `/your/vol2bird_install_directory/vol2bird/bin/vol2bird`. Your local vol2bird #' executable will be called through a bash login shell. `LD_LIBRARY_PATH` #' (Linux) or `DYLD_LIBRARY_PATH` (Mac) should be correctly specified in your #' `.bashrc` or `.bash_profile` file and contain all the required shared #' libraries by vol2bird. See vol2bird installation pages on #' [GitHub](https://github.com/adokter/vol2bird) for details. #' #' When using MistNet with a local vol2bird installation, also point parameter #' `local_mistnet` to your local download of the MistNet segmentation model in #' PyTorch format, e.g. `/your/path/mistnet_nexrad.pt`. The MistNet model can #' be downloaded at <https://s3.amazonaws.com/mistnet/mistnet_nexrad.pt>. #' #' @seealso #' * [summary.pvol()] #' * [summary.vp()] #' #' @references #' Dokter et al. (2011) is the main reference for the profiling algorithm #' (vol2bird) underlying this function. When using the `mistnet` option, please #' also cite Lin et al. (2019). When dealiasing data (`dealias`), please also #' cite Haase et al. (2004). #' #' * Dokter AM, Liechti F, Stark H, Delobbe L,Tabary P, Holleman I (2011) Bird #' migration flight altitudes studied by a network of operational weather #' radars, Journal of the Royal Society Interface 8 (54), pp. 30-43. #' <doi:10.1098/rsif.2010.0116> #' * Haase G & Landelius T (2004) #' Dealiasing of Doppler radar velocities using a torus mapping. Journal of #' Atmospheric and Oceanic Technology 21(10), pp. 1566-1573. 
#' <doi:10.1175/1520-0426(2004)021%3C1566:DODRVU%3E2.0.CO;2>
#' * Lin T-Y, Winner K, Bernstein G, Mittal A, Dokter AM, Horton KG, Nilsson C,
#' Van Doren BM, Farnsworth A, La Sorte FA, Maji S, Sheldon D (2019) MistNet:
#' Measuring historical bird migration in the US using archived weather radar
#' data and convolutional neural networks. Methods in Ecology and Evolution 10
#' (11), pp. 1908-22. <doi:10.1111/2041-210X.13280>
#'
#' @examples
#' \dontrun{
#' # Locate and read the polar volume example file
#' pvolfile <- system.file("extdata", "volume.h5", package = "bioRad")
#'
#' # Copy the file to a home directory with read/write permissions
#' file.copy(pvolfile, "~/volume.h5")
#'
#' # Calculate the profile
#' vp <- calculate_vp("~/volume.h5")
#'
#' # Get summary info
#' vp
#'
#' # Clean up
#' file.remove("~/volume.h5")
#' }
# Runs the external vol2bird algorithm on one or more polar volume files --
# inside a Docker container by default, or via a local installation when
# both `local_install` and `local_mistnet` are supplied -- then reads the
# resulting vertical profile back into R with read_vpfiles().
calculate_vp <- function(file, vpfile = "", pvolfile_out = "",
                         autoconf = FALSE, verbose = FALSE, warnings = TRUE,
                         mount = dirname(file[1]), sd_vvp_threshold,
                         rcs = 11, dual_pol = TRUE, rho_hv = 0.95, elev_min = 0,
                         elev_max = 90, azim_min = 0, azim_max = 360,
                         range_min = 5000, range_max = 35000, n_layer = 20,
                         h_layer = 200, dealias = TRUE,
                         nyquist_min = if (dealias) 5 else 25,
                         dbz_quantity = "DBZH", mistnet = FALSE,
                         mistnet_elevations = c(0.5, 1.5, 2.5, 3.5, 4.5),
                         local_install, local_mistnet) {
  # --- input validation -----------------------------------------------------
  assert_that(
    is.character(file),
    msg = "`file` must be a path to a file (or a vector of paths to files)."
  )
  for (filename in file) {
    assert_that(file.exists(filename))
  }
  # Output locations only need to be writeable when actually requested.
  if (!are_equal(vpfile, "")) {
    assert_that(is.writeable(dirname(vpfile)))
  }
  if (!are_equal(pvolfile_out, "")) {
    assert_that(is.writeable(dirname(pvolfile_out)))
  }
  if (!is.logical(mistnet)) {
    stop("`mistnet` must be a logical value.")
  }
  if (mistnet && !.pkgenv$mistnet) {
    stop("MistNet has not been installed, see update_docker() for install instructions.")
  }
  if (!is.logical(dealias)) {
    stop("`dealias` must be a logical value.")
  }
  # file.access(): mode 0 tests existence, mode 2 tests write permission.
  if (file.access(mount, 0) == -1) {
    stop(glue("Can't find `mount` directory: {mount}"))
  }
  if (file.access(mount, 2) == -1) {
    stop(glue("No write permission to `mount` directory: {mount}"))
  }
  # A local installation needs both the vol2bird binary and the MistNet model
  # path; supplying only one of the two is an error.
  if ((missing(local_install) && !missing(local_mistnet)) ||
      (!missing(local_install) && missing(local_mistnet))) {
    stop("To use local vol2bird and MistNet model, specify both `local_install` and `local_mistnet`.")
  }
  assert_that(is.numeric(mistnet_elevations))
  assert_that(length(mistnet_elevations) == 5)
  if (!.pkgenv$docker && missing(local_install)) {
    stop(
      "Requires a running Docker daemon.\nTo enable calculate_vp(), start ",
      "your local Docker daemon, and run check_docker() in R."
    )
  }
  assert_that(is.flag(autoconf))
  assert_that(is.flag(verbose))
  assert_that(is.flag(warnings))
  assert_that(is.writeable(mount))
  # sd_vvp_threshold has no default; validate only when supplied.
  if (!missing(sd_vvp_threshold)) {
    assert_that(is.number(sd_vvp_threshold))
    assert_that(sd_vvp_threshold >= 0)
  }
  assert_that(is.number(rcs))
  assert_that(rcs > 0)
  assert_that(is.flag(dual_pol))
  assert_that(is.number(rho_hv))
  assert_that(
    rho_hv >= 0 & rho_hv <= 1,
    msg = "`rho_hv` must be a number between 0 and 1."
  )
  assert_that(is.number(elev_min))
  assert_that(
    elev_min >= -90 & elev_min <= 90,
    msg = "`elev_min` must be a number between -90 and 90."
  )
  assert_that(is.number(elev_max))
  assert_that(
    elev_max >= -90 & elev_max <= 90,
    msg = "`elev_max` must be a number between -90 and 90."
  )
  assert_that(
    elev_max > elev_min,
    msg = "`elev_max` must be larger than `elev_min`."
  )
  assert_that(is.number(azim_min))
  assert_that(
    azim_min >= 0 & azim_min <= 360,
    msg = "`azim_min` must be a number between 0 and 360."
  )
  assert_that(is.number(azim_max))
  assert_that(
    azim_max >= 0 & azim_max <= 360,
    msg = "`azim_max` must be a number between 0 and 360."
  )
  assert_that(is.number(range_min))
  assert_that(
    range_min >= 0,
    msg = "`range_min` must be a positive number."
  )
  assert_that(is.number(range_max))
  assert_that(
    range_max > 0,
    msg = "`range_max` must be a positive number."
  )
  assert_that(
    range_max > range_min,
    msg = "`range_max` must be larger than `range_min`."
  )
  assert_that(is.count(n_layer))
  assert_that(is.number(h_layer))
  assert_that(
    h_layer > 0,
    msg = "`h_layer` must be a positive number."
  )
  assert_that(is.number(nyquist_min))
  assert_that(
    nyquist_min > 0,
    msg = "`nyquist_min` must be a positive number."
  )
  assert_that(
    dbz_quantity %in% c("DBZ", "DBZH", "DBZV", "TH", "TV"),
    msg = "`dbz_quantity` must be either `DBZ`, `DBZH`, `DBZV`, `TH` or `TV`."
  )
  # NOTE(review): the three assertions below duplicate the mistnet/dealias/
  # docker checks already enforced with stop() above; harmless, but they
  # could be consolidated.
  assert_that(is.flag(mistnet))
  assert_that(
    !(mistnet && !.pkgenv$mistnet),
    msg = "Can't find MistNet installation, see update_docker() for install instructions.")
  assert_that(is.flag(dealias))
  assert_that(
    .pkgenv$docker | !missing(local_install),
    msg = glue(
      "Requires a running Docker daemon.\nTo enable calculate_vp(), start ",
      "your local Docker daemon, and run check_docker() in R."
    )
  )
  filedir <- dirname(normalizePath(file[1], winslash = "/"))
  assert_that(is.writeable(filedir))
  # The container can only see paths below the mounted directory.
  assert_that(
    grepl(normalizePath(mount, winslash = "/"), filedir, fixed = TRUE),
    msg = "Mount point `mount` must be a parent directory of the input `file`."
  )
  # --- capability check: multiple input files -------------------------------
  # check whether vol2bird container supports multiple input files
  # (vol2bird > 0.3.20, or any local install, accepts several volumes).
  multi_file_support <- !is.null(.pkgenv$vol2bird_version) &&
    !is.na(.pkgenv$vol2bird_version) &&
    .pkgenv$vol2bird_version > numeric_version("0.3.20")
  if (!missing(local_install)) multi_file_support <- TRUE
  assert_that(!(length(file) > 1 && !multi_file_support),
    msg = glue(
      "Current vol2bird installation does not support multiple input files. ",
      "Provide a single input file containing a polar volume, or run ",
      "update_docker() to update."
    )
  )
  # Temporary profile output, created inside filedir so the container can
  # write to it through the mount.
  profile.tmp <- tempfile(tmpdir = filedir)
  if (missing(local_install)) {
    assert_that(
      mount_docker_container(normalizePath(mount, winslash = "/")) == 0,
      msg = "Failed to start vol2bird Docker container, see check_docker()."
    )
  }
  # --- options file ---------------------------------------------------------
  # put options file in place, to be read by vol2bird container.
  # opt.values is kept parallel to opt.names (one value per option key).
  opt.values <- c(
    as.character(c(
      rcs, rho_hv, elev_min, elev_max, azim_min, azim_max, range_min,
      range_max, n_layer, h_layer, nyquist_min, dbz_quantity
    )),
    if (dual_pol) "TRUE" else "FALSE",
    if (dealias) "TRUE" else "FALSE"
  )
  opt.names <- c(
    "SIGMA_BIRD", "RHOHVMIN", "ELEVMIN", "ELEVMAX", "AZIMMIN", "AZIMMAX",
    "RANGEMIN", "RANGEMAX", "NLAYER", "HLAYER", "MIN_NYQUIST_VELOCITY",
    "DBZTYPE", "DUALPOL", "DEALIAS_VRAD"
  )
  # STDEV_BIRD is only written when the user supplied sd_vvp_threshold.
  if (!missing(sd_vvp_threshold)) {
    opt.values <- c(as.character(sd_vvp_threshold), opt.values)
    opt.names <- c("STDEV_BIRD", opt.names)
  }
  if (mistnet) {
    opt.values <- c(
      opt.values, "TRUE",
      # Builds "{0.5, 1.5, ...}". NOTE(review): `paste = "}"` matches no
      # formal argument of paste() (formals are ..., sep, collapse) and is
      # absorbed into `...`, so it appends the closing brace -- it works,
      # but a positional "}" would be clearer.
      paste("{", paste(as.character(mistnet_elevations), collapse = ", "),
        paste = "}", sep = ""),
      ifelse(missing(local_install),
        "/MistNet/mistnet_nexrad.pt", normalizePath(local_mistnet))
    )
    opt.names <- c(opt.names, "USE_MISTNET", "MISTNET_ELEVS", "MISTNET_PATH")
  }
  opt <- data.frame(
    "option" = opt.names,
    "is" = rep("=", length(opt.values)),
    "value" = opt.values
  )
  # Docker reads options.conf from the mount point; a local install reads it
  # from the current working directory.
  if (missing(local_install)) {
    optfile <- paste(normalizePath(mount, winslash = "/"), "/options.conf",
      sep = ""
    )
  } else {
    optfile <- paste(getwd(), "/options.conf", sep = "")
  }
  # Preserve any pre-existing options.conf by renaming it with a timestamp.
  if (file.exists(optfile)) {
    optfile_save <-
      paste(optfile, ".", format(Sys.time(), "%Y%m%d%H%M%S"), sep = "")
    warning(glue(
      "`options.conf` file found in directory {mount}. Renamed to ",
      "{basename(optfile_save)} to prevent overwrite."
    ))
    file.rename(optfile, optfile_save)
  }
  # only use user configuration when autoconfiguration is off.
  if (!autoconf) {
    write.table(opt,
      file = optfile, col.names = FALSE,
      row.names = FALSE, quote = FALSE
    )
  }
  # --- command-line construction --------------------------------------------
  # prepare docker input filenames relative to mountpoint
  prefixstart <- if (mount == "/") 1 else 2
  prefix <- substring(
    filedir,
    prefixstart + nchar(normalizePath(mount, winslash = "/"))
  )
  if (nchar(prefix) > 0) {
    prefix <- paste(prefix, "/", sep = "")
  }
  # we have a valid vol2bird version > 0.3.20, so we can use multiple file
  # inputs with explicit -i/-o/-p flags
  if (multi_file_support) {
    pvolfile_docker <- paste("-i ", prefix, basename(file),
      sep = "", collapse = " ")
    profile.tmp.docker <- paste("-o ", prefix, basename(profile.tmp),
      sep = "")
    if (pvolfile_out != "") {
      pvolfile_out_docker <- paste("-p ", prefix, basename(pvolfile_out),
        sep = "")
    } else {
      pvolfile_out_docker <- ""
    }
  } else {
    # only single polar volume file input supported
    pvolfile_docker <- paste(prefix, basename(file), sep = "")
    profile.tmp.docker <- paste(prefix, basename(profile.tmp), sep = "")
    if (pvolfile_out != "") {
      pvolfile_out_docker <- paste(prefix, basename(pvolfile_out), sep = "")
    } else {
      pvolfile_out_docker <- ""
    }
  }
  docker_command <- paste(
    "docker exec vol2bird bash -c \"cd data && vol2bird ",
    pvolfile_docker, profile.tmp.docker, pvolfile_out_docker, "\""
  )
  # --- run vol2bird ---------------------------------------------------------
  if (.Platform$OS.type == "unix") {
    # on mac and linux:
    if (missing(local_install)) {
      result <- system(docker_command,
        ignore.stdout = !verbose,
        ignore.stderr = !warnings
      )
    } else {
      # using a local install of vol2bird:
      result <- system(paste("bash -l -c \"", local_install, file,
        profile.tmp, pvolfile_out, "\""),
        ignore.stdout = !verbose, ignore.stderr = !warnings
      )
    }
  } else {
    # on Windows platforms:
    # NOTE(review): this branch always runs the Docker command, even when a
    # local install was specified -- confirm local installs are intentionally
    # unsupported on Windows.
    result <-
      suppressWarnings(system(docker_command))
  }
  # Non-zero exit status: remove the options file before aborting.
  if (result != 0) {
    if (file.exists(optfile)) file.remove(optfile)
    stop("Failed to run vol2bird.")
  }
  # --- collect output and clean up ------------------------------------------
  # read output into a vp object
  output <- read_vpfiles(profile.tmp)
  # The temporary profile is either discarded or promoted to `vpfile`.
  if (vpfile == "") {
    file.remove(profile.tmp)
  } else {
    file.rename(profile.tmp, vpfile)
  }
  if (file.exists(optfile)) {
    file.remove(optfile)
  }
  output
}
##' Visualize risk-model evaluation results.
##'
##' Aggregates `risk_evaluation` within each design cell, splits the
##' missing-data strategy code into method and imputation type, and draws one
##' faceted dumbbell plot per performance metric (AUC, IPA, calibration
##' error) for every model/outcome combination.
##'
##' @title Visualize risk evaluation
##' @param risk_evaluation data frame with columns `outcome`, `md_strat`,
##'   `model`, `additional_missing_pct`, `auc`, `ipa` and `cal_error`.
##' @param md_method_labels named vector of display labels for missing-data
##'   methods (spliced into `recode()`).
##' @param md_type_labels named vector of display labels for imputation types.
##' @param model_labels named vector of display labels for models.
##' @param outcome_labels named vector of display labels for outcomes.
##' @param additional_missing_labels named vector of labels for the levels of
##'   `additional_missing_pct`.
##' @param times follow-up time (months) interpolated into the x-axis titles.
##' @return a tibble with one row per metric/model/outcome, holding nested
##'   plot data and a `plot` list-column of ggplot objects.
visualize_risk_evaluation <- function(risk_evaluation,
                                      md_method_labels,
                                      md_type_labels,
                                      model_labels,
                                      outcome_labels,
                                      additional_missing_labels,
                                      times) {

  # Rounding spec for all printed labels: 2 decimals, banker's rounding.
  rspec <- round_spec() %>%
    round_using_decimal(digits = 2) %>%
    round_half_even()

  ggdata <- risk_evaluation %>%
    # cal_error is rescaled by 1000 before plotting -- presumably to a
    # per-1000 scale for readable labels; confirm units with the caller.
    mutate(cal_error = cal_error * 1000) %>%
    # Average metrics within each outcome/strategy/model/missingness cell.
    group_by(outcome, md_strat, model, additional_missing_pct) %>%
    summarize(across(c(auc, ipa, cal_error), mean), .groups = 'drop') %>%
    # 'mia' carries no imputation-type suffix; tag it as single imputation so
    # separate() below always yields two fields.
    mutate(md_strat = if_else(md_strat == 'mia',
                              true = 'mia_si',
                              false = md_strat)) %>%
    separate(md_strat, into = c('md_method', 'md_type'), sep = '_') %>%
    group_by(outcome, md_method, model) %>%
    mutate(
      # Percent spread between best and worst value within the group ...
      across(
        .cols = c(auc, ipa, cal_error),
        .fns = list(pct_diff = ~ table_value(100*(max(.x)-min(.x))/min(.x),
                                             rspec = rspec))
      ),
      # ... and formatted point labels on a 0-100 scale.
      across(
        .cols = c(auc, ipa, cal_error),
        .fns = list(lbl = ~table_value(100 * .x, rspec = rspec))
      )
    ) %>%
    # Map internal codes to display labels.
    mutate(md_method = recode(md_method, !!!md_method_labels),
           md_type = recode(md_type, !!!md_type_labels),
           model = recode(model, !!!model_labels),
           outcome = recode(outcome, !!!outcome_labels),
           additional_missing_pct = factor(additional_missing_pct),
           additional_missing_pct = fct_recode(additional_missing_pct,
                                               !!!additional_missing_labels),
           md_type = fct_relevel(md_type, 'Single imputation'))

  # Methods excluded from the lower-end labels (only the upper label is
  # drawn for them). NOTE(review): rationale inferred from usage -- confirm.
  md_methods_upper_only <- c(
    "Mean or mode",
    "Missingness as an attribute"
  )

  # auc plots ----

  # Lowest AUC per group: labelled to the left of the point.
  lower_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-ipa, -cal_error) %>%
    filter(auc == min(auc)) %>%
    ungroup() %>%
    filter(!(md_method %in% md_methods_upper_only)) %>%
    nest(lwr = -c(model, outcome))

  # Highest AUC per group: labelled to the right of the point.
  upper_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-ipa, -cal_error) %>%
    filter(auc == max(auc)) %>%
    group_by(model) %>%
    nest(upr = -c(model, outcome))

  output_auc <- ggdata %>%
    group_by(model, outcome) %>%
    nest() %>%
    # Joins match on the shared model/outcome columns (`by` left implicit).
    left_join(lower_values) %>%
    left_join(upper_values) %>%
    mutate(
      data = map(
        .x = data,
        .f = ~ .x %>%
          mutate(md_method = fct_reorder(md_method, .x = auc, .fun = max))
      ),
      plot = pmap(
        .l = list(data, lwr, upr),
        .f = ~ {
          ggplot(..1) +
            aes(x = auc, y = md_method, fill = md_type) +
            geom_line(aes(group = md_method), color = 'grey') +
            geom_point(size = 3, shape = 21) +
            geom_text(
              data = ..2,
              aes(label = auc_lbl),
              nudge_x = -0.003
            ) +
            geom_text(
              data = ..3,
              aes(label = auc_lbl),
              nudge_x = +0.003
            ) +
            labs(x = glue('Area underneath the ROC curve,',
                          '{times} months post transplant',
                          .sep = ' '),
                 y = '',
                 fill = '') +
            facet_wrap(~additional_missing_pct) +
            theme_bw() +
            # Pad the x-range by 0.01 on each side so labels fit.
            scale_x_continuous(
              limits = c(
                min(..1$auc -0.01),
                max(..1$auc +0.01)
              )
            ) +
            scale_fill_manual(values = c("orange", "purple")) +
            # NOTE(review): unlike the ipa/cal_error panels below, this theme
            # omits text = element_text(size = 15) -- confirm intentional.
            theme(legend.position = 'top',
                  panel.grid.major.x = element_blank(),
                  panel.grid.minor.x = element_blank(),
                  panel.grid.major.y = element_line(linetype = 2))
        }
      )
    )

  # ipa plots ---- (same construction as the AUC panels)

  lower_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-auc, -cal_error) %>%
    filter(ipa == min(ipa)) %>%
    ungroup() %>%
    filter(!(md_method %in% md_methods_upper_only)) %>%
    nest(lwr = -c(model, outcome))

  upper_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-auc, -cal_error) %>%
    filter(ipa == max(ipa)) %>%
    group_by(model) %>%
    nest(upr = -c(model, outcome))

  output_ipa <- ggdata %>%
    group_by(model, outcome) %>%
    nest() %>%
    left_join(lower_values) %>%
    left_join(upper_values) %>%
    mutate(
      data = map(
        .x = data,
        .f = ~ .x %>%
          mutate(md_method = fct_reorder(md_method, .x = ipa, .fun = max))
      ),
      plot = pmap(
        .l = list(data, lwr, upr),
        .f = ~ {
          ggplot(..1) +
            aes(x = ipa, y = md_method, fill = md_type) +
            geom_line(aes(group = md_method), color = 'grey') +
            geom_point(size = 3, shape = 21) +
            geom_text(
              data = ..2,
              aes(label = ipa_lbl),
              nudge_x = -0.003
            ) +
            geom_text(
              data = ..3,
              aes(label = ipa_lbl),
              nudge_x = +0.003
            ) +
            labs(x = glue('Index of prediction accuracy,',
                          '{times} months post transplant',
                          .sep = ' '),
                 y = '',
                 fill = '') +
            facet_wrap(~additional_missing_pct) +
            theme_bw() +
            scale_x_continuous(
              limits = c(
                min(..1$ipa -0.01),
                max(..1$ipa +0.01)
              )
            ) +
            scale_fill_manual(values = c("orange", "purple")) +
            theme(legend.position = 'top',
                  text = element_text(size = 15),
                  panel.grid.major.x = element_blank(),
                  panel.grid.minor.x = element_blank(),
                  panel.grid.major.y = element_line(linetype = 2))
        }
      )
    )

  # calibration error plots ---- (same construction again)

  lower_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-auc, -ipa) %>%
    filter(cal_error == min(cal_error)) %>%
    ungroup() %>%
    filter(!(md_method %in% md_methods_upper_only)) %>%
    nest(lwr = -c(model, outcome))

  upper_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-auc, -ipa) %>%
    filter(cal_error == max(cal_error)) %>%
    group_by(model) %>%
    nest(upr = -c(model, outcome))

  output_cal_error <- ggdata %>%
    group_by(model, outcome) %>%
    nest() %>%
    left_join(lower_values) %>%
    left_join(upper_values) %>%
    mutate(
      data = map(
        .x = data,
        .f = ~ .x %>%
          mutate(md_method = fct_reorder(md_method, .x = cal_error, .fun = max))
      ),
      plot = pmap(
        .l = list(data, lwr, upr),
        .f = ~ {
          ggplot(..1) +
            aes(x = cal_error, y = md_method, fill = md_type) +
            geom_line(aes(group = md_method), color = 'grey') +
            geom_point(size = 3, shape = 21) +
            geom_text(
              data = ..2,
              aes(label = cal_error_lbl),
              nudge_x = -0.003
            ) +
            geom_text(
              data = ..3,
              aes(label = cal_error_lbl),
              nudge_x = +0.003
            ) +
            labs(x = glue('Calibration error,',
                          '{times} months post transplant',
                          .sep = ' '),
                 y = '',
                 fill = '') +
            facet_wrap(~additional_missing_pct) +
            theme_bw() +
            scale_x_continuous(
              limits = c(
                min(..1$cal_error -0.01),
                max(..1$cal_error +0.01)
              )
            ) +
            scale_fill_manual(values = c("orange", "purple")) +
            theme(legend.position = 'top',
                  text = element_text(size = 15),
                  panel.grid.major.x = element_blank(),
                  panel.grid.minor.x = element_blank(),
                  panel.grid.major.y = element_line(linetype = 2))
        }
      )
    )

  # One row per model/outcome per metric; `metric` identifies the panel type.
  bind_rows(auc = output_auc,
            ipa = output_ipa,
            cal_error = output_cal_error,
            .id = 'metric')
}
/R/visualize_risk_evaluation.R
no_license
bcjaeger/INTERMACS-missing-data
R
false
false
8,728
r
##' Visualize risk-model evaluation results.
##'
##' Aggregates `risk_evaluation` within each design cell, splits the
##' missing-data strategy code into method and imputation type, and draws one
##' faceted dumbbell plot per performance metric (AUC, IPA, calibration
##' error) for every model/outcome combination.
##'
##' @title Visualize risk evaluation
##' @param risk_evaluation data frame with columns `outcome`, `md_strat`,
##'   `model`, `additional_missing_pct`, `auc`, `ipa` and `cal_error`.
##' @param md_method_labels named vector of display labels for missing-data
##'   methods (spliced into `recode()`).
##' @param md_type_labels named vector of display labels for imputation types.
##' @param model_labels named vector of display labels for models.
##' @param outcome_labels named vector of display labels for outcomes.
##' @param additional_missing_labels named vector of labels for the levels of
##'   `additional_missing_pct`.
##' @param times follow-up time (months) interpolated into the x-axis titles.
##' @return a tibble with one row per metric/model/outcome, holding nested
##'   plot data and a `plot` list-column of ggplot objects.
visualize_risk_evaluation <- function(risk_evaluation,
                                      md_method_labels,
                                      md_type_labels,
                                      model_labels,
                                      outcome_labels,
                                      additional_missing_labels,
                                      times) {

  # Rounding spec for all printed labels: 2 decimals, banker's rounding.
  rspec <- round_spec() %>%
    round_using_decimal(digits = 2) %>%
    round_half_even()

  ggdata <- risk_evaluation %>%
    # cal_error is rescaled by 1000 before plotting -- presumably to a
    # per-1000 scale for readable labels; confirm units with the caller.
    mutate(cal_error = cal_error * 1000) %>%
    # Average metrics within each outcome/strategy/model/missingness cell.
    group_by(outcome, md_strat, model, additional_missing_pct) %>%
    summarize(across(c(auc, ipa, cal_error), mean), .groups = 'drop') %>%
    # 'mia' carries no imputation-type suffix; tag it as single imputation so
    # separate() below always yields two fields.
    mutate(md_strat = if_else(md_strat == 'mia',
                              true = 'mia_si',
                              false = md_strat)) %>%
    separate(md_strat, into = c('md_method', 'md_type'), sep = '_') %>%
    group_by(outcome, md_method, model) %>%
    mutate(
      # Percent spread between best and worst value within the group ...
      across(
        .cols = c(auc, ipa, cal_error),
        .fns = list(pct_diff = ~ table_value(100*(max(.x)-min(.x))/min(.x),
                                             rspec = rspec))
      ),
      # ... and formatted point labels on a 0-100 scale.
      across(
        .cols = c(auc, ipa, cal_error),
        .fns = list(lbl = ~table_value(100 * .x, rspec = rspec))
      )
    ) %>%
    # Map internal codes to display labels.
    mutate(md_method = recode(md_method, !!!md_method_labels),
           md_type = recode(md_type, !!!md_type_labels),
           model = recode(model, !!!model_labels),
           outcome = recode(outcome, !!!outcome_labels),
           additional_missing_pct = factor(additional_missing_pct),
           additional_missing_pct = fct_recode(additional_missing_pct,
                                               !!!additional_missing_labels),
           md_type = fct_relevel(md_type, 'Single imputation'))

  # Methods excluded from the lower-end labels (only the upper label is
  # drawn for them). NOTE(review): rationale inferred from usage -- confirm.
  md_methods_upper_only <- c(
    "Mean or mode",
    "Missingness as an attribute"
  )

  # auc plots ----

  # Lowest AUC per group: labelled to the left of the point.
  lower_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-ipa, -cal_error) %>%
    filter(auc == min(auc)) %>%
    ungroup() %>%
    filter(!(md_method %in% md_methods_upper_only)) %>%
    nest(lwr = -c(model, outcome))

  # Highest AUC per group: labelled to the right of the point.
  upper_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-ipa, -cal_error) %>%
    filter(auc == max(auc)) %>%
    group_by(model) %>%
    nest(upr = -c(model, outcome))

  output_auc <- ggdata %>%
    group_by(model, outcome) %>%
    nest() %>%
    # Joins match on the shared model/outcome columns (`by` left implicit).
    left_join(lower_values) %>%
    left_join(upper_values) %>%
    mutate(
      data = map(
        .x = data,
        .f = ~ .x %>%
          mutate(md_method = fct_reorder(md_method, .x = auc, .fun = max))
      ),
      plot = pmap(
        .l = list(data, lwr, upr),
        .f = ~ {
          ggplot(..1) +
            aes(x = auc, y = md_method, fill = md_type) +
            geom_line(aes(group = md_method), color = 'grey') +
            geom_point(size = 3, shape = 21) +
            geom_text(
              data = ..2,
              aes(label = auc_lbl),
              nudge_x = -0.003
            ) +
            geom_text(
              data = ..3,
              aes(label = auc_lbl),
              nudge_x = +0.003
            ) +
            labs(x = glue('Area underneath the ROC curve,',
                          '{times} months post transplant',
                          .sep = ' '),
                 y = '',
                 fill = '') +
            facet_wrap(~additional_missing_pct) +
            theme_bw() +
            # Pad the x-range by 0.01 on each side so labels fit.
            scale_x_continuous(
              limits = c(
                min(..1$auc -0.01),
                max(..1$auc +0.01)
              )
            ) +
            scale_fill_manual(values = c("orange", "purple")) +
            # NOTE(review): unlike the ipa/cal_error panels below, this theme
            # omits text = element_text(size = 15) -- confirm intentional.
            theme(legend.position = 'top',
                  panel.grid.major.x = element_blank(),
                  panel.grid.minor.x = element_blank(),
                  panel.grid.major.y = element_line(linetype = 2))
        }
      )
    )

  # ipa plots ---- (same construction as the AUC panels)

  lower_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-auc, -cal_error) %>%
    filter(ipa == min(ipa)) %>%
    ungroup() %>%
    filter(!(md_method %in% md_methods_upper_only)) %>%
    nest(lwr = -c(model, outcome))

  upper_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-auc, -cal_error) %>%
    filter(ipa == max(ipa)) %>%
    group_by(model) %>%
    nest(upr = -c(model, outcome))

  output_ipa <- ggdata %>%
    group_by(model, outcome) %>%
    nest() %>%
    left_join(lower_values) %>%
    left_join(upper_values) %>%
    mutate(
      data = map(
        .x = data,
        .f = ~ .x %>%
          mutate(md_method = fct_reorder(md_method, .x = ipa, .fun = max))
      ),
      plot = pmap(
        .l = list(data, lwr, upr),
        .f = ~ {
          ggplot(..1) +
            aes(x = ipa, y = md_method, fill = md_type) +
            geom_line(aes(group = md_method), color = 'grey') +
            geom_point(size = 3, shape = 21) +
            geom_text(
              data = ..2,
              aes(label = ipa_lbl),
              nudge_x = -0.003
            ) +
            geom_text(
              data = ..3,
              aes(label = ipa_lbl),
              nudge_x = +0.003
            ) +
            labs(x = glue('Index of prediction accuracy,',
                          '{times} months post transplant',
                          .sep = ' '),
                 y = '',
                 fill = '') +
            facet_wrap(~additional_missing_pct) +
            theme_bw() +
            scale_x_continuous(
              limits = c(
                min(..1$ipa -0.01),
                max(..1$ipa +0.01)
              )
            ) +
            scale_fill_manual(values = c("orange", "purple")) +
            theme(legend.position = 'top',
                  text = element_text(size = 15),
                  panel.grid.major.x = element_blank(),
                  panel.grid.minor.x = element_blank(),
                  panel.grid.major.y = element_line(linetype = 2))
        }
      )
    )

  # calibration error plots ---- (same construction again)

  lower_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-auc, -ipa) %>%
    filter(cal_error == min(cal_error)) %>%
    ungroup() %>%
    filter(!(md_method %in% md_methods_upper_only)) %>%
    nest(lwr = -c(model, outcome))

  upper_values <- ggdata %>%
    group_by(outcome, md_method, model, additional_missing_pct) %>%
    select(-auc, -ipa) %>%
    filter(cal_error == max(cal_error)) %>%
    group_by(model) %>%
    nest(upr = -c(model, outcome))

  output_cal_error <- ggdata %>%
    group_by(model, outcome) %>%
    nest() %>%
    left_join(lower_values) %>%
    left_join(upper_values) %>%
    mutate(
      data = map(
        .x = data,
        .f = ~ .x %>%
          mutate(md_method = fct_reorder(md_method, .x = cal_error, .fun = max))
      ),
      plot = pmap(
        .l = list(data, lwr, upr),
        .f = ~ {
          ggplot(..1) +
            aes(x = cal_error, y = md_method, fill = md_type) +
            geom_line(aes(group = md_method), color = 'grey') +
            geom_point(size = 3, shape = 21) +
            geom_text(
              data = ..2,
              aes(label = cal_error_lbl),
              nudge_x = -0.003
            ) +
            geom_text(
              data = ..3,
              aes(label = cal_error_lbl),
              nudge_x = +0.003
            ) +
            labs(x = glue('Calibration error,',
                          '{times} months post transplant',
                          .sep = ' '),
                 y = '',
                 fill = '') +
            facet_wrap(~additional_missing_pct) +
            theme_bw() +
            scale_x_continuous(
              limits = c(
                min(..1$cal_error -0.01),
                max(..1$cal_error +0.01)
              )
            ) +
            scale_fill_manual(values = c("orange", "purple")) +
            theme(legend.position = 'top',
                  text = element_text(size = 15),
                  panel.grid.major.x = element_blank(),
                  panel.grid.minor.x = element_blank(),
                  panel.grid.major.y = element_line(linetype = 2))
        }
      )
    )

  # One row per model/outcome per metric; `metric` identifies the panel type.
  bind_rows(auc = output_auc,
            ipa = output_ipa,
            cal_error = output_cal_error,
            .id = 'metric')
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clouddirectory_operations.R \name{clouddirectory_delete_directory} \alias{clouddirectory_delete_directory} \title{Deletes a directory} \usage{ clouddirectory_delete_directory(DirectoryArn) } \arguments{ \item{DirectoryArn}{[required] The ARN of the directory to delete.} } \description{ Deletes a directory. Only disabled directories can be deleted. A deleted directory cannot be undone. Exercise extreme caution when deleting directories. See \url{https://www.paws-r-sdk.com/docs/clouddirectory_delete_directory/} for full documentation. } \keyword{internal}
/cran/paws.security.identity/man/clouddirectory_delete_directory.Rd
permissive
paws-r/paws
R
false
true
639
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clouddirectory_operations.R \name{clouddirectory_delete_directory} \alias{clouddirectory_delete_directory} \title{Deletes a directory} \usage{ clouddirectory_delete_directory(DirectoryArn) } \arguments{ \item{DirectoryArn}{[required] The ARN of the directory to delete.} } \description{ Deletes a directory. Only disabled directories can be deleted. A deleted directory cannot be undone. Exercise extreme caution when deleting directories. See \url{https://www.paws-r-sdk.com/docs/clouddirectory_delete_directory/} for full documentation. } \keyword{internal}
# ===========================================================================
# Cab fare prediction: load trip data, engineer date/distance features,
# clean outliers, scale, and compare regression models (decision tree,
# linear regression, random forest, gradient boosting), writing a summary
# of MSE / R^2 / MAPE to 'Cab Fare Prediction.csv'.
# ===========================================================================
# NOTE(review): rm(list = ls()) and a hard-coded setwd() are script
# antipatterns -- they clobber the caller's session and only work on one
# machine. Kept as-is here; consider removing.
rm(list = ls())
setwd("C:/Users/sruth/Documents")
getwd()

# loading Libraries
x = c("ggplot2", "corrgram", "DMwR", "caret", "randomForest", "e1071","geosphere", "DataCombine", "pROC", "doSNOW", "class", "readxl","ROSE","dplyr", "plyr", "reshape","xlsx", "pbapply", "unbalanced", "dummies", "MASS" , "gbm" ,"Information", "rpart", "tidyr", "miscTools")
# NOTE(review): this reinstalls every package on every run; guard with
# requireNamespace() or install interactively once instead.
lapply(x, install.packages)
# load libraries (require() returns FALSE on failure rather than erroring)
lapply(x, require, character.only = TRUE)
rm(x)

# Input Data Source
df = data.frame(read.csv('train_cab.csv'))
# NOTE(review): df2 (test.csv) is read but never used below.
df2 = data.frame(read.csv('test.csv'))

# ========================== EXPLORING DATA =================================
# viewing the data
head(df)
# structure of data / data types
str(df)
# Summary of data
summary(df)
# unique value count of each column
apply(df, 2,function(x) length(table(x)))

# Strip the trailing " UTC" so the timestamp parses as a date string.
df$pickup_datetime <- gsub('\\ UTC','',df$pickup_datetime)
# Splitting Date and time into separate features
df$Date <- as.Date(df$pickup_datetime)
df$Year <- substr(as.character(df$Date),1,4)
df$Month <- substr(as.character(df$Date),6,7)
df$Weekday <- weekdays(as.POSIXct(df$Date), abbreviate = F)
df$Date <- substr(as.character(df$Date),9,10)   # day of month (overwrites the full date)
df$Time <- substr(as.factor(df$pickup_datetime),12,13)  # hour of day
df$fare_amount <- as.numeric(df$fare_amount)
# Drop pickup_datetime now that its components live in separate columns.
df = subset(df, select = -c(pickup_datetime))

# ======================== Checking Missing data ============================
apply(df, 2, function(x) {sum(is.na(x))}) # in R, 1 = Row & 2 = Col
# Creating dataframe with missing-value counts per variable
null_val = data.frame(apply(df,2,function(x){sum(is.na(x))}))
null_val$Columns = row.names(null_val)
names(null_val)[1] = "null_percentage"
# Calculating percentage missing value
null_val$null_percentage = (null_val$null_percentage/nrow(df)) * 100
# Sorting null_val in descending order
null_val = null_val[order(-null_val$null_percentage),]
row.names(null_val) = NULL
# Reordering columns
null_val = null_val[,c(2,1)]
# viewing the % of missing data for all variables
null_val
# Missingness is under 1% of rows, so rows with NAs are simply dropped.
df <- DropNA(df)
# Verifying missing values after deletion
sum(is.na(df))
names(df)

# NOTE(review): the comment below claims the coordinates are already in
# radians, but gcd_hf() applies sin()/cos() directly to the raw values; if
# the data is actually in degrees (typical for taxi lat/longs) the computed
# distances are wrong. TODO confirm the units and re-enable deg2rad() if
# needed.
# Convert degrees to radians- Our data is already in radians, so skipping this step
#deg2rad <- function(deg) return(deg*pi/180)
# Calculates the geodesic distance between two points specified by
# radian latitude/longitude using the Haversine formula.
# NOTE(review): the four assignments below are never used; the loop further
# down reads the df columns directly.
lat1 = df['pickup_latitude']
lat2 = df['dropoff_latitude']
long1 = df['pickup_longitude']
long2 = df['dropoff_longitude']

##### Function to calculate distance ######
# Haversine great-circle distance (km) between two radian lat/long points.
gcd_hf <- function(long1, lat1, long2, lat2) {
  R <- 6371.145 # Earth mean radius [km]
  delta.long <- (long2 - long1)
  delta.lat <- (lat2 - lat1)
  a <- sin(delta.lat/2)^2 + cos(lat1) * cos(lat2) * sin(delta.long/2)^2
  c <- 2 * atan2(sqrt(a),sqrt(1-a))   # NOTE: `c` shadows base::c locally
  d = R * c
  return(d)
}

# Running the function for all rows in the dataframe.
# NOTE(review): prefer seq_len(nrow(df)) over 1:nrow(df); the whole loop is
# vectorizable since gcd_hf() is built from vectorized arithmetic.
for (i in 1:nrow(df)) {
  df$distance[i]= gcd_hf(df$pickup_longitude[i], df$pickup_latitude[i],
                         df$dropoff_longitude[i], df$dropoff_latitude[i])
}
# Drop the raw coordinates now that distance has been derived.
df = subset(df, select = -c(pickup_latitude,dropoff_latitude,pickup_longitude,dropoff_longitude))

# Fares must be positive; values above 500 are treated as entry errors.
df$fare_amount[df$fare_amount<=0] <- NA
df$fare_amount[df$fare_amount>500] <- NA
sum(is.na(df))
df <- DropNA(df)
sum(is.na(df))
summary(df)

# Passenger count restricted to 1-6 (cab capacity).
df$passenger_count[df$passenger_count<1] <- NA
df$passenger_count[df$passenger_count>6] <- NA
sum(is.na(df))
df <- DropNA(df)
sum(is.na(df))
summary(df)

# Distance restricted to (0, 500].
df$distance[df$distance <= 0] <- NA
df$distance[df$distance > 500] <- NA
sum(is.na(df))
df <- DropNA(df)
sum(is.na(df))
summary(df)

# Continuous vs categorical predictors; fare_amount (the target) excluded.
cont = c( 'distance')
cata = c('Weekday', 'Month', 'Year' , 'Time', 'Date' , 'passenger_count')

# ======================= Visualizing the data ==============================
# Plot fare amount vs. the days of the week.
ggplot(data = df, aes(x = reorder(Weekday,-fare_amount), y = fare_amount))+
  geom_bar(stat = "identity")+
  labs(title = "Fare Amount Vs. days", x = "Days of the week", y = "Fare")+
  theme(plot.title = element_text(hjust = 0.5, face = "bold"))+
  theme(axis.text.x = element_text( color="black", size=6, angle=45))

# Plot fare amount vs. months
ggplot(df,aes(x = reorder(Month,-fare_amount), y = fare_amount))+
  geom_bar(stat = "identity")+ #ylim = c(0,1000)
  labs(title = "Fare Amount Vs. Month", x = "Month", y = "Fare")+
  theme(axis.text.x = element_text( color="#993333", size=8))

# ========================= Feature Scaling =================================
# Outlier analysis was handled manually above; feature selection already
# dropped the redundant datetime and coordinate columns. The commented-out
# removal of Time was considered but not applied:
##df = subset(df, select = -c(Time))
# Viewing data before normalization.
head(df)
# Signed log10: 0 for |x| <= 1, sign-preserving log10 otherwise; tames the
# right skew of fare and distance before min-max scaling.
signedlog10 = function(x) {
  ifelse(abs(x) <= 1, 0, sign(x)*log10(abs(x)))
}
df$fare_amount = signedlog10(df$fare_amount)
df$distance = signedlog10(df$distance)
## checking distributions after the log transform
hist(df$fare_amount)
hist(df$distance)
# Min-max normalization to [0, 1] for continuous predictors
for(i in cont) {
  print(i)
  df[,i] = (df[,i] - min(df[,i]))/(max(df[,i])-min(df[,i]))
}
hist(df$distance)
# Viewing data after normalization.
head(df)

# Creating dummy variables for categorical variables
library(mlr)
df1 = dummy.data.frame(df, cata)
# Viewing data after adding dummies
head(df1)
#df1 = df

# ========================= Sampling of Data ================================
# 70/30 stratified split on the target.
library(caret)
set.seed(101)
split_index = createDataPartition(df1$fare_amount, p = 0.7, list = FALSE)
trainset = df1[split_index,]
testset = df1[-split_index,]
# Checking train set target distribution
table(trainset$fare_amount)

#### FUNCTION to calculate MAPE ####
# Mean absolute percentage error, in percent.
MAPE = function(y, yhat){
  mean(abs((y - yhat)/y))*100
}

# ========================= Model comparison ================================
# Fit each candidate model on trainset, predict on testset, and record
# R^2, MSE and MAPE for the final summary table.

#------------------------------ Decision tree ------------------------------#
fit_DT = rpart(fare_amount ~., data = trainset, method = "anova")
# Variable importance
fit_DT$variable.importance
# distance Time05 passenger_count
## 725793.64246 431.82787 13.85704
# Predict for test data
pred_DT_test = predict(fit_DT, testset)
print(postResample(pred = pred_DT_test, obs = testset$fare_amount))
# Compute R^2
dt_r2 = rSquared(testset$fare_amount, testset$fare_amount - pred_DT_test)
print(dt_r2)
# Compute MSE
dt_mse = mean((testset$fare_amount - pred_DT_test)^2)
print(dt_mse)
# Compute MAPE
dt_mape = MAPE(testset$fare_amount, pred_DT_test)
print(dt_mape)
# RMSE Rsquared MAE
# 0.12 0.59 0.01

#---------------------------- Linear Regression ----------------------------#
fit_LR = lm(fare_amount ~ ., data = trainset)
pred_LR_test = predict(fit_LR, testset)
print(postResample(pred = pred_LR_test, obs = testset$fare_amount))
lr_r2 = rSquared(testset$fare_amount, testset$fare_amount - pred_LR_test)
print(lr_r2)
lr_mse = mean((testset$fare_amount - pred_LR_test)^2)
print(lr_mse)
lr_mape = MAPE(testset$fare_amount, pred_LR_test)
print(lr_mape)
## RMSE Rsquared MAE
## 0.13 0.53 0.01

#------------------------------ Random Forest ------------------------------#
fit_RF = randomForest(fare_amount~., data = trainset)
pred_RF_test = predict(fit_RF, testset)
print(postResample(pred = pred_RF_test, obs = testset$fare_amount))
rf_r2 = rSquared(testset$fare_amount, testset$fare_amount - pred_RF_test)
print(rf_r2)
rf_mse = mean((testset$fare_amount - pred_RF_test)^2)
print(rf_mse)
rf_mape = MAPE(testset$fare_amount, pred_RF_test)
print(rf_mape)

#-------------------------------- XGBoost ----------------------------------#
### gbm requires the Date variable as a factor.
trainset$Date <- as.factor(trainset$Date)
# NOTE(review): gbm() fits a gradient boosting machine, not XGBoost, and
# testset$Date is not converted to a factor like trainset$Date -- confirm
# predict() behaves as intended.
fit_XGB = gbm(fare_amount~., data = trainset, n.trees = 500, interaction.depth = 2)
pred_XGB_test = predict(fit_XGB, testset, n.trees = 500)
print(postResample(pred = pred_XGB_test, obs = testset$fare_amount))
xgb_r2 = rSquared(testset$fare_amount, testset$fare_amount - pred_XGB_test)
print(xgb_r2)
xgb_mse = mean((testset$fare_amount - pred_XGB_test)^2)
print(xgb_mse)
xgb_mape = MAPE(testset$fare_amount, pred_XGB_test)
print(xgb_mape)

#---------------------- Summary of all models ------------------------------#
# NOTE(review): `MAPE <- c(...)` below shadows the MAPE() function defined
# above; rename one of them if more models are added later.
MSE <- c(dt_mse, lr_mse, rf_mse, xgb_mse)
r2 <- c(dt_r2, lr_r2, rf_r2, xgb_r2)
MAPE <- c(dt_mape, lr_mape, rf_mape, xgb_mape)
# Join the metric vectors into a comparison table
results <- data.frame(MSE,r2,MAPE)
results

# ======================= Saving output to file =============================
write.csv(results,file = 'Cab Fare Prediction.csv',row.names = F)
/Cab fare prediction.R
no_license
sruthisodima/Fiery
R
false
false
11,852
r
rm(list = ls()) setwd("C:/Users/sruth/Documents") getwd() # #loading Libraries x = c("ggplot2", "corrgram", "DMwR", "caret", "randomForest", "e1071","geosphere", "DataCombine", "pROC", "doSNOW", "class", "readxl","ROSE","dplyr", "plyr", "reshape","xlsx", "pbapply", "unbalanced", "dummies", "MASS" , "gbm" ,"Information", "rpart", "tidyr", "miscTools") # #install.packages if not lapply(x, install.packages) # #load libraries lapply(x, require, character.only = TRUE) rm(x) #Input Data Source df = data.frame(read.csv('train_cab.csv')) df2 = data.frame(read.csv('test.csv')) ########################################################################### # EXPLORING DATA # ########################################################################### #viewing the data head(df) ########################################################################################## ###################### #structure of data or data types str(df) #Summary of data summary(df) #unique value of each count apply(df, 2,function(x) length(table(x))) df$pickup_datetime <- gsub('\\ UTC','',df$pickup_datetime) #Splitting Date and time df$Date <- as.Date(df$pickup_datetime) df$Year <- substr(as.character(df$Date),1,4) df$Month <- substr(as.character(df$Date),6,7) df$Weekday <- weekdays(as.POSIXct(df$Date), abbreviate = F) df$Date <- substr(as.character(df$Date),9,10) df$Time <- substr(as.factor(df$pickup_datetime),12,13) df$fare_amount <- as.numeric(df$fare_amount) #Now we can drop the column pickup_datetime as we have different columns df = subset(df, select = -c(pickup_datetime)) ######################################################################### # Checking Missing data # ######################################################################### apply(df, 2, function(x) {sum(is.na(x))}) # in R, 1 = Row & 2 = Col #Creating dataframe with missing values present in each variable null_val = data.frame(apply(df,2,function(x){sum(is.na(x))})) null_val$Columns = row.names(null_val) names(null_val)[1] = 
"null_percentage" #Calculating percentage missing value null_val$null_percentage = (null_val$null_percentage/nrow(df)) * 100 # Sorting null_val in Descending order null_val = null_val[order(-null_val$null_percentage),] row.names(null_val) = NULL # Reordering columns null_val = null_val[,c(2,1)] #viewing the % of missing data for all variales null_val #We have seen that null values are very less in our data set i.e. less than 1%. #So we can delete the columns having missing values df <- DropNA(df) #Verifying missing values after deletion sum(is.na(df)) names(df) # Convert degrees to radians- Our data is already in radians, so skipping this step #deg2rad <- function(deg) return(deg*pi/180) # Calculates the geodesic distance between two points specified by # radian latitude/longitude using the Haversine formula lat1 = df['pickup_latitude'] lat2 = df['dropoff_latitude'] long1 = df['pickup_longitude'] long2 = df['dropoff_longitude'] ##### Function to calculate distance ###### gcd_hf <- function(long1, lat1, long2, lat2) { R <- 6371.145 # Earth mean radius [km] delta.long <- (long2 - long1) delta.lat <- (lat2 - lat1) a <- sin(delta.lat/2)^2 + cos(lat1) * cos(lat2) * sin(delta.long/2)^2 c <- 2 * atan2(sqrt(a),sqrt(1-a)) d = R * c return(d) } #Running the function for all rows in dataframe for (i in 1:nrow(df)) { df$distance[i]= gcd_hf(df$pickup_longitude[i], df$pickup_latitude[i], df$dropoff_longitude[i], df$dropoff_latitude[i]) } #Now we can drop the columns for latitude/longitude as we have new column- Distance df = subset(df, select = -c(pickup_latitude,dropoff_latitude,pickup_longitude,dropoff_longitude)) #We have seen that fare_amount has negative values which should be removed df$fare_amount[df$fare_amount<=0] <- NA df$fare_amount[df$fare_amount>500] <- NA sum(is.na(df)) #So we can delete the columns having missing values df <- DropNA(df) #Verifying missing values after deletion sum(is.na(df)) summary(df) ###removing passangers count more than 6 
df$passenger_count[df$passenger_count<1] <- NA df$passenger_count[df$passenger_count>6] <- NA sum(is.na(df)) df <- DropNA(df) sum(is.na(df)) summary(df) ###removing outliers in distance df$distance[df$distance <= 0] <- NA df$distance[df$distance > 500] <- NA sum(is.na(df)) df <- DropNA(df) sum(is.na(df)) summary(df) # From the above EDA and problem statement categorizing data in 2 categories "continuous" and "categorical" #Fare_amount being our target variable is excluded from the list. cont = c( 'distance') cata = c('Weekday', 'Month', 'Year' , 'Time', 'Date' , 'passenger_count') ######################################################################### # Visualizing the data # ######################################################################### #library(ggplot2) #Plot fare amount Vs. the days of the week. ggplot(data = df, aes(x = reorder(Weekday,-fare_amount), y = fare_amount))+ geom_bar(stat = "identity")+ labs(title = "Fare Amount Vs. days", x = "Days of the week", y = "Fare")+ theme(plot.title = element_text(hjust = 0.5, face = "bold"))+ theme(axis.text.x = element_text( color="black", size=6, angle=45)) #Plot Fare amount Vs. months ggplot(df,aes(x = reorder(Month,-fare_amount), y = fare_amount))+ geom_bar(stat = "identity")+ #ylim = c(0,1000) labs(title = "Fare Amount Vs. 
Month", x = "Month", y = "Fare")+ theme(axis.text.x = element_text( color="#993333", size=8)) ################################################################ # Outlier Analysis # ################################################################ #We have done manual updation so we will skip this step ################################################################ # Feature Selection # ################################################################ ## Dimension Reduction #We have already excluded the below columns that were redundant: #pickup_datetime, #pickup_latitude, #dropoff_latitude, #pickup_longitude, #dropoff_longitude #pickup_datetime #We will remove Time column also as it is not required ##df = subset(df, select = -c(Time)) ################################################################ # Feature Scaling # ################################################################ #We will go for Normalization. #Viewing data before Normalization. head(df) signedlog10 = function(x) { ifelse(abs(x) <= 1, 0, sign(x)*log10(abs(x))) } df$fare_amount = signedlog10(df$fare_amount) df$distance = signedlog10(df$distance) ##checking distribution hist(df$fare_amount) hist(df$distance) #Normalization for(i in cont) { print(i) df[,i] = (df[,i] - min(df[,i]))/(max(df[,i])-min(df[,i])) } hist(df$distance) #Viewing data after Normalization. 
head(df) #Creating dummy variables for categorical variables library(mlr) df1 = dummy.data.frame(df, cata) #Viewing data after adding dummies head(df1) #df1 = df ################################################################ # Sampling of Data # ################################################################ # #Divide data into trainset and testset using stratified sampling method #install.packages('caret') library(caret) set.seed(101) split_index = createDataPartition(df1$fare_amount, p = 0.7, list = FALSE) trainset = df1[split_index,] testset = df1[-split_index,] #Checking df Set Target Class table(trainset$fare_amount) ####FUNCTION to calculate MAPE#### MAPE = function(y, yhat){ mean(abs((y - yhat)/y))*100 } ########################################################################################## ################################################################## ## Basic approach for ML - Models ## ## We will first get a basic idea of how different models perform on our preprocesed data and then select the best model and make it ## ## more efficient for our Dataset ## ########################################################################################## ################################################################## #------------------------------------------Decision tree-------------------------------------------# #Develop Model on training data fit_DT = rpart(fare_amount ~., data = trainset, method = "anova") #Variable importance fit_DT$variable.importance # distance Time05 passenger_count ## 725793.64246 431.82787 13.85704 #Lets predict for test data pred_DT_test = predict(fit_DT, testset) # For test data print(postResample(pred = pred_DT_test, obs = testset$fare_amount)) #Compute R^2 dt_r2 = rSquared(testset$fare_amount, testset$fare_amount - pred_DT_test) print(dt_r2) #Compute MSE dt_mse = mean((testset$fare_amount - pred_DT_test)^2) print(dt_mse) #Compute MAPE dt_mape = MAPE(testset$fare_amount, pred_DT_test) print(dt_mape) # RMSE Rsquared MAE # 
0.12 0.59 0.01 #------------------------------------------Linear Regression-------------------------------------------# #Develop Model on training data fit_LR = lm(fare_amount ~ ., data = trainset) #Lets predict for test data pred_LR_test = predict(fit_LR, testset) # For test data print(postResample(pred = pred_LR_test, obs = testset$fare_amount)) #Compute R^2 lr_r2 = rSquared(testset$fare_amount, testset$fare_amount - pred_LR_test) print(lr_r2) #Compute MSE lr_mse = mean((testset$fare_amount - pred_LR_test)^2) print(lr_mse) #Compute MAPE lr_mape = MAPE(testset$fare_amount, pred_LR_test) print(lr_mape) ##RMSE Rsquared MAE ##0.13 0.53 0.01 #-----------------------------------------Random Forest----------------------------------------------# #Develop Model on training data fit_RF = randomForest(fare_amount~., data = trainset) #Lets predict for test data pred_RF_test = predict(fit_RF, testset) # For test data print(postResample(pred = pred_RF_test, obs = testset$fare_amount)) #Compute R^2 rf_r2 = rSquared(testset$fare_amount, testset$fare_amount - pred_RF_test) print(rf_r2) #Compute MSE rf_mse = mean((testset$fare_amount - pred_RF_test)^2) print(rf_mse) #Compute MAPE rf_mape = MAPE(testset$fare_amount, pred_RF_test) print(rf_mape) # RMSE Rsquared MAE # #R2 # #MSE # #MAPE # #--------------------------------------------XGBoost-------------------------------------------# ### for xgboost it is required to make date variable as factor. 
trainset$Date <- as.factor(trainset$Date) #Develop Model on training data fit_XGB = gbm(fare_amount~., data = trainset, n.trees = 500, interaction.depth = 2) #Lets predict for test data pred_XGB_test = predict(fit_XGB, testset, n.trees = 500) # For test data print(postResample(pred = pred_XGB_test, obs = testset$fare_amount)) #Compute R^2 xgb_r2 = rSquared(testset$fare_amount, testset$fare_amount - pred_XGB_test) print(xgb_r2) #Compute MSE xgb_mse = mean((testset$fare_amount - pred_XGB_test)^2) print(xgb_mse) #Compute MAPE xgb_mape = MAPE(testset$fare_amount, pred_XGB_test) print(xgb_mape) # RMSE Rsquared MAE # #R2 # #MSE # #MAPE # #################-------------------------------Viewing summary of all models------------------------------ ############### # Create variables MSE <- c(dt_mse, lr_mse, rf_mse, xgb_mse) r2 <- c(dt_r2, lr_r2, rf_r2, xgb_r2) MAPE <- c(dt_mape, lr_mape, rf_mape, xgb_mape) # Join the variables to create a data frame results <- data.frame(MSE,r2,MAPE) results # MSE r2 MAPE #1 #2 #3 #4 ######################################################################################## # Saving output to file # ######################################################################################## write.csv(results,file = 'Cab Fare Prediction.csv',row.names = F)
context('subsetting') .bed_subset <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) snp_ids <- bed_bim_df(bo)$SNP sample_ids <- bed_sample_IDs(bo) ### edge cases expect_error(bed_subset(bo, snp_IDs = snp_ids, snp_idx = 1), "incompatible parameters") expect_error(bed_subset(bo, sample_IDs = sample_ids, sample_idx = 1), "incompatible parameters") expect_error(bed_subset(bo, sample_idx = 1, snp_idx = 1, invert = TRUE), "can not use both SNP and sample subsetting with invert=TRUE") expect_identical(bed_subset(bo, snp_IDs = snp_ids[10:15]), bed_subset_snps_by_IDs(bo, snp_ids[10:15])) expect_identical(bed_subset(bo, snp_idx = 3), bed_subset_snps_by_idx(bo, 3)) expect_identical(bed_subset(bo, sample_IDs = sample_ids[10:15]), bed_subset_samples_by_IDs(bo, sample_ids[10:15])) expect_identical(bed_subset(bo, sample_idx = 3), bed_subset_samples_by_idx(bo, 3)) # invert expect_identical(bed_subset(bo, snp_IDs = snp_ids[1:10], invert = TRUE), bed_subset_snps_by_IDs(bo, snp_ids[11:17])) expect_identical(bed_subset(bo, snp_idx = 17:11, invert = TRUE), bed_subset_snps_by_idx(bo, 1:10)) expect_identical(bed_subset(bo, sample_IDs = sample_ids[1:10], invert = TRUE), bed_subset_samples_by_IDs(bo, sample_ids[11:89])) expect_identical(bed_subset(bo, sample_idx = 81:89, invert = TRUE), bed_subset_samples_by_idx(bo, 1:80)) bo2 <- bed_subset(bo, snp_idx = 3:9, sample_IDs = sample_ids[10:15]) expect_identical(bed_snp_idx(bo2), 3:9) expect_identical(bed_sample_idx(bo2), 10:15) bo3 <- bed_subset(bo2, snp_IDs = snp_ids[5:6], sample_idx = 4:2) expect_identical(bed_snp_idx(bo3), 5:6) expect_identical(bed_sample_idx(bo3), (10:15)[4:2]) # invert bo3 <- bed_subset(bo2, snp_IDs = snp_ids[5:6], invert = TRUE) expect_identical(bed_snp_IDs(bo3), snp_ids[c(3:4, 7:9)]) bo3 <- bed_subset(bo2, sample_idx = 4:2, invert = TRUE) expect_identical(bed_sample_idx(bo3), c(10L, 14L, 15L)) } test_that('bed_subset', .bed_subset()) .bed_subset_samples_by_idx <- function() { bo <- 
bed_open(plinker:::fetch_sample_bed()) nb <- bed_nb_samples(bo) expect_equal(nb, 89) ### edge cases expect_error(bed_subset_samples_by_idx(bo, NULL), 'empty sample_idx') expect_error(bed_subset_samples_by_idx(bo, 100), 'bad sample_idx range') # duplicates expect_error(bed_subset_samples_by_idx(bo, c(1, 2, 1)), 'duplicated') expect_error(bed_subset_samples_by_idx(bo, c(1, NA)), 'missing') bo2 <- bed_subset_samples_by_idx(bo, c(21:35)) expect_equal(bed_nb_samples(bo2), 15) expect_equal(bed_nb_samples(bo2, subset = FALSE), 89) expect_identical(bed_sample_idx(bo2), 21:35) df <- bed_fam_df(bo2) expect_equal(rownames(df), as.character(21:35)) expect_identical(df, bed_fam_df(bo2, subset = FALSE)[21:35, ]) # invert N.B: ordering is meaningless bo2 <- bed_subset_samples_by_idx(bo, 89:11, invert = TRUE) expect_identical(bed_sample_idx(bo2), 1:10) ### recursive subsetting bo2 <- bed_subset_samples_by_idx(bo, c(21:35)) bo3 <- bed_subset_samples_by_idx(bo2, c(3, 1, 5)) expect_equal(bed_nb_samples(bo3), 3) expect_equal(bed_nb_samples(bo3, subset = FALSE), 89) expect_identical(bed_sample_idx(bo3), c(23L, 21L, 25L)) bo4 <- bed_subset_samples_by_idx(bo3, 2) expect_equal(bed_nb_samples(bo4), 1) expect_identical(bed_sample_idx(bo4), 21L) # invert bo4 <- bed_subset_samples_by_idx(bo3, 2:3, invert = TRUE) expect_identical(bed_sample_idx(bo4), 23L) ### ordering bo2 <- bed_subset_samples_by_idx(bo, 10:5) expect_identical(bed_sample_idx(bo2), 10:5) } test_that('bed_subset_samples_by_idx', .bed_subset_samples_by_idx()) .bed_reset_subset_samples_by_idx <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) expect_equal(bed_nb_samples(bo), 89) bo2 <- bed_subset_samples_by_idx(bo, c(3, 5, 16)) expect_equal(bed_nb_samples(bo2), 3) bo3 <- bed_reset_subset_samples_by_idx(bo2) expect_equal(bed_nb_samples(bo3), 89) } test_that('bed_reset_subset_samples_by_idx', .bed_reset_subset_samples_by_idx()) .bed_sample_IDs_to_idx <- function() { bed_sample_IDs_to_idx <- plinker:::bed_sample_IDs_to_idx 
bo <- bed_open(plinker:::fetch_sample_bed()) fam_df <- bed_fam_df(bo) sample_ids <- bed_sample_IDs(bo) sample_iids <- bed_sample_IDs(bo, ignore_fid = TRUE) expect_error(bed_sample_IDs_to_idx(bo, NULL), 'empty sample IDs') expect_error(bed_sample_IDs_to_idx(bo, sample_ids[c(1,1)]), 'duplicated') expect_error(bed_sample_IDs_to_idx(bo, fam_df$IID), 'bad sample IDs') expect_identical(bed_sample_IDs_to_idx(bo, rev(sample_ids)), nrow(fam_df):1) expect_identical(bed_sample_IDs_to_idx(bo, sample_iids, ignore_fid = TRUE), 1:nrow(fam_df)) expect_identical(bed_sample_IDs_to_idx(bo, sample_ids[51]), 51L) ### subsetting idx <- c(21:40, 7, 69) bo2 <- bed_subset_samples_by_idx(bo, idx) # out of range expect_error(bed_sample_IDs_to_idx(bo2, sample_ids), 'bad sample IDs') expect_identical(bed_sample_IDs_to_idx(bo2, bed_sample_IDs(bo2)[13:5]), 13:5) expect_error(bed_sample_IDs_to_idx(bo2, bed_sample_IDs(bo2, ignore_fid = TRUE)[13:5]), 'bad sample IDs') expect_identical(bed_sample_IDs_to_idx(bo2, bed_sample_IDs(bo2, ignore_fid = TRUE)[13:5], ignore_fid = TRUE), 13:5) } test_that('bed_sample_IDs_to_idx', .bed_sample_IDs_to_idx()) .bed_subset_samples_by_IDs <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) nb <- bed_nb_samples(bo) expect_equal(nb, 89) ### edge cases expect_error(bed_subset_samples_by_IDs(bo, NULL), 'empty sample IDs') expect_error(bed_subset_samples_by_IDs(bo, "toto"), 'bad sample ID') ids <- bed_sample_IDs(bo) bo2 <- bed_subset_samples_by_IDs(bo, ids[40:30]) expect_equal(bed_nb_samples(bo2), 11) expect_equal(bed_nb_samples(bo2, subset = FALSE), 89) expect_identical(bed_sample_idx(bo2), 40:30) ### recursive subsetting ids3 <- ids[35:38] bo3 <- bed_subset_samples_by_IDs(bo2, ids3) expect_equal(bed_nb_samples(bo3), 4) expect_equal(bed_nb_samples(bo3, subset = FALSE), 89) expect_identical(bed_sample_idx(bo3), 35:38) expect_identical(bed_sample_IDs(bo3), ids3) bo4 <- bed_subset_samples_by_IDs(bo3, ids3[2]) expect_equal(bed_nb_samples(bo4), 1) 
expect_identical(bed_sample_idx(bo4), 36L) expect_identical(bed_sample_IDs(bo4), ids3[2]) bo4 <- bed_subset_samples_by_IDs(bo3, ids3[1:2], invert = TRUE) expect_identical(bed_sample_IDs(bo4), ids3[3:4]) } test_that('bed_subset_samples_by_IDs', .bed_subset_samples_by_IDs()) .bed_subset_snps_by_IDs <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) nb <- bed_nb_snps(bo) expect_equal(nb, 17) ### edge cases expect_error(bed_subset_snps_by_IDs(bo, NULL), 'empty snp_idx') expect_error(bed_subset_snps_by_IDs(bo, "toto"), 'bad snp_IDs') bim_df <- bed_bim_df(bo) idx <- c(5:12, 1L, 16L) ids <- bim_df$SNP[idx] bo2 <- bed_subset_snps_by_IDs(bo, ids) expect_equal(bed_nb_snps(bo2), 10) expect_equal(bed_nb_snps(bo2, subset = FALSE), 17) expect_identical(bed_snp_idx(bo2), idx) expect_identical(bed_snp_IDs(bo2), ids) df <- bed_bim_df(bo2) expect_equal(rownames(df), as.character(idx)) expect_identical(df, bed_bim_df(bo2, subset = FALSE)[idx, ]) # invert bo2 <- bed_subset_snps_by_IDs(bo, ids, invert = TRUE) expect_identical(bed_snp_IDs(bo2), setdiff(bed_snp_IDs(bo), ids)) ### recursive subsetting ids2 <- ids[c(2, 4:7, 9)] bo2 <- bed_subset_snps_by_IDs(bo, ids) bo3 <- bed_subset_snps_by_IDs(bo2, ids2) expect_equal(bed_nb_snps(bo3), length(ids2)) expect_equal(bed_nb_snps(bo3, subset = FALSE), 17) expect_identical(sort(bim_df$SNP[bed_snp_idx(bo3)]), sort(ids2)) expect_identical(sort(bed_snp_IDs(bo3)), sort(unique(ids2))) bo4 <- bed_subset_snps_by_IDs(bo3, "rs10105623") expect_equal(bed_nb_snps(bo4), 1) expect_identical(bim_df$SNP[bed_snp_idx(bo4)], "rs10105623") expect_identical(bed_snp_IDs(bo4), "rs10105623") expect_identical(bed_snp_IDs(bo4, subset = FALSE), bim_df$SNP) # invert bo4 <- bed_subset_snps_by_IDs(bo3, c("rs7835221", "rs2460911", "rs12156420"), invert = TRUE) expect_identical(bed_snp_IDs(bo4), c("rs10105623", "rs17786052", "rs17121574")) } test_that('bed_subset_snps_by_IDs', .bed_subset_snps_by_IDs()) .bed_subset_snps_by_idx <- function() { bo <- 
bed_open(plinker:::fetch_sample_bed()) nb <- bed_nb_snps(bo) expect_equal(nb, 17) ### edge cases expect_error(bed_subset_snps_by_idx(bo, NULL), 'empty snp_idx') expect_error(bed_subset_snps_by_idx(bo, 20), 'bad snp_idx range') expect_error(bed_subset_snps_by_idx(bo, c(1, 2, 1)), 'duplicated') expect_error(bed_subset_snps_by_idx(bo, c(1, NA)), 'missing') # invert bo2 <- bed_subset_snps_by_idx(bo, 11:17, invert = TRUE) expect_identical(bed_snp_idx(bo2), 1:10) bo2 <- bed_subset_snps_by_idx(bo, c(3, 5, 16)) expect_equal(bed_nb_snps(bo2), 3) expect_equal(bed_nb_snps(bo2, subset = FALSE), 17) expect_identical(bed_snp_idx(bo2), c(3L, 5L, 16L)) df <- bed_bim_df(bo2) expect_equal(rownames(df), as.character(c(3L, 5L, 16L))) expect_identical(df, bed_bim_df(bo2, subset = FALSE)[c(3L, 5L, 16L), ]) ### recursive subsetting bo2 <- bed_subset_snps_by_idx(bo, c(3, 5, 16)) bo3 <- bed_subset_snps_by_idx(bo2, c(3, 1)) expect_equal(bed_nb_snps(bo3), 2) expect_equal(bed_nb_snps(bo3, subset = FALSE), 17) expect_identical(bed_snp_idx(bo3), c(16L, 3L)) bo4 <- bed_subset_snps_by_idx(bo3, 2) expect_equal(bed_nb_snps(bo4), 1) expect_identical(bed_snp_idx(bo4), 3L) # invert bo4 <- bed_subset_snps_by_idx(bo3, 1, invert = TRUE) expect_identical(bed_snp_idx(bo4), 3L) } test_that('bed_subset_snps_by_idx', .bed_subset_snps_by_idx()) .bed_reset_subset_snps_by_idx <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) expect_equal(bed_nb_snps(bo), 17) bo2 <- bed_subset_snps_by_idx(bo, c(3, 5, 16)) expect_equal(bed_nb_snps(bo2), 3) bo3 <- bed_reset_subset_snps_by_idx(bo2) expect_equal(bed_nb_snps(bo3), 17) } test_that('bed_reset_subset_snps_by_idx', .bed_reset_subset_snps_by_idx())
/plinker/tests/testthat/test-subset.R
no_license
quartzbio/plinker_pkg
R
false
false
10,128
r
context('subsetting') .bed_subset <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) snp_ids <- bed_bim_df(bo)$SNP sample_ids <- bed_sample_IDs(bo) ### edge cases expect_error(bed_subset(bo, snp_IDs = snp_ids, snp_idx = 1), "incompatible parameters") expect_error(bed_subset(bo, sample_IDs = sample_ids, sample_idx = 1), "incompatible parameters") expect_error(bed_subset(bo, sample_idx = 1, snp_idx = 1, invert = TRUE), "can not use both SNP and sample subsetting with invert=TRUE") expect_identical(bed_subset(bo, snp_IDs = snp_ids[10:15]), bed_subset_snps_by_IDs(bo, snp_ids[10:15])) expect_identical(bed_subset(bo, snp_idx = 3), bed_subset_snps_by_idx(bo, 3)) expect_identical(bed_subset(bo, sample_IDs = sample_ids[10:15]), bed_subset_samples_by_IDs(bo, sample_ids[10:15])) expect_identical(bed_subset(bo, sample_idx = 3), bed_subset_samples_by_idx(bo, 3)) # invert expect_identical(bed_subset(bo, snp_IDs = snp_ids[1:10], invert = TRUE), bed_subset_snps_by_IDs(bo, snp_ids[11:17])) expect_identical(bed_subset(bo, snp_idx = 17:11, invert = TRUE), bed_subset_snps_by_idx(bo, 1:10)) expect_identical(bed_subset(bo, sample_IDs = sample_ids[1:10], invert = TRUE), bed_subset_samples_by_IDs(bo, sample_ids[11:89])) expect_identical(bed_subset(bo, sample_idx = 81:89, invert = TRUE), bed_subset_samples_by_idx(bo, 1:80)) bo2 <- bed_subset(bo, snp_idx = 3:9, sample_IDs = sample_ids[10:15]) expect_identical(bed_snp_idx(bo2), 3:9) expect_identical(bed_sample_idx(bo2), 10:15) bo3 <- bed_subset(bo2, snp_IDs = snp_ids[5:6], sample_idx = 4:2) expect_identical(bed_snp_idx(bo3), 5:6) expect_identical(bed_sample_idx(bo3), (10:15)[4:2]) # invert bo3 <- bed_subset(bo2, snp_IDs = snp_ids[5:6], invert = TRUE) expect_identical(bed_snp_IDs(bo3), snp_ids[c(3:4, 7:9)]) bo3 <- bed_subset(bo2, sample_idx = 4:2, invert = TRUE) expect_identical(bed_sample_idx(bo3), c(10L, 14L, 15L)) } test_that('bed_subset', .bed_subset()) .bed_subset_samples_by_idx <- function() { bo <- 
bed_open(plinker:::fetch_sample_bed()) nb <- bed_nb_samples(bo) expect_equal(nb, 89) ### edge cases expect_error(bed_subset_samples_by_idx(bo, NULL), 'empty sample_idx') expect_error(bed_subset_samples_by_idx(bo, 100), 'bad sample_idx range') # duplicates expect_error(bed_subset_samples_by_idx(bo, c(1, 2, 1)), 'duplicated') expect_error(bed_subset_samples_by_idx(bo, c(1, NA)), 'missing') bo2 <- bed_subset_samples_by_idx(bo, c(21:35)) expect_equal(bed_nb_samples(bo2), 15) expect_equal(bed_nb_samples(bo2, subset = FALSE), 89) expect_identical(bed_sample_idx(bo2), 21:35) df <- bed_fam_df(bo2) expect_equal(rownames(df), as.character(21:35)) expect_identical(df, bed_fam_df(bo2, subset = FALSE)[21:35, ]) # invert N.B: ordering is meaningless bo2 <- bed_subset_samples_by_idx(bo, 89:11, invert = TRUE) expect_identical(bed_sample_idx(bo2), 1:10) ### recursive subsetting bo2 <- bed_subset_samples_by_idx(bo, c(21:35)) bo3 <- bed_subset_samples_by_idx(bo2, c(3, 1, 5)) expect_equal(bed_nb_samples(bo3), 3) expect_equal(bed_nb_samples(bo3, subset = FALSE), 89) expect_identical(bed_sample_idx(bo3), c(23L, 21L, 25L)) bo4 <- bed_subset_samples_by_idx(bo3, 2) expect_equal(bed_nb_samples(bo4), 1) expect_identical(bed_sample_idx(bo4), 21L) # invert bo4 <- bed_subset_samples_by_idx(bo3, 2:3, invert = TRUE) expect_identical(bed_sample_idx(bo4), 23L) ### ordering bo2 <- bed_subset_samples_by_idx(bo, 10:5) expect_identical(bed_sample_idx(bo2), 10:5) } test_that('bed_subset_samples_by_idx', .bed_subset_samples_by_idx()) .bed_reset_subset_samples_by_idx <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) expect_equal(bed_nb_samples(bo), 89) bo2 <- bed_subset_samples_by_idx(bo, c(3, 5, 16)) expect_equal(bed_nb_samples(bo2), 3) bo3 <- bed_reset_subset_samples_by_idx(bo2) expect_equal(bed_nb_samples(bo3), 89) } test_that('bed_reset_subset_samples_by_idx', .bed_reset_subset_samples_by_idx()) .bed_sample_IDs_to_idx <- function() { bed_sample_IDs_to_idx <- plinker:::bed_sample_IDs_to_idx 
bo <- bed_open(plinker:::fetch_sample_bed()) fam_df <- bed_fam_df(bo) sample_ids <- bed_sample_IDs(bo) sample_iids <- bed_sample_IDs(bo, ignore_fid = TRUE) expect_error(bed_sample_IDs_to_idx(bo, NULL), 'empty sample IDs') expect_error(bed_sample_IDs_to_idx(bo, sample_ids[c(1,1)]), 'duplicated') expect_error(bed_sample_IDs_to_idx(bo, fam_df$IID), 'bad sample IDs') expect_identical(bed_sample_IDs_to_idx(bo, rev(sample_ids)), nrow(fam_df):1) expect_identical(bed_sample_IDs_to_idx(bo, sample_iids, ignore_fid = TRUE), 1:nrow(fam_df)) expect_identical(bed_sample_IDs_to_idx(bo, sample_ids[51]), 51L) ### subsetting idx <- c(21:40, 7, 69) bo2 <- bed_subset_samples_by_idx(bo, idx) # out of range expect_error(bed_sample_IDs_to_idx(bo2, sample_ids), 'bad sample IDs') expect_identical(bed_sample_IDs_to_idx(bo2, bed_sample_IDs(bo2)[13:5]), 13:5) expect_error(bed_sample_IDs_to_idx(bo2, bed_sample_IDs(bo2, ignore_fid = TRUE)[13:5]), 'bad sample IDs') expect_identical(bed_sample_IDs_to_idx(bo2, bed_sample_IDs(bo2, ignore_fid = TRUE)[13:5], ignore_fid = TRUE), 13:5) } test_that('bed_sample_IDs_to_idx', .bed_sample_IDs_to_idx()) .bed_subset_samples_by_IDs <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) nb <- bed_nb_samples(bo) expect_equal(nb, 89) ### edge cases expect_error(bed_subset_samples_by_IDs(bo, NULL), 'empty sample IDs') expect_error(bed_subset_samples_by_IDs(bo, "toto"), 'bad sample ID') ids <- bed_sample_IDs(bo) bo2 <- bed_subset_samples_by_IDs(bo, ids[40:30]) expect_equal(bed_nb_samples(bo2), 11) expect_equal(bed_nb_samples(bo2, subset = FALSE), 89) expect_identical(bed_sample_idx(bo2), 40:30) ### recursive subsetting ids3 <- ids[35:38] bo3 <- bed_subset_samples_by_IDs(bo2, ids3) expect_equal(bed_nb_samples(bo3), 4) expect_equal(bed_nb_samples(bo3, subset = FALSE), 89) expect_identical(bed_sample_idx(bo3), 35:38) expect_identical(bed_sample_IDs(bo3), ids3) bo4 <- bed_subset_samples_by_IDs(bo3, ids3[2]) expect_equal(bed_nb_samples(bo4), 1) 
expect_identical(bed_sample_idx(bo4), 36L) expect_identical(bed_sample_IDs(bo4), ids3[2]) bo4 <- bed_subset_samples_by_IDs(bo3, ids3[1:2], invert = TRUE) expect_identical(bed_sample_IDs(bo4), ids3[3:4]) } test_that('bed_subset_samples_by_IDs', .bed_subset_samples_by_IDs()) .bed_subset_snps_by_IDs <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) nb <- bed_nb_snps(bo) expect_equal(nb, 17) ### edge cases expect_error(bed_subset_snps_by_IDs(bo, NULL), 'empty snp_idx') expect_error(bed_subset_snps_by_IDs(bo, "toto"), 'bad snp_IDs') bim_df <- bed_bim_df(bo) idx <- c(5:12, 1L, 16L) ids <- bim_df$SNP[idx] bo2 <- bed_subset_snps_by_IDs(bo, ids) expect_equal(bed_nb_snps(bo2), 10) expect_equal(bed_nb_snps(bo2, subset = FALSE), 17) expect_identical(bed_snp_idx(bo2), idx) expect_identical(bed_snp_IDs(bo2), ids) df <- bed_bim_df(bo2) expect_equal(rownames(df), as.character(idx)) expect_identical(df, bed_bim_df(bo2, subset = FALSE)[idx, ]) # invert bo2 <- bed_subset_snps_by_IDs(bo, ids, invert = TRUE) expect_identical(bed_snp_IDs(bo2), setdiff(bed_snp_IDs(bo), ids)) ### recursive subsetting ids2 <- ids[c(2, 4:7, 9)] bo2 <- bed_subset_snps_by_IDs(bo, ids) bo3 <- bed_subset_snps_by_IDs(bo2, ids2) expect_equal(bed_nb_snps(bo3), length(ids2)) expect_equal(bed_nb_snps(bo3, subset = FALSE), 17) expect_identical(sort(bim_df$SNP[bed_snp_idx(bo3)]), sort(ids2)) expect_identical(sort(bed_snp_IDs(bo3)), sort(unique(ids2))) bo4 <- bed_subset_snps_by_IDs(bo3, "rs10105623") expect_equal(bed_nb_snps(bo4), 1) expect_identical(bim_df$SNP[bed_snp_idx(bo4)], "rs10105623") expect_identical(bed_snp_IDs(bo4), "rs10105623") expect_identical(bed_snp_IDs(bo4, subset = FALSE), bim_df$SNP) # invert bo4 <- bed_subset_snps_by_IDs(bo3, c("rs7835221", "rs2460911", "rs12156420"), invert = TRUE) expect_identical(bed_snp_IDs(bo4), c("rs10105623", "rs17786052", "rs17121574")) } test_that('bed_subset_snps_by_IDs', .bed_subset_snps_by_IDs()) .bed_subset_snps_by_idx <- function() { bo <- 
bed_open(plinker:::fetch_sample_bed()) nb <- bed_nb_snps(bo) expect_equal(nb, 17) ### edge cases expect_error(bed_subset_snps_by_idx(bo, NULL), 'empty snp_idx') expect_error(bed_subset_snps_by_idx(bo, 20), 'bad snp_idx range') expect_error(bed_subset_snps_by_idx(bo, c(1, 2, 1)), 'duplicated') expect_error(bed_subset_snps_by_idx(bo, c(1, NA)), 'missing') # invert bo2 <- bed_subset_snps_by_idx(bo, 11:17, invert = TRUE) expect_identical(bed_snp_idx(bo2), 1:10) bo2 <- bed_subset_snps_by_idx(bo, c(3, 5, 16)) expect_equal(bed_nb_snps(bo2), 3) expect_equal(bed_nb_snps(bo2, subset = FALSE), 17) expect_identical(bed_snp_idx(bo2), c(3L, 5L, 16L)) df <- bed_bim_df(bo2) expect_equal(rownames(df), as.character(c(3L, 5L, 16L))) expect_identical(df, bed_bim_df(bo2, subset = FALSE)[c(3L, 5L, 16L), ]) ### recursive subsetting bo2 <- bed_subset_snps_by_idx(bo, c(3, 5, 16)) bo3 <- bed_subset_snps_by_idx(bo2, c(3, 1)) expect_equal(bed_nb_snps(bo3), 2) expect_equal(bed_nb_snps(bo3, subset = FALSE), 17) expect_identical(bed_snp_idx(bo3), c(16L, 3L)) bo4 <- bed_subset_snps_by_idx(bo3, 2) expect_equal(bed_nb_snps(bo4), 1) expect_identical(bed_snp_idx(bo4), 3L) # invert bo4 <- bed_subset_snps_by_idx(bo3, 1, invert = TRUE) expect_identical(bed_snp_idx(bo4), 3L) } test_that('bed_subset_snps_by_idx', .bed_subset_snps_by_idx()) .bed_reset_subset_snps_by_idx <- function() { bo <- bed_open(plinker:::fetch_sample_bed()) expect_equal(bed_nb_snps(bo), 17) bo2 <- bed_subset_snps_by_idx(bo, c(3, 5, 16)) expect_equal(bed_nb_snps(bo2), 3) bo3 <- bed_reset_subset_snps_by_idx(bo2) expect_equal(bed_nb_snps(bo3), 17) } test_that('bed_reset_subset_snps_by_idx', .bed_reset_subset_snps_by_idx())
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Paket_alle_Funktionen_3-26_21Juni2019.R \name{mmmgee} \alias{mmmgee} \title{Covariance Matrix Estimation for Multiple Marginal GEE Models} \usage{ mmmgee(x, biascorr = FALSE) } \arguments{ \item{x}{a list of \code{geem} objects fitted with \code{geem2}. The \code{geem} objects must be different models calculated with data from the same subjects. In particular, the parameter \code{id} in the call to \code{geem2} must refer to the same subjects in each model.} \item{biascorr}{logical, if \code{TRUE}, a bias corrected covariance matrix is calculate by extending the method due to Mancl and DeRouen to multiple models. See references.} } \value{ A list with class \code{mmmgee} containing the following components: \describe{ \item{\code{beta}}{The stacked vector of regression coefficient estimates from the models in \code{x}.} \item{\code{V}}{The estimated covariance matrix of the regression coefficient estimates.} \item{\code{A}}{The outer component of \eqn{V=ABA}.} \item{\code{B}}{The inner component of \eqn{V=ABA}.} \item{\code{biascorr}}{The value of the input argument \code{biascorr} (logical).} \item{\code{n}}{A vector with the number of clusters in each model in \code{x}.} \item{\code{p}}{A vector with number of regression coefficients in each model in \code{x}.} } } \description{ Calculate the covariance matrix for a stacked vector of regression coefficients from multiple marginal GEE models fitted with \code{\link{geem2}}. } \examples{ data(keratosis) m1<-geem2(clearance~trt,id=id,data=keratosis,family=binomial,corstr="independence") m2<-geem2(pain~trt,id=id,data=keratosis[keratosis$lesion==1,],family=gaussian,corstr="independence") mmmgee(x=list(m1,m2),biascorr=TRUE) } \references{ Lloyd A. Mancl, Timothy A. DeRouen. A covariance estimator for GEE with improved small sample properties. Biometrics, 2001, 57(1):126-134. 
} \seealso{ \code{\link{geem2}}, \code{\link{mmmgee.test}} } \author{ Robin Ristl, \email{robin.ristl@meduniwien.ac.at} }
/man/mmmgee.Rd
no_license
cran/mmmgee
R
false
true
2,103
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Paket_alle_Funktionen_3-26_21Juni2019.R \name{mmmgee} \alias{mmmgee} \title{Covariance Matrix Estimation for Multiple Marginal GEE Models} \usage{ mmmgee(x, biascorr = FALSE) } \arguments{ \item{x}{a list of \code{geem} objects fitted with \code{geem2}. The \code{geem} objects must be different models calculated with data from the same subjects. In particular, the parameter \code{id} in the call to \code{geem2} must refer to the same subjects in each model.} \item{biascorr}{logical, if \code{TRUE}, a bias corrected covariance matrix is calculate by extending the method due to Mancl and DeRouen to multiple models. See references.} } \value{ A list with class \code{mmmgee} containing the following components: \describe{ \item{\code{beta}}{The stacked vector of regression coefficient estimates from the models in \code{x}.} \item{\code{V}}{The estimated covariance matrix of the regression coefficient estimates.} \item{\code{A}}{The outer component of \eqn{V=ABA}.} \item{\code{B}}{The inner component of \eqn{V=ABA}.} \item{\code{biascorr}}{The value of the input argument \code{biascorr} (logical).} \item{\code{n}}{A vector with the number of clusters in each model in \code{x}.} \item{\code{p}}{A vector with number of regression coefficients in each model in \code{x}.} } } \description{ Calculate the covariance matrix for a stacked vector of regression coefficients from multiple marginal GEE models fitted with \code{\link{geem2}}. } \examples{ data(keratosis) m1<-geem2(clearance~trt,id=id,data=keratosis,family=binomial,corstr="independence") m2<-geem2(pain~trt,id=id,data=keratosis[keratosis$lesion==1,],family=gaussian,corstr="independence") mmmgee(x=list(m1,m2),biascorr=TRUE) } \references{ Lloyd A. Mancl, Timothy A. DeRouen. A covariance estimator for GEE with improved small sample properties. Biometrics, 2001, 57(1):126-134. 
} \seealso{ \code{\link{geem2}}, \code{\link{mmmgee.test}} } \author{ Robin Ristl, \email{robin.ristl@meduniwien.ac.at} }
library(rerddap) ### Name: tabledap ### Title: Get ERDDAP tabledap data. ### Aliases: tabledap ### ** Examples ## Not run: ##D # Just passing the datasetid without fields gives all columns back ##D tabledap('erdCinpKfmBT') ##D ##D # Pass time constraints ##D tabledap('hawaii_soest_5742_4f35_ff55', 'time>=2011-08-24', 'time<=2011-09-01') ##D ##D # Pass in fields (i.e., columns to retrieve) & time constraints ##D tabledap('hawaii_soest_5742_4f35_ff55', ##D fields = c('longitude', 'latitude', 'speed_over_ground'), ##D 'time>=2011-08-24', 'time<=2011-09-01' ##D ) ##D ##D # Get info on a datasetid, then get data given information learned ##D info('erdCalCOFIlrvsiz')$variables ##D tabledap('erdCalCOFIlrvsiz', fields=c('latitude','longitude','larvae_size', ##D 'itis_tsn'), 'time>=2011-10-25', 'time<=2011-10-31') ##D ##D # An example workflow ##D ## Search for data ##D (out <- ed_search(query='fish', which = 'table')) ##D ## Using a datasetid, search for information on a datasetid ##D id <- "nwioosHudFishDetails" ##D info(id)$variables ##D ## Get data from the dataset ##D tabledap(id, fields = c('scientific_name', 'species_id', 'life_stage')) ##D ##D # Time constraint ##D ## Limit by time with date only ##D (info <- info('erdCinpKfmBT')) ##D tabledap(info, fields = c( ##D 'latitude','longitude','Haliotis_fulgens_Mean_Density'), ##D 'time>=2001-07-14') ##D ##D # Use distinct parameter - compare to distinct = FALSE ##D tabledap('hawaii_soest_5742_4f35_ff55', ##D fields=c('longitude','latitude','speed_over_ground'), ##D 'time>=2011-08-24', 'time<=2011-09-01', distinct = TRUE) ##D ##D # Use units parameter ##D ## In this example, values are the same, but sometimes they can be different ##D ## given the units value passed ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', units='udunits') ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 
'time<=2007-09-21', units='ucum') ##D ##D # Use orderby parameter ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', orderby='temperature') ##D # Use orderbymax parameter ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', orderbymax='temperature') ##D # Use orderbymin parameter ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', orderbymin='temperature') ##D # Use orderbyminmax parameter ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', orderbyminmax='temperature') ##D # Use orderbymin parameter with multiple values ##D tabledap('erdCinpKfmT', ##D fields=c('longitude','latitude','time','depth','temperature'), ##D 'time>=2007-06-10', 'time<=2007-09-21', ##D orderbymax=c('depth','temperature') ##D ) ##D ##D # Integrate with taxize ##D out <- tabledap('erdCalCOFIlrvcntHBtoHI', ##D fields = c('latitude','longitude','scientific_name','itis_tsn'), ##D 'time>=2007-06-10', 'time<=2007-09-21' ##D ) ##D tsns <- unique(out$itis_tsn[1:100]) ##D library("taxize") ##D classif <- classification(tsns, db = "itis") ##D head(rbind(classif)); tail(rbind(classif)) ##D ##D # Write to memory (within R), or to disk ##D (out <- info('erdCinpKfmBT')) ##D ## disk, by default (to prevent bogging down system w/ large datasets) ##D ## the 2nd call is much faster as it's mostly just the time of reading ##D ## in the table from disk ##D system.time( tabledap('erdCinpKfmBT', store = disk()) ) ##D system.time( tabledap('erdCinpKfmBT', store = disk()) ) ##D ## memory ##D tabledap('erdCinpKfmBT', store = memory()) ##D ##D # use a different ERDDAP server ##D ## NOAA IOOS NERACOOS ##D url <- "http://www.neracoos.org/erddap/" ##D tabledap("E01_optics_hist", url = url) ## End(Not run)
/data/genthat_extracted_code/rerddap/examples/tabledap.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
4,038
r
library(rerddap) ### Name: tabledap ### Title: Get ERDDAP tabledap data. ### Aliases: tabledap ### ** Examples ## Not run: ##D # Just passing the datasetid without fields gives all columns back ##D tabledap('erdCinpKfmBT') ##D ##D # Pass time constraints ##D tabledap('hawaii_soest_5742_4f35_ff55', 'time>=2011-08-24', 'time<=2011-09-01') ##D ##D # Pass in fields (i.e., columns to retrieve) & time constraints ##D tabledap('hawaii_soest_5742_4f35_ff55', ##D fields = c('longitude', 'latitude', 'speed_over_ground'), ##D 'time>=2011-08-24', 'time<=2011-09-01' ##D ) ##D ##D # Get info on a datasetid, then get data given information learned ##D info('erdCalCOFIlrvsiz')$variables ##D tabledap('erdCalCOFIlrvsiz', fields=c('latitude','longitude','larvae_size', ##D 'itis_tsn'), 'time>=2011-10-25', 'time<=2011-10-31') ##D ##D # An example workflow ##D ## Search for data ##D (out <- ed_search(query='fish', which = 'table')) ##D ## Using a datasetid, search for information on a datasetid ##D id <- "nwioosHudFishDetails" ##D info(id)$variables ##D ## Get data from the dataset ##D tabledap(id, fields = c('scientific_name', 'species_id', 'life_stage')) ##D ##D # Time constraint ##D ## Limit by time with date only ##D (info <- info('erdCinpKfmBT')) ##D tabledap(info, fields = c( ##D 'latitude','longitude','Haliotis_fulgens_Mean_Density'), ##D 'time>=2001-07-14') ##D ##D # Use distinct parameter - compare to distinct = FALSE ##D tabledap('hawaii_soest_5742_4f35_ff55', ##D fields=c('longitude','latitude','speed_over_ground'), ##D 'time>=2011-08-24', 'time<=2011-09-01', distinct = TRUE) ##D ##D # Use units parameter ##D ## In this example, values are the same, but sometimes they can be different ##D ## given the units value passed ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', units='udunits') ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 
'time<=2007-09-21', units='ucum') ##D ##D # Use orderby parameter ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', orderby='temperature') ##D # Use orderbymax parameter ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', orderbymax='temperature') ##D # Use orderbymin parameter ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', orderbymin='temperature') ##D # Use orderbyminmax parameter ##D tabledap('erdCinpKfmT', fields=c('longitude','latitude','time','temperature'), ##D 'time>=2007-09-19', 'time<=2007-09-21', orderbyminmax='temperature') ##D # Use orderbymin parameter with multiple values ##D tabledap('erdCinpKfmT', ##D fields=c('longitude','latitude','time','depth','temperature'), ##D 'time>=2007-06-10', 'time<=2007-09-21', ##D orderbymax=c('depth','temperature') ##D ) ##D ##D # Integrate with taxize ##D out <- tabledap('erdCalCOFIlrvcntHBtoHI', ##D fields = c('latitude','longitude','scientific_name','itis_tsn'), ##D 'time>=2007-06-10', 'time<=2007-09-21' ##D ) ##D tsns <- unique(out$itis_tsn[1:100]) ##D library("taxize") ##D classif <- classification(tsns, db = "itis") ##D head(rbind(classif)); tail(rbind(classif)) ##D ##D # Write to memory (within R), or to disk ##D (out <- info('erdCinpKfmBT')) ##D ## disk, by default (to prevent bogging down system w/ large datasets) ##D ## the 2nd call is much faster as it's mostly just the time of reading ##D ## in the table from disk ##D system.time( tabledap('erdCinpKfmBT', store = disk()) ) ##D system.time( tabledap('erdCinpKfmBT', store = disk()) ) ##D ## memory ##D tabledap('erdCinpKfmBT', store = memory()) ##D ##D # use a different ERDDAP server ##D ## NOAA IOOS NERACOOS ##D url <- "http://www.neracoos.org/erddap/" ##D tabledap("E01_optics_hist", url = url) ## End(Not run)
library(Hmisc) ### Name: spower ### Title: Simulate Power of 2-Sample Test for Survival under Complex ### Conditions ### Aliases: spower print.spower Quantile2 print.Quantile2 plot.Quantile2 ### logrank Gompertz2 Lognorm2 Weibull2 ### Keywords: htest survival ### ** Examples # Simulate a simple 2-arm clinical trial with exponential survival so # we can compare power simulations of logrank-Cox test with cpower() # Hazard ratio is constant and patients enter the study uniformly # with follow-up ranging from 1 to 3 years # Drop-in probability is constant at .1 and drop-out probability is # constant at .175. Two-year survival of control patients in absence # of drop-in is .8 (mortality=.2). Note that hazard rate is -log(.8)/2 # Total sample size (both groups combined) is 1000 # % mortality reduction by intervention (if no dropin or dropout) is 25 # This corresponds to a hazard ratio of 0.7283 (computed by cpower) cpower(2, 1000, .2, 25, accrual=2, tmin=1, noncomp.c=10, noncomp.i=17.5) ranfun <- Quantile2(function(x)exp(log(.8)/2*x), hratio=function(x)0.7283156, dropin=function(x).1, dropout=function(x).175) rcontrol <- function(n) ranfun(n, what='control') rinterv <- function(n) ranfun(n, what='int') rcens <- function(n) runif(n, 1, 3) set.seed(11) # So can reproduce results spower(rcontrol, rinterv, rcens, nc=500, ni=500, test=logrank, nsim=50) # normally use nsim=500 or 1000 ## Not run: ##D # Run the same simulation but fit the Cox model for each one to ##D # get log hazard ratios for the purpose of assessing the tightness ##D # confidence intervals that are likely to result ##D ##D set.seed(11) ##D u <- spower(rcontrol, rinterv, rcens, nc=500, ni=500, ##D test=logrank, nsim=50, cox=TRUE) ##D u ##D v <- print(u) ##D v[c('MOElower','MOEupper','SE')] ## End(Not run) # Simulate a 2-arm 5-year follow-up study for which the control group's # survival distribution is Weibull with 1-year survival of .95 and # 3-year survival of .7. 
All subjects are followed at least one year, # and patients enter the study with linearly increasing probability after that # Assume there is no chance of dropin for the first 6 months, then the # probability increases linearly up to .15 at 5 years # Assume there is a linearly increasing chance of dropout up to .3 at 5 years # Assume that the treatment has no effect for the first 9 months, then # it has a constant effect (hazard ratio of .75) # First find the right Weibull distribution for compliant control patients sc <- Weibull2(c(1,3), c(.95,.7)) sc # Inverse cumulative distribution for case where all subjects are followed # at least a years and then between a and b years the density rises # as (time - a) ^ d is a + (b-a) * u ^ (1/(d+1)) rcens <- function(n) 1 + (5-1) * (runif(n) ^ .5) # To check this, type hist(rcens(10000), nclass=50) # Put it all together f <- Quantile2(sc, hratio=function(x)ifelse(x<=.75, 1, .75), dropin=function(x)ifelse(x<=.5, 0, .15*(x-.5)/(5-.5)), dropout=function(x).3*x/5) par(mfrow=c(2,2)) # par(mfrow=c(1,1)) to make legends fit plot(f, 'all', label.curves=list(keys='lines')) rcontrol <- function(n) f(n, 'control') rinterv <- function(n) f(n, 'intervention') set.seed(211) spower(rcontrol, rinterv, rcens, nc=350, ni=350, test=logrank, nsim=50) # normally nsim=500 or more par(mfrow=c(1,1)) # Compose a censoring time generator function such that at 1 year # 5% of subjects are accrued, at 3 years 70% are accured, and at 10 # years 100% are accrued. The trial proceeds two years past the last # accrual for a total of 12 years of follow-up for the first subject. 
# Use linear interporation between these 3 points rcens <- function(n) { times <- c(0,1,3,10) accrued <- c(0,.05,.7,1) # Compute inverse of accrued function at U(0,1) random variables accrual.times <- approx(accrued, times, xout=runif(n))$y censor.times <- 12 - accrual.times censor.times } censor.times <- rcens(500) # hist(censor.times, nclass=20) accrual.times <- 12 - censor.times # Ecdf(accrual.times) # lines(c(0,1,3,10), c(0,.05,.7,1), col='red') # spower(..., rcens=rcens, ...) ## Not run: ##D # To define a control survival curve from a fitted survival curve ##D # with coordinates (tt, surv) with tt[1]=0, surv[1]=1: ##D ##D Scontrol <- function(times, tt, surv) approx(tt, surv, xout=times)$y ##D tt <- 0:6 ##D surv <- c(1, .9, .8, .75, .7, .65, .64) ##D formals(Scontrol) <- list(times=NULL, tt=tt, surv=surv) ##D ##D # To use a mixture of two survival curves, with e.g. mixing proportions ##D # of .2 and .8, use the following as a guide: ##D # ##D # Scontrol <- function(times, t1, s1, t2, s2) ##D # .2*approx(t1, s1, xout=times)$y + .8*approx(t2, s2, xout=times)$y ##D # t1 <- ...; s1 <- ...; t2 <- ...; s2 <- ...; ##D # formals(Scontrol) <- list(times=NULL, t1=t1, s1=s1, t2=t2, s2=s2) ##D ##D # Check that spower can detect a situation where generated censoring times ##D # are later than all failure times ##D ##D rcens <- function(n) runif(n, 0, 7) ##D f <- Quantile2(scontrol=Scontrol, hratio=function(x).8, tmax=6) ##D cont <- function(n) f(n, what='control') ##D int <- function(n) f(n, what='intervention') ##D spower(rcontrol=cont, rinterv=int, rcens=rcens, nc=300, ni=300, nsim=20) ##D ##D # Do an unstratified logrank test ##D library(survival) ##D # From SAS/STAT PROC LIFETEST manual, p. 
1801 ##D days <- c(179,256,262,256,255,224,225,287,319,264,237,156,270,257,242, ##D 157,249,180,226,268,378,355,319,256,171,325,325,217,255,256, ##D 291,323,253,206,206,237,211,229,234,209) ##D status <- c(1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,1,1,1,1,0, ##D 0,rep(1,19)) ##D treatment <- c(rep(1,10), rep(2,10), rep(1,10), rep(2,10)) ##D sex <- Cs(F,F,M,F,M,F,F,M,M,M,F,F,M,M,M,F,M,F,F,M, ##D M,M,M,M,F,M,M,F,F,F,M,M,M,F,F,M,F,F,F,F) ##D data.frame(days, status, treatment, sex) ##D table(treatment, status) ##D logrank(Surv(days, status), treatment) # agrees with p. 1807 ##D # For stratified tests the picture is puzzling. ##D # survdiff(Surv(days,status) ~ treatment + strata(sex))$chisq ##D # is 7.246562, which does not agree with SAS (7.1609) ##D # But summary(coxph(Surv(days,status) ~ treatment + strata(sex))) ##D # yields 7.16 whereas summary(coxph(Surv(days,status) ~ treatment)) ##D # yields 5.21 as the score test, not agreeing with SAS or logrank() (5.6485) ## End(Not run)
/data/genthat_extracted_code/Hmisc/examples/spower.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
6,497
r
library(Hmisc) ### Name: spower ### Title: Simulate Power of 2-Sample Test for Survival under Complex ### Conditions ### Aliases: spower print.spower Quantile2 print.Quantile2 plot.Quantile2 ### logrank Gompertz2 Lognorm2 Weibull2 ### Keywords: htest survival ### ** Examples # Simulate a simple 2-arm clinical trial with exponential survival so # we can compare power simulations of logrank-Cox test with cpower() # Hazard ratio is constant and patients enter the study uniformly # with follow-up ranging from 1 to 3 years # Drop-in probability is constant at .1 and drop-out probability is # constant at .175. Two-year survival of control patients in absence # of drop-in is .8 (mortality=.2). Note that hazard rate is -log(.8)/2 # Total sample size (both groups combined) is 1000 # % mortality reduction by intervention (if no dropin or dropout) is 25 # This corresponds to a hazard ratio of 0.7283 (computed by cpower) cpower(2, 1000, .2, 25, accrual=2, tmin=1, noncomp.c=10, noncomp.i=17.5) ranfun <- Quantile2(function(x)exp(log(.8)/2*x), hratio=function(x)0.7283156, dropin=function(x).1, dropout=function(x).175) rcontrol <- function(n) ranfun(n, what='control') rinterv <- function(n) ranfun(n, what='int') rcens <- function(n) runif(n, 1, 3) set.seed(11) # So can reproduce results spower(rcontrol, rinterv, rcens, nc=500, ni=500, test=logrank, nsim=50) # normally use nsim=500 or 1000 ## Not run: ##D # Run the same simulation but fit the Cox model for each one to ##D # get log hazard ratios for the purpose of assessing the tightness ##D # confidence intervals that are likely to result ##D ##D set.seed(11) ##D u <- spower(rcontrol, rinterv, rcens, nc=500, ni=500, ##D test=logrank, nsim=50, cox=TRUE) ##D u ##D v <- print(u) ##D v[c('MOElower','MOEupper','SE')] ## End(Not run) # Simulate a 2-arm 5-year follow-up study for which the control group's # survival distribution is Weibull with 1-year survival of .95 and # 3-year survival of .7. 
All subjects are followed at least one year, # and patients enter the study with linearly increasing probability after that # Assume there is no chance of dropin for the first 6 months, then the # probability increases linearly up to .15 at 5 years # Assume there is a linearly increasing chance of dropout up to .3 at 5 years # Assume that the treatment has no effect for the first 9 months, then # it has a constant effect (hazard ratio of .75) # First find the right Weibull distribution for compliant control patients sc <- Weibull2(c(1,3), c(.95,.7)) sc # Inverse cumulative distribution for case where all subjects are followed # at least a years and then between a and b years the density rises # as (time - a) ^ d is a + (b-a) * u ^ (1/(d+1)) rcens <- function(n) 1 + (5-1) * (runif(n) ^ .5) # To check this, type hist(rcens(10000), nclass=50) # Put it all together f <- Quantile2(sc, hratio=function(x)ifelse(x<=.75, 1, .75), dropin=function(x)ifelse(x<=.5, 0, .15*(x-.5)/(5-.5)), dropout=function(x).3*x/5) par(mfrow=c(2,2)) # par(mfrow=c(1,1)) to make legends fit plot(f, 'all', label.curves=list(keys='lines')) rcontrol <- function(n) f(n, 'control') rinterv <- function(n) f(n, 'intervention') set.seed(211) spower(rcontrol, rinterv, rcens, nc=350, ni=350, test=logrank, nsim=50) # normally nsim=500 or more par(mfrow=c(1,1)) # Compose a censoring time generator function such that at 1 year # 5% of subjects are accrued, at 3 years 70% are accured, and at 10 # years 100% are accrued. The trial proceeds two years past the last # accrual for a total of 12 years of follow-up for the first subject. 
# Use linear interporation between these 3 points rcens <- function(n) { times <- c(0,1,3,10) accrued <- c(0,.05,.7,1) # Compute inverse of accrued function at U(0,1) random variables accrual.times <- approx(accrued, times, xout=runif(n))$y censor.times <- 12 - accrual.times censor.times } censor.times <- rcens(500) # hist(censor.times, nclass=20) accrual.times <- 12 - censor.times # Ecdf(accrual.times) # lines(c(0,1,3,10), c(0,.05,.7,1), col='red') # spower(..., rcens=rcens, ...) ## Not run: ##D # To define a control survival curve from a fitted survival curve ##D # with coordinates (tt, surv) with tt[1]=0, surv[1]=1: ##D ##D Scontrol <- function(times, tt, surv) approx(tt, surv, xout=times)$y ##D tt <- 0:6 ##D surv <- c(1, .9, .8, .75, .7, .65, .64) ##D formals(Scontrol) <- list(times=NULL, tt=tt, surv=surv) ##D ##D # To use a mixture of two survival curves, with e.g. mixing proportions ##D # of .2 and .8, use the following as a guide: ##D # ##D # Scontrol <- function(times, t1, s1, t2, s2) ##D # .2*approx(t1, s1, xout=times)$y + .8*approx(t2, s2, xout=times)$y ##D # t1 <- ...; s1 <- ...; t2 <- ...; s2 <- ...; ##D # formals(Scontrol) <- list(times=NULL, t1=t1, s1=s1, t2=t2, s2=s2) ##D ##D # Check that spower can detect a situation where generated censoring times ##D # are later than all failure times ##D ##D rcens <- function(n) runif(n, 0, 7) ##D f <- Quantile2(scontrol=Scontrol, hratio=function(x).8, tmax=6) ##D cont <- function(n) f(n, what='control') ##D int <- function(n) f(n, what='intervention') ##D spower(rcontrol=cont, rinterv=int, rcens=rcens, nc=300, ni=300, nsim=20) ##D ##D # Do an unstratified logrank test ##D library(survival) ##D # From SAS/STAT PROC LIFETEST manual, p. 
1801 ##D days <- c(179,256,262,256,255,224,225,287,319,264,237,156,270,257,242, ##D 157,249,180,226,268,378,355,319,256,171,325,325,217,255,256, ##D 291,323,253,206,206,237,211,229,234,209) ##D status <- c(1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,1,1,1,1,0, ##D 0,rep(1,19)) ##D treatment <- c(rep(1,10), rep(2,10), rep(1,10), rep(2,10)) ##D sex <- Cs(F,F,M,F,M,F,F,M,M,M,F,F,M,M,M,F,M,F,F,M, ##D M,M,M,M,F,M,M,F,F,F,M,M,M,F,F,M,F,F,F,F) ##D data.frame(days, status, treatment, sex) ##D table(treatment, status) ##D logrank(Surv(days, status), treatment) # agrees with p. 1807 ##D # For stratified tests the picture is puzzling. ##D # survdiff(Surv(days,status) ~ treatment + strata(sex))$chisq ##D # is 7.246562, which does not agree with SAS (7.1609) ##D # But summary(coxph(Surv(days,status) ~ treatment + strata(sex))) ##D # yields 7.16 whereas summary(coxph(Surv(days,status) ~ treatment)) ##D # yields 5.21 as the score test, not agreeing with SAS or logrank() (5.6485) ## End(Not run)
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R") source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R") reps<-50 outerreps<-1000 size<-rev(round(10^seq(2, 5, 0.25)))[ 7 ] nc<-12 plan(strategy=multisession, workers=nc) map(rev(1:outerreps), function(x){ start<-Sys.time() out<-checkplot_inf(flatten(flatten(SADs_list))[[17]], l=-1, inds=size, reps=reps) write.csv(out, paste("/scratch/mr984/SAD17","l",-1,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F) rm(out) print(Sys.time()-start) })
/scripts/checkplots_for_parallel_amarel/asy_552.R
no_license
dushoff/diversity_metrics
R
false
false
537
r
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R") source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R") reps<-50 outerreps<-1000 size<-rev(round(10^seq(2, 5, 0.25)))[ 7 ] nc<-12 plan(strategy=multisession, workers=nc) map(rev(1:outerreps), function(x){ start<-Sys.time() out<-checkplot_inf(flatten(flatten(SADs_list))[[17]], l=-1, inds=size, reps=reps) write.csv(out, paste("/scratch/mr984/SAD17","l",-1,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F) rm(out) print(Sys.time()-start) })
#!/usr/bin/env Rscript bgrMeansPlots <- function() { h = c(10,15,16,18,73,80,6,72,82,6,76,86,7,76,83,7,69,74,6,59,61,6,43,47,3,39,51,7,67,83,13,104,125,13,110,138,14,117,150,13,115,140,13,108,132,11,102,122,11,78,92,11,60,69,8,45,49,8,23,24) v = c(0,0,0,1,2,2,2,16,18,3,34,40,7,52,63,16,77,92,18,109,119,19,144,158,25,158,179,23,163,186,16,122,158,13,132,165,11,124,147,11,113,129,12,88,102,12,69,83,2,35,45,1,11,15,1,1,2,0,0,0) ver = data.frame(b=v[seq(1, length(v), 3)], g=v[seq(2, length(v), 3)], r=v[seq(3, length(v), 3)]) hor = data.frame(b=h[seq(1, length(h), 3)], g=h[seq(2, length(h), 3)], r=h[seq(3, length(h), 3)]) pdf('BGR_means_hor_plot.pdf') matplot(hor, type=c("b"), pch=1, lwd=2, col=c("blue","red","dark green"), xlab="Bin", ylab="Mean Color Intensity", ylim=c(0,255), main = "BGR Color Means for Horizontal Bins") dev.off() pdf('BGR_means_ver_plot.pdf') matplot(ver, type=c("b"), pch=1, lwd=2, col=c("blue","red","dark green"), xlab="Bin", ylab="Mean Color Intensity", ylim=c(0,255), main = "BGR Color Means for Vertical Bins") dev.off() } bgrMeansPlots()
/make-plots.r
no_license
figure002/orchid-presentation
R
false
false
1,087
r
#!/usr/bin/env Rscript bgrMeansPlots <- function() { h = c(10,15,16,18,73,80,6,72,82,6,76,86,7,76,83,7,69,74,6,59,61,6,43,47,3,39,51,7,67,83,13,104,125,13,110,138,14,117,150,13,115,140,13,108,132,11,102,122,11,78,92,11,60,69,8,45,49,8,23,24) v = c(0,0,0,1,2,2,2,16,18,3,34,40,7,52,63,16,77,92,18,109,119,19,144,158,25,158,179,23,163,186,16,122,158,13,132,165,11,124,147,11,113,129,12,88,102,12,69,83,2,35,45,1,11,15,1,1,2,0,0,0) ver = data.frame(b=v[seq(1, length(v), 3)], g=v[seq(2, length(v), 3)], r=v[seq(3, length(v), 3)]) hor = data.frame(b=h[seq(1, length(h), 3)], g=h[seq(2, length(h), 3)], r=h[seq(3, length(h), 3)]) pdf('BGR_means_hor_plot.pdf') matplot(hor, type=c("b"), pch=1, lwd=2, col=c("blue","red","dark green"), xlab="Bin", ylab="Mean Color Intensity", ylim=c(0,255), main = "BGR Color Means for Horizontal Bins") dev.off() pdf('BGR_means_ver_plot.pdf') matplot(ver, type=c("b"), pch=1, lwd=2, col=c("blue","red","dark green"), xlab="Bin", ylab="Mean Color Intensity", ylim=c(0,255), main = "BGR Color Means for Vertical Bins") dev.off() } bgrMeansPlots()
testlist <- list(A = structure(c(2.17107980817984e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802979333e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613122541-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
323
r
testlist <- list(A = structure(c(2.17107980817984e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802979333e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rmse_from_one_func.R \name{rmse_from_one_func} \alias{rmse_from_one_func} \title{rmse_from_one_func} \usage{ rmse_from_one_func(vec) } \arguments{ \item{vec}{Vector of estimates around 1.} } \value{ Returns a single number: the RMSE. } \description{ Calculates RMSE with a known target of one. }
/man/rmse_from_one_func.Rd
permissive
rzgross/uRbanmatching
R
false
true
374
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rmse_from_one_func.R \name{rmse_from_one_func} \alias{rmse_from_one_func} \title{rmse_from_one_func} \usage{ rmse_from_one_func(vec) } \arguments{ \item{vec}{Vector of estimates around 1.} } \value{ Returns a single number: the RMSE. } \description{ Calculates RMSE with a known target of one. }
context("update") test_that("list.update", { # simple list x <- list(p1 = list(type = "A", score = list(c1 = 10, c2 = 8)), p2 = list(type = "B", score = list(c1 = 9, c2 = 9)), p3 = list(type = "B", score = list(c1 = 9, c2 = 7))) expect_identical(list.update(x, type = NULL), lapply(x, function(xi) { xi[-1] })) expect_identical(list.update(x, score = list(min = min(unlist(score)))), lapply(x, function(xi) { modifyList(xi, list(score = list(min = min(unlist(xi$score))))) })) expect_identical(list.update(x, range = range(unlist(score))), lapply(x, function(xi) { modifyList(xi, list(range = range(unlist(xi$score)))) })) expect_identical(list.update(x, n = length(.)), lapply(x, function(xi) { modifyList(xi, list(n = length(xi))) })) lapply(1:3, function(i) list.update(x, c = i)) })
/packrat/lib/x86_64-pc-linux-gnu/3.2.5/rlist/tests/testthat/test-update.R
no_license
harryprince/seamonster
R
false
false
862
r
context("update") test_that("list.update", { # simple list x <- list(p1 = list(type = "A", score = list(c1 = 10, c2 = 8)), p2 = list(type = "B", score = list(c1 = 9, c2 = 9)), p3 = list(type = "B", score = list(c1 = 9, c2 = 7))) expect_identical(list.update(x, type = NULL), lapply(x, function(xi) { xi[-1] })) expect_identical(list.update(x, score = list(min = min(unlist(score)))), lapply(x, function(xi) { modifyList(xi, list(score = list(min = min(unlist(xi$score))))) })) expect_identical(list.update(x, range = range(unlist(score))), lapply(x, function(xi) { modifyList(xi, list(range = range(unlist(xi$score)))) })) expect_identical(list.update(x, n = length(.)), lapply(x, function(xi) { modifyList(xi, list(n = length(xi))) })) lapply(1:3, function(i) list.update(x, c = i)) })
# Script for Class 2 # PLS 298 -- Applied Statistical Modeling -- F2019 #### Setup #### library(dplyr) library(lattice) library(lme4) library(arm) library(ggplot2) library(viridis) #### PART 1: Multiple regression #### ## Why might we want or need to include multiple variables in a model? # a) Include another variable to explain more variation (when no interaction between variables but both are explanatory). # Example: # Predicting the length of petals of Iris versicolor from other morphological measurements # In this case, excluding the additional relevant variable results in "underfit". plot(Petal.Length~Petal.Width, iris, subset=iris$Species=="versicolor") plot(Petal.Length~Sepal.Length, iris, subset=iris$Species=="versicolor") # Here's a useful plot similar to xyplot that plots the relationship between 2 data columns conditioned on a third. coplot(Petal.Length~Petal.Width|Sepal.Length, iris[iris$Species=="versicolor",],overlap = 0,number = 2) # Does the model get better when we add another explanatory variable? summary(lm(Petal.Length~Petal.Width, iris, subset=iris$Species=="versicolor")) summary(lm(Petal.Length~Petal.Width + Sepal.Length, iris, subset=iris$Species=="versicolor")) # b) Include another variable beacuse the association between the response and the explanatory variables depends on level of other variables. # When people write that an analysis "controlled for" other variables, they often simply mean that other variables were included in the analysis. They didn't control those other variables in the experimental sense. Instead, the claim is that the analysis is able to account for dependence in the relationship between the response and the explanatory variable of interest. # Example: # Predicting ANPP from rainfall from the grassland data from last class. We might expect the relationship between productivity and rainfall to depend on temperature. We can explore this graphically, and include temperature as a covariate in a regression. 
ANPP <- read.csv("./data/Grassland_NPP_31_site_summary.csv", na.strings = "-9999") # Let's drop the extreme high value for now ANPP <- filter(ANPP, ANPP1 < 1000) # Graphical display coplot(ANPP1~Rainfall|Temperature, ANPP) # Regression with rainfall alone summary(lm(ANPP1~Rainfall, ANPP)) # Adding temperature as a covariate summary(lm(ANPP1~Rainfall*Temperature, ANPP)) # Does the effect of rainfall seem to be stronger or weaker when we incorporate temperature into the model? What's the interpretation of the regression coefficients in this second model? # We can also look at this interaction by splitting the data into "high" and "low" temperature locations. # Maybe the easiest way is to manually create a categorical variable first, then use it to differentiate high and low temperature sites in the plot ANPP$Temp_cat <- ifelse(ANPP$Temperature>10, "Hi", "Lo") ggplot(ANPP, aes(x=Rainfall, y=ANPP1)) + geom_point(aes(color=Temp_cat)) + theme_bw() # Note this could also be done using temperature as a continuous variable ggplot(ANPP, aes(x=Rainfall, y=ANPP1)) + geom_point(aes(color=Temperature)) + scale_color_viridis() + theme_classic() #### PART 2: Regression fitting, diagnostics, interpretation #### # When you have a continuous and a categorical variable, the requirement to sample across the whole range equates to balance. # As we'll see, models with random effects are more robust to imbalance, but there's no way around needing to have a big enough sample size that is well stratified enough to learn about each level of a factor. # (Note here is another reason you might choose to use a random effect -- you can have low sample size in some groups/categories, you just won't learn much about that particular one but you can still generalize across them). 
d <- read.table("./data/Erodium_data_for_class2.csv", sep=",", header=T) # Data set explanation: # These data are from a greenhouse experiment in which widespread invasive plant Erodium cicutarium was planted in high and low-water treatments. This exercise explores the effect of plant phenology (how soon they flower) and watering treatment on plant size (stem length). # -- treatment (1/0) = low-water treatment (1) or high-water treatment (0) # -- stem_length = length of longest stem on each plant in cm # -- days_to_flower = time from planting to flowering in days # We can initially look at the effect of days to flower and treatment on plant size colors = ifelse(d$treatment == "low_water", "red", "blue") plot(stem_length~days_to_flower, d, pch=16, col=colors) # Relationships look pretty non-linear. What if we log-transform the response variable? plot(log(stem_length)~days_to_flower, d, pch=16, col=colors) m1 <- lm(log(stem_length)~days_to_flower*treatment, d) summary(m1) # Or you might prefer the display() function in Gelman & Hill's arm library which is much cleaner: display(m1) # Here's a quick way to check two assumptions of the normal linear model: par(mfrow=c(2, 1), mar=rep(3,4), mgp=c(2,1,0)) plot(m1, which=1:2) # Note: If you want to see and example of what these plots look like when these assumptions are badly violated, try refitting the model on non-log-transformed stem length and the repeating this plot. # But also note: per Gelman and Hill, these are really not terribly important assumptions and mild violations are no big deal! They don't even recommend looking at normal quantile plots of residuals. Residuals can, however be useful to look at because they can reveal other things, such as weird outliers, nonlinearity, and grouping structure (i.e. non-independence) in the data. Much more on this later in the course. #### Interpreting regression model parameters #### # How would you interpret the intercept and the effect of treatment in this model? 
# Does this give an interpretation that makes practical sense? If not, what can we do? # One reason to center the explanatory variables is to make the effects more interpretable. d.center <- d d.center$days_to_flower <- d$days_to_flower - mean(d$days_to_flower, na.rm=T) # Or we can do the same think using a function from dplyr: # d.center <- mutate(d, days_to_flower = scale(days_to_flower, center=TRUE, scale=FALSE)) # Then we can repeat the same regression and compare. m2 <- lm(log(stem_length)~days_to_flower*treatment, d.center) display(m1) display(m2) dev.off() plot(log(stem_length)~days_to_flower, d.center, pch=16, col=colors) # Which coefficients change? Now what do the intercept and treament effects mean? # Q how would you compare the sizes of the effect of treatment and days to flower? # G&H recommend centering all variables, and dividing continuous variables by 2x their standard deviation. This makes the effects of continuous variables more comparable to those of 1/0 categorical variables. # See G&H pp56-57. # It is, however, much more common practice to just divide by one standard deviation (rather than 2), and is what people usually mean when they say the data were centered and scaled. It doesn't matter much, because either procedure puts the explanatory variables on the same scale, which is good for computation, and puts regression coefficients on a similar footing for comparison. d.scaled <- d.center d.scaled$days_to_flower <- d.center$days_to_flower/(2*sd(d.center$days_to_flower)) # d.scaled <- mutate(d, days_to_flower = scale(days_to_flower, center=TRUE, scale=TRUE)) m3 <- lm(log(stem_length)~days_to_flower*treatment, d.scaled) display(m3) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# #### Pause here for class discussion #### # Please let the instructor know you have reached this point! #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# #### PART 3: How can you tell if a model is good, or good enough? 
#### # a) Single-number summaries. # R^2 and other measures of fit or prediction # b) Fit # Plot fitted values vs observed. # It can be helpful to plot the data with fitted lines. plot(log(stem_length)~days_to_flower, data=d.scaled, pch=16, col=colors) betas <- coef(m3) # this extracts the coefficients from the linear model abline(betas[1], betas[2], col="blue") # regression line for treatment=0 abline(betas[1]+betas[3], betas[2]+betas[4], col="red") # regression line for treatment=1 # Plotting the regression line plus confidence intervals. # Keeping it simple, let's use just one explanatory variable. m4 <- lm(log(stem_length)~days_to_flower, d.scaled) # set the range of the explanatory variable to use for displaying the predictions x.pred <- seq(min(d.scaled$days_to_flower)*1.1, max(d.scaled$days_to_flower)*1.1, by=0.05) # generates a sequence of values going just outside the range of observed x values # The function predict() lets us predict the response variable from various levels of the explanatory variable. erodium.pred <- predict(m4, data.frame(days_to_flower=x.pred), se.fit=TRUE, interval="prediction") ##### NB using mutate to create scaled variables causes error ##### Error: variable 'days_to_flower' was fitted with type "nmatrix.1" but type "numeric" was supplied # Make predictions into a table for easier plotting erodium_pred_out <- data.frame(x.pred = x.pred, fit = erodium.pred$fit[,1], lwr = erodium.pred$fit[,2], upr = erodium.pred$fit[,3]) # We can also get intervals around that prediction. We can get "predictive" interval, or a "confidence" interval (see below). plot(fit ~ x.pred, erodium_pred_out, type="l", ylim=c(-3, 4), ylab="log(Stem Length mm)", xlab="Days to flower") lines(lwr~x.pred, erodium_pred_out, lty=2) lines(upr~x.pred, erodium_pred_out, lty=2) # plot the data on top of the fit points(log(stem_length)~days_to_flower, d.scaled) # Are there more points than you'd expect outside the 95% prediction interval? 
# For comparison, here is a plot showing the confidence interval around the regression line: erodium.pred <- predict(m4, data.frame(days_to_flower=x.pred), se.fit=TRUE, interval="confidence") erodium_pred_out <- data.frame(x.pred = x.pred, fit = erodium.pred$fit[,1], lwr = erodium.pred$fit[,2], upr = erodium.pred$fit[,3]) plot(fit ~ x.pred, erodium_pred_out, type="l", ylim=c(-3, 4), ylab="log(Stem Length mm)", xlab="Days to flower") lines(lwr~x.pred, erodium_pred_out, lty=2) lines(upr~x.pred, erodium_pred_out, lty=2) # plot the data on top of the fit points(log(stem_length)~days_to_flower, d.scaled) # Why are the confidence intervals for the regression line narrower than the confidence intervals for the model predictions? # Regression predicts parameters i.e. gives confidence interval for mean & interval, prediction predicts line + error # Having done all that, a quicker way to plot confidence intervals is to use ggplot's statistical fit methods: ggplot(d.scaled, aes(x=days_to_flower, y=log(stem_length))) + geom_point() + stat_smooth(method=lm) #### Simulating replicate data #### # Can you simulate replicate data using the model? Remember what the regression model actually is, as a probability distribution, and what the parameters of that probability distribution are. # Hint 1: Note a short-cut is to use the function sim() in the arm library to generate random values of the fitted regression coefficients. You can then use this information to produce replicate simulated data sets. # Hint 2: the function rnorm(n, mean, sd) generates n random normally distributed data points with mean = "mean" and standard deviation "sd". # Comment: the function simulate() is what you'd probably ACTUALLY use to do this, but for now, try doing it "by hand" using sim() and rnorm() s <- sim(m4) coef(s) sigma.hat(s) for(i in seq(1,100)){ hist(rnorm(n = 100,coef(s)[i,2],sigma.hat(s)[i])) }
/code/Class2Script_2019.R
no_license
katherinelauck/test-katie
R
false
false
11,714
r
# Script for Class 2 # PLS 298 -- Applied Statistical Modeling -- F2019 #### Setup #### library(dplyr) library(lattice) library(lme4) library(arm) library(ggplot2) library(viridis) #### PART 1: Multiple regression #### ## Why might we want or need to include multiple variables in a model? # a) Include another variable to explain more variation (when no interaction between variables but both are explanatory). # Example: # Predicting the length of petals of Iris versicolor from other morphological measurements # In this case, excluding the additional relevant variable results in "underfit". plot(Petal.Length~Petal.Width, iris, subset=iris$Species=="versicolor") plot(Petal.Length~Sepal.Length, iris, subset=iris$Species=="versicolor") # Here's a useful plot similar to xyplot that plots the relationship between 2 data columns conditioned on a third. coplot(Petal.Length~Petal.Width|Sepal.Length, iris[iris$Species=="versicolor",],overlap = 0,number = 2) # Does the model get better when we add another explanatory variable? summary(lm(Petal.Length~Petal.Width, iris, subset=iris$Species=="versicolor")) summary(lm(Petal.Length~Petal.Width + Sepal.Length, iris, subset=iris$Species=="versicolor")) # b) Include another variable beacuse the association between the response and the explanatory variables depends on level of other variables. # When people write that an analysis "controlled for" other variables, they often simply mean that other variables were included in the analysis. They didn't control those other variables in the experimental sense. Instead, the claim is that the analysis is able to account for dependence in the relationship between the response and the explanatory variable of interest. # Example: # Predicting ANPP from rainfall from the grassland data from last class. We might expect the relationship between productivity and rainfall to depend on temperature. We can explore this graphically, and include temperature as a covariate in a regression. 
ANPP <- read.csv("./data/Grassland_NPP_31_site_summary.csv", na.strings = "-9999") # Let's drop the extreme high value for now ANPP <- filter(ANPP, ANPP1 < 1000) # Graphical display coplot(ANPP1~Rainfall|Temperature, ANPP) # Regression with rainfall alone summary(lm(ANPP1~Rainfall, ANPP)) # Adding temperature as a covariate summary(lm(ANPP1~Rainfall*Temperature, ANPP)) # Does the effect of rainfall seem to be stronger or weaker when we incorporate temperature into the model? What's the interpretation of the regression coefficients in this second model? # We can also look at this interaction by splitting the data into "high" and "low" temperature locations. # Maybe the easiest way is to manually create a categorical variable first, then use it to differentiate high and low temperature sites in the plot ANPP$Temp_cat <- ifelse(ANPP$Temperature>10, "Hi", "Lo") ggplot(ANPP, aes(x=Rainfall, y=ANPP1)) + geom_point(aes(color=Temp_cat)) + theme_bw() # Note this could also be done using temperature as a continuous variable ggplot(ANPP, aes(x=Rainfall, y=ANPP1)) + geom_point(aes(color=Temperature)) + scale_color_viridis() + theme_classic() #### PART 2: Regression fitting, diagnostics, interpretation #### # When you have a continuous and a categorical variable, the requirement to sample across the whole range equates to balance. # As we'll see, models with random effects are more robust to imbalance, but there's no way around needing to have a big enough sample size that is well stratified enough to learn about each level of a factor. # (Note here is another reason you might choose to use a random effect -- you can have low sample size in some groups/categories, you just won't learn much about that particular one but you can still generalize across them). 
d <- read.table("./data/Erodium_data_for_class2.csv", sep=",", header=T) # Data set explanation: # These data are from a greenhouse experiment in which widespread invasive plant Erodium cicutarium was planted in high and low-water treatments. This exercise explores the effect of plant phenology (how soon they flower) and watering treatment on plant size (stem length). # -- treatment (1/0) = low-water treatment (1) or high-water treatment (0) # -- stem_length = length of longest stem on each plant in cm # -- days_to_flower = time from planting to flowering in days # We can initially look at the effect of days to flower and treatment on plant size colors = ifelse(d$treatment == "low_water", "red", "blue") plot(stem_length~days_to_flower, d, pch=16, col=colors) # Relationships look pretty non-linear. What if we log-transform the response variable? plot(log(stem_length)~days_to_flower, d, pch=16, col=colors) m1 <- lm(log(stem_length)~days_to_flower*treatment, d) summary(m1) # Or you might prefer the display() function in Gelman & Hill's arm library which is much cleaner: display(m1) # Here's a quick way to check two assumptions of the normal linear model: par(mfrow=c(2, 1), mar=rep(3,4), mgp=c(2,1,0)) plot(m1, which=1:2) # Note: If you want to see and example of what these plots look like when these assumptions are badly violated, try refitting the model on non-log-transformed stem length and the repeating this plot. # But also note: per Gelman and Hill, these are really not terribly important assumptions and mild violations are no big deal! They don't even recommend looking at normal quantile plots of residuals. Residuals can, however be useful to look at because they can reveal other things, such as weird outliers, nonlinearity, and grouping structure (i.e. non-independence) in the data. Much more on this later in the course. #### Interpreting regression model parameters #### # How would you interpret the intercept and the effect of treatment in this model? 
# Does this give an interpretation that makes practical sense? If not, what can we do? # One reason to center the explanatory variables is to make the effects more interpretable. d.center <- d d.center$days_to_flower <- d$days_to_flower - mean(d$days_to_flower, na.rm=T) # Or we can do the same think using a function from dplyr: # d.center <- mutate(d, days_to_flower = scale(days_to_flower, center=TRUE, scale=FALSE)) # Then we can repeat the same regression and compare. m2 <- lm(log(stem_length)~days_to_flower*treatment, d.center) display(m1) display(m2) dev.off() plot(log(stem_length)~days_to_flower, d.center, pch=16, col=colors) # Which coefficients change? Now what do the intercept and treament effects mean? # Q how would you compare the sizes of the effect of treatment and days to flower? # G&H recommend centering all variables, and dividing continuous variables by 2x their standard deviation. This makes the effects of continuous variables more comparable to those of 1/0 categorical variables. # See G&H pp56-57. # It is, however, much more common practice to just divide by one standard deviation (rather than 2), and is what people usually mean when they say the data were centered and scaled. It doesn't matter much, because either procedure puts the explanatory variables on the same scale, which is good for computation, and puts regression coefficients on a similar footing for comparison. d.scaled <- d.center d.scaled$days_to_flower <- d.center$days_to_flower/(2*sd(d.center$days_to_flower)) # d.scaled <- mutate(d, days_to_flower = scale(days_to_flower, center=TRUE, scale=TRUE)) m3 <- lm(log(stem_length)~days_to_flower*treatment, d.scaled) display(m3) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# #### Pause here for class discussion #### # Please let the instructor know you have reached this point! #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# #### PART 3: How can you tell if a model is good, or good enough? 
#### # a) Single-number summaries. # R^2 and other measures of fit or prediction # b) Fit # Plot fitted values vs observed. # It can be helpful to plot the data with fitted lines. plot(log(stem_length)~days_to_flower, data=d.scaled, pch=16, col=colors) betas <- coef(m3) # this extracts the coefficients from the linear model abline(betas[1], betas[2], col="blue") # regression line for treatment=0 abline(betas[1]+betas[3], betas[2]+betas[4], col="red") # regression line for treatment=1 # Plotting the regression line plus confidence intervals. # Keeping it simple, let's use just one explanatory variable. m4 <- lm(log(stem_length)~days_to_flower, d.scaled) # set the range of the explanatory variable to use for displaying the predictions x.pred <- seq(min(d.scaled$days_to_flower)*1.1, max(d.scaled$days_to_flower)*1.1, by=0.05) # generates a sequence of values going just outside the range of observed x values # The function predict() lets us predict the response variable from various levels of the explanatory variable. erodium.pred <- predict(m4, data.frame(days_to_flower=x.pred), se.fit=TRUE, interval="prediction") ##### NB using mutate to create scaled variables causes error ##### Error: variable 'days_to_flower' was fitted with type "nmatrix.1" but type "numeric" was supplied # Make predictions into a table for easier plotting erodium_pred_out <- data.frame(x.pred = x.pred, fit = erodium.pred$fit[,1], lwr = erodium.pred$fit[,2], upr = erodium.pred$fit[,3]) # We can also get intervals around that prediction. We can get "predictive" interval, or a "confidence" interval (see below). plot(fit ~ x.pred, erodium_pred_out, type="l", ylim=c(-3, 4), ylab="log(Stem Length mm)", xlab="Days to flower") lines(lwr~x.pred, erodium_pred_out, lty=2) lines(upr~x.pred, erodium_pred_out, lty=2) # plot the data on top of the fit points(log(stem_length)~days_to_flower, d.scaled) # Are there more points than you'd expect outside the 95% prediction interval? 
# For comparison, here is a plot showing the confidence interval around the regression line: erodium.pred <- predict(m4, data.frame(days_to_flower=x.pred), se.fit=TRUE, interval="confidence") erodium_pred_out <- data.frame(x.pred = x.pred, fit = erodium.pred$fit[,1], lwr = erodium.pred$fit[,2], upr = erodium.pred$fit[,3]) plot(fit ~ x.pred, erodium_pred_out, type="l", ylim=c(-3, 4), ylab="log(Stem Length mm)", xlab="Days to flower") lines(lwr~x.pred, erodium_pred_out, lty=2) lines(upr~x.pred, erodium_pred_out, lty=2) # plot the data on top of the fit points(log(stem_length)~days_to_flower, d.scaled) # Why are the confidence intervals for the regression line narrower than the confidence intervals for the model predictions? # Regression predicts parameters i.e. gives confidence interval for mean & interval, prediction predicts line + error # Having done all that, a quicker way to plot confidence intervals is to use ggplot's statistical fit methods: ggplot(d.scaled, aes(x=days_to_flower, y=log(stem_length))) + geom_point() + stat_smooth(method=lm) #### Simulating replicate data #### # Can you simulate replicate data using the model? Remember what the regression model actually is, as a probability distribution, and what the parameters of that probability distribution are. # Hint 1: Note a short-cut is to use the function sim() in the arm library to generate random values of the fitted regression coefficients. You can then use this information to produce replicate simulated data sets. # Hint 2: the function rnorm(n, mean, sd) generates n random normally distributed data points with mean = "mean" and standard deviation "sd". # Comment: the function simulate() is what you'd probably ACTUALLY use to do this, but for now, try doing it "by hand" using sim() and rnorm() s <- sim(m4) coef(s) sigma.hat(s) for(i in seq(1,100)){ hist(rnorm(n = 100,coef(s)[i,2],sigma.hat(s)[i])) }
% Generated by roxygen2 (4.0.0): do not edit by hand \docType{data} \name{tic} \alias{tic} \title{Insurance Company Benchmark (COIL 2000) Data Set} \format{A data frame with 266 rows and 23 variables} \source{ \url{http://archive.ics.uci.edu/ml/datasets/Insurance+Company+Benchmark+(COIL+2000)} } \description{ This data set used in the CoIL 2000 Challenge contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data. Detailed information, please refer to the Source. For imputation study, this dataset can be treated as a mixed-type data. } \details{ \itemize{ \item V1. a numeric variable \item V2. a categorical variable \item ... } } \references{ P. van der Putten and M. van Someren (eds). CoIL Challenge 2000: The Insurance Company Case. Published by Sentient Machine Research, Amsterdam. Also a Leiden Institute of Advanced Computer Science Technical Report 2000-09. June 22, 2000. } \keyword{datasets}
/man/tic.Rd
no_license
Lingbing/imputeR
R
false
false
1,007
rd
% Generated by roxygen2 (4.0.0): do not edit by hand \docType{data} \name{tic} \alias{tic} \title{Insurance Company Benchmark (COIL 2000) Data Set} \format{A data frame with 266 rows and 23 variables} \source{ \url{http://archive.ics.uci.edu/ml/datasets/Insurance+Company+Benchmark+(COIL+2000)} } \description{ This data set used in the CoIL 2000 Challenge contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data. Detailed information, please refer to the Source. For imputation study, this dataset can be treated as a mixed-type data. } \details{ \itemize{ \item V1. a numeric variable \item V2. a categorical variable \item ... } } \references{ P. van der Putten and M. van Someren (eds). CoIL Challenge 2000: The Insurance Company Case. Published by Sentient Machine Research, Amsterdam. Also a Leiden Institute of Advanced Computer Science Technical Report 2000-09. June 22, 2000. } \keyword{datasets}
feature_extraction_i <- function(x) { cat("Computing trend ...\n") ts_trend <- apply(x, 1, trend) cat("Computing slope ...\n") ts_slope <- apply(x, 1, function(u) { tryCatch(slope_vector(u), error = function(e) NA) }) cat("Computing mean ...\n") ts_mean <- rowMeans(x, na.rm = TRUE) cat("Computing skewness ...\n") ts_skew <- apply(x, 1, moments::skewness, na.rm=TRUE) cat("Computing kurtosis ...\n") ts_kts <- apply(x, 1, moments::kurtosis, na.rm=TRUE) cat("Computing median ...\n") ts_median <- apply(x, 1, median, na.rm=TRUE) cat("Computing min ...\n") ts_min <- apply(x, 1, min, na.rm=TRUE) cat("Computing max ...\n") ts_max <- apply(x, 1, max, na.rm=TRUE) cat("Computing var ...\n") ts_var <- apply(x, 1, var, na.rm=TRUE) cat("Computing standard deviation ...\n") ts_stddev <- apply(x, 1, sd, na.rm=TRUE) cat("Computing iqr ...\n") ts_iqr <- apply(x, 1, IQR, na.rm=TRUE) ts_dyns <- data.frame( ts_skew = ts_skew, ts_trend = ts_trend, ts_slope = ts_slope, ts_kts = ts_kts, ts_median = ts_median, ts_min = ts_min, ts_max = ts_max, ts_var = ts_var, ts_mean = ts_mean, ts_stddev = ts_stddev, ts_iqr = ts_iqr ) ts_dyns <- replace_inf(ts_dyns) rownames(ts_dyns) <- NULL dplyr::as_tibble(ts_dyns) } slope_vector <- function(x) { #ver se a direcao do x ta certa time_x <- seq_along(x) lmfit <- lm(x ~ time_x) lmfit$coefficients[[2]] } trend <- function(x) { sd(x) / sd(diff(x)[-1]) } wavelet_dau <- function(x, lvl=4) { if (all(is.na(x))) { return(rep(0, times = lvl+1)) } result <- tryCatch(wavDWT(x, wavelet="s8", n.levels=lvl), error = function(e) NA) if (is.na(result[1])) { return(rep(0, times = lvl+1)) } dwt_result <- result$data E_a5 <- reconstruct(result) E_a5 <- norm(t(E_a5)) E_dk <- sapply(dwt_result[1:4], function(x) norm(t(x))) names(E_dk) <- paste0("Ed", 1:length(E_dk)) E_T <- E_a5 + sum(E_dk) E_ra5 <- E_a5 / E_T E_rdk <- E_dk / E_T c(E_ra5=E_ra5,E_rdk) } pairwise_ccf <- function(dfl, feats) { seq. 
<- 1:nrow(dfl[[1]]) combs <- combn(x = feats, m = 2) m <- matrix(0, nrow=length(seq.), ncol=ncol(combs)) for (u in 1:ncol(combs)) { select_ds <- dfl[combs[, u]] m[,u] <- vapply(seq., function(i) { x_1 <- unlist(select_ds[[1]][i,]) x_2 <- unlist(select_ds[[2]][i,]) tryCatch(ccf( x = x_1, y = x_2, lag.max = 0, plot = FALSE)$acf[[1]], error=function(e) NA) }, FUN.VALUE = double(1L)) } colnames(m) <- paste0("ccf_", apply(combs, 2, function(u) { paste(u, collapse = "") })) as.data.frame(m) } trend <- function(x) { sd(x) / sd(diff(x)[-1]) } max_lyapunov_exp <- function(x) { require(nonlinearTseries) len <- length(x) Reduce(max, nonlinearTseries::divergence( nonlinearTseries::maxLyapunov( time.series = x, min.embedding.dim = ceiling(len / 4), max.embedding.dim = ceiling(len / 2), radius = ceiling(len / 6), do.plot = FALSE ) )) } HURST <- function(x) { #require(Rwave) cwtwnoise <- DOG(x, 10, 3, 1, plot = FALSE) mcwtwnoise <- Mod(cwtwnoise) mcwtwnoise <- mcwtwnoise * mcwtwnoise wspwnoise <- tfmean(mcwtwnoise, plot = FALSE) hurst.est(wspwnoise, 1:7, 3, plot = FALSE)[[2]] } replace_inf <- function (df) { do.call(data.frame, lapply(df, function(j) { replace(j, is.infinite(j), NA) })) }
/src/feat-engineering.r
no_license
vcerqueira/layered_learning_time_series
R
false
false
4,284
r
feature_extraction_i <- function(x) { cat("Computing trend ...\n") ts_trend <- apply(x, 1, trend) cat("Computing slope ...\n") ts_slope <- apply(x, 1, function(u) { tryCatch(slope_vector(u), error = function(e) NA) }) cat("Computing mean ...\n") ts_mean <- rowMeans(x, na.rm = TRUE) cat("Computing skewness ...\n") ts_skew <- apply(x, 1, moments::skewness, na.rm=TRUE) cat("Computing kurtosis ...\n") ts_kts <- apply(x, 1, moments::kurtosis, na.rm=TRUE) cat("Computing median ...\n") ts_median <- apply(x, 1, median, na.rm=TRUE) cat("Computing min ...\n") ts_min <- apply(x, 1, min, na.rm=TRUE) cat("Computing max ...\n") ts_max <- apply(x, 1, max, na.rm=TRUE) cat("Computing var ...\n") ts_var <- apply(x, 1, var, na.rm=TRUE) cat("Computing standard deviation ...\n") ts_stddev <- apply(x, 1, sd, na.rm=TRUE) cat("Computing iqr ...\n") ts_iqr <- apply(x, 1, IQR, na.rm=TRUE) ts_dyns <- data.frame( ts_skew = ts_skew, ts_trend = ts_trend, ts_slope = ts_slope, ts_kts = ts_kts, ts_median = ts_median, ts_min = ts_min, ts_max = ts_max, ts_var = ts_var, ts_mean = ts_mean, ts_stddev = ts_stddev, ts_iqr = ts_iqr ) ts_dyns <- replace_inf(ts_dyns) rownames(ts_dyns) <- NULL dplyr::as_tibble(ts_dyns) } slope_vector <- function(x) { #ver se a direcao do x ta certa time_x <- seq_along(x) lmfit <- lm(x ~ time_x) lmfit$coefficients[[2]] } trend <- function(x) { sd(x) / sd(diff(x)[-1]) } wavelet_dau <- function(x, lvl=4) { if (all(is.na(x))) { return(rep(0, times = lvl+1)) } result <- tryCatch(wavDWT(x, wavelet="s8", n.levels=lvl), error = function(e) NA) if (is.na(result[1])) { return(rep(0, times = lvl+1)) } dwt_result <- result$data E_a5 <- reconstruct(result) E_a5 <- norm(t(E_a5)) E_dk <- sapply(dwt_result[1:4], function(x) norm(t(x))) names(E_dk) <- paste0("Ed", 1:length(E_dk)) E_T <- E_a5 + sum(E_dk) E_ra5 <- E_a5 / E_T E_rdk <- E_dk / E_T c(E_ra5=E_ra5,E_rdk) } pairwise_ccf <- function(dfl, feats) { seq. 
<- 1:nrow(dfl[[1]]) combs <- combn(x = feats, m = 2) m <- matrix(0, nrow=length(seq.), ncol=ncol(combs)) for (u in 1:ncol(combs)) { select_ds <- dfl[combs[, u]] m[,u] <- vapply(seq., function(i) { x_1 <- unlist(select_ds[[1]][i,]) x_2 <- unlist(select_ds[[2]][i,]) tryCatch(ccf( x = x_1, y = x_2, lag.max = 0, plot = FALSE)$acf[[1]], error=function(e) NA) }, FUN.VALUE = double(1L)) } colnames(m) <- paste0("ccf_", apply(combs, 2, function(u) { paste(u, collapse = "") })) as.data.frame(m) } trend <- function(x) { sd(x) / sd(diff(x)[-1]) } max_lyapunov_exp <- function(x) { require(nonlinearTseries) len <- length(x) Reduce(max, nonlinearTseries::divergence( nonlinearTseries::maxLyapunov( time.series = x, min.embedding.dim = ceiling(len / 4), max.embedding.dim = ceiling(len / 2), radius = ceiling(len / 6), do.plot = FALSE ) )) } HURST <- function(x) { #require(Rwave) cwtwnoise <- DOG(x, 10, 3, 1, plot = FALSE) mcwtwnoise <- Mod(cwtwnoise) mcwtwnoise <- mcwtwnoise * mcwtwnoise wspwnoise <- tfmean(mcwtwnoise, plot = FALSE) hurst.est(wspwnoise, 1:7, 3, plot = FALSE)[[2]] } replace_inf <- function (df) { do.call(data.frame, lapply(df, function(j) { replace(j, is.infinite(j), NA) })) }
##### Robust Design CJS with capture histories as list and manipulated (see RobustCJSRaggedArrayParallel.R) # Modeling temporal and individual covariate, NDVI #p and c are constant #modeled on a weekly scale #################################specify model in BUGS language sink("robust_CJS_weekly_phi_ndvi_0_ndvi_1_tmax_3_tmin_5_p_dot_c_dot.bug") cat(" model{ ###############Priors and constraints alpha.0 ~ dnorm(0, 0.4)T(-10,10) #prior for intercept alpha.ndvi_0 ~ dnorm(0, 0.4)T(-10,10) #prior for slope on NDVI no lag alpha.ndvi_1 ~ dnorm(0, 0.4)T(-10,10) #prior for slope on NDVI lag 1 alpha.tmax_3 ~ dnorm(0, 0.4)T(-10,10) #prior for slope on tmax lag 3 alpha.tmin_5 ~ dnorm(0, 0.4)T(-10,10) #prior for slope on tmin lag 5 mean.p ~ dnorm(0, 0.4)T(-10,10) # prior for p mean.c ~ dnorm(0, 0.4)T(-10,10) # prior for c for(i in 1:nind){ for(w in 1:(n.weeks-1)){ # phi has 2 dimensions [indiv, and weeks] logit(phi[i,w]) <- alpha.0 + alpha.ndvi_0 * ndvi_0[i,w] + alpha.ndvi_1 * ndvi_1[i,w] + alpha.tmax_3 * tmax_3[i,w] + alpha.tmin_5 * tmin_5[i,w] } #w for weeks } #i for individual for(i in 1:nind){ for(w in 1:n.weeks){ # p and c also have two dim logit(p[i, w]) <- mean.p # could specify covariates here logit(c[i, w]) <- mean.c } #w for weeks } #i for individual #############Likelihood # STATE PROCESS for(i in 1:nind){ # define latent state at first capture # dimensions [individual, week] z[i,f[i]] <- 1 # z is true (latent) state alive or dead, know alive at first capture for(w in (f[i]+1):n.weeks){ mu1[i, w] <- (phi[i, w-1]) * z[i, w-1] z[i, w] ~ dbern(mu1[i, w]) } # w } # i # OBSERVATION PROCESS for(obs in 1:n.obs){ y[obs] ~ dbern(z[id[obs], week[obs]] * ifelse(p.or.c[obs]==0, p[id[obs], week[obs]], c[id[obs], week[obs]]) ) # 0 represents p, 1 represents c (if caught before that session) } #obs } #model ",fill=TRUE) sink() ###########################################################################
/Old Code/robust_CJS_weekly_phi_cov_p_dot_c_dot.R
no_license
angieluis/BayesianMarkRecapSNV
R
false
false
2,267
r
##### Robust Design CJS with capture histories as list and manipulated (see RobustCJSRaggedArrayParallel.R) # Modeling temporal and individual covariate, NDVI #p and c are constant #modeled on a weekly scale #################################specify model in BUGS language sink("robust_CJS_weekly_phi_ndvi_0_ndvi_1_tmax_3_tmin_5_p_dot_c_dot.bug") cat(" model{ ###############Priors and constraints alpha.0 ~ dnorm(0, 0.4)T(-10,10) #prior for intercept alpha.ndvi_0 ~ dnorm(0, 0.4)T(-10,10) #prior for slope on NDVI no lag alpha.ndvi_1 ~ dnorm(0, 0.4)T(-10,10) #prior for slope on NDVI lag 1 alpha.tmax_3 ~ dnorm(0, 0.4)T(-10,10) #prior for slope on tmax lag 3 alpha.tmin_5 ~ dnorm(0, 0.4)T(-10,10) #prior for slope on tmin lag 5 mean.p ~ dnorm(0, 0.4)T(-10,10) # prior for p mean.c ~ dnorm(0, 0.4)T(-10,10) # prior for c for(i in 1:nind){ for(w in 1:(n.weeks-1)){ # phi has 2 dimensions [indiv, and weeks] logit(phi[i,w]) <- alpha.0 + alpha.ndvi_0 * ndvi_0[i,w] + alpha.ndvi_1 * ndvi_1[i,w] + alpha.tmax_3 * tmax_3[i,w] + alpha.tmin_5 * tmin_5[i,w] } #w for weeks } #i for individual for(i in 1:nind){ for(w in 1:n.weeks){ # p and c also have two dim logit(p[i, w]) <- mean.p # could specify covariates here logit(c[i, w]) <- mean.c } #w for weeks } #i for individual #############Likelihood # STATE PROCESS for(i in 1:nind){ # define latent state at first capture # dimensions [individual, week] z[i,f[i]] <- 1 # z is true (latent) state alive or dead, know alive at first capture for(w in (f[i]+1):n.weeks){ mu1[i, w] <- (phi[i, w-1]) * z[i, w-1] z[i, w] ~ dbern(mu1[i, w]) } # w } # i # OBSERVATION PROCESS for(obs in 1:n.obs){ y[obs] ~ dbern(z[id[obs], week[obs]] * ifelse(p.or.c[obs]==0, p[id[obs], week[obs]], c[id[obs], week[obs]]) ) # 0 represents p, 1 represents c (if caught before that session) } #obs } #model ",fill=TRUE) sink() ###########################################################################
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{is_hpd} \alias{is_hpd} \title{Check if a matrix is Hermitian positive definite} \usage{ is_hpd(A, tol = 1e-15) } \description{ Check if a matrix is Hermitian positive definite } \keyword{internal}
/beyondWhittle/man/is_hpd.Rd
no_license
akhikolla/ClusterTests
R
false
true
288
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{is_hpd} \alias{is_hpd} \title{Check if a matrix is Hermitian positive definite} \usage{ is_hpd(A, tol = 1e-15) } \description{ Check if a matrix is Hermitian positive definite } \keyword{internal}
create_placement <- function(group, strategy = "cluster", ...) { query <- list(Action = "CreatePlacementGroup", GroupName = group, Strategy = strategy) r <- ec2HTTP(query = query, ...) return(r) } delete_placement <- function(group, ...) { query <- list(Action = "DeletePlacementGroup", GroupName = group) r <- ec2HTTP(query = query, ...) return(r) } describe_placements <- function(group, filter, ...) { query <- list(Action = "DescribePlacementGroup") if(!missing(group)) { group <- as.list(group) names(group) <- paste0("InstanceId.", 1:length(group)) query <- c(query, group) } if(!missing(filter)) { query <- c(query, .makelist(filter, type = "Filter")) } r <- ec2HTTP(query = query, ...) return(r) }
/R/PlacementGroups.r
no_license
CDC/aws.ec2
R
false
false
848
r
create_placement <- function(group, strategy = "cluster", ...) { query <- list(Action = "CreatePlacementGroup", GroupName = group, Strategy = strategy) r <- ec2HTTP(query = query, ...) return(r) } delete_placement <- function(group, ...) { query <- list(Action = "DeletePlacementGroup", GroupName = group) r <- ec2HTTP(query = query, ...) return(r) } describe_placements <- function(group, filter, ...) { query <- list(Action = "DescribePlacementGroup") if(!missing(group)) { group <- as.list(group) names(group) <- paste0("InstanceId.", 1:length(group)) query <- c(query, group) } if(!missing(filter)) { query <- c(query, .makelist(filter, type = "Filter")) } r <- ec2HTTP(query = query, ...) return(r) }
#create function
/proj.R
no_license
jhall6ug/betarepo
R
false
false
17
r
#create function
library(doParallel) # Compute the pseudolikelihood for difference of accumulators though time # The likelihood is computed from an estimated density constructed from a # simulated data set data.mx. # The data set data.mx is an Nxtime matrix, where dimension 1 are the simulated # data from this proposal of theta* and dimenstion 2 is the state of the difference # of the accumulators at time i pseudolikelihood <- function(data.mx, obs.data, log=TRUE) { # Initialize the likelihood vectors to -750 for length of time # (so the default likelihood value is exp(-750) which is # approximately zero). like <- rep(-750,times=dim(obs.data)[2]) temp <- rep(-750,times=dim(obs.data)[1]) # Compute the likelihood at each time point i for(i in 1:dim(obs.data)[2]){ # Compute the kernel density estimate dens.1 <- density(data.mx[,i], kernel="epanechnikov") # Convert it to a function func.1 <- approxfun(dens.1$x,dens.1$y,rule=2) # Use the function to compute the likelihood of the observed RTs temp <- log(func.1(obs.data[,i])) temp[!is.finite(temp)] <- -750 like[i]=sum(temp) } # If there are any zeros, convert the log to -750 like[like==-Inf] <- -750 # Add log likelihood components together. Note the proportion terms that # rescale the conditional likelihoods to joints. like <- sum(like) # Convert from log back to likelihood if necessary and return if (log==FALSE) like <- exp(like) return(like) } # Wrapper function to us in sampler # I like to make mine so that it always takes # Theta* and data get_log_dens=function(params, data){ # Check that proposal is finite if(all(is.finite(params))){ # Container for simulated data sim.data=matrix(NA, nrow = n.obs, ncol = length(obs.data)) # print(params) # for (i in 1:n.obs){ # temp=rlcca(...) 
# sim.data[,i]=temp[,1]-temp[,2] # } # Simulate datasets for Pseudolike in Parallel sim.data=NULL for(i in 1:n.obs){ temp=rlcca(n.items = true$n.items[1], max.time = true$maxtime[1], startx = true$startx, drift = drift, K = params['K'], L = params['L'], eta = true$eta[1], dt = true$dt[1], tau = true$tau[1], t0 = params['t0']) # return difference if(all(is.finite(temp))){ sim.data=rbind(sim.data, temp[,1]-temp[,2]) }else{return(-Inf)} } # print("Sims Done!") # Return pseudolike of observed data under simulated prosposed data return(pseudolikelihood(data.mx=sim.data, obs.data=obs.data)) }else{return(-Inf) # If not finite, return worse density possible } } # Sample a "prior" distribution of the varaible in model # Essentially, this is how likely you think that parameters are samplePrior=function(){ # Use log tranform because these will later be eponentiated # This is fairly "tight" which makes the Initialization easier L=log(rgamma(1,.4,1)) # Leak K=log(rgamma(1,.1,1)) # Inhibition t0=log(rnorm(1,.2,.1))# NonDecision time out=tibble(L,K,t0) } # Compute density under a "prior" distribution of the varaible in model # Essentially, this is how likely you think that parameters are prior=function(params){ # Note that these are the same distribution as above # the exponentiation is used to cancel the log transform to calculate # the density under the prior distribution prior=0 # max(dnorm(exp(params["L"]),.4,1,log=TRUE),-750)+ # max(dnorm(exp(params["K"]),.1,1,log=TRUE),-750)+ # max(dnorm(exp(params["t0"]),.2,.1,log=TRUE),-750) }
/pseudolikelihood.r
no_license
coreykeyser/Dynamic-Evidence-fMRI
R
false
false
3,651
r
library(doParallel) # Compute the pseudolikelihood for difference of accumulators though time # The likelihood is computed from an estimated density constructed from a # simulated data set data.mx. # The data set data.mx is an Nxtime matrix, where dimension 1 are the simulated # data from this proposal of theta* and dimenstion 2 is the state of the difference # of the accumulators at time i pseudolikelihood <- function(data.mx, obs.data, log=TRUE) { # Initialize the likelihood vectors to -750 for length of time # (so the default likelihood value is exp(-750) which is # approximately zero). like <- rep(-750,times=dim(obs.data)[2]) temp <- rep(-750,times=dim(obs.data)[1]) # Compute the likelihood at each time point i for(i in 1:dim(obs.data)[2]){ # Compute the kernel density estimate dens.1 <- density(data.mx[,i], kernel="epanechnikov") # Convert it to a function func.1 <- approxfun(dens.1$x,dens.1$y,rule=2) # Use the function to compute the likelihood of the observed RTs temp <- log(func.1(obs.data[,i])) temp[!is.finite(temp)] <- -750 like[i]=sum(temp) } # If there are any zeros, convert the log to -750 like[like==-Inf] <- -750 # Add log likelihood components together. Note the proportion terms that # rescale the conditional likelihoods to joints. like <- sum(like) # Convert from log back to likelihood if necessary and return if (log==FALSE) like <- exp(like) return(like) } # Wrapper function to us in sampler # I like to make mine so that it always takes # Theta* and data get_log_dens=function(params, data){ # Check that proposal is finite if(all(is.finite(params))){ # Container for simulated data sim.data=matrix(NA, nrow = n.obs, ncol = length(obs.data)) # print(params) # for (i in 1:n.obs){ # temp=rlcca(...) 
# sim.data[,i]=temp[,1]-temp[,2] # } # Simulate datasets for Pseudolike in Parallel sim.data=NULL for(i in 1:n.obs){ temp=rlcca(n.items = true$n.items[1], max.time = true$maxtime[1], startx = true$startx, drift = drift, K = params['K'], L = params['L'], eta = true$eta[1], dt = true$dt[1], tau = true$tau[1], t0 = params['t0']) # return difference if(all(is.finite(temp))){ sim.data=rbind(sim.data, temp[,1]-temp[,2]) }else{return(-Inf)} } # print("Sims Done!") # Return pseudolike of observed data under simulated prosposed data return(pseudolikelihood(data.mx=sim.data, obs.data=obs.data)) }else{return(-Inf) # If not finite, return worse density possible } } # Sample a "prior" distribution of the varaible in model # Essentially, this is how likely you think that parameters are samplePrior=function(){ # Use log tranform because these will later be eponentiated # This is fairly "tight" which makes the Initialization easier L=log(rgamma(1,.4,1)) # Leak K=log(rgamma(1,.1,1)) # Inhibition t0=log(rnorm(1,.2,.1))# NonDecision time out=tibble(L,K,t0) } # Compute density under a "prior" distribution of the varaible in model # Essentially, this is how likely you think that parameters are prior=function(params){ # Note that these are the same distribution as above # the exponentiation is used to cancel the log transform to calculate # the density under the prior distribution prior=0 # max(dnorm(exp(params["L"]),.4,1,log=TRUE),-750)+ # max(dnorm(exp(params["K"]),.1,1,log=TRUE),-750)+ # max(dnorm(exp(params["t0"]),.2,.1,log=TRUE),-750) }
#' Plot method for objects of type Bolstad #' #' A unified plotting method for plotting the prior, likelihood and posterior #' from any of the analyses in the book #' #' The function provides a unified way of plotting the prior, likelihood and #' posterior from any of the functions in the library that return these #' quantities. It will produce an overlay of the lines by default, or separate #' panels if \code{overlay = FALSE}. #' #' @param x A S3 object of class Bolstad #' @param overlay if \code{FALSE} then up to three plots will be drawn #' side-by-side #' @param which Control which of the prior = 1, likelihood = 2, and posterior = #' 3, are plots. This is set to prior and posterior by default to retain #' compatibility with the book #' @param densCols The colors of the lines for each of the prior, likelihood and #' posterior #' @param legendLoc The location of the legend, usually either \code{"topright"} #' or \code{"topleft"} #' @param scaleLike If \code{TRUE}, then the likelihood will be scaled to have #' approximately the same maximum value as the posterior #' @param xlab Label for x axis #' @param ylab Label for y axis #' @param main Title of plot #' @param ylim Vector giving y coordinate range #' @param cex Character expansion multiplier #' @param \dots Any remaining arguments are fed to the \code{plot} command #' @author James Curran #' @keywords plot #' @examples #' #' x = rnorm(20,-0.5,1) #' ## find the posterior density with a N(0,1) prior on mu #' b = normnp(x,sigma=1) #' plot(b) #' plot(b, which = 1:3) #' plot(b, overlay = FALSE, which = 1:3) #' @export plot.Bolstad = function(x, overlay = TRUE, which = c(1, 3), densCols = c("red","green","blue")[which], legendLoc = "topleft", scaleLike = FALSE, xlab = eval(expression(x$name)), ylab = "", main = "Shape of prior and posterior", ylim = c(0, max(cbind(x$prior, x$likelihood, x$posterior)[,which]) * 1.1), cex = 0.7, ...){ which = sort(which) if(is.null(which) || length(which) <= 0 || length(which) > 3 || 
any(!grepl('^[1-3]+$', which))){ stop("parameter which can only take vectors of length 3 containing the values 1, 2 and 3") } if(scaleLike){ sf = max(x$posterior) / max(x$likelihood) x$likelihood = x$likelihood * sf } bLegend = !grepl("none", tolower(legendLoc)) if(overlay){ with(x,{ Y = as.matrix(cbind(prior, likelihood, posterior)[,which]); plot(param.x, Y[,1], ylim = ylim, type="l", lty = (3:1)[which[1]], col = densCols[1], xlab = xlab, ylab = "", main = main, ...); i = 2; while(i <= ncol(Y)){ lines(param.x, Y[,i], lty = (3:1)[which[i]], col = densCols[i]); i = i + 1; }; if(bLegend){ legend(legendLoc, lty = (3:1)[which], col = densCols, legend = c("Prior", "Likelihood", "Posterior")[which], bty = 'n', cex = cex); }; }) }else{ oldpar = par(mfrow = c(1, length(which)), mai = c(0.7, 0.1, 0.2, 0.1), yaxs = 'i', xaxs = 'i') with(x,{ Y = cbind(prior, likelihood, posterior)[,which] legend = c("Prior", "Likelihood", "Posterior")[which] plot(param.x, Y[,1], ylim = ylim, type="l", col = densCols[1], xlab = eval(expression(name)), ylab = "", main = legend[1], axes = FALSE, ...) axis(1) box() for(i in 2:ncol(Y)){ plot(param.x, Y[,i], ylim = ylim, col = densCols[i], type = 'l', xlab = "", main = legend[i], axes = FALSE, ...) box() } par(oldpar) }) } }
/R/plot.Bolstad.r
no_license
cran/Bolstad
R
false
false
3,949
r
#' Plot method for objects of type Bolstad #' #' A unified plotting method for plotting the prior, likelihood and posterior #' from any of the analyses in the book #' #' The function provides a unified way of plotting the prior, likelihood and #' posterior from any of the functions in the library that return these #' quantities. It will produce an overlay of the lines by default, or separate #' panels if \code{overlay = FALSE}. #' #' @param x A S3 object of class Bolstad #' @param overlay if \code{FALSE} then up to three plots will be drawn #' side-by-side #' @param which Control which of the prior = 1, likelihood = 2, and posterior = #' 3, are plots. This is set to prior and posterior by default to retain #' compatibility with the book #' @param densCols The colors of the lines for each of the prior, likelihood and #' posterior #' @param legendLoc The location of the legend, usually either \code{"topright"} #' or \code{"topleft"} #' @param scaleLike If \code{TRUE}, then the likelihood will be scaled to have #' approximately the same maximum value as the posterior #' @param xlab Label for x axis #' @param ylab Label for y axis #' @param main Title of plot #' @param ylim Vector giving y coordinate range #' @param cex Character expansion multiplier #' @param \dots Any remaining arguments are fed to the \code{plot} command #' @author James Curran #' @keywords plot #' @examples #' #' x = rnorm(20,-0.5,1) #' ## find the posterior density with a N(0,1) prior on mu #' b = normnp(x,sigma=1) #' plot(b) #' plot(b, which = 1:3) #' plot(b, overlay = FALSE, which = 1:3) #' @export plot.Bolstad = function(x, overlay = TRUE, which = c(1, 3), densCols = c("red","green","blue")[which], legendLoc = "topleft", scaleLike = FALSE, xlab = eval(expression(x$name)), ylab = "", main = "Shape of prior and posterior", ylim = c(0, max(cbind(x$prior, x$likelihood, x$posterior)[,which]) * 1.1), cex = 0.7, ...){ which = sort(which) if(is.null(which) || length(which) <= 0 || length(which) > 3 || 
any(!grepl('^[1-3]+$', which))){ stop("parameter which can only take vectors of length 3 containing the values 1, 2 and 3") } if(scaleLike){ sf = max(x$posterior) / max(x$likelihood) x$likelihood = x$likelihood * sf } bLegend = !grepl("none", tolower(legendLoc)) if(overlay){ with(x,{ Y = as.matrix(cbind(prior, likelihood, posterior)[,which]); plot(param.x, Y[,1], ylim = ylim, type="l", lty = (3:1)[which[1]], col = densCols[1], xlab = xlab, ylab = "", main = main, ...); i = 2; while(i <= ncol(Y)){ lines(param.x, Y[,i], lty = (3:1)[which[i]], col = densCols[i]); i = i + 1; }; if(bLegend){ legend(legendLoc, lty = (3:1)[which], col = densCols, legend = c("Prior", "Likelihood", "Posterior")[which], bty = 'n', cex = cex); }; }) }else{ oldpar = par(mfrow = c(1, length(which)), mai = c(0.7, 0.1, 0.2, 0.1), yaxs = 'i', xaxs = 'i') with(x,{ Y = cbind(prior, likelihood, posterior)[,which] legend = c("Prior", "Likelihood", "Posterior")[which] plot(param.x, Y[,1], ylim = ylim, type="l", col = densCols[1], xlab = eval(expression(name)), ylab = "", main = legend[1], axes = FALSE, ...) axis(1) box() for(i in 2:ncol(Y)){ plot(param.x, Y[,i], ylim = ylim, col = densCols[i], type = 'l', xlab = "", main = legend[i], axes = FALSE, ...) box() } par(oldpar) }) } }
# Module for referencing electrodes # Init virtualenv for module dev if(F){ library(rave) m = ModuleEnvir$new(module_id = 'mid', 'ref', script_path = './inst/modules/builtin_modules/reference/main.R'); init_app(m) rave_prepare('Lang_loc/YCB', 1, 'YCBpd', c(1,2), data_types = NULL) profvis::profvis({ rave_prepare(subject = 'Large/YAB', electrodes = c(1:10, 13:20), epoch = 'YABa', time_range = c(1,2), data_types = 'power', attach = F, reference = 'test') }) profvis::profvis({ rave_prepare(subject = 'Large/YAB', electrodes = c(1:10, 13:20), epoch = 'YABa', time_range = c(1,2), data_types = 'power', attach = F, reference = 'default') }) rave_data = getDefaultDataRepository() pryr::object_size(rave_data) } rave_prepare(subject = 'congruency1/YAB', electrodes = 64:65, epoch = 'YABa', time_range = c(1,2), data_types = NULL) # load libraries library(shiny) library(stringr) library(magrittr) # Shiny session used to update inputs in advanced ways session = getDefaultReactiveDomain() input = getDefaultReactiveInput() output = getDefaultReactiveOutput() local_data = shiny::reactiveValues( group_number = NULL, refresh = NULL, do_parallel_plot = NULL, load_mesh = F ) ref_group %?<-% list() # Environment to store information env = new.env(parent = baseenv()) env$ref_calc = 0 env$last_import = 'new..' 
# Load UIs source('UI.R') source('plot.R') # source('./inst/modules/builtin_modules/reference/UI.R') observeEvent(input[['bipolar_modal']], { # get group info group_info = current_group() if(!length(group_info)){ return() } ref_tbl = get_ref_table() electrodes = group_info$electrodes bptbl = ref_tbl[ref_tbl$Electrode %in% electrodes,] bptbl$Type = 'Bipolar Reference' if(nrow(bptbl) > 0 && unique(bptbl$Reference) == 'noref'){ e = bptbl$Electrode bptbl$Reference = c(paste0('ref_', e[-1]), '') } env$bipolar_tbl = bptbl showModal( shiny::modalDialog( title = 'Bipolar Reference', size = 'l', easyClose = F, footer = tagList( actionButton(ns('bp_confirm'), 'Confirm') ), DT::DTOutput(ns('bipolar_table')) ) ) }) observeEvent(input[[('bp_confirm')]], { tbl = env$bipolar_tbl ref_tbl = get_ref_table() if(nrow(tbl)){ for(ii in seq_len(nrow(tbl))){ sel = ref_tbl$Electrode == tbl$Electrode[ii] ref_tbl$Reference[sel] = tbl$Reference[ii] ref_tbl$Type[sel] = 'Bipolar Reference' } # save ref_tbl save_ref_table(ref_tbl) } removeModal(session = session) }) bipolar_proxy = DT::dataTableProxy('bipolar_table', session = session) output[[('bipolar_table')]] = DT::renderDT({ env$bipolar_tbl }, env = ..runtime_env, editable = TRUE) observeEvent(input[[('bipolar_table_cell_edit')]], { info = input[[('bipolar_table_cell_edit')]] i = info$row j = info$col v = info$value # string match electrode v = str_match(v, '(ref_|[\\ ]{0})([0-9]*)')[3] if(is_invalid(v, .invalids = c('null', 'na', 'blank'))){ v = '' }else{ v = subject$filter_all_electrodes(as.integer(v)) if(!length(v)){ return() }else{ v = str_c('ref_', v) } } bipolar_tbl = env$bipolar_tbl if(names(bipolar_tbl)[j] == 'Reference'){ env$bipolar_tbl[i, j] = v DT::replaceData(bipolar_proxy, env$bipolar_tbl, resetPaging = FALSE) # important } }) output[['elec_loc']] <- threejsr::renderThreejs({ local_data$refresh group_info = current_group() group_info %?<-% list(electrodes = NULL) name = group_info$rg_name ref_tbl = get_ref_table() 
if(is.blank(name)){ name = 'Current Group' } # join electrodes.csv with ref table tbl = merge(ref_tbl, subject$electrodes[,c('Electrode', 'Coord_x','Coord_y','Coord_z', 'Label')], id = 'Electrode', suffixes = c('.x', '')) tbl$Label[is.na(tbl$Label)] = 'No Label' electrodes = group_info$electrodes with(tbl, { sprintf('<p>Reference - %s (%s)<br/>Reference to - %s</p>', Group, Type, Reference) }) -> marker values = rep(-1, length(electrodes)) bad_electrodes = rave:::parse_selections(input[[('ref_bad')]]) values[electrodes %in% bad_electrodes] = 1 module_tools$plot_3d_electrodes( tbl = tbl, electrodes = electrodes, values = values, marker = marker, pal = colorRampPalette(c('navy', 'black', 'red'))(11), show_mesh = local_data$load_mesh # link_module = 'condition_explorer', # variable_name = 'electrode' ) }) observeEvent(input$load_mesh, { load_mesh = isolate(!local_data$load_mesh) local_data$load_mesh = load_mesh updateActionButton(session, 'load_mesh', label = ifelse(load_mesh, 'Hide Mesh', 'Show Mesh')) }) elec_loc_ui = function(){ tagList( actionLink(ns('load_mesh'), 'Show Mesh'), threejsr::threejsOutput(ns('elec_loc'), height = '300px') ) } observeEvent(input[[('cur_save')]], { ref_to = input[[('ref_to')]] group_info = current_group() if(is.null(group_info)){ return() } electrodes = group_info$electrodes bad_electrodes = rave:::parse_selections(input[[('ref_bad')]]) ref_table = get_ref_table() sel = ref_table$Electrode %in% electrodes ref_table$Group[sel] = group_info$rg_name if(group_info$rg_type %in% c('Common Average Reference', 'White Matter Reference', 'No Reference')){ ref_table$Reference[sel] = ref_to ref_table$Reference[sel & ref_table$Electrode %in% bad_electrodes] = '' # set bad electrodes ref_table$Type[sel] = group_info$rg_type save_ref_table(tbl = ref_table) showNotification(p( group_info$rg_name, ' (', group_info$rg_electrodes, ') is now set to be referenced to [', ref_to, ']' ), type = 'message') } }, priority = -1L) # Customized UI cur_group_ui = 
function(){ refresh = local_data$refresh logger('cur_group_ui') new_ref = local_data$has_new_ref if(length(cur_group) && cur_group <= length(ref_group)){ group_number = as.integer(cur_group) group_info = ref_group[[group_number]] group_type = group_info$rg_type group_name = group_info$rg_name electrodes = rave:::parse_selections(group_info$rg_electrodes) if(length(electrodes) == 0){ return(tagList( hr(), actionButton(ns('cur_group_save'), 'Preview & Export',width = '100%') )) } }else{ return(tagList( hr(), actionButton(ns('cur_group_save'), 'Preview & Export',width = '100%') )) } refs = get_refs() ref_names = names(refs); ref_names = c('noref', ref_names) ref_tbl = get_ref_table() sel = ref_tbl$Electrode %in% electrodes switch ( group_type, 'No Reference' = { selectInput(ns('ref_to'), 'Reference to:', choices = 'noref', selected = 'noref') }, 'Bipolar Reference' = { tagList( tags$label('Reference to:'), actionButton(ns('bipolar_modal'), 'Open Table', width = '100%', style = 'margin-bottom: 15px') ) }, # By default, it's either 'Common Average Reference' or 'White Matter Reference' { # try to get reference name selected = unique(c(ref_tbl$Reference[sel]), 'noref') selected = selected[selected != ''][1] selectInput(ns('ref_to'), 'Reference to:', choices = ref_names, selected = selected) } ) -> inp tagList( fluidRow( column( width = 7, inp, p( tags$label('Group Name: '), group_name, br(), tags$label('Electrodes: '), group_info$rg_electrodes, br(), tags$label('Bad Electrodes: '), textOutput(ns('bad_electrodes_out'), inline = T) ) ), column( width = 5, textInput(ns('ref_bad'), 'Bad Electrodes:', value = rave:::deparse_selections(ref_tbl$Electrode[sel & ref_tbl$Reference == ''])), div( style = 'float: right', actionButton(ns('cur_save'), 'Save Group') ) ) ), hr(), actionButton(ns('cur_group_save'), 'Preview & Export',width = '100%') ) } observeEvent(input[['elec_loc_callback']], { dat = input[['elec_loc_callback']] print(dat) do.call(switch_to, dat) }) 
output[[('bad_electrodes_out')]] = renderText({ bad_electrodes = rave:::parse_selections(input[[('ref_bad')]]) bad_electrodes = subject$filter_all_electrodes(bad_electrodes) if(length(bad_electrodes)){ bad_electrodes = rave:::deparse_selections(bad_electrodes) bad_electrodes }else{ 'No bad electrode' } }, env = ..runtime_env) # Utils current_group = function(){ group_number = as.integer(cur_group) if(!length(group_number) || group_number > length(ref_group)){ return() } group_info = ref_group[[group_number]] electrodes = rave:::parse_selections(group_info$rg_electrodes) electrodes = subject$filter_all_electrodes(electrodes) if(!length(electrodes)){ return() } group_info$electrodes = electrodes return(group_info) } get_ref_table = function(){ ref_info = cache(key = list( ref_name_alt = ref_name_alt, subject = subject$id ), import_external()) ref_table = ref_info$table ref_table } save_ref_table = function(tbl, is_new = FALSE){ print('Saving') val = list( table = tbl, new = is_new ) old = cache(key = list( ref_name_alt = ref_name_alt, subject = subject$id ), val, replace = T) if(nrow(old$table) != nrow(tbl)){ stop("Refernce table doesn't match") } local_data$ref_tbl = tbl invisible() } import_external = function(){ dirs = module_tools$get_subject_dirs() ref_name_alt %?<-% sprintf('reference_%s.csv', preload_info$reference_name) f = file.path(dirs$meta_dir, ref_name_alt) if(file.exists(f)){ tbl = read.csv(f, stringsAsFactors = F) if(!'Type' %in% names(tbl)){ tbl$Type = 'No Reference' }else{ tbl$Type[!tbl$Type %in% c('Common Average Reference', 'Bipolar Reference', 'White Matter Reference', 'No Reference')] = 'No Reference' } tbl = tbl[,c('Electrode', 'Group', 'Reference', 'Type')] is_new = T }else{ tbl = data.frame( Electrode = subject$preprocess_info('channels'), Group = '', Reference = 'noref', Type = 'No Reference', stringsAsFactors = F ) is_new = F } local_data$ref_tbl = tbl list( table = tbl, new = is_new ) } load_reference = function(){ dirs = 
module_tools$get_subject_dirs() ref_name_alt %?<-% sprintf('reference_%s.csv', preload_info$reference_name) # Get current settings key = list( ref_name_alt = ref_name_alt, subject = subject$id ) ref_info = cache(key = key, import_external()) ref_tbl = (ref_info$table) if(is.null(ref_tbl)){ return() } if(env$last_import != ref_name_alt){ env$last_import = ref_name_alt ref_info$new = TRUE }else{ ref_info$new = FALSE } # If ref_info$new, update compound input ref_group s.t. it matches with current settings, else replace and cache ref_info if(ref_info$new){ ref_info$new = FALSE unique_refs = ref_tbl[!duplicated(ref_tbl[,c('Group', 'Type')]), ] nn = nrow(unique_refs) if(nn > 0){ lapply(seq_len(nn), function(i){ # Group i row = unique_refs[i, ] # name updateTextInput(session, (sprintf('%s_%s_%d', 'ref_group', 'rg_name', i)), value = row$Group) # ref Method updateSelectInput(session, (sprintf('%s_%s_%d', 'ref_group', 'rg_type', i)), selected = row$Type) # Electrodes merged = merge(ref_tbl, row, by = c('Group', 'Type'), suffixes = c('', 'y')) updateTextInput( session, (sprintf('%s_%s_%d', 'ref_group', 'rg_electrodes', i)), value = rave:::deparse_selections(merged$Electrode) ) updateCompoundInput(session, ('ref_group'), to = nn) }) } }else{ # Construct table all_es = NULL for(ii in seq_len(length(ref_group))){ sub_group = ref_group[[ii]] sub_es = sub_group$rg_electrodes sub_es = rave:::parse_selections(sub_es) if(any(sub_es %in% all_es)){ dup_es = sub_es[sub_es %in% all_es] showNotification( p('Group [', sub_group$rg_name, '(', ii, ')] has duplicated electrode(s): ', rave:::deparse_selections(dup_es)), type = 'warning' ) } all_es = c(all_es, sub_es) sub_sel = ref_tbl$Electrode %in% sub_es if(any(sub_sel)){ ref_tbl$Group[sub_sel] = sub_group$rg_name ref_tbl$Type[sub_sel] = sub_group$rg_type } } ref_info$table = ref_tbl } cache(key = key, val = ref_info, replace = T) } gen_reference_blockwise = function(blockwise_table){ dirs = module_tools$get_subject_dirs() blocks = 
blockwise_table$Block refs = blockwise_table$Reference involved_es = rave:::parse_selections(refs) if(length(involved_es) == 0){ showNotification(p('No electrodes used. Why not use "noref"?'), type = 'error', session = session) return(FALSE) } fname = 'ref_0,' %&% rave:::deparse_selections(involved_es) %&% '.h5' f = file.path(dirs$channel_dir, 'reference', fname) unlink(f) subprogress = rave::progress('Loading Data', max = length(involved_es)) progress = rave::progress(sprintf('Generating reference [%s]', fname), max = length(blocks)+1) ref_data = new.env() for(ii in seq_along(blocks)){ b = blocks[[ii]] subprogress$reset() progress$inc('Loading data from block ' %&% b) es = rave:::parse_selections(refs[ii]) ref_data[[b]] = new.env() ref_data[[b]][['volt']] = 0 ref_data[[b]][['coef']] = 0 lapply(es, function(e){ subprogress$inc('Loading electrode ' %&% e) # load channel power = load_h5(file.path(dirs$channel_dir, 'power', sprintf('%d.h5', e)), name = '/raw/power/' %&% b)[] phase = load_h5(file.path(dirs$channel_dir, 'phase', sprintf('%d.h5', e)), name = '/raw/phase/' %&% b)[] volt = load_h5(file.path(dirs$channel_dir, 'voltage', sprintf('%d.h5', e)), name = '/raw/voltage/' %&% b)[] ref_data[[b]][['volt']] = ref_data[[b]][['volt']] + volt ref_data[[b]][['coef']] = ref_data[[b]][['coef']] + sqrt(power) * exp(1i * phase) }) if(length(es)){ ref_data[[b]][['volt']] = ref_data[[b]][['volt']] / length(es) ref_data[[b]][['coef']] = ref_data[[b]][['coef']] / length(es) }else{ e = involved_es[1] volt = load_h5(file.path(dirs$channel_dir, 'voltage', sprintf('%d.h5', e)), name = '/raw/voltage/' %&% b) power = load_h5(file.path(dirs$channel_dir, 'power', sprintf('%d.h5', e)), name = '/raw/power/' %&% b) ref_data[[b]][['volt']] = rep(0, length(volt)) ref_data[[b]][['coef']] = matrix(0, nrow = dim(power)[1], ncol = dim(power)[2]) } } progress$inc('Saving to disk...') # Average for(b in blocks){ volt = ref_data[[b]][['volt']] coef = ref_data[[b]][['coef']] coef = array(c(Mod(coef), 
Arg(coef)), dim = c(dim(coef), 2)) # Freq x Time x 2 save_h5(volt, file = f, name = sprintf('/voltage/%s', b), chunk = 1024, replace = T) save_h5(coef, file = f, name = sprintf('/wavelet/coef/%s', b), chunk = c(dim(coef)[1], 128, 2), replace = T) } progress$close() subprogress$close() removeModal() showNotification(p('Reference [', fname, '] exported.'), type = 'message') local_data$has_new_ref = Sys.time() } gen_reference = function(electrodes){ electrodes = subject$filter_all_electrodes(electrodes) if(length(electrodes) == 0){ return() } dirs = module_tools$get_subject_dirs() fname_h5 = sprintf('ref_%s.h5', rave:::deparse_selections(electrodes)) fname_fst = sprintf('ref_%s.fst', rave:::deparse_selections(electrodes)) f = file.path(dirs$channel_dir, 'reference', fname_h5) # generate reference # Step 0: chunk matrix ncores = rave_options('max_worker') ncols = ceiling(length(electrodes) / ncores) nes = length(electrodes) mat = matrix(NA, nrow = ncores, ncol = ncols) mat[seq_along(electrodes)] = electrodes # Summing up env$gen_volt = list() env$gen_coef = list() progress = rave::progress(sprintf('Generating reference [%s]', fname_h5), max = length(electrodes)+1) on.exit(progress$close()) blocks = subject$preprocess_info('blocks') lapply(seq_len(ncols), function(ii){ es = mat[, ii] es = es[!is.na(es)] lapply_async(es, function(e){ root_dir = dirs$channel_dir fname_h5 = sprintf('%d.h5', e) fname_fst = sprintf('%d.fst', e) sapply(blocks, function(b){ fst_file = file.path(root_dir, 'cache', 'power', 'raw', b, fname_fst) if(file.exists(fst_file)){ coef = fst::read_fst(fst_file) coef = t(sqrt(as.matrix(coef))) }else{ coef = sqrt(load_h5(file.path(root_dir, 'power', fname_h5), name = sprintf('/raw/power/%s', b))[]) } fst_file = file.path(root_dir, 'cache', 'phase', 'raw', b, fname_fst) if(file.exists(fst_file)){ phase = fst::read_fst(fst_file) phase = exp(1i * t((as.matrix(phase)))) }else{ phase = exp(1i * load_h5(file.path(root_dir, 'phase', fname_h5), name = 
sprintf('/raw/phase/%s', b))[]) } fst_file = file.path(root_dir, 'cache', 'voltage', 'raw', b, fname_fst) if(file.exists(fst_file)){ volt = fst::read_fst(fst_file)[,1] }else{ volt = load_h5(file.path(root_dir, 'voltage', fname_h5), name = sprintf('/raw/voltage/%s', b))[] } list( volt = volt, coef = coef * phase ) }, USE.NAMES = T, simplify = F) -> re gc() return(re) }, .call_back = function(i){ progress$inc(message = sprintf('Loading electrode %d', es[[i]])) }) -> re gc() lapply(re, function(dat){ for(b in blocks){ if(length(env$gen_volt[[b]])){ env$gen_volt[[b]] = env$gen_volt[[b]] + dat[[b]][['volt']] env$gen_coef[[b]] = env$gen_coef[[b]] + dat[[b]][['coef']] }else{ env$gen_volt[[b]] = dat[[b]][['volt']] env$gen_coef[[b]] = dat[[b]][['coef']] } } NULL }) }) progress$inc(message = 'Saving to disk.') ref_dir = file.path(dirs$channel_dir, 'cache', 'reference') # Average for(b in blocks){ volt = env$gen_volt[[b]] / nes coef = env$gen_coef[[b]] / nes coef = array(c(Mod(coef), Arg(coef)), dim = c(dim(coef), 2)) # Freq x Time x 2 save_h5(volt, file = f, name = sprintf('/voltage/%s', b), chunk = 1024, replace = T) save_h5(coef, file = f, name = sprintf('/wavelet/coef/%s', b), chunk = c(dim(coef)[1], 128, 2), replace = T) # fast_cache fast_cache = rave_options('fast_cache'); fast_cache %?<-% TRUE fst_coef = file.path(ref_dir, 'coef', b) fst_phase = file.path(ref_dir, 'phase', b) fst_volt = file.path(ref_dir, 'voltage', b) dir.create(fst_coef, recursive = T, showWarnings = F) dir.create(fst_phase, recursive = T, showWarnings = F) dir.create(fst_volt, recursive = T, showWarnings = F) if(fast_cache){ # fast cache referenced signals dat = as.data.frame(t(coef[,,1])) fst::write_fst(dat, file.path(fst_coef, fname_fst), compress = 100) dat = as.data.frame(t(coef[,,2])) fst::write_fst(dat, file.path(fst_phase, fname_fst), compress = 100) dat = data.frame(V1 = volt) fst::write_fst(dat, file.path(fst_volt, fname_fst), compress = 100) } } showNotification(p('Reference [', fname_h5, 
'] exported.'), type = 'message') local_data$has_new_ref = Sys.time() } get_refs = function(){ dirs = module_tools$get_subject_dirs() refs = list.files(file.path(dirs$channel_dir, 'reference'), pattern = '^ref_.*\\.h5$') if(!length(refs)){ return(list()) } es = str_split_fixed(refs, '(ref_)|(\\.h5)', n = 3)[,2] re = lapply(es, rave:::parse_selections) names(re) = 'ref_' %&% es re } output[[('export_table')]] <- DT::renderDT({ if(is.data.frame(local_data$ref_tbl)){ local_data$ref_tbl } }, env = ..runtime_env) observe({ val = input[[('ref_export_name')]] val %?<-% 'default' val = str_replace_all(val, '\\W', '') val = str_to_lower(val) val = 'Reference Table Name: (reference_' %&% val %&% '.csv)' updateTextInput(session, 'ref_export_name', label = val) }) export_ref_table = function(){ # get ref_table ref_tbl = get_ref_table() dirs = subject$dirs fname = input[[('ref_export_name')]] fname %?<-% 'default' fname = str_replace_all(fname, '\\W', '') fname = str_to_lower(fname) fname = 'reference_' %&% fname %&% '.csv' fpath = file.path(dirs$meta_dir, fname) rave:::safe_write_csv(data = ref_tbl, file = fpath, row.names = F) # write to preprocess that subject is already refrenced utils = rave_preprocess_tools() utils$load_subject(subject_code = subject$subject_code, project_name = subject$project_name) utils$save_to_subject(checklevel = 4) # 4 means referenced switch_to('condition_explorer') return(fname) } load_refchan = function(r, subject_channel_dir, blocks, ram = T){ es = str_extract(r, '[0-9,\\-]+') es = rave:::parse_selections(es) ref_file = file.path(subject_channel_dir, 'reference', sprintf('%s.h5', r)) if(!file.exists(ref_file)){ if(length(es) == 1){ volt = sapply(blocks, function(b){ load_h5(file.path(subject_channel_dir, 'voltage', sprintf('%d.h5', es)), '/raw/voltage/' %&% b, ram = ram) }, simplify = F, USE.NAMES = T) coef = sapply(blocks, function(b){ power = load_h5(file.path(subject_channel_dir, 'power', sprintf('%d.h5', es)), name = '/raw/power/' %&% b, ram 
= ram) phase = load_h5(file.path(subject_channel_dir, 'phase', sprintf('%d.h5', es)), name = '/raw/phase/' %&% b, ram = ram) list( power = power, phase = phase ) }, simplify = F, USE.NAMES = T) }else{ stop('Reference ', r, ' does not exist.') } }else{ volt = sapply(blocks, function(b){ load_h5(ref_file, '/voltage/' %&% b, ram = ram) }, simplify = F, USE.NAMES = T) coef = sapply(blocks, function(b){ load_h5(ref_file, '/wavelet/coef/' %&% b, ram = ram) }, simplify = F, USE.NAMES = T) } return(list(volt = volt, coef = coef)) } observeEvent(input$do_export_cache, { fname = export_ref_table() showNotification(p('Reference table [', fname, '] exported. Creating cache referenced data.'), type = 'message', id = ns('ref_export_cache_notification')) # Start cache ref_tbl = get_ref_table() electrodes = ref_tbl$Electrode blocks = subject$preprocess_info('blocks') subject_channel_dir = subject$dirs$channel_dir # Step 1 get all the references ref = table(ref_tbl$Reference) ref = ref[!names(ref) %in% c('', 'noref')] progress = progress(title = 'Create cache for electrodes', max = length(electrodes) + length(ref)) if(length(ref)){ ram = as.list(ref > 1) lapply_async(names(ram), function(r){ load_refchan(r = r, subject_channel_dir = subject_channel_dir, blocks = blocks, ram = ram[[r]]) }, .call_back = function(ii){ progress$inc(sprintf('Loading reference - [%s]', names(ram)[ii])) }) -> refs names(refs) = names(ram) }else{ refs = list() } ref_names = names(ref) for(b in blocks){ dir.create(file.path(subject_channel_dir, 'cache', 'power', 'ref', b), showWarnings = F, recursive = T) dir.create(file.path(subject_channel_dir, 'cache', 'phase', 'ref', b), showWarnings = F, recursive = T) dir.create(file.path(subject_channel_dir, 'cache', 'voltage', 'ref', b), showWarnings = F, recursive = T) } # write a 'noref' table to this file in case exporting scheme screw up cr_csv = file.path(subject_channel_dir, 'cache', 'cached_reference.csv') rave:::safe_write_csv(data.frame( Electrode = 
electrodes, Reference = 'noref' ), cr_csv, row.names = F) lapply_async(electrodes, function(e){ fname = sprintf('%d.h5', e) r = ref_tbl$Reference[ref_tbl$Electrode == e] if(is.blank(r)) { r = 'noref' } volt_fname = file.path(subject_channel_dir, 'voltage', fname) power_fname = file.path(subject_channel_dir, 'power', fname) phase_fname = file.path(subject_channel_dir, 'phase', fname) lapply(blocks, function(b){ # load electrode - raw volt = load_h5(volt_fname, '/raw/voltage/' %&% b, ram = T) # reference voltage volt_ref = refs[[r]][['volt']][[b]][] volt_ref %?<-% 0 volt = volt - volt_ref save_h5(volt, volt_fname, name = '/ref/voltage/' %&% b, replace = T, chunk = 1024) fst::write_fst(data.frame(V1 = volt), path = file.path(subject_channel_dir, 'cache', 'voltage', 'ref', b, sprintf('%d.fst', e))) # load electrode - coef power = load_h5(power_fname, '/raw/power/' %&% b, ram = T) phase = load_h5(phase_fname, '/raw/phase/' %&% b, ram = T) coef = sqrt(power) * exp(1i * phase) # reference coef coef_ref = refs[[r]][['coef']][[b]] if(!is.null(coef_ref)){ if('power' %in% names(coef_ref)){ coef_ref = sqrt(coef_ref$power[]) * exp(1i * coef_ref$phase[]) }else{ coef_ref = coef_ref[] coef_ref = coef_ref[,,1] * exp(1i * coef_ref[,,2]) } }else{ coef_ref = 0 } coef = coef - coef_ref # save power and phase power = Mod(coef)^2 phase = Arg(coef) dim = dim(power); dim[2] = 128 save_h5(power, power_fname, name = '/ref/power/' %&% b, replace = T, chunk = dim) fst::write_fst(as.data.frame(t(power)), path = file.path(subject_channel_dir, 'cache', 'power', 'ref', b, sprintf('%d.fst', e))) save_h5(phase, phase_fname, name = '/ref/phase/' %&% b, replace = T, chunk = dim) fst::write_fst(as.data.frame(t(phase)), path = file.path(subject_channel_dir, 'cache', 'phase', 'ref', b, sprintf('%d.fst', e))) rm(list = ls(envir = environment())); gc() invisible() }) # save reference save_h5(r, file = volt_fname, name = '/reference', replace = T, chunk = 1, size = 1000) save_h5(r, file = power_fname, name 
= '/reference', replace = T, chunk = 1, size = 1000) save_h5(r, file = phase_fname, name = '/reference', replace = T, chunk = 1, size = 1000) return(r) }, .call_back = function(ii){ progress$inc(sprintf('Referencing electrode - %d', electrodes[[ii]])) }) -> refs # overwrite cached_reference.csv write.csv(data.frame( Electrode = electrodes, Reference = unlist(refs) ), cr_csv, row.names = F) progress$close() showNotification(p('Now data are cached according to [', fname, ']. Reloading subject.'), type = 'message', id = ns('ref_export_cache_notification')) removeModal() fname = input[[('ref_export_name')]] fname %?<-% 'default' fname = str_replace_all(fname, '\\W', '') fname = str_to_lower(fname) module_tools$reload(reference = fname) reloadUI() }) observeEvent(input[[('do_export')]], { fname = export_ref_table() showNotification(p('Reference table [', fname, '] exported. Reloading subject.'), type = 'message', id = ns('ref_export_cache_notification')) removeModal() fname = input[[('ref_export_name')]] fname %?<-% 'default' fname = str_replace_all(fname, '\\W', '') fname = str_to_lower(fname) module_tools$reload(reference = fname) reloadUI() }) check_load_volt = function(){ if(is.null(env$volt)){ env$volt = module_tools$get_voltage2() } } # Rave Execute rave_execute({ # Part 0: load voltage data on the fly # Part 1: Load or new reference scheme load_reference() # Part 2: show a specific group local_data$refresh = Sys.time() }) observeEvent(input$cur_group_save, { # Save table local_data$ref_tbl = get_ref_table() showModal( shiny::modalDialog( title = 'Export Reference Table', size = 'l', easyClose = T, footer = fluidRow( div( class = 'col-md-4 col-md-push-8 col-sm-12', textInput(ns('ref_export_name'), 'Reference Name: ', value = 'default', placeholder = 'File name for reference table') ), column( width = 12L, modalButton('Cancel'), actionButton(ns('do_export'), 'Export'), actionButton(ns('do_export_cache'), 'Export & Cache') ) ), DT::DTOutput(ns('export_table')) ) ) 
}) # Output - visualizations console = function(){ print(reactiveValuesToList(input)) } ref_generator_ui = function(){ tagList( textInput(ns('ref_electrodes'), label = 'Electrodes', value = '', placeholder = 'e.g. 1-3,5'), actionButton(ns('ref_calc'), 'Generate Reference', width = '100%'), hr(), actionButton(ns('ref_blockwise'), 'Generate Reference for Each Blocks', width = '100%') ) } observeEvent(input$ref_blockwise, { env$blockwise_reference %?<-% data.frame( Block = subject$preprocess_info('blocks'), Reference = 'Zeros', stringsAsFactors = F ) showModal(modalDialog( title = 'Reference Generator', easyClose = F, size = 'l', footer = tagList( actionButton(ns('ref_modal_cancel'), 'Discard'), actionButton(ns('ref_modal_ok'), 'Generate Reference') ), div( p('WARNING: Reference is not recommended on the block level. ', 'This module only provides partial support and please do NOT cache the referenced electrodes. ', 'Also, visualizations will be in accurate if reference by blocks, so use at your own risk.', style = 'color:red;'), DT::DTOutput(ns('ref_modal_tbl')) ) )) }) observeEvent(input$ref_modal_cancel, { removeModal() }) output$ref_modal_tbl <- DT::renderDT({ env$blockwise_reference }, env = ..runtime_env, editable = T) ref_modal_tbl_proxy = DT::dataTableProxy('ref_modal_tbl', session = session) observeEvent(input$ref_modal_tbl_cell_edit, { info = input$ref_modal_tbl_cell_edit i = info$row j = info$col v = info$value # string match electrode v = str_match(v, '(ref_|[\\ ]{0})([0-9,\\-]*)')[3] if(is_invalid(v, .invalids = c('null', 'na', 'blank'))){ v = 'Zeros' }else{ v = rave:::parse_selections(v) v = subject$filter_all_electrodes(v) if(length(v)){ v = rave:::deparse_selections(v) }else{ v = 'Zeros' } } blockwise_reference = env$blockwise_reference if(names(blockwise_reference)[j] == 'Reference'){ env$blockwise_reference[i, j] = v DT::replaceData(ref_modal_tbl_proxy, env$blockwise_reference, resetPaging = FALSE) # important } }) observeEvent(input$ref_modal_ok, { 
blockwise_reference = env$blockwise_reference gen_reference_blockwise(blockwise_reference) }) observe({ ref_calc_label = 'Generate Reference' ref_es = rave:::parse_selections(input$ref_electrodes) if(length(ref_es)){ ref_es = subject$filter_all_electrodes(ref_es) if(length(ref_es)){ ref_calc_label = 'Generate [ref_' %&% rave:::deparse_selections(ref_es) %&% "]" } } updateActionButton(session, inputId = 'ref_calc', label = ref_calc_label) }) observeEvent(input$ref_calc, { ref_es = rave:::parse_selections(isolate(input$ref_electrodes)) ref_es = subject$filter_all_electrodes(ref_es) if(length(ref_es) == 0){ showNotification(p('No electrode(s) selected'), type = 'error') }else{ # check conditions if we need to create reference old_files = list.files(env$ref_dir, pattern = 'ref_.*\\.h5') old_files = str_split_fixed(old_files, '(ref_)|(\\.h5)', 3)[,2] new_file = rave:::deparse_selections(ref_es) if(new_file %in% old_files){ showNotification(p('Reference [ref_', new_file, '.h5] already exists.'), type = 'message') }else{ gen_reference(ref_es) } } }) # Debug if(FALSE){ m = ModuleEnvir$new(module_id = 'mid', 'ref', script_path = './inst/modules/builtin_modules/reference/main.R'); init_app(m, test.mode = T) ns = shiny::NS('mid') self = execenv = RAVEbeauchamplab:::..module_env$condition_explorer$private$exec_env$voa3gkLDZwEMBPmbClqi # Post self = execenv = m$private$exec_env$ execenv$private$inputs ref_group = execenv$param_env$ref_group session = execenv$static_env$session e = environment(execenv$static_env$console) }
/inst/modules/builtin_modules/reference/main.R
no_license
satpreetsingh/rave
R
false
false
32,163
r
# Module for referencing electrodes # Init virtualenv for module dev if(F){ library(rave) m = ModuleEnvir$new(module_id = 'mid', 'ref', script_path = './inst/modules/builtin_modules/reference/main.R'); init_app(m) rave_prepare('Lang_loc/YCB', 1, 'YCBpd', c(1,2), data_types = NULL) profvis::profvis({ rave_prepare(subject = 'Large/YAB', electrodes = c(1:10, 13:20), epoch = 'YABa', time_range = c(1,2), data_types = 'power', attach = F, reference = 'test') }) profvis::profvis({ rave_prepare(subject = 'Large/YAB', electrodes = c(1:10, 13:20), epoch = 'YABa', time_range = c(1,2), data_types = 'power', attach = F, reference = 'default') }) rave_data = getDefaultDataRepository() pryr::object_size(rave_data) } rave_prepare(subject = 'congruency1/YAB', electrodes = 64:65, epoch = 'YABa', time_range = c(1,2), data_types = NULL) # load libraries library(shiny) library(stringr) library(magrittr) # Shiny session used to update inputs in advanced ways session = getDefaultReactiveDomain() input = getDefaultReactiveInput() output = getDefaultReactiveOutput() local_data = shiny::reactiveValues( group_number = NULL, refresh = NULL, do_parallel_plot = NULL, load_mesh = F ) ref_group %?<-% list() # Environment to store information env = new.env(parent = baseenv()) env$ref_calc = 0 env$last_import = 'new..' 
# Load UIs source('UI.R') source('plot.R') # source('./inst/modules/builtin_modules/reference/UI.R') observeEvent(input[['bipolar_modal']], { # get group info group_info = current_group() if(!length(group_info)){ return() } ref_tbl = get_ref_table() electrodes = group_info$electrodes bptbl = ref_tbl[ref_tbl$Electrode %in% electrodes,] bptbl$Type = 'Bipolar Reference' if(nrow(bptbl) > 0 && unique(bptbl$Reference) == 'noref'){ e = bptbl$Electrode bptbl$Reference = c(paste0('ref_', e[-1]), '') } env$bipolar_tbl = bptbl showModal( shiny::modalDialog( title = 'Bipolar Reference', size = 'l', easyClose = F, footer = tagList( actionButton(ns('bp_confirm'), 'Confirm') ), DT::DTOutput(ns('bipolar_table')) ) ) }) observeEvent(input[[('bp_confirm')]], { tbl = env$bipolar_tbl ref_tbl = get_ref_table() if(nrow(tbl)){ for(ii in seq_len(nrow(tbl))){ sel = ref_tbl$Electrode == tbl$Electrode[ii] ref_tbl$Reference[sel] = tbl$Reference[ii] ref_tbl$Type[sel] = 'Bipolar Reference' } # save ref_tbl save_ref_table(ref_tbl) } removeModal(session = session) }) bipolar_proxy = DT::dataTableProxy('bipolar_table', session = session) output[[('bipolar_table')]] = DT::renderDT({ env$bipolar_tbl }, env = ..runtime_env, editable = TRUE) observeEvent(input[[('bipolar_table_cell_edit')]], { info = input[[('bipolar_table_cell_edit')]] i = info$row j = info$col v = info$value # string match electrode v = str_match(v, '(ref_|[\\ ]{0})([0-9]*)')[3] if(is_invalid(v, .invalids = c('null', 'na', 'blank'))){ v = '' }else{ v = subject$filter_all_electrodes(as.integer(v)) if(!length(v)){ return() }else{ v = str_c('ref_', v) } } bipolar_tbl = env$bipolar_tbl if(names(bipolar_tbl)[j] == 'Reference'){ env$bipolar_tbl[i, j] = v DT::replaceData(bipolar_proxy, env$bipolar_tbl, resetPaging = FALSE) # important } }) output[['elec_loc']] <- threejsr::renderThreejs({ local_data$refresh group_info = current_group() group_info %?<-% list(electrodes = NULL) name = group_info$rg_name ref_tbl = get_ref_table() 
if(is.blank(name)){ name = 'Current Group' } # join electrodes.csv with ref table tbl = merge(ref_tbl, subject$electrodes[,c('Electrode', 'Coord_x','Coord_y','Coord_z', 'Label')], id = 'Electrode', suffixes = c('.x', '')) tbl$Label[is.na(tbl$Label)] = 'No Label' electrodes = group_info$electrodes with(tbl, { sprintf('<p>Reference - %s (%s)<br/>Reference to - %s</p>', Group, Type, Reference) }) -> marker values = rep(-1, length(electrodes)) bad_electrodes = rave:::parse_selections(input[[('ref_bad')]]) values[electrodes %in% bad_electrodes] = 1 module_tools$plot_3d_electrodes( tbl = tbl, electrodes = electrodes, values = values, marker = marker, pal = colorRampPalette(c('navy', 'black', 'red'))(11), show_mesh = local_data$load_mesh # link_module = 'condition_explorer', # variable_name = 'electrode' ) }) observeEvent(input$load_mesh, { load_mesh = isolate(!local_data$load_mesh) local_data$load_mesh = load_mesh updateActionButton(session, 'load_mesh', label = ifelse(load_mesh, 'Hide Mesh', 'Show Mesh')) }) elec_loc_ui = function(){ tagList( actionLink(ns('load_mesh'), 'Show Mesh'), threejsr::threejsOutput(ns('elec_loc'), height = '300px') ) } observeEvent(input[[('cur_save')]], { ref_to = input[[('ref_to')]] group_info = current_group() if(is.null(group_info)){ return() } electrodes = group_info$electrodes bad_electrodes = rave:::parse_selections(input[[('ref_bad')]]) ref_table = get_ref_table() sel = ref_table$Electrode %in% electrodes ref_table$Group[sel] = group_info$rg_name if(group_info$rg_type %in% c('Common Average Reference', 'White Matter Reference', 'No Reference')){ ref_table$Reference[sel] = ref_to ref_table$Reference[sel & ref_table$Electrode %in% bad_electrodes] = '' # set bad electrodes ref_table$Type[sel] = group_info$rg_type save_ref_table(tbl = ref_table) showNotification(p( group_info$rg_name, ' (', group_info$rg_electrodes, ') is now set to be referenced to [', ref_to, ']' ), type = 'message') } }, priority = -1L) # Customized UI cur_group_ui = 
function(){ refresh = local_data$refresh logger('cur_group_ui') new_ref = local_data$has_new_ref if(length(cur_group) && cur_group <= length(ref_group)){ group_number = as.integer(cur_group) group_info = ref_group[[group_number]] group_type = group_info$rg_type group_name = group_info$rg_name electrodes = rave:::parse_selections(group_info$rg_electrodes) if(length(electrodes) == 0){ return(tagList( hr(), actionButton(ns('cur_group_save'), 'Preview & Export',width = '100%') )) } }else{ return(tagList( hr(), actionButton(ns('cur_group_save'), 'Preview & Export',width = '100%') )) } refs = get_refs() ref_names = names(refs); ref_names = c('noref', ref_names) ref_tbl = get_ref_table() sel = ref_tbl$Electrode %in% electrodes switch ( group_type, 'No Reference' = { selectInput(ns('ref_to'), 'Reference to:', choices = 'noref', selected = 'noref') }, 'Bipolar Reference' = { tagList( tags$label('Reference to:'), actionButton(ns('bipolar_modal'), 'Open Table', width = '100%', style = 'margin-bottom: 15px') ) }, # By default, it's either 'Common Average Reference' or 'White Matter Reference' { # try to get reference name selected = unique(c(ref_tbl$Reference[sel]), 'noref') selected = selected[selected != ''][1] selectInput(ns('ref_to'), 'Reference to:', choices = ref_names, selected = selected) } ) -> inp tagList( fluidRow( column( width = 7, inp, p( tags$label('Group Name: '), group_name, br(), tags$label('Electrodes: '), group_info$rg_electrodes, br(), tags$label('Bad Electrodes: '), textOutput(ns('bad_electrodes_out'), inline = T) ) ), column( width = 5, textInput(ns('ref_bad'), 'Bad Electrodes:', value = rave:::deparse_selections(ref_tbl$Electrode[sel & ref_tbl$Reference == ''])), div( style = 'float: right', actionButton(ns('cur_save'), 'Save Group') ) ) ), hr(), actionButton(ns('cur_group_save'), 'Preview & Export',width = '100%') ) } observeEvent(input[['elec_loc_callback']], { dat = input[['elec_loc_callback']] print(dat) do.call(switch_to, dat) }) 
output[[('bad_electrodes_out')]] = renderText({ bad_electrodes = rave:::parse_selections(input[[('ref_bad')]]) bad_electrodes = subject$filter_all_electrodes(bad_electrodes) if(length(bad_electrodes)){ bad_electrodes = rave:::deparse_selections(bad_electrodes) bad_electrodes }else{ 'No bad electrode' } }, env = ..runtime_env) # Utils current_group = function(){ group_number = as.integer(cur_group) if(!length(group_number) || group_number > length(ref_group)){ return() } group_info = ref_group[[group_number]] electrodes = rave:::parse_selections(group_info$rg_electrodes) electrodes = subject$filter_all_electrodes(electrodes) if(!length(electrodes)){ return() } group_info$electrodes = electrodes return(group_info) } get_ref_table = function(){ ref_info = cache(key = list( ref_name_alt = ref_name_alt, subject = subject$id ), import_external()) ref_table = ref_info$table ref_table } save_ref_table = function(tbl, is_new = FALSE){ print('Saving') val = list( table = tbl, new = is_new ) old = cache(key = list( ref_name_alt = ref_name_alt, subject = subject$id ), val, replace = T) if(nrow(old$table) != nrow(tbl)){ stop("Refernce table doesn't match") } local_data$ref_tbl = tbl invisible() } import_external = function(){ dirs = module_tools$get_subject_dirs() ref_name_alt %?<-% sprintf('reference_%s.csv', preload_info$reference_name) f = file.path(dirs$meta_dir, ref_name_alt) if(file.exists(f)){ tbl = read.csv(f, stringsAsFactors = F) if(!'Type' %in% names(tbl)){ tbl$Type = 'No Reference' }else{ tbl$Type[!tbl$Type %in% c('Common Average Reference', 'Bipolar Reference', 'White Matter Reference', 'No Reference')] = 'No Reference' } tbl = tbl[,c('Electrode', 'Group', 'Reference', 'Type')] is_new = T }else{ tbl = data.frame( Electrode = subject$preprocess_info('channels'), Group = '', Reference = 'noref', Type = 'No Reference', stringsAsFactors = F ) is_new = F } local_data$ref_tbl = tbl list( table = tbl, new = is_new ) } load_reference = function(){ dirs = 
module_tools$get_subject_dirs() ref_name_alt %?<-% sprintf('reference_%s.csv', preload_info$reference_name) # Get current settings key = list( ref_name_alt = ref_name_alt, subject = subject$id ) ref_info = cache(key = key, import_external()) ref_tbl = (ref_info$table) if(is.null(ref_tbl)){ return() } if(env$last_import != ref_name_alt){ env$last_import = ref_name_alt ref_info$new = TRUE }else{ ref_info$new = FALSE } # If ref_info$new, update compound input ref_group s.t. it matches with current settings, else replace and cache ref_info if(ref_info$new){ ref_info$new = FALSE unique_refs = ref_tbl[!duplicated(ref_tbl[,c('Group', 'Type')]), ] nn = nrow(unique_refs) if(nn > 0){ lapply(seq_len(nn), function(i){ # Group i row = unique_refs[i, ] # name updateTextInput(session, (sprintf('%s_%s_%d', 'ref_group', 'rg_name', i)), value = row$Group) # ref Method updateSelectInput(session, (sprintf('%s_%s_%d', 'ref_group', 'rg_type', i)), selected = row$Type) # Electrodes merged = merge(ref_tbl, row, by = c('Group', 'Type'), suffixes = c('', 'y')) updateTextInput( session, (sprintf('%s_%s_%d', 'ref_group', 'rg_electrodes', i)), value = rave:::deparse_selections(merged$Electrode) ) updateCompoundInput(session, ('ref_group'), to = nn) }) } }else{ # Construct table all_es = NULL for(ii in seq_len(length(ref_group))){ sub_group = ref_group[[ii]] sub_es = sub_group$rg_electrodes sub_es = rave:::parse_selections(sub_es) if(any(sub_es %in% all_es)){ dup_es = sub_es[sub_es %in% all_es] showNotification( p('Group [', sub_group$rg_name, '(', ii, ')] has duplicated electrode(s): ', rave:::deparse_selections(dup_es)), type = 'warning' ) } all_es = c(all_es, sub_es) sub_sel = ref_tbl$Electrode %in% sub_es if(any(sub_sel)){ ref_tbl$Group[sub_sel] = sub_group$rg_name ref_tbl$Type[sub_sel] = sub_group$rg_type } } ref_info$table = ref_tbl } cache(key = key, val = ref_info, replace = T) } gen_reference_blockwise = function(blockwise_table){ dirs = module_tools$get_subject_dirs() blocks = 
blockwise_table$Block refs = blockwise_table$Reference involved_es = rave:::parse_selections(refs) if(length(involved_es) == 0){ showNotification(p('No electrodes used. Why not use "noref"?'), type = 'error', session = session) return(FALSE) } fname = 'ref_0,' %&% rave:::deparse_selections(involved_es) %&% '.h5' f = file.path(dirs$channel_dir, 'reference', fname) unlink(f) subprogress = rave::progress('Loading Data', max = length(involved_es)) progress = rave::progress(sprintf('Generating reference [%s]', fname), max = length(blocks)+1) ref_data = new.env() for(ii in seq_along(blocks)){ b = blocks[[ii]] subprogress$reset() progress$inc('Loading data from block ' %&% b) es = rave:::parse_selections(refs[ii]) ref_data[[b]] = new.env() ref_data[[b]][['volt']] = 0 ref_data[[b]][['coef']] = 0 lapply(es, function(e){ subprogress$inc('Loading electrode ' %&% e) # load channel power = load_h5(file.path(dirs$channel_dir, 'power', sprintf('%d.h5', e)), name = '/raw/power/' %&% b)[] phase = load_h5(file.path(dirs$channel_dir, 'phase', sprintf('%d.h5', e)), name = '/raw/phase/' %&% b)[] volt = load_h5(file.path(dirs$channel_dir, 'voltage', sprintf('%d.h5', e)), name = '/raw/voltage/' %&% b)[] ref_data[[b]][['volt']] = ref_data[[b]][['volt']] + volt ref_data[[b]][['coef']] = ref_data[[b]][['coef']] + sqrt(power) * exp(1i * phase) }) if(length(es)){ ref_data[[b]][['volt']] = ref_data[[b]][['volt']] / length(es) ref_data[[b]][['coef']] = ref_data[[b]][['coef']] / length(es) }else{ e = involved_es[1] volt = load_h5(file.path(dirs$channel_dir, 'voltage', sprintf('%d.h5', e)), name = '/raw/voltage/' %&% b) power = load_h5(file.path(dirs$channel_dir, 'power', sprintf('%d.h5', e)), name = '/raw/power/' %&% b) ref_data[[b]][['volt']] = rep(0, length(volt)) ref_data[[b]][['coef']] = matrix(0, nrow = dim(power)[1], ncol = dim(power)[2]) } } progress$inc('Saving to disk...') # Average for(b in blocks){ volt = ref_data[[b]][['volt']] coef = ref_data[[b]][['coef']] coef = array(c(Mod(coef), 
Arg(coef)), dim = c(dim(coef), 2)) # Freq x Time x 2 save_h5(volt, file = f, name = sprintf('/voltage/%s', b), chunk = 1024, replace = T) save_h5(coef, file = f, name = sprintf('/wavelet/coef/%s', b), chunk = c(dim(coef)[1], 128, 2), replace = T) } progress$close() subprogress$close() removeModal() showNotification(p('Reference [', fname, '] exported.'), type = 'message') local_data$has_new_ref = Sys.time() } gen_reference = function(electrodes){ electrodes = subject$filter_all_electrodes(electrodes) if(length(electrodes) == 0){ return() } dirs = module_tools$get_subject_dirs() fname_h5 = sprintf('ref_%s.h5', rave:::deparse_selections(electrodes)) fname_fst = sprintf('ref_%s.fst', rave:::deparse_selections(electrodes)) f = file.path(dirs$channel_dir, 'reference', fname_h5) # generate reference # Step 0: chunk matrix ncores = rave_options('max_worker') ncols = ceiling(length(electrodes) / ncores) nes = length(electrodes) mat = matrix(NA, nrow = ncores, ncol = ncols) mat[seq_along(electrodes)] = electrodes # Summing up env$gen_volt = list() env$gen_coef = list() progress = rave::progress(sprintf('Generating reference [%s]', fname_h5), max = length(electrodes)+1) on.exit(progress$close()) blocks = subject$preprocess_info('blocks') lapply(seq_len(ncols), function(ii){ es = mat[, ii] es = es[!is.na(es)] lapply_async(es, function(e){ root_dir = dirs$channel_dir fname_h5 = sprintf('%d.h5', e) fname_fst = sprintf('%d.fst', e) sapply(blocks, function(b){ fst_file = file.path(root_dir, 'cache', 'power', 'raw', b, fname_fst) if(file.exists(fst_file)){ coef = fst::read_fst(fst_file) coef = t(sqrt(as.matrix(coef))) }else{ coef = sqrt(load_h5(file.path(root_dir, 'power', fname_h5), name = sprintf('/raw/power/%s', b))[]) } fst_file = file.path(root_dir, 'cache', 'phase', 'raw', b, fname_fst) if(file.exists(fst_file)){ phase = fst::read_fst(fst_file) phase = exp(1i * t((as.matrix(phase)))) }else{ phase = exp(1i * load_h5(file.path(root_dir, 'phase', fname_h5), name = 
sprintf('/raw/phase/%s', b))[]) } fst_file = file.path(root_dir, 'cache', 'voltage', 'raw', b, fname_fst) if(file.exists(fst_file)){ volt = fst::read_fst(fst_file)[,1] }else{ volt = load_h5(file.path(root_dir, 'voltage', fname_h5), name = sprintf('/raw/voltage/%s', b))[] } list( volt = volt, coef = coef * phase ) }, USE.NAMES = T, simplify = F) -> re gc() return(re) }, .call_back = function(i){ progress$inc(message = sprintf('Loading electrode %d', es[[i]])) }) -> re gc() lapply(re, function(dat){ for(b in blocks){ if(length(env$gen_volt[[b]])){ env$gen_volt[[b]] = env$gen_volt[[b]] + dat[[b]][['volt']] env$gen_coef[[b]] = env$gen_coef[[b]] + dat[[b]][['coef']] }else{ env$gen_volt[[b]] = dat[[b]][['volt']] env$gen_coef[[b]] = dat[[b]][['coef']] } } NULL }) }) progress$inc(message = 'Saving to disk.') ref_dir = file.path(dirs$channel_dir, 'cache', 'reference') # Average for(b in blocks){ volt = env$gen_volt[[b]] / nes coef = env$gen_coef[[b]] / nes coef = array(c(Mod(coef), Arg(coef)), dim = c(dim(coef), 2)) # Freq x Time x 2 save_h5(volt, file = f, name = sprintf('/voltage/%s', b), chunk = 1024, replace = T) save_h5(coef, file = f, name = sprintf('/wavelet/coef/%s', b), chunk = c(dim(coef)[1], 128, 2), replace = T) # fast_cache fast_cache = rave_options('fast_cache'); fast_cache %?<-% TRUE fst_coef = file.path(ref_dir, 'coef', b) fst_phase = file.path(ref_dir, 'phase', b) fst_volt = file.path(ref_dir, 'voltage', b) dir.create(fst_coef, recursive = T, showWarnings = F) dir.create(fst_phase, recursive = T, showWarnings = F) dir.create(fst_volt, recursive = T, showWarnings = F) if(fast_cache){ # fast cache referenced signals dat = as.data.frame(t(coef[,,1])) fst::write_fst(dat, file.path(fst_coef, fname_fst), compress = 100) dat = as.data.frame(t(coef[,,2])) fst::write_fst(dat, file.path(fst_phase, fname_fst), compress = 100) dat = data.frame(V1 = volt) fst::write_fst(dat, file.path(fst_volt, fname_fst), compress = 100) } } showNotification(p('Reference [', fname_h5, 
'] exported.'), type = 'message') local_data$has_new_ref = Sys.time() } get_refs = function(){ dirs = module_tools$get_subject_dirs() refs = list.files(file.path(dirs$channel_dir, 'reference'), pattern = '^ref_.*\\.h5$') if(!length(refs)){ return(list()) } es = str_split_fixed(refs, '(ref_)|(\\.h5)', n = 3)[,2] re = lapply(es, rave:::parse_selections) names(re) = 'ref_' %&% es re } output[[('export_table')]] <- DT::renderDT({ if(is.data.frame(local_data$ref_tbl)){ local_data$ref_tbl } }, env = ..runtime_env) observe({ val = input[[('ref_export_name')]] val %?<-% 'default' val = str_replace_all(val, '\\W', '') val = str_to_lower(val) val = 'Reference Table Name: (reference_' %&% val %&% '.csv)' updateTextInput(session, 'ref_export_name', label = val) }) export_ref_table = function(){ # get ref_table ref_tbl = get_ref_table() dirs = subject$dirs fname = input[[('ref_export_name')]] fname %?<-% 'default' fname = str_replace_all(fname, '\\W', '') fname = str_to_lower(fname) fname = 'reference_' %&% fname %&% '.csv' fpath = file.path(dirs$meta_dir, fname) rave:::safe_write_csv(data = ref_tbl, file = fpath, row.names = F) # write to preprocess that subject is already refrenced utils = rave_preprocess_tools() utils$load_subject(subject_code = subject$subject_code, project_name = subject$project_name) utils$save_to_subject(checklevel = 4) # 4 means referenced switch_to('condition_explorer') return(fname) } load_refchan = function(r, subject_channel_dir, blocks, ram = T){ es = str_extract(r, '[0-9,\\-]+') es = rave:::parse_selections(es) ref_file = file.path(subject_channel_dir, 'reference', sprintf('%s.h5', r)) if(!file.exists(ref_file)){ if(length(es) == 1){ volt = sapply(blocks, function(b){ load_h5(file.path(subject_channel_dir, 'voltage', sprintf('%d.h5', es)), '/raw/voltage/' %&% b, ram = ram) }, simplify = F, USE.NAMES = T) coef = sapply(blocks, function(b){ power = load_h5(file.path(subject_channel_dir, 'power', sprintf('%d.h5', es)), name = '/raw/power/' %&% b, ram 
= ram) phase = load_h5(file.path(subject_channel_dir, 'phase', sprintf('%d.h5', es)), name = '/raw/phase/' %&% b, ram = ram) list( power = power, phase = phase ) }, simplify = F, USE.NAMES = T) }else{ stop('Reference ', r, ' does not exist.') } }else{ volt = sapply(blocks, function(b){ load_h5(ref_file, '/voltage/' %&% b, ram = ram) }, simplify = F, USE.NAMES = T) coef = sapply(blocks, function(b){ load_h5(ref_file, '/wavelet/coef/' %&% b, ram = ram) }, simplify = F, USE.NAMES = T) } return(list(volt = volt, coef = coef)) } observeEvent(input$do_export_cache, { fname = export_ref_table() showNotification(p('Reference table [', fname, '] exported. Creating cache referenced data.'), type = 'message', id = ns('ref_export_cache_notification')) # Start cache ref_tbl = get_ref_table() electrodes = ref_tbl$Electrode blocks = subject$preprocess_info('blocks') subject_channel_dir = subject$dirs$channel_dir # Step 1 get all the references ref = table(ref_tbl$Reference) ref = ref[!names(ref) %in% c('', 'noref')] progress = progress(title = 'Create cache for electrodes', max = length(electrodes) + length(ref)) if(length(ref)){ ram = as.list(ref > 1) lapply_async(names(ram), function(r){ load_refchan(r = r, subject_channel_dir = subject_channel_dir, blocks = blocks, ram = ram[[r]]) }, .call_back = function(ii){ progress$inc(sprintf('Loading reference - [%s]', names(ram)[ii])) }) -> refs names(refs) = names(ram) }else{ refs = list() } ref_names = names(ref) for(b in blocks){ dir.create(file.path(subject_channel_dir, 'cache', 'power', 'ref', b), showWarnings = F, recursive = T) dir.create(file.path(subject_channel_dir, 'cache', 'phase', 'ref', b), showWarnings = F, recursive = T) dir.create(file.path(subject_channel_dir, 'cache', 'voltage', 'ref', b), showWarnings = F, recursive = T) } # write a 'noref' table to this file in case exporting scheme screw up cr_csv = file.path(subject_channel_dir, 'cache', 'cached_reference.csv') rave:::safe_write_csv(data.frame( Electrode = 
electrodes, Reference = 'noref' ), cr_csv, row.names = F) lapply_async(electrodes, function(e){ fname = sprintf('%d.h5', e) r = ref_tbl$Reference[ref_tbl$Electrode == e] if(is.blank(r)) { r = 'noref' } volt_fname = file.path(subject_channel_dir, 'voltage', fname) power_fname = file.path(subject_channel_dir, 'power', fname) phase_fname = file.path(subject_channel_dir, 'phase', fname) lapply(blocks, function(b){ # load electrode - raw volt = load_h5(volt_fname, '/raw/voltage/' %&% b, ram = T) # reference voltage volt_ref = refs[[r]][['volt']][[b]][] volt_ref %?<-% 0 volt = volt - volt_ref save_h5(volt, volt_fname, name = '/ref/voltage/' %&% b, replace = T, chunk = 1024) fst::write_fst(data.frame(V1 = volt), path = file.path(subject_channel_dir, 'cache', 'voltage', 'ref', b, sprintf('%d.fst', e))) # load electrode - coef power = load_h5(power_fname, '/raw/power/' %&% b, ram = T) phase = load_h5(phase_fname, '/raw/phase/' %&% b, ram = T) coef = sqrt(power) * exp(1i * phase) # reference coef coef_ref = refs[[r]][['coef']][[b]] if(!is.null(coef_ref)){ if('power' %in% names(coef_ref)){ coef_ref = sqrt(coef_ref$power[]) * exp(1i * coef_ref$phase[]) }else{ coef_ref = coef_ref[] coef_ref = coef_ref[,,1] * exp(1i * coef_ref[,,2]) } }else{ coef_ref = 0 } coef = coef - coef_ref # save power and phase power = Mod(coef)^2 phase = Arg(coef) dim = dim(power); dim[2] = 128 save_h5(power, power_fname, name = '/ref/power/' %&% b, replace = T, chunk = dim) fst::write_fst(as.data.frame(t(power)), path = file.path(subject_channel_dir, 'cache', 'power', 'ref', b, sprintf('%d.fst', e))) save_h5(phase, phase_fname, name = '/ref/phase/' %&% b, replace = T, chunk = dim) fst::write_fst(as.data.frame(t(phase)), path = file.path(subject_channel_dir, 'cache', 'phase', 'ref', b, sprintf('%d.fst', e))) rm(list = ls(envir = environment())); gc() invisible() }) # save reference save_h5(r, file = volt_fname, name = '/reference', replace = T, chunk = 1, size = 1000) save_h5(r, file = power_fname, name 
= '/reference', replace = T, chunk = 1, size = 1000) save_h5(r, file = phase_fname, name = '/reference', replace = T, chunk = 1, size = 1000) return(r) }, .call_back = function(ii){ progress$inc(sprintf('Referencing electrode - %d', electrodes[[ii]])) }) -> refs # overwrite cached_reference.csv write.csv(data.frame( Electrode = electrodes, Reference = unlist(refs) ), cr_csv, row.names = F) progress$close() showNotification(p('Now data are cached according to [', fname, ']. Reloading subject.'), type = 'message', id = ns('ref_export_cache_notification')) removeModal() fname = input[[('ref_export_name')]] fname %?<-% 'default' fname = str_replace_all(fname, '\\W', '') fname = str_to_lower(fname) module_tools$reload(reference = fname) reloadUI() }) observeEvent(input[[('do_export')]], { fname = export_ref_table() showNotification(p('Reference table [', fname, '] exported. Reloading subject.'), type = 'message', id = ns('ref_export_cache_notification')) removeModal() fname = input[[('ref_export_name')]] fname %?<-% 'default' fname = str_replace_all(fname, '\\W', '') fname = str_to_lower(fname) module_tools$reload(reference = fname) reloadUI() }) check_load_volt = function(){ if(is.null(env$volt)){ env$volt = module_tools$get_voltage2() } } # Rave Execute rave_execute({ # Part 0: load voltage data on the fly # Part 1: Load or new reference scheme load_reference() # Part 2: show a specific group local_data$refresh = Sys.time() }) observeEvent(input$cur_group_save, { # Save table local_data$ref_tbl = get_ref_table() showModal( shiny::modalDialog( title = 'Export Reference Table', size = 'l', easyClose = T, footer = fluidRow( div( class = 'col-md-4 col-md-push-8 col-sm-12', textInput(ns('ref_export_name'), 'Reference Name: ', value = 'default', placeholder = 'File name for reference table') ), column( width = 12L, modalButton('Cancel'), actionButton(ns('do_export'), 'Export'), actionButton(ns('do_export_cache'), 'Export & Cache') ) ), DT::DTOutput(ns('export_table')) ) ) 
}) # Output - visualizations console = function(){ print(reactiveValuesToList(input)) } ref_generator_ui = function(){ tagList( textInput(ns('ref_electrodes'), label = 'Electrodes', value = '', placeholder = 'e.g. 1-3,5'), actionButton(ns('ref_calc'), 'Generate Reference', width = '100%'), hr(), actionButton(ns('ref_blockwise'), 'Generate Reference for Each Blocks', width = '100%') ) } observeEvent(input$ref_blockwise, { env$blockwise_reference %?<-% data.frame( Block = subject$preprocess_info('blocks'), Reference = 'Zeros', stringsAsFactors = F ) showModal(modalDialog( title = 'Reference Generator', easyClose = F, size = 'l', footer = tagList( actionButton(ns('ref_modal_cancel'), 'Discard'), actionButton(ns('ref_modal_ok'), 'Generate Reference') ), div( p('WARNING: Reference is not recommended on the block level. ', 'This module only provides partial support and please do NOT cache the referenced electrodes. ', 'Also, visualizations will be in accurate if reference by blocks, so use at your own risk.', style = 'color:red;'), DT::DTOutput(ns('ref_modal_tbl')) ) )) }) observeEvent(input$ref_modal_cancel, { removeModal() }) output$ref_modal_tbl <- DT::renderDT({ env$blockwise_reference }, env = ..runtime_env, editable = T) ref_modal_tbl_proxy = DT::dataTableProxy('ref_modal_tbl', session = session) observeEvent(input$ref_modal_tbl_cell_edit, { info = input$ref_modal_tbl_cell_edit i = info$row j = info$col v = info$value # string match electrode v = str_match(v, '(ref_|[\\ ]{0})([0-9,\\-]*)')[3] if(is_invalid(v, .invalids = c('null', 'na', 'blank'))){ v = 'Zeros' }else{ v = rave:::parse_selections(v) v = subject$filter_all_electrodes(v) if(length(v)){ v = rave:::deparse_selections(v) }else{ v = 'Zeros' } } blockwise_reference = env$blockwise_reference if(names(blockwise_reference)[j] == 'Reference'){ env$blockwise_reference[i, j] = v DT::replaceData(ref_modal_tbl_proxy, env$blockwise_reference, resetPaging = FALSE) # important } }) observeEvent(input$ref_modal_ok, { 
blockwise_reference = env$blockwise_reference gen_reference_blockwise(blockwise_reference) }) observe({ ref_calc_label = 'Generate Reference' ref_es = rave:::parse_selections(input$ref_electrodes) if(length(ref_es)){ ref_es = subject$filter_all_electrodes(ref_es) if(length(ref_es)){ ref_calc_label = 'Generate [ref_' %&% rave:::deparse_selections(ref_es) %&% "]" } } updateActionButton(session, inputId = 'ref_calc', label = ref_calc_label) }) observeEvent(input$ref_calc, { ref_es = rave:::parse_selections(isolate(input$ref_electrodes)) ref_es = subject$filter_all_electrodes(ref_es) if(length(ref_es) == 0){ showNotification(p('No electrode(s) selected'), type = 'error') }else{ # check conditions if we need to create reference old_files = list.files(env$ref_dir, pattern = 'ref_.*\\.h5') old_files = str_split_fixed(old_files, '(ref_)|(\\.h5)', 3)[,2] new_file = rave:::deparse_selections(ref_es) if(new_file %in% old_files){ showNotification(p('Reference [ref_', new_file, '.h5] already exists.'), type = 'message') }else{ gen_reference(ref_es) } } }) # Debug if(FALSE){ m = ModuleEnvir$new(module_id = 'mid', 'ref', script_path = './inst/modules/builtin_modules/reference/main.R'); init_app(m, test.mode = T) ns = shiny::NS('mid') self = execenv = RAVEbeauchamplab:::..module_env$condition_explorer$private$exec_env$voa3gkLDZwEMBPmbClqi # Post self = execenv = m$private$exec_env$ execenv$private$inputs ref_group = execenv$param_env$ref_group session = execenv$static_env$session e = environment(execenv$static_env$console) }
context("coll_diag") hsb$race_1 <- ifelse(hsb$race == 1, 1, 0) hsb$race_2 <- ifelse(hsb$race == 2, 1, 0) hsb$race_3 <- ifelse(hsb$race == 3, 1, 0) hsb$race_4 <- ifelse(hsb$race == 4, 1, 0) model <- lm( write ~ read + math + science + race_2 + race_3 + race_4, data = hsb ) test_that("output from vif_tol matches expected result", { act <- ols_vif_tol(model) Variables <- c("read", "math", "science", "race_2", "race_3", "race_4") Tolerance <- c(0.482, 0.469, 0.475, 0.692, 0.602, 0.467) VIF <- c(2.074, 2.132, 2.104, 1.446, 1.662, 2.141) exp <- tibble(Variables, Tolerance, VIF) expect_equivalent(round(act$Tolerance, 3), exp$Tolerance) expect_equivalent(round(act$VIF, 3), exp$VIF) }) test_that("output from eigen_cindex matches expected result", { act <- ols_eigen_cindex(model) col1 <- c(4.865, 1.002, 1.000, 0.091, 0.018, 0.013, 0.011) col2 <- c(1.000, 2.203, 2.205, 7.298, 16.263, 19.583, 21.447) col3 <- c(0.001, 0.000, 0.000, 0.009, 0.874, 0.049, 0.067) col4 <- c(0.001, 0.000, 0.000, 0.012, 0.240, 0.375, 0.373) col5 <- c(0.001, 0.000, 0.000, 0.009, 0.016, 0.017, 0.957) col6 <- c(0.001, 0.000, 0.000, 0.007, 0.024, 0.904, 0.064) col7 <- c(0.002, 0.003, 0.608, 0.367, 0.003, 0.000, 0.017) col8 <- c(0.002, 0.479, 0.012, 0.431, 0.061, 0.013, 0.002) col9 <- c(0.004, 0.014, 0.006, 0.962, 0.002, 0.011, 0.001) exp <- data.frame(col1, col2, col3, col4, col5, col6, col7, col8, col9) names(exp) <- c("Eigenvalue", "Condition Index", "intercept", "read", "math", "science", "race_2", "race_3", "race_4") expect_equivalent(round(act, 3), exp) }) test_that("output from ols_coll_diag is as expected", { x <- cat("Tolerance and Variance Inflation Factor --------------------------------------- # A tibble: 4 x 3 Variables Tolerance VIF <chr> <dbl> <dbl> 1 disp 0.1218116 8.209402 2 hp 0.3454979 2.894373 3 wt 0.1962092 5.096601 4 drat 0.4386836 2.279547 Eigenvalue and Condition Index ------------------------------ Eigenvalue Condition Index intercept disp hp 1 4.692806914 1.000000 0.0002323252 
0.001106455 0.002566185 2 0.240308641 4.419078 0.0036813894 0.034132904 0.031334562 3 0.052153430 9.485821 0.0009192095 0.058394262 0.735003722 4 0.011406889 20.283026 0.0014476535 0.885725642 0.207337511 5 0.003324127 37.573144 0.9937194224 0.020640737 0.023758021 wt drat 1 0.0007172086 0.0003775503 2 0.0009394254 0.0148250672 3 0.0700789813 0.0026259361 4 0.7179834661 0.0568226912 5 0.2102809185 0.9253487552") model <- lm(mpg ~ disp + hp + wt + drat, data = mtcars) expect_output(print(ols_coll_diag(model)), x) })
/tests/testthat/test-collindiag.R
no_license
topepo/olsrr
R
false
false
2,697
r
context("coll_diag") hsb$race_1 <- ifelse(hsb$race == 1, 1, 0) hsb$race_2 <- ifelse(hsb$race == 2, 1, 0) hsb$race_3 <- ifelse(hsb$race == 3, 1, 0) hsb$race_4 <- ifelse(hsb$race == 4, 1, 0) model <- lm( write ~ read + math + science + race_2 + race_3 + race_4, data = hsb ) test_that("output from vif_tol matches expected result", { act <- ols_vif_tol(model) Variables <- c("read", "math", "science", "race_2", "race_3", "race_4") Tolerance <- c(0.482, 0.469, 0.475, 0.692, 0.602, 0.467) VIF <- c(2.074, 2.132, 2.104, 1.446, 1.662, 2.141) exp <- tibble(Variables, Tolerance, VIF) expect_equivalent(round(act$Tolerance, 3), exp$Tolerance) expect_equivalent(round(act$VIF, 3), exp$VIF) }) test_that("output from eigen_cindex matches expected result", { act <- ols_eigen_cindex(model) col1 <- c(4.865, 1.002, 1.000, 0.091, 0.018, 0.013, 0.011) col2 <- c(1.000, 2.203, 2.205, 7.298, 16.263, 19.583, 21.447) col3 <- c(0.001, 0.000, 0.000, 0.009, 0.874, 0.049, 0.067) col4 <- c(0.001, 0.000, 0.000, 0.012, 0.240, 0.375, 0.373) col5 <- c(0.001, 0.000, 0.000, 0.009, 0.016, 0.017, 0.957) col6 <- c(0.001, 0.000, 0.000, 0.007, 0.024, 0.904, 0.064) col7 <- c(0.002, 0.003, 0.608, 0.367, 0.003, 0.000, 0.017) col8 <- c(0.002, 0.479, 0.012, 0.431, 0.061, 0.013, 0.002) col9 <- c(0.004, 0.014, 0.006, 0.962, 0.002, 0.011, 0.001) exp <- data.frame(col1, col2, col3, col4, col5, col6, col7, col8, col9) names(exp) <- c("Eigenvalue", "Condition Index", "intercept", "read", "math", "science", "race_2", "race_3", "race_4") expect_equivalent(round(act, 3), exp) }) test_that("output from ols_coll_diag is as expected", { x <- cat("Tolerance and Variance Inflation Factor --------------------------------------- # A tibble: 4 x 3 Variables Tolerance VIF <chr> <dbl> <dbl> 1 disp 0.1218116 8.209402 2 hp 0.3454979 2.894373 3 wt 0.1962092 5.096601 4 drat 0.4386836 2.279547 Eigenvalue and Condition Index ------------------------------ Eigenvalue Condition Index intercept disp hp 1 4.692806914 1.000000 0.0002323252 
0.001106455 0.002566185 2 0.240308641 4.419078 0.0036813894 0.034132904 0.031334562 3 0.052153430 9.485821 0.0009192095 0.058394262 0.735003722 4 0.011406889 20.283026 0.0014476535 0.885725642 0.207337511 5 0.003324127 37.573144 0.9937194224 0.020640737 0.023758021 wt drat 1 0.0007172086 0.0003775503 2 0.0009394254 0.0148250672 3 0.0700789813 0.0026259361 4 0.7179834661 0.0568226912 5 0.2102809185 0.9253487552") model <- lm(mpg ~ disp + hp + wt + drat, data = mtcars) expect_output(print(ols_coll_diag(model)), x) })
#' #' @title Table of Information about Marathons #' @description An interesting data set to see the effects of goals on what should be a unimodal distrubtion of finish times. The NYT had a good article - https://www.nytimes.com/2014/04/23/upshot/what-good-marathons-and-bad-investments-have-in-common.html?rref=upshot&_r=1 #' @format A data frame with columns: #' \describe{ #' \item{year}{The variable is integer. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is integer. The number of finishers at the marathon} #' \item{mean_time}{The variable is numeric. The average finish time in minutes.} #' } #' @source \url{https://faculty.chicagobooth.edu/george.wu/research/marathon/marathon_names.htm} #' @examples #' \dontrun{ #' race_info #'} 'race_info' #' #' @title A resampled set of runners from all marathons with more 50 runners. #' @description Each marathon will have 100 runners (50 male, 50 female) per year. So any marathon with less than 50 runners in the group will have multiple resampled runners. This data set has over 500k runners. The original data had close to 10 million runners and a few more columns. The NYT had a good article - https://www.nytimes.com/2014/04/23/upshot/what-good-marathons-and-bad-investments-have-in-common.html?rref=upshot&_r=1 #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is integer. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is integer. 
The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_sample #'} 'marathon_sample' #' #' @title A random sample of 50% of males and females for each year of runners for all years of the New York City marathon where gender is recorded. #' @description This data set has just over 200k runners. The NYT had a good article - https://www.nytimes.com/2014/04/23/upshot/what-good-marathons-and-bad-investments-have-in-common.html?rref=upshot&_r=1. The NYC marathon website - https://www.nyrr.org/tcsnycmarathon #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_nyc #'} 'marathon_nyc' #' #' @title The full set of runners for all races during 2010. #' @description This data set has 800k runners. The NYT had a good article - https://www.nytimes.com/2014/04/23/upshot/what-good-marathons-and-bad-investments-have-in-common.html?rref=upshot&_r=1. #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. 
The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_2010 #'} 'marathon_2010' #' #' @title The 50% sample of male/female runners for all years of the Berlin marathon that recorded gender. #' @description This data set has ~200k observations. Marathon website - https://www.bmw-berlin-marathon.com/en/ #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_berlin #'} 'marathon_berlin' #' #' @title The full set of runners for the Big Sur marathon. #' @description This data set has ~40k observations. Marathon website - https://www.bigsurmarathon.org/ #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. 
The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_big_sur #'} 'marathon_big_sur' #' #' @title The full set of runners for the Jerusalem marathon. #' @description This data set has ~2.5k observations. Marathon website - https://jerusalem-marathon.com/en/home-page/ #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_jerusalem #'} 'marathon_jerusalem' #' #' @title All of the runners for marathons with lat and long locations #' @description This data set has ~150k observations. #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. 
The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_location #'} 'marathon_location' #' #' @title #' @description This data set has ~2k observations. #' @format A data frame with columns: #' \describe{ #' \item{marathon}{The variable is character. The name of the marathon that matches all other files} #' \item{marathon_name}{The variable is character. A cleaned name of the marathon} #' \item{state_id}{The variable is character. The two letter ID for each US state} #' \item{city}{The variable is character. The name of the city where the race is held} #' \item{finishers}{The variable is integer. The number of finishers at the marathon} #' \item{mean_time}{The variable is numeric. The average finish time in minutes.} #' \item{lat}{The variable is numeric. The lattitude of the city as listed at https://simplemaps.com/data/us-cities} #' \item{lng}{The variable is numeric. The longitude of the city as listed at https://simplemaps.com/data/us-cities} #' \item{elevation_m}{The variable is numeric. The elevation in meters above sea level as estimated from the elevatr R package.} #' \item{date}{The variable is Date. The approximate date of the marathon. The year is correct but the month and day changes every year and we have marked it the same.} #' \item{month}{The variable is numeric. Approximate month of the marathon} #' \item{day}{The variable is numeric. Approximate day of the month of the marathon.} #' \item{year}{The variable is integer. The year of the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm and https://simplemaps.com/data/us-cities} #' @examples #' \dontrun{ #' race_location #'} 'race_location'
/R/data.R
no_license
byuidatascience/data4marathons
R
false
false
9,687
r
#' #' @title Table of Information about Marathons #' @description An interesting data set to see the effects of goals on what should be a unimodal distrubtion of finish times. The NYT had a good article - https://www.nytimes.com/2014/04/23/upshot/what-good-marathons-and-bad-investments-have-in-common.html?rref=upshot&_r=1 #' @format A data frame with columns: #' \describe{ #' \item{year}{The variable is integer. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is integer. The number of finishers at the marathon} #' \item{mean_time}{The variable is numeric. The average finish time in minutes.} #' } #' @source \url{https://faculty.chicagobooth.edu/george.wu/research/marathon/marathon_names.htm} #' @examples #' \dontrun{ #' race_info #'} 'race_info' #' #' @title A resampled set of runners from all marathons with more 50 runners. #' @description Each marathon will have 100 runners (50 male, 50 female) per year. So any marathon with less than 50 runners in the group will have multiple resampled runners. This data set has over 500k runners. The original data had close to 10 million runners and a few more columns. The NYT had a good article - https://www.nytimes.com/2014/04/23/upshot/what-good-marathons-and-bad-investments-have-in-common.html?rref=upshot&_r=1 #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is integer. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is integer. 
The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_sample #'} 'marathon_sample' #' #' @title A random sample of 50% of males and females for each year of runners for all years of the New York City marathon where gender is recorded. #' @description This data set has just over 200k runners. The NYT had a good article - https://www.nytimes.com/2014/04/23/upshot/what-good-marathons-and-bad-investments-have-in-common.html?rref=upshot&_r=1. The NYC marathon website - https://www.nyrr.org/tcsnycmarathon #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_nyc #'} 'marathon_nyc' #' #' @title The full set of runners for all races during 2010. #' @description This data set has 800k runners. The NYT had a good article - https://www.nytimes.com/2014/04/23/upshot/what-good-marathons-and-bad-investments-have-in-common.html?rref=upshot&_r=1. #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. 
The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_2010 #'} 'marathon_2010' #' #' @title The 50% sample of male/female runners for all years of the Berlin marathon that recorded gender. #' @description This data set has ~200k observations. Marathon website - https://www.bmw-berlin-marathon.com/en/ #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_berlin #'} 'marathon_berlin' #' #' @title The full set of runners for the Big Sur marathon. #' @description This data set has ~40k observations. Marathon website - https://www.bigsurmarathon.org/ #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. 
The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_big_sur #'} 'marathon_big_sur' #' #' @title The full set of runners for the Jerusalem marathon. #' @description This data set has ~2.5k observations. Marathon website - https://jerusalem-marathon.com/en/home-page/ #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_jerusalem #'} 'marathon_jerusalem' #' #' @title All of the runners for marathons with lat and long locations #' @description This data set has ~150k observations. #' @format A data frame with columns: #' \describe{ #' \item{age}{The variable is numeric. The age of the runner} #' \item{gender}{The variable is character. The gender of the runner (M/F)} #' \item{chiptime}{The variable is numeric. The time in minutes for the runner} #' \item{year}{The variable is numeric. The year of the marathon} #' \item{marathon}{The variable is character. The name of the marathon} #' \item{country}{The variable is character. The country where the marathon was held} #' \item{finishers}{The variable is numeric. 
The number of finishers at the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm} #' @examples #' \dontrun{ #' marathon_location #'} 'marathon_location' #' #' @title #' @description This data set has ~2k observations. #' @format A data frame with columns: #' \describe{ #' \item{marathon}{The variable is character. The name of the marathon that matches all other files} #' \item{marathon_name}{The variable is character. A cleaned name of the marathon} #' \item{state_id}{The variable is character. The two letter ID for each US state} #' \item{city}{The variable is character. The name of the city where the race is held} #' \item{finishers}{The variable is integer. The number of finishers at the marathon} #' \item{mean_time}{The variable is numeric. The average finish time in minutes.} #' \item{lat}{The variable is numeric. The lattitude of the city as listed at https://simplemaps.com/data/us-cities} #' \item{lng}{The variable is numeric. The longitude of the city as listed at https://simplemaps.com/data/us-cities} #' \item{elevation_m}{The variable is numeric. The elevation in meters above sea level as estimated from the elevatr R package.} #' \item{date}{The variable is Date. The approximate date of the marathon. The year is correct but the month and day changes every year and we have marked it the same.} #' \item{month}{The variable is numeric. Approximate month of the marathon} #' \item{day}{The variable is numeric. Approximate day of the month of the marathon.} #' \item{year}{The variable is integer. The year of the marathon} #' } #' @source \url{http://faculty.chicagobooth.edu/george.wu/research/marathon/data.htm and https://simplemaps.com/data/us-cities} #' @examples #' \dontrun{ #' race_location #'} 'race_location'
getSample <- function(data, startDate, endDate){ # Gets a sample from a dataframe based on a Date interval. # # Args: # data: Dataframe with all the data.Must have a columns named TSDate of the Date type. # params: Parameters that are stored in the config file that contain the training period. # Returns: # Dataframe with the sample data. startSmpl <- which(data$TSDate==startDate) endSmpl <- which(data$TSDate==endDate) return (sample(startSmpl, endSmpl, data)) } sample <- function(startSmpl, endSmpl, data){ data <- data[startSmpl:endSmpl,] return(data) } removeDfColumns <- function(df, columns){ # Removes columns from a dataframe # # Args: # df: Dataframe with all the data. # columns: List with the names of columns to remove. # # Returns: # Dataframe without the specified columns. return(df[ ,!(colnames(df) %in% columns)]) } getDateList <- function(params, prod){ dateList <- list() if(prod){ startForecastDate <- as.Date(as.character( Sys.Date())) endForecastDate <- as.Date(as.character(startForecastDate + params$forecastHorizon )) startTrainingDate <- as.Date(as.character( Sys.Date() - params$prodTrainingLength)) dateList$training <- list( startTrainingDate, as.Date(Sys.Date() - 1)) dateList$forecasting <- list(startForecastDate, endForecastDate) }else{ startTrainingDate = as.Date(params$startDate) endTrainingDate = as.Date(ifelse(is.character(params$endTrainingDate), params$endTrainingDate, as.character(Sys.Date() - params$forecastHorizon + 1))) startForecastDate = as.Date(as.character(endTrainingDate + 1)) endForecastDate = as.Date(as.character(startForecastDate + params$forecastHorizon)) dateList$training <- list(startTrainingDate, endTrainingDate) dateList$forecasting <- list(startForecastDate, endForecastDate) } names(dateList$training) <- c("startDate", "endDate") names(dateList$forecasting) <- c("startDate", "endDate") return(dateList) } pkgCheckAndInstall <- function(x){ # Given the desired package name this function checks whether it has already been 
installed or not, # if not then it is installed. # # Args: # pkgName: Name of the package to install. if (!require(x,character.only = TRUE)) { install.packages(x,dep=TRUE) if(!require(x,character.only = TRUE)) stop("Package not found") } } checkDF <- function(df){ # This function checks the given dataframe to see whether it satisfies the required conditions for the model or not. # # Args: # df: The dataframe to check # # Returns: # If the given dataframe pass all the check TRUE, otherwise FALSE if (!"TSDate" %in% colnames(df) || all(is.na( df$TSDate ))) # Check that TSDate should not be NA. return(FALSE) else if (!"y" %in% colnames(df) || all(is.na(df$y))) # Check that y column is not empty. return(FALSE) else return(TRUE) } fitFMModel <- function(obj, model, trainingData){ UseMethod("fitFMModel", obj) } predictFMModel <- function(obj, fitModel, testData){ UseMethod("predictFMModel", obj) } fitAllFMModels <- function(models, trainingData, ...){ fittedModels <- list() allNames <- list() for(model in models){ result = tryCatch({ obj <- getObject(model) fit <- fitFMModel(obj, model, trainingData, ...) 
allNames <- c(allNames, model$name) fittedModels <- c(fittedModels,list(fit)) }, warning = function(w) { print(w$message) }, error = function(e) { #TODO: log error or insert in database print(e$message) }) } names(fittedModels) <- allNames return(fittedModels) } getObject <- function(model){ switch(model$type, FMLinearRegressor={ return (FMLinearRegressor()) }, FMRandomForests={return (FMRandomForests())}, FMAdditive={return (FMAdditive())}, FMBag={return (FMBag())}, FMClustering={return (FMClustering())}, FMFuzzySystems={return (FMFuzzySystems())}, FMGaussian={return (FMGaussian())}, FMLogicRegressor={return (FMLogicRegressor())}, FMMars={return (FMMars())}, FMNeuralNetworks={return (FMNeuralNetworks())}, FMPartDSA={return (FMPartDSA())}, FMRegressionTrees={return (FMRegressionTrees())}, FMRvm={return (FMRvm())}, FMSOMap={return (FMSOMap())}, FMSVM={return (FMSVM())}, FMTimeSeries={return (FMTimeSeries())}, { stop("Model Type not found %s", model$type) } ) } predictAllFMModels <- function(modelsToRun, fittedModels, testData, ...){ modelsPredictions <- list() for(model in modelsToRun){ pred <- predictFMModel(model, fittedModels, testData) modelsPredictions <- c(modelsPredictions,list(pred)) } names(modelsPredictions) <- names(obj$fittedModels) return(modelsPredictions) } getBestPredictionModel <- function(obj, predictions, testData){ for(results in predictions){ results <- c(results,predictFMRegressorModel(obj, model, testData)) } names(results) <- names(fittedModels) } getBestTrainingModel <- function(option="AIC"){ #TO DO return the best based on AIC or Rsquared } selectModelFeatures <- function(){ #TO DO # put option to receive model name that has feature selection # we have to get run that model and return the predictors # if that model isn't returned than # remove correlated # remove linear combo # remove near zero variance # } compareModels <- function(models){ #TO DO compare only on the forecast } compareModelsSignificance <- function(){} 
predictRoolingWindow <- function(){} storeResults <- function(){}
/code/utils/global_functions.R
no_license
miguelangcosta/forecasting-machine
R
false
false
5,906
r
getSample <- function(data, startDate, endDate){ # Gets a sample from a dataframe based on a Date interval. # # Args: # data: Dataframe with all the data.Must have a columns named TSDate of the Date type. # params: Parameters that are stored in the config file that contain the training period. # Returns: # Dataframe with the sample data. startSmpl <- which(data$TSDate==startDate) endSmpl <- which(data$TSDate==endDate) return (sample(startSmpl, endSmpl, data)) } sample <- function(startSmpl, endSmpl, data){ data <- data[startSmpl:endSmpl,] return(data) } removeDfColumns <- function(df, columns){ # Removes columns from a dataframe # # Args: # df: Dataframe with all the data. # columns: List with the names of columns to remove. # # Returns: # Dataframe without the specified columns. return(df[ ,!(colnames(df) %in% columns)]) } getDateList <- function(params, prod){ dateList <- list() if(prod){ startForecastDate <- as.Date(as.character( Sys.Date())) endForecastDate <- as.Date(as.character(startForecastDate + params$forecastHorizon )) startTrainingDate <- as.Date(as.character( Sys.Date() - params$prodTrainingLength)) dateList$training <- list( startTrainingDate, as.Date(Sys.Date() - 1)) dateList$forecasting <- list(startForecastDate, endForecastDate) }else{ startTrainingDate = as.Date(params$startDate) endTrainingDate = as.Date(ifelse(is.character(params$endTrainingDate), params$endTrainingDate, as.character(Sys.Date() - params$forecastHorizon + 1))) startForecastDate = as.Date(as.character(endTrainingDate + 1)) endForecastDate = as.Date(as.character(startForecastDate + params$forecastHorizon)) dateList$training <- list(startTrainingDate, endTrainingDate) dateList$forecasting <- list(startForecastDate, endForecastDate) } names(dateList$training) <- c("startDate", "endDate") names(dateList$forecasting) <- c("startDate", "endDate") return(dateList) } pkgCheckAndInstall <- function(x){ # Given the desired package name this function checks whether it has already been 
installed or not, # if not then it is installed. # # Args: # pkgName: Name of the package to install. if (!require(x,character.only = TRUE)) { install.packages(x,dep=TRUE) if(!require(x,character.only = TRUE)) stop("Package not found") } } checkDF <- function(df){ # This function checks the given dataframe to see whether it satisfies the required conditions for the model or not. # # Args: # df: The dataframe to check # # Returns: # If the given dataframe pass all the check TRUE, otherwise FALSE if (!"TSDate" %in% colnames(df) || all(is.na( df$TSDate ))) # Check that TSDate should not be NA. return(FALSE) else if (!"y" %in% colnames(df) || all(is.na(df$y))) # Check that y column is not empty. return(FALSE) else return(TRUE) } fitFMModel <- function(obj, model, trainingData){ UseMethod("fitFMModel", obj) } predictFMModel <- function(obj, fitModel, testData){ UseMethod("predictFMModel", obj) } fitAllFMModels <- function(models, trainingData, ...){ fittedModels <- list() allNames <- list() for(model in models){ result = tryCatch({ obj <- getObject(model) fit <- fitFMModel(obj, model, trainingData, ...) 
allNames <- c(allNames, model$name) fittedModels <- c(fittedModels,list(fit)) }, warning = function(w) { print(w$message) }, error = function(e) { #TODO: log error or insert in database print(e$message) }) } names(fittedModels) <- allNames return(fittedModels) } getObject <- function(model){ switch(model$type, FMLinearRegressor={ return (FMLinearRegressor()) }, FMRandomForests={return (FMRandomForests())}, FMAdditive={return (FMAdditive())}, FMBag={return (FMBag())}, FMClustering={return (FMClustering())}, FMFuzzySystems={return (FMFuzzySystems())}, FMGaussian={return (FMGaussian())}, FMLogicRegressor={return (FMLogicRegressor())}, FMMars={return (FMMars())}, FMNeuralNetworks={return (FMNeuralNetworks())}, FMPartDSA={return (FMPartDSA())}, FMRegressionTrees={return (FMRegressionTrees())}, FMRvm={return (FMRvm())}, FMSOMap={return (FMSOMap())}, FMSVM={return (FMSVM())}, FMTimeSeries={return (FMTimeSeries())}, { stop("Model Type not found %s", model$type) } ) } predictAllFMModels <- function(modelsToRun, fittedModels, testData, ...){ modelsPredictions <- list() for(model in modelsToRun){ pred <- predictFMModel(model, fittedModels, testData) modelsPredictions <- c(modelsPredictions,list(pred)) } names(modelsPredictions) <- names(obj$fittedModels) return(modelsPredictions) } getBestPredictionModel <- function(obj, predictions, testData){ for(results in predictions){ results <- c(results,predictFMRegressorModel(obj, model, testData)) } names(results) <- names(fittedModels) } getBestTrainingModel <- function(option="AIC"){ #TO DO return the best based on AIC or Rsquared } selectModelFeatures <- function(){ #TO DO # put option to receive model name that has feature selection # we have to get run that model and return the predictors # if that model isn't returned than # remove correlated # remove linear combo # remove near zero variance # } compareModels <- function(models){ #TO DO compare only on the forecast } compareModelsSignificance <- function(){} 
predictRoolingWindow <- function(){} storeResults <- function(){}
library(Devore7) ### Name: ex01.37 ### Title: R Data set: ex01.37 ### Aliases: ex01.37 ### Keywords: datasets ### ** Examples data(ex01.37) str(ex01.37)
/data/genthat_extracted_code/Devore7/examples/ex01.37.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
160
r
library(Devore7) ### Name: ex01.37 ### Title: R Data set: ex01.37 ### Aliases: ex01.37 ### Keywords: datasets ### ** Examples data(ex01.37) str(ex01.37)
library(shiny) library(dplyr) library(ggvis) shinyServer(function(input, output, session) { #load the data when the user inputs a file theData <- reactive({ infile <- input$datfile if(is.null(infile)) return(NULL) d <- read.csv(infile$datapath, header=input$header, sep=input$sep) }) output$contents <- renderTable({ dt = theData() }) # dynamic variable names observe({ data<-theData() updateSelectInput(session, 'x', choices = names(data)) updateSelectInput(session, 'y', choices = names(data)) }) # end observe #gets the y variable name, will be used to change the plot legends yVarName<-reactive({ input$y }) #gets the x variable name, will be used to change the plot legends xVarName<-reactive({ input$x }) #make the filteredData frame filteredData<-reactive({ data<-isolate(theData()) #if there is no input, make a dummy dataframe if(input$x=="x" && input$y=="y"){ if(is.null(data)){ data<-data.frame(x=0,y=0) } }else{ data<-data[,c(input$x,input$y)] names(data)<-c("x","y") } data }) #plot the ggvis plot in a reactive block so that it changes with filteredData vis<-reactive({ plotData<-filteredData() plotData %>% ggvis(~x, ~y,opacity := input$opacity) %>% layer_lines(stroke := "red", strokeWidth := 1) %>% add_axis("y", title = yVarName()) %>% add_axis("x", title = xVarName()) %>% add_tooltip(function(df) format(sqrt(df$x),digits=2)) }) vis%>%bind_shiny("plot", "plot_ui") })
/server.R
no_license
saitej09/DataProducts_CP
R
false
false
2,282
r
library(shiny) library(dplyr) library(ggvis) shinyServer(function(input, output, session) { #load the data when the user inputs a file theData <- reactive({ infile <- input$datfile if(is.null(infile)) return(NULL) d <- read.csv(infile$datapath, header=input$header, sep=input$sep) }) output$contents <- renderTable({ dt = theData() }) # dynamic variable names observe({ data<-theData() updateSelectInput(session, 'x', choices = names(data)) updateSelectInput(session, 'y', choices = names(data)) }) # end observe #gets the y variable name, will be used to change the plot legends yVarName<-reactive({ input$y }) #gets the x variable name, will be used to change the plot legends xVarName<-reactive({ input$x }) #make the filteredData frame filteredData<-reactive({ data<-isolate(theData()) #if there is no input, make a dummy dataframe if(input$x=="x" && input$y=="y"){ if(is.null(data)){ data<-data.frame(x=0,y=0) } }else{ data<-data[,c(input$x,input$y)] names(data)<-c("x","y") } data }) #plot the ggvis plot in a reactive block so that it changes with filteredData vis<-reactive({ plotData<-filteredData() plotData %>% ggvis(~x, ~y,opacity := input$opacity) %>% layer_lines(stroke := "red", strokeWidth := 1) %>% add_axis("y", title = yVarName()) %>% add_axis("x", title = xVarName()) %>% add_tooltip(function(df) format(sqrt(df$x),digits=2)) }) vis%>%bind_shiny("plot", "plot_ui") })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cbr_possible_codes.R \name{cbr_possible_codes} \alias{cbr_possible_codes} \title{Get list of possible currencies} \usage{ cbr_possible_codes(full_output = F) } \arguments{ \item{full_output}{By default, FALSE. If TRUE return columns: \itemize{ \item Name \item EngName \item Nominal \item ParentCode \item ISO_Num_Code \item ISO_Char_Code }} } \value{ Data frame, sorted by ISO code with currency names and nominals. } \description{ Use this, if you don't know, which ISO code of currency you need. Or if you don't shure that CBR have this rates or not. }
/man/cbr_possible_codes.Rd
no_license
sergeymong/cbrRatesR
R
false
true
646
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cbr_possible_codes.R \name{cbr_possible_codes} \alias{cbr_possible_codes} \title{Get list of possible currencies} \usage{ cbr_possible_codes(full_output = F) } \arguments{ \item{full_output}{By default, FALSE. If TRUE return columns: \itemize{ \item Name \item EngName \item Nominal \item ParentCode \item ISO_Num_Code \item ISO_Char_Code }} } \value{ Data frame, sorted by ISO code with currency names and nominals. } \description{ Use this, if you don't know, which ISO code of currency you need. Or if you don't shure that CBR have this rates or not. }
library(readr) library(magrittr) library(dplyr) cdc <- read_csv("cdc.csv") cdc$exerany <- factor(cdc$exerany,levels = c(1,0), labels = c("Yes","No")) cdc$hlthplan <- factor(cdc$hlthplan,levels = c(1,0), labels = c("Yes","No")) cdc$smoke100 <- factor(cdc$smoke100,levels = c(1,0), labels = c("Yes","No")) cdc$gender <- factor(cdc$gender,levels = c("m","f"), labels = c("Male","Female")) levels(cdc$genhlth) cdc$genhlth <- cdc$genhlth %>% factor( levels=c('excellent','very good','good','fair','poor'), ordered=TRUE) genhlth<- cdc$genhlth %>% table() %>% prop.table() genhlth %>% barplot() exerany<- cdc$exerany %>% table() %>% prop.table() exerany %>% barplot() hlthplan<- cdc$hlthplan %>% table() %>% prop.table() hlthplan %>% barplot() smoke<- cdc$smoke100 %>% table() %>% prop.table() smoke %>% barplot() gender<- cdc$gender %>% table() %>% prop.table() gender %>% barplot() healthgender <- table(cdc$genhlth, cdc$gender) %>% prop.table(margin = 2) healthgender healthgender %>%barplot(beside=TRUE,legend=rownames(healthgender)) healthsmoke <- table(cdc$genhlth, cdc$smoke100) %>% prop.table(margin = 2) healthsmoke healthsmoke %>%barplot(beside=TRUE,legend=rownames(healthsmoke)) bmi <- (cdc$weight / cdc$height^2) * 703 # Or also you can use mutate() function from the dplyr package. cdc <- cdc %>% mutate(bmi= (cdc$weight / cdc$height^2) * 703) cdc$bmi %>% boxplot() cdc$bmi %>% hist() cdc$bmi %>% summary cdc$bmi %>% quantile(c(.25,.5,.75)) cdc$bmi %>% IQR() cdc$bmi %>% sd() #We can define the upper and lower fence for an outlier as values 1.5 times the IQR #(Q3 - Q1 = 6.178516) above the third quartile, Q3 + (1.5*IQR) and below the first qu #artile, Q1 - (1.5*IQR) . In this case the upper fence for an outlier is 38.15777. UpperFence <- 28.89 + (1.5* 6.178516) #Calculate upper fence UpperFence # The lower fence for an outlier is 13.44223. LowerFence <- 22.71 - (1.5* 6.178516) #Calculate lower fence LowerFence # Now let's filter data to remove outliers. 
I used here between() function under dplyr package cdc_clean <- cdc %>% filter(., between(cdc$bmi, 13.44223, 38.15777)) cdc_clean %>% boxplot( cdc_clean$bmi ~ cdc_clean$genhlth, by = cdc_clean$gender, data = .,ylab = "BMI", xlab = "General Health")
/Tutorials/week2/code/R_Script.R
no_license
shonil24/Applied-Analytics
R
false
false
2,337
r
library(readr) library(magrittr) library(dplyr) cdc <- read_csv("cdc.csv") cdc$exerany <- factor(cdc$exerany,levels = c(1,0), labels = c("Yes","No")) cdc$hlthplan <- factor(cdc$hlthplan,levels = c(1,0), labels = c("Yes","No")) cdc$smoke100 <- factor(cdc$smoke100,levels = c(1,0), labels = c("Yes","No")) cdc$gender <- factor(cdc$gender,levels = c("m","f"), labels = c("Male","Female")) levels(cdc$genhlth) cdc$genhlth <- cdc$genhlth %>% factor( levels=c('excellent','very good','good','fair','poor'), ordered=TRUE) genhlth<- cdc$genhlth %>% table() %>% prop.table() genhlth %>% barplot() exerany<- cdc$exerany %>% table() %>% prop.table() exerany %>% barplot() hlthplan<- cdc$hlthplan %>% table() %>% prop.table() hlthplan %>% barplot() smoke<- cdc$smoke100 %>% table() %>% prop.table() smoke %>% barplot() gender<- cdc$gender %>% table() %>% prop.table() gender %>% barplot() healthgender <- table(cdc$genhlth, cdc$gender) %>% prop.table(margin = 2) healthgender healthgender %>%barplot(beside=TRUE,legend=rownames(healthgender)) healthsmoke <- table(cdc$genhlth, cdc$smoke100) %>% prop.table(margin = 2) healthsmoke healthsmoke %>%barplot(beside=TRUE,legend=rownames(healthsmoke)) bmi <- (cdc$weight / cdc$height^2) * 703 # Or also you can use mutate() function from the dplyr package. cdc <- cdc %>% mutate(bmi= (cdc$weight / cdc$height^2) * 703) cdc$bmi %>% boxplot() cdc$bmi %>% hist() cdc$bmi %>% summary cdc$bmi %>% quantile(c(.25,.5,.75)) cdc$bmi %>% IQR() cdc$bmi %>% sd() #We can define the upper and lower fence for an outlier as values 1.5 times the IQR #(Q3 - Q1 = 6.178516) above the third quartile, Q3 + (1.5*IQR) and below the first qu #artile, Q1 - (1.5*IQR) . In this case the upper fence for an outlier is 38.15777. UpperFence <- 28.89 + (1.5* 6.178516) #Calculate upper fence UpperFence # The lower fence for an outlier is 13.44223. LowerFence <- 22.71 - (1.5* 6.178516) #Calculate lower fence LowerFence # Now let's filter data to remove outliers. 
I used here between() function under dplyr package cdc_clean <- cdc %>% filter(., between(cdc$bmi, 13.44223, 38.15777)) cdc_clean %>% boxplot( cdc_clean$bmi ~ cdc_clean$genhlth, by = cdc_clean$gender, data = .,ylab = "BMI", xlab = "General Health")
############################################################## # # WEBEXPO official R scripts # # SEG ANALYSIS # # DATA PREPARATION BEFORE BAYESIAN ANALYSIS # # V1.0 Sept. 2018 # ############################################################# ## # # INPUT : # # vector of observations # should be a vector of strings, with observations as e.g. "2.1" , "<5" , "[2.3-5]" , ">23.1" # # # ## webexpo.seg.datapreparation <-function(data.in) { result <-list() result$data <-data.in result$leftcensored <-grepl('<' , data.in, fixed = TRUE) result$rightcensored <-grepl('>' , data.in , fixed = TRUE) result$intcensored <-grepl('[' , data.in , fixed = TRUE) result$notcensored <-!(result$leftcensored | result$rightcensored | result$intcensored) return(result) }
/DATA PREPARATION/SEG ANALYSIS/webexpo.seg.dataprep.R
permissive
webexpo/webexpo_r_lib
R
false
false
806
r
############################################################## # # WEBEXPO official R scripts # # SEG ANALYSIS # # DATA PREPARATION BEFORE BAYESIAN ANALYSIS # # V1.0 Sept. 2018 # ############################################################# ## # # INPUT : # # vector of observations # should be a vector of strings, with observations as e.g. "2.1" , "<5" , "[2.3-5]" , ">23.1" # # # ## webexpo.seg.datapreparation <-function(data.in) { result <-list() result$data <-data.in result$leftcensored <-grepl('<' , data.in, fixed = TRUE) result$rightcensored <-grepl('>' , data.in , fixed = TRUE) result$intcensored <-grepl('[' , data.in , fixed = TRUE) result$notcensored <-!(result$leftcensored | result$rightcensored | result$intcensored) return(result) }
\name{rotvector} \alias{rotvector} \alias{rotvector,orientation-method} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Create an orientation using vectorized 3x3 matrices } \description{ Creates a \code{\link{rotvector-class}} object. } \usage{ rotvector(m) } \arguments{ \item{m}{ n x 9 matrix or 9 element vector whose rows are vectorized 3x3 matrices, or an orientation object.} } \details{ Converts a matrix whose rows are vectorized 3x3 matrices (in column-major form) into an \code{\link{rotvector-class}} object. } \value{ A \code{\link{rotvector-class}} object. } \author{ Duncan Murdoch } \seealso{\code{\link{rotvector-class}}, \code{\link{rotmatrix}}, \code{\link{eulerzyx}}, \code{\link{eulerzxz}}, \code{\link{quaternion}}, \code{\link{skewvector}}, \code{\link{skewmatrix}}} \examples{ x <- rotvector(c(0,1,0,-1,0,0,0,0,1)) x rotmatrix(x) } \keyword{array }% at least one, from doc/KEYWORDS \keyword{algebra }% __ONLY ONE__ keyword per line
/man/rotvector.Rd
no_license
cran/orientlib
R
false
false
985
rd
\name{rotvector} \alias{rotvector} \alias{rotvector,orientation-method} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Create an orientation using vectorized 3x3 matrices } \description{ Creates a \code{\link{rotvector-class}} object. } \usage{ rotvector(m) } \arguments{ \item{m}{ n x 9 matrix or 9 element vector whose rows are vectorized 3x3 matrices, or an orientation object.} } \details{ Converts a matrix whose rows are vectorized 3x3 matrices (in column-major form) into an \code{\link{rotvector-class}} object. } \value{ A \code{\link{rotvector-class}} object. } \author{ Duncan Murdoch } \seealso{\code{\link{rotvector-class}}, \code{\link{rotmatrix}}, \code{\link{eulerzyx}}, \code{\link{eulerzxz}}, \code{\link{quaternion}}, \code{\link{skewvector}}, \code{\link{skewmatrix}}} \examples{ x <- rotvector(c(0,1,0,-1,0,0,0,0,1)) x rotmatrix(x) } \keyword{array }% at least one, from doc/KEYWORDS \keyword{algebra }% __ONLY ONE__ keyword per line
### Showing the Nyquist Frequency par(mfrow = c(2,1)) x_seq <- seq(-2*pi,2*pi,0.01) y <- sin(2*x_seq) x<-c(-2*pi,-7/4*pi,-3/2*pi,-5/4*pi,-pi,-3/4*pi,-pi/2,-1/4*pi,0, 1/4*pi,pi/2,3/4*pi,pi,5/4*pi,3/2*pi,7/4*pi,2*pi) plot(x_seq,y, xlab = "time", ylab = "x(t)", type = "l", col = "red") abline(h=0, col = "black") nyquist_x<-x[c(1,3,5,7,9,11,13,15,17)] points(x=nyquist_x, y=sin(2*nyquist_x), pch = 12, col = "blue") lines(x = x_seq, y = 0*sin(2*x_seq), col = "blue") plot(x_seq,y, xlab = "time", ylab = "x(t)", type = "l", col = "red") abline(h=0, col = "black") nyquist_gx <- x[c()] # this is a change
/plots/nyquist.R
no_license
sophiebull/thesis
R
false
false
619
r
### Showing the Nyquist Frequency par(mfrow = c(2,1)) x_seq <- seq(-2*pi,2*pi,0.01) y <- sin(2*x_seq) x<-c(-2*pi,-7/4*pi,-3/2*pi,-5/4*pi,-pi,-3/4*pi,-pi/2,-1/4*pi,0, 1/4*pi,pi/2,3/4*pi,pi,5/4*pi,3/2*pi,7/4*pi,2*pi) plot(x_seq,y, xlab = "time", ylab = "x(t)", type = "l", col = "red") abline(h=0, col = "black") nyquist_x<-x[c(1,3,5,7,9,11,13,15,17)] points(x=nyquist_x, y=sin(2*nyquist_x), pch = 12, col = "blue") lines(x = x_seq, y = 0*sin(2*x_seq), col = "blue") plot(x_seq,y, xlab = "time", ylab = "x(t)", type = "l", col = "red") abline(h=0, col = "black") nyquist_gx <- x[c()] # this is a change
\name{enve.RecPlot2.Peak-class} \Rdversion{1.1} \docType{class} \alias{enve.RecPlot2.Peak-class} \alias{enve.RecPlot2.Peak} %% \alias{$} \title{enve.RecPlot2.Peak S4 class} \description{Enve-omics representation of a peak in the sequencing depth histogram of a Recruitment plot (see `enve.recplot2.findPeaks`).} \section{Objects from the Class}{Objects can be created by calls of the form \code{new(enve.RecPlot2.Peak ...)}} \section{Slots}{ \describe{ \item{\code{dist}:}{(\code{character}) Distribution of the peak. Currently supported: 'norm' (normal) and 'sn' (skew-normal).} \item{\code{values}:}{(\code{numeric}) Sequencing depth values predicted to conform the peak.} \item{\code{values.res}:}{(\code{numeric}) Sequencing depth values not explained by this or previously identified peaks.} \item{\code{mode}:}{(\code{numeric}) Seed-value of mode anchoring the peak.} \item{\code{param.hat}:}{(\code{list}) Parameters of the distribution. A list of two values if dist='norm' (sd and mean), or three values if dist='sn' (omega=scale, alpha=shape, and xi=location). Note that the "dispersion" parameter is always first and the "location" parameter is always last.} \item{\code{n.hat}:}{(\code{numeric}) Number of bins estimated to be explained by this peak. This should ideally be equal to the length of `values`, but it's not and integer.} \item{\code{n.total}:}{(\code{numeric}) Total number of bins from which the peak was extracted.} \item{\code{err.res}:}{(\code{numeric}) Error left after adding the peak.} \item{\code{merge.logdist}:}{(\code{numeric}) Attempted `merge.logdist` parameter.} } } \section{Methods}{ \describe{ \item{$}{\code{signature(x = "enve.RecPlot2.Peak")}: ... } } } \author{Luis M. Rodriguez-R [aut, cre]}
/enveomics.R/man/enve.recplot2.peak-class.Rd
permissive
yoyohashao/enveomics
R
false
false
1,793
rd
\name{enve.RecPlot2.Peak-class} \Rdversion{1.1} \docType{class} \alias{enve.RecPlot2.Peak-class} \alias{enve.RecPlot2.Peak} %% \alias{$} \title{enve.RecPlot2.Peak S4 class} \description{Enve-omics representation of a peak in the sequencing depth histogram of a Recruitment plot (see `enve.recplot2.findPeaks`).} \section{Objects from the Class}{Objects can be created by calls of the form \code{new(enve.RecPlot2.Peak ...)}} \section{Slots}{ \describe{ \item{\code{dist}:}{(\code{character}) Distribution of the peak. Currently supported: 'norm' (normal) and 'sn' (skew-normal).} \item{\code{values}:}{(\code{numeric}) Sequencing depth values predicted to conform the peak.} \item{\code{values.res}:}{(\code{numeric}) Sequencing depth values not explained by this or previously identified peaks.} \item{\code{mode}:}{(\code{numeric}) Seed-value of mode anchoring the peak.} \item{\code{param.hat}:}{(\code{list}) Parameters of the distribution. A list of two values if dist='norm' (sd and mean), or three values if dist='sn' (omega=scale, alpha=shape, and xi=location). Note that the "dispersion" parameter is always first and the "location" parameter is always last.} \item{\code{n.hat}:}{(\code{numeric}) Number of bins estimated to be explained by this peak. This should ideally be equal to the length of `values`, but it's not and integer.} \item{\code{n.total}:}{(\code{numeric}) Total number of bins from which the peak was extracted.} \item{\code{err.res}:}{(\code{numeric}) Error left after adding the peak.} \item{\code{merge.logdist}:}{(\code{numeric}) Attempted `merge.logdist` parameter.} } } \section{Methods}{ \describe{ \item{$}{\code{signature(x = "enve.RecPlot2.Peak")}: ... } } } \author{Luis M. Rodriguez-R [aut, cre]}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/graphs.R \name{get.code.node.labels} \alias{get.code.node.labels} \title{Function to label HPO nodes in plot with just HPO code} \usage{ get.code.node.labels(hpo.terms, terms, plotting.context) } \arguments{ \item{hpo.terms}{R-Object representation of HPO} \item{terms}{Character vector of HPO terms} \item{plotting.context}{List object with hpo.phenotypes slot for list of character vectors of terms} } \value{ Character vector of labels, named by term } \description{ Function to label HPO nodes in plot with just HPO code }
/man/get.code.node.labels.Rd
no_license
cran/hpoPlot
R
false
true
608
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/graphs.R \name{get.code.node.labels} \alias{get.code.node.labels} \title{Function to label HPO nodes in plot with just HPO code} \usage{ get.code.node.labels(hpo.terms, terms, plotting.context) } \arguments{ \item{hpo.terms}{R-Object representation of HPO} \item{terms}{Character vector of HPO terms} \item{plotting.context}{List object with hpo.phenotypes slot for list of character vectors of terms} } \value{ Character vector of labels, named by term } \description{ Function to label HPO nodes in plot with just HPO code }
data <- read.csv("household_power_consumption.txt",sep=";",na.strings="?", colClasses=c(Date="character",Time="character")) data$Date<-as.Date(data$Date,"%d/%m/%Y") data$DateTime <-as.POSIXct(strptime(paste(data$Date,data$Time), "%Y-%m-%d %H:%M:%S")) data2 <- data[data$Date==as.Date("2007-02-01") | data$Date==as.Date("2007-02-02") ,] png(file="plot2.png",width = 480, height = 480) with(data2,plot(DateTime,Global_active_power ,type="l" , xlab="" ,ylab = "Global Active Power (kilowatts)" )) dev.off() ## Don't forget to close the PNG device!
/Plot2.R
no_license
daveze/ExData_Plotting1
R
false
false
612
r
data <- read.csv("household_power_consumption.txt",sep=";",na.strings="?", colClasses=c(Date="character",Time="character")) data$Date<-as.Date(data$Date,"%d/%m/%Y") data$DateTime <-as.POSIXct(strptime(paste(data$Date,data$Time), "%Y-%m-%d %H:%M:%S")) data2 <- data[data$Date==as.Date("2007-02-01") | data$Date==as.Date("2007-02-02") ,] png(file="plot2.png",width = 480, height = 480) with(data2,plot(DateTime,Global_active_power ,type="l" , xlab="" ,ylab = "Global Active Power (kilowatts)" )) dev.off() ## Don't forget to close the PNG device!
# Juuso Väistö, 25.11.2019, RStudio exercise 5: Dimensionality reduction techniques # Data wrangling for the exercise # Meta file for these datasets can be seen from here (http://hdr.undp.org/en/content/human-development-index-hdi) and here (http://hdr.undp.org/sites/default/files/hdr2015_technical_notes.pdf) are some technical notes # Read the Human Development Index (hd) data into memory hd <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv", stringsAsFactors = F) # Read the Gender Inequality (gii) data into memory gii <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv", stringsAsFactors = F, na.strings = "..") # Explore the dimensions of the data dim(hd) dim(gii) # Explore the structure of the data str(hd) str(gii) # Summaries of the datasets summary(hd) summary(gii) # access into libraries library(dplyr) library(stringr) # Rename hd dataset long variable names hd <- rename(hd, HDI = Human.Development.Index..HDI., LEB = Life.Expectancy.at.Birth, EYE = Expected.Years.of.Education, MYE = Mean.Years.of.Education, GNI_per_Cap = Gross.National.Income..GNI..per.Capita, GNI_minus_HDI = GNI.per.Capita.Rank.Minus.HDI.Rank) # Rename gii dataset long variable names gii <- rename(gii, GII = Gender.Inequality.Index..GII., MMR = Maternal.Mortality.Ratio, ABR = Adolescent.Birth.Rate, PRP = Percent.Representation.in.Parliament, PSE_female = Population.with.Secondary.Education..Female., PSE_male = Population.with.Secondary.Education..Male., LFPR_female = Labour.Force.Participation.Rate..Female., LFPR_male = Labour.Force.Participation.Rate..Male.) 
# Ratio of Female and Male populations with secondary education in each country gii <- mutate(gii, PSE_fm_ratio = PSE_female / PSE_male) # Ratio of labour force participation of females and males in each country gii <- mutate(gii, LFPR_fm_ratio = LFPR_female / LFPR_male) # Join the datasets human <- inner_join(gii, hd, by = c("Country")) # remove the commas from GNI and print out a numeric version of it str_replace(human$GNI_per_Cap, pattern=",", replace ="") %>% as.numeric(human$GNI_per_Cap) #Selecting variables human <- dplyr::select(human, one_of(c("Country", "PSE_fm_ratio", "LFPR_fm_ratio", "LEB", "EYE", "GNI_per_Cap", "MMR", "ABR", "PRP"))) #Excluding missing cases human <- filter(human, complete.cases(human) == TRUE) #Removing observations which relate to regions instead of countries human <- human[1:155, ] #Defining row names of the data by the country names and removing the country name column from the data rownames(human) <- human$Country human <- human[ , 2:9] #Saving the new data write.csv(human, file = "C:/Users/juusov/Documents/IODS-project/Data/human.csv")
/Data/create_human.R
no_license
JVaisto/IODS-project
R
false
false
2,956
r
# Juuso Väistö, 25.11.2019, RStudio exercise 5: Dimensionality reduction techniques # Data wrangling for the exercise # Meta file for these datasets can be seen from here (http://hdr.undp.org/en/content/human-development-index-hdi) and here (http://hdr.undp.org/sites/default/files/hdr2015_technical_notes.pdf) are some technical notes # Read the Human Development Index (hd) data into memory hd <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv", stringsAsFactors = F) # Read the Gender Inequality (gii) data into memory gii <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv", stringsAsFactors = F, na.strings = "..") # Explore the dimensions of the data dim(hd) dim(gii) # Explore the structure of the data str(hd) str(gii) # Summaries of the datasets summary(hd) summary(gii) # access into libraries library(dplyr) library(stringr) # Rename hd dataset long variable names hd <- rename(hd, HDI = Human.Development.Index..HDI., LEB = Life.Expectancy.at.Birth, EYE = Expected.Years.of.Education, MYE = Mean.Years.of.Education, GNI_per_Cap = Gross.National.Income..GNI..per.Capita, GNI_minus_HDI = GNI.per.Capita.Rank.Minus.HDI.Rank) # Rename gii dataset long variable names gii <- rename(gii, GII = Gender.Inequality.Index..GII., MMR = Maternal.Mortality.Ratio, ABR = Adolescent.Birth.Rate, PRP = Percent.Representation.in.Parliament, PSE_female = Population.with.Secondary.Education..Female., PSE_male = Population.with.Secondary.Education..Male., LFPR_female = Labour.Force.Participation.Rate..Female., LFPR_male = Labour.Force.Participation.Rate..Male.) 
# Ratio of Female and Male populations with secondary education in each country gii <- mutate(gii, PSE_fm_ratio = PSE_female / PSE_male) # Ratio of labour force participation of females and males in each country gii <- mutate(gii, LFPR_fm_ratio = LFPR_female / LFPR_male) # Join the datasets human <- inner_join(gii, hd, by = c("Country")) # remove the commas from GNI and print out a numeric version of it str_replace(human$GNI_per_Cap, pattern=",", replace ="") %>% as.numeric(human$GNI_per_Cap) #Selecting variables human <- dplyr::select(human, one_of(c("Country", "PSE_fm_ratio", "LFPR_fm_ratio", "LEB", "EYE", "GNI_per_Cap", "MMR", "ABR", "PRP"))) #Excluding missing cases human <- filter(human, complete.cases(human) == TRUE) #Removing observations which relate to regions instead of countries human <- human[1:155, ] #Defining row names of the data by the country names and removing the country name column from the data rownames(human) <- human$Country human <- human[ , 2:9] #Saving the new data write.csv(human, file = "C:/Users/juusov/Documents/IODS-project/Data/human.csv")
\name{FormatOut} \alias{FormatOut} \title{Formats the data in the "Solve" function for output.} \usage{ FormatOut(R, p, lambda, z, datas, likelihood) } \arguments{ \item{R}{The modal sequences.} \item{p}{Proportion of data in each cluster.} \item{lambda}{Mallows' spread parameters for each cluster.} \item{z}{Probability of cluster membership for each individual.} \item{datas}{Matrix of partial sequences.} \item{likelihood}{Vector of the log-likelihood of the model at each iteration.} } \value{ \item{R}{The modal sequences} \item{p}{Proportion in each cluster} \item{lambda}{Spread parameters for each cluster} \item{datas}{Rankings merged with their cluster membership, distance from each cluster center, and probability of each cluster membership} \item{min.like}{Likelihood at each iteration} } \description{ Data formatting function. } \author{ Erik Gregory } \keyword{BubbleSort} \keyword{Kendall}
/man/FormatOut.Rd
no_license
cran/RMallow
R
false
false
950
rd
\name{FormatOut} \alias{FormatOut} \title{Formats the data in the "Solve" function for output.} \usage{ FormatOut(R, p, lambda, z, datas, likelihood) } \arguments{ \item{R}{The modal sequences.} \item{p}{Proportion of data in each cluster.} \item{lambda}{Mallows' spread parameters for each cluster.} \item{z}{Probability of cluster membership for each individual.} \item{datas}{Matrix of partial sequences.} \item{likelihood}{Vector of the log-likelihood of the model at each iteration.} } \value{ \item{R}{The modal sequences} \item{p}{Proportion in each cluster} \item{lambda}{Spread parameters for each cluster} \item{datas}{Rankings merged with their cluster membership, distance from each cluster center, and probability of each cluster membership} \item{min.like}{Likelihood at each iteration} } \description{ Data formatting function. } \author{ Erik Gregory } \keyword{BubbleSort} \keyword{Kendall}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PASWR-package.R \docType{data} \name{sunflower} \alias{sunflower} \title{Sunflower Defoliation} \format{ A data frame with 72 observations on the following 5 variables: \describe{ \item{location}{a factor with levels \code{A}, \code{B}, \code{C}, and \code{D} for locations Carcastillo, Melida, Murillo, and Unciti respectively} \item{stage}{a factor with levels \code{stage1}, \code{stage2}, \code{stage3}, \code{stage4}, and \code{stage5}} \item{defoli}{a factor with levels \code{control}, \code{treat1}, \code{treat2}, and \code{treat3}} \item{yield}{sunflower yield in kg/ha} \item{numseed}{number of seeds per sunflower head} } } \source{ Muro, J., \emph{et. al.} (2001) \dQuote{Defoliation Effects on Sunflower Yield Reduction.} Agronomy Journal, \bold{93}: 634-637. } \description{ Seventy-two field trials were conducted by applying four defoliation treatments (non defoliated control, 33\%, 66\%, and 100\%) at different growth stages (\code{stage}) ranging from pre-flowering (1) to physiological maturity (5) in four different locations of Navarra, Spain: Carcastillo (1), Melida (2), Murillo (3), and Unciti (4). There are two response variables: \code{yield} in kg/ha of the sunflower and \code{numseed}, the number of seeds per sunflower head. Data are stored in the data frame \code{sunflower}. Data used in Case Study: Sunflower defoliation from Chapter 11. } \examples{ summary(aov(yield ~ stage + defoli + stage:defoli, data = sunflower)) } \references{ Ugarte, M. D., Militino, A. F., and Arnholt, A. T. (2008) \emph{Probability and Statistics with R}. Chapman & Hall/CRC. } \keyword{datasets}
/man/sunflower.Rd
no_license
alanarnholt/PASWR
R
false
true
1,700
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PASWR-package.R \docType{data} \name{sunflower} \alias{sunflower} \title{Sunflower Defoliation} \format{ A data frame with 72 observations on the following 5 variables: \describe{ \item{location}{a factor with levels \code{A}, \code{B}, \code{C}, and \code{D} for locations Carcastillo, Melida, Murillo, and Unciti respectively} \item{stage}{a factor with levels \code{stage1}, \code{stage2}, \code{stage3}, \code{stage4}, and \code{stage5}} \item{defoli}{a factor with levels \code{control}, \code{treat1}, \code{treat2}, and \code{treat3}} \item{yield}{sunflower yield in kg/ha} \item{numseed}{number of seeds per sunflower head} } } \source{ Muro, J., \emph{et. al.} (2001) \dQuote{Defoliation Effects on Sunflower Yield Reduction.} Agronomy Journal, \bold{93}: 634-637. } \description{ Seventy-two field trials were conducted by applying four defoliation treatments (non defoliated control, 33\%, 66\%, and 100\%) at different growth stages (\code{stage}) ranging from pre-flowering (1) to physiological maturity (5) in four different locations of Navarra, Spain: Carcastillo (1), Melida (2), Murillo (3), and Unciti (4). There are two response variables: \code{yield} in kg/ha of the sunflower and \code{numseed}, the number of seeds per sunflower head. Data are stored in the data frame \code{sunflower}. Data used in Case Study: Sunflower defoliation from Chapter 11. } \examples{ summary(aov(yield ~ stage + defoli + stage:defoli, data = sunflower)) } \references{ Ugarte, M. D., Militino, A. F., and Arnholt, A. T. (2008) \emph{Probability and Statistics with R}. Chapman & Hall/CRC. } \keyword{datasets}
context("math token spacing") test_that("invalid tokens return error", { expect_error(test_collection( "math_token_spacing", "non_strict_math_spacing_all", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing("hdk"), strict = FALSE ), "lookup") }) test_that("non-strict default: spacing around all", { expect_warning(test_collection( "math_token_spacing", "non_strict_math_spacing_all", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing(), strict = FALSE ), NA) }) test_that("strict default: spacing around all", { expect_warning(test_collection( "math_token_spacing", "strict_math_spacing_all", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = tidyverse_math_token_spacing(), strict = TRUE ), NA) }) test_that("strict no space around +", { expect_warning(test_collection( "math_token_spacing", "strict_math_spacing_zero_plus", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing(zero = "'+'") ), NA) }) test_that("strict no space around all but ^", { expect_warning(test_collection( "math_token_spacing", "strict_math_spacing_zero_all_but_power", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing(zero = c( "'+'", "'-'", "'/'", "'*'" )) ), NA) })
/tests/testthat/test-math_token_spacing.R
permissive
lorenzwalthert/styler
R
false
false
1,599
r
context("math token spacing") test_that("invalid tokens return error", { expect_error(test_collection( "math_token_spacing", "non_strict_math_spacing_all", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing("hdk"), strict = FALSE ), "lookup") }) test_that("non-strict default: spacing around all", { expect_warning(test_collection( "math_token_spacing", "non_strict_math_spacing_all", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing(), strict = FALSE ), NA) }) test_that("strict default: spacing around all", { expect_warning(test_collection( "math_token_spacing", "strict_math_spacing_all", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = tidyverse_math_token_spacing(), strict = TRUE ), NA) }) test_that("strict no space around +", { expect_warning(test_collection( "math_token_spacing", "strict_math_spacing_zero_plus", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing(zero = "'+'") ), NA) }) test_that("strict no space around all but ^", { expect_warning(test_collection( "math_token_spacing", "strict_math_spacing_zero_all_but_power", transformer = style_text, style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing(zero = c( "'+'", "'-'", "'/'", "'*'" )) ), NA) })
#!/usr/bin/env R
# Author Sean Maden
# Run superintronic for gene of interest: compute per-base coverage over the
# gene's annotated parts from sorted BAM files and plot coverage alongside
# the gene model.
library(superintronic)
library(plyranges)
library(Rsamtools)
library(ggplot2)
# BUG FIX: the `/` plot-composition operator used below (p / gene_track) is
# provided by patchwork, which was never attached; the first composition ran
# before the later patchwork:: call loaded the namespace and would fail.
library(patchwork)

# get bam filepaths; BAM files live in a directory named after the gene
gene.name <- "ZRSR2"
gene.dpath <- gene.name
bam.fnv <- list.files(gene.dpath)
bam.fnv <- bam.fnv[grepl("\\.sorted.bam", bam.fnv)]
head(bam.fnv)

# test bams -- sanity check that one BAM is readable
bam <- scanBam(file.path(gene.dpath, "ZRSR2_SRR5009377.sorted.bam"))

# get bam coverages (long format: one row per base per sample)
bamdf <- data.frame(
  bam = file.path(gene.dpath, list.files(gene.dpath)),
  name = list.files(gene.dpath),
  stringsAsFactors = FALSE
)
cvg <- superintronic::compute_coverage_long(bamdf, source = "bam")

# get features (exon/intron parts) for the gene of interest from the gtf
gtf.path <- "gencode.v35.primary_assembly.annotation.gtf"
features <- gtf.path %>%
  collect_parts() %>%
  filter(gene_name == gene.name)

# get overlapping gene parts
cvg_over_features <- cvg %>%
  select(-bam) %>%
  join_parts(features)
cvg_over_features

#------------------------------
# coverage and gene model plots
#------------------------------
# CONSISTENCY FIX: the titles previously hard-coded "SRM" (left over from a
# different gene); build them from gene.name instead.
p <- cvg_over_features %>%
  mutate(strand = feature_strand) %>%
  view_coverage(score = score, colour = feature_type) +
  scale_color_brewer(palette = "Dark2") +
  guides(colour = FALSE) +
  labs(title = paste("Coverage over", gene.name))
p

gene_track <- view_segments(unnest_parts(features), colour = feature_type)
gene_track

# coverage stacked over the gene model
p / gene_track

# same coverage plot, faceted by sample name
p <- cvg_over_features %>%
  mutate(strand = feature_strand) %>%
  view_coverage(score = score, colour = feature_type, facets = vars(name)) +
  scale_color_brewer(palette = "Dark2") +
  guides(colour = FALSE) +
  labs(title = paste("Coverage over", gene.name))
p / gene_track + patchwork::plot_layout(heights = c(3, 1))
/inst/analysis/run_superintronic.R
permissive
metamaden/rienv
R
false
false
1,725
r
#!/usr/bin/env R
# Author Sean Maden
# Run superintronic for gene of interest: compute per-base coverage over the
# gene's annotated parts from sorted BAM files and plot coverage alongside
# the gene model.
library(superintronic)
library(plyranges)
library(Rsamtools)
library(ggplot2)
# BUG FIX: the `/` plot-composition operator used below (p / gene_track) is
# provided by patchwork, which was never attached; the first composition ran
# before the later patchwork:: call loaded the namespace and would fail.
library(patchwork)

# get bam filepaths; BAM files live in a directory named after the gene
gene.name <- "ZRSR2"
gene.dpath <- gene.name
bam.fnv <- list.files(gene.dpath)
bam.fnv <- bam.fnv[grepl("\\.sorted.bam", bam.fnv)]
head(bam.fnv)

# test bams -- sanity check that one BAM is readable
bam <- scanBam(file.path(gene.dpath, "ZRSR2_SRR5009377.sorted.bam"))

# get bam coverages (long format: one row per base per sample)
bamdf <- data.frame(
  bam = file.path(gene.dpath, list.files(gene.dpath)),
  name = list.files(gene.dpath),
  stringsAsFactors = FALSE
)
cvg <- superintronic::compute_coverage_long(bamdf, source = "bam")

# get features (exon/intron parts) for the gene of interest from the gtf
gtf.path <- "gencode.v35.primary_assembly.annotation.gtf"
features <- gtf.path %>%
  collect_parts() %>%
  filter(gene_name == gene.name)

# get overlapping gene parts
cvg_over_features <- cvg %>%
  select(-bam) %>%
  join_parts(features)
cvg_over_features

#------------------------------
# coverage and gene model plots
#------------------------------
# CONSISTENCY FIX: the titles previously hard-coded "SRM" (left over from a
# different gene); build them from gene.name instead.
p <- cvg_over_features %>%
  mutate(strand = feature_strand) %>%
  view_coverage(score = score, colour = feature_type) +
  scale_color_brewer(palette = "Dark2") +
  guides(colour = FALSE) +
  labs(title = paste("Coverage over", gene.name))
p

gene_track <- view_segments(unnest_parts(features), colour = feature_type)
gene_track

# coverage stacked over the gene model
p / gene_track

# same coverage plot, faceted by sample name
p <- cvg_over_features %>%
  mutate(strand = feature_strand) %>%
  view_coverage(score = score, colour = feature_type, facets = vars(name)) +
  scale_color_brewer(palette = "Dark2") +
  guides(colour = FALSE) +
  labs(title = paste("Coverage over", gene.name))
p / gene_track + patchwork::plot_layout(heights = c(3, 1))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tximport.R \name{tximport} \alias{tximport} \title{Import transcript-level abundances and counts for transcript- and gene-level analysis packages} \usage{ tximport(files, type = c("none", "salmon", "sailfish", "alevin", "kallisto", "rsem", "stringtie"), txIn = TRUE, txOut = FALSE, countsFromAbundance = c("no", "scaledTPM", "lengthScaledTPM", "dtuScaledTPM"), tx2gene = NULL, varReduce = FALSE, dropInfReps = FALSE, infRepStat = NULL, ignoreTxVersion = FALSE, ignoreAfterBar = FALSE, geneIdCol, txIdCol, abundanceCol, countsCol, lengthCol, importer = NULL, existenceOptional = FALSE, sparse = FALSE, sparseThreshold = 1, readLength = 75) } \arguments{ \item{files}{a character vector of filenames for the transcript-level abundances} \item{type}{character, the type of software used to generate the abundances. Options are "salmon", "sailfish", "alevin", "kallisto", "rsem", "stringtie", or "none". This argument is used to autofill the arguments below (geneIdCol, etc.) "none" means that the user will specify these columns.} \item{txIn}{logical, whether the incoming files are transcript level (default TRUE)} \item{txOut}{logical, whether the function should just output transcript-level (default FALSE)} \item{countsFromAbundance}{character, either "no" (default), "scaledTPM", "lengthScaledTPM", or "dtuScaledTPM". Whether to generate estimated counts using abundance estimates: \itemize{ \item scaled up to library size (scaledTPM), \item scaled using the average transcript length over samples and then the library size (lengthScaledTPM), or \item scaled using the median transcript length among isoforms of a gene, and then the library size (dtuScaledTPM). } dtuScaledTPM is designed for DTU analysis in combination with \code{txOut=TRUE}, and it requires specifing a \code{tx2gene} data.frame. 
dtuScaledTPM works such that within a gene, values from all samples and all transcripts get scaled by the same fixed median transcript length. If using scaledTPM, lengthScaledTPM, or geneLengthScaledTPM, the counts are no longer correlated across samples with transcript length, and so the length offset matrix should not be used.} \item{tx2gene}{a two-column data.frame linking transcript id (column 1) to gene id (column 2). the column names are not relevant, but this column order must be used. this argument is required for gene-level summarization for methods that provides transcript-level estimates only (kallisto, Salmon, Sailfish)} \item{varReduce}{whether to reduce per-sample inferential replicates information into a matrix of sample variances \code{variance} (default FALSE)} \item{dropInfReps}{whether to skip reading in inferential replicates (default FALSE)} \item{infRepStat}{a function to re-compute counts and abundances from the inferential replicates, e.g. \code{matrixStats::rowMedians} to re-compute counts as the median of the inferential replicates. The order of operations is: first counts are re-computed, then abundances are re-computed. Following this, if \code{countsFromAbundance} is not "no", \code{tximport} will again re-compute counts from the re-computed abundances. \code{infRepStat} should operate on rows of a matrix. (default is NULL)} \item{ignoreTxVersion}{logical, whether to split the tx id on the '.' character to remove version information, for easier matching with the tx id in gene2tx (default FALSE)} \item{ignoreAfterBar}{logical, whether to split the tx id on the '|' character (default FALSE)} \item{geneIdCol}{name of column with gene id. if missing, the gene2tx argument can be used} \item{txIdCol}{name of column with tx id} \item{abundanceCol}{name of column with abundances (e.g. 
TPM or FPKM)} \item{countsCol}{name of column with estimated counts} \item{lengthCol}{name of column with feature length information} \item{importer}{a function used to read in the files} \item{existenceOptional}{logical, should tximport not check if files exist before attempting import (default FALSE, meaning files must exist according to \code{file.exists})} \item{sparse}{logical, whether to try to import data sparsely (default is FALSE). Initial implementation for \code{txOut=TRUE}, \code{countsFromAbundance="no"} or \code{"scaledTPM"}, no inferential replicates. Only counts matrix is returned (and abundance matrix if using \code{"scaledTPM"})} \item{sparseThreshold}{the minimum threshold for including a count as a non-zero count during sparse import (default is 1)} \item{readLength}{numeric, the read length used to calculate counts from StringTie's output of coverage. Default value (from StringTie) is 75. The formula used to calculate counts is: \code{cov * transcript length / read length}} } \value{ a simple list containing matrices: abundance, counts, length. Another list element 'countsFromAbundance' carries through the character argument used in the tximport call. If detected, and \code{txOut=TRUE}, inferential replicates for each sample will be imported and stored as a list of matrices, itself an element \code{infReps} in the returned list. If \code{varReduce=TRUE} the inferential replicates will be summarized according to the sample variance, and stored as a matrix \code{variance}. The length matrix contains the average transcript length for each gene which can be used as an offset for gene-level analysis. } \description{ \code{tximport} imports transcript-level estimates from various external software and optionally summarizes abundances, counts, and transcript lengths to the gene-level (default) or outputs transcript-level matrices (see \code{txOut} argument). 
} \details{ \code{tximport} will also load in information about inferential replicates -- a list of matrices of the Gibbs samples from the posterior, or bootstrap replicates, per sample -- if these data are available in the expected locations relative to the \code{files}. The inferential replicates, stored in \code{infReps} in the output list, are on estimated counts, and therefore follow \code{counts} in the output list. By setting \code{varReduce=TRUE}, the inferential replicate matrices will be replaced by a single matrix with the sample variance per transcript/gene and per sample. While \code{tximport} summarizes to the gene-level by default, the user can also perform the import and summarization steps manually, by specifing \code{txOut=TRUE} and then using the function \code{summarizeToGene}. Note however that this is equivalent to \code{tximport} with \code{txOut=FALSE} (the default). Solutions to the error "tximport failed at summarizing to the gene-level": \enumerate{ \item provide a \code{tx2gene} data.frame linking transcripts to genes (more below) \item avoid gene-level summarization by specifying \code{txOut=TRUE} \item set \code{geneIdCol} to an appropriate column in the files } See \code{vignette('tximport')} for example code for generating a \code{tx2gene} data.frame from a \code{TxDb} object. Note that the \code{keys} and \code{select} functions used to create the \code{tx2gene} object are documented in the man page for \link[AnnotationDbi]{AnnotationDb-class} objects in the AnnotationDbi package (TxDb inherits from AnnotationDb). For further details on generating TxDb objects from various inputs see \code{vignette('GenomicFeatures')} from the GenomicFeatures package. For \code{type="alevin"} all arguments other than \code{files} are ignored, and \code{files} should point to a single \code{quants_mat.gz} file, in the directory structure created by the alevin software (e.g. do not move the file or delete the other important files). 
\code{tximport} is solely importing the gene-by-cell matrix of counts, as \code{txi$counts}, and effective lengths are not estimated. Length correction should not be applied to datasets where there is not an expected correlation of counts and feature length. } \examples{ # load data for demonstrating tximport # note that the vignette shows more examples # including how to read in files quickly using the readr package library(tximportData) dir <- system.file("extdata", package="tximportData") samples <- read.table(file.path(dir,"samples.txt"), header=TRUE) files <- file.path(dir,"salmon", samples$run, "quant.sf.gz") names(files) <- paste0("sample",1:6) # tx2gene links transcript IDs to gene IDs for summarization tx2gene <- read.csv(file.path(dir, "tx2gene.gencode.v27.csv")) txi <- tximport(files, type="salmon", tx2gene=tx2gene) } \references{ Charlotte Soneson, Michael I. Love, Mark D. Robinson (2015): Differential analyses for RNA-seq: transcript-level estimates improve gene-level inferences. F1000Research. \url{http://dx.doi.org/10.12688/f1000research.7563.1} }
/man/tximport.Rd
no_license
jiawu/tximport
R
false
true
8,761
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tximport.R \name{tximport} \alias{tximport} \title{Import transcript-level abundances and counts for transcript- and gene-level analysis packages} \usage{ tximport(files, type = c("none", "salmon", "sailfish", "alevin", "kallisto", "rsem", "stringtie"), txIn = TRUE, txOut = FALSE, countsFromAbundance = c("no", "scaledTPM", "lengthScaledTPM", "dtuScaledTPM"), tx2gene = NULL, varReduce = FALSE, dropInfReps = FALSE, infRepStat = NULL, ignoreTxVersion = FALSE, ignoreAfterBar = FALSE, geneIdCol, txIdCol, abundanceCol, countsCol, lengthCol, importer = NULL, existenceOptional = FALSE, sparse = FALSE, sparseThreshold = 1, readLength = 75) } \arguments{ \item{files}{a character vector of filenames for the transcript-level abundances} \item{type}{character, the type of software used to generate the abundances. Options are "salmon", "sailfish", "alevin", "kallisto", "rsem", "stringtie", or "none". This argument is used to autofill the arguments below (geneIdCol, etc.) "none" means that the user will specify these columns.} \item{txIn}{logical, whether the incoming files are transcript level (default TRUE)} \item{txOut}{logical, whether the function should just output transcript-level (default FALSE)} \item{countsFromAbundance}{character, either "no" (default), "scaledTPM", "lengthScaledTPM", or "dtuScaledTPM". Whether to generate estimated counts using abundance estimates: \itemize{ \item scaled up to library size (scaledTPM), \item scaled using the average transcript length over samples and then the library size (lengthScaledTPM), or \item scaled using the median transcript length among isoforms of a gene, and then the library size (dtuScaledTPM). } dtuScaledTPM is designed for DTU analysis in combination with \code{txOut=TRUE}, and it requires specifing a \code{tx2gene} data.frame. 
dtuScaledTPM works such that within a gene, values from all samples and all transcripts get scaled by the same fixed median transcript length. If using scaledTPM, lengthScaledTPM, or geneLengthScaledTPM, the counts are no longer correlated across samples with transcript length, and so the length offset matrix should not be used.} \item{tx2gene}{a two-column data.frame linking transcript id (column 1) to gene id (column 2). the column names are not relevant, but this column order must be used. this argument is required for gene-level summarization for methods that provides transcript-level estimates only (kallisto, Salmon, Sailfish)} \item{varReduce}{whether to reduce per-sample inferential replicates information into a matrix of sample variances \code{variance} (default FALSE)} \item{dropInfReps}{whether to skip reading in inferential replicates (default FALSE)} \item{infRepStat}{a function to re-compute counts and abundances from the inferential replicates, e.g. \code{matrixStats::rowMedians} to re-compute counts as the median of the inferential replicates. The order of operations is: first counts are re-computed, then abundances are re-computed. Following this, if \code{countsFromAbundance} is not "no", \code{tximport} will again re-compute counts from the re-computed abundances. \code{infRepStat} should operate on rows of a matrix. (default is NULL)} \item{ignoreTxVersion}{logical, whether to split the tx id on the '.' character to remove version information, for easier matching with the tx id in gene2tx (default FALSE)} \item{ignoreAfterBar}{logical, whether to split the tx id on the '|' character (default FALSE)} \item{geneIdCol}{name of column with gene id. if missing, the gene2tx argument can be used} \item{txIdCol}{name of column with tx id} \item{abundanceCol}{name of column with abundances (e.g. 
TPM or FPKM)} \item{countsCol}{name of column with estimated counts} \item{lengthCol}{name of column with feature length information} \item{importer}{a function used to read in the files} \item{existenceOptional}{logical, should tximport not check if files exist before attempting import (default FALSE, meaning files must exist according to \code{file.exists})} \item{sparse}{logical, whether to try to import data sparsely (default is FALSE). Initial implementation for \code{txOut=TRUE}, \code{countsFromAbundance="no"} or \code{"scaledTPM"}, no inferential replicates. Only counts matrix is returned (and abundance matrix if using \code{"scaledTPM"})} \item{sparseThreshold}{the minimum threshold for including a count as a non-zero count during sparse import (default is 1)} \item{readLength}{numeric, the read length used to calculate counts from StringTie's output of coverage. Default value (from StringTie) is 75. The formula used to calculate counts is: \code{cov * transcript length / read length}} } \value{ a simple list containing matrices: abundance, counts, length. Another list element 'countsFromAbundance' carries through the character argument used in the tximport call. If detected, and \code{txOut=TRUE}, inferential replicates for each sample will be imported and stored as a list of matrices, itself an element \code{infReps} in the returned list. If \code{varReduce=TRUE} the inferential replicates will be summarized according to the sample variance, and stored as a matrix \code{variance}. The length matrix contains the average transcript length for each gene which can be used as an offset for gene-level analysis. } \description{ \code{tximport} imports transcript-level estimates from various external software and optionally summarizes abundances, counts, and transcript lengths to the gene-level (default) or outputs transcript-level matrices (see \code{txOut} argument). 
} \details{ \code{tximport} will also load in information about inferential replicates -- a list of matrices of the Gibbs samples from the posterior, or bootstrap replicates, per sample -- if these data are available in the expected locations relative to the \code{files}. The inferential replicates, stored in \code{infReps} in the output list, are on estimated counts, and therefore follow \code{counts} in the output list. By setting \code{varReduce=TRUE}, the inferential replicate matrices will be replaced by a single matrix with the sample variance per transcript/gene and per sample. While \code{tximport} summarizes to the gene-level by default, the user can also perform the import and summarization steps manually, by specifing \code{txOut=TRUE} and then using the function \code{summarizeToGene}. Note however that this is equivalent to \code{tximport} with \code{txOut=FALSE} (the default). Solutions to the error "tximport failed at summarizing to the gene-level": \enumerate{ \item provide a \code{tx2gene} data.frame linking transcripts to genes (more below) \item avoid gene-level summarization by specifying \code{txOut=TRUE} \item set \code{geneIdCol} to an appropriate column in the files } See \code{vignette('tximport')} for example code for generating a \code{tx2gene} data.frame from a \code{TxDb} object. Note that the \code{keys} and \code{select} functions used to create the \code{tx2gene} object are documented in the man page for \link[AnnotationDbi]{AnnotationDb-class} objects in the AnnotationDbi package (TxDb inherits from AnnotationDb). For further details on generating TxDb objects from various inputs see \code{vignette('GenomicFeatures')} from the GenomicFeatures package. For \code{type="alevin"} all arguments other than \code{files} are ignored, and \code{files} should point to a single \code{quants_mat.gz} file, in the directory structure created by the alevin software (e.g. do not move the file or delete the other important files). 
\code{tximport} is solely importing the gene-by-cell matrix of counts, as \code{txi$counts}, and effective lengths are not estimated. Length correction should not be applied to datasets where there is not an expected correlation of counts and feature length. } \examples{ # load data for demonstrating tximport # note that the vignette shows more examples # including how to read in files quickly using the readr package library(tximportData) dir <- system.file("extdata", package="tximportData") samples <- read.table(file.path(dir,"samples.txt"), header=TRUE) files <- file.path(dir,"salmon", samples$run, "quant.sf.gz") names(files) <- paste0("sample",1:6) # tx2gene links transcript IDs to gene IDs for summarization tx2gene <- read.csv(file.path(dir, "tx2gene.gencode.v27.csv")) txi <- tximport(files, type="salmon", tx2gene=tx2gene) } \references{ Charlotte Soneson, Michael I. Love, Mark D. Robinson (2015): Differential analyses for RNA-seq: transcript-level estimates improve gene-level inferences. F1000Research. \url{http://dx.doi.org/10.12688/f1000research.7563.1} }
library("ggplot2") library("cowplot") rm(list=ls(all=TRUE)) ## Fig=5A #plotted from raw data on HPC using R copynumber package ## Fig=5B # read in data case_list_for_scna<-read.table("./case_scna_analysis.txt",header=T,sep="\t",stringsAsFactors=F) counts<-table(case_list_for_scna$Response) count_non_resp<-counts[1] count_resp<-counts[2] cyto_dat<-read.table("./cytoband_counts.txt",header=T,sep="\t",stringsAsFactors=F) # do Fishers Exact test per cytoband cyto_dat$del_pval<-1 cyto_dat$amp_pval<-1 for(i in 1:nrow(cyto_dat)){ res_loss <- fisher.test(matrix(c(cyto_dat[i,2],count_resp-cyto_dat[i,2],cyto_dat[i,3],count_non_resp-cyto_dat[i,3]),nrow = 2)) cyto_dat[i,6]<-res_loss$p.value res_gain <- fisher.test(matrix(c(cyto_dat[i,4],count_resp-cyto_dat[i,4],cyto_dat[i,5],count_non_resp-cyto_dat[i,5]),nrow = 2)) cyto_dat[i,7]<-res_gain$p.value } cyto_dat$del_qval<-p.adjust(cyto_dat$del_pval, "fdr") cyto_dat$amp_qval<-p.adjust(cyto_dat$amp_pval, "fdr") del_hits<-head(cyto_dat[order(cyto_dat$del_pval),],10) amp_hits<-head(cyto_dat[order(cyto_dat$amp_pval),],10) del_hits$freq_diff<-((del_hits$loss_count_responders/count_resp)-(del_hits$loss_count_non_responders/count_non_resp))*100 amp_hits$freq_diff<-((amp_hits$gain_count_responders/count_resp)-(amp_hits$gain_count_non_responders/count_non_resp))*100 # plot results p1<-ggplot(data=del_hits, aes(x=reorder(cytoband,freq_diff), y=freq_diff)) +geom_bar(stat="identity", position=position_dodge(),fill="darkred")+ theme_minimal()+coord_flip() p2<-ggplot(data=amp_hits, aes(x=reorder(cytoband,freq_diff), y=freq_diff)) +geom_bar(stat="identity", position=position_dodge(),fill="darkblue")+ theme_minimal()+coord_flip() plot_grid(p1,p2) #ggsave("./panel_5b.pdf",dpi=600) ## Panel=Fig5C #make 9q34 plot out_del_to_plot<-read.table("./9q34_deletion_freqs.txt",header=T,sep="\t",stringsAsFactors=F) out_del_to_plot_m<-melt(out_del_to_plot,measure.vars = c("resp_del_freq","non_resp_del_freq")) ggplot(out_del_to_plot_m) + geom_line(aes(y = 
-freq_diff, x = chr9_Mb),color="black",size=0.6,data = out_del_to_plot, stat="identity")+scale_y_continuous()+theme_bw() ## Panel=Fig5D #make freq plot by histology hist_freq<-read.table("./TRAF2_hist_freq.txt",header=T,stringsAsFactors=F) hist_freq_m<-melt(hist_freq,measure.vars = c("X._del_resp","X._del_non_resp")) hist_freq_m$Tumour_Type<-ordered(hist_freq_m$Tumour_Type,levels=c("Other","Melanoma","Bladder","Pan-cancer")) hist_freq_m$variable<-ordered(hist_freq_m$variable,levels=c("X._del_non_resp","X._del_resp")) ggplot(data=hist_freq_m, aes(x=Tumour_Type, y=value,color=variable,fill=variable)) +geom_bar(stat="identity", position=position_dodge())+ theme_classic()+coord_flip()+scale_fill_manual(values=c("darkred","darkblue"))+scale_color_manual(values=c("darkred","darkblue")) ## Panel=Fig5E dat<-read.table("./haplo_dat.txt",header=T,sep="\t",stringsAsFactors=F) dat$order<-rownames(dat) dat$order<-as.numeric(rownames(dat)) ggplot(dat, aes(order,prob_haploinsufficient,color = prob_haploinsufficient))+geom_point(shape=1, size=0.75)+theme_minimal()+scale_color_gradient(low = "pink", high = "darkred")
/code/Figure5/Fig5_script.R
no_license
rhd-savidr/CPI1000_paper
R
false
false
3,198
r
# Figure 5: somatic copy-number alteration (SCNA) analysis panels.
# Reads pre-computed summary tables from the working directory and draws
# panels 5B-5E (5A is plotted from raw data on HPC using the R copynumber
# package).
library("ggplot2")
library("cowplot")
# BUG FIX: melt() (panels 5C/5D) comes from reshape2, which was never loaded
library("reshape2")
# (removed `rm(list=ls(all=TRUE))` -- clearing the user's workspace from a
# script is unsafe; run the script in a fresh R session instead)

## Fig 5A
# plotted from raw data on HPC using R copynumber package

## Fig 5B
# read in data: one row per case with a responder / non-responder label
case_list_for_scna <- read.table("./case_scna_analysis.txt", header = TRUE,
                                 sep = "\t", stringsAsFactors = FALSE)
counts <- table(case_list_for_scna$Response)
count_non_resp <- counts[1]  # assumes non-responders sort first -- TODO confirm
count_resp <- counts[2]

# per-cytoband loss/gain counts for responders and non-responders
cyto_dat <- read.table("./cytoband_counts.txt", header = TRUE, sep = "\t",
                       stringsAsFactors = FALSE)

# Fisher's exact test per cytoband: 2x2 table of altered vs non-altered cases,
# responders vs non-responders (deletions use columns 2/3, amplifications 4/5).
cyto_dat$del_pval <- 1
cyto_dat$amp_pval <- 1
for (i in seq_len(nrow(cyto_dat))) {
  res_loss <- fisher.test(matrix(c(cyto_dat[i, 2], count_resp - cyto_dat[i, 2],
                                   cyto_dat[i, 3], count_non_resp - cyto_dat[i, 3]),
                                 nrow = 2))
  # write by name (the original wrote positional columns 6/7, which assumed a
  # 5-column input file)
  cyto_dat$del_pval[i] <- res_loss$p.value
  res_gain <- fisher.test(matrix(c(cyto_dat[i, 4], count_resp - cyto_dat[i, 4],
                                   cyto_dat[i, 5], count_non_resp - cyto_dat[i, 5]),
                                 nrow = 2))
  cyto_dat$amp_pval[i] <- res_gain$p.value
}

# FDR correction across cytobands
cyto_dat$del_qval <- p.adjust(cyto_dat$del_pval, "fdr")
cyto_dat$amp_qval <- p.adjust(cyto_dat$amp_pval, "fdr")

# top 10 hits by raw p-value; frequency difference in percentage points
# (responders minus non-responders)
del_hits <- head(cyto_dat[order(cyto_dat$del_pval), ], 10)
amp_hits <- head(cyto_dat[order(cyto_dat$amp_pval), ], 10)
del_hits$freq_diff <- ((del_hits$loss_count_responders / count_resp) -
                         (del_hits$loss_count_non_responders / count_non_resp)) * 100
amp_hits$freq_diff <- ((amp_hits$gain_count_responders / count_resp) -
                         (amp_hits$gain_count_non_responders / count_non_resp)) * 100

# plot results
p1 <- ggplot(data = del_hits, aes(x = reorder(cytoband, freq_diff), y = freq_diff)) +
  geom_bar(stat = "identity", position = position_dodge(), fill = "darkred") +
  theme_minimal() + coord_flip()
p2 <- ggplot(data = amp_hits, aes(x = reorder(cytoband, freq_diff), y = freq_diff)) +
  geom_bar(stat = "identity", position = position_dodge(), fill = "darkblue") +
  theme_minimal() + coord_flip()
plot_grid(p1, p2)
# ggsave("./panel_5b.pdf", dpi = 600)

## Fig 5C
# make 9q34 deletion-frequency plot along chromosome 9
out_del_to_plot <- read.table("./9q34_deletion_freqs.txt", header = TRUE,
                              sep = "\t", stringsAsFactors = FALSE)
out_del_to_plot_m <- melt(out_del_to_plot,
                          measure.vars = c("resp_del_freq", "non_resp_del_freq"))
# NOTE(review): the line layer draws -freq_diff from the *unmelted* table; the
# melted frame only seeds the plot object (behaviour kept as in the original)
ggplot(out_del_to_plot_m) +
  geom_line(aes(y = -freq_diff, x = chr9_Mb), color = "black", size = 0.6,
            data = out_del_to_plot, stat = "identity") +
  scale_y_continuous() + theme_bw()

## Fig 5D
# TRAF2 deletion frequency by histology, responders vs non-responders
hist_freq <- read.table("./TRAF2_hist_freq.txt", header = TRUE,
                        stringsAsFactors = FALSE)
hist_freq_m <- melt(hist_freq, measure.vars = c("X._del_resp", "X._del_non_resp"))
hist_freq_m$Tumour_Type <- ordered(hist_freq_m$Tumour_Type,
                                   levels = c("Other", "Melanoma", "Bladder", "Pan-cancer"))
hist_freq_m$variable <- ordered(hist_freq_m$variable,
                                levels = c("X._del_non_resp", "X._del_resp"))
ggplot(data = hist_freq_m, aes(x = Tumour_Type, y = value,
                               color = variable, fill = variable)) +
  geom_bar(stat = "identity", position = position_dodge()) +
  theme_classic() + coord_flip() +
  scale_fill_manual(values = c("darkred", "darkblue")) +
  scale_color_manual(values = c("darkred", "darkblue"))

## Fig 5E
# haploinsufficiency probability per row, plotted in input order
dat <- read.table("./haplo_dat.txt", header = TRUE, sep = "\t",
                  stringsAsFactors = FALSE)
# (the original assigned rownames twice; only the numeric version is used)
dat$order <- as.numeric(rownames(dat))
ggplot(dat, aes(order, prob_haploinsufficient, color = prob_haploinsufficient)) +
  geom_point(shape = 1, size = 0.75) + theme_minimal() +
  scale_color_gradient(low = "pink", high = "darkred")
### Temporary data storage: Save data in the current R session.
### Submitted form responses accumulate in a session-global `responses` data
### frame, so everything is lost when the R session ends.
### To run, change the file name to app.R then type runApp() in the console
library(shiny)

# Define the fields we want to save from the form
fields <- c("name", "used_shiny", "r_num_years")

# Define saveData and loadData functions

# saveData: append one submission (a named character vector, one entry per
# form field) as a new row of the session-global `responses` data frame,
# creating the data frame on first use.
saveData <- function(data) {
  # t() turns the named vector into a 1-row matrix so each field is a column
  data <- as.data.frame(t(data))
  if (exists("responses")) {
    # `<<-` assigns in the enclosing (global) environment so the data
    # persists across calls for the lifetime of the session
    responses <<- rbind(responses, data)
  } else {
    responses <<- data
  }
}

# loadData: return all responses saved so far, or nothing (NULL) if no
# submission has been made yet.
loadData <- function() {
  if (exists("responses")) {
    responses
  }
}

# Shiny app with 3 fields that the user can submit data for
shinyApp(
  ui = fluidPage(
    DT::dataTableOutput("responses", width = 300), tags$hr(),
    textInput("name", "Name", ""),
    checkboxInput("used_shiny", "I've built a Shiny app in R before", FALSE),
    sliderInput("r_num_years", "Number of years using R", 0, 25, 2, ticks = FALSE),
    actionButton("submit", "Submit")
  ),
  server = function(input, output, session) {

    # Whenever a field is filled, aggregate all form data
    formData <- reactive({
      data <- sapply(fields, function(x) input[[x]])
      data
    })

    # When the Submit button is clicked, save the form data
    observeEvent(input$submit, {
      saveData(formData())
    })

    # Show the previous responses
    # (update with current response when Submit is clicked)
    output$responses <- DT::renderDataTable({
      input$submit
      loadData()
    })
  }
)
/app_CurrentRSession.R
no_license
homoxapien/mai_shiny_notes
R
false
false
1,488
r
### Temporary data storage: keep submitted responses in the current R session
### only (a global `responses` data frame that disappears when the session
### ends). To run, change the file name to app.R then type runApp() in the
### console.
library(shiny)

# Form fields whose values are captured on each submission
fields <- c("name", "used_shiny", "r_num_years")

# Persist a single submission: convert the named vector into a one-row data
# frame and append it to (or create) the session-global `responses` table.
saveData <- function(row) {
  row <- as.data.frame(t(row))
  if (!exists("responses")) {
    responses <<- row
  } else {
    responses <<- rbind(responses, row)
  }
}

# Fetch everything saved so far; yields nothing before the first submission.
loadData <- function() {
  if (exists("responses")) {
    responses
  }
}

# Three-field demo app: a table of past responses above the input form
shinyApp(
  ui = fluidPage(
    DT::dataTableOutput("responses", width = 300),
    tags$hr(),
    textInput("name", "Name", ""),
    checkboxInput("used_shiny", "I've built a Shiny app in R before", FALSE),
    sliderInput("r_num_years", "Number of years using R", 0, 25, 2, ticks = FALSE),
    actionButton("submit", "Submit")
  ),
  server = function(input, output, session) {

    # Collect the current value of every form field into a named vector
    formData <- reactive({
      sapply(fields, function(field) input[[field]])
    })

    # Persist the form contents each time Submit is pressed
    observeEvent(input$submit, {
      saveData(formData())
    })

    # Render all responses; referencing input$submit re-runs this after
    # every submission so the newest row appears immediately
    output$responses <- DT::renderDataTable({
      input$submit
      loadData()
    })
  }
)
# Return the subset of `package` that cannot currently be loaded.
#
# package: character vector of package names.
# Returns: character vector of the unavailable packages (length 0 when all
#          of them load successfully).
packagesNeedingInstallation <- function(package) {
  available <- suppressMessages(suppressWarnings(
    sapply(package, require, quietly = TRUE, character.only = TRUE,
           warn.conflicts = FALSE)
  ))
  missing <- package[!available]
  if (length(missing) > 0) {
    return(missing)
  }
  character()
}

# TRUE when a concrete CRAN mirror is configured; R's unset placeholder is
# the literal string "@CRAN@".
# ROBUSTNESS FIX: also returns FALSE (instead of NA, which crashed callers'
# `if (!isCRANChoosen())`) when the repos option has no "CRAN" entry at all.
isCRANChoosen <- function() {
  cran <- getOption("repos")["CRAN"]
  isTRUE(unname(cran) != "@CRAN@")
}

# Install any of `requiredPackages` that are not already available, choosing
# a CRAN mirror first if none is configured, then report the outcome.
installNeededPackages <- function(requiredPackages,
                                  defaultCRANmirror = "https://cran.rstudio.com/") {
  packagesNeedInstall <- packagesNeedingInstallation(requiredPackages)
  if (length(packagesNeedInstall) > 0) {
    if (!isCRANChoosen()) {
      options(repos = structure(c(CRAN = "http://cran.rstudio.com/")))
      # fall back to the caller-supplied mirror if the first choice failed
      if (!isCRANChoosen()) {
        options(repos = c(CRAN = defaultCRANmirror))
      }
    }
    suppressMessages(suppressWarnings(install.packages(packagesNeedInstall)))
    packagesNeedInstall <- packagesNeedingInstallation(requiredPackages)
    if (length(packagesNeedInstall) == 0) {
      print("All required packages were successfully installed")
    } else {
      # BUG FIX: the placeholder was "$s", which sprintf never substitutes,
      # and the message was only returned, not shown; use "%s" and print it.
      print(sprintf("The following package(s) were not installed: %s",
                    paste(packagesNeedInstall, collapse = ",")))
    }
  }
}
/lib/functions.R
permissive
edublancas/song-lyrics
R
false
false
1,188
r
# Return the subset of `package` that cannot currently be loaded.
#
# package: character vector of package names.
# Returns: character vector of the unavailable packages (length 0 when all
#          of them load successfully).
packagesNeedingInstallation <- function(package) {
  available <- suppressMessages(suppressWarnings(
    sapply(package, require, quietly = TRUE, character.only = TRUE,
           warn.conflicts = FALSE)
  ))
  missing <- package[!available]
  if (length(missing) > 0) {
    return(missing)
  }
  character()
}

# TRUE when a concrete CRAN mirror is configured; R's unset placeholder is
# the literal string "@CRAN@".
# ROBUSTNESS FIX: also returns FALSE (instead of NA, which crashed callers'
# `if (!isCRANChoosen())`) when the repos option has no "CRAN" entry at all.
isCRANChoosen <- function() {
  cran <- getOption("repos")["CRAN"]
  isTRUE(unname(cran) != "@CRAN@")
}

# Install any of `requiredPackages` that are not already available, choosing
# a CRAN mirror first if none is configured, then report the outcome.
installNeededPackages <- function(requiredPackages,
                                  defaultCRANmirror = "https://cran.rstudio.com/") {
  packagesNeedInstall <- packagesNeedingInstallation(requiredPackages)
  if (length(packagesNeedInstall) > 0) {
    if (!isCRANChoosen()) {
      options(repos = structure(c(CRAN = "http://cran.rstudio.com/")))
      # fall back to the caller-supplied mirror if the first choice failed
      if (!isCRANChoosen()) {
        options(repos = c(CRAN = defaultCRANmirror))
      }
    }
    suppressMessages(suppressWarnings(install.packages(packagesNeedInstall)))
    packagesNeedInstall <- packagesNeedingInstallation(requiredPackages)
    if (length(packagesNeedInstall) == 0) {
      print("All required packages were successfully installed")
    } else {
      # BUG FIX: the placeholder was "$s", which sprintf never substitutes,
      # and the message was only returned, not shown; use "%s" and print it.
      print(sprintf("The following package(s) were not installed: %s",
                    paste(packagesNeedInstall, collapse = ",")))
    }
  }
}
# Shared helpers for the pivottabler "find groups" tests.
library(testthat)
# most common expectations:
# equality: expect_equal() and expect_identical()
# regexp: expect_match()
# catch-all: expect_true() and expect_false()
# console output: expect_output()
# messages: expect_message()
# warning: expect_warning()
# errors: expect_error()

# Escape a string so it can be pasted back into R source as a literal
# (backslashes, newlines, carriage returns and double quotes).
escapeString <- function(s) {
  t <- gsub("(\\\\)", "\\\\\\\\", s)
  t <- gsub("(\n)", "\\\\n", t)
  t <- gsub("(\r)", "\\\\r", t)
  t <- gsub("(\")", "\\\\\"", t)
  return(t)
}

# Escape `s`, verify the escaping round-trips via parse/eval, then copy an
# `html <- "..."` assignment to the clipboard for pasting into a test body.
# NOTE(review): utils::writeClipboard is Windows-only -- confirm if these
# helpers are ever run on other platforms.
prepStr <- function(s) {
  t <- escapeString(s)
  u <- eval(parse(text=paste0("\"", t, "\"")))
  if(s!=u) stop("Unable to escape string!")
  t <- paste0("\thtml <- \"", t, "\"")
  utils::writeClipboard(t)
  return(invisible())
}

# Default scenario parameters, used when stepping through a single scenario
# manually outside the loops below.
evaluationMode <- "sequential"
processingLibrary <- "dplyr"
description <- "test: sequential dplyr"
countFunction <- "n()"
# Development builds have 4-part version numbers and run the full matrix.
isDevelopmentVersion <- (length(strsplit(packageDescription("pivottabler")$Version, "\\.")[[1]]) > 3)

# Build the data frame of test scenarios: the full evaluation-mode x
# processing-library matrix for development versions (or when forced),
# otherwise just the single release combination supplied by the caller.
# Each row carries the per-library count expression (".N" vs "n()").
testScenarios <- function(description="test", releaseEvaluationMode="batch", releaseProcessingLibrary="dplyr", runAllForReleaseVersion=FALSE) {
  isDevelopmentVersion <- (length(strsplit(packageDescription("pivottabler")$Version, "\\.")[[1]]) > 3)
  if(isDevelopmentVersion||runAllForReleaseVersion) {
    evaluationModes <- c("sequential", "batch")
    processingLibraries <- c("dplyr", "data.table")
  } else {
    evaluationModes <- releaseEvaluationMode
    processingLibraries <- releaseProcessingLibrary
  }
  testCount <- length(evaluationModes)*length(processingLibraries)
  c1 <- character(testCount)
  c2 <- character(testCount)
  c3 <- character(testCount)
  c4 <- character(testCount)
  testCount <- 0
  # fill the scenario columns one combination at a time
  for(evaluationMode in evaluationModes)
    for(processingLibrary in processingLibraries) {
      testCount <- testCount + 1
      c1[testCount] <- evaluationMode
      c2[testCount] <- processingLibrary
      c3[testCount] <- paste0(description, ": ", evaluationMode, " ", processingLibrary)
      c4[testCount] <- ifelse(processingLibrary=="data.table", ".N", "n()")
    }
  df <- data.frame(evaluationMode=c1, processingLibrary=c2, description=c3, countFunction=c4, 
stringsAsFactors=FALSE) return(df) } context("FIND GROUP TESTS") scenarios <- testScenarios("find groups tests: simple: variableNames") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- pt$findColumnDataGroups(variableNames="TrainCategory") groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" 
colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 3) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: simple: variableValues") for(i in 1:nrow(scenarios)) { evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] 
countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- pt$findColumnDataGroups(variableValues=list("PowerType"=c("DMU", "HST"))) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td 
class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 3) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: simple: exclude totals") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, 
noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- pt$findColumnDataGroups(variableNames="TrainCategory", totals="exclude") groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n 
<td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 2) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: simple: only totals") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- 
pt$findColumnDataGroups(variableNames="TrainCategory", totals="only") groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n 
<th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 1) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: simple: includeDescendantGroups") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- pt$findColumnDataGroups( variableValues=list("TrainCategory"="Ordinary Passenger"), includeDescendantGroup=TRUE) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n 
<th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td 
class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 4) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: combinations: variableNames") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findColumnDataGroups(matchMode="combinations", variableNames=c("TrainCategory", "PowerType")) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th 
class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td 
class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 8) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: combinations: variableValues") for(i in 1:nrow(scenarios)) { evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findColumnDataGroups(matchMode="combinations", variableValues=list("TrainCategory"="Express Passenger", "PowerType"=c("DMU", "HST"))) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" 
colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td 
class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 2) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: combinations: specific sub total") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findColumnDataGroups(matchMode="combinations", variableValues=list("TrainCategory"="Express Passenger", "PowerType"="**")) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" 
style=\"background-color: #00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 1) expect_identical(as.character(pt$getHtml()), 
html) }) } scenarios <- testScenarios("find groups tests: atLevels") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findColumnDataGroups(atLevels=2) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: 
#00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 8) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: minChildCount") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break 
evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addRowDataGroups("TOC") pt$addRowDataGroups("PowerType") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findRowDataGroups(minChildCount=3) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\" colspan=\"2\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\">Arriva Trains Wales</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"3\">CrossCountry</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22196</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">HST</th>\n <td class=\"Cell\">732</td>\n <td 
class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"3\">London Midland</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">11229</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">37050</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"3\">Virgin Trains</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">2137</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">6457</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <th class=\"RowHeader\" rowspan=\"1\"></th>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 3) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: maxChildCount") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- 
scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addRowDataGroups("TOC") pt$addRowDataGroups("PowerType") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findRowDataGroups(atLevels=1, maxChildCount=2) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\" colspan=\"2\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"2\">Arriva Trains Wales</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\">CrossCountry</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22196</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">HST</th>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">22865</td>\n <td 
class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\">London Midland</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">11229</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">37050</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\">Virgin Trains</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">2137</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">6457</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\">Total</th>\n <th class=\"RowHeader\" rowspan=\"1\"></th>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 2) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: outline groups") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, 
compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addRowDataGroups("TOC", outlineBefore=TRUE) pt$addRowDataGroups("PowerType") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) groups <- pt$findRowDataGroups(outlineGroups="only") pt$setStyling(groups=groups, declarations=list("background-color"="#00FFFF")) groupCount <- length(groups) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\" colspan=\"2\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"OutlineRowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\" colspan=\"2\">Arriva Trains Wales</th>\n <td class=\"OutlineCell\" colspan=\"3\">&nbsp;</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\"></th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"OutlineRowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\" colspan=\"2\">CrossCountry</th>\n <td class=\"OutlineCell\" colspan=\"3\">&nbsp;</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\"></th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22196</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">HST</th>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\"></td>\n <td 
class=\"Cell\">732</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"OutlineRowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\" colspan=\"2\">London Midland</th>\n <td class=\"OutlineCell\" colspan=\"3\">&nbsp;</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\"></th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">11229</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">37050</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"OutlineRowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\" colspan=\"2\">Virgin Trains</th>\n <td class=\"OutlineCell\" colspan=\"3\">&nbsp;</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\"></th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">2137</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">6457</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <th class=\"RowHeader\" rowspan=\"1\"></th>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 4) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find 
groups tests: restrict by rows, cols, cells") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$addRowDataGroups("Status") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) pt$evaluatePivot() # find the data groups that are related to column 2 grps <- pt$findColumnDataGroups(columnNumbers=2) pt$setStyling(groups=grps, declarations=list("background-color"="yellow")) # find the DMU data groups that are related to columns 5 to 8 grps <- pt$findColumnDataGroups(columnNumbers=5:8, variableValues=list("PowerType"="DMU")) pt$setStyling(groups=grps, declarations=list("background-color"="lightgreen")) # find a totals data group that is related to the cell at (3, 7) cells <- pt$getCell(3, 7) pt$setStyling(cells=cells, declarations=list("background-color"="pink")) grps <- pt$findColumnDataGroups(variableValues=list("PowerType"="**"), cells=cells) pt$setStyling(groups=grps, declarations=list("background-color"="pink")) # find the data groups at the second level in the hierarchy that are related to row 3 grps <- pt$findRowDataGroups(atLevel=2, rowNumbers=3) pt$setStyling(groups=grps, declarations=list("background-color"="lightcyan")) # find the data groups related to rows 5 to 10 with Status "A" or "R" grps <- pt$findRowDataGroups(rowNumbers=5:10, variableValues=list("Status"=c("A", "R"))) pt$setStyling(groups=grps, declarations=list("background-color"="plum")) # find the data groups related to row 11:13 or cells in the range (12, 4) to (15, 5) cells <- pt$getCells(rowNumbers=12:15, 
columnNumbers=4:5, matchMode="combinations") pt$setStyling(cells=cells, declarations=list("background-color"="palegreen")) grps <- pt$findRowDataGroups(rowNumbers=11:13, cells=cells) pt$setStyling(groups=grps, declarations=list("background-color"="palegreen")) # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"2\">&nbsp;</th>\n <th class=\"ColumnHeader\" style=\"background-color: yellow; \" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: yellow; \">EMU</th>\n <th class=\"ColumnHeader\">HST</th>\n <th class=\"ColumnHeader\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: lightgreen; \">DMU</th>\n <th class=\"ColumnHeader\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: pink; \">Total</th>\n <th class=\"ColumnHeader\">&nbsp;</th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"4\">Arriva Trains Wales</th>\n <th class=\"RowHeader\">A</th>\n <td class=\"Cell\">3018</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">3018</td>\n <td class=\"Cell\">815</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">815</td>\n <td class=\"Total\">3833</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">C</th>\n <td class=\"Cell\">59</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">59</td>\n <td class=\"Cell\">15</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">15</td>\n <td class=\"Total\">74</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: lightcyan; \">R</th>\n <td class=\"Cell\">2</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">2</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td 
class=\"Total\" style=\"background-color: pink; \"></td>\n <td class=\"Total\">2</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">Total</th>\n <td class=\"Total\">3079</td>\n <td class=\"Total\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\">3079</td>\n <td class=\"Total\">830</td>\n <td class=\"Total\"></td>\n <td class=\"Total\">830</td>\n <td class=\"Total\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"4\">CrossCountry</th>\n <th class=\"RowHeader\" style=\"background-color: plum; \">A</th>\n <td class=\"Cell\">21561</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">709</td>\n <td class=\"Total\">22270</td>\n <td class=\"Cell\">60</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">60</td>\n <td class=\"Total\">22330</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">C</th>\n <td class=\"Cell\">546</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">23</td>\n <td class=\"Total\">569</td>\n <td class=\"Cell\">2</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">2</td>\n <td class=\"Total\">571</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: plum; \">R</th>\n <td class=\"Cell\">26</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">26</td>\n <td class=\"Cell\">1</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">1</td>\n <td class=\"Total\">27</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">Total</th>\n <td class=\"Total\">22133</td>\n <td class=\"Total\"></td>\n <td class=\"Total\">732</td>\n <td class=\"Total\">22865</td>\n <td class=\"Total\">63</td>\n <td class=\"Total\"></td>\n <td class=\"Total\">63</td>\n <td class=\"Total\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \" rowspan=\"4\">London Midland</th>\n <th class=\"RowHeader\" style=\"background-color: plum; \">A</th>\n <td class=\"Cell\">5534</td>\n <td class=\"Cell\">8599</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">14133</td>\n <td class=\"Cell\">5520</td>\n <td 
class=\"Cell\">27331</td>\n <td class=\"Total\">32851</td>\n <td class=\"Total\">46984</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">C</th>\n <td class=\"Cell\">101</td>\n <td class=\"Cell\">235</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">336</td>\n <td class=\"Cell\">67</td>\n <td class=\"Cell\">847</td>\n <td class=\"Total\">914</td>\n <td class=\"Total\">1250</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">R</th>\n <td class=\"Cell\">3</td>\n <td class=\"Cell\">15</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">18</td>\n <td class=\"Cell\">4</td>\n <td class=\"Cell\">23</td>\n <td class=\"Total\">27</td>\n <td class=\"Total\">45</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">Total</th>\n <td class=\"Total\">5638</td>\n <td class=\"Total\">8849</td>\n <td class=\"Total\"></td>\n <td class=\"Total\" style=\"background-color: palegreen; \">14487</td>\n <td class=\"Total\" style=\"background-color: palegreen; \">5591</td>\n <td class=\"Total\">28201</td>\n <td class=\"Total\">33792</td>\n <td class=\"Total\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \" rowspan=\"4\">Virgin Trains</th>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">A</th>\n <td class=\"Cell\">2028</td>\n <td class=\"Cell\">6331</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\" style=\"background-color: palegreen; \">8359</td>\n <td class=\"Cell\" style=\"background-color: palegreen; \"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\">8359</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">C</th>\n <td class=\"Cell\">107</td>\n <td class=\"Cell\">119</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\" style=\"background-color: palegreen; \">226</td>\n <td class=\"Cell\" style=\"background-color: palegreen; \"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\"></td>\n 
<td class=\"Total\">226</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">R</th>\n <td class=\"Cell\">2</td>\n <td class=\"Cell\">7</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\" style=\"background-color: palegreen; \">9</td>\n <td class=\"Cell\" style=\"background-color: palegreen; \"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\">9</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">Total</th>\n <td class=\"Total\">2137</td>\n <td class=\"Total\">6457</td>\n <td class=\"Total\"></td>\n <td class=\"Total\">8594</td>\n <td class=\"Total\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">Total</th>\n <th class=\"RowHeader\">&nbsp;</th>\n <td class=\"Total\">32987</td>\n <td class=\"Total\">15306</td>\n <td class=\"Total\">732</td>\n <td class=\"Total\">49025</td>\n <td class=\"Total\">6484</td>\n <td class=\"Total\">28201</td>\n <td class=\"Total\">34685</td>\n <td class=\"Total\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 753390) expect_identical(as.character(pt$getHtml()), html) }) }
/tests/testthat/test-B-15-findGrouptests.R
no_license
cran/pivottabler
R
false
false
62,255
r
library(testthat) # most common expectations: # equality: expect_equal() and expect_identical() # regexp: expect_match() # catch-all: expect_true() and expect_false() # console output: expect_output() # messages: expect_message() # warning: expect_warning() # errors: expect_error() escapeString <- function(s) { t <- gsub("(\\\\)", "\\\\\\\\", s) t <- gsub("(\n)", "\\\\n", t) t <- gsub("(\r)", "\\\\r", t) t <- gsub("(\")", "\\\\\"", t) return(t) } prepStr <- function(s) { t <- escapeString(s) u <- eval(parse(text=paste0("\"", t, "\""))) if(s!=u) stop("Unable to escape string!") t <- paste0("\thtml <- \"", t, "\"") utils::writeClipboard(t) return(invisible()) } evaluationMode <- "sequential" processingLibrary <- "dplyr" description <- "test: sequential dplyr" countFunction <- "n()" isDevelopmentVersion <- (length(strsplit(packageDescription("pivottabler")$Version, "\\.")[[1]]) > 3) testScenarios <- function(description="test", releaseEvaluationMode="batch", releaseProcessingLibrary="dplyr", runAllForReleaseVersion=FALSE) { isDevelopmentVersion <- (length(strsplit(packageDescription("pivottabler")$Version, "\\.")[[1]]) > 3) if(isDevelopmentVersion||runAllForReleaseVersion) { evaluationModes <- c("sequential", "batch") processingLibraries <- c("dplyr", "data.table") } else { evaluationModes <- releaseEvaluationMode processingLibraries <- releaseProcessingLibrary } testCount <- length(evaluationModes)*length(processingLibraries) c1 <- character(testCount) c2 <- character(testCount) c3 <- character(testCount) c4 <- character(testCount) testCount <- 0 for(evaluationMode in evaluationModes) for(processingLibrary in processingLibraries) { testCount <- testCount + 1 c1[testCount] <- evaluationMode c2[testCount] <- processingLibrary c3[testCount] <- paste0(description, ": ", evaluationMode, " ", processingLibrary) c4[testCount] <- ifelse(processingLibrary=="data.table", ".N", "n()") } df <- data.frame(evaluationMode=c1, processingLibrary=c2, description=c3, countFunction=c4, 
stringsAsFactors=FALSE) return(df) } context("FIND GROUP TESTS") scenarios <- testScenarios("find groups tests: simple: variableNames") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- pt$findColumnDataGroups(variableNames="TrainCategory") groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" 
colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 3) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: simple: variableValues") for(i in 1:nrow(scenarios)) { evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] 
countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- pt$findColumnDataGroups(variableValues=list("PowerType"=c("DMU", "HST"))) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td 
class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 3) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: simple: exclude totals") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, 
noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- pt$findColumnDataGroups(variableNames="TrainCategory", totals="exclude") groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n 
<td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 2) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: simple: only totals") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- 
pt$findColumnDataGroups(variableNames="TrainCategory", totals="only") groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n 
<th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 1) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: simple: includeDescendantGroups") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#FFFF00")) groups <- pt$findColumnDataGroups( variableValues=list("TrainCategory"="Ordinary Passenger"), includeDescendantGroup=TRUE) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n 
<th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #FFFF00; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td 
class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 4) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: combinations: variableNames") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findColumnDataGroups(matchMode="combinations", variableNames=c("TrainCategory", "PowerType")) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th 
class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td 
class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 8) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: combinations: variableValues") for(i in 1:nrow(scenarios)) { evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findColumnDataGroups(matchMode="combinations", variableValues=list("TrainCategory"="Express Passenger", "PowerType"=c("DMU", "HST"))) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" 
colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td 
class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 2) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: combinations: specific sub total") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findColumnDataGroups(matchMode="combinations", variableValues=list("TrainCategory"="Express Passenger", "PowerType"="**")) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" 
style=\"background-color: #00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 1) expect_identical(as.character(pt$getHtml()), 
html)
  })
}

# Scenario: find data groups by hierarchy level.
# pt$findColumnDataGroups(atLevels=2) should return all eight level-2 column
# headings (the PowerType headings and totals under each train category plus
# the grand total column); they are highlighted, cell values are unchanged.
scenarios <- testScenarios("find groups tests: atLevels")
for(i in seq_len(nrow(scenarios))) {  # seq_len() is safe if scenarios has zero rows (1:0 would iterate)
  if(!isDevelopmentVersion) break
  evaluationMode <- scenarios$evaluationMode[i]
  processingLibrary <- scenarios$processingLibrary[i]
  description <- scenarios$description[i]
  countFunction <- scenarios$countFunction[i]
  test_that(description, {
    library(pivottabler)
    # two-level column hierarchy (TrainCategory > PowerType), one-level rows (TOC)
    pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode,
                         compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE))
    pt$addData(bhmtrains)
    pt$addColumnDataGroups("TrainCategory")
    pt$addColumnDataGroups("PowerType")
    pt$addRowDataGroups("TOC")
    pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction)
    highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF"))
    groups <- pt$findColumnDataGroups(atLevels=2)
    groupCount <- lapply(groups, function(grp) {grp$style <- highlight})  # assignment only suppresses lapply's printed output
    pt$evaluatePivot()
    # pt$renderPivot()
    # sum(pt$cells$asMatrix(), na.rm=TRUE)
    # length(groups)
    # prepStr(as.character(pt$getHtml()))
    html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"1\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">HST</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: #00FFFF; \" colspan=\"1\"></th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Arriva Trains Wales</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">CrossCountry</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">London Midland</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Virgin Trains</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">32987</td>\n <td class=\"Cell\">15306</td>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">6484</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>"
    expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260)
    expect_equal(length(groups), 8)
    expect_identical(as.character(pt$getHtml()), html)
  })
}

# Scenario: find row data groups by minimum child count.
scenarios <- testScenarios("find groups tests: minChildCount")
for(i in seq_len(nrow(scenarios))) {  # seq_len() is safe if scenarios has zero rows
  if(!isDevelopmentVersion) break
evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addRowDataGroups("TOC") pt$addRowDataGroups("PowerType") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findRowDataGroups(minChildCount=3) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\" colspan=\"2\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\">Arriva Trains Wales</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"3\">CrossCountry</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22196</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">HST</th>\n <td class=\"Cell\">732</td>\n <td 
class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"3\">London Midland</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">11229</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">37050</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"3\">Virgin Trains</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">2137</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">6457</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <th class=\"RowHeader\" rowspan=\"1\"></th>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 3) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: maxChildCount") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- 
scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addRowDataGroups("TOC") pt$addRowDataGroups("PowerType") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) highlight <- PivotStyle$new(pt, "cellHighlight", list("background-color"="#00FFFF")) groups <- pt$findRowDataGroups(atLevels=1, maxChildCount=2) groupCount <- lapply(groups, function(grp) {grp$style <- highlight}) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\" colspan=\"2\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"2\">Arriva Trains Wales</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\">CrossCountry</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22196</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">HST</th>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">732</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">22865</td>\n <td 
class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\">London Midland</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">11229</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">37050</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\">Virgin Trains</th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">2137</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">6457</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\">Total</th>\n <th class=\"RowHeader\" rowspan=\"1\"></th>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 2) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find groups tests: outline groups") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode, 
compatibility=list(totalStyleIsCellStyle=TRUE, explicitHeaderSpansOfOne=TRUE, noDataGroupNBSP=TRUE)) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addRowDataGroups("TOC", outlineBefore=TRUE) pt$addRowDataGroups("PowerType") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) groups <- pt$findRowDataGroups(outlineGroups="only") pt$setStyling(groups=groups, declarations=list("background-color"="#00FFFF")) groupCount <- length(groups) pt$evaluatePivot() # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # length(groups) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\" colspan=\"2\">&nbsp;</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"1\">Total</th>\n </tr>\n <tr>\n <th class=\"OutlineRowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\" colspan=\"2\">Arriva Trains Wales</th>\n <td class=\"OutlineCell\" colspan=\"3\">&nbsp;</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\"></th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">3079</td>\n <td class=\"Cell\">830</td>\n <td class=\"Cell\">3909</td>\n </tr>\n <tr>\n <th class=\"OutlineRowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\" colspan=\"2\">CrossCountry</th>\n <td class=\"OutlineCell\" colspan=\"3\">&nbsp;</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\"></th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">22133</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22196</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">HST</th>\n <td class=\"Cell\">732</td>\n <td class=\"Cell\"></td>\n <td 
class=\"Cell\">732</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">22865</td>\n <td class=\"Cell\">63</td>\n <td class=\"Cell\">22928</td>\n </tr>\n <tr>\n <th class=\"OutlineRowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\" colspan=\"2\">London Midland</th>\n <td class=\"OutlineCell\" colspan=\"3\">&nbsp;</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\"></th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">5638</td>\n <td class=\"Cell\">5591</td>\n <td class=\"Cell\">11229</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">8849</td>\n <td class=\"Cell\">28201</td>\n <td class=\"Cell\">37050</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">14487</td>\n <td class=\"Cell\">33792</td>\n <td class=\"Cell\">48279</td>\n </tr>\n <tr>\n <th class=\"OutlineRowHeader\" style=\"background-color: #00FFFF; \" rowspan=\"1\" colspan=\"2\">Virgin Trains</th>\n <td class=\"OutlineCell\" colspan=\"3\">&nbsp;</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"3\"></th>\n <th class=\"RowHeader\" rowspan=\"1\">DMU</th>\n <td class=\"Cell\">2137</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">2137</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">EMU</th>\n <td class=\"Cell\">6457</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">6457</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <td class=\"Cell\">8594</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"1\">Total</th>\n <th class=\"RowHeader\" rowspan=\"1\"></th>\n <td class=\"Cell\">49025</td>\n <td class=\"Cell\">34685</td>\n <td class=\"Cell\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 502260) expect_equal(length(groups), 4) expect_identical(as.character(pt$getHtml()), html) }) } scenarios <- testScenarios("find 
groups tests: restrict by rows, cols, cells") for(i in 1:nrow(scenarios)) { if(!isDevelopmentVersion) break evaluationMode <- scenarios$evaluationMode[i] processingLibrary <- scenarios$processingLibrary[i] description <- scenarios$description[i] countFunction <- scenarios$countFunction[i] test_that(description, { library(pivottabler) pt <- PivotTable$new(processingLibrary=processingLibrary, evaluationMode=evaluationMode) pt$addData(bhmtrains) pt$addColumnDataGroups("TrainCategory") pt$addColumnDataGroups("PowerType") pt$addRowDataGroups("TOC") pt$addRowDataGroups("Status") pt$defineCalculation(calculationName="TotalTrains", summariseExpression=countFunction) pt$evaluatePivot() # find the data groups that are related to column 2 grps <- pt$findColumnDataGroups(columnNumbers=2) pt$setStyling(groups=grps, declarations=list("background-color"="yellow")) # find the DMU data groups that are related to columns 5 to 8 grps <- pt$findColumnDataGroups(columnNumbers=5:8, variableValues=list("PowerType"="DMU")) pt$setStyling(groups=grps, declarations=list("background-color"="lightgreen")) # find a totals data group that is related to the cell at (3, 7) cells <- pt$getCell(3, 7) pt$setStyling(cells=cells, declarations=list("background-color"="pink")) grps <- pt$findColumnDataGroups(variableValues=list("PowerType"="**"), cells=cells) pt$setStyling(groups=grps, declarations=list("background-color"="pink")) # find the data groups at the second level in the hierarchy that are related to row 3 grps <- pt$findRowDataGroups(atLevel=2, rowNumbers=3) pt$setStyling(groups=grps, declarations=list("background-color"="lightcyan")) # find the data groups related to rows 5 to 10 with Status "A" or "R" grps <- pt$findRowDataGroups(rowNumbers=5:10, variableValues=list("Status"=c("A", "R"))) pt$setStyling(groups=grps, declarations=list("background-color"="plum")) # find the data groups related to row 11:13 or cells in the range (12, 4) to (15, 5) cells <- pt$getCells(rowNumbers=12:15, 
columnNumbers=4:5, matchMode="combinations") pt$setStyling(cells=cells, declarations=list("background-color"="palegreen")) grps <- pt$findRowDataGroups(rowNumbers=11:13, cells=cells) pt$setStyling(groups=grps, declarations=list("background-color"="palegreen")) # pt$renderPivot() # sum(pt$cells$asMatrix(), na.rm=TRUE) # prepStr(as.character(pt$getHtml())) html <- "<table class=\"Table\">\n <tr>\n <th class=\"RowHeader\" rowspan=\"2\" colspan=\"2\">&nbsp;</th>\n <th class=\"ColumnHeader\" style=\"background-color: yellow; \" colspan=\"4\">Express Passenger</th>\n <th class=\"ColumnHeader\" colspan=\"3\">Ordinary Passenger</th>\n <th class=\"ColumnHeader\">Total</th>\n </tr>\n <tr>\n <th class=\"ColumnHeader\">DMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: yellow; \">EMU</th>\n <th class=\"ColumnHeader\">HST</th>\n <th class=\"ColumnHeader\">Total</th>\n <th class=\"ColumnHeader\" style=\"background-color: lightgreen; \">DMU</th>\n <th class=\"ColumnHeader\">EMU</th>\n <th class=\"ColumnHeader\" style=\"background-color: pink; \">Total</th>\n <th class=\"ColumnHeader\">&nbsp;</th>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"4\">Arriva Trains Wales</th>\n <th class=\"RowHeader\">A</th>\n <td class=\"Cell\">3018</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">3018</td>\n <td class=\"Cell\">815</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">815</td>\n <td class=\"Total\">3833</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">C</th>\n <td class=\"Cell\">59</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">59</td>\n <td class=\"Cell\">15</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">15</td>\n <td class=\"Total\">74</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: lightcyan; \">R</th>\n <td class=\"Cell\">2</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">2</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td 
class=\"Total\" style=\"background-color: pink; \"></td>\n <td class=\"Total\">2</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">Total</th>\n <td class=\"Total\">3079</td>\n <td class=\"Total\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\">3079</td>\n <td class=\"Total\">830</td>\n <td class=\"Total\"></td>\n <td class=\"Total\">830</td>\n <td class=\"Total\">3909</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" rowspan=\"4\">CrossCountry</th>\n <th class=\"RowHeader\" style=\"background-color: plum; \">A</th>\n <td class=\"Cell\">21561</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">709</td>\n <td class=\"Total\">22270</td>\n <td class=\"Cell\">60</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">60</td>\n <td class=\"Total\">22330</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">C</th>\n <td class=\"Cell\">546</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\">23</td>\n <td class=\"Total\">569</td>\n <td class=\"Cell\">2</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">2</td>\n <td class=\"Total\">571</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: plum; \">R</th>\n <td class=\"Cell\">26</td>\n <td class=\"Cell\"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">26</td>\n <td class=\"Cell\">1</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">1</td>\n <td class=\"Total\">27</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">Total</th>\n <td class=\"Total\">22133</td>\n <td class=\"Total\"></td>\n <td class=\"Total\">732</td>\n <td class=\"Total\">22865</td>\n <td class=\"Total\">63</td>\n <td class=\"Total\"></td>\n <td class=\"Total\">63</td>\n <td class=\"Total\">22928</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \" rowspan=\"4\">London Midland</th>\n <th class=\"RowHeader\" style=\"background-color: plum; \">A</th>\n <td class=\"Cell\">5534</td>\n <td class=\"Cell\">8599</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">14133</td>\n <td class=\"Cell\">5520</td>\n <td 
class=\"Cell\">27331</td>\n <td class=\"Total\">32851</td>\n <td class=\"Total\">46984</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">C</th>\n <td class=\"Cell\">101</td>\n <td class=\"Cell\">235</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">336</td>\n <td class=\"Cell\">67</td>\n <td class=\"Cell\">847</td>\n <td class=\"Total\">914</td>\n <td class=\"Total\">1250</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">R</th>\n <td class=\"Cell\">3</td>\n <td class=\"Cell\">15</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\">18</td>\n <td class=\"Cell\">4</td>\n <td class=\"Cell\">23</td>\n <td class=\"Total\">27</td>\n <td class=\"Total\">45</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">Total</th>\n <td class=\"Total\">5638</td>\n <td class=\"Total\">8849</td>\n <td class=\"Total\"></td>\n <td class=\"Total\" style=\"background-color: palegreen; \">14487</td>\n <td class=\"Total\" style=\"background-color: palegreen; \">5591</td>\n <td class=\"Total\">28201</td>\n <td class=\"Total\">33792</td>\n <td class=\"Total\">48279</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \" rowspan=\"4\">Virgin Trains</th>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">A</th>\n <td class=\"Cell\">2028</td>\n <td class=\"Cell\">6331</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\" style=\"background-color: palegreen; \">8359</td>\n <td class=\"Cell\" style=\"background-color: palegreen; \"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\">8359</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">C</th>\n <td class=\"Cell\">107</td>\n <td class=\"Cell\">119</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\" style=\"background-color: palegreen; \">226</td>\n <td class=\"Cell\" style=\"background-color: palegreen; \"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\"></td>\n 
<td class=\"Total\">226</td>\n </tr>\n <tr>\n <th class=\"RowHeader\" style=\"background-color: palegreen; \">R</th>\n <td class=\"Cell\">2</td>\n <td class=\"Cell\">7</td>\n <td class=\"Cell\"></td>\n <td class=\"Total\" style=\"background-color: palegreen; \">9</td>\n <td class=\"Cell\" style=\"background-color: palegreen; \"></td>\n <td class=\"Cell\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\">9</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">Total</th>\n <td class=\"Total\">2137</td>\n <td class=\"Total\">6457</td>\n <td class=\"Total\"></td>\n <td class=\"Total\">8594</td>\n <td class=\"Total\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\"></td>\n <td class=\"Total\">8594</td>\n </tr>\n <tr>\n <th class=\"RowHeader\">Total</th>\n <th class=\"RowHeader\">&nbsp;</th>\n <td class=\"Total\">32987</td>\n <td class=\"Total\">15306</td>\n <td class=\"Total\">732</td>\n <td class=\"Total\">49025</td>\n <td class=\"Total\">6484</td>\n <td class=\"Total\">28201</td>\n <td class=\"Total\">34685</td>\n <td class=\"Total\">83710</td>\n </tr>\n</table>" expect_equal(sum(pt$cells$asMatrix(), na.rm=TRUE), 753390) expect_identical(as.character(pt$getHtml()), html) }) }