blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a5b7251e73fd2fb483ba593aca8f87e9b2933231
|
14742f3be11eaeffcb46a7d92e336ed038d1b668
|
/R_script_inventory/83357india.R
|
8ecbb7bc1498095125037b5cd4d6cf4882be1c42
|
[] |
no_license
|
ShawnPengxy/loupeman
|
c4eeace02ee7e04545b927d1e6efb98877edee3c
|
a92a19162a13373e4991eef5a33fbfb78abb9bd9
|
refs/heads/master
| 2020-04-04T19:18:39.957219
| 2015-10-08T09:51:00
| 2015-10-08T09:51:00
| 41,746,718
| 1
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 1,662
|
r
|
83357india.R
|
# Clean and normalize diamond-inventory export 83357 ("india" supplier sheet)
# and write a standardized CSV for downstream processing.
library(reshape)
importdata<-`83357india` # import; variable name conversion below
importdata<-rename(importdata, c(Shape="shape",Weight="carat", Color="color",Clarity="clarity",Cut="cut", Polish="polish", Symm="symmetry", Fl="fluorescence",Lab="report", Report.No="reportno", LotID="stoneid",SC3="back", Rap.Rate="rapprice"))
# If most 'back' values are positive, flip the sign of the whole column so the
# convention is negative. (Presumably 'back' is a discount off the Rap price;
# TODO confirm with the data owner.)
if(length(which(importdata$back>0))>length(which(importdata$back<0))) importdata$back=-importdata$back
# Rap price arrives as a formatted string (e.g. "1,234"); strip thousands
# separators before converting to numeric.
importdata$rapprice<-as.numeric(gsub(",","",importdata$rapprice))
# Fuse the three measurement columns into a single "a*b*c" string per stone.
measurement<-c(unlist(paste(importdata$Ms1,importdata$Ms2,importdata$Ms3, sep="*")))
rapnetid<-(rep(83357, length(measurement)))
# Placeholder vectors, filled in from the Remark field below; price stays NA.
milky<-rep(NA, length(measurement))
colsh<-rep(NA, length(measurement))
green<-rep(NA, length(measurement))
price<-rep(NA, length(measurement))
# Map shape code "BR" to the Chinese label for "round".
importdata$shape[which(importdata$shape=="BR")]<-"圆形"
# Milky stones: labels mean "no brown tint" / "no green tint" / "milky".
temp2<-importdata$Remark%in%c("Milky\n","Milky","ML","ML-1")
colsh[temp2]<-"无咖"
green[temp2]<-"无绿"
milky[temp2]<-"带奶"
# Brown-tinted stones: "brown tint" / "no green tint" / "not milky".
temp2<-importdata$Remark%in%c("Br","BR","Brown","Brown ","Dark br ","DARK BROWN ","Faint Brown")
colsh[temp2]<-"带咖"
green[temp2]<-"无绿"
milky[temp2]<-"无奶"
# Green-tinted stones: "no brown tint" / "green tint" / "not milky".
temp2<-importdata$Remark%in%c("Green")
colsh[temp2]<-"无咖"
green[temp2]<-"带绿"
milky[temp2]<-"无奶"
# Assemble the output: derived columns first, then the renamed import columns.
OPut<-cbind(rapnetid, colsh)
OPut<-cbind(OPut, milky)
OPut<-cbind(OPut, green)
OPut<-cbind(OPut, price)
OPut<-cbind(OPut, measurement)
OOPut<-cbind(OPut, importdata)
# Select and order the final column set, then write the normalized CSV.
Myvars<-c("shape","carat","color","clarity","cut","polish","symmetry","fluorescence","colsh","milky","green","measurement","report","reportno","rapnetid","stoneid","back","rapprice","price")
Fin<-OOPut[Myvars]
write.csv(Fin,file="./R_input/83357india.csv",row.names = F)
|
eb936ed8f0ccbfbb7b56295f64d9e9073bb17baa
|
13f2a4c289474781cd73199154361d7e3da795dc
|
/plot3.R
|
7c237290f3b76b648865fdbc7c8e97ac27639a14
|
[] |
no_license
|
pdiazs/ExData_Plotting1
|
8df4b82b1ccf72068f3c8ddd328d7f9e71384405
|
4b6744f03f2a71fac9f1cf1af39a36287844dc4c
|
refs/heads/master
| 2021-01-14T12:11:21.116209
| 2014-05-07T19:55:56
| 2014-05-07T19:55:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,278
|
r
|
plot3.R
|
## plot3.R -- Energy sub-metering plot for the household power consumption data.
## Reads the raw data, subsets the two target days (2007-02-01 and 2007-02-02),
## and draws the three sub-metering series to a 480x480 PNG.
## The script assumes G:/R/Coursera Explora/ as path. Change and adapt to your needs
household_power_consumption <- read.csv("G:/R/Coursera Explora/exdata_data_household_power_consumption/household_power_consumption.txt", sep=";")
View(household_power_consumption)
str(household_power_consumption)
## Transform Data into adequate classes, subset the two days of interest.
household_power_consumption$Date <- strptime(household_power_consumption$Date, "%d/%m/%Y")
sub.hpcs <- subset(household_power_consumption, as.Date(Date) >= '2007-02-01'& as.Date(Date)< '2007-02-03')
## Raw file encodes missing values as "?"; replace with NA before coercion.
sub.hpcs[ sub.hpcs == "?" ] <- NA
sub.hpcs$Sub_metering_1<-as.numeric(as.character(sub.hpcs$Sub_metering_1))
sub.hpcs$Sub_metering_2<-as.numeric(as.character(sub.hpcs$Sub_metering_2))
sub.hpcs$Sub_metering_3<-as.numeric(as.character(sub.hpcs$Sub_metering_3))
## Auxiliary variables: axis day labels and legend labels.
rot<-c("Thu", "Fri","Sat")
rot2<-c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
## Graphic device
## BUG FIX: this script is plot3.R but previously wrote 'plot4.png'.
png('plot3.png',width = 480, height = 480)
plot(sub.hpcs$Sub_metering_1, type="l",ylab="Energy sub metering",xlab="", xaxt = 'n')
## 1440 minutes per day: tick marks at the start of each day.
axis(1, at=c(1,1440,2880), labels=rot[1:3])
lines(sub.hpcs$Sub_metering_2, col=2)
lines(sub.hpcs$Sub_metering_3, col=3)
legend(x="topright", rot2, lwd=1, col=c(1,2,3))
dev.off()
|
7c5c06a96ac32777f31b2f6571a07c907f86ea39
|
40e0e1165c26b3c024c6689861c59efd6151c81a
|
/R/response-salesforce.R
|
475d4a0ed1f1ce4c2b52592e877fe72e6ab2b73a
|
[] |
no_license
|
nteetor/jedi
|
7d539ab48b8518fabf0f3d017e1dfbbf9daa5950
|
3888b54694378b23ef8bdc368810bc82a2619269
|
refs/heads/master
| 2016-09-06T18:52:50.675419
| 2015-08-28T20:59:47
| 2015-08-28T20:59:47
| 41,367,238
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 421
|
r
|
response-salesforce.R
|
#' HTTP error checking
#'
#' Wraps \code{httr::stop_for_status()} so that the parsed Salesforce error
#' payload is surfaced as the error message instead of the generic HTTP
#' status text.
#'
#' @importFrom httr stop_for_status content
#' @importFrom magrittr %>%
#' @export
check_salesforce_response <- function(res){
  report_salesforce_error <- function(e) {
    # Flatten the parsed response body into a single space-separated message.
    msg_parts <- unname(unlist(httr::content(res)))
    stop(paste(msg_parts, collapse = ' '), call. = FALSE)
  }
  tryCatch(httr::stop_for_status(res), error = report_salesforce_error)
}
|
2d0a3c1dfa2942ad7e8d95c71118541cc35799eb
|
26c8f804713c611eb5c931d09a2a71f526ead746
|
/globals.r
|
16b18cbd050d03cfea040f0470a339d040850b79
|
[] |
no_license
|
djinnome/crop
|
f57c2daec28bb040af87164706756a9f01f07a45
|
c3f409c5efc4d518b560b160e4afdb78d7824aec
|
refs/heads/master
| 2021-01-11T06:36:15.885581
| 2017-02-06T21:19:23
| 2017-02-06T21:19:23
| 81,134,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,590
|
r
|
globals.r
|
##jeremy zucker
##7.20.2011
##globals.r
##
## Startup script for the FBA farm pipeline: loads the solver and sparse
## matrix libraries, then sources every helper script used by the pipeline.
## All source() paths are relative to the hard-coded project directory below.
library('Rcplex')
suppressMessages(library('Matrix'))
options(stringsAsFactors=FALSE)
setwd('/msc/neurospora/FBA/farm')
source('check_ko.r'); source('check_fba.r')
source('check_rxn.r')
source('check_new_media.r'); source('changeNuts.r')
source('cut_mets.r'); source('cut_revs.r')
source('gapfind.r')
source('fba.r'); source('fba_dual.r'); source('fba_na.r')
source('subset_mat.r'); source('flux_of_met.r'); source('minFlux4met.r')
source('fva_fcns.r'); source('tight_ub_fcns.r')
source('read_fba.r')
source('subset_rxns.r')
source('trace_mets.r')
source('min_export.r')
source('min_met_adj.r')
source('min_pen.r')
source('parse_supp.r'); source('test_supp.r')
source('get_biomass_mass.r')
source('irrev2rev.r')
source('predict_biolog.r')
source('call_check_ko.r')
source('metab_dilute_s.r')
source('merge_DBs.r')
source('make_sink.r')
source('ncu2rxns.r')
source('probs4rxns.r')
source('thermo_fcns.r')
source('make_rxn_annot.r')
source('get_x0.r')
source('get_x0_simple.r')
source('get_x0_md.r')
source('which_col.r')
source('get_met_mat.r')
source('parse_smm.r')
source('get_imbalance.r')
source('x2beta.r')
source('named_vec.r')
source('sparse_mat_fcns.r')
source('get_md_constraints_3c.r')
source('run_get_md_constraints_3c.r')
## FIX: removed redundant duplicate source() calls -- named_vec.r,
## make_grid.r and my_rectangle.r were each sourced twice.
source('get_dual_constraints.r')
source('biolog_confusion.r')
source('run_pred_biolog.r')
source('get_gene_supp_mat.r')
source('make_grid.r')
source('my_rectangle.r')
source('multiGeneKO.r')
source('sl_add_names.r')
Sys.setlocale("LC_ALL", "C")
|
bc9f5ff9b36c021ddbc3c9554d1b95ed92f0179f
|
f153381432a864aa0f1cf789d27aa2e0aba00614
|
/man/pad_sequences.Rd
|
9a0692074809bac0c1bec972b29117e6b561c6e8
|
[] |
no_license
|
rdrr1990/keras
|
0f997cf8632f6db623afcdb376ea8c258923e094
|
72b510456f15f5570388d0e610aa4917f1f1674b
|
refs/heads/master
| 2021-05-06T06:42:08.086819
| 2017-12-30T00:11:11
| 2017-12-30T00:11:11
| 113,892,962
| 2
| 0
| null | 2017-12-11T18:19:25
| 2017-12-11T18:19:24
| null |
UTF-8
|
R
| false
| true
| 1,363
|
rd
|
pad_sequences.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{pad_sequences}
\alias{pad_sequences}
\title{Pads each sequence to the same length (length of the longest sequence).}
\usage{
pad_sequences(sequences, maxlen = NULL, dtype = "int32", padding = "pre",
truncating = "pre", value = 0)
}
\arguments{
\item{sequences}{List of lists where each element is a sequence}
\item{maxlen}{int, maximum length}
\item{dtype}{type to cast the resulting sequence.}
\item{padding}{'pre' or 'post', pad either before or after each sequence.}
\item{truncating}{'pre' or 'post', remove values from sequences larger than maxlen either in the beginning or in the end of the sequence}
\item{value}{float, padding value used to fill the sequences to the desired length.}
}
\value{
Array with dimensions (number_of_sequences, maxlen)
}
\description{
Pads each sequence to the same length (length of the longest sequence).
}
\details{
If maxlen is provided, any sequence longer than maxlen is truncated to maxlen.
Truncation happens off either the beginning (default) or
the end of the sequence. Supports post-padding and pre-padding (default).
}
\seealso{
Other text preprocessing: \code{\link{make_sampling_table}},
\code{\link{skipgrams}},
\code{\link{text_hashing_trick}},
\code{\link{text_one_hot}},
\code{\link{text_to_word_sequence}}
}
|
a043ce121e13f382e9fb1f8318c92273514ef41c
|
84141707307dd1e74bfd58d8a23e418728f34037
|
/R/rp_optimize.R
|
f05727b8e34d240a1c1b4e6597bfa0136d11a6ff
|
[] |
no_license
|
rossb34/RcppRP
|
c6c61562d027eecb63fb6823fcbe07bc0af05740
|
7500bcfca6351d63abb60e8fa9931b6a9619ba80
|
refs/heads/master
| 2016-09-06T07:12:02.387358
| 2013-11-13T04:40:12
| 2013-11-13T04:40:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,410
|
r
|
rp_optimize.R
|
#' V1 Random portfolio optimization
#'
#' Generates (or accepts) a set of random portfolios, computes portfolio
#' moments once via \code{momentFUN}, evaluates a single named objective over
#' all portfolios with \code{rcpp_rp_optimize}, and returns the best row
#' (objective value in column "out" plus the portfolio weights), where best
#' is the minimum of "out".
#'
#' @param R matrix (or xts) of asset returns; extra columns are dropped so
#'   only the portfolio's assets remain
#' @param objective one of "max_return", "min_variance", "min_sd", "max_qu",
#'   "max_sharpe"
#' @param search_size number of random portfolios to generate when \code{rp}
#'   is not supplied
#' @param portfolio an object of class 'portfolio'
#' @param rp optional n x m matrix of weights (n assets, m portfolios); an
#'   m x n matrix is transposed automatically
#' @param momentFUN function, or name of one, used to compute portfolio
#'   moments; matching arguments are pulled from \dots
#' @param \dots may include \code{method} and \code{max_permutations} for
#'   random portfolio generation, \code{risk_aversion} for "max_qu", and
#'   arguments forwarded to \code{momentFUN}
rp_optimize_v1 <- function(R, objective, search_size=2000, portfolio=NULL, rp=NULL, momentFUN="set_moments", ...){
  if(!inherits(x=portfolio, "portfolio")) stop("portfolio object passed in must be of class 'portfolio'")
  call <- match.call()
  # R <- checkData(R)
  N <- length(portfolio$assets)
  if (ncol(R) > N) {
    R <- R[, names(portfolio$assets)]
  }
  if(!is.null(rp)){
    # The user has passed in a matrix for rp, we do not need to generate
    if(dim(rp)[2] == ncol(R)){
      rp <- t(rp)
    } else if(dim(rp)[1] != ncol(R)){
      stop("rp must be an n x m matrix where n is equal to the number of assets
         and m is equal to the number of portfolios to test.")
    }
  } else {
    if(hasArg(method)) method=match.call(expand.dots=TRUE)$method else method="sample"
    if(hasArg(max_permutations)) max_permutations=match.call(expand.dots=TRUE)$max_permutations else max_permutations=200
    # BUG FIX: 'method' was resolved above but a hard-coded "sample" was
    # passed to rcpp_random_portfolios, silently ignoring a user-supplied
    # method.
    rp <- rcpp_random_portfolios(portfolio=portfolio, method=method, n_portfolios=search_size, max_permutations=max_permutations)
    rp <- t(rp)
  }
  rownames(rp) <- paste("w", names(portfolio$assets), sep=".")
  # print(rp)
  dotargs <- list(...)
  # set portfolio moments only once
  if(!is.function(momentFUN)){
    momentFUN <- match.fun(momentFUN)
  }
  # TODO FIXME should match formals later
  .mformals <- dotargs
  .formals <- formals(momentFUN)
  onames <- names(.formals)
  if (length(.mformals)) {
    dargs <- .mformals
    pm <- pmatch(names(dargs), onames, nomatch = 0L)
    # NOTE(review): this renames a temporary copy of dargs, so the rename has
    # no effect; kept as-is to preserve the existing matching behavior.
    names(dargs[pm > 0L]) <- onames[pm]
    .formals[pm] <- dargs[pm > 0L]
  }
  .formals$R <- R
  # .formals$portfolio <- portfolio
  .formals$... <- NULL
  # If no dotargs are passed in, .formals was a pairlist and do.call was failing
  if(!inherits(.formals, "list")) .formals <- as.list(.formals)
  mout <- try((do.call(momentFUN, .formals)) ,silent=TRUE)
  if(inherits(mout,"try-error")) {
    stop(paste("portfolio moment function failed with message",mout))
  }
  # print(mout)
  valid_objectives <- c("max_return", "min_variance", "min_sd", "max_qu", "max_sharpe")
  if(!(objective %in% valid_objectives)) stop("Invalid objective")
  if(objective == "max_qu"){
    # BUG FIX: the condition was inverted (!hasArg(risk_aversion)), so a
    # user-supplied risk_aversion was ignored and the default was used
    # whenever it WAS supplied.
    if(hasArg(risk_aversion)) risk_aversion=match.call(expand.dots=TRUE)$risk_aversion else risk_aversion=2
    # NOTE(review): risk_aversion is resolved here but not forwarded to
    # rcpp_rp_optimize below -- confirm whether it should be passed through.
  }
  opt <- rcpp_rp_optimize(rp=rp, args_list=mout, objective=objective)
  colnames(opt) <- "out"
  rp_results <- cbind(opt, t(rp))
  # print(rp_results)
  # Return the single best row: minimum "out" value plus its weights.
  rp_results[which.min(rp_results[,"out"]), ]
}
#' V2 Random portfolio optimization
#'
#' Like \code{rp_optimize_v1}, but the objectives are taken from
#' \code{portfolio$objectives} and evaluated with
#' \code{rcpp_rp_optimize_v2}. Returns a list with the full results matrix
#' and the optimal row (minimum "out").
#'
#' @param R matrix (or xts) of asset returns; extra columns are dropped so
#'   only the portfolio's assets remain
#' @param objective unused in v2; objectives come from
#'   \code{portfolio$objectives} (kept for signature compatibility)
#' @param portfolio an object of class 'portfolio' whose objectives use only
#'   the names "mean", "sd", "var"
#' @param search_size number of random portfolios to generate when \code{rp}
#'   is not supplied
#' @param rp optional n x m matrix of weights (n assets, m portfolios); an
#'   m x n matrix is transposed automatically
#' @param momentFUN function, or name of one, used to compute portfolio
#'   moments; matching arguments are pulled from \dots
#' @param \dots may include \code{method} and \code{max_permutations} for
#'   random portfolio generation, plus arguments forwarded to \code{momentFUN}
rp_optimize_v2 <- function(R, objective=NULL, portfolio=NULL, search_size=2000, rp=NULL, momentFUN="set_moments", ...){
  if(!inherits(x=portfolio, "portfolio")) stop("portfolio object passed in must be of class 'portfolio'")
  call <- match.call()
  # R <- checkData(R)
  N <- length(portfolio$assets)
  if (ncol(R) > N) {
    R <- R[, names(portfolio$assets)]
  }
  if(!is.null(rp)){
    # The user has passed in a matrix for rp, we do not need to generate
    if(dim(rp)[2] == ncol(R)){
      rp <- t(rp)
    } else if(dim(rp)[1] != ncol(R)){
      stop("rp must be an n x m matrix where n is equal to the number of assets
         and m is equal to the number of portfolios to test.")
    }
  } else {
    if(hasArg(method)) method=match.call(expand.dots=TRUE)$method else method="sample"
    if(hasArg(max_permutations)) max_permutations=match.call(expand.dots=TRUE)$max_permutations else max_permutations=200
    # BUG FIX: 'method' was resolved above but a hard-coded "sample" was
    # passed to rcpp_random_portfolios, silently ignoring a user-supplied
    # method.
    rp <- rcpp_random_portfolios(portfolio=portfolio, method=method, n_portfolios=search_size, max_permutations=max_permutations)
    rp <- t(rp)
  }
  rownames(rp) <- paste("w", names(portfolio$assets), sep=".")
  # print(rp)
  dotargs <- list(...)
  # set portfolio moments only once
  if(!is.function(momentFUN)){
    momentFUN <- match.fun(momentFUN)
  }
  # TODO FIXME should match formals later
  .mformals <- dotargs
  .formals <- formals(momentFUN)
  onames <- names(.formals)
  if (length(.mformals)) {
    dargs <- .mformals
    pm <- pmatch(names(dargs), onames, nomatch = 0L)
    # NOTE(review): this renames a temporary copy of dargs, so the rename has
    # no effect; kept as-is to preserve the existing matching behavior.
    names(dargs[pm > 0L]) <- onames[pm]
    .formals[pm] <- dargs[pm > 0L]
  }
  .formals$R <- R
  # .formals$portfolio <- portfolio
  .formals$... <- NULL
  # If no dotargs are passed in, .formals was a pairlist and do.call was failing
  if(!inherits(.formals, "list")) .formals <- as.list(.formals)
  mout <- try((do.call(momentFUN, .formals)) ,silent=TRUE)
  if(inherits(mout,"try-error")) {
    stop(paste("portfolio moment function failed with message",mout))
  }
  # print(mout)
  valid_names <- c("mean", "sd", "var")
  obj_names <- unlist(lapply(portfolio$objectives, function(x) x$name))
  if(!all(obj_names %in% valid_names)) stop("Invalid objective name")
  opt2 <- rcpp_rp_optimize_v2(rp=rp, objectives_list=portfolio$objectives, args_list=mout)
  rownames(opt2) <- c(obj_names, "out")
  rp_results <- t(rbind(opt2, rp))
  # print(rp_results)
  # Optimal portfolio: the row with the minimum combined objective "out".
  optimal <- rp_results[which.min(rp_results[,"out"]), ]
  list(rp_results=rp_results, optimal=optimal)
}
|
b211bac5d58a596a0774fe73c343b8e22d6d7d85
|
db5021657865118d2c48c41d852cd766e6657913
|
/man/plot.multiplot.Rd
|
d5ec8d4502f99dccc6c8b492ab9c779b5053412e
|
[
"MIT"
] |
permissive
|
GSuvorov/sentR
|
31cbea2ee29a4afdfa51ba5b1bdad0bf7d813e92
|
5ee173e1ed025b824784c7a892eb38c42bf6824d
|
refs/heads/master
| 2021-01-19T03:35:56.981465
| 2016-09-14T04:53:24
| 2016-09-14T04:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 442
|
rd
|
plot.multiplot.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.multiplot}
\alias{plot.multiplot}
\title{Multiplot (ggplot) Utility}
\usage{
\method{plot}{multiplot}(..., plotlist = NULL, cols)
}
\arguments{
\item{plotlist}{the list of plots}
\item{\dots}{the plots to stitch together}
\item{cols}{the number of columns for the plot layout}
}
\description{
Provides a utility to plot multiple ggplot instances together
}
|
7592d0b47aa03a0e235a6406126972cf971a2faa
|
c0b759b690e6de46933b9dc5f5d5decbd12895cd
|
/R/FourierStats-package.r
|
6e6ffb3300e5013d3b5d5de544e2f065a7967950
|
[] |
no_license
|
bakerdh/FourierStats
|
d54800478f26737d63858f756495217c2931cde4
|
e60aef797f04c3c68894681f3c7070cb52ea5e7f
|
refs/heads/master
| 2023-07-31T22:05:08.568685
| 2021-09-16T05:25:41
| 2021-09-16T05:25:41
| 321,050,967
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,497
|
r
|
FourierStats-package.r
|
#' Documentation for FourierStats R package
#' The FourierStats package implements several statistical tests for use with bivariate data such as the complex (real and imaginary) components of the Fourier spectrum. Included functions implement the T-squared-circ test of Victor & Mast (1991), and the condition index and ANOVA-squared-circ tests described by Baker (2021).
#' Functions include:
#' - analysecplx: a function to analyse complex Fourier data, following the guidelines from the Baker (2021) paper
#' - tsqc.test: implementation of the T-squared-circ test of Victor & Mast (1991)
#' - tsqh.test: Hotelling's T-squared test (one and two sample versions)
#' - CI.test: condition index test, to test the assumptions of the T-squared-circ test
#' - anovacirc.test: implementation of one-way between subjects and repeated measures ANOVA-squared-circ test
#' - amperrors: function to calculate error bars for amplitudes incorporating coherent averaging
#' - pairwisemahal: calculates the pairwise Mahalanobis distance between two group means
#' - clustercorrect: implements cluster correction described by Maris & Oostenveld (2007) for multivariate T-tests
#' - getel: helper function that calculates the bounding ellipse for a cloud of points
#' - fftshift: helper function that performs the quadrant shift of a 2D Fourier spectrum
#'
#' package available from: https://github.com/bakerdh/FourierStats
#' for further details see: http://arxiv.org/abs/2101.04408
#' DHB 12/01/21
|
b727b92915ec6e344f607eccced50b29544020c0
|
be7bdeae33d212cbb850f83499ae2d3bd9092391
|
/man/filterXts.Rd
|
e916d56717a3462a97d681a02db9f88212d66cd1
|
[] |
no_license
|
NanisTe/dataSciencePack
|
fc36bc9a4e272f9ef685151581a164244cc2bf9b
|
c0a9c3451d7a7b05d0f1d621db18708fbd8ee422
|
refs/heads/master
| 2020-05-26T23:58:44.091152
| 2020-01-06T12:58:28
| 2020-01-06T12:58:52
| 188,417,039
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,137
|
rd
|
filterXts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filterXts.R
\name{filterXts}
\alias{filterXts}
\title{Filter data.frame with xts syntax}
\usage{
filterXts(df, xtsstring, tzone = "UTC", by_colname = "Datetime")
}
\arguments{
\item{df}{a data.frame with at least one column of POSIXct values.}
\item{xtsstring}{a string which describes the desired date range to be
filtered out (see \code{\link[xts]{xts}}).}
\item{tzone}{give the timezone format to be used. default = "UTC"}
\item{by_colname}{give the column name of the datetime column to be used.}
}
\value{
a filtered data.frame which is a subset of the original data.frame \code{df}.
(Implementation note: internally a named character vector is prepared for the
\code{by =} argument of the join function; its name must be the column name
of the matching column from the original data.frame.)
}
\description{
This function converts your data.frame which must have a column with POSIXct
values into an xts object and filters the xts object according to the
argument xtsstring. It converts the filtered xts back into a data.frame
and returns it.
}
|
adb7297fb55637bf2f2a9b5051c3a52ac4c21993
|
74ef91ee3475da842cf3f9700d2c2e7fdf5998a0
|
/man/read_expression_xml.Rd
|
e05bae9159b22fb5c2e4f5b2e49dca8d9fa51853
|
[
"MIT"
] |
permissive
|
ramiromagno/geisha
|
cd7c2cfce6dd77ce3d9f582d74cc04858a9519eb
|
10bada460e090c135970057827fc35fc13789dc3
|
refs/heads/master
| 2023-06-27T12:48:15.885046
| 2021-07-24T09:25:05
| 2021-07-24T09:25:05
| 374,492,199
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 477
|
rd
|
read_expression_xml.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_expression_xml.R
\name{read_expression_xml}
\alias{read_expression_xml}
\title{Imports data-raw/expression.xml as a tibble}
\usage{
read_expression_xml()
}
\value{
Returns a tibble with (part of) the data present in \code{expression.xml}.
}
\description{
This function reads \code{data-raw/expression.xml} as a tibble. This function
is meant to be used only by the developer of this package.
}
|
57b3b3407c809666ee12d1db2f19024202f55e86
|
92d4c5c58feb1d1c4455bda494ec7dc36f34512b
|
/R/fitness.R
|
40aaf98f9ebfe373e4577d2bfc051fbc013deae3
|
[] |
no_license
|
Giatomo/CuRveR
|
097ab184561a1b59e511c18743ff20116cf1ce2e
|
33a71b3a11f1cc2fe16ad6790a5cf14585d44fe1
|
refs/heads/main
| 2023-04-14T02:56:42.960408
| 2022-07-19T15:12:19
| 2022-07-19T15:12:19
| 414,199,514
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,950
|
r
|
fitness.R
|
#' Least absolute deviation fitness
#'
#' Returns a closure over \code{model} that scores a parameter list as the
#' negated sum of absolute residuals (higher is better for a maximizer).
#' @export
least_absolute_deviation <- function(model) {
  function(parameters) {
    fitted <- do.call(model$equation, append(list(model$x), parameters))
    residuals <- model$y - fitted
    -sum(abs(residuals))
  }
}
#' Ordinary least squares fitness
#'
#' Returns a closure over \code{model} that scores a parameter list as the
#' negated sum of squared residuals (higher is better for a maximizer).
#' @export
ordinary_least_squares <- function(model) {
  function(parameters) {
    fitted <- do.call(model$equation, append(list(model$x), parameters))
    residuals <- model$y - fitted
    -sum(residuals^2)
  }
}
#' Tukey bisquare (biweight) fitness
#'
#' Returns a closure scoring a parameter list with the negated Tukey bisquare
#' loss of the scaled residuals. The loss used here is the standard bisquare
#' rho scaled by c^4: (c^6/6) * (1 - (1 - (z/c)^2)^3) for |z| < c, saturating
#' at its maximum c^6/6 beyond the cutoff.
#'
#' @param model list with $x, $y and $equation(x, ...)
#' @param scaling_method "aad" (mean absolute deviation about the mean),
#'   "mad" (median absolute deviation about the mean) or "sd"
#' @param c cutoff for the scaled residuals
#' @export
tukey_bisquare <- function(model, scaling_method = "aad", c = 9) {
  function(parameters) {
    scaling <- switch(scaling_method,
      "aad" = mean(abs(model$y - mean(model$y))),
      "mad" = median(abs(model$y - mean(model$y))),
      "sd" = sd(model$y)
    )
    r <- model$y - do.call(model$equation, append(list(model$x), parameters))
    z <- r / scaling
    p_z <- ((c^6) - (((c^2) - (z^2))^3)) / 6
    # BUG FIX: residuals beyond the cutoff previously got loss 0 -- i.e. LESS
    # penalty than moderate residuals. The bisquare loss saturates at its
    # maximum c^6/6 for |z| >= c.
    p_z[abs(z) >= c] <- (c^6) / 6
    return(-sum(p_z))
  }
}
#' Huber loss fitness
#'
#' Returns a closure scoring a parameter list with the negated Huber loss of
#' the scaled residuals: quadratic (z^2/2) inside the cutoff, linear
#' (c|z| - c^2/2) beyond it.
#'
#' @param model list with $x, $y and $equation(x, ...)
#' @param scaling_method "aad", "mad" or "sd" residual scale estimate
#' @param c cutoff between the quadratic and linear regimes
#' @export
huber <- function(model, scaling_method = "aad", c = 9) {
  function(parameters) {
    s <- switch(scaling_method,
      "aad" = mean(abs(model$y - mean(model$y))),
      "mad" = median(abs(model$y - mean(model$y))),
      "sd" = sd(model$y)
    )
    fitted <- do.call(model$equation, append(list(model$x), parameters))
    z <- (model$y - fitted) / s
    quadratic <- (z^2) / 2
    linear <- (c * abs(z)) - ((c^2) / 2)
    loss <- ifelse(abs(z) >= c, linear, quadratic)
    return(-sum(loss))
  }
}
#' Andrews' sine fitness
#'
#' Returns a closure scoring a parameter list with the negated Andrews sine
#' loss of the scaled residuals: 1 - cos(z) for |z| <= pi, saturating at its
#' maximum value 2 beyond pi.
#'
#' @param model list with $x, $y and $equation(x, ...)
#' @param scaling_method "aad", "mad" or "sd" residual scale estimate
#' @export
andrew <- function(model, scaling_method = "aad") {
  function(parameters) {
    scaling <- switch(scaling_method,
      "aad" = mean(abs(model$y - mean(model$y))),
      "mad" = median(abs(model$y - mean(model$y))),
      "sd" = sd(model$y)
    )
    r <- model$y - do.call(model$equation, append(list(model$x), parameters))
    z <- r / scaling
    p_z <- 1 - cos(z)
    # BUG FIX: residuals beyond pi previously got loss 0 -- less penalty than
    # moderate residuals. Andrews' loss saturates at 1 - cos(pi) = 2.
    p_z[abs(z) > pi] <- 2
    return(-sum(p_z))
  }
}
#' Jaeckel rank-dispersion fitness
#'
#' Returns a closure scoring a parameter list with the negated rank-weighted
#' sum of sorted absolute residuals, using weights |rank - (n + 1)/2|.
#' @export
jaeckel <- function(model) {
  function(parameters) {
    fitted <- do.call(model$equation, append(list(model$x), parameters))
    resid_sorted <- sort(abs(model$y - fitted))
    n <- length(resid_sorted)
    # Symmetric rank weights centered on the middle rank.
    weights <- abs(seq_len(n) - ((n + 1) / 2))
    -sum(weights * resid_sorted)
  }
}
|
5971a792632a764e5050fe47697fc2d7cd58b3ab
|
834ae363f18064560082ab638dbfc814fa72d76d
|
/R intro.R
|
cf566dbb1842f460f3372baf8d9f991e981d0492
|
[] |
no_license
|
vish3108/DSA-assignment
|
e6575111ea289b520f919f81b6d93fa7b0065de9
|
cb61aa1033a32c835c10876b235419da1f64ae94
|
refs/heads/main
| 2023-02-06T05:01:17.591398
| 2020-12-18T17:57:26
| 2020-12-18T17:57:26
| 322,667,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
R intro.R
|
# Introductory R: assignment, basic classes and arithmetic.
x <- 10
x
a <- 10
a
class(a)
# Reassigning changes the class: character, then logical.
a <- "Hello World"
class(a)
a <- TRUE
class(a)
a <- FALSE
class(a)
# Logical TRUE and logical FALSE are equivalent to 1 and 0 respectively.
a <- FALSE + TRUE
# basic calculations
a
factorial(x)
a^x
x*a^x
|
a674a2c48d1bbf6d05f86eaa068ce466430a41a2
|
97f677c4c25409815436fc2cb549b620c505a4cd
|
/analyse_results.R
|
6ba9752b3d5a95907567cf041fc4f692513e5914
|
[] |
no_license
|
ascelin/interdiscp_cons_sci
|
041c286b79aca9a40ae59d915d57c038ec6f155b
|
cee36aeb99dcc882e283f3919bec6a9c2fd722a5
|
refs/heads/master
| 2020-03-17T08:57:07.136931
| 2018-05-15T03:48:09
| 2018-05-15T03:48:09
| 133,455,810
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,468
|
r
|
analyse_results.R
|
# Rscript to analyse the refereces of Biological Conservation and Conservation Biology
# to run:
# source('analyse_results.R')
# NOTE(review): rm(list=ls()) wipes the caller's global environment when this
# script is sourced -- intentional here, but unsafe as a general practice.
rm(list=ls())
source('analyse_results_functions.R')
years.vec <- 1999:2015
#years.vec <- 2012:2015
# Set this global variable so that when importing the data, strings won't be treated as factors
options( stringsAsFactors=F )
# Read in the list of journals associated with each discipline.
# This functions returns a list where each element is a matrix of
# journals names for each different classification. The n parameter is
# the number to read in, -1 means all, use a small number (eg 10) for
# testing. Column 1 is the full names, column 2 is the abbreviated
# names.
journal.list <- read.in.journal.classifications( n=-1 )
# Loop through years and disciplines and count the total number of
# citations each year for each discipline
journal <- 'biological_conservation_articles'
results <- count.citations.over.years( years.vec, journal, journal.list )
# NOTE(review): the result for biological_conservation_articles above is
# overwritten here, so only conservation_biology_articles is plotted below --
# confirm whether both results should be kept.
journal <- 'conservation_biology_articles'
results <- count.citations.over.years( years.vec, journal, journal.list )
# make a quick plot of just a few journals to see if it worked.
# 3x2 grid: for each of three journal columns, total cites and cites/article.
par(mfrow = c(3,2) )
for( i in 3:5 ) {
plot( results$year, results[,i], type = 'b',
ylab=paste(colnames(results)[i],'(total cites)') )
plot( results$year, results[,i]/results$num.articles, type = 'b',
ylab=paste(colnames(results)[i],'(cites per article)' ) )
}
|
f60d31daa145df1c29f0e48c67e078fa58ff5a38
|
1c66e4bba54504196b8fff52b70fec02af3e042c
|
/z.R
|
133e5aa8998dd512994568e0d558322e7b23232e
|
[] |
no_license
|
dushoff/makeR
|
204343b785d86d720e2a872ca4a52fb6318e421a
|
bad9a2b71e669f61e4ffd22c9595205cc06497a1
|
refs/heads/master
| 2022-12-20T04:49:00.419646
| 2020-10-14T02:56:07
| 2020-10-14T02:56:07
| 277,214,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 112
|
r
|
z.R
|
# Load the makestuff helper functions, then inspect element "a" of the
# environments/lists named "x" and "y".
source("makestuff/makeRfuns.R")
# loadEnvironmentList() is defined in makeRfuns.R; presumably returns a named
# list of saved environments -- TODO confirm against makestuff.
el <- loadEnvironmentList()
el[["x"]][["a"]]
el[["y"]][["a"]]
## saveVars()
|
e268d3b79221adbd852e4138f667b9a697066a7d
|
7c74ad5c66ca63ac059ec28ca2ff82b82017a4d2
|
/man/checkArm.Rd
|
ec39f865e347610794b262d8ee28f877fc58d962
|
[] |
no_license
|
zrmacc/Temporal
|
8e2e455502a723299632a3b873575251935f1600
|
59c2ecca363fc2034935f0629c5f9c4b9d253bd3
|
refs/heads/master
| 2021-07-21T07:03:43.371429
| 2021-07-20T14:43:38
| 2021-07-20T14:43:38
| 145,231,053
| 5
| 1
| null | 2020-03-13T20:19:34
| 2018-08-18T15:27:03
|
R
|
UTF-8
|
R
| false
| true
| 291
|
rd
|
checkArm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Inputs.R
\name{CheckArm}
\alias{CheckArm}
\title{Check Arm}
\usage{
CheckArm(arm)
}
\arguments{
\item{arm}{0/1, treatment arm.}
}
\value{
None.
}
\description{
Check whether treatment arm is properly formatted.
}
|
7da2ab2a3baa3de14746ec151d4302e469925542
|
e766e5fde923a75fc0bb56577d2a9eea3fc336d4
|
/tests/testthat/test_reset.R
|
55cd2bd2477f5978009a82ffd371b76549a83575
|
[] |
no_license
|
jrthompson54/DGEobj
|
e269ebc218f7b4a81d0651a3806afca86f656942
|
9aa6c45c204dd1b4d393aa50af45f9602af65ce0
|
refs/heads/master
| 2022-06-02T09:19:04.339475
| 2022-05-21T20:00:32
| 2022-05-21T20:00:32
| 250,066,452
| 3
| 2
| null | 2022-05-21T20:00:33
| 2020-03-25T19:02:59
|
R
|
UTF-8
|
R
| false
| false
| 11,606
|
r
|
test_reset.R
|
# Tests for resetDGEobj(): a reset should restore a DGEobj to its
# post-initialization state (only the *_orig/base data items), preserving
# metadata and the "level" attribute, across subsetting, item removal,
# item addition, and class coercion.
# Fixtures (setup_failed, t_obj, t_dim) come from the testthat setup scripts.
context("reset.R functions")
skip_if(setup_failed)
test_that('reset.R: gene level data', {
test_t_obj <- t_obj
test_t_reset <- resetDGEobj(test_t_obj)
# object validation
expect_s3_class(test_t_reset, "DGEobj")
expect_equivalent(showMeta(test_t_reset), showMeta(test_t_obj))
# validate level is gene before and after reset
test_t_meta <- showMeta(test_t_obj)
expect_equal("gene", test_t_meta$Value[3])
expect_equal(attr(test_t_obj, "level"), "gene")
test_t_meta_reset <- showMeta(test_t_reset)
expect_equal("gene", test_t_meta_reset$Value[3])
expect_equal(attr(test_t_reset, "level"), "gene")
# reset after subsetting
expect_equal(dim(test_t_obj), t_dim)
test_t_obj <- test_t_obj[c(1:10), ]
test_t_reset <- resetDGEobj(test_t_obj)
# NOTE(review): hard-coded dims, presumably the full pre-subset size of the
# gene-level fixture -- confirm this matches t_dim.
expect_equal(dim(test_t_reset), c(1000, 48))
# check names after reset
expect_named(test_t_obj, c('counts_orig', 'counts', 'design_orig', 'design', 'geneData_orig',
'geneData', 'granges_orig', 'granges', 'DGEList', 'ReplicateGroupDesign',
'ReplicateGroupDesign_Elist', 'ReplicateGroupDesign_fit',
'ReplicateGroupDesign_fit_cm', 'ReplicateGroupDesign_fit_cf',
'BDL_vs_Sham', 'EXT1024_vs_BDL', 'Nint_vs_BDL', 'Sora_vs_BDL'))
expect_named(test_t_reset, c("counts_orig", "counts", "design_orig", "design",
"geneData_orig", "geneData", "granges_orig", "granges"))
# testing t_obj with rm item
test_t_obj <- rmItem(test_t_obj, "BDL_vs_Sham")
expect_false("BDL_vs_Sham" %in% names(test_t_obj))
test_t_reset <- resetDGEobj(test_t_obj)
expect_false("BDL_vs_Sham" %in% names(test_t_reset)) # should not be restored
# testing t_obj with add item
test_t_obj <- addItem(test_t_obj,
item = 'Fred Flintstone',
itemName = 'Cartoon',
itemType = 'meta',
itemAttr = list('MyAttribute' = 'testObject'))
test_t_attr <- getAttributes(test_t_obj)
expect_equivalent(attributes(test_t_obj$Cartoon), 'testObject')
expect_true("Cartoon" %in% names(test_t_obj))
test_t_reset <- resetDGEobj(test_t_obj)
# NOTE(review): expect_null() checks only its first argument; the second
# argument 'testObject' is consumed as the 'info' label, not compared.
expect_null(attributes(test_t_reset$Cartoon), 'testObject')
expect_false("Cartoon" %in% names(test_t_reset)) # check if removed
# testing t_obj after class change
test_t_obj <- as.list.DGEobj(test_t_obj)
expect_equal(class(test_t_obj), "list")
# coerce back into DGEobj, expect reset to work
class(test_t_obj) <- "DGEobj"
expect_silent(resetDGEobj(test_t_obj))
})
# test isoform level data
# Same resetDGEobj() contract as the gene-level test, against the
# isoform-level fixture t_isoform_obj; dims are taken from the fixture
# instead of being hard-coded.
test_that('reset.R: isoform level data', {
test_t_obj <- t_isoform_obj
test_t_reset <- resetDGEobj(test_t_obj)
# object validation
expect_s3_class(test_t_reset, "DGEobj")
expect_equivalent(showMeta(test_t_reset), showMeta(test_t_obj))
# validate level is isoform before and after reset
test_t_meta <- showMeta(test_t_obj)
expect_equal("isoform", test_t_meta$Value[3])
expect_equal(attr(test_t_obj, "level"), "isoform")
test_t_meta_reset <- showMeta(test_t_reset)
expect_equal("isoform", test_t_meta_reset$Value[3])
expect_equal(attr(test_t_reset, "level"), "isoform")
# reset after subsetting: reset should restore the original dimensions
test_t_dim <- dim(t_isoform_obj)
expect_equal(dim(test_t_obj),test_t_dim)
test_t_obj <- test_t_obj[c(1:10), ]
test_t_reset <- resetDGEobj(test_t_obj)
expect_equal(dim(test_t_reset), test_t_dim)
# check names after reset
expect_named(test_t_obj, c("intensities_orig", "intensities", "design_orig",
"design", "isoformData_orig", "isoformData"))
expect_named(test_t_reset, c("intensities_orig", "intensities", "design_orig",
"design", "isoformData_orig", "isoformData"))
# testing test_t_obj with rm item: a base data item IS restored by reset
test_t_obj <- rmItem(test_t_obj, "isoformData")
expect_false("isoformData" %in% names(test_t_obj))
test_t_reset <- resetDGEobj(test_t_obj)
expect_true("isoformData" %in% names(test_t_reset)) # check if restored
# testing test_t_obj with add item
test_t_obj <- addItem(test_t_obj,
item = 'Fred Flintstone',
itemName = 'Cartoon',
itemType = 'meta',
itemAttr = list('MyAttribute' = 'testObject'))
test_t_attr <- getAttributes(test_t_obj)
expect_equivalent(attributes(test_t_obj$Cartoon), 'testObject')
expect_true("Cartoon" %in% names(test_t_obj))
test_t_reset <- resetDGEobj(test_t_obj)
# NOTE(review): expect_null() checks only its first argument; the second
# argument 'testObject' is consumed as the 'info' label, not compared.
expect_null(attributes(test_t_reset$Cartoon), 'testObject')
expect_false("Cartoon" %in% names(test_t_reset)) # check if removed
# testing test_t_obj after class change
test_t_obj <- as.list.DGEobj(test_t_obj)
expect_equal(class(test_t_obj), "list")
# coerce back into DGEobj, expect reset to work
class(test_t_obj) <- "DGEobj"
expect_silent(resetDGEobj(test_t_obj))
})
# test exon level data
# Same resetDGEobj() contract again, against the exon-level fixture
# t_exon_obj.
test_that('reset.R: exon level data', {
test_t_obj <- t_exon_obj
test_t_reset <- resetDGEobj(test_t_obj)
# object validation
expect_s3_class(test_t_reset, "DGEobj")
expect_equivalent(showMeta(test_t_reset), showMeta(test_t_obj))
# validate level is exon before and after reset
test_t_meta <- showMeta(test_t_obj)
expect_equal("exon", test_t_meta$Value[3])
expect_equal(attr(test_t_obj, "level"), "exon")
test_t_meta_reset <- showMeta(test_t_reset)
expect_equal("exon", test_t_meta_reset$Value[3])
expect_equal(attr(test_t_reset, "level"), "exon")
# reset after subsetting: reset should restore the original dimensions
test_t_dim <- dim(t_exon_obj)
expect_equal(dim(test_t_obj), test_t_dim)
test_t_obj <- test_t_obj[c(1:10), ]
test_t_reset <- resetDGEobj(test_t_obj)
expect_equal(dim(test_t_reset), test_t_dim)
# check names after reset
expect_named(test_t_obj, c('counts_orig', 'counts', 'design_orig', 'design', 'exonData_orig',
'exonData', 'granges_orig', 'granges'))
expect_named(test_t_reset, c("counts_orig", "counts", "design_orig", "design",
"exonData_orig", "exonData", "granges_orig", "granges"))
# testing test_t_obj with rm item: a base data item IS restored by reset
test_t_obj <- rmItem(test_t_obj, "granges")
expect_false("granges" %in% names(test_t_obj))
test_t_reset <- resetDGEobj(test_t_obj)
expect_true("granges" %in% names(test_t_reset)) # check if persists
# testing test_t_obj with add item
test_t_obj <- addItem(test_t_obj,
item = 'Fred Flintstone',
itemName = 'Cartoon',
itemType = 'meta',
itemAttr = list('MyAttribute' = 'testObject'))
test_t_attr <- getAttributes(test_t_obj)
expect_equivalent(attributes(test_t_obj$Cartoon), 'testObject')
expect_true("Cartoon" %in% names(test_t_obj))
test_t_reset <- resetDGEobj(test_t_obj)
# NOTE(review): expect_null() checks only its first argument; the second
# argument 'testObject' is consumed as the 'info' label, not compared.
expect_null(attributes(test_t_reset$Cartoon), 'testObject')
expect_false("Cartoon" %in% names(test_t_reset)) # check if removed
# testing test_t_obj after class change
test_t_obj <- as.list.DGEobj(test_t_obj)
expect_equal(class(test_t_obj), "list")
# coerce back into DGEobj, expect reset to work
class(test_t_obj) <- "DGEobj"
expect_silent(resetDGEobj(test_t_obj))
})
# test protein level data
test_that('reset.R: protein level data', {
    obj <- t_protein_obj
    reset_obj <- resetDGEobj(obj)
    # the reset object must still be a valid DGEobj with identical metadata
    expect_s3_class(reset_obj, "DGEobj")
    expect_equivalent(showMeta(reset_obj), showMeta(obj))
    # the "protein" level must survive the reset, both in metadata and attributes
    meta_before <- showMeta(obj)
    expect_equal("protein", meta_before$Value[3])
    expect_equal(attr(obj, "level"), "protein")
    meta_after <- showMeta(reset_obj)
    expect_equal("protein", meta_after$Value[3])
    expect_equal(attr(reset_obj, "level"), "protein")
    # subsetting then resetting should restore the original dimensions
    full_dim <- dim(t_protein_obj)
    expect_equal(dim(obj), full_dim)
    obj <- obj[1:10, ]
    reset_obj <- resetDGEobj(obj)
    expect_equal(dim(reset_obj), full_dim)
    # item names are unchanged by subsetting or reset
    expected_names <- c('intensities_orig', 'intensities', 'design_orig',
                        'design', 'proteinData_orig', 'proteinData')
    expect_named(obj, expected_names)
    expect_named(reset_obj, expected_names)
    # a removed item should reappear after reset
    obj <- rmItem(obj, "intensities")
    expect_false("intensities" %in% names(obj))
    reset_obj <- resetDGEobj(obj)
    expect_true("intensities" %in% names(reset_obj)) # check if persists
    # an added item should be dropped again by reset
    obj <- addItem(obj,
                   item = 'Fred Flintstone',
                   itemName = 'Cartoon',
                   itemType = 'meta',
                   itemAttr = list('MyAttribute' = 'testObject'))
    obj_attr <- getAttributes(obj)
    expect_equivalent(attributes(obj$Cartoon), 'testObject')
    expect_true("Cartoon" %in% names(obj))
    reset_obj <- resetDGEobj(obj)
    expect_null(attributes(reset_obj$Cartoon), 'testObject')
    expect_false("Cartoon" %in% names(reset_obj)) # check if removed
    # reset still works after a round trip through a plain list
    obj <- as.list.DGEobj(obj)
    expect_equal(class(obj), "list")
    class(obj) <- "DGEobj"
    expect_silent(resetDGEobj(obj))
})
# misc tests
test_that('reset.R: misc', {
    test_t_obj <- t_obj
    test_t_reset <- resetDGEobj(t_obj)
    #test invalid level
    # initDGEobj must reject any level outside its documented vocabulary
    expect_error(test_t_obj <- initDGEobj(primaryAssayData = t_obj$counts,
                                          rowData = t_obj$geneData,
                                          colData = t_obj$design,
                                          level = "Fred Flinstone"),
                 regexp = "The specified level must be one of: gene, exon, isoform, protein, affy")
    # testing t_obj with new attributes
    # user-supplied object attributes must survive a reset untouched
    new_attributes <- list("attribute1" = runif(100, min = 0, max = 2), "attribute2" = LETTERS)
    test_t_obj <- setAttributes(test_t_obj, new_attributes)
    test_t_attr <- getAttributes(test_t_obj)
    expect_true(exists('attribute1', where = getAttributes(test_t_obj)))
    expect_setequal(test_t_attr$attribute2, LETTERS)
    test_t_reset <- resetDGEobj(test_t_obj)
    test_t_reset_attr <- getAttributes(test_t_reset)
    expect_true(exists('attribute1', where = getAttributes(test_t_reset)))
    expect_setequal(test_t_reset_attr$attribute2, LETTERS)
    # testing t_obj without platformType (no longer required for reset)
    test_t_obj <- setAttributes(t_obj, list("PlatformType" = NULL))
    expect_s3_class(resetDGEobj(test_t_obj), "DGEobj")
    # testing rm item with _orig
    # removing an *_orig item breaks reset, which needs it to restore state
    test_t_obj <- rmItem(test_t_obj, "counts_orig")
    expect_false("counts_orig" %in% names(test_t_obj))
    expect_error(resetDGEobj(test_t_obj),
                 regexp = 'The requested itemName should be in the DGEobj. Use names(dgeObj) to see the available items.',
                 fixed = TRUE)
})
|
101310e5c01b3410e0a3cdd58665e620b8d89f26
|
859b8efd47dd6150546566f5993142f6e73a76a9
|
/scripts/r-scripts/r-functions/blast_data.R
|
e9212ea4bfc8afea190f140e34473a9885b7345f
|
[
"MIT"
] |
permissive
|
TeamMacLean/ruth-effectors-prediction
|
f2d3811e6775f77592d7084e6414255003287049
|
79bce829012e90acd28374140b646da61ee2cfe1
|
refs/heads/master
| 2022-11-24T12:37:27.559637
| 2020-07-29T16:38:44
| 2020-07-29T16:38:44
| 181,357,224
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,697
|
r
|
blast_data.R
|
# Functions for running BLAST on sequence data and reading the results
library(tidyverse)
library(Biostrings)
library(seqRFLP)
get_fasta_from_df <- function(df, column_id, column_seq, label = NULL, fasta_name = NULL, dir_path = "data/secreted_data/split-blast/fasta_files") {
  #' Function to automatically change the dataframe to fasta data
  #'
  #' @param df dataframe. The dataframe we want to change to fasta data (need to be R dataframe not tibble)
  #' @param column_id unquoted column name. Column holding the sequence identifiers
  #' @param column_seq unquoted column name. Column holding the sequences
  #' @param label string or NULL. Optional suffix joined to every identifier with "_"
  #' @param fasta_name string or NULL. Output file name (without extension); defaults to the caller's name for `df`
  #' @param dir_path path string. String contains path where we will save the data
  # With no explicit file name, capture the caller's variable name for `df`
  if (is.null(fasta_name)) {
    df_name <- deparse(substitute(df))
  } else {
    df_name <- fasta_name
  }
  # Change the label to become ID name
  if (!is.null(label)) {
    df <- df %>%
      dplyr::ungroup() %>%
      dplyr::mutate(
        {{ column_id }} := stringr::str_c({{ column_id }}, label, sep = "_")
      ) %>%
      dplyr::select(
        {{ column_id }}, {{ column_seq }}
      )
  } else {
    df <- df %>%
      # dplyr::ungroup() %>%
      dplyr::select(
        {{ column_id }}, {{ column_seq }}
      )
  }
  # seqRFLP::dataframe2fas takes a plain two-column data.frame (id, sequence)
  # and writes the FASTA file as a side effect
  data_fa <- df %>%
    as.data.frame() %>%
    seqRFLP::dataframe2fas(file = paste0(dir_path, "/", df_name, ".fasta"))
  message("The data frame has been saved in ", paste0(dir_path, "/", df_name, ".fasta"))
}
get_blast_data <- function(database_fasta_path, query_fasta_path, dir_path = "data/secreted_data/split-blast/blast_files") {
  #' Run a protein BLAST of a query FASTA file against a database FASTA file
  #'
  #' @param database_fasta_path path string. FASTA file used to build the BLAST database
  #' @param query_fasta_path path string. FASTA file containing the query sequences
  #' @param dir_path path string. Directory where the tabular (-outfmt 6) result is written
  #'
  #' Requires the NCBI BLAST+ binaries (`makeblastdb`, `blastp`) on the PATH.
  #' NOTE(review): paths are interpolated unquoted into a shell command, so
  #' paths with spaces or shell metacharacters will break — confirm callers
  #' only pass safe paths, or switch to system2() with an argument vector.

  # Strip the directory part and the ".fasta" extension to get a bare name.
  # stringr::fixed() is used so "." is a literal dot, not a regex wildcard.
  get_name <- function(path1, path2) {
    c(path1, path2) %>%
      stringr::str_split("/") %>%
      purrr::map(
        .f = function(x) {
          x[[length(x)]] %>%
            stringr::str_remove_all(stringr::fixed(".fasta"))
        }
      ) %>%
      unlist()
  }

  # Compute both names in one pass instead of calling get_name() twice
  file_names <- get_name(database_fasta_path, query_fasta_path)
  db_name <- file_names[1]
  query_name <- file_names[2]
  result_name <- paste0(dir_path, "/", db_name, "_vs_", query_name, ".tsv")

  # Build the protein BLAST database next to the database FASTA file
  system(paste("makeblastdb ", "-in ", database_fasta_path, "-dbtype ", "prot"), intern = TRUE)
  # BLAST the query against the database; tab-separated output with 8 columns
  # (read back by blast_results())
  system(paste("blastp ", "-query ", query_fasta_path, "-db ", database_fasta_path, "-out ", result_name, " -outfmt ", "\"6 qseqid qlen sseqid slen length nident mismatch positive\""))
}
# function to read the results and get the list of row index
blast_results <- function(result_path, percent_threshold = 95) {
  #' Function to read all of the data
  #'
  #' @param result_path path string. Path of blast result done using function get_blast_data()
  #' @param percent_threshold integer. Identity percentage (%) above which a hit is treated as identical
  #' @return list with the full table (`df`), the above-threshold rows
  #'   (`df_identical_protein`) and the subject/query ids to remove
  # Column key (BLAST -outfmt 6 fields requested by get_blast_data()):
  #   qseqid   Query Seq-id              qlen     Query sequence length
  #   sseqid   Subject Seq-id            slen     Subject sequence length
  #   length   Alignment length          nident   Number of identical matches
  #   mismatch Number of mismatches      positive Number of positive-scoring matches
  df_results <- data.table::fread(result_path)
  # An empty result file yields a 0-row table; pad with a single all-NA row so
  # the renaming and mutate below still work
  if (nrow(df_results) == 0) {
    df_results <- df_results %>% rbind(t(rep(NA, 8)))
  }
  df_results <- df_results %>%
    setNames(c("qseqid", "qlen", "sseqid", "slen", "length", "nident", "mismatch", "positive")) %>%
    dplyr::mutate(
      # pmax() is vectorised, so no rowwise() pass is needed; percentages are
      # computed over the longer of the two sequences
      percent_identical = (nident / pmax(qlen, slen)) * 100,
      percent_positive = (positive / pmax(qlen, slen)) * 100
    )
  # Rows above the identity threshold; computed once and reused below instead
  # of re-running the same filter three times
  df_identical_protein <- df_results %>%
    filter(percent_identical > percent_threshold)
  # Subject ids of the near-identical hits
  subject_index_list_to_remove <- df_identical_protein %>%
    select(sseqid) %>%
    unique() %>%
    unlist()
  # Query ids of the near-identical hits
  query_index_list_to_remove <- df_identical_protein %>%
    select(qseqid) %>%
    unique() %>%
    unlist()
  # Bundle everything the callers need
  list_results <- list(
    df = df_results,
    df_identical_protein = df_identical_protein,
    subject_index = subject_index_list_to_remove,
    query_index = query_index_list_to_remove
  )
  return(list_results)
}
blast_with_ifself <- function(df, col_id, col_seq, percent_threshold) {
  # Self-BLAST a set of sequences and report pairs of *different* sequences
  # whose identity exceeds `percent_threshold`, most similar first.
  work_dir <- tempdir()
  self_fasta <- paste0(work_dir, "/fasta_self.fasta")
  # Write the sequences to a temporary FASTA file
  get_fasta_from_df(
    df = df,
    column_id = {{ col_id }},
    column_seq = {{ col_seq }},
    fasta_name = "fasta_self",
    dir_path = work_dir
  )
  # BLAST the file against itself
  get_blast_data(
    database_fasta_path = self_fasta,
    query_fasta_path = self_fasta,
    dir_path = work_dir
  )
  # Read the result, drop each sequence's hit on itself, keep pairs above the
  # threshold and sort by decreasing identity
  hits <- blast_results(
    result_path = paste0(work_dir, "/fasta_self_vs_fasta_self.tsv")
  )[["df"]]
  hits %>%
    filter(
      qseqid != sseqid,
      percent_identical > percent_threshold
    ) %>%
    arrange(desc(percent_identical))
}
|
2bf971ae00b3af4c3b79ea36efb68e75b179ae84
|
bca540badfb6adccbe95828c34d0effd7c900992
|
/man/computeIntervalFraction.Rd
|
ca7fe3248d86de1b228b58c8c7c46046f949f48d
|
[
"MIT"
] |
permissive
|
uvic-cisur/intermahpr
|
6b166d2506f74fcf5ce6104d05c9becb1bcd8015
|
821f3735b720495e1b97bf91c39478fbe9f14daa
|
refs/heads/master
| 2021-07-13T03:25:31.633424
| 2020-06-16T16:40:51
| 2020-06-16T16:40:51
| 146,017,580
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 419
|
rd
|
computeIntervalFraction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scenarios.R
\name{computeIntervalFraction}
\alias{computeIntervalFraction}
\title{Compute a given scenario's AAF for current drinkers in a given interval of
consumption}
\usage{
computeIntervalFraction(.data, lower = -Inf, upper = Inf)
}
\description{
Compute a given scenario's AAF for current drinkers in a given interval of
consumption
}
|
b65800f2061076f788207c579a3861ac5389254c
|
652859d4c5bd704e78e1da39ad0e10aac53d1737
|
/R/util.R
|
e52dbda3620dbcf2cf0c0c32f8edde161df73564
|
[] |
no_license
|
wush978/RembedPy
|
ffa1c720a67383f4fba63a2746078662c4bccd16
|
8cc0a1876d56bc755fa0a8e48f87395392aedf94
|
refs/heads/master
| 2020-05-29T19:45:35.787126
| 2013-05-12T03:06:47
| 2013-05-12T03:06:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 534
|
r
|
util.R
|
#
# Author: Wush Wu
# Copyright (c) Bridgewell Inc.
#
check_argv <- function(argv) {
  # Normalise a list of call arguments for Python interop: wrap every element
  # that is not already a "py-ptr" in one, then split the arguments into the
  # positional part (`list`) and the named part (`dict`).
  #
  # @param argv list of arguments, possibly partially named.
  # @return list(list = <unnamed args>, dict = <named args>).
  argv <- sapply(argv, function(a) {
    # inherits() is the robust class test; class(a)[1] == "py-ptr" breaks
    # when "py-ptr" is not the first entry of a multi-class object
    if (inherits(a, "py-ptr"))
      return (a)
    else
      return (new("py-ptr", a))
  }, simplify=FALSE)
  argv.name <- names(argv)
  if (is.null(argv.name)) {
    # no names at all: everything is positional
    return(list(list=argv, dict=list()))
  }
  index.list <- which(argv.name == "")
  if (length(index.list) > 0) {
    # mixed: unnamed entries are positional, the rest become keyword args
    return(list(list=argv[index.list], dict=argv[-index.list]))
  } else {
    # all entries named; note argv[-integer(0)] would select nothing,
    # hence the explicit length guard above
    return(list(list=list(), dict=argv))
  }
}
|
231d12789fa69f2e1384e1fa9ee250d250578cd1
|
d62d9ea2f6aa749fa48455bddbd3208279ce6449
|
/R/convert-relative-initial.R
|
8d9f147ea263d9ef24dcd283fff391e278b55ae3
|
[] |
no_license
|
jporobicg/atlantistools
|
3bffee764cca1c3d8c7a298fd3a0b8b486b7957e
|
75ea349fe21435e9d15e8d12ac8060f7ceef31a2
|
refs/heads/master
| 2021-01-12T03:06:55.821723
| 2017-05-26T04:03:33
| 2017-05-26T04:03:33
| 78,160,576
| 1
| 0
| null | 2017-05-25T23:35:23
| 2017-01-06T00:51:21
|
R
|
UTF-8
|
R
| false
| false
| 1,218
|
r
|
convert-relative-initial.R
|
#' Calculate relative timeseries using the initial value as benchmark.
#'
#' @param data Dataframe to apply the transformation to.
#' @param col Character value giving the name of the column to transform.
#' Default is \code{"atoutput"}.
#' @return Dataframe with transformed column 'col'.
#' @export
#'
#' @examples
#' df <- convert_relative_initial(ref_structn)
#' head(df[df$layer == 1, ], n = 15)
convert_relative_initial <- function(data, col = "atoutput") {
  if (!"time" %in% names(data)) stop("Column time is missing in data.")
  # Reference values: the rows at the first time step. (Replaces the defunct
  # dplyr underscore verbs filter_()/mutate_() and lazyeval.)
  ref <- dplyr::ungroup(data)
  ref <- ref[ref$time == min(ref$time), , drop = FALSE]
  ref$time <- NULL
  names(ref)[names(ref) == col] <- "atoutput_ref"
  # Join the reference values back on every non-time, non-value column and
  # divide each observation by its initial value
  result <- dplyr::left_join(data, ref, by = names(data)[!names(data) %in% c("time", col)])
  result[[col]] <- result[[col]] / result$atoutput_ref
  # Replace division by 0 with 0!
  # (Was hard-coded to result$atoutput, which broke for col != "atoutput".)
  result[[col]][result$atoutput_ref == 0] <- 0
  # Remove NAs. Some groups have missing values at first time step (carrion)
  result <- result[!is.na(result[[col]]), ]
  result$atoutput_ref <- NULL
  return(result)
}
|
19c762dae77e01c390b1cf1517808b70c3230a2a
|
c2390033eb85dda2d14dfe546ff77637b9d4d3e2
|
/man/HSImetadata.Rd
|
aed968df049c0a8c0747b3812bbc723201d32168
|
[] |
no_license
|
cran/ecorest
|
43f4c8927c3b4c022b5f5e2e0f16b07c76279ef9
|
23dff972bb8378acdc2f438746cc0791e76d5eb2
|
refs/heads/master
| 2022-11-08T03:22:43.847002
| 2020-06-26T09:50:03
| 2020-06-26T09:50:03
| 276,657,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,447
|
rd
|
HSImetadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HSImetadata.R
\docType{data}
\name{HSImetadata}
\alias{HSImetadata}
\title{Habitat suitability index (HSI) model metadata}
\format{A data frame with 519 rows and 55 variables:
\describe{
\item{model}{Model name}
\item{submodel}{Model specifications}
\item{species}{Scientific nomenclature of modeled taxa}
\item{geography}{Geographic range of organism}
\item{ecosystem}{Type of habitat}
\item{documentation}{Citation of original model}
\item{note}{Conditions under which model may be applied}
\item{website}{Link to original individual model source}
\item{SIV1}{Suitability index values for each organism specific condition}
\item{SIV2}{Suitability index values for each organism specific condition}
\item{SIV3}{Suitability index values for each organism specific condition}
\item{SIV4}{Suitability index values for each organism specific condition}
\item{SIV5}{Suitability index values for each organism specific condition}
\item{SIV6}{Suitability index values for each organism specific condition}
\item{SIV7}{Suitability index values for each organism specific condition}
\item{SIV8}{Suitability index values for each organism specific condition}
\item{SIV9}{Suitability index values for each organism specific condition}
\item{SIV10}{Suitability index values for each organism specific condition}
\item{SIV11}{Suitability index values for each organism specific condition}
\item{SIV12}{Suitability index values for each organism specific condition}
\item{SIV13}{Suitability index values for each organism specific condition}
\item{SIV14}{Suitability index values for each organism specific condition}
\item{SIV15}{Suitability index values for each organism specific condition}
\item{SIV16}{Suitability index values for each organism specific condition}
\item{SIV17}{Suitability index values for each organism specific condition}
\item{SIV18}{Suitability index values for each organism specific condition}
\item{SIV19}{Suitability index values for each organism specific condition}
\item{SIV20}{Suitability index values for each organism specific condition}
\item{SIV21}{Suitability index values for each organism specific condition}
\item{SIV22}{Suitability index values for each organism specific condition}
\item{CF}{Food component equation}
\item{CC}{Cover component equation}
\item{CCF}{Cover-food component equation}
\item{CWF}{Winter food component}
\item{CW}{Water component}
\item{CCB}{Cover breeding component}
\item{CCN}{Cover nesting component}
\item{CWQ}{Water quality component}
\item{CR}{Reproduction component}
\item{CCR}{Cover reproduction component}
\item{CD}{Disturbance component}
\item{COT}{Other component}
\item{CL}{Larval component}
\item{CEL}{Embryo and larval component}
\item{CE}{Embryo component}
\item{CJ}{Juvenile component}
\item{CFr}{Fry component}
\item{CS}{Spawning component}
\item{CA}{Adult component}
\item{CI}{Island component}
\item{CNI}{Non-island component}
\item{CWFC}{Winter cover food component}
\item{CT}{Topography component}
\item{CJA}{Juvenile adult component}
\item{Eqtn}{HSI overarching model equation in R syntax}
}}
\source{
\url{https://pubs.er.usgs.gov/}
}
\usage{
HSImetadata
}
\description{
Metadata for 519 U.S. Fish and Wildlife Service Habitat suitability index (HSI) models
}
\keyword{datasets}
|
8308d371895b6e4d2bdbeed9aa29e6345eeb54d0
|
7d7608095ef8ba660d8a5ec618bcef2a4565be93
|
/scripts/cod_pollock_correlation.R
|
5d8130ff0a1c391af82614a3f73ffffc68a05aac
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
mikelitzow/predict-R
|
3c79c7e65c34b6e8c39bfe8ca3a10cb2e22bba14
|
7cecdc72dcff94263531003c6432e782d934799d
|
refs/heads/main
| 2023-04-06T22:57:51.997792
| 2022-01-28T02:48:13
| 2022-01-28T02:48:13
| 357,273,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,777
|
r
|
cod_pollock_correlation.R
|
# calculate correlation between cod and pollock time series for Discussion
library(brms)
# FIX: the script uses %>%, filter(), mutate(), select() and left_join()
# below, but previously loaded only brms (which does not attach dplyr)
library(dplyr)

## load cod data ----------------------------------
# load best brms model fit to beach seine data
cod_recr_2_zinb <- readRDS("./output/cod_recr_2_zinb.rds")
ce1s_1 <- conditional_effects(cod_recr_2_zinb, effect = "year_fac", re_formula = NA,
                              probs = c(0.025, 0.975))
print(ce1s_1)

# load eco-foci larval / spawning habitat data
foci.larv <- read.csv("data/LarvalPollockandCodCPUE_TimeSeries.csv")
head(foci.larv)
cod.larv <- foci.larv %>%
  filter(Common_Name2 == "Pacific cod")
spawn.habitat <- read.csv("data/GOA_Pcod_SpawningHabitatSuitability.csv")
head(spawn.habitat)

# combine the three data sets: seine year effects (log scale), larval CPUE
# (log scale) and spawning habitat suitability
seine.dat <- ce1s_1$year_fac %>%
  mutate(year = as.numeric(as.character(year_fac)),
         estimate = log(estimate__)) %>%
  select(year, estimate)
names(seine.dat)[2] <- "cod.seine"
cod.larv <- cod.larv %>%
  mutate(larv.est = log(MeanCPUE)) %>%
  select(larv.est, Year)
names(cod.larv) <- c("cod.larval", "year")
names(spawn.habitat)[1:2] <- c("year", "cod.habitat")

# load DFA trend
trend <- read.csv("./output/cod_dfa_trend.csv", row.names = 1)
names(trend)[2] <- "cod.dfa"
trend <- trend %>%
  select(year, cod.dfa)

# assemble one cod time-series table on a common 1981-2020 year axis
cod <- data.frame(year = 1981:2020)
cod <- left_join(cod, cod.larv)
cod <- left_join(cod, spawn.habitat)
cod <- left_join(cod, seine.dat)
cod <- left_join(cod, trend)
head(cod)

## load pollock data -----------------------------------
# first, seine estimates
poll_recr_2_zinb_reduced_bays <- readRDS("./output/poll_recr_2_zinb_reduced_bays.rds")
ce1s_1 <- conditional_effects(poll_recr_2_zinb_reduced_bays, effect = "year_fac", re_formula = NA,
                              probs = c(0.025, 0.975))

# load eco-foci larval / age-0 abundance data
foci.larv <- read.csv("data/ECO-FOCI_larval_pollock.csv")
pollock.larv <- foci.larv %>%
  mutate(pollock.larval = log(MeanCPUE)) %>%
  select(pollock.larval, Year)
head(pollock.larv)
names(pollock.larv)[2] <- "year"
foci.juv <- read.csv("data/ECO-FOCI_age_0_pollock.csv")
head(foci.juv)

# combine the three data sets
seine.dat <- ce1s_1$year_fac %>%
  mutate(year = as.numeric(as.character(year_fac))) %>%
  select(year, estimate__)
names(seine.dat)[2] <- "pollock.seine"
names(foci.juv)[1:2] <- c("year", "pollock.trawl")

# and dfa trend!
trend <- read.csv("./output/poll_dfa_trend.csv", row.names = 1)
names(trend)[2] <- "pollock.dfa"
trend <- trend %>%
  select(year, pollock.dfa)

# assemble the pollock table on the same year axis
pollock <- data.frame(year = 1981:2020)
pollock <- left_join(pollock, pollock.larv)
pollock <- left_join(pollock, foci.juv)
pollock <- left_join(pollock, seine.dat)
pollock <- left_join(pollock, trend)
head(pollock)

# pairwise correlations across all cod and pollock series,
# using pairwise-complete observations
both <- left_join(pollock, cod)
cor(both, use = "p")
|
b5a58e17c9339b302f1b6a20ead9328be572a0e0
|
b666252becca31e08354068153d67b4a5b5b0fe3
|
/tests/testthat/test-count.R
|
5ee805eb9d8986be34554fbe4a597b6d48d6491d
|
[
"MIT"
] |
permissive
|
SevillaR/HivepodR
|
fe1e471bd8c39b6fe0ffe2be254345c5cb5d64f0
|
c3a4b9911f46d25ff446cc8b898a03064db0dc2c
|
refs/heads/master
| 2021-01-10T15:35:53.292637
| 2016-02-06T11:53:04
| 2016-02-06T11:53:04
| 50,691,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 485
|
r
|
test-count.R
|
context("count")

# NOTE(review): both tests hit a live demo server over HTTPS and will fail
# offline; consider skip_if_offline() / skip_on_cran() guards.
test_that("count offices > 2", {
  cnx <- connect("https://jacaton-r.herokuapp.com", "demo", "1234")
  resource <- resource(cnx, "oficinas")
  out <- count(resource)
  # expect_gt() is the idiomatic expectation and reports the actual value on
  # failure, unlike expect_equal(TRUE, out > 2)
  expect_gt(out, 2)
})

test_that("count sevilla office == 1", {
  cnx <- connect("https://jacaton-r.herokuapp.com", "demo", "1234")
  resource <- resource(cnx, "oficinas")
  # exactly one office should match the name filter
  out <- count(resource, conditions = buildCondition("nombre", "==", "Seville"))
  expect_equal(out, 1)
})
|
6024035040260b9bdb0b361562fa25a33d7272a3
|
58e574a90e06146e9340db77da0419c49650c238
|
/analysis.R
|
059f4d41071503cc140670a2b63bce3a097b14a5
|
[] |
no_license
|
jykimgithub/mock_thesis-1
|
2479c7d4f2414fb1f5c9da6085d432a751ae297e
|
e395a8e11e529dc17bc3d301fbe5fd6605462c07
|
refs/heads/master
| 2020-07-14T23:59:06.785259
| 2019-08-30T17:54:06
| 2019-08-30T17:54:06
| 146,926,209
| 1
| 0
| null | 2018-08-31T18:01:05
| 2018-08-31T18:01:05
| null |
UTF-8
|
R
| false
| false
| 367
|
r
|
analysis.R
|
# Work relative to this script's own directory.
# NOTE(review): this only works when run from within RStudio; prefer project-
# relative paths (e.g. the here package) for non-interactive use.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))

# FIX: install pacman only when it is missing, instead of unconditionally
# reinstalling it on every run of the script
if (!requireNamespace("pacman", quietly = TRUE)) {
  install.packages("pacman")
}
pacman::p_load(tidyverse)

data <- read_csv("data.csv")

# Household income per capita over time for two provinces
filter(data, GEO %in% c("Alberta","Prince Edward Island")) %>%
  ggplot(aes(x = Date, y = as.numeric(Value))) +
  geom_line(aes(color = GEO)) +
  labs(x = "Year", y = "Household income per capita") +
  theme_bw()
|
708ab7a74d87b8d35f29990055993c4026a385bd
|
32e33fd5ba6bf63f8e9be13b599e96d9e1d10c49
|
/r/migration_models.r
|
340ba07567ed26f825eef2079e5ce8e04be6842d
|
[] |
no_license
|
benscarlson/howto
|
ecd469ca0a05159b1f0951e2ac8c8de3bb6ff39b
|
6601f6b003ee18b49466ad36a60672acbb71c70d
|
refs/heads/master
| 2023-08-07T02:36:32.142882
| 2023-07-28T20:25:38
| 2023-07-28T20:25:38
| 60,221,130
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
migration_models.r
|
#----
#---- migrateR
#----
# Fit migration movement models with the migrateR package.
# NOTE(review): relies on `dat0` being defined upstream with lon/lat,
# timestamp and individual_id columns — confirm against the calling script.
dat <- dat0 %>% as.data.frame
mods <- as.ltraj( #Create ltraj object
  xy=dat %>% select(lon,lat),
  date=dat$timestamp,
  id=dat$individual_id) %>%
  mvmtClass #Run the migration model
mvmt2dt(mods) #modeled start and end dates of migratory movements
|
47b83463c386bb17f82b1daaf7167124bb53896e
|
0a14dae80ad320a309f434b9d6590d691d7acac4
|
/cigarette(2).R
|
2471ea977e6f0b33420ceedc051f4198373c9e96
|
[] |
no_license
|
FChia11/R
|
eaa50d8a7ea2db766b5b32f654b018640f5cb868
|
5a4f125598cfc76152662ce0549235105b911585
|
refs/heads/main
| 2023-04-23T07:39:47.060562
| 2021-04-27T07:49:24
| 2021-04-27T07:49:24
| 362,025,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,953
|
r
|
cigarette(2).R
|
####################################
### Regression Analysis ###
### PC session 1: Cigarette data ###
####################################
# Teaching script: regress carbon monoxide (CO) content of cigarettes on
# Tar, Nicotine and Weight; expects "cigarette.txt" in the working directory.
# Libraries required: rgl and MASS
# NOTE(review): rm(list = ls()) wipes the user's entire workspace on source;
# generally discouraged in shared scripts.
rm(list = ls())
# Load cigarette data
cigarette <- read.table("cigarette.txt", header = TRUE)
# drop the first column (brand label, not a predictor)
cigarette <- cigarette[,-1]
cigarette
n <- dim(cigarette)[1]
p <- dim(cigarette)[2]
# NOTE(review): attach() masks names globally and is error-prone; the
# matching detach() is at the end of the script.
attach(cigarette)
# Descriptive statistics
summary(cigarette)
# Histograms
par(mfrow = c(2,2))
hist(Tar)
hist(Nicotine)
hist(Weight)
hist(CO)
# Boxplots
par(mfrow = c(2,2))
boxplot(Tar, main = "Boxplot of Tar")
boxplot(Nicotine, main = "Boxplot of Nicotine")
boxplot(Weight, main = "Boxplot of Weight")
boxplot(CO, main = "Boxplot of CO")
# observation 3 is deviating from the main trend
par(mfrow = c(1,1))
# Scatter plot
pairs(cigarette)
# same scatter-plot matrix with lowess smoothers overlaid
pairs(cigarette, panel = function(x,y) {points(x,y); lines(lowess(x,y), col = "red")})
# CO ~ Tar
fit1 <- lm(CO ~ Tar, data = cigarette)
fit1
plot(Tar, CO)
abline(fit1, col = "red")
# intercept: average CO content for cigarettes with 0 Tar is 2.743
# slope: if Tar is increased one unit, the average CO content increases 0.801 units
# CO ~ Nicotine
fit2 <- lm(CO ~ Nicotine, data = cigarette)
fit2
plot(Nicotine, CO)
abline(fit2, col = "red")
# intercept: average CO content for cigarettes with 0 Nicotine is 1.665
# slope: if Nicotine is increased one unit, the average CO content increases 12.395 units
# Co ~ Tar + Nicotine
fit3 <- lm(CO ~ Tar + Nicotine, data = cigarette)
fit3
fit3.sum <- summary(fit3)
fit3.sum
# regression slope of Nicotine changed sign
# regression slope of Nicotine is non-significant since P-value = 0.492 > 0.05
# 3D plot regression surface
library(rgl)
plot3d(Tar, Nicotine, CO, type = "s", col = "red", size = 1)
# plane coefficients for a*x + b*y + c*z + d = 0 from the fitted model
fit3.coef <- coefficients(fit3)
a <- fit3.coef["Tar"]
b <- fit3.coef["Nicotine"]
# NOTE(review): `c` shadows base::c() until it goes out of scope / is removed
c <- - 1
d <- fit3.coef["(Intercept)"]
planes3d(a, b, c, d, alpha = 0.3)
# Outlier diagnostics
library(MASS)
# standardized residuals; |value| > 2.5 is flagged as an outlier
fit3.stdres <- stdres(fit3)
plot(fit3.stdres, ylim = c(-3,3))
abline(h = -2.5, lty = 2)
abline(h = 2.5, lty = 2)
# observation 3 is an outlier
# Co ~ Tar + Nicotine (without observation 3)
fit4 <- lm(CO ~ Tar + Nicotine, data = cigarette[-3,])
fit4
fit4.sum <- summary(fit4)
fit4.sum
# regression slope of Nicotine is again positive
# regression slope of Nicotine is still non-significant since P-value = 0.846 > 0.05
# 3D plot regression surface
library(rgl)
plot3d(Tar[-3], Nicotine[-3], CO[-3], type = "s", col = "red", size = 1, xlab = "Tar", ylab = "Nicotine", zlab = "CO")
fit4.coef <- coefficients(fit4)
a <- fit4.coef["Tar"]
b <- fit4.coef["Nicotine"]
c <- - 1
d <- fit4.coef["(Intercept)"]
planes3d(a, b, c, d, alpha = 0.3)
# Correlation between Tar and Nicotine
plot(Tar, Nicotine)
cor(Tar, Nicotine)
# this correlation could be the cause of a non-significant Nicotine
detach(cigarette)
|
5a8d50e2abac1286300780e59d09be955fb4ff84
|
1925c9e5c8abf8dcdbc2cff95ea565fc59257545
|
/ui.R
|
5bf3f0446bda9746bfbf154b2e2544bcf4262341
|
[] |
no_license
|
olinux/developingDataProducts
|
53f974648ce9fa9fa6bedb09576d21324867bcb6
|
26c08f42333c91ab009b4159a1e264fb0e7d1c2b
|
refs/heads/master
| 2021-01-23T00:10:10.673319
| 2015-06-20T10:34:52
| 2015-06-20T10:34:52
| 37,765,291
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 958
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://www.rstudio.com/shiny/
#
library(shiny)
# NOTE(review): pageWithSidebar()/headerPanel() are legacy Shiny layout
# helpers; fluidPage() + sidebarLayout() is the modern equivalent — confirm
# before migrating.
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("The car selector"),
  # Sidebar with preference inputs: mpg and horsepower sliders plus a
  # cylinder selector (inputs: input$mpg, input$hp, input$cyl)
  sidebarPanel(
    wellPanel(
      h3("How To"),
      p("Select your preferences in terms of miles per gallons, horse power and cylinders. You will then see the according cars in the result table on the left. The highlighted columns indicate the values which are the closest to your selection.")
    ),
    sliderInput("mpg", "Miles per gallon", min=10, max=40, value=10),
    sliderInput("hp", "Horse power", min=50, max=300, value=50),
    selectInput("cyl", "Cylinders", c("Irrelevant"=NA, "4"=4, "6"=6, "8"=8))
  ),
  # Main panel: server-rendered HTML table of matching cars (output$result)
  mainPanel(
    htmlOutput("result")
  )
))
|
af60477b1e59e16dd0875e3091185ba3172acead
|
fe891be23d33f7a41ffb368e9d20852f7e0735a9
|
/man/set_stage.Rd
|
03c3b832733bc7842f531d70609656c5a401a22e
|
[
"MIT"
] |
permissive
|
FedericoCarli/stagedtrees
|
cfc7e2c1383618591df0307c0a2dea252293ece9
|
b10897b3198d8cb39c14e1dbcfbc552f848c723f
|
refs/heads/main
| 2021-08-22T01:35:21.728501
| 2021-04-22T14:06:18
| 2021-04-22T14:06:18
| 185,995,569
| 0
| 0
|
NOASSERTION
| 2021-03-03T13:58:29
| 2019-05-10T13:44:45
|
R
|
UTF-8
|
R
| false
| true
| 390
|
rd
|
set_stage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1b-base-model-function.R
\name{set_stage}
\alias{set_stage}
\title{Set stage to path}
\usage{
set_stage(object, path, stage)
}
\arguments{
\item{object}{an object of class \code{sevt}.}
\item{path}{Vector of the path.}
\item{stage}{stage to be assigned.}
}
\description{
Set stage to path
}
\keyword{internal}
|
96cbb6f2929bbb33dc13e33b7be40ce551122700
|
bce8156a9e5b39f17f5c4f6fcd4c9fbff4d74897
|
/man/backtest-functions.Rd
|
45a2a3f599d867b2529358a49a5492d81923bc05
|
[] |
no_license
|
cran/fPortfolio
|
fb8f26496a32fd8712361a20cbb325c0bfcffe01
|
d0189fabdf712c043fb13feb80f47696ac645cef
|
refs/heads/master
| 2023-04-29T14:30:55.700486
| 2023-04-25T06:50:06
| 2023-04-25T06:50:06
| 17,695,954
| 10
| 10
| null | 2015-04-23T18:15:24
| 2014-03-13T04:38:33
|
R
|
UTF-8
|
R
| false
| false
| 3,035
|
rd
|
backtest-functions.Rd
|
\name{backtest-functions}
\alias{equidistWindows}
\alias{tangencyStrategy}
\alias{emaSmoother}
\title{User defined functions to perform portfolio backtesting}
\description{
Default windows, strategy and smoothing functions used for portfolio
backtesting.
}
\usage{
equidistWindows(data, backtest = portfolioBacktest())
tangencyStrategy(data, spec = portfolioSpec(), constraints = "LongOnly",
backtest = portfolioBacktest())
emaSmoother(weights, spec, backtest)
}
\arguments{
\item{data}{
a multivariate time series described by an S4 object of class
\code{timeSeries}. If your timeSerie is not a \code{timeSeries}
object, consult the generic function \code{as.timeSeries} to
convert your time series.
}
\item{backtest}{
an S4 object of class \code{fPFOLIOBACKTEST} as returned by the
function \code{portfolioBacktest}.
}
\item{spec}{
an S4 object of class \code{fPFOLIOSPEC} as returned by the function
\code{portfolioSpec}.
}
\item{constraints}{
a character string vector, containing the constraints of the form\cr
\code{"minW[asset]=percentage"} for box constraints resp. \cr
\code{"maxsumW[assets]=percentage"} for sector constraints.
}
\item{weights}{
a numeric vector, containing the portfolio weights of an asset
}
}
\details{
\bold{equidistWindows:}\cr
Defines equal distant rolling windows.
The function requires two arguments: \code{data} and
\code{backtest}, see above. To assign the horizon
value to the backtest specification structure, use the function
\code{setWindowsHorizon}.
\bold{tangencyStrategy:}\cr
A pre-defined tangency portfolio strategy.
The function requires four arguments: \code{data}, \code{spec},
\code{constraints} and \code{backtest}, see above.
\bold{emaSmoother:}\cr
A pre-defined weights smoother (EMA) for portfolio backtesting.
The function requires three arguments: \code{weights}, \code{spec}
and \code{backtest}, see above. To assign initial starting weights,
smoothing parameter (lambda) or whether to perform double smoothing
to the backtest specification structure, use the functions
\code{setSmootherInitialWeights}, \code{setSmootherLambda}
and \code{setSmootherDoubleSmoothing}, respectively.
}
\value{
\code{equidistWindows}\cr
function returns the "from" and "to" dates of the rolling window
in a list form.
\code{tangencyStrategy}\cr
function returns a S4 object of class \code{"fPORTFOLIO"}.
\code{emaSmoother}\cr
function returns a numeric vector of smoothed weights.
}
\references{
W\"urtz, D., Chalabi, Y., Chen W., Ellis A. (2009);
\emph{Portfolio Optimization with R/Rmetrics},
Rmetrics eBook, Rmetrics Association and Finance Online, Zurich.
}
\keyword{models}
|
44fd5c3031bb1cb8b5b43b23cb659681f692a283
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.compute/man/ec2_modify_reserved_instances.Rd
|
2f2130406c47e30b55b5cad961db1c4fb00ade6d
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,248
|
rd
|
ec2_modify_reserved_instances.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_modify_reserved_instances}
\alias{ec2_modify_reserved_instances}
\title{Modifies the configuration of your Reserved Instances, such as the
Availability Zone, instance count, or instance type}
\usage{
ec2_modify_reserved_instances(
ReservedInstancesIds,
ClientToken = NULL,
TargetConfigurations
)
}
\arguments{
\item{ReservedInstancesIds}{[required] The IDs of the Reserved Instances to modify.}
\item{ClientToken}{A unique, case-sensitive token you provide to ensure idempotency of your
modification request. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html}{Ensuring Idempotency}.}
\item{TargetConfigurations}{[required] The configuration settings for the Reserved Instances to modify.}
}
\description{
Modifies the configuration of your Reserved Instances, such as the Availability Zone, instance count, or instance type. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.
See \url{https://www.paws-r-sdk.com/docs/ec2_modify_reserved_instances/} for full documentation.
}
\keyword{internal}
|
5d3055373a0e8d3ced63bd582bea4cf32cc4e111
|
946fc043152995ccf404a9b46efbd570e6a0fa16
|
/R/sampling_weight.R
|
931d949131af98b813623ad9b033eed747163361
|
[] |
no_license
|
Lujia-Bai/STResampling-DSAA2019
|
9b2c0c3979d3be04eb91bb0e34a25746256f4e66
|
31f32a815d940f9dd5f3b60a4f980d4245683372
|
refs/heads/master
| 2023-04-01T02:07:41.471046
| 2021-04-06T15:48:27
| 2021-04-06T15:48:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,066
|
r
|
sampling_weight.R
|
#' Calculate utility-based relevance
#'
#' Calculate relevance of values given a parametrization
#' of the relevance function.
#' Most relevant: phi -> 1; less relevant: phi -> 0.
#' @param y vector of values to calculate relevance of
#' @param phi.control list of parameters as returned
#' by function \code{UBL::phi.control}
#' @seealso \code{\link[UBL]{phi}}, \code{\link[UBL]{phi.control}}
get_phi <- function(y, phi.control){
  # require(UBL)
  # Thin wrapper: all relevance computation is delegated to UBL::phi.
  UBL::phi(y, phi.control)
}
#' Calculate temporally-biased re-sampling weights
#'
#' Calculate weights for re-sampling with a temporal bias.
#' Most recent observations have weights that tend to 1,
#' while the oldest observations have weights that tend to 0
#' (meaning they are less likely to be kept).
#' Most recent observations: w -> 1; oldest: w -> 0.
#'
#' @param times a vector of time-stamps (Date or POSIX)
#' @param phi a vector of the relevance values of
#' the target variable (same length as \code{times})
#' @param rel.thr a relevance threshold above which an
#' observation is considered relevant
#'
#' @return A vector of temporally-biased re-sampling weights, scaled
#' to fit within range [0,1].
#' @author Mariana Oliveira
get_time_wts <- function(times, phi, rel.thr){
  # check types
  assertthat::assert_that(lubridate::is.Date(times) | lubridate::is.POSIXct(times) |
                            lubridate::is.POSIXlt(times) | lubridate::is.POSIXt(times),
                          msg = "times must be of type Date or POSIX")
  # Normal and relevant observations are scaled independently so that each
  # group spans the full [0,1] range on its own.
  norm_inds <- which(phi < rel.thr)
  relev_inds <- which(phi >= rel.thr)
  # (Cleanup: removed a dead numeric pre-allocation that was immediately
  # overwritten by this NA fill.)
  time_wts <- rep(NA, length(times))
  # Scale time so the most recent observation in the group gets weight 1 and
  # the oldest gets 0 (normal cases).
  time_wts[norm_inds] <- as.numeric( lubridate::seconds( lubridate::interval(times[norm_inds], min(times[norm_inds])))) /
    as.numeric( lubridate::seconds( lubridate::interval(max(times[norm_inds]), min(times[norm_inds])) ))
  # Same scaling for the relevant cases.
  time_wts[relev_inds] <- as.numeric( lubridate::seconds( lubridate::interval(times[relev_inds], min(times[relev_inds])))) /
    as.numeric( lubridate::seconds( lubridate::interval(max(times[relev_inds]), min(times[relev_inds])) ))
  time_wts
}
#' Calculate spatially-biased re-sampling weights
#'
#' Calculate weights for re-sampling with a spatial bias.
#' Observations have a distance that tends to 1 as
#' they are farther away from the closest relevant case (besides itself)
#' at time slice \code{t} (meaning they are more likely to be kept).
#' Farthest away from relevant cases at time slice t: d -> 1.
#'
#' @param df a data frame
#' @param phi a vector of the relevance values of
#' \code{df}'s target variable
#' @param rel.thr a relevance threshold above which an
#' observation is considered relevant
#' @param time the column name of the time-stamp
#' @param sites_sf An sf object containing station IDs and
#' geometry points of the locations. As an alternative, provide
#' \code{lon}, \code{lat}, and \code{crs}
#' @inheritParams df2site_sf
#'
#' @return A vector of spatially-biased re-sampling weights, scaled
#' to fit within range [0,1].
get_space_wts <- function(df, phi, rel.thr, sites_sf=NULL,
                          lon=NULL, lat=NULL, crs=NULL, site_id, time){
  # get sites into right format
  if(is.null(sites_sf)){
    assertthat::assert_that(!is.null(lon), !is.null(lat), !is.null(crs),
                            msg = "Please provide locations object of type sf or
                            CRS code and names of longitude and latitude columns")
    sites_sf <- df2site_sf(df, site_id, lon, lat, crs)
  }
  # Pairwise distance matrix between all sites; rows/cols are named
  # "SITE_<id>" (see the lookups below). Helper defined elsewhere in package.
  dists <- get_spatial_dist_mat(sites_sf, site_id)
  max_dist <- max(dists)
  timz <- df[[time]]
  space_wts <- vector(mode="numeric", length=nrow(df))
  space_wts <- rep(NA, length(space_wts))
  # NOTE(review): the inner loop below reuses `i` as its index, shadowing the
  # outer loop variable. R re-binds the loop variable on each outer iteration
  # so this works, but it is fragile -- consider renaming.
  for(i in 1:length(unique(timz))){
    # get time slice
    t <- unique(timz)[i]
    inds_t <- which(df[[time]]==t)
    # get indices of relevant cases at time slice t
    relev_inds <- inds_t[which(phi[inds_t] >= rel.thr)]
    # get indices of normal cases
    norm_inds <- setdiff(inds_t, relev_inds)
    if(!length(relev_inds)){
      # if there are no relevant cases, all have max distance
      # (will be normalized to d=1)
      space_wts[inds_t] <- max_dist # 1
    }else{
      # otherwise, for each case
      # find minimum distance to relevant case (at time slice t)
      relev_sites <- df[relev_inds, site_id]
      for(i in inds_t){
        s <- df[i, site_id]
        #if(length(setdiff(relev_sites, s))==0) browser()
        # if i is the only relevant case, it has maximum distance to other relevant cases
        if((length(unique(relev_sites))==1) && (s %in% relev_sites)){
          d <- max_dist # 1
          # get minimum distance (to a relevant case)
        }else{
          # check row for site s
          row <- which(rownames(dists)==paste0("SITE_",s))
          # check columns of sites that were relevant at this time slice (except itself)
          cols <- which(colnames(dists) %in% paste0("SITE_", setdiff(relev_sites, s)))
          d <- min(dists[row, cols])
        }
        # this is the raw space weight
        space_wts[i] <- d
      }
    }
    # Sanity check on the first time slice: only rows of that slice have
    # received weights so far.
    if(t==timz[1]) assertthat::assert_that(all(df[which(!is.na(space_wts)),time]==t))
  }
  # overall normal and relevant inds
  norm_inds <- which(phi < rel.thr)
  relev_inds <- which(phi >= rel.thr)
  # each group of weights is normalized to scale [0,1]
  # (norm_scale is defined elsewhere in the package)
  space_wts[norm_inds] <- norm_scale(space_wts[norm_inds])
  space_wts[relev_inds] <- norm_scale(space_wts[relev_inds])
  space_wts
}
#' Get spatio-temporal re-sampling weights
#'
#' A function that calculates different weights for
#' re-sampling that is temporally and/or spatially biased.
#'
#' @details \code{phi} gives the target variable's relevance
#' (higher relevance: phi -> 1; lower relevance: phi -> 0);
#' \code{time_wts} gives the observation's temporally biased
#' re-sampling weight (most recent observations: w -> 1;
#' oldest: w -> 0.); \code{space_wts} gives the observation's
#' spatially biased re-sampling weight (farthest away from other
#' relevant cases at time slice: d -> 1.).
#' High \code{time_wts} or \code{space_wts} means the observation is
#' more likely to be kept.
#'
#' @param form a formula describing the learning task
#' @param df a data frame
#' @param alpha weighting parameter for temporal and spatial
#' re-sampling probabilities. Default 0.5
#' @param beta weighting parameter for spatiotemporal weight and phi for
#' re-sampling probabilities. Default 0.9.
#' NOTE(review): \code{beta} is accepted and documented but never used in
#' this function's body -- confirm whether it should enter \code{stprob_add}.
#' @param epsilon minimum weight to be added to all observations.
#' Default 1E-4
#' @inheritParams get_phi
#' @inheritParams get_space_wts
#'
#' @return a data.frame with relevance \code{phi},
#' temporally biased weights \code{time_wts},
#' and spatially biased weights \code{space_wts} for
#' each row in \code{df}.
#'
#' @seealso \code{\link{get_phi}}, \code{\link{get_time_wts}},
#' \code{\link{get_space_wts}}.
#'
#' @export
sample_wts <- function(form, df, phi.control, alpha = 0.5, beta = 0.9,
                       rel.thr=0.9,
                       epsilon=1E-4,
                       site_id="site_id", time="time", sites_sf = NULL,
                       lon=NULL, lat=NULL, crs = NULL){
  # require(assertthat)
  assertthat::assert_that(alpha>=0, alpha<=1, msg = "alpha must be between 0 and 1")
  # check that there are no NAs in time and space tags
  assertthat::assert_that(!any(is.na(df[[time]])),
                          !any(is.na(df[[site_id]])),
                          msg = "variables 'time' and 'site_id' cannot contain any NAs")
  # check that there are no NAs in target
  y <- stats::model.response(stats::model.frame(form, df, na.action = NULL))
  assertthat::assert_that(!any(is.na(y)),
                          msg = "target variable must not contain any NAs")
  # check that either sites_sf or lon/lat are provided
  if(is.null(sites_sf)){
    assertthat::assert_that(!is.null(lon), !is.null(lat), !is.null(crs),
                            msg = "please provide locations object of type sf or
                            CRS code and names of longitude and latitude columns")
    assertthat::assert_that(!any(is.na(df[[lat]])), !any(is.na(df[[lon]])),
                            msg = "variables 'lat' and 'lon' cannot contain any NAs")
  }
  # RELEVANCE: utility-based relevance of each target value (see get_phi)
  phi <- get_phi(y, phi.control)
  # TIME: temporally-biased weights (recent -> 1)
  timz <- df[[time]]
  time_wts <- get_time_wts(times = timz, phi = phi, rel.thr = rel.thr)
  # SPACE: spatially-biased weights (far from relevant cases -> 1)
  space_wts <- get_space_wts(df = df, phi = phi, rel.thr = rel.thr, site_id = site_id,
                             sites_sf = sites_sf, lon = lon, lat = lat, time = time, crs = crs)
  assertthat::assert_that(length(y)==length(phi),
                          length(phi)==length(time_wts),
                          length(time_wts)==length(space_wts))
  stprob <- data.frame(phi=phi, time_wts=time_wts, space_wts=space_wts)
  # Convex combination of time and space weights; epsilon ensures every
  # observation keeps a strictly positive sampling probability.
  stprob$stprob_add <- (alpha*stprob$time_wts+(1-alpha)*stprob$space_wts) + epsilon
  stprob
}
|
d14116478f12800dbf9982cf1fd2f5c148e1e5ed
|
38912de2b401fae879612b9d77902dd6b3962ba1
|
/Metapopulation R Code/R Program/Attempt to debug.r
|
c35de1f64212fd07ebd433b85537d92af00872be
|
[] |
no_license
|
stephaniebland/MetapopulationModel
|
330091741454de381e8cdef9cab503018df876bb
|
15278b039c129f86a575626a5fbe3e68cd42df0b
|
refs/heads/main
| 2023-05-06T11:42:08.035249
| 2021-06-01T04:47:57
| 2021-06-01T04:47:57
| 372,700,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,152
|
r
|
Attempt to debug.r
|
# Patch Size: 10 circular patches, each of unit area.
A <- rep(0, 10); dim(A) <- c(10)
for (i in 1:10) {
  A[i] = 1
}
# Since patches are circular, r[i] = radius of ith patch.
r <- rep(0, 10); dim(r) <- c(10)
for (i in 1:10) {
  r[i] = sqrt(A[i] / pi)
}
# Assign xi and yi (patch centre coordinates)
x <- rep(0, 10); dim(x) <- c(10)
y <- rep(0, 10); dim(y) <- c(10)
cat('Laundry\n')
x
y
r
# d = centre-to-centre distance matrix, sp = gap between patch edges.
d <- rep(0, 10^2)
dim(d) <- c(10, 10)
sp <- rep(0, 10^2)
# BUG FIX: this line previously re-dimensioned `d` a second time, leaving
# `sp` without matrix dimensions.
dim(sp) <- c(10, 10)
for (i in 1:10) {
  x[i] <- runif(1, 0, 10)
  y[i] <- runif(1, 0, 10)
  for (j in 1:10) {
    d[i, j] <- ((x[i] - x[j])^2 + (y[i] - y[j])^2)^.5
    #sp[i,j] is space between patches
    # sp[i,j]<-d[i,j]-r[i]-r[j]
  }
  # while ((min(sp[i,j],true))<=0) {
  # x[i]=runif(1,0,Tx)
  # y[i]=runif(1,0,Ty)
  # for (j in 1:(i-1)) {
  # d[i,j]<-((x[i]-x[j])^2+(y[i]-y[j])^2)^.5
  #sp[i,j] is space between patches
  # sp[i,j]<-d[i,j]-r[i]-r[j]
  # }
  # }
}
# Recompute all distances once every centre is assigned (the loop above uses
# partially-assigned coordinates for j > i).
for (i in 1:10) {
  for (j in 1:10) {
    d[i, j] <- ((x[i] - x[j])^2 + (y[i] - y[j])^2)^.5
  }
}
# BUG FIX: c(10:10) collapses to the scalar 10, an invalid dim for a
# length-100 object; both matrices are 10 x 10.
dim(sp) <- c(10, 10)
dim(d) <- c(10, 10)
for (i in 1:10) {
  for (j in 1:10) {
    sp[i, j] = d[i, j] - r[i] - r[j]
  }
}
cat('\n')
cat('x and y\n')
x
y
cat('now for d:\n')
d
cat('now sp\n')
sp
plot(x, y)
|
ec58cdfbc4092f351b8cccb03f640d89010add97
|
493f2c7a53fb09a6252dd0703967b4536967e2eb
|
/R/weibull_grid_method.R
|
16e3cb1cc226b19815f7ab006a381b3e22906ea5
|
[
"MIT"
] |
permissive
|
sdtaylor/flowergrids
|
66a36d93b0069f33adac02a50b8ff89b30bf7448
|
f1fc0f24acec09606ab5bfa0efd88b6533519358
|
refs/heads/master
| 2021-09-22T08:39:46.186629
| 2021-09-14T19:15:05
| 2021-09-14T19:15:05
| 201,821,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,432
|
r
|
weibull_grid_method.R
|
#' @description The Weibull Grid method as described in Taylor et al.
#' Combines the methodology of Fink et al. 2010 with the Weibull estimator
#' as described in Pearse et al. 2017
#'
#' @param doy_points data.frame of observations; must have a \code{doy}
#'   column plus the coordinates used by \code{fit_estimators}
#' @param stratum_size_x,stratum_size_y size of each stratum of the grid
#' @param boxes_per_stratum number of random boxes placed per stratum
#' @param box_size side length of each random box
#' @param xlimits,ylimits extent of the study area
#' @param edge_buffer margin inside the limits where predictions are NA
#' @param not_enough_data_fallback NOTE(review): stored on the model object
#'   but currently unused (the tryCatch fallback was removed) -- confirm.
#' @param max_n_per_box cap on observations fed to the Weibull limit
#'   estimator per box (the \code{k} argument of \code{phest::weib.limit})
weibull_grid = function(doy_points,
                        stratum_size_x=0.1,
                        stratum_size_y=0.1,
                        boxes_per_stratum=5,
                        box_size=0.3,
                        xlimits=c(0,1),
                        ylimits=c(0,1),
                        edge_buffer=0.1,
                        not_enough_data_fallback='use_na',
                        max_n_per_box=50){
  # Record the full configuration so predict.weibull_grid can reuse it.
  model_details = list()
  model_details$doy_points = doy_points
  model_details$stratum_size_x = stratum_size_x
  model_details$stratum_size_y = stratum_size_y
  model_details$boxes_per_stratum = boxes_per_stratum
  model_details$box_size = box_size
  model_details$xlimits = xlimits
  model_details$ylimits = ylimits
  model_details$edge_buffer = edge_buffer
  model_details$max_n_per_box = max_n_per_box
  model_details$not_enough_data_fallback = not_enough_data_fallback
  # The uniformly random boxes.
  boxes = create_grid_boxes(stratum_size_x = stratum_size_x,
                            stratum_size_y = stratum_size_y,
                            boxes_per_stratum = boxes_per_stratum,
                            box_size = box_size,
                            xlimits = xlimits,
                            ylimits = ylimits)
  # Estimate onset/peak/end of season from the doy values inside one box.
  weibull_estimator_for_grid = function(doy_points_subset){
    estimates = list()
    # BUG FIX: k was hard-coded to 50 here, silently ignoring the
    # max_n_per_box argument (its default is 50, so default behavior is
    # unchanged). as.numeric() added for consistency with end_estimate.
    estimates$onset_estimate = as.numeric(phest::weib.limit(doy_points_subset$doy, k=max_n_per_box)[1])
    # End of season: negate doy so the limit estimator of the minimum
    # becomes an estimator of the maximum, then negate back.
    estimates$end_estimate = as.numeric(phest::weib.limit(doy_points_subset$doy*-1, k=max_n_per_box)[1]) * -1
    estimates$peak_estimate = mean(doy_points_subset$doy)
    return(estimates)
  }
  # fit_estimators does the spatial subsetting and fitting
  model_details$fitted_boxes = fit_estimators(boxes = boxes,
                                              data = doy_points,
                                              estimator = weibull_estimator_for_grid)
  return(structure(model_details, class = 'weibull_grid'))
}
#' @description Make predictions using a Weibull Grid model
#'
#' @param model a fitted object of class \code{weibull_grid}
#' @param doy_points data.frame with columns \code{x} and \code{y} giving
#'   the locations at which to predict
#' @param type one of \code{'onset'}, \code{'end'}, \code{'peak'}
#' @param se if TRUE, return a data.frame with interval bounds as well
#' @param level coverage of the interval, taken as quantiles of the
#'   per-box estimates
#'
#' @return vector if se is FALSE, data.frame if TRUE
predict.weibull_grid = function(model,
                                doy_points,
                                type = 'onset',
                                se = F,
                                level = 0.95){
  # TRUE when (x, y) lies within edge_buffer of the study-area limits; such
  # points get NA estimates. within_bounds2 is defined elsewhere in package.
  outside_buffer = function(x,y){
    !within_bounds2(x,y,
                    x_low = model$xlimits[1] + model$edge_buffer,
                    x_high = model$xlimits[2] - model$edge_buffer,
                    y_low = model$ylimits[1] + model$edge_buffer,
                    y_high = model$ylimits[2] - model$edge_buffer)
  }
  lower_quantile = (1 - level)/2
  upper_quantile = 1 - lower_quantile
  # Point estimate at (x, y) = median of the estimates from every fitted box
  # covering that location; the interval comes from the quantiles of those
  # per-box estimates.
  estimate_metrics_from_model = function(x, y){
    box_subset = subset_boxes_to_point(x = x,
                                       y = y,
                                       boxes = model$fitted_boxes)
    estimates = list()
    estimates$x = x
    estimates$y = y
    if(outside_buffer(x,y)){
      estimates$onset_estimate = NA
      estimates$onset_estimate_upper = NA
      estimates$onset_estimate_lower = NA
      estimates$end_estimate = NA
      estimates$end_estimate_upper = NA
      estimates$end_estimate_lower = NA
      estimates$peak_estimate = NA
      estimates$peak_estimate_upper = NA
      estimates$peak_estimate_lower = NA
      estimates$outside_buffer = TRUE
    } else {
      estimates$onset_estimate = median(box_subset$onset_estimate, na.rm=T)
      estimates$onset_estimate_upper = quantile(box_subset$onset_estimate, upper_quantile, na.rm=T)
      estimates$onset_estimate_lower = quantile(box_subset$onset_estimate, lower_quantile, na.rm=T)
      estimates$end_estimate = median(box_subset$end_estimate, na.rm=T)
      estimates$end_estimate_upper = quantile(box_subset$end_estimate, upper_quantile, na.rm=T)
      estimates$end_estimate_lower = quantile(box_subset$end_estimate, lower_quantile, na.rm=T)
      estimates$peak_estimate = median(box_subset$peak_estimate, na.rm=T)
      estimates$peak_estimate_upper = quantile(box_subset$peak_estimate, upper_quantile, na.rm=T)
      estimates$peak_estimate_lower = quantile(box_subset$peak_estimate, lower_quantile, na.rm=T)
      estimates$outside_buffer = FALSE
    }
    return(estimates)
  }
  # Get estimates for each prediction point
  point_estimates = purrr::pmap_df(doy_points[c('x','y')], estimate_metrics_from_model)
  outside_buffer_count = sum(point_estimates$outside_buffer)
  if(outside_buffer_count>0){
    warning(paste(outside_buffer_count,'points were outside the buffer and could not be estimated.'))
  }
  # Copy the requested metric into the generic estimate columns.
  if(type == 'onset'){
    point_estimates$estimate = point_estimates$onset_estimate
    point_estimates$estimate_lower = point_estimates$onset_estimate_lower
    point_estimates$estimate_upper = point_estimates$onset_estimate_upper
  } else if(type == 'end'){
    point_estimates$estimate = point_estimates$end_estimate
    point_estimates$estimate_lower = point_estimates$end_estimate_lower
    point_estimates$estimate_upper = point_estimates$end_estimate_upper
  } else if(type == 'peak'){
    point_estimates$estimate = point_estimates$peak_estimate
    point_estimates$estimate_lower = point_estimates$peak_estimate_lower
    point_estimates$estimate_upper = point_estimates$peak_estimate_upper
  } else {
    stop(paste('unknown prediction type: ',type))
  }
  if(se){
    return(dplyr::select(point_estimates, estimate, estimate_lower, estimate_upper))
  } else{
    return(point_estimates$estimate)
  }
}
|
46bbaf4aba6882a42c2c8713a5d35f660685c7cc
|
7bb14f259425e599291c35da9fd2844869019847
|
/localR/docker_status.R
|
01ad919e04fa2767b69d650ee9551f9c516ebcf4
|
[] |
no_license
|
MazamaScience/MazamaProductionUtils
|
fc1f5ac0cb2378de92d8a68c527ba1e30c09d6e2
|
f4871d7ae69aae5b8622616b076eeb7ba95b7e34
|
refs/heads/master
| 2020-04-06T20:10:56.239737
| 2018-11-17T00:18:00
| 2018-11-17T00:18:00
| 157,764,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,337
|
r
|
docker_status.R
|
library(stringr)
library(dplyr)
parseDockerStats <- function(text) {
  # Parse the captured fixed-width output of `docker stats` into a tidy data
  # frame of per-container resource usage.
  #
  # text: raw captured text, header row included (skipped below).
  # Returns a data frame with containerID, cpuPercent (numeric %),
  # mem_use_mb (numeric, megabytes), mem_use_perc (numeric %), PIDs.
  col_positions <- readr::fwf_positions(
    start = c(1,21,41,65,85,107,129),
    end = c(19,39,63,83,105,107,NA),
    col_names = c("container", "cpu_perc", "mem_usage", "mem_perc", "net_io", "block_io", "pids")
  )
  df1 <- readr::read_fwf(
    text,
    col_positions,
    col_types = c('cccccci'),
    skip = 1
  )
  # Parse columns.
  # BUG FIX: cpu_perc was left as a character column, so downstream
  # arrange(desc(cpuPercent)) sorted lexicographically, not numerically.
  cpu_perc <- as.numeric(str_replace(df1$cpu_perc, "%", ""))
  mem_use <- str_extract(df1$mem_usage, "\\d*\\.\\d*") %>% as.numeric()
  mem_use_units <- str_extract(df1$mem_usage, "[[:alpha:]]+\\b")
  # Convert the reported unit to megabytes.
  mem_use_multiplier <- ifelse(str_detect(mem_use_units, "M"), 1, # Mb
                        ifelse(str_detect(mem_use_units, "K"), .001, # Kb
                        ifelse(str_detect(mem_use_units, "G"), 1000, # Gb
                               .000001 # bytes
                        )))
  mem_use_mb <- mem_use*mem_use_multiplier
  mem_perc <- str_replace(df1$mem_perc, "%", "") %>% as.numeric()
  df <- data_frame(containerID = df1$container,
                   cpuPercent = cpu_perc,
                   mem_use_mb = mem_use_mb,
                   mem_use_perc = mem_perc,
                   PIDs = df1$pids)
  return(df)
}
parseDockerPs <- function(text) {
  # Parse captured fixed-width `docker ps` output into a data frame,
  # stripping the double quotes docker wraps around the COMMAND column.
  #
  # text: raw captured text, header row included (skipped below).
  ps_cols <- readr::fwf_positions(
    start = c(1, 21, 58, 83, 103, 123, 179),
    end   = c(19, 56, 81, 101, 121, 177, NA),
    col_names = c("containerID", "image", "command", "created",
                  "status", "ports", "name")
  )
  parsed <- readr::read_fwf(
    text,
    ps_cols,
    col_types = c('ccccccc'),
    skip = 1
  )
  mutate(parsed, command = str_remove_all(command, '"'))
}
# Load the captured `docker stats` / `docker ps` output from disk and join
# the two tables on container ID.
stats_text <- readr::read_file("docker_stats.txt")
ps_text <- readr::read_file("docker_ps.txt")
stats <- parseDockerStats(stats_text)
ps <- parseDockerPs(ps_text)
docker_status <- left_join(stats, ps, by = "containerID")
docker_status
# Interactive views: rank containers by each resource metric.
docker_status %>% arrange(desc(mem_use_perc))
docker_status %>% arrange(desc(cpuPercent))
docker_status %>% arrange(desc(mem_use_mb))
docker_status %>% arrange(desc(PIDs))
# CPU ranking restricted to the monitor-* images.
docker_status %>%
  filter(stringr::str_detect(image, "^monitor-")) %>%
  arrange(desc(cpuPercent)) %>%
  select(name, cpuPercent, mem_use_perc, status)
|
c0a7c7289f60db66f661ac1c8b4e5dbeb578f48f
|
03e91b39ad4fc882c368846faf257ce24d2a6107
|
/glbh0034_functions_v3.R
|
9a0cabec22d1b8f5038aebf822cf4e5fdee1f107
|
[] |
no_license
|
liwu1/glbh0034
|
928f09c990ce8e04a3a14e4e3e91f6f56deddf9a
|
d1b9fadcb0b3007db39867234146aff1f0fc2381
|
refs/heads/main
| 2023-03-22T01:04:56.291921
| 2021-03-08T18:57:55
| 2021-03-08T18:57:55
| 344,211,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,648
|
r
|
glbh0034_functions_v3.R
|
##############
## packages ##
##############
require(pROC)
require(mixtools)
require(scales)
###################################
## finite mixture model function ##
###################################
plot_fmm1 <- function(data,mu1,sig1,mu2,sig2,cutoff,plot_x,gauss1a,gauss2a) {
  # Histogram of log optical densities with the fitted seronegative
  # component (gauss1a over plot_x) and the seropositivity cutoff overlaid.
  #
  # data: data frame with an `optical_density` column.
  # mu1/sig1: mean/sd of the seronegative component (log-OD scale).
  # mu2/sig2, gauss2a: NOTE(review): accepted but never used/drawn in this
  #   function -- confirm whether the positive component should be plotted.
  # cutoff: seropositivity threshold on the log-OD scale.
  # plot_x/gauss1a: x grid and negative-component density values to draw.
  x_axis <- c(0.0001,0.001,0.01,0.05,0.1,0.5,1,1.5,2,4)
  log_axis <- log(x_axis)  # axis tick positions on the log scale
  hist(log(data$optical_density),breaks=35,main="seropositivity threshold\n(finite mixture model)",cex.main=1,col="grey",border="white",
       xlab="optical density (OD)\n(log scale)",freq=F,ylim=c(0,0.5), xlim=c(-8,4), axes=F,cex.lab=0.9)
  axis(side=1,at=log_axis,labels=x_axis,cex.axis=0.8)
  axis(side=2,at=seq(0,0.5,0.1),cex.axis=0.8)
  abline(v=cutoff,col="red",lwd=2)  # seropositivity threshold
  abline(v=mu1,col="blue")          # mean of the negative population
  lines(plot_x,gauss1a,col="blue")
  legend("topleft",legend=c(paste("sero-positive >",round(exp(cutoff),2), "OD"),"negative population"),
         col=c("red","blue"),lty=1, bty="n", cex=0.75, seg.len=1)
}
plot_fmm <- function(data,mu1,sig1,mu2,sig2,cutoff) {
  # Same plot as plot_fmm1, but the density grid is computed internally from
  # the observed range of log(optical_density).
  #
  # data: data frame with an `optical_density` column.
  # mu1/sig1: mean/sd of the seronegative component (log-OD scale).
  # mu2/sig2: mean/sd of the seropositive component. NOTE(review): gauss2a
  #   is computed below but never drawn -- confirm whether it should be.
  # cutoff: seropositivity threshold on the log-OD scale.
  plot_x <- seq( min(log(data$optical_density),na.rm=T),max(log(data$optical_density),na.rm=T),0.1)
  gauss1a <- dnorm(plot_x,mu1,sig1)
  gauss2a <- dnorm(plot_x,mu2,sig2)
  x_axis <- c(0.0001,0.001,0.01,0.05,0.1,0.5,1,1.5,2,4)
  log_axis <- log(x_axis)  # axis tick positions on the log scale
  hist(log(data$optical_density),breaks=35,main="seropositivity threshold\n(finite mixture model)",cex.main=1,col="grey",border="white",
       xlab="optical density (OD)\n(log scale)",freq=F,ylim=c(0,0.5), xlim=c(-8,4), axes=F,cex.lab=0.9)
  axis(side=1,at=log_axis,labels=x_axis,cex.axis=0.8)
  axis(side=2,at=seq(0,0.5,0.1),cex.axis=0.8)
  abline(v=cutoff,col="red",lwd=2)  # seropositivity threshold
  abline(v=mu1,col="blue")          # mean of the negative population
  lines(plot_x,gauss1a,col="blue")
  legend("topleft",legend=c(paste("sero-positive >",round(exp(cutoff),2), "OD"),"negative population"),
         col=c("red","blue"),lty=1, bty="n", cex=0.75, seg.len=1)
}
#####################################
## sero-conversion rate functions ##
#####################################
plot_seroprev <- function(plot.age.profile,district,colour) {
  # Scatter plot of observed sero-prevalence by age bin with binomial CI
  # bars, for one district.
  #
  # plot.age.profile: result of create.age.profile(); columns 2-5 of
  #   $age.profiles are median age, prevalence, lower CI, upper CI.
  # district: plot title text.
  # colour: point/CI colour.
  max.age <- max(plot.age.profile$age.profiles[,2])+5
  age_vector <- c(1:max.age)
  seroprev_x <- plot.age.profile$age.profiles[,2]
  seroprev_y <- plot.age.profile$age.profiles[,3]
  seroprev_y_li <- plot.age.profile$age.profiles[,4]
  seroprev_y_ui <- plot.age.profile$age.profiles[,5]
  # Invisible (white) points only set up the coordinate system; all visible
  # elements are drawn manually below.
  plot(age_vector,rep(1,max.age),pch=19,ylim=c(0,1.1),col="white",xlim=c(1,50),
       cex.axis=1.1,cex.main=1.5,cex.lab=1.1,axes=F,ylab="",xlab="")
  axis(side=1,at=c(0,10,20,30,40,50))
  axis(side=2,at=c(0,0.2,0.4,0.6,0.8,1,1.1),labels=c(0,0.2,0.4,0.6,0.8,1,""))
  mtext(side=1,"Age, years",line=2.5,font=2,cex=1)
  mtext(side=2,"Sero-prevalence",line=2.5,font=2,cex=1)
  mtext(side=3,district,line=0,font=2,cex=1.5)
  points(seroprev_x,seroprev_y,pch=21,col=colour,bg=alpha(colour,0.4),cex=1.5)
  # Vertical confidence-interval bars.
  segments(x0=seroprev_x,x1=seroprev_x,y0=seroprev_y_li,seroprev_y_ui,col=colour)
}
plot_scr <- function(plot.age.profile,district,colour,scr_fit) {
  # Observed sero-prevalence by age plus the fitted reverse catalytic model
  # curve with its confidence band, for one district.
  #
  # plot.age.profile: result of create.age.profile() for this district.
  # district: one of "Jinja", "Kanungu", "Tororo"; also used as the title.
  # colour: colour for points, curve and band.
  # scr_fit: matrix with one row per district (order below) and columns
  #   lambda.est / lambda.lower / lambda.upper / rho.est.
  #
  # NOTE(review): the district -> row mapping is hard-coded; any other
  # district name leaves `row` undefined and the function errors.
  if (district=="Jinja") row <- 1
  if (district=="Kanungu") row <- 2
  if (district=="Tororo") row <- 3
  max.age <- max(plot.age.profile$age.profiles[,2])+5
  age_vector <- c(1:max.age)
  seroprev_x <- plot.age.profile$age.profiles[,2]
  seroprev_y <- plot.age.profile$age.profiles[,3]
  seroprev_y_li <- plot.age.profile$age.profiles[,4]
  seroprev_y_ui <- plot.age.profile$age.profiles[,5]
  p.lambda.rcm1 <- scr_fit[row,"lambda.est"]
  p.lambda.rcm1_li <- scr_fit[row,"lambda.lower"]
  p.lambda.rcm1_ui <- scr_fit[row,"lambda.upper"]
  p.rho.rcm1 <- scr_fit[row,"rho.est"]
  # Model-predicted curves (point estimate and lambda CI bounds), assuming
  # constant lambda/rho over age. theo.seroprev is defined elsewhere.
  plot.pred <- theo.seroprev(rep(p.lambda.rcm1,max.age),rep(p.rho.rcm1,max.age))
  plot.pred_li <- theo.seroprev(rep(p.lambda.rcm1_li,max.age),rep(p.rho.rcm1,max.age))
  plot.pred_ui <- theo.seroprev(rep(p.lambda.rcm1_ui,max.age),rep(p.rho.rcm1,max.age))
  # Closed polygon outline for the shaded confidence band.
  pol_y <- c(age_vector,rev(age_vector))
  pol_x <- c(plot.pred_li$seroprev,plot.pred_ui$seroprev[length(plot.pred_ui$seroprev):1])
  plot(age_vector,rep(1,max.age),pch=19,ylim=c(0,1.1),col="white",xlim=c(1,50),
       cex.axis=1.1,cex.main=1.5,cex.lab=1.1,axes=F,ylab="",xlab="")
  axis(side=1,at=c(0,10,20,30,40,50))
  axis(side=2,at=c(0,0.2,0.4,0.6,0.8,1,1.1),labels=c(0,0.2,0.4,0.6,0.8,1,""))
  mtext(side=1,"Age, years",line=2.5,font=2,cex=1)
  mtext(side=2,"Sero-prevalence",line=2.5,font=2,cex=1)
  mtext(side=3,district,line=0,font=2,cex=1.5)
  points(seroprev_x,seroprev_y,pch=21,col=colour,bg=alpha(colour,0.4),cex=1.5)
  segments(x0=seroprev_x,x1=seroprev_x,y0=seroprev_y_li,seroprev_y_ui,col=colour)
  polygon(pol_y,pol_x, col=alpha(colour,0.2), border=NA )
  lines(age_vector,plot.pred$seroprev,col=colour,lwd=2)
  lambda_lab <- round(p.lambda.rcm1,4)
  # Annotate the plot with lambda [95% CI] and rho.
  text(10,0.14,bquote(lambda*": "*.(round(p.lambda.rcm1,3))*" ["*.(round(p.lambda.rcm1_li,3))*" - "*.(round(p.lambda.rcm1_ui,3))*"]"),adj=0,cex=1.1)
  text(10,0.05,bquote(rho*": "*.(round(p.rho.rcm1,3))),adj=0,cex=1.1)
}
plot_scr2 <- function(plot.age.profile,district,colour,scr_fit2) {
  # Observed sero-prevalence by age plus a fitted two-lambda (change-point)
  # reverse catalytic model curve, for one district.
  #
  # scr_fit2: named vector/list with elements lambda1, lambda2, rho and
  #   time.of.change (years before sampling when lambda switched).
  max.age <- max(plot.age.profile$age.profiles[,2])+5
  age_vector <- c(1:max.age)
  seroprev_x <- plot.age.profile$age.profiles[,2]
  seroprev_y <- plot.age.profile$age.profiles[,3]
  seroprev_y_li <- plot.age.profile$age.profiles[,4]
  seroprev_y_ui <- plot.age.profile$age.profiles[,5]
  p.lambda1 <- scr_fit2["lambda1"]
  p.lambda2 <- scr_fit2["lambda2"]
  p.rho <- scr_fit2["rho"]
  p.time <- scr_fit2["time.of.change"]
  # lambda1 fills the first (max.age - time.of.change) entries, lambda2 the
  # remaining time.of.change entries. NOTE(review): the ordering assumed by
  # theo.seroprev (defined elsewhere) should be confirmed against its code.
  plot.lambda <- unlist(c(rep(p.lambda1,max.age-p.time),rep(p.lambda2,p.time)))
  plot.rho <- unlist(rep(p.rho,max.age))
  plot.pred <- theo.seroprev(plot.lambda,plot.rho)
  plot(age_vector,rep(1,max.age),pch=19,ylim=c(0,1.1),col="white",xlim=c(1,50),
       cex.axis=1.1,cex.main=1.5,cex.lab=1.1,axes=F,ylab="",xlab="")
  axis(side=1,at=c(0,10,20,30,40,50))
  axis(side=2,at=c(0,0.2,0.4,0.6,0.8,1,1.1),labels=c(0,0.2,0.4,0.6,0.8,1,""))
  mtext(side=1,"Age, years",line=2.5,font=2,cex=1)
  mtext(side=2,"Sero-prevalence",line=2.5,font=2,cex=1)
  mtext(side=3,district,line=0,font=2,cex=1.5)
  points(seroprev_x,seroprev_y,pch=21,col=colour,bg=alpha(colour,0.4),cex=1.5)
  segments(x0=seroprev_x,x1=seroprev_x,y0=seroprev_y_li,seroprev_y_ui,col=colour)
  lines(age_vector,plot.pred$seroprev,col=colour,lwd=2)
  lambda1_lab <- as.numeric(round(p.lambda1,4))
  lambda2_lab <- round(p.lambda2,4)
  legend("bottomright",legend=c(paste0("lambda1: ",lambda1_lab),paste0("lambda2: ",lambda2_lab)),bty="n",cex=0.8)
}
#######################################
## reverse catalytic model functions ##
#######################################
# confidence interval #
find_ci <- function(results) {
  # Wald-type 95% confidence intervals for parameters returned by optim()
  # run as a maximisation with hessian = TRUE. The covariance matrix is the
  # negative inverse Hessian; each CI is estimate +/- 1.96 * SE.
  est <- results$par
  covmat <- solve(results$hessian)
  n_par <- length(est)
  bounds <- vapply(seq_len(n_par), function(k) {
    se_k <- sqrt(-covmat[k, k])
    c(est[k] - 1.96 * se_k, est[k] + 1.96 * se_k)
  }, numeric(2))
  M <- cbind(est, bounds[1, ], bounds[2, ])
  colnames(M) <- (c("estimate", "lower", "upper"))
  return(M)
}
# log likelihood #
log_lik <- function(x, calc_p, calc_deriv_p, pos, ...) {
  # Bernoulli log-likelihood of the 0/1 serostatus vector `pos` under the
  # seropositivity probabilities calc_p(x, ...). calc_deriv_p is unused here
  # but kept so log_lik and grad share one signature for optim().
  prob <- calc_p(x, ...)
  sum(pos * log(prob) + (1 - pos) * log(1 - prob))
}
# gradiant #
grad <- function(x, calc_p, calc_deriv_p, pos, ...) {
  # Gradient of log_lik with respect to x, via the chain rule:
  # dl/dx_i = sum_j (pos_j - p_j) / (p_j * (1 - p_j)) * dp_j/dx_i,
  # where dp is the matrix returned by calc_deriv_p (one column per param).
  p <- calc_p(x, ...)
  dp <- calc_deriv_p(x, ...)
  score <- (pos - p) / (p * (1 - p))
  vapply(seq_along(x), function(i) sum(score * dp[, i]), numeric(1))
}
# probability of sero-positivity - function of lambda, rho, and individual age #
calc_p0 <- function(x, age) {
  # Reverse catalytic model: seroprevalence approaches the plateau
  # lambda / (lambda + rho) at rate (lambda + rho) with age.
  # x holds the parameters on the log scale: c(log(lambda), log(rho)).
  rate_in <- exp(x[1])   # seroconversion rate, lambda
  rate_out <- exp(x[2])  # seroreversion rate, rho
  total <- rate_in + rate_out
  (rate_in / total) * (1 - exp(-total * age))
}
# derivative probability sero-positivity #
calc_deriv_p0 <- function(x, age) {
  # Partial derivatives of calc_p0 with respect to log(lambda) (column d1)
  # and log(rho) (column d2); the leading lambda/rho factors come from
  # differentiating on the log scale (chain rule).
  lam <- exp(x[1])
  rho <- exp(x[2])
  total <- lam + rho
  decay <- exp(-total * age)
  plateau <- lam / total
  d1 <- lam * (rho / total^2 * (1 - decay) + plateau * age * decay)
  d2 <- rho * (-lam / total^2 * (1 - decay) + plateau * age * decay)
  cbind(d1, d2)
}
predict_p <- function(ages, results, calc_p, calc_deriv_p, ...) {
  # Predicted seroprevalence with 95% CIs at each requested age, from a
  # fitted optim() result (maximisation with hessian = TRUE). CIs are
  # computed on the logit scale via the delta method and back-transformed.
  V <- -solve(results$hessian)  # parameter covariance matrix
  x <- results$par
  n <- length(ages)
  pred <- vector(length=n)
  lpred <- vector(length=n)
  upred <- vector(length=n)
  for(i in 1:n) {
    age <- ages[i]
    if(age==0) {
      # Model prevalence at age 0 is exactly 0, with no uncertainty.
      pred[i]=0
      lpred[i]=0
      upred[i]=0
    } else {
      p <- calc_p(x, age, ...)
      # find CI for logit(p) using the delta method #
      logit_p <- qlogis(p)
      dp <- calc_deriv_p(x, age, ...)
      deriv_logit <- 1/(p*(1-p))  # d logit(p) / d p
      d <- vector(length=length(x))
      for(j in 1:length(x))
        d[j] <- deriv_logit*dp[j]
      V1 <- d %*% V %*% d  # delta-method variance of logit(p)
      se <- sqrt(V1[1,1])
      pred[i] <- p
      lpred[i] <- plogis(logit_p - 1.96*se)
      upred[i] <- plogis(logit_p + 1.96*se)
    }
  }
  M <- cbind(ages, pred, lpred, upred)
  colnames(M) <- (c("age", "predicted", "lower", "upper"))
  return(M)
}
dbinom.mod <- function(x, n, p) {
  # Binomial log-pmf written out with log-factorials; numerically equivalent
  # to dbinom(x, n, p, log = TRUE).
  log_choose <- lfactorial(n) - lfactorial(x) - lfactorial(n - x)
  log_choose + x * log(p) + (n - x) * log(1 - p)
}
loglik.null <- function(n1,n0) {
  # Total log-likelihood of the null model: one common seropositivity
  # probability (the pooled prevalence) across all age groups.
  #
  # n1, n0: vectors of seropositive / seronegative counts per age group.
  # Returns the summed binomial log-likelihood.
  p.binom <- sum(n1)/(sum(n1)+sum(n0))
  # Clamp degenerate probabilities so log() stays finite (same bounds as
  # loglik.scm.fixed).
  lista <- which(p.binom==0)
  p.binom[lista] <- 0.0001
  lista <- which(p.binom==1)
  p.binom[lista] <- 0.9991
  data <- cbind(n1,n1+n0)
  data <- cbind(data,p.binom)
  # BUG FIX: this line previously read `loglik < -as.numeric(...)`, a
  # comparison against the negated value -- `loglik` was never assigned and
  # the sum() below failed. Leftover debug cat()/print() calls removed.
  loglik <- as.numeric(apply(data,1,function(x)dbinom.mod(x=x[1],n=x[2],p=x[3])))
  loglik.total <- sum(loglik)
  return(loglik.total)
}
# log likelihood SCR with fixed rho #
loglik.scm.fixed <- function(n1, n0, t, lambda, rho) {
  # Total log-likelihood of the reverse catalytic model at fixed
  # (lambda, rho): the expected seroprevalence at age t is
  # lambda/(lambda+rho) * (1 - exp(-(lambda+rho) * t)).
  #
  # n1, n0: seropositive / seronegative counts per age group; t: ages.
  plateau <- lambda / (lambda + rho)
  rate <- lambda + rho
  p.binom <- plateau * (1 - exp(-rate * t))
  # Clamp degenerate probabilities so log() stays finite.
  zero_idx <- which(p.binom == 0)
  p.binom[zero_idx] <- 0.0001
  one_idx <- which(p.binom == 1)
  p.binom[one_idx] <- 0.9991
  counts <- cbind(n1, n1 + n0)
  counts <- cbind(counts, p.binom)
  row_ll <- as.numeric(apply(counts, 1, function(row) dbinom.mod(x = row[1], n = row[2], p = row[3])))
  return(sum(row_ll))
}
# scr fit functions #
fit_model <- function(names, init, predict, calc_p, calc_deriv_p, pos, age, ...) {
  # Maximise the binomial log-likelihood over log-scale parameters with BFGS,
  # then back-transform estimates and CIs with exp().
  #
  # names: row labels for the estimates matrix (e.g. c("lambda","rho")).
  # init: starting values on the log scale.
  # predict: ages at which model predictions are returned (may be empty).
  # calc_p / calc_deriv_p: model probability function and its derivative.
  # pos, age: individual-level serostatus (0/1) and age vectors.
  # Returns list(estimates, LL) plus pred when `predict` is non-empty.
  results <- optim(par=init, fn=log_lik,gr=grad, calc_p, calc_deriv_p, pos, age, ..., control=list(reltol=1E-15, fnscale=-1), method = "BFGS", hessian = TRUE)
  est <- exp(find_ci(results))  # back-transform from the log scale
  rownames(est) <- names
  if(length(predict)>=1) {
    pred <- predict_p(predict, results, calc_p, calc_deriv_p, ...)
    return(list(estimates=est, LL=results$value, pred=pred))
  }
  else
    return(list(estimates=est, LL=results$value))
}
# reversible catalytic model with a single force of infection (lambda), sero-reversion (rho) #
rev_cat <- function(age, pos, predict=vector()){
  # Convenience wrapper: fit the two-parameter reverse catalytic model
  # (constant lambda and rho) via fit_model, starting from
  # log(lambda) = -2 and log(rho) = -3.
  return(fit_model(names=c("lambda","rho"), init=c(-2, -3), predict=predict, calc_p=calc_p0, calc_deriv_p=calc_deriv_p0, pos=pos, age=age))
}
null.model.analysis <- function(scm.object,analysis='overall') {
  # Fit the null (constant seroprevalence) model to a data object created by
  # create.data.object, either pooling all individuals ('overall') or
  # fitting one constant per stratum ('split').
  #
  # Returns list(loglik.total, df), where df is the number of fitted
  # probabilities (1 for 'overall', one per group for 'split').
  if(analysis=='overall') {
    tabela <- table(scm.object$age,scm.object$seropos)
    loglik <- loglik.null(n1=tabela[,2],n0=tabela[,1])
    output <- list(loglik.total=loglik,df=1)
    return(output)
  }
  if(analysis=='split') {
    loglik <- 0
    # (Cleanup: removed a dead `df <- 0` -- df was never incremented and the
    # returned value is length(groups).)
    groups <- unique(scm.object$group)
    for(i in groups) {
      lista <- which(scm.object$group==i)
      tabela <- table(scm.object$age[lista],scm.object$seropos[lista])
      loglik <- loglik+loglik.null(tabela[,2],tabela[,1])
    }
    output <- list(loglik.total=loglik,df=length(groups))
    return(output)
  }
  # Scalar condition: use short-circuit && rather than elementwise &.
  if(analysis!='split' && analysis!='overall') cat("'Analysis' option unknown! Try 'overall' or 'split'\n")
}
# creates matrix of individual level data - age, seropositivity, optional stratification (group) #
create.data.object <- function(age, seropos, group = NULL) {
  # Bundle individual-level data (age, 0/1 serostatus, optional stratum
  # label) into the list structure the fitting functions expect. Ages are
  # rounded to whole years; group labels are coerced to character and
  # default to a single stratum "1". Returns NULL when age and seropos
  # differ in length.
  if (is.null(group)) {
    group <- rep(1, length(age))
  }
  if (length(age) != length(seropos)) {
    return(NULL)
  }
  list(age = round(age),
       seropos = seropos,
       group = as.character(group),
       model = 'null')
}
# creates matrix with sero-prevalence by age category #
# Builds empirical sero-prevalence age profiles (per band: median age, observed
# prevalence, exact binomial CI at level 'cl').  'analysis' selects pooled
# ("overall"), one profile per stratum ("split"), or a single named stratum.
# 'lag' is the quantile step that defines the age bands.
create.age.profile <- function(scm.object,analysis='overall',lag=0.05,cl=0.95) {
age <- scm.object$age
sero.pos <- scm.object$seropos
if(analysis=='overall') {
# single profile over all individuals, labelled 'overall'
output <- age.profile.group(age,sero.pos,lag,cl)
group <- rep('overall',dim(output)[1])
output <- data.frame(group=group,output)
output <- list(age.profiles=output,analysis='overall')
return(output)
}
if(analysis=='split') {
# one profile per stratum, stacked into a single data frame
groups <- unique(scm.object$group)
output <- c()
group <- c()
for(i in groups) {
lista <- which(scm.object$group==i)
results <- age.profile.group(age[lista],sero.pos[lista],lag,cl)
group <- c(group,rep(i,dim(results)[1]))
output <- rbind(output,results)
}
output <- data.frame(group=group,output)
output <- list(age.profiles=output,analysis='split')
return(output)
}
if(analysis!='overall'&analysis!='split') {
# any other value of 'analysis' is interpreted as a stratum name
exist.group <- which(unique(scm.object$group)==analysis)
if(length(exist.group)>0) {
lista <- which(scm.object$group==analysis)
output <- age.profile.group(age[lista],sero.pos[lista],lag,cl)
group <- rep(analysis,dim(output)[1])
output <- data.frame(group=group,output)
output <- list(age.profiles=output,analysis=analysis)
return(output)
} else {
cat("ERROR: Group name does not exist in the data set!\t")
}
}
}
# Empirical sero-prevalence profile for one stratum: individuals are binned by
# the age quantiles seq(lag, 1, lag); each output row holds the bin's median
# age, observed sero-prevalence and an exact binomial CI at level 'cl'.
# Fixed: the original hard-coded a loop 'for(i in 2:num.points)', which indexed
# out of bounds when all ages collapse into a single quantile bin.
age.profile.group <- function(age, sero.pos, lag, cl) {
  breaks <- unique(round(quantile(age, seq(lag, 1, lag))))
  num.points <- length(breaks)
  output <- matrix(NA, ncol = 4, nrow = num.points)
  for (i in seq_len(num.points)) {
    # first bin is age <= breaks[1]; later bins are (breaks[i-1], breaks[i]]
    if (i == 1) {
      sel <- which(age <= breaks[1])
    } else {
      sel <- which(age > breaks[i - 1] & age <= breaks[i])
    }
    output[i, 1] <- median(age[sel])
    output[i, 2] <- mean(sero.pos[sel])
    output[i, 3:4] <- binom.test(sum(sero.pos[sel], na.rm = T), length(sel), conf.level = cl)$conf.int
  }
  output <- data.frame(output)
  colnames(output) <- c('age', 'sero.prev', 'lower', 'upper')
  return(output)
}
# Fits the single-lambda reversible catalytic model (model 'M1').
# 'analysis' selects:
#   'overall'             pooled fit over all individuals (df = 2)
#   'split-shared-lambda' one lambda shared across strata, stratum-specific
#                         rho, grid search over int.lambda with step 'lag'
#   'split-shared-rho'    shared rho, stratum-specific lambda, grid over int.rho
#   'split-unshared-rho'  fully independent fit per stratum
#   <stratum name>        pooled-style fit restricted to that stratum
# Single fits also return fitted prevalence curves and an AUC (needs pROC).
simple.rcm.analysis <- function(scm.object,analysis='overall',int.lambda=c(0,1),int.rho=c(0.001,0.250),lag=0.01,age.predict=1:60){
age <- scm.object$age
sero.pos <- scm.object$seropos
if(analysis=='overall') {
# pooled fit: estimate lambda and rho jointly by ML
results <- rev_cat(age=age,pos=sero.pos,predict=age.predict)
output <- data.frame(results$pred)
tabela <- table(age,sero.pos)
age.values <- sort(unique(age))
lambda <- results$estimates[1,1]
rho <- results$estimates[2,1]
loglik <- loglik.scm.fixed(n1=tabela[,2],n0=tabela[,1],t=age.values,lambda=lambda,rho=rho)
# model-predicted prevalence at the distinct ages and per individual
fitted.values <- sapply(age.values,function(x,lambda,rho)lambda/(lambda+rho)*(1-exp(-(lambda+rho)*x)),lambda=lambda,rho=rho)
fitted.values2 <- sapply(age,function(x,lambda,rho)lambda/(lambda+rho)*(1-exp(-(lambda+rho)*x)),lambda=lambda,rho=rho)
fit.roc <- roc(sero.pos,fitted.values2)
output <- list(loglik.total=loglik,estimates=results$estimates,df=2,model='M1',analysis='overall',age=age.values,
fitted.values=fitted.values,fitted.values2=fitted.values2,roc=auc(fit.roc))
return(output)
}
if(analysis=='split-shared-lambda') {
# grid search over a common lambda; rho profiled out within each stratum
results <- c()
groups <- unique(scm.object$group)
for(lambda1 in seq(int.lambda[1],int.lambda[2],lag)) {
loglik <- 0
est.rho <- c()
for(i in groups) {
lista <- which(scm.object$group==i)
seropos <- scm.object$seropos[lista]
age <- scm.object$age[lista]
fit <- my.mle.function.lambda(age,seropos,lambda1)
loglik <- loglik+fit[1]
est.rho <- c(est.rho,fit[2])
}
results <- rbind(results,c(loglik,lambda1,est.rho))
}
# keep the grid point with the highest summed log-likelihood
aux <- which.max(results[,1])
results <- results[aux,]
estimates <- data.frame(Group=groups,lambda=rep(results[2],length(groups)),rho=results[3:(length(groups)+2)])
results <- list(loglik.total=results[1],estimates=estimates,df=length(groups)+1,model='M1',analysis='split-shared-lambda')
return(results)
}
if(analysis=='split-shared-rho') {
# grid search over a common rho; lambda profiled out within each stratum
results <- c()
groups <- unique(scm.object$group)
for(rho1 in seq(int.rho[1],int.rho[2],lag)) {
loglik <- 0
est.lambda <- c()
for(i in groups) {
lista <- which(scm.object$group==i)
seropos <- scm.object$seropos[lista]
age <- scm.object$age[lista]
fit <- my.mle.function.rho(age,seropos,rho1)
loglik <- loglik+fit[1]
est.lambda <- c(est.lambda,fit[2])
}
results <- rbind(results,c(loglik,est.lambda,rho1))
}
aux <- which.max(results[,1])
results <- results[aux,]
estimates <- data.frame(Group=groups,lambda=results[2:(length(groups)+1)],rho=rep(results[length(results)],length(groups)))
results <- list(loglik.total=results[1],estimates=estimates,df=length(groups)+1,model='M1',analysis='split-shared-rho')
return(results)
}
if(analysis=='split-unshared-rho') {
# fully independent ML fit in each stratum; df = 2 per stratum
groups <- unique(scm.object$group)
output <- c()
for(i in groups) {
lista <- which(scm.object$group==i)
results <- rev_cat(age=age[lista],pos=sero.pos[lista],predict=age.predict)
tabela <- table(age[lista],sero.pos[lista])
loglik <- loglik.scm.fixed(n1=tabela[,2],n0=tabela[,1],t=as.numeric(rownames(tabela)),lambda=results$estimates[1,1],rho=results$estimates[2,1])
output <- rbind(output,c(loglik,matrix(t(results$estimates),nrow=1,byrow=T)))
}
loglik.total <- sum(output[,1])
output <- data.frame(Group=groups,output[,-1])
colnames(output) <- c('Group','lambda.est','lambda.lower','lambda.upper','rho.est','rho.lower','rho.upper')
output <- list(loglik.total=loglik.total,estimates=output,df=2*length(groups),model='M1',analysis='split-unshared-rho')
return(output)
}
if(analysis!='overall'&analysis!='split-unshared-rho'&analysis!='split-shared-rho') {
# remaining case: 'analysis' names a single stratum to fit on its own
results <- c()
group.exist <- which(unique(scm.object$group)==analysis)
if(length(group.exist)>0) {
lista <- which(scm.object$group==analysis)
age <- age[lista]
sero.pos <- sero.pos[lista]
results <- rev_cat(age=age,pos=sero.pos,predict=age.predict)
output <- data.frame(results$pred)
tabela <- table(age,sero.pos)
age.values <- sort(unique(age))
lambda <- results$estimates[1,1]
rho <- results$estimates[2,1]
loglik <- loglik.scm.fixed(n1=tabela[,2],n0=tabela[,1],t=age.values,lambda=lambda,rho=rho)
fitted.values <- sapply(age.values,function(x,lambda,rho)lambda/(lambda+rho)*(1-exp(-(lambda+rho)*x)),lambda=lambda,rho=rho)
fitted.values2 <- sapply(age,function(x,lambda,rho)lambda/(lambda+rho)*(1-exp(-(lambda+rho)*x)),lambda=lambda,rho=rho)
fit.roc <- roc(sero.pos,fitted.values2)
# NOTE(review): the returned 'analysis' field is hard-coded 'overall' even
# for a single-stratum fit -- confirm whether callers rely on this
output <- list(loglik.total=loglik,estimates=results$estimates,df=2,model='M1',analysis='overall',age=age.values,fitted.values=fitted.values,fitted.values2=fitted.values2,roc=auc(fit.roc))
return(output)
} else {
cat('ERROR: Group does not exist in the data set!')
}
}
}
# Fits the two-lambda reversible catalytic model (model 'M2') with a
# change-point in the force of infection.  'analysis' selects: 'overall'
# (pooled), 'split-shared-rho' (common rho across strata, grid over int.rho,
# per-stratum change-points supplied in 'time.common'), 'split-unshared-rho'
# (independent fit per stratum), or a single stratum name.  The change-point
# grid is seq(time.int[1], time.int[2], time.step).
two.rcm.analysis<-function(scm.object,analysis='overall',rho.int=c(0.001,0.250),time.int=c(1,10),time.step=1,int.rho=c(0.001,0.250),lag.rho=0.01,time.common=NULL,age.predict=1:60,trace=F){
age <- scm.object$age
sero.pos <- scm.object$seropos
results <- c()
if(analysis=='overall') {
# pooled fit; change-point found by profile likelihood
output<-two.rcm.analysis.overall(age,sero.pos,time.int,time.step,trace)
age.values<-sort(unique(age))
lambda<-output$estimates[1:2,1]
rho<-output$estimates[3,1]
# fitted prevalence at the distinct ages and per individual
fitted.values <- as.numeric(sapply(age.values,seromodel2.time.of.change,change=output$time.of.change,x=c(lambda,rho)))
fitted.values2 <- as.numeric(sapply(age,seromodel2.time.of.change,change=output$time.of.change,x=c(lambda,rho)))
fit.roc2 <- roc(sero.pos,fitted.values2)
output <- list(loglik.total=output$loglik.total,estimates=output$estimates,time.of.change=output$time.of.change,df=4,model='M2',analysis='overall',age=age.values,fitted.values=fitted.values,fitted.values2=fitted.values2,roc=auc(fit.roc2))
return(output)
}
if (analysis=='split-shared-rho') {
# grid search over a common rho; per stratum the two lambdas are optimised
# with the stratum's fixed change-point time.common[j]
loglik.max<-(-1)*(10^6)
for (rho1 in seq(int.rho[1],int.rho[2],lag.rho)) {
loglik<-0
estimates<-c()
j<-1
for (i in unique(scm.object$group)) {
lista <- which(scm.object$group==i)
fit <- my.mle.scm2(scm.object$age[lista],scm.object$seropos[lista],lambda=c(0.01,0.005),rho=rho1,time=time.common[j])
loglik <- loglik+fit$value[1]
estimates<-rbind(estimates,c(i,fit$value[1],fit$par,rho1,time.common[j]))
j<-j+1
}
if(loglik.max<loglik){
loglik.max<-loglik
results<-estimates
}
}
results<-data.frame(Group=as.character(results[,1]),loglik=as.numeric(results[,2]),lambda1=as.numeric(results[,3]),lambda2=as.numeric(results[,4]),rho=as.numeric(results[,5]),time.of.change=as.numeric(results[,6]))
return(list(loglik.total=sum(results[,2]),estimates=results[,-2],df=2*dim(results)[1]+1,model='M2',analysis='split-shared-rho'))
}
if (analysis=='split-unshared-rho') {
# fully independent fit (lambda1, lambda2, rho, change-point) per stratum
groups <- unique(scm.object$group)
output<-c()
for (i in groups) {
lista<-which(scm.object$group==i)
results<-two.rcm.analysis.overall(age[lista],sero.pos[lista],time.int,time.step,trace)
output<-rbind(output,c(results$loglik.total,matrix(t(results$estimates),nrow=1,byrow=T),results$time.of.change))
}
loglik.total<-sum(output[,1])
output<-data.frame(Group=groups,output[,-1])
colnames(output)<-c('Group','lambda1.est','lambda1.lower','lambda1.upper','lambda2.est','lambda2.lower','lambda2.upper','rho.est','rho.lower','rho.upper','time.of.change')
output<-list(loglik.total=loglik.total,estimates=output,df=3*length(groups),model='M2',analysis='split-unshared-rho')
return(output)
}
if(analysis!='overall'&analysis!='split-shared-rho'&analysis!='split-unshared-rho') {
# remaining case: 'analysis' names a single stratum to fit on its own
age <- scm.object$age
sero.pos <- scm.object$seropos
results <- c()
aux <- which(unique(scm.object$group)==analysis)
if (length(aux)>0) {
lista <- which(scm.object$group==analysis)
age <- age[lista]
sero.pos <- sero.pos[lista]
output <- two.rcm.analysis.overall(age,sero.pos,time.int,time.step,trace)
age.values <- sort(unique(age))
lambda <- output$estimates[1:2,1]
rho <- output$estimates[3,1]
fitted.values <- as.numeric(sapply(age.values,seromodel2.time.of.change,change=output$time.of.change,x=c(lambda,rho)))
fitted.values2 <- as.numeric(sapply(age,seromodel2.time.of.change,change=output$time.of.change,x=c(lambda,rho)))
fit.roc <- roc(sero.pos,fitted.values2)
# NOTE(review): 'analysis' field is hard-coded 'overall' here as well
output <- list(loglik.total=output$loglik.total,estimates=output$estimates,time.of.change=output$time.of.change,df=4,model='M2',analysis='overall',age=age.values,fitted.values=fitted.values,fitted.values2=fitted.values2,roc=auc(fit.roc))
return(output)
} else {
cat('ERROR: Group does not exist in the data set\n')
}
}
}
# Profile-likelihood fit of the two-lambda reversible catalytic model: the
# change-point is scanned over seq(time.int[1], time.int[2], time.step); at
# each candidate the remaining parameters are re-fitted (rev_cat2) and the
# fit with the highest log-likelihood is kept.
two.rcm.analysis.overall <- function(age, sero.pos, time.int, time.step, trace) {
  times <- seq(time.int[1], time.int[2], time.step)
  loglik <- (-1) * (10^6)  # running maximum, initialised very low
  tabela <- table(age, sero.pos)
  for (i in seq_along(times)) {
    results <- rev_cat2(age = age, pos = sero.pos, change = times[i], predict = 1:60)
    loglik.new <- loglik.scm2.fixed(n1 = tabela[, 2], n0 = tabela[, 1], t = as.numeric(rownames(tabela)), lambda = results$estimates[1:2, 1], rho = results$estimates[3, 1], time.of.change = times[i])
    if (loglik < loglik.new) {
      loglik <- loglik.new
      output <- list(loglik.total = loglik, estimates = results$estimates, time.of.change = times[i], df = 3)
    }
    # BUG FIX: the trace line referenced the undefined 'lista[i]'; it now
    # reports the candidate change-point actually being evaluated
    if (trace == T) cat('time of change=', times[i], ', log.likelihood=', loglik, '\n', sep = '')
  }
  return(output)
}
# maximum likelihood function for rho #
# Profile likelihood in lambda for a fixed rho: maximises loglik.scm.fixed
# over lambda in (1e-15, 10) and returns c(max log-lik, lambda-hat, rho).
my.mle.function.rho <- function(age, seropos, rho) {
  counts <- table(age, seropos)
  age.values <- as.numeric(rownames(counts))
  fit <- optimize(loglik.scm.fixed, interval = c(10^(-15), 10),
                  n1 = counts[, 2], n0 = counts[, 1], t = age.values,
                  rho = rho, maximum = T)
  c(fit$objective, fit$maximum, rho)
}
# 2x2 transition-probability matrix of the reversible catalytic model after
# elapsed time t: row 1 = currently seronegative, row 2 = currently
# seropositive; columns in the same order.
matrix.prob <- function(t, lambda, rho) {
  total <- lambda + rho
  decay <- 1 - exp(-total * t)
  to.pos <- lambda / total * decay  # P(negative -> positive)
  to.neg <- rho / total * decay     # P(positive -> negative)
  matrix(c(1 - to.pos, to.pos, to.neg, 1 - to.neg), ncol = 2, nrow = 2, byrow = T)
}
# Theoretical sero-prevalence by age under time-varying rates: lambda and rho
# are vectors over calendar years (most recent last); for each age the chain
# of 2x2 transition matrices experienced during that individual's lifetime is
# multiplied together, collapsing consecutive years with identical rates into
# a single matrix.prob() call with a longer elapsed time.
theo.seroprev <- function(lambda,rho) {
age.max <- length(lambda)
prob <- c()
predict.prob <- c()
for(age in 1:age.max) {
# start life seronegative: state vector (P(neg), P(pos)) = (1, 0)
prob <- matrix(c(1,0),ncol=2,nrow=1)
l1 <- lambda[age.max-age+1]
r1 <- rho[age.max-age+1]
t.aux <- 1
if(age==1) {
prob <- prob%*%matrix.prob(1,l1,r1)
} else {
for(x in 1:(age-1)){
# extend the current constant-rate stretch, or flush it when rates change
if(l1==lambda[age.max-age+x+1]&r1==rho[age.max-age+x+1]) {
t.aux <- t.aux+1
} else {
# NOTE(review): x ranges over 1:(age-1), so 'x != age' is always TRUE
# here -- confirm whether this guard was intended to do something else
if(x!=age) {
prob <- prob%*%matrix.prob(t.aux,l1,r1)
t.aux <- 1
l1 <- lambda[age.max-age+1+x]
r1 <- rho[age.max-age+1+x]
}
}
}
# flush the final stretch
prob <- prob%*%matrix.prob(t.aux,l1,r1)
}
predict.prob<-rbind(predict.prob,prob)
}
# assemble output: age, birth-year rates (reversed), P(seropositive)
out <- cbind(1:age.max,lambda[age.max:1])
out <- cbind(out,rho[age.max:1])
out <- cbind(out,predict.prob)
# drop column 4 (P(negative)); column 5 (P(positive)) becomes 'seroprev'
out <- data.frame(out[,-4])
colnames(out) <- c('age','lambda','rho','seroprev')
return(out)
}
# ML fit of the two forces of infection in the change-point model for a fixed
# rho and fixed change-point 'time'; 'lambda' supplies the starting values.
# Returns the raw optim() result.
my.mle.scm2 <- function(age, seropos, lambda, rho, time) {
  counts <- table(age, seropos)
  age.values <- as.numeric(rownames(counts))
  optim(par = lambda, fn = loglik.scm2.fixed,
        n1 = counts[, 2], n0 = counts[, 1], t = age.values,
        time.of.change = time, rho = rho,
        control = list(fnscale = -1, pgtol = 1E-15))
}
# Binomial log-likelihood of the two-lambda change-point model for fixed
# parameters: n1/n0 = positives/negatives at each age in t.  Predicted
# prevalences are clamped away from 0 and 1 so the likelihood stays finite,
# and NA contributions are dropped from the sum.
loglik.scm2.fixed <- function(n1, n0, t, lambda, rho, time.of.change) {
  prev <- as.numeric(sapply(t, seromodel2.time.of.change, change = time.of.change, x = c(lambda, rho)))
  prev[which(prev == 0)] <- 0.0001
  prev[which(prev == 1)] <- 0.9991
  obs <- cbind(n1, n1 + n0, prev)
  loglik <- as.numeric(apply(obs, 1, function(row) dbinom.mod(x = row[1], n = row[2], p = row[3])))
  sum(na.omit(loglik))
}
# Sero-prevalence at a given age under the two-lambda reversible catalytic
# model: individuals older than 'change' experienced force of infection
# lambda1 = x[1] until 'change' years before the survey and lambda2 = x[2]
# since; sero-reversion rate rho = x[3] applies throughout.
seromodel2.time.of.change <- function(age, change, x) {
  lambda1 <- x[1]
  lambda2 <- x[2]
  rho <- x[3]
  eps <- 1E-6  # tolerance so age == change falls into the "recent era only" branch
  theta1 <- lambda1 / (lambda1 + rho)
  theta2 <- lambda2 / (lambda2 + rho)
  # prevalence accumulated under lambda1 before the change-point (0 if young)
  p_b <- (age > change) * theta1 * (1 - exp(-(lambda1 + rho) * (age - change)))
  recent.only <- (age <= change + eps) * (theta2 * (1 - exp(-(lambda2 + rho) * age)))
  both.eras <- (age > change + eps) * ((p_b - theta2) * exp(-(lambda2 + rho) * change) + theta2)
  recent.only + both.eras
}
|
ae5630dce6ec475cee325668951d56e3e95f3b57
|
0c55f047f3a80bb94c6a7ad050c9c44e60a73fb9
|
/Process/process_data.R
|
0ea301759dca8a2ce468b849e3cbdc7b4d81666c
|
[] |
no_license
|
Tuc-Nguyen/HLF-Robot-Image-Analysis-2.1
|
3d2ab9476656ab9d2a4518512bb34c23da39381c
|
bdb652a1169fb2f66117ddda971e8fabe07dd037
|
refs/heads/master
| 2022-04-14T06:01:16.323714
| 2020-03-26T16:36:22
| 2020-03-26T16:36:22
| 250,310,692
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,984
|
r
|
process_data.R
|
#####################################################################################
#######Exclude Data points from final analysis
#####################################################################################
####Read in data
# Column types expected in consolidated_data.txt; the file's own header row
# supplies the column names.
classes = c("numeric","numeric","character","numeric","factor","numeric","factor","factor")
# Row Col Name Size Media Timepoint Temp Array(1-6)
# setwd("./organize_data")
df = read.table("../Analyze/consolidated_data.txt",header=T, colClasses=classes,sep="\t")
##Add a column with "Condition whichis the combination of Media and Temperature
Condition = paste(df$Media, df$Temp, sep = "_")
# Well identifier of the form r<row>c<col>, e.g. "r3c12"
Well=paste("r",df$Row,"c",df$Col,sep="")
df$Condition = factor(Condition)
df$Well = Well
################################
##CLEAN AND TRANSFORM THE DATA
##remove strains that are problematic
# NOTE(review): the code below assumes the input has 'Gene' and (for the
# anti_join) 'Column' columns, while the header comment above lists
# 'Name'/'Col' -- confirm against the actual file header.
strain_remove_table = read.table("strains_to_remove.txt",header=T,sep="\t")
df = subset(df, !(df$Gene %in% strain_remove_table$Gene ))
##remove spots that are problematic
spot_remove_table = read.table("spots_to_remove.txt",header=T,sep="\t",
colClasses = c("factor","factor","factor",
"numeric","numeric","character"))
library(dplyr)
# drop every (Array, Media, Temp, Row, Column) combination listed in the
# spot-removal table
df = anti_join(df, spot_remove_table, by = c("Array","Media" , "Temp", "Row","Column"))
##Remove the outer ring in each block, outer ring of whole plate, and second most outer ring of whole plate
unusable_rows = c(1,2,4,5,8,9,12,13,16,17,20,21,24,25,28,29,31,32)
unusable_cols = c(1,2,8,9,16,17,24,25,32,33,40,41,47,48)
df = subset(df, !(df$Row %in% unusable_rows))
df = subset(df, !(df$Col %in% unusable_cols))
##At this stage, the df has had all the spots in the perimeters of the blocks removed
# sort for stable downstream processing, then drop the YPD control medium
df = df[order(df$Array,df$Condition,df$Time.Point,df$Row,df$Col),]
df = subset(df, Media!="YPD")
####Output all the time points
write.table(df, "all_time_points.tab",row.names=F,sep="\t")
|
5349e8388b8cddd4a0744b303753216d12cfb0ad
|
6473a59283c6da0d9c6921f2f0839e8ee1428795
|
/interval estimation of population when population variance is known.R
|
e4c1604e6cadd68ae6eda93cd3d19b9f4f192f84
|
[] |
no_license
|
pasindusiriwardana95/R-language
|
512403059cfbe71ae404568e327ad4933b9caa61
|
9de47b199619fec8d67a433b27c3edf1d3eede59
|
refs/heads/master
| 2020-04-12T12:23:36.845564
| 2018-12-20T23:19:27
| 2018-12-20T23:19:27
| 162,471,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
interval estimation of population when population variance is known.R
|
# z-interval for the mean student height in MASS::survey, treating the
# population standard deviation as known.
library(MASS)
height.response=na.omit(survey$Height)
n=length(height.response)
# assumed-known population standard deviation (cm)
sigma=9.48
# standard error of the sample mean
div=sigma/sqrt(n)
#confidence level = 0.95
alpha=1-0.95
# two-sided critical value z_{1-alpha/2}
z=qnorm(1-(alpha/2))
#margin of error
E=z*div;E
#finding the sample mean
xbar=mean(height.response)
# the 95% confidence interval: xbar +/- E
xbar+c(-E,E)
|
4ddf4e5c86fcb62bcb4a745c45edcbc75e34fb27
|
155d41ff55c50f662d760cef3c7d6f1d2a5fe523
|
/RevoScaleR_GettingStarted/code/Chapter04.R
|
a31e88d14811865f4a3aae42fd6a1c0da26bc178
|
[] |
no_license
|
jfortuny/RevoScaleR_GettingStarted
|
9938eca29440960af62055cab3418cb26606b6cd
|
4a385c08d0f604dd203bd65f4c01514aeceefd46
|
refs/heads/master
| 2021-01-13T13:23:04.843439
| 2016-10-31T17:51:24
| 2016-10-31T17:51:24
| 72,448,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,098
|
r
|
Chapter04.R
|
# Getting-started walkthrough for RevoScaleR (Microsoft R Server): imports the
# AirlineDemoSmall sample into XDF format, summarises/plots it, and fits
# linear models with rxLinMod.
# Preliminaries ########################################################################
library(RevoScaleR)
# 4.1 ##################################################################################
# Import the sample CSV into an XDF file, timing the conversion.
sampleDataDir <- rxGetOption("sampleDataDir")
inputFile <- file.path(sampleDataDir, "AirlineDemoSmall.csv")
startTime <- Sys.time()
airDS <- rxImport(inData = inputFile,
outFile = "./data/ADS.xdf",
missingValueString = "M",
stringsAsFactors = TRUE,
overwrite = TRUE)
(runTime <- Sys.time() - startTime)
str(airDS)
head(airDS)
#airDS <- rxImport(inData = "./data/ADS.xdf",
#missingValueString = "M",
#stringsAsFactors = TRUE)
# Re-import with an explicit factor-level order for DayOfWeek so summaries and
# plots come out Monday..Sunday rather than alphabetically.
colInfo <- list(DayOfWeek = list(type = "factor",
levels = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday")))
startTime <- Sys.time()
airDS <- rxImport(inData = inputFile,
outFile = "./data/ADS.xdf",
missingValueString = "M",
colInfo = colInfo,
overwrite = TRUE)
(runTime <- Sys.time() - startTime)
#airDS <- rxImport(inData = "./data/ADS.xdf",
#missingValueString = "M",
#colInfo = colInfo,
#stringsAsFactors = TRUE)
# 4.2 ##################################################################################
# Inspect the data set's shape, variable metadata, and a slice of rows.
dim(airDS)
nrow(airDS)
ncol(airDS)
head(airDS)
rxGetVarInfo(airDS)
myData <- rxReadXdf(airDS, numRows = 10, startRow = 100000)
myData
str(myData)
# 4.3 ##################################################################################
# Summaries and histograms of the three variables, plus a filtered subset.
adsSummary <- rxSummary( ~ ArrDelay + CRSDepTime + DayOfWeek, data = airDS)
adsSummary
rxSummary( ~ ArrDelay:DayOfWeek, data = airDS)
options("device.ask.default" = T)
rxHistogram( ~ ArrDelay, data = airDS)
rxHistogram( ~ CRSDepTime, data = airDS)
rxHistogram( ~ DayOfWeek, data = airDS)
options("device.ask.default" = FALSE)
myData <- rxDataStep(inData = airDS,
rowSelection = ArrDelay > 240 & ArrDelay <= 300,
varsToKeep = c("ArrDelay", "DayOfWeek"))
rxHistogram( ~ ArrDelay, data = myData)
# 4.4 ##################################################################################
# 4.4.1 ################################################################################
# Linear model of arrival delay on day of week.
arrDelayLm1 <- rxLinMod(formula = ArrDelay ~ DayOfWeek, data = airDS)
summary(arrDelayLm1)
arrDelayLm1
# 4.4.2 ################################################################################
# Same model with cube = TRUE: per-cell means and counts, then a line plot.
arrDelayLm2 <- rxLinMod(formula = ArrDelay ~ DayOfWeek, data = airDS,
cube = TRUE)
summary(arrDelayLm2)
countsDF <- rxResultsDF(arrDelayLm2, type = "counts")
countsDF
rxLinePlot(ArrDelay ~ DayOfWeek, data = countsDF, main = "Average Arrival Delay by Day Of Week")
# 4.4.3 ################################################################################
# Interaction with departure hour (F() bins CRSDepTime into factor levels).
arrDelayLm3 <- rxLinMod(ArrDelay ~ DayOfWeek:F(CRSDepTime),
data = airDS, cube = TRUE)
arrDelayDT <- rxResultsDF(arrDelayLm3, type = "counts")
head(arrDelayDT, 15)
rxLinePlot(ArrDelay ~ CRSDepTime | DayOfWeek, data = arrDelayDT,
title = "Average Arrival Delay by Day Of Ween By Departure Hour")
# 4.5 ##################################################################################
|
17a000b31e033b75ed5d3b338a227dae299b2dd4
|
70d475f213f2658d0b3e249ec905a8381e5fdc2d
|
/R/Plotting.R
|
5102c2254016e910b3767f97537e8823588fc1b9
|
[
"MIT"
] |
permissive
|
bharatm26/CCNMF
|
dbc26cedfd4d2acc6426ef8e027bf5fd7be62657
|
dd328cf1991b8b7177274bf9f0a9d5d1ba69e011
|
refs/heads/master
| 2023-03-24T11:45:28.314741
| 2021-01-05T04:51:15
| 2021-01-05T04:51:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,402
|
r
|
Plotting.R
|
#' @description Output the integrated figure (tsne + heatmap) for the CCNMF
#'   result and return the selected differentially expressed genes.
#' @param CNVmatrix_input input copy-number matrix (genes x cells)
#' @param RNAmatrix_input input gene-expression matrix (genes x cells)
#' @param ncluster the number of subclones (kept for interface compatibility)
#' @param Result_CCNMF The result of CCNMF; elements [[5]] and [[6]] are the
#'   cluster labels of the DNA and RNA cells respectively
#' @return character vector of the selected DE genes
#'
#' @export
Plot_integrated_figure <- function(CNVmatrix_input, RNAmatrix_input, ncluster, Result_CCNMF){
  S1 <- Result_CCNMF[[5]]
  S2 <- Result_CCNMF[[6]]
  RNADE <- DiffExp(RNAmatrix_input, S2)
  # pick top DE genes: top 10 for two clusters, top 5 per cluster otherwise
  if(length(unique(S2)) == 2){
    commonDE <- RNADE[[3]][1:10]
  }else if(length(unique(S2)) > 2){
    commonDE <- c()
    DE_list <- RNADE[[3]]
    for(i in 1:length(DE_list)){
      commonDE <- c(commonDE, DE_list[[i]][1:5])
    }
    commonDE <- unique(commonDE)
  }
  # rescale the CNV matrix around the median of the DE-gene submatrix so that
  # values above/below the median are mapped symmetrically onto [-2, 2]
  X <- CNVmatrix_input
  D <- CNVmatrix_input[commonDE, ]
  a <- median(D)
  b <- max(D)
  c <- min(D)
  X1 <- (CNVmatrix_input-a)/(b - a)
  X2 <- (CNVmatrix_input-a)/(a- c)
  X[which(CNVmatrix_input > a)] <- X1[which(CNVmatrix_input > a)] * 2
  X[which(CNVmatrix_input <= a)] <- X2[which(CNVmatrix_input <= a)] * 2
  RNAmatrix_input <- as.matrix(RNAmatrix_input)
  # BUG FIX: previously passed the global 'ResultsCCNMF' instead of the
  # 'Result_CCNMF' argument, which failed unless such a global happened to exist
  PlotMainResult(X, RNAmatrix_input, Result_CCNMF, commonDE)
  return(commonDE)
}
#' Output the all visualization of results from CCNMF
#' Especially, the paired heatmap of differential genes and dimension reduction for both scRNA-seq and scDNA-seq data
#' @import ggplot2
#' @import cowplot
#' @import ggplotify
#'
#' @description Output the all visualization of results from CCNMF
#' @importFrom Rtsne Rtsne
#' @importFrom uwot umap
#' @import ggplot2
#' @import grDevices
#'
#' @param CNVmatrix copy number matrix
#' @param RNAmatrix gene expression matrix
#' @param Result_CCNMF The result of CCNMF; [[1]]/[[2]] are used as the DNA/RNA
#'   low-dimensional matrices, [[5]]/[[6]] as the DNA/RNA cluster labels
#' @param DElist The list of differentiate genes (a list whose first element is
#'   a character vector of gene names, or directly a character vector)
#'
#' @return The integrated figure (also written to 'allfigure.pdf')
#' @export
PlotMainResult <- function(CNVmatrix, RNAmatrix, Result_CCNMF, DElist){
# accept either a list wrapping the gene names or a plain character vector
if(is.list(DElist)){
P <- DElist[[1]]
}else if (is.character(DElist)){
P <- DElist
}
# paired heatmaps of the signature genes, converted to ggplot objects so that
# cowplot can arrange them in a grid
DNAheat <- Plot_heatmap(CNVmatrix, Result_CCNMF[[5]], P, Datatype = 'DNA', title = 'Signature gene heatmap of scDNA-seq data')
DNAheat <- as.ggplot(DNAheat)
RNAheat <- Plot_heatmap(RNAmatrix, Result_CCNMF[[6]], P, Datatype = 'RNA', title = 'Signature gene heatmap of scRNA-seq data')
RNAheat <- as.ggplot(RNAheat)
H1 <- Result_CCNMF[[1]]
H2 <- Result_CCNMF[[2]]
S1 <- Result_CCNMF[[5]]
S2 <- Result_CCNMF[[6]]
# t-SNE embeddings straight from the factor matrices (no PCA pre-reduction)
DNAdim <- Plottsne(H1, S1, 'Tsne plot of scDNA-seq data', Datatype = 'scDNA-seq', need_PCA = FALSE)
RNAdim <- Plottsne(H2, S2, 'Tsne plot of scRNA-seq data', Datatype = 'scRNA-seq', need_PCA = FALSE)
myplot <- plot_grid(DNAdim, RNAdim, DNAheat, RNAheat, labels = c('A', 'B', 'C', 'D'), label_size = 12, scale = c(1, 1, 0.95, 0.95))
# NOTE(review): dev.off() errors when no graphics device is open -- confirm a
# device is always active before this function is called
dev.off()
ggsave(filename ='allfigure.pdf', plot = myplot, width = 8.5, height = 6)
}
#' Plot a 2-D PCA embedding of the cells
#'
#' Runs PCA on the transposed input (so cells become observations) and
#' scatter-plots the first two principal components coloured by cluster label.
#'
#' @import stats
#' @param Data matrix (features x cells): the original matrix or the H matrix
#'   produced by NMF
#' @param label cluster labels from CCNMF
#' @param title title of the figure
#' @param Datatype type of the input matrix, 'scRNA-seq' or 'scDNA-seq'
#'   (currently unused; kept for interface compatibility)
#'
#' @return a ggplot object
#' @export
Plotpca <- function(Data, label, title, Datatype = 'scRNA-seq') {
  components <- prcomp(t(Data))$x[, 1:2]
  Plot(components, as.character(label), title, 'pca 1', 'pca 2')
}
#' Plot a 2-D t-SNE embedding of the cells
#'
#' Optionally reduces the data with PCA first (keeping at most the top 15 PCs)
#' and then embeds with t-SNE before scatter-plotting by cluster label.
#'
#' @import stats
#' @param Data matrix (features x cells): the original matrix or the H matrix
#'   produced by NMF
#' @param label cluster labels from CCNMF
#' @param title title of the figure
#' @param Datatype type of the input matrix, 'scRNA-seq' or 'scDNA-seq'
#'   (currently unused; kept for interface compatibility)
#' @param need_PCA logical; run PCA before t-SNE?
#'
#' @return a ggplot object
#' @export
Plottsne <- function(Data, label, title, Datatype = 'scRNA-seq', need_PCA = TRUE) {
  if (need_PCA) {
    scores <- prcomp(t(Data))$x
    # keep at most 15 principal components before the embedding
    if (ncol(scores) >= 15) scores <- scores[, 1:15]
    embedding <- Rtsne(scores, dims = 2, perplexity = 30, check_duplicates = FALSE, verbose = TRUE, max_iter = 500)
  } else {
    embedding <- Rtsne(t(Data), dims = 2, perplexity = 30, check_duplicates = FALSE, verbose = TRUE, max_iter = 500)
  }
  Plot(embedding$Y, as.character(label), title, 'tsne 1', 'tsne 2')
}
#' Plot a 2-D UMAP embedding of the cells
#'
#' Optionally reduces the data with PCA first (keeping at most the top 15 PCs)
#' and then embeds with UMAP before scatter-plotting by cluster label.
#'
#' @import stats
#' @param Data matrix (features x cells): the original matrix or the H matrix
#'   produced by NMF
#' @param label cluster labels from CCNMF
#' @param Datatype type of the input matrix, used in the plot title
#' @param need_PCA logical; run PCA before UMAP?
#'
#' @return a ggplot object
#' @export
Plotumap <- function(Data, label, Datatype = 'scRNA-seq', need_PCA = TRUE) {
  if (need_PCA) {
    scores <- prcomp(t(Data))$x
    # keep at most 15 principal components before the embedding
    if (ncol(scores) >= 15) scores <- scores[, 1:15]
    embedding <- umap(scores, n_neighbors = 15, learning_rate = 0.5, init = "random")
  } else {
    embedding <- umap(t(Data), n_neighbors = 15, learning_rate = 0.5, init = "random")
  }
  Plot(embedding, as.character(label), paste0('Umap for ', Datatype), 'umap 1', 'umap 2')
}
#' The function of plotting figure
#' @import ggplot2
#' @param Data the input data which need to be plotted, which can be raw matrix or H matrix concluded by NMF
#'   (two columns: the 2-D embedding coordinates)
#' @param Cluster the clusters label (character vector)
#' @param title the title of the figure, which is a string
#' @param labelx X-coordinate of name, such as 'pca 1', 'tsne 1', 'umap 1'
#' @param labely Y-coordinate of name, such as 'pca 2', 'tsne 2', 'umap 2'
#'
#' @return A pdf file
#' @export
Plot <- function(Data, Cluster, title, labelx, labely){
Data <- as.data.frame(Data)
colnames(Data) <- c('V1', 'V2')
# NOTE(review): 'ncluster' is computed but never used; Cluster is a character
# vector here, so max() is lexicographic
ncluster <- max(Cluster)
# clone labels of the form C1, C2, ...
Clones <- paste0('C', Cluster, sep='')
myplot <- ggplot(data = Data, ggplot2::aes(x = V1, y=V2, color = Clones)) +
geom_point(size = 1) +
labs(title= title, x = labelx, y= labely, fill = 'Clones') +
scale_fill_discrete(name='Clones') +
theme(plot.title = element_text(size = 11, color = 'black', face = 'bold', hjust = 0.5)) +
theme(axis.title.x = element_text(size = 11, color = 'black', face = 'bold', vjust=0.5, hjust = 0.5)) +
theme(axis.title.y = element_text(size = 11, color = 'black', face = 'bold', vjust=0.5, hjust = 0.5)) +
theme(axis.text.x = element_text(size = 7, color = 'black', face = 'bold')) +
theme(axis.text.y = element_text(size = 7, color = 'black', face = 'bold')) +
theme(legend.text= element_text(size=7,color="black", face= "bold", vjust=0.5, hjust=0.5)) +
theme(legend.title = element_text(size=7,color="black", face= "bold"))
# fixed palette for 2 clusters; RColorBrewer Set1 for 3-9 clusters
if(length(unique(Cluster)) == 2){
Label_color <- c('C1' = "#00BFC4", 'C2' = "#F8766D")
}else if(length(unique(Cluster)) >=3 & length(unique(Cluster)) <= 9){
Label_color <- brewer.pal(length(unique(Cluster)), "Set1")
label_cluster <- paste0('C', Cluster)
names(Label_color) <- sort(unique(label_cluster))
}else if(length(unique(Cluster)) > 9){
# NOTE(review): with >9 clusters 'Label_color' is never assigned, so the
# scale_colour_manual call below will fail -- confirm intended behaviour
print("The number of clusters exceed 9, please add additional colorlabel for clusters.")
}
myplot <- myplot + scale_colour_manual(values = Label_color)
return(myplot)
}
#' Plot the heatmap for differential genes
#' @import RColorBrewer
#' @import grDevices
#' @import pheatmap
#'
#' @param Data gene expression / copy-number matrix (genes x cells)
#' @param label the clustering label of the cells (integers 1..max)
#' @param P either a data frame of per-cluster p-values (the 10 most
#'   significant genes per cluster are taken) or directly a character vector
#'   of gene names
#' @param Datatype 'DNA' or 'RNA' (kept for interface compatibility; both
#'   currently use the same colour scale and layout)
#' @param title a string, default 'The heatmap of differential expression in scRNA-seq data'
#'
#' @return the pheatmap object
#' @export
Plot_heatmap <- function(Data, label, P, Datatype = 'DNA', title = 'The heatmap of differential expression in scRNA-seq data'){
  # resolve the signature genes from P
  if (is.data.frame(P)) {
    index <- apply(P, 2, function(x) { order(x, decreasing = F)[1:10] })
    DEgene <- c()
    for (i in 1:dim(index)[2]) {
      DEgene <- c(DEgene, rownames(P)[index[, i]])
    }
  } else if (is.character(P)) {
    DEgene <- P
  }
  D <- Data[unique(DEgene), ]
  # Reorder the cells so that members of the same cluster are contiguous.
  # order() is stable, so the within-cluster cell order is preserved exactly
  # as in the previous block-copy loop (which relied on the '1:j-1' indexing
  # quirk -- (1:j)-1 drops the 0 index -- now removed).
  cell.order <- order(label)
  D_assign <- as.data.frame(t(D[, cell.order]))
  rownames(D_assign) <- colnames(D)[cell.order]
  # cells per cluster, including zero counts for any unused label in 1..max
  cluster_num <- as.numeric(table(factor(label, levels = 1:max(label))))
  annotation_row = data.frame(Clones = factor(rep(paste('C', 1:max(label), sep = ''), cluster_num)))
  rownames(annotation_row) = rownames(D_assign)
  # both data types currently share the same colour scale and layout
  myplot <- pheatmap::pheatmap(D_assign, color=colorRampPalette(rev(c("red","white","blue")))(102), cluster_cols = FALSE, cluster_rows = FALSE, show_rownames = F, fontsize = 7, annotation_row = annotation_row, angle_col = "45",annotation_legend = FALSE, main = title)
  return(myplot)
}
|
0f25e3b96283de4f9d2672defc5573fffee0e549
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051754-test.R
|
ed30abee4f48cfd7cb4b3086f5522003e4444ed7
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
1610051754-test.R
|
# Auto-generated fuzzing regression case (RcppDeepState/libFuzzer + valgrind):
# replays a stored extreme/NaN input against grattan::IncomeTax; the call is
# expected to complete without crashing.
testlist <- list(rates = c(NaN, -4.24399158143648e-314, NaN, -Inf, 3.60297094497336e-306, 4.56545065531948e-317, 0, 3.60739284464096e-313, 2.26527122355628e-318, 8.54602158179859e+194, 7.17085996195659e-310, 8.44254251528635e-227, 3.79223320198803e-270, 1.00891823378368e-309, 2.1077683068534e-309, -5.35532635250002e+305, 1.38803996951476e-309, -6.69969684635851e-287, 2.89689873904097e-312, -3.18411881987467e-248, 0), thresholds = NaN, x = numeric(0))
result <- do.call(grattan::IncomeTax,testlist)
# print the structure of whatever came back
str(result)
|
85e4b553a19b1d1ec861ff117aa0aafed4704a75
|
f641603643dc268a101aa0c7ca8fbc25853f1070
|
/R/classif.depth.R
|
84205d8c241497a0bdfff2f866b485800ce7de53
|
[] |
no_license
|
dgorbachev/fda.usc
|
8c012f6ec0be6d6611b5aa054b7f1ae1999cad5a
|
643a6673cdfbeec7d195e82900b89791ebb1277e
|
refs/heads/master
| 2021-05-01T08:44:00.652927
| 2018-02-11T22:29:50
| 2018-02-11T22:29:50
| 121,173,291
| 0
| 1
| null | 2018-02-11T22:22:34
| 2018-02-11T22:22:34
| null |
UTF-8
|
R
| false
| false
| 8,685
|
r
|
classif.depth.R
|
################################################################################
################################################################################
# Classify functional data by the maximum-depth rule.
#
# For each class level, the depth of every curve (training or new) is computed
# with respect to the curves of that class only; each curve is then assigned
# to the class in which it is deepest.
#
# Args:
#   group       class labels for the rows of fdataobj (coerced to factor).
#   fdataobj    training curves (fdata object).
#   newfdataobj optional curves to classify; when missing, the training
#               sample itself is classified.
#   depth       name of the depth measure ("RP", "FM", "mode", "HD", ...);
#               prefixed below with "depth." and dispatched via switch().
#   par.depth   list of extra arguments forwarded to the depth function.
#   CV          "none" (default), FALSE (refit on full sample) or TRUE
#               (leave-one-out cross-validated estimates on the training set).
# Returns: a list of class "classif" with estimated/predicted groups, the
#   depth matrix, and the apparent probability of correct classification.
# NOTE(review): the depth.* functions and fdata() come from elsewhere in the
#   fda.usc package -- their exact contracts are assumed, not visible here.
classif.depth<-function(group,fdataobj,newfdataobj,depth="RP",par.depth=list(),CV="none"){
#,control=list(trace=FALSE,draw=TRUE)
 C<-match.call()
if (!is.factor(group)) group<-factor(group)
ismissing<-missing(newfdataobj)
if (ismissing) newfdataobj<-fdataobj
# Drop empty factor levels so every remaining level has at least one curve.
group<-factor(group,levels=levels(group)[which(table(group)>0)])
func.clas<-list()
lev<-levels(group)
ng<-length(lev)
nc<-ncol(newfdataobj)
nvec<-table(group)
# p<-nvec[1]/n
# Projection-based depths need a set of random directions; generate 500
# unit vectors unless the caller supplied par.depth$proj.
if (depth %in% c("PD","HD","RP","RPD","RT")){
  if (is.null(par.depth$proj)) {
    d <- nc
    u <- matrix(runif(d*500,-1,1),500,d)
    norm <- sqrt(rowSums(u*u))
    arg <- u/norm
    # RP/RPD/RT expect functional projections; the others take a plain matrix.
    if (depth %in% c("RP","RPD","RT")) par.depth$proj<-fdata(arg,fdataobj$argvals,fdataobj$rangeval)
    else par.depth$proj<-arg
  }
 }
# The depth functions name their first argument differently ("x" vs "fdataobj").
if (depth %in% c("mband","mmode","HD","SD","PD","MhD")) par.depth$x<-newfdataobj
else par.depth[["fdataobj"]]<-newfdataobj
depth<-paste("depth.",depth,sep="")
if (ismissing) {
  # --- Classify the training sample itself ---
  ismdist<-is.matrix(par.depth$metric)
  if (ismdist) {
    mdist<-par.depth$metric
  }
#  if (depth %in% c("HD","SD","PD","MhD")) par.depth$x<-fdataobj
#  else par.depth[["fdataobj"]]<-fdataobj
# print(names(par.depth))
 n<-nrow(fdataobj)
 x<-array(NA,dim=c(n,nc,ng))
 Df<-matrix(NA,ncol=ng,nrow=n)
# if (CV!=TRUE){
  ind<-matrix(NA,nrow=n,ncol=ng)
  # Column i of Df: depth of every curve w.r.t. the curves of class i.
  for (i in 1:ng) {
    ind[,i]<-group==lev[i]
    nam<-c(paste("depth ",lev[i],sep=""),paste("depth ",paste(lev[-i],collapse=",")))
    if (depth %in% c("depth.mband","depth.mmode","depth.SD","depth.HD","depth.PD","depth.MhD")) par.depth$xx<-fdataobj[ind[,i],]
    else par.depth$fdataori<-fdataobj[ind[,i],]
    if (ismdist) {
      # Restrict the precomputed distance matrix to the class-i columns.
      par.depth$metric<-mdist[,ind[,i]]
      par.depth$metric2<-mdist[ind[,i],ind[,i]] }
    Df[,i]<-switch(depth,
                   depth.HD= do.call(depth,par.depth)$dep,
                   depth.SD= do.call(depth,par.depth)$dep,
                   depth.PD= do.call(depth,par.depth)$dep,
                   depth.MhD= do.call(depth,par.depth)$dep,
                   depth.FM=do.call(depth,par.depth)$dep,
                   depth.mode=do.call(depth,par.depth)$dep,
                   depth.mmode=do.call(depth,par.depth)$dep,
                   depth.RPD=do.call(depth,par.depth)$dep,
                   depth.RP=do.call(depth,par.depth)$dep,
                   depth.RT=do.call(depth,par.depth)$dep,
                   depth.mband=do.call(depth,par.depth)$dep,
                   depth.band=do.call(depth,par.depth)$dep)
  }
group.pred<-group.est<-factor(lev[apply(Df,1,which.max)],levels=lev) # Maximum depth
# }
 if (CV==TRUE) {
   # Leave-one-out: re-estimate the class of each curve j with curve j
   # removed from the reference samples.
   group.est<-group
   for (j in 1:n) {
     xj<-fdataobj[j,]
     xnoj<-fdataobj[-j,]
     ind<-matrix(NA,nrow=n-1,ncol=ng)
     for (i in 1:ng) {
       ind[,i]<-group[-j]==lev[i]
       xnoji<-xnoj[ind[,i],]
       nam<-c(paste("depth ",lev[i],sep=""),paste("depth ",paste(lev[-i],collapse=",")))
       if (depth %in% c("depth.mband","depth.mmode","depth.SD","depth.HD","depth.PD","depth.MhD")) par.depth$xx<-xnoji
       else par.depth$fdataori<-xnoji
       if (ismdist) {
         # NOTE(review): ind here has n-1 rows but mdist has n columns --
         # this subsetting looks misaligned; confirm against package history.
         par.depth$metric<-mdist[,ind[,i]]
         par.depth$metric2<-mdist[ind[,i],ind[,i]] }
       Df[,i]<-switch(depth,
                      depth.HD= do.call(depth,par.depth)$dep,
                      depth.PD= do.call(depth,par.depth)$dep,
                      depth.SD= do.call(depth,par.depth)$dep,
                      depth.MhD= do.call(depth,par.depth)$dep,
                      depth.FM=do.call(depth,par.depth)$dep,
                      depth.mode=do.call(depth,par.depth)$dep,
                      depth.mmode=do.call(depth,par.depth)$dep,
                      depth.RPD=do.call(depth,par.depth)$dep,
                      depth.RP=do.call(depth,par.depth)$dep,
                      depth.band=do.call(depth,par.depth)$dep,
                      depth.mband=do.call(depth,par.depth)$dep,
                      depth.RT=do.call(depth,par.depth)$dep)
     }
     group.est[j]<-factor(lev[which.max(Df[j,])],levels=lev) # Maximum depth }
   }
 }
# Apparent per-class probability of correct classification and overall error.
prob.classification<-diag(table(group,group.est))/table(group)
mis<-mean(group.est!=group)
output<-list("group.est"=group.est,"group.pred"=group.pred,"dep"=Df,"depth"=depth, "par.depth"=par.depth,
             "group"=group,"fdataobj"=fdataobj,"C"=C,"prob.classification"=prob.classification,"max.prob"=1-mis)
class(output)=c("classif")
return(output)
}
else { # new data
 # --- Classify new curves against the training classes ---
 n<-nrow(newfdataobj)
 n0<-nrow(fdataobj)
 x<-array(NA,dim=c(n,nc,ng))
 Df<-matrix(NA,ncol=ng,nrow=n)
 ind<-matrix(NA,nrow=n0,ncol=ng)
 for (i in 1:ng) {
   ind[,i]<-group==lev[i]
   nam<-c(paste("depth ",lev[i],sep=""),paste("depth ",paste(lev[-i],collapse=",")))
   if (depth %in% c("depth.mband","depth.mmode","depth.SD","depth.HD","depth.PD","depth.MhD")) par.depth$xx<-fdataobj[ind[,i],]
   else par.depth$fdataori<-fdataobj[ind[,i],]
   Df[,i]<-switch(depth,
                  depth.HD= do.call(depth,par.depth)$dep,
                  depth.PD= do.call(depth,par.depth)$dep,
                  depth.SD= do.call(depth,par.depth)$dep,
                  depth.MhD= do.call(depth,par.depth)$dep,
                  depth.FM=do.call(depth,par.depth)$dep,
                  depth.mode=do.call(depth,par.depth)$dep,
                  depth.mmode=do.call(depth,par.depth)$dep,
                  depth.RP=do.call(depth,par.depth)$dep,
                  depth.RPD=do.call(depth,par.depth)$dep,
                  depth.band=do.call(depth,par.depth)$dep,
                  depth.mband=do.call(depth,par.depth)$dep,
                  depth.RT=do.call(depth,par.depth)$dep)
 }
group.pred<-factor(lev[apply(Df,1,which.max)],levels=lev) # Maximum depth
if (CV!="none"){
 # Also produce group.est for the TRAINING sample (Df2), either by simple
 # refitting (CV == FALSE) or by leave-one-out (CV == TRUE).
 # NOTE(review): 'depth' was prefixed with "depth." earlier, so this %in%
 # test against unprefixed names can never be TRUE -- likely a latent bug;
 # the else branch always runs here.
 if (depth %in% c("mband","mmode","HD","SD","PD","MhD")) par.depth$x<-fdataobj
 else par.depth[["fdataobj"]]<-fdataobj
  n<-nrow(fdataobj)
  x<-array(NA,dim=c(n,nc,ng))
  Df2<-matrix(NA,ncol=ng,nrow=n)
  ind<-matrix(NA,nrow=n,ncol=ng)
 if (!CV) {
  for (i in 1:ng) {
   ind[,i]<-group==lev[i]
   nam<-c(paste("depth ",lev[i],sep=""),paste("depth ",paste(lev[-i],collapse=",")))
   if (depth %in% c("depth.mband","depth.mmode","depth.HD","depth.PD","depth.MhD")) par.depth$xx<-fdataobj[ind[,i],]
   else par.depth$fdataori<-fdataobj[ind[,i],]
   Df2[,i]<-switch(depth,
                   depth.HD= do.call(depth,par.depth)$dep,
                   depth.PD= do.call(depth,par.depth)$dep,
                   depth.SD= do.call(depth,par.depth)$dep,
                   depth.MhD= do.call(depth,par.depth)$dep,
                   depth.FM=do.call(depth,par.depth)$dep,
                   depth.mode=do.call(depth,par.depth)$dep,
                   depth.mmode=do.call(depth,par.depth)$dep,
                   depth.RP=do.call(depth,par.depth)$dep,
                   depth.RPD=do.call(depth,par.depth)$dep,
                   depth.band=do.call(depth,par.depth)$dep,
                   depth.mband=do.call(depth,par.depth)$dep,
                   depth.RT=do.call(depth,par.depth)$dep)
  }
  group.est<-factor(lev[apply(Df2,1,which.max)],levels=lev) # Maximum depth
  }
 else {
   # Leave-one-out over the training curves.
   group.est<-group
   for (j in 1:n) {
     xj<-fdataobj[j,]
     xnoj<-fdataobj[-j,]
     ind<-matrix(NA,nrow=n-1,ncol=ng)
     for (i in 1:ng) {
       ind[,i]<-group[-j]==lev[i]
       xnoji<-xnoj[ind[,i],]
       nam<-c(paste("depth ",lev[i],sep=""),paste("depth ",paste(lev[-i],collapse=",")))
       if (depth %in% c("depth.mband","depth.mmode","depth.HD","depth.PD","depth.MhD")) par.depth$xx<-xnoji
       else par.depth$fdataori<-xnoji
       Df2[,i]<-switch(depth,
                       depth.HD= do.call(depth,par.depth)$dep,
                       depth.PD= do.call(depth,par.depth)$dep,
                       depth.SD= do.call(depth,par.depth)$dep,
                       depth.MhD= do.call(depth,par.depth)$dep,
                       depth.FM=do.call(depth,par.depth)$dep,
                       depth.mode=do.call(depth,par.depth)$dep,
                       depth.mmode=do.call(depth,par.depth)$dep,
                       depth.RP=do.call(depth,par.depth)$dep,
                       depth.RPD=do.call(depth,par.depth)$dep,
                       depth.band=do.call(depth,par.depth)$dep,
                       depth.mband=do.call(depth,par.depth)$dep,
                       depth.RT=do.call(depth,par.depth)$dep)
     }
     group.est[j]<-factor(lev[which.max(Df2[j,])],levels=lev) # Maximum depth }
   }
 }
prob.classification<-diag(table(group,group.est))/table(group)
mis<-mean(group.est!=group)
return(list("group.est"=group.est,"group.pred"=group.pred,"dep"=Df,"dep.ori"=Df2,"depth"=depth,
            "par.depth"=par.depth,"group"=group,"fdataobj"=fdataobj,"C"=C,
            "prob.classification"=prob.classification,"max.prob"=1-mis))
 }
else return(list("group.pred"=group.pred,"dep"=Df,"depth"=depth,
                 "par.depth"=par.depth,"group"=group,"fdataobj"=fdataobj,"C"=C))
}
}
################################################################################
################################################################################
|
7fc3b92e362b7666627ce824f603420a3cc9b012
|
d25b1890d4b9d1726c56f9387441bac6eb2a1359
|
/R_Scripts/Simple_Boosting Example.R
|
5967e9f517f2aae408366a11c81680ef1757fa5b
|
[] |
no_license
|
jasonchanhku/R-Machine-Learning-Projects
|
e33718468c9483bb2b19896c8534e250a89758c1
|
af81ceb0d827ef42b6104ce0b0c2687a2d90c815
|
refs/heads/master
| 2020-08-03T01:44:23.549263
| 2017-07-03T01:45:06
| 2017-07-03T01:45:06
| 73,554,995
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 359
|
r
|
Simple_Boosting Example.R
|
# AdaBoost classification example using the adabag package.
# Assumes a data frame named 'credit' with a 'default' response column is
# already loaded in the session -- presumably the German credit data set;
# TODO confirm with the accompanying project files.
library(adabag) #for boosting
set.seed(300) # fixed seed so the resampling in boosting() is reproducible
#building the classifier: boost 'default' on all remaining columns
m_adaboost <- boosting(default ~., data = credit)
#building the predictor: predictions on the TRAINING data itself, so the
#resulting error is a resubstitution (optimistic) estimate
p_adaboost <- predict(m_adaboost, credit)
#ConfusionMatrix (predicted vs observed classes)
p_adaboost$confusion
#boosting with Cross Validation (CV) for a less optimistic error estimate
adaboost_cv <- boosting.cv(default ~., data = credit)
#Confusion Matrix from the cross-validated predictions
adaboost_cv$confusion
|
f1fd691984538d9de417f9e90498196e609ff8b5
|
b137d991ca185d006fe891081e57a0959985f29c
|
/app.R
|
2c5e2e9a57164f4cb5d33a5898144ce24ee85b17
|
[] |
no_license
|
mariumtapal/sds235-final-project
|
aac7a460a70a07efb9a0667dbf4795150e19885e
|
94d2ac268c074b31b1c28bbbe93a9018ba619fd7
|
refs/heads/main
| 2023-05-03T05:37:56.283701
| 2021-05-18T19:40:12
| 2021-05-18T19:40:12
| 363,787,087
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,504
|
r
|
app.R
|
# load in required packages
library(shiny)
library(shinythemes)
library(leaflet)
library(tidyverse)
library(plotly)
library(here)
library(RColorBrewer)
library(kableExtra)
library(htmltools)
library(reactable)
# load in source files
source("allegations.R")
source("precincts.R")
source("demographics.R")
# Define UI for application that draws a histogram
# Shiny UI definition: a fluidPage with four tabs (Home, Allegations,
# NYC Precincts, Race/Gender Demographics) plus a shared footer.
ui <- fluidPage(
  # CSS for making the footer stick
  tags$body(
    tags$style(HTML("body {
                  margin: 0;
                  height: 100%;
                  min-height: 100%;}"))
  ),
  # Application theme
  theme = shinytheme("flatly"),
  # Application title
  titlePanel("Exploring NYC Civilian Complaints about the NYPD"),
  # Create tabs
  tabsetPanel(
    # --- Tab 1: project overview, data source, and variable glossary ---
    tabPanel(
      "Home",
      h3("General Overview"),
      p("In April 2017, the New York City Police Department initiated the use of 24,000 body cameras for its
      patrol force and street units. The intent of body cams is to foster greater peace and trust
      between civilians and police by recording their interactions. Specifically, the use of the cameras
      is meant to dissuade and prevent police brutality, which is often racially motivated. In June of 2017, NYC
      Mayor Bill de Blasio announced a policy that requires the NYPD to release all body cam footage of
      incidents involving force which ended in injury or death."),
      tags$a(href = "https://apnews.com/article/2fea6f0179f8e95e332c2c4deeaa861a", "Source: AP News"),
      p("With all of this in mind,
        we wanted to create a tool that allows city administrators, such as the District Attorney, to visualize
        the impact of body cameras on civilian complaints against the NYPD. As the body camera changes were
        enacted in 2017, we chose to look at the allegations from 2016 and 2018."),
      h3("Data Source"),
      p("We draw on data from Propublica's dataset Civilian Complaints Against New York City Police Officers.
      This data comes from New York City's Civilian Complaint Review Board and
      encompasses complaints from September 1985 to January 2020. Each observation in the data contains
      information on the officer and complainant involved, as well as information about the complaint itself.
      Our project centers on a selection of these variables for the years 2016 and 2018. Additionally, we used
      a GPS visualizer and data from the New York City government website to manually map precinct locations."),
      tags$a(
        href = "https://www.propublica.org/datastore/dataset/civilian-complaints-against-new-york-city-police-officers",
        "Propublica Data, "
      ),
      tags$a(href = "https://www.gpsvisualizer.com/geocoder/", "GPS Visualizer, "),
      tags$a(href = "https://www1.nyc.gov/site/nypd/bureaus/patrol/precincts-landing.page", "Precincts Data"),
      h3("Relevant Variables"),
      p("This project focuses on the types of allegations and whether they were substantiated, the distribution
      of allegations by precinct, and the race and gender demographics of both the officers and complainants.
      We use the following variables in our visualizations:"),
      tags$ul(
        tags$li("year_received: the year the complaint was filed"),
        tags$li("rank_incident: the rank of the officer at the time of the incident"),
        tags$li("mos_ethnicity: the race of the officer"),
        tags$li("mos_gender: the gender of the officer"),
        tags$li("complainant_ethnicity: the race of the complainant"),
        tags$li("complainant_gender: the gender of the complainant"),
        tags$li("fado_type: the category the allegation falls under"),
        tags$li("precinct: the precinct of the officer in the allegation"),
        tags$li("board_disposition: whether the allegation was substantiated and found to be in violation of the
                NYPD's rules")
      ),
      p("For further information on the variables in the dataset, see the corresponding data
        dictionary."),
      tags$a(
        href = "https://github.com/mariumtapal/sds235-final-project/blob/main/data/data_descriptions.xlsx",
        "Data Dictionary"
      ),
      h3("Available Tabs"),
      p("Navigating the tabs above takes you to the different visualizations we have constructed. The
      Allegations tab illustrates the distribution of ranks among the officers in the allegations and the
      types of allegations, as well as whether they were substantiated. The NYC Precincts tab contains a
      map of all precincts with the numbers of allegations by location. The Race/Gender Demographics tab
      has a series of plots illustrating the demographics of officers and complainants, both individually
      and combined. Finally, every tab provides the ability to switch between the data for 2016 and 2018
      allegations."),
      p(),
    ),
    # --- Tab 2: allegations table and year-filtered allegation plots ---
    # (outputs "table" and year_allegation_o1/o2/o3 are produced in server)
    tabPanel(
      "Allegations",
      h3("Searchable Allegations Table"),
      p("An important aspect of better understanding civilian complaints, is to see what they are and who they are against.
      There may be cases where a member of service is repeatedly reported. There are cases where certain allegations
      are not even considered by the NYPD. The Allegations tab is here to help you navigate these ideas
      and aid you in exploring further."),
      p("Below you can find a searchable table. It depicts the number of allegations
        associated with each member of service as well as their rank for 2016 and 2018. In addition,
        you are able to see their gender, age and ethnicity."),
      reactableOutput("table"),
      h3("Types of Allegations"),
      p("Here you can filter by year in order to explore the types of allegations that are most frequent.
        The categories are large as a result of the NYPD generalization of complaints. It is important to note
        that the severity in each of these categories can vary greatly, from a gun being pointed to severe physical
        force and exploitation."),
      fluidRow(
        column(
          3, radioButtons("year_allegation_i1",
            label = "Select Year",
            choices = list(2016, 2018)
          )
        ),
        column(9, plotlyOutput("year_allegation_o1"))
      ),
      h3("Ranks of Allegations"),
      p("Here you can filter by year in order to explore the ranks of officers for 2016 and 2018 who have had
        complaints against them by civilians. Over the two years, certain members of service may have changed roles
        for various reasons, including promotion."),
      fluidRow(
        column(
          3, radioButtons("year_allegation_i2",
            label = "Select Year",
            choices = list(2016, 2018)
          )
        ),
        column(9, plotlyOutput("year_allegation_o2"))
      ),
      h3("Types of Complaints Pursued or Not Pursued"),
      p("Here you can filter by year in order to explore the types of complaints that are pursued or not at all.It is
        important to note that:"),
      tags$ul(
        tags$li("Substantiated: means the alleged conduct occurred and it violated the rules.
                The NYPD can choose to ignore those recommendations. It has discretion over what,
                if any, discipline is imposed."),
        tags$li("Exonerated: means the alleged conduct occurred but did not violate the NYPD’s rules,
                which often give officers significant discretion over use of force."),
        tags$li("Unsubstantiated: means that the CCRB has fully investigated but could not
                affirmatively conclude both that the conduct occurred and that it broke the rules."),
      ),
      fluidRow(
        column(
          3, radioButtons("year_allegation_i3",
            label = "Select Year",
            choices = list(2016, 2018)
          )
        ),
        column(9, plotlyOutput("year_allegation_o3"))
      ),
      h3("Final Note about the allegations"),
      p("Overall, it is important to note that despite the extent of exploration we can do with this data set,
        it is vital to remember that there are many civilians who never report their complaints, and the ones
        who do, may not be at the privilege to fully express what has occurred to them due to fear and/or intimidation.
        At the moment and based on this data set, we can simply explore what is available to us by looking at what
        types of complaints get pursued, how many get pursued, the types of allegations there are and
        how often certain members of service are reported by civilians.")
    ),
    # --- Tab 3: leaflet precinct map and per-precinct summary table ---
    tabPanel(
      "NYC Precincts",
      h3("Map: Allegations by Year"),
      p("This interactive map shows the number of allegations
      against police officers in each NYC precinct in the years 2016 and 2018, before
      and after the body cameras were introduced. The default view shows the combined
      allegations for 2016 and 2016. You can view a specfic year by clicking on the buttons
      below. Furthermore, you can select which boroughs you would like to look at by
      using the multi-select option on the upper-right corner of the map."),
      p(),
      p("Each circle marker represents a precinct. You can hover on these markers to
        view the precinct name, address, and the number of allegations.
        The size of the marks reflects the number of allegations."),
      fluidRow(
        column(
          3,
          radioButtons("select_year_leaflet",
            label = "Select Year",
            choices = list(
              "2016 and 2018 Combined", 2016,
              2018
            )
          ),
        ),
        column(
          9,
          textOutput("select_year_leaflet"),
          leafletOutput("leaflets")
        )
      ),
      p(),
      h3("Stats: Allegations by Precinct"),
      p("The table below shows the summary of allegations for each police
        precinct in New York City by year. From the drop-down menu, select a
        precinct (this field is searchable) and select the year to view the table.
        Please refer to the home tab for details of each category."),
      fluidRow(
        column(
          3, selectizeInput("precinct",
            choices = sort(unique(map_data$precinct.y)),
            label = "Select Precinct", selected = "1st Precinct"
          ),
          radioButtons("year_precinct",
            label = "Select Year",
            choices = list(
              2016,
              2018
            )
          )
        ),
        column(9, htmlOutput("table_precinct"))
      ),
      h3("Limitation"),
      p("Due to limited information about the physical location of command stations other than
        precincts or detective squads, a subset of the original data is used in the making of the
        map and table on this page."),
    ),
    # --- Tab 4: officer/complainant demographic plots ---
    tabPanel(
      "Race/Gender Demographics",
      h3("Officer Demographics"),
      p("The following graph illustrates the racial and gender breakdowns of all officers who appeared
      in a formal complaint in 2016 or 2018. Using the dropdown menus on the side, you can select which
      demographic to plot and toggle between 2016 and 2018 to see how the numbers changed. Additionally,
      hovering over each bar reveals more information."),
      fluidRow(
        column(
          3, "Officer Demographics",
          selectInput("select_year1",
            label = "Year",
            choices = list(
              2016,
              2018
            )
          ),
          selectInput("select_variable",
            label = "Demographic",
            choices = list(
              "Race",
              "Gender"
            )
          )
        ),
        column(
          9,
          textOutput("output_year1"),
          textOutput("output_variable"),
          plotlyOutput("mos_race")
        )
      ),
      h4("A Note on Limitations"),
      p("These graphs only include the officers who appeared in a complaint. The data
        therefore does not take into account the racial and gender breakdown of the whole NYPD. It is possible
        that we see the percentages in the graph above because they are the percentages of the whole police force,
        and not because there is one group that disproportionately appears in the allegations. Moreover, because the data
        only considers two genders for officers, officers that identify outside the gender binary are not represented.
        While we were unable to obtain data on the NYPD in 2016 and 2018, as of 2021 the NYPD is 81% male and 19% female.
        Additionally, 46% of officers are white, 30% are Hispanic, 15% are Black, and 9% are Asian. These numbers are not
        necessarily representative of the force in 2016 and 2018, but they can provide some context."),
      tags$a(
        href = "https://www1.nyc.gov/site/ccrb/policy/data-transparency-initiative-mos.page",
        "Current NYPD Demographics"
      ),
      h3("Complainant Demographics"),
      p("Similar to the graph above, this interactive barplot shows the race and gender distributions of
        all complainants in the 2016 or 2018 allegations. The drop downs on the side allow you to select
        the demographic and year you would like to see."),
      fluidRow(
        column(
          3, "Complainant Demographics",
          selectInput("select_year2",
            label = "Year",
            choices = list(
              2016,
              2018
            )
          ),
          selectInput("select_demographic",
            label = "Demographic",
            choices = list(
              "Race",
              "Gender"
            )
          )
        ),
        column(
          9,
          textOutput("output_year_2"),
          textOutput("output_demographic"),
          plotlyOutput("complainant_race")
        )
      ),
      p(),
      h3("Race of Complainants by Officer Race"),
      p("Finally, the graph below breaks down the races of officers and the races of complainants for each
        of the two years. For each race of officer, the graph shows the number of associated complainants
        in each racial category. For example, in 2016, 282 Hispanic complainants leveraged complaints against
        a white officer. You can switch between years using the menu on the side."),
      fluidRow(
        column(
          3, "Race of Officers and Complainants",
          selectInput("select_year",
            label = "Year",
            choices = list(
              2016,
              2018
            )
          )
        ),
        column(
          9,
          textOutput("output_year"),
          plotlyOutput("race_plot")
        )
      ),
      p(),
      h4("A Final Note on Limitations"),
      p("Because this data relies on the reported civilian complaints, there is a risk of reporting bias. Thus,
        when looking at these numbers and demographics, we must also ask who may be underrepresented. It is possible
        there are particular groups who feel less safe reporting a complaint, or who don't believe it will
        amount to anything. Such bias must be taken account when analyzing this potentially skewed data.")
    )
  ),
  # Footer
  div(
    class = "footer",
    includeHTML("footer.html")
  )
)
# Define server logic required to draw a histogram
# Shiny server: maps each input selection to one of the pre-built plot, map,
# or table objects created in the sourced scripts (allegations.R, precincts.R,
# demographics.R). All objects are precomputed, so the handlers only dispatch.
server <- function(input, output) {
  # Officer demographics: look up the plotly object for the selected
  # year/demographic pair; anything else falls back to the combined plot.
  output$mos_race <- renderPlotly({
    switch(paste(input$select_year1, input$select_variable),
      "2016 Race" = mos_race_2016,
      "2016 Gender" = mos_gender_2016,
      "2018 Race" = mos_race_2018,
      "2018 Gender" = mos_gender_2018,
      mos_race
    )
  })

  # Complainant demographics, same dispatch scheme as above.
  output$complainant_race <- renderPlotly({
    switch(paste(input$select_year2, input$select_demographic),
      "2016 Race" = comp_race_2016,
      "2016 Gender" = comp_gender_2016,
      "2018 Race" = comp_race_2018,
      "2018 Gender" = comp_gender_2018,
      complainant_race
    )
  })

  # Officer-race vs complainant-race plot, selected by year only.
  output$race_plot <- renderPlotly({
    switch(as.character(input$select_year),
      "2016" = race_plot_2016,
      "2018" = race_plot_2018,
      race_plot
    )
  })

  # Precinct map: "2016 and 2018 Combined" (and any other value) falls
  # through to the combined leaflet.
  output$leaflets <- renderLeaflet({
    switch(as.character(input$select_year_leaflet),
      "2016" = leaflet_2016,
      "2018" = leaflet_2018,
      leaflet_all
    )
  })

  # Summary table for the chosen precinct and year.
  # NOTE: kept as a plain reactive feeding htmlOutput, exactly as before.
  output$table_precinct <- reactive({
    req(input$precinct)
    req(input$year_precinct)
    counts <- map_data %>%
      filter(precinct.y == input$precinct & year_received == input$year_precinct) %>%
      group_by(fado_type, allegation, board_disposition) %>%
      count() %>%
      arrange(desc(n))
    counts %>%
      knitr::kable("html", col.names = c(
        "Top-level Category of Complaint",
        "Specific Category of Complaint",
        "Finding by the CCRB", "Count"
      )) %>%
      kable_styling("striped", full_width = T)
  })

  # Allegation-type plot by year (radio buttons only offer 2016/2018,
  # with 2016 as the fallback, matching the original if/else chain).
  output$year_allegation_o1 <- renderPlotly({
    if (input$year_allegation_i1 == 2018) mos_allegations_2018 else mos_allegations_2016
  })

  # Officer-rank plot by year.
  output$year_allegation_o2 <- renderPlotly({
    if (input$year_allegation_i2 == 2018) mos_officers_2018 else mos_officers_2016
  })

  # Complaint-disposition plot by year.
  output$year_allegation_o3 <- renderPlotly({
    if (input$year_allegation_i3 == 2018) complaintresult2018 else complaintresult2016
  })

  # Searchable allegations table (prebuilt reactable object named 'table').
  output$table <- renderReactable({
    table
  })
}
# Run the application: launch the Shiny app with the UI and server above.
shinyApp(ui = ui, server = server)
|
4d8348d7f2023b497963ec087858632f47d33abc
|
8380fb173f06df282bc0e5323518a706be02207e
|
/EXAMEN DISTRIBUCION BINOMIAL Y POISSON.R
|
2a28b133fb05aa183fffb4e295a20f769c5f10bd
|
[] |
no_license
|
Jhovana22/probabilidad-y-estadistica
|
5e27c07d14f5943375d3777328519e47c590ba1b
|
d542582e6fa8df0bbb4fa3175a8fe2b50d202708
|
refs/heads/master
| 2023-02-04T10:02:51.423156
| 2020-12-16T19:05:00
| 2020-12-16T19:05:00
| 300,017,956
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,129
|
r
|
EXAMEN DISTRIBUCION BINOMIAL Y POISSON.R
|
#----- BINOMIAL DISTRIBUTION -----
# Exercise, page 36
# A math exam has 12 multiple-choice questions. Each question has five
# possible answers, only one of which is correct. Find the probability of
# getting four or fewer correct answers if a student answers every question
# at random.
# NOTE(review): p, n and k are defined for readability but the dbinom()/dpois()
# calls below pass the values as literals; the variables are never used.
# Probability of choosing the correct answer out of 5 options
p= 1/5
# Sample size (12 questions in total)
n=12
# Number of correct answers chosen at random
k=4
# P(X = 4) with X ~ Binomial(12, 0.2)
dbinom(4,size=12, prob=0.2)
# Exercise, page 37
# Probability of getting four or fewer correct answers at random
# dbinom with k = 0, 1, 2, 3, 4
sum(dbinom(0:4,12,0.2))
# Exercise, page 38
# Probability that 2 or 3 questions are answered correctly
# dbinom with k = 2, 3
sum(dbinom(2:3,12,0.2))
# Suppose Apple manufactured the iPhone 11S with a probability of 0.005 of
# being defective. The product ships in a box containing 25 items.
# What is the probability that a randomly chosen box contains one defective
# iPhone 11S?
# Probability that one iPhone in a box is defective
p2=0.005
# Sample size
n2=25
# Number of defective units
k2=1
dbinom(1, size=25, prob=0.005)
#----- POISSON DISTRIBUTION -----
# Exercise, page 40
# Number of students
x=35
# Arrival rate (per time interval)
lamda=15
# Probability
dpois(35,15)
# Number of students
x=50
# Arrival rate (per time interval)
lamda=15
# Probability
dpois(50,15)
# Exercise, page 41
# 1.- What is the probability that 34 students arrive at the school in 5 minutes?
# Number of students
x2=34
# Rate
lamda2=5
# Probability
dpois(34,5)
# 2.- What is the probability that 60 students are found at the school exit in
# 5 minutes?
# Number of students
x3=60
# Rate
lamda3=5
# Probability
dpois(60,5)
# 3.- What is the probability that 120 students are found at the school exit in
# 5 minutes?
# Number of students
x4=120
# Rate
lamda4=5
# Probability
dpois(120,5)
|
e84e51f89d7b4562ff96eb1df53a475eca09a3c1
|
ac93fa4e6ac2c90b2f13ffd3d4145fbf13b17d99
|
/man/fars_map_state.Rd
|
6de60ea5ee742527e549b9ae0b4dd25f78b55bd0
|
[
"MIT"
] |
permissive
|
acjackman/building_r_packages
|
4060f73ba559602db1cfc874db6ec041c5bf296e
|
7182a81a39840af30949f36768527181dbfac80e
|
refs/heads/master
| 2021-01-22T06:32:20.381764
| 2017-02-20T20:03:55
| 2017-02-20T20:03:55
| 81,769,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 435
|
rd
|
fars_map_state.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_map_state}
\alias{fars_map_state}
\title{Show map of accidents in a state.}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{State number identifying the state in the FARS data.}
\item{year}{data year to create a map for}
}
\description{
Plot all accidents for a single year on a map.
}
\examples{
\dontrun{
fars_map_state(35, 2014)
}
}
|
d8c4adcfc100f6b915b6e81cb87175a9d885ce3e
|
12f11dc0fcbc628f2d38485e9bc86f92926ec04d
|
/tests/testthat/test-get_emlspice.R
|
d69e2c57146f5e296aa57328ed01826c8c80e095
|
[
"CC-BY-4.0"
] |
permissive
|
isteves/emlspice
|
34ad44971e5e628b0334cd2b7f8374a59975790f
|
e082c026357ea4def99913dd20171bcfc1423d1c
|
refs/heads/master
| 2020-03-20T11:13:53.432190
| 2018-06-28T05:35:06
| 2018-06-28T05:35:06
| 137,396,850
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 875
|
r
|
test-get_emlspice.R
|
# Unit tests for get_emlspice(): converting an EML document into
# "spice"-style tables (access, attributes, biblio, creators).
context("get_emlspice")
# Shared fixture: example EML metadata bundled with the emlspice package.
eml_path <- system.file("LeConte_meteo_metadata.xml", package = "emlspice")
eml <- eml2::read_eml(eml_path)
test_that("Files write to disk", {
  # When a directory is supplied, the four CSVs should be written into it.
  dir_path <- tempdir()
  get_emlspice(eml, dir_path)
  files <- list.files(dir_path,
                      pattern = "access|attributes|biblio|creators",
                      full.names = TRUE)
  expect_true(any(grepl("access.csv", files)))
  expect_true(any(grepl("attributes.csv", files)))
  expect_true(any(grepl("biblio.csv", files)))
  expect_true(any(grepl("creators.csv", files)))
  # Clean up so repeated runs of tempdir()-based tests stay independent.
  file.remove(files)
})
test_that("get_emlspice returns a list of tibbles", {
  # Without a directory, the function should return the four tables in memory.
  spice_ex <- get_emlspice(eml)
  expect_equal(length(spice_ex), 4)
  # Every element must inherit from "tbl" (i.e. be a tibble).
  tbl_lgl <- spice_ex %>% purrr::map(class) %>% purrr::map(~"tbl" %in% .)
  expect_true(all(unlist(tbl_lgl)))
})
|
65fc258b4d71a6f4ac7acf454252470eabcf62d1
|
d5e63db3141fe278a8a674f792c58c7406473026
|
/R/create_label_from_metadata.r
|
5a56e04226466aca0a5d7a5df9a4332a7e475571
|
[] |
no_license
|
KonradZych/SIAMCAT
|
e8fbdb99ff331498729410dd8eafffb9a73e3f99
|
57ddaa40a466ac1c14994e502c696283f9ea0567
|
refs/heads/master
| 2020-03-07T05:16:22.056037
| 2018-06-05T08:04:53
| 2018-06-05T08:04:53
| 127,291,251
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,389
|
r
|
create_label_from_metadata.r
|
#!/usr/bin/Rscript
### SIAMCAT - Statistical Inference of Associations between
### Microbial Communities And host phenoTypes R flavor EMBL
### Heidelberg 2012-2018 GNU GPL 3.0
#' @title create a label object from metadata
#'
#' @description This function creates a label object from metadata
#'
#' @usage create.label.from.metadata(meta, column, case,
#' control=NULL, p.lab = NULL, n.lab = NULL, verbose=1)
#'
#' @param meta metadata as read by \link{read.meta}
#' of \link[phyloseq]{sample_data-class}
#'
#' @param column name of column that will be used
#' to create the label
#'
#' @param case name of a label that will be used as a positive label. If the
#' variable is binary, the other label will be used as a negative one. If the
#' variable has multiple values, all the other values will be used a negative
#' label (testing one vs rest).
#'
#' @param control name of a label or vector with names that will be used as a
#' negative label. All values that are nor equal to case and control will be
#' dropped. Default to NULL in which case: If the variable is binary, the value
#' not equal to case will be used as negative. If the variable has multiple
#' values, all the values not equal to cases will be used a negative label
#' (testing one vs rest).
#'
#' @param p.lab name of the positive label (useful mostly for visualizations).
#' Default to NULL in which case the value of the positive label will be used.
#'
#' @param n.lab name of the negative label (useful mostly for visualizations).
#' Default to NULL in which case the value of the negative label will be used
#' for binary variables and "rest" will be used for variables with multiple
#' values.
#'
#' @param verbose control output: \code{0} for no output at all, \code{1}
#' for only information about progress and success, \code{2} for normal
#' level of information and \code{3} for full debug information,
#' defaults to \code{1}
#'
#' @keywords create.label.from.metadata
#'
#' @return an object of class \link{label-class}
#'
#' @examples
#' data(siamcat_example)
#' label <- create.label.from.metadata(meta(siamcat_example),"fobt",
#' case = 1, control = 0)
#'
#' @export
create.label.from.metadata <- function(meta, column, case, control = NULL,
    p.lab = NULL, n.lab = NULL, verbose=1) {
    # Build a SIAMCAT label object (+1 = case, -1 = control) from one
    # metadata column. Binary columns use the other value as control;
    # multi-valued columns are treated one-vs-rest unless an explicit
    # control value is supplied (in which case other samples are dropped).
    #
    # Fixes relative to the original:
    #  * the documented p.lab / n.lab display-name overrides were accepted
    #    but silently ignored; they are now applied (NULL defaults keep the
    #    previous behaviour exactly)
    #  * the progress message now reports the control values actually used,
    #    instead of every non-case value (which could include values that
    #    were just dropped)
    if (verbose > 1)
        message("+ starting create.label.from.metadata")
    s.time <- proc.time()[3]    # wall-clock start, reported at the end
    if (!column %in% colnames(meta))
        stop("ERROR: Column ", column, " not found in the metadata\n")
    # compare everything as character so factor/numeric columns behave the same
    metaColumn <- vapply(meta[, column], as.character,
        FUN.VALUE = character(nrow(meta)))
    names(metaColumn) <- rownames(meta)
    labels <- unique(metaColumn)
    if (length(labels) == 2){
        # binary column: the non-case value becomes the control unless given
        if (verbose > 0) message("Column ", column, " contains binary label\n")
        if(!case%in%labels){
            stop("Column ", column, " does not contain value:",case,"\n")
        }
        if (is.null(control)) {
            control <- setdiff(unique(labels), case)
        } else {
            if(!control%in%labels){
                stop("Column ", column, " does not contain value:",control,"\n")
            }
        }
    }else if(length(labels) > 2){
        # multi-valued column: one-vs-rest by default; with an explicit
        # control, all samples that are neither case nor control are dropped
        if(!case%in%labels){
            stop("Column ", column, " does not contain value:",case,"\n")
        }
        if (is.null(control)) {
            control <- "rest"
        } else {
            if(!control%in%labels){
                stop("Column ", column, " does not contain value:",control,"\n")
            }
            if(any(!labels%in%c(case, control))){
                metaColumn <- metaColumn[which(metaColumn%in%c(case, control))]
                warning("Dropping values: ",
                    labels[which(!labels%in%c(case, control))])
            }
        }
    }
    if (verbose > 0)
        message("Label used as case:\n   ",case,
            "\nLabel used as control:\n   ",
            # report the values actually kept as control after any dropping
            paste(setdiff(unique(metaColumn), case), collapse = ","))
    # internal encoding: everything starts as negative (-1), cases get +1
    label <-
        list(
            label = rep(-1, length(metaColumn)),
            positive.lab = 1,
            negative.lab = (-1)
        )
    # display names derived from the values; separators become spaces
    label$n.lab <- gsub("[_.-]", " ", control)
    label$p.lab <- gsub("[_.-]", " ", case)
    # honour the optional display-name overrides (previously ignored)
    if (!is.null(n.lab)) label$n.lab <- n.lab
    if (!is.null(p.lab)) label$p.lab <- p.lab
    class.descr <- c(-1, 1)
    names(class.descr) <- c(label$n.lab, label$p.lab)
    names(label$label) <- names(metaColumn)
    label$header <-
        paste0("#BINARY:1=", label$p.lab, ";-1=", label$n.lab)
    label$label[which(metaColumn == case)] <- 1
    label$n.idx <- label$label == label$negative.lab
    label$p.idx <- label$label == label$positive.lab
    label$info <- list(type = "BINARY", class.descr = class.descr)
    # wrap everything into the formal label-class object
    labelRes <-
        label(
            list(
                label = label$label,
                header = label$header,
                info = label$info,
                positive.lab = label$positive.lab,
                negative.lab = label$negative.lab,
                n.idx = label$n.idx,
                p.idx = label$p.idx,
                n.lab = label$n.lab,
                p.lab = label$p.lab
            )
        )
    e.time <- proc.time()[3]
    if (verbose > 0)
        message(paste(
            "+ finished create.label.from.metadata in",
            formatC(e.time - s.time, digits = 3),
            "s"
        ))
    return(labelRes)
}
|
5a4872190a8f5a6ab95226c90a5ce91f2c93ec13
|
6e670174d9698e4101c7bbadcae865e0360ef6ff
|
/server.R
|
132d07050d7d8483561462bb1bbbdc92fc5dd8f7
|
[] |
no_license
|
seanokeeffe/MATH154_FinalProject
|
483a71d7aff4d40c93c674631f98aaa885dabe5a
|
973de9a0941522899546501bd5980e28aaef7084
|
refs/heads/master
| 2021-01-10T04:55:19.029618
| 2015-12-16T05:30:00
| 2015-12-16T05:30:00
| 47,749,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,378
|
r
|
server.R
|
# MATH154 - Computational Statistics
# Pomona College
# Professor Hardin
#
# Final Project
# Jozi McKiernan, Sean O'Keeffe, Sam Woodman
#-----------------------------------------------------------------------------------------------------
# Server File for Shiny App
#-----------------------------------------------------------------------------------------------------
# load required packages
require(shiny)
require(dplyr)
require(ggmap)
# Load pre-processed trade / team / player objects created offline.
# NOTE(review): these .RData files are assumed to define all_data,
# players.hitters.stats and players.pitchers.stats used by the server
# below - confirm against the preprocessing scripts.
load("~/allData.RData")
load("~/trades.RData")
load("~/player_stats_selected.RData")
# Train our predictor: sourcing this script defines the fitted models
# glm.fit.bat and glm.fit.pitch used by the prediction outputs.
source("~/logistic_regression.R", local=TRUE, echo=FALSE)
# Base map layer of the US, fetched once at startup via ggmap.
usa <- get_map(location='USA', zoom=4)
# Shiny server for the MLB trade explorer: predicts whether an entered
# batter / pitcher profile would be traded, maps historical trades by
# receiving team, and plots individual statistics against traded status.
#
# Fixes relative to the original:
#   * Helpers were defined as reactive({ function() {...} }) and invoked as
#     helper()(), which returns a fresh closure every time and defeats
#     reactive caching; they are now ordinary reactive expressions.
#   * remove(temp, temp_color, team) after the map loop raised an error
#     whenever no team was selected (the loop never ran, so the variables
#     did not exist); function locals are discarded on exit anyway.
#   * Dropped a leftover debugging print() of the assembled batter row.
server <- function(input, output) {
  # Reactive expressions ---------------------------------------------------

  # "YES"/"NO": would a batter with the entered stats be traded?
  # Uses the pre-trained logistic model glm.fit.bat (0.4 probability cutoff).
  predictBatter <- reactive({
    new_batter <- data.frame(input$age, input$ba, input$obp, input$rppa,
                             input$`2bppa`, input$`3bppa`, input$hrppa,
                             input$rbippa, input$sbppa, input$soppa)
    colnames(new_batter) <- c('Age', 'BA', 'OBP', 'R/PA', '2B/PA', '3B/PA',
                              'HR/PA', 'RBI/PA', 'SB/PA', 'SO/PA')
    if (predict(glm.fit.bat, newdata = new_batter, type = "response") > .4) {
      "YES"
    } else {
      "NO"
    }
  })

  # "YES"/"NO": would a pitcher with the entered stats be traded?
  # Uses glm.fit.pitch with a 0.5 probability cutoff.
  predictPitcher <- reactive({
    new_pitcher <- data.frame(input$age_p, input$wl, input$era, input$shpg,
                              input$svpg, input$fip, input$whip,
                              input$`hr9`, input$`s9`)
    colnames(new_pitcher) <- c('Age', 'W-L%', 'ERA', 'SHO/G', 'SV/G',
                               'FIP', 'WHIP', 'HR9', 'SO9')
    if (predict(glm.fit.pitch, newdata = new_pitcher, type = "response") > .5) {
      "YES"
    } else {
      "NO"
    }
  })

  # US base map with one segment per trade, coloured by the receiving team.
  plotMap <- reactive({
    tradeMap <- ggmap(usa)
    for (team in input$team) {
      # each layer gets its own data subset and team colour
      temp <- all_data %>% filter(NewTeam == team)
      temp_color <- as.character(temp$color[1])
      tradeMap <- tradeMap +
        geom_segment(aes(x=lon_o, y=lat_o, xend=lon_n, yend=lat_n),
                     data=temp, color=temp_color)
    }
    tradeMap
  })

  # Jitter plot of the chosen batter statistic vs traded status.
  getBatterVarPlot <- reactive({
    temp_df <- data.frame(traded = players.hitters.stats$traded,
                          stat = players.hitters.stats[[input$var_b]])
    ggplot(data = temp_df) + geom_jitter(aes(x=stat, y=traded, color=traded))
  })

  # Jitter plot of the chosen pitcher statistic vs traded status.
  getPitcherVarPlot <- reactive({
    temp_df <- data.frame(traded = players.pitchers.stats$traded,
                          stat = players.pitchers.stats[[input$var_p]])
    ggplot(data = temp_df) + geom_jitter(aes(x=stat, y=traded, color=traded))
  })

  # Outputs -----------------------------------------------------------------
  output$pred_b <- renderText({
    predictBatter()
  })
  output$pred_p <- renderText({
    predictPitcher()
  })
  # Output for mapping the trades
  output$map <- renderPlot({
    plotMap()
  })
  # Output of the data that makes the map
  output$table <- renderDataTable(
    all_data %>% select(-c(MatchKey, lat_n, lon_n, lat_o, lon_o, color)) %>%
      filter(NewTeam %in% input$team)
  )
  # Output of the batter variable vs traded plot
  output$plot_b <- renderPlot({
    getBatterVarPlot()
  })
  # Output of the pitcher variable vs traded plot
  output$plot_p <- renderPlot({
    getPitcherVarPlot()
  })
}
|
4e216b56977c72ad58402103a87e632714625504
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/prabclus/examples/incmatrix.Rd.R
|
b29ae18e3ae75432c40b504633fc52f646cadf0c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 194
|
r
|
incmatrix.Rd.R
|
library(prabclus)
### Name: incmatrix
### Title: Nestedness matrix
### Aliases: incmatrix
### Keywords: spatial array
### ** Examples
# Example from the incmatrix help page: count the nestings ($ninc) in the
# Kyklades (Aegean) species-by-region presence/absence data.
# NOTE(review): t() transposes the matrix so its orientation matches what
# incmatrix expects - see ?incmatrix for the required regions/species layout.
data(kykladspecreg)
incmatrix(t(kykladspecreg))$ninc
|
5d8fa9d27c41a8ade64833a32d4a63c395fe4df3
|
d74e6aeab27aa7f36e9d92c18fe015fa1581efe5
|
/ForLabTechs/EasyButton_Calculate_OddsRatios.R
|
ce7f22b0279bed2379f7985cc3dd7bf90fd64ec4
|
[] |
no_license
|
Steven-N-Hart/CouchLab
|
a345bd661450635a4d886f82932a5c2dcd995b10
|
2c29e39d7e042167bcb4fd57d2f75064daf17750
|
refs/heads/master
| 2021-01-21T04:40:19.107484
| 2016-06-17T13:27:14
| 2016-06-17T13:27:14
| 44,977,250
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,021
|
r
|
EasyButton_Calculate_OddsRatios.R
|
# Keep character columns as character when building data frames.
# Uses FALSE rather than the reassignable shorthand F.  (Note: from R 4.0
# onward this is the default behaviour and the option is ignored.)
options(stringsAsFactors = FALSE)
# Infix "not in": TRUE for each element of x that is absent from table.
`%nin%` <- function(x, table) !(x %in% table)
#################################################################################
### Configure this Script to Run on Your Machine ###
### Edit Here ###
#################################################################################
# 1. This is just a directory, but needs 2 "\" instead of just 1.
# Go to where you want your input files are.
# You can copy/paste this right from the folder window & add extra backslashes.
setwd("W:\\SEQUENCING\\Ambry Updated Cohort 85000_4-8-16\\Ambry data analysis\\breast cancer")
# 2. This is the name of the file.
# Please follow the designated format [5 columns, Gene, Case_AC, Case_AN, Control_AC, Control_AN]
# For this test.txt file, I saved it as: "Text (Tab delimited) (*.txt)" <See Step 3.>
inputfile <- "Ambry_ExAC_BAD.txt"
# 3. When you save the file from excel, as a text file it uses a column delimitor, which did you choose?
file_delimitor <- "\t" # Tab-seperated Fields (*.txt)
# file_delimitor <- "," # Comma-seperated Fields (*.csv)
#### How do I run this Code? ####
# If you've opened this code up in Rstudio, just click "Source" in the upper right corner.
# If you are runing this on command line type: Rscript EasyButton_Calculate_OddsRatios.R
# If you are running this in R GUI, in the window type:
# > source('V:/SEQUENCING/Margaret/EasyButton_Calculate_OddsRatios.R', echo=TRUE)
#################################################################################
### You are Done Editing. Do not Change Code Below ###
#################################################################################
# Read the allele-count table; "." cells become NA.
Input<-read.csv(file=inputfile,header=T, sep=file_delimitor,na.strings = '.')
# Check input file format: all five required columns must be present.
expected_names <- c("Gene", "Case_AC", "Case_AN", "Control_AC", "Control_AN")
missing_names <- expected_names %nin% names(Input)
# NOTE(review): 1:length(...) is safe here only because expected_names is
# never empty; stops at the FIRST missing column rather than listing all.
for( n in 1:length(missing_names) ){
  if(missing_names[n]){stop(paste("Input File Missing Column:", expected_names[n]))}
}
# Check for non-numeric columns (read.csv would make a column character if
# it contained any non-numeric, non-"." entry).
is_num <- is.numeric(Input$Case_AC)
if(!is_num){stop("Case AC Column Does Not Contain Only Numbers!")}
is_num <- is.numeric(Input$Case_AN)
if(!is_num){stop("Case AN Column Does Not Contain Only Numbers!")}
is_num <- is.numeric(Input$Control_AC)
if(!is_num){stop("Control AC Column Does Not Contain Only Numbers!")}
is_num <- is.numeric(Input$Control_AN)
if(!is_num){stop("Control AN Column Does Not Contain Only Numbers!")}
# Check for zeros in the AN (allele number) columns - they are divisors below.
# NOTE(review): any(x == 0) returns NA when the column contains NA values
# (loaded via na.strings = '.'), which would make if() error here - consider
# any(x == 0, na.rm = TRUE) plus an explicit NA check.
has_zero <- any(Input$Case_AN == 0)
if(has_zero){stop("Case AN Column Contains a Zero! Must Be Non-Zero Only!")}
has_zero <- any(Input$Control_AN == 0)
if(has_zero){stop("Control AN Column Contains a Zero! Must Be Non-Zero Only!")}
#######################################
### Basic Functions ###
# Reference-allele count remaining after subtracting the alternate count:
# returns xAN - xAC, floored at 1 whenever the difference is negative
# (a difference of exactly zero is returned unchanged).
remain <- function(xAC, xAN) {
  leftover <- xAN - xAC
  if (leftover < 0) {
    leftover <- 1
  }
  leftover
}
## Per-gene Fisher's exact test / odds ratio.  Each row of Input yields the
## 2x2 table [Case_AC, Case_AN-Case_AC; Control_AC, Control_AN-Control_AC]
## (negative remainders floored at 1 via remain()).
##
## Fixes relative to the original:
##  * rows are collected in a pre-allocated list and bound once, instead of
##    rbind() inside the loop (O(n^2) growth)
##  * seq_len(nrow(Input)) is safe when the table is empty (1:0 was not)
##  * res$p.value spelt out (res$p.val relied on partial matching)
result_rows <- vector("list", nrow(Input))
for (l in seq_len(nrow(Input))) {
  csAC <- Input[l, ]$Case_AC
  nnAC <- Input[l, ]$Control_AC
  mat <- matrix(c(csAC, remain(csAC, Input[l, ]$Case_AN),
                  nnAC, remain(nnAC, Input[l, ]$Control_AN)), nrow = 2)
  res <- fisher.test(mat)
  resOR <- round(res$estimate, digits = 3)
  resPval <- sprintf("%.3g", res$p.value)
  resCI <- paste(round(res$conf.int[1], digits = 3),
                 round(res$conf.int[2], digits = 3), sep = "-")
  # carrier frequencies as percentages of the allele numbers
  csFreq <- round((csAC / Input[l, ]$Case_AN) * 100, digits = 2)
  nnFreq <- round((nnAC / Input[l, ]$Control_AN) * 100, digits = 2)
  result_rows[[l]] <- c(Input[l, ]$Gene, resOR, resPval, resCI,
                        csAC, Input[l, ]$Case_AN, paste(csFreq, "%", sep = ""),
                        nnAC, Input[l, ]$Control_AN, paste(nnFreq, "%", sep = ""))
}
DataResult <- do.call(rbind, result_rows)
colnames(DataResult) <- c("Gene","OR","p-value","95-ConfInt","CaseAC","CaseAN","CaseFreq","ControlAC","ControlAN","ControlFreq")
# Name the output after the input file: text before the first "."
nom <- sub("^([^.]*).*", "\\1", inputfile)
write.table(DataResult, file = paste(nom, "_Results.tsv", sep = ""),
            quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
|
67fbb744a29bb716e4acfd2096ad0efca7c39924
|
4550ff52458adba546e208d14554afa768a3d92e
|
/cachematrix.R
|
2fee1dcb85379d55048321a130a5ffff6fee35fd
|
[] |
no_license
|
paritush/ProgrammingAssignment2
|
0c773da11f4e823e69d0866e97e71fa3a7468b8d
|
f922cfa84742582d0304f7b26c3de2f12d60e96b
|
refs/heads/master
| 2022-06-22T22:33:40.611196
| 2020-05-13T10:51:07
| 2020-05-13T10:51:07
| 263,602,048
| 0
| 0
| null | 2020-05-13T10:48:44
| 2020-05-13T10:48:42
| null |
UTF-8
|
R
| false
| false
| 2,546
|
r
|
cachematrix.R
|
## This function is used to return a list where we store the matrix and inverse of the matrix. It creates a special matrix object that can cache its inverse.
## Wrap a matrix together with a cache slot for its inverse.  Returns a list
## of four accessor functions that share one enclosing environment, so the
## cached inverse persists between calls and is reused by cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL                       # nothing cached yet
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL                    # new matrix invalidates cache
  }
  get <- function() x                          # the wrapped matrix
  setInverse <- function(inverse) cached_inverse <<- inverse
  getInverse <- function() cached_inverse      # NULL until computed
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
#This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" built by makeCacheMatrix,
## computing it with solve() only on the first call and serving the cached
## copy on every call after that.
cacheSolve <- function(x, ...) {
  inverse <- x$getInverse()
  if (!is.null(inverse)) {
    # cache hit: announce it and skip the recomputation
    print("Getting cached matrix")
    return(inverse)
  }
  inverse <- solve(x$get(), ...)   # first call: invert and remember
  x$setInverse(inverse)
  inverse
}
# Quick demonstration: the first cacheSolve() call computes the inverse;
# a repeated call on the same object would return the cached copy.
Mat = matrix(1:4, nrow = 2, ncol = 2, byrow = TRUE) # Creating a 2x2 matrix
Mat_cache = makeCacheMatrix(Mat) # Calling makeCacheMatrix using our matrix Mat
cacheSolve(Mat_cache) # Calling cacheSolve using the returned list from makeCacheMatrix
|
fdfc3de575d5fd877aa606e5f585fb60ed9617bb
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/2800_5/rinput.R
|
07bf1b229a9a036074b8730ecd4f4474c69f531b
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
# Read the Newick tree for locus 2800_5, remove its root, and write the
# unrooted tree back out (presumably for a downstream codeml run, given the
# directory name - confirm against the pipeline).
testtree <- read.tree("2800_5.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2800_5_unrooted.txt")
|
7ad7898e50cbfaa2421ad65be7699ce544cce25d
|
5020e1f37ccb3691081c1c58785fa6967a7a4f9c
|
/server.R
|
cfdaafcb7e9f2635f0854be9f65cea380d25ddf3
|
[] |
no_license
|
Zezzzzz/IS415-Neighbourhood-WatchDocs
|
c7f5c24a1f7069a7d573fec915a4f6a81647ec3e
|
12484198e778cc32fd8301871548814cfab1bc52
|
refs/heads/master
| 2020-04-30T15:08:45.793966
| 2019-04-11T18:24:19
| 2019-04-11T18:24:19
| 176,912,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,883
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(rgdal)
library(maptools)
library(raster)
library(spatstat)
library(sf)
library(tmap)
library(tidyverse)
library(SpatialAcc)
library(leaflet)
library(shiny)
library(tbart)
library(dplyr)
library(rgeos)
library(proj4)
library(rsconnect)
library(pracma)
library(compareDF)
# Define server logic
# Shiny server for the Neighbourhood WatchDocs app: an interactive map of
# clinics and HDB blocks per subzone, with two analysis modes -
# geographical accessibility ("geoAcc") and capacity-constrained elderly
# allocation ("allocation") - plus a data-explorer tab.
shinyServer(function(input, output, session) {
    ## Logic for Interactive Map Tab ##
    # Initial map: OpenStreetMap tiles centred on Singapore.
    output$map <- renderLeaflet({
        leaflet() %>%
            addTiles() %>%
            setView(lng = 103.8509, lat = 1.3800, zoom = 12)
    })
    # Subzone change: reset any user-placed clinic, rebuild the per-elderly /
    # per-clinic point sets (globals set via <<- in the helper), and re-centre
    # the map on the subzone's centroid.
    observeEvent(c(input$subzone),{
        # set new clinic location to NULL due to Subzone change
        new_clinic_location <<- NULL
        # retrieve all the clinic and elderly points in each HDB of the Subzone
        getClinicAndElderlyPointsInSubzone(input$subzone)
        subzone <- mpsz[mpsz$SUBZONE_N == input$subzone,]
        p <- as(subzone, 'Spatial')
        sp_cent <- gCentroid(p, byid = TRUE)
        # project the centroid back to lat/long for leaflet's setView
        proj4string <- proj4string(sp_cent)
        xy <- data.frame(sp_cent)
        pj <- project(xy, proj4string, inverse=TRUE)
        latlon <- data.frame(lat=pj$y, lon=pj$x)
        output$map <- renderLeaflet({
            leaflet() %>%
                addTiles() %>%
                #clearShapes() %>%
                setView(lng = latlon$lon, lat = latlon$lat, zoom = 16)
        })
    })
    # Map click toggles a candidate "New Clinic" marker (allocation mode only);
    # a second click anywhere clears it.
    observeEvent(c(input$map_click), {
        if(!is.null(new_clinic_location)) {
            new_clinic_location <<- NULL
            leafletProxy("map") %>%
                clearControls() %>%
                clearShapes()
        } else {
            if(input$analysisType == 'allocation') {
                new_clinic_location <<- input$map_click
                leafletProxy("map") %>%
                    clearControls() %>%
                    addCircles(new_clinic_location$lng, new_clinic_location$lat,
                               radius=18, color="black", fillColor = "purple")
            }
        }
    })
    # Main redraw: reacts to clicks, layer checkboxes (input$Type), subzone,
    # accessibility family, analysis mode and clinic capacity.
    observeEvent(c(input$map_click, input$Type, input$subzone, input$accMethod,
                   input$analysisType, input$cCapacity), {
        clinic_results <- reactive({
            mpsz_clinics %>% filter(SUBZONE_N == input$subzone)
        })
        hdb_results <- reactive({
            mpsz_HDB %>% filter(SUBZONE_N == input$subzone)
        })
        # Accessibility score per HDB block via SpatialAcc::ac(), assuming the
        # subzone's total elderly demand is split evenly across its clinics.
        acc_results <- reactive({
            clinics_coords <- clinic_results() %>% st_coordinates()
            hdb_coords <- hdb_results() %>% st_coordinates()
            # check that reactive expr return value is not empty before proceeding
            if(nrow(clinics_coords) != 0 & nrow(hdb_coords) != 0) {
                capacity <- round(sum(hdb_results()$No_of_Elderly_in_block) / nrow(clinic_results()))
                clinics <- clinic_results() %>% mutate(`capacity` = capacity)
                dm <- distance(hdb_coords, clinics_coords)
                acc_val <- data.frame(ac(hdb_results()$No_of_Elderly_in_block,
                                         clinics$capacity, dm,
                                         power = 2, family = input$accMethod))
                colnames(acc_val) <- "accVal"
                acc_val <- tbl_df(acc_val)
                # replace Inf/NaN scores with 0 so binning below works
                acc_val <- lapply(acc_val, function(x) replace(x, !is.finite(x), 0))
                HDB_acc <- bind_cols(hdb_results(), acc_val)
            }
        })
        # Both layers on: run the selected analysis mode.
        if(all(c("clinics_combined", "hdb") %in% input$Type)) {
            if(!is.null(acc_results()) & input$analysisType == 'geoAcc') {
                # Accessibility view: circle markers binned by score.
                sum_dist_df2 <<- data.frame(matrix(ncol = 3, nrow = 0))
                x <<- c("LAT","LONG","SUM_DIST")
                colnames(sum_dist_df2) <<- x
                #pal = colorQuantile("Purples", n = 5, acc_results()$accVal)
                # Reduce the number of quantile bins until all bin edges are
                # unique (colorBin rejects duplicate breaks).
                quantileNum <- 5
                probs <- seq(0, 1, length.out = quantileNum + 1)
                bins <- quantile(acc_results()$accVal, probs, na.rm = TRUE, names = FALSE)
                while (length(unique(bins)) != length(bins)) {
                    quantileNum <- quantileNum - 1
                    probs <- seq(0, 1, length.out = quantileNum + 1)
                    bins <- quantile(acc_results()$accVal, probs, na.rm = TRUE, names = FALSE)
                }
                pal <- colorBin("YlGn", bins = bins)
                leafletProxy("map", data = acc_results()) %>%
                    clearControls() %>%
                    clearShapes() %>%
                    clearMarkers() %>%
                    addCircleMarkers(lng = ~LONG,
                                     lat = ~LAT,
                                     popup = paste("", acc_results()$blk_no_street, "<br><br>",
                                                   "Acc-Val: ", acc_results()$accVal),
                                     color = "black",
                                     fillColor = ~pal(acc_results()$accVal),
                                     fillOpacity = 0.8)
                #addLegend(pal = pal, values = ~accVal, opacity = 0.8, position = "bottomright")
            } else if(input$analysisType == 'allocation') {
                # Allocation view: run the capacity-constrained allocation and
                # summarise allocated / unallocated elderly per HDB block.
                allocateElderly(input$subzone, input$cCapacity, new_clinic_location)
                if(nrow(total_allocated_elderly) > 0) {
                    # count allocated elderly per block (freq = 1 per person)
                    total_allocated_elderly <<- total_allocated_elderly %>% mutate(freq = 1)
                    allocated_aggregated <<- aggregate(total_allocated_elderly$freq,
                                                       by=list(blk_no_street=total_allocated_elderly$blk_no_street),
                                                       FUN=sum) %>% rename(no_of_elderly_allocated = x)
                } else {
                    allocated_aggregated <<- allocated_aggregated[0,]
                }
                if(nrow(total_unallocated_elderly) > 0) {
                    total_unallocated_elderly <<- total_unallocated_elderly %>% mutate(freq = 1)
                    unallocated_aggregated <<- aggregate(total_unallocated_elderly$freq,
                                                         by=list(blk_no_street=total_unallocated_elderly$blk_no_street),
                                                         FUN=sum) %>% rename(no_of_elderly_unallocated = x)
                } else {
                    unallocated_aggregated <<- unallocated_aggregated[0,]
                }
                # colour each block green / orange / red by allocation outcome
                # (builds the global final_results used below)
                colorOnHDB(unallocated_aggregated, allocated_aggregated)
                allocation_result <<- full_join(allocated_aggregated, unallocated_aggregated, by = "blk_no_street")
                allocation_result[is.na(allocation_result)] <<- 0
                allocation_result <<- allocation_result[order(allocation_result$blk_no_street),]
                output$allocation_result_output <- renderDataTable(allocation_result)
                output$total_elderly_in_subzone <- renderText(
                    paste("<b>", "TOTAL ELDERLY IN ", input$subzone, ": </b>", nrow(mpsz_HDB_split)))
                output$total_elderly_allocated <- renderText(
                    paste("<b>", "TOTAL ELDERLY ALLOCATED: ", "</b>", nrow(total_allocated_elderly)))
                output$total_alloc_dist <- renderText(
                    paste("<b>", "TOTAL ALLOCATION DISTANCE: ", "</b>", sum(total_allocated_elderly$allocdist)))
                output$total_elderly_unallocated <- renderText(
                    paste("<b>", "TOTAL ELDERLY UNALLOCATED: ", "</b>", nrow(total_unallocated_elderly)))
                leafletProxy("map", data = final_results) %>%
                    clearControls() %>%
                    clearMarkers() %>%
                    addCircleMarkers(lng = ~LONG,
                                     lat = ~LAT,
                                     popup = paste(final_results$blk_no_street, "<br><br>",
                                                   "Elderly Allocated: ", final_results$no_of_elderly_allocated,
                                                   "<br>", "Elderly Unallocated: ",
                                                   final_results$no_of_elderly_unallocated),
                                     color = "black",
                                     fillColor = final_results$color,
                                     fillOpacity = 0.8)
            }
        } else if(c("clinics_combined") %in% input$Type) {
            # Clinics layer only.
            leafletProxy("map", data = clinic_results()) %>%
                clearControls() %>%
                clearShapes() %>%
                clearMarkers() %>%
                addAwesomeMarkers(lng = ~LONG,
                                  lat = ~LAT,
                                  popup = paste("", clinic_results()$clinic_name, "<br><br>",
                                                "", clinic_results()$address),
                                  icon = makeAwesomeIcon(icon = "icon", markerColor = "blue"))
        } else if(c("hdb") %in% input$Type) {
            # HDB layer only.
            leafletProxy("map", data = hdb_results()) %>%
                clearControls() %>%
                clearShapes() %>%
                clearMarkers() %>%
                addAwesomeMarkers(lng = ~LONG,
                                  lat = ~LAT,
                                  popup = paste("", hdb_results()$blk_no_street, "<br><br>",
                                                "Elderly Population: ", hdb_results()$No_of_Elderly_in_block),
                                  icon = makeAwesomeIcon(icon = "icon", markerColor = "orange"))
        } else {
            # No layers selected: wipe the map overlays.
            leafletProxy("map", data = NULL) %>%
                clearControls() %>%
                clearShapes() %>%
                clearMarkers()
        }
    })
    ## Logic for Data Explorer Tab ##
    # NOTE(review): elderly_per_hdb is referenced here before the observeEvent
    # below defines a local of the same name - confirm it also exists as a
    # global at startup, otherwise this initial render fails.
    output$viewDataTable <- renderDataTable(elderly_per_hdb)
    observeEvent(input$selectDT, {
        if(input$selectDT == "elderly_per_hdb") {
            elderly_per_hdb <- mpsz_HDB %>% st_set_geometry(NULL) %>%
                select(`blk_no_street`, `postal_code`, `SUBZONE_N`, `No_of_Elderly_in_block`)
            output$viewDataTable <- renderDataTable(elderly_per_hdb)
        } else {
            total_clinics <- mpsz_clinics %>% st_set_geometry(NULL) %>%
                select(`clinic_name`, `address`, `SUBZONE_N`)
            output$viewDataTable <- renderDataTable(total_clinics)
        }
    })
})
# Function to return all clinic points and elderly points in each HDB in a particular Subzone
# Populate the global point sets for one subzone (everything is assigned
# with <<-; the function is used for its side effects only):
#  * mpsz_HDB_filtered / clinics_combined_filtered - HDB blocks and clinics
#    inside the subzone
#  * mpsz_HDB_split(_sf) - one jittered point per elderly resident,
#    scattered uniformly inside a small (~0.0001 degree) ellipse around the
#    block location so individuals can be allocated separately
#  * mpsz_HDB_filtered_sp / clinics_combined_filtered_sp - Spatial versions
#    for tbart::allocations()
getClinicAndElderlyPointsInSubzone <- function(subzone) {
    cat("clinicAndElderlyInSubzone: ", subzone, "\n")
    mpsz_HDB_filtered <<- mpsz_HDB[mpsz_HDB$SUBZONE_N==subzone, ]
    clinics_combined_filtered <<- mpsz_clinics[mpsz_clinics$SUBZONE_N==subzone, ]
    clinics_combined_filtered <<- na.omit(clinics_combined_filtered)
    # empty template: one row per elderly person will be appended below
    mpsz_HDB_split <<- matrix(ncol = 4, nrow = 0)
    x <<- c("blk_no_street","LAT","LONG","SUBZONE_N")
    colnames(mpsz_HDB_split) <<- x
    if(nrow(mpsz_HDB_filtered) > 0) {
        withProgress(message = "Calculation in progress", detail = "This may take a while...", value = 0, {
            for(i in 1:nrow(mpsz_HDB_filtered)){
                incProgress(1/10)
                n <<- mpsz_HDB_filtered$No_of_Elderly_in_block[i]
                lat <<- mpsz_HDB_filtered$LAT[i]
                long <<- mpsz_HDB_filtered$LONG[i]
                # draw n uniform points inside a small ellipse around the block:
                # random angle + sqrt-scaled radius gives area-uniform spread
                radiusLat <<- 0.0001
                radiusLong <<- 0.0001
                angle <<- 2*pi*rand(n,1)
                rLat <<- radiusLat*sqrt(rand(n,1))
                rLong <<- radiusLong*sqrt(rand(n,1))
                latToAdd <<- rLat*cos(angle)+ lat;
                longToAdd <<- rLong*sin(angle)+ long;
                # NOTE(review): rbind inside a double loop grows the result one
                # row at a time (O(n^2)); consider preallocating for big subzones
                for(j in 1:n){
                    newRow_df <<- data.frame(blk_no_street=mpsz_HDB_filtered$blk_no_street[i],
                                             LAT = latToAdd[j],
                                             LONG = longToAdd[j],
                                             LAT1 = latToAdd[j],
                                             LONG1 = longToAdd[j],
                                             SUBZONE_N = mpsz_HDB_filtered$SUBZONE_N[i])
                    mpsz_HDB_split <<- rbind(mpsz_HDB_split, newRow_df)
                }
            }
        })
        # LAT1/LONG1 duplicates are consumed as the sf geometry, so the plain
        # LAT/LONG columns survive as ordinary attributes
        mpsz_HDB_split_sf <<- st_as_sf(mpsz_HDB_split, coords = c("LONG1","LAT1"), crs = 4326)
        clinics_combined_filtered <<- clinics_combined_filtered %>% select(clinic_name, LAT, LONG, SUBZONE_N) %>%
            st_set_geometry(NULL)
        clinics_combined_filtered$LAT1 <<- clinics_combined_filtered$LAT
        clinics_combined_filtered$LONG1 <<- clinics_combined_filtered$LONG
        clinics_combined_filtered <<- st_as_sf(clinics_combined_filtered, coords = c("LONG1","LAT1"),
                                               crs = 4326)
        mpsz_HDB_filtered_sp <<- as_Spatial(mpsz_HDB_split_sf)
        clinics_combined_filtered_sp <<- as_Spatial(clinics_combined_filtered)
    }
}
# Function to run the allocations algorithm of tbart
# Capacity-constrained allocation of every jittered elderly point in the
# current subzone to a clinic:
#   1. run tbart::allocations() once (optionally including the user-placed
#      "New Clinic"),
#   2. give each clinic its `capacity` nearest assignees (rows are sorted by
#      allocation then allocdist, so nearest first),
#   3. repeatedly re-allocate the overflow against clinics with spare
#      capacity until either clinics or unallocated elderly run out.
# Results are written to globals via <<-: alloc_results, subzone_clinics,
# total_allocated_elderly, total_unallocated_elderly.
allocateElderly <- function(subzone, capacity, new_clinic_location) {
    cat("allocateElderly Subzone: ", subzone, " | allocateElderly Capacity: ", capacity)
    if(!is.null(new_clinic_location)) {
        # include the user-placed candidate clinic alongside the real ones
        new_clinic <- data.frame(clinic_name="New Clinic",
                                 LAT=new_clinic_location$lat,
                                 LONG = new_clinic_location$lng,
                                 SUBZONE_N = subzone)
        new_clinic_list <- clinics_combined_filtered %>% select(clinic_name, LAT, LONG, SUBZONE_N) %>%
            st_set_geometry(NULL)
        new_clinic_list <- rbind(new_clinic_list, new_clinic)
        new_clinic_list$LAT1 <- new_clinic_list$LAT
        new_clinic_list$LONG1 <- new_clinic_list$LONG
        new_clinic_list <- st_as_sf(new_clinic_list, coords = c("LONG1","LAT1"), crs = 4326)
        new_clinic_list_sp <- as_Spatial(new_clinic_list)
        alloc_results <<- allocations(mpsz_HDB_filtered_sp, new_clinic_list_sp, p=nrow(new_clinic_list))
        alloc_results <<- st_as_sf(alloc_results, coords = c("LONG", "LAT"), crs = st_crs(mpsz))
        # nearest elderly first within each clinic, so capacity goes to the closest
        alloc_results <<- alloc_results[order(alloc_results$allocation, alloc_results$allocdist),]
        subzone_clinics <<- st_as_sf(new_clinic_list_sp, coords = c("LONG", "LAT"),
                                     crs = st_crs(mpsz)) %>% mutate(capacity = capacity)
    } else {
        alloc_results <<- allocations(mpsz_HDB_filtered_sp, clinics_combined_filtered_sp, p=nrow(clinics_combined_filtered))
        alloc_results <<- st_as_sf(alloc_results, coords = c("LONG", "LAT"), crs = st_crs(mpsz))
        alloc_results <<- alloc_results[order(alloc_results$allocation, alloc_results$allocdist),]
        subzone_clinics <<- st_as_sf(clinics_combined_filtered_sp, coords = c("LONG", "LAT"),
                                     crs = st_crs(mpsz)) %>% mutate(capacity = capacity)
    }
    # create empty data frame with header columns
    total_allocated_elderly <<- alloc_results[0,]
    total_unallocated_elderly <<- alloc_results[0,]
    ### FIRST RUN OF ALLOCATION ALGORITHM ###
    # Give each clinic up to `capacity` of its nearest assignees; overflow
    # goes into total_unallocated_elderly and is retried below.
    withProgress(message = "Calculation in progress", detail = "This may take a while...", value = 0, {
        for(i in 1:nrow(subzone_clinics)) {
            incProgress(1/10)
            clinic_row <<- subzone_clinics[i,]
            # get the total elderlys allocated to this allocation ID
            clinic_n_allocation <<- alloc_results[which(alloc_results$allocation == i), ]
            if(nrow(clinic_n_allocation) >= capacity) {
                # capacity for this clinic is maxed out
                subzone_clinics$capacity[i] <<- 0
                total_allocated_elderly <<- rbind(total_allocated_elderly, clinic_n_allocation %>% slice(1:capacity))
                # NOTE(review): `capacity+1:nrow(x)` parses as
                # capacity + (1:nrow(x)) = (capacity+1):(capacity+nrow(x));
                # this still yields rows capacity+1..nrow only because slice()
                # silently drops out-of-range indices - confirm intended.
                unallocated_elderly <<- clinic_n_allocation %>% slice(capacity+1:nrow(clinic_n_allocation))
                total_unallocated_elderly <<- rbind(total_unallocated_elderly, unallocated_elderly)
            } else if(nrow(clinic_n_allocation) < capacity) {
                # calculate the remaining capacity
                subzone_clinics$capacity[i] <<- capacity - nrow(clinic_n_allocation)
                total_allocated_elderly <<- rbind(total_allocated_elderly, clinic_n_allocation %>% slice(1:nrow(clinic_n_allocation)))
            }
        }
    })
    withProgress(message = "Calculation in progress", detail = "This may take a while...", value = 0, {
        ### CONTINUOUS ALLOCATION ALGORITHM ###
        # Re-run allocation on the leftover elderly against clinics that still
        # have spare capacity until either group is exhausted.
        while(nrow(subzone_clinics[which(subzone_clinics$capacity > 0), ]) > 0 &
              nrow(total_unallocated_elderly) > 0) {
            incProgress(1/10)
            no_of_clinics <<- nrow(subzone_clinics[which(subzone_clinics$capacity > 0), ])
            clinics_remaining <<- subzone_clinics[which(subzone_clinics$capacity > 0), ]
            total_unallocated_elderly <<- total_unallocated_elderly %>%
                select("blk_no_street","LAT","LONG","SUBZONE_N")
            # convert both data.frames to SpatialPointsDataFrame
            clinics_remaining_sp <<- as_Spatial(clinics_remaining)
            total_unallocated_elderly_sp <<- as_Spatial(total_unallocated_elderly)
            # run ALLOCATION algorithm again
            alloc_results <<- allocations(total_unallocated_elderly_sp,
                                          clinics_remaining_sp, p=no_of_clinics)
            alloc_results <<- st_as_sf(alloc_results, coords = c("LONG", "LAT"), crs = st_crs(mpsz))
            alloc_results <<- alloc_results[order(alloc_results$allocation, alloc_results$allocdist),]
            # create empty data frame with header columns
            total_unallocated_elderly <<- alloc_results[0,]
            for(i in 1:nrow(clinics_remaining)) {
                clinic_row <<- clinics_remaining[i,]
                # get the total elderlys allocated to this allocation ID
                clinic_n_allocation <<- alloc_results[which(alloc_results$allocation == i), ]
                # remaining capacity is tracked in subzone_clinics, keyed by name
                clinic_capacity <<- subzone_clinics$capacity[subzone_clinics$clinic_name == clinic_row$clinic_name]
                if(nrow(clinic_n_allocation) >= clinic_capacity) {
                    subzone_clinics$capacity[subzone_clinics$clinic_name == clinic_row$clinic_name] <<- 0
                    # get the unallocated elderlys, append to total unallocated elderlys
                    unallocated_elderly <<- clinic_n_allocation %>% slice(clinic_capacity+1:nrow(clinic_n_allocation))
                    total_unallocated_elderly <<- rbind(total_unallocated_elderly, unallocated_elderly)
                    total_allocated_elderly <<- rbind(total_allocated_elderly, clinic_n_allocation %>% slice(1:clinic_capacity))
                } else if(nrow(clinic_n_allocation) < clinic_capacity) {
                    # calculate the remaining capacity
                    subzone_clinics$capacity[subzone_clinics$clinic_name == clinic_row$clinic_name] <<- clinic_capacity - nrow(clinic_n_allocation)
                    total_allocated_elderly <<- rbind(total_allocated_elderly, clinic_n_allocation %>% slice(1:nrow(clinic_n_allocation)))
                }
            }
        }
    })
    total_unallocated_elderly <<- total_unallocated_elderly %>% select("blk_no_street","LAT","LONG","SUBZONE_N")
}
# Function to combine allocated, unallocated and half-allocated HDB elderly and denote
# each of them with different colors (green, red, orange)
# Build the global final_results table: one row per HDB block with its
# allocated / unallocated elderly counts and a marker colour
#   red    - no elderly in the block were allocated
#   green  - every elderly in the block was allocated
#   orange - partially allocated (the default)
#
# NOTE(review): callers pass (unallocated_aggregated, allocated_aggregated),
# so the parameter names are swapped relative to their contents; the join is
# still correct because the count column names travel with the data frames.
#
# Fix relative to the original: the colour rule is now vectorised, which
# also avoids the crash of the old `for(i in 1:nrow(...))` loop when the
# join produced zero rows (1:0 iterated over a phantom NA row).
colorOnHDB <- function(allocated, unallocated) {
    final_results <<- full_join(unallocated, allocated) %>% mutate(color = "orange")
    final_results[is.na(final_results)] <<- 0   # missing side of the join = 0
    final_results$color <<- ifelse(final_results$no_of_elderly_allocated == 0, "red",
                            ifelse(final_results$no_of_elderly_unallocated == 0, "green",
                                   "orange"))
    # attach coordinates from the subzone's HDB table for plotting
    final_results <<- full_join(final_results, mpsz_HDB_filtered) %>%
        select("blk_no_street","no_of_elderly_allocated", "no_of_elderly_unallocated",
               "LAT","LONG", "color", "geometry")
}
|
c9b829f123fe66f184e31b3b318a3d694e71bb09
|
c9f552cd3050970368c0e0ff774ce98316a22edc
|
/PathSimR_Shiny/PathSimR_Shiny_v1.R
|
fa45ebecdc5599c87123ad0bc2168e12901c08bc
|
[] |
no_license
|
nujcharee/PathSimR
|
b754753e844fa5abf79fcb797192b736f21bba90
|
b431b32cecc71deebc3cd4a46c6158f8ff95ce9e
|
refs/heads/master
| 2022-12-11T22:58:04.354153
| 2020-09-15T11:23:43
| 2020-09-15T11:23:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 548,276
|
r
|
PathSimR_Shiny_v1.R
|
# install.packages("shiny")
# install.packages("DiagrammeR")
# install.packages("magrittr")
# install.packages("readr")
# install.packages("DT")
# install.packages("openxlsx")
# install.packages("grid")
# install.packages("gridExtra")
# install.packages("parallel")
# install.packages("data.table")
# install.packages("tidyverse")
# install.packages("shinyalert")
# install.packages("shinyMatrix")
# install.packages("fitdistrplus")
# install.packages("shinyBS")
# install.packages("shinyjs")
# install.packages("shinythemes")
library(shiny)
library(DiagrammeR)
library(magrittr)
library(readr)
library(DT)
library(openxlsx)
library(grid)
library(gridExtra)
library(parallel)
library(data.table)
library(tidyverse)
library(shinyalert)
library(shinyMatrix)
library(fitdistrplus)
library(shinyBS)
library(shinyjs)
library(shinythemes)
# Raise Shiny's file-upload limit to 30 MB.
options(shiny.maxRequestSize = 30 * 1024 ^ 2)
#### Creating the starting name matrices ####
# 1x1 placeholder matrices shown in the matrix-input widgets before the
# user enters their own service-point / exit names.
m1 <- matrix(nrow = 1,
             ncol = 1,
             data = c("A"))
colnames(m1) <- c("Service Points")
rownames(m1) <- c("Enter Names in Right Column")
# Use <- for consistency with m1 (the original mixed `=` and `<-` here).
m2 <- matrix(nrow = 1,
             ncol = 1,
             data = c("B"))
colnames(m2) <- c("Exits")
rownames(m2) <- c("Enter Names in Right Column")
##### SHINY UI CODE #####
ui <- navbarPage(
theme = shinytheme("cerulean"),
title = c(tagList(icon("compass"), "Navigation Bar")),
id = "navbar",
####INTRODUCTION TAB ####
tabPanel("Introduction",
sidebarLayout(
sidebarPanel(
#Makes some manual changes to the CSS to increase the size of checkboxes and font on tabs
tags$head(
tags$style(
".checkbox { /* checkbox is a div class*/line-height: 40px;margin-bottom: 40px; /*set the margin, so boxes don't overlap*/}
input[type='checkbox']{ /* style for checkboxes */
width: 30px; /*Desired width*/
height: 30px; /*Desired height*/
line-height: 30px;
}
span {
margin-left: 15px; /*set the margin, so boxes don't overlap labels*/
line-height: 30px;
}
* { font-family: Helvetica }
.nav-tabs {font-size: 30px}
"
),
tags$style(
HTML(
"
input[type=number] {
-moz-appearance:textfield;
}
input[type=number]::{
-moz-appearance:textfield;
}
input[type=number]::-webkit-outer-spin-button,
input[type=number]::-webkit-inner-spin-button {
-webkit-appearance: none;
margin: 0;
}
"
)
),
tags$style(type = 'text/css', '.navbar {font-size: 17px;}')
),
h1(strong("PathSimR")),
h2(em(
"A versatile tool for modelling pathway capacity in R"
)),
hr(),
fluidRow(column(
12,
align = "center",
actionButton(
inputId = "j2w",
label = "Start Pathway Wizard",
icon = icon("magic"),
style = 'padding:16px; font-size:150%'
)
)),
hr(),
fluidRow(column(
12,
align = "center",
actionButton(
inputId = "j2s1",
label = "Start Simulation Tool",
icon = icon("project-diagram"),
style = 'padding:16px; font-size:150%'
)
)),
hr(),
h3(
"PathSimR is a simulation tool designed to give insights into how care pathways are performing
and enable 'what if' analysis to identify more effective and efficient service configurations."
),
hr(),
em(strong(
p(
"An overview of the tool and a glossary of key terms can be found in the Overview & Glossary tab in the Navigation Bar (accessible at all times).
Moreover, throughout the tool, there are ",
actionLink(inputId = "info_help", label = icon("info-circle")),
" symbols which provide additional information on specific topics."
)
)),
em(strong(
p(
"New users are advised to read through the overview and glossary when first using the tool to familiarise themselves with the relevant terminology and ideas."
)
)),
em(strong(
p(
"All data must be entered in a consistent time unit (e.g. all data uploaded is on the scale of either days or hours, but not a mixture of the two - if using hours, enter a day as 24 time units, a week as 168 etc.). Users can choose a label for their time unit on the Network Import and Visualisation tab - this will not be used in calculations, but will be added to output tables and graphs."
)
)),
hr(),
p(
"Proceed through the tabs in the navigation bar at the top of the page, completing each tab in sequence.
Once all inputs have been entered and confirmed on a tab, the subsequent tab will appear.
The tabs can be navigated either using the 'previous' and 'next' buttons at the bottom of the page or clicking on the tabs themselves at the top of the page.
Instructions are given on every page regarding how to use the tool. Users may return to any previous tab and update inputs,
simply rerun any uploads/processes on subsequent tabs before moving on."
),
hr(),
p(
"The Pathway Wizard has been designed to help users collate the neccessary information required to run a simulation.
Like the simulation tool, complete each tab in sequence and then proceed into PathSimR."
),
hr(),
p(
strong("To zoom out, press control+- and to zoom in press control+shift+=")
),
p(
strong(
"For more information or guidance on how to use the tool, please contact the Modelling and Analytics team at BNSSG CCG."
)
),
width = 3
),
mainPanel(
br(),
fluidRow(
column(4, align = "center", img(src = 'THF_logo.png', style = "height:150px;")),
column(4, align = "center", img(src = 'BNSSG_logo.png', style = "height:150px;")),
column(4, align = "center", img(src = 'UoB_logo.png', style = "height:150px;"))
),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
fluidRow(column(
12, align = "center", img(src = 'Rplot.png', style = "height:400px;")
)),
#fluidRow(column(12,align="center",img(src='Logo2.jpg'))),
br(),
br(),
br(),
br(),
br(),
h5(strong(
"Example patient pathway (built in PathSimR)"
), align = "right")
)
)),
####OVERVIEW AND GLOSSARY TAB ####
tabPanel(
"Overview & Glossary",
navlistPanel(
tabPanel("Overview",
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(
7,
HTML(
"
<h1>Overview</h1>
<p>PathSimR is a Discrete Event Simulation (DES) tool designed exclusively in R. Aimed at use within the healthcare sector, PathSimR uses relevant terminology and constructs healthcare focussed metrics as outputs. For more information about DES, please consult the technical documentation.</p>
<h1>Moving Away From Average-based Modelling</h1>
<p> Planning by averages involves a simple equation: Capacity= Arrivals x Length of Service.
So with an average of 10 arrivals per day and each patient staying an average of 5 days, the capacity should be 50 beds.
Real data shows however that most patients don't actually stay for 5 days exactly and instead show large amounts of variation in their length of stay.
Averaging data and therefore removing the variation component needs to be done with extreme caution.
Often planning service provision based on data that has been averaged will lead to underestimating the amount
of capacity that is required and could have serious impacts on waiting times and capacity driven delays.</p>
<h1>Key Features and Assumptions of PathSimR</h1>
<p>PathSimR is capable of simulating patient pathways consisting of a number of user defined service points. Each service point can take the form of any service/treatment location that has a prescribed capacity and service length. In the case of a bedded ward in a hospital, this would be the number of beds and treatment length whilst in the case of a GP clinic, this would be number of GPs on shift and appointment length.</p>
<p>Each service point along the pathway has a number of user defined parameters including:</p>
<ul>
<li>Calendar-dependent external arrival rates (assumed to be Poisson distributed)</li>
<li>Calendar-dependent capacities</li>
<li>Service times that can be modelled as any parameter distribution available in R</li>
<li>Internal and external queue capacities (including zero and infinite)</li>
</ul>
<p>Movement between service points and subsequently movement to an exit is managed through a transition rate matrix which describes the proportion of patient who move between two locations on the pathway.</p>
<p>PathSimR deals with both blocking after service (due to lack of downstream capacity in the network) and with user-specified delays/transition times between service points/service points and pathway exits. The former arises when there is no available capacity in an onward service point and no queue to join which forces a patient to reside in their current location, stopping a new patient from starting. The latter, transition delays, are user-defined and are implemented in matrices which describe pairwise delay distributions (and their parameters) between pathway locations.</p>
<p>The tool makes a handful of assumptions in order to simplify the modelled systems. Firstly, with respect to queues and service points, PathSimR assumes First Come First Served (FCFS) queue discipline. Secondly, PathSimR assumes that once a patient joins a queue, they will not leave it until they are served at the associated service point (i.e. PathSimR does not allow reneging or baulking).</p>"
)
)
)),
tabPanel(
"Wizard & Setup Terms",
h3("Wizard & Setup"),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Time Units")), align =
"center"),
column(
4,
p(
" PathSimR does not have a prescribed time unit, instead users can use whichever time unit is appropriate. This must however be consistent throughout the tool and all data
entered must match the chosen units (e.g. all data included is on the scale of days: Arrivals per day, Length of Service on scale of days, prescribed delays in fractions of days)."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Service Point")), align =
"center"),
column(
4,
p(
" Service Point is a ward, clinic or any treatment/service that occurs on the pathway. This can range from a GP surgery on a set timetable to a bedded ward providing continuous care.
The key defining feature of a service point is that it has an associated capacity and service time."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Exit")), align = "center"),
column(
4,
p(
" An exit is any location/service where patients are no longer tracked,
i.e. they have left the pathway of interest. Example exits could be home, care home, death, another pathway that is not being modelled (e.g. 'Further Treatment', 'Out of patch'').
These locations have no associated capacity or LoS and are simply end points along the patient pathway."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Length of Service")), align =
"center"),
column(
4,
p(
"For the purposes of PathSimR, the Length of Service (or Active Service) corresponds to the amount of time a patient is actually receiving treatment or using a service.
It does not include any time that the patient is blocked or delayed due to capacity restraints nor any prescribed delays.
It represents the time between a patient starting to recieve treatment and the earliest time when they could move on, i.e. the required service time.
This is different to the Length of Stay which is a metric calculated as part of the simulation and includes both active treatment time and any delays which may result due to the dynamics of the system."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Distributions and Parameters"
)), align = "center"),
column(
4,
p(
"A probability distribution is a mathematical function that quantifies the likelihood of different possible outcomes. In PathSimR, these outcomes are lengths of time
representing service lengths, inter-arrival times and departure delays. Rather than every patient having a length of service equal to the mean, PathSimR relies on probability distributions to help capture the natural variation in the data.
There are a large variety of distributions that can be used in modelling, each of which requires different parameters to shape the probabilities."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Transition Delay")), align =
"center"),
column(
4,
p(
"Transition delays are included in PathSimR to help simulate an expected amount of time that a patient will remain in a ward, clinic, or other service point after they have completed their treatment before they can move to their next service point or be discharged (to represent, for example, travel time, the completion of administrative tasks, sourcing of social care funding, discussions with families, or delays in discharge to services which are not being explicitly modelled).
Delays can occur between any pair of service points (that have a zero length queue between them) or between a service point and an exit point.
The delay operates by keeping a patient in their current location while holding a space for them in their onward location (when exiting the pathway, there is always space).
This is summarised in the Transition Delay tab in the outputs."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Capacity Driven Delay"
)), align = "center"),
column(
4,
p(
"Capacity delays occur as a result of blocking after service and are not inputs into the model. They are listed here to help distinguish them from the Transition Delays.
They are a result of lack of downstream capacity within the network (i.e. at a service point with defined capacity) which forces a patient to stay in their current location until a space in an onward service point or queue is available.
This is summarised in the Capacity Driven Delay tab in the outputs."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Warm-up period")), align =
"center"),
column(
4,
p(
"The warm-up period represents the time it takes for a simulation to reach stable running conditions and after which results can be recorded.
As each simulation starts from empty, it is important that the warm-up period be long enough so that the results collected are reflective of
the modelled system and not an emptier version. "
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Simulation period")), align =
"center"),
column(
4,
p(
"The Simulation Period is the amount of time over which to collect results from the simulation.
This will need to be sufficiently long such that a large number of patients can pass through the pathway.
For example, if a pathway has an average length of 365 days then simulating it for only 10 would not produce complete results,
as unique patients would not have completed the pathway. The simulation period should therefore be longer than the average pathway
length and long enough to collect data. "
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Replications")), align =
"center"),
column(
4,
p(
"Number of times to run a particular simulation so as to capture the natural variation in the system.
Results are averaged over all replications to ensure all system eventualities are captured."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Network Input Template"
)), align = "center"),
column(
4,
p(
"A .csv file that specifies transition rates between service points, permitted internal and external queue sizes, and the probabilty distribution names and parameters for both service point Lengths of Service, and prescribed transition delays between pairs of servcie points/service points and exits."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Calendar Input Template"
)), align = "center"),
column(
4,
p(
"A .csv file that includes the calendar of capacity and mean external arrival rates (by time period) for each service point"
)
)
),
br()
),
tabPanel(
"Output Terms",
h3("Output Terms"),
h4("Patient Based Outputs"),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Total time in system (TTIS)"
)), align = "center"),
column(
4,
p(
"Time between external arrival and departure to an exit for each patient."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Wait")), align = "center"),
column(4, p(
"Time between arrival and service start at a service point (time spent in queue)."
))
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Active Service")), align =
"center"),
column(
4,
p(
"Time between service start and service end at the service point (e.g. treatment on a ward, a clinic appointment etc.)."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Time Delayed (Capacity Driven)"
)), align = "center"),
column(
4,
p(
"Time between service end and start of transition delay (or departure if no transition delay) at the service point (e.g. treatment on a ward, a clinic appointment etc.) - the amount of time a patient spends blocked at a service point after completing their Active Service, waiting for capacity to become free downstream."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Time Delayed (Transition)"
)), align = "center"),
column(
4,
p(
"Time between capacity driven delay end (or service end if no capacity delay) and departure from the service point (e.g. treatment on a ward, a clinic appointment etc.) - they user defined delay (not depedent on downstream capacity)."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Length of Stay (LOS)"
)), align = "center"),
column(
4,
p(
"Time between service start and departure (Active Service + Delay to transfer + Transition delay (user-defined delay))"
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Delay To Transfer (DTT)"
)), align = "center"),
column(
4,
p(
"Time between service end and departure, i.e. the amount of time the patient is delayed due to a lack of capacity downstream (blocking after service) plus any transition delay."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Rejection Rate")), align =
"center"),
column(
4,
p(
"Number of patients rejected from full external queues divided by the length of the simulation run."
)
)
),
h4("Service Point Based Outputs"),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Queue")), align = "center"),
column(
4,
p(
"Number of concurrent patients who have arrived at a service point and are yet to start the service."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Occupancy/Patient Occupancy"
)), align = "center"),
column(
4,
p(
"Number of patients who are actually receiving or have received service and are occupying a space in the service point."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Bed Occupancy")), align =
"center"),
column(
4,
p(
"The total number of beds currently not available to new arrivals - includes Occupancy/Patient Occupancy as above and also any 'empty' beds that are currently reserved for patients under Transition Delay upstream."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p(
"Capacity Driven Delay"
)), align = "center"),
column(
4,
p(
"Number of patients concurrently delayed due to insufficient capacity downstream (blocking after service). These patients are included in the occupancy and the bed occupancy"
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Transition Delay")), align =
"center"),
column(
4,
p(
"Number of patients concurrently experiencing a prescribed transfer delay.
Patients moving to downstream nodes will also be reserving a space in the onward node and thus appear in the bed occupancy metric for that unit.
Patients are included in the occupancy and bed occupancy of the current node"
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("% time at level")), align =
"center"),
column(
4,
p(
"The percentage of the total simulation time that a service point was at a particular level of the output measure of interest (calculated across all replications) - e.g. if there was a queue of length 5 at service point A was 150 time units out of a total simulation time of 1,500 time units, the '% time at level' for that unit and that queue would be 10%."
)
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, strong(p("Percentiles")), align =
"center"),
column(
4,
p(
"The percentage of total simulation time that the metric of interest was below (or above) the given level - e.g. if the 95th percentile occupancy for unit A was 6, then it's occupancy was at or below 6 for 95% of the simulation time (and conversely, there it was greater than six for 5% of the simulation time). The 50th percentile is the median."
)
)
),
br(),
h4("Connecting Events with Outputs"),
fluidRow(column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(
6,
p(
"The figure below shows how the different events that occur at each Service Point connect to the different outputs
listed on this page. The outputs in the coloured boxes represent the time spent in those states, e.g. the time
between Arrival and Service Start is defined as the Wait. Both Delay-To-Transfer and Length of Stay are combined metrics
that represent the sum of time spent in multiple states."
)
)),
fluidRow(column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(
6,
p(
"The Service Point Based Outputs refer to the number of patients concurrently existing in the same state/activity.
For example, the number of patients concurrently experiencing Capacity Driven Delay are all those that are between
Service End and Transition Start simultaneously."
)
)),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(6, img(src = 'Event_Outputs.png'))
)
),
widths = c(2, 10),
well = TRUE
)
),
####WIZARD 1 - SETUP TAB ####
tabPanel("W1. Setup",
sidebarLayout(
sidebarPanel(
h3(strong("Instructions")),
h4("Step 1: Enter names of all Service Points"),
p(
"'A' is currently listed as an example Service Point.
Enter names in the 'Service Point' column by selecting an empty cell or editing an existing one.
The entry form will automatically grow when the limit is reached.
To refresh, click away and then enter new name.",
actionLink(
inputId = "serv_point_help",
label = "What is a Service Point?",
icon = icon("info-circle")
),
style = "color:gray"
),
bsModal(
id = "modal_serv_point",
title = HTML("<h2><strong>Service Point Help</strong></h2>"),
trigger = "serv_point_help",
size = "large",
... =
HTML(
'
<p> A Service Point is a ward, clinic or any treatment/service that occurs on the pathway. This can range from a GP surgery on a set timetable to a bedded ward providing continuous care.
The key defining feature of a service point is that it has an associated capacity and service time.</p>
'
)
),
br(),
h4("Step 2: Enter names of all Exits"),
p(
"'B' is currently listed as an example Exit.
Enter names in the 'Exit' column by selecting an empty cell or editing an existing one.
The entry form will automatically grow when the limit is reached.
To refresh, click away and then enter new name.",
actionLink(
inputId = "exit_help",
label = "What is an Exit?",
icon = icon("info-circle")
),
style = "color:gray"
),
bsModal(
id = "modal_exit",
title = HTML("<h2><strong>Exit Help</strong></h2>"),
trigger = "exit_help",
size = "large",
... =
HTML(
"
<p> An exit is any location/service where patients are no longer tracked,
i.e. they have left the pathway of interest. Example exits could be home, care home, mortality, another pathway that isn't being modelled (e.g. 'Further Treatment', 'Out of patch'').
These locations have no associated capacity or LoS and are simply end points along the patient pathway.</p>
"
)
),
br(),
h4(
"Step 3: Check the resulting tables and ensure all entries are included"
),
br(),
h4("Step 4: Proceed by pressing the 'Next' button."),
p(
"If you require to add/remove any names during the wizard process, you can return to this page and edit the inputs
to restart the wizard.",
style = "color:gray"
),
br(),
fluidRow(
column(
6,
align = "center",
actionButton(
inputId = "jb2i2",
label = "Back to Intro",
icon = icon("arrow-left")
)
),
column(6, align = "center", actionButton(
inputId = "j2de", label = c(tagList("Next", icon("arrow-right")))
))
),
width = 3
),
mainPanel(
fluidRow(
br(),
fluidRow(h2(strong(
textOutput("duplicate")
)), align = "center"),
br(),
br(),
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(
4,
matrixInput(
inputId = "sp",
value = m1,
class = "character",
cols = list(
names = TRUE,
extend = FALSE,
editableNames = FALSE
),
rows = list(
names = TRUE,
extend = TRUE,
editableNames = FALSE,
delta = 1
),
copy = FALSE,
paste = TRUE
)
),
column(
width = 2,
offset = 0,
style = 'padding:0px;'
),
column(
4,
matrixInput(
inputId = "exit",
value = m2,
class = "character",
cols = list(
names = TRUE,
extend = FALSE,
editableNames = FALSE
),
rows = list(
names = TRUE,
extend = TRUE,
editableNames = FALSE,
delta = 1
),
copy = FALSE,
paste = TRUE
)
),
column(
width = 1,
offset = 0,
style = 'padding:0px;'
)
),
br(),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(4, tableOutput("sp_table"), align = "center"),
column(
width = 2,
offset = 0,
style = 'padding:0px;'
),
column(4, tableOutput("exit_table"), align = "center"),
column(
width = 1,
offset = 0,
style = 'padding:0px;'
)
)
)
)),
####WIZARD 2 - DATA ENTRY TAB ####
tabPanel("W2. Data Entry",
sidebarLayout(
sidebarPanel(
h3(strong("Instructions")),
br(),
h4(strong(
em("For each named tab on the right, fill out all the information")
)),
br(),
h4(
"Step 1: Enter a Length of Service distribution and parameters for the Service Point"
),
p(
"Select the distribution from the drop-down below and then enter the parameter values (as numbers) to the right, in the correct named box.
If the distribution and parameters for the service point are not know, use the Service Distribution tool (in the navigation bar above)
to either fit models to uploaded data or scale against BNSSG data and then enter resulting distributions and parameters.",
actionLink(
inputId = "serv_help",
label = "What is a Length of Service and how does it connect to distributions and parameters?",
icon = icon("info-circle")
)
,
style = "color:gray"
),
bsModal(
id = "modal_serv",
title = HTML(
"<h2><strong>Length of Service & Distributions Help</strong></h2>"
),
trigger = "serv_help",
size = "large",
... =
HTML(
'<h3><strong>Length of Service</strong></h3>
<p>For the purposes of PathSimR, the Length of Service (or Active Service) corresponds to the amount of time a patient is actually receiving treatment or using a service.
It does not include any time that the patient is blocked or delayed due to capacity restraints nor any prescribed delays.
It represents the time between a patient starting to recieve treatment and the earliest time when they could move on, i.e. the required service time.
This is different to the Length of Stay which is a metric calculated as part of the simulation and includes both active treatment time and any delays which
may result due to the dynamics of the system.</p>
<h3><strong>LoS Distributions and Parameters</strong></h3>
<p> Rather than every patient having a length of service equal to the mean, PathSimR uses probability distributions (range of values with different probabilities) to help capture the natural variation in the data.
For example, using only the average LoS would ignore instances where patients have substaintially longer service times, which in turn could impact effective capacity.
The probability of having a certain length of service is modelled by the user chosen distributions and parameters. PathSimR includes a Service Distribution Tool, which
allows users to either fit models to uploaded data or scale against BNSSG data and is accessed from the Navigation Bar.</p>
'
)
),
br(),
h4("Step 2: Enter information about maximum queue lengths"),
p(
"Enter numberic values into both boxes.",
actionLink(
inputId = "queue_help",
label = "What counts as a queue?",
icon = icon("info-circle")
),
style = "color:gray"
),
bsModal(
id = "modal_queue",
title = HTML("<h2><strong>External & Internal Queue Help</strong></h2>"),
trigger = "queue_help",
size = "large",
... =
fluidRow(column(
11,
HTML(
"
<p>A queue in PathSimR is simply a part of the pathway in which the patient can wait for the next service point. These queues can be as small or large as required, even
effectively infinite (e.g. when patients simply wait at home for the next available appointment and don't wait in a physical queue). In the case where there is effectively
unlimited/infinite queueing capacity, the user can enter a large number (e.g. 99999) to represent a queue that will never fill.
In PathSimR, queues are defined by the service point in which they enter, therefore if multiple service points (e.g. A, B & C) have patients that move to service point D,
the queue capacity would be shared between patients from A, B & C and the capacity would be defined at service point D.
There are two different types of queue:External Queues and Internal Queues, which are both described below. A service point can have both types of queue.</p>
<h3><strong>External Queues</strong></h3>
<p>An external queue is defined as a queue that accepts arrivals from outside of the pathway and therefore only connect to a single service point. A full external queue
causes arriving patients to be lost from the system, a metric which is recorded in the outputs.
These are the arrivals that determine the external arrival rate, entered in step 4 on this page.
In the pathway visualisation later in the tool, these will appear as red arrows.</p>
<h3><strong>Internal Queues</strong></h3>
<p>An internal queue is one that connects service points within the pathway network.
As outlined above, internal queues are defined by the downstream service point so each service point has only 1 internal queue,
not a unique queue between each service point. In the case where an internal queue is full, patients will have to wait in the
preceeding service point until a space in the queue becomes available.</p>
<h3><strong>Why are there two types of queue?</strong></h3>
<p>The two types of queues represent different aspects of the system. Setting the external queue length allows the user to manipulate how external arrivals are treated,
e.g. whether they can wait until a service space becomes available or, in the case of a zero length external queue, start service if there is a space in the service point or be lost to the system.
Internal queues dictate how patients move & wait between service points, with a zero length internal queue representing the requirement for continuous care and will potentially result in capacity
driven delays. </p>
"
)
))
),
br(),
h4("Step 3: Enter Transition Proportions and Transition Delays"),
p(
strong(
"Enter a value between and including 0 and 1 in the proportion box to represent the proportion of patients who move to that service point.",
"All proportion values should sum to 1 on each Service Point."
),
"If there is a Transition Delay associated with the move, select the describing distribution and enter the neccessary parameters.
A fixed Transition Delay can be modelled using the uniform distribution and entering the same value into the min and max boxes.",
actionLink(
inputId = "delay_help",
label = "What is a Delay and how are they important?",
icon = icon("info-circle")
),
style = "color:gray"
),
bsModal(
id = "modal_delay",
title = HTML("<h2><strong>Departure Delays Help</strong></h2>"),
trigger = "delay_help",
size = "large",
... =
fluidRow(column(
11,
HTML(
"
<h3>Transition Delays</h3>
<p>
Transition Delays are included in PathSimR to help simulate the amount of time needed to move from one unit to the next.
Delays can occur between any pair of service points (that have a zero length queue between them) or between a service point and an exit point.
The delay functions by keeping a patient in their current location while holding a space for them in their onward location (when exiting the pathway,
there is always space). The delays are formulated in the same way as the Length of Services, i.e. using probability distributions
to model variation. A fixed Transition Delay can be applied to patient using the uniform distribution and entering the same value
into the min and max boxes (e.g. If all patients need to have exactly a 2 day delay when exiting to a care home, select the uniform
distribution and enter a 2 into both parameter boxes). The Transition Delay tab in the outputs looks at the number of patients who are concurrently
experiencing a trasition delay through time.
</p>
<h3>Capacity Driven Departure Delays</h3>
<p> Even if no Transition Delays are included in the pathway, delays due to capacity can still occur. These delays are due to blocking after service
and arise when there is no available capacity in an onward service point and no queue to join. This forces the patient to reside in their current
location, stopping new patients from starting. The capacity delay ends when a space becomes available for the patient downstream. The Capacity Driven Delay
tab in the outputs looks at the number of patients who are concurrently experiencing a capacity driven delay through time.
</p>
<h3>What is Delay To Transfer?</h3>
<p>There is a output metric called 'Delay To Transfer', which looks at the amount of time between a patient finishing service and departing the service point.
This time is the sum of time experiencing any Capacity Driven Delay and any transition delay at a service point. This
metric can be found on the statistics output tabs in the Outputs section.
</p>
"
)
))
),
br(),
h4("Step 4: Complete the Calendars"),
p(
"The External Arrival Rate & Capacity are able to change at given times throughout the simulation. These changes occur at times set in the respective calendars.
Both calendars require at least 1 row to be filled.",
actionLink(
inputId = "cal_help",
label = "How do I fill the calendar?",
icon = icon("info-circle")
),
style = "color:gray"
),
bsModal(
id = "modal_calendar",
title = HTML("<h2><strong>Calendar Help</strong></h2>"),
trigger = "cal_help",
size = "large",
... =
HTML(
'<h3><strong>External Arrival Rate Calendar</strong></h3>
<p><b><em>Only include arrivals from outside the pathway i.e. those that would join the external queue
and have not moved from a Service Point on the pathway.
If there is no change in the external arrival rate through time, enter 0 in
the start column and the arrival rate (0 if there are no external arrivals) in the Arrival Rate column.</em></b> If the arrival
rate does change through time, fill out a row for each period in sequence, matching
the end times with the subsequent start times. The simulation will loop through the
calendar (i.e. will reach the max end time and then start again from the first
calendar entry). </p>
<p>The arrival calendar below follows the following pattern:</p>
<ul>
<li>Between time 0 and 100, the arrival rate is an average of 1 patient per time step.</li>
<li>Between time 100 and 150, there are no patients arriving.</li>
<li>Between time 150 and 200, the arrival rate is an average of 2 patients per time step.</li>
<li>The calendar then returns to the first row and starts again
(i.e. between time 200 and 300, average arrival rate is 1 patient per time step).</li>
</ul>'
),
br(),
fluidRow(column(
12, tableOutput("ext_arr_example"), align = "center"
)),
HTML(
'<p> </p>
<h3><strong>Capacity Calendar</strong></h3>
<p><b><em>If there is no change in the capacity through time, enter 0 in the start
column and the capacity in the Capcity column.</b></em> If the capacity does change
through time, fill out a row for each period in sequence, matching the end
times with the subsequent start times. The simulation will loop through the
calendar (i.e. will reach the max end time and then start again from the first calendar entry).
If modelling a clinic or a fixed time Service Point, the capacity can be set to 0 for a period
of time to represent a closed service.</p>
<p>The capacity calendar below follows the following pattern:</p>
<ul>
<li>Between time 0 and 30, the capacity is 24 (beds or service spaces).</li>
<li>Between time 30 and 90, the capacity is 48 (beds or service spaces).</li>
<li>Between time 90 and 180, the capacity is 72 (beds or service spaces).</li>
<li>The calendar then returns to the first row and starts again
(i.e. between time 180 and 210, the capacity is 24).</li>
</ul>
<p> </p>'
),
fluidRow(column(12, tableOutput("cap_example"), align =
"center"))
),
br(),
h4("Step 5: Repeat Steps 1 to 4 for each Service Point Tab"),
p(" ", style = "color:gray"),
br(),
h4(
"Step 6: Once all Service Point Tabs are complete, proceed by pressing the 'Next' button"
),
p(
" A new tab will also appear at the top of the page. If you require to edit any data entered on any tab during the wizard process, you can return to this page and edit the inputs.
If you do, then ensure that the subsequent pages are refreshed.",
style = "color:gray"
),
br(),
fluidRow(
column(
6,
align = "center",
actionButton(
inputId = "j2s",
label = "Previous",
icon = icon("arrow-left")
)
),
column(6, align = "center", actionButton(
inputId = "j2ftd", label = c(tagList("Next", icon("arrow-right")))
))
),
width = 3
),
mainPanel(uiOutput("tabs"))
)),
####WIZARD 3 - FINAL WIZARD TABLES & DOWNLOAD TAB ####
tabPanel(
"W3. Final Wizard Tables & Download",
sidebarLayout(
sidebarPanel(
h3(strong("Instructions")),
h4(
"Step 1: Press the 'Create/Refresh tables' button to see a summary of the data entered & Issues Log."
),
p(
"There are 4 tables: Issues, Mean Length of Service (only appears when no issues), Network Template and Calendar template",
style = "color:gray"
),
br(),
h4(
"Step 2: If there are any issues, return to the previous page and ammend the data inputs."
),
p(
"The location of the issue is listed along with a brief description.",
style = "color:gray"
),
br(),
h4(
"Step 3: Once there are no issues remaining, the option to download the templates for further use becomes available"
),
p(
"The templates created in the wizard can be saved down and then directly used in PathSimR at a later date.
Both templates are required for use in this way.",
style = "color:gray"
),
br(),
h4("Step 4: Proceed by pressing the 'Move to Simulation Tool' button."),
p(
"The inputs created in the wizard can be pulled through on the following page",
style = "color:gray"
),
br(),
fluidRow(column(
12,
align = "center",
actionButton(
inputId = "go",
label = "Create / Refresh tables",
style = 'padding:10px; font-size:150%'
)
)),
br(),
uiOutput("download_buttons"),
br(),
br(),
fluidRow(column(
6,
align = "center",
actionButton(
inputId = "jb2de",
label = "Previous",
icon = icon("arrow-left")
)
),
uiOutput("j2st")),
width = 3
),
mainPanel(
fluidRow(br(),
column(12, tableOutput("issues"), align = "center")),
fluidRow(br(),
column(12, tableOutput("means"), align = "center")),
fluidRow(br(),
column(12, tableOutput("var_view"), align = "center")),
fluidRow(br(),
column(12, tableOutput("cal_view"), align = "center"))
)
)
),
####SERVICE DISTRIBUTION TOOL TAB ####
tabPanel(
title = "Service Distribution Tool",
icon = icon("chart-area"),
sidebarLayout(
sidebarPanel(
h3(strong("Instructions")),
br(),
actionLink(
inputId = "model_help",
label = HTML("Which option do I need?"),
icon = icon("info-circle"),
style = ' font-size:150%'
),
br(),
bsModal(
id = "modal_model",
title = HTML("<h2><strong>Service Distribution Tool Help</strong></h2>"),
trigger = "model_help",
size = "large",
... =
HTML(
"
<p>
PathSimR's Service Distribution Tool contains 2 options depending on how much information is available about a service point. </p>
<p>If LoS data is available for the service point in question, then <strong>Option 1</strong> should be used. The data can be uploaded and
model fits run within the tool that provides the best fitting distribution and parameters that can be used. The data in question must be a single
column of data with no column header, saved as a csv. The graph below shows an example of uploaded data with 5 different best fitting distributions
plotted to show how the tool approximates the real data.</p>
<p>If only the mean LoS is known, then <strong>Option 2</strong> can be used, provided the service point type exists in the library (found on the Scale data by mean tab).
This portion of the tool scales a model distribution provided by BNSSG CCG to match the mean provided by the user, resulting in a model that has the correct shape and mean
for the service point type in question.
</p>
"
),
plotOutput("model_help_figure")
),
h4(strong("Option 1: Model fits to user data")),
h4(
em("Distribution & Parameters based on User data", style = "color:gray")
),
h5("Step 1: Select the 'Model fits to user data tab"),
h5(
"Step 2: Upload a single column csv that only includes LoS data - ",
em(" No Header required")
),
h5("Step 3: Press the 'Run Distribution Fit Tool' button"),
h5(
"Step 4: Inspect the histgram plot and model fit curves, the details of which are displayd in the Ranked Model Table"
),
h5(
"Step 5: Copy the top ranking model information from the table into the data entry page (i.e. Select the Distribution from the dropdown and enter the parameters listed)"
),
br(),
h4(strong("Option 2: Scale data by mean")),
h4(
em("Distribution & Parameters based on scaled data", style = "color:gray")
),
h5("Step 1: Select the 'Scale data by mean' tab"),
h5(
"Step 2: Select a Service Point from the drop-down library that matches the Service Point being modelled"
),
h5(
"Step 3: Enter the mean LoS associated with the modelled Service Point"
),
h5("Step 4: Press the 'Run/Refresh Scaling Tool' Button"),
h5(
"Step 5: Copy the model information from the table into the data entry page (i.e. Select the Distribution from the dropdown and eneter the parameters listed)"
),
h5(
"Optional Step: Inspect the distribution plot to see a visual version of the Length of Service Distribution"
),
width = 3
),
mainPanel(tabsetPanel(
tabPanel(
title = "Model fits to user data",
fileInput(
inputId = "los_dat",
label = "Upload csv",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv"),
width = '25%'
),
actionButton(inputId = "go_distfit", label = "Run Distribution Fit Tool"),
br(),
br(),
fluidRow(
column(10, plotOutput("los_fit_plot"), align = "center"),
column(2, br(), br(), br(), tableOutput("mini_summary"), align = "center")
),
fluidRow(br(),
h3(textOutput("los_text")),
p(textOutput("los_text_help"))),
fluidRow(column(12, tableOutput("los_fit_table"), align =
"center")),
h3(textOutput("fit_error"))
),
tabPanel(
title = "Scale data by mean",
fluidRow(column(
8,
br(),
p(
"Distributions and Parameters have been found for a variety of PODs/ Service Points, which are listed in the Service Point Library.
These were based on model fits to BNSSG data in order to match the shape of the Service time distribution. The data is rescaled based
on the Average Service value entered to create the required distribution."
)
)),
hr(),
fluidRow(
column(3, uiOutput("treatment_select_ui")),
column(
2,
numericInput(
inputId = "treatment_mean",
label = "Average Length of Service (Mean)",
min = 0,
value = 0,
step = 0.01
)
),
column(
1,
br(),
actionButton(inputId = "go_scaled_fit", label = "Run/Refresh Scaling Tool")
)
),
hr(),
tableOutput("scaled_fit"),
plotOutput("scaled_fit_plot")
)
))
)
),
####TOOL 1 - NETWORK IMPORT & VISUALISATION TAB ####
  tabPanel(
    "1. Network Import & Visualisation",
    # Tool step 1: load the network & calendar templates (from csv upload or
    # carried over from the wizard) and render the pathway as a graph.
    sidebarLayout(
      # Sidebar panel for inputs -
      sidebarPanel(
        # Input: Select a file -
        h5(strong("Instructions")),
        h5(
          "Step 1: Upload csv templates or bring through Wizard results by selecting the checkbox"
        ),
        h5(
          "Step 2: Press the 'Create visualisation' button to visualise the network."
        ),
        p(
          h5(
            "If there is an error in the template, the issues log will appear and highlight the issue.",
            style = "color:gray"
          )
        ),
        p(
          h5(
            "Optional Step: Toggle the checkboxes to see more information and refresh if appropriate",
            style = "color:gray"
          )
        ),
        h5(
          "Step 3: Once the network is created and correct, proceed to tab 2 (Simulation Setup & Run)"
        ),
        hr(),
        # When ticked, the wizard-built templates are used instead of uploads
        # (the file inputs below are hidden via the conditionalPanel).
        fluidRow(column(
          12,
          align = "center",
          checkboxInput(
            inputId = "w_temp",
            label = "Bring Through Wizard Results",
            value = 0
          )
        ), style = 'font-size:125%'),
        #add box to choose time unit ####
        #not used for any calculation, just for labelling of outputs
        selectInput(
          inputId = "time_unit",
          label = "Choose time unit",
          choices = list(
            "seconds",
            "minutes",
            "hours",
            "days",
            "weeks",
            "months",
            "years"
          )
        ),
        # Upload inputs shown only when not using the wizard results.
        conditionalPanel(
          condition = "input.w_temp== '0'",
          fileInput(
            inputId = "file1",
            label = "Upload Network CSV",
            buttonLabel = list(icon("project-diagram"), "Browse..."),
            multiple = FALSE,
            accept = c("text/csv",
                       "text/comma-separated-values,text/plain",
                       ".csv")
          ),
          # Input: Select a file --
          fileInput(
            inputId = "file2",
            label = "Upload Calendar CSV",
            buttonLabel = list(icon("calendar-alt"), "Browse..."),
            multiple = FALSE,
            accept = c("text/csv",
                       "text/comma-separated-values,text/plain",
                       ".csv")
          )
        ),
        fluidRow(column(
          12,
          align = "center",
          actionButton(
            inputId = "go_viz",
            label = "Create / Refresh Visualisation",
            icon = icon("project-diagram"),
            style = 'padding:10px; font-size:125%'
          )
        )),
        br(),
        fluidRow(column(
          12,
          align = "center",
          actionLink(
            inputId = "viz_help",
            label = HTML("Understanding the Network Visualisation"),
            icon = icon("info-circle"),
            style = 'font-size:125%'
          )
        )),
        br(),
        # Help modal: legend for the network graph (acronyms, node shapes,
        # arrow colours, hover tooltips).
        bsModal(
          id = "modal_viz",
          title = HTML("<h2><strong>Network Visualisation Help</strong></h2>"),
          trigger = "viz_help",
          size = "large",
          ... =
            p(
              "The network visualisation summarises all the input information about the pathway in one place.
          These are its key features:.",
              style = 'font-size:110%'
            ),
          br(),
          h4(strong("Tooltips")),
          p(
            "Hovering over service points will display a detailed capacity calendar. Tooltip behaviour may differ depending on the make of browser (e.g. RStudio internal, Chrome, Edge, Firefox, Internet Explorer) being used to view the Shiny app.",
            style = 'font-size:110%'
          ),
          br(),
          h4(strong("Acronyms")),
          p("LOS: Average Length of Service", style = 'font-size:110%'),
          p("Av Cap: Average Capacity", style = 'font-size:110%'),
          p("IQC: Internal Queue Capacity", style = 'font-size:110%'),
          p("EQC: External Queue Capacity", style = 'font-size:110%'),
          br(),
          h4(strong("Colours & Shapes")),
          p("Service points: Blue Square", style = 'font-size:110%'),
          p("Exits: Green Diamond", style = 'font-size:110%'),
          p("Arrivals: Red Arrow", style = 'font-size:110%'),
          p("Transfers with (prescribed) delay: Brown Arrow", style = 'font-size:110%'),
          p("Transfers without (prescribed) delay: Black Arrow", style = 'font-size:110%')
        ),
        hr(),
        # Toggles controlling which summary tables / labels are displayed.
        checkboxInput(
          inputId = "disp1",
          label = "Display network input table",
          value = TRUE
        ),
        checkboxInput(
          inputId = "disp2",
          label = "Display calendar input table",
          value = TRUE
        ),
        checkboxInput(
          inputId = "disp3",
          label = "Display extra Service Point information (Requires refresh)",
          value = TRUE
        ),
        hr(),
        fluidRow(
          column(
            6,
            align = "center",
            actionButton(
              inputId = "jb2i",
              label = "Back to Intro",
              icon = icon("arrow-left")
            )
          ),
          # "Next" button is rendered server-side once the network is valid.
          uiOutput("next_button")
        ),
        width = 3
      ),
      mainPanel(
        # Output: network graph plus file-validation issues and input previews.
        grVizOutput("network", height = "450px"),
        tableOutput("file_check_issues"),
        tableOutput("contents1"),
        tableOutput("contents2")
      )
    )
  ),
####TOOL 2 - SIMULATION SETUP & RUN TAB ####
tabPanel(
"2. Simulation Setup & Run",
sidebarLayout(
# Sidebar panel for inputs --
sidebarPanel(
# Input: Select warm up & simulation period --
h4(strong("Instructions")),
br(),
actionLink(
inputId = "sim_mode_help",
label = HTML("<strong>Which Simulation Mode should I use?</strong>"),
icon = icon("info-circle"),
style = 'font-size:110%'
),
br(),
bsModal(
id = "modal_sim_mode",
title = HTML("<h2><strong>Simulation Mode Help</strong></h2>"),
trigger = "sim_mode_help",
size = "large",
... =
HTML(
"
<p>PathSimR has two Simulation Modes: Trial Simulation, and Full Simulation.</p>
<h4>Trial Simulation </h4>
<p>Runs a small number of replications (10) to allow users to (i) estimate a warm up period, (ii) get an idea of roughly how long per replication the simulation will take to run, and (iii) sense check the outputs, all before committing potentially substnatial amounts of computer time and resource to a full simulation run with a large number of replications.</p>
<p>The only input required is the the simulation period. Outputs will be restricted to the Warm-up Period Assistance and Average Through Time tabs when in Trial Simulation mode.</p>
<h4>Full Simulation </h4>
<p> Performs the full simulation, with a large number of replications to achieve (relative) statistical accuracy, and calculates a full suite of output measures and visualisations. The simulation is run for the user-defined simulation period plus the user-defined warm-up period, and outputs then calculated based on the post-warm-up period (i.e. starting from 'normal' levels of occupancy rather than from empty).</p>
<p>In full simluation mode, all outputs (excpet the Warm-Up Period Assistance tab) will be viewable, and the a selection of tables, dowloads and a summary document will be available to download. </p>
<p>The number of computer CPU cores will be automatically maximised in Full Simulation mode - this may reduce the capacity of the computer to perform other tasks while it is running.</p>
"
)
),
br(),
h4(strong("Mode 1: Trial Simulation")),
h5("Step 1: Input Simulation period below"),
actionLink(
inputId = "wu_help",
label = HTML("<strong>What are the warm-up and simulation periods?</strong>"),
icon = icon("info-circle")
),
bsModal(
id = "modal_wu",
title = HTML("<h2><strong>Warm-up and Simulation Period Help</strong></h2>"),
trigger = "wu_help",
size = "large",
... =
HTML(
"
<h3><strong>Warm-up Period</strong></h3>
<p>
The warm-up period represents the time it takes for a simulation to reach stable running conditions and after which results can be recorded.
As each simulation starts from empty, it is important that the warm-up period be long enough so that the results collected are reflective of
the modelled system and not an emptier version.</p>
<p>To determine a suitable warm-up period, first run the simulation in Trial Simulation Mode and look
at the resulting Warm-up Period Assistance and Average Through Time plots. The first two plots below show a stable system with a recognisable warm-up period.
The total in system graph stabilises around the value 30 after approximately 25 time units and all 5 metrics in the average through time graph
flatten out/stabilise after
roughly the same time period, therefore the full simulation should use a warm-up period of 25.</p>
<p>The second set of two plots shows an unstable system, characterised
by an ever increasing queue length and total number in system. This system can still be simulated but the results may not be sensible given the instability. If it is unclear
whether the system has stabilised, run the simulation again with a longer simulation period and update the warm-up period approximation appropriately.
</p>"
),
fluidRow(column(
6, img(
src = 'wu1.png',
height = "100%",
width = "100%"
)
),
column(
6, img(
src = 'wu2.png',
height = "100%",
width = "100%"
)
)),
fluidRow(column(
6, img(
src = 'wu3.png',
height = "100%",
width = "100%"
)
),
column(
6, img(
src = 'wu4.png',
height = "100%",
width = "100%"
)
)),
HTML(
"<h3><strong>Simulation Period</strong></h3>
<p>
The Simulation Period is the amount of time over which to collect results from the simulation. This will need to be sufficiently long such that
a large number of patients can pass through the pathway. For example, if a pathway has an average length of 365 days then simulating it for only 10
would not produce complete results, as unique patients would not have completed the pathway. The simulation period should therefore be longer than
the average pathway length and long enough to collect data. The warm-up and simulation periods sum to the total simulation length, over which the
external arrivals and capacity calendar will operate. Therefore, the simulation may start collecting data part way through a calendar cycle but this
will have no affect on the results.
</p> "
)
),
h5("Step 2: Create checklist and ensure all values are correct"),
h5(
"Step 3: Press the 'Run Simulation' button to produce trial results based on a small number of replications"
),
h5(
"Step 4: Check the Simulation Outputs tab to evaluate the results of the trial simulation to help estimate the warm-up period (using the warm-up period assistance and average through time tabs).
Proceed to 'Full Simulation' mode if simulation period is suitable or return to 'Trial Simulation' mode, update the inputs and re-run"
),
br(),
h4(strong("Mode 2: Full Simulation")),
h5(
"Step 1: Input warm-up period below based on the output from the warm-up period assistance tab"
),
h5("Step 2: Input number of simulation replications below"),
h5("Step 3: Refresh checklist and ensure all values are correct"),
h5(
"Step 4: Start the simulation by pressing the 'Run Simulation' button"
),
h5(
"Step 5: Simulation Outputs and Download Outputs tabs are now available"
),
# Horizontal line -
hr(),
fluidRow(column(
12,
align = "center",
selectInput(
inputId = "run_type",
label = "Select Mode",
choices = c("Trial Simulation", "Full Simulation"),
selected = "Trial Simulation",
selectize = F
)
)),
conditionalPanel(
condition = "input.run_type=='Full Simulation'",
hr(),
numericInput(
inputId = "wu",
label = "Length of warm-up period",
value = 0,
min = 0
)
),
hr(),
numericInput(
inputId = "st",
label = "Length of simulation period",
value = "",
min = 1
),
conditionalPanel(
condition = "input.run_type=='Full Simulation'",
hr(),
numericInput(
inputId = "reps",
label = "Number of simulation replications",
value = 100,
min = 1
)
),
hr(),
fluidRow(column(
12,
align = "center",
actionButton(
inputId = "checklist",
label = "Create / Refresh Checklist",
icon = icon("clipboard"),
style = 'padding:10px; font-size:125%'
)
)),
br(),
fluidRow(column(
12,
align = "center",
actionButton(
inputId = "sim",
label = "Run Simulation",
icon = icon("play"),
style = 'padding:10px; font-size:125%'
)
)),
br(),
fluidRow(
column(
6,
align = "center",
actionButton(
inputId = "jb2niv",
label = "Previous",
icon = icon("arrow-left")
)
),
uiOutput("next_button2")
),
width = 3
),
# Main panel for displaying outputs -
mainPanel(
useShinyalert(),
fluidRow(
column(4, align = "center", tableOutput("checklist_table_render")),
column(8, grVizOutput("cl_viz"))
),
fluidRow(column(12, align = "center", h1(textOutput(
"comp"
)))),
br()
#,
#fluidRow(column(12,align="center",tableOutput("run_time")))
)
)
),
####TOOL 3 - SIMULATION OUTPUTS TAB ####
tabPanel(
"3. Simulation Outputs",
navlistPanel(
id = "Simulation Outputs",
tabPanel(
title = "Output Interpretation",
icon = icon("book"),
fluidRow(
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(
9,
h2("Output Interpretation"),
p(
"The pages on the left show the key results from the simulation, a description of which can be found below.
Depending on the number of replications run, the graphs and tables may take a moment to render (Click through the tabs to ensure the rendering begins).
Return to the previous pages using the navigation bar above."
),
hr(),
h4(
strong(
"Warm-Up Period Assistance (Only available in Trial Simulation Mode)"
)
),
p(
"The warm-up period represents the time it takes for a simulation to reach stable running conditions and after which results can be recorded.
As each simulation starts from empty, it is important that the warm-up period be long enough so that the results collected are reflective of
the modelled system and not an emptier version. To determine a suitable warm-up period, the Warm-Up Period Assistance tab shows the total number of people in the
system through time. This metric can be used to work out how long the warm-up period needs to be."
),
hr(),
h4(
strong(
"Average Through Time Plot (Available in both Trial and Full Simulation Modes)"
)
),
p(
"A summary plot showing how each of the 5 time varying parameters vary through time with mean values plotted with envelopes of 50%, 95% and 99% percentile data.
This should allow the overarching trends in these metrics to be understood in a single figure and also how changes and shift in one metrics influence changes in
others. Variation in the mean lines could be the result of two different factors: 1) Sample size being too small (increase # of replications) or 2) The system has
inherent variation, either driven by random dynamics or the prescribed calendar schedule. This plot allows the user to quickly understand the dynamics of the system
e.g. if a unit has reached capcaity and stayed full, if the queue length has stabilised or if it is continuously increasing, whether the number of patients being delayed
due to capacity is lower than expected values."
),
hr(),
h4(
strong(
"Service Point & Pathway Statistics (Only available in Full Simulation Mode)"
)
),
p(
"These pages contains multiple tables looking at 7 key metrics of the patient experience (broken down by service node or summarised over the pathway):"
),
tags$ol(
tags$li(
strong("Total time in system"),
"- Amount of time between arriving at the first node and leaving the last on a patient pathway."
),
tags$li(
strong("Wait"),
"- Amount of time between arriving at a queue and starting service."
),
tags$li(
strong("Time Delayed (Capacity Driven)"),
"- Amount of time experiencing a capacity driven delay."
),
tags$li(
strong("Time Delayed (Transition)"),
"- Amount of time experiencing a transition delay."
),
tags$li(
strong("Length of Stay"),
"- Amount of time between starting service and departing to the next service."
),
tags$li(
strong("Delay to Transfer"),
"- Amount of time between finishing service and departure (i.e. end of any delays)."
),
tags$li(
strong("Rejection Rate"),
"- Number of external arrivals that were rejected due to full queues per time unit."
)
),
h4(
strong(
"Metrics Through Time Summaries (Only available in Full Simulation Mode)"
)
),
p(
"5 values are monitored throughout the simulation so their changes through time can be investigated through time:"
),
strong(tags$ul(
tags$li("Patient Occupancy"),
tags$li("Bed Occupancy"),
tags$li("Capacity Driven Delay"),
tags$li("Transition Delay"),
tags$li("Queue")
)),
p("Each page on the left contains the same 5 tables/graphs:"),
tags$ul(
tags$li(
strong("Top Left - Percentage time at level plot"),
" - A graph showing the amount of time each Service Point spent at level of the metric
(e.g. Amount of time Service Point A had an occupancy of 5 patients). The distribution of bars informs how the metric has varied throughout the simulation,
for example if the bars appear reasonably symetric around a value then the system is showing signs of stability. On the other hand, if one bar dominates then
the system is showing signs of underlying system dynamics e.g. constantly at full capacity or following a strict calendar."
),
br(),
tags$li(
strong("Top Right - Metric through time plot"),
" - A graph showing the metric in question through time, split by Service Point and replicate (max 5 random replicatations).
These represent actual simulation runs that are then combined (across all replications) to form the summary outputs. These should not be used to infer specific results, but
are intended to be illustrative of the variation found within simulation."
),
br(),
tags$li(
strong("Bottom Left - Percentiles summary table"),
" - A table outlining the values associated with different percentile levels e.g. 90% of the time, the Service Point has an occupancy of 5 or less"
),
br(),
tags$li(
strong("Bottom Right - Percentage time at level table"),
" - Raw data used to construct 'Percentage time at level' plot.
Can be filtered and sorted by any column and also contains a cumulative sum which can be used to calcuate percentiles."
),
br(),
tags$li(
strong("Bottom Centre - Average Over Simulation"),
" - Average value for the metric in question when using the data from the entire simulation."
)
),
p(
"All plots show a maximum of 5 replicates and have the same legends and colour coding for Service Points"
),
hr()
)
)
),
tabPanel(
"Warm-Up Period Assistance",
icon = icon("chart-line"),
h2(strong("Warm-Up Period Assistance")),
hr(),
conditionalPanel(
condition = "input.run_type=='Trial Simulation'",
p(
"The warm-up period represents the time it takes for a simulation to reach stable running conditions and after which results can be recorded.
As each simulation starts from empty, it is important that the warm-up period be long enough so that the results collected are reflective of
the modelled system and not an emptier version. To determine a suitable warm-up period, find the time after which the total number of people in system has leveled out/ stabilised.
For highly dynamic systems, you may also need to consult the average through time tab to see how the number of people in each service point and queue is changing. The
warm-up period can be determined in the same way as before but needs to be the time required for all metrics to stabilise."
),
plotOutput("tisp", height =
"850px")
),
conditionalPanel(condition = "input.run_type=='Full Simulation'",
h2(strong(
"Not Available in Full Simulation"
)))
),
tabPanel(
"Average Through Time Plot",
icon = icon("chart-line"),
h2(strong("Average Through Time Overview")),
hr(),
p(
"The plot below shows how each of the 5 time varying parameters vary through time with mean values plotted with envelopes of 50%, 95% and 99% percentile data.
This should allow the overarching trends in these metrics to be understood in a single figure and also how changes and shift in one metrics influence changes in
others. Variation in the mean lines could be the result of two different factors: 1) Sample size being too small (increase # of replications) or 2) The system has
iherent variation, either driven by random dynamics or the prescribed calendar schedule. This plot allows the user to quickly understand the dynamics of the system
e.g. if a unit has reached capcaity and stayed full, if the queue length has stabilised or if it is continuously increasing, whether the number of patients being delayed
due to capacity is lower than expected values."
),
plotOutput("multi_plot", height = "850px")
),
tabPanel(
"Service Point Statistics",
icon = icon("table"),
h2(strong("Service Point Statistics")),
hr(),
conditionalPanel(
condition = "input.run_type=='Full Simulation'",
fluidRow(column(
12, align = "center", grVizOutput("tables_viz1", height = "400px")
)),
br(),
fluidRow(
column(
4,
align = "center",
h4("Wait Time"),
tableOutput("node_wait_summary"),
align = "center"
),
column(
4,
align = "center",
h4("Time Delayed (Capacity Driven)"),
tableOutput("node_capacity_delay_summary"),
align = "center"
),
column(
4,
align = "center",
h4("Time Delayed (Transition)"),
tableOutput("node_transition_delay_summary"),
align = "center"
)
),
fluidRow(
column(
4,
align = "center",
h4("Length Of Stay"),
tableOutput("node_loss"),
align = "center"
),
column(
4,
align = "center",
h4("Delay-To-Transfer"),
tableOutput("node_dtts"),
align = "center"
),
column(
width = 1,
offset = 0,
style = 'padding:0px;'
),
column(2, h4("Rejection Rate"), tableOutput("rejs"), align =
"center")
)
),
conditionalPanel(condition = "input.run_type=='Trial Simulation'",
h2(
strong("Not Available in Trial Simulation")
))
),
tabPanel(
"Pathway Statistics",
icon = icon("table"),
h2(strong("Pathway Statistics")),
hr(),
conditionalPanel(
condition = "input.run_type=='Full Simulation'",
fluidRow(column(
12, align = "center", grVizOutput("tables_viz2", height = "400px")
)),
br(),
fluidRow(
column(
4,
align = "center",
h4("Wait Time"),
tableOutput("pat_wait_summary"),
align = "center"
),
column(
4,
align = "center",
h4("Time Delayed (Capacity Driven)"),
tableOutput("pat_capacity_delay_summary"),
align = "center"
),
column(
4,
align = "center",
h4("Time Delayed (Transition)"),
tableOutput("pat_transition_delay_summary"),
align = "center"
)
),
fluidRow(
column(
4,
align = "center",
h4("Length Of Stay"),
tableOutput("pat_loss"),
align = "center"
),
column(
4,
align = "center",
h4("Delay-To-Transfer"),
tableOutput("pat_dtts"),
align = "center"
),
column(
4,
align = "center",
h4("Total Time in System"),
tableOutput("ttiss"),
align = "center"
)
)
),
conditionalPanel(condition = "input.run_type=='Trial Simulation'",
h2(
strong("Not Available in Trial Simulation")
))
),
tabPanel(
"Patient Occupancy Summary",
icon = icon("user"),
h2(strong("Patient Occupancy Summary")),
hr(),
conditionalPanel(
condition = "input.run_type=='Full Simulation'",
fluidRow(
column(
6,
align = "center",
h3("% time at Patient Occupancy level"),
plotOutput("pto_plot", height = "500px")
),
column(
6,
align = "center",
h3("Patient Occupancy for 5 replicates"),
plotOutput("o", height = "500px")
)
),
fluidRow(
column(4, align = "center", dataTableOutput("opercentiles")),
column(8, align = "center", dataTableOutput("pto_percent", width =
"70%"))
),
fluidRow(
column(
width = 4,
offset = 0,
style = 'padding:0px;'
),
column(2, align = "center", dataTableOutput("avg_occupancy"))
)
),
conditionalPanel(condition = "input.run_type=='Trial Simulation'",
h2(
strong("Not Available in Trial Simulation")
))
),
tabPanel(
"Bed Occupancy Summary",
icon = icon("bed"),
h2(strong("Bed Occupancy Summary")),
hr(),
conditionalPanel(
condition = "input.run_type=='Full Simulation'",
fluidRow(
column(
6,
align = "center",
h3("% time at Bed Occupancy level"),
plotOutput("ptb_plot", height = "500px")
),
column(
6,
align = "center",
h3("Bed Occupancy for 5 replicates"),
plotOutput("b", height = "500px")
)
),
fluidRow(
column(4, align = "center", dataTableOutput("bpercentiles")),
column(8, align = "center", dataTableOutput("ptb_percent", width =
"70%"))
),
fluidRow(
column(
width = 4,
offset = 0,
style = 'padding:0px;'
),
column(2, align = "center", dataTableOutput("avg_occ_bed"))
)
),
conditionalPanel(condition = "input.run_type=='Trial Simulation'",
h2(
strong("Not Available in Trial Simulation")
))
),
tabPanel(
"Capacity Driven Delay Summary",
icon = icon("door-closed"),
h2(strong("Capacity Driven Delay Summary")),
hr(),
conditionalPanel(
condition = "input.run_type=='Full Simulation'",
fluidRow(
column(
6,
align = "center",
h3("% time at Capacity Delay level"),
plotOutput("ptd_plot", height = "500px")
),
column(
6,
align = "center",
h3("Capacity Delay Level for 5 replicates"),
plotOutput("d", height = "500px")
)
),
fluidRow(
column(4, align = "center", dataTableOutput("dpercentiles")),
column(8, align = "center", dataTableOutput("ptd_percent", width =
"70%"))
),
fluidRow(
column(
width = 4,
offset = 0,
style = 'padding:0px;'
),
column(2, align = "center", dataTableOutput("avg_delayed"))
)
),
conditionalPanel(condition = "input.run_type=='Trial Simulation'",
h2(
strong("Not Available in Trial Simulation")
))
),
tabPanel(
"Transition Delay Summary",
icon = icon("expand-arrows-alt"),
h2(strong("Transition Delay Summary")),
hr(),
conditionalPanel(
condition = "input.run_type=='Full Simulation'",
fluidRow(
column(
6,
align = "center",
h3("% time at Transition Delay level"),
plotOutput("ptt_plot", height = "500px")
),
column(
6,
align = "center",
h3("Transition Delay Level for 5 replicates"),
plotOutput("t", height = "500px")
)
),
fluidRow(
column(4, align = "center", dataTableOutput("tpercentiles")),
column(8, align = "center", dataTableOutput("ptt_percent", width =
"70%"))
),
fluidRow(
column(
width = 4,
offset = 0,
style = 'padding:0px;'
),
column(2, align = "center", dataTableOutput("avg_transition"))
)
),
conditionalPanel(condition = "input.run_type=='Trial Simulation'",
h2(
strong("Not Available in Trial Simulation")
))
),
tabPanel(
"Queueing Summary",
icon = icon("clock"),
h2(strong("Queueing Summary")),
hr(),
conditionalPanel(
condition = "input.run_type=='Full Simulation'",
fluidRow(
column(
6,
align = "center",
h3("% time at Queue Length"),
plotOutput("ptq_plot", height = "500px")
),
column(
6,
align = "center",
h3("Queue Length for 5 replicates"),
plotOutput("q", height = "500px")
)
),
fluidRow(
column(4, align = "center", dataTableOutput("qpercentiles")),
column(8, align = "center", dataTableOutput("ptq_percent", width =
"70%"))
),
fluidRow(
column(
width = 4,
offset = 0,
style = 'padding:0px;'
),
column(2, align = "center", dataTableOutput("avg_queue"))
)
),
conditionalPanel(condition = "input.run_type=='Trial Simulation'",
h2(
strong("Not Available in Trial Simulation")
))
),
widths = c(3, 9),
well = TRUE
)
),
####TOOL 4 - DOWNLOAD OUTPUTS TAB ####
tabPanel(
"4. Download Outputs",
sidebarLayout(
# Sidebar panel for buttons --
sidebarPanel(
# File Downloader --
h4(strong("Details")),
h5(
"A description of each of the files can be found in the",
strong("Output Library Document.")
),
hr(),
h4("Data Tables"),
p(
"PathSimR produces an excel workbook which contains all metrics produced within the tool at both replicate level and simulation level. Each tab is clearly labelled,
with the first half of the tabs relating to patient level metrics (e.g. wait times, Length of Stays etc) and the second half containing information regarding the through
time metrics (e.g. Occupancy, Queues etc). The final tab contains all the data required to recreate the 'Average Through Time' plot."
),
fluidRow(column(
12,
align = "center",
downloadButton(
outputId = "downloadtables",
label = "Download Tables",
icon = icon("download"),
style = 'padding:10px; font-size:125%'
)
)),
hr(),
h4("Simulation Plots"),
p(
"All plots created in PathSimR are saved down in a single PDF that can then be manipulated as needed. All the figures shown in PathSimR can be recreated from the data
provided in the Data Tables (download button above)."
),
fluidRow(column(
12,
align = "center",
downloadButton(
outputId = "downloadplot",
label = "Download Plots",
icon = icon("download"),
style = 'padding:10px; font-size:125%'
)
)),
hr(),
h4("Automated Report"),
p(
"An automated word document is produced which includes a collection of figures and data tables from the simulation. These have been sorted into sections and a brief description of
the metrics and figures is included. This report is designed to speed up the summary process and provides easy manipulation for the user."
),
fluidRow(column(
12,
align = "center",
downloadButton(
outputId = "downloadreport",
label = "Download Report",
icon = icon("download"),
style = 'padding:10px; font-size:125%'
)
)),
hr(),
width = 3
),
# Main panel for displaying outputs -
mainPanel()
),
useShinyalert()
)
)
#### SHINY SERVER CODE (INC SIM CODE) ####
server <- function(input, output, session) {
####Figures for Modals####
# Static example table for the "How do I fill the calendar?" help modal
# (external arrivals).
# Fixes: use `<-` for assignment, and pass check.names = FALSE so the
# headers keep their spaces ("Start Time" etc.) and match the real calendar
# matrix inputs; the data.frame() default would mangle them to "Start.Time".
output$ext_arr_example <- renderTable({
  data.frame(
    "Start Time" = c(0, 100, 150),
    "End Time" = c(100, 150, 200),
    "Arrival Rate" = c(1, 0, 2),
    check.names = FALSE
  )
}, caption = "Example External Arrival Calendar",
caption.placement = getOption("xtable.caption.placement", "top"),
caption.width = getOption("xtable.caption.width", NULL), striped = TRUE, bordered = TRUE)
# Static example table for the capacity-calendar help modal.
# Fixes: use `<-` for assignment, and pass check.names = FALSE so the
# headers keep their spaces ("Start Time", "End Time", "Capacity") and match
# the real capacity calendar matrix inputs; the data.frame() default would
# mangle them to "Start.Time" etc.
output$cap_example <- renderTable({
  data.frame(
    "Start Time" = c(0, 30, 90),
    "End Time" = c(30, 90, 180),
    "Capacity" = c(24, 48, 72),
    check.names = FALSE
  )
}, caption = "Example Capacity Calendar",
caption.placement = getOption("xtable.caption.placement", "top"),
caption.width = getOption("xtable.caption.width", NULL), striped = TRUE, bordered = TRUE)
# Illustrative figure for the distribution-fitting help modal: draw a large
# exponential sample and overlay the fitted density of every candidate
# distribution offered in the wizard (no seed is set, so the sample differs
# per render — the figure is purely illustrative).
output$model_help_figure <- renderPlot({
  sample_data <- rexp(10000, 1)
  candidate_dists <- c("exp", "lnorm", "unif", "weibull", "gamma")
  # Fit each candidate distribution to the same sample.
  fitted_models <- lapply(candidate_dists, function(dist_name) {
    fitdist(data = sample_data, distr = dist_name)
  })
  # Density comparison plot (ggplot style); custom fit colours were
  # previously considered but are left at the package defaults.
  denscomp(
    ft = fitted_models,
    plotstyle = "ggplot",
    breaks = 100,
    fitlty = 1
  ) + theme_bw()
}, res = 128)
#### Navigation Buttons ####
# On start-up, hide every tool/wizard tab so only the introduction is
# visible until the user picks a route. Same hide order as before.
for (hidden_tab in c(
  "1. Network Import & Visualisation",
  "2. Simulation Setup & Run",
  "3. Simulation Outputs",
  "4. Download Outputs",
  "W1. Setup",
  "W2. Data Entry",
  "W3. Final Wizard Tables & Download",
  "Service Distribution Tool"
)) {
  hideTab(inputId = "navbar", target = hidden_tab)
}
# "Start wizard" button: reset every tool/wizard tab to hidden, then reveal
# and jump to the first wizard page. Hide order matches the original
# statement-by-statement sequence.
observeEvent(input$j2w, {
  wizard_reset_tabs <- c(
    "1. Network Import & Visualisation",
    "2. Simulation Setup & Run",
    "3. Simulation Outputs",
    "4. Download Outputs",
    "W1. Setup",
    "W2. Data Entry",
    "W3. Final Wizard Tables & Download",
    "Service Distribution Tool"
  )
  for (tab_name in wizard_reset_tabs) {
    hideTab(inputId = "navbar", target = tab_name)
  }
  showTab(inputId = "navbar", target = "W1. Setup")
  updateTabsetPanel(session, "navbar",
                    selected = "W1. Setup")
})
# "Skip wizard / direct import" button: reset every tool/wizard tab to
# hidden, then reveal and jump straight to the network import page. Hide
# order matches the original statement-by-statement sequence.
observeEvent(input$j2s1, {
  direct_reset_tabs <- c(
    "1. Network Import & Visualisation",
    "2. Simulation Setup & Run",
    "3. Simulation Outputs",
    "4. Download Outputs",
    "W1. Setup",
    "W2. Data Entry",
    "W3. Final Wizard Tables & Download",
    "Service Distribution Tool"
  )
  for (tab_name in direct_reset_tabs) {
    hideTab(inputId = "navbar", target = tab_name)
  }
  showTab(inputId = "navbar", target = "1. Network Import & Visualisation")
  updateTabsetPanel(session, "navbar",
                    selected = "1. Network Import & Visualisation")
})
# --- Simple navigation handlers ---
# Each button jumps to (and, where a tab may still be hidden, reveals) the
# relevant navbar tab. Input IDs follow the convention: j2* = "jump to",
# jb2* = "jump back to".
# Back to the introduction page.
observeEvent(input$jb2i, {
  updateTabsetPanel(session, "navbar",
                    selected = "Introduction")
})
# Forward to the simulation setup tab.
observeEvent(input$j2PSR2, {
  updateTabsetPanel(session, "navbar",
                    selected = "2. Simulation Setup & Run")
})
# Jump to the wizard setup page.
observeEvent(input$j2s, {
  updateTabsetPanel(session, "navbar",
                    selected = "W1. Setup")
})
# Forward from wizard setup to data entry; reveals the data entry and
# service distribution tool tabs the first time through.
observeEvent(input$j2de, {
  showTab(inputId = "navbar", target = "W2. Data Entry")
  showTab(inputId = "navbar", target = "Service Distribution Tool")
  updateTabsetPanel(session, "navbar",
                    selected = "W2. Data Entry")
})
# Forward from data entry to the final wizard tables page (revealing it).
observeEvent(input$j2ftd, {
  showTab(inputId = "navbar", target = "W3. Final Wizard Tables & Download")
  updateTabsetPanel(session, "navbar",
                    selected = "W3. Final Wizard Tables & Download")
})
# Back from the final wizard tables page to data entry.
observeEvent(input$jb2de, {
  showTab(inputId = "navbar", target = "W2. Data Entry")
  updateTabsetPanel(session, "navbar",
                    selected = "W2. Data Entry")
})
# From the wizard into the main tool (network import), revealing that tab.
observeEvent(input$j2PSR, {
  showTab(inputId = "navbar", target = "1. Network Import & Visualisation")
  updateTabsetPanel(session, "navbar",
                    selected = "1. Network Import & Visualisation")
})
# Forward from simulation setup to the outputs tab.
observeEvent(input$j2PSR3, {
  updateTabsetPanel(session, "navbar",
                    selected = "3. Simulation Outputs")
})
# Back to the introduction page (from the simulation setup sidebar).
observeEvent(input$jb2i2, {
  updateTabsetPanel(session, "navbar",
                    selected = "Introduction")
})
# Back from simulation setup to the network import tab.
observeEvent(input$jb2niv, {
  updateTabsetPanel(session, "navbar",
                    selected = "1. Network Import & Visualisation")
})
##### START OF DYNAMIC WIZARD SERVER CODE ######
#### Name Input tables and checks ####
### Creates table of service point names ###
# Preview table of the cleaned service point names entered in the wizard.
# The cleaning pipeline (unique -> trim -> spaces-to-underscores -> drop
# blanks) is kept in the same order as everywhere else in the app so the
# preview matches what the tab builder will produce.
# NOTE(review): because unique() runs before trimming, entries differing
# only in surrounding whitespace survive as duplicates — consistent with
# the rest of the app, but worth confirming it is intended.
# Fixes: removed the dead rownames()/colnames() assignments (trimws()/gsub()
# drop matrix dims anyway) — the rownames line also used 1:nrow(x), which
# errors on a zero-row input.
output$sp_table <- renderTable({
  x <- unique(input$sp)              # drop duplicate raw rows from the matrix input
  x <- trimws(x = x, which = "both") # strip surrounding whitespace (returns a plain vector)
  x <- gsub(x = x, pattern = " ", "_")
  x <- x[which(x != "")]             # discard empty entries
  x <- data.frame("Service Points" = x)
  colnames(x) <- "Service Points"    # restore the space data.frame() turned into a dot
  x
}, rownames = TRUE, striped = TRUE, bordered = TRUE)
### Creates table of exit names ###
# Preview table of the cleaned exit names entered in the wizard. Cleaning
# pipeline order (unique -> trim -> spaces-to-underscores -> drop blanks)
# intentionally mirrors the service point table and the tab builder.
# Fixes: removed the dead rownames() assignment (trimws()/gsub() drop matrix
# dims anyway), which also used 1:nrow(x) and would error on zero rows.
output$exit_table <- renderTable({
  x <- unique(input$exit)            # drop duplicate raw rows from the matrix input
  x <- trimws(x = x, which = "both") # strip surrounding whitespace (returns a plain vector)
  x <- gsub(x = x, pattern = " ", "_")
  x <- x[which(x != "")]             # discard empty entries
  data.frame("Exits" = x)
}, rownames = TRUE, striped = TRUE, bordered = TRUE)
### Creates text for duplicates ###
# Validation message shown when a name appears in both the Service Point
# and Exit lists (names must be unique across the two).
# Fixes: the two lists were cleaned with duplicated inline code and then
# compared with a redundant symmetric pair of any(%in%) tests — set
# intersection is symmetric, so a single non-empty-intersection check is
# equivalent. The dead rownames()/colnames() assignments are gone too.
output$duplicate <- renderText({
  # Same cleaning pipeline (and order) as the preview tables / tab builder.
  clean_names <- function(raw) {
    v <- unique(raw)
    v <- trimws(x = v, which = "both")
    v <- gsub(x = v, pattern = " ", "_")
    v[which(v != "")]
  }
  sp_names <- clean_names(input$sp)
  exit_names <- clean_names(input$exit)
  if (length(intersect(sp_names, exit_names)) > 0) {
    "One or more names appear in both the Service Point & Exit lists. \n Please update before proceeding."
  }
  # Implicitly returns NULL (no message) when there is no overlap.
})
#### Creates the Data Entry Service Point tabs UI ####
# Dynamically builds one wizard "Data Entry" tab per service point. For each
# service point the tab contains: a length-of-service distribution picker
# (with per-distribution parameter inputs), queue capacities, a grid of
# transition-proportion / transition-delay inputs towards every other node
# and exit, and two calendar matrix inputs (external arrivals, capacity).
# Widget IDs are suffixed with the node index (and target index for
# transitions) so they can be collected back by the eventReactive below.
output$tabs = renderUI({
  # Clean the raw service point names: de-duplicate rows, trim whitespace,
  # replace internal spaces with underscores, drop blanks. The rownames
  # assignment is retained from the original pipeline (dims are dropped by
  # trimws() anyway).
  x <- input$sp
  x <- unique(x)
  rownames(x) <- 1:nrow(x)
  x <- trimws(x = x, which = "both")
  x <- gsub(x = x, pattern = " ", "_")
  sp <- x[which(x != "")]
  # Same cleaning for the exit names.
  x <- input$exit
  x <- unique(x)
  rownames(x) <- 1:nrow(x)
  x <- trimws(x = x, which = "both")
  x <- gsub(x = x, pattern = " ", "_")
  exit <- x[which(x != "")]
  node_number <- length(sp)
  exit_number <- length(exit)
  tabnames <- sp
  node_names <- sp
  exit_names <- exit
  # Transition targets are every node plus every exit, in this order.
  all_names <- c(node_names, exit_names)
  #### Creates the transition probability inputs & delay departure entry (dynamic based on number of nodes & exits) ####
  # For each source node j, build (via assign()) a list `transition_j` of
  # column() widgets — one per possible destination i. The diagonal (j == i,
  # self-transition) gets disabled placeholder inputs; every other cell gets
  # a proportion input plus a delay-distribution picker with per-distribution
  # parameter fields shown through conditionalPanel().
  for (j in 1:node_number) {
    assign(x = paste0("transition_", j),
           value = lapply(1:length(all_names), function(i) {
             if (j != i) {
               column(
                 2,
                 # Destination name header.
                 fluidRow(
                   column(
                     12,
                     align = "center",
                     h4(all_names[i]),
                     style = 'padding:2px; font-size:150%'
                   )
                 ),
                 # Proportion of departures from node j routed to target i.
                 numericInput(
                   inputId = paste0("transition_", j, "_", i),
                   label = paste("Proportion from", all_names[j], "to", all_names[i]),
                   value = 0,
                   min = 0,
                   max = 1,
                   step = 0.001
                 ),
                 # Distribution of the transition (transfer) delay j -> i.
                 selectInput(
                   inputId = paste0("delay_dist_", j, "_", i),
                   label = paste(
                     "Distribution for Transition Delay from",
                     all_names[j],
                     "to",
                     all_names[i]
                   ),
                   choices = c(
                     "None",
                     "Exponential",
                     "log-Normal",
                     "Uniform",
                     "Weibull",
                     "Gamma"
                   ),
                   selected = "None",
                   selectize = F
                 ),
                 # Parameter inputs — only the panel matching the selected
                 # distribution is shown.
                 fluidRow(
                   conditionalPanel(
                     condition = paste0(
                       "input.",
                       paste0("delay_dist_", j, "_", i),
                       " == 'None'"
                     ),
                     # NOTE(review): this placeholder ID omits the source
                     # index j (only "_", i), so the same ID is emitted for
                     # every source node — confirm whether j should be
                     # included like the other parameter inputs.
                     disabled(column(
                       12,
                       textInput(
                         inputId = paste0("delay_param_none_1_", i),
                         value = "NA",
                         label = "No Parameters Required"
                       )
                     ))
                   ),
                   conditionalPanel(
                     condition = paste0(
                       "input.",
                       paste0("delay_dist_", j, "_", i),
                       " == 'Exponential'"
                     ),
                     column(
                       6,
                       numericInput(
                         inputId = paste0("delay_param_exp_1_", j, "_", i),
                         label = "Rate",
                         value = "",
                         min = 0,
                         step = 0.0001
                       )
                     ),
                     # Empty half-width column keeps the layout aligned with
                     # the two-parameter distributions.
                     column(6, br())
                   ),
                   conditionalPanel(
                     condition = paste0(
                       "input.",
                       paste0("delay_dist_", j, "_", i),
                       " == 'log-Normal'"
                     ),
                     column(
                       6,
                       numericInput(
                         inputId = paste0("delay_param_lnorm_1_", j, "_", i),
                         label = "meanlog",
                         value = "",
                         step = 0.0001
                       )
                     ),
                     column(
                       6,
                       numericInput(
                         inputId = paste0("delay_param_lnorm_2_", j, "_", i),
                         label = "sdlog",
                         value = "",
                         step = 0.0001
                       )
                     )
                   ),
                   conditionalPanel(
                     condition = paste0(
                       "input.",
                       paste0("delay_dist_", j, "_", i),
                       " == 'Uniform'"
                     ),
                     column(
                       6,
                       numericInput(
                         inputId = paste0("delay_param_unif_1_", j, "_", i),
                         label = "Min",
                         value = "",
                         min = 0,
                         step = 0.0001
                       )
                     ),
                     column(
                       6,
                       numericInput(
                         inputId = paste0("delay_param_unif_2_", j, "_", i),
                         label = "Max",
                         value = "",
                         min = 0,
                         step = 0.0001
                       )
                     )
                   ),
                   conditionalPanel(
                     condition = paste0(
                       "input.",
                       paste0("delay_dist_", j, "_", i),
                       " == 'Weibull'"
                     ),
                     column(
                       6,
                       numericInput(
                         inputId = paste0("delay_param_weibull_1_", j, "_", i),
                         label = "Shape",
                         value = "",
                         step = 0.0001
                       )
                     ),
                     column(
                       6,
                       numericInput(
                         inputId = paste0("delay_param_weibull_2_", j, "_", i),
                         label = "Scale",
                         value = "",
                         step = 0.0001
                       )
                     )
                   ),
                   conditionalPanel(
                     condition = paste0(
                       "input.",
                       paste0("delay_dist_", j, "_", i),
                       " == 'Gamma'"
                     ),
                     column(
                       6,
                       numericInput(
                         inputId = paste0("delay_param_gamma_1_", j, "_", i),
                         label = "Shape",
                         value = "",
                         step = 0.001
                       )
                     ),
                     column(
                       6,
                       numericInput(
                         inputId = paste0("delay_param_gamma_2_", j, "_", i),
                         label = "Rate",
                         value = "",
                         step = 0.001
                       )
                     )
                   )
                 ),
                 br(),
                 style = 'border:0.5px dashed #e6e6e6;'
               )
             } else {
               # Diagonal cell (self-transition): same layout but every
               # input is disabled and pinned to 0 / "None".
               column(
                 2,
                 fluidRow(
                   column(
                     12,
                     align = "center",
                     h4(all_names[i]),
                     style = 'padding:2px; font-size:150%'
                   )
                 ),
                 disabled(
                   numericInput(
                     inputId = paste0("transition_", j, "_", i),
                     label = paste("Proportion from", all_names[j], "to", all_names[i]),
                     value = 0
                   )
                 ),
                 disabled(
                   textInput(
                     inputId = paste0("delay_dist_", j, "_", i),
                     label = paste(
                       "Distribution for Transition Delay from",
                       all_names[j],
                       "to",
                       all_names[i]
                     ),
                     value = "None"
                   )
                 ),
                 # NOTE(review): same j-less placeholder ID as above.
                 fluidRow(column(12, disabled(
                   textInput(
                     inputId = paste0("delay_param_none_1_", i),
                     value = "NA",
                     label = "No Parameters Required"
                   )
                 ))),
                 br(),
                 style = 'border:0.5px dashed #e6e6e6;'
               )
             }
           }))
  }
  # Empty template matrices for the two editable calendars; rows can be
  # extended by the user in the matrixInput widgets below.
  m3 = matrix(ncol = 3,
              nrow = node_number,
              data = "")
  colnames(m3) <- c("Start Time", "End Time", "Arrival Rate")
  m4 = matrix(ncol = 3,
              nrow = node_number,
              data = "")
  colnames(m4) <- c("Start Time", "End Time", "Capacity")
  #### Defines 'tabs' layout (dynamic based on number of nodes & exits) ####
  myTabs = lapply(1:node_number, function(i) {
    tabPanel(
      title = HTML(tabnames[i]),
      useShinyjs(),
      br(),
      h1(paste("Service Point Name:", tabnames[i])),
      hr(),
      # --- Length of service: distribution choice + parameters ---
      h4("Length of Service Information"),
      #p("If distribution and parameters for the service point are not know, use the Service Distribution Tool (in the bar above) to either fit models to uploaded data or scale against BNSSG data and then enter resulting distributions and parameters.", style="color:gray"),
      fluidRow(
        column(
          2,
          selectInput(
            inputId = paste0("serv_dist_", i),
            label = "Select a distribution",
            choices = c(
              "Exponential",
              "log-Normal",
              "Uniform",
              "Weibull",
              "Gamma"
            ),
            selected = "Exponential"
          ),
          # NOTE(review): this `selectize = F` sits outside selectInput()
          # and is therefore passed to column(), not the select widget —
          # likely misplaced; confirm whether it belongs one paren up.
          selectize = F
        ),
        column(
          width = 1,
          offset = 0,
          style = 'padding:0px;'
        ),
        # One parameter panel per distribution; only the selected one shows.
        conditionalPanel(
          condition = paste0("input.", paste0("serv_dist_", i), " == 'Exponential'"),
          column(
            2,
            numericInput(
              inputId = paste0("serv_param_exp_1_", i),
              label = "Rate",
              value = "",
              step = 0.001,
              min = 0
            )
          )
        ),
        conditionalPanel(
          condition = paste0("input.", paste0("serv_dist_", i), " == 'log-Normal'"),
          column(
            2,
            numericInput(
              inputId = paste0("serv_param_lnorm_1_", i),
              label = "meanlog",
              value = "",
              step = 0.001
            )
          ),
          column(
            2,
            numericInput(
              inputId = paste0("serv_param_lnorm_2_", i),
              label = "sdlog",
              value = "",
              step = 0.001
            )
          )
        ),
        conditionalPanel(
          condition = paste0("input.", paste0("serv_dist_", i), " == 'Uniform'"),
          column(
            2,
            numericInput(
              inputId = paste0("serv_param_unif_1_", i),
              label = "Min",
              value = "",
              step = 0.001,
              min = 0
            )
          ),
          column(
            2,
            numericInput(
              inputId = paste0("serv_param_unif_2_", i),
              label = "Max",
              value = "",
              step = 0.001,
              min = 0
            )
          )
        ),
        conditionalPanel(
          condition = paste0("input.", paste0("serv_dist_", i), " == 'Weibull'"),
          column(
            2,
            numericInput(
              inputId = paste0("serv_param_weibull_1_", i),
              label = "Shape",
              value = "",
              step = 0.001
            )
          ),
          column(
            2,
            numericInput(
              inputId = paste0("serv_param_weibull_2_", i),
              label = "Scale",
              value = "",
              step = 0.001
            )
          )
        ),
        conditionalPanel(
          condition = paste0("input.", paste0("serv_dist_", i), " == 'Gamma'"),
          column(
            2,
            numericInput(
              inputId = paste0("serv_param_gamma_1_", i),
              label = "Shape",
              value = "",
              step = 0.001
            )
          ),
          column(
            2,
            numericInput(
              inputId = paste0("serv_param_gamma_2_", i),
              label = "Rate",
              value = "",
              step = 0.001
            )
          )
        ),
        style = 'border-bottom:1px dashed silver;'
      ),
      # --- Queue capacities (external = arrivals from outside the pathway,
      # internal = arrivals from other service points) ---
      h4("Queue Information"),
      #p("An external queue is defined as a queue that accepts arrivals from outside of the pathway.
      # An internal queue is one that connects service points within the pathway network.", style="color:gray"),
      fluidRow(
        column(
          2,
          numericInput(
            inputId = paste0("ext_q_", i),
            label = "External Queue Capacity",
            value = 0,
            min = 0
          )
        ),
        column(
          width = 1,
          offset = 0,
          style = 'padding:0px;'
        ),
        column(
          2,
          numericInput(
            inputId = paste0("int_q_", i),
            label = "Internal Queue Capacity",
            value = 0,
            min = 0
          )
        ),
        style = 'border-bottom:1px dashed silver;'
      ),
      # --- Transition grid built above: retrieve the widget list assembled
      # for this node via get() and splice it into a fluidRow. ---
      fluidRow(column(
        12,
        h4("Transitions & Departure Delays"),
        # p("All proportion values should sum to 1 for each Service Point tab.
        # A fixed presscribed delay can be modelled using the uniform distribution and entering the same value into the min and max boxes.", style="color:gray"),
        do.call(fluidRow, get(paste0(
          "transition_", i
        )))
      )),
      fluidRow(
        column(
          width = 12,
          offset = 0,
          style = 'padding:10px;'
        ),
        style = 'border-bottom:1px dashed silver;'
      ),
      # --- Editable calendars: external arrival rates and capacity ---
      fluidRow(
        column(
          5,
          h4("External Arrival Rate Calendar"),
          p(
            "For more information, consult the instructions sidebar (Step 4) and 'How do I fill the calendar?' info button",
            style = "color:gray"
          ),
          fluidRow(column(
            12,
            matrixInput(
              inputId = paste0("ext_arr_", i),
              value = m3,
              class = "numeric",
              cols = list(
                names = TRUE,
                extend = FALSE,
                editableNames = FALSE
              ),
              rows = list(
                names = TRUE,
                extend = TRUE,
                editableNames = FALSE,
                delta = 1
              ),
              copy = FALSE,
              paste = TRUE
            )
          ))
        ),
        column(
          width = 1,
          offset = 0,
          style = 'padding:0px;'
        ),
        column(
          5,
          h4("Capacity Calendar"),
          p(
            "For more information, consult the instructions sidebar (Step 4) and 'How to fill out the calendar?' info button",
            style = "color:gray"
          ),
          fluidRow(column(
            12,
            matrixInput(
              inputId = paste0("cap_", i),
              value = m4,
              class = "numeric",
              cols = list(
                names = TRUE,
                extend = FALSE,
                editableNames = FALSE
              ),
              rows = list(
                names = TRUE,
                extend = TRUE,
                editableNames = FALSE,
                delta = 1
              ),
              copy = FALSE,
              paste = TRUE
            )
          ))
        ),
        column(
          width = 1,
          offset = 0,
          style = 'padding:0px;'
        )
      )
    )
  })
  # Assemble all per-node tabs into a single tabset.
  do.call(tabsetPanel, myTabs)
})
#### Creates the trial Var_input ####
  var <- eventReactive(input$go, {
    # Builds the "var_input" network template from the wizard widgets when the
    # user presses Go.  Result: a data frame with one row per service point and
    # exit, holding the transition matrix, service distribution + parameters,
    # queue capacities, and per-destination delay distributions + parameters.
    # Clean service-point names: de-duplicate, trim whitespace, replace
    # internal spaces with underscores, drop blank entries.
    x <- input$sp
    x <- unique(x)
    rownames(x) <- 1:nrow(x)
    x <- trimws(x = x, which = "both")
    x <- gsub(x = x, pattern = " ", "_")
    sp <- x[which(x != "")]
    # Exit-point names receive the same cleaning.
    x <- input$exit
    x <- unique(x)
    rownames(x) <- 1:nrow(x)
    x <- trimws(x = x, which = "both")
    x <- gsub(x = x, pattern = " ", "_")
    exit <- x[which(x != "")]
    node_number <- length(sp)
    exit_number <- length(exit)
    tabnames <- sp
    node_names <- sp
    exit_names <- exit
    all_names <- c(node_names, exit_names)
    ## Transition Matrix
    # Gather the per-destination transition proportions, one widget per
    # (source node j, destination i) pair; filled row-by-row (byrow = T).
    t1 = lapply(1:node_number, function(j) {
      t2 = lapply(1:length(all_names), function(i) {
        input[[paste0("transition_", j, "_", i)]]
      })
      t2
    })
    dat <- as.numeric(as.vector(unlist(t1)))
    tm <- matrix(data = dat,
                 nrow = length(node_names),
                 byrow = T)
    #tm<-t(tm)
    colnames(tm) <- all_names
    rownames(tm) <- node_names
    ## Queues
    # Internal (iq) and external (eq) queue capacities, one value per node.
    iq <- lapply(1:node_number , function(i) {
      input[[paste0("int_q_", i)]]
    })
    iq <- as.numeric(as.vector(unlist(iq)))
    eq <- lapply(1:node_number , function(i) {
      input[[paste0("ext_q_", i)]]
    })
    eq <- as.numeric(as.vector(unlist(eq)))
    ## Service Distributions & Parameters
    sd <- lapply(1:node_number , function(i) {
      input[[paste0("serv_dist_", i)]]
    })
    sd <- as.vector(unlist(sd))
    # One-parameter distributions (Exponential) store a single value; all
    # two-parameter distributions are encoded as the string "param1;param2".
    sdp <- lapply(1:node_number , function(i) {
      if (sd[i] == "Exponential") {
        input[[paste0("serv_param_exp_1_", i)]]
      } else if (sd[i] == "log-Normal") {
        paste0(input[[paste0("serv_param_lnorm_1_", i)]], ";", input[[paste0("serv_param_lnorm_2_", i)]])
      } else if (sd[i] == "Uniform") {
        paste0(input[[paste0("serv_param_unif_1_", i)]], ";", input[[paste0("serv_param_unif_2_", i)]])
      } else if (sd[i] == "Weibull") {
        paste0(input[[paste0("serv_param_weibull_1_", i)]], ";", input[[paste0("serv_param_weibull_2_", i)]])
      } else if (sd[i] == "Gamma") {
        paste0(input[[paste0("serv_param_gamma_1_", i)]], ";", input[[paste0("serv_param_gamma_2_", i)]])
      }
    })
    sdp <- as.vector(unlist(sdp))
    ## Delay Distribution Matrix
    # Node-by-destination matrix of transfer-delay distributions; UI labels
    # are translated to the short codes used elsewhere ("None" becomes NA).
    dd1 = lapply(1:node_number, function(j) {
      dd2 = lapply(1:length(all_names), function(i) {
        input[[paste0("delay_dist_", j, "_", i)]]
      })
      dd2
    })
    dat <- as.vector(unlist(dd1))
    ddm <- matrix(data = dat,
                  nrow = length(node_names),
                  byrow = T)
    ddm[which(ddm == "None")] <- NA
    ddm[which(ddm == "Exponential")] <- "exp"
    ddm[which(ddm == "log-Normal")] <- "lnorm"
    ddm[which(ddm == "Uniform")] <- "unif"
    ddm[which(ddm == "Weibull")] <- "weibull"
    ddm[which(ddm == "Gamma")] <- "gamma"
    #tm<-t(tm)
    colnames(ddm) <- all_names
    rownames(ddm) <- paste0(node_names, "_Delay_Dist")
    # Side effect: expose the delay-distribution matrix for display.
    output$ddm <- renderTable({
      ddm
    })
    ## Delay Parameter Matrix
    # Parameters aligned with ddm: NA when there is no delay, a single value
    # for Exponential, "p1;p2" for two-parameter distributions.
    dp1 <- lapply(1:node_number , function(j) {
      dp2 = lapply(1:length(all_names), function(i) {
        test <- input[[paste0("delay_dist_", j, "_", i)]]
        if (test == "None") {
          NA
        } else if (test == "Exponential") {
          input[[paste0("delay_param_exp_1_", j, "_", i)]]
        } else if (test == "log-Normal") {
          paste0(input[[paste0("delay_param_lnorm_1_", j, "_", i)]], ";", input[[paste0("delay_param_lnorm_2_", j, "_", i)]])
        } else if (test == "Uniform") {
          paste0(input[[paste0("delay_param_unif_1_", j, "_", i)]], ";", input[[paste0("delay_param_unif_2_", j, "_", i)]])
        } else if (test == "Weibull") {
          paste0(input[[paste0("delay_param_weibull_1_", j, "_", i)]], ";", input[[paste0("delay_param_weibull_2_", j, "_", i)]])
        } else if (test == "Gamma") {
          paste0(input[[paste0("delay_param_gamma_1_", j, "_", i)]], ";", input[[paste0("delay_param_gamma_2_", j, "_", i)]])
        }
      })
      dp2
    })
    ddp <- as.vector(unlist(dp1))
    ddp <- matrix(data = ddp,
                  nrow = length(node_names),
                  byrow = T)
    colnames(ddp) <- all_names
    rownames(ddp) <- paste0(node_names, "_Delay_Params")
    ####
    # Assemble the template: node rows first, then all-NA rows for the exits
    # (cbind coerces everything to character, hence the text comparisons in
    # the checklist code downstream).
    var <- cbind(tm, sd, sdp, eq, iq, ddm, ddp)
    var <- rbind(var, matrix(NA, nrow = exit_number, ncol = ncol(var)))
    rownames(var) <- all_names
    colnames(var) <-
      c(
        all_names,
        "serv_dist",
        "serv_dist_param",
        "ext_queue",
        "int_queue",
        paste0(all_names, "_delay_dist"),
        paste0(all_names, "_delay_params")
      )
    var <- as.data.frame(var)
    # Translate the service-distribution UI labels to short codes.
    var$serv_dist <-
      gsub(x = var$serv_dist,
           pattern = "Exponential",
           replacement = "exp")
    var$serv_dist <-
      gsub(x = var$serv_dist,
           pattern = "log-Normal",
           replacement = "lnorm")
    var$serv_dist <-
      gsub(x = var$serv_dist,
           pattern = "Uniform",
           replacement = "unif")
    var$serv_dist <-
      gsub(x = var$serv_dist,
           pattern = "Weibull",
           replacement = "weibull")
    var$serv_dist <-
      gsub(x = var$serv_dist,
           pattern = "Gamma",
           replacement = "gamma")
    var
  })
#### Creates the trial Cal_input ####
cal <- eventReactive(input$go, {
x <- input$sp
x <- unique(x)
rownames(x) <- 1:nrow(x)
x <- trimws(x = x, which = "both")
x <- gsub(x = x, pattern = " ", "_")
sp <- x[which(x != "")]
x <- input$exit
x <- unique(x)
rownames(x) <- 1:nrow(x)
x <- trimws(x = x, which = "both")
x <- gsub(x = x, pattern = " ", "_")
exit <- x[which(x != "")]
node_number <- length(sp)
exit_number <- length(exit)
tabnames <- sp
node_names <- sp
exit_names <- exit
all_names <- c(node_names, exit_names)
## External Arrival
ea = lapply(1:node_number, function(i) {
x <- as.data.frame(input[[paste0("ext_arr_", i)]])
x <- head(x, -1)
if (nrow(x) > 0) {
x <- cbind("ext_arr", paste0(node_names[i]), x)
colnames(x) <- c("metric", "node", "start", "end", "value")
}
x
})
ea_rows <- lapply(1:node_number, function(i) {
nrow(ea[[i]])
})
if (!all(ea_rows == 0)) {
eam <- rbindlist(ea[c(which(ea_rows > 0))])
}
## Capacity
cap = lapply(1:node_number, function(i) {
x <- as.data.frame(input[[paste0("cap_", i)]])
x <- head(x, -1)
if (nrow(x) > 0) {
x <- cbind("cap", paste0(node_names[i]), x)
colnames(x) <- c("metric", "node", "start", "end", "value")
}
x
})
cap_rows <- lapply(1:node_number, function(i) {
nrow(cap[[i]])
})
if (!all(cap_rows == 0)) {
capm <- rbindlist(cap[c(which(cap_rows > 0))])
}
if (exists("eam") & exists("capm")) {
cal <- rbind(eam, capm)
colnames(cal) <- c("metric", "node", "start", "end", "value")
cal
} else if (exists("eam")) {
cal <- eam
colnames(cal) <- c("metric", "node", "start", "end", "value")
cal
} else if (exists("capm")) {
cal <- capm
colnames(cal) <- c("metric", "node", "start", "end", "value")
cal
} else{
cal <-
data.frame(
"metric" = "",
"node" = "",
"start" = "",
"end" = "",
"value" = ""
)
cal
}
})
#### Creates the Var_input visual####
observeEvent(input$go, {
output$var_view <- renderTable({
var()
}, rownames = TRUE, striped = T, bordered = T, align = "c", caption = "Network Information",
caption.placement = getOption("xtable.caption.placement", "top"),
caption.width = getOption("xtable.caption.width", NULL))
})
#### Creates the Cal_input visual####
observeEvent(input$go, {
output$cal_view <- renderTable({
cal()
}, rownames = FALSE, striped = T, bordered = T, align = "c", caption = "Calendar Information",
caption.placement = getOption("xtable.caption.placement", "top"),
caption.width = getOption("xtable.caption.width", NULL), digits = 5)
})
#### Creates the input checklist####
observeEvent(input$go, {
issues <- c()
var <- var()
cal <- cal()
x <- input$sp
x <- unique(x)
rownames(x) <- 1:nrow(x)
x <- trimws(x = x, which = "both")
x <- gsub(x = x, pattern = " ", "_")
sp <- x[which(x != "")]
x <- input$exit
x <- unique(x)
rownames(x) <- 1:nrow(x)
x <- trimws(x = x, which = "both")
x <- gsub(x = x, pattern = " ", "_")
exit <- x[which(x != "")]
node_number <- length(sp)
exit_number <- length(exit)
tabnames <- sp
node_names <- sp
exit_names <- exit
all_names <- c(node_names, exit_names)
### Testing if the transition matrix has rowsums of 1###
f <- var[1:node_number, 1:length(all_names)]
indx <- sapply(f, is.factor)
f[indx] <-
lapply(f[indx], function(x)
as.numeric(as.character(x)))
transition <- as.data.frame(f)
if (sum(transition < 0) > 0 | sum(transition > 1) > 0) {
issues <-
c(issues, c(
paste0("Network Input"),
"All",
paste(
"Transition proportions contains value outside required range (replace with value between 0 and 1)",
sep = ""
)
))
}
rs <- rowSums(transition)
for (i in 1:node_number) {
x <- rs[i]
if (is.na(x)) {
issues <-
c(issues, c(
paste0("Network Input"),
node_names[i],
paste(
"Transition row contains NA (replace with 0 or value)",
sep = ""
)
))
} else if (!isTRUE(near(x, 1))) {
issues <-
c(issues, c(
paste0("Network Input"),
node_names[i],
paste(
"Transition proportion row does not sum to 1 (Currently:",
x,
")",
sep = ""
)
))
}
}
### Testing if the distribution parameter inputs are correct ###
f <-
var[1:node_number, (length(all_names) + 1):(length(all_names) + 2)]
indx <- sapply(f, is.factor)
f[indx] <- lapply(f[indx], function(x)
as.character(x))
serv_dist_param <- as.data.frame(f)
for (i in 1:node_number) {
if (serv_dist_param[i, 1] == "exp") {
x <- serv_dist_param[i, 2]
if (is.na(x)) {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Missing a service distribution parameter"
)
}
if ((!is.na(x)) & x <= 0) {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Service distribution parameter is not greater than 0"
)
}
} else{
x <- serv_dist_param[i, 2]
x <- strsplit(x, ";")[[1]]
if ("NA" %in% x) {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Missing a service distribution parameter"
)
}
if ((!("NA" %in% x)) &
any(x <= 0) & serv_dist_param[i, 1] == "unif") {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Service distribution parameters are not greater than 0"
)
}
if ((!("NA" %in% x)) &
any(x <= 0) & serv_dist_param[i, 1] == "gamma") {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Service distribution parameters are not greater than 0"
)
}
if ((!("NA" %in% x)) &
any(x <= 0) & serv_dist_param[i, 1] == "weibull") {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Service distribution parameters are not greater than 0"
)
}
if ((!("NA" %in% x)) & x[2] < 0 &
serv_dist_param[i, 1] == "lnorm") {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"lnorm service parameter (sdlog) is less than 0"
)
}
}
}
#### Testing if the Queue inputs are correct #
iq <- lapply(1:node_number , function(i) {
input[[paste0("int_q_", i)]]
})
iq <- as.numeric(as.vector(unlist(iq)))
eq <- lapply(1:node_number , function(i) {
input[[paste0("ext_q_", i)]]
})
eq <- as.numeric(as.vector(unlist(eq)))
for (i in 1:node_number) {
x <- iq[i]
if (is.na(x)) {
issues <-
c(issues, c(
paste0("Network Input"),
node_names[i],
paste("Need to enter Internal Queue Value")
))
}
if (x %% 1 != 0) {
issues <-
c(issues, c(
paste0("Network Input"),
node_names[i],
paste("Need to enter an integer Internal Queue Value")
))
}
if (x < 0) {
issues <-
c(issues, c(
paste0("Network Input"),
node_names[i],
paste("Need to enter a positive Internal Queue Value")
))
}
}
for (i in 1:node_number) {
x <- eq[i]
if (is.na(x)) {
issues <-
c(issues, c(
paste0("Network Input"),
node_names[i],
paste("Need to enter External Queue Value")
))
}
if (x %% 1 != 0) {
issues <-
c(issues, c(
paste0("Network Input"),
node_names[i],
paste("Need to enter an integer External Queue Value")
))
}
if (x < 0) {
issues <-
c(issues, c(
paste0("Network Input"),
node_names[i],
paste("Need to enter a positive External Queue Value")
))
}
}
### Testing if the delay parameter inputs are correct ###
f <-
var[1:node_number, (length(all_names) + 5):((2 * length(all_names)) + 4)]
indx <- sapply(f, is.factor)
f[indx] <- lapply(f[indx], function(x)
as.character(x))
delay_dist <- as.data.frame(f)
f <- var[1:node_number, (2 * length(all_names) + 5):ncol(var)]
indx <- sapply(f, is.factor)
f[indx] <- lapply(f[indx], function(x)
as.character(x))
delay_param <- as.data.frame(f)
for (j in 1:length(all_names)) {
for (i in 1:node_number) {
if (!is.na(delay_dist[i, j])) {
if (delay_dist[i, j] == "exp") {
x <- delay_param[i, j]
if (is.na(x)) {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Missing a delay distribution parameter"
)
}
if ((!is.na(x)) & x <= 0) {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Delay parameter is not greater than 0"
)
}
} else{
x <- delay_param[i, j]
x <- strsplit(x, ";")[[1]]
if ("NA" %in% x) {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Missing a delay distribution parameter"
)
}
if ((!("NA" %in% x)) & any(x <= 0) &
delay_dist[i, j] == "unif") {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Delay distribution parameters are not greater than 0"
)
}
if ((!("NA" %in% x)) &
any(x <= 0) & delay_dist[i, j] == "gamma") {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Delay distribution parameters are not greater than 0"
)
}
if ((!("NA" %in% x)) &
any(x <= 0) & delay_dist[i, j] == "weibull") {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"Delay distribution parameters are not greater than 0"
)
}
if ((!("NA" %in% x)) & x[2] < 0 &
delay_dist[i, j] == "lnorm") {
issues <-
c(
issues,
paste0("Network Input"),
node_names[i],
"lnorm service parameter (sdlog) is less than 0"
)
}
}
}
}
}
### Testing if there is at least 1 row of capacity and ext_arrival rate for each service point###
row_test <- as.data.frame(cal[, 1:2])
for (j in c("cap", "ext_arr")) {
for (i in 1:node_number) {
x <- row_test[which(row_test[, 1] == j), ]
x <- x[which(x[, 2] == node_names[i]), ]
if (nrow(x) == 0) {
issues <-
c(issues,
"Calendar",
node_names[i],
paste0("Missing ", j, " input rows"))
}
}
}
### Testing that every line in the caledar template has a value entry###
value_test <- as.data.frame(cal)
for (j in c("cap", "ext_arr")) {
for (i in 1:node_number) {
x <- value_test[which(value_test[, 1] == j), ]
x <- x[which(x[, 2] == node_names[i]), 5]
if (length(x) > 0) {
if (any(is.na(x))) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0("Missing ", j, " value entry in calendar")
)
}
if (!any(is.na(x))) {
if (any(x < 0)) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0("Negative ", j, " value entry in calendar")
)
}
}
if (!any(is.na(x))) {
if (j == "cap" & all(x == 0)) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0("All zero ", j, " values entered in calendar")
)
}
}
}
}
}
### Testing that nodes that have 2+ lines in the calendar have any values in the start and end columns ###
value_test <- as.data.frame(cal)
for (j in c("cap", "ext_arr")) {
for (i in 1:node_number) {
x <- value_test[which(value_test[, 1] == j), ]
x <- x[which(x[, 2] == node_names[i]), ]
if (nrow(x) > 1) {
start <- x[, 3]
end <- x[, 4]
if (any(is.na(start))) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0("Missing start value(s) in ", j, " calendar")
)
}
if (any(is.na(end))) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0("Missing end value(s) in ", j, " calendar")
)
}
}
}
}
### Testing that nodes that have a zero in the first start line in the calendar ###
value_test <- as.data.frame(cal)
for (j in c("cap", "ext_arr")) {
for (i in 1:node_number) {
x <- value_test[which(value_test[, 1] == j), ]
x <- x[which(x[, 2] == node_names[i]), ]
if (nrow(x) != 0) {
if (!is.na(x[1, 3])) {
start <- x[1, 3]
if (start != 0) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0("Non-Zero Initial Start Time in ", j, " calendar")
)
}
}
if (is.na(x[1, 3])) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0("Non-Zero Initial Start Time in ", j, " calendar")
)
}
}
}
}
### Testing that nodes that have 2+ lines in the calendar have matching values in the start and end columns ###
value_test <- as.data.frame(cal)
for (j in c("cap", "ext_arr")) {
for (i in 1:node_number) {
x <- value_test[which(value_test[, 1] == j), ]
x <- x[which(x[, 2] == node_names[i]), ]
if (nrow(x) > 1) {
start <- x[, 3]
end <- x[, 4]
start_tail <- tail(start, -1)
end_head <- head(end, -1)
start_tail[is.na(start_tail)] <- 0
end_head[is.na(end_head)] <- 0
if (any(!(start_tail == end_head))) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0(
"Start & End values don't match up sequentially in ",
j,
" calendar"
)
)
}
}
}
}
### Testing that nodes that have ascending start and end values ###
value_test <- as.data.frame(cal)
for (j in c("cap", "ext_arr")) {
for (i in 1:node_number) {
x <- value_test[which(value_test[, 1] == j), ]
x <- x[which(x[, 2] == node_names[i]), ]
if (nrow(x) > 1) {
start <- x[, 3]
end <- x[, 4]
if (!any(is.na(start))) {
if (any(diff(start) <= 0)) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0(
"Start values don't increase sequentially in ",
j,
" calendar"
)
)
}
}
if (!any(is.na(end))) {
if (any(diff(end) <= 0)) {
issues <-
c(
issues,
"Calendar",
node_names[i],
paste0(
"End values don't increase sequentially in ",
j,
" calendar"
)
)
}
}
}
}
}
### Testing that there are arrivals to at least one node ###
value_test <- as.data.frame(cal)
x <- value_test[which(value_test[, 1] == "ext_arr"), 5]
if (!any(is.na(x))) {
if (all(x == 0)) {
issues <-
c(
issues,
"Calendar",
"All",
paste0(
"No Arrival rates to any service point in the ext_arr calendar"
)
)
}
}
####
output$issues <- renderTable({
if (length(issues) == 0) {
issues <- c("Complete", "Complete", "Complete")
}
issues <- matrix(data = issues,
ncol = 3,
byrow = T)
colnames(issues) <- c("Location", "Service Point", "Issue")
issues
}, striped = T, bordered = T, align = "c", caption = '<font size=4 color="red"><strong><p>Issues Log</p></strong></font>',
caption.placement = getOption("xtable.caption.placement", "top"),
caption.width = getOption("xtable.caption.width", NULL))
output$means <- renderTable({
if (length(issues) == 0) {
var <- var()
cal <- cal()
mean_table <- c()
x <- input$sp
x <- unique(x)
rownames(x) <- 1:nrow(x)
x <- trimws(x = x, which = "both")
x <- gsub(x = x, pattern = " ", "_")
sp <- x[which(x != "")]
node_number <- length(sp)
node_names <- sp
for (i in 1:node_number) {
pars <-
as.numeric(unlist(strsplit(
as.character(var$serv_dist_param[i]), ";"
)))
if (var$serv_dist[i] == "exp") {
mean_table <- c(mean_table, 1 / pars)
} else if (var$serv_dist[i] == "unif") {
mean_table <- c(mean_table, (pars[1] + pars[2]) / 2)
} else if (var$serv_dist[i] == "lnorm") {
mean_table <- c(mean_table, exp(pars[1] + 0.5 * (pars[2]) ^ 2))
} else if (var$serv_dist[i] == "weibull") {
mean_table <- c(mean_table, pars[2] * (gamma(1 + 1 / pars[1])))
} else if (var$serv_dist[i] == "gamma") {
mean_table <- c(mean_table, pars[1] / pars[2])
} else{
mean_table <- c(mean_table, c("Error in Mean Calculation"))
}
}
mean_table <- as.data.frame(mean_table)
rownames(mean_table) <- node_names
colnames(mean_table) <- "Mean Length of Service"
mean_table
}
}, striped = T, bordered = T, align = "c", rownames = T, caption = "LoS Means",
caption.placement = getOption("xtable.caption.placement", "top"),
caption.width = getOption("xtable.caption.width", NULL))
output$download_buttons <- renderUI({
if (length(issues) == 0) {
fluidRow(
column(
6,
align = "center",
downloadButton(
outputId = "var_dl",
label = "Network Template Download",
style = 'padding:16px; font-size:110%'
)
),
column(
6,
align = "center",
downloadButton(
outputId = "cal_dl",
label = "Calendar Download",
style = 'padding:16px; font-size:110%'
)
)
)
}
})
output$j2st <- renderUI({
if (length(issues) == 0) {
column(6, align = "center", actionButton(inputId = "j2PSR", label = c(
tagList("Move to Simulation Tool", icon("arrow-right"))
)))
}
})
})
#### Creates the wizard template downloader####
### Creates the Var_input downloader###
output$var_dl <- downloadHandler(
filename = "var_input.csv",
content = function(file) {
write.csv(var(), file, row.names = TRUE)
}
)
### Creates the cal_input downloader###
output$cal_dl <- downloadHandler(
filename = "cal_input.csv",
content = function(file) {
write.csv(cal(), file, row.names = FALSE)
}
)
#### Length of Service Model Fit Tab####
observeEvent(input$go_distfit, {
req(input$los_dat)
df <- read.csv(input$los_dat$datapath,
header = F,
sep = ",")
if (is.numeric(df[, 1])) {
colnames(df) <- "data"
fe <- fitdist(data = df$data, distr = "exp")
fl <- fitdist(data = df$data, distr = "lnorm")
fu <- fitdist(data = df$data, distr = "unif")
fw <- fitdist(data = df$data, distr = "weibull")
fg <- fitdist(data = df$data, distr = "gamma")
output$los_plot <- renderPlot({
plotdist(df$data, histo = T, demp = T)
}, res = 128)
output$los_cf <- renderPlot({
descdist(df$data, boot = 100)
}, res = 128)
output$los_fit_plot <- renderPlot({
p <-
denscomp(
ft = list(fe, fl, fu, fw, fg),
plotstyle = "ggplot",
breaks = 100,
#fitcol = c("#009E73","#F0E442", "#0072B2", "#D55E00", "#CC79A7"),
fitlty = 1
)
p <- p + theme_bw()
p
}, res = 128)
output$los_text <- renderText({
c("Ranked Model Table")
})
output$los_text_help <- renderText({
c(
"The distributions below have been ranked in terms of best fit. The Rank 1 Distribution was found to fit closest to the provided data.
Simply use the top ranking model and enter the details in the data entry tab.
If the exponential distribution is the highest ranking, then there is only one parameter to copy across (rate), else there will be two. These are named in the table and should be copied
to the relevant box on the data entry page. If the histogram appears completely flat, it may be that the uniform distribution is the best fitting model. In this case, ignore the rankings and take the parameters from that row.
In the case where multiple distributions are found to have the same fit, some model fit lines may be obscured on the plot (i.e. plotting over eachother). These models will still be ranked but should be treated as ranking equally.
"
)
})
output$los_fit_table <- renderTable({
fes <- summary(fe)
fls <- summary(fl)
fus <- summary(fu)
fws <- summary(fw)
fgs <- summary(fg)
aic = c(fes$aic, fls$aic, fus$aic, fws$aic, fgs$aic)
del_aic <- aic - min(aic, na.rm = T)
aic_lik <- exp(-0.5 * del_aic)
aic_weight <- aic_lik / sum(aic_lik, na.rm = T)
means <-
c((1 / fes$estimate[1]),
(exp(fls$estimate[1] + (
0.5 * (fls$estimate[2]) ^ 2
))),
(0.5 * (fus$estimate[1] + fus$estimate[2])),
(fws$estimate[2] * gamma(1 + 1 / fws$estimate[1])),
(fgs$estimate[1] / fgs$estimate[2])
)
means <- unname(means)
mean_dif <- means - mean(df$data)
fit_table <- data.frame(
"Distribution" = c(
"exponential",
"log-normal",
"uniform",
"weibull",
"gamma"
),
"Parameter 1 Name" = c(
names(fes$estimate)[1],
names(fls$estimate)[1],
names(fus$estimate)[1],
names(fws$estimate)[1],
names(fgs$estimate)[1]
),
"Parameter 1 Value" = c(
fes$estimate[1],
fls$estimate[1],
fus$estimate[1],
fws$estimate[1],
fgs$estimate[1]
),
"Parameter 2 Name" = c(
names(fes$estimate)[2],
names(fls$estimate)[2],
names(fus$estimate)[2],
names(fws$estimate)[2],
names(fgs$estimate)[2]
),
"Parameter 2 Value" = c(
fes$estimate[2],
fls$estimate[2],
fus$estimate[2],
fws$estimate[2],
fgs$estimate[2]
),
"AIC Score" = c(ceiling(aic)),
"AIC Weight" = c(100 * signif(aic_weight, digits = 3)),
"Mean" = means,
"Diff from actual mean" = signif(mean_dif, digits = 3),
row.names = NULL
)
#rownames(fit_table)<-c()
fit_table <-
fit_table[order(fit_table$AIC.Weight,
decreasing = T,
na.last = T), ]
fit_table <- cbind("Rank" = 1:5, fit_table)
fit_table[which(fit_table$Distribution == "uniform"), c(7, 8)] <-
"Check Graph for fit"
colnames(fit_table) <-
c(
"Rank",
"Distribution",
"Parameter 1 Name",
"Parameter 1 Value",
"Parameter 2 Name",
"Parameter 2 Value",
"AIC Score",
"AIC Weight (/100)",
"Estiamted Mean",
"Diff from data mean"
)
fit_table <- fit_table[, -c(7, 8, 9, 10)]
fit_table
}, striped = T, bordered = T, align = "c")
output$fit_error <- renderText({
c("")
})
output$mini_summary <- renderTable({
mini_summary <-
data.frame(
"Metric" = c(
"Mean",
"Standard Deviation",
"Inter-quartile range",
"90th Percentile"
),
"Value" = c(
mean(df$data),
sd(df$data),
IQR(df$data),
quantile(df$data, probs = c(0.9))
)
)
mini_summary
}, striped = T, bordered = T, align = "c", caption = "Uploaded Data",
caption.placement = getOption("xtable.caption.placement", "top"),
caption.width = getOption("xtable.caption.width", NULL))
} else{
output$fit_error <- renderText({
c(
"Error: Ensure that the uploaded file is a csv, has only one column of numbers (No Header Required) and that they are located in the leftmost column"
)
})
output$los_fit_plot <- renderPlot({
})
output$los_text <- renderText({
})
output$los_text_help <- renderText({
})
output$los_fit_table <- renderTable({
})
output$lmini_summary <- renderTable({
})
}
})
  #### Length of Service Scaled Means Tab####
  # LoS distribution library ####
  # Reads pre-calculated distribution fits from a csv stored in the www folder.
  # Mostly calculated by interval-censored maximum-likelihood distribution
  # fitting on HES data, with the candidate distribution chosen by AIC, but
  # with some fitted to non-interval-censored regional data (where HES fits
  # did not converge or were otherwise unavailable).  N.B. the HES method is
  # NOT the same as that in the "fit your own data" tab, which assumes
  # uncensored data.
  pre_fitted_data <- read.csv("./www/fits_for_pathsimr.csv",
                              check.names = FALSE) %>%
    arrange(Names)
output$treatment_select_ui <- renderUI({
x <- as.character(pre_fitted_data$Names)
selectInput(
inputId = "treatment_select",
label = "Service Point Library",
choices = x,
selected = x[1],
selectize = F,
width = '150%'
)
})
  # Rescales a library distribution so its mean equals the user-supplied
  # treatment mean (shape parameters kept, scale/rate/location recomputed),
  # then shows the adjusted parameters and the resulting density curve.
  observeEvent(input$go_scaled_fit, {
    table <- pre_fitted_data
    req(input$treatment_mean)
    df <-
      #as.data.frame(subset(table, table$Names == input$treatment_select))
      filter(table,Names == input$treatment_select)
    if (df$Distribution == "exponential") {
      # exp: mean = 1/rate, so rate = 1/mean.
      df$`Parameter 1 Value` <- 1 / input$treatment_mean
      df$`Parameter 1 Value` <-
        as.character(signif(df$`Parameter 1 Value`, digits = 5))
    } else if (df$Distribution == "log-normal") {
      # lnorm: mean = exp(meanlog + sdlog^2/2); keep sdlog, solve for meanlog.
      df$`Parameter 1 Value` <-
        log(input$treatment_mean) - 0.5 * (df$`Parameter 2 Value`) ^ 2
      df$`Parameter 1 Value` <-
        as.character(signif(df$`Parameter 1 Value`, digits = 5))
      df$`Parameter 2 Value` <-
        as.character(signif(df$`Parameter 2 Value`, digits = 5))
    } else if (df$Distribution == "gamma") {
      # gamma: mean = shape/rate; keep shape, solve for rate.
      df$`Parameter 2 Value` <- df$`Parameter 1 Value` / input$treatment_mean
      df$`Parameter 1 Value` <-
        as.character(signif(df$`Parameter 1 Value`, digits = 5))
      df$`Parameter 2 Value` <-
        as.character(signif(df$`Parameter 2 Value`, digits = 5))
    } else if (df$Distribution == "weibull") {
      # weibull: mean = scale * Gamma(1 + 1/shape); keep shape, solve scale.
      df$`Parameter 2 Value` <-
        input$treatment_mean / gamma(1 + (1 / df$`Parameter 1 Value`))
      df$`Parameter 1 Value` <-
        as.character(signif(df$`Parameter 1 Value`, digits = 5))
      df$`Parameter 2 Value` <-
        as.character(signif(df$`Parameter 2 Value`, digits = 5))
    }
    output$scaled_fit <- renderTable({
      df
    }, rownames = FALSE, striped = T, bordered = T, align = "c")
    # Density curve of the rescaled distribution over [0, 10 * mean].
    output$scaled_fit_plot <- renderPlot({
      t_mean <- input$treatment_mean
      if (df$Distribution == "exponential") {
        x <- seq(0, (10 * as.numeric(t_mean)), length.out = 1000)
        y <- dexp(x, rate = as.numeric(df$`Parameter 1 Value`))
        dat = data.frame("Time" = x, "Probability" = y)
        ggplot(data = dat) + geom_line(aes(x = Time, y = Probability),
                                       size = 1,
                                       col = "blue") + theme_bw()
      } else if (df$Distribution == "log-normal") {
        x <- seq(0, (10 * as.numeric(t_mean)), length.out = 1000)
        y <-
          dlnorm(
            x,
            meanlog = as.numeric(df$`Parameter 1 Value`),
            sdlog = as.numeric(df$`Parameter 2 Value`)
          )
        dat = data.frame("Time" = x, "Probability" = y)
        ggplot(data = dat) + geom_line(aes(x = Time, y = Probability),
                                       size = 1,
                                       col = "blue") + theme_bw()
      } else if (df$Distribution == "gamma") {
        x <- seq(0, (10 * as.numeric(t_mean)), length.out = 1000)
        y <-
          dgamma(
            x,
            shape = as.numeric(df$`Parameter 1 Value`),
            rate = as.numeric(df$`Parameter 2 Value`)
          )
        dat = data.frame("Time" = x, "Probability" = y)
        ggplot(data = dat) + geom_line(aes(x = Time, y = Probability),
                                       size = 1,
                                       col = "blue") + theme_bw()
      } else if (df$Distribution == "weibull") {
        x <- seq(0, (10 * as.numeric(t_mean)), length.out = 1000)
        y <-
          dweibull(
            x,
            shape = as.numeric(df$`Parameter 1 Value`),
            scale = as.numeric(df$`Parameter 2 Value`)
          )
        dat = data.frame("Time" = x, "Probability" = y)
        ggplot(data = dat) + geom_line(aes(x = Time, y = Probability),
                                       size = 1,
                                       col = "blue") + theme_bw()
      }
    }, res = 128)
  })
######END OF WIZARD#######
###### START OF SIMULATION TOOL##########
####Template upload and checks ####
output$contents1 <- renderTable({
if (input$disp1 == TRUE) {
if (input$w_temp == 0) {
req(input$file1)
df <- read.csv(input$file1$datapath,
header = TRUE,
sep = ",")
rownames(df) <- df[, 1]
df <- df[, -1]
colnames(df)[1:(which(colnames(df) == "serv_dist") - 1)] <-
rownames(df)
df
} else{
var()
}
}
}, rownames = TRUE, caption = "Variable Inputs",
caption.placement = getOption("xtable.caption.placement", "top"),
caption.width = getOption("xtable.caption.width", NULL))
output$contents2 <- renderTable({
if (input$disp2 == TRUE) {
if (input$w_temp == 0) {
req(input$file2)
df <- read.csv(input$file2$datapath,
header = TRUE,
sep = ",")
df
} else{
cal()
}
}
}, caption = "Calendar Inputs",
caption.placement = getOption("xtable.caption.placement", "top"),
caption.width = getOption("xtable.caption.width", NULL))
# Validate the two uploaded templates (network + calendar) when the user
# presses "go_viz". Returns a 3-column character matrix
# (Location | Service Point | Issue); a single "Complete" row means the
# templates passed every check and the visualisation/simulation may proceed.
issues <- eventReactive(input$go_viz, {
  req(input$file1)
  req(input$file2)

  # ---- Load both templates -------------------------------------------------
  df <- read.csv(input$file1$datapath,
                 header = TRUE,
                 sep = ",")
  rownames(df) <- df[, 1]
  df <- df[, -1]
  colnames(df)[1:(which(colnames(df) == "serv_dist") - 1)] <-
    rownames(df)
  df2 <- read.csv(input$file2$datapath,
                  header = TRUE,
                  sep = ",")

  issues <- c()
  # Helper: append one (location, service point, message) triple to the log.
  add_issue <- function(log, where, node, msg) c(log, where, node, msg)

  var <- df
  cal <- df2

  # ---- Identify service points (rows with a transition entry) and exits ----
  # Names are deduplicated, trimmed, and spaces replaced with underscores to
  # match the convention used everywhere else in the app.
  clean_names <- function(v) {
    v <- unique(v)
    v <- trimws(x = v, which = "both")
    v <- gsub(x = v, pattern = " ", "_")
    v[v != ""]
  }
  sp <- clean_names(rownames(var[which(!is.na(var[, 1])), ]))
  exit <- clean_names(rownames(var[which(is.na(var[, 1])), ]))
  node_number <- length(sp)
  exit_number <- length(exit)
  node_names <- sp
  exit_names <- exit
  all_names <- c(node_names, exit_names)

  # ---- Names must match between the network and calendar templates ---------
  cal_names <- unique(cal$node)
  if (length(node_names) != length(cal_names) ||
      any(!(node_names %in% cal_names))) {
    issues <- add_issue(issues, "Network & Cal input", "All",
                        "Service point names do not match between templates")
  }

  # ---- Transition matrix: values in [0, 1], each row sums to 1 -------------
  f <- var[1:node_number, 1:length(all_names)]
  indx <- sapply(f, is.factor)
  f[indx] <-
    lapply(f[indx], function(x)
      as.numeric(as.character(x)))
  transition <- as.data.frame(f)
  # BUG FIX: na.rm = TRUE. A sparse transition matrix legitimately contains
  # NA cells; without na.rm the sums were NA and `if (NA)` aborted validation
  # before the per-row NA report below could ever run.
  if (sum(transition < 0, na.rm = TRUE) > 0 ||
      sum(transition > 1, na.rm = TRUE) > 0) {
    issues <- add_issue(issues, "Network Input", "All",
      "Transition matrix contains value outside required range (replace with value between 0 and 1)")
  }
  rs <- rowSums(transition)  # no na.rm: NA here flags an incomplete row
  for (i in seq_len(node_number)) {
    x <- rs[i]
    if (is.na(x)) {
      issues <- add_issue(issues, "Network Input", node_names[i],
                          "Transition row contains NA (replace with 0 or value)")
    } else if (!isTRUE(near(x, 1))) {
      issues <- add_issue(issues, "Network Input", node_names[i],
        paste("Row sum does not equal 1 (Currently:", x, ")", sep = ""))
    }
  }

  # ---- Service distributions and their parameters --------------------------
  # (Extracted once; the original computed this slice twice.)
  f <- var[1:node_number, (length(all_names) + 1):(length(all_names) + 2)]
  indx <- sapply(f, is.factor)
  f[indx] <- lapply(f[indx], function(x)
    as.character(x))
  serv_dist_param <- as.data.frame(f)
  for (i in seq_len(node_number)) {
    dist <- serv_dist_param[i, 1]
    if (is.na(dist)) {
      issues <- add_issue(issues, "Network Input", node_names[i],
                          "Missing a service distribution")
      # BUG FIX: skip the parameter checks when the distribution itself is
      # missing — the original went on to evaluate `if (NA == "exp")` and
      # crashed the whole validator.
      next
    }
    if (dist == "exp") {
      x <- serv_dist_param[i, 2]
      if (is.na(x)) {
        issues <- add_issue(issues, "Network Input", node_names[i],
                            "Missing a service distribution parameter")
      } else {
        # BUG FIX: compare numerically. The original compared the raw
        # character cell against 0 (lexicographic), so e.g. ".5" was
        # wrongly flagged as non-positive.
        xn <- suppressWarnings(as.numeric(x))
        if (!is.na(xn) && xn <= 0) {
          issues <- add_issue(issues, "Network Input", node_names[i],
                              "Service distribution parameter is not greater than 0")
        }
      }
    } else {
      # Multi-parameter distributions: parameters are ";"-separated.
      pars <- strsplit(serv_dist_param[i, 2], ";")[[1]]
      # BUG FIX: also treat a genuinely NA cell as missing; the original
      # only tested for the literal string "NA" and then crashed on
      # `if (NA)` in the positivity check.
      if (any(is.na(pars)) || "NA" %in% pars) {
        issues <- add_issue(issues, "Network Input", node_names[i],
                            "Missing a service distribution parameter")
      } else if (dist %in% c("unif", "gamma", "weibull")) {
        pn <- suppressWarnings(as.numeric(pars))
        if (any(!is.na(pn) & pn <= 0)) {
          issues <- add_issue(issues, "Network Input", node_names[i],
                              "Service distribution parameters are not greater than 0")
        }
      }
    }
  }

  # ---- Queue capacities: must be non-negative integers (Inf allowed) -------
  check_queue <- function(log, q, label) {
    for (i in seq_len(node_number)) {
      x <- q[i]
      # BUG FIX: test NA first. The original evaluated `x == Inf` (NA) and
      # `if (NA)` aborted validation instead of logging the missing value;
      # the %% / < 0 checks below would also have crashed on NA.
      if (is.na(x)) {
        log <- add_issue(log, "Network Input", node_names[i],
                         paste("Need to enter", label, "Queue Value"))
        next
      }
      if (x == Inf) {
        x <- 9999  # Inf %% 1 is NaN; stand in a large finite value
      }
      if (as.numeric(x) %% 1 != 0) {
        log <- add_issue(log, "Network Input", node_names[i],
                         paste("Need to enter an integer", label, "Queue Value"))
      }
      if (as.numeric(x) < 0) {
        log <- add_issue(log, "Network Input", node_names[i],
                         paste("Need to enter a positive", label, "Queue Value"))
      }
    }
    log
  }
  issues <- check_queue(issues, var$int_queue, "Internal")
  issues <- check_queue(issues, var$ext_queue, "External")

  # ---- Delay distributions and parameters ----------------------------------
  # (Each slice extracted once; the original computed both twice.)
  f <-
    var[1:node_number, (length(all_names) + 5):((2 * length(all_names)) + 4)]
  indx <- sapply(f, is.factor)
  f[indx] <- lapply(f[indx], function(x)
    as.character(x))
  delay_dist <- as.data.frame(f)
  f <- var[1:node_number, (2 * length(all_names) + 5):ncol(var)]
  indx <- sapply(f, is.factor)
  f[indx] <- lapply(f[indx], function(x)
    as.character(x))
  delay_param <- as.data.frame(f)

  # A delay parameter without a matching distribution is an error.
  # (Note: the trailing space in the message is kept for compatibility.)
  for (j in 1:length(all_names)) {
    for (i in seq_len(node_number)) {
      if (!is.na(delay_param[i, j]) && is.na(delay_dist[i, j])) {
        issues <- add_issue(issues, "Network Input", node_names[i],
                            "Missing a delay distribution ")
      }
    }
  }
  # A declared delay distribution needs valid, positive parameters.
  for (j in 1:length(all_names)) {
    for (i in seq_len(node_number)) {
      if (!is.na(delay_dist[i, j])) {
        if (delay_dist[i, j] == "exp") {
          x <- delay_param[i, j]
          if (is.na(x)) {
            issues <- add_issue(issues, "Network Input", node_names[i],
                                "Missing a delay distribution parameter")
          } else {
            # BUG FIX: numeric rather than lexicographic comparison.
            xn <- suppressWarnings(as.numeric(x))
            if (!is.na(xn) && xn <= 0) {
              issues <- add_issue(issues, "Network Input", node_names[i],
                                  "Delay parameter is not greater than 0")
            }
          }
        } else {
          pars <- strsplit(delay_param[i, j], ";")[[1]]
          # BUG FIX: NA cells were not caught and crashed on `if (NA)`.
          if (any(is.na(pars)) || "NA" %in% pars) {
            issues <- add_issue(issues, "Network Input", node_names[i],
                                "Missing a delay distribution parameter")
          } else if (delay_dist[i, j] %in% c("unif", "gamma", "weibull")) {
            pn <- suppressWarnings(as.numeric(pars))
            if (any(!is.na(pn) & pn <= 0)) {
              issues <- add_issue(issues, "Network Input", node_names[i],
                                  "Delay distribution parameters are not greater than 0")
            }
          }
        }
      }
    }
  }

  # ---- Calendar: every service point needs cap and ext_arr rows ------------
  row_test <- as.data.frame(cal[, 1:2])
  for (j in c("cap", "ext_arr")) {
    for (i in seq_len(node_number)) {
      x <- row_test[which(row_test[, 1] == j), ]
      x <- x[which(x[, 2] == node_names[i]), ]
      if (nrow(x) == 0) {
        issues <- add_issue(issues, "Calendar", node_names[i],
                            paste0("Missing ", j, " input rows"))
      }
    }
  }

  # ---- Calendar: values present, non-negative; capacity not all zero -------
  value_test <- as.data.frame(cal)
  for (j in c("cap", "ext_arr")) {
    for (i in seq_len(node_number)) {
      x <- value_test[which(value_test[, 1] == j), ]
      x <- x[which(x[, 2] == node_names[i]), 5]
      if (length(x) > 0) {
        if (any(is.na(x))) {
          issues <- add_issue(issues, "Calendar", node_names[i],
                              paste0("Missing ", j, " value entry in calendar"))
        } else {
          if (any(x < 0)) {
            issues <- add_issue(issues, "Calendar", node_names[i],
                                paste0("Negative ", j, " value entry in calendar"))
          }
          if (j == "cap" && all(x == 0)) {
            issues <- add_issue(issues, "Calendar", node_names[i],
                                paste0("All zero ", j, " values entered in calendar"))
          }
        }
      }
    }
  }

  # ---- Calendar: multi-row calendars need both start and end columns -------
  for (j in c("cap", "ext_arr")) {
    for (i in seq_len(node_number)) {
      x <- value_test[which(value_test[, 1] == j), ]
      x <- x[which(x[, 2] == node_names[i]), ]
      if (nrow(x) > 1) {
        if (any(is.na(x[, 3]))) {
          issues <- add_issue(issues, "Calendar", node_names[i],
                              paste0("Missing start value(s) in ", j, " calendar"))
        }
        if (any(is.na(x[, 4]))) {
          issues <- add_issue(issues, "Calendar", node_names[i],
                              paste0("Missing end value(s) in ", j, " calendar"))
        }
      }
    }
  }

  # ---- Calendar: first row of each node's calendar must start at time 0 ----
  for (j in c("cap", "ext_arr")) {
    for (i in seq_len(node_number)) {
      x <- value_test[which(value_test[, 1] == j), ]
      x <- x[which(x[, 2] == node_names[i]), ]
      if (nrow(x) != 0) {
        first_start <- x[1, 3]
        if (is.na(first_start)) {
          # BUG FIX: the original reported "Non-Zero Initial Start Time" for
          # a *missing* first start; say what is actually wrong.
          issues <- add_issue(issues, "Calendar", node_names[i],
                              paste0("Missing Initial Start Time in ", j, " calendar"))
        } else if (first_start != 0) {
          issues <- add_issue(issues, "Calendar", node_names[i],
                              paste0("Non-Zero Initial Start Time in ", j, " calendar"))
        }
      }
    }
  }

  # ---- Calendar: each row's start must equal the previous row's end --------
  for (j in c("cap", "ext_arr")) {
    for (i in seq_len(node_number)) {
      x <- value_test[which(value_test[, 1] == j), ]
      x <- x[which(x[, 2] == node_names[i]), ]
      if (nrow(x) > 1) {
        start_tail <- tail(x[, 3], -1)
        end_head <- head(x[, 4], -1)
        # NA treated as 0 here so a missing cell still triggers a mismatch
        # rather than crashing the comparison.
        start_tail[is.na(start_tail)] <- 0
        end_head[is.na(end_head)] <- 0
        if (any(!(start_tail == end_head))) {
          issues <- add_issue(issues, "Calendar", node_names[i],
            paste0("Start & End values don't match up sequentially in ", j, " calendar"))
        }
      }
    }
  }

  # ---- Calendar: start and end values must be strictly increasing ----------
  for (j in c("cap", "ext_arr")) {
    for (i in seq_len(node_number)) {
      x <- value_test[which(value_test[, 1] == j), ]
      x <- x[which(x[, 2] == node_names[i]), ]
      if (nrow(x) > 1) {
        if (!any(is.na(x[, 3])) && any(diff(x[, 3]) <= 0)) {
          issues <- add_issue(issues, "Calendar", node_names[i],
            paste0("Start values don't increase sequentially in ", j, " calendar"))
        }
        if (!any(is.na(x[, 4])) && any(diff(x[, 4]) <= 0)) {
          issues <- add_issue(issues, "Calendar", node_names[i],
            paste0("End values don't increase sequentially in ", j, " calendar"))
        }
      }
    }
  }

  # ---- At least one service point must receive external arrivals -----------
  x <- value_test[which(value_test[, 1] == "ext_arr"), 5]
  if (!any(is.na(x)) && all(x == 0)) {
    issues <- add_issue(issues, "Calendar", "All",
      paste0("No Arrival rates to any service point in the ext_arr calendar"))
  }

  # ---- Assemble the result matrix ------------------------------------------
  if (length(issues) == 0) {
    issues <- c("Complete", "Complete", "Complete")
  }
  issues <- matrix(data = issues,
                   ncol = 3,
                   byrow = TRUE)
  colnames(issues) <- c("Location", "Service Point", "Issue")
  issues
})
# Render the validator's issues log (or the single "Complete" row) as a
# striped, centred table on the setup tab.
output$file_check_issues <- renderTable(
  issues(),
  striped = TRUE,
  bordered = TRUE,
  align = "c",
  caption = "Issues Log",
  caption.placement = getOption("xtable.caption.placement", "top"),
  caption.width = getOption("xtable.caption.width", NULL)
)
#### NETWORK VISUALISATION ####
# Build the network diagram (DiagrammeR graph) for the loaded model.
# Triggered by the "go_viz" button. Draws service points (blue squares with
# capacity-calendar tooltips), exits (green diamonds), invisible white source
# nodes feeding red "Arrivals" edges, and routing edges labelled with
# transition percentages (orange when a delay distribution applies).
# Side effects: reveals the "Simulation Setup & Run" tab and renders the
# "Next" button. Returns the rendered graph.
viz <- eventReactive(input$go_viz, {
  if (input$w_temp == 0) {
    # File-upload path: only draw once the validator reports "Complete".
    req(input$file1)
    req(input$file2)
    var_input <-
      read.csv(input$file1$datapath,
               header = TRUE,
               sep = ",")
    rownames(var_input) <- var_input[, 1]
    var_input <- var_input[, -1]
    cal_input <-
      read.csv(input$file2$datapath,
               header = TRUE,
               sep = ",")
    issues <- issues()
    req(issues[1, 1] == "Complete")
  } else{
    # Wizard path: coerce the reactive templates back to plain data frames,
    # turning factor columns to character and the transition / queue
    # columns to numeric.
    var_input <- var()
    var_input <- as.data.frame(var_input)
    f <- var_input
    indx <- sapply(f, is.factor)
    f[indx] <- lapply(f[indx], function(x)
      as.character(x))
    var_input <- as.data.frame(f)
    f <- var_input[, 1:nrow(var_input)]
    indx <- 1:nrow(var_input)
    f[, indx] <- lapply(f[indx], function(x)
      as.numeric(x))
    var_input[, 1:nrow(var_input)] <- f
    var_input$ext_queue <- as.numeric(var_input$ext_queue)
    var_input$int_queue <- as.numeric(var_input$int_queue)
    cal_input <- cal()
    cal_input <- as.data.frame(cal_input)
    cal_input$metric <- as.character(cal_input$metric)
    cal_input$node <- as.character(cal_input$node)
    cal_input$start <- as.numeric(as.character(cal_input$start))
    cal_input$end <- as.numeric(as.character(cal_input$end))
    cal_input$value <- as.numeric(as.character(cal_input$value))
  }
  # Classify rows: non-zero transition row sum => service point, else exit.
  # NOTE(review): `1:which(...) - 1` parses as (1:w) - 1 = 0:(w-1); the 0
  # index is silently dropped, so columns 1..(w-1) are summed — the intended
  # transition columns, but via operator precedence rather than by design.
  nodes <-
    rownames(var_input[which(rowSums(var_input[, 1:which(colnames(var_input) ==
                                                           "serv_dist") - 1], na.rm = T) != 0), ])
  exits <-
    rownames(var_input[which(rowSums(var_input[, 1:which(colnames(var_input) ==
                                                           "serv_dist") - 1], na.rm = T) == 0), ])
  #ext_arr<-rownames(var_input[which(var_input$ext_arr>0),])
  # Nodes that receive external arrivals = any positive ext_arr calendar value.
  ext_arr <-
    unique(cal_input$node[which(cal_input$metric == "ext_arr" &
                                  cal_input$value > 0)])
  # Slice out the node-by-node delay distribution and parameter matrices
  # (they sit after the transition/serv_dist columns in the template).
  delay_dist <-
    var_input[, (nrow(var_input) + 5):(nrow(var_input) + nrow(var_input) + 4)] ## Import the template in csv
  rownames(delay_dist) <- rownames(var_input)
  colnames(delay_dist) <- rownames(var_input)
  delay_dist[which(delay_dist == "", arr.ind = T)] <- NA
  delay_param <-
    var_input[, (nrow(var_input) + nrow(var_input) + 5):(ncol(var_input))] ## Import the template in csv
  rownames(delay_param) <- rownames(var_input)
  colnames(delay_param)[1:nrow(delay_param)] <- rownames(var_input)
  delay_param[which(delay_param == "", arr.ind = T)] <- NA
  # Collect (from, to) index pairs for every delayed transition.
  # The (0, 0) seed row is a dummy; indexing with 0 later drops it.
  from <- c(0)
  to <- c(0)
  for (i in 1:nrow(delay_dist)) {
    for (j in 1:nrow(delay_dist)) {
      if (!is.na(delay_dist[i, j])) {
        from <- c(from, i)
        to <- c(to, j)
      }
    }
  }
  delay_list <- cbind(from, to)
  tmp <- rownames(delay_dist)
  # Exits reached via a delayed transition keep their serv_dist entries;
  # every other exit has its service columns blanked out.
  delay_exits <-
    tmp[c(delay_list[, 2])][!tmp[c(delay_list[, 2])] %in% nodes]
  var_input$serv_dist[which(rownames(var_input) %in% exits[!(exits %in% delay_exits)])] <-
    NA
  var_input$serv_dist_param[which(rownames(var_input) %in% exits[!(exits %in% delay_exits)])] <-
    NA
  # NOTE(review): cap_min / cap_max are computed but never used below —
  # candidates for removal.
  cap_min <- vector()
  for (i in nodes) {
    cap_min <-
      c(cap_min, min(cal_input$value[which(cal_input$node == i &
                                             cal_input$metric == "cap")]))
  }
  cap_max <- vector()
  for (i in nodes) {
    cap_max <-
      c(cap_max, max(cal_input$value[which(cal_input$node == i &
                                             cal_input$metric == "cap")]))
  }
  # Build one multi-line tooltip per service point listing its capacity
  # calendar rows (start / end / capacity).
  cal_tooltip <- vector()
  for (i in nodes) {
    tmp <- cal_input[which(cal_input$node == i &
                             cal_input$metric == "cap"), ]
    tmp2 <- vector()
    for (j in 1:nrow(tmp)) {
      tmp3 <-
        paste("\n",
              "Start:",
              tmp[j, 3],
              "End:",
              tmp[j, 4],
              "Capacity:",
              tmp[j, 5],
              "//")
      tmp2 <- c(tmp2, tmp3)
    }
    tmp2 <- paste(tmp2, collapse = "")
    tmp2 <- paste("Capacity Calendar //", tmp2)
    cal_tooltip <- c(cal_tooltip, tmp2)
  }
  # Create a node data frame (ndf): blue squares = service points.
  ndf1 <- create_node_df(
    n = length(nodes),
    type = "lower",
    label = c(nodes),
    fillcolor = "deepskyblue1",
    color = "black",
    fontcolor = "black",
    shape = "square",
    tooltip = cal_tooltip
  )
  # Green diamonds = exits.
  ndf2 <- create_node_df(
    n = length(exits),
    type = "lower",
    label = c(exits),
    fillcolor = "green",
    color = "black",
    fontcolor = "black",
    shape = "diamond",
    tooltip = "Exit"
  )
  # White-on-white (invisible) source nodes, one per arrival stream;
  # numbered after all real nodes so edge indices line up.
  ndf3 <- create_node_df(
    n = length(ext_arr),
    type = "lower",
    label = as.numeric(c(length(c(
      nodes, exits
    )) + 1):(length(c(
      nodes, exits
    )) + length(ext_arr))) ,
    fillcolor = "white",
    fontcolor = "white",
    shape = "square",
    color = "white"
  )
  ndf <- combine_ndfs(ndf1, ndf2, ndf3)
  # Create an edge data frame (edf): one edge per positive transition
  # probability, labelled with the routing percentage. Delayed transitions
  # are drawn in sienna2 with the analytic mean delay in the tooltip.
  f <- vector()
  t <- vector()
  l <- vector()
  edge_col <- vector()
  edge_tip <- vector()
  for (i in 1:length(nodes)) {
    for (j in 1:length(c(nodes, exits))) {
      if (var_input[i, j] > 0) {
        f <- c(f, i)
        t <- c(t, j)
        if (!is.na(delay_dist[i, j])) {
          l <- c(l, paste0(round(var_input[i, j] * 100, digits = 2), "%"))
          edge_col <- c(edge_col, "sienna2")
          # Closed-form mean per supported distribution; parameters are
          # ";"-separated in the template cell.
          if (delay_dist[i, j] == "exp") {
            pars <-
              as.numeric(unlist(strsplit(
                x = as.character(delay_param[i, j]), split = ";"
              )))
            delay_mean <- 1 / pars[1]
          } else if (delay_dist[i, j] == "unif") {
            pars <-
              as.numeric(unlist(strsplit(
                x = as.character(delay_param[i, j]), split = ";"
              )))
            delay_mean <- (pars[1] + pars[2]) / 2
          } else if (delay_dist[i, j] == "lnorm") {
            pars <-
              as.numeric(unlist(strsplit(
                x = as.character(delay_param[i, j]), split = ";"
              )))
            delay_mean <- exp(pars[1] + 0.5 * (pars[2]) ^ 2)
          } else if (delay_dist[i, j] == "weibull") {
            pars <-
              as.numeric(unlist(strsplit(
                x = as.character(delay_param[i, j]), split = ";"
              )))
            delay_mean <- pars[2] * (gamma(1 + (1 / pars[1])))
          } else if (delay_dist[i, j] == "gamma") {
            pars <-
              as.numeric(unlist(strsplit(
                x = as.character(delay_param[i, j]), split = ";"
              )))
            delay_mean <- pars[1] / pars[2]
          } else{
            # Fallback for any other distribution: estimate the mean by
            # drawing 10^7 samples from the matching r<dist> function.
            # NOTE(review): this is expensive and non-deterministic.
            pars <-
              as.numeric(unlist(strsplit(
                x = as.character(delay_param[i, j]), split = ";"
              )))
            tmp2 <-
              do.call(get(paste0("r", delay_dist[i, j])), as.list(c(10 ^ 7, pars))) #Creates a service time
            delay_mean <- mean(tmp2)
          }
          edge_tip <-
            c(
              edge_tip,
              paste0(
                "Mean Delay: ",
                delay_mean,
                " (Delay Dist: ",
                delay_dist[i, j],
                ")"
              )
            )
        }
        else{
          l <- c(l, paste0(round(var_input[i, j] * 100, digits = 2), "%"))
          edge_col <- c(edge_col, "black")
          edge_tip <- c(edge_tip, paste0("No Delay"))
        }
      }
    }
  }
  edf1 <- create_edge_df(
    from = f,
    to = t,
    #rel = c("leading_to"),
    label = l,
    color = edge_col,
    fontcolor = edge_col,
    tooltip = edge_tip
  )
  # Red "Arrivals" edges from the invisible source nodes into each node
  # that receives external arrivals.
  edf2 <-
    create_edge_df(
      from = c(length(c(nodes, exits)) + 1):(length(c(nodes, exits)) + length(ext_arr)),
      to = as.numeric(which(rownames(var_input) %in% ext_arr)),
      #rel = c("leading_to"),
      label = as.character("Arrivals"),
      color = "red",
      fontcolor = "red",
      tooltip = "Arrival"
    )
  edf <- combine_edfs(edf1, edf2)
  #Create a list of average LOS (mean service time) per service point,
  # using the same closed-form / sampling logic as the delay means above.
  LOS <- vector()
  for (i in nodes) {
    arr.dist <- var_input$serv_dist[which(rownames(var_input) == i)]
    pars <-
      as.numeric(unlist(strsplit(
        as.character(var_input$serv_dist_param[which(rownames(var_input) == i)]), ";"
      )))
    if (arr.dist == "exp") {
      tmp3 <- 1 / pars
      LOS <- c(LOS, tmp3)
    } else if (arr.dist == "unif") {
      tmp3 <- (pars[1] + pars[2]) / 2
      LOS <- c(LOS, tmp3)
    } else if (arr.dist == "lnorm") {
      tmp3 <- exp(pars[1] + 0.5 * (pars[2]) ^ 2)
      LOS <- c(LOS, tmp3)
    } else if (arr.dist == "weibull") {
      tmp3 <- pars[2] * (gamma(1 + (1 / pars[1])))
      LOS <- c(LOS, tmp3)
    } else if (arr.dist == "gamma") {
      tmp3 <- pars[1] / pars[2]
      LOS <- c(LOS, tmp3)
    } else{
      tmp2 <-
        do.call(get(paste0("r", arr.dist)), as.list(c(10 ^ 7, pars))) #Creates a service time
      tmp3 <- mean(tmp2)
      LOS <- c(LOS, tmp3)
    }
  }
  LOS <- round(LOS, digits = 2)
  # Time-averaged capacity (TAC) per node: duration-weighted mean of the
  # capacity calendar, rounded up.
  TAC <- vector()
  for (i in nodes) {
    tmp <- cal_input[which(cal_input$node == i &
                             cal_input$metric == "cap"), ]
    if (nrow(tmp) == 1) {
      TAC <- c(TAC, tmp$value)
    }
    if (nrow(tmp) > 1) {
      tmp2 <- sum(tmp$value * (tmp$end - tmp$start)) / max(tmp$end)
      TAC <- c(TAC, tmp2)
    }
  }
  TAC <- ceiling(TAC)
  # Verbose labels (LOS / avg capacity / internal & external queue caps),
  # applied only when the "show detail" checkbox is ticked.
  node_labels <- vector()
  for (i in 1:length(nodes)) {
    tmp1 <-
      paste0(
        nodes[i],
        "\n",
        " LOS: ",
        LOS[i],
        "\n",
        "Av Cap: ",
        TAC[i],
        "\n",
        "IQC: ",
        var_input$int_queue[i],
        "\n",
        "EQC: ",
        var_input$ext_queue[i]
      )
    node_labels <- c(node_labels, tmp1)
  }
  if (input$disp3 == TRUE) {
    ndf$label[1:length(nodes)] <- node_labels
  }
  # Create a graph with the ndf and edf.
  graph <-
    create_graph(nodes_df = ndf,
                 edges_df = edf)
  # NOTE(review): positional indexing into global_attrs relies on
  # DiagrammeR's default attribute row order — fragile across versions.
  graph$global_attrs[1, "value"] <- "dot"
  graph$global_attrs[4, "value"] <- 20
  graph$global_attrs[6, "value"] <- "false"
  graph$global_attrs[14, "value"] <- 20
  graph$global_attrs[17, "value"] <- 1
  graph$global_attrs <-
    rbind(graph$global_attrs, c("rankdir", "LR", "graph"))
  graph$global_attrs <-
    rbind(graph$global_attrs, c("splines", "true", "graph"))
  # Side effects: unlock the next wizard step in the UI.
  showTab(inputId = "navbar", target = "2. Simulation Setup & Run")
  output$next_button <- renderUI({
    column(6, align = "center", actionButton(inputId = "j2PSR2", label = c(tagList(
      "Next", icon("arrow-right")
    ))))
  })
  render_graph(graph)
})
# Main network diagram shown on the setup tab.
output$network <- renderGrViz(viz())
# Re-use the cached network diagram when the checklist button is pressed.
checklist_viz <- eventReactive(input$checklist, viz())
# Network diagram copy shown on the pre-run checklist panel.
output$cl_viz <- renderGrViz(checklist_viz())
# Summarise the run configuration (replications, warm-up, simulation period,
# total length, number of service points) as a 2-column Metric/Value matrix
# for the pre-run checklist. Triggered by the checklist button.
checklist_table <- eventReactive(input$checklist, {
  req(input$st)
  # Warm-up only applies to a full run; trials always start at time 0.
  if (input$run_type == "Full Simulation") {
    req(input$wu)
  }
  warm_up <- if (input$run_type == "Trial Simulation") 0 else input$wu

  if (input$w_temp == 0) {
    # Uploaded template: promote name column to rownames and mirror the
    # names onto the transition columns.
    req(input$file1)
    df <- read.csv(input$file1$datapath, header = TRUE, sep = ",")
    rownames(df) <- df[[1]]
    df <- df[, -1]
    n_trans <- which(colnames(df) == "serv_dist") - 1
    colnames(df)[seq_len(n_trans)] <- rownames(df)
  } else {
    # Wizard template: factors -> character, then transition columns and
    # queue caps -> numeric.
    df <- as.data.frame(var())
    fac <- sapply(df, is.factor)
    df[fac] <- lapply(df[fac], as.character)
    n <- nrow(df)
    df[seq_len(n)] <- lapply(df[seq_len(n)], as.numeric)
    df$ext_queue <- as.numeric(df$ext_queue)
    df$int_queue <- as.numeric(df$int_queue)
  }

  # A service point is any row whose transition probabilities sum to > 0.
  trans_cols <- seq_len(which(colnames(df) == "serv_dist") - 1)
  node_count <- sum(rowSums(df[, trans_cols], na.rm = TRUE) != 0)

  # Replications are only meaningful for a full run.
  reps_shown <-
    if (input$run_type == "Full Simulation") ceiling(input$reps) else "NA"
  out <- matrix(
    c(
      "Simulation Replications",
      "Warm-up Period",
      "Simulation Period",
      "Total Simulation length",
      "Number of Service points",
      reps_shown,
      warm_up,
      input$st,
      warm_up + input$st,
      node_count
    ),
    ncol = 2
  )
  colnames(out) <- c("Metric", "Value")
  out
})
# Render the run-configuration summary on the checklist panel.
output$checklist_table_render <- renderTable(
  checklist_table(),
  caption = "Checklist",
  caption.placement = getOption("xtable.caption.placement", "top"),
  caption.width = getOption("xtable.caption.width", NULL)
)
# Show a "Simulation Running" modal as soon as the Run button is pressed,
# after the same input validations that gate the simulation itself.
# priority = 2 makes this observer fire before default-priority reactives
# responding to the same click, so the alert appears before the heavy
# simulation work starts.
observeEvent(input$sim, {
  #req(input$file1)
  #req(input$file2)
  # Full runs additionally require a warm-up value.
  if (input$run_type == "Full Simulation") {
    req(input$wu)
  }
  req(input$st)
  req(input$st > 0)
  if (input$run_type == c("Full Simulation")) {
    req(input$reps > 0)
  }
  # Non-dismissable info box (no buttons; timer = 0 keeps it open until
  # replaced by a later alert).
  shinyalert(
    title = paste0("Simulation Running \n (Started at : ", format(Sys.time()), ")"),
    text = "The simulation is now running. If there is an error, a new message box will appear with advice.",
    closeOnEsc = FALSE,
    closeOnClickOutside = FALSE,
    html = FALSE,
    type = "info",
    showConfirmButton = FALSE,
    showCancelButton = FALSE,
    confirmButtonText = "OK",
    confirmButtonCol = "#87D9FF",
    timer = 0,
    imageUrl = "",
    animation = TRUE
  )
}, priority = 2)
#### SIMULATION ####
#sim_out is the object that will contain all the outputs from the simulation and is therefore important in rendering all of the outputs
# The tryCatch is a error capture system that results in a pop-up for the user if there are any errors within the system. The exact pop-up can be found at the bottom of the simulation section.
sim_out <- eventReactive(input$sim, {
tryCatch({
### Inputs and Initilisation ##################################################################
#req(input$file1)
#req(input$file2)
#req(checklist_table())
req(input$st > 0)
#if(input$run_type==c("Trial Simulation")){updateRadioButtons(session = session,inputId = "run_type",label = "Select Mode",choices = c("Trial Simulation","Full Simulation"),selected = "Full Simulation")}
if (input$run_type == c("Full Simulation")) {
req(input$reps > 0)
}
if (input$run_type == c("Trial Simulation")) {
reps <- 10
}
if (input$run_type == c("Full Simulation")) {
reps <- ceiling(input$reps)
}
if (input$run_type == c("Trial Simulation")) {
warm_up <- 0
}
if (input$run_type == c("Full Simulation")) {
warm_up <- input$wu
}
if (input$run_type == c("Trial Simulation")) {
#restrict to 2 (or 1) cores
#cl <- makeCluster(min(c(2, max(
# detectCores() - 1, 1
#))))
#set to use of n-1 cores
cl <- makeCluster(min(max(reps - 1, 1), detectCores() - 1))
}
if (input$run_type == c("Full Simulation")) {
cl <- makeCluster(min(max(reps - 1, 1), detectCores() - 1))
}
#ceiling(detectCores()/2)
hideTab(inputId = "navbar", target = "3. Simulation Outputs")
showTab(inputId = "navbar", target = "3. Simulation Outputs")
hideTab(inputId = "navbar", target = "4. Download Outputs")
hideTab(inputId = "3. Simulation Outputs", target = "Output Interpretation")
# hideTab(inputId = "Simulation Outputs",target = "Service Point Statistics")
# hideTab(inputId = "Simulation Outputs",target = "Pathway Statistics")
# hideTab(inputId = "Simulation Outputs",target = "Patient Occupancy Summary")
# hideTab(inputId = "Simulation Outputs",target = "Bed Occupancy Summary")
# hideTab(inputId = "Simulation Outputs",target = "Capacity Driven Delay Summary")
# hideTab(inputId = "Simulation Outputs",target = "Transition Delay Summary")
# hideTab(inputId = "Simulation Outputs",target = "Queueing Summary")
if (input$run_type == c("Full Simulation")) {
showTab(inputId = "navbar", target = "4. Download Outputs")
}
output$next_button2 <- renderUI({
column(6, align = "center", actionButton(inputId = "j2PSR3", label = c(tagList(
"Next", icon("arrow-right")
))))
})
library(shiny)
library(DiagrammeR)
library(magrittr)
library(readr)
library(DT)
library(openxlsx)
library(grid)
library(gridExtra)
#library(plotly)
library(parallel)
library(data.table)
library(tidyverse)
ptm <- proc.time()
##### Simulation Inputs ##############################################################
if (input$w_temp == 0) {
req(input$file1)
req(input$file2)
var_input <-
read.csv(input$file1$datapath,
header = TRUE,
sep = ",")
syst_names <-
cbind(as.numeric(c(1:nrow(var_input))), as.character(var_input[, 1]))
syst_names_single <- syst_names[, 2]
var_input <- var_input[, -1]
rownames(var_input) <- 1:nrow(var_input)
colnames(var_input)[1:nrow(var_input)] <- c(1:nrow(var_input))
cal_input <-
read.csv(input$file2$datapath,
header = TRUE,
sep = ",")
cal_input$node <- as.character(cal_input$node)
} else{
var_input <- var()
var_input <- as.data.frame(var_input)
f <- var_input
indx <- sapply(f, is.factor)
f[indx] <- lapply(f[indx], function(x)
as.character(x))
var_input <- as.data.frame(f)
f <- var_input[, 1:nrow(var_input)]
indx <- 1:nrow(var_input)
f[, indx] <- lapply(f[indx], function(x)
as.numeric(x))
var_input[, 1:nrow(var_input)] <- f
var_input$ext_queue <- as.numeric(var_input$ext_queue)
var_input$int_queue <- as.numeric(var_input$int_queue)
syst_names <-
cbind(as.numeric(c(1:nrow(var_input))), as.character(rownames(var_input)))
syst_names_single <- syst_names[, 2]
rownames(var_input) <- 1:nrow(var_input)
colnames(var_input)[1:nrow(var_input)] <- c(1:nrow(var_input))
cal_input <- cal()
cal_input <- as.data.frame(cal_input)
cal_input$metric <- as.character(cal_input$metric)
cal_input$node <- as.character(cal_input$node)
cal_input$start <- as.numeric(as.character(cal_input$start))
cal_input$end <- as.numeric(as.character(cal_input$end))
cal_input$value <- as.numeric(as.character(cal_input$value))
}
# var_input<-read.csv(input$file1$datapath,header = TRUE,sep = ",") ## Import the template in csv
#
# syst_names<-cbind(as.numeric(c(1:nrow(var_input))),as.character(var_input[,1]))
# syst_names_single<-syst_names[,2]
#
# var_input<-var_input[,-1]
# rownames(var_input)<-1:nrow(var_input)
# colnames(var_input)[1:nrow(var_input)]<-c(1:nrow(var_input))
nodes <-
as.numeric(rownames(var_input[which(rowSums(var_input[, 1:which(colnames(var_input) ==
"serv_dist") - 1], na.rm = T) != 0), ])) ##create a list of the service nodes
node_names <- syst_names[nodes, ]
node_names <- rbind(node_names, c(NA, NA))
rownames(node_names) <- c()
delay_dist <-
var_input[, (nrow(var_input) + 5):(nrow(var_input) + nrow(var_input) + 4)] ## Import the template in csv
rownames(delay_dist) <- 1:nrow(delay_dist)
colnames(delay_dist)[1:nrow(delay_dist)] <- c(1:nrow(delay_dist))
delay_dist[which(delay_dist == "", arr.ind = T)] <- NA
delay_param <-
var_input[, (nrow(var_input) + nrow(var_input) + 5):(ncol(var_input))] ## Import the template in csv
rownames(delay_param) <- 1:nrow(delay_param)
colnames(delay_param)[1:nrow(delay_param)] <-
c(1:nrow(delay_param))
delay_param[which(delay_param == "", arr.ind = T)] <- NA
rep_bed <- list()
from <- c(0)
to <- c(0)
for (i in 1:nrow(delay_dist)) {
for (j in 1:nrow(delay_dist)) {
if (!is.na(delay_dist[i, j])) {
from <- c(from, i)
to <- c(to, j)
}
}
}
delay_list <- cbind(from, to)
#
# cal_input<-read.csv(input$file2$datapath,header = TRUE,sep = ",") ## Import the template in csv
# cal_input$node<-as.character(cal_input$node)
#
if (!is.null(nrow(node_names))) {
for (i in 1:nrow(node_names)) {
cal_input$node[as.character(cal_input$node) == node_names[i, 2]] <-
as.numeric(i)
}
}
if (is.null(nrow(node_names))) {
cal_input$node[as.character(cal_input$node) == node_names[2]] <- 1
}
cap_cal_input <- cal_input[which(cal_input$metric == "cap"), ]
cap_cal_input <- as.data.frame(cap_cal_input)
arr_cal_input <- cal_input[which(cal_input$metric == "ext_arr"), ]
arr_cal_input <- as.data.frame(arr_cal_input)
cap_cal_input_original <- cap_cal_input
arr_cal_input_original <- arr_cal_input
### Shifting Calendars so that the start of the sim_time is the equivalent of 0 on the calendar
if (warm_up != 0) {
cap_cal_input_new <- cap_cal_input[0, ]
for (cc in nodes) {
cap_cal_shift <- cap_cal_input[which(cap_cal_input$node == cc), ]
if (nrow(cap_cal_shift) > 1) {
cap_cal_max = max(cap_cal_shift$end)
warm_up_modulo = warm_up %% cap_cal_max
if (warm_up_modulo != 0) {
cap_cal_shift$start <- cap_cal_shift$start + warm_up_modulo
cap_cal_shift$end <- cap_cal_shift$end + warm_up_modulo
cap_cal_stable <-
cap_cal_shift[1:min(which(cap_cal_shift$end >= cap_cal_max)), ]
cap_cal_stable$end[nrow(cap_cal_stable)] <- cap_cal_max
cap_cal_switch <-
cap_cal_shift[min(which(cap_cal_shift$end > cap_cal_max)):nrow(cap_cal_shift), ]
cap_cal_switch$start[1] <- cap_cal_max
cap_cal_switch$start <- cap_cal_switch$start - cap_cal_max
cap_cal_switch$end <- cap_cal_switch$end - cap_cal_max
cap_cal_shift <- rbind(cap_cal_stable, cap_cal_switch)
cap_cal_shift <-
cap_cal_shift[order(cap_cal_shift$start), ]
cap_cal_input_new <-
rbind(cap_cal_input_new, cap_cal_shift)
} else{
cap_cal_input_new <- rbind(cap_cal_input_new, cap_cal_shift)
}
} else{
cap_cal_input_new <- rbind(cap_cal_input_new, cap_cal_shift)
}
}
arr_cal_input_new <- arr_cal_input[0, ]
for (ac in nodes) {
arr_cal_shift <- arr_cal_input[which(arr_cal_input$node == ac), ]
if (nrow(arr_cal_shift) > 1) {
arr_cal_max = max(arr_cal_shift$end)
warm_up_modulo = warm_up %% arr_cal_max
if (warm_up_modulo != 0) {
arr_cal_shift$start <- arr_cal_shift$start + warm_up_modulo
arr_cal_shift$end <- arr_cal_shift$end + warm_up_modulo
arr_cal_stable <-
arr_cal_shift[1:min(which(arr_cal_shift$end > arr_cal_max)), ]
arr_cal_stable$end[nrow(arr_cal_stable)] <- arr_cal_max
arr_cal_switch <-
arr_cal_shift[min(which(arr_cal_shift$end > arr_cal_max)):nrow(arr_cal_shift), ]
arr_cal_switch$start[1] <- arr_cal_max
arr_cal_switch$start <- arr_cal_switch$start - arr_cal_max
arr_cal_switch$end <- arr_cal_switch$end - arr_cal_max
arr_cal_shift <- rbind(arr_cal_stable, arr_cal_switch)
arr_cal_shift <-
arr_cal_shift[order(arr_cal_shift$start), ]
arr_cal_input_new <-
rbind(arr_cal_input_new, arr_cal_shift)
} else{
arr_cal_input_new <- rbind(arr_cal_input_new, arr_cal_shift)
}
} else{
arr_cal_input_new <- rbind(arr_cal_input_new, arr_cal_shift)
}
}
cap_cal_input <- cap_cal_input_new
arr_cal_input <- arr_cal_input_new
}
# Sets the timer
record_scale <- 0.8
na_lim <- 100
rpi <- 0.1
sim_time <- input$st
t.period <- warm_up + sim_time #Sets simulation period
clusterExport(
cl = cl,
varlist = c(
"cl",
"var_input",
"syst_names",
"syst_names_single",
"nodes",
"delay_dist",
"delay_param",
"delay_list",
"cap_cal_input",
"arr_cal_input",
"cal_input",
"record_scale",
"na_lim",
"rpi",
"warm_up",
"sim_time",
"t.period",
"node_names",
"reps"
),
envir = environment()
)
clusterSetRNGStream(cl)
clusterEvalQ(
cl = cl,
c(
library(shiny),
library(DiagrammeR),
library(magrittr),
library(readr),
library(DT),
library(openxlsx),
library(grid),
library(gridExtra),
#library(plotly),
library(tidyverse)
)
)
####### SIMULATION CODE ##################################################################
outputs <- parLapply(
cl = cl,
X = 1:reps,
fun = function(j) {
#print(paste("replicate",j))
req(var_input)
req(cal_input)
time <- 0 #Sets time start
patient <- 0 #Sets initial patient label
#nodes<-as.numeric(rownames(var_input)) ##create a list of the service nodes
colnames(var_input)[1:(which(colnames(var_input) == "serv_dist") -
1)] <- c(1:(which(
colnames(var_input) == "serv_dist"
) - 1))
exits <-
as.numeric(rownames(var_input[which(rowSums(var_input[, 1:which(colnames(var_input) ==
"serv_dist") - 1], na.rm = T) == 0), ])) #Finds all the exit pathway nodes
exit_names <- syst_names[exits, ]
sch <-
matrix(NA, ncol = 6, nrow = max(3 * sum(
t.period * arr_cal_input$value, na.rm = T
)), 25)
colnames(sch) <-
c("time",
"event",
"patient",
"current_node",
"next_node",
"previous_node")
#sch<-data.frame(time=numeric(0),event=character(0),patient=numeric(0),current_node=numeric(0),next_node=numeric(0),previous_node=numeric(0)) #Creates a data frame for the event schedule
#record<-matrix(NA,ncol=6,nrow=23000)
#colnames(record)<-c("time","event","patient","current_node","next_node","previous_node")
#record<-data.frame(time=numeric(0),event=character(0),patient=numeric(0),current_node=numeric(0),next_node=numeric(0),previous_node=numeric(0)) #Creates a data frame to collect all records
      # Holding area for patients who have finished service but cannot move
      # on (destination node and its internal queue are both full). Sized by
      # the total configured capacity across all calendar entries.
      blocked_mat <-
        matrix(NA,
               ncol = 6,
               nrow = sum(cap_cal_input$value, na.rm = T))
      colnames(blocked_mat) <-
        c("time",
          "event",
          "patient",
          "current_node",
          "next_node",
          "previous_node")
      #blocked_mat<-data.frame(time=numeric(0),event=character(0),patient=numeric(0),current_node=numeric(0),next_node=numeric(0),previous_node=numeric(0)) #Creates a data frame to manage blocked patients
      # Occupancy trace: one row appended per change in a node's in-service
      # count (time, new occupancy, node id, replicate index).
      bed <-
        data.frame(
          time = as.numeric(0),
          bed = as.numeric(0),
          node = as.numeric(0),
          rep = as.numeric(0)
        )
      ## Create syst_ variables and assign initial zero value and exits to Inf####
      # syst_<i> tracks the current number of patients in service at node i.
      # These are created dynamically with assign() and read back via get()
      # throughout the simulation loop.
      for (i in nodes) {
        tmp <- paste("syst_", i, sep = "")
        assign(tmp, 0)
      }
      # Exit nodes get Inf occupancy so they never accept "more" capacity
      # checks — exits can always absorb patients.
      for (i in exits) {
        tmp <- paste("syst_", i, sep = "")
        assign(tmp, Inf)
      }
      ##Create n_serv_ variables and assign capacity from input for cal time 0 and set exits to Inf####
      # n_serv_<i> is node i's service capacity, seeded from the capacity
      # calendar entry whose start time is 0.
      initial_cap <- rep(x = 0, times = length(nodes))
      for (i in nodes) {
        initial_cap[i] <-
          cap_cal_input$value[which(cap_cal_input$node == i &
                                      cap_cal_input$start == 0)]
      }
      for (i in 1:length(nodes)) {
        tmp <- paste("n_serv_", nodes[i], sep = "")
        tmp2 <- initial_cap[i]
        assign(tmp, tmp2)
      }
      # Exits have unlimited capacity.
      for (i in exits) {
        tmp <- paste("n_serv_", i, sep = "")
        assign(tmp, Inf)
      }
      ##Creates inward & outward nodes and probabilities####
      # inward_nodes_<i>: row names of var_input with a positive transition
      # probability INTO node i (i.e. its upstream feeders).
      for (i in 1:length(nodes)) {
        tmp <- paste("inward_nodes_", nodes[i], sep = "")
        tmp2 <- rownames(var_input[var_input[, i] > 0, ])
        assign(tmp, tmp2)
      }
      # The routing-matrix columns of var_input are everything before the
      # serv_dist column; their names are the candidate destination nodes.
      onward_nodes <-
        as.numeric(colnames(var_input[, 1:(which(colnames(var_input) == "serv_dist") -
                                             1)]))
      # onward_nodes_prob_<i>: row i of the routing matrix, used as sampling
      # weights when drawing a patient's next destination.
      for (i in 1:length(nodes)) {
        tmp <- paste("onward_nodes_prob_", nodes[i], sep = "")
        tmp2 <-
          as.vector(var_input[i, 1:(which(colnames(var_input) == "serv_dist") - 1)])
        assign(tmp, tmp2)
      }
      # Creates the service distribution and parameters lists####
      # serv_dist[i] names an r<dist> sampler (called via do.call below);
      # serv_dist_param[i] holds its parameters as a ";"-separated string.
      serv_dist <- var_input$serv_dist
      serv_dist_param <- var_input$serv_dist_param
## Creates the arrivals schedules per node and combines them to form sch ####
#sch[1,]<-c(0,1,1,nodes[1],sample(x=onward_nodes,size = 1,prob = get(paste("onward_nodes_prob_",nodes[1],sep=""))),0) #Adds a patient to the schedule to enter service node 1 at time 0
      # Pre-generate every external arrival for the whole run. For each node
      # the (cyclic) rate calendar is replayed until t.period; within each
      # calendar interval inter-arrival gaps are drawn from an exponential
      # distribution at that interval's rate (a piecewise-constant Poisson
      # process). Each arrival is written into the next free (NA) row of
      # `sch` as event 1 with a pre-sampled onward destination.
      for (i in 1:length(nodes)) {
        arr_cal_input_temp <-
          arr_cal_input[which(arr_cal_input$node == nodes[i]), ]
        # A single-row calendar means a constant rate for the whole run.
        if (nrow(arr_cal_input_temp) == 1) {
          arr_cal_input_temp$end <- t.period
        }
        for (w in 1:nrow(arr_cal_input_temp)) {
          cycle_max <- max(arr_cal_input_temp$end)
          start <- arr_cal_input_temp$start[w]
          end <- arr_cal_input_temp$end[w]
          if (end > t.period) {
            end <- t.period
          }
          time = start
          # Replay this interval once per calendar cycle until t.period.
          while (time < t.period) {
            if (arr_cal_input_temp[w, ]$value != 0) {
              while (time < end) {
                # Exponential inter-arrival gap at this interval's rate.
                tmp1 <- rexp(1, rate = arr_cal_input_temp$value[w])
                time <- time + tmp1
                #print(time)
                if (time < end) {
                  patient <- patient + 1
                  # Write the arrival into the first unused row of sch.
                  sch[match(NA, sch[, "time"]), ] <-
                    c(
                      time,
                      1,
                      patient,
                      nodes[i],
                      sample(
                        x = onward_nodes,
                        size = 1,
                        prob = get(
                          paste("onward_nodes_prob_", nodes[i], sep = "")
                        )
                      ),
                      0
                    )
                }
                #sch<-rbind(sch,data.frame(time=time,event="arrival",patient=patient,current_node=nodes[i],next_node=sample(x=onward_nodes,size = 1,prob = get(paste("onward_nodes_prob_",nodes[i],sep = ""))),previous_node="external"))
              }
            }
            # Advance to the same interval in the next calendar cycle.
            start <- start + cycle_max
            time <- start
            end <- end + cycle_max
          }
        }
      }
      # Trim the unused pre-allocated rows, keeping 5 spare NA rows.
      # NOTE(review): assumes at least one NA row remains — match() returns
      # NA if sch filled up completely; verify the sizing above covers that.
      sch <- sch[1:(match(NA, sch[, "time"]) + 5), ]
      #loss_potential<-sum(sch[,"current_node"] %in% nodes[which(var_input$ext_arr>0&var_input$ext_queue!=Inf)])
      # Event log, sized as a fraction of the schedule and grown on demand
      # inside the main loop whenever free rows run low.
      record <- matrix(NA,
                       ncol = 6,
                       nrow = round(record_scale * nrow(sch), 0))
      colnames(record) <-
        c("time",
          "event",
          "patient",
          "current_node",
          "next_node",
          "previous_node")
# Creates the service change schedules per node and combines them to form sch ####
cap_sch <-
data.frame(
time = numeric(),
event = numeric(),
patient = numeric(),
current_node = numeric(),
next_node = numeric(),
previous_node = numeric()
)
for (i in as.numeric(cap_cal_input$node[duplicated(cap_cal_input$node)])) {
cap_cal_input_temp <- cap_cal_input[which(cap_cal_input$node == i), ]
for (l in 1:sum(cap_cal_input_temp$node == i)) {
time = cap_cal_input_temp$start[l]
while (time < t.period) {
cap_sch <- rbind(cap_sch,
c(time, 7, 0, i, cap_cal_input_temp$value[l], 0))
time <- time + max(cap_cal_input_temp$end)
}
}
}
colnames(cap_sch) <-
c("time",
"event",
"patient",
"current_node",
"next_node",
"previous_node")
cap_sch <- cap_sch[order(cap_sch$time), ]
cap_sch <- cap_sch[-which(cap_sch$time == 0), ]
#write.csv(cap_sch,"cap_sch.csv")
sch <- rbind(sch, as.matrix(cap_sch))
rownames(sch) <- c()
##Creates the external queue list and sets queue max ####
      # ext_queue_<i>: waiting area for EXTERNAL arrivals to node i, stored
      # as a pre-allocated NA matrix filled row by row; ext_queue_max_<i>
      # keeps the configured limit (possibly Inf) for capacity checks.
      for (i in 1:length(nodes)) {
        tmp <- paste("ext_queue_", nodes[i], sep = "")
        q_max <- var_input$ext_queue[i]
        # An Inf limit still needs a finite matrix: size it by the number of
        # arrivals scheduled for this node.
        # NOTE(review): if exactly one scheduled row matches, sch[which(...), ]
        # drops to a vector and nrow() is NULL, so max(NULL, 1) gives a
        # 1-row queue — confirm this edge case is acceptable.
        if (q_max == Inf) {
          q_max = max(nrow(sch[which(sch[, "current_node"] == nodes[i]), ]), 1)
        }
        mat <- matrix(NA, ncol = 6, nrow = q_max)
        colnames(mat) <-
          c("time",
            "event",
            "patient",
            "current_node",
            "next_node",
            "previous_node")
        assign(tmp, mat)
        tmp <- paste("ext_queue_max_", nodes[i], sep = "")
        tmp2 <- var_input$ext_queue[i]
        assign(tmp, tmp2)
      }
      # int_queue_<i>: waiting area for INTERNAL transfers into node i
      # (patients arriving from other nodes), same layout as above.
      for (i in 1:length(nodes)) {
        tmp <- paste("int_queue_", nodes[i], sep = "")
        q_max <- var_input$int_queue[i]
        # Unlimited internal queues are bounded by the schedule size.
        if (q_max == Inf) {
          q_max = nrow(sch)
        }
        mat <- matrix(NA, ncol = 6, nrow = q_max)
        colnames(mat) <-
          c("time",
            "event",
            "patient",
            "current_node",
            "next_node",
            "previous_node")
        assign(tmp, mat)
        tmp <- paste("int_queue_max_", nodes[i], sep = "")
        tmp2 <- var_input$int_queue[i]
        assign(tmp, tmp2)
      }
####SIMULATION CYCLE ######################################################################################
###START - Simulation Cycle###
while (min(sch[, "time"], na.rm = T) < t.period) {
#while(min(sch[,"time"],na.rm = T)<21.2) {
#print(min(sch[,"time"],na.rm = T))
        # Timestamp of the next pending event in the schedule.
        time_test <- min(sch[, "time"], na.rm = T)
        # Grow the event log when fewer than na_lim free rows remain,
        # extending it by a fraction (rpi) of its current size.
        if (sum(is.na(record[, "time"])) <= na_lim) {
          mat <- matrix(NA,
                        ncol = 6,
                        nrow = round(rpi * nrow(record), 0))
          colnames(mat) <-
            c(
              "time",
              "event",
              "patient",
              "current_node",
              "next_node",
              "previous_node"
            )
          record <- rbind(record, mat)
        }
        # Select the row of interest (roi). Capacity-change events (code 7)
        # are processed before any other event sharing the same timestamp,
        # so arrivals/service ends see the updated capacity.
        roi_test <- 7 %in% sch[which(sch[, "time"] == time_test), "event"]
        if (roi_test == FALSE) {
          roi <- which.min(sch[, "time"])
        } else{
          roi <- which(sch[, "time"] == time_test & sch[, "event"] == 7)
          roi <- roi[1]
        }
### EXTERNAL ARRIVALS###
if (sch[roi, "event"] == 1) {
#Checks if the event at the top of the sch is an arrival
###EXTERNAL ARRIVAL SCENARIOS -####
###
###1. SPACE IN THE NODE
###
###2. ELSE SPACE IN THE QUEUE
###
###3. ELSE QUEUE IS FULL ~ LOST
###
if (sch[roi, "event"] != 1) {
print("line199-Non_arrival_event_in_arrival_section")
}
### EXTERNAL ARRIVALS 1 - SPACE IN THE NODE #############################################################
if (get(paste("syst_", sch[roi, "current_node"], sep = "")) <
get(paste("n_serv_", sch[roi, "current_node"], sep = ""))) {
#Checks if there is space at the node for an arrival
record[match(NA, record[, "time"]), ] <-
sch[roi, ] #Adds the event to the record
arr.dist <-
serv_dist[which(nodes == (sch[roi, "current_node"]))]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == (sch[roi, "current_node"]))]), ";"
)))
tmp2 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp2<-do.call(paste("serv_dist_",sch$current_node[roi],sep=""),args = list())
record[match(NA, record[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T), 2, patient = sch[roi, "patient"], sch[roi, "current_node"], sch[roi, "next_node"], 0) #Adds a service start event to the record
sch[match(NA, sch[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T) + tmp2,
3,
patient = sch[roi, "patient"],
sch[roi, "current_node"],
sch[roi, "next_node"],
0) #Adds a service end event to the schedule
tmp3 <-
get(paste("syst_", sch[roi, "current_node"], sep = "")) + 1 #Adds 1 to the relevant node system
assign(paste("syst_", sch[roi, "current_node"], sep = ""), tmp3) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed, c(
time = sch[roi, "time"],
bed = get(paste("syst_", sch[roi, "current_node"], sep = "")),
node = sch[roi, "current_node"],
rep = j
))
if (get(paste("syst_", sch[roi, "current_node"], sep = "")) >
paste("n_serv_", sch[roi, "current_node"], sep = "")) {
print("line221- Added a patient to a node where there is no capacity")
}
if (time_test < min(sch[, "time"], na.rm = T)) {
print(
"line224- Event has been addded to the schedule that occurs before current event"
)
}
sch[roi, ] <-
c(rep(NA, 6)) #Removes the event from the schedule list
}
### EXTERNAL ARRIVALS 2 - SPACE IN THE QUEUE #############################################################
else
#If there is not space at the node then the patient is either added to the queue or if the queue is full then the patient is lost
if (sum(!is.na(get(
paste("ext_queue_", sch[roi, "current_node"], sep = "")
))) / 6 < get(paste("ext_queue_max_", sch[roi, "current_node"], sep = ""))) {
#If there is space in the queue then the patient is added to a queue
if (get(paste("syst_", sch[roi, "current_node"], sep = "")) <
get(paste("n_serv_", sch[roi, "current_node"], sep = ""))) {
print("line232- Added a patient to a queue where there is capacity")
}
record[match(NA, record[, "time"]), ] <-
sch[roi, ] #Adds the arrival to the record
tmp4 <-
paste("ext_queue_", sch[roi, "current_node"], sep = "") #Finds relevant queue
inter <-
get(tmp4) #Creates copy of queue to ammend
inter[match(NA, inter[, "time"]), ] <-
sch[roi, ] #Changes the correct row in the copy
assign(tmp4, inter) #Ressigns the correct queue list
if (sum(!is.na(get(
paste("ext_queue_", sch[roi, "current_node"], sep = "")
))) / 6 > get(paste("ext_queue_max_", sch[roi, "current_node"], sep = ""))) {
print(("line235-Exceeded external queue capactity"))
}
if (time_test < min(sch[, "time"], na.rm = T)) {
print(
"line240- Event has been addded to the schedule that occurs before current event"
)
}
sch[roi, ] <-
c(rep(NA, 6)) #Removes arrival event from the schedule
}
### EXTERNAL ARRIVALS 3 - NO SPACE IN NODE OR QUEUE THEREFORE LOST #####
else{
#If there isn't space in the queue then the patient is lost
record[match(NA, record[, "time"]), ] <-
sch[roi, ] #Adds the arrival to the record
record[match(NA, record[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T), 5, sch[roi, "patient"], sch[roi, "current_node"], sch[roi, "next_node"], 0) #Adds the loss to the record
if (sum(!is.na(get(
paste("ext_queue_", sch[roi, "current_node"], sep = "")
))) / 6 < get(paste("ext_queue_max_", sch[roi, "current_node"], sep = ""))) {
print((
"line245-Lost patient even though there is capacity"
))
}
if (time_test < min(sch[, "time"], na.rm = T)) {
print(
"line251- Event has been addded to the schedule that occurs before current event"
)
}
sch[roi, ] <- c(rep(NA, 6))
}
}
###SERVICE END###
else if (sch[roi, "event"] == 3) {
#Checks if the event at the top of the sch is an arrival
if (sch[roi, "event"] != 3) {
print("line265-Non service_end event in service_end section")
}
### SERVICE END SCENARIOS################################################################
###
###1. NO SPACE AT ONWARD NODE OR ONWARD QUEUE & NOT AN EXIT ~~ BLOCKED
###
###2. SPACE IN ONWARD NODE OR ONWARD QUEUE (OR EXIT)
###
### a. PATIENT ADDED TO ONWARD NODE QUEUE
###
### 1. IF NO PATIENT WAITING UPSTREAM ~~ Add current patient departure and arrival to the record. Decrease the number in node system by 1 to allow new arrivals later
###
### 2. IF PATIENT WAITING UPSTREAM ~~
### Add current patient details (departure & arrival) to the record for their new node queue
### Add new patient details to the record for the waiting patient and schedule the service_end
### Shift node system values to reflect moving capacity
###
### a. NO MORE WAITING PATIENTS
###
### b. while(MORE WAITING PATIENTS UP THE CHAIN)
### {Add departure, arrival and service_start time to the record for the waiting patient and schedule the service_end AND Shift node system values to reflect moving capacity}
### {Includes filling empty capacity in fixed capacity queues}
### 1.service_end backfill
### 2.int queue backfill
### 3.ext_queue backfill
###
###
###
###
### b. NEXT NODE/EXIT IS PRESCRIBED TRANSITION DELAY ~~ Check if the patient is moving to a node or exit with a prescribed transition delay
###
###
###
### c. PATIENT ADDED TO ONWARD NODE SERVICE OR EXITS ~~ Check if there is a patient waiting in any of the inward service nodes or the queue. If multiple, then take patient with earliest service_end time
###
### 1. IF NO PATIENT WAITING ~~ Add current patient details to the record and new service_end time to the schedule. Decrease the number in node system by 1 to allow new arrivals later
###
### 2. IF PATIENT WAITING ~~
### Add current patient details (departure & arrival) to the record for their new node queue
### Add new patient details to the record for the waiting patient and schedule the service_end
### Shift node system values to reflect moving capacity
###
### a. NO MORE WAITING PATIENTS
###
### b. while(MORE WAITING PATIENTS UP THE CHAIN)
### {Add departure, arrival and service_start time to the record for the waiting patient and schedule the service_end AND Shift node system values to reflect moving capacity}
### {Includes filling empty capacity in fixed capacity queues}
### 1.service_end backfill
### 2.int queue backfill
### 3.ext_queue backfill
###
record[match(NA, record[, "time"]), ] <-
sch[roi, ] #Adds the service_end to the record
### SERVICE END 1 - THERE IS NO SPACE IN THE QUEUE OR THE SERVICE NODE SO THE PATIENT IS BLOCKED #############################################################
if (sch[roi, "next_node"] %in% nodes &&
get(paste("syst_", sch[roi, "next_node"], sep = "")) >= get(paste("n_serv_", sch[roi, "next_node"], sep =
"")) &&
sum(!is.na(get(
paste("int_queue_", sch[roi, "next_node"], sep = "")
))) / 6 >= get(paste("int_queue_max_", sch[roi, "next_node"], sep = ""))) {
## If the next node is not an exit and there is no space in the onward queue or node (i.e. no space anywhere)
if (get(paste("syst_", sch[roi, "next_node"], sep = "")) < get(paste("n_serv_", sch[roi, "next_node"], sep =
""))) {
print("line314-Blocked patient even though there is space in the node")
}
if (sum(!is.na(get(
paste("int_queue_", sch[roi, "next_node"], sep = "")
))) / 6 < get(paste("int_queue_max_", sch[roi, "next_node"], sep = ""))) {
print("line315-Blocked patient even though there is space in the queue")
}
blocked_mat[match(NA, blocked_mat[, "time"]), ] <-
sch[roi, ]
if (time_test < min(sch[, "time"], na.rm = T)) {
print(
"line320- Event has been addded to the schedule that occurs before current event"
)
}
sch[roi, ] <- c(rep(NA, 6))
}
else{
#### SERVICE END 2a - THERE IS NO SPACE IN THE NODE AND IT ISNT AN EXIT SO THE PATIENT IS ADDED TO THE QUEUE#########################################################
if (get(paste("syst_", sch[roi, "next_node"], sep = "")) >= get(paste("n_serv_", sch[roi, "next_node"], sep =
"")) &&
sch[roi, "next_node"] %in% nodes) {
## If the next node is not an exit and there is no space in the onward node (i.e. there is space in the queue, not the node)
record[match(NA, record[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T), 8, sch[roi, "patient"], sch[roi, "current_node"], sch[roi, "next_node"], sch[roi, "previous_node"]) #Adds the transition start to the record
record[match(NA, record[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T), 4, sch[roi, "patient"], sch[roi, "current_node"], sch[roi, "next_node"], sch[roi, "previous_node"]) #Adds the departure to the record
tmp99 <-
sample(x = onward_nodes,
size = 1,
prob = get(paste(
"onward_nodes_prob_", sch[roi, "next_node"], sep = ""
)))
record[match(NA, record[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T),
event = 1,
sch[roi, "patient"],
sch[roi, "next_node"],
tmp99,
sch[roi, "current_node"]) #Adds the arrival to the record
tmp4 <-
paste("int_queue_", sch[roi, "next_node"], sep = "") #Finds relevant queue
inter <- get(tmp4)
inter[match(NA, inter[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T), 1, sch[roi, "patient"], sch[roi, "next_node"], tmp99, sch[roi, "current_node"])
assign(tmp4, inter) #Adds the patient arrival record to the correct queue
if (sum(!is.na(get(
paste("int_queue_", sch[roi, "next_node"], sep = "")
))) / 6 > get(paste("int_queue_max_", sch[roi, "next_node"], sep = ""))) {
print(("line347-Exceed Internal queue capactity"))
}
tmp5 <-
get(paste("syst_", sch[roi, "current_node"], sep = "")) - 1 #Takes 1 from the relevant node system
assign(paste("syst_", sch[roi, "current_node"], sep = ""), tmp5) #Assigns the decreased node system value to the correct system variable
bed <-
rbind(bed, c(
time = sch[roi, "time"],
bed = get(paste("syst_", sch[roi, "current_node"], sep = "")),
node = sch[roi, "current_node"],
rep = j
))
if (get(paste("syst_", sch[roi, "current_node"], sep = "")) <
0) {
print("line355- Lowered syst value to below zero which is impossible")
}
backfill_loop <- "TRUE"
backfill <-
rbind(get(paste("int_queue_", sch[roi, "current_node"], sep = "")), get(paste("ext_queue_", sch[roi, "current_node"], sep =
"")), blocked_mat[c(which(blocked_mat[, "next_node"] == sch[roi, "current_node"])), ]) #Finds everyone who is either blocked or in a queue for the newly undercapacity node
if (sum(!is.na(backfill[, "patient"])) > 0 &
get(paste("n_serv_", sch[roi, "current_node"], sep = "")) > get(paste("syst_", sch[roi, "current_node"], sep =
""))) {
while (backfill_loop == "TRUE") {
#Finds the next available person from the queue or blocked node
if (sch[roi, "event"] != 3) {
print("line367-Non service_end event triggering backfill loop")
}
if (backfill[which.min(backfill[, "time"]), "event"] ==
3) {
if (!sum(delay_list[, 1] == backfill[which.min(backfill[, "time"]), "current_node"] &
delay_list[, 2] == backfill[which.min(backfill[, "time"]), "next_node"]) >
0) {
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
8,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
4,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
tmp99 <-
sample(
x = onward_nodes,
size = 1,
prob = get(
paste(
"onward_nodes_prob_",
backfill[which.min(backfill[, "time"]), "next_node"],
sep = ""
)
)
)
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
1,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]) #Adds an arrival event to the record for the blocked patient
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
2,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]) #Adds a service start event to the record for blocked patient
arr.dist <-
serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "next_node"])]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "next_node"])]), ";"
)))
tmp7 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp7<-do.call(paste("serv_dist_",backfill$next_node[which.min(backfill[,"time"])],sep=""),args = list())
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp7,
3,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]
)
tmp97 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) - 1 #Takes 1 from the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""),
tmp97) #Assigns the decreased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) < 0) {
print(
"line398- Lowered syst value within backfill loop to below zero which is impossible"
)
}
tmp_unblocked_node <-
backfill[which.min(backfill[, "time"]), "current_node"]
tmp_filled_node <-
backfill[which.min(backfill[, "time"]), "next_node"]
tmp_blocked_remove <-
which(
blocked_mat[, "current_node"] == tmp_unblocked_node &
blocked_mat[, "next_node"] == tmp_filled_node
)
blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
c(rep(NA, 6))
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) + 1 #Adds 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""),
tmp9) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep = ""
)),
node = backfill[which.min(backfill[, "time"]), "next_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
print(
"line413- Increased syst value within backfill loop to above capacity"
)
}
backfill <-
rbind(get(
paste(
"int_queue_",
tmp_unblocked_node,
sep = ""
)
),
get(
paste(
"ext_queue_",
tmp_unblocked_node,
sep = ""
)
),
blocked_mat[c(which(blocked_mat[, "next_node"] == tmp_unblocked_node)), ]) #Finds everyone who is either blocked or in a queue for the newly undercapacity node
if (sum(!is.na(backfill[, "patient"])) > 0) {
if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
else{
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
}
if (sum(!is.na(backfill[, "patient"])) == 0) {
backfill_loop = "FALSE"
}
}
else if (sum(delay_list[, 1] == backfill[which.min(backfill[, "time"]), "current_node"] &
delay_list[, 2] == backfill[which.min(backfill[, "time"]), "next_node"]) >
0) {
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
8,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a transfer_delay_start event to the record for the blocked patient
arr.dist <-
delay_dist[backfill[which.min(backfill[, "time"]), "current_node"], backfill[which.min(backfill[, "time"]), "next_node"]]
pars <-
as.numeric(unlist(strsplit(
as.character(delay_param[backfill[which.min(backfill[, "time"]), "current_node"], backfill[which.min(backfill[, "time"]), "next_node"]]), ";"
)))
tmp2 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp2,
6,
patient = backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]
)
if (backfill[which.min(backfill[, "time"]), "next_node"] %in% nodes) {
tmp5 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) + 1 #Adds 1 from the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""),
tmp5) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep = ""
)),
node = backfill[which.min(backfill[, "time"]), "next_node"],
rep = j
))
}
tmp_unblocked_node <-
backfill[which.min(backfill[, "time"]), "current_node"]
tmp_filled_node <-
backfill[which.min(backfill[, "time"]), "next_node"]
tmp_blocked_remove <-
which(
blocked_mat[, "current_node"] == tmp_unblocked_node &
blocked_mat[, "next_node"] == tmp_filled_node
)
blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
c(rep(NA, 6))
backfill_loop = "FALSE"
}
}## END OF SERVICE END PART OF BACKFILL LOOP
else if (backfill[which.min(backfill[, "time"]), "event"] ==
1 & backfill[which.min(backfill[, "time"]), "previous_node"] != 0) {
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
2,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a service start event to the record for the next person in the queue
arr.dist <-
serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]), ";"
)))
tmp7 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp7<-do.call(paste("serv_dist_",backfill$current_node[which.min(backfill[,"time"])],sep=""),args = list()) #Draws a random service time from the distribution
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp7,
3,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]
) #Adds a service end event to schedule for the next person in the queue
queue_find <- "int"
tmp8 <-
get(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) #Find the queue in question
tmp8[which.min(tmp8[, "time"]), ] <-
c(rep(NA, 6)) #Remove the patient from the queue
assign(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""),
tmp8) #Reassign the queue to the correct variable name
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) + 1 #Adds 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""), tmp9) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
print(
"line455- Increased syst value within backfill loop to above capacity"
)
}
backfill <-
rbind(get(paste(
"int_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), get(paste(
"ext_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), blocked_mat[c(which(as.vector(blocked_mat[, "next_node"]) == backfill[which.min(backfill[, "time"]), "current_node"])), ]) #Finds everyone who is blocked for the newly undercapacity queue
if (length(backfill[which(backfill[, "event"] == 3), "event"]) !=
0) {
backfill <- rbind(backfill[which(backfill[, "event"] == 3), ], rep(NA, 6))
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
8,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
4,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
tmp99 <-
sample(
x = onward_nodes,
size = 1,
prob = get(
paste(
"onward_nodes_prob_",
backfill[which.min(backfill[, "time"]), "next_node"],
sep = ""
)
)
)
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
1,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]) #Adds an arrival event to the record for the blocked patient
tmp4 <-
paste("int_queue_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"") #Finds the correct queue for the patient to enter
inter <-
get(tmp4) #Creates copy of queue to ammend
inter[match(NA, inter[, "time"]), ] <-
c(backfill[which.min(backfill[, "time"]), "time"],
1,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"])
assign(tmp4, inter) #Adds the patient arrival record to the correct queue
if (sum(!is.na(get(
paste("int_queue_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")
))) / 6 > get(paste("int_queue_max_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
print((
"line480-Internal queue capactity exceeded"
))
}
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) - 1 #Subtracts 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""),
tmp9)
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) < 0) {
print(
"line464- Lowered syst value within backfill loop to below zero"
)
}
tmp_unblocked_node <-
backfill[which.min(backfill[, "time"]), "current_node"]
tmp_filled_node <-
backfill[which.min(backfill[, "time"]), "next_node"]
tmp_blocked_remove <-
which(
blocked_mat[, "current_node"] == tmp_unblocked_node &
blocked_mat[, "next_node"] == tmp_filled_node
)
blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
c(rep(NA, 6))
backfill <-
rbind(get(
paste("int_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")
),
get(
paste("ext_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")
),
blocked_mat[c(which(as.vector(blocked_mat[, "next_node"]) == backfill[which.min(backfill[, "time"]), "current_node"])), ]) #Finds everyone who is either blocked for the newly undercapacity queue
if (sum(!is.na(backfill[, "patient"])) > 0) {
if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
else{
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
}
}
else{
backfill_loop = "FALSE"
}
}## END OF ARRIVAL (Internal) PART OF BACKFILL LOOP
            else if (backfill[which.min(backfill[, "time"]), "event"] ==
                     1 & backfill[which.min(backfill[, "time"]), "previous_node"] == 0) {
              ## BACKFILL: the earliest-waiting candidate is an EXTERNAL arrival
              ## (event 1 with previous_node == 0), i.e. a patient waiting in the
              ## ext_queue of the node that just freed a bed.  Start their service:
              ## record the service start, draw a service time, schedule the
              ## service_end, pop them from the queue and occupy one bed.
              record[match(NA, record[, "time"]), ] <-
                c(sch[roi, "time"],
                  2,
                  backfill[which.min(backfill[, "time"]), "patient"],
                  backfill[which.min(backfill[, "time"]), "current_node"],
                  backfill[which.min(backfill[, "time"]), "next_node"],
                  backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a service start event to the record for the next person in the queue
              ## Look up the service-time distribution name and its ";"-separated
              ## parameters for this node, then draw one variate via do.call("r<dist>").
              arr.dist <-
                serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]
              pars <-
                as.numeric(unlist(strsplit(
                  as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]), ";"
                )))
              tmp7 <-
                do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
              #tmp7<-do.call(paste("serv_dist_",backfill$current_node[which.min(backfill[,"time"])],sep=""),args = list()) #Draws a random service time from the distribution
              sch[match(NA, sch[, "time"]), ] <-
                c(
                  min(sch[, "time"], na.rm = T) + tmp7,
                  3,
                  backfill[which.min(backfill[, "time"]), "patient"],
                  backfill[which.min(backfill[, "time"]), "current_node"],
                  backfill[which.min(backfill[, "time"]), "next_node"],
                  backfill[which.min(backfill[, "time"]), "previous_node"]
                ) #Adds a service end event to schedule for the next person in the queue
              queue_find <- "ext"  ## external queue: previous_node == 0 marks an arrival from outside
              tmp8 <-
                get(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            "")) #Find the queue in question
              tmp8[which.min(tmp8[, "time"]), ] <-
                c(rep(NA, 6)) #Remove the patient from the queue
              assign(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                             ""),
                     tmp8) #Reassign the queue to the correct variable name
              tmp9 <-
                get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            "")) + 1 #Adds 1 to the relevant node system
              assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                             ""), tmp9) #Assigns the increased node system value to the correct system variable
              ## Log the node's new occupancy level for replication j.
              bed <-
                rbind(bed,
                      c(
                        time = sch[roi, "time"],
                        bed = get(paste(
                          "syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            ""
                        )),
                        node = backfill[which.min(backfill[, "time"]), "current_node"],
                        rep = j
                      ))
              ## Sanity check: occupancy must never exceed the node's server count.
              if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                              "")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                                                 ""))) {
                print(
                  "line535- Increased syst value within backfill loop to above capacity"
                )
              }
              backfill_loop = "FALSE"  ## loop always exits after handling an external arrival
            }## END OF ARRIVAL (External) PART OF BACKFILL LOOP
            else{
              backfill_loop = "FALSE"
            }
            if (sum(!is.na(backfill[, "patient"])) == 0) {
              backfill_loop = "FALSE"  ## no candidates left to backfill
            }
          }
        }
        ## Guard: no newly scheduled event may pre-date the event being processed.
        if (time_test < min(sch[, "time"], na.rm = T)) {
          print(
            "line546- Event has been addded to the schedule that occurs before current event"
          )
        }
        sch[roi, ] <-
          c(rep(NA, 6))
      } #Removes the original service end event from the schedule
      #### SERVICE END 2b - NEXT NODE/EXIT IS PRESCRIBED TRANSITION DELAY###########################################################
      else if (sum(delay_list[, 1] == sch[roi, "current_node"] &
                   delay_list[, 2] == sch[roi, "next_node"]) > 0) {
        ##Need new test for delay between service points or service point and exit
        ## The (current_node -> next_node) move appears in delay_list, so it has a
        ## prescribed transition delay: log the delay start (event 8), draw the
        ## delay length from the delay_dist / delay_param matrices, and schedule a
        ## delayed departure (event 6) at current time + delay.  The patient keeps
        ## their bed at current_node until the event-6 handler releases it.
        record[match(NA, record[, "time"]), ] <-
          c(min(sch[, "time"], na.rm = T), 8, sch[roi, "patient"], sch[roi, "current_node"], sch[roi, "next_node"], sch[roi, "previous_node"]) #Adds the transfer_delay_start to the record
        arr.dist <-
          delay_dist[sch[roi, "current_node"], sch[roi, "next_node"]]
        pars <-
          as.numeric(unlist(strsplit(
            as.character(delay_param[sch[roi, "current_node"], sch[roi, "next_node"]]), ";"
          )))
        tmp2 <-
          do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
        sch[match(NA, sch[, "time"]), ] <-
          c(min(sch[, "time"], na.rm = T) + tmp2,
            6,
            patient = sch[roi, "patient"],
            sch[roi, "current_node"],
            sch[roi, "next_node"],
            sch[roi, "previous_node"])
        ## If the destination is a real node (not an exit), reserve the onward bed
        ## now, during the delay.  NOTE(review): capacity at next_node is not
        ## checked before this increment — confirm that is intended.
        if (sch[roi, "next_node"] %in% nodes) {
          tmp5 <-
            get(paste("syst_", sch[roi, "next_node"], sep = "")) + 1 #Adds 1 from the relevant node system
          assign(paste("syst_", sch[roi, "next_node"], sep = ""), tmp5) #Assigns the increased node system value to the correct system variable
          bed <-
            rbind(bed,
                  c(
                    time = sch[roi, "time"],
                    bed = get(paste("syst_", sch[roi, "next_node"], sep = "")),
                    node = sch[roi, "next_node"],
                    rep = j
                  ))
        }
        sch[roi, ] <- c(rep(NA, 6))  ## remove the processed service_end from the schedule
      }
      #### SERVICE END 2c - THERE IS SPACE IN THE ONWARD NODE OR NO DELAY EXIT #####################################################
      else{
        #There is an empty space in the onward node or this is an no delay exit from the system
        ## Immediate transfer/exit: log transfer-delay start and departure back to
        ## back (events 8 then 4), move the patient onward, free one bed at the
        ## current node, then try to backfill the freed bed.
        record[match(NA, record[, "time"]), ] <-
          c(min(sch[, "time"], na.rm = T), 8, sch[roi, "patient"], sch[roi, "current_node"], sch[roi, "next_node"], sch[roi, "previous_node"]) #Adds the transfer_delay_start to the record
        record[match(NA, record[, "time"]), ] <-
          c(min(sch[, "time"], na.rm = T), 4, sch[roi, "patient"], sch[roi, "current_node"], sch[roi, "next_node"], sch[roi, "previous_node"]) #Adds the departure to the record
        if (sch[roi, "next_node"] %in% nodes) {
          #If the patient is exiting the system then they will not need an arrival, service_start or service_end to the record or sch
          ## Sample the patient's destination AFTER next_node from that node's
          ## onward-routing probabilities.
          tmp99 <-
            sample(
              x = onward_nodes,
              size = 1,
              prob = get(paste(
                "onward_nodes_prob_", sch[roi, "next_node"], sep = ""
              ))
            ) #Finds the next node destination after moving node
          record[match(NA, record[, "time"]), ] <-
            c(min(sch[, "time"], na.rm = T), 1, sch[roi, "patient"], sch[roi, "next_node"], tmp99, sch[roi, "current_node"]) #Adds arrival to the record
          record[match(NA, record[, "time"]), ] <-
            c(min(sch[, "time"], na.rm = T), 2, sch[roi, "patient"], sch[roi, "next_node"], tmp99, sch[roi, "current_node"]) #Adds service_start to the record
          arr.dist <-
            serv_dist[which(nodes == sch[roi, "next_node"])]
          pars <-
            as.numeric(unlist(strsplit(
              as.character(serv_dist_param[which(nodes == sch[roi, "next_node"])]), ";"
            )))
          tmp2 <-
            do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
          #tmp2<-do.call(paste("serv_dist_",sch$next_node[roi],sep=""),args = list())
          sch[match(NA, sch[, "time"]), ] <-
            c(min(sch[, "time"], na.rm = T) + tmp2,
              3,
              sch[roi, "patient"],
              sch[roi, "next_node"],
              tmp99,
              sch[roi, "current_node"]) #Adds service_end to the sch
          tmp5 <-
            get(paste("syst_", sch[roi, "next_node"], sep = "")) + 1 #Adds 1 from the relevant node system
          assign(paste("syst_", sch[roi, "next_node"], sep = ""), tmp5) #Assigns the decreased node system value to the correct system variable
          bed <-
            rbind(bed,
                  c(
                    time = sch[roi, "time"],
                    bed = get(paste("syst_", sch[roi, "next_node"], sep = "")),
                    node = sch[roi, "next_node"],
                    rep = j
                  ))
          ## Sanity check: destination occupancy must not exceed its server count.
          if (get(paste("syst_", sch[roi, "next_node"], sep = "")) >
              get(paste("n_serv_", sch[roi, "next_node"], sep = ""))) {
            print("line577- Increased syst value to above capacity")
          }
        }
        ## Release the bed the patient just vacated at current_node.
        tmp5 <-
          get(paste("syst_", sch[roi, "current_node"], sep = "")) - 1 #Takes 1 from the relevant node system
        assign(paste("syst_", sch[roi, "current_node"], sep = ""), tmp5) #Assigns the decreased node system value to the correct system variable
        bed <-
          rbind(bed, c(
            time = sch[roi, "time"],
            bed = get(paste("syst_", sch[roi, "current_node"], sep = "")),
            node = sch[roi, "current_node"],
            rep = j
          ))
        if (get(paste("syst_", sch[roi, "current_node"], sep = "")) <
            0) {
          print("line585- Decreased syst value below 0")
        }
        ## Gather all backfill candidates for the freed bed: both queues of the
        ## node plus any patients blocked while trying to transfer INTO it.
        ## NOTE(review): backfill_loop is a string flag ("TRUE"/"FALSE"), not a
        ## logical; kept as-is for consistency with the rest of the file.
        backfill_loop <- "TRUE"
        backfill <-
          rbind(get(paste("int_queue_", sch[roi, "current_node"], sep = "")), get(paste("ext_queue_", sch[roi, "current_node"], sep =
                                                                                          "")), blocked_mat[c(which(blocked_mat[, "next_node"] == sch[roi, "current_node"])), ]) #Finds everyone who is either blocked or in a queue for the newly undercapacity node
        ## Only backfill when a candidate exists AND the node has spare capacity.
        if (sum(!is.na(backfill[, "patient"])) > 0 &
            get(paste("n_serv_", sch[roi, "current_node"], sep = "")) > get(paste("syst_", sch[roi, "current_node"], sep =
                                                                                    ""))) {
          ## BACKFILL LOOP: one bed at sch[roi,"current_node"] has been vacated.
          ## Each pass promotes the earliest-waiting candidate — which.min over the
          ## combined blocked/int-queue/ext-queue "backfill" table — and exits once
          ## the node is refilled or no candidate remains.
          while (backfill_loop == "TRUE") {
            #Finds the next available person from the queue or blocked node
            if (sch[roi, "event"] != 3) {
              print("line367-Non service_end event triggering backfill loop")
            }
            if (backfill[which.min(backfill[, "time"]), "event"] ==
                3) {
              if (!sum(delay_list[, 1] == backfill[which.min(backfill[, "time"]), "current_node"] &
                       delay_list[, 2] == backfill[which.min(backfill[, "time"]), "next_node"]) >
                  0) {
                ## Case 1a: earliest candidate is a BLOCKED patient (event 3) whose
                ## onward move has NO prescribed delay.  Release them: record their
                ## transfer (events 8, 4), route them onward (events 1, 2), schedule
                ## their new service_end, and move one bed from old node to new.
                record[match(NA, record[, "time"]), ] <-
                  c(sch[roi, "time"],
                    8,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "current_node"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a transfer delay start event to the record for the blocked patient
                record[match(NA, record[, "time"]), ] <-
                  c(sch[roi, "time"],
                    4,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "current_node"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
                tmp99 <-
                  sample(
                    x = onward_nodes,
                    size = 1,
                    prob = get(
                      paste(
                        "onward_nodes_prob_",
                        backfill[which.min(backfill[, "time"]), "next_node"],
                        sep = ""
                      )
                    )
                  )
                record[match(NA, record[, "time"]), ] <-
                  c(sch[roi, "time"],
                    1,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    tmp99,
                    backfill[which.min(backfill[, "time"]), "current_node"]) #Adds an arrival event to the record for the blocked patient
                record[match(NA, record[, "time"]), ] <-
                  c(sch[roi, "time"],
                    2,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    tmp99,
                    backfill[which.min(backfill[, "time"]), "current_node"]) #Adds a service start event to the record for blocked patient
                arr.dist <-
                  serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "next_node"])]
                pars <-
                  as.numeric(unlist(strsplit(
                    as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "next_node"])]), ";"
                  )))
                tmp7 <-
                  do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
                #tmp7<-do.call(paste("serv_dist_",backfill$next_node[which.min(backfill[,"time"])],sep=""),args = list())
                sch[match(NA, sch[, "time"]), ] <-
                  c(
                    min(sch[, "time"], na.rm = T) + tmp7,
                    3,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    tmp99,
                    backfill[which.min(backfill[, "time"]), "current_node"]
                  )
                tmp97 <-
                  get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                              "")) - 1 #Takes 1 from the relevant node system
                assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                               ""),
                       tmp97) #Assigns the decreased node system value to the correct system variable
                bed <-
                  rbind(bed,
                        c(
                          time = sch[roi, "time"],
                          bed = get(paste(
                            "syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                              ""
                          )),
                          node = backfill[which.min(backfill[, "time"]), "current_node"],
                          rep = j
                        ))
                if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                                "")) < 0) {
                  print(
                    "line398- Lowered syst value within backfill loop to below zero which is impossible"
                  )
                }
                ## Drop the longest-blocked matching row from blocked_mat.
                tmp_unblocked_node <-
                  backfill[which.min(backfill[, "time"]), "current_node"]
                tmp_filled_node <-
                  backfill[which.min(backfill[, "time"]), "next_node"]
                tmp_blocked_remove <-
                  which(
                    blocked_mat[, "current_node"] == tmp_unblocked_node &
                      blocked_mat[, "next_node"] == tmp_filled_node
                  )
                blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
                  c(rep(NA, 6))
                tmp9 <-
                  get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                              "")) + 1 #Adds 1 to the relevant node system
                assign(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                               ""),
                       tmp9) #Assigns the increased node system value to the correct system variable
                bed <-
                  rbind(bed,
                        c(
                          time = sch[roi, "time"],
                          bed = get(paste(
                            "syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep = ""
                          )),
                          node = backfill[which.min(backfill[, "time"]), "next_node"],
                          rep = j
                        ))
                if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                                "")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                                                   ""))) {
                  print(
                    "line413- Increased syst value within backfill loop to above capacity"
                  )
                }
                ## Releasing the blocked patient freed a bed at THEIR old node, so
                ## rebuild the candidate table for that node and keep looping.
                backfill <-
                  rbind(get(
                    paste(
                      "int_queue_",
                      tmp_unblocked_node,
                      sep = ""
                    )
                  ),
                  get(
                    paste(
                      "ext_queue_",
                      tmp_unblocked_node,
                      sep = ""
                    )
                  ),
                  blocked_mat[c(which(blocked_mat[, "next_node"] == tmp_unblocked_node)), ]) #Finds everyone who is either blocked or in a queue for the newly undercapacity node
                ## Stop if the node the next candidate would enter is already full.
                if (sum(!is.na(backfill[, "patient"])) > 0) {
                  if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
                    if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                                    "")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                                                        ""))) {
                      backfill_loop = "FALSE"
                    }
                  }
                  else{
                    if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                                    "")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                                                        ""))) {
                      backfill_loop = "FALSE"
                    }
                  }
                }
                if (sum(!is.na(backfill[, "patient"])) == 0) {
                  backfill_loop = "FALSE"
                }
              }
              else if (sum(delay_list[, 1] == backfill[which.min(backfill[, "time"]), "current_node"] &
                           delay_list[, 2] == backfill[which.min(backfill[, "time"]), "next_node"]) >
                       0) {
                ## Case 1b: blocked patient whose onward move HAS a prescribed
                ## delay: record the delay start, schedule a delayed departure
                ## (event 6), and reserve the onward bed immediately below.
                record[match(NA, record[, "time"]), ] <-
                  c(sch[roi, "time"],
                    8,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "current_node"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a transfer delay start event to the record for the blocked patient
                arr.dist <-
                  delay_dist[backfill[which.min(backfill[, "time"]), "current_node"], backfill[which.min(backfill[, "time"]), "next_node"]]
                pars <-
                  as.numeric(unlist(strsplit(
                    as.character(delay_param[backfill[which.min(backfill[, "time"]), "current_node"], backfill[which.min(backfill[, "time"]), "next_node"]]), ";"
                  )))
                tmp2 <-
                  do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
                sch[match(NA, sch[, "time"]), ] <-
                  c(
                    min(sch[, "time"], na.rm = T) + tmp2,
                    6,
                    patient = backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "current_node"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    backfill[which.min(backfill[, "time"]), "previous_node"]
                  )
                if (backfill[which.min(backfill[, "time"]), "next_node"] %in% nodes) {
                  tmp5 <-
                    get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                                "")) + 1 #Adds 1 from the relevant node system
                  assign(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                                 ""),
                         tmp5) #Assigns the increased node system value to the correct system variable
                  bed <-
                    rbind(bed,
                          c(
                            time = sch[roi, "time"],
                            bed = get(paste(
                              "syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep = ""
                            )),
                            node = backfill[which.min(backfill[, "time"]), "next_node"],
                            rep = j
                          ))
                }
                tmp_unblocked_node <-
                  backfill[which.min(backfill[, "time"]), "current_node"]
                tmp_filled_node <-
                  backfill[which.min(backfill[, "time"]), "next_node"]
                tmp_blocked_remove <-
                  which(
                    blocked_mat[, "current_node"] == tmp_unblocked_node &
                      blocked_mat[, "next_node"] == tmp_filled_node
                  )
                blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
                  c(rep(NA, 6))
                backfill_loop = "FALSE"
              }
            }## END OF SERVICE END PART OF BACKFILL LOOP
            else if (backfill[which.min(backfill[, "time"]), "event"] ==
                     1 & backfill[which.min(backfill[, "time"]), "previous_node"] != 0) {
              ## Case 2: earliest candidate is an INTERNAL arrival (event 1 coming
              ## from another node): start their service at the freed node and pop
              ## them from the int_queue.
              record[match(NA, record[, "time"]), ] <-
                c(sch[roi, "time"],
                  2,
                  backfill[which.min(backfill[, "time"]), "patient"],
                  backfill[which.min(backfill[, "time"]), "current_node"],
                  backfill[which.min(backfill[, "time"]), "next_node"],
                  backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a service start event to the record for the next person in the queue
              arr.dist <-
                serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]
              pars <-
                as.numeric(unlist(strsplit(
                  as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]), ";"
                )))
              tmp7 <-
                do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
              #tmp7<-do.call(paste("serv_dist_",backfill$current_node[which.min(backfill[,"time"])],sep=""),args = list()) #Draws a random service time from the distribution
              sch[match(NA, sch[, "time"]), ] <-
                c(
                  min(sch[, "time"], na.rm = T) + tmp7,
                  3,
                  backfill[which.min(backfill[, "time"]), "patient"],
                  backfill[which.min(backfill[, "time"]), "current_node"],
                  backfill[which.min(backfill[, "time"]), "next_node"],
                  backfill[which.min(backfill[, "time"]), "previous_node"]
                ) #Adds a service end event to schedule for the next person in the queue
              queue_find <- "int"  ## internal queue: previous_node != 0
              tmp8 <-
                get(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            "")) #Find the queue in question
              tmp8[which.min(tmp8[, "time"]), ] <-
                c(rep(NA, 6)) #Remove the patient from the queue
              assign(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                             ""),
                     tmp8) #Reassign the queue to the correct variable name
              tmp9 <-
                get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            "")) + 1 #Adds 1 to the relevant node system
              assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                             ""), tmp9) #Assigns the increased node system value to the correct system variable
              bed <-
                rbind(bed,
                      c(
                        time = sch[roi, "time"],
                        bed = get(paste(
                          "syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            ""
                        )),
                        node = backfill[which.min(backfill[, "time"]), "current_node"],
                        rep = j
                      ))
              if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                              "")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                                                 ""))) {
                print(
                  "line455- Increased syst value within backfill loop to above capacity"
                )
              }
              backfill <-
                rbind(get(paste(
                  "int_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                    ""
                )), get(paste(
                  "ext_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                    ""
                )), blocked_mat[c(which(as.vector(blocked_mat[, "next_node"]) == backfill[which.min(backfill[, "time"]), "current_node"])), ]) #Finds everyone who is either blocked for the newly undercapacity queue
              ## The promoted internal arrival freed a spot in their ORIGIN node's
              ## int_queue: if a blocked (event 3) patient is waiting to enter this
              ## node, chain the backfill by releasing them into the int_queue.
              ## NOTE(review): backfill is narrowed to event-3 rows only here.
              if (length(backfill[which(backfill[, "event"] == 3), "event"]) !=
                  0) {
                backfill <- rbind(backfill[which(backfill[, "event"] == 3), ], rep(NA, 6))
                record[match(NA, record[, "time"]), ] <-
                  c(sch[roi, "time"],
                    8,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "current_node"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
                record[match(NA, record[, "time"]), ] <-
                  c(sch[roi, "time"],
                    4,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "current_node"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
                tmp99 <-
                  sample(
                    x = onward_nodes,
                    size = 1,
                    prob = get(
                      paste(
                        "onward_nodes_prob_",
                        backfill[which.min(backfill[, "time"]), "next_node"],
                        sep = ""
                      )
                    )
                  )
                record[match(NA, record[, "time"]), ] <-
                  c(sch[roi, "time"],
                    1,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    tmp99,
                    backfill[which.min(backfill[, "time"]), "current_node"]) #Adds an arrival event to the record for the blocked patient
                tmp4 <-
                  paste("int_queue_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                          "") #Finds the correct queue for the patient to enter
                inter <-
                  get(tmp4) #Creates copy of queue to ammend
                inter[match(NA, inter[, "time"]), ] <-
                  c(backfill[which.min(backfill[, "time"]), "time"],
                    1,
                    backfill[which.min(backfill[, "time"]), "patient"],
                    backfill[which.min(backfill[, "time"]), "next_node"],
                    tmp99,
                    backfill[which.min(backfill[, "time"]), "current_node"])
                assign(tmp4, inter) #Adds the patient arrival record to the correct queue
                if (sum(!is.na(get(
                  paste("int_queue_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                          "")
                ))) / 6 > get(paste("int_queue_max_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                                      ""))) {
                  print((
                    "line480-Internal queue capactity exceeded"
                  ))
                }
                tmp9 <-
                  get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                              "")) - 1 #Subtracts 1 to the relevant node system
                assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                               ""),
                       tmp9)
                bed <-
                  rbind(bed,
                        c(
                          time = sch[roi, "time"],
                          bed = get(paste(
                            "syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                              ""
                          )),
                          node = backfill[which.min(backfill[, "time"]), "current_node"],
                          rep = j
                        ))
                if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                                "")) < 0) {
                  print(
                    "line464- Lowered syst value within backfill loop to below zero"
                  )
                }
                tmp_unblocked_node <-
                  backfill[which.min(backfill[, "time"]), "current_node"]
                tmp_filled_node <-
                  backfill[which.min(backfill[, "time"]), "next_node"]
                tmp_blocked_remove <-
                  which(
                    blocked_mat[, "current_node"] == tmp_unblocked_node &
                      blocked_mat[, "next_node"] == tmp_filled_node
                  )
                blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
                  c(rep(NA, 6))
                backfill <-
                  rbind(get(
                    paste("int_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            "")
                  ),
                  get(
                    paste("ext_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            "")
                  ),
                  blocked_mat[c(which(as.vector(blocked_mat[, "next_node"]) == backfill[which.min(backfill[, "time"]), "current_node"])), ]) #Finds everyone who is either blocked for the newly undercapacity queue
                if (sum(!is.na(backfill[, "patient"])) > 0) {
                  if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
                    if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                                    "")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
                                                        ""))) {
                      backfill_loop = "FALSE"
                    }
                  }
                  else{
                    if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                                    "")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                                                        ""))) {
                      backfill_loop = "FALSE"
                    }
                  }
                }
              }
              else{
                backfill_loop = "FALSE"
              }
            }## END OF ARRIVAL (Internal) PART OF BACKFILL LOOP
            else if (backfill[which.min(backfill[, "time"]), "event"] ==
                     1 & backfill[which.min(backfill[, "time"]), "previous_node"] == 0) {
              ## Case 3: earliest candidate is an EXTERNAL arrival: start their
              ## service and pop them from the ext_queue (mirrors Case 2, but the
              ## ext_queue vacancy does not trigger any chained backfill).
              record[match(NA, record[, "time"]), ] <-
                c(sch[roi, "time"],
                  2,
                  backfill[which.min(backfill[, "time"]), "patient"],
                  backfill[which.min(backfill[, "time"]), "current_node"],
                  backfill[which.min(backfill[, "time"]), "next_node"],
                  backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a service start event to the record for the next person in the queue
              arr.dist <-
                serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]
              pars <-
                as.numeric(unlist(strsplit(
                  as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]), ";"
                )))
              tmp7 <-
                do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
              #tmp7<-do.call(paste("serv_dist_",backfill$current_node[which.min(backfill[,"time"])],sep=""),args = list()) #Draws a random service time from the distribution
              sch[match(NA, sch[, "time"]), ] <-
                c(
                  min(sch[, "time"], na.rm = T) + tmp7,
                  3,
                  backfill[which.min(backfill[, "time"]), "patient"],
                  backfill[which.min(backfill[, "time"]), "current_node"],
                  backfill[which.min(backfill[, "time"]), "next_node"],
                  backfill[which.min(backfill[, "time"]), "previous_node"]
                ) #Adds a service end event to schedule for the next person in the queue
              queue_find <- "ext"
              tmp8 <-
                get(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            "")) #Find the queue in question
              tmp8[which.min(tmp8[, "time"]), ] <-
                c(rep(NA, 6)) #Remove the patient from the queue
              assign(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                             ""),
                     tmp8) #Reassign the queue to the correct variable name
              tmp9 <-
                get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            "")) + 1 #Adds 1 to the relevant node system
              assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                             ""), tmp9) #Assigns the increased node system value to the correct system variable
              bed <-
                rbind(bed,
                      c(
                        time = sch[roi, "time"],
                        bed = get(paste(
                          "syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                            ""
                        )),
                        node = backfill[which.min(backfill[, "time"]), "current_node"],
                        rep = j
                      ))
              if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                              "")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
                                                 ""))) {
                print(
                  "line535- Increased syst value within backfill loop to above capacity"
                )
              }
              backfill_loop = "FALSE"
            }## END OF ARRIVAL (External) PART OF BACKFILL LOOP
            else{
              backfill_loop = "FALSE"
            }
            if (sum(!is.na(backfill[, "patient"])) == 0) {
              backfill_loop = "FALSE"
            }
          }
        }
        ## Guard: no newly scheduled event may pre-date the event being processed.
        if (time_test < min(sch[, "time"], na.rm = T)) {
          print(
            "line776- Event has been addded to the schedule that occurs before current event"
          )
        }
        sch[roi, ] <- c(rep(NA, 6))  ## remove the processed service_end from the schedule
      }
    }
  }
    ###Delayed Departure/transfer###
    ###DELAYED DEPARTURE SCENARIOS ################################################################################
    ###
    ###1. Patient finishes the transition delay and moves on to the next node, or exits the system.
    ###   The freed capacity is then backfilled from whichever candidate has waited longest among:
    ###    1. blocked service_end patients
    ###    2. the internal queue
    ###    3. the external queue
    ###
else if (sch[roi, "event"] == 6) {
record[match(NA, record[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T), 4, sch[roi, "patient"], sch[roi, "current_node"], sch[roi, "next_node"], sch[roi, "previous_node"]) #Adds the delayed departure to the record
tmp5 <-
get(paste("syst_", sch[roi, "current_node"], sep = "")) - 1 #Takes 1 from the relevant node system
assign(paste("syst_", sch[roi, "current_node"], sep = ""), tmp5) #Assigns the decreased node system value to the correct system variable
bed <-
rbind(bed, c(
time = sch[roi, "time"],
bed = get(paste("syst_", sch[roi, "current_node"], sep = "")),
node = sch[roi, "current_node"],
rep = j
))
if (sch[roi, "next_node"] %in% nodes) {
tmp99 <-
sample(x = onward_nodes,
size = 1,
prob = get(paste(
"onward_nodes_prob_", sch[roi, "next_node"], sep = ""
))) #Finds the next node destination after moving node
record[match(NA, record[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T), 1, sch[roi, "patient"], sch[roi, "next_node"], tmp99, sch[roi, "current_node"]) #Adds arrival to the record
record[match(NA, record[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T), 2, sch[roi, "patient"], sch[roi, "next_node"], tmp99, sch[roi, "current_node"]) #Adds service_start to the record
arr.dist <- serv_dist[which(nodes == sch[roi, "next_node"])]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == sch[roi, "next_node"])]), ";"
)))
tmp2 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp2<-do.call(paste("serv_dist_",sch$next_node[roi],sep=""),args = list())
sch[match(NA, sch[, "time"]), ] <-
c(min(sch[, "time"], na.rm = T) + tmp2,
3,
sch[roi, "patient"],
sch[roi, "next_node"],
tmp99,
sch[roi, "current_node"]) #Adds service_end to the sch
}
if (get(paste("syst_", sch[roi, "current_node"], sep = "")) <
0) {
print("line585- Decreased syst value below 0")
}
backfill_loop <- "TRUE"
backfill <-
rbind(get(paste("int_queue_", sch[roi, "current_node"], sep = "")), get(paste("ext_queue_", sch[roi, "current_node"], sep =
"")), blocked_mat[c(which(blocked_mat[, "next_node"] == sch[roi, "current_node"])), ]) #Finds everyone who is either blocked or in a queue for the newly undercapacity node
if (sum(!is.na(backfill[, "patient"])) > 0 &
get(paste("n_serv_", sch[roi, "current_node"], sep = "")) > get(paste("syst_", sch[roi, "current_node"], sep =
""))) {
while (backfill_loop == "TRUE") {
#Finds the next available person from the queue or blocked node
if (sch[roi, "event"] != 6) {
print("line367-Non service_end event triggering backfill loop")
}
# ---- BACKFILL DISPATCH: blocked service-end candidate (event 3) ----------
# `backfill` holds every patient queued for, or blocked while waiting on,
# the node that has just gained a free server; the earliest entry (minimum
# "time") is processed.  Event codes, per the inline comments used
# throughout this file: 1 = arrival, 2 = service start, 3 = service end,
# 4 = departure, 6 = transfer-delay end, 8 = transfer-delay start.
# Per-node occupancy ("syst_<node>"), capacity ("n_serv_<node>") and the
# queues ("int_queue_<node>" / "ext_queue_<node>") are globals accessed
# dynamically via get()/assign().
if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
# No transfer delay is configured for this (current_node -> next_node)
# pair: move the blocked patient straight into service at the next node.
if (!sum(delay_list[, 1] == backfill[which.min(backfill[, "time"]), "current_node"] &
delay_list[, 2] == backfill[which.min(backfill[, "time"]), "next_node"]) >
0) {
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
8,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a transfer delay start event to the record for the blocked patient
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
4,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
# Draw the patient's onward destination from the routing probabilities of
# the node they are about to enter.
tmp99 <-
sample(
x = onward_nodes,
size = 1,
prob = get(
paste("onward_nodes_prob_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")
)
)
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
1,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]) #Adds an arrival event to the record for the blocked patient
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
2,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]) #Adds a service start event to the record for blocked patient
arr.dist <-
serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "next_node"])]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "next_node"])]), ";"
)))
tmp7 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp7<-do.call(paste("serv_dist_",backfill$next_node[which.min(backfill[,"time"])],sep=""),args = list())
# Schedule the service-end (event 3) at the new node.
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp7,
3,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]
)
tmp97 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) - 1 #Takes 1 from the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""), tmp97) #Assigns the decreased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) < 0) {
print(
"line398- Lowered syst value within backfill loop to below zero which is impossible"
)
}
tmp_unblocked_node <-
backfill[which.min(backfill[, "time"]), "current_node"]
tmp_filled_node <-
backfill[which.min(backfill[, "time"]), "next_node"]
# Clear the earliest matching entry in the blocked-patients matrix for
# this (current_node -> next_node) pair.
tmp_blocked_remove <-
which(
blocked_mat[, "current_node"] == tmp_unblocked_node &
blocked_mat[, "next_node"] == tmp_filled_node
)
blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
c(rep(NA, 6))
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) + 1 #Adds 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""), tmp9) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep = ""
)),
node = backfill[which.min(backfill[, "time"]), "next_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
print(
"line413- Increased syst value within backfill loop to above capacity"
)
}
# Rebuild the candidate list for the node that just lost a patient and
# decide whether another backfill pass is possible.
backfill <-
rbind(get(
paste("int_queue_", tmp_unblocked_node, sep = "")
), get(
paste("ext_queue_", tmp_unblocked_node, sep = "")
), blocked_mat[c(which(blocked_mat[, "next_node"] == tmp_unblocked_node)), ]) #Finds everyone who is either blocked or in a queue for the newly undercapacity node
if (sum(!is.na(backfill[, "patient"])) > 0) {
# Stop looping if the node the next candidate would enter is already at
# (or somehow above) its capacity.
if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
else{
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
}
if (sum(!is.na(backfill[, "patient"])) == 0) {
backfill_loop = "FALSE"
}
}
else if (sum(delay_list[, 1] == backfill[which.min(backfill[, "time"]), "current_node"] &
delay_list[, 2] == backfill[which.min(backfill[, "time"]), "next_node"]) >
0) {
# A transfer delay applies to this (current_node -> next_node) pair:
# record the delay start now and schedule the delay end (event 6).
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
8,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a transfer delay start event to the record for the blocked patient
arr.dist <-
delay_dist[backfill[which.min(backfill[, "time"]), "current_node"], backfill[which.min(backfill[, "time"]), "next_node"]]
pars <-
as.numeric(unlist(strsplit(
as.character(delay_param[backfill[which.min(backfill[, "time"]), "current_node"], backfill[which.min(backfill[, "time"]), "next_node"]]), ";"
)))
tmp2 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a transfer delay duration
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp2,
6,
patient = backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]
)
# Occupancy is only tracked for destinations listed in `nodes`; the guard
# avoids get() on a non-existent "syst_" variable for other destinations.
if (backfill[which.min(backfill[, "time"]), "next_node"] %in% nodes) {
tmp5 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) + 1 #Adds 1 from the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""), tmp5) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep = ""
)),
node = backfill[which.min(backfill[, "time"]), "next_node"],
rep = j
))
}
tmp_unblocked_node <-
backfill[which.min(backfill[, "time"]), "current_node"]
tmp_filled_node <-
backfill[which.min(backfill[, "time"]), "next_node"]
tmp_blocked_remove <-
which(
blocked_mat[, "current_node"] == tmp_unblocked_node &
blocked_mat[, "next_node"] == tmp_filled_node
)
blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
c(rep(NA, 6))
backfill_loop = "FALSE"
}
}## END OF SERVICE END PART OF BACKFILL LOOP
else if (backfill[which.min(backfill[, "time"]), "event"] ==
1 & backfill[which.min(backfill[, "time"]), "previous_node"] != 0) {
# Earliest candidate is an INTERNAL arrival (previous_node != 0, i.e. the
# patient came from another node): start their service at this node.
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
2,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a service start event to the record for the next person in the queue
arr.dist <-
serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]), ";"
)))
tmp7 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp7<-do.call(paste("serv_dist_",backfill$current_node[which.min(backfill[,"time"])],sep=""),args = list()) #Draws a random service time from the distribution
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp7,
3,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]
) #Adds a service end event to schedule for the next person in the queue
queue_find <- "int" # internal arrivals wait in the "int_queue_" matrices
tmp8 <-
get(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) #Find the queue in question
tmp8[which.min(tmp8[, "time"]), ] <-
c(rep(NA, 6)) #Remove the patient from the queue
assign(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""),
tmp8) #Reassign the queue to the correct variable name
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) + 1 #Adds 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""), tmp9) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
print(
"line455- Increased syst value within backfill loop to above capacity"
)
}
backfill <-
rbind(get(paste(
"int_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), get(paste(
"ext_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), blocked_mat[c(which(as.vector(blocked_mat[, "next_node"]) == backfill[which.min(backfill[, "time"]), "current_node"])), ]) #Finds everyone who is either blocked for the newly undercapacity queue
if (length(backfill[which(backfill[, "event"] == 3), "event"]) !=
0) {
# Blocked (service-end, event 3) candidates exist upstream: release the
# earliest one into this node's internal queue.
backfill <- rbind(backfill[which(backfill[, "event"] == 3), ], rep(NA, 6))
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
8,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a transfer delay start event to the record for the blocked patient
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
4,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
tmp99 <-
sample(
x = onward_nodes,
size = 1,
prob = get(
paste("onward_nodes_prob_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")
)
)
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
1,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]) #Adds an arrival event to the record for the blocked patient
tmp4 <-
paste("int_queue_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"") #Finds the correct queue for the patient to enter
inter <-
get(tmp4) #Creates copy of queue to ammend
inter[match(NA, inter[, "time"]), ] <-
c(backfill[which.min(backfill[, "time"]), "time"],
1,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"])
assign(tmp4, inter) #Adds the patient arrival record to the correct queue
# Queues are stored as 6-column matrices, hence the division by 6 to
# count occupied rows.
if (sum(!is.na(get(
paste("int_queue_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")
))) / 6 > get(paste("int_queue_max_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
print((
"line480-Internal queue capactity exceeded"
))
}
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) - 1 #Subtracts 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""), tmp9)
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) < 0) {
print("line464- Lowered syst value within backfill loop to below zero")
}
tmp_unblocked_node <-
backfill[which.min(backfill[, "time"]), "current_node"]
tmp_filled_node <-
backfill[which.min(backfill[, "time"]), "next_node"]
tmp_blocked_remove <-
which(
blocked_mat[, "current_node"] == tmp_unblocked_node &
blocked_mat[, "next_node"] == tmp_filled_node
)
blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
c(rep(NA, 6))
backfill <-
rbind(get(paste(
"int_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), get(paste(
"ext_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), blocked_mat[c(which(as.vector(blocked_mat[, "next_node"]) == backfill[which.min(backfill[, "time"]), "current_node"])), ]) #Finds everyone who is either blocked for the newly undercapacity queue
if (sum(!is.na(backfill[, "patient"])) > 0) {
# Stop looping if the node the next candidate would enter is already at
# (or above) its capacity.
if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
else{
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
}
}
else{
backfill_loop = "FALSE"
}
}## END OF ARRIVAL (Internal) PART OF BACKFILL LOOP
else if (backfill[which.min(backfill[, "time"]), "event"] ==
1 & backfill[which.min(backfill[, "time"]), "previous_node"] == 0) {
# Earliest candidate is an EXTERNAL arrival (previous_node == 0 marks
# patients entering from outside the system): start their service here.
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
2,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a service start event to the record for the next person in the queue
arr.dist <-
serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]), ";"
)))
tmp7 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp7<-do.call(paste("serv_dist_",backfill$current_node[which.min(backfill[,"time"])],sep=""),args = list()) #Draws a random service time from the distribution
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp7,
3,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]
) #Adds a service end event to schedule for the next person in the queue
queue_find <- "ext" # external arrivals wait in the "ext_queue_" matrices
tmp8 <-
get(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) #Find the queue in question
tmp8[which.min(tmp8[, "time"]), ] <-
c(rep(NA, 6)) #Remove the patient from the queue
assign(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""),
tmp8) #Reassign the queue to the correct variable name
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) + 1 #Adds 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""), tmp9) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
print(
"line535- Increased syst value within backfill loop to above capacity"
)
}
backfill_loop = "FALSE"
}## END OF ARRIVAL (External) PART OF BACKFILL LOOP
else {
# Fallback: the candidate's event type is not one handled above, so the
# backfill loop terminates.
backfill_loop <- "FALSE"
}
# No candidate patients remain in the rebuilt backfill list: terminate
# the backfill loop.  (all(is.na(...)) is equivalent to counting zero
# non-NA entries in the "patient" column.)
if (all(is.na(backfill[, "patient"]))) {
  backfill_loop <- "FALSE"
}
}
}
# Sanity check: no handler above may have scheduled an event earlier than
# the event currently being processed (`time_test` holds the current
# event's time, captured before dispatch).  `TRUE` is used instead of the
# reassignable shorthand `T`.
if (time_test < min(sch[, "time"], na.rm = TRUE)) {
print(
"line776- Event has been addded to the schedule that occurs before current event"
)
}
# The current event has been fully processed; free its row in the schedule.
sch[roi, ] <- c(rep(NA, 6))
}
### CAPACITY CHANGE###
###CAPACITY CHANGE SCENARIOS ##########################################
### 1. If the capacity has increased, find all patients who can batch join the new node and then cycle through the backfills repeatedly after each batched patient has been processed.
###
### 2. If the capacity has decreased, change the capacity value of the node so no new patients can start until the occupancy decreases below the new capacity. The occupancy should fall away until the new capacity is met.
###
### CAPACITY CHANGE 1 - CAPACITY INCREASED INVOKING BATCH ARRIVALS AND CYCLIC BACKFILL #############################################################
else if (sch[roi, "event"] == 7) {
# ---- CAPACITY CHANGE EVENT (event 7) -----------------------------------
# For a capacity-change event, sch[roi, "next_node"] carries the NEW
# capacity for node sch[roi, "current_node"].  If capacity now exceeds
# occupancy, batch-admit up to (new capacity - occupancy) waiting/blocked
# patients by re-running the same backfill dispatch used elsewhere in
# this file.  A capacity decrease needs no immediate action: occupancy
# drains naturally until it falls below the new limit.
tmp1 <- paste("n_serv_", sch[roi, "current_node"], sep = "")
assign(tmp1, sch[roi, "next_node"])
cap_node <- sch[roi, "current_node"]
if (get(paste("n_serv_", sch[roi, "current_node"], sep = "")) >
get(paste("syst_", sch[roi, "current_node"], sep = ""))) {
# x = number of newly freed servers at the node.
x <-
get(paste("n_serv_", sch[roi, "current_node"], sep = "")) - get(paste("syst_", sch[roi, "current_node"], sep =
""))
# Candidates: patients queued (internally or externally) for this node,
# plus patients blocked while waiting to enter it.
backfill <-
rbind(get(paste("int_queue_", sch[roi, "current_node"], sep = "")), get(paste("ext_queue_", sch[roi, "current_node"], sep =
"")), blocked_mat[c(which(blocked_mat[, "next_node"] == sch[roi, "current_node"])), ]) #Finds everyone who is either blocked or in a queue for the newly undercapacity node
backfill <-
rbind(backfill, rep(x = NA, times = 6), rep(x = NA, times = 6))
backfill <- backfill[order(backfill[, "time"]), ]
# y = number of waiting candidates; batch size is min(freed servers, y).
y <- sum(!is.na(backfill[, "time"]))
xy <- min(x, y)
if (xy > 0) {
backfill <- backfill[c(1:xy), ]
backfill <-
rbind(backfill,
rep(x = NA, times = 6),
rep(x = NA, times = 6))
backfill_loop <- "TRUE"
while (backfill_loop == "TRUE") {
#Finds the next available person from the queue or blocked node
# Blocked service-end candidate (event 3).
if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
# No transfer delay for this node pair: admit straight into service.
if (!sum(delay_list[, 1] == backfill[which.min(backfill[, "time"]), "current_node"] &
delay_list[, 2] == backfill[which.min(backfill[, "time"]), "next_node"]) >
0) {
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
8,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a transfer delay start event to the record for the blocked patient
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
4,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
tmp99 <-
sample(
x = onward_nodes,
size = 1,
prob = get(
paste("onward_nodes_prob_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")
)
)
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
1,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]) #Adds an arrival event to the record for the blocked patient
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
2,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]) #Adds a service start event to the record for blocked patient
arr.dist <-
serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "next_node"])]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "next_node"])]), ";"
)))
tmp7 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp7<-do.call(paste("serv_dist_",backfill$next_node[which.min(backfill[,"time"])],sep=""),args = list())
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp7,
3,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]
)
tmp97 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) - 1 #Takes 1 from the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""),
tmp97) #Assigns the decreased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) < 0) {
print(
"line398- Lowered syst value within backfill loop to below zero which is impossible"
)
}
tmp_unblocked_node <-
backfill[which.min(backfill[, "time"]), "current_node"]
tmp_filled_node <-
backfill[which.min(backfill[, "time"]), "next_node"]
# Clear the earliest matching blocked record for this node pair.
tmp_blocked_remove <-
which(
blocked_mat[, "current_node"] == tmp_unblocked_node &
blocked_mat[, "next_node"] == tmp_filled_node
)
blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
c(rep(NA, 6))
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) + 1 #Adds 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""), tmp9) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep = ""
)),
node = backfill[which.min(backfill[, "time"]), "next_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
print(
"line413- Increased syst value within backfill loop to above capacity"
)
}
# Rebuild the candidate list and decide whether to iterate again.
backfill <-
rbind(get(
paste("int_queue_", tmp_unblocked_node, sep = "")
), get(
paste("ext_queue_", tmp_unblocked_node, sep = "")
), blocked_mat[c(which(blocked_mat[, "next_node"] == tmp_unblocked_node)), ]) #Finds everyone who is either blocked or in a queue for the newly undercapacity node
if (sum(!is.na(backfill[, "patient"])) > 0) {
if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
else{
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
}
if (sum(!is.na(backfill[, "patient"])) == 0) {
backfill_loop = "FALSE"
}
}
else if (sum(delay_list[, 1] == backfill[which.min(backfill[, "time"]), "current_node"] &
delay_list[, 2] == backfill[which.min(backfill[, "time"]), "next_node"]) >
0) {
# A transfer delay applies: record the delay start and schedule its end
# (event 6).
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
8,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a transfer delay start event to the record for the blocked patient
arr.dist <-
delay_dist[backfill[which.min(backfill[, "time"]), "current_node"], backfill[which.min(backfill[, "time"]), "next_node"]]
pars <-
as.numeric(unlist(strsplit(
as.character(delay_param[backfill[which.min(backfill[, "time"]), "current_node"], backfill[which.min(backfill[, "time"]), "next_node"]]), ";"
)))
tmp2 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a transfer delay duration
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp2,
6,
patient = backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]
)
# Occupancy is only tracked for destinations listed in `nodes`.
if (backfill[which.min(backfill[, "time"]), "next_node"] %in% nodes) {
tmp5 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) + 1 #Adds 1 from the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""),
tmp5) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep = ""
)),
node = backfill[which.min(backfill[, "time"]), "next_node"],
rep = j
))
}
tmp_unblocked_node <-
backfill[which.min(backfill[, "time"]), "current_node"]
tmp_filled_node <-
backfill[which.min(backfill[, "time"]), "next_node"]
tmp_blocked_remove <-
which(
blocked_mat[, "current_node"] == tmp_unblocked_node &
blocked_mat[, "next_node"] == tmp_filled_node
)
blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
c(rep(NA, 6))
backfill_loop = "FALSE"
}
}## END OF SERVICE END PART OF BACKFILL LOOP
else if (backfill[which.min(backfill[, "time"]), "event"] ==
1 & backfill[which.min(backfill[, "time"]), "previous_node"] != 0) {
# INTERNAL arrival candidate (came from another node): start service.
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
2,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a service start event to the record for the next person in the queue
arr.dist <-
serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]), ";"
)))
tmp7 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp7<-do.call(paste("serv_dist_",backfill$current_node[which.min(backfill[,"time"])],sep=""),args = list()) #Draws a random service time from the distribution
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp7,
3,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]
) #Adds a service end event to schedule for the next person in the queue
queue_find <- "int" # internal arrivals wait in the "int_queue_" matrices
tmp8 <-
get(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) #Find the queue in question
tmp8[which.min(tmp8[, "time"]), ] <-
c(rep(NA, 6)) #Remove the patient from the queue
assign(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""),
tmp8) #Reassign the queue to the correct variable name
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) + 1 #Adds 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""), tmp9) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
print(
"line455- Increased syst value within backfill loop to above capacity"
)
}
backfill <-
rbind(get(paste(
"int_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), get(paste(
"ext_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), blocked_mat[c(which(as.vector(blocked_mat[, "next_node"]) == backfill[which.min(backfill[, "time"]), "current_node"])), ]) #Finds everyone who is either blocked for the newly undercapacity queue
if (length(backfill[which(backfill[, "event"] == 3), "event"]) !=
0) {
# Blocked (service-end, event 3) candidates exist upstream: release the
# earliest one into this node's internal queue.
backfill <- rbind(backfill[which(backfill[, "event"] == 3), ], rep(NA, 6))
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
8,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a transfer delay start event to the record for the blocked patient
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
4,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a departure event to the record for the blocked patient
tmp99 <-
sample(
x = onward_nodes,
size = 1,
prob = get(
paste("onward_nodes_prob_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")
)
)
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
1,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"]) #Adds an arrival event to the record for the blocked patient
tmp4 <-
paste("int_queue_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"") #Finds the correct queue for the patient to enter
inter <-
get(tmp4) #Creates copy of queue to ammend
inter[match(NA, inter[, "time"]), ] <-
c(backfill[which.min(backfill[, "time"]), "time"],
1,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "next_node"],
tmp99,
backfill[which.min(backfill[, "time"]), "current_node"])
assign(tmp4, inter) #Adds the patient arrival record to the correct queue
# Queues are 6-column matrices, hence the division by 6 to count rows.
if (sum(!is.na(get(
paste("int_queue_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")
))) / 6 > get(paste("int_queue_max_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
print((
"line480-Internal queue capactity exceeded"
))
}
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) - 1 #Subtracts 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""), tmp9)
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) < 0) {
print(
"line464- Lowered syst value within backfill loop to below zero"
)
}
tmp_unblocked_node <-
backfill[which.min(backfill[, "time"]), "current_node"]
tmp_filled_node <-
backfill[which.min(backfill[, "time"]), "next_node"]
tmp_blocked_remove <-
which(
blocked_mat[, "current_node"] == tmp_unblocked_node &
blocked_mat[, "next_node"] == tmp_filled_node
)
blocked_mat[tmp_blocked_remove[which.min(blocked_mat[tmp_blocked_remove, "time"])], ] <-
c(rep(NA, 6))
backfill <-
rbind(get(paste(
"int_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), get(paste(
"ext_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)), blocked_mat[c(which(as.vector(blocked_mat[, "next_node"]) == backfill[which.min(backfill[, "time"]), "current_node"])), ]) #Finds everyone who is either blocked for the newly undercapacity queue
if (sum(!is.na(backfill[, "patient"])) > 0) {
if (backfill[which.min(backfill[, "time"]), "event"] == 3) {
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "next_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
else{
if (get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) <= get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
backfill_loop = "FALSE"
}
}
}
}
else{
backfill_loop = "FALSE"
}
}## END OF ARRIVAL (Internal) PART OF BACKFILL LOOP
else if (backfill[which.min(backfill[, "time"]), "event"] ==
1 & backfill[which.min(backfill[, "time"]), "previous_node"] == 0) {
# EXTERNAL arrival candidate (previous_node == 0): start service here.
record[match(NA, record[, "time"]), ] <-
c(sch[roi, "time"],
2,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]) #Adds a service start event to the record for the next person in the queue
arr.dist <-
serv_dist[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]
pars <-
as.numeric(unlist(strsplit(
as.character(serv_dist_param[which(nodes == backfill[which.min(backfill[, "time"]), "current_node"])]), ";"
)))
tmp7 <-
do.call(get(paste0("r", arr.dist)), as.list(c(1, pars))) #Creates a service time
#tmp7<-do.call(paste("serv_dist_",backfill$current_node[which.min(backfill[,"time"])],sep=""),args = list()) #Draws a random service time from the distribution
sch[match(NA, sch[, "time"]), ] <-
c(
min(sch[, "time"], na.rm = T) + tmp7,
3,
backfill[which.min(backfill[, "time"]), "patient"],
backfill[which.min(backfill[, "time"]), "current_node"],
backfill[which.min(backfill[, "time"]), "next_node"],
backfill[which.min(backfill[, "time"]), "previous_node"]
) #Adds a service end event to schedule for the next person in the queue
queue_find <- "ext" # external arrivals wait in the "ext_queue_" matrices
tmp8 <-
get(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) #Find the queue in question
tmp8[which.min(tmp8[, "time"]), ] <-
c(rep(NA, 6)) #Remove the patient from the queue
assign(paste(queue_find, "_queue_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""),
tmp8) #Reassign the queue to the correct variable name
tmp9 <-
get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) + 1 #Adds 1 to the relevant node system
assign(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""), tmp9) #Assigns the increased node system value to the correct system variable
bed <-
rbind(bed,
c(
time = sch[roi, "time"],
bed = get(paste(
"syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""
)),
node = backfill[which.min(backfill[, "time"]), "current_node"],
rep = j
))
if (get(paste("syst_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
"")) > get(paste("n_serv_", backfill[which.min(backfill[, "time"]), "current_node"], sep =
""))) {
print(
"line535- Increased syst value within backfill loop to above capacity"
)
}
backfill_loop = "FALSE"
}## END OF ARRIVAL (External) PART OF BACKFILL LOOP
else{
# Fallback: unhandled candidate type; terminate the backfill loop.
backfill_loop = "FALSE"
}
if (sum(!is.na(backfill[, "patient"])) == 0) {
backfill_loop = "FALSE"
}
}
}
}
# The capacity-change event has been processed; free its schedule row.
sch[roi, ] <- c(rep(NA, 6))
}
else{
print("Major Error")
}
}
###END - Simulation Cycle###
####OUTPUTS--------------------------------------------------------------
###Create standard record###
record <- as.data.frame(record[which(!is.na(record[, "time"])), ])
record$event[which(record$event == 1)] <- "arrival"
record$event[which(record$event == 2)] <- "service_start"
record$event[which(record$event == 3)] <- "service_end"
record$event[which(record$event == 4)] <- "departure"
record$event[which(record$event == 5)] <- "loss"
record$event[which(record$event == 6)] <- "delayed_departure"
record$event[which(record$event == 7)] <- "capacity_change"
record$event[which(record$event == 8)] <- "transition_start"
record$previous_node[which(record$previous_node == 0)] <-
"external"
#record<-record[which(!is.na(record[,"time"])),]
bed <- bed[-1, ]
### Create the patient & node metrics ######################################################
all_data <-
data.frame(
rep = as.numeric(),
patient = as.numeric(),
node = as.numeric(),
arr = as.numeric(),
wait = as.numeric(),
ss = as.numeric(),
service = as.numeric(),
se = as.numeric(),
delayed = as.numeric(),
tds = as.numeric(),
transition = as.numeric(),
dep = as.numeric()
)
library(tidyverse)
library(data.table)
for (i in 1:length(nodes)) {
arr_node <-
record[which(record$event == "arrival" &
record$current_node == nodes[i]), c(3, 1)]
ss_node <-
record[which(record$event == "service_start" &
record$current_node == nodes[i]), c(3, 1)]
se_node <-
record[which(record$event == "service_end" &
record$current_node == nodes[i]), c(3, 1)]
tds_node <-
record[which(record$event == "transition_start" &
record$current_node == nodes[i]), c(3, 1)]
dep_node <-
record[which(record$event == "departure" &
record$current_node == nodes[i]), c(3, 1)]
arr_node <- arr_node[which(arr_node$time > warm_up), ]
colnames(arr_node)[2] <- "arr"
ss_node <- ss_node[which(ss_node$time > warm_up), ]
colnames(ss_node)[2] <- "ss"
se_node <- se_node[which(se_node$time > warm_up), ]
colnames(se_node)[2] <- "se"
tds_node <- tds_node[which(tds_node$time > warm_up), ]
colnames(tds_node)[2] <- "tds"
dep_node <- dep_node[which(dep_node$time > warm_up), ]
colnames(dep_node)[2] <- "dep"
tmp1 <- merge(arr_node, ss_node, by = "patient", all = TRUE)
tmp2 <- merge(tmp1, se_node, by = "patient", all = TRUE)
tmp3 <- merge(tmp2, tds_node, by = "patient", all = TRUE)
tmp4 <- merge(tmp3, dep_node, by = "patient", all = TRUE)
tmp4 <- cbind(0, i, tmp4)
colnames(tmp4)[1] <- "rep"
colnames(tmp4)[2] <- "node"
tmp4 <- mutate(tmp4, wait = ss - arr)
tmp4 <- mutate(tmp4, service = se - ss)
tmp4 <- mutate(tmp4, delayed = tds - se)
tmp4 <- mutate(tmp4, transition = dep - tds)
tmp4 <-
tmp4[, c(
"rep",
"patient",
"node",
"arr",
"wait",
"ss",
"service",
"se",
"delayed",
"tds",
"transition",
"dep"
)]
tmp4[, "node"] <- node_names[c(tmp4[, "node"]), 2]
all_data <- rbindlist(list(all_data, tmp4))
}
#all_data<-rbindlist(all_data)
rep_node_dat <- all_data %>% group_by(rep, node)
pat_dat <- all_data %>%
group_by(patient, rep) %>%
transmute(
wait = sum(wait),
service = sum(service),
delayed = sum(delayed),
transition = sum(transition)
) %>%
ungroup() %>%
group_by(rep)
#change all of the below to include time units? #####
node_wait <-
as.data.frame(
summarise(
rep_node_dat,
metric = "wait",
mean = mean(wait, na.rm = T),
sd = sd(wait, na.rm = T),
iqr = IQR(wait, na.rm = T),
percentile_95 = quantile(wait, 0.95, na.rm = T)
)
)
node_active_service <-
as.data.frame(
summarise(
rep_node_dat,
metric = "active_service",
mean = mean(service, na.rm = T),
sd = sd(service, na.rm = T),
iqr = IQR(service, na.rm = T),
percentile_95 = quantile(
x = service,
probs = 0.95,
na.rm = TRUE
)
)
)
node_capacity_delay <-
as.data.frame(
summarise(
rep_node_dat,
metric = "capacity_delay",
mean = mean(delayed, na.rm = T),
sd = sd(delayed, na.rm = T),
iqr = IQR(delayed, na.rm = T),
percentile_95 = quantile(
x = delayed,
probs = 0.95,
na.rm = TRUE
)
)
)
node_transition_delay <-
as.data.frame(
summarise(
rep_node_dat,
metric = "transition_delay",
mean = mean(transition, na.rm = T),
sd = sd(transition, na.rm = T),
iqr = IQR(transition, na.rm = T),
percentile_95 = quantile(
x = transition,
probs = 0.95,
na.rm = TRUE
)
)
)
node_length_of_stay <-
as.data.frame(
summarise(
rep_node_dat,
metric = "length_of_stay",
mean = mean(service + delayed + transition, na.rm = T),
sd = sd(service + delayed + transition, na.rm = T),
iqr = IQR(service + delayed + transition, na.rm = T),
percentile_95 = quantile(
x = service + delayed + transition,
probs = 0.95,
na.rm = TRUE
)
)
)
node_delay_to_transfer <-
as.data.frame(
summarise(
rep_node_dat,
metric = "delay_to_transfer",
mean = mean(delayed + transition, na.rm = T),
sd = sd(delayed + transition, na.rm = T),
iqr = IQR(delayed + transition, na.rm = T),
percentile_95 = quantile(
x = delayed + transition,
probs = 0.95,
na.rm = TRUE
)
)
)
pat_wait <-
as.data.frame(
summarise(
pat_dat,
metric = "wait",
mean = mean(wait, na.rm = T),
sd = sd(wait, na.rm = T),
iqr = IQR(wait, na.rm = T),
percentile_95 = quantile(wait, 0.95, na.rm = T)
)
)
pat_active_service <-
as.data.frame(
summarise(
pat_dat,
metric = "service",
mean = mean(service, na.rm = T),
sd = sd(service, na.rm = T),
iqr = IQR(service, na.rm = T),
percentile_95 = quantile(service, 0.95, na.rm = T)
)
)
pat_capacity_delay <-
as.data.frame(
summarise(
pat_dat,
metric = "capacity_delay",
mean = mean(delayed, na.rm = T),
sd = sd(delayed, na.rm = T),
iqr = IQR(delayed, na.rm = T),
percentile_95 = quantile(delayed, 0.95, na.rm = T)
)
)
pat_transition_delay <-
as.data.frame(
summarise(
pat_dat,
metric = "transition_delay",
mean = mean(transition, na.rm = T),
sd = sd(transition, na.rm = T),
iqr = IQR(transition, na.rm = T),
percentile_95 = quantile(transition, 0.95, na.rm = T)
)
)
pat_length_of_stay <-
as.data.frame(
summarise(
pat_dat,
metric = "length_of_stay",
mean = mean(service + delayed + transition, na.rm = T),
sd = sd(service + delayed + transition, na.rm = T),
iqr = IQR(service + delayed + transition, na.rm = T),
percentile_95 = quantile(service + delayed + transition, 0.95, na.rm = T)
)
)
pat_delay_to_transfer <-
as.data.frame(
summarise(
pat_dat,
metric = "delay_to_transfer",
mean = mean(delayed + transition, na.rm = T),
sd = sd(delayed + transition, na.rm = T),
iqr = IQR(delayed + transition, na.rm = T),
percentile_95 = quantile(delayed + transition, 0.95, na.rm = T)
)
)
ttis_dat <- all_data %>%
group_by(patient, rep) %>%
transmute(ttis = max(dep) - min(arr))
total_time_in_system <- ttis_dat %>%
group_by(rep) %>% summarise(
node = "ALL",
metric = "total_time_in_system",
mean = mean(ttis, na.rm = T),
sd = sd(ttis, na.rm = T),
iqr = IQR(ttis, na.rm = T),
percentile_95 = quantile(ttis, 0.95, na.rm = T)
)
#all_metrics<-rbind(total_time_in_system,wait,active_service,length_of_stay,delay_to_transfer)
rm(rep_node_dat, node_dat, pat_dat, all_data)
### Create the rejected rate metrics #########################################################
rejected <-
data.frame(node = numeric(),
metric = character(),
mean = numeric())
for (i in 1:length(nodes)) {
rej_node <-
record[which(record$event == "loss" &
record$current_node == nodes[i]), c(3, 1)]
rej_node <- rej_node[which(rej_node$time > warm_up), ]
colnames(rej_node) <- c("patient", "rejected")
rejected <-
rbind(
rejected,
data.frame(
node = syst_names[i, 2],
metric = "rejected",
mean = nrow(rej_node) / sim_time
)
)
}
### Create the delayed metrics ######################################################
delayed <-
data.frame(
time = numeric(0),
event = numeric(0),
delayed = numeric(0),
node = numeric(0)
)
#print(j)
for (i in 1:length(nodes)) {
rec_temp_total <-
record[which(
record$current_node == nodes[i] &
(
record$event == "service_end" | record$event == "transition_start"
)
), ]
delayed_change <- as.vector(rec_temp_total$event)
delayed_change <-
replace(delayed_change, delayed_change == "service_end", 1)
delayed_change <-
replace(delayed_change,
delayed_change == "transition_start",
-1)
delayed_change <- as.numeric(delayed_change)
delayed_change <- cumsum(delayed_change)
delayed <-
rbind(
delayed,
data.frame(
time = rec_temp_total$time,
event = rec_temp_total$event,
delayed = delayed_change,
node = nodes[i]
)
)
}
delayed <-
cbind(delayed, c(diff(delayed$time), delayed$time[nrow(delayed)]))
colnames(delayed) <-
c("time",
"event",
"delayed",
"node",
"time_at_delayed_level")
if (warm_up > 0) {
delayed <- delayed[-which(delayed$time < warm_up), ]
}
#Calculating the time at each delayed length##
ptd <-
data.frame(
node = numeric(0),
delayed = numeric(),
time_at_delayed_level = numeric(),
percent_time_at_delayed_level = numeric()
)
for (i in 1:length(nodes)) {
node_delayed <- delayed[which(delayed$node == nodes[i]), ]
node_delayed <- node_delayed[-nrow(node_delayed), ]
tmp <-
data.frame(
node = numeric(0),
delayed = numeric(),
time_at_delayed_level = numeric()
)
for (k in unique(node_delayed$delayed)) {
time_at_k <-
sum(node_delayed$time_at_delayed_level[which(node_delayed$delayed == k)])
tmp <-
rbind(
tmp,
data.frame(
node = nodes[i],
delayed = k,
time_at_delayed_level = time_at_k
)
)
}
tmp2 <-
cbind(tmp, (100 * tmp$time_at_delayed_level / (
sum(tmp$time_at_delayed_level)
)))
colnames(tmp2) <-
c(
"node",
"delayed",
"time_at_delayed_level",
"percent_time_at_delayed_level"
)
ptd <- rbind(ptd, tmp2)
}
#rm(tmp_b_length,tmp_b_time,results,avg_b)
### Create the delayed through time data ######################################################
if (nrow(delayed) != 0) {
datd <-
data.frame(
time = delayed$time[which(delayed$time_at_delayed_level != 0)],
delayed = delayed$delayed[which(delayed$time_at_delayed_level != 0)],
node = as.character(delayed$node[which(delayed$time_at_delayed_level != 0)]),
rep = paste("rep", 0)
)
datd$node <- as.numeric(as.character(datd$node))
datd <- datd[order(datd$node), ]
datd$node <- syst_names_single[as.numeric(datd$node)]
datd$node <- as.factor(datd$node)
} else{
datd <- data.frame(
time = 0,
delayed = 0,
node = 0,
rep = paste("rep", 0)
)
datd <- datd[0, ]
}
### Create the queue metrics ######################################################
#Creating the queue tables###
queue <-
data.frame(
time = numeric(0),
event = numeric(0),
queue_length = numeric(0),
node = numeric(0)
)
for (i in 1:length(nodes)) {
rec_temp_total <-
record[which(
record$current_node == nodes[i] &
(
record$event == "arrival" |
record$event == "service_start" | record$event == "loss"
)
), ]
queue_change <- as.vector(rec_temp_total$event)
queue_change <-
replace(queue_change, queue_change == "arrival", 1)
queue_change <-
replace(queue_change, queue_change == "service_start", -1)
queue_change <- replace(queue_change, queue_change == "loss", -1)
queue_change <- as.numeric(queue_change)
queue_change <- cumsum(queue_change)
queue <-
rbind(
queue,
data.frame(
time = rec_temp_total$time,
event = rec_temp_total$event,
queue_length = queue_change,
node = nodes[i]
)
)
}
queue <-
cbind(queue, c(diff(queue$time), queue$time[nrow(queue)]))
colnames(queue) <-
c("time",
"event",
"queue_length",
"node",
"time_at_queue_length")
if (warm_up > 0) {
queue <- queue[-which(queue$time < warm_up), ]
}
#Calculating the time at each queue length##
ptq <-
data.frame(
node = numeric(0),
queue = numeric(),
time_at_queue_length = numeric(),
percent_time_at_queue_length = numeric()
)
for (i in 1:length(nodes)) {
node_queue <- queue[which(queue$node == nodes[i]), ]
node_queue <- node_queue[-nrow(node_queue), ]
tmp <-
data.frame(
node = numeric(0),
queue = numeric(),
time_at_queue_length = numeric()
)
for (k in unique(node_queue$queue)) {
time_at_k <-
sum(node_queue$time_at_queue_length[which(node_queue$queue == k)])
tmp <-
rbind(tmp,
data.frame(
node = nodes[i],
queue = k,
time_at_queue_length = time_at_k
))
}
tmp2 <-
cbind(tmp, (100 * tmp$time_at_queue_length / (
sum(tmp$time_at_queue_length)
)))
colnames(tmp2) <-
c("node",
"queue",
"time_at_queue_length",
"percent_time_at_queue_length")
ptq <- rbind(ptq, tmp2)
}
### Create the queue through time data ######################################################
if (nrow(queue) != 0) {
datq <-
data.frame(
time = queue$time[which(queue$time_at_queue_length != 0)],
queue_length = queue$queue_length[which(queue$time_at_queue_length != 0)],
node = as.character(queue$node[which(queue$time_at_queue_length != 0)]),
rep = paste("rep", 0)
)
datq$node <- as.numeric(as.character(datq$node))
datq <- datq[order(datq$node), ]
datq$node <- syst_names_single[as.numeric(datq$node)]
datq$node <- as.factor(datq$node)
} else{
datq <- data.frame(
time = 0,
queue_length = 0,
node = 0,
rep = paste("rep", 0)
)
datq <- datq[0, ]
}
### Create the occupancy metrics ######################################################
occupancy <-
data.frame(
time = numeric(0),
event = numeric(0),
occupancy = numeric(0),
occupancy_prop = numeric(0),
capacity = numeric(),
remainder_time = numeric(0),
node = numeric(0)
)
for (i in 1:length(nodes)) {
rec_temp_total <-
record[which(
record$current_node == nodes[i] &
(
record$event == "service_start" | record$event == "departure"
)
), ]
cap_cal_input_temp <-
cap_cal_input[which(cap_cal_input$node == i), ]
if (nrow(cap_cal_input_temp) == 1) {
cap_cal_input_temp$end <- sim_time
}
occupancy_change <- as.vector(rec_temp_total$event)
occupancy_change <-
replace(occupancy_change,
occupancy_change == "service_start",
1)
occupancy_change <-
replace(occupancy_change,
occupancy_change == "departure",
-1)
occupancy_change <- as.numeric(occupancy_change)
occupancy_change <- cumsum(occupancy_change)
rt <-
rec_temp_total$time %% max(cap_cal_input_temp$end[which(cap_cal_input_temp$node ==
i)])
tmp <-
data.frame(
time = rec_temp_total$time,
event = rec_temp_total$event,
occupancy = occupancy_change,
occupancy_prop = NA,
capacity = NA,
remainder_time = rt,
node = nodes[i]
)
for (time_gap in 1:nrow(cap_cal_input_temp)) {
tmp$capacity[which(
tmp$remainder_time >= cap_cal_input_temp$start[time_gap] &
tmp$remainder_time < cap_cal_input_temp$end[time_gap]
)] <- cap_cal_input_temp$value[time_gap]
}
tmp$occupancy_prop = tmp$occupancy / tmp$capacity
tmp$occupancy_prop[which(tmp$occupancy_prop == Inf |
tmp$occupancy_prop >= 1)] <- 1
tmp$occupancy_prop <- tmp$occupancy_prop * 100
occupancy <- rbind(occupancy, tmp)
}
occupancy <-
cbind(occupancy, c(diff(occupancy$time), occupancy$time[nrow(occupancy)]))
colnames(occupancy) <-
c(
"time",
"event",
"occupancy",
"occupancy_prop",
"capacity",
"remainder_time",
"node",
"time_at_occupancy"
)
if (warm_up > 0) {
occupancy <- occupancy[-which(occupancy$time < warm_up), ]
}
#Calculating the time at each occupancy##
pto <-
data.frame(
node = numeric(0),
occupancy = numeric(),
time_at_occupancy = numeric(),
percent_time_at_occupancy = numeric()
)
for (i in 1:length(nodes)) {
node_occupancy <- occupancy[which(occupancy$node == nodes[i]), ]
node_occupancy <- node_occupancy[-nrow(node_occupancy), ]
tmp <-
data.frame(
node = numeric(0),
occupancy = numeric(),
time_at_occupancy = numeric()
)
for (k in unique(node_occupancy$occupancy)) {
time_at_k <-
sum(node_occupancy$time_at_occupancy[which(node_occupancy$occupancy == k)])
tmp <-
rbind(
tmp,
data.frame(
node = nodes[i],
occupancy = k,
time_at_occupancy = time_at_k
)
)
}
tmp2 <-
cbind(tmp, (100 * tmp$time_at_occupancy / (sum(
tmp$time_at_occupancy
))))
colnames(tmp2) <-
c("node",
"occupancy",
"time_at_occupancy",
"percent_time_at_occupancy")
pto <- rbind(pto, tmp2)
}
#rm(results,tmp,node_occupancy,time_at_k,tmp2)
### Create the occupancy through time data ######################################################
if (nrow(occupancy) != 0) {
dato <-
data.frame(
time = occupancy$time[which(occupancy$time_at_occupancy != 0)],
occupancy = occupancy$occupancy[which(occupancy$time_at_occupancy != 0)],
node = occupancy$node[which(occupancy$time_at_occupancy != 0)],
rep = paste("rep", 0)
)
dato$node <- as.numeric(as.character(dato$node))
dato <- dato[order(dato$node), ]
dato$node <- syst_names_single[as.numeric(dato$node)]
dato$node <- as.factor(dato$node)
} else{
dato <- data.frame(
time = 0,
occupancy = 0,
node = 0,
rep = paste("rep", 0)
)
dato <- dato[0, ]
}
### Create the transition metrics ######################################################
transition <-
data.frame(
time = numeric(0),
event = numeric(0),
transition = numeric(0),
node = numeric(0)
)
for (i in 1:length(nodes)) {
rec_temp_total <-
record[which(
record$current_node == nodes[i] &
(
record$event == "transition_start" | record$event == "departure"
)
), ]
transition_change <- as.vector(rec_temp_total$event)
transition_change <-
replace(transition_change,
transition_change == "transition_start",
1)
transition_change <-
replace(transition_change,
transition_change == "departure",
-1)
transition_change <- as.numeric(transition_change)
transition_change <- cumsum(transition_change)
transition <-
rbind(
transition,
data.frame(
time = rec_temp_total$time,
event = rec_temp_total$event,
transition = transition_change,
node = nodes[i]
)
)
}
transition <-
cbind(transition, c(diff(transition$time), transition$time[nrow(transition)]))
colnames(transition) <-
c("time",
"event",
"transition",
"node",
"time_at_transition_level")
if (warm_up > 0) {
transition <- transition[-which(transition$time < warm_up), ]
}
#rm(rec_temp_total,results,transition_change)
#Calculating the time at each transition length##
ptt <-
data.frame(
node = numeric(0),
transition = numeric(),
time_at_transition_level = numeric(),
percent_time_at_transition_level = numeric()
)
for (i in 1:length(nodes)) {
node_transition <- transition[which(transition$node == nodes[i]), ]
node_transition <- node_transition[-nrow(node_transition), ]
tmp <-
data.frame(
node = numeric(0),
transition = numeric(),
time_at_transition_level = numeric()
)
for (k in unique(node_transition$transition)) {
time_at_k <-
sum(node_transition$time_at_transition_level[which(node_transition$transition ==
k)])
tmp <-
rbind(
tmp,
data.frame(
node = nodes[i],
transition = k,
time_at_transition_level = time_at_k
)
)
}
tmp2 <-
cbind(tmp, (100 * tmp$time_at_transition_level / (
sum(tmp$time_at_transition_level)
)))
colnames(tmp2) <-
c(
"node",
"transition",
"time_at_transition_level",
"percent_time_at_transition_level"
)
ptt <- rbind(ptt, tmp2)
}
#rm(results,tmp,node_transition,time_at_k,tmp2)
### Create the transition through time data ######################################################
if (nrow(transition) != 0) {
datt <-
data.frame(
time = transition$time[which(transition$time_at_transition_level != 0)],
transition = transition$transition[which(transition$time_at_transition_level !=
0)],
node = as.character(transition$node[which(transition$time_at_transition_level !=
0)]),
rep = paste("rep", 0)
)
datt$node <- as.numeric(as.character(datt$node))
datt <- datt[order(datt$node), ]
datt$node <- syst_names_single[as.numeric(datt$node)]
datt$node <- as.factor(datt$node)
} else{
datt <- data.frame(
time = 0,
transition = 0,
node = 0,
rep = paste("rep", 0)
)
datt <- datt[0, ]
}
### Create the occ_bed metrics ######################################################
occ_bed <-
data.frame(time = numeric(0),
occ_bed = numeric(0),
node = numeric(0))
for (i in 1:length(nodes)) {
rec_temp_total <- bed[which(bed$node == nodes[i]), ]
occ_bed <-
rbind(
occ_bed,
data.frame(
time = rec_temp_total$time,
occ_bed = rec_temp_total$bed,
node = nodes[i]
)
)
}
occ_bed <-
cbind(occ_bed, c(diff(occ_bed$time), occ_bed$time[nrow(occ_bed)]))
colnames(occ_bed) <-
c("time", "occ_bed", "node", "time_at_occ_bed_level")
if (warm_up > 0) {
occ_bed <- occ_bed[-which(occ_bed$time < warm_up), ]
}
occ_bed
#### % time at bed occupancy level#
ptb <-
data.frame(
node = numeric(0),
occ_bed = numeric(),
time_at_occ_bed_level = numeric(),
percent_time_at_occ_bed_level = numeric()
)
for (i in 1:length(nodes)) {
node_occ_bed <- occ_bed[which(occ_bed$node == nodes[i]), ]
node_occ_bed <- node_occ_bed[-nrow(node_occ_bed), ]
tmp <-
data.frame(
node = numeric(0),
occ_bed = numeric(),
time_at_occ_bed_level = numeric()
)
for (k in unique(node_occ_bed$occ_bed)) {
time_at_k <-
sum(node_occ_bed$time_at_occ_bed_level[which(node_occ_bed$occ_bed == k)])
tmp <-
rbind(
tmp,
data.frame(
node = nodes[i],
occ_bed = k,
time_at_occ_bed_level = time_at_k
)
)
}
tmp2 <-
cbind(tmp, (100 * tmp$time_at_occ_bed_level / (
sum(tmp$time_at_occ_bed_level)
)))
colnames(tmp2) <-
c(
"node",
"occ_bed",
"time_at_occ_bed_level",
"percent_time_at_occ_bed_level"
)
ptb <- rbind(ptb, tmp2)
}
### Create the occ_bed through time data ######################################################
if (nrow(occ_bed) != 0) {
datb <-
data.frame(
time = occ_bed$time[which(occ_bed$time_at_occ_bed_level != 0)],
occ_bed = occ_bed$occ_bed[which(occ_bed$time_at_occ_bed_level != 0)],
node = occ_bed$node[which(occ_bed$time_at_occ_bed_level != 0)],
rep = paste("rep", 0)
)
datb$node <- as.numeric(as.character(datb$node))
datb <- datb[order(datb$node), ]
datb$node <- syst_names_single[as.numeric(datb$node)]
datb$node <- as.factor(datb$node)
} else{
datb <- data.frame(
time = 0,
occ_bed = 0,
node = 0,
rep = paste("rep", 0)
)
datb <- datb[0, ]
}
### Create the multi data & through time uniform ######################################################
dato_multi <- cbind(dato, rep(x = "occupancy", nrow(dato)))
colnames(dato_multi) <- c("time", "value", "node", "rep", "metric")
datd_multi <- cbind(datd, rep(x = "delayed", nrow(datd)))
colnames(datd_multi) <- c("time", "value", "node", "rep", "metric")
datb_multi <- cbind(datb, rep(x = "occ_bed", nrow(datb)))
colnames(datb_multi) <- c("time", "value", "node", "rep", "metric")
datt_multi <- cbind(datt, rep(x = "transition", nrow(datt)))
colnames(datt_multi) <- c("time", "value", "node", "rep", "metric")
datq_multi <- cbind(datq, rep(x = "queue", nrow(datq)))
colnames(datq_multi) <- c("time", "value", "node", "rep", "metric")
library(data.table)
library(tidyverse)
multi <-
rbindlist(list(
datb_multi,
datd_multi,
dato_multi,
datt_multi,
datq_multi
))
multi_spread <- spread(data = multi,
key = metric,
value = value)
multi_spread_uniform <-
data.frame(
time = numeric(),
node = numeric(),
rep = numeric(),
occ_bed = numeric(),
delayed = numeric(),
occupancy = numeric(),
transition = numeric(),
queue = numeric()
)
uniform_time <- seq(from = warm_up,
to = t.period,
by = 0.5)
for (i in nodes) {
base <-
multi_spread[which(as.character(multi_spread$node) == node_names[i, 2]), ] ## Reassigns names
uniform_ts <-
data.frame(
time = uniform_time,
node = node_names[i, 2],
rep = NA,
occ_bed = NA,
delayed = NA,
occupancy = NA,
transition = NA,
queue = NA
)
uniform_ts <-
rbindlist(list(base, uniform_ts),
fill = T,
use.names = T)
uniform_ts <- uniform_ts[order(uniform_ts$time), ]
uniform_ts <-
uniform_ts %>% fill(rep, occ_bed, delayed, occupancy, transition, queue) ## tidyr::fill function changes the NA values to the previous value down the df
uniform_ts <-
uniform_ts %>% fill(rep,
occ_bed,
delayed,
occupancy,
transition,
queue,
.direction = "up") ## tidyr::fill function changes the NA values to the pervious value up the df
multi_spread_uniform <-
rbindlist(list(multi_spread_uniform, uniform_ts), use.names = T)
}
multi_spread_uniform <-
multi_spread_uniform[which(multi_spread_uniform$time %in% uniform_time), ]
x <-
list(
nodes,
warm_up,
sim_time,
reps,
exits,
syst_names,
node_wait,
node_active_service,
node_length_of_stay,
node_delay_to_transfer,
pat_wait,
pat_active_service,
pat_length_of_stay,
pat_delay_to_transfer,
total_time_in_system,
rejected,
ptd,
ptq,
pto,
ptt,
ptb,
multi_spread_uniform,
delay_list,
cap_cal_input,
arr_cal_input,
node_capacity_delay,
node_transition_delay,
pat_capacity_delay,
pat_transition_delay
)
names(x) <-
c(
"nodes",
"warm_up",
"sim_time",
"reps",
"exits",
"syst_names",
"node_wait",
"node_active_service",
"node_length_of_stay",
"node_delay_to_transfer",
"pat_wait",
"pat_active_service",
"pat_length_of_stay",
"pat_delay_to_transfer",
"total_time_in_system",
"rejected",
"ptd",
"ptq",
"pto",
"ptt",
"ptb",
"multi_spread_uniform",
"delay_list",
"node_capacity_delay",
"node_transition_delay",
"pat_capacity_delay",
"pat_transition_delay"
)
rm(
record,
datd,
datq,
dato,
datt,
datb,
datq_multi,
datd_multi,
dato_multi,
datt_multi,
datb_multi,
multi,
multi_spread
)
#gc()
return(x)
}
)
#stopCluster(cl)
#### PLOTS AND SIMULATION LEVEL METRICS #########
# Unpack run configuration from the FIRST replication's output list.
# These values (node ids, warm-up length, horizon, rep count, exit nodes,
# node name lookup, delay/capacity/arrival calendars) are identical across
# replications, so reading them from outputs[[1]] is sufficient.
# NOTE(review): positions [[1]]..[[6]] and [[23]]..[[25]] must stay in sync
# with the names(x) ordering assigned inside the worker function above.
nodes <- outputs[[1]][[1]]
warm_up <- outputs[[1]][[2]]
sim_time <- outputs[[1]][[3]]
reps <- outputs[[1]][[4]]
exits <- outputs[[1]][[5]]
syst_names <- outputs[[1]][[6]]
delay_list <- outputs[[1]][[23]]
cap_cal_input <- outputs[[1]][[24]]
arr_cal_input <- outputs[[1]][[25]]
# Pull one metric table per replication out of the list of per-rep output
# lists. Single-bracket x[i] keeps each element wrapped in a length-1 list,
# so each sapply call yields a plain list of data.frames (one per rep) that
# rbindlist() can combine later.
# NOTE(review): the numeric indices must match the names(x) ordering set in
# the worker; a reordering there silently mislabels every metric here.
node_wait <- sapply(outputs, function(x)
x[7])
node_active_service <- sapply(outputs, function(x)
x[8])
node_length_of_stay <- sapply(outputs, function(x)
x[9])
node_delay_to_transfer <- sapply(outputs, function(x)
x[10])
pat_wait <- sapply(outputs, function(x)
x[11])
pat_active_service <- sapply(outputs, function(x)
x[12])
pat_length_of_stay <- sapply(outputs, function(x)
x[13])
pat_delay_to_transfer <- sapply(outputs, function(x)
x[14])
total_time_in_system <- sapply(outputs, function(x)
x[15])
rejected <- sapply(outputs, function(x)
x[16])
ptd <- sapply(outputs, function(x)
x[17])
ptq <- sapply(outputs, function(x)
x[18])
pto <- sapply(outputs, function(x)
x[19])
ptt <- sapply(outputs, function(x)
x[20])
ptb <- sapply(outputs, function(x)
x[21])
multi_spread_uniform <- sapply(outputs, function(x)
x[22])
node_capacity_delay <- sapply(outputs, function(x)
x[26])
node_transition_delay <- sapply(outputs, function(x)
x[27])
pat_capacity_delay <- sapply(outputs, function(x)
x[28])
pat_transition_delay <- sapply(outputs, function(x)
x[29])
# Free the (potentially very large) raw per-rep output list now that every
# metric has been extracted.
rm(outputs)
### Create the Simulation Summary Metrics ######################################################
# Stamp the replication number onto every per-rep table before they are
# row-bound, so rows remain attributable to their replication afterwards.
# The !is.na(...) guards skip replications whose table is empty/NA-filled
# (e.g. when the warm-up period filtered out all events for that rep);
# writing into an all-NA placeholder would fabricate a rep id for no data.
# NOTE(review): `rejected` and `multi_spread_uniform` are checked via
# [1, 1] because their first column is not named `rep`.
for (rep_fill in 1:reps) {
if (!is.na(node_wait[[rep_fill]]$rep[1])) {
node_wait[[rep_fill]]$rep <- rep_fill
}
if (!is.na(node_active_service[[rep_fill]]$rep[1])) {
node_active_service[[rep_fill]]$rep <- rep_fill
}
if (!is.na(node_capacity_delay[[rep_fill]]$rep[1])) {
node_capacity_delay[[rep_fill]]$rep <- rep_fill
}
if (!is.na(node_transition_delay[[rep_fill]]$rep[1])) {
node_transition_delay[[rep_fill]]$rep <- rep_fill
}
if (!is.na(node_length_of_stay[[rep_fill]]$rep[1])) {
node_length_of_stay[[rep_fill]]$rep <- rep_fill
}
if (!is.na(node_delay_to_transfer[[rep_fill]]$rep[1])) {
node_delay_to_transfer[[rep_fill]]$rep <- rep_fill
}
if (!is.na(pat_wait[[rep_fill]]$rep[1])) {
pat_wait[[rep_fill]]$rep <- rep_fill
}
if (!is.na(pat_active_service[[rep_fill]]$rep[1])) {
pat_active_service[[rep_fill]]$rep <- rep_fill
}
if (!is.na(pat_capacity_delay[[rep_fill]]$rep[1])) {
pat_capacity_delay[[rep_fill]]$rep <- rep_fill
}
if (!is.na(pat_transition_delay[[rep_fill]]$rep[1])) {
pat_transition_delay[[rep_fill]]$rep <- rep_fill
}
if (!is.na(pat_length_of_stay[[rep_fill]]$rep[1])) {
pat_length_of_stay[[rep_fill]]$rep <- rep_fill
}
if (!is.na(pat_delay_to_transfer[[rep_fill]]$rep[1])) {
pat_delay_to_transfer[[rep_fill]]$rep <- rep_fill
}
if (!is.na(total_time_in_system[[rep_fill]]$rep[1])) {
total_time_in_system[[rep_fill]]$rep <- rep_fill
}
if (!is.na(rejected[[rep_fill]][1, 1])) {
rejected[[rep_fill]]$rep <- rep_fill
}
if (!is.na(multi_spread_uniform[[rep_fill]][1, 1])) {
# Stored as a label ("rep 1", "rep 2", ...) because this table feeds the
# through-time plots, where rep is used as a factor, not a number.
multi_spread_uniform[[rep_fill]]$rep <- paste0("rep ", rep_fill)
}
}
# Collapse each list-of-per-rep-data.frames into a single long table
# (rows from all replications stacked; the `rep` column added above keeps
# them distinguishable). data.table::rbindlist is used rather than
# do.call(rbind, ...) for speed on many replications.
node_wait <- rbindlist(node_wait)
node_active_service <- rbindlist(node_active_service)
node_capacity_delay <- rbindlist(node_capacity_delay)
node_transition_delay <- rbindlist(node_transition_delay)
node_length_of_stay <- rbindlist(node_length_of_stay)
node_delay_to_transfer <- rbindlist(node_delay_to_transfer)
pat_wait <- rbindlist(pat_wait)
pat_active_service <- rbindlist(pat_active_service)
pat_capacity_delay <- rbindlist(pat_capacity_delay)
pat_transition_delay <- rbindlist(pat_transition_delay)
pat_length_of_stay <- rbindlist(pat_length_of_stay)
pat_delay_to_transfer <- rbindlist(pat_delay_to_transfer)
total_time_in_system <- rbindlist(total_time_in_system)
rejected <- rbindlist(rejected)
# Node-level cross-replication summaries: for each node, average each
# per-rep statistic (mean, sd, IQR, 95th percentile) over the replications.
# Note this is a mean-of-per-rep-statistics, not a statistic of the pooled
# data — sd/iqr/percentile_95 here are averages of per-rep values.
node_wait_summary <-
node_wait %>% group_by(node) %>% summarise(
metric = "wait",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
node_active_service_summary <-
node_active_service %>% group_by(node) %>% summarise(
metric = "active_service",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
node_capacity_delay_summary <-
node_capacity_delay %>% group_by(node) %>% summarise(
metric = "capacity_delay",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
node_transition_delay_summary <-
node_transition_delay %>% group_by(node) %>% summarise(
metric = "transition_delay",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
node_length_of_stay_summary <-
node_length_of_stay %>% group_by(node) %>% summarise(
metric = "length_of_stay",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
node_delay_to_transfer_summary <-
node_delay_to_transfer %>% group_by(node) %>% summarise(
metric = "delay_to_transfer",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
# Patient-level (whole-pathway) cross-replication summaries: same
# mean-of-per-rep-statistics pattern as the node summaries above, but with
# no group_by — each table collapses to a single overall row.
# NOTE(review): the metric label here is "service" while the node-level
# equivalent uses "active_service"; confirm downstream consumers expect
# this inconsistency before relying on the label.
pat_wait_summary <-
pat_wait %>% summarise(
metric = "wait",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
pat_active_service_summary <-
pat_active_service %>% summarise(
metric = "active_service",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
pat_capacity_delay_summary <-
pat_capacity_delay %>% summarise(
metric = "capacity_delay",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
pat_transition_delay_summary <-
pat_transition_delay %>% summarise(
metric = "transition_delay",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
pat_length_of_stay_summary <-
pat_length_of_stay %>% summarise(
metric = "length_of_stay",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
pat_delay_to_transfer_summary <-
pat_delay_to_transfer %>% summarise(
metric = "delay_to_transfer",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
# Overall time-in-system, averaged across replications (single row).
total_time_in_system_summary <-
total_time_in_system %>% summarise(
metric = "total_time_in_system",
mean = mean(mean, na.rm = T),
sd = mean(sd, na.rm = T),
iqr = mean(iqr, na.rm = T),
percentile_95 = mean(percentile_95, na.rm = T)
) %>% as.data.frame()
# Stack the patient-level tables into two presentation tables:
#  - pat_rep_summary: one row per (rep, metric) — per-replication detail.
#  - pat_total_summary: one row per metric — cross-replication averages.
pat_rep_summary <-
rbind(
pat_wait,
pat_active_service,
pat_capacity_delay,
pat_transition_delay,
pat_length_of_stay,
pat_delay_to_transfer
)
pat_total_summary <-
rbind(
pat_wait_summary,
pat_active_service_summary,
pat_capacity_delay_summary,
pat_transition_delay_summary,
pat_length_of_stay_summary,
pat_delay_to_transfer_summary
)
# Mean rejection rate per node across replications (each per-rep `mean`
# is already a rate: rejections / sim_time).
rejected_summary <-
rejected %>% group_by(node) %>% summarise(mean = mean(mean)) %>% as.data.frame()
### Create the delayed metrics ######################################################
#Calculating the time at each delayed length##
# Aggregate the per-rep "percent time at each delayed level" tables into a
# single cross-replication distribution per node:
#  1) stack per-rep tables and coerce factor columns back to numeric;
#  2) drop the per-rep percent column (col 4) — it is recomputed over the
#     pooled time below;
#  3) average total time at each (node, delayed) level over replications;
#  4) recompute percent (and cumulative percent) of time at each level;
#  5) map numeric node ids onto display names for plotting.
ptd_total <- as.data.frame(rbindlist(ptd))
rownames(ptd_total) <- c()
ptd_total$delayed <- as.numeric(as.character(ptd_total$delayed))
ptd_total$time_at_delayed_level <-
as.numeric(as.character(ptd_total$time_at_delayed_level))
ptd_total <- ptd_total[, -4]
# Per-(node, delayed) time averaged over reps; mutate() keeps duplicate
# rows, so unique() afterwards reduces to one row per group.
ptd_time <-
ptd_total %>% group_by(node, delayed) %>% mutate(time_at_delayed_level =
sum(time_at_delayed_level) / reps)
ptd_time <- as.data.frame(ptd_time)
ptd_time <- unique(ptd_time)
ptd_time$node <- as.numeric(as.character(ptd_time$node))
ptd_time <- ptd_time[order(ptd_time$node, ptd_time$delayed), ]
# Percent of observed time spent at each delayed level, per node.
ptd_percent <-
ptd_time %>% group_by(node) %>% transmute(
delayed,
percent_time_at_delayed_level = 100 * time_at_delayed_level / sum(time_at_delayed_level)
)
# Cumulative percent relies on the node/delayed ordering applied above.
ptd_percent <-
ptd_percent %>% group_by(node) %>% transmute(
delayed,
percent_time_at_delayed_level,
cumulative_percent_time_at_delayed_level = cumsum(percent_time_at_delayed_level)
)
ptd_percent <- as.data.frame(ptd_percent)
ptd_percent <- unique(ptd_percent)
ptd_percent$node <- as.numeric(as.character(ptd_percent$node))
ptd_percent <-
ptd_percent[order(ptd_percent$node, ptd_percent$delayed), ]
# Replace numeric node ids with display names; factor levels fixed to
# syst_names_single so facet/legend order is stable across runs.
ptd_percent$node <- as.factor(ptd_percent$node)
ptd_percent$node <-
syst_names_single[as.numeric(as.character(ptd_percent$node))]
ptd_percent$node <- as.factor(ptd_percent$node)
ptd_percent$node <-
factor(x = ptd_percent$node, levels = syst_names_single)
ptd_plot <-
ggplot(data = ptd_percent %>% mutate(node=str_replace_all(node,pattern="_",replacement=" ")),
aes(x = delayed, y = percent_time_at_delayed_level, fill = node)) +
geom_bar(stat = "identity", position = position_dodge()) +
facet_grid(node ~ ., labeller=label_wrap_gen(15)) +
theme_bw() +
#geom_text(aes(label=ifelse(signif(x = ptd_percent$percent_time_at_delayed_level,digits = 3)<100,signif(x = ptd_percent$percent_time_at_delayed_level,digits = 2),"")),vjust=-0.5,position = position_dodge(width=0.9), size=3)+ coord_cartesian(ylim = c(0,100))+
xlab("# concurrently delayed") +
ylab("% time at delayed level") +
theme(legend.position="none")
if (max(ptd_percent$delayed) == 1) {
ptd_plot <-
ptd_plot + scale_x_discrete(limits = c(min(ptd_percent$delayed), max(ptd_percent$delayed)))
}
#ptd_plot
#Delay Percentiles##
# For each node, interpolate the delayed level reached at selected cumulative-
# time percentiles (50/80/85/90/95/99/100th) using linear interpolation.
dpercentiles <- matrix(nrow = length(nodes), ncol = 8)
for (i in as.numeric(nodes)) {
  # approx() needs at least two distinct x values to interpolate.
  if (length(unique(
    ptd_percent$cumulative_percent_time_at_delayed_level[which(ptd_percent$node ==
                                                                 syst_names_single[i])]
  )) >= 2) {
    tmp <-
      approx(
        x = ptd_percent$cumulative_percent_time_at_delayed_level[which(ptd_percent$node ==
                                                                         syst_names_single[i])],
        y = ptd_percent$delayed[which(ptd_percent$node == syst_names_single[i])],
        xout = c(50, 80, 85, 90, 95, 99, 100),
        # ties = min: take the lowest level on plateaus;
        # rule = 2: clamp requests outside the data range to the endpoints.
        ties = min,
        rule = 2
      )
    tmp$y <- round(tmp$y, digits = 2)
    dpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], as.numeric(tmp$y))
  }
  # No data at all for this node -> percentiles unknown (NA).
  else if (length(unique(
    ptd_percent$cumulative_percent_time_at_delayed_level[which(ptd_percent$node ==
                                                                 syst_names_single[i])]
  )) == 0) {
    dpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = NA, times = 7))
  }
  # Exactly one distinct value (constant level) -> report zeros.
  else{
    dpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = 0, times = 7))
  }
}
colnames(dpercentiles) <-
  c("node",
    "50th",
    "80th",
    "85th",
    "90th",
    "95th",
    "99th",
    "100th")
#Calculating the average delayed per node per replicate & then over the simulation per node##
# (The commented-out cluster code below is a remnant of a parallel version.)
#cl<-makeCluster(17)
#clusterExport(cl = cl,varlist = c("ptd","nodes","node_names"))
# Per replicate: time-weighted average delayed level per node.
avg_delayed <- lapply(
  X = ptd,
  FUN = function(ptd) {
    tmp <-
      ptd %>% group_by(node) %>% summarise(avg_delayed = sum(delayed * time_at_delayed_level) /
                                             sum(time_at_delayed_level)) %>% as.data.frame()
    # Translate node id to display name (2nd column of node_names).
    tmp$node <- node_names[tmp$node, 2]
    tmp
  }
)
#stopCluster(cl)
# Across all replicates: same time-weighted average using the pooled times.
avg_delayed_summary <-
  ptd_time %>% group_by(node) %>% summarise(avg_delayed = sum(delayed * time_at_delayed_level) /
                                              sum(time_at_delayed_level)) %>% as.data.frame()
avg_delayed_summary$node <- node_names[avg_delayed_summary$node, 2]
# avg_delayed<-data.frame(abind(avg_delayed,along = 1))
# avg_delayed$avg_delayed<-as.numeric(as.character(avg_delayed$avg_delayed))
#rm(tmp_b_length,tmp_b_time,results,avg_b)
### Create the queue metrics ######################################################
#Calculating the time at each queue length##
# Same pipeline as the delayed metrics above, applied to queue lengths (ptq).
ptq_total <- as.data.frame(rbindlist(ptq))
rownames(ptq_total) <- c()
ptq_total$queue <- as.numeric(as.character(ptq_total$queue))
ptq_total$time_at_queue_length <-
  as.numeric(as.character(ptq_total$time_at_queue_length))
# Drop the 4th column (presumably the replicate id — TODO confirm).
ptq_total <- ptq_total[, -4]
# Average time at each (node, queue) length over the replicates.
ptq_time <-
  ptq_total %>% group_by(node, queue) %>% mutate(time_at_queue_length = sum(time_at_queue_length) /
                                                   reps)
ptq_time <- as.data.frame(ptq_time)
ptq_time <- unique(ptq_time)
ptq_time$node <- as.numeric(as.character(ptq_time$node))
ptq_time <- ptq_time[order(ptq_time$node, ptq_time$queue), ]
# % of time per node, then cumulative % (used for percentiles below).
ptq_percent <-
  ptq_time %>% group_by(node) %>% transmute(
    queue,
    percent_time_at_queue_length = 100 * time_at_queue_length / sum(time_at_queue_length)
  )
ptq_percent <-
  ptq_percent %>% group_by(node) %>% transmute(
    queue,
    percent_time_at_queue_length,
    cumulative_percent_time_at_queue_length = cumsum(percent_time_at_queue_length)
  )
ptq_percent <- as.data.frame(ptq_percent)
ptq_percent <- unique(ptq_percent)
ptq_percent$node <- as.numeric(as.character(ptq_percent$node))
ptq_percent <-
  ptq_percent[order(ptq_percent$node, ptq_percent$queue), ]
# Map node ids to display names; fix facet ordering.
ptq_percent$node <- as.factor(ptq_percent$node)
ptq_percent$node <-
  syst_names_single[as.numeric(as.character(ptq_percent$node))]
ptq_percent$node <- as.factor(ptq_percent$node)
ptq_percent$node <-
  factor(x = ptq_percent$node, levels = syst_names_single)
# Faceted bar chart of % time at each queue length.
ptq_plot <-
  ggplot(data = ptq_percent %>% mutate(node=str_replace_all(node,pattern="_",replacement=" ")),
         aes(x = queue, y = percent_time_at_queue_length, fill = node)) +
  geom_bar(stat = "identity", position = position_dodge()) +
  facet_grid(node ~ ., scales = "free", labeller=label_wrap_gen(15)) +
  theme_bw() +
  xlab("# in queue") +
  ylab("% time at queue level") +
  theme(legend.position="none")
# Pin discrete axis limits when only queue levels 0/1 occur.
if (max(ptq_percent$queue) == 1) {
  ptq_plot <-
    ptq_plot + scale_x_discrete(limits = c(min(ptq_percent$queue), max(ptq_percent$queue)))
}
#ptq_plot
#Queue Percentiles##
# Per node: queue length at the 50/80/85/90/95/99/100th cumulative-time
# percentiles, via linear interpolation (mirrors the delayed-percentiles loop).
qpercentiles <- matrix(nrow = length(nodes), ncol = 8)
for (i in as.numeric(nodes)) {
  # Need at least two distinct cumulative values to interpolate.
  if (length(unique(
    ptq_percent$cumulative_percent_time_at_queue_length[which(ptq_percent$node ==
                                                                syst_names_single[i])]
  )) >= 2) {
    tmp <-
      approx(
        x = ptq_percent$cumulative_percent_time_at_queue_length[which(ptq_percent$node ==
                                                                        syst_names_single[i])],
        y = ptq_percent$queue[which(ptq_percent$node == syst_names_single[i])],
        xout = c(50, 80, 85, 90, 95, 99, 100),
        # ties = min picks the lowest level on plateaus; rule = 2 clamps
        # out-of-range requests to the nearest endpoint.
        ties = min,
        rule = 2
      )
    tmp$y <- round(tmp$y, digits = 2)
    qpercentiles[as.numeric(i), ] <- c(syst_names_single[i], tmp$y)
  }
  # No data for this node -> NA row.
  else if (length(unique(
    ptq_percent$cumulative_percent_time_at_queue_length[which(ptq_percent$node ==
                                                                syst_names_single[i])]
  )) == 0) {
    qpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = NA, times = 7))
  }
  # Single distinct value (constant queue) -> zeros.
  else {
    qpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = 0, times = 7))
  }
}
colnames(qpercentiles) <-
  c("node",
    "50th",
    "80th",
    "85th",
    "90th",
    "95th",
    "99th",
    "100th")
#Calculating the average queue per node per replicate & then over the simulation per node##
#
#cl<-makeCluster(17)
#clusterExport(cl = cl,varlist = c("ptq","nodes","node_names"))
# Per replicate: time-weighted average queue length per node.
avg_queue <- lapply(
  X = ptq,
  FUN = function(ptq) {
    #library(tidyverse)
    tmp <-
      ptq %>% group_by(node) %>% summarise(avg_queue = sum(queue * time_at_queue_length) /
                                             sum(time_at_queue_length)) %>% as.data.frame()
    # Translate node id to display name (2nd column of node_names).
    tmp$node <- node_names[tmp$node, 2]
    tmp
  }
)
#stopCluster(cl)
# Across replicates: same time-weighted average on the pooled times.
avg_queue_summary <-
  ptq_time %>% group_by(node) %>% summarise(avg_queue = sum(queue * time_at_queue_length) /
                                              sum(time_at_queue_length)) %>% as.data.frame()
avg_queue_summary$node <- node_names[avg_queue_summary$node, 2]
### Create the occupancy metrics ###############################################
#Calculating the time at each occupancy##
# Same pipeline as the delayed/queue metrics, applied to patient occupancy (pto).
pto_total <- as.data.frame(rbindlist(pto))
rownames(pto_total) <- c()
pto_total$occupancy <-
  as.numeric(as.character(pto_total$occupancy))
pto_total$time_at_occupancy <-
  as.numeric(as.character(pto_total$time_at_occupancy))
# Drop the 4th column (presumably the replicate id — TODO confirm).
pto_total <- pto_total[, -4]
# Average time at each (node, occupancy) level over the replicates.
pto_time <-
  pto_total %>% group_by(node, occupancy) %>% mutate(time_at_occupancy = sum(time_at_occupancy) /
                                                       reps)
pto_time <- as.data.frame(pto_time)
pto_time <- unique(pto_time)
pto_time$node <- as.numeric(as.character(pto_time$node))
pto_time <- pto_time[order(pto_time$node, pto_time$occupancy), ]
# % of time per node, then cumulative % (used for percentiles below).
pto_percent <-
  pto_time %>% group_by(node) %>% transmute(
    occupancy,
    percent_time_at_occupancy = 100 * time_at_occupancy / sum(time_at_occupancy)
  )
pto_percent <-
  pto_percent %>% group_by(node) %>% transmute(
    occupancy,
    percent_time_at_occupancy,
    cumulative_percent_time_at_occupancy = cumsum(percent_time_at_occupancy)
  )
pto_percent <- as.data.frame(pto_percent)
pto_percent <- unique(pto_percent)
pto_percent$node <- as.numeric(as.character(pto_percent$node))
pto_percent <-
  pto_percent[order(pto_percent$node, pto_percent$occupancy), ]
# Map node ids to display names; fix facet ordering.
pto_percent$node <- as.factor(pto_percent$node)
pto_percent$node <-
  syst_names_single[as.numeric(as.character(pto_percent$node))]
pto_percent$node <- as.factor(pto_percent$node)
pto_percent$node <-
  factor(x = pto_percent$node, levels = syst_names_single)
# Faceted bar chart of % time at each occupancy level.
pto_plot <-
  ggplot(data = pto_percent %>% mutate(node=str_replace_all(node,pattern="_",replacement=" ")),
         aes(x = occupancy, y = percent_time_at_occupancy, fill = node)) +
  geom_bar(stat = "identity", position = position_dodge()) +
  facet_grid(node ~ .,labeller=label_wrap_gen(15)) +
  theme_bw() +
  #geom_text(aes(label=ifelse(signif(x = pto_percent$percent_time_at_occupancy,digits = 3)<100,signif(x = pto_percent$percent_time_at_occupancy,digits = 2),"")),vjust=-0.5,position = position_dodge(width=0.9), size=3)+ coord_cartesian(ylim = c(0,100))+
  xlab("Patient Occupancy") +
  ylab("% time at patient occupancy level") +
  theme(legend.position="none")
# Pin discrete axis limits when only occupancy levels 0/1 occur.
if (max(pto_percent$occupancy) == 1) {
  pto_plot <-
    pto_plot + scale_x_discrete(limits = c(
      min(pto_percent$occupancy),
      max(pto_percent$occupancy)
    ))
}
#pto_plot
#Occupancy Percentiles##
# Per node: occupancy level at the 50/80/85/90/95/99/100th cumulative-time
# percentiles, via linear interpolation.  Mirrors the equivalent loops for
# the delayed / queue / transition / bed-occupancy metrics.
opercentiles <- matrix(nrow = length(nodes), ncol = 8)
for (i in as.numeric(nodes)) {
  # Hoist the repeated subset: cumulative % curve for this node.
  node_cum <-
    pto_percent$cumulative_percent_time_at_occupancy[which(pto_percent$node ==
                                                             syst_names_single[i])]
  if (length(unique(node_cum)) >= 2) {
    # At least two distinct cumulative values: interpolation is well-defined.
    # ties = min picks the lowest level on plateaus; rule = 2 clamps
    # out-of-range requests to the nearest endpoint.
    tmp <-
      approx(
        x = node_cum,
        y = pto_percent$occupancy[which(pto_percent$node == syst_names_single[i])],
        xout = c(50, 80, 85, 90, 95, 99, 100),
        ties = min,
        rule = 2
      )
    tmp$y <- round(tmp$y, digits = 2)
    opercentiles[as.numeric(i), ] <- c(syst_names_single[i], tmp$y)
  }
  else if (length(unique(node_cum)) == 0) {
    # BUGFIX: this branch previously tested "== 2", which is unreachable
    # (already caught by ">= 2" above), so nodes with NO occupancy data fell
    # through to the zero-filled branch.  All sibling percentile loops
    # (delayed/queue/transition/bed) test "== 0" here: no data -> NA row.
    opercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = NA, times = 7))
  }
  else{
    # Exactly one distinct cumulative value (constant occupancy) -> zeros,
    # matching the sibling loops.
    opercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = 0, times = 7))
  }
}
colnames(opercentiles) <-
  c("node",
    "50th",
    "80th",
    "85th",
    "90th",
    "95th",
    "99th",
    "100th")
#Calculating the average occupancy per node per replicate##
#Calculating the average delayed per node per replicate & then over the simulation per node##
#cl<-makeCluster(17)
#clusterExport(cl = cl,varlist = c("pto","nodes","node_names"))
# Per replicate: time-weighted average occupancy per node.
avg_occupancy <- lapply(
  X = pto,
  FUN = function(pto) {
    #library(tidyverse)
    tmp <-
      pto %>% group_by(node) %>% summarise(avg_occupancy = sum(occupancy * time_at_occupancy) /
                                             sum(time_at_occupancy)) %>% as.data.frame()
    # Translate node id to display name (2nd column of node_names).
    tmp$node <- node_names[tmp$node, 2]
    tmp
  }
)
#stopCluster(cl)
# Across replicates: same time-weighted average on the pooled times.
avg_occupancy_summary <-
  pto_time %>% group_by(node) %>% summarise(avg_occupancy = sum(occupancy *
                                                                  time_at_occupancy) / sum(time_at_occupancy)) %>% as.data.frame()
avg_occupancy_summary$node <-
  node_names[avg_occupancy_summary$node, 2]
#rm(tmp_occupancy,tmp_o_time,results,avg_o)
### Create the transition metrics ######################################################
#Calculating the time at each transition length##
# Same pipeline as the other metric sections, applied to concurrent
# transitions (ptt).
ptt_total <- as.data.frame(rbindlist(ptt))
rownames(ptt_total) <- c()
ptt_total$transition <-
  as.numeric(as.character(ptt_total$transition))
ptt_total$time_at_transition_level <-
  as.numeric(as.character(ptt_total$time_at_transition_level))
# Drop the 4th column (presumably the replicate id — TODO confirm).
ptt_total <- ptt_total[, -4]
# Average time at each (node, transition) level over the replicates.
ptt_time <-
  ptt_total %>% group_by(node, transition) %>% mutate(time_at_transition_level =
                                                        sum(time_at_transition_level) / reps)
ptt_time <- as.data.frame(ptt_time)
ptt_time <- unique(ptt_time)
ptt_time$node <- as.numeric(as.character(ptt_time$node))
ptt_time <- ptt_time[order(ptt_time$node, ptt_time$transition), ]
# % of time per node, then cumulative % (used for percentiles below).
ptt_percent <-
  ptt_time %>% group_by(node) %>% transmute(
    transition,
    percent_time_at_transition_level = 100 * time_at_transition_level / sum(time_at_transition_level)
  )
ptt_percent <-
  ptt_percent %>% group_by(node) %>% transmute(
    transition,
    percent_time_at_transition_level,
    cumulative_percent_time_at_transition_level = cumsum(percent_time_at_transition_level)
  )
ptt_percent <- as.data.frame(ptt_percent)
ptt_percent <- unique(ptt_percent)
ptt_percent$node <- as.numeric(as.character(ptt_percent$node))
ptt_percent <-
  ptt_percent[order(ptt_percent$node, ptt_percent$transition), ]
# Map node ids to display names; fix facet ordering.
ptt_percent$node <- as.factor(ptt_percent$node)
ptt_percent$node <-
  syst_names_single[as.numeric(as.character(ptt_percent$node))]
ptt_percent$node <- as.factor(ptt_percent$node)
ptt_percent$node <-
  factor(x = ptt_percent$node, levels = syst_names_single)
# Faceted bar chart of % time at each transition level.
ptt_plot <-
  ggplot(data = ptt_percent %>% mutate(node=str_replace_all(node,pattern="_",replacement=" ")),
         aes(x = transition, y = percent_time_at_transition_level, fill = node)) +
  geom_bar(stat = "identity", position = position_dodge()) +
  facet_grid(node ~ .,labeller=label_wrap_gen(15)) +
  theme_bw() +
  #geom_text(aes(label=ifelse(signif(x = ptt_percent$percent_time_at_transition_level,digits = 3)<100,signif(x = ptt_percent$percent_time_at_transition_level,digits = 2),"")),vjust=-0.5,position = position_dodge(width=0.9), size=3)+ coord_cartesian(ylim = c(0,100))+
  xlab("# concurrently in transition") +
  ylab("% time at transition level") +
  theme(legend.position="none")
# Pin discrete axis limits when only transition levels 0/1 occur.
if (max(ptt_percent$transition) == 1) {
  ptt_plot <-
    ptt_plot + scale_x_discrete(limits = c(
      min(ptt_percent$transition),
      max(ptt_percent$transition)
    ))
}
#ptt_plot
#transition Percentiles##
# Per node: transition level at the 50/80/85/90/95/99/100th cumulative-time
# percentiles, via linear interpolation (mirrors the sibling percentile loops).
tpercentiles <- matrix(nrow = length(nodes), ncol = 8)
for (i in as.numeric(nodes)) {
  # Need at least two distinct cumulative values to interpolate.
  if (length(unique(
    ptt_percent$cumulative_percent_time_at_transition_level[which(ptt_percent$node ==
                                                                    syst_names_single[i])]
  )) >= 2) {
    tmp <-
      approx(
        x = ptt_percent$cumulative_percent_time_at_transition_level[which(ptt_percent$node ==
                                                                            syst_names_single[i])],
        y = ptt_percent$transition[which(ptt_percent$node == syst_names_single[i])],
        xout = c(50, 80, 85, 90, 95, 99, 100),
        # ties = min: lowest level on plateaus; rule = 2: clamp to endpoints.
        ties = min,
        rule = 2
      )
    tmp$y <- round(tmp$y, digits = 2)
    tpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], as.numeric(tmp$y))
  }
  # No data for this node -> NA row.
  else if (length(unique(
    ptt_percent$cumulative_percent_time_at_transition_level[which(ptt_percent$node ==
                                                                    syst_names_single[i])]
  )) == 0) {
    tpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = NA, times = 7))
  }
  # Single distinct value (constant level) -> zeros.
  else{
    tpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = 0, times = 7))
  }
}
colnames(tpercentiles) <-
  c("node",
    "50th",
    "80th",
    "85th",
    "90th",
    "95th",
    "99th",
    "100th")
#Calculating the average transition per node per replicate##
#cl<-makeCluster(17)
#clusterExport(cl = cl,varlist = c("ptt","nodes","node_names"))
# Per replicate: time-weighted average number of concurrent transitions
# per node.
avg_transition <- lapply(
  X = ptt,
  FUN = function(ptt) {
    #library(tidyverse)
    tmp <-
      ptt %>% group_by(node) %>% summarise(
        avg_transition = sum(transition * time_at_transition_level) / sum(time_at_transition_level)
      ) %>% as.data.frame()
    # Translate node id to display name (2nd column of node_names).
    tmp$node <- node_names[tmp$node, 2]
    tmp
  }
)
#stopCluster(cl)
# Across replicates: same time-weighted average on the pooled times.
avg_transition_summary <-
  ptt_time %>% group_by(node) %>% summarise(
    avg_transition = sum(transition * time_at_transition_level) / sum(time_at_transition_level)
  ) %>% as.data.frame()
avg_transition_summary$node <-
  node_names[avg_transition_summary$node, 2]
#
# #rm(tmp_t_length,tmp_t_time,results,avg_t)
#
### Create the Bed Occupancy metrics #######################################################
#
#### % time at bed occupancy level###
# Same pipeline as the other metric sections, applied to bed occupancy (ptb).
ptb_total <- as.data.frame(rbindlist(ptb))
rownames(ptb_total) <- c()
ptb_total$occ_bed <- as.numeric(as.character(ptb_total$occ_bed))
ptb_total$time_at_occ_bed_level <-
  as.numeric(as.character(ptb_total$time_at_occ_bed_level))
# Drop the 4th column (presumably the replicate id — TODO confirm).
ptb_total <- ptb_total[, -4]
# Average time at each (node, occ_bed) level over the replicates.
ptb_time <-
  ptb_total %>% group_by(node, occ_bed) %>% mutate(time_at_occ_bed_level =
                                                     sum(time_at_occ_bed_level) / reps)
ptb_time <- as.data.frame(ptb_time)
ptb_time <- unique(ptb_time)
ptb_time$node <- as.numeric(as.character(ptb_time$node))
ptb_time <- ptb_time[order(ptb_time$node, ptb_time$occ_bed), ]
# % of time per node, then cumulative % (used for percentiles below).
ptb_percent <-
  ptb_time %>% group_by(node) %>% transmute(
    occ_bed,
    percent_time_at_occ_bed_level = 100 * time_at_occ_bed_level / sum(time_at_occ_bed_level)
  )
ptb_percent <-
  ptb_percent %>% group_by(node) %>% transmute(
    occ_bed,
    percent_time_at_occ_bed_level,
    cumulative_percent_time_at_occ_bed_level = cumsum(percent_time_at_occ_bed_level)
  )
ptb_percent <- as.data.frame(ptb_percent)
ptb_percent <- unique(ptb_percent)
ptb_percent$node <- as.numeric(as.character(ptb_percent$node))
ptb_percent <-
  ptb_percent[order(ptb_percent$node, ptb_percent$occ_bed), ]
# Map node ids to display names; fix facet ordering.
ptb_percent$node <- as.factor(ptb_percent$node)
ptb_percent$node <-
  syst_names_single[as.numeric(as.character(ptb_percent$node))]
ptb_percent$node <- as.factor(ptb_percent$node)
ptb_percent$node <-
  factor(x = ptb_percent$node, levels = syst_names_single)
# Faceted bar chart of % time at each bed occupancy level.
ptb_plot <-
  ggplot(data = ptb_percent %>% mutate(node=str_replace_all(node,pattern="_",replacement=" ")),
         aes(x = occ_bed, y = percent_time_at_occ_bed_level, fill = node)) +
  geom_bar(stat = "identity", position = position_dodge()) +
  facet_grid(node ~ .,labeller=label_wrap_gen(15)) +
  theme_bw() +
  #geom_text(aes(label=ifelse(signif(x = ptb_percent$percent_time_at_occ_bed_level,digits = 3)<100,signif(x = ptb_percent$percent_time_at_occ_bed_level,digits = 2),"")),vjust=-0.5,position = position_dodge(width=0.9), size=3)+ coord_cartesian(ylim = c(0,100))+
  xlab("Bed Occupancy") +
  ylab("% time at bed occupancy level") +
  theme(legend.position="none")
# Pin discrete axis limits when only bed occupancy levels 0/1 occur.
if (max(ptb_percent$occ_bed) == 1) {
  ptb_plot <-
    ptb_plot + scale_x_discrete(limits = c(min(ptb_percent$occ_bed), max(ptb_percent$occ_bed)))
}
#ptq_plot
#Occ_Bed Percentiles##
# Per node: bed occupancy at the 50/80/85/90/95/99/100th cumulative-time
# percentiles, via linear interpolation (mirrors the sibling percentile loops).
bpercentiles <- matrix(nrow = length(nodes), ncol = 8)
for (i in as.numeric(nodes)) {
  # Need at least two distinct cumulative values to interpolate.
  if (length(unique(
    ptb_percent$cumulative_percent_time_at_occ_bed_level[which(ptb_percent$node ==
                                                                 syst_names_single[i])]
  )) >= 2) {
    tmp <-
      approx(
        x = ptb_percent$cumulative_percent_time_at_occ_bed_level[which(ptb_percent$node ==
                                                                         syst_names_single[i])],
        y = ptb_percent$occ_bed[which(ptb_percent$node == syst_names_single[i])],
        xout = c(50, 80, 85, 90, 95, 99, 100),
        # ties = min: lowest level on plateaus; rule = 2: clamp to endpoints.
        ties = min,
        rule = 2
      )
    tmp$y <- round(tmp$y, digits = 2)
    bpercentiles[as.numeric(i), ] <- c(syst_names_single[i], tmp$y)
    #bpercentiles[as.numeric(i),]<-as.numeric(bpercentiles[as.numeric(i),])
  }
  # No data for this node -> NA row.
  else if (length(unique(
    ptb_percent$cumulative_percent_time_at_occ_bed_level[which(ptb_percent$node ==
                                                                 syst_names_single[i])]
  )) == 0) {
    bpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = NA, times = 7))
  }
  # Single distinct value (constant level) -> zeros.
  else {
    bpercentiles[as.numeric(i), ] <-
      c(syst_names_single[i], rep(x = 0, times = 7))
  }
}
colnames(bpercentiles) <-
  c("node",
    "50th",
    "80th",
    "85th",
    "90th",
    "95th",
    "99th",
    "100th")
#Calculating the average occ_bed per node per replicate##
#
#cl<-makeCluster(17)
#clusterExport(cl = cl,varlist = c("ptb","nodes","node_names"))
# Per replicate: time-weighted average bed occupancy per node.
avg_occ_bed <- lapply(
  X = ptb,
  FUN = function(ptb) {
    #library(tidyverse)
    tmp <-
      ptb %>% group_by(node) %>% summarise(avg_occ_bed = sum(occ_bed * time_at_occ_bed_level) /
                                             sum(time_at_occ_bed_level)) %>% as.data.frame()
    # Translate node id to display name (2nd column of node_names).
    tmp$node <- node_names[tmp$node, 2]
    tmp
  }
)
#stopCluster(cl)
# Across replicates: same time-weighted average on the pooled times.
avg_occ_bed_summary <-
  ptb_time %>% group_by(node) %>% summarise(avg_occ_bed = sum(occ_bed * time_at_occ_bed_level) /
                                              sum(time_at_occ_bed_level)) %>% as.data.frame()
avg_occ_bed_summary$node <- node_names[avg_occ_bed_summary$node, 2]
###### MULTI DATA TABLE ########################################################################
# Combine the per-replicate through-time tables, shift the clock so t=0 is
# the end of the warm-up period, and reshape to long format (one row per
# time/node/metric).
# NOTE(review): gather() is superseded by tidyr::pivot_longer() — left as-is
# to avoid behavioral risk.
multi_spread_uniform <- rbindlist(multi_spread_uniform)
through_time_uniform <- multi_spread_uniform
through_time_uniform$time <- through_time_uniform$time - warm_up
rm(multi_spread_uniform)
through_time_uniform_gather <-
  gather(
    through_time_uniform,
    key = "metric",
    value = "value",
    occ_bed,
    delayed,
    occupancy,
    transition,
    queue
  )
# Mean and 50/95/99% percentile bands across replicates at each time point,
# per node and metric.
avg_through_time <- through_time_uniform_gather %>%
  group_by(time, node, metric) %>%
  summarise(
    mean = mean(value, na.rm = T),
    L99 = quantile(value, 0.005, na.rm = T),
    U99 = quantile(value, 0.995, na.rm = T),
    L95 = quantile(value, 0.025, na.rm = T),
    U95 = quantile(value, 0.975, na.rm = T),
    L50 = quantile(value, 0.25, na.rm = T),
    U50 = quantile(value, 0.75, na.rm = T)
  ) %>%
  as.data.frame()
# Fix metric/node ordering for the facet grid below.
avg_through_time$metric <-
  factor(
    avg_through_time$metric,
    levels = c('queue', 'occupancy', 'occ_bed', 'delayed', 'transition')
  )
avg_through_time$node <-
  factor(x = avg_through_time$node, levels = syst_names_single)
# Mean line with layered 99/95/50% percentile ribbons, faceted metric x node.
avg_through_time_plot <- ggplot(avg_through_time %>% mutate(node=str_replace_all(node,pattern="_",replacement=" "))) +
  geom_ribbon(aes(
    x = time,
    ymin = L99,
    ymax = U99,
    fill = "99%"
  ), alpha = 0.25) +
  geom_ribbon(aes(
    x = time,
    ymin = L95,
    ymax = U95,
    fill = "95%"
  ), alpha = 0.25) +
  geom_ribbon(aes(
    x = time,
    ymin = L50,
    ymax = U50,
    fill = "50%"
  ), alpha = 0.25) +
  scale_fill_manual(
    name = "Percentiles",
    values = c(
      "99%" = "grey75",
      "95%" = "grey60",
      "50%" = "grey45"
    ),
    breaks = c("99%", "95%", "50%")
  ) +
  geom_line(aes(
    x = time,
    y = mean,
    colour = metric
  ), size = 1.1) +
  facet_grid(metric ~ node, scales = "free",labeller=label_wrap_gen(15)) +
  ylab("Mean # of patients") +
  xlab(paste0("Time (", input$time_unit, ")")) +
  theme_bw() +
  theme(panel.spacing = unit(1, "lines"),
        axis.text.x = element_text(size = 7),
        legend.position="none") +
  expand_limits(y = 0)
# Subset the long-format through-time data for plotting.
# `%in%` replaces the original chains of `== "rep 1" | == "rep 2" | ...`,
# which is clearer and equivalent under which() (and NA-safe, since %in%
# never yields NA).
# through_time_mini: first 5 replicates, feeds the per-metric trace plots.
through_time_mini <-
  through_time_uniform_gather[which(
    through_time_uniform_gather$rep %in% paste("rep", 1:5)
  ), ]
through_time_mini$node <-
  factor(x = through_time_mini$node, levels = syst_names_single)
# total_in_system: first 10 replicates, restricted to the occupancy and
# queue metrics, then summed per time point per replicate to give the total
# number of patients anywhere in the system.
total_in_system <-
  through_time_uniform_gather[which(
    through_time_uniform_gather$rep %in% paste("rep", 1:10)
  ), ]
total_in_system$node <-
  factor(x = total_in_system$node, levels = syst_names_single)
total_in_system <-
  total_in_system[which(total_in_system$metric %in% c("occupancy", "queue")), ]
# Keep only the time / rep / value columns (positions 1, 3, 5).
total_in_system <- total_in_system[, c(1, 3, 5)]
total_in_system_dat <-
  total_in_system %>% group_by(time, rep) %>% summarise("value" = sum(value)) %>% as.data.frame()
# The full gathered table is large; free it now that the subsets exist.
rm(through_time_uniform_gather)
#the plot "o" ####
# Step-trace plots of the first 5 replicates, faceted node x replicate:
# o = occupancy, q = queue, d = delayed, t = transition, b = bed occupancy,
# tisp = total-in-system across the first 10 replicates.
o <-
  ggplot(data = through_time_mini[which(through_time_mini$metric == "occupancy"), ] %>%
           mutate(node=str_replace_all(node,pattern="_",replacement=" "))) +
  geom_step(aes(x = time, y = value, col = node)) +
  facet_grid(node ~ rep,labeller=label_wrap_gen(15)) +
  theme_bw() +
  ylab("Occupancy") +
  theme(panel.spacing.x = unit(1, "lines"),
        axis.text.x = element_text(size = 7),
        legend.position="none") +
  xlab(paste0("Time (", input$time_unit, ")"))
#the plot "q" ####
q <-
  ggplot(data = through_time_mini[which(through_time_mini$metric == "queue"), ] %>%
           mutate(node=str_replace_all(node,pattern="_",replacement=" "))) +
  geom_step(aes(x = time, y = value, col = node)) +
  facet_grid(node ~ rep, labeller=label_wrap_gen(15)) +
  theme_bw() +
  ylab("Queue") +
  xlab(paste0("Time (", input$time_unit, ")")) +
  theme(panel.spacing.x = unit(1, "lines"),
        axis.text.x = element_text(size =7),
        legend.position="none")
#the plot "d" ####
d <-
  ggplot(data = through_time_mini[which(through_time_mini$metric == "delayed"), ] %>%
           mutate(node=str_replace_all(node,pattern="_",replacement=" "))) +
  geom_step(aes(x = time, y = value, col = node)) +
  facet_grid(node ~ rep, labeller=label_wrap_gen(15)) +
  theme_bw() + ylab("Delayed") +
  xlab(paste0("Time (", input$time_unit, ")")) +
  theme(panel.spacing.x = unit(1, "lines"),
        axis.text.x = element_text(size =7),
        legend.position="none")
#the plot "t" ####
# NOTE(review): `t` masks base::t() within this scope — consider renaming
# if matrix transpose is ever needed here.
t <-
  ggplot(data = through_time_mini[which(through_time_mini$metric == "transition"), ] %>%
           mutate(node=str_replace_all(node,pattern="_",replacement=" "))) +
  geom_step(aes(x = time, y = value, col = node)) +
  facet_grid(node ~ rep, labeller=label_wrap_gen(15)) +
  theme_bw() +
  ylab("Transition") +
  xlab(paste0("Time (", input$time_unit, ")")) +
  theme(panel.spacing.x = unit(1, "lines"),
        axis.text.x = element_text(size =7),
        legend.position="none")
#the plot "b" ####
b <-
  ggplot(data = through_time_mini[which(through_time_mini$metric == "occ_bed"), ] %>%
           mutate(node=str_replace_all(node,pattern="_",replacement=" "))) +
  geom_step(aes(x = time, y = value, col = node)) +
  facet_grid(node ~ rep, labeller=label_wrap_gen(15)) +
  theme_bw() + ylab("Bed Occupancy") +
  xlab(paste0("Time (", input$time_unit, ")")) +
  theme(panel.spacing.x = unit(1, "lines"),
        axis.text.x = element_text(size = 7),
        legend.position="none")
#the plot "tisp" ####
# One translucent line per replicate: total patients in the system over time.
tisp <-
  ggplot(data = total_in_system_dat) + geom_line(aes(x = time, y = value, group = rep),
                                                 col = "black",
                                                 alpha = 0.4) +
  theme_bw() +
  ylab("Total in System")
#[which(avg_through_time$metric=="occupancy"),]
#[which(avg_through_time$metric=="occupancy"),]
#SIMULATION OUTPUT OBJECT LIST "combo" ####
#time units added to selected metric names in tables below ####
#these will be overwritten directly to remove underscores for the on-screen shiny outputs in some cases
#but will still pull through to the excel file download in the format below
combo <- list(
total_time_in_system = total_time_in_system %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
total_time_in_system_summary = total_time_in_system_summary %>% mutate(metric =
paste0(metric, " (", input$time_unit, ")")),
node_wait = node_wait %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
node_wait_summary = node_wait_summary %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
pat_wait = pat_wait %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
pat_wait_summary = pat_wait_summary %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
node_active_service = node_active_service %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
node_active_service_summary = node_active_service_summary %>% mutate(metric =
paste0(metric, " (", input$time_unit, ")")),
pat_active_service = pat_active_service %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
pat_active_service_summary = pat_active_service_summary %>% mutate(metric =
paste0(metric, " (", input$time_unit, ")")),
node_length_of_stay = node_length_of_stay %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
node_length_of_stay_summary = node_length_of_stay_summary %>% mutate(metric =
paste0(metric, " (", input$time_unit, ")")),
pat_length_of_stay = pat_length_of_stay %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
pat_length_of_stay_summary = pat_length_of_stay_summary %>% mutate(metric =
paste0(metric, " (", input$time_unit, ")")),
node_delay_to_transfer = node_delay_to_transfer %>% mutate(metric =
paste0(metric, " (", input$time_unit, ")")),
node_delay_to_transfer_summary = node_delay_to_transfer_summary %>% mutate(metric =
paste0(metric, " (", input$time_unit, ")")),
pat_delay_to_transfer = pat_delay_to_transfer %>% mutate(metric =
paste0(metric, " (", input$time_unit, ")")),
pat_delay_to_transfer_summary = pat_delay_to_transfer_summary %>% mutate(metric =
paste0(metric, " (", input$time_unit, ")")),
pat_rep_summary = pat_rep_summary %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
pat_total_summary = pat_total_summary %>% mutate(metric = paste0(metric, " (", input$time_unit, ")")),
ptd_percent = ptd_percent,
ptd_plot = ptd_plot,
avg_delayed = avg_delayed,
avg_delayed_summary = avg_delayed_summary,
d = d,
ptq_percent = ptq_percent,
ptq_plot = ptq_plot,
avg_queue = avg_queue,
avg_queue_summary = avg_queue_summary,
q = q,
pto_percent = pto_percent,
pto_plot = pto_plot,
avg_occupancy = avg_occupancy,
avg_occupancy_summary = avg_occupancy_summary,
o = o,
ptb_percent = ptb_percent,
ptb_plot = ptb_plot,
avg_occ_bed = avg_occ_bed,
avg_occ_bed_summary = avg_occ_bed_summary,
b = b,
ptt_percent = ptt_percent,
ptt_plot = ptt_plot,
avg_transition = avg_transition,
avg_transition_summary = avg_transition_summary,
t = t,
dpercentiles = dpercentiles,
qpercentiles = qpercentiles,
opercentiles = opercentiles,
bpercentiles = bpercentiles,
tpercentiles = tpercentiles,
rejected_summary = rejected_summary,
avg_through_time_plot = avg_through_time_plot,
reps = reps,
ptm = ptm,
avg_through_time = avg_through_time,
nodes = nodes,
warm_up = warm_up,
sim_time = sim_time,
exits = exits,
syst_names = syst_names,
delay_list = delay_list,
cap_cal_input = cap_cal_input_original,
arr_cal_input = arr_cal_input_original,
node_capacity_delay = node_capacity_delay,
node_capacity_delay_summary = node_capacity_delay_summary,
node_transition_delay = node_transition_delay,
node_transition_delay_summary = node_transition_delay_summary,
pat_capacity_delay = pat_capacity_delay,
pat_capacity_delay_summary = pat_capacity_delay_summary,
pat_transition_delay = pat_transition_delay,
pat_transition_delay_summary = pat_transition_delay_summary,
tisp = tisp,
#change - add simulation time unit for use in markdown report ####
#needs to be saved here so it can be added to parameters list
time_unit = input$time_unit #save character string time unit description for use as param in markdown
)
# Release the parallel cluster used for the simulation replicates.
stopCluster(cl)
#change to check on number of simulation outputs ####
#this is a manual check to see if every item in "combo", the list of items that the sim_out function
#will return, has been created. Originally, this list had 72 items and there was a hard-coded check
#to see if the length of the list was 72. A further item (the time unit) has now been added, and so
#the number being checked for is now 73
#The reason for performing this check, and what it is intended to achieve, needs to be clarified
# NOTE(review): this magic number is fragile — it must be bumped every time
# an item is added to `combo`; consider checking for required names instead.
if (length(combo) == 73) {
  # Success: pop a timestamped "Simulation Complete" dialog.
  shinyalert(
    title = paste0("Simulation Complete \n(", format(Sys.time()), ")"),
    text = "",
    closeOnEsc = TRUE,
    closeOnClickOutside = TRUE,
    html = FALSE,
    type = "info",
    showConfirmButton = TRUE,
    showCancelButton = FALSE,
    confirmButtonText = "OK",
    confirmButtonCol = "#87D9FF",
    timer = 0,
    imageUrl = "",
    animation = TRUE
  )
} else{
  # Unexpected output count: alert and hide the results tabs.
  shinyalert(
    title = "Simulation Error",
    text = "",
    closeOnEsc = TRUE,
    closeOnClickOutside = TRUE,
    html = FALSE,
    type = "info",
    showConfirmButton = TRUE,
    showCancelButton = FALSE,
    confirmButtonText = "OK",
    # BUGFIX: was "E60000" (missing "#"), which is not a valid CSS colour —
    # the other shinyalert calls here use "#87D9FF" / "#FF0000".
    confirmButtonCol = "#E60000",
    timer = 0,
    imageUrl = "",
    animation = TRUE
  )
  hideTab(inputId = "navbar", target = "3. Simulation Outputs")
  hideTab(inputId = "navbar", target = "4. Download Outputs")
}
return(combo)
}, error = function(e) {
  # Any error during the run: tell the user how to recover, hide the
  # results tabs, and return NULL so downstream req() calls short-circuit.
  shinyalert(
    title = HTML(
      'Simulation Error \n Try running the simulation for longer (increase simulation period length).
        \n If the error persists, return to the data input pages and check that data has been entered correctly.'
    ),
    text = "",
    closeOnEsc = FALSE,
    closeOnClickOutside = FALSE,
    html = FALSE,
    type = "info",
    showConfirmButton = TRUE,
    showCancelButton = FALSE,
    confirmButtonText = "OK",
    confirmButtonCol = "#FF0000",
    timer = 0,
    imageUrl = "",
    animation = FALSE
  )
  hideTab(inputId = "navbar", target = "3. Simulation Outputs")
  hideTab(inputId = "navbar", target = "4. Download Outputs")
  return(NULL)
})
}) # END OF sim_out() FUNCTION ####
### OUTPUT RENDER TEXT ####
# Status line reporting how long the simulation took.
# Elapsed time = proc.time() at render minus the start timestamp captured
# in sim_out()$ptm; element [3] of a proc.time difference is wall-clock
# ("elapsed") time. Returned character vector is concatenated by renderText.
# Fix: removed an unused local (y <- x$reps) present in the original.
output$comp <- renderText({
  req(sim_out())
  x <- sim_out()
  time <- proc.time() - x$ptm
  c("**Simulation completed in ",
    round(time[3], digits = 1),
    " seconds**")
})
# output$run_time<-renderTable({
# req(sim_out())
# out<-sim_out()
# x<-out$reps
# time<-proc.time()-out$ptm
# rep_run<-time[3]/x
#
# y<-matrix(data = c("10 runs","100 runs","500 runs","1,000 runs","10,000 runs",
# round(10*rep_run/60,digits=2),round(100*rep_run/60,digits=2),round(500*rep_run/60,digits=2),round(1000*rep_run/60,digits=2),round(10000*rep_run/60,digits=2),
# round(10*rep_run/3600,digits=2),round(100*rep_run/3600,digits=2),round(500*rep_run/3600,digits=2),round(1000*rep_run/3600,digits=2),round(10000*rep_run/3600,digits=2)),
# ncol=3)
#
# colnames(y)<-c("# of replicates","Run time (in minutes)","Run time (in hours)")
# y
#
#
# },caption = "Run Time Estimates",
# caption.placement = getOption("xtable.caption.placement", "top"),
# caption.width = getOption("xtable.caption.width", NULL))
#
###RENDER TOTAL TIME IN SYSTEM #####
# Detail table: per-record total time in system across all replicates.
output$ttis <- renderDataTable({
  req(sim_out())
  sim_out()$total_time_in_system
}, caption = htmltools::tags$caption(
  style = 'caption-side: top; text-align: center;',
  htmltools::h4(strong('Total time in system '))
), rownames = FALSE, filter = 'top',
options = list(pageLength = 10, dom = 'tlp'))

# Summary table: mean / sd / IQR / 95th percentile of total time in system,
# with the metric label carrying the simulation time unit.
output$ttiss <- renderTable({
  req(sim_out())
  res <- sim_out()
  tbl <- res$total_time_in_system_summary
  tbl$metric <- paste0("Total Time In System (", res$time_unit, ")")
  colnames(tbl) <- c("Metric", "Mean", "Standard Deviation", "IQR",
                     "95th Percentile")
  format(tbl, digits = 4, scientific = FALSE)
}, rownames = FALSE)
###RENDER WAITS #####
# Detail table: raw waiting-time records per service point.
output$node_wait <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_wait
  # node_wait is a list of per-replicate tables; stack into one table
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong('Wait'))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: wait per service point (mean / sd / IQR / 95th percentile).
output$node_wait_summary <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_wait_summary
  # order rows as the service points were defined in the set-up
  tmp <- tmp[order(factor(x = tmp$node, levels = x$syst_names[, 2])), ]
  # label the metric with the simulation time unit chosen by the user
  tmp$metric <- paste0("Wait (", x$time_unit, ")")
  # underscores in node names are internal identifiers; show spaces instead
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c("Service Point",
      "Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)

# Detail table: raw waiting-time records per patient type.
output$pat_wait <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_wait
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong('Wait'))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: wait per patient type.
output$pat_wait_summary <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_wait_summary
  tmp$metric <- paste0("Wait (", x$time_unit, ")")
  colnames(tmp) <-
    c("Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)
###RENDER ACTIVE SERVICE #####
# Detail table: raw active-service-time records per service point.
output$node_active_service <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_active_service
  # list of per-replicate tables; stack into one table for display
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong('Active Service'))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: active service per service point, ordered as in the
# set-up (x$syst_names[, 2]), metric labelled with the simulation time unit.
output$node_active_service_summary <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_active_service_summary
  tmp <- tmp[order(factor(x = tmp$node, levels = x$syst_names[, 2])), ]
  tmp$metric <- paste0("Active Service (", x$time_unit, ")")
  # underscores in node names are display artefacts; show spaces instead
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c("Service Point",
      "Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)

# Detail table: raw active-service-time records per patient type.
output$pat_active_service <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_active_service
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong('Active Service'))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))
# Summary table: active service per patient type.
# Fix: label the metric with the simulation time unit, consistent with every
# other patient-level summary in this file (pat_wait_summary, pat_loss,
# pat_dtts, pat_capacity_delay_summary all overwrite tmp$metric this way).
output$pat_active_service_summary <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_active_service_summary
  tmp$metric <- paste0("Active Service (", x$time_unit, ")")
  colnames(tmp) <-
    c("Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)
###RENDER CAPACITY DELAYS #####
# Detail table: raw capacity-driven delay records per service point.
output$node_capacity_delay <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_capacity_delay
  # list of per-replicate tables; stack into one table
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(
                                       strong('Time Delayed (Capacity Driven)')
                                     )),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: capacity delay per service point, ordered as in the set-up,
# metric labelled with the simulation time unit.
output$node_capacity_delay_summary <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_capacity_delay_summary
  tmp <- tmp[order(factor(x = tmp$node, levels = x$syst_names[, 2])), ]
  tmp$metric <- paste0("Capacity Delay (", x$time_unit, ")")
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c("Service Point",
      "Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
},
rownames = FALSE)

# Detail table: raw capacity-driven delay records per patient type.
output$pat_capacity_delay <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_capacity_delay
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(
                                       strong('Time Delayed (Capacity Driven)')
                                     )),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: capacity delay per patient type.
output$pat_capacity_delay_summary <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_capacity_delay_summary
  tmp$metric <- paste0("Capacity Delay (", x$time_unit, ")")
  colnames(tmp) <-
    c("Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)
###RENDER TRANSITION DELAYS #####
# Detail table: raw transition-delay records per service point.
# Fix: the original applied tmp[order(factor(...)), ] BEFORE rbindlist().
# At that point tmp is still a bare list of per-replicate tables, so the
# two-index subset is invalid (and tmp$node is not a column). Every other
# per-node detail table (node_wait, node_active_service, ...) binds the list
# directly, so the misplaced pre-sort is removed here.
output$node_transition_delay <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_transition_delay
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong(
                                       'Time Delayed (Transition)'
                                     ))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: transition delay per service point.
# Fix: order rows by the set-up order of service points (x$syst_names[, 2]),
# matching every other node-level summary table — this is where the
# ordering line misplaced in the detail render above belongs.
output$node_transition_delay_summary <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_transition_delay_summary
  tmp <- tmp[order(factor(x = tmp$node, levels = x$syst_names[, 2])), ]
  tmp$metric <- paste0("Transition Delay (", x$time_unit, ")")
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c("Service Point",
      "Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)

# Detail table: raw transition-delay records per patient type.
output$pat_transition_delay <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_transition_delay
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong(
                                       'Time Delayed (Transition)'
                                     ))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: transition delay per patient type.
# Fix: the original read x$pat_capacity_delay_summary (copy-paste error), so
# the "Transition Delay" table silently repeated the capacity-delay figures.
# sim_out() returns pat_transition_delay_summary (see the combo list and the
# report params), which is what this table must display.
output$pat_transition_delay_summary <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_transition_delay_summary
  tmp$metric <- paste0("Transition Delay (", x$time_unit, ")")
  colnames(tmp) <-
    c("Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)
###RENDER LENGTH OF STAY #####
# Detail table: raw length-of-stay records per service point.
output$node_los <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_length_of_stay
  # list of per-replicate tables; stack into one table
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong('Length of Stay'))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: length of stay per service point, ordered as in the set-up,
# metric labelled with the simulation time unit.
output$node_loss <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_length_of_stay_summary
  tmp <- tmp[order(factor(x = tmp$node, levels = x$syst_names[, 2])), ]
  tmp$metric <- paste0("Length Of Stay (", x$time_unit, ")")
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c("Service Point",
      "Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)

# Detail table: raw length-of-stay records per patient type.
output$pat_los <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_length_of_stay
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong('Length of Stay'))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: length of stay per patient type.
output$pat_loss <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_length_of_stay_summary
  tmp$metric <- paste0("Length Of Stay (", x$time_unit, ")")
  colnames(tmp) <-
    c("Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)
###RENDER DELAY TO TRANSFER #####
# Detail table: raw delay-to-transfer records per service point.
output$node_dtt <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_delay_to_transfer
  # list of per-replicate tables; stack into one table
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong(
                                       'Delay to Transfer'
                                     ))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: delay to transfer per service point, ordered as in the
# set-up, metric labelled with the simulation time unit.
output$node_dtts <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$node_delay_to_transfer_summary
  tmp <- tmp[order(factor(x = tmp$node, levels = x$syst_names[, 2])), ]
  tmp$metric <- paste0("Delay To Transfer (", x$time_unit, ")")
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c("Service Point",
      "Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)

# Detail table: raw delay-to-transfer records per patient type.
output$pat_dtt <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_delay_to_transfer
  tmp <- rbindlist(tmp)
  #tmp<-format(tmp,digits=5)
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong(
                                       'Delay to Transfer'
                                     ))),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Summary table: delay to transfer per patient type.
output$pat_dtts <- renderTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pat_delay_to_transfer_summary
  tmp$metric <- paste0("Delay To Transfer (", x$time_unit, ")")
  colnames(tmp) <-
    c("Metric",
      "Mean",
      "Standard Deviation",
      "IQR",
      "95th Percentile")
  tmp <- format(tmp, digits = 4, scientific = F)
}, rownames = FALSE)
###RENDER REJECTION RATE #####
# Summary table: mean number of rejected arrivals per service point.
output$rejs <- renderTable({
  req(sim_out())
  res <- sim_out()
  rej <- res$rejected_summary
  # present service points in the order they were defined in the set-up
  setup_order <- order(factor(x = rej$node, levels = res$syst_names[, 2]))
  rej <- rej[setup_order, ]
  rej$node <- str_replace_all(rej$node, pattern = "_", replacement = " ")
  colnames(rej) <- c("Service Point", "Mean")
  format(rej, digits = 4, scientific = FALSE)
}, rownames = FALSE)
###RENDER DELAY METRICS #####
# Table: % of time each service point spends at each delayed level, plus
# the cumulative % at or below that level.
output$ptd_percent <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$ptd_percent
  tmp <- format(tmp, digits = 4, scientific = F)
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c(
      "Service Point",
      "Delayed Level",
      "% time at Delayed Level",
      "Cumulative % time at or below Delayed Level"
    )
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(
                                       strong('Percentage time at delayed level')
                                     )),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Pre-built plot of the delayed-level distribution (created in sim_out()).
output$ptd_plot <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$ptd_plot
  tmp
}, res = 128)

# Table: average number of patients delayed at each service point.
output$avg_delayed <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$avg_delayed_summary
  # column 1 = service point name; column 2 = average, shown to 5 digits
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  #tmp<-rbindlist(tmp)
  tmp[, 2] <- format(tmp[, 2], digits = 5)
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong(
                                       'Average # Delayed'
                                     ))),
rownames = FALSE, options = list(pageLength = 10, dom = 'tlp'))

# Pre-built average-delayed plot (created in sim_out()).
output$d <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$d
  tmp
}, res = 128)
###RENDER QUEUE METRICS #####
# Table: % of time each service point spends at each queue length, plus
# the cumulative % at or below that length.
output$ptq_percent <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$ptq_percent
  tmp <- format(tmp, digits = 4, scientific = F)
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c(
      "Service Point",
      "Queue Length",
      "% time at Queue Length",
      "Cumulative % time at or below Queue Length"
    )
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(
                                       strong('Percentage time at queue length')
                                     )),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Pre-built plot of the queue-length distribution (created in sim_out()).
output$ptq_plot <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$ptq_plot
  tmp
}, res = 128)

# Table: average queue length at each service point.
output$avg_queue <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$avg_queue_summary
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  #tmp<-rbindlist(tmp)
  tmp[, 2] <- format(tmp[, 2], digits = 5)
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong(
                                       'Average queue length'
                                     ))),
rownames = FALSE, options = list(pageLength = 10, dom = 'tlp'))

# Pre-built average-queue plot (created in sim_out()).
output$q <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$q
  tmp
}, res = 128)
###RENDER OCCUPANCY METRICS #####
# Table: % of time each service point spends at each patient occupancy
# level, plus the cumulative % at or below that level.
output$pto_percent <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pto_percent
  tmp <- format(tmp, digits = 4, scientific = F)
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c(
      "Service Point",
      "Patient Occupancy Level",
      "% time at Patient Occupancy Level",
      "Cumulative % time at or below Patient Occupancy Level"
    )
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(
                                       strong('Percentage time at occupancy level')
                                     )),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Pre-built plot of the occupancy-level distribution (created in sim_out()).
output$pto_plot <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$pto_plot
  tmp
}, res = 128)

# Table: average occupancy at each service point.
output$avg_occupancy <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$avg_occupancy_summary
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  #tmp<-rbindlist(tmp)
  tmp[, 2] <- format(tmp[, 2], digits = 5)
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong(
                                       'Average Occupancy'
                                     ))),
rownames = FALSE, options = list(pageLength = 10, dom = 'tlp'))

# Pre-built average-occupancy plot (created in sim_out()).
output$o <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$o
  tmp
}, res = 128)
###RENDER TRANSITION METRICS #####
# Table: % of time each service point spends at each transition level, plus
# the cumulative % at or below that level.
output$ptt_percent <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$ptt_percent
  tmp <- format(tmp, digits = 4, scientific = F)
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c(
      "Service Point",
      "Transition Level",
      "% time at Transition Level",
      "Cumulative % time at or below Transition Level"
    )
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(
                                       strong('Percentage time at transition level')
                                     )),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Pre-built plot of the transition-level distribution (created in sim_out()).
output$ptt_plot <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$ptt_plot
  tmp
}, res = 128)

# Table: average transition level at each service point.
output$avg_transition <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$avg_transition_summary
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  #tmp<-rbindlist(tmp)
  tmp[, 2] <- format(tmp[, 2], digits = 5)
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong(
                                       'Average Transition'
                                     ))),
rownames = FALSE, options = list(pageLength = 10, dom = 'tlp'))

# Pre-built average-transition plot (created in sim_out()).
output$t <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$t
  tmp
}, res = 128)
###RENDER BED OCCUPANCY METRICS #####
# Table: % of time each service point spends at each bed occupancy level,
# plus the cumulative % at or below that level.
# Fix: caption previously leaked the internal field name ("occ_bed level");
# reworded to match the column headings.
output$ptb_percent <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$ptb_percent
  tmp <- format(tmp, digits = 4, scientific = F)
  tmp$node <- str_replace_all(tmp$node, pattern = "_", replacement = " ")
  colnames(tmp) <-
    c(
      "Service Point",
      "Bed Occupancy Level",
      "% time at Bed Occupancy Level",
      "Cumulative % time at or below Bed Occupancy Level"
    )
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(
                                       strong('Percentage time at bed occupancy level')
                                     )),
rownames = FALSE, filter = 'top', options = list(pageLength = 10, dom = 'tlp'))

# Pre-built plot of the bed-occupancy distribution (created in sim_out()).
output$ptb_plot <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$ptb_plot
  tmp
}, res = 128)

# Table: average bed occupancy at each service point.
# Fix: corrected caption typo "Occupanncy" -> "Occupancy".
output$avg_occ_bed <- renderDataTable({
  req(sim_out())
  x <- sim_out()
  tmp <- x$avg_occ_bed_summary
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  #tmp<-rbindlist(tmp)
  tmp[, 2] <- format(tmp[, 2], digits = 5)
  tmp
}, caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                     htmltools::h4(strong(
                                       'Average Bed Occupancy'
                                     ))),
rownames = FALSE, options = list(pageLength = 10, dom = 'tlp'))

# Pre-built average-bed-occupancy plot (created in sim_out()).
output$b <- renderPlot({
  req(sim_out())
  x <- sim_out()
  tmp <- x$b
  tmp
}, res = 128)
###RENDER MULTIPLOT #####
# Combined plot of averages through time, built in sim_out().
output$multi_plot <- renderPlot({
  req(sim_out())
  sim_out()$avg_through_time_plot
}, res = 175)
###RENDER Warm-Up Assistance Plot #####
# Warm-up assistance plot, built in sim_out() and stored as $tisp.
output$tisp <- renderPlot({
  req(sim_out())
  sim_out()$tisp
}, res = 175)
###RENDER PERCENTILE TABLES #####
# Percentile tables: each render below builds a two-row DT header ("sketch")
# with a 'Service Point' column spanning both header rows and seven
# percentile columns, then displays the corresponding percentile matrix
# from sim_out() with underscores in service-point names shown as spaces.

# Delay percentiles per service point.
output$dpercentiles <- renderDataTable({
  req(sim_out())
  sketch = htmltools::withTags(table(class = 'display',
                                     thead(tr(
                                       th(rowspan = 2, 'Service Point'),
                                       th(colspan = 7, 'Percentiles')
                                     ),
                                     tr(lapply(
                                       c("50th", "80th", "85th", "90th", "95th", "99th", "100th"),
                                       th
                                     )))))
  x <- sim_out()
  tmp <- x$dpercentiles
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  #tmp<-ceiling(tmp)
  #tmp<-type.convert(tmp)
  #tmp<-format(tmp,digits=5)
  datatable(
    tmp,
    container = sketch,
    options = list(dom = 't', ordering = F),
    caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                      htmltools::h4(strong(
                                        "Delay Percentiles"
                                      )))
  )
})

# Queue-length percentiles per service point.
output$qpercentiles <- renderDataTable({
  req(sim_out())
  sketch = htmltools::withTags(table(class = 'display',
                                     thead(tr(
                                       th(rowspan = 2, 'Service Point'),
                                       th(colspan = 7, 'Percentiles')
                                     ),
                                     tr(lapply(
                                       c("50th", "80th", "85th", "90th", "95th", "99th", "100th"),
                                       th
                                     )))))
  x <- sim_out()
  tmp <- x$qpercentiles
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  # tmp<-ceiling(tmp)
  # tmp<-type.convert(tmp)
  #tmp<-format(tmp,digits=5)
  datatable(
    tmp,
    container = sketch,
    options = list(dom = 't', ordering = F),
    caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                      htmltools::h4(strong(
                                        "Queue Percentiles"
                                      )))
  )
})

# Occupancy percentiles per service point.
output$opercentiles <- renderDataTable({
  req(sim_out())
  sketch = htmltools::withTags(table(class = 'display',
                                     thead(tr(
                                       th(rowspan = 2, 'Service Point'),
                                       th(colspan = 7, 'Percentiles')
                                     ),
                                     tr(lapply(
                                       c("50th", "80th", "85th", "90th", "95th", "99th", "100th"),
                                       th
                                     )))))
  x <- sim_out()
  tmp <- x$opercentiles
  #format means you need to use matrix referencing without names here, rather than dollar sign/name referencing
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  # tmp<-ceiling(tmp)
  # tmp<-type.convert(tmp)
  #tmp<-format(tmp,digits=5)
  datatable(
    tmp,
    container = sketch,
    options = list(dom = 't', ordering = F),
    caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                      htmltools::h4(strong(
                                        "Occupancy Percentiles"
                                      )))
  )
})

# Bed-occupancy percentiles per service point.
output$bpercentiles <- renderDataTable({
  req(sim_out())
  sketch = htmltools::withTags(table(class = 'display',
                                     thead(tr(
                                       th(rowspan = 2, 'Service Point'),
                                       th(colspan = 7, 'Percentiles')
                                     ),
                                     tr(lapply(
                                       c("50th", "80th", "85th", "90th", "95th", "99th", "100th"),
                                       th
                                     )))))
  x <- sim_out()
  tmp <- x$bpercentiles
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  # tmp<-ceiling(tmp)
  # tmp<-type.convert(tmp)
  #tmp<-format(tmp,digits=5)
  datatable(
    tmp,
    container = sketch,
    options = list(dom = 't', ordering = F),
    caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                      htmltools::h4(strong(
                                        "Bed Occupancy Percentiles"
                                      )))
  )
})

# Transition percentiles per service point.
output$tpercentiles <- renderDataTable({
  req(sim_out())
  sketch = htmltools::withTags(table(class = 'display',
                                     thead(tr(
                                       th(rowspan = 2, 'Service Point'),
                                       th(colspan = 7, 'Percentiles')
                                     ),
                                     tr(lapply(
                                       c("50th", "80th", "85th", "90th", "95th", "99th", "100th"),
                                       th
                                     )))))
  x <- sim_out()
  tmp <- x$tpercentiles
  tmp[, 1] <- str_replace_all(tmp[, 1], pattern = "_", replacement = " ")
  # tmp<-ceiling(tmp)
  # tmp<-type.convert(tmp)
  #tmp<-format(tmp,digits=5)
  datatable(
    tmp,
    container = sketch,
    options = list(dom = 't', ordering = F),
    caption = htmltools::tags$caption(style = 'caption-side: top; text-align: center;',
                                      htmltools::h4(strong(
                                        "Transition Percentiles"
                                      )))
  )
})
# Pathway diagrams shown on the output pages; both outputs re-use the
# shared viz() reactive so the diagram stays consistent across tabs.
output$tables_viz1 <- renderGrViz(viz())
output$tables_viz2 <- renderGrViz(viz())
### XLSX DOWNLOAD HANDLER #####
# Excel download of all simulation output tables.
# Builds a named list from sim_out() and writes one worksheet per element
# via write.xlsx; list names become the worksheet tab names.
# NOTE(review): write.xlsx is presumably openxlsx::write.xlsx (named-list
# interface) — confirm against the library() calls at the top of the file.
output$downloadtables <- downloadHandler(
  filename = function() {
    paste0("Simulation Tables.xlsx")
  },
  content = function(filename) {
    req(sim_out())
    x <- sim_out()
    # modal "rendering" notice (no confirm button) while the workbook builds
    shinyalert(
      title = "Tables Rendering",
      text = "",
      closeOnEsc = FALSE,
      closeOnClickOutside = FALSE,
      html = FALSE,
      type = "info",
      showConfirmButton = FALSE,
      showCancelButton = FALSE,
      confirmButtonText = "OK",
      confirmButtonCol = "#87D9FF",
      timer = 0,
      imageUrl = "",
      animation = TRUE
    )
    # worksheet name -> data object pulled from the simulation results
    list_of_datasets <-
      list(
        "total_time_in_system" = x$total_time_in_system,
        "total_time_in_system_summary" = x$total_time_in_system_summary,
        "pat_rep_summary" = x$pat_rep_summary,
        "pat_total_summary" = x$pat_total_summary,
        "node_wait" = x$node_wait,
        "node_wait_summary" = x$node_wait_summary,
        "node_active_service" = x$node_active_service,
        "node_active_service_summary" = x$node_active_service_summary,
        "node_capacity_delay" = x$node_capacity_delay,
        "node_capacity_delay_summary" = x$node_capacity_delay_summary,
        "node_transition_delay" = x$node_transition_delay,
        "node_transition_delay_summary" = x$node_transition_delay_summary,
        "node_length_of_stay" = x$node_length_of_stay,
        "node_length_of_stay_summary" = x$node_length_of_stay_summary,
        "node_delay_to_transfer" = x$node_delay_to_transfer,
        "node_delay_to_transfer_summary" = x$node_delay_to_transfer_summary,
        "rejected_summary" = x$rejected_summary,
        "ptd_percent" = x$ptd_percent,
        "dpercentiles" = x$dpercentiles,
        "avg_delayed_summary" = x$avg_delayed_summary,
        "ptq_percent" = x$ptq_percent,
        "qpercentiles" = x$qpercentiles,
        "avg_queue_summary" = x$avg_queue_summary,
        "pto_percent" = x$pto_percent,
        "opercentiles" = x$opercentiles,
        "avg_occupancy_summary" = x$avg_occupancy_summary,
        "ptb_percent" = x$ptb_percent,
        "bpercentiles" = x$bpercentiles,
        "avg_occ_bed_summary" = x$avg_occ_bed_summary,
        "ptt_percent" = x$ptt_percent,
        "tpercentiles" = x$tpercentiles,
        "avg_transition_summary" = x$avg_transition_summary,
        "avg_through_time_uniform" = x$avg_through_time
      )
    write.xlsx(x = list_of_datasets, file = filename)
    # dismissible confirmation once the file has been written
    shinyalert(
      title = "Tables Download Complete",
      text = "",
      closeOnEsc = TRUE,
      closeOnClickOutside = TRUE,
      html = FALSE,
      type = "info",
      showConfirmButton = TRUE,
      showCancelButton = FALSE,
      confirmButtonText = "OK",
      confirmButtonCol = "#87D9FF",
      timer = 0,
      imageUrl = "",
      animation = TRUE
    )
  }
)
### PLOT DOWNLOAD HANDLER #####
# PDF download of all summary plots.
# Fix: the pdf graphics device is now closed via on.exit(), so it is
# released even if one of the print() calls errors. Previously an error
# mid-way would leave the device open (leaking it across downloads and
# leaving the PDF unfinalised). on.exit runs before the handler returns,
# so the file is complete before downloadHandler serves it; the completion
# alert firing just before dev.off() is harmless (it only messages the UI).
output$downloadplot <- downloadHandler(
  filename = "Plots.pdf",
  content = function(file) {
    req(sim_out())
    # modal "rendering" notice (no confirm button) while the PDF is built
    shinyalert(
      title = "Plots Rendering",
      text = "",
      closeOnEsc = FALSE,
      closeOnClickOutside = FALSE,
      html = FALSE,
      type = "info",
      showConfirmButton = FALSE,
      showCancelButton = FALSE,
      confirmButtonText = "OK",
      confirmButtonCol = "#87D9FF",
      timer = 0,
      imageUrl = "",
      animation = TRUE
    )
    x <- sim_out()
    pdf(file = file,
        width = 14,
        height = 7)
    # guarantee the device is closed (finalising the file) on any exit path
    on.exit(dev.off(), add = TRUE)
    print(x$pto_plot)
    print(x$ptb_plot)
    print(x$ptd_plot)
    print(x$ptt_plot)
    print(x$ptq_plot)
    print(x$avg_through_time_plot)
    print(x$o)
    print(x$b)
    print(x$d)
    print(x$t)
    print(x$q)
    shinyalert(
      title = "Plot Download Complete",
      text = "",
      closeOnEsc = TRUE,
      closeOnClickOutside = TRUE,
      html = FALSE,
      type = "info",
      showConfirmButton = TRUE,
      showCancelButton = FALSE,
      confirmButtonText = "OK",
      confirmButtonCol = "#87D9FF",
      timer = 0,
      imageUrl = "",
      animation = TRUE
    )
  }
)
### RMARKDOWN DOWNLOAD HANDLER #####
# Word-document report download: knits PathSimR_Report.Rmd with the full
# set of simulation results passed as Rmd parameters.
output$downloadreport <- downloadHandler(
  filename = paste0("PathSimR_Report.docx"),
  content = function(file) {
    # modal notice (no confirm button) while the report knits
    shinyalert(
      title = "Report Compiling",
      text = "",
      closeOnEsc = FALSE,
      closeOnClickOutside = FALSE,
      html = FALSE,
      type = "info",
      showConfirmButton = FALSE,
      showCancelButton = FALSE,
      confirmButtonText = "OK",
      confirmButtonCol = "#87D9FF",
      timer = 0,
      imageUrl = "",
      animation = TRUE
    )
    # Copy the report file to a temporary directory before processing it, in
    # case we don't have write permissions to the current working dir (which
    # can happen when deployed).
    tempReport1 <- file.path(tempdir(), "PathSimR_Report.Rmd")
    tempReport2 <- file.path(tempdir(), "template.docx")
    file.copy("PathSimR_Report.Rmd", tempReport1, overwrite = TRUE)
    # NOTE(review): tempReport2 is not referenced again here; presumably the
    # Rmd's YAML references template.docx from the same temp dir — confirm.
    file.copy("template.docx", tempReport2, overwrite = TRUE)
    x <- sim_out()
    # Set up parameters to pass to Rmd document — one entry per result
    # object produced by sim_out(); names must match the Rmd's params block.
    params <- list(
      total_time_in_system = x$total_time_in_system,
      total_time_in_system_summary = x$total_time_in_system_summary,
      node_wait = x$node_wait,
      node_wait_summary = x$node_wait_summary,
      pat_wait = x$pat_wait,
      pat_wait_summary = x$pat_wait_summary,
      node_active_service = x$node_active_service,
      node_active_service_summary = x$node_active_service_summary,
      pat_active_service = x$pat_active_service,
      pat_active_service_summary = x$pat_active_service_summary,
      node_length_of_stay = x$node_length_of_stay,
      node_length_of_stay_summary = x$node_length_of_stay_summary,
      pat_length_of_stay = x$pat_length_of_stay,
      pat_length_of_stay_summary = x$pat_length_of_stay_summary,
      node_delay_to_transfer = x$node_delay_to_transfer,
      node_delay_to_transfer_summary = x$node_delay_to_transfer_summary,
      pat_delay_to_transfer = x$pat_delay_to_transfer,
      pat_delay_to_transfer_summary = x$pat_delay_to_transfer_summary,
      pat_rep_summary = x$pat_rep_summary,
      pat_total_summary = x$pat_total_summary,
      ptd_percent = x$ptd_percent,
      ptd_plot = x$ptd_plot,
      avg_delayed = x$avg_delayed,
      avg_delayed_summary = x$avg_delayed_summary,
      d = x$d,
      ptq_percent = x$ptq_percent,
      ptq_plot = x$ptq_plot,
      avg_queue = x$avg_queue,
      avg_queue_summary = x$avg_queue_summary,
      q = x$q,
      pto_percent = x$pto_percent,
      pto_plot = x$pto_plot,
      avg_occupancy = x$avg_occupancy,
      avg_occupancy_summary = x$avg_occupancy_summary,
      o = x$o,
      ptb_percent = x$ptb_percent,
      ptb_plot = x$ptb_plot,
      avg_occ_bed = x$avg_occ_bed,
      avg_occ_bed_summary = x$avg_occ_bed_summary,
      b = x$b,
      ptt_percent = x$ptt_percent,
      ptt_plot = x$ptt_plot,
      avg_transition = x$avg_transition,
      avg_transition_summary = x$avg_transition_summary,
      t = x$t,
      dpercentiles = x$dpercentiles,
      qpercentiles = x$qpercentiles,
      opercentiles = x$opercentiles,
      bpercentiles = x$bpercentiles,
      tpercentiles = x$tpercentiles,
      rejected_summary = x$rejected_summary,
      avg_through_time_plot = x$avg_through_time_plot,
      reps = x$reps,
      ptm = x$ptm,
      avg_through_time = x$avg_through_time,
      nodes = x$nodes,
      warm_up = x$warm_up,
      sim_time = x$sim_time,
      exits = x$exits,
      syst_names = x$syst_names,
      delay_list = x$delay_list,
      cap_cal_input = x$cap_cal_input,
      arr_cal_input = x$arr_cal_input,
      node_capacity_delay = x$node_capacity_delay,
      node_capacity_delay_summary = x$node_capacity_delay_summary,
      pat_capacity_delay = x$pat_capacity_delay,
      pat_capacity_delay_summary = x$pat_capacity_delay_summary,
      node_transition_delay = x$node_transition_delay,
      node_transition_delay_summary = x$node_transition_delay_summary,
      pat_transition_delay = x$pat_transition_delay,
      pat_transition_delay_summary = x$pat_transition_delay_summary,
      #add the time unit as a parameter ####
      #need to ensure that it exists in the object x<-sim_out() first
      time_unit = x$time_unit
    )
    # Knit the document, passing in the `params` list, and eval it in a
    # child of the global environment (this isolates the code in the document
    # from the code in this app).
    rmarkdown::render(
      tempReport1,
      output_file = file,
      params = params,
      envir = new.env(parent = globalenv())
    )
    # dismissible confirmation once the report has been written
    shinyalert(
      title = "Report Download Complete",
      text = "",
      closeOnEsc = TRUE,
      closeOnClickOutside = TRUE,
      html = FALSE,
      type = "info",
      showConfirmButton = TRUE,
      showCancelButton = FALSE,
      confirmButtonText = "OK",
      confirmButtonCol = "#87D9FF",
      timer = 0,
      imageUrl = "",
      animation = TRUE
    )
  }
)
}
shinyApp(ui = ui, server = server)
|
3332bb4fb49ba020df1e4402c5dab3591a24d974
|
60b079bbf680d3078bf6e23461838d8d871552ec
|
/man/histomap.Rd
|
5c29fe74d36ff97773df417e6ebb795aa82416fd
|
[] |
no_license
|
tibo31/GeoXp
|
6a1968d27f03eea7aa80279af169c9a7ca753596
|
ac82d8ae02975dcc974c69a8057b5ce7b8927a6f
|
refs/heads/master
| 2023-04-07T23:56:48.838986
| 2023-04-06T07:05:51
| 2023-04-06T07:05:51
| 120,448,652
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,979
|
rd
|
histomap.Rd
|
\name{histomap}
\alias{histomap}
\title{Interactive Histogram and map}
\description{
The function \code{histomap()} draws a histogram of a given variable \code{name.var}
and a map with sites of coordinates \code{coordinates(sf.obj)}. Each site is associated to a value
of \code{name.var} and there is interactivity between the two windows.
}
\usage{
histomap(sf.obj, name.var, nbcol = 10, type = c("count", "percent", "density"),
criteria = NULL, carte = NULL, identify = NULL,
cex.lab = 0.8, pch = 16, col = "lightblue3", xlab = "", ylab = "", axes = FALSE,
lablong = "", lablat = "")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{sf.obj}{object of class sf}
\item{name.var}{a character; attribute name or column number in attribute table}
\item{nbcol}{number of cells for histogram (10 by default)}
\item{type}{Character string indicating type of histogram to be drawn. "percent" and "count" give relative frequency and frequency histograms, "density" produces a density scale histogram.}
\item{criteria}{a vector of size n of boolean which permit to represent preselected sites with a cross, using the tcltk window}
\item{carte}{matrix with 2 columns for drawing spatial polygonal contours : x and y coordinates of the vertices of the polygon}
\item{identify}{if not NULL, the name of the variable for identifying observations on the map}
\item{cex.lab}{character size of label}
\item{pch}{16 by default, symbol for selected points}
\item{col}{"lightblue3" by default, color of bars on the histogram}
\item{xlab}{a title for the graphic x-axis}
\item{ylab}{a title for the graphic y-axis}
\item{axes}{a boolean with TRUE for drawing axes on the map}
\item{lablong}{name of the x-axis that will be printed on the map}
\item{lablat}{name of the y-axis that will be printed on the map}
}
\details{
Sites selected by a bar on the histogram are represented on the map in red and the values
of sites selected on the map by `points' or `polygon' are
represented in red as a sub-histogram on the histogram.
}
\value{
In the case where user click on \code{save results} button,
a vector of integer is created as a global variable in \code{last.select} object.
It corresponds to the number of spatial units selected just before leaving the Tk window.
}
\references{Thibault Laurent, Anne Ruiz-Gazen, Christine Thomas-Agnan (2012), GeoXp: An R Package for Exploratory Spatial Data Analysis. \emph{Journal of Statistical Software}, 47(2), 1-23. \cr \cr
Roger S.Bivand, Edzer J.Pebesma, Virgilio Gomez-Rubio (2009), \emph{Applied Spatial Data Analysis with R}, Springer.
}
\author{Thomas-Agnan C., Aragon Y., Ruiz-Gazen A., Laurent T., Robidou L.}
\seealso{\code{\link{histomap}}, \code{\link{histobarmap}}, \code{\link{scattermap}}, \code{\link{densitymap}}}
\examples{
######
# data columbus
require("sf")
columbus <- sf::st_read(system.file("shapes/columbus.shp", package="spData")[1])
# columbus is included in the Spatial-Class object
# a very simple use of histomap :
histomap(columbus, "CRIME")
\dontrun{
# data on price indices of real estate in France
data(immob, package = "GeoXp")
# immob is a data.frame object. We have to create
# a Spatial object, by using first the longitude and latitude
require(sf)
immob.sf <- st_as_sf(immob, coords = c("longitude", "latitude"))
# optional : we add some contours that don't correspond to the spatial unit
# but are nice for mapping
midiP <- st_read(system.file("shapes/region.shp", package="GeoXp")[1])
# A basic call of histomap function
histomap(immob.sf, "prix.vente", carte = midiP,
identify = "Nom", cex.lab=0.6)
# ... with all options
histomap(immob.sf, "prix.vente", nbcol = 15, type = "percent",
criteria = immob$rentabilite > 5, carte = midiP, identify = "Nom",
cex.lab = 0.5, pch = 12,
col = "pink", xlab = "variation price", ylab = "percent",
axes = TRUE, lablong = "x", lablat = "y")
}
}
\keyword{spatial}
\keyword{univar}
|
712c94b895afe62e7756d1fd7a340b2db18693f5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/adegenet/examples/fasta2genlight.Rd.R
|
e75d7ebf66384bd2383d761c7b66f5f134936c49
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 808
|
r
|
fasta2genlight.Rd.R
|
# Auto-extracted example script for adegenet::fasta2genlight().
# All example code below is inside a "Not run" block (##D lines), so
# sourcing this file only loads the package.
library(adegenet)
### Name: fasta2genlight
### Title: Extract Single Nucleotide Polymorphism (SNPs) from alignments
### Aliases: fasta2genlight
### Keywords: manip
### ** Examples
## Not run:
##D ## show the example file ##
##D ## this is the path to the file:
##D myPath <- system.file("files/usflu.fasta",package="adegenet")
##D myPath
##D
##D ## read the file
##D obj <- fasta2genlight(myPath, chunk=10) # process 10 sequences at a time
##D obj
##D
##D ## look at extracted information
##D position(obj)
##D alleles(obj)
##D locNames(obj)
##D
##D ## plot positions of polymorphic sites
##D temp <- density(position(obj), bw=10)
##D plot(temp, xlab="Position in the alignment", lwd=2, main="Location of the SNPs")
##D points(position(obj), rep(0, nLoc(obj)), pch="|", col="red")
## End(Not run)
|
5d4782ada900c8261cb3d099c9959c8ea2e5079c
|
d48a6be6d855db72443aa767d680e13596e2a180
|
/RMark/man/run.mark.model.Rd
|
e8c90c73046b3e416e35d67a399822a56066fe3a
|
[] |
no_license
|
jlaake/RMark
|
f77e79d6051f1abfd57832fd60f7b63540a42ab9
|
7505aefe594a24e8c5f2a9b0b8ac11ffbdb8a62d
|
refs/heads/master
| 2023-06-26T21:29:27.942346
| 2023-06-25T16:35:43
| 2023-06-25T16:35:43
| 2,009,580
| 17
| 15
| null | 2019-01-10T17:17:11
| 2011-07-06T23:44:02
|
R
|
UTF-8
|
R
| false
| true
| 4,705
|
rd
|
run.mark.model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run.mark.model.R
\name{run.mark.model}
\alias{run.mark.model}
\title{Runs analysis with MARK model using MARK.EXE}
\usage{
run.mark.model(
model,
invisible = FALSE,
adjust = TRUE,
filename = NULL,
prefix = "mark",
realvcv = FALSE,
delete = FALSE,
external = FALSE,
threads = -1,
ignore.stderr = FALSE
)
}
\arguments{
\item{model}{MARK model created by \code{\link{make.mark.model}}}
\item{invisible}{if TRUE, exectution of MARK.EXE is hidden from view}
\item{adjust}{if TRUE, adjusts number of parameters (npar) to number of
columns in design matrix, modifies AIC and records both}
\item{filename}{base filename for files created by MARK.EXE. Files are named
filename.*.}
\item{prefix}{base filename prefix for files created by MARK.EXE; the files
are named prefixnnn.*}
\item{realvcv}{if TRUE the vcv matrix of the real parameters is extracted
and stored in the model results}
\item{delete}{if TRUE the output files are deleted after the results are
extracted}
\item{external}{if TRUE the mark object is saved externally rather than in
the workspace; the filename is kept in its place}
\item{threads}{number of cpus to use with mark.exe if positive or number of cpus to remain idle if negative}
\item{ignore.stderr}{If set TRUE, messages from mark.exe are suppressed; they are automatically suppressed with Rterm}
}
\value{
model: MARK model object with the base filename stored in
\code{output} and the extracted \code{results} from the output file appended
onto list; see \code{\link{mark}} for a detailed description of a
\code{mark} object.
}
\description{
Passes input file from model (\code{model$input}) to MARK, runs MARK, gets
\code{output} and extracts relevant values into \code{results} which is
appended to the \code{mark} model object.
}
\details{
This is a rather simple function that initiates the analysis with MARK and
extracts the output. An analysis was split into two functions
\code{\link{make.mark.model}} and \code{run.mark.model} to allow a set of
models to be created and then run individually or collectively with
\code{\link{run.models}}. By default, the execution of MARK.EXE will appear
in a separate window in which the progress can be monitored. The window can
be suppressed by setting the argument \code{invisible=TRUE}. The function
returns a \code{mark} object and it should be assigned to the same object to
replace the original model (e.g., \code{mymodel=run.mark.model(mymodel)}).
The element \code{output} is the base filename that links the objects to the
output files stored in the same directory as the R workspace. To removed
unneeded output files after deleting mark objects in the workspace, see
\code{\link{cleanup}}. \code{results} is a list of specific output values
that are extracted from the output. In extracting the results, the number of
parameters can be adjusted (\code{adjust=TRUE}) to match the number of
columns in the design matrix, which assumes that it is full rank and that
all of the parameters are estimable and not confounded. This can be useful
if that assumption is true, because on occasion MARK.EXE will report an
incorrect number of parameters in some cases in which the parameters are at
boundaries (e.g., 0 or 1 for probabilities). If the true parameter count is
neither that reported by MARK.EXE nor the number of columns in the design
matrix, then it can be adjusted using \code{\link{adjust.parameter.count}}.
If \code{filename} is assigned a value it is used to specify files with
those names. This is most useful to capture output from a model that has
already been run. If it finds the files with those names already exists, it
will ask if the results should be extracted from the files rather than
re-running the models.
}
\examples{
\donttest{
# This example is excluded from testing to reduce package check time
test=function()
{
data(dipper)
for(sex in unique(dipper$sex))
{
x=dipper[dipper$sex==sex,]
x.proc=process.data(x,model="CJS")
x.ddl=make.design.data(x.proc)
Phi.dot=list(formula=~1)
Phi.time=list(formula=~time)
p.dot=list(formula=~1)
p.time=list(formula=~time)
cml=create.model.list("CJS")
x.results=mark.wrapper(cml,data=x.proc,ddl=x.ddl,prefix=sex,delete=TRUE)
assign(paste(sex,"results",sep="."),x.results)
}
rm(Male.results,Female.results,x.results)
}
test()
cleanup(ask=FALSE,prefix="Male")
cleanup(ask=FALSE,prefix="Female")
}
}
\seealso{
\code{\link{make.mark.model}}, \code{\link{run.models}},
\code{\link{extract.mark.output}}, \code{\link{adjust.parameter.count}},
\code{\link{mark}}, \code{\link{cleanup}}
}
\author{
Jeff Laake
}
\keyword{model}
|
d1698172dd78de164c99458c61933f8d5a9cdf0c
|
0eb274cb573426d26bdba30ede223fde22168489
|
/man/modslavmse-package.Rd
|
15606f3909ecb1a6069c67590c10e17ecf617999
|
[] |
no_license
|
bernardsilverman/modslavmse
|
856d84048ffa07a21a35ba64b15b36498f8805bc
|
a880531c5e6ddf05ab5760012f44deeec6856df7
|
refs/heads/master
| 2022-01-14T14:03:25.871134
| 2019-08-09T21:40:13
| 2019-08-09T21:40:13
| 126,054,945
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,629
|
rd
|
modslavmse-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modslavmse.R
\docType{package}
\name{modslavmse-package}
\alias{modslavmse}
\alias{modslavmse-package}
\title{\pkg{modslavmse}}
\description{
\pkg{modslavmse} is a package for Multiple Systems Estimation as applied particularly in the context of Modern Slavery
}
\details{
This package serves three purposes. Firstly, it implements the method used in the original work by Bales, Hesketh and Silverman
to approach the question of the prevalence of Modern Slavery in the UK by estimating the 'dark figure' underlying data
collected in the 2013 National Referral Mechanism (NRM) by the National Crime Agency (NCA). That work used the package
\pkg{Rcapture} with a particular approach to the choice of two-factor interactions to fit in the model. This method is implemented
in the routine \code{\link{MSEfit}}. The data set \code{\link{UKdat}} gives the data used in the original paper.
The research also involved testing the robustness of the results by omitting some of the lists and/or combining some into single lists.
The routines \code{\link{omitlists}} and \code{\link{mergelists}} allow these operations to be done.
The other purpose of this package is to implement development versions of current research directions. A current focus is on Monte
Carlo Markov Chain approaches which allow some sort of model averaging, rather than the focus on a particular model implicit
in \code{MSEfit}. This makes use of the package MCMCpack.
The third, more specific, purpose is to allow full reproducibility of the work presented in Silverman (2018).
This is done through the scripts given in the Examples section below.
}
\examples{
data(UKdat, UKdat_5, UKdat_4, Ned, Ned_5, NewOrl, NewOrl_5, Kosovo) # the datasets used in the paper
make_AIC_stepwise_table1() # Table 5
make_AIC_stepwise_table2() # Table 6
make_allmodels_plots_script() # Figures 1, 2, 3 and 4
make_MCMCfit_tables_script() # Tables 7, 8, 9, 10, 11 and 12
make_MCMCeffects_table_script() # Table 13
make_madyor_table_script() # Table 14 and Figures 5 and 6, as well as some numbers in the text
make_LCMCR_table_script() # Table 15
}
\references{
K. B. Bales, O. Hesketh, and B. W. Silverman (2015). Modern Slavery in the UK: How many victims? Significance 12 (3), 16-21.
B. W. Silverman (2018). Model fitting in Multiple Systems Analysis for the quantification of Modern Slavery: Classical and Bayesian approaches.
}
\author{
\strong{Maintainer}: Bernard W. Silverman \email{mail@bernardsilverman.co.uk}
}
|
7a2b20df00f63c0715ecda127da5726ed1b6fcdf
|
89b54f1796ac953096f16b979dbf5ee92334ba0b
|
/packages/XGeneAnalysis/man/nn_p_value_testing.Rd
|
2a9b816b401c7d1e44bb8bc2d38e1013a79fd09c
|
[] |
no_license
|
a3cel2/xga
|
a69ee22918f5e6d2b77f6922c11e0e7a0214323f
|
b0ceaf2e5a2f7ee5457d86772cddd1be7fcbf7b7
|
refs/heads/master
| 2020-04-29T00:32:01.140644
| 2019-08-08T16:06:39
| 2019-08-08T16:06:39
| 175,696,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,806
|
rd
|
nn_p_value_testing.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn_functions.R
\name{nn_p_value_testing}
\alias{nn_p_value_testing}
\title{p_value testing of neural network models}
\usage{
nn_p_value_testing(initial_model, condition_name, genotype_file,
resistance_file, nn_model_function = make_nn_model,
nn_model_parameters = NULL, genes = NULL, efflux_genes = NULL,
alpha = 0.05, diff_tolerance = 1e-04)
}
\arguments{
\item{initial_model}{a keras transporter neural network model (e.g. in the list returned by make_nn_model)}
\item{condition_name}{a vector of one or more conditions for which to test significant weights}
\item{genotype_file}{a data frame (or matrix) with strain names as row names and genes (optionally, 'Plate')
as column names. Genotype values for each gene are either 1 for knockout or 0 for wild-type. Plate is a factor}
\item{resistance_file}{a matrix with strain names as row names and condition names as column names}
\item{nn_model_function}{function used to train a single neural network (defaults to make_nn_model)}
\item{nn_model_parameters}{parameters given to nn_model_function (named list with arguments to nn_model_function), populated automatically for make_nn_model etc}
\item{genes}{genes in the first layer of the neural network (and the second layer by default)}
\item{efflux_genes}{genes in the second layer of the neural network}
\item{alpha}{uncorrected p-value cutoff - Bonferroni correction is applied to this automatically}
\item{diff_tolerance}{when calculating a p value, what is the numerical threshold for determining which strain's predictions differ?}
}
\value{
a list (of the same format to calling get_weights on a keras model), with non-significant weights set to 0
}
\description{
p_value testing of neural network models
}
|
4c03094e6b956238101bcfc36e3b05249a802d0b
|
43e7fece644d753526b19448089d229d3f54815e
|
/man/summary.chtrs.Rd
|
ec8c4e84bb8f6bc69600010c8424f61883daf408
|
[] |
no_license
|
mattansb/cheatR
|
b183b9a282d7d490fe6baca1e70eee6b89bb588c
|
6309dc64cd2c2bb7358b7e8d02cc6e26b78b8eac
|
refs/heads/main
| 2022-05-26T02:53:39.386995
| 2022-04-13T10:12:13
| 2022-04-13T10:12:13
| 142,267,216
| 21
| 4
| null | 2022-04-13T10:12:14
| 2018-07-25T07:57:12
|
R
|
UTF-8
|
R
| false
| true
| 767
|
rd
|
summary.chtrs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{summary.chtrs}
\alias{summary.chtrs}
\title{Summarise Cheatrs}
\usage{
\method{summary}{chtrs}(object, bad_files = FALSE, ...)
}
\arguments{
\item{object}{output of \code{\link[=catch_em]{catch_em()}}.}
\item{bad_files}{logical. Instead of the result matrix, should return instead
the list of bad files (that did not compare / load)? Defaults to \code{FALSE}.}
\item{...}{Not used.}
}
\value{
The input \code{chtrs} matrix, or a list of bad files (when \code{bad_files = TRUE}).
}
\description{
Summarise Cheatrs
}
\examples{
if (interactive()) {
files <- choose.files()
res <- catch_em(files)
summary(res, bad_files = TRUE)
}
}
\author{
Mattan S. Ben-Shachar
}
|
5a81964814ee26cc48fc9615e2396eb9e126fb42
|
0a4cc2bafe6fb3396ac9c07dc1e382a8a897a2d5
|
/R/rptha/man/gCentroid.Rd
|
e02f2af898b7d2d96fd4f85871d389004505de94
|
[
"BSD-3-Clause"
] |
permissive
|
GeoscienceAustralia/ptha
|
240e360ff9c33cbdfa6033115841035c39e7a85f
|
124d0caa76ed143d87fa0dfe51434d9500268a5a
|
refs/heads/master
| 2023-08-31T12:00:57.055692
| 2023-08-31T06:05:18
| 2023-08-31T06:05:18
| 39,749,535
| 26
| 8
|
BSD-3-Clause
| 2023-08-29T04:13:20
| 2015-07-27T01:44:11
|
R
|
UTF-8
|
R
| false
| true
| 364
|
rd
|
gCentroid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alternatives_rgeos.R
\name{gCentroid}
\alias{gCentroid}
\title{limited replacement for rgeos::gCentroid using sf functionality}
\usage{
gCentroid(spgeom, byid = FALSE)
}
\description{
Like rgeos, this treats all datasets as Cartesian. Use sf::st_centroid
for non-legacy applications.
}
|
09af2ca7c16dd324a586e3fd8fa8319bf94e4a06
|
7ae49f4ebde44b6c2310f17fe0c72a32a3bcc8e9
|
/tmb.R
|
15a38520e5841c5af5fb64983260346299883cfd
|
[] |
no_license
|
skanwal/Play
|
a237d6f15130f2892e47412402ad62bdea0454de
|
32343074158ea9a180c990b8dfac5318a73190e6
|
refs/heads/master
| 2021-06-11T02:38:32.596130
| 2020-11-10T22:39:45
| 2020-11-10T22:39:45
| 128,146,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,072
|
r
|
tmb.R
|
# Exploratory script: bin somatic variant positions from a VCF per
# chromosome and summarise the mean number of mutations per occupied bin.
library(vcfR)
library(dplyr)
# Read a small test VCF and flatten its fixed fields + INFO column.
vcf_test = read.vcfR("../../Data/ensemble-pon-pass-test.vcf")
vcf.df <- cbind(as.data.frame(getFIX(vcf_test)), INFO2df(vcf_test))
View(vcf.df)
# Have a named vector with chr + chr_length
# You can extract the chr_length for chr, and then bin based on that
# In a named vector, you can access each element by name
# chr_lengths[chr_you_want]
# NOTE(review): these look like GRCh37/hg19 chromosome lengths
# (chr1 = 249,250,621) -- confirm they match the VCF's reference build.
chr_lengths <- c(249250621L, 243199373L, 198022430L, 191154276L,
180915260L, 171115067L, 159138663L, 146364022L,
141213431L, 135534747L, 135006516L, 133851895L,
115169878L, 107349540L, 102531392L, 90354753L,
81195210L, 78077248L, 59128983L, 63025520L,
48129895L, 51304566L, 155270560L, 59373566L, 16569L)
chr_names = c(1:22, "X", "Y", "MT")
names(chr_lengths) <- chr_names
chr_lengths["13"]   # sanity check: look up one chromosome length by name
# testing smaller vcf
# extract chr and pos from vcf and make df from it
chr_pos <- data.frame(chr=getCHROM(vcf_test), pos=getPOS(vcf_test), stringsAsFactors = FALSE)
# 1 Mb bins over the first 10 Mb; findInterval() maps each position to
# the index of the bin it falls in.
bins <- seq(from = 0 , to = 10000000, by = 1000000)
pos_binned <- cbind(chr_pos$pos, findInterval(chr_pos$pos, bins))
table(pos_binned[, 2])        # variant count per occupied bin
mean(table(pos_binned[, 2]))  # mean count over occupied bins only
# running on bigger vcf
# NOTE(fix): `vcf` must be read before it is used. The original called
# extract_info_tidy()/getFIX()/INFO2df() on `vcf` before read.vcfR()
# defined it, which fails with "object 'vcf' not found" in a fresh session.
vcf <- read.vcfR("../../Data/ensemble-pon-pass.vcf")
vcf_info_tidy <- vcfR::extract_info_tidy(vcf) # gives info in tidy format
vcf_fix_col <- vcfR::getFIX(vcf)
vcf_info_untidy <- vcfR::INFO2df(vcf) # never ends
vcf_all <- vcfR::vcfR2tidy(vcf) # gives everything in tidy format
table(vcf_all$gt$Indiv)         # variant count per sample
vcf_all2 <- vcf_all$fix # extract the 'fix' element from the vcf_all list
# chr/pos table for every variant in the full VCF
chr_pos_all <- data.frame(chr = getCHROM(vcf), pos = getPOS(vcf), stringsAsFactors = FALSE)
table(chr_pos_all$chr)          # variants per chromosome
# Bin chromosome 1 into 40 Mb windows and average mutations per occupied bin.
chr_pos_1 <- dplyr::filter(chr_pos_all, chr == "1")
chr1_length <- chr_lengths["1"]
bin_size <- 40000000
bins_chr1 <- seq(from = 0, to = chr1_length, by = bin_size)
pos_binned_chr1 <- cbind(chr_pos_1$pos, findInterval(chr_pos_1$pos, bins_chr1))
mean(table(pos_binned_chr1[, 2]))
# Take chr as an argument
# Return the chr/pos records of `vcf` that lie on chromosome `x`.
#
# x:   chromosome name as a string (e.g. "1", "X")
# vcf: a vcfR object; CHROM and POS are extracted from it
# Returns a data.frame with columns `chr` and `pos`, restricted to `x`.
filter_chr <- function(x, vcf) {
  all_sites <- data.frame(
    chr = getCHROM(vcf),
    pos = getPOS(vcf),
    stringsAsFactors = FALSE
  )
  filter(all_sites, chr == x)
}
# Bin breakpoints for one chromosome: 0, bin_size, 2 * bin_size, ...,
# up to (at most) the chromosome length looked up in `chr_lengths`.
#
# chr_lengths: named vector of chromosome lengths
# chr_name:    chromosome to bin (must be one of names(chr_lengths))
# bin_size:    bin width in base pairs
bin_chr <- function(chr_lengths, chr_name, bin_size) {
  chr_len <- chr_lengths[chr_name]
  seq(from = 0, to = chr_len, by = bin_size)
}
bin_chr(chr_lengths, "1", bin_size)
# Mean number of mutations per occupied bin on one chromosome.
#
# chr_pos: data.frame with a numeric `pos` column of mutation positions
# bin_vec: sorted vector of bin breakpoints (as produced by bin_chr())
# Returns the mean count over bins that contain at least one mutation
# (empty bins do not enter the average); NaN when chr_pos has no rows.
count_mut_in_chr <- function(chr_pos, bin_vec) {
  # findInterval() maps each position to its bin index; tabulating those
  # indices gives per-bin counts directly. (The original also cbind()ed the
  # raw positions into a matrix, but only column 2 was ever used.)
  bin_index <- findInterval(chr_pos$pos, bin_vec)
  mean(table(bin_index))
}
# Smoke test of the pipeline on chromosome 1: filter sites, build 40 Mb
# bins, then average mutations per occupied bin.
x <- count_mut_in_chr(filter_chr("1", vcf), bin_chr(chr_lengths, "1", bin_size))
# apply same function to all chromosomes
# Per-chromosome wrapper: mean mutations per occupied bin for chromosome
# `arg_name`. NOTE(review): relies on `vcf`, `chr_lengths` and `bin_size`
# existing in the calling environment -- confirm before reuse.
fun1 <- function(arg_name) {
  chr_sites <- filter_chr(arg_name, vcf)
  chr_bins <- bin_chr(chr_lengths, arg_name, bin_size)
  count_mut_in_chr(chr_sites, chr_bins)
}
# Mean mutations per occupied bin for every chromosome; the result vector
# is preallocated and filled in place.
results <- vector("numeric", length = length(chr_names))
# seq_along() is safe when chr_names is empty; 1:length() would yield c(1, 0).
for (i in seq_along(chr_names)) {
  results[i] <- count_mut_in_chr(filter_chr(chr_names[i], vcf), bin_chr(chr_lengths, chr_names[i], bin_size))
}
# Same computation, functional style, on the first three chromosomes;
# returns a named numeric vector.
sapply(chr_names[1:3], fun1)
# Minimal sapply demo: square each element.
sapply(c(1, 2, 3), function(arg) {
  arg**2
})
|
6dc4be5a2f397f42187a6630c2d55946d928a5a1
|
5dcccb210e6656966eb0b91ab1c8143461d58f43
|
/case-studies/blueprint-wgbs/bp2019-01-create-data-object.R
|
49f1a8c9328b7db30b3af0aa83a0c5b4b943ef1c
|
[] |
no_license
|
stephaniehicks/methylCCPaper
|
7b47c505946ce0370d81c87022532a9e981ed374
|
f67fdfa77a38107ebb04a49ecb251f850248107a
|
refs/heads/master
| 2022-02-13T16:16:16.561930
| 2019-08-11T20:21:48
| 2019-08-11T20:21:48
| 109,314,917
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,190
|
r
|
bp2019-01-create-data-object.R
|
library(dplyr)
library(DeepBlueR)
library(foreach)
library(data.table)
library(readr)
# Project and data directories (site-specific absolute paths).
workingDir_blueprint <-
"/users/shicks1/projects/methylCCPaper/case-studies/blueprint-wgbs"
dataPath <- "/users/shicks1/data/DNAm/blueprint_ihec"
# Test installation and connectivity by saying hello to the DeepBlue server:
deepblue_info("me")
# ok this works
# Interactive exploration of what the DeepBlue server offers:
deepblue_list_genomes()
deepblue_list_projects() # "BLUEPRINT Epigenome", "DEEP (IHEC)"
deepblue_list_techniques() # "WGBS", "RRBS", "BisulfiteSeq"
deepblue_list_epigenetic_marks() # "DNA Methylation"
deepblue_list_biosources() # e.g. blood, muscle, etc
# deepblue_list_experiments()
# next we search experiments
# Blood cell types (biosources) of interest for this analysis.
keep_biosource <- c("CD14-positive, CD16-negative classical monocyte",
"CD8-positive, alpha-beta T cell", "CD4-positive, alpha-beta T cell",
"CD38-negative naive B cell",
"cytotoxic CD56-dim natural killer cell",
"mature neutrophil", "mature eosinophil")
# Query DeepBlue for BLUEPRINT bisulfite-seq DNA methylation experiments
# on GRCh38, restricted to the biosources above.
blueprint_DNA_meth <- deepblue_list_experiments(genome = "GRCh38",
epigenetic_mark = "DNA Methylation",
technique = "BisulfiteSeq",
biosource = keep_biosource,
project = "BLUEPRINT Epigenome")
# Then we remove `.bed` files and only look at `.wig` files
# NOTE(review): the "." in grepl(".bed", name) is an unescaped regex
# wildcard -- it matches any character followed by "bed", not only ".bed".
blueprint_DNA_meth <-
blueprint_DNA_meth %>%
filter(!grepl(".bed", name)) %>%
data.table()
# To get more information about one experiment, use `deepblue_info()`
deepblue_info("e93346")
custom_table = do.call("rbind", apply(blueprint_DNA_meth, 1, function(experiment){
experiment_id = experiment[1]
# Obtain the information about the experiment_id
info = deepblue_info(experiment_id)
# Print the experiment name, project, biosource, and epigenetic mark.
with(info, { data.frame(id = `_id`, name = name, project = project,
technique = technique, epigenetic_mark = epigenetic_mark,
biosource = sample_info$biosource_name,
tissue_type = sample_info$TISSUE_TYPE,
disease_status = extra_metadata$DISEASE,
donor_id = sample_info$DONOR_ID,
donor_age = extra_metadata$DONOR_AGE,
donor_sex = extra_metadata$DONOR_SEX,
experiment_id = extra_metadata$EXPERIMENT_ID,
sample_id = sample_id,
sample_name = sample_info$SAMPLE_NAME,
ample_barcode = extra_metadata$SAMPLE_BARCODE,
sample_description = extra_metadata$SAMPLE_DESCRIPTION,
sample_source = sample_info$source,
file_path = extra_metadata$FILE,
first_submission_date = extra_metadata$FIRST_SUBMISSION_DATE,
instrument_model = extra_metadata$INSTRUMENT_MODEL)
})
}))
saveRDS(custom_table, file = file.path(dataPath,"blueprint_blood_custom_table.RDS"))
dim(custom_table)
head(custom_table)
# we also write a file with the paths to the bigwigs to download directly
write_csv(data.frame(paste0("ftp://ftp.ebi.ac.uk/pub/databases/", custom_table$file_path)),
file.path(dataPath,"blueprint_blood_ftp_paths.csv"),
col_names = FALSE)
# **note** After much effort, I failed to download the WGBS data
# from the deepblueR bioconductor package or from the API directly.
# Instead, I decided to use the blueprint_blood_ftp_paths.csv
# file to download the data with wget. However, the code below this point
# I wrote to try and download the data. But my requests kept failing.
# Maybe it will be useful for someone else.
# Create two tables: one for `.call` files, and
# one for `.cov` files. The `.call` file contains
# the methylation signal (or percent of reads that
# are methylated). The `.cov` file contains the
# coverage of the methylation signal (or how many
# reads cover the CpG).
table_call <-
custom_table %>%
filter(grepl("calls.bs_call", name)) %>%
data.table()
table_cov <-
custom_table %>%
filter(grepl("calls.bs_cov", name)) %>%
data.table()
###### Parallelizing it
# We can also split this up for all the chromosomes so we do not
# hit the download limit of DeepBlue. We also break up the
# processing of the `.cov` and the `.call` files.
# list all available chromosomes in GRCh38
chromosomes_GRCh38 <- deepblue_extract_ids(
deepblue_chromosomes(genome = "GRCh38") )
# keep only the essential ones (chr1-chr22 and chrX; chrY/chrM dropped)
chromosomes_GRCh38 <-
grep(pattern = "chr([0-9]{1,2}|X)$", chromosomes_GRCh38,
value = TRUE)
# We create `query_id`s, one for each chromosome to avoid
# hitting the limits of deepblue. First we process the
# `.call` files.
blueprint_regions_call <-
foreach(chr = chromosomes_GRCh38, .combine = c) %do%
{
query_id = deepblue_select_experiments(
experiment_name =
deepblue_extract_names(table_call),
chromosome = chr)
}
blueprint_regions_call # these are query_id's
# Then we process the `.cov` files.
blueprint_regions_cov <-
foreach(chr = chromosomes_GRCh38, .combine = c) %do%
{
query_id = deepblue_select_experiments(
experiment_name =
deepblue_extract_names(table_cov),
chromosome = chr)
}
blueprint_regions_cov # these are query_id's
# Next, we prepare to create the score matrix for the
# `.call` and `.cov` files.
exp_columns_call <- deepblue_select_column(table_call, "VALUE")
exp_columns_cov <- deepblue_select_column(table_cov, "VALUE")
# Then we submit requests for both the `.call` files
# (one asynchronous server-side request per chromosome query).
request_ids_call <- foreach(query_id = blueprint_regions_call,
.combine = c) %do%
{
deepblue_score_matrix(
experiments_columns = exp_columns_call,
aggregation_function = "max",
aggregation_regions_id = query_id)
}
request_ids_call
# check to see if the requests are done (state should become "done")
foreach(request = request_ids_call, .combine = c) %do% {
deepblue_info(request)$state
}
# And `.cov` files
request_ids_cov <- foreach(query_id = blueprint_regions_cov ,
.combine = c) %do%
{
deepblue_score_matrix(
experiments_columns = exp_columns_cov ,
aggregation_function = "max",
aggregation_regions_id = query_id)
}
request_ids_cov
# check to see if the requests are done
foreach(request = request_ids_cov, .combine = c) %do% {
deepblue_info(request)$state
}
# Once the requests are complete, we can create the score matrix.
# Download the per-chromosome results and row-bind them, then save and
# drop the large objects to free memory.
list_score_matrices_call <-
deepblue_batch_export_results(request_ids_call)
score_matrix_call <- data.table::rbindlist(
list_score_matrices_call, use.names = TRUE)
score_matrix_call[, 1:5, with=FALSE]
saveRDS(score_matrix_call,
file = file.path(dataPath, "blueprint_blood_call.RDS"))
rm(score_matrix_call)
list_score_matrices_cov <-
deepblue_batch_export_results(request_ids_cov)
score_matrix_cov <- data.table::rbindlist(
list_score_matrices_cov, use.names = TRUE)
score_matrix_cov[, 1:5, with=FALSE]
saveRDS(score_matrix_cov,
file = file.path(dataPath, "blueprint_blood_cov.RDS"))
rm(score_matrix_cov)
|
48817ce04035c47299749eae5dbe7effc5801dd0
|
823361be1cfd6002c63a482a6480ca3b18ec40fe
|
/PlayGameS3.R
|
7c8cbadab4566907b85881a0d9d5930a78fecb10
|
[] |
no_license
|
drmiller1220/PS3
|
b0d3f19da156529a79ff175b8c40a4d19e6e4e04
|
a9fce411af2b2328d823850da2ed79bbca6a2b94
|
refs/heads/master
| 2021-01-19T21:15:53.971619
| 2017-02-21T18:39:14
| 2017-02-21T18:39:14
| 82,477,843
| 0
| 0
| null | 2017-02-19T17:52:30
| 2017-02-19T17:52:30
| null |
UTF-8
|
R
| false
| false
| 354
|
r
|
PlayGameS3.R
|
##
#' Generic function for \code{PlayGame}
#'
#' This is the generic function for \code{PlayGame}. \code{PlayGame} can only be meaningfully
#' used by objects of class \code{door}.
#'
#' @param x an object used to select a method.
#'
#' @author David R. Miller
#'
PlayGame <- function(x){ # creating a generic for PlayGame
UseMethod("PlayGame")
}
|
48b132fca861fa4b296d00d546566cb9a502c1c0
|
ba0f3d97d242ccce773ba8411e8fc9b4b8517164
|
/plots/numdevices_distribution.R
|
6b9d6c62081bb92e456f2b31bbc2238b9e228cdc
|
[] |
no_license
|
agember/mpa
|
31e061f0932967e079ea1969badca0e5cc11df36
|
93c8fe234b810a09b41e13a7570f268eb201f37e
|
refs/heads/master
| 2021-01-10T14:51:23.227508
| 2015-10-14T01:42:29
| 2015-10-14T01:42:29
| 43,686,693
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,152
|
r
|
numdevices_distribution.R
|
# Plot the empirical CDF of the number of devices per network as a PDF.
# Paths come from environment variables set outside this script.
datapath <- Sys.getenv("MGMTPLANE_DATA")
codepath <- Sys.getenv("MGMTPLANE_CODE")
# NOTE(review): `datafile` is not referenced by the active code below --
# presumably read_metrics.R uses it; confirm before removing.
datafile <- "all_metrics_all_nomissing.csv"
# Earlier barplot version of this figure, kept for reference:
#source(paste(codepath,'analyze/read_metrics.R',sep="/"))
#source(paste(codepath,'analyze/quantile_bins.R',sep="/"))
#counts <- table(binned$NumDevices)
#counts <- counts/nrow(metrics)
#
#plotfile <- paste(datapath,'plots','numdevices_distribution.pdf',sep="/")
#pdf(plotfile, height=3, width=3)
#par(mar=c(3,3,1,0), mgp=c(2,0.5,0))
#xtics <- barplot(counts, ylab='Fraction of Networks', xlab='# of Devices', ylim=c(0,0.6), xaxt='n',
# cex.lab=1.5, cex.axis=1.5)
#axis(1, xtics, names(counts), cex.axis=1.5, tick=FALSE)
#box(which = "plot", lty = "solid")
#dev.off()
# read_metrics.R is expected to define the `metrics` data.frame.
source(paste(codepath,'analyze/read_metrics.R',sep="/"))
# Sorted device counts (x) against cumulative fractions 0..1 (y) form the ECDF.
x <- sort(metrics$NumDevices)
len <- length(metrics$NumDevices)-1
y <- c(0:len)/len
plotfile <- paste(datapath,'plots','numdevices_distribution.pdf',sep="/")
pdf(plotfile, height=3, width=3)
par(mar=c(3,3.5,1,0), mgp=c(2,0.3,0))
plot(x, y, ylab='Fraction of Networks', xlab='# of Devices', ylim=c(0,1), yaxt='n',
cex.lab=1.4, type='l')
axis(2,las=2,cex.axis=1.4,tck=0.03)
dev.off()
|
b46806be1073b3b7040b93655516b5b54ddf8f2a
|
91eb7aac6aec4726caa5536ddd563daf5483df6c
|
/R/parse-user.R
|
01d4eff621d326d6ad6d21cdc115a0fd0b60855a
|
[] |
no_license
|
dgrtwo/rparse
|
1daeca18de77ec6462f7dcf22873d2c883b1bc5f
|
bdef159d6485cf8da5b21fc6559d33bb8e98c388
|
refs/heads/master
| 2021-01-20T06:56:56.233260
| 2015-07-27T20:13:03
| 2015-07-27T20:13:03
| 33,457,983
| 2
| 1
| null | 2015-09-04T20:01:38
| 2015-04-05T22:54:04
|
R
|
UTF-8
|
R
| false
| false
| 1,620
|
r
|
parse-user.R
|
#' create a new user
#'
#' Create and sign in a user
#'
#' @param username desired username
#' @param password desired password
#' @param ... extra arguments to set, such as email
#'
#' @details A user's login token and other info is stored in the
#' \code{parse_user} option.
#'
#' @return A _User parse_object
#'
#' @export
parse_signup <- function(username, password, ...) {
ret <- parse_api_POST("users/", list(username = username, password = password, ...))
ret <- as.parse_object(ret, "_User")
options(parse_user = ret)
invisible(ret)
}
#' Log a user into Parse
#'
#' This logs a user into Parse based on a username and pasword combination.
#'
#' @param username username
#' @param password password
#'
#' @details A user's login token and other info is stored in the
#' \code{parse_user} option.
#'
#' @return A _User parse_object
#'
#' @export
parse_login <- function(username, password) {
ret <- parse_api_GET("login/", query = list(username = username, password = password))
ret <- as.parse_object(ret, "_User")
options(parse_user = ret)
invisible(ret)
}
#' Log out the current Parse user
#'
#' @export
parse_logout <- function() {
options(parse_user = NULL)
}
#' lookup information about the current user
#'
#' @export
parse_current_user <- function() {
as.parse_object(parse_api_GET("users/me"), "_User")
}
#' reset a user's password (if they have an email address registered)
#'
#' @param email User's e-mail address
#'
#' @export
parse_password_reset <- function(email) {
ret <- parse_api_POST("requestPasswordReset", email = email)
invisible(ret)
}
|
7b983d4a6f84386179d91c14c9261e4120cbe098
|
cc47a0995ab1bd977bccf7d462bd55655cb4712b
|
/man/temp_folder.Rd
|
ac4e6ba9515e8a0e466748703aa2ca7c13af158a
|
[
"MIT"
] |
permissive
|
Green-EyE/glamr
|
fbc5af9b82e36f5db7fb759b7cc5582b76342414
|
630218d7e4762cf443e14d99754e8736b42a6c89
|
refs/heads/main
| 2023-09-05T00:33:06.128482
| 2021-11-17T23:19:08
| 2021-11-17T23:19:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 833
|
rd
|
temp_folder.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{temp_folder}
\alias{temp_folder}
\title{Generate Temporary Folder}
\usage{
temp_folder(launch = FALSE)
}
\arguments{
\item{launch}{do you want to launch the temp folder in the Windows Explorer?
default = FALSE}
}
\value{
creates a temp directory and stores it as `folderpath_tmp`
}
\description{
`temp_folder` created a temporary folder in your AppData directory, which
will be automatically removed after you close your RStudio session.
}
\examples{
\dontrun{
load_secrets()
temp_folder(launch = TRUE)
purrr::walk2(.x = df_googlefiles$id,
.y = df_googlefiles$filename,
.f = ~googledrive::drive_download(googledrive::as_id(.x),
file.path(folderpath_tmp, .y))) }
}
|
6550f2ca49373c93b07a740ec65b24d4cfa1eecc
|
e2d6d1016a768b8db94d044d13789e3021a80fda
|
/R/plot_saturation_n.R
|
7ae4f4d0ceea52d6888044a29e91cea21e7b8e20
|
[] |
no_license
|
renzok/oligo4fun
|
60e814030fdc8f8707d82be7e3c71a5ffad19d53
|
5eb28e55b3bc41ca62fbfa71d7ba87a6a60dd38b
|
refs/heads/master
| 2021-01-16T19:03:21.789042
| 2016-02-29T22:09:24
| 2016-02-29T22:09:24
| 52,828,810
| 0
| 0
| null | 2016-02-29T22:14:49
| 2016-02-29T22:14:48
| null |
UTF-8
|
R
| false
| false
| 4,211
|
r
|
plot_saturation_n.R
|
#' Draw a saturation plot of the oligotyping input file (n) times
#' @description Perform multiple random subsamplings of your alignemnt.
#' @param aln a matrix containing the DNA sequences; this must be of class "DNAbin"
#' @param rep number of random subsamplings
#' @param nseqs a value for the number of sequences to pick
#' @param model a character string specifying the evolutionary model to be used by \code{\link[ape]{dist.dna}}
#' @param all a logical indicating whether to use all codon positions; defaults to FALSE so only the third codon position is used.
#' @param parallel a logical indicating whether to do the random subsampling on a sequential way or use the \code{\href{https://github.com/tudo-r/BatchJobs}{BatchJobs}}
#' framework to distribute the random subsamplings on different cores or in computer cluster
#' @param reg_id Name of registry. Displayed e.g. in mails or in cluster queue
#' @param reg_dir Path where files regarding the registry / jobs should be saved
#' @param conf_file Location of the configuration file to load
#' @details You can calculate multiple saturation plots and its associated statistics. The different
#' random subsamplings can be easily distributed over multiple cores or in a computer cluster.
#'
#' @return An object of class \dQuote{oligodiag} is a list containing at least the following components:
#' \describe{
#' \item{plot}{a ggplot object containing the saturation plots of each random sampling}
#' \item{seed}{the seeds used for picking the random sequences}
#' \item{aln}{a matrix of the random selected sequences stored in binary format for each random subsampling}
#' \item{combined_stats}{mean and standard deviation of transitions and transversions for each random subsampling}
#' \item{saturation}{whether your alignment presents saturation for each random subsampling}
#' \item{raw}{raw results for each random subsampling}
#' }
#'
#'
#' @examples saturation_plots <- plot_saturation_n(aln, nseqs = 1000, rep = 100, parallel = F)
#' @examples saturation_plots <- plot_saturation_n(aln, nseqs = 1000, rep = 100, parallel = T, reg_id = "test_id", reg_dir = "test-dir", conf_file = ".BatchJobs.R")
#' @export
plot_saturation_n<-function(aln = aln, rep = 100, nseqs = 1000, model = "K80", parallel = FALSE,
all = FALSE, reg_id = NULL, reg_dir = NULL, conf_file = NULL,
job_res = list(), ...){
results <- list()
if (parallel){
if ((is.null(conf_file)) || (is.null(reg_dir)) || (is.null(reg_id))){
stop("Please add the configuration file and registry values requiered for BatchJobs" , call. = FALSE)
}
function_args <- list()
fun <- plot_saturation
function_args <- list(aln = aln, nseqs = nseqs, model = model, all = all, verbose = FALSE, seed = 0, rsamp = TRUE)
iterations <- 1:rep
batch_function <- function(X) {
tmp <- do.call(fun, function_args)
}
BatchJobs::loadConfig(conf_file)
reg <- BatchJobs::makeRegistry(id=reg_id, file.dir=reg_dir)
id <- BatchJobs::batchMap(reg, batch_function, iterations)
plot_submission <- BatchJobs::submitJobs(reg, resources=job_res)
plot_run <- BatchJobs::waitForJobs(reg, id)
plot_runs <- reduceResultsList(reg)
removeRegistry(reg, ask = "no")
if (!plot_run) {
stop('Error in batch jobs', call. = FALSE)
}
}else{
plot_runs <- plyr::llply(1:rep, plot_saturation, ..., aln = aln, nseqs = nseqs, model = model, all = all, verbose = FALSE,
seed = 0, rsamp = TRUE, .parallel = F, .progress = plyr::progress_text(width = 80))
}
results$combined_stats <- dplyr::rbind_all(lapply(plot_runs, function(x) dplyr::rbind_list(x[["stats"]])))
results$raw <- plot_runs
results$plot <- lapply(plot_runs, function(x) x[["plot"]])
results$aln <- lapply(plot_runs, function(x) x[["aln"]])
results$aln <- lapply(results$aln, function(x) {class(x)<-"DNAbin"; x})
results$seed <- lapply(plot_runs, function(x) x[["seed"]])
results$saturation <- lapply(plot_runs, function(x) x[["saturation"]])
results$aln_no_3rd <- remove_3rd_codon(aln)
class(results)<-"oligodiag"
return(results)
}
|
f24f5ecc6874b6ff73ba6d18dc670238af370aca
|
04f349102910e5052ea34d3e7744e4d79a2fbb4f
|
/R/ffa_test_modifier.R
|
7ef63555545164893776c435b7d26a3d8f6994f4
|
[
"MIT"
] |
permissive
|
scoultersdcoe/CNAIM
|
f0728b00f0d0628e554975c78d767ee2c472fb3b
|
5c77ce4c50ef92fd05b9bb44b33fdca18302d020
|
refs/heads/master
| 2023-08-23T22:54:59.450292
| 2021-03-12T15:52:54
| 2021-03-12T15:52:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,697
|
r
|
ffa_test_modifier.R
|
#' @importFrom magrittr %>%
#' @title Oil Test Modifier
#' @description This function calculates the FFA test modifier based on the
#' levels of furfuraldehyde in the transformer oil. This function applies for
#' 33/10kV, 66/10kV and 132kV transformers. See e.g. section 6.13 on page
#' 67 in CNAIM (2017).
#' @param furfuraldehyde Numeric. Refers to the furfuraldehyde level in the
#' transformer oil. furfuraldehyde levels are measured in ppm.
#' A setting of \code{"Default"} will result in the best possible result.
#' @return Data table.
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' # FFA test modifier
#' ffa_test_modifier(furfuraldehyde = 50)
ffa_test_modifier <- function(furfuraldehyde = "Default") {
if (furfuraldehyde == "Default") furfuraldehyde <- -0.01
ffa_test_factor <-
gb_ref$ffa_test_factor
ffa_test_factor$Lower[1] <- -Inf
for (n in 1:nrow(ffa_test_factor)){
if (furfuraldehyde > as.numeric(ffa_test_factor$Lower[n]) &
furfuraldehyde <= as.numeric(ffa_test_factor$Upper[n])) {
ffa_test_factor <- ffa_test_factor$`FFA Test Factor`[n]
break
}
}
ffa_test_cap <- 10
ffa_test_collar <- ifelse(is.na(2.33 * furfuraldehyde^0.68), 0.5,
2.33 * furfuraldehyde^0.68)
ffa_test_collar <- ifelse(ffa_test_collar > 7, 7, ffa_test_collar)
ffa_test_mod <- data.frame(ffa_test_factor,
ffa_test_cap,
ffa_test_collar)
return(ffa_test_mod)}
|
d184d06a7c94ce247fe8721d59a9741038526679
|
866c42f91361653ecfa6eb91194bdbfb54651fdd
|
/src/10_merge.R
|
190ce78bee2f96cbeff7492595cf7030f8baf011
|
[] |
no_license
|
MJAlexander/working_group
|
a4add7cf44502ee4148e3754730bb8c1ac276b8c
|
42f9a618b47638c88730cd095336303e441b6148
|
refs/heads/main
| 2023-09-03T19:27:45.225797
| 2021-10-29T19:46:00
| 2021-10-29T19:46:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 218
|
r
|
10_merge.R
|
## Updated
# 2021/10/25
# 2021/10/27
# 2021/10/29
# data is from a google form
d <- read.csv("data/Formal demographers' group.csv")
d2 <- read.csv("data/Formal demographers' group 2.csv")
D <- d %>%
bind_rows(d2)
|
eb6d30469bc07aa7edf0b1e13279da730924a7a4
|
f75379b97150017bfa14b10ccbbc637f67d194e0
|
/main.r
|
a3e4966f7716c90a10ff4f4e57bb29c371546bfe
|
[] |
no_license
|
mark-me/R_time_series
|
639c8692e3ae631bfaf5ce7b3b9df69506c9d206
|
105f852d4cf9f6e651442ea8bc8a7394e638b2df
|
refs/heads/master
| 2020-03-09T23:02:38.811778
| 2018-07-04T11:40:49
| 2018-07-04T11:40:49
| 129,049,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 918
|
r
|
main.r
|
setwd("~/R scripts/R_time_series")
source("project.R")
open_project("R_time_series", "~/R scripts")
# Time-series packages ----
library(xts)
library(forecast)
library(tseries)
# Bike sharing data ----
url_bike_sharing <- "https://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip"
download_extract(url_bike_sharing, dir_input)
tbl_bike_sharing <- read_csv(paste0(dir_input, "/day.csv"))
tbl_bike_sharing$Date = as.Date(tbl_bike_sharing$dteday)
# Plot bike sharing data
ggplot(tbl_bike_sharing, aes(Date, cnt)) +
geom_line(col = col_graydon[1]) +
scale_x_date('Months') +
scale_y_continuous(labels = format_number) +
ylab("Daily Bike Checkouts") +
xlab("") +
theme_graydon("grid")
# Create time-season
ts_bike_sharing <- ts(tbl_bike_sharing$cnt)
ts_bike_sharing_new = tsclean(ts_bike_sharing)
test <- cbind(ts_bike_sharing, ts_bike_sharing_new)
test[,1] - test[,2]
|
e31f0526e66ff85a728fa4e55bc6fd11a6297157
|
f372297085cae82b1e2ab447d682b9233ca1cfa3
|
/R/HW.R
|
566b2c05ee2b4046362655b9eb5a173bd9f03363
|
[] |
no_license
|
SC19035/SC19035
|
5a9f98e4171ca1920331f5f21343fea1e79a2d0d
|
99dcc0139f0427a1c9c83bada52bbbc8ece0ed5f
|
refs/heads/master
| 2020-12-03T15:41:30.700801
| 2020-01-02T14:28:06
| 2020-01-02T14:28:06
| 231,376,173
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,153
|
r
|
HW.R
|
#' @title Benchmark R and Rcpp functions.
#' @name benchmarks
#' @description Use R package \code{microbenchmark} to compare the performance of C functions.
#' @import microbenchmark
#' @importFrom Rcpp evalCpp
#' @importFrom stats runif rnorm
#' @useDynLib SC19035
#' @examples
#' \dontrun{
#' ts <- microbenchmark(
#' rwC = rwMetropolisC(0.5, x0, N=1000),
#' rwR = rw.Metropolis(0.5, x0, N=1000))
#' print(summary(ts)[, c(1,3,5,6)])
#'
#' }
NULL
#' @title A rw.Metropolis sampler using R
#' @description A rw.Metropolis sampler using R
#' @param N the number of samples
#' @importFrom stats runif
#' @importFrom stats rnorm
#' @param x0 the random numbers
#' @param sigma the sigma
#' @return a random sample of size \code{n}
#' @examples
#' \dontrun{
#' sigma<-c(0.1,0.2)
#' x0<-200
#' rw.Metropolis(sigma[1], x0, N=1000)
#' }
#' @export
rw.Metropolis <- function(sigma, x0, N=1000) {
x <- numeric(N)
x[1] <- x0
u <- runif(N)
k <- 0
for (i in 2:N) {
y <- rnorm(1, x[i-1], sigma)
if (u[i] <= exp(-((abs(y)) - (abs(x[i-1])))))
x[i] <- y
else {
x[i] <- x[i-1]
k <- k+1
}
}
return(list(x = x, k = k))
}
|
ff012acfad8c9f939e2a02097b6eed1ec6151736
|
2cf80a17b74ef11b2ab2bde11d24330b52a99df5
|
/discussion_data_oct23_2.R
|
29e1c18c71f1cf01531fd55e6830009284c6bb63
|
[] |
no_license
|
maggierui/PiaazaData
|
7da8211c599e9117318155e4248e480da4449655
|
25ab0c5b6e07d4f4f23af94a3e408988f548e5dc
|
refs/heads/master
| 2021-05-04T00:27:31.521344
| 2016-11-01T02:48:32
| 2016-11-01T02:48:32
| 71,852,560
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,973
|
r
|
discussion_data_oct23_2.R
|
library(dplyr)
usersJson <- fromJSON("users.json")
class_contentJson<-fromJSON("class_content.json")
discussion_data<-data.frame(post_id=character(0), author=character(0), type=character(0), when=character(0),subject=character(0), content=character(0), stringsAsFactors=FALSE)
for(i in 1:nrow(class_contentJson)){
paste("i=",i)
post_record<-class_contentJson[i,]
post_log<-post_record$change_log
post_replies<-post_record$children
post_id<-post_log[[1]][[1]][[1]]
for(l in 1:length(post_record$history[[1]][[1]])){
author<-ifelse(is.null(post_record$history[[1]][[1]][[l]]),NA,post_record$history[[1]][[1]][[l]])
subject<-ifelse(is.null(post_record$history[[1]][[4]][[l]]),NA,post_record$history[[1]][[4]][[l]])
content<-ifelse(is.null(post_record$history[[1]][[3]][[l]]),NA,post_record$history[[1]][[3]][[l]])
type<-ifelse(is.null(post_record$type),NA,post_record$type)
when<-ifelse(is.null(post_log[[1]][[5]][[1]]),NA,post_log[[1]][[5]][[1]])
ob_list<-as.list(c(post_id,author,type,when,subject,content))
discussion_data<-rbind(discussion_data,setNames(ob_list,names(discussion_data)))
discussion_data[,1:6]<-apply(discussion_data[,1:6],2,as.character)
}
l<-1
if(length(post_replies[[1]])!=0){
for(j in 1:nrow(post_replies[[1]])){
paste("j is", j)
author<-ifelse(is.null(post_replies[[1]][[8]][[j]]),NA,post_replies[[1]][[8]][[j]])
post_replies_children<-post_replies[[1]][[2]][[j]]
content<-ifelse(is.null(post_replies[[1]]$subject[[j]]),NA,post_replies[[1]]$subject[[j]])
when<-ifelse(is.null(post_replies[[1]]$updated[[j]]),NA,post_replies[[1]]$updated[[j]])
type<-ifelse(is.null(post_replies[[1]]$type[[j]]),NA,post_replies[[1]]$type[[j]])
ob_list<-as.list(c(post_id,author,type,when,subject,content))
discussion_data<-rbind(discussion_data,setNames(ob_list,names(discussion_data)))
discussion_data[,1:6]<-apply(discussion_data[,1:6],2,as.character)
if(length(post_replies_children)!=0){
for (k in 1:nrow(post_replies_children)){
author<-ifelse(is.null(post_replies_children$uid[[k]]),NA,post_replies_children$uid[[k]])
content<-ifelse(is.null(post_replies_children$subject[[k]]),NA,post_replies_children$subject[[k]])
when<-ifelse(is.null(post_replies_children$updated[[k]]),NA,post_replies_children$updated[[k]])
type<-ifelse(is.null(post_replies_children$type[[k]]),NA,post_replies_children$type[[k]])
##discussion_data[nrow(discussion_data)+1]<-c(post_id,author,subject,content,type,when)
ob_list<-as.list(c(post_id,author,type,when,subject,content))
ob_list<-lapply(ob_list, function(x) ifelse(x == "NULL", NA, x))
discussion_data<-rbind(discussion_data,setNames(ob_list,names(discussion_data)))
discussion_data[,1:6]<-apply(discussion_data[,1:6],2,as.character)
}
k<-1
}
}
j<-1
}
}
write.csv(discussion_data, file = "Discussion_Data_Oct23.csv")
|
2cbbe84cb94c3d974a717fd3020a0de5d9550145
|
c09ddeaa31b0ef553d82bc617181784e80b03d71
|
/master_scripts_for_masters/coexpression_method_comparison_master.R
|
7805baea9a7e53194337cdd50fdd40150fd733de
|
[] |
no_license
|
alecstansell/code-masters
|
d4bafa7836ffe02fc9e9b02e94bce9c4e6cf92a1
|
e9df78f782caf0bb3eefb86c6197ecd1a06345aa
|
refs/heads/master
| 2022-09-18T09:43:10.488042
| 2020-05-28T10:50:32
| 2020-05-28T10:50:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,026
|
r
|
coexpression_method_comparison_master.R
|
##############################################################################################################################################################################################
#Master Clustering Comparison Experiment Using Cluster Profiler GO Enrichment
##############################################################################################################################################################################################
##############################################################################################################################################################################################
#Load Packages and script dirs
##############################################################################################################################################################################################
#List packages
packages <- c("tidyverse", "clusterProfiler", "org.Hs.eg.db", "biomaRt", "tidyr", "forcats", "ggplot2", "praise"
)
#Load packages
lapply(packages, require, character.only = TRUE)
#Directory where function scripts are
scriptdir <- "~/functions"
functions <- c('getmodule.R','get_entrez_function.R','clusterit.R', 'mixedtofloat.R')
for (i in 1:length(functions)){
source(file.path(scriptdir, functions[i]), echo = TRUE)
}
#Load SOM clustering function (Returns sig list of genes with comparison)
source('~/clustering/selforganisingmap/subset_self_organising_map.R')
#Load if you want to start from adjusted data
setwd("~/coexpression/final_coexpression/data")
load("final_heat_normalised.RData")
counts_input <- normalised_clean$variance_stabilised_counts
#####################################################################################################################################
#Run Self Organising map and return coexpression module groups
#####################################################################################################################################
#Run Self Organising Map for each comparison to return
EMvCM_SOM <- som_return_cluster(user_interface = FALSE, cluster = 5 , rlen = 2000)
NvsCM_SOM <- som_return_cluster(user_interface = FALSE, cluster = 5, rlen = 2000)
NvsEM_SOM <- som_return_cluster(user_interface = FALSE, cluster = 5, rlen = 2000)
#Create list of above results
SOMresults <- list(EMvCM_SOM, NvsCM_SOM, NvsEM_SOM)
setwd('~/clustering/selforganisingmap/data')
list.files()
load("SOM_pairwise_results.RData")
#####################################################################################################################################
#Kmeans analysis
#####################################################################################################################################
#Returns plot of kmeans and alist with the modules
source('~/clustering/kmeans/kmeans_all.R')
setwd('~/clustering/kmeans/data')
load_from_file = TRUE
if(load_from_file){
#load R data
load("kmeans_input.RData")
#Break up list
EMvCM_kmeans <- kmeans_mean_input$EMvsCM
NvsCM_kmeans <- kmeans_mean_input$NvsCM
NvsEM_kmeans <- kmeans_mean_input$NvsEM
}else{
#Load kmeans input function which takes all relevant genes for a compariosn and averages them
source('~/clustering/kmeans/kmeans_input.R')
#Create average foiles for kmeans input
EMvCM_kmeans <- kmeans_input(counts_input)
NvsCM_kmeans <- kmeans_input(counts_input)
NvsEM_kmeans <- kmeans_input(counts_input)
#Save intermediary file
kmeans_mean_input <- list(EMvCM_kmeans, NvsCM_kmeans, NvsEM_kmeans)
names(kmeans_mean_input) <- c("EMvsCM", "NvsCM", "NvsEM")
save(kmeans_mean_input, file = "kmeans_input.RData")
}
nrow(NvsEM_kmeans)
load_from_file <- TRUE
if(load_from_file){
setwd('~/clustering/kmeans/data')
load("kmeans_module_results.RData")
EMvCMkmeans_mod <- kmeans_mod_res$EMvCMkmeans_mod
NvsCM_kmeans_mod <- kmeans_mod_res$NvsCM_kmeans_mod
NvsEM_kmeans_mod <- kmeans_mod_res$NvsEM_kmeans_mod
}else{
#RUn kmeans clustering and return list of genes
EMvCMkmeans_mod <- kmeansit(EMvCM_kmeans, "EMvCM_kmeans")
length(NvsCM_kmeans_mod$log2FoldChange)
NvsCM_kmeans_mod <- kmeansit(NvsCM_kmeans, "NvsCM_kmeans")
#NvsEM_kmeans_mod <- kmeansit(NvsEM_kmeans, "NvsEM_kmeans")
NvsEM_kmeans_mod <- kmeansit(NvsEM_kmeans, "NvsEM_kmeans")
setwd('~/clustering/kmeans/data')
kmeans_mod_res <- list(EMvCMkmeans_mod, NvsCM_kmeans_mod, NvsEM_kmeans_mod)
names(kmeans_mod_res) <- c("EMvCMkmeans_mod", "NvsCM_kmeans_mod", "NvsEM_kmeans_mod")
save(kmeans_mod_res, file = "kmeans_module_results.RData")
}
#####################################################################################################################################
#Loop through all SOM Results and get cluster profiler gene modules
#####################################################################################################################################
#na.ommit(unique(gene_list$module)[1:3])
names(kmeans_mod_res)
SOMresults <- kmeans_mod_res
setwd("~/coexpression/final_coexpression/results/30157_genes/wgcna_lists")
EMCMwgcna <- read.csv("EMvsCM_coexpressed_mod_nums_genes.csv", stringsAsFactors = TRUE)
EMCMwgcna$modulenumber <- EMCMwgcna$WGCNA_module
NCMwgcna <- read.csv("NvsCM_coexpressed_mod_nums_genes.csv", stringsAsFactors = TRUE)
NCMwgcna$modulenumber <- NCMwgcna$WGCNA_module
NEMwgcna <- read.csv("NvsEM_coexpressed_mod_nums_genes.csv", stringsAsFactors = TRUE)
NEMwgcna$modulenumber <- NEMwgcna$WGCNA_module
head(NEMwgcna)
wgcna_all <- list(EMCMwgcna, NCMwgcna, NEMwgcna)
names(wgcna_all) <- c("EMCMwgcna", "NCMwgcna", "NEMwgcna")
save(wgcna_all, file = "wgcna_listsall.RData")
load("wgcna_listsall.RData")
gene_list <- wgcna_all$EMCMwgcna
##################################################################################################################################
##################################################################################################################################
'Loop for cluster Profiler using an object produced with all 3 comparisons WGCNA'
##################################################################################################################################
#####################################################################################################################################
#Loop for clusterprof for a particular comp
list_GO <- NULL
i <-
library(Homo.sapiens)
for(i in 1:3){
#Set gene list input
gene_list <- final_wgcna_allremoved[[i]]
#Remove na values
range <- na.omit(unique(gene_list$module))
#Ensure correct class type for lists (ie not intergers - must be characters)
outdir <- paste("~/coexpression/final_coexpression/results/30157_genes/cluster_profiler/all_methods/", names(final_wgcna_allremoved[i]), sep = "")
setwd(outdir)
for(module in range[1:length(range)]){
genes_module <- getmodule(gene_list, module)
#genes_entrez <- getentrez(genes_module$ENSEMBL)
genes_entrez <- select(Homo.sapiens, keytype='ENSEMBL', keys=as.character(genes),
columns=c('GENENAME', 'SYMBOL',"ENTREZID"))
# Get rid of duplicated entries with descriptions
genes_entrez <- gene_info[!duplicated(gene_info$ENSEMBL),]
#Inputs required list of entrez genes, universe and type of ontology required (BP, CC, MF, ALL) then writes to file each module GO terms in current working directory.
GO_termobj <- clusterit(genes_GO = genes_entrez$ENTREZID, universe_entrez = universe_entrez, ont = 'all')
list_GO <- list(list_GO, GO_termobj )
print(praise("${Exclamation}! ${adjective}!"))
}
}
##################################################################################################################################
##################################################################################################################################
'Using just a list of genes inputted from WGCNA modules'
##################################################################################################################################
##################################################################################################################################
#Loop for clusterprof for a particular comp
list_GO <- NULL
library(Homo.sapiens)
#Set gene list input
gene_list <- EMvsN_deseq2
#Remove na values
range <- na.omit(unique(gene_list$module))
#Ensure correct class type for lists (ie not intergers - must be characters)
# outdir <- paste("~/coexpression/final_coexpression/results/30157_genes/NvsEM/", names(final_wgcna_allremoved[3]), sep = "")
# setwd(outdir)
# getwd()
for(module in range[1:length(range)]){
genes_module <- getmodule(gene_list, module)
#genes_entrez <- getentrez(genes_module$ENSEMBL)
genes_entrez <- select(Homo.sapiens, keytype='ENSEMBL', keys=as.character(genes_module$ENSEMBL),
columns=c('GENENAME', 'SYMBOL',"ENTREZID"))
# Get rid of duplicated entries with descriptions
genes_entrez <- genes_entrez[!duplicated(genes_entrez$ENSEMBL),]
#Inputs required list of entrez genes, universe and type of ontology required (BP, CC, MF, ALL) then writes to file each module GO terms in current working directory.
GO_termobj <- clusterit(genes_GO = genes_entrez$ENTREZID, universe_entrez = universe_entrez, ont = 'all')
#list_GO <- list(list_GO, GO_termobj )
print(paste(praise("${Exclamation}! ${adjective}!"), module, "is done"))
}
##################################################################################################################################
##################################################################################################################################
'Amalgamate all GO terms'
##################################################################################################################################
#####################################################################################################################################
source('~/functions/version_nums_data_type_conversions/almagamte_go_terms.R')
#Run through all comparisons
#name <- "EMvsCM_wgcna_allmethods"
#name <-"NvsCM_wgcna_allmethods"
name <-"NvsEM_wgcna_allmethods"
name <-"module"
file_location <- paste("~/coexpression/final_coexpression/results/30157_genes/NvsEM/cluster_profiler/", name, sep = "")
out_dir <- "~/coexpression/final_coexpression/results/30157_genes/NvsEM"
almalgamate_GO(file_location, outdir)
#####################################################################################################################################
#Bar Graph to compare GO terms
#####################################################################################################################################
require(ggpubr)
#Containing all three methods for comparison with all GO terms across all three comparisons
all <- rbind(km_all, SOM_all, wgcna_all)
#Make gene ratios percent values
all$GeneRatio <- all$GeneRatio*100
# Compare methods
ggboxplot(all, x = "Type", y = "GeneRatio", color = "type",
add = "jitter", legend = "none") +
rotate_x_text(angle = 45)+
geom_hline(yintercept = mean(all$GeneRatio), linetype = 2)+ # Add horizontal line at base mean
stat_compare_means(method = "anova", label.y = 60)+ # Add global annova p-value
stat_compare_means(label = "p.signif", method = "t.test",
ref.group = ".all.") # Pairwise comparison against all
#####################################################################################################################################
#Code to see number of modules and frequency of each module
#####################################################################################################################################
#View table of frequencies of the modules
as.data.frame(table(gene_list$module))
##Write to file
write.csv(as.data.frame(table(gene_list$module)), "frequency_deseq_modules.csv")
|
2b623aec089a90c10f8bd841477838217987ef96
|
c87ed10b9b17e2695a5ae1ca61e9d45d510ef8b1
|
/R/modelEvalAUC.R
|
b7414336c372ff651d1751671016f7da103e164d
|
[] |
no_license
|
DrRoad/mlp-automl
|
3ffbaadc738d08e453719895b096e5838646235e
|
2026ee0c7e73c905fdca60bc984d412c288650a1
|
refs/heads/master
| 2020-04-21T15:42:47.578159
| 2017-12-14T18:36:49
| 2017-12-14T18:36:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
modelEvalAUC.R
|
modelEvalAUC <- function(actual, predicted)
{
# Functiont to calculate AUC
library(pROC)
roc_obj <- roc(actual, predicted)
score <- auc(roc_obj)
return(score)
}
|
4b8aeb36bd81a7934a963b076f9decad5bbb0906
|
7a5810ea96d123ed70891a64a39104406a1e8429
|
/191215_some_multivariate_TF_update_revision.R
|
80377cf5591a743b4b381a2704015a1aaff882b5
|
[] |
no_license
|
wesleylcai/bmcmedgenomics2020_metastasis
|
24ee04028028bcbb292f69f6cee42f8b04b4281a
|
16c9a013567a08c242e2c18ee58e57bf5e4235b9
|
refs/heads/master
| 2020-11-24T15:13:43.451580
| 2020-08-18T11:40:32
| 2020-08-18T11:40:32
| 228,210,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,882
|
r
|
191215_some_multivariate_TF_update_revision.R
|
# Last Updated:
# Author: Wesley Cai
# Purpose: Relate transcription-factor (TF) expression to metastatic relapse
#   (lung / brain / any site) in breast cancer patients via multivariate Cox
#   models adjusted for clinical covariates, plus univariate log-rank tests.
#   Writes per-relapse-type result tables (txt/xlsx/RData) and a Venn diagram
#   of TFs concordant between lung and brain relapse.
# NOTE(review): all input paths are absolute /Users/wcai/... paths, so this
#   script only runs on the author's machine as written.
library(data.table)
library(ggplot2)
library(jetset)
library(reshape2)
library(survminer)
library(survival)
library(openxlsx)
library(VennDiagram)
# Get the jetset best probe per gene
# NOTE: this helper shadows the loaded package name `jetset`; it maps a gene
# symbol to its best hgu133a probe id (NA if no probe exists).
jetset = function(symbol){
return(as.character(jmap(chip = "hgu133a", symbol = symbol)))
}
# load gsea
# source("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/resources/gsea/gmt/load_gmt.R")
# load for revamp
# source("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/190720_revamp/analysis/190720_load_files.R")
# Set up the output directory and make it the working directory; all relative
# write paths below land in outputfolder.
mainwd <- "/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/190720_revamp/analysis/"
inputfolder <- "input/"
outputfolder <- "output/tf_multivariate_update_revision"
dir.create(file.path(mainwd, inputfolder), recursive = TRUE, showWarnings = FALSE)
dir.create(file.path(mainwd, outputfolder), recursive = TRUE, showWarnings = FALSE)
setwd(file.path(mainwd, outputfolder))
# Lookup table mapping lowercase cell-line tags to display names and tissues.
lines.dict <- data.table(line.lower = c("bom", "brm", "lm", "common"),
line.fixed = c("BoM", "BrM2", "LM2", "Met"),
tissue = c("Bone", "Brain", "Lung", "Bone|Brain|Lung"))
#load(file = "some.multivariate.res.RData")
# Load expression matrix (metDataset), gene table, and HiChIP motif results.
load("/Users/wcai/Google_Drive/_Lab/Data/GEO_datasets/metastasis_patient_datasets/morales/metDataset.Rdata")
load("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/resources/cgdsr/new.total.genes.df.log2.RData")
load("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/190720_revamp/analysis/hichip/output/motif_allinter_cluster_statistics/distal.prom.motif.res.RData")
# Transpose so rows = patients, columns = probes; keep patient ids as a column.
metDataset <- t(metDataset)#[1:10,1:10])
metDataset <- as.data.table(metDataset, keep.rownames = "patient")
clinical <- fread("/Users/wcai/Google_Drive/_Lab/Data/GEO_datasets/metastasis_patient_datasets/morales/annotations/perou_annotation.txt", sep = "\t", header = TRUE, check.names = TRUE)
# Drop the NKI295 cohort and rename GEO.array to patient.
clinical <- clinical[Cohort != "NKI295",c(.(patient = GEO.array), .SD), .SDcols = c(grep("", colnames(clinical)))]
# Keep some variables
# Recode clinical covariates as factors with fixed level order (in place).
clinical$Subtype
clinical$ER.clinical <- factor(clinical$ER.clinical, c("0", "1"))
clinical$PR.clinical <- factor(clinical$PR.clinical, c("0", "1"))
clinical$Her2.clinical <- factor(clinical$Her2.clinical, c("0", "1"))
clinical$Age
clinical$LN.status <- factor(clinical$LN.status, c("0", "1"))
#clinical$Dscore # remove because custom score
#clinical$Proliferation # remove because custom score
clinical$T.Stage <- factor(clinical$T.Stage, c("1", "2", "3", "4"))
clinical$Differention <- factor(clinical$Differention, c("1", "2", "3", "4"))
clinical$Chemo <- factor(clinical$Chemo, c("0", "1"))
#clinical$Hormone <- factor(clinical$Hormone, c("0", "1")) # remove because already have hormone info
# Only keep some variables
covariates <- c("Subtype", "ER.clinical", "PR.clinical", "Her2.clinical",
"Age", "T.Stage", "Differention", "Chemo")
#covariates <- c("Subtype", "Age", "T.Stage", "Differention", "Chemo")
# Keep only patients with complete covariate data, plus survival columns.
clinical.final <- clinical[complete.cases(clinical[,.SD,.SDcols = covariates]),.SD,
.SDcols = c("patient", "MFS", grep("relapse", colnames(clinical), value = TRUE),
covariates)]
nrow(clinical.final)
colnames(clinical.final)[1] <- "patient"
i <- "TFAP2C"
relapse.type <- "Lung.relapse"
final.list <- list()
no_probe <- c()
distal.prom.motif.res <- as.data.table(distal.prom.motif.res)
# Genes of interest = TFs with distal/promoter motif hits.
goi <- distal.prom.motif.res$hgnc
nrow(clinical.final[Subtype == "Basal"])
# For subtype expression specificity
load("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/190720_revamp/analysis/output/tf_subtype_correlation/tfs.subtype.specificity.RData")
# Perform all multivariate cox
relapse.type <- "Lung.relapse"
#### Test RUNX2 and RARA ####
# Spot check: correlation between RUNX2 and RARA/ETS2 expression (best probes).
met.subset <- metDataset[,.(patient, RUNX2 = get(jetset("RUNX2")),
RARA = get(jetset("RARA")),
ETS2 = get(jetset("ETS2")))]
cor.test(met.subset[,get("RUNX2")], met.subset[,get("RARA")])
cor.test(met.subset[,get("RUNX2")], met.subset[,get("ETS2")])
met.subset <- merge(met.subset, clinical.final, by = "patient")
ggplot(met.subset[Subtype == "Basal"], aes(RUNX2, RARA, color = Subtype)) +
geom_point() +
geom_smooth(method = "lm") +
theme_bw()
#metDataset[,.SD,.SDcols = 1]
#### Test RUNX2 and RARA ####
# Main loop: for each relapse endpoint and each TF, split patients at the
# median of TF expression ("Low"/"High"), fit a multivariate Cox model
# (quant + clinical covariates) and a univariate log-rank test, and collect
# hazard ratios and p-values per TF.
for(relapse.type in c("Lung.relapse", "Brain.relapse", "Any.relapse")){
relapse.list <- list()
message(relapse.type)
i <- "TFAP2C"
for(i in goi){
if(is.na(jetset(i))){
no_probe <- c(no_probe, i)
} else {
dat <- merge(clinical.final, metDataset[,.SD, .SDcols = c("patient", jetset(i))], by = "patient", all.x = TRUE)
colnames(dat)[ncol(dat)] <- i
dat[, quant := cut(get(i), quantile(get(i), c(0,1/2,1)), labels = c("Low", "High"), include.lowest = TRUE)]
res.cox <- coxph(as.formula(paste0("Surv(MFS,", relapse.type, ") ~ ","quant + ", paste0(covariates, collapse = "+"))), data = dat)
res.cox.sum <- summary(res.cox)
# Column 2 = exp(coef) (HR); column 5 = Pr(>|z|) in coxph summary.
res.cox.sum.df <- signif(res.cox.sum$coefficients[,c(2,5)],3)
colnames(res.cox.sum.df) <- c("HR", "pval")
HR <- t(res.cox.sum.df)["HR",]
names(HR) <- paste0(names(HR), ".HR")
pval <- t(res.cox.sum.df)["pval",]
names(pval) <- paste0(names(pval), ".pval")
logrank <- survdiff(as.formula(paste0("Surv(MFS,", relapse.type, ") ~ ","quant")), data = dat)
logrank.p <- 1 - pchisq(logrank$chisq, 1)
logrank.HR = (logrank$obs[2]/logrank$exp[2])/(logrank$obs[1]/logrank$exp[1])
names(logrank.p) <- "logrank.pval"
names(logrank.HR) <- "logrank.HR"
# Interleave pval and HR columns (pval1, HR1, pval2, HR2, ...).
idx <- order(c(seq_along(pval), seq_along(HR)))
relapse.list[[i]] <- c(signif(logrank.p,3), signif(logrank.HR,3), (c(pval,HR))[idx])
#relapse.list[[i]] <- c(signif(logrank.p,3), signif(logrank.HR,3), pval, HR, signif(res.cox.sum$waldtest[3],3))
}
}
final.list[[relapse.type]] <- data.table(do.call(rbind, relapse.list), keep.rownames = "symbol")
}
# Adjust p.value and add column for Motif+HR concordance+Subtype specificity
# A TF is "concordant" when its survival HR direction (both Cox and log-rank)
# agrees with its motif direction (up/down) in the matching metastatic line.
final.list.adjust <- final.list
for(relapse.type in c("Lung.relapse", "Brain.relapse", "Any.relapse")){
tmp <- final.list[[relapse.type]]
# for(pval in grep("\\.pval", colnames(tmp), value = TRUE)){
# tmp[, eval(paste0(pval, ".adj")) := p.adjust(get(pval), method = "BH")]
# }
tmp <- merge(tmp, distal.prom.motif.res, by.x = "symbol", by.y = "hgnc", all.x = TRUE)
tmp <- merge(tmp, tfs.subtype.specificity[,.(symbol, enriched.subtype = subtype, depleted.subtype = d.subtype)], by = "symbol", all.x = TRUE)
if(relapse.type == "Lung.relapse"){
tmp[quantHigh.HR < 1 & logrank.HR < 1 & !is.na(lm.down), lm.concordant := TRUE]
tmp[quantHigh.HR > 1 & logrank.HR > 1 & !is.na(lm.up), lm.concordant := TRUE]
} else if(relapse.type == "Brain.relapse"){
tmp[quantHigh.HR < 1 & logrank.HR < 1 & !is.na(brm.down), brm.concordant := TRUE]
tmp[quantHigh.HR > 1 & logrank.HR > 1 & !is.na(brm.up), brm.concordant := TRUE]
}
final.list.adjust[[relapse.type]] <- tmp
}
test <- final.list.adjust[["Lung.relapse"]]
# Write per-endpoint result tables and a combined xlsx workbook.
fwrite(final.list.adjust[["Lung.relapse"]], "RSAT.hichip.some.lung.multivariate.padj.txt", sep = "\t")
fwrite(final.list.adjust[["Brain.relapse"]], "RSAT.hichip.some.brain.multivariate.padj.txt", sep = "\t")
fwrite(final.list.adjust[["Any.relapse"]], "RSAT.hichip.some.any.multivariate.padj.txt", sep = "\t")
wb <- createWorkbook("RSAT.res")
for(type in names(final.list.adjust)){
tmp <- final.list.adjust[[type]]
addWorksheet(wb, sheetName = type, zoom = 150)
writeData(wb, type, tmp)
}
saveWorkbook(wb, paste0("RSAT.hichip.some.multivariate.xlsx"), overwrite = TRUE)
RSAT.hichip.some.multivariate.res <- final.list.adjust
save(RSAT.hichip.some.multivariate.res, file = "RSAT.hichip.some.multivariate.res.RData")
load("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/190720_revamp/analysis/output/tf_multivariate_update_revision/RSAT.hichip.some.multivariate.res.RData")
#### Get Overlapping and Specific Survival, sig by either cox or log-rank ####
# Significant = concordant AND (Cox quantHigh p < 0.05 OR log-rank p < 0.05).
lung.concordant.sig <- RSAT.hichip.some.multivariate.res[["Lung.relapse"]][lm.concordant == TRUE & (quantHigh.pval < 0.05 | logrank.pval < 0.05),.(symbol, logrank.pval, logrank.HR, quantHigh.pval, quantHigh.HR, class, brm.down, brm.up, lm.down, lm.up, ensembl, brm_l2fc, brm_padj, lm_l2fc, lm_padj, lm.concordant)]
brain.concordant.sig <- RSAT.hichip.some.multivariate.res[["Brain.relapse"]][brm.concordant == TRUE & (quantHigh.pval < 0.05 | logrank.pval < 0.05),.(symbol, logrank.pval, logrank.HR, quantHigh.pval, quantHigh.HR, class, brm.down, brm.up, lm.down, lm.up, ensembl, brm_l2fc, brm_padj, lm_l2fc, lm_padj, brm.concordant)]
both.concordant.sig <- lung.concordant.sig[symbol %in% intersect(lung.concordant.sig$symbol, brain.concordant.sig$symbol),.(symbol, class, brm.down, brm.up, lm.down, lm.up, ensembl, brm_l2fc, brm_padj, lm_l2fc, lm_padj)]
wb <- createWorkbook("RSAT.concordant")
addWorksheet(wb, sheetName = "lung.concordant", zoom = 150)
writeData(wb, "lung.concordant", lung.concordant.sig)
addWorksheet(wb, sheetName = "brain.concordant", zoom = 150)
writeData(wb, "brain.concordant", brain.concordant.sig)
addWorksheet(wb, sheetName = "both.concordant", zoom = 150)
writeData(wb, "both.concordant", both.concordant.sig)
saveWorkbook(wb, paste0("RSAT.concordant.xlsx"), overwrite = TRUE)
fwrite(lung.concordant.sig, "lung.concordant.sig.txt", sep = "\t")
fwrite(brain.concordant.sig, "brain.concordant.sig.txt", sep = "\t")
fwrite(both.concordant.sig, "both.concordant.sig.txt", sep = "\t")
#### Get Overlapping and Specific Survival, sig by either cox or log-rank ####
# Venn diagram of lung- vs brain-concordant significant TFs.
pdf(paste0("survival.venn.pdf"), width = 2, height = 2)
draw.pairwise.venn(area1 = length(lung.concordant.sig$symbol), area2 = length(brain.concordant.sig$symbol), cross.area = length(both.concordant.sig$symbol),
category = c("", ""))
dev.off()
|
f28ca3e587464a518e57c5f5865fc21faa49fa28
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/diegovalle/sinaloa-discrepancy/tests.R
|
81178adabc40715725c7bf184f3afd44a4042e5b
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 701
|
r
|
tests.R
|
library(testthat)
# Test the function ICDSeq: it should expand a range of ICD-10 codes into
# zero-padded code strings.
# Modernized from the deprecated expect_that()/matches() API; matches()
# expects a single regexp, so passing a character vector (as the original
# did) was fragile. Exact equality is the intended check here.
expect_equal(ICDSeq(start = "A1", end = "A3"), c("A01", "A02", "A03"))
expect_equal(ICDSeq(start = "Z1", end = "Z6"), c("Z01", "Z02", "Z03",
                                                "Z04", "Z05", "Z06"))
# Test that the mortality database is coded correctly: every death whose
# CAUSADEF falls in an ICD range must carry the matching CAUSE label.
# grepl() preserves the original regex-match semantics of matches().
expect_true(all(grepl("Cut/pierce",
                      deaths[deaths$CAUSADEF %in% ICDSeq("W25", "W29"), ]$CAUSE)))
expect_true(all(grepl("Firearm",
                      deaths[deaths$CAUSADEF %in% ICDSeq("W32", "W34"), ]$CAUSE)))
expect_true(all(grepl("Unspecified",
                      deaths[deaths$CAUSADEF %in% "X59", ]$CAUSE)))
expect_true(all(grepl("Drowning",
                      deaths[deaths$CAUSADEF %in% ICDSeq("W65", "W74"), ]$CAUSE)))
|
497c5801da4382e979dbfbe38a6bcfc67bcd2c18
|
56868eb5bc5890adf9624d7d3b44083839fe812d
|
/Week08-FoodCarbonFootprint/foodCarbonFootprint.R
|
caa716bd9c514668e57906988e98c5d13fce3932
|
[] |
no_license
|
riveraderrick5/tidytuesday
|
371cc0fab8721c0bb27437eb20e158255c876b66
|
020ff375aafcd24ea6688031d0a7735b9d00ede1
|
refs/heads/master
| 2022-06-29T00:36:41.701816
| 2020-05-12T19:32:37
| 2020-05-12T19:32:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,764
|
r
|
foodCarbonFootprint.R
|
# Read in data --------------------------------------------------------------------------------
food_consumption <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-18/food_consumption.csv')
# Prep for plot -------------------------------------------------------------------------------
# Libraries
library(tidyverse)
library(rnaturalearth)
# Which food category emits the most CO2 per person?
food_consumption %>%
  group_by(food_category) %>%
  summarise(mean = mean(co2_emmission), n = n())
## Beef, no surprise there
# Which country has the highest beef CO2 emissions?
food_consumption %>%
  filter(food_category == "Beef") %>%
  arrange(desc(co2_emmission))
## Argentina
# World polygons as an sf object for mapping.
world <- ne_countries(scale = "medium", returnclass = "sf")
# Beef rows only; harmonize the country name used by the map data.
beef_df <- food_consumption %>%
  filter(food_category == "Beef") %>%
  mutate(country.corr = str_replace(country, "USA", "United States"))
# Attach beef emissions to the map polygons and drop Antarctica.
world_beef <- world %>%
  left_join(beef_df, by = c("name" = "country.corr")) %>%
  filter(geounit != "Antarctica")
# Plot ----------------------------------------------------------------------------------------
beef_plot <- ggplot(data = world_beef) +
  geom_sf(aes(fill = co2_emmission), colour = NA) +
  scale_fill_viridis_c("Emissions \n(Kg CO2/person/yr)", direction = -1) +
  theme_minimal() +
  theme(plot.title = element_text(hjust = .5, color = "black", face = "bold", size = 13),
        axis.text.x = element_text(hjust = .5, color = "black", face = "bold", size = 10),
        legend.title = element_text(size = 10)) +
  labs(title = ('' ~ CO[2] ~ ' emissions from beef consumption'))
# Save out plot
png('beefPlot.png', width = 6, height = 3, units = 'in', res = 150)
beef_plot
dev.off()
|
f92d60a6d3bf36b201e5cfeafd8ab32bfb459377
|
d9d50ea391c073d306f8a6fcc9d6d8bc7610322a
|
/v1.R
|
c50138feb9212e2c517fce2a5ce1c4d7aca63f42
|
[] |
no_license
|
pvlohith/Gun-laws
|
c1ac1480f394d24e7ec19e126b880b3c2d5c0fb4
|
4469a4de02f2426be364eccc585f76bda74d3100
|
refs/heads/master
| 2020-04-08T17:00:40.643236
| 2018-11-30T01:54:09
| 2018-11-30T01:54:09
| 159,546,278
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,344
|
r
|
v1.R
|
# Exploratory analysis of the guns panel data: loads the Stata dataset,
# computes yearly/state averages split by shall-issue law status, and plots
# average crime rates over time.
# NOTE(review): rm(list=ls()) wipes the user's entire workspace; avoid in
# shared scripts.
rm(list=ls(all=TRUE))
#install.packages("haven")
library(haven)
library(ggplot2)
library(dplyr)
library(plm)
# guns.dta: full panel; 3years.csv: the year before/of/after shall-law
# adoption per state (see comment further below).
guns = read_dta("guns.dta")
guns1 = read.csv("3years.csv")
colnames(guns1)[1]="year"
guns
summary(guns)
# Interactive help calls (no effect in batch runs).
?ggplot
?dplyr
#data grouped by year
a1 <- guns %>% select(year,vio,mur,rob,incarc_rate,pb1064,pw1064,pm1029,pop,avginc,density,stateid,shall) %>% arrange(year)
#average of different variables grouped by year
a2 <- a1 %>% group_by(year) %>% summarise(avg_vio=mean(vio),avg_mur=mean(mur),avg_rob=mean(rob),avg_incarc_rate=mean(incarc_rate),avg_pb1064=mean(pb1064), avg_pw1064=mean(pw1064),avg_pm1029=mean(pm1029),avg_pop =mean(pop),avg_avginc=mean(avginc),avg_density=mean(density) ) #average mur,vio and rob by year
#average of different variables grouped by state
a2_1 <- a1 %>% group_by(stateid) %>% summarise(avg_vio=mean(vio),avg_mur=mean(mur),avg_rob=mean(rob),avg_incarc_rate=mean(incarc_rate),avg_pb1064=mean(pb1064), avg_pw1064=mean(pw1064),avg_pm1029=mean(pm1029),avg_pop =mean(pop),avg_avginc=mean(avginc),avg_density=mean(density) )
#data where shall=0
a21 <- a1 %>% filter(shall==0)
#data where shall=1
a22 <- a1 %>% filter(shall==1)
#average of variables where shall=0 grouped by year
a21 <- a21 %>% group_by(year) %>% summarise(avg_vio=mean(vio),avg_mur=mean(mur),avg_rob=mean(rob),avg_incarc_rate=mean(incarc_rate),avg_pb1064=mean(pb1064), avg_pw1064=mean(pw1064),avg_pm1029=mean(pm1029),avg_pop =mean(pop),avg_avginc=mean(avginc),avg_density=mean(density) ) #average mur,vio and rob by year before shall law
#average of varaibles where shall=1 grouped by state
a22 <- a22 %>% group_by(year) %>% summarise(avg_vio=mean(vio),avg_mur=mean(mur),avg_rob=mean(rob),avg_incarc_rate=mean(incarc_rate),avg_pb1064=mean(pb1064), avg_pw1064=mean(pw1064),avg_pm1029=mean(pm1029),avg_pop =mean(pop),avg_avginc=mean(avginc),avg_density=mean(density) ) #average mur,vio and rob by year after shall law
?plot
# Scatterplot matrices of the averaged tables.
plot(a2)
plot(a21)
plot(a22)
# Smoothed time trends of average crime rates, overall and split by law status.
ggplot(data=a2, aes(x=year,y=avg_mur))+ geom_smooth()+labs(title="Average murder rate across years")
ggplot(data=a2, aes(x=year,y=avg_vio))+ geom_smooth()+labs(title="Average vio rate across years")
ggplot(data=a2, aes(x=year,y=avg_rob))+ geom_smooth()+labs(title="Average rob rate across years")
ggplot(data=a21, aes(x=year,y=avg_mur))+ geom_smooth()+labs(title="Average murder rate across years before shall law")
ggplot(data=a21, aes(x=year,y=avg_vio))+ geom_smooth()+labs(title="Average vio rate across years before shall law")
ggplot(data=a21, aes(x=year,y=avg_rob))+ geom_smooth()+labs(title="Average rob rate across years before shall law")
ggplot(data=a22, aes(x=year,y=avg_mur))+ geom_smooth()+labs(title="Average murder rate across years after shall law")
ggplot(data=a22, aes(x=year,y=avg_vio))+ geom_smooth()+labs(title="Average vio rate across years after shall law")
ggplot(data=a22, aes(x=year,y=avg_rob))+ geom_smooth()+labs(title="Average rob rate across years after shall law")
# Cross-sectional relationships between demographics/income and crime rates.
ggplot(data=a2, aes(x=avg_pb1064,y=avg_mur))+ geom_smooth()+labs(title="plot of agv_pb1064 vs avg_murder rate across years")
ggplot(data=a2, aes(x=avg_pm1029,y=avg_mur))+ geom_smooth()+labs(title="plot of agv_pm1029 vs avg_murder rate across years")
ggplot(data=a2, aes(x=avg_avginc,y=avg_mur))+ geom_smooth()+labs(title="plot of agv_avginc vs avg_murder rate across years")
ggplot(data=a2, aes(x=avg_pop,y=avg_mur))+ geom_smooth()+labs(title="plot of agv_pop vs avg_murder rate across years")
ggplot(data=a2, aes(x=avg_avginc,y=avg_incarc_rate))+ geom_smooth()+labs(title="plot of agv_avginc vs avg_incarc rate across years")
ggplot(data=a2, aes(x=avg_avginc,y=avg_mur))+ geom_smooth()+labs(title="plot of agv_avginc vs avg_mur rate across years")
ggplot(data=a2, aes(x=avg_avginc,y=avg_vio))+ geom_smooth()+labs(title="plot of agv_avginc vs avg_vio rate across years")
ggplot(data=a2, aes(x=avg_avginc,y=avg_rob))+ geom_smooth()+labs(title="plot of agv_avginc vs avg_rob rate across years")
#why dip at 85 for every variable?
#guns 1 has data across 3 years: the year before shall law is introduced, the year it was introduced and the year after it was introduced grouped by states
head(guns1)
summary(guns1)
guns1_stateid <- unique(guns1$stateid)
# Lay out a 4x3 grid so each page holds plots for 12 states.
par(mfrow = c(4, 3))
# Plot of mur rate vs year for 3 years across states
for (i in guns1_stateid[1:24]) {
  temp <- guns1 %>% filter(stateid == i)
  print(plot(temp$year, temp$mur, main = i))
}
# Plot of vio rate vs year for 3 years across states
for (i in guns1_stateid[1:24]) {
  temp <- guns1 %>% filter(stateid == i)
  print(plot(temp$year, temp$vio, main = i))
}
# Plot of rob rate vs year for 3 years across states.
# BUG FIX: the original loop header was "for(i in guns1_stateid[1:24]){rob" -
# a stray `rob` token after the brace, which would abort the loop at runtime
# with "object 'rob' not found". The token belonged in this comment.
for (i in guns1_stateid[1:24]) {
  temp <- guns1 %>% filter(stateid == i)
  print(plot(temp$year, temp$rob, main = i))
}
############################models###########################
# Panel regressions of log crime rates on the shall-issue dummy plus
# demographic/economic controls, using pooled OLS ("pooling"), fixed
# effects ("within"), and between estimators. Robust (White) standard
# errors are reported via vcovHC.
# NOTE(review): plm.data() is deprecated in current plm; pdata.frame() is
# the modern replacement - confirm package version before changing.
guns <- plm.data(guns,index=c("stateid","year"))
# Baseline: violent crime on shall only (pooled).
model12 <- plm(log(vio)~shall,model = "pooling",data = guns)
summary(model12)
##### Violent crime
model1 <- plm(log(vio)~shall+log(pb1064)+pw1064+pm1029+pop+avginc+log(density),model="pooling",data=guns)
summary(model1)
summary(model1, vcov=vcovHC(model1, method = "white1"))
model2 <- plm(log(vio)~shall+log(pb1064)+pw1064+pm1029+pop+avginc+log(density),model="within",data=guns)
summary(model2)
summary(model2, vcov=vcovHC(model2, method = "white1"))
model3 <- plm(log(vio)~shall+log(pb1064)+pw1064+pm1029+pop+avginc+log(density),model="between",data=guns)
summary(model3)
# Hausman test comparing fixed-effects vs between estimates.
phtest(model2,model3)
##### Robbery
model5 <- plm(log(rob)~shall+log(pb1064)+pw1064+pm1029+pop+avginc+log(density),model="pooling",data=guns)
summary(model5)
summary(model5, vcov=vcovHC(model5, method = "white1"))
model6 <- plm(log(rob)~shall+log(pb1064)+pw1064+pm1029+pop+avginc+log(density),model="within",data=guns)
summary(model6)
summary(model6, vcov=vcovHC(model6, method = "white1"))
##### Murder
model8 <- plm(log(mur)~shall+log(pb1064)+pw1064+pm1029+pop+avginc+log(density),model="pooling",data=guns)
summary(model8)
summary(model8, vcov=vcovHC(model8, method = "white1"))
model9 <- plm(log(mur)~shall+log(pb1064)+pw1064+pm1029+pop+avginc+log(density),model="within",data=guns)
summary(model9)
summary(model9, vcov=vcovHC(model9, method = "white1"))
|
cb92093012c3bbe0ad23ab552ad98613b9650919
|
988bfa5ec156be6a3886565e145acd6d315b4c8c
|
/simulations/DynamicPanel/calculate_RMSE.R
|
4a21beac8aaf0ae057d944d83797a7b7ac278296
|
[] |
no_license
|
fditraglia/gfic
|
9c60d53912d19c9d27091977589ba29fb8d44c4e
|
b3ed1138599788d460f6e190ad452cc0575dd0fc
|
refs/heads/master
| 2021-03-27T14:44:02.212209
| 2017-10-27T20:48:24
| 2017-10-27T20:48:24
| 14,728,556
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,593
|
r
|
calculate_RMSE.R
|
#-----------------------------------------------------------------#
# Parameter values and sample size
#-----------------------------------------------------------------#
N.SIMS <- 2000
GAMMA <- seq(from = 0, to = 0.2, by = 0.005)
R.X.V <- seq(from = 0, to = 0.2, by = 0.005)
#-----------------------------------------------------------------#
# Load simulation functions
#-----------------------------------------------------------------#
source('functions.R')
#-----------------------------------------------------------------#
# Run simulation and write results to file.
#-----------------------------------------------------------------#
set.seed(1445)
setwd('./results/')
# The four (T, N) designs, replacing four copy-pasted call blocks.
# Row order is (T=4,N=250), (T=5,N=250), (T=4,N=500), (T=5,N=500) -- the
# SAME order as the original sequential runs, so the RNG stream (and hence
# every result) is unchanged.
designs <- expand.grid(N.t = c(4, 5), N.i = c(250, 500))
for (k in seq_len(nrow(designs))) {
  N.t <- designs$N.t[k]
  N.i <- designs$N.i[k]
  res <- RMSE.grid.FAST(g = GAMMA, r = R.X.V, N.sims = N.SIMS,
                        N.t = N.t, N.i = N.i)
  # e.g. rmse_T4_N250.csv, matching the original output filenames.
  write.csv(res, file = sprintf('rmse_T%d_N%d.csv', N.t, N.i),
            row.names = FALSE)
}
# Clean up (kept from the original; note this clears the whole workspace).
rm(list = ls())
|
3fa97adfc6232112298e39b8217b8827fbda27ec
|
8f1be5778fce0622c8026aa219a995361f723c8c
|
/BBMRIomics/R/Genotype_Helpers.R
|
d450dcae3aedf45321cbbc2b84c263a9f7875267
|
[] |
no_license
|
bbmri-nl/BBMRIomics
|
aa5112e9f20aafa9ae506332ba0db556e544f7b7
|
1c7d9a6ef966365be2b95e2066e8f2fd2006c757
|
refs/heads/master
| 2023-05-31T13:21:37.130878
| 2023-04-28T17:29:31
| 2023-04-28T17:29:31
| 95,667,968
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,849
|
r
|
Genotype_Helpers.R
|
## Read up to `nrow` records from `file` with scan() and assemble the
## resulting list of columns into a data.frame without copying.
##
## file:      a connection or filename accepted by scan().
## nrow:      maximum number of records to read (becomes the row count).
## ncol:      unused; kept for interface compatibility with callers.
## col.names: optional column names to assign.
## ...:       passed on to scan() (e.g. what=, sep=, quiet=).
.scan2data.frame <- function(file, nrow, ncol, col.names=NULL, ...) {
    op <- options("stringsAsFactors" = FALSE)
    ## BUG FIX: the original `on.exit(op)` merely evaluated the saved list
    ## and never restored the option; options(op) is required to undo the
    ## change when the function exits.
    on.exit(options(op))
    df <- scan(file, nmax = nrow, ...)
    df <- lapply(df, type.convert, as.is=TRUE)
    attr(df, "row.names") <- .set_row_names(nrow)
    if(!is.null(col.names))
        names(df) <- col.names
    attr(df, "class") <- "data.frame"
    df
}
##' read dosages files impute2-transformed
##'
##' read dosages files impute2-transformed
##' @title read dosages files impute2-transformed
##' @param file character filename or an open TabixFile
##' @param yieldSize yieldSize for reading data in chunks
##' @param colClassesInfo describes the types of the leading annotation columns
##' @param type data.frame, GRanges or SummarizedExperiment
##' @param verbose default TRUE show progress message
##' @param ... additional arguments to scanTabix
##' @return data.frame, GRanges or SummarizedExperiment (per `type`); NULL
##'     when the requested region/chunk is empty
##' @author mvaniterson
##' @import SummarizedExperiment
##' @importFrom Rsamtools TabixFile headerTabix scanTabix yieldSize
##' @importFrom GenomicRanges GRanges
##' @importFrom IRanges IRanges
##' @importFrom S4Vectors DataFrame SimpleList
##' @importFrom utils type.convert
##' @export
##' @examples
##' \dontrun{
##' gzipped <- dir(file.path(VM_BASE_DATA, "GWAS_ImputationGoNLv5/dosages", RP3_BIOBANKS[1]),
##' pattern= "gz$", full.names=TRUE)
##' chunk <- read.dosages(gzipped[1], yieldSize=5000)
##' chunk[1:5, 1:10]
##' chunk <- read.dosages(gzipped[1], yieldSize=5000, type="GRanges")
##' chunk
##' chunk <- read.dosages(gzipped[1], yieldSize=5000, type="SummarizedExperiment")
##' chunk
##' colData(chunk)
##' rowRanges(chunk)
##' assay(chunk)[1:5, 1:5]
##' }
read.dosages <- function(file, yieldSize=NULL, colClassesInfo = c("character", "character", "integer", "numeric", "numeric",
                                                                 "numeric", "integer", "integer", "character", "integer", "character",
                                                                 "character"), type=c("data.frame", "GRanges", "SummarizedExperiment"), verbose=TRUE, ...) {
    type <- match.arg(type)
    if(verbose)
        message("Reading chunk...")
    ## FIX: use inherits() instead of `class(file) != "TabixFile"` --
    ## class() can return a vector for S4 objects, making the `!=`
    ## comparison non-scalar and unreliable inside if().
    if(!inherits(file, "TabixFile"))
        file <- TabixFile(file, yieldSize=yieldSize)
    ## Column names come from the tabix header line (leading '#' stripped).
    header <- gsub("#", "", headerTabix(file)$header)
    header <- unlist(strsplit(header, "\\t"))
    value <- scanTabix(file, ...)[[1]]
    if(length(value) == 0)
        return(NULL)
    txt <- textConnection(value)
    on.exit(close(txt))
    ## For GRanges output only the annotation columns are needed, so the
    ## sample columns are skipped ("NULL"); otherwise they are dosages.
    if(type == "GRanges")
        colClasses <- c(colClassesInfo, rep("NULL", length(header)-length(colClassesInfo)))
    else
        colClasses <- c(colClassesInfo, rep("numeric", length(header)-length(colClassesInfo)))
    chunk <- .scan2data.frame(txt,
                              nrow=yieldSize(file),
                              ncol=length(header),
                              sep="\t",
                              what=as.list(colClasses),
                              quiet=TRUE,
                              skip=0,
                              col.names=header)
    switch(type,
           "data.frame"= chunk,
           "GRanges"= with(chunk,
                           GRanges(seqname = paste0("chr", chr),
                                   IRanges(as.integer(pos), width=1),
                                   rsid=rsid, ref=ref, alt=alt)),
           "SummarizedExperiment" = SummarizedExperiment(rowRanges = with(chunk,
                                                                          GRanges(seqname = paste0("chr", chr),
                                                                                  IRanges(as.integer(pos), width=1),
                                                                                  rsid=rsid, ref=ref, alt=alt)),
                                                         assays=SimpleList(dosage = data.matrix(chunk[, -c(1:length(colClassesInfo))])),
                                                         colData=DataFrame(gwas_id = colnames(chunk)[-c(1:length(colClassesInfo))]))
           )
}
## Extract genotype data for one chromosome's SNPs from HRC-imputed VCFs.
##
## param:         GRanges of SNPs, all on a single chromosome.
## files:         character vector of candidate VCF file paths.
## imputation_id: sample ids; output columns are ordered to match.
## genotype:      geno() field to extract ("GT", "DS", "GP"), or "SM" for a
##                numeric matrix converted from a SnpMatrix.
.getHRC <- function(param, files, imputation_id, genotype){
    chr <- unique(as.character(seqnames(param)))
    fls <- grep(paste0("chr", chr, ".filtered.dose.vcf.gz"), files, value=TRUE)
    ## FIX: grep() returns character(0) when nothing matches -- never NULL --
    ## so the original is.null() guard could not fire; test length instead,
    ## and use scalar `||` in the if() condition.
    if(length(param) == 0 || length(fls) == 0)
        stop("No files found or no SNPs in input!")
    m <- lapply(fls, function(fl) { ##optionally multiple files per chromosome
        vcf <- readVcf(TabixFile(fl), "hg19", param=param)
        if(genotype=="SM") {
            m <- genotypeToSnpMatrix(vcf)$genotypes
            m <- t(matrix(as.numeric(m), nrow=nrow(m), ncol=ncol(m), dimnames=dimnames(m)))
        }
        else
            m <- geno(vcf)[[genotype]]
        m
    })
    m <- do.call("cbind", m)
    m <- m[, match(imputation_id, colnames(m)), drop=FALSE] ##return in proper order
    m
}
## Extract genotype data for one chromosome's SNPs from HRC v1.1-imputed
## VCFs (which may be split into no.auto_male/no.auto_female files).
## Same contract as .getHRC; only the filename pattern differs.
.getHRCv1.1 <- function(param, files, imputation_id, genotype){
    chr <- unique(as.character(seqnames(param)))
    fls <- grep(paste0("chr", chr, "(.no.auto_male|.no.auto_female|).dose.vcf.gz"), files, value=TRUE)
    ## FIX: grep() returns character(0), never NULL, so the original
    ## is.null() guard could not fire; test length instead, with scalar `||`.
    if(length(param) == 0 || length(fls) == 0)
        stop("No files found or no SNPs in input!")
    m <- lapply(fls, function(fl) { ##optionally multiple files per chromosome
        vcf <- readVcf(TabixFile(fl), "hg19", param=param)
        if(genotype=="SM") {
            m <- genotypeToSnpMatrix(vcf)$genotypes
            m <- t(matrix(as.numeric(m), nrow=nrow(m), ncol=ncol(m), dimnames=dimnames(m)))
        }
        else
            m <- geno(vcf)[[genotype]]
        m
    })
    m <- do.call("cbind", m)
    m <- m[, match(imputation_id, colnames(m)), drop=FALSE] ##return in proper order
    m
}
## Extract genotype data for one chromosome's SNPs from the GoNL release-5
## VCFs. Returns NULL when no SNPs are requested or no file matches
## (callers rbind the per-chromosome results, so NULL chunks drop out).
.getGONL <- function(param, files, imputation_id, genotype){
    chr <- unique(as.character(seqnames(param)))
    file <- grep(paste0("chr", chr, ".release5.raw_SNVs.vcf.gz"), files, value=TRUE)
    ## FIX: grep() returns character(0), never NULL, so the original
    ## is.null() guard could not fire; test length instead, with scalar `||`.
    if(length(param) == 0 || length(file) == 0) return(NULL)
    vcf <- readVcf(TabixFile(file), "hg19", param=param)
    if(genotype=="SM") {
        m <- genotypeToSnpMatrix(vcf)$genotypes
        m <- t(matrix(as.numeric(m), nrow=nrow(m), ncol=ncol(m), dimnames=dimnames(m)))
    }
    else
        m <- geno(vcf)[[genotype]]
    ## Row names as "chr:pos" keys for downstream matching.
    rownames(m) <- paste(seqnames(vcf@rowRanges), start(vcf@rowRanges), sep=":")
    m[, match(imputation_id, colnames(m)), drop=FALSE] ##return in proper order
}
##' extract genotypes from vcf-files
##'
##' extract genotypes from vcf-files
##' given selection of SNPs and samples
##' @title extract genotypes from vcf-files
##' @param imputation_id imputation identifier
##' @param biobank biobank_id
##' @param snps GRanges with snps
##' @param type imputation type either "GoNL", "HRC", "HRCv1.1" or "GoNLv5"
##' @param geno extract either genotypes, dosages, genotype likelihoods or as snpMatrix
##' @param BASE genotype data location default e.g. VM_BASE_DATA
##' @param ... optional BPPARAM arguments
##' @return matrix with genotypes
##' @author mvaniterson
##' @importFrom VariantAnnotation readVcf genotypeToSnpMatrix geno
##' @importFrom Rsamtools TabixFile
##' @importFrom BiocParallel bplapply
##' @importFrom GenomicRanges split
##' @importFrom GenomeInfoDb mapSeqlevels seqlevels seqlevels<-
##' @export
getGenotypes <- function(imputation_id, biobank=c("ALL", "CODAM", "LL", "LLS", "NTR", "RS", "PAN"), snps, type=c("GoNL", "HRC", "HRCv1.1", "GoNLv5"), geno=c("GT", "DS", "GP", "SM"), BASE, ...){
    type <- match.arg(type)
    biobank <- match.arg(biobank)
    geno <- match.arg(geno)
    ##snps should be of type GRanges
    ## Harmonize chromosome naming and split the query per chromosome so
    ## each chromosome can be handled (in parallel) against its own VCF.
    seqlevels(snps) <- mapSeqlevels(seqlevels(snps), "NCBI")
    snps <- split(snps, as.character(seqnames(snps)))
    if(type == "HRC") {
        if(biobank == "ALL")
            vcfs <- dir(file.path(BASE, "HRC_Imputation"), pattern="filtered.dose.vcf.gz$", full.names=TRUE, recursive=TRUE)
        else
            vcfs <- dir(file.path(BASE, "HRC_Imputation", biobank), pattern="filtered.dose.vcf.gz$", full.names=TRUE, recursive=TRUE)
        ##for(fl in vcfs) indexTabix(fl, format="vcf") ##if vcf are not indexed!
        ##TODO Bioconductor devel (bioc-3.4/R-3.3.0) contains `GenomicFiles` with vcfstack a nicer solution?
        if(length(snps) > 1) {
            genotypes <- bplapply(snps, .getHRC, files=vcfs, imputation_id = as.character(imputation_id), genotype = geno, ...)
            genotypes <- do.call("rbind", genotypes)
        } else {
            ## NOTE(review): `...` is forwarded here although .getHRC takes
            ## no `...`; extra BPPARAM arguments would error in this branch.
            genotypes <- .getHRC(snps[[1]], files=vcfs, imputation_id = as.character(imputation_id), genotype = geno, ...)
        }
    } else if(type == "HRCv1.1") {
        if(biobank == "ALL")
            vcfs <- dir(file.path(BASE, "HRCv1.1_Imputation"), pattern="dose.vcf.gz$", full.names=TRUE, recursive=TRUE)
        else
            vcfs <- dir(file.path(BASE, "HRCv1.1_Imputation", biobank), pattern="dose.vcf.gz$", full.names=TRUE, recursive=TRUE)
        ##for(fl in vcfs) indexTabix(fl, format="vcf") ##if vcf are not indexed!
        ##TODO Bioconductor devel (bioc-3.4/R-3.3.0) contains `GenomicFiles` with vcfstack a nicer solution?
        if(length(snps) > 1) {
            genotypes <- bplapply(snps, .getHRCv1.1, files=vcfs, imputation_id = as.character(imputation_id), genotype = geno, ...)
            genotypes <- do.call("rbind", genotypes)
        } else {
            genotypes <- .getHRCv1.1(snps[[1]], files=vcfs, imputation_id = as.character(imputation_id), genotype = geno, ...)
        }
    } else if(type == "GoNL") {
        vcfs <- dir(file.path(BASE, "gonl-snv-release-5.4"), pattern=".vcf.gz$", full.names=TRUE, recursive=TRUE)
        genotypes <- bplapply(snps, .getGONL, files=vcfs, imputation_id = as.character(imputation_id), genotype = geno)
        genotypes <- do.call("rbind", genotypes)
    }
    ## FIX: the original condition was `type == "GoNLv5" | type == "HRCv1.1"`;
    ## "HRCv1.1" is already handled above (so that term was unreachable) and
    ## the vectorized `|` belongs in vector contexts, not if(). Only "GoNLv5"
    ## remains unimplemented.
    else if(type == "GoNLv5")
        stop("Not implemented yet!")
    genotypes
}
|
67210cc682cd0042cd28f3af841408d40e6d3a47
|
e18dcfa7a376b0608df9798d1d7749e8f7ea53c8
|
/RScripts/shannon-diversity/46_SDITable_Avg.R
|
ad35dbf80f790fc901315a9812f3da217e312338
|
[] |
no_license
|
asorgen/UEGP_WastewaterCulture
|
b4ebb3817555f350ca9d92dbd9c2c02a73bd4df7
|
f333b287859467d28c56607031a1dd35134c55f9
|
refs/heads/main
| 2023-03-24T20:29:25.300692
| 2021-03-23T21:20:35
| 2021-03-23T21:20:35
| 350,818,717
| 0
| 0
| null | 2021-03-23T18:47:23
| 2021-03-23T18:35:58
|
R
|
UTF-8
|
R
| false
| false
| 5,141
|
r
|
46_SDITable_Avg.R
|
#Author: Alicia Sorgen
#BioLockJ configuration: Alicia Sorgen
#Date: 01-18-21
#Description:
# Builds average Shannon-diversity (SDI) summary tables for the
# wastewater-culture dataset: Location x Antibiotic (Table S5) and
# Temperature x Media (Table S11), written as TSVs to the module output dir.
## Libraries
library(tidyr)
# NOTE(review): rm(list=ls()) wipes the whole global environment — typical
# for a BioLockJ module script, but unsafe if this file is source()d
# interactively.
rm(list=ls())
# BioLockJ directory layout: input comes from the sibling MetaUpdate module,
# output goes to this module's own output/ folder.
pipeRoot = dirname(dirname(getwd()))
moduleDir <- dirname(getwd())
inputnModule = dir(pipeRoot, pattern="MetaUpdate", full.names=TRUE)
inputPath = file.path(inputnModule,"output/")
output = file.path(moduleDir,"output/")
# Load the updated metadata table and keep only cultured samples.
frame=read.table(paste0(inputPath, "metaUpdate.tsv"),sep="\t",header = TRUE)
frame=frame[frame$SampleType=="Culture",]
##### Location Antibiotic SDI Table (Table S5) #####
# Mean Shannon diversity per location x antibiotic, spread wide, plus
# per-location averages over all plates and over antibiotic (ARB) plates only.
# NOTE(review): `index` is initialized to 1 and never incremented, so
# Location, Antibiotic and Average stay length-1 vectors that are simply
# overwritten each pass; the code works only because each row is built
# immediately via data.frame(...) inside the loop.
var1 <- c("UPA","RES","HOS","INF","PCI","PCE","ATE","FCE","UV","DSA")
dFrame <- data.frame()
Location <- vector()
Antibiotic <- vector()
Average <- vector()
index <- 1
for (i in 1:length(var1)) {
Location[index] <- paste0(var1[i])
df1 <- frame[frame$Location %in% Location, ]
# NOTE(review): dead assignment — unique() result is discarded by the
# hard-coded vector on the next line.
var2 <- unique(df1$Antibiotic)
var2 <- c("Neg", "Amp", "Cip", "Dox", "Sulf")
for (j in 1:length(var2)) {
Antibiotic[index] <- paste0(var2[j])
df2 <- df1[df1$Antibiotic %in% Antibiotic,]
# Locations with no plates for this antibiotic get an SDI of 0.
if (nrow(df2) == 0) {
Average[index] <- 0
} else {
Average[index] <- mean(df2$shannon)
}
row <- data.frame(Location, Antibiotic, Average)
dFrame <- rbind(dFrame, row)
}
}
# One row per location, one column per antibiotic.
dFrame2 <- spread(dFrame, Antibiotic, Average)
# NOTE(review): the antibiotic level used everywhere else is "Neg";
# filtering "Negative" here may be a no-op — confirm against metaUpdate.tsv.
frame2 <- frame[!(frame$Antibiotic %in% "Negative"),]
# Per-location average over ALL plates (including negatives).
dFrame <- data.frame()
Location <- vector()
Average <- vector()
index <- 1
for (i in 1:length(var1)) {
Location[index] <- paste0(var1[i])
df1 <- frame[frame$Location %in% Location, ]
Average[index] <- mean(df1$shannon)
row <- data.frame(Location, Average)
dFrame <- rbind(dFrame, row)
}
colnames(dFrame)[colnames(dFrame)=="Average"] <- "Location Avg"
dFrame2 <- merge(dFrame2, dFrame, by = "Location")
# Per-location average over antibiotic-resistant-bacteria (ARB) plates only.
dFrame <- data.frame()
Location <- vector()
Average <- vector()
index <- 1
for (i in 1:length(var1)) {
Location[index] <- paste0(var1[i])
df1 <- frame2[frame2$Location %in% Location, ]
df1 <- df1[!(df1$Antibiotic == "Neg"),]
Average[index] <- mean(df1$shannon)
row <- data.frame(Location, Average)
dFrame <- rbind(dFrame, row)
}
colnames(dFrame)[colnames(dFrame)=="Average"] <- "Location Avg (ARB only)"
dFrame2 <- merge(dFrame2, dFrame, by = "Location")
# Footer row of per-antibiotic column means.
# NOTE(review): dFrame2 has no columns named ALL or ARB (they are
# "Location Avg" and "Location Avg (ARB only)"), so mean(dFrame2$ALL) and
# mean(dFrame2$ARB) are mean(NULL) = NA — likely a bug; use the
# backtick-quoted column names instead.
Antibiotics <- c("Antibiotic Avg", mean(dFrame2$Amp), mean(dFrame2$Cip), mean(dFrame2$Dox), mean(dFrame2$Neg), mean(dFrame2$Sulf), mean(dFrame2$ALL), mean(dFrame2$ARB))
final <- rbind(dFrame2, Antibiotics)
write.table(final, file=paste0(output, "Location_Antibiotic_SDITable.tsv"), sep="\t",row.names=FALSE)
##### Temperature Media SDI Table (Table S11) #####
# Mean Shannon diversity per incubation temperature x growth medium,
# over ALL plates (negatives included), plus marginal averages.
# NOTE(review): as in the table above, `index` is never incremented; the
# accumulator vectors stay length-1 and are overwritten each iteration.
var1 <- unique(frame$Temperature)
dFrame <- data.frame()
Temperature <- vector()
Media <- vector()
Average <- vector()
index <- 1
for (i in 1:length(var1)) {
Temperature[index] <- paste0(var1[i])
df1 <- frame[frame$Temperature %in% Temperature, ]
# Drop unused media levels before enumerating them.
df1$Media <- factor(df1$Media)
var2 <- unique(df1$Media)
for (j in 1:length(var2)) {
Media[index] <- paste0(var2[j])
df2 <- df1[df1$Media %in% Media,]
Average[index] <- mean(df2$shannon)
row <- data.frame(Temperature, Media, Average)
dFrame <- rbind(dFrame, row)
}
}
# One row per temperature, one column per medium (LB / R2A).
dFrame2 <- spread(dFrame, Media, Average)
# Marginal per-temperature averages over both media.
dFrame <- data.frame()
Temperature <- vector()
Average <- vector()
index <- 1
for (i in 1:length(var1)) {
Temperature[index] <- paste0(var1[i])
df1 <- frame[frame$Temperature %in% Temperature, ]
Average[index] <- mean(df1$shannon)
row <- data.frame(Temperature, Average)
dFrame <- rbind(dFrame, row)
}
colnames(dFrame)[colnames(dFrame)=="Average"] <- "Temperature Avg"
dFrame2 <- merge(dFrame2, dFrame, by = "Temperature")
# Footer row with per-medium column means.
Medias <- c("Media Avg", mean(dFrame2$LB), mean(dFrame2$R2A))
final <- rbind(dFrame2, Medias)
write.table(final, file=paste0(output, "Temperature_Media_SDITable_ALL.tsv"), sep="\t",row.names=FALSE)
# Same Temperature x Media table, restricted to antibiotic (ARB) plates.
# frame2 was filtered on "Negative" above; the explicit "Neg" filter inside
# the loop below is what actually removes negative-control plates.
var1 <- unique(frame2$Temperature)
dFrame <- data.frame()
Temperature <- vector()
Media <- vector()
Average <- vector()
index <- 1
for (i in 1:length(var1)) {
Temperature[index] <- paste0(var1[i])
df1 <- frame2[frame2$Temperature %in% Temperature, ]
df1 <- df1[!(df1$Antibiotic == "Neg"),]
df1$Media <- factor(df1$Media)
var2 <- unique(df1$Media)
for (j in 1:length(var2)) {
Media[index] <- paste0(var2[j])
df2 <- df1[df1$Media %in% Media,]
Average[index] <- mean(df2$shannon)
row <- data.frame(Temperature, Media, Average)
dFrame <- rbind(dFrame, row)
}
}
dFrame2 <- spread(dFrame, Media, Average)
# Marginal per-temperature averages.
# NOTE(review): unlike the inner loop, this pass does NOT drop "Neg" rows
# from frame2 — confirm whether negatives should be excluded here too.
dFrame <- data.frame()
Temperature <- vector()
Average <- vector()
index <- 1
for (i in 1:length(var1)) {
Temperature[index] <- paste0(var1[i])
df1 <- frame2[frame2$Temperature %in% Temperature, ]
Average[index] <- mean(df1$shannon)
row <- data.frame(Temperature, Average)
dFrame <- rbind(dFrame, row)
}
colnames(dFrame)[colnames(dFrame)=="Average"] <- "Temperature Avg"
dFrame2 <- merge(dFrame2, dFrame, by = "Temperature")
Medias <- c("Media Avg", mean(dFrame2$LB), mean(dFrame2$R2A))
final <- rbind(dFrame2, Medias)
write.table(final, file=paste0(output, "Temperature_Media_SDITable_ARB.tsv"), sep="\t",row.names=FALSE)
|
59d55f22f2a69f2d6d41a793d0db7219e34a2a28
|
00b9fe8730b31e003a328f3e66124904e8a3b2bc
|
/R/calculate_daly.R
|
9812498d20ca89abc8efc2c7f7e93f649a0b2729
|
[] |
no_license
|
rcquan/roadmap-daly
|
ee52f92e502966764f136586584771b0d8aa47af
|
58b0e9c9ebbe1ba1116e250ca9c8437ca240ac0c
|
refs/heads/master
| 2016-09-15T19:09:53.335829
| 2015-05-23T20:35:25
| 2015-05-23T20:35:25
| 34,160,965
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,617
|
r
|
calculate_daly.R
|
#############################
# Ryan Quan
# Columbia University
# GRAPH
# DOHMH Roadmap, Piece 1
# 2015-04-11
# rcq2102@columbia.edu
#
# The following script defines methods
# to calculate DALYs
#############################
library("plyr")
library("dplyr")
library("magrittr")
## -------------------------------------
calculateMichaudYLD <- function(checkRatio, yldyllRatio, nationalYLD, nycPop, nycYLL) {
  ## Estimate NYC YLDs following the 2006 Michaud approach.
  ## When the national YLD:YLL ratio is unusable (>= 5, NA, or infinite) or
  ## the NYC YLL is missing, fall back to scaling the national YLD rate
  ## (per 100,000) by NYC population; otherwise apply the national ratio to
  ## the NYC YLLs. Vectorized over all arguments.
  ## NOTE(review): the original header comment mentioned a cutoff of 10,
  ## but the implemented cutoff is 5.
  ## Args:
  ##   checkRatio:  national YLD:YLL ratio used only to decide the branch
  ##   yldyllRatio: national YLD:YLL ratio applied in the ratio branch
  ##   nationalYLD: national YLD rate per 100,000
  ##   nycPop:      NYC population
  ##   nycYLL:      NYC YLL estimate
  ## Returns:
  ##   numeric vector of NYC YLD estimates
  use_national_rate <- is.na(checkRatio) | is.infinite(checkRatio) |
    checkRatio >= 5 | is.na(nycYLL)
  scaled_national <- nationalYLD * (nycPop / 100000)
  ratio_based <- yldyllRatio * nycYLL
  ifelse(use_national_rate, scaled_national, ratio_based)
}
## -------------------------------------
calculatePrevalenceYLD <- function(nycPrevalence) {
  ## Prevalence-based YLD estimates (2010 GBD approach):
  ## YLD = prevalence x dependence rate x disability weight, evaluated at
  ## the point estimate and at the upper/lower disability-weight bounds.
  ## Args:
  ##   nycPrevalence: data.frame with prevalence, dependence_rate and
  ##                  dw_estimate / dw_upper / dw_lower columns.
  ## Returns:
  ##   the input table with yld, yld_upper and yld_lower columns appended.
  nycYLD <- nycPrevalence
  nycYLD$yld       <- with(nycYLD, prevalence * dependence_rate * dw_estimate)
  nycYLD$yld_upper <- with(nycYLD, prevalence * dependence_rate * dw_upper)
  nycYLD$yld_lower <- with(nycYLD, prevalence * dependence_rate * dw_lower)
  nycYLD
}
## -------------------------------------
calculateYLL <- function(mortalityData) {
  ## Years of Life Lost with 3% continuous-time discounting:
  ## yll = mortality * (1 - exp(-0.03 * le)) / 0.03, where le is the
  ## remaining standard life expectancy (sle) minus the mean age at death.
  ## Returns the input table with le and yll columns appended.
  nycYLL <- mortalityData
  nycYLL$le  <- nycYLL$sle - nycYLL$mean_age
  nycYLL$yll <- nycYLL$mortality * (1 - exp(-0.03 * nycYLL$le)) / 0.03
  nycYLL
}
## -------------------------------------
calculatePrevalenceDALY <- function(diseaseName, nycYLL, nycYLD) {
## calculates DALYs using prevalence-based YLDs from the 2010 GBD study
## Args:
## diseaseName: chr. The disease of interest.
## nycYLL: data.frame. New York City YLL estimates
## nycYLD: data.frame. New York City YLD estimates
## Returns:
## dalys: data.frame. New York City DALY estimates
# subsetDataByDisease() is defined elsewhere in this project.
diseaseYLL <- subsetDataByDisease(diseaseName, nycYLL)
nycYLD <- subsetDataByDisease(diseaseName, nycYLD)
# Collapse YLLs over age groups, right-join onto the YLD table (keeps every
# YLD row; yll becomes NA where no matching YLLs exist), then sum the
# components. join() here is plyr::join, mixed into a dplyr pipeline.
dalys <- diseaseYLL %>%
group_by(cause_name, sex) %>%
summarize(yll = sum(yll, na.rm=TRUE)) %>%
join(nycYLD, c("cause_name", "sex"), type = "right") %>%
ungroup() %>%
filter(yld != 0) %>%
# Where yll is NA (no mortality data), DALY = YLD alone.
mutate(daly = ifelse(is.na(yll), 0 + yld, yll + yld),
daly_upper = ifelse(is.na(yll), 0 + yld_upper, yll + yld_upper),
daly_lower = ifelse(is.na(yll), 0 + yld_lower, yll + yld_lower)) %>%
select(cause_name, sex, daly, daly_lower, daly_upper, yll, yld, yld_lower, yld_upper)
return(dalys)
}
## -------------------------------------
calculateDALY <- function(diseaseName, population, nycYLL, nycYLD=NULL, nationalRates=NULL) {
## workhorse function to calculate DALY scores for specified disease using either
## prevalence-based YLD estimates or the Michaud approach using national YLD/YLL rates
## Exactly one of nycYLD / nationalRates should be supplied.
## NOTE(review): if BOTH are NULL, no branch is taken and the function
## silently returns NULL — consider an explicit stop() for that case.
diseaseYLL <- subsetDataByDisease(diseaseName, nycYLL)
# NOTE(review): scalar guard — `&&` would be the idiomatic operator here.
if (!is.null(nycYLD) & !is.null(nationalRates)) {
stop("You cannot provide values to both nycYLD and nationalRates parameters.")
} else if (!is.null(nycYLD)) {
# Prevalence-based path: delegate to calculatePrevalenceDALY().
dalys <- calculatePrevalenceDALY(diseaseName, nycYLL, nycYLD)
return(dalys)
} else if (!is.null(nationalRates)) {
## subset datasets for specified disease
diseaseRates <- subsetDataByDisease(diseaseName, nationalRates)
## if disease not found in gbdData, return YLL data as DALYs
if (nrow(diseaseRates) == 0) {
dalys <- diseaseYLL %>%
group_by(cause_name, sex) %>%
summarize(yll = sum(yll),
daly = sum(yll))
return(dalys)
}
## compute national YLD:YLL ratio and join to NYC YLL and population data by age, sex
dalys <- diseaseRates %>%
## compute national YLD:YLL ratio
mutate(yldyll_ratio_mean = yld_nm_mean / yll_nm_mean,
yldyll_ratio_upper = yld_nm_upper / yll_nm_mean,
yldyll_ratio_lower = yld_nm_lower / yll_nm_mean) %>%
# join tables (plyr::join, left join by default)
join(population, by=c("ageGroup", "sex")) %>%
join(diseaseYLL, by=c("cause_name", "ageGroup", "sex")) %>%
## estimate YLDs using Michaud logic
## (mean ratio always decides the branch; upper/lower ratios only vary the value)
mutate(yld = calculateMichaudYLD(yldyll_ratio_mean, yldyll_ratio_mean, yld_rt_mean, population, yll),
yld_upper = calculateMichaudYLD(yldyll_ratio_mean, yldyll_ratio_upper, yld_rt_upper, population, yll),
yld_lower = calculateMichaudYLD(yldyll_ratio_mean, yldyll_ratio_lower, yld_rt_lower, population, yll)) %>%
## collapse age groups
## NOTE(review): summarise_each()/funs() are long-deprecated dplyr APIs;
## the modern equivalent is summarise(across(...)). Left as-is here.
group_by(cause_name, sex) %>%
summarise_each(funs(sum(., na.rm=TRUE)), -c(cause_name, sex, ageGroup)) %>%
## calculate DALY estimates with lower and upper bounds
mutate(daly = yll + yld,
daly_upper = yll + yld_upper,
daly_lower = yll + yld_lower) %>%
select(cause_name, sex, daly, daly_lower, daly_upper, yll, yld, yld_lower, yld_upper)
return(dalys)
}
}
|
1c7833b86fbee4f200367aef00a6d8f247443a7b
|
40e2a560ec0e3bf8b1848fc40e2f4e7a01df5c9b
|
/ejercicios/EjerciciosT1_David.R
|
7a96b9cf8a6501d80bb289fe435d460fdfbbfd8f
|
[] |
no_license
|
dasafo/r-basic
|
629cd7523dfa335e2a380f2b2b21a2011599fb5f
|
aecf46cbb07235858cbf08ac3e5badd15559cce9
|
refs/heads/master
| 2021-06-28T14:20:57.346727
| 2021-03-03T19:39:21
| 2021-03-03T19:39:21
| 204,508,948
| 0
| 0
| null | 2019-08-26T17:46:34
| 2019-08-26T15:46:33
| null |
UTF-8
|
R
| false
| false
| 1,472
|
r
|
EjerciciosT1_David.R
|
# Homework, Topic 2

# Exercise 1: counting seconds from the 12 chimes that open 2018, at what
# time / day / year do we reach 250 million seconds? (Mind leap years.)

# Break a number of seconds into days / hours / minutes / seconds.
# Prints a summary (in Spanish, as the exercise requires) and invisibly
# returns the whole-day count so the caller can continue the computation.
fseg <- function(seg) {
  min <- seg %/% 60
  seg_r <- seg %% 60
  hora <- min %/% 60
  min_r <- min %% 60
  dias <- hora %/% 24
  hora_r <- hora %% 24
  print(sprintf("%i días : %i horas : %i minutos : %i segundos",
                dias, hora_r, min_r, seg_r))
  invisible(dias)
}

# BUG FIX: `dias` was local to fseg(), so the lines below used to fail with
# "object 'dias' not found". fseg() now returns it invisibly and we capture it.
dias <- fseg(25 * 10^7)
años_t <- dias %/% 365
print(años_t)
dias_t <- (dias %% 365) - 2  # subtract 2 days for the leap years in the span
print(dias_t)

# Exercise 2: solve the linear equation Ax + B = C for x, e.g. 2x + 4 = 0
# yields x = -2. Then solve 5x+3=0, 7x+4=18 and x+1=1.
grado1 <- function(a, b, c) {
  x <- (c - b) / a
  print(paste(sprintf("La solución para %ix + %i = %i es x = %g", a, b, c, x)))
}
grado1(5, 3, 0)
grado1(7, 4, 18)
grado1(1, 1, 1)

# Exercise 3: 3*e^(-pi) rounded to 3 decimals, and the MODULUS of the
# complex number (2+3i)^2 / (5+8i) rounded to 3 decimals.
calc <- 3 * exp(-pi)
round(calc, 3)
calc2 <- (2 + 3i)^2 / (5 + 8i)
# BUG FIX: the exercise asks for the modulus; the original rounded the
# complex number itself instead of Mod(calc2).
round(Mod(calc2), 3)
|
c476b8bc03439ca89a9eae7fd1b5b9525dd4b29b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/colorpatch/examples/ComputeSymmetry.Rd.R
|
4d36de8952c06e09998b67c1747feb9c838e915d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 242
|
r
|
ComputeSymmetry.Rd.R
|
# Extracted example for colorpatch::ComputeSymmetry.
library(colorpatch)
### Name: ComputeSymmetry
### Title: Computes the symmetry of a given bi-variate color palette
### Aliases: ComputeSymmetry

### ** Examples

# Score the symmetry of the optimized green/red LAB palette shipped
# with the package, then show the resulting table.
data("OptimGreenRedLAB")
symmetry_df <- ComputeSymmetry(OptimGreenRedLAB)
print(symmetry_df)
|
19346f095884a0cca6fe8874ce31a4ccdc0b7aa8
|
f745b8548381d45ec1e2c0a58db04e9e76a0019e
|
/qunar/main.R
|
3d398a0bcccdd4accd87e888376d16b16a791178
|
[] |
no_license
|
XMUSpiderman/AirplaneTicketScrapy
|
d79cd012e0eba0c302ebfa734a3fce5060d8895d
|
055727b3caf788487fab33c6bee65f30528cd86b
|
refs/heads/master
| 2016-08-12T09:34:08.688843
| 2016-03-16T01:19:51
| 2016-03-16T01:19:51
| 50,179,419
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
main.R
|
# Aggregate scraped Qunar flight-ticket files for one query date: parse every
# raw file under data/3_14/ with getFlightInfos() and write one combined CSV.
library(dplyr)
library(plyr)
# NOTE(review): attaching plyr AFTER dplyr masks several dplyr verbs
# (summarise, mutate, ...); the usual advice is to load plyr first.
library(stringr)
# setwd("/Users/shihchosen/Documents/Github/AirplaneTicketScrapy/qunar/")
source("./getFlightInfos.R")
# Build relative paths to every raw data file for the 3/14 snapshot.
files <- list.files(path = "./data/3_14/")%>%
str_c("./data/3_14/",.)
files
# Parse each file (getFlightInfos defined in the sourced script), then stack
# the per-file data frames row-wise via do.call("rbind", <list>).
# NOTE(review): queryDate is "2015-03-14" here while the header comment in
# getFlightInfos-era code mentions other dates — confirm the intended year.
data314 <- lapply(files, getFlightInfos, queryDate ="2015-03-14")%>%
do.call("rbind",.)
write.csv(data314, "./data/output/data314.csv", row.names = FALSE)
|
067fe713217928dda3c2b6d40f96265129854847
|
5dd990f03c615ba8900ce9cb0bf0bc111e4e503b
|
/R/reexports.R
|
e7d2219b2e1de259a3cb725fd14a77fd21512e4f
|
[] |
no_license
|
Sprinterzzj/feasts
|
2c4ce92de7c3bc32def3b5da84af6a9d21d26a1d
|
64c0f66af547c0ae42b1d8a03990de1387a1bc91
|
refs/heads/master
| 2020-05-26T09:03:31.104268
| 2019-05-17T00:45:00
| 2019-05-17T00:45:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 285
|
r
|
reexports.R
|
# Re-exports: expose selected functions/generics from dependency packages
# through this package's namespace so users get them without attaching the
# dependency themselves. Each `pkg::name` expression below, tagged @export,
# tells roxygen2 to re-export that object.
#' @importFrom fablelite %>%
#' @export
fablelite::`%>%`
# NOTE(review): as_tsibble is re-exported without a matching @importFrom tag;
# presumably tsibble is imported at the package level — verify NAMESPACE.
#' @export
tsibble::as_tsibble
#' @importFrom ggplot2 autoplot
#' @export
ggplot2::autoplot
#' @importFrom ggplot2 autolayer
#' @export
ggplot2::autolayer
#' @importFrom fablelite components
#' @export
fablelite::components
|
aeb59f4e8acc2cdaf38c2b9696cfea05704ae5db
|
6ce79966b1b89de1a6d6eb29cea945188c18652c
|
/R/models__linear__etc__idx.b2beta.R
|
d84d72d6b4909113bd308d0ecdad26f1c79682f5
|
[] |
no_license
|
feng-li/movingknots
|
d3041a0998f0873459814a09e413c714fff700c6
|
5f921070e4cd160a831c5191255f88dd7d4c850c
|
refs/heads/master
| 2021-06-10T00:18:57.172246
| 2021-03-22T05:56:44
| 2021-03-22T05:56:44
| 145,708,629
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 604
|
r
|
models__linear__etc__idx.b2beta.R
|
#' Make indices from b to beta.
#'
#' Details from the paper
#' @param p NA
#' @param q NA
#' @param q.i NA
#' @return NA
#' @author Feng Li, Department of Statistics, Stockholm University, Sweden.
#' @export
idx.b2beta <- function(p, q, q.i)
{
idx4b <- 1:(p*q) ## The original indices for b
cumidx <- c(0, cumsum(q.i))
matidx4bi <- matrix(0, q, p)
for(i in 1:length(q.i))
{
idx4bi <- (1+cumidx[i]*p):(cumidx[i+1]*p)
matidx4bi[(1+cumidx[i]):(cumidx[i+1]), ] <- matrix(idx4b[idx4bi], q.i[i])
}
idx4beta <- as.vector(matidx4bi)
return(idx4beta)
}
|
99dac8d7d37f3b1352d7be26ee0d8d4ea6bf3230
|
64fabdfd8e2670eb57e3cdf162f94d71b58fe6b4
|
/R/dna_repair_index.R
|
ead010ed06bd1b26f03b02c98c01151ab82a8965
|
[] |
no_license
|
d3b-center/celllines-profiling-analysis
|
564db86b6fb6c5bc8258fdf84bf12a1ea47963a1
|
66396cc64c094d08903ef8666353e85aad7eebd1
|
refs/heads/master
| 2021-06-23T15:32:27.993408
| 2021-04-07T23:07:29
| 2021-04-07T23:07:29
| 212,658,648
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,585
|
r
|
dna_repair_index.R
|
# Compare DNA-repair pathway activity (ssGSEA over four KEGG repair gene
# sets) across cell-line growth conditions (Adherent / Solid_Tissue /
# Suspension).
# NOTE(review): setwd() in a script breaks portability; a project-relative
# path mechanism would be safer — kept as-is for this analysis script.
setwd('~/Projects/celllines-profiling-analysis/')
library(msigdbr)
library(tidyverse)
library(GSVA)
library(ggpubr)
library(reshape2)
# Provides theme_Publication2() used for the plot below.
source('R/pubTheme.R')
# Standard (z-) score: center by the mean, scale by the sample SD.
# NA values in the input propagate (mean/sd are computed without na.rm),
# and a constant vector yields NaN (sd = 0).
zscore <- function(x) {
  centered <- x - mean(x)
  centered / sd(x)
}
# z-score FPKM data
# Load the FPKM matrix, log2(x+1)-transform, then z-score each row across
# its columns (assumes rows are genes and columns are samples — TODO confirm
# against fpkm-matrix.RData). apply(..., 1, ...) returns the transpose
# (samples x genes), so t() restores the original orientation.
expr.fpkm <- get(load('data/fpkm-matrix.RData'))
expr.fpkm <- log2(expr.fpkm+1)
expr.fpkm <- apply(expr.fpkm, 1, zscore)
expr.fpkm <- t(expr.fpkm)
# genesets
# Pull the KEGG canonical-pathway gene sets from MSigDB (category C2,
# subcategory CP:KEGG) and keep the four DNA-repair pathways of interest.
kegg_geneset <- msigdbr::msigdbr(species = "Homo sapiens",
category = "C2",
subcategory = "CP:KEGG")
kegg_geneset <- kegg_geneset %>%
filter(gs_name %in% c("KEGG_MISMATCH_REPAIR",
"KEGG_HOMOLOGOUS_RECOMBINATION",
"KEGG_NON_HOMOLOGOUS_END_JOINING",
"KEGG_BASE_EXCISION_REPAIR"))
# Run single-sample GSEA (ssGSEA) for ONE gene set on an expression matrix.
#
# Args:
#   expr:    numeric expression matrix (here: z-scored log2 FPKM).
#   geneset: msigdbr-style data frame; only its human_gene_symbol column
#            is used, wrapped as a one-element gene-set list.
# Returns:
#   the enrichment-score matrix from GSVA::gsva() (one row, since a single
#   gene set is supplied).
run_ssgsea <- function(expr, geneset) {
  gene_list <- list(geneset = geneset$human_gene_symbol)
  # FIX: return the result directly instead of via a trailing assignment
  # (which returned it invisibly), and spell out FALSE instead of the
  # reassignable shorthand `F`.
  gsva(expr = expr,
       gset.idx.list = gene_list,
       method = "ssgsea",
       min.sz = 1, max.sz = 1500,
       mx.diff = FALSE)
}
# Run ssGSEA once per gene set (ddply splits kegg_geneset by gs_name and
# binds the per-set score rows), then melt to long format for plotting.
GeneSetExprsMat <- plyr::ddply(.data = kegg_geneset, .variables = 'gs_name', .fun = function(x) run_ssgsea(expr = expr.fpkm, geneset = x))
GeneSetExprsMat <- melt(GeneSetExprsMat)
# Derive the sample type from the suffix (after the last "_") of the
# sample column name, then map the raw suffixes to display labels.
GeneSetExprsMat <- cbind(GeneSetExprsMat, type = gsub(".*_","",GeneSetExprsMat$variable))
GeneSetExprsMat$type <- ifelse(GeneSetExprsMat$type == "tissue", "Solid_Tissue",
ifelse(GeneSetExprsMat$type == "s", "Suspension", "Adherent"))
GeneSetExprsMat$type <- factor(GeneSetExprsMat$type, levels = c("Adherent", "Solid_Tissue", "Suspension"))
# plot
# Boxplots of the DNA-repair index per sample type, one fill color per KEGG
# gene set, with ggpubr significance annotations: per-pair comparisons plus
# an overall test (ggpubr's defaults — presumably Wilcoxon / Kruskal-Wallis;
# verify if the exact test matters for the figure legend).
my_comparisons <- list(c("Suspension", "Adherent"),
c("Suspension", "Solid_Tissue"),
c("Adherent", "Solid_Tissue"))
p <- ggplot(GeneSetExprsMat, aes(x = type, y = value, fill = factor(gs_name, unique(as.character(gs_name))))) +
stat_boxplot(geom ='errorbar', width = 0.7, lwd = 0.3) +
geom_boxplot(outlier.shape = 21, outlier.fill = "white", outlier.color = "white",
lwd = 0.3, fatten = 0.7, width = 0.7) +
theme_Publication2() +
ggtitle("GSVA: DNA Repair index") +
ylab("DNA Repair index") + xlab("") +
stat_compare_means(comparisons = my_comparisons, color = "darkred", size = 3) +
stat_compare_means(color = "darkred", label.y = 1.1) +
labs(fill = "KEGG genesets")
p
ggsave(filename = "results/dna-repair-index.pdf", plot = p, device = "pdf", width = 12, height = 6)
|
99bd87ee8d7e2d556c255ccde474721782802149
|
ee8586e9df303bb72ebfbac751be22084ba0ab58
|
/server.R
|
e5702fc158ee028b8c12070ae1a694fa9fd7407a
|
[] |
no_license
|
sachioiwamoto/shinyapp
|
4d4d76d4cf94207e028cfb4fb567ccf8e0b92edc
|
d4bca7ee1b4df249211cdf6ca55f3d609bc0d0f9
|
refs/heads/master
| 2021-01-23T07:02:33.465551
| 2015-04-26T06:26:54
| 2015-04-26T06:26:54
| 34,600,465
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,452
|
r
|
server.R
|
# Shiny server: compare one World Bank "Economy and Growth" indicator
# between two countries over a user-selected year range, with a degree-4
# polynomial trend fit per country.
library(shiny)
library(reshape2)
library(ggplot2)
# Read and cleanse data (Economy and Growth Topic)
data <- read.table("./3_Topic_en_csv_v2.csv", header = TRUE, sep = ",", skip = 1)
# Drop the two trailing columns; columns 5:58 hold the yearly values,
# renamed to the years 1960-2013.
data <- data[, -c(59,60)]
colnames(data)[c(5:58)] <- c(1960:2013)
# average <- function(country, indicator, year) (function here)
# Define server logic required to plot various variables against mpg
# NOTE(review): "mpg" is leftover boilerplate from the shiny template;
# this app plots indicator values.
shinyServer(function(input, output) {
# Echo the current input selections back to the UI.
output$ocountry1 <- renderPrint({ input$country1 })
output$ocountry2 <- renderPrint({ input$country2 })
output$oindicator <- renderPrint({ input$indicator })
output$ofrom <- renderPrint({ input$year[1] })
output$oto <- renderPrint({ input$year[2] })
# Rows matching country 1 / country 2 for the chosen indicator.
tmpdat1 <- reactive({
data[(data$Country.Name == input$country1 &
data$Indicator.Name == input$indicator),]
})
tmpdat2 <- reactive({
data[(data$Country.Name == input$country2 &
data$Indicator.Name == input$indicator),]
})
# Transpose the two country rows and keep only the selected years.
# Column 5 holds 1960, so after transposing, year Y sits at row Y - 1955.
tmpdat3 <- reactive({
t(rbind(tmpdat1(), tmpdat2()))[c((as.numeric(input$year[1])-1955):
(as.numeric(input$year[2])-1955)),]
})
# Year / country1 / country2 table, coerced to numeric via character
# (the transpose above produced character data).
resultset <- reactive({
ylist <- c((input$year[1]):(input$year[2]))
temp <- as.data.frame(cbind(ylist, tmpdat3()))
colnames(temp) <- c("Year", input$country1, input$country2)
temp[, 1:3] <- sapply(temp[, 1:3], as.character)
temp[, 1:3] <- sapply(temp[, 1:3], as.numeric)
rownames(temp) <- NULL
temp
})
# Long format for ggplot: one row per (Year, country) pair.
meltedset <- reactive({
melt(resultset(), id='Year')
})
# Degree-4 polynomial fit of indicator vs year, one per country.
fit1 = reactive({
lm(resultset()[,c(2)] ~ poly(resultset()[,c(1)], 4), data = resultset())
})
fit2 = reactive({
lm(resultset()[,c(3)] ~ poly(resultset()[,c(1)], 4), data = resultset())
})
# Scatter + smoothed degree-4 trend, colored by country.
output$plot <- renderPlot({
gp = ggplot(meltedset(),
aes(x=meltedset()[,c(1)], y=meltedset()[,c(3)], colour=meltedset()[,c(2)]),
environment=environment())
gp = gp + geom_smooth(method = "lm", formula = y~poly(x, 4))
gp = gp + geom_point(size=3, alpha=0.7)
gp = gp + xlab("Year") + ylab(input$indicator)
gp = gp + labs(colour="Countries")
print(gp)
})
output$table <- renderDataTable({ resultset() })
output$model1 <- renderPrint({ summary(fit1()) })
output$model2 <- renderPrint({ summary(fit2()) })
})
|
d8c7062c8b280f9a872e53a839dd4ce554ac083a
|
5d4429dc4708aa5e9e1d24a7e7740243ae0d4a47
|
/tests/callbacks_actionButtons/tests/test.R
|
9f806cca31e0503de1dabeb2da1f76413577065e
|
[] |
no_license
|
DavidPatShuiFong/DTedit
|
6c449449afba79c1d465825e361730e7357f3e11
|
082616e09aeb0d043e793c0d17a02532e1bac120
|
refs/heads/master
| 2022-11-16T02:22:46.032038
| 2021-10-23T11:56:42
| 2021-10-23T11:56:42
| 168,662,418
| 20
| 19
| null | 2021-01-24T07:31:58
| 2019-02-01T07:57:21
|
R
|
UTF-8
|
R
| false
| false
| 2,815
|
r
|
test.R
|
# shinytest script for the DTedit grocery-list example app: drives the UI
# (add / edit / remove rows, custom action buttons) and records snapshots
# of the exported values after each scenario. Step order matters; the
# Sys.sleep(2) calls give the app time to process modals and clicks.
app <- ShinyDriver$new("../")
app$snapshotInit("test")
# Baseline snapshot before any interaction.
app$snapshot(items = list(export = TRUE))
app$setInputs(Grocery_List_add = "click")
app$setInputs(Grocery_List_add_Buy = "Bananas",
Grocery_List_add_Quantity = 12) # should dis-allow
app$setInputs(Grocery_List_insert = "click")
app$executeScript("$('.modal').modal('hide');") # close modal
# for closing modal in shinytest,
# see https://github.com/rstudio/shinytest/issues/227
# by LukasK13
Sys.sleep(2)
# A valid insert this time.
app$setInputs(Grocery_List_add = "click")
app$setInputs(Grocery_List_add_Buy = "Mangos",
Grocery_List_add_Quantity = 3)
app$setInputs(Grocery_List_insert = "click")
app$setInputs(Grocery_Listdt_rows_selected = 2, allowInputNoBinding_ = TRUE)
app$setInputs(Grocery_Listdt_rows_last_clicked = 2, allowInputNoBinding_ = TRUE)
app$setInputs(Grocery_List_remove = "click")
app$setInputs(Grocery_List_delete = "click")
# should dis-allow, as current logic only allows delete if quantity = 0
app$snapshot(items = list(export = TRUE))
app$executeScript("$('.modal').modal('hide');") # close modal
# Set row 3's quantity to 0 so it becomes deletable.
app$setInputs(Grocery_Listdt_rows_selected = 3, allowInputNoBinding_ = TRUE)
app$setInputs(Grocery_Listdt_rows_last_clicked = 3, allowInputNoBinding_ = TRUE)
app$setInputs(Grocery_List_edit = "click")
app$setInputs(Grocery_List_edit_Quantity = 0)
# current logic only allows delete if quantity = 0
app$setInputs(Grocery_List_update = "click")
app$setInputs(Grocery_List_update = "click")
app$setInputs(Grocery_Listdt_rows_selected = 3,
allowInputNoBinding_ = TRUE, wait_ = FALSE, values_ = FALSE)
app$setInputs(Grocery_Listdt_rows_last_clicked = 3,
allowInputNoBinding_ = TRUE, wait_ = FALSE, values_ = FALSE)
# the above two lines aren't actually expected to change the state
app$setInputs(Grocery_List_remove = "click")
app$setInputs(Grocery_List_delete = "click")
Sys.sleep(2)
app$snapshot(items = list(export = TRUE))
# Attempt an invalid (negative) quantity on row 4.
app$setInputs(Grocery_Listdt_rows_selected = 4, allowInputNoBinding_ = TRUE)
app$setInputs(Grocery_Listdt_rows_last_clicked = 4, allowInputNoBinding_ = TRUE)
app$setInputs(Grocery_List_edit = "click")
app$setInputs(Grocery_List_edit_Quantity = -2)
# should dis-allow negative quantity
app$setInputs(Grocery_List_update = "click")
app$executeScript("$('.modal').modal('hide');") # close modal
Sys.sleep(2)
app$executeScript('document.getElementById("addOne_1").click()')
app$executeScript('document.getElementById("subtractOne_2").click()')
# directly 'clicks' on the action buttons with Javascript
Sys.sleep(2)
app$snapshot(items = list(export = TRUE))
# wait for the process to close gracefully
# this allows covr to write out the coverage results
# https://github.com/rfaelens/exampleShinyTest/
# Ruben Faelens
p <- app$.__enclos_env__$private$shinyProcess
p$interrupt()
p$wait()
|
f0bbb4df535051fbc79957721bf9394c27119856
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/CGGP/R/CGGP_append_fs.R
|
18b421a50a4e803413890b31c93886f15514f12e
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,693
|
r
|
CGGP_append_fs.R
|
#' Calculate MSE over single dimension
#'
#' Averages the kriging (posterior) variance 1 - c(x)' S^{-1} c(x) over a
#' fine grid of 401 integration points slightly extending beyond [0, 1].
#' Could be computed exactly in 1D, but the grid approximation suffices.
#'
#' @param xl Vector of design points in 1D
#' @param theta Correlation parameters
#' @param CorrMat Function that gives correlation matrix for vectors of 1D points.
#'
#' @return MSE value
#' @export
#'
#' @examples
#' CGGP_internal_calcMSE(xl=c(0,.5,.9), theta=c(1,2,3),
#'                       CorrMat=CGGP_internal_CorrMatCauchySQT)
CGGP_internal_calcMSE <- function(xl, theta, CorrMat) {
  S <- CorrMat(xl, xl, theta)
  # Integration grid, padded a hair beyond [0, 1].
  grid <- seq(-10^(-4), 1 + 10^(-4), l = 401)
  crosscorr <- CorrMat(grid, xl, theta)
  # Two triangular solves with the Cholesky factor give S^{-1} t(crosscorr).
  U <- chol(S)
  Sinv_ct <- backsolve(U, backsolve(U, t(crosscorr), transpose = TRUE))
  # Mean of the pointwise variances over the grid.
  mean(1 - rowSums(t(Sinv_ct) * crosscorr))
}
#' Calculate MSE over blocks
#'
#' The delta of adding a block is the product over dimensions i=1..d of
#' IMSE(i, j-1) - IMSE(i, j), where level 1 compares against 1 (the
#' no-data integrated correlation value). Computed on the log scale and
#' exponentiated at the end.
#'
#' @param valsinds Block levels to calculate MSEs for: either one level
#'   vector (length d) or a matrix with one block per row.
#' @param MSE_MAP Matrix of per-dimension, per-level MSE values
#'   (rows = dimensions, columns = levels).
#'
#' @return MSE deltas: a scalar for a vector input, a vector (one entry per
#'   row) for a matrix input.
#' @export
#'
#' @examples
#' SG <- CGGPcreate(d=3, batchsize=100)
#' y <- apply(SG$design, 1, function(x){x[1]+x[2]^2})
#' SG <- CGGPfit(SG, Y=y)
#' MSE_MAP <- outer(1:SG$d, 1:8,
#'      Vectorize(function(dimlcv, lcv1) {
#'        CGGP_internal_calcMSE(SG$xb[1:SG$sizest[lcv1]],
#'          theta=SG$thetaMAP[(dimlcv-1)*SG$numpara+1:SG$numpara],
#'          CorrMat=SG$CorrMat)
#'      }))
#' CGGP_internal_calcMSEde(SG$po[1:SG$poCOUNT, ], MSE_MAP)
CGGP_internal_calcMSEde <- function(valsinds, MSE_MAP) {
  # Lower clamp on each per-dimension log term (effectively no clamp).
  floor_log <- -Inf
  # Sum of log-deltas across dimensions for one block's level vector.
  block_logsum <- function(inds) {
    total <- 0
    for (d in seq_along(inds)) {
      lvl <- inds[d]
      # Level 1 has no ancestor block; compare against 1 (the value with no
      # data: correlation times integrated value over the range).
      prev <- if (lvl > 1.5) MSE_MAP[d, lvl - 1] else 1
      total <- total + max(log(prev - MSE_MAP[d, lvl]), floor_log)
    }
    total
  }
  if (is.matrix(valsinds)) {
    logs <- vapply(seq_len(nrow(valsinds)),
                   function(r) block_logsum(valsinds[r, ]),
                   numeric(1))
  } else {
    logs <- block_logsum(valsinds)
  }
  exp(logs)
}
#' Add points to CGGP
#'
#' Add `batchsize` points to `SG` using `theta`.
#'
#' @param CGGP Sparse grid object
#' @param batchsize Number of points to add
#' @param selectionmethod How points will be selected: one of `UCB`, `TS`,
#' `MAP`, `Oldest`, `Random`, or `Lowest`.
#' `UCB` uses Upper Confidence Bound estimates for the parameters.
#' `TS` uses Thompson sampling, a random sample from the posterior.
#' `MAP` uses maximum a posteriori parameter estimates.
#' `Oldest` adds the block that has been available the longest.
#' `Random` adds a random block.
#' `Lowest` adds the block with the lowest sum of index levels.
#' `UCB` and `TS` are based on bandit algorithms and account for uncertainty
#' in the parameter estimates, but are the slowest.
#' `MAP` is fast but doesn't account for parameter uncertainty.
#' The other three are naive methods that are not adaptive and won't
#' perform well.
#' @importFrom stats quantile sd var
#'
#' @return SG with new points added.
#' @export
#' @family CGGP core functions
#'
#' @examples
#' SG <- CGGPcreate(d=3, batchsize=100)
#' y <- apply(SG$design, 1, function(x){x[1]+x[2]^2})
#' SG <- CGGPfit(SG, Y=y)
#' SG <- CGGPappend(CGGP=SG, batchsize=20, selectionmethod="MAP")
CGGPappend <- function(CGGP,batchsize, selectionmethod = "MAP"){
# ===== Check inputs =====
if (!(selectionmethod %in% c("UCB", "TS", "MAP", "Oldest", "Random", "Lowest"))) {
stop("selectionmethod in CGGPappend must be one of UCB, TS, MAP, Oldest, Random, or Lowest")
}
if (!is.null(CGGP$design_unevaluated)) {
stop("Can't append if CGGP has unevaluated design points.")
}
# Track how many design points there currently are in $design
n_before <- if (is.null(CGGP[["design"]]) || length(CGGP$design)==0) {
0
} else {
nrow(CGGP$design)
}
max_polevels = apply(CGGP$po[1:CGGP$poCOUNT, ,drop=FALSE], 2, max)
separateoutputparameterdimensions <- is.matrix(CGGP$thetaMAP)
# nopd is numberofoutputparameterdimensions
nopd <- if (separateoutputparameterdimensions) {
if (length(CGGP$y)>0) {ncol(CGGP$y)} else {ncol(CGGP$ys)}
} else {
1
}
# ==============================.
# ==== Calculate IMSE ====
# ==============================.
# Calculate integrated mean squared error (IMSE) values for the given method
if(selectionmethod=="MAP"){
# Set up blank array to store MSE values
MSE_MAP = array(0, dim=c(CGGP$d, CGGP$maxlevel,nopd))
# Loop over dimensions and design refinements
for (opdlcv in 1:nopd) {
thetaMAP.thisloop <- if (nopd==1) CGGP$thetaMAP else CGGP$thetaMAP[, opdlcv]
for (dimlcv in 1:CGGP$d) {
for (levellcv in 1:max_polevels[dimlcv]) {
# Calculate some sort of MSE from above, not sure what it's doing
MSE_MAP[dimlcv, levellcv, opdlcv] =
max(0,
abs(
CGGP_internal_calcMSE(
CGGP$xb[1:CGGP$sizest[levellcv]],
thetaMAP.thisloop[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],
CGGP$CorrMat
)
)
)
if (levellcv > 1.5) { # If past 1st level, it is as good as one below
MSE_MAP[dimlcv, levellcv, opdlcv] =
min(MSE_MAP[dimlcv, levellcv, opdlcv], MSE_MAP[dimlcv, levellcv - 1, opdlcv])
}
}
}
}
# Integrated MSE
IMES_MAP = rep(0, CGGP$ML)
# For all possible blocks, calculate MSE_MAP, need to apply it over nopd
IMES_MAP_beforemean = apply(MSE_MAP, 3,
function(x) {
CGGP_internal_calcMSEde(
CGGP$po[1:CGGP$poCOUNT, , drop=F],
x)
})
if (CGGP$poCOUNT==1) {
IMES_MAP_beforemean <- matrix(IMES_MAP_beforemean, nrow=1)
}
if (!is.matrix(IMES_MAP_beforemean)) {stop("Need a matrix here 0923859")}
# Need as.matrix in case of single value
# i.e. when only supp data and only po is initial point
# If multiple output but single opd, need to take mean
sigma2MAP.thisloop <- if (nopd==1) {
mean(CGGP$sigma2MAP)
} else {
CGGP$sigma2MAP
}
IMES_MAP[1:CGGP$poCOUNT] = rowMeans(
sweep(IMES_MAP_beforemean, 2,
sigma2MAP.thisloop, "*")
)
# Clean up to avoid silly errors
rm(opdlcv, thetaMAP.thisloop, sigma2MAP.thisloop)
} else if (selectionmethod %in% c("UCB", "TS")) { # selectionmethod is UCB or TS
MSE_PostSamples = array(0, c(CGGP$d, CGGP$maxlevel,CGGP$numPostSamples, nopd))
# Dimensions can be considered independently
# Loop over dimensions and design refinements
for (opdlcv in 1:nopd) { # Loop over output parameter dimensions
thetaPostSamples.thisloop <- if (nopd==1) {
CGGP$thetaPostSamples
} else {
CGGP$thetaPostSamples[ , , opdlcv]
}
for (dimlcv in 1:CGGP$d) { # Loop over each input dimension
for (levellcv in 1:max_polevels[dimlcv]) {
for(samplelcv in 1:CGGP$numPostSamples){
# Calculate some sort of MSE from above, not sure what it's doing
MSE_PostSamples[dimlcv, levellcv,samplelcv, opdlcv] =
max(0,
abs(
CGGP_internal_calcMSE(
CGGP$xb[1:CGGP$sizest[levellcv]],
thetaPostSamples.thisloop[(dimlcv-1)*CGGP$numpara +
1:CGGP$numpara,
samplelcv],
CGGP$CorrMat)
)
)
if (levellcv > 1.5) { # If past first level, it is as good as one below it
MSE_PostSamples[dimlcv, levellcv,samplelcv, opdlcv] =
min(MSE_PostSamples[dimlcv, levellcv,samplelcv, opdlcv],
MSE_PostSamples[dimlcv, levellcv - 1,samplelcv, opdlcv])
}
}
}
}
}
rm(opdlcv, dimlcv, levellcv, samplelcv) # Avoid dumb mistakes
IMES_PostSamples = matrix(0, CGGP$ML,CGGP$numPostSamples)
# Calculate sigma2 for all samples if needed
if (is.null(CGGP$sigma2_samples)) {
CGGP$sigma2_samples <- CGGP_internal_calc_sigma2_samples(CGGP)
}
sigma2.allsamples.alloutputs <- CGGP$sigma2_samples
for(samplelcv in 1:CGGP$numPostSamples){
if (nopd == 1) { # Will be a matrix
# Multiply by sigma2. If multiple output dimensions with
# shared parameters, take mean.
# Needed because each thetasample will have a different sigma2.
sigma2.thistime <- mean(sigma2.allsamples.alloutputs[samplelcv,])
IMES_PostSamples[1:CGGP$poCOUNT,samplelcv] = sigma2.thistime *
CGGP_internal_calcMSEde(CGGP$po[1:CGGP$poCOUNT,],
MSE_PostSamples[,,samplelcv,])
rm(sigma2.thistime) # Avoid mistakes
} else { # Is a 3d array, need to use an apply and then apply again with mean
IMES_PostSamples_beforemean <-
apply(MSE_PostSamples[,,samplelcv,], 3,
function(x){
CGGP_internal_calcMSEde(CGGP$po[1:CGGP$poCOUNT,,drop=F], x)
})
if (!is.matrix(IMES_PostSamples_beforemean)) {
# Happens when CGGP$poCOUNT is 1, when only initial block avail
if (CGGP$poCOUNT!=1) {stop("Something is wrong here #279287522")}
IMES_PostSamples_beforemean <- matrix(IMES_PostSamples_beforemean, nrow=1)
}
# Need sigma2 for this theta sample, already calculated in sigma2.allsamples.alloutputs
IMES_PostSamples[1:CGGP$poCOUNT,samplelcv] <-
apply(IMES_PostSamples_beforemean, 1,
function(x) {
# Weight by sigma2 samples
mean(sigma2.allsamples.alloutputs[samplelcv,] *
x)
})
}
}; rm(samplelcv)
# Get UCB IMES using 90% upper conf bound
IMES_UCB = numeric(CGGP$ML)
IMES_UCB[1:CGGP$poCOUNT] = apply(IMES_PostSamples[1:CGGP$poCOUNT,, drop=F],1,quantile, probs=0.9)
} else {
# Can be Oldest or Random or Lowest
}
# =============================.
# ==== Append points ====
# =============================.
# Append points to design until limit until reaching max_design_points
max_design_points = CGGP$ss + batchsize
while (max_design_points > CGGP$ss + min(CGGP$pogsize[1:CGGP$poCOUNT]) - .5) {
if(selectionmethod=="MAP"){
IMES = IMES_MAP
} else if(selectionmethod=="UCB"){
IMES = IMES_UCB
} else if(selectionmethod=="TS"){
IMES = IMES_PostSamples[,sample(1:CGGP$numPostSamples,1)]
} else if(selectionmethod=="Oldest"){
IMES = seq.int(from=CGGP$poCOUNT, to=1, by=-1)
# Multiply by size so it gets undone below
IMES <- IMES * CGGP$pogsize[1:CGGP$poCOUNT]
} else if(selectionmethod=="Random"){
IMES = rep(1,CGGP$poCOUNT)
# Multiply by size so it gets undone below
IMES <- IMES * CGGP$pogsize[1:CGGP$poCOUNT]
} else if(selectionmethod=="Lowest"){
IMES = rowSums(CGGP$po[1:CGGP$poCOUNT,])
# Make the lowest the highest value
IMES <- max(IMES) + 1 - IMES
# Multiply by size so it gets undone below
IMES <- IMES * CGGP$pogsize[1:CGGP$poCOUNT]
} else {
stop("Selection method not acceptable")
}
CGGP$uoCOUNT = CGGP$uoCOUNT + 1 #increment used count
# Find which blocks are still valid for selecting
stillpossible <- which(CGGP$pogsize[1:CGGP$poCOUNT] <
(max_design_points - CGGP$ss + 0.5))
# Pick block with max IMES per point in the block
metric <- IMES[1:CGGP$poCOUNT] / CGGP$pogsize[1:CGGP$poCOUNT]
# Find the best one that still fits
M_comp = max(metric[stillpossible])
# Find which ones are close to M_comp and pick randomly among them
possibleO = stillpossible[metric[stillpossible] >= 0.99*M_comp]
# If more than one is possible and near the best, randomly pick among them.
if(length(possibleO)>1.5){
pstar = sample(possibleO,1)
} else{
pstar = possibleO
}
l0 = CGGP$po[pstar, ] # Selected block
# Need to make sure there is still an open row in uo to set with new values
if (CGGP$uoCOUNT > nrow(CGGP$uo)) {
CGGP <- CGGP_internal_addrows(CGGP)
}
CGGP$uo[CGGP$uoCOUNT,] = l0 # Save selected block
CGGP$ss = CGGP$ss + CGGP$pogsize[pstar] # Update selected size
# ================================.
# ==== Update ancestors ====
# ================================.
# Protect against initial block which has no ancestors
if (CGGP$pilaCOUNT[pstar] > 0) { # Protect for initial block
new_an = CGGP$pila[pstar, 1:CGGP$pilaCOUNT[pstar]]
total_an = new_an
for (anlcv in 1:length(total_an)) { # Loop over ancestors
if (total_an[anlcv] > 1.5) { # If there's more than 1, do this
total_an = unique(
c(total_an,
CGGP$uala[total_an[anlcv], 1:CGGP$ualaCOUNT[total_an[anlcv]]])
)
}
}
CGGP$ualaCOUNT[CGGP$uoCOUNT] = length(total_an)
CGGP$uala[CGGP$uoCOUNT, 1:length(total_an)] = total_an
# Loop over all ancestors, update weight
for (anlcv in 1:length(total_an)) {
lo = CGGP$uo[total_an[anlcv],]
if (max(abs(lo - l0)) < 1.5) {
CGGP$w[total_an[anlcv]] = CGGP$w[total_an[anlcv]] + (-1)^abs(round(sum(l0-lo)))
}
}
}
CGGP$w[CGGP$uoCOUNT] = CGGP$w[CGGP$uoCOUNT] + 1
# Update data. Remove selected item, move rest up.
# First get correct indices to change. Protect when selecting initial point
new_indices <- if (CGGP$poCOUNT>1) {1:(CGGP$poCOUNT - 1)} else {numeric(0)}
old_indices <- setdiff(seq.int(1, CGGP$poCOUNT, 1), pstar)
# Then change the data
CGGP$po[new_indices,] = CGGP$po[old_indices,]
CGGP$pila[new_indices,] = CGGP$pila[old_indices,]
CGGP$pilaCOUNT[new_indices] = CGGP$pilaCOUNT[old_indices]
CGGP$pogsize[new_indices] = CGGP$pogsize[old_indices]
if(selectionmethod=="MAP"){
IMES_MAP[new_indices] = IMES_MAP[old_indices]
}
if(selectionmethod=="UCB"){
IMES_UCB[new_indices] = IMES_UCB[old_indices]
}
if(selectionmethod=="TS"){
IMES_PostSamples[new_indices,] = IMES_PostSamples[old_indices,]
}
# And reduce number of available blocks by one.
CGGP$poCOUNT = CGGP$poCOUNT - 1
# ==========================================.
# ==== Update new possible blocks ====
# ==========================================.
# Loop over possible descendents of selected block, add them if possible
for (dimlcv in 1:CGGP$d) {
lp = l0
lp[dimlcv] = lp[dimlcv] + 1
if (max(lp) <= CGGP$maxlevel && CGGP$poCOUNT < 4 * CGGP$ML) {
kvals = which(lp > 1.5) # Dimensions above base level
canuse = 1
ap = rep(0, CGGP$d)
nap = 0
for (activedimlcv in 1:length(kvals)) {
lpp = lp
lpp[kvals[activedimlcv]] = lpp[kvals[activedimlcv]] - 1
ismem = rep(1, CGGP$uoCOUNT)
for (dimdimlcv in 1:CGGP$d) {
ismem = ismem *
(CGGP$uo[1:CGGP$uoCOUNT, dimdimlcv] == lpp[dimdimlcv])
}
if (max(ismem) > 0.5) {
ap[activedimlcv] = which(ismem > 0.5)
nap = nap + 1
} else{
canuse = 0
}
}
if (canuse > 0.5) { # If it can be used, add to possible blocks
CGGP$poCOUNT = CGGP$poCOUNT + 1
CGGP$po[CGGP$poCOUNT,] = lp
CGGP$pogsize[CGGP$poCOUNT] = prod(CGGP$sizes[lp])
CGGP$pila[CGGP$poCOUNT, 1:nap] = ap[1:nap]
CGGP$pilaCOUNT[CGGP$poCOUNT] = nap
max_polevels_old = max_polevels
max_polevels = apply(CGGP$po[1:CGGP$poCOUNT, ,drop=F], 2, max)
if(selectionmethod=="MAP"){
for (opdlcv in 1:nopd) { # Loop over output parameter dimensions
thetaMAP.thisloop <- if (nopd==1) CGGP$thetaMAP else CGGP$thetaMAP[, opdlcv]
for (dimlcv in 1:CGGP$d) {
if((max_polevels_old[dimlcv]+0.5)<max_polevels[dimlcv]){
levellcv = max_polevels[dimlcv]
MSE_MAP[dimlcv, levellcv,
opdlcv] = max(0, abs(CGGP_internal_calcMSE(CGGP$xb[1:CGGP$sizest[levellcv]],
thetaMAP.thisloop[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],
CGGP$CorrMat)))
if (levellcv > 1.5) { # If past first level, it is as good as one below it. Why isn't this a result of calculation?
MSE_MAP[dimlcv, levellcv, opdlcv] = min(MSE_MAP[dimlcv, levellcv, opdlcv], MSE_MAP[dimlcv, levellcv - 1, opdlcv])
}
}
}
}
# Clean up
rm(thetaMAP.thisloop, opdlcv)
} else if (selectionmethod %in% c("UCB", "TS")){ # selection method is UCB or TS
for (opdlcv in 1:nopd) {
thetaPostSamples.thisloop <- if (nopd==1) CGGP$thetaPostSamples else CGGP$thetaPostSamples[, , opdlcv]
for (dimlcv_2 in 1:CGGP$d) { # dimlcv is already used for which descendent to add
if((max_polevels_old[dimlcv_2]+0.5)<max_polevels[dimlcv_2]){
levellcv = max_polevels[dimlcv_2]
for(samplelcv in 1:CGGP$numPostSamples){
# Calculate some sort of MSE from above, not sure what it's doing
MSE_PostSamples[dimlcv_2, levellcv,
samplelcv, opdlcv] = max(0,
abs(CGGP_internal_calcMSE(
CGGP$xb[1:CGGP$sizest[levellcv]],
thetaPostSamples.thisloop[(dimlcv_2-1)*CGGP$numpara+1:CGGP$numpara,
samplelcv],
CGGP$CorrMat)))
if (levellcv > 1.5) { # If past first level, it is as good as one below it. Why isn't this a result of calculation?
MSE_PostSamples[dimlcv_2, levellcv,
samplelcv, opdlcv] = min(MSE_PostSamples[dimlcv_2, levellcv,samplelcv, opdlcv],
MSE_PostSamples[dimlcv_2, levellcv - 1,samplelcv, opdlcv])
}
}; rm(samplelcv)
}
}; rm(dimlcv_2)
}
# Clean up
rm(thetaPostSamples.thisloop, opdlcv)
} else {
# Can be Oldest or Random or Lowest
}
if(selectionmethod=="MAP"){
# IMES_MAP[CGGP$poCOUNT] = CGGP_internal_calcMSEde(as.vector(CGGP$po[CGGP$poCOUNT, ]), MSE_MAP)
# Need to apply first
IMES_MAP_beforemeannewpoint <- apply(MSE_MAP, 3,
function(x) {CGGP_internal_calcMSEde(as.vector(CGGP$po[CGGP$poCOUNT, ]), x)})
# Take weighted mean over dimensions
IMES_MAP[CGGP$poCOUNT] <- mean(CGGP$sigma2MAP * IMES_MAP_beforemeannewpoint)
} else if (selectionmethod=="UCB" || selectionmethod=="TS"){
for(samplelcv in 1:CGGP$numPostSamples){
if (nopd == 1) { # is a matrix
# Each sample has different sigma2, so use. If multiple output
# parameter dimensions, take mean over sigma2.
sigma2.thistime <- mean(sigma2.allsamples.alloutputs[samplelcv,])
IMES_PostSamples[CGGP$poCOUNT,samplelcv] = sigma2.thistime *
CGGP_internal_calcMSEde(as.vector(CGGP$po[CGGP$poCOUNT, ]),
MSE_PostSamples[,,samplelcv,])
rm(sigma2.thistime)
} else { # is an array, need to apply
IMES_PostSamples_beforemeannewpoint = apply(MSE_PostSamples[,,samplelcv,],
3, # 3rd dim since samplelcv removes 3rd
function(x) {
CGGP_internal_calcMSEde(as.vector(CGGP$po[CGGP$poCOUNT, ]), x)
}
)
IMES_PostSamples[CGGP$poCOUNT,samplelcv] <- mean(sigma2.allsamples.alloutputs[samplelcv,] *
IMES_PostSamples_beforemeannewpoint)
}
}; rm(samplelcv)
IMES_UCB[CGGP$poCOUNT] = quantile(IMES_PostSamples[CGGP$poCOUNT,],probs=0.9)
} else if (selectionmethod %in% c("Oldest", "Random", "Lowest")) {
# nothing needed
} else {stop("Not possible #9235058")}
}
}
}
}
# Get design and other attributes updated
CGGP <- CGGP_internal_getdesignfromCGGP(CGGP)
# Check if none were added, return warning/error
if (n_before == nrow(CGGP$design)) {
warning("No points could be added. You may need a larger batch size.")
} else {
# Save design_unevaluated to make it easy to know which ones to add
CGGP$design_unevaluated <- CGGP$design[(n_before+1):nrow(CGGP$design),]
}
return(CGGP)
}
|
f1d865996764e32b5464d4dc3305b5fb4cd7665e
|
e86dfa2d53c17901489dbb94e5582e4ff897d68b
|
/man/plot_DetectionOverTime.Rd
|
e3ce155fb039d24e21719db5d6d3820dea130238
|
[
"MIT"
] |
permissive
|
galinajonsson/sparta
|
c432e70b219585cf3af4a20ac9a1175943b8e74a
|
e57b18fa9d9fb5e9118c5ab4cb0d84abdec78a76
|
refs/heads/master
| 2021-08-17T19:05:38.476016
| 2021-06-17T16:50:46
| 2021-06-17T16:50:46
| 147,808,000
| 0
| 0
| null | 2018-09-07T10:20:04
| 2018-09-07T10:20:04
| null |
UTF-8
|
R
| false
| true
| 1,295
|
rd
|
plot_DetectionOverTime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_DetectionOverTime.R
\name{plot_DetectionOverTime}
\alias{plot_DetectionOverTime}
\title{Diagnostics for the detection model with respect to Length}
\usage{
plot_DetectionOverTime(
model,
spname = NULL,
min.yr = NULL,
legend_labels = NULL,
legend_title = NULL
)
}
\arguments{
\item{model}{a fitted sparta model of class \code{OccDet}.}
\item{spname}{optional name of the species (used for plotting)}
\item{min.yr}{optional first year of time series (used for plotting)}
\item{legend_labels}{optional names for legend labels. Should be a character vector with three elements if the model is fitted with categorical or continuous list length specifications, and four elements if the model is fitted with a mixed list length specification}
\item{legend_title}{optional name for legend title. Should be a character vector.}
}
\value{
This function returns a plot showing the detection probability on the y-axis and year on the x-axis.
}
\description{
Creates a plot of detectability by year for differing list lengths from an occupancy model output.
}
\details{
Takes an object of class \code{OccDet}.
Calculates the detection probability and produces a plot of detectability over time for the reference data type.
}
|
8e0aa766c5f5cc3612af91048aa214dad4a80cb8
|
2684cc9d4c398690a10fa69ebacdc2e316a39bf9
|
/man/plot.trifield.Rd
|
7e1ab57e510038546497ffab48aae52b99348e0e
|
[] |
no_license
|
cran/trifield
|
caba782831cd8e31e0675756930c9439d5e64fd8
|
4ac597421ec5d48ec9c0ebecfa9ab0953686e819
|
refs/heads/master
| 2021-01-22T09:18:00.617550
| 2011-07-24T00:00:00
| 2011-07-24T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,575
|
rd
|
plot.trifield.Rd
|
\name{plot.trifield}
\alias{plot.trifield}
\title{
Plot a ternary field
}
\description{
A simple wrapper around image and contour for plotting ternary fields
}
\usage{
\method{plot}{trifield}(x, contours = TRUE, col = topo.colors(256), lab1 = "A = 0", lab2 = "B = 0", lab3 = "C = 0", tribox = TRUE, axis.lines = TRUE, ...)
}
\arguments{
\item{x}{
An object of class \code{trifield}
}
\item{contours}{
Make contours?
}
\item{col}{
Color palette to use
}
\item{lab1}{
First axis label
}
\item{lab2}{
Second axis label
}
\item{lab3}{
Third axis label
}
\item{tribox}{
Draw a triangle around the plot?
}
\item{axis.lines}{
Draw internal (altitude) axis lines?
}
\item{\dots}{
Additional graphics parameters to be passed to plot
}
}
\details{
This is a small demo function showing how one can make a ternary contour plot from a \code{trifield} object. The input object can be any list with x, y and z fields, but the result will be odd if non-NA z-values fall outside an equilateral triangle with unit altitude.
}
\value{
None
}
\author{
Tim Keitt <tkeitt@gmail.com>
}
\references{\url{http://dx.doi.org/10.1016/j.ecolmodel.2012.05.020}}
\seealso{
\code{\link{plot.default}}, \code{\link{image.default}}, \code{\link{contour.default}}
}
\examples{
# See demo(trifield)
\dontrun{
grid.size = 128
par(mar = rep(2, 4), oma = rep(0, 4))
tg = ternary.grid(grid.size)
f = function(x)
sin(2 * pi * x[1]) +
sin(3 * pi * x[2]) +
sin(4 * pi * x[3])
z = ternary.apply(tg, f)
tf = ternary.field(tg, z)
plot(tf)
ternary.legend()
}
}
\keyword{ hplot }
|
5f22f28ea519bed06dd6c166a8610851f3928023
|
16206bf1a9ef3e0591b9aa5b25ec04cbbde2ce29
|
/tcga/rna_seq_genes.r
|
c0ed9dd6b0a9b5b7986be18d9363afb10019b4ec
|
[] |
no_license
|
ipstone/data
|
73227781f3f65dfeee7aa2c552c6488fef742859
|
0f7606d5790a340d93a6d04eb2b929340603e782
|
refs/heads/master
| 2022-06-10T04:02:05.207453
| 2022-01-22T18:54:36
| 2022-01-22T18:54:38
| 270,737,979
| 0
| 0
| null | 2020-06-08T16:18:17
| 2020-06-08T16:18:16
| null |
UTF-8
|
R
| false
| false
| 487
|
r
|
rna_seq_genes.r
|
#' Get all valid gene symbols
#'
#' @return A character vector of HGNC symbols
rna_seq_genes = function() {
    library(methods) # required; otherwise h5 error
    file = h5::h5file(module_file("cache", "rna_seq2_vst.gctx"), mode="r")
    # Close the HDF5 handle on exit so it is released even if the
    # extraction below fails (the old code leaked the handle on error).
    on.exit(h5::h5close(file), add = TRUE)
    # Row IDs of the expression matrix are the HGNC gene symbols
    file["/0/META/ROW/id"][]
}
# Lightweight self-test: runs only when this file is executed directly
# (module_name() is NULL) rather than loaded as a module.
if (is.null(module_name())) {
    library(testthat)
    symbols = rna_seq_genes()
    known = c("A1BG", "A1CF", "ZZEF1")
    expect_true(is.character(symbols))
    expect_true(all(known %in% symbols))
}
|
097b64ce83c8f1ed7d2ce051ed588c7328dde4b9
|
917079cb74de44e7633dad584f1df464c5d21ff9
|
/R/df_new_proc_keratong.R
|
0730751f0e56e3cf84d123280f920d38b1764175
|
[] |
no_license
|
yusriy/MPOB
|
fb995f4fa2334b6cf6c26b1f9cc06d12ef055783
|
deb2b7ffa5f5e500cc7a0bd389d73bc982bc20a9
|
refs/heads/master
| 2020-05-17T03:24:06.119743
| 2015-09-02T15:30:27
| 2015-09-02T15:30:27
| 41,714,993
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,691
|
r
|
df_new_proc_keratong.R
|
#Script to create and filter the half-hourly EC/biomet data frame df_new
#####Loading data###########################################################
df_EC <- read.csv("Data/EC_biomet.csv")
#remove column 'X' (row-index column left over from a previous write.csv)
df_EC <- df_EC[,-1]
#create df_new as a working copy; df_EC is kept unmodified so that the
#which() conditions below always refer to the original, unfiltered values
df_new <- df_EC
#Convert time_stamp from "factor" to "POSIXlt"
df_new$time_stamp <- as.POSIXlt(df_new$time_stamp)
#####Removing observations with#############################################
# 1) wind direction between 224 to 244 degrees (flow from behind the
#    CSAT3 sonic anemometer body is distorted)
# 2) stability parameter Z.L outside the range -10 to 10
# 3) flux quality flag qc equal to 2
#1)Wind direction between 224 to 244 degrees of (behind) CSAT3 sonic anemometer
df_new$co2_flux[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
df_new$qc_co2_flux[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
df_new$h2o_flux[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
df_new$qc_h2o_flux[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
df_new$LE[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
df_new$qc_LE[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
df_new$H[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
df_new$qc_H[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
df_new$Z.L[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
df_new$wind_dir[which(df_EC$wind_dir > 224 & df_EC$wind_dir < 244)] <- NA
#2)Assign values where Z.L < -10 or Z.L > 10 with NA
df_new$co2_flux[which(df_EC$Z.L > 10 | df_EC$Z.L < -10)] <- NA
df_new$qc_co2_flux[which(df_EC$Z.L > 10 | df_EC$Z.L < -10)] <- NA
df_new$h2o_flux[which(df_EC$Z.L > 10 | df_EC$Z.L < -10)] <- NA
df_new$qc_h2o_flux[which(df_EC$Z.L > 10 | df_EC$Z.L < -10)] <- NA
df_new$LE[which(df_EC$Z.L > 10 | df_EC$Z.L < -10)] <- NA
df_new$qc_LE[which(df_EC$Z.L > 10 | df_EC$Z.L < -10)] <- NA
df_new$H[which(df_EC$Z.L > 10 | df_EC$Z.L < -10)] <- NA
df_new$qc_H[which(df_EC$Z.L > 10 | df_EC$Z.L < -10)] <- NA
df_new$Z.L[which(df_EC$Z.L > 10 | df_EC$Z.L < -10)] <- NA
#3)assign qc = 2 with NA (relevant parameters only)
df_new$co2_flux[which(df_EC$qc_co2_flux == 2)] <- NA
df_new$h2o_flux[which(df_EC$qc_h2o_flux == 2)] <- NA
df_new$LE[which(df_EC$qc_LE == 2)] <- NA
df_new$H[which(df_EC$qc_H == 2)] <- NA
#####Create NA data frame to fill up missing time_stamp from################
#2015-01-15 16:30:00 till 2015-02-01 00:00:00
#Create dummy time_stamp column (784 half-hour steps, inclusive)
janfeb <- matrix(seq(as.POSIXct('2015-01-15 16:30:00 MYT'),
as.POSIXct('2015-02-01 00:00:00 MYT'),1800)
,nrow=784, ncol=1)
janfebr <- as.POSIXlt(janfeb,origin = "1970-01-01")
janfebr <- as.data.frame(janfebr)
#Create dummy variables columns filled with NA (784 rows x 151 data columns)
dummy_col <- matrix(rep(NA,118384),nrow=784,ncol=151)
dummy_col <- as.data.frame(dummy_col)
#Combine date column with dummy variables
df_dummy <- cbind(janfebr,dummy_col)
#giving df_dummy the same header names so rbind() below lines columns up
header_new <- names(df_new)
names(df_dummy) <- header_new
#remove temporary variables
rm(janfeb,janfebr,dummy_col)
#Insert df_dummy into df_new at the point of the missing timestamps
#Split df_new at the row boundary where the gap occurs (hard-coded rows)
df_new_1 <- df_new[1:23092,1:152]
df_new_2 <- df_new[23093:24435,1:152]
row.names(df_new_2)<-NULL
#Combine the data frames at point of missing timestamp
df_new <- rbind(df_new_1,df_dummy,df_new_2)
#remove temporary dataframes
rm(df_new_1,df_new_2,df_dummy)
#####Other missing time stamps###########################################################
#Missing time stamp (NA instead of timestamp) from
#2014-01-01 00:30:00 to 2014-02-01 00:00:00; rebuild the sequence in place
df_new$time_stamp[5388:6875]<-seq(as.POSIXct('2014-01-01 00:30:00 MYT'),
as.POSIXct('2014-02-01 00:00:00 MYT'),1800)
#Missing time stamp for 2013-12-01 00:30:00: insert a single all-NA row
timestamp1 <- as.POSIXct('2013-12-01 00:30:00')
timestamp1 <- as.data.frame(timestamp1)
dummy_row <- matrix(rep(NA,151),nrow=1,ncol=151)
dummy_row <- as.data.frame(dummy_row)
dummy_12_01 <- cbind(timestamp1,dummy_row)
names(dummy_12_01) <- header_new
df_new_3 <- df_new[1:3900,1:152]
df_new_4 <- df_new[3901:25219,1:152]
row.names(df_new_4)<-NULL
df_new <- rbind(df_new_3,dummy_12_01,df_new_4)
rm(df_new_3,df_new_4,dummy_12_01,timestamp1)
#Missing time stamp for 2014-05-01 00:00:00: insert a single all-NA row
timestamp2 <- as.POSIXct('2014-05-01 00:00:00')
timestamp2 <- as.data.frame(timestamp2)
dummy_05_01 <- cbind(timestamp2,dummy_row)
names(dummy_05_01) <- header_new
df_new_5 <- df_new[1:11148,1:152]
df_new_6 <- df_new[11149:25220,1:152]
row.names(df_new_6)<-NULL
df_new <- rbind(df_new_5,dummy_05_01,df_new_6)
rm(df_new_5,df_new_6,dummy_05_01,timestamp2,dummy_row)
#Missing time stamp from 2014-11-21 04:00:00 till 2014-12-01 23:00:00
#(519 half-hour rows of NA inserted at the hard-coded row boundary)
timestamp3 <- seq(as.POSIXct('2014-11-21 04:00:00 MYT'),
as.POSIXct('2014-12-01 23:00:00 MYT'),1800)
timestamp3 <- as.data.frame(timestamp3)
dummy_col2 <- matrix(rep(NA,78369),nrow=519,ncol=151)
dummy_col2 <- as.data.frame(dummy_col2)
dummy_11_21 <- cbind(timestamp3,dummy_col2)
names(dummy_11_21) <- header_new
df_new_7 <- df_new[1:20948,1:152]
df_new_8 <- df_new[20949:25221,1:152]
row.names(df_new_8)<-NULL
df_new <- rbind(df_new_7,dummy_11_21,df_new_8)
rm(df_new_7,df_new_8,dummy_11_21,dummy_col2,timestamp3,header_new)
#Remove extra (duplicate) time stamp at 2014-04-01 00:00:00, row 9708
df_new <- df_new[-c(9708),]
row.names(df_new) <- NULL
#timecheck<-as.data.frame(seq(as.POSIXct('2013-09-10 18:30:00 MYT'),
#as.POSIXct('2014-09-30 23:30:00 MYT'),1800))
#####Filter data#############################################################
#Spike removal by visual inspection (row indices picked by hand from plots)
df_new$co2_flux[c(1368,1847,2069,2257,2843,5568,9058,9061,9067,9069,10375,10392,11213,12000,12057,
12422,12423,12462,12554,12702,12915,13872,14000,14086,14162,14253,15151,15247,
15831,16030,16124,16176,16511,16605,17787,17994,17996,18188,18189,18230,18233,
18332,18336,18650,18653,18912,18913,18914,19020,19038,19351,19390,19687,19724,
19972,19975,20304,20305,20367,20370,20439,20440,20498,20750,20752,20777,21584,
22075,22162,22164,23118,23177,23185,23231,23238,23275,23282,23375,23429,23560,
23564,24559,24570)] <- NA
df_new$h2o_flux[c(1368,1847,2257,3161,3452,4656,5814,7757,7864,9068,9597,10944,11273,11617,
12057,12422,12423,12426,13194,14253,16176,16605,17660,17996,18189,18331,
18336,18912,18915,19022,19023,19094,19106,19687,19765,19971,20213,20304,
20440,20498,22075,22162,22164,22312,23177,23272,23281,23282,24559,24570)] <- NA
#LE spikes coincide with h2o_flux spikes (same rows)
df_new$LE[c(1368,1847,2257,3161,3452,4656,5814,7757,7864,9068,9597,10944,11273,11617,
12057,12422,12423,12426,13194,14253,16176,16605,17660,17996,18189,18331,
18336,18912,18915,19022,19023,19094,19106,19687,19765,19971,20213,20304,
20440,20498,22075,22162,22164,22312,23177,23272,23281,23282,24559,24570)] <- NA
df_new$H[c(1844,1847,9595,11228,11466,14162,14903,15533,15979,16685,18187,18914,23177)] <- NA
# Biomet data (slow response)
#Air temperature
# Ambient temperature at levels 1 to 5 (low to high)
#Change Kelvin to Celsius
df_new$Ta_1_1_1 <- df_new$Ta_1_1_1 - 273.15
df_new$Ta_2_1_1 <- df_new$Ta_2_1_1 - 273.15
df_new$Ta_3_1_1 <- df_new$Ta_3_1_1 - 273.15
df_new$Ta_4_1_1 <- df_new$Ta_4_1_1 - 273.15
df_new$Ta_5_1_1 <- df_new$Ta_5_1_1 - 273.15
#Soil temperature
#Change Kelvin to Celsius
df_new$Ts_1_1_1 <- df_new$Ts_1_1_1 - 273.15
df_new$Ts_2_1_1 <- df_new$Ts_2_1_1 - 273.15
df_new$Ts_3_1_1 <- df_new$Ts_3_1_1 - 273.15
# Rain gauge
# m (meter) to mm
df_new$Prain_1_1_1 <- df_new$Prain_1_1_1*1000
# Remove all problematic data and spikes after visual inspection
# Air Temperature: screen each level against its own values (< 16 C).
# BUGFIX: levels 4 and 5 previously reused the level-3 condition. Since
# level 3 had already been set to NA by the preceding line, that condition
# could never be TRUE again, so Ta_4/Ta_5 were never screened at all.
# Each level is now filtered on its own column, as in levels 1-3.
df_new$Ta_1_1_1[which(df_new$Ta_1_1_1 < 16)] <- NA
df_new$Ta_2_1_1[which(df_new$Ta_2_1_1 < 16)] <- NA
df_new$Ta_3_1_1[which(df_new$Ta_3_1_1 < 16)] <- NA
df_new$Ta_4_1_1[which(df_new$Ta_4_1_1 < 16)] <- NA
df_new$Ta_5_1_1[which(df_new$Ta_5_1_1 < 16)] <- NA
# RH: keep only the plausible range 40-100 %
df_new$RH_1_1_1[which(df_new$RH_1_1_1 < 40 | df_new$RH_1_1_1 > 100)] <- NA
df_new$RH_2_1_1[which(df_new$RH_2_1_1 < 40 | df_new$RH_2_1_1 > 100)] <- NA
df_new$RH_3_1_1[which(df_new$RH_3_1_1 < 40 | df_new$RH_3_1_1 > 100)] <- NA
df_new$RH_4_1_1[which(df_new$RH_4_1_1 < 40 | df_new$RH_4_1_1 > 100)] <- NA
df_new$RH_5_1_1[which(df_new$RH_5_1_1 < 40 | df_new$RH_5_1_1 > 100)] <- NA
# Turn RH = 100% to NA because constant at 100 (stuck-sensor periods,
# row indices identified by visual inspection)
df_new$RH_1_1_1 [c(20427,20428,20466:20478,20520:20525,20620,20622,20854,20855,20856,
20857,20858,20859,20860,21482,21483,21484,21485,21486,21628,21629,21630,23163,
23164,23165,23166,23205,23206,23207,23208,23209,23210,23211,23212,23213,23214,
23215,23259,23260,23261,23262,23301,23302,23303,23304,23305,23306,23307,23308,
23309,23310,23311,23312,23342,23343,23344,23345,23346,23347,23348,23349,23350,
23351,23352,23353,23354,23355,23356,23357,23401,23402,23403,23404,23405)] <- NA
# Net radiation (Rn): plausible range -100 to 900 W m-2
df_new$Rn_1_1_1[which(df_new$Rn_1_1_1 > 900 | df_new$Rn_1_1_1 < -100)] <- NA
# PAR (PPFD): plausible range 0 to 2000
df_new$PPFD_1_1_1[which(df_new$PPFD_1_1_1 > 2000 | df_new$PPFD_1_1_1 < 0)] <- NA
df_new$PPFD_2_1_1[which(df_new$PPFD_2_1_1 > 2000 | df_new$PPFD_2_1_1 < 0)] <- NA
df_new$PPFD_3_1_1[which(df_new$PPFD_3_1_1 > 2000 | df_new$PPFD_3_1_1 < 0)] <- NA
# Rain: 0 to 40 mm per half hour
df_new$Prain_1_1_1[which(df_new$Prain_1_1_1 > 40 | df_new$Prain_1_1_1 <0)] <- NA
# Windspeed (RM Young): 0 to 15.4 m s-1
df_new$WS_1_1_1[which(df_new$WS_1_1_1 > 15.4 | df_new$WS_1_1_1 <0)] <- NA
# Wind direction: 0 to 360 degrees
df_new$WD_1_1_1[which(df_new$WD_1_1_1 <0 | df_new$WD_1_1_1 >360)] <- NA
#Soil water content: volumetric fraction, 0 to 1
df_new$SWC_1_1_1[which(df_new$SWC_1_1_1 <0 | df_new$SWC_1_1_1 > 1)] <- NA
df_new$SWC_2_1_1[which(df_new$SWC_2_1_1 <0 | df_new$SWC_2_1_1 > 1)] <- NA
df_new$SWC_3_1_1[which(df_new$SWC_3_1_1 <0 | df_new$SWC_3_1_1 > 1)] <- NA
# Soil heat flux: -200 to 300 W m-2
df_new$SHF_1_1_1[which(df_new$SHF_1_1_1 < -200 | df_new$SHF_1_1_1 > 300)] <- NA
df_new$SHF_2_1_1[which(df_new$SHF_2_1_1 < -200 | df_new$SHF_2_1_1 > 300)] <- NA
df_new$SHF_3_1_1[which(df_new$SHF_3_1_1 < -200 | df_new$SHF_3_1_1 > 300)] <- NA
# Soil Temperature: 23 to 32 deg C
df_new$Ts_1_1_1[which(df_new$Ts_1_1_1 <23 | df_new$Ts_1_1_1 >32 )] <- NA
df_new$Ts_2_1_1[which(df_new$Ts_2_1_1 <23 | df_new$Ts_2_1_1 >32 )] <- NA
df_new$Ts_3_1_1[which(df_new$Ts_3_1_1 <23 | df_new$Ts_3_1_1 >32 )] <- NA
######Storage calculation functions######################################################################
# Absolute humidity [kg m-3] from air temperature [deg C] and relative
# humidity [%], via the Magnus-Tetens saturation vapour pressure form.
# Fully vectorized over its arguments.
abs_humidity <- function(temp,RH){
  sat_term <- 6.112 * exp(17.67 * temp / (temp + 243.5))
  g_per_m3 <- sat_term * RH * 2.1674 / (273.15 + temp)
  g_per_m3 / 1000 # convert g m-3 to kg m-3
}
# Volumetric heat capacity rho * c_p of the actual (dry + moist) air
# [J m-3 C-1], vectorized over its arguments.
#   rh   = relative humidity [%]
#   temp = air temperature [deg C]
#   P    = atmospheric pressure [Pa] (EddyPro air_pressure output; note the
#          kPa conversion below)
# Adapted from a Matlab script provided by James from LI-COR.
rho_cp <- function(rh,temp,P){
  p_kpa <- P / 1000                 # EddyPro gives Pa; work in kPa
  gas_const <- 8.31451              # [N m mol-1 K-1]
  t_kelvin <- temp + 273.15
  # Specific heat of moist and of dry air [J kg-1 C-1]
  cp_moist <- 1859 + 0.13 * rh + temp * (0.193 + 0.00569 * rh) +
    temp^2 * (0.001 + 0.0005 * rh)
  cp_dry <- 1005 + (temp + 23.12) / 3364
  # Saturation vapour pressure [kPa], then its pressure correction
  e_sat0 <- 0.6112 * exp(17.62 * temp / (243.12 + temp))
  e_sat <- e_sat0 * (1.00072 + p_kpa * (3.2 + 0.00059 * temp^2) / 100000)
  # Actual vapour pressure of the (unsaturated) air [kPa]
  e_act <- rh * e_sat / 100
  # Partial densities of dry air and of water vapour [g m-3]
  dens_dry <- 29.002 * (p_kpa * 1000) / (gas_const * t_kelvin)
  dens_vap <- 18.01 * (e_act * 1000) / (gas_const * t_kelvin)
  # Combine the two contributions into rho * c_p [J m-3 C-1]
  (cp_moist * dens_vap + cp_dry * dens_dry) / 1000
}
# Composite trapezium-rule integral of a profile sampled at five heights.
#   heights = measurement heights (first five elements are used) [m]
#   x1..x5  = profile values at heights[1]..heights[5] (scalars or
#             equal-length vectors; the result is elementwise)
trapezium_intg <- function(heights,x1,x2,x3,x4,x5){
  width <- diff(heights[1:5]) # layer thicknesses between adjacent levels
  0.5 * (width[1] * (x1 + x2) +
         width[2] * (x2 + x3) +
         width[3] * (x3 + x4) +
         width[4] * (x4 + x5))
}
#### Calculating storage H in canopy #################################
# Sensible-heat storage change in the air column below the EC sensor,
# integrated over the five measurement levels.
heights <- c(2,5,10,15,30.65) #Levels 1: 2 m, 2: 5 m, 3: 10 m, 4: 15 m, 5: 30.65 m
# 5 heights are used here unlike in James' (LI-COR) script which uses
# only 4 heights
# Calculating rho * cp for each level (rho_cp is vectorized over rows)
rhocp1 <- rho_cp(df_new$RH_1_1_1,df_new$Ta_1_1_1,df_new$air_pressure)
rhocp2 <- rho_cp(df_new$RH_2_1_1,df_new$Ta_2_1_1,df_new$air_pressure)
rhocp3 <- rho_cp(df_new$RH_3_1_1,df_new$Ta_3_1_1,df_new$air_pressure)
rhocp4 <- rho_cp(df_new$RH_4_1_1,df_new$Ta_4_1_1,df_new$air_pressure)
rhocp5 <- rho_cp(df_new$RH_5_1_1,df_new$Ta_5_1_1,df_new$air_pressure)
# Half-hourly rate of change of (rho * c_p * T) at each level.
# BUGFIX: the old index loops started at i = 1, where x[i-1] is a
# zero-length vector and `x[1] <- numeric(0)` raises "replacement has
# length zero" in R. The first record (no predecessor) is now explicitly
# NA and the differences are vectorized with diff().
rho_cp_dT1 <- c(NA, diff(rhocp1 * df_new$Ta_1_1_1)) / (30 * 60)
rho_cp_dT2 <- c(NA, diff(rhocp2 * df_new$Ta_2_1_1)) / (30 * 60)
rho_cp_dT3 <- c(NA, diff(rhocp3 * df_new$Ta_3_1_1)) / (30 * 60)
rho_cp_dT4 <- c(NA, diff(rhocp4 * df_new$Ta_4_1_1)) / (30 * 60)
rho_cp_dT5 <- c(NA, diff(rhocp5 * df_new$Ta_5_1_1)) / (30 * 60)
# Vertical integration with the trapezium rule; trapezium_intg is
# elementwise in its x arguments, so this is vectorized over all rows.
H_stor <- trapezium_intg(heights, rho_cp_dT1, rho_cp_dT2, rho_cp_dT3,
rho_cp_dT4, rho_cp_dT5)
# Adding to df_new
df_new <- cbind(df_new,H_stor)
rm(rho_cp_dT1,rho_cp_dT2,rho_cp_dT3,rho_cp_dT4,
rho_cp_dT5,rhocp1,rhocp2,rhocp3,
rhocp4,rhocp5,H_stor)
#### Calculating storage LE in canopy ####
# Latent-heat storage change below the EC sensor from the change in
# absolute humidity at the five measurement levels.
# abs_humidity() is vectorized, so the old per-row loops are unnecessary.
hum1 <- abs_humidity(df_new$Ta_1_1_1, df_new$RH_1_1_1)
hum2 <- abs_humidity(df_new$Ta_2_1_1, df_new$RH_2_1_1)
hum3 <- abs_humidity(df_new$Ta_3_1_1, df_new$RH_3_1_1)
hum4 <- abs_humidity(df_new$Ta_4_1_1, df_new$RH_4_1_1)
hum5 <- abs_humidity(df_new$Ta_5_1_1, df_new$RH_5_1_1)
# Adding to df_new
df_new <- cbind(df_new,hum1,hum2,hum3,hum4,hum5)
rm(hum1,hum2,hum3,hum4,hum5)
# Calculating storage LE in canopy
L_v <- 2540000 # latent heat of vaporization [J/kg]
# Half-hourly rate of change of latent-heat content at each level.
# BUGFIX: the old loops started at i = 1 where x[i-1] is zero-length and
# the assignment raises "replacement has length zero"; the first record
# is now explicitly NA and the rest is vectorized with diff().
diff_hum1 <- L_v * c(NA, diff(df_new$hum1)) / (30 * 60)
diff_hum2 <- L_v * c(NA, diff(df_new$hum2)) / (30 * 60)
diff_hum3 <- L_v * c(NA, diff(df_new$hum3)) / (30 * 60)
diff_hum4 <- L_v * c(NA, diff(df_new$hum4)) / (30 * 60)
diff_hum5 <- L_v * c(NA, diff(df_new$hum5)) / (30 * 60)
# Vertical integration with the trapezium rule (vectorized over rows)
LE_stor <- trapezium_intg(heights, diff_hum1, diff_hum2, diff_hum3,
diff_hum4, diff_hum5)
# Adding to df_new
df_new <- cbind(df_new,LE_stor)
rm(LE_stor,L_v,diff_hum1,diff_hum2,diff_hum3,diff_hum4,diff_hum5,heights)
#### Calculate soil storage#######################################################
# Average the three replicate soil sensors. Columns are selected by name
# (previously by position 139:147), so the code no longer breaks if the
# column layout changes upstream.
#soil water content (m3/m3)
swc_avg <- rowMeans(df_new[,c("SWC_1_1_1","SWC_2_1_1","SWC_3_1_1")],na.rm=TRUE)
swc_avg <- as.data.frame(swc_avg)
#soil heat flux(W/m2)
shf_avg <- rowMeans(df_new[,c("SHF_1_1_1","SHF_2_1_1","SHF_3_1_1")],na.rm=TRUE)
shf_avg <- as.data.frame(shf_avg)
#soil temperature(c)
stemp_avg <- rowMeans(df_new[,c("Ts_1_1_1","Ts_2_1_1","Ts_3_1_1")],na.rm=TRUE)
stemp_avg <- as.data.frame(stemp_avg)
#combine data frames
df_new <- cbind(df_new,swc_avg,shf_avg,stemp_avg)
#remove temporary data
rm(swc_avg,shf_avg,stemp_avg)
#Heat storage from depth 0 to heat flux sensor depth
#Cs = (rho)b*Cd + (Theta)v*(rho)w*Cw
#where Cs = heat capacity of moist soil (J m-3 C-1)
# (rho)b = soil bulk density (rengam series / typic paleudult; ~1400 kg m-3)
# (rho)w = density of water at 26.11 C (average soil temperature, 996.76 kg m-3)
# Cd = heat capacity of dry mineral soil (890 J kg-1 C-1; Oliphant et al., 2004)
# (theta)v = soil water content (m3 water / m3 soil)
# Cw = heat capacity of water (4181.3 J kg-1 C-1)
# Dry-soil term 1400 * 890 = 1246000 J m-3 C-1, assumed constant.
# (Previously built as a hard-coded 25739-row matrix; now a plain vector
# that automatically matches the number of rows in df_new.)
Cs <- 1246000 + df_new$swc_avg*996.76*4181.3 # (J m-3 C-1)
# Half-hourly soil temperature change; the first record has no
# predecessor and is NA.
# BUGFIX: the old loop started at i = 1 where x[i-1] is zero-length and
# the assignment raises "replacement has length zero"; vectorized with
# diff() instead.
soil_dT <- c(NA, diff(df_new$stemp_avg))
#Calculating soil storage
#soil_stor = (soil_dT*Cs*d)/t (W m-2)
#where d = depth of the soil heat flux sensors (0.05m)
# t = time period (30min)
soil_stor <- (soil_dT*Cs*0.05)/(30*60)
#combine to main data frame
df_new <- cbind(df_new,soil_stor)
rm(soil_dT,Cs,soil_stor)
rm(df_EC)
|
14330b147f5b2b4b1a75ab680f4636386b776ef9
|
9670f235ca57516190abb724299e45ab246c7bc8
|
/rudolf_experimentation/distribution_graph_naltrexone.R
|
84c419b7212fdeb123ffee1c5279e9244cdeb952
|
[] |
no_license
|
LukasCincikas/MPhil-project-files
|
e77302983ec0efbbe0144ee0e3ecd98d38267b9b
|
c74064b76986db7a89f762510b20709fe63f8eef
|
refs/heads/main
| 2023-07-24T14:23:41.561443
| 2021-08-28T16:13:25
| 2021-08-28T16:13:25
| 312,322,303
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 39,796
|
r
|
distribution_graph_naltrexone.R
|
#Lukas Cincikas 2021
#A collection of various parameter graphs for the naltrexone study. Individual values are taken from .txt output files.
library(rstan)
library(patchwork)
# Hard-coded posterior summaries (mean plus the two 95% HDI endpoints)
# transcribed from model output .txt files (see file header). Frames suffixed
# "_col" hold the parameters that the graphs below draw in magenta; in the
# visible data their HDIs exclude zero, which appears to be the selection
# rule — TODO confirm against the analysis scripts.
# NOTE(review): in several frames lower_bound > upper_bound (the endpoints
# are transposed); geom_errorbar(ymin=, ymax=) draws the same bar either way.
# Alcohol-Control differences, grouped by parameter scale for plotting.
data_control_alcohol_1 <- data.frame(
parameter = c("Colour consistency", "Bet consistency", "Loss/gain sensitivity"),
mean = c(-1.281435, -2.509639, -0.7862308),
lower_bound = c(0.4421553, 2.845272, 0.052656),
upper_bound = c(-2.967002, -8.079226, -1.656406)
)
data_control_alcohol_2 <- data.frame(
parameter = c("Side bias", "Delay aversion", "Previous outcome", "Odds sensitivity"),
mean = c(0.05519839, 0.002694728, 0.02364208, 0.1308009),
lower_bound = c(0.1406517, 0.03968826, 0.1871594, 0.5476102),
upper_bound = c(-0.02742524, -0.0353849, -0.1394677, -0.2833208)
)
# Substance(poly)-Control differences.
data_control_poly_1 <- data.frame(
parameter = c("Bet consistency"),
mean = c(-5.034968),
lower_bound = c(0.4334),
upper_bound = c(-10.4868)
)
data_control_poly_1_col <- data.frame(
parameter = c("Colour consistency", "Loss/gain sensitivity"),
mean = c(-2.503876, -1.147968),
lower_bound = c(-1.000371, -0.3981),
upper_bound = c(-4.0591, -1.965494)
)
data_control_poly_2 <- data.frame(
parameter = c("Side bias", "Odds sensitivity"),
mean = c(0.02391672, 0.04783284),
lower_bound = c(0.09866658, 0.4278),
upper_bound = c(-0.048465, -0.3056343)
)
data_control_poly_2_col <- data.frame(
parameter = c("Delay aversion", "Previous outcome"),
mean = c(0.05362031, 0.2114642),
lower_bound = c(0.1033, 0.3879),
upper_bound = c(0.008075647, 0.0372774)
)
# Alcohol-Substance differences.
data_alcohol_poly_1 <- data.frame(
parameter = c("Colour consistency", "Bet consistency", "Loss/gain sensitivity", "Odds sensitivity"),
mean = c(1.2224, 2.5253, 0.3617, 0.082968),
lower_bound = c(-0.18266, -2.5712, -0.26059, -0.3781),
upper_bound = c(2.71566, 7.548, 1.0547, 0.54028)
)
data_alcohol_poly_2 <- data.frame(
parameter = c("Side bias", "Previous outcome"),
mean = c(0.03128, -0.1878),
lower_bound = c(-0.03115, -0.3896),
upper_bound = c(0.09406, 0.006728588)
)
data_alcohol_poly_2_col <- data.frame(
parameter = c("Delay aversion"),
mean = c(-0.050926),
lower_bound = c(-0.09943),
upper_bound = c(-0.005993)
)
# Naltrexone-condition differences.
data_naltrexone_1 <- data.frame(
parameter = c("Side bias", "Previous outcome", "Odds sensitivity"),
mean = c(0.00997, -0.068033, 0.044687),
lower_bound = c(0.057113, 0.0078057, 0.15477),
upper_bound = c(-0.038299, -0.14356, -0.06587)
)
data_naltrexone_1_col <- data.frame(
parameter = c("Delay aversion"),
mean = c(0.01585),
lower_bound = c(0.030881),
upper_bound = c(0.00119)
)
data_naltrexone_2 <- data.frame(
parameter = c("Bet consistency"),
mean = c(2.1668),
lower_bound = c(4.2633),
upper_bound = c(0.1087)
)
data_naltrexone_3 <- data.frame(
parameter = c("Colour consistency"),
mean = c(0.165968),
lower_bound = c(0.5882355),
upper_bound = c(-0.2648)
)
data_naltrexone_3_col <- data.frame(
parameter = c("Loss/gain sensitivity"),
mean = c(-0.2331),
lower_bound = c(-0.006263),
upper_bound = c(-0.47517)
)
# Forest-style plots: posterior mean (point) with 95% HDI (error bar),
# flipped so parameters run down the y axis; dashed line marks zero.
# Magenta layers come from the "_col" data frames.
# NOTE(review): theme(text = element_text(size=20)) is applied BEFORE
# theme_minimal(); complete themes replace earlier theme settings, so that
# call has no visible effect in any of these plots.
# NOTE(review): `size` for lines is deprecated since ggplot2 3.4 in favour
# of `linewidth` — presumably this was written against an older ggplot2.
graph_control_alcohol_1 <-
ggplot(data_control_alcohol_1, aes(x = parameter, y = mean)) +
geom_point(size=3.5) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound)) +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=0.5) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_control_alcohol_2 <-
ggplot(data_control_alcohol_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound)) +
xlab("") +
ylab("NALT study Alcohol-Control group posterior mean differences (±95% HDI)") +
geom_hline(yintercept = 0, linetype = "dashed", size=0.5) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
# Two-layer variant: plain and "_col" (magenta) frames plotted together.
graph_control_poly_1 <-
ggplot() +
geom_point(size=3.5, data=data_control_poly_1, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_poly_1_col, aes(x = parameter, y = mean), colour="magenta") +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data_control_poly_1) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_1_col, colour="magenta") +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold")
)
graph_control_poly_2 <-
ggplot() +
geom_point(size=3.5, data=data_control_poly_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_poly_2_col, aes(x = parameter, y = mean), colour="magenta") +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data_control_poly_2) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_2_col, colour="magenta") +
xlab("") +
ylab("NALT study Substance-Control group posterior mean differences (±95% HDI)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold")
)
graph_alcohol_poly_1 <-
ggplot(data_alcohol_poly_1, aes(x = parameter, y = mean)) +
geom_point(size=3.5) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound)) +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold")
)
graph_alcohol_poly_2 <-
ggplot() +
geom_point(size=3.5, data=data_alcohol_poly_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_alcohol_poly_2_col, aes(x = parameter, y = mean), colour="magenta") +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data_alcohol_poly_2) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_alcohol_poly_2_col, colour="magenta") +
xlab("") +
ylab("NALT study Alcohol-Substance group posterior mean differences (±95% HDI)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold")
)
graph_naltrexone_1 <-
ggplot() +
geom_point(size=3.5, data=data_naltrexone_1, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_naltrexone_1_col, aes(x = parameter, y = mean), colour="magenta") +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data_naltrexone_1) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_1_col, colour="magenta") +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
# Single-parameter plot, drawn entirely in magenta.
graph_naltrexone_2 <-
ggplot(data_naltrexone_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5, colour="magenta") +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), colour="magenta") +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_naltrexone_3 <-
ggplot() +
geom_point(size=3.5, data=data_naltrexone_3, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_naltrexone_3_col, aes(x = parameter, y = mean), colour="magenta") +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data_naltrexone_3) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_3_col, colour="magenta") +
xlab("") +
ylab("Naltrexone condition posterior mean differences (±95% HDI)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
#Displays means of a few parameters for the groups
# Per-group (placebo condition) posterior parameter means with 95% intervals,
# hard-coded from model output files like the frames above.
data_means <- data.frame(
group = c("Loss/gain (control)", "Loss/gain (alcohol)", "Loss/gain (substance)", "Colour consistency (control)", "Colour consistency (alcohol)", "Colour consistency (substance)", "Previous outcome (control)", "Previous outcome (alcohol)", "Previous outcome (substance)"),
mean = c(2.1794, 1.5254, 1.0976, 5.0044, 3.6385, 2.68, 0.2257, 0.3348, 0.4259),
lower_bound = c(1.5246, 0.9893, 0.7142, 3.747, 2.576, 1.843, 0.114, 0.1769, 0.2743),
upper_bound = c(3.0015, 2.237, 1.5928, 6.37, 4.956, 3.669, 0.3469, 0.49946, 0.58593)
)
# Flipped point-and-interval plot; dashed line marks zero.
graph_means <-
ggplot(data_means, aes(x = group, y = mean)) +
geom_point(size=3.5) +
geom_errorbar(aes(x=group, ymin=lower_bound, ymax=upper_bound)) +
xlab("") +
ylab("Group (placebo) parameter means") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
#Makes graphs with drug use comparison between the poly groups of the two studies
# Co-use proportions (% of each group) for the CUD group of the ATX study and
# the substance (poly) group of the naltrexone study; values are hard-coded.
data_drugs_CUD <- data.frame(
  drug = c("Stimulants", "Opiates", "Alcohol"),
  proportions = c(100, 57.1, 10.7)
)
data_drugs_poly <- data.frame(
  drug = c("Stimulants", "Opiates", "Alcohol"),
  proportions = c(81.5, 55.6, 74.1)
)
# Bar charts on a shared 0-100 scale so the two groups compare directly.
graph_drugs_CUD <-
  ggplot(data_drugs_CUD, aes(x = drug, y = proportions)) +
  geom_col() +
  xlab("CUD group of ATX study") +
  ylab("Proportion of group") +  # fixed axis-label typo: was "Propotion"
  ylim(0, 100) +
  theme_minimal() +
  theme(axis.line = element_line(size=0.5, colour="black"),
        axis.text = element_text(size=11, face = "bold"),
        axis.ticks = element_line(size=1),
        text = element_text(size=14, face = "bold"))
graph_drugs_poly <-
  ggplot(data_drugs_poly, aes(x = drug, y = proportions)) +
  geom_col() +
  xlab("Substance group of naltrexone study") +
  ylab("Proportion of group") +  # fixed axis-label typo: was "Propotion"
  ylim(0, 100) +
  theme_minimal() +
  theme(axis.line = element_line(size=0.5, colour="black"),
        axis.text = element_text(size=11, face = "bold"),
        axis.ticks = element_line(size=1),
        text = element_text(size=14, face = "bold"))
#----------------------------------------------------------------------------------------
#This Section is for PLACEBO condition comparisons
#----------------------------------------------------------------------------------------
# Same forest-plot pattern as above: black = plain frame, magenta = "_col"
# frame (in the visible data the magenta HDIs exclude zero — TODO confirm).
#PLACEBO comparison of CONTROLS and COMBINED SUBSTANCE groups
data_control_substance <- data.frame(
parameter = c("Side bias", "Delay aversion", "Previous outcome"),
mean = c(-0.0201, 0.0311, 0.155),
lower_bound = c(0.0688, 0.0664, 0.313),
upper_bound = c(-0.107, -0.0057, -0.00149)
)
data_control_substance_2 <- data.frame(
parameter = c("Bet consistency", "Odds sensitivity"),
mean = c(-3.08, -0.0246),
lower_bound = c( 1.69, 0.327),
upper_bound = c( -8.25, -0.374)
)
data_control_substance_2_col <- data.frame(
parameter = c("Colour consistency", "Loss/gain sensitivity"),
mean = c(-1.85, -0.868),
lower_bound = c(-0.38, -0.0844),
upper_bound = c(-3.4, -1.75)
)
graph_control_substance <-
ggplot(data_control_substance, aes(x = parameter, y = mean)) +
geom_point(size=3.5) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound)) +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_control_substance_2 <-
ggplot() +
geom_point(size=3.5, data=data_control_substance_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_substance_2_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_substance_2) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_substance_2_col, colour="magenta") +
xlab("") +
ylab("95% HDI of difference between control and combined substance groups on PLACEBO") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
#PLACEBO comparison of CONTROLS and ALCOHOL
data_control_alcohol_placebo <- data.frame(
parameter = c("Side bias", "Delay aversion", "Previous outcome"),
mean = c(-0.0377, 0.00664, 0.109),
lower_bound = c(0.0682, 0.0467, 0.31),
upper_bound = c(-0.142, -0.0335, -0.082)
)
data_control_alcohol_placebo_2 <- data.frame(
parameter = c("Colour consistency", "Bet consistency", "Loss/gain sensitivity", "Odds sensitivity"),
mean = c(-1.37, -2.85, -0.654, -0.0119),
lower_bound = c(0.468, 2.67, 0.305, 0.442),
upper_bound = c(-3.14, -8.6, -1.64, -0.459)
)
graph_control_alcohol_placebo <-
ggplot(data_control_alcohol_placebo, aes(x = parameter, y = mean)) +
geom_point(size=3.5) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound)) +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_control_alcohol_placebo_2 <-
ggplot(data_control_alcohol_placebo_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound)) +
xlab("") +
ylab("95% HDI of Control and Alcohol groups (placebo)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
#PLACEBO comparison of CONTROL and POLYSUBSTANCE groups
# Same layout as the earlier comparisons; "_col" frames are drawn magenta.
data_control_poly_placebo <- data.frame(
parameter = c("Side bias"),
mean = c(-0.00244),
lower_bound = c(0.0875),
upper_bound = c(-0.0925)
)
data_control_poly_placebo_col <- data.frame(
parameter = c("Delay aversion", "Previous outcome"),
mean = c(0.0552, 0.2),
lower_bound = c(0.104, 0.395),
upper_bound = c(0.0106, 0.0111)
)
data_control_poly_placebo_2 <- data.frame(
parameter = c("Bet consistency", "Odds sensitivity"),
mean = c(-3.31, -0.0372),
lower_bound = c(2.42, 0.367),
upper_bound = c(-9.07, -0.425)
)
data_control_poly_placebo_2_col <- data.frame(
parameter = c("Colour consistency", "Loss/gain sensitivity"),
mean = c(-2.32, -1.08),
lower_bound = c(-0.751, -0.254),
upper_bound = c(-3.95, -1.98)
)
graph_control_poly_placebo <-
ggplot() +
geom_point(size=3.5, data=data_control_poly_placebo, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_poly_placebo_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_placebo) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_placebo_col, colour="magenta") +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_control_poly_placebo_2 <-
ggplot() +
geom_point(size=3.5, data=data_control_poly_placebo_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_poly_placebo_2_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_placebo_2) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_placebo_2_col, colour="magenta") +
xlab("") +
ylab("95% of Control and Polysubstance groups (On placebo)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
#PLACEBO comparison of ALCOHOL and POLYSUBSTANCE groups
data_alcohol_poly_placebo <- data.frame(
parameter = c("Side bias", "Previous outcome"),
mean = c(-0.0353, -0.0911),
lower_bound = c(-0.121, -0.313),
upper_bound = c(0.0504, 0.134)
)
data_alcohol_poly_placebo_col <- data.frame(
parameter = c("Delay aversion"),
mean = c( -0.0488),
lower_bound = c(-0.0996),
upper_bound = c(-0.000709)
)
data_alcohol_poly_placebo_2 <- data.frame(
parameter = c("Colour consistency", "Bet consistency", "Loss/gain sensitivity", "Odds sensitivity"),
mean = c(0.958, 0.454, 0.428, 0.0253),
lower_bound = c(-0.512, -4.95, -0.309, -0.455),
upper_bound = c(2.51, 5.71, 1.24, 0.505)
)
graph_alcohol_poly_placebo <-
ggplot() +
geom_point(size=3.5, data=data_alcohol_poly_placebo, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_alcohol_poly_placebo_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_alcohol_poly_placebo) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_alcohol_poly_placebo_col, colour="magenta") +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_alcohol_poly_placebo_2 <-
ggplot(data_alcohol_poly_placebo_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound)) +
xlab("") +
ylab("95% HDI of Alcohol and Polysubstance groups (on placebo)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
#----------------------------------------------------------------------------------------
#This Section is for NALTREXONE condition comparisons
#----------------------------------------------------------------------------------------
# Group differences measured under the naltrexone condition; same plotting
# pattern as the placebo section ("_col" frames are drawn magenta).
data_control_alcohol_naltrexone <- data.frame(
parameter = c("Delay aversion", "Previous outcome"),
mean = c(0.00125, 0.0618),
lower_bound = c(-0.0419, -0.121),
upper_bound = c(0.0445, 0.246)
)
data_control_alcohol_naltrexone_col <- data.frame(
parameter = c("Side bias"),
mean = c(-0.148),
lower_bound = c(-0.256),
upper_bound = c(-0.0435)
)
data_control_alcohol_naltrexone_2 <- data.frame(
parameter = c("Colour consistency", "Bet consistency", "Odds sensitivity"),
mean = c(1.2, 2.17, -0.274),
lower_bound = c(-0.647, -4.52, -0.713),
upper_bound = c(3.00, 8.86, 0.158)
)
data_control_alcohol_naltrexone_2_col <- data.frame(
parameter = c("Loss/gain sensitivity"),
mean = c(0.918),
lower_bound = c(0.123),
upper_bound = c(1.79)
)
graph_control_alcohol_naltrexone <-
ggplot() +
geom_point(size=3.5, data=data_control_alcohol_naltrexone, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_alcohol_naltrexone_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_alcohol_naltrexone) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_alcohol_naltrexone_col, colour="magenta") +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_control_alcohol_naltrexone_2 <-
ggplot() +
geom_point(size=3.5, data=data_control_alcohol_naltrexone_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_alcohol_naltrexone_2_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_alcohol_naltrexone_2) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_alcohol_naltrexone_2_col, colour="magenta") +
xlab("") +
ylab("95% of Control and Alcohol groups (On naltrexone)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
data_control_poly_naltrexone <- data.frame(
parameter = c("Side bias"),
mean = c(-0.0503),
lower_bound = c(-0.144),
upper_bound = c(0.0438)
)
data_control_poly_naltrexone_col <- data.frame(
parameter = c("Delay aversion", "Previous outcome"),
mean = c(-0.0518, -0.223),
lower_bound = c(-0.107, -0.42),
upper_bound = c(-0.00125, -0.0326)
)
data_control_poly_naltrexone_2 <- data.frame(
parameter = c("Odds sensitivity"),
mean = c(-0.133),
lower_bound = c(-0.535),
upper_bound = c(0.235)
)
data_control_poly_naltrexone_2_col <- data.frame(
parameter = c("Loss/gain sensitivity", "Colour consistency", "Bet consistency"),
mean = c(1.21, 2.68, 6.76),
lower_bound = c(0.471, 1.11, 0.757),
upper_bound = c(2.05, 4.29, 12.9)
)
graph_control_poly_naltrexone <-
ggplot() +
geom_point(size=3.5, data=data_control_poly_naltrexone, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_poly_naltrexone_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_naltrexone) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_naltrexone_col, colour="magenta") +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_control_poly_naltrexone_2 <-
ggplot() +
geom_point(size=3.5, data=data_control_poly_naltrexone_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_poly_naltrexone_2_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_naltrexone_2) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_poly_naltrexone_2_col, colour="magenta") +
xlab("") +
ylab("95% of Control and Polysubstance groups (On naltrexone)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
# NOTE(review): parameter = c() is NULL, so this is an empty placeholder
# frame (no non-significant parameters for this comparison); it is never
# plotted — the graph below uses only the "_col" frame. Dead but harmless.
data_alcohol_poly_naltrexone <- data.frame(
parameter = c(),
mean = c(),
lower_bound = c(),
upper_bound = c()
)
data_alcohol_poly_naltrexone_col <- data.frame(
parameter = c("Side bias", "Delay aversion", "Previous outcome"),
mean = c(0.0979, -0.053, -0.285),
lower_bound = c(0.0081, -0.108, -0.503),
upper_bound = c(0.185, -0.00232, -0.0707)
)
data_alcohol_poly_naltrexone_2 <- data.frame(
parameter = c("Colour consistency", "Bet consistency", "Loss/gain sensitivity", "Odds sensitivity"),
mean = c(1.49, 4.6, 0.296, 0.141),
lower_bound = c(-0.0154, -1.18, -0.283, -0.332),
upper_bound = c(3.08, 10.5, 0.929, 0.619)
)
# Empty placeholder as above; unused.
data_alcohol_poly_naltrexone_2_col <- data.frame(
parameter = c(),
mean = c(),
lower_bound = c(),
upper_bound = c()
)
# All parameters here come from the "_col" frame, so the whole plot is magenta.
graph_alcohol_poly_naltrexone <-
ggplot() +
geom_point(size=3.5, data=data_alcohol_poly_naltrexone_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_alcohol_poly_naltrexone_col, colour="magenta") +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_alcohol_poly_naltrexone_2 <-
ggplot() +
geom_point(size=3.5, data=data_alcohol_poly_naltrexone_2, aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_alcohol_poly_naltrexone_2) +
xlab("") +
ylab("95% of Alcohol and Polysubstance groups (On naltrexone)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
data_control_substance_naltrexone <- data.frame(
parameter = c("Delay aversion", "Previous outcome"),
mean = c(-0.0253, -0.0805),
lower_bound = c(-0.0655, -0.233),
upper_bound = c(0.0158, 0.0757)
)
data_control_substance_naltrexone_col <- data.frame(
parameter = c("Side bias"),
mean = c(-0.0992),
lower_bound = c(-0.19),
upper_bound = c(-0.0109)
)
data_control_substance_naltrexone_2 <- data.frame(
parameter = c("Odds sensitivity", "Bet consistency"),
mean = c(-0.203, 4.46),
lower_bound = c(-0.541, -1.05),
upper_bound = c(0.133, 10.3)
)
data_control_substance_naltrexone_2_col <- data.frame(
parameter = c("Loss/gain sensitivity", "Colour consistency"),
mean = c(1.07, 1.94),
lower_bound = c(0.377, 0.458),
upper_bound = c(1.88, 3.51)
)
graph_control_substance_naltrexone <-
ggplot() +
geom_point(size=3.5, data=data_control_substance_naltrexone, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_substance_naltrexone_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_substance_naltrexone) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_substance_naltrexone_col, colour="magenta") +
xlab("") +
ylab("") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
graph_control_substance_naltrexone_2 <-
ggplot() +
geom_point(size=3.5, data=data_control_substance_naltrexone_2, aes(x = parameter, y = mean)) +
geom_point(size=3.5, data=data_control_substance_naltrexone_2_col, colour="magenta", aes(x = parameter, y = mean)) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_substance_naltrexone_2) +
geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_control_substance_naltrexone_2_col, colour="magenta") +
xlab("") +
ylab("95% of Control and combined substance groups (on naltrexone)") +
geom_hline(yintercept = 0, linetype = "dashed", size=1) +
theme(text = element_text(size=20)) +
coord_flip() +
theme_minimal() +
theme(axis.line = element_line(size=0.5, colour="black"),
panel.grid = element_line(colour="white"),
axis.text = element_text(size=11, face = "bold"),
axis.ticks = element_line(size=1),
text = element_text(size=14, face = "bold"))
#----------------------------------------------------------------------------------------
#This Section is for CONDITION comparisons
#----------------------------------------------------------------------------------------
# 95% HDI summaries for the drug (naltrexone) condition within the Control
# group.  Frames without the "_col" suffix are plotted in black; "_col"
# frames are overlaid in magenta on the forest plots below.
data_naltrexone_control <- data.frame(
  parameter = c("Side bias", "Previous outcome"),
  mean = c(-0.0696, -0.0186),
  lower_bound = c(0.0138, 0.0914),
  upper_bound = c(-0.153, -0.129)
)
data_naltrexone_control_col <- data.frame(
  parameter = c("Delay aversion"),
  mean = c(0.0197),
  lower_bound = c(0.04),
  upper_bound = c(0.00134)
)
data_naltrexone_control_2 <- data.frame(
  parameter = c("Colour consistency", "Bet consistency", "Loss/gain sensitivity", "Odds sensitivity"),
  mean = c(0.229, 3.09, -0.101, -0.107),
  lower_bound = c(1.04, 6.99, 0.402, 0.101),
  upper_bound = c(-0.57, -0.678, -0.641, -0.314)
)
# FIX: the original used `parameter = c(), mean = c(), ...`.  c() returns
# NULL, so data.frame() produced a 0x0 frame with none of the expected
# columns; any ggplot layer mapping aes(x = parameter, ...) onto it would
# fail.  Typed zero-length columns give an honest, plottable "no highlighted
# parameters" placeholder with the same schema as its siblings.
data_naltrexone_control_2_col <- data.frame(
  parameter = character(0),
  mean = numeric(0),
  lower_bound = numeric(0),
  upper_bound = numeric(0)
)
# Forest plot, panel 1: drug (naltrexone) condition effect in the Control
# group; magenta layers overlay the highlighted "_col" parameters.
graph_naltrexone_control <-
  ggplot() +
  geom_point(size=3.5, data=data_naltrexone_control, aes(x = parameter, y = mean)) +
  geom_point(size=3.5, data=data_naltrexone_control_col, colour="magenta", aes(x = parameter, y = mean)) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_control) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_control_col, colour="magenta") +
  xlab("") +
  ylab("") +
  geom_hline(yintercept = 0, linetype = "dashed", size=1) +
  theme(text = element_text(size=20)) +
  coord_flip() +
  theme_minimal() +
  theme(axis.line = element_line(size=0.5, colour="black"),
        panel.grid = element_line(colour="white"),
        axis.text = element_text(size=11, face = "bold"),
        axis.ticks = element_line(size=1),
        text = element_text(size=14, face = "bold"))
# Panel 2: no magenta overlay here — the corresponding "_col" frame is empty
# (no highlighted parameters for this panel).
graph_naltrexone_control_2 <-
  ggplot() +
  geom_point(size=3.5, data=data_naltrexone_control_2, aes(x = parameter, y = mean)) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_control_2) +
  xlab("") +
  ylab("95% HDI of drug condition in Control group") +
  geom_hline(yintercept = 0, linetype = "dashed", size=1) +
  theme(text = element_text(size=20)) +
  coord_flip() +
  theme_minimal() +
  theme(axis.line = element_line(size=0.5, colour="black"),
        panel.grid = element_line(colour="white"),
        axis.text = element_text(size=11, face = "bold"),
        axis.ticks = element_line(size=1),
        text = element_text(size=14, face = "bold"))
# 95% HDI summaries for the drug (naltrexone) condition within the Alcohol
# group.  Plain frames are drawn in black on the forest plots; "_col" frames
# are the magenta-highlighted parameters.
data_naltrexone_alcohol <- data.frame(
  parameter   = "Delay aversion",
  mean        = 0.0118,
  lower_bound = 0.042,
  upper_bound = -0.0177
)
data_naltrexone_alcohol_col <- data.frame(
  parameter   = c("Side bias", "Previous outcome"),
  mean        = c(0.116, -0.189),
  lower_bound = c(0.214, -0.0341),
  upper_bound = c(0.0159, -0.347)
)
# Second panel: consistency and sensitivity parameters.
data_naltrexone_alcohol_2 <- data.frame(
  parameter   = c("Colour consistency", "Bet consistency", "Odds sensitivity"),
  mean        = c(0.398, 3.78, 0.178),
  lower_bound = c(1.24, 7.89, 0.384),
  upper_bound = c(-0.463, -0.145, -0.0352)
)
data_naltrexone_alcohol_2_col <- data.frame(
  parameter   = "Loss/gain sensitivity",
  mean        = -0.365,
  lower_bound = -0.0149,
  upper_bound = -0.797
)
# Forest plots (two panels) for the drug condition effect in the Alcohol
# group; magenta layers overlay the "_col" highlighted parameters.
graph_naltrexone_alcohol <-
  ggplot() +
  geom_point(size=3.5, data=data_naltrexone_alcohol, aes(x = parameter, y = mean)) +
  geom_point(size=3.5, data=data_naltrexone_alcohol_col, colour="magenta", aes(x = parameter, y = mean)) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_alcohol) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_alcohol_col, colour="magenta") +
  xlab("") +
  ylab("") +
  geom_hline(yintercept = 0, linetype = "dashed", size=1) +
  theme(text = element_text(size=20)) +
  coord_flip() +
  theme_minimal() +
  theme(axis.line = element_line(size=0.5, colour="black"),
        panel.grid = element_line(colour="white"),
        axis.text = element_text(size=11, face = "bold"),
        axis.ticks = element_line(size=1),
        text = element_text(size=14, face = "bold"))
graph_naltrexone_alcohol_2 <-
  ggplot() +
  geom_point(size=3.5, data=data_naltrexone_alcohol_2, aes(x = parameter, y = mean)) +
  geom_point(size=3.5, data=data_naltrexone_alcohol_2_col, colour="magenta", aes(x = parameter, y = mean)) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_alcohol_2) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_alcohol_2_col, colour="magenta") +
  xlab("") +
  ylab("95% HDI of drug condition in Alcohol group") +
  geom_hline(yintercept = 0, linetype = "dashed", size=1) +
  theme(text = element_text(size=20)) +
  coord_flip() +
  theme_minimal() +
  theme(axis.line = element_line(size=0.5, colour="black"),
        panel.grid = element_line(colour="white"),
        axis.text = element_text(size=11, face = "bold"),
        axis.ticks = element_line(size=1),
        text = element_text(size=14, face = "bold"))
# 95% HDI summaries for the drug (naltrexone) condition within the
# Polysubstance group.  "_col" frames hold the magenta-highlighted
# parameters of the forest plots below.
data_naltrexone_poly <- data.frame(
  parameter = c("Delay aversion", "Side bias", "Previous outcome"),
  mean = c(0.016, -0.0169, 0.00404),
  lower_bound = c(0.0431, 0.0491, 0.123),
  upper_bound = c(-0.00896, -0.0849, -0.114)
)
# FIX: the original used `parameter = c(), ...`; c() is NULL, so data.frame()
# yielded a 0x0 frame without the expected columns — unusable by any ggplot
# layer mapping them.  Typed zero-length columns keep the shared schema while
# expressing "no highlighted parameters".
data_naltrexone_poly_col <- data.frame(
  parameter = character(0),
  mean = numeric(0),
  lower_bound = numeric(0),
  upper_bound = numeric(0)
)
data_naltrexone_poly_2 <- data.frame(
  parameter = c("Colour consistency", "Bet consistency", "Odds sensitivity"),
  mean = c(-0.13, -0.365, 0.063),
  lower_bound = c(0.392, 2.43, 0.223),
  upper_bound = c(-0.661, -3.31, -0.0885)
)
data_naltrexone_poly_2_col <- data.frame(
  parameter = c("Loss/gain sensitivity"),
  mean = c(-0.233),
  lower_bound = c(-0.00688),
  upper_bound = c(-0.505)
)
# Forest plots (two panels) for the drug condition effect in the
# Polysubstance group.  Panel 1 has no magenta overlay (its "_col" frame is
# empty); panel 2 highlights "Loss/gain sensitivity".
graph_naltrexone_poly <-
  ggplot() +
  geom_point(size=3.5, data=data_naltrexone_poly, aes(x = parameter, y = mean)) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_poly) +
  xlab("") +
  ylab("") +
  geom_hline(yintercept = 0, linetype = "dashed", size=1) +
  theme(text = element_text(size=20)) +
  coord_flip() +
  theme_minimal() +
  theme(axis.line = element_line(size=0.5, colour="black"),
        panel.grid = element_line(colour="white"),
        axis.text = element_text(size=11, face = "bold"),
        axis.ticks = element_line(size=1),
        text = element_text(size=14, face = "bold"))
graph_naltrexone_poly_2 <-
  ggplot() +
  geom_point(size=3.5, data=data_naltrexone_poly_2, aes(x = parameter, y = mean)) +
  geom_point(size=3.5, data=data_naltrexone_poly_2_col, colour="magenta", aes(x = parameter, y = mean)) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_poly_2) +
  geom_errorbar(aes(x=parameter, ymin=lower_bound, ymax=upper_bound), data=data_naltrexone_poly_2_col, colour="magenta") +
  xlab("") +
  ylab("95% HDI of drug condition in the Polysubstance group") +
  geom_hline(yintercept = 0, linetype = "dashed", size=1) +
  theme(text = element_text(size=20)) +
  coord_flip() +
  theme_minimal() +
  theme(axis.line = element_line(size=0.5, colour="black"),
        panel.grid = element_line(colour="white"),
        axis.text = element_text(size=11, face = "bold"),
        axis.ticks = element_line(size=1),
        text = element_text(size=14, face = "bold"))
|
3a8a8ffe47f7bfa70b363bd6597bc03f2545be50
|
8daa079c673ebbfae5e876205431bd24b4f1fee6
|
/endophyte-script (2).R
|
0c46d5db35070fb331d31b1c67eae0253dd2dcfd
|
[] |
no_license
|
MahdiehSHM/Greenhouse
|
59127192d01d19699c4a4dc9be0e9bb6a6bf3781
|
6892b3aab90acf8c1a94a670e4885572ec19ebd8
|
refs/heads/master
| 2020-06-26T12:13:05.743045
| 2019-10-24T05:59:32
| 2019-10-24T05:59:32
| 199,627,568
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 68,701
|
r
|
endophyte-script (2).R
|
# factors affecting endophytic fungal diversity and communities
###################################
###################################
## Pakages we need for the analysis
library(mvabund)
library(vegan)
library(boral)
library(MASS)
library(ggplot2)
library(gridExtra)
library(mgcv)
##################################
##################################
## Data input
## Read the OTU abundance matrix (samples x OTUs) and the per-sample
## metadata; the first CSV column holds the sample IDs (row names).
EndoAbun <- read.csv(file = "matrix_endo.csv",
                     header = TRUE, row.names = 1)  # FIX: TRUE, not reassignable T
EndoMetaData <- read.csv(file = "metadata_end.csv", header = TRUE, row.names = 1)
# are the rownames matching?  (printed sanity check when run interactively)
rownames(EndoAbun) == rownames(EndoMetaData)
# setting the explaining predictors as factors (explicit level order)
Tissue <- factor(EndoMetaData$SUBSTRATE, levels = c("Leaf", "Branch"))
Time <- factor(EndoMetaData$TIME, levels = c("1", "2", "3"))
Locality <- factor(EndoMetaData$LOCALITY, levels = c("Bisoton", "Hasan abad", "Khosro abad",
                                                     "Sarmil", "Sorkhe dize"))
Temperature <- factor(EndoMetaData$TEMPRATURE, levels = c("25 Degree", "4 Degree"))
IR <- EndoMetaData$IR ### isolation rate
## Data distribution: eyeball IR (raw and log) and its spread across each predictor
hist(EndoMetaData$IR)
hist(log(EndoMetaData$IR))
boxplot(EndoMetaData$IR~EndoMetaData$SUBSTRATE)
boxplot(EndoMetaData$IR~EndoMetaData$TIME)
boxplot(EndoMetaData$IR~EndoMetaData$LOCALITY)
boxplot(EndoMetaData$IR~EndoMetaData$TEMPRATURE)
#########################
###### 1- Isolation rate
#########################
### GLM model for IR: temperature crossed with locality, time and tissue
### (Gaussian family by default — see the author's note below about the
### non-integer, zero-heavy response).
Ir.m <- glm(IR ~ Temperature * Locality + Temperature * Time + Temperature * Tissue,
            data = EndoMetaData)
par(mfrow = c(2, 2))
plot(Ir.m)   # standard glm diagnostic panel (residuals / QQ / leverage)
AIC(Ir.m)
IR.m.anova <- anova(Ir.m, test = "F")
IR.m.summary <- summary(Ir.m)  # FIX: typo "IR.m.sammary" corrected (name unused elsewhere)
## don't know what kind of model or family is good for this kind of data non-integer
#with lots of ZEROs
# plot the significant interactions
dev.off()
# ## Tissue * Time interaction
# timeplot= ggplot(data = EndoMetaData,aes(x=EndoMetaData$TIME,y=IR,
# fill= factor(EndoMetaData$SUBSTRATE)))+
# geom_bar(stat="identity",position="dodge",width=0.3)+
# scale_fill_manual(values = c ("darkgray", "lightgray"),name="Tissue type",
# breaks=c("Leaf", "Branch"),
# labels=c("Leaf", "Branch"))+
# xlab("Sampling time")+ylab("Isolation rate")+
# scale_x_continuous(breaks= c(1,2,3),labels = c("May", "Jun","July"))+
# theme_bw()+
# theme(legend.position = "top", axis.text= element_text(size = rel(1.3)),
# axis.title.y = element_text(size = rel(1.5)),
# axis.title.x = element_text(size = rel(1.5)),
# legend.text = element_text(size = rel(1.5)),
# legend.title = element_text(size = rel(1.5)),
# panel.grid.major = element_blank(),
# axis.line = element_line(colour = "black", color = "black"))
# ## Tissue * Locality interaction
#
# Bar plot of isolation rate by sampling site, dodged by tissue type
# (fill legend suppressed via guide = FALSE).  Printed directly rather than
# assigned.
ggplot(data = EndoMetaData,aes(x=Locality,y=IR, fill= factor(EndoMetaData$SUBSTRATE)))+
  geom_bar(stat="identity",position="dodge",width=0.5)+
  scale_fill_manual(values = c ("darkgray", "lightgray"),name="Tissue type",
                    breaks=c("Leaf", "Branch"),
                    labels=c("Leaf", "Branch"), guide = FALSE)+
  xlab("Sampling site")+ylab("Isolation rate")+
  theme_bw()+
  theme(legend.position = "top", axis.text= element_text(size = rel(1.3)),
        axis.title.y = element_text(size = rel(1.5)),
        axis.title.x = element_text(size = rel(1.5)),
        legend.text = element_text(size = rel(1.5)),
        legend.title = element_text(size = rel(1.5)),
        panel.grid.major = element_blank(),
        axis.line = element_line(colour = "black", color = "black"))
################################
### FIG 2
################################
# dev.off()
# png(file = "fig 2.png", width = 2100, height = 2100, res= 300)
# grid.arrange(timeplot,locaplot,nrow=2, ncol=1 )
# dev.off()
########################################
# ### 2- Hill Diversities
########################################
# Remove zero observations for diversity and richness calculation
NotZero = EndoMetaData$SUCCESS > 0 #filter for zero-observation samples
EndoAbunZero = EndoAbun[NotZero,]
EndoMetaZero = EndoMetaData[NotZero,]
##aranging the factors with the new datasets (suffix Z = zero-filtered)
LocalityZ<- factor(EndoMetaZero$LOCALITY,levels = c("Bisoton", "Hasan abad", "Khosro abad",
                                                    "Sarmil","Sorkhe dize"))
TissueZ <- factor(EndoMetaZero$SUBSTRATE, levels = c("Leaf", "Branch"))
TimeZ <- factor(EndoMetaZero$TIME, levels = c("1","2","3"))
TemperatureZ<-factor(EndoMetaZero$TEMPRATURE, levels = c("25 Degree","4 Degree"))
#EndoRichness = specnumber(EndoAbunZero)
#hist(EndoRichness)
# vegan::renyi with hill=T returns Hill numbers for the requested orders;
# columns are named by order ("0", "1", "2"), hence the quoted $ access below.
EndoHill = renyi(EndoAbunZero, scale=c(0,1,2), hill=T)
Endohill.1 = EndoHill$"0"#this is richness
Endohill.2 = EndoHill$"1"#antilogarithm of the Shannon representing the abundant species
Endohill.3 = EndoHill$"2"#inverse Simpson representing the very abundant species
hist(Endohill.1)
hist(Endohill.2)
hist(Endohill.3)
### MODELS
### I want to see if the time,loclaity and temperature have differensial effects on
# different tissues diversity
############### First hill
# (earlier tissue-centred model, kept for reference)
# EHill1.m= glm(Endohill.1~ TissueZ*LocalityZ+TissueZ*TimeZ+TissueZ*TemperatureZ
#               , data = EndoMetaZero, family = poisson(link = "log"))
# AIC(EHill1.m)
# par(mfrow= c(2,2))
# plot(EHill1.m)
# dev.off()
#
# EHill1.m.anova= anova(EHill1.m, test = "Chisq")
# EHill1.m.summary= summary(EHill1.m)# why this doesn't show the significance?
########new model: temperature-centred interactions; richness (Hill order 0)
######## modelled as Poisson counts with log link.
EHill1.m.2= glm(Endohill.1~ TemperatureZ *LocalityZ+TemperatureZ *TimeZ+TemperatureZ * TissueZ
                , data = EndoMetaZero, family = poisson(link = "log"))
AIC(EHill1.m.2)
par(mfrow= c(2,2))
plot(EHill1.m.2)
dev.off()
EHill1.m.2anova= anova(EHill1.m.2, test = "Chisq")
EHill1.m.2summary= summary(EHill1.m.2)
#### discard the locality factor (reduced model for comparison)
EHill1.m.3= glm(Endohill.1~ TemperatureZ *TimeZ+TemperatureZ * TissueZ
                , data = EndoMetaZero, family = poisson(link = "log"))
AIC(EHill1.m.3)
par(mfrow= c(2,2))
plot(EHill1.m.3)
dev.off()
EHill1.m.3anova= anova(EHill1.m.3, test = "Chisq")
EHill1.m.3summary= summary(EHill1.m.3)
# See if glm.nb is a better fit
# EHill1.m.nb= glm.nb(Endohill.1~ TissueZ*LocalityZ+TissueZ*TimeZ+TissueZ*TemperatureZ
#                     , data = EndoMetaZero, link = "log")
# AIC(EHill1.m.nb)
# par(mfrow= c(2,2))
# plot(EHill1.m.nb)
# dev.off()
## These models are not that different but due to the warnings that I get from glm.nb
## I choose the poisson glm model
############### second Hill
## use the same model orders here; Hill order 1 (exp Shannon) is continuous
## and positive, hence Gamma with log link.
# EHill2.m= glm(Endohill.2~ TissueZ*LocalityZ+TissueZ*TimeZ+TissueZ*TemperatureZ
#               ,data = EndoMetaZero, family =Gamma(link = "log"))
#AIC(EHill2.m)
# par(mfrow= c(2,2))
# plot(EHill2.m)
# dev.off()
#
# EHill2.m.anova= anova(EHill2.m,test = "F")
# EHill2.m.summary= summary(EHill2.m)
##############new model (temperature-centred interactions)
EHill2.m2= glm(Endohill.2~ TemperatureZ *LocalityZ+TemperatureZ *TimeZ+TemperatureZ * TissueZ
               ,data = EndoMetaZero, family =Gamma(link = "log"))
AIC(EHill2.m2)
par(mfrow= c(2,2))
plot(EHill2.m2)
dev.off()
EHill2.m2.anova= anova(EHill2.m2,test = "F")
EHill2.m2.summary= summary(EHill2.m2)
#### discard the locality factor (reduced model for comparison)
EHill2.m3= glm(Endohill.2~ TemperatureZ *TimeZ+TemperatureZ * TissueZ
               ,data = EndoMetaZero, family =Gamma(link = "log"))
AIC(EHill2.m3)
par(mfrow= c(2,2))
plot(EHill2.m3)
dev.off()
EHill2.m3.anova= anova(EHill2.m3,test = "F")
EHill2.m3.summary= summary(EHill2.m3)
## try the glm.nb
# Ehill2.m.nb= glm.nb(Endohill.2~ TissueZ*LocalityZ+TissueZ*TimeZ+TissueZ*TemperatureZ
#                     ,data = EndoMetaZero)
# warnings()
############# Third Hill
# EHill3.m= glm(Endohill.3~ TissueZ*LocalityZ+TissueZ*TimeZ+TissueZ*TemperatureZ
#               ,data = EndoMetaZero, family =Gamma(link = "log"))
# AIC(EHill3.m)
# par(mfrow= c(2,2))
# plot(EHill3.m)
# dev.off()
#
# EHill3.m.anova= anova(EHill3.m,test = "F")
# EHill3.m.summary= summary(EHill3.m)
########### new model
# NOTE(review): unlike the Hill-1/Hill-2 ".m2" models, this ".m2" already
# drops LocalityZ (it matches the reduced ".m3" form of the other orders) —
# confirm that is intentional.
EHill3.m2= glm(Endohill.3~ TemperatureZ *TimeZ+TemperatureZ * TissueZ
               ,data = EndoMetaZero, family =Gamma(link = "log"))
AIC(EHill3.m2)
par(mfrow= c(2,2))
plot(EHill3.m2)
dev.off()
EHill3.m2.anova= anova(EHill3.m2,test = "F")
EHill3.m2.summary= summary(EHill3.m2)
##############################
#### 3- Community composition
##############################
### Define CORE OTUs
## Summarize OTU observation
TotalCount = apply(EndoAbun,2,sum)
## The average observation of OTUs (over samples where present)
MeanCount=apply(EndoAbun,2,function(vec) mean(vec[vec>0]))
## In how many samples is an OTU present?
TotalPresent = apply(EndoAbun,2,function(vec) sum(vec>0))
## The highest number of an OTU in a sample
MaximumCount=apply(EndoAbun,2,max)
## Plotting observation against abundance
plot(TotalPresent, MaximumCount, xlab="OTU Observation",
     ylab="OTU Maximum Abundance", pch=20)
plot(TotalPresent, log(MaximumCount), xlab="OTU Observation",
     ylab="log(OTU Maximum Abundance)", pch=20)
## Create a smoothed trendline (mgcv::gam with a smooth on observation count)
gam1 = gam(log(MaximumCount)~s(TotalPresent))
plot(gam1, residuals=T, shade=T, rug=F, cex=2.6,
     xlab="OTU Observation", ylab="log Mean Abundance") # , xaxp=c(0,150,15)
## keep core OTUs: present in more than 7 samples
OTUobserv = TotalPresent > 7
EndoCorAbun = EndoAbun[,OTUobserv]
### name of the Core OTUs
COREOTUS=colnames(EndoCorAbun)
#### Remove the Zero samples from the Core OTU abbundnace object and metadata
IsolSucc = apply(EndoCorAbun,1, sum)
NotZero2= IsolSucc>0
ECorAbunZero = EndoCorAbun[NotZero2,]
ECorMetaZero = EndoMetaData[NotZero2,]
row.names(ECorAbunZero)==row.names(ECorMetaZero)   # printed sanity check
corOTUs<-colnames(ECorAbunZero)
# Suffix C = core-OTU, zero-filtered subset
TissueC<-factor(ECorMetaZero$SUBSTRATE, levels = c("Leaf", "Branch"))
LocalityC<- factor(ECorMetaZero$LOCALITY,levels = c("Bisoton", "Hasan abad", "Khosro abad",
                                                    "Sarmil","Sorkhe dize"))
TimeC<- factor(ECorMetaZero$TIME, levels = c("1", "2","3"))
TemperatureC<- factor(ECorMetaZero$TEMPRATURE, levels = c("25 Degree","4 Degree"))
### Multispecies Model for Core OTUs (mvabund: tissue-centred interactions)
ECrMvabund= mvabund(ECorAbunZero)
plot(ECrMvabund)
EndoMV.m= manyglm (ECrMvabund~TissueC*LocalityC+TissueC*TimeC+TissueC*TemperatureC,
                   data = ECorMetaZero, family = "negative.binomial", show.residuals=T)
class(ECrMvabund)   # interactive check; no effect on the analysis
plot.manyglm(EndoMV.m)
# Monte-Carlo resampled LR tests; per-OTU p-values adjusted for multiplicity.
EndoMV.m.anova= anova.manyglm(EndoMV.m,nBoot=100, test="LR", p.uni="adjusted",
                              resamp="montecarlo")
EndoMV.m.sum= summary.manyglm(EndoMV.m, nBoot=100, test="LR",p.uni="adjusted",
                              resamp="montecarlo")
## Which OTUs are significaantly affected (per-term univariate p <= 0.05)
EnAnova <- as.data.frame(EndoMV.m.anova$uni.p)
otuTissueEf<-colnames(EnAnova)[EnAnova["TissueC",]<= 0.05]
otuLocEf<- colnames(EnAnova)[EnAnova["LocalityC",]<= 0.05]
otuTimEf<- colnames(EnAnova)[EnAnova["TimeC",]<= 0.05]
otuTempEf<- colnames(EnAnova)[EnAnova["TemperatureC",]<= 0.05]
### try to visualize these effects
## Tissue effects
OTUtissu<- c("Byssochlamys.spectabilis.","Gnomoniaceae.sp..66","Microsphaeriopsis.olivacea",
             "Penicillium.sp..A21","Preussia.sp..A31")
TissuABUN<- ECorAbunZero[OTUtissu]##Keeping only tissue affected OTUs
# get the mean valuse for each OTU in each tisse
Tissuemean <- aggregate(. ~ ECorMetaZero$SUBSTRATE, TissuABUN, mean)
#CReat a data frame of the mean valuse
TissuMeanfram<- as.data.frame(Tissuemean,optional=TRUE)
attr(TissuMeanfram, "row.names")<- c("Branch", "Leaf")
### creat a matrix of mean observation of OTUs affected by tissue for ploting
Tissudata<- data.matrix (TissuMeanfram[2:6],rownames.force = NA )
pdf(file = "Effect of Tissue on OTU observation.pdf", paper = "a4", width = 7, height = 4)
barplot(Tissudata, legend.text =TRUE, beside = TRUE,ylab= "mean observation per sample",
        names.arg= c("B. spectabilis", "Gnomoniaceae sp.", "M. olivacea","Penicillium sp.",
                     "Preussia sp."), axes= TRUE,ylim= c(0,1), cex.names = 0.8,
        args.legend = list(x = "topright",bty= "n"), border = "Black" )
dev.off()
### Temprature effects
OTUtemp<- c ("Aspergillus.sp..A20","Aureobasidium.sp..A17","Byssochlamys.spectabilis."
             ,"Microsphaeriopsis.olivacea", "Preussia.sp..A31")
TempABUN<- ECorAbunZero[OTUtemp]##Keeping only Temp affected OTUs
# get the mean valuse for each OTU in each temp
Tempmean <- aggregate(. ~ ECorMetaZero$TEMPRATURE, TempABUN, mean)
#CReat a data frame of the mean valuse
TempMeanfram<- as.data.frame(Tempmean,optional=TRUE)
attr(TempMeanfram, "row.names")<- c("25 Degree", "4 Degree")
### creat a matrix of mean observation of OTUs affected by temp for ploting
Tempdata<- data.matrix (TempMeanfram[2:6],rownames.force = NA )
pdf(file = "Effect of Temprature on OTU observation.pdf", paper = "a4", width = 7, height = 4)
barplot(Tempdata,legend.text =TRUE, beside = TRUE,ylab= "mean observation per sample" ,
        names.arg= c ("A20","A17","B.spec","M.oliv",
                      "A31"), axes= TRUE,ylim= c(0,1.6), cex.names = 0.8,
        args.legend = list(x = "topleft",bty= "n"), border = "Black",
        width = 0.5)
dev.off()
## Locality effects (quick interactive look)
plot(ECorAbunZero$Byssochlamys.spectabilis.~ ECorMetaZero$LOCALITY)
# time effects
plot(ECorAbunZero$Alternaria.sp..A25~ ECorMetaZero$TIME)
##########################################################
##### NEW GLMS MODEL FOR CORE OTUS WITH DIFFERENT INTERACTIONS
##### (temperature-centred; this is the model reported in the figures)
##########################################################
EndoMV.m2= manyglm (ECrMvabund~TemperatureC*LocalityC+TemperatureC*TimeC+TemperatureC*TissueC,
                    data = ECorMetaZero, family = "negative.binomial", show.residuals=T)
plot.manyglm(EndoMV.m2)
EndoMV.m2.anova= anova.manyglm(EndoMV.m2,nBoot=100, test="LR", p.uni="adjusted",
                               resamp="montecarlo")
EndoMV.m2.sum= summary.manyglm(EndoMV.m2, nBoot=100, test="LR",p.uni="adjusted",
                               resamp="montecarlo")
## Which OTUs are significaantly affected (main effects and interactions)
EnAnova2 <- as.data.frame(EndoMV.m2.anova$uni.p)
otuTissueEf2<-colnames(EnAnova2)[EnAnova2["TissueC",]<= 0.05]
otuLocEf2<- colnames(EnAnova2)[EnAnova2["LocalityC",]<= 0.05]
otuTimEf2<- colnames(EnAnova2)[EnAnova2["TimeC",]<= 0.05]
otuTempEf2<- colnames(EnAnova2)[EnAnova2["TemperatureC",]<= 0.05]
otutemplocEf<-colnames(EnAnova2)[EnAnova2["TemperatureC:LocalityC",]<= 0.05]
otutemtimEf<-colnames(EnAnova2)[EnAnova2["TemperatureC:TimeC",]<= 0.05]
otutemTISSEf<-colnames(EnAnova2)[EnAnova2["TemperatureC:TissueC",]<= 0.05]
####interaction plots
?boxplot()
boxplot(ECorAbunZero$Microsphaeriopsis.olivacea~TemperatureC)
boxplot(ECorAbunZero$Microsphaeriopsis.olivacea~TimeC)
# temp * Time interaction
# library(lattice)
# NOTE(review): bwplot() is from lattice, but the library(lattice) call above
# is commented out — this only works if lattice is already attached.
bwplot(ECorAbunZero$Microsphaeriopsis.olivacea~LocalityC|TemperatureC,
       ylab = "M. olivacea", xlab = "Sampling site",
       scales=list(x=list(labels=c("Biseton","Mahi Dasht","Khosro abad","Sarmil","Sorkhe dize"))),
       par.settings = list(box.umbrella=list(col= "black"),
                           box.dot=list(col= FALSE),
                           box.rectangle = list(col= "black")
                           , plot.symbol = list(col= "black")))
#
# barchart(ECorAbunZero$Cytospora.sp..AC35~TimeC|TemperatureC,
#          ylab = "Cytospora sp.", xlab = "Time of sampling",
#          scales=list(x=list(labels=c("May","Jun","July"))),
#          col= "gray", borders=FALSE)
#
# bwplot(ECorAbunZero$Cytospora.sp..AC35~TimeC|TemperatureC,
#        ylab = "Cytospora sp.", xlab = "Time of sampling",
#        scales=list(x=list(labels=c("May","Jun","July"))),
#        par.settings = list(box.umbrella=list(col= "black"),
#                            box.dot=list(col= FALSE),
#                            box.rectangle = list(col= "black")
#                            , plot.symbol = list(col= "black")))
dev.off()
library(FSA)   # provides se() used by the aggregate() summaries below
sd(ECorAbunZero$Cytospora.sp..AC35)
### bar plot
# Mean Cytospora sp. frequency per sample by sampling time, split by
# incubation temperature (temperature x time interaction of the manyglm).
Cytosporaplot <- ggplot(data = ECorMetaZero,
                        aes(x = ECorMetaZero$TIME,
                            y = ECorAbunZero$Cytospora.sp..AC35,
                            fill = factor(ECorMetaZero$TEMPRATURE))) +
  geom_bar(stat = "identity", position = "dodge", width = 0.5) +
  # FIX: a scale_x_discrete() with temperature-level breaks used to sit here;
  # ggplot2 keeps only the last x scale, so it was dead code that merely
  # triggered a "Scale for 'x' is already present" message.  Removed.
  scale_fill_manual(values = c("gray27", "gray60"), name = "Incubation temperature",
                    labels = c(expression(25*degree~C), expression(4*degree~C))) +
  xlab("Sampling time") + ylab("Cytospora sp.") +
  scale_x_continuous(breaks = c(1, 2, 3), labels = c("May", "June", "July")) +
  theme_bw() +
  theme(legend.position = "top", axis.text = element_text(size = rel(1)),
        axis.title.y = element_text(size = rel(1)),
        axis.title.x = element_text(size = rel(1)),
        legend.text = element_text(size = rel(1)),
        legend.title = element_text(size = rel(1)),
        panel.grid.major = element_blank(),
        axis.line = element_line(colour = "black", color = "black"))
# (a stray interactive `?geom_errorbar()` help call was removed here)
##new plot revision 2
# Per-cell summaries of Cytospora sp. counts: mean, standard error (FSA::se)
# and sd for each sampling time x incubation temperature combination; then
# tidy the awkward auto-generated aggregate() column names.
ac35 <- aggregate(ECorAbunZero$Cytospora.sp..AC35 ~ ECorMetaZero$TIME + ECorMetaZero$TEMPRATURE, data = ECorMetaZero, mean)
ac35.se <- aggregate(ECorAbunZero$Cytospora.sp..AC35 ~ ECorMetaZero$TIME + ECorMetaZero$TEMPRATURE, data = ECorMetaZero, se)
ac35.sd <- aggregate(ECorAbunZero$Cytospora.sp..AC35 ~ ECorMetaZero$TIME + ECorMetaZero$TEMPRATURE, data = ECorMetaZero, sd)
ac35$time <- ac35$`ECorMetaZero$TIME`
ac35$`ECorMetaZero$TIME` <- NULL
ac35$temp <- ac35$`ECorMetaZero$TEMPRATURE`
ac35$`ECorMetaZero$TEMPRATURE` <- NULL
ac35$ac35mean <- ac35$`ECorAbunZero$Cytospora.sp..AC35`
ac35$`ECorAbunZero$Cytospora.sp..AC35` <- NULL
ac35$time <- as.factor(ac35$time)
ac35$se <- ac35.se$`ECorAbunZero$Cytospora.sp..AC35`
ac35$sd <- ac35.sd$`ECorAbunZero$Cytospora.sp..AC35`
ac35$se2 <- ac35$se * 0.25  # NOTE(review): whiskers drawn at 0.25*SE — confirm this scaling is intended
#newplot fig 4: bars with upper error whiskers (mean to mean + se2)
ac35.p <- ggplot(ac35, aes(x = time, y = ac35mean, fill = temp)) +
  geom_bar(stat = "identity", position = "dodge", width = 0.5, color = "black") +
  geom_errorbar(aes(ymin = ac35mean, ymax = ac35mean + se2), width = 0.2,
                position = position_dodge(0.5)) +
  # FIX: removed a conflicting scale_x_discrete() (temperature breaks) that
  # was immediately replaced by the month-label x scale below.
  scale_fill_manual(values = c("gray27", "gray60"), name = "Incubation temperature",
                    labels = c(expression(25*degree~C), expression(4*degree~C))) +
  xlab("Sampling time") + ylab("Mean frequency of Cytospora sp. per sample") +
  scale_x_discrete(breaks = c(1, 2, 3), labels = c("May", "June", "July")) +
  theme_bw() +
  theme(legend.position = "top", axis.text = element_text(size = rel(1)),
        axis.title.y = element_text(size = rel(1)),
        axis.title.x = element_text(size = rel(1)),
        legend.text = element_text(size = rel(1)),
        legend.title = element_text(size = rel(1)),
        panel.grid.major = element_blank(),
        axis.line = element_line(colour = "black", color = "black"))
# M. olivacea frequency by sampling time; the fill legend is suppressed
# (guides(fill = FALSE)) because the companion Cytospora panel carries the
# shared legend in the arranged figure.
# NOTE(review): the (hidden) labels c("4 degree", "25 degree") are reversed
# relative to the factor level order ("25 Degree" sorts first) — harmless
# while the guide is off, but fix before re-enabling the legend.
olivaceaplot <- ggplot(data = ECorMetaZero,
                       aes(x = ECorMetaZero$TIME,
                           y = ECorAbunZero$Microsphaeriopsis.olivacea,
                           fill = factor(ECorMetaZero$TEMPRATURE))) +
  geom_bar(stat = "identity", position = "dodge", width = 0.5) +
  scale_fill_manual(values = c("gray27", "gray60"), name = "Incubation temperature",
                    labels = c("4 degree", "25 degree")) + guides(fill = FALSE) +
  xlab("Sampling time") + ylab("M. olivacea") +
  scale_x_continuous(breaks = c(1, 2, 3), labels = c("May", "June", "July")) +
  theme_bw() +
  theme(axis.text = element_text(size = rel(1)),
        axis.title.y = element_text(size = rel(1)),
        axis.title.x = element_text(size = rel(1)),
        legend.text = element_text(size = rel(1)),
        legend.title = element_text(size = rel(1)),
        panel.grid.major = element_blank(),
        axis.line = element_line(colour = "black", color = "black"))
#temp*site interaction for m.o
olivaceaplo2 <- ggplot(data = ECorMetaZero,
                       aes(x = ECorMetaZero$LOCALITY,
                           y = ECorAbunZero$Microsphaeriopsis.olivacea,
                           fill = factor(ECorMetaZero$TEMPRATURE))) +
  geom_bar(stat = "identity", position = "dodge", width = 0.5) +
  # FIX: a first scale_x_discrete() with temperature breaks was silently
  # replaced by the site-label x scale below ("Scale for 'x' is already
  # present" message); the dead scale has been removed.
  scale_fill_manual(values = c("gray27", "gray60"), name = "Incubation temperature",
                    labels = c(expression(4*degree~C), expression(25*degree~C))) + guides(fill = FALSE) +
  xlab("Sampling site") + ylab("M. olivacea") +
  scale_x_discrete(breaks = c("Bisoton", "Hasan abad", "Khosro abad", "Sarmil", "Sorkhe dize"),
                   labels = c("Biseton", "Mahi Dasht", "Khosro abad", "Sarmil", "Sorkhe dize")) +
  theme_bw() +
  theme(legend.position = "top", axis.text = element_text(size = rel(1)),
        axis.title.y = element_text(size = rel(1)),
        axis.title.x = element_text(size = rel(1)),
        legend.text = element_text(size = rel(1)),
        legend.title = element_text(size = rel(1)),
        panel.grid.major = element_blank(),
        axis.line = element_line(colour = "black", color = "black"))
############################################333
# fig 4 R2
##new plot data
# NOTE(review): this section recomputes the very same ac35 summary and
# redefines ac35.p (only the y-axis caption differs) — it duplicates the
# "new plot revision 2" block above and overwrites its objects.
ac35 <- aggregate(ECorAbunZero$Cytospora.sp..AC35 ~ ECorMetaZero$TIME + ECorMetaZero$TEMPRATURE, data = ECorMetaZero, mean)
ac35.se <- aggregate(ECorAbunZero$Cytospora.sp..AC35 ~ ECorMetaZero$TIME + ECorMetaZero$TEMPRATURE, data = ECorMetaZero, se)
ac35.sd <- aggregate(ECorAbunZero$Cytospora.sp..AC35 ~ ECorMetaZero$TIME + ECorMetaZero$TEMPRATURE, data = ECorMetaZero, sd)
ac35$time <- ac35$`ECorMetaZero$TIME`
ac35$`ECorMetaZero$TIME` <- NULL
ac35$temp <- ac35$`ECorMetaZero$TEMPRATURE`
ac35$`ECorMetaZero$TEMPRATURE` <- NULL
ac35$ac35mean <- ac35$`ECorAbunZero$Cytospora.sp..AC35`
ac35$`ECorAbunZero$Cytospora.sp..AC35` <- NULL
ac35$time <- as.factor(ac35$time)
ac35$se <- ac35.se$`ECorAbunZero$Cytospora.sp..AC35`
ac35$sd <- ac35.sd$`ECorAbunZero$Cytospora.sp..AC35`
ac35$se2 <- ac35$se * 0.25
#newplot fig 4
ac35.p <- ggplot(ac35, aes(x = time, y = ac35mean, fill = temp)) +
  geom_bar(stat = "identity", position = "dodge", width = 0.5, color = "black") +
  geom_errorbar(aes(ymin = ac35mean, ymax = ac35mean + se2), width = 0.2,
                position = position_dodge(0.5)) +
  # FIX: removed a conflicting scale_x_discrete() (temperature breaks) that
  # ggplot2 silently replaced with the month-label x scale below.
  scale_fill_manual(values = c("gray27", "gray60"), name = "Incubation temperature",
                    labels = c(expression(25*degree~C), expression(4*degree~C))) +
  xlab("Sampling time") + ylab("Cytospora sp.(mean frequency per sample)") +
  scale_x_discrete(breaks = c(1, 2, 3), labels = c("May", "June", "July")) +
  theme_bw() +
  theme(legend.position = "top", axis.text = element_text(size = rel(1)),
        axis.title.y = element_text(size = rel(1)),
        axis.title.x = element_text(size = rel(1)),
        legend.text = element_text(size = rel(1)),
        legend.title = element_text(size = rel(1)),
        panel.grid.major = element_blank(),
        axis.line = element_line(colour = "black", color = "black"))
## mo plot: mean M. olivacea frequency per time x temperature cell
## (mean and FSA::se via aggregate, columns renamed afterwards).
mo.tt<-aggregate(ECorAbunZero$Microsphaeriopsis.olivacea~ECorMetaZero$TIME+ECorMetaZero$TEMPRATURE, data= ECorMetaZero,mean)
mo.tt.se<-aggregate(ECorAbunZero$Microsphaeriopsis.olivacea~ECorMetaZero$TIME+ECorMetaZero$TEMPRATURE, data= ECorMetaZero,se)
mo.tt$se<-mo.tt.se$`ECorAbunZero$Microsphaeriopsis.olivacea`
mo.tt$time<-mo.tt$`ECorMetaZero$TIME`
mo.tt$`ECorMetaZero$TIME`<-NULL
mo.tt$temp<-mo.tt$`ECorMetaZero$TEMPRATURE`
mo.tt$`ECorMetaZero$TEMPRATURE`<-NULL
mo.tt$mean<-mo.tt$`ECorAbunZero$Microsphaeriopsis.olivacea`
mo.tt$`ECorAbunZero$Microsphaeriopsis.olivacea`<-NULL
# NOTE(review): error-bar scaling is 0.5*SE here but 0.25*SE in the Cytospora
# block — confirm the inconsistency is intentional.
mo.tt$se2<-mo.tt$se*0.5
#new time plot (legend suppressed; time left numeric, hence scale_x_continuous)
MO.T.p<- ggplot(mo.tt, aes(x=time, y=mean, fill=temp)) +
  geom_bar(stat="identity",position="dodge",width=0.5,color="black") +
  geom_errorbar(aes(ymin=mean, ymax=mean+se2), width=0.2,
                position=position_dodge(0.5))+
  scale_fill_manual(values=c("gray27", "gray60"), name = "Incubation temperature",
                    labels=c("25 degree","4 degree")) +guides(fill=FALSE)+
  xlab("Sampling time")+ylab("M. olivacea(mean frequency per sample)")+
  scale_x_continuous(breaks= c(1,2,3),labels = c("May", "June","July"))+
  theme_bw()+
  theme(axis.text= element_text(size = rel(1)),
        axis.title.y = element_text(size = rel(1)),
        axis.title.x = element_text(size = rel(1)),
        legend.text = element_text(size = rel(1)),
        legend.title = element_text(size = rel(1)),
        panel.grid.major = element_blank(),
        axis.line = element_line(colour = "black", color = "black"))
#### site plot mo
mo.tt2<-aggregate(ECorAbunZero$Microsphaeriopsis.olivacea~ECorMetaZero$LOCALITY+ECorMetaZero$TEMPRATURE, data= ECorMetaZero,mean)
mo.tt.se2<-aggregate(ECorAbunZero$Microsphaeriopsis.olivacea~ECorMetaZero$LOCALITY+ECorMetaZero$TEMPRATURE, data= ECorMetaZero,se)
mo.tt2$site<-mo.tt2$`ECorMetaZero$LOCALITY`
mo.tt2$`ECorMetaZero$LOCALITY`<-NULL
mo.tt2$temp<-mo.tt2$`ECorMetaZero$TEMPRATURE`
mo.tt2$`ECorMetaZero$TEMPRATURE`<-NULL
mo.tt2$mean<-mo.tt2$`ECorAbunZero$Microsphaeriopsis.olivacea`
mo.tt2$se<-mo.tt.se2$`ECorAbunZero$Microsphaeriopsis.olivacea`
mo.tt2$`ECorAbunZero$Microsphaeriopsis.olivacea`<-NULL
mo.tt2$se2<-mo.tt2$se*0.5
# Barplot of mean M. olivacea frequency by sampling site, dodged by incubation
# temperature; error bars show +0.5 SE.  Note the axis relabelling:
# "Bisoton" -> "Biseton" and "Hasan abad" -> "Mahi Dasht".
MO.T.p2 <- ggplot(mo.tt2, aes(x = site, y = mean, fill = temp)) +
  geom_bar(stat = "identity", position = "dodge", width = 0.5, color = "black") +
  geom_errorbar(aes(ymin = mean, ymax = mean + se2),
                width = 0.2, position = position_dodge(0.5)) +
  scale_fill_manual(values = c("gray27", "gray60"),
                    name = "Incubation temperature",
                    labels = c("25 degree", "4 degree")) +
  guides(fill = FALSE) +
  scale_x_discrete(breaks = c("Bisoton", "Hasan abad", "Khosro abad", "Sarmil", "Sorkhe dize"),
                   labels = c("Biseton", "Mahi Dasht", "Khosro abad", "Sarmil", "Sorkhe dize")) +
  labs(x = "Sampling site", y = "M. olivacea(mean frequency per sample)") +
  theme_bw() +
  theme(legend.position = "top",
        axis.text = element_text(size = rel(1)),
        axis.title.y = element_text(size = rel(1)),
        axis.title.x = element_text(size = rel(1)),
        legend.text = element_text(size = rel(1)),
        legend.title = element_text(size = rel(1)),
        panel.grid.major = element_blank(),
        axis.line = element_line(colour = "black", color = "black"))
### export fig 4 r2
# NOTE(review): dev.off() errors when no graphics device is open; if this
# script is run from a clean session, guard with if (dev.cur() > 1) dev.off().
dev.off()
# Composite Fig 4 (revision 2): three ggplot panels stacked in one column.
# ac35.p is built earlier in the file (outside this excerpt).
png(filename = "Fig4-r2.png", res= 300, width = 2100, height=3000)
grid.arrange(ac35.p, MO.T.p,MO.T.p2,nrow=3, ncol=1)
dev.off()
######################################################
## FIG 4 forest pathology
######################################################
library(gridExtra)
dev.off()
# Fig 3 (final): Cytospora / M. olivacea panels stacked via gridExtra.
# Cytosporaplot, olivaceaplot, olivaceaplo2 are ggplot objects defined elsewhere.
png(filename = "Fig3-final.png", res= 300, width = 2100, height=3000)
grid.arrange(Cytosporaplot, olivaceaplot,olivaceaplo2, nrow=3, ncol=1)
dev.off()
##########################################################
############ FIG 3-2
##########################################################
# Bare ggplot object names only auto-print at the interactive prompt; when
# this script is source()d they draw nothing, so "Fig3-2.png" came out blank.
# par(mfrow=...) also has no effect on grid/ggplot output.  The three plots
# are therefore stacked with grid.arrange(), exactly as done for
# "Fig3-final.png" above, which produces the intended 3-row figure.
dev.off()
png(filename = "Fig3-2.png", res= 300, width = 2100, height=3000)
grid.arrange(Cytosporaplot, olivaceaplot, olivaceaplo2, nrow = 3, ncol = 1)
dev.off()
################################
#### 5- NMDS and similarities
################################
# Two-dimensional non-metric multidimensional scaling (Bray-Curtis) of the
# zero-filtered OTU abundance matrix, up to 20 random starts.
ENdoNMDS<-metaMDS(ECorAbunZero, distance = "bray", k= 2, trymax = 20)
#############################
## Plot NMDS for localities
#############################
# NOTE(review): dev.off() errors if no device is currently open.
dev.off()
# Quick scatter of the site scores.  NOTE(review): "NMDs 1" looks like a typo
# for "NMDS 1" -- it is a plotted axis label (runtime string), so it is left
# unchanged here; confirm before fixing.
plot(ENdoNMDS$points, xlab="NMDs 1", ylab="NMDS 2")
# Empty ordination frame, then site points plus one 95% SE ellipse per
# locality (kind="se", conf=0.95); Bisoton is drawn last, on top.
ordiplot(ENdoNMDS, type = "n", display = "sites",xlab="NMDS 1", ylab="NMDS 2",
         xlim= c(-2.5,2.5),ylim = c(-1.5,1.5))
points(ENdoNMDS$points, pch=20, col= "black", cex=0.5)
with(ECorMetaZero,ordiellipse(ENdoNMDS,ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Hasan abad"))))
with(ECorMetaZero,ordiellipse(ENdoNMDS, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Khosro abad"))))
with(ECorMetaZero,ordiellipse(ENdoNMDS,ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("green"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sarmil"))))
with(ECorMetaZero,ordiellipse(ENdoNMDS, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("yellow"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sorkhe dize"))))
with(ECorMetaZero,ordiellipse(ENdoNMDS, ECorMetaZero$LOCALITY,cex=1.5,
                              draw="polygon", col= "black",
                              alpha=100, kind="se", conf=0.95,
                              show.groups=(c("Bisoton"))))#red
# Legend fills match the ellipse colours used above.
mylegend = legend("topright", c("Bisoton", "Hasan abad", "Khosro abad",
                                "Sarmil","Sorkhe dize"),
                  fill=c("black","blue","red","green","yellow"), border="white", bty="n")
## stress is not so good
## try 3D NMDS
# Three-dimensional NMDS solution; the lower stress (per the author's note
# below) motivates plotting all three pairwise axis combinations.
# NMDS1.2 / NMDS2.3 / NMDS1.3 hold the ordiplot objects; NMDS1.2 is also
# reused (and later reassigned) by subsequent plotting sections.
NMDS.3<-metaMDS(ECorAbunZero, distance = "bray", k= 3, trymax = 20)
## stress is much better = 0.025
### Plot nmds1 &2
dev.off()
#pdf(file = "3D NMDS for localities.pdf", paper = "a4", width = 7, height = 4)
par(mfrow= c(1,3))
NMDS1.2=ordiplot(NMDS.3,choices=c(1,2), type = "n", display = "sites",xlab="NMDS 1",
                 ylab="NMDS 2"
                 ,ylim = c(-2,2), xlim = c(-3,3))
points(NMDS.3$points[,1],NMDS.3$points[,2], pch=20, col= "black", cex= 0.8)
# 95% SE ellipses per locality, axes 1-2.
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$LOCALITY, cex=.5,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Hasan abad"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Khosro abad"))))
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("green"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sarmil"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("yellow"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sorkhe dize"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col= "black",
                              alpha=100, kind="se", conf=0.95,
                              show.groups=(c("Bisoton"))))#red
mylegend = legend("topright", c("Bisoton", "Hasan abad", "Khosro abad",
                                "Sarmil","Sorkhe dize"),
                  fill=c("black","blue","red","green","yellow"), border="white", bty="n")
## plot nmds2&3
# Axes 2-3 panel.  NOTE(review): here Khosro abad is drawn in black and
# Bisoton in darkred, unlike the first panel (red / black) -- verify the
# intended colour scheme against the final legend.
NMDS2.3=ordiplot(NMDS.3,choices=c(2,3), type = "n", display = "sites",xlab="NMDS 2",
                 ylab="NMDS 3"
                 ,ylim = c(-1.5,1.5), xlim = c(-2,2))
points(NMDS.3$points[,2],NMDS.3$points[,3], pch=20, col= "black", cex=0.3)
with(ECorMetaZero,ordiellipse(NMDS2.3,ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Hasan abad"))))
with(ECorMetaZero,ordiellipse(NMDS2.3, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("black"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Khosro abad"))))
with(ECorMetaZero,ordiellipse(NMDS2.3,ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("green"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sarmil"))))
with(ECorMetaZero,ordiellipse(NMDS2.3, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("yellow"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sorkhe dize"))))
with(ECorMetaZero,ordiellipse(NMDS2.3, ECorMetaZero$LOCALITY,cex=1.5,
                              draw="polygon", col= "darkred",
                              alpha=100, kind="se", conf=0.95,
                              show.groups=(c("Bisoton"))))#red
### plot nmds 1&3
NMDS1.3=ordiplot(NMDS.3,choices=c(1,3), type = "n", display = "sites",xlab="NMDS 1",
                 ylab="NMDS 3"
                 ,ylim = c(-1.5,1.5), xlim = c(-2,2))
points(NMDS.3$points[,1],NMDS.3$points[,3], pch=20, col= "black",cex=0.3)
with(ECorMetaZero,ordiellipse(NMDS1.3,ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Hasan abad"))))
with(ECorMetaZero,ordiellipse(NMDS1.3, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("black"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Khosro abad"))))
with(ECorMetaZero,ordiellipse(NMDS1.3,ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("green"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sarmil"))))
with(ECorMetaZero,ordiellipse(NMDS1.3, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("yellow"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sorkhe dize"))))
with(ECorMetaZero,ordiellipse(NMDS1.3, ECorMetaZero$LOCALITY,cex=1.5,
                              draw="polygon", col= "darkred",
                              alpha=100, kind="se", conf=0.95,
                              show.groups=(c("Bisoton"))))#red
# NOTE(review): this legend lists Bisoton as "red" and Khosro abad as "black",
# but the ellipses in these two panels use darkred for Bisoton -- confirm
# which colour mapping is intended.
mylegend = legend("topright", c("Bisoton", "Hasan abad", "Khosro abad",
                                "Sarmil","Sorkhe dize"), cex=0.5,
                  fill=c("red","blue","black","green","yellow"),
                  border="white", bty="n")
dev.off()
################################
####### NMDS for time of sampling
################################
# Same ordinations as above, but grouped/ellipsed by sampling time
# (1 = May, 2 = June, 3 = July 2015 per the legends below).
dev.off()
plot(ENdoNMDS$points, xlab="dimension 1", ylab="dimension 2")
# 2-D solution first: points coloured by time level, one SE ellipse per time.
ordiplot(ENdoNMDS, type = "n", display = "sites",xlab="NMDS 1", ylab="NMDS 2",
         ylim = c(-2,2), xlim = c(-3,3))
points(ENdoNMDS$points, pch=20, col= as.numeric(ECorMetaZero$TIME))
#ordispider(ENdoNMDS,ECorMetaZero$TIME, col=c("grey"))
with(ECorMetaZero,ordiellipse(ENdoNMDS,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col="black",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("1"))))
with(ECorMetaZero,ordiellipse(ENdoNMDS, ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("2"))))
with(ECorMetaZero,ordiellipse(ENdoNMDS,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("green"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("3"))))
legend("topright", c("May 2015","Jun 2015","July 2015"),
       fill=c("black","red","green"),
       border="white", bty="n")
##3D NMDS
## Plot nmds1 &2
dev.off()
#pdf(file = "3D NMDS time.pdf", paper = "a4", width = 7, height = 4)
#par(mfrow= c(1,3))
# NOTE(review): NMDS1.2 is reassigned here (and again in later sections);
# all assignments are ordiplot(NMDS.3, choices=c(1,2)), so downstream
# ordiellipse() calls that read its scores are unaffected by xlim changes.
NMDS1.2=ordiplot(NMDS.3,choices=c(1,2), type = "n", display = "sites",xlab="NMDS 1",
                 ylab="NMDS 2", xlim = c(-2,2)
)
points(NMDS.3$points[,1],NMDS.3$points[,2], pch=20, col= "black", cex= 0.75)
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("1"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("2"))))
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("green"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("3"))))
legend("topright", c("May 2015","Jun 2015","July 2015"),
       fill=c("blue","red","green"),
       border="white", bty="n")
## plot nmds2&3
NMDS2.3=ordiplot(NMDS.3,choices=c(2,3), type = "n", display = "sites",xlab="Dimension 2",
                 ylab="Dimension 3", ylim = c(-1,1), xlim = c(-1.5,1.5)
)
points(NMDS.3$points[,2],NMDS.3$points[,3], pch=20, col= "black", cex=0.3)
with(ECorMetaZero,ordiellipse(NMDS2.3,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("1"))))
with(ECorMetaZero,ordiellipse(NMDS2.3, ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("2"))))
with(ECorMetaZero,ordiellipse(NMDS2.3,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("green"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("3"))))
legend("topright", c("May 2015","Jun 2015","July 2015"),
       fill=c("blue","red","green"),
       border="white", bty="n")
## this shows the changes much better than the others
### plot nmds 1&3
NMDS1.3=ordiplot(NMDS.3,choices=c(1,3), type = "n", display = "sites",xlab="Dimension 1",
                 ylab="Dimension 3"
                 ,ylim = c(-1.5,1.5), xlim = c(-2,2))
points(NMDS.3$points[,1],NMDS.3$points[,3], pch=20, col= "black",cex=0.3)
with(ECorMetaZero,ordiellipse(NMDS1.3,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("1"))))
with(ECorMetaZero,ordiellipse(NMDS1.3, ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("2"))))
with(ECorMetaZero,ordiellipse(NMDS1.3,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("green"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("3"))))
legend("topright", c("May 2015","Jun 2015","July 2015"),
       fill=c("blue","red","green"),
       border="white", bty="n")
dev.off()
############################
### 3D NMDS for tissue type
# Three axis-pair panels of the k=3 solution, ellipsed by tissue
# (SUBSTRATE: "Leaf" vs "Branch"); legends label Branch as "Leaf"/"Branch".
NMDS1.2=ordiplot(NMDS.3,choices=c(1,2), type = "n", display = "sites",xlab="NMDS 1",
                 ylab="NMDS 2", xlim = c(-2,4)
)
points(NMDS.3$points[,1],NMDS.3$points[,2], pch=20, col= "black", cex= 0.75)
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$SUBSTRATE,
                              draw="polygon", col="green",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Leaf"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$SUBSTRATE,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Branch"))))
legend("topright", c("Leaf","Branch"),
       fill=c("green","red"),
       border="white", bty="n")
## plot nmds2&3
NMDS2.3=ordiplot(NMDS.3,choices=c(2,3), type = "n", display = "sites",xlab="NMDS 2",
                 ylab="NMDS 3", ylim = c(-1,1), xlim = c(-1.5,1.5)
)
points(NMDS.3$points[,2],NMDS.3$points[,3], pch=20, col= "black", cex=0.75)
with(ECorMetaZero,ordiellipse(NMDS2.3,ECorMetaZero$SUBSTRATE,cex=.5,
                              draw="polygon", col="green",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Leaf"))))
with(ECorMetaZero,ordiellipse(NMDS2.3, ECorMetaZero$SUBSTRATE,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Branch"))))
legend("topright", c("Leaf","Branch"),
       fill=c("green","red"),
       border="white", bty="n")
### plot nmds 1&3
NMDS1.3=ordiplot(NMDS.3,choices=c(1,3), type = "n", display = "sites",xlab="NMDS 1",
                 ylab="NMDS 3"
                 ,ylim = c(-1.5,1.5), xlim = c(-2,4))
points(NMDS.3$points[,1],NMDS.3$points[,3], pch=20, col= "black",cex=0.75)
with(ECorMetaZero,ordiellipse(NMDS1.3,ECorMetaZero$SUBSTRATE,cex=.5,
                              draw="polygon", col="green",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Leaf"))))
with(ECorMetaZero,ordiellipse(NMDS1.3, ECorMetaZero$SUBSTRATE,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Branch"))))
legend("topright", c("Leaf","Branch"),
       fill=c("green","red"),
       border="white", bty="n")
dev.off()
################
## NMDS for temprature
# Three axis-pair panels of the k=3 solution, ellipsed by isolation
# temperature ("4 Degree" vs "25 Degree").  The last legend uses
# plotmath degree symbols; the first two use plain strings.
NMDS1.2=ordiplot(NMDS.3,choices=c(1,2), type = "n", display = "sites",xlab="NMDS 1",
                 ylab="NMDS 2", xlim = c(-2,2)
)
points(NMDS.3$points[,1],NMDS.3$points[,2], pch=20, col= "black", cex= 0.75)
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$TEMPRATURE,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("4 Degree"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$TEMPRATURE,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("25 Degree"))))
legend("topright", c("4 Degree","25 Degree"),
       fill=c("blue","red"),
       border="white", bty="n")
## plot nmds2&3
NMDS2.3=ordiplot(NMDS.3,choices=c(2,3), type = "n", display = "sites",xlab="NMDS 2",
                 ylab="NMDS 3", xlim = c(-2,2)
)
points(NMDS.3$points[,2],NMDS.3$points[,3], pch=20, col= "black", cex=0.75)
with(ECorMetaZero,ordiellipse(NMDS2.3,ECorMetaZero$TEMPRATURE,cex=.5,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("4 Degree"))))
with(ECorMetaZero,ordiellipse(NMDS2.3, ECorMetaZero$TEMPRATURE,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("25 Degree"))))
legend("topright", c("4 Degree","25 Degree"),
       fill=c("blue","red"),
       border="white", bty="n")
### plot nmds 1&3
NMDS1.3=ordiplot(NMDS.3,choices=c(1,3), type = "n", display = "sites",xlab="NMDS 1",
                 ylab="NMDS 3"
                 ,xlim = c(-2,2))
points(NMDS.3$points[,1],NMDS.3$points[,3], pch=20, col= "black",cex=0.75)
with(ECorMetaZero,ordiellipse(NMDS1.3,ECorMetaZero$TEMPRATURE,cex=.5,
                              draw="polygon", col="blue",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("4 Degree"))))
with(ECorMetaZero,ordiellipse(NMDS1.3, ECorMetaZero$TEMPRATURE,cex=.5,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("25 Degree"))))
legend("topright", c(expression(4*degree~C),expression(25*degree~C)),
       fill=c("blue","red"),
       border="white", bty="n")
#########################################################
############# FIG 2
#########################################################
# Final four-panel composite (A: tissue, B: locality, C: time,
# D: temperature), all on NMDS axes 1-2 of the k=3 solution.
# NOTE(review): the ordiellipse() calls read scores from the previously
# stored NMDS1.2 ordiplot object rather than the frames drawn below; both
# come from ordiplot(NMDS.3, choices=c(1,2)), so the coordinates agree --
# but verify NMDS1.2 was last assigned from choices=c(1,2) before running.
dev.off()
png(file = "fig 2-final.png", width = 2100, height = 2100, res= 300)
par(mfrow= c(2,2) , mar=c(4,4,0.5,0.5), oma= c(2,0,2,0))
# tissue
ordiplot(NMDS.3,choices=c(1,2), type = "n", display = "sites",
         ylab="NMDS 2", xlim = c(-2,4),ylim = c(-2,2)
)
points(NMDS.3$points[,1],NMDS.3$points[,2], pch=20, col= "black", cex= 0.75)
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$SUBSTRATE,
                              draw="polygon", col="forestgreen",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Leaf"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$SUBSTRATE,
                              draw="polygon", col=c("brown"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Branch"))))
legend("topright", c("Leaf","Twig"), fill=c("forestgreen","brown"),
       border="white", bty="n",title = "A")
## location
ordiplot(NMDS.3,choices=c(1,2), type = "n", display = "sites",
         xlim = c(-2,4),ylim = c(-2,2))
points(NMDS.3$points[,1],NMDS.3$points[,2], pch=20, col= "black", cex= 0.75)
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$LOCALITY, cex=.5,
                              draw="polygon", col="cyan",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Hasan abad"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("forestgreen"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Khosro abad"))))
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("yellow"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sarmil"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col=c("maroon3"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("Sorkhe dize"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$LOCALITY,cex=.5,
                              draw="polygon", col= "black",
                              alpha=100, kind="se", conf=0.95,
                              show.groups=(c("Bisoton"))))#red
# Legend order matches the ellipse colours; "Hasan abad" is relabelled
# "Mahi Dasht" for publication.
mylegend = legend("topright", c("Biseton", "Mahi Dasht", "Khosro abad",
                                "Sarmil","Sorkhe dize"), title = "B",
                  fill=c("black","cyan","forestgreen","yellow","maroon3"), border="white", bty="n")
### Time
ordiplot(NMDS.3,choices=c(1,2), type = "n", display = "sites",xlab="NMDS 1",
         ylab="NMDS 2", xlim = c(-2,4),ylim = c(-2,2)
)
points(NMDS.3$points[,1],NMDS.3$points[,2], pch=20, col= "black", cex= 0.75)
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col="cyan",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("1"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("maroon3"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("2"))))
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$TIME,cex=.5,
                              draw="polygon", col=c("forestgreen"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("3"))))
legend("topright", c("May 2015","June 2015","July 2015"), title = "C",
       fill=c("cyan","maroon3","forestgreen"),
       border="white", bty="n")
### Temprature
ordiplot(NMDS.3,choices=c(1,2), type = "n", display = "sites",xlab="NMDS 1",
         xlim = c(-2,4),ylim = c(-2,2)
)
points(NMDS.3$points[,1],NMDS.3$points[,2], pch=20, col= "black", cex= 0.75)
with(ECorMetaZero,ordiellipse(NMDS1.2,ECorMetaZero$TEMPRATURE,
                              draw="polygon", col="cyan",
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("4 Degree"))))
with(ECorMetaZero,ordiellipse(NMDS1.2, ECorMetaZero$TEMPRATURE,
                              draw="polygon", col=c("red"),
                              alpha=100,kind="se",conf=0.95,
                              show.groups=(c("25 Degree"))))
legend("topright", c(expression(4*degree~C),expression(25*degree~C)), title = "D",
       fill=c("cyan","red"),
       border="white", bty="n")
dev.off()
###############################################################
#### 6- community analysis at family level
###############################################################
# Multivariate GLM (mvabund) asking which design factors structure the
# endophyte community when OTU abundances are pooled to taxonomic family.
# Per the author's notes: many OTUs were only identifiable to family, and
# 16 of 59 OTUs could not be assigned and were grouped at their lowest
# assignment level.
FmilyAbun <- read.csv(file = "matrix_family.csv",
                      header = TRUE, row.names = 1)
# Sanity checks (auto-printed interactively): sample order must match metadata.
row.names(FmilyAbun) == row.names(EndoMetaData)
colnames(FmilyAbun)
FamilyMVABUND <- mvabund(FmilyAbun)
plot(FamilyMVABUND)
# Negative-binomial many-GLM of family abundances on the four factors.
FamilyMV.m <- manyglm(FamilyMVABUND ~ Locality + Time + Temperature + Tissue,
                      data = EndoMetaData,
                      family = "negative.binomial", show.residuals = TRUE)
# Likelihood-ratio tests with Monte-Carlo resampling; univariate p-values
# are adjusted for multiple testing across families.
FamilyMV.Anova <- anova.manyglm(FamilyMV.m, nBoot = 100, test = "LR",
                                p.uni = "adjusted", resamp = "montecarlo")
FamilyMV.summ <- summary.manyglm(FamilyMV.m, nBoot = 100, test = "LR",
                                 p.uni = "adjusted", resamp = "montecarlo")
### which families are significantly affected?
FamilyAnova <- as.data.frame(FamilyMV.Anova$uni.p)
# Families whose adjusted univariate p-value is <= 0.05 for a given factor.
sig.families <- function(term) {
  colnames(FamilyAnova)[FamilyAnova[term, ] <= 0.05]
}
FmilyTissue <- sig.families("Tissue")
FmilyLoc <- sig.families("Locality")
FmilyTim <- sig.families("Time")
FmilyTemp <- sig.families("Temperature")
# Per-family totals within each tissue; the by-time totals are only printed.
family.sum <- aggregate(. ~ EndoMetaData$SUBSTRATE, FmilyAbun, sum)
aggregate(. ~ EndoMetaData$TIME, FmilyAbun, sum)
###########################################################
####### Fig 2-paper
###########################################################
dev.off()
#pdf(file = "PIE CHART L&B final.pdf", paper = "a4", width = 7, height = 3)
# Two stacked pie charts (leaf vs branch) with Persian panel titles:
# "برگ" = leaf, "شاخه" = branch/twig.
jpeg(file = "Fig 4persian.jpeg", width = 1500, height = 2500, res = 300)
par(mfrow=c(2,1))
# creat a Pie chart for leaf
# NOTE(review): slice counts are hard-coded from family.sum output (zeros
# dropped) -- they must be re-checked whenever the input data change.
L.slic<- c(1,1,2,25,1,2,1,9,6,7,2,3)#get the valus from family.sum and remove the zeros
L.lbls<- c("Coniochaetaceae","Cladosporiaceae","Dothioraceae","Gnomoniaceae",
           "Lasiosphaeriaceae","Montagnulaceae","Nectriaceae","Pleosporaceae",
           "Sporormiaceae","Trichocomaceae","Xylariaceae","Other families")
# Labels become "Family 12.5%" -- percentage appended in two paste steps.
L.Percent<-round(L.slic/sum(L.slic)*100, digits=2)
L.lbls <- paste(L.lbls, L.Percent)
L.lbls<-paste(L.lbls,"%",sep="")
#png(file = "Fig 2-revised-leaf.png", width = 1000, height = 1000, res = 300)
pie(L.slic,labels =L.lbls, col = c("firebrick","indianred1","orchid1","magenta",
                                   "deeppink1","mediumblue","royalblue1","skyblue1","cyan",
                                   "yellow", "springgreen2","forestgreen"
) , main = "برگ",
border = NA, radius = 0.7, cex=0.7,cex.main=0.7)
# creat a Pie chart for branch
B.slic<- c(6,45,157,6,37,48,85,15,60)#get the valus from family.sum and remove the zeros
B.lbls<- c("Coniochaetaceae","Dothioraceae","Montagnulaceae",
           "Cladosporiaceae","Pleosporaceae","Sporormiaceae","Trichocomaceae","Valsaceae",
           "Other families")
B.Percent<-round(B.slic/sum(B.slic)*100, digits=2)
B.lbls <- paste(B.lbls, B.Percent)
B.lbls<-paste(B.lbls,"%",sep="")
pie(B.slic,labels =B.lbls, col = c("firebrick","orchid1","mediumblue","indianred1",
                                   "skyblue1", "cyan","yellow","gold","forestgreen"),
    radius = 0.7, main = "شاخه",border = NA,cex=0.7,cex.main=0.7)
dev.off()
###################################################################
############## fig 5
###################################################################
# Grouped barplot of the mean abundance of the six families flagged as
# temperature-affected by the manyglm analysis.  NOTE(review): this list is
# hard-coded -- verify it still matches FmilyTemp after re-running the anova.
familtemp<- c ("Dothioraceae","Gnomoniaceae","Montagnulaceae","Pleosporaceae",
               "Sporormiaceae","Trichocomaceae")
familiTempABUN<- FmilyAbun[familtemp]##Keeping only Temp affected families
# get the mean valuse for each family in each temp
familiTempmean <- aggregate(. ~ EndoMetaData$TEMPRATURE, familiTempABUN, mean)
#CReat a data frame of the mean valuse
familTMeanfram<- as.data.frame(familiTempmean,optional=TRUE)
attr(famiTMeanfram, "row.names")<- c("25 Degree", "4 Degree")
### creat a matrix of mean observation of families affected by temp for ploting
familiTdata<- data.matrix (famiTMeanfram[2:7],rownames.force = NA )
familiTdata2<-as.data.frame(familiTdata)
library(ggplot2)
# NOTE(review): dev.off() errors if no device is open.
dev.off()
png(file = "Fig 5-600.png", width = 3000, height = 2400, res = 600)
par(mar=c(6,4,1,0),oma=c(1,0,1,0))
# beside=TRUE draws one bar per matrix cell, grouped by family (columns);
# the two rows (25 / 4 degree) form each pair of bars.
bar.temp<-barplot(familiTdata,legend.text =c(expression(25*degree~C),expression(4*degree~C)),
                  beside = TRUE,ylab= "Mean frequency per sample" ,xlab = "Fungal taxonomic families",
                  names.arg= c ("Dothioraceae","Gnomoniaceae","Montagnulaceae","Pleosporaceae",
                                "Sporormiaceae","Trichocomaceae"),
                  axes= TRUE,ylim= c(0,0.7),
                  args.legend = list(x = "topright",bty= "n", cex=0.7), border = "Black", axis.lty=1,axisnames=T,
                  width = 0.5)
# Add vertical, capped error bars to an existing base-graphics plot.
#
# Arguments:
#   x, y   - bar/point centre coordinates (vectorised).
#   upper  - distance from y to the top of each bar.
#   lower  - distance from y to the bottom of each bar (defaults to `upper`,
#            giving symmetric bars).
#   length - length of the horizontal caps in inches (passed to arrows()).
#   ...    - further graphical parameters forwarded to arrows(), e.g. lwd, col.
# Returns: NULL invisibly; called for its side effect on the current device.
error.bar <- function(x, y, upper, lower = upper, length = 0.1, ...) {
  # angle = 90 turns the arrowheads into flat caps; code = 3 caps both ends.
  # Fix: the original also passed width = 0.5, but arrows() has no `width`
  # argument and it is not a graphical parameter, so every call emitted a
  # '"width" is not a graphical parameter' warning.  It has been removed.
  arrows(x, y + upper, x, y - lower, angle = 90, code = 3, length = length, ...)
}
# NOTE(review): this first call uses familiTdata2se BEFORE it is defined
# below -- on a clean top-to-bottom run it errors ("object not found").
# The SE objects must be built first; the duplicate call at the end of this
# section is the one that works on a re-run.
error.bar(bar.temp,familiTdata,familiTdata2se)
dev.off()
# NOTE(review): interactive help lookups left in the script; they do nothing
# useful when the file is source()d.
?barplot()
??args.legend
# add error bars
#First I create a smell function that takes...in entry
# get the se valuse for each family in each temp
familiTempse <- aggregate(. ~ EndoMetaData$TEMPRATURE, familiTempABUN, se)
#CReat a data frame of the mean valuse
famiTsefram<- as.data.frame(familiTempse,optional=TRUE)
attr(famiTsefram, "row.names")<- c("25 Degree", "4 Degree")
### creat a matrix of mean observation of families affected by temp for ploting
familiTdatase<- data.matrix (famiTsefram[2:7],rownames.force = NA )
# Half SE, to match the half-SE error bars used in the ggplot figures.
familiTdata2se<-familiTdatase*0.5
error.bar(bar.temp,familiTdata,familiTdata2se)
##################
## OTU isolation results in tissue and temperature
## OTU observation in each tissue type
TisOTUsum <- aggregate(. ~ EndoMetaData$SUBSTRATE, EndoAbun, sum)
###OTU observation in each isolation temperature
TempOTUsum<-aggregate(. ~ EndoMetaData$TEMPRATURE, EndoAbun, sum)
class(TempOTUsum)
library(plyr)
library(dplyr)
# NOTE(review): interactive help lookup left in the script.
?tally
dev.off()
#################
######VEN diagram
library(VennDiagram)
# Bare object names below only auto-print interactively.
EndoMetaData
EndoAbun
# NOTE(review): par(mfrow) has no effect on grid-based VennDiagram output.
par(mfrow=c(1,2))
# NOTE(review): the area/overlap counts are hard-coded; presumably tallied
# from TisOTUsum / TempOTUsum above -- re-verify if the data change.
jpeg("fig6-per-organ.jpeg", width = 1000,height = 1000, res=300)
venn.organ <- draw.pairwise.venn(
  area1 = 22,
  area2 = 51,
  cross.area = 14,
  category = c("Leaf", "Twig"),
  fill = c("green", "gray"),cex =1.2,cat.cex = 1.2,
  scaled=T,cat.pos = c(180,180),cat.dist = 0.05, lwd=c(0.5,0.5),col=c("green", "gray"))
grid.newpage()
?draw.pairwise.venn
venn.temp<-draw.pairwise.venn(
  area1 = 51,
  area2 = 15,
  cross.area = 7,
  category = c(expression(25*degree~C), expression(4*degree~C)),
  fill = c("red", "blue"), cex =1.2,cat.cex = 1.2,
  scaled=T,cat.pos = c(0,0),cat.dist = 0.05,lwd=c(0.5,0.5),col=c("red", "blue"))
dev.off()
library(gridExtra)
# Fig 6
# The stored gList objects are re-drawn side by side via gTree wrappers.
png(file = "Fig 6.png", width = 2000, height = 900, res = 300)
grid.arrange(gTree(children=venn.temp), gTree(children=venn.organ), ncol=2)
dev.off()
#another way for venn diagram
# NOTE(review): biocLite.R is the legacy Bioconductor installer (deprecated
# in favour of BiocManager::install); this also executes a remote script at
# run time -- confirm before keeping.
source("http://www.bioconductor.org/biocLite.R")
biocLite("limma")
library(limma)
# NOTE(review): vennCounts() on the metadata frame (not on presence/absence
# columns) looks exploratory -- verify this is the intended input.
vennDiagram(vennCounts(EndoMetaData))
?vennDiagram
# Subsetting for the per-site Venn diagram: build a binary (presence/absence)
# OTU table with one row per isolate, a 'site' column, and one 0/1 column per
# OTU.  Output column names (otu1..otu59, including the historical labels
# "ot35", "otu147" and "ot48") are preserved exactly so the CSV written at the
# end is unchanged.
metadata2 <- data.frame(out1 = integer(446))
metadata2 <- cbind(metadata2, site = EndoMetaData$LOCALITY)
# Lookup table: output flag name -> EndoAbun abundance column it is derived
# from.  NOTE(review): assumes these are the exact EndoAbun column names
# (the original used $, which can also partial-match) — confirm.
site_otu_cols <- c(
  otu1   = "Alternaria.sp..A22",
  otu2   = "Alternaria.sp..Ac27",
  otu3   = "Alternaria.sp..A25",
  otu4   = "Alternaria.sp..A76",
  otu5   = "Alternaria.sp..A9",
  otu6   = "Arthrinium.marii",
  otu7   = "Aspergillus.sp..A20",
  otu8   = "Aureobasidium.sp..A17",
  otu9   = "Aureobasidium.sp..A30",
  otu10  = "B25",
  otu11  = "Bc3",
  otu12  = "Biscogniauxia.mediterranea",
  otu13  = "Byssochlamys.spectabilis.",
  otu14  = "Chaetomiaceae.sp..A37",
  otu15  = "Cladosporium.herbarum.A8",
  otu16  = "Comoclathris.sedi",
  otu17  = "Coniochaeta.sp..A85",
  otu18  = "Coniothyrium.sp..A41",
  otu19  = "Cytospora.sp..AC35",
  otu20  = "Cytospora.sp..C2",
  otu21  = "Diatrype.sp..C1",
  otu22  = "Diatrypella.sp..C6",
  otu23  = "Dikarya.sp..A38",
  otu24  = "Dothideomycetes.sp..A1",
  otu25  = "Dothideomycetes.sp..A79",
  otu26  = "Endoconidioma.populi.A39",
  otu27  = "Fusarium.sp..46",
  otu28  = "Gnomoniaceae.sp..66",
  otu29  = "Gnomoniaceae.sp..70",
  otu30  = "Humicola.sp..A52",
  otu31  = "Microsphaeriopsis.olivacea",
  otu32  = "Penicillium.sp..A21",
  otu33  = "Penicillium.sp..A3",
  otu34  = "Pleosporaceae.sp..A5",
  ot35   = "Pleosporaceae.sp..Ac49",
  otu36  = "Pleosporaceae.sp..B27",
  otu37  = "Preussia.africana",
  otu38  = "Preussia.australis",
  otu39  = "Preussia.complex.sp..A36",
  otu40  = "Preussia.intermedia",
  otu41  = "Preussia.sp..A31",
  otu42  = "Schizothecium.sp..B14",
  otu43  = "Sordariomycetes.sp..A13",
  otu44  = "Sordariomycetes.sp..A45",
  otu45  = "Sporormiaceae.sp..A29M",
  otu46  = "Sporormiaceae.sp..L32",
  otu147 = "Unfidentified.Sordariomycetes.sp..A26",
  ot48   = "Unidentified.Ascomycota.sp..A86",
  otu49  = "Unidentified.Ascomycota.sp..Bc6",
  otu50  = "Unidentified.Dothideomycetes.sp..Bc4",
  otu51  = "Unidentified.Fungi.Ac44",
  otu52  = "Unidentified.Fungi.Ac52",
  otu53  = "Unidentified.Pezizomycotina.sp..B12",
  otu54  = "Unidentified.Pleosporales.sp..A65",
  otu55  = "Unidentified.Pleosporales.sp..A75",
  otu56  = "Unidentified.Xylariaceae.sp..B21",
  otu57  = "Ustilago.A14",
  otu58  = "Ustilago.A16",
  otu59  = "Valsaceae.sp..A32"
)
# Append one indicator column per OTU: 1 where the isolate had any
# abundance of that OTU, 0 otherwise (same values the original cbind
# chain produced).
for (flag in names(site_otu_cols)) {
  metadata2[[flag]] <- ifelse(EndoAbun[[site_otu_cols[[flag]]]] > 0, 1, 0)
}
write.csv(metadata2, file = "metadata2.csv")
# Per-site Venn diagram: collapse the binary OTU table to presence/absence
# per locality and draw a 5-set Venn (limma::vennDiagram).
# row.names = 1 drops the row-index column that write.csv added above.
metadata1 <- read.csv("metadata2.csv", header = TRUE, row.names = 1)
site <- metadata2$site   # kept for reference; not used below
metadata1$site <- NULL
# BUGFIX(review): a malformed duplicate of the next computation was removed
# here — it read `t((rowsum(metadata1)) group = ...)` with a misplaced ")",
# a syntax error that stopped the whole script from sourcing.  The intended
# result (rowsum by locality, transposed) is computed via df1 below.
# Sum OTU occurrences within each locality, then transpose so that
# rows = OTUs and columns = localities.
df1 <- rowsum(metadata1, group = EndoMetaData$LOCALITY, na.rm = TRUE)
metadataREV <- t(df1)
class(metadataREV)
metadataREV <- as.data.frame(metadataREV)
# Recode the per-locality counts as 0/1 indicators under the display names
# used in the figure.  Source column names come from the levels of
# EndoMetaData$LOCALITY (note source "Bisoton" vs. display "Biseton").
metadataREV <- cbind(metadataREV, Biseton    = ifelse(metadataREV$Bisoton > 0, 1, 0))
metadataREV <- cbind(metadataREV, Mahidasht  = ifelse(metadataREV$`Hasan abad` > 0, 1, 0))
metadataREV <- cbind(metadataREV, KhosroAbad = ifelse(metadataREV$`Khosro abad` > 0, 1, 0))
metadataREV <- cbind(metadataREV, SARMIL     = ifelse(metadataREV$Sarmil > 0, 1, 0))
metadataREV <- cbind(metadataREV, SORKHEDIZE = ifelse(metadataREV$`Sorkhe dize` > 0, 1, 0))
# Drop the raw count columns, leaving only the five 0/1 site indicators.
metadataREV$Bisoton <- NULL
metadataREV$`Hasan abad` <- NULL
metadataREV$`Khosro abad` <- NULL
metadataREV$Sarmil <- NULL
metadataREV$`Sorkhe dize` <- NULL
# Draw once to the active device for a quick visual check ...
venn.site <- vennDiagram(vennCounts(metadataREV),
                         circle.col = c("red", "darkblue", "green", "black", "darkviolet"),
                         names = c("Biseton", "Mahi Dasht", "Khosro Abad", "Sarmil", "Sorkhe Dize"))
dev.off()
# ... then render the publication figure (Fig 6, per-site) to JPEG.
jpeg(file = "Fig 6site-per.jpeg", width = 1000, height = 1000, res = 300)
venn.site <- vennDiagram(vennCounts(metadataREV), cex = c(0.5, 0.5, 0.5),
                         circle.col = c("red", "darkblue", "green", "black", "darkviolet"),
                         names = c("Biseton", "Mahi Dasht", "Khosro Abad", "Sarmil", "Sorkhe Dize"))
dev.off()
?vennDiagram
# Subsetting for the per-time Venn diagram: binary (presence/absence) OTU
# table, one row per isolate, with the sampling time attached while the
# table is built.  Output column names (otu1..otu59, including the
# historical labels "ot35", "otu147" and "ot48") are preserved exactly so
# the CSV written at the end is unchanged.
metadata3 <- data.frame(out1 = integer(446))
metadata3$out1 <- NULL           # start from an empty 446-row frame
metadata3 <- cbind(metadata3, time = EndoMetaData$TIME)
# Lookup table: output flag name -> EndoAbun abundance column it is derived
# from.  NOTE(review): assumes these are the exact EndoAbun column names
# (the original used $, which can also partial-match) — confirm.
time_otu_cols <- c(
  otu1   = "Alternaria.sp..A22",
  otu2   = "Alternaria.sp..Ac27",
  otu3   = "Alternaria.sp..A25",
  otu4   = "Alternaria.sp..A76",
  otu5   = "Alternaria.sp..A9",
  otu6   = "Arthrinium.marii",
  otu7   = "Aspergillus.sp..A20",
  otu8   = "Aureobasidium.sp..A17",
  otu9   = "Aureobasidium.sp..A30",
  otu10  = "B25",
  otu11  = "Bc3",
  otu12  = "Biscogniauxia.mediterranea",
  otu13  = "Byssochlamys.spectabilis.",
  otu14  = "Chaetomiaceae.sp..A37",
  otu15  = "Cladosporium.herbarum.A8",
  otu16  = "Comoclathris.sedi",
  otu17  = "Coniochaeta.sp..A85",
  otu18  = "Coniothyrium.sp..A41",
  otu19  = "Cytospora.sp..AC35",
  otu20  = "Cytospora.sp..C2",
  otu21  = "Diatrype.sp..C1",
  otu22  = "Diatrypella.sp..C6",
  otu23  = "Dikarya.sp..A38",
  otu24  = "Dothideomycetes.sp..A1",
  otu25  = "Dothideomycetes.sp..A79",
  otu26  = "Endoconidioma.populi.A39",
  otu27  = "Fusarium.sp..46",
  otu28  = "Gnomoniaceae.sp..66",
  otu29  = "Gnomoniaceae.sp..70",
  otu30  = "Humicola.sp..A52",
  otu31  = "Microsphaeriopsis.olivacea",
  otu32  = "Penicillium.sp..A21",
  otu33  = "Penicillium.sp..A3",
  otu34  = "Pleosporaceae.sp..A5",
  ot35   = "Pleosporaceae.sp..Ac49",
  otu36  = "Pleosporaceae.sp..B27",
  otu37  = "Preussia.africana",
  otu38  = "Preussia.australis",
  otu39  = "Preussia.complex.sp..A36",
  otu40  = "Preussia.intermedia",
  otu41  = "Preussia.sp..A31",
  otu42  = "Schizothecium.sp..B14",
  otu43  = "Sordariomycetes.sp..A13",
  otu44  = "Sordariomycetes.sp..A45",
  otu45  = "Sporormiaceae.sp..A29M",
  otu46  = "Sporormiaceae.sp..L32",
  otu147 = "Unfidentified.Sordariomycetes.sp..A26",
  ot48   = "Unidentified.Ascomycota.sp..A86",
  otu49  = "Unidentified.Ascomycota.sp..Bc6",
  otu50  = "Unidentified.Dothideomycetes.sp..Bc4",
  otu51  = "Unidentified.Fungi.Ac44",
  otu52  = "Unidentified.Fungi.Ac52",
  otu53  = "Unidentified.Pezizomycotina.sp..B12",
  otu54  = "Unidentified.Pleosporales.sp..A65",
  otu55  = "Unidentified.Pleosporales.sp..A75",
  otu56  = "Unidentified.Xylariaceae.sp..B21",
  otu57  = "Ustilago.A14",
  otu58  = "Ustilago.A16",
  otu59  = "Valsaceae.sp..A32"
)
# Append one indicator column per OTU: 1 where the isolate had any
# abundance of that OTU, 0 otherwise (same values the original cbind
# chain produced).
for (flag in names(time_otu_cols)) {
  metadata3[[flag]] <- ifelse(EndoAbun[[time_otu_cols[[flag]]]] > 0, 1, 0)
}
metadata3$time <- NULL           # keep only the OTU indicator columns
write.csv(metadata3, file = "metadata3.csv")
# Per-time Venn diagram (TIME codes: 1 = May, 2 = June, 3 = July).
# Re-read the binary OTU table written above.  NOTE(review): unlike the
# site branch, no row.names argument is given, so the row-index column
# ("X") that write.csv added is read back as data and summed along with
# the OTUs — kept for identical behaviour, but worth confirming.
metadata4 <- read.csv("metadata3.csv", header = TRUE)
# Sum OTU occurrences within each sampling time, then transpose so that
# rows = OTUs and columns = time codes.
df2 <- rowsum(metadata4, group = EndoMetaData$TIME, na.rm = TRUE)
metadata.time <- t(df2)
class(metadata.time)
metadata.time <- as.data.frame(metadata.time)
# Recode the per-time counts as 0/1 presence columns named by month,
# dropping each raw count column ("1"/"2"/"3") once it is recoded.
month_names <- c(`1` = "May", `2` = "June", `3` = "July")
for (code in names(month_names)) {
  metadata.time[[month_names[[code]]]] <- ifelse(metadata.time[[code]] > 0, 1, 0)
  metadata.time[[code]] <- NULL
}
# Fig 6 (per-time): three-set Venn of OTUs shared between months.
jpeg(file = "Fig 6 time-per.jpeg", width = 1000, height = 1000, res = 300)
venn.time <- vennDiagram(vennCounts(metadata.time), cex = c(0.5, 0.5, 0.5),
                         circle.col = c("green", "darkblue", "red"))
dev.off()
|
261e03c2c9c39e2b962536dcc85c5c8ce5bd94ce
|
8e6dd6e6306902e5447754b5c312d039c0c1c13c
|
/hello.R
|
77e33e7b92b23ad8a62618afd0e748072813ebe0
|
[] |
no_license
|
tnvrsingh/abega-SAaaS
|
bd3d68b9a6ddc43c1a29a95e640655dfe2387437
|
7b26b1886e0414fb36c601a5ce1664adf9bef723
|
refs/heads/master
| 2021-08-28T21:32:34.540202
| 2017-12-13T06:23:38
| 2017-12-13T06:23:38
| 113,954,380
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55
|
r
|
hello.R
|
# example/ex-sync.R
sentiment <- input
print(sentiment)
|
a8f4d12621873d292e964c828b4e8a11446993a5
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612988741-test.R
|
7a5356feb034597292baf844afe56334908bbb69
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 985
|
r
|
1612988741-test.R
|
testlist <- list(lims = structure(c(1.42575829028035e+248, 6.41572323189181e+197, 2.08227334829114e+262, 3.79580189380573e-305, 1.41050742846599e+248, 2.32159106063967e-152, 1.63749279656873e-314, 6.72759243712098e+197, 8.79436849597419e-322, 17408, 0, 0, 1.25986739689518e-321, 1.82543690750688e-139, 7.06328124687489e-304, 7.60242495019018e-270, 4.79263036908215e+173, 1.74356874098388e-304, 1.31996369220776e-309, 0, 2.11367436582065e-314, 3.78576699573368e-270, 1.2510820261329e-308, 2.05226840064919e-289, 1.9522358119801e-312, 8.10541286676906e+228, 5.71229768251201e+151, 1.39137526939423e+93, 1.41050742846599e+248, 2.32159105515088e-152, 1.33381209957218e+243, 2.41880775422113e-231, 1.74356874130052e-304, 5.22851419824833e+54, 5.22829692547757e+54, 5.22851419824833e+54 ), .Dim = c(6L, 6L)), points = structure(c(1.16357400252072e-319, 5.83464618113015e-304, 3.34858267947339e-115), .Dim = c(1L, 3L )))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.