blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
60fc530837c1139e6c97f6f05fd301b3e5b29dc7
|
2ad28c625ccccf372e4dcd7d58f10dc39f55c0a8
|
/man/search_stn_name.Rd
|
13270c9d8cfe586efa5bc000dc5bce2c8a6eb64e
|
[
"Apache-2.0"
] |
permissive
|
njatel/tidyhydat
|
4c0e44328cf1ee38d70a2b84898413174506ec8b
|
9954d53c4f8d6aa54b9de525a9264fdd93cf09ff
|
refs/heads/master
| 2021-08-23T22:27:59.266678
| 2017-12-06T18:21:34
| 2017-12-06T18:21:34
| 113,370,923
| 0
| 0
| null | 2017-12-06T21:50:55
| 2017-12-06T21:50:55
| null |
UTF-8
|
R
| false
| true
| 602
|
rd
|
search_stn_name.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{search_stn_name}
\alias{search_stn_name}
\alias{search_stn_number}
\title{A search function for hydrometric station name or number}
\usage{
search_stn_name(search_term)
search_stn_number(search_term)
}
\arguments{
\item{search_term}{Only accepts one word.}
}
\value{
A tibble of stations that match the \code{search_term}
}
\description{
Use this search function when you only know a partial station name or number and want to search for matching stations.
}
\examples{
\dontrun{
search_stn_name("Cowichan")
search_stn_number("08HF")
}
}
|
2dbd98e6041dafc7b5b52b4d2b92b54cf5ac3004
|
2d32305806855dc8793ab0348acef458b139f1be
|
/man/roxygen/templates/F_search_loops.R
|
701b96516e697b673664dc8c66113d9409f5a25b
|
[
"MIT"
] |
permissive
|
nmfs-fish-tools/SSMSE
|
07a9ed4defb370833864183b7f4a775425c53b3c
|
47445d973a537eaf9a7361f842d3f7a404bca247
|
refs/heads/main
| 2023-08-16T21:18:17.253400
| 2023-08-09T21:40:26
| 2023-08-10T12:20:30
| 197,069,801
| 16
| 5
|
MIT
| 2023-09-08T16:03:34
| 2019-07-15T20:44:06
|
R
|
UTF-8
|
R
| false
| false
| 302
|
r
|
F_search_loops.R
|
#' @param n_F_search_loops Number of times to try to find an F that achieves the
#' catches input in the OM. Defaults to 20.
#' @param tolerance_F_search How far apart the input catch and achieved
#'  catch can be when trying to find an F that achieves the catch input in the
#'  OM. Defaults to 0.001.
|
aa4d95bf2785bd64e95cd0d517b3cf8335e1fadf
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/zFactor/examples/multiplotStandingKatz.Rd.R
|
5654e722d4089c6bff668dfc6522a03525c6210b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 334
|
r
|
multiplotStandingKatz.Rd.R
|
# Auto-extracted example script for zFactor::multiplotStandingKatz().
# Running it draws Standing-Katz Z-factor isotherm curves; it produces plots
# only, no return value is used.
library(zFactor)
### Name: multiplotStandingKatz
### Title: Plot multiple Tpr isotherm curves in one figure
### Aliases: multiplotStandingKatz
### ** Examples
# plot Standing-Katz curves for Tpr=1.1 and 2.0
multiplotStandingKatz(c(1.1, 2))
# plot SK curves for the lowest range of Tpr
multiplotStandingKatz(c(1.05, 1.1, 1.2))
|
6fca7e4c1080c89bde6ae187561e8a5c264c0a8d
|
562204373b05dde8ea2a94862c0024ad5c67f21e
|
/man/n.Rd
|
20fd937ae64ebd69724d7bf06c858c71052f24a2
|
[] |
no_license
|
sophiasun025/gSeg
|
47698dc11a3c29559017a31be098fbf745ec94be
|
bd0f095dd47ea06ae8c793c3b55b9e0c2dccecca
|
refs/heads/master
| 2022-12-21T06:15:46.614682
| 2020-09-24T08:10:12
| 2020-09-24T08:10:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 183
|
rd
|
n.Rd
|
\name{n}
\alias{n}
\title{The Number of Observations in the Sequence}
\description{This is the variable name for the number of observations in the sequences in the "Example" data.
}
|
c3e07e5bc45484384be6f2f129fc0a6ba2edced5
|
c9c42d0649822f08754141c0c7ef54ce70341853
|
/man/listn.Rd
|
511918e0e022ba364d40a026ce158b3fadfb187a
|
[] |
no_license
|
luisgasco/openskyr
|
33884f912eb2c4569938c4e01be398dc8decc43b
|
35e64cbabb7d8c484055cbb0c37028305f584c44
|
refs/heads/master
| 2022-08-03T14:26:05.384936
| 2020-05-04T16:16:19
| 2020-05-04T16:16:19
| 83,914,360
| 7
| 2
| null | 2020-05-15T18:17:07
| 2017-03-04T18:13:50
|
R
|
UTF-8
|
R
| false
| true
| 294
|
rd
|
listn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{listn}
\alias{listn}
\title{Create a named-list.}
\usage{
listn(...)
}
\arguments{
\item{...}{list of variables used to create the named-list.}
}
\description{
Create a named-list.
}
\keyword{internal}
|
95fb898027c87bb403c9d22add8e792ea9afbda4
|
3f680c621d68cd817097e1a83915ceaead162e12
|
/R/getAllFunnels.R
|
64ea455962e576063c4d41572939170a3db48fdc
|
[] |
no_license
|
rohan-shah/mpMap2
|
46273875750e7a564a17156f34439a4d93260d6c
|
c43bb51b348bdf6937e1b11298b9cdfe7a85e001
|
refs/heads/master
| 2021-05-23T20:34:59.327670
| 2020-07-19T10:24:09
| 2020-07-19T10:24:09
| 32,772,885
| 10
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,349
|
r
|
getAllFunnels.R
|
#' @title Get funnels
#' @rdname getAllFunnels
#' @description Get the order of the founding lines, as they contribute to each line in the final population
#' @details In multi-parent experimental designs, the founding lines of the population are combined together through initial mixing generations. For experiments without further intercrossing generations, the order in which these mixing crosses occur influences the genotypes of the final lines. It can be important to examine or visualise these orders, which are known as funnels.
#'
#' This function returns a matrix, where each row corresponds to a genetic line in the final population, and each column corresponds to a position in the mixing step. So if a row of the returned matrix contains the values 4, 1, 2, 3, then the pedigree that generated the first individual in the experiment started by crossing founders 4 and 1 to give individual 41, and 2 and 3 to give individual 23. Then individuals 41 and 23 are crossed to generate individual 4123, which after inbreeding results in the first final genetic line.
#'
#' If sex is considered to be unimportant, then many orderings are equivalent. For example, the ordering 4, 1, 2, 3 of the initial founders is equivalent to 1, 4, 2, 3. In this case each funnel can be put into a standardised ordering, by setting \code{standardised} to \code{TRUE}.
#'
#' Note that if there are generations of random interbreeding in the population (often referred to as maintenance generations), then there is no "funnel" associated with a genetic line, and values of NA are returned. In that case, see \code{\link{getAllFunnelsIncAIC}}.
#'
#' Note that funnels for all pedigrees simulated by mpMap2 are already standardised. This will not generally be the case for real experiments.
#' @param cross The object of class \code{mpcross} containing the pedigree of interest
#' @param standardised Should the output funnels be standardised?
#' @return An integer matrix with rows representing genetic lines, and columns representing positions within the funnel.
#' @examples
#' data(simulatedFourParentData)
#' #Funnels used to generate the first ten lines
#' #Because this is simulated data, they are already standardised,
#' #with the first founder in the first position in the mixing step.
#' getAllFunnels(simulatedFourParentData)[1:10, ]
#' @export
getAllFunnels <- function(cross, standardised = FALSE)
{
  if(!is.logical(standardised) || length(standardised) != 1)
  {
    stop("Input standardised must be TRUE or FALSE")
  }
  # inherits() instead of class(cross) == "...": class() may return a vector
  # (errors in if() under R >= 4.2) and the equality test misses subclasses.
  if(inherits(cross, "geneticData"))
  {
    return(.Call("getAllFunnels", cross, standardised, PACKAGE="mpMap2"))
  }
  else if(inherits(cross, "mpcross"))
  {
    if(length(cross@geneticData) == 1)
    {
      return(.Call("getAllFunnels", cross@geneticData[[1]], standardised, PACKAGE="mpMap2"))
    }
    else
    {
      # Several embedded data sets: return one funnel matrix per data set.
      return(lapply(cross@geneticData, function(x) .Call("getAllFunnels", x, standardised, PACKAGE="mpMap2")))
    }
  }
  else
  {
    stop("Input must be of class geneticData or mpcross")
  }
}
#' @title Get all funnels, including AIC lines
#' @description Get every order of the founding lines, which makes a contribution to the final population
#' @rdname getAllFunnelsIncAIC
#' @details This function is similar to \code{\link{getAllFunnels}}, but more useful for populations with maintenance (or AIC) generations. It returns a list of all the mixing orders in the initial generations, which make a genetic contribution to the final population. Unlike for \code{\link{getAllFunnels}}, rows of the returned matrix DO NOT refer to specific genetic lines.
#' @param cross The object of class \code{mpcross} containing the pedigree of interest
#' @param standardised Should the output funnels be standardised?
#' @return Matrix of mixing orders that contribute to the final population. Rows DO NOT refer to specific genetic lines.
#' @examples
#' set.seed(1)
#' pedigree <- fourParentPedigreeRandomFunnels(initialPopulationSize = 1000,
#' selfingGenerations = 6, intercrossingGenerations = 1)
#' #Assume infinite generations of selfing in subsequent analysis
#' selfing(pedigree) <- "infinite"
#' #Generate random map
#' map <- qtl::sim.map(len = 100, n.mar = 101, anchor.tel = TRUE, include.x = FALSE)
#' #Simulate data
#' cross <- simulateMPCross(map = map, pedigree = pedigree, mapFunction = haldane, seed = 1L)
#' #Because we have maintenance in this experiment, we can't get out the funnels per genetic line
#' funnels <- getAllFunnels(cross)
#' dim(funnels)
#' funnels[1:10,]
#' #But we can get out a list of all the funnels that go into the experiment.
#' funnels <- getAllFunnelsIncAIC(cross)
#' dim(funnels)
#' funnels[1:10,]
#' @export
getAllFunnelsIncAIC <- function(cross, standardised = FALSE)
{
  if(!is.logical(standardised) || length(standardised) != 1)
  {
    stop("Input standardised must be TRUE or FALSE")
  }
  # inherits() instead of class(cross) == "...", for the same reasons as in
  # getAllFunnels().
  if(inherits(cross, "geneticData"))
  {
    # Fix: this branch previously called .Call("getAllFunnels", ...), a
    # copy-paste from getAllFunnels(), so AIC lines were silently excluded
    # when a bare geneticData object was passed.
    return(.Call("getAllFunnelsIncAIC", cross, standardised, PACKAGE="mpMap2"))
  }
  else if(inherits(cross, "mpcross"))
  {
    if(length(cross@geneticData) == 1)
    {
      return(.Call("getAllFunnelsIncAIC", cross@geneticData[[1]], standardised, PACKAGE="mpMap2"))
    }
    else
    {
      # Several embedded data sets: return one result per data set.
      return(lapply(cross@geneticData, function(x) .Call("getAllFunnelsIncAIC", x, standardised, PACKAGE="mpMap2")))
    }
  }
  else
  {
    stop("Input must be of class geneticData or mpcross")
  }
}
|
82df4d45d32108676457e3dff9304181c3ee467b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EngrExpt/examples/cure.Rd.R
|
f065892461312ce05564acfb21681cfcafbb917a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 313
|
r
|
cure.Rd.R
|
# Auto-extracted example script for the `cure` dataset in package EngrExpt.
# Prints a structure summary, draws a lattice dotplot of yield by temperature
# grouped by time, and fits a linear model with a time x temp interaction.
library(EngrExpt)
### Name: cure
### Title: Yield from a chemical curing process
### Aliases: cure
### Keywords: datasets
### ** Examples
str(cure)
# dotplot() comes from lattice, which EngrExpt presumably attaches -- TODO confirm
dotplot(temp ~ yield, cure, groups = time, type = c("p","a"),
jitter.y = TRUE, xlab = "Yield of parts")
summary(fm1 <- lm(yield ~ time * temp, cure))
|
da0a8260d25288704353f97204618d2c64231493
|
184d33fbe6d0ab73a260d0db9d3849df00d33786
|
/rcmdr.temis/R/output.R
|
ca0420d62eb906e43f4e7fc594ab746314bbf8f8
|
[] |
no_license
|
nalimilan/R.TeMiS
|
65660d9fbe4c8ca7253aeba5571eab4445736c99
|
3a8398038595807790087c36375bb26417ca606a
|
refs/heads/master
| 2023-04-30T18:04:49.721122
| 2023-04-25T19:45:04
| 2023-04-25T19:45:04
| 81,315,737
| 25
| 7
| null | 2020-06-29T21:45:06
| 2017-02-08T10:07:16
|
C
|
UTF-8
|
R
| false
| false
| 10,846
|
r
|
output.R
|
# Prompt the user for an HTML file to store analysis results, initialise it
# via initOutputFile(), and optionally open it in the browser.
# Returns TRUE on success, FALSE if the user cancelled the save dialog.
# NOTE(review): depends on Rcmdr (doItAndPrint, activateMenus,
# CommanderWindow), R2HTML (HTMLGetFile) and tcltk being available -- these
# are provided by the enclosing package, not this file.
setOutputFile <- function(..., browse=TRUE) {
file <- NULL
# HTMLGetFile() errors when no output file has been set yet; in that case
# fall back to "." as the dialog's starting directory.
try(file <- HTMLGetFile(), silent=TRUE)
if(!is.null(file))
dir <- dirname(file)
else
dir <- "."
# Modal "Save as" dialog restricted to .html files.
file <- tclvalue(tkgetSaveFile(title=.gettext("Select a file to save results"),
filetypes=sprintf("{{%s} {.html}}",
.gettext("HTML file")),
defaultextension=".html",
initialdir=dir,
parent=CommanderWindow()))
# Empty string means the user cancelled the dialog.
if (file == "") return(FALSE)
doItAndPrint(sprintf('initOutputFile("%s")', file))
# Set options for good formatting
options(R2HTML.format.decimal.mark=.gettext("."))
# The openOutputFile menu needs to notice the new file
activateMenus()
if(browse)
doItAndPrint("browseURL(R2HTML::HTMLGetFile())")
return(TRUE)
}
# Create the HTML report file: write an encoding-aware header, register the
# file with R2HTML, and emit a summary of the corpus (import date, language,
# source, dimensions, processing options).
# NOTE(review): reads the globals `corpus` and `dtm` set elsewhere by the
# package -- TODO confirm they are guaranteed to exist when this is called.
initOutputFile <- function(file) {
title <- .gettext("Text Mining Analysis Results")
# R2HTML uses cat() to output text, which in turns uses the value of getOption("encoding")
# By default, this corresponds to native.enc returned by localeToCharset()
enc <- getOption("encoding", "")
if(enc %in% c("", "native.enc"))
enc <- localeToCharset()[1]
if(is.na(enc)) # In case system encoding could not be detected
enc <- "UTF-8"
# R2HTML does not add encoding information to the HTML headers, even when using HTMLInitFile
header <- sprintf('<head>\n<meta http-equiv="Content-Type" content="text/html; charset=%s"/>\n<title>%s</title>\n</head>\n',
enc, title)
# Overwrites any existing content at `file`, then makes it the active R2HTML target.
writeLines(header, file)
HTMLSetFile(file)
HTML.title(title, 1, append=TRUE)
HTML(sprintf(.gettext("Corpus imported on %s. Language: %s."),
# c() is needed to get rid of the timezone attribute, set to GMT by tm
format(c(meta(corpus, type="corpus", tag="create_date")), "%c"),
meta(corpus, type="corpus", tag="language")))
HTML(sprintf(.gettext("Source: %s."), meta(corpus, type="corpus", tag="source")))
HTML(sprintf(.gettext("%i documents and %i terms."), nrow(dtm), ncol(dtm)))
cat(.gettext("Processing options:"), "\n", sep="", file=file, append=TRUE)
processing <- meta(corpus, type="corpus", tag="processing")
# Keep in sync with strings in importCorpusDlg()
HTMLli(paste(c(.gettext("Ignore case"), .gettext("Remove punctuation"),
.gettext("Remove digits"), .gettext("Remove stopwords"),
.gettext("Apply stemming")),
.gettext(": "),
ifelse(processing[c("lowercase", "punctuation", "digits", "stopwords", "stemming")],
.gettext("enabled"), .gettext("disabled")),
".", sep=""))
}
# Open the current HTML report in the system browser. Shows an error dialog
# (and returns invisibly) when no report has been created or the file was
# deleted on disk.
openOutputFile <- function() {
    report <- NULL
    # HTMLGetFile() errors when no output file has ever been set.
    try(report <- HTMLGetFile(), silent=TRUE)
    if (is.null(report)) {
        .Message(.gettext("No report file has been created yet."), type="error")
        return()
    }
    if (!file.exists(report)) {
        .Message(.gettext("Report file does not exist (it was probably removed)."), type="error")
        return()
    }
    doItAndPrint("browseURL(R2HTML::HTMLGetFile())")
}
# Remember `name` as the most recently built table (global `last.table`),
# optionally attaching a display title used when copying it to the report.
setLastTable <- function(name, title=NULL) {
    recordCmd <- sprintf('last.table <- "%s"', name)
    justDoIt(recordCmd)
    if (!is.null(title)) {
        titleCmd <- sprintf('attr(%s, "title") <- "%s"', name, title)
        doItAndPrint(titleCmd)
    }
}
# Copy the most recently built table (named by the global `last.table`) into
# the HTML report, creating the report file first if needed. Dispatches on
# the table's class to pick the right R2HTML call.
copyTableToOutput <- function() {
if(!exists("last.table") || !exists(last.table)) {
.Message(.gettext("No table has been built yet. Please create a table first."), type="error")
return()
}
file <- NULL
try(file <- HTMLGetFile(), silent=TRUE)
# html.on: TRUE when a report file is registered AND still exists on disk.
if(!is.null(file))
html.on <- file.exists(file)
else
html.on <- FALSE
# No usable report: ask the user to create one; bail out on cancel.
if(!(html.on || setOutputFile(browse=FALSE)))
return()
# Needed when copying CA, HTML.ca() is too late to update the GUI
setBusyCursor()
on.exit(setIdleCursor())
tab <- get(last.table)
title <- attr(tab, "title")
if(length(title) > 0)
doItAndPrint(sprintf("R2HTML::HTML.title('%s', 3)", attr(tab, "title")))
# zoo objects are printed as plain text by default
if(inherits(tab, "zoo"))
doItAndPrint(sprintf('R2HTML::HTML(as.matrix(%s), Border=NULL, align="left", scientific=4)', last.table))
# HTML.array already passes Border=0, so Border=NULL generates an error
else if(inherits(tab, "array"))
doItAndPrint(sprintf('R2HTML::HTML(%s, align="left", scientific=4)', last.table))
else if(inherits(tab, "list"))
doItAndPrint(sprintf('HTML.list(%s, Border=NULL, align="left", scientific=4)', last.table))
else
doItAndPrint(sprintf('R2HTML::HTML(%s, Border=NULL, align="left", scientific=4)', last.table))
# Open file in browser when creating it
if(!html.on)
doItAndPrint("browseURL(R2HTML::HTMLGetFile())")
# If output file was removed, we recreate it, and the openOutputFile menu needs to notice it
activateMenus()
}
# Save the current graphics device to a timestamped PNG next to the HTML
# report and insert an <img> reference to it, creating the report first if
# needed.
copyPlotToOutput <- function() {
if(length(dev.list()) == 0) {
.Message(.gettext("No plot has been drawn yet. Please create a plot first."), type="error")
return()
}
file <- NULL
try(file <- HTMLGetFile(), silent=TRUE)
if(!is.null(file))
html.on <- file.exists(file)
else
html.on <- FALSE
if(!(html.on || setOutputFile(browse=FALSE)))
return()
# Only the filename within the folder is needed, this allows moving HTML and PNG files to another folder
filename <- gsub(".html$", "", basename(file))
file <- paste(filename, format(Sys.time(), .gettext(" - plot %Y-%m-%d %H-%M")), ".png", sep="")
# NOTE(review): the collision loop tests `testfile` relative to the current
# working directory, not the report's directory, and re-formats Sys.time()
# each iteration -- looks fragile; verify against the report folder.
i <- 1
testfile <- file
while(file.exists(testfile)) {
i <- i + 1
testfile <- paste(filename, format(Sys.time(), .gettext(" - plot %Y-%m-%d %H-%M")),
"-", i, ".png", sep="")
}
if(file.exists(file))
file <- testfile
# Render the active device to PNG inside the report's directory.
doItAndPrint(sprintf('dev.print(png, width=7, height=7, unit="in", res=200, filename="%s")',
paste(dirname(file), .Platform$file.sep, file, sep="")))
doItAndPrint(sprintf('R2HTML::HTMLInsertGraph("%s", "", 0, "left")', file))
# Open file in browser when creating it
if(!html.on)
doItAndPrint("browseURL(R2HTML::HTMLGetFile())")
# If output file was removed, we recreate it, and the openOutputFile menu needs to notice it
activateMenus()
}
# Switch lattice plotting to a black-and-white theme, refresh the plot shown
# in the current device (if any), record the preference, and refresh menus.
enableBlackAndWhite <- function() {
    doItAndPrint("lattice.options(default.theme=standard.theme(color=FALSE))")
    # A device number above 1 means a plot window is open; redraw it so the
    # theme change is visible immediately.
    deviceOpen <- dev.cur() > 1
    if (deviceOpen) {
        for (redrawCmd in c("trellis.device(new=FALSE)", "trellis.last.object()")) {
            doItAndPrint(redrawCmd)
        }
    }
    options(bw.plots=TRUE)
    activateMenus()
}
# Restore the package's default colour lattice theme (Set1/Spectral palettes),
# refresh the plot in the current device (if any), record the preference, and
# refresh menus.
disableBlackAndWhite <- function() {
# Keep in sync with .onAttach()
# We can stop specifying region when latticeExtra uses RColorBrewer:: for its default value:
# https://r-forge.r-project.org/tracker/index.php?func=detail&aid=4853&group_id=232&atid=942
doItAndPrint('lattice.options(default.theme=latticeExtra::custom.theme(symbol=RColorBrewer::brewer.pal(8, "Set1")[c(2:1, 3:5, 7:9)], fill=RColorBrewer::brewer.pal(8, "Set1")[c(2:1, 3:5, 7:9)], region=RColorBrewer::brewer.pal(n=11, name="Spectral")))')
# Update current plot if there is one
if(dev.cur() > 1) {
doItAndPrint("trellis.device(new=FALSE)")
doItAndPrint("trellis.last.object()")
}
options(bw.plots=FALSE)
activateMenus()
}
# The default HTML.list function does not print element names,
# and redirects align="left" to cat(), which prints it to the file
# S3 method: write each list element to the HTML report as a named bullet,
# separated by horizontal rules. `...` is forwarded to HTML() for each item.
HTML.list <- function (x, file = HTMLGetFile(), first = TRUE, append = TRUE, ...)
{
cat("\n", file = file, append = append)
if (first)
HTML("<hr class='hr'>", file = file, append = TRUE, sep = "\n")
# seq_along() instead of 1:length(x): for an empty list 1:length(x) is
# c(1, 0), which made the loop body run twice with out-of-bounds indices.
for (i in seq_along(x)) {
cat("<ul>", file = file, append = TRUE, sep = "\n")
cat("</center><li>", file = file, append = TRUE, sep = "\n")
HTML(paste(names(x)[i], "\n", sep=""), file = file, first = FALSE, ...)
if(length(x[[i]]) > 0)
HTML(x[[i]], file = file, first = FALSE, ...)
else
HTML(.gettext("No items."), file = file, first = FALSE, ...)
cat("</ul>", file = file, append = TRUE, sep = "\n")
}
cat("\n<br><hr class='hr'>", file = file, append = TRUE,
sep = "\n")
}
# This function uses parts from summary.ca() from package ca, version 0.53.
# Released under the GPL (no version specified), Copyright Michael Greenacre
# and Oleg Nenadic <onenadi at uni-goettingen.de>.
# http://cran.r-project.org/web/packages/ca/index.html
# S3 method: write a correspondence-analysis summary to the HTML report --
# an eigenvalue table with an ASCII scree bar, then row (documents/variables)
# and column (terms) statistics with localised column names.
HTML.ca <- function(x, ...) {
object <- summary.ca(x)
if (!is.na(object$scree)[1]){
cat("\n")
nchars <- 25
# Scree columns: dimension index, eigenvalue, % explained, cumulative %.
Dim <- object$scree[,1]
ev <- object$scree[,2]
rev <- object$scree[,3]
crev <- object$scree[,4]
Value <- ev[Dim]
EV <- rev[Dim]
CUMEV <- crev[Dim]
# Scale the per-axis percentages to bar lengths of at most `nchars` stars.
if (length(rev)>1) {
st <- round(nchars * (rev - min(rev)) / diff(range(rev)), 0)
} else {
st <- nchars
}
scree <- character(length(Dim))
for (q in Dim) {
s1 <- paste(rep("*", st[q]), collapse = "")
s2 <- paste(rep(" ", nchars - st[q]), collapse = "")
scree[q] <- paste(" ", s1, s2, sep = "")
}
# Final row holds totals; its scree cell is left blank.
scree.out <- data.frame(Value = c(Value, sum(Value)),
EV = c(EV, sum(EV)),
CUMEV = c(CUMEV, sum(EV)),
scree = c(scree, ""))
colnames(scree.out) <- c(.gettext("Value"), .gettext("%"), .gettext("Cum. %"), "")
HTML(paste(.gettext("Axes inertias (eigenvalues):"), "\n", sep=""), ...)
# scree.out <- as.matrix(scree.out)
# colnames(scree.out) <- rep(1, dim(scree.out)[1])
# print(as.matrix(scree.out), quote = FALSE)
# fix for rownames showing up in scree-plot
# dimnames(scree.out)[[1]] <- rep("", length(dimnames(scree.out)[[1]]))
rownames(scree.out) <- c(seq(nrow(scree.out) - 1), .gettext("Total:"))
HTML(scree.out, ...)
}
# First column of the summary tables holds the labels; promote it to
# rownames and relabel the remaining statistic columns with localised names.
rownames(object$row) <- object$row[[1]]
rownames(object$col) <- object$col[[1]]
object$row <- object$row[-1]
object$col <- object$col[-1]
names(object$row) <- names(object$col) <- c(.gettext("Mass"), .gettext("Quality"), .gettext("Inertia"),
outer(c(.gettext("Coord"), .gettext("Quality"), .gettext("Contr")),
seq((length(object$row) - 3)/3), paste, sep=""))
HTML(.gettext("Documents and variables:"), ...)
HTML(object$row, ...)
HTML(.gettext("Terms:"), ...)
HTML(object$col, ...)
}
|
98934893badef5bd632da430f05a457f7e3955e3
|
2ddd0d8d4fc8f009fa51d0fde61d1d40825d4d2d
|
/man/filter_data.Rd
|
eb379d5deec9a96495312eaef99d3713fa1bdac0
|
[
"MIT",
"CC-BY-4.0",
"CC0-1.0"
] |
permissive
|
januz/comsldpsy-1
|
7029dbc5c0dcbe7893ca5ee67c59b1f5f81c0018
|
fdf6248c53fdbc7cef4885fb630d5d188d8ff081
|
refs/heads/master
| 2020-04-10T21:36:55.722283
| 2018-12-12T17:56:54
| 2018-12-12T17:56:54
| 161,301,150
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 449
|
rd
|
filter_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter.R
\name{filter_data}
\alias{filter_data}
\title{Filter data}
\usage{
filter_data(data, df_filter_cond)
}
\arguments{
\item{data}{tbl. The data frame to be filtered}
\item{df_filter_cond}{tbl. A data frame with exclusion criteria}
}
\value{
tbl. The filtered data frame
}
\description{
Filter out participants that fulfill one or several of the exclusion criteria.
}
|
5182dae12e5fd0a2c93dd9c3c3d71d416fbad2b7
|
80f66a992fc733aab681dfa1e103db8105609725
|
/NYCTRS_Data_readMemberData_AV2016.R
|
6550e8af0049ba1d1c3132475ff518014bee877c
|
[] |
no_license
|
yimengyin16/RSF_NYCTRS
|
cd5cfcad260f1d03fe06f472a263f2e96325acb1
|
82326660d9bc525089772a84837851dfad3713f1
|
refs/heads/master
| 2021-06-05T16:12:11.367594
| 2019-03-17T14:30:25
| 2019-03-17T14:30:25
| 152,126,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,116
|
r
|
NYCTRS_Data_readMemberData_AV2016.R
|
#This script loads demographic data as of June 30, 2016 provided in the 2017 CAFR of NYCTRS and FOIL data from NYCTRS
## Member Data from CAFR 2017
# Tables to load:
# Service retirees by age (as of Jun30, 2016) : p169 (Sheet8) Schedule 13 service retirement allowance - QPP
# Disability retirees by age (as of Jun30, 2016) :p170 (Sheet9) Schedule 14 and 15 Ordinary/Accident disability retirement allowance - QPP
# Survivors by age (as of Jun30, 2016) : p171 (Sheet10 ) Schedule 16 Survivors' benefit - QPP
# TDA membership by age (as of Jun30 2017) : p174 (Sheet14) Schedule 23 Membership by age and type (count and fund balance )
# Active members by age (as of Jun30, 2016) : p165 (Sheet4) Schedule 6 Table of average salaries of in-service members-QPP
# Active members by tier (2007-2017) : p166 (Sheet5) Schedule 7 In-service membership by tier and by title - QPP
# Tables that are not loaded but are useful modeling and/or calibration
# Average years of service by gender p167 (Sheet6)
# Payment options chosen at Retirement p167 (Sheet6), also average age at retirement
# Retirees' average monthly payments and FAS by YOS p168 (Sheet7) (For calibration?)
# TDA program summary (changes in membership): schedule 21 (Sheet12) (increased from 70k to 85k in 10 years)
# TDA annuitant summary: schedule 22 (Sheet13) (number decreasing over time)
# TDA withdrawal by age and type (count and amount) Schedule 24 (Sheet13): RMD,Partial, survivors, payments, total, 401a service purchase
# TDA fund conversion: schedule 25 (Sheet14): most common conversion: VA(diversified equity fund) to FX (Fixed Return fund),
# VE (Socially Responsive Equity fund) to FX common among young members.
## Member Data from FOIL
## Setting file path
# Workbook path is relative to the project root; read_excel() below comes
# from readxl, attached elsewhere in the project -- TODO confirm.
dir_data <- "Inputs_data/"
file_name <- "NYCTRS_MemberData_AV2016.xlsx"
file_path <- paste0(dir_data, file_name)
# Echo the path so interactive runs show which workbook is read.
file_path
## 1. Active members ####
# df_nactives <-
# read_excel(file_path, sheet = "Sheet4", range = "A22:F33") %>%
# select(age_grp = 1, nactives_male = 2, salary_male = 3, nactives_female = 5, salary_female = 6) %>%
# separate(age_grp, c("age_lb", "age_ub"), convert= T) %>%
# mutate_at(vars(age_lb, age_ub), funs(as.numeric(.)))
#
# df_nactives[1, c("age_lb", "age_ub")] <- c(20, 24)
# df_nactives[nrow(df_nactives), c("age_lb", "age_ub")] <- c(70, 74)
# df_nactives
# Raw actives-by-age/yos worksheets: one per tier plus an all-tiers total.
df_nactives_allTieres <-
read_excel(file_path, sheet = "Actives_AllTiers", range = "B7:M30")
df_nactives_tier1 <-
read_excel(file_path, sheet = "Actives_Tier1", range = "B7:M30")
df_nactives_tier2 <-
read_excel(file_path, sheet = "Actives_Tier2", range = "B7:M30")
df_nactives_tier4 <-
read_excel(file_path, sheet = "Actives_Tier4", range = "B7:M30")
df_nactives_tier6 <-
read_excel(file_path, sheet = "Actives_Tier6", range = "B7:M30")
# Rearrange active member data
# Reshape one raw actives worksheet into long format: one row per
# (age cell, yos cell) with member counts and average salaries, tagged with
# the tier label and valuation year.
# NOTE(review): relies on tidyverse/magrittr (%<>%, gather, spread, funs)
# being attached by the calling project; gather/spread/funs are superseded
# in current tidyr/dplyr -- behavior preserved here, flagged for upgrade.
arrange_memberData <- function(df, tier, AV_year){
# df <- df_nactives_allTieres
# tier <- "allTiers"
# AV_year <- 2016
df %<>%
filter(!is.na(type)) %>%
#mutate(keyVar = paste0(type, age.cell)) %>%
gather(yos.cell, value, -type, -age.cell, -agegrp)
# The "yosgrp" rows carry the yos group labels; split them off and join the
# labels back onto the data rows by yos cell.
df_yosgrp <- df %>% filter(type == "yosgrp") %>% select(yos.cell, yosgrp = value)
df %<>%
filter(type != "yosgrp") %>%
left_join(df_yosgrp, by = "yos.cell") %>%
mutate_at(vars(age.cell, yos.cell, value), funs(as.numeric)) %>%
mutate(AV_year = AV_year,
tier = tier) %>%
spread(type, value) %>%
select(AV_year, tier, age.cell, agegrp, yos.cell, yosgrp, nactives, salary) %>%
arrange(age.cell, yos.cell)
}
# Reshape each tier's raw worksheet into long format.
# NOTE(review): AV_year is passed as the string "2016" here but as numeric
# 2016 for df_nactives_CAFR17 below -- verify downstream code tolerates both.
df_nactives_allTieres %<>% arrange_memberData("allTiers", "2016")
df_nactives_tier1 %<>% arrange_memberData("tier1", "2016")
df_nactives_tier2 %<>% arrange_memberData("tier2", "2016")
df_nactives_tier4 %<>% arrange_memberData("tier4", "2016")
df_nactives_tier6 %<>% arrange_memberData("tier6", "2016")
# agecuts and yoscuts
# index tables for age cells and yos cells
# Derive lookup tables mapping each age/yos cell to its numeric group bounds.
agecuts <- df_nactives_allTieres %>% select(age.cell, agegrp)
agecuts <- agecuts[!duplicated(agecuts), ]
agecuts %<>%
separate(agegrp, into = c("agelb", "ageub")) %>%
mutate_all(funs(as.numeric))
yoscuts <- df_nactives_allTieres %>% select(yos.cell, yosgrp)
yoscuts <- yoscuts[!duplicated(yoscuts), ]
yoscuts %<>%
separate(yosgrp, into = c("yoslb", "yosub")) %>%
mutate_all(funs(as.numeric))
# Echo for interactive inspection.
agecuts
yoscuts
# Reading data of actives by gender from CAFR2017 ####
df_nactives_CAFR17 <-
read_excel(file_path, sheet = "Actives_CAFR17", range = "C6:H17") %>%
select(age_grp = 1, nactives_male = 2, salary_male = 3, nactives_female = 5, salary_female = 6) %>%
separate(age_grp, c("age_lb", "age_ub"), convert= T) %>%
mutate_at(vars(age_lb, age_ub), funs(as.numeric(.))) %>%
mutate(AV_year = 2016,
tier = "allTiers")
# First and last age groups are open-ended in the source ("under 25",
# "70 & over"); pin them to explicit bounds.
df_nactives_CAFR17[1, c("age_lb", "age_ub")] <- c(20, 24)
df_nactives_CAFR17[nrow(df_nactives_CAFR17), c("age_lb", "age_ub")] <- c(70, 74)
df_nactives_CAFR17
## 2. share of tiers ####
# NOTE(review): "Teir1" in col_names looks like a typo for "Tier1", but it is
# a runtime column name -- check downstream references before renaming.
df_TierShares <-
read_excel(file_path, sheet = "ShareTier_Raw", range = "B7:H16",
col_names = c("year", "age_avg", "Teir1", "Tier2", "Tier3", "Tier4", "Tier6"))
## 3. Service retirees ####
df_nservRet <-
read_excel(file_path, sheet = "Retirees", range = "C9:H22") %>%
select(age_grp = 1, nservRet_male = 2, benefit_male = 3, nservRet_female = 5, benefit_female = 6) %>%
separate(age_grp, c("age_lb", "age_ub")) %>%
mutate_at(vars(age_lb, age_ub), funs(as.numeric(.)))
# Pin the open-ended last age group to an explicit upper bound.
df_nservRet[nrow(df_nservRet), "age_ub"] <- 94
df_nservRet
## 4. Disability retirees ####
# Ordinary disability retirees; blank benefit cells become zero via na2zero().
df_ndisbRet_ord <-
read_excel(file_path, sheet = "Disabled", range = "C9:H23") %>%
select(age_grp = 1, ndisbRet_ord_male = 2, benefit_male = 3, ndisbRet_ord_female = 5, benefit_female = 6) %>%
separate(age_grp, c("age_lb", "age_ub")) %>%
mutate_at(vars(age_lb, age_ub, benefit_male, benefit_female), funs(na2zero(as.numeric(.))))
df_ndisbRet_ord[1, c("age_lb", "age_ub") ] <- c(25,29)
df_ndisbRet_ord[nrow(df_ndisbRet_ord), c("age_lb", "age_ub")] <- c(90, 94)
df_ndisbRet_ord
# Accidental disability retirees, same layout lower on the sheet.
df_ndisbRet_acc <-
read_excel(file_path, sheet = "Disabled", range = "C31:H45") %>%
select(age_grp = 1, ndisbRet_acc_male = 2, benefit_male = 3, ndisbRet_acc_female = 5, benefit_female = 6) %>%
separate(age_grp, c("age_lb", "age_ub")) %>%
mutate_at(vars(age_lb, age_ub, benefit_male, benefit_female), funs(na2zero(as.numeric(.))))
df_ndisbRet_acc[1, c("age_lb", "age_ub") ] <- c(25,29)
df_ndisbRet_acc[nrow(df_ndisbRet_acc), c("age_lb", "age_ub")] <- c(90, 94)
df_ndisbRet_acc
## 5. Survivors ####
# Survivors/beneficiaries by age group, counts and benefits by gender.
df_nsurvivors <-
read_excel(file_path, sheet = "Beneficiaries", range = "C7:H21") %>%
select(age_grp = 1, nsurvivors_male = 2, benefit_male = 3, nsurvivors_female = 5, benefit_female = 6) %>%
separate(age_grp, c("age_lb", "age_ub")) %>%
mutate_at(vars(age_lb, age_ub), funs(as.numeric(.)))
# Pin the open-ended first and last age groups to explicit bounds.
df_nsurvivors[1, c("age_lb", "age_ub") ] <- c(25,29)
# Fix: index the last row of df_nsurvivors itself, not df_ndisbRet_ord
# (copy-paste from section 4; the two tables need not have equal row counts).
df_nsurvivors[nrow(df_nsurvivors), c("age_lb", "age_ub")] <- c(90, 94)
df_nsurvivors
## 6. TDA withdrawals ####
# TDA withdrawal counts (n_*) and dollar amounts (d_*) by age and type;
# str_extract keeps only the leading digits of the age labels, and blank
# cells become zero via na2zero().
df_TDAwithdrawal <-
read_excel(file_path, sheet = "TDAwithdrawal_Raw", range = "B7:L20",
col_names = c("age",
"n_partial", "d_partial",
"n_401k", "d_401k",
"n_RMD", "d_RMD",
"n_total", "d_total",
"n_surv", "d_surv")) %>%
mutate(age = str_extract(age, "\\d+")) %>%
mutate_all(funs(na2zero(as.numeric(.)) ))
df_TDAwithdrawal
## Review and save results ####
# Echo each table for interactive review before saving.
#df_nactives
df_nservRet
df_ndisbRet_acc
df_ndisbRet_ord
df_nsurvivors
df_TierShares
df_TDAwithdrawal
# The all-tiers table doubles as the generic actives table downstream.
df_nactives <- df_nactives_allTieres
# Persist all raw member tables in one RData file for later model scripts.
save(df_nactives,
df_nactives_tier1,
df_nactives_tier2,
df_nactives_tier4,
df_nactives_tier6,
df_nactives_CAFR17,
agecuts,
yoscuts,
df_nservRet,
df_ndisbRet_acc,
df_ndisbRet_ord,
df_nsurvivors,
df_TierShares,
df_TDAwithdrawal,
file = paste0(dir_data, "Data_memberData_raw_AV2016.RData")
)
|
a714df591f07f25c64e36b8d38196412eef6cc27
|
0958797b18cb05c11859b1455a519aba91220dbb
|
/R/km.R
|
7189debe7e2d2aa03ec811d49045990e29a14f00
|
[] |
no_license
|
sarupurisailalith/RStudioAddins
|
da8f97552ff2cf68e76df23919fc9fc4f44c466a
|
7ad26c0cfeca4b529476635d2b86d081ecff2c36
|
refs/heads/master
| 2021-01-09T20:41:22.104779
| 2016-08-02T15:03:48
| 2016-08-02T15:03:48
| 64,377,925
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,576
|
r
|
km.R
|
# K-means clustering model diagnostics
#
# RStudio gadget (miniUI) that lets the user pick a fitted kmeans object and
# its input data from the calling environment, then displays a model summary,
# a two-variable cluster scatter plot (plotly), a silhouette plot (cluster),
# and the Dunn index (clValid). Runs until the user presses Done; returns
# nothing useful (side-effect UI only).
km_diagnostics <- function() {
# NOTE(review): require() inside a function returns FALSE instead of erroring
# when a package is missing; library() would fail fast — left as-is here.
require("shiny")
require("plotly")
require("miniUI")
require("cluster")
require("clValid")
# --- UI definition -------------------------------------------------------
# Two dropdowns list every object name in the caller's environment
# (parent.frame(1)); the user picks the model and the data, then clicks Load.
ui <- miniPage(
gadgetTitleBar("k-means clustering model diagnostics"),
miniContentPanel(
fluidRow(
fluidRow(align = "center",
fluidRow(
column(6,selectInput("model", "Select model object", choices = c("",unlist(ls(envir = parent.frame(1)))), selected = NULL, width = "50%")),
column(6,selectInput("indata", "Select the input data object", choices = c("",unlist(ls(envir = parent.frame(1)))), selected = NULL, width = "50%"))
),br(),
actionButton("load", "Load model object")
),
# Placeholder filled in by the server once a valid model is loaded.
fluidRow( uiOutput("performance"))
)
)
)
# --- Server logic --------------------------------------------------------
server <- function(input, output) {
# Reactive store: fitted model, its input data, and the Dunn index.
values <- reactiveValues()
values$fit_obj <- list()
values$inData <- data.frame()
values$dunn <- c()
# On Load: fetch the selected objects by name from the caller's environment
# and compute the Dunn index from the cluster assignments.
observeEvent(input$load, {
values$fit_obj <- get(input$model, envir = parent.frame(1))
values$inData <- get(input$indata, envir = parent.frame(1))
values$dunn <- dunn(clusters = values$fit_obj[["cluster"]], Data = values$inData)
})
# Show diagnostics only when the loaded object really is a kmeans fit;
# otherwise show an instruction message.
output$performance <- renderUI({
if (inherits(values$fit_obj, "kmeans")){
uiOutput("kmModel")
} else {
fluidRow(align = "center", HTML("<br><br>Select and load a k-means clustering model object into Workspace.
"))
}
})
# Layout of the diagnostics page: summary + key stats on top,
# cluster plot and silhouette plot below.
output$kmModel <- renderUI({
fluidRow(
br(),br(),
fluidRow(align = "center", HTML("<h3>Model Summary:</h3>")),
br(),
fluidRow(align="center",
column(1),
column(5,verbatimTextOutput("km_summary")),
column(5, uiOutput("components")),
column(1)),
br(),
fluidRow(align = "center",br(),
HTML("<h3>Clustering performance indices and plots:</h3>"),br(),
fluidRow(align = "center",
column(1),
column(5,uiOutput("km_plot")),
column(5,uiOutput("silhouette")),
column(1)
),
br()
)
)
})
# Headline statistics pulled from the kmeans fit object.
output$components <- renderUI({
# Referencing input$load re-renders this panel whenever Load is pressed.
input$load
# NOTE(review): fti is assigned but never used below — likely leftover.
fti <- values$fit_obj
fluidRow(
br(),br(),br(),br(),
HTML("<h4> Total sum of squares: <b>",round(values$fit_obj[["totss"]], digits=3),"</b></h4>"), br(),br(),
HTML("<h4> Total within-cluster sum of squares: <b>",round(values$fit_obj[["tot.withinss"]], digits = 3),"</b></h4>"), br(),br(),
HTML("<h4> Between cluster sum of squares: <b>",round(values$fit_obj[["betweenss"]], digits = 3),"</b></h4>"), br(),br(),
HTML("<h4> Number of iterations: <b>",values$fit_obj[["iter"]],"</b></h4>"), br(),br()
)
})
# Default print method of the kmeans object, shown verbatim.
output$km_summary <- renderPrint({
fit <- values$fit_obj
print(fit)
})
# Controls for the two-variable scatter plot: the user picks which columns
# of the input data to put on the x and y axes.
output$km_plot <- renderUI({
fluidRow(
HTML("<b><u>Visualize the clusters: two variable plot</u></b><br>"),br(),
fluidRow(
column(6, selectInput("x", "Select X:", choices = names(values$inData), selected = NULL)),
column(6, selectInput("y", "Select Y:", choices = names(values$inData), selected = NULL))
),
plotlyOutput("varplot", width = "90%")
)
})
# Scatter plot of the two chosen columns, colored by cluster assignment.
output$varplot <- renderPlotly({
dat <- values$inData
fit <- values$fit_obj
plt <- plot_ly(x = dat[,input$x], y = dat[,input$y], mode = "markers", color = factor(fit$cluster))
plt %>% layout(xaxis = list(title = input$x), yaxis = list(title = input$y))
})
# Silhouette plot plus the Dunn index computed at load time.
output$silhouette <- renderUI({
fluidRow(
fluidRow(align = "center",
HTML("<br><b><u>Silhouette Plot</u></b>")
),
fluidRow(align = "center",
plotOutput("silPlot", width = "90%")
),
HTML(paste('<br><br><div align = "center"><b><u> Dunn Index: </u></b>', '<h4>', round(values$dunn, digits = 3), '</h4></div>'))
)
})
# Silhouette widths from the full dissimilarity matrix (cluster::daisy).
# NOTE(review): daisy() is O(n^2) in rows — slow for large input data.
output$silPlot <- renderPlot({
fit <- values$fit_obj
dat <- values$inData
len <- length(fit$size)
dissE <- daisy(dat)
plot(silhouette(fit$cluster, dissE), col = 1:len, main = "")
})
# Done button closes the gadget.
observeEvent(input$done, {
stopApp()
})
}
# Run in the RStudio Viewer pane.
runGadget(ui, server, viewer = paneViewer())
}
|
e046b58c70465f64b523e806338c055358bd556d
|
72a445a223967b1abff6eb548e5c9908d9d08c52
|
/man/isRT.Rd
|
dc4fc7523be619d2e1ae589bb3d4166f9d453681
|
[
"MIT"
] |
permissive
|
s0eren/mafun
|
46a4e949152ccb747fcbd9c7a2a346d43b742695
|
92615b2a7a8559cba2f205f5b568fed7cb8ba17f
|
refs/heads/master
| 2020-05-17T08:44:45.211693
| 2015-01-31T13:26:20
| 2015-01-31T13:26:20
| 26,682,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 647
|
rd
|
isRT.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{isRT}
\alias{isRT}
\title{isRT}
\usage{
isRT(tweet)
}
\arguments{
\item{tweet}{A character string OR a vector of character strings.}
}
\description{
This function checks whether a specific Tweet-Input can be categorized as Retweet.
}
\details{
Retweets in Twitter are indicated by starting with: 'RT'.
To ensure proper working the input string is transformed to lower case and the function checks whether the first two characters of the string are 'rt'
}
\examples{
tweet1 <- 'RT @aSuperTwitterUser I am awesome!'
tweet2 <- 'I am awesome!'
isRT(tweet1)
# [1] TRUE
isRT(tweet2)
# [1] FALSE
}
|
9f8bb8ca5c7cccc4264f1a1b607c5de866c2a778
|
47530697355af33b13df82d54b833f1210f13c54
|
/clarity2sims/man/sim_clarity2_trial.Rd
|
a1c0d8579b1b70b78f28ec8d478a63b5f9a0c799
|
[
"MIT"
] |
permissive
|
michaeldymock25/clarity2
|
9f933c2ee96f704a7e5fa1c3427a3f9bb7b0c188
|
7d703c3540b473e2a52213a131f0af69319d6199
|
refs/heads/main
| 2023-08-20T08:41:51.670140
| 2021-11-02T00:19:16
| 2021-11-02T00:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 888
|
rd
|
sim_clarity2_trial.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clarity2sims.R
\name{sim_clarity2_trial}
\alias{sim_clarity2_trial}
\title{sim_clarity2_trial}
\usage{
sim_clarity2_trial(
mod,
n_seq = seq(600, 2100, 300),
p_assign = rep(1/3, 3),
alpha = stats::qlogis(cumsum(c(16, 29, 32, 13, 2, 1, 1, 6)/100)[1:7]),
eta = c(0, -0.5, 0.5),
eff_eps = 0.975,
...
)
}
\arguments{
\item{mod}{A list (model, model data)}
\item{n_seq}{Sequence of interim analysis sample sizes}
\item{p_assign}{Assignment probabilities to treatment arms}
\item{alpha}{True intercept parameter}
\item{eta}{True eta parameter}
\item{eff_eps}{Effectiveness threshold}
\item{...}{Other arguments to cmdstanr::sample, e.g. adapt_delta, chains, etc.}
}
\value{
A list of trial related data.tables
}
\description{
Simulate a clarity2 trial
}
\details{
Simulates a Clarity 2.0 trial.
}
|
3245f0d4e0c6b16c5a93e4ff716c1e7078384b08
|
37794cfdab196879e67c3826bae27d44dc86d7f7
|
/Math/Poly.System.Hetero.Symmetric.S3.Theory.R
|
b650f2685aecac13d659b0472324ac5bc2dbed96
|
[] |
no_license
|
discoleo/R
|
0bbd53a54af392ef53a6e24af85cec4f21133d17
|
e9db8008fb66fb4e6e17ff6f301babde0b2fc1ff
|
refs/heads/master
| 2023-09-05T00:43:32.381031
| 2023-08-31T23:03:27
| 2023-08-31T23:03:27
| 213,750,865
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 787
|
r
|
Poly.System.Hetero.Symmetric.S3.Theory.R
|
########################
###
### Leonard Mada
### [the one and only]
###
### Polynomial Systems: S3
### Theory
###
### draft v.0.1a
### Hetero-Symmetric S3 System
# - let P(x, y, z) = polynomial;
### System:
# P(x, y, z) = 0
# P(y, z, x) = 0
# P(z, x, y) = 0
### Trivial solution: x = y = z;
### Non-Trivial system:
# - where at least 2 of (x, y, z) are different;
### Theorem
# The non-trivial system is always decomposable into a simpler system: P[3] o P[simpler];
# - let (x0, y0, z0) be a root tuple;
# - then (y0, z0, x0) & (z0, x0, y0) are also roots;
# Therefore:
# - for each root tuple, there are 2 other tuples,
# which have the same value for: S = x0 + y0 + z0.
# - the order of the polynomial in S = Order(non-trivial system) / 3;
|
4a0246c2874bdcfdad781263f4afdcb5530bbc96
|
c2ac3133ecaaf0e6ad8e751198c670dffdcbd410
|
/man/UK.Rd
|
0312bc54f3837531311bd2dd9e05f1cac7694891
|
[
"MIT"
] |
permissive
|
elenanikolova190/covidregionaldata
|
e4465cfceb4f990093e1bb5a1e2de70a0d0d6400
|
eed4fea49a5f47173c0d4a6544c9acd8f2fd0a5d
|
refs/heads/master
| 2023-04-11T07:29:50.650715
| 2021-04-14T18:16:45
| 2021-04-14T18:16:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 9,743
|
rd
|
UK.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UK.R
\name{UK}
\alias{UK}
\title{United Kingdom Class for downloading, cleaning and processing notification
data.}
\source{
\url{https://coronavirus.data.gov.uk/details/download}
\url{https://coronavirus.data.gov.uk/details/download}
}
\description{
Extracts daily COVID-19 data for the UK, stratified by region
and nation. Contains additional options to other country class objects,
including options to return subnational English regions using NHS region
boundaries instead of PHE boundaries (nhsregions=TRUE), a release date to
download from (release_date) and a geographical resolution (resolution).
}
\examples{
\dontrun{
region <- UK$new(level = "1", verbose = TRUE, steps = TRUE, get = TRUE)
region$return()
}
## ------------------------------------------------
## Method `UK$new`
## ------------------------------------------------
\dontrun{
UK$new(
level = 1, localise = TRUE,
verbose = TRUE, steps = FALSE,
nhsregions = FALSE, release_date = NULL,
resolution = "utla"
)
}
}
\concept{dataset}
\section{Super class}{
\code{\link[covidregionaldata:DataClass]{covidregionaldata::DataClass}} -> \code{UK}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{country}}{name of country to fetch data for}
\item{\code{supported_levels}}{A list of supported levels.}
\item{\code{supported_region_names}}{A list of region names in order of level.}
\item{\code{supported_region_codes}}{A list of region codes in order of level.}
\item{\code{common_data_urls}}{List of named links to raw data. The first, and
only entry, is be named main.}
\item{\code{level_data_urls}}{List of named links to raw data that are level
specific.}
\item{\code{source_data_cols}}{existing columns within the raw data}
\item{\code{query_filters}}{Set what filters to use to query the data}
\item{\code{nhsregions}}{Whether to include NHS regions in the data}
\item{\code{release_date}}{The release date for the data}
\item{\code{resolution}}{The resolution of the data to return}
\item{\code{authority_data}}{The raw data for creating authority lookup tables}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-set_region_codes}{\code{UK$set_region_codes()}}
\item \href{#method-download}{\code{UK$download()}}
\item \href{#method-clean_level_1}{\code{UK$clean_level_1()}}
\item \href{#method-clean_level_2}{\code{UK$clean_level_2()}}
\item \href{#method-new}{\code{UK$new()}}
\item \href{#method-download_filter}{\code{UK$download_filter()}}
\item \href{#method-set_filters}{\code{UK$set_filters()}}
\item \href{#method-download_nhs_regions}{\code{UK$download_nhs_regions()}}
\item \href{#method-add_nhs_regions}{\code{UK$add_nhs_regions()}}
\item \href{#method-clone}{\code{UK$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="clean">}\href{../../covidregionaldata/html/DataClass.html#method-clean}{\code{covidregionaldata::DataClass$clean()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="clean_common">}\href{../../covidregionaldata/html/DataClass.html#method-clean_common}{\code{covidregionaldata::DataClass$clean_common()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="filter">}\href{../../covidregionaldata/html/DataClass.html#method-filter}{\code{covidregionaldata::DataClass$filter()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="get">}\href{../../covidregionaldata/html/DataClass.html#method-get}{\code{covidregionaldata::DataClass$get()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="process">}\href{../../covidregionaldata/html/DataClass.html#method-process}{\code{covidregionaldata::DataClass$process()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="return">}\href{../../covidregionaldata/html/DataClass.html#method-return}{\code{covidregionaldata::DataClass$return()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="summary">}\href{../../covidregionaldata/html/DataClass.html#method-summary}{\code{covidregionaldata::DataClass$summary()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-set_region_codes"></a>}}
\if{latex}{\out{\hypertarget{method-set_region_codes}{}}}
\subsection{Method \code{set_region_codes()}}{
Specific function for getting region codes for UK .
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$set_region_codes()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-download"></a>}}
\if{latex}{\out{\hypertarget{method-download}{}}}
\subsection{Method \code{download()}}{
UK specific download function
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$download()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clean_level_1"></a>}}
\if{latex}{\out{\hypertarget{method-clean_level_1}{}}}
\subsection{Method \code{clean_level_1()}}{
Region Level Data Cleaning
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$clean_level_1()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clean_level_2"></a>}}
\if{latex}{\out{\hypertarget{method-clean_level_2}{}}}
\subsection{Method \code{clean_level_2()}}{
Level 2 Data Cleaning
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$clean_level_2()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Initalize the UK Class
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$new(nhsregions = FALSE, release_date = NULL, resolution = "utla", ...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{nhsregions}}{Return subnational English regions using NHS region
boundaries instead of PHE boundaries.}
\item{\code{release_date}}{Date data was released. Default is to extract
latest release. Dates should be in the format "yyyy-mm-dd".}
\item{\code{resolution}}{"utla" (default) or "ltla", depending on which
geographical resolution is preferred}
\item{\code{...}}{Options arguments passed to \code{initialise_dataclass}}
}
\if{html}{\out{</div>}}
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
UK$new(
level = 1, localise = TRUE,
verbose = TRUE, steps = FALSE,
nhsregions = FALSE, release_date = NULL,
resolution = "utla"
)
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-download_filter"></a>}}
\if{latex}{\out{\hypertarget{method-download_filter}{}}}
\subsection{Method \code{download_filter()}}{
Helper function for downloading data API
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$download_filter(filter)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{filter}}{region filters}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-set_filters"></a>}}
\if{latex}{\out{\hypertarget{method-set_filters}{}}}
\subsection{Method \code{set_filters()}}{
Set filters for UK data api query.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$set_filters()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-download_nhs_regions"></a>}}
\if{latex}{\out{\hypertarget{method-download_nhs_regions}{}}}
\subsection{Method \code{download_nhs_regions()}}{
Download NHS data for level 1 regions
Separate NHS data is available for "first" admissions, excluding
readmissions. This is available for England + English regions only.
See: \url{https://www.england.nhs.uk/statistics/statistical-work-areas/covid-19-hospital-activity/}
Section 2, "2. Estimated new hospital cases"
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$download_nhs_regions()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
nhs data.frame of nhs regions
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-add_nhs_regions"></a>}}
\if{latex}{\out{\hypertarget{method-add_nhs_regions}{}}}
\subsection{Method \code{add_nhs_regions()}}{
Add NHS data for level 1 regions
Separate NHS data is available for "first" admissions, excluding
readmissions. This is available for England + English regions only.
See: \url{https://www.england.nhs.uk/statistics/statistical-work-areas/covid-19-hospital-activity/}
Section 2, "2. Estimated new hospital cases"
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$add_nhs_regions(clean_data, nhs_data)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{clean_data}}{Cleaned UK covid-19 data}
\item{\code{nhs_data}}{NHS region data}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UK$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
7136e02785c27966d9841664f90d72341f992fe1
|
384c3dbc571be91c6f743d1427dec00f13e0d8ae
|
/r/kernels/rodericksd-dr-titanic-on-kaggle-9-13-2016/script/dr-titanic-on-kaggle-9-13-2016.R
|
c2f28a9be1de0733dfcd2cc3c843e970e59e87ed
|
[] |
no_license
|
helenaK/trustworthy-titanic
|
b9acdd8ca94f2fa3f7eb965596eed4a62821b21e
|
ade0e487820cf38974561da2403ebe0da9de8bc6
|
refs/heads/master
| 2022-12-09T20:56:30.700809
| 2020-09-10T14:22:24
| 2020-09-10T14:22:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,809
|
r
|
dr-titanic-on-kaggle-9-13-2016.R
|
# This R script will run on our backend. You can write arbitrary code here!
# Many standard libraries are already installed, such as randomForest
library(randomForest)
library (rpart)
library (caret)
#install.packages('rattle')
#install.packages ('rpart.plot')
#install.packages ('RColorBrewer')
#library(rattle)
library(rpart.plot)
library(RColorBrewer)
library (lattice)
library (ggplot2)
# The train and test data is stored in the ../input directory.
# read.csv with defaults; on R < 4.0 character columns become factors
# (stringsAsFactors) — the script later converts Pclass explicitly anyway.
train <- read.csv("../input/train.csv")
test <- read.csv("../input/test.csv")
# *********************9-2-2016*************************
# Let us split train into two variables (one of 80%, the other of 20%)
#train2 <- train[1:712,]
#train2
#str(train2)
#train2cv <- train[713:891,]
#train2cv
#How many survived in the train2 data set
#Survived = 278 (39.04%), Died = 434 (60.96%), total 712 in train2
#table(train2$Survived)
#prop.table(table(train2$Survived))
#How many males, females in train2 data set
# Females = 256 (35.96%), Males = 456 (64.04%)
#table(train2$Sex)
#prop.table(table(train2$Sex))
#How many males, females survived
# Females survived = 190 (74.22% of all women)
# Males survived = 88 (19.30% of all males)
#table(train2$Sex, train2$Survived)
#prop.table(table(train2$Sex, train2$Survived),1)
# Now, to investigate Age
# table(train2$Age,train2$Survived)
#fit <- rpart(Survived ~ Sex + Age + SibSp + Pclass + Embarked + Parch,
# data = train2,
# method = "class")
#rpart.plot(fit)
#*****************************9-13-2016**********************************
# First, clean up Pclass to be a factor (so we do not get "1.5" in our
# decision trees) — applied identically to train and test.
train$Pclass <- as.factor(train$Pclass)
test$Pclass <- as.factor(test$Pclass)
# Group the Fare feature into fewer categories: <10, 10-20, 20-30, 30+.
# BUG FIX: the test-set labels previously used spaced variants ('20 - 30',
# '10 - 20') that did not match the train-set labels ('20-30', '10-20'),
# so a model fit on train$Fare2 would see the test bins as unknown levels.
# Labels are now identical across both data sets.
train$Fare2 <- '30+'
train$Fare2[train$Fare < 30 & train$Fare >= 20] <- '20-30'
train$Fare2[train$Fare < 20 & train$Fare >= 10] <- '10-20'
train$Fare2[train$Fare < 10] <- '<10'
test$Fare2 <- '30+'
test$Fare2[test$Fare < 30 & test$Fare >= 20] <- '20-30'
test$Fare2[test$Fare < 20 & test$Fare >= 10] <- '10-20'
test$Fare2[test$Fare < 10] <- '<10'
# Break up the train dataframe into 5 folds and use one of them as the CV set,
# then rotate through. Folds are contiguous row blocks, not random — the first
# four have 179 rows, the last has 175 (rows 717-891).
train1 <- train[1:179,]
train2 <-train[180:358,]
train3 <- train[359:537,]
train4 <- train[538:716,]
train5 <- train[717:891,]
# Fold 1 as the hold-out set; the other four folds form the training set.
cv1 <- train1
newtrain2345 <- rbind(train2,train3,train4,train5)
# Quick class-balance and sex-by-survival tabulations of the training folds.
table(newtrain2345$Survived)
table(newtrain2345$Sex)
table(newtrain2345$Sex,newtrain2345$Survived)
# Use the first pair of data sets (cv1 and newtrain2345):
# construct a classification tree with rpart and examine outcomes.
# Refer to this as "model 1".
fit <- rpart(Survived ~ Fare2 + Sex + Age + SibSp + Pclass + Embarked + Parch,
data = newtrain2345,
method = "class")
# NOTE(review): predict.rpart has no 'interval' argument — it is silently
# ignored here; the call returns class probabilities for the hold-out fold.
predict(fit,cv1,interval = "confidence")
#submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
#write.csv(submit, file = "theyallperish.csv", row.names = FALSE)
# Remaining CV rotations (folds 2-5). Only the fold/training-set assignments
# are active; the per-fold fit/predict calls were run once and commented out.
cv2 <- train2
newtrain1345 <- rbind(train1,train3,train4,train5)
#table(newtrain1345$Survived)
#table(newtrain1345$Sex)
#table(newtrain1345$Sex,newtrain1345$Survived)
# Second pair of data sets (cv2 and newtrain1345):
# construct a classification tree with rpart and examine outcomes.
#fit <- rpart(Survived ~ Fare2 + Sex + Age + SibSp + Pclass + Embarked + Parch,
# data = newtrain1345,
# method = "class")
#rpart.plot(fit)
#predict(fit,cv2,interval = "confidence")
cv3 <- train3
newtrain1245 <- rbind(train1,train2,train4,train5)
#table(newtrain1245$Survived)
#table(newtrain1245$Sex)
#table(newtrain1245$Sex,newtrain1245$Survived)
# Third pair of data sets (cv3 and newtrain1245):
# construct a classification tree with rpart and examine outcomes.
#fit <- rpart(Survived ~ Fare2 + Sex + Age + SibSp + Pclass + Embarked + Parch,
# data = newtrain1245,
# method = "class")
#rpart.plot(fit)
#predict(fit,cv3,interval = "confidence")
cv4 <- train4
newtrain1235 <- rbind(train1,train2,train3,train5)
#table(newtrain1235$Survived)
#table(newtrain1235$Sex)
#table(newtrain1235$Sex,newtrain1235$Survived)
# Fourth pair of data sets (cv4 and newtrain1235):
# construct a classification tree with rpart and examine outcomes.
#fit <- rpart(Survived ~ Fare2 + Sex + Age + SibSp + Pclass + Embarked + Parch,
# data = newtrain1235,
# method = "class")
#rpart.plot(fit)
#predict(fit,cv4,interval = "confidence")
cv5 <- train5
newtrain1234 <- rbind(train1,train2,train3,train4)
#table(newtrain1234$Survived)
#table(newtrain1234$Sex)
#table(newtrain1234$Sex,newtrain1234$Survived)
# Fifth pair of data sets (cv5 and newtrain1234):
# construct a classification tree with rpart and examine outcomes.
#fit <- rpart(Survived ~ Fare2 + Sex + Age + SibSp + Pclass + Embarked + Parch,
# data = newtrain1234,
# method = "class")
#rpart.plot(fit)
#predict(fit,cv5,interval = "confidence")
# At the conclusion of running this model (fit) against all 5 sets of data,
# the percentage accuracy in predicting survival is 78.77095% (median) or
# 79.2556% (average) — figures reported by the original author, not reproduced.
# Let us make a submission based on this model against test and see where Kaggle rates us.
#fit <- rpart(Survived ~ Fare2 + Sex + Age + SibSp + Pclass + Embarked + Parch,
# data = newtrain2345,
# method = "class")
#predict(fit, test, interval = "confidence")
#prediction <- predict(fit,test,interval = "confidence")
#prediction.Survived
# Save the solution to a dataframe with two columns: PassengerId and Survived (prediction)
#solution <- data.frame(PassengerId = test$PassengerId, Survived <- prediction)
#solution
# Write the solution to file
#write.csv(solution, file = 'rf_mod_Solution.csv', row.names = F)
#submission <- test(PassengerId = test$PassengerId)
#submission$Survived <- predict(fit, test, interval = "confidence")
#write.csv(submission, file = "1_random_forest_r_submission.csv", row.names=FALSE)
#submission
# Investigating Random Forests (never completed; extractFeatures is undefined).
#rf <- randomForest(extractFeatures(train), as.factor(train$Survived), ntree=100, importance=TRUE)
# Here we will plot the passenger survival by class
# train$Survived <- factor(train$Survived, levels=c(1,0))
# levels(train$Survived) <- c("Survived", "Died")
# train$Pclass <- as.factor(train$Pclass)
# levels(train$Pclass) <- c("1st Class", "2nd Class", "3rd Class")
# png("1_survival_by_class.png", width=800, height=600)
# mosaicplot(train$Pclass ~ train$Survived, main="Passenger Survival by Class",
# color=c("#8dd3c7", "#fb8072"), shade=FALSE, xlab="", ylab="",
# off=c(0), cex.axis=1.4)
#dev.off()
|
924c9183edb0d799b9928b238fcb6dc701ac14eb
|
a09b729d3dddcf139712c5e47fcd825a3d414cf3
|
/man/midpoint.root.Rd
|
0fa4f465bf9d3a6c7081e87ba17cb19ba2062866
|
[] |
no_license
|
olmen/phytools
|
62fdab172b504362d417c49d5d768e9a88b92ff1
|
1c61ab64d38f92c487e1edd49e918c6a2d3a0965
|
refs/heads/master
| 2020-03-17T10:08:03.625735
| 2018-04-13T16:51:31
| 2018-04-13T16:51:31
| 133,501,475
| 1
| 0
| null | 2018-05-15T10:42:46
| 2018-05-15T10:42:45
| null |
UTF-8
|
R
| false
| false
| 1,189
|
rd
|
midpoint.root.Rd
|
\name{midpoint.root}
\alias{midpoint.root}
\title{Midpoint root a phylogeny}
\usage{
midpoint.root(tree)
}
\arguments{
\item{tree}{an object of class \code{"phylo"}.}
}
\description{
This function midpoint roots a rooted or unrooted tree (Farris 1972).
}
\details{
Midpoint rooting involves locating the midpoint of the longest path between any two tips and putting the root in that location. This function performs the same operation as \code{midpoint} in the phangorn package, but uses no phangorn code internally.
}
\value{
A phylogenetic tree in \code{"phylo"} format.
}
\references{
Farris, J. (1972) Estimating phylogenetic trees from distance matrices. \emph{American Naturalist}, \bold{106}, 645-667.
Paradis, E., J. Claude, and K. Strimmer (2004) APE: Analyses of phylogenetics and evolution in R language. \emph{Bioinformatics}, \bold{20}, 289-290.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{midpoint}}, \code{\link{reroot}}, \code{\link{root}}
}
\keyword{phylogenetics}
\keyword{utilities}
|
f4f3b1a961e038522d4dabfb39c2cfb1f30e1fc8
|
a71f5727b67ecd4b9a3dd506749dd39c47264904
|
/R语言统计分析与应用/《R语言统计分析与应用》配套程序/十三章/Example13_8.R
|
cd2af5788a8c0408e25a1e4033d51bfed18d9879
|
[] |
no_license
|
wwjvictor/test-R
|
4085408ae64c48cda7dd34128e4dac3e1dd1a7a3
|
be292db7c0288b02d9ce0a749b3af64043f99634
|
refs/heads/master
| 2020-12-14T00:01:31.140699
| 2020-01-17T15:08:09
| 2020-01-17T15:08:09
| 234,570,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 201
|
r
|
Example13_8.R
|
# Example 13-8: Spearman rank correlation.
# BUG FIX: the original file was a console transcript — every line carried an
# interactive "> " prompt, so the script could not be sourced. Prompts removed.
Example13_8 <- read.table ("example13_8.csv", header=TRUE, sep=",")
# Spearman correlation matrix over all columns (assumes all columns numeric —
# TODO confirm against example13_8.csv).
cor(Example13_8, method="spearman")
# Significance test for the Spearman correlation between columns a and b.
# attach()/detach() replaced with explicit column references to avoid
# search-path masking; results are identical.
cor.test(Example13_8$a, Example13_8$b, method="spearman")
|
e0a91621fbce20ba8c5e28efe87c9ec9acec1511
|
f9cd477265898f4466ea0acf4c784fb8328dcba9
|
/HW5Pack/R/random_letters2.R
|
45742c2e86abc28f106d8c00409add4a09a90d2c
|
[] |
no_license
|
alexrblohm/Simulation
|
7d0f935c475af2d6fbce4a513bb49df5a7436609
|
5a09e827cdc44e68df342963d9316e84327d6920
|
refs/heads/master
| 2020-07-04T21:22:51.060613
| 2019-11-26T17:57:58
| 2019-11-26T17:57:58
| 202,422,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
random_letters2.R
|
# Draw one uppercase letter uniformly at random.
#
# Returns a length-one character vector sampled from the built-in
# LETTERS constant, using R's current random number stream.
random_letters2 <- function() {
  picked <- sample(LETTERS, 1)
  picked
}
|
18c02f9b8d06011537be7baa419b096f7d6504b1
|
7d18e60a7da4b47b43bf9a90f0e7b47903e26c68
|
/task2/NN/ATVHelper.R
|
ec73d5e304ed397187ddc2dce8b1c7ff3b2ab878
|
[] |
no_license
|
danranyiyu123456/Black-Swan
|
74149e7667b3495b44507cac66382e637afcbd27
|
571839ff218b4b7f6dd999842b67f6230aa0c98c
|
refs/heads/master
| 2022-11-23T03:49:52.482740
| 2020-08-01T05:52:34
| 2020-08-01T05:52:34
| 284,000,634
| 0
| 0
| null | 2020-07-31T09:53:42
| 2020-07-31T09:53:41
| null |
UTF-8
|
R
| false
| false
| 8,658
|
r
|
ATVHelper.R
|
###################################################
##### Shared constants for ATV feature engineering
###################################################
# The two prediction windows: morning ends at hour 9, evening at hour 18.
predictHourWindow <- c("[9,8]", "[18,17]")
# Hours of traffic data retained (two hours of features before each window
# plus the two target hours of each window).
dataHourList <- c(6,7,8,9,15,16,17,18)
# Chinese National Day holiday dates (used elsewhere; not referenced in the
# functions visible in this file).
nationDayList <- seq(as.Date("2016-10-01"),as.Date("2016-10-07"),1)
##############################################################
#### Data preprocessing
##############################################################
# Build a machine-learning feature table from 20-minute tollgate volume data.
#
# For each (tollgateDirection, date, hour window) key, the two target hours
# of the window become rows (with a 1..6 interval index MWInterval), and the
# two preceding hours supply six lagged features per measure (ATV, etcV,
# VM01, VM012).
#
# @param data data.table with columns tollgateDirection, curDate, curHour,
#   curMinute20, ATV, etcV, VM01, VM012 (20-minute aggregates).
#   NOTE: modified by reference — hourWindow and keyIndex columns are added
#   to the caller's table (data.table := semantics).
# @return data.table of target rows with MWInterval and *Lag1..*Lag6 columns.
ATVMLDataFunc <- function(data){
# Hours <= 12 belong to the morning window "[9,8]", the rest to "[18,17]".
data[, hourWindow:=ifelse(curHour<=12, "[9,8]", "[18,17]")]
data[, keyIndex:=paste(tollgateDirection, curDate,hourWindow,sep="-")]
keyIndexList <- unique(data$keyIndex)
ATVMWList <- lapply(keyIndexList, function(x){
# Progress log — one line per key.
print(paste("tollgateDirection-curDate-hourWindow:", x))
subData <- data[keyIndex==x, ]
# y is the window's end hour: 9 for the morning window, 18 for the evening.
y <- ifelse(unique(subData$hourWindow)=='[9,8]',9,18)
### Target rows: the two hours ending at y, indexed 1..6 by 20-min slot.
subMain <- subData[curHour %in% c(y,y-1)]
subMain[,MWInterval:=(curHour-y+1)*3+curMinute20/20+1]
# Feature rows: the two hours before the target hours.
subFeature <- subData[curHour %in% c(y-2,y-3),]
#### Lagged total volume (ATV). Lag6 is the oldest 20-min slot, Lag1 the
#### most recent. NOTE(review): each assignment recycles a single value
#### across all rows of subMain — assumes exactly one feature row exists
#### per (hour, minute) slot; missing slots would break this silently.
subMain$ATVLag6 <- subFeature[curHour==y-3 & curMinute20==0, ]$ATV
subMain$ATVLag5 <- subFeature[curHour==y-3 & curMinute20==20, ]$ATV
subMain$ATVLag4 <- subFeature[curHour==y-3 & curMinute20==40, ]$ATV
subMain$ATVLag3 <- subFeature[curHour==y-2 & curMinute20==0, ]$ATV
subMain$ATVLag2 <- subFeature[curHour==y-2 & curMinute20==20, ]$ATV
subMain$ATVLag1 <- subFeature[curHour==y-2 & curMinute20==40, ]$ATV
#### Lagged ETC volume, same slot layout.
subMain$etcVLag6 <- subFeature[curHour==y-3 & curMinute20==0, ]$etcV
subMain$etcVLag5 <- subFeature[curHour==y-3 & curMinute20==20, ]$etcV
subMain$etcVLag4 <- subFeature[curHour==y-3 & curMinute20==40, ]$etcV
subMain$etcVLag3 <- subFeature[curHour==y-2 & curMinute20==0, ]$etcV
subMain$etcVLag2 <- subFeature[curHour==y-2 & curMinute20==20, ]$etcV
subMain$etcVLag1 <- subFeature[curHour==y-2 & curMinute20==40, ]$etcV
#### Lagged VM01 (vehicle-model 0/1) volume, same slot layout.
subMain$VM01Lag6 <- subFeature[curHour==y-3 & curMinute20==0, ]$VM01
subMain$VM01Lag5 <- subFeature[curHour==y-3 & curMinute20==20, ]$VM01
subMain$VM01Lag4 <- subFeature[curHour==y-3 & curMinute20==40, ]$VM01
subMain$VM01Lag3 <- subFeature[curHour==y-2 & curMinute20==0, ]$VM01
subMain$VM01Lag2 <- subFeature[curHour==y-2 & curMinute20==20, ]$VM01
subMain$VM01Lag1 <- subFeature[curHour==y-2 & curMinute20==40, ]$VM01
#### Lagged VM012 (vehicle-model 0/1/2) volume, same slot layout.
subMain$VM012Lag6 <- subFeature[curHour==y-3 & curMinute20==0, ]$VM012
subMain$VM012Lag5 <- subFeature[curHour==y-3 & curMinute20==20, ]$VM012
subMain$VM012Lag4 <- subFeature[curHour==y-3 & curMinute20==40, ]$VM012
subMain$VM012Lag3 <- subFeature[curHour==y-2 & curMinute20==0, ]$VM012
subMain$VM012Lag2 <- subFeature[curHour==y-2 & curMinute20==20, ]$VM012
subMain$VM012Lag1 <- subFeature[curHour==y-2 & curMinute20==40, ]$VM012
# Drop the grouping key; the last expression is the lapply return value.
subMain[,keyIndex:=NULL]
})
# Stack all per-key tables into one feature table.
ATVMWData <- rbindlist(ATVMWList)
return(ATVMWData)
}
##############################################################
#### Data preprocessing
##############################################################
extendMLDataFunc <- function(data, minuteGap=5){
  # Build a machine-learning feature table: shift raw vehicle records forward
  # by `minuteGap` minutes, aggregate them into 20-minute windows, and attach
  # the six preceding 20-minute windows of each morning/afternoon block as lag
  # features (ATV / etcV / VM01 / VM012).
  #
  # Args:
  #   data      data.table of vehicle records; usage implies columns
  #             time, tollgate_id, direction, has_etc, aggVehicleModel,
  #             tollgateDirection, mark (schema inferred -- confirm upstream).
  #   minuteGap numeric, minutes added to every timestamp before binning.
  #
  # Returns: data.table with one row per target 20-minute window, carrying
  #          MWInterval (1..6 within the 2-hour target block) and lag1..lag6
  #          volume features.
  #
  # NOTE(review): relies on the global `dataHourList` being defined elsewhere.
  newData <- copy(data)                         # copy first: `:=` below mutates by reference
  newData[, time:=time+dminutes(minuteGap)]
  newData[, c("curDate", "curHour", "curMinute"):=list(as.Date(time), hour(time), minute(time))]
  newData[,curMinute20:=floor(curMinute/20)*20] # floor minutes into 20-min bins: 0 / 20 / 40
  newData[, timeStamp:=paste(curDate, paste(curHour, curMinute20,"00", sep=":"), sep=" ")]
  newData[, timeStamp:=ymd_hms(timeStamp)]
  ##Subset to the hours of interest only
  newData <- newData[curHour %in% dataHourList,]
  # Aggregate per (gate-direction, 20-min window): total volume, ETC volume,
  # and counts by aggregated vehicle-model buckets.
  newATVData <- newData[,list(tollgate_id=unique(tollgate_id), direction=unique(direction),
                              ATV=length(time),etcV=sum(has_etc),VM01=sum(aggVehicleModel==0|aggVehicleModel==1),
                              VM1=sum(aggVehicleModel==1), VM012=sum(aggVehicleModel<=2),
                              curDate=unique(curDate),curHour=unique(curHour),curMinute20=unique(curMinute20),
                              mark=unique(mark)),
                        by=c("tollgateDirection", "timeStamp")]
  # Label each row as belonging to the morning ([9,8]) or afternoon ([18,17]) block.
  newATVData[, hourWindow:=ifelse(curHour<=12, "[9,8]", "[18,17]")]
  # One processing key per gate-direction / day / block combination.
  newATVData[, keyIndex:=paste(tollgateDirection, curDate,hourWindow,sep="-")]
  keyIndexList <- unique(newATVData$keyIndex)
  ATVMWList <- lapply(keyIndexList, function(x){
    print(paste("tollgateDirection-curDate-hourWindow:", x))
    subData <- newATVData[keyIndex==x, ]
    # y = first target hour of the block (9 for morning, 18 for afternoon).
    y <- ifelse(unique(subData$hourWindow)=='[9,8]',9,18)
    ###The MWInterval: index 1..6 of the 20-min window inside hours [y-1, y]
    subMain <- subData[curHour %in% c(y,y-1)]
    subMain[,MWInterval:=(curHour-y+1)*3+curMinute20/20+1]
    # Feature rows: the two hours immediately before the target block.
    subFeature <- subData[curHour %in% c(y-2,y-3),]
    ####The AVTLag
    # NOTE(review): each filter below is assumed to yield exactly one row,
    # recycled across subMain; missing windows would break this -- confirm.
    subMain$ATVLag6 <- subFeature[curHour==y-3 & curMinute20==0, ]$ATV
    subMain$ATVLag5 <- subFeature[curHour==y-3 & curMinute20==20, ]$ATV
    subMain$ATVLag4 <- subFeature[curHour==y-3 & curMinute20==40, ]$ATV
    subMain$ATVLag3 <- subFeature[curHour==y-2 & curMinute20==0, ]$ATV
    subMain$ATVLag2 <- subFeature[curHour==y-2 & curMinute20==20, ]$ATV
    subMain$ATVLag1 <- subFeature[curHour==y-2 & curMinute20==40, ]$ATV
    ####The etc-Lag
    subMain$etcVLag6 <- subFeature[curHour==y-3 & curMinute20==0, ]$etcV
    subMain$etcVLag5 <- subFeature[curHour==y-3 & curMinute20==20, ]$etcV
    subMain$etcVLag4 <- subFeature[curHour==y-3 & curMinute20==40, ]$etcV
    subMain$etcVLag3 <- subFeature[curHour==y-2 & curMinute20==0, ]$etcV
    subMain$etcVLag2 <- subFeature[curHour==y-2 & curMinute20==20, ]$etcV
    subMain$etcVLag1 <- subFeature[curHour==y-2 & curMinute20==40, ]$etcV
    ####The VM01
    subMain$VM01Lag6 <- subFeature[curHour==y-3 & curMinute20==0, ]$VM01
    subMain$VM01Lag5 <- subFeature[curHour==y-3 & curMinute20==20, ]$VM01
    subMain$VM01Lag4 <- subFeature[curHour==y-3 & curMinute20==40, ]$VM01
    subMain$VM01Lag3 <- subFeature[curHour==y-2 & curMinute20==0, ]$VM01
    subMain$VM01Lag2 <- subFeature[curHour==y-2 & curMinute20==20, ]$VM01
    subMain$VM01Lag1 <- subFeature[curHour==y-2 & curMinute20==40, ]$VM01
    ####The VM012
    subMain$VM012Lag6 <- subFeature[curHour==y-3 & curMinute20==0, ]$VM012
    subMain$VM012Lag5 <- subFeature[curHour==y-3 & curMinute20==20, ]$VM012
    subMain$VM012Lag4 <- subFeature[curHour==y-3 & curMinute20==40, ]$VM012
    subMain$VM012Lag3 <- subFeature[curHour==y-2 & curMinute20==0, ]$VM012
    subMain$VM012Lag2 <- subFeature[curHour==y-2 & curMinute20==20, ]$VM012
    subMain$VM012Lag1 <- subFeature[curHour==y-2 & curMinute20==40, ]$VM012
    # `:=` returns the data.table invisibly, so subMain (minus keyIndex) is
    # the value collected by lapply().
    subMain[,keyIndex:=NULL]
  })
  ATVMWData <- rbindlist(ATVMWList)
  return(ATVMWData)
}
########################################################
### The time series decomposition
########################################################
extendTSFunc <- function(data, minuteGap=5){
  # Time-series variant of the data preparation: shift timestamps forward by
  # `minuteGap` minutes, bin records into 20-minute windows, aggregate traffic
  # volumes per (gate-direction, window), and return rows ordered by gate and
  # time. No lag features are attached here.
  # NOTE(review): depends on the global `dataHourList` for the hour filter.
  dt <- copy(data)  # work on a copy so `:=` does not touch the caller's table
  dt[, time := time + dminutes(minuteGap)]
  dt[, c("curDate", "curHour", "curMinute") := list(as.Date(time), hour(time), minute(time))]
  dt[, curMinute20 := floor(curMinute / 20) * 20]
  dt[, timeStamp := paste(curDate, paste(curHour, curMinute20, "00", sep = ":"), sep = " ")]
  dt[, timeStamp := ymd_hms(timeStamp)]
  # Keep only the hours of interest.
  dt <- dt[curHour %in% dataHourList, ]
  # One row per gate-direction and 20-minute window, with volume counts by
  # aggregated vehicle-model bucket.
  agg <- dt[, list(tollgate_id = unique(tollgate_id),
                   direction = unique(direction),
                   ATV = length(time),
                   etcV = sum(has_etc),
                   VM01 = sum(aggVehicleModel == 0 | aggVehicleModel == 1),
                   VM1 = sum(aggVehicleModel == 1),
                   VM012 = sum(aggVehicleModel <= 2),
                   curDate = unique(curDate),
                   curHour = unique(curHour),
                   curMinute20 = unique(curMinute20),
                   mark = unique(mark)),
            by = c("tollgateDirection", "timeStamp")]
  agg[, hourWindow := ifelse(curHour <= 12, "[9,8]", "[18,17]")]
  agg[order(tollgateDirection, timeStamp), ]
}
|
90cb93fc5d9f96a9166f7706de18e7a242cf4a7e
|
76ad703bce9a4796a10e8b5cf092ff31ff8db33c
|
/R/crayon_substitutes.R
|
f9953b8a662550b4d99fdd9048443a9fbd98a179
|
[
"MIT"
] |
permissive
|
robertzk/lockbox
|
e9d5bb963a8a8f0bd9a18336106805c9a5d44fd4
|
4e8d96f86fc34e7de09b2a2535d6b2ad6b1ec74f
|
refs/heads/master
| 2020-05-21T23:36:00.445712
| 2018-01-16T14:31:04
| 2018-01-16T14:31:04
| 33,389,821
| 49
| 11
|
MIT
| 2018-01-16T14:31:05
| 2015-04-04T01:07:35
|
R
|
UTF-8
|
R
| false
| false
| 309
|
r
|
crayon_substitutes.R
|
# Dependency-free stand-ins for the crayon package: each helper wraps its
# input in an ANSI SGR colour code and closes with the reset-foreground
# sequence "\033[39m". Vectorized over `string` like the originals.
crayon_blue <- function(string) {
  sprintf("\033[34m%s\033[39m", string)
}
crayon_red <- function(string) {
  sprintf("\033[31m%s\033[39m", string)
}
crayon_yellow <- function(string) {
  sprintf("\033[33m%s\033[39m", string)
}
crayon_green <- function(string) {
  sprintf("\033[32m%s\033[39m", string)
}
|
eb05dab950091ee339f96fa863a248639cd8d9e8
|
97c0ba9b73da48eecb1a5832b802915db59b2ae0
|
/introjs.R
|
f80d6a3b4e6e7d33b9cbf7205932d7367cb20787
|
[] |
no_license
|
kpatel427/RShiny
|
e5bccdbce99cd3fa32a79076ab408e47c3679193
|
98f700bba8d72441bfeb59650a943767a8d9db39
|
refs/heads/master
| 2020-06-01T02:26:32.351228
| 2020-05-04T20:01:25
| 2020-05-04T20:01:25
| 190,596,525
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,227
|
r
|
introjs.R
|
library(rintrojs)
library(shiny)
# Define UI for application that draws a histogram
# UI for the rintrojs demo: each introBox() wraps a UI element and gives it a
# position in the guided tour via data.step (tour order 1..5) plus the text
# shown for that step (data.intro) and the optional standalone hint (data.hint).
ui <- shinyUI(fluidPage(
  introjsUI(),  # injects the intro.js assets; required once per app
  # Application title -- tour step 1
  introBox(
    titlePanel("Old Faithful Geyser Data"),
    data.step = 1,
    data.intro = "This is the title panel"
  ),
  # Sidebar with a slider input for number of bins
  sidebarLayout(sidebarPanel(
    # Outer introBox is step 2 (the sidebar); inner boxes (steps 3 and 4)
    # show that intro elements can nest.
    introBox(
      introBox(
        sliderInput(
          "bins",
          "Number of bins:",
          min = 1,
          max = 50,
          value = 30
        ),
        data.step = 3,
        data.intro = "This is a slider",
        data.hint = "You can slide me"
      ),
      introBox(
        actionButton("help", "Press for instructions"),
        data.step = 4,
        data.intro = "This is a button",
        data.hint = "You can press me"
      ),
      data.step = 2,
      data.intro = "This is the sidebar. Look how intro elements can nest"
    )
  ),
  # Show a plot of the generated distribution -- tour step 5
  mainPanel(
    introBox(
      plotOutput("distPlot"),
      data.step = 5,
      data.intro = "This is the main plot"
    )
  ))
))
# Define server logic required to draw a histogram
# Define server logic required to draw a histogram
server <- shinyServer(function(input, output, session) {
  # Show hints on startup with a custom close-button label, and pop an alert
  # whenever a hint is dismissed.
  hintjs(session,
         options = list("hintButtonLabel" = "Hope this hint was helpful"),
         events = list("onhintclose" = I('alert("Wasn\'t that hint helpful")')))

  output$distPlot <- renderPlot({
    # generate bins based on input$bins from ui.R
    x <- faithful[, 2]
    bins <- seq(min(x), max(x), length.out = input$bins + 1)
    # draw the histogram with the specified number of bins
    hist(x,
         breaks = bins,
         col = 'darkgray',
         border = 'white')
  })

  # Start the guided tour when the help button is pressed.
  # FIX: `events` was previously nested inside the `options` list, so the
  # oncomplete callback never fired; rintrojs expects it as a separate
  # argument (as the hintjs() call above already does).
  observeEvent(input$help,
    introjs(session,
            options = list("nextLabel" = "next",
                           "prevLabel" = "prev",
                           "skipLabel" = "skip"),
            events = list("oncomplete" = I('alert("Glad that is over")')))
  )
})

# Run the application
shinyApp(ui = ui, server = server)
|
84f55e5bdd9f731ac5fe46bae1918e7cc9a2fd56
|
812ca561672990c77765107f58c75b7b71214328
|
/twitterHashtagsAnalysis.r
|
71b30e749692f42868faa5030dbaa99e83737d7a
|
[] |
no_license
|
sciruela/twitterHashtagAnalysis
|
d0d19612afe92eb811f46aa8cb3db1573f5909a6
|
2e87d752983e5e2fb0840cf3f8947e021db4de3a
|
refs/heads/master
| 2021-01-19T08:46:01.975418
| 2012-02-07T23:16:28
| 2012-02-07T23:16:28
| 3,382,422
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,670
|
r
|
twitterHashtagsAnalysis.r
|
# Twitter hashtag analysis: fetch recent tweets for a hashtag and chart
# per-user tweet and retweet activity over time.
# NOTE(review): assumes a working, authenticated twitteR setup.
library(twitteR)    # library() fails fast; require() only warned on failure
library(plyr)
library(ggplot2)
library(stringr)
library(gdata)

searchTerm <- '#SOPA'
# Hard-coded output directory, hoisted so it is defined in one place only.
outDir <- "/Users/sciruela/Documents/twitterHashtagsAnalysis"

rdmTweets <- searchTwitter(searchTerm, n=500)
tw.df <- twListToDF(rdmTweets)

# First tweet time per user; order users by time of first appearance.
tw.dfx <- ddply(tw.df, .var = "screenName", .fun = function(x) {return(subset(x, created %in% min(created),select=c(screenName,created)))})
tw.dfxa <- arrange(tw.dfx,-desc(created))
tw.df$screenName <- factor(tw.df$screenName, levels = tw.dfxa$screenName)

# Graph 1: all tweets, user vs. time.
pdf(file.path(outDir, "graph1.pdf"))
ggplot(tw.df)+geom_point(aes(x=created,y=screenName))
dev.off()

# Extract the retweeted account (if any) and flag tweets as T/RT.
trim <- function (x) sub('@','',x)
tw.df$rt <- sapply(tw.df$text,function(tweet) trim(str_match(tweet,"^RT (@[[:alnum:]_]*)")[2]))
tw.df$rtt <- sapply(tw.df$rt,function(rt) if (is.na(rt)) 'T' else 'RT')

# Graph 2: tweets coloured by tweet vs. retweet.
pdf(file.path(outDir, "graph2.pdf"))
ggplot(tw.df)+geom_point(aes(x=created,y=screenName,col=rtt))
dev.off()

tw.df$rtof <- sapply(tw.df$text,function(tweet) trim(str_match(tweet,"^RT (@[[:alnum:]_]*)")[2]))

# Graph 3: who was retweeted, over time.
pdf(file.path(outDir, "graph3.pdf"))
ggplot(subset(tw.df,subset=(!is.na(rtof))))+geom_point(aes(x=created,y=rtof))
dev.off()

# Retweet subset with users reordered by first retweet time.
tw.df.rt <- drop.levels(subset(tw.df,subset=(!is.na(rtof))))
tw.df.rta <- arrange(ddply(tw.df.rt, .var = "screenName", .fun = function(x) {return(subset(x, created %in% min(created),select=c(screenName,created)))}),-desc(created))
tw.df.rt$screenName <- factor(tw.df.rt$screenName, levels = tw.df.rta$screenName)

# Graph 4: retweeter vs. retweeted.
# FIX: opts()/theme_text() were removed from ggplot2 (>= 0.9.2); the modern
# equivalent is theme(axis.text.x = element_text(...)).
pdf(file.path(outDir, "graph4.pdf"))
ggplot(subset(tw.df.rt,subset=(!is.na(rtof)))) +
  geom_point(aes(x=screenName,y=rtof)) +
  theme(axis.text.x = element_text(angle = -90, size = 6)) +
  xlab(NULL)
dev.off()
|
234137938d6ae8eb8a0745a2cf877ddb3c2d70ab
|
eff628edd09eab16685431d8f9f95a57aff7952b
|
/cachematrix.R
|
eb417a02fa6862fe2cabe3c35930f24bafe69c50
|
[] |
no_license
|
nycmetro/ProgrammingAssignment2
|
f8198f88d7f889139d7aa03cec178d9a71e48ebe
|
5d05d1ea80632bf7638833fc77141c54b5de4c87
|
refs/heads/master
| 2021-01-19T07:07:37.214947
| 2014-12-21T04:29:18
| 2014-12-21T04:29:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
cachematrix.R
|
## The functions in this module goal is to aid the step of carrying out an inversion
## of a matrix repeatly which can be an expensive operation for a large matrix by caching
## the creation step of the object - so expensive computation is carried out only once.
## Subsequent calls to get matrix inverse are fast.
## The first function, makeCacheMatrix creates a special "vector", which is a list containing a function to
## set the value of the matrix
## get the value of the matrix
## set the value of the inverse of matrix by solve
## get the value of the inverse of matrix by solve
## Wrap a matrix together with a cache slot for its inverse.
## Returns a list of four closures sharing one environment:
##   set(y)          -- replace the matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setsolve(solve) -- store a computed inverse in the cache
##   getsolve()      -- return the cached inverse (NULL if not yet set)
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  set <- function(y) {
    x <<- y
    inverse_cache <<- NULL  # new data invalidates any cached inverse
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    inverse_cache <<- solve
  }
  getsolve <- function() {
    inverse_cache
  }
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve
## should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## On a cache hit the stored inverse is returned (with a message); on a miss
## the inverse is computed with solve(), written back into the cache, and
## returned. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
4dfad5b0b8fe73a86d1795e1df889356ee310d77
|
c4a77d19da5d46766311c3b9eb68131bc696daf9
|
/man/Resistance.Opt_single.scale.Rd
|
0c33a3b811c96be3d4be4808d6159d16463b8f38
|
[] |
no_license
|
rmarrotte/ResistanceGA
|
ac57bff6c3e2bd05006e923f4af93eec9e1e18c1
|
6934cf25cf025baec0dccc3bf67311ca170c9808
|
refs/heads/master
| 2020-03-27T18:24:03.423010
| 2018-09-14T17:30:07
| 2018-09-14T17:30:07
| 146,920,028
| 1
| 0
| null | 2018-08-31T16:57:59
| 2018-08-31T16:57:59
| null |
UTF-8
|
R
| false
| true
| 1,884
|
rd
|
Resistance.Opt_single.scale.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OptimFunction_Single_scale.R
\name{Resistance.Opt_single.scale}
\alias{Resistance.Opt_single.scale}
\title{Optimize resistance surfaces individually with kernel smoothing}
\usage{
Resistance.Opt_single.scale(PARM, Resistance, CS.inputs = NULL,
gdist.inputs = NULL, GA.inputs, Min.Max = "max", iter = NULL,
quiet = TRUE)
}
\arguments{
\item{PARM}{Parameters to transform continuous surface or resistance values of binary categorical surface. A vector with parameters specified in the order of resistance surfaces.These values are selected during optimization if called within GA function.}
\item{Resistance}{Resistance surface to be optimized. This should be an R raster object. If not specified, the function will attempt to find a resistance surface from \code{GA.inputs}}
\item{CS.inputs}{Object created from running \code{\link[ResistanceGA]{CS.prep}} function. Defined if optimizing using CIRCUITSCAPE}
\item{gdist.inputs}{Object created from running \code{\link[ResistanceGA]{gdist.prep}} function. Defined if optimizing using gdistance}
\item{GA.inputs}{Object created from running \code{\link[ResistanceGA]{GA.prep}} function}
\item{Min.Max}{Define whether the optimization function should be minimized ('min') or maximized ('max'). Default is 'max'}
\item{iter}{A counter for the number of surfaces that will be optimized}
\item{quiet}{Logical, if FALSE objective function values and iteration duration will be printed to the screen at the completion of each iteration. (Default = TRUE)}
}
\value{
Objective function value (either AIC, R2, or LL) from mixed effect model
}
\description{
Optimize all resistance surfaces that are located in the same directory individually. This optimization function is designed to be called from GA
}
\author{
Bill Peterman <Bill.Peterman@gmail.com>
}
|
6942bf3e3b568d194747f1ee7491478dc58293c6
|
041d872d0f55b4a2d26a7a09ad6f7187d51fd4f7
|
/man/getLloydTaylor.Rd
|
40715fa9fa8c0e929c012a74e196d037a1a2e385
|
[] |
no_license
|
mjwang1010/RSCAPE
|
9d1715e696729ce0f5e0fb7d9ea315c58e01b7fe
|
f81ace0d953a4f791430f493a12355e63c0d2aa7
|
refs/heads/master
| 2020-12-02T21:00:42.628528
| 2014-06-10T14:43:42
| 2014-06-10T14:43:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,080
|
rd
|
getLloydTaylor.Rd
|
\name{getLloydTaylor}
\alias{getLloydTaylor}
\title{Estimate activation energy \eqn{E_a}{Ea} and time varying \eqn{R_b}{Rb} from temperature and efflux time series including uncertainty.}
\description{Function to determine the temperature sensitivity (\eqn{E_a}{Ea} value) and time varying
basal efflux (\eqn{R_b(i)}{Rb(i)}) from a given temperature and efflux (usually respiration) time series
according to the principle of "SCAle dependent Parameter Estimation, SCAPE" (Mahecha et al. 2010). }
\usage{getLloydTaylor(temperature, respiration, sf, Tref = 15, T0 = -46.02,
fborder = 30, M = -1, nss = 0, method = "Fourier", weights = NULL,
lag = NULL, gapFilling = TRUE, doPlot = FALSE)}
\arguments{
\item{temperature}{numeric vector: temperature time series}
\item{respiration}{numeric vector: respiration time series}
\item{sf}{numeric: sampling rate, number of measurements (per day)}
\item{Tref}{numeric: Reference temperature (in deg C)}
\item{T0}{numeric: Minimum temperature (in deg C) at which respiration becomes 0. Make sure that all temperature values are greater than this value}
\item{fborder}{numeric: boundary for dividing high- and low-frequency parts (in days)}
\item{M}{numeric vector: size of SSA window (in days)}
\item{nss}{numeric vector: number of surrogate samples}
\item{method}{String: method to be applied for signal decomposition (choose from "Fourier","SSA","MA","EMD","Spline")}
\item{weights}{numeric vector: optional vector of weights to be used for linear regression, points can be set to 0 for bad data points}
\item{lag}{numeric vector: optional vector of time lags between respiration and temprature signal}
\item{gapFilling}{Logical: Choose whether Gap-Filling should be applied}
\item{doPlot}{Logical: Choose whether Surrogates should be plotted}
}
\details{Function to determine the activation energy (\eqn{Ea}{Ea} value) and time varying saturation efflux (\eqn{R_b}{Rb}) from a given temperature and efflux (usually respiration) time series.
The following model was proposed by .....:
\eqn{Resp(i) = R_b e^{\frac{1}{Tref-T0} - \frac{1}{T(i)-T0}}}{Resp(i) = Rb * exp(1/(Tref-T0) - 1/(T(i)-T0))},
where \eqn{i}{i} is the time index. It has been shown, however, that this model is misleading when \eqn{R_b}{Rb} is varying over time which can be expected in many real world examples (e.g. Sampson et al. 2008).
If \eqn{R_b}{Rb} varies slowly, i.e. with some low frequency then the "scale dependent parameter estimation, SCAPE"
allows us to identify this oscillatory pattern. As a consequence, the estimation of \eqn{E_a}{Ea} can be substantially stabilized (Mahecha et al. 2010). The model becomes
\eqn{Resp(i) = R_b(i) e^{\frac{1}{Tref-T0} - \frac{1}{T(i)-T0}}}{Resp(i) = Rb(i) * exp(1/(Tref-T0) - 1/(T(i)-T0))},
where \eqn{R_b(i)}{Rb(i)} is the time varying "basal respiration", i.e. the respiration expected at \eqn{Tref}{Tref}. The convenience function gettau allows to extract the \eqn{E_a}{Ea} value minimizing the confounding factor of the time varying \eqn{R_b}{Rb}. Four different spectral methods can be used and compared. A surrogate technique (function by curtsey of Dr. Henning Rust, written in the context of Venema et al. 2006) is applied to propagate the uncertainty due to the decomposition.
The user is strongly encouraged to use the function with caution, i.e. see critique by Graf et al. (2011).}
\value{A list with elements
$SCAPE_Ea : the estimated \eqn{E_a}{Ea} with the SCAPE principle and the method chosen.
$Conv_Ea : the conventional \eqn{E_a}{Ea} (assuming constant Rb)
$DAT$SCAPE_R_pred : the SCAPE prediction of respiration
$DAT$SCAPE_Rb : the basal respiration based on the the SCAPE principle
$DAT$Conv_R_pred : the conventional prediction of respiration
$DAT$Conv_Rb : the conventional (constant) basal respiration}
\author{Fabian Gans, Miguel D. Mahecha, MPI BGC Jena, Germany, fgans@bgc-jena.mpg.de mmahecha@bgc-jena.mpg.de
Fabian Gans, Miguel Mahecha, Max-Planck-Institute for Biogeochemistry, Jena}
|
626a56dd3a18ccd1292dfcc0395c7a08c0b9aa88
|
cb306ee1d47e69addb589fd3d9f8e2093e455c70
|
/aux_fun_chains_START_fill.R
|
003c855aea17ccc44612fa23e694cf184b8d04df
|
[] |
no_license
|
IvanaGaneva/SocialDistancingDataManipulation
|
705026703948a0ba65290d0becf573c29b2b3407
|
ee4b3e35cb9ba49754135eed3b0e765b04baa12f
|
refs/heads/main
| 2023-05-15T22:31:21.485951
| 2021-06-16T20:56:39
| 2021-06-16T20:56:39
| 344,110,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,544
|
r
|
aux_fun_chains_START_fill.R
|
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Ganeva, I.K.
# Covid 19 State Policy Data Cleaning
# Feb-Apr, 2021
# __________________________________________________________________________
# Nancy Fullman, Bree Bang-Jensen, Grace Reinke, Beatrice Magistro,
# Rachel Castellano, Megan Erickson, Rebecca Walcott, Carolyn Dapper,
# Kenya Amano, John Wilkerson, and Christopher Adolph.
# "State-level social distancing policies in response to COVID-19 in the US".
# Version 1.117, February 17, 2021. http://www.covid19statepolicy.org
# __________________________________________________________________________
# FUNCTION THAT GIVES THE df_to_fill VALUES FOR EACH NEW POLICY INTRODUCED
# (AUXILIARY CODE FOR BREVITY)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Fill the tracking data frame `df_fill` for the FIRST policy of a policy
# chain: sets the time / vaccine / gathering-limit / mandate dimensions for
# the affected counties and dates, then the policy-type-specific measure
# columns. Returns the updated df_fill.
#
# NOTE(review): `p_usual_vale` appears to be a typo for `p_usual_value`; the
# parameter (and `df_changes`) is never used in the body. Left as-is here
# because callers may pass it by name.
aux_fun_chains_START_fill <- function(starting_policy,
                                      p_type = policy_type,
                                      p_usual_vale = usual_value,
                                      df_changes = policy_state_changes,
                                      df_simplified = policy_state_simplified,
                                      df_fill = df_to_fill,
                                      not_vec = c(0, NA)){
  # used for testing within the FILL_function:
  # -----------------
  # df_changes <- policy_state_changes
  # df_simplified <- policy_state_simplified
  # starting_policy <- policy_state_simplified$PID[1]
  # p_type <- policy_type
  # p_usual_value <- usual_value
  # df_fill <- df_to_fill
  row_s_df <- which(df_simplified$PID == starting_policy)
  # --------------------------------- STEP I ---------------------------------------
  # Starting from the first observation in the policy_state_simplified df:
  # filling the time/vaccine/lim-s/mandate/location dimensions
  # [And doing the same thing for each beginning of a new policy chain.]
  st_policy_dates <- seq(df_simplified$begins[row_s_df],
                         df_simplified$finishes[row_s_df],
                         by = 'days')
  last_day_st_policy <- df_simplified$finishes[row_s_df]
  first_day_st_policy <- st_policy_dates[1]
  if(row_s_df == 1){
    # Very first policy overall: initialise all dates before it with the
    # "no restriction" baseline values (columns 8:11 are the four limit
    # columns -- position-based, so the df_fill layout must not change).
    df_fill$perc_usual_time[df_fill$Date < first_day_st_policy] <- 'All the time'
    df_fill$only_non_vaccinated_ppl[df_fill$Date < first_day_st_policy] <- 0
    df_fill[df_fill$Date < first_day_st_policy, 8:11] <- 'No limit'
    df_fill$mandate[df_fill$Date < first_day_st_policy] <- 1
    # Filling for the dates before the first policy was introduced.
  }
  # Finding which locations to fill for:
  if(df_simplified$SWGeo[row_s_df] == 1){
    counties_to_fill_vec <- unique(as.character(df_fill$County))
    # i.e. if it is state-wide geographically, fill for all counties in the state
    # for this policy instance
  } else{
    counties_to_fill_vec <- unlist(str_split(df_simplified$AppliesTo[row_s_df],
                                             ', '))
  }
  # Putting the dates-locations combinations to be filled in a single vector:
  which_loc_date_vec <- which((df_fill$County %in% counties_to_fill_vec) &
                                (df_fill$Date %in% st_policy_dates))
  # ===============================
  # FILLING FOR THE TIME DIMENSION:
  # `%in% not_vec` is TRUE for both 0 and NA (unlike `==`, which gives NA).
  if(df_simplified$Curfew[row_s_df] %in% not_vec){
    df_fill$perc_usual_time[which_loc_date_vec] <- 'All the time'
  } else{
    df_fill$perc_usual_time[which_loc_date_vec] <- paste0('From ',
                                                          df_simplified$CurfewStart[row_s_df],
                                                          ' to ',
                                                          df_simplified$CurfewEnd[row_s_df])
  }
  # =========================================
  # FILLING FOR THE VACCINATED PPL DIMENSION:
  if(df_simplified$VaccineExempt[row_s_df] %in% not_vec){
    df_fill$only_non_vaccinated_ppl[which_loc_date_vec] <- 0
  } else{
    df_fill$only_non_vaccinated_ppl[which_loc_date_vec] <- 1
  }
  # ==================================
  # FILLING FOR THE MANDATE DIMENSION:
  if(df_simplified$Mandate[row_s_df] %in% not_vec){
    df_fill$mandate[which_loc_date_vec] <- 0
  } else{
    df_fill$mandate[which_loc_date_vec] <- 1
  }
  # =====================================
  # FILLING FOR THE GATHERINGS DIMENSION:
  # -> FOR INSIDE, GENERAL GATHERINGS
  if(is.na(df_simplified$InGathLim[row_s_df])){
    df_fill$lim_in_general[which_loc_date_vec] <- 'No limit'
  } else{
    df_fill$lim_in_general[which_loc_date_vec] <- df_simplified$InGathLim[row_s_df]
  }
  # -> FOR OUTSIDE, GENERAL GATHERINGS
  if(is.na(df_simplified$OutGathLim[row_s_df])){
    df_fill$lim_out_general[which_loc_date_vec] <- 'No limit'
  } else{
    df_fill$lim_out_general[which_loc_date_vec] <- df_simplified$OutGathLim[row_s_df]
  }
  # -> FOR INSIDE, RELIGIOUS GATHERINGS
  if(is.na(df_simplified$InGathLimReligious[row_s_df])){
    df_fill$lim_in_rel[which_loc_date_vec] <- 'No limit'
  } else{
    df_fill$lim_in_rel[which_loc_date_vec] <- df_simplified$InGathLimReligious[row_s_df]
  }
  # -> FOR OUTSIDE, RELIGIOUS GATHERINGS
  if(is.na(df_simplified$OutGathLimReligious[row_s_df])){
    df_fill$lim_out_rel[which_loc_date_vec] <- 'No limit'
  } else{
    df_fill$lim_out_rel[which_loc_date_vec] <- df_simplified$OutGathLimReligious[row_s_df]
  }
  # --------------------------------------------------------------------------------
  # --------------------------------- STEP II --------------------------------------
  # Continuing to work with the first observation in the policies df:
  # depending on the type of policy (5 broad categories of variables)
  # [And doing the same thing for each beginning of a new policy chain.]
  if(p_type == 'cat_sch'){
    # =========== THIS IS FOR THE SchoolClose VARIABLE
    df_fill$policy_measure_var_main[which_loc_date_vec] <- df_simplified$SchoolRestrictLevel[row_s_df]
    if(str_detect(df_simplified$PolicyCodingNotes[row_s_df], 'private|Private')){
      df_fill$policy_measure_var_sec[which_loc_date_vec] <- df_simplified$SchoolRestrictLevel[row_s_df]
    }
  } else{
    if(p_type == 'bin'){
      # ========= THIS IS FOR THE EmergDec, CaseIsolation, StayAtHome, BusinessMask, SchoolMask,
      # Quarantine, & the three TravelRestrict VARIABLES
      df_fill$policy_measure_var_main[which_loc_date_vec] <- 1
      # since this is for the first policy in each policy chain on this variable
      # (and so, it cannot start with an easing of the 'normal' values)
    } else{
      if(p_type == 'cat_bus'){
        # ======= THIS IS FOR THE BarRestrict, RestaurantRestrict, OtherBusinessClose and the
        # NEBusinessClose VARIABLES
        df_fill$policy_measure_var_main[which_loc_date_vec] <- df_simplified$BusinessRestrictLevel[row_s_df]
      } else{
        if(p_type == 'numb'){
          # ======= THIS IS FOR THE GathRestrict VARIABLE
          df_fill$policy_measure_var_main[which_loc_date_vec] <- 'GathRestrict: see limit var-s'
        } else{
          if(p_type == 'cat_mand'){
            # ===== THIS IS FOR THE PublicMask VARIABLE
            df_fill$policy_measure_var_main[which_loc_date_vec] <- df_simplified$PublicMaskLevel[row_s_df]
          }
          # NOTE(review): an unrecognised p_type falls through silently,
          # leaving policy_measure_var_main untouched -- confirm intended.
        }
      }
    }
  }
  # --------------------------------------------------------------------------------
  return(df_fill)
  # --------------------------- END OF AUXILIARY FUNCTION --------------------------
}
|
4f97cfdcf13bbfa99451d7b70c3bdad7888d4c24
|
0cff15b1ec8bf64acb0f6da3d660d0cc5edaed6f
|
/PHD-Pilot_jun_19.R
|
8ec66d49faf88f26fd30a7495339c35bcd4b8b85
|
[] |
no_license
|
Rutmer/PHD-Pilot_jun_19
|
4ab2c4bf2419c41060b4e986f1cda650fa596b10
|
328290e7de1d74aa939cef9cef8323e93a2d0714
|
refs/heads/master
| 2020-06-23T15:13:10.087637
| 2019-08-22T12:35:05
| 2019-08-22T12:35:05
| 198,660,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,154
|
r
|
PHD-Pilot_jun_19.R
|
# Pilot study (June 2019): confirmatory factor analysis of the self-regulated
# learning (SRL) questionnaire with lavaan, comparing four model
# specifications. Checking assumptions (outliers and multivariate normality)
# was done in SPSS.

# FIX: install packages only when missing instead of unconditionally on every
# run of the script.
if (!requireNamespace("lavaan", quietly = TRUE)) {
  install.packages("lavaan", dependencies = TRUE)
}
if (!requireNamespace("svMisc", quietly = TRUE)) {
  install.packages("svMisc")
}
library(foreign); library(lavaan); library(knitr); library(svMisc)

pilot <- read.spss("Pilot - Juli 2019.sav", to.data.frame = TRUE)

# Convert the questionnaire items to ordered factors for the ordinal CFA.
# FIX: the original assigned lapply() over columns 11:88 (78 items, q1..q78)
# into columns 11:89 (79 columns) -- a length mismatch that errors. Both
# sides now use 11:88; confirm the item range against the SPSS file.
pilot[, c(11:88)] <- lapply(pilot[, c(11:88)], ordered)

# Model 1: full theoretical model with separate planning / monitoring /
# reflecting sub-facets.
srl_model <- '
self_efficacy =~ q1 + q2 + q3 + q4 + q5
anger =~ q9 + q13 + q17
anxious =~ q10 + q14 + q18
boredom =~ q11 + q15 + q19
hopelessness =~ q12 + q16 + q20
external =~ q21 + q26 + q31
introjected =~ q22 + q27 + q32 + q23
identified =~ q28 + q33 + q24 + q29
intrinsic =~ q34 + q25 + q30
motivation =~ q35 + q36 + q37 + q38
cogn.pl =~ q39 + q43
emo.pl =~ q40 + q44 + q41 + q45
res.pl =~ q42
cogn.mo =~ q46 + q49 + q52
emo.mo =~ q47 + q50 + q53 + q48
res.mo =~ q51
cogn.co =~ q54 + q58 + q62
emo.co =~ q55 + q59 + q63 + q66 + q69
mot.co =~ q56 + q60 + q64 + q67 + q68 + q70
res.co =~ q57 + q61 + q65 + q71
cogn.re =~ q72 + q76
emo.re =~ q73 + q77 + q74 + q78
res.re =~ q75
'
# Second model with planning, monitoring and reflecting aggregated to single variables.
srl_model2 <- '
self_efficacy =~ q1 + q2 + q3 + q4 + q5
anger =~ q9 + q13 + q17
anxious =~ q10 + q14 + q18
boredom =~ q11 + q15 + q19
hopelessness =~ q12 + q16 + q20
external =~ q21 + q26 + q31
introjected =~ q22 + q27 + q32 + q23
identified =~ q28 + q33 + q24 + q29
intrinsic =~ q34 + q25 + q30
motivation =~ q35 + q36 + q37 + q38
planning =~ q39 + q43 + q40 + q44 + q41 + q45 + q42
monitoring =~ q46 + q49 + q52 + q47 + q50 + q53 + q48 + q51
cogn.co =~ q54 + q58 + q62
emo.co =~ q55 + q59 + q63 + q66 + q69
mot.co =~ q56 + q60 + q64 + q67 + q68 + q70
res.co =~ q57 + q61 + q65 + q71
reflecting =~ q72 + q76 + q73 + q77 + q74 + q78 + q75
'
# Third model based on EFA (try12: 16 factoren).
srl_model3 <- '
self_efficacy =~ q1 + q4 + q5
anger =~ q9 + q13 + q17
anxious =~ q10 + q14
boredom =~ q11 + q15 + q19
hopelessness =~ q12 + q16 + q20
external1 =~ q21 + q32 + q23
exeternal2 =~ q26 + q22 + q27
identified =~ q28 + q33 + q29
intrinsic =~ q34 + q25 + q30
motivation =~ q35 + q37 + q38
planning =~ q39 + q43 + q40 + q44 + q41 + q45
monitoring =~ q46 + q49 + q52 + q47 + q50 + q53
cogn.co =~ q54 + q58 + q62
rest1.co =~ q55 + q56 + q60
rest2.co =~ q59 + q63+ q66 + q64 + q67
reflecting =~ q72 + q76 + q73 + q77 + q74 + q78
'
# fourth model based on EFA (try12: 16 factoren) + dropped heywood case q23.
srl_model4 <- '
self_efficacy =~ q1 + q4 + q5
anger =~ q9 + q13 + q17
anxious =~ q10 + q14
boredom =~ q11 + q15 + q19
hopelessness =~ q12 + q16 + q20
external1 =~ q21 + q32
exeternal2 =~ q26 + q22 + q27
identified =~ q28 + q33 + q29
intrinsic =~ q34 + q25 + q30
motivation =~ q35 + q37 + q38
planning =~ q39 + q43 + q40 + q44 + q41 + q45
monitoring =~ q46 + q49 + q52 + q47 + q50 + q53
cogn.co =~ q54 + q58 + q62
rest1.co =~ q55 + q56 + q60
rest2.co =~ q59 + q63+ q66 + q64 + q67
reflecting =~ q72 + q76 + q73 + q77 + q74 + q78
'

# Fit the fourth (EFA-informed) model on the ordered items with pairwise
# handling of missing data, then inspect fit and covariance matrices.
fit <- cfa(srl_model4, data = pilot, ordered = TRUE, missing = "pairwise")
summary(fit, fit.measures = TRUE)
lavInspect(fit, "cov.lv")
lavInspect(fit, "vcov")
|
bb2b81a4b40f8120f7fb1c989846e80fffc6795e
|
faf09020a6bb22aee4beb72cb7d684df27da20d7
|
/inst/shinyApps/iBoard/output/outputTabsetpanels.R
|
68fe7311d6ae1647fe33d8a26f46a08af5d6f734
|
[] |
no_license
|
cran/idiogramFISH
|
cf1f9b735c761fb718fb64429735a15b2392299e
|
6cdf969071aa52869cb1c89796e70fc84e1fb993
|
refs/heads/master
| 2023-09-04T00:46:06.171086
| 2023-08-22T16:50:02
| 2023-08-22T18:31:05
| 211,358,009
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,642
|
r
|
outputTabsetpanels.R
|
# Server-side builders for the iBoard app's tabset panels. Each renderUI
# returns a tabsetPanel whose tabs delegate their content to further
# uiOutput() placeholders rendered elsewhere in the app.

# Panel 0: single "load example" tab; the inline CSS indents the tab content
# of elements with class "wpanel".
output$tabsetpanel0UI <- renderUI({
  tabsetPanel(
    id = "tabsetpanel0",
    tabPanel("1. Load example",
      tags$head(tags$style(HTML(" wpanel .tab-content {margin-left:50px;}"))),
      value = "exampleTab",
      div(
        class = "wpanel",
        uiOutput("examplepanel")
      )
    )
  )
})
# Panel 1: the four input data.frames (chromosomes, mark positions, mark
# styles, notes).
output$tabsetpanel1UI <- renderUI({
  tabsetPanel(
    id = "tabsetpanel1",
    tabPanel("1. Chr. data data.frame",
      value = "dfChrTab",
      uiOutput("dfchrpanel")
    ),
    tabPanel("2. Marks' pos. d.f.",
      value = "dfMarkTab",
      uiOutput("dfmarkpanel")
    ),
    tabPanel("3. Marks' style d.f.",
      value = "dfMSTab",
      uiOutput("dfMStylepanel")
    ),
    tabPanel("4. Notes' data.frames",
      value = "notesTab",
      uiOutput("dfnotespanel")
    )
  )
})
# Panel 5: indices and marks summaries.
output$tabsetpanel5UI <- renderUI({
  tabsetPanel(
    id = "tabsetpanel5",
    tabPanel("Indices",
      value = "indicesTab",
      uiOutput("indicespanel")
    ),
    tabPanel("Marks",
      value = "marksTab",
      uiOutput("markspanel")
    )
  )
})
# Panel 2: plotting parameters, log output, and generated code.
output$tabsetpanel2UI <- renderUI({
  tabsetPanel(
    id = "tabsetpanel2",
    tabPanel("1. Parameters & Plot",
      value = "paramTab"
      # ,style= "min-width:1366px;max-width:1920px;overflow:auto"
      , uiOutput("parameterPanel")
    ),
    tabPanel("2. Log",
      value = "logTab",
      uiOutput("logpanel")
    ),
    tabPanel("3. code",
      value = "codeTab",
      uiOutput("strpanel")
    )
  )
})
# Panel 4: single search tab.
output$tabsetpanel4UI <- renderUI({
  tabsetPanel(
    id = "tabsetpanel4",
    tabPanel("1. Search",
      value = "searchTab",
      uiOutput("searchPanel")
    )
  )
})
|
dfb3268a688a4df4654e16ff564f8541b021af36
|
14f1f68a61710d38ea9636227329ece397304cdd
|
/r_for_ds/III Program/14_pipes.r
|
acc7b3fcd61e3ca520be35e62be321b4d04efa6f
|
[] |
no_license
|
felix-ha/r_ds
|
27f88d38fb7781668383438eb3600e3ec771775c
|
121906d61e35924435eb68da1bb6b0c510d1dfc9
|
refs/heads/master
| 2020-05-24T22:01:41.698240
| 2019-08-03T11:27:42
| 2019-08-03T11:27:42
| 187,488,967
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 654
|
r
|
14_pipes.r
|
# Demo script for the magrittr pipe operators (%>%, %T>%, %$%, %<>%).
library(magrittr)
diamonds <- ggplot2::diamonds
diamonds2 <- diamonds %>%
  dplyr::mutate(price_per_carat = price / carat)
# Mutated copies share memory with the original: the combined size is far
# less than the sum of the individual sizes.
pryr::object_size(diamonds)
pryr::object_size(diamonds2)
pryr::object_size(diamonds, diamonds2)
# assign() through the pipe does not affect the caller's environment: the
# pipe evaluates its RHS in a temporary environment of its own, so the
# target environment must be passed explicitly.
assign("x", 10)
"x" %>% assign(100)
env <- environment()
"x" %>% assign(100, envir = env)
# Other operators:
# Tee pipe %T>%: forwards its LEFT-hand value onward, so plot() can be
# called for its side effect without breaking the chain.
rnorm(100) %>%
  matrix(ncol = 2) %T>%
  plot() %>%
  str()
# Exposition pipe %$%: "explodes" the variables in a data frame so they can
# be referenced by bare name.
cor(mtcars$disp, mtcars$mpg)
mtcars %$%
  cor(disp, mpg)
# Assignment: plain pipe with <- versus the compound assignment pipe %<>%,
# which pipes and writes the result back to the left-hand variable.
mtcars <- mtcars %>%
  transform(cyl = cyl * 2)
mtcars %<>% transform(cyl = cyl * 2)
|
574b42680d0bae314203c4665bf8f6741ef18172
|
5bed56d19203d621ef28db176ab583f772f0f668
|
/02_Demos/02_UI/03_fonctions_output/plotOutput.R
|
306b0b2b54df165cc38fafef9ebba47f13ff7255
|
[] |
no_license
|
julomoppet/Formation_Shiny
|
5f2e5035b8208a17fd1ad6e317991a37329220be
|
94032dedc21c0f1e9c74b8dd6a9836613571839d
|
refs/heads/master
| 2022-11-17T17:09:43.623523
| 2020-06-29T20:10:30
| 2020-06-29T20:10:30
| 274,126,865
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
plotOutput.R
|
# Demo Shiny app: a slider chooses how many rows of the built-in `cars`
# data are sampled; the sample is re-drawn and plotted on every change.
library(shiny)
library(tidyverse)

# UI: slider on the left, plot on the right.
ui <- fluidRow(
  column(
    width = 6,
    sliderInput("slider",
                label = h3("Choisir le nombre d'observations"),
                min = 1,
                max = 50,
                value = 25)
  ),
  column(
    width = 8,
    plotOutput("plot")
  )
)

# Server: reactively sample `input$slider` rows and plot them.
server <- function(input, output) {
  output$plot <- renderPlot({
    sampled_cars <- sample_n(cars, input$slider)
    plot(sampled_cars, pch = 3, col = "Orange")
  })
}

shinyApp(ui, server)
|
978021700c7cdc84f2bc86b17c705b32e24f012e
|
c830ddad211494ce1d407ee7b2b769e31248db79
|
/Heatmap.R
|
acc6cafe6cd4a89023b895153b281eb2f49d93b4
|
[] |
no_license
|
chetkar/Matrix-Visualization
|
ba7233f105fef97930a842872ff4729fa934db7f
|
005ba9207fcb9e7f4a6b81d5b0725809b89e9033
|
refs/heads/master
| 2021-01-09T06:01:37.458554
| 2017-02-04T23:45:37
| 2017-02-04T23:45:37
| 80,895,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,910
|
r
|
Heatmap.R
|
# Heatmaps of correlation submatrices for simulated mixed-type data.
# Graph visualization tutorial reference:
# http://tutorials.iq.harvard.edu/R/Rgraphics/Rgraphics.html
# Grammar of Graphics components: data, aesthetic mapping, geometric object,
# statistical transformation, scales, coordinate system, position adjustment,
# faceting.
# Set work directory
# NOTE(review): hard-coded absolute Windows path -- this script only runs on
# the original author's machine as written.
wkdir <- "C:\\Users\\Certified Copy\\Workspace\\Guha\\Laplace Approximation\\Variscan-Mixed-LXXY"
setwd(wkdir)
# External file providing SimulateExample()
source("main_base.R")
# Libraries
library(ggplot2)
library(reshape2)
library(RColorBrewer)
library(corrplot)
library(gplots)
# Simulate mixed data: n = 50 observations, p = 150 variables with the given
# proportions per data type -- presumably type 1 = binary/probit, 2 = normal,
# 3 = Poisson, based on the subsetting below; confirm against SimulateExample().
data <- SimulateExample(n=50,p =150, prop.data.type =c(0.33,0.34,0.33,0,0))
# No of Cluster
# 64
# Column indices of each data type in the simulated matrix.
ind_po <- which(data$X$data$data.type == 3)
ind_n <- which(data$X$data$data.type == 2)
ind_p <- which(data$X$data$data.type == 1)
data.poisson <- data$X$data$X[,ind_po]
data.normal <- data$X$data$X[,ind_n]
data.probit <- data$X$data$X[,ind_p]
#######################################################################
# Heatmap
#######################################################################
# 100-step colour ramp interpolated from the 5-colour "Spectral" palette.
mycolor <- colorRampPalette(brewer.pal(5,"Spectral"))(100)
# gplots::heatmap.2 custom heatmaps: only the row dendrogram is drawn, the
# colour key is shown without a density trace, and revC=TRUE mirrors the
# column order so rows and columns line up visually.
# Spearman correlation is used for the binary (probit) submatrix.
heatmap.2(cor(data.normal),revC=TRUE,trace='none', dendrogram ="row",key=TRUE,density.info='none', col=mycolor, main="Heatmap of Correlation Submatrix of Continuous Data")
heatmap.2(cor(data.probit,method="spearman"),revC=TRUE,trace='none', dendrogram ="row",key=TRUE,density.info='none', col=mycolor , main="Heatmap of Correlation Submatrix of Binary Data")
heatmap.2(cor(data.poisson),revC=TRUE,trace='none', dendrogram ="row",key=TRUE,density.info='none', col=mycolor , main="Heatmap of Correlation Submatrix of Count Data")
|
47232584026689dfc3c1a08ebe023dbd54b04c1d
|
cffe264a50bd5eb8f4088e6f618b4dd4bd3cbde3
|
/data_raw/data_cleaning.R
|
54381f8b8fb827da7c9480ac970867a1d09aeaef
|
[] |
no_license
|
zauster/easyMake
|
dc279968f6cb82aba9935d99d64c51418dd049bf
|
c13629fea61c826481cf08cfda39128aff7bf431
|
refs/heads/master
| 2021-01-17T21:12:15.245789
| 2019-05-21T18:56:06
| 2019-05-21T18:56:06
| 65,031,009
| 0
| 0
| null | 2016-08-05T15:54:33
| 2016-08-05T15:54:33
| null |
UTF-8
|
R
| false
| false
| 250
|
r
|
data_cleaning.R
|
# Build-time script: derive the "input"/"output" name vectors from the raw
# CSV and store them as internal package data in R/sysdata.rda.
io <- read.csv("data_raw/import_export_function.csv", stringsAsFactors = FALSE)

# Partition the Name column according to the I.O flag.
is_input <- io$I.O == "input"
is_output <- io$I.O == "output"
input <- as.character(io$Name[is_input])
output <- as.character(io$Name[is_output])

save(list = c("input", "output"), file = "R/sysdata.rda")
|
8cf150ebb6592fe384a6b17e29a896b7ba0ac762
|
e7211133b68aa312c8031558d346f81da7c0f691
|
/R/read1d_raw.R
|
c41364d654bae7544c0a9eda9f31bebe420b26e7
|
[
"MIT"
] |
permissive
|
tkimhofer/metabom8
|
542c7effc322db07bfca555ff130732d63caa088
|
37d04910d6a5e47c0e0702d122f345373949d0d4
|
refs/heads/master
| 2023-07-19T12:31:30.037418
| 2022-11-17T06:56:19
| 2022-11-17T06:56:19
| 262,197,875
| 2
| 7
|
NOASSERTION
| 2023-07-10T15:09:11
| 2020-05-08T01:32:06
|
R
|
UTF-8
|
R
| false
| false
| 9,043
|
r
|
read1d_raw.R
|
#### read fids
#' @title Read-in 1D NMR FIDs and process to spectra
#' @export
#' @param path char, path to file directory containing spectra
#' @param exp_type named list, filter for acquisition paramters of experiments to read-in (see Details)
#' @param apodisation named list, apodisation function and its parameters (see Details)
#' @param zerofil int, amount of zeros to append to FID given as exponent added of base 2 (see Details)
#' @param return char, return mode of spectra: absorption, dispersion or magnitude mode
#' @param verbose num, different verbose levels: 0 (no info), 1 (overview), 2 (detailed), 3 (step-by-step for debugging)
#' @param recursive logic, if TRUE recursively search all subfolders of path for specified NMR files
#' @param n_max int, maximum number of experiments to read-in
#' @param filter logic, remove experiments with incomplete file systems (TRUE is recommended)
#' @details
#' In the first step, read-in are FIDs generated with experimental condition(s) specified with the exp_type argument. This represents a list with each element representing a parameter condition, named according to spectrometer parameters listed in \emph{acqus} file. For example, to read standard 1D NMR experiments use \code{exp_type=list(exp='noesygppr1d')}. More than one argument can be provided as list element.
#'
#' The apodisation argument is a named list specifying the function name in element \emph{fun} and functions-specific paramter arguments. There are the following different apodisation functions and arguments:
#'
#' \itemize{
#' \item exponential, arguments: lb (line broadening factor)
#' \item cosine, no further arguments
#' \item sine, no further arguments
#' \item sem, combined sine-bell - exponential fct: arguments: lb (line broadening factor)
#' }
#'
#' The zerofil argument specifies the amount of zeros to append to the FID and is expressed as exponent addend in the binary numeral system: \code{2^(1+x)}, with x being the zerofil parameter argument. Hence, \code{zerofil=1} doubles the amount of data points.
#'
# @return Three objects: NMR data matrix (2D: rows=spectra, cols=chem shift
# variables), ppm num vector matched to NMR data columns, meta data.frame
# containing spectrometer metadata
#' @return
#' The function exports the following three objects into the currently active R environment (no variable assignments needed):
#' \itemize{
#' \item X, num matrix: NMR data, spectra in rows
#' \item ppm, num array - chemical shift positions, length matches to columns in X
#' \item meta, data.frame - spectrometer metadata as extracted from individual \emph{acqus} files, row-matched to X
#' }
#' Objects in the R environment with the same variable names will be overwritten.
#' @examples
#' path<-system.file('extdata/', package = 'metabom8')
#' read1d_raw(path, exp_type=list(exp='PROF_PLASMA_NOESY'), apodisation=list(fun='exponential', lb=0.2), n_max=3)
#' @author \email{torben.kimhofer@@murdoch.edu.au}
# @seealso \code{\reference{read1d}}
#' @importFrom stats approxfun
#' @family NMR
#' @seealso \code{\link[=read1d]{Import TopSpin processed spectra}}
#' @section
read1d_raw <- function(path, exp_type = list(exp = c("PROF_PLASMA_CPMG128_3mm"),
    pulprog = c("noesygppr1d")), apodisation = list(fun = "exponential", lb = 0.2),
    zerofil = 1L, return = "absorption", verbose = 1, recursive = TRUE, n_max = 1000,
    filter = TRUE) {
    # NOTE(review): the argument is named 'return', shadowing base::return as
    # a variable name; assignments below target the local argument only.
    path <- path.expand(path)
    # Unknown 'return' values fall back to absorption mode with a message.
    if (!return %in% c("absorption", "dispersion", "magnitude")) {
        return <- "absorption"
        message("Check argument type. Returning absorption spectrum.")
    }
    if (verbose > 1) {
        message("Searching for spectral data...")
    }
    # check file system intact
    f_list <- .check1d_files_fid(path, n_max, filter, recursive, verbose)
    if (verbose > 1) {
        message("Found", paste(length(f_list[[1]]), "experiments files in path."))
    }
    if (verbose > 1) {
        message("Extracting spectrometer meta-data.")
    }
    # Acquisition parameters (acqus) for every experiment found.
    pars <- .extract_acq_pars1d(f_list)
    if (verbose > 1) {
        message("Filtering for experiments using user-defined parameters (ext_type argument)")
    }
    exp_filt <- .filterExp_files(pars, exp_type, f_list)
    f_list <- exp_filt[[1]]
    pars <- exp_filt[[2]]
    if (verbose > 0) {
        message("Reading ", length(f_list[[1]]), " experiments.")
    }
    # chem shift: shared reference ppm axis derived from the first experiment,
    # shifted so that 0 ppm lies exactly on the grid.
    if (verbose >= 2) {
        message("Defining chemical shift axis.")
    }
    ppm_ref <- .defineChemShiftPpm(pars$a_SFO1[1], pars$a_SW_h[1], pars$a_TD[1],
        dref = 4.79, ref = TRUE)[, 1] # 4.79: distance water to TSP
    ppm_ref <- ppm_ref - ppm_ref[which.min(abs(ppm_ref - 0))]
    # All experiments must share the same time-domain size and group delay.
    if (length(unique(pars$a_TD)) > 2 || length(unique(pars$a_GRPDLY)) > 1) {
        stop("Number of points collected in time domain is unqual across experiments.")
    }
    if (verbose >= 2) {
        message("Defining apidisation function.")
    }
    # Apodisation window sized after removing the group-delay points.
    apoFct <- .fidApodisationFct(n = (pars$a_TD[1] - (floor(pars$a_GRPDLY[1]) * 2)),
        apodisation) # subtract group delay from TD (digital filtering artefact)
    if (verbose > 1) {
        message("Reading FIDs and transform to spectra.")
    }
    # read in binary file and process each FID to a spectrum interpolated onto
    # the common ppm grid; vapply guarantees one numeric column per experiment.
    out <- vapply(seq(f_list[[1]]), function(s, pref = ppm_ref, afun = apoFct, zf = zerofil) {
        if (verbose > 1) {
            message(f_list[[1]][s])
        }
        # Byte-order / data-type codes as stored in the acqus parameters.
        byteorda <- c(little = 0, big = 1)
        dtypa <- c(int = 0, double = 2)
        if (verbose == 3) {
            message("Read FID")
        }
        fid <- readBin(paste0(f_list[[1]][s], .Platform$file.sep, "fid"), what = names(dtypa)[match(pars$a_DTYPA[s],
            dtypa)], n = pars$a_TD[s], size = 4L, endian = names(byteorda)[match(pars$a_BYTORDA[s],
            byteorda)])
        # Undo the NC scaling exponent recorded at acquisition.
        fid <- (fid * (2^(-1*pars$a_NC[s])))
        # remove group delay points
        if (verbose == 3) {
            message("Adjust for group delay (=dig filter)")
        }
        if (pars$a_DSPFVS[s] < 20) {
            stop("Implement group delay digital filter correction ofr DSP firmware <20 (DSPFVS & DECIM")
        }
        fid_corF <- fid[-(seq(floor(pars$a_GRPDLY[s]) * 2))]
        # apodisation
        if (verbose == 3) {
            message("Apodisation fct")
        }
        if (length(afun) != length(fid_corF)) {
            #browser()
            message("Apd fct mismatch in length w spec")
        }
        spec_lb <- fid_corF * afun
        if (!is.integer(zf)) {
            stop("Zerofil argument nees to be an integer as it is summand of exponent in log2 space (ie., zerofil=1 doubles , zerofil=2 quadruples the number of data points.")
        }
        # zerofill, fft
        if (verbose == 3) {
            message("Zero-fill, fft")
        }
        spec_zf <- .zerofil(fid = spec_lb, zf = zf, le_ori = length(fid))
        sp <- .cplxFft(spec_zf)[, 1]
        sp_re <- Re(sp)
        sp_im <- Im(sp)
        # NOTE(review): "magnitude" is computed as Re + Im here, not
        # sqrt(Re^2 + Im^2) -- confirm this is intended.
        sp_mag <- sp_re + sp_im
        # define ppm
        if (verbose == 3) {
            message("Establish indiv. ppm scale")
        }
        ppm <- .defineChemShiftPpm(pars$a_SFO1[s], pars$a_SW_h[s], length(sp_re),
            dref = 4.79, ref = FALSE) # 4.79: distance water to TSP
        # phasing: flip the sign when the lower third of the spectrum is
        # dominated by negative intensities, then phase on the TSP region.
        if (verbose == 3) {
            message("\tPhasing")
        }
        if (abs(min(sp_re[0:(length(sp_re)/3)])) > max(sp_re[0:(length(sp_re)/3)])) {
            sp_re <- sp_re * (-1)
        }
        sp_re <- .phaseTsp(sp_re, sp_im, ppm, seq(0, pi, by = 0.01), 0, idx_tsp = get.idx(c(-0.15,
            0.15), ppm) - 1)[, 1]
        if (abs(min(sp_re[0:(length(sp_re)/3)])) > max(sp_re[0:(length(sp_re)/3)])) {
            sp_re <- sp_re * (-1)
        }
        # calibration
        if (verbose == 3) {
            message("Calibrate w TSP")
        }
        ppm <- .calibTsp(sp_re, ppm)
        # Select the requested representation of the spectrum.
        switch(return, absorption = {
            sp_out <- sp_re
        }, dispersion = {
            sp_out <- sp_im
        }, magnitude = {
            sp_out <- sp_mag
        })
        if (verbose == 3) {
            message("Approx. spec to common ppm scale")
        }
        # Interpolate the spectrum onto the shared reference ppm grid.
        fspec <- approxfun(ppm, sp_out)
        spec_out <- fspec(pref)
        if (verbose == 3) {
            message("###")
        }
        return(spec_out)
    }, FUN.VALUE = ppm_ref)
    # One spectrum per row, chemical shift positions as column names.
    out <- t(out)
    colnames(out) <- ppm_ref
    if (verbose == 3) {
        message("Prep rownames for X and ppm")
    }
    # Row names: keep only the path components that differ between experiments.
    fnam <- strsplit(f_list[[1]], .Platform$file.sep)
    idx_keep <- which((apply(do.call(rbind, fnam), 2, function(x) length(unique(x)))) >
        1)
    fnam <- vapply(fnam, function(x, st = idx_keep) {
        paste(x[idx_keep], collapse = .Platform$file.sep)
    }, FUN.VALUE = "")
    rownames(out) <- fnam
    rownames(pars) <- fnam
    if (verbose > 0) {
        message("Adding objects X, ppm and meta to the global workspace.")
    }
    # Side effect: results are exported into the caller's global environment
    # rather than returned.
    assign("X", out, envir = .GlobalEnv)
    assign("ppm", ppm_ref, envir = .GlobalEnv)
    assign("meta", pars, envir = .GlobalEnv)
}
|
2feb0a35dcab0debb647b486e59ee9e34ab138d3
|
2f212439e7d9a15b814c1b61ebc8626945cf59a8
|
/man/EMcenso.Rd
|
04c400cf0eade5f9d1a9c79e4850cf60c823be21
|
[] |
no_license
|
freephys/mcsm
|
7d0b8e13d4faacdd52b5e91d95b5bc500c1d8c10
|
b15b367fdd409985518bd640119463a1ab3a2897
|
refs/heads/master
| 2020-12-30T20:05:52.929932
| 2009-02-26T00:00:00
| 2009-02-26T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 459
|
rd
|
EMcenso.Rd
|
\name{EMcenso}
\alias{EMcenso}
\title{EM paths for a censored normal model}
\description{
This function produces a series of EM paths for a censored normal model.
}
\usage{
EMcenso(repp = 10)
}
\arguments{
\item{repp}{Number of paths}
}
\value{
The outcome of this function is a plot.
}
\references{From Chapter 5 of \strong{EnteR Monte Carlo Statistical Methods}}
\author{Christian P. Robert and George Casella}
\examples{
EMcenso(45)
}
\keyword{optimize}
|
5e14828218509d1dae8793ed9efb74b678f4e788
|
07a74984bf59ce4486e1bcaefafb8ce692b50d5a
|
/man/add_text.Rd
|
494c68df4d24fd5a96d3a53d95d680809c572095
|
[] |
no_license
|
SymbolixAU/mapdeck
|
c3bc3a61b8d8ade69b9b67fa69a00f9294281630
|
6138c6845e37ab3479e4ff65d9b0fff29e20f070
|
refs/heads/master
| 2023-09-03T22:34:43.418728
| 2023-08-24T22:14:59
| 2023-08-24T22:14:59
| 141,350,341
| 344
| 50
| null | 2023-08-09T22:22:59
| 2018-07-17T22:06:34
|
HTML
|
UTF-8
|
R
| false
| true
| 6,553
|
rd
|
add_text.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_layer_text.R
\name{add_text}
\alias{add_text}
\title{Add Text}
\usage{
add_text(
map,
data = get_map_data(map),
text,
lon = NULL,
lat = NULL,
polyline = NULL,
fill_colour = NULL,
fill_opacity = NULL,
size = NULL,
angle = NULL,
anchor = NULL,
alignment_baseline = NULL,
billboard = TRUE,
font_family = "Monaco, monospace",
font_weight = "normal",
tooltip = NULL,
layer_id = NULL,
id = NULL,
auto_highlight = FALSE,
highlight_colour = "#AAFFFFFF",
palette = "viridis",
na_colour = "#808080FF",
legend = FALSE,
legend_options = NULL,
legend_format = NULL,
update_view = TRUE,
focus_layer = FALSE,
digits = 6,
transitions = NULL,
brush_radius = NULL,
collision_filter = FALSE
)
}
\arguments{
\item{map}{a mapdeck map object}
\item{data}{data to be used in the layer. All coordinates are expected to be
EPSG:4326 (WGS 84) coordinate system}
\item{text}{column of \code{data} containing the text. The data must be a character.}
\item{lon}{column containing longitude values}
\item{lat}{column containing latitude values}
\item{polyline}{optional column of \code{data} containing the polylines, if using encoded polylines}
\item{fill_colour}{column of \code{data} or hex colour for the fill colour.
If using a hex colour, use either a single value, or a column of hex colours on \code{data}}
\item{fill_opacity}{Either a string specifying the column of \code{data}
containing the opacity of each shape, or a single value in [0,255], or [0, 1),
to be applied to all the shapes. Default 255. If a hex-string is used as the
colour, this argument is ignored and you should include the alpha on the hex string}
\item{size}{column of \code{data} containing the size of the text. Default 32}
\item{angle}{column of \code{data} containging the angle of the text. Default 0}
\item{anchor}{column of \code{data} containing the anchor of the text. One of
'start', 'middle' or 'end'}
\item{alignment_baseline}{column of \code{data} containing the alignment. One of
'top', 'center' or 'bottom'}
\item{billboard}{logical indicating if the text always faces the camera (TRUE) or
if it always faces up (FALSE)}
\item{font_family}{specifies a prioritised list of one or more font family names and/or
generic family names. Follow the specifics for CSS font-family
\url{https://developer.mozilla.org/en-US/docs/Web/CSS/font-family}}
\item{font_weight}{specifies the font weight. Follow the specifics for CSS font-weight
\url{https://htmldog.com/references/css/properties/font-weight/}}
\item{tooltip}{variable of \code{data} containing text or HTML to render as a tooltip}
\item{layer_id}{single value specifying an id for the layer. Use this value to
distinguish between shape layers of the same type. Layers with the same id are likely
to conflict and not plot correctly}
\item{id}{an id value in \code{data} to identify layers when interacting in Shiny apps.}
\item{auto_highlight}{logical indicating if the shape under the mouse should auto-highlight}
\item{highlight_colour}{hex string colour to use for highlighting. Must contain the alpha component.}
\item{palette}{string or matrix. String will be one of \code{colourvalues::colour_palettes()}.
A matrix must have at least 5 rows, and 3 or 4 columns of values between [0, 255],
where the 4th column represents the alpha. You can use a named list to specify a different
palette for different colour options (where available),
e.g. list(fill_colour = "viridis", stroke_colour = "inferno")}
\item{na_colour}{hex string colour to use for NA values}
\item{legend}{either a logical indicating if the legend(s) should be displayed, or
a named list indicating which colour attributes should be included in the legend.}
\item{legend_options}{A list of options for controlling the legend.}
\item{legend_format}{A list containing functions to apply to legend values. See section legend}
\item{update_view}{logical indicating if the map should update the bounds to include this layer}
\item{focus_layer}{logical indicating if the map should update the bounds to only include this layer}
\item{digits}{number of digits for rounding coordinates}
\item{transitions}{list specifying the duration of transitions.}
\item{brush_radius}{radius of the brush in metres. Default NULL. If supplied,
the arcs will only show if the origin or destination are within the radius of the mouse.
If NULL, all arcs are displayed}
}
\description{
The Text Layer renders text labels on the map
}
\details{
\code{add_text} supports POINT and MULTIPOINT sf objects
}
\section{transitions}{
The transitions argument lets you specify the time it will take for the shapes to transition
from one state to the next. Only works in an interactive environment (Shiny)
and on WebGL-2 supported browsers and hardware.
The time is in milliseconds
Available transitions for text
list(
position = 0,
fill_colour = 0,
angle = 0,
size = 0
)
}
\section{legend}{
The \code{legend_options} can be used to control the appearance of the legend.
This should be a named list, where the names are one of
\itemize{
\item{css - a string of valid \code{css} for controlling the appearance of the legend}
\item{title - a string to use for the title of the legend}
\item{digits - number to round the legend values to}
}
If the layer allows different fill and stroke colours, you can use different options for each. See examples in \link{add_arc}.
The \code{legend_format} can be used to control the format of the values in the legend.
This should be a named list, where the names are one of
\itemize{
\item{fill_colour}
\item{stroke_colour}
}
depending on which type of colouring the layer supports.
The list elements must be functions to apply to the values in the legend.
}
\section{id}{
The \code{id} is returned to your R session from an interactive shiny environment
by observing layer clicks. This is useful for returning the data.frame row relating to the
clicked shape.
From within a shiny server you would typically use \code{ observeEvent({input$map_arc_click})},
where 'map' is the map_id supplied to \code{mapdeckOutput()}, and 'arc' is the layer
you are clicking on
}
\examples{
\donttest{
## You need a valid access token from Mapbox
key <- 'abc'
set_token( key )
mapdeck(
style = mapdeck_style('dark')
) \%>\%
add_text(
data = capitals
, lon = 'lon'
, lat = 'lat'
, fill_colour = 'country'
, text = 'capital'
, layer_id = 'text'
)
}
}
|
3a37c871ab8e76bb00558a7c187c9c487df6a7c6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qtl/examples/effectscan.Rd.R
|
f140156e38fbd0aa4402473d9e7cf4f316f922cc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 467
|
r
|
effectscan.Rd.R
|
# Extracted example code for qtl::effectscan.
library(qtl)
### Name: effectscan
### Title: Plot estimated QTL effects across the whole genome
### Aliases: effectscan
### Keywords: hplot
### ** Examples
data(fake.f2)
## Don't show:
# Restrict to three chromosomes to keep the example fast.
fake.f2 <- subset(fake.f2, chr=c(1,13,"X"))
## End(Don't show)
# Impute genotypes on a 2.5 cM grid with 16 imputations.
fake.f2 <- sim.geno(fake.f2, step=2.5, n.draws=16)
# allelic effect on whole genome
effectscan(fake.f2)
# on chromosome 13, include standard errors
effectscan(fake.f2, chr="13", mtick="triangle", get.se=TRUE)
|
2692d8a051dc58853b378e332aff2618b4f526ec
|
2eba14c211813af8791ae86476e8b0e6ce43b3be
|
/man/dot-jsonV.Rd
|
5495d1bebabfceb53ac330af4a579f5dc65a9706
|
[
"MIT"
] |
permissive
|
frankkramer-lab/RCX
|
2fe89e145a8f0f7508895d84f585a24fefdd11f3
|
e17e5e4ce4b1ab545a9ed68bbf9c9a24eaf97041
|
refs/heads/master
| 2023-02-04T04:49:15.229622
| 2023-01-26T17:32:00
| 2023-01-26T17:32:00
| 99,221,216
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 788
|
rd
|
dot-jsonV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utils-json.R
\name{.jsonV}
\alias{.jsonV}
\title{Return data as a vector from a JSON list}
\usage{
.jsonV(data, acc, default = NA, returnAllDefault = TRUE)
}
\arguments{
\item{data}{json list}
\item{acc}{accession name}
\item{default}{default return value}
\item{returnAllDefault}{whether to return the vector if all values are the default value (or \code{NULL} instead)}
}
\value{
vector
}
\description{
Return data as a vector from a JSON list
}
\note{
Internal function only for convenience
}
\examples{
testData = list(list(n="CDKN1B"),
list(n="ROCK1", r="BLA"),
list(n="SHC1", r="BLUBB"),
list(n="IRS1"))
RCX:::.jsonV(testData, "r")
}
\keyword{internal}
|
34108f72509c6dd3d11ce389e0fbde97895e4d8b
|
9231c91ab94d7f745360bcc41db8b855dd1ab384
|
/R/writeHDF5Array.R
|
8939bfebd7ff63eccef07bcabbba5a4543c0eb26
|
[] |
no_license
|
Bioconductor/HDF5Array
|
69199ded2b709fb452e9cbf960a1174de6545edc
|
41fe4b17c7822a1d29f0bf03d89c79aabce94bcc
|
refs/heads/devel
| 2023-09-03T07:01:23.767189
| 2023-05-04T20:27:16
| 2023-05-04T20:27:16
| 101,208,679
| 10
| 15
| null | 2023-08-16T09:58:22
| 2017-08-23T17:43:11
|
C
|
UTF-8
|
R
| false
| false
| 8,748
|
r
|
writeHDF5Array.R
|
### =========================================================================
### writeHDF5Array()
### -------------------------------------------------------------------------
###
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### HDF5RealizationSink objects
###
### The HDF5RealizationSink class is a concrete RealizationSink subclass that
### implements an HDF5Array realization sink.
###
### Formal S4 class: a realization sink whose destination is an HDF5 dataset.
setClass("HDF5RealizationSink",
    contains="RealizationSink",
    representation(
        ## Slots that support the RealizationSink constructor contract.
        dim="integer", # Naming this slot "dim" makes dim() work
                                   # out of the box.
        dimnames="list",
        type="character", # Single string.
        as_sparse="logical", # TRUE or FALSE.
        ## Other slots.
        filepath="character", # Single string.
        name="character", # Dataset name.
        chunkdim="integer_OR_NULL" # An integer vector parallel to the 'dim'
                                   # slot or NULL.
    )
)
### Return the sink's dimnames, or NULL when no dimension is named.
setMethod("dimnames", "HDF5RealizationSink",
    function(x)
    {
        dn <- x@dimnames
        all_unset <- all(S4Vectors:::sapply_isNULL(dn))
        if (all_unset) NULL else dn
    }
)
### One-line slot accessors required by the RealizationSink API.
setMethod("type", "HDF5RealizationSink", function(x) x@type)
setMethod("chunkdim", "HDF5RealizationSink", function(x) x@chunkdim)
setMethod("is_sparse", "HDF5RealizationSink", function(x) x@as_sparse)
### Validate and normalize a user-supplied 'chunkdim' against the dataset
### dimensions 'dim'. NAs mean "use the full extent of that dimension".
### Returns an integer vector parallel to 'dim'.
.normarg_chunkdim <- function(chunkdim, dim)
{
    ## Accept a numeric vector, or an all-NA logical vector (what a bare
    ## 'NA' literal produces).
    is_all_na_logical <- is.logical(chunkdim) && all(is.na(chunkdim))
    if (!(is.numeric(chunkdim) || is_all_na_logical))
        stop(wmsg("'chunkdim' must be NULL or an integer vector"))
    if (!is.integer(chunkdim))
        chunkdim <- as.integer(chunkdim)
    if (length(chunkdim) != length(dim))
        stop(wmsg("'chunkdim' must be an integer vector of length ",
                  "the number of dimensions of the object to write"))
    ## No chunk may be bigger than the corresponding dimension.
    if (!all(chunkdim <= dim, na.rm=TRUE))
        stop(wmsg("the chunk dimensions specified in 'chunkdim' exceed ",
                  "the dimensions of the object to write"))
    ## Zero-sized chunks are only allowed along zero-sized dimensions.
    if (any(chunkdim == 0L & dim != 0L, na.rm=TRUE))
        stop(wmsg("'chunkdim' must contain nonzero values unless ",
                  "the zero values correspond to dimensions in the ",
                  "object to write that are also zero"))
    ## Replace NAs with the full extent of the matching dimension.
    idx_na <- which(is.na(chunkdim))
    chunkdim[idx_na] <- dim[idx_na]
    ## A single chunk must be indexable with an R integer.
    if (prod(chunkdim) > .Machine$integer.max)
        stop(wmsg("The chunk dimensions in 'chunkdim' are too big. The ",
                  "product of the chunk dimensions should always be <= ",
                  ".Machine$integer.max"))
    chunkdim
}
### Constructor. Creates the (empty) destination HDF5 dataset on disk and
### returns an HDF5RealizationSink pointing at it.
### Note that the supplied 'as.sparse' value is stored in the 'as_sparse'
### slot of the returned object, and that's all. It doesn't change how the
### data will be laid out to the HDF5 file in anyway (HDF5 doesn't support
### sparse storage at the moment). The only reason we store the supplied
### 'as.sparse' value in the object is so that we can propagate it later
### when we coerce the object to HDF5ArraySeed.
### Unlike with rhdf5::h5createDataset(), if 'chunkdim' is NULL then an
### automatic chunk geometry will be used. To write "unchunked data" (a.k.a.
### contiguous data), 'chunkdim' must be set to 0.
HDF5RealizationSink <- function(dim, dimnames=NULL, type="double",
                                as.sparse=FALSE,
                                filepath=NULL, name=NULL,
                                H5type=NULL, size=NULL,
                                chunkdim=NULL, level=NULL)
{
    if (!isTRUEorFALSE(as.sparse))
        stop(wmsg("'as.sparse' must be TRUE or FALSE"))
    ## Default destination: the current HDF5 dump file / dump dataset name.
    if (is.null(filepath)) {
        filepath <- getHDF5DumpFile(for.use=TRUE)
    } else {
        filepath <- normalize_dump_filepath(filepath)
    }
    if (is.null(name)) {
        name <- getHDF5DumpName(for.use=TRUE)
    } else {
        name <- normalize_dump_name(name)
    }
    ## Chunk geometry: NULL -> automatic; scalar 0 -> contiguous (no
    ## chunking); otherwise validated/normalized by .normarg_chunkdim().
    if (is.null(chunkdim)) {
        ## TODO: Pass 'x' instead of 'dim' to getHDF5DumpChunkDim() and modify
        ## getHDF5DumpChunkDim() to return 'chunkdim(x)' if it's not NULL.
        ## See TODO comment in dump-management.R
        chunkdim <- getHDF5DumpChunkDim(dim)
    } else if (isSingleNumber(chunkdim) && chunkdim == 0) {
        chunkdim <- NULL # no chunking
    } else {
        chunkdim <- .normarg_chunkdim(chunkdim, dim)
    }
    ## Compression level: contiguous data gets level 0 by default.
    if (is.null(level)) {
        if (is.null(chunkdim)) {
            level <- 0L
        } else {
            level <- getHDF5DumpCompressionLevel()
        }
    } else {
        level <- normalize_compression_level(level)
    }
    ## Create the dataset on disk and record it in the dump log.
    create_and_log_HDF5_dataset(filepath, name, dim,
                                type=type, H5type=H5type, size=size,
                                chunkdim=chunkdim, level=level)
    ## Dimnames are written to the file immediately (not kept in memory only).
    if (is.null(dimnames)) {
        dimnames <- vector("list", length(dim))
    } else {
        h5writeDimnames(dimnames, filepath, name)
    }
    new2("HDF5RealizationSink", dim=dim, dimnames=dimnames, type=type,
         as_sparse=as.sparse,
         filepath=filepath, name=name,
         chunkdim=chunkdim)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Writing data to an HDF5RealizationSink object
###
### Write one block of data into the region of the HDF5 dataset selected by
### 'viewport'. Returns the sink, as required by the write_block() contract.
setMethod("write_block", "HDF5RealizationSink",
    function(sink, viewport, block)
    {
        ## h5write() expects an ordinary array.
        if (!is.array(block))
            block <- as.array(block)
        region_start <- start(viewport)
        region_count <- width(viewport)
        h5write(block, sink@filepath, sink@name,
                start=region_start, count=region_count)
        sink
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercing an HDF5RealizationSink object
###
### Cheap coercions (no data is written): wrap the sink's on-disk dataset as
### a seed / delayed array; the 'as_sparse' flag is propagated to the seed.
setAs("HDF5RealizationSink", "HDF5ArraySeed",
    function(from) HDF5ArraySeed(from@filepath, from@name,
                                 as.sparse=from@as_sparse)
)
setAs("HDF5RealizationSink", "HDF5Array",
    function(from) DelayedArray(as(from, "HDF5ArraySeed"))
)
setAs("HDF5RealizationSink", "DelayedArray",
    function(from) DelayedArray(as(from, "HDF5ArraySeed"))
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### writeHDF5Array()
###
### If 'filepath' and 'name' are NULL (the default), write the dataset to
### the current dump.
### If 'chunkdim' is NULL, an automatic chunk geometry will be used.
### To write "unchunked data" (a.k.a. contiguous data), 'chunkdim' must be
### set to 0.
### Return an HDF5Array object pointing to the newly written HDF5 dataset
### on disk.
writeHDF5Array <- function(x, filepath=NULL, name=NULL,
                           H5type=NULL, chunkdim=NULL, level=NULL,
                           as.sparse=NA,
                           with.dimnames=FALSE, verbose=NA)
{
    if (!(is.logical(as.sparse) && length(as.sparse) == 1L))
        stop(wmsg("'as.sparse' must be NA, TRUE or FALSE"))
    if (!isTRUEorFALSE(with.dimnames))
        stop("'with.dimnames' must be TRUE or FALSE")
    verbose <- DelayedArray:::normarg_verbose(verbose)
    ## NA means "inherit the sparsity of the input object".
    if (is.na(as.sparse))
        as.sparse <- is_sparse(x)
    sink_dimnames <- if (with.dimnames) dimnames(x) else NULL
    ## compute_max_string_size() will trigger block processing if 'x' is a
    ## DelayedArray object of type "character", so it could take a while.
    size <- compute_max_string_size(x)
    ## Create the destination dataset, then stream 'x' into it block by block.
    sink <- HDF5RealizationSink(dim(x), sink_dimnames, type(x), as.sparse,
                                filepath=filepath, name=name,
                                H5type=H5type, size=size,
                                chunkdim=chunkdim, level=level)
    sink <- BLOCK_write_to_sink(sink, x, verbose=verbose)
    as(sink, "HDF5Array")
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion to HDF5Array
###
### The methods below write the object to disk. Note that coercion from
### HDF5RealizationSink to HDF5Array is already taken care of by the specific
### method above and doesn't write anything to disk. So coercing to HDF5Array
### in general writes the object to disk *except* when the object to coerce is
### an HDF5RealizationSink object.
###
### Write to current dump. Realizes any array-like object to the current
### HDF5 dump and returns the resulting HDF5Array (dimnames preserved).
.as_HDF5Array <- function(from) writeHDF5Array(from, with.dimnames=TRUE)
setAs("ANY", "HDF5Array", .as_HDF5Array)
### Automatic coercion methods from DelayedArray to HDF5Array and from
### DelayedMatrix to HDF5Matrix silently return broken objects (unfortunately
### these dummy automatic coercion methods don't bother to validate the object
### they return). So we overwrite them.
setAs("DelayedArray", "HDF5Array", .as_HDF5Array)
setAs("DelayedMatrix", "HDF5Matrix", .as_HDF5Array)
|
e39d8bc88a9069801d0bf7d10137ab609d05d9ca
|
823eb9fa96390e7c64b112ec054a0371331bda3f
|
/03-Reproductible/F-STUDY-05-Reproducible-Analysis-01-Assignement-test.r
|
ae702dd5b6c71b8b68bf5623171ee5c129fee90f
|
[] |
no_license
|
AlexSickert/R-Code-Snippets
|
c3bb5ba04ce7b06343ef933ed40d96a8dcfd6157
|
8e01a1bfef2842c80f39461c329f5bcae93d964a
|
refs/heads/master
| 2020-12-02T08:17:03.325768
| 2017-07-10T16:52:25
| 2017-07-10T16:52:25
| 96,800,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 931
|
r
|
F-STUDY-05-Reproducible-Analysis-01-Assignement-test.r
|
###library(plyr)
library(timeDate)
# Read the activity data (columns: steps, date, interval).
myData <-read.csv("activity.csv", sep = ",", header = TRUE, as.is = TRUE)
head(myData)
# Mean number of steps per 5-minute interval, averaged across all days.
res <- aggregate(myData["steps"], by=myData[c("interval")], FUN=mean, na.rm=TRUE)
print(res)
# Interval with the highest average step count.
rowIndex = which.max( res[,2] )
print(res[rowIndex,])
# Number of missing step values.
sum(is.na(myData$steps))
# Flag weekend days.
myData$day <- isWeekend(as.Date(myData$date))
# NOTE(review): 'myFilledData' is used below but is never created in this
# script -- presumably it is 'myData' with the missing steps imputed; confirm
# against the full assignment code before running.
head(myFilledData)
myFilledData$weekend <- factor(isWeekend(as.Date(myFilledData$date)))
head(myFilledData)
## print(myFilledData)
plot(myFilledData$interval, myFilledData$steps)
##plot.ts(myFilledData$interval, myFilledData$steps, plot.type = "multiple")
library(lattice)
library(datasets)
# Relabel the weekend factor levels for the panel plot below.
myFilledData$weekend <- factor(myFilledData$weekend, labels = c("Weekday", "Weekend"))
res <- aggregate(myFilledData["steps"], by=myFilledData[c("interval", "weekend")], FUN=mean, na.rm=TRUE)
print(res)
# Average steps per interval, one panel per weekday/weekend.
xyplot(steps ~ interval | weekend , data = res, type = "l", layout = c(1,2))
|
edb049611d25f3ae8b4bcbbcc8413fb8700bd036
|
b3f764c178ef442926a23652c4848088ccd40dca
|
/man/CovControlMcd.Rd
|
a640f356022284ef8e73551826a29a6aa618101f
|
[] |
no_license
|
armstrtw/rrcov
|
23e7642ff2fd2f23b676d4ad8d5c451e89949252
|
684fd97cdf00750e6d6fd9f9fc4b9d3d7a751c20
|
refs/heads/master
| 2021-01-01T19:51:52.146269
| 2013-07-24T18:18:24
| 2013-07-24T18:18:24
| 11,597,037
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,036
|
rd
|
CovControlMcd.Rd
|
\name{CovControlMcd}
\alias{CovControlMcd}
%
\title{ Constructor function for objects of class "CovControlMcd"}
\description{
This function will create a control object \code{CovControlMcd}
containing the control parameters for \code{CovMcd}
}
\usage{
CovControlMcd(alpha = 0.5, nsamp = 500, seed = NULL, trace= FALSE, use.correction = TRUE)
}
\arguments{
\item{alpha}{numeric parameter controlling the size of the subsets
over which the determinant is minimized, i.e., \code{alpha*n}
observations are used for computing the determinant. Allowed values
are between 0.5 and 1 and the default is 0.5.}
\item{nsamp}{ number of subsets used for initial estimates or \code{"best"}
or \code{"exact"}. Default is \code{nsamp = 500}. For
\code{nsamp="best"} exhaustive enumeration is done, as long as the
number of trials does not exceed 5000. For \code{"exact"},
exhaustive enumeration will be attempted however many samples are
needed. In this case a warning message will be displayed saying
that the computation can take a very long time.}
\item{seed}{starting value for random generator. Default is \code{seed = NULL}}
\item{trace}{whether to print intermediate results. Default is \code{trace = FALSE}}
\item{use.correction}{ whether to use finite sample correction factors.
Default is \code{use.correction=TRUE}}
}
%\details{}
\value{
A \code{CovControlMcd} object
}
\references{
Todorov V & Filzmoser P (2009),
An Object Oriented Framework for Robust Multivariate Analysis.
\emph{Journal of Statistical Software}, \bold{32}(3), 1--47.
URL \url{http://www.jstatsoft.org/v32/i03/}.
}
%\note{}
\author{Valentin Todorov \email{valentin.todorov@chello.at}}
%\seealso{}
\examples{
## the following two statements are equivalent
ctrl1 <- new("CovControlMcd", alpha=0.75)
ctrl2 <- CovControlMcd(alpha=0.75)
data(hbk)
CovMcd(hbk, control=ctrl1)
}
\keyword{classes}
\keyword{robust}
\keyword{multivariate}
|
a58d73128e6634e21ea8ce1d7ca218ec61b035dc
|
ce8ec3a112edfb9865f04c321261da5aac2f9082
|
/R/CLAYs.R
|
5b9da1edf5b00d2858e543b282ca2258204e8ad4
|
[
"MIT"
] |
permissive
|
DrRoad/mapsRinteractive
|
14c7c01756bcd4a4dcf74af3bdb7b0448db92cef
|
5ac39688187556379ae58cf419a4015d3c1f2654
|
refs/heads/master
| 2022-04-10T09:24:25.159197
| 2020-03-05T19:28:57
| 2020-03-05T19:28:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 723
|
r
|
CLAYs.R
|
#' @title SLU farm (Brogarden) soil sample data - topsoil clay content
#'
#' @usage data(CLAYs)
#'
#' @format SpatialPointsDataFrame. Projected coordinate system: Sweref99TM
#' (epsg: 3006). Attribute: Lab analyzed topsoil (0-20 cm depth)
#' clay content (% of fine soil).
#'
#' @keywords datasets
#'
#' @references Piikki, K., Wetterlind, J., Söderström, M., & Stenberg, B. (2015).
#' Three-dimensional digital soil mapping of agricultural fields by integration
#' of multiple proximal sensor data obtained from different sensing methods.
#' Precision agriculture, 16(1), 29-45. https://doi.org/10.1007/s11119-014-9381-6
#'
#' @examples
#' data(CLAYs)
#' raster::plot(CLAYs)
#'
#'@export data(CLAYs)
# NOTE(review): '@export data(CLAYs)' is malformed roxygen -- datasets are
# normally documented by quoting the object name ("CLAYs") after the block and
# are not exported; confirm intent before changing the NAMESPACE directive.
|
aa0abce9fcf263f854267344a25a2e5badea5fe3
|
b2f436cf3b287396f4d30cfde682f7a72a387c5e
|
/tests/testthat/test_as_seqData.R
|
98ab2daabed027175d69c6088709554993341c52
|
[
"BSD-2-Clause"
] |
permissive
|
pmartR/pmartR
|
840c6f6409aa9de503aab0aa482ddb6fe7d1b7e0
|
14fcb28a766a85fe7aed74c7456c7eb4df61cdaf
|
refs/heads/master
| 2023-08-17T13:56:46.883961
| 2023-08-16T23:46:40
| 2023-08-16T23:46:40
| 69,275,428
| 25
| 10
|
NOASSERTION
| 2023-09-12T17:17:51
| 2016-09-26T17:38:26
|
R
|
UTF-8
|
R
| false
| false
| 18,476
|
r
|
test_as_seqData.R
|
context('class: seqData')
### TO do: add emeta
# Exercises as.seqData (RNA-seq count-data constructor): the happy path,
# attribute bookkeeping, mismatched e_data/f_data/e_meta inputs, non-integer
# and non-'counts' data, NA handling, and argument-class validation.
test_that('as.seqData returns the correct data frame and attributes', {
# Load the reduced peptide data frames ---------------------------------------
load(system.file('testdata',
'little_seqdata.RData',
package = 'pmartR'
))
# NOTE(review): the .RData file is assumed to define `edata` and `fdata` used
# throughout this test -- confirm against pmartR's testdata contents.
# Run as.seqData with agreeable data frames ----------------------------------
# Construct a seqData object with the edata, fdata, and emeta data frames.
seqdata <- as.seqData(
e_data = edata,
f_data = fdata,
edata_cname = 'ID_REF',
fdata_cname = 'Samples'
)
# Check high level structure
expect_equal(names(seqdata), c("e_data", "f_data", "e_meta"))
# Ensure the returned data frames are the correct dimension.
expect_equal(dim(seqdata$e_data), c(1200, 41))
expect_equal(dim(seqdata$f_data), c(40, 4))
# Confirm the correct attributes are present in the seqData object.
expect_equal(
names(attributes(seqdata)),
c(
'names', 'cnames', 'data_info', 'meta_info',
'filters', 'class'
)
)
# Scrutinize the column names attribute.
expect_equal(
attr(seqdata, 'cnames'),
list(
edata_cname = 'ID_REF',
emeta_cname = NULL,
fdata_cname = 'Samples',
techrep_cname = NULL
)
)
# Investigate the elements of the data_info attribute.
expect_equal(
attr(seqdata, 'data_info'),
list(
data_scale_orig = 'counts',
data_scale = 'counts',
norm_info = list(is_normalized = FALSE),
num_edata = length(unique(seqdata$e_data[, 1])),
num_zero_obs = sum(seqdata$e_data == 0),
prop_zeros = (sum(seqdata$e_data == 0) /
prod(dim(seqdata$e_data[, -1]))),
num_samps = ncol(seqdata$e_data[, -1]),
data_types = NULL,
batch_info = list(is_bc = FALSE)
)
)
# Take a looksie at the filters attribute.
expect_identical(attr(seqdata, 'filters'), list())
# Ensure the omicsData object is classy.
expect_s3_class(seqdata, 'seqData')
# NOTE(review): the following stanza repeats the assertions above verbatim
# (likely copy-paste); one copy could be removed.
# Ensure the returned data frames are the correct dimension.
expect_equal(dim(seqdata$e_data), c(1200, 41))
expect_equal(dim(seqdata$f_data), c(40, 4))
# Confirm the correct attributes are present in the seqData object.
expect_equal(
names(attributes(seqdata)),
c(
'names', 'cnames', 'data_info', 'meta_info',
'filters', 'class'
)
)
# Scrutinize the column names attribute.
expect_equal(
attr(seqdata, 'cnames'),
list(
edata_cname = 'ID_REF',
emeta_cname = NULL,
fdata_cname = 'Samples',
techrep_cname = NULL
)
)
# Investigate the elements of the data_info attribute.
expect_equal(
attr(seqdata, 'data_info'),
list(
data_scale_orig = 'counts',
data_scale = 'counts',
norm_info = list(is_normalized = FALSE),
num_edata = length(unique(seqdata$e_data[, 1])),
num_zero_obs = sum(seqdata$e_data == 0),
prop_zeros = (sum(seqdata$e_data == 0) /
prod(dim(seqdata$e_data[, -1]))),
num_samps = ncol(seqdata$e_data[, -1]),
data_types = NULL,
batch_info = list(is_bc = FALSE)
)
)
# Inspect the elements of the meta_info attribute.
expect_equal(
attr(seqdata, 'meta_info'),
list(
meta_data = FALSE,
num_emeta = NULL
)
)
# Take a looksie at the filters attribute.
expect_identical(attr(seqdata, 'filters'), list())
# Ensure the omicsData object is classy.
expect_s3_class(seqdata, 'seqData')
# Run as.seqData with disagreeable data frames -------------------------------
# Check for an error when e_data has more columns than f_data has rows.
expect_error(
as.seqData(
e_data = data.frame(edata,
Mock4 = edata[, 10]
),
f_data = fdata,
edata_cname = 'ID_REF',
fdata_cname = 'Samples'
),
'1 samples from e_data not found in f_data'
)
# NOTE(review): bare `fdata` below is leftover debugging output (its value is
# printed interactively and discarded under testthat).
fdata
# Create an f_data object with an extra row.
fdata_1 <- rbind(fdata, data.frame(
Samples = 'uterus_PBS_R5',
Tissue = 'uterus',
Treatment = 'PBS',
Label = 'R5'
))
# Create a seqData object and check for a warning when the f_data object has
# an extra row.
testthat::expect_warning(
seqdata <- as.seqData(
e_data = edata,
f_data = fdata_1,
edata_cname = 'ID_REF',
fdata_cname = 'Samples'
),
paste('Extra samples were found in f_data that were not in',
'e_data. These have been removed from f_data.',
sep = ' '
)
)
# Check high level structure
expect_equal(names(seqdata), c("e_data", "f_data", "e_meta"))
# Ensure the returned data frames are the correct dimension.
expect_equal(dim(seqdata$e_data), c(1200, 41))
expect_equal(dim(seqdata$f_data), c(40, 4))
# Confirm the correct attributes are present in the seqData object.
expect_equal(
names(attributes(seqdata)),
c(
'names', 'cnames', 'data_info', 'meta_info',
'filters', 'class'
)
)
# Scrutinize the column names attribute.
expect_equal(
attr(seqdata, 'cnames'),
list(
edata_cname = 'ID_REF',
emeta_cname = NULL,
fdata_cname = 'Samples',
techrep_cname = NULL
)
)
# Investigate the elements of the data_info attribute.
expect_equal(
attr(seqdata, 'data_info'),
list(
data_scale_orig = 'counts',
data_scale = 'counts',
norm_info = list(is_normalized = FALSE),
num_edata = length(unique(seqdata$e_data[, 1])),
num_zero_obs = sum(seqdata$e_data == 0),
prop_zeros = (sum(seqdata$e_data == 0) /
prod(dim(seqdata$e_data[, -1]))),
num_samps = ncol(seqdata$e_data[, -1]),
data_types = NULL,
batch_info = list(is_bc = FALSE)
)
)
# Inspect the elements of the meta_info attribute.
expect_equal(
attr(seqdata, 'meta_info'),
list(
meta_data = FALSE,
num_emeta = NULL
)
)
# Take a looksie at the filters attribute.
expect_identical(attr(seqdata, 'filters'), list())
# Ensure the omicsData object is classy.
expect_s3_class(seqdata, 'seqData')
# Confirm the dimensions of the e_data and e_meta data frames.
expect_equal(
dim(seqdata$e_data),
c(1200, 41)
)
expect_equal(dim(seqdata$f_data), c(40, 4))
# Confirm the correct attributes are present in the seqData object.
expect_equal(
names(attributes(seqdata)),
c(
'names', 'cnames', 'data_info', 'meta_info',
'filters', 'class'
)
)
# Scrutinize the column names attribute.
expect_equal(
attr(seqdata, 'cnames'),
list(
edata_cname = 'ID_REF',
emeta_cname = NULL,
fdata_cname = 'Samples',
techrep_cname = NULL
)
)
# Investigate the elements of the data_info attribute.
expect_equal(
attr(seqdata, 'data_info'),
list(
data_scale_orig = 'counts',
data_scale = 'counts',
norm_info = list(is_normalized = FALSE),
num_edata = length(unique(seqdata$e_data[, 1])),
num_zero_obs = sum(seqdata$e_data == 0),
prop_zeros = (sum(seqdata$e_data == 0) /
prod(dim(seqdata$e_data[, -1]))),
num_samps = ncol(seqdata$e_data[, -1]),
data_types = NULL,
batch_info = list(is_bc = FALSE)
)
)
# Take a looksie at the filters attribute.
expect_identical(attr(seqdata, 'filters'), list())
# Ensure the omicsData object is classy.
expect_s3_class(seqdata, 'seqData')
# Check the technical replicates column for correct structure.
expect_error(
as.seqData(
e_data = edata,
f_data = data.frame(fdata,
tReps = fdata[, 1]
),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
techrep_cname = 'tReps'
),
paste('Specified technical replicate column had a unique value',
'for each row. Values should specify groups of technical',
'replicates belonging to a biological sample.',
sep = ' '
)
)
set.seed(5)
# Fabricate an e_data object with some of the IDs repeated.
edata_1 <- rbind(
edata,
edata[sample(1:150, 8), ]
)
# Create a seqData object with some of the rows of e_data repeated.
# (Exact duplicate rows are silently de-duplicated back to 1200 rows.)
seqdata <- as.seqData(
e_data = edata_1,
f_data = fdata,
edata_cname = 'ID_REF',
fdata_cname = 'Samples'
)
# Check high level structure
expect_equal(names(seqdata), c("e_data", "f_data", "e_meta"))
# Verify that the returned data frames are the correct dimension.
expect_equal(dim(seqdata$e_data), c(1200, 41))
expect_equal(dim(seqdata$f_data), c(40, 4))
# Confirm the correct attributes are present in the seqData object.
expect_equal(
names(attributes(seqdata)),
c(
'names', 'cnames', 'data_info', 'meta_info',
'filters', 'class'
)
)
# Scrutinize the column names attribute.
expect_equal(
attr(seqdata, 'cnames'),
list(
edata_cname = 'ID_REF',
emeta_cname = NULL,
fdata_cname = 'Samples',
techrep_cname = NULL
)
)
# Investigate the elements of the data_info attribute.
expect_equal(
attr(seqdata, 'data_info'),
list(
data_scale_orig = 'counts',
data_scale = 'counts',
norm_info = list(is_normalized = FALSE),
num_edata = length(unique(seqdata$e_data[, 1])),
num_zero_obs = sum(seqdata$e_data == 0),
prop_zeros = (sum(seqdata$e_data == 0) /
prod(dim(seqdata$e_data[, -1]))),
num_samps = ncol(seqdata$e_data[, -1]),
data_types = NULL,
batch_info = list(is_bc = FALSE)
)
)
# Take a looksie at the filters attribute.
expect_identical(attr(seqdata, 'filters'), list())
# Ensure the omicsData object is classy.
expect_s3_class(seqdata, 'seqData')
# Change the values in each of the samples for the repeated IDs.
# edata_1[151:158, 2:13] <- edata_1[151:158, 2:13] * 1.1
edata_1 <- edata
edata_1[2, 1] <- edata_1[1, 1]
# Forge a seqData object with some of the peptide IDs repeated but the values
# for each of the repeated peptide IDs is different from the original data.
expect_error(
as.seqData(
e_data = edata_1,
f_data = fdata,
edata_cname = 'ID_REF',
fdata_cname = 'Samples'
),
"The 'edata_cname' identifier is non-unique."
)
# Check for an error when e_meta is non-null but emeta_cname is null.
# expect_error(as.seqData(e_data = edata_1,
# f_data = fdata,
# edata_cname = 'ID_REF',
# fdata_cname = 'Samples'),
# 'Since e_meta is non-NULL, emeta_cname must also be non-NULL.')
# Verify there is an error when emeta_cname is not a column name in e_meta.
# expect_error(as.seqData(e_data = edata_1,
# f_data = fdata,
# e_meta = emeta,
# edata_cname = 'ID_REF',
# fdata_cname = 'Samples',
# emeta_cname = 'Protein2'),
# paste('Mapping variable column',
# 'Protein2',
# 'not found in e_meta. See details of as.seqData for',
# 'specifying column names.',
# sep = ' '))
## Warning for non-int (allowed for visualizing)
testthat::expect_warning(
as.seqData(
e_data = cbind(edata[1], edata[-1] / 2),
edata_cname = 'ID_REF',
f_data = fdata,
fdata_cname = "Samples"
),
"Non-integers detected"
)
## Error for non "count"
testthat::expect_error(
as.seqData(
e_data = edata,
edata_cname = 'ID_REF',
f_data = fdata,
fdata_cname = "Samples",
data_scale = "log2"
),
"data_scale must be 'counts' for as.seqData"
)
## NA replaced by zeros
e_data_temp <- edata
e_data_temp[e_data_temp == 0] <- NA
x <- testthat::expect_message(
as.seqData(
e_data = e_data_temp,
edata_cname = 'ID_REF',
f_data = fdata,
fdata_cname = "Samples"
)$e_data,
"instances of NA have been replaced with 0"
)
testthat::expect_false(any(is.na(x)))
###### Make the class checks happen
## Supports Data table conversion
expect_error(as.seqData(
e_data = 5,
f_data = fdata,
edata_cname = 'ID_REF',
fdata_cname = 'Samples'
), "e_data must be of class 'data.frame'")
expect_error(as.seqData(
e_data = edata,
f_data = 5,
edata_cname = 'ID_REF',
fdata_cname = 'Samples'
), "f_data must be of class 'data.frame'")
expect_error(as.seqData(
e_data = edata,
f_data = fdata,
e_meta = 5,
edata_cname = 'ID_REF',
fdata_cname = 'Samples'
), "e_meta must be of class 'data.frame'")
# Minimal e_meta: maps each ID_REF to an integer "class" value.
emeta <- data.frame(edata[1], class = 1:nrow(edata))
## Supports Data table conversion also
seqdata <- as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class"
)
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 5,
fdata_cname = 'Samples',
emeta_cname = "class"
),
"must be of the class 'character'"
)
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 5,
emeta_cname = "class"
),
"must be of the class 'character'"
)
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class",
is_normalized = 5
),
"must be of the class 'logical'"
)
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class",
is_bc = 5
),
"must be of the class 'logical'"
)
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class",
batch_info = 5
),
"must be of the class 'list'"
)
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class",
data_types = 5
),
"must be of the class 'character'"
)
## Valid data_scale
# NOTE(review): the remaining as.nmrData calls in a seqData test look like
# copy-paste from the nmrData test file -- confirm they are intentional.
expect_error(
as.nmrData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class",
data_scale = 5
),
"data_scale must be one of the"
)
expect_error(
as.nmrData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
techrep_cname = 5,
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class"
),
"techrep_cname must be a character string "
)
## cnames correct
# Bad edata
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_RF',
fdata_cname = 'Samples',
emeta_cname = "class"
),
"not found in"
)
# Bad techrep
expect_error(
as.nmrData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
techrep_cname = 'Samples',
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class"
),
"not found in"
)
# Bad fdata
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 'Sampes',
emeta_cname = "class"
),
"not found in"
)
# Bad emeta
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "cass"
),
"not found in"
)
emeta2 <- emeta
colnames(emeta2) <- c("t", "class")
# Bad edata/emeta
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata),
e_meta = data.table::as.data.table(emeta2),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class"
),
"not found in"
)
fdata2 <- fdata[1]
# Bad fdata
expect_error(
as.seqData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata2),
e_meta = data.table::as.data.table(emeta),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class"
),
"must contain at least 2"
)
fdata3 <- fdata
fdata3$techrep <- 1:nrow(fdata)
## bad techrep
expect_error(
as.nmrData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata3),
e_meta = data.table::as.data.table(emeta),
techrep_cname = 'techrep',
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class"
),
"Specified technical replicate column"
)
# Bad emeta
emeta2 <- rbind(emeta, emeta[1, ])
expect_error(
as.nmrData(
e_data = data.table::as.data.table(edata),
f_data = data.table::as.data.table(fdata3),
e_meta = data.table::as.data.table(emeta2),
edata_cname = 'ID_REF',
fdata_cname = 'Samples',
emeta_cname = "class"
),
"Not all e_data cname and "
)
})
|
02b28803bd51eae8d7d2105b04d65b175ed9ffc8
|
18347ef9bc1f489e63e83cf03338b7211d21b7c8
|
/man/modal_category.Rd
|
8fd8300f13852d992c544eb5f2f329f52bdbff48
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
stan-dev/posterior
|
cd1e0778f5b930b7ef97b9c1f09167f162fb9d7e
|
55e92336c2984be1a2487cdd489552a07e273d70
|
refs/heads/master
| 2023-08-18T07:53:15.023052
| 2023-08-07T08:13:36
| 2023-08-07T08:13:36
| 212,145,446
| 105
| 20
|
NOASSERTION
| 2023-08-07T08:13:37
| 2019-10-01T16:30:28
|
R
|
UTF-8
|
R
| false
| true
| 1,306
|
rd
|
modal_category.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discrete-summaries.R
\name{modal_category}
\alias{modal_category}
\alias{modal_category.default}
\alias{modal_category.rvar}
\title{Modal category}
\usage{
modal_category(x)
\method{modal_category}{default}(x)
\method{modal_category}{rvar}(x)
}
\arguments{
\item{x}{(multiple options) A vector to be interpreted as draws from
a categorical distribution, such as:
\itemize{
\item A \link{factor}
\item A \link{numeric} (should be \link{integer} or integer-like)
\item An \link{rvar}, \link{rvar_factor}, or \link{rvar_ordered}
}}
}
\value{
If \code{x} is a \link{factor} or \link{numeric}, returns a length-1 vector containing
the modal value.
If \code{x} is an \link{rvar}, returns an array of the same shape as \code{x}, where each
cell is the modal value of the draws in the corresponding cell of \code{x}.
}
\description{
Modal category of a vector.
}
\details{
Finds the modal category (i.e., most frequent value) in \code{x}. In the case of
ties, returns the first tie.
}
\examples{
x <- factor(c("a","b","b","c","d"))
modal_category(x)
# in the case of ties, the first tie is returned
y <- factor(c("a","c","c","d","d"))
modal_category(y)
# both together, as an rvar
xy <- c(rvar(x), rvar(y))
xy
modal_category(xy)
}
|
71f7d9327f32f0250f2391286e6c911a59369165
|
d74b710c899faef9b6eb1fd906c41fdbc88d100c
|
/man/find_first_neg_for.Rd
|
9800784def0148361dfb82f1dbd5dec264fde4f3
|
[] |
no_license
|
sebschnell/spsintro
|
a085a245b596650398151552643a40ba507779d8
|
d9bde7631ff326810998a479a893d145aa6aaeba
|
refs/heads/master
| 2021-01-11T03:59:26.320575
| 2017-06-15T12:12:15
| 2017-06-15T12:12:15
| 71,263,777
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 416
|
rd
|
find_first_neg_for.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{find_first_neg_for}
\alias{find_first_neg_for}
\title{Find first negative element in a vector and return its position}
\usage{
find_first_neg_for(x)
}
\arguments{
\item{x}{A vector of double}
}
\value{
Index of the first negative value
}
\description{
Find first negative element in a vector and return its position
}
|
6c10e04d23fba98b9a322878a0087baa76db1bc5
|
96216dc92bf7cd4519bf236c37595f1808639042
|
/Seminar/funkcije/funkcije.r
|
fd81e1c772bdd3abc4d37c0c3dac5253357c076f
|
[] |
no_license
|
JakaSvetek/Simulacija-propada-zavarovalnice
|
189f881ca393a6b3604ed1b7273dc5a48b8cc8f3
|
6d3aacb0b5d8bd75c110eac26faff64d6d8f321d
|
refs/heads/main
| 2023-04-11T22:26:10.613153
| 2021-05-13T08:54:04
| 2021-05-13T08:54:04
| 366,989,259
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,012
|
r
|
funkcije.r
|
# Functions for the insurance-ruin simulation.

# Simulate one capital path over `stevilo_dni` days, starting from
# `zacetni_kapital`. Each day draws a uniform number: values >= `verjetnost`
# grow the capital by 1, values below it shrink it by 1, so `verjetnost` is
# the per-day loss probability. Prints (and invisibly returns) either the day
# of ruin -- the first day the capital drops below 0 -- or the final capital
# if ruin never happens.
simulacija <- function(zacetni_kapital, stevilo_dni, verjetnost) {
  S <- zacetni_kapital
  dnevi <- runif(stevilo_dni, min = 0, max = 1)
  dan_propada <- NA
  for (i in seq_along(dnevi)) {
    if (dnevi[i] >= verjetnost) {
      S <- S + 1
    } else {
      S <- S - 1
    }
    # BUG FIX: the original tested S < 0 at the *top* of the next iteration,
    # so the reported ruin day was one too large (and inconsistent when ruin
    # happened on the last day). Check immediately after the daily update.
    if (S < 0) {
      dan_propada <- i
      break
    }
  }
  if (!is.na(dan_propada)) {
    print(paste("Propad v dnevu", paste0(dan_propada, ".")))
  } else {
    print(paste("Kapital na zadnji dan je", paste0(S, ".")))
  }
}
# Run `stevilo_simulacij` independent capital paths (same dynamics as
# simulacija()) and return how many of them went bankrupt -- i.e. their
# capital dropped below 0 -- within `stevilo_dni` days.
Vec_simulacij <- function(stevilo_simulacij, zacetni_kapital, stevilo_dni, verjetnost) {
  propadli <- 0
  for (sim in seq_len(stevilo_simulacij)) {  # avoid `T` as a loop variable (shadows TRUE)
    S <- zacetni_kapital
    for (dan in runif(stevilo_dni, min = 0, max = 1)) {
      if (dan >= verjetnost) {
        S <- S + 1
      } else {
        S <- S - 1
      }
      # BUG FIX: the original only noticed S < 0 at the start of the *next*
      # day, so a path that went bankrupt on the very last day was never
      # counted. Test immediately after the daily update instead.
      if (S < 0) {
        propadli <- propadli + 1
        break
      }
    }
  }
  propadli
}

# Convenience wrapper: run the simulations and print (and invisibly return)
# a message with the bankrupt count.
Vec_simulacij_rezultat <- function(stevilo_simulacij, zacetni_kapital, stevilo_dni, verjetnost) {
  propadli <- Vec_simulacij(stevilo_simulacij, zacetni_kapital, stevilo_dni, verjetnost)
  print(paste("Propadlo jih je", paste0(propadli, ".")))
}
# Plot the empirical probability of ruin (in %) as a function of the starting
# capital, for a horizon of `stevilo_dni` days and per-day loss probability
# `verjetnost`. For each candidate capital (1 .. 40% of the day count) it runs
# 1000 simulations via Vec_simulacij and converts the bankrupt count to a
# percentage (count / 10).
# NOTE(review): depends on ggplot2 (ggplot/aes/geom_line/...) being attached
# by the caller -- there is no library(ggplot2) in this file; confirm it is
# loaded before calling. `n` and `p` below are assigned but never used.
Graf_propada <- function(stevilo_dni, verjetnost){
n <- stevilo_dni
p <- verjetnost
# Candidate starting capitals to evaluate.
kapitali <- 1:(stevilo_dni * ( 4 / 10))
vektor_propadlih <- c()
for (kapital in kapitali){
propadli <- Vec_simulacij(1000, kapital, stevilo_dni, verjetnost) / 10
vektor_propadlih <- c(vektor_propadlih, propadli)
}
podatki <- data.frame(kapitali, vektor_propadlih)
ggplot(podatki,
aes(x=kapitali, y=vektor_propadlih)) +
geom_line() +
ylim(0,100) +
ylab("Propadli[%]") +
xlab("Začetni kapital")
}
|
8025528ddfac519ad190a7afc67b0613313059cf
|
7d9c406b15ecc4d78408a4a338a939c2d5cb40f5
|
/2 - CITE-seq.R
|
23caae7b690b105ebda18a253e235195c939836a
|
[] |
no_license
|
afidanza/ScRNAseq_HPC_humaniPSCs
|
4ada45da604b10e1eb3ab58453585c8804d5b548
|
b5c240a2132da6e494ff8ffe82b3e57eddfb7898
|
refs/heads/master
| 2022-04-18T08:10:17.188475
| 2020-04-10T15:34:24
| 2020-04-10T15:34:24
| 177,476,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,185
|
r
|
2 - CITE-seq.R
|
library(Seurat)
library(ggplot2)

# CITE-seq workflow: load 10x gene-expression + antibody-capture matrices,
# QC-filter cells, normalize with SCTransform, cluster, and save the object.

# Read 10X data (a list with `Gene Expression` and `Antibody Capture` matrices).
data10x <- Read10X("outs/raw_feature_bc_matrix/")

# Create seurat object from the gene expression data only
seuratObj <- CreateSeuratObject(data10x$`Gene Expression`,
                                min.cells = 3,
                                min.features = 200,
                                project = "10x 2019")

# Create the ADT (antibody-derived tag) assay, restricted to the cells kept
# above, and add it to the Seurat object.
adt <- CreateAssayObject(data10x$`Antibody Capture`)
adt <- SubsetData(adt, cells = Cells(seuratObj))
seuratObj[["ADT"]] <- adt
rm(data10x)

# Add metadata: mitochondrial percentage, library ID (18th barcode character),
# and differentiation day (libraries 1-3 = D10, otherwise D13).
seuratObj[["percent.mt"]] <- PercentageFeatureSet(seuratObj, pattern = "^MT-")
seuratObj[["libraryID"]] <- substr(colnames(seuratObj), 18, 18)
seuratObj[["Day"]] <- ifelse(seuratObj[["libraryID"]] %in% c("1", "2", "3"),
                             "D10", "D13")

# QC plots -- the hline/vline thresholds preview the filter applied below.
VlnPlot(seuratObj, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0.01)
ggplot(seuratObj@meta.data, aes(nFeature_RNA, percent.mt)) +
  geom_point(aes(col = Day), size = 0.5) +
  geom_hline(yintercept = c(4, 20)) +
  geom_vline(xintercept = c(2000, 7500))
boxplot(percent.mt ~ Day, seuratObj@meta.data)
table(seuratObj@meta.data$percent.mt > 5, seuratObj@meta.data$Day)
FeatureScatter(seuratObj, "nFeature_RNA", "percent.mt", pt.size = 0.01) +
  geom_hline(yintercept = c(4, 20)) +
  geom_vline(xintercept = c(2000, 7500))

# Filtering: keep cells with 4 < percent.mt < 20 and 2000 < nFeature_RNA < 7500
# (the thresholds drawn on the QC plots above).
# BUG FIX: the original condition ended in "& 7500" -- a bare nonzero numeric
# that is always truthy -- so the upper nFeature_RNA bound was never applied.
seuratObj <- subset(seuratObj,
                    subset = percent.mt > 4 & percent.mt < 20 &
                      nFeature_RNA > 2000 & nFeature_RNA < 7500)

# SCTransform replaces NormalizeData, ScaleData, and FindVariableFeatures.
# Transformed data will be available in the SCT assay, which is set as the default after running sctransform
table(seuratObj@meta.data$Day)
seuratObj <- SCTransform(seuratObj, vars.to.regress = "percent.mt")

# Dimensionality reduction and clustering on the first 15 PCs.
seuratObj <- RunPCA(object = seuratObj)
ElbowPlot(seuratObj, ndims = 50)
seuratObj <- RunUMAP(object = seuratObj, dims = 1:15)
seuratObj <- FindNeighbors(object = seuratObj, dims = 1:15, verbose = FALSE)
seuratObj <- FindClusters(object = seuratObj, verbose = FALSE)
DimPlot(object = seuratObj, label = TRUE)
saveRDS(seuratObj, "CITEseqSeuratObject.rds")
|
fd940551060805853734d74cac2c4e0eae2da02f
|
427ab22e8751bb0987bf0ea23c1bc43b060226e5
|
/exploratoryDataAnalysis/hw4/plot1.R
|
d580c3b377c413b6b12fa67930dc76fbc403d137
|
[] |
no_license
|
savourylie/dsSpecialization
|
da3b121dbf74c1142f21696c6282e61fb81c6e17
|
bcda19827731ab19a4fb06c0927870abd77f7aa5
|
refs/heads/master
| 2021-01-21T13:21:09.046775
| 2016-05-03T14:50:06
| 2016-05-03T14:50:06
| 52,599,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 738
|
r
|
plot1.R
|
# Question 1: Have total emissions from PM2.5 decreased in the United States
# from 1999 to 2008? Using the base plotting system, plot the total PM2.5
# emissions from all sources for the years 1999, 2002, 2005, and 2008.

NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Total emissions per target year, summed over every source.
years <- c(1999, 2002, 2005, 2008)
totals <- vapply(
  years,
  function(yr) sum(NEI$Emissions[NEI$year == yr]),
  numeric(1)
)

barplot(totals,
        xlab = "Year",
        ylab = "Total PM2.5 Emission",
        names.arg = as.character(years))

# Copy the screen device to a PNG file.
dev.copy(png, file = "plot1.png")
dev.off()
|
081d37330b5ca1647f15c07921bd5c423a302f63
|
bdb3a04b53a7320544bf34c330fdd23d2ba5dbb9
|
/ARMA.R
|
2f9227950d5a197a4488133e051e082cf7af8d1c
|
[] |
no_license
|
duqiangcs/traffic_uncertainty
|
82db5463ed0040206fbccfb9d0a0311ef9b0f766
|
679a27ec6cfbc88c29393b3035685269464e8fe3
|
refs/heads/master
| 2020-05-05T13:56:27.448266
| 2018-02-07T15:22:24
| 2018-02-07T15:22:24
| 180,100,354
| 1
| 0
| null | 2019-04-08T08:07:16
| 2019-04-08T08:07:16
| null |
UTF-8
|
R
| false
| false
| 511
|
r
|
ARMA.R
|
# Fit an ARMA/ARIMA model to a traffic time series and write interval
# forecasts for the held-out portion to ARMA_res.csv.
data1 <- read.csv('traffic_900.csv')
library(forecast)

# Split the single-column series: first 600 points for training,
# last 300 held out (kept for reference; not used below).
train <- data1[1:600, 1]
test <- data1[601:900, 1]

# Offline training with a seasonal period of 15 observations.
sensor <- ts(train, frequency = 15)
fit <- auto.arima(sensor, approximation = FALSE, trace = FALSE)
fcast <- forecast(fit, h = 300, level = c(80, 95))

# ROBUSTNESS FIX: the original extracted fcast[[4]], fcast[[5]], fcast[[6]];
# positional element order of a forecast object is not guaranteed across
# forecast-package versions, so use the named components instead.
res <- data.frame(fcast$mean, fcast$lower, fcast$upper)
write.csv(res, "ARMA_res.csv")
# the five columns are point predictions, lower bound 80, lower bound 95, upper bound 80, upper bound 95
|
d5ccd3aac7300429467f2c24b0686de4165e5dba
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613105782-test.R
|
7762f2a5fb1c9fe18949e648dea9c3f0d616db48
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 95
|
r
|
1613105782-test.R
|
# Appears to be an AFL/valgrind regression fixture: feed a fuzzer-generated
# input (a negative integer where a byte vector is expected) into the internal
# mcga:::ByteVectorToDoubles and print the structure of the result -- the run
# itself (no crash / invalid read) is the check. TODO confirm against mcga's
# fuzzing harness.
testlist <- list(b = -1536L)
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result)
|
80eb466aa021978231646b196e6056eee8fcd150
|
e1a95dde991308d9c3eef761c7a2650b7f4137b7
|
/features/make/experiment/exploreR_0704.R
|
eef162ccaec198e21186ebc8582715aa1c3a2d79
|
[] |
no_license
|
surfingit/talking-data-click-fraud
|
d8b93286a3c8eea8dba8225fa6de41a593aa9984
|
d6babcbd4e056d28665cf282ec0af113d61ce48c
|
refs/heads/master
| 2020-04-18T23:32:20.968686
| 2018-05-20T14:57:35
| 2018-05-20T14:57:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,672
|
r
|
exploreR_0704.R
|
#install.packages("fasttime")
rm(list=ls())
gc();gc();gc()
library(data.table)
library(RcppRoll)
library(fasttime)
library(Hmisc)
path = '~/tdata/data/'
#path = '/Users/dhanley2/Documents/tdata/data/'
trndf = fread(paste0(path, 'train.csv'))
tstdf = fread(paste0(path, 'test.csv'))
tstdf[device %in% c()]
trndf[, click_time := fasttime::fastPOSIXct(trndf$click_time)]
trndf[, click_day := round(as.numeric(click_time)/(3600*24)%%31)]
tstdf[, click_time := fasttime::fastPOSIXct(tstdf$click_time)]
tstdf[, click_day := round(as.numeric(click_time)/(3600*24)%%31)]
tstdf[, click_hour := round(as.numeric(click_time)/(3600))%%24]
tstdf[, .(sum(device==9), .N), by=click_hour][order(click_hour)]
# trndf[device==3032]
aggdf = trndf[,.(.N, mean(is_attributed), sum(is_attributed), length(unique(os))), by = device]
aggdf[N>10000]
daydf = trndf[device %in% c(3032, 3542, 3866),.N, by = .(click_day, device)]
daydf[order(device, click_day)]
daydf = trndf[device %in% c(1,2,0),.N, by = .(click_day, device)]
daydf[order(device, click_day)]
tst_devs = tstdf[,.(.N, length(unique(os))), by = device][N>500]
tst_devs
setdiff(unique(tst_devs$device), unique(trndf$device))
trndf[device %in% tst_devs$device,.(.N, mean(is_attributed), sum(is_attributed), length(unique(os))), by = device]
trndf[, click_time := fasttime::fastPOSIXct(trndf$click_time)]
trndf[, click_day := round(as.numeric(click_time)/(3600*24))]
trndf = trndf[,.(click_day, click_time, device, is_attributed)]
tstdf = tstdf[,.(click_time, device)]
table(trndf$click_day, trndf$device==3032)
table(trndf[device==3032]$is_attributed)
table(trndf[device!=3032]$is_attributed)
table(tstdf$device==3032)
sub = fread(paste0(path, '../sub/sub_lgb0304val.csv'))
sub = sub[2:nrow(sub)]
idx = 1:1000000
trndf = fread(paste0(path, 'trainval.csv'))
trndf[, sec_seq := 1:.N , by = click_time]
cols_ = c("ip", "device", "os", "app")
trndf$click_sec = as.numeric(fasttime::fastPOSIXct(trndf$click_time))
trndf[, click_sec := click_sec - min( click_sec)]
trndf[,ct := .N, by = click_time]
trndf[, split_sec := round((0:(.N-1))/.N, 8), by = click_time]
trndf[, ct_sec := .N , by = click_time]
trndf[, click_split_sec := click_sec + split_sec]
trndf[, index := 1:nrow(trndf)]
trndf = trndf[order(ip, os, device, app, click_split_sec)]
trndf
trndf[,click_sec_shift_lead := - click_split_sec + shift(click_split_sec, 1, type = "lead")]
trndf[,seq_lead := .N:1, by = cols_ ]
trndf[seq_lead == 1, click_sec_shift_lead := -1]
trndf[(ip==6) & (app==19) & (device == 16) & (os==0)]
idx = 1:3000000
idx = idx[trndf[idx]$click_sec_shift_lead!=-1]
table(cut2(round(1000*trndf[idx]$click_sec_shift_lead), g = 50), trndf[idx]$is_attributed)
trndf[click_sec_shift_lead==-1][1:1000]
# 0 1
# FALSE 51121554 34771
# TRUE 10807505 116171
getSplitLead = function(df, cols_, fname, path, shift_n = 1){
df$click_sec = as.numeric(fasttime::fastPOSIXct(df$click_time))
df[, split_sec := round((0:(.N-1))/.N, 4), by = click_time]
df = df[,c(cols_, "click_sec", "split_sec"), with = F]
df[, index := 1:nrow(df)]
setorderv(df, c(cols_, "click_sec", "split_sec"))
df[,click_sec_shift_lead := shift(click_sec+split_sec, shift_n, type = "lead")]
df[,seq_lead := .N:1, by = cols_ ]
df[,click_sec_lead := click_sec_shift_lead - (click_sec + split_sec)]
df[,click_sec_lead := round(click_sec_lead, 4)]
df[seq_lead %in% 1:shift_n, click_sec_lead := 999999]
setorderv(df, "index")
new_name = "click_sec_lead_split_sec"
setnames(df, "click_sec_lead", new_name)
df = df[,new_name,with=F]
return(df)
}
# Write out the <ip, device, os> level
trndf = fread(paste0(path, 'train.csv'))
trndf[,attributed_time:=NULL]
trndf[,channel:=NULL]
gc(); gc()
# Make the full training data
trndf[,click_time := as.numeric(fasttime::fastPOSIXct(click_time))]
trndf[,click_hr := round(click_time/3600) %%24]
trndf[,click_hr := click_hr]
trndf[,click_day := round(click_time/(24*3600))]
trndf[,click_time:=NULL]
gc(); gc()
aggdf = trndf[,.(.N, sum(is_attributed)), by = .(ip, device, os, click_hr, click_day)]
rm(trndf)
gc(); gc()
setnames(aggdf, "N", "ct_click")
setnames(aggdf, "V2", "y_click")
aggdf[,ipdevosapp_ct :=sum(ct_click), by = .(ip, device, os)]
aggdf = aggdf[ipdevosapp_ct>5000]
aggdf = aggdf[order(ip, device, os, click_hr, click_day)]
aggdf[,seq := 1:.N, by = .(ip, device, os, click_hr)]
aggdf[, ct_click_lag := shift(ct_click, 1, type="lag")]
aggdf[, y_click_lag := shift( y_click, 1, type="lag")]
glob_mean = sum(aggdf$y_click)/sum(aggdf$ct_click)
aggdf[seq>1, bmean_lag := ((y_click_lag)+(glob_mean*2000))/(ct_click_lag+2000)]
aggdf[seq>1, bmean := ((y_click)+(glob_mean*2000))/(ct_click+2000)]
plot(table(cut2(round(aggdf$bmean,5), g=10), cut2(round(aggdf$bmean_lag,5), g=10)))
View(aggdf[1:2000])
#########################################################################
clknite = trndf[click_hr %in% 19:21, .(sum(is_attributed), .N), by=.(ip)]
clkday = trndf[click_hr %in% c(2:16), .(sum(is_attributed), .N), by=.(ip)]
setnames(clknite, c("ip", "ysumday", "ctnite"))
setnames(clkday, c("ip", "ysumnite", "ctday"))
clk = merge(clkday, clknite, by = "ip", type = "all")
clk
glob_mean = sum(clk$ctnite)/sum(clk$ctday)
glob_mean_click = (sum(clk$ysumday)+sum(clk$ysumnite))/(sum(clk$ctnite)+sum(clk$ctday))
prior_niteday = 20
prior_clicks = 100
clk[,bmean := (((ctday+ctnite)*(ctnite/ctday)) + (prior_niteday*glob_mean))/((ctday+ctnite)+prior_niteday)]
clk[,bmean_click := ((ysumnite+ysumday) + (prior_clicks*glob_mean_click))/( prior_clicks+(ctday+ctnite))]
clk[order(bmean)]
clk
plot(clk$bmean, clk$bmean_click, ylim = c(0,.005))
abline(h=(glob_mean_click), col="red")
|
89072a3ebecdd49a344adedcccd0a291e112bdf3
|
d5facf2eb1940a5ef24399017845e17ca172ebf3
|
/R/get_total_frame.R
|
52592430fc12531c0b6440ad4000fa95f0d75a54
|
[] |
no_license
|
ailich/mytools
|
3970d0254b4bc9b7bb23b2918f99ec7e966ddbbe
|
2e8b244974483df793ae000d8a44f8904e44bc9a
|
refs/heads/master
| 2023-01-13T23:41:50.623083
| 2022-12-29T17:56:54
| 2022-12-29T17:56:54
| 117,773,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
get_total_frame.R
|
#' Calculates Total Frame Number
#'
#' Calculates Total Frame Number by video number and seconds or frame number in
#' @param video_num vector video_numbers
#' @param video_sec vector of seconds in video (alternativelly could use video_frame)
#' @param video_frame vector of frames into video (alternativelly could use video_sec)
#' @param fps frames per second
#' @param vid_length video length in minutes
#' @export
get_total_frame<- function(video_num, video_sec, video_frame=NULL, fps, vid_length=1){
fpv<- fps*(vid_length*60) #frames per video
if(is.null(video_frame)){Total_Frame<- (video_num*fpv)+(video_sec*fps)
} else{Total_Frame<- (video_num*fpv)+video_frame}
return(Total_Frame)
}
|
7113eb2e206da173b882c80737b50de151a5581f
|
cef3216fbd42b078c491f2581f9086aca199621e
|
/R/plotPeaks_gui.r
|
f2607a29db5a4446970ce29ad01d58ac1e92e684
|
[] |
no_license
|
mokshasoft/strvalidator
|
b0d171879144b7bedb694406df965b1cf02a739a
|
434ef63c3d2c48b0455b58ae7008e07ee3574c47
|
refs/heads/master
| 2020-04-08T08:03:51.903497
| 2018-09-12T21:39:23
| 2018-09-12T21:39:23
| 159,163,549
| 0
| 0
| null | 2018-11-26T12:08:29
| 2018-11-26T12:08:28
| null |
UTF-8
|
R
| false
| false
| 18,165
|
r
|
plotPeaks_gui.r
|
################################################################################
# TODO LIST
# TODO: ...
################################################################################
# CHANGE LOG (last 20 changes)
# 18.07.2017: Fixed "Warning: Ignoring unknown aesthetics: ymax".
# 13.07.2017: Fixed issue with button handlers.
# 13.07.2017: Fixed narrow dropdown with hidden argument ellipsize = "none".
# 07.07.2017: Replaced 'droplist' with 'gcombobox'.
# 07.07.2017: Removed argument 'border' for 'gbutton'.
# 29.04.2016: 'Save as' textbox expandable.
# 11.11.2015: Added importFrom ggplot2.
# 29.08.2015: Added importFrom.
# 11.10.2014: Added 'focus', added 'parent' parameter.
# 28.06.2014: Added help button and moved save gui checkbox.
# 08.05.2014: Implemented 'checkDataset'.
# 20.01.2014: Changed 'saveImage_gui' for 'ggsave_gui'.
# 12.01.2014: First version.
#' @title Plot Peaks
#'
#' @description
#' GUI simplifying the creation of plots from result type data.
#'
#' @details Plot result type data. It is possible to customize titles and font
#' size. Data can be plotted as as frequency or proportion. The values can be
#' printed on the plot with custom number of decimals. There are several
#' color palettes to chose from.
#' A name for the result is automatically suggested.
#' The resulting plot can be saved as either a plot object or as an image.
#' @param env environment in which to search for data frames and save result.
#' @param savegui logical indicating if GUI settings should be saved in the environment.
#' @param debug logical indicating printing debug information.
#' @param parent widget to get focus when finished.
#'
#' @return TRUE
#'
#' @export
#'
#' @importFrom plyr count
#' @importFrom utils help str
#' @importFrom ggplot2 ggplot aes_string theme_grey geom_bar scale_fill_brewer
#' labs geom_text theme
#'
plotPeaks_gui <- function(env=parent.frame(), savegui=NULL, debug=FALSE, parent=NULL){
# Global variables.
.gData <- NULL
.gDataName <- NULL
.gPlot <- NULL
.palette <- c("Set1","Set2","Set3","Accent","Dark2",
"Paired","Pastel1", "Pastel2")
# Qualitative palette, do not imply magnitude differences between legend
# classes, and hues are used to create the primary visual differences
# between classes. Qualitative schemes are best suited to representing
# nominal or categorical data.
if(debug){
print(paste("IN:", match.call()[[1]]))
}
# Main window.
w <- gwindow(title="Plot peaks", visible=FALSE)
# Runs when window is closed.
addHandlerDestroy(w, handler = function (h, ...) {
# Save GUI state.
.saveSettings()
# Focus on parent window.
if(!is.null(parent)){
focus(parent)
}
})
gv <- ggroup(horizontal=FALSE,
spacing=8,
use.scrollwindow=FALSE,
container = w,
expand=TRUE)
# Help button group.
gh <- ggroup(container = gv, expand=FALSE, fill="both")
savegui_chk <- gcheckbox(text="Save GUI settings", checked=FALSE, container=gh)
addSpring(gh)
help_btn <- gbutton(text="Help", container=gh)
addHandlerChanged(help_btn, handler = function(h, ...) {
# Open help page for function.
print(help("plotPeaks_gui", help_type="html"))
})
# FRAME 0 ###################################################################
f0 <- gframe(text = "Dataset",
horizontal=TRUE,
spacing = 5,
container = gv)
glabel(text="Select dataset:", container=f0)
dataset_drp <- gcombobox(items=c("<Select dataset>",
listObjects(env=env,
obj.class="data.frame")),
selected = 1,
editable = FALSE,
container = f0,
ellipsize = "none")
f0_samples_lbl <- glabel(text=" (0 samples)", container=f0)
addHandlerChanged(dataset_drp, handler = function (h, ...) {
val_obj <- svalue(dataset_drp)
# Check if suitable.
requiredCol <- c("Sample.Name", "Peaks", "Group", "Id")
ok <- checkDataset(name=val_obj, reqcol=requiredCol,
env=env, parent=w, debug=debug)
if(ok){
# Load or change components.
.gData <<- get(val_obj, envir=env)
.gDataName <<- val_obj
# Suggest name.
svalue(f5_save_edt) <- paste(val_obj, "_ggplot", sep="")
svalue(f0_samples_lbl) <- paste(" (",
length(unique(.gData$Id)),
" samples)", sep="")
# Enable buttons.
enabled(plot_btn) <- TRUE
} else {
# Reset components.
.gData <<- NULL
svalue(f5_save_edt) <- ""
svalue(dataset_drp, index=TRUE) <- 1
svalue(f0_samples_lbl) <- " (0 samples)"
}
} )
# FRAME 1 ###################################################################
f1 <- gframe(text = "Options",
horizontal=FALSE,
spacing = 10,
container = gv)
f1_titles_chk <- gcheckbox(text="Override automatic titles.",
checked=FALSE, container=f1)
addHandlerChanged(f1_titles_chk, handler = function(h, ...) {
val <- svalue(f1_titles_chk)
if(val){
enabled(grid1) <- TRUE
} else {
enabled(grid1) <- FALSE
}
} )
grid1 <- glayout(container = f1, spacing = 1)
enabled(grid1) <- svalue(f1_titles_chk)
grid1[1,1] <- glabel(text="Plot title:", container=grid1)
grid1[1,2] <- f1_title_edt <- gedit(text="",
width=40,
container=grid1)
grid1[2,1] <- glabel(text="X title:", container=grid1)
grid1[2,2] <- f1_xtitle_edt <- gedit(text="",
container=grid1)
grid1[3,1] <- glabel(text="Y title:", container=grid1)
grid1[3,2] <- f1_ytitle_edt <- gedit(text="",
container=grid1)
f1_prop_chk <- gcheckbox(text="Plot proportion",
checked=TRUE,
container=f1)
grid2 <- glayout(container = f1, spacing = 1)
grid2[1,1] <- glabel(text="Base font size (pts):", container=grid2)
grid2[1,2] <- f1_base_size_edt <- gedit(text="18", width=4, container=grid2)
grid3 <- glayout(container = f1, spacing = 1)
grid3[1,1] <- glabel(text="Color palette:", container=grid3)
grid3[1,2] <- f1_palette_drp <- gcombobox(items=.palette,
selected = 1,
editable = FALSE,
container = grid3,
ellipsize = "none")
grid4 <- glayout(container = f1, spacing = 1)
grid4[1,1] <- f1_print_chk <- gcheckbox(text="Print values as bar labels", checked=TRUE, container=grid4)
grid4[2,1] <- glabel(text="Number of decimals for bar labels:", container=grid4)
grid4[2,2] <- f1_decimal_spb <- gspinbutton(from=0, to=9, by=1, value=4,
container=grid4)
grid4[3,1] <- glabel(text="Font size for bar labels (pts):", container=grid4)
grid4[3,2] <- f1_lab_size_edt <- gedit(text="5", width=4, container=grid4)
# FRAME 7 ###################################################################
plot_btn <- gbutton(text="Plot", container=gv)
addHandlerChanged(plot_btn, handler = function(h, ...) {
enabled(plot_btn) <- FALSE
.plotBalance()
enabled(plot_btn) <- TRUE
} )
# FRAME 5 ###################################################################
f5 <- gframe(text = "Save as",
horizontal=TRUE,
spacing = 5,
container = gv)
glabel(text="Name for result:", container=f5)
f5_save_edt <- gedit(text="", container=f5, expand = TRUE)
f5_save_btn <- gbutton(text = "Save as object", container = f5)
f5_ggsave_btn <- gbutton(text = "Save as image", container = f5)
addHandlerClicked(f5_save_btn, handler = function(h, ...) {
val_name <- svalue(f5_save_edt)
# Change button.
blockHandlers(f5_save_btn)
svalue(f5_save_btn) <- "Processing..."
unblockHandlers(f5_save_btn)
enabled(f5_save_btn) <- FALSE
# Save data.
saveObject(name=val_name, object=.gPlot,
parent=w, env=env, debug=debug)
# Change button.
blockHandlers(f5_save_btn)
svalue(f5_save_btn) <- "Object saved"
unblockHandlers(f5_save_btn)
} )
addHandlerChanged(f5_ggsave_btn, handler = function(h, ...) {
val_name <- svalue(f5_save_edt)
# Save data.
ggsave_gui(ggplot=.gPlot, name=val_name,
parent=w, env=env, savegui=savegui, debug=debug)
} )
# FUNCTIONS #################################################################
.plotBalance <- function(){
# Get values.
val_titles <- svalue(f1_titles_chk)
val_title <- svalue(f1_title_edt)
val_x_title <- svalue(f1_xtitle_edt)
val_y_title <- svalue(f1_ytitle_edt)
val_base_size <- as.numeric(svalue(f1_base_size_edt))
val_lab_size <- as.numeric(svalue(f1_lab_size_edt))
val_palette <- svalue(f1_palette_drp)
val_decimals <- as.numeric(svalue(f1_decimal_spb))
val_print <- svalue(f1_print_chk)
val_prop <- svalue(f1_prop_chk)
if(debug){
print("val_titles")
print(val_titles)
print("val_title")
print(val_title)
print("val_x_title")
print(val_x_title)
print("val_y_title")
print(val_y_title)
print("val_base_size")
print(val_base_size)
print("val_lab_size")
print(val_lab_size)
print("val_palette")
print(val_palette)
print("val_decimals")
print(val_decimals)
print("val_print")
print(val_print)
print("val_prop")
print(val_prop)
print("str(.gData)")
print(str(.gData))
}
# Check if data.
if (!is.na(.gData) && !is.null(.gData)){
if(debug){
print("Before plot: str(.gData)")
print(str(.gData))
}
# Prepare data.
# Get one row from each sample for plotting.
.gData <- .gData[!duplicated(.gData[,'Id']),]
# Create titles.
if(val_titles){
mainTitle <- val_title
xTitle <- val_x_title
yTitle <- val_y_title
} else {
numberOfSamples <- nrow(.gData)
mainTitle <- paste("Analysis of peaks from",
numberOfSamples, "samples")
xTitle <- "Group"
if(val_prop){
yTitle <- "Proportion"
} else {
yTitle <- "Count"
}
}
# Count samples per group.
.gData <- plyr::count(.gData, vars="Group")
#Calculate frequencies.
if(val_prop){
.gData$freq <- .gData$freq / sum(.gData$freq)
}
.gData$lab <- round(.gData$freq, val_decimals)
# Create plot.
gp <- ggplot(.gData, aes_string(x = "Group", y="freq", fill = "Group"))
gp <- gp + theme_grey(base_size = val_base_size)
gp <- gp + geom_bar(stat = "identity", position = "stack")
# Add color.
gp <- gp + scale_fill_brewer(palette = val_palette) # NB! only 9 colors.
# Add titles.
gp <- gp + labs(title=mainTitle, x=xTitle, y=yTitle, fill=NULL)
# Print value labels on bars.
if(val_print){
gp <- gp + geom_text(aes_string(x="Group", y="freq", label="lab",
hjust=0.5, vjust=0), size=val_lab_size)
}
# Remove legend.
gp <- gp + theme(legend.position="none")
# plot.
print(gp)
# Store in global variable.
.gPlot <<- gp
# Change save button.
svalue(f5_save_btn) <- "Save as object"
enabled(f5_save_btn) <- TRUE
} else {
gmessage(msg="Data frame is NULL or NA!",
title="Error",
icon = "error")
}
}
# INTERNAL FUNCTIONS ########################################################
.loadSavedSettings <- function(){
# First check status of save flag.
if(!is.null(savegui)){
svalue(savegui_chk) <- savegui
enabled(savegui_chk) <- FALSE
if(debug){
print("Save GUI status set!")
}
} else {
# Load save flag.
if(exists(".strvalidator_plotPeaks_gui_savegui", envir=env, inherits = FALSE)){
svalue(savegui_chk) <- get(".strvalidator_plotPeaks_gui_savegui", envir=env)
}
if(debug){
print("Save GUI status loaded!")
}
}
if(debug){
print(svalue(savegui_chk))
}
# Then load settings if true.
if(svalue(savegui_chk)){
if(exists(".strvalidator_plotPeaks_gui_title", envir=env, inherits = FALSE)){
svalue(f1_title_edt) <- get(".strvalidator_plotPeaks_gui_title", envir=env)
}
if(exists(".strvalidator_plotPeaks_gui_title_chk", envir=env, inherits = FALSE)){
svalue(f1_titles_chk) <- get(".strvalidator_plotPeaks_gui_title_chk", envir=env)
}
if(exists(".strvalidator_plotPeaks_gui_x_title", envir=env, inherits = FALSE)){
svalue(f1_xtitle_edt) <- get(".strvalidator_plotPeaks_gui_x_title", envir=env)
}
if(exists(".strvalidator_plotPeaks_gui_y_title", envir=env, inherits = FALSE)){
svalue(f1_ytitle_edt) <- get(".strvalidator_plotPeaks_gui_y_title", envir=env)
}
if(exists(".strvalidator_plotPeaks_gui_base_size", envir=env, inherits = FALSE)){
svalue(f1_base_size_edt) <- get(".strvalidator_plotPeaks_gui_base_size", envir=env)
}
if(exists(".strvalidator_plotPeaks_gui_label_size", envir=env, inherits = FALSE)){
svalue(f1_lab_size_edt) <- get(".strvalidator_plotPeaks_gui_label_size", envir=env)
}
if(exists(".strvalidator_plotPeaks_gui_print", envir=env, inherits = FALSE)){
svalue(f1_print_chk) <- get(".strvalidator_plotPeaks_gui_print", envir=env)
}
if(exists(".strvalidator_plotPeaks_gui_print", envir=env, inherits = FALSE)){
svalue(f1_prop_chk) <- get(".strvalidator_plotPeaks_gui_prop", envir=env)
}
if(exists(".strvalidator_plotPeaks_gui_palette", envir=env, inherits = FALSE)){
svalue(f1_palette_drp) <- get(".strvalidator_plotPeaks_gui_palette", envir=env)
}
if(exists(".strvalidator_plotPeaks_gui_decimal", envir=env, inherits = FALSE)){
svalue(f1_decimal_spb) <- get(".strvalidator_plotPeaks_gui_decimal", envir=env)
}
if(debug){
print("Saved settings loaded!")
}
}
}
.saveSettings <- function(){
# Then save settings if true.
if(svalue(savegui_chk)){
assign(x=".strvalidator_plotPeaks_gui_savegui", value=svalue(savegui_chk), envir=env)
assign(x=".strvalidator_plotPeaks_gui_title_chk", value=svalue(f1_titles_chk), envir=env)
assign(x=".strvalidator_plotPeaks_gui_title", value=svalue(f1_title_edt), envir=env)
assign(x=".strvalidator_plotPeaks_gui_x_title", value=svalue(f1_xtitle_edt), envir=env)
assign(x=".strvalidator_plotPeaks_gui_y_title", value=svalue(f1_ytitle_edt), envir=env)
assign(x=".strvalidator_plotPeaks_gui_base_size", value=svalue(f1_base_size_edt), envir=env)
assign(x=".strvalidator_plotPeaks_gui_label_size", value=svalue(f1_lab_size_edt), envir=env)
assign(x=".strvalidator_plotPeaks_gui_print", value=svalue(f1_print_chk), envir=env)
assign(x=".strvalidator_plotPeaks_gui_prop", value=svalue(f1_prop_chk), envir=env)
assign(x=".strvalidator_plotPeaks_gui_palette", value=svalue(f1_palette_drp), envir=env)
assign(x=".strvalidator_plotPeaks_gui_decimal", value=svalue(f1_decimal_spb), envir=env)
} else { # or remove all saved values if false.
if(exists(".strvalidator_plotPeaks_gui_savegui", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_savegui", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_title_chk", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_title_chk", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_title", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_title", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_x_title", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_x_title", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_y_title", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_y_title", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_base_size", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_base_size", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_label_size", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_label_size", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_print", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_print", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_prop", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_prop", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_palette", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_palette", envir = env)
}
if(exists(".strvalidator_plotPeaks_gui_decimal", envir=env, inherits = FALSE)){
remove(".strvalidator_plotPeaks_gui_decimal", envir = env)
}
if(debug){
print("Settings cleared!")
}
}
if(debug){
print("Settings saved!")
}
}
# END GUI ###################################################################
# Load GUI settings.
.loadSavedSettings()
# Show GUI.
visible(w) <- TRUE
focus(w)
}
|
6454ab0fdb74eb353ac1e98a1ccfb1e98516481f
|
68016683149f16eedfe6cdcba6a6f65a015f2a46
|
/R/comments.R
|
a7c41eee69b6fa436befad136d173fa691239d79
|
[
"MIT"
] |
permissive
|
yonicd/basecamper
|
b1b1c328ae3ba5a051937ffa4b7287298b0b8f63
|
bc707a17f1ef629a381d9dddbd6de9e036cd58cd
|
refs/heads/master
| 2022-11-21T11:26:58.587160
| 2020-07-21T14:45:04
| 2020-07-21T14:45:04
| 265,820,935
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,997
|
r
|
comments.R
|
#' @importFrom glue glue
#' @importFrom httr GET authenticate stop_for_status headers
#' @rdname get_api
#' @export
create_comment <- function(
scope = c('posts', 'milestones', 'todo_items'),
id = NULL,
host = Sys.getenv('BASECAMP_HOST'),
token = Sys.getenv('BASECAMP_TOKEN')){
if(is.null(id)) stop(glue::glue('argument id must contain a {scope} id'))
res <- httr::GET(
glue::glue('{host}/{scope}/{id}/comments/new.xml'),
httr::authenticate(token, 'X')
)
httr::stop_for_status(res)
res_xml <- httr::content(res)
POST_URL <- glue::glue(
"{host}","{gsub('^POST ','',httr::headers(res)[['x-create-action']])}"
)
structure(res_xml, POST_URL = POST_URL)
}
#' @importFrom glue glue
#' @importFrom httr GET authenticate stop_for_status headers
#' @rdname get_api
#' @export
edit_comment <- function(
id,
host = Sys.getenv('BASECAMP_HOST'),
token = Sys.getenv('BASECAMP_TOKEN')){
if(is.null(id)) stop(glue::glue('argument id must contain a comment id'))
res <- httr::GET(
glue::glue('{host}/comments/{id}/edit.xml'),
httr::authenticate(token, 'X')
)
httr::stop_for_status(res)
res_xml <- httr::content(res)
POST_URL <- glue::glue(
"{host}","{gsub('^POST ','',httr::headers(res)[['x-update-action']])}"
)
structure(res_xml, POST_URL = POST_URL)
}
#' @importFrom xml2 xml_child xml_text<-
edit_attr <- function(object,field,val){
val <- switch(class(val),
'character' = val,
'logical' = tolower(as.character(val)),
'numeric' = as.character(val)
)
message_attr <- xml2::xml_child(object,glue::glue('.//{field}'))
xml2::xml_text(message_attr) <- val
object
}
#' @title Edit a Basecamp Post
#' @description Edit post object fields
#' @param object post object
#' @param value new value to replace with
#' @details
#' When editing the body basecamp is expecting HTML.
#'
#' To make it simpler to write you can write a simple markdown text and
#' then convert it to html using [markdownToHTML][markdown::markdownToHTML] :
#'
#' ```
#' html_text <- markdown::markdownToHTML(text = md_text,fragment.only = TRUE)
#' ```
#' x will contain the html equivalent of value you can pass into [edit_body][basecamper::edit_body]
#'
#' ```
#' new_message%>%
#' edit_body(html_text)
#' ```
#'
#' @return updated post object
#' @rdname edit_post
#' @export
edit_title <- function(object, value){
edit_attr(object,'title',value)
}
#' @export
#' @rdname edit_post
edit_body <- function(object, value){
edit_attr(object,'body',value)
}
#' @export
#' @rdname edit_post
edit_category_id <- function(object, value){
edit_attr(object,'category-id',value)
}
#' @export
#' @rdname edit_post
edit_milestone_id <- function(object, value){
edit_attr(object,'milestone-id',value)
}
#' @importFrom glue glue
#' @importFrom httr DELETE authenticate stop_for_status
#' @rdname get_api
#' @export
delete_comment <- function(
id = NULL,
host = Sys.getenv('BASECAMP_HOST'),
token = Sys.getenv('BASECAMP_TOKEN')
){
if(is.null(id)) stop(glue::glue('argument id must contain a comment id'))
res <- httr::DELETE(
glue::glue('{host}/comments/{id}.xml'),
httr::authenticate(token, 'X')
)
httr::stop_for_status(res)
res
}
#' @title Post a comment
#' @description Post a comment to a Basecamp thread
#' @param comment object created by [new_comment][basecamper::create_comment]
#' @param token character, Basecamp Classic API token , Default: Sys.getenv("BASECAMP_TOKEN")
#' @return [response][httr::response]
#' @rdname post_comment
#' @export
#' @importFrom httr POST authenticate content_type stop_for_status
post_comment <- function(comment,
token = Sys.getenv('BASECAMP_TOKEN')){
res <- httr::POST(
url = attr(comment,"POST_URL"),
body = as.character(comment),
httr::authenticate(token, 'X'),
httr::content_type("text/xml")
)
httr::stop_for_status(res)
res
}
|
bf255f01b103640fe0700cf3bf3f98a5e8c6bef7
|
08ac16cc8f66d4417296dffb9c5dee07334cec6d
|
/Src/LS_Evaluator.R
|
84725b6d9e3ce11c3350fff40292b0a8180897d0
|
[] |
no_license
|
iffylaw/ThawSlump
|
4ece12665f08ab9ca7480eb3f88a05aa3faafc08
|
ac27b247242dec7fca7510fa9c965777488f2db8
|
refs/heads/master
| 2016-09-10T19:26:22.300058
| 2014-03-18T08:28:21
| 2014-03-18T08:28:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255,801
|
r
|
LS_Evaluator.R
|
######################################################################
######################################################################
#### ####
#### ####
#### LANDSLIDE SUSCEPTIBILITY EVALUATION ####
#### V4.0 - 17/07/2009 ####
#### IRPI CNR ####
#### MAURO ROSSI - IRPI CNR ####
#### ####
#### Istituto di Ricerca per la Protezione Idrogeologica ####
#### Consiglio Nazionale delle Ricerche ####
#### Gruppo di Geomorfologia ####
#### Via della Madonna Alta, 126 ####
#### 06128 Perugia (Italia) ####
#### +39 075 5014421 ####
#### +39 075 5014420 ####
#### mauro.rossi@irpi.cnr.it ####
#### geomorfologia@irpi.cnr.it ####
#### ####
#### This script was prepared using R 2.7.2 ####
#### The script requires the following R packages: ####
#### 1: MASS ####
#### 2: vcd ####
#### 3: verification ####
#### 4: perturb ####
#### 5: Zelig ####
#### 6: nnet ####
#### ####
#### INPUTS: 1) calibration.txt file (tab delimited) ####
#### 1st column -> identification value ####
#### 2nd column -> grouping variable ####
#### Other columns -> explanatory variables ####
#### ####
#### 2) validation.txt file (tab delimited) ####
#### 1st column -> identification value ####
#### 2nd column -> validation grouping variable ####
#### Other columns -> explanatory variables ####
#### ####
#### 3) configuration.txt file (tab delimited) ####
#### 1st column -> model name ####
#### 2nd column -> model selection (YES or NO) ####
#### 3rd column -> bootstrap sample size ####
#### 4th column -> analysis parameter ####
#### For QDA can be: ####
#### SEL (eliminate dummy variables) ####
#### DUM (maintain dummy variables ####
#### trasformed in numeric ####
#### introducing a random variation ####
#### between -0.1 and +0.1) ####
#### 5th column -> model variability estimation ####
#### using a bootstrap procedure ####
#### (YES or NO) ####
#### 6th column -> number of bootstrap samples for ####
#### the estimation of the model ####
#### variability (We selected only ####
#### 20 run for the neural network ####
#### analysis because it requires a ####
#### long calculation time) ####
#### ####
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
#######################----------------------#########################
##################---------------------------#########################
###############-------------##########################################
#############---------#######################-----####################
###########--------####---------------------------------##############
#########-------####---------------------------------------###########
########-------##--------###########################---------#########
#######-------##-------####------------------##########-------########
#######------##------###---------------------############------#######
######------##------##-----------------------#############------######
######------##------##------##############################------######
######------##------##------##############################------######
######------##------##------##############################------######
#######------##-----##------##############################------######
#######-------##----##------##############################------######
########-------##---##------##############################------######
#########-------###-##------##############################------######
###########--------####-----##############################------######
#############---------####################################------######
###############-----------------------------------------##------######
##################--------------------------------------##------######
#######################---------------------------------##------######
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
#### _ _ _ _ _ _ _ _ ####
#### / / / / / / / /\ / / / ####
#### / /_ _ / /_ _ / / _ _ / / \ / /_ _ / ####
#### / /\ / / / / \/ /\ ####
#### / / \ / / /_ _ / / / \ ####
#### ####
######################################################################
######################################################################
#--------------------------- SEED SETTING ---------------------------#
seed.value<-NULL
seed.value<-1 # Uncomment this line if you want to fix a seed. If this is the case multiple run of the script will give always the same result.
if (is.numeric(seed.value)){seed.value<-seed.value} else {seed.value<-round(runif(1,min=1,max=10000))}
#-------------------- READ THE CONFIGURATION FILE -------------------#
configuration.table<-read.table("configuration.txt",header = TRUE,dec=".", sep="\t")
model.type<-as.character(configuration.table$MODEL)
model.run.matrix<-as.character(configuration.table$RUN)
bootstrap.sample.values<-as.numeric(configuration.table$BOOTSTRAP_SAMPLES_ROC_CURVE)
analysis.parameter.matrix<-as.character(configuration.table$ANALYSIS_PARAMETER)
bootstrap.model.variability<-as.character(configuration.table$BOOTSTRAP_MODEL_VARIABILITY_RUN)
bootstrap.sample.model<-as.numeric(configuration.table$BOOTSTRAP_SAMPLES_MODEL_VARIABILITY)
#-------------------- READ THE CALIBRATION DATA ----------------------#
# Calibration table layout (tab-separated): column 1 = observation ID,
# column 2 = binary grouping variable (0/1; labelled "No Landslide"/"Landslide"
# later), columns 3..ncol = explanatory variables. Rows with any NA are dropped.
data.table<-read.table("calibration.txt",header = TRUE,dec=".", sep="\t")
names(data.table)
dim(data.table)
data.table<-na.omit(data.table)
dim(data.table)
# Explanatory variables only (list-style column selection 3..ncol).
explanatory.variables<-data.table[3:dim(data.table)[2]]
str(explanatory.variables)
range(explanatory.variables)
dim(explanatory.variables)
# Grouping variable plus explanatory variables (columns 2..ncol).
data.variables<-data.table[,2:dim(data.table)[2]]
#data.variables<-data.table[,2:58]
dim(data.variables)
# Binary response as a factor, as required by MASS::lda().
grouping.variable<-as.factor(data.table[,2])
str(grouping.variable)
identification.value<-data.table[,1]
#str(identification.value)
## Histogram of posterior grouping variable
# Class breaks reused by every susceptibility histogram in this script.
breaks.histogram.values<-c(0,0.2,0.45,0.55,0.8,1)
windows()
hist(data.table[,2], breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of grouping variable", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
# Same histogram exported to PDF.
pdf(file = "GroupingVariable_Histogram.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
hist(data.table[,2], breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of grouping variable", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
dev.off()
#--------------------- READ THE VALIDATION DATA ---------------------#
# Validation table with the same layout as the calibration table:
# column 1 = ID, column 2 = binary grouping variable, columns 3..ncol =
# explanatory variables. Rows with any NA are dropped.
validation.table<-read.table("validation.txt",header = TRUE,dec=".", sep="\t")
dim(validation.table)
validation.table<-na.omit(validation.table)
dim(validation.table)
validation.explanatory.variables<-validation.table[3:dim(validation.table)[2]]
str(validation.explanatory.variables)
range(validation.explanatory.variables)
dim(validation.explanatory.variables)
validation.variables<-validation.table[,2:dim(validation.table)[2]]
dim(validation.variables)
validation.grouping.variable<-as.factor(validation.table[,2])
str(validation.grouping.variable)
validation.identification.value<-validation.table[,1]
#str(validation.identification.value)
## Histogram of validation grouping variable
windows()
hist(validation.table[,2], breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of validation grouping variable", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
# Same histogram exported to PDF.
pdf(file = "GroupingVariable_Histogram_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
hist(validation.table[,2], breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of validation grouping variable", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
dev.off()
#--------------------- COLLINEARITY EVALUATION ---------------------#
# Colldiag is an implementation of the regression collinearity diagnostic procedures found in
# Belsley, Kuh, and Welsch (1980). These procedures examine the "conditioning" of the matrix
# of independent variables.
# Colldiag computes the condition indexes of the matrix. If the largest condition index
# (the condition number) is large (Belsley et al suggest 30 or higher), then there may be
# collinearity problems. All large condition indexes may be worth investigating.
# Colldiag also provides further information that may help to identify the source of these problems,
# the variance decomposition proportions associated with each condition index. If a large condition
# index is associated two or more variables with large variance decomposition proportions, these
# variables may be causing collinearity problems. Belsley et al suggest that a large proportion is 50
# percent or more.
#load collinearity package (perturb)
library(perturb)
colnames(explanatory.variables)
collinearity.test<-colldiag(explanatory.variables)
#collinearity.test$condindx
#collinearity.test$pi
range(collinearity.test$condindx)
# Belsley-Kuh-Welsch rule of thumb: condition number >= 30 signals collinearity.
if(range(collinearity.test$condindx)[2] >= 30) {
collinearity.value<-"Some explanatory variables are collinear"
} else {
collinearity.value<-"Explanatory variables are not collinear"
}
# print() both displays the variance-decomposition table (proportions below
# fuzz are suppressed) and returns it, so one call shows and captures the
# matrix. (Previously the same table was printed twice, once uncaptured.)
collinearity.evaluation.matrix<-print(collinearity.test,fuzz=.5)
# Write a self-describing plain-text report of the collinearity analysis.
write.table("COLLINEARITY ANALYSIS RESULT",file="result_Collinearity_Analysis.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("EXPLANATION",file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# ENCODING FIX: the report text contained the mis-encoded fragment
# "?onditioning?of"; restored to "'conditioning' of".
write.table("This analysis was performed with Colldiag an implementation of the regression collinearity diagnostic procedures found in Belsley, Kuh,
and Welsch (1980). These procedures examine the 'conditioning' of the matrix of independent variables. The procedure computes the condition
indexes of the matrix. If the largest condition index (the condition number) is large (Belsley et al suggest 30 or higher), then there may be
collinearity problems. All large condition indexes may be worth investigating. The procedure also provides further information that may help to
identify the source of these problems, the variance decomposition proportions associated with each condition index. If a large condition
index (> 30) is associated with two or more variables with large variance decomposition proportions, these variables may be causing collinearity problems.
Belsley et al suggest that a large proportion is 50 percent or more.",file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t",
row.names=FALSE, col.names=FALSE)
write.table("",file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("RESULTS",file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(paste("Largest condition index (the condition number) =",range(collinearity.test$condindx)[2]),file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(collinearity.value,file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Matrix of the variance decomposition proportions associated with each condition index (1st column)",file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(colnames(collinearity.evaluation.matrix),collinearity.evaluation.matrix),file="result_Collinearity_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#------------------- LINEAR DISCRIMINANT ANALISYS -------------------#
#load package (MASS)
library(MASS)
##### Linear Discriminant Analisys Run
# The whole LDA branch (fit, evaluation plots, bootstrap, validation) runs only
# when the first RUN flag from configuration.txt is "YES".
if(model.run.matrix[1] == "YES")
{
#if(class(result.lda[1]) == "NULL") # Other Error selection criteria. In this case the IF istruction must be put at the end of the script
# Dry-run lda() inside try(): if the fit fails, re-trigger the error and dump
# the traceback to Error_LDA_Analysis.txt so the failure is recorded on disk.
if (class(try(lda(explanatory.variables, grouping.variable, tol=0.001, method="moment")))=="try-error")
{
lda(explanatory.variables, grouping.variable, tol=0.001, method="moment")
write.table("Linear Discriminant Analysis was not completed",file="Error_LDA_Analysis.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Error_LDA_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Error LOG",file="Error_LDA_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# .Traceback holds the call stack of the last uncaught error.
write.table(cbind("Message",rev(1:length(as.vector(.Traceback)))," ->",as.vector(.Traceback)),file="Error_LDA_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
##### Linear Discriminant Analisys using data.frame
result.lda<-NULL
# Fit the LDA model: explanatory variables vs binary landslide grouping factor.
result.lda<-lda(explanatory.variables, grouping.variable, tol=0.001, method="moment")
# Result Predicted
# predict() on the calibration data yields $class (hard 0/1 labels) and
# $posterior (class-membership probabilities; column 2 = probability of "1").
predict.result.lda<-predict(result.lda)
str(predict.result.lda)
### predict.result.lda$class is obtained rounding the posterior probability associated to 1 (predict.result.lda$posterior[,2])
length(predict.result.lda$class[predict.result.lda$class==0])
length(data.table[,2][data.table[,2]==0])
# 2x2 confusion matrix: rows = observed classes, columns = predicted classes.
cross.classification.lda<-table(grouping.variable,predict.result.lda$class,dnn=c("Observed","Predicted"))
rownames(cross.classification.lda)<-list("No Landslide","Landslide") # Observed
colnames(cross.classification.lda)<-list("No Landslide","Landslide") # Predicted
str(cross.classification.lda)
# Assignation of a matching code between observed and predicted values
# Each observed/predicted pair is encoded as one digit (same scheme as the
# threshold loop below): "00"->1 TN, "01"->2 FP, "10"->3 FN, "11"->4 TP.
result.lda.matching.code<-paste(grouping.variable,as.numeric(levels(predict.result.lda$class))[predict.result.lda$class],sep="")
result.lda.matching.code<-gsub("00","1",result.lda.matching.code)
result.lda.matching.code<-gsub("01","2",result.lda.matching.code)
result.lda.matching.code<-gsub("10","3",result.lda.matching.code)
result.lda.matching.code<-gsub("11","4",result.lda.matching.code)
result.lda.matching.code<-as.numeric(result.lda.matching.code)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
# Contingency-table summary plus association statistics (chi-square, phi,
# contingency coefficient, Cramer's V) for the confusion matrix.
contingency.table.lda<-table2d_summary(cross.classification.lda)
test.table.lda<-assocstats(cross.classification.lda)
#co_table(cross.classification.lda, margin=1)
#mar_table(cross.classification.lda)
#structable(cross.classification.lda)
#Different plots for contingency table
windows()
fourfold(cross.classification.lda, std="margin", main="LINEAR DISCRIMINANT ANALYSIS MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#A ROC curve plots the false alarm rate against the hit rate
#for a probablistic forecast for a range of thresholds.
#load package (verification)
library(verification)
#verify function
#Based on the type of inputs, this function calculates a range of verification statistics and skill scores.
#Additionally, it creates a verify class object that can be further analyzed.
##### ROC PLOT OBS - BINARY PREDICTION
#windows()
#roc.plot(as.numeric(data.table$FRAXD2[-result.lda$na.action]),as.numeric(predict.result.lda$class),main = "ROC PLOT: LINEAR DISCRIMINANT ANALYSIS MODEL - BINARY PREDICTED", binormal = TRUE, plot = "both")
##### ROC PLOT OBS - POSTERIOR PROBABILITY ASSOCIATED TO 1
## 1st method
#windows()
#roc.plot(data.table[,2],predict.result.lda$posterior[,2],main = "ROC PLOT: LINEAR DISCRIMINANT ANALYSIS MODEL", binormal = TRUE, plot = "both")
# 2nd method using verify function
# Verify the probabilistic forecast (posterior P(landslide)) against the
# observed binary outcome.
verification.results.lda<-verify(data.table[,2],predict.result.lda$posterior[,2], frcst.type="prob", obs.type="binary")
summary(verification.results.lda)
#str(verification.results.lda)
#windows()
#roc.plot(verification.results.lda, main = "ROC PLOT: LINEAR DISCRIMINANT ANALYSIS MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
# Area under the ROC curve (with sample counts in $n.total).
area.under.roc.curve.lda<-roc.area(data.table[,2],predict.result.lda$posterior[,2])
## showing confidence intervals. MAY BE SLOW
windows()
roc.plot(verification.results.lda, main = "ROC PLOT: LINEAR DISCRIMINANT ANALYSIS MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[1] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.lda$A,2),"; Sample size = ",area.under.roc.curve.lda$n.total,"; Bootstrap samples = ",bootstrap.sample.values[1], sep=""), side=3, col="red", cex=0.8)
## Histogram of posterior probability
windows()
hist(predict.result.lda$posterior[,2], breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Linear Disciminant Analysis susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
pdf(file = "result_LDA_Histogram.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
hist(predict.result.lda$posterior[,2], breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Linear Disciminant Analysis susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
dev.off()
# EXPORT OF PLOT FOR LDA MODEL
pdf(file = "result_LDA_FourfoldPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.lda, std="margin", main="LINEAR DISCRIMINANT ANALYSIS MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
dev.off()
#pdf(file = "result_LDA_ROCPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.results.lda, main = "ROC PLOT: LINEAR DISCRIMINANT ANALYSIS MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#dev.off()
pdf(file = "result_LDA_ROCPlot_bootstrap.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.results.lda, main = "ROC PLOT: LINEAR DISCRIMINANT ANALYSIS MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[1] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.lda$A,2),"; Sample size = ",area.under.roc.curve.lda$n.total,"; Bootstrap samples = ",bootstrap.sample.values[1], sep=""), side=3, col="red", cex=0.8)
dev.off()
## BOOTSTRAP PROCEDURE FOR THE ESTIMATION OF MODEL PREDICTION VARIABILITY
# Optional branch (flag from configuration.txt): refit the LDA on bootstrap
# resamples of the calibration rows to quantify how much the posterior
# probabilities vary across resamples.
if(bootstrap.model.variability[1] == "YES")
{
bootstrap.sample.model.lda<-bootstrap.sample.model[1]
# Result matrix: one row per calibration observation; column 1 = ID, then one
# triple per bootstrap run: (selection count, probability for selected rows,
# prediction for all rows).
matrix.bootstrap.model.lda<-matrix(data=NA, nrow=dim(data.table)[1], ncol=(bootstrap.sample.model.lda*3)+1)
colnames(matrix.bootstrap.model.lda)<-rep("na",(bootstrap.sample.model.lda*3)+1)
matrix.bootstrap.model.lda[,1]<-identification.value
colnames(matrix.bootstrap.model.lda)[1]<-"ID"
name.sel.run<-paste(rep("ID_Selection_Run",bootstrap.sample.model.lda),1:bootstrap.sample.model.lda,sep="_")
colnames(matrix.bootstrap.model.lda)[seq(2,(bootstrap.sample.model.lda*3)-1,3)]<-name.sel.run
name.prob.run<-paste(rep("Probability_Run",bootstrap.sample.model.lda),1:bootstrap.sample.model.lda,sep="_")
colnames(matrix.bootstrap.model.lda)[seq(3,(bootstrap.sample.model.lda*3),3)]<-name.prob.run
name.pred.run<-paste(rep("Prediction_Run",bootstrap.sample.model.lda),1:bootstrap.sample.model.lda,sep="_")
colnames(matrix.bootstrap.model.lda)[seq(4,(bootstrap.sample.model.lda*3)+1,3)]<-name.pred.run
selection.index<-NULL
library(MASS)
#Bootstrap procedure
for (count.boot in 1:bootstrap.sample.model.lda)
{
# Resample row indices with replacement (standard bootstrap sample).
selection.index<-sample(1:dim(data.table)[1], replace=TRUE, prob=NULL)
# How many times each row was drawn in this run (NA if never drawn).
matrix.bootstrap.model.lda[as.numeric(names(table(selection.index))),(count.boot*3)-1]<-table(selection.index)
explanatory.variables.bootstrap.model.lda<-data.table[selection.index,3:dim(data.table)[2]]
grouping.variable.bootstrap.model.lda<-as.factor(data.table[selection.index,2])
# Refit LDA on the bootstrap sample with the same settings as the main fit.
result.bootstrap.model.lda<-lda(explanatory.variables.bootstrap.model.lda, grouping.variable.bootstrap.model.lda, tol=0.001, method="moment")
# Posterior P(landslide): "probability" column only for rows that were drawn,
# "prediction" column for every calibration row.
matrix.bootstrap.model.lda[as.numeric(names(table(selection.index))),(count.boot*3)]<-predict(result.bootstrap.model.lda,newdata=explanatory.variables[as.numeric(names(table(selection.index))),])$posterior[,2]
matrix.bootstrap.model.lda[,(count.boot*3)+1]<-predict(result.bootstrap.model.lda,newdata=explanatory.variables)$posterior[,2]
}
# Export of bootstrap sample
write.table(matrix.bootstrap.model.lda,file="result_LDA_BootstrapSamples.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=TRUE)
# Per-observation summary statistics across bootstrap runs.
ID.bootstrap.model.lda.count<-numeric(length=dim(data.table)[1])
#Probability (selected values)
bootstrap.model.lda.probability.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.probability.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.probability.min<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.probability.max<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.probability.sderror<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.probability.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
#Prediction (all values)
bootstrap.model.lda.prediction.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.prediction.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.prediction.min<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.prediction.max<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.prediction.sderror<-numeric(length=dim(data.table)[1])
bootstrap.model.lda.prediction.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
for (count.row.variability in 1:dim(data.table)[1])
{
# Statistics on boostrapped probability
# "Probability" columns contain NA for runs where the row was not drawn, so
# na.omit() restricts these statistics to the runs that used the row.
ID.bootstrap.model.lda.count[count.row.variability]<-length(na.omit(matrix.bootstrap.model.lda[count.row.variability,seq(2,(bootstrap.sample.model.lda*3)-1,3)]))
bootstrap.model.lda.probability.mean[count.row.variability]<-mean(na.omit(matrix.bootstrap.model.lda[count.row.variability,seq(3,(bootstrap.sample.model.lda*3),3)]))
bootstrap.model.lda.probability.sd[count.row.variability]<-sd(na.omit(matrix.bootstrap.model.lda[count.row.variability,seq(3,(bootstrap.sample.model.lda*3),3)]))
bootstrap.model.lda.probability.min[count.row.variability]<-min(na.omit(matrix.bootstrap.model.lda[count.row.variability,seq(3,(bootstrap.sample.model.lda*3),3)]))
bootstrap.model.lda.probability.max[count.row.variability]<-max(na.omit(matrix.bootstrap.model.lda[count.row.variability,seq(3,(bootstrap.sample.model.lda*3),3)]))
bootstrap.model.lda.probability.sderror[count.row.variability]<-bootstrap.model.lda.probability.sd[count.row.variability]/ID.bootstrap.model.lda.count[count.row.variability]
bootstrap.model.lda.probability.quantiles[count.row.variability,]<-quantile(na.omit(matrix.bootstrap.model.lda[count.row.variability,seq(3,(bootstrap.sample.model.lda*3),3)]),probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
# Statistics on boostrapped prediction
# "Prediction" columns are filled for every row in every run (no NA handling).
bootstrap.model.lda.prediction.mean[count.row.variability]<-mean(matrix.bootstrap.model.lda[count.row.variability,seq(4,(bootstrap.sample.model.lda*3)+1,3)])
bootstrap.model.lda.prediction.sd[count.row.variability]<-sd(matrix.bootstrap.model.lda[count.row.variability,seq(4,(bootstrap.sample.model.lda*3)+1,3)])
bootstrap.model.lda.prediction.min[count.row.variability]<-min(matrix.bootstrap.model.lda[count.row.variability,seq(4,(bootstrap.sample.model.lda*3)+1,3)])
bootstrap.model.lda.prediction.max[count.row.variability]<-max(matrix.bootstrap.model.lda[count.row.variability,seq(4,(bootstrap.sample.model.lda*3)+1,3)])
bootstrap.model.lda.prediction.sderror[count.row.variability]<-bootstrap.model.lda.prediction.sd[count.row.variability]/bootstrap.sample.model.lda
bootstrap.model.lda.prediction.quantiles[count.row.variability,]<-quantile(matrix.bootstrap.model.lda[count.row.variability,seq(4,(bootstrap.sample.model.lda*3)+1,3)],probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
}
# Export of bootstrap sample statistics
write.table(cbind("ID","LDA_NumberSelectedSamples","LDA_Probability_Mean","LDA_Probability_Sd","LDA_Probability_Min","LDA_Probability_Max","LDA_Probability_Sderror","LDA_Probability_Quantiles_0","LDA_Probability_Quantiles_0.05","LDA_Probability_Quantiles_0.25","LDA_Probability_Quantiles_0.5","LDA_Probability_Quantiles_0.75","LDA_Probability_Quantiles_0.95","LDA_Probability_Quantiles_1","LDA_Prediction_Mean","LDA_Prediction_Sd","LDA_Prediction_Min","LDA_Prediction_Max","LDA_Prediction_Sderror","LDA_Prediction_Quantiles_0","LDA_Prediction_Quantiles_0.05","LDA_Prediction_Quantiles_0.25","LDA_Prediction_Quantiles_0.5","LDA_Prediction_Quantiles_0.75","LDA_Prediction_Quantiles_0.95","LDA_Prediction_Quantiles_1"),file="result_LDA_BootstrapStatistics.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(identification.value,ID.bootstrap.model.lda.count,bootstrap.model.lda.probability.mean,bootstrap.model.lda.probability.sd,bootstrap.model.lda.probability.min,bootstrap.model.lda.probability.max,bootstrap.model.lda.probability.sderror,bootstrap.model.lda.probability.quantiles,bootstrap.model.lda.prediction.mean,bootstrap.model.lda.prediction.sd,bootstrap.model.lda.prediction.min,bootstrap.model.lda.prediction.max,bootstrap.model.lda.prediction.sderror,bootstrap.model.lda.prediction.quantiles),file="result_LDA_BootstrapStatistics.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Scatter of mean bootstrapped probability vs mean bootstrapped prediction;
# points near the 1:1 line indicate the two estimates agree.
windows()
plot(bootstrap.model.lda.probability.mean,bootstrap.model.lda.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="LDA BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lda,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
pdf(file = "result_LDA_BootstrapMeansComparison.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(bootstrap.model.lda.probability.mean,bootstrap.model.lda.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="LDA BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lda,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
dev.off()
#windows()
#double.sd.histogram.variability<-hist(bootstrap.model.lda.probability.sd*2,breaks=seq(0,1,0.05),labels=TRUE)
#plot(double.sd.histogram.variability$counts, seq(0,0.95,0.05), type="S",ylim=c(0,1),labels=TRUE)
# BOOTSTRAPPED PROBABILITY - Fit parabola 3 parameter y = ax^2 + bx + c
# Model the variability (2 SD) as a function of the mean probability with a
# one-parameter parabola forced through (0,0) and (1,0): y = a*x^2 - a*x.
parabola.probability.lda<-cbind(bootstrap.model.lda.probability.mean,2*bootstrap.model.lda.probability.sd)
parabola.probability.lda<-na.omit(parabola.probability.lda[order(parabola.probability.lda[,1]),])
colnames(parabola.probability.lda)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.probability.lda <- nls(parabola.probability.lda[,"ordinate"] ~ coeff.a*(parabola.probability.lda[,"abscissa"]^2) + (-1)*coeff.a*parabola.probability.lda[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.probability.lda<-predict(fit.parabola.probability.lda)
#coef(fit.parabola.probability.lda)
windows()
plot(parabola.probability.lda[,"abscissa"],parabola.probability.lda[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="LDA Model Probability Variability (Bootstrap)")
lines(parabola.probability.lda[,"abscissa"],value.parabola.probability.lda,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lda,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
# Build a plotmath expression of the fitted equation for annotation.
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.lda),3),coeff.b= -round(coef(fit.parabola.probability.lda),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_LDA_BootstrapProbabilityVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.probability.lda[,"abscissa"],parabola.probability.lda[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="LDA Model Probability Variability (Bootstrap)")
lines(parabola.probability.lda[,"abscissa"],value.parabola.probability.lda,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lda,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.lda),3),coeff.b= -round(coef(fit.parabola.probability.lda),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
# BOOTSTRAPPED PREDICTION - Fit parabola 3 parameter y = ax^2 + bx + c
# Same one-parameter parabola fit as above, applied to the prediction columns
# (all runs, no NA filtering).
parabola.prediction.lda<-cbind(bootstrap.model.lda.prediction.mean,2*bootstrap.model.lda.prediction.sd)
parabola.prediction.lda<-parabola.prediction.lda[order(parabola.prediction.lda[,1]),]
colnames(parabola.prediction.lda)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.prediction.lda <- nls(parabola.prediction.lda[,"ordinate"] ~ coeff.a*(parabola.prediction.lda[,"abscissa"]^2) + (-1)*coeff.a*parabola.prediction.lda[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.prediction.lda<-predict(fit.parabola.prediction.lda)
#coef(fit.parabola.prediction.lda)
windows()
plot(parabola.prediction.lda[,"abscissa"],parabola.prediction.lda[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="LDA Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.lda[,"abscissa"],value.parabola.prediction.lda,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lda,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.lda),3),coeff.b= -round(coef(fit.parabola.prediction.lda),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_LDA_BootstrapPredictionVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.prediction.lda[,"abscissa"],parabola.prediction.lda[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="LDA Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.lda[,"abscissa"],value.parabola.prediction.lda,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lda,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.lda),3),coeff.b= -round(coef(fit.parabola.prediction.lda),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
# End of the bootstrap model-variability branch.
}
## Sensitivity, Specificity, Cohens kappa plot
# For every probability threshold on the ROC curve, rebuild the 2x2
# contingency counts and compute Cohen's kappa.
roc.plot.lda.series<-roc.plot(verification.results.lda,binormal=TRUE)
#str(roc.plot.lda.series)
#roc.plot.lda.series$plot.data
#str(roc.plot.lda.series$plot.data)
# One row per threshold; TPR/FPR are taken directly from roc.plot's plot.data.
contingency.table.matrix.lda<-matrix(nrow=dim(roc.plot.lda.series$plot.data)[1],ncol=8)
colnames(contingency.table.matrix.lda)<-c("Threshold","TP","TN","FP","FN","TPR","FPR","COHEN_KAPPA")
contingency.table.matrix.lda[,1]<-roc.plot.lda.series$plot.data[,1,1]
contingency.table.matrix.lda[,6]<-roc.plot.lda.series$plot.data[,2,1]
contingency.table.matrix.lda[,7]<-roc.plot.lda.series$plot.data[,3,1]
# NOTE(review): "odserved" is a long-standing typo for "observed"; kept as-is
# because the name may be referenced later in the file.
values.odserved<-data.table[,2]
values.predicted<-predict.result.lda$posterior[,2]
for (threshold.series in 1:dim(roc.plot.lda.series$plot.data)[1])
{
value.threshold<-contingency.table.matrix.lda[threshold.series,1]
values.probability.reclassified<-NULL
values.probability.reclassified<-numeric(length=length(values.odserved))
# Reclassify posterior probabilities to 0/1 at the current threshold.
for (length.observed.series in 1:length(values.odserved))
{
if (values.predicted[length.observed.series] > value.threshold)
{
values.probability.reclassified[length.observed.series]<-1
} else
{
values.probability.reclassified[length.observed.series]<-0
}
}
#sum(values.probability.reclassified-round(values.predicted)) # Check sum: It has to be 0 if threshold is equal to 1
# Encode observed/predicted pairs: "00"->1 TN, "01"->2 FP, "10"->3 FN, "11"->4 TP.
series.pasted<-paste(values.odserved,values.probability.reclassified,sep="")
series.pasted<-gsub("00","1",series.pasted)
series.pasted<-gsub("01","2",series.pasted)
series.pasted<-gsub("10","3",series.pasted)
series.pasted<-gsub("11","4",series.pasted)
series.pasted<-as.numeric(series.pasted)
TP<-length(series.pasted[series.pasted>=4]) # True Positive
FN<-length(series.pasted[series.pasted>=3 & series.pasted<4]) # False Negative
FP<-length(series.pasted[series.pasted>=2 & series.pasted<3]) # False Positive
TN<-length(series.pasted[series.pasted>=1 & series.pasted<2]) # True Negative
#TPR<-TP/(TP+FN) # Hit Rate or True Positive Rate or Sensitivity - Assigned before the for cicle using rocplot data
#FPR<-FP/(FP+TN) # False Alarm Rate or False Positive Rate or 1-Specificity
# Cohen's Kappa = (agreement-chance)/(1-chance) where agreement=(TP+TN)/(TP+TN+FP+FN) and chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
agreement=(TP+TN)/(TP+TN+FP+FN)
chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
cohen.kappa.value<-(agreement-chance)/(1-chance)
#Other
#library(vcd)
#cohen.kappa.value<-Kappa(cross.classification.table)
contingency.table.matrix.lda[threshold.series,2]<-TP
contingency.table.matrix.lda[threshold.series,3]<-TN
contingency.table.matrix.lda[threshold.series,4]<-FP
contingency.table.matrix.lda[threshold.series,5]<-FN
contingency.table.matrix.lda[threshold.series,8]<-cohen.kappa.value
}
# Plot sensitivity, specificity and Cohen's kappa against the probability
# threshold (screen + PDF export).
windows()
plot(roc.plot.lda.series$plot.data[,1,1],roc.plot.lda.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="LDA MODEL EVALUATION PLOT")
points(roc.plot.lda.series$plot.data[,1,1],1-roc.plot.lda.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.lda.series$plot.data[,1,1], contingency.table.matrix.lda[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
# NOTE(review): output filename contains "prova" (Italian for "test") — looks
# like a leftover from development; confirm the intended file name.
pdf(file = "result_LDA_ModelEvaluationPlot_prova.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(roc.plot.lda.series$plot.data[,1,1],roc.plot.lda.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="LDA MODEL EVALUATION PLOT")
points(roc.plot.lda.series$plot.data[,1,1],1-roc.plot.lda.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.lda.series$plot.data[,1,1], contingency.table.matrix.lda[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
dev.off()
## VALIDATION OF LDA MODEL (Matching LDA posterior probability results and validation grouping variable)
# NOTE(review): this compares the calibration-set predictions
# (predict.result.lda) against the VALIDATION observations, which assumes the
# validation rows correspond one-to-one (same units, same order) with the
# calibration rows — verify against the input files.
cross.classification.temporal.validation.lda<-table(validation.grouping.variable,predict.result.lda$class,dnn=c("Observed","Predicted"))
rownames(cross.classification.temporal.validation.lda)<-list("No Landslide","Landslide") #Observed
colnames(cross.classification.temporal.validation.lda)<-list("No Landslide","Landslide") #Predicted
#str(cross.classification.temporal.validation.lda)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
contingency.table.temporal.validation.lda<-table2d_summary(cross.classification.temporal.validation.lda)
test.table.temporal.validation.lda<-assocstats(cross.classification.temporal.validation.lda)
#Different plots for contingency table
windows()
fourfold(cross.classification.temporal.validation.lda, std="margin", main="TEMPORAL VALIDATION LDA MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#load package (verification)
library(verification)
# 2nd method using verify function
verification.temporal.validation.lda<-verify(validation.table[,2],predict.result.lda$posterior[,2], frcst.type="prob", obs.type="binary")
#summary(verification.temporal.validation.lda)
# showing confidence intervals. MAY BE SLOW
area.under.roc.curve.temporal.validation.lda<-roc.area(validation.table[,2],predict.result.lda$posterior[,2])
windows()
roc.plot(verification.temporal.validation.lda, main = "ROC PLOT: TEMPORAL VALIDATION LDA MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[1] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.lda$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.lda$n.total,"; Bootstrap samples = ",bootstrap.sample.values[1], sep=""), side=3, col="red", cex=0.8)
# EXPORT OF PLOT FOR VALIDATION OF LDA MODEL
pdf(file = "result_LDA_FourfoldPlot_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.temporal.validation.lda, std="margin", main="TEMPORAL VALIDATION LDA MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
dev.off()
#pdf(file = "result_LDA_ROCPlot_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.temporal.validation.lda, main = "ROC PLOT: TEMPORAL VALIDATION LDA MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#area.under.roc.curve.temporal.validation.lda<-roc.area(data.table[,2],predict.result.lda$posterior[,2])
#dev.off()
pdf(file = "result_LDA_ROCPlot_bootstrap_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.temporal.validation.lda, main = "ROC PLOT: TEMPORAL VALIDATION LDA MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[1] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.lda$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.lda$n.total,"; Bootstrap samples = ",bootstrap.sample.values[1], sep=""), side=3, col="red", cex=0.8)
dev.off()
# Encode each observed/predicted pair from the validation data set as a single
# confusion-matrix code:
#   "00" -> 1 (true negative), "01" -> 2 (false positive),
#   "10" -> 3 (false negative), "11" -> 4 (true positive).
predicted.class.numeric <- as.numeric(levels(predict.result.lda$class))[predict.result.lda$class]
paired.codes <- paste(validation.grouping.variable, predicted.class.numeric, sep = "")
# Apply the substitutions in the same order as the original mapping.
for (swap in list(c("00", "1"), c("01", "2"), c("10", "3"), c("11", "4"))) {
  paired.codes <- gsub(swap[1], swap[2], paired.codes)
}
validation.lda.matching.code <- as.numeric(paired.codes)
# EXPORT OF LDA MODEL RESULTS
# Sequentially appends a human-readable report to result_LDA.txt: model
# outputs, both contingency tables, the matching-code legend and the final
# per-observation table. Empty write.table("") calls emit blank spacer lines.
write.table("RESULTS OF LINEAR DISCRIMINANT ANALYSIS",file="result_LDA.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("LDA MODEL OUTPUTS",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Prior Probabilities",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(names(result.lda$prior),result.lda$prior),file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Total number",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind("N",result.lda$N),file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Counts",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(names(result.lda$counts),result.lda$counts),file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Means",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Group means are transposed so each explanatory variable is one output row.
write.table(t(rbind(c("",colnames(result.lda$means)),cbind(rownames(result.lda$means),result.lda$means))),file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Discriminant function coefficients",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#write.table(cbind(rownames(result.lda$scaling),result.lda$scaling),file="result_LDA.tsv", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Coefficients sorted ascending by value before export.
scaling.order<-result.lda$scaling[order(result.lda$scaling),]
scaling.matrix.ordered<-cbind(names(scaling.order),scaling.order)
write.table(scaling.matrix.ordered,file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("CONTINGENCY TABLE MODEL RESULT",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.lda$table[,1,],contingency.table.lda$table[,2,],contingency.table.lda$table[,3,])),file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("CONTINGENCY TABLE VALIDATION",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.temporal.validation.lda$table[,1,],contingency.table.temporal.validation.lda$table[,2,],contingency.table.temporal.validation.lda$table[,3,])),file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("MATCHING CODE DEFINITION",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Legend for the 1..4 matching codes used in the final table below.
write.table(cbind(c("","OBSERVED NO LANDSLIDES: 0","OBSERVED LANDSLIDES: 1"), c("PREDICTED NO LANDSLIDES: 0","00 -> Code 1","10 -> Code 3"), c("PREDICTED LANDSLIDES: 1","01 -> Code 2","11 -> Code 4")),file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("FINAL RESULTS",file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Per-observation table: ID, observed class, posterior probability, predicted
# class, model matching code, validation observed class, validation code.
write.table(rbind(c("ID","GROUPING VARIABLE","LDA MODEL POSTERIOR PROBABILITY","LDA MODEL CLASSIFICATION","LDA MODEL RESULT MATCHING CODE","VALIDATION GROUPING VARIABLE","LDA VALIDATION MATCHING CODE"),cbind(identification.value,as.numeric(levels(grouping.variable))[grouping.variable],predict.result.lda$posterior[,2],as.numeric(levels(predict.result.lda$class))[predict.result.lda$class],result.lda.matching.code,as.numeric(levels(validation.grouping.variable))[validation.grouping.variable],validation.lda.matching.code)),file="result_LDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# End of the LDA section (the matching "if" opens before this chunk).
}
#----------------- QUADRATIC DISCRIMINANT ANALISYS ------------------#
# QDA section; runs only when enabled in model.run.matrix (defined earlier).
if(model.run.matrix[2] == "YES")
{
# Changing of Dummy Explanatory Variables in Numeric variable
# "DUM" mode: keep dummy (0/1) variables but jitter them with uniform noise in
# [-0.1, 0.1] so QDA can estimate per-group covariance matrices.
if (analysis.parameter.matrix[2] == "DUM")
{
print("The Quadratic Discriminant Analsysis (QDA) will be performed using dummy variables, but a random variation in these variables will be introduced")
for (count.variables in 1:dim(explanatory.variables)[2])
{
#print(range(explanatory.variables[,count.variables]))
# A column with range exactly [0, 1] is treated as a dummy variable.
if (min(explanatory.variables[,count.variables])==0 & max(explanatory.variables[,count.variables])==1)
{
# Seed advanced per column so each dummy gets reproducible, distinct noise.
set.seed(seed.value)
explanatory.variables[,count.variables]<-explanatory.variables[,count.variables]+runif(dim(explanatory.variables)[1],-0.1,0.1)
# NOTE(review): this copies the jittered TRAINING column into the
# validation data set, replacing the validation values -- presumably the
# intent was to jitter validation.explanatory.variables' own column.
# Confirm whether training and validation sets share the same rows.
validation.explanatory.variables[,count.variables]<-explanatory.variables[,count.variables]
seed.value<-seed.value+1
}
}
}
# "SEL" mode: drop dummy (0/1) columns entirely before fitting QDA.
if (analysis.parameter.matrix[2] == "SEL")
{
print("The Quadratic Discriminant Analsysis (QDA) will be performed excluding dummy variables")
index.variables.dummy<-NULL
for (count.variables in 1:dim(explanatory.variables)[2])
{
print(range(explanatory.variables[,count.variables]))
# A column with range exactly [0, 1] is treated as a dummy variable.
if (min(explanatory.variables[,count.variables])==0 & max(explanatory.variables[,count.variables])==1)
{
index.variables.dummy<-c(index.variables.dummy,count.variables)
}
}
# NOTE(review): if NO dummy columns are found, index.variables.dummy stays
# NULL and the negative subscript becomes zero-length, which selects ZERO
# columns instead of all of them -- confirm inputs always contain at least
# one dummy variable when SEL mode is used.
explanatory.variables<-explanatory.variables[,-index.variables.dummy]
validation.explanatory.variables<-validation.explanatory.variables[,-index.variables.dummy]
#str(explanatory.variables)
}
# Trial fit to detect QDA failure (e.g. rank-deficient group covariance).
# NOTE(review): inherits(x, "try-error") is the robust test; class()=="..."
# works here but is fragile. Also note the guard only writes a log file --
# execution continues and the real qda() call below will still error; and the
# trial uses method="moment" while the actual fit uses method="mle".
if (class(try(qda(explanatory.variables, grouping.variable, method="moment")))=="try-error")
{
#qda(explanatory.variables, grouping.variable, method="moment")
write.table("Quadratic Discriminant Analysis was not completed",file="Error_QDA_Analysis.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Error_QDA_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Error LOG",file="Error_QDA_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Dumps the last traceback (.Traceback) in reverse call order.
write.table(cbind("Message",rev(1:length(as.vector(.Traceback)))," ->",as.vector(.Traceback)),file="Error_QDA_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
##### Quadratic Discriminant Analisys using data.frame
# Fit QDA (MASS::qda) with maximum-likelihood covariance estimates and predict
# the training set itself (resubstitution).
result.qda<-NULL
result.qda<-qda(explanatory.variables, grouping.variable, method="mle")
# Result Predicted
predict.result.qda<-predict(result.qda)
# NOTE(review): str() calls below print debug output to the console; consider
# removing or commenting them out for production runs.
str(predict.result.qda)
# 2x2 confusion table: observed vs. predicted class on the training data.
cross.classification.qda<-table(grouping.variable,predict.result.qda$class,dnn=c("Observed","Predicted"))
rownames(cross.classification.qda)<-list("No Landslide","Landslide") # Observed
colnames(cross.classification.qda)<-list("No Landslide","Landslide") # Predicted
str(cross.classification.qda)
# Collapse each observed/predicted pair into one confusion-matrix code:
#   "00" -> 1 (true negative), "01" -> 2 (false positive),
#   "10" -> 3 (false negative), "11" -> 4 (true positive).
qda.predicted.numeric <- as.numeric(levels(predict.result.qda$class))[predict.result.qda$class]
qda.pair.codes <- paste(grouping.variable, qda.predicted.numeric, sep = "")
# Nested gsub calls apply the substitutions innermost-first, i.e. in the
# order 00, 01, 10, 11.
qda.pair.codes <- gsub("11", "4", gsub("10", "3", gsub("01", "2", gsub("00", "1", qda.pair.codes))))
result.qda.matching.code <- as.numeric(qda.pair.codes)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
# Margin totals and association statistics for the QDA confusion table.
contingency.table.qda<-table2d_summary(cross.classification.qda)
test.table.qda<-assocstats(cross.classification.qda)
#Different plots for contingency table
# Fourfold display of the 2x2 table, standardized by margins.
windows()
fourfold(cross.classification.qda, std="margin", main="QUADRATIC DISCRIMINANT ANALYSIS MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#A ROC curve plots the false alarm rate against the hit rate
#for a probablistic forecast for a range of thresholds.
#load package (verification)
library(verification)
#verify function
#Based on the type of inputs, this function calculates a range of verification statistics and skill scores.
#Additionally, it creates a verify class object that can be further analyzed.
##### ROC PLOT OBS - POSTERIOR PROBABILITY ASSOCIATED TO 1
## 1st method
#windows()
#roc.plot(data.table[,2],predict.result.qda$posterior[,2],main = "ROC PLOT: QUADRATIC DISCRIMINANT ANALYSIS MODEL", binormal = TRUE, plot = "both")
# 2nd method using verify function
# Verification scores for the binary observations (column 2 of data.table)
# against the QDA posterior probability of class 1.
verification.results.qda<-verify(data.table[,2],predict.result.qda$posterior[,2], frcst.type="prob", obs.type="binary")
#summary(verification.results.qda)
#str(verification.results.qda)
#windows()
#roc.plot(verification.results.qda, main = "ROC PLOT: QUADRATIC DISCRIMINANT ANALYSIS MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
area.under.roc.curve.qda<-roc.area(data.table[,2],predict.result.qda$posterior[,2])
## showing confidence intervals. MAY BE SLOW
# ROC plot with bootstrap confidence intervals; resample count comes from
# bootstrap.sample.values[2] (defined earlier in the script).
windows()
roc.plot(verification.results.qda, main = "ROC PLOT: QUADRATIC DISCRIMINANT ANALYSIS MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[2] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.qda$A,2),"; Sample size = ",area.under.roc.curve.qda$n.total,"; Bootstrap samples = ",bootstrap.sample.values[2], sep=""), side=3, col="red", cex=0.8)
## Histogram of posterior probability
# Histogram of the QDA susceptibility (posterior probability of class 1);
# breaks.histogram.values defines the class boundaries, colors follow a
# green-to-red susceptibility ramp.
windows()
hist(predict.result.qda$posterior[,2], breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Quadratic Disciminant Analysis susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
pdf(file = "result_QDA_Histogram.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
hist(predict.result.qda$posterior[,2], breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Quadratic Disciminant Analysis susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
dev.off()
# EXPORT OF PLOT FOR QDA MODEL
pdf(file = "result_QDA_FourfoldPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.qda, std="margin", main="QUADRATIC DISCRIMINANT ANALYSIS MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
dev.off()
#pdf(file = "result_QDA_ROCPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.results.qda, main = "ROC PLOT: QUADRATIC DISCRIMINANT ANALYSIS MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#dev.off()
# PDF export of the bootstrap ROC plot (same call as on-screen version above).
pdf(file = "result_QDA_ROCPlot_bootstrap.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.results.qda, main = "ROC PLOT: QUADRATIC DISCRIMINANT ANALYSIS MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[2] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.qda$A,2),"; Sample size = ",area.under.roc.curve.qda$n.total,"; Bootstrap samples = ",bootstrap.sample.values[2], sep=""), side=3, col="red", cex=0.8)
dev.off()
## BOOTSTRAP PROCEDURE FOR THE ESTIMATION OF MODEL PREDICTION VARIABILITY
if(bootstrap.model.variability[2] == "YES")
{
# Result matrix layout: column 1 = observation ID, then one triplet of columns
# per bootstrap run:
#   ID_Selection_Run_k : how many times the observation was drawn in run k
#                        (NA if never drawn),
#   Probability_Run_k  : posterior for observations drawn in run k (NA else),
#   Prediction_Run_k   : posterior for ALL observations under run k's model.
bootstrap.sample.model.qda<-bootstrap.sample.model[2]
matrix.bootstrap.model.qda<-matrix(data=NA, nrow=dim(explanatory.variables)[1], ncol=(bootstrap.sample.model.qda*3)+1)
colnames(matrix.bootstrap.model.qda)<-rep("na",(bootstrap.sample.model.qda*3)+1)
matrix.bootstrap.model.qda[,1]<-identification.value
colnames(matrix.bootstrap.model.qda)[1]<-"ID"
name.sel.run<-paste(rep("ID_Selection_Run",bootstrap.sample.model.qda),1:bootstrap.sample.model.qda,sep="_")
colnames(matrix.bootstrap.model.qda)[seq(2,(bootstrap.sample.model.qda*3)-1,3)]<-name.sel.run
name.prob.run<-paste(rep("Probability_Run",bootstrap.sample.model.qda),1:bootstrap.sample.model.qda,sep="_")
colnames(matrix.bootstrap.model.qda)[seq(3,(bootstrap.sample.model.qda*3),3)]<-name.prob.run
name.pred.run<-paste(rep("Prediction_Run",bootstrap.sample.model.qda),1:bootstrap.sample.model.qda,sep="_")
colnames(matrix.bootstrap.model.qda)[seq(4,(bootstrap.sample.model.qda*3)+1,3)]<-name.pred.run
selection.index<-NULL
library(MASS)
#Bootstrap procedure
# Each run draws rows with replacement, refits QDA on the resample, then
# scores the selected rows (Probability) and the full data set (Prediction).
# NOTE(review): the bootstrap refit uses method="moment" while the main model
# above uses method="mle" -- confirm whether this difference is intentional.
for (count.boot in 1:bootstrap.sample.model.qda)
{
selection.index<-sample(1:dim(explanatory.variables)[1], replace=TRUE, prob=NULL)
matrix.bootstrap.model.qda[as.numeric(names(table(selection.index))),(count.boot*3)-1]<-table(selection.index)
explanatory.variables.bootstrap.model.qda<-explanatory.variables[selection.index,]
grouping.variable.bootstrap.model.qda<-as.factor(data.table[selection.index,2])
result.bootstrap.model.qda<-qda(explanatory.variables.bootstrap.model.qda, grouping.variable.bootstrap.model.qda, tol=0.001, method="moment")
matrix.bootstrap.model.qda[as.numeric(names(table(selection.index))),(count.boot*3)]<-predict(result.bootstrap.model.qda,newdata=explanatory.variables[as.numeric(names(table(selection.index))),])$posterior[,2]
matrix.bootstrap.model.qda[,(count.boot*3)+1]<-predict(result.bootstrap.model.qda,newdata=explanatory.variables)$posterior[,2]
}
# Export of bootstrap sample
write.table(matrix.bootstrap.model.qda,file="result_QDA_BootstrapSamples.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=TRUE)
# Preallocate per-observation summary vectors (one entry per row of
# data.table) for the statistics computed in the loop below.
ID.bootstrap.model.qda.count<-numeric(length=dim(data.table)[1])
#Probability (selected values)
bootstrap.model.qda.probability.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.qda.probability.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.qda.probability.min<-numeric(length=dim(data.table)[1])
bootstrap.model.qda.probability.max<-numeric(length=dim(data.table)[1])
bootstrap.model.qda.probability.sderror<-numeric(length=dim(data.table)[1])
# 7 columns: quantiles at probs 0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.
bootstrap.model.qda.probability.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
#Prediction (all values)
bootstrap.model.qda.prediction.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.qda.prediction.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.qda.prediction.min<-numeric(length=dim(data.table)[1])
bootstrap.model.qda.prediction.max<-numeric(length=dim(data.table)[1])
bootstrap.model.qda.prediction.sderror<-numeric(length=dim(data.table)[1])
bootstrap.model.qda.prediction.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
# Per-observation summary statistics over the bootstrap runs.
# "Probability" columns hold values only for runs in which the observation was
# drawn (NA otherwise), hence na.omit(); "Prediction" columns are filled for
# every run. seq_len() guards the degenerate zero-row case.
for (count.row.variability in seq_len(dim(data.table)[1]))
{
# Statistics on boostrapped probability
# Number of bootstrap runs in which this observation was selected.
ID.bootstrap.model.qda.count[count.row.variability]<-length(na.omit(matrix.bootstrap.model.qda[count.row.variability,seq(2,(bootstrap.sample.model.qda*3)-1,3)]))
bootstrap.model.qda.probability.mean[count.row.variability]<-mean(na.omit(matrix.bootstrap.model.qda[count.row.variability,seq(3,(bootstrap.sample.model.qda*3),3)]))
bootstrap.model.qda.probability.sd[count.row.variability]<-sd(na.omit(matrix.bootstrap.model.qda[count.row.variability,seq(3,(bootstrap.sample.model.qda*3),3)]))
bootstrap.model.qda.probability.min[count.row.variability]<-min(na.omit(matrix.bootstrap.model.qda[count.row.variability,seq(3,(bootstrap.sample.model.qda*3),3)]))
bootstrap.model.qda.probability.max[count.row.variability]<-max(na.omit(matrix.bootstrap.model.qda[count.row.variability,seq(3,(bootstrap.sample.model.qda*3),3)]))
# FIX: standard error of the mean is sd/sqrt(n); the previous code divided by
# n, understating the error by a factor of sqrt(n).
bootstrap.model.qda.probability.sderror[count.row.variability]<-bootstrap.model.qda.probability.sd[count.row.variability]/sqrt(ID.bootstrap.model.qda.count[count.row.variability])
bootstrap.model.qda.probability.quantiles[count.row.variability,]<-quantile(na.omit(matrix.bootstrap.model.qda[count.row.variability,seq(3,(bootstrap.sample.model.qda*3),3)]),probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
# Statistics on boostrapped prediction
bootstrap.model.qda.prediction.mean[count.row.variability]<-mean(matrix.bootstrap.model.qda[count.row.variability,seq(4,(bootstrap.sample.model.qda*3)+1,3)])
bootstrap.model.qda.prediction.sd[count.row.variability]<-sd(matrix.bootstrap.model.qda[count.row.variability,seq(4,(bootstrap.sample.model.qda*3)+1,3)])
bootstrap.model.qda.prediction.min[count.row.variability]<-min(matrix.bootstrap.model.qda[count.row.variability,seq(4,(bootstrap.sample.model.qda*3)+1,3)])
bootstrap.model.qda.prediction.max[count.row.variability]<-max(matrix.bootstrap.model.qda[count.row.variability,seq(4,(bootstrap.sample.model.qda*3)+1,3)])
# FIX: same sd/sqrt(n) correction for the prediction standard error.
bootstrap.model.qda.prediction.sderror[count.row.variability]<-bootstrap.model.qda.prediction.sd[count.row.variability]/sqrt(bootstrap.sample.model.qda)
bootstrap.model.qda.prediction.quantiles[count.row.variability,]<-quantile(matrix.bootstrap.model.qda[count.row.variability,seq(4,(bootstrap.sample.model.qda*3)+1,3)],probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
}
# Export of bootstrap sample statistics
# Header row first, then one row per observation with all bootstrap summary
# columns bound together.
write.table(cbind("ID","QDA_NumberSelectedSamples","QDA_Probability_Mean","QDA_Probability_Sd","QDA_Probability_Min","QDA_Probability_Max","QDA_Probability_Sderror","QDA_Probability_Quantiles_0","QDA_Probability_Quantiles_0.05","QDA_Probability_Quantiles_0.25","QDA_Probability_Quantiles_0.5","QDA_Probability_Quantiles_0.75","QDA_Probability_Quantiles_0.95","QDA_Probability_Quantiles_1","QDA_Prediction_Mean","QDA_Prediction_Sd","QDA_Prediction_Min","QDA_Prediction_Max","QDA_Prediction_Sderror","QDA_Prediction_Quantiles_0","QDA_Prediction_Quantiles_0.05","QDA_Prediction_Quantiles_0.25","QDA_Prediction_Quantiles_0.5","QDA_Prediction_Quantiles_0.75","QDA_Prediction_Quantiles_0.95","QDA_Prediction_Quantiles_1"),file="result_QDA_BootstrapStatistics.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(identification.value,ID.bootstrap.model.qda.count,bootstrap.model.qda.probability.mean,bootstrap.model.qda.probability.sd,bootstrap.model.qda.probability.min,bootstrap.model.qda.probability.max,bootstrap.model.qda.probability.sderror,bootstrap.model.qda.probability.quantiles,bootstrap.model.qda.prediction.mean,bootstrap.model.qda.prediction.sd,bootstrap.model.qda.prediction.min,bootstrap.model.qda.prediction.max,bootstrap.model.qda.prediction.sderror,bootstrap.model.qda.prediction.quantiles),file="result_QDA_BootstrapStatistics.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#windows()
#double.sd.histogram.variability<-hist(bootstrap.model.qda.probability.sd*2,breaks=seq(0,1,0.05),labels=TRUE)
#plot(double.sd.histogram.variability$counts, seq(0,0.95,0.05), type="S",ylim=c(0,1), labels=TRUE)
# Scatter of mean bootstrapped probability vs. mean bootstrapped prediction;
# the red 1:1 line marks perfect agreement.
windows()
plot(bootstrap.model.qda.probability.mean,bootstrap.model.qda.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="QDA BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.qda,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
# PDF export of the same comparison plot.
pdf(file = "result_QDA_BootstrapMeansComparison.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(bootstrap.model.qda.probability.mean,bootstrap.model.qda.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="QDA BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.qda,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
dev.off()
# BOOTSTRAPPED PROBABILITY - Fit parabola 3 parameter y = ax^2 + bx + c
parabola.probability.qda<-cbind(bootstrap.model.qda.probability.mean,2*bootstrap.model.qda.probability.sd)
parabola.probability.qda<-na.omit(parabola.probability.qda[order(parabola.probability.qda[,1]),])
colnames(parabola.probability.qda)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.probability.qda <- nls(parabola.probability.qda[,"ordinate"] ~ coeff.a*(parabola.probability.qda[,"abscissa"]^2) + (-1)*coeff.a*parabola.probability.qda[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.probability.qda<-predict(fit.parabola.probability.qda)
#coef(fit.parabola.probability.qda)
windows()
plot(parabola.probability.qda[,"abscissa"],parabola.probability.qda[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="QDA Model Probability Variability (Bootstrap)")
lines(parabola.probability.qda[,"abscissa"],value.parabola.probability.qda,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.qda,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.qda),3),coeff.b= -round(coef(fit.parabola.probability.qda),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_QDA_BootstrapProbabilityVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.probability.qda[,"abscissa"],parabola.probability.qda[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="QDA Model Probability Variability (Bootstrap)")
lines(parabola.probability.qda[,"abscissa"],value.parabola.probability.qda,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.qda,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.qda),3),coeff.b= -round(coef(fit.parabola.probability.qda),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
# BOOTSTRAPPED PREDICTION - Fit parabola 3 parameter y = ax^2 + bx + c
parabola.prediction.qda<-cbind(bootstrap.model.qda.prediction.mean,2*bootstrap.model.qda.prediction.sd)
parabola.prediction.qda<-parabola.prediction.qda[order(parabola.prediction.qda[,1]),]
colnames(parabola.prediction.qda)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.prediction.qda <- nls(parabola.prediction.qda[,"ordinate"] ~ coeff.a*(parabola.prediction.qda[,"abscissa"]^2) + (-1)*coeff.a*parabola.prediction.qda[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.prediction.qda<-predict(fit.parabola.prediction.qda)
#coef(fit.parabola.prediction.qda)
windows()
plot(parabola.prediction.qda[,"abscissa"],parabola.prediction.qda[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="QDA Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.qda[,"abscissa"],value.parabola.prediction.qda,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.qda,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.qda),3),coeff.b= -round(coef(fit.parabola.prediction.qda),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_QDA_BootstrapPredictionVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.prediction.qda[,"abscissa"],parabola.prediction.qda[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="QDA Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.qda[,"abscissa"],value.parabola.prediction.qda,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.qda,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.qda),3),coeff.b= -round(coef(fit.parabola.prediction.qda),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
}
## Sensitivity, Specificity, Cohens kappa plot
# roc.plot() is called for its returned $plot.data (threshold series); note it
# also draws on the currently active graphics device as a side effect.
roc.plot.qda.series<-roc.plot(verification.results.qda,binormal=TRUE)
#str(roc.plot.qda.series)
#roc.plot.qda.series$plot.data
#str(roc.plot.qda.series$plot.data)
# One row per probability threshold; TPR/FPR come straight from the roc.plot
# series, TP/TN/FP/FN and kappa are filled by the loop below.
contingency.table.matrix.qda<-matrix(nrow=dim(roc.plot.qda.series$plot.data)[1],ncol=8)
colnames(contingency.table.matrix.qda)<-c("Threshold","TP","TN","FP","FN","TPR","FPR","COHEN_KAPPA")
contingency.table.matrix.qda[,1]<-roc.plot.qda.series$plot.data[,1,1]
contingency.table.matrix.qda[,6]<-roc.plot.qda.series$plot.data[,2,1]
contingency.table.matrix.qda[,7]<-roc.plot.qda.series$plot.data[,3,1]
# "odserved" is a misspelling of "observed", but it is used consistently
# (including beyond this chunk), so it is deliberately left unrenamed.
values.odserved<-data.table[,2]
values.predicted<-predict.result.qda$posterior[,2]
for (threshold.series in 1:dim(roc.plot.qda.series$plot.data)[1])
{
value.threshold<-contingency.table.matrix.qda[threshold.series,1]
values.probability.reclassified<-NULL
values.probability.reclassified<-numeric(length=length(values.odserved))
for (length.observed.series in 1:length(values.odserved))
{
if (values.predicted[length.observed.series] > value.threshold)
{
values.probability.reclassified[length.observed.series]<-1
} else
{
values.probability.reclassified[length.observed.series]<-0
}
}
#sum(values.probability.reclassified-round(values.predicted)) # Check sum: It has to be 0 if threshold is equal to 1
series.pasted<-paste(values.odserved,values.probability.reclassified,sep="")
series.pasted<-gsub("00","1",series.pasted)
series.pasted<-gsub("01","2",series.pasted)
series.pasted<-gsub("10","3",series.pasted)
series.pasted<-gsub("11","4",series.pasted)
series.pasted<-as.numeric(series.pasted)
TP<-length(series.pasted[series.pasted>=4]) # True Positive
FN<-length(series.pasted[series.pasted>=3 & series.pasted<4]) # False Negative
FP<-length(series.pasted[series.pasted>=2 & series.pasted<3]) # False Positive
TN<-length(series.pasted[series.pasted>=1 & series.pasted<2]) # True Negative
#TPR<-TP/(TP+FN) # Hit Rate or True Positive Rate or Sensitivity - Assigned before the for cicle using rocplot data
#FPR<-FP/(FP+TN) # False Alarm Rate or False Positive Rate or 1-Specificity
# Cohen's Kappa = (agreement-chance)/(1-chance) where agreement=(TP+TN)/(TP+TN+FP+FN) and chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
agreement=(TP+TN)/(TP+TN+FP+FN)
chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
cohen.kappa.value<-(agreement-chance)/(1-chance)
#Other
#library(vcd)
#cohen.kappa.value<-Kappa(cross.classification.table)
contingency.table.matrix.qda[threshold.series,2]<-TP
contingency.table.matrix.qda[threshold.series,3]<-TN
contingency.table.matrix.qda[threshold.series,4]<-FP
contingency.table.matrix.qda[threshold.series,5]<-FN
contingency.table.matrix.qda[threshold.series,8]<-cohen.kappa.value
}
windows()
plot(roc.plot.qda.series$plot.data[,1,1],roc.plot.qda.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="QDA MODEL EVALUATION PLOT")
points(roc.plot.qda.series$plot.data[,1,1],1-roc.plot.qda.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.qda.series$plot.data[,1,1], contingency.table.matrix.qda[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
pdf(file = "result_QDA_ModelEvaluationPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(roc.plot.qda.series$plot.data[,1,1],roc.plot.qda.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="QDA MODEL EVALUATION PLOT")
points(roc.plot.qda.series$plot.data[,1,1],1-roc.plot.qda.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.qda.series$plot.data[,1,1], contingency.table.matrix.qda[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
dev.off()
## VALIDATION OF QDA MODEL (Matching QDA posterior probability results and validation grouping variable)
# Cross-tabulate the validation grouping variable against the QDA class
# predictions. NOTE(review): predict.result.qda is produced earlier in the
# script; confirm it refers to the validation records rather than the
# training set, otherwise observed and predicted rows are misaligned.
cross.classification.temporal.validation.qda<-table(validation.grouping.variable,predict.result.qda$class,dnn=c("Observed","Predicted"))
rownames(cross.classification.temporal.validation.qda)<-list("No Landslide","Landslide") #Observed
colnames(cross.classification.temporal.validation.qda)<-list("No Landslide","Landslide") #Predicted
#str(cross.classification.temporal.validation.qda)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
# 2x2 table summary with margins, plus association statistics from vcd.
contingency.table.temporal.validation.qda<-table2d_summary(cross.classification.temporal.validation.qda)
test.table.temporal.validation.qda<-assocstats(cross.classification.temporal.validation.qda)
#Different plots for contingency table
windows()
fourfold(cross.classification.temporal.validation.qda, std="margin", main="TEMPORAL VALIDATION QDA MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#load package (verification)
library(verification)
# 2nd method using verify function
# Forecast = posterior probability of class 1; observation = binary column 2
# of the validation table.
verification.temporal.validation.qda<-verify(validation.table[,2],predict.result.qda$posterior[,2], frcst.type="prob", obs.type="binary")
#summary(verification.temporal.validation.qda)
# showing confidence intervals. MAY BE SLOW
area.under.roc.curve.temporal.validation.qda<-roc.area(validation.table[,2],predict.result.qda$posterior[,2])
# On-screen ROC plot with bootstrap confidence intervals for the temporal
# validation of the QDA model.
# FIX: the original passed n.boot=bootstrap.sample.values[1] here while both
# the annotation below and the PDF export of this exact plot (see the
# result_QDA_ROCPlot_bootstrap_Temporal_Validation.pdf block) use index [2];
# the LRM section consistently uses its own index [3] for both devices, so
# [2] is the intended validation-stage sample count. Aligned to [2] so the
# plotted confidence band matches the reported number of bootstrap samples.
windows()
roc.plot(verification.temporal.validation.qda, main = "ROC PLOT: TEMPORAL VALIDATION QDA MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[2] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.qda$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.qda$n.total,"; Bootstrap samples = ",bootstrap.sample.values[2], sep=""), side=3, col="red", cex=0.8)
# EXPORT OF PLOT FOR VALIDATION OF QDA MODEL
# Fourfold plot of the validation contingency table, exported to PDF.
pdf(file = "result_QDA_FourfoldPlot_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.temporal.validation.qda, std="margin", main="TEMPORAL VALIDATION QDA MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
dev.off()
# Earlier non-bootstrap ROC export, kept commented out by the author.
#pdf(file = "result_QDA_ROCPlot_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.temporal.validation.qda, main = "ROC PLOT: TEMPORAL VALIDATION QDA MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#area.under.roc.curve.temporal.validation.qda<-roc.area(validation.table[,2],predict.result.qda$posterior[,2])
#dev.off()
# PDF export of the bootstrap-CI ROC plot for the temporal validation.
pdf(file = "result_QDA_ROCPlot_bootstrap_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.temporal.validation.qda, main = "ROC PLOT: TEMPORAL VALIDATION QDA MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[2] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.qda$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.qda$n.total,"; Bootstrap samples = ",bootstrap.sample.values[2], sep=""), side=3, col="red", cex=0.8)
dev.off()
# Matching code between observed and predicted outcome on the validation set:
# "00" -> 1 (true negative), "01" -> 2 (false positive),
# "10" -> 3 (false negative), "11" -> 4 (true positive).
# A named lookup vector replaces the original chain of gsub() substitutions;
# as.numeric() strips the lookup names, yielding a plain numeric vector.
validation.qda.matching.code <- as.numeric(
  c("00" = "1", "01" = "2", "10" = "3", "11" = "4")[
    paste(validation.grouping.variable,
          as.numeric(levels(predict.result.qda$class))[predict.result.qda$class],
          sep = "")
  ]
)
# EXPORT OF QDA MODEL RESULTS
# Plain-text report (tab separated) assembled by repeated append writes:
# model outputs, contingency tables, matching-code legend and the final
# per-observation table. Empty-string writes produce blank separator lines.
write.table("RESULTS OF QUADRATIC DISCRIMINANT ANALYSIS",file="result_QDA.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("QDA MODEL OUTPUTS",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Prior Probabilities",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(names(result.qda$prior),result.qda$prior),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Total number",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind("N",result.qda$N),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Counts",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(names(result.qda$counts),result.qda$counts),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Group means, transposed so variables appear as rows.
write.table("Means",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(t(rbind(c("",colnames(result.qda$means)),cbind(rownames(result.qda$means),result.qda$means))),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Discriminant function coefficients",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#Scaling coefficients
# QDA has one scaling matrix per group (third index of result.qda$scaling).
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(paste("Coefficients Group",levels(grouping.variable)[1]),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("",colnames(result.qda$scaling[,,1])),cbind(rownames(result.qda$scaling[,,1]),result.qda$scaling[,,1])),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(paste("Coefficients Group",levels(grouping.variable)[2]),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("",colnames(result.qda$scaling[,,2])),cbind(rownames(result.qda$scaling[,,2]),result.qda$scaling[,,2])),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Model-stage contingency table (with margins) from table2d_summary().
write.table("CONTINGENCY TABLE MODEL RESULT",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.qda$table[,1,],contingency.table.qda$table[,2,],contingency.table.qda$table[,3,])),file="result_QDA.txt", append=TRUE, quote = FALSE, sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Validation-stage contingency table.
write.table("CONTINGENCY TABLE VALIDATION",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.temporal.validation.qda$table[,1,],contingency.table.temporal.validation.qda$table[,2,],contingency.table.temporal.validation.qda$table[,3,])),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Legend for the 1..4 observed/predicted matching codes.
write.table("MATCHING CODE DEFINITION",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(c("","OBSERVED NO LANDSLIDES: 0","OBSERVED LANDSLIDES: 1"), c("PREDICTED NO LANDSLIDES: 0","00 -> Code 1","10 -> Code 3"), c("PREDICTED LANDSLIDES: 1","01 -> Code 2","11 -> Code 4")),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Final per-observation table: ID, observed group, posterior probability,
# predicted class and matching codes for model and validation stages.
write.table("FINAL RESULTS",file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("ID","GROUPING VARIABLE","QDA MODEL POSTERIOR PROBABILITY","QDA MODEL CLASSIFICATION","QDA MODEL RESULT MATCHING CODE","QDA VALIDATION GROUPING VARIABLE","QDA VALIDATION MATCHING CODE"),cbind(identification.value,as.numeric(levels(grouping.variable))[grouping.variable],predict.result.qda$posterior[,2],as.numeric(levels(predict.result.qda$class))[predict.result.qda$class],result.qda.matching.code,as.numeric(levels(validation.grouping.variable))[validation.grouping.variable],validation.qda.matching.code)),file="result_QDA.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Restore the explanatory-variable tables to their original (untransformed)
# values before leaving the QDA section.
# NOTE(review): the second line subsets with single-bracket list indexing
# (no comma); for a data.frame this still selects columns, but it is
# inconsistent with the comma form used on the line above.
explanatory.variables<-data.table[,3:dim(data.table)[2]] # Restore to original values of explanatory variables
validation.explanatory.variables<-validation.table[3:dim(validation.table)[2]] # Restore to original values of validation explanatory variables
}
#-------------------- LOGISTIC REGRESSION MODEL ---------------------#
# Run only when the LRM slot of the model-selection matrix is enabled.
if(model.run.matrix[3] == "YES")
{
library(Zelig)
# Guard run: fit the logistic model once inside try() so a failure can be
# logged to Error_LRM_Analysis.txt instead of aborting silently.
# FIX: inherits() is the idiomatic test for a "try-error" result (replaces
# the fragile class(...) == "try-error" comparison).
if (inherits(try(zelig(as.formula(paste(names(data.variables)[1],"~",paste(names(data.variables[,2:dim(data.variables)[2]]),collapse= "+"))), data=data.variables, model="logit")), "try-error"))
{
#zelig(as.formula(paste(names(data.variables)[1],"~",paste(names(data.variables[,2:dim(data.variables)[2]]),collapse= "+"))), data=data.variables, model="logit")
write.table("Analysis based on Logistic Regression Model was not completed",file="Error_LRM_Analysis.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Error_LRM_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Error LOG",file="Error_LRM_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# FIX: rev(seq_len(...)) is safe when .Traceback is empty; the original
# rev(1:length(...)) would have produced c(1, 0) and a malformed log.
write.table(cbind("Message",rev(seq_len(length(as.vector(.Traceback))))," ->",as.vector(.Traceback)),file="Error_LRM_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
result.lrm<-NULL
# Fit the logistic regression (logit) model: response is the first column of
# data.variables, predictors are all remaining columns.
result.lrm<-zelig(as.formula(paste(names(data.variables)[1],"~",paste(names(data.variables[,2:dim(data.variables)[2]]),collapse= "+"))), data=data.variables, model="logit")
# NOTE(review): str() dumps the whole model object to the console — this
# looks like leftover debugging output.
str(result.lrm)
#names(result.lrm)
#for predicted value (posterior probability calculated with model) result.lrm$fitted.values was considered
# Result Predicted
#test.explanatory.variables<-setx(result.lrm, fn=NULL, cond=FALSE)
#problem with sim: while zelig() and setx() work well, sim() doesn't work. Changing data.variables (excluding last two variables) sim() works well. Why?
#predict.result.lrm<-sim(result.lrm, test.explanatory.variables, num=c(2,2), prev = NULL, bootstrap=FALSE, bootfn=NULL)
#names(predict.result.lrm)
#predict.result.lrm$qi$ev[]
#predict.result.lrm$qi$pr[]
#plot(predict.result.lrm)
# Observed outcome vs predicted class (fitted probability rounded to 0/1).
cross.classification.lrm<-table(as.numeric(result.lrm$y),round(result.lrm$fitted.values),dnn=c("Observed","Predicted"))
rownames(cross.classification.lrm)<-list("No Landslide","Landslide") # Observed
colnames(cross.classification.lrm)<-list("No Landslide","Landslide") # Predicted
# NOTE(review): another leftover debug print.
str(cross.classification.lrm)
# Matching code between the observed group and the LRM prediction (fitted
# probability rounded to 0/1): "00" -> 1 (TN), "01" -> 2 (FP),
# "10" -> 3 (FN), "11" -> 4 (TP).
# A named lookup vector replaces the original chain of gsub() substitutions;
# as.numeric() strips the lookup names, yielding a plain numeric vector.
result.lrm.matching.code <- as.numeric(
  c("00" = "1", "01" = "2", "10" = "3", "11" = "4")[
    paste(grouping.variable, round(result.lrm$fitted.values), sep = "")
  ]
)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
# 2x2 table summary with margins, plus association statistics from vcd.
contingency.table.lrm<-table2d_summary(cross.classification.lrm)
test.table.lrm<-assocstats(cross.classification.lrm)
#Different plots for contingency table
windows()
fourfold(cross.classification.lrm, std="margin", main="LOGISTIC REGRESSION MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#A ROC curve plots the false alarm rate against the hit rate
#for a probablistic forecast for a range of thresholds.
#load package (verification)
library(verification)
#verify function
#Based on the type of inputs, this function calculates a range of verification statistics and skill scores.
#Additionally, it creates a verify class object that can be further analyzed.
##### ROC PLOT OBS - POSTERIOR PROBABILITY ASSOCIATED TO 1
# Method using verify function
# Forecast = LRM fitted probability; observation = binary model response.
verification.results.lrm<-verify(result.lrm$y,result.lrm$fitted.values, frcst.type="prob", obs.type="binary")
#str(verification.results.lrm)
#windows()
#roc.plot(verification.results.lrm, main = "ROC PLOT: LOGISTIC REGRESSION MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
area.under.roc.curve.lrm<-roc.area(result.lrm$y,result.lrm$fitted.values)
## showing confidence intervals. MAY BE SLOW
# ROC plot with bootstrap confidence intervals; the LRM stage consistently
# uses bootstrap.sample.values[3] for both device and annotation.
windows()
roc.plot(verification.results.lrm, main = "ROC PLOT: LOGISTIC REGRESSION MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[3] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.lrm$A,2),"; Sample size = ",area.under.roc.curve.lrm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[3], sep=""), side=3, col="red", cex=0.8)
## Histogram of posterior probability
# Histogram of fitted probabilities over the susceptibility class breaks
# (screen, then PDF export).
windows()
hist(result.lrm$fitted.values, breaks=breaks.histogram.values, freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Logistic Regression Model susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
pdf(file = "result_LRM_Histogram.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
hist(result.lrm$fitted.values, breaks=breaks.histogram.values, freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Logistic Regression Model susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
dev.off()
# EXPORT OF PLOT FOR LRM MODEL
pdf(file = "result_LRM_FourfoldPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.lrm, std="margin", main="LOGISTIC REGRESSION MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
dev.off()
#pdf(file = "result_LRM_ROCPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.results.lrm, main = "ROC PLOT: LOGISTIC REGRESSION MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#dev.off()
pdf(file = "result_LRM_ROCPlot_bootstrap.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.results.lrm, main = "ROC PLOT: LOGISTIC REGRESSION MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[3] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.lrm$A,2),"; Sample size = ",area.under.roc.curve.lrm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[3], sep=""), side=3, col="red", cex=0.8)
dev.off()
## BOOTSTRAP PROCEDURE FOR THE ESTIMATION OF MODEL PREDICTION VARIABILITY
if(bootstrap.model.variability[3] == "YES")
{
bootstrap.sample.model.lrm<-bootstrap.sample.model[3]
# Result matrix layout: column 1 = observation ID, then one triplet of
# columns per bootstrap run: (selection count in the resample, mean simulated
# probability for selected rows only, mean simulated prediction for ALL rows).
matrix.bootstrap.model.lrm<-matrix(data=NA, nrow=dim(data.table)[1], ncol=(bootstrap.sample.model.lrm*3)+1)
colnames(matrix.bootstrap.model.lrm)<-rep("na",(bootstrap.sample.model.lrm*3)+1)
matrix.bootstrap.model.lrm[,1]<-identification.value
colnames(matrix.bootstrap.model.lrm)[1]<-"ID"
name.sel.run<-paste(rep("ID_Selection_Run",bootstrap.sample.model.lrm),1:bootstrap.sample.model.lrm,sep="_")
colnames(matrix.bootstrap.model.lrm)[seq(2,(bootstrap.sample.model.lrm*3)-1,3)]<-name.sel.run
name.prob.run<-paste(rep("Probability_Run",bootstrap.sample.model.lrm),1:bootstrap.sample.model.lrm,sep="_")
colnames(matrix.bootstrap.model.lrm)[seq(3,(bootstrap.sample.model.lrm*3),3)]<-name.prob.run
name.pred.run<-paste(rep("Prediction_Run",bootstrap.sample.model.lrm),1:bootstrap.sample.model.lrm,sep="_")
colnames(matrix.bootstrap.model.lrm)[seq(4,(bootstrap.sample.model.lrm*3)+1,3)]<-name.pred.run
selection.index<-NULL
library(Zelig)
#Bootstrap procedure
for (count.boot in 1:bootstrap.sample.model.lrm)
{
# Resample rows with replacement; record how often each row was drawn.
selection.index<-sample(1:dim(data.table)[1], replace=TRUE, prob=NULL)
matrix.bootstrap.model.lrm[as.numeric(names(table(selection.index))),(count.boot*3)-1]<-table(selection.index)
data.variables.bootstrap.model.lrm<-data.table[selection.index,2:dim(data.table)[2]]
explanatory.variables.bootstrap.model.lrm<-data.table[selection.index,3:dim(data.table)[2]]
grouping.variable.bootstrap.model.lrm<-as.factor(data.table[selection.index,2])
# Fit the logit model on the resample.
result.bootstrap.model.lrm<-zelig(as.formula(paste(names(data.variables.bootstrap.model.lrm)[1],"~",paste(names(data.variables.bootstrap.model.lrm[,2:dim(data.variables.bootstrap.model.lrm)[2]]),collapse= "+"))), data=data.variables.bootstrap.model.lrm, model="logit")
# match(x, NA) is 1 exactly where x is NA, so this finds NA (aliased /
# inestimable) coefficients; their variables are dropped and the model refit.
# NOTE(review): the coefficient indices (intercept first) are reused as
# column indices into data.variables (response first) — the two happen to
# align only because both have one leading non-predictor entry; confirm.
excluded.variables.bootstrap.model.lrm<-which(match(result.bootstrap.model.lrm$coefficients,NA)==1)
if (length(excluded.variables.bootstrap.model.lrm) != 0)
{
data.variables.bootstrap.model.lrm.selected<-data.variables.bootstrap.model.lrm[,-excluded.variables.bootstrap.model.lrm]
setx.data.probability<-data.table[as.numeric(names(table(selection.index))),2:dim(data.table)[2]][,-excluded.variables.bootstrap.model.lrm]
setx.data.prediction<-data.table[,2:dim(data.table)[2]][,-excluded.variables.bootstrap.model.lrm]
} else
{
data.variables.bootstrap.model.lrm.selected<-data.variables.bootstrap.model.lrm
setx.data.probability<-data.table[as.numeric(names(table(selection.index))),2:dim(data.table)[2]]
setx.data.prediction<-data.table[,2:dim(data.table)[2]]
}
# Refit on the cleaned variable set, then simulate expected values (qi$ev):
# once for the rows actually selected in this resample ("probability") and
# once for the full data set ("prediction").
result.bootstrap.model.lrm.selected<-zelig(as.formula(paste(names(data.variables.bootstrap.model.lrm.selected)[1],"~",paste(names(data.variables.bootstrap.model.lrm.selected[,2:dim(data.variables.bootstrap.model.lrm.selected)[2]]),collapse= "+"))), data=data.variables.bootstrap.model.lrm.selected, model="logit")
x.result.bootstrap.model.lrm.selected.probability<-setx(result.bootstrap.model.lrm.selected,data=setx.data.probability,fn=NULL)
sim.result.bootstrap.model.lrm.selected.probability<-sim(result.bootstrap.model.lrm.selected,x=x.result.bootstrap.model.lrm.selected.probability,num=c(100,100))
matrix.bootstrap.model.lrm[as.numeric(names(table(selection.index))),(count.boot*3)]<-colMeans(sim.result.bootstrap.model.lrm.selected.probability$qi$ev)
x.result.bootstrap.model.lrm.selected.prediction<-setx(result.bootstrap.model.lrm.selected,data=setx.data.prediction,fn=NULL)
sim.result.bootstrap.model.lrm.selected.prediction<-sim(result.bootstrap.model.lrm.selected,x=x.result.bootstrap.model.lrm.selected.prediction,num=c(100,100))
matrix.bootstrap.model.lrm[,(count.boot*3)+1]<-colMeans(sim.result.bootstrap.model.lrm.selected.prediction$qi$ev)
}
# Export of bootstrap sample
write.table(matrix.bootstrap.model.lrm,file="result_LRM_BootstrapSamples.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=TRUE)
# Per-observation accumulators for the summary statistics computed below.
ID.bootstrap.model.lrm.count<-numeric(length=dim(data.table)[1])
#Probability (selected values)
bootstrap.model.lrm.probability.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.probability.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.probability.min<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.probability.max<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.probability.sderror<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.probability.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
#Prediction (all values)
bootstrap.model.lrm.prediction.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.prediction.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.prediction.min<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.prediction.max<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.prediction.sderror<-numeric(length=dim(data.table)[1])
bootstrap.model.lrm.prediction.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
# Per-observation summary statistics over the bootstrap replicates.
# "Probability" statistics use only the runs in which the observation was
# drawn into the resample (NA elsewhere, hence na.omit); "prediction"
# statistics use every run, since each fitted model predicts the full set.
for (count.row.variability in seq_len(dim(data.table)[1]))
{
# Statistics on bootstrapped probability
ID.bootstrap.model.lrm.count[count.row.variability]<-length(na.omit(matrix.bootstrap.model.lrm[count.row.variability,seq(2,(bootstrap.sample.model.lrm*3)-1,3)]))
bootstrap.model.lrm.probability.mean[count.row.variability]<-mean(na.omit(matrix.bootstrap.model.lrm[count.row.variability,seq(3,(bootstrap.sample.model.lrm*3),3)]))
bootstrap.model.lrm.probability.sd[count.row.variability]<-sd(na.omit(matrix.bootstrap.model.lrm[count.row.variability,seq(3,(bootstrap.sample.model.lrm*3),3)]))
bootstrap.model.lrm.probability.min[count.row.variability]<-min(na.omit(matrix.bootstrap.model.lrm[count.row.variability,seq(3,(bootstrap.sample.model.lrm*3),3)]))
bootstrap.model.lrm.probability.max[count.row.variability]<-max(na.omit(matrix.bootstrap.model.lrm[count.row.variability,seq(3,(bootstrap.sample.model.lrm*3),3)]))
# FIX: standard error of the mean is sd/sqrt(n); the original divided by n,
# which systematically underestimates the error.
bootstrap.model.lrm.probability.sderror[count.row.variability]<-bootstrap.model.lrm.probability.sd[count.row.variability]/sqrt(ID.bootstrap.model.lrm.count[count.row.variability])
bootstrap.model.lrm.probability.quantiles[count.row.variability,]<-quantile(na.omit(matrix.bootstrap.model.lrm[count.row.variability,seq(3,(bootstrap.sample.model.lrm*3),3)]),probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
# Statistics on bootstrapped prediction
bootstrap.model.lrm.prediction.mean[count.row.variability]<-mean(matrix.bootstrap.model.lrm[count.row.variability,seq(4,(bootstrap.sample.model.lrm*3)+1,3)])
bootstrap.model.lrm.prediction.sd[count.row.variability]<-sd(matrix.bootstrap.model.lrm[count.row.variability,seq(4,(bootstrap.sample.model.lrm*3)+1,3)])
bootstrap.model.lrm.prediction.min[count.row.variability]<-min(matrix.bootstrap.model.lrm[count.row.variability,seq(4,(bootstrap.sample.model.lrm*3)+1,3)])
bootstrap.model.lrm.prediction.max[count.row.variability]<-max(matrix.bootstrap.model.lrm[count.row.variability,seq(4,(bootstrap.sample.model.lrm*3)+1,3)])
# FIX: same sd/sqrt(n) correction as above (all runs contribute here).
bootstrap.model.lrm.prediction.sderror[count.row.variability]<-bootstrap.model.lrm.prediction.sd[count.row.variability]/sqrt(bootstrap.sample.model.lrm)
bootstrap.model.lrm.prediction.quantiles[count.row.variability,]<-quantile(matrix.bootstrap.model.lrm[count.row.variability,seq(4,(bootstrap.sample.model.lrm*3)+1,3)],probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
}
# Export of bootstrap sample statistics
write.table(cbind("ID","LRM_NumberSelectedSamples","LRM_Probability_Mean","LRM_Probability_Sd","LRM_Probability_Min","LRM_Probability_Max","LRM_Probability_Sderror","LRM_Probability_Quantiles_0","LRM_Probability_Quantiles_0.05","LRM_Probability_Quantiles_0.25","LRM_Probability_Quantiles_0.5","LRM_Probability_Quantiles_0.75","LRM_Probability_Quantiles_0.95","LRM_Probability_Quantiles_1","LRM_Prediction_Mean","LRM_Prediction_Sd","LRM_Prediction_Min","LRM_Prediction_Max","LRM_Prediction_Sderror","LRM_Prediction_Quantiles_0","LRM_Prediction_Quantiles_0.05","LRM_Prediction_Quantiles_0.25","LRM_Prediction_Quantiles_0.5","LRM_Prediction_Quantiles_0.75","LRM_Prediction_Quantiles_0.95","LRM_Prediction_Quantiles_1"),file="result_LRM_BootstrapStatistics.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(identification.value,ID.bootstrap.model.lrm.count,bootstrap.model.lrm.probability.mean,bootstrap.model.lrm.probability.sd,bootstrap.model.lrm.probability.min,bootstrap.model.lrm.probability.max,bootstrap.model.lrm.probability.sderror,bootstrap.model.lrm.probability.quantiles,bootstrap.model.lrm.prediction.mean,bootstrap.model.lrm.prediction.sd,bootstrap.model.lrm.prediction.min,bootstrap.model.lrm.prediction.max,bootstrap.model.lrm.prediction.sderror,bootstrap.model.lrm.prediction.quantiles),file="result_LRM_BootstrapStatistics.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#windows()
#double.sd.histogram.variability<-hist(bootstrap.model.lrm.probability.sd*2,breaks=seq(0,1,0.05),labels=TRUE)
#plot(double.sd.histogram.variability$counts, seq(0,0.95,0.05), type="S",ylim=c(0,1), labels=TRUE)
# Scatter plot of per-observation bootstrap mean probability vs mean prediction,
# with the 1:1 identity line in red; drawn once on screen and once to PDF.
windows()
plot(bootstrap.model.lrm.probability.mean,bootstrap.model.lrm.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="LRM BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lrm,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
# Same figure exported to PDF (the plotting code is intentionally duplicated
# because the active device changes after pdf()).
pdf(file = "result_LRM_BootstrapMeansComparison.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(bootstrap.model.lrm.probability.mean,bootstrap.model.lrm.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="LRM BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lrm,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
dev.off()
# BOOTSTRAPPED PROBABILITY - Fit parabola 3 parameter y = ax^2 + bx + c
# Fits a parabola to (bootstrap mean probability, 2*sd) to model how the
# probability variability changes across the susceptibility range.
parabola.probability.lrm<-cbind(bootstrap.model.lrm.probability.mean,2*bootstrap.model.lrm.probability.sd)
# Sort by abscissa and drop rows with NA (observations never drawn in any sample).
parabola.probability.lrm<-na.omit(parabola.probability.lrm[order(parabola.probability.lrm[,1]),])
colnames(parabola.probability.lrm)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
# Constrained fit: a single free coefficient (coeff.a) with b = -a and c = 0,
# forcing the curve through (0,0) and (1,0).
fit.parabola.probability.lrm <- nls(parabola.probability.lrm[,"ordinate"] ~ coeff.a*(parabola.probability.lrm[,"abscissa"]^2) + (-1)*coeff.a*parabola.probability.lrm[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.probability.lrm<-predict(fit.parabola.probability.lrm)
#coef(fit.parabola.probability.lrm)
# Plot data + fitted parabola, annotated with the fitted equation; screen first,
# then the same figure to PDF.
windows()
plot(parabola.probability.lrm[,"abscissa"],parabola.probability.lrm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="LRM Model Probability Variability (Bootstrap)")
lines(parabola.probability.lrm[,"abscissa"],value.parabola.probability.lrm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lrm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.lrm),3),coeff.b= -round(coef(fit.parabola.probability.lrm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_LRM_BootstrapProbabilityVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.probability.lrm[,"abscissa"],parabola.probability.lrm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="LRM Model Probability Variability (Bootstrap)")
lines(parabola.probability.lrm[,"abscissa"],value.parabola.probability.lrm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lrm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.lrm),3),coeff.b= -round(coef(fit.parabola.probability.lrm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
# BOOTSTRAPPED PREDICTION - Fit parabola 3 parameter y = ax^2 + bx + c
# Same constrained-parabola analysis as above, but on the bootstrapped
# predictions (computed for every observation, so no na.omit here --
# NOTE(review): confirm predictions can never contain NA).
parabola.prediction.lrm<-cbind(bootstrap.model.lrm.prediction.mean,2*bootstrap.model.lrm.prediction.sd)
parabola.prediction.lrm<-parabola.prediction.lrm[order(parabola.prediction.lrm[,1]),]
colnames(parabola.prediction.lrm)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.prediction.lrm <- nls(parabola.prediction.lrm[,"ordinate"] ~ coeff.a*(parabola.prediction.lrm[,"abscissa"]^2) + (-1)*coeff.a*parabola.prediction.lrm[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.prediction.lrm<-predict(fit.parabola.prediction.lrm)
#coef(fit.parabola.prediction.lrm)
# Screen plot, then identical PDF export.
windows()
plot(parabola.prediction.lrm[,"abscissa"],parabola.prediction.lrm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="LRM Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.lrm[,"abscissa"],value.parabola.prediction.lrm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lrm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.lrm),3),coeff.b= -round(coef(fit.parabola.prediction.lrm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_LRM_BootstrapPredictionVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.prediction.lrm[,"abscissa"],parabola.prediction.lrm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="LRM Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.lrm[,"abscissa"],value.parabola.prediction.lrm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.lrm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.lrm),3),coeff.b= -round(coef(fit.parabola.prediction.lrm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
# Closes the LRM bootstrap-variability if-block.
}
## Sensitivity, Specificity, Cohens kappa plot
# For every probability threshold produced by roc.plot, reclassify the fitted
# probabilities into 0/1 and derive the confusion-matrix counts plus Cohen's kappa.
roc.plot.lrm.series<-roc.plot(verification.results.lrm,binormal=TRUE)
#str(roc.plot.lrm.series)
#roc.plot.lrm.series$plot.data
#str(roc.plot.lrm.series$plot.data)
# One row per threshold: threshold, TP/TN/FP/FN counts, TPR, FPR, kappa.
contingency.table.matrix.lrm<-matrix(nrow=dim(roc.plot.lrm.series$plot.data)[1],ncol=8)
colnames(contingency.table.matrix.lrm)<-c("Threshold","TP","TN","FP","FN","TPR","FPR","COHEN_KAPPA")
# Threshold, hit rate and false-alarm rate come straight from roc.plot's data.
contingency.table.matrix.lrm[,1]<-roc.plot.lrm.series$plot.data[,1,1]
contingency.table.matrix.lrm[,6]<-roc.plot.lrm.series$plot.data[,2,1]
contingency.table.matrix.lrm[,7]<-roc.plot.lrm.series$plot.data[,3,1]
# NOTE(review): "odserved" is a typo for "observed", kept consistently below.
values.odserved<-data.table[,2]
values.predicted<-result.lrm$fitted.values
for (threshold.series in 1:dim(roc.plot.lrm.series$plot.data)[1])
{
value.threshold<-contingency.table.matrix.lrm[threshold.series,1]
values.probability.reclassified<-NULL
values.probability.reclassified<-numeric(length=length(values.odserved))
# Binarize each fitted probability against the current threshold.
for (length.observed.series in 1:length(values.odserved))
{
if (values.predicted[length.observed.series] > value.threshold)
{
values.probability.reclassified[length.observed.series]<-1
} else
{
values.probability.reclassified[length.observed.series]<-0
}
}
#sum(values.probability.reclassified-round(values.predicted)) # Check sum: It has to be 0 if threshold is equal to 1
# Encode each (observed, predicted) pair as a single code:
# "00"->1 (TN), "01"->2 (FP), "10"->3 (FN), "11"->4 (TP).
series.pasted<-paste(values.odserved,values.probability.reclassified,sep="")
series.pasted<-gsub("00","1",series.pasted)
series.pasted<-gsub("01","2",series.pasted)
series.pasted<-gsub("10","3",series.pasted)
series.pasted<-gsub("11","4",series.pasted)
series.pasted<-as.numeric(series.pasted)
TP<-length(series.pasted[series.pasted>=4]) # True Positive
FN<-length(series.pasted[series.pasted>=3 & series.pasted<4]) # False Negative
FP<-length(series.pasted[series.pasted>=2 & series.pasted<3]) # False Positive
TN<-length(series.pasted[series.pasted>=1 & series.pasted<2]) # True Negative
#TPR<-TP/(TP+FN) # Hit Rate or True Positive Rate or Sensitivity - Assigned before the for cicle using rocplot data
#FPR<-FP/(FP+TN) # False Alarm Rate or False Positive Rate or 1-Specificity
# Cohen's Kappa = (agreement-chance)/(1-chance) where agreement=(TP+TN)/(TP+TN+FP+FN) and chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
agreement=(TP+TN)/(TP+TN+FP+FN)
chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
cohen.kappa.value<-(agreement-chance)/(1-chance)
#Other
#library(vcd)
#cohen.kappa.value<-Kappa(cross.classification.table)
contingency.table.matrix.lrm[threshold.series,2]<-TP
contingency.table.matrix.lrm[threshold.series,3]<-TN
contingency.table.matrix.lrm[threshold.series,4]<-FP
contingency.table.matrix.lrm[threshold.series,5]<-FN
contingency.table.matrix.lrm[threshold.series,8]<-cohen.kappa.value
}
# Plot sensitivity (red), specificity (dark green) and Cohen's kappa (blue)
# against the probability threshold; screen first, then PDF export.
windows()
plot(roc.plot.lrm.series$plot.data[,1,1],roc.plot.lrm.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="LRM MODEL EVALUATION PLOT")
points(roc.plot.lrm.series$plot.data[,1,1],1-roc.plot.lrm.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.lrm.series$plot.data[,1,1], contingency.table.matrix.lrm[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
pdf(file = "result_LRM_ModelEvaluationPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(roc.plot.lrm.series$plot.data[,1,1],roc.plot.lrm.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="LRM MODEL EVALUATION PLOT")
points(roc.plot.lrm.series$plot.data[,1,1],1-roc.plot.lrm.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.lrm.series$plot.data[,1,1], contingency.table.matrix.lrm[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
dev.off()
## VALIDATION OF LRM MODEL (Matching LRM posterior probability results and validation grouping variable)
# Cross-tabulates the validation grouping variable against the rounded fitted
# probabilities of the trained LRM.
# NOTE(review): validation.grouping.variable and result.lrm$fitted.values must
# have the same length for this table to be meaningful -- confirm upstream.
cross.classification.temporal.validation.lrm<-table(validation.grouping.variable,round(result.lrm$fitted.values),dnn=c("Observed","Predicted"))
rownames(cross.classification.temporal.validation.lrm)<-list("No Landslide","Landslide") #Observed
colnames(cross.classification.temporal.validation.lrm)<-list("No Landslide","Landslide") #Predicted
#str(cross.classification.temporal.validation.lrm)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
contingency.table.temporal.validation.lrm<-table2d_summary(cross.classification.temporal.validation.lrm)
test.table.temporal.validation.lrm<-assocstats(cross.classification.temporal.validation.lrm)
#Different plots for contingency table
windows()
fourfold(cross.classification.temporal.validation.lrm, std="margin", main="TEMPORAL VALIDATION LRM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#load package (verification)
library(verification)
# 2nd method using verify function
verification.temporal.validation.lrm<-verify(validation.table[,2],result.lrm$fitted.values, frcst.type="prob", obs.type="binary")
#summary(verification.temporal.validation.lrm)
# showing confidence intervals. MAY BE SLOW
area.under.roc.curve.temporal.validation.lrm<-roc.area(validation.table[,2],result.lrm$fitted.values)
windows()
roc.plot(verification.temporal.validation.lrm, main = "ROC PLOT: TEMPORAL VALIDATION LRM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[3] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.lrm$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.lrm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[3], sep=""), side=3, col="red", cex=0.8)
# EXPORT OF PLOT FOR VALIDATION OF LRM MODEL
pdf(file = "result_LRM_FourfoldPlot_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.temporal.validation.lrm, std="margin", main="TEMPORAL VALIDATION LRM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
dev.off()
#pdf(file = "result_LRM_ROCPlot_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.temporal.validation.lrm, main = "ROC PLOT: TEMPORAL VALIDATION LRM MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#area.under.roc.curve.temporal.validation.lrm<-roc.area(verification.table[,2],result.lrm$fitted.values)
#dev.off()
pdf(file = "result_LRM_ROCPlot_bootstrap_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.temporal.validation.lrm, main = "ROC PLOT: TEMPORAL VALIDATION LRM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[3] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.lrm$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.lrm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[3], sep=""), side=3, col="red", cex=0.8)
dev.off()
# Matching code between observed class (validation grouping variable) and
# predicted class (rounded LRM fitted probability):
#   observed 0 / predicted 0 -> 1, 0/1 -> 2, 1/0 -> 3, 1/1 -> 4
observed.class.chr<-as.character(validation.grouping.variable)
predicted.class.chr<-as.character(round(result.lrm$fitted.values))
matching.code.map<-c("00"=1,"01"=2,"10"=3,"11"=4)
validation.lrm.matching.code<-unname(matching.code.map[paste(observed.class.chr,predicted.class.chr,sep="")])
# EXPORT OF LRM MODEL RESULTS
# Writes a single tab-separated report (result_LRM.txt): coefficients,
# model and validation contingency tables, matching-code legend, and the
# per-observation final results. Empty write.table("") calls emit blank lines.
write.table("RESULTS OF LOGISTIC REGRESSION MODEL",file="result_LRM.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("LRM MODEL OUTPUTS",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Logistic Regression coefficients",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#Scaling coefficients
write.table(cbind(names(result.lrm$coefficients),result.lrm$coefficients),file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("CONTINGENCY TABLE MODEL RESULT",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.lrm$table[,1,],contingency.table.lrm$table[,2,],contingency.table.lrm$table[,3,])),file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("CONTINGENCY TABLE VALIDATION",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.temporal.validation.lrm$table[,1,],contingency.table.temporal.validation.lrm$table[,2,],contingency.table.temporal.validation.lrm$table[,3,])),file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("MATCHING CODE DEFINITION",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(c("","OBSERVED NO LANDSLIDES: 0","OBSERVED LANDSLIDES: 1"), c("PREDICTED NO LANDSLIDES: 0","00 -> Code 1","10 -> Code 3"), c("PREDICTED LANDSLIDES: 1","01 -> Code 2","11 -> Code 4")),file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("FINAL RESULTS",file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("ID","GROUPING VARIABLE","MODEL POSTERIOR PROBABILITY","MODEL CLASSIFICATION","MODEL RESULT MATCHING CODE","VALIDATION GROUPING VARIABLE","VALIDATION MATCHING CODE"),cbind(identification.value,result.lrm$y,result.lrm$fitted.values,round(result.lrm$fitted.values),result.lrm.matching.code,as.numeric(levels(validation.grouping.variable))[validation.grouping.variable],validation.lrm.matching.code)),file="result_LRM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Closes the LRM model if-block.
}
#------------------ NEURAL NETWORK MODEL ANALISYS -------------------#
# Runs only when the NNM model is enabled in the run matrix.
if(model.run.matrix[4] == "YES")
{
library(nnet)
# This "for" cicle search the best number of Weight Decay and Hidden Layer Nodes basing on the minimization of Sum of Squared Error (SSE) or on the mamixization of ROC area results
# INITIATE A NULL TABLE
nnm.table <- NULL
# Total iterations of the grid search below: 3 weight decays x candidate node
# counts x 10 random restarts; used only for the progress message.
total.iteration.number<-3*length(round((length(explanatory.variables)/2)):(length(explanatory.variables)-2))*10
iteration.number<-0
# SEARCH FOR OPTIMAL WEIGHT DECAY WITH RANGE OF WEIGHT DECAYS SUGGESTED BY B. RIPLEY
# Grid search: for each (weight decay, hidden nodes) pair, train 10 nets with
# different seeds and record the average SSE and average ROC area.
for (weight.decay in c(0.0001, 0.001, 0.01))
{
# SEARCH FOR OPTIMAL NUMBER OF HIDDEN UNITS
# Candidate node counts: from round(p/2) to p-2, p = number of explanatory variables.
for (n.nodes in round((length(explanatory.variables)/2)):(length(explanatory.variables)-2))
{
# UNITIATE A NULL VECTOR
sse <- NULL
# FOR EACH SETTING, RUN NEURAL NET MULTIPLE TIMES
for (i.counts in 1:10)
{
# INITIATE THE RANDOM STATE FOR EACH NET
set.seed(i.counts)
# TRAIN NEURAL NETS
# maxit is capped at 200 here to keep the search fast; the final fit below
# uses maxit = 10000.
result.nnm <- nnet(explanatory.variables, data.table[,2], size = n.nodes, rang = 0.00001, maxit = 200, MaxNWts=10000, decay = weight.decay, skip = FALSE, trace = TRUE) #original maxit=10000
# CALCULATE SSE (Sum of Squared Error) and ROC.area
# NOTE(review): predictions are rounded before the SSE, so this is really a
# misclassification count rather than an SSE of probabilities -- confirm intent.
test.sse <- sum(((as.numeric(grouping.variable)-1) - round(predict(result.nnm)))^2)
library(verification)
test.ROC.area <- (roc.area((as.numeric(grouping.variable)-1),result.nnm$fitted.values))$A
iteration.number<-iteration.number+1
print(paste("Iteration",iteration.number,"of",total.iteration.number,"-",round((iteration.number/total.iteration.number*100),1),"%","completed"))
# APPEND EACH SSE and ROC.area TO A VECTOR
if (i.counts == 1) sse <- test.sse else sse <- rbind(sse, test.sse)
if (i.counts == 1) ROC.area <- test.ROC.area else ROC.area <- rbind(ROC.area, test.ROC.area)
}
# APPEND AVERAGED SSE and AVERAGED ROC.area WITH RELATED PARAMETERS TO A TABLE
nnm.table <- rbind(nnm.table, c(WEIGHT_DECAY = weight.decay, HYDDEN_LAYER_NODES = n.nodes, SUM_SQUARED_ERROR = mean(sse), ROC_AREA = mean(ROC.area)))
}
}
# PRINT OUT THE RESULT
print(nnm.table)
# Extracting value of Weight Decay and Number of nodes that minimize the SSE
pos.sse.min<-which.min(nnm.table[,3])
weight.decay.sse.min<-nnm.table[pos.sse.min,1]
n.nodes.sse.min<-nnm.table[pos.sse.min,2]
sse.min<-min(nnm.table[,3])
# Extracting value of Weight Decay and Number of nodes that maximize the ROC Area
pos.roc.area.max<-which.max(nnm.table[,4])
weight.decay.roc.area.max<-nnm.table[pos.roc.area.max,1]
n.nodes.roc.area.max<-nnm.table[pos.roc.area.max,2]
roc.area.max<-max(nnm.table[,4])
# Both criteria are computed, but the SSE-minimizing pair is the one actually
# used for the final model fit.
n.nodes.selected <- n.nodes.sse.min
weight.decay.selected <- weight.decay.sse.min
# Guard run: train a NNM with the selected parameters inside try() so that a
# failing fit writes an error-log file instead of aborting the script.
# inherits() is the reliable way to detect a try() failure: class(x) == "try-error"
# breaks when class() returns more than one element.
if (inherits(try(nnet(explanatory.variables, data.table[,2], size = n.nodes.selected, rang = 0.00001, maxit = 100, MaxNWts=10000, decay = weight.decay.selected, skip = FALSE, trace = TRUE)), "try-error"))
{
#nnet(explanatory.variables, data.table[,2], size = n.nodes.selected, rang = 0.00001, maxit = 10000, MaxNWts=100, decay = weight.decay.selected, skip = FALSE, trace = TRUE)
write.table("Analysis based on Neural Network Model was not completed",file="Error_NNM_Analysis.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Error_NNM_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Error LOG",file="Error_NNM_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# seq_along() is safe when .Traceback is empty, unlike 1:length().
write.table(cbind("Message",rev(seq_along(as.vector(.Traceback)))," ->",as.vector(.Traceback)),file="Error_NNM_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
# Final NNM fit with the selected parameters and a fixed seed for reproducibility.
result.nnm<-NULL
set.seed(seed.value)
result.nnm<-nnet(explanatory.variables, data.table[,2], size = n.nodes.selected, rang = 0.00001, maxit = 10000, MaxNWts=10000, decay = weight.decay.selected, skip = FALSE, trace = TRUE)
#names(result.nnm)
# Result Predicted
predict.result.nnm<-predict(result.nnm)
# NOTE(review): str() prints diagnostics to the console (other such calls in
# this file are commented out) -- confirm this is intentional.
str(predict.result.nnm)
# As predicted values also result.nnm$fitted.values can be considered because it corresponds to predict.result.nnm as the sum of the difference of the two vectors two is 0
# sum(result.nnm$fitted.values-predict.result.nnm)
# Observed-vs-predicted contingency table; as.numeric(factor)-1 maps the factor
# levels to 0/1 (assumes two levels ordered "0","1" -- TODO confirm).
cross.classification.nnm<-table((as.numeric(grouping.variable)-1),round(predict.result.nnm),dnn=c("Observed","Predicted"))
rownames(cross.classification.nnm)<-list("No Landslide","Landslide") # Observed
colnames(cross.classification.nnm)<-list("No Landslide","Landslide") # Predicted
str(cross.classification.nnm)
# Assignation of a matching code between observed and predicted values
# "00"->1 (TN), "01"->2 (FP), "10"->3 (FN), "11"->4 (TP)
result.nnm.matching.code<-paste(grouping.variable,round(predict.result.nnm),sep="")
result.nnm.matching.code<-gsub("00","1",result.nnm.matching.code)
result.nnm.matching.code<-gsub("01","2",result.nnm.matching.code)
result.nnm.matching.code<-gsub("10","3",result.nnm.matching.code)
result.nnm.matching.code<-gsub("11","4",result.nnm.matching.code)
result.nnm.matching.code<-as.numeric(result.nnm.matching.code)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
contingency.table.nnm<-table2d_summary(cross.classification.nnm)
test.table.nnm<-assocstats(cross.classification.nnm)
#Different plots for contingency table
# Fourfold plot of the NNM contingency table on screen (PDF export below).
windows()
fourfold(cross.classification.nnm,std="margin", main="NEURAL NETWORK MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#A ROC curve plots the false alarm rate against the hit rate
#for a probablistic forecast for a range of thresholds.
#load package (verification)
library(verification)
#verify function
#Based on the type of inputs, this function calculates a range of verification statistics and skill scores.
#Additionally, it creates a verify class object that can be further analyzed.
##### ROC PLOT OBS - POSTERIOR PROBABILITY ASSOCIATED TO 1
# Method using verify function
verification.results.nnm<-verify((as.numeric(grouping.variable)-1),predict.result.nnm, frcst.type="prob", obs.type="binary")
#summary(verification.results.nnm)
#str(verification.results.nnm)
#windows()
#roc.plot(verification.results.nnm, main = "ROC PLOT: NEURAL NETWORK MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
area.under.roc.curve.nnm<-roc.area((as.numeric(grouping.variable)-1),predict.result.nnm)
## showing confidence intervals. MAY BE SLOW
windows()
roc.plot(verification.results.nnm, main = "ROC PLOT: NEURAL NETWORK MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[4] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.nnm$A,2),"; Sample size = ",area.under.roc.curve.nnm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[4], sep=""), side=3, col="red", cex=0.8)
## Histogram of posterior probability
windows()
hist(predict.result.nnm, breaks=breaks.histogram.values, freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Neural Network Model susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
pdf(file = "result_NNM_Histogram.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
hist(predict.result.nnm, breaks=breaks.histogram.values, freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Neural Network Model susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
dev.off()
# EXPORT OF PLOT FOR NNM MODEL
pdf(file = "result_NNM_FourfoldPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.nnm,std="margin", main="NEURAL NETWORK MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
dev.off()
#pdf(file = "result_NNM_ROCPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.results.nnm, main = "ROC PLOT: NEURAL NETWORK MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#dev.off()
pdf(file = "result_NNM_ROCPlot_bootstrap.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.results.nnm, main = "ROC PLOT: NEURAL NETWORK MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[4] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.nnm$A,2),"; Sample size = ",area.under.roc.curve.nnm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[4], sep=""), side=3, col="red", cex=0.8)
dev.off()
## BOOTSTRAP PROCEDURE FOR THE ESTIMATION OF MODEL PREDICTION VARIABILITY
if(bootstrap.model.variability[4] == "YES")
{
bootstrap.sample.model.nnm<-bootstrap.sample.model[4]
# Result matrix layout: column 1 = observation ID; then, per bootstrap run, a
# triple of columns: (times selected, in-bag probability, full-sample prediction).
matrix.bootstrap.model.nnm<-matrix(data=NA, nrow=dim(data.table)[1], ncol=(bootstrap.sample.model.nnm*3)+1)
colnames(matrix.bootstrap.model.nnm)<-rep("na",(bootstrap.sample.model.nnm*3)+1)
matrix.bootstrap.model.nnm[,1]<-identification.value
colnames(matrix.bootstrap.model.nnm)[1]<-"ID"
name.sel.run<-paste(rep("ID_Selection_Run",bootstrap.sample.model.nnm),1:bootstrap.sample.model.nnm,sep="_")
colnames(matrix.bootstrap.model.nnm)[seq(2,(bootstrap.sample.model.nnm*3)-1,3)]<-name.sel.run
name.prob.run<-paste(rep("Probability_Run",bootstrap.sample.model.nnm),1:bootstrap.sample.model.nnm,sep="_")
colnames(matrix.bootstrap.model.nnm)[seq(3,(bootstrap.sample.model.nnm*3),3)]<-name.prob.run
name.pred.run<-paste(rep("Prediction_Run",bootstrap.sample.model.nnm),1:bootstrap.sample.model.nnm,sep="_")
colnames(matrix.bootstrap.model.nnm)[seq(4,(bootstrap.sample.model.nnm*3)+1,3)]<-name.pred.run
selection.index<-NULL
library(nnet)
#Bootstrap procedure
# Each run: resample rows with replacement, refit the NNM with the selected
# parameters, store predictions for the sampled rows (in-bag) and for all rows.
for (count.boot in 1:bootstrap.sample.model.nnm)
{
selection.index<-sample(1:dim(data.table)[1], replace=TRUE, prob=NULL)
# How many times each distinct row was drawn in this sample.
matrix.bootstrap.model.nnm[as.numeric(names(table(selection.index))),(count.boot*3)-1]<-table(selection.index)
explanatory.variables.bootstrap.model.nnm<-data.table[selection.index,3:dim(data.table)[2]]
grouping.variable.bootstrap.model.nnm<-data.table[selection.index,2]
result.bootstrap.model.nnm<-nnet(explanatory.variables.bootstrap.model.nnm, grouping.variable.bootstrap.model.nnm, size = n.nodes.selected, rang = 0.00001, maxit = 10000, MaxNWts=10000, decay = weight.decay.selected, skip = FALSE, trace = TRUE)
matrix.bootstrap.model.nnm[as.numeric(names(table(selection.index))),(count.boot*3)]<-predict(result.bootstrap.model.nnm,newdata=explanatory.variables[as.numeric(names(table(selection.index))),])
matrix.bootstrap.model.nnm[,(count.boot*3)+1]<-predict(result.bootstrap.model.nnm,newdata=explanatory.variables)
}
# Export of bootstrap sample
write.table(matrix.bootstrap.model.nnm,file="result_NNM_BootstrapSamples.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=TRUE)
# Preallocate per-observation summary containers for the bootstrap results:
# one entry (or one matrix row) per row of data.table.
n.obs.nnm.boot<-dim(data.table)[1]
ID.bootstrap.model.nnm.count<-numeric(n.obs.nnm.boot)
# Probability summaries (over the runs in which each row was actually drawn)
bootstrap.model.nnm.probability.mean<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.probability.sd<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.probability.min<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.probability.max<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.probability.sderror<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.probability.quantiles<-matrix(nrow=n.obs.nnm.boot,ncol=7)
# Prediction summaries (over all bootstrap runs, full-dataset predictions)
bootstrap.model.nnm.prediction.mean<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.prediction.sd<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.prediction.min<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.prediction.max<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.prediction.sderror<-numeric(n.obs.nnm.boot)
bootstrap.model.nnm.prediction.quantiles<-matrix(nrow=n.obs.nnm.boot,ncol=7)
# Per-row summary statistics across bootstrap runs. Probability columns are at
# stride-3 offsets starting at 3 and may contain NAs (rows not drawn in a run);
# prediction columns start at 4 and are always filled.
for (count.row.variability in 1:dim(data.table)[1])
{
# Statistics on boostrapped probability
# Number of runs in which this row was drawn at least once
ID.bootstrap.model.nnm.count[count.row.variability]<-length(na.omit(matrix.bootstrap.model.nnm[count.row.variability,seq(2,(bootstrap.sample.model.nnm*3)-1,3)]))
bootstrap.model.nnm.probability.mean[count.row.variability]<-mean(na.omit(matrix.bootstrap.model.nnm[count.row.variability,seq(3,(bootstrap.sample.model.nnm*3),3)]))
bootstrap.model.nnm.probability.sd[count.row.variability]<-sd(na.omit(matrix.bootstrap.model.nnm[count.row.variability,seq(3,(bootstrap.sample.model.nnm*3),3)]))
bootstrap.model.nnm.probability.min[count.row.variability]<-min(na.omit(matrix.bootstrap.model.nnm[count.row.variability,seq(3,(bootstrap.sample.model.nnm*3),3)]))
bootstrap.model.nnm.probability.max[count.row.variability]<-max(na.omit(matrix.bootstrap.model.nnm[count.row.variability,seq(3,(bootstrap.sample.model.nnm*3),3)]))
# NOTE(review): divides sd by n, not sqrt(n); the conventional standard error
# would be sd/sqrt(n) -- confirm whether this definition is intentional
bootstrap.model.nnm.probability.sderror[count.row.variability]<-bootstrap.model.nnm.probability.sd[count.row.variability]/ID.bootstrap.model.nnm.count[count.row.variability]
bootstrap.model.nnm.probability.quantiles[count.row.variability,]<-quantile(na.omit(matrix.bootstrap.model.nnm[count.row.variability,seq(3,(bootstrap.sample.model.nnm*3),3)]),probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
# Statistics on boostrapped prediction
bootstrap.model.nnm.prediction.mean[count.row.variability]<-mean(matrix.bootstrap.model.nnm[count.row.variability,seq(4,(bootstrap.sample.model.nnm*3)+1,3)])
bootstrap.model.nnm.prediction.sd[count.row.variability]<-sd(matrix.bootstrap.model.nnm[count.row.variability,seq(4,(bootstrap.sample.model.nnm*3)+1,3)])
bootstrap.model.nnm.prediction.min[count.row.variability]<-min(matrix.bootstrap.model.nnm[count.row.variability,seq(4,(bootstrap.sample.model.nnm*3)+1,3)])
bootstrap.model.nnm.prediction.max[count.row.variability]<-max(matrix.bootstrap.model.nnm[count.row.variability,seq(4,(bootstrap.sample.model.nnm*3)+1,3)])
bootstrap.model.nnm.prediction.sderror[count.row.variability]<-bootstrap.model.nnm.prediction.sd[count.row.variability]/bootstrap.sample.model.nnm
bootstrap.model.nnm.prediction.quantiles[count.row.variability,]<-quantile(matrix.bootstrap.model.nnm[count.row.variability,seq(4,(bootstrap.sample.model.nnm*3)+1,3)],probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
}
# Export of bootstrap sample statistics
# Header row first (overwrite), then one data row per observation (append).
write.table(cbind("ID","NNM_NumberSelectedSamples","NNM_Probability_Mean","NNM_Probability_Sd","NNM_Probability_Min","NNM_Probability_Max","NNM_Probability_Sderror","NNM_Probability_Quantiles_0","NNM_Probability_Quantiles_0.05","NNM_Probability_Quantiles_0.25","NNM_Probability_Quantiles_0.5","NNM_Probability_Quantiles_0.75","NNM_Probability_Quantiles_0.95","NNM_Probability_Quantiles_1","NNM_Prediction_Mean","NNM_Prediction_Sd","NNM_Prediction_Min","NNM_Prediction_Max","NNM_Prediction_Sderror","NNM_Prediction_Quantiles_0","NNM_Prediction_Quantiles_0.05","NNM_Prediction_Quantiles_0.25","NNM_Prediction_Quantiles_0.5","NNM_Prediction_Quantiles_0.75","NNM_Prediction_Quantiles_0.95","NNM_Prediction_Quantiles_1"),file="result_NNM_BootstrapStatistics.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(identification.value,ID.bootstrap.model.nnm.count,bootstrap.model.nnm.probability.mean,bootstrap.model.nnm.probability.sd,bootstrap.model.nnm.probability.min,bootstrap.model.nnm.probability.max,bootstrap.model.nnm.probability.sderror,bootstrap.model.nnm.probability.quantiles,bootstrap.model.nnm.prediction.mean,bootstrap.model.nnm.prediction.sd,bootstrap.model.nnm.prediction.min,bootstrap.model.nnm.prediction.max,bootstrap.model.nnm.prediction.sderror,bootstrap.model.nnm.prediction.quantiles),file="result_NNM_BootstrapStatistics.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#windows()
#double.sd.histogram.variability<-hist(bootstrap.model.nnm.probability.sd*2,breaks=seq(0,1,0.05),labels=TRUE)
#plot(double.sd.histogram.variability$counts, seq(0,0.95,0.05), type="S",ylim=c(0,1), labels=TRUE)
# Scatter of mean in-sample probability vs mean full-sample prediction; points
# should lie on the 1:1 line if the two estimates agree. Drawn on screen first,
# then reproduced identically into a PDF.
windows()
plot(bootstrap.model.nnm.probability.mean,bootstrap.model.nnm.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="NNM BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.nnm,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
pdf(file = "result_NNM_BootstrapMeansComparison.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(bootstrap.model.nnm.probability.mean,bootstrap.model.nnm.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="NNM BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.nnm,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
dev.off()
# BOOTSTRAPPED PROBABILITY - Fit parabola 3 parameter y = ax^2 + bx + c
# Model the probability variability (2 standard deviations) as a function of the
# mean probability with a constrained parabola that is zero at x=0 and x=1.
parabola.probability.nnm<-cbind(bootstrap.model.nnm.probability.mean,2*bootstrap.model.nnm.probability.sd)
# Sort by abscissa so the fitted curve can be drawn with lines(); drop NA rows
parabola.probability.nnm<-na.omit(parabola.probability.nnm[order(parabola.probability.nnm[,1]),])
colnames(parabola.probability.nnm)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.probability.nnm <- nls(parabola.probability.nnm[,"ordinate"] ~ coeff.a*(parabola.probability.nnm[,"abscissa"]^2) + (-1)*coeff.a*parabola.probability.nnm[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.probability.nnm<-predict(fit.parabola.probability.nnm)
#coef(fit.parabola.probability.nnm)
# On-screen plot of the variability cloud with the fitted parabola and its
# equation as annotation; the same figure is then exported to PDF below.
windows()
plot(parabola.probability.nnm[,"abscissa"],parabola.probability.nnm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="NNM Model Probability Variability (Bootstrap)")
lines(parabola.probability.nnm[,"abscissa"],value.parabola.probability.nnm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.nnm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
# Build the displayed equation by substituting the fitted coefficient (and its
# negation, since b = -a) into a plotmath expression
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.nnm),3),coeff.b= -round(coef(fit.parabola.probability.nnm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_NNM_BootstrapProbabilityVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.probability.nnm[,"abscissa"],parabola.probability.nnm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="NNM Model Probability Variability (Bootstrap)")
lines(parabola.probability.nnm[,"abscissa"],value.parabola.probability.nnm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.nnm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.nnm),3),coeff.b= -round(coef(fit.parabola.probability.nnm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
# BOOTSTRAPPED PREDICTION - Fit parabola 3 parameter y = ax^2 + bx + c
# Same constrained-parabola fit as above, but on the full-sample prediction
# statistics. No na.omit here: prediction columns are filled for every row in
# every run, so no NAs are expected -- confirm if that assumption changes.
parabola.prediction.nnm<-cbind(bootstrap.model.nnm.prediction.mean,2*bootstrap.model.nnm.prediction.sd)
parabola.prediction.nnm<-parabola.prediction.nnm[order(parabola.prediction.nnm[,1]),]
colnames(parabola.prediction.nnm)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.prediction.nnm <- nls(parabola.prediction.nnm[,"ordinate"] ~ coeff.a*(parabola.prediction.nnm[,"abscissa"]^2) + (-1)*coeff.a*parabola.prediction.nnm[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.prediction.nnm<-predict(fit.parabola.prediction.nnm)
#coef(fit.parabola.prediction.nnm)
windows()
plot(parabola.prediction.nnm[,"abscissa"],parabola.prediction.nnm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="NNM Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.nnm[,"abscissa"],value.parabola.prediction.nnm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.nnm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.nnm),3),coeff.b= -round(coef(fit.parabola.prediction.nnm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
# PDF copy of the same figure
pdf(file = "result_NNM_BootstrapPredictionVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.prediction.nnm[,"abscissa"],parabola.prediction.nnm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="NNM Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.nnm[,"abscissa"],value.parabola.prediction.nnm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.nnm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.nnm),3),coeff.b= -round(coef(fit.parabola.prediction.nnm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
# Closes: if(bootstrap.model.variability[...] == "YES") for the NNM bootstrap block
}
## Sensitivity, Specificity, Cohens kappa plot
# roc.plot draws the ROC curve (side effect) and returns, in $plot.data[,,1],
# the probability thresholds (col 1), hit rates / sensitivity (col 2) and
# false-alarm rates / 1-specificity (col 3) used below.
roc.plot.nnm.series<-roc.plot(verification.results.nnm,binormal=TRUE)
#str(roc.plot.nnm.series)
#roc.plot.nnm.series$plot.data
#str(roc.plot.nnm.series$plot.data)
# One row per threshold: the full confusion-matrix counts plus derived scores
contingency.table.matrix.nnm<-matrix(nrow=dim(roc.plot.nnm.series$plot.data)[1],ncol=8)
colnames(contingency.table.matrix.nnm)<-c("Threshold","TP","TN","FP","FN","TPR","FPR","COHEN_KAPPA")
contingency.table.matrix.nnm[,1]<-roc.plot.nnm.series$plot.data[,1,1]
contingency.table.matrix.nnm[,6]<-roc.plot.nnm.series$plot.data[,2,1]
contingency.table.matrix.nnm[,7]<-roc.plot.nnm.series$plot.data[,3,1]
# NOTE(review): "odserved" is a long-standing typo for "observed"; kept as-is
values.odserved<-data.table[,2]
values.predicted<-predict.result.nnm
for (threshold.series in 1:dim(roc.plot.nnm.series$plot.data)[1])
{
value.threshold<-contingency.table.matrix.nnm[threshold.series,1]
values.probability.reclassified<-NULL
values.probability.reclassified<-numeric(length=length(values.odserved))
# Reclassify each probability into 0/1 at the current threshold
# (strictly greater-than; could be vectorized as values.predicted > threshold)
for (length.observed.series in 1:length(values.odserved))
{
if (values.predicted[length.observed.series] > value.threshold)
{
values.probability.reclassified[length.observed.series]<-1
} else
{
values.probability.reclassified[length.observed.series]<-0
}
}
#sum(values.probability.reclassified-round(values.predicted)) # Check sum: It has to be 0 if threshold is equal to 1
# Encode each (observed, predicted) pair as a single digit:
# "00"->1 (TN), "01"->2 (FP), "10"->3 (FN), "11"->4 (TP)
series.pasted<-paste(values.odserved,values.probability.reclassified,sep="")
series.pasted<-gsub("00","1",series.pasted)
series.pasted<-gsub("01","2",series.pasted)
series.pasted<-gsub("10","3",series.pasted)
series.pasted<-gsub("11","4",series.pasted)
series.pasted<-as.numeric(series.pasted)
TP<-length(series.pasted[series.pasted>=4]) # True Positive
FN<-length(series.pasted[series.pasted>=3 & series.pasted<4]) # False Negative
FP<-length(series.pasted[series.pasted>=2 & series.pasted<3]) # False Positive
TN<-length(series.pasted[series.pasted>=1 & series.pasted<2]) # True Negative
#TPR<-TP/(TP+FN) # Hit Rate or True Positive Rate or Sensitivity - Assigned before the for cicle using rocplot data
#FPR<-FP/(FP+TN) # False Alarm Rate or False Positive Rate or 1-Specificity
# Cohen's Kappa = (agreement-chance)/(1-chance) where agreement=(TP+TN)/(TP+TN+FP+FN) and chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
agreement=(TP+TN)/(TP+TN+FP+FN)
chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
cohen.kappa.value<-(agreement-chance)/(1-chance)
#Other
#library(vcd)
#cohen.kappa.value<-Kappa(cross.classification.table)
contingency.table.matrix.nnm[threshold.series,2]<-TP
contingency.table.matrix.nnm[threshold.series,3]<-TN
contingency.table.matrix.nnm[threshold.series,4]<-FP
contingency.table.matrix.nnm[threshold.series,5]<-FN
contingency.table.matrix.nnm[threshold.series,8]<-cohen.kappa.value
}
# Model-evaluation plot: sensitivity, specificity and Cohen's kappa as a
# function of the probability threshold. Drawn on screen, then to PDF.
windows()
plot(roc.plot.nnm.series$plot.data[,1,1],roc.plot.nnm.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="NNM MODEL EVALUATION PLOT")
# Specificity = 1 - false alarm rate
points(roc.plot.nnm.series$plot.data[,1,1],1-roc.plot.nnm.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.nnm.series$plot.data[,1,1], contingency.table.matrix.nnm[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
pdf(file = "result_NNM_ModelEvaluationPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(roc.plot.nnm.series$plot.data[,1,1],roc.plot.nnm.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="NNM MODEL EVALUATION PLOT")
points(roc.plot.nnm.series$plot.data[,1,1],1-roc.plot.nnm.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.nnm.series$plot.data[,1,1], contingency.table.matrix.nnm[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
dev.off()
## VALIDATION OF NNM MODEL (Matching NNM posterior probability results and validation grouping variable)
# Cross-tabulate the validation-period observed outcome against the rounded
# (0.5-threshold) NNM posterior probability.
cross.classification.temporal.validation.nnm<-table(validation.grouping.variable,round(predict.result.nnm),dnn=c("Observed","Predicted"))
rownames(cross.classification.temporal.validation.nnm)<-list("No Landslide","Landslide") #Observed
colnames(cross.classification.temporal.validation.nnm)<-list("No Landslide","Landslide") #Predicted
#str(cross.classification.temporal.validation.nnm)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
contingency.table.temporal.validation.nnm<-table2d_summary(cross.classification.temporal.validation.nnm)
test.table.temporal.validation.nnm<-assocstats(cross.classification.temporal.validation.nnm)
#Different plots for contingency table
windows()
fourfold(cross.classification.temporal.validation.nnm, std="margin", main="TEMPORAL VALIDATION NNM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#load package (verification)
library(verification)
# 2nd method using verify function
# NOTE(review): observed values come from validation.table[,2] here but from
# validation.grouping.variable in the table above -- presumably the same
# series; confirm against the data-loading section.
verification.temporal.validation.nnm<-verify(validation.table[,2],predict.result.nnm, frcst.type="prob", obs.type="binary")
#summary(verification.temporal.validation.nnm)
# showing confidence intervals. MAY BE SLOW
area.under.roc.curve.temporal.validation.nnm<-roc.area(validation.table[,2],predict.result.nnm)
windows()
roc.plot(verification.temporal.validation.nnm, main = "ROC PLOT: TEMPORAL VALIDATION NNM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[4] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.nnm$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.nnm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[4], sep=""), side=3, col="red", cex=0.8)
# EXPORT OF PLOT FOR VALIDATION OF NNM MODEL
pdf(file = "result_NNM_FourfoldPlot_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.temporal.validation.nnm, std="margin", main="TEMPORAL VALIDATION NNM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
dev.off()
#pdf(file = "result_NNM_ROCPlot_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.temporal.validation.nnm, main = "ROC PLOT: TEMPORAL VALIDATION NNM MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#area.under.roc.curve.temporal.validation.nnm<-roc.area(verification.table[,2],predict.result.nnm)
#dev.off()
pdf(file = "result_NNM_ROCPlot_bootstrap_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
# FIX: this PDF version previously used n.boot=bootstrap.sample.values[3]
# while the on-screen plot above and the annotation below both use [4]; use
# [4] consistently so the reported bootstrap count matches what was run.
roc.plot(verification.temporal.validation.nnm, main = "ROC PLOT: TEMPORAL VALIDATION NNM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[4] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.nnm$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.nnm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[4], sep=""), side=3, col="red", cex=0.8)
dev.off()
# Matching code between observed (validation set) and rounded predicted values:
#   "00" -> 1 (true negative),  "01" -> 2 (false positive),
#   "10" -> 3 (false negative), "11" -> 4 (true positive)
# Assumes the pasted pairs are always one of the four codes above (grouping
# variable in {0,1}, rounded probability in {0,1}).
validation.nnm.code.lookup<-c("00"=1,"01"=2,"10"=3,"11"=4)
validation.nnm.observed.predicted<-paste(validation.grouping.variable,round(predict.result.nnm),sep="")
validation.nnm.matching.code<-as.numeric(validation.nnm.code.lookup[validation.nnm.observed.predicted])
# EXPORT OF NNM MODEL RESULTS
# All NNM outputs are collected in a single tab-separated report file. The very
# first write creates (overwrites) the file; every later write appends to it.
nnm.report.file<-"result_NNM.txt"
# Append one content block (scalar, vector or matrix) to the report
nnm.report.append<-function(content)
{
write.table(content,file=nnm.report.file, append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
# Append n.blank empty separator lines to the report
nnm.report.blank<-function(n.blank=1)
{
for (blank.line.count in seq_len(n.blank))
{
nnm.report.append("")
}
}
write.table("RESULTS OF NEURAL NETWORK MODEL",file=nnm.report.file, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
nnm.report.blank(2)
nnm.report.append("NNM MODEL OUTPUTS")
nnm.report.blank()
nnm.report.append("Selection of Neural Network Structure")
nnm.report.append(rbind(c("WEIGHT DECAY","HIDDEN LAYER NODES","SUM SQUARED ERROR","ROC AREA"),nnm.table))
nnm.report.blank()
nnm.report.append("Neural Network Structure Selected")
nnm.report.append(rbind(c("INPUT NODES","HIDDEN LAYER NODES","OUTPUT NODES"),cbind(result.nnm$n[1],result.nnm$n[2],result.nnm$n[3])))
nnm.report.blank()
nnm.report.append("Value of Weight Decay Term Selected")
nnm.report.append(weight.decay.selected)
nnm.report.blank()
nnm.report.append("Value of Fitting Criterion Plus Weight Decay Term")
nnm.report.append(result.nnm$value)
nnm.report.blank()
nnm.report.append("Best Set of Weights Found")
nnm.report.append(result.nnm$wts)
nnm.report.blank()
nnm.report.append("CONTINGENCY TABLE MODEL RESULT")
nnm.report.blank()
nnm.report.append(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.nnm$table[,1,],contingency.table.nnm$table[,2,],contingency.table.nnm$table[,3,])))
nnm.report.blank(2)
nnm.report.append("CONTINGENCY TABLE VALIDATION")
nnm.report.blank()
nnm.report.append(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.temporal.validation.nnm$table[,1,],contingency.table.temporal.validation.nnm$table[,2,],contingency.table.temporal.validation.nnm$table[,3,])))
nnm.report.blank(2)
nnm.report.append("MATCHING CODE DEFINITION")
nnm.report.blank()
nnm.report.append(cbind(c("","OBSERVED NO LANDSLIDES: 0","OBSERVED LANDSLIDES: 1"), c("PREDICTED NO LANDSLIDES: 0","00 -> Code 1","10 -> Code 3"), c("PREDICTED LANDSLIDES: 1","01 -> Code 2","11 -> Code 4")))
nnm.report.blank(2)
nnm.report.append("FINAL RESULTS")
nnm.report.append(rbind(c("ID","GROUPING VARIABLE","MODEL POSTERIOR PROBABILITY","MODEL CLASSIFICATION","MODEL RESULT MATCHING CODE","VALIDATION GROUPING VARIABLE","VALIDATION MATCHING CODE"),cbind(identification.value,as.numeric(grouping.variable)-1,predict.result.nnm,round(predict.result.nnm),result.nnm.matching.code,as.numeric(levels(validation.grouping.variable))[validation.grouping.variable],validation.nnm.matching.code)))
}
#-------------------- FORECAST COMBINATION MODEL --------------------#
##### FORECAST COMBINATION USING A LOGISTIC REGRESSION MODEL
#####(A constrained Ordinary Least Squared estimation can also be adopted)
if(model.run.matrix[5] == "YES")
{
library(Zelig)
# Combine the four individual-model susceptibility estimates (LDA and QDA
# posterior probabilities, LRM fitted values, NNM predictions) into one data
# frame, with the observed binary outcome in the first column, as predictors
# for a combining logistic regression.
forecasting.combined.variables<-as.data.frame(cbind(data.variables[,1],predict.result.lda$posterior[,2],predict.result.qda$posterior[,2],result.lrm$fitted.values,predict.result.nnm))
colnames(forecasting.combined.variables)<-c("FRAX","resultlda","resultqda","resultlrm","resultnnm") # Names of column mustn't have points
# Trial fit wrapped in try(): if the combining logit cannot be estimated, the
# failure and the traceback are logged to a file instead of aborting the run.
# NOTE(review): the model is fitted again below regardless of this outcome.
if (class(try(zelig(as.formula(paste(names(forecasting.combined.variables)[1],"~",paste(names(forecasting.combined.variables)[2:dim(forecasting.combined.variables)[2]],collapse= "+"))), data=forecasting.combined.variables, model="logit")))=="try-error")
{
#zelig(as.formula(paste(names(forecasting.combined.variables)[1],"~",paste(names(forecasting.combined.variables)[2:dim(forecasting.combined.variables)[2]],collapse= "+"))), data=forecasting.combined.variables, model="logit")
write.table("The combination of forecast using Logistic Regression Model was not completed",file="Error_CFM_Analysis.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Error_CFM_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Error LOG",file="Error_CFM_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind("Message",rev(1:length(as.vector(.Traceback)))," ->",as.vector(.Traceback)),file="Error_CFM_Analysis.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
result.cfm<-NULL
# Fit the combining logistic regression: FRAX ~ resultlda+resultqda+resultlrm+resultnnm
result.cfm<-zelig(as.formula(paste(names(forecasting.combined.variables)[1],"~",paste(names(forecasting.combined.variables)[2:dim(forecasting.combined.variables)[2]],collapse= "+"))), data=forecasting.combined.variables, model="logit")
summary(result.cfm)
#names(result.cfm)
#for predicted value (posterior probablity calculated with model) result.cfm$fitted.values was considered
# 2x2 classification of observed outcome vs rounded (0.5-threshold) combined probability
cross.classification.cfm<-table(as.numeric(result.cfm$y),round(result.cfm$fitted.values),dnn=c("Observed","Predicted"))
rownames(cross.classification.cfm)<-list("No Landslide","Landslide") # Observed
colnames(cross.classification.cfm)<-list("No Landslide","Landslide") # Predicted
#str(cross.classification.cfm)
# Assignation of a matching code between observed and predicted values
# "00"->1 (TN), "01"->2 (FP), "10"->3 (FN), "11"->4 (TP)
result.cfm.matching.code<-paste(grouping.variable,round(result.cfm$fitted.values),sep="")
result.cfm.matching.code<-gsub("00","1",result.cfm.matching.code)
result.cfm.matching.code<-gsub("01","2",result.cfm.matching.code)
result.cfm.matching.code<-gsub("10","3",result.cfm.matching.code)
result.cfm.matching.code<-gsub("11","4",result.cfm.matching.code)
result.cfm.matching.code<-as.numeric(result.cfm.matching.code)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
contingency.table.cfm<-table2d_summary(cross.classification.cfm)
test.table.cfm<-assocstats(cross.classification.cfm)
#Different plots for contingency table
# Fourfold display of the CFM confusion matrix (on screen; PDF copy below)
windows()
fourfold(cross.classification.cfm,std="margin", main="COMBINATION LOGISTIC REGRESSION MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#A ROC curve plots the false alarm rate against the hit rate
#for a probablistic forecast for a range of thresholds.
#load package (verification)
library(verification)
#verify function
#Based on the type of inputs, this function calculates a range of verification statistics and skill scores.
#Additionally, it creates a verify class object that can be further analyzed.
##### ROC PLOT OBS - POSTERIOR PROBABILITY ASSOCIATED TO 1
# Method using verify function
verification.results.cfm<-verify(result.cfm$y,result.cfm$fitted.values, frcst.type="prob", obs.type="binary")
#summary(verification.results.cfm)
#str(verification.results.qda)
#windows()
#roc.plot(verification.results.cfm, main = "ROC PLOT: COMBINATION LOGISTIC REGRESSION", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
area.under.roc.curve.cfm<-roc.area(result.cfm$y,result.cfm$fitted.values)
## showing confidence intervals. MAY BE SLOW
windows()
roc.plot(verification.results.cfm, main = "ROC PLOT: COMBINATION LOGISTIC REGRESSION", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[5] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.cfm$A,2),"; Sample size = ",area.under.roc.curve.cfm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[5], sep=""), side=3, col="red", cex=0.8)
## Histogram of posterior probability
# Breaks come from breaks.histogram.values (defined earlier in the script);
# the five colors map to the five susceptibility classes.
windows()
hist(result.cfm$fitted.values, breaks=breaks.histogram.values, freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Combination Logistic Regression Model susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
pdf(file = "result_CFM_Histogram.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
hist(result.cfm$fitted.values, breaks=breaks.histogram.values, freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Combination Logistic Regression Model susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
dev.off()
# EXPORT OF PLOT FOR CFM MODEL
pdf(file = "result_CFM_FourfoldPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.cfm,std="margin", main="COMBINATION LOGISTIC REGRESSION MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
dev.off()
#pdf(file = "result_CFM_ROCPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.results.cfm, main = "ROC PLOT: COMBINATION LOGISTIC REGRESSION", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#dev.off()
pdf(file = "result_CFM_ROCPlot_bootstrap.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.results.cfm, main = "ROC PLOT: COMBINATION LOGISTIC REGRESSION", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[5] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.cfm$A,2),"; Sample size = ",area.under.roc.curve.cfm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[5], sep=""), side=3, col="red", cex=0.8)
dev.off()
## BOOTSTRAP PROCEDURE FOR THE ESTIMATION OF MODEL PREDICTION VARIABILITY
# For each bootstrap replicate the CFM logistic model is re-fitted on a
# resampled-with-replacement dataset; in-bag posterior probabilities and
# full-sample predictions are stored per observation, then summarized
# (mean/sd/min/max/standard error/quantiles) and exported.
# FIX in this revision: the standard error of the bootstrapped mean was
# computed as sd/n; the correct formula is sd/sqrt(n) (see L8301/L8308 of
# the previous version).
if(bootstrap.model.variability[5] == "YES")
{
# Number of bootstrap replicates configured for the CFM model (slot 5)
bootstrap.sample.model.cfm<-bootstrap.sample.model[5]
# Result matrix: col 1 = observation ID, then one triple of columns per
# replicate: (selection count, in-bag probability, full-sample prediction)
matrix.bootstrap.model.cfm<-matrix(data=NA, nrow=dim(data.table)[1], ncol=(bootstrap.sample.model.cfm*3)+1)
colnames(matrix.bootstrap.model.cfm)<-rep("na",(bootstrap.sample.model.cfm*3)+1)
matrix.bootstrap.model.cfm[,1]<-identification.value
colnames(matrix.bootstrap.model.cfm)[1]<-"ID"
name.sel.run<-paste(rep("ID_Selection_Run",bootstrap.sample.model.cfm),1:bootstrap.sample.model.cfm,sep="_")
colnames(matrix.bootstrap.model.cfm)[seq(2,(bootstrap.sample.model.cfm*3)-1,3)]<-name.sel.run
name.prob.run<-paste(rep("Probability_Run",bootstrap.sample.model.cfm),1:bootstrap.sample.model.cfm,sep="_")
colnames(matrix.bootstrap.model.cfm)[seq(3,(bootstrap.sample.model.cfm*3),3)]<-name.prob.run
name.pred.run<-paste(rep("Prediction_Run",bootstrap.sample.model.cfm),1:bootstrap.sample.model.cfm,sep="_")
colnames(matrix.bootstrap.model.cfm)[seq(4,(bootstrap.sample.model.cfm*3)+1,3)]<-name.pred.run
selection.index<-NULL
library(Zelig)
#Bootstrap procedure
for (count.boot in 1:bootstrap.sample.model.cfm)
{
# Resample row indices with replacement; record how often each row was drawn
selection.index<-sample(1:dim(data.table)[1], replace=TRUE, prob=NULL)
matrix.bootstrap.model.cfm[as.numeric(names(table(selection.index))),(count.boot*3)-1]<-table(selection.index)
data.variables.bootstrap.model.cfm<-forecasting.combined.variables[selection.index,]
explanatory.variables.bootstrap.model.cfm<-forecasting.combined.variables[selection.index,2:dim(forecasting.combined.variables)[2]]
grouping.variable.bootstrap.model.cfm<-as.factor(forecasting.combined.variables[selection.index,1])
# Fit the logistic combination model on the bootstrap sample
result.bootstrap.model.cfm<-zelig(as.formula(paste(names(data.variables.bootstrap.model.cfm)[1],"~",paste(names(data.variables.bootstrap.model.cfm[,2:dim(data.variables.bootstrap.model.cfm)[2]]),collapse= "+"))), data=data.variables.bootstrap.model.cfm, model="logit")
# Indices of coefficients that could not be estimated (NA); note that
# match(x, NA) == 1 exactly where x is NA, so this finds NA coefficients
excluded.variables.bootstrap.model.cfm<-which(match(result.bootstrap.model.cfm$coefficients,NA)==1)
if (length(excluded.variables.bootstrap.model.cfm) != 0)
{
# Drop inestimable variables before re-fitting and before building setx data
data.variables.bootstrap.model.cfm.selected<-data.variables.bootstrap.model.cfm[,-excluded.variables.bootstrap.model.cfm]
setx.data.probability<-forecasting.combined.variables[as.numeric(names(table(selection.index))),][,-excluded.variables.bootstrap.model.cfm]
setx.data.prediction<-forecasting.combined.variables[,-excluded.variables.bootstrap.model.cfm]
} else
{
data.variables.bootstrap.model.cfm.selected<-data.variables.bootstrap.model.cfm
setx.data.probability<-forecasting.combined.variables[as.numeric(names(table(selection.index))),]
setx.data.prediction<-forecasting.combined.variables
}
# Re-fit on the cleaned variable set
result.bootstrap.model.cfm.selected<-zelig(as.formula(paste(names(data.variables.bootstrap.model.cfm.selected)[1],"~",paste(names(data.variables.bootstrap.model.cfm.selected[,2:dim(data.variables.bootstrap.model.cfm.selected)[2]]),collapse= "+"))), data=data.variables.bootstrap.model.cfm.selected, model="logit")
# Simulated expected values for the rows actually drawn in this replicate
x.result.bootstrap.model.cfm.selected.probability<-setx(result.bootstrap.model.cfm.selected,data=setx.data.probability,fn=NULL)
sim.result.bootstrap.model.cfm.selected.probability<-sim(result.bootstrap.model.cfm.selected,x=x.result.bootstrap.model.cfm.selected.probability,num=c(100,100))
matrix.bootstrap.model.cfm[as.numeric(names(table(selection.index))),(count.boot*3)]<-colMeans(sim.result.bootstrap.model.cfm.selected.probability$qi$ev)
# Simulated expected values for ALL rows (prediction columns)
x.result.bootstrap.model.cfm.selected.prediction<-setx(result.bootstrap.model.cfm.selected,data=setx.data.prediction,fn=NULL)
sim.result.bootstrap.model.cfm.selected.prediction<-sim(result.bootstrap.model.cfm.selected,x=x.result.bootstrap.model.cfm.selected.prediction,num=c(100,100))
matrix.bootstrap.model.cfm[,(count.boot*3)+1]<-colMeans(sim.result.bootstrap.model.cfm.selected.prediction$qi$ev)
}
# Export of bootstrap sample
write.table(matrix.bootstrap.model.cfm,file="result_CFM_BootstrapSamples.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=TRUE)
# Per-observation summary statistics of the bootstrap runs
ID.bootstrap.model.cfm.count<-numeric(length=dim(data.table)[1])
#Probability (selected values)
bootstrap.model.cfm.probability.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.probability.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.probability.min<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.probability.max<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.probability.sderror<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.probability.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
#Prediction (all values)
bootstrap.model.cfm.prediction.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.prediction.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.prediction.min<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.prediction.max<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.prediction.sderror<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.prediction.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
for (count.row.variability in 1:dim(data.table)[1])
{
# Statistics on bootstrapped probability (only the runs in which the row was drawn)
ID.bootstrap.model.cfm.count[count.row.variability]<-length(na.omit(matrix.bootstrap.model.cfm[count.row.variability,seq(2,(bootstrap.sample.model.cfm*3)-1,3)]))
bootstrap.model.cfm.probability.mean[count.row.variability]<-mean(na.omit(matrix.bootstrap.model.cfm[count.row.variability,seq(3,(bootstrap.sample.model.cfm*3),3)]))
bootstrap.model.cfm.probability.sd[count.row.variability]<-sd(na.omit(matrix.bootstrap.model.cfm[count.row.variability,seq(3,(bootstrap.sample.model.cfm*3),3)]))
bootstrap.model.cfm.probability.min[count.row.variability]<-min(na.omit(matrix.bootstrap.model.cfm[count.row.variability,seq(3,(bootstrap.sample.model.cfm*3),3)]))
bootstrap.model.cfm.probability.max[count.row.variability]<-max(na.omit(matrix.bootstrap.model.cfm[count.row.variability,seq(3,(bootstrap.sample.model.cfm*3),3)]))
# FIX: standard error of the mean is sd/sqrt(n), not sd/n
bootstrap.model.cfm.probability.sderror[count.row.variability]<-bootstrap.model.cfm.probability.sd[count.row.variability]/sqrt(ID.bootstrap.model.cfm.count[count.row.variability])
bootstrap.model.cfm.probability.quantiles[count.row.variability,]<-quantile(na.omit(matrix.bootstrap.model.cfm[count.row.variability,seq(3,(bootstrap.sample.model.cfm*3),3)]),probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
# Statistics on bootstrapped prediction (all runs predict every row)
bootstrap.model.cfm.prediction.mean[count.row.variability]<-mean(matrix.bootstrap.model.cfm[count.row.variability,seq(4,(bootstrap.sample.model.cfm*3)+1,3)])
bootstrap.model.cfm.prediction.sd[count.row.variability]<-sd(matrix.bootstrap.model.cfm[count.row.variability,seq(4,(bootstrap.sample.model.cfm*3)+1,3)])
bootstrap.model.cfm.prediction.min[count.row.variability]<-min(matrix.bootstrap.model.cfm[count.row.variability,seq(4,(bootstrap.sample.model.cfm*3)+1,3)])
bootstrap.model.cfm.prediction.max[count.row.variability]<-max(matrix.bootstrap.model.cfm[count.row.variability,seq(4,(bootstrap.sample.model.cfm*3)+1,3)])
# FIX: standard error of the mean is sd/sqrt(n), not sd/n
bootstrap.model.cfm.prediction.sderror[count.row.variability]<-bootstrap.model.cfm.prediction.sd[count.row.variability]/sqrt(bootstrap.sample.model.cfm)
bootstrap.model.cfm.prediction.quantiles[count.row.variability,]<-quantile(matrix.bootstrap.model.cfm[count.row.variability,seq(4,(bootstrap.sample.model.cfm*3)+1,3)],probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
}
# Export of bootstrap sample statistics (header row first, then the data)
write.table(cbind("ID","CFM_NumberSelectedSamples","CFM_Probability_Mean","CFM_Probability_Sd","CFM_Probability_Min","CFM_Probability_Max","CFM_Probability_Sderror","CFM_Probability_Quantiles_0","CFM_Probability_Quantiles_0.05","CFM_Probability_Quantiles_0.25","CFM_Probability_Quantiles_0.5","CFM_Probability_Quantiles_0.75","CFM_Probability_Quantiles_0.95","CFM_Probability_Quantiles_1","CFM_Prediction_Mean","CFM_Prediction_Sd","CFM_Prediction_Min","CFM_Prediction_Max","CFM_Prediction_Sderror","CFM_Prediction_Quantiles_0","CFM_Prediction_Quantiles_0.05","CFM_Prediction_Quantiles_0.25","CFM_Prediction_Quantiles_0.5","CFM_Prediction_Quantiles_0.75","CFM_Prediction_Quantiles_0.95","CFM_Prediction_Quantiles_1"),file="result_CFM_BootstrapStatistics.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(identification.value,ID.bootstrap.model.cfm.count,bootstrap.model.cfm.probability.mean,bootstrap.model.cfm.probability.sd,bootstrap.model.cfm.probability.min,bootstrap.model.cfm.probability.max,bootstrap.model.cfm.probability.sderror,bootstrap.model.cfm.probability.quantiles,bootstrap.model.cfm.prediction.mean,bootstrap.model.cfm.prediction.sd,bootstrap.model.cfm.prediction.min,bootstrap.model.cfm.prediction.max,bootstrap.model.cfm.prediction.sderror,bootstrap.model.cfm.prediction.quantiles),file="result_CFM_BootstrapStatistics.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#windows()
#double.sd.histogram.variability<-hist(bootstrap.model.cfm.probability.sd*2,breaks=seq(0,1,0.05),labels=TRUE)
#plot(double.sd.histogram.variability$counts, seq(0,0.95,0.05), type="S",ylim=c(0,1), labels=TRUE)
# Scatter of mean in-bag probability vs mean full-sample prediction,
# with the 1:1 line for reference (on screen, then exported to PDF)
windows()
plot(bootstrap.model.cfm.probability.mean,bootstrap.model.cfm.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="CFM BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
pdf(file = "result_CFM_BootstrapMeansComparison.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(bootstrap.model.cfm.probability.mean,bootstrap.model.cfm.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="CFM BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
dev.off()
# BOOTSTRAPPED PROBABILITY - Fit parabola 3 parameter y = ax^2 + bx + c
parabola.probability.cfm<-cbind(bootstrap.model.cfm.probability.mean,2*bootstrap.model.cfm.probability.sd)
parabola.probability.cfm<-na.omit(parabola.probability.cfm[order(parabola.probability.cfm[,1]),])
colnames(parabola.probability.cfm)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.probability.cfm <- nls(parabola.probability.cfm[,"ordinate"] ~ coeff.a*(parabola.probability.cfm[,"abscissa"]^2) + (-1)*coeff.a*parabola.probability.cfm[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.probability.cfm<-predict(fit.parabola.probability.cfm)
#coef(fit.parabola.probability.cfm)
windows()
plot(parabola.probability.cfm[,"abscissa"],parabola.probability.cfm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="CFM Model Probability Variability (Bootstrap)")
lines(parabola.probability.cfm[,"abscissa"],value.parabola.probability.cfm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
# Build the fitted-equation label by substituting the estimated coefficient
# into a plotmath expression
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.cfm),3),coeff.b= -round(coef(fit.parabola.probability.cfm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_CFM_BootstrapProbabilityVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.probability.cfm[,"abscissa"],parabola.probability.cfm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="CFM Model Probability Variability (Bootstrap)")
lines(parabola.probability.cfm[,"abscissa"],value.parabola.probability.cfm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.cfm),3),coeff.b= -round(coef(fit.parabola.probability.cfm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
# BOOTSTRAPPED PREDICTION - Fit parabola 3 parameter y = ax^2 + bx + c
parabola.prediction.cfm<-cbind(bootstrap.model.cfm.prediction.mean,2*bootstrap.model.cfm.prediction.sd)
parabola.prediction.cfm<-parabola.prediction.cfm[order(parabola.prediction.cfm[,1]),]
colnames(parabola.prediction.cfm)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.prediction.cfm <- nls(parabola.prediction.cfm[,"ordinate"] ~ coeff.a*(parabola.prediction.cfm[,"abscissa"]^2) + (-1)*coeff.a*parabola.prediction.cfm[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.prediction.cfm<-predict(fit.parabola.prediction.cfm)
#coef(fit.parabola.prediction.cfm)
windows()
plot(parabola.prediction.cfm[,"abscissa"],parabola.prediction.cfm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="CFM Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.cfm[,"abscissa"],value.parabola.prediction.cfm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.cfm),3),coeff.b= -round(coef(fit.parabola.prediction.cfm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_CFM_BootstrapPredictionVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.prediction.cfm[,"abscissa"],parabola.prediction.cfm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="CFM Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.cfm[,"abscissa"],value.parabola.prediction.cfm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.cfm),3),coeff.b= -round(coef(fit.parabola.prediction.cfm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
}
## Sensitivity, Specificity, Cohen's kappa plot
# Re-extract the ROC curve points: plot.data[,1,1] = thresholds,
# [,2,1] = hit rate (sensitivity/TPR), [,3,1] = false alarm rate (FPR)
roc.plot.cfm.series<-roc.plot(verification.results.cfm,binormal=TRUE)
# One row per ROC threshold; confusion-matrix counts plus TPR/FPR/kappa
contingency.table.matrix.cfm<-matrix(nrow=dim(roc.plot.cfm.series$plot.data)[1],ncol=8)
colnames(contingency.table.matrix.cfm)<-c("Threshold","TP","TN","FP","FN","TPR","FPR","COHEN_KAPPA")
contingency.table.matrix.cfm[,1]<-roc.plot.cfm.series$plot.data[,1,1]
contingency.table.matrix.cfm[,6]<-roc.plot.cfm.series$plot.data[,2,1]
contingency.table.matrix.cfm[,7]<-roc.plot.cfm.series$plot.data[,3,1]
values.odserved<-data.table[,2] # observed 0/1 outcome (variable name kept for compatibility; "odserved" is a historical typo for "observed")
values.predicted<-result.cfm$fitted.values
for (threshold.series in 1:dim(roc.plot.cfm.series$plot.data)[1])
{
value.threshold<-contingency.table.matrix.cfm[threshold.series,1]
# Vectorized reclassification: 1 where the posterior probability exceeds
# the threshold, 0 otherwise (replaces the former element-wise loop)
values.probability.reclassified<-as.numeric(values.predicted > value.threshold)
# Paste observed+predicted into two-character codes and count directly
# ("11"=TP, "10"=FN, "01"=FP, "00"=TN); equivalent to the former
# gsub-recode-and-range-count chain
series.pasted<-paste(values.odserved,values.probability.reclassified,sep="")
TP<-sum(series.pasted == "11") # True Positive
FN<-sum(series.pasted == "10") # False Negative
FP<-sum(series.pasted == "01") # False Positive
TN<-sum(series.pasted == "00") # True Negative
#TPR<-TP/(TP+FN) # Hit Rate or True Positive Rate or Sensitivity - Assigned before the for cycle using rocplot data
#FPR<-FP/(FP+TN) # False Alarm Rate or False Positive Rate or 1-Specificity
# Cohen's Kappa = (agreement-chance)/(1-chance) where agreement=(TP+TN)/(TP+TN+FP+FN) and chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
agreement=(TP+TN)/(TP+TN+FP+FN)
chance=((((TN+FN)*(TN+FP))/(TP+TN+FP+FN))+(((TP+FP)*(TP+FN))/(TP+TN+FP+FN)))/(TP+TN+FP+FN)
cohen.kappa.value<-(agreement-chance)/(1-chance)
#Other
#library(vcd)
#cohen.kappa.value<-Kappa(cross.classification.table)
contingency.table.matrix.cfm[threshold.series,2]<-TP
contingency.table.matrix.cfm[threshold.series,3]<-TN
contingency.table.matrix.cfm[threshold.series,4]<-FP
contingency.table.matrix.cfm[threshold.series,5]<-FN
contingency.table.matrix.cfm[threshold.series,8]<-cohen.kappa.value
}
# Evaluation plot: sensitivity (red), specificity (green) and Cohen's kappa
# (blue) as functions of the probability threshold (on screen, then PDF)
windows()
plot(roc.plot.cfm.series$plot.data[,1,1],roc.plot.cfm.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="CFM MODEL EVALUATION PLOT")
points(roc.plot.cfm.series$plot.data[,1,1],1-roc.plot.cfm.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.cfm.series$plot.data[,1,1], contingency.table.matrix.cfm[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
pdf(file = "result_CFM_ModelEvaluationPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(roc.plot.cfm.series$plot.data[,1,1],roc.plot.cfm.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="CFM MODEL EVALUATION PLOT")
points(roc.plot.cfm.series$plot.data[,1,1],1-roc.plot.cfm.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.cfm.series$plot.data[,1,1], contingency.table.matrix.cfm[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
dev.off()
## VALIDATION OF CFM MODEL (Matching CFM posterior probability results and validation grouping variable)
# Cross-tabulate the temporal-validation outcome against the model's
# rounded posterior probabilities (threshold 0.5 via round()).
cross.classification.temporal.validation.cfm<-table(validation.grouping.variable,round(result.cfm$fitted.values),dnn=c("Observed","Predicted"))
rownames(cross.classification.temporal.validation.cfm)<-list("No Landslide","Landslide") #Observed
colnames(cross.classification.temporal.validation.cfm)<-list("No Landslide","Landslide") #Predicted
#str(cross.classification.temporal.validation.cfm)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
# Margins/summary and association statistics for the validation table
contingency.table.temporal.validation.cfm<-table2d_summary(cross.classification.temporal.validation.cfm)
test.table.temporal.validation.cfm<-assocstats(cross.classification.temporal.validation.cfm)
#Different plots for contingency table
windows()
fourfold(cross.classification.temporal.validation.cfm, std="margin", main="TEMPORAL VALIDATION CFM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#load package (verification)
library(verification)
# 2nd method using verify function
# Validation observations (validation.table column 2) against model probabilities
verification.temporal.validation.cfm<-verify(validation.table[,2],result.cfm$fitted.values, frcst.type="prob", obs.type="binary")
#summary(verification.temporal.validation.cfm)
# showing confidence intervals. MAY BE SLOW
area.under.roc.curve.temporal.validation.cfm<-roc.area(validation.table[,2],result.cfm$fitted.values)
windows()
roc.plot(verification.temporal.validation.cfm, main = "ROC PLOT: TEMPORAL VALIDATION CFM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[5] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.cfm$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.cfm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[5], sep=""), side=3, col="red", cex=0.8)
# EXPORT OF PLOT FOR VALIDATION OF CFM MODEL
pdf(file = "result_CFM_FourfoldPlot_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.temporal.validation.cfm, std="margin", main="TEMPORAL VALIDATION CFM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
dev.off()
#pdf(file = "result_CFM_ROCPlot_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.temporal.validation.cfm, main = "ROC PLOT: TEMPORAL VALIDATION CFM MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#area.under.roc.curve.temporal.validation.cfm<-roc.area(verification.table[,2],result.cfm$fitted.values)
#dev.off()
# Same bootstrap-CI ROC plot exported to PDF
pdf(file = "result_CFM_ROCPlot_bootstrap_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.temporal.validation.cfm, main = "ROC PLOT: TEMPORAL VALIDATION CFM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[5] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.cfm$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.cfm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[5], sep=""), side=3, col="red", cex=0.8)
dev.off()
# Assignation of a matching code between observed and predicted values calculated using the validation dataset
# Codes (see MATCHING CODE DEFINITION written below): "00"->1 (TN), "01"->2 (FP),
# "10"->3 (FN), "11"->4 (TP). Each pasted string is exactly two characters,
# so the chained gsub substitutions are unambiguous.
validation.cfm.matching.code<-paste(validation.grouping.variable,round(result.cfm$fitted.values),sep="")
validation.cfm.matching.code<-gsub("00","1",validation.cfm.matching.code)
validation.cfm.matching.code<-gsub("01","2",validation.cfm.matching.code)
validation.cfm.matching.code<-gsub("10","3",validation.cfm.matching.code)
validation.cfm.matching.code<-gsub("11","4",validation.cfm.matching.code)
validation.cfm.matching.code<-as.numeric(validation.cfm.matching.code)
# EXPORT OF CFM MODEL RESULTS
# result_CFM.txt is built by successive appends: title, coefficients,
# model and validation contingency tables, code legend, then the per-row
# final results. Empty write.table calls emit blank separator lines.
write.table("RESULTS OF COMBINATION FORECAST LOGISTIC REGRESSION MODEL",file="result_CFM.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("CFM MODEL OUTPUTS",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Logistic Regression coefficients",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#Scaling coefficients
write.table(cbind(names(result.cfm$coefficients),result.cfm$coefficients),file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("CONTINGENCY TABLE MODEL RESULT",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.cfm$table[,1,],contingency.table.cfm$table[,2,],contingency.table.cfm$table[,3,])),file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("CONTINGENCY TABLE VALIDATION",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.temporal.validation.cfm$table[,1,],contingency.table.temporal.validation.cfm$table[,2,],contingency.table.temporal.validation.cfm$table[,3,])),file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("MATCHING CODE DEFINITION",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(c("","OBSERVED NO LANDSLIDES: 0","OBSERVED LANDSLIDES: 1"), c("PREDICTED NO LANDSLIDES: 0","00 -> Code 1","10 -> Code 3"), c("PREDICTED LANDSLIDES: 1","01 -> Code 2","11 -> Code 4")),file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("FINAL RESULTS",file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# Per-observation table: ID, observed outcome, posterior probability,
# rounded classification, model matching code, validation outcome and
# validation matching code (result.cfm.matching.code is defined earlier
# in this section, outside this excerpt's edits).
write.table(rbind(c("ID","GROUPING VARIABLE","MODEL POSTERIOR PROBABILITY","MODEL CLASSIFICATION","MODEL RESULT MATCHING CODE","VALIDATION GROUPING VARIABLE","VALIDATION MATCHING CODE"),cbind(identification.value,result.cfm$y,result.cfm$fitted.values,round(result.cfm$fitted.values),result.cfm.matching.code,as.numeric(levels(validation.grouping.variable))[validation.grouping.variable],validation.cfm.matching.code)),file="result_CFM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
#-------------- FORECAST COMBINATION MODEL WITHOUT NNM ---------------#
##### FORECAST COMBINATION USING A LOGISTIC REGRESSION MODEL WITHOUT NNM MODEL
#####
if(model.run.matrix[6] == "YES")
{
library(Zelig)
forecasting.combined.variables<-as.data.frame(cbind(data.variables[,1],predict.result.lda$posterior[,2],predict.result.qda$posterior[,2],result.lrm$fitted.values))
colnames(forecasting.combined.variables)<-c("FRAX","resultlda","resultqda","resultlrm")
if (class(try(zelig(as.formula(paste(names(forecasting.combined.variables)[1],"~",paste(names(forecasting.combined.variables)[2:dim(forecasting.combined.variables)[2]],collapse= "+"))), data=forecasting.combined.variables, model="logit")))=="try-error")
{
#zelig(as.formula(paste(names(forecasting.combined.variables)[1],"~",paste(names(forecasting.combined.variables)[2:dim(forecasting.combined.variables)[2]],collapse= "+"))), data=forecasting.combined.variables, model="logit")
write.table("The combination of forecast using Logistic Regression Model was not completed",file="Error_CFM_Analysis_NoNNM.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Error_CFM_Analysis_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Error LOG",file="Error_CFM_Analysis_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind("Message",rev(1:length(as.vector(.Traceback)))," ->",as.vector(.Traceback)),file="Error_CFM_Analysis_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
result.cfm.nonnm<-NULL
result.cfm.nonnm<-zelig(as.formula(paste(names(forecasting.combined.variables)[1],"~",paste(names(forecasting.combined.variables)[2:dim(forecasting.combined.variables)[2]],collapse= "+"))), data=forecasting.combined.variables, model="logit")
#summary(result.cfm.nonnm)
#names(result.cfm.nonnm)
#for predicted value (posterior probablity calculated with model) result.cfm$fitted.values was considered
cross.classification.cfm.nonnm<-table(as.numeric(result.cfm.nonnm$y),round(result.cfm.nonnm$fitted.values),dnn=c("Observed","Predicted"))
rownames(cross.classification.cfm.nonnm)<-list("No Landslide","Landslide") # Observed
colnames(cross.classification.cfm.nonnm)<-list("No Landslide","Landslide") # Predicted
#str(cross.classification.cfm.nonnm)
# Assignation of a matching code between observed and predicted values
result.cfm.nonnm.matching.code<-paste(grouping.variable,round(result.cfm.nonnm$fitted.values),sep="")
result.cfm.nonnm.matching.code<-gsub("00","1",result.cfm.nonnm.matching.code)
result.cfm.nonnm.matching.code<-gsub("01","2",result.cfm.nonnm.matching.code)
result.cfm.nonnm.matching.code<-gsub("10","3",result.cfm.nonnm.matching.code)
result.cfm.nonnm.matching.code<-gsub("11","4",result.cfm.nonnm.matching.code)
result.cfm.nonnm.matching.code<-as.numeric(result.cfm.nonnm.matching.code)
#Elaboration of Coefficient of association for contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
contingency.table.cfm.nonnm<-table2d_summary(cross.classification.cfm.nonnm)
test.table.cfm.nonnm<-assocstats(cross.classification.cfm.nonnm)
#Different plots for contingency table
windows()
fourfold(cross.classification.cfm.nonnm, std="margin", main="COMBINATION LOGISTIC NO NNM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#A ROC curve plots the false alarm rate against the hit rate
#for a probabilistic forecast for a range of thresholds.
#load package (verification)
library(verification)
#verify function
#Based on the type of inputs, this function calculates a range of verification statistics and skill scores.
#Additionally, it creates a verify class object that can be further analyzed.
##### ROC PLOT OBS - POSTERIOR PROBABILITY ASSOCIATED TO 1
# Method using verify function
# Compare observed binary response against fitted probabilities.
verification.results.cfm.nonnm<-verify(result.cfm.nonnm$y,result.cfm.nonnm$fitted.values, frcst.type="prob", obs.type="binary")
#summary(verification.results.cfm.nonnm)
#str(verification.results.qda)
#windows()
#roc.plot(verification.results.cfm.nonnm, main = "ROC PLOT: COMBINATION LOGISTIC NO NNM MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
# Area under the ROC curve (Mann-Whitney based) for the fitted model.
area.under.roc.curve.cfm.nonnm<-roc.area(result.cfm.nonnm$y,result.cfm.nonnm$fitted.values)
## ROC plot with bootstrap confidence intervals. MAY BE SLOW
windows()
roc.plot(verification.results.cfm.nonnm, main = "ROC PLOT: COMBINATION LOGISTIC NO NNM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[5] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.cfm.nonnm$A,2),"; Sample size = ",area.under.roc.curve.cfm.nonnm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[5], sep=""), side=3, col="red", cex=0.8)
## Histogram of posterior probability, binned by susceptibility class
## (breaks.histogram.values defined earlier in the script).
windows()
hist(result.cfm.nonnm$fitted.values, breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Combination Logistic No NNM Model susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
# Same histogram replayed on a PDF device for export.
pdf(file = "result_CFM_NoNNM_Histogram.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
hist(result.cfm.nonnm$fitted.values, breaks=breaks.histogram.values,freq=TRUE, xlab="Susceptibility Class", ylab="Frequency", main="Histogram of Combination Logistic No NNM Model susceptibility", col=c(rgb(38,115,0,max=255),rgb(233,255,190,max=255),rgb(255,255,0,max=255),rgb(255,128,0,max=255),rgb(255,0,0,max=255)))
dev.off()
# EXPORT OF PLOT FOR CFM NoNNM MODEL (fourfold display to PDF)
pdf(file = "result_CFM_NoNNM_FourfoldPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.cfm.nonnm, std="margin", main="COMBINATION LOGISTIC NO NNM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(150,220,255,max=255), rgb(0,0,128,max=255)))
dev.off()
#pdf(file = "result_CFM_NoNNM_ROCPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.results.cfm.nonnm, main = "ROC PLOT: COMBINATION FORECAST WITHOUT NNM", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#dev.off()
# ROC plot with bootstrap CIs replayed on a PDF device for export.
pdf(file = "result_CFM_NoNNM_ROCPlot_bootstrap.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.results.cfm.nonnm, main = "ROC PLOT: COMBINATION LOGISTIC NO NNM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[5] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.cfm.nonnm$A,2),"; Sample size = ",area.under.roc.curve.cfm.nonnm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[5], sep=""), side=3, col="red", cex=0.8)
dev.off()
## BOOTSTRAP PROCEDURE FOR THE ESTIMATION OF MODEL PREDICTION VARIABILITY
## Gated by the per-model flag (index 6 = CFM NoNNM). For each bootstrap run:
## resample rows with replacement, refit the logit model, then obtain
## - "probability": simulated expected values for the SELECTED (resampled) rows
## - "prediction" : simulated expected values for ALL rows.
## Results are stored column-wise in triplets per run:
## col 1 = ID, then [selection count, probability, prediction] for each run.
if(bootstrap.model.variability[6] == "YES")
{
bootstrap.sample.model.cfm.nonnm<-bootstrap.sample.model[6]
# One row per observation; 3 columns per bootstrap run plus the leading ID column.
matrix.bootstrap.model.cfm.nonnm<-matrix(data=NA, nrow=dim(data.table)[1], ncol=(bootstrap.sample.model.cfm.nonnm*3)+1)
colnames(matrix.bootstrap.model.cfm.nonnm)<-rep("na",(bootstrap.sample.model.cfm.nonnm*3)+1)
matrix.bootstrap.model.cfm.nonnm[,1]<-identification.value
colnames(matrix.bootstrap.model.cfm.nonnm)[1]<-"ID"
# Column name triplets: selection-count columns at 2,5,8,..., probability
# columns at 3,6,9,..., prediction columns at 4,7,10,...
name.sel.run<-paste(rep("ID_Selection_Run",bootstrap.sample.model.cfm.nonnm),1:bootstrap.sample.model.cfm.nonnm,sep="_")
colnames(matrix.bootstrap.model.cfm.nonnm)[seq(2,(bootstrap.sample.model.cfm.nonnm*3)-1,3)]<-name.sel.run
name.prob.run<-paste(rep("Probability_Run",bootstrap.sample.model.cfm.nonnm),1:bootstrap.sample.model.cfm.nonnm,sep="_")
colnames(matrix.bootstrap.model.cfm.nonnm)[seq(3,(bootstrap.sample.model.cfm.nonnm*3),3)]<-name.prob.run
name.pred.run<-paste(rep("Prediction_Run",bootstrap.sample.model.cfm.nonnm),1:bootstrap.sample.model.cfm.nonnm,sep="_")
colnames(matrix.bootstrap.model.cfm.nonnm)[seq(4,(bootstrap.sample.model.cfm.nonnm*3)+1,3)]<-name.pred.run
selection.index<-NULL
library(Zelig)
#Bootstrap procedure
for (count.boot in 1:bootstrap.sample.model.cfm.nonnm)
{
# Resample row indices with replacement; store how many times each row was drawn.
selection.index<-sample(1:dim(data.table)[1], replace=TRUE, prob=NULL)
matrix.bootstrap.model.cfm.nonnm[as.numeric(names(table(selection.index))),(count.boot*3)-1]<-table(selection.index)
data.variables.bootstrap.model.cfm.nonnm<-forecasting.combined.variables[selection.index,]
explanatory.variables.bootstrap.model.cfm.nonnm<-forecasting.combined.variables[selection.index,2:dim(forecasting.combined.variables)[2]]
grouping.variable.bootstrap.model.cfm.nonnm<-as.factor(forecasting.combined.variables[selection.index,1])
# First fit on the bootstrap sample; some coefficients may come back NA
# (e.g. aliased/constant predictors in the resample).
result.bootstrap.model.cfm.nonnm<-zelig(as.formula(paste(names(data.variables.bootstrap.model.cfm.nonnm)[1],"~",paste(names(data.variables.bootstrap.model.cfm.nonnm[,2:dim(data.variables.bootstrap.model.cfm.nonnm)[2]]),collapse= "+"))), data=data.variables.bootstrap.model.cfm.nonnm, model="logit")
# match(x, NA)==1 flags NA coefficients; equivalent to which(is.na(coefficients)).
# NOTE(review): coefficient index i (>1) lines up with data column i because
# the intercept occupies position 1 just as the response occupies column 1;
# the intercept itself can never be NA, so dropping columns by this index works.
excluded.variables.bootstrap.model.cfm.nonnm<-which(match(result.bootstrap.model.cfm.nonnm$coefficients,NA)==1)
if (length(excluded.variables.bootstrap.model.cfm.nonnm) != 0)
{
# Drop the columns of the NA-coefficient predictors before refitting and simulating.
data.variables.bootstrap.model.cfm.nonnm.selected<-data.variables.bootstrap.model.cfm.nonnm[,-excluded.variables.bootstrap.model.cfm.nonnm]
setx.data.probability<-forecasting.combined.variables[as.numeric(names(table(selection.index))),][,-excluded.variables.bootstrap.model.cfm.nonnm]
setx.data.prediction<-forecasting.combined.variables[,-excluded.variables.bootstrap.model.cfm.nonnm]
} else
{
data.variables.bootstrap.model.cfm.nonnm.selected<-data.variables.bootstrap.model.cfm.nonnm
setx.data.probability<-forecasting.combined.variables[as.numeric(names(table(selection.index))),]
setx.data.prediction<-forecasting.combined.variables
}
# Refit on the cleaned predictor set.
result.bootstrap.model.cfm.nonnm.selected<-zelig(as.formula(paste(names(data.variables.bootstrap.model.cfm.nonnm.selected)[1],"~",paste(names(data.variables.bootstrap.model.cfm.nonnm.selected[,2:dim(data.variables.bootstrap.model.cfm.nonnm.selected)[2]]),collapse= "+"))), data=data.variables.bootstrap.model.cfm.nonnm.selected, model="logit")
# "Probability": simulate expected values only for the rows drawn in this run.
x.result.bootstrap.model.cfm.nonnm.selected.probability<-setx(result.bootstrap.model.cfm.nonnm.selected,data=setx.data.probability,fn=NULL)
sim.result.bootstrap.model.cfm.nonnm.selected.probability<-sim(result.bootstrap.model.cfm.nonnm.selected,x=x.result.bootstrap.model.cfm.nonnm.selected.probability,num=c(100,100))
matrix.bootstrap.model.cfm.nonnm[as.numeric(names(table(selection.index))),(count.boot*3)]<-colMeans(sim.result.bootstrap.model.cfm.nonnm.selected.probability$qi$ev)
# "Prediction": simulate expected values for every row of the full dataset.
x.result.bootstrap.model.cfm.nonnm.selected.prediction<-setx(result.bootstrap.model.cfm.nonnm.selected,data=setx.data.prediction,fn=NULL)
sim.result.bootstrap.model.cfm.nonnm.selected.prediction<-sim(result.bootstrap.model.cfm.nonnm.selected,x=x.result.bootstrap.model.cfm.nonnm.selected.prediction,num=c(100,100))
matrix.bootstrap.model.cfm.nonnm[,(count.boot*3)+1]<-colMeans(sim.result.bootstrap.model.cfm.nonnm.selected.prediction$qi$ev)
}
# Export of bootstrap sample
write.table(matrix.bootstrap.model.cfm.nonnm,file="result_CFM_NoNNM_BootstrapSamples.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=TRUE)
# Per-row summary statistics across bootstrap runs.
ID.bootstrap.model.cfm.nonnm.count<-numeric(length=dim(data.table)[1])
#Probability (selected values only; rows not drawn in a run contribute NA)
bootstrap.model.cfm.nonnm.probability.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.probability.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.probability.min<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.probability.max<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.probability.sderror<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.probability.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
#Prediction (all values; every run predicts every row)
bootstrap.model.cfm.nonnm.prediction.mean<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.prediction.sd<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.prediction.min<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.prediction.max<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.prediction.sderror<-numeric(length=dim(data.table)[1])
bootstrap.model.cfm.nonnm.prediction.quantiles<-matrix(nrow=dim(data.table)[1],ncol=7)
for (count.row.variability in 1:dim(data.table)[1])
{
# Statistics on bootstrapped probability (NA entries = row not drawn in that run)
ID.bootstrap.model.cfm.nonnm.count[count.row.variability]<-length(na.omit(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(2,(bootstrap.sample.model.cfm.nonnm*3)-1,3)]))
bootstrap.model.cfm.nonnm.probability.mean[count.row.variability]<-mean(na.omit(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(3,(bootstrap.sample.model.cfm.nonnm*3),3)]))
bootstrap.model.cfm.nonnm.probability.sd[count.row.variability]<-sd(na.omit(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(3,(bootstrap.sample.model.cfm.nonnm*3),3)]))
bootstrap.model.cfm.nonnm.probability.min[count.row.variability]<-min(na.omit(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(3,(bootstrap.sample.model.cfm.nonnm*3),3)]))
bootstrap.model.cfm.nonnm.probability.max[count.row.variability]<-max(na.omit(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(3,(bootstrap.sample.model.cfm.nonnm*3),3)]))
# NOTE(review): denominator is the count of runs that drew this row, not its
# square root, so this is sd/n rather than the usual sd/sqrt(n) — confirm intent.
bootstrap.model.cfm.nonnm.probability.sderror[count.row.variability]<-bootstrap.model.cfm.nonnm.probability.sd[count.row.variability]/ID.bootstrap.model.cfm.nonnm.count[count.row.variability]
bootstrap.model.cfm.nonnm.probability.quantiles[count.row.variability,]<-quantile(na.omit(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(3,(bootstrap.sample.model.cfm.nonnm*3),3)]),probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
# Statistics on bootstrapped prediction (no NAs: every run predicts every row)
bootstrap.model.cfm.nonnm.prediction.mean[count.row.variability]<-mean(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(4,(bootstrap.sample.model.cfm.nonnm*3)+1,3)])
bootstrap.model.cfm.nonnm.prediction.sd[count.row.variability]<-sd(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(4,(bootstrap.sample.model.cfm.nonnm*3)+1,3)])
bootstrap.model.cfm.nonnm.prediction.min[count.row.variability]<-min(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(4,(bootstrap.sample.model.cfm.nonnm*3)+1,3)])
bootstrap.model.cfm.nonnm.prediction.max[count.row.variability]<-max(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(4,(bootstrap.sample.model.cfm.nonnm*3)+1,3)])
bootstrap.model.cfm.nonnm.prediction.sderror[count.row.variability]<-bootstrap.model.cfm.nonnm.prediction.sd[count.row.variability]/bootstrap.sample.model.cfm.nonnm
bootstrap.model.cfm.nonnm.prediction.quantiles[count.row.variability,]<-quantile(matrix.bootstrap.model.cfm.nonnm[count.row.variability,seq(4,(bootstrap.sample.model.cfm.nonnm*3)+1,3)],probs=c(0,0.05,0.25,0.5,0.75,0.95,1))
}
# Export of bootstrap sample statistics (header row, then one row per observation)
write.table(cbind("ID","CFM_NoNNM_NumberSelectedSamples","CFM_NoNNM_Probability_Mean","CFM_NoNNM_Probability_Sd","CFM_NoNNM_Probability_Min","CFM_NoNNM_Probability_Max","CFM_NoNNM_Probability_Sderror","CFM_NoNNM_Probability_Quantiles_0","CFM_NoNNM_Probability_Quantiles_0.05","CFM_NoNNM_Probability_Quantiles_0.25","CFM_NoNNM_Probability_Quantiles_0.5","CFM_NoNNM_Probability_Quantiles_0.75","CFM_NoNNM_Probability_Quantiles_0.95","CFM_NoNNM_Probability_Quantiles_1","CFM_NoNNM_Prediction_Mean","CFM_NoNNM_Prediction_Sd","CFM_NoNNM_Prediction_Min","CFM_NoNNM_Prediction_Max","CFM_NoNNM_Prediction_Sderror","CFM_NoNNM_Prediction_Quantiles_0","CFM_NoNNM_Prediction_Quantiles_0.05","CFM_NoNNM_Prediction_Quantiles_0.25","CFM_NoNNM_Prediction_Quantiles_0.5","CFM_NoNNM_Prediction_Quantiles_0.75","CFM_NoNNM_Prediction_Quantiles_0.95","CFM_NoNNM_Prediction_Quantiles_1"),file="result_CFM_NoNNM_BootstrapStatistics.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(identification.value,ID.bootstrap.model.cfm.nonnm.count,bootstrap.model.cfm.nonnm.probability.mean,bootstrap.model.cfm.nonnm.probability.sd,bootstrap.model.cfm.nonnm.probability.min,bootstrap.model.cfm.nonnm.probability.max,bootstrap.model.cfm.nonnm.probability.sderror,bootstrap.model.cfm.nonnm.probability.quantiles,bootstrap.model.cfm.nonnm.prediction.mean,bootstrap.model.cfm.nonnm.prediction.sd,bootstrap.model.cfm.nonnm.prediction.min,bootstrap.model.cfm.nonnm.prediction.max,bootstrap.model.cfm.nonnm.prediction.sderror,bootstrap.model.cfm.nonnm.prediction.quantiles),file="result_CFM_NoNNM_BootstrapStatistics.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
#windows()
#double.sd.histogram.variability<-hist(bootstrap.model.cfm.nonnm.probability.sd*2,breaks=seq(0,1,0.05),labels=TRUE)
#plot(double.sd.histogram.variability$counts, seq(0,0.95,0.05), type="S",ylim=c(0,1), labels=TRUE)
# Scatter of mean bootstrapped probability vs mean bootstrapped prediction;
# points should fall near the 1:1 line if the two estimates agree.
windows()
plot(bootstrap.model.cfm.nonnm.probability.mean,bootstrap.model.cfm.nonnm.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="CFM_NoNNM BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm.nonnm,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
pdf(file = "result_CFM_NoNNM_BootstrapMeansComparison.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(bootstrap.model.cfm.nonnm.probability.mean,bootstrap.model.cfm.nonnm.prediction.mean,xlab="Probability mean",ylab="Prediction mean", type="p",main="CFM_NoNNM BOOTSTRAP: Mean Probability vs Mean Prediction")
abline(a=0,b=1,col="red",lty=1,lwd=1)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm.nonnm,sep=""),side=3, padj=-0.5, adj=0.5, col="red",cex=0.8)
dev.off()
# BOOTSTRAPPED PROBABILITY - Fit parabola y = ax^2 + bx + c to (mean, 2*sd):
# variability is expected to vanish at probability 0 and 1 and peak in between.
parabola.probability.cfm.nonnm<-cbind(bootstrap.model.cfm.nonnm.probability.mean,2*bootstrap.model.cfm.nonnm.probability.sd)
parabola.probability.cfm.nonnm<-na.omit(parabola.probability.cfm.nonnm[order(parabola.probability.cfm.nonnm[,1]),])
colnames(parabola.probability.cfm.nonnm)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.probability.cfm.nonnm <- nls(parabola.probability.cfm.nonnm[,"ordinate"] ~ coeff.a*(parabola.probability.cfm.nonnm[,"abscissa"]^2) + (-1)*coeff.a*parabola.probability.cfm.nonnm[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.probability.cfm.nonnm<-predict(fit.parabola.probability.cfm.nonnm)
#coef(fit.parabola.probability.cfm.nonnm)
windows()
plot(parabola.probability.cfm.nonnm[,"abscissa"],parabola.probability.cfm.nonnm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="CFM_NoNNM Model Probability Variability (Bootstrap)")
lines(parabola.probability.cfm.nonnm[,"abscissa"],value.parabola.probability.cfm.nonnm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm.nonnm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
# Annotate the plot with the fitted equation (b = -a by construction).
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.cfm.nonnm),3),coeff.b= -round(coef(fit.parabola.probability.cfm.nonnm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_CFM_NoNNM_BootstrapProbabilityVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.probability.cfm.nonnm[,"abscissa"],parabola.probability.cfm.nonnm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped probability mean",ylab="2 Standard Deviations", type="p",main="CFM_NoNNM Model Probability Variability (Bootstrap)")
lines(parabola.probability.cfm.nonnm[,"abscissa"],value.parabola.probability.cfm.nonnm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm.nonnm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.probability.cfm.nonnm),3),coeff.b= -round(coef(fit.parabola.probability.cfm.nonnm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
# BOOTSTRAPPED PREDICTION - same constrained parabola fit on (mean, 2*sd)
parabola.prediction.cfm.nonnm<-cbind(bootstrap.model.cfm.nonnm.prediction.mean,2*bootstrap.model.cfm.nonnm.prediction.sd)
parabola.prediction.cfm.nonnm<-parabola.prediction.cfm.nonnm[order(parabola.prediction.cfm.nonnm[,1]),]
colnames(parabola.prediction.cfm.nonnm)<-c("abscissa","ordinate")
#If y has to be 0 in x=0 and x=1, this means that c=0 and a+b=0, so in our case since a<0, a has to be equal to -b
fit.parabola.prediction.cfm.nonnm <- nls(parabola.prediction.cfm.nonnm[,"ordinate"] ~ coeff.a*(parabola.prediction.cfm.nonnm[,"abscissa"]^2) + (-1)*coeff.a*parabola.prediction.cfm.nonnm[,"abscissa"], start = c("coeff.a"=-1))
value.parabola.prediction.cfm.nonnm<-predict(fit.parabola.prediction.cfm.nonnm)
#coef(fit.parabola.prediction.cfm.nonnm)
windows()
plot(parabola.prediction.cfm.nonnm[,"abscissa"],parabola.prediction.cfm.nonnm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="CFM_NoNNM Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.cfm.nonnm[,"abscissa"],value.parabola.prediction.cfm.nonnm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm.nonnm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.cfm.nonnm),3),coeff.b= -round(coef(fit.parabola.prediction.cfm.nonnm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
pdf(file = "result_CFM_NoNNM_BootstrapPredictionVariability.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(parabola.prediction.cfm.nonnm[,"abscissa"],parabola.prediction.cfm.nonnm[,"ordinate"],xlim=c(0,1),ylim=c(0,1),xlab="Bootstrapped prediction mean",ylab="2 Standard Deviations", type="p",main="CFM_NoNNM Model Prediction Variability (Bootstrap)")
lines(parabola.prediction.cfm.nonnm[,"abscissa"],value.parabola.prediction.cfm.nonnm,col="red",lwd=1.5)
mtext(paste("Number of bootstrap samples: ",bootstrap.sample.model.cfm.nonnm,sep=""),side=3, padj=-0.5, adj=0.5, col="blue",cex=1)
espr <- expression(Y == coeff.a %*% X ^2 + coeff.b %*% X)
list.espr.subs <- list(coeff.a = round(coef(fit.parabola.prediction.cfm.nonnm),3),coeff.b= -round(coef(fit.parabola.prediction.cfm.nonnm),3))
as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]])
mtext(as.expression(do.call(substitute, list(as.call(espr), list.espr.subs))[[1]]),side=1, padj=-1.5, adj=0.5,col="red",cex=1)
dev.off()
}
## Sensitivity, Specificity, Cohen's kappa plot
## Build an 8-column matrix with one row per ROC threshold; TPR/FPR come
## straight from the roc.plot output, TP/TN/FP/FN and kappa are filled in
## by the threshold loop that follows.
roc.plot.cfm.nonnm.series<-roc.plot(verification.results.cfm.nonnm,binormal=TRUE)
#str(roc.plot.cfm.series)
#roc.plot.cfm.series$plot.data
#str(roc.plot.cfm.series$plot.data)
contingency.table.matrix.cfm.nonnm<-matrix(nrow=dim(roc.plot.cfm.nonnm.series$plot.data)[1],ncol=8)
colnames(contingency.table.matrix.cfm.nonnm)<-c("Threshold","TP","TN","FP","FN","TPR","FPR","COHEN_KAPPA")
# plot.data[,1,1] = thresholds, [,2,1] = hit rate (TPR), [,3,1] = false alarm rate (FPR)
contingency.table.matrix.cfm.nonnm[,1]<-roc.plot.cfm.nonnm.series$plot.data[,1,1]
contingency.table.matrix.cfm.nonnm[,6]<-roc.plot.cfm.nonnm.series$plot.data[,2,1]
contingency.table.matrix.cfm.nonnm[,7]<-roc.plot.cfm.nonnm.series$plot.data[,3,1]
# NOTE(review): "odserved" is a typo for "observed"; the name is kept because
# it is reused throughout the threshold loop below.
values.odserved<-data.table[,2]
values.predicted<-result.cfm.nonnm$fitted.values
## For each ROC probability threshold: reclassify the fitted probabilities,
## rebuild the 2x2 contingency counts (TP/TN/FP/FN) and compute Cohen's kappa.
## TPR/FPR (matrix columns 6-7) were already copied from the roc.plot output.
for (threshold.series in seq_len(dim(roc.plot.cfm.nonnm.series$plot.data)[1]))
{
value.threshold<-contingency.table.matrix.cfm.nonnm[threshold.series,1]
# Vectorized reclassification (replaces the former element-wise loop):
# 1 when the fitted probability exceeds the current threshold, 0 otherwise.
values.probability.reclassified<-as.numeric(values.predicted > value.threshold)
#sum(values.probability.reclassified-round(values.predicted)) # Check sum: It has to be 0 if threshold is equal to 1
# Encode each observed/predicted pair as a single code:
# "00"->1 (TN), "01"->2 (FP), "10"->3 (FN), "11"->4 (TP)
series.pasted<-paste(values.odserved,values.probability.reclassified,sep="")
series.pasted<-gsub("00","1",series.pasted)
series.pasted<-gsub("01","2",series.pasted)
series.pasted<-gsub("10","3",series.pasted)
series.pasted<-gsub("11","4",series.pasted)
series.pasted<-as.numeric(series.pasted)
TP<-sum(series.pasted == 4) # True Positive (observed 1, predicted 1)
FN<-sum(series.pasted == 3) # False Negative (observed 1, predicted 0)
FP<-sum(series.pasted == 2) # False Positive (observed 0, predicted 1)
TN<-sum(series.pasted == 1) # True Negative (observed 0, predicted 0)
#TPR<-TP/(TP+FN) # Hit Rate or True Positive Rate or Sensitivity - Assigned before the for cycle using rocplot data
#FPR<-FP/(FP+TN) # False Alarm Rate or False Positive Rate or 1-Specificity
# Cohen's Kappa = (agreement-chance)/(1-chance), where agreement is the
# observed accuracy and chance is the accuracy expected from the marginals.
total.cases<-TP+TN+FP+FN
agreement<-(TP+TN)/total.cases
chance<-((((TN+FN)*(TN+FP))/total.cases)+(((TP+FP)*(TP+FN))/total.cases))/total.cases
cohen.kappa.value<-(agreement-chance)/(1-chance)
#Other
#library(vcd)
#cohen.kappa.value<-Kappa(cross.classification.table)
contingency.table.matrix.cfm.nonnm[threshold.series,2]<-TP
contingency.table.matrix.cfm.nonnm[threshold.series,3]<-TN
contingency.table.matrix.cfm.nonnm[threshold.series,4]<-FP
contingency.table.matrix.cfm.nonnm[threshold.series,5]<-FN
contingency.table.matrix.cfm.nonnm[threshold.series,8]<-cohen.kappa.value
}
# Model evaluation plot: sensitivity (TPR), specificity (1-FPR) and Cohen's
# kappa as functions of the probability threshold.
windows()
plot(roc.plot.cfm.nonnm.series$plot.data[,1,1],roc.plot.cfm.nonnm.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="CFM NoNNM MODEL EVALUATION PLOT")
points(roc.plot.cfm.nonnm.series$plot.data[,1,1],1-roc.plot.cfm.nonnm.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.cfm.nonnm.series$plot.data[,1,1], contingency.table.matrix.cfm.nonnm[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
# Same evaluation plot replayed on a PDF device for export.
pdf(file = "result_CFM_NoNNM_ModelEvaluationPlot.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
plot(roc.plot.cfm.nonnm.series$plot.data[,1,1],roc.plot.cfm.nonnm.series$plot.data[,2,1],type="p",pch=1,cex=0.6,col="red",xlim=c(0,1),ylim=c(0,1),xlab="Probability threshold",ylab="Evaluation parameter", main="CFM NoNNM MODEL EVALUATION PLOT")
points(roc.plot.cfm.nonnm.series$plot.data[,1,1],1-roc.plot.cfm.nonnm.series$plot.data[,3,1],col="dark green",pch=1,cex=0.6)
points(roc.plot.cfm.nonnm.series$plot.data[,1,1], contingency.table.matrix.cfm.nonnm[,8],col="blue",pch=1,cex=0.6)
mtext("SENSITIVITY",side=3, padj=-0.5, adj=0.01, col="red",cex=0.8)
mtext("COHEN'S KAPPA",side=3, padj=-0.5, adj=0.5, col="blue",cex=0.8)
mtext("SPECIFICITY",side=3, padj=-0.5, adj=0.99, col="dark green",cex=0.8)
dev.off()
## VALIDATION OF CFM NO NNM MODEL (matching CFM NoNNM posterior probability
## results against the temporal-validation grouping variable, i.e. landslide
## occurrence in a later period than the one used for fitting).
cross.classification.temporal.validation.cfm.nonnm<-table(validation.grouping.variable,round(result.cfm.nonnm$fitted.values),dnn=c("Observed","Predicted"))
rownames(cross.classification.temporal.validation.cfm.nonnm)<-list("No Landslide","Landslide") #Observed
colnames(cross.classification.temporal.validation.cfm.nonnm)<-list("No Landslide","Landslide") #Predicted
#str(cross.classification.temporal.validation.cfm.nonnm)
#Elaboration of coefficients of association for the validation contingency table
#load package (vcd)
library(vcd)
#help(package=vcd)
contingency.table.temporal.validation.cfm.nonnm<-table2d_summary(cross.classification.temporal.validation.cfm.nonnm)
test.table.temporal.validation.cfm.nonnm<-assocstats(cross.classification.temporal.validation.cfm.nonnm)
#Different plots for contingency table
windows()
fourfold(cross.classification.temporal.validation.cfm.nonnm, std="margin", main="TEMPORAL VALIDATION CFM NO NNM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
#Receiver Operating Characteristic (ROC) plots for one or more models.
#load package (verification)
library(verification)
# 2nd method using verify function: validation observations vs fitted probabilities
verification.temporal.validation.cfm.nonnm<-verify(validation.table[,2],result.cfm.nonnm$fitted.values, frcst.type="prob", obs.type="binary")
#summary(verification.temporal.validation.cfm.nonnm)
# ROC plot with bootstrap confidence intervals. MAY BE SLOW
area.under.roc.curve.temporal.validation.cfm.nonnm<-roc.area(validation.table[,2],result.cfm.nonnm$fitted.values)
windows()
roc.plot(verification.temporal.validation.cfm.nonnm, main = "ROC PLOT: TEMPORAL VALIDATION CFM NO NNM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[6] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.cfm.nonnm$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.cfm.nonnm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[6], sep=""), side=3, col="red", cex=0.8)
# EXPORT OF PLOT FOR VALIDATION OF CFM NO NNM MODEL (fourfold + ROC to PDF)
pdf(file = "result_CFM_NoNNM_FourfoldPlot_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
fourfold(cross.classification.temporal.validation.cfm.nonnm, std="margin", main="TEMPORAL VALIDATION CFM NO NNM MODEL", extended=TRUE, space = 0.2, margin=1, color = c(rgb(255,0,0,max=255), rgb(255,128,0,max=255), rgb(56,168,0,max=255), rgb(170,255,0,max=255), rgb(170,135,210,max=255), rgb(115,70,155,max=255)))
dev.off()
#pdf(file = "result_CFM_NoNNM_ROCPlot_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
#roc.plot(verification.temporal.validation.cfm.nonnm, main = "ROC PLOT: TEMPORAL VALIDATION CFM NO NNM MODEL", binormal = TRUE, plot = "both", extra=TRUE, legend=TRUE)
#area.under.roc.curve.temporal.validation.cfm.nonnm<-roc.area(verification.table[,2],result.cfm.nonnm$fitted.values)
#dev.off()
pdf(file = "result_CFM_NoNNM_ROCPlot_bootstrap_Temporal_Validation.pdf", width = 6, height = 6, onefile = TRUE, family = "Helvetica", fonts = NULL, version = "1.1", paper = "special", pagecentre=TRUE)
roc.plot(verification.temporal.validation.cfm.nonnm, main = "ROC PLOT: TEMPORAL VALIDATION CFM NO NNM MODEL", binormal=TRUE, plot="both", CI=TRUE, n.boot=bootstrap.sample.values[6] , alpha = 0.05, extra=TRUE, legend=TRUE)
mtext(paste("ROC area = ",round(area.under.roc.curve.temporal.validation.cfm.nonnm$A,2),"; Sample size = ",area.under.roc.curve.temporal.validation.cfm.nonnm$n.total,"; Bootstrap samples = ",bootstrap.sample.values[6], sep=""), side=3, col="red", cex=0.8)
dev.off()
# Assignation of a matching code between observed and predicted values
# calculated using the validation dataset:
# "00"->1 (TN), "01"->2 (FP), "10"->3 (FN), "11"->4 (TP)
validation.cfm.nonnm.matching.code<-paste(validation.grouping.variable,round(result.cfm.nonnm$fitted.values),sep="")
validation.cfm.nonnm.matching.code<-gsub("00","1",validation.cfm.nonnm.matching.code)
validation.cfm.nonnm.matching.code<-gsub("01","2",validation.cfm.nonnm.matching.code)
validation.cfm.nonnm.matching.code<-gsub("10","3",validation.cfm.nonnm.matching.code)
validation.cfm.nonnm.matching.code<-gsub("11","4",validation.cfm.nonnm.matching.code)
validation.cfm.nonnm.matching.code<-as.numeric(validation.cfm.nonnm.matching.code)
# EXPORT OF CFM NO NNM MODEL RESULTS
# The report is a tab-separated text file built by appending one chunk at a
# time: a title, the model coefficients, and the model/validation contingency
# tables. Two small helpers keep the write.table arguments in one place.
cfm.nonnm.report.file<-"result_CFM_NoNNM.txt"
# Append one chunk (string, vector or matrix) to the report; first.line=TRUE
# truncates the file instead of appending.
cfm.nonnm.report.write<-function(content,first.line=FALSE)
{
write.table(content,file=cfm.nonnm.report.file,append=!first.line,quote = FALSE,sep = "\t",row.names=FALSE,col.names=FALSE)
}
# Append n.lines empty separator lines to the report.
cfm.nonnm.report.blank<-function(n.lines=1)
{
for (blank.line.count in seq_len(n.lines)) cfm.nonnm.report.write("")
}
cfm.nonnm.report.write("RESULTS OF COMBINATION FORECAST LOGISTIC REGRESSION MODEL WITHOUT NNM",first.line=TRUE)
cfm.nonnm.report.blank(2)
cfm.nonnm.report.write("CFM MODEL OUTPUTS")
cfm.nonnm.report.blank()
cfm.nonnm.report.write("Logistic Regression coefficients")
# One row per model term: name and estimated (scaled) coefficient.
cfm.nonnm.report.write(cbind(names(result.cfm.nonnm$coefficients),result.cfm.nonnm$coefficients))
cfm.nonnm.report.blank()
cfm.nonnm.report.write("CONTINGENCY TABLE MODEL RESULT")
cfm.nonnm.report.blank()
# Observed-vs-predicted table (with margins) from the model fit.
cfm.nonnm.report.write(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.cfm.nonnm$table[,1,],contingency.table.cfm.nonnm$table[,2,],contingency.table.cfm.nonnm$table[,3,])))
cfm.nonnm.report.blank(2)
cfm.nonnm.report.write("CONTINGENCY TABLE VALIDATION")
cfm.nonnm.report.blank()
# Observed-vs-predicted table (with margins) from the temporal validation.
cfm.nonnm.report.write(rbind(c("","No Landslide Predicted","Landslide Predicted","Total"),cbind(c("No Landslide Observed","Landslide Observed","Total"),contingency.table.temporal.validation.cfm.nonnm$table[,1,],contingency.table.temporal.validation.cfm.nonnm$table[,2,],contingency.table.temporal.validation.cfm.nonnm$table[,3,])))
cfm.nonnm.report.blank(2)
write.table("MATCHING CODE DEFINITION",file="result_CFM_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(cbind(c("","OBSERVED NO LANDSLIDES: 0","OBSERVED LANDSLIDES: 1"), c("PREDICTED NO LANDSLIDES: 0","00 -> Code 1","10 -> Code 3"), c("PREDICTED LANDSLIDES: 1","01 -> Code 2","11 -> Code 4")),file="result_CFM_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="result_CFM_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("FINAL RESULTS",file="result_CFM_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(c("ID","GROUPING VARIABLE","MODEL POSTERIOR PROBABILITY","MODEL CLASSIFICATION","MODEL RESULT MATCHING CODE","VALIDATION GROUPING VARIABLE","VALIDATION MATCHING CODE"),cbind(identification.value,result.cfm.nonnm$y,result.cfm.nonnm$fitted.values,round(result.cfm.nonnm$fitted.values),result.cfm.nonnm.matching.code,as.numeric(levels(validation.grouping.variable))[validation.grouping.variable],validation.cfm.nonnm.matching.code)),file="result_CFM_NoNNM.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
#------------------- MODEL PROBABILITY COMPARISON --------------------#
### LDA - Other models
if(model.run.matrix[1] == "YES")
{
  # Pairwise comparison of LDA posterior probabilities against each of the
  # other fitted models. Every comparison is written to its own PDF as a
  # scatter plot with a red 1:1 reference line (points on the line mean the
  # two models assign identical probabilities to that observation).
  draw.model.comparison <- function(x, y, xlab, ylab, file)
  {
    pdf(file = file, width = 6, height = 6, onefile = TRUE,
        family = "Helvetica", fonts = NULL, version = "1.1",
        paper = "special", pagecentre = TRUE)
    plot(x, y, type = "p", pch = 1, cex = 0.85, col = "dark blue",
         xlim = c(0, 1), ylim = c(0, 1), xlab = xlab, ylab = ylab,
         main = "MODEL COMPARISON")
    abline(a = 0, b = 1, col = "red", lty = 1, lwd = 1)
    dev.off()
  }
  prob.lda <- predict.result.lda$posterior[, 2]
  # LDA - QDA
  draw.model.comparison(prob.lda, predict.result.qda$posterior[, 2],
                        "LDA Model Probability", "QDA Model Probability",
                        "result_ModelComparison_LDA_QDA.pdf")
  # LDA - LRM
  draw.model.comparison(prob.lda, result.lrm$fitted.values,
                        "LDA Model Probability", "LRM Model Probability",
                        "result_ModelComparison_LDA_LRM.pdf")
  # LDA - NNM
  draw.model.comparison(prob.lda, predict.result.nnm,
                        "LDA Model Probability", "NNM Model Probability",
                        "result_ModelComparison_LDA_NNM.pdf")
}
if(model.run.matrix[2] == "YES")
{
  # Pairwise comparison of QDA posterior probabilities against each of the
  # other fitted models. Every comparison is written to its own PDF as a
  # scatter plot with a red 1:1 reference line.
  draw.model.comparison <- function(x, y, xlab, ylab, file)
  {
    pdf(file = file, width = 6, height = 6, onefile = TRUE,
        family = "Helvetica", fonts = NULL, version = "1.1",
        paper = "special", pagecentre = TRUE)
    plot(x, y, type = "p", pch = 1, cex = 0.85, col = "dark blue",
         xlim = c(0, 1), ylim = c(0, 1), xlab = xlab, ylab = ylab,
         main = "MODEL COMPARISON")
    abline(a = 0, b = 1, col = "red", lty = 1, lwd = 1)
    dev.off()
  }
  prob.qda <- predict.result.qda$posterior[, 2]
  # QDA - LDA
  draw.model.comparison(prob.qda, predict.result.lda$posterior[, 2],
                        "QDA Model Probability", "LDA Model Probability",
                        "result_ModelComparison_QDA_LDA.pdf")
  # QDA - LRM
  draw.model.comparison(prob.qda, result.lrm$fitted.values,
                        "QDA Model Probability", "LRM Model Probability",
                        "result_ModelComparison_QDA_LRM.pdf")
  # QDA - NNM
  draw.model.comparison(prob.qda, predict.result.nnm,
                        "QDA Model Probability", "NNM Model Probability",
                        "result_ModelComparison_QDA_NNM.pdf")
}
if(model.run.matrix[3] == "YES")
{
  # Pairwise comparison of LRM fitted probabilities against each of the
  # other fitted models. Every comparison is written to its own PDF as a
  # scatter plot with a red 1:1 reference line.
  draw.model.comparison <- function(x, y, xlab, ylab, file)
  {
    pdf(file = file, width = 6, height = 6, onefile = TRUE,
        family = "Helvetica", fonts = NULL, version = "1.1",
        paper = "special", pagecentre = TRUE)
    plot(x, y, type = "p", pch = 1, cex = 0.85, col = "dark blue",
         xlim = c(0, 1), ylim = c(0, 1), xlab = xlab, ylab = ylab,
         main = "MODEL COMPARISON")
    abline(a = 0, b = 1, col = "red", lty = 1, lwd = 1)
    dev.off()
  }
  prob.lrm <- result.lrm$fitted.values
  # LRM - LDA
  draw.model.comparison(prob.lrm, predict.result.lda$posterior[, 2],
                        "LRM Model Probability", "LDA Model Probability",
                        "result_ModelComparison_LRM_LDA.pdf")
  # LRM - QDA
  draw.model.comparison(prob.lrm, predict.result.qda$posterior[, 2],
                        "LRM Model Probability", "QDA Model Probability",
                        "result_ModelComparison_LRM_QDA.pdf")
  # LRM - NNM
  draw.model.comparison(prob.lrm, predict.result.nnm,
                        "LRM Model Probability", "NNM Model Probability",
                        "result_ModelComparison_LRM_NNM.pdf")
}
if(model.run.matrix[4] == "YES")
{
  # Pairwise comparison of NNM predicted probabilities against each of the
  # other fitted models. Every comparison is written to its own PDF as a
  # scatter plot with a red 1:1 reference line.
  draw.model.comparison <- function(x, y, xlab, ylab, file)
  {
    pdf(file = file, width = 6, height = 6, onefile = TRUE,
        family = "Helvetica", fonts = NULL, version = "1.1",
        paper = "special", pagecentre = TRUE)
    plot(x, y, type = "p", pch = 1, cex = 0.85, col = "dark blue",
         xlim = c(0, 1), ylim = c(0, 1), xlab = xlab, ylab = ylab,
         main = "MODEL COMPARISON")
    abline(a = 0, b = 1, col = "red", lty = 1, lwd = 1)
    dev.off()
  }
  prob.nnm <- predict.result.nnm
  # NNM - LDA
  draw.model.comparison(prob.nnm, predict.result.lda$posterior[, 2],
                        "NNM Model Probability", "LDA Model Probability",
                        "result_ModelComparison_NNM_LDA.pdf")
  # NNM - QDA
  draw.model.comparison(prob.nnm, predict.result.qda$posterior[, 2],
                        "NNM Model Probability", "QDA Model Probability",
                        "result_ModelComparison_NNM_QDA.pdf")
  # NNM - LRM
  draw.model.comparison(prob.nnm, result.lrm$fitted.values,
                        "NNM Model Probability", "LRM Model Probability",
                        "result_ModelComparison_NNM_LRM.pdf")
}
|
33a1cbbbb17929ed828cabcbc512712055a4d3d6
|
678a532bc05214556abb1f993b867b390ed5f7ab
|
/man/hit_plotter.Rd
|
9facf807c70c8aba01cfe63a793afbdeb7a4cba9
|
[] |
no_license
|
rdocking/amlpmpsupport
|
d744aa67c8cb882e8fd022b6d2c1ba402c0210c4
|
b1f843ab41b59ca1fc54b044f23cfbfa741f2ae0
|
refs/heads/main
| 2023-02-07T19:00:58.035482
| 2021-01-02T21:56:53
| 2021-01-02T21:56:53
| 271,644,653
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 730
|
rd
|
hit_plotter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.r
\name{hit_plotter}
\alias{hit_plotter}
\title{Generate a dot-plot of expression of selected genes, facetted by selected feature}
\usage{
hit_plotter(expression_df, feature_label)
}
\arguments{
\item{expression_df}{Data frame containing expression values}
\item{feature_label}{Label in the data frame to facet by}
}
\value{
p A ggplot object containing the generated plot
}
\description{
Generate a dot-plot of expression of selected genes, facetted by selected feature
}
\examples{
df <- tibble::tibble(lab = "Label", TPM = c(10, 100, 1000), gene = c('Foo', 'Bar', 'Baz'))
hit_plotter(expression_df = df, feature_label = "lab")
}
|
8c18721ca9a287a30920c16902d30b004546568a
|
6b4a1d51f6c8b7ea51be7a1349062dfe93987fba
|
/analyzeIPLBowlers/bowlerWicketsAgainstOpposition.R
|
63f8032ea4c40e8eee63efa24bb4f891e0bc7e03
|
[] |
no_license
|
amolmishra23/IPL_CDA
|
7315f2d44c40be07647f86cc70d86ac897f45f1a
|
4c8535ce192dcc650226efb205e4c3c67e2570e6
|
refs/heads/master
| 2020-03-27T01:07:20.186591
| 2018-08-22T09:18:37
| 2018-08-22T09:18:37
| 145,683,955
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 752
|
r
|
bowlerWicketsAgainstOpposition.R
|
# Bar chart of a bowler's mean wickets per innings against each opposition.
# The innings count is appended to each opposition label, e.g. "India(12)".
# Requires dplyr and ggplot2 to be attached by the caller.
#
# df   : data frame with at least 'opposition' and 'wickets' columns.
# name : bowler name used in the plot title (default "Bowler").
bowlerWicketsAgainstOpposition <- function(df,name="Bowler"){
  # Placeholders so R CMD check does not flag the NSE column names below.
  meanWickets <- numMatches <- wickets <- opposition <- NULL
  # Mean wickets and number of innings per opposition.
  perOpposition <- summarise(group_by(df, opposition),
                             meanWickets = mean(wickets),
                             numMatches = n())
  # Append the innings count to each opposition label.
  labelled <- mutate(perOpposition,
                     opposition = paste(opposition, "(", numMatches, ")", sep = ""))
  plotTitle <- paste(name, "- Wickets against Opposition(number innings)")
  ggplot(labelled, aes(x = opposition, y = meanWickets, fill = opposition)) +
    geom_bar(stat = "identity", position = "dodge") +
    geom_hline(aes(yintercept = 2)) +
    xlab("Opposition") +
    ylab("Average wickets taken") +
    ggtitle(plotTitle) +
    theme(text = element_text(family = "sans", size = 15),
          plot.title = element_text(family = "sans", size = 25, hjust = 0.5))
}
|
b5e3ac8b3ac0650578984c57cd426b5132e88592
|
b13615a56f86eb734d513809b19b470bbddce1c7
|
/makeDatabase/import_stage/calculate_season_breaks.R
|
e57fb558eeb0c566d95a07524abe3e305a54cd08
|
[] |
no_license
|
Conte-Ecology/westBrookData
|
16766a3fbba8f0ca2342d0c2e5c9b72aacd7233e
|
7218adfdaf12e35b14030b750e8f50ffa6a81a93
|
refs/heads/master
| 2022-07-01T05:35:36.779489
| 2022-06-14T18:47:52
| 2022-06-14T18:47:52
| 47,281,574
| 0
| 1
| null | 2022-06-14T18:47:52
| 2015-12-02T18:54:15
|
R
|
UTF-8
|
R
| false
| false
| 2,006
|
r
|
calculate_season_breaks.R
|
# Pull seasonal sampling events from the database, assign each sampling
# occasion to one of four seasons by k-means clustering of its start
# day-of-year, derive season break days, and write them to 'data_season'.
# Assumes 'con' is an open DBI connection and that dplyr, lubridate,
# reshape2 (melt) and ggplot2 are attached.
sampling <- dbGetQuery(con, "SELECT * FROM data_seasonal_sampling WHERE seasonal IS TRUE;")
#eliminate river specific data for determining seasons:
# Collapse to one row per sample (west drainage only), keeping the earliest
# start and latest end day-of-year across rivers.
sampling<-sampling %>%
filter(drainage=="west") %>%
group_by(sample_name,sample_number,order,seasonal) %>%
summarize(start_julian_day=yday(min(start_date)),
end_julian_day=yday(max(end_date)),
year=min(year)) %>%
ungroup() %>%
arrange(sample_name)
#sampling[sampling$start_julian_day<20,"start_julian_day"]<-366
# sampling <- sampling[order(sampling[['start_date']]),]
# Cluster start days into 4 seasons. The fixed seed centers (90, 15, 280,
# 350) make the clustering deterministic for this data.
season_kmeans <- kmeans(x=sampling$start_julian_day, centers=c(90,15,280,350))
row_cluster <- season_kmeans[['cluster']]
cluster_centers <- season_kmeans[['centers']]
# Relabel clusters 1-4 in calendar order of their final centers.
# NOTE(review): order() is used here where rank() is the usual mapping from
# cluster id to calendar rank; the two coincide only when the permutation is
# self-inverse (true for the seed centers) -- verify labels if centers move.
season_from_cluster <- order(cluster_centers)[row_cluster]
sampling[['season']] <- season_from_cluster
# Long format with one row per (sample, start/end day) for plotting.
sample_melt <- melt(
data=sampling[,c('sample_number','order','seasonal','start_julian_day','end_julian_day','year','season')],
id.vars=c('sample_number','order','seasonal','year','season')
)
# Wrap very early days (before day 20) around to day 366 so year-end
# samples plot on the winter side of the axis.
sample_melt[ sample_melt[['value']] < 20,'value'] <- 366
# Diagnostic plot: sampling windows per year, coloured by sample number.
pl_samples_by_number <- ggplot(
data=sample_melt[sample_melt[['seasonal']],],
aes(x=value, y=year, colour=factor(sample_number), size=factor(season))
) + geom_line()
# Season break = earliest sampling start day observed within each season.
season_breaks <- aggregate(
formula = start_julian_day ~ season,
data = sampling,
FUN=function(x) sort(x)[1] #### K had the third earliest sampling date, but that made a bunch of samples overlap seasons
)
# Diagnostic plot: histogram of start days with break days (blue) and
# cluster centers (green) overlaid.
pl_sampling_points <- ggplot(
data=sampling,
aes(x=start_julian_day, fill=factor(season))
) + geom_histogram() +
geom_vline(xintercept=season_breaks[['start_julian_day']], colour='blue') +
geom_vline(xintercept=sort(cluster_centers), colour='green')
## We used the start of sampling, but we're calculating the END of the
## season.
names(season_breaks)[names(season_breaks) == 'start_julian_day'] <- 'end_julian_day'
# Persist the season break table (replacing any previous version).
dbWriteTable(conn=con, name='data_season', value=season_breaks,
row.names=FALSE, overwrite=TRUE, append=FALSE)
|
f25b8fbd79b5a0d69d327fc3e6184bda5118bccc
|
139c32d5a68b74d9b1bab85b6ef81024bfab48d3
|
/man/cnvEQTL.Rd
|
c951d7b170a1a3b08a8357eb870232bc15398566
|
[] |
no_license
|
marromesc/CNVRanger
|
757c45c119cad5b1295488fa537b4cc1c95e5070
|
370085a3aad3a66acb1f46248e5588347f12dd64
|
refs/heads/master
| 2023-05-27T05:36:41.705717
| 2021-05-09T22:03:15
| 2021-05-09T22:03:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,487
|
rd
|
cnvEQTL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expr_assoc.R
\name{cnvEQTL}
\alias{cnvEQTL}
\title{CNV-expression association analysis}
\usage{
cnvEQTL(
cnvrs,
calls,
rcounts,
data,
window = "1Mbp",
multi.calls = .largest,
min.samples = 10,
de.method = c("edgeR", "limma"),
padj.method = "BH",
filter.by.expr = TRUE,
verbose = FALSE
)
}
\arguments{
\item{cnvrs}{A \code{\linkS4class{GRanges}} or character object containing
the summarized CNV regions as e.g. obtained with \code{\link{populationRanges}}.
Alternatively, the assay name if the 'data' argument is provided.}
\item{calls}{Either a \code{\linkS4class{GRangesList}} or
\code{\linkS4class{RaggedExperiment}} storing the individual CNV calls for
each sample. Alternatively, the assay name if 'data' is provided.}
\item{rcounts}{A \code{\linkS4class{RangedSummarizedExperiment}} or
character name storing either the raw RNA-seq read counts in a rectangular
fashion (genes x samples). Alternatively, the assay name if 'data' is provided.}
\item{data}{(optional) A \code{MultiAssayExperiment} object
with `cnvrs`, `calls`, and `rcounts` arguments corresponding to assay names.}
\item{window}{Numeric or Character. Size of the genomic window in base pairs
by which each CNV region is extended up- and downstream. This determines which
genes are tested for each CNV region. Character notation is supported for
convenience such as "100kbp" (same as 100000) or "1Mbp" (same as 1000000).
Defaults to \code{"1Mbp"}. Can also be set to \code{NULL} to test against all
genes included in the analysis.}
\item{multi.calls}{A function. Determines how to summarize the
CN state in a CNV region when there are multiple (potentially conflicting)
calls for one sample in that region. Defaults to \code{.largest}, which
assigns the CN state of the call that covers the largest part of the CNV
region tested. A user-defined function that is passed on to
\code{\link{qreduceAssay}} can also be provided for customized behavior.}
\item{min.samples}{Integer. Minimum number of samples with at least one call
overlapping the CNV region tested. Defaults to 10. See details.}
\item{de.method}{Character. Differential expression method.
Defaults to \code{"edgeR"}.}
\item{padj.method}{Character. Method for adjusting p-values to multiple testing.
For available methods see the man page of the function \code{\link{p.adjust}}.
Defaults to \code{"BH"}.}
\item{filter.by.expr}{Logical. Include only genes with
sufficiently large counts in the DE analysis? If TRUE, excludes genes not
satisfying a minimum number of read counts across samples using the
\code{\link{filterByExpr}} function from the edgeR package.
Defaults to TRUE.}
\item{verbose}{Logical. Display progress messages? Defaults to \code{FALSE}.}
}
\value{
A \code{\linkS4class{DataFrame}} containing measures of association for
each CNV region and each gene tested in the genomic window around the CNV region.
}
\description{
Testing CNV regions for effects on the expression level of genes in defined
genomic windows.
}
\details{
Association testing between CNV regions and RNA-seq read counts is carried
out using edgeR, which applies generalized linear models (GLMs) based on the
negative-binomial distribution while incorporating normalization factors for
different library sizes.
In the case of only one CN state deviating from 2n for a CNV region under
investigation, this reduces to the classical 2-group comparison.
For more than two states (e.g. 0n, 1n, 2n), edgeR’s ANOVA-like test is applied
to test all deviating groups for significant expression differences relative
to 2n.
To avoid artificial effects due to low expression of a gene or insufficient
sample size in deviating groups, it is typically recommended to exclude from
the analysis (i) genes with fewer than r reads per million reads mapped
(cpm, counts per million) in the maximally expressed sample group,
and (ii) CNV regions with fewer than s samples in a group deviating from 2n.
Use the \code{filter.by.expr} and \code{min.samples} arguments, respectively.
When testing local effects (adjacent or coinciding genes of a CNV region),
suitable thresholds for candidate discovery are r = 3, s = 4, and a nominal
significance level of 0.05; as such effects have a clear biological indication
and the number of genes tested is typically small.
For distal effects (i.e. when testing genes far away from a CNV region)
more stringent thresholds such as r = 20 and s = 10 for distal effects in
conjunction with multiple testing correction using a conservative adjusted
significance level such as 0.01 is typically recommended (due to power
considerations and to avoid detection of spurious effects).
}
\examples{
# (1) CNV calls
states <- sample(c(0,1,3,4), 17, replace=TRUE)
calls <- GRangesList(
sample1 = GRanges( c("chr1:1-10", "chr2:15-18", "chr2:25-34"), state=states[1:3]),
sample2 = GRanges( c("chr1:1-10", "chr2:11-18" , "chr2:25-36"), state=states[4:6] ),
sample3 = GRanges( c("chr1:2-11", "chr2:14-18", "chr2:26-36"), state=states[7:9] ),
sample4 = GRanges( c("chr1:1-12", "chr2:18-35" ), state=states[10:11] ),
sample5 = GRanges( c("chr1:1-12", "chr2:11-17" , "chr2:26-34"), state=states[12:14] ) ,
sample6 = GRanges( c("chr1:1-12", "chr2:12-18" , "chr2:25-35"), state=states[15:17] )
)
# (2) summarized CNV regions
cnvrs <- populationRanges(calls, density=0.1)
# (3) RNA-seq read counts
genes <- GRanges(c("chr1:2-9", "chr1:100-150", "chr1:200-300",
"chr2:16-17", "chr2:100-150", "chr2:200-300", "chr2:26-33"))
y <- matrix(rnbinom(42,size=1,mu=10),7,6)
names(genes) <- rownames(y) <- paste0("gene", 1:7)
colnames(y) <- paste0("sample", 1:6)
library(SummarizedExperiment)
rse <- SummarizedExperiment(assays=list(counts=y), rowRanges=granges(genes))
# (4) perform the association analysis
res <- cnvEQTL(cnvrs, calls, rse, min.samples=1, filter.by.expr=FALSE)
}
\references{
Geistlinger et al. (2018) Widespread modulation of gene expression
by copy number variation in skeletal muscle. Sci Rep, 8(1):1399.
}
\seealso{
\code{\link{findOverlaps}} to find overlaps between sets of genomic
regions,
\code{\link{qreduceAssay}} to summarize ragged genomic location data
in defined genomic regions,
\code{\link{glmQLFit}} and \code{\link{glmQLFTest}} to conduct negative
binomial generalized linear models for RNA-seq read count data.
}
\author{
Ludwig Geistlinger <Ludwig.Geistlinger@sph.cuny.edu>
}
|
3c9943b052465f8d4daffb334a41f5156f661b1e
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/bomb/p5-1.pddl_planlen=13/p5-1.pddl_planlen=13.R
|
56b9d02bd5f814860f03956d010c4545bfc7a847
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 69
|
r
|
p5-1.pddl_planlen=13.R
|
70b185c3c2722727ffc17c28ff41d241 p5-1.pddl_planlen=13.qdimacs 414 964
|
07c6d41d67652b07072b3e82147eeb6ceb91820e
|
fd2ccde676cc84644d4e3fd340dfde832d13d94a
|
/selective_sweep/pi.R
|
05f855b02b372d847b701a77fb8b04123f6bbb6c
|
[] |
no_license
|
wangchengww/pack-TIR
|
e601789bcde68708753ee1e4a1781422872e5eb6
|
5529e0793d0ed7bfe190f26b74f3301bf669d986
|
refs/heads/master
| 2023-05-27T06:18:30.491206
| 2020-09-23T12:23:15
| 2020-09-23T12:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 280
|
r
|
pi.R
|
# Plot nucleotide diversity (pi) in windows around position X:2639224
# (50 kb context), one coloured line per group, with a vertical marker at
# the focal position.
library(ggplot2)

# Per-window pi values: position, pi and grouping column.
PIX50000 <- read.table("X-50000.pi", col.names = c("pos", "pi", "group"))

PX50K <- ggplot(PIX50000, aes(pos, pi, group = group, colour = group)) +
  geom_point() +
  geom_line() +
  geom_vline(xintercept = 2639224, colour = "lightblue") +
  labs(x = "POS(X:2639224)", title = "X:2639224-50K")
|
a68bf8bf6e4e308f2ea46895b93da2e5e738d4c3
|
71db4a78c8a989b58a0d839a77d58d1774dbec5f
|
/Code/R/apply1.R
|
54c1a6a39bf4bca2d9efe0b339c39b147a269a32
|
[] |
no_license
|
saulmoore1/MSc_CMEE
|
906a7bdf09528c39c0daf6e37f2d722b8ad7bd3d
|
5bfd0a5f696c59a092aa9df5536169d905d7ab69
|
refs/heads/master
| 2022-04-30T20:14:59.660442
| 2022-03-30T11:28:15
| 2022-03-30T11:28:15
| 158,312,708
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 807
|
r
|
apply1.R
|
#!/usr/bin/env R
# Demonstration of R's 'apply': mapping a function over the margins of a
# matrix (MARGIN = 1 for rows, MARGIN = 2 for columns). The '*apply' family
# vectorises this kind of code so no explicit loop is needed.

# A 10 x 10 matrix of standard-normal draws to work on.
M <- matrix(rnorm(100), 10, 10)

# Mean of each row.
RowMeans <- apply(M, MARGIN = 1, FUN = mean)
print(RowMeans)

# Variance of each row.
RowVars <- apply(M, MARGIN = 1, FUN = var)
print(RowVars)

# Mean of each column.
ColMeans <- apply(M, MARGIN = 2, FUN = mean)
print(ColMeans)

# For simple row/column means or sums over a 2D matrix, prefer the highly
# optimised rowMeans(), colMeans(), rowSums() and colSums() built-ins.
|
8da6a505ca1fd9f5884e3b00777949bed0942533
|
e210b15fba8f7f29765c0c2fb0e36ab0916cc6fd
|
/server.R
|
69fe33f987d1b1d5b0df440572abf00423b81307
|
[] |
no_license
|
tpgjs66/Albatross4Viewer
|
65d7a3b91e0b5d90f0d8cafbd9473d48e8391bba
|
c59d7646e46ae45d76a7c25b512639c784deb218
|
refs/heads/master
| 2020-03-19T07:58:34.317630
| 2019-02-25T17:08:28
| 2019-02-25T17:08:28
| 136,165,131
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 80,660
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
server <- function(input, output, session){
## Maximum file input size: 500 MB
options(shiny.maxRequestSize = 500*1024^2)
## Load household data.
## Reactive returning the household attribute table: the bundled demo file
## when nothing has been uploaded, otherwise the uploaded CSV parsed with
## the user-selected header/separator/quote options.
myhh <- reactive({
if (is.null(input$household)) {
# No upload yet: fall back to the packaged example data.
hh <- read.csv("data/household-gen.txt")
return(hh)
}
hh <- read.csv(input$household$datapath,
header = input$householdheader,
sep = input$householdsep,
quote = input$householdquote)
# assign('hh',hh,envir=.GlobalEnv)
return(hh)
})
## Load household coordinates data.
## Same pattern as myhh(): bundled demo coordinates unless a file has been
## uploaded, in which case the upload is parsed with its own read options.
myhhcoords <- reactive({
if (is.null(input$hhcoords)) {
hhcoords <- read.csv("data/hh-coords.txt")
} else {
hhcoords <- read.csv(input$hhcoords$datapath,
header = input$hhcoordsheader,
sep = input$hhcoordssep,
quote = input$hhcoordsquote)
# assign('hh',hh,envir=.GlobalEnv)
}
return(hhcoords)
})
## Load schedule data.
## Returns the bundled demo schedule when no file is uploaded; otherwise
## parses the uploaded CSV with the user-selected header/separator/quote
## options and post-processes it with timeconverter() (defined below in
## this server function), which turns "HH:MM+D" clock strings into minutes
## and adds a unique SchedID. Previously this reactive duplicated the whole
## conversion logic inline; it now delegates to the shared helper.
mysched <- reactive({
  if (is.null(input$schedule)) {
    sched <- read.csv("data/schedule-gen.txt")
    return(sched)
  }
  sched <- read.csv(input$schedule$datapath,
                    header = input$scheduleheader,
                    sep = input$schedulesep,
                    quote = input$schedulequote)
  # Convert clock times to minutes and attach SchedID via the shared helper.
  sched <- timeconverter(sched)
  return(sched)
})
## Load schedule coordinates data.
## Returns the bundled demo coordinates when no file has been uploaded,
## otherwise the uploaded CSV parsed with the user-selected
## header/separator/quote options.
myschedcoords <- reactive({
  if (is.null(input$scheduleCoords)) {
    read.csv("data/sched-coords.txt")
  } else {
    read.csv(input$scheduleCoords$datapath,
             header = input$scheduleCoordsheader,
             sep = input$scheduleCoordssep,
             quote = input$scheduleCoordsquote)
  }
})
## Convert schedule clock times to minutes and add a unique episode ID.
##
## Each of LeaveTime/BeginTime/EndTime is a string "HH:MM+D", where D is a
## day offset; it is converted to total minutes (HH*60 + MM + D*24*60).
## A SchedID of the form "HHID-MemID-EpisodeID" is also added.
## The original implementation repeated the same parsing block three times;
## it is factored into one internal helper here.
##
## sched : data.frame with character "HH:MM+D" columns LeaveTime, BeginTime,
##         EndTime plus HHID, MemID and EpisodeID columns.
## Returns the same data.frame with the three time columns as integer
## minutes and a new SchedID column.
timeconverter <- function(sched) {
  # Parse one "HH:MM+D" character vector into minutes since day-0 midnight.
  to.minutes <- function(times) {
    parts <- strsplit(as.character(times), ":|\\+")
    parts <- data.frame(matrix(unlist(parts), nrow = length(parts), byrow = TRUE))
    hours <- as.integer(as.character(parts$X1))
    mins <- as.integer(as.character(parts$X2))
    days <- as.integer(as.character(parts$X3))
    hours * 60 + mins + days * 24 * 60
  }
  sched$LeaveTime <- to.minutes(sched$LeaveTime)
  sched$BeginTime <- to.minutes(sched$BeginTime)
  sched$EndTime <- to.minutes(sched$EndTime)
  # Give unique ID "HHID-MemID-EpisodeID".
  sched$SchedID <- do.call(paste, c(sched[c("HHID", "MemID", "EpisodeID")], sep = "-"))
  return(sched)
}
## Load 4 digits PPCS data.
## Reactive returning the 4-digit postcode polygons as a Spatial object,
## read via rgdal. Falls back to the bundled shapefile when no upload is
## present.
myppcs <- reactive({
shpdf <- input$shpFilePc4
if (is.null(shpdf)) {
ppcs <- rgdal::readOGR("data/ppcs_single.shp",layer = "ppcs_single", GDAL1_integer64_policy = TRUE)
return(ppcs)
}
# Shiny stores each uploaded file under a temporary name; rename the parts
# back to their original names so the .shp can find its .dbf/.shx/.prj
# sidecar files in the same directory.
tempdirname <- dirname(shpdf$datapath[1])
for(i in 1:nrow(shpdf)){
file.rename(shpdf$datapath[i], paste0(tempdirname, "/", shpdf$name[i]))
}
# NOTE(review): the layer name "ppcs_single" is assumed for uploads too --
# confirm uploaded shapefiles always use this layer name.
ppcs <- rgdal::readOGR(paste(tempdirname, shpdf$name[grep(pattern = "*.shp$", shpdf$name)], sep = "/"),
layer = "ppcs_single", GDAL1_integer64_policy = TRUE)
# ppcs <- spTransform(ppcs, CRS("+init=EPSG:4326"))
# # Simplifying shape file
# ppcs <- tmaptools::simplify_shape(ppcs)
# assign('ppcs',ppcs,envir=.GlobalEnv)
return(ppcs)
})
# ## Load 6 digits PPCS land-use data and make it global
# pc6sf <<- sf::read_sf("data/PC6_BBG2012.shp",
# layer = "PC6_BBG2012")
# pc6sf$PC4 <<- substr(pc6sf$Postcode,1,4)
## Output household data.
## Head-or-all preview of the household table, driven by the
## 'householddisp' radio selection.
output$previewHousehold <- renderTable({
req(input$household)
if (input$householddisp == "head") {
hh <- head(myhh())
} else {
hh <- myhh()
}
return(hh)
})
## One-line summary: number of distinct households in the upload.
output$previewHouseholdTotal <- renderText({
req(input$household)
hh <- myhh()
hhnum <- length(unique(hh$HHID))
return((paste(hhnum, "households have been found.")))
})
## Full household table with per-column filters. Falls back to the bundled
## demo data (after showing an error notification) when no file has been
## uploaded.
output$householdTotal <- DT::renderDataTable({
  # Create input file error message
  if (is.null(input$household)) {
    showNotification("Please provide input files! (household)",
                     type = "error",
                     duration = 5)
    hh <- read.csv("data/household-gen.txt")
    return(hh)
  }
  req(input$household)
  hh <- myhh()
  # Use TRUE rather than the reassignable shorthand T.
  hh <- DT::datatable(hh, filter = "top", options = list(scrollX = TRUE))
  return(hh)
})
## Output schedule data
output$previewSchedule <- renderTable({
req(input$schedule)
if (input$scheduledisp == "head") {
sched <- head(mysched())
} else {
sched <- read.csv(input$schedule$datapath,
header = input$scheduleheader,
sep = input$schedulesep,
quote = input$schedulequote)
}
return(sched)
})
output$previewScheduleTotal <- renderText({
req(input$schedule)
sched <- mysched()
schednum <- length(sched$EpisodeID)
hhnum <- length(unique(sched$HHID))
return(paste(hhnum, "households have been found with ",
schednum ,"activity episodes."))
})
output$scheduleTotal <- DT::renderDataTable({
# Create input file error message
if (is.null(input$schedule)) {
showNotification("Please provide input files! (schedule)",
type = "error",
duration = 5)
sched <- read.csv("data/schedule-gen.txt")
return(sched)
}
req(input$schedule)
sched <- mysched()
sched <- DT::datatable(sched, filter = "top",options = list(scrollX = TRUE))
return(sched)
})
## Output schedule coordinates data
output$previewScheduleCoords <- renderTable({
req(input$scheduleCoords)
if (input$scheduleCoordsdisp == "head") {
sched <- head(myschedcoords())
} else {
sched <- read.csv(input$scheduleCoords$datapath,
header = input$scheduleCoordsheader,
sep = input$scheduleCoordssep,
quote = input$scheduleCoordsquote)
}
return(sched)
})
output$previewScheduleTotalCoords <- renderText({
req(input$scheduleCoords)
sched <- myschedcoords()
schednum <- length(sched$EpisodeID)
hhnum <- length(unique(sched$HHID))
return(paste(hhnum, "households have been found with ",
schednum ,"activity episodes."))
})
##############################################################################
########### List schedule OD ##################
##############################################################################
filterschedod <- eventReactive(input$submitscheduleod,{
# Load schedule file
sched <- mysched()
# Filter schedule by input selection
sched <- sched[sched$ActivityType %in% input$listschedact,]
sched <- sched[sched$Mode %in% input$listschedmode,]
sched <- sched[which((sched$BeginTime > input$listschedtime[1] & sched$BeginTime < input$listschedtime[2])|
(sched$EndTime > input$listschedtime[1] & sched$EndTime < input$listschedtime[2])),]
# Convert OD matrix to pairwise column
flows <- as.data.frame(table(sched$OrigLoc,sched$DestLoc))
colnames(flows) <- c("origin","destination","flow")
flows$origin <- as.integer(levels(flows$origin))[flows$origin]
flows$destination <- as.integer(levels(flows$destination))[flows$destination]
flows$flow <- as.integer(flows$flow)
flows
})
  # Filterable data table of O-D trip counts, plus a CSV download handler.
  # Falls back to the bundled example schedule when no file was uploaded.
  output$scheduleod <- DT::renderDataTable({
    # Create input file error message
    if (is.null(input$schedule)) {
      showNotification("Please provide input files! (schedule)",
                       type = "error",
                       duration = 5)
      # Load schedule file
      sched <- read.csv("data/schedule-gen.txt")
      # Filter schedule by input selection
      sched <- sched[sched$ActivityType %in% input$listschedact,]
      sched <- sched[sched$Mode %in% input$listschedmode,]
      # sched <- sched[which((sched$BeginTime > input$listschedtime[1] & sched$BeginTime < input$listschedtime[2])|
      #                        (sched$EndTime > input$listschedtime[1] & sched$EndTime < input$listschedtime[2])),]
      # Convert OD matrix to pairwise column
      flows <- as.data.frame(table(sched$OrigLoc,sched$DestLoc))
      colnames(flows) <- c("origin","destination","flow")
      # table() returns factor columns; recover numeric postcodes and counts.
      flows$origin <- as.integer(levels(flows$origin))[flows$origin]
      flows$destination <- as.integer(levels(flows$destination))[flows$destination]
      flows$flow <- as.integer(flows$flow)
      flowsDT <- DT::datatable(flows, filter = "top")
      return(flowsDT)
    }
    # Filter O-D pair by input selection
    flows <- filterschedod()
    flowsDT <- DT::datatable(flows, filter = "top")
    # NOTE(review): registering the download handler inside a render
    # expression re-registers it on every re-render. It works, but a
    # top-level downloadHandler would be cleaner.
    output$downloadDTsched <- downloadHandler(
      filename = function() {
        paste("DTschedule", ".csv", sep = "")
      },
      content = function(file) {
        write.csv(flows, file, row.names = FALSE)
      }
    )
    return(flowsDT)
  })
##############################################################################
########### Leaflet household map ##################
##############################################################################
filteredhh <- eventReactive(input$submitmaphh,{
hh <- myhhcoords()
# hh <- read.csv("data/hh-coords.txt")
})
  # Household map: PC4 polygons, household point locations, and a household-
  # density choropleth, with layer controls and a mini-map. Rebuilt whenever
  # filteredhh() (driven by the submit button) invalidates.
  output$maphh <- renderLeaflet({
    # # Create input file error message
    # if (is.null(input$household) | is.null(input$shpFilePc4)) {
    #   showNotification("Please provide input files! (household / shp file)",
    #                    type = "error",
    #                    duration = 5)
    #   # return()
    # }
    # Load household data
    hh <- filteredhh()
    # Load ppcs data
    ppcs <- myppcs()
    # Sample ppcs only where households are located
    ppcsSample <- subset(ppcs, ppcs@data$PC4 %in% hh$Home)
    # Add number of household within 4 ppcs
    ppcsSample$Household[match(names(table(hh$Home)),ppcsSample$PC4)]<- table(hh$Home)
    # Add household density by 4 ppc (households per km^2; Shape_Area in m^2)
    ppcsSample$HHDensity <- round(ppcsSample$Household/
                                    (ppcsSample$Shape_Area / 10^6),
                                  digits = 2)
    # ########### Loop for getting household location coordinate #################
    # ### The household file above didn’t have coordinates just 4 ppc area codes.
    # ### Here is a lookup that provides those and randomly disribute households.
    #
    # coords <- c()
    # hhid <- c()
    # for (i in ppcsSample@data$PC4) {
    #   # total household within PC4
    #   n <- sum(ppcsSample$Household[ppcsSample$PC4 == i], na.rm = TRUE)
    #   # get household id within PC4
    #   hhid <- append(hhid,hh$HHID[hh$Home == i])
    #
    #   polygon <- ppcsSample[ppcsSample@data$PC4 == i,]@polygons
    #
    #   chosenPolygon <- 1
    #   for (j in 1:length(polygon)) {
    #     if (j > 1) {
    #       if (polygon[[j]]@area > polygon[[j-1]]@area){
    #         chosenPolygon <- j
    #       }
    #     }
    #   }
    #
    #   if (class(polygon) == "list" & length(polygon) > 1) { ##For multi-polygons
    #     polygon <- polygon[[chosenPolygon]]
    #     if (length(polygon@Polygons) == 1) {
    #
    #     } else {
    #       chosen <- (polygon@plotOrder)[1]
    #       polygon <- polygon@Polygons[[chosen]]
    #     }
    #   } else {
    #     polygon <- polygon[[chosenPolygon]]
    #     if (length(polygon@Polygons) == 1) {
    #
    #     } else {
    #       chosen <- (polygon@plotOrder)[1]
    #       polygon <- polygon@Polygons[[chosen]]
    #     }
    #   }
    #   coords <- rbind(coords,spsample(polygon, n = n, type = "random")@coords)
    # }
    # hh$Lng <- coords[,1]
    # hh$Lat <- coords[,2]
    ############################################################################
    # ############# Loop for getting household PC6 coordinate ####################
    # ### The household file above didn’t have coordinates just 4 ppc area codes.
    # ### Here is a lookup that provides those and randomly disribute households.
    #
    #
    # coords <- c()
    # hhpc6 <- c()
    #
    # withProgress(message = paste("Sampling location for ", nrow(hh),
    #                              "households..."), value = 0, {
    #
    #   for (i in 1:nrow(hh)) {
    #     # Increment the progress bar, and update the detail text.
    #     incProgress(1/nrow(hh),
    #                 detail = paste("Sampled a location for",i,"-th household out of",
    #                                nrow(hh)))
    #
    #     # Check if PC6 covers all PC4 polygons
    #     try(if (!all(is.element(hh$Home,unique(pc6sf$PC4))))
    #       stop("PC6 geometry does not contain all polygons in PC4!")
    #     )
    #
    #     # filter pc6 where household belongs and residential area
    #     pc6 <- pc6sf[which(pc6sf$PC4 == hh$Home[i] & pc6sf$BG2012_maj == 20),]
    #
    #     # Convert sf to sp
    #     pc6sp <- as(pc6,"Spatial")
    #
    #     # get random coordinates within filtered pc6
    #     coordsTmp <- spsample(x = pc6sp, n = 1, type = "random", iter = 30,
    #                           prob = pc6sp$Aantal_adr, replace = TRUE)
    #
    #     # Assign PC6 Home location and coordinates
    #     hhpc6 <- append(hhpc6,over(coordsTmp,pc6sp)$PC6)
    #     coords <- rbind(coords,coordsTmp@coords)
    #   }
    #
    # })
    #
    # hh$Lng <<- coords[,1]
    # hh$Lat <<- coords[,2]
    # hh$PC6 <<- hhpc6
    ############################################################################
    ## Label setting (HTML tooltips for the PC4 polygons)
    labels <- sprintf(
      "<strong>PPC: %s</strong><br/>
      # of Households: %g <br/>
      Area: %g km<sup>2</sup><br/>
      Household density: % g households/km<sup>2</sup>",
      ppcsSample$PC4,ppcsSample$Household,
      ppcsSample$Shape_Area / 10^6,
      ppcsSample$HHDensity
    ) %>% lapply(htmltools::HTML)
    # NOTE(review): `min`/`max` below shadow the base functions and appear
    # unused in the rest of this expression — candidates for removal.
    min <- min(ppcsSample$HHDensity,na.rm=T)
    max <- max(ppcsSample$HHDensity,na.rm=T)
    # Choropleth bins: density octiles (9 quantile breaks -> 8 bins).
    bins <- round(quantile(ppcsSample$HHDensity,probs = seq(0,1,length=9),
                           na.rm = TRUE),
                  digits = 2)
    pal <- colorBin("YlOrRd", domain = ppcsSample$HHDensity, bins = bins)
    ############################################################################
    ############################################################################
    # Write leaflet()
    leaflet() %>%
      setView(lng=5.00 , lat =52.00, zoom=8) %>%
      # Base groups
      addTiles(group = "OSM (default)",
               options = providerTileOptions(noWrap = TRUE)) %>%
      addProviderTiles(group = "Toner Lite", providers$Stamen.TonerLite) %>%
      # Overlay groups
      addPolygons(data = ppcsSample,
                  group = "4-digit postcode area",
                  color = "#444444",
                  weight = 1,
                  smoothFactor = 0.5,
                  opacity = 1.0,
                  fill = TRUE,
                  fillColor = "#A9F5BC",
                  fillOpacity = 0.5,
                  label = labels,
                  labelOptions = labelOptions(
                    style = list("font-weight" = "normal", padding = "3px 8px"),
                    textsize = "15px",
                    direction = "auto"),
                  highlightOptions = highlightOptions(color = "white",
                                                      weight = 3,
                                                      bringToFront = TRUE)) %>%
      # One circle per household; layerId = HHID enables the click handler.
      addCircles(data = hh,
                 group = "Household location",
                 lng = ~Lng,
                 lat = ~Lat,
                 color = "blue",
                 radius = 50,
                 layerId = ~HHID,
                 # radius = 3,
                 # icon = icons,
                 label = ~(paste("HHID: ",as.character(HHID))),
                 highlightOptions = highlightOptions(color = "white",
                                                     weight = 3,
                                                     bringToFront = TRUE)) %>%
      addPolygons(data = ppcsSample,
                  group = "Household choropleth",
                  color = "#444444",
                  weight = 1,
                  smoothFactor = 0.5,
                  opacity = 1.0,
                  fill = TRUE,
                  fillColor = pal(ppcsSample$HHDensity),
                  fillOpacity = 0.5,
                  label = labels,
                  labelOptions = labelOptions(
                    style = list("font-weight" = "normal", padding = "3px 8px"),
                    textsize = "15px",
                    direction = "auto"),
                  highlightOptions = highlightOptions(color = "white",
                                                      weight = 3,
                                                      bringToFront = TRUE)) %>%
      # Add legend for Household choropleth layer
      addLegend(data = ppcsSample,
                group = "Household choropleth",
                pal = pal,
                values = ppcsSample$HHDensity,
                opacity = 0.7,
                title = "Household Density",
                position = "bottomright") %>%
      # Layer Control
      addLayersControl(
        baseGroups = c("OSM (default)", "Toner Lite"),
        overlayGroups = c("4-digit postcode area",
                          "Household choropleth",
                          "Household location"),
        options = layersControlOptions(collapsed = FALSE)) %>%
      # Hide some layers at first to reduce loading time
      hideGroup("4-digit postcode area") %>%
      hideGroup("Household choropleth") %>%
      # hideGroup("Household location") %>%
      addMiniMap(toggleDisplay = TRUE, position = "bottomleft")
  })
observeEvent(input$maphh_shape_click, {
click <- input$maphh_shape_click
# Ignore other shapes than circle shape
# Note: No layerIDs are assigned to polygons for postcode
if (is.null(click) | is.null(click$id)){
return()
}
output$clickedhhId<-renderText({
text <- paste("You've selected Household", click$id)
text
})
output$clickedhhTable <- DT::renderDataTable({
hh <- myhhcoords()
table <- (subset(hh,HHID == click$id))
table <- DT::datatable(data = (t(table)), colnames = "",
options = list(paging = F, searching = F, pagelength = 25))
table
})
## Add pulse marker to the clicked household
proxy <- leafletProxy("maphh")
if (click$id != "Selected") {
proxy %>% setView(lng = click$lng, lat = click$lat, input$maphh_zoom, zoom = 13)
proxy %>% addPulseMarkers(lng = click$lng,
lat = click$lat,
layerId = "Selected",
icon = makePulseIcon(heartbeat = 1))
} else {
# Remove previously clicked pulse marker
proxy %>% removeMarker(layerId="Selected")
}
})
##############################################################################
############ Leaflet Activity location #################
##############################################################################
filterschedmapactloc <- eventReactive(input$submitmapactloc,{
# Load schedule file
# sched <- mysched()
sched <- myschedcoords()
sched <- timeconverter(sched)
# Filter schedule by input selection
sched <- sched[sched$ActivityType %in% input$mapactlocact,]
sched <- sched[sched$Mode %in% input$mapactlocmode,]
sched <- sched[sched$Charging %in% input$mapactloccharging,]
sched <- sched[which((sched$BeginTime > input$mapactloctime[1] & sched$BeginTime < input$mapactloctime[2])|
(sched$EndTime > input$mapactloctime[1] & sched$EndTime < input$mapactloctime[2])),]
sched
})
  # Activity-location map: PC4 polygons plus one circle per filtered
  # activity episode. Rebuilt when filterschedmapactloc() invalidates.
  output$mapactloc <- renderLeaflet({
    # # Create input file error message
    # if (is.null(input$schedule) | is.null(input$shpFilePc4)) {
    #   showNotification("Please provide input files! (schedule / shp file)",
    #                    type = "error",
    #                    duration = 5)
    #   # return()
    # }
    # NOTE(review): <<- writes `sched` to an enclosing/global environment;
    # other handlers appear to rely on it, but passing data through a
    # reactive would be safer. Confirm before refactoring.
    sched <<- filterschedmapactloc()
    # Print total number of activities
    if (nrow(sched) == 0) {
      showNotification("No activity was found. Please choose items as per panels above.",
                       type = "message",
                       duration = 5)
      return()
    }
    # Load shape file
    ppcs <- myppcs()
    # Sample ppcs only where activities are occured.
    ppcsSample <- subset(ppcs, ppcs@data$PC4 %in% sched$DestLoc)
    # Add number of activities within 4 ppcs
    ppcsSample$NumActs <- table(sched$DestLoc)[match(ppcsSample$PC4,names(table(sched$DestLoc)))]
    # Add household density by 4 ppc (activities per km^2; Shape_Area in m^2)
    ppcsSample$ActDensity <- round(ppcsSample$NumActs/
                                     (ppcsSample$Shape_Area / 10^6),
                                   digits = 2)
    # Add postcode label
    labelsPpcs <- sprintf(
      "<strong>PPC: %s</strong><br/>",
      ppcsSample$PC4
    ) %>% lapply(htmltools::HTML)
    showNotification(paste(nrow(sched),"activities were found in",length(unique(sched$HHID)),
                           "households"),
                     type = "message",
                     duration = 5)
    # ##########################################################################
    # ############# Loop for getting activity PC6 coordinate ###################
    # ### The schedule file above didn’t have coordinates just 4 ppc area codes.
    # ### Here is a lookup that provides those and randomly disribute act locs.
    # ##########################################################################
    #
    # ## Land use info
    # landuse <- function(actType) {
    #   switch(as.character(actType),
    #          Home = c(20),
    #          Work = c(22,24),
    #          Business = c(22,24),
    #          BringGet = c(20,21,22,23,24),
    #          Groceries = c(21),
    #          NonGroc = c(21),
    #          Services = c(22,24),
    #          Leisure = c(23,40,41,42,43,44,75),
    #          Social = c(23,40,41,42,43,44,75),
    #          Touring = c(20,21,22,23,24,40,41,42,43,44,45,75),
    #          Other = c(20,21,22,23,24,40,41,42,43,44,45,75))
    # }
    #
    # coords <- c()
    # schedpc6 <- c()
    #
    # withProgress(message = paste("Sampling location for ", nrow(sched),
    #                              "activities..."), value = 0, {
    #   j <- 0
    #   for (i in sched$SchedID) {
    #     j = j + 1
    #     # Increment the progress bar, and update the detail text.
    #     incProgress(1/nrow(sched),
    #                 detail = paste("Sampled a location for",j,"activity out of",
    #                                nrow(sched),"activities"))
    #
    #     destLoc <- sched[which(sched$SchedID == i),]$DestLoc
    #     actType <- sched[which(sched$SchedID == i),]$ActivityType
    #
    #     # get pc6 where activity occurs in corresponding land use PC6
    #     pc6 <- pc6sf[which(pc6sf$PC4 == destLoc & pc6sf$BG2012_maj %in% landuse(actType)),]
    #
    #     # Convert sf to sp
    #     pc6sp <- as(pc6,"Spatial")
    #
    #     # Randomly sample if there is no appropriate land use in PC4
    #     if (nrow(pc6) == 0) {
    #       pc6 <- pc6sf[which(pc6sf$PC4 == destLoc),]
    #
    #       # Convert sf to sp
    #       pc6sp <- as(pc6,"Spatial")
    #
    #       # get random coordinates within filtered pc6
    #       coordsTmp <- spsample(x = pc6sp, n = 1, type = "random", iter = 30,
    #                             prob = pc6sp$Aantal_adr, replace = TRUE)
    #     } else {
    #       # get random coordinates within filtered pc6
    #       coordsTmp <- spsample(x = pc6sp, n = 1, type = "random", iter = 30,
    #                             prob = pc6sp$Aantal_adr, replace = TRUE)
    #     }
    #     schedpc6 <- append(schedpc6,over(coordsTmp,pc6sp)$PC6)
    #     coords <- rbind(coords,coordsTmp@coords)
    #   }
    # })
    #
    # sched$Lng <<- coords[,1]
    # sched$Lat <<- coords[,2]
    # sched$PC6 <<- schedpc6
    ############################################################################
    ############################################################################
    # Copy the precomputed destination coordinates into the columns the map
    # expects. NOTE(review): <<- again mutates the shared `sched` object.
    sched$Lng <<- sched$DestLng
    sched$Lat <<- sched$DestLat
    sched$PC6 <<- sched$DestPC6
    leaflet() %>%
      setView(lng=5.00 , lat =52.00, zoom=8) %>%
      # Base groups
      addTiles(group = "OSM (default)",
               options = providerTileOptions(noWrap = TRUE)) %>%
      addProviderTiles(group = "OSM B&W",
                       provider = providers$OpenStreetMap.BlackAndWhite) %>%
      # addProviderTiles(group = "NatGeo", providers$Esri.NatGeoWorldMap) %>%
      # addProviderTiles(group = "Toner Lite", providers$Stamen.TonerLite) %>%
      # Overlay groups
      addPolygons(data = ppcsSample,
                  group = "4-digit postcode area",
                  color = "#444444",
                  weight = 1,
                  smoothFactor = 0.5,
                  opacity = 1.0,
                  fill = TRUE,
                  fillColor = "#A9F5BC",
                  fillOpacity = 0.5,
                  label = labelsPpcs,
                  labelOptions = labelOptions(
                    style = list("font-weight" = "normal", padding = "3px 8px"),
                    textsize = "15px",
                    direction = "auto"),
                  highlightOptions = highlightOptions(color = "white",
                                                      weight = 3,
                                                      bringToFront = TRUE)) %>%
      # One circle per episode; layerId = SchedID enables the click handler.
      addCircles(data = sched,
                 group = "Activity location",
                 lng = ~Lng,
                 lat = ~Lat,
                 radius = 50,
                 layerId = ~SchedID,
                 # radius = 3,
                 # icon = icons,
                 label = ~(paste("SchedID: ",as.character(SchedID))),
                 highlightOptions = highlightOptions(color = "white",
                                                     weight = 3,
                                                     bringToFront = TRUE)) %>%
      # Layer control
      addLayersControl(
        baseGroups = c("OSM (default)", "OSM B&W"),
        overlayGroups = c("4-digit postcode area", "Activity location"),
        options = layersControlOptions(collapsed = FALSE)) %>%
      # Hide some layers at first to reduce loading time
      hideGroup("4-digit postcode area") %>%
      # Add minimap
      addMiniMap(toggleDisplay = TRUE, position = "bottomleft")
  })
observeEvent(input$mapactloc_shape_click, {
click <- input$mapactloc_shape_click
# Ignore other shapes than circle shape
# Note: No layerIDs are assigned to polygons for postcode
if (is.null(click) | is.null(click$id)){
return()
}
output$clickedactlocId<-renderText({
text <- paste("You've selected Activity", click$id)
text
})
output$clickedactlocTable <- DT::renderDataTable({
sched <- myschedcoords()
table <- (subset(sched,SchedID == input$mapactloc_shape_click$id))
table <- DT::datatable(data = (t(table)), colnames = "",
options = list(paging = F, searching = F, pagelength = 25))
table
})
## Add pulse marker to the clicked activity
proxy <- leafletProxy("mapactloc")
if (click$id != "Selected") {
proxy %>% setView(lng = click$lng, lat = click$lat, input$mapactloc_zoom, zoom = 13)
proxy %>% addPulseMarkers(lng = click$lng,
lat = click$lat,
layerId = "Selected",
icon = makePulseIcon(heartbeat = 1))
} else {
# Remove previously clicked pulse marker
proxy %>% removeMarker(layerId="Selected")
}
})
##############################################################################
########### Leaflet O-D flow ##################
##############################################################################
filterschedmapodflow <- eventReactive(input$submitmapodflow,{
# Load schedule file
sched <- myschedcoords()
# Filter schedule by input selection
sched <- sched[sched$ActivityType %in% input$mapodflowact,]
sched <- sched[sched$Charging %in% input$mapodflowcharging,]
sched <- sched[sched$Mode %in% input$mapodflowmode,]
# sched <- sched[which((sched$BeginTime > input$mapodflowtime[1] & sched$BeginTime < input$mapodflowtime[2])|
# (sched$EndTime > input$mapodflowtime[1] & sched$EndTime < input$mapodflowtime[2])),]
sched
})
  # O-D pairs with positive flow for the flow map, sorted by descending trip
  # count and optionally truncated to the top N pairs. Returns NULL (via
  # return()) with a user notification when nothing matches or N is too large.
  filterodpairmapodflow <- eventReactive(input$submitmapodflow,{
    # Filter schedule by input selection
    sched <- filterschedmapodflow()
    # Convert OD matrix to pairwise column
    flows <- as.data.frame(table(sched$OrigLoc,sched$DestLoc))
    # Sort so the largest flows come first (enables top-N truncation below).
    flows <- flows[with(flows, order(-Freq)), ]
    # Print message for empty data
    if (nrow(flows) == 0) {
      showNotification("No O-D pair was found. Please choose items as per panels above.",
                       type = "message",
                       duration = 5)
      return()
    }
    colnames(flows) <- c("origin","destination","flow")
    # Remove OD pairs without flow to reduce computation time
    flows <- flows[which(flows$flow > 0),]
    # Sample OD pairs with largest n.trips
    if (input$mapodflowshow){
      # Print message for wrong input number of O-D pairs
      # (checked after zero-flow pairs were dropped)
      if (nrow(flows) < input$mapodflownum) {
        showNotification("The number of O-D pairs you provide exceeds the total number of O-D pairs in data!",
                         type = "error",
                         duration = 5)
        return()
      }
      flows <- flows[1:input$mapodflownum,]
    }
    flows
  })
  # O-D flow map: straight polylines between zone centroids, with line width
  # and color scaled by trip count, over optional PC4 polygons.
  output$mapodflow <- renderLeaflet({
    # # Create input file error message
    # if (is.null(input$schedule) | is.null(input$shpFilePc4)) {
    #   showNotification("Please provide input files! (schedule / shp file)",
    #                    type = "error",
    #                    duration = 5)
    #   # return()
    # }
    # Load shape file
    ppcs <- myppcs()
    # Filter schedule by input selection
    sched <- filterschedmapodflow()
    # Sample ppcs only where activities are occured.
    ppcsSample <- subset(ppcs, ppcs@data$PC4 %in% sched$DestLoc)
    # Filter O-D pair by input selection
    flows <- filterodpairmapodflow()
    # Print total number of O-D pairs
    showNotification(paste(sum(flows$flow), "trips have been found among",nrow(flows),"O-D pairs."),
                     type = "message",
                     duration = 5)
    # Add progress status (od2line can be slow for many pairs)
    withProgress(message = paste("Converting", nrow(flows),
                                 "O-D pairs to polyline..."), value = 0, {
      setProgress(value = NULL, message = NULL, detail = NULL,
                  session = getDefaultReactiveDomain())
      ## Get OD polylines
      lines <-stplanr::od2line(flow = flows, zones = ppcs)
      # proj4string(lines)<-"+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
    })
    # Add id for lines (used as layerId so clicks can be identified)
    lines$id <- rownames(lines@data)
    ## Put labels on lines
    labels <- sprintf(
      "O.PPC: %s<br/>
      D.PPC: %s <br/>
      N.trips: %s",
      lines@data$origin,
      lines@data$destination,
      lines@data$flow
    ) %>% lapply(htmltools::HTML)
    ## Put labels on ppcs
    labelsPpcs <- sprintf(
      "<strong>PPC: %s</strong><br/>
      ",
      ppcsSample$PC4
    ) %>% lapply(htmltools::HTML)
    # breaks for legend (histogram breaks of the flow distribution)
    histinfo<-hist(lines@data$flow,plot = FALSE)
    bins <- histinfo$breaks
    pal <- colorBin("YlOrRd", domain = lines@data$flow, bins = bins)
    leaflet() %>%
      setView(lng=5.00 , lat =52.00, zoom=8) %>%
      # Base groups
      addProviderTiles(group = "CartoDB DarkMatter",
                       provider = providers$CartoDB.DarkMatter) %>%
      addTiles(group = "OSM",
               options = providerTileOptions(noWrap = F)) %>%
      addPolygons(data = ppcsSample,
                  group = "4-digit postcode area",
                  color = "#444444",
                  weight = 1,
                  smoothFactor = 0.5,
                  opacity = 0.8,
                  fill = TRUE,
                  fillColor = "#A9F5BC",
                  fillOpacity = 0.2,
                  label = labelsPpcs,
                  labelOptions = labelOptions(
                    style = list("font-weight" = "normal", padding = "3px 8px"),
                    textsize = "15px",
                    direction = "auto"),
                  highlightOptions = highlightOptions(color = "white",
                                                      weight = 5,
                                                      bringToFront = TRUE)) %>%
      # Overlay groups (line width proportional to trip count)
      addPolylines(data=lines,
                   group = "O-D flows",
                   weight = 0.5*(lines@data$flow),
                   color = ~pal(lines@data$flow),
                   label = labels,
                   layerId = ~id,
                   opacity = 0.5,
                   highlightOptions = highlightOptions(color = "red",
                                                       weight = 5,
                                                       bringToFront = TRUE)) %>%
      # Add legend
      addLegend(data = lines,
                group = "O-D flows",
                pal = pal,
                values = lines@data$flow,
                opacity = 0.7,
                title = "Number of trips",
                position = "bottomright") %>%
      # Layer control
      addLayersControl(
        baseGroups = c("CartoDB DarkMatter","OSM"),
        overlayGroups = c("4-digit postcode area","O-D flows"),
        options = layersControlOptions(collapsed = TRUE)) %>%
      hideGroup("4-digit postcode area")
  })
observeEvent(input$mapodflow_shape_click,{
p <- input$mapodflow_shape_click
print(p)
})
##############################################################################
########### Leaflet O-D flow (Animated) ##################
##############################################################################
# filterschedmapodflowanim <- eventReactive(input$submitmapodflowanim,{
# # Load schedule file
# sched <- myschedcoords()
# # Filter schedule by input selection
# sched <- sched[sched$ActivityType %in% input$mapodflowactanim,]
# sched <- sched[sched$Charging %in% input$mapodflowcharginganim,]
# sched <- sched[sched$Mode %in% input$mapodflowmodeanim,]
# # sched <- sched[which((sched$BeginTime > input$mapodflowtime[1] & sched$BeginTime < input$mapodflowtime[2])|
# # (sched$EndTime > input$mapodflowtime[1] & sched$EndTime < input$mapodflowtime[2])),]
# sched
# })
#
# filterodpairmapodflowanim <- eventReactive(input$submitmapodflowanim,{
#
# # Filter schedule by input selection
# sched <- filterschedmapodflowanim()
#
# # Convert OD matrix to pairwise column
# flows <- as.data.frame(table(sched$OrigLoc,sched$DestLoc))
# flows <- flows[with(flows, order(-Freq)), ]
#
# # Print message for empty data
# if (nrow(flows) == 0) {
# showNotification("No O-D pair was found. Please choose items as per panels above.",
# type = "message",
# duration = 5)
# return()
# }
#
# colnames(flows) <- c("origin","destination","flow")
#
# # Remove OD pairs without flow to reduce computation time
# flows <- flows[which(flows$flow > 0),]
#
# # Sample OD pairs with largest n.trips
# if (input$mapodflowshow){
# # Print message for wrong input number of O-D pairs
# if (nrow(flows) < input$mapodflownum) {
# showNotification("The number of O-D pairs you provide exceeds the total number of O-D pairs in data!",
# type = "error",
# duration = 5)
# return()
# }
# flows <- flows[1:input$mapodflownum,]
# }
# flows
# })
#
# output$mapodflowanim <- renderLeaflet({
#
# # Load shape file
# ppcs <- myppcs()
#
# # Filter schedule by input selection
# sched <- filterschedmapodflowanim()
#
# # Filter O-D pair by input selection
# flows <- filterodpairmapodflowanim()
#
# # Print total number of O-D pairs
# showNotification(paste(sum(flows$flow), "trips have been found among",nrow(flows),"O-D pairs."),
# type = "message",
# duration = 5)
#
# # Get PC4 coordinates and put them to OD flows
# ppcsCoords<-cbind(ppcs$PC4,coordinates(ppcs))
# ppcsCoords <- as.data.frame(ppcsCoords)
# colnames(ppcsCoords) <- c("PC4","lng","lat")
# flows <- merge(flows,ppcsCoords,by.x="destination",by.y="PC4",all.x=T)
# flows <- merge(flows,ppcsCoords,by.x="origin",by.y="PC4",all.x=T)
#
# leaflet() %>%
# setView(lng=5.00 , lat =52.00, zoom=8) %>%
#
# # Base groups
#
# addTiles() %>%
# addFlows(
# flows$lng.x, flows$lat.x, flows$lng.y, flows$lat.y,
# flow = flows$flow,
# #time = 1
# )
#
# })
#
# observeEvent(input$mapodflow_shape_click,{
# p <- input$mapodflow_shape_click
# print(p)
# })
##############################################################################
########### Leaflet Route-Individual PC4 ##################
##############################################################################
filterschedmaprouteind <- eventReactive(input$submitmaprouteind,{
# Load schedule file
sched <- mysched()
# Sample only outgoing trips
schedOnlyOut <- sched[which(sched$OrigLoc!=sched$DestLoc),]
# Filter schedule by Activity type
schedOnlyOut <- schedOnlyOut[schedOnlyOut$ActivityType %in% input$maprouteindact,]
# Filter schedule by Transport mode
if (!input$maprouteindmode == "All"){
schedOnlyOut <- schedOnlyOut[schedOnlyOut$Mode == input$maprouteindmode,]
}
schedOnlyOut <- schedOnlyOut[schedOnlyOut$Charging %in% input$maprouteindcharging,]
# # Filter schedule by Time of day
# sched <- sched[which((sched$BeginTime > input$maprouteindtime[1] & sched$BeginTime < input$maprouteindtime[2])|
# (sched$EndTime > input$maprouteindtime[1] & sched$EndTime < input$maprouteindtime[2])),]
schedOnlyOut
})
  # O-D pairs with positive flow for the individual-route map, sorted by
  # descending trip count and optionally truncated to the top N pairs.
  # Returns NULL (via return()) with a user notification when nothing
  # matches or N exceeds the number of available pairs.
  filterodpairmaprouteind <- eventReactive(input$submitmaprouteind,{
    # Filter schedule by input selection
    schedOnlyOut <- filterschedmaprouteind()
    # Convert OD matrix to pairwise column
    myflows <- as.data.frame(table(schedOnlyOut$OrigLoc,schedOnlyOut$DestLoc))
    # Sort so the largest flows come first (enables top-N truncation below).
    myflows <- myflows[with(myflows, order(-Freq)), ]
    # Print message for empty data
    if (nrow(myflows) == 0) {
      showNotification("No O-D pair was found. Please choose items as per panels above.",
                       type = "message",
                       duration = 5)
      return()
    }
    colnames(myflows) <- c("origin","destination","flow")
    # Remove OD pairs without flow
    myflows <- myflows[which(myflows$flow > 0),]
    # Sample OD pairs with largest n.trips
    if (input$maprouteindshow){
      # Print message for wrong input number of O-D pairs
      # (checked after zero-flow pairs were dropped)
      if (nrow(myflows) < input$maprouteindnum) {
        showNotification("The number of O-D pairs you provide exceeds the total number of O-D pairs in data!",
                         type = "error",
                         duration = 5)
        return()
      }
      myflows <- myflows[1:input$maprouteindnum,]
    }
    myflows
  })
output$maprouteind <- renderLeaflet({
# # Create input file error message
# if (is.null(input$schedule) | is.null(input$shpFilePc4)) {
# showNotification("Please provide input files! (schedule/shp file)",
# type = "error",
# duration = 5)
# # return()
# }
# Load schedule data
sched <- mysched()
# Filter schedule by input selection with more than 0 trip
schedOnlyOut <- filterschedmaprouteind()
# Filter O-D pair by input selection with more than 0 trip
myflows <- filterodpairmaprouteind()
# Load shape file
ppcs <- myppcs()
# Add progress status to UI
withProgress(message = paste("Converting", nrow(myflows),
"O-D pairs to polyline..."), value = 0, {
setProgress(value = NULL, message = NULL, detail = NULL,
session = getDefaultReactiveDomain())
## Get OD polylines
lines <-stplanr::od2line(flow = myflows, zones = ppcs)
})
## Add index to lines to match with routes
lines@data$id <- seq.int(nrow(lines@data))
## Check internet connection
if (!curl::has_internet()){
showNotification("Check your internet connection!",
type = "error",
duration = 5)
return()
}
## Get Routes by transport mode
if (input$maprouteindmode == "Car" | input$maprouteindmode == "Car as Passenger") {
routes <- line2routeRetryS(lines, route_osrm, profile = "driving", n_trial = 1000,
n_processes = 1)
## Get Routes from route_graphhopper (for Car)
# routes <- line2routeRetryS(lines, route_fun = "route_graphhopper", n_trial = 1000, vehicle = "car",
# n_processes = 1, pat = c("a0008794-1655-4b90-8fcc-bbe0822fdd23"))
# colnames(routes@data) <- c("duration","distance","change_elev","error","id")
# routes@data$duration <- routes@data$duration * 60
} else if (input$maprouteindmode == "Walking or Biking") {
# routes <- line2routeRetryS(lines, route_osrm, profile = "bike", n_trial = 1000,
# n_processes = 1)
## Get Routes from route_graphhopper (for Slow)
routes <- line2routeRetryS(lines, route_fun = "route_graphhopper", n_trial = 1000, vehicle = "foot",
n_processes = 1, pat = c("a0008794-1655-4b90-8fcc-bbe0822fdd23"))
colnames(routes@data) <- c("duration","distance","change_elev","error","id")
routes@data$duration <- routes@data$duration * 60
} else if (input$maprouteindmode == "Public Transport") {
## Get Routes from route_osrm (for Public Transport)
routes <- line2routeRetryS(lines, route_osrm, profile = "driving", n_trial = 1000,
n_processes = 1)
} else {
## Get Routes from route_osrm (for All modes)
routes <- line2routeRetryS(lines, route_osrm, profile = "driving", n_trial = 1000,
n_processes = 1)
# ## Get Routes from route_graphhopper (for All modes)
# routes <- line2routeRetryS(lines, route_fun = "route_graphhopper", n_trial = 1000, vehicle = "car",
# n_processes = 1, pat = c("a0008794-1655-4b90-8fcc-bbe0822fdd23"))
# colnames(routes@data) <- c("duration","distance","change_elev","error","id")
# routes@data$duration <- routes@data$duration * 60
}
## Simplifying polylines
# routes <- rmapshaper::ms_simplify(routes)
## Give more information on routes
routes@data$origin <- lines@data$origin[which(lines@data$id == routes@data$id)]
routes@data$destination <- lines@data$destination[which(lines@data$id == routes@data$id)]
routes@data$flow <- lines@data$flow[which(lines@data$id == routes@data$id)]
## Make the routes global variable (used for getting info on the map)
routesIndPC4 <<- routes
## Print summary
showNotification(paste(sum(lines@data$flow),"trips have been routed among",
nrow(lines),"O-D pairs."),
type = "message",
duration = 5)
## Put labels on lines
labelsRoutesInd <- sprintf(
"<strong>O-D pair ID: %s</strong><br/>
O.PPC: %s<br/>
D.PPC: %s<br/>
N.trips: %s<br/>
Distance: %s km<br/>
Duration: %s min",
routesIndPC4@data$id,
routesIndPC4@data$origin,
routesIndPC4@data$destination,
routesIndPC4@data$flow,
round(routesIndPC4@data$distance/1000,digits=1),
round(routesIndPC4@data$duration/60,digits=0)
) %>% lapply(htmltools::HTML)
# Add number of activities
ppcs$NumActs <- table(sched$DestLoc)[match(ppcs$PC4,names(table(sched$DestLoc)))]
# Add household density by 4 ppc
ppcs$ActDensity <- round(ppcs$NumActs/
(ppcs$Shape_Area / 10^6),
digits = 2)
labelsPpcs <- sprintf(
"<strong>PPC: %s</strong><br/>
# of Activities: %g <br/>
Area: %g km<sup>2</sup><br/>
Population density: % g Activities/km<sup>2</sup>",
ppcs$PC4,
ppcs$NumActs,
ppcs$Shape_Area / 10^6,
ppcs$ActDensity
) %>% lapply(htmltools::HTML)
leaflet() %>%
setView(lng=5.00 , lat =52.00, zoom=8) %>%
# Base groups
addProviderTiles(group = "OSM B&W",
provider = providers$OpenStreetMap.BlackAndWhite) %>%
addTiles(group = "OSM",options = providerTileOptions(noWrap = F)) %>%
# Overlay groups
addPolygons(data = ppcs,
group = "4-digit postcode area",
color = "#444444",
weight = 1,
smoothFactor = 0.5,
opacity = 0.5,
fill = TRUE,
fillColor = "#A9F5BC",
fillOpacity = 0.5,
label = labelsPpcs,
labelOptions = labelOptions(
style = list("font-weight" = "normal", padding = "3px 8px"),
textsize = "15px",
direction = "auto"),
highlightOptions = highlightOptions(color = "white",
weight = 3,
bringToFront = TRUE)) %>%
addPolylines(data=routesIndPC4,
group = "Routes-Individual",
weight = myflows$flow/mean(myflows$flow),
color = "red",
label = labelsRoutesInd,
layerId = ~id,
opacity = 1,
highlightOptions = highlightOptions(color = "blue",
weight = 3,
bringToFront = TRUE)) %>%
# # Add legend
# addLegend(data = r,
# group = "Routes",
# pal = pal,
# values = myflows$flow,
# opacity = 0.7,
# title = "Number of trips",
# position = "bottomright") %>%
# Layer control
addLayersControl(
baseGroups = c("OSM B&W", "OSM"),
overlayGroups = c("Routes-Individual","4-digit postcode area"),
options = layersControlOptions(collapsed = TRUE)) %>%
# hideGroup("Routes-Individual") %>%
hideGroup("4-digit postcode area")
})
# Click handler for the individual-route (PC4) map: show the clicked route's
# id and attribute table, and drop a pulsing marker at the click location.
observeEvent(input$maprouteind_shape_click,{
    click <- input$maprouteind_shape_click
    # Ignore shapes without a layerId (the postcode polygons have none).
    # `||` short-circuits, so click$id is only inspected when click exists.
    if (is.null(click) || is.null(click$id)){
      return()
    }
    # Echo the selected route id.
    output$clickedrouteindId<-renderText({
      paste("You've selected route", click$id)
    })
    # Transposed one-row attribute table for the clicked route.
    # NOTE(review): routesIndPC4 is a global written by the routing renderer.
    output$clickedrouteindTable <- DT::renderDataTable({
      table <- subset(routesIndPC4@data, id == click$id)
      DT::datatable(data = t(table), colnames = "",
                    options = list(paging = FALSE, searching = FALSE, pageLength = 25))
    })
    ## Add pulse marker to the clicked route
    proxy <- leafletProxy("maprouteind")
    if (click$id != "Selected") {
      # BUG FIX: input$maprouteind_zoom was passed positionally and — because
      # zoom was already given by name — ended up in setView()'s `options`
      # argument; the stray argument is dropped.
      proxy %>% setView(lng = click$lng, lat = click$lat, zoom = 13)
      proxy %>% addPulseMarkers(lng = click$lng,
                                lat = click$lat,
                                layerId = "Selected",
                                icon = makePulseIcon(heartbeat = 1))
    } else {
      # Remove previously clicked pulse marker
      proxy %>% removeMarker(layerId = "Selected")
    }
  })
##############################################################################
########### Leaflet Route-Individual PC6 ##################
##############################################################################
# Schedule filter for the PC6-level individual-route map.
# Reads the coordinate-annotated schedule from disk and narrows it by
# activity type, transport mode, charging type and (optionally) first-N trips.
filterschedmaprouteindpc6 <- eventReactive(input$submitmaprouteindpc6,{
    # Load schedule file
    sched <- read.csv("data/sched-coords.txt")
    sched <- timeconverter(sched)
    # Filter schedule by Activity type
    sched <- sched[sched$ActivityType %in% input$maprouteindpc6act,]
    # Filter schedule by Transport mode ("All" keeps every mode)
    if (input$maprouteindpc6mode != "All"){
      sched <- sched[sched$Mode == input$maprouteindpc6mode,]
    }
    # Filter schedule by Charging Type
    sched <- sched[sched$Charging %in% input$maprouteindpc6charging,]
    if (input$maprouteindpc6show) {
      # BUG FIX: head() caps the request at nrow(sched); the previous
      # `sched[1:n, ]` produced all-NA rows whenever n exceeded the row count.
      sched <- head(sched, input$maprouteindpc6num)
    }
    # # Filter schedule by Time of day
    # sched <- sched[which((sched$BeginTime > input$maprouteindtime[1] & sched$BeginTime < input$maprouteindtime[2])|
    #                        (sched$EndTime > input$maprouteindtime[1] & sched$EndTime < input$maprouteindtime[2])),]
    sched
  })
# Render the PC6-level individual-route map: route every filtered trip with
# OSRM (one request per trip, with retries), store the result in the global
# routesIndPC6 (read by the click handler), and draw the routes as polylines.
output$maprouteindpc6 <- renderLeaflet({
    # # Create input file error message
    # if (is.null(input$schedule) | is.null(input$shpFilePc4)) {
    #   showNotification("Please provide input files!`` (schedule/shp file)",
    #                    type = "error",
    #                    duration = 5)
    #   # return()
    # }
    # Filter schedule by input selection with more than 0 trip
    sched <- filterschedmaprouteindpc6()
    # Load shape file (not drawn below; the polygon overlay is commented out)
    ppcs <- myppcs()
    # Route each trip individually; progress bar advances one tick per trip.
    withProgress(message = paste("Routing", nrow(sched),
                                 "trips in the schedule..."), value = 0, {
       setProgress(value = NULL, message = NULL, detail = NULL,
                   session = getDefaultReactiveDomain())
       routes <- c()
       for (i in 1:nrow(sched)) {
         incProgress(1/nrow(sched),
                     detail = paste("Routing",i,"th trip out of",nrow(sched)))
         # print(i)
         from = c(sched$OrigLng[i],sched$OrigLat[i])
         to = c(sched$DestLng[i],sched$DestLat[i])
         # Retries up to 500 times on intermittent OSRM failures.
         route <- route_osrmRetryS(from, to, n_trial=500)
         # routes[[i]] = list(sp::Lines(sp::Line(route@lines[[1]]@Lines[[1]]),ID = sched$SchedID[[i]]))
         # Keep only the first Lines element of the returned route geometry,
         # tagged with the trip's SchedID so lines and data rows stay matched.
         routes[i] = list(sp::Lines(route@lines[[1]]@Lines[[1]],ID = sched$SchedID[[i]]))
       }
    })
    # Assemble the per-trip geometries into a SpatialLinesDataFrame whose
    # attribute rows are the schedule rows (positional match, match.ID off).
    routes <- SpatialLines(routes)
    routes <- SpatialLinesDataFrame(sl = routes, data = sched[1:nrow(sched),], match.ID = FALSE)
    ## Make the routes global variable (used to get route-info on the map)
    routesIndPC6 <<- routes
    # HTML hover labels: schedule id plus origin/destination 6-digit postcodes.
    labelsRoutesPC6 <- sprintf(
      "SchedID: %s <br/>
      O.PC6: %s <br/>
      D.PC6: %s <br/>",
      routesIndPC6$SchedID,
      routesIndPC6$OrigPC6,
      routesIndPC6$DestPC6
    ) %>% lapply(htmltools::HTML)
    leaflet() %>%
      setView(lng=5.00 , lat=52.00, zoom=8) %>%
      # Base groups
      addProviderTiles(group = "OSM B&W",
                       provider = providers$OpenStreetMap.BlackAndWhite) %>%
      addTiles(group = "OSM",options = providerTileOptions(noWrap = F)) %>%
      # # Overlay groups
      # addPolygons(data = ppcs,
      #             group = "4-digit postcode area",
      #             color = "#444444",
      #             weight = 1,
      #             smoothFactor = 0.5,
      #             opacity = 0.5,
      #             fill = TRUE,
      #             fillColor = "#A9F5BC",
      #             fillOpacity = 0.5,
      #             # label = labelsPpcs,
      #             labelOptions = labelOptions(
      #               style = list("font-weight" = "normal", padding = "3px 8px"),
      #               textsize = "15px",
      #               direction = "auto"),
      #             highlightOptions = highlightOptions(color = "white",
      #                                                 weight = 3,
      #                                                 bringToFront = TRUE)) %>%
      # layerId = SchedID lets the click observer identify the route.
      addPolylines(data=routesIndPC6,
                   group = "Routes-Individual",
                   color = "red",
                   weight = 1,
                   label = labelsRoutesPC6,
                   layerId = ~SchedID,
                   opacity = 1,
                   highlightOptions = highlightOptions(color = "blue",
                                                       weight = 3,
                                                       bringToFront = TRUE)) %>%
      # Layer control
      addLayersControl(
        baseGroups = c("OSM B&W", "OSM"),
        overlayGroups = c("Routes-Individual"),
        options = layersControlOptions(collapsed = TRUE))
    # hideGroup("Routes-Individual") %>%
    # hideGroup("4-digit postcode area")
  })
# Click handler for the PC6 individual-route map: show the clicked route's
# id and attribute table, and drop a pulsing marker at the click location.
observeEvent(input$maprouteindpc6_shape_click,{
    click <- input$maprouteindpc6_shape_click
    # Ignore shapes without a layerId (polygons carry none).
    if (is.null(click) || is.null(click$id)){
      return()
    }
    # Echo the selected route id.
    output$clickedrouteindIdpc6<-renderText({
      paste("You've selected route", click$id)
    })
    # Transposed one-row attribute table for the clicked route.
    # NOTE(review): routesIndPC6 is a global written by the routing renderer.
    output$clickedrouteindTablepc6 <- DT::renderDataTable({
      table <- subset(routesIndPC6@data, SchedID == click$id)
      DT::datatable(data = t(table), colnames = "",
                    options = list(paging = FALSE, searching = FALSE, pageLength = 25))
    })
    ## Add pulse marker to the clicked route
    proxy <- leafletProxy("maprouteindpc6")
    if (click$id != "Selected") {
      # BUG FIX: input$maprouteindpc6_zoom was passed positionally and — with
      # zoom already named — was absorbed by setView()'s `options` argument;
      # the stray argument is dropped.
      proxy %>% setView(lng = click$lng, lat = click$lat, zoom = 12)
      proxy %>% addPulseMarkers(lng = click$lng,
                                lat = click$lat,
                                layerId = "Selected",
                                icon = makePulseIcon(heartbeat = 1))
    } else {
      # Remove previously clicked pulse marker
      proxy %>% removeMarker(layerId = "Selected")
    }
  })
##############################################################################
########### Leaflet Route-Aggregated PC4 ##################
##############################################################################
# Schedule filter feeding the aggregated-route (PC4) map.
# Keeps only trips that leave their origin zone, then narrows the schedule
# down by activity type, transport mode and charging type.
filterschedmaprouteagg <- eventReactive(input$submitmaprouteagg, {
    trips <- mysched()
    # Outgoing trips only: origin and destination zones must differ.
    trips <- trips[which(trips$OrigLoc != trips$DestLoc), ]
    # Activity-type filter.
    trips <- trips[trips$ActivityType %in% input$maprouteaggact, ]
    # Transport-mode filter ("All" keeps every mode).
    if (input$maprouteaggmode != "All") {
      trips <- trips[trips$Mode == input$maprouteaggmode, ]
    }
    # Charging-type filter.
    trips <- trips[trips$Charging %in% input$maprouteaggcharging, ]
    trips
  })
# Build the origin-destination flow table for the aggregated-route map.
# Returns a data.frame with columns origin / destination / flow, sorted by
# descending flow, zero-flow pairs removed, optionally truncated to the
# busiest N pairs (N = input$maprouteaggnum).
filterodpairmaprouteagg <- eventReactive(input$submitmaprouteagg, {
    outgoing <- filterschedmaprouteagg()
    # Cross-tabulate origins vs destinations into long (pairwise) form.
    flows <- as.data.frame(table(outgoing$OrigLoc, outgoing$DestLoc))
    # Largest flows first (stable sort keeps tie order).
    flows <- flows[order(flows$Freq, decreasing = TRUE), ]
    colnames(flows) <- c("origin", "destination", "flow")
    # Drop O-D pairs with no trips.
    flows <- flows[which(flows$flow > 0), ]
    # Optionally keep only the N busiest O-D pairs.
    if (input$maprouteaggshow) {
      if (nrow(flows) < input$maprouteaggnum) {
        showNotification("The number of O-D pairs you provide exceeds the total number of O-D pairs in data!",
                         type = "error",
                         duration = 5)
        return()
      }
      flows <- flows[1:input$maprouteaggnum, ]
    }
    flows
  })
# Render the aggregated-route (PC4) map: convert filtered O-D pairs to
# straight lines, route each one (OSRM or GraphHopper depending on mode),
# merge overlapping routed segments into a flow-weighted network with
# overline(), publish it via the global `routes` (read by the click handler),
# and draw segments with width proportional to flow.
output$maprouteagg <- renderLeaflet({
    # # Create input file error message
    # if (is.null(input$schedule) | is.null(input$shpFilePc4)) {
    #   showNotification("Please provide input files! (schedule/shp file)",
    #                    type = "error",
    #                    duration = 5)
    #   # return()
    # }
    # Load schedule file
    sched <- mysched()
    # Filter schedule by input selection with more than 0 trip
    schedOnlyOut <- filterschedmaprouteagg()
    # Filter O-D pair by input selection with more than 0 trip
    myflows <- filterodpairmaprouteagg()
    # Load shape file
    ppcs <- myppcs()
    # Add progress status to UI
    withProgress(message = paste("Converting", nrow(myflows),
                                 "O-D pairs to polyline..."), value = 0, {
       setProgress(value = NULL, message = NULL, detail = NULL,
                   session = getDefaultReactiveDomain())
       ## Get OD polylines
       lines <-stplanr::od2line(flow = myflows, zones = ppcs)
    })
    ## Add index to lines to match with routes
    lines@data$id <- seq.int(nrow(lines@data))
    ## Check internet connection
    if (!curl::has_internet()){
      showNotification("Check your internet connection!",
                       type = "error",
                       duration = 5)
      return()
    }
    ## Get Routes by transport mode.
    ## NOTE(review): car / public transport / "All" currently all use the same
    ## OSRM "driving" profile; only walking/biking goes through GraphHopper.
    if (input$maprouteaggmode == "Car" | input$maprouteaggmode == "Car as Passenger") {
      routes <- line2routeRetryS(lines, route_osrm, profile = "driving", n_trial = 1000,
                                 n_processes = 1)
      ## Get Routes from route_graphhopper (for Car)
      # routes <- line2routeRetryS(lines, route_fun = "route_graphhopper", n_trial = 1000, vehicle = "car",
      #                            n_processes = 1, pat = c("a0008794-1655-4b90-8fcc-bbe0822fdd23"))
      # colnames(routes@data) <- c("duration","distance","change_elev","error","id")
      # routes@data$duration <- routes@data$duration * 60
    } else if (input$maprouteaggmode == "Walking or Biking") {
      # routes <- line2routeRetryS(lines, route_osrm, profile = "bike", n_trial = 1000,
      #                            n_processes = 1)
      ## Get Routes from route_graphhopper (for Slow)
      routes <- line2routeRetryS(lines, route_fun = "route_graphhopper", n_trial = 1000, vehicle = "foot",
                                 n_processes = 1, pat = c("a0008794-1655-4b90-8fcc-bbe0822fdd23"))
      # GraphHopper returns minutes; normalize column names and convert to seconds.
      colnames(routes@data) <- c("duration","distance","change_elev","error","id")
      routes@data$duration <- routes@data$duration * 60
    } else if (input$maprouteaggmode == "Public Transport") {
      ## Get Routes from route_osrm (for Public Transport)
      routes <- line2routeRetryS(lines, route_osrm, profile = "driving", n_trial = 1000,
                                 n_processes = 1)
    } else {
      ## Get Routes from route_osrm (for All modes)
      routes <- line2routeRetryS(lines, route_osrm, profile = "driving", n_trial = 1000,
                                 n_processes = 1)
      # ## Get Routes from route_graphhopper (for All modes)
      # routes <- line2routeRetryS(lines, route_fun = "route_graphhopper", n_trial = 1000, vehicle = "car",
      #                            n_processes = 1, pat = c("a0008794-1655-4b90-8fcc-bbe0822fdd23"))
      # colnames(routes@data) <- c("duration","distance","change_elev","error","id")
      # routes@data$duration <- routes@data$duration * 60
    }
    ## Simplifying polylines
    # routes <- rmapshaper::ms_simplify(routes)
    ## Give more information on routes.
    ## NOTE(review): elementwise `which(lines@data$id == routes@data$id)`
    ## assumes lines and routes are in identical order — verify upstream.
    routes@data$origin <- lines@data$origin[which(lines@data$id == routes@data$id)]
    routes@data$destination <- lines@data$destination[which(lines@data$id == routes@data$id)]
    routes@data$flow <- lines@data$flow[which(lines@data$id == routes@data$id)]
    # Convert series of overlapping lines into a route network
    routes <- overline(routes, attrib = "flow", fun = sum)
    # Remove line with 0 flow
    routes <- routes[which(routes$flow > 0),]
    # Give id to aggregated routes
    routes$id <- seq.int(nrow(routes@data))
    ## Make the routes global variable (used for getting info on the map)
    routes <<- routes
    ## Print summary
    showNotification(paste(sum(lines@data$flow),"trips have been routed among",
                           nrow(lines),"O-D pairs."),
                     type = "message",
                     duration = 5)
    ## Put labels on lines
    labelsRoutesAgg <- sprintf(
      "<strong>Segment ID: %s</strong><br/>
      N.trips: %s",
      routes$id,
      routes$flow
    ) %>% lapply(htmltools::HTML)
    # Add number of activities
    ppcs$NumActs <- table(sched$DestLoc)[match(ppcs$PC4,names(table(sched$DestLoc)))]
    # Add household density by 4 ppc (activities per square km)
    ppcs$ActDensity <- round(ppcs$NumActs/
                               (ppcs$Shape_Area / 10^6),
                             digits = 2)
    labelsPpcs <- sprintf(
      "<strong>PPC: %s</strong><br/>
      # of Activities: %g <br/>
      Area: %g km<sup>2</sup><br/>
      Population density: % g Activities/km<sup>2</sup>",
      ppcs$PC4,
      ppcs$NumActs,
      ppcs$Shape_Area / 10^6,
      ppcs$ActDensity
    ) %>% lapply(htmltools::HTML)
    leaflet() %>%
      setView(lng=5.00 , lat =52.00, zoom=8) %>%
      # Base groups
      addProviderTiles(group = "OSM B&W",
                       provider = providers$OpenStreetMap.BlackAndWhite) %>%
      addTiles(group = "OSM",options = providerTileOptions(noWrap = F)) %>%
      # Overlay groups
      addPolygons(data = ppcs,
                  group = "4-digit postcode area",
                  color = "#444444",
                  weight = 1,
                  smoothFactor = 0.5,
                  opacity = 0.5,
                  fill = TRUE,
                  fillColor = "#A9F5BC",
                  fillOpacity = 0.5,
                  label = labelsPpcs,
                  labelOptions = labelOptions(
                    style = list("font-weight" = "normal", padding = "3px 8px"),
                    textsize = "15px",
                    direction = "auto"),
                  highlightOptions = highlightOptions(color = "white",
                                                      weight = 3,
                                                      bringToFront = TRUE)) %>%
      # Segment width scales with relative flow; layerId feeds the click handler.
      addPolylines(data=routes,
                   group = "Routes-Aggregated",
                   weight = routes$flow/mean(routes$flow),
                   color = "red",
                   label = labelsRoutesAgg,
                   layerId = ~id,
                   opacity = 1,
                   highlightOptions = highlightOptions(color = "blue",
                                                       weight = 3,
                                                       bringToFront = TRUE)) %>%
      # # Add legend
      # addLegend(data = r,
      #           group = "Routes",
      #           pal = pal,
      #           values = myflows$flow,
      #           opacity = 0.7,
      #           title = "Number of trips",
      #           position = "bottomright") %>%
      # Layer control
      addLayersControl(
        baseGroups = c("OSM B&W", "OSM"),
        overlayGroups = c("Routes-Aggregated", "4-digit postcode area"),
        options = layersControlOptions(collapsed = TRUE)) %>%
      # hideGroup("Routes-Aggregated") %>%
      hideGroup("4-digit postcode area")
  })
# Click handler for the aggregated-route map: show the clicked segment's id
# and attribute table, and drop a pulsing marker at the click location.
observeEvent(input$maprouteagg_shape_click,{
    click <- input$maprouteagg_shape_click
    # Ignore shapes without a layerId (the postcode polygons have none).
    if (is.null(click) || is.null(click$id)){
      return()
    }
    # Echo the selected segment id.
    output$clickedrouteaggId<-renderText({
      paste("You've selected route segment", click$id)
    })
    # Transposed one-row attribute table for the clicked segment.
    # NOTE(review): `routes` is a global written by the aggregated renderer.
    output$clickedrouteaggTable <- DT::renderDataTable({
      table <- subset(routes@data, id == click$id)
      DT::datatable(data = t(table), colnames = "",
                    options = list(paging = FALSE, searching = FALSE, pageLength = 25))
    })
    ## Add pulse marker to the clicked segment
    proxy <- leafletProxy("maprouteagg")
    if (click$id != "Selected") {
      # BUG FIX: input$maprouteagg_zoom was passed positionally and — with
      # zoom already named — was absorbed by setView()'s `options` argument;
      # the stray argument is dropped.
      proxy %>% setView(lng = click$lng, lat = click$lat, zoom = 13)
      proxy %>% addPulseMarkers(lng = click$lng,
                                lat = click$lat,
                                layerId = "Selected",
                                icon = makePulseIcon(heartbeat = 1))
    } else {
      # Remove previously clicked pulse marker
      proxy %>% removeMarker(layerId = "Selected")
    }
  })
##############################################################################
########### Leaflet Animation ##################
##############################################################################
# Reactive point set for the animation map: scatter one random point per
# activity inside the largest polygon of its destination postcode, then keep
# only activities overlapping the selected time window.
points <- reactive({
    sched <- mysched()
    ppcs <- myppcs()
    # Sample ppcs only where activities are occured.
    ppcsSample <- subset(ppcs, ppcs@data$PC4 %in% sched$DestLoc)
    # Add number of activities within 4 ppcs
    ppcsSample$NumActs <- table(sched$DestLoc)[match(ppcsSample$PC4,names(table(sched$DestLoc)))]
    # Add household density by 4 ppc (activities per square km)
    ppcsSample$ActDensity <- round(ppcsSample$NumActs/
                                     (ppcsSample$Shape_Area / 10^6),
                                   digits = 2)
    ## Get coordinates: for each postcode, draw as many random points inside
    ## its (largest) polygon as there are activities destined there.
    coords <- c()
    schedid <- c()
    for (i in ppcsSample$PC4) {
      # Get total number of activities by postcode
      n <- sum(ppcsSample$NumActs[ppcsSample$PC4 == i], na.rm = TRUE)
      schedid <- append(schedid,sched$SchedID[sched$DestLoc == i])
      polygon <- ppcsSample[ppcsSample@data$PC4 == i,]@polygons
      # Pick the Polygons element with the largest area.
      chosenPolygon <- 1
      for (j in 1:length(polygon)) {
        if (j > 1) {
          if (polygon[[j]]@area > polygon[[j-1]]@area){
            chosenPolygon <- j
          }
        }
      }
      # NOTE(review): both branches of this if/else are identical — the
      # multi-polygon special case appears to have been collapsed; a future
      # cleanup could remove the conditional entirely.
      if (class(polygon) == "list" & length(polygon) > 1) { ## For multi-polygons
        polygon <- polygon[[chosenPolygon]]
        if (length(polygon@Polygons) == 1) {
        } else {
          # Several rings: take the first in plot order (largest ring first).
          chosen <- (polygon@plotOrder)[1]
          polygon <- polygon@Polygons[[chosen]]
        }
      } else {
        polygon <- polygon[[chosenPolygon]]
        if (length(polygon@Polygons) == 1) {
        } else {
          chosen <- (polygon@plotOrder)[1]
          polygon <- polygon@Polygons[[chosen]]
        }
      }
      coords <- rbind(coords,spsample(polygon, n = n, type = "random")@coords)
    }
    # NOTE(review): assigning coords positionally assumes sched rows are
    # grouped by DestLoc in the same order as ppcsSample — verify upstream.
    sched$Lng <- coords[,1]
    sched$Lat <- coords[,2]
    # Keep activities whose begin or end time falls inside the slider window.
    sched <- sched[which((sched$BeginTime > input$mapanimtime[1] & sched$BeginTime < input$mapanimtime[2])|
                           (sched$EndTime > input$mapanimtime[1] & sched$EndTime < input$mapanimtime[2])),]
    return(sched)
  })
# Redraw the animation layer whenever the animation clock ticks: clear the
# previous frame, then plot every currently active activity as a circle.
observeEvent(input$animtime, {
    proxy <- leafletProxy("mapanim")
    proxy %>%
      clearShapes() %>%
      addCircles(data = points(),
                 lng = ~Lng, lat = ~Lat,
                 layerId = ~SchedID,
                 radius = 8, weight = 5)
  })
# Base map for the animation tab: dark CartoDB tiles, initial view over
# the Netherlands (circles are added separately by the animtime observer).
output$mapanim <- renderLeaflet({
    base <- leaflet() %>%
      setView(lng = 5.4697, lat = 51.4416, zoom = 7)
    base %>%
      addProviderTiles(provider = providers$CartoDB.DarkMatterNoLabels,
                       group = "CartoDB Dark")
  })
##############################################################################
############################ Activity type ############################
##############################################################################
# Activity-type map: one bar mini-chart per 4-digit postcode showing the
# total activity duration per activity type selected in the UI.
output$mapact<- renderLeaflet({
    # Load schedule and derive per-activity duration (EndTime - BeginTime).
    sched <- myschedcoords()
    sched <- timeconverter(sched)
    sched$ActDur <- sched$EndTime - sched$BeginTime
    # Postcode centroids anchor the mini-charts.
    ppcs <- myppcs()
    ppcsCoords <- gCentroid(ppcs, byid = TRUE)
    ppcsCoords$PC4 <- ppcs$PC4
    ppcsCoords <- as.data.frame(ppcsCoords)
    # Total duration per (postcode, activity type), widened to one column
    # per type; missing combinations become 0.
    newdata2 <- sched %>%
      group_by(DestLoc, ActivityType) %>%
      summarise(ActDur = sum(ActDur))
    newdata2 <- as.data.frame(newdata2)
    newdata2 <- reshape(newdata2, idvar = "DestLoc", timevar = "ActivityType", direction = "wide")
    newdata2[is.na(newdata2)] <- 0
    newdata2 <- merge(newdata2, ppcsCoords, by.x = "DestLoc", by.y = "PC4", all.x = FALSE)
    # FIX: wrap the external character vector in all_of() — passing it bare
    # to select() is ambiguous (column name vs env variable) and deprecated
    # by tidyselect.
    activityData <- newdata2 %>% select(all_of(input$checkActivity))
    leaflet() %>%
      setView(lng = 5.4697, lat = 51.4416, zoom = 13) %>%
      # Base group
      addTiles(group = "OSM", options = providerTileOptions(noWrap = FALSE)) %>%
      addMinicharts(
        newdata2$x, newdata2$y,
        type = "bar",
        chartdata = activityData,
        showLabels = FALSE,
        width = 80, height = 80
      )
  })
##############################################################################
####################### Electricity consumption #######################
##############################################################################
# Electricity-consumption map: one bar mini-chart per 4-digit postcode with
# estimated energy per charging type (charging time scaled by the per-type
# power inputs from the UI).
output$mapcharging<- renderLeaflet({
    # Load schedule file and derive per-activity duration
    sched <- myschedcoords()
    sched <- timeconverter(sched)
    sched$ActDur <- sched$EndTime-sched$BeginTime
    # Load shape file; centroids anchor the mini-charts
    ppcs <- myppcs()
    ppcsCoords <- gCentroid(ppcs,byid=T)
    ppcsCoords$PC4 <- ppcs$PC4
    ppcsCoords <- as.data.frame(ppcsCoords)
    # Data conversion: total duration per (postcode, charging type), widened
    # to one column per charging type; missing combinations become 0.
    newdata1 <- sched %>%
      group_by(DestLoc, Charging) %>%
      summarise(ActDur = sum(ActDur))
    newdata1 <- as.data.frame(newdata1)
    newdata1 <- reshape(newdata1, idvar = "DestLoc", timevar = "Charging", direction = "wide")
    newdata1[is.na(newdata1)] <- 0
    newdata1 <- merge(newdata1,ppcsCoords,by.x="DestLoc",by.y="PC4",all.x=F)
    chargingData <- newdata1 %>% select(ActDur.PrivateCharging,ActDur.SemiPublicCharging,ActDur.PublicCharging,ActDur.FastCharging)
    # Conversion from time to Kilowatt-hour: duration/60 gives hours
    # (assumes ActDur is in minutes — TODO confirm against timeconverter),
    # multiplied by the per-type charging power from the UI inputs.
    chargingData <- chargingData/60
    chargingData$ActDur.PrivateCharging <- chargingData$ActDur.PrivateCharging * input$privateCharging
    chargingData$ActDur.PublicCharging <- chargingData$ActDur.PublicCharging * input$publicCharging
    chargingData$ActDur.SemiPublicCharging <- chargingData$ActDur.SemiPublicCharging * input$semiPublicCharging
    chargingData$ActDur.FastCharging <- chargingData$ActDur.FastCharging * input$fastCharging
    leaflet() %>%
      setView(lng=5.4697 , lat =51.4416, zoom=13) %>%
      # Base groups
      addTiles(group = "OSM",options = providerTileOptions(noWrap = F)) %>%
      addMinicharts(
        newdata1$x, newdata1$y,
        type = "bar",
        chartdata = chargingData,
        showLabels = FALSE,
        width = 50, height = 50
      )
  })
##############################################################################
########################### Custom Functions ##########################
##############################################################################
# Based on Robinlovelace's stplanr/R/od-funs.R
# https://github.com/ropensci/stplanr/blob/master/R/od-funs.R
# Route every line of a SpatialLinesDataFrame with a routing function,
# retrying each O-D pair up to n_trial times on intermittent failures.
# Adapted from stplanr's line2route()/line2routeRetry() (od-funs.R), with a
# Shiny progress bar added for the sequential path.
#
# l           SpatialLinesDataFrame (or sf, converted in/out) of desire lines.
# route_fun   routing function or its name (e.g. "route_osrm").
# n_trial     max attempts per O-D pair in the sequential path.
# n_print     legacy progress-print interval (now unused; see commented code).
# list_output return the raw list of per-pair results instead of a Spatial* object.
# l_id        column of l to use as route id; falls back to "id", then row names.
# n_processes >1 routes in parallel via foreach/doParallel (no retry there).
# ...         forwarded to route_fun.
line2routeRetryS <- function(l, route_fun = "route_osrm", n_trial = 200,
                             n_print = 10,list_output = FALSE, l_id = NA,
                             n_processes = 1,...){
    # Accept sf input; work in sp internally and convert back at the end.
    return_sf <- is(l, "sf")
    if(return_sf) {
      l <- as(l, "Spatial")
    }
    FUN <- match.fun(route_fun)
    # One row per line with from/to coordinates (fx, fy, tx, ty).
    ldf <- stplanr::line2df(l)
    n_ldf <- nrow(ldf)
    if(n_processes > 1){
      n_processes <- min(c(n_processes, n_ldf))
      cl <- parallel::makeCluster(n_processes)
      doParallel::registerDoParallel(cl)
    }
    if(n_processes > 1){
      # Parallel path: errors are captured per pair via .errorhandling = "pass";
      # NOTE(review): no retry logic here, unlike the sequential path below.
      if(!require(foreach)) {
        stop("You must install foreach before running this code")
      }
      rc <- foreach::foreach(i = 1:n_ldf, .errorhandling = "pass") %dopar% {
        FUN(from = c(ldf$fx[i], ldf$fy[i]),
            to = c(ldf$tx[i], ldf$ty[i]),...)
      }
      parallel::stopCluster(cl)
    } else {
      ## Sequential path with Shiny progress bar and per-pair retry.
      withProgress(message = "Routing calculation is in progress...", value = 0, {
        rc <- as.list(rep(NA, length(l)))
        for(i in 1:n_ldf){
          # Increment the progress bar, and update the detail text.
          incProgress(1/n_ldf,
                      detail = paste("Routing",i,"th O-D pair out of",
                                     n_ldf,"pairs"))
          # Retry for intermittent errors while calculating route in OSRM:
          # keep trying until the slot holds an S4 (Spatial) result or the
          # attempt budget is exhausted; try() keeps errors as try-error objects.
          attempt <- 1
          while(typeof(rc[[i]])!="S4" && attempt <= n_trial) {
            attempt = attempt + 1
            rc[[i]] <- try({
              FUN(from = c(ldf$fx[i], ldf$fy[i]),
                  to = c(ldf$tx[i], ldf$ty[i]),...)
            },silent = T)
          }
          # perc_temp <- i %% round(n_ldf / n_print)
          # # print % of distances calculated
          # if(!is.na(perc_temp) & perc_temp == 0){
          #   message(paste0(round(100 * i/n_ldf), " % out of ",
          #                  n_ldf, " distances calculated"))
          # }
        }
      })
    }
    if(list_output) {
      r <- rc
    } else {
      # Set the names based on the first non failing line (then exit loop):
      # template r gets l's geometry and an empty data frame with the routed
      # columns plus an "error" column.
      for(i in 1:n_ldf){
        if(grepl("Spatial.*DataFrame", class(rc[[i]]))[1]) {
          rdata <- data.frame(matrix(nrow = nrow(l), ncol = ncol(rc[[i]]) + 1))
          names(rdata) <- c(names(rc[[i]]), "error")
          r <- l
          r@data <- rdata
          break
        }
      }
      # Copy rc into r including the data or copy the error into r:
      # successful pairs replace geometry + data; failures keep the original
      # straight-line geometry and record the error message.
      for(i in 1:n_ldf){
        if(grepl("Spatial.*DataFrame", class(rc[[i]]))[1]) {
          r@lines[[i]] <- sp::Lines(rc[[i]]@lines[[1]]@Lines, row.names(l[i,]))
          r@data[i,] <- c(rc[[i]]@data, error = NA)
        } else {
          r@data[i, "error"] <- rc[[i]][1]
        }
      }
      # Set the id in r: prefer l_id if given and present in l, else "id",
      # else fall back to l's row names.
      l_ids <- c(l_id, "id")
      l_id <- l_ids[!is.na(l_ids)][1]
      r$id <- if(l_id %in% names(l)){
        l@data[[l_id]]
      } else {
        row.names(l)
      }
    }
    if(return_sf) {
      r <- sf::st_as_sf(r)
    }
    r
  }
##############################################################################
# Route a single origin-destination pair, retrying on intermittent failures.
#
# from, to    numeric lng/lat vectors passed straight to route_fun.
# route_fun   routing function or its name (default "route_osrm").
# n_trial     maximum number of attempts before giving up.
# ...         forwarded to route_fun.
#
# Returns the SpatialLinesDataFrame produced by route_fun, or — if every
# attempt fails — the last try-error object (callers should check the class).
route_osrmRetryS <- function(from, to , route_fun = "route_osrm", n_trial = 200,
                             ...){
    FUN <- match.fun(route_fun)
    rc <- NULL
    # Retry for intermittent errors while calculating route in OSRM.
    # inherits() replaces the fragile class(rc) != "..." comparison, and the
    # counter uses <- instead of = for assignment.
    attempt <- 1
    while(!inherits(rc, "SpatialLinesDataFrame") && attempt <= n_trial) {
      attempt <- attempt + 1
      rc <- try({
        FUN(from = from,
            to = to,...)
      }, silent = TRUE)
    }
    return(rc)
  }
}
|
5dd3f093b1309fc766b26bd85024ab1db79d8e31
|
3e33606d2fb598cfbfa4ab777b587b2aa20c3897
|
/code/AFNI/matrix_corrs.R
|
007d584676ec4131f70de25dbfb25792c1c5be87
|
[] |
no_license
|
TIGRLab/compare_task_tools
|
2b2be0bcf7f71a86d723dac441f81eeda11b630a
|
b301694c17caed080c8d49c27ac82d1e3db0f655
|
refs/heads/master
| 2022-11-27T13:31:01.407039
| 2020-08-11T16:06:18
| 2020-08-11T16:06:18
| 263,997,287
| 0
| 1
| null | 2020-05-29T20:09:20
| 2020-05-14T18:43:28
| null |
UTF-8
|
R
| false
| false
| 5,538
|
r
|
matrix_corrs.R
|
library(lineup)
library(ggplot2)
library(reshape2)
# set working dir
# NOTE(review): setwd() in a script is discouraged (tidyverse style guide);
# kept to preserve the script's original behavior.
setwd("/projects/loliver/SPINS_GLM_Test")
#glob2rx("sub*spm_mat.csv")
# find spm and afni design matrices for each participant
spm_mat_list <- list.files(path= ".", recursive=T, pattern="^sub.*spm_mat\\.csv$")
afni_mat_list <- list.files(path= "./", recursive=T, pattern="^sub.*mat.xmat\\.1D$")
# find nistats design matrices for each participant
nistats_mat_list <- list.files(path= "/projects/ttan/fMRI_tools/analysis/first_lvl",recursive=T,full.names=T,
                               pattern="^sub.*emp_combined_dm\\.tsv$")
nistats_mat_list <- nistats_mat_list[1:10] # cut ones in tmp folder
# read in mats: one data frame per participant, with program-specific column
# layouts (SPM: 5 task regressors + 19 confounds; AFNI: skip + 18 polort +
# 5 task regressors + motion/physio; nistats: task regressors + confounds + drifts)
spm_mat <- lapply(spm_mat_list, read.csv, header=F, col.names=c("ea_block","ea_pmod","circles","ea_press","circ_press",
                                                                rep("confounds",19)))
afni_mat <- lapply(afni_mat_list, read.table, header=F, skip=32, sep=" ", col.names=c("skip",rep("polort",18),"ea_block",
                                                                                      "ea_pmod","circles","ea_press","circ_press","FD","x","y","z","roll","pitch","yaw","xder","yder","zder",
                                                                                      "rollder","pitchder","yawder","mean_wm","mean_csf"))
nistats_mat <- lapply(nistats_mat_list, read.table, header=F, skip=1, col.names=c("skip","ea_block","ea_press","circles",
                                                                                  "circ_press","ea_pmod","mean_csf","mean_wm","FD","trans_x","trans_x_der","trans_y",
                                                                                  "trans_y_der","trans_z","trans_z_der","rot_x","rot_x_der","rot_y","rot_y_der","rot_z","rot_z_der",
                                                                                  "drift_1","drift_2","drift_3","drift_4","drift_5","constant"))
# add subject ids (first 11 chars of the relative path, e.g. "sub-CMH0057";
# nistats paths are absolute, hence the 58:68 offset — assumes a fixed path
# prefix length, TODO confirm if the nistats directory ever moves)
names(spm_mat) <- substring(spm_mat_list,1,11)
names(afni_mat) <- substring(afni_mat_list,1,11)
names(nistats_mat) <- substring(nistats_mat_list,58,68)
# Add a TR (scan index) column to every design matrix and remove unwanted
# columns.  seq_len(nrow(...)) replaces the hard-coded 1:819 so the code also
# works for acquisitions with a different number of volumes (identical result
# for the original 819-row data).  The commented run-collapse steps are no
# longer necessary and are kept for reference.
for (i in names(spm_mat)) {
  spm_mat[[i]]$TR <- seq_len(nrow(spm_mat[[i]]))
  # spm_mat[[i]][274:546,1:19] <- spm_mat[[i]][274:546,20:38]
  # spm_mat[[i]][547:819,1:19] <- spm_mat[[i]][547:819,39:57]
}
for (i in names(nistats_mat)) {
  # Drop the leading skip/index column before adding TR.
  nistats_mat[[i]] <- nistats_mat[[i]][,-1]
  nistats_mat[[i]]$TR <- seq_len(nrow(nistats_mat[[i]]))
  # nistats_mat[[i]][274:546,1:26] <- nistats_mat[[i]][274:546,27:52]
  # nistats_mat[[i]][547:819,1:26] <- nistats_mat[[i]][547:819,53:78]
}
for (i in names(afni_mat)) {
  # Drop the leading skip column before adding TR.
  afni_mat[[i]] <- afni_mat[[i]][,-1]
  afni_mat[[i]]$TR <- seq_len(nrow(afni_mat[[i]]))
}
# Correlate the matching task regressors across programs for each participant.
# Prefixes: SA = SPM vs AFNI, SN = SPM vs nistats, AN = AFNI vs nistats.
# The row count is derived from the data (was hard-coded to 9 participants),
# and the replacement functions use <- instead of = for assignment.
all_corrs <- matrix(ncol = 15, nrow = length(spm_mat))
rownames(all_corrs) <- names(spm_mat)
colnames(all_corrs) <- c("SA_ea_block","SA_ea_pmod","SA_circles","SA_ea_press","SA_circ_press",
                         "SN_ea_block","SN_ea_pmod","SN_circles","SN_ea_press","SN_circ_press",
                         "AN_ea_block","AN_ea_pmod","AN_circles","AN_ea_press","AN_circ_press")
for (i in names(spm_mat)) {
  # Column index sets: SPM task regressors are 1:5, AFNI's are 19:23, and
  # nistats columns are reordered (c(1,5,3,2,4)) to match SPM's order.
  all_corrs[i,1:5] <- corbetw2mat(spm_mat[[i]][,1:5],afni_mat[[i]][,19:23], what="paired")
  all_corrs[i,6:10] <- corbetw2mat(spm_mat[[i]][,1:5],nistats_mat[[i]][,c(1,5,3,2,4)], what="paired")
  all_corrs[i,11:15] <- corbetw2mat(afni_mat[[i]][,19:23],nistats_mat[[i]][,c(1,5,3,2,4)], what="paired")
}
# Interleave columns so the three pairwise comparisons per regressor sit together.
all_corrs <- all_corrs[,c(1,6,11,2,7,12,3,8,13,4,9,14,5,10,15)]
#write.csv(all_corrs,file="/projects/loliver/SPINS_GLM_Test/design_mat_corrs_fixed.csv",row.names = T)
# Reshape each design matrix to long format (one row per TR x regressor) and
# stack the three programs' versions per participant for joint plotting.
spm_mat_long <- lapply(spm_mat, melt, id.vars = "TR")
afni_mat_long <- lapply(afni_mat, melt, id.vars = "TR")
nistats_mat_long <- lapply(nistats_mat, melt, id.vars = "TR")
all_mats <- vector(mode = "list", length = length(spm_mat))
names(all_mats) <- names(spm_mat)
for (i in names(spm_mat)) {
  # Scalar assignment recycles over all rows; this replaces the previous
  # hard-coded rep() counts (19656/31122/21294), which silently assumed a
  # fixed number of long-format rows per program.
  spm_mat_long[[i]]$program <- "SPM"
  afni_mat_long[[i]]$program <- "afni"
  nistats_mat_long[[i]]$program <- "ni"
  all_mats[[i]] <- rbind(spm_mat_long[[i]],afni_mat_long[[i]],nistats_mat_long[[i]])
}
# Visualize the ea_pmod regressor for every participant across programs.
# BUG FIX: ggplot objects created inside a for loop are never auto-printed
# (auto-printing only happens for top-level expressions), so the original
# loop produced no plots; print() is required for them to render.
for (i in names(spm_mat)) {
  p <- ggplot(data.frame(all_mats[[i]][all_mats[[i]]$variable=="ea_pmod",]), aes(x = TR, y = value, col=program)) +
    geom_line()
  print(p)
}
# Spot-check plots for individual subjects/regressors across programs.
# NOTE(review): these top-level ggplot calls auto-print only when the script
# is run interactively (or sourced with echo); wrap in print() for Rscript.
ggplot(data.frame(all_mats[["sub-CMH0057"]][all_mats[["sub-CMH0057"]]$variable=="ea_pmod",]), aes(x = TR, y = value, col=program)) +
  geom_line() #+ facet_wrap("variable")
ggplot(data.frame(all_mats[["sub-CMH0057"]][all_mats[["sub-CMH0057"]]$variable=="ea_block",]), aes(x = TR, y = value, col=program)) +
  geom_line() #+ facet_wrap("variable")
ggplot(data.frame(all_mats[["sub-CMH0065"]][all_mats[["sub-CMH0065"]]$variable=="ea_pmod",]), aes(x = TR, y = value, col=program)) +
  geom_line() #+ facet_wrap("variable")
ggplot(data.frame(all_mats[["sub-CMH0093"]][all_mats[["sub-CMH0093"]]$variable=="ea_pmod",]), aes(x = TR, y = value, col=program)) +
  geom_line() #+ facet_wrap("variable")
# visualize single-program regressor traces (wide-format data, x = scan index)
ggplot(data.frame(spm_mat[["sub-CMH0057"]]), aes(x = 1:819, y = ea_press)) +
  geom_line()
ggplot(data.frame(afni_mat[["sub-CMH0057"]]), aes(x = 1:819, y = ea_press)) +
  geom_line()
ggplot(data.frame(nistats_mat[["sub-CMH0057"]]), aes(x = 1:819, y = ea_press)) +
  geom_line()
ggplot(data.frame(spm_mat[["sub-CMH0159"]]), aes(x = 1:819, y = ea_pmod)) +
  geom_line()
ggplot(data.frame(afni_mat[["sub-CMH0159"]]), aes(x = 1:819, y = ea_pmod)) +
  geom_line()
ggplot(data.frame(nistats_mat[["sub-CMH0159"]]), aes(x = 1:819, y = ea_pmod)) +
  geom_line()
ggplot(data.frame(afni_mat[["sub-CMH0093"]]), aes(x = 1:819, y = ea_pmod)) +
  geom_line()
ggplot(data.frame(nistats_mat[["sub-CMH0093"]]), aes(x = 1:819, y = ea_pmod)) +
  geom_line()
|
796bdb4d56234fc0d77daa571cb81371515d56c6
|
cd3bad7fb562ad5f0333728c261d23ed0661e8bf
|
/modelo_transporte/04.2_add-pop-to-hex.R
|
8d1be109598a65ed2b62ce1217cc548dcefe7caf
|
[] |
no_license
|
Joaobazzo/Master-thesis-scripts
|
6bfeff05d6567ad5a11a6f8047229943d7b61aea
|
effe21e025993844209bf4af3cbdb7eeadbe34b1
|
refs/heads/master
| 2021-01-02T12:57:01.458385
| 2020-04-01T21:55:54
| 2020-04-01T21:55:54
| 196,636,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,729
|
r
|
04.2_add-pop-to-hex.R
|
#
# Statistical population grid vs. hexagonal zones:
# distribute IBGE grid-cell population into hexagons by area overlap.
#
# ---
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged;
# kept here to preserve the script's original behavior.
rm(list=ls())
library(sf)
library(sp)
library(dplyr)
library(data.table)
library(geobr)
library(mapview)
library(readr)
setwd("L:/# DIRUR #/ASMEQ/bosistas/joaobazzo/master-thesis-repo1")
#
# Hexagon resolution tags to process (used in the input/output file names).
rmc <- c("full09","full08")
#
# Statistical grid: read and reproject to the projected CRS EPSG:31982,
# then record each grid cell's total area (square meters, numeric).
grade <- sf::read_sf("dados/IBGE/grade_id25/grade_rmc.shp") %>%
  sf::st_transform(31982)
grade$grad_total <- sf::st_area(grade$geometry) %>% as.numeric()
#
# For each hexagon resolution: intersect the hexagons with the population
# grid, apportion each grid cell's population to hexagons proportionally to
# the overlapping area, and write one "rmc_pop_<res>.rds" file per resolution.
for(i in rmc){
  # i = rmc[1]
  print(i)
  #
  # Hexagons: read and reproject to the grid's CRS (EPSG:31982).
  # grid09 <- readr::read_rds("dados/IBGE/cwb_full09.rds")
  hex <- readr::read_rds(paste0("dados/IBGE/cwb_",i,".rds")) %>%
    sf::st_transform(4326) %>% st_transform(31982)
  hex$hex_total <- sf::st_area(hex$geometry) %>% as.numeric()
  #
  # Intersection of grid cells and hexagons; keep POLYGON pieces only
  # (points/lines from touching borders carry no area).
  inter <- sf::st_intersection(grade,hex)
  inter <- inter[which(
    sf::st_geometry_type(inter$geometry) %>% as.character() %in%
      "POLYGON"),]
  listhex <- unique(inter$id_hex)
  # Area-weighted population: each piece gets POP * (piece area / cell area);
  # pop_hex is the sum over all pieces falling inside the hexagon.
  teste <- lapply(listhex,function(j){
    temp <- inter[inter$id_hex %in% j,]
    #message(j)
    temp$grad_rel <- sf::st_area(temp$geometry) %>% as.numeric()
    temp$grad_prop <- temp$grad_rel / temp$grad_total
    temp$pop_rel <- temp$grad_prop * temp$POP
    temp$pop_hex <- sum(temp$pop_rel)
    return(temp)
  }) %>% data.table::rbindlist() %>% sf::st_as_sf()
  #
  # Summary table: one row per hexagon with its total population.
  pop_dt <- data.table::as.data.table(teste)[,.SD[1],by = id_hex][,.(id_hex,pop_hex)]
  # Join populations back onto the full hexagon layer (right join on id_hex).
  hex1 <- as.data.table(hex)
  hex_pop <- as.data.table(pop_dt)[hex1,on = "id_hex"] %>% sf::st_as_sf()
  # Write output.
  readr::write_rds(hex_pop,paste0("dados/IBGE/rmc_pop_",i,".rds"))
  # BUG FIX: the progress message previously reported a ".shp" path although
  # the file written above is an ".rds".
  message(paste0("dados/IBGE/rmc_pop_",i,".rds"))
}
|
da1109971d12931614a32788fa413d8d24515d70
|
068edb35f376dc3eb572b0763d62b800dfe172a9
|
/three_ANA_sites/add_censored_status.R
|
a6a9aa4c2046d5885c27877304cc5875617f59e1
|
[] |
no_license
|
wdwatkins/loadflexBatch
|
8888cb0ba8d4852640d274a8ce651898175a57a7
|
9e9093b2d0ac2f0510685328fd6db771f9831b6e
|
refs/heads/master
| 2021-01-11T23:14:01.846607
| 2017-02-14T20:45:07
| 2017-02-14T20:45:07
| 78,558,430
| 0
| 2
| null | 2017-01-23T20:52:45
| 2017-01-10T17:47:59
|
R
|
UTF-8
|
R
| false
| false
| 419
|
r
|
add_censored_status.R
|
# Add a censoring-status column to each water-quality input file.
#
# For every CSV under PT/ and NO3/ (relative to this script's directory):
#   - add a `status` column, defaulting every row to 1
#   - randomly flip 5 rows to 0 or 2 to simulate censored observations
#   - overwrite the file in place
#
# NOTE(review): sampling is unseeded, so results differ between runs; call
# set.seed() beforehand if reproducibility matters.
lf <- list.files(c("PT", "NO3"), full.names = TRUE)
library(dplyr)
for (path in lf) {  # `path`, not `file`, to avoid shadowing base::file
  df <- read.csv(path, stringsAsFactors = FALSE)
  # Default status 1 for every row (scalar recycles over nrow(df) rows).
  df <- mutate(df, status = 1)
  # Randomly make 5 rows 0 or 2.
  # seq_len() avoids the 1:nrow(df) trap when a file has zero rows.
  df$status[sample(seq_len(nrow(df)), 5)] <- sample(c(0, 2), 5, replace = TRUE)
  write.csv(file = path, x = df, row.names = FALSE)
}
|
2c7ecc56a7660987512ad2733fcc4bf8992b7a20
|
19972d587adb296a49a32da61cc42ad2dfd81bb4
|
/R codes/Scatterplots_cBAAD+SS Media.R
|
3e1b16a098ef42f94eb889bbcb37ebe4892b4a05
|
[
"MIT"
] |
permissive
|
bmomeni/nasal-community-modeling
|
07997152bc80c70f057b0245bd115a2fecce4455
|
b32732b4d6c2556ffc1cbafa9521c7090b91dc53
|
refs/heads/main
| 2023-04-06T08:39:12.408534
| 2023-03-27T21:25:34
| 2023-03-27T21:25:34
| 341,382,468
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,308
|
r
|
Scatterplots_cBAAD+SS Media.R
|
# Last run 04/10/2020
# Clear workspace (script-style; beware when sourcing into a live session).
rm(list=ls())
#################################################
# Create scatterplots for GR-CC relationships
#################################################
####################################################################################################
# C. tuberculostearicum in cBAAD+SS dilutions
# Growth rates (1/hr) and carrying capacities (max OD600), one value per dilution.
GR_Ct <- c(0.418875,
           0.481875,
           0.558425,
           0.66295,
           0.663075,
           0.63545)
Max_Ct <- c(0.630666667,
            0.60775,
            0.54575,
            0.371,
            0.27375,
            0.23325)
DF_Ct <- data.frame(GR_Ct, Max_Ct)
DF_Ct
# Fit linear models: with intercept, and forced through the origin
# (`0 +` removes the intercept term).
Ct.lm <- lm(Max_Ct ~ GR_Ct + 1, data=DF_Ct)
Ct.lm2 <- lm(Max_Ct ~ 0 + GR_Ct, data=DF_Ct)
summary(Ct.lm)
summary(Ct.lm2)
# Scatter plot with labels and the through-origin regression line.
plot(GR_Ct, Max_Ct,
     xlab="Growth Rate (1/hr.)",
     ylab="Carrying Capacity (Max OD600)",
     xlim= c(0, 0.7),
     ylim= c(0, 0.7),
     main="Growth Rate vs. Carrying Capacity for C. tuberculostearicum \n Grown in Complex Carbon Concentrations",
     pch=16)
# FIX: pass the fitted model directly; `abline(lm(Ct.lm2))` re-ran lm() on an
# already-fitted model (it worked only by accident via the formula method).
abline(Ct.lm2)
legend("topleft", bty="n", legend= paste("R-squared= 0.730 \n p-value= 0.009"))
# Scatter plot without graph labels (figure-panel version).
plot(GR_Ct, Max_Ct,
     xlab= "Growth Rate (1/hr)",
     ylab= "Max OD600",
     xlim= c(0, 0.7),
     ylim= c(0, 0.7),
     cex= 1.5,
     pch=16)
#abline(Ct.lm2)
#op <- par(cex= 1.5)
#legend("topleft", bty="n", legend= paste("R-squared= 0.730 \n p-value= 0.009"))
################################################################################################3
# S. aureus in cBAAD+SS dilutions
GR_Sa <- c(1.102125,
           1.088575,
           1.116875,
           1.081475,
           1.0499,
           1.1184)
Max_Sa <- c(1.19375,
            0.85425,
            0.44175,
            0.20975,
            0.1375,
            0.15)
DF_Sa <- data.frame(GR_Sa, Max_Sa)
DF_Sa
# Linear models: with intercept, and forced through the origin.
Sa.lm <- lm(Max_Sa ~ GR_Sa + 1, data=DF_Sa)
Sa.lm2 <- lm(Max_Sa ~ 0 + GR_Sa, data=DF_Sa)
summary(Sa.lm)
summary(Sa.lm2)
# Scatter plot with labels and the through-origin regression line.
plot(GR_Sa, Max_Sa,
     xlab="Growth Rate (1/hr.)",
     ylab="Carrying Capacity (Max OD600)",
     xlim= c(0, 1.25),
     ylim= c(0, 1.25),
     main="Growth Rate vs. Carrying Capacity for S. aureus \n Grown in Complex Carbon Concentrations",
     pch=16)
# FIX: as above -- pass the model object itself to abline().
abline(Sa.lm2)
legend("topleft", bty="n", legend= paste("R-squared= 0.538 \n p-value= 0.0375"))
# Scatter plot without graph labels (figure-panel version).
plot(GR_Sa, Max_Sa,
     xlab= "Growth Rate (1/hr)",
     ylab= "Max OD600",
     xlim= c(0, 1.25),
     ylim= c(0, 1.25),
     cex= 1.5,
     pch=16)
#abline(Sa.lm2)
#op <- par(cex= 1.5)
#legend("topleft", bty="n", legend= paste("R-squared= 0.538 \n p-value= 0.0375"))
####################################################################################################
|
13ee82275432590b01401dbf335686396cf04755
|
90f7665f8bba521f609dad44ab45566989693928
|
/2019-10-23-mouse_c1_downstream/step5-linage-diff-and-annotation/step5.4-cluster-genes-annotate.R
|
f652a8b8d219bb3c939652829594afa98c955559
|
[] |
no_license
|
daisyyr/scRNA-codes
|
c8ec75e41da80a4f460fc49086e8bb4fd2bb9d82
|
d7a2e1249927c460366c873211e8b847ab7b9c22
|
refs/heads/master
| 2022-04-07T07:45:32.670931
| 2020-02-03T14:06:32
| 2020-02-03T14:06:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,956
|
r
|
step5.4-cluster-genes-annotate.R
|
### ---------------
### Creator: Yunze Liu (Reed Liu)
### Date: 2019-11-10
### Email: jieandze1314@gmail.com
### Title: Smartseq2-C1 mouse gonad development - further annotation of genes
###        after clustering
### ---------------
rm(list=ls())
options(stringsAsFactors = F)
library(clusterProfiler)
#############################
# The original authors re-integrated the gene groupings (17 groups -> 7)
#############################
# Download the authors' integrated data from:
# https://raw.githubusercontent.com/IStevant/XX-XY-mouse-gonad-scRNA-seq/master/data/female_lineages_DE_gene_pseudotime_clustered_annotated.csv
dyn_genes <- read.csv(file="../female_lineages_DE_gene_pseudotime_clustered_annotated.csv")
gene_names <- dyn_genes$Genes
# Convert gene IDs: SYMBOL -> ENTREZID (mouse annotation DB)
entrez_genes <- bitr(gene_names, fromType="SYMBOL", toType="ENTREZID", OrgDb="org.Mm.eg.db")
# Keep only dynamic genes that have a matching Entrez ID;
# drop=FALSE guarantees a data frame is returned even for one column
gene_clusters <- dyn_genes[dyn_genes$Genes %in% entrez_genes$SYMBOL,,drop=FALSE]
# Build the (ENTREZID, cluster) table for enrichment analysis
de_gene_clusters <- data.frame(
ENTREZID=entrez_genes[!duplicated(entrez_genes$SYMBOL),"ENTREZID"],
Gene_Clusters=gene_clusters$Gene.categories
)
# Run GO (Biological Process) enrichment, comparing across gene clusters
formula_res <- compareCluster(
ENTREZID~Gene_Clusters,
data=de_gene_clusters,
fun="enrichGO",
OrgDb="org.Mm.eg.db",
ont = "BP",
pAdjustMethod = "BH",
pvalueCutoff = 0.05,
qvalueCutoff = 0.05
)
# Run simplified GO enrichment analysis (collapses redundant GO terms)
lineage1_ego <- simplify(
formula_res,
cutoff=0.5,
by="p.adjust",
select_fun=min
)
# Write the full and simplified enrichment tables, then plot both.
write.csv(formula_res@compareClusterResult,
file="step5.4-A-female_compared_GO_term_DE_cluster.csv")
write.csv(lineage1_ego@compareClusterResult,
file="step5.4-B-female_compared_symplified_GO_term_DE_cluster.csv")
pdf(file="step5.4-C-female_GO_term_DE_genes_clusters.pdf", width=11, height=8)
dotplot(formula_res, showCategory=3)+ theme(aspect.ratio=0.8)
dotplot(lineage1_ego, showCategory=3)+ theme(aspect.ratio=2)
dev.off()
|
0bbdadb21d10740e5808710992b594b064b9f53d
|
1fca6aad81042030067e7d0b3d29f45d6e68ebb5
|
/HLEDecomp/Code/R/USAvsOther.R
|
f2d2de9e7bc31ff9c19c402e79ff10895a17f7a7
|
[] |
no_license
|
timriffe/HLEDecomp
|
ec07115d10b2cf4ddf88cdbcd18635f02f646f65
|
0b9525c1d579577f76e231a1ca6c13745d4f74e6
|
refs/heads/master
| 2022-09-08T04:36:16.777568
| 2022-08-27T08:36:07
| 2022-08-27T08:36:07
| 118,452,402
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,537
|
r
|
USAvsOther.R
|
# Author: tim
###############################################################################
# Compare US remaining life expectancy at age 50 (e50) against other
# high-income HMD countries, 1990 onward, and plot both sexes.
#
# NOTE(review): `us` and `pw` (HMD website credentials) are never defined in
# this script -- they must exist in the workspace before it is run.
library(HMDHFDplus)
CNTRS <- getHMDcountries()
CNTRIES_USE <- c("AUS", "AUT", "BEL", "CAN", "CHE",
"DEUTNP", "DNK", "ESP", "FIN",
"FRATNP","GRC", "IRL", "ISL", "ISR", "ITA",
"JPN", "KOR", "LUX", "NLD", "NOR", "NZL_NP",
"PRT", "SWE", "TWN", "GBR_NP", "USA")
# For each country: download male/female 1x1 life tables and extract e50
# (ex at Age == 50) for years >= 1990.
e50L <- lapply(CNTRIES_USE,function(cntry,us,pw){
mlt <- readHMDweb(cntry,"mltper_1x1",username=us,password=pw)
flt <- readHMDweb(cntry,"fltper_1x1",username=us,password=pw)
yrind <- sort(unique(mlt$Year[mlt$Year >= 1990]))
e50m <- mlt$ex[mlt$Age == 50 & mlt$Year >= 1990]
e50f <- flt$ex[flt$Age == 50 & flt$Year >= 1990]
data.frame(CNTRY = cntry, Year = yrind, Male = e50m, Female = e50f)
},us=us,pw=pw)
e50 <- do.call("rbind",e50L)
# Drop countries excluded after the fact (UKR is not in CNTRIES_USE, so that
# filter is a no-op kept for safety; KOR is dropped here).
e50 <- e50[e50$CNTRY != "UKR", ]
e50 <- e50[e50$CNTRY != "KOR", ]
library(reshape2)
# Year x Country matrices, one per sex.
e50m <- acast(e50, Year~CNTRY, value.var = "Male")
e50f <- acast(e50, Year~CNTRY, value.var = "Female")
# Keep only countries observed in both 1990 and 2014.
keep <- !is.na(e50m["2014",]) & !is.na(e50m["1990",])
e50m <- e50m[,keep]
e50f <- e50f[,keep]
# Colors: dark = USA (HRS), light = other HMD countries, by sex.
hrs_male_col <- "#053a8e"
hmd_male_col <- "#88aeea"
hrs_fem_col <- "#a50847"
hmd_fem_col <- "#ed9ebe"
pdf("/home/tim/git/HLEDecomp/HLEDecomp/Figures/USAvsOthers_R.pdf")
# NOTE(review): matplot hard-codes 1990:2017 as the x axis; this assumes every
# kept country has rows for all 28 years -- verify against the downloaded data.
plot(NULL, type = 'n',xlim = c(1990,2017),ylim=c(24,38), ann = FALSE,las=1, axes=FALSE)
matplot(1990:2017, e50m, type = 'l', col = hmd_male_col, lty = 1, add =TRUE)
lines(1990:2017, e50m[,"USA"],col = hrs_male_col, lwd = 2)
#plot(NULL, type = 'n',xlim = c(1990,2017),ylim=c(24,40), ann = FALSE,)
matplot(1990:2017, e50f, type = 'l', col = hmd_fem_col, lty = 1, add =TRUE)
lines(1990:2017, e50f[,"USA"],col = hrs_fem_col, lwd = 2)
axis(1)
axis(2,las=1)
# Highlight the 1996 and 2014 US values (HRS study endpoints, presumably).
points(c(1996,2014),e50m[c("1996","2014"),"USA"],pch=16,col=hrs_male_col,cex=1.2)
points(c(1996,2014),e50f[c("1996","2014"),"USA"],pch=16,col=hrs_fem_col,cex=1.2)
dev.off()
# US rank among the kept countries (23 assumed) in selected years.
sort(23-rank(e50f["1990", ], na.last = NA)) # 12
sort(23-rank(e50f["1996", ], na.last = NA)) # 17 out of 23
sort(23-rank(e50f["2014", ], na.last = NA)) # last 23 - 17 = 6
sort(23-rank(e50m["1990", ], na.last = NA)) # 12
sort(23-rank(e50m["1996", ], na.last = NA)) # 11 out of 23
sort(23-rank(e50m["2014", ], na.last = NA)) # penultimate after TWN (21)
#colnames(e50m)
#"AUS"
#"AUT"
#"BEL"
#"CHE"
#"DEUTNP"
#"DNK"
#"ESP"
#"FIN"
# "FRATNP"
# "IRL"
# "ISL"
# "ISR"
# "ITA"
# "JPN"
# "LUX"
# "NLD"
# "NOR"
# "PRT"
# "SWE"
# "TWN"
# "GBR_NP"
# "USA"
|
8e68f9001560b7a04dbdf47e8088eab48349938a
|
b16a5d56c2281543636ddc2b3cd15a61a94de7b0
|
/genesets/corum.r
|
8090909bbe4b21b4cd21a2e96e21a90d72951c00
|
[
"Apache-2.0"
] |
permissive
|
mschubert/ebits
|
b18bccde6198cb938c04be3704e9fdcff8e5be7d
|
e65b3941b44174e7267ee142387ffacafca11e53
|
refs/heads/master
| 2023-07-23T09:09:47.175229
| 2023-07-07T09:36:15
| 2023-07-07T09:36:15
| 18,678,011
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 841
|
r
|
corum.r
|
# CORUM protein-complex gene sets.
# Downloads complex tables from the CORUM database, caches them on disk, and
# converts them into named lists of HGNC gene-symbol vectors (one element per
# complex). `import_package` and `module_file` are project helpers defined
# elsewhere in this module system.
import_package("dplyr", attach=TRUE)

.url = "http://mips.helmholtz-muenchen.de/corum/download"

# Download (once) and read a CORUM table.
# fid: table identifier, e.g. "coreComplexes"; the zip is cached under
# module_file("data") so repeated calls use the local copy.
.get_cached = function(fid) {
fname = sprintf("%s.txt.zip", fid)
dest = file.path(module_file("data"), fname)
if (!file.exists(dest))
download.file(sprintf("%s/%s", .url, fname), destfile=dest)
readr::read_tsv(dest)
}

# Read a CORUM table and reshape it into list-of-symbols form:
# keep Human complexes, split the ";"-separated subunit gene names into one
# row per gene, then unstack into a list keyed by complex name.
.process = function(fid) {
.get_cached(fid) %>%
filter(Organism == "Human") %>%
select(hgnc=`subunits(Gene name)`, name=ComplexName) %>%
rowwise() %>%
mutate(hgnc = list(tibble(strsplit(hgnc, ";")[[1]]))) %>%
ungroup() %>%
tidyr::unnest(hgnc) %>%
unstack()
}

# Core CORUM complexes (manually curated core set).
corum_core = function() {
.process("coreComplexes")
}
# All CORUM complexes.
corum_all = function() {
.process("allComplexes")
}
# Spliceosome-variant complexes.
corum_splice = function() {
.process("spliceComplexes")
}
|
09b074d22b5b7098a009989760be4828a79cc40e
|
97d0b8880dfdf59d691708ca4a9f45defa38538f
|
/SCRIPTS/09_Res2_Donation_Regs.R
|
eb11e4f01bf6d11f51bd0f240d9b6d194a92b2b9
|
[] |
no_license
|
JShuman20/Econ-Thesis
|
59e2ab96746850b69ba5616482c76e2624c6f004
|
2bd0d4eaeed2958e2483c682ce32823daad7c8cf
|
refs/heads/master
| 2023-03-01T19:18:03.078057
| 2021-02-14T14:11:10
| 2021-02-14T14:11:10
| 253,827,214
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,563
|
r
|
09_Res2_Donation_Regs.R
|
#-----------------------------------------------------------------------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------------------------------------------------------------------#
#This Script Produces Regression Results For Section 2 of the Paper
#-----------------------------------------------------------------------------------------------------------------------------------------------#
# NOTE(review): `GET_SEs` (used below to compute robust standard errors) is not
# defined in this script -- it is presumably defined/sourced earlier in the
# pipeline; confirm before running standalone. `funs()` (used below) is
# deprecated in current dplyr; the code depends on a dplyr version that still
# supports it.
#-----------------------Loading Required Libraries-------------------------#
library(tidyverse)
library(purrr)
library(parallel)
library(readxl)
library(data.table)
library(lubridate)
library(zoo)
library(plm)
library(sandwich)
library(lmtest)
library(stargazer)
library(car)
#-----------------------------------Import TNC Panel and Transform Damage Variables---------------------------------------#
# State-by-week panel: log(1+x)-transform all lagged damage columns, add >0
# indicator versions (suffix _BIN), build time variables, restrict to
# 2011-2017, and bin 30-day lagged damages by severity.
TNC_CLEAN_WEEK_FINAL = read.csv("~/Google Drive/DATA/ECON/CLEAN/TNC_Merged_Week.csv")
TNC_CLEAN_WEEK_FINAL = TNC_CLEAN_WEEK_FINAL %>%
dplyr::mutate_at(which(str_detect(names(TNC_CLEAN_WEEK_FINAL), "Lag_")), funs(log(.+1))) %>%
dplyr::mutate_at(which(str_detect(names(TNC_CLEAN_WEEK_FINAL), "Lag_")), .funs = list(BIN = ~ ifelse(.>0,1,0))) %>%
mutate(WEEK_LOW = as.Date(WEEK_LOW)) %>%
mutate(AMT_LOG = log(AMT + 1)) %>%
mutate(PANEL_VAR = as.numeric(WEEK_LOW)) %>% #Panel "time" variable
mutate(YEAR = year(WEEK_LOW),
MONTH = month(WEEK_LOW),
DEC = ifelse(MONTH == 12,1,0)) %>% #December indicator and other time variables
filter(YEAR %in% as.character(2011:2017)) %>%
mutate(
BIN_30_Mil = ifelse( (Lag_0_30 > 0 & Lag_0_30 < log(1000000)),1,0),
BIN_30_G_MIL = ifelse(Lag_0_30 > log(1000000),1,0),
BIN_30_FiftyMil = ifelse( (Lag_0_30 > log(1000000) & Lag_0_30 < log(50000000)), 1,0),
BIN_30_G_FiftyMil = ifelse(Lag_0_30 >log(50000000),1,0),
BIN_30_FHMil = ifelse( (Lag_0_30 > log(50000000) & Lag_0_30 < log(400000000)), 1,0),
BIN_30_Huge = ifelse(Lag_0_30 > log(400000000), 1,0)) %>% #Creating Binned Damages
ungroup()
#----------------------------------------Table of Means for Results Section 2------------------------------------#
TNC_CLEAN_WEEK_FINAL %>%
select(COUNT_PER_MIL,Lag_0_30) %>%
mutate(Lag_0_30 = exp(Lag_0_30)/1000000) %>%
stargazer(., type = "latex",style = "aer",
title = "Summary Stats for TNC",
summary.stat = c("n", "mean","min","max","sd"),
covariate.labels = c("Donation Count", "30-Day Lagged Damage"),
out = "~/Desktop/ECON Thesis/OUTPUT/DATA_Section/Table_of_Means_TNC.tex")
#---------------------------------Table 1: Build Time Lags ---------------------------------------#
# Two-way (state + time) fixed-effects regressions of donations per million on
# cumulative lagged damage indicators, progressively adding lag windows.
#List of Regressions
Table1_TNC_Scaled = list(
D = plm(COUNT_PER_MIL ~ Lag_0_30_BIN + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW")),
E = plm(COUNT_PER_MIL ~ Lag_0_30_BIN + Lag_30_60_BIN + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW")),
G = plm(COUNT_PER_MIL ~ Lag_0_30_BIN + Lag_30_60_BIN + Lag_60_90_BIN + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW")),
A = plm(COUNT_PER_MIL ~ Lag_0_15_BIN + factor(YEAR) + factor(MONTH) , data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW")),
B = plm(COUNT_PER_MIL ~ Lag_0_15_BIN + Lag_15_30_BIN + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW")),
C = plm(COUNT_PER_MIL ~ Lag_0_15_BIN + Lag_15_30_BIN + Lag_30_45_BIN + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW"))
)
#Robust SE
Table1_TNC_Scaled_SE = lapply(Table1_TNC_Scaled, GET_SEs)
#Output Table
stargazer(Table1_TNC_Scaled, style = "aer", type = "latex", column.sep.width = "1", no.space = TRUE,
omit = "factor*", keep.stat = c("n","adj.rsq"), se = Table1_TNC_Scaled_SE,
title = "Lagged Storm Damages on TNC Donations",
dep.var.labels = "Count of TNC Donations",
covariate.labels = c("0-30","30-60","60-90","0-15","15-30","30-45"),
out = "~/Desktop/ECON Thesis/OUTPUT/STORM_ON_TNC/Scaled.tex")
#---------------------------------------Table 2: Different Storm Bins-------------------------------------------------#
# Heterogeneity by storm severity: replace the single >0 indicator with the
# mutually-exclusive damage bins built above.
#List of Regressions
Table2_TNC_Scaled = list(
A = plm(COUNT_PER_MIL ~ Lag_0_30_BIN + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW")),
D = plm(COUNT_PER_MIL ~ BIN_30_Mil + BIN_30_FiftyMil + BIN_30_FHMil + BIN_30_Huge + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW"))
)
#Robust Standard Errors
Table2_TNC_Scaled_SE = lapply(X = Table2_TNC_Scaled, GET_SEs)
#Outputting Results
stargazer(Table2_TNC_Scaled, style = "aer", type = "latex", column.sep.width = "3", no.space = TRUE,
omit = "factor*", keep.stat = c("n","adj.rsq"), se = Table2_TNC_Scaled_SE,
title = "Heterogeneity By Storm Severity",
dep.var.labels = "Count of TNC Donations",
covariate.labels = c(">0","0-1 Mil","1-50 Mil", "50-400 Mil", ">400 Mil"),
out = "~/Desktop/ECON Thesis/OUTPUT/STORM_ON_TNC/Scaled2.tex", append = TRUE)
#Creating Figure for Poster
# COV holds the robust standard errors for the four bin coefficients;
# RES builds 95% t-intervals around each estimate.
COEFS = Table2_TNC_Scaled$D$coefficients[1:4] #Coefficients
COV = GET_SEs(Table2_TNC_Scaled$D)[1:4] #SE estimates
DF = Table2_TNC_Scaled$D$df.residual
RES = data.frame(
estimate = COEFS,
conf.low = COEFS - qt(0.975, DF) * COV,
conf.high = COEFS + qt(0.975, DF) * COV #t-interval quantiles
) %>%
rownames_to_column(var = "term")
RES$term =factor(RES$term, c("BIN_30_Mil", "BIN_30_FiftyMil", "BIN_30_FHMil", "BIN_30_Huge"))
library(dotwhisker)
#Re-arranging Factors
RES = rbind(RES[4,], RES[3,], RES[2,], RES[1,])
#Creating Plot
dwplot(RES) +
scale_color_manual(values = "blue") +
coord_flip() +
scale_size_manual(values =3) +
geom_vline(aes(xintercept = 0), col = "red", lty = 2) +
scale_y_discrete(labels = c("<1 Mil", "1-50 Mil", "50-400 Mil", ">400 Mil"))+
ggtitle("95% Confidence Intervals For Coefficient Estimates") +
xlab("Coefficient - Donations Per Million People") +
ylab("Cumulative 30-Day Lagged Storm Property Damage") +
theme_bw() +
theme(axis.title = element_text(face = "bold", size =14),
axis.text = element_text(size = 11),
legend.position = "none",
title = element_text(face = "bold", size = 14))
#Testing Equality of Coefficients (Wald tests with HC1 robust variance)
linearHypothesis(Table2_TNC_Scaled$D, "BIN_30_Mil = BIN_30_FHMil", white.adjust = "hc1")
linearHypothesis(Table2_TNC_Scaled$D, "BIN_30_FiftyMil = BIN_30_FHMil", white.adjust = "hc1")
linearHypothesis(Table2_TNC_Scaled$D, "BIN_30_FiftyMil = BIN_30_Mil", white.adjust = "hc1")
linearHypothesis(Table2_TNC_Scaled$D, "BIN_30_Huge = BIN_30_FHMil", white.adjust = "hc1")
#-------------------------------------------Table 3: Storm Types----------------------------------------------#
# Placebo check: split damages into "placebo" (cold-weather) vs other storms.
#List of Regressions
Table3_TNC_Scaled = list(
A = plm(COUNT_PER_MIL ~ Lag_0_30_BIN + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW")),
B = plm(COUNT_PER_MIL ~ Lag_Placebo_0_30_BIN + factor(YEAR) + factor(DEC), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW")),
C = plm(COUNT_PER_MIL ~ Lag_Not_Placebo_0_30_BIN + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW")),
D = plm(COUNT_PER_MIL ~ Lag_Not_Placebo_0_30_BIN + Lag_Placebo_0_30_BIN + factor(YEAR) + factor(MONTH), data = TNC_CLEAN_WEEK_FINAL,
model = "within", index = c("state","WEEK_LOW"))
)
#Robust SE
Table3_TNC_Scaled_SE = lapply(X = Table3_TNC_Scaled, GET_SEs)
#Outputting Results
stargazer(Table3_TNC_Scaled, style = "aer", type = "latex", column.sep.width = "3", no.space = TRUE,
omit = "factor*", keep.stat = c("n","adj.rsq"), se = Table3_TNC_Scaled_SE,
title = "Heterogeneity By Storm Severity",
covariate.labels = c("$>0 Damage (All)","$>0$ Damage (Cold)","$>0$ Damage (Non-Cold)"),
dep.var.labels = "Count of TNC Donations",
out = "~/Desktop/ECON Thesis/OUTPUT/STORM_ON_TNC/Scaled3.tex")
|
771ea2ba4b226ea2340c5c27f1f1f3b6037804dd
|
5bebc5568f733312fc50605b7f4d287bfa249bfc
|
/R/afni_3drefit.R
|
b3ea8056567eb0f1c5e78076321117c8840c91bd
|
[] |
no_license
|
neuroconductor/afnir
|
b583015d424a982939512b8f80080969eb2a9acf
|
7f917abf9fe015104f013d711b81830332b9a283
|
refs/heads/master
| 2021-07-09T05:05:18.436785
| 2021-05-16T23:03:42
| 2021-05-16T23:03:43
| 93,069,314
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,526
|
r
|
afni_3drefit.R
|
#' @title AFNI 3drefit function
#' @description Wrapper for AFNI \code{3drefit} function
#'
#' @param file nifti object or NIfTI filename to change the header
#' @param opts Additional options passed to \code{3drefit}
#' @param ... not currently used
#'
#' @return Output filename of the image (with an \code{afni_version} attribute)
#' @importFrom neurobase parse_img_ext
#' @export
afni_3drefit = function(
  file,
  opts = "",
  ...) {

  func = "3drefit"
  # Coerce nifti objects to an on-disk filename; plain arrays are rejected.
  file = checkimg(file, allow_array = FALSE)

  #############################################
  # Making all the options
  #############################################
  # Drop empty option strings and collapse the rest into a single string
  # passed before the input file on the 3drefit command line.
  opts = trimws(opts)
  opts = opts[ opts != "" ]
  opts = paste(opts, collapse = " ")

  # Rebuild the image extension (preserving ".gz" for gzipped NIfTI) and copy
  # the input to a tempfile: 3drefit edits its input's header in place, so we
  # operate on the copy and leave the caller's file untouched.
  img_ext = neurobase::parse_img_ext(file)
  ext = tools::file_ext(file)
  # scalar comparison -> `if`, not vectorized ifelse()
  img_ext = paste0(img_ext, if (identical(ext, "gz")) ".gz" else "")
  outfile = tempfile(fileext = paste0(".", img_ext))
  file.copy(file, outfile)

  res = afni_cmd(
    file = outfile,
    func = func,
    opts = "",
    frontopts = opts,
    outfile = NULL,
    samefile = TRUE,
    add_ext = FALSE,
    quote_outfile = FALSE,
    retimg = FALSE
  )
  # afni_cmd returns the command's exit status; non-zero means 3drefit failed.
  if (res != 0) {
    warning(paste0("Result does not indicate success ",
                   "- function may not work as expected!"))
  }
  attr(outfile, "afni_version") = afni_version()
  return(outfile)
}
#' @rdname afni_3drefit
#' @export
# Convenience alias: forwards all arguments unchanged to afni_3drefit().
refit = function(...) {
afni_3drefit(...)
}
|
3a2114a635f64e85216d726668631c7ecfdbd646
|
9737af8e9a42072fce6d77d921655451b774f485
|
/Project.R
|
8e38869c5e67a28ee8a7a8a29a1ce2bbcd8575e1
|
[] |
no_license
|
Pulkit2810/RR1
|
83ce4b7b5425a354a8bf6fd4c89cf58f5ebffea7
|
2514205387b3e810e09a30e4d673ce60a4b6abb7
|
refs/heads/master
| 2022-10-23T04:21:42.886209
| 2020-06-13T08:46:39
| 2020-06-13T08:46:39
| 271,574,715
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,851
|
r
|
Project.R
|
# Activity-monitor analysis (Reproducible Research course project).
# Expects a data frame `data` with columns steps/date/interval (activity.csv)
# to be loaded in the workspace before this script runs -- TODO confirm loader.

# Total steps per day; aggregate() with a formula drops NA rows.
steps_by_day <- aggregate(steps ~ date, data, sum)
hist(steps_by_day$steps, main = paste("Total Steps Each Day"), col="green",xlab="Number of Steps")
rmean <- mean(steps_by_day$steps)
rmean
rmedian <- median(steps_by_day$steps)
rmedian

# Average steps per 5-minute interval across all days.
steps_by_interval <- aggregate(steps ~ interval, data, mean)
plot(steps_by_interval$interval,steps_by_interval$steps, type="l", xlab="Interval", ylab="Number of Steps",main="Average Number of Steps per Day by Interval")
# Interval with the highest average step count.
max_interval <- steps_by_interval[which.max(steps_by_interval$steps),1]
max_interval

# Impute missing step counts with that interval's all-day average.
StepsAverage <- aggregate(steps ~ interval, data = data, FUN = mean)
fillNA <- numeric(nrow(data))  # preallocated (was grown with c() per row)
for (i in seq_len(nrow(data))) {
  obs <- data[i, ]
  if (is.na(obs$steps)) {
    fillNA[i] <- subset(StepsAverage, interval == obs$interval)$steps
  } else {
    fillNA[i] <- obs$steps
  }
}
# FIX: `new_activity` was referenced below but never created -- build the
# imputed data set from the original data plus the filled step counts.
new_activity <- data
new_activity$steps <- fillNA

# Daily totals after imputation, overlaid on the pre-imputation histogram.
StepsTotalUnion <- aggregate(steps ~ date, data = new_activity, sum, na.rm = TRUE)
hist(StepsTotalUnion$steps, main = paste("Total Steps Each Day"), col="blue", xlab="Number of Steps")
hist(steps_by_day$steps, main = paste("Total Steps Each Day"), col="green", xlab="Number of Steps", add=TRUE)
legend("topright", c("Imputed", "Non-imputed"), col=c("blue", "green"), lwd=10)
rmeantotal <- mean(StepsTotalUnion$steps)
rmeantotal
# FIX: `rmediantotal` was used below but never computed.
rmediantotal <- median(StepsTotalUnion$steps)
rmediantotal
rmediandiff <- rmediantotal - rmedian
rmediandiff
rmeandiff <- rmeantotal - rmean
rmeandiff

# Weekday vs weekend panel plot. (The `weekdays` character vector shadows
# base::weekdays only as a variable; the call below still finds the function.)
weekdays <- c("Monday", "Tuesday", "Wednesday", "Thursday",
              "Friday")
new_activity$dow <- as.factor(ifelse(is.element(weekdays(as.Date(new_activity$date)),weekdays), "Weekday", "Weekend"))
StepsTotalUnion <- aggregate(steps ~ interval + dow, new_activity, mean)
library(lattice)
xyplot(StepsTotalUnion$steps ~ StepsTotalUnion$interval|StepsTotalUnion$dow, main="Average Steps per Day by Interval",xlab="Interval", ylab="Steps",layout=c(1,2), type="l")
|
bc8a2ca97824f7c2387e40854d6f22a1eecfc5a7
|
757c9a3ad35814d5db3dec3ece0f76e5ec372daa
|
/Analise.R
|
77841c9c9429fd3cee341ae3710b914fd8c6e5ce
|
[] |
no_license
|
LucasTerciotti/AnaliseExploratoria
|
c1ca9661ad3809ece1bc767c9cca8195ea0a558d
|
f5b783459321410b91eabc5356d185680855726f
|
refs/heads/master
| 2020-03-20T03:08:42.965137
| 2018-06-29T15:00:50
| 2018-06-29T15:00:50
| 137,136,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,499
|
r
|
Analise.R
|
# DATA PREPARATION - done before the exploratory analysis
titanic <- read.csv(("https://raw.githubusercontent.com/Efsilvaa/EPSMLRepo/master/Data/titanic.csv"),
                    stringsAsFactors=FALSE,
                    na.strings = c(""))
library(tidyverse)
library(ggplot2)
# FIX: rpart() is used below for age imputation but the package was never
# loaded, so the script failed at that call.
library(rpart)
str(titanic)
titanic$Embarked <- as.factor(titanic$Embarked)
titanic$Survived <- as.factor(titanic$Survived)
summary(titanic$Embarked)
# Fill missing embarkation port with the most common value ("S"),
# then refactor to drop any unused levels.
titanic$Embarked[is.na(titanic$Embarked)] <- "S"
titanic$Embarked <- factor(titanic$Embarked)
summary(titanic$Embarked)
str(titanic)
# SEX:
titanic$Sex <- as.factor(titanic$Sex)
summary(titanic$Sex)
contrasts(titanic$Sex)
# Impute missing ages with a regression tree fitted on the complete cases.
Agefit <- rpart(Age ~ Pclass + Sex + SibSp +
                Parch + Fare + Embarked,
                data=titanic[!is.na(titanic$Age),],
                method="anova")
titanic$Age[is.na(titanic$Age)] <-
  predict(Agefit, titanic[is.na(titanic$Age),])
summary(titanic$Age)
# Exploratory Analysis
# quick checks:
glimpse(titanic)
filter(titanic, Survived == 1, Age == 30) # filter rows down to specific cases
select(titanic,Survived, Age, Sex, PassengerId)
summarise(titanic, Age = mean(titanic$Age))
# Curiosity: what is the mean age of those who died? And of the survivors?
Alive <- group_by(titanic, Survived)
summarise(Alive, Age = mean(Age, na.rm = TRUE))
# Basic metrics: six number summary
summary(titanic)
# Boxplots:
ggplot(data = titanic, aes(x = "", y = Age) ) +
  geom_boxplot() +
  geom_jitter()
ggplot(data = titanic, aes(x=Sex, y=Age)) +
  geom_boxplot(aes(color=Survived))
ggplot(data = titanic, aes(x=Survived, y=Age)) +
  geom_boxplot(aes(color= Embarked))
# scatterplot (Age ~ Survived)
ggplot(data = titanic) +
  geom_point(mapping = aes(x = Age, y = Survived))
# scatterplot (Age ~ Embarked)
ggplot(data = titanic) +
  geom_point(mapping = aes(x = Age, y = Embarked))
ggplot(data = titanic) +
  geom_point(mapping = aes(x = Age, y = Embarked, color = Survived))
# scatterplot (Sex ~ Age)
ggplot(data = titanic) +
  geom_point(mapping = aes(x = Sex, y = Age))
ggplot(data = titanic) +
  geom_point(mapping = aes(x = Sex, y = Age, color = Survived))
ggplot(data = titanic) +
  geom_point(mapping = aes(x = PassengerId, y = Sex, color = Survived)) +
  facet_wrap( ~ Age )
# Density
ggplot(data = titanic, aes(Age)) +
  geom_density(aes(fill=Survived,color=Survived), alpha=0.4)
# Histogram
ggplot(data = titanic, aes(Age)) +
  geom_histogram(aes(color=Survived, fill=Survived), alpha=0.5)
|
8a859a9dcbeb45927a374dd05ad756805fcab496
|
771d9236b852638924858f6f38bbadb496e10426
|
/R/RetrieveSaccades.R
|
600bdbf99eae44502862126f623aaeecfdbf0e6e
|
[] |
no_license
|
sascha2schroeder/popEye
|
e8c8de1de72d0c5f937c2710b8b03e9834ea7435
|
ef6cfca89e71f324cbb7970061cdf23ece7b63b4
|
refs/heads/master
| 2023-05-24T14:11:35.312441
| 2022-09-08T10:17:15
| 2022-09-08T10:17:15
| 173,056,807
| 17
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,901
|
r
|
RetrieveSaccades.R
|
# Derive saccades from consecutive fixations of one trial.
#
# Builds dat$item[[trial]]$sac with one row per gap between fixations
# (start/stop samples, start/end coordinates, line and letter positions),
# flags rows overlapping a parsed BLINK event, and applies optional x/y drift
# correction. Reads experiment settings (`exp$setup$...`) from `env`, which
# defaults to the caller's caller via parent.frame(n = 2) -- this function is
# therefore sensitive to where in the call stack it is invoked from.
#
# dat:   trial data structure with $item[[trial]]$fix, $parse, and $meta
# trial: trial index into dat$item
# env:   environment providing `exp` (drift/font settings)
# Returns dat with $item[[trial]]$sac filled in.
RetrieveSaccades <- function(dat, trial, env = parent.frame(n = 2)) {

# trial = 1

# setup output: (n_fix - 1) saccades, 12 columns
# NOTE(review): assumes at least 2 fixations; with fewer, 1:(n-1) misbehaves.
dat$item[[trial]]$sac <-
data.frame(matrix(NA, (nrow(dat$item[[trial]]$fix) - 1), 12))
colnames(dat$item[[trial]]$sac) <-
c("num", "start", "stop", "xs", "ys", "xe", "ye", "msg", "lines", "linee",
"lets", "lete")

# extract saccades: saccade i spans the samples between fixation i's end and
# fixation (i+1)'s start; coordinates/lines/letters come from those fixations
for (i in 1:(nrow(dat$item[[trial]]$fix) - 1)){

dat$item[[trial]]$sac$num[i] <- i
dat$item[[trial]]$sac$start[i] <- dat$item[[trial]]$fix$stop[i] + 1
dat$item[[trial]]$sac$stop[i] <- dat$item[[trial]]$fix$start[i + 1] - 1
dat$item[[trial]]$sac$xs[i] <- dat$item[[trial]]$fix$xs[i]
dat$item[[trial]]$sac$ys[i] <- dat$item[[trial]]$fix$ys[i]
dat$item[[trial]]$sac$xe[i] <- dat$item[[trial]]$fix$xs[i + 1]
dat$item[[trial]]$sac$ye[i] <- dat$item[[trial]]$fix$ys[i + 1]
dat$item[[trial]]$sac$lines[i] <- dat$item[[trial]]$fix$line[i]
dat$item[[trial]]$sac$linee[i] <- dat$item[[trial]]$fix$line[i + 1]
dat$item[[trial]]$sac$lets[i] <- dat$item[[trial]]$fix$line.let[i]
dat$item[[trial]]$sac$lete[i] <- dat$item[[trial]]$fix$line.let[i + 1]
# NOTE(review): assigns the whole msg column on every iteration; harmless
# but redundant -- could be set once outside the loop.
dat$item[[trial]]$sac$msg <- "SAC"

}

# check blinks: re-label saccades whose start coincides with a parsed BLINK
blink <- dat$item[[trial]]$parse[dat$item[[trial]]$parse$msg == "BLINK", 1:7]

for (i in 1:nrow(dat$item[[trial]]$sac)) {
if (dat$item[[trial]]$sac$start[i] %in% blink$start) {
dat$item[[trial]]$sac$msg[i] <- "BLINK"
}
}

# NOTE: deletes first saccade (if there is one)

# NOTE: deletes last saccade (if there is one)


# drift correct
# ---------------

# x axis: shift coordinates by the trial's measured drift, when enabled and
# a drift estimate exists; otherwise copy the raw coordinates
if (env$exp$setup$assign$driftX == T) {
if (is.na(dat$item[[trial]]$meta$drift) == F) {
dat$item[[trial]]$sac$xsn <- dat$item[[trial]]$sac$xs - dat$item[[trial]]$meta$drift.x
dat$item[[trial]]$sac$xen <- dat$item[[trial]]$sac$xe - dat$item[[trial]]$meta$drift.x
} else {
dat$item[[trial]]$sac$xsn <- dat$item[[trial]]$sac$xs
dat$item[[trial]]$sac$xen <- dat$item[[trial]]$sac$xe
}
} else {
dat$item[[trial]]$sac$xsn <- dat$item[[trial]]$sac$xs
dat$item[[trial]]$sac$xen <- dat$item[[trial]]$sac$xe
}

# y axis: as above, additionally re-centering by half the font height
if (env$exp$setup$assign$driftY == T) {
if (is.na(dat$item[[trial]]$meta$drift) == F) {
dat$item[[trial]]$sac$ysn <- dat$item[[trial]]$sac$ys - dat$item[[trial]]$meta$drift.y + env$exp$setup$font$height / 2
dat$item[[trial]]$sac$yen <- dat$item[[trial]]$sac$ye - dat$item[[trial]]$meta$drift.y + env$exp$setup$font$height / 2
} else {
dat$item[[trial]]$sac$ysn <- dat$item[[trial]]$sac$ys
dat$item[[trial]]$sac$yen <- dat$item[[trial]]$sac$ye
}
} else {
dat$item[[trial]]$sac$ysn <- dat$item[[trial]]$sac$ys
dat$item[[trial]]$sac$yen <- dat$item[[trial]]$sac$ye
}

return(dat)
}
|
304566c8ef20b81b42505e4270a4936a0193fcf6
|
5416264e9a51f3f5e45940ae63e18594ada711e5
|
/statCognition/R/value.R
|
b3822e4fe6db8b79475120831089f31a29feea50
|
[
"MIT"
] |
permissive
|
linnykos/statCognition
|
452d31fe96a62875a24395483de443d6f0b5ab95
|
f3f2867b0e50f6a2a0194c14d9d8404833d267c4
|
refs/heads/master
| 2021-06-16T18:38:26.955235
| 2017-05-14T17:58:02
| 2017-05-14T17:58:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,785
|
r
|
value.R
|
#' Value function
#'
#' Wrap a decision surface into a \code{value} object.
#'
#' \code{surface} is a list with two components. \code{surface$hash} is a hash
#' table keyed by block-coordinate strings such as "1-5" (one index per state
#' element -- here, the first block of state element 1 and the fifth block of
#' state element 2), whose values are data frames holding that block's
#' \code{value} and the \code{action} taken there. \code{surface$block_list}
#' is a list with one numeric vector per state element, mapping regions of the
#' state element's domain to blocks.
#'
#' @param surface list of \code{hash} and \code{block_list}
#' @param contribution_ll list of lists of contribution objects, retained for
#'   visualization purposes only
#'
#' @return value object
#' @export
value <- function(surface, contribution_ll = NA){
  obj <- structure(
    list(surface = surface, contribution_ll = contribution_ll),
    class = "value"
  )
  # Fail fast on malformed input before handing the object back.
  is_valid(obj)
  obj
}
#' Checks value object for validity
#'
#' Errors (via \code{stopifnot}) unless \code{obj} has exactly the expected
#' structure: a \code{surface} list holding a \code{hash} table and a
#' \code{block_list} of plain numeric vectors, with every stored action
#' index >= 1.
#'
#' @param obj The object to check
#' @param ... not used
#'
#' @return boolean
#' @export
is_valid.value <- function(obj, ...){
  stopifnot(identical(names(obj), c("surface", "contribution_ll")))
  stopifnot(identical(names(obj$surface), c("hash", "block_list")))
  # inherits() is the robust way to test class membership
  # (class(x) == "cls" breaks when an object has multiple classes)
  stopifnot(inherits(obj$surface$hash, "hash"))
  # is.numeric() accepts both double and integer block vectors, where the
  # old class(x) == "numeric" test rejected integer vectors
  stopifnot(all(vapply(obj$surface$block_list, is.numeric, logical(1))))
  stopifnot(!any(vapply(obj$surface$block_list, is.matrix, logical(1))))
  # row 2 of the hash values holds the action chosen in each block;
  # actions are 1-based indices, so all must be >= 1
  action <- as.numeric(unlist(hash::values(obj$surface$hash)[2,]))
  stopifnot(min(action) >= 1)
  TRUE
}
# Internal: number of state elements encoded in a value object's hash keys.
# Keys have the form "b1-b2-...-bk", so the number of "-"-separated fields
# equals the number of state elements.
.num_states <- function(obj){
  # inherits() instead of class(obj) == "value": robust to subclassing
  stopifnot(inherits(obj, "value"))
  length(strsplit(hash::keys(obj$surface$hash)[[1]], split = "-")[[1]])
}
|
af0506d199c468c1377b318205d6c458300febe1
|
91fc1f1d7404c7023c9e164f40cac9cad9b38e59
|
/man/BalancedSample.Rd
|
d55167654a9bb083fe28ffef01c098f820d53451
|
[
"MIT"
] |
permissive
|
msenosain/denoisingCTF
|
4ce6dd29e950b7d0c733acb31b7b6f7c443033a6
|
97701a018a2e20dea43aff296f1667b7b2ed1195
|
refs/heads/master
| 2021-07-04T15:20:05.898871
| 2021-01-27T22:33:10
| 2021-01-27T22:33:10
| 221,529,482
| 1
| 0
|
MIT
| 2019-11-18T15:13:59
| 2019-11-13T18:51:27
|
R
|
UTF-8
|
R
| false
| true
| 739
|
rd
|
BalancedSample.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_building.R
\name{BalancedSample}
\alias{BalancedSample}
\title{Obtain a class-balanced sample}
\usage{
BalancedSample(df, sample_size = 5000, class_col = class_col)
}
\arguments{
\item{df}{An object of class \code{data.frame}.}
\item{sample_size}{Numeric. Size of the sample from each class.}
\item{class_col}{A character vector with the name of the column that identifies
the classes.}
}
\value{
Returns a \code{data.frame} with n=\code{sample_size} rows per class
randomly sampled.
}
\description{
This function samples the rows of a \code{data.frame} with balanced classes.
Useful when original training set has severely unbalanced classes.
}
|
423f4dab9aa5e85aa7f93981cc9530a1cc31dd68
|
98550ab8b21f1d86f5954886911fc01498ef7699
|
/R/packageCountry.R
|
3ec0bf779e8764506dd9c6d5ae932ba2a1cd43f7
|
[] |
no_license
|
lindbrook/packageRank
|
a68ee94e0ed3621e7f10239f1eb2d12dbb7c6530
|
a83ebfaa05f6ee82b7e5ae76cf0b8a4c296b4dfb
|
refs/heads/master
| 2023-08-04T21:18:01.261280
| 2023-08-01T22:00:29
| 2023-08-01T22:00:29
| 184,319,415
| 27
| 1
| null | 2023-08-01T22:00:20
| 2019-04-30T19:25:45
|
R
|
UTF-8
|
R
| false
| false
| 2,354
|
r
|
packageCountry.R
|
#' Package download counts by country.
#'
#' From RStudio's CRAN Mirror http://cran-logs.rstudio.com/
#' @param packages Character. Vector of package name(s).
#' @param date Character. Date. "yyyy-mm-dd". NULL uses latest available log.
#' @param all.filters Logical. Master switch for filters.
#' @param ip.filter Logical.
#' @param triplet.filter Logical.
#' @param small.filter Logical.
#' @param sequence.filter Logical.
#' @param size.filter Logical.
#' @param sort Logical. Sort by download count.
#' @param na.rm Logical. Remove NAs.
#' @param memoization Logical. Use memoization when downloading logs.
#' @param check.package Logical. Validate and "spell check" package.
#' @param multi.core Logical or Numeric. \code{TRUE} uses \code{parallel::detectCores()}. \code{FALSE} uses one, single core. You can also specify the number logical cores. Mac and Unix only.
#' @param dev.mode Logical. Development mode uses parallel::parLapply().
#' @export
packageCountry <- function(packages = "cholera", date = NULL,
  all.filters = FALSE, ip.filter = FALSE, triplet.filter = FALSE,
  small.filter = FALSE, sequence.filter = FALSE, size.filter = FALSE,
  sort = TRUE, na.rm = FALSE, memoization = TRUE, check.package = TRUE,
  multi.core = TRUE, dev.mode = FALSE) {

  # master switch: enable every individual filter at once
  if (all.filters) {
    ip.filter <- TRUE
    triplet.filter <- TRUE
    small.filter <- TRUE
    sequence.filter <- TRUE
    size.filter <- TRUE
  }

  # BUG FIX: multi.core and dev.mode were previously hard-coded to
  # TRUE/FALSE in this call, silently ignoring the user's arguments.
  p.log <- packageLog(packages = packages, date = date,
    ip.filter = ip.filter, triplet.filter = triplet.filter,
    small.filter = small.filter, sequence.filter = sequence.filter,
    size.filter = size.filter, memoization = memoization,
    check.package = check.package, multi.core = multi.core,
    dev.mode = dev.mode)

  # tabulate downloads by country; na.rm drops entries whose country is
  # unknown ("no" is table()'s default, "ifany" keeps an NA bucket)
  use.na <- if (na.rm) "no" else "ifany"
  if (is.data.frame(p.log)) {
    # single package: one table
    out <- table(p.log$country, useNA = use.na)
  } else if (is.list(p.log)) {
    # multiple packages: one table per package
    out <- lapply(p.log, function(x) table(x$country, useNA = use.na))
  }

  if (sort) {
    if (is.table(out)) {
      out <- sort(out, decreasing = TRUE)
    } else if (is.list(out)) {
      out <- lapply(out, function(x) sort(x, decreasing = TRUE))
      names(out) <- names(p.log)
    }
  }
  out
}
|
6ab8c0afd99624b5a6584924c620cbcadfe8bff8
|
5be5233c70855f78773e177f9a2ff5795aafb8c5
|
/cbsots/tests/testthat/test_get_ts_84328NED.R
|
65c9888b7fa476150eeaf5d7f13e5a61e9767068
|
[] |
no_license
|
timemod/cbsots
|
5057c3d38754aae175776d857f9c4916a9e5af73
|
3523b0eaa87eeee6425d80cbceb9668ca76c3ce1
|
refs/heads/master
| 2023-06-22T22:35:10.602907
| 2023-06-12T08:06:00
| 2023-06-12T08:06:00
| 121,116,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,604
|
r
|
test_get_ts_84328NED.R
|
# Regression tests for get_ts() on CBS table 84328NED (cbsots package).
# The table's CBS meta data contains a duplicate Topic key, so every
# successful call is expected to emit a specific warning.
library(cbsots)
library(testthat)
# NOTE(review): rm(list = ls()) in a script is generally discouraged;
# kept here because the test harness appears to rely on a clean workspace.
rm(list = ls())
# table id under test; set update_expected <- TRUE to regenerate the
# stored expected outputs instead of comparing against them
id <- "84328NED"
update_expected <- FALSE
# C collation makes sort order platform independent
dum <- Sys.setlocale("LC_COLLATE", "C")
# Use UTF-8 encoding, because the Titles contains diacritical characters
# and the data files have been created with UTF-8 encoding.
options(encoding = "UTF-8")
# timeseries coding for the table, plus shared test helper functions
ts_code <- readRDS(sprintf("tscode/tscode_%s.rds",id))
source("utils/check_ts_table.R")
source("utils/read_match_report.R")
source("utils/check_titles_and_labels.R")
raw_cbs_dir <- "raw_cbs_data"
# Happy path: get_ts() warns about the duplicate meta-data key but still
# returns a result consistent with the raw data and the stored labels.
test_that(id, {
  msg <- paste("Duplicate keys in cbs meta data for dimension Topic in",
               "table 84328NED:\n'KapitaalgoederenvoorraadEindbalans_1'\\.")
  expect_warning(result1 <- get_ts(id, ts_code, download = FALSE),
                 msg)
  check <- check_ts_table(result1, id, raw_cbs_dir = raw_cbs_dir)
  expect_true(check)
  expected_label_file <- file.path("expected_output", paste0(id, "_1_labels.rds"))
  expect_ts_labels_equal(ts_labels(result1$Y), expected_label_file,
                         update = update_expected)
})
# Error paths: selecting duplicate keys or duplicate codes must raise
# informative errors; an unselected duplicate code only warns.
test_that(paste(id, "errors"), {
  ts_code_err <- ts_code
  # select the duplicated key as well -> hard error
  ts_code_err$`84328NED`$codes$Topic[9, "Select"] <- TRUE
  ts_code_err$`84328NED`$codes$Topic[9, "Code"] <- "xxx"
  msg <- paste0("Duplicate keys selected in timeseries coding for dimension",
                " Topic in table 84328NED:\n",
                "'KapitaalgoederenvoorraadBeginbalans_3', 'Afschrijvingen_6'",
                ", 'KapitaalgoederenvoorraadEindbalans_1'\\.")
  expect_error(get_ts(id, ts_code_err, download = FALSE), msg)
  ts_code_err <- ts_code
  # duplicate code "wnd" on an unselected row -> warning only
  ts_code_err$`84328NED`$codes$Topic[7, "Code"] <- "wnd"
  expect_warning(result1 <- get_ts(id, ts_code_err, download = FALSE))
  check <- check_ts_table(result1, id, raw_cbs_dir = raw_cbs_dir)
  expect_true(check)
  # selecting the row with the duplicate code -> hard error
  ts_code_err$`84328NED`$codes$Topic[7, "Select"] <- TRUE
  msg <- "Duplicate codes found for Topic:\nwnd\n."
  expect_error(get_ts(id, ts_code_err, download = FALSE), msg)
})
# Same happy path with an alternative timeseries coding file.
test_that(paste(id, "alt"), {
  ts_code <- readRDS(sprintf("tscode/tscode_%s_2.rds",id))
  msg <- paste("Duplicate keys in cbs meta data for dimension Topic in",
               "table 84328NED:\n'KapitaalgoederenvoorraadEindbalans_1'\\.")
  expect_warning(result1 <- get_ts(id, ts_code, download = FALSE),
                 msg)
  check <- check_ts_table(result1, id, raw_cbs_dir = raw_cbs_dir)
  expect_true(check)
  expected_label_file <- file.path("expected_output", paste0(id, "_2_labels.rds"))
  expect_ts_labels_equal(ts_labels(result1$Y), expected_label_file,
                         update = update_expected)
})
|
20ed8c0541d07ce4510d15b287d5b17a7759eeda
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/wordspace/man/normalize_rows.Rd
|
a62ae84e037809ce6dec509b0f11f5fb1f3ba8ca
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,319
|
rd
|
normalize_rows.Rd
|
\name{normalize.rows}
\alias{normalize.rows}
\alias{normalize.cols}
\title{
Normalize Rows or Columns of Matrix to Unit Length (wordspace)
}
\description{
Efficiently normalize the row or column vectors of a dense or sparse matrix to unit length.
}
\usage{
normalize.rows(M, method = "euclidean", p = 2, \dots,
tol = 1e-6, inplace = FALSE)
normalize.cols(M, method = "euclidean", p = 2, \dots,
tol = 1e-6, inplace = FALSE)
}
\arguments{
\item{M}{a dense or sparse numeric matrix}
\item{method}{norm to be computed, see \code{\link{rowNorms}}}
\item{p}{exponent of Minkowski p-norm in the range \eqn{0 < p \le \infty}. Note that normalization is not possible for very small values of \eqn{p}.}
\item{\dots}{any further arguments are passed to \code{\link{rowNorms}} (or \code{\link{colNorms}})}
\item{tol}{row/column vectors with norm below \code{tol} are assumed to be all zeroes and cannot be normalized (see \dQuote{Details} below)}
\item{inplace}{if \code{TRUE}, modify the matrix \code{M} in place. Don't ever set this argument to \code{TRUE}.}
}
\details{
These functions return a matrix with row (or column) vectors rescaled to a length of 1 according to the selected norm.
All-zero vectors (with \eqn{\|0\| = 0}{|0| = 0}) cannot be normalized. In order to avoid scaling up rounding errors, rows (or columns) with \eqn{\|x\| < tol}{|x| < tol} are explicitly set to 0 (and thus not normalized). Since a suitable threshold for rounding errors depends on the scaling behaviour of the selected norm and the provenance of \eqn{M}, it is advisable to set \code{tol} explicitly to an appropriate value. Pass \code{tol = 0} to normalize all nonzero vectors.
The generalized Minkowski norm with \eqn{p < 1} is not homogeneous but can still be normalized. This is numerically unstable for very small values of \eqn{p}, which will be rejected with an error message. The Hamming length (\eqn{p = 0}) cannot be normalized at all. See \code{\link{rowNorms}} for more information.
}
\value{
A row-normalized (or column-normalized) matrix with the same dimensions as \eqn{M}.
}
\seealso{
See \code{\link{rowNorms}} for details on available norms and their parameters.
}
%% \examples{
%% }
\author{Stefan Evert (\url{http://purl.org/stefan.evert})}
|
9719ddb99b68f8e0f7a6fecedc59f07fec3db791
|
cc6d892933070283e4ad343194fe80eed5396fe2
|
/inst/tests/test-parameters.R
|
777ced323f0eccf9e4d5b07e3c9964a956be9b21
|
[] |
no_license
|
opetchey/Revolve
|
0fbfc3b90e8b53c1baaa4100555ba1b2dbf3e67b
|
ccf77ae16c4d9b08ed6fe5d9a260607ddfd4906c
|
refs/heads/master
| 2021-05-27T15:42:19.423593
| 2014-08-04T00:53:16
| 2014-08-04T00:53:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,822
|
r
|
test-parameters.R
|
# Tests for the make_parameters() helper (Revolve package): a small
# closure-based store exposing $get() / $set() over a named parameter list.
source("helper-Revolve.R")
context("Parameter helper")
# Construction must reject unnamed, blank-named, or duplicate-named lists.
test_that("Corner cases fail", {
  # Missing names
  expect_that(make_parameters(list(1)), throws_error())
  # Blank names
  tmp <- list(1)
  names(tmp) <- ""
  expect_that(make_parameters(tmp), throws_error())
  # Duplicated names
  expect_that(make_parameters(list(a=1, a=2)), throws_error())
})
# Round trip: names and values survive construction unchanged.
test_that("Basic usage", {
  pars <- list(a=1, b=pi)
  p <- make_parameters(pars)
  expect_that(names(p), equals(names(pars)))
  expect_that(p$get(), is_identical_to(pars))
})
# set() with nothing to set is a no-op; unknown or unnamed parameters fail.
test_that("Setting parameters corner cases", {
  pars <- list(a=1, b=pi)
  p <- make_parameters(pars)
  # Empty set does not change anything:
  p$set()
  expect_that(p$get(), is_identical_to(pars))
  p$set(list())
  expect_that(p$get(), is_identical_to(pars))
  # Non-existant parameters will fail to set:
  expect_that(p$set(list(c=1)), throws_error())
  # Un-named parameters
  expect_that(p$set(list(1)), throws_error())
})
# Sequential set() calls behave like repeated modifyList() on the
# original parameter list (later values win, untouched values persist).
test_that("Setting parameters", {
  pars <- list(a=1, b=pi)
  p <- make_parameters(pars)
  pars1 <- list(a=exp(1))
  pars2 <- list(b=sqrt(2))
  pars3 <- list(b=runif(1), a=runif(1))
  expect_that(p$get(), is_identical_to(pars))
  p$set(pars1)
  expect_that(p$get(), is_identical_to(modifyList(pars, pars1)))
  p$set(pars2)
  expect_that(p$get(),
              is_identical_to(modifyList(modifyList(pars, pars1), pars2)))
  p$set(pars3)
  expect_that(p$get(),
              is_identical_to(modifyList(pars, pars3)))
})
# When given an environment, make_parameters() mirrors parameters into it
# as bindings and keeps them in sync through set().
test_that("New environment", {
  e <- new.env()
  expect_that(ls(e), is_identical_to(character(0)))
  pars <- list(a=1, b=pi)
  p <- make_parameters(pars, e)
  expect_that(ls(e), is_identical_to(names(pars)))
  expect_that(get("a", e), is_identical_to(pars$a))
  p$set(list(a=pi))
  expect_that(get("a", e), is_identical_to(pi))
})
|
499acf842099304b8b051f94eb860395bd84dcca
|
288362daf4b36524a633148eae10e6857da2ec4f
|
/analysis_summary_plots.R
|
56f41ef21831a98ec958578119da3017ad61277b
|
[] |
no_license
|
mcdermottd/lfs_capstone
|
11a7510dba8fc3c04ee3efa3a449b6b213053fc2
|
5b1446182bed250867c870b9c01b4f11faf0d5a6
|
refs/heads/master
| 2021-06-10T13:28:47.618149
| 2021-03-22T03:37:16
| 2021-03-22T03:37:16
| 53,220,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,169
|
r
|
analysis_summary_plots.R
|
######################################################################
# notes:
# - purpose: create summary plots of OHC characteristics
# - inputs: formatted analysis set
# - outputs: plots summarizing OHC characteristics
# - keywords: #brule
# - general:
######################################################################
#######################################
# load packages and clear objects/log #
#######################################
# load easimple and clear objects log
library(easimple)
ea_start()
# load packages
library(ggplot2)
library(data.table)
#############
# set parms #
#############
# set up base plot attributes / theme (shared by every plot below)
plot_attributes <- theme(plot.background = element_rect(fill = "lightgrey"),
                         panel.grid.major.x = element_line(color = "gray90"),
                         panel.grid.minor = element_blank(),
                         panel.background = element_rect(fill = "white", colour = "black") ,
                         panel.grid.major.y = element_line(color = "gray90"),
                         text = element_text(size = 20),
                         plot.title = element_text(vjust = 0, colour = "black", face = "bold", size = 25))
# output toggle: set to 1 to write the PNG files in the export block below
p_opt_exp <- 0
#############
# load data #
#############
# load analysis set (network path; presumably requires the mounted X: drive)
in_outcomes_set <- ea_load("X:/LFS-Education Outcomes/data/lfs_analysis_sets/analysis_set.rdata")
########################
# format analysis data #
########################
# copy input sets (avoid mutating the loaded object by reference)
full_outcomes_set <- copy(in_outcomes_set)
# sort by academic year
setorder(full_outcomes_set, acad_year)
# create analysis sample subset
analysis_sample <- subset(full_outcomes_set, flag_analysis_grd == 1)
# create set with only placement years
plcmt_data <- subset(analysis_sample, flag_cur_plcmt == 1)
# create var for avg. length of placement (overall and within academic year)
plcmt_data[, plcmt_length := tot_plcmt_days / lf_n_plcmt_tot]
plcmt_data[, plcmt_length_acad := tot_plcmt_days_acad / lf_n_plcmt_acad]
################################
# create summary stats to plot #
################################
# freq - plcmt type by acad year
freq_type_yr <- ea_table(plcmt_data, c("acad_year", "dcf_plcmt_type"))
# remove missing placement types
freq_type_yr <- subset(freq_type_yr, !is.na(dcf_plcmt_type))
#########################
# plot ohc info overall #
#########################
# histogram - avg plcmt length (15 day bins, <= 750 days)
# note: placements longer than 750 days are excluded from this plot only
plot_hist_avg_pdays <- ggplot(data = subset(plcmt_data, plcmt_length <= 750), aes(x = plcmt_length)) +
                       geom_histogram(binwidth = 15, colour = "black", fill = "dodgerblue4") +
                       labs(x = "Average Length of Placement", y = "Number of Children",
                            title = "Average Length of Out-of-Home Care Placement - Overall") +
                       plot_attributes
####################################
# plot placement info by acad year #
####################################
# histogram - total placements in acad year
plot_hist_n_plcmt <- ggplot(data = plcmt_data, aes(x = n_plcmt_acad)) +
                     geom_histogram(binwidth = 1, colour = "black", fill = "dodgerblue4") +
                     labs(x = "Number of Placements", y = "Number of Children",
                          title = "Out-of-Home Care Placements in an Academic Year") +
                     plot_attributes
# histogram - total plcmt days in acad year (20 day bins)
plot_hist_plcmt_days <- ggplot(data = plcmt_data, aes(x = tot_plcmt_days_acad)) +
                        geom_histogram(binwidth = 20, colour = "black", fill = "dodgerblue4") +
                        labs(x = "Number of Placement Days", y = "Number of Children",
                             title = "Days in Out-of-Home Care Placement in an Academic Year") +
                        plot_attributes
# histogram - avg plcmt length in acad year (15 day bins)
# NOTE(review): comment says 15-day bins but binwidth below is 10 — confirm intended width
plot_hist_plcmt_length <- ggplot(data = plcmt_data, aes(x = plcmt_length_acad)) +
                          geom_histogram(binwidth = 10, colour = "black", fill = "dodgerblue4") +
                          labs(x = "Average Placement Length", y = "Number of Children",
                               title = "Average Length of Out-of-Home Care Placement \n in an Academic Year") +
                          plot_attributes
# bar plot - total placements by year and placement type
plot_bar_ptype_by_yr <- ggplot(freq_type_yr, aes(acad_year, count)) +
                        geom_bar(stat = "identity", position = "dodge", aes(fill = dcf_plcmt_type)) +
                        labs(x = "Academic Year", y = "Number of Placements",
                             title = "Out-of-Home Care Placements by Type \n in an Academic Year") +
                        scale_fill_discrete(name = "Placement Type") +
                        plot_attributes
#####################
# format and export #
#####################
# set output directory
p_dir_out <- "X:/LFS-Education Outcomes/qc/final_draft_exhibits/descriptive/"
# set height and width of plots (centimeters, see units below)
p_height <- 28
p_width <- 28
# export (only when the toggle at the top of the script is set to 1)
if (p_opt_exp == 1) {
  ggsave(paste0(p_dir_out, "hist_avg_plcmt_length.png"), plot = plot_hist_avg_pdays, width = p_width, height = p_height, units = "cm")
  ggsave(paste0(p_dir_out, "hist_acad_n_plcmts.png"), plot = plot_hist_n_plcmt, width = p_width, height = p_height, units = "cm")
  ggsave(paste0(p_dir_out, "hist_acad_plcmt_days.png"), plot = plot_hist_plcmt_days, width = p_width, height = p_height, units = "cm")
  ggsave(paste0(p_dir_out, "hist_acad_avg_plcmt_length.png"), plot = plot_hist_plcmt_length, width = p_width, height = p_height, units = "cm")
  ggsave(paste0(p_dir_out, "bar_ptype_by_acad_yr.png"), plot = plot_bar_ptype_by_yr, width = p_width, height = p_height, units = "cm")
}
|
a9d4b3991b910e3c99f1e9ad1bbf68fcbd271c07
|
a7ff4aeaed4f82a2d777a806f46c97aee85904c8
|
/man/dtCutoff.Rd
|
2e595444abdbf66df35e36ea6237048f1fb82f8d
|
[
"MIT"
] |
permissive
|
shwetagopaul92/NSForestR
|
89015d2385a7dadc064516e3a24e806b96693928
|
34726e122264336bc358d6ff86e7d492a5f8c50f
|
refs/heads/master
| 2020-04-19T19:22:42.745296
| 2019-04-16T16:01:01
| 2019-04-16T16:01:01
| 168,386,581
| 0
| 0
| null | 2019-02-07T20:46:04
| 2019-01-30T17:41:08
|
Python
|
UTF-8
|
R
| false
| true
| 534
|
rd
|
dtCutoff.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NSForestR.R
\name{dtCutoff}
\alias{dtCutoff}
\title{to get expression cutoffs for f-beta testing}
\usage{
dtCutoff(binaryDF, clusterCol, dataDummy, dataFull)
}
\arguments{
\item{binaryDF}{data.frame}
\item{clusterCol}{numeric(1) cluster column}
\item{dataDummy}{matrix dummy columns for one vs all Random Forest modeling}
\item{dataFull}{data.frame tsvfile for from a SingleCellExperiment}
}
\description{
to get expression cutoffs for f-beta testing
}
|
691ba10868d682cc852e3a9090b603580bc92fb6
|
90bc0268ab54edfeb1eb2231e3d40c074b1fc784
|
/man-roxygen/offline.R
|
454b3d96c4812fedaae9627e740f71deea573ec6
|
[] |
no_license
|
jackwasey/jwutil
|
e920952f8f42ef609c6019f7107c4256836fb4a9
|
d149051dc750a56412c8c7d7d07c1d3619d4f4b2
|
refs/heads/master
| 2021-01-17T09:26:51.710521
| 2020-01-18T19:58:17
| 2020-01-18T19:58:17
| 24,302,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 226
|
r
|
offline.R
|
#' @param offline single logical, if \code{TRUE} then don't pull the file from
#' internet, only return path and file name if the file already exists in
#' data-raw. This is helpful for testing without using the internet.
|
900fa912076367542ad62b84212deb0a03f7387f
|
3075543281d3798c5bf91f9539b40798ad758bea
|
/measurement/measureROC.R
|
7f2664c32a65a03fde7df1596e1f8d6bea7567df
|
[] |
no_license
|
m10223045/R
|
9c3888d10f7bcd4c4c39ddbf76974a009dc22bb8
|
bcef60900cf8d56dbbdae12d4c1b65d86de0022a
|
refs/heads/master
| 2021-01-13T16:06:51.894391
| 2017-03-17T02:50:36
| 2017-03-17T02:50:36
| 81,721,251
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,473
|
r
|
measureROC.R
|
#' Compute confusion-matrix measures for a two-class prediction.
#'
#' Classes are coded -1 (positive) and 1 (negative), matching the
#' commented-out call examples in this file.
#'
#' @param actual vector of true labels, coded -1 / 1
#' @param fitted vector of predicted labels, coded -1 / 1
#' @param digits number of decimals for the reported rates. Default is 4,
#'   which preserves the historical output (the argument used to be
#'   ignored and 4 was hard-coded).
#' @return list with Table (the confusion table), ROC.Rate (TP/TN/FP/FN
#'   rates, accuracy and G-mean, rounded to `digits`) and ROC.Condition
#'   (the raw counts P, N, TP, FN, FP, TN)
measureROC <- function(actual, fitted, digits = 4){
  t <- table(Actual = actual, Fitted = fitted)
  tFrame <- data.frame(t)
  # Initialize counts to 0 so combinations absent from the table (e.g.
  # the model never predicts one class) do not leave NULLs behind, which
  # previously broke the arithmetic below.
  TP <- 0
  FP <- 0
  FN <- 0
  TN <- 0
  for (i in seq_len(nrow(tFrame))) {
    if (tFrame$Actual[i] == -1 && tFrame$Fitted[i] == -1) TP <- tFrame$Freq[i]
    if (tFrame$Actual[i] == 1 && tFrame$Fitted[i] == 1) TN <- tFrame$Freq[i]
    if (tFrame$Actual[i] == 1 && tFrame$Fitted[i] == -1) FP <- tFrame$Freq[i]
    if (tFrame$Actual[i] == -1 && tFrame$Fitted[i] == 1) FN <- tFrame$Freq[i]
  }
  P <- TP + FN  # actual positives
  N <- FP + TN  # actual negatives
  TP.rate <- TP / P
  FP.rate <- FP / N
  TN.rate <- TN / N
  FN.rate <- FN / P
  ACC <- (TP + TN) / (P + N)
  # geometric mean of sensitivity and specificity
  Gmean <- sqrt(TP.rate * TN.rate)
  ROC.Rate <- data.frame(TP.rate, TN.rate, FP.rate, FN.rate, ACC, Gmean)
  # BUG FIX: honour the digits argument (it was previously ignored)
  ROC.Rate <- round(ROC.Rate, digits = digits)
  ROC.Condition <- data.frame(P, N, TP, FN, FP, TN)
  list(Table = t, ROC.Rate = ROC.Rate, ROC.Condition = ROC.Condition)
}
}
#################################################################
# # The R OOP class test.
# setClass("Measure.ROC",representation(
# pretictTure = "numeric",
# pretictFalse = "numeric",
# actualTrue = "numeric",
# actualFalse = "numeric"
# ))
#
# setGeneric("measure","Measure.ROC",
# function(p, y){
#
# }
# )
|
4aa98a5c3d1a4cde7a050944c0d0062081c3b274
|
8ed170e060bc2cbca171371344bc804055f940f3
|
/day-14-simulate-tadpoles.R
|
0d112c7dec4286b635552755d59304377494d648
|
[
"MIT"
] |
permissive
|
colt-jensen/maymester-bayes-2021
|
d67cc6add1400652b25bd2f1e42a69688353738a
|
693d03ab8c403494da39d687c27695f1dee385d9
|
refs/heads/main
| 2023-05-13T09:01:21.493735
| 2021-06-09T15:56:31
| 2021-06-09T15:56:31
| 370,819,540
| 0
| 0
|
MIT
| 2021-05-25T20:26:27
| 2021-05-25T20:26:26
| null |
UTF-8
|
R
| false
| false
| 1,854
|
r
|
day-14-simulate-tadpoles.R
|
## Simulate tadpole mortality and compare a no-pooling binomial model with
## a partially pooled (multilevel) model, following the reedfrog example.
library(tidyverse)
library(rethinking)
# hyperparameters: mean and sd of the per-pond log-odds of survival,
# 60 ponds with 20 tadpoles each
a_bar <- 1.5
sigma <- 1.5
nponds <- 60
Ni <- rep(20, nponds)
# simulate probability of survival in each pond
set.seed(42)
a_pond <- rnorm(nponds, a_bar, sigma)
# create a dataframe to hold our information
dsim <- data.frame(
  pond = 1:nponds, # pond id
  Ni = Ni, # number of tadpoles
  true_a = a_pond, # true log-odds of survival
  true_p = inv_logit(a_pond) # true probability of survival
)
# kill tadpoles: binomial survival counts given each pond's true p
dsim$Si <- rbinom(n = nponds,
                  size = dsim$Ni,
                  prob = dsim$true_p)
# data list in the form ulam() expects
dat <- list(
  Si = dsim$Si,
  Ni = dsim$Ni,
  pond = dsim$pond
)
# now fit some models
# no pooling: each pond gets an independent intercept with a fixed prior
no_pooling_model <- ulam(
  alist(
    Si ~ dbinom( Ni, p ), # likelihood
    logit(p) <- a[pond], # link function
    a[pond] ~ dnorm(0, 1) # prior on a[pond]
  ), data = dat
)
# diagnostics
traceplot( no_pooling_model, 'a[2]' )
plot(no_pooling_model, depth = 2)
# fit multilevel model: pond intercepts share an adaptive prior whose
# mean and sd are themselves estimated (partial pooling)
multilevel_model <- ulam(
  alist(
    Si ~ dbinom( Ni, p ), # likelihood
    logit(p) <- a[pond], # link function
    a[pond] ~ dnorm(a_bar, sigma), # adaptive prior on a[pond]
    a_bar ~ dnorm(0, 1.5), # prior on a_bar
    sigma ~ dexp(1) # prior on sigma
  ), data = dat
)
traceplot( multilevel_model, 'a[2]' )
plot(multilevel_model, depth = 2)
## add posterior predictions to the dataframe -----------------------------
# predicted probability from no pooling model
no_pooling_posterior <- extract.samples(no_pooling_model)
multilevel_posterior <- extract.samples(multilevel_model)
# posterior-mean survival probability per pond under each model
dsim$no_pooling_p <- apply( inv_logit(no_pooling_posterior$a), 2, mean)
dsim$multilevel_p <- apply( inv_logit(multilevel_posterior$a), 2, mean)
# raw observed proportion and the complete-pooling (grand mean) estimate
dsim$empirical_p <- dsim$Si / dsim$Ni
dsim$complete_pooling_p <- sum(dsim$Si) / sum(dsim$Ni) # percent of all tadpoles that survived
|
de1da4bc9b67628fb6e21ff6f34f61305a569ff0
|
5fcc3f8421fa41dbb443204d206961ab18b1d45e
|
/man/sfn.Rd
|
c3dbb4cf48a8327057d0492949107bd94a2cc93b
|
[
"MIT"
] |
permissive
|
fengweijp/RCyjs
|
192f369e1024661686bc10b19578587824660f1c
|
0f22b40382b63f4882d7204b54b650bbfbb59333
|
refs/heads/master
| 2021-10-26T16:10:46.523267
| 2019-04-13T18:38:52
| 2019-04-13T18:38:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 580
|
rd
|
sfn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCyjs-class.R
\docType{methods}
\name{sfn,RCyjs-method}
\alias{sfn,RCyjs-method}
\alias{sfn}
\title{sfn}
\usage{
\S4method{sfn}{RCyjs}(obj)
}
\arguments{
\item{obj}{an RCyjs instance}
}
\value{
no return value
}
\description{
\code{sfn} select first neighbors of the currently selected nodes
}
\examples{
if(interactive()){
rcy <- RCyjs(title="rcyjs demo", graph=simpleDemoGraph())
selectNodes(rcy, "A")
getSelectedNodes(rcy) # just one
sfn(rcy)
getSelectedNodes(rcy) # now three
}
}
|
0358a98274f1b6a371a6c3084651c623caf6cdb8
|
ea9001912ab3dbb04ebce13c4d4652e9a9101456
|
/depo,with.R
|
d3a0fe88384847f6844be462780fa95731571960
|
[] |
no_license
|
kiki3700/investmentJounal
|
feb30f13ac07a7eb1b0dbfce369faf0442d565e4
|
7274a57d3d002fb18ab52fbcb86ec4715327c47d
|
refs/heads/main
| 2023-04-29T00:34:17.765243
| 2021-05-18T02:21:59
| 2021-05-18T02:21:59
| 368,376,300
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,038
|
r
|
depo,with.R
|
#deposit
#' Record a deposit for the arrow_1 account.
#'
#' Appends one transaction row to arrow_1_cash.csv and adds `price` to the
#' running balance in arrow_1_cash_balance.csv: the last balance row is
#' updated in place when it already carries `date` as its row name,
#' otherwise a new row named `date` is appended.
#'
#' @param price amount deposited (numeric)
#' @param date  transaction date, used as the balance-table row name
deposit <- function(price, date){
  arrow_1_cash_balance <- read.csv('data/arrow_1/arrow_1_cash_balance.csv', row.names = 1)
  arrow_1_cash <- read.csv('data/arrow_1/arrow_1_cash.csv', row.names = 1)
  # cash ledger: append one row with the new running balance
  # (tail(x, 1) replaces last(), which needed an unloaded package)
  new_balance <- tail(arrow_1_cash$balance, 1) + price
  arrow_1_cash <- rbind(arrow_1_cash,
                        data.frame(date = date, distribution = price,
                                   balance = new_balance,
                                   'function' = 'deposit'))
  print(arrow_1_cash)
  write.csv(arrow_1_cash, 'data/arrow_1/arrow_1_cash.csv')
  # cash balance: update today's row if it exists, otherwise append one
  n <- nrow(arrow_1_cash_balance)
  if (tail(rownames(arrow_1_cash_balance), 1) == date) {
    arrow_1_cash_balance[n, ] <- arrow_1_cash_balance[n, ] + price
  } else {
    arrow_1_cash_balance <- rbind(arrow_1_cash_balance, arrow_1_cash_balance[n, ])
    # BUG FIX: assign the row name on the data frame itself; the old code
    # called rownames()<- on a temporary subset copy, which had no effect.
    rownames(arrow_1_cash_balance)[n + 1] <- date
    arrow_1_cash_balance[n + 1, ] <- arrow_1_cash_balance[n + 1, ] + price
  }
  print(arrow_1_cash_balance)
  # BUG FIX: write back to the same file that was read above; the old
  # path dropped the 'arrow_1/' subdirectory.
  write.csv(arrow_1_cash_balance, 'data/arrow_1/arrow_1_cash_balance.csv')
}
#withraw
#' Record a withdrawal for the arrow_1 account.
#'
#' Mirror image of deposit(): appends one transaction row to
#' arrow_1_cash.csv and subtracts `price` from the running balance in
#' arrow_1_cash_balance.csv. (Function name "withraw" kept as-is for
#' backward compatibility with existing callers.)
#'
#' @param price amount withdrawn (numeric)
#' @param date  transaction date, used as the balance-table row name
withraw <- function(price, date){
  arrow_1_cash_balance <- read.csv('data/arrow_1/arrow_1_cash_balance.csv', row.names = 1)
  arrow_1_cash <- read.csv('data/arrow_1/arrow_1_cash.csv', row.names = 1)
  # cash ledger: append one row with the new running balance
  # (tail(x, 1) replaces last(), which needed an unloaded package)
  new_balance <- tail(arrow_1_cash$balance, 1) - price
  # BUG FIX: label the transaction 'withraw'; the old code copy-pasted
  # 'deposit' from the sibling function.
  arrow_1_cash <- rbind(arrow_1_cash,
                        data.frame(date = date, distribution = price,
                                   balance = new_balance,
                                   'function' = 'withraw'))
  print(arrow_1_cash)
  write.csv(arrow_1_cash, 'data/arrow_1/arrow_1_cash.csv')
  # cash balance: update today's row if it exists, otherwise append one
  n <- nrow(arrow_1_cash_balance)
  if (tail(rownames(arrow_1_cash_balance), 1) == date) {
    arrow_1_cash_balance[n, ] <- arrow_1_cash_balance[n, ] - price
  } else {
    arrow_1_cash_balance <- rbind(arrow_1_cash_balance, arrow_1_cash_balance[n, ])
    # BUG FIX: assign the row name on the data frame itself; the old code
    # called rownames()<- on a temporary subset copy, which had no effect.
    rownames(arrow_1_cash_balance)[n + 1] <- date
    arrow_1_cash_balance[n + 1, ] <- arrow_1_cash_balance[n + 1, ] - price
  }
  print(arrow_1_cash_balance)
  # BUG FIX: write back to the same file that was read above; the old
  # path dropped the 'arrow_1/' subdirectory.
  write.csv(arrow_1_cash_balance, 'data/arrow_1/arrow_1_cash_balance.csv')
}
|
f33770705ec3b2c299a97fb97fe38cb674e25b64
|
78f689caac190116528392d483416eb95d6fd5de
|
/GCSAdiagram.R
|
4d42e7c75cdae9a47d0d7e00e15af75f7af3fb15
|
[] |
no_license
|
oscci/Genetic_analysis_SCT_twin
|
c096d231c4dbbd0fdd0f0ca7552c0d5cd98dd417
|
1c63e787394b4b547f6bdb410b3251ece17dfaa8
|
refs/heads/master
| 2021-06-06T09:14:13.653846
| 2021-05-19T08:29:58
| 2021-05-19T08:29:58
| 105,423,199
| 0
| 0
| null | 2018-06-13T14:47:26
| 2017-10-01T06:15:20
|
R
|
UTF-8
|
R
| false
| false
| 2,032
|
r
|
GCSAdiagram.R
|
# Draw a SEM path diagram (22 SNPs -> latent Gene -> latent Neuro factor
# -> three phenotype indicators) with DiagrammeR/graphviz.
library(DiagrammeR)
library(DiagrammeRsvg)
#x and y coords for SNP rectangles are hand-crafted by trial and error
#quartz()
# The whole diagram is one graphviz DOT string. The 'neato' layout engine
# honours the fixed pos = 'x,y!' coordinates, so the SNP boxes form an arc
# around the 'Gene' ellipse; edges are listed at the bottom of the string.
grViz("
digraph SEM {
graph [layout = neato,
overlap = true,
outputorder = edgesfirst]
node [shape = rectangle,
fontname = Helvetica]
a [pos = '0,5.6!', label = 'SNP_1']
b [pos = '-1,5.6!', label = 'SNP_2']
c [pos = '-1.9,5.6!', label = 'SNP_3']
d [pos = '-2.8,5.5!', label = 'SNP_4']
e [pos = '-3.5,4.9!', label = 'SNP_5']
f [pos = '-3.9,4.2!', label = 'SNP_6']
g [pos = '-4.3,3.5!', label = 'SNP_7']
h [pos = '-4.6,2.8!', label = 'SNP_8']
i [pos = '-5,2.1!', label = 'SNP_9']
j [pos = '-5.1,1.4!', label = 'SNP_10']
k [pos = '-5.2,.7!', label = 'SNP_11']
l [pos = '-5.3,0!', label = 'SNP_12']
m [pos = '-5.2,-.7!', label = 'SNP_13']
n [pos = '-5.1,-1.4!', label = 'SNP_14']
o [pos = '-5,-2.1!', label = 'SNP_15']
p [pos = '-4.6,-2.8!', label = 'SNP_16']
q [pos = '-4.3,-3.5!', label = 'SNP_17']
r [pos = '-3.8,-4.2!', label = 'SNP_18']
s [pos = '-3,-4.9!', label = 'SNP_19']
t [pos = '-2,-5!', label = 'SNP_20']
u [pos = '-1,-5!', label = 'SNP_21']
v [pos = '0,-5!', label = 'SNP_22']
w [pos = '-1,0!', label = 'Gene', shape = ellipse,fontsize=20]
x [pos = '2,0!', label = 'Neuro factor', shape = ellipse,fontsize=20]
y [pos = '4.5,2!', label = 'PhenoA',fontsize=18]
z [pos = '4.5,0!', label = 'PhenoB',fontsize=18]
aa [pos = '4.5,-2!', label = 'PhenoC',fontsize=18]
w->x
w->a
w->b
w->c
w->d
w->e
w->f
w->g
w->h
w->i
w->j
w->k
w->l
w->m
w->n
w->o
w->p
w->q
w->r
w->s
w->t
w->u
w->v
x->y
x->z
x->aa
}
")
#NB problems with exporting diagram from GraphViz; currently using screenshot
8a2d6de5cd923954a1dcc9b4129599203f734ca0
|
abfddfa8a9be36d19426cf16586381ef16a50d00
|
/plot2.R
|
835bf152bdf5fdb73949d1fe5ce239644336639e
|
[] |
no_license
|
aru20/Household-Power-Consumption-Data-Plotting
|
ff3024aff5ad2d781a2480be3c84ce898c14eb00
|
0d1904a2c789ca0277238f7fc9e9af88f84d5f9b
|
refs/heads/master
| 2023-07-21T08:26:21.625268
| 2021-09-01T22:56:29
| 2021-09-01T22:56:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,234
|
r
|
plot2.R
|
# R code for plot2: Global Active Power over time for 1-2 Feb 2007,
# from the UCI household power consumption dataset.
# If the data file does not exist in the working directory then download the file and unzip
# else read the data file
if(! file.exists("household_power_consumption.txt")){
  ####download the Zip file and Unzip the zip file
  ###household_power_consumption.txt is the file name
  zip.url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  dir <- getwd()
  zipfile <- "household_power_consumption.zip"
  zip.combine <- as.character(paste(dir, zipfile, sep = "/"))
  download.file(zip.url, destfile = zip.combine)
  unzip(zipfile)
}
#Reading the data file (";"-separated, "?" marks missing values)
householdPowerfile <- "household_power_consumption.txt"
householdPowerData <- read.table(householdPowerfile, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".",na.strings = "?")
# filtering the required data: only the two target days
req.data <- householdPowerData[householdPowerData$Date %in% c("1/2/2007","2/2/2007"),]
# convert the date and time as as "2007-02-01 00:00:00 CST"
DataTime <- strptime(paste(req.data$Date, req.data$Time), "%d/%m/%Y %H:%M:%S")
#plotting the graph to a 480x480 (default) PNG file
png(filename="plot2.png")
plot(DataTime,req.data$Global_active_power,type = "l", xlab="", ylab = "Global Active Power (kilowatts)")
# close the device
dev.off()
|
e0bdf7481e0cf90a0d8e1ba97463405f8a3235b7
|
e7d5e723eaae21a724740e320120f9daed267d96
|
/src/figures/figure-regression.R
|
5fbdde99c429193b61bf129e67542299a80d9e0f
|
[
"BSD-3-Clause"
] |
permissive
|
dylanhmorris/sars-cov-2-temp-humidity
|
b811d67bc793b42df8481538901689be26df64b0
|
d80dd0132be738753f1a77c99ce280219dc5afba
|
refs/heads/main
| 2021-05-27T05:40:21.491503
| 2021-04-27T18:23:19
| 2021-04-27T18:23:19
| 304,516,612
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,177
|
r
|
figure-regression.R
|
#!/usr/bin/env Rscript
########################################
## filename: figure-regression.R
## author: Dylan Morris <dhmorris@princeton.edu>
## plot main regression figure for environmental
## conditions analysis
#######################################
suppressPackageStartupMessages(library(virusenv))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(readr))
suppressPackageStartupMessages(library(tibble))
suppressPackageStartupMessages(library(tidybayes))
suppressPackageStartupMessages(library(tidyr))
suppressPackageStartupMessages(library(ggridges))
suppressPackageStartupMessages(library(extrafont))
#################################
# read in needed data
#################################
## read command line args
args <- commandArgs(trailingOnly = TRUE)
data_path <- args[1]
evap_path <- args[2]
results_path <- args[3]
titers_path <- args[4]
outpath <- args[5]
## read data / style files
cat("reading data (this may take a while)...\n")
decay_chains <- readRDS(results_path)
ests_chains <- readRDS(titers_path)
dat <- read_data_for_plotting(data_path)
evap_dat <- read_csv(evap_path,
col_types = cols())
cat("data read successfully!\n")
## model to use
evaporation_with_concentration <- (
any(grepl("fraction_solute", names(decay_chains))))
evaporation_biphasic <- (
(!evaporation_with_concentration) &
(any(grepl("transient_decay_rate", names(decay_chains)))))
#################################
## overall plot styling
#################################
gen_params <- get_params("general_param_list")
general_palette <- get_params("general_palette")
set.seed(23) # reproducible! (since we use random draws)
text_size = 40
detection_linesize = 0.75
titer_ylab <- expression("virus titer (TCID"[50] * "/mL media)")
ylim <- c(1, 5e4)
LOD_log10 <- 0.5
LOD <- 10^LOD_log10
conversion_factor <- 1
n_lines <- 10
line_alpha <- gen_params[["line_alpha"]]
interval_grey <- general_palette[["interval_grey"]]
evaporation <- grepl("evap-phase", outpath)
##################################################
## calculate posterior draws for regression lines
##################################################
## calculate posterior median drying times
median_drying_times <- decay_chains %>%
spread_draws(drying_time[experiment_id]) %>%
group_by(experiment_id) %>%
summarise(med_drying_time = median(drying_time))
## get needed draws and add human readable names
if(evaporation){
xlim <- c(0, max(median_drying_times$med_drying_time))
} else {
xlim <- c(0, 96)
}
fineness = 250
plot_times <- tibble(
time = seq(xlim[1], xlim[2],
length.out = fineness))
cat("extracting draws for decay rates / intercepts (this may also take a while)...\n")
int_draws <- decay_chains %>%
spread_draws(intercept[titer_id]) %>%
add_titer_metadata(dat)
if( evaporation_with_concentration ){
cat("Using explicit concentration model...\n")
decay_draws <- decay_chains %>%
spread_draws(c(transient_decay_rate,
decay_rate)[experiment_id],
initial_mass_fraction_solute)
evap_draws <- decay_chains %>%
spread_draws(c(beta,
drying_time)[evap_class_id])
predict_log_titers <- function(data){
return( data %>%
mutate(predicted_log_titer =
1 + predict_titers_explicit_evaporation(
time,
intercept,
transient_decay_rate,
decay_rate,
beta / (initial_mass * (1 - initial_mass_fraction_solute)),
equilibrium_concentration_factor)))
}
} else if( evaporation_biphasic ) {
cat("Using biphasic evaporation model...\n")
decay_draws <- decay_chains %>%
spread_draws(c(transient_decay_rate,
decay_rate)[experiment_id])
evap_draws <- decay_chains %>%
spread_draws(c(beta,
drying_time)[evap_class_id]) %>%
ungroup()
predict_log_titers <- function(data){
return( data %>%
mutate(predicted_log_titer =
1 + predict_titers_implicit_evaporation(
time,
intercept,
transient_decay_rate,
decay_rate,
drying_time)))
}
}
cat("extracting positive wells...\n")
pos_wells <- dat %>%
group_by(titer_id) %>%
summarise(
n_wells = n(),
n_pos = sum(virus_detect))
tidy_draws <- decay_draws %>%
inner_join(int_draws %>%
select(intercept,
experiment_id,
.draw,
evap_class_id,
temperature,
humidity,
titer_id,
virus),
by = c("experiment_id",
".draw")) %>%
inner_join(evap_draws %>%
select(evap_class_id,
drying_time,
beta,
.draw),
by = c("evap_class_id",
".draw")) %>%
inner_join(evap_dat %>% distinct(temperature, humidity,
.keep_all = TRUE) %>%
select(-time),
by = c("temperature", "humidity"))
print(tidy_draws)
if(evaporation_with_concentration){
tidy_draws <- tidy_draws %>%
mutate(
equilibrium_concentration_factor = exp(mass_change_to_log_concentration_factor(
equilibrium_mass,
initial_mass,
initial_mass_fraction_solute)))
}
cat('extracting titer estimates...\n')
titer_ests_draws <- ests_chains %>%
spread_draws(sampled_titer[titer_id])
## get human readable names and detectability
titer_ests_draws <- titer_ests_draws %>%
add_titer_metadata(dat) %>%
inner_join(pos_wells,
by = "titer_id") %>%
mutate(detectable = n_pos > 0) %>%
filter(material == "Plastic") %>%
inner_join(median_drying_times,
by = "experiment_id")
## filter time
if(evaporation){
print("evaporation")
titer_ests_draws <- titer_ests_draws %>%
filter(time < med_drying_time) %>%
mutate(time_use = time) %>%
arrange(desc(time_use))
figure_xlab <- "time since deposition (hours)"
} else {
titer_ests_draws <- titer_ests_draws %>%
filter(time >= med_drying_time) %>%
mutate(time_use = time - med_drying_time) %>%
arrange(desc(time_use))
figure_xlab <- "time since quasi-equilibrium reached (hours)"
}
titer_ests_draws <- titer_ests_draws %>%
mutate(
log10_tcid50 = ifelse(
detectable,
sampled_titer + 1,
LOD_log10))
###################################
## plot panel showing fit of
## regression lines to real data
###################################
cat('plotting regression lines...\n')
## draw n_lines random regression lines
chosen_draws <- sample(1:max(tidy_draws$.draw), n_lines)
func_samples <- tidy_draws %>%
filter(.draw %in% chosen_draws)
## annotate lines so that each
## has a unique id for ggplot overplotting
## (else two lines from the same draw but
## different replicates can get confused
## with each other)
## cross product decay_rates with x (time) values
## and calculate y (titer) values
cat('setting up x values...\n')
to_plot <- func_samples %>%
crossing(plot_times)
if(evaporation){
to_plot <- to_plot %>%
filter(time < drying_time) %>%
mutate(time_use = time)
} else {
to_plot <- to_plot %>%
filter(time >= drying_time) %>%
mutate(time_use = time - drying_time)
}
## adding one to convert to per mL from per 0.1 mL
to_plot <- to_plot %>%
predict_log_titers() %>%
mutate(
predicted_titer = 10^predicted_log_titer) %>%
filter(predicted_titer > ylim[1])
shape_scale <- scale_shape_manual(
values = unlist(list("FALSE" = 25,
"TRUE" = 21)))
hl_dat <- dat %>%
distinct(experiment_id,
.keep_all = TRUE) %>%
select(experiment_id,
material,
temperature,
virus,
humidity) %>%
inner_join(decay_draws,
by = "experiment_id") %>%
mutate(
half_life = log10(2) / decay_rate,
tenfold = log10(10) / decay_rate)
plot_dat <- titer_ests_draws
panel <- to_plot %>%
ggplot() +
geom_hline(aes(yintercept = LOD),
size = 2,
linetype = "dotted") +
geom_line(aes(
x = time_use,
y = predicted_titer,
color = virus,
group = interaction(.draw, titer_id)),
alpha = line_alpha) +
stat_pointinterval(
.width = 0.95,
mapping = aes(x = time_use,
y = 10^log10_tcid50,
shape = detectable,
fill = virus,
group = titer_id),
data = plot_dat,
point_size = 6,
size = 7,
stroke = 2,
interval_color = interval_grey,
interval_alpha = 1,
color = "black",
alpha = 0.9)
panel <- panel +
scale_fill_virus() +
scale_fill_virus(aesthetics = "point_fill") +
scale_color_virus() +
shape_scale +
scale_y_log10_mathformat(expand = c(0, 0)) +
coord_cartesian(ylim = ylim,
xlim = xlim,
clip = "off") +
facet_grid(rows = vars(humidity),
cols = vars(temperature))
## styling: no facet labels because is background plot
panel <- panel +
theme_project(base_size = text_size) +
theme(panel.border = element_rect(size = 2,
color = "black",
fill = NA)) +
xlab(figure_xlab) +
ylab(titer_ylab) +
theme(legend.position = "none",
panel.spacing.y = unit(5, "lines")) +
labs(tag = "relative humidity (%)",
subtitle = "temperature (\u00B0C)")
####################################
## compose full figure from panels
####################################
labeled_panel_theme <- theme(
strip.background = element_blank(),
strip.text.x = element_text(size = text_size),
strip.placement = "outside",
strip.switch.pad.grid = unit("0.5", "in"),
plot.subtitle = element_text(hjust = 0.5),
plot.tag = element_text(angle=-90,
size = text_size),
plot.tag.position = c(1.05, 0.5))
left_margin <- theme(
plot.margin = margin(b = 3, t = 1, l = 1, r = 3, unit = "cm"))
cat('making full figure...\n')
full_fig <- panel + labeled_panel_theme + left_margin
## save the plot to outpath
cat('saving figure to ', outpath, '...\n')
save_plot(outpath,
full_fig,
base_height = 15,
base_asp = 1.2)
warnings()
|
01a86512b35526b9d0037fe2c965ebccad36a100
|
007dfb4f91d2e788a06dee41b33678f36ce85642
|
/man/encrypt_token.Rd
|
ba33e2b2368cf7a37ef2084a6c21318367e2cab4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
jdtrat/tokencodr
|
df8d3e36f575df8add2e081f651708beaa4887fa
|
8ee27f88ad18498e5e80d20d31945eadd289606d
|
refs/heads/master
| 2023-07-12T08:35:46.609323
| 2021-08-16T16:14:43
| 2021-08-16T16:14:43
| 396,803,487
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 893
|
rd
|
encrypt_token.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func_encrypt.R
\name{encrypt_token}
\alias{encrypt_token}
\title{Encrypt a token file}
\usage{
encrypt_token(service, input, destination)
}
\arguments{
\item{service}{Identifier of the service whose token will be encrypted.}
\item{input}{The token file to encrypt, typically a ".json" file.}
\item{destination}{The output directory you would like to store this file,
e.g. in a Shiny Web App's "www" subdirectory.}
}
\value{
NA; used for side effects to create an encrypted token file.
}
\description{
Following
\url{https://gargle.r-lib.org/articles/articles/managing-tokens-securely.html#encrypt-the-secret-file},
this function writes an encrypted version of the input file to a specified
directory, such as a Shiny Web App's "www" subfolder. The supplied
destination path is suffixed with a ".secret" folder.
}
|
b6ce7a8795d58eec8cc6ac483423e2d120026e59
|
541a192813be04a1793959edd57dc7abb7834e22
|
/R_code/main_empirical_CVaR_optimization.R
|
b53a5293d456cab41a92761670eb5cfeb863e9de
|
[] |
no_license
|
jumping2000/MasterThesis
|
03b2abc057fbfde4914c26dd03643b3424d5268f
|
f0b5dc44ae7e9e87380497d0d03bdfbf86f0e693
|
refs/heads/master
| 2021-10-26T16:00:09.891289
| 2019-04-13T16:56:07
| 2019-04-13T16:56:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,350
|
r
|
main_empirical_CVaR_optimization.R
|
# empirical VaR portfolio optimization
source("MarkowitzMeanVariancePortfolio.R")
source("PortfolioVaROptimization.R")
load("returns.Rda")
load("data.Rda")
N_samples=dim(my_returns)[1]
N_assets = dim(my_returns)[2]/2 -1 # -1 to exclude vix
asset_names = colnames(my_returns[,2*(1:N_assets)])
percentage_returns = exp(my_returns[,2*(1:N_assets)])
tot_days = dim(percentage_returns)[1]
last_days_to_use = tot_days
daily_return_matrix = as.matrix(percentage_returns[1:last_days_to_use,])
# daily returns to plot frontier and compute allocations
daily_returns = seq(1.00,1.2, by=0.004)^(1/255)
alpha_percentage = 95
daily_returns=sort(daily_returns,decreasing = FALSE)
# including btc
t_beg = Sys.time()
daily_allocations_cvar = matrix(rep(0,N_assets*length(daily_returns)), ncol=N_assets)
colnames(daily_allocations_cvar)=asset_names
daily_CVaRs = rep(0,length(daily_returns))
daily_resulting_returns_cvar = rep(0,length(daily_returns)) # just as a check
for (i in 1:length(daily_returns)){
print(daily_returns[i]^255)
sol_func = OptimalAllocationDailyCVaR(daily_return = daily_return_matrix, alpha = 1-alpha_percentage/100, target_return = daily_returns[i], N_rep = 1)
daily_allocations_cvar[i,]=sol_func$allocation
daily_CVaRs[i]=sol_func$objective
daily_resulting_returns_cvar[i]= sol_func$expected_return
}
t_end = Sys.time()
t_end-t_beg
plot(daily_CVaRs, (daily_resulting_returns_cvar)-1, type='l', col = "green", xlab = paste0("Daily CVaR ", alpha_percentage,"%"), ylab = "Daily Returns in %")
title("Efficient CVaR Frontier (daily) including BTC")
grid()
# excluding btc
max_return_no_btc = max(colMeans(percentage_returns[,2:N_assets]))
daily_returns_reduced = daily_returns[which(daily_returns <= max_return_no_btc)]
t_beg = Sys.time()
daily_allocations_cvar_no_btc = matrix(rep(0,N_assets*length(daily_returns_reduced)), ncol=N_assets)
colnames(daily_allocations_cvar_no_btc) = asset_names
daily_CVaRs_no_btc = rep(0,length(daily_returns_reduced))
daily_resulting_returns_cvar_no_btc = rep(0,length(daily_returns_reduced)) # just as a check
for (i in 1:length(daily_returns_reduced)){
print(daily_returns_reduced[i]^255)
sol_func = OptimalAllocationDailyCVaR(daily_return = daily_return_matrix[,2:N_assets], alpha = 1-alpha_percentage/100, target_return = daily_returns_reduced[i], N_rep = 1)
daily_allocations_cvar_no_btc[i,2:N_assets]=sol_func$allocation
daily_CVaRs_no_btc[i]=sol_func$objective
daily_resulting_returns_cvar_no_btc[i]= sol_func$expected_return
}
t_end = Sys.time()
t_end-t_beg
plot(daily_CVaRs_no_btc, (daily_resulting_returns_cvar_no_btc)-1, type='l', col = "orange", xlab = paste0("Daily CVaR ", alpha_percentage,"%"), ylab = "Daily Returns in %")
title("Efficient CVaR Frontier (daily) excluding BTC")
grid()
# polish allocation data
daily_allocations_cvar[which(abs(daily_allocations_cvar)<1e-8)] =0
daily_allocations_cvar_no_btc[which(abs(daily_allocations_cvar_no_btc)<1e-10)] =0
# aggregate results to be saved as csv file
res_btc = cbind(daily_resulting_returns_cvar-1,daily_CVaRs,daily_allocations_cvar)
colnames(res_btc)[c(1,2)] = c("return_daily", "cvar_daily")
res_no_btc = cbind(daily_resulting_returns_cvar_no_btc-1,daily_CVaRs_no_btc,daily_allocations_cvar_no_btc )
colnames(res_no_btc)[c(1,2)] = c("return_daily", "cvar_daily")
# # save to file
# write.csv(file = paste0("allocation_cvar",alpha_percentage, ".csv"), x = res_btc)
# write.csv(file = paste0("allocation_cvar",alpha_percentage, "_no_btc.csv"), x = res_no_btc)
idx_min_btc = which(daily_CVaRs == min(daily_CVaRs))
idx_min_no_btc = which(daily_CVaRs_no_btc == min(daily_CVaRs_no_btc))
L_btc = length(daily_CVaRs)
L_no_btc = length(daily_CVaRs_no_btc)
# create full plot
x11()
plot(daily_CVaRs_no_btc[idx_min_no_btc:L_no_btc], (daily_resulting_returns_cvar_no_btc[idx_min_no_btc:L_no_btc])-1, type='l',
col = "orange", xlab = paste0("Daily CVaR ", alpha_percentage,"%"), ylab = "Daily Returns",
ylim = c(0,0.0008))
title("Daily CVaR Frontier ")
grid()
lines(daily_CVaRs[idx_min_btc:L_btc], (daily_resulting_returns_cvar[idx_min_btc:L_btc])-1, type='l',
col = "green")
legend("topleft", legend = c("btc", "NO btc"),
col=c("green", "orange"), lwd = 3, lty = 1, cex=0.75, bg = 'white')
# dev.copy2pdf(file = "efficient_frontier_CVaR.pdf", height = 7, width=7 )
# dev.off()
|
f419f04087ef1feb3cc9cc70fa348003808a9d10
|
f5d94a4296966d7bc6e3194ebdf5ba76cf51cd2a
|
/euroformix_2.2.1/man/plotMPS2.Rd
|
7511987bb510268531198a5536fbc4decd3f12e9
|
[] |
no_license
|
oyvble/euroformixArchive
|
170c1b4d6fe11c0ccb3ce28e6691579fe57a650b
|
2250b118ccfdb110079a008ac481e4afe615514d
|
refs/heads/master
| 2021-01-03T12:20:14.421081
| 2020-02-12T18:34:11
| 2020-02-12T18:34:11
| 240,082,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,402
|
rd
|
plotMPS2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotMPS2.R
\name{plotMPS2}
\alias{plotMPS2}
\title{plotMPS2}
\usage{
plotMPS2(mixData, refData = NULL, AT = NULL, ST = NULL,
grpsymbol = "_", locYmax = TRUE, options = NULL)
}
\arguments{
\item{mixData}{List of mixData[[ss]][[loc]] =list(adata,hdata), with samplenames ss, loci names loc, allele vector adata (can be strings or numeric), intensity vector hdata (must be numeric)}
\item{refData}{List of refData[[rr]][[loc]] or refData[[loc]][[rr]] to label references (flexible). Visualizer will show dropout alleles.}
\item{AT}{A detection threshold can be shown in a dashed line in the plot (constant). Possibly a AT[[loc]] list.}
\item{ST}{A stochastic threshold can be shown in a dashed line in the plot (constant). Possibly a ST[[loc]] list.}
\item{grpsymbol}{A separator for each allele giving plot grouping. Useful for separating conventional repeat units (RU) and sequence variant.}
\item{locYmax}{A boolean of whether Y-axis should be same for all markers (FALSE) or not (TRUE this is default)}
\item{options}{A list of possible plot configurations. See comments below}
}
\description{
MPS data visualizer (interactive)
}
\details{
Plots intensities with corresponding allele variant for one sample. Does not yet handle replicates. Can handle RU grouping with separator grpsymbol.
}
\author{
Oyvind Bleka
}
|
13cfcf703378b8925dd5ba43da0593b42d1f9096
|
d2922aa04644cb228c0a33488fc8a6f7c4783050
|
/R/CohortPercent2.R
|
caa9b0f126fa44f59904bc5020732ea501dcb118
|
[] |
no_license
|
caiostat/churnFunc
|
76277eb02b75e96732cfa16135fe74709c59fd02
|
0eb3c100b2c027d1d353afef2df46c6a5820dee1
|
refs/heads/master
| 2020-03-22T05:54:03.704590
| 2018-08-01T14:49:17
| 2018-08-01T14:49:17
| 139,586,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 647
|
r
|
CohortPercent2.R
|
CohortPercent2 <- function(df, c, var, cols, cols2){
x <- df %>%
mutate(MesCriacao = as.yearmon(DataCriacao)) %>%
group_by(!!!as_quosure(cols)) %>% dplyr::summarise(ChurnAlvo=n()) %>%
group_by(!!!as_quosure(cols2)) %>% mutate(Total = sum(ChurnAlvo),Percentual = round((ChurnAlvo/Total)*100,1))
ggplot(x,aes_string(x = "MesCriacao",y="Percentual",fill=cols[3])) +
geom_col() +
facet_grid(reformulate(".",var)) +
theme_few() +
theme(legend.position = "bottom") +
geom_text(data = x %>% filter(!! sym(cols[3]) != "Não"), aes(y=100 - Percentual, x=MesCriacao, label= Percentual)) +
labs(title = paste(c))
}
|
a07f1c0f268b205ae0a7a286b36c1e49e6ba3b0c
|
9982f88bf5d3c04f08107842511c1a29e1af89ba
|
/transform-your-r-code-into-api-with-plumber/adspool/R/add_ad.R
|
44959bd70ff2f319405f7be09a288ec22b546156
|
[] |
no_license
|
riccardoporreca/MiraiLabs
|
f3c1ec8ab3545eb93467ac715026b259172ff872
|
0001ff70a5a9a00b9c4469779f6a038d1cb8f93b
|
refs/heads/master
| 2023-01-01T15:51:59.888218
| 2020-10-14T08:55:15
| 2020-10-14T08:55:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,098
|
r
|
add_ad.R
|
#' add_ad
#' @description
#' Add a new advertisement to the dataframe of advertisements.
#' The function assumes the existence of a file "ads_file":
#' - a dataframe ads, with minimal structure "ads" "subcats" "cats" "customer";
#'
#' @param newad new advertisement. Character string of name of new advertisement.
#' @param newad_cat new advertisement category. Character string of name of new advertisement category. If unavailable a random category from those already available in ads will be used.
#' @param newad_subcat new advertisement subcategory. Character string of name of new advertisement subcategory. If unavailable a random subcategory from those already available in catsToSubcats and associated with the given category will be used.
#' @param newad_customer new advertisement category. Integer indicating the customer id.
#' @param newad_click_rate new advertisement rate.
#'
#' @return dataframe
#' @export
add_ad <- function(newad, newad_cat = NULL, newad_subcat = NULL, newad_customer, newad_click_rate){
ads <- read.ads()
assert_that(is.character(newad))
if (length(newad) > 1){
message("Warning: length(newad) > 1, considering only the first element")
newad <- newad[1]
}
# Define newad_cat
if (is.null(newad_cat)) {
newad_cat <- sample(ads$category, 1)
}
# Define newad_subcat
if (is.null(newad_subcat)) {
newad_subcat <- with(ads, sample(subcategory[category == newad_cat], 1))
if (length(newad_subcat) == 0) {
newad_subcat <- "none"
}
}
# Create newad dataframe
newad_df <- data.frame(
id = max(ads$id) + 1,
name = newad,
category = newad_cat,
subcategory = newad_subcat,
customer_id = as.integer(newad_customer),
img_path = sprintf("%03d_%s.jpeg", max(ads$id) + 1, gsub("\\W", "", newad)),
click_count = 0,
click_rate = as.numeric(newad_click_rate)
)
# Add new row to ads
write.ads(newad_df, append = TRUE, col.names = FALSE)
# Return the newly created ad
return(newad_df)
}
|
12937528ed8d07e0ae395d738adcc470eb164a7c
|
6d35b4de6b69532cb5b6908ca8b6aa69110c91d2
|
/deploy/app.R
|
1981e00113f1ea2910556ad15209328e38e12c45
|
[] |
no_license
|
templardrake/covid_shiny
|
1dd07aa2f18c801ee7456839ba42d98d3bfc2392
|
15c61f6d3aaa3bde8f7ad7620121544d1baf8663
|
refs/heads/master
| 2022-04-15T03:55:21.741061
| 2020-04-11T21:57:00
| 2020-04-11T21:57:00
| 254,930,077
| 0
| 0
| null | 2020-04-11T18:29:09
| 2020-04-11T18:29:08
| null |
UTF-8
|
R
| false
| false
| 5,774
|
r
|
app.R
|
library(shiny)
library(shinyWidgets)
library(plotly)
library(shinydashboard)
# get population density-normalized cumulative COVID deaths data
source("data.R")
df_orig <- getData()
# function to turn a string term into a formula
tilde <- function(term) as.formula(paste0("~`", term, "`"))
# function to add a trace to a Plotly plot
add_country <- function(country, plot)
plot %>% add_trace(y = tilde(country), name = country, mode = 'lines')
ui <- dashboardPage(title = "COVID Data Explorer", skin = "black",
header = dashboardHeader(
title = "COVID Data Explorer",
titleWidth = "250px"
),
sidebar = dashboardSidebar(
width = "250px",
pickerInput("user_countries", "Select countries",
choices = rownames(df_orig),
options = list(`actions-box` = TRUE),
multiple = T,
selected = c("Ireland", "US", "Italy", "United Kingdom", "Spain", "France", "Germany", "Japan"),
width = "100%"
),
selectInput("alignx", "Align x-axis on...",
choices = c("Date", "Days Since..."),
selected = "Date"
),
uiOutput("days_since"),
checkboxInput("logy", "Logarithmic y-axis?")
),
body = dashboardBody(
fluidRow(
column(width = 12,
box(width = "100%",
plotlyOutput("plot", width = "100%")
)
)
)
)
)
server <- function(input, output) {
# listen to x-axis alignment selection and render this separately from other UI
observe({
if (input$alignx == "Days Since...")
output$days_since <- renderUI(
sliderInput("xaxis_rate_align", "...cumulative normalized deaths", 0.1, 100, 0.1, 0.1))
else
output$days_since <- NULL
})
observe({
# create plot only if user has selected at least one country
len <- length(input$user_countries)
if (len > 0) {
# copy original data, but only use user-selected countries
df <- df_orig[input$user_countries, ]
countries <- NULL
# transposed data with additional "Date" column
if (input$alignx == "Date") {
# what to plot along the x-axis
xval <- "Date"
# transpose and add a Date column
tf <- as.data.frame(t(df))
tf$Date <- as.Date(rownames(tf))
# countries to use? all of them
countries <- input$user_countries
# complex row-wise shifting of data
} else if (input$alignx == "Days Since...") {
# what to plot along the x-axis
xval <- "Days Since..."
minrate <- if (is.null(input$xaxis_rate_align)) 0.1 else input$xaxis_rate_align
# get day (index) where country first met or exceeded `minrate` density-normalized cumulative deaths
start <- as.vector(tail(apply(df < minrate, 1, cumsum), n = 1)) + 1
# if number of days > `start`, country hasn't had that many cases yet
n_days <- ncol(df)
# these are all the countries with >= `minrate` cumulative deaths rate
countries <- rownames(df)[start < n_days]
if (length(countries) > 0) {
# rewrite data frame and start date list
df <- df[countries, ]
start <- start[start < n_days]
names(start) <- countries
# shift all rows to the left by `start` columns
minoffset <- min(start)
offset <- start-minoffset
# This is the most complicated part -- we need to "shift" each row of the data frame.
# If Japan has an offset of -49, we need to move every element of the "Japan" row
# 49 columns to the right. If Luxembourg has an offset of 5, we need to move every
# element of the "Luxembourg" row 5 columns to the left.
shift <- function(df, row, by) {
nr <- nrow(df)
if (by == 0 || row > nr || row < 1) df
else {
nc <- ncol(df)
if (abs(by) >= nc) {
df[row, ] <- rep(NA, nc)
df
} else if (by > 0) {
df[row, 1:(nc-by)] <- df[row, (1+by):nc]
df[row, (nc-by+1):nc] <- NA
df
} else {
df[row, (1-by):nc] <- df[row, 1:(nc+by)]
df[row, 1:(-by)] <- NA
df
}
}
}
# shift all rows by the appropriate amount
for (ii in 1:length(countries)) df <- shift(df, ii, offset[ii])
# finally, transpose and add "Days Since..." column, similar to above
tf <- as.data.frame(t(df))
tf$`Days Since...` <- (-minoffset+1):(nrow(tf)-minoffset)
}
}
# create plot only if at least one country is left after any filtering
len <- length(countries)
if (len > 0) {
# create plot with first country
first_country <- countries[1]
plot <- plot_ly(tf, x=tilde(xval), y=tilde(first_country), name=first_country, type="scatter", mode="lines")
# add all additional countries in a loop
if (len >1) for (ii in 2:len) plot <- add_country(countries[ii], plot)
# handle linear / logarithmic y-axis
yaxis <- list(title=HTML("Cumulative Deaths / Capita / km<sup>2</sup>"), tickprefix=" ")
if (input$logy) yaxis <- c(yaxis, type="log")
plot <- plot %>% layout(
title = HTML("Cumulative Deaths / Capita / km<sup>2</sup>"),
xaxis = list(title = xval),
yaxis = yaxis,
margin = list(l = 50, r = 50, b = 80, t = 80, pad = 20)
)
output$plot <- renderPlotly(plot)
}
}
})
}
shinyApp(ui = ui, server = server)
|
5d980d54e9eef81f22dcda7b1653f4a00172bd42
|
2912ad9b06522abedad98f7cbd81cf8b0bae929a
|
/man/matrixGenerator.Rd
|
ba097762e5a63ec6b5bbcd609ddaf91e4cc0e050
|
[] |
no_license
|
ricardobatistad/sbm_theory
|
185e98794f1f3b1e9d8b1b4351fd7151ee535532
|
edce31f2f86d12b1dda21ee3e9a4259f3896c317
|
refs/heads/master
| 2020-04-09T01:59:48.776549
| 2018-12-01T08:50:16
| 2018-12-01T08:50:16
| 159,925,169
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 390
|
rd
|
matrixGenerator.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graph_generator.R
\name{matrixGenerator}
\alias{matrixGenerator}
\title{Generate M matrix (see Lelarge)}
\usage{
matrixGenerator(params)
}
\arguments{
\item{params}{Full set of parameters}
}
\value{
Matrix M
}
\description{
Notice that some entries may be illegal. This issue is taken care of in script below.
}
|
95eb311a9ea232bfc2cfaad51840743e38f8680a
|
1bca04b4897d868ca0dccf168e6b90ed98455eb9
|
/man/CESdata.Rd
|
29a6f9948e1284c6bd145f23207a4f679b8b22b9
|
[] |
no_license
|
cran/nlWaldTest
|
c0c5edcb211bcb0cb13dc65d33a85d838ce96392
|
b96104f4557400abd5fb3fd475a9f3c923609ef0
|
refs/heads/master
| 2021-01-21T21:54:02.768196
| 2016-03-25T00:12:23
| 2016-03-25T00:12:23
| 24,210,481
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 452
|
rd
|
CESdata.Rd
|
\name{CESdata}
\alias{CESdata}
\docType{data}
\title{Data for testing CES production function}
\description{Data for estimation and testing CES production function: q-output, l-labor, k-capital}
\usage{CESdata}
\format{
A data frame with 25 observations on the following 3 variables.
\describe{
\item{\code{k}}{capital}
\item{\code{l}}{labor}
\item{\code{q}}{output}
}
}
\source{EViews, coef_test.wf1}
\examples{
attach(CESdata)}
|
b4d0ecbcc9c1429ab7a423092dd012a393b935a8
|
0ffa84c1e3cb0fef567ee9a7be34b808a5f06f29
|
/man/fs_search.Rd
|
1cf1225813f87ad7101bd345fe6b0241ce710354
|
[] |
no_license
|
chris-lundberg/factsetr
|
7f2170eeccf581b066f1c6313d7fd390eff3068a
|
652a92428acfcf67b41cfad9cfc9988c38e16293
|
refs/heads/master
| 2021-01-07T00:40:57.403604
| 2020-02-22T16:53:56
| 2020-02-22T16:53:56
| 241,529,043
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 910
|
rd
|
fs_search.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search.R
\name{fs_search}
\alias{fs_search}
\title{FactSet Filesystem Search}
\usage{
fs_search(con, dir, type = c("pa", "spar", "dir", "accounts"))
}
\arguments{
\item{con}{A FactSet connection object.}
\item{dir}{A directory in the FactSet filesystem associated with your serial number.}
\item{type}{The type of item you want to search for. Right now the options include accounts, directories, PA documents, or SPAR documents.}
}
\value{
A tibble containing the accounts, documents, or directories within the specified directory.
}
\description{
This function makes it easy to search your FactSet filesystem. It returns a tibble with the accounts, directories, or documents in the specified directory.
}
\examples{
fs_pa_docs <- fs_search(con, dir, type = "pa")
}
\seealso{
See \link{fs_auth} for authentication instructions.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.