blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fb820d6e5b3e01d942cfed9d1589d90e70198b8b | 4201e9b754760dc35fc0aeef9df5a8b9d801c47f | /bin/R-3.5.1/src/library/base/R/datetime.R | 67990f99beb67194b7758cdc24f8ae7c3e2596c8 | [
"LGPL-2.1-only",
"GPL-2.0-only",
"GPL-2.0-or-later",
"LGPL-3.0-only",
"GPL-3.0-only",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lifebit-ai/exomedepth | cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e | 5a775ae5e2a247aeadc5208a34e8717c7855d080 | refs/heads/master | 2020-03-27T12:55:56.400581 | 2018-10-11T10:00:07 | 2018-10-11T10:00:07 | 146,578,924 | 0 | 0 | MIT | 2018-08-29T09:43:52 | 2018-08-29T09:43:51 | null | UTF-8 | R | false | false | 46,702 | r | datetime.R | # File src/library/base/R/datetime.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2018 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
## Current system time as a "POSIXct" object (seconds since the epoch,
## possibly with fractional seconds); the real work is done in C.
Sys.time <- function() .POSIXct(.Internal(Sys.time()))
### Extensively rewritten for R 3.4.4
### There is no portable way to find the system timezone by location.
### For some ideas (not all accurate) see
### https://stackoverflow.com/questions/3118582/how-do-i-find-the-current-system-timezone
### See http://mm.icann.org/pipermail/tz/2017-December/025617.html for
### why you cannot deduce the timezone name from current abbreviations
### and offset from UTC -- cf Europe/Dublin and Europe/London which
### (despite the GB-Eire alias) have a different history including of
### DST in 1971.
### Will be called from C startup code for internal tzcode as Sys.timezone()
### For bootstrapping, it must be simple if TZ is set.
## Attempt to determine the name of the current system time zone (an
## Olson/IANA name such as "Europe/London"), trying in order: the TZ
## environment variable, the Windows C-level API, systemd's timedatectl,
## Debian-style /etc/timezone, the /etc/localtime symlink target and,
## as a last resort, a byte-comparison of a non-symlink localtime file
## against every file in the zoneinfo database.  Returns NA_character_
## if all heuristics fail.  The result is cached per session in
## .sys.timezone in the base environment.
Sys.timezone <- function(location = TRUE)
{
    if(!location)
        .Deprecated(msg = "Sys.timezone(location = FALSE) is defunct and ignored")
    ## caching added in 3.5.0
    if(!is.na(tz <- get0(".sys.timezone", baseenv(), mode = "character",
                         inherits = FALSE, ifnotfound = NA_character_)))
        return(tz)
    cacheIt <- function(tz) assign(".sys.timezone", tz, baseenv())
    ## Many Unix set TZ, e.g. Solaris and AIX.
    ## For Solaris the system setting is a line in /etc/TIMEZONE
    tz <- Sys.getenv("TZ")
    if(nzchar(tz)) return(tz)
    if(.Platform$OS.type == "windows") return(.Internal(tzone_name()))
    ## At least tzcode and glibc respect TZDIR.
    ## glibc uses $(datadir)/zoneinfo
    ## musl does not mention it, just reads /etc/localtime (as from 1.1.13)
    ## (A search of /usr/share/zoneinfo, /share/zoneinfo, /etc/zoneinfo
    ##  is hardcoded in musl.)
    ## Systems using --with-internal-tzcode will use the database at
    ## file.path(R.home("share"), "zoneinfo"), but it is a reasonable
    ## assumption that /etc/localtime is based on the system database.
    tzdir <- Sys.getenv("TZDIR")
    if(nzchar(tzdir) && !dir.exists(tzdir)) tzdir <- ""
    if(!nzchar(tzdir)) { ## See comments in OlsonNames
        ## BUG FIX: third entry was "/usrlib/zoneinfo", a corruption of
        ## "/usr/lib/zoneinfo" (the conventional location on some Unixes).
        if(dir.exists(tzdir <- "/usr/share/zoneinfo") ||
           dir.exists(tzdir <- "/share/zoneinfo") ||
           dir.exists(tzdir <- "/usr/share/lib/zoneinfo") ||
           dir.exists(tzdir <- "/usr/lib/zoneinfo") ||
           dir.exists(tzdir <- "/usr/local/etc/zoneinfo") ||
           dir.exists(tzdir <- "/etc/zoneinfo") ||
           dir.exists(tzdir <- "/usr/etc/zoneinfo")) {
        } else tzdir <- ""
    }
    ## First try timedatectl: should work on any modern Linux
    ## as part of systemd (and probably nowhere else)
    if (nzchar(Sys.which("timedatectl"))) {
        inf <- system("timedatectl", intern = TRUE)
        ## typical format:
        ##   "Time zone: Europe/London (GMT, +0000)"
        ##   "Time zone: Europe/Vienna (CET, +0100)"
        lines <- grep("Time zone: ", inf)
        if (length(lines)) {
            tz <- sub(" .*", "", sub(" *Time zone: ", "", inf[lines[1L]]))
            ## quick sanity check against the zoneinfo database, if any
            if(nzchar(tzdir)) {
                if(file.exists(file.path(tzdir, tz))) {
                    cacheIt(tz)
                    return(tz)
                } else
                    warning(sprintf("%s indicates the non-existent timezone name %s",
                                    sQuote("timedatectl"), sQuote(tz)),
                            call. = FALSE, immediate. = TRUE, domain = NA)
            } else {
                ## no database to check against: trust the reported name
                cacheIt(tz)
                return(tz)
            }
        }
    }
    ## Debian/Ubuntu Linux do things differently, so try that next.
    ## Derived loosely from PR#17186
    ## As the Java sources say
    ##
    ## 'There's no spec of the file format available. This parsing
    ## assumes that there's one line of an Olson tzid followed by a
    ## '\n', no leading or trailing spaces, no comments.'
    ##
    ## but we do trim whitespace and do a sanity check (Java does not)
    if (grepl("linux", R.Version()$platform, ignore.case = TRUE) &&
        file.exists("/etc/timezone")) {
        tz0 <- try(readLines("/etc/timezone"))
        if(!inherits(tz0, "try-error") && length(tz0) == 1L) {
            tz <- trimws(tz0)
            ## quick sanity check
            if(nzchar(tzdir)) {
                if(file.exists(file.path(tzdir, tz))) {
                    cacheIt(tz)
                    return(tz)
                } else
                    warning(sprintf("%s indicates the non-existent timezone name %s",
                                    sQuote("/etc/timezone"), sQuote(tz)),
                            call. = FALSE, immediate. = TRUE, domain = NA)
            } else {
                cacheIt(tz)
                return(tz)
            }
        }
    }
    ## non-Debian Linux (if not covered above), macOS, *BSD, ...
    ## According to the glibc's (at least 2.26)
    ## manual/time.texi, it can be configured to use
    ## /etc/localtime or /usr/local/etc/localtime
    ## (and in fact can be overridden when glibc is installed)
    ## This should be a symlink,
    ## but people including Debian have copied files instead.
    ## 'man 5 localtime' says (even on Debian)
    ## 'Because the timezone identifier is extracted from the symlink
    ##  target name of /etc/localtime, this file may not be a normal
    ##  file or hardlink.'
    ## tzcode mentions /usr/local/etc/zoneinfo/localtime
    ## as the 'local time zone file' (not seen in the wild)
    ## man tzset on macOS (from BSD) mentions /var/db/timezone/localtime
    if ((file.exists(lt0 <- "/etc/localtime") ||
         file.exists(lt0 <- "/usr/local/etc/localtime") ||
         file.exists(lt0 <- "/usr/local/etc/zoneinfo/localtime") ||
         file.exists(lt0 <- "/var/db/timezone/localtime")) &&
        !is.na(lt <- Sys.readlink(lt0)) && nzchar(lt)) { # so it is a symlink
        tz <- NA_character_
        ## glibc and macOS < 10.13 this is a link into /usr/share/zoneinfo
        ## (Debian Etch and later replaced it with a copy,
        ##  as have RHEL/Centos 6.x.)
        ## macOS 10.13.0 is a link into /usr/share/zoneinfo.default
        ## macOS 10.13.[12] is a link into /var/db/timezone/zoneinfo,
        ## itself a link (with target different on different machines)
        if ((nzchar(tzdir) && grepl(pat <- paste0("^", tzdir, "/"), lt)) ||
            grepl(pat <- "^/usr/share/zoneinfo.default/", lt))
            tz <- sub(pat, "", lt)
        ## all the locations listed for OlsonNames end in zoneinfo
        else if(grepl(pat <- ".*/zoneinfo/(.*)", lt))
            tz <- sub(pat, "\\1", lt)
        if(!is.na(tz)) {
            cacheIt(tz)
            return(tz)
        } else
            message("unable to deduce timezone name from ", sQuote(lt))
    }
    ## Last-gasp (slow, several seconds) fallback: compare a
    ## non-link lt0 to all the files under tzdir (as Java does).
    ## This may match more than one tz file: we don't care which.
    if (nzchar(tzdir) && # we already found lt0
        (is.na(lt <- Sys.readlink(lt0)) || !nzchar(lt))) {
        warning(sprintf("Your system is mis-configured: %s is not a symlink",
                        sQuote(lt0)),
                call. = FALSE, immediate. = TRUE, domain = NA)
        if(nzchar(Sys.which("cmp"))) {
            known <- dir(tzdir, recursive = TRUE)
            for(tz in known) {
                status <- system2("cmp", c("-s", lt0, file.path(tzdir, tz)))
                if (status == 0L) {
                    cacheIt(tz)
                    ## BUG FIX: message typo "envionment" -> "environment"
                    warning(sprintf("It is strongly recommended to set environment variable TZ to %s (or equivalent)",
                                    sQuote(tz)),
                            call. = FALSE, immediate. = TRUE, domain = NA)
                    return(tz)
                }
            }
            warning(sprintf("%s is not identical to any known timezone file",
                            sQuote(lt0)),
                    call. = FALSE, immediate. = TRUE, domain = NA)
        }
    }
    ## all heuristics have failed, so give up
    NA_character_
}
## Generic: convert an object to class "POSIXlt" (broken-down time).
as.POSIXlt <- function(x, tz = "", ...) UseMethod("as.POSIXlt")

## Dates carry no timezone; the conversion is done entirely in C.
as.POSIXlt.Date <- function(x, ...) .Internal(Date2POSIXlt(x))

## ## Moved to packages date and chron.
## as.POSIXlt.date <- as.POSIXlt.dates <- function(x, ...)
##     as.POSIXlt(as.POSIXct(x), ...)
## POSIXct -> POSIXlt: break the time down in the object's own timezone
## unless an explicit 'tz' is supplied.
as.POSIXlt.POSIXct <- function(x, tz = "", ...)
{
    ## NB: missing(tz) (not tz == "") decides whether the attribute wins
    if((missing(tz) || is.null(tz)) &&
       !is.null(tzone <- attr(x, "tzone"))) tz <- tzone[1L]
    .Internal(as.POSIXlt(x, tz))
}
## Convert a factor to "POSIXlt" via its character representation,
## carrying over any element names.
as.POSIXlt.factor <- function(x, ...)
{
    result <- as.POSIXlt(as.character(x), ...)
    names(result$year) <- names(x)
    result
}
## Convert character strings to "POSIXlt".  Without an explicit
## 'format', the entries in 'tryFormats' are tried in turn and the
## first one that parses every non-NA element wins.  With
## optional = TRUE a failure yields all-NA instead of an error.
as.POSIXlt.character <-
    function(x, tz = "", format,
             tryFormats = c("%Y-%m-%d %H:%M:%OS",
                            "%Y/%m/%d %H:%M:%OS",
                            "%Y-%m-%d %H:%M",
                            "%Y/%m/%d %H:%M",
                            "%Y-%m-%d",
                            "%Y/%m/%d"), optional = FALSE, ...)
{
    x <- unclass(x) # precaution PR#7826
    if(!missing(format)) {
        res <- strptime(x, format, tz = tz)
        if(nzchar(tz)) attr(res, "tzone") <- tz
        return(res)
    }
    xx <- x[!is.na(x)]
    if (!length(xx)) { # all NA: any format gives all-NA output
        res <- strptime(x, "%Y/%m/%d")
        if(nzchar(tz)) attr(res, "tzone") <- tz
        return(res)
    } else
        ## accept the first candidate format that parses everything
        for(f in tryFormats)
            if(all(!is.na(strptime(xx, f, tz = tz)))) {
                res <- strptime(x, f, tz = tz)
                if(nzchar(tz)) attr(res, "tzone") <- tz
                return(res)
            }
    ## no success :
    if(optional)
        as.POSIXlt.character(rep.int(NA_character_, length(x)), tz=tz)
    else stop("character string is not in a standard unambiguous format")
}
## Interpret numbers as seconds offset from 'origin' (parsed in UTC)
## and return the broken-down time in 'tz'.
as.POSIXlt.numeric <- function(x, tz = "", origin, ...)
{
    ## a bare number is meaningless without a reference point
    if(missing(origin)) stop("'origin' must be supplied")
    ref <- as.POSIXct(origin, tz = "UTC", ...)
    as.POSIXlt(ref + x, tz = tz)
}
## Fallback method: handles objects already of the class and all-NA
## logicals (i.e. plain NA); anything else errors unless optional = TRUE,
## in which case an all-NA "POSIXlt" of the same length is returned.
as.POSIXlt.default <- function(x, tz = "", optional = FALSE, ...)
{
    if(inherits(x, "POSIXlt")) return(x)
    if(is.logical(x) && all(is.na(x)))
        return(as.POSIXlt(as.POSIXct.default(x), tz = tz))
    if(!optional)
        stop(gettextf("do not know how to convert '%s' to class %s",
                      deparse(substitute(x)),
                      dQuote("POSIXlt")),
             domain = NA)
    as.POSIXlt.character(rep.int(NA_character_, length(x)), tz = tz)
}
## Generic: convert an object to class "POSIXct" (seconds since epoch).
as.POSIXct <- function(x, tz = "", ...) UseMethod("as.POSIXct")

## A Date is a whole number of days since the epoch: scale to seconds.
as.POSIXct.Date <- function(x, ...) .POSIXct(86400 * unclass(x))
## ## Moved to package date
## as.POSIXct.date <- function(x, ...)
## {
## if(inherits(x, "date")) {
## x <- (x - 3653) * 86400 # origin 1960-01-01
## return(.POSIXct(x))
## } else stop(gettextf("'%s' is not a \"date\" object",
## deparse(substitute(x)) ))
## }
## ## Moved to package chron
## as.POSIXct.dates <- function(x, ...)
## {
## if(inherits(x, "dates")) {
## z <- attr(x, "origin")
## x <- as.numeric(x) * 86400
## if(length(z) == 3L && is.numeric(z))
## x <- x + as.numeric(ISOdate(z[3L], z[1L], z[2L], 0))
## return(.POSIXct(x))
## } else stop(gettextf("'%s' is not a \"dates\" object",
## deparse(substitute(x)) ))
## }
## POSIXlt -> POSIXct, defaulting to the object's own timezone.
as.POSIXct.POSIXlt <- function(x, tz = "", ...)
{
    tzone <- attr(x, "tzone")
    if(missing(tz) && !is.null(tzone)) tz <- tzone[1L]
    ## <FIXME>
    ## Move names handling to C code eventually ...
    y <- .Internal(as.POSIXct(x, tz))
    names(y) <- names(x$year)
    .POSIXct(y, tz)
    ## </FIXME>
}
## Interpret numbers as seconds offset from 'origin' (parsed in GMT).
as.POSIXct.numeric <- function(x, tz = "", origin, ...)
{
    ## a bare number is meaningless without a reference point
    if(missing(origin)) stop("'origin' must be supplied")
    ref <- as.POSIXct(origin, tz = "GMT", ...)
    .POSIXct(ref + x, tz)
}
## Fallback method: pass-through for POSIXct, parse characters/factors
## via POSIXlt, map all-NA logicals (plain NA) to NA times; error otherwise.
as.POSIXct.default <- function(x, tz = "", ...)
{
    if(inherits(x, "POSIXct")) return(x)
    if(is.character(x) || is.factor(x))
        return(as.POSIXct(as.POSIXlt(x, tz, ...), tz, ...))
    if(is.logical(x) && all(is.na(x)))
        return(.POSIXct(as.numeric(x)))
    stop(gettextf("do not know how to convert '%s' to class %s",
                  deparse(substitute(x)),
                  dQuote("POSIXct")),
         domain = NA)
}
## Adjusting the length of a POSIXct keeps the timezone and class.
`length<-.POSIXct` <- function(x, value)
    .POSIXct(NextMethod(), attr(x, "tzone"), oldClass(x))

## Numeric value is seconds since the epoch, via POSIXct.
as.double.POSIXlt <- function(x, ...) as.double(as.POSIXct(x))

## POSIXlt is not primarily a list, but primarily an abstract vector of
## time stamps: its length is the common length of its components.
length.POSIXlt <- function(x) length(unclass(x)[[1L]])

## Adjust every broken-down component in parallel, preserving
## the timezone and class.
`length<-.POSIXlt` <- function(x, value)
    .POSIXlt(lapply(unclass(x), `length<-`, value),
             attr(x, "tzone"), oldClass(x))
## Format a POSIXlt object as character.  An empty format element picks
## "%Y-%m-%d" (when all times are midnight), "%Y-%m-%d %H:%M:%S", or a
## fractional-seconds variant controlled by getOption("digits.secs").
format.POSIXlt <- function(x, format = "", usetz = FALSE, ...)
{
    if(!inherits(x, "POSIXlt")) stop("wrong class")
    if(any(f0 <- format == "")) {
        ## need list [ method here.
        ## components 1:3 are sec, min and hour
        times <- unlist(unclass(x)[1L:3L])[f0]
        secs <- x$sec[f0]; secs <- secs[!is.na(secs)]
        np <- getOption("digits.secs")
        np <- if(is.null(np)) 0L else min(6L, np)
        if(np >= 1L)
            ## use the fewest sub-second digits that represent the data
            for (i in seq_len(np)- 1L)
                if(all( abs(secs - round(secs, i)) < 1e-6 )) {
                    np <- i
                    break
                }
        format[f0] <-
            if(all(times[!is.na(times)] == 0)) "%Y-%m-%d"
            else if(np == 0L) "%Y-%m-%d %H:%M:%S"
            else paste0("%Y-%m-%d %H:%M:%OS", np)
    }
    ## <FIXME>
    ## Move names handling to C code eventually ...
    y <- .Internal(format.POSIXlt(x, format, usetz))
    names(y) <- names(x$year)
    y
    ## </FIXME>
}
## prior to 2.9.0 the same as format.POSIXlt.
## now more or less the same as format.POSIXct but also works for Dates.
strftime <- function(x, format = "", tz = "", usetz = FALSE, ...)
{
    ## go via POSIXlt so that Date input is accepted too
    lt <- as.POSIXlt(x, tz = tz)
    format(lt, format = format, usetz = usetz, ...)
}
## Parse character strings to "POSIXlt" according to 'format'.
## The real parsing happens in C; names are propagated from the input.
strptime <- function(x, format, tz = "")
{
    ## <FIXME>
    ## Move names handling to C code eventually ...
    y <- .Internal(strptime(as.character(x), format, tz))
    ## Assuming we can rely on the names of x ...
    names(y$year) <- names(x)
    y
    ## </FIXME>
}
## Format a POSIXct object, defaulting to its own "tzone" attribute
## when 'tz' is not explicitly supplied.
format.POSIXct <- function(x, format = "", tz = "", usetz = FALSE, ...)
{
    if(!inherits(x, "POSIXct")) stop("wrong class")
    ## NB identical(tz, "") is *NOT* the same as missing(tz)
    if(missing(tz)) {
        tzone <- attr(x, "tzone")
        if(!is.null(tzone)) tz <- tzone
    }
    out <- format.POSIXlt(as.POSIXlt(x, tz), format, usetz, ...)
    structure(out, names = names(x))
}
## could handle arrays for max.print \\ keep in sync with print.Date() in ./dates.R
## Print method shared by "POSIXct" and "POSIXlt".  Output is truncated
## at getOption("max.print") entries; 'tz' is forwarded to format() only
## when explicitly supplied (NB: missing(tz), not tz == "").
print.POSIXct <-
print.POSIXlt <- function(x, tz = "", usetz = TRUE, ...)
{
    max.print <- getOption("max.print", 9999L)
    ## BUG FIX: format the function argument 'z', not the closed-over
    ## 'x' -- otherwise FORM(x[seq_len(max.print)]) below formatted the
    ## *whole* vector and the max.print truncation had no effect.
    FORM <- if(missing(tz)) function(z) format(z, usetz = usetz)
            else function(z) format(z, tz = tz, usetz = usetz)
    if(max.print < length(x)) {
        print(FORM(x[seq_len(max.print)]), ...)
        cat(' [ reached getOption("max.print") -- omitted',
            length(x) - max.print, 'entries ]\n')
    } else if(length(x))
        print(FORM(x), max = max.print, ...)
    else
        cat(class(x)[1L], "of length 0\n")
    invisible(x)
}
## Summarise the underlying seconds, moving any "NA's" count into an
## "NAs" attribute, then restore the time class for printing.
summary.POSIXct <- function(object, digits = 15L, ...)
{
    res <- summary.default(unclass(object), digits = digits, ...)
    na.pos <- match("NA's", names(res), 0L)
    if(na.pos) {
        n.na <- as.integer(res[na.pos])
        res <- res[-na.pos]
        attr(res, "NAs") <- n.na
    }
    .POSIXct(res,
             tz = attr(object, "tzone"),
             cl = c("summaryDefault", "table", oldClass(object)))
}
## Summarise broken-down times on the POSIXct (seconds) scale.
summary.POSIXlt <- function(object, digits = 15, ...)
{
    summary(as.POSIXct(object), digits = digits, ...)
}
## Add a number of seconds (or a "difftime") to a date-time.
## Adding two date-times is an error.
`+.POSIXt` <- function(e1, e2)
{
    ## need to drop "units" attribute here
    coerceTimeUnit <- function(x)
        as.vector(switch(attr(x,"units"),
                         secs = x, mins = 60*x, hours = 60*60*x,
                         days = 60*60*24*x, weeks = 60*60*24*7*x))
    if (nargs() == 1L) return(e1)   # unary '+' is a no-op
    # only valid if one of e1 and e2 is a scalar/difftime
    if(inherits(e1, "POSIXt") && inherits(e2, "POSIXt"))
        stop("binary '+' is not defined for \"POSIXt\" objects")
    if(inherits(e1, "POSIXlt")) e1 <- as.POSIXct(e1)
    if(inherits(e2, "POSIXlt")) e2 <- as.POSIXct(e2)
    if (inherits(e1, "difftime")) e1 <- coerceTimeUnit(e1)
    if (inherits(e2, "difftime")) e2 <- coerceTimeUnit(e2)
    ## the result keeps the (consistent) timezone of the inputs
    .POSIXct(unclass(e1) + unclass(e2), check_tzones(e1, e2))
}
## Subtract seconds or a "difftime" from a date-time, or subtract two
## date-times (which yields a "difftime" via difftime()).
`-.POSIXt` <- function(e1, e2)
{
    ## need to drop "units" attribute here
    coerceTimeUnit <- function(x)
        as.vector(switch(attr(x,"units"),
                         secs = x, mins = 60*x, hours = 60*60*x,
                         days = 60*60*24*x, weeks = 60*60*24*7*x))
    if(!inherits(e1, "POSIXt"))
        stop("can only subtract from \"POSIXt\" objects")
    if (nargs() == 1L) stop("unary '-' is not defined for \"POSIXt\" objects")
    ## date-time minus date-time => "difftime"
    if(inherits(e2, "POSIXt")) return(difftime(e1, e2))
    if (inherits(e2, "difftime")) e2 <- coerceTimeUnit(e2)
    if(!is.null(attr(e2, "class")))
        stop("can only subtract numbers from \"POSIXt\" objects")
    e1 <- as.POSIXct(e1)
    .POSIXct(unclass(e1) - e2, attr(e1, "tzone"))
}
## Group generic: only the comparison operators are defined for
## date-times; comparison is done on the POSIXct (seconds) scale.
Ops.POSIXt <- function(e1, e2)
{
    if (nargs() == 1L)
        stop(gettextf("unary '%s' not defined for \"POSIXt\" objects",
                      .Generic), domain = NA)
    boolean <- switch(.Generic, "<" = , ">" = , "==" = ,
                      "!=" = , "<=" = , ">=" = TRUE, FALSE)
    if (!boolean)
        stop(gettextf("'%s' not defined for \"POSIXt\" objects", .Generic),
             domain = NA)
    ## character operands are parsed as date-times first
    if(inherits(e1, "POSIXlt") || is.character(e1)) e1 <- as.POSIXct(e1)
    if(inherits(e2, "POSIXlt") || is.character(e2)) e2 <- as.POSIXct(e2)
    check_tzones(e1, e2)   # warns when timezones are inconsistent
    NextMethod(.Generic)
}
## No member of the Math group generic (log, abs, ...) is defined
## for date-times.
Math.POSIXt <- function (x, ...)
{
    stop(gettextf("'%s' not defined for \"POSIXt\" objects", .Generic),
         domain = NA)
}
## Collect the (first element of the) "tzone" attribute of each
## argument, warn if the non-empty ones disagree, and return the first
## non-empty zone or NULL when none is set.
check_tzones <- function(...)
{
    zones <- unique(sapply(list(...), function(obj) {
        tzone <- attr(obj, "tzone")
        if(is.null(tzone)) "" else tzone[1L]
    }))
    zones <- zones[nzchar(zones)]
    if(length(zones) > 1L)
        warning("'tzone' attributes are inconsistent")
    if(length(zones)) zones[1L] else NULL
}
## Only max/min/range are defined; the timezone must be consistent
## across the arguments (check_tzones warns otherwise).
Summary.POSIXct <- function (..., na.rm)
{
    ok <- switch(.Generic, max = , min = , range = TRUE, FALSE)
    if (!ok)
        stop(gettextf("'%s' not defined for \"POSIXt\" objects", .Generic),
             domain = NA)
    args <- list(...)
    tz <- do.call("check_tzones", args)
    .POSIXct(NextMethod(.Generic), tz = tz, cl = oldClass(args[[1L]]))
}
## max/min/range for broken-down times: computed on the POSIXct scale
## and converted back to "POSIXlt".
Summary.POSIXlt <- function (..., na.rm)
{
    ok <- switch(.Generic, max = , min = , range = TRUE, FALSE)
    if (!ok)
        stop(gettextf("'%s' not defined for \"POSIXt\" objects", .Generic),
             domain = NA)
    args <- list(...)
    tz <- do.call("check_tzones", args)
    args <- lapply(args, as.POSIXct)
    val <- do.call(.Generic, c(args, na.rm = na.rm))
    as.POSIXlt(.POSIXct(val, tz))
}
## Subsetting keeps the timezone and class.
`[.POSIXct` <-
    function(x, ..., drop = TRUE)
    .POSIXct(NextMethod("["), attr(x, "tzone"), oldClass(x))

`[[.POSIXct` <-
    function(x, ..., drop = TRUE)
    .POSIXct(NextMethod("[["), attr(x, "tzone"), oldClass(x))

## Replacement coerces 'value' to POSIXct; the LHS timezone is kept.
`[<-.POSIXct` <-
    function(x, ..., value) {
        if(!length(value)) return(x)
        value <- unclass(as.POSIXct(value))
        .POSIXct(NextMethod(.Generic), attr(x, "tzone"), oldClass(x))
    }
## Character conversion simply formats (honouring the timezone).
as.character.POSIXt <- function(x, ...) format(x, ...)

## Standard one-column data.frame conversion.
as.data.frame.POSIXct <- as.data.frame.vector
## Split a POSIXct vector into a list of single-element POSIXct values,
## each keeping the timezone and class; names are preserved on the list.
as.list.POSIXct <- function(x, ...)
{
    nms <- names(x)
    names(x) <- NULL
    out <- lapply(unclass(x), .POSIXct, attr(x, "tzone"), oldClass(x))
    names(out) <- nms
    out
}
## An element is NA iff it is NA on the POSIXct (seconds) scale.
is.na.POSIXlt <- function(x) is.na(as.POSIXct(x))

## anyNA on the POSIXct scale; 'recursive' is accepted but unused here.
anyNA.POSIXlt <- function(x, recursive = FALSE) anyNA(as.POSIXct(x))
## <FIXME> check the argument validity
## This is documented to remove the timezone
c.POSIXct <- function(..., recursive = FALSE)
    .POSIXct(c(unlist(lapply(list(...), unclass))))

## we need conversion to POSIXct as POSIXlt objects can be in different tz.
c.POSIXlt <- function(..., recursive = FALSE)
    as.POSIXlt(do.call("c", lapply(list(...), as.POSIXct)))
## Build a "POSIXct" date-time from numeric calendar components.
ISOdatetime <- function(year, month, day, hour, min, sec, tz = "")
{
    lens <- vapply(list(year, month, day, hour, min, sec), length, 1,
                   USE.NAMES = FALSE)
    ## any zero-length component gives a zero-length result
    if(min(lens) == 0L)
        return(.POSIXct(numeric(), tz = tz))
    stamp <- paste(year, month, day, hour, min, sec, sep = "-")
    as.POSIXct(strptime(stamp, "%Y-%m-%d-%H-%M-%OS", tz = tz), tz = tz)
}

## Same, defaulting to midday GMT (a timezone-safe "date only" choice).
ISOdate <- function(year, month, day, hour = 12, min = 0, sec = 0, tz = "GMT")
    ISOdatetime(year, month, day, hour, min, sec, tz)
## Matrix of the broken-down components (sec, min, hour, ...).
as.matrix.POSIXlt <- function(x, ...)
{
    df <- as.data.frame(unclass(x))
    as.matrix(df, ...)
}

## The mean keeps the timezone of the input.
mean.POSIXct <- function (x, ...)
{
    avg <- mean(unclass(x), ...)
    .POSIXct(avg, attr(x, "tzone"))
}

## Compute on the POSIXct scale and convert back.
mean.POSIXlt <- function (x, ...)
    as.POSIXlt(mean(as.POSIXct(x), ...))
## ----- difftime -----
## Time difference time1 - time2 as a "difftime".  With units = "auto"
## the unit is chosen from the smallest absolute gap: secs below a
## minute, then mins, hours, and days.
difftime <-
    function(time1, time2, tz,
             units = c("auto", "secs", "mins", "hours", "days", "weeks"))
{
    ## convert both endpoints to POSIXct, in 'tz' when given (PR#14182)
    if (missing(tz)) {
        t1 <- as.POSIXct(time1)
        t2 <- as.POSIXct(time2)
    } else {
        t1 <- as.POSIXct(time1, tz = tz)
        t2 <- as.POSIXct(time2, tz = tz)
    }
    secs <- unclass(t1) - unclass(t2)
    attr(secs, "tzone") <- NULL # `-` may have copied it from its args
    units <- match.arg(units)
    if(units == "auto") {
        smallest <- if(all(is.na(secs))) NA
                    else min(abs(secs), na.rm = TRUE)
        units <- if(is.na(smallest) || !is.finite(smallest) || smallest < 60)
                     "secs"
                 else if(smallest < 3600) "mins"
                 else if(smallest < 86400) "hours"
                 else "days"
    }
    divisor <- switch(units, secs = 1, mins = 60, hours = 3600,
                      days = 86400, weeks = 7 * 86400)
    .difftime(secs / divisor, units = units)
}
## "difftime" constructor
## Martin Maechler, Date: 16 Sep 2002
## Numeric input version Peter Dalgaard, December 2006
as.difftime <- function(tim, format = "%X", units = "auto")
{
if (inherits(tim, "difftime")) return(tim)
if (is.character(tim)) {
difftime(strptime(tim, format = format),
strptime("0:0:0", format = "%X"), units = units)
} else {
if (!is.numeric(tim)) stop("'tim' is not character or numeric")
if (units == "auto") stop("need explicit units for numeric conversion")
if (!(units %in% c("secs", "mins", "hours", "days", "weeks")))
stop("invalid units specified")
.difftime(tim, units = units)
}
}
### For now, these have only difftime methods, but you never know...
units <- function(x) UseMethod("units")
`units<-` <- function(x, value) UseMethod("units<-")

## The units of a "difftime" live in its "units" attribute.
units.difftime <- function(x) attr(x, "units")

## Changing the units rescales the underlying numbers accordingly.
`units<-.difftime` <- function(x, value)
{
    from <- units(x)
    if (from == value) return(x)
    if (!(value %in% c("secs", "mins", "hours", "days", "weeks")))
        stop("invalid units specified")
    ## seconds per unit, via cumulative products of the step factors
    perUnit <- cumprod(c(secs = 1, mins = 60, hours = 60, days = 24, weeks = 7))
    rescaled <- unclass(x) * as.vector(perUnit[from] / perUnit[value])
    .difftime(rescaled, value)
}
## Numeric value, optionally after converting to the requested units.
as.double.difftime <- function(x, units = "auto", ...)
{
    if (units != "auto") units(x) <- units
    as.vector(x, "double")
}

## Standard one-column data.frame conversion.
as.data.frame.difftime <- as.data.frame.vector

## Character representation: "<number> <units>".
format.difftime <- function(x, ...)
    paste(format(unclass(x), ...), units(x))
## Print method: a single difference on one line, longer vectors and
## arrays with a units header followed by the bare numbers.
print.difftime <- function(x, digits = getOption("digits"), ...)
{
    if(is.array(x) || length(x) > 1L) {
        cat("Time differences in ", attr(x, "units"), "\n", sep = "")
        vals <- unclass(x)
        attr(vals, "units") <- NULL
        print(vals, digits = digits, ...)
    } else {
        cat("Time difference of ", format(unclass(x), digits = digits), " ",
            attr(x, "units"), "\n", sep = "")
    }
    invisible(x)
}
## Subsetting and diff() both keep the units and class.
`[.difftime` <- function(x, ..., drop = TRUE)
    .difftime(NextMethod("["), attr(x, "units"), oldClass(x))

diff.difftime <- function(x, ...)
    .difftime(NextMethod("diff"), attr(x, "units"), oldClass(x))
## Group generic for "difftime": unary +/-, comparisons (converted to a
## common unit first), and binary +/- (keeping the common units, or
## falling back to seconds when they differ).
Ops.difftime <- function(e1, e2)
{
    coerceTimeUnit <- function(x)
    {
        switch(attr(x, "units"),
               secs = x, mins = 60*x, hours = 60*60*x,
               days = 60*60*24*x, weeks = 60*60*24*7*x)
    }
    if (nargs() == 1L) {
        ## unary '+' is a no-op; unary '-' negates in place
        switch(.Generic, "+" = {}, "-" = {e1[] <- -unclass(e1)},
               stop(gettextf("unary '%s' not defined for \"difftime\" objects",
                             .Generic), domain = NA, call. = FALSE)
               )
        return(e1)
    }
    boolean <- switch(.Generic, "<" = , ">" = , "==" = ,
                      "!=" = , "<=" = , ">=" = TRUE, FALSE)
    if (boolean) {
        ## assume user knows what he/she is doing if not both difftime
        if(inherits(e1, "difftime") && inherits(e2, "difftime")) {
            ## compare in seconds
            e1 <- coerceTimeUnit(e1)
            e2 <- coerceTimeUnit(e2)
        }
        NextMethod(.Generic)
    } else if(.Generic == "+" || .Generic == "-") {
        ## difftime +/- plain number keeps that difftime's units
        if(inherits(e1, "difftime") && !inherits(e2, "difftime"))
            return(.difftime(NextMethod(.Generic),
                             units = attr(e1, "units")))
        if(!inherits(e1, "difftime") && inherits(e2, "difftime"))
            return(.difftime(NextMethod(.Generic),
                             units = attr(e2, "units")))
        u1 <- attr(e1, "units")
        if(attr(e2, "units") == u1) {
            .difftime(NextMethod(.Generic), units = u1)
        } else {
            ## mixed units: compute in seconds
            e1 <- coerceTimeUnit(e1)
            e2 <- coerceTimeUnit(e2)
            .difftime(NextMethod(.Generic), units = "secs")
        }
    } else {
        ## '*' is covered by a specific method
        stop(gettextf("'%s' not defined for \"difftime\" objects", .Generic),
             domain = NA)
    }
}
## Multiply a "difftime" by a plain number (in either order).
`*.difftime` <- function (e1, e2)
{
    ## exactly one operand may be a difftime; normalise it into e1
    if(inherits(e1, "difftime") && inherits(e2, "difftime"))
        stop("both arguments of * cannot be \"difftime\" objects")
    if(inherits(e2, "difftime")) {
        swap <- e1
        e1 <- e2
        e2 <- swap
    }
    .difftime(e2 * unclass(e1), attr(e1, "units"))
}

## Divide a "difftime" by a plain number.
`/.difftime` <- function (e1, e2)
{
    if(inherits(e2, "difftime"))
        stop("second argument of / cannot be a \"difftime\" object")
    .difftime(unclass(e1) / e2, attr(e1, "units"))
}
## "Math": some methods should work; the other ones are meaningless :
Math.difftime <- function (x, ...)
{
switch(.Generic,
"abs" =, "sign" =, "floor" =, "ceiling" =, "trunc" =,
"round" =, "signif" = {
units <- attr(x, "units")
.difftime(NextMethod(), units)
},
### otherwise :
stop(gettextf("'%s' not defined for \"difftime\" objects", .Generic),
domain = NA))
}
## The mean preserves the units of the input.
mean.difftime <- function (x, ...)
{
    avg <- mean(unclass(x), ...)
    .difftime(avg, attr(x, "units"))
}
## Only max/min/sum/range are defined for "difftime".
## When inputs have mixed units the result is in seconds.
Summary.difftime <- function (..., na.rm)
{
    ## FIXME: this could return in the smallest of the units of the inputs.
    coerceTimeUnit <- function(x)
    {
        as.vector(switch(attr(x,"units"),
                         secs = x, mins = 60*x, hours = 60*60*x,
                         days = 60*60*24*x, weeks = 60*60*24*7*x))
    }
    ok <- switch(.Generic, max = , min = , sum=, range = TRUE, FALSE)
    if (!ok)
        stop(gettextf("'%s' not defined for \"difftime\" objects", .Generic),
             domain = NA)
    x <- list(...)
    Nargs <- length(x)
    if(Nargs == 0) {
        ## empty call: the generic's identity value, in seconds
        .difftime(do.call(.Generic), "secs")
    } else {
        units <- sapply(x, attr, "units")
        if(all(units == units[1L])) {
            args <- c(lapply(x, as.vector), na.rm = na.rm)
        } else {
            ## mixed units: convert every argument to seconds first
            args <- c(lapply(x, coerceTimeUnit), na.rm = na.rm)
            units <- "secs"
        }
        .difftime(do.call(.Generic, args), units[[1L]])
    }
}
## Concatenate "difftime" objects (and plain numbers).  If all difftime
## arguments share one unit it is kept and numbers are taken to be in
## that unit; otherwise everything is converted to seconds.
c.difftime <-
    function(..., recursive = FALSE)
{
    toSeconds <- function(x) {
        switch(attr(x, "units"),
               secs = x, mins = 60*x, hours = 60*60*x,
               days = 60*60*24*x, weeks = 60*60*24*7*x)
    }
    args <- list(...)
    if(!length(args)) return(.difftime(double(), "secs"))
    isDiff <- sapply(args, inherits, "difftime")
    plain <- which(!isDiff)
    units <- sapply(args[isDiff], attr, "units")
    if(all(units == (first <- units[1L]))) {
        if(length(plain))
            args[plain] <- lapply(args[plain], as.difftime, units = first)
        .difftime(unlist(args), first)
    } else {
        if(length(plain))
            args[plain] <- lapply(args[plain], as.difftime, units = "secs")
        args[isDiff] <- lapply(args[isDiff], toSeconds)
        .difftime(unlist(args), "secs")
    }
}
## Adjusting the length keeps the units and class.
`length<-.difftime` <-
    function(x, value)
    .difftime(NextMethod(), attr(x, "units"), oldClass(x))
## ----- convenience functions -----
## seq() method for date-time objects.  Exactly two of 'to', 'by' and
## 'length.out'/'along.with' must be given.  'by' may be a number
## (seconds), a "difftime", or a string like "3 months": units "months",
## "years", "DSTdays" and "quarters" are stepped in calendar (POSIXlt)
## arithmetic, the others on the seconds scale.
seq.POSIXt <-
    function(from, to, by, length.out = NULL, along.with = NULL, ...)
{
    if (missing(from)) stop("'from' must be specified")
    if (!inherits(from, "POSIXt")) stop("'from' must be a \"POSIXt\" object")
    cfrom <- as.POSIXct(from)
    if(length(cfrom) != 1L) stop("'from' must be of length 1")
    tz <- attr(cfrom , "tzone")
    if (!missing(to)) {
        if (!inherits(to, "POSIXt")) stop("'to' must be a \"POSIXt\" object")
        if (length(as.POSIXct(to)) != 1) stop("'to' must be of length 1")
    }
    if (!missing(along.with)) {
        length.out <- length(along.with)
    } else if (!is.null(length.out)) {
        if (length(length.out) != 1L) stop("'length.out' must be of length 1")
        length.out <- ceiling(length.out)
    }
    status <- c(!missing(to), !missing(by), !is.null(length.out))
    if(sum(status) != 2L)
        stop("exactly two of 'to', 'by' and 'length.out' / 'along.with' must be specified")
    if (missing(by)) {
        ## from/to/length.out: a plain numeric sequence of seconds
        from <- unclass(cfrom)
        to <- unclass(as.POSIXct(to))
        ## Till (and incl.) 1.6.0 :
        ##- incr <- (to - from)/length.out
        ##- res <- seq.default(from, to, incr)
        res <- seq.int(from, to, length.out = length.out)
        return(.POSIXct(res, tz))
    }
    if (length(by) != 1L) stop("'by' must be of length 1")
    valid <- 0L
    if (inherits(by, "difftime")) {
        ## convert the step to seconds
        by <- switch(attr(by,"units"), secs = 1, mins = 60, hours = 3600,
                     days = 86400, weeks = 7*86400) * unclass(by)
    } else if(is.character(by)) {
        ## e.g. "2 weeks": optional count followed by a unit name
        by2 <- strsplit(by, " ", fixed = TRUE)[[1L]]
        if(length(by2) > 2L || length(by2) < 1L)
            stop("invalid 'by' string")
        valid <- pmatch(by2[length(by2)],
                        c("secs", "mins", "hours", "days", "weeks",
                          "months", "years", "DSTdays", "quarters"))
        if(is.na(valid)) stop("invalid string for 'by'")
        if(valid <= 5L) {
            by <- c(1, 60, 3600, 86400, 7*86400)[valid]
            if (length(by2) == 2L) by <- by * as.integer(by2[1L])
        } else
            by <- if(length(by2) == 2L) as.integer(by2[1L]) else 1
    } else if(!is.numeric(by)) stop("invalid mode for 'by'")
    if(is.na(by)) stop("'by' is NA")
    if(valid <= 5L) { # secs, mins, hours, days, weeks
        from <- unclass(as.POSIXct(from))
        if(!is.null(length.out))
            res <- seq.int(from, by = by, length.out = length.out)
        else {
            to0 <- unclass(as.POSIXct(to))
            ## defeat test in seq.default
            res <- seq.int(0, to0 - from, by) + from
        }
        return(.POSIXct(res, tz))
    } else { # months or years or DSTdays or quarters
        r1 <- as.POSIXlt(from)
        if(valid == 7L) { # years
            if(missing(to)) {
                yr <- seq.int(r1$year, by = by, length.out = length.out)
            } else {
                to <- as.POSIXlt(to)
                yr <- seq.int(r1$year, to$year, by)
            }
            r1$year <- yr
        } else if(valid %in% c(6L, 9L)) { # months or quarters
            if (valid == 9L) by <- by * 3   # a quarter is three months
            if(missing(to)) {
                mon <- seq.int(r1$mon, by = by, length.out = length.out)
            } else {
                to0 <- as.POSIXlt(to)
                mon <- seq.int(r1$mon, 12*(to0$year - r1$year) + to0$mon, by)
            }
            r1$mon <- mon
        } else if(valid == 8L) { # DSTdays
            if(!missing(to)) {
                ## We might have a short day, so need to over-estimate.
                length.out <- 2L + floor((unclass(as.POSIXct(to)) -
                                          unclass(as.POSIXct(from)))/(by * 86400))
            }
            r1$mday <- seq.int(r1$mday, by = by, length.out = length.out)
        }
        ## let the C conversion decide whether DST is in force
        r1$isdst <- -1L
        res <- as.POSIXct(r1)
        ## now shorten if necessary.
        if(!missing(to)) {
            to <- as.POSIXct(to)
            res <- if(by > 0) res[res <= to] else res[res >= to]
        }
        res
    }
}
## *very* similar to cut.Date [ ./dates.R ] -- keep in sync!
cut.POSIXt <-
function (x, breaks, labels = NULL, start.on.monday = TRUE,
right = FALSE, ...)
{
if(!inherits(x, "POSIXt")) stop("'x' must be a date-time object")
x <- as.POSIXct(x)
if (inherits(breaks, "POSIXt")) {
breaks <- sort(as.POSIXct(breaks))
} else if(is.numeric(breaks) && length(breaks) == 1L) {
## specified number of breaks
} else if(is.character(breaks) && length(breaks) == 1L) {
by2 <- strsplit(breaks, " ", fixed = TRUE)[[1L]]
if(length(by2) > 2L || length(by2) < 1L)
stop("invalid specification of 'breaks'")
valid <-
pmatch(by2[length(by2)],
c("secs", "mins", "hours", "days", "weeks",
"months", "years", "DSTdays", "quarters"))
if(is.na(valid)) stop("invalid specification of 'breaks'")
start <- as.POSIXlt(min(x, na.rm = TRUE))
incr <- 1
if(valid > 1L) { start$sec <- 0L; incr <- 60 }
if(valid > 2L) { start$min <- 0L; incr <- 3600 }
## start of day need not be on the same DST, PR#14208
if(valid > 3L) { start$hour <- 0L; start$isdst <- -1L; incr <- 86400 }
if(valid == 5L) { # weeks
start$mday <- start$mday - start$wday
if(start.on.monday)
start$mday <- start$mday + ifelse(start$wday > 0L, 1L, -6L)
incr <- 7*86400
}
if(valid == 8L) incr <- 25*3600 # DSTdays
if(valid == 6L) { # months
start$mday <- 1L
end <- as.POSIXlt(max(x, na.rm = TRUE))
step <- if(length(by2) == 2L) as.integer(by2[1L]) else 1L
end <- as.POSIXlt(end + (31 * step * 86400))
end$mday <- 1L
end$isdst <- -1L
breaks <- seq(start, end, breaks)
} else if(valid == 7L) { # years
start$mon <- 0L
start$mday <- 1L
end <- as.POSIXlt(max(x, na.rm = TRUE))
step <- if(length(by2) == 2L) as.integer(by2[1L]) else 1L
end <- as.POSIXlt(end + (366 * step* 86400))
end$mon <- 0L
end$mday <- 1L
end$isdst <- -1L
breaks <- seq(start, end, breaks)
} else if(valid == 9L) { # quarters
qtr <- rep(c(0L, 3L, 6L, 9L), each = 3L)
start$mon <- qtr[start$mon + 1L]
start$mday <- 1L
maxx <- max(x, na.rm = TRUE)
end <- as.POSIXlt(maxx)
step <- if(length(by2) == 2L) as.integer(by2[1L]) else 1L
end <- as.POSIXlt(end + (93 * step * 86400))
end$mon <- qtr[end$mon + 1L]
end$mday <- 1L
end$isdst <- -1L
breaks <- seq(start, end, paste(step * 3, "months"))
## 93 days ahead could give an empty level, so
lb <- length(breaks)
if(maxx < breaks[lb-1]) breaks <- breaks[-lb]
} else { # weeks or shorter
if (length(by2) == 2L) incr <- incr * as.integer(by2[1L])
maxx <- max(x, na.rm = TRUE)
breaks <- seq(start, maxx + incr, breaks)
breaks <- breaks[seq_len(1+max(which(breaks <= maxx)))]
}
} else stop("invalid specification of 'breaks'")
res <- cut(unclass(x), unclass(breaks), labels = labels,
right = right, ...)
if(is.null(labels)) {
levels(res) <-
as.character(if (is.numeric(breaks)) x[!duplicated(res)]
else breaks[-length(breaks)])
}
res
}
julian <- function(x, ...) UseMethod("julian")
julian.POSIXt <- function(x, origin = as.POSIXct("1970-01-01", tz = "GMT"), ...)
{
origin <- as.POSIXct(origin)
if(length(origin) != 1L) stop("'origin' must be of length one")
res <- difftime(as.POSIXct(x), origin, units = "days")
structure(res, "origin" = origin)
}
## Note that 'abbreviate' works *vectorized* here :
weekdays <- function(x, abbreviate) UseMethod("weekdays")
weekdays.POSIXt <- function(x, abbreviate = FALSE)
{
format(x, ifelse(abbreviate, "%a", "%A"))
}
months <- function(x, abbreviate) UseMethod("months")
months.POSIXt <- function(x, abbreviate = FALSE)
{
format(x, ifelse(abbreviate, "%b", "%B"))
}
quarters <- function(x, abbreviate) UseMethod("quarters")
quarters.POSIXt <- function(x, ...)
{
x <- (as.POSIXlt(x)$mon)%/%3
paste0("Q", x+1)
}
trunc.POSIXt <-
function(x, units = c("secs", "mins", "hours", "days", "months", "years"), ...)
{
units <- match.arg(units)
x <- as.POSIXlt(x)
if(length(x$sec))
switch(units,
"secs" = {x$sec <- trunc(x$sec)},
"mins" = {x$sec[] <- 0},
"hours" = {x$sec[] <- 0; x$min[] <- 0L},
## start of day need not be on the same DST.
"days" = {
x$sec[] <- 0; x$min[] <- 0L; x$hour[] <- 0L;
x$isdst[] <- -1L
},
"months" = {
x$sec[] <- 0; x$min[] <- 0L; x$hour[] <- 0L;
x$mday[] <- 1L
x$isdst[] <- -1L
## To get wday and yday correctly:
x <- as.POSIXlt(as.POSIXct(x))
},
"years" = {
x$sec[] <- 0; x$min[] <- 0L; x$hour[] <- 0L;
x$mday[] <- 1L; x$mon[] <- 0L
x$isdst[] <- -1L
## To get wday and yday correctly:
x <- as.POSIXlt(as.POSIXct(x))
}
)
x
}
round.POSIXt <-
function(x, units = c("secs", "mins", "hours", "days", "months", "years"))
{
.round_x_to_l_or_u <- function(lx, ll, lu) {
## lx ll lu all POSIXlt, lu not necessarily valid yet.
cu <- as.POSIXct(lu)
lu <- as.POSIXlt(cu)
tu <- unclass(cu)
tx <- unclass(as.POSIXct(lx))
tl <- unclass(as.POSIXct(ll))
up <- ((tu - tx) <= (tx - tl))
up <- !is.na(up) & up
y <- ll
y[up] <- lu[up]
y
}
## this gets the default from the generic's 2nd arg 'digits = 0' :
units <- if(is.numeric(units) && units == 0.) "secs" else match.arg(units)
if(units == "months") {
x <- as.POSIXlt(x)
## Start of this month:
ll <- trunc.POSIXt(x, "months")
## Start of next month:
lu <- ll
lu$mon <- lu$mon + 1L
## Now make lu valid and round ...
.round_x_to_l_or_u(x, ll, lu)
}
else if(units == "years") {
x <- as.POSIXlt(x)
## Start of this year:
ll <- trunc.POSIXt(x, "years")
## Start of next year:
lu <- ll
lu$year <- lu$year + 1L
## Now make lu valid and round ...
.round_x_to_l_or_u(x, ll, lu)
}
else
trunc.POSIXt(as.POSIXct(x) +
switch(units,
"secs" = 0.5,
"mins" = 30,
"hours" = 1800,
"days" = 43200),
units = units)
}
## ---- additions in 1.5.0 -----
`[.POSIXlt` <- function(x, i, j, drop = TRUE)
{
if(missing(j)) {
.POSIXlt(lapply(X = unclass(x), FUN = "[", i, drop = drop),
attr(x, "tzone"), oldClass(x))
} else {
unclass(x)[[j]][i]
}
}
`[<-.POSIXlt` <- function(x, i, j, value)
{
if(!length(value)) return(x)
cl <- oldClass(x)
class(x) <- NULL
if(missing(j)) {
value <- unclass(as.POSIXlt(value))
for(n in names(x)) x[[n]][i] <- value[[n]]
} else {
x[[j]][i] <- value
}
class(x) <- cl
x
}
as.data.frame.POSIXlt <- function(x, row.names = NULL, optional = FALSE, ...)
{
value <- as.data.frame.POSIXct(as.POSIXct(x), row.names, optional, ...)
if (!optional)
names(value) <- deparse(substitute(x))[[1L]]
value
}
## ---- additions in 1.8.0 -----
rep.POSIXct <- function(x, ...)
.POSIXct(NextMethod(), attr(x, "tzone"), oldClass(x))
rep.POSIXlt <- function(x, ...)
.POSIXlt(lapply(X = unclass(x), FUN = rep, ...),
attr(x, "tzone"), oldClass(x))
diff.POSIXt <- function (x, lag = 1L, differences = 1L, ...)
{
ismat <- is.matrix(x)
r <- if(inherits(x, "POSIXlt")) as.POSIXct(x) else x
xlen <- if (ismat) dim(x)[1L] else length(r)
if (length(lag) != 1L || length(differences) > 1L || lag < 1L || differences < 1L)
stop("'lag' and 'differences' must be integers >= 1")
if (lag * differences >= xlen) return(.difftime(numeric(), "secs"))
i1 <- -seq_len(lag)
if (ismat) for (i in seq_len(differences)) r <- r[i1, , drop = FALSE] -
r[-nrow(r):-(nrow(r) - lag + 1), , drop = FALSE]
else for (i in seq_len(differences))
r <- r[i1] - r[-length(r):-(length(r) - lag + 1L)]
r
}
## ---- additions in 2.2.0 -----
duplicated.POSIXlt <- function(x, incomparables = FALSE, ...)
{
x <- as.POSIXct(x)
NextMethod("duplicated", x)
}
unique.POSIXlt <- function(x, incomparables = FALSE, ...)
x[!duplicated(x, incomparables, ...)]
## ---- additions in 2.4.0 -----
sort.POSIXlt <- function(x, decreasing = FALSE, na.last = NA, ...)
x[order(as.POSIXct(x), na.last = na.last, decreasing = decreasing)]
## ---- additions in 2.6.0 -----
is.numeric.POSIXt <- function(x) FALSE
## ---- additions in 2.8.0 -----
split.POSIXct <-
function(x, f, drop = FALSE, ...)
lapply(split.default(as.double(x), f, drop = drop, ...),
.POSIXct, attr(x, "tzone"), oldClass(x))
xtfrm.POSIXct <- function(x) as.numeric(x)
xtfrm.POSIXlt <- function(x) as.double(x) # has POSIXlt method
xtfrm.difftime <- function(x) as.numeric(x)
is.numeric.difftime <- function(x) FALSE
## Class generators added in 2.11.0, class order changed in 2.12.0.
## FIXME:
## At least temporarily avoide structure() for performance reasons.
## .POSIXct <- function(xx, tz = NULL)
## structure(xx, class = c("POSIXct", "POSIXt"), tzone = tz)
.POSIXct <- function(xx, tz = NULL, cl = c("POSIXct", "POSIXt")) {
class(xx) <- cl
attr(xx, "tzone") <- tz
xx
}
## FIXME:
## At least temporarily avoide structure() for performance reasons.
## .POSIXlt <- function(xx, tz = NULL)
## structure(xx, class = c("POSIXlt", "POSIXt"), tzone = tz)
.POSIXlt <- function(xx, tz = NULL, cl = c("POSIXlt", "POSIXt")) {
class(xx) <- cl
attr(xx, "tzone") <- tz
xx
}
## FIXME:
## At least temporarily avoide structure() for performance reasons.
## .difftime <- function(xx, units)
## structure(xx, units = units, class = "difftime")
.difftime <- function(xx, units, cl = "difftime") {
class(xx) <- cl
attr(xx, "units") <- units
xx
}
## ---- additions in 2.13.0 -----
names.POSIXlt <-
function(x)
names(x$year)
`names<-.POSIXlt` <-
function(x, value)
{
names(x$year) <- value
x
}
## Added in 3.1.0.
OlsonNames <- function(tzdir = NULL)
{
if (is.null(tzdir)) {
if(.Platform$OS.type == "windows")
tzdir <- Sys.getenv("TZDIR", file.path(R.home("share"), "zoneinfo"))
else {
## Try known locations in turn.
## The list is not exhaustive (mac OS 10.13's
## /usr/share/zoneinfo is a symlink) and there is a risk that
## the wrong one is found.
## We assume that if the second exists that the system was
## configured with --with-internal-tzcode
tzdirs <- c(Sys.getenv("TZDIR"), # defaults to ""
file.path(R.home("share"), "zoneinfo"),
"/usr/share/zoneinfo", # Linux, macOS, FreeBSD
"/share/zoneinfo", # in musl's search
"/usr/share/lib/zoneinfo", # Solaris, AIX
"/usr/lib/zoneinfo", # early glibc
"/usr/local/etc/zoneinfo", # tzcode default
"/etc/zoneinfo", "/usr/etc/zoneinfo")
tzdirs <- tzdirs[file.exists(tzdirs)]
if (!length(tzdirs)) {
warning("no Olson database found")
return(character())
} else tzdir <- tzdirs[1L]
}
} else if(!dir.exists(tzdir))
stop(sprintf("%s is not a directory", sQuote(tzdir)), domain = NA)
x <- list.files(tzdir, recursive = TRUE)
## Some databases have VERSION (tzdata hence --with-internal-tzcode),
## some +VERSION (Apple), some neither (including glibc)
ver <- if(file.exists(vf <- file.path(tzdir, "VERSION")))
readLines(vf, warn = FALSE)
else if(file.exists(vf <- file.path(tzdir, "+VERSION")))
readLines(vf, warn = FALSE)
## else NULL
x <- setdiff(x, "VERSION")
## all other auxiliary files are l/case.
ans <- grep("^[ABCDEFGHIJKLMNOPQRSTUVWXYZ]", x, value = TRUE)
if(!is.null(ver)) attr(ans, "Version") <- ver
ans
}
## Added in 3.5.0.
`[[.POSIXlt` <- function(x, ..., drop = TRUE)
.POSIXlt(lapply(X = unclass(x), FUN = "[[", ..., drop = drop),
attr(x, "tzone"), oldClass(x))
as.list.POSIXlt <- function(x, ...)
{
nms <- names(x)
names(x) <- NULL
y <- lapply(X = do.call(Map, c(list(list), unclass(x))),
FUN = .POSIXlt, attr(x, "tzone"), oldClass(x))
names(y) <- nms
y
}
|
55ef53fa254ec74bf1e8a4143f5d35e882b033fe | 7da7e8a59c56e5d3c7154843025d2a0fa1d10e03 | /stats_only.R | 374cf3216a6a91122ffd58870a400f0d3b73c142 | [] | no_license | anneracht/sick_score_github | 4821c10c2fa23b8792961f42e13cd5868936c4a9 | 7ff8059106ff5b6a5a1bf28cdf4dd7275cfb673e | refs/heads/master | 2020-07-06T12:16:10.400335 | 2019-09-18T14:23:39 | 2019-09-18T14:23:39 | 203,014,628 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,275 | r | stats_only.R | #1. find cutoff values to split it into intervals. It requires non-parametric LOESS smoothing
# technique to describe the relationship between the variable and the outcome.
#2.
#amazing references
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5690964/
#https://cran.r-project.org/web/packages/tableone/vignettes/smd.html
# https://www.ncbi.nlm.nih.gov/pubmed/22521443
#comorbidity analysis according to age , BMI , gender in obese patients
sss #contains the data for analysis. variables will be numbered.
#calculate the top comorbidities
sss %>% count(CVA , sort=TRUE)
sss %>% count(CAD, sort = TRUE)
sss %>% count(PVD, sort = TRUE)
sss %>% count(CHF, sort=TRUE)
sss %>% count(DM, sort=TRUE)
sss %>% count(HTN, sort=TRUE)
sss %>% count(HLD, sort=TRUE)
sss %>% count(COPD, sort=TRUE)
sss %>% count(OSA, sort=TRUE)
sss %>% count(CKD, sort=TRUE)
sss %>% count(Liver.Disease, sort=TRUE)
sss %>% count(Alcohol, sort=TRUE)
sss %>% count(Tobacco, sort=TRUE)
#delete columns from sss first
sss$CHF.continued <- NULL
sss$CHF.continued.1 <- NULL
sss$CHF.continued.2 <- NULL
sss$Liver.Disease.other.factors.5 <- NULL
sss$Study.Number <- NULL
sss$Age <- NULL
sss$Health.Insurance.Status <- NULL
sss$Gender <- NULL
sss$CVA.continued <- NULL
sss$CAD.Continued <- NULL
sss$HTN.continued <- NULL
sss$CKD.continued.3 <- NULL
sss$CKD.continued.4 <- NULL
sss$Liver.Disease.other.factors.1 <- NULL
sss$Liver.Disease.other.factors.2 <- NULL
sss$Liver.Disease.other.factors.3 <- NULL
sss$Liver.Disease.other.factors.4 <- NULL
sss$Type.of.Surgery <- NULL
sss$Race <- NULL
sss$BMI.Surgical.Tracking <- NULL
sss$ASA.2 <- NULL
sss$Reoperation.30.Day <- NULL
sss$Readmit.30.Day <- NULL
sss$ED.Visit.30.Day<- NULL
sss$LOS <- NULL
sss$Discharge.Date <- NULL
sss$Surgery.Date <- NULL
sss$State <- NULL
sss$City <- NULL
sss$ASA <- NULL
ss_comorbidity <- as.matrix(sss)
# Matrix multiplication for cooccurrence counts
#convert the dataframe to boolean matrix
ss_comorbidity[ss_comorbidity== "NA" ] <- as.numeric(0)
replace(ss_comorbidity, is.na(ss_comorbidity), 0)
ss_comorbidity[ss_comorbidity>=1] <- 1
ss_comorbidity[is.na(ss_comorbidity)] <- 0
ss_comorbidity <- as.matrix(ss_comorbidity, fill = T)
ss_comorbidity_df <- as.data.frame(ss_comorbidity, stringsAsFactors = FALSE)
#convert to matrix and numeric
ss_comorbidity_1 <- as.matrix(ss_comorbidity_df)
head(ss_comorbidity_1)
#check if matrix contents are character, and convert it to numeric
is.character(ss_comorbidity_1)
class(ss_comorbidity_1) <- "numeric"
head(ss_comorbidity_1)
ss_comorbidity_cor <- t(ss_comorbidity_1) %*% ss_comorbidity_1
comorbid_counts <- ss_comorbidity_cor %>% kable() %>% kable_styling(bootstrap_options = c("striped", "hover", "condensed"), font_size = 15)
diag(ss_comorbidity_cor) <- 0
heatmap(ss_comorbidity_cor, Rowv = NA, Colv = NA)
library(dplyr)
library(kableExtra)
cor(ss_comorbidity_cor, method = c("spearman")) %>% kable() %>% kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
mydata.cor <- cor(ss_comorbidity_cor, method = c("spearman"))
#generate p-values (source: https://www.displayr.com/how-to-create-a-correlation-matrix-in-r/)
install.packages("Hmisc")
library("Hmisc")
mydata.rcorr = rcorr(as.matrix(ss_comorbidity_cor))
mydata.rcorr
mydata.coeff = mydata.rcorr$r
mydata.p = mydata.rcorr$P
mydata.p %>% kable() %>% kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
library(corrplot)
corrplot(mydata.cor)
palette = colorRampPalette(c("green", "white", "red")) (20)
heatmap(x = ss_comorbidity_cor, col = palette, symm = TRUE)
palette = colorRampPalette(c("green", "white", "red")) (20)
heatmap(x = mydata.cor, col = palette, symm = TRUE)
#heatmap(ss_comorbidity_cor)
temp = sapply(colnames(ss_comorbidity_cor), function(x)
sapply(colnames(ss_comorbidity_cor), function(y)
sum(rowSums(ss_comorbidity_cor[,c(x, y)]) == 2)))
diag(temp) = 0
temp
library(reshape2)
library(ggplot2)
df1 = melt(temp)
graphics.off()
ggplot(df1, aes(x = Var1, y = Var2, fill = value)) +
geom_tile() +
theme_classic()
#comorbidity network of co-occurrences
ss_comorbidity_df
ss_comorbidity[,1:16]
dat <- ss_comorbidity[,1:16]
cor(ss_comorbidity_1)
#reference
#http://www.medsci.org/v13p0099.htm#T3
#https://www.nature.com/articles/s41598-018-36973-1
|
7bca11af38d3148dce9749efeb5b8e445d4340dd | 1158f6d183b4a9093a6fdc8dda8891d084777e41 | /tutorial/basicsOfProgramming.R | 91a1e627be4a4b3065e7ddd46694799a0ac3d223 | [] | no_license | Beerkay/RTutorials_ITU | 401d2c4ad3344bb1184f3170eacacb0d9a01485a | d5b0a8379ac88c8d461ae6c64983c9e726f69929 | refs/heads/master | 2021-01-02T09:14:21.519770 | 2014-07-09T12:27:31 | 2014-07-09T12:27:31 | 21,098,533 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,437 | r | basicsOfProgramming.R | #####################################
#Basics of Programming
#author : Z.B.Celik
#Date : 06/23/2014
#####################################
#1- functions
say.hello <- function(){
print ("hello world !")
}
say.hello
say.hello()
#functions with arguments
hello.person <- function(first, last){
print(sprintf("Hello %s %s", first, last))
}
hello.person("Berkay", "Celik")
#or we can specy arguments as
hello.person(last="Celik",first="Berkay")
#returning from functions, R just returns the last computation
square.num <- function(x){
x * x
}
square.num(9)
#or we can specify the return
square.num <- function (x){
return(x*x)
}
square.num(9)
#using do.call()
do.call(square.num, args=list(9)) # list is optional here
#functions may take other functions as an argument
run.this <- function(x, func=mean) #func is a function variable and default is mean
{
do.call(func, args=list(x))
}
run.this(x=1:10)#calculates mean
class(x=1:10)
x=1:10
x
run.this(x=1:10, sum) #sum of the x vector
run.this(x=1:10, sd)
#Using if statements
#ifelse
y=1
ifelse(y==1,"yes", "no")
vectorX <- c(1,0,1,1,NA)
ifelse(vectorX==1,"Yes", "No")#NA outputs as is, as expected
#for loop
for (i in 1:10)
{
print (i)
}
fruit <- c("apple","banana","pomegranate")
fruitLength = rep (NA, length(fruit)) #replicate 3 times in this case
names(fruitLength) <- fruit
fruitLength
for (a in fruit)
{
fruitLength[a] <- nchar(a)
}
fruitLength
a
|
f8594dc16fe5188e0a76a624865feb84c357c10b | 30cf86d398ec8823ca0e9f5c8b557f8723592376 | /plot1.r | 0bc9514ed5bbe9fd7e4381d005408fbbbb603654 | [] | no_license | macawile21/ExData_Plotting1 | f49cc58ee14685ee1fe1a3f4f90e663790a85386 | 78ea7242842d6764b5071ccf9da3fd8777533b81 | refs/heads/master | 2021-01-24T01:27:54.978038 | 2018-02-25T17:53:13 | 2018-02-25T17:53:13 | 122,808,224 | 0 | 0 | null | 2018-02-25T05:31:36 | 2018-02-25T05:31:36 | null | UTF-8 | R | false | false | 659 | r | plot1.r | #READ,CLEAN AND ORGANIZE DATA
data = read.table(paste(getwd(),"household_power_consumption.txt",sep="/"),sep =";",na.strings = "?",header = TRUE)
na.omit(data)
data[complete.cases(data),]
data$Date=as.Date(data$Date,"%d/%m/%Y")
data_filtered=data[data$Date=='2007-02-01',]
data_filtered_2=data[data$Date=='2007-02-02',]
d2= rbind(data_filtered,data_filtered_2)
dateTime=paste(d2$Date,d2$Time)
dateTime = as.POSIXct(dateTime,tz = "GMT+8")
d2=cbind(dateTime,d2)
#PLOT TO FILE DEVICE
png(filename = "plot1.png",width = 480,height = 480)
hist(d2$Global_active_power,main = "Global Active Power",xlab = "Global Active Power
(kilowatts)",col = "red")
dev.off()
|
92a01994b336d1df193b45eb5ef33f267f68c333 | 48516682819308cb1ebc92edce2c1c144744cc2f | /scripts_for_simulations/006_figures_4_5_S9_S10_S11_S12.R | 6a74593cd6a897c0a74d17a7850ad88552b64e33 | [] | no_license | Landrylab/Gene_duplication_2019 | 139c8b2ca0af251cfe4398ab8d11191ef0c7346b | af5cacc44768bcccb952ba9ce27ba7ddb707b008 | refs/heads/master | 2020-05-02T07:44:58.301178 | 2019-03-26T15:48:36 | 2019-03-26T15:48:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,643 | r | 006_figures_4_5_S9_S10_S11_S12.R | #########################################################################################
#### Figures simulations ####
#### This script plots the figures on the results of the simulations, that is: ####
#### - Figure 4 ####
#### - Figure 5 ####
#### - Figure S9 ####
#### - Figure S10 ####
#### - Figure S11 ####
#### - Figure S12 ####
#########################################################################################
# Libraries
library(ggplot2)
library(tidyverse)
library(magrittr)
library(cowplot)
library(grid)
library(gridExtra)
library(ggpubr)
library(Cairo)
#### Figure 4 ####
# NOTE(review): all paths in this script are hard-coded to one machine;
# consider a single data_dir variable (or here::here()) for portability.
# Load the per-substitution binding energies used by figures 4 and S9
data_fig_4_s9 <- read.table('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/data_fig_4_s9.tsv', sep = '\t', header = TRUE)
# Load the diagram describing the simulation scenarios (panel A of figure 4)
fig_4A <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/Figure4_2019_03_05_bisAM.png')
# Relevel to adjust order in legend (HM AA, HET AB, HM BB)
data_fig_4_s9$ComplexType <- factor(data_fig_4_s9$ComplexType, levels = c('HM AA', 'HET AB', 'HM BB'))
# Plot each of the scenarios for 1M38
# Neutral evolution #
# Binding-energy trajectories for the three complexes (HM AA, HET AB, HM BB)
# over simulated substitutions: thin translucent lines are individual
# replicates, thick lines are the per-complex means added by stat_summary.
complex_palette <- c("blue", "purple", "pink")
p_1M38_nosel <- data_fig_4_s9 %>%
  filter(PDB == '1M38', Scenario == 'Neutral evolution') %>%
  ggplot(aes(x = Substitution, y = Binding_energy,
             group = interaction(Replicate, ComplexType),
             color = ComplexType)) +
  facet_grid(~Scenario) +
  # One faint line per replicate and complex type
  geom_line(size = 1, alpha = 0.1) +
  scale_colour_manual(values = complex_palette, name = "Complex") +
  scale_alpha_manual(values = c(1, 0.3)) +
  ylab("Binding energy (Kcal/mol)") +
  xlab("Time (substitutions)") +
  # Thick line: mean trajectory of each complex type, drawn on top
  stat_summary(aes(group = ComplexType), fun.y = mean, geom = "line", size = 2) +
  geom_hline(yintercept = 0) +
  geom_vline(xintercept = 0) +
  ylim(c(-35, 20)) +
  # All theme tweaks gathered into a single call (no conflicting keys)
  theme(panel.background = element_rect(fill = "white", colour = "grey50"),
        axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        axis.title.x = element_text(size = 15, face = 'bold'),
        axis.title.y = element_text(size = 15, face = 'bold'),
        legend.key = element_rect(fill = "white"),
        legend.title = element_blank(),
        legend.text = element_text(size = 12),
        legend.position = "none",
        legend.justification = "center",
        panel.border = element_rect(linetype = "solid", colour = "gray50", size = 1),
        strip.text.x = element_text(size = 15, face = 'bold'),
        strip.text.y = element_text(size = 15, face = 'bold'),
        strip.background = element_rect(fill = 'white'))
# Selection on both HMs #
# 1M38 under selection maintaining both homodimers: thin translucent lines
# are individual replicate trajectories of binding energy per substitution;
# the thick lines added by stat_summary are per-complex means. This block
# differs from the other scenario panels of figure 4 only in the Scenario
# filter (candidate for a shared plotting helper).
p_1M38_sel_AA_BB <- data_fig_4_s9 %>%
  filter(PDB == '1M38', Scenario == 'Selection on both HMs') %>%
  ggplot(aes(x=Substitution,y=Binding_energy, group=interaction(Replicate, ComplexType), color=ComplexType)) +
  facet_grid(~Scenario) +
  geom_line(size=1, alpha=0.1) +
  scale_colour_manual(values = c("blue", "purple", "pink"),
                      name = "Complex")+
  scale_alpha_manual(values = c(1, 0.3)) +
  theme(panel.background = element_rect(fill = "white", colour = "grey50"),
        axis.text.x= element_text(size=12), axis.text.y= element_text(size=12),
        axis.title.x = element_text(size=15, face = 'bold'), axis.title.y = element_text(size=15, face = 'bold'))+
  theme(legend.key = element_rect(fill = "white"), legend.title = element_blank(),
        legend.text=element_text(size=12), legend.position = "none",
        legend.justification = "center")+
  ylab("Binding energy (Kcal/mol)") + xlab("Time (substitutions)") +
  # Mean trajectory per complex type, drawn on top of the replicate lines
  stat_summary(aes(group=ComplexType), fun.y=mean, geom="line", size=2) +
  geom_hline(yintercept = 0) + geom_vline(xintercept = 0) +
  ylim(c(-35,20))+
  theme(panel.border = element_rect(linetype = "solid", colour = "gray50", size=1),
        strip.text.x = element_text(size = 15, face = 'bold'),
        strip.text.y = element_text(size = 15, face = 'bold'),
        strip.background = element_rect(fill = 'white')
  )
# Selection on HET #
# 1M38 under selection maintaining the heterodimer AB: thin translucent
# lines are individual replicate trajectories of binding energy per
# substitution; the thick lines added by stat_summary are per-complex means.
# Same aesthetics and theme as the other figure-4 scenario panels.
p_1M38_sel_AB <- data_fig_4_s9 %>%
  filter(PDB == '1M38', Scenario == 'Selection on HET AB') %>%
  ggplot(aes(x=Substitution,y=Binding_energy, group=interaction(Replicate, ComplexType), color=ComplexType)) +
  facet_grid(~Scenario) +
  geom_line(size=1, alpha=0.1) +
  scale_colour_manual(values = c("blue", "purple", "pink"),
                      name = "Complex")+
  scale_alpha_manual(values = c(1, 0.3)) +
  theme(panel.background = element_rect(fill = "white", colour = "grey50"),
        axis.text.x= element_text(size=12), axis.text.y= element_text(size=12),
        axis.title.x = element_text(size=15, face = 'bold'), axis.title.y = element_text(size=15, face = 'bold'))+
  theme(legend.key = element_rect(fill = "white"), legend.title = element_blank(),
        legend.text=element_text(size=12), legend.position = "none",
        legend.justification = "center")+
  ylab("Binding energy (Kcal/mol)") + xlab("Time (substitutions)") +
  # Mean trajectory per complex type, drawn on top of the replicate lines
  stat_summary(aes(group=ComplexType), fun.y=mean, geom="line", size=2) +
  geom_hline(yintercept = 0) + geom_vline(xintercept = 0) +
  ylim(c(-35,20))+
  theme(panel.border = element_rect(linetype = "solid", colour = "gray50", size=1),
        strip.text.x = element_text(size = 15, face = 'bold'),
        strip.text.y = element_text(size = 15, face = 'bold'),
        strip.background = element_rect(fill = 'white')
  )
# Selection on HM AA #
# 1M38 under selection maintaining only the AA homodimer: thin translucent
# lines are individual replicate trajectories of binding energy per
# substitution; the thick lines added by stat_summary are per-complex means.
# Same aesthetics and theme as the other figure-4 scenario panels.
p_1M38_sel_AA <- data_fig_4_s9 %>%
  filter(PDB == '1M38', Scenario == 'Selection on HM AA') %>%
  ggplot(aes(x=Substitution,y=Binding_energy, group=interaction(Replicate, ComplexType), color=ComplexType)) +
  facet_grid(~Scenario) +
  geom_line(size=1, alpha=0.1) +
  scale_colour_manual(values = c("blue", "purple", "pink"),
                      name = "Complex")+
  scale_alpha_manual(values = c(1, 0.3)) +
  theme(panel.background = element_rect(fill = "white", colour = "grey50"),
        axis.text.x= element_text(size=12), axis.text.y= element_text(size=12),
        axis.title.x = element_text(size=15, face = 'bold'), axis.title.y = element_text(size=15, face = 'bold'))+
  theme(legend.key = element_rect(fill = "white"), legend.title = element_blank(),
        legend.text=element_text(size=12), legend.position = "none",
        legend.justification = "center")+
  ylab("Binding energy (Kcal/mol)") + xlab("Time (substitutions)") +
  # Mean trajectory per complex type, drawn on top of the replicate lines
  stat_summary(aes(group=ComplexType), fun.y=mean, geom="line", size=2) +
  geom_hline(yintercept = 0) + geom_vline(xintercept = 0) +
  ylim(c(-35,20))+
  theme(panel.border = element_rect(linetype = "solid", colour = "gray50", size=1),
        strip.text.x = element_text(size = 15, face = 'bold'),
        strip.text.y = element_text(size = 15, face = 'bold'),
        strip.background = element_rect(fill = 'white')
  )
# Selection on HM BB #
# 1M38 under selection maintaining only the BB homodimer: thin translucent
# lines are individual replicate trajectories of binding energy per
# substitution; the thick lines added by stat_summary are per-complex means.
# Same aesthetics and theme as the other figure-4 scenario panels.
p_1M38_sel_BB <- data_fig_4_s9 %>%
  filter(PDB == '1M38', Scenario == 'Selection on HM BB') %>%
  ggplot(aes(x=Substitution,y=Binding_energy, group=interaction(Replicate, ComplexType), color=ComplexType)) +
  facet_grid(~Scenario) +
  geom_line(size=1, alpha=0.1) +
  scale_colour_manual(values = c("blue", "purple", "pink"),
                      name = "Complex")+
  scale_alpha_manual(values = c(1, 0.3)) +
  theme(panel.background = element_rect(fill = "white", colour = "grey50"),
        axis.text.x= element_text(size=12), axis.text.y= element_text(size=12),
        axis.title.x = element_text(size=15, face = 'bold'), axis.title.y = element_text(size=15, face = 'bold'))+
  theme(legend.key = element_rect(fill = "white"), legend.title = element_blank(),
        legend.text=element_text(size=12), legend.position = "none",
        legend.justification = "center")+
  ylab("Binding energy (Kcal/mol)") + xlab("Time (substitutions)") +
  # Mean trajectory per complex type, drawn on top of the replicate lines
  stat_summary(aes(group=ComplexType), fun.y=mean, geom="line", size=2) +
  geom_hline(yintercept = 0) + geom_vline(xintercept = 0) +
  ylim(c(-35,20))+
  theme(panel.border = element_rect(linetype = "solid", colour = "gray50", size=1),
        strip.text.x = element_text(size = 15, face = 'bold'),
        strip.text.y = element_text(size = 15, face = 'bold'),
        strip.background = element_rect(fill = 'white')
  )
# Save the panels for figure 4 #
# Assemble the six panels (scenario diagram + five scenario plots) into a
# 3x2 grid labelled A-F, then write the figure as a PDF via the cairo device.
final_fig4 <- plot_grid(fig_4A, p_1M38_nosel, p_1M38_sel_AA_BB,
                        p_1M38_sel_AB, p_1M38_sel_AA, p_1M38_sel_BB,
                        labels = LETTERS[1:6],
                        nrow = 3)
ggsave(plot = final_fig4,
       filename = '/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Figure4.pdf',
       device = cairo_pdf, width = 14, height = 21, dpi = 500)
#### Figure 5 ####
# Load the per-mutation ddG (binding-energy change) data for figures 5/S10
# NOTE(review): 'h = T' relies on partial argument matching for 'header'
# and on the reassignable constant T; 'header = TRUE' would be safer.
data_fig_5_s10 <- read.table('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/data_fig_5_s10.tsv',
                             h = T, sep = '\t')
# Save helper variables
# Shared axis limits for the two scatter panels of figure 5
y_min <- -5
y_max <- 10
x_min <- -5
x_max <- 15
# Load the PDB figure
# Image of the 1M38 structure used as the leftmost panel of figure 5
fig_1m38 <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/1M38_new.png')
# 1M38, 'Selection on both HMs': scatter of each mutation's effect on the
# HET interface (y) vs the HM interface (x), coloured by 'Verdict' (first
# factor level red, second black; presumably accepted vs rejected -- confirm
# against the data). Point shape encodes 'out_of_bounds' (presumably points
# clipped to the axis limits); the shape legend is suppressed.
fig_1M38_sel_AA_BB <- data_fig_5_s10 %>%
  filter(Scenario == 'Selection on both HMs', PDB == '1M38') %>%
  ggplot(aes(y = het_ddg_be, x = homo_ddg_be, color = Verdict, shape = out_of_bounds)) +
  facet_grid(~Scenario) +
  scale_color_manual(values = c('red', 'black')) +
  scale_shape_manual(values = c(16, 17), guide = 'none') +
  geom_point(aes(alpha = Verdict), size = 2) +
  scale_alpha_manual(values = c(1, 0.3)) +
  theme(panel.background = element_rect(fill = "white", colour = "grey50"),
        axis.text.x= element_text(size=12), axis.text.y= element_text(size=12),
        axis.title.x = element_text(size=15, face = "bold"), axis.title.y = element_text(size=15, face = "bold"))+
  theme(plot.title = element_text(size = 20, face = "bold",family="Arial", hjust = 0.5),
        legend.key = element_rect(fill = "white"), legend.title = element_blank(),
        legend.text=element_text(size=15), legend.position = "none",
        legend.justification = "center")+
  guides(color = guide_legend(override.aes = list(size=3))) +
  ylab("ΔΔG HET (Kcal/mol)") + xlab("ΔΔG HM (Kcal/mol)") +
  geom_hline(yintercept = 0) + geom_vline(xintercept = 0) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  # Pearson correlation annotation; gsub renders the label as lowercase 'r'.
  # NOTE(review): x/y here are swapped relative to the main aes; harmless
  # for Pearson's r (symmetric), but worth confirming it was intentional.
  stat_cor(method = 'pearson', label.x.npc = 0.35,
           label.y.npc = 0.1, show.legend = FALSE,
           inherit.aes = FALSE,
           aes(x = het_ddg_be, y = homo_ddg_be, label = gsub(x = ..label.., replacement = 'r', pattern = 'R')),
           size = 6
  ) +
  theme(panel.border = element_rect(linetype = "solid", colour = "gray50", size=1),
        strip.text.x = element_text(size = 15, face = 'bold'),
        strip.text.y = element_text(size = 15, face = 'bold'),
        strip.background = element_rect(fill = 'white'))
# Extract an enlarged copy of the colour legend from the scatter plot (the
# plot itself hides its legend via legend.position = "none").
legend <- get_legend(fig_1M38_sel_AA_BB + theme(legend.position = 'top',
                                                legend.text=element_text(size=20)) +
                       guides(color = guide_legend(override.aes = list(size=4))))
# Stack the 1M38 structure image above the legend (legend gets ~1/11 height)
fig_pdb_legend <- plot_grid(ncol = 1, fig_1m38, legend,
                            rel_heights = c(1, 0.1))
# 1M38, 'Selection on HET AB': scatter of each mutation's effect on the HET
# interface (y) vs the HM interface (x), coloured by 'Verdict' (first factor
# level red, second black; presumably accepted vs rejected -- confirm
# against the data). Point shape encodes 'out_of_bounds' (presumably points
# clipped to the axis limits); the shape legend is suppressed. Structurally
# identical to the 'Selection on both HMs' panel except for the filter.
fig_1M38_sel_AB <- data_fig_5_s10 %>%
  filter(Scenario == 'Selection on HET AB', PDB == '1M38') %>%
  ggplot(aes(y = het_ddg_be, x = homo_ddg_be, color = Verdict, shape = out_of_bounds)) +
  facet_grid(~Scenario) +
  scale_color_manual(values = c('red', 'black')) +
  scale_shape_manual(values = c(16, 17), guide = 'none') +
  geom_point(aes(alpha = Verdict), size = 2) +
  scale_alpha_manual(values = c(1, 0.3)) +
  theme(panel.background = element_rect(fill = "white", colour = "grey50"),
        axis.text.x= element_text(size=12), axis.text.y= element_text(size=12),
        axis.title.x = element_text(size=15, face = "bold"), axis.title.y = element_text(size=15, face = "bold"))+
  theme(plot.title = element_text(size = 20, face = "bold",family="Arial", hjust = 0.5),
        legend.key = element_rect(fill = "white"), legend.title = element_blank(),
        legend.text=element_text(size=15), legend.position = "none",
        legend.justification = "center")+
  guides(color = guide_legend(override.aes = list(size=3))) +
  ylab("ΔΔG HET (Kcal/mol)") + xlab("ΔΔG HM (Kcal/mol)") +
  geom_hline(yintercept = 0) + geom_vline(xintercept = 0) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  # Pearson correlation annotation; gsub renders the label as lowercase 'r'.
  stat_cor(method = 'pearson', label.x.npc = 0.35,
           label.y.npc = 0.1, show.legend = FALSE,
           inherit.aes = FALSE,
           aes(x = het_ddg_be, y = homo_ddg_be, label = gsub(x = ..label.., replacement = 'r', pattern = 'R')),
           size = 6
  ) +
  theme(panel.border = element_rect(linetype = "solid", colour = "gray50", size=1),
        strip.text.x = element_text(size = 15, face = 'bold'),
        strip.text.y = element_text(size = 15, face = 'bold'),
        strip.background = element_rect(fill = 'white'))
# Assemble figure 5: structure-plus-legend panel followed by the two
# selection-scenario scatters, side by side, then write the PDF via cairo.
all_plots <- plot_grid(fig_pdb_legend, fig_1M38_sel_AA_BB, fig_1M38_sel_AB,
                       nrow = 1, ncol = 3)
ggsave(plot = all_plots,
       filename = '/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Figure5.pdf',
       device = cairo_pdf, width = 21, height = 7, dpi = 500)
#### Figure S9 ####
# Load the per-substitution binding energies for all six PDB structures
data_fig_4_s9 <- read.table('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/data_fig_4_s9.tsv', sep = '\t', header = TRUE)
# Load the images of the six PDB structures shown above the facet columns
fig_1a82 <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/1A82_new.png')
fig_2o1v <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/2O1V_new.png')
fig_1m38 <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/1M38_new.png')
fig_4fgw <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/4FGW_new.png')
fig_3d8x <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/3D8X_new.png')
# BUG FIX: fig_2jky previously loaded 4FGW_new.png (copy-paste slip), so the
# 4FGW structure appeared twice in the figure's header row and 2JKY was
# missing. NOTE(review): confirm 2JKY_new.png exists in the Data directory.
fig_2jky <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/2JKY_new.png')
# Relevel to adjust order in legend (HM AA, HET AB, HM BB)
data_fig_4_s9$ComplexType <- factor(data_fig_4_s9$ComplexType, levels = c('HM AA', 'HET AB', 'HM BB'))
#### Save figure S9 ####
# Faceted binding-energy trajectories: one translucent line per replicate,
# coloured by complex type, facetted by selection scenario (rows) and PDB
# structure (columns); the heavy lines are per-complex-type means.
p_all <- data_fig_4_s9 %>% ggplot(aes(x=Substitution,y=Binding_energy, group=interaction(Replicate, ComplexType), color=ComplexType)) +
  facet_grid(Scenario ~ PDB, labeller = label_wrap_gen(width = 15)) +
  geom_line(size=1, alpha=0.1) +
  scale_colour_manual(values = c("blue", "purple", "pink"),
                      name = "")+
  scale_alpha_manual(values = c(1, 0.3)) +
  theme(panel.background = element_rect(fill = "white", colour = "grey50"),
        axis.text.x= element_text(size=20), axis.text.y= element_text(size=20),
        axis.title.x = element_text(size=25, face = 'bold'), axis.title.y = element_text(size=25, face = 'bold'))+
  theme(
    legend.key = element_rect(fill = "white"),
    legend.text=element_text(size=25), legend.position = "none",
    legend.justification = "center")+
  ylab("Binding energy (Kcal/mol)") + xlab("Time (substitutions)") +
  # NOTE: fun.y was renamed to fun in ggplot2 3.3.0; fun.y still works but
  # emits a deprecation warning on current ggplot2 versions.
  stat_summary(aes(group=ComplexType), fun.y=mean, geom="line", size=2) +
  geom_hline(yintercept = 0) + geom_vline(xintercept = 0) +
  ylim(c(-60,20))+
  theme(panel.border = element_rect(linetype = "solid", colour = "gray50", size=1),
        strip.text.x = element_text(size = 25, face = 'bold'),
        strip.text.y = element_text(size = 25, face = 'bold'),
        strip.background = element_rect(fill = 'white')
  )
# Extract a shared legend from a copy of the plot with the legend turned on.
legend <- get_legend(p_all + theme(legend.position = 'top',
                                   legend.text=element_text(size=25),
                                   legend.justification = "center"
) +
  guides(color = guide_legend(override.aes = list(size=7))))
# Row of structure images aligned over the facet columns (NULLs are spacers).
header_pdb <- plot_grid(NULL, fig_1a82, fig_1m38, fig_2jky, fig_2o1v, fig_3d8x, fig_4fgw, NULL, nrow = 1,
                        rel_widths = c(0.2, 1, 1, 1, 1, 1, 1, 0.1))
# Stack header images, legend and the faceted panel, then save.
fig_s9 <- plot_grid(header_pdb, legend, p_all,
                    rel_heights = c(0.5, 0.05, 3), ncol = 1)
ggsave(filename = '/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/FigureS9.pdf',
       width = 28, height = 28, dpi = 500, device = cairo_pdf, plot = fig_s9)
#### Figure S10 ####
# Load the data
# Per-mutation ddG pairs (HM vs HET) used for the faceted scatter plot below.
data_fig_5_s10 <- read.table('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/data_fig_5_s10.tsv',
                            h = T, sep = '\t')
# Load the figures of the PDB structures
# (pre-rendered PNGs of each crystal structure, used as column headers)
fig_1a82 <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/1A82_new.png')
fig_2o1v <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/2O1V_new.png')
fig_1m38 <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/1M38_new.png')
fig_4fgw <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/4FGW_new.png')
fig_3d8x <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/3D8X_new.png')
# NOTE(review): fig_2jky loads 4FGW_new.png, duplicating fig_4fgw above —
# probably a copy-paste slip (same as in the Figure S9 section); confirm
# whether a 2JKY_new.png rendering was intended before changing the path.
fig_2jky <- ggdraw() + draw_image('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/4FGW_new.png')
# Scatter of HET ddG against HM ddG for each mutation, facetted by selection
# scenario (rows) and PDB structure (columns), with a Pearson correlation
# annotation per facet.  x_min/x_max/y_min/y_max are defined earlier in the
# script (outside this excerpt).
p_all_facet <- data_fig_5_s10 %>% ggplot(aes(y = het_ddg_be, x = homo_ddg_be, color = Verdict, shape = out_of_bounds)) +
  facet_grid(Scenario ~ PDB, labeller = label_wrap_gen(width = 15)) +
  scale_color_manual(values = c('red', 'black')) +
  scale_shape_manual(values = c(16, 17), guide = 'none') +
  geom_point(aes(alpha = Verdict), size = 2) +
  scale_alpha_manual(values = c(1, 0.3)) +
  theme(panel.background = element_rect(fill = "white", colour = "grey50"),
        axis.text.x= element_text(size=15), axis.text.y= element_text(size=15),
        axis.title.x = element_text(size=20, face = "bold"), axis.title.y = element_text(size=20, face = "bold"))+
  theme(plot.title = element_text(size = 20, face = "bold",family="Arial", hjust = 0.5),
        legend.key = element_rect(fill = "white"), legend.title = element_blank(),
        legend.text=element_text(size=12), legend.position = "none",
        legend.justification = "center")+
  guides(color = guide_legend(override.aes = list(size=3))) +
  ylab("ΔΔG HET (Kcal/mol)") + xlab("ΔΔG HM (Kcal/mol)") +
  geom_hline(yintercept = 0) + geom_vline(xintercept = 0) +
  xlim(x_min, x_max) + ylim(y_min, y_max) +
  # NOTE: the ..label.. pronoun is superseded by after_stat(label) in
  # ggplot2 >= 3.4; the gsub swaps the default "R" for a lowercase "r".
  stat_cor(method = 'pearson', label.x.npc = 0.35,
           label.y.npc = 0.1, show.legend = FALSE,
           inherit.aes = FALSE,
           aes(x = het_ddg_be, y = homo_ddg_be, label = gsub(x = ..label.., replacement = 'r', pattern = 'R')),
           size = 6
  ) +
  theme(panel.border = element_rect(linetype = "solid", colour = "gray50", size = 1),
        strip.text.x = element_text(size = 20, face = 'bold'),
        strip.text.y = element_text(size = 20, face = 'bold'),
        strip.background = element_rect(fill = 'white'))
# Row of structure images aligned over the facet columns (NULLs are spacers).
header_pdb <- plot_grid(NULL, fig_1a82, fig_1m38, fig_2jky, fig_2o1v, fig_3d8x, fig_4fgw, NULL, nrow = 1,
                        rel_widths = c(0.2, 1, 1, 1, 1, 1, 1, 0.1))
# Extract a shared legend from a copy of the plot with the legend turned on.
legend <- get_legend(p_all_facet + theme(legend.position = 'top',
                                         legend.text=element_text(size=20)) +
                       guides(color = guide_legend(override.aes = list(size=4))))
fig_s10 <- plot_grid(header_pdb, legend, p_all_facet, rel_heights = c(1, 0.2, 4), ncol = 1)
ggsave(filename = '/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/FigureS10.pdf',
       width = 28, height = 14, dpi = 500, device = cairo_pdf, plot = fig_s10)
#### Figure S11 ####
# Load the data for panel A
# ddG effect sizes per mutation class (quadrant), split by complex type.
data_fig_s11A <- read.table('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/data_fig_s11A.tsv',
                            h = T, sep = '\t')
# Relevel for order in legend
data_fig_s11A$Complex <- factor(data_fig_s11A$Complex, levels = c('HM', 'HET'))
# Panel A: jittered points overlaid with semi-transparent boxplots,
# one pair of HM/HET distributions per mutation class.
p11_A <- data_fig_s11A %>%
  ggplot(aes(x = quadrant, y = ddg, fill = Complex)) +
  geom_point(aes(colour = Complex),
             position = position_jitterdodge(jitter.width = 0.25)) +
  geom_boxplot(outlier.colour = NA, alpha = 0.5) +
  scale_fill_manual(values = c('#737373', '#d9d9d9')) +
  scale_colour_manual(values = c('#737373', '#d9d9d9'), guide = 'none') +
  theme(plot.title = element_text(hjust = 0.5, face = 'bold'),
        panel.background = element_rect(fill = "white"),
        axis.title.x = element_text(face = 'bold', size = 15),
        axis.title.y = element_text(face = 'bold', size = 15),
        axis.ticks.x = element_blank(), axis.ticks.y = element_blank(),
        axis.line = element_line(size = 0.5),
        axis.text.x = element_text(size = 12, angle = 45, vjust = 0.5),
        axis.text.y = element_text(size = 12)) +
  xlab('Class of mutation') + ylab('ΔΔG (kcal/mol)') + labs(fill = '') +
  ylim(-5,10)
# Auto-print the panel when running interactively.
p11_A
# Load the data for panel B
# Same structure as panel A but for the second dataset.
data_fig_s11B <- read.table('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/data_fig_s11B.tsv',
                            h = T, sep = '\t')
# Relevel for order in legend
data_fig_s11B$Complex <- factor(data_fig_s11B$Complex, levels = c('HM', 'HET'))
# Panel B mirrors panel A's plotting code exactly; if this is edited again,
# consider factoring the shared ggplot recipe into a small helper function.
p11_B <- data_fig_s11B %>%
  ggplot(aes(x = quadrant, y = ddg, fill = Complex)) +
  geom_point(aes(colour = Complex),
             position = position_jitterdodge(jitter.width = 0.25)) +
  geom_boxplot(outlier.colour = NA, alpha = 0.5) +
  scale_fill_manual(values = c('#737373', '#d9d9d9')) +
  scale_colour_manual(values = c('#737373', '#d9d9d9'), guide = 'none') +
  theme(plot.title = element_text(hjust = 0.5, face = 'bold'),
        panel.background = element_rect(fill = "white"),
        axis.title.x = element_text(face = 'bold', size = 15),
        axis.title.y = element_text(face = 'bold', size = 15),
        axis.ticks.x = element_blank(), axis.ticks.y = element_blank(),
        axis.line = element_line(size = 0.5),
        axis.text.x = element_text(size = 12, angle = 45, vjust = 0.5),
        axis.text.y = element_text(size = 12)) +
  xlab('Class of mutation') + ylab('ΔΔG (kcal/mol)') + labs(fill = '') +
  ylim(-5,10)
# Auto-print the panel when running interactively.
p11_B
# Stack panel A above panel B with a thin NULL spacer between them, then save.
# BUG FIX: the original passed labels = c('A', NULL, 'B'); NULL vanishes
# inside c(), so that collapsed to c('A', 'B') and cowplot attached the 'B'
# label to the invisible spacer plot instead of p11_B. An empty string keeps
# the spacer slot in the labels vector.
p_effect_sizes <- plot_grid(p11_A, NULL, p11_B, labels = c('A', '', 'B'), nrow = 3, rel_heights = c(1, 0.1, 1))
ggsave(filename = '/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/FigureS11.pdf',
       device = cairo_pdf, width = 7, height = 14, plot = p_effect_sizes, dpi = 500)
#### Figure S12 ####
# (comment fixed: the file below is saved as FigureS12.pdf)
# Load the data
# Percentage of fixed mutations per mutation class under each selection regime,
# with pre-computed min/max columns for the error bars.
data_fig_s12 <- read.table('/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/Data/data_fig_s12.tsv',
                           h = T, sep = '\t')
# Dodged bar chart of fixation percentages with error bars.
fig_s12 <- data_fig_s12 %>%
  ggplot(aes(x = quadrant, y = accepted_percentage_of_total, fill = selection)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  geom_errorbar(aes(ymin = min, ymax = max),
                width = 0.5, size = 0.3,
                position = position_dodge(0.9)) +
  scale_fill_manual(values = c('#737373', '#d9d9d9')) +
  theme(plot.title = element_text(hjust = 0.5, face = 'bold'),
        panel.background = element_rect(fill = "white"),
        axis.title.x = element_text(face = 'bold'), axis.title.y = element_text(face = 'bold'),
        axis.ticks.x = element_blank(), axis.ticks.y = element_blank(),
        axis.line = element_line(size = 0.5)) +
  xlab("Effect on HMs") + ylab("Fixed mutations (%)") + labs(fill = 'Selection on')
# Auto-print the figure when running interactively.
fig_s12
ggsave(filename = '/home/axelle/Dropbox/draftAM1.0/Full_submission_Elife/Scripts/scripts_for_simulations/FigureS12.pdf',
       width = 10,height = 7, plot = fig_s12, device = cairo_pdf, dpi = 500)
|
412a921fc37d3c3ceaa734faa336b9e986734a04 | 210683b5347b6f584b258f26c7d48ab51a518fe3 | /man/FindTableGroup.Rd | 0857622252fdd3cf7303d196aef258aec99bcea8 | [
"MIT"
] | permissive | statisticsnorway/SSBtools | 6b95eab7f46c1096cd7d6ee3f61d3898150d49d0 | aa2728571e0840e1965f3e7ed0f1984c818ca7a1 | refs/heads/master | 2023-06-24T02:48:17.178606 | 2023-06-23T08:05:58 | 2023-06-23T08:05:58 | 137,074,899 | 5 | 0 | Apache-2.0 | 2023-06-23T08:06:00 | 2018-06-12T13:21:36 | R | UTF-8 | R | false | true | 1,267 | rd | FindTableGroup.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FindTableGroup.R
\encoding{UTF-8}
\name{FindTableGroup}
\alias{FindTableGroup}
\title{Finding table(s) of hierarchical variable groups}
\usage{
FindTableGroup(
x = NULL,
findLinked = FALSE,
mainName = TRUE,
fCorr = FactorLevCorr(x),
CheckHandling = warning
)
}
\arguments{
\item{x}{Matrix or data frame containing the variables}
\item{findLinked}{When TRUE, two linked tables can be in output}
\item{mainName}{When TRUE the groupVarInd output is named according to first variable in group.}
\item{fCorr}{When non-null x is not needed as input.}
\item{CheckHandling}{Function (warning or stop) to be used in problematic situations.}
}
\value{
Output is a list with items
\item{groupVarInd}{List defining the hierarchical variable groups. First variable has most levels.}
\item{table}{List containing one or two tables. These tables are coded as indices referring to elements of groupVarInd.}
}
\description{
A single table or two linked tables are found
}
\examples{
x <- rep(c('A','B','C'),3)
y <- rep(c(11,22,11),3)
z <- c(1,1,1,2,2,2,3,3,3)
zy <- paste(z,y,sep='')
m <- cbind(x,y,z,zy)
FindTableGroup(m)
FindTableGroup(m,findLinked=TRUE)
}
\author{
Øyvind Langsrud
}
|
145fe837a58b1702fe3005c53e3c736554f4b68d | 1ed42775543aab64d1376bb521b2f7360862cd73 | /analyzed/feed/logistic.R | 337e03cfdcbb438fdebdc145c94359cd1722063e | [] | no_license | yja2397/wayne-s-crop | 6aa4bac7f7418c6d3dab7f6fd148c159b68dab44 | ede7d95a4d205e079e2f787ac84146dfaefe31e5 | refs/heads/master | 2020-04-14T06:54:28.419328 | 2019-02-18T19:11:46 | 2019-02-18T19:11:46 | 163,698,625 | 1 | 0 | null | 2018-12-31T21:34:43 | 2018-12-31T21:34:42 | null | UTF-8 | R | false | false | 1,360 | r | logistic.R | ##########################################################################################
######### S u b j e c t : Collect Temperatue and Humidity data and Analyze    #############
######### Collection Date : January 14, 2019                                 #############
######### Data collector  : Wayne's Crop                                     #############
##########################################################################################
# NOTE(review): rm(list=ls()) clears the caller's workspace — an anti-pattern
# for shared scripts; consider removing it and running in a fresh session.
rm(list=ls())
# library
library(ggplot2)
library(UsingR)
library(car)
library(nnet)
# Print current plot margins, then shrink them for the plots below.
par("mar")
par(mar=c(1,1,1,1))
# Open data
# Sensor readings: 'test' from the feed experiment, 'acc' is the
# comparison temperature/humidity dataset one directory up.
test = read.csv("feed.csv",header = TRUE)
acc = read.csv("..//compare//tem_hum_com.csv", header = TRUE)
# Columns 3-4 are assumed to be temperature and humidity — TODO confirm
# against the actual feed.csv layout.
test1 = test[,3:4]
View(test1)
temperature <- test1[,1]
humidity <- test1[,2]
temperature_com <- acc[,1]
humidity_com <- acc[,2]
# Multinomial logistic regression of humidity category on temperature,
# compared against an intercept-only null model via a likelihood-ratio test.
fit1 <- multinom(humidity_com~temperature_com,data=acc) # model 1 (our interest)
fit0 <- multinom(humidity_com~1,data=acc) # model 0 (under H_0)
ratio.likelihood <- fit0$deviance - fit1$deviance
# df=2 hard-codes the difference in parameters, i.e. it assumes the response
# has exactly 3 categories — TODO confirm with the data.
p.value <- pchisq(ratio.likelihood, df=2, lower.tail=F)
pred <- fit1$fitted.values # fitted probability for each category
# NOTE(review): the xlab "Size of alligator" looks like a leftover from a
# multinomial-regression tutorial; it should presumably read "Temperature".
plot(temperature_com,pred[,1],col="red",ylim=c(0,1),xlim=range(temperature_com),xlab="Size of alligator",ylab="Predicted probability")
points(temperature_com,pred[,2],col="blue")
points(temperature_com,pred[,3],col="black")
|
b3b5f000c1ac3de9b789bed5f6cd006338fc6832 | 12ae74bd0ba9d5494d7301b521b45d1bfa5ff84a | /man/keep.Rd | d0b74851cdd662efc018daef72588aed55f8db10 | [] | no_license | cran/do | 62b609a0f0cc0f0c0cc879adb821b1d9d95b6632 | fa0d7c8f9799326ffa6f0763f490c2873597131b | refs/heads/master | 2021-08-15T11:59:00.793187 | 2021-08-03T10:40:02 | 2021-08-03T10:40:02 | 206,034,685 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 347 | rd | keep.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keep.R
\name{keep}
\alias{keep}
\title{Keep objects}
\usage{
keep(..., envir = .GlobalEnv)
}
\arguments{
\item{...}{one or more objects}
\item{envir}{environment, default is global}
}
\description{
Keep objects
}
\examples{
\donttest{
a <- 1
b <- 2
d <- 4
keep(a)
}
}
|
46b8f2d8d0b0fd8bc334ff323c978ecd341c726e | 97087dcf020dac3fa24643092da52f552174c764 | /ui.R | 7ce75851f7334105060c434ad41852566a77ab89 | [
"MIT"
] | permissive | belmontmat/Info-201-BA3 | f17e4560ad70adc9ce69520c8eb7037d430bd58c | 18165a22914eec1c18ed3f956231b2152b21994d | refs/heads/master | 2020-03-15T02:32:36.701180 | 2018-05-30T21:58:03 | 2018-05-30T21:58:03 | 131,920,477 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,612 | r | ui.R | # ui.R
library(shiny)
library(shinythemes)
library(dplyr)
library(ggplot2)
library(plotly)
library(maps)
# UI definition: a navbar app with five tabs (intro, USA heatmap, state
# trends, cost bar graph, global line graph, global map). Output/input IDs
# ("plot1".."plot5", "year", "unit", "state_select", "tab3_slider",
# "continent_tab4", "tab5_year") are matched in the companion server.R.
shinyUI(navbarPage(
  theme = shinytheme("superhero"),
  "Temperature and Disasters",
  # Create an introduction tab ----------------------------------------------
  tabPanel(
    "Introduction",
    titlePanel("The Association Between The Rise In Temperature And
               Frequency Of Natural Disasters"),
    mainPanel(
      tags$p(
        "This project is aimed towards allowing users to discover the
        relationships and impacts between the increase in temperature
        and number of natural disasters in recent history",
        "Recently the U.S. has pulled out of the Paris Climate Agreement,
        so we have focused the first 3 tabs on the U.S. to drive home that
        this is a both local and global phenomenon."
      ),
      img(src = "emdat.jpg", align = "center"),
      tags$p(
        "Our data for temperature comes from ",
        tags$a("The International Disaster Database",
          href = "https://www.emdat.be/"
        ),
        " and limiting the disasters to ",
        tags$i("natural, meteorological, hydrological, and climatological"),
        "disasters."
      ),
      img(src = "Data_Society.png", align = "center", width = "600px"),
      tags$p(
        "We'll be using",
        tags$a("the Data Society", href = "https://datasociety.com/about-us/"),
        " for our climate change dataset. Data society is a data
        science training and consulting firm based in Washington,
        D.C. We retrieved the data from ",
        tags$a("Data.World",
          href = "https://data.world/data-society/global-climate-change-data"
        ),
        ", which is a database of datasets."
      ),
      tags$p("Hopefully, together we can make the world a better place.")
    )
  ),
  # Create a heatmap of the USA ---------------------------------------------
  tabPanel(
    "USA Heatmap",
    titlePanel("USA Heatmap"),
    # Create sidebar layout
    sidebarLayout(
      # Side panel for controls
      sidebarPanel(
        # sep = "" suppresses the thousands separator in displayed years
        sliderInput("year", "Pick A Year",
          min = 1750, max = 2013, value = 2012,
          step = 10, sep = ""
        ),
        selectInput("unit", "Pick A Unit Of Temperature", choices = list(
          "Fahrenheit", "Celsius"
        ), selected = "Celsius")
      ),
      mainPanel(
        # style.css must live in the app's www/ directory
        tags$link(rel = "stylesheet", type = "text/css", href = "style.css"),
        tags$h2(id = "main-heading", "How has temperature in the U.S. as a
                whole changed over time?"),
        tags$p(
          "Temperature By Year: ",
          "Has this had an impact on number of natural disasters?",
          "On this page we can see a visualization
          which overviews many different factors that look into these
          questions."
        ),
        plotlyOutput("plot1"),
        textOutput("disaster_text"),
        textOutput("temp_text"),
        tags$p(
          "It is evident that there is indeed a rise in average temperature;
          there is also an apparent rise in recorded natural disasters",
          "in the U.S. A more in-depth analysis of
          each state's temperatures can be seen in the next tab. This will
          provide clarity towards understanding the temperature trends in the
          U.S."
        )
      )
    )
  ),
  # Create a tabPanel to show your bar plot
  tabPanel(
    "Temperature Trends",
    titlePanel("State Versus National Temperature Trends"),
    # Create sidebar layout
    sidebarLayout(
      # Side panel for controls
      sidebarPanel(
        # State names must match the levels used by the server's dataset
        # (hence "Georgia (State)" and "District Of Columbia").
        selectInput(
          "state_select",
          choices = c("Alabama", "Alaska" ,"Arizona", "Arkansas", "California",
                      "Colorado", "Connecticut", "Delaware",
                      "District Of Columbia", "Florida", "Georgia (State)",
                      "Hawaii", "Idaho", "Illinois", "Indiana", "Iowa",
                      "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland",
                      "Massachusetts", "Michigan", "Minnesota", "Mississippi",
                      "Missouri", "Montana", "Nebraska", "Nevada",
                      "New Hampshire", "New Jersey", "New Mexico", "New York",
                      "North Carolina", "North Dakota", "Ohio", "Oklahoma",
                      "Oregon", "Pennsylvania", "Rhode Island",
                      "South Carolina", "South Dakota", "Tennessee",
                      "Texas", "Utah", "Vermont", "Virginia", "Washington",
                      "West Virginia", "Wisconsin", "Wyoming"),
          label = "Select State"
        )
      ),
      mainPanel(
        tags$h2(id = "main-heading", "How has the temperature for a state
                changed in the past years?"),
        tags$p("The function of this interactive visualization is to detail
               the trends of the temperature data for the U.S. as a whole
               nation and as individual states."),
        plotlyOutput("plot2"),
        tags$p("Picking off where the last tab left off, the line graph trends
               for both the nation and all its states shows that there is
               a gradual increasing trend in average temperature. Another
               interesting point to note is the uncertainy of the data
               in the earlier years, this is due to inaccuracies of how
               temperature was measured-- this has since changed since
               the invention of more accurate temperature measurement. One
               key thing to note is how average temperatures for both the states
               and the nation are proportionate (this is depicted by the
               similar slopes of the trend lines).")
      )
    )
  ),
  # Create the natural disaster cost tab ------------------------------------
  tabPanel(
    "Cost Bar Graph",
    titlePanel("Impact on U.S. Economy Visualized"),
    # Create sidebar layout
    sidebarLayout(
      # Side panel for controls
      sidebarPanel(
        # Two-ended range slider: value is a length-2 vector of years
        sliderInput("tab3_slider",
          label = "Year Range",
          min = 1944,
          max = 2018,
          value = c(1950, 2017)
        )
      ),
      mainPanel(
        tags$h2(id = "main-heading", "How much is the U.S. spending on
                disasters per year?"),
        tags$p("We complied data for U.S. disaster spendings, natural disaster
               occurrences and the total damage in cost for as a result of
               these disasters."),
        plotlyOutput("plot3"),
        tags$p("As we focus on attention on the U.S. as a nation, we inquired
               how much the U.S. spends on natural disasters and the damage
               inflicted from these events. From this graph, there is a clear
               increasing
               trend of natural disaster spending. In addition to this, the
               frequencies of which natural disasters are occurring have also
               increased as time progresses, similar to how average temperatures
               of the U.S. and its states have increased. With this information
               in mind, we focused our attention to a global scale-- see next
               tab.")
      )
    )
  ),
  # Create a line graph using global data -----------------------------------
  tabPanel(
    "Global Line Graph",
    titlePanel("Global Line Graph"),
    # Create sidebar layout
    sidebarLayout(
      # Side panel for controls
      sidebarPanel(
        selectInput(
          "continent_tab4",
          choices = c("Africa", "Americas", "Asia", "Europe", "Oceania"),
          label = "Select Continent"
        )
      ),
      mainPanel(
        tags$h2(id = "main-heading", "Is there a correlation
                between climate change and natural disasters?"),
        tags$p(
          "This line graph helps to answer this question by relating",
          " temperature and occurrences of natural disasters."
        ),
        # Note: plot4 is a static plot (plotOutput), unlike the plotly tabs
        plotOutput("plot4"),
        textOutput("text_tab4")
      )
    )
  ),
  # Create a global map using global data -----------------------------------
  tabPanel(
    "Global Map",
    titlePanel("Temperature and Disasters"),
    # Create sidebar layout
    sidebarLayout(
      # Used the link below to figure out how to remove comma from years
      # https://stackoverflow.com/questions/26636335/formatting-number-output-of-sliderinput-in-shiny
      # Side panel for controls
      sidebarPanel(
        sliderInput("tab5_year",
          label = "Year",
          min = 1743,
          max = 2013,
          value = 2000,
          sep = ""
        )
      ),
      mainPanel(
        tags$h2(id = "main-heading", "Where has temperature changed the most?"),
        tags$p("Average Temperature Change to Given Year: Do these people
               experience a higher frequency of natural disaster as well?
               This map depicts multiple factors converging on a global
               scale to answer these questions."),
        plotlyOutput("plot5"),
        tags$p("From this map, a picture is painted where a majority of
               countries experience an average temperature increase. A few
               outliers are northern most countries like Greenland and
               Russia. Despite these outliers, a significant trend that is
               evident is how large land masses that experience a notable
               average temperature change also experience more frequent
               natural disasters, some examples being USA, China and Mexico.")
      )
    )
  )
))
|
a250898f9d0a486b1ce0ed653d0761bc26ae7932 | 52cb210d2c627b6a6ce68843e4116151a3be1301 | /R/serve_elo_peaks_2017_wta.R | 77b922b176626ade42ff962ae680705e57ad57d8 | [] | no_license | skoval/sports-blog | 462607d3897ceb98ccb719c40cc015300fa8e243 | c8a9fc2ac71e370b801e71bb4ebe3735d255297c | refs/heads/master | 2023-01-11T20:51:29.411992 | 2022-06-25T13:54:51 | 2022-06-25T13:54:51 | 92,786,559 | 3 | 2 | null | 2023-01-04T18:40:43 | 2017-05-30T00:53:07 | JavaScript | UTF-8 | R | false | false | 3,328 | r | serve_elo_peaks_2017_wta.R | library(ggplot2)
library(ggthemes)
library(dplyr)
# Point-by-point WTA Elo ratings for 2017; loads an `elos` data frame.
load(file = "wta_elos_all_2017.RData")
# Chronological order within each match: date, match, set, game, point.
elos <- elos[order(elos$match_date, elos$matchid, elos$Set, elos$Game, elos$Point),]
# Per server: number of distinct matches played and a running point counter.
serve_elos <- elos %>%
  group_by(serve) %>%
  dplyr::mutate(
    Matches = n_distinct(matchid),
    PointNumber = 1:n()
  )
# Same bookkeeping from the returner's perspective.
return_elos <- elos %>%
  group_by(return) %>%
  dplyr::mutate(
    Matches = n_distinct(matchid),
    PointNumber = 1:n()
  )
# Restrict to players with at least 10 matches on serve.
players <- unique(serve_elos$serve[serve_elos$Matches >= 10])
# Season-best serve rating per player.
serve_peak_elo <- serve_elos %>%
  filter(serve %in% players) %>%
  group_by(serve) %>%
  dplyr::summarise(
    serve.peak.elo = max(server.elo)
  )
# Season-best return rating per player.
# NOTE(review): the filter uses the serve column here too — presumably
# intentional (same qualification rule), but worth confirming.
return_peak_elo <- return_elos %>%
  filter(serve %in% players) %>%
  group_by(return) %>%
  dplyr::summarise(
    return.peak.elo = max(return.elo)
  )
# Harmonise the key column name so the two summaries can be merged.
serve_peak_elo <- rename(serve_peak_elo, player = serve)
return_peak_elo <- rename(return_peak_elo, player = return)
peak_elos <- merge(serve_peak_elo, return_peak_elo, by = "player")
# Scatter of peak serve vs peak return rating, with median reference lines
# splitting the plane into quadrants and player names as labels.
gg1 <- peak_elos %>%
  ggplot(aes(y = serve.peak.elo, x = return.peak.elo)) +
  geom_point() +
  theme_gdocs() +
  #scale_y_continuous("Peak Serve Elo", lim = c(1450, 1700), breaks = scales::pretty_breaks(n = 10)) +
  #scale_x_continuous("Peak Return Elo", lim = c(1400, 1600), breaks = scales::pretty_breaks(n = 10)) +
  geom_hline(yintercept = median(peak_elos$serve.peak.elo)) +
  geom_vline(xintercept = median(peak_elos$return.peak.elo)) +
  geom_text(aes(label = player))
# Auto-print the plot when running interactively.
gg1
# Most improved servers: gain from a player's first serve rating of the season
# to her median rating over points played in the second half of the year.
serve_improve <- elos %>%
  filter(serve %in% players) %>%
  # Flag points from July onward. FIX: the original called month(), a
  # lubridate function that is never attached in this script; base-R
  # format(., "%m") is equivalent for Date/POSIXct columns
  # (assumes match_date is Date/POSIXct — TODO confirm against the .RData).
  mutate(second_half = as.integer(format(match_date, "%m")) > 6) %>%
  group_by(serve) %>%
  dplyr::summarise(
    start = server.elo[1],
    improved = median(server.elo[second_half]) - start,
    matches = n_distinct(matchid[second_half])
  ) %>%
  # Require a meaningful second-half sample.
  filter(matches >= 15)
# Top 30 improvements (note: yields NA rows if fewer than 30 players qualify,
# matching the original behaviour).
serve_improve <- serve_improve[order(serve_improve$improved, decreasing = TRUE),][1:30,]
# Reverse the level order so the largest improvement sits at the top after
# coord_flip(). FIX: 'levels'/'ordered' spelled out — the original relied on
# partial argument matching ('level =', 'order = T') and the T shortcut.
serve_improve$serve <- factor(serve_improve$serve,
                              levels = serve_improve$serve[nrow(serve_improve):1],
                              ordered = TRUE)
# Horizontal bar chart; the label on each bar is the player's starting rating.
serve_improve %>%
  ggplot(aes(y = improved, x = serve)) +
  geom_bar(stat = "identity", fill = colorblind_pal()(2)[2]) +
  geom_text(aes(label = round(start, 0)), nudge_y = 5) +
  coord_flip() +
  theme_hc() +
  scale_y_continuous("Serve Rating Improvement", breaks = scales::pretty_breaks(n = 10)) +
  scale_x_discrete("") +
  theme(text = element_text(size = 14)) +
  ggtitle("Most Improved Servers in 2017")
# Most improved returners: same construction as serve_improve above, from the
# returner's perspective.
return_improve <- elos %>%
  filter(return %in% players) %>%
  # FIX: base-R month extraction replaces the unattached lubridate month()
  # (assumes match_date is Date/POSIXct — TODO confirm against the .RData).
  mutate(second_half = as.integer(format(match_date, "%m")) > 6) %>%
  group_by(return) %>%
  dplyr::summarise(
    start = return.elo[1],
    improved = median(return.elo[second_half]) - start,
    matches = n_distinct(matchid[second_half])
  ) %>%
  filter(matches >= 15)
# Top 30, then keep only genuine improvements.
return_improve <- return_improve[order(return_improve$improved, decreasing = TRUE),][1:30,]
return_improve <- return_improve %>% filter(improved > 0)
# FIX: 'levels'/'ordered' spelled out instead of the partially-matched
# 'level ='/'order = T' of the original.
return_improve$return <- factor(return_improve$return,
                                levels = return_improve$return[nrow(return_improve):1],
                                ordered = TRUE)
# Horizontal bar chart; the label on each bar is the player's starting rating.
return_improve %>%
  ggplot(aes(y = improved, x = return)) +
  geom_bar(stat = "identity", fill = colorblind_pal()(3)[3]) +
  geom_text(aes(label = round(start, 0)), nudge_y = 5) +
  coord_flip() +
  theme_hc() +
  scale_y_continuous("Return Rating Improvement", breaks = scales::pretty_breaks(n = 10)) +
  scale_x_discrete("") +
  theme(text = element_text(size = 14)) +
  ggtitle("Most Improved Returners in 2017")
70366499377d04ceb73d4ebf2a0144ddf0458762 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/phyclust/examples/prune.Mu.Rd.R | 3e2d00e6d0a7aacc9c60a5bcdcfd0ceb895f3f1c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 433 | r | prune.Mu.Rd.R | library(phyclust)
# Auto-extracted example for phyclust::prune.Mu (genthat format); the whole
# example body is commented out with the standard "## Not run" / "##D" markers
# and is never executed when this file is sourced.
### Name: prune.Mu
### Title: Prune the Center Sequences Mu
### Aliases: prune.Mu
### Keywords: programming

### ** Examples

## Not run: 
##D library(phyclust, quiet = TRUE)
##D 
##D X <- seq.data.toy$org
##D X[, 5] <- .nucleotide$nid[.nucleotide$code == "-"]
##D ret <- phyclust(X, 2)
##D Mu.GAPs <- prune.Mu(X, ret$class.id, ret$Mu)
##D 
##D ret$Mu[, 5]
##D Mu.GAPs[, 5] # Replace by GAPs.
## End(Not run)
|
3b1c187bf3a8a6ca1de07a85f253be41f75b3c2b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/magic/examples/lozenge.Rd.R | fd1b33e575c23f09a750415363b02259702c6fbb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 216 | r | lozenge.Rd.R | library(magic)
### Name: lozenge
### Title: Conway's lozenge algorithm for magic squares
### Aliases: lozenge
### Keywords: array

### ** Examples

# Build one lozenge magic square (auto-printed at top level).
lozenge(4)
# Verify the construction is magic for orders 1..10.
# vapply() replaces the original sapply(): it pins the return type to
# logical(1), so an unexpected result shape fails loudly instead of silently
# changing the type fed to all().
all(vapply(1:10, function(n) is.magic(lozenge(n)), logical(1)))
|
06c78bf56011d94aea8ef03912239e19a0e30bfc | ec9761df6ca7744bcb97af3c3882134abed4b694 | /bi-plot code.R | 0811ec26f0db390a552e11b71e88128246784427 | [] | no_license | harshnirajsheth/Predicting-the-life-cycle-of-lithium-ion-batteries-using-degradation-data | b98b5a8363e4cac8fd19e51cc380e5c98eb59f07 | adcbacdf4098ed24478836f5c932131ebf2a667e | refs/heads/master | 2020-08-04T22:17:54.872675 | 2019-10-25T00:39:35 | 2019-10-25T00:39:35 | 212,296,225 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,472 | r | bi-plot code.R | # import .csv file
# Read the battery cycling data interactively (file.choose() opens a picker).
# FIX: renamed from `data`, which masks the base utils::data() function.
battery_data <- read.csv(file.choose())

# using packages
library(dplyr)
library(ggplot2)

# Per cycle: maximum discharge capacity and mean temperature.
# FIX: the temperature summary was originally bound to `T`, which masks the
# built-in TRUE shortcut and is a classic R footgun; renamed for safety.
discharge_by_cycle <- battery_data %>%
  group_by(Cycle_Index) %>%
  summarise(Discharge_Capacity = max(Discharge_Capacity))
temperature_by_cycle <- battery_data %>%
  group_by(Cycle_Index) %>%
  summarise(Temperature = mean(Temperature))

# Join the two per-cycle summaries on the cycle index.
merged <- merge(temperature_by_cycle, discharge_by_cycle, by = "Cycle_Index")

# Drop the last and first cycles (edge rows of the merged frame).
merged <- merged[-nrow(merged), ]
merged <- merged[-1, ]

# Dual-axis "bi-plot": discharge capacity (left axis, black line) and
# temperature (right axis, red line) against cycle index, drawn by
# overlaying two base plots with par(new = TRUE).
par(mar = c(5, 5, 5, 5) + 0.4)
## Plot first set of data and draw its axis
plot(merged$Cycle_Index, merged$Discharge_Capacity, axes = FALSE, xlab = "", ylab = "",
     type = "l", col = "black", main = "3.6C-80per_3.6C_CH2")
axis(2, ylim = c(0, 2), col = "black", las = 1) ## las=1 makes horizontal labels
mtext("Discharge Capacity", side = 2, line = 2.5)
box()
## Allow a second plot on the same graph
par(new = TRUE)
## Plot the second plot and put axis scale on right
plot(merged$Cycle_Index, merged$Temperature, pch = 15, xlab = "", ylab = "",
     axes = FALSE, type = "l", col = "red")
## a little farther out (line=4) to make room for labels
mtext("Temperature", side = 4, col = "red", line = 4)
axis(4, ylim = c(25, 40), col = "red", col.axis = "red", las = 1)
## Draw the Cycle Index axis
axis(1, xlim = c(0, 2000), col = "black", las = 1)
mtext("Cycle Index", side = 1, col = "black", line = 2.5)
|
eda0cdd302f2f5e1314a4de08a3966ea9f98f2d7 | 6dcd7c7215b226abf5ed6736b9dc118af31e7466 | /man/simplify_taxo.Rd | fb6f0bc368ae5b5cd849c0b42005378ecf7c961e | [
"MIT"
] | permissive | adrientaudiere/MiscMetabar | b31a841cdac749a0074a0c24c7f8248348c64a22 | 2cb7839d26668836aac129af7115dea0a52385c0 | refs/heads/master | 2023-08-31T06:57:15.546756 | 2023-08-23T09:10:52 | 2023-08-23T09:10:52 | 268,765,075 | 7 | 0 | NOASSERTION | 2023-09-06T10:12:57 | 2020-06-02T10:02:00 | R | UTF-8 | R | false | true | 645 | rd | simplify_taxo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/miscellanous.R
\name{simplify_taxo}
\alias{simplify_taxo}
\title{Simplify taxonomy by removing some unused characters such as "k__"}
\usage{
simplify_taxo(physeq)
}
\arguments{
\item{physeq}{(required): a \code{\link{phyloseq-class}} object obtained
using the \code{phyloseq} package.}
}
\value{
A \code{\link{phyloseq-class}} object with simplified taxonomy
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#maturing}{\figure{lifecycle-maturing.svg}{options: alt='[Maturing]'}}}{\strong{[Maturing]}}
}
\author{
Adrien Taudière
}
|
ccc768751ce2dc6cbd0deaea7f9bd87cd535324d | b2a69ed7b75ceb2e08e2b8a1c162b7b1b9754224 | /25 02 tests sur deux echantillons.R | dbda0ebe64d5b7e7700c527ea792d1ca23938a93 | [] | no_license | pierrebedu/STATISTICS | da042d9e652e2db94fb5985397f888b8e1af91de | 47bc20547ff3faf4fec6b113153fd601abcb0db6 | refs/heads/main | 2023-04-03T23:22:54.497549 | 2021-04-11T15:33:20 | 2021-04-11T15:33:20 | 356,820,706 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 5,754 | r | 25 02 tests sur deux echantillons.R | # ECHANTILLONS INDEPENDANTS exo 1
# Exercise 1: comparison of two INDEPENDENT samples ("riche" = rich, "pauvre" = poor).
# Workflow: explore -> check normality -> compare variances -> compare means.
riche <- scan("C:\\Users\\pierr\\Documents\\R scripts\\DU INP\\Donnees\\riche.txt")
pauvre <- scan("C:\\Users\\pierr\\Documents\\R scripts\\DU INP\\Donnees\\pauvre.txt")
# Exploratory statistics for both samples.
mean(pauvre)
mean(riche)
sd(pauvre)
sd(riche)
boxplot(list(riche, pauvre), names= c("riche", "pauvre"))
# Normality check (required before the parametric tests below).
shapiro.test(pauvre)
shapiro.test(riche)
plot(density(pauvre))
plot(density(riche))
# Both samples look normal, so first test the equality of variances.
var.test(pauvre, riche) # Fisher's F test: p-value ~0.98, so H0 (equal variances) is kept
# Variances being equal, test the equality of means with Student's t test.
t.test(pauvre, riche, paired=FALSE, var.equal=TRUE) # Student's test; independent samples (paired=FALSE is the default) and var.equal=TRUE as justified above
# Note: with unequal variances Welch's test would be doubtful here (n = 19 is too small).
# Conclusion so far: the means are equal under the two-sided alternative.
# Refine the alternative hypothesis H1 for a sharper (one-sided) test:
t.test(riche, pauvre, paired=FALSE, var.equal=TRUE, alternative="greater")
# Now the means are no longer equal (H0 rejected).
##################################################################################################################################
# The result is borderline, so confirm with a non-parametric test: Wilcoxon rank-sum.
wilcox.test(riche, pauvre, alternative="greater") # confirms unequal means with this finer one-sided H1 (paired=FALSE by default)
###################################################################################################################################
# Exercise 2: PAIRED samples (glass filter vs paper filter measured on the same units).
load("C:\\Users\\pierr\\Downloads\\donneesFiltres.Rdata")
# NOTE(review): attach() is discouraged (it masks objects on the search path);
# kept here so the columns filtre.verre / filtre.papier can be used by name below.
attach(donneesFiltres)# makes the columns of the data frame visible by name
# Exploratory data analysis.
head(donneesFiltres)
summary(donneesFiltres)# misleading here: different display and the pairing between individuals is lost!
filtre.verre> filtre.papier
boxplot(filtre.verre - filtre.papier) # boxplot of the paired differences
plot(filtre.verre, filtre.papier)# scatter plot of the pairs
abline(0,1, col=2)
# Student's paired t test.
shapiro.test(filtre.verre)# the individual samples are not normally distributed, but that is not required here
shapiro.test(filtre.papier)
hist(filtre.verre)
hist(filtre.papier)
shapiro.test(filtre.verre - filtre.papier)# the DIFFERENCES are normal (this is what matters): Student's test makes sense
t.test(filtre.verre - filtre.papier)# incomplete: the samples must be treated as paired with a one-sided H1
t.test(filtre.verre, filtre.papier, paired=TRUE, alternative= "greater") # correctly shows glass filters better than paper (different means)
t.test(filtre.verre -filtre.papier, alternative= "greater") # EXACTLY the same thing: test of the differences, implicitly "greater than zero"
# WITHOUT paired=TRUE the test is WRONG for these data:
t.test(filtre.verre, filtre.papier, alternative= "greater")# wrong version!!! treats the samples as if they were not paired
# Confirmation with the non-parametric Wilcoxon signed-rank test:
wilcox.test(filtre.verre, filtre.papier, alternative= "greater", paired=TRUE) # confirms the rejection of H0
# Exercise 3.1: PAIRED samples (mice measured under two conditions A and B).
sourisA <- scan("C:\\Users\\pierr\\Documents\\R scripts\\DU INP\\Donnees\\echSourisA.txt")
sourisB <- scan("C:\\Users\\pierr\\Documents\\R scripts\\DU INP\\Donnees\\echSourisB.txt")
# Exploratory data analysis.
head(sourisA)
summary(sourisA) # limited value: the pairing between individuals is ignored
summary(sourisB)
plot(sourisA, sourisB)
abline(0,1)
boxplot(sourisA-sourisB)
abline(h=0)
sum(sourisA>sourisB)
# Test normality of the differences, then H0: the means are equal.
shapiro.test(sourisA-sourisB) # the differences are indeed normal
t.test(sourisA-sourisB)
# or, equivalently (this line was previously the bare word "ou", which
# R evaluated as a lookup of a non-existent object and raised an error):
t.test(sourisA, sourisB, paired=TRUE) # same thing as testing the differences against 0
#######################################################################################################
#EXO 3.2: INDEPENDENT samples (colon cancer patients vs controls).
malade <- scan("C:\\Users\\pierr\\Documents\\R scripts\\DU INP\\Donnees\\echCancerColon.txt")
nmalade <- scan("C:\\Users\\pierr\\Documents\\R scripts\\DU INP\\Donnees\\echCancerTemoin.txt")
head(malade)
summary(malade) # limited value on its own
summary(nmalade)
boxplot(list(malade, nmalade))
# Test normality of each sample.
shapiro.test(malade) # both samples are normally distributed
shapiro.test(nmalade)
var.test(malade, nmalade)# equality of variances (Fisher's F test)
t.test(malade, nmalade, var.equal=TRUE, paired=FALSE) #then H0: the means are equal
# Confirm with Kolmogorov-Smirnov on the empirical distribution functions:
ks.test(malade, nmalade)# Kolmogorov-Smirnov -> same distribution accepted (distance between the ECDFs)
#############################################################################################################
# Exercise 3.3: independent samples, imagined either as non-normal
# (-> Wilcoxon directly) OR as independent with unequal variances.
jeunes <- scan("C:\\Users\\pierr\\Documents\\R scripts\\DU INP\\Donnees\\jeunes.txt")
ages <- scan("C:\\Users\\pierr\\Documents\\R scripts\\DU INP\\Donnees\\ages.txt")
jeunes
summary(jeunes)
summary(ages)
boxplot(list(jeunes, ages))
# Normality?
hist(jeunes)
hist(ages)
shapiro.test(jeunes)
shapiro.test(ages)
# Equal variances? Fisher's F test. Note: var.test() has no 'paired'
# argument -- the 'paired=FALSE' previously passed here was silently
# swallowed by '...', so it has been removed.
var.test(jeunes, ages)
# Conclusion: test H0 "equal means" with a one-sided alternative.
t.test(jeunes, ages, paired=FALSE, var.equal = TRUE, alternative= "less")
##############################################################################
# Confirm with Wilcoxon (ranks) and Kolmogorov-Smirnov (distribution functions).
wilcox.test(jeunes, ages, paired=FALSE, alternative ="less")
# ks.test() compares two independent samples by construction and has no
# 'paired' argument; the previous 'paired=FALSE' was silently ignored.
ks.test(jeunes, ages, alternative ="less")
|
b7ed43c629a68e3a26d9c3209e74190e2034adab | 892e88ee513844acdf263b092035c8781f47d0af | /R/citrusSize.R | 18502f3ad3fb55d7bfd9cae2762e52e45805a02a | [] | no_license | NailouZhang/idiogramFISH | 90ce42589e85896bf53be35411482b22d3828b05 | 65fb1c1012c798ab21ad0642de50c52fdf7233b9 | refs/heads/master | 2023-08-30T22:12:43.847226 | 2021-09-30T11:00:02 | 2021-09-30T11:00:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,969 | r | citrusSize.R | #' @name citrusSize
#' @aliases citrusMarkPos markOverCMA
#' @title FUNCTIONS: citrusSize, citrusMarkPos, markOverCMA
#' @description Helper function to create data.frames with
#' chr. size and mark size data for Citrus
#' based on categories in Carvalho et al. (2005)
#' @description Special behaviour while plotting:
#' normally you will get chr. names as: B_1, B_2, etc.
#' to remove _*, use \code{chrIdPatternRem='_.*'} in
#' \code{plotIdiograms}. However, for FL+ and FL0,
#' this conversion is automatic. So, in plot you will
#' never see FL0_1, FL0_2, for example.
#'
#' @param A number of A to calculate (citrusSize)
#' @param B number of B to calculate (citrusSize)
#' @param C number of C to calculate (citrusSize)
#' @param D number of D to calculate (citrusSize)
#' @param E number of E to calculate (citrusSize)
#' @param F number of F to calculate (citrusSize)
#' @param G number of G to calculate (citrusSize)
#' @param FL number of FL+ to calculate (citrusSize)
#' @param FL0 number of FL0 to calculate (citrusSize)
#' @param shortArm for A to G (not FL) (citrusSize)
#' @param longArm for A to G (not FL) (citrusSize)
#' @param shortArmFL for FL (citrusSize)
#' @param longArmFL for FL (citrusSize)
#' @param OTU name of species (citrusSize)
#' @param chrSizeDf data.frame created with \code{citrusSize} function (citrusMarkPos)
#' @param mSizePter numeric, default size for P(short) ter (terminal) bands. \code{0.25} (default) (citrusMarkPos)
#' @param mSizeQter numeric, default size for Q(long) ter (terminal) bands. \code{0.35} (default) (citrusMarkPos)
#' @param mSizePprox numeric, default size for P prox (proximal) bands. \code{0.35} (default) (citrusMarkPos)
#' @param mOther numeric, default size for other bands. \code{0.25} (default) (citrusMarkPos)
#' @param markName character, default name of mark \code{"CMA"}, or \code{"45S"}, respectively. (citrusMarkPos,markOverCMA)
#' @param citrusMarkPosDF data.frame, with CMA marks (markOverCMA)
#' @param chrType character, defaults to "B", chr. type to duplicate mark (markOverCMA)
#' @param chrName character, defaults to "B", chr. name(s) to duplicate mark (markOverCMA)
#' @param chrRegion character, arm, defaults to "p". for mark duplication (markOverCMA)
#' @param shrinkMark boolean, shrink new mark to be created (markOverCMA)
#'
#' @keywords size arm
#' @examples
#' citrusSizeDF <- citrusSize(B=1,D=11,F=4,FL0=2,OTU="C. jambhiri")
#' suppressMessages(
#' plotIdiograms(citrusSizeDF,
#' indexIdTextSize=.4,# font size
#' rulerNumberSize=.4,# font size
#' rulerTitleSize=.4, # font size
#' rulerPos =-.5, # ruler pos.
#' xPosRulerTitle =1.5, # ruler title pos.
#' orderChr="original"# order of chr. as in d.f.
#' )
#' )
#' citrusSizeDF2 <- citrusSize(B=2,D=10,F=4,FL0=1,
#' FL=1, # equivalent to FL+
#' OTU="C. limettioides")
#'
#'
#'suppressMessages(
#' plotIdiograms(citrusSizeDF2, # FL^NA error corrected in 1.15.4
#' indexIdTextSize=.4,# font size
#' rulerNumberSize=.4,# font size
#' rulerTitleSize=.4, # font size
#' rulerPos =-.5, # ruler pos.
#' xPosRulerTitle =1.5, # ruler title pos.
#' orderChr="original"# order of chr. as in d.f.
#' )
#')
#'
#' @references Carvalho, R., Soares Filho, W. S., Brasileiro-Vidal, A. C., & Guerra, M. (2005). The relationships among lemons, limes and citron: A chromosomal comparison. Cytogenetic and Genome Research, 109(1–3), 276–282. https://doi.org/10.1159/000082410
#'
#' @return data.frame
#' @rdname citrusSize
#' @export
#'
#' @importFrom dplyr bind_rows
#'
citrusSize <- function(A=0,B=0,C=0,D=0,E=0,F=0,FL=0,FL0=0,G=0,
                       shortArm=1.2,longArm=1.7,
                       shortArmFL=1.3, longArmFL=1.8,
                       OTU="OTU 1") {
  ## Build a chromosome-size data.frame (chrName, shortArmSize, longArmSize,
  ## OTU) with one row per requested chromosome. Types A-G share one pair of
  ## arm sizes; FL+/FL0 chromosomes share another.
  nonFLsum <- sum(A,B,C,D,E,F,G)
  FLsum    <- sum(FL,FL0)

  chrSizeCitrusNonFL <- NULL
  chrSizeCitrusFL    <- NULL

  if (nonFLsum > 0) {
    chrNames <- c(rep("A",A),rep("B",B),rep("C",C),rep("D",D),
                  rep("E",E),rep("F",F),rep("G",G))
    ## make.uniqueIF() (package helper) disambiguates repeated names
    ## (e.g. "D" -> "D_1", "D_2", ...) when a type occurs more than once.
    chrNames <- make.uniqueIF(chrNames)
    chrSizeCitrusNonFL <- data.frame(chrName = chrNames,
                                     shortArmSize = shortArm,
                                     longArmSize = longArm)
    chrSizeCitrusNonFL$OTU <- OTU
  }
  if (FLsum > 0) {
    chrNamesFL <- c(rep("FL+",FL), rep("FL0",FL0))
    chrNamesFL <- make.uniqueIF(chrNamesFL)
    chrSizeCitrusFL <- data.frame(chrName = chrNamesFL,
                                  shortArmSize = shortArmFL,
                                  longArmSize = longArmFL)
    chrSizeCitrusFL$OTU <- OTU
  }
  ## bind_rows() silently drops NULL entries, so either group may be absent.
  ## This replaces the previous environment()-scraping collection via
  ## rev(as.list(environment())), whose row order depended on environment
  ## internals; A-G rows now deterministically precede FL rows.
  dplyr::bind_rows(chrSizeCitrusNonFL, chrSizeCitrusFL)
} # fun
#'
#' @rdname citrusSize
#' @return data.frame
#' @examples
#' citrusMarkPosDF <- citrusMarkPos(citrusSizeDF)
#' suppressMessages(
#' plotIdiograms(dfChrSize= citrusSizeDF, # chr. size data.frame
#' dfMarkPos= citrusMarkPosDF,# mark position data.frame (inc. cen.)
#' ruler=FALSE, # remove
#' chrIndex=FALSE, # remove
#' morpho=FALSE, # remove
#' karIndex=FALSE, # remove
#' indexIdTextSize=.4, # font size
#' xlimRightMod=4, # xlim mod.
#' orderChr="original", # order chr. as in d.f.
#' chrColor="blue", # chr. color
#' legendHeight=3 # legend item height
#' )
#' )
#' @export
#'
citrusMarkPos<-function(chrSizeDf,mSizePter=.25,mSizeQter=.35,mSizePprox=.35,mOther=.25,markName="CMA"){
  ## Build a CMA band-position table from a chromosome-size table produced
  ## by citrusSize(). For each chromosome category (A-G, FL+) the matching
  ## rows of chrSizeDf are replicated once per band; then arm (chrRegion),
  ## band size (markSize) and distance from the centromere (markDistCen)
  ## are filled in. order() is stable, so after sorting by chrName the
  ## replicated rows of one chromosome keep their stacking order, which the
  ## rep()ed chrRegion/markSize vectors rely on.

  # A chromosomes: three bands (p terminal, p proximal, q terminal).
  getMarkPosA<-chrSizeDf[which(chrSizeDf$chrName %in% grep("A",chrSizeDf$chrName, value=T )),]
  numberOfA<-nrow(getMarkPosA)
  if(numberOfA>0){
    markPosAThree<-do.call("rbind", replicate(3, getMarkPosA, simplify = FALSE))
    markPosAThree$markDistCen <- c(getMarkPosA$shortArmSize-mSizePter,
                                   rep(0,numberOfA),
                                   getMarkPosA$longArmSize-mSizeQter )
    markPosAThree<-markPosAThree[order(markPosAThree$chrName),]
    markPosAThree$chrRegion <- rep(c("p","p","q"),numberOfA)
    markPosAThree$markSize <- rep(c(mSizePter,mSizePprox,mSizeQter),numberOfA)
  }
  # B chromosomes: two bands (p proximal, q terminal).
  getMarkPosB<-chrSizeDf[which(chrSizeDf$chrName %in% grep("B",chrSizeDf$chrName, value=T )),]
  numberOfB<-nrow(getMarkPosB)
  if(numberOfB>0){
    markPosBtwo<-do.call("rbind", replicate(2, getMarkPosB, simplify = FALSE))
    markPosBtwo$markDistCen <- c(rep(0,numberOfB),getMarkPosB$longArmSize-mSizeQter )
    markPosBtwo<-markPosBtwo[order(markPosBtwo$chrName),]
    markPosBtwo$chrRegion <- rep(c("p","q"),numberOfB)
    markPosBtwo$markSize <- rep(c(mSizePprox,mSizeQter),numberOfB)
  }
  # C chromosomes: two terminal bands (p ter, q ter).
  getMarkPosC<-chrSizeDf[which(chrSizeDf$chrName %in% grep("C",chrSizeDf$chrName, value=T )),]
  numberOfC<-nrow(getMarkPosC)
  if(numberOfC>0){
    markPosCtwo<-do.call("rbind", replicate(2, getMarkPosC, simplify = FALSE))
    markPosCtwo$markDistCen <- c(getMarkPosC$shortArmSize-mSizePter,
                                 getMarkPosC$longArmSize-mSizeQter )
    markPosCtwo<-markPosCtwo[order(markPosCtwo$chrName),]
    markPosCtwo$chrRegion <- rep(c("p","q"),numberOfC)
    markPosCtwo$markSize <- rep(c(mSizePter,mSizeQter),numberOfC)
  }
  # D chromosomes: a single q-terminal band.
  getMarkPosD<-chrSizeDf[which(chrSizeDf$chrName %in% grep("D",chrSizeDf$chrName, value=T )),]
  numberOfD<-nrow(getMarkPosD)
  if(numberOfD>0){
    markPosDtwo<-do.call("rbind", replicate(1, getMarkPosD, simplify = FALSE))
    markPosDtwo$markDistCen <- getMarkPosD$longArmSize-mSizeQter
    # Bug fix: the sorted result was previously computed but never assigned
    # back (bare `markPosDtwo[order(...),]`), unlike every sibling section.
    markPosDtwo<-markPosDtwo[order(markPosDtwo$chrName),]
    markPosDtwo$chrRegion <- rep(c("q"),numberOfD)
    markPosDtwo$markSize <- rep(c(mSizeQter),numberOfD)
  }
  # E chromosomes: a single q band, set back from the telomere.
  getMarkPosE<-chrSizeDf[which(chrSizeDf$chrName %in% grep("E",chrSizeDf$chrName, value=T )),]
  numberOfE<-nrow(getMarkPosE)
  if(numberOfE>0){
    markPosEtwo<-do.call("rbind", replicate(1, getMarkPosE, simplify = FALSE))
    markPosEtwo$markDistCen <- getMarkPosE$longArmSize-2*mOther
    markPosEtwo<-markPosEtwo[order(markPosEtwo$chrName),]
    markPosEtwo$chrRegion <- rep(c("q"),numberOfE)
    markPosEtwo$markSize <- rep(c(mOther),numberOfE)
  }
  # F chromosomes: a single q-terminal band.
  # NOTE(review): the pattern "^F\\+" only matches names literally starting
  # with "F+", while citrusSize() names these chromosomes "F", "F_1", ... --
  # as written this branch never selects them; confirm intended.
  getMarkPosF<-chrSizeDf[which(chrSizeDf$chrName %in% grep("^F\\+",chrSizeDf$chrName, value=T )),]
  numberOfF <- nrow(getMarkPosF)
  if(numberOfF>0) {
    markPosFtwo <- do.call("rbind", replicate(1, getMarkPosF, simplify = FALSE))
    markPosFtwo$markDistCen <- getMarkPosF$longArmSize-mOther
    markPosFtwo <- markPosFtwo[order(markPosFtwo$chrName),]
    markPosFtwo$chrRegion <- rep(c("q"),numberOfF)
    markPosFtwo$markSize <- rep(c(mOther),numberOfF)
  }
  # FL+ chromosomes: a single q-terminal band.
  getMarkPosFL<-chrSizeDf[which(chrSizeDf$chrName %in% grep("^FL\\+",chrSizeDf$chrName, value=T )),]
  numberOfFL <- nrow(getMarkPosFL)
  if(numberOfFL>0) {
    markPosFLtwo<-do.call("rbind", replicate(1, getMarkPosFL, simplify = FALSE))
    markPosFLtwo$markDistCen <- getMarkPosFL$longArmSize-mOther
    markPosFLtwo <- markPosFLtwo[order(markPosFLtwo$chrName),]
    markPosFLtwo$chrRegion <- rep(c("q"),numberOfFL)
    markPosFLtwo$markSize <- rep(c(mOther),numberOfFL)
  }
  # G chromosomes: two q bands at different distances from the centromere.
  getMarkPosG<-chrSizeDf[which(chrSizeDf$chrName %in% grep("G",chrSizeDf$chrName, value=T )),]
  numberOfG<-nrow(getMarkPosG)
  if(numberOfG>0){
    markPosGtwo<-do.call("rbind", replicate(2, getMarkPosG, simplify = FALSE))
    markPosGtwo$markDistCen <- c(getMarkPosG$longArmSize-mOther,
                                 getMarkPosG$longArmSize-3*mOther )
    markPosGtwo<-markPosGtwo[order(markPosGtwo$chrName),]
    markPosGtwo$chrRegion <- rep(c("q","q"),numberOfG)
    markPosGtwo$markSize <- rep(c(mOther,mOther),numberOfG)
  }
  # Collect every markPos* table built above; the final sort by chrName
  # makes the (environment-dependent) collection order irrelevant.
  marksDF<-dplyr::bind_rows(
    as.list(environment() )[which(names(as.list(environment()) ) %in% grep("markPos.+",names(as.list(environment()) ), value=T))]
  )
  marksDF<-marksDF[order(marksDF$chrName),]
  row.names(marksDF)<-seq_len(nrow(marksDF))
  marksDF$markName<-markName
  marksDF <- marksDF[,c("chrName","chrRegion","markName","markDistCen","markSize")]
  # Propagate the OTU name when present; silently skip otherwise.
  tryCatch(marksDF$OTU<-unique(chrSizeDf$OTU), error=function(e){"no OTU name"})
  return(marksDF)
}
#'
#' @rdname citrusSize
#' @return data.frame
#' @examples
#' citrusMarkPosDF45S<-markOverCMA(citrusMarkPosDF, chrType="B", chrRegion="p", markName="45S")
#' suppressMessages(
#' plotIdiograms(dfChrSize= citrusSizeDF, # chr. size data.frame
#' dfMarkPos= citrusMarkPosDF45S,# mark position data.frame (inc. cen.)
#' ruler=FALSE, # remove ruler
#' chrIndex=FALSE, # remove index
#' morpho=FALSE, # remove morphol.
#' karIndex=FALSE, # remove
#' indexIdTextSize=.4, # font size chr.
#' xlimRightMod=4, # modify xlim
#' orderChr="original", # as in d.f.
#' chrColor="blue",
#' legendHeight=5, # height of legend item
#' colorBorderMark="black", # mark border color
#' OTUfont=3 # italics
#' )
#' )
#' @export
#'
markOverCMA <- function(citrusMarkPosDF,chrType="B", chrName, chrRegion="p", markName="45S",shrinkMark=TRUE){
  ## Duplicate selected CMA bands under a new mark name (e.g. "45S"),
  ## optionally shrinking the duplicate so it sits inside the CMA band.
  ## Selection is either by exact chromosome name(s) (chrName) or, when
  ## chrName is missing, by substring match on chrType.
  if(!missing(chrName)){
    listSmallDF <- list()
    for(i in seq_along(chrName) ) {
      # Exact name matching. (Previously built via
      # grep(paste0("^", name, "$")), which broke for names containing
      # regex metacharacters -- e.g. "FL+", a name citrusSize() produces,
      # since "+" is a quantifier and "^FL+$" never matches "FL+".)
      onChr <- citrusMarkPosDF$chrName %in% chrName[i] &
               citrusMarkPosDF$chrRegion %in% chrRegion
      if(grepl("A", chrName[i], fixed = TRUE) ) {
        # For A chromosomes keep only the band closest to the centromere.
        minMDC <- min(citrusMarkPosDF[which(onChr),]$markDistCen)
        listSmallDF[[i]]<-citrusMarkPosDF[which(onChr &
                            citrusMarkPosDF$markDistCen %in% minMDC ),]
      } else {
        listSmallDF[[i]]<-citrusMarkPosDF[which(onChr),]
      }
    }
    smallDF<-dplyr::bind_rows(listSmallDF)
  } else {
    if(chrType=="A") {
      # Substring match: chrType "A" selects "A", "A_1", "A_2", ...
      Anames <- grep(chrType, citrusMarkPosDF$chrName, value=TRUE)
      minMDC<-numeric()
      for (i in seq_along(Anames)){
        # Band closest to the centromere on each A chromosome.
        minMDC[i] <- min(citrusMarkPosDF[which(citrusMarkPosDF$chrName %in% Anames[i] &
                                                 citrusMarkPosDF$chrRegion %in% chrRegion),]$markDistCen)
      }
      smallDF<-citrusMarkPosDF[which(citrusMarkPosDF$chrName %in% grep(chrType, citrusMarkPosDF$chrName, value=TRUE) &
                                       citrusMarkPosDF$chrRegion %in% chrRegion &
                                       citrusMarkPosDF$markDistCen %in% minMDC ),]
    } else {
      # NOTE(review): substring matching means chrType="F" would also match
      # "FL+"/"FL0" chromosome names -- confirm intended before relying on it.
      smallDF<-citrusMarkPosDF[which(citrusMarkPosDF$chrName %in% grep(chrType, citrusMarkPosDF$chrName, value=TRUE) &
                                       citrusMarkPosDF$chrRegion %in% chrRegion),]
    }
  }
  if(nrow(smallDF)==0) {
    message("no bands found")
    return(citrusMarkPosDF)
  } else {
    smallDF$markName<-markName
    if(shrinkMark){
      # Halve the duplicated band and recentre it inside the original band.
      mS<-smallDF$markSize
      smallDF$markSize<-mS/2
      smallDF$markDistCen<-smallDF$markDistCen+mS/4
    }
    citrusMarkPosDFMark<-dplyr::bind_rows(citrusMarkPosDF, smallDF)
    return(citrusMarkPosDFMark)
  }
}
|
9eaee29dac949caba4adc240dbf317b09059ab41 | 9812b19047865c889e7995bf19389e7d3038f450 | /tests/testthat/test_summaryStatsUtils.R | a8133c1326d71a0ba75d1ee827b4358cb958898e | [] | no_license | bigpas/summaryStatsR | d1a9e9c57619ab4d3a5fc7ffd0e3f6148439d34f | afe155f48c8c7fcbf6c0581158e40775de508ac9 | refs/heads/master | 2020-05-07T09:45:55.757166 | 2019-11-30T16:27:19 | 2019-11-30T16:27:19 | 180,391,078 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,954 | r | test_summaryStatsUtils.R | context("Summary Stats Funcitons for Data Frame(s) in R")
suppressPackageStartupMessages(library(testthat))
suppressPackageStartupMessages(library(rlang))
suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(glue))
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(jsonlite))
test_that("`summary_stats` works on data.table", {
  # A data.table input must serialize to the same JSON as the data.frame.
  iris_dt <- data.table(iris)
  json_dt <- toJSON(summary_stats(iris_dt, target_var = "Sepal.Length"))
  json_df <- toJSON(summary_stats(iris, target_var = "Sepal.Length"))
  expect_equal(json_dt, json_df)
})
test_that("`summary_stats` works on tibble", {
  # A tibble input must serialize to the same JSON as the data.frame.
  iris_tbl <- dplyr::as_tibble(iris)
  expect_equal(
    toJSON(summary_stats(iris_tbl, "Sepal.Length")),
    toJSON(summary_stats(iris, "Sepal.Length"))
  )
})
test_that("`grouped_summary_stats` works on data.table", {
  # A data.table input must serialize to the same JSON as the data.frame.
  iris_dt <- data.table(iris)
  json_dt <- toJSON(grouped_summary_stats(iris_dt, group = "Species",
                                          target_var = "Sepal.Length"))
  json_df <- toJSON(grouped_summary_stats(iris, group = "Species",
                                          target_var = "Sepal.Length"))
  expect_equal(json_dt, json_df)
})
# This test exercises grouped2_summary_stats (two grouping variables); it was
# previously mislabelled as "`grouped_summary_stats` works on tibble".
test_that("`grouped2_summary_stats` works on tibble", {
  mtcarsTibble <- dplyr::as_tibble(mtcars)
  # A tibble input must serialize to the same JSON as the data.frame.
  expect_equal(
    object = toJSON(
      grouped2_summary_stats(
        mtcarsTibble,
        group1 = "gear",
        group2 = "cyl",
        target_var = "mpg"
      )
    ),
    expected = toJSON(
      grouped2_summary_stats(
        mtcars,
        group1 = "gear",
        group2 = "cyl",
        target_var = "mpg"
      )
    )
  )
})
# (Removed: this block was a byte-for-byte copy-paste duplicate of the
# preceding tibble test for grouped2_summary_stats; running the identical
# expectations twice added time without adding any coverage.)
# This test exercises grouped2_summary_stats (two grouping variables); it was
# previously mislabelled as "`grouped_summary_stats` works on data.table".
test_that("`grouped2_summary_stats` works on data.table", {
  mtcarsDT <- data.table(mtcars)
  # A data.table input must serialize to the same JSON as the data.frame.
  expect_equal(
    object = toJSON(
      grouped2_summary_stats(
        mtcarsDT,
        group1 = "gear",
        group2 = "cyl",
        target_var = "mpg"
      )
    ),
    expected = toJSON(
      grouped2_summary_stats(
        mtcars,
        group1 = "gear",
        group2 = "cyl",
        target_var = "mpg"
      )
    )
  )
})
|
e2cf92270f43494c1a821b6267327ec0917d1678 | fa128f50bca1c43419341227bd986ff03545f41d | /man/sdcor2cov.Rd | 8a64201c5cc69cf0d047da6f86c51c8cde6ce413 | [] | no_license | cran/SIN | 33f4786b5f270dfe75ccd7ee21080ff177f7eb9c | 48fa54363cfe853b66c4c7db72ad069e79f91eed | refs/heads/master | 2021-01-18T13:59:31.843367 | 2013-12-23T00:00:00 | 2013-12-23T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 511 | rd | sdcor2cov.Rd | \name{sdcor2cov}
\alias{sdcor2cov}
\title{Covariance matrix}
\description{
This function takes a vector of standard deviations and a correlation
matrix as input and computes the covariance matrix.
}
\usage{sdcor2cov(stddev, corr)}
\arguments{
\item{stddev}{a vector of standard deviations.}
\item{corr}{a correlation matrix.}
}
\value{
The function returns the covariance matrix corresponding to the input
information.
}
\examples{
data(sur)
sdcor2cov(sur$stddev, sur$corr)
}
\keyword{multivariate}
|
786c69ed3ee1ba199cb0df38f0abbf447bc88718 | d9334d392b84ac9a66453e0fd5b7b85c82156cc8 | /sa health combos.R | 29c550de9f2ee7242880a4a0f8f1e1dfb4011c0d | [] | no_license | dranthonymaher/handycode | 0a8961f7f61cdc732f1d40d963adfb8ac64c6125 | 4134c967a2e0e7b008646bd26171db75db82414d | refs/heads/master | 2023-04-12T06:58:58.981578 | 2023-03-18T00:27:19 | 2023-03-18T00:27:19 | 251,159,704 | 0 | 0 | null | 2020-11-26T05:02:18 | 2020-03-29T23:58:34 | R | UTF-8 | R | false | false | 857 | r | sa health combos.R |
expand.grid(0:1, 0:1, 0:1)
settings<-c("ED","ICU","theatre","surg wards","med wards","community")
LHNs<-c("Barossa Hills Fleurieu LHN",
"Central Adelaide LHN",
"Eyre and Far North LHN",
"Flinders and Upper North LHN",
"Limestone Coast LHN",
"Northern Adelaide LHN",
"Riverland Mallee Coorong LHN",
"Southern Adelaide LHN",
"Womens and Childrens LHN",
"Yorke and Northern LHN")
#OccGroups<-c("Medical","Nursing","admin","hotel","allied")
HospOccGroups<-c("JMOs","Medics","NursesL1","NursesL2","NUMs")
LHNOccGroups<-c("admin","allied","hotel")
m1<-expand.grid(HospOccGroups,settings, LHNs, KEEP.OUT.ATTRS = TRUE)
m2<-expand.grid(LHNOccGroups, LHNs, KEEP.OUT.ATTRS = TRUE)
write.csv(m1,"SAhealthcombos_hosp.csv")
write.csv(m2,"SAhealthcombos_lhns.csv")
|
ad893d72c0948e546692bfe5e3570fdf6e0bd8a4 | ab2b794ed2d69a24feacd4ff55ea73ff06bfe7f0 | /R/fromJSON.R | a50427af6c2456712054245bd48eb7c896c2ea4d | [] | no_license | ogito/jsonlite | 0b3322fa08d3c4cd6060810ee851dab327fa1024 | e2676c924663a953930c72c7ac84c12864894755 | refs/heads/master | 2020-12-14T18:43:57.557994 | 2014-05-12T02:00:41 | 2014-05-12T02:00:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,837 | r | fromJSON.R | #' These functions are used to convert R objects into JSON and vice versa. The \code{\link{toJSON}} and \code{\link{fromJSON}}
#' functions use a class-based encoding schema which follows conventions outlined in the vignette of this
#' package. They implement a consistent and practical mapping between JSON structures and the standard data
#' structures in R.
#'
#' The \code{\link{toJSON}} and \code{\link{fromJSON}} functions are drop-in replacements for the identically named functions
#' in packages rjson and RJSONIO. Our implementation uses an alternative, somewhat more consistent mapping
#' between R objects and JSON strings. The \code{\link{serializeJSON}} and \code{\link{unserializeJSON}} functions use an
#' alternative system to convert between R objects and JSON, which supports more classes but is much more verbose.
#
#' @rdname fromJSON
#' @title Convert R objects to/from \code{JSON}
#' @name toJSON, fromJSON
#' @aliases fromJSON toJSON
#' @export fromJSON toJSON
#' @param x the object to be encoded
#' @param dataframe how to encode data.frame objects: must be one of 'row' or 'column'
#' @param matrix should matrices and higher dimensional arrays be encoded in row-major or column-major.
#' @param Date how to encode Date objects: must be one of 'ISO8601' or 'epoch'
#' @param POSIXt how to encode POSIXt (datetime) objects: must be one of 'string', 'ISO8601', 'epoch' or 'mongo'
#' @param factor how to encode factor objects: must be one of 'string' or 'integer'
#' @param complex how to encode complex numbers: must be one of 'string' or 'list'
#' @param raw how to encode raw objects: must be one of 'base64', 'hex' or 'mongo'
#' @param auto_unbox automatically \code{\link{unbox}} all atomic vectors of length 1. Not recommended!
#' @param digits max number of digits (after the dot) to print for numeric values
#' @param na how to print NA values. One of 'null' or 'string'. Defaults are class specific
#' @param force unclass/skip objects of classes with no defined json mapping
#' @param pretty adds indentation whitespace to JSON output. See \code{\link{prettify}}
#' @param txt a string in json format
#' @param simplifyVector coerce JSON arrays containing only scalars into a vector
#' @param simplifyDataFrame coerce JSON arrays containing only records (JSON objects) into a data frame.
#' @param simplifyMatrix coerce JSON arrays containing vectors of equal length and mode into matrix or array.
#' @param flatten flatten nested data frames into a single non-nested data frame
#' @param ... arguments passed on to class specific \code{print} methods
#' @note All encoded objects should pass the validation at www.jsonlint.org
#' @useDynLib jsonlite
#' @references
#' \url{http://www.jsonlint.org}
#' @author Jeroen Ooms \email{jeroen.ooms@@stat.ucla.edu}
#' @examples #stringify some data
#' jsoncars <- toJSON(mtcars, pretty=TRUE)
#' cat(jsoncars)
#'
#' #parse it back
#' fromJSON(jsoncars)
#'
#' #control scientific notation
#' toJSON(10 ^ (0:10), digits=8)
#' options(scipen=3)
#' toJSON(10 ^ (0:10), digits=8)
#'
#' \dontrun{
#' # Parse data frame
#' data1 <- fromJSON("https://api.github.com/users/hadley/orgs")
#' names(data1)
#' data1$login
#'
#' #nested data frames:
#' data2 <- fromJSON("https://api.github.com/users/hadley/repos")
#' names(data2)
#' names(data2$owner)
#' data2$owner$login
#' }
fromJSON <- function(txt, simplifyVector = TRUE, simplifyDataFrame = simplifyVector,
  simplifyMatrix = simplifyVector, flatten = FALSE, ...) {
  # 'txt' must be a character vector: a JSON string, a URL or a file path.
  if (!is.character(txt)) {
    stop("Argument 'txt' must be a JSON string, URL or path to existing file.")
  }
  # A single short string may be a URL or a file path rather than inline
  # JSON; the 1000-character cutoff is a heuristic to avoid probing the
  # file system / URL regex with long JSON payloads.
  if (length(txt) == 1 && nchar(txt) < 1000) {
    if (grepl("^https?://", txt)) {
      # httr is an optional dependency, loaded lazily only for URLs.
      tryCatch(getNamespace("httr"), error = function(e) {
        stop("Package httr not found. Please run: install.packages('httr')",
          call. = FALSE)
      })
      # NOTE(review): this request forces sslversion = 3 and disables
      # certificate verification (ssl.verifypeer/ssl.verifyhost = FALSE),
      # which accepts any certificate -- insecure; confirm intended.
      req <- httr::GET(txt, httr::config(httpheader=c(`User-Agent` = "RCurl-httr-jsonlite"),
        sslversion = 3, ssl.verifypeer=FALSE, ssl.verifyhost = FALSE))
      httr::stop_for_status(req)
      txt <- rawToChar(req$content)
    } else if (file.exists(txt)) {
      # Read the whole file as one newline-joined string.
      txt <- paste(readLines(txt, warn = FALSE), collapse = "\n")
    }
  }
  # A multi-element character vector is treated as lines of one document.
  if (length(txt) > 1) {
    txt <- paste(txt, collapse = "\n")
  }
  # Cheap sanity check: valid JSON text must start with '{' or '[' after
  # optional whitespace.
  if (!grepl("^[ \t\r\n]*(\\{|\\[)", txt)) {
    stop("String does not seem to be valid JSON: ", substring(txt, 0, 20))
  }
  # Parse the JSON text into nested R lists.
  obj <- parseJSON(txt)
  # Optionally simplify nested lists into vectors / data frames / matrices.
  if (any(isTRUE(simplifyVector), isTRUE(simplifyDataFrame), isTRUE(simplifyMatrix))) {
    return(simplify(obj, simplifyVector = simplifyVector, simplifyDataFrame = simplifyDataFrame,
      simplifyMatrix = simplifyMatrix, flatten = flatten, ...))
  } else {
    return(obj)
  }
}
d8b3248b87132253edc2797bfcc9c3e74a2338bd | b2d32cb57604a26e31f0c4947ee866f59a7aa8ba | /man/estM.Rd | 513be8aa727a1110afe89dadced393b105706697 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | atredennick/GenEst | fe1f95ca844b2bf9cf0f3358e810439b0a964410 | 38b73a290c074872c0651926bd6de283072aa8e6 | refs/heads/master | 2020-06-10T20:24:52.460911 | 2019-06-25T16:52:44 | 2019-06-25T16:52:44 | 193,736,013 | 0 | 0 | null | 2019-06-25T15:35:03 | 2019-06-25T15:35:03 | null | UTF-8 | R | false | true | 2,690 | rd | estM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mortality_functions.R
\name{estM}
\alias{estM}
\title{Estimate mortality}
\usage{
estM(data_CO, data_SS, data_DWP, frac = 1, COdate = "DateFound",
model_SE, model_CP, unitCol = NULL, SSdate = NULL, sizeCol = NULL,
DWPCol = NULL, seed_SE = NULL, seed_CP = NULL, seed_g = NULL,
seed_M = NULL, nsim = 1000, max_intervals = 8)
}
\arguments{
\item{data_CO}{Carcass Observation data}
\item{data_SS}{Search Schedule data}
\item{data_DWP}{Survey unit (rows) by size (columns) density weighted
proportion table}
\item{frac}{fraction of facility (by units or by area) surveyed}
\item{COdate}{Column name for the date found data}
\item{model_SE}{Searcher Efficiency model (or list of models if there are
multiple size classes)}
\item{model_CP}{Carcass Persistence model (or list of models if there are
multiple size classes)}
\item{unitCol}{Column name for the unit indicator (optional)}
\item{SSdate}{Column name for the date searched data}
\item{sizeCol}{Name of column in \code{data_CO} where the size classes
are recorded. Optional. If none provided, it is assumed there is no
distinctions among size classes.}
\item{DWPCol}{Column name for the DWP values in the DWP table when no
size class is used and there is more than one column in \code{data_DWP}
that could be interpreted as DWP.}
\item{seed_SE}{seed for random draws of the SE model}
\item{seed_CP}{seed for random draws of the CP model}
\item{seed_g}{seed for random draws of the gs}
\item{seed_M}{seed for the random draws of the Mhats}
\item{nsim}{the number of simulation draws}
\item{max_intervals}{maximum number of arrival intervals to consider
for each carcass}
}
\value{
list of Mhat, Aj, ghat
}
\description{
Given fitted Searcher Efficiency and Carcass
Persistence models; Search Schedule, Density Weighted Proportion,
and Carcass Observation data; and information about the fraction of
the facility that was surveyed.
}
\examples{
\dontrun{
data(mock)
model_SE <- pkm(formula_p = p ~ HabitatType, formula_k = k ~ 1,
data = mock$SE
)
model_CP <- cpm(formula_l = l ~ Visibility, formula_s = s ~ Visibility,
data = mock$CP, dist = "weibull",
left = "LastPresentDecimalDays",
right = "FirstAbsentDecimalDays"
)
eM <- estM(nsim = 1000, data_CO = mock$CO, data_SS = mock$SS,
data_DWP = mock$DWP, frac = 1, model_SE = model_SE,
model_CP = model_CP, COdate = "DateFound",
DWPCol = "S", sizeCol = NULL
)
}
}
|
4e897d30fa5fdf3f13e4ea3453f45375658a4a1a | 860acc513a2fb1a31e8a770517b63faf73b19af9 | /man/check_input.Rd | f2ddfb95300d82ac5c7e3c986c169cb74b69ff4e | [] | no_license | PeterDeWeirdt/intactr | 0ae1b934fbc51783ed956dbde7a8b06b25e36c74 | 4194774d21a6a32777c9b0a03fc8465658a4ac41 | refs/heads/master | 2022-09-23T13:32:40.157417 | 2020-06-02T02:06:48 | 2020-06-02T02:06:48 | 234,556,649 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 301 | rd | check_input.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process.R
\name{check_input}
\alias{check_input}
\title{Checks input for correct column names}
\usage{
check_input(input_df)
}
\arguments{
\item{input_df}{input data}
}
\description{
Checks input for correct column names
}
|
0874bfda4abdd4d11de17bad36a1f3676ed2df87 | 06aec25dadf0e5c54939fe87f845ee9007e144e0 | /JSM 2011/Talk/tools/example/lake distance.R | 1afe8cbc6a7a1cf3a71ee31312cb8e345118a4e8 | [] | no_license | rundel/Presentations | 67beac96f767669d1c5cded0f122df007c37da12 | bce5c7ba02eff9159c5252068aafe936e5bfd0aa | refs/heads/master | 2023-08-17T00:01:37.099095 | 2023-08-07T18:03:03 | 2023-08-07T18:03:03 | 4,659,430 | 20 | 5 | null | null | null | null | UTF-8 | R | false | false | 1,213 | r | lake distance.R | library(rgeos)
set.seed(1)
txt = rep("",5)
for(i in 1:5) {
xc = floor(runif(1,1,25))
yc = floor(runif(1,1,25))
xpts = round(rnorm(10,0,0.5),3)
ypts = round(rnorm(10,0,0.5),3)
txt[i] = paste("MULTIPOINT(",paste( "(",xpts+xc," ",ypts+yc,")",sep="",collapse="," ),")",sep="")
}
wkt = paste( "GEOMETRYCOLLECTION(",paste(txt,collapse=","),")", sep="")
lake_pts = readWKT(wkt)
pdf("lake_plot1.pdf")
plot(lake_pts,pch=16)
dev.off()
hull=gConvexHull(lake_pts,byid=TRUE)
pdf("lake_plot2.pdf")
plot(lake_pts,pch=16)
plot(hull,add=TRUE)
dev.off()
lakes = gBuffer(hull,byid=TRUE,width=1,quadsegs=10)
pdf("lake_plot3.pdf")
plot(lakes,col=1:5+1)
plot(lake_pts,add=TRUE,pch=16)
plot(hull,add=TRUE)
dev.off()
pdf("lake_plot4.pdf")
plot(lakes,col=1:5+1)
dev.off()
pts = matrix( runif(100,1,25),ncol=2)
pts_wkt = paste( "GEOMETRYCOLLECTION(", paste(paste("POINT(",pts[,1]," ",pts[,2],")",sep=""),collapse=",") ,")", sep="")
pts_sp = readWKT(pts_wkt)
pdf("lake_plot5.pdf")
plot(lakes,col=1:5+1)
plot(pts_sp,add=TRUE,pch=1)
dev.off()
cols = apply(gDistance(pts_sp,lakes,byid=TRUE),2,which.min)
pdf("lake_plot6.pdf")
plot(lakes,col=1:5+1)
plot(pts_sp,pch=16,col=cols+1,add=TRUE)
plot(pts_sp,pch=1,add=TRUE)
dev.off() |
428739882d2d6fdabe7c0ed675d0ae96651e8185 | da03930f45319ebc3af391f92352657030a46c25 | /man/seqtab.Rd | cd6c0b3376a7c8018756b4fcfce0d3554774b298 | [] | no_license | cran/TraMineR | 7c5a47bf44b60d0aa8e798b893b766359a106122 | f69de43d018da19df8247a9090ebd1c01803d246 | refs/heads/master | 2023-04-11T10:25:42.257555 | 2023-03-31T17:20:02 | 2023-03-31T17:20:02 | 17,693,958 | 12 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,860 | rd | seqtab.Rd | \name{seqtab}
\alias{seqtab}
\alias{print.stslist.freq}
\title{Frequency table of the sequences}
\description{
Computes the frequency table of the sequences (count and percent of each sequence).
}
\usage{
seqtab(seqdata, idxs = 1:10, weighted = TRUE, format = "SPS", tlim)
}
\arguments{
\item{seqdata}{a sequence object as defined by the \code{\link{seqdef}} function.}
\item{idxs}{returns the table for the sequences at ranks '\code{idxs}' in the list of distinct sequences sorted in decreasing order of their frequencies. Default is \code{1:10}, i.e. the 10 most frequent sequences. Can be any subset, like \code{5:10} (fifth to tenth most frequent sequences) or \code{c(2,10)} (second and tenth most frequent sequences). Set \code{idxs=0} to get the table for the whole set of distinct sequences.
}
\item{weighted}{if \code{TRUE} (default), frequencies account for the weights, if any, assigned to the state sequence object
(see \code{\link{seqdef}}). Set to \code{FALSE} for ignoring weights.}
\item{format}{format used for displaying the rownames (the sequences) in the output table.
Default is SPS format, which yields shorter and more readable sequence representations.
Alternatively, \code{"STS"} may be specified.}
\item{tlim}{Deprecated. Use \code{idxs} instead.}
}
\details{The \code{weighted} argument has no effect when no weights were assigned to the state sequence object since weights default in that case to 1.}
\value{An object of class \code{stslist.freq}. This is actually a state sequence object (containing a list of state sequences) with added attributes, among others the \code{freq} attribute containing the frequency table. There are \code{print} and \code{plot} methods for such objects. More sophisticated plots can be produced with the \code{seqplot} function.}
\references{
Gabadinho, A., G. Ritschard, N. S. Müller and M. Studer (2011). Analyzing and Visualizing State Sequences in R with TraMineR. \emph{Journal of Statistical Software} \bold{40}(4), 1-37.
}
\seealso{
\code{\link{seqplot}, \link{plot.stslist.freq}}.
}
\examples{
## Creating a sequence object from the actcal data set
data(actcal)
actcal.lab <- c("> 37 hours", "19-36 hours", "1-18 hours", "no work")
actcal.seq <- seqdef(actcal, 13:24, labels=actcal.lab)
## 10 most frequent sequences in the data
seqtab(actcal.seq)
## With idxs=0, we get all distinct sequences in the data set
## sorted in decreasing order of their frequency
stab <- seqtab(actcal.seq, idxs=0)
head(stab)
tail(stab)
## Example with weights
## from biofam data set using weights
data(ex1)
ex1.seq <- seqdef(ex1, 1:13, weights=ex1$weights)
## Unweighted frequencies
seqtab(ex1.seq, weighted=FALSE)
## Weighted frequencies
seqtab(ex1.seq, weighted=TRUE)
}
\author{Alexis Gabadinho (with Gilbert Ritschard for the help page)}
\keyword{Transversal characteristics}
|
3e6bb594030ebd5f525d48fad595a559e7d19dff | 13e11079ba6fcd554a2f6015cd4e40204d1cb292 | /man/get_datasetID.Rd | 50559010885b1d9f5761f94b5f184d2949e951af | [
"MIT"
] | permissive | hut34/hutr-package | cdae26631d0f4e640b7c22f1eff703692a47b8ad | 3519c7f5acc5fe2bdb12a96f42539542ecd11b32 | refs/heads/master | 2020-12-28T18:33:10.945324 | 2020-04-24T03:25:38 | 2020-04-24T03:25:38 | 238,440,955 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 594 | rd | get_datasetID.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_datasetID.R
\name{get_datasetID}
\alias{get_datasetID}
\title{get_datasetID}
\usage{
get_datasetID(DatasetNumber, Endpoint)
}
\arguments{
\item{DatasetNumber}{An integer.}
\item{Endpoint}{A string}
}
\value{
Returns a string
}
\description{
\code{get_datasetID} retrieves dataset ID
}
\details{
Dataset ID is not currently specified in the table of datasets so requires a separate post request.
User must also specify the endpoint.
}
\examples{
get_datasetID(DatasetNumber=DatasetNumber, Endpoint=Endpoint)
}
|
0541479c302ab002a241442e926ba13490cf62b8 | f461dd36fb85a44bf2ff866fb3898e475c0ac4a9 | /Application/version0.3/R/Data_HR.R | 032d23dfcc3e81102b395de7aab5d2edbe39f90e | [
"MIT"
] | permissive | oujbih/PFE | 0059938284e03c0d41816d85739fffac3bbd244e | 2f71d1febc03cc55286ea0243d9dd824bc931d7a | refs/heads/master | 2023-03-11T11:12:42.646087 | 2021-02-19T14:57:20 | 2021-02-19T14:57:20 | 246,944,720 | 1 | 0 | null | 2020-06-05T22:59:47 | 2020-03-12T22:52:59 | R | UTF-8 | R | false | false | 337 | r | Data_HR.R | #########################################
# OUJBIH ABDERRAHIM ########
# 31/03/2020 ########
# OCP SA Beni Amir Khouribga PFE ########
#########################################
# Load pre-built data sets from the project's Data/ directory (paths are
# relative to the current working directory).
load("Data/Model_SDP_Mean.Rda") # expected to provide Model_SDP_Mean (used below)
load("Data/Fevrier_DATA3.Rda")
load("Data/Mean_PM_HG.Rda")
# Alias used by the rest of the application for the mean SDP model data.
Model_data <- Model_SDP_Mean
|
bd0a59512c1b0083c4b52dfb24f180127f5e4fde | d226838e64a1d55fdaf797893f7468651b725183 | /man/bwaSamse.Rd | 7b4accab202096795bacadf82a6547429e236453 | [] | no_license | HenrikBengtsson/aroma.seq | 5fd673cc449d9c3b89daf1125e8cc95556d0641d | 6464f1e5e929c423978cf7dcb11ac7018d179a6d | refs/heads/master | 2021-06-21T13:53:21.618898 | 2021-02-10T02:57:15 | 2021-02-10T02:57:15 | 20,848,327 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,499 | rd | bwaSamse.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% bwaSamse.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{bwaSamse}
\alias{bwaSamse.default}
\alias{bwaSamse}
\title{Generates BWA-backtrack single-end (SE) alignments via 'bwa samse'}
\description{
Generates BWA-backtrack single-end (SE) alignments via 'bwa samse'.
}
\usage{
\method{bwaSamse}{default}(pathnameSAI, pathnameFQ, indexPrefix, pathnameD, ..., verbose=FALSE)
}
\arguments{
\item{pathnameSAI}{The SAI file to be aligned.}
\item{pathnameFQ}{The FASTQ file to be aligned.}
\item{indexPrefix}{The pathname prefix to the BWA index files.}
\item{pathnameD}{The destination pathname.}
\item{...}{Additional arguments specifying BWA 'samse' switches
passed to \code{\link{systemBWA}}().}
\item{verbose}{See \code{\link[R.utils]{Verbose}}.}
}
\examples{\dontrun{
pathnameFA <- "annotationData/organisms/Lambda_phage/lambda_virus.fa"
bwaIndex(pathnameFA)
pathnameSAI <- "bwaData/LambdaVirusExample/Lambda_phage/reads_1.sai"
pathnameFQ <- "fastqData/LambdaVirusExample/Lambda_phage/reads_1.fq"
pathnameD <- "bwaData/LambdaVirusExample/Lambda_phage/reads_1.sam"
bwaSamse(pathnameSAI=pathnameSAI, pathnameFQ=pathnameFQ,
pathnameFA=pathnameFA, pathnameD=pathnameD)
}}
\author{Henrik Bengtsson}
\keyword{internal}
|
84e41599115064920e8aaa81be4ca24ecc856a8f | 273b9ae1349390114c56c6220d0b6e975173a1f5 | /old_code/rawGfs2castdf.R | 924eb1f82d02517710398608c2738b3fd82b46f4 | [] | no_license | athena-small/nyiso_forecast_evaluation | 387335ffc95e92e0c90a656713c959f84316c05d | 1960e37829faf618979a363419bbf6bc932d4a9d | refs/heads/master | 2023-02-02T09:59:48.564543 | 2013-08-09T15:24:30 | 2013-08-09T15:24:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,288 | r | rawGfs2castdf.R | # Function rawGfs2castdf:
# A function that converts a raw GFS file containing temperature forecasts into
# a cast data frame organized by "valid for" datetime and forecast lead time.
rawGfs2castdf <- function(rawGfsFile){
  # Convert a raw GFS temperature-forecast CSV into a "cast" data frame
  # organized by "valid for" datetime (rows) and forecast lead time (columns).
  #
  # Args:
  #   rawGfsFile: path to a raw GFS CSV.  The first two columns are discarded;
  #     columns 3-5 are read as (factor, character, character), where column 4
  #     is the forecast initialisation datetime and column 5 the "valid for"
  #     datetime, both formatted "%Y-%m-%d %H:%M" in GMT.
  #
  # Returns:
  #   A data frame from reshape2::dcast with one row per validFor datetime and
  #   one column per forecast lead time ("hoursAhead").
  if (!requireNamespace("reshape2", quietly = TRUE)) {
    stop("rawGfs2castdf() requires the 'reshape2' package")
  }
  colClasses <- c(rep("NULL", 2), "factor", "character", "character")
  gfs <- raw.gfs <- read.csv(rawGfsFile, colClasses = colClasses)
  # Parse the two text datetime columns as POSIXt in GMT.
  gfs[[2]] <- strptime(raw.gfs[[2]], format = "%Y-%m-%d %H:%M", tz = "GMT")
  gfs[[3]] <- strptime(raw.gfs[[3]], format = "%Y-%m-%d %H:%M", tz = "GMT")
  # Replace the initialisation datetime with the forecast lead time
  # (validFor - initialisation); stored as a factor for melt/dcast.
  gfs[[2]] <- as.factor(gfs[[3]] - gfs[[2]])
  # validFor datetimes become factors so melt/dcast can sort on them.
  gfs[[3]] <- as.factor(raw.gfs[[3]])
  names(gfs) <- c("Temperature", "hoursAhead", "validFor")
  gfs.molten <- reshape2::melt(gfs, id = c("validFor", "hoursAhead"))
  reshape2::dcast(gfs.molten, ... ~ hoursAhead)
  # TODO(review): the cast result keeps validFor as a factor; converting it
  # back to POSIXct (and the value columns to numeric) was started in the
  # original code but never finished.
}
55c8d3695e850dfec5a530bc32228773123d4c31 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ltbayes/examples/fmodelnrm.Rd.R | 7722802ea6ab4ba1bdc6692f9580b15b0dca10d0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,207 | r | fmodelnrm.Rd.R | library(ltbayes)
### Name: fmodelnrm
### Title: Latent Trait Posterior of the Nominal Response Model
### Aliases: fmodelnrm

### ** Examples

# Sample from the latent-trait posterior of a nominal response model
# (5 items, 3 response categories each) and summarise the draws.
# Requires the ltbayes package (library(ltbayes) loaded above).

samp <- 5000 # samples from posterior distribution
burn <- 1000 # burn-in samples to discard

# Item parameters passed to fmodelnrm via apar/bpar (see ltbayes docs).
alph <- matrix(c(-1, 0, 1), 5, 3, byrow = TRUE)
beta <- matrix(0, 5, 3)

# Metropolis sampling of the posterior for response pattern c(0,1,2,1,0).
post <- postsamp(fmodelnrm, c(0,1,2,1,0),
	apar = alph, bpar = beta, control = list(nbatch = samp + burn))

# Keep only the post-burn-in draws.
post <- data.frame(sample = 1:samp,
	zeta = post$batch[(burn + 1):(samp + burn)])

# Bug fix: `type = "l"` was previously passed to with() rather than plot(),
# where it was silently ignored; it belongs inside the plot() call.
with(post, plot(sample, zeta, type = "l")) # trace plot of sampled realizations

with(post, plot(density(zeta, adjust = 2))) # density estimate of posterior distribution

with(posttrace(fmodelnrm, c(0,1,2,1,0), apar = alph, bpar = beta),
	plot(zeta, post, type = "l")) # profile of log-posterior density

information(fmodelnrm, c(0,1,2,1,0), apar = alph, bpar = beta) # Fisher information

with(post, mean(zeta)) # posterior mean
postmode(fmodelnrm, c(0,1,2,1,0), apar = alph, bpar = beta) # posterior mode
with(post, quantile(zeta, probs = c(0.025, 0.975))) # posterior credibility interval
profileci(fmodelnrm, c(0,1,2,1,0),
	apar = alph, bpar = beta) # profile likelihood confidence interval
8a17e08f0578772d1f86937444e71476bab74bcb | 54b4976030ae6a42e10282c8f41609ef266721c9 | /R/ecd-plot-2x2-generic.R | afc687c0bf197924b619a886ac71b7a03ec45ba8 | [] | no_license | cran/ecd | b1be437b407e20c34d65bcf7dbee467a9556b4c1 | 18f3650d6dff442ee46ed7fed108f35c4a4199b9 | refs/heads/master | 2022-05-18T20:24:56.375378 | 2022-05-09T20:10:02 | 2022-05-09T20:10:02 | 48,670,406 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,791 | r | ecd-plot-2x2-generic.R | #' Standard 2x2 plot for sample data
#'
#' Standard 2x2 plot for sample data
#'
#' @param object An object of ecd class.
#' @param ts The xts object for the timeseries.
#' @param EPS Logical, indicating whether to save the plot to EPS, default = FALSE
#' @param eps_file File name for eps output
#'
#' @keywords plot
#'
#' @export plot_2x2
#'
#' @importFrom grDevices dev.off postscript
#'
#' @examples
#' \dontrun{
#' plot_2x2(d, ts)
#' }
### <======================================================================>
"plot_2x2.ecd" <- function (object, ts, EPS = FALSE, eps_file = NA)
{
    # Validate the time series BEFORE (possibly) opening the postscript
    # device: in the original code a failed check left a dangling device.
    attr <- xtsAttributes(ts)
    if (! "tail" %in% names(attr)) {
        stop("ts does not contain tail stats")
    }
    if ( EPS ) {
        postscript(file= eps_file,
            paper="special", width=8, height=8, horizontal=FALSE)
        # Guarantee the device is closed even if a plot helper errors.
        on.exit(dev.off(), add = TRUE)
    }
    # Title of the first panel; include the data symbol when available.
    main1 <- "ECD PDF"
    if ("symbol" %in% names(attr)) {
        main1 <- paste("PDF of symbol:", attr$symbol)
    }
    # Ensure the quantile machinery is initialised for qec() in the helpers.
    object <- quantilize(object)
    # 2x2 panel layout with an outer top margin for the title area.
    # NOTE(review): par() is intentionally left modified on return, matching
    # the original behaviour.
    par(mfcol=c(2,2)) # 4 plots
    par(oma = c(1, 1, 4, 1))
    ecd.plot_pdf (object, ts, main = main1)
    ecd.plot_logpdf (object, ts)
    ecd.plot_cdf (object, ts)
    ecd.plot_qq (object, ts)
}
### <---------------------------------------------------------------------->
#' @rdname plot_2x2.ecd
setGeneric("plot_2x2", function(object, ts, EPS = FALSE, eps_file = NA) standardGeneric("plot_2x2"))
#' @rdname plot_2x2.ecd
setMethod("plot_2x2", signature("ecd"), plot_2x2.ecd)
### <---------------------------------------------------------------------->
### Standard plot facility
# TODO split the following to separate files
### <---------------------------------------------------------------------->
# Overlay the fitted ecd density (blue line) on the empirical histogram
# density (red circles) taken from the xts attributes of `ts`.
#
# Args:
#   object: an ecd distribution object with @stats populated (see guard).
#   ts:     an xts series whose attributes carry the precomputed histogram.
#   xlab, main: axis/title labels.  Labels are TeX-style strings ("$...$"),
#     presumably rendered by a TeX-aware device such as tikz -- confirm.
#
# Side effects: draws on the current graphics device and modifies par(mar).
ecd.plot_pdf <- function (object, ts, xlab="$\\log(r)$", main="ECD PDF" )
{
    # Guard: the ecd object must carry precomputed moment statistics.
    if (length(object@stats)==0) {
        stop("stats is not computed in ecd object")
    }
    hist <- xtsAttributes(ts)$hist
    m1 <- object@stats$m1        # first moment (mean)
    s <- object@stats$stdev      # standard deviation
    xe <- ellipticity(object)    # ellipticity points xe1/xe2, marked below
    # Plot range covers the central 99% of the fitted distribution.
    Q <- 0.005
    xmin <- qec(Q, object)
    xmax <- qec(1-Q, object)
    # Evaluate the fitted density on a 400-point grid over that range.
    N <- 400
    logr <- seq(xmin, xmax, length.out = N)
    pdf <- dec(logr, object)
    # y-range must accommodate both the fit and the empirical histogram.
    ymax <- max(max(pdf),max(hist$density))
    par(mar=c(4,4,2,1))
    # Empirical density as red circles at the histogram bin midpoints.
    plot( hist$mids, hist$density,
        pch = "o", col="red",
        xlim=c(xmin, xmax),
        ylim=c(0, ymax),
        ylab="PDF", xlab=xlab, main=main)
    # Fitted ecd density.
    lines(logr, pdf, pch = ".", col="blue", lwd=2)
    # Vertical markers at the two ellipticity points.
    abline(v=xe$xe1)
    abline(v=xe$xe2)
    sc = 0.8                        # common text scaling for annotations
    yseg <- 0.20*max(hist$density)  # height of the stdev indicator bar
    # std dev indicator
    segments( m1-s, yseg,
             m1+s, yseg, lwd=3 )
    text( m1, yseg*0.5, "stdev", cex=sc )
    # Annotate the data's year range (first/last timestamp of the series).
    yr1 <- format(min(zoo::index(ts)), "%Y")
    yr2 <- format(max(zoo::index(ts)), "%Y")
    text( xmax*0.7, ymax*0.8, yr1, cex=sc )
    text( xmax*0.7, ymax*0.7, "to", cex=sc )
    text( xmax*0.7, ymax*0.6, yr2, cex=sc )
    # Label the ellipticity points with their numeric values.
    text( xe[1], ymax*0.4, pos=2, paste("$x_e$=", sprintf("%.4f", xe$xe1)), cex=sc)
    text( xe[2], ymax*0.4, pos=4, paste("$x_e$=", sprintf("%.4f", xe$xe2)), cex=sc)
    # legends
    legend(xmin, ymax, c("data","fit"),
        cex=sc,
        col=c("red","blue"),
        pch=c("o",NA),
        lwd=c(1,2),
        lty=c(NA,1));
}
# ------------------------------------------------------------------
# Log-density counterpart of ecd.plot_pdf: empirical log-density (red
# circles) against the fitted ecd log-density (blue line), with the fitted
# parameters printed on the panel.  Draws on the current graphics device
# and modifies par(mar).
ecd.plot_logpdf <- function (object, ts, xlab="$\\log(r)$", main="ECD Log PDF" )
{
    # Guard: the ecd object must carry precomputed moment statistics.
    if (length(object@stats)==0) {
        stop("stats is not computed in ecd object")
    }
    hist <- xtsAttributes(ts)$hist
    m1 <- object@stats$m1
    s <- object@stats$stdev
    xe <- ellipticity(object)
    par(mar=c(4,4,2,1))
    # Finite log-density values only (empty histogram bins give -Inf).
    fnt <- log(hist$density) [ which(is.finite(log(hist$density))) ]
    # Unlike ecd.plot_pdf, the x-range here follows the observed data.
    xmin <- min(hist$mids)
    xmax <- max(hist$mids)
    N <- 400
    logr <- seq(xmin, xmax, length.out = N)
    pdf <- dec(logr, object)
    # y-range: at most 8 log-units below the peak, with headroom above.
    plot( hist$mids, log(hist$density),
        pch = "o", col="red",
        ylim=c( min(max(fnt)-8,min(fnt)), max(fnt)+0.5 ),
        xlim=c( xmin, xmax ),
        ylab="$\\log$(PDF)", xlab=xlab, main=main)
    lines(logr, log(pdf), pch = ".", col="blue", lwd=2)
    # Vertical markers at the two ellipticity points.
    abline(v=xe$xe1)
    abline(v=xe$xe2)
    # legends
    # parameters
    # Anchor the parameter listing on whichever side extends further out.
    xpos <- 0.7 * if ( abs(xmin) > abs(xmax) ) xmin else xmax
    ypos <- max(log(hist$density))+0.5
    sc <- 0.8   # text scaling
    sp <- 0.8   # vertical spacing between parameter lines
    # Print the five fitted ecd parameters, one per line.
    text(xpos,ypos-1*sp,labels=c("ecd fit"),cex=sc)
    text(xpos,ypos-2*sp,labels=paste("$\\alpha$=", sprintf("%.4f", object@alpha)), cex=sc)
    text(xpos,ypos-3*sp,labels=paste("$\\gamma$=", sprintf("%.4f", object@gamma)), cex=sc)
    text(xpos,ypos-4*sp,labels=paste("$\\sigma$=", sprintf("%.6f", object@sigma)), cex=sc)
    text(xpos,ypos-5*sp,labels=paste("$\\beta$=", sprintf("%.4f", object@beta)), cex=sc)
    text(xpos,ypos-6*sp,labels=paste("$\\mu$=", sprintf("%.6f", object@mu)), cex=sc)
}
# ------------------------------------------------------------------
# Empirical CDF (red step function from histogram counts) against the fitted
# ecd CDF (blue line), with observed vs. theoretical summary statistics
# printed on the left and right of the panel.  Draws on the current graphics
# device and modifies par(mar).
ecd.plot_cdf <- function (object, ts, xlab="$\\log(r)$", main="ECD CDF" )
{
    # Guard: the ecd object must carry precomputed moment statistics.
    if (length(object@stats)==0) {
        stop("stats is not computed in ecd object")
    }
    attr <- xtsAttributes(ts)
    hist <- attr$hist
    stats <- object@stats
    m1 <- stats$m1
    s <- stats$stdev
    # Plot range covers the central 99% of the fitted distribution.
    Q <- 0.005
    xmin <- qec(Q, object)
    xmax <- qec(1-Q, object)
    N <- 400
    logr <- seq(xmin, xmax, length.out = N)
    cdf <- pec(logr, object)
    par(mar=c(4,4,2,1))
    # Empirical CDF from the histogram bin counts.
    hist_cdf <- cumsum(hist$counts) / sum(hist$counts)
    plot(hist$mids, hist_cdf,
         lwd=2, type="s", col="red",
         xlim=c(xmin, xmax),
         ylab="CDF", xlab=xlab, main=main)
    # NOTE(review): `dr` is computed but never used; looks like a leftover.
    dr <- logr[2]-logr[1]
    # lines(hist$mids, hist_cdf, col="red")
    lines(logr, cdf, pch = ".", col="blue", lwd=2)
    # statistics
    sc <- 0.8
    ell <- ellipticity(object)$avg
    q <- attr$tail_quantile
    # Asymptotic tail statistics stored on the series attributes (observed).
    asd <- attr$tail$asymp_stdev_0 # observed
    ask <- attr$tail$asymp_skewness_0 # observed
    aku <- attr$tail$asymp_kurt_0 # observed
    # Corresponding asymptotic statistics from the fitted model (theoretical).
    d_astats <- ecd.asymp_stats(object,q)[[1]] # theoretical
    d_asd <- d_astats$stdev # theoretical
    d_ask <- d_astats$skewness # theoretical
    d_aku <- d_astats$kurtosis # theoretical
    # Left column: observed data statistics.
    text(xmin*0.5,0.95,labels=c("data stats"),cex=sc)
    text(xmin*0.5,0.85,labels=paste("mean", sprintf("%.6f",attr$mean)),cex=sc)
    text(xmin*0.5,0.77,labels=paste("stdev", sprintf("%.4f",attr$stdev)),cex=sc)
    text(xmin*0.5,0.69,labels=paste("skew", sprintf("%.4f",attr$skewness)),cex=sc)
    text(xmin*0.5,0.61,labels=paste("kurt", sprintf("%.4f",attr$kurtosis)),cex=sc)
    text(xmin*0.5,0.53,labels=paste("asymp stdev", sprintf("%.4f",asd)),cex=sc)
    text(xmin*0.5,0.45,labels=paste("asymp skew", sprintf("%.2f",ask)),cex=sc)
    text(xmin*0.5,0.37,labels=paste("asymp kurt", sprintf("%.1f",aku)),cex=sc)
    text(xmin*0.5,0.29,labels=paste("tail quant $e^\\wedge$", sprintf("%.1f",log(q))),cex=sc)
    # Right column: fitted-model statistics.
    text(xmax*0.5,0.75,labels=c("ecd fit"),cex=sc)
    text(xmax*0.5,0.60,labels=paste("mean", sprintf("%.6f",stats$mean)),cex=sc)
    text(xmax*0.5,0.53,labels=paste("stdev", sprintf("%.4f",stats$stdev)),cex=sc)
    text(xmax*0.5,0.46,labels=paste("skew", sprintf("%.4f",stats$skewness)),cex=sc)
    text(xmax*0.5,0.39,labels=paste("kurt", sprintf("%.4f",stats$kurtosis)),cex=sc)
    text(xmax*0.5,0.32,labels=paste("asymp stdev", sprintf("%.4f",d_asd)),cex=sc)
    text(xmax*0.5,0.25,labels=paste("asymp skew", sprintf("%.2f",d_ask)),cex=sc)
    text(xmax*0.5,0.18,labels=paste("asymp kurt", sprintf("%.1f",d_aku)),cex=sc)
    text(xmax*0.5,0.11,labels=paste("ellipticity", sprintf("%.4f",ell)),cex=sc)
}
# ------------------------------------------------------------------
# Quantile-quantile diagnostic: observed log-return quantiles (from the
# histogram tuple stored on the xts attributes of `ts`) against theoretical
# quantiles from the fitted ecd object.  Draws on the current device and
# modifies par(mar).
ecd.plot_qq <- function (object, ts, main="ECD QQ-Plot" ) {
    ts_attr <- xtsAttributes(ts)
    htuple <- ts_attr$histuple
    dropped_tails <- ts_attr$merge_tails
    obs_q <- htuple$hx
    # Empirical cumulative probabilities; the +1 keeps the last value < 1.
    probs <- cumsum(htuple$hy) / (sum(htuple$hy)+1)
    fit_q <- qec(probs, object)
    par(mar=c(4,4,2,1))
    plot(obs_q, fit_q,
         ylim=c(min(fit_q)*1.2, max(fit_q)*1.2),
         pch="o", col="red",
         main=main,
         xlab="Observed Quantile",
         ylab="Theoretical Quantile")
    abline(0, 1, col="black", lwd=2)   # perfect-fit reference line
    abline(h=0, col="yellow")
    abline(v=0, col="yellow")
    # Absolute deviation between theoretical and observed quantiles.
    lines(obs_q, abs(fit_q - obs_q), pch = ".", col="green")
    # legends
    label_cex <- 0.8
    legend(min(obs_q), max(fit_q),
           c("qq data","45 degree","error"),
           cex=label_cex,
           col=c("red","black","green"),
           pch=c("o",NA,NA),
           lwd=c(1,2,1),
           lty=c(NA,1,1))
    # Report how many extreme bins were dropped from each tail, if any.
    if (sum(dropped_tails) > 0) {
        x_anchor <- max(obs_q)*0.5
        text(x_anchor, min(fit_q)*0.4, "tail dropped:", cex=label_cex)
        text(x_anchor, min(fit_q)*0.6, paste("left", dropped_tails[1]), cex=label_cex)
        text(x_anchor, min(fit_q)*0.8, paste("right", dropped_tails[2]), cex=label_cex)
    }
}
|
95b92c5f7b97c3ef265e65d6943ffebd0dbd4195 | 42ac78fed8e8494cc54a533e6cb9b4c18ca51369 | /branches/lme4-lmer/tests/lmer.R | 859085cdc2bdbf3a58dfe8ec85780d9d68c569b4 | [] | no_license | LTLA/Matrix | 8a79cac905cdb820f95190e99352cd9d8f267558 | 2b80087cfebc9f673e345000aeaf2170fc15b506 | refs/heads/master | 2020-08-07T20:22:12.075155 | 2019-09-28T21:21:10 | 2019-09-28T21:21:10 | 213,576,484 | 0 | 1 | null | 2019-10-13T00:56:38 | 2019-10-08T07:30:49 | C | UTF-8 | R | false | false | 7,224 | r | lmer.R | library(lme4)
## Regression-test script for lme4's lmer() interface (library(lme4) is
## attached above).  Wrapping a fit in (...) forces it to be printed.
options(show.signif.stars = FALSE)
data(sleepstudy)
## Basic LMM fits on the sleepstudy data (REML, ML, uncorrelated slopes).
(fm1 <- lmer(Reaction ~ Days + (Days|Subject), sleepstudy))
(fm1a <- lmer(Reaction ~ Days + (Days|Subject), sleepstudy, method = "ML"))
(fm2 <- lmer(Reaction ~ Days + (1|Subject) + (0+Days|Subject), sleepstudy))
## should produce a warning but fit by REML
(fm1b <- lmer(Reaction ~ Days + (Days|Subject), sleepstudy, method = "AGQ"))
## transformed vars [failed in 0.995-1]
(fm2l <- lmer(log(Reaction) ~ log(Days+1) + (log(Days+1)|Subject),
              data = sleepstudy, method = "ML"))
## generalized linear mixed model
(fm3 <- lmer(decrease ~ treatment + (1|rowpos) + (1|colpos),
             OrchardSprays, family = poisson(), method = "PQL"))
## PQL is used per default:
fm3. <- lmer(decrease ~ treatment + (1|rowpos) + (1|colpos),
             OrchardSprays, family = poisson)
fm3.@call <- fm3@call # so that they should be almost identical:
##MM: 'tol=0' now (2006-05-24) fails (on 32-bit Ubuntu; not 64-bit RHEL 4) ???
##DMB: On 32-bit Debian this fails in R CMD check but not in R CMD BATCH ???
#stopifnot(all.equal(fm3, fm3., tol = 1e-6))
## Laplace approximation {takes time}
(fm4 <- lmer(decrease ~ treatment + (1|rowpos) + (1|colpos),
             data = OrchardSprays, family = poisson(), method = "Laplace"))
## Simple example by Andrew Gelman (2006-01-10) ----
n.groups <- 10 ; n.reps <- 2
n <- length(group.id <- gl(n.groups, n.reps))
## simulate the varying parameters and the data:
set.seed(0)
a.group <- rnorm(n.groups, 1, 2)
y <- rnorm (n, a.group[group.id], 1)
## fit and summarize the model
fit.1 <- lmer (y ~ 1 + (1 | group.id))
coef (fit.1)# failed in Matrix 0.99-6
(sf1 <- summary(fit.1)) # show() is as without summary()
## ranef and coef
## Check the structure of ranef()/coef() results for one- and two-term fits.
rr <- ranef(fm1)
stopifnot(is.list(rr), length(rr) == 1, class(rr[[1]]) == "data.frame")
print(plot(rr))
cc <- coef(fm1)
stopifnot(is.list(cc), length(cc) == 1, class(cc[[1]]) == "data.frame")
print(plot(cc))
rr <- ranef(fm2)
stopifnot(is.list(rr), length(rr) == 2,
          all((sapply(rr, class) == "data.frame")))
print(plot(rr))
cc <- coef(fm2)
stopifnot(is.list(cc), length(cc) == 2,
          all((sapply(cc, class) == "data.frame")))
print(plot(cc))
## Many family = binomial cases
## Run only when the optional mlmRev data package is available.
if (isTRUE(try(data(Contraception, package = 'mlmRev')) == 'Contraception')) {
    print(fm.1 <- lmer(use ~ urban + age + livch + (1 | district),
                       Contraception, binomial))
    print(system.time(fm1 <- lmer(use ~ urban + age + livch + (1 | district),
                                  Contraception, binomial), gc = TRUE))
    ## same model, using "Laplace" :
    print(fm.2 <- lmer(use ~ urban + age + livch + (1 | district),
                       Contraception, binomial, method = 'Laplace'))
    print(system.time(lmer(use ~ urban + age + livch + (1 | district),
                           Contraception, binomial, method = 'Laplace'),
                      gc = TRUE))
    ## print(fm.2a <- lmer(use ~ urban + age + livch + (1 | district),
    ##                     Contraception, binomial, method = 'AGQ'))
    ## print(system.time(lmer(use ~ urban + age + livch + (1 | district),
    ##                        Contraception, binomial, method = 'AGQ'),
    ##                   gc = TRUE))
    ## model + random intercept, with and w/o using "Laplace" :
    print(fm.3 <- lmer(use ~ urban + age + livch + (urban | district),
                       Contraception, binomial))
    print(fm.4 <- lmer(use ~ urban + age + livch + (urban | district),
                       Contraception, binomial, method = 'Laplace'))
}
## Binomial fits on MASS's bacteria data, if MASS is available.
if (require('MASS', quietly = TRUE)) {
    bacteria$wk2 <- bacteria$week > 2
    contrasts(bacteria$trt) <-
        structure(contr.sdif(3),
                  dimnames = list(NULL, c("diag", "encourage")))
    print(fm5 <- lmer(y ~ trt + wk2 + (1|ID), bacteria, binomial))
    print(system.time(fm5 <- lmer(y ~ trt + wk2 + (1|ID), bacteria, binomial),
                      gc = TRUE))
    print(fm6 <- lmer(y ~ trt + wk2 + (1|ID), bacteria, binomial,
                      method = 'Laplace'))
    print(system.time(lmer(y ~ trt + wk2 + (1|ID), bacteria, binomial,
                           method = 'Laplace'), gc = TRUE))
    ## print(fm6a <- lmer(y ~ trt + wk2 + (1|ID), bacteria, binomial,
    ##                    method = 'AGQ'))
    ## print(system.time(lmer(y ~ trt + wk2 + (1|ID), bacteria, binomial,
    ##                        method = 'AGQ'), gc = TRUE))
}
## Invalid factor specification -- used to seg.fault:
set.seed(1)
dat <- data.frame(y = round(10*rnorm(100)), lagoon = factor(rep(1:4,each = 25)),
                  habitat = factor(rep(1:20, each = 5)))
r1 <- lmer(y ~ habitat + (1|habitat:lagoon), data = dat) # ok
try(
reg <- lmer(y ~ habitat + (1|habitat*lagoon), data = dat) # did seg.fault
) # now gives error  ^- should be ":"
### mcmcsamp() :
## From: Andrew Gelman <gelman@stat.columbia.edu>
## Date: Wed, 18 Jan 2006 22:00:53 -0500
has.coda <- require(coda)
if(!has.coda)
    cat("'coda' package not available; some outputs will look suboptimal\n")
## Very simple example
y <- 1:10
group <- gl(2,5)
(M1 <- lmer (y ~ 1 + (1 | group))) # works fine
(r1 <- mcmcsamp (M1)) # dito
r2 <- mcmcsamp (M1, saveb = TRUE) # gave error in 0.99-* and 0.995-[12]
(r10 <- mcmcsamp (M1, n = 10, saveb = TRUE))
## another one, still simple
y <- (1:20)*pi
x <- (1:20)^2
group <- gl(2,10)
M1 <- lmer (y ~ 1 + (1 | group)) # << MM: why is the "1 + " needed ?
mcmcsamp (M1, n = 2, saveb=TRUE) # fine
M2 <- lmer (y ~ 1 + x + (1 + x | group)) # false convergence
## should be identical (and is)
M2 <- lmer (y ~ x + ( x | group))# false convergence -> simulation doesn't work:
if(FALSE) ## try(..) fails here (in R CMD check) [[why ??]]
mcmcsamp (M2, saveb=TRUE)
## Error: inconsistent degrees of freedom and dimension ...
## mcmc for glmer:
rG1k <- mcmcsamp(fm3., n = 1000)
summary(rG1k)
rG2 <- mcmcsamp(fm4, n = 3, verbose = TRUE)
# convergence on boundary warnings
load(system.file("external/test3comp.rda", package = "Matrix"))
b3 <- lmer(Y3 ~ (1|Sample) + (1|Operator/Run), test3comp)
## Early-childhood data, only if the optional mlmRev package is available.
if (isTRUE(try(data(Early, package = 'mlmRev')) == 'Early')) {
    Early$tos <- Early$age - 0.5 # time on study
    b1 <- lmer(cog ~ tos + trt:tos + (tos|id), Early,
               control = list(msV = TRUE, nit=0))
}
## Spencer Graves' example (from a post to S-news, 2006-08-03): ----------------
## FIXME?
tstDF <- data.frame(group = letters[1:5], y = 1:5)
var(tstDF$y) # == 2.5
f.oops <- lmer(y ~ 1 + (1|group), data = tstDF)
summary(f.oops) ## or print(Matrix:::formatVC(VarCorr(f.oops)), quote = FALSE)
## ...
##   Groups   Name        Variance Std.Dev.
##   group    (Intercept) 1.81818  1.34840
##   Residual             0.68182  0.82572
## ...
##SG>  This is ... silly, because there are zero degrees of freedom
##SG> to distinguish "group" from Residual.  It is comforting that the sum of
##SG> the variances sum to the variance of "y", ......
##SG> However, I would prefer to have the multilevel software catch this
##SG> case and optionally return an error or drop the redundant group
##SG> with a warning.
cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons''
|
b1e807c61a881e44d7cf95900526baa9657f6361 | 1d2266e12300818fa40e6350a2b79b372de510b5 | /man/volleyxml.Rd | b327c36cc41657abad0b3e47408ab58388dd28c9 | [
"MIT"
] | permissive | openvolley/volleyxml | ba6951930cc2659b2213666211b391b8243bd072 | 5acf20f4dd0f49fcc5a4f598fbce799944fabcb2 | refs/heads/master | 2023-02-19T19:29:47.489954 | 2021-01-15T06:45:45 | 2021-01-15T06:45:45 | 132,218,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 261 | rd | volleyxml.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/volleyxml.R
\docType{package}
\name{volleyxml}
\alias{volleyxml}
\title{\pkg{volleyxml}}
\description{
Provides basic functions for parsing volleyball scouting files in XML format.
}
|
1e3c9e0234151852cf42e0abb3b710745dcbdc02 | 9bfa123022e7055ecd62e58dc3375539f079c3b9 | /R/pixelate.R | 9c1691dca6708d10be706b0bf8c73c2a00b48638 | [] | no_license | FinYang/pixelaRt | da7fdd038bc88c84645d30a30c261935db768890 | ad1116e89ca5430d287bb39c64c011265e62760a | refs/heads/main | 2023-08-26T01:07:46.311409 | 2021-11-01T16:26:16 | 2021-11-01T16:26:16 | 423,536,631 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,013 | r | pixelate.R | #' @export
pixelate <- function(img,
                     scale = 8,
                     pixel_width = NULL,
                     pixel_height = NULL,
                     keep_resolution = TRUE,
                     output_width = NULL,
                     output_height = NULL) {
  # Pixelate a magick image by shrinking it to a small "pixel grid" and then
  # re-enlarging it with point (nearest-neighbour) sampling.
  #
  # Args:
  #   img: a magick image object.
  #   scale: target pixel-grid size as a percentage of the original
  #     dimensions; values outside (0, 50] fall back to the default 8.
  #     Ignored when pixel_width/pixel_height are supplied.
  #   pixel_width, pixel_height: explicit pixel-grid size; if only one is
  #     given the other is derived from the image's aspect ratio.
  #   keep_resolution: if TRUE (default) the output keeps the original
  #     resolution; if FALSE the output stays at the pixel-grid size.
  #   output_width, output_height: explicit output size (either or both);
  #     a single given dimension is completed from the aspect ratio.
  #
  # Returns: the pixelated magick image.
  img_info <- image_info(img)
  img_height <- img_info$height
  img_width <- img_info$width
  aspect_ratio <- img_width / img_height

  if (is.null(pixel_width) && is.null(pixel_height)) {
    # Percentage-based sizing.  An invalid `scale` falls back to 8%.
    pct <- if (!is.null(scale) && scale > 0 && scale <= 50) {
      scale * 0.01
    } else {
      8 * 0.01
    }
    # Large images get a proportionally coarser grid.
    if (img_width > 800 || img_height > 800) {
      pct <- pct * 0.25
    }
    scaled_w <- img_width * pct
    scaled_h <- img_height * pct
  } else {
    # Explicit pixel sizing; complete a missing dimension from the ratio.
    if (is.null(pixel_height)) {
      pixel_height <- pixel_width / aspect_ratio
    } else if (is.null(pixel_width)) {
      pixel_width <- pixel_height * aspect_ratio
    }
    scaled_w <- pixel_width
    scaled_h <- pixel_height
  }

  # Downscale to the pixel grid.  (The original code performed this
  # assignment inside the image_resize() call, which is an anti-pattern.)
  pixel_size <- geometry_size_pixels(width = scaled_w, height = scaled_h)
  temp_img <- image_resize(img, pixel_size)
  message("pixel size: ", pixel_size)

  # Work out the final output dimensions.
  if (sum(is.null(output_width), is.null(output_height)) == 1) {
    # Exactly one output dimension given: derive the other from the ratio.
    if (!is.null(output_width)) {
      img_width <- output_width
      img_height <- img_width / aspect_ratio
    } else {
      img_height <- output_height
      img_width <- img_height * aspect_ratio
    }
  } else if (!is.null(output_width) && !is.null(output_height)) {
    img_width <- output_width
    img_height <- output_height
  } else if (!keep_resolution) {
    # Bug fix: the original assigned pixel_width/pixel_height here, which
    # are NULL when the scale-based path was taken; use the actual grid size.
    img_width <- scaled_w
    img_height <- scaled_h
  }

  # Upscale back with point sampling so the pixel blocks stay sharp.
  image_resize(temp_img,
               geometry_size_pixels(width = img_width,
                                    height = img_height),
               filter = "point")
}
|
5ff26dfa4846fcffbf4af9cca7501a858a357993 | 51830cdacf31aa85281af6673eaf364ddf815c30 | /man/do.survival.Rd | d9f48209d2bf3be28c6a689e30f116324562af61 | [] | no_license | niko-balanis/small.cell.project | 1895cdc9a716d50984b1958cded6f44ea9da8c35 | ddcd781aa5db10e6012be59d2a50f3a59fe578e3 | refs/heads/master | 2021-10-22T22:30:41.411050 | 2019-03-13T02:44:46 | 2019-03-13T02:44:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 891 | rd | do.survival.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/do.survival.R
\name{do.survival}
\alias{do.survival}
\title{does survival analysis}
\usage{
do.survival(threshold = 0, pattern, path, output_folder,
component = "comp.1", pred_file = T, plsda = F)
}
\arguments{
\item{threshold}{threshold to use to pick samples}
\item{pattern}{pattern of prediction files}
\item{path}{location of prediction files}
\item{output_folder}{the output folder}
\item{component}{component to use}
\item{pred_file}{using scores or a file with predictions}

\item{plsda}{logical; whether to use PLS-DA (default \code{FALSE})}
}
\description{
Performs a survival analysis. Reads sample-prediction files matching
\code{pattern} from \code{path}, selects samples using \code{threshold} on the
chosen \code{component}, and writes the results to \code{output_folder}.
}
|
c977eb6614f96e493070e2f2b1c4862f5dfe73cd | fb02e020729ff504edfb0f309a6955e06fba1862 | /man/get_metadata.Rd | 63514bb42f6ba8a406f137e51c6ea0d347024a8d | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | govtmirror/GDopp | c31a49218258c0baec573e8f7fc86bf7a7856a60 | 714584d64530f4cd7b74a80b801a4ee9f1f0481b | refs/heads/master | 2020-12-24T18:55:41.895481 | 2014-08-29T16:27:19 | 2014-08-29T16:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 605 | rd | get_metadata.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{get_metadata}
\alias{get_metadata}
\title{get measurement details}
\usage{
get_metadata(deploy_nm, folder_nm)
}
\arguments{
\item{deploy_nm}{a valid deployment name (string)}
\item{folder_nm}{the folder where the deployment files reside.}
}
\value{
a list of measurement metadata
}
\description{
get measurement details
}
\details{
a \code{GDopp} function for extracting sampling info from file.\cr
}
\examples{
folder_nm <- system.file('extdata', package = 'GDopp')
deploy_nm <- "ALQ102"
get_metadata(deploy_nm, folder_nm)
}
\keyword{methods}
|
08427942b7d9b052ce3853c02b56fd2ac050fcaa | 2b850f9fdfa54159f05553050ad21600e6a58246 | /R/plot.palaeoSig.R | ca220f0c12fe18bf67e7bfa98a4aaa943049ecd3 | [] | no_license | cran/palaeoSig | f1dece921733d50a2915d47bd188ecdc32392ced | 29f25c31bf18651a5d50928ebe61f2b247f22962 | refs/heads/master | 2023-03-15T03:17:27.916002 | 2023-03-10T08:30:02 | 2023-03-10T08:30:02 | 17,698,185 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,949 | r | plot.palaeoSig.R | #' @describeIn randomTF Plot palaeoSig object
#' @param x Output from randomTF
#' @param variable_names Names of environmental variables. If missing, taken
#' from \code{env} data.frame.
#' @param top Proportion of the figure below the environmental name labels.
#' @param adj Adjust the position that the environmental names are plotted at.
#' @param p_val P value to draw a line vertical line at (with which=2)
#'
#' @importFrom graphics hist lines text strwidth
#' @importFrom stats quantile
#' @method plot palaeoSig
#' @export
#'
plot.palaeoSig <- function(x, variable_names, top = 0.7,
                           adj = c(0, 0.5), p_val = 0.05, ...) {
  # Default the variable labels to the names of the observed statistics.
  if (missing(variable_names)) {
    variable_names <- names(x$EX)
  }
  # Histogram of the null distribution (variance explained by random
  # transfer functions); x-axis extends a little past the maximum (PC1).
  hist(x$sim.ex,
    breaks = seq(min(x$sim.ex), max(x$sim.ex), length = 20),
    xlim = c(0, x$MAX[1] * 1.1),
    main = "", xlab = "Proportion variance explained",
    col = "grey80", border = NA, ...
  )
  # Query the freshly drawn plot region once; par("usr")[4] is its upper
  # y-limit (previously fetched repeatedly via par()$usr).
  usr_top <- par("usr")[4]
  # Vertical markers stop at a fraction `top` of the plot height.
  tops <- usr_top * top
  # One vertical line per environmental variable. A for loop replaces the
  # earlier sapply(), whose return value was discarded (side effects only).
  for (z in x$EX) {
    lines(rep(z, 2), c(0, tops))
  }
  # Maximum explainable variance (dotted black) and the simulated
  # significance threshold at 1 - p_val (dotted red).
  lines(rep(x$MAX, 2), c(0, tops), col = 1, lwd = 2, lty = 3)
  lines(rep(quantile(x$sim.ex, probs = 1 - p_val), 2), c(0, tops),
    col = 2, lwd = 1, lty = 3
  )
  # Spread the variable labels horizontally so they do not overlap, then
  # draw them rotated above the marker lines.
  put <- TeachingDemos::spread.labs(x$EX, 1.2 * strwidth("A", cex = .8))
  text(put, usr_top * .71,
    label = variable_names,
    srt = 90, adj = adj, cex = .8
  )
}
#' @describeIn randomTF autoplot function for palaeoSig object
#' @param object Output from randomTF
#' @param nbins integer giving number of bins for the histogram
#' @importFrom ggplot2 autoplot ggplot aes geom_col geom_linerange geom_text
#' scale_colour_identity scale_linetype_identity labs
#' @importFrom tibble tibble
#' @importFrom rlang .data
#' @importFrom stats quantile
#' @importFrom ggrepel geom_text_repel
#' @method autoplot palaeoSig
#' @export
autoplot.palaeoSig <- function(object, variable_names,
                               nbins = 20, top = 0.7, p_val = 0.05, ...) {
  # Fall back to the names of the observed statistics for labelling.
  if (missing(variable_names)) {
    variable_names <- names(object$EX)
  }
  # Prepare the plotting data, then hand off the actual drawing.
  prepared <- fortify_palaeosig(
    sim = object$sim,
    EX = object$EX,
    PC1 = object$MAX,
    variable_names = variable_names,
    p_val = p_val,
    nbins = nbins,
    top = top
  )
  autoplot_sig(prepared, xlab = "Proportion variance explained", xmin = 0)
}
#' @importFrom tibble tibble lst
#' @importFrom rlang .data
fortify_palaeosig <- function(sim, variable_names, p_val, nbins,
                              top, PC1 = NA, EX) {
  # Bin the simulated values into nbins equal-width classes.
  bin_breaks <- seq(min(sim), max(sim), length = nbins + 1)
  bin_id <- cut(sim, breaks = bin_breaks, include.lowest = TRUE)
  sim_bin <- tibble(
    mid_point = (bin_breaks[-length(bin_breaks)] + bin_breaks[-1]) / 2,
    n = as.vector(table(bin_id))
  )
  # Bar width for geom_col: spacing between adjacent bin mid-points.
  width <- diff(sim_bin$mid_point[1:2])
  # Vertical reference lines: PC1 (dashed), the 1 - p_val quantile of the
  # null distribution (dotted red), and one solid line per variable.
  annotated <- tibble(
    label = c("PC1", paste("p =", p_val), variable_names),
    value = c(PC1, quantile(sim, probs = 1 - p_val), EX),
    max = max(sim_bin$n) * top,
    linetype = c("dashed", "dotted", rep("solid", length(variable_names))),
    colour = c("black", "red", rep("black", length(variable_names)))
  )
  # Drop rows whose position is missing (e.g. PC1 = NA).
  lines_to_add <- filter(annotated, !is.na(.data$value))
  lst(sim_bin, lines_to_add, width)
}
#' @importFrom ggplot2 xlim
autoplot_sig <- function(x, xlab, xmin) {
  # Layers are built separately and combined below; drawing order is
  # preserved (bars, then reference lines, then their labels).
  bars <- geom_col(fill = "grey70", width = x$width)
  ref_lines <- geom_linerange(
    data = x$lines_to_add,
    aes(
      x = .data$value, ymin = 0, ymax = .data$max,
      linetype = .data$linetype, colour = .data$colour
    ),
    inherit.aes = FALSE
  )
  ref_labels <- geom_text_repel(
    data = x$lines_to_add,
    aes(
      x = .data$value, y = .data$max, label = .data$label,
      colour = .data$colour
    ),
    angle = 90, hjust = .5, vjust = 0, direction = "x"
  )
  # Identity scales: colour/linetype columns already hold literal values.
  p <- ggplot(x$sim_bin, aes(x = .data$mid_point, y = .data$n)) +
    bars +
    ref_lines +
    ref_labels +
    scale_colour_identity() +
    scale_linetype_identity() +
    xlim(xmin, NA_real_) +
    labs(x = xlab, y = "Frequency")
  return(p)
}
|
3381aef8930af9d1bea94e30da22312c88b54e1f | 582e96e874c9e8e907bbb82fe1e7fd654211d619 | /IMBD_db.R | 29042b358f74c4540737345f6be77d036bd619b1 | [] | no_license | prashantbhuyan/Data-Exploration-Visualization | 2649134a63ee24d6fbd409210f044da93ead4602 | 32fcfb5d348a120c5ccedfcc0d9452e0adc24d6c | refs/heads/master | 2016-08-03T14:56:27.209933 | 2015-01-15T19:15:51 | 2015-01-15T19:15:51 | 24,250,897 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,690 | r | IMBD_db.R | # Prashant B. Bhuyan
# Exploration & Visualization IMBD Database
##### Problem 1 #####
# Solution (See attached image 'Movie Titles By Decade.png')
# Certainly according to the visualization the number of movies produced
# grows substantially each decade. Perhaps this is due to better production and
# distribution technology and perhaps due to population growth and globalization.
library(ggplot2)
library(plyr)
# `movies` is assumed to be attached already (historically shipped with
# ggplot2 / the ggplot2movies package) -- TODO confirm where it is loaded.
# `data` is a plain copy; it is reassigned again in later sections.
data <- movies
# round_any() (plyr) buckets each release year into its decade.
movies$decade <- round_any(movies$year, 10)
# Build the plot in steps; note `g` is never printed here, so evaluate `g`
# interactively to render it.
g <- ggplot(movies, aes(x = title, y = year, color = decade, group = decade))
x <- geom_line()
g <- g + x
##### Problem 2 #####
# Solution (See attached image 'Avg Rating By Genre.png' and 'Avg Rating Over the Years.png').
# Documentaries get the highest average rating of any genre whereas
# Action gets the lowest average rating of any genre.
#
# Over the years, ratings became much more varied. In the 1920's,
# most films got ratings better than a 5. You could infer that
# the overall quality of films was better until the 1970's or that
# critics became more lax in their critiques. It seems that
# you could delve further into the question by analyzing the
# important factors like technological advancement and population growth
# and globalization to see if there are just more movies being produced
# for all types of people some of which are low budget 'bollywood' types of
# movies that are mass produced to entertain large swaths of the global population.
# Maybe the cost of making movies in the 1920's was so high that
# the quality of the movie was very important to producers.
library(ggplot2)
library(reshape2)
data <- movies
# Single-genre labelling: columns 18:24 of `movies` are the 0/1 genre
# indicator columns, so rowSums() counts how many genres each film has.
size <- rep(NA, nrow(movies))
genres <- rowSums(movies[, 18:24])
# Label each single-genre film with its genre name. A loop over the
# indicator columns replaces seven copy-pasted assignments (DRY);
# which() is kept so the behaviour matches the original exactly.
single_genres <- c("Action", "Animation", "Comedy", "Drama",
                   "Documentary", "Romance", "Short")
for (genre_name in single_genres) {
  size[which(genres == 1 & movies[[genre_name]] == 1)] <- genre_name
}
size[which(genres > 1)] <- "MultiGenre"
size[which(genres < 1)] <- "NoGenre"
movies$size <- as.factor(size)
# Avg Ratings by Genre
plot <- ggplot(movies, aes(x = size, y = rating)) +
  geom_boxplot() +
  labs(title = "Avg Rating By Genre", x = "Genre", y = "Avg Rating")
# Avg Ratings Over the Years
# Bug fix: labs() was called with a capital `Y`, which ggplot2 silently
# ignores, leaving the default "rating" axis label; it must be lowercase.
plot_z <- ggplot(movies, aes(x = year, y = rating)) +
  geom_point() +
  labs(title = "Avg Ratings Over the Years", x = "Years", y = "Avg Ratings")
##### Problem 3 #####
# Solution
# please see attached image 'Rating vs Length.png'.
library(ggplot2)
library(reshape2)
library(plyr)
data <- movies
# Scatter of movie length (y) against rating (x); stored in a variable,
# so it renders only when `rating_vs_length` is printed.
rating_vs_length <- ggplot(data, aes(rating, length)) + geom_point() + labs(title = "Rating vs Length", x = "Rating", y = "Length")
# Answer: There is a slight positive relationship between rating and length of movies. There are certainly outliers where very long movies get ratings below 5. Further
# there are very short movies that get very high ratings. As such, the relationship is very weak but positive nevertheless.
##### Problem 4 #####
# Solution
#
# There is a relationship between Genres and Length. Shorts are
# the shortest in length and Documentaries, Dramas and Multi Genre
# films are the longest in length. Animations are also fairly short
# movies perhaps due to high cost of development. Films that have
# no genre seem to be very long but I'm not sure why that is.
# Please see 'Movie Genres vs Length.png'.
library(ggplot2)
data <- movies
# Re-derive the single-genre label (same scheme as Problem 2: columns
# 18:24 are the 0/1 genre indicators, rowSums() counts genres per film).
size <- rep(NA, nrow(movies))
genres <- rowSums(movies[, 18:24])
single_genres <- c("Action", "Animation", "Comedy", "Drama",
                   "Documentary", "Romance", "Short")
for (genre_name in single_genres) {
  size[which(genres == 1 & movies[[genre_name]] == 1)] <- genre_name
}
size[which(genres > 1)] <- "MultiGenre"
size[which(genres < 1)] <- "NoGenre"
movies$size <- as.factor(size)
# Bug fix: the original plotted ggplot(data, aes(movies$size, movies$length)),
# i.e. `$`-references inside aes() that bypass the data argument entirely
# (necessarily so, because `data` was copied before `size` existed).
# Plot straight from `movies` instead; the plotted values are identical.
movies_length_vs_genres <- ggplot(movies, aes(size, length)) +
  geom_line() +
  labs(title = "Genres vs Length", x = "Genres", y = "Movie Length")
##### Problem 5 #####
# Solution
#
# Clearly, the variable that best predicts votes is the rating of the movie. Please see
# attached scatter plot called 'Votes vs Rating.png' and you'll see a strong positive relationship.
library(ggplot2)
data <- movies
# Rating is the variable that best predicts votes (strong positive
# relationship; see 'Votes vs Rating.png').
# Fixed: `<-` for top-level assignment instead of `=`.
votes_vs_rating <- ggplot(data, aes(rating, votes)) +
  geom_point() +
  labs(title = "Rating vs Votes", x = "Rating", y = "Votes")
|
60774f1333fdedc95bcfa8175ebfafae8929b193 | 2c4bb3cc1eb9f992f0a9f21392eccfc6b2ca2382 | /man/FindAncestor.Rd | 976f55dd8b4efebb377ca8eff0c88f3c4d151cf8 | [] | no_license | Christophe-Hendrickx/Claddis | f98be53fa60408825a39e923b97da9ec1be03446 | c5dfba9f5a043e49d31d860cd8cc99d6ac392916 | refs/heads/master | 2020-12-29T00:26:18.332760 | 2014-11-26T01:02:50 | 2014-11-26T01:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 637 | rd | FindAncestor.Rd | \name{FindAncestor}
\alias{FindAncestor}
\title{
Find ancestor
}
\description{
Finds last common ancestor (node) of a set of two or more descendant tips.
}
\usage{
FindAncestor(descs, tree)
}
\arguments{
\item{descs}{A vector of mode character representing the tip names for which an ancestor is sought.}
\item{tree}{The tree as a phylo object.}
}
\details{
Intended for use as an internal function for \link{TrimMorphDistMatrix}, but potentially of more general use.
}
\value{
\item{anc.node}{The ancestral node number.}
}
\author{
Graeme T. Lloyd \email{graemetlloyd@gmail.com}
}
\examples{
# Nothing yet
}
\keyword{ancestor} |
755d81c17c435585e3721f68735e148406ed6687 | 39fd119fb2e6e8df0e1b08c6c99b3c9965d2280a | /NCS16.R | 7a301c2652f8520113c806627b1f685130594549 | [] | no_license | DuggalM/R-Codes | 8ecba0bc816351574339de2bee0734d3adb379a2 | 84400afb925fc6deba89f406570f9461482b9f52 | refs/heads/master | 2021-01-21T14:48:09.096090 | 2019-02-14T16:31:17 | 2019-02-14T16:31:17 | 56,999,481 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,373 | r | NCS16.R | library(plotly)
library(plyr)
knitr::opts_chunk$set(warning=FALSE, message=FALSE)
knitr::opts_chunk$set(fig.width = 30, fig.height = 30, fig.show = "asis")
#' Title: "NCS16"
#' Author: "Mausam Duggal"
#' Date: "Nov 11th, 2015"
#' set working directory and batch in data
wd <- setwd("c:/personal/r")
#' This reflects the 2006 VDF definitions
#' and encompasses the entire GGHM network
data <- read.csv("Links_scenario12014_nov11.csv")
#+ take out centroid connectors and non-road links
data1.sub <- subset(data, VDF!=90 & VDF!=0)
#' Plot interactive 3-D scatter plot
plot_ly(data1.sub, x = VDF, y = ul3, z=ul2, text = paste("Density: ", Density_km2),
type="scatter3d", mode="markers", filename="c:/personal/r")
#' plot interactive facet scatter plots
p <- ggplot(data = data1.sub, aes(x = ul3, y = ul2, color = factor(Orig))) +
geom_point(aes(text = paste("Density:", Density_km2)), size = 3) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=.5,colour='black', size=10)) +
theme(axis.title = element_text(size = 25)) +
facet_wrap(~ Orig, nrow=8)
(gg <- ggplotly(p))
# ggplot(data = data.sub, aes(x = ul3, y = ul2)) +
# geom_point(aes(text = paste("Density:", Density_km2)), size = 4) +
# facet_wrap(~ VDF, nrow=8)
#volcano plot
ggplot(data1.sub, aes(x = ul3)) +
stat_density(aes(ymax = ..count.., ymin = -..count..,
fill = VDF, color = VDF),
geom = "ribbon", position = "identity") +
facet_grid(. ~ VDF) +
coord_flip() +
theme(legend.position = "none")
# #' Plot interactive 3-D scatter plot
# plot_ly(data.sub, x = VDF, y = ul3, z=ul2, text = paste("Density: ", Density_km2),
# type="scatter3d", mode="markers", filename="c:/personal/r")
#' This reflects the TMG VDF definitions
#' and encompasses the only the GTHA network
data2.sub <- subset(data1.sub, From<=70000)
#' plot interactive facet scatter plots
p <- ggplot(data = data2.sub, aes(x = ul3, y = ul2, color = factor(TMG))) +
geom_point(aes(text = paste("Density:", Density_km2)), size = 3) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=.5,colour='black', size=10)) +
theme(axis.text.y=element_text(size=15))+
theme(axis.title = element_text(size = 25)) +
facet_wrap(~ TMG, nrow=8)
(gg <- ggplotly(p))
|
7ee8dd9308a9f2e6bb2a1bd7d6b12bd94859b816 | 91b06471fd5d6ef45b4173b8ed490d17789359e0 | /script.r | 2aec9beb3924acd38ecf0ae739e9c2bf89555373 | [] | no_license | CarlosLCV/Repo-4 | 3e7118f552e2bf5d4b0631524f82b88e20830e3e | be1564f7ea604c03ec352d4acb0c1a6fd2672188 | refs/heads/master | 2020-12-27T06:47:41.584182 | 2020-02-02T16:42:35 | 2020-02-02T16:42:35 | 237,801,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 38 | r | script.r | print("Hola mundo")
x <- 0
x <- x + 1
|
caef7f5afb8596973834f74bb1aa91d8dd503094 | 4097f2ea4734703125c04f3fa19babe7af2f7903 | /functions/refreshing/convex_hulls_3D.R | 83eb9b6a360fd439591c0e5df06b3cdf48f6b8c2 | [
"MIT"
] | permissive | PavlopoulosLab/NORMA | 4aac7f61cfd42346f1b5e9fec8152d9bf29dce81 | 3073b5f0516117346513826b510df6380eba605b | refs/heads/master | 2022-05-05T06:52:32.354089 | 2022-04-29T17:02:00 | 2022-04-29T17:02:00 | 245,136,633 | 15 | 8 | null | null | null | null | UTF-8 | R | false | false | 16,494 | r | convex_hulls_3D.R | convex_hull_3D <- function() {
set.seed(123)
g <- fetchFirstSelectedStoredIgraph_annotations_tab()
if (is.null(g))
return()
# dataset <- get.edgelist(g)
dataset <- fetchFirstSelectedStoredDataset_annotations_tab()
if (is.null(dataset))
return()
original_dataset_weighted <- fetchFirstSelectedStoredDataset_annotations_tab()
if (is.null(original_dataset_weighted))
return(NULL)
annotation_graph <- fetchFirstSelectedStoredGroups2_annotations_tab()
if (is.null(annotation_graph))
return()
#---------------------------------------------------------------#
my_network<- as.data.frame(get.edgelist(g))
my_network<- data.frame(Source = my_network$V1, Target = my_network$V2)
groups <- as.data.frame(annotation_graph)
groups<- data.frame(V1 = groups$Annotations, stri_split_fixed(groups$Nodes, ",", simplify = TRUE))
groups<-mutate_all(groups, funs(na_if(.,"")))
number_of_groups<-dim(groups)[1]
x <- list()
for (i in 1:number_of_groups) {
group_i<- groups[i,]
group_i<- group_i[,-1]
group_i <- group_i[!is.na(group_i)]
x[[i]]<- (group_i)
}
GO <- list()
for (i in 1:number_of_groups) {
GO[[i]]<-rep(groups[i,1], length(x[[i]]))
}
column1<-my_network$Source
column2<-my_network$Target
node_names<-unique(union(column1, column2))
tt<-unlist(x)
nodes_with_NA_groups<-setdiff(node_names,tt)
members <- data_frame(id=unlist(x),group = unlist(GO))
members_with_NA_groups <- data_frame(id=unlist(x),group = unlist(GO))
#----------------------------------------------------------------#
#-------------------------------------------------------------------------#
new_nodes <- unique(union(dataset[,1], dataset[,2]))
new_annot <- annotation_graph %>%
mutate(V2 = strsplit(as.character(annotation_graph$Nodes), ",")) %>%
unnest(V2)
intersect_g_annot <- intersect(new_nodes, new_annot$V2)
if(identical(intersect_g_annot, character(0))){
showModal(modalDialog(
title = "Important message",
"Please check if the selected annotation file corresponds to the selected network.",
easyClose = T
))
}
#-------------------------------------------------------------------------#
set.seed(123)
lay <- layout_choices_3D(g, lay)
#----------------------------------------------------------------#
scene_scale_x_max <- max(lay[,1])*scaling_coordinates_convex_3D_X()
scene_scale_x_min <- min(lay[,1])*scaling_coordinates_convex_3D_X()
scene_scale_y_max <- max(lay[,2])*scaling_coordinates_convex_3D_Y()
scene_scale_y_min <- min(lay[,2])*scaling_coordinates_convex_3D_Y()
scene_scale_z_max <- max(lay[,3])*scaling_coordinates_convex_3D_Z()
scene_scale_z_min <- min(lay[,3])*scaling_coordinates_convex_3D_Z()
#----------------------------------------------------------------#
# lay <- lay*scaling_coordinates_convex_3D()
#-- Scaling coordinates --#
coorx<- lay[,1]*scaling_coordinates_convex_3D_X()
lay <- cbind(coorx,lay[,2:ncol(lay)])
coory<- lay[,2]*scaling_coordinates_convex_3D_Y()
lay <- cbind(lay[,1], coory,lay[,3])
coorz<- lay[,3]*scaling_coordinates_convex_3D_Z()
lay <- cbind(lay[,1:2], coorz)
#-------------------------#
colnames(lay) <- c("x", "y", "z")
# node_names_3D <- unique(union(dataset[,1], dataset[,2]))
#---------------------------------#
node_names_3D <- names(V(g))
node_name_links_3D <- names(V(g))
#--------------------------------#
if(length(node_names_3D) != nrow(lay)){
showModal(modalDialog(
title = "Important message",
"Please check if the selected annotation file corresponds to the selected network.",
easyClose = T
))
}
else{
node_names_with_coords <- data.frame("Source" = node_names_3D,
"x" = lay[,1],
"y" = lay[,2],
"z" = lay[,3])
}
#--- hash table for names and coordinates ---#
coordinates_hashmap <- new.env(hash = TRUE)
for(i in 1:nrow(node_names_with_coords)){
coordinates_hashmap[[as.character(node_names_with_coords[i,1])]]<-c(node_names_with_coords[i,2], node_names_with_coords[i,3], node_names_with_coords[i,4])
}
#---------------------------------------------------#
nrowdat <- nrow(dataset)
nrowannot <- nrow(annotation_graph)
if(!(is.weighted(g))){
original_dataset_weighted <- cbind(original_dataset_weighted[,1:2],"Weight"=rep(0.5, nrow(original_dataset_weighted)))
} else{
minx <- min(original_dataset_weighted[,3])
maxx <- max(original_dataset_weighted[,3])
scaling_weight_values<- c()
for (i in 1:nrow(original_dataset_weighted)){
scaling_weight_values_i <- mapper(original_dataset_weighted[i,3], minx, maxx, 0.5, 20)
scaling_weight_values <- c(scaling_weight_values, scaling_weight_values_i)
}
if(maxx > 50){
original_dataset_weighted <- cbind(original_dataset_weighted[,1:2],"Weight"=scaling_weight_values)
}
}
if (length(s)==0)
{
s<-c(1:nrowannot)
}
if (length(s)) {
s<-sort(s)#-----------------------------------
x<- length(s)
dataset1 <- get.edgelist(g)
dataset1 <- data.frame("Source" = dataset[,1], "Target" = dataset[,2])
source <- matrix(ncol=3, nrow=nrow(dataset1))
target <- matrix(ncol=3, nrow=nrow(dataset1))
for(i in 1:nrow(dataset1)){
source[i,] <- coordinates_hashmap[[as.character(dataset1[i,1])]]
target[i,] <- coordinates_hashmap[[as.character(dataset1[i,2])]]
}
edges_for_plotly <- data.frame("Source.x" = source[,1], "Source.y" = source[,2], "Source.z" = source[,3],
"Target.x" = target[,1], "Target.y" = target[,2], "Target.z" = target[,3])
# x<- c()
# for(i in 1:nrow(edges_for_plotly)){
# if(i == nrow(edges_for_plotly)){
# a <- paste(edges_for_plotly$Source.x[i], edges_for_plotly$Target.x[i], "null", sep = "," )
# }else{
# a <- paste(edges_for_plotly$Source.x[i], edges_for_plotly$Target.x[i], "null,", sep = "," )
#
# }
# x<- c(x, a)
# }
#
# x <- as.vector(unlist(x))
# x <- paste(x, collapse=" ")
#
# y<- c()
# for(i in 1:nrow(edges_for_plotly)){
# if(i == nrow(edges_for_plotly)){
# a = paste(edges_for_plotly$Source.y[i], edges_for_plotly$Target.y[i], "null", sep = "," )
# }else{
# a = paste(edges_for_plotly$Source.y[i], edges_for_plotly$Target.y[i], "null,", sep = "," )
# }
# y<- c(y, a)
# }
#
# y <- as.vector(unlist(y))
# y <- paste(y, collapse=" ")
#
#
# z<- c()
# for(i in 1:nrow(edges_for_plotly)){
# if(i == nrow(edges_for_plotly)){
# a = paste(edges_for_plotly$Source.z[i], edges_for_plotly$Target.z[i], "null", sep = "," )
# }else{
# a = paste(edges_for_plotly$Source.z[i], edges_for_plotly$Target.z[i], "null,", sep = "," )
# }
# z<- c(z, a)
# }
#
# z <- as.vector(unlist(z))
# z <- paste(z, collapse=" ")
#--------------------------------------------------------------------#
fileConn <- file(paste(USER_TEMP_FOLDER, "/convex_3D_", session$token,".html", sep=""), "w")
#--------------------------------------------------------------------#
cat(sprintf("<!DOCTYPE html>
<head>
<script src=\"https://cdn.plot.ly/plotly-latest.min.js\"></script>
</head>
<body>
<!-- Plotly chart will be drawn inside this div -->
<div id='plotly-div' div>
<script>" ), file = fileConn)
# cat(sprintf("
#
# trace_edges = {
# uid: 'bcd52d',
# line: {
# color: 'rgb(125,125,125)',
# width: 0.5
# },
# mode: 'lines',
# name: 'Edges',
# type: 'scatter3d',"), file = fileConn)
#---------------------------------------#
for(i in 1:nrow(edges_for_plotly)){
x_trial <- paste(edges_for_plotly$Source.x[i], edges_for_plotly$Target.x[i], sep = "," )
cat(sprintf(paste("trace_edges_",i," = {
uid: 'bcd52d',
line: {
color: 'rgb(125,125,125)',
width:", original_dataset_weighted[i,3], "
},
mode: 'lines',
name: 'Edges',
type: 'scatter3d',
x : [", edges_for_plotly$Source.x[i], ",", edges_for_plotly$Target.x[i], "],\n
y : [", edges_for_plotly$Source.y[i], ",", edges_for_plotly$Target.y[i], "],\n
z : [", edges_for_plotly$Source.z[i], "," ,edges_for_plotly$Target.z[i], "], \n
hoverinfo : 'none'};
" , sep="")), file = fileConn)
}
traces_edges<- c()
for(i in 1:nrow(original_dataset_weighted)){
traces_edges_i <- paste("trace_edges_", i, sep = "")
traces_edges<- c(traces_edges,traces_edges_i)
traces_edges <- paste(traces_edges, collapse=",")
}
#--- Nodes ---#
x_nodes <- paste(lay[,1], collapse=",")
y_nodes <- paste(lay[,2], collapse=",")
z_nodes <- paste(lay[,3], collapse=",")
if(show_labels_3D == T){
show_labels <- "markers+text"
}else{
show_labels <- "markers"
}
if(Dark_mode ==T){
textColor <- "textfont: {
color: 'white'
},"
} else{
textColor <- "textfont: {
color: 'black'
},"
}
cat(sprintf(paste("
trace_nodes = {
uid: 'a2e4a0',
mode: '", show_labels , "',
name: 'Nodes',", textColor,"
type: 'scatter3d',", sep="")), file = fileConn)
write(paste("
x : [", x_nodes, "],\n" , sep=""), file = fileConn, append = T)
write(paste("
y : [", y_nodes, "],\n" , sep=""), file = fileConn, append = T)
write(paste("
z : [", z_nodes, "],\n" , sep=""), file = fileConn, append = T)
#---- expression_colors_3D ----#
if (!is.null(getStoredExpressionChoices())){
expression<-fetchFirstSelectedStoredExpression()
colnames(expression) <- c("id", "color")
express_order<- as.data.frame(names(V(g)))
colnames(express_order) <- "id"
expression <- suppressMessages(left_join(express_order, expression, by = "id"))
expression$color<- as.character(expression$color)
for(i in 1:length(expression$color)){
if(Dark_mode ==F){
if(expression$color[i] == "" || is.na(expression$color[i])){
expression$color[i] <- "purple"
}
} else if (expression$color[i] == "" || is.na(expression$color[i])){
expression$color[i] <- "#a419bc"
}
}
}
if (is.null(getStoredExpressionChoices())){
expression<- as.data.frame(names(V(g)))
if(Dark_mode == F){
expression$color <- rep(c("purple"))
} else expression$color <- rep(c("#a419bc"))
colnames(expression) <- c("id", "color")
}
#---------------------------------------------------#
write(paste("
marker: {
cmax: 2,
cmin: 1,
line: {
color: 'rgb(50,50,50)',
width: 0.5
},
size: 3,
symbol: 'dot',
" , sep=""), file = fileConn, append = T)
if(Dark_mode==F){
node_colors <- "purple"
}else{
node_colors <- "#a419bc"
}
if(expression_colors_3D == F){
write(paste("color:'", node_colors, "'" , sep=""), file = fileConn, append = T)
}else{
write(paste("color:[", sep = ""), file = fileConn, append = T)
for(i in 1:nrow(expression)){
write(paste("'", expression$color[i], "'," , sep = ""), file = fileConn, append = T)
}
write(paste("]" , sep = ""), file = fileConn, append = T)
}
#// color: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
#// colorscale: 'Viridis'
write(paste("
},
text: [" , sep=""), file = fileConn, append = T)
annotations_split <- str_split(annotation_graph[,2], ",", simplify = T)
if(show_some_labels_3D == T){
selected <- matrix("", ncol=1, nrow=0)
for(i in 1:length(s)){
selected <- rbind(selected, as.matrix(annotations_split[s[i],]))
}
selected <- unique(selected)
selected <- selected[!is.na(selected)]
selected <- selected[!selected==""]
for(i in 1:length(node_names_3D)){
if (node_names_3D[i] %in% selected){
write(paste("\"", node_names_3D[i], "\",", sep=""), file = fileConn, append = T, sep="")
} else{
write(paste("\"\",", sep=""), file = fileConn, append = T, sep="")
}
}
}else{
for(i in 1:length(node_names_3D)){
if(i == length(node_names_3D)){
cat(sprintf(paste("\"", node_names_3D[i], "\"", sep="")), file = fileConn)
}else{
cat(sprintf(paste("\"", node_names_3D[i], "\"", ",", sep="")), file = fileConn)
}
}
}
cat(sprintf(paste("],\n
hoverinfo: 'text'
};\n", sep="")), file = fileConn)
traces<- c()
for(i in 1:length(s)){
if(i == length(s)){
trace_i <- paste("trace", s[i], sep = "")
} else{
trace_i <- paste("trace", s[i], ",", sep = "")
}
xxxx <- c()
yyyy <- c()
zzzz <- c()
for(j in 1:ncol(annotations_split)){
if(annotations_split[s[i],j] != ""){
xxxx <- c(xxxx, coordinates_hashmap[[ annotations_split[s[i],j] ]][1])
yyyy <- c(yyyy, coordinates_hashmap[[ annotations_split[s[i],j] ]][2])
zzzz <- c(zzzz, coordinates_hashmap[[ annotations_split[s[i],j] ]][3])
}
}
if(length(xxxx) < 5){
xxxx <- paste(xxxx, runif(10, xxxx[1] - 0.05, xxxx[1] + 0.05), sep= ",")
yyyy <- paste(yyyy, runif(10, yyyy[1] - 0.05, yyyy[1] + 0.05), sep= ",")
zzzz <- paste(zzzz, runif(10, zzzz[1] - 0.05, zzzz[1] + 0.05), sep= ",")
}
xxxx<- paste(xxxx, collapse=",")
yyyy<- paste(yyyy, collapse=",")
zzzz<- paste(zzzz, collapse=",")
write(paste("trace",s[i],"= {\n name:'", annotation_graph[s[i],1], "',\n",
"type: 'mesh3d',
x: [", xxxx, "],
y: [", yyyy, "],
z: [", zzzz, "],
color:'", qual_col_pals[s[i]],
"',\n opacity: 0.2,
alphahull: 1, // 1 ορ 0.5
showscale: true,
text: [],
hoverinfo: 'text'
};\n" , sep=""), file = fileConn, append = T)
traces<- c(traces, trace_i)
}
traces<- paste(traces, collapse=" ")
if(Dark_mode == T){
cat(sprintf(paste("var layout = {
paper_bgcolor: 'black',
plot_bgcolor: '#c7c7c7',", sep="")), file = fileConn)
}else{
cat(sprintf(paste("var layout = {", sep="")), file = fileConn)
}
if(show_labels_3D == T){
hovermode <- "hovermode: false,"
}else{
hovermode <- "hovermode: true,"
}
cat(sprintf(paste(hovermode,
"scene: {
xaxis: {
showspikes: false,
range: [",scene_scale_x_min, ",",scene_scale_x_max,"],
title: '',
autorange: false,
showgrid: false,
zeroline: false,
showline: false,
autotick: true,
ticks: '',
showticklabels: false
},
yaxis: {
showspikes: false,
range: [",scene_scale_y_min, ",",scene_scale_y_max,"],
title: '',
autorange: false,
showgrid: false,
zeroline: false,
showline: false,
autotick: true,
ticks: '',
showticklabels: false
},
zaxis: {
showspikes: false,
range: [",scene_scale_z_min,",", scene_scale_z_max,"],
title: '',
autorange: false,
showgrid: false,
zeroline: false,
showline: false,
autotick: true,
ticks: '',
showticklabels: false
}
},
width: 1100,
height: 900,
margin: {
l: 0,
r: 0,
b: 0,
t: 0
},
showlegend: false,
legend: {
\"x\": \"0\",
\"margin.r\": \"120\"
}
};", sep="")), file = fileConn)
write(paste("
data = [",traces_edges,",trace_nodes,", traces,"];
Plotly.plot('plotly-div', {
data: data,
layout: layout
});
</script>
</body>
</html>
", sep = ""), file = fileConn, append = T)
} # if(length(s))
} |
483d7c634832409641bf8d7aa4cadceb62c9624a | f5e66f86b5e89c21a54cd8a095de5752018a549e | /man/Diag_Non_Con.Rd | 8e8b2804432c2f1e76b305bf6bc37ef7fa4dda97 | [] | no_license | rjaneUCF/MultiHazard | 5279e4efc869e8745818fe62b37ce1d35534114c | 334b0e1aa6bd5f4dbd68876221fad3c499e691a2 | refs/heads/master | 2023-07-29T15:04:55.523801 | 2023-07-11T20:46:42 | 2023-07-11T20:46:42 | 239,541,679 | 11 | 4 | null | null | null | null | UTF-8 | R | false | true | 1,786 | rd | Diag_Non_Con.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Diag_Non_Con.R
\name{Diag_Non_Con}
\alias{Diag_Non_Con}
\title{Goodness of fit of non-extreme marginal distributions}
\usage{
Diag_Non_Con(Data, x_lab, y_lim_min = 0, y_lim_max = 1)
}
\arguments{
\item{Data}{Numeric vector containing realizations of the variable of interest.}
\item{x_lab}{Character vector of length one specifying the label on the x-axis of histogram and cumulative distribution plot.}
\item{y_lim_min}{Numeric vector of length one specifying the lower y-axis limit of the histogram. Default is \code{0}.}
\item{y_lim_max}{Numeric vector of length one specifying the upper y-axis limit of the histogram. Default is \code{1}.}
}
\value{
Dataframe \code{$AIC} giving the AIC associated with each distribution and the name of the best fitting distribution \code{$Best_fit}. Panel consisting of three plots. Upper plot: Plot depicting the AIC of the two fitted distributions. Middle plot: Probability Density Functions (PDFs) of the fitted distributions superimposed on a histogram of the data. Lower plot: Cumulative Distribution Functions (CDFs) of the fitted distributions overlaid on a plot of the empirical CDF.
}
\description{
Fits two (unbounded) non-extreme marginal distributions to a dataset and returns three plots demonstrating their relative goodness of fit.
}
\examples{
S20.Rainfall<-Con_Sampling_2D(Data_Detrend=S20.Detrend.df[,-c(1,4)],
Data_Declust=S20.Detrend.Declustered.df[,-c(1,4)],
Con_Variable="Rainfall",Thres=0.97)
Diag_Non_Con(Data=S20.Rainfall$Data$OsWL,x_lab="O-sWL (ft NGVD 29)",
y_lim_min=0,y_lim_max=1.5)
}
\seealso{
\code{\link{Copula_Threshold_2D}}
}
|
1bb0554cd55791b20e1010c9ef9979af6347ffbf | 7050fc64449bf7fc96384bcb5d7d57cfd638e5fc | /Laboratorios/Lab1/Lab1.R | 76d8051ea06315960c432e4f530f847b84c0db96 | [] | no_license | jcecheverria/Data-Wrangling | 3aaea8e7ca1b45eef30f6dccb2c53b03c752c75d | a313c34d041fb1fdf0d1d0929260ec86556fec61 | refs/heads/master | 2023-01-24T22:11:06.388820 | 2020-11-15T20:46:04 | 2020-11-15T20:46:04 | 283,594,887 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,193 | r | Lab1.R |
# LIBRERIAS ####
library(readxl)
library(readr)
# PROBLEMA 1 ####
# Problem 1: read every monthly .xlsx delivery file, keep a fixed set of
# columns plus the period taken from the file name, and concatenate.
# Anchored regex so only files actually ending in ".xlsx" match (the old
# pattern ".xlsx" treated the dot as "any character").
files <- list.files(pattern = "\\.xlsx$")

# Read one workbook and return the requested columns plus a FECHA column
# derived from the file name (e.g. "2018-01.xlsx" -> "2018-01").
read_excel_files <- function(path, cols) {
  # Read the file.
  df <- read_excel(path)
  # Period column from the file name; fixed = TRUE splits on the literal
  # ".xlsx" instead of interpreting it as a regular expression.
  fecha <- strsplit(path, ".xlsx", fixed = TRUE)[[1]]
  df["FECHA"] <- fecha
  sliced_df <- df[cols]
  print(nrow(sliced_df))  # debug: rows read per file
  return(sliced_df)
}

columnas <- c("COD_VIAJE", "CLIENTE", "UBICACION", "CANTIDAD",
              "PILOTO", "Q", "CREDITO", "UNIDAD", "FECHA")
df_list <- lapply(files, read_excel_files, columnas)
# Final table.
concat_df <- do.call(rbind, df_list)
# Export the table as .csv.
write_excel_csv(concat_df, "entregas_2018.csv")
# PROBLEMA 2 ####
# Problem 2: mode(s) of a vector.
lista_vec <- list(c(1, 2, 3), c(1, 2, 2, 2, 3), c(1, 2, 2, 3, 3))

# Return the mode(s) of `lista` as a character vector (the names of the
# most frequent values), or NULL when no value occurs more than once.
# Ties all come back, e.g. c(1, 2, 2, 3, 3) -> c("2", "3").
# Fixed: `<-` for assignment instead of `=` throughout.
moda <- function(lista) {
  tbl <- table(lista)
  # A value is a mode only if it repeats (> 1) and ties the highest count.
  cond <- (tbl > 1) & (tbl == max(tbl))
  modas <- names(tbl)[cond]
  if (length(modas) == 0) modas <- NULL
  return(modas)
}

lapply(lista_vec, moda)
# PROBLEMA 3 ####
# Problem 3: machine-specific path -- adjust before running.
PATH = "C:/Users/Jose/Documents/UFM/4th_year/2ndo_Semestre/Data Wrangling/Data/datos_sat/INE_PARQUE_VEHICULAR_080720.txt" # path on my machine
data_sat = read_delim(PATH,delim ="|") # column X11 is parsed by mistake (probably a stray "|" in the data)
|
4a952e7ce52edd377566872a61d8b978911c838b | 92e597e4ffc9b52cfb6b512734fb10c255543d26 | /tests/testthat/test-function-excludeNULL.R | 11d061c28794fcddb1ebf78699d15f342c52d328 | [
"MIT"
] | permissive | KWB-R/kwb.utils | 3b978dba2a86a01d3c11fee1fbcb965dd15a710d | 0930eaeb9303cd9359892c1403226a73060eed5b | refs/heads/master | 2023-05-12T15:26:14.529039 | 2023-04-21T04:28:29 | 2023-04-21T04:28:29 | 60,531,844 | 9 | 1 | MIT | 2023-04-21T04:28:30 | 2016-06-06T13:52:43 | R | UTF-8 | R | false | false | 187 | r | test-function-excludeNULL.R | test_that("excludeNULL() works", {
L <- list(a = 1, b = NULL, c = "three")
expect_error(excludeNULL(0))
expect_identical(sum(sapply(excludeNULL(L, dbg = FALSE), is.null)), 0L)
})
|
6e0b09c8a7669ebafc7222c893bd5bcb5e4b9a4e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rvcheck/examples/check_r.Rd.R | e61d84810a6370955fcfe0ae06cd8e6234071602 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 162 | r | check_r.Rd.R | library(rvcheck)
### Name: check_r
### Title: check_r
### Aliases: check_r
### ** Examples
## Not run:
##D library(rvcheck)
##D check_r()
## End(Not run)
|
befb01d82631354bbdc503b2928708203acc2adb | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Basler/terminator/stmt21_178_218/stmt21_178_218.R | 018664ddbfc9fd88fc985db703a9185e1e300382 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 65 | r | stmt21_178_218.R | 89d89989a202095c939d3652c4616f26 stmt21_178_218.qdimacs 2696 8703 |
457ad339d4fae36a8a1d4d458cd8e1bfc22778f9 | 5700e3810d1ab93d186a21b0fe47310c320d9f5c | /code-for-plots-heritability.R | 947118e1e0b3a07410dfd9419d69184be8dfe532 | [] | no_license | jiazhao97/sim-BWMR | b8b64189413832004d43c3ad75cbd51cf2a54775 | a32f46b6ea5436bcc82bd16c21dd98b390cfa657 | refs/heads/master | 2020-05-16T23:56:27.907719 | 2019-09-04T14:42:52 | 2019-09-04T14:42:52 | 183,380,837 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,507 | r | code-for-plots-heritability.R | # Figure S25 in supplementary document
# NOTE(review): clearing the global workspace from inside a script is
# discouraged; kept as-is to preserve the original behaviour.
rm(list = ls())
library(ggplot2)
library(reshape2)
# Fixed display order of the four MR methods used by every plot below.
method_level <- c("BWMR", "Egger", "GSMR", "RAPS")
## Type I error
# Loads the simulation results; expected to provide the p-value matrices
# pvalset.bwmr / pvalset.raps / pvalset.gsmr / pvalset.egger and the
# estimate matrices bset.* (one row per simulation setting, one column
# per replicate) -- TODO confirm against the .RData contents.
load("heritability-02.RData")
#load("heritability-03.RData")
#load("heritability-04.RData")
# Row 1 of each matrix is the null setting (beta = 0; see beta_value_0 below).
i <- 1
Rp <- length(pvalset.bwmr[i, ])  # number of simulation replicates
# Empirical type I error: rejection rate at the nominal 0.05 level.
type.I.error <- c("BWMR" = sum(pvalset.bwmr[i, ] < 0.05)/Rp, "RAPS" = sum(pvalset.raps[i, ] < 0.05)/Rp,
                  "GSMR" = sum(pvalset.gsmr[i, ] < 0.05)/Rp, "Egger" = sum(pvalset.egger[i, ] < 0.05)/Rp)
# Binomial standard error of each rejection-rate estimate.
se <- c("BWMR" = sqrt(type.I.error["BWMR"]*(1 - type.I.error["BWMR"])/Rp),
        "RAPS" = sqrt(type.I.error["RAPS"]*(1 - type.I.error["RAPS"])/Rp),
        "GSMR" = sqrt(type.I.error["GSMR"]*(1 - type.I.error["GSMR"])/Rp),
        "Egger" = sqrt(type.I.error["Egger"]*(1 - type.I.error["Egger"])/Rp))
df_typeIerror <- data.frame(
  typeIerror = type.I.error,
  se = se,
  Method = names(type.I.error)
)
df_typeIerror$Method <- factor(df_typeIerror$Method, levels = method_level)
# Bar chart of per-method type I error with +/- 1 SE error bars; the dashed
# line marks the nominal 0.05 level.
plt_typeIerror <- ggplot(df_typeIerror, aes(x = Method, y = typeIerror, fill = Method)) +
  geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin = typeIerror - se, ymax = typeIerror + se),
                width=.2,
                position=position_dodge(.9)) +
  geom_hline(aes(yintercept = 0.05), colour = "tomato1", linetype = "dashed", size = 1) +
  labs(x = "Method", y = "Type I error rate", title = "Type I error rate") +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(axis.title.x = element_text(size = 25, vjust = -0.5),
        plot.title = element_text(hjust = 0.5, vjust = 1.5, size = 30),
        axis.text = element_text(size = 25))
plt_typeIerror
### QQplot
# qqunif.plot() is defined in this sourced helper file.
source('qqunifplot.R')
# Null-setting (row 1) p-values per method; values <= 1e-30 are dropped
# (presumably to keep extreme underflows off the QQ plot -- TODO confirm).
my.pvalue.list <- list("GSMR" = pvalset.gsmr[1, ][pvalset.gsmr[1, ] > 1e-30], "RAPS" = pvalset.raps[1, ][pvalset.raps[1, ] > 1e-30],
                       "Egger" = pvalset.egger[1, ][pvalset.egger[1, ] > 1e-30], "BWMR" = pvalset.bwmr[1, ][pvalset.bwmr[1, ] > 1e-30])
plt_qq <- qqunif.plot(my.pvalue.list, auto.key=list(corner=c(.95,.05)))
plt_qq
## Power
## Empirical power (rejection rate at the 0.05 level) for each method at the
## true effect sizes beta = 0.1, ..., 0.5, which correspond to rows i = 2..6
## of the p-value matrices loaded above.  The original script repeated the
## same four-method computation five times (power_1 ... power_5); it is
## computed once per setting here.
power_at <- function(i) {
  # Proportion of the Rp replicates rejected by each method in setting i.
  c("BWMR" = sum(pvalset.bwmr[i, ] < 0.05)/Rp, "RAPS" = sum(pvalset.raps[i, ] < 0.05)/Rp,
    "GSMR" = sum(pvalset.gsmr[i, ] < 0.05)/Rp, "Egger" = sum(pvalset.egger[i, ] < 0.05)/Rp)
}
# 4 methods (rows) x 5 effect sizes (columns); vapply checks the names of
# each result against the template.
power_mat <- vapply(2:6, power_at,
                    c("BWMR" = 0, "RAPS" = 0, "GSMR" = 0, "Egger" = 0))
df_power <- data.frame(
  beta_true = rep(c(0.1, 0.2, 0.3, 0.4, 0.5), rep(4, 5)),
  # column-major flattening: identical order to c(power_1, ..., power_5)
  power = as.vector(power_mat),
  Method = rep(rownames(power_mat), 5)
)
df_power$Method <- factor(df_power$Method, levels = method_level)
# Grouped bar chart of power by true effect size.
plt_power <- ggplot(df_power, aes(x = beta_true, y = power, fill = Method)) +
  geom_bar(position=position_dodge(), stat="identity") +
  ylim(0, 1) +
  scale_x_continuous(breaks=seq(0.1, 0.5, 0.1)) +
  labs(x = "Beta", y = "Power", title = "Power") +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "top") +
  theme(axis.title.x = element_text(size = 25, vjust = -0.5),
        plot.title = element_text(hjust = 0.5, vjust = 1.5, size = 30),
        axis.text = element_text(size = 25),
        legend.text = element_text(size = 15),
        legend.title = element_text(size = 15, face = "bold"))
plt_power
### Estimation (boxplot)
# Boxplots of the causal-effect estimates of each method, faceted by the
# true effect size beta = 0.0 ... 0.5 (rows i = 1..6 of the bset.* matrices).
method_0 <- c("BWMR", "RAPS", "Egger", "GSMR")
beta_0 <- c("beta=0.0", "beta=0.1", "beta=0.2", "beta=0.3", "beta=0.4", "beta=0.5")
beta_value_0 <- seq(0.0, 0.5, 0.1)
# store estimate
# One row per beta setting; the Rp estimates of the four methods are
# concatenated along the columns (BWMR, RAPS, Egger, GSMR blocks of size Rp).
b.mat <- matrix(nrow = length(beta_0), ncol = length(method_0)*Rp)
# NOTE(review): b.mat.b is allocated but never filled or used below.
b.mat.b <- matrix(nrow = length(beta_0), ncol = length(method_0)*Rp)
rownames(b.mat) <- beta_0
for (i in 1:6) {
  b.mat[i, ] <- c(bset.bwmr[i, ], bset.raps[i, ], bset.egger[i, ], bset.gsmr[i, ])
}
# Long-format vectors matching b.mat flattened row by row.
beta <- rep(beta_0, rep(length(method_0)*Rp, length(beta_0)))
beta_value <- rep(beta_value_0, rep(length(method_0)*Rp, length(beta_0)))
method <- rep(rep(method_0, rep(Rp, length(method_0))), length(beta_0))
est_df <- data.frame(
  beta = beta,
  beta_value = beta_value,
  Method = method,
  beta_est = c(b.mat[1, ], b.mat[2, ], b.mat[3, ], b.mat[4, ], b.mat[5, ], b.mat[6, ])
)
# Dashed blue line marks the true beta within each facet.
est_plt <- ggplot(est_df, aes(x = Method, y = beta_est)) +
  geom_boxplot(aes(fill = Method)) +
  geom_hline(aes(yintercept = beta_value), colour="blue", linetype="dashed", size=1) +
  labs(x = "Method", title="Estimation") +
  theme(legend.position = "none") +
  theme(axis.title.y = element_blank()) +
  theme(axis.title.x = element_text(size = 25, vjust = -0.5),
        plot.title = element_text(hjust = 0.5, vjust = 1.5, size = 30),
        axis.text = element_text(size = 20)) +
  theme(axis.text.x = element_text(angle = 45, hjust=1))
est_plt <- est_plt + facet_grid( ~ beta) +
  theme(strip.text.x = element_text(size = 20))
est_plt
|
3ad78a9cea2b837785236fe68a0801abfea427fb | 1910b492b0fb6ede2dfcb6c6ccdc569c59c1d2da | /Rscript/rename_files/rename_files.R | d23de6253c0f495a0dd3950985310475b8eed998 | [] | no_license | lapig-ufg/jackknife-scripts | 46bc8a8b4bc22ecb3870fc88f89879bc2bb8147a | fddb1f35c745d2736d44602e4110078ec6a01a3e | refs/heads/master | 2021-11-24T02:21:20.281556 | 2021-10-25T15:04:07 | 2021-10-25T15:04:07 | 44,969,682 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 742 | r | rename_files.R | #####################################################################
#####################################################################
#'
# A utility to rename multiple GeoTIFF files: replaces the literal suffix
# "01.tif" in every *.tif file name of a directory with the given
# replacement, e.g. "block_01.tif" -> "block_01_wk.tif".
###
###
# dir          directory to scan (not entered with setwd(), so the working
#              directory of the session is left untouched).
# replacement  literal text substituted for "01.tif" in each matching name.
# Returns the logical vector from file.rename() (invisibly via last expr).
rename_tifs <- function(dir, replacement) {
  # Anchored regex: only names ending in ".tif".  The original used
  # pattern = '*.tif', which is a glob, not a regex, and also matched
  # names merely containing ".tif" anywhere.
  old_names <- list.files(dir, pattern = "\\.tif$")
  # fixed = TRUE: match the literal string "01.tif" -- without it the '.'
  # matches any character, so e.g. "01Xtif" would be renamed by accident.
  new_names <- gsub("01.tif", replacement, old_names, fixed = TRUE)
  file.rename(file.path(dir, old_names), file.path(dir, new_names))
}
# Workstation
rename_tifs('Y:\\CENTURYCERRADO/mosaicsWorkstation/block_34', "01_wk.tif")
# Áqua
rename_tifs('Y:\\CENTURYCERRADO/mosaics', "01_aqua.tif")
#####################################################################
#####################################################################
|
d5344f21b0ae325371b500496ac0e3bc7ce1a04a | 71abd2ebcb868047c700d09ad9b10afdd4dd5c89 | /dev_test.R | 3cadfef76c2859a8614ad42c9d83b5010e3aecf9 | [] | no_license | eriqande/Rrunstruct | 5a3fdb839b9d71615fd61a11fa5d6cdf6d0643c4 | 2881a014ee77ac714e54aa3ac5eb3cb364772d83 | refs/heads/master | 2021-01-18T22:44:35.926474 | 2018-07-27T05:29:27 | 2018-07-27T05:29:27 | 38,450,585 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,189 | r | dev_test.R |
# Driver script for the Rrunstruct workflow: read STRUCTURE runs, relabel
# clusters so they agree across replicates, then compare traces and
# cluster-membership probabilities between replicates.
#setwd("~/Desktop/scot-cats/PlainPars")
#structure_runs("Boing", 1:4, 3, 1234)
# Read the cluster-membership results and the MCMC traces of a finished run.
results <- read_results("struct-runs-from-r-1")
traces <- read_traces("struct-runs-from-r-1")
# Assign each individual to its maximum a posteriori cluster, then find the
# label permutation aligning clusters across runs.
res2 <- MAP_cluster(results)
mp <- relabel_map_clusters(res2)
# then we can pick out just the max perm indexes for each K and r
max_perms <- mp %>%
  group_by(K, Rep) %>%
  summarise(mp = first(max_perm))
# and once we have that, we can relabel the traces and the results
traces_relab <- relabel_traces(traces, max_perms)
# Trace plot of the F-statistics (variables named F<digit>...) by sweep,
# one panel per (K, Rep) combination.
ggplot(traces_relab %>% filter(str_detect(variable, "^F[0-9]")), aes(x = Sweep, y = value, colour = variables_relabeled)) +
  geom_line() +
  facet_grid(K ~ Rep)
results_relab <- relabel_results(results, max_perms)
# now prepare to plot these with Rep 1 values on the x axes...
# Attach each individual's Rep-1 cluster and probability to the other reps,
# so every replicate can be plotted against replicate 1.
prep <- results_relab %>%
  filter(Rep == 1) %>%
  group_by(K, Index, cluster_relabeled) %>%
  transmute(rep1_cluster = cluster_relabeled, rep1_prob = probability) %>%
  ungroup %>%
  inner_join(results_relab) %>%
  filter(Rep != 1) %>%
  mutate(which_cluster = paste(rep1_cluster))
# Scatter of each replicate's probabilities against replicate 1.
ggplot(prep, aes(x = rep1_prob, y = probability, colour = which_cluster)) +
  geom_point() +
  facet_grid(K ~ Rep)
|
94064676ca89eed04815d7b8785299bc8ed8c349 | f26b89cec02c147b1a6a9f4ad26165c00ac1f09e | /man/aqs_sampledata_by_cbsa.Rd | fc8ca8bbede3a749df444e37a6acf80cadbacf48 | [
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | cjmc00/RAQSAPI | 131f36b59f180b52260311462ce9aaab54c6c841 | 1eb717ce455903a7795abdd52b669c18cb0db99f | refs/heads/main | 2023-04-01T12:35:34.554784 | 2021-04-05T18:59:48 | 2021-04-05T18:59:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,480 | rd | aqs_sampledata_by_cbsa.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bycbsa.R
\name{aqs_sampledata_by_cbsa}
\alias{aqs_sampledata_by_cbsa}
\title{aqs_sampledata_by_cbsa}
\usage{
aqs_sampledata_by_cbsa(
parameter,
bdate,
edate,
cbsa_code,
cbdate = NA_Date_,
cedate = NA_Date_,
return_header = FALSE
)
}
\arguments{
\item{parameter}{a character list or a single character string
which represents the parameter code of the air
pollutant related to the data being requested.}
\item{bdate}{a R date object which represents that begin date of the data
selection. Only data on or after this date will be returned.}
\item{edate}{a R date object which represents that end date of the data
selection. Only data on or before this date will be returned.}
\item{cbsa_code}{a R character object which represents the 5 digit AQS Core
Based Statistical Area code (the same as the census code,
with leading zeros)}
\item{cbdate}{a R date object which represents a "beginning
date of last change" that indicates when the data was last
updated. cbdate is used to filter data based on the change
date. Only data that changed on or after this date will be
returned. This is an optional variable which defaults
to NA_Date_.}
\item{cedate}{a R date object which represents an "end
date of last change" that indicates when the data was last
updated. cedate is used to filter data based on the change
date. Only data that changed on or before this date will be
returned. This is an optional variable which defaults
to NA_Date_.}
\item{return_header}{If FALSE (default) only returns data requested.
If TRUE returns a AQSAPI_v2 object which is a two item
list that contains header information returned from
the API server mostly used for debugging purposes in
addition to the data requested.}
}
\value{
a tibble or an AQS_Data_Mart_APIv2 S3 object containing sample data
for all monitors matching cbsa_code for the given parameter. An
AQS_Data Mart_APIv2 is a 2 item named list in which the first item
/(/$Header/) is a tibble of header information from the AQS API and
the second item /(/$Data/) is a tibble of the data returned.
}
\description{
\lifecycle{stable}
Returns sample data where the data is aggregated at the Core
Based Statistical Area (cbsa) level. If return_header is
FALSE (default) this function returns a single dataframe with
the requested data. If return_header is TRUE returns a list
of AQSAPI_v2 objects where each index of the list is an
individual RAQSAPI_v2 object returned from each successive
call to the AQS API. RAQSAPI_v2 objects are two item list
where the $Data portion contains data that contains
sample air monitoring data at a site with the input
parameter and cbsa_code provided for
bdate - edate time frame. The $Header is a tibble of
header information from the API call /(useful for
debugging/). This function returns NULL is bdate > edate.
}
\note{
The AQS API only allows for a single year of sampledata to be retrieved
at a time. This function conveniently extracts date information from
the bdate and edate parameters then makes repeated calls to the
AQSAPI retrieving a maximum of one calendar year of data at a time.
Each calendar year of data requires a separate API call so multiple
years of data will require multiple API calls. As the number of years
of data being requested increases so does the length of time that it
will take to retrieve results. There is also a 5 second wait
time inserted between successive API calls to prevent overloading the
API server. Fortunately this operation has a linear run time
/(Big O notation: O/(n + 5 seconds/)/)
}
\examples{
# returns an aqs_v2 s3 object which contains NO2 data
# for Charlotte-Concord-Gastonia, NC cbsa for
#  January 1, 2015 - January 01, 2017
\dontrun{aqs_sampledata_by_cbsa(parameter = "42602",
bdate = as.Date("20150101",
format = "\%Y\%m\%d"),
edate = as.Date("20170101",
format = "\%Y\%m\%d"),
cbsa_code = "16740"
)
}
}
\seealso{
Other Aggregate _by_cbsa functions:
\code{\link{aqs_annualsummary_by_cbsa}()},
\code{\link{aqs_dailysummary_by_cbsa}()},
\code{\link{aqs_monitors_by_cbsa}()}
}
\concept{Aggregate _by_cbsa functions}
|
b25381d5a11b19cb76231a129df46ddc0340a568 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Messinger/circuits/k8_2_4/k8_2_4.R | 10faa821e8ebe09f178ef70b12b49f70da0694c0 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 59 | r | k8_2_4.R | 167fc96e947a5976d32e95bc6bd141ed k8_2_4.qdimacs 11353 15984 |
4c9098f12d71333704343e9d0db2ba3bdfddb46f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/affy/examples/rma.Rd.R | 57e80351a7383dde78267629abe7f42ff70aa4be | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 213 | r | rma.Rd.R | library(affy)
### Name: rma
### Title: Robust Multi-Array Average expression measure
### Aliases: rma
### Keywords: manip
### ** Examples
# Run the example only when the optional 'affydata' data package is installed.
if (require(affydata)) {
  data(Dilution)         # example dataset shipped with affydata
  eset <- rma(Dilution)  # compute the RMA expression measure
}
|
f6245fe08a53005de771d745512561f6b112ca28 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /MESS/R/wallyplot.R | 21f0cdd6bbbc26923050e96a1b261deaf5749864 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 5,909 | r | wallyplot.R | #' @rdname wallyplot
#' @export
wallyplot.default <- function(x, y=x, FUN=residualplot,
                              hide=TRUE,
                              simulateFunction=rnorm,
                              ...) {
  ## Default method: draws a 3x3 grid in which one panel shows the real
  ## (x, y) data and the other eight show data simulated under the model
  ## assumptions with simulateFunction(), so the viewer can try to spot
  ## the real plot (see ?wallyplot).
  ##
  ## x                 numeric vector of x values.
  ## y                 numeric vector the same length as x (the real data;
  ##                   eight simulated columns are appended), or a ready-made
  ##                   n x 9 matrix whose first column is the real data.
  ## FUN               panel plotting function, called as FUN(x, y[, j], ...).
  ## hide              hide the position of the real panel until a key press.
  ## simulateFunction  function of n returning n simulated y values.
  ## Returns NULL invisibly; called for its plotting side effect.
  simulateFunction <- match.fun(simulateFunction)
  ## When y is a plain vector matching x, build the 9-column matrix:
  ## column 1 is the real data, columns 2-9 are simulated null data.
  if (is.vector(y) && length(x) == length(y))
    y <- cbind(y, sapply(1:8, function(k) {simulateFunction(length(x))}))
  if (!is.numeric(x) || !is.matrix(y))
    stop("x and y input must be a vector and matrix, respectively")
  if (length(x) != nrow(y))
    stop("x and y does not conform")
  if (ncol(y) != 9)
    stop("y must be a matrix with 9 columns")
  ## (A further "!is.numeric(x) && !is.numeric(y)" check was removed: it was
  ## unreachable, since the first stop() above already guarantees x numeric.)
  ## Keep only complete cases across x and all columns of y.
  cc <- complete.cases(x, y)
  x <- x[cc]
  y <- y[cc, ]
  plot3x3(x, y, FUN = FUN, hide = hide, ...)
  invisible(NULL)
}
#' @import graphics
#' @rdname wallyplot
#' @export
wallyplot.lm <- function(x, y=x, FUN=residualplot,
                         hide=TRUE,
                         simulateFunction=rnorm,
                         ...) {
  ## lm method: build the Wally plot from the model's standardized
  ## residuals (vertical axis) against its fitted values (horizontal
  ## axis); everything else is forwarded to the default method.
  std_resid <- rstandard(x)
  fitted_vals <- predict(x)
  wallyplot.default(fitted_vals, std_resid,
                    FUN = FUN, hide = hide,
                    simulateFunction = simulateFunction, ...)
}
#' Plots a Wally plot
#'
#' Produces a 3x3 grid of residual- or qq-plots plots from a lm object. One of
#' the nine subfigures is the true residual plot/qqplot while the remaining are
#' plots that fulfill the assumptions of the linear model
#'
#' Users who look at residual plots or qqnorm plots for the first time often
#' feel they lack the experience to determine if the residual plot is okay or
#' if the model assumptions are indeed violated. One way to convey "experience"
#' is to plot a series of graphical model validation plots simulated under the
#' model assumption together with the corresponding plot from the real data and
#' see if the user can pinpoint one of them that looks like an odd-one-out. If
#' the proper plot from the real data does not stand out then the assumptions
#' are not likely to be violated.
#'
#' The Wallyplot produces a 3x3 grid of plots from a lm object or from a set of
#' pairs of x and y values. One of the nine subfigures is the true plot while
#' the remaining are plots that fulfill the assumptions of the linear model.
#' After the user interactively hits a key the correct residual plot
#' (corresponding to the provided data) is shown.
#'
#' The plotting function can be set using the \code{FUN} argument which should
#' be a function that accepts \code{x}, \code{y} and \code{...} arguments and
#' plots the desired figure. When \code{y} is a single vector the same length
#' as \code{x} then the function \code{simulateFunction} is used to generate
#' the remaining y values corresponding the situations under the null.
#'
#' For a description of the features of the default residual plot see the help page for \code{\link{residualplot}}.
#'
#' @aliases wallyplot wallyplot.lm wallyplot.default
#' @param x a numeric vector of x values, or an lm object.
#' @param y a numeric vector of y values of the same length as x or a n * 9
#' matrix of y values - one column for each of the nine plots to make. The
#' first column is the one corresponding to the results from the dataset
#' @param FUN a function that accepts an \code{x}, \code{y} and \code{...}
#' argument and produces a graphical model validation plots from the \code{x}
#' and \code{y} values.
#' @param hide logical; if \code{TRUE} (the default) then the identity of the
#' true residual plot is hidden until the user presses a key. If \code{FALSE}
#' then the true residual plot is shown in the center.
#' @param simulateFunction The function used to produce y values under the null
#' hypothesis. Defaults to rnorm
#' @param ... Other arguments passed to the plot function \code{FUN}
#' @author Claus Ekstrom \email{claus@@rprimer.dk}
#' @references Ekstrom, CT (2014) \emph{Teaching 'Instant Experience' with
#' Graphical Model Validation Techniques}. Teaching Statistics (36), p 23-26
#' @keywords iplot
#' @examples
#'
#' \dontrun{
#' data(trees)
#' res <- lm(Volume ~ Height + Girth, data=trees)
#' wallyplot(res)
#'
#'
#' # Create a grid of QQ-plot figures
#' # Define function to plot a qq plot with an identity line
#' qqnorm.wally <- function(x, y, ...) { qqnorm(y, ...) ; abline(a=0, b=1) }
#' wallyplot(res, FUN=qqnorm.wally, main="")
#'
#' # Define function to simulate components+residuals for Girth
#' cprsimulate <- function(n) {rnorm(n)+trees$Girth}
#' # Create the cpr plotting function
#' cprplot <- function(x, y, ...) {plot(x, y, pch=20, ...) ;
#' lines(lowess(x, y), lty=3)}
#' # Create the Wallyplot
#' wallyplot(trees$Girth, trees$Girth+rstudent(res), FUN=cprplot,
#' simulateFunction=cprsimulate, xlab="Girth")
#' }
#'
#' @export
wallyplot <- function(x, y=x, FUN=residualplot,
                      hide=TRUE,
                      simulateFunction=rnorm,
                      ...) {
  # S3 generic: dispatches to wallyplot.lm for fitted models and to
  # wallyplot.default for raw x/y input (methods defined above).
  UseMethod("wallyplot")
}
# qqnorm.wally <- function(x, y, ...) {qqnorm(y, ...) ; abline(a=0, b=1)}
plot3x3 <- function(x, y, FUN=plot, hide=TRUE, ylim=range(y), mar=c(4, 4, .1, .1)+.1, ...) {
  ## Draw a 3x3 grid of panels, one per column of y, all sharing the x
  ## values.  If hide = TRUE the panel order is randomised and the user
  ## must press <Enter> before the panel holding column 1 (the real data)
  ## is outlined in red.
  ##
  ## x    numeric vector of x values.
  ## y    numeric matrix with 9 columns; column 1 holds the real data.
  ## FUN  plotting function, called as FUN(x, y[, j], ylim = ylim, ...).
  ## Returns NULL invisibly; called for its plotting side effect.

  # Input check
  if (!is.numeric(x) || !is.matrix(y))
    stop("x and y input must be a vector and matrix, respectively")
  if (length(x) != nrow(y))
    stop("x and y does not conform")
  if (ncol(y) != 9)
    stop("y must be a matrix with 9 columns")

  ## Save graphical parameters and restore them even if FUN() errors
  ## (the original only restored them on the success path).
  oldpar <- par(no.readonly = TRUE)
  on.exit(par(oldpar), add = TRUE)

  par(mar = mar)
  FUN <- match.fun(FUN)

  ## Panel order: randomised when the true panel should be hidden.
  pos <- 1:9
  if (hide)
    pos <- sample(pos)

  par(mfrow = c(3, 3))
  for (i in seq_along(pos)) {
    FUN(x, y[, pos[i]], ylim = ylim, ...)
  }

  if (hide) {
    readline("Hit <Enter> to show the original plot. ")
  }

  ## Locate the panel that received column 1 and outline it in red.
  figpos <- order(pos)[1]
  ## Integer-division trick mapping panel number 1..9 to its (row, column)
  ## position in the 3x3 mfrow layout.
  par(mfg = c(figpos %/% 3.1 + 1, figpos - (figpos %/% 3.1) * 3))
  box(col = "red", lwd = 2)
  invisible(NULL)
}
|
676ce78fbf898880bd3ba24f5a842386fe77e285 | 3530276edd98fa3ca7e35e41588f0e12de251d54 | /Processing/Correlation/CP_Detrend_LCs.R | dcde68d2009941b2c20180c2cd3832742bf58459 | [] | no_license | pmlefeuvre/NVE_work | 9e21dfa19f6998cb624b47d8faf4e9c819df21d5 | e4ef285843089f7a2fe9f5f3fb69194ce4148303 | refs/heads/Process_LCdata_HydroMetdata_mac | 2021-01-21T15:00:06.798694 | 2016-07-01T10:19:39 | 2016-07-01T10:19:39 | 57,060,323 | 0 | 0 | null | 2016-04-26T08:18:49 | 2016-04-25T17:05:20 | R | UTF-8 | R | false | false | 5,040 | r | CP_Detrend_LCs.R |
##########################################################################
## Extract Trends and Short Term Variablilty ##
## And Plot Month Variations for: ##
## - 4 years Period between 1996 - 2013 ##
## - Whole Period 1996 - 2013
##########################################################################
###########################################
# # Clean up Workspace
# rm(list = ls(all = TRUE))
###########################################
### Go to the following Path in order to access data files
setwd("/Users/PiM/Desktop/PhD/Data Processing/Load Cells/Processing")
Sys.setenv(TZ="UTC")
# Load libraries
library(zoo)
library(lattice)
library(signal)
# Load User Functions
source('~/Desktop/PhD/Data Processing/Load Cells/Processing/f_Detrend.R')
# Interval
hour <- 60 #min
day <- 24 #hours
win.mean <- 4*hour/15#12*hour/15 #15min interval = 4 points per hour
win.corr <- 12*(hour/15)#24*(hour/15)
print(paste("Win.mean:", win.mean))
print(paste("Win.corr:", win.corr))
# #################################
# ## Correlation for 4 YEAR periods
#
# years <- seq(1996,2012,4)
# years[5] <- years[5]+1 #To add 2013
#
# for (k in 1:(length(years)-1)){
#
# # Time span
# sub.start <- sprintf("01/01/%i 00:00",years[k])
# # Condition to change month end for 2013
# if(years[k+1]<2013){sub.end=sprintf("01/01/%i 00:00",years[k+1])
# }else{sub.end=sprintf("05/01/%i 00:00",years[k+1])}
#
# # Print
# print(sprintf("Analysing Corr. between %i & %i",years[k],years[k+1]))
#
# # Function
# LC.reg.sub <- f_Detrend(LCnames=c("LC6","LC97_2"),
# sub.start= sub.start,
# sub.end = sub.end,
# win.mean,win.corr)
# }
###############################################
## Correlation between LCs for the WHOLE Period
# LCnames <- names(LC.reg.sub)
LCnames <- c("LC6","LC4","LC97_1","LC97_2","LC1e","LC7")
n <- 1
for (i in LCnames[2:length(LCnames)]){
print(sprintf("Correlation between %s & %s",LCnames[n],i))
LC.reg.sub <- f_Detrend(LCnames=c(LCnames[n],i),
sub.start="11/01/1992 00:00",
sub.end= "05/01/2013 00:00",
win.mean,win.corr)
}
#################################
##### ARCHIVE
# hour.mean <- c(1,2,4,8,day/2,day,2*day,4*day,6*day)
# hour.coor <- c(1,2,4,8,day/2,day,2*day,4*day,6*day)
# LC.reg.sub <- f_Detrend(LCnames=c("LC6","LC4"),
# sub.start="01/01/1996 00:00",
# sub.end= "05/01/2013 00:00",
# win.mean,win.corr)
#
# LC.reg.sub <- f_Detrend(LCnames=c("LC6","LC4"),
# sub.start="01/01/1996 00:00",
# sub.end= "01/01/2000 00:00",
# win.mean,win.corr)
#
# LC.reg.sub <- f_Detrend(LCnames=c("LC6","LC4"),
# sub.start="01/01/2000 00:00",
# sub.end= "01/01/2004 00:00",
# win.mean,win.corr)
#
# LC.reg.sub <- f_Detrend(LCnames=c("LC6","LC4"),
# sub.start="01/01/2004 00:00",
# sub.end= "01/01/2008 00:00",
# win.mean,win.corr)
#
# LC.reg.sub <- f_Detrend(LCnames=c("LC6","LC4"),
# sub.start="01/01/2008 00:00",
# sub.end= "05/01/2013 00:00",
# win.mean,win.corr)
#
# ##
# print(paste("LCnames:", c("LC97_1","LC97_2")))
#
# LC.reg.sub <- f_Detrend(LCnames=c("LC97_1","LC97_2"),
# sub.start="01/01/1996 00:00",
# sub.end= "05/01/2013 00:00",
# win.mean,win.corr)
#
# LC.reg.sub <- f_Detrend(LCnames=c("LC97_1","LC97_2"),
# sub.start="01/01/1996 00:00",
# sub.end= "01/01/2000 00:00",
# win.mean,win.corr)
#
# LC.reg.sub <- f_Detrend(LCnames=c("LC97_1","LC97_2"),
# sub.start="01/01/2000 00:00",
# sub.end= "01/01/2004 00:00",
# win.mean,win.corr)
#
# LC.reg.sub <- f_Detrend(LCnames=c("LC97_1","LC97_2"),
# sub.start="01/01/2004 00:00",
# sub.end= "01/01/2008 00:00",
# win.mean,win.corr)
#
# LC.reg.sub <- f_Detrend(LCnames=c("LC97_1","LC97_2"),
# sub.start="01/01/2008 00:00",
# sub.end= "05/01/2013 00:00",
# win.mean,win.corr)
#
#
# print(paste("LCnames:", c("LC6","LC97_2")))
# LC.reg.sub <- f_Detrend(LCnames=c("LC6","LC97_2"),
# sub.start="01/01/1996 00:00",
# sub.end= "05/01/2013 00:00",
# win.mean,win.corr) |
fe02d633feb389b6c4e427ad45ac4c70eae74a8a | cb03093b823294a5f23f7992031d0f34550fa712 | /OGIIR_0_Wrapper_Example.R | 97a52325badfa559b535be73ebb23875f4b501c4 | [] | no_license | fdbesanto2/Landsat-compositing | 603ab3a11d39fc5f228bff24eefee3114cfb7d09 | 0937b343123cf87f148489282b22bba0d090c4db | refs/heads/master | 2022-03-13T09:12:01.873703 | 2019-11-15T09:05:33 | 2019-11-15T09:05:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,595 | r | OGIIR_0_Wrapper_Example.R | ## Author: Jasper Van doninck
##
## Description: This is an example of a simple wrapper for browsing, processing and compositing Landsat imagery.
## Set EarthExplorer username and password
#username
userName <- "my.EE.username"
#password
require(getPass)
pwd <- getPass()
## Set search parameters
#Min/max longitude and latitude
xMin <- 22
xMax <- 22.5
yMin <- 60
yMax <- 60.5
#Start and end date of search window
startDate <- 20110501
endDate <- 20110701
#Maximum scene cloud cover
cloudCover <- 60
## Set directories
#downloaded Level-1 images
l1Dir <- "C/OGIIR_example/L1"
#topographically and BRDF normalized surface reflectance images
srDir <- "C:/OGIIR_example/SR"
#output composite images
compDir <- "C:/OGIIR_example/comp"
#directory in which temporary directories will be created
tempDirRoot <- "C:/OGIIR_example"
# Step 1: Query and download all available Landsat images
source(file.path("directory_of_R_files", "OGIIR_1_browseEE.R"))
fl <- browseEE(userName, pwd, xMin, xMax, yMin, yMax,
searchFrom=startDate, searchTo=endDate,
landCC=cloudCover,
download=l1Dir)
# Step 2: Process all downloaded images to surface reflectance, apply mask, normalize for BRDF effects (skipped topographic correction)
source(file.path("directory_of_R_files", "OGIIR_2_srTopoBrdf.R"))
sapply(fl, surfaceReflectance,
inDir=l1Dir,outDir=srDir,tempDirRoot=tempDirRoot,
brdf="Roy", outSZA=30,
topo=NULL, demFile=NULL)
#(Steps 1-2 could be skipped when downloading L2 data directly, but this would exclude )
# Step 3: Reprojection of images to common extent and pixel-based compositing
source(file.path("directory_of_R_files", "OGIIR_3_composite.R"))
#File IDs of surface reflectance images
fIDs <- substr(list.files(srDir, pattern="_sr_band1.tif"), 1,48)
#Dates of image acquisition
inDates <- substr(list.files(srDir, pattern="_sr_band1.tif"), 18,25)
#Reference output extent-dimensions
library(raster)
outRef <- raster(xmn=xMin, xmx=xMax, ymn=yMin, ymx=yMax, crs=CRS("+proj=longlat +datum=WGS84"), nrows=1800, ncols=1800)
#Load all Landat images into list of raster bricks
srBricks <- sapply(fIDs, function(x){stack(as.list(list.files(srDir, pattern=x, full.names=TRUE)))})
#Apply bilinear interpolation and multidimensional median compositing
LandsatComposite <- OGIIR_composite(srBricks, inDates, outRef, file.path(compDir,"LandsatComposite.tif"),
tempDirRoot=tempDirRoot, int="bilinear", comp="medoid")
|
1448227414348d3a1e200e032bb40cdda0652aa9 | c7e9a7fe3ee4239aad068c6c41149a4a09888275 | /OLD_GALLERY_RSCRIPT/#283_the_hourly_heatmap.R | eea47253025df391c24ed864a6e0792c1608ffc4 | [
"MIT"
] | permissive | holtzy/R-graph-gallery | b0dfee965ac398fe73b3841876c6b7f95b4cbae4 | 7d266ad78c8c2d7d39f2730f79230775930e4e0b | refs/heads/master | 2023-08-04T15:10:45.396112 | 2023-07-21T08:37:32 | 2023-07-21T08:37:32 | 31,253,823 | 591 | 219 | MIT | 2023-08-30T10:20:37 | 2015-02-24T09:53:50 | HTML | UTF-8 | R | false | false | 3,388 | r | #283_the_hourly_heatmap.R |
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
#283 HOURLY HEATMAP
#submitted by John MacKintosh <dataguy.jm@gmail.com>
#the hourly heatmap
#blog:http://johnmackintosh.com/2016-12-01-the-hourly-heatmap/
#https://gist.github.com/johnmackintosh/520643a1f82a0c7df00cf949ba98a4e9
library(ggplot2)
library(dplyr) # easier data wrangling
library(viridis) # colour blind friendly palette, works in B&W also
library(Interpol.T) # will generate a large dataset on initial load
library(lubridate) # for easy date manipulation
library(ggExtra) # because remembering ggplot theme options is beyond me
library(tidyr)
data<- data(Trentino_hourly_T,package = "Interpol.T")
names(h_d_t)[1:5]<- c("stationid","date","hour","temp","flag")
df<- tbl_df(h_d_t) %>%
filter(stationid =="T0001")
df<- df %>% mutate(year = year(date),
month = month(date, label=TRUE),
day = day(date))
df$date<-ymd(df$date) # not necessary for plot but
#useful if you want to do further work with the data
#cleanup
rm(list=c("h_d_t","mo_bias","Tn","Tx",
"Th_int_list","calibration_l",
"calibration_shape","Tm_list"))
#create plotting df
df <-df %>% select(stationid,day,hour,month,year,temp)%>%
fill(temp) #optional - see note below
# Re: use of fill
# This code is for demonstrating a visualisation technique
# There are 5 missing hourly values in the dataframe.
# see the original plot here (from my ggplot demo earlier this year) to see the white spaces where the missing values occcur:
# https://github.com/johnmackintosh/ggplotdemo/blob/master/temp8.png
# I used 'fill' from tidyr to take the prior value for each missing value and replace the NA
# This is a quick fix for the blog post only - _do not_ do this with your real world data
# Should really use either use replace_NA or complete(with fill)in tidyr
# OR
# Look into more specialist way of replacing these missing values -e.g. imputation.
statno <-unique(df$stationid)
######## Plotting starts here#####################
p <-ggplot(df,aes(day,hour,fill=temp))+
geom_tile(color= "white",size=0.1) +
scale_fill_viridis(name="Hrly Temps C",option ="C")
p <-p + facet_grid(year~month)
p <-p + scale_y_continuous(trans = "reverse", breaks = unique(df$hour))
p <-p + scale_x_continuous(breaks =c(1,10,20,31))
p <-p + theme_minimal(base_size = 8)
p <-p + labs(title= paste("Hourly Temps - Station",statno), x="Day", y="Hour Commencing")
p <-p + theme(legend.position = "bottom")+
theme(plot.title=element_text(size = 14))+
theme(axis.text.y=element_text(size=6)) +
theme(strip.background = element_rect(colour="white"))+
theme(plot.title=element_text(hjust=0))+
theme(axis.ticks=element_blank())+
theme(axis.text=element_text(size=7))+
theme(legend.title=element_text(size=8))+
theme(legend.text=element_text(size=6))+
removeGrid()#ggExtra
# you will want to expand your plot screen before this bit!
png("#283_the_hourly_heatmap.png" , width = 480, height = 480 )
p #awesomeness
dev.off()
png("#283_the_hourly_heatmap_large.png" , width = 680, height = 480 )
p #awesomeness
dev.off()
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
|
ff6aa571d985677eba67db9d8bc907cfed872989 | a1e0c1014d6de2ee4034accb9cd29cacb1533b08 | /app.R | 9b0bef943a8f8fd42a46fb06a716f25841ff522c | [] | no_license | NickPyll/Prime | 97e16c1a218176660c5c2fbbb17579224c13c9b6 | 5f861990b095bdf60a9a88a0c22aaae119ca4ff9 | refs/heads/master | 2021-10-21T22:02:11.242799 | 2021-10-11T13:09:48 | 2021-10-11T13:09:48 | 123,444,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,059 | r | app.R | # source all the things
library(shiny)
library(shinythemes)
# thinking about adding fibonacci and palindrome as other "interesting facts" about input number
# f <- numeric()
# f[1] <- f[2] <- 1
# for(i in 3:100) f[i] <- f[i-2] + f[i-1]
# q <- 7117
# q <- 7332
# q <- as.character(q)
# # Function to reverse text
# strReverse <- function(x)
# sapply(lapply(strsplit(x, NULL), rev), paste, collapse = "")
# q == strReverse(q)
# UI definition: a single page with a banner, a numeric input for the
# user's favorite number, and a verbatim text panel for the verdict.
ui <- fluidPage(
  theme = 'custom.css',
  # Preserve line breaks in the verbatim output panel (#xtext).
  tags$style(type='text/css', '#xtext {white-space: pre-wrap;}'),
  class = "page",
  # force vertical gap so content is beneath the navbar
  fluidRow(style = "height:50px"),
  # Page header banner.
  fluidRow(
    class = "db-intro",
    align = "center",
    HTML("<H1>PRIME FINDER</H1>")
  ),
  titlePanel("Is your favorite number prime?"),
  # fluidRow(
  #   align = "center",
  #   numericInput('num', 'What is your favorite number?', 1)
  # ),
  # Numeric input on the left, result text on the right.
  column(3,
         numericInput('num', NULL, 1)),
  column(9,
         verbatimTextOutput('xtext')
  )
)
# Shiny server: reacts to input$num and produces a text verdict on whether
# the number is prime, with special-cased messages for a few fun numbers.
#
# BUGFIX: the previous implementation capped trial division for numbers over
# 500000 at roughly 3 + 2*(n/100000), so large composites with no small
# factor (e.g. 1009 * 1013 = 1022117) were reported as prime. Both loops are
# replaced with exact trial division up to sqrt(n).
server <- function(input, output, session) {

  # Smallest odd divisor of n in [3, sqrt(n)], or NULL when none exists
  # (n is then prime). Assumes n is an odd integer > 2. Checking only up to
  # sqrt(n) is exact: any composite n has a factor no larger than sqrt(n).
  smallest_divisor <- function(n) {
    k <- 3
    while (k * k <= n) {
      if ((n %% k) == 0) {
        return(k)
      }
      k <- k + 2
    }
    NULL
  }

  results <- reactive({
    num <- input$num
    # default verdict; overwritten below whenever num is not prime
    xtext <- paste(num, "is a prime number.")
    if (num < 0) {
      xtext <- "No...negative numbers are not prime, by definition."
    } else if (num == 0) {
      xtext <- "Zero is a very interesting use case indeed...and there are lots of debates about whether it's prime, even, or even a number. But by almost all definitions, zero is not prime."
    } else if (num > 1000000000000) {
      xtext <- paste("Sorry, but I am not able to work with numbers that large at this time. Bokay?")
    } else if (num == 1) {
      xtext <- paste(num, "is the loneliest number, but by definition it is not prime.")
    } else if (num == 2) {
      xtext <- paste(num, "is the only even prime and the base of the binary system.")
    } else if (num == 42) {
      xtext <- 'yes....that is the answer to the Ultimate Question. Also...not prime.'
    } else if (num == 69) {
      xtext <- 'Sigh. No...not prime...divisible by 3.'
    } else if (num == 8675309) {
      xtext <- paste(num, "is not only Jenny's number, but it's also the hypotenuse of a primitive Pythagorean triple... Oh, and it is also prime.")
    } else if (num == 573237611) {
      xtext <- paste(num, "Not sure how you did it, but you guessed my SSN...and yes it's prime.")
    } else if ((num %% 2) == 0) {
      xtext <- paste(num, "is an even number, silly....of course it's not prime.")
    } else {
      # odd number > 2 with no special-case message: do the real test
      k <- smallest_divisor(num)
      if (!is.null(k)) {
        xtext <- paste(num, "is not a prime number. It is divisible by", k, ".")
      }
    }
    xtext
  })
  output$xtext <- renderText(results())
}
shinyApp(ui = ui, server = server)
|
bddbba4ff27a35a7cfd202a4a32e708b179a8b35 | 0b2a9e8945c2d487bf9b73a489e35a73a4b67489 | /man/compute_linear_KL.Rd | c29c6fc1f4e043efc9b235157df62112994a50d6 | [] | no_license | sergioluengosanchez/EMS_clustering | 60cd08e4373cc50752b706ad483825744db9a520 | c958b9037e59235a940fc47c944dd5c996a9896b | refs/heads/master | 2020-04-02T13:56:36.519439 | 2019-02-21T16:24:40 | 2019-02-21T16:24:40 | 154,503,617 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 467 | rd | compute_linear_KL.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_KL.R
\name{compute_linear_KL}
\alias{compute_linear_KL}
\title{Compute KL divergence of linear variables}
\usage{
compute_linear_KL(model_p, model_q, cluster_p, cluster_q, is_dir)
}
\description{
Compute KL divergence of linear variables
}
\examples{
cluster_p <- 2
cluster_q <- 2
is_dir <- dataset$is_dir
compute_linear_KL(model, clustering_model, cluster_p, cluster_q, is_dir)
}
|
a23c94b7bafade5b124c62afdb66f6be059f39f1 | bb04b62a93cbe2d18d4d5b85254a3b1106635f69 | /R/mmp_utils.R | 6b1bf86ccd89e25a2c904434b0e203426b0716b6 | [] | no_license | xia-lab/MicrobiomeAnalystR | 331725a764aa97099c0ca56668428bce63785bed | 97ced67a4ac04e4804d263ec3cde0fe10fd752aa | refs/heads/master | 2023-08-08T13:52:28.383786 | 2023-08-04T16:28:34 | 2023-08-04T16:28:34 | 184,415,141 | 97 | 36 | null | 2020-02-25T05:39:50 | 2019-05-01T12:48:01 | R | UTF-8 | R | false | false | 138,027 | r | mmp_utils.R | ##############################################################
## R script for MicrobiomeAnalyst
## Description: Functions for microbiome metabolomics analysis
## Author: Jeff Xia, jeff.xia@mcgill.ca
################################################################
#####################################################
#############processing functions####################
#####################################################
# Set up the analysis state for data uploaded as already-normalized
# ("fake upload" path): skips filtering/normalization, builds per-rank
# count tables from the phyloseq object, and caches everything to qs files
# for downstream steps.
#
# mbSetObj         main analysis object (or its name on the public web)
# isNormalized     "true" when the microbiome table is already normalized
# isNormalizedMet  "true" when the metabolomics table is already normalized
# module.type      module name forwarded to saveDataQs for cache location
# Returns the updated mbSetObj via .set.mbSetObj; 0 on validation error.
# Side effects: writes several .qs caches and mutates the global current.proc.
CreateMMPFakeFile <- function(mbSetObj,isNormalized="true",isNormalizedMet="true",module.type){
  mbSetObj <- .get.mbSetObj(mbSetObj);
  # at least one of the two tables must already be normalized
  if(isNormalized=="false" & isNormalizedMet=="false"){
    AddErrMsg("Please make sure your data has been normalized properly!");
    return(0)
  }
  current.msg <<- ""
  if(isNormalized=="true"){
    # microbiome side: reuse the original data as both filtered and normalized
    mbSetObj$dataSet$filt.data <- mbSetObj$dataSet$data.orig
    mbSetObj$dataSet$filt.msg <- "No filtration has been performed for microbiome data since it has been transformed."
    mbSetObj$dataSet$norm.phyobj <- mbSetObj$dataSet$proc.phyobj
    mbSetObj$dataSet$norm.msg <- "No normalization has been performed for microbiome data since it has been transformed."
    #make hierarchies: one phyloseq object + count table per taxonomy rank,
    #always including the raw "OTU" level
    ranks <- c(GetMetaTaxaInfo(mbSetObj), "OTU")
    ranks <- unique(ranks)
    data.list <- list()
    data.list$merged_obj <- vector(length = length(ranks), "list")
    data.list$count_tables <- vector(length = length(ranks), "list")
    names(data.list$count_tables) <- names(data.list$merged_obj) <- ranks
    for(i in 1:length(ranks)){
      phyloseq.obj <- UtilMakePhyloseqObjs(mbSetObj, ranks[i])
      data.list$merged_obj[[i]] <- phyloseq.obj
      count.table <- UtilMakeCountTables(phyloseq.obj, ranks[i])
      data.list$count_tables[[i]] <- count.table
    }
    qs::qsave(data.list,"prescale.phyobj.qs")
    current.proc$mic$data.proc<<- data.list$count_tables[["OTU"]]
    # pre-norm and norm caches are identical here since data came in normalized
    saveDataQs(data.list, "phyloseq_prenorm_objs.qs",module.type, mbSetObj$dataSet$name);
    saveDataQs(data.list, "phyloseq_objs.qs",module.type, mbSetObj$dataSet$name);
  }
  if(isNormalizedMet=="true"){
    # metabolomics side: features in rows, samples in columns
    mbSetObj$dataSet$metabolomics$filt.data <- mbSetObj$dataSet$metabolomics$norm.data <- mbSetObj$dataSet$metabolomics$data.orig; ## feature in row and sample in column
    qs::qsave( mbSetObj$dataSet$metabolomics$norm.data, file="metabo.complete.norm.qs");
    current.proc$met$data.proc<<-mbSetObj$dataSet$metabolomics$data.orig
    mbSetObj$dataSet$metabolomics$norm.msg <- "No normalization has been performed for metabolomics data since it has been transformed."
  }
  # ensure sample IDs are carried as an explicit column for later joins
  mbSetObj$dataSet$sample_data$sample_id <- rownames(mbSetObj$dataSet$sample_data);
  return(.set.mbSetObj(mbSetObj));
}
#####################################################
#############differential analysis###################
#####################################################
#'Perform differential analysis
#'@description Performs limma-based differential analysis on the normalized
#'metabolomics table and (when initDE == "1" or a single taxonomy level is
#'requested) on the microbiome count tables; optionally also on predicted
#'microbe-metabolite pairs when the GEM network overlay is enabled.
#'@param mbSetObj Input the name of the mbSetObj.
#'@param taxalvl Character, input taxonomy level
#'@param analysisVar Character, input the name of the experimental factor
#'to group the samples.
#'@author Jeff Xia \email{jeff.xia@mcgill.ca}
#'McGill University, Canada
#'License: GNU GPL (>= 2)
#'@export
PerformDEAnalyse<- function(mbSetObj, taxalvl="Genus",netType="gem",overlay,initDE=1,
                            analysisVar="CLASS",adjustedVar,alg="limma",plvl=0.05, fc.lvl=1, selected="NA",nonpar=FALSE){
  mbSetObj <- .get.mbSetObj(mbSetObj);
  require(dplyr)
  if(!exists("phyloseq_objs")){
    phyloseq_objs <- qs::qread("phyloseq_objs.qs")
  }
  # publish key parameters globally for downstream plotting/tables
  plvl<<-plvl
  analysisVar<<-analysisVar
  alg<<-alg
  metdat <- mbSetObj$dataSet$metabolomics$norm.data
  sample_data <- mbSetObj$dataSet$sample_data
  sample_type <- mbSetObj$dataSet$meta_info
  # metabolomics DE is always computed
  metdat.de <- performLimma(metdat,sample_data,sample_type,analysisVar)
  if(initDE=="1"){
    # first-time run: DE across ALL taxonomy levels
    phyloseq_objs[["res_deAnal"]] <- vector("list",length=length(phyloseq_objs$count_tables))
    names( phyloseq_objs[["res_deAnal"]]) <- names( phyloseq_objs$count_tables)
    micdat <- phyloseq_objs$count_tables
    micdat.de <- lapply(micdat,function(x) performLimma(x,sample_data,sample_type,analysisVar))
    # predDB is a global set elsewhere during prediction setup -- TODO confirm
    predres.met <- qs::qread(paste0("m2m_pred_",predDB,".qs"))
    predres.met <- lapply(predres.met,function(x) return(x$fun_prediction_met))
    predDE<- vector("list",length=length(predres.met))
    pred.dat<- vector("list",length=length(predres.met))
    if(netType=="gem" & overlay =="true"){
      if(!(file.exists(paste0("m2m_pred_",predDB,".qs")))){
        AddErrMsg("Cannot import the prediction result!")
      }else{
        # for each taxonomy level, melt per-sample microbe x metabolite
        # prediction matrices into one long table, drop all-zero pairs,
        # then run limma on the pair abundance matrix
        for(i in 1:length( predres.met)){
          taxalvl2<- names( predres.met)[i]
          m2m_pred <- predres.met[[taxalvl2]]
          m2m_pred <- lapply(m2m_pred,function(x) reshape2::melt(x) )
          m2m_for_de <- mapply(`[<-`, m2m_pred, 'sample', value = names(m2m_pred), SIMPLIFY = FALSE)
          m2m_for_de <- do.call(rbind,m2m_for_de)
          rownames(m2m_for_de) <- NULL
          # unique microbe;;metabolite pair key
          m2m_for_de$pair <- apply(m2m_for_de[,1:2],1,function(x) paste(x,collapse = ";;"))
          tokeep <- aggregate(m2m_for_de$value,list(m2m_for_de$pair),sum) %>% filter(x!=0)
          m2m_for_de <- m2m_for_de %>% filter(pair %in% tokeep$Group.1)
          m2m_pair_dat <- reshape2::dcast(m2m_for_de,pair~sample,value.var = "value")
          pred.dat[[taxalvl2]] <- m2m_pair_dat
          #qs::qsave(list(m2m_for_de=m2m_for_de,m2m_pair_dat=m2m_pair_dat),"m2m_pair_pred.qs")
          rownames(m2m_pair_dat) <- m2m_pair_dat$pair
          m2m_pair_dat$pair <- NULL
          m2m_pair_de <- performLimma(m2m_pair_dat,sample_data,sample_type,analysisVar)
          # re-attach microbe/metabolite identities for the result table
          m2m_pair_de$mic <- m2m_for_de$Var1[match(rownames(m2m_pair_de),m2m_for_de$pair)]
          m2m_pair_de$met <- m2m_for_de$Var2[match(rownames(m2m_pair_de),m2m_for_de$pair)]
          predDE[[taxalvl2]]<-m2m_pair_de
        }
        qs::qsave(list(pred.dat=pred.dat,predDE=predDE),"m2m_pair_de.qs")
      }
    }else if(netType=="kegg"){
      # KEGG network overlay: no pair-level DE performed here
    }
  }else{
    # subsequent runs: DE at a single requested taxonomy level only
    micdat <- phyloseq_objs$count_tables[[taxalvl]]
    micdat.de <- performLimma(micdat,sample_data,sample_type,analysisVar)
    # prediction pair DE is only possible above the raw OTU level
    if(netType=="gem" & overlay =="true" & taxalvl != "OTU"){
      if(!(file.exists(paste0(tolower(taxalvl),"_metabolite_pred_pair.qs")))){
        AddErrMsg("Cannot import the prediction result!")
      }else{
        m2m_pred <- qs::qread(paste0(tolower(taxalvl),"_metabolite_pred_pair.qs"))
        m2m_pred <- lapply(m2m_pred,function(x) reshape2::melt(x) )
        m2m_for_de <- mapply(`[<-`, m2m_pred, 'sample', value = names(m2m_pred), SIMPLIFY = FALSE)
        m2m_for_de <- do.call(rbind,m2m_for_de)
        rownames(m2m_for_de) <- NULL
        m2m_for_de$pair <- apply(m2m_for_de[,1:2],1,function(x) paste(x,collapse = ";;"))
        # keep only pairs with a non-zero total across samples
        tokeep <- aggregate(m2m_for_de$value,list(m2m_for_de$pair),sum) %>% filter(x!=0)
        m2m_for_de <- m2m_for_de %>% filter(pair %in% tokeep$Group.1)
        m2m_pair_dat <- reshape2::dcast(m2m_for_de,pair~sample,value.var = "value")
        qs::qsave(list(m2m_for_de=m2m_for_de,m2m_pair_dat=m2m_pair_dat),"m2m_pair_pred.qs")
        rownames(m2m_pair_dat) <- m2m_pair_dat$pair
        m2m_pair_dat$pair <- NULL
        m2m_pair_de <- performLimma(m2m_pair_dat,sample_data,sample_type,analysisVar)
        m2m_pair_de$mic <- m2m_for_de$Var1[match(rownames(m2m_pair_de),m2m_for_de$pair)]
        m2m_pair_de$met <- m2m_for_de$Var2[match(rownames(m2m_pair_de),m2m_for_de$pair)]
        mbSetObj$analSet$m2m_pair_de <- m2m_pair_de
        qs::qsave(m2m_pair_de,"m2m_pair_de.qs")
      }
    }
  }
  #fast.write(micdat.de, file=paste0(taxalvl,adjustedVar,"_",alg,"_Res.csv"));
  fast.write(metdat.de, file=paste0("metabolite",'_',analysisVar,"_",alg,"_Res.csv"));
  # NOTE(review): when initDE == "1", micdat.de is a list over all ranks but is
  # stored in the same slot as the single-rank data frame -- confirm intended
  phyloseq_objs$res_deAnal <- micdat.de
  mbSetObj$dataSet$metabolomics$res_deAnal <- metdat.de
  qs::qsave(phyloseq_objs,"phyloseq_objs.qs")
  return(.set.mbSetObj(mbSetObj))
}
# Perform limma differential analysis on predicted microbe-metabolite
# potential pairs, either across all taxonomy levels (taxalvl == "all") or
# at one requested level. Pair abundance matrices are built by melting the
# per-sample prediction matrices, dropping all-zero pairs, and casting to a
# pair x sample table.
#
# mbSetObj     main analysis object
# taxalvl      taxonomy level, "all", or "null" (resolved from session state)
# analysisVar  ignored here; the factor is taken from current.proc$meta_para
# alg/plvl/selected/nonpar  kept for interface compatibility
# Returns the updated mbSetObj; results are cached in m2m_pair_de.qs or
# current.proc depending on the branch.
PerformPairDEAnalyse <- function(mbSetObj, taxalvl, analysisVar,alg="limma",plvl=0.05, selected="NA",nonpar=FALSE){
  mbSetObj <- .get.mbSetObj(mbSetObj);
  require(dplyr)
  if(!exists("phyloseq_objs")){
    phyloseq_objs <- qs::qread("phyloseq_objs.qs")
  }
  if(taxalvl=="null" | is.null(taxalvl)){
    # BUGFIX: was `exist(...)` -- an undefined function -- which errored
    # whenever taxalvl was "null". A NULL check on the session value is
    # what was intended: fall back to the previously chosen level, else the
    # second-deepest available level.
    if(!is.null(current.proc$taxalvl)){
      taxalvl = current.proc$taxalvl
    }else{
      taxalvl=names(phyloseq_objs$count_tables)[length(phyloseq_objs$count_tables)-1]
    }
  }
  # the grouping factor is always taken from the stored comparison setup
  analysisVar <- current.proc$meta_para$analysis.var
  #if(analysisVar=="null" | is.null(analysisVar)){
  #  analysisVar = names(current.proc$sample)[1]
  # }
  sample_data <- mbSetObj$dataSet$sample_data
  sample_type <- mbSetObj$dataSet$meta_info
  #tempnm <- paste0(analysisVar,"_",alg)
  predDB<- current.proc$predDB
  if(micDataType=="ko"){
    AddErrMsg("Prediction is supportive for KO abundance table! Please check your data type")
  }else{
    if(!(file.exists(paste0("m2m_pred_",predDB,".qs")))){
      AddErrMsg("Cannot import the prediction result!")
    }else{
      if(taxalvl=="all"){
        # run pair-level DE for every taxonomy level in the prediction cache
        predres.met <- qs::qread(paste0("m2m_pred_",predDB,".qs"))
        predres.met <- lapply(predres.met,function(x) return(x$fun_prediction_met))
        predDE<- vector("list",length=length(predres.met))
        pred.dat<- vector("list",length=length(predres.met))
        names(predDE) <- names(pred.dat) <- names(predres.met)
        for(tax in names( predres.met)){
          m2m_pred <- predres.met[[tax]]
          m2m_pred <- lapply(m2m_pred,function(x) reshape2::melt(x) )
          m2m_for_de <- mapply(`[<-`, m2m_pred, 'sample', value = names(m2m_pred), SIMPLIFY = FALSE)
          m2m_for_de <- do.call(rbind,m2m_for_de)
          rownames(m2m_for_de) <- NULL
          # unique microbe;;metabolite key; keep only pairs with non-zero total
          m2m_for_de$pair <- apply(m2m_for_de[,1:2],1,function(x) paste(x,collapse = ";;"))
          tokeep <- aggregate(m2m_for_de$value,list(m2m_for_de$pair),sum) %>% filter(x!=0)
          m2m_for_de <- m2m_for_de %>% filter(pair %in% tokeep$Group.1)
          m2m_pair_dat <- reshape2::dcast(m2m_for_de,pair~sample,value.var = "value")
          # m2m_pair_dat[,-1] <- t(apply(m2m_pair_dat[,-1],1,function(x) ReScale(x,0,1)))
          pred.dat[[tax]] <- m2m_pair_dat
          rownames(m2m_pair_dat) <- m2m_pair_dat$pair
          m2m_pair_dat$pair <- NULL
          m2m_pair_de <- performLimma(m2m_pair_dat,sample_data,sample_type,analysisVar)
          # re-attach microbe/metabolite identities for the result table
          m2m_pair_de$mic <- m2m_for_de$Var1[match(rownames(m2m_pair_de),m2m_for_de$pair)]
          m2m_pair_de$met <- m2m_for_de$Var2[match(rownames(m2m_pair_de),m2m_for_de$pair)]
          predDE[[tax]]<-m2m_pair_de
        }
        m2m_pair_de <- list()
        m2m_pair_de$pred.dat <- pred.dat
        m2m_pair_de$predDE <- predDE
        qs::qsave(m2m_pair_de,"m2m_pair_de.qs")
      }else{
        # single level: prefer the in-memory prediction if present
        if(!exists("predres",current.proc)){
          predres.met <- qs::qread(paste0("m2m_pred_",predDB,".qs"))
          m2m_pred <- predres.met[[taxalvl]]$fun_prediction_met
        }else{
          m2m_pred <- current.proc$predres$fun_prediction_met
        }
        m2m_pred <- lapply(m2m_pred,function(x) reshape2::melt(x) )
        m2m_for_de <- mapply(`[<-`, m2m_pred, 'sample', value = names(m2m_pred), SIMPLIFY = FALSE)
        m2m_for_de <- do.call(rbind,m2m_for_de)
        rownames(m2m_for_de) <- NULL
        m2m_for_de$pair <- apply(m2m_for_de[,1:2],1,function(x) paste(x,collapse = ";;"))
        tokeep <- aggregate(m2m_for_de$value,list(m2m_for_de$pair),sum) %>% filter(x!=0)
        m2m_for_de <- m2m_for_de %>% filter(pair %in% tokeep$Group.1)
        m2m_pair_dat <- reshape2::dcast(m2m_for_de,pair~sample,value.var = "value")
        # m2m_pair_dat[,-1] <- t(apply(m2m_pair_dat[,-1],1,function(x) ReScale(x,0,1)))
        pred.dat <- m2m_pair_dat
        rownames(m2m_pair_dat) <- m2m_pair_dat$pair
        m2m_pair_dat$pair <- NULL
        m2m_pair_de <- performLimma(m2m_pair_dat,sample_data,sample_type,analysisVar)
        m2m_pair_de$mic <- m2m_for_de$Var1[match(rownames(m2m_pair_de),m2m_for_de$pair)]
        m2m_pair_de$met <- m2m_for_de$Var2[match(rownames(m2m_pair_de),m2m_for_de$pair)]
        current.proc$pred.dat <<- pred.dat
        current.proc$predDE <<- m2m_pair_de
        fast.write(m2m_pair_de, file=paste0("prediction_differential.csv"));
      }
    }
  }
  return(.set.mbSetObj(mbSetObj))
}
# Prepare and launch MaAsLin2-style differential abundance analysis for the
# microbiome tables: resolves fixed/adjusted effects and reference levels
# from the metadata, stores the comparison setup in current.proc$meta_para,
# then builds MaAsLin parameter lists via doMaAslin for each count table.
#
# mbSetObj      main analysis object
# taxalvl       taxonomy level, or "all" to run every level
# initDE        "1" on the first run (forces all levels)
# analysis.var  primary metadata variable; comp/ref define the contrast
# block         random-effect (blocking) variable, "NA" for none
# is.norm       "false" -> use pre-normalized tables and let MaAsLin
#               apply TSS/LOG; otherwise pass data through untransformed
# Returns the doMaAslin result (parameter-list status codes / per-level list).
CompareMic <- function(mbSetObj, taxalvl,initDE=1,
                       analysis.var,comp = NULL,ref = NULL,block = "NA",
                       alg="maaslin",plvl=0.05,
                       is.norm=FALSE,selected="NA",nonpar=FALSE){
  mbSetObj <- .get.mbSetObj(mbSetObj);
  current.proc$mic$alg<<-alg
  current.proc$mic$plvl<<-plvl
  sample_data <- data.frame(mbSetObj$dataSet$sample_data)
  sample_type <- mbSetObj$dataSet$meta_info
  meta_type <- mbSetObj$dataSet$meta.types
  # BUGFIX: adj.vars was assigned from adj.vec unconditionally, which errored
  # when the global adj.vec did not exist despite the exists() guard above it.
  # Mirror the correct pattern used in performLimma.
  if (!exists('adj.vec')) {
    adj.bool = F;
    adj.vars <- NULL;
  } else {
    if (length(adj.vec) > 0) {
      adj.bool = T;
    } else {
      adj.bool = F;
    }
    adj.vars <- adj.vec;
  }
  meta.nms <- colnames(sample_data)
  # sample_data may be a phyloseq sample_data object; extract the raw frame
  input.meta <-sample_data@.Data %>% as.data.frame()
  colnames(input.meta) <- meta.nms
  rownames(input.meta) <- input.meta$sample_id
  if(adj.bool){
    # fixed effects = primary variable plus covariates, types aligned
    fixed.effects <- c(analysis.var, adj.vars)
    fixed.types <- meta_type[names(meta_type) %in% fixed.effects]
    fixed.types <- fixed.types[match(fixed.effects, names(fixed.types))]
  } else { # to do still
    fixed.effects <- analysis.var
    fixed.types <- meta_type[names(meta_type) == analysis.var]
  }
  analysis.type <- fixed.types[fixed.effects == analysis.var]
  disc.effects <- fixed.effects[fixed.types == "disc"]
  # build refs vector of "variable,reference_level" strings for MaAsLin
  # (may need to add for blocking too)
  if(length(disc.effects) > 0){
    if(analysis.type == "disc"){
      refs <- paste0(analysis.var, ",", ref)
      if(length(disc.effects) > 1){
        for(i in c(2:length(disc.effects))){
          ref.temp <- paste0(disc.effects[i], ",", levels(unlist(c(input.meta[,disc.effects[i]])))[1])
          refs <- c(refs, ref.temp)
        }
      }
    } else {
      refs <- c()
      if(length(disc.effects) > 1){
        for(i in c(1:length(disc.effects))){
          ref.temp <- paste0(disc.effects[i], ",", levels(unlist(c(input.meta[,disc.effects[i]])))[1])
          refs <- c(refs, ref.temp)
        }
      }
    }
  }
  # MaAslin does not require samples or orders to exactly match - it takes care of this
  # set normalization/transformation: pre-norm data is normalized by MaAsLin
  if(is.norm == "false"){
    phyloseq_objs <- qs::qread("phyloseq_prenorm_objs.qs")
    norm.method = "TSS"
    trans.method = "LOG"
  } else {
    phyloseq_objs <- qs::qread("phyloseq_objs.qs")
    norm.method = "NONE"
    trans.method = "NONE"
  }
  # persist the full comparison setup for doMaAslin and later result parsing
  current.proc$meta_para<<-list(analysis.var=analysis.var,sample_data=sample_data,
                                sample_type=sample_type, input.meta=input.meta,
                                fixed.effects=fixed.effects,analysis.type=analysis.type,
                                disc.effects=disc.effects,comp=comp,ref=ref,refs=refs,block=block,
                                norm.method=norm.method,trans.method =trans.method)
  if(micDataType=="ko"){
    # KO tables have no taxonomy hierarchy: use the raw table only
    micdat <- phyloseq_objs$count_tables[["OTU"]]
    micdat.de <- doMaAslin(micdat,plvl)
  }else{
    if(initDE=="1"|taxalvl=="all"){
      micdat <- phyloseq_objs$count_tables
      micdat.de <- lapply(micdat,function(x) doMaAslin(x,plvl))
    }else{
      micdat <- phyloseq_objs$count_tables[[taxalvl]]
      micdat.de <- doMaAslin(micdat,plvl)
    }
  }
  return(micdat.de)
}
# Run limma differential analysis on the processed metabolomics table and
# record significant features (FDR < plvl) in both the mbSetObj and the
# global current.proc state. Results are written to limma_output.csv.
#
# mbSetObj     main analysis object
# analysisVar  metadata variable for the comparison; "null"/NULL falls back
#              to the first variable stored in current.proc$meta_para
# alg/selected/nonpar  kept for interface compatibility (limma only here)
# Returns the updated mbSetObj via .set.mbSetObj.
CompareMet <- function(mbSetObj, analysisVar,
                       alg="limma",plvl=0.05, selected="NA",nonpar=FALSE){
  mbSetObj <- .get.mbSetObj(mbSetObj);
  # current.proc$sample<<-data.frame(mbSetObj$dataSet$sample_data)
  require(dplyr)
  if(analysisVar=="null" | is.null(analysisVar)){
    analysisVar = names(current.proc$meta_para$sample_data)[1]
  }
  current.proc$met$plvl<<-plvl
  current.proc$met$alg<<-alg
  metdat <-current.proc$met$data.proc
  sample_data <- mbSetObj$dataSet$sample_data
  sample_type <- mbSetObj$dataSet$meta_info
  metdat.de <- performLimma(metdat,sample_data,sample_type,analysisVar)
  fast.write(metdat.de, file="limma_output.csv");
  current.proc$met$res_deAnal <<- metdat.de
  mbSetObj$dataSet$metabolomics$resTable <- metdat.de
  # significant features at the requested FDR cutoff
  sigfeat <- rownames(metdat.de)[metdat.de$FDR < plvl];
  sig.count <- length(sigfeat);
  if(sig.count == 0){
    current.msg <<- "No significant features were identified using the given p value cutoff.";
  }else{
    # wording depends on whether input was peak intensities or metabolites
    if(metDataType=="peak"){
      current.msg <<- c(current.msg,paste("A total of", sig.count, "significant ", " peaks were identified!"));
    }else{
      current.msg <<- c(current.msg,paste("A total of", sig.count, "significant ", " metabolites were identified!"));
    }
  }
  mbSetObj$dataSet$metabolomics$sigfeat <- sigfeat
  mbSetObj$dataSet$metabolomics$sig.count <- sig.count
  current.proc$met$sigfeat <<- sigfeat
  print("CompareMet done")
  return(.set.mbSetObj(mbSetObj))
}
# Run a limma moderated t/F test on a feature x sample matrix.
#
# data         numeric matrix/data frame, features in rows, samples in columns
# sample_data  sample metadata (rows = samples; sample_id column or rownames)
# sample_type  list with disc.inx/cont.inx logical vectors per metadata column
# analysisVar  name of the metadata column to test
# Covariate adjustment is controlled by the global adj.vec (if present and
# non-empty). For a discrete variable with 2 groups the result contains
# log2 fold changes; with >2 groups, F statistics are reported instead.
# Returns a data frame (P_value, FDR, T.Stats, and Log2FC or F stats),
# sorted by FDR, rownames = feature IDs.
performLimma <-function(data,sample_data,sample_type,analysisVar){
  require(limma);
  covariates <- data.frame(sample_data)
  if(is.null(covariates$sample_id)){
    covariates$sample_id <- rownames(covariates)
  }
  # turn the disc.inx/cont.inx index vectors into named lookup lists
  sample_type <- lapply(sample_type, function(x) return(x[x]))
  for(i in 1:(ncol(covariates)-1)){ # ensure all columns are the right type
    if(names(covariates)[i] %in% names(sample_type[["disc.inx"]])){
      covariates[,i] <- covariates[,i] %>% make.names() %>% factor()
    } else {
      covariates[,i] <- covariates[,i] %>% as.character() %>% as.numeric()
    }
  }
  # drop the helper sample_id column again
  covariates <- data.frame(covariates[,-ncol(covariates),drop=F])
  if(!exists('adj.vec')){
    adj.bool = F;
  }else{
    if(length(adj.vec) > 0){
      adj.bool = T;
      adj.vars <- adj.vec;
    }else{
      adj.bool = F;
    }
  }
  # align samples between the feature table and the metadata
  feature_table = as.matrix(data[,which(colnames(data) %in% rownames(covariates))])
  covariates <- covariates[match(colnames(feature_table), rownames(covariates)),,drop=F]
  analysis.var <- analysisVar
  analysis.type <- ifelse(analysis.var %in% names(sample_type[["disc.inx"]]),"disc","count")
  if(adj.bool){
    vars <- c(analysis.var, adj.vars)
  }else{
    vars <- analysis.var
  }
  if(analysis.type == "disc"){
    covariates[, analysis.var] <- covariates[, analysis.var] %>% make.names() %>% factor();
    # group names, preferring the user-chosen comparison/reference ordering
    grp.nms <- unique(c(current.proc$meta_para$comp,current.proc$meta_para$ref,levels(covariates[, analysis.var])))
    design <- model.matrix(formula(paste0("~ 0", paste0(" + ", vars, collapse = ""))), data =covariates );
    if(adj.bool){
      # design columns for each adjusted variable: non-reference factor
      # levels, or the variable name itself for continuous covariates
      # (a dead duplicate of this sapply was removed here)
      nms=sapply(seq(adj.vars), function(x) {
        if(!(is.null(levels(covariates[,adj.vars[x]])))){
          return(levels(covariates[,adj.vars[x]])[-1])
        }else{
          return(adj.vars[x])
        }
      })
      colnames(design) = c(grp.nms[order(grp.nms)],unlist(nms))
    }else{
      colnames(design) = grp.nms[order(grp.nms)]
    }
    # build all pairwise group contrasts
    inx = 0;
    myargs <- list();
    for(m in 1:(length(grp.nms)-1)){
      for(n in (m+1):length(grp.nms)){
        inx <- inx + 1;
        myargs[[inx]] <- paste(grp.nms[m], "-", grp.nms[n], sep="")
      }
    }
    myargs[["levels"]] <- design;
    contrast.matrix <- do.call(makeContrasts, myargs);
    fit <- lmFit(feature_table, design)
    fit <- contrasts.fit(fit, contrast.matrix);
    fit <- eBayes(fit);
    if(length(levels( covariates[, analysis.var]))==2){
      # two groups: report the single contrast with log fold change
      topFeatures <- topTable(fit, number = Inf);
      res = data.frame( P_value=signif(topFeatures[,"P.Value"] , digits = 3),
                        FDR=signif(topFeatures[,"adj.P.Val"], digits = 3),
                        T.Stats=signif(topFeatures[,"t"], digits = 3),
                        Log2FC=signif(topFeatures[,"logFC"], digits = 3))
    }else{
      # >2 groups: first contrast stats plus overall F test
      res <- data.frame(P_value=signif(fit$p.value[,1],digits = 3),
                        FDR=signif(p.adjust(fit$p.value[,1],"fdr"),digits = 3),
                        T.Stats=signif(fit$t[,1],digits = 3),
                        F.Stats=signif(fit$F,digits = 3),
                        F.Pval=signif(fit$F.p.value,digits = 3))
    }
  } else {
    # continuous analysis variable: linear trend via limma coefficient
    covariates[, analysis.var] <- covariates[, analysis.var] %>% as.numeric();
    design <- model.matrix(formula(paste0("~ 0", paste0(" + ", vars, collapse = ""))), data = covariates);
    fit <- eBayes(lmFit(feature_table, design));
    rest <- topTable(fit, number = Inf, coef = analysis.var);
    colnames(rest)[1] <- analysis.var;
    ### get results with no adjustment
    # design <- model.matrix(formula(paste0("~ 0", paste0(" + ", analysis.var, collapse = ""))), data = covariates);
    # fit <- eBayes(lmFit(feature_table, design));
    # topFeatures <- topTable(efit, number = Inf, adjust.method = "fdr");
  }
  if(length(which(duplicated(rownames(fit$p.value))))>0){
    current.msg<<-"Duplicate features names are not allowed! Please double check your input!"
    return()
  }else{
    rownames(res) <- rownames(fit$p.value)
  }
  res <- na.omit(res)
  res <- res[order(res[,2], decreasing=FALSE),]
  # numeric NaN coerces to "NaN" in the comparison; treat as non-significant
  res[res == "NaN"] = 1
  return(res)
}
# Assemble the MaAsLin2 parameter list for one count table, using the
# comparison setup stored in current.proc$meta_para, and publish it in the
# global maaslin.para for the actual (deferred) MaAsLin run.
#
# input.data  feature x sample count table
# thresh      significance threshold (currently unused beyond coercion)
# adj.bool    kept for interface compatibility
# Returns 1 when no blocking factor is used, 2 when a random-effect
# ("check"/"test" pair) configuration was prepared.
doMaAslin <- function(input.data,thresh = 0.05,adj.bool=F){
  require(dplyr);
  require(R.utils);
  if(.on.public.web){
    # make this lazy load
    if(!exists(".prepare.maaslin2")){ # public web on same user dir
      .load.scripts.on.demand("utils_maaslin.Rc");
    }
  }
  thresh <- as.numeric(thresh);
  input.data <- as.data.frame(input.data)
  block <- current.proc$meta_para$block
  disc.effects <- current.proc$meta_para$disc.effects
  analysis.var <- current.proc$meta_para$analysis.var
  if(block == "NA"){
    if(length(disc.effects) > 0){ # case: discrete variables, no blocking factor
      maaslin.para<<- list(input_data = input.data,
                           input_metadata = current.proc$meta_para$input.meta,
                           fixed_effects = current.proc$meta_para$fixed.effects,
                           reference = current.proc$meta_para$refs,
                           max_significance = 0.05,
                           min_abundance = 0.0,
                           min_prevalence = 0.0,
                           min_variance = 0.0,
                           normalization = current.proc$meta_para$norm.method,
                           transform = current.proc$meta_para$trans.method)
      return(1)
    } else { # case: no discrete variables, no blocking factor
      # BUGFIX: transform referenced a non-existent local `trans.method`
      # (it lives in current.proc$meta_para), which errored on this path.
      # NOTE(review): this branch also omits input_metadata, unlike the
      # others -- confirm whether that is intentional.
      maaslin.para<<- list(input_data = input.data,
                           fixed_effects = current.proc$meta_para$fixed.effects,
                           max_significance = 0.05,
                           min_abundance = 0.0,
                           min_prevalence = 0.0,
                           min_variance = 0.0,
                           normalization = current.proc$meta_para$norm.method,
                           transform = current.proc$meta_para$trans.method)
      return(1)
    }
  } else { # case: discrete variables, blocking factor (blocking factor must be discrete)
    # "check" uses a single feature row for a fast dry run; "test" is the full run
    maaslin.para <<-list(check= list(input_data = input.data[1,],
                                     input_metadata = current.proc$meta_para$input.meta,
                                     fixed_effects = current.proc$meta_para$fixed.effects,
                                     random_effects = block,
                                     reference =current.proc$meta_para$refs,
                                     max_significance = 0.05,
                                     min_abundance = 0.0,
                                     min_prevalence = 0.0,
                                     min_variance = 0.0,
                                     normalization = current.proc$meta_para$norm.method,
                                     transform = current.proc$meta_para$trans.method),
                         test=list(input_data = input.data,
                                   input_metadata = current.proc$meta_para$input.meta,
                                   fixed_effects = current.proc$meta_para$fixed.effects,
                                   random_effects = block,
                                   reference = current.proc$meta_para$refs,
                                   max_significance = 0.05,
                                   min_abundance = 0.0,
                                   min_prevalence = 0.0,
                                   min_variance = 0.0,
                                   normalization = current.proc$meta_para$norm.method,
                                   transform = current.proc$meta_para$trans.method)
    )
    return(2)
  }
}
# Collect the differential-abundance results for display: loads the cached
# result table and significant-feature list for the requested taxonomy
# level, writes the CSV output, and prepares per-feature data for boxplots.
#
# mbSetObj     main analysis object
# micDataType  "otu" (taxonomy-resolved) or "ko" (functional table)
# taxalvl      taxonomy level; NULL/"null" falls back to the deepest level
# is.norm      whether to read normalized or pre-normalized count tables
# Returns the updated mbSetObj via .set.mbSetObj.
PrepareResTable <- function(mbSetObj,micDataType,taxalvl,is.norm=F){
  mbSetObj <- .get.mbSetObj(mbSetObj);
  mbSetObj$analSet$maaslin$taxalvl <- taxalvl;
  if(micDataType=="otu"){
    # default to the deepest taxonomy rank in the taxa table
    if(is.null(taxalvl)|taxalvl=="null"){
      taxalvl = colnames(mbSetObj$dataSet$taxa_table)[length(colnames(mbSetObj$dataSet$taxa_table))]
    }
    resTab = qs::qread("phyloseq_objs.qs")$res_deAnal[[taxalvl]]
    sigfeat <- qs::qread("phyloseq_objs.qs")$sigfeat[[taxalvl]]
    fileName <- paste0(taxalvl,"_maaslin_output.csv");
  }else{
    # NOTE(review): this comparison has no effect (assignment was likely
    # intended); KO results are read from the global current.proc instead
    taxalvl =="OTU"
    resTab = current.proc$mic$res_deAnal
    sigfeat <- current.proc$mic$sigfeat
    fileName <- paste0("maaslin_output.csv");
  }
  sig.count <- length(sigfeat);
  if(sig.count == 0){
    current.msg <<- "No significant features were identified using the given p value cutoff.";
  }else{
    current.msg <<- paste("A total of", sig.count,"significant ", tolower(taxalvl), " were identified!");
  }
  # choose the matching count-table cache for the boxplot data
  if(is.norm){
    phylonm <- "phyloseq_objs.qs"
  }else{
    phylonm <- "phyloseq_prenorm_objs.qs"
  }
  input.data = qs::qread(phylonm)$count_tables[[taxalvl]]
  analysis.var = current.proc$meta_para$analysis.var
  # put results in mbSetObj, learn pattern of analysis set
  fast.write(resTab, file = fileName);
  compMicFile<<-fileName
  # process data for individual feature boxplot: transpose to sample x
  # feature and attach the class labels from the normalized phyloseq object
  taxrank_boxplot <- taxalvl;
  claslbl_boxplot <- as.factor(sample_data(mbSetObj$dataSet$norm.phyobj)[[analysis.var]]);
  nm_boxplot <- rownames(input.data);
  dat3t_boxplot <- as.data.frame(t(input.data),check.names=FALSE);
  colnames(dat3t_boxplot) <- nm_boxplot;
  box_data <- dat3t_boxplot;
  box_data$class <- claslbl_boxplot;
  box_data$norm <- is.norm;
  message("Result table done")
  mbSetObj$analSet$multiboxdata <- box_data;
  mbSetObj$analSet$sig.count <- sig.count;
  mbSetObj$analSet$resTable <- resTab;
  return(.set.mbSetObj(mbSetObj))
}
# Post-process raw MaAsLin2 results: keep only rows for the analysis
# variable (and, for discrete analyses, the requested comparison level),
# round the statistics, rename columns, write the CSV, and cache the
# result table plus significant features (FDR < stored plvl).
#
# mbSetObj      main analysis object (results in analSet$maaslin$results)
# taxalvl       taxonomy level used in the output file name
# analysis.var  metadata variable the results are filtered to
# Returns the updated mbSetObj via .set.mbSetObj.
ProcessMaaslinRes <- function(mbSetObj,taxalvl,analysis.var){
  mbSetObj <- .get.mbSetObj(mbSetObj);
  res <- mbSetObj$analSet$maaslin$results
  # (dead locals `input.data`/`inds` removed -- they were never used)
  # filter results to get only ones related to analysis var
  res <- res[res$metadata == analysis.var, ];
  # make res pretty
  res$coef <- signif(res$coef, digits = 3);
  res$stderr <- signif(res$stderr, digits = 3);
  res$pval <- signif(res$pval, digits = 3);
  res$qval <- signif(res$qval, digits = 3);
  if(current.proc$meta_para$analysis.type == "disc"){
    # discrete: keep only rows for the chosen comparison level
    res <- res[res$value == current.proc$meta_para$comp, ];
    rownames(res) <- res$feature;
    res <- res[ ,c("coef", "stderr", "pval", "qval")];
    colnames(res) <- c("Log2FC", "St.Error", "P_value", "FDR");
  } else {
    rownames(res) <- res$feature;
    res <- res[ ,c("coef", "stderr", "pval", "qval")];
    colnames(res) <- c("Coefficient", "St.Error", "P_value", "FDR");
  }
  res = res[order(res$P_value),]
  # write out/save results
  fileName <-paste0(taxalvl,"_maaslin_output.csv");
  fast.write(res, file = fileName);
  plvl<- current.proc$mic$plvl
  if(micDataType=="ko"){
    # KO results live in the global current.proc cache
    current.proc$mic$res_deAnal <<- res
    current.proc$mic$sigfeat <<- rownames(current.proc$mic$res_deAnal)[current.proc$mic$res_deAnal$FDR< plvl]
  }else{
    # taxonomy results are stored back into the phyloseq cache per level
    phyloseq_objs <- qs::qread("phyloseq_objs.qs")
    phyloseq_objs$res_deAnal[[taxalvl]] <- res
    phyloseq_objs$sigfeat[[taxalvl]] <- rownames(phyloseq_objs$res_deAnal[[taxalvl]])[phyloseq_objs$res_deAnal[[taxalvl]]$FDR< plvl]
    qs::qsave(phyloseq_objs,"phyloseq_objs.qs")
  }
  print(paste0("CompareMic ", taxalvl," done!"))
  mbSetObj$analSet$maaslin$taxalvl <- "OTU"
  return(.set.mbSetObj(mbSetObj))
}
#####################################################
##################Prediction#########################
#####################################################
#####################################################
#####lib path
# Resolve the reference-library path for metabolic-model (GEM) data:
# a developer's local directory takes precedence, otherwise fall back to
# the server-relative lib directory. Assigned globally via <<-.
if(file.exists("/Users/lzy/Documents/examples_data_microbiomeanalyst")){
  lib.path.mmp <<- "/Users/lzy/Documents/examples_data_microbiomeanalyst/gem_m2m/"
}else if(file.exists("../../lib/mmp/")){
  lib.path.mmp <<- "../../lib/mmp/"
}
# Map user metabolite identifiers to the internal IDs of the selected
# network model.
#
# netModel  "gem" (genome-scale metabolic models) or "keggNet" (KEGG network)
# predDB    GEM database: "agora" or "embl" (used when netModel == "gem")
# IDtype    input identifier type: "name", "kegg", or "hmdb"
# met.vec   metabolite identifiers; ignored when inputType == "table"
#           (then taken from the processed metabolomics table)
# For table input: caches the mapping, stores it in current.proc, writes a
# CSV, and returns 1. Otherwise returns the mapping data frame directly.
MetaboIDmap <- function(netModel,predDB,IDtype,met.vec=NA){
  # met.vec <- rownames(qs::qread("metabo.complete.norm.qs"))
  if(inputType=="table"){
    met.vec <- rownames(current.proc$met$data.proc)
  }else{
    met.vec <- met.vec
  }
  if(netModel=="gem"){
    # restrict matches to metabolites actually present in the chosen GEM db
    if(predDB=="agora"){
      metdb <- qs::qread(paste0(lib.path.mmp,"agora.met.qs"))
    }else if(predDB=="embl"){
      metdb <- qs::qread(paste0(lib.path.mmp,"embl.met.qs"))
    }
    if(IDtype=="name"){
      # case-insensitive name -> GEM metabolite ID via the synonym table
      metInfo <- qs::qread(paste0(lib.path.mmp,"synonymGem.qs"));
      met.map <- data.frame(Query=met.vec,Match=met.vec,stringsAsFactors = F)
      met.map$Match <- metInfo$metID[match(tolower(met.map$Query),tolower(metInfo$Name))]
      met.map <- met.map[which(met.map$Match %in% metdb),]
      map.l <- length(unique(met.map$Match))
    }else if(IDtype=="kegg"){
      metInfo <- qs::qread(paste0(lib.path.mmp,"gem2kegg.qs"));
      met.map <- data.frame(Query=met.vec,Match=met.vec,stringsAsFactors = F)
      met.map$Match <- metInfo$metID[match(met.map$Query,metInfo$KEGG)]
      met.map <- met.map[which(met.map$Match %in% metdb),]
      map.l <- length(unique(met.map$KEGG))
    }else if(IDtype=="hmdb"){
      metInfo <- qs::qread(paste0(lib.path.mmp,"gem2hmdb.qs"));
      met.map <- data.frame(Query=met.vec,Match=met.vec,stringsAsFactors = F)
      met.map$Match <- metInfo$metID[match(met.map$Query,metInfo$HMDB)]
      met.map <- met.map[which(met.map$Match %in% metdb),]
      map.l <- length(unique(met.map$HMDB))
    }
  }else if(netModel=="keggNet"){
    # KEGG network: carry Name and network Node id alongside the match
    if(IDtype=="name"){
      metInfo <- qs::qread(paste0(lib.path.mmp,"general_kegg2name.qs"));
      met.map <- data.frame(Query=met.vec,Match=met.vec,stringsAsFactors = F)
      met.map$Match <- metInfo$ID[match(tolower(met.map$Query),tolower(metInfo$Name))]
      met.map$Name <- met.map$Query
      met.map$Node <- metInfo$id[match(met.map$Query,metInfo$Name)]
      met.map <- met.map[!(is.na(met.map$Match)),]
      map.l <- length(unique(met.map$Match))
    }else if(IDtype=="kegg"){
      metInfo <- qs::qread(paste0(lib.path.mmp,"general_kegg2name.qs"));
      met.map <- data.frame(Query=met.vec,Match=met.vec,Name=met.vec,stringsAsFactors = F)
      met.map$Name <- metInfo$Name[match(met.map$Query,metInfo$ID)]
      met.map$Node <- metInfo$node[match(met.map$Query,metInfo$ID)]
      met.map <- met.map[!(is.na(met.map$Name)),]
      map.l <- length(unique(met.map$Match))
    }
  }
  if(inputType=="table"){
    qs::qsave(met.map,paste0(netModel,".met.map.qs"))
    current.proc[[netModel]]<<-met.map
    fast.write(met.map, file=paste0(netModel,"_metabo_match_result.csv"))
    return(1)
  }else{
    return(met.map)
  }
}
# Map taxa names from the count tables to the selected network model.
#
# netModel  "gem" (matched per rank via doGemNameMatch) or "keggNet"
#           (matched via doKeggNameMatch at one rank)
# predDB    GEM database identifier (used when netModel == "gem")
# taxalvl   taxonomy level; "all" maps every rank (GEM), "default" uses the
#           deepest available rank (KEGG)
# Side effects: saves <netModel>.mic.map.qs and, for KEGG, sets the global
# sig.mic vector. Returns 1 on success, 0 when no KEGG hit was found.
MicIDmap <- function(netModel,predDB,taxalvl="all"){
  load_stringr();
  if(!exists("phyloseq_objs")){
    phyloseq_objs <- qs::qread("phyloseq_objs.qs")
  }
  # per-rank name lists; element [[1]] = raw names, [[2]] = cleaned names
  mic.vec <- lapply(phyloseq_objs$count_tables, function(x) return(list(rownames(x))))
  mic.vec[["OTU"]] <- NULL
  lvlnm <- c("phylum","class","order","family","genus","species")
  lvlidx <- match(tolower(names(mic.vec)),lvlnm)
  # Greengenes-style rank prefixes to strip (e.g. "g__Bacteroides")
  lvlppl <- c("p__","c__","o__","f__","g__","s__")
  lvlppl2 <- c("p_","c_","o_","f_","g_","s_")
  for(i in 1:length(mic.vec)){
    mic.vec[[i]][[2]] <- gsub(paste0("^",lvlppl[lvlidx[i]]),"", mic.vec[[i]][[1]])
    mic.vec[[i]][[2]] <- gsub(paste0("^",lvlppl2[lvlidx[i]]),"", mic.vec[[i]][[2]])
    mic.vec[[i]][[2]] <- str_trim(mic.vec[[i]][[2]],side="both")
    # mic.vec[[i]][[2]] <- gsub("_"," ",mic.vec[[i]][[2]])
    # mic.vec[[i]][[2]] <- gsub("\\.","",mic.vec[[i]][[2]])
  }
  if(netModel=="gem"){
    if(taxalvl=="all"){
      # match every rank against the GEM taxonomy tables
      mic.map <- list()
      for(i in 1:length(mic.vec)){
        mic.map[[i]] <- doGemNameMatch(mic.vec[[i]],lvlidx[i],predDB)
      }
      names(mic.map)<-lvlnm[lvlidx]
      # map_num <- orig.num <-setNames(rep(0,6),lvlnm)
      # orig.num[lvlidx] <- unlist(lapply(mic.map,function(x) length(unique(x$Query))))
      # map_num[lvlidx] <- unlist(lapply(mic.map,function(x) length(which(!(is.na(x$Match))))))
      #
    }
  }else if(netModel=="keggNet"){
    if(taxalvl=="default"){
      # deepest available rank
      taxalvl = names(mic.vec)[length(mic.vec)]
    }
    # reuse an existing KEGG mapping cache when available
    if(file.exists("kegg.mic.map.qs")){
      mic.map = qs::qread("kegg.mic.map.qs")
    }else{
      mic.map <- list()
    }
    if(is.null(mic.map[[taxalvl]]) | length(mic.map[[taxalvl]])==0){
      mic.map <- doKeggNameMatch(mic.vec[[taxalvl]],taxalvl)
    }
    if(is.null(mic.map)||length(mic.map)==0){
      current.msg<<-paste0("No ",taxalvl, " was found in kegg database!")
      return(0)
    }
    # translate the significant features of this rank into KEGG organism ids
    sig.mic <- phyloseq_objs$sigfeat[[taxalvl]]
    sig.mic <- mic.map$Match[match(sig.mic,mic.map$Query)][!is.na( mic.map$Match)]
    sig.mic<<-unlist(strsplit(sig.mic,split=";"))
    if(is.null(sig.mic)||length(sig.mic)==0){
      current.msg<<-paste0("No significant ",taxalvl, " was found in kegg database! Taxonomy level can be change on comparison analysis page!")
    }
  }
  qs::qsave(mic.map,paste0(netModel,".mic.map.qs"))
  return(1)
}
# Match user taxa names (at taxonomy level index l) against the taxa table of
# the selected genome-scale model database.
#
# qvec   : list of two character vectors -- [[1]] original row names,
#          [[2]] cleaned names (rank prefix stripped, trimmed).
# l      : integer taxonomy level index (1=phylum ... 6=species) selecting
#          the element of "<predDB>_tax.qs" to match against.
# predDB : model database file prefix under lib.path.mmp (e.g. "agora").
#
# Returns a data.frame with columns Query, Qtrans, Match (NA where no
# case-insensitive match exists); also writes "gem_taxa_match_result.csv".
doGemNameMatch <- function(qvec, l, predDB){
taxMapLong <- qs::qread(paste0(lib.path.mmp, predDB, "_tax.qs"))[[l]]
names(taxMapLong)[1] <- "taxa"
res <- data.frame(Query = qvec[[1]], Qtrans = qvec[[2]], stringsAsFactors = FALSE)
# Normalize database taxa names the same way the queries were normalized:
# spaces and punctuation become "_", brackets (e.g. "[Clostridium]") removed.
taxMapLong$taxa2 <- gsub("[[:space:]./_-]", "_", taxMapLong$taxa)
taxMapLong$taxa2 <- gsub("\\[|\\]", "", taxMapLong$taxa2)
# Case-insensitive lookup; report the database's original taxa name (col 1).
res$Match <- taxMapLong[match(tolower(res$Qtrans), tolower(taxMapLong$taxa2)), 1]
# was paste("gem_taxa_match_result.csv"): paste() on one string is a no-op
fast.write(res, "gem_taxa_match_result.csv");
return(res)
}
# Match cleaned user taxa names against the KEGG organism map for one
# taxonomy level, collapsing the KEGG organism codes matched by each query
# into a ";"-separated string.
#
# qvec    : list of two character vectors -- [[1]] original row names,
#           [[2]] cleaned names.
# taxalvl : taxonomy level name (lower-cased internally).
#
# Side effects: sets the globals `taxalvl` (active level) and `mtcls`
# (unique matched KEGG codes, consumed by CreatPathwayLib), and writes
# "kegg_taxa_match_result.csv".
# Returns a data.frame with columns Query, Qtrans, Match.
doKeggNameMatch <- function(qvec, taxalvl){
taxalvl = tolower(taxalvl)
taxalvl <<- taxalvl  # cache the active level globally for downstream steps
taxMapKEGG <- qs::qread(paste0(lib.path.mmp, "taxMapKEGG.qs"))[[taxalvl]]
# Normalize KEGG taxa names with the same rules applied to the queries.
taxnms <- gsub("[[:space:]./_-]", "_", names(taxMapKEGG)[-1])
taxnms <- gsub("\\[|\\]", "", taxnms)
names(taxnms) <- names(taxMapKEGG)[-1]
res <- data.frame(Query = qvec[[1]], Qtrans = qvec[[2]], stringsAsFactors = FALSE)
nmsidx = which(taxnms %in% res$Qtrans)
mtchidx <- taxMapKEGG[names(taxnms)[nmsidx]]
mtcls <<- unique(unlist(mtchidx))  # all matched KEGG codes (global)
# One ";"-separated code string per matched taxa name.
mtchidx <- unlist(lapply(mtchidx, function(x) paste(unique(x), collapse = ";")))
res$Match <- mtchidx[match(res$Qtrans, as.character(taxnms[names(mtchidx)]))]
# was paste("kegg_taxa_match_result.csv"): paste() on one string is a no-op
fast.write(res, "kegg_taxa_match_result.csv");
message("kegg taxonomy mapping done!")
return(res)
}
# Build a pathway -> compound library restricted to the KEGG organisms of
# interest and write the node/edge inclusion info to "includeInfo.json" for
# the network viewer.
#
# contain : "usrbac" uses every matched organism (global `mtcls`),
#           "sigbac" uses only significant ones (global `sig.mic`).
# Side effects: saves "<taxalvl>.current.lib.qs" (taxalvl is the global set
# by doKeggNameMatch) and writes includeInfo.json.
CreatPathwayLib <- function(contain){
if(contain=="usrbac"){
mtcls = mtcls   # copy the global `mtcls` into a same-named local
}else if(contain=="sigbac"){
mtcls = sig.mic[sig.mic!="NA"]
}
# per-organism pathway->compound lists; subset to selected organisms
bacpath <- qs::qread(paste0(lib.path.mmp,"bacpathway.met.qs"))[mtcls]
bacpath <- bacpath[!is.na(names(bacpath))]
paths <- unique(unlist(lapply(bacpath,function(x) names(x))))
current.lib = vector("list",length=length(paths))
names(current.lib) = paths
# union of compounds per pathway across all selected organisms
for(p in paths){
pth = lapply(bacpath, function(x) x[[p]])
current.lib[[p]] =unique(unlist(pth))
}
includeInfo = list(nodes=unique(unlist(current.lib)))
# keep only edges whose (defined) endpoints are among the included nodes
edges.bc = qs::qread(paste0(lib.path.mmp,"edge.bac.qs"))
edges = data.frame(edge=rep(edges.bc$id_edge,2),cpd = c(edges.bc$from,edges.bc$to))
edges = unique(edges[!(grepl("undef",edges$cpd)),])
edges = edges[which(edges$cpd %in% includeInfo$nodes),]
edges = edges.bc[which(edges.bc$id_edge %in% edges$edge),]
includeInfo$edges = edges
qs::qsave(current.lib,paste0(taxalvl,".current.lib.qs"))
json.mat <- rjson::toJSON(includeInfo);
sink("includeInfo.json");
cat(json.mat);
sink();
}
# Run microbe-to-metabolite potential prediction with a genome-scale model
# database, for one taxonomy level or (taxalvl == "all") every level.
#
# model   : unused placeholder kept for interface compatibility.
# predDB  : model database prefix (defaults to "agora" when NULL/"null"/"").
# taxalvl : taxonomy level name, "all", or NULL/"null" for the default
#           (second-deepest available level).
# psc     : minimum potential score passed to doGemPrediction().
# metType : "metabolite" restricts predictions to measured metabolites.
#
# Side effects: sets OTUtab / current.proc globals, saves the prediction to
# "m2m_pred_<predDB>.qs". Returns 1.
M2Mprediction<- function(model,predDB,taxalvl,psc=0.5,metType="metabolite"){
if(!exists("phyloseq_objs")){
phyloseq_objs <- qs::qread("prescale.phyobj.qs")
}
# is.null() must come first and use short-circuit ||: the original
# `predDB=="null" | is.null(predDB)` evaluated NULL=="null" -> logical(0),
# making if() fail with "argument is of length zero" when predDB is NULL.
if(is.null(predDB) || predDB=="null" || predDB==""){
predDB <- "agora"
}
current.proc$predDB <<-predDB
# same NULL-first / short-circuit fix as above
if(is.null(taxalvl) || taxalvl=="null"){
# default: second-deepest level in the count tables
taxalvl=names(phyloseq_objs$count_tables)[length(phyloseq_objs$count_tables)-1]
}
if(taxalvl=="all"){
lvlnm <- names(phyloseq_objs$count_tables)
lvlnm <- lvlnm[lvlnm!="OTU"]
#taxalvls <- lvlnm[length(lvlnm)]
predres <- vector('list',length=length(lvlnm))
names(predres)<- lvlnm
for(taxalvl in lvlnm){
# OTUtab is consumed globally by doGemPrediction()
OTUtab <<- phyloseq_objs$count_tables[[taxalvl]]
predres[[taxalvl]] <- doGemPrediction(predDB,taxalvl,psc,metType)
}
}else{
OTUtab <<- phyloseq_objs$count_tables[[taxalvl]]
predres <- doGemPrediction(predDB,taxalvl,psc,metType)
current.proc$taxalvl <<-taxalvl
current.proc$predres<<-predres
}
# met.map <- qs::qread("met.map.qs")
# predres <-predres[rownames(predres) %in% met.map$Match,]
# mbSetObj$analSet$m2m.pred <- predres
#mbSetObj$imgSet$m2m.pred <- imgName;
qs::qsave(predres,paste0("m2m_pred_",predDB,".qs"))
message("Prediction completed!")
return(1)
}
# Core GEM-based microbe -> metabolite prediction for one taxonomy level.
# Builds a taxa x metabolite "potential" matrix from the model database,
# weights it by each sample's taxa abundances, quantile-normalizes per
# sample, and averages to a normalized per-sample metabolite profile.
#
# predDB  : model database prefix under lib.path.mmp.
# taxalvl : taxonomy level (matched against "gem.mic.map.qs").
# psc     : minimum potential score to keep a taxa/metabolite pair.
# metType : "metabolite" restricts to user-measured metabolites.
# matchonly, sigonly : currently unused.
# Reads the global OTUtab (set by M2Mprediction). Returns a list of the
# per-sample profile matrix and the per-sample taxa x metabolite matrices;
# also writes "<taxalvl>_prediction.csv".
doGemPrediction <- function(predDB,taxalvl,psc=0.5,metType,matchonly=T,sigonly=T){
#print(c(predDB,taxalvl,metType))
require(reshape2)
message('Loading the model database..')
psc <- as.numeric(psc)
taxalvl<-tolower(taxalvl)
# taxa mapping produced by MicIDmap(netModel="gem")
tax_map <- qs::qread("gem.mic.map.qs")[[taxalvl]]
tax_map <- tax_map[which(!is.na(tax_map$Match)),]
m2m_ls <- qs::qread(paste0(lib.path.mmp,predDB,".qs"))[[taxalvl]]
names(m2m_ls)[1] <- "taxa"
# keep predictions above the score cutoff for taxa present in user data,
# and translate database taxa names back to the user's names
m2m_ls <- m2m_ls[which(m2m_ls$potential>=psc),]
m2m_ls <- m2m_ls[which(m2m_ls$taxa %in% tax_map$Match),]
m2m_ls$taxa <- tax_map$Query[match(m2m_ls$taxa,tax_map$Match)]
if(metType=="metabolite"){
# restrict to metabolites the user actually measured
met.map<- current.proc$gem
m2m_ls <- m2m_ls[which(m2m_ls$metID %in% met.map$Match),]
m2m_ls$metabolite <- met.map$Query[match(m2m_ls$metID,met.map$Match)]
}
# long -> wide: taxa x metabolite potential matrix (absent pairs -> 0)
m2m_db <- dcast(m2m_ls,taxa~metabolite,value.var="potential")
m2m_db[is.na(m2m_db)] <- 0
dbnorm <- as.matrix(m2m_db[,-1])
##filter otu table
# rows of OTUtab must line up exactly with m2m_db$taxa for the per-sample
# multiplication below
OTUtab <- OTUtab[which(rownames(OTUtab) %in% tax_map$Query),]
if(!(all( rownames(OTUtab) ==m2m_db$taxa))){
AddErrMsg("Names not match!");
return(0);
}
fun_prediction = NULL
fun_m2m_pair <- list()
message('Generating metabolic profile..')
rownames(dbnorm) <- m2m_db$taxa
for(sample in 1:ncol(OTUtab)){
# weight each taxa's metabolite potentials by its abundance in this sample
fun_prediction_sample = dbnorm * as.numeric(OTUtab[,sample])
fun_prediction_sample <- t(preprocessCore::normalize.quantiles(t(fun_prediction_sample), copy=FALSE))
#??zero should be back transfer??
fun_m2m_pair[[sample]] <- fun_prediction_sample
# collapse taxa dimension, then normalize to proportions per sample
fun_prediction_sample = colMeans(fun_prediction_sample)
fun_prediction_sample = fun_prediction_sample / sum(fun_prediction_sample)
# an all-zero sample makes the division above NaN; reset to zeros
if(is.na(sum(fun_prediction_sample))) fun_prediction_sample[1:ncol(dbnorm)] = 0
fun_prediction = cbind(fun_prediction, fun_prediction_sample)
}
message("Prediction done")
names(fun_m2m_pair) <-colnames(fun_prediction) <- colnames(OTUtab)
fun_m2m_pair <- lapply(fun_m2m_pair, function(p){
rownames(p)=rownames(OTUtab)
return(p) })
# drop metabolites never predicted in any sample
keep = which(rowSums(fun_prediction) > 0)
if (length(keep) == 0) stop("No functional prediction possible!\nEither no nearest neighbor found or your table is empty!")
fun_prediction_final = fun_prediction[unname(keep),]
fast.write(fun_prediction_final, paste0(taxalvl,"_prediction.csv"))
return(list(fun_prediction_sample=fun_prediction_final,fun_prediction_met=fun_m2m_pair))
}
###########################################################
####################Prediction && correlation heatmap######
###########################################################
###########################################################
# Compute microbe x metabolite association scores between two feature-by-sample
# matrices (rows = features, columns = samples, matching column order).
#
# cor.method : "univariate" (psych::corr.test), "MI" (parmigene k-NN mutual
#              information), "discor" (energy::dcor.test, R=100 permutations),
#              or anything else -> partial correlation (ppcor::pcor).
# cor.stat   : correlation statistic for the univariate/partial methods.
# taxalvl    : unused here; kept for interface compatibility.
#
# Returns list(corr.mat, corr.pval) with microbes as rows, metabolites as
# columns. corr.pval is the scalar sentinel 0 when no p-values are available
# (MI, or partial correlation failure). Sets the global nan.msg.
DoM2Mcorr <- function(mic.sig,met.sig,cor.method="univariate",cor.stat="pearson",taxalvl){
nan.msg<<-"null"
if(cor.method == "univariate"){
require(psych)
res <- corr.test(cbind(t(mic.sig), t(met.sig)),method=cor.stat);
# keep only the microbe-rows x metabolite-columns quadrant
rowidx <- which(rownames(res$r) %in% rownames(mic.sig))
colidx <- which(colnames(res$r) %in% rownames(met.sig))
corr.mat <- res$r[rowidx,colidx];
corr.pval <- res$p[rowidx,colidx]
}else if(cor.method == "MI"){
library(parmigene)
res = knnmi.all(rbind(mic.sig, met.sig), k=5)
# rescale mutual information to [0, 1]
scale = 1/max(res)
corr.mat = res * scale
# knnmi.all returns the full (mic+met) x (mic+met) matrix; keep only the
# microbe-rows x metabolite-columns block so the shape matches the other
# branches. Row order is fixed by the rbind() above, so index by position.
corr.mat <- corr.mat[seq_len(nrow(mic.sig)), nrow(mic.sig) + seq_len(nrow(met.sig)), drop=FALSE]
rownames(corr.mat) <- rownames(mic.sig)
colnames(corr.mat) <- rownames(met.sig)
# MI has no p-values; use the same 0 sentinel as the partial-correlation
# fallback. (corr.pval was previously left unbound in this branch, so the
# final return() raised "object 'corr.pval' not found".)
corr.pval <- 0
}else if(cor.method=="discor"){
require(energy)
corr.mat <- matrix(data=NA,nrow=nrow(mic.sig),ncol = nrow(met.sig))
corr.pval <- matrix(data=NA,nrow=nrow(mic.sig),ncol = nrow(met.sig))
# distance correlation with a permutation test, one microbe row at a time
for(row in 1:nrow(mic.sig)){
res<-lapply(1:nrow(met.sig), function(y) {
corr=dcor.test(mic.sig[row,],met.sig[y,],R=100)
return(corr)
})
corr.mat[row,] <- unlist(lapply(res,function(z) return(z[["statistic"]])))
corr.pval[row,] <- unlist(lapply(res,function(z) return(z[["p.value"]])))
}
colnames(corr.mat) <- colnames(corr.pval) <- rownames(met.sig)
rownames(corr.mat) <- rownames(corr.pval) <- rownames(mic.sig)
}else{
library(ppcor);
sel.res <- cbind(t(mic.sig), t(met.sig))
res <- tryCatch(
{ pcor(sel.res, method=cor.stat);
},
error = function(error_cond){
current.msg <<-"Fail to perform patial correlation";
# return NULL (not the message string) so the dim() check below falls
# through to the corr.mat=0 failure path instead of erroring on
# `res$estimate` applied to an atomic vector
NULL
})
if(!is.null(dim(res$estimate))){
corr.mat <- res$estimate;
corr.pval <- res$p.value;
if(any(is.nan(corr.pval))){
# unusable p-values: keep the correlation quadrant, signal via nan.msg
corr.pval=0
nan.msg <<-"NaNs produced in p_value calculation using current correlation parameters! ";
rownames(corr.mat) <- colnames(sel.res)
colnames(corr.mat) <- colnames(sel.res)
rowidx <- which(rownames(corr.mat) %in% rownames(mic.sig))
colidx <- which(colnames(corr.mat) %in% rownames(met.sig))
corr.mat <- corr.mat[rowidx,colidx];
}else{
rownames(corr.mat) <- rownames(corr.pval) <- colnames(sel.res)
colnames(corr.mat) <-colnames(corr.pval) <- colnames(sel.res)
rowidx <- which(rownames(corr.mat) %in% rownames(mic.sig))
colidx <- which(colnames(corr.mat) %in% rownames(met.sig))
corr.mat <- corr.mat[rowidx,colidx];
corr.pval <- corr.pval[rowidx,colidx]
}
}else{
# pcor failed entirely; scalar sentinels understood by callers
corr.mat=0
corr.pval=0
}
}
return(list(corr.mat=corr.mat,corr.pval=corr.pval));
}
# Correlate the significant microbes (top 100) of one taxonomy level against
# the significant metabolites (top 100), filter the result by threshold/sign,
# and persist both the full table (CSV) and the filtered matrices
# (current.proc$corr.mat / $corr.pval) for the heatmap step.
#
# initDE is currently unused. Returns the updated mbSetObj, or 0 when no
# correlation matrix could be produced.
performeCorrelation <- function(mbSetObj,taxalvl,initDE,cor.method="univariate",cor.stat="pearson",sign, cor.thresh=0.5,
corp.thresh=0.05){
mbSetObj <- .get.mbSetObj(mbSetObj);
if(!exists("phyloseq_objs")){
phyloseq_objs <- qs::qread("phyloseq_objs.qs")
}
micdat <- phyloseq_objs$count_tables[[taxalvl]]
metdat <- current.proc$met$data.proc
# significant feature labels; KO data keeps its own list outside phyloseq
if(micDataType=="ko"){
lbl.mic <-current.proc$mic$sigfeat
}else{
lbl.mic <- phyloseq_objs$sigfeat[[taxalvl]]
}
lbl.met <- current.proc$met$sigfeat
# cap both sides at 100 features to keep the correlation matrix readable
if(length(lbl.mic) >100){lbl.mic= lbl.mic[1:100]}
if(length(lbl.met) >100){lbl.met= lbl.met[1:100]}
mic.sig <- micdat[which(rownames(micdat) %in% lbl.mic),]
# align metabolite samples to the microbiome sample order
met.sig <- metdat[which(rownames(metdat) %in% lbl.met),match(colnames(mic.sig),colnames(metdat))]
res.corr <- DoM2Mcorr(mic.sig,met.sig,cor.method,cor.stat,taxalvl)
corr.mat <- res.corr$corr.mat
# a scalar/dimension-less result means the correlation step failed
if(is.null(dim(res.corr$corr.mat))){
corr.mat <- 0
corr.pval <- 0
return(0)
}
res.corr.filt <- doCorrelationFilt(res.corr,cor.thresh,corp.thresh,sign)
if(is.null(dim(res.corr.filt$corr.mat))){
corr.mat <- 0
corr.pval <- 0
return(0)
}
# write the full (unfiltered) long-format table, ordered by p-value when
# p-values exist, otherwise by |correlation|
output.dat <- reshape2::melt(res.corr$corr.mat,value.name = "correlation")
output.p <- reshape2::melt(res.corr$corr.pval,value.name = "pval")
if(!is.null(output.p)&nrow(output.p)>0){
output.dat <- merge(output.dat,output.p)
output.dat <- output.dat[order(output.dat$pval),]
}else{
output.dat <- output.dat[order(abs(output.dat$correlation),decreasing = T),]
}
fast.write(output.dat, file=paste("correlation", "_",cor.method,"_",cor.stat,".csv", sep=""),row.names=F);
corrNm<<-paste("correlation", "_",cor.method,"_",cor.stat,".csv", sep="")
mbSetObj$analSet$corr.method <- paste0(cor.method,"_",cor.stat)
# the filtered matrices feed CreatM2MHeatmap()
current.proc$corr.mat <<- res.corr.filt$corr.mat
current.proc$corr.pval <<- res.corr.filt$corr.pval
message("correlation completed")
return(.set.mbSetObj(mbSetObj));
}
# Filter a correlation matrix (and matching p-value matrix) down to the rows
# and columns containing at least one entry passing both thresholds.
#
# res.corr    : list(corr.mat, corr.pval) from DoM2Mcorr(); corr.pval may be
#               a scalar sentinel when no p-values exist.
# cor.thresh  : minimum |correlation| (direction controlled by `sign`).
# corp.thresh : maximum p-value.
# sign        : "positive", "negative", or anything else for two-sided.
#
# Returns list(corr.mat, corr.pval); sets the global nan.msg when p-values
# are unusable. drop=FALSE is used on every intermediate subset so that a
# single surviving row/column stays a matrix -- plain [ , ] collapsed it to
# a vector and crashed the subsequent apply() calls.
doCorrelationFilt <- function( res.corr,cor.thresh,corp.thresh,sign){
corr.mat <- res.corr$corr.mat
corr.pval <- res.corr$corr.pval
if(any(is.nan(corr.pval))|is.null(dim(corr.pval))){
# no usable p-values: skip the p-filter and signal via the sentinel/global
corr.pval=0
nan.msg <<-"NaNs produced in p_value calculation using current correlation parameters! ";
}else{
# keep columns with at least one significant p, then rows likewise
keepidx1p <- apply(corr.pval,2,function(x) sum(x<corp.thresh))>0
keepidx2p <- apply(corr.pval[,keepidx1p,drop=FALSE],1,function(x) sum(x<corp.thresh))>0
corr.pval <- corr.pval[keepidx2p,keepidx1p,drop=FALSE]
corr.mat <- corr.mat[keepidx2p,keepidx1p,drop=FALSE]
}
# correlation-strength filter, direction depending on `sign`
if(sign=="positive"){
keepidx1 <- apply(corr.mat,2,function(x) sum(x>cor.thresh))>0
keepidx2 <- apply(corr.mat[,keepidx1,drop=FALSE],1,function(x) sum(x>cor.thresh))>0
}else if(sign=="negative"){
keepidx1 <- apply(corr.mat,2,function(x) sum(x<(-cor.thresh)))>0
keepidx2 <- apply(corr.mat[,keepidx1,drop=FALSE],1,function(x) sum(x<(-cor.thresh)))>0
}else{
keepidx1 <- apply(corr.mat,2,function(x) sum(abs(x)>cor.thresh))>0
keepidx2 <- apply(corr.mat[,keepidx1,drop=FALSE],1,function(x) sum(abs(x)>cor.thresh))>0
}
# final subsets keep the historical drop semantics so callers' is.null(dim)
# failure checks behave as before
corr.mat <- corr.mat[keepidx2,keepidx1]
if(!is.null(dim(corr.pval))){
corr.pval <- corr.pval[keepidx2,keepidx1]
}
return(list(corr.mat=corr.mat,corr.pval=corr.pval))
}
# Render the microbe x metabolite integration heatmap as a plotly JSON file
# ("<plotNm>.json") consumed by the web front end.
#
# htMode  : "predht" -- cells show the average predicted potential per
#                       microbe/metabolite pair (current.proc$pred.dat /
#                       $predDE); pairs with predDE P_value < predpval.thresh
#                       are starred.
#           "corrht" -- cells show the filtered correlation matrix
#                       (current.proc$corr.mat / $corr.pval); significant
#                       correlations are starred.
# overlay : "true" overlays the *other* analysis' significant hits with a
#           second glyph. cor.thresh/corp.thresh gate correlations;
#           potential.thresh/predpval.thresh gate predictions.
# The remaining arguments control palette, clustering, labels and sizing.
#
# Returns the number of overlaid (double-annotated) cells; also stores the
# heatmap settings in mbSetObj$analSet$integration for report generation.
CreatM2MHeatmap<-function(mbSetObj,htMode,overlay, taxalvl, plotNm, format="png",
smplDist="euclidean", clstDist="ward.D", palette="npj",viewOpt="barraw",
clustRow="T", clustCol="T",
colname="T",rowname="T", fontsize_col=10, fontsize_row=10,
sign, cor.thresh=0.5,corp.thresh=0.05,
potential.thresh=0.5,predpval.thresh=0.05,
var.inx=NA, border=T, width=NA, dpi=72){
mbSetObj <- .get.mbSetObj(mbSetObj);
load_iheatmapr();
load_rcolorbrewer();
load_viridis();
current.msg<<- NULL
set.seed(2805614);
#used for color pallete
######set up plot
#colors for heatmap
if(palette=="gbr"){
colors <- grDevices::colorRampPalette(c("green", "black", "red"), space="rgb")(256);
}else if(palette == "heat"){
colors <- grDevices::heat.colors(256);
}else if(palette == "topo"){
colors <- grDevices::topo.colors(256);
}else if(palette == "gray"){
colors <- grDevices::colorRampPalette(c("grey90", "grey10"), space="rgb")(256);
}else if(palette == "byr"){
colors <- rev(grDevices::colorRampPalette(RColorBrewer::brewer.pal(10, "RdYlBu"))(256));
}else if(palette == "viridis") {
colors <- rev(viridis::viridis(10))
}else if(palette == "plasma") {
colors <- rev(viridis::plasma(10))
}else if(palette == "npj"){
colors <- c("#00A087FF","white","#E64B35FF")
}else if(palette == "aaas"){
colors <- c("#4DBBD5FF","white","#E64B35FF");
}else if(palette == "d3"){
colors <- c("#2CA02CFF","white","#FF7F0EFF");
}else {
colors <- colorRampPalette(rev(brewer.pal(n = 7, name ="RdYlBu")), alpha=0.8)(100)
#c("#0571b0","#92c5de","white","#f4a582","#ca0020");
}
plotjs = paste0(plotNm, ".json");
plotNm = paste(plotNm, ".", format, sep="");
mbSetObj$imgSet$IntegrationHeatmap<-plotNm;
if(htMode=="predht"){ ####using prediction pair pval
# build a mic x met matrix of mean predicted potentials, capped at the
# top 100 microbes/metabolites ranked by significance then potential
pred.dat <- current.proc$pred.dat
predDE <- current.proc$predDE
data.abd <- data.frame(mic=as.character(predDE$mic[match(pred.dat$pair,rownames(predDE))]),
met=as.character(predDE$met[match(pred.dat$pair,rownames(predDE))]),
var = ReScale(rowMeans(pred.dat[,-1]),0,1),
value = predDE$P_value)
data.abd <- data.abd[order(data.abd$value,-(data.abd$var)),]
if(length(unique(data.abd$mic))>100){
micnms <- unique(data.abd$mic)[1:100]
}else{
micnms <- unique(data.abd$mic)
}
if(length(unique(data.abd$met))>100){
metnms <- unique(data.abd$met)[1:100]
}else{
metnms <- unique(data.abd$met)
}
data <- data.abd[which(data.abd$mic %in% micnms & data.abd$met %in% metnms),-4]
data <- reshape2::dcast(data,mic~met)
data[is.na(data)] <-0
data.mtr <- data[,-1]
micnms <- data$mic
metnms <- colnames(data.mtr)
nameHt <- "Ave.Potential"
# star annotations for pairs passing the prediction p-value cutoff
anno.mat0 <- data.abd[which(data.abd$mic %in% micnms & data.abd$met %in% metnms),-3]
anno.mat0 <- anno.mat0[which(anno.mat0$value<predpval.thresh),]
if(nrow(anno.mat0)==0){
current.msg <<- paste("No significant prediction was detected using current parameters!");
}else{
#anno.mat$value <- as.character(round(anno.mat$value,2))
names(anno.mat0) <- c("Var1","Var2","value")
}
if(overlay=="true"){
# overlay significant correlations on top of the prediction heatmap
corr.mat <- current.proc$corr.mat
corr.pval <- current.proc$corr.pval
if(nrow(corr.mat)==0){
current.msg <<- paste("No statistical correlation was detected using current parameters!");
}else{
corr.mat <- corr.mat[which(rownames(corr.mat) %in% as.character(micnms)),
which(colnames(corr.mat) %in% metnms)]
anno.mat <- reshape2::melt(corr.mat,value.name = "correlation")
anno.mat <- anno.mat[which(anno.mat$correlation>cor.thresh),]
if(is.null(anno.mat) | nrow(anno.mat)==0){
current.msg <<- paste("No significant statistical correlation was detected using current parameters!");
}else{
if(is.null(dim(corr.pval))){
current.msg <<- paste("No significant statistical correlation was detected! The triangle only show the ones pass the correlation thresh hold!");
}else{
corr.pval <- corr.pval[which(rownames(corr.pval) %in% as.character(micnms)),
which(colnames(corr.pval) %in% metnms)]
anno.pval <- reshape2::melt(corr.pval,value.name="pval")
anno.pval <- anno.pval[which(anno.pval$pval<corp.thresh),]
if(nrow(anno.pval)==0| is.null(anno.pval)){
current.msg <<- paste("No statistical correlation pass the significance thresh hold using current parameters! The triangle show the ones pass the correlation thresh hold!");
}else{
anno.mat <- unique(left_join(anno.mat,anno.pval))
if(all(is.na(anno.mat$pval))){
anno.mat$pval<- NULL
current.msg <<- paste("No statistical correlation pass the significance thresh hold using current parameters! The triangle show the ones pass the correlation thresh hold!");
}
}
}
anno.mat <- unique(left_join(anno.mat0,anno.mat))
if(all(is.na(anno.mat$correlation))){
anno.mat$correlation<- NULL
current.msg <<- paste("No statistical correlation was detected using current parameters");
}
# glyph size scales with -log(p)
anno.mat$size <- as.numeric(ReScale(-log(anno.mat$value),8,12))
annols <- vector("list",length=nrow( anno.mat))
}
}
mbSetObj$analSet$integration$corr<- cor.thresh
mbSetObj$analSet$integration$corrPval<- corp.thresh
}else{
anno.mat <- anno.mat0
anno.mat$size <- as.numeric(ReScale(-log(anno.mat$value),8,12))
annols <- vector("list",length=nrow( anno.mat))
}
mbSetObj$analSet$integration$potential<- potential.thresh
mbSetObj$analSet$integration$predPval<- predpval.thresh
}else if(htMode=="corrht"){
# heat cells are the filtered correlation matrix itself
data.mtr <- current.proc$corr.mat
corr.pval <- current.proc$corr.pval
if(nrow(data.mtr)==0){
current.msg <<- paste("No statistical correlation was detected using current parameters!");
return(0)
}else{
micnms <- rownames(data.mtr)
metnms <- colnames(data.mtr)
nameHt <- "Correlation"
anno.mat0 <- reshape2::melt(data.mtr)
if(is.null(dim(corr.pval))){
current.msg <<- paste("No significant correlation was detected using current parameters!");
}else{
#### fro annotation using pval
corr.pval <- corr.pval[which(rownames(corr.pval) %in% as.character(micnms)),
which(colnames(corr.pval) %in% metnms)]
if(sign=="positive"){
anno.mat0 <- anno.mat0[which(anno.mat0$value>cor.thresh),]
}else if(sign=="negative"){
anno.mat0 <- anno.mat0[which(anno.mat0$value< (-cor.thresh)),]
}else{
anno.mat0 <- anno.mat0[which(abs(anno.mat0$value)>cor.thresh),]
}
anno.pval <- reshape2::melt(corr.pval,value.name = "pval")
anno.pval <- anno.pval[which(anno.pval$pval<corp.thresh),]
anno.mat <- unique(left_join(anno.mat0,anno.pval))
anno.mat <- anno.mat[!(is.na(anno.mat$pval)),]
if(nrow(anno.mat)==0){
current.msg <<- paste("No significant correlation was detected using current parameters!");
}else{
anno.mat$size <- as.numeric(ReScale(-log(anno.mat$pval),8,12))
annols <- vector("list",length=nrow( anno.mat))
}
}
# anno.mat$value <- as.character(round(anno.mat$value,2))
}
if(overlay=="true"){
# overlay significant predictions on top of the correlation heatmap
pred.de <- current.proc$predDE[,c(5,6,1)] %>% filter(P_value <predpval.thresh)
rownames(pred.de) <- NULL
names(pred.de)[1:2] <- names(anno.mat0)[1:2]
if(exists("anno.mat")){
if(nrow(anno.mat)>0){
anno.mat <- unique(left_join(anno.mat,pred.de))
}else{
anno.mat <- anno.mat0
anno.mat <- unique(left_join(anno.mat,pred.de)) %>% filter(!(is.na(P_value)))
}
}else{
anno.mat <- anno.mat0
anno.mat <- unique(left_join(anno.mat,pred.de)) %>% filter(!(is.na(P_value)))
}
if(nrow(anno.mat)==0){
if(!is.null(current.msg)){
current.msg <<- c(current.msg," No overlay prediction result was detected using current parameters!")
}else{
current.msg <<- paste("No overlay prediction result was detected using current parameters!");
}
}else{
anno.mat$size <- as.numeric(ReScale(-log(anno.mat$pval),8,12))
annols <- vector("list",length=nrow( anno.mat))
}
mbSetObj$analSet$integration$potential<- potential.thresh
mbSetObj$analSet$integration$predPval<- predpval.thresh
}
mbSetObj$analSet$integration$corr<- cor.thresh
mbSetObj$analSet$integration$corrPval<- corp.thresh
}
data1 <- data.mtr;
data1sc <- as.matrix(apply(data1, 2, as.numeric))
rownames(data1sc) <- micnms
#data1sc <- scale_mat(data1sc, scaleOpt)
fzCol <- round(as.numeric(fontsize_col), 1)
fzRow <- round(as.numeric(fontsize_row), 1)
map.height=nrow(data1)*30
map.width=ncol(data1)*30
#cb_grid <- setup_colorbar_grid(nrow = 100, x_start = 1.1, y_start = 0.95, x_spacing = 0.15)
dend_row <- hclust(dist(data1sc, method = smplDist), method = clstDist)
p <- iheatmap(data1sc,
# colorbar_grid = cb_grid,
name = nameHt, x_categorical = TRUE,
layout = list(font = list(size = 10)),
colors = colors
)
if (clustRow == "true") {
p <- p %>% add_row_dendro(dend_row, side = "right")
}
if (colname == "true" ){
p <- p %>% add_col_labels(size = 0.1, font = list(size = fzCol))
}
# bug fix: row labels were gated on `colname` (copy-paste), leaving the
# `rowname` argument unused
if (rowname == "true" ){
p <- p %>% add_row_labels(size = 0.1, font = list(size = fzRow), side = "left")
}
if (clustCol == "true") {
dend_col <- hclust(dist(t(data1), method = smplDist), method = clstDist)
p <- p %>% add_col_dendro(dend_col)
}
as_list <- to_plotly_list(p)
### add the layer for annotation
# inherits=FALSE: only react to an annols created in this call, not a
# stray global of the same name
if(exists("annols", inherits = FALSE)){
# map each annotated pair to the plot coordinates of its row/col label
annht <- as_list$layout$annotations
annht <- data.frame(label=unlist(lapply(annht,function(x) x[["text"]])),
X= unlist(lapply(annht,function(x) x[["x"]])),
Y=unlist(lapply(annht,function(x) x[["y"]])))
for(i in 1:nrow(anno.mat)){
annols[[i]]$text <- "*"
annols[[i]]$x <- annht$X[match(anno.mat$Var2[i],annht$label)]
annols[[i]]$y <- annht$Y[match(anno.mat$Var1[i],annht$label)]
annols[[i]][["font"]][["size"]] <- anno.mat$size[i]
annols[[i]]$showarrow <- FALSE
}
# overlaid cells get the second glyph
if(htMode=="predht"&overlay=="true"){
if(!(is.null(anno.mat$pval))){
anno.mat$pval[is.na(anno.mat$pval)]=1
anno.mat$size2 <- as.numeric(ReScale(-log(anno.mat$pval),10,14))
for(i in 1:nrow(anno.mat)){
if(anno.mat$pval[i]!=1){
annols[[i]]$text <- "۞"
annols[[i]][["font"]][["size"]] <- anno.mat$size2[i]
}
}
}else{
if(!(is.null(anno.mat$correlation))){
anno.mat$correlation[is.na(anno.mat$correlation)]=0
anno.mat$size2 <- as.numeric(ReScale(anno.mat$correlation,10,14))
for(i in 1:nrow(anno.mat)){
if(anno.mat$correlation[i]!=0){
annols[[i]]$text <- "۞"
annols[[i]][["font"]][["size"]] <- anno.mat$size2[i]
}
}
}
}
}
if(!(is.null(anno.mat$P_value))&htMode=="corrht"&overlay=="true"){
anno.mat$P_value[is.na(anno.mat$P_value)]=1
anno.mat$size2 <- as.numeric(ReScale(-log(anno.mat$P_value),10,14))
for(i in 1:nrow(anno.mat)){
if(anno.mat$P_value[i]!=1){
annols[[i]]$text <- "۞"
annols[[i]][["font"]][["size"]] <- 12
}
}
}
as_list$layout$annotations <- c(as_list$layout$annotations,annols)
}
# bug fix: annols does not exist when no annotations were built above;
# reading it unguarded raised "object 'annols' not found"
if(exists("annols", inherits = FALSE)){
overlyNum = length(which(unlist(lapply(annols,function(x) x[["text"]]=="۞"))))
}else{
overlyNum = 0
}
if (viewOpt != "overview") {
as_list[["layout"]][["width"]] <- max(map.width,1000)
as_list[["layout"]][["height"]] <- max(map.height,800)
} else {
as_list[["layout"]][["width"]] <- 1200
as_list[["layout"]][["height"]] <- map.height
}
# translate internal IDs to display names when a lookup is available
if(exists("id2nm",where=current.proc)){
for(i in 1:ncol(data1sc)){
as_list$layout$annotations[[i]]$text = unname(current.proc$id2nm[as_list$layout$annotations[[i]]$text])
}
}
as_json <- attr(as_list, "TOJSON_FUNC")(as_list)
as_json <- paste0("{ \"x\":", as_json, ",\"evals\": [],\"jsHooks\": []}")
write(as_json, plotjs)
if(is.null(current.msg)){
current.msg<<-"null"
}
# storing for Report Generation
mbSetObj$analSet$integration$heatmap <- data1sc
mbSetObj$analSet$integration$heatmap.dist <- smplDist
mbSetObj$analSet$integration$heatmap.clust <- clstDist
mbSetObj$analSet$integration$taxalvl <- taxalvl
mbSetObj$analSet$integration$overlay <- overlay
mbSetObj$analSet$integration$htMode <- htMode
mbSetObj$analSet$integration$sign <- sign
message("heatmap done")
.set.mbSetObj(mbSetObj)
return(overlyNum)
}
###########################################################
####################KEGG Metabolism Network################
###########################################################
###########################################################
# Write "network_query_met.json": the set of matched metabolite node ids to
# highlight on the KEGG metabolism network, each with a constant weight of 2.
#
# contain : background selection; the listed values build the query from the
#           saved keggNet metabolite map, anything else delegates to
#           doNetpathFilt().
PrepareOTUQueryJson <- function(mbSetObj,taxalvl,contain="bac"){
mbSetObj <- .get.mbSetObj(mbSetObj);
if(contain=="bac"| contain=="hsabac"|contain=="all"|contain=="hsa"){
met.map <- qs::qread("keggNet.met.map.qs")
query.res <- rep(2,length(unique(met.map$id[!(is.na(met.map$id))])))
names(query.res) <- unique(met.map$id[!(is.na(met.map$id))])
}else{
# NOTE(review): query.res is not defined locally in this branch; the
# toJSON() below only works if doNetpathFilt() assigns query.res in the
# global environment -- confirm against its definition.
doNetpathFilt(contain)
}
json.mat <- rjson::toJSON(query.res);
sink("network_query_met.json");
cat(json.mat);
sink();
return(.set.mbSetObj(mbSetObj));
}
# Dispatcher for the functional enrichment analyses on the KEGG network page.
#
# dataType    : "metabolite" or "ko" -- which feature type to enrich.
# category    : KO library category (hypergeometric KO path only).
# file.nm     : base name for the result files.
# contain     : enrichment background ("bac", "hsabac", "hsa", "all",
#               "usrbac", or a mummichog library name).
# enrich.type : "hyper" (over-representation), "global" (globaltest via the
#               async .perform.computing pipeline), or "mummichog" (peaks).
#
# Records the background and taxonomy level in mbSetObj$analSet$keggnet and
# returns the updated mbSetObj.
PerformTuneEnrichAnalysis <- function(mbSetObj, dataType,category, file.nm,contain="hsabac",enrich.type){
mbSetObj <- .get.mbSetObj(mbSetObj);
if(enrich.type == "hyper"){
if(dataType=="metabolite"){
PerformMetListEnrichment(mbSetObj, contain,file.nm);
}else{
MicrobiomeAnalystR:::LoadKEGGKO_lib(category);
PerformKOEnrichAnalysis_List(mbSetObj, file.nm);
}
}else if(enrich.type =="global"){
# user-taxa background for KO data first needs the KO map tuned
if(contain=="usrbac" & micDataType=="ko"){
tuneKOmap()
contain = "bac"
}
.prepare.global.tune(mbSetObj, dataType, category, file.nm,contain);
.perform.computing();
# NOTE(review): `res` is captured but never used below -- the branch's
# real effect is writing result files and setting the local `taxalvl`.
if(dataType=="ko"){
res= .save.global.res();
taxalvl = "ko"
}else if(dataType=="metabolite"){
res=enrich2json()
}
}else if(enrich.type =="mummichog"){
if(!exists("performPeakEnrich")){ # public web on same user dir
.load.scripts.on.demand("utils_peak2fun.Rc");
}
performPeakEnrich(lib=contain)
}
# taxalvl is only assigned on the global/ko path; exists() also finds a
# global `taxalvl` (set elsewhere, e.g. by doKeggNameMatch) when present
if(!exists("taxalvl")){taxalvl = "ko"}
mbSetObj$analSet$keggnet$background <- contain
mbSetObj$analSet$keggnet$taxalvl <- taxalvl
return(.set.mbSetObj(mbSetObj))
}
# Stage the input for a globaltest-based enrichment run: select the feature
# set library for the chosen background, align it with the user's data, and
# serialize everything (including the worker function) to "dat.in.qs" for
# .perform.computing().
#
# dataType : "metabolite" (sets of KEGG compounds) or "ko" (sets of KOs).
# category : unused here; kept for interface compatibility.
# contain  : background library selector.
# Returns 1.
.prepare.global.tune<-function(mbSetObj, dataType,category, file.nm,contain){
mbSetObj <- .get.mbSetObj(mbSetObj);
# class labels from the selected experimental factor
phenotype <- as.factor(sample_data(mbSetObj$dataSet$norm.phyobj)[[selected.meta.data]]);
if(dataType=="metabolite"){
if(contain=="bac"){
current.set <- qs::qread(paste0(lib.path.mmp,"kegg_bac_mummichog.qs"))$pathways$cpds
}else if(contain=="hsabac"){
current.set <- qs::qread(paste0(lib.path.mmp,"kegg_hsa_bac_mummichog.qs"))$pathways$cpds
}else if(contain=="hsa"){
current.set <- qs::qread(paste0(lib.path.mmp,"kegg_hsa_mummichog.qs"))$pathways$cpds
}else if(contain=="all"){
current.set <- qs::qread(paste0(lib.path.mmp,"kegg_all_mummichog.qs"))$pathways$cpds
}else{
# user-taxa background: library built earlier by CreatPathwayLib();
# `taxalvl` here is the global set by doKeggNameMatch()
current.set <- qs::qread(paste0(taxalvl,".current.lib.qs"))
}
metmat <- t(current.proc$met$data.proc)
met.map <- qs::qread("keggNet.met.map.qs")
met.map <- met.map[!(is.na(met.map$Node)),]
# remember which matched metabolites are covered by the chosen library
met.map$include = ifelse(met.map$Match %in% unique(unlist(current.set)),T,F)
qs::qsave(met.map,"keggNet.met.map.qs")
# rename sample-by-metabolite columns to KEGG ids; drop unmatched columns
colnames(metmat) <- met.map$Match[match(colnames(metmat),met.map$Query)]
datmat <- metmat[,which(colnames(metmat)!='')]
hits <- lapply(current.set, function(x){x[x %in% colnames(datmat)]});
set.num <- unlist(lapply(current.set, length), use.names = FALSE);
dat.in <- list(cls=phenotype, data=datmat, subsets=hits, set.num=set.num, filenm=file.nm);
}else if(dataType=="ko"){
if(contain=="bac"){
current.set <- qs::qread(paste0(lib.path.mmp,"ko_set_bac.qs"))
}else if(contain=="hsabac"){
current.set <- qs::qread(paste0(lib.path.mmp,"ko_set_hsa_bac.qs"))
}else if(contain=="hsa"){
current.set <- qs::qread(paste0(lib.path.mmp,"ko_set_hsa.qs"))
}else if(contain=="all"){
kegg.anot <- .read.microbiomeanalyst.lib.rds("ko_pathways.rds", "ko")
# NOTE(review): current.setlink is a local never read again; possibly
# meant to be the global current.setlink <<- ... -- confirm.
current.setlink <- kegg.anot$link;
current.set <- kegg.anot$sets$Metabolism;
}else{
current.set <- qs::qread(paste0(lib.path.mmp,"ko_set_bac.qs"))
}
# NOTE(review): hardcoded path, inconsistent with the lib.path.mmp prefix
# used everywhere else in this function -- confirm they resolve the same.
set2nm <- qs::qread("../../lib/mmp/set2nm.qs")[["pathway"]];
set.ids <- names(current.set);
names(set.ids) <- names(current.set)<- set2nm[set.ids];
current.setids <<- set.ids;
datmat <- as.data.frame(t(otu_table(mbSetObj$dataSet$norm.phyobj)),check.names=FALSE);
# first, get the matched entries from current.set
hits <- lapply(current.set, function(x){x[x %in% colnames(datmat)]});
set.num <- unlist(lapply(current.set, length), use.names = FALSE);
dat.in <- list(cls=phenotype, data=datmat, subsets=hits, set.num=set.num, filenm=file.nm);
}
# worker executed remotely by .perform.computing(); note it also captures
# `hits` and `set.num` from this environment via its closure
my.fun <- function(){
gt.obj <- globaltest::gt(dat.in$cls, dat.in$data, subsets=dat.in$subsets);
gt.res <- globaltest::result(gt.obj);
match.num <- gt.res[,5];
if(sum(match.num>0)==0){
return(NA);
}
raw.p <- gt.res[,1];
# add adjust p values
bonf.p <- p.adjust(raw.p, "holm");
fdr.p <- p.adjust(raw.p, "fdr");
res.mat <- cbind(set.num, match.num, gt.res[,2], gt.res[,3], raw.p, bonf.p, fdr.p);
rownames(res.mat) <- names(hits);
colnames(res.mat) <- c("Size", "Hits", "Statistic Q", "Expected Q", "Pval", "Holm p", "FDR");
hit.inx <- res.mat[,2]>0;
res.mat <- res.mat[hit.inx, ];
ord.inx <- order(res.mat[,5]);
res.mat <- res.mat[ord.inx,];
return(res.mat);
}
# rebuild dat.in with the worker attached, then persist for the computing step
dat.in <- list(cls=phenotype, data=datmat, subsets=hits, set.num=set.num, filenm=file.nm , my.fun=my.fun);
qs::qsave(dat.in, file="dat.in.qs");
return(1);
}
# Convert the globaltest metabolite-enrichment result (stored back into
# "dat.in.qs" by the computing step as $my.res) into the JSON payload used
# by the network viewer, plus a CSV result table.
# Returns 1 on success, 0 when no set matched.
enrich2json <- function(){
dat.in <- qs::qread("dat.in.qs");
hits = dat.in$subsets
file.nm = dat.in$filenm;
my.res <- dat.in$my.res;
# my.res is NA (scalar) when the worker found no matching sets
if(length(my.res)==1 && is.na(my.res)){
AddErrMsg("No match was found to the selected metabolite set library!");
return(0);
}
nms <- rownames(my.res);
hits <- hits[nms];
resTable <- data.frame(Pathway=rownames(my.res), my.res,check.names=FALSE);
current.msg <<- "Functional enrichment analysis was completed";
met.map <- qs::qread("keggNet.met.map.qs")
# translate the hit compound ids to display names and network node ids
hits.met <- lapply(hits, function(x){
x=met.map$Name[match(x,met.map$Match)]
return(x)
})
hits.node <- lapply(hits, function(x){
x=met.map$Node[match(x,met.map$Match)]
return(x)
})
# write json
# single-row results must stay 1-element containers for the JSON encoder
path.pval = resTable$Pval; if(length(path.pval) ==1) { path.pval <- matrix(path.pval) };
hit.num = paste0(resTable$Hits,"/",resTable$Size); if(length(hit.num) ==1) { hit.num <- matrix(hit.num) };
path.nms <- resTable$Pathway; if(length(path.nms) ==1) { path.nms <- matrix(path.nms) };
hits.query <- convert2JsonList(hits)
path.fdr <- resTable$FDR; if(length(path.fdr) ==1) { path.fdr <- matrix(path.fdr) };
# node ids of pathways significant at FDR < 0.05 (0 sentinel when none)
sig.path <- resTable$Pathway[which(resTable$FDR<0.05)];
sig.path <- hits.node[names(hits.node) %in% sig.path]; if(length(sig.path) ==0) { sig.path <-0};
# per-compound T statistics for node coloring, keyed by KEGG id
expr.mat = current.proc$met$res_deAnal
expr.mat$kegg = met.map$Match[match(rownames(expr.mat),met.map$Query)]
expr.mat <- expr.mat[expr.mat$kegg %in% met.map$Match[met.map$include],]
expr.mat <- split(expr.mat$T.Stats,expr.mat$kegg)
json.res <- list(hits.query = hits.query,
path.nms = path.nms,
path.pval = path.pval,
hit.num = hit.num,
path.fdr = path.fdr,
hits.query.nm = convert2JsonList(hits.met),
hits.node = convert2JsonList(hits.node),
expr.mat=convert2JsonList(expr.mat),
sig.path=sig.path );
json.mat <- RJSONIO::toJSON(json.res);
json.nm <- paste(file.nm, ".json", sep="");
sink(json.nm)
cat(json.mat);
sink();
# write csv
fast.write(resTable, file=paste(file.nm, ".csv", sep=""), row.names=F);
return(1)
}
#' Plot microbe-metabolite associations for one selected KEGG compound.
#'
#' For type == "corr": correlates taxa abundances (at taxalvl) against the
#' metabolite feature(s) mapped to keggid, caches the full correlation table
#' in current.proc$keggmap, then renders bar plots and (when >2 taxa)
#' circular bar plots of the top associations, writing CSV/PNG/JSON outputs
#' named after imgNm. For any other type: plots -log(p) of prediction-based
#' associations instead.
#'
#' Returns 1 on success, 0 on failure (the reason is stored in the global
#' current.msg). Relies on session globals: current.proc, phyloseq_objs,
#' metDataType, primary_ions, selected.meta.data.
GetAssociationPlot <- function(type,keggid,koid,micDataType,metIDType,taxalvl,imgNm,corrMethod,corrSign,corrThresh,corrPval,topNum=10,subset="false"){
  current.msg <<- "null"
  topNum <- as.numeric(topNum)
  # remember the settings so UpdateAssociationPlot() can redraw later
  current.proc$keggmap$corrMethod <<- corrMethod
  current.proc$keggmap$corrSign <<- corrSign
  current.proc$keggmap$corrThresh <<- corrThresh
  current.proc$keggmap$corrPval <<- corrPval
  if(type == "corr"){
    corrThresh <- as.numeric(corrThresh)
    corrPval <- as.numeric(corrPval)
    if(!exists("phyloseq_objs")){
      phyloseq_objs <- qs::qread("phyloseq_objs.qs")
    }
    if(!(keggid %in% current.proc$keggNet$Match)){
      current.msg <<- "The selected compound is not provided in your input data! Please choose the related compounds!"
      return(0)
    }
    # NOTE(review): metDataType is a session global (the parameter here is
    # metIDType) -- confirm this is intentional.
    if(metDataType == "peak"){
      qvecidx <- which(current.proc$keggNet$Match == keggid)
      adduct <- current.proc$keggNet$adduct[qvecidx]
      # BUGFIX: was interaction(adduct, primary_ions), which errors when the
      # two vectors differ in length; the intent is a set-overlap check.
      if(length(intersect(adduct, primary_ions)) > 0){
        # prefer peaks annotated with primary ion adducts
        qvec <- current.proc$keggNet$Query[which(current.proc$keggNet$Match == keggid)][which(adduct %in% primary_ions)]
      }else{
        qvec <- current.proc$keggNet$Query[which(current.proc$keggNet$Match == keggid)]
      }
      print(paste0(length(qvec), " peaks related! The plots take longer time."))
    }else{
      qvec <- current.proc$keggNet$Query[which(current.proc$keggNet$Match == keggid)]
    }
    current.proc$keggmap$current_qvec <<- qvec
    micdat <- phyloseq_objs$count_tables[[taxalvl]]
    metdat <- current.proc$met$data.proc[qvec, , drop = FALSE]
    # decode the correlation method string: "u-" = univariate, "p-" = partial
    if(grepl("u-", corrMethod)){
      cor.method <- "univariate"
      cor.stat <- gsub("u-", "", corrMethod)
    }else if(grepl("p-", corrMethod)){
      cor.method <- "partial"
      cor.stat <- gsub("p-", "", corrMethod)
    }else if(corrMethod == "discor"){
      cor.method <- "discor"
      cor.stat <- "discor"
    }
    # cap at the 2000 top-ranked features to keep correlation tractable
    if(nrow(micdat) > 2000){
      if(micDataType == "ko"){
        keepft <- rownames(current.proc$mic$res_deAnal)[1:2000]
      }else{
        keepft <- phyloseq_objs$res_deAnal[[taxalvl]][1:2000]
      }
      micdat <- micdat[rownames(micdat) %in% keepft, ]
    }
    res.corr <- DoM2Mcorr(micdat, metdat, cor.method, cor.stat, taxalvl)
    if(is.null(res.corr$corr.mat) | length(res.corr$corr.mat) == 1){
      corr.mat <- 0
      corr.pval <- 0
      current.msg <<- "No correlation is detected using the selected parameters! Please adjust the parameters!"
      return(0)
    }else{
      if(!is.matrix(res.corr$corr.mat)){
        corr.mat <- as.matrix(res.corr$corr.mat)
      }else{
        corr.mat <- res.corr$corr.mat
      }
      colnames(corr.mat) <- qvec
      # long format: one row per (taxon, metabolite feature) pair
      corr.mat <- reshape2::melt(corr.mat, value.name = "correlation")
      if(!is.null(res.corr$corr.pval) & length(res.corr$corr.pval) > 1){
        if(!is.matrix(res.corr$corr.pval)){
          corr.pval <- as.matrix(res.corr$corr.pval)
        }else{
          corr.pval <- res.corr$corr.pval
        }
        corr.pval <- reshape2::melt(corr.pval, value.name = "pval")
        corr.mat$pval <- corr.pval$pval
        # corr.mat <- corr.mat[which(corr.mat$pval < corrPval),]
      }
      fast.write(corr.mat, file = paste(imgNm, ".csv", sep = ""), row.names = F);
      # cache the unfiltered table for UpdateAssociationPlot()
      current.proc$keggmap$corrplot <<- corr.mat
      if(corrSign == "positive"){
        corr.mat <- corr.mat[which(corr.mat$correlation > 0), ]
      }else if(corrSign == "negative"){
        corr.mat <- corr.mat[which(corr.mat$correlation < 0), ]
      }
      # choose the panel layout: one metabolite feature -> single column;
      # several features -> two columns with per-feature top-N selection
      if(nrow(corr.mat) > 0 & length(unique(corr.mat$Var2)) == 1){
        corr.mat <- corr.mat[1:min(topNum, nrow(corr.mat)), ]
        names(corr.mat)[1:2] <- c("mic", "met")
        colnm <- 1
        wb <- 6
        hb <- max(0.25 * nrow(corr.mat), 2.5)
        wc <- 7
        hc <- 8
      }else if(nrow(corr.mat) > 0 & length(unique(corr.mat$Var2)) > 1){
        library(dplyr)
        names(corr.mat)[1:2] <- c("mic", "met")
        corr.mat <- corr.mat[order(corr.mat$correlation, decreasing = T), ] %>%
          group_by(met) %>%
          mutate(idx = 1:length(mic)) %>%
          filter(idx < (min(topNum, nrow(corr.mat)) + 1))
        colnm <- 2
        wb <- 8
        hb <- min(0.25 * nrow(corr.mat) / 2, 60)
        wc <- 12
        hc <- 3 * length(qvec)
      }else{
        current.msg <<- "No correlation is detected using the selected parameters! Please adjust the parameters!"
        return(0)
      }
      # shared y-axis limits across all panels
      ylim0 <- min(min(corr.mat$correlation) - 0.1, -0.1)
      ylim1 <- max(corr.mat$correlation) + 0.1
      require("Cairo");
      library(ggplot2);
      library(viridis);
      library(geomtextpath)
      barplot <- vector("list", length = length(qvec))
      circleplot <- vector("list", length = length(qvec))
      for(pl in 1:length(qvec)){
        plot.df <- corr.mat %>% filter(met == qvec[pl])
        plot.df <- plot.df[order(abs(plot.df$correlation)), ]
        plot.df$mic <- factor(plot.df$mic, levels = unique(plot.df$mic))
        plot.df$met <- factor(plot.df$met, levels = unique(plot.df$met))
        if("pval" %in% colnames(plot.df)){
          # color the bars by p-value when it is available
          barplot[[pl]] <- ggplot(plot.df, aes(x = mic, y = correlation, fill = pval)) +
            scale_fill_viridis_c(option = "plasma", alpha = 0.8) +
            geom_bar(stat = "identity") +
            ggtitle(unname(current.proc$id2nm[qvec[pl]])) +
            xlab("") + theme_minimal() + coord_flip()
          if(length(qvec) > 1){
            barplot[[pl]] <- barplot[[pl]] + theme(legend.key.size = unit(0.45, 'cm'))
          }
          if(nrow(plot.df) > 2){
            # compute label angle/justification for the circular layout
            angle <- 90 - 360 * (1:nrow(plot.df) - 0.5) / nrow(plot.df)
            plot.df$hjust <- ifelse(angle < -90, 1, 0)
            plot.df$angle <- ifelse(angle < -90, angle + 180, angle)
            plot.df$yh <- 0.05
            if(length(which(plot.df$correlation > 0)) > 0){
              # nudge label radius so long taxon names stay inside the ring
              pidx <- which(plot.df$correlation > 0)
              lidx <- which(plot.df$correlation > 0 & nchar(as.character(plot.df$mic)) > 20)
              plot.df$yh[pidx] <- max(plot.df$correlation) - nchar(as.character(plot.df$mic[pidx])) / 100
              sidx <- which((plot.df$yh[pidx] - (plot.df$correlation[pidx] + 0.05)) > 0)
              plot.df$yh[pidx[sidx]] <- plot.df$correlation[pidx[sidx]] + 0.05
              plot.df$yh[lidx] <- max(plot.df$correlation) - 0.2 - nchar(as.character(plot.df$mic[lidx])) / 100
            }
            # only label the y axis on left-hand (odd) panels
            if(pl %% 2 == 0){
              yl <- ""
            }else{
              yl <- "correlation"
            }
            circleplot[[pl]] <- ggplot(plot.df, aes(x = mic, y = correlation, fill = pval)) +
              geom_bar(stat = "identity", color = "black") +
              ylim(ylim0, ylim1) + xlab("") + ylab(yl) +
              theme_minimal() + ggtitle(unname(current.proc$id2nm[qvec[pl]])) +
              geom_text(data = plot.df, aes(x = mic, y = yh, label = mic, hjust = hjust), color = "black", fontface = "bold", alpha = 0.8, size = 3, angle = plot.df$angle, inherit.aes = FALSE) +
              coord_polar(start = 0) + scale_fill_viridis_c(option = "plasma", alpha = 0.7) +
              theme(
                axis.text.x = element_blank(),
                # axis.text.y = element_text(hjust = -15),
                # plot.margin = margin(1, 1, 1, 1, "cm")
              )
            if(length(qvec) > 1){
              circleplot[[pl]] <- circleplot[[pl]] + theme(legend.key.size = unit(0.4, 'cm'))
            }
          }else{
            current.msg <<- "Circle plot is not supported when associated taxa are less than 3!"
          }
        }else{
          # no p-values: color the bars by the correlation value itself
          barplot[[pl]] <- ggplot(plot.df, aes(x = mic, y = correlation, fill = correlation)) +
            scale_fill_viridis_c(option = "plasma", alpha = 0.8) +
            geom_bar(stat = "identity") + xlab("") + theme_minimal() +
            coord_flip() + ggtitle(unname(current.proc$id2nm[qvec[pl]]))
          if(nrow(plot.df) > 2){
            angle <- 90 - 360 * (1:nrow(plot.df) - 0.5) / nrow(plot.df)
            plot.df$hjust <- ifelse(angle < -90, 1, 0)
            plot.df$angle <- ifelse(angle < -90, angle + 180, angle)
            plot.df$yh <- 0.05
            if(length(which(plot.df$correlation > 0)) > 0){
              pidx <- which(plot.df$correlation > 0)
              lidx <- which(plot.df$correlation > 0 & nchar(as.character(plot.df$mic)) > 20)
              plot.df$yh[pidx] <- max(plot.df$correlation) - nchar(as.character(plot.df$mic[pidx])) / 100
              sidx <- which((plot.df$yh[pidx] - (plot.df$correlation[pidx] + 0.05)) > 0)
              plot.df$yh[pidx[sidx]] <- plot.df$correlation[pidx[sidx]] + 0.05
              plot.df$yh[lidx] <- max(plot.df$correlation) - 0.2 - nchar(as.character(plot.df$mic[lidx])) / 100
            }
            circleplot[[pl]] <- ggplot(plot.df, aes(x = mic, y = correlation, fill = correlation)) +
              geom_bar(stat = "identity", color = "black") +
              ylim(ylim0, ylim1) + xlab("") +
              theme_minimal() +
              geom_text(data = plot.df, aes(x = mic, y = yh, label = mic, hjust = hjust), color = "black", fontface = "bold", alpha = 0.8, size = 3, angle = plot.df$angle, inherit.aes = FALSE) +
              coord_polar(start = 0) + scale_fill_viridis_c(option = "plasma", alpha = 0.7) +
              theme(
                axis.text.x = element_blank()
              ) + ggtitle(unname(current.proc$id2nm[qvec[pl]]))
          }else{
            current.msg <<- "Circle plot is not supported when associated taxa are less than 3!"
          }
        }
      }
      library(grid)
      library(gridExtra)
      library(gridGraphics);
      library(cowplot)
      imgNm.bar <- paste("barplot_", imgNm, ".png", sep = "");
      imgNm.circle <- paste("circleplot_", imgNm, ".png", sep = "");
      Cairo(file = imgNm.bar, width = wb, height = hb, type = "png", bg = "white", unit = "in", dpi = 100);
      grid.arrange(grobs = barplot, ncol = colnm)
      dev.off();
      if(exists("circleplot")){
        Cairo(file = imgNm.circle, width = wc, height = hc, type = "png", bg = "white", unit = "in", dpi = 100);
        grid.arrange(grobs = circleplot, ncol = colnm)
        dev.off();
      }
      # export the (possibly truncated) association table as JSON;
      # single values are wrapped in matrix() so toJSON emits arrays
      mic <- corr.mat$mic; if(length(mic) == 1){ mic <- matrix(mic) };
      met <- corr.mat$met; if(length(met) == 1){ met <- matrix(met) };
      correlation <- corr.mat$correlation; if(length(correlation) == 1){ correlation <- matrix(correlation) };
      pval <- corr.mat$pval; if(length(pval) == 1){ pval <- matrix(pval) };
      fdr <- p.adjust(corr.mat$pval, "fdr"); if(length(fdr) == 1){ fdr <- matrix(fdr) };
      json.res <- list(mic = mic,
                       met = met,
                       correlation = correlation,
                       pval = pval,
                       fdr = fdr);
      json.mat <- RJSONIO::toJSON(json.res);
      json.nm <- paste(imgNm, ".json", sep = "");
      sink(json.nm)
      cat(json.mat);
      sink();
    }
  }else{
    # prediction-based association plot
    if(micDataType == "otu" & !is.null(keggid)){
      if(!(exists("gem", current.proc))){
        met.map <- MetaboIDmap(netModel = "gem", predDB = "agora", IDtype = metIDType);
      }
      if(!exists("gem.mic.map.qs")){
        mic.map <- MicIDmap(predModel = "gem", predDB = "agora", taxalvl = "")
      }
      M2Mprediction(model = "gem", predDB = "agora", taxalvl = "all", metType = metIDType)
      current.proc$meta_para$analysis.var <<- selected.meta.data
      PerformFeatDEAnalyse(NA, taxalvl = taxalvl, 0, analysisVar = selected.meta.data, adjustedVar = NULL);
      PerformPairDEAnalyse(NA, taxalvl = taxalvl, overlay = "T", initDE = "0", analysisVar = selected.meta.data, adjustedVar = NULL);
      predDE <- current.proc$predDE
      qvec <- current.proc$keggNet$Query[match(keggid, current.proc$keggNet$Match)]
      data.plot <- predDE[which(predDE$met == qvec), c(4, 5, 2)]
    }else{
      PerformFeatDEAnalyse(NA, taxalvl = "OTU", 0, analysisVar = selected.meta.data, adjustedVar = NULL);
      performeCorrelation(NA, taxalvl = "OTU", initDE = 0)
    }
    # BUGFIX: the branch above may not produce data.plot; previously this
    # fell through to nrow(data.plot) and crashed with "object not found".
    if(!exists("data.plot", inherits = FALSE)){
      current.msg <<- "No matched metabolite was found"
      return(0)
    }
    # BUGFIX: the message used to be placed after return() (dead code).
    if(nrow(data.plot) == 0){
      current.msg <<- "No matched metabolite was found"
      return(0)
    }
    if(nrow(data.plot) > 20){
      data.plot <- data.plot[1:20, ]
    }
    data.plot$`-log(p)` <- -log(data.plot$p_value)
    data.plot <- data.plot[order(data.plot$p_value, decreasing = T), ]
    data.plot$mic <- factor(data.plot$mic, levels = data.plot$mic)
    require("Cairo");
    library(ggplot2);
    library(viridis);
    imgNm <- paste(imgNm, ".png", sep = "");
    Cairo(file = imgNm, width = 5, height = 5, type = "png", bg = "white", unit = "in", dpi = 100);
    p1 <- ggplot(data.plot, aes(x = mic, y = `-log(p)`, fill = `-log(p)`)) +
      scale_fill_viridis_c(option = "plasma", alpha = 0.8) +
      geom_bar(stat = "identity") + xlab("") +
      coord_flip()
    print(p1)
    dev.off();
  }
  print("plot done")
  return(1)
}
#' Redraw the association bar/circle plots from the cached correlation table
#' (current.proc$keggmap$corrplot, produced by GetAssociationPlot) after the
#' user changes topNum, without recomputing correlations. Writes PNG plots
#' and a JSON table named after imgNm; returns 0 on failure (reason placed
#' in the global current.msg). Reads session globals only.
UpdateAssociationPlot <- function(imgNm,topNum=10){
current.msg<<-"null"
topNum = as.numeric(topNum)
# cached long-format correlation table and settings from the last run
corr.mat <-current.proc$keggmap$corrplot
corrSign<-current.proc$keggmap$corrSign
qvec <- current.proc$keggmap$current_qvec
library(dplyr)
# keep only the requested correlation sign (both kept if "all")
if(corrSign=="positive"){
corr.mat <- corr.mat[which(corr.mat$correlation>0),]
}else if(corrSign=="negative"){
corr.mat <- corr.mat[which(corr.mat$correlation< 0),]
}
if(is.null(corr.mat) | nrow(corr.mat)==0){
current.msg<<-"Correlation failed! Please check the parameters"
return(0)
}
names(corr.mat)[1:2] <- c("mic","met")
# choose layout: one metabolite feature -> single column of panels;
# several features -> two columns with per-feature top-N selection
if(nrow(corr.mat)>0 & length(unique(corr.mat$met))==1){
corr.mat <- corr.mat[1:min(topNum,nrow(corr.mat)),]
colnm = 1
wb=6
hb = max(0.25*nrow(corr.mat),2.5)
wc=6
hc = 7
}else if(nrow(corr.mat)>0 & length(unique(corr.mat$met))>1){
corr.mat <- corr.mat[order(corr.mat$correlation,decreasing = T),] %>%
group_by(met) %>%
mutate(idx=1:length(mic)) %>%
filter(idx<(min(topNum,nrow(corr.mat))+1))
colnm = 2
wb= 8
hb = min(0.25*nrow(corr.mat)/2,60)
wc= 12
hc = 3*length(qvec)
}else{
current.msg<<-"No correlation is detected using the selected parameters! Please adjust the parameters!"
return(0)
}
# shared y-axis limits across all panels
ylim0 = min(min(corr.mat$correlation)-0.1,-0.1)
ylim1 = max(corr.mat$correlation)+0.1
require("Cairo");
library(ggplot2);
library(viridis);
library(geomtextpath)
barplot <- vector("list",length=length(qvec))
circleplot <- vector("list",length=length(qvec))
# one bar plot (and one circular plot, if >2 taxa) per metabolite feature
for(pl in 1:length(qvec)){
plot.df <- corr.mat %>% filter(met==qvec[pl])
plot.df <- plot.df[order(abs(plot.df$correlation)),]
plot.df$mic <- factor(plot.df$mic,levels = unique(plot.df$mic))
plot.df$met <- factor(plot.df$met,levels = unique(plot.df$met))
if("pval" %in% colnames(plot.df)){
# color the bars by p-value when it is available
barplot[[pl]] <- ggplot(plot.df, aes(x=mic, y=correlation,fill= pval)) +
scale_fill_viridis_c(option = "plasma",alpha = 0.8)+
geom_bar(stat = "identity") +
ggtitle(qvec[pl])+
xlab("")+theme_minimal()+coord_flip()
if(length(qvec)>1){
barplot[[pl]] <- barplot[[pl]] +theme(legend.key.size = unit(0.45, 'cm'))
}
if(nrow(plot.df)>2 ){
# compute label angle/justification for the circular layout
angle <- 90 - 360 * (1:nrow(plot.df)-0.5) /nrow(plot.df)
plot.df$hjust<-ifelse( angle < -90, 1, 0)
plot.df$angle<-ifelse(angle < -90, angle+180, angle)
plot.df$yh <- 0.05
if(length(which(plot.df$correlation>0))>0){
# nudge label radius so long taxon names stay inside the ring
pidx <- which(plot.df$correlation>0)
lidx <- which(plot.df$correlation>0 & nchar(as.character(plot.df$mic))>20)
plot.df$yh[pidx] <- max(plot.df$correlation)-nchar(as.character(plot.df$mic[pidx]))/100
sidx <- which((plot.df$yh[pidx]-(plot.df$correlation[pidx]+0.05))>0)
plot.df$yh[pidx[sidx]] <- plot.df$correlation[pidx[sidx]]+0.05
plot.df$yh[lidx] <- max(plot.df$correlation)-0.2-nchar(as.character(plot.df$mic[lidx]))/100
}
# only label the y axis on left-hand (odd) panels
if(pl%%2==0){
yl=""
}else{
yl="correlation"
}
circleplot[[pl]] <- ggplot(plot.df,aes(x=mic, y=correlation,fill= pval))+
geom_bar(stat="identity", color="black")+
ylim(ylim0,ylim1) + xlab("")+ylab(yl)+
theme_minimal() + ggtitle(qvec[pl])+
geom_text(data=plot.df, aes(x=mic, y=yh, label=mic, hjust=hjust), color="black", fontface="bold",alpha=0.8, size=3, angle= plot.df$angle, inherit.aes = FALSE )+
coord_polar(start = 0) +scale_fill_viridis_c(option = "plasma",alpha = 0.7)+
theme(
axis.text.x = element_blank(),
# axis.text.y = element_text(hjust = -15),
# plot.margin = margin(1, 1, 1, 1, "cm")
)
if(length(qvec)>1){
circleplot[[pl]] <- circleplot[[pl]] + theme(legend.key.size = unit(0.4, 'cm'))
}
}else{
current.msg<<-"Circle plot is not supported when associated taxa are less than 3!"
}
}else{
# no p-values: color the bars by the correlation value itself
barplot[[pl]] <- ggplot(plot.df, aes(x=mic, y=correlation,fill= correlation)) +
scale_fill_viridis_c(option = "plasma",alpha = 0.8)+
geom_bar(stat = "identity") + xlab("")+ theme_minimal()+
coord_flip() + ggtitle(qvec[pl])
if(nrow(plot.df)>2){
angle <- 90 - 360 * (1:nrow(plot.df)-0.5) /nrow(plot.df)
plot.df$hjust<-ifelse( angle < -90, 1, 0)
plot.df$angle<-ifelse(angle < -90, angle+180, angle)
plot.df$yh <- 0.05
if(length(which(plot.df$correlation>0))>0){
pidx <- which(plot.df$correlation>0)
lidx <- which(plot.df$correlation>0 & nchar(as.character(plot.df$mic))>20)
plot.df$yh[pidx] <- max(plot.df$correlation)-nchar(as.character(plot.df$mic[pidx]))/100
sidx <- which((plot.df$yh[pidx]-(plot.df$correlation[pidx]+0.05))>0)
plot.df$yh[pidx[sidx]] <- plot.df$correlation[pidx[sidx]]+0.05
plot.df$yh[lidx] <- max(plot.df$correlation)-0.2-nchar(as.character(plot.df$mic[lidx]))/100
}
circleplot[[pl]] <- ggplot(plot.df,aes(x=mic, y=correlation,fill= correlation))+
geom_bar(stat="identity", color="black")+
ylim(ylim0,ylim1) + xlab("")+
theme_minimal() +
geom_text(data=plot.df, aes(x=mic, y=yh, label=mic, hjust=hjust), color="black", fontface="bold",alpha=0.8, size=3, angle= plot.df$angle, inherit.aes = FALSE )+
coord_polar(start = 0) +scale_fill_viridis_c(option = "plasma",alpha = 0.7)+
theme(
axis.text.x = element_blank()
) + ggtitle(qvec[pl])
}else{
current.msg<<-"Circle plot is not supported when associated taxa are less than 3!"
}
}
}
library(grid)
library(gridExtra)
library(gridGraphics);
library(cowplot)
# NOTE(review): imgNm may already carry an extension from the caller;
# confirm the output names are as intended before changing.
imgNm.bar <- paste("barplot_",imgNm, ".png",sep="");
imgNm.circle <- paste("circleplot_",imgNm, ".png",sep="");
Cairo(file=imgNm.bar, width=wb, height=hb, type="png", bg="white", unit="in", dpi=100);
grid.arrange(grobs =barplot, ncol=colnm)
dev.off();
if(exists("circleplot")){
Cairo(file=imgNm.circle, width=wc, height=hc, type="png", bg="white", unit="in", dpi=100);
grid.arrange(grobs =circleplot, ncol=colnm)
dev.off();
}
print("Update accociation plot done")
# export the truncated association table as JSON; single values are wrapped
# in matrix() so toJSON emits arrays rather than scalars
mic = corr.mat$mic; if(length(mic) ==1) { mic <- matrix(mic) };
met = corr.mat$met; if(length(met) ==1) { met <- matrix(met) };
correlation = corr.mat$correlation; if(length(correlation) ==1) { correlation <- matrix(correlation) };
pval = corr.mat$pval; if(length(pval) ==1) { pval <- matrix(pval) };
fdr <- p.adjust(corr.mat$pval, "fdr"); if(length(fdr) ==1) { fdr <- matrix(fdr) };
json.res <- list(mic = mic,
met = met,
correlation = correlation,
pval = pval,
fdr = fdr);
json.mat <- RJSONIO::toJSON(json.res);
json.nm <- paste(imgNm, ".json", sep="");
sink(json.nm)
cat(json.mat);
sink();
}
tuneKOmap <- function(){
# Restrict the precomputed KO network to KOs present in the processed
# microbiome data, then export the filtered edges and node IDs as JSON
# (includeInfo.json) for the front-end network view.
ko.edges <- qs::qread(paste0(lib.path.mmp, "ko.info.qs"))
keep.kos <- rownames(current.proc$mic$data.proc)
ko.edges <- ko.edges[ko.edges$ko %in% keep.kos, ]
includeInfo <- list(edges = ko.edges)
node.ids <- unique(c(ko.edges$from, ko.edges$to))
# drop placeholder nodes; NOTE(review): "unddef" looks like a typo of
# "undef" -- confirm against how the network file labels missing nodes
includeInfo$nodes <- node.ids[!grepl("unddef", node.ids)]
json.mat <- rjson::toJSON(includeInfo)
sink("includeInfo.json")
cat(json.mat)
sink()
}
###########################################################
####################3DScatter Plot#########################
###########################################################
###########################################################
DoDimensionReductionIntegrative <- function(mbSetObj, reductionOpt, method="globalscore", dimn,analysisVar,diabloPar=0.2){
# Thin wrapper: lazily source the dimension-reduction script, resolve the
# analysis variable, then delegate to my.reduce.dimension().
if(!exists("my.reduce.dimension")){
# public web on same user dir: load the compiled script on demand
.load.scripts.on.demand("utils_dimreduction.Rc");
}
if(analysisVar == "null"){
# fall back to the metadata variable chosen earlier in the session
analysisVar <- current.proc$meta_para$analysis.var;
}
my.reduce.dimension(mbSetObj, reductionOpt, method, dimn, analysisVar, diabloPar)
}
doScatterJson <- function(filenm,analysisVar){
# Lazily load the scatter-JSON script, then delegate to my.json.scatter().
# NOTE(review): the analysisVar argument is ignored; the session-level
# current.proc$meta_para$analysis.var is passed instead -- confirm intended.
if(!exists("my.json.scatter")){
.load.scripts.on.demand("utils_scatter_json.Rc");
}
my.json.scatter(filenm, current.proc$meta_para$analysis.var)
}
#' Run a univariate comparison (t-test, Kruskal-Wallis or limma) between the
#' user-selected metadata groups for one omics layer and write the styled
#' result table (stat, p, FDR, node color/size) as JSON to filenm.
#' Reads session state: combined.res.qs, phyloseq_objs.qs, current.proc.
#' Returns filenm, or "OK" when called from the processing page.
DoStatComparisonVis <- function(filenm, alg, meta, selected, meta.vec, omicstype, taxalvl,nonpar=FALSE){
mbSetObj <- .get.mbSetObj(NA);
# normalize missing taxonomy level to OTU
if(taxalvl=="null"|taxalvl=="NULL"|is.null(taxalvl)){
taxalvl="OTU"
}
if(meta == "null"){
meta = 1;
}
combined.res <- qs::qread("combined.res.qs")
if(meta.vec == "NA"){ # process page
#if(dataSet$name != filenm){
dataSet <- readRDS(filenm);
#}
}else{
# pick the abundance table for the requested omics layer
if(omicstype != "NA"){
if(omicstype %in% c("microbiome","mic")){
data <- qs::qread("phyloseq_objs.qs")
data<- data$count_tables[[taxalvl]]
}else{
data<- current.proc$met$data.proc
}
}else{
# NOTE(review): in this branch only data.mic/data.met are set; the
# tests below still reference `data` -- confirm this path is reachable.
data.mic <- current.proc$mic$data.proc
data.met <- current.proc$met$data.proc
}
}
# resolve the metadata vector and selected group labels
if(meta.vec == "NA"){ # process page
if(meta == ""){
meta=1;
}
metavec = current.proc$meta_para$sample_data
sel = unique(metavec)
}else{
metavec <- strsplit(meta.vec, "; ")[[1]];
sel <- strsplit(selected, "; ")[[1]];
}
# attach the working grouping as a synthetic "newcolumn" variable
combined.res$meta$newcolumn = metavec
metadf = combined.res$meta
metadf = metadf[which(metadf$omics==omicstype),]
sel_meta1 = metadf[which(metadf[,"newcolumn"] %in% sel[1]),]
sel_meta2 = metadf[which(metadf[,"newcolumn"] %in% sel[2]),]
nms1 <- rownames(sel_meta1)
nms2 <- rownames(sel_meta2)
sel_meta_more_than_2 = metadf[which(metadf[,"newcolumn"] %in% sel),]
nms <- rownames(sel_meta_more_than_2)
# register "newcolumn" as a discrete (not continuous) variable
sample_type <- mbSetObj$dataSet$meta_info
newcol_disc <- c(T);
names(newcol_disc) <- "newcolumn";
newcol_cont <- c(F);
names(newcol_cont) <- "newcolumn";
sample_type$disc_inx <- c(sample_type$disc.inx, newcol_disc);
sample_type$cont_inx <- c(sample_type$cont.inx, newcol_cont);
# dispatch to the requested test; last flag of PerformFastUnivTests
# switches parametric vs. non-parametric
if(alg=="ttest"){
res <- PerformFastUnivTests(data,factor(metadf[,"newcolumn"]),F,F)
}else if(alg =="kruskal"){
res <- PerformFastUnivTests(data,factor(metadf[,"newcolumn"]),F,T)
}else if(alg =="limma"){
metadf[,meta] <- metadf[,"newcolumn"]
res <- performLimma(data,metadf,sample_type,meta)
}
# keep statistic + p-value, order by p, add BH-adjusted p
res = res[,c(1,2)]
rownames(res) = rownames(data)
colnames(res) = c("stat", "p_value")
res = na.omit(res)
res = res[order(res[,2], decreasing=FALSE),]
pvals <- p.adjust(res[,"p_value"],method="BH");
res = cbind(res, pvals)
res = cbind(res, rownames(res))
de = res
de[de == "NaN"] = 1
# replace zero p-values by half the smallest non-zero p so -log10 is finite
pv = as.numeric(de[,"p_value"])
pv_no_zero = pv[pv != 0]
minval = min(pv_no_zero)
pv[pv == 0] = minval/2
pvals <- -log10(pv);
# derive node color and size from significance for the visualization
colorb<- ComputeColorGradient(pvals, "black");
sizes <- as.numeric(rescale2NewRange(-log10(pv), 15, 35));
res = cbind(res, colorb);
res = cbind(res, sizes);
#ids <- names(dataSet$enrich_ids[order(match(combined.res$enrich_ids,rownames(res)))])
res = cbind(res, rownames(res));
colnames(res) = c("stat", "p_value", "p_adj", "ids", "color", "size","name");
res= as.matrix(res)
library(RJSONIO)
sink(filenm);
cat(toJSON(res));
sink();
if(meta.vec == "NA"){
filenm = "OK";
# NOTE(review): combined.res is modified locally but never written back
# to combined.res.qs -- confirm whether these assignments should persist.
combined.res[[omicstype]]$comp.res = de;
combined.res$sel.meta = meta
}
return(filenm)
}
.calNMI <- function (x, y){
# Normalized mutual information between two label vectors:
# I(x;y) / sqrt(H(x) * H(y)), floored at zero (NA-safe).
a <- as.vector(x)
b <- as.vector(y)
nmi <- .mutualInformation(a, b) / sqrt(.entropy(a) * .entropy(b))
max(0, nmi, na.rm = TRUE)
}
###########################################################
####################list input################################
###########################################################
###########################################################
PrepareListInput<-function(mbSetObj, qvec, omic){
# Store a user-pasted feature list (one ID per line) on the mbSet object
# under dataSet[[omic]]$original and flag the session as list input.
mbSetObj <- .get.mbSetObj(mbSetObj);
mbSetObj$inputType <- "list";
mbSetObj$micDataType <- "otu";
# split on any newline convention (CR, LF, CRLF)
entries <- unlist(strsplit(qvec, "\r|\n|\r\n")[1]);
# drop an optional "#"-prefixed header/comment line
if(substring(entries[1], 1, 1) == "#"){
entries <- entries[-1];
}
mbSetObj$dataSet[[omic]][["original"]] <- entries;
inputType <<- "list";  # session-level flag consumed elsewhere
return(.set.mbSetObj(mbSetObj));
}
SetListInfo <-function(mbSetObj,taxalvl){
# Record list-mode parameters on the mbSet object: the taxonomy level plus
# the metabolite data/ID types already set as session globals.
mbSetObj <- .get.mbSetObj(mbSetObj);
mbSetObj$paraSet <- list(
taxalvl = taxalvl,
metDataType = metDataType,  # global set during upload
metIDType = metIDType       # global set during upload
);
taxalvl <<- taxalvl;  # keep the session-level copy in sync
return(.set.mbSetObj(mbSetObj));
}
#' Map user-supplied taxon names to the AGORA, EMBL and KEGG databases at the
#' given taxonomy level, collecting NCBI taxonomy IDs by falling through the
#' sources in order (AGORA -> EMBL -> KEGG). Writes taxa_match_result.csv and
#' stores the mapping table on mbSetObj$analSet$mic.map.
PerformMicNameMap <- function(mbSetObj,taxalvl){
mbSetObj <- .get.mbSetObj(mbSetObj);
mic.map = data.frame(Query=mbSetObj$dataSet$mic$original,agora=NA,embl=NA,kegg=NA,ncbi=NA,stringsAsFactors = F)
# AGORA taxonomy at the requested level
taxMapLong <- qs::qread(paste0(lib.path.mmp,"agora_tax.qs"))[[taxalvl]]
names(taxMapLong)[1] <- "taxa"
# NOTE(review): column 1 is the taxa-name column itself, so a successful
# match stores the database's spelling of the name -- confirm intent.
mic.map$agora <- taxMapLong[match(mic.map$Query,taxMapLong$taxa),1]
mic.map$ncbi <- taxMapLong$ncbi_id[match(mic.map$agora,taxMapLong$taxa)]
# EMBL taxonomy; only fill NCBI IDs still missing after AGORA
taxMapLong <- qs::qread(paste0(lib.path.mmp,"embl_tax.qs"))[[taxalvl]]
names(taxMapLong)[1] <- "taxa"
mic.map$embl <- taxMapLong[match(mic.map$Query,taxMapLong$taxa),1]
mic.map$ncbi[is.na(mic.map$ncbi)] <- taxMapLong$ncbi_id[match(mic.map$embl[is.na(mic.map$ncbi)],taxMapLong$taxa)]
# KEGG taxonomy; last fallback for missing NCBI IDs
taxMapKEGG <- qs::qread(paste0(lib.path.mmp,"taxMapKEGG.qs"))[[taxalvl]]
taxMapLong <- taxMapKEGG[["info"]]
names(taxMapLong)[1] <- "taxa"
mic.map$kegg <- taxMapLong[match(mic.map$Query,taxMapLong$taxa),1]
mic.map$ncbi[is.na(mic.map$ncbi)] <- taxMapLong$id[match(mic.map$kegg[is.na(mic.map$ncbi)],taxMapLong$taxa)]
# collect all KEGG entries attached to the queried taxa (session global)
mtchidx <- taxMapKEGG[which(names(taxMapKEGG) %in% mic.map$Query)]
mtcls <<- unique(unlist(mtchidx))
fast.write(mic.map, paste("taxa_match_result.csv"));
mbSetObj$analSet$mic.map = mic.map
mbSetObj$analSet$mtcls = mtcls
return(.set.mbSetObj(mbSetObj))
}
#' Map user-supplied metabolite identifiers to AGORA/EMBL GEM IDs and KEGG
#' compound IDs, attach display names, write metabolite_match_result.csv and
#' store the table on mbSetObj$analSet$met.map.
PerformMetNameMap <- function(mbSetObj,metIDType="kegg"){
mbSetObj <- .get.mbSetObj(mbSetObj);
met.map = data.frame(Query=mbSetObj$dataSet$met$original,agora=NA,embl=NA,kegg=NA,stringsAsFactors = F)
# map queries into the two GEM databases
res = MetaboIDmap("gem","agora",metIDType,met.map$Query)
met.map$agora_id = res$Match[match( met.map$Query,res$Query)]
res = MetaboIDmap("gem","embl",metIDType,met.map$Query)
met.map$embl_id = res$Match[match( met.map$Query,res$Query)]
# resolve GEM IDs to readable names unless the query is already a name
if(metIDType !='name'){
metInfo <- qs::qread(paste0(lib.path.mmp,"synonymGem.qs"));
met.map$agora = metInfo$Name[match(met.map$agora_id,metInfo$metID)]
met.map$embl = metInfo$Name[match(met.map$embl_id,metInfo$metID)]
}
if(metIDType=="kegg"){
# queries are KEGG IDs already; just look up name and network node
met.map$kegg = met.map$Query
metInfo <- qs::qread(paste0(lib.path.mmp,"general_kegg2name.qs"));
met.map$name <- metInfo$Name[match(met.map$kegg,metInfo$ID)]
met.map$node <- metInfo$node[match(met.map$kegg,metInfo$ID)]
}else{
res = MetaboIDmap("keggNet","kegg",metIDType,met.map$Query)
met.map$kegg = res$Match[match( met.map$Query,res$Query)]
# NOTE(review): unlike the kegg column above, Name/Node are assigned
# without reordering via match() -- confirm res rows align with met.map.
met.map$name = res$Name
met.map$node = res$Node
}
fast.write(met.map, paste("metabolite_match_result.csv"));
mbSetObj$analSet$met.map = met.map
return(.set.mbSetObj(mbSetObj))
}
GetMicMapCol <-function(mbSetObj, colInx){
# Return one column of the taxon name-mapping table for the Java layer;
# colInx == 0 returns the row names (query identifiers).
mbSetObj <- .get.mbSetObj(mbSetObj);
map.tbl <- mbSetObj$analSet$mic.map;
if(colInx == 0){
rownames(map.tbl)
} else {
map.tbl[, colInx]
}
}
GetMetMapCol <-function(mbSetObj, colInx){
# Return one column of the metabolite name-mapping table for the Java
# layer; colInx == 0 returns the row names (query identifiers).
mbSetObj <- .get.mbSetObj(mbSetObj);
map.tbl <- mbSetObj$analSet$met.map;
if(colInx == 0){
rownames(map.tbl)
} else {
map.tbl[, colInx]
}
}
#' Over-representation analysis (hypergeometric test) of the mapped
#' metabolite list against the selected KEGG pathway compound library.
#' Writes the result table as CSV and a JSON payload (hits, p-values, FDR,
#' display names and network nodes) named after file.nm.
PerformMetListEnrichment <- function(mbSetObj, contain,file.nm){
mbSetObj <- .get.mbSetObj(mbSetObj);
# choose the pathway-compound library by organism scope
if(contain=="bac"){
current.set <- qs::qread(paste0(lib.path.mmp,"kegg_bac_mummichog.qs"))$pathways$cpds
}else if(contain=="hsabac"){
current.set <- qs::qread(paste0(lib.path.mmp,"kegg_hsa_bac_mummichog.qs"))$pathways$cpds
}else if(contain=="hsa"){
current.set <- qs::qread(paste0(lib.path.mmp,"kegg_hsa_mummichog.qs"))$pathways$cpds
}else if(contain=="all"){
current.set <- qs::qread(paste0(lib.path.mmp,"kegg_all_mummichog.qs"))$pathways$cpds
}else{
current.set <- qs::qread(paste0(taxalvl,".current.lib.qs"))
}
# the statistical universe: every compound covered by the library
current.universe <- unique(unlist(current.set));
met.map <- mbSetObj$analSet$met.map
# prepare for the result table
set.size <- length(current.set);
res.mat <- matrix(0, nrow=set.size, ncol=5);
rownames(res.mat) <- names(current.set);
colnames(res.mat) <- c("Total", "Expected", "Hits", "Pval", "FDR");
# prepare query: KEGG IDs of successfully mapped metabolites
ora.vec <- NULL;
ora.vec <- met.map$kegg[!is.na(met.map$kegg)];
# need to cut to the universe covered by the pathways, not all genes
hits.inx <- ora.vec %in% current.universe;
ora.vec <- ora.vec[hits.inx];
#ora.nms <- ora.nms[hits.inx];
q.size <- length(ora.vec);
# get the matched query for each pathway
hits.query <- lapply(current.set, function(x) {
ora.vec[ora.vec%in%unlist(x)];});
names(hits.query) <- names(current.set);
hit.num <- unlist(lapply(hits.query, function(x){length(x)}), use.names=FALSE);
# total unique gene number
uniq.count <- length(current.universe);
# unique gene count in each pathway
set.size <- unlist(lapply(current.set, length));
res.mat[,1] <- set.size;
res.mat[,2] <- q.size*(set.size/uniq.count);
res.mat[,3] <- hit.num;
# use lower.tail = F for P(X>x)
raw.pvals <- phyper(hit.num-1, set.size, uniq.count-set.size, q.size, lower.tail=F);
res.mat[,4] <- raw.pvals;
res.mat[,5] <- p.adjust(raw.pvals, "fdr");
# now, clean up result, synchronize with hit.query
res.mat <- res.mat[hit.num>0,,drop = F];
hits.query <- hits.query[hit.num>0];
if(nrow(res.mat)> 1){
# order by p value
ord.inx <- order(res.mat[,4]);
res.mat <- signif(res.mat[ord.inx,],3);
hits.query <- hits.query[ord.inx];
# keep significant pathways; pad to 10 or cap at 120 rows for display
imp.inx <- res.mat[,4] <= 0.05;
if(sum(imp.inx) < 10){ # too little left, give the top ones
topn <- ifelse(nrow(res.mat) > 10, 10, nrow(res.mat));
res.mat <- res.mat[1:topn,];
hits.query <- hits.query[1:topn];
}else{
res.mat <- res.mat[imp.inx,];
hits.query <- hits.query[imp.inx];
if(sum(imp.inx) > 120){
# now, clean up result, synchronize with hit.query
res.mat <- res.mat[1:120,];
hits.query <- hits.query[1:120];
}
}
}
fast.write(res.mat, file=paste(file.nm, ".csv", sep=""), row.names=F);
# assemble the JSON payload; single values wrapped in matrix() so that
# toJSON emits arrays rather than scalars
resTable <- data.frame(Pathway=rownames(res.mat), res.mat,check.names=FALSE);
path.pval = resTable$Pval; if(length(path.pval) ==1) { path.pval <- matrix(path.pval) };
hit.num = paste0(resTable$Hits,"/",resTable$Total); if(length(hit.num) ==1) { hit.num <- matrix(hit.num) };
path.nms <- resTable$Pathway; if(length(path.nms) ==1) { path.nms <- matrix(path.nms) };
path.fdr <- resTable$FDR; if(length(path.fdr) ==1) { path.fdr <- matrix(path.fdr) };
# uniform pseudo-expression value (2) for every query compound
expr.mat = setNames(rep(2,q.size),ora.vec)
# translate hit KEGG IDs to display names and network node IDs
hits.met <- lapply(hits.query, function(x){
x=met.map$name[match(x,met.map$kegg)]
return(x)
})
hits.node <- lapply(hits.query, function(x){
x=met.map$node[match(x,met.map$kegg)]
return(x)
})
json.res <- list(hits.query =convert2JsonList(hits.query),
path.nms = path.nms,
path.pval = path.pval,
hit.num = hit.num,
path.fdr = path.fdr,
hits.query.nm = convert2JsonList(hits.met),
hits.node = convert2JsonList(hits.node),
expr.mat=expr.mat);
json.mat <- RJSONIO::toJSON(json.res);
json.nm <- paste(file.nm, ".json", sep="");
sink(json.nm)
cat(json.mat);
sink();
return(.set.mbSetObj(mbSetObj));
}
M2MPredictionList<- function(mbSetObj,model,predDB,psc=0.5,metType="metabolite",taxalvl){
# Predict microbe-metabolite links for list input from a genome-scale
# model (GEM) database, keeping links above the potential-score cutoff
# and restricting to the user's mapped taxa (and metabolites).
mbSetObj <- .get.mbSetObj(mbSetObj);
if(predDB=="null"| is.null(predDB) | predDB==""){
predDB <- "agora";  # default GEM collection
}
mbSetObj$paraSet$gemdb <- predDB;
require(reshape2)
message('Loading the model database..')
psc <- as.numeric(psc);
taxalvl <- tolower(taxalvl);
# taxa that mapped successfully to the chosen database
taxa.tbl <- mbSetObj$analSet$mic.map;
taxa.tbl <- taxa.tbl[which(!is.na(taxa.tbl[, predDB])), ];
# load predictions at the requested taxonomy level
pred.tbl <- qs::qread(paste0(lib.path.mmp, predDB, ".qs"))[[taxalvl]];
names(pred.tbl)[1] <- "taxa";
# keep links above the potential-score cutoff for mapped taxa only
pred.tbl <- pred.tbl[which(pred.tbl$potential >= psc), ];
pred.tbl <- pred.tbl[which(pred.tbl$taxa %in% taxa.tbl[, predDB]), ];
# translate database taxa back to the user's query names
pred.tbl$taxa <- taxa.tbl$Query[match(pred.tbl$taxa, taxa.tbl[, predDB])];
if(metType=="metabolite"){
met.map <- mbSetObj$analSet$met.map;
# restrict to metabolites present in the user's list
pred.tbl <- pred.tbl[which(pred.tbl$metID %in% met.map[, paste0(predDB, "_id")]), ];
if(mbSetObj$paraSet$metIDType=="name"){
# show the user's original names rather than database IDs
pred.tbl$metabolite <- met.map$Query[match(pred.tbl$metID, met.map[, paste0(predDB, "_id")])];
}
}
mbSetObj$analSet$predres <- pred.tbl;
qs::qsave(pred.tbl, paste0("m2m_pred_", predDB, ".qs"));
mbSetObj$analSet$integration$db <- predDB;
message("Prediction completed!")
return(.set.mbSetObj(mbSetObj));
}
#' Build an interactive microbe-metabolite potential-score heatmap
#' (iheatmapr) for list input and export it as plotly-compatible JSON.
#'
#' Rows are taxa, columns metabolites; cell values are GEM potential scores
#' (missing pairs set to 0). Optional row/column dendrograms and labels are
#' controlled by clustRow/clustCol and rowname/colname ("true" enables).
#' Stores the scaled matrix and settings on mbSetObj for report generation.
CreatM2MHeatmapList<-function(mbSetObj, plotNm, format="png",
                              smplDist="euclidean", clstDist="ward.D", palette="npj",viewOpt="barraw",
                              clustRow="T", clustCol="T",
                              colname="T",rowname="T", fontsize_col=10, fontsize_row=10,
                              potential.thresh=0.5,
                              var.inx=NA, border=T, width=NA, dpi=72){
  mbSetObj <- .get.mbSetObj(mbSetObj);
  load_iheatmapr();
  load_rcolorbrewer();
  load_viridis();
  current.msg <<- NULL
  set.seed(2805614);
  # choose the heatmap color scale from the requested palette
  if(palette=="gbr"){
    colors <- grDevices::colorRampPalette(c("green", "black", "red"), space="rgb")(256);
  }else if(palette == "heat"){
    colors <- grDevices::heat.colors(256);
  }else if(palette == "topo"){
    colors <- grDevices::topo.colors(256);
  }else if(palette == "gray"){
    colors <- grDevices::colorRampPalette(c("grey90", "grey10"), space="rgb")(256);
  }else if(palette == "byr"){
    colors <- rev(grDevices::colorRampPalette(RColorBrewer::brewer.pal(10, "RdYlBu"))(256));
  }else if(palette == "viridis") {
    colors <- rev(viridis::viridis(10))
  }else if(palette == "plasma") {
    colors <- rev(viridis::plasma(10))
  }else if(palette == "npj"){
    colors <- c("#00A087FF","white","#E64B35FF")
  }else if(palette == "aaas"){
    colors <- c("#4DBBD5FF","white","#E64B35FF");
  }else if(palette == "d3"){
    colors <- c("#2CA02CFF","white","#FF7F0EFF");
  }else {
    colors <- colorRampPalette(rev(brewer.pal(n = 7, name ="RdYlBu")), alpha=0.8)(100)
    #c("#0571b0","#92c5de","white","#f4a582","#ca0020");
  }
  plotjs <- paste0(plotNm, ".json");
  plotNm <- paste(plotNm, ".", format, sep="");
  mbSetObj$imgSet$IntegrationHeatmap <- plotNm;
  # wide matrix of potential scores: taxa (rows) x metabolites (columns)
  pred.dat <- mbSetObj$analSet$predres
  data.abd <- data.frame(mic=pred.dat$taxa,
                         met=pred.dat$metabolite,
                         var = pred.dat$potential)
  data <- data.abd[order(data.abd$var,decreasing = T),]
  # NOTE(review): thresh is computed but never applied below -- confirm
  # whether the potential threshold was meant to filter `data`.
  if(nrow(data)>1000){
    thresh <- data$var[1000]
  }else{
    thresh <- potential.thresh
  }
  data <- reshape2::dcast(data,mic~met)
  data[is.na(data)] <- 0   # absent pairs get a zero potential score
  data.mtr <- data[,-1]
  micnms <- data$mic
  metnms <- colnames(data.mtr)
  nameHt <- "Potential Score"
  data1 <- data.mtr;
  data1sc <- as.matrix(apply(data1, 2, as.numeric))
  rownames(data1sc) <- micnms
  #data1sc <- scale_mat(data1sc, scaleOpt)
  fzCol <- round(as.numeric(fontsize_col), 1)
  fzRow <- round(as.numeric(fontsize_row), 1)
  map.height <- nrow(data1)*30
  map.width <- ncol(data1)*30
  #cb_grid <- setup_colorbar_grid(nrow = 100, x_start = 1.1, y_start = 0.95, x_spacing = 0.15)
  dend_row <- hclust(dist(data1sc, method = smplDist), method = clstDist)
  p <- iheatmap(data1sc,
                # colorbar_grid = cb_grid,
                name = nameHt, x_categorical = TRUE,
                layout = list(font = list(size = 10)),
                colors = colors
  )
  if (clustRow == "true") {
    p <- p %>% add_row_dendro(dend_row, side = "right")
  }
  if (colname == "true" ){
    p <- p %>% add_col_labels(size = 0.1, font = list(size = fzCol))
  }
  # BUGFIX: row labels were gated on `colname` (copy-paste error); they are
  # controlled by the `rowname` argument.
  if (rowname == "true" ){
    p <- p %>% add_row_labels(size = 0.1, font = list(size = fzRow), side = "left")
  }
  if (clustCol == "true") {
    dend_col <- hclust(dist(t(data1), method = smplDist), method = clstDist)
    p <- p %>% add_col_dendro(dend_col)
  }
  # convert to plotly JSON, sizing the canvas from the matrix dimensions
  as_list <- to_plotly_list(p)
  if (viewOpt != "overview") {
    as_list[["layout"]][["width"]] <- max(map.width,1000)
    as_list[["layout"]][["height"]] <- max(map.height,800)
  } else {
    as_list[["layout"]][["width"]] <- 1200
    as_list[["layout"]][["height"]] <- map.height
  }
  as_json <- attr(as_list, "TOJSON_FUNC")(as_list)
  as_json <- paste0("{ \"x\":", as_json, ",\"evals\": [],\"jsHooks\": []}")
  write(as_json, plotjs)
  if(is.null(current.msg)){
    current.msg <<- "null"
  }
  # storing for Report Generation
  mbSetObj$analSet$integration$heatmap <- data1sc
  mbSetObj$analSet$integration$heatmap.dist <- smplDist
  mbSetObj$analSet$integration$heatmap.clust <- clstDist
  # NOTE(review): taxalvl here is a session global, not an argument -- confirm.
  mbSetObj$analSet$integration$taxalvl <- taxalvl
  mbSetObj$analSet$integration$overlay <- "false"
  mbSetObj$analSet$integration$htMode <- "prediction"
  mbSetObj$analSet$integration$potential <- potential.thresh;
  message("heatmap done")
  return(.set.mbSetObj(mbSetObj))
}
#' Plot predicted microbe-metabolite potentials for one selected KEGG compound.
#'
#' Looks up the compound in the user's metabolite mapping table, loads the
#' pre-computed prediction table for the selected GEM database (e.g. "agora"),
#' filters it by the potential-score threshold, then renders one horizontal
#' bar plot and (when >2 taxa) one circular bar plot per matched query,
#' written as PNGs. The top predictions are also serialized to
#' "<imgNm>.json" for the web client.
#'
#' @param mbSetObj main analysis container (or token resolved by .get.mbSetObj).
#' @param keggid KEGG compound id selected by the user.
#' @param imgNm base name for output images / JSON.
#' @param predDB GEM prediction database name ("agora" by default).
#' @param potentialThresh minimum potential score to keep a prediction.
#' @param topNum maximum number of taxa shown per metabolite.
#' @param subset currently unused flag from the Java caller.
#' @return .set.mbSetObj(...) on success; 0 on validation failure
#'   (with the reason stored in the global `current.msg`).
GetPredictionPlot <- function(mbSetObj, keggid,imgNm,predDB="agora",potentialThresh=0.5,topNum=10,subset="false"){
mbSetObj <- .get.mbSetObj(mbSetObj);
current.msg<<-"null"
topNum = as.numeric(topNum)
potentialThresh = as.numeric(potentialThresh)
mic.map = mbSetObj$analSet$mic.map
met.map = mbSetObj$analSet$met.map
# compounds that have an entry in the selected prediction database
macthed_cmpd = met.map$kegg[which( !is.na(met.map[,predDB]))]
metType = mbSetObj$paraSet$metDataType
# validate: compound must be in the user's data AND supported by the GEM DB
if(!keggid %in% met.map$kegg){
current.msg <<- "The selected compound is not provided in your input data! Please choose the related compounds!"
return(0)
}else if(!keggid %in% macthed_cmpd){
current.msg <<- "The selected compound is not supported by the selected GEM database!"
return(0)
}
if(metType=="metabolite"){
# concentration data: query names map directly to the KEGG id
qvec = met.map$Query[which(met.map$kegg==keggid)]
}else{
# peak data: collect all m/z features matched to this KEGG id, preferring
# primary-ion adducts when any are present.
# NOTE(review): `current.proc` and `primary_ions` are free (global)
# variables here — confirm they are initialized before this call.
qvecidx = which(current.proc$keggNet$Match==keggid)
adduct = current.proc$keggNet$adduct[qvecidx]
if(length(interaction(adduct,primary_ions))>0){
qvec = current.proc$keggNet$Query[which(current.proc$keggNet$Match==keggid)][which(adduct %in% primary_ions)]
}else{
qvec = current.proc$keggNet$Query[which(current.proc$keggNet$Match==keggid)]
}
print(paste0(length(qvec)," peaks related! The plots take longer time."))
}
mbSetObj$analSet$current_qvec <- qvec
message('Loading the model database..')
# NOTE(review): `taxalvl` is not a parameter of this function — it must exist
# as a global at call time; confirm against the Java caller.
taxalvl<-tolower(taxalvl)
tax_map <- mic.map[which(!is.na(mic.map[,predDB])),]
# load the per-taxon-level prediction table shipped with the GEM database
m2m_ls <- qs::qread(paste0(lib.path.mmp,predDB,".qs"))[[taxalvl]]
names(m2m_ls)[1] <- "taxa"
# keep predictions above threshold that involve taxa present in the data,
# then translate DB taxa ids back to the user's query names
m2m_ls <- m2m_ls[which(m2m_ls$potential>=potentialThresh),]
m2m_ls <- m2m_ls[which(m2m_ls$taxa %in% tax_map[,predDB]),]
m2m_ls$taxa <- tax_map$Query[match(m2m_ls$taxa,tax_map[,predDB])]
if(metType=="metabolite"){
m2m_ls <- m2m_ls[which(m2m_ls$metID %in% met.map[which(met.map$kegg==keggid),paste0(predDB,"_id")]),]
m2m_ls$met <- met.map$Query[match(m2m_ls$metID,met.map[,paste0(predDB,"_id")])]
# NOTE(review): "potentialSore" looks like a typo for "potentialScore", but
# it is part of the JSON contract with the client — do not rename casually.
predres <- data.frame(mic = m2m_ls$taxa,met=m2m_ls$met,potentialSore=m2m_ls$potential,stringsAsFactors = F)
}
# NOTE(review): when metType != "metabolite" (peak data), `predres` is never
# assigned above, so nrow(predres) below would error — confirm intended flow.
library(dplyr)
if(nrow(predres)>0 & length(unique(predres$met))==1){
# single metabolite: keep top-N predictions overall
predres <- predres[order(predres$potentialSore,decreasing = T),]
predres <- predres[1:min(topNum,nrow(predres)),]
colnm = 1
wb=6
hb = max(0.25*nrow(predres),2.5)
wc=7
hc = 7
}else if(nrow(predres)>0 & length(unique(predres$met))>1){
# multiple metabolites: keep top-N per metabolite group.
# NOTE(review): `corr.mat` looks like a leftover from the correlation
# version of this code — presumably nrow(predres) was intended; confirm.
predres <- predres[order(predres$potentialSore,decreasing = T),] %>%
group_by(met) %>%
mutate(idx=1:length(mic)) %>%
filter(idx<(min(topNum,nrow(corr.mat))+1))
colnm = 2
wb= 8
hb = min(0.25*nrow(predres)/2,60)
wc= 12
hc = 3*length(qvec)
}else{
current.msg<<-"No prediction is detected using the selected parameters! Please adjust the parameters!"
return(0)
}
# shared y-axis range for all circular plots
ylim0 = min(min(predres$potentialSore)-0.1,-0.1)
ylim1 = max(predres$potentialSore)+0.1
require("Cairo");
library(ggplot2);
library(viridis);
library(geomtextpath)
barplot <- vector("list",length=length(qvec))
circleplot <- vector("list",length=length(qvec))
# one bar plot (and one circular plot, if >2 taxa) per matched query feature
for(pl in 1:length(qvec)){
plot.df <- predres %>% filter(met==qvec[pl])
plot.df <- plot.df[order(abs(plot.df$potentialSore)),]
plot.df$mic <- factor(plot.df$mic,levels = unique(plot.df$mic))
plot.df$met <- factor(plot.df$met,levels = unique(plot.df$met))
barplot[[pl]] <- ggplot(plot.df, aes(x=mic, y=potentialSore,fill= potentialSore)) +
scale_fill_viridis_c(option = "plasma",alpha = 0.8)+
geom_bar(stat = "identity") + xlab("")+ ylab("Potential Sore")+ theme_minimal()+
coord_flip() + ggtitle(qvec[pl])+labs(fill = "Potential Sore")
if(nrow(plot.df)>2){
# compute per-bar label angle/justification so text follows the circle,
# flipping labels on the left half so they stay readable
angle <- 90 - 360 * (1:nrow(plot.df)-0.5) /nrow(plot.df)
plot.df$hjust<-ifelse( angle < -90, 1, 0)
plot.df$angle<-ifelse(angle < -90, angle+180, angle)
plot.df$yh <- 0.05
# radial label position: pull long taxa names inward so they fit
lidx <- which(nchar(as.character(plot.df$mic))>15)
plot.df$yh <- max(plot.df$potentialSore)-nchar(as.character(plot.df$mic))/90
sidx <- which((plot.df$yh-(plot.df$potentialSore+0.05))>0)
plot.df$yh[sidx] <- plot.df$potentialSore[sidx]+0.05
plot.df$yh[lidx] <- max(plot.df$potentialSore)-0.2-nchar(as.character(plot.df$mic[lidx]))/100
circleplot[[pl]] <- ggplot(plot.df,aes(x=mic, y=potentialSore,fill= potentialSore))+
geom_bar(stat="identity", color="black")+
ylim(ylim0,ylim1) + xlab("")+ ylab("Potential Sore")+
theme_minimal() +
geom_text(data=plot.df, aes(x=mic, y=yh, label=mic, hjust=hjust), color="black", fontface="bold",alpha=0.8, size=3, angle= plot.df$angle, inherit.aes = FALSE )+
coord_polar(start = 0) +scale_fill_viridis_c(option = "plasma",alpha = 0.7)+labs(fill = "Potential Sore")+
theme(
axis.text.x = element_blank()
) + ggtitle(qvec[pl])
}else{
current.msg<<-"Circle plot is not supported when associated taxa are less than 3!"
}
}
library(grid)
library(gridExtra)
library(gridGraphics);
library(cowplot)
imgNm.bar <- paste("barplot_",imgNm, ".png",sep="");
imgNm.circle <- paste("circleplot_",imgNm, ".png",sep="");
Cairo(file=imgNm.bar, width=wb, height=hb, type="png", bg="white", unit="in", dpi=100);
grid.arrange(grobs =barplot, ncol=colnm)
dev.off();
# NOTE(review): `circleplot` always exists (preallocated above), so this
# exists() guard is always TRUE; some entries may be NULL when <3 taxa.
if(exists("circleplot")){
Cairo(file=imgNm.circle, width=wc, height=hc, type="png", bg="white", unit="in", dpi=100);
grid.arrange(grobs =circleplot, ncol=colnm)
dev.off();
}
print("prediction plot done")
mbSetObj$analSet$current_predres <- predres
# write json
# single values are wrapped as matrices so toJSON emits an array, not a scalar
mic = predres$mic; if(length(mic) ==1) { mic <- matrix(mic) };
met = predres$met; if(length(met) ==1) { met <- matrix(met) };
potentialSore = predres$potentialSore; if(length(potentialSore) ==1) { potentialSore <- matrix(potentialSore) };
json.res <- list(mic = mic,
met = met,
potentialSore = potentialSore);
json.mat <- RJSONIO::toJSON(json.res);
json.nm <- paste(imgNm, ".json", sep="");
sink(json.nm)
cat(json.mat);
sink();
return(.set.mbSetObj(mbSetObj))
}
#' Re-render the prediction plots with a new top-N cutoff.
#'
#' Uses the query vector and prediction table cached by GetPredictionPlot
#' (analSet$current_qvec / analSet$current_predres) and regenerates the bar
#' and circular plots plus the JSON payload, without re-querying the GEM
#' database.
#'
#' NOTE(review): the plotting body is duplicated verbatim from
#' GetPredictionPlot — a shared private helper would keep the two in sync.
#'
#' @param mbSetObj main analysis container.
#' @param imgNm base name for output images / JSON.
#' @param topNum maximum number of taxa shown per metabolite.
#' @return .set.mbSetObj(...) on success; 0 when no predictions remain.
UpdatePredictionPlot <- function(mbSetObj,imgNm,topNum=10){
mbSetObj <- .get.mbSetObj(mbSetObj);
current.msg<<-"null"
topNum = as.numeric(topNum)
# cached results from the original GetPredictionPlot call
qvec <- mbSetObj$analSet$current_qvec
predres <- mbSetObj$analSet$current_predres
library(dplyr)
if(nrow(predres)>0 & length(unique(predres$met))==1){
# single metabolite: keep top-N predictions overall
predres <- predres[order(predres$potentialSore,decreasing = T),]
predres <- predres[1:min(topNum,nrow(predres)),]
colnm = 1
wb=6
hb = max(0.25*nrow(predres),2.5)
wc=7
hc = 7
}else if(nrow(predres)>0 & length(unique(predres$met))>1){
# multiple metabolites: keep top-N per metabolite group.
# NOTE(review): `corr.mat` looks like a leftover from the correlation
# version of this code — presumably nrow(predres) was intended; confirm.
predres <- predres[order(predres$potentialSore,decreasing = T),] %>%
group_by(met) %>%
mutate(idx=1:length(mic)) %>%
filter(idx<(min(topNum,nrow(corr.mat))+1))
colnm = 2
wb= 8
hb = min(0.25*nrow(predres)/2,60)
wc= 12
hc = 3*length(qvec)
}else{
current.msg<<-"No prediction is detected using the selected parameters! Please adjust the parameters!"
return(0)
}
# shared y-axis range for all circular plots
ylim0 = min(min(predres$potentialSore)-0.1,-0.1)
ylim1 = max(predres$potentialSore)+0.1
require("Cairo");
library(ggplot2);
library(viridis);
library(geomtextpath)
barplot <- vector("list",length=length(qvec))
circleplot <- vector("list",length=length(qvec))
# one bar plot (and one circular plot, if >2 taxa) per matched query feature
for(pl in 1:length(qvec)){
plot.df <- predres %>% filter(met==qvec[pl])
plot.df <- plot.df[order(abs(plot.df$potentialSore)),]
plot.df$mic <- factor(plot.df$mic,levels = unique(plot.df$mic))
plot.df$met <- factor(plot.df$met,levels = unique(plot.df$met))
barplot[[pl]] <- ggplot(plot.df, aes(x=mic, y=potentialSore,fill= potentialSore)) +
scale_fill_viridis_c(option = "plasma",alpha = 0.8)+
geom_bar(stat = "identity") + xlab("")+ ylab("Potential Sore")+ theme_minimal()+
coord_flip() + ggtitle(qvec[pl])+labs(fill = "Potential Sore")
if(nrow(plot.df)>2){
# per-bar label angle/justification so text follows the circle
angle <- 90 - 360 * (1:nrow(plot.df)-0.5) /nrow(plot.df)
plot.df$hjust<-ifelse( angle < -90, 1, 0)
plot.df$angle<-ifelse(angle < -90, angle+180, angle)
plot.df$yh <- 0.05
# radial label position: pull long taxa names inward so they fit
lidx <- which(nchar(as.character(plot.df$mic))>15)
plot.df$yh <- max(plot.df$potentialSore)-nchar(as.character(plot.df$mic))/90
sidx <- which((plot.df$yh-(plot.df$potentialSore+0.05))>0)
plot.df$yh[sidx] <- plot.df$potentialSore[sidx]+0.05
plot.df$yh[lidx] <- max(plot.df$potentialSore)-0.2-nchar(as.character(plot.df$mic[lidx]))/100
circleplot[[pl]] <- ggplot(plot.df,aes(x=mic, y=potentialSore,fill= potentialSore))+
geom_bar(stat="identity", color="black")+
ylim(ylim0,ylim1) + xlab("")+ ylab("Potential Sore")+
theme_minimal() +
geom_text(data=plot.df, aes(x=mic, y=yh, label=mic, hjust=hjust), color="black", fontface="bold",alpha=0.8, size=3, angle= plot.df$angle, inherit.aes = FALSE )+
coord_polar(start = 0) +scale_fill_viridis_c(option = "plasma",alpha = 0.7)+labs(fill = "Potential Sore")+
theme(
axis.text.x = element_blank()
) + ggtitle(qvec[pl])
}else{
current.msg<<-"Circle plot is not supported when associated taxa are less than 3!"
}
}
library(grid)
library(gridExtra)
library(gridGraphics);
library(cowplot)
imgNm.bar <- paste("barplot_",imgNm, ".png",sep="");
imgNm.circle <- paste("circleplot_",imgNm, ".png",sep="");
Cairo(file=imgNm.bar, width=wb, height=hb, type="png", bg="white", unit="in", dpi=100);
grid.arrange(grobs =barplot, ncol=colnm)
dev.off();
# NOTE(review): `circleplot` always exists (preallocated above), so this
# exists() guard is always TRUE; some entries may be NULL when <3 taxa.
if(exists("circleplot")){
Cairo(file=imgNm.circle, width=wc, height=hc, type="png", bg="white", unit="in", dpi=100);
grid.arrange(grobs =circleplot, ncol=colnm)
dev.off();
}
print("prediction plot done")
mbSetObj$analSet$current_predres <- predres
# write json
# single values are wrapped as matrices so toJSON emits an array, not a scalar
mic = predres$mic; if(length(mic) ==1) { mic <- matrix(mic) };
met = predres$met; if(length(met) ==1) { met <- matrix(met) };
potentialSore = predres$potentialSore; if(length(potentialSore) ==1) { potentialSore <- matrix(potentialSore) };
json.res <- list(mic = mic,
met = met,
potentialSore = potentialSore);
json.mat <- RJSONIO::toJSON(json.res);
json.nm <- paste(imgNm, ".json", sep="");
sink(json.nm)
cat(json.mat);
sink();
return(.set.mbSetObj(mbSetObj))
}
###########################################################
####################Plots################################
###########################################################
###########################################################
#' Plot histograms of between- and intra-omics correlation coefficients.
#'
#' Reads the two correlation matrices stored on the reduction set
#' (corr.mat.inter / corr.mat.intra), flattens the upper triangle of each
#' (so every feature pair is counted exactly once) and draws one histogram
#' per matrix, stacked vertically in a single PNG written to disk.
#'
#' @param imgNm base name of the output image; "dpi<dpi>.<format>" is appended.
#' @param dpi output resolution, numeric or string (coerced).
#' @param format image format suffix used in the file name (PNG is produced).
PlotCorrHistogram <- function(imgNm, dpi=72, format="png"){
  dpi <- as.numeric(dpi)
  imgNm <- paste(imgNm, "dpi", dpi, ".", format, sep="");
  library(ggplot2)
  reductionSet <- .get.rdt.set();
  fig.list <- list();
  for(i in 1:2){
    if(i == 1){
      cors <- reductionSet$corr.mat.inter
    }else{
      cors <- reductionSet$corr.mat.intra
    }
    # only one off-diagonal triangle is needed, otherwise pairs are duplicated
    cor.data <- as.data.frame(cors[upper.tri(cors, diag = FALSE)])
    colnames(cor.data) <- "coefficient"
    # Bug fix: the histogram must be built from the flattened coefficient
    # data frame. The previous code called ggplot(cors, aes(x=correlation)),
    # but `cors` is a matrix and has no `correlation` column, so the plot
    # failed; the prepared `cor.data` was never used.
    fig.list[[i]] <- ggplot(cor.data, aes(x=coefficient)) +
      geom_histogram() +
      xlim(-1,1) +
      theme_bw()
  }
  library(Cairo)
  library(ggpubr)
  Cairo(file=imgNm, width=10, height=8, unit="in", type="png", bg="white", dpi=dpi);
  # the panel labels carry the between/intra distinction
  p1 <- ggarrange(plotlist=fig.list, ncol = 1, labels=c("Between-omics correlation", "Intra-omics correlation"))
  print(p1)
  dev.off();
}
#' Render the diagnostic plot for the selected integration/clustering method.
#'
#' Dispatches on `alg` and draws the method-specific diagnostic into a PNG:
#' procrustes residuals, rCCA/sPLS scree or Q2 plots, DIABLO performance,
#' MCIA/MBPCA eigenvalues, or cluster-number diagnostics for spectrum /
#' perturbation / SNF.
#'
#' NOTE(review): `mbSetObj` is not a parameter of this function — the call to
#' .get.mbSetObj(mbSetObj) relies on a global of that name; likewise
#' `reductionSet`, `sel.nms` and `mdata.all` are free (global) variables,
#' and `diablo.comp` is written back as a global. Confirm call order.
#'
#' @param imgName output file base name (format appended).
#' @param dpi output resolution.
#' @param format image format suffix.
#' @param alg method key: procrustes, rcca, spls, diablo, mcia, mbpca,
#'   spectrum, perturbation or snf.
#' @return 1 on completion.
PlotDiagnostic <- function(imgName, dpi=72, format="png",alg){
mbSetObj <- .get.mbSetObj(mbSetObj);
dpi <- as.numeric(dpi);
imgNm <- paste(imgName, ".", format, sep="");
require("Cairo");
if(alg %in% c("snf", "spectrum") ){
h=8
fig.list <- list()
library(ggpubr);
}else{
h=8
}
Cairo(file=imgNm, width=10, height=h, type="png",unit="in", bg="white", dpi=dpi);
if(alg == "procrustes"){
# residual per sample, with the 5 largest residuals labelled
procrustes.res <- qs::qread("procrustes.res.qs")
res <- procrustes.res$dim.res[[length(procrustes.res$dim.res)]]
error = residuals(res[[1]])
require("ggrepel")
error.df = data.frame(Samples=names(error), Procrustes_residual=unname(error))
rankres <- rank(-abs(error), ties.method="random");
inx.x <- which(rankres < 6);
inx.y <- error[inx.x];
nms <- names(error)[inx.x];
subsetdf <- error.df[which(error.df$Samples %in% nms),]
# horizontal lines mark the residual quartiles/median
p = ggplot(error.df, aes(x = Samples, y = Procrustes_residual)) + geom_col() + geom_hline(yintercept = summary(error)[c(2,3,5)])+
theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank()) +
geom_text_repel(
data = subsetdf,
aes(label = Samples),
size = 5,
box.padding = unit(0.35, "lines"),
point.padding = unit(0.3, "lines")
) +
theme_bw()
print(p)
mbSetObj$imgSet$procrustes$diagnostic <- imgNm
}else if(alg == "rcca"){
require(mixOmics)
plot(reductionSet$dim.res, scree.type = "barplot")
}else if(alg == "spls"){
#save.image("diag.RData");
# cross-validated Q2 per component; 0.0975 is the usual significance line
require(mixOmics)
tune.spls <- mixOmics:::perf(reductionSet$dim.res, validation = "Mfold", folds = 10, progressBar = FALSE, nrepeat = 1)
if("Q2.total" %in% names(tune.spls)){
plot(tune.spls$Q2.total)
}else{
# newer mixOmics versions nest Q2 under $measures
r = data.frame(Comp=seq.int(nrow(tune.spls$measures$Q2.total$values)), Q2.total=tune.spls$measures$Q2.total$values$value)
rownames(r) = paste("comp", seq.int(nrow(r)))
plot(r)
}
abline(h = 0.0975)
}else if(alg == "diablo"){
require(mixOmics)
diablo.res <- qs::qread("diablo.res.qs")
res <- diablo.res$dim.res[[length(diablo.res$dim.res)]]
set.seed(123) # for reproducibility, only when the `cpus' argument is not used
# this code takes a couple of min to run
perf.res <- mixOmics:::perf(res, validation = 'Mfold', folds = 10, nrepeat = 1, dist="max.dist")
# cache the suggested component number for later steps (global)
diablo.comp <<- median(perf.res$choice.ncomp$WeightedVote)
plot(perf.res)
mbSetObj$imgSet$diablo$diagnostic <- imgNm
}else if(alg == "mcia"){
res = reductionSet$dim.res
p1<-plot.mcoin(type=3, res, phenovec=reductionSet$cls, sample.lab=FALSE, df.color=length(names(mdata.all)))
print(p1)
}else if(alg == "mbpca"){
res = reductionSet$dim.res
plotEig(3, res@eig)
#mogsa::plot(res, value="eig", type=2, xlab="Component", ylab="Eigenvalue")
}else if(alg == "spectrum"){
# eigenvector analysis when available, else raw eigenvalues
if(!is.null(reductionSet$clustRes$eigenvector_analysis)){
plotEig(length(unique(reductionSet$clustVec)), reductionSet$clustRes$eigenvector_analysis[,2]);
}else{
plotEig(length(unique(reductionSet$clustVec)), reductionSet$clustRes$eigensystem$values[1:10]);
}
}else if(alg == "perturbation"){
# AUC vs number of clusters for the two data types, with the chosen k dashed
res <- reductionSet$clustRes
library(ggpubr)
library(ggplot2)
xlabel="Number of clusters"
ylabel="AUC"
auc1 <- res$dataTypeResult[[1]]$Discrepancy$AUC[-1]
auc.df1 <- data.frame(K=seq.int(length(auc1))+1, evals=auc1)
auc2 <- res$dataTypeResult[[2]]$Discrepancy$AUC[-1]
auc.df2 <- data.frame(K=seq.int(length(auc2))+1, evals=auc2)
auc.df1$evals2 = auc.df2$evals
colnames(auc.df1) = c("K", sel.nms[[1]], sel.nms[[2]])
library("tidyverse")
df <- auc.df1 %>%
select(K, sel.nms[[1]], sel.nms[[2]]) %>%
gather(key = "variable", value = "value", -K)
p1 <- ggplot(df, aes(x = K, y = value)) +
geom_point(aes(color = variable), size=2) +
geom_line(aes(color = variable)) +
geom_vline(xintercept = length(unique(res$cluster1)),linetype="dashed")+ xlab(xlabel) +
ylab(ylabel) +
theme_bw()
print(p1)
}else if(alg == "snf"){
plotEig(length(unique(reductionSet$clustVec)), reductionSet$clustRes[[5]])
}
dev.off();
.set.mbSetObj(mbSetObj)
return(1);
}
#' Render the ordination / sample-space plot for the selected method.
#'
#' Dispatches on `type` and draws the method-appropriate sample plot into a
#' PNG: per-component DIABLO panels, variate scatter plots for rCCA/sPLS,
#' MCIA coinertia plot, MBPCA score plot, or the procrustes superimposition
#' with residual arrows.
#'
#' NOTE(review): `mbSetObj` is not a parameter — .get.mbSetObj(mbSetObj)
#' relies on a global; `reductionSet` and `mdata.all` are also free (global)
#' variables here.
#'
#' @param imgNm output file base name (format appended).
#' @param dpi output resolution.
#' @param format image format suffix.
#' @param type method key: diablo, rcca, spls, mcia, mbpca or procrustes.
PlotDiagnosticPca <- function(imgNm, dpi=72, format="png",type="diablo"){
mbSetObj <- .get.mbSetObj(mbSetObj);
require("Cairo");
library(ggplot2);
dpi<-as.numeric(dpi)
imgNm<- paste(imgNm, ".", format, sep="");
#print(imgNm)
fig.list <- list()
if(type == "diablo"){
# one plotDiablo panel per component, stacked vertically
library(grid)
library(gridExtra)
library(gridGraphics);
library(cowplot)
diablo.res <- qs::qread("diablo.res.qs")
dim.res <- diablo.res$dim.res[[length(diablo.res$dim.res)]]
fig.list[[1]] <- as_grob(function(){
plotDiablo(dim.res, ncomp = 1)
})
fig.list[[2]] <- as_grob(function(){
plotDiablo(dim.res, ncomp = 2)
})
fig.list[[3]] <- as_grob(function(){
plotDiablo(dim.res, ncomp = 3)
})
h<-8*round(length(fig.list))
Cairo(file=imgNm, width=10, height=h, type=format, bg="white", unit="in", dpi=dpi);
grid.arrange(grobs =fig.list, nrow=length(fig.list))
dev.off();
mbSetObj$imgSet$diablo$pca <- imgNm;
}else if(type == "rcca" || type == "spls"){
# one variate scatter plot per omics block, side by side
res = reductionSet$dim.res
Factor <- as.factor(reductionSet$meta$newcolumn)
library(ggplot2)
library("ggpubr")
scrs <- list()
for(i in 1:length(res$variates)){
pca.rest <- as.data.frame(res$variates[[i]][,c(1:3)])
colnames(pca.rest) <- c("PC1", "PC2", "PC3")
pca.rest$Conditions <- Factor
pca.rest$names <- rownames(res$variates[[i]])
xlim <- GetExtendRange(pca.rest[,1]);
ylim <- GetExtendRange(pca.rest[,2]);
# explained-variance slot name differs across mixOmics versions
if("prop_expl_var" %in% names(res)){
var.vec <- res$prop_expl_var[[i]]
}else{
var.vec <- res$explained_variance[[i]]
}
# proe <- signif(as.numeric(var.vec), 4)
proe <- signif(var.vec/sum(var.vec), 4)
xlabel <- paste0("Variate 1 (", proe[1]*100,"%)")
ylabel <- paste0("Variate 2 (", proe[2]*100,"%)")
if(i == 1){
# first panel keeps axes but drops the legend (shared with panel 2)
fig <- ggplot(pca.rest, aes(x=PC1, y=PC2, color=Conditions)) +
geom_point(size=3, alpha=0.5) +
xlim(xlim) +
ylim(ylim) +
xlab(xlabel) +
ylab(ylabel) +
theme_bw() +
theme(legend.position = "none") +
ggtitle(reductionSet$omicstype[[1]])
fig.list[[i]] <- fig
} else {
fig <- ggplot(pca.rest, aes(x=PC1, y=PC2, color=Conditions)) +
geom_point(size=3, alpha=0.5) +
xlim(xlim) +
ylim(ylim) +
xlab(xlabel) +
ylab(ylabel) +
theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank()) +
ggtitle(reductionSet$omicstype[[2]]) +
theme_bw()
fig.list[[i]] <- fig
}
}
h<-8
Cairo(file=imgNm, width=14, height=h, type=format, bg="white", unit="in", dpi=dpi);
p1 <- ggarrange(plotlist=fig.list, ncol = 2, nrow = 1, widths=c(7,8));
print(p1)
dev.off();
}else if(type == "mcia"){
library(omicade4)
mcoin <- reductionSet$dim.res
h<-8
Cairo(file=imgNm, width=10, height=h, type=format, bg="white", unit="in", dpi=dpi);
plot.mcoin(type=1, mcoin, phenovec=reductionSet$meta$newcolumn, sample.lab=FALSE, df.color=length(names(mdata.all)))
dev.off();
} else if(type == "mbpca"){
# score plot from the first two multi-block PCA components
res = reductionSet$dim.res
scrs <- moaScore(res);
scr <- as.data.frame(scrs[,c(1:3)])
Factor <- as.factor(reductionSet$meta$newcolumn)
pca.rest <- scr
pca.rest$Conditions <- Factor
pca.rest$names <- rownames(scr)
xlim <- GetExtendRange(pca.rest$PC1);
ylim <- GetExtendRange(pca.rest$PC2);
xlabel <- paste0("PC1")
ylabel <- paste0("PC2")
library(ggplot2)
pcafig <- ggplot(pca.rest, aes(x=PC1, y=PC2, color=Conditions)) +
geom_point(size=3, alpha=0.5) + xlim(xlim) + ylim(ylim)+ xlab(xlabel) + ylab(ylabel) +
theme_bw()
Cairo(file=imgNm, width=10, height = 8, type=format, bg="white", unit="in", dpi=dpi);
print(pcafig)
dev.off();
} else if(type == "procrustes"){
# superimposed ordinations: arrows connect each sample's two positions
library(ggplot2)
library(grid)
procrustes.res <- qs::qread("procrustes.res.qs")
pro.test <- procrustes.res$dim.res[[length(procrustes.res$dim.res)]][[1]]
pct <- pro.test$svd$d
ctest <- data.frame(rda1=pro.test$Yrot[,1], rda2=pro.test$Yrot[,2], xrda1=pro.test$X[,1],
xrda2=pro.test$X[,2],Type=procrustes.res$newmeta[,"omics"], Conditions = procrustes.res$newmeta[,1])
xlabel <- paste0("Component 1 ", "(" , signif(pct[1],4), ")")
ylabel <- paste0("Component 2 ", "(" , signif(pct[2],4), ")")
p <- ggplot(ctest) +
geom_point(aes(x=rda1, y=rda2, colour=Conditions, shape=Type)) +
geom_point(aes(x=xrda1, y=xrda2, colour=Conditions, shape=Type)) +
geom_segment(aes(x=rda1,y=rda2,xend=xrda1,yend=xrda2,colour=Conditions), alpha=0.4,arrow=arrow(length=unit(0.1,"cm"))) +
xlab(xlabel) + ylab(ylabel) +
theme_bw()
Cairo(file=imgNm, width=10, height=10, type=format, bg="white", unit="in", dpi=dpi);
print(p)
dev.off();
mbSetObj$imgSet$procrustes$pca <- imgNm
}
.set.mbSetObj(mbSetObj)
}
#' Render the feature-loading plot for the selected integration method.
#'
#' Dispatches on `type`: per-component DIABLO loading panels, mixOmics
#' correlation-circle plot for rCCA/sPLS, MCIA coinertia variable plot, or
#' an MBPCA loading scatter coloured by data type.
#'
#' NOTE(review): `mbSetObj` is not a parameter — .get.mbSetObj(mbSetObj)
#' relies on a global; `reductionSet` and `mdata.all` are also free (global)
#' variables here.
#'
#' @param imgNm output file base name (format appended).
#' @param dpi output resolution.
#' @param format image format suffix.
#' @param type method key: diablo, rcca, spls, mcia or mbpca.
PlotDiagnosticLoading <- function(imgNm, dpi=72, format="png",type="diablo"){
mbSetObj <- .get.mbSetObj(mbSetObj);
require("Cairo");
library(ggplot2)
dpi <- as.numeric(dpi);
imgNm <- paste(imgNm, ".", format, sep="");
mbSetObj$imgSet$diablo$loading <- imgNm
if(type == "diablo"){
# top-10 loadings for each of the first three components, stacked
library(grid)
library(gridExtra)
library(cowplot)
fig.list <- list()
diablo.res <- qs::qread("diablo.res.qs")
dim.res <- diablo.res$dim.res[[length(diablo.res$dim.res)]]
fig.list[[1]] <- as_grob(function(){
plotLoadings(dim.res, ndisplay=10, comp = 1, contrib="max", method="median", size.name=1.1, legend=T)
})
fig.list[[2]] <- as_grob(function(){
plotLoadings(dim.res, ndisplay=10, comp = 2, contrib="max", method="median", size.name=1.1, legend=T)
})
fig.list[[3]] <-as_grob(function(){
plotLoadings(dim.res, ndisplay=10, comp = 3, contrib="max", method="median", size.name=1.1, legend=T)
})
h <- 8*round(length(fig.list))
Cairo(file=imgNm, width=13, height=h, type=format, bg="white", unit="in", dpi=dpi);
grid.arrange(grobs =fig.list, nrow=length(fig.list))
dev.off();
}else if(type == "rcca" || type == "spls"){
Cairo(file=imgNm, width=12, height=10, type=format, bg="white", unit="in", dpi=dpi);
plotVar(reductionSet$dim.res, comp = 1:2, cutoff = 0.5, var.names = c(TRUE, TRUE),
cex = c(4, 4), title = 'rCCA comp 1 - 2')
dev.off();
}else if(type == "mcia"){
library(omicade4)
mcoin <- reductionSet$dim.res
Cairo(file=imgNm, width=10, height=10, type=format, bg="white", unit="in", dpi=dpi);
plot.mcoin(type=2, mcoin, phenovec=reductionSet$cls, sample.lab=FALSE, df.color=1:length(names(mdata.all)))
dev.off();
}else if(type == "mbpca"){
# loadings carry a "_<datatype>" suffix; strip it and keep it as a factor
library(ggplot2)
moa <- reductionSet$dim.res
loading <- moa@loading[,c(1:3)]
loading <- as.data.frame(loading)
colnames(loading) = c("PC1", "PC2", "PC3")
d.types <- rownames(moa@RV)
loading$Type <- gsub(".*_", "", rownames(loading))
for(i in 1:length(d.types)){
rownames(loading) = gsub(paste0("_", d.types[i]), "",rownames(loading))
}
loading$Type <- as.factor(loading$Type)
xlim <- GetExtendRange(loading[,1]);
ylim <- GetExtendRange(loading[,2]);
xlabel <- paste0("PC1")
ylabel <- paste0("PC2")
pcafig <- ggplot(loading, aes(x=PC1, y=PC2, color=Type)) +
geom_point(size=3, alpha=0.5) +
xlim(xlim) + ylim(ylim) + xlab(xlabel) + ylab(ylabel) +
theme_bw()
Cairo(file=imgNm, width=10, height=10, type=format, bg="white", unit="in", dpi=dpi);
print(pcafig)
dev.off();
}
.set.mbSetObj(mbSetObj)
}
#' Two-element summary row for the diagnostics table.
#'
#' Clustering methods (perturbation/spectrum/snf) report the number of
#' clusters and the NMI; procrustes reports the sum of squares and the
#' scaling factor; every other method reports two empty strings.
#'
#' @param type method key.
#' @return character/numeric vector of length 2.
GetDiagnosticSummary <- function(type){
  clust.methods <- c("perturbation", "spectrum", "snf")
  if(type %in% clust.methods){
    rdt <- .get.rdt.set()
    n.clust <- length(unique(rdt$clustVec))
    return(c(n.clust, signif(rdt$clustNmi)))
  }
  if(type == "procrustes"){
    pro <- qs::qread("procrustes.res.qs")
    # the last entry holds the final-dimension result; [[2]] is the test stats
    res <- pro$dim.res[[length(pro$dim.res)]][[2]]
    return(c(signif(res$ss, 4), signif(res$scale, 4)))
  }
  c("", "")
}
#' Build a categorical colour array of `grp.num` colours.
#'
#' Two fixed base palettes are available; when more colours are requested
#' than the base palette provides, the palette is interpolated with
#' colorRampPalette. If `filenm` is given the colours are written to that
#' file as a JSON array (for the web client) and the file name is returned;
#' otherwise the colour vector itself is returned.
#'
#' @param grp.num number of colours needed (coerced to numeric).
#' @param type "green" selects the default 18-colour palette; anything else
#'   selects the alternative ordering.
#' @param filenm optional JSON output path.
gg_color_hue <- function(grp.num, type="green", filenm=NULL) {
  grp.num <- as.numeric(grp.num)
  if(type == "green"){
    base.pal <- c("#e6194B", "#3cb44b", "#4363d8", "#ffff00", "#f032e6",
                  "#ffe119", "#911eb4", "#f58231", "#bfef45", "#fabebe",
                  "#469990", "#e6beff", "#9A6324", "#800000", "#aaffc3",
                  "#808000", "#ffd8b1", "#000075")
  } else {
    base.pal <- c( "#4363d8","#e6194B" , "#3cb44b", "#f032e6", "#ffe119",
                  "#e6194B", "#f58231", "#bfef45", "#fabebe", "#469990",
                  "#e6beff", "#9A6324", "#800000", "#aaffc3", "#808000",
                  "#ffd8b1", "#42d4f4","#000075", "#ff4500")
  }
  # take the first n base colours directly; interpolate only when we run out
  if(grp.num <= 18){
    colArr <- base.pal[1:grp.num]
  } else {
    colArr <- colorRampPalette(base.pal)(grp.num)
  }
  if(is.null(filenm)){
    return(colArr)
  }
  # serialize the colour array as JSON for the client
  library(RJSONIO)
  sink(filenm)
  cat(toJSON(colArr))
  sink()
  return(filenm)
}
###########################################################
####################Helpper functions######################
###########################################################
###########################################################
#' Record the four MMP upload descriptors as globals.
#'
#' These are deliberately stored in the global environment because
#' downstream processing functions read them as free variables.
#'
#' @param inputType overall input type of the upload.
#' @param micDataType microbiome data type.
#' @param metDataType metabolomics data type ("metabolite" or peak data).
#' @param metIDType metabolite identifier type (e.g. "kegg").
#' @return 1 (Java-facing success convention).
SetMMPDataType <- function(inputType, micDataType, metDataType, metIDType){
  assign("inputType", inputType, envir = .GlobalEnv)
  assign("micDataType", micDataType, envir = .GlobalEnv)
  assign("metDataType", metDataType, envir = .GlobalEnv)
  assign("metIDType", metIDType, envir = .GlobalEnv)
  return(1)
}
InitCurrentProc <-function(){
current.proc<- vector("list",length=2)
names(current.proc)<-c("mic","met")
moduleType <<-"mmp"
if(metIDType=="kegg"){
metInfo <- qs::qread(paste0(lib.path.mmp,"general_kegg2name.qs"));
mbSetObj <- .get.mbSetObj(mbSetObj);
keggids <- rownames(mbSetObj$dataSet$metabolomics$data.orig)
nms<- metInfo$Name[match(keggids, metInfo$ID)]
current.proc$id2nm <- setNames(nms,keggids)
}else{
mbSetObj <- .get.mbSetObj(mbSetObj);
nms <- rownames(mbSetObj$dataSet$metabolomics$data.orig)
current.proc$id2nm <- setNames(nms,nms)
}
mbSetObj$inputType <- inputType;
mbSetObj$micDataType <-micDataType;
mbSetObj$metDataType <-metDataType;
mbSetObj$metIDType <-metIDType;
current.proc<<-current.proc
.set.mbSetObj(mbSetObj)
return(1)
}
#' Record LC-MS peak annotation parameters on the global `current.proc` list.
#'
#' @param rtOpt retention-time option; "no" disables RT usage, any other
#'   value enables it and is stored as the RT type.
#' @param mode ionization mode.
#' @param instrumentOpt instrument mass accuracy, coerced to numeric.
#' @return 1 (Java-facing success convention).
SetPeakParameter <- function(rtOpt, mode, instrumentOpt){
  proc <- current.proc
  use.rt <- rtOpt != "no"
  proc$mumRT <- use.rt
  proc$mumRT.type <- if(use.rt) rtOpt else NA
  proc$mode <- mode
  # adducts are resolved later; "mpt" is the expected peak table format
  proc$adducts <- NA
  proc$peakFormat <- "mpt"
  proc$instrument <- as.numeric(instrumentOpt)
  proc$rt_frac <- 0.02
  proc$primary_ion <- "yes"
  current.proc <<- proc
  return(1)
}
#' Drop an uploaded data set and its working directory.
#'
#' Removes the in-memory copy from the mbSetObj container and deletes the
#' "<dataName>_data" scratch folder. In meta-analysis mode the matching
#' entry in the global `mdata.all` bookkeeping list is removed as well.
#'
#' @param dataName name of the data set to remove.
RemoveData <- function(dataName){
  mbSetObj <- .get.mbSetObj(NA)
  if(!is.null(mbSetObj$dataSets[[dataName]])){
    # forget the in-memory copy and its on-disk scratch folder
    mbSetObj$dataSets[[dataName]] <- NULL
    unlink(paste0(dataName, "_data"), recursive = TRUE)
  }
  if(mbSetObj$module.type == "meta"){
    if(!is.null(mdata.all[[dataName]])){
      mdata.all[[dataName]] <<- NULL
    }
  }
  return(.set.mbSetObj(mbSetObj))
}
#' Report the dimensions and NA count of a microbiome or metabolomics table.
#'
#' Before processing (current.proc$mic is NULL) the raw CSV is read and its
#' dimensions returned, discounting the first (feature-ID) column. After
#' processing, the dimensions of the processed matrix held in the global
#' `current.proc` list are returned.
#'
#' @param dataType "mic" for the microbiome table, anything else for the
#'   metabolomics table.
#' @param dataName CSV path, used only in the pre-processing case.
#' @return numeric vector c(nrow, ncol, number of NA values).
GetMicMetDataDims <- function(dataType,dataName){
  if(is.null(current.proc$mic)){
    data <- read.csv(dataName)
    dm <- dim(data);
    dm[2] <- dm[2]-1  # first column holds feature IDs, not samples
    naNum <- sum(is.na(data));
  }else{
    # Bug fix: the previous code assigned the processed data frame itself to
    # `dm` instead of its dimensions, so the returned vector was the whole
    # table concatenated with the NA count rather than c(nrow, ncol, naNum).
    if(dataType=="mic"){
      dm <- dim(current.proc$mic$data.proc)
      naNum <- sum(is.na(current.proc$mic$data.proc));
    }else{
      dm <- dim(current.proc$met$data.proc)
      naNum <- sum(is.na(current.proc$met$data.proc));
    }
  }
  return(c(dm, naNum));
}
#' List the usable taxonomy ranks for the processed phyloseq object.
#'
#' Ranks where every feature has the same single value (e.g. all "Viruses"
#' at Phylum) are dropped; "OTU" is always appended as the finest level.
#'
#' @param mbSetObj main analysis container.
#' @param istaxalbl flag from the Java caller (currently unused in the body).
#' @return character vector of rank names, or "Phylum"/"OTU" fallbacks.
GetMetaTaxaInfoMMP <- function(mbSetObj,istaxalbl){
mbSetObj <- .get.mbSetObj(mbSetObj);
proc.phyobj <- mbSetObj$dataSet$proc.phyobj;
#check that each rank has >2 groups
taxa.tbl <- as(tax_table(proc.phyobj), "matrix")
if(ncol(taxa.tbl)==1){
taxa.nms <- "Phylum"
return(taxa.nms)
}
#drop taxa with only 1 level (i.e. Viruses at Phylum)
gd.inx <- apply(taxa.tbl, 2, function(x) length(unique(x))!=1);
taxa.tbl.update <- taxa.tbl[,gd.inx, drop=FALSE];
if(ncol(taxa.tbl.update) == 0){
current.msg <<- c("All taxa info for the remaining features are the same!")
return("OTU")
}
# NOTE(review): rank_names() is a phyloseq generic normally applied to a
# phyloseq/taxonomyTable object, but here it receives a plain matrix —
# confirm it resolves to the column names as intended.
taxa.nms <- rank_names(taxa.tbl.update);
return(c(taxa.nms[!is.na(taxa.nms)],"OTU"));
}
# Clear the analysis workspace.
#
# Bug fix: inside a function, ls() lists the function's own (empty) local
# environment, so the original rm(list = ls()) removed nothing. Target the
# global environment explicitly so the session workspace is actually cleared.
CleanMMP <- function(){
  rm(list = ls(envir = globalenv()), envir = globalenv())
}
# Linearly map a numeric vector onto the interval [first, last]:
# min(x) maps to 'first' and max(x) maps to 'last'.
ReScale <- function(x, first, last) {
  rng <- range(x)
  first + (x - rng[1]) * (last - first) / (rng[2] - rng[1])
}
# Rescale a vector of quantities into the display range [a, b].
#
# Zeros are first replaced by 1; for short vectors (< 50 values) the lower
# bound is doubled so small glyph sizes stay readable. When all values are
# equal a constant size of 8 is returned.
#
# Fix: removed a stray 'ReScale' symbol left on the opening line of the
# original definition (it evaluated the ReScale function object and
# discarded the result on every call).
rescale2NewRange <- function(qvec, a, b){
  qvec <- replace(qvec, qvec == 0, 1)
  q.min <- min(qvec)
  q.max <- max(qvec)
  if(length(qvec) < 50){
    a <- a * 2
  }
  if(q.max == q.min){
    # Degenerate case: no spread, use a fixed mid-size value.
    new.vec <- rep(8, length(qvec))
  }else{
    # Solve new = coef.a * q + const.b so that q.min -> a and q.max -> b.
    coef.a <- (b - a) / (q.max - q.min)
    const.b <- b - coef.a * q.max
    new.vec <- coef.a * qvec + const.b
  }
  return(new.vec)
}
# ---- Accessors for the global network / correlation result tables --------
# These thin getters expose columns of two globals to the calling (web)
# layer: 'net.stats' (per-network summary, built elsewhere) and 'corrRes'
# (edge-level correlation table populated by GetCorrRes()).

# Network names (row names of the global 'net.stats' table).
GetNetsName <- function(){
  rownames(net.stats);
}

# Edge counts per network.
GetNetsEdgeNum <- function(){
  as.numeric(net.stats$Edge);
}

# Node counts per network. NOTE(review): returned as character despite the
# "Num" name -- presumably the consumer expects strings; confirm.
GetNetsNodeNum <- function(){
  as.character(net.stats$Node);
}

# Query hit counts per network.
GetNetsQueryNum <- function(){
  as.numeric(net.stats$Query);
}

# Microbe (edge source) names from the correlation table.
GetMicNm <- function(){
  as.character(corrRes$source);
}

# Metabolite (edge target) names from the correlation table.
GetMetNm <- function(){
  as.character(corrRes$target);
}

# Correlation index per edge.
GetCorrIdx <- function(){
  as.numeric(corrRes$corrindex);
}

# Correlation p-values per edge.
GetCorrPval <- function(){
  as.numeric(corrRes$corrpval);
}

# Prediction p-values per edge.
GetPredPval <- function(){
  as.numeric(corrRes$predpval);
}

# All network names concatenated with "||" (single string for the UI layer).
GetNetsNameString <- function(){
  nms <- paste(rownames(net.stats), collapse="||");
  return(nms);
}
# Assemble the correlation result table for the microbe-metabolite network.
#
# Joins the correlation p-value matrix onto the network edge list
# (m2mNet_graphlist$links), keeping only nodes retained in the network.
# Side effect: stores the result in the global 'corrRes' (read by the
# GetMicNm/GetMetNm/Get*Pval accessors above). Returns 1 on completion.
GetCorrRes <- function(mbSetObj){
  mbSetObj <- .get.mbSetObj(mbSetObj);
  corr.pval <- mbSetObj$analSet$corr.pval
  # Restrict the p-value matrix to nodes present in the network graph.
  idxkeep <- rownames(corr.pval) %in% mbSetObj$analSet$m2mNet_graphlist$nodes$name
  corr.pval <- reshape2::melt(corr.pval[idxkeep,idxkeep])
  corrRes <- mbSetObj$analSet$m2mNet_graphlist$links
  corrRes <- dplyr::left_join(corrRes,corr.pval,by=c("source"="Var1","target"="Var2"))
  # Keep source, target, correlation index, correlation p-value, prediction
  # p-value (selected by position -- assumes the column layout built above).
  corrRes <- corrRes[,c(1,2,3,7,4)]
  names(corrRes)[3:5] <- c("corrindex","corrpval","predpval")
  corrRes<<-corrRes
  return(1)
}

# Name of the most recently rendered scatter plot (global set elsewhere).
GetCurrentScatter <- function(){
  return(jsonNms_scatter)
}
# Prepare a list for JSON serialization: wrap length-one elements in a
# 1x1 matrix so they serialize as arrays rather than scalars; longer
# elements pass through unchanged.
convert2JsonList <- function(my.list){
  wrap_single <- function(elem) {
    if (length(elem) == 1) matrix(elem) else elem
  }
  lapply(my.list, wrap_single)
}
|
c6ea74dc04d4ef19e82aadbfac3072d9ce936988 | 43541a956f5cabd86404a120bfb63d6ae08aad0c | /man/IlluminaHumanMethylation27kmanifest.Rd | a978597bc70809a3891cd8670f13d7bdd0bc3545 | [] | no_license | hansenlab/IlluminaHumanMethylation27kmanifest | 1a86ffa73f8fbf5320dbf68dad5494defdcbf1b5 | 53867f42d897c91c96100ec1244cabb4a67895c9 | refs/heads/master | 2023-06-03T01:22:32.463021 | 2016-06-15T18:12:40 | 2016-06-15T18:12:40 | 378,113,969 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 686 | rd | IlluminaHumanMethylation27kmanifest.Rd | \name{IlluminaHumanMethylation27kmanifest}
\alias{IlluminaHumanMethylation27kmanifest}
\docType{data}
\title{
Manifest object for the IlluminaHumanMethylation27k array
}
\description{
This object contains the array design for Illumina's Human Methylation
27k microarray.
}
\usage{data(IlluminaHumanMethylation27kmanifest)}
\details{
This package is based on the file
\code{HumanMethylation27_270596_v1-2.csv}.
}
\format{
An object of class \code{IlluminaMethylationManifest}
}
\seealso{
The description of the class in the minfi package,
\code{\link[minfi]{IlluminaMethylationManifest-class}}.
}
\examples{
data(IlluminaHumanMethylation27kmanifest)
}
\keyword{datasets}
|
4767bc2b0e709804518af52211225e46c9ced535 | c1d359cdf0281885744cdcd85d41a21e91218b43 | /man/KYCG_annoProbes.Rd | 18fb357d6a2717b42f61ce97656b77dc06049670 | [
"MIT"
] | permissive | zwdzwd/sesame | 20b2d29578661487db53432c8991d3c4478aa2c1 | 62fe6ef99a02e7f94b121fb601c3f368b8a4c1a8 | refs/heads/master | 2023-08-08T01:45:02.112492 | 2023-07-26T13:23:03 | 2023-07-26T13:23:03 | 122,086,019 | 37 | 26 | MIT | 2023-01-05T16:02:38 | 2018-02-19T16:00:34 | R | UTF-8 | R | false | true | 1,075 | rd | KYCG_annoProbes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KYCG.R
\name{KYCG_annoProbes}
\alias{KYCG_annoProbes}
\title{Annotate Probe IDs using KYCG databases}
\usage{
KYCG_annoProbes(
query,
databases,
db_names = NULL,
platform = NULL,
sep = ",",
indicator = FALSE,
silent = FALSE
)
}
\arguments{
\item{query}{probe IDs in a character vector}
\item{databases}{character or actual database (i.e. list of probe IDs)}
\item{db_names}{specific database (default to all databases)}
\item{platform}{EPIC, MM285 etc. will infer from probe IDs if not given}
\item{sep}{delimiter used in paste}
\item{indicator}{return the indicator matrix instead of a concatenated
annotation (in the case of have multiple annotations)}
\item{silent}{suppress message}
}
\value{
named annotation vector, or indicator matrix
}
\description{
see sesameData_annoProbes if you'd like to annotate by genomic coordinates
(in GRanges)
}
\examples{
query <- names(sesameData_getManifestGRanges("MM285"))
anno <- KYCG_annoProbes(query, "designGroup", silent = TRUE)
}
|
bf7bca41a1c06515f06c1bdf0ef7bca2e23a8a61 | 1535e5168530e093771ac053d276c2f58ee24008 | /R/useful-items.R | ada8acf0eb556f12c7fada93e87c9aa89895a1e5 | [] | no_license | csiesel/CaseyGentelellaShiny | 656f542ce48963faa143a4c9f51eaf6ce5913673 | ea83c5d06afb88cb634a7dbbeda4f72701aa0f69 | refs/heads/master | 2021-03-14T01:10:35.098888 | 2020-03-12T04:09:47 | 2020-03-12T04:09:47 | 246,726,429 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,808 | r | useful-items.R | # #' A wizard container
# #'
# #' @param ... slot for \link{wizardItem}
# #' @param orientation wizard orientation: "horizontal" or "verticle"
# #'
# #' @examples
# #' if (interactive()) {
# #' library(shiny)
# #' library(gentelellaShiny)
# #' shinyApp(
# #' ui = gentelellaPageCustom(
# #' gentelellaBody(
# #' wizard(
# #' wizardItem(1, description = "blabla"),
# #' wizardItem(2, description = "blabla")
# #' )
# #' )
# #' ),
# #' server = function(input, output, session) {}
# #' )
# #' }
# #'
# #' @export
# wizard <- function(..., orientation = "horizontal") {
#
# items <- list(...)
# len_items <- length(items)
#
# # add the proper number to each item
# items <- lapply(X = 1:len_items, FUN = function(i) {
# current_item <- items[[i]][["tag"]]
# if (i == 1) current_item$attribs$style <- "display: block;"
# htmltools::tagAppendAttributes(current_item, id = paste0("step-", i))
# })
#
# # create the corresponding menu
# itemsMenu <- lapply(X = 1:len_items, FUN = function(i) {
#
# current_item_desc <- items[["desc"]][[i]]
#
# shiny::tags$li(
# shiny::a(
# href = paste0("#step-", i),
# class = if (i == 1) "selected" else "disabled",
# isdone = "1", # always 1 by default
# rel = i,
# shiny::span(class = "step_no", i),
# shiny::span(
# class = "step_descr",
# paste("Step", i),
# shiny::br(),
# shiny::tags$small(current_item_desc)
# )
# )
# )
# })
#
# # main tag
# htmltools::withTags({
# shiny::div(
# id = if (orientation == "vertical") "wizard_verticle" else "wizard",
# class = paste0("form_wizard wizard_", orientation),
# shiny::tags$ul(
# class = if (orientation == "vertical") {
# "list-unstyled wizard_steps anchor"
# } else {
# "wizard_steps anchor"
# },
# # insert setp items
# itemsMenu
# ),
# shiny::div(
# class = "stepContainer",
# style = "height: 154px;",
# lapply(X = 1:len_items, function(i) { items[[i]] })
# )#,
# # action bar
# #shiny::div(
# # class = "actionBar",
# # shiny::div(
# # class = "msgBox",
# # shiny::div(class = "content"),
# # shiny::a(href = "#", class = "close", "X")
# # ),
# # shiny::div(class = "loader", "Loading"),
# # shiny::a(href = "#", class = "buttonNext btn btn-success", "Next"),
# # shiny::a(href = "#", class = "buttonPrevious btn btn-primary", "Previous")
# #)
# )
# })
# }
#
#
# #' A wizard item
# #'
# #' @param ... any UI element
# #' @param display Whether to diplay if or not. "none" by default.
# #' @param description Item description, if any.
# #'
# #' @export
# wizardItem <- function(..., display = "none", description = NULL) {
# list(
# tag = shiny::div(
# class = "content",
# style = paste0("display: ", display, ";"),
# ...
# ),
# desc = description
# )
# }
#' A social stats container
#'
#' @param ... slot for socialStatsItem
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Inline <ul> container for social statistics; children are expected to be
# socialStatsItem() tags.
socialStats <- function(...) {
  children <- list(...)
  do.call(shiny::tags$ul, c(list(class = "list-inline count2"), children))
}
#' A social stats item to insert in a socialStats container
#'
#' @param value Item value
#' @param name Item name
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# One social-stats entry: a large value (<h3>) above its caption (<span>).
socialStatsItem <- function(value = NULL, name = NULL) {
  valueTag <- shiny::tags$h3(value)
  captionTag <- shiny::tags$span(name)
  shiny::tags$li(valueTag, captionTag)
}
#' A simple circular diagram item
#'
#' https://github.com/rendro/easy-pie-chart
#'
#' @param id Unique id.
#' @param value Item value
#' @param height Canvas height. 220 px by default.
#' @param width Canvas width. 220 px by default.
#' @param barColor Default: #ef1e25. The color of the curcular bar. You can either pass a valid css color string, or a function that takes the current percentage as a value and returns a valid css color string.
#' @param trackColor Default: #f2f2f2. The color of the track, or false to disable rendering.
#' @param scaleColor Default: #dfe0e0. The color of the scale lines, false to disable rendering.
#' @param scaleLength Default: 5. Length of the scale lines (reduces the radius of the chart).
#' @param lineCap Default: 'round'. Defines how the ending of the bar line looks like. Possible values are: butt, round and square.
#' @param lineWidth Default: 3. Width of the chart line in px.
#' @param rotate Default: 0. Rotation of the complete chart in degrees.
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' box(
#' title = "pieChart",
#' "If you've decided to go in development mode and
#' tweak all of this a bit, there are few things
#' you should do.",
#' pieChart(id = "chart1", value = 10),
#' pieChart(
#' id = "chart2",
#' value = 20,
#' barColor = "#0000FF",
#' trackColor = "#FFA500",
#' scaleColor = "#dfe0e0",
#' scaleLength = 10,
#' lineCap = "square",
#' lineWidth = 6,
#' rotate = 180
#' )
#' )
#' )
#' ),
#' server = function(input, output, session) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Build an easy-pie-chart percentage dial.
#
# id:             unique element id targeted by the jQuery initialiser below.
# value:          percentage to display, shown in the centre of the dial.
# height, width:  canvas size in pixels.
# barColor/trackColor/scaleColor: colours of the bar, track and scale ticks.
# scaleLength:    length of the scale tick lines.
# lineCap:        bar line ending ("butt", "round" or "square").
# lineWidth:      bar thickness in px.
# rotate:         rotation of the whole chart in degrees.
#
# Returns a tagList combining the chart markup with a head <script> that
# initialises the easyPieChart jQuery plugin for this id; shiny::singleton()
# prevents the same script from being inserted twice.
pieChart <- function(id, value, height = 220, width = 220,
                     barColor = "#ef1e25", trackColor = "#f2f2f2",
                     scaleColor = "#dfe0e0", scaleLength = 5,
                     lineCap = "round", lineWidth = 3, rotate = 0) {
  # Chart markup: the span the plugin targets, the centred percentage text,
  # and the canvas the plugin draws on.
  pieChartTag <- shiny::div(
    style = "text-align: center; margin-bottom: 17px;",
    shiny::span(
      class = "chart",
      id = id,
      `data-percent` = value,
      shiny::span(
        class = "percent",
        value
      ),
      shiny::tags$canvas(height = height, width = width)
    )
  )
  # initialisation of the chart: the R arguments are spliced into the
  # plugin's JS options object (string-built, so values must be scalars).
  shiny::tagList(
    shiny::singleton(
      shiny::tags$head(
        shiny::tags$script(
          paste0(
            "$(function() {
$('#", id, "').easyPieChart({
//your options goes here
barColor:'", barColor,"',
trackColor:'", trackColor,"',
scaleColor:'", scaleColor,"',
scaleLength:", scaleLength,",
lineCap:'", lineCap,"',
lineWidth:", lineWidth,",
rotate:", rotate,"
});
});
"
          )
        )
      )
    ),
    pieChartTag
  )
}
#' A timeline block
#'
#' @param ... slot for \link{timelineItem}
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' box(
#' width = 4,
#' title = "Timeline",
#' timeline(
#' timelineItem(
#' title = "Who Needs Sundance When You’ve Got Crowdfunding?",
#' url = NULL,
#' date = "13 hours ago",
#' author = "Jane Smith",
#' "Film festivals used to be do-or-die moments for movie makers.
#' They were where you met the producers that could fund your
#' project, and if the buyers liked your flick, they’d pay to
#' Fast-forward and ..."
#' ),
#' timelineItem(
#' title = "Who needs Money",
#' url = "http:://www.google.com",
#' date = "Today",
#' author = "John Doe",
#' "Nobody need money!",
#' tag = "My tag"
#' )
#' )
#' )
#' )
#' ),
#' server = function(input, output, session) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Unstyled <ul> container for a timeline; children come from timelineItem().
timeline <- function(...) {
  entries <- list(...)
  do.call(shiny::tags$ul, c(list(class = "list-unstyled timeline"), entries))
}
#' A timeline item
#'
#' @param ... timeline item content, any element
#' @param title timeline item title
#' @param url timelime item external link
#' @param date timeline item date
#' @param author timeline item author, if any
#' @param tag timeline item tag
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Build one timeline entry (<li>) for use inside timeline().
#
# ...:    body text/elements, rendered as the "excerpt" paragraph.
# title:  heading text, linked to 'url' (opens in a new tab).
# url:    external link for the title.
# date:   display date shown in the byline.
# author: author name shown after "by".
# tag:    optional tag ribbon; omitted entirely when NULL.
timelineItem <- function(..., title = NULL, url = NULL, date = NULL, author = NULL,
                         tag = NULL) {
  shiny::tags$li(
    shiny::div(
      class = "block",
      # Optional tag ribbon. NOTE(review): href = NA renders as href="NA";
      # presumably a placeholder for a real link -- confirm intended.
      if (!is.null(tag)) {
        shiny::div(
          class = "tags",
          shiny::a(
            href = NA,
            class = "tag",
            shiny::span(tag)
          )
        )
      },
      shiny::div(
        class = "block_content",
        shiny::h2(
          class = "title",
          shiny::a(
            title,
            href = url,
            target = "_blank"
          )
        ),
        # Byline: "<date> by <author>".
        shiny::div(
          class = "byline",
          shiny::span(date),
          "by",
          shiny::a(author)
        ),
        shiny::p(class = "excerpt", ...)
      )
    )
  )
}
#' A quick list container
#'
#' @param ... slot for \link{quickListItem}
#'
#'@examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' box(
#' width = 4,
#' title = "Quick Lists",
#' quickList(
#' quickListItem(icon = icon("calendar-o"), name = "Settings"),
#' quickListItem(icon = icon("bars"), name = "Subscription")
#' )
#' )
#' )
#' ),
#' server = function(input, output, session) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# <ul> wrapper for quick-access entries; children from quickListItem().
quickList <- function(...) {
  entries <- list(...)
  do.call(shiny::tags$ul, c(list(class = "quick-list"), entries))
}
#' A quick list item
#'
#' @param icon item icon
#' @param name item name
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# One quick-list entry: an icon followed by a text link (href-less <a>).
quickListItem <- function(icon, name = NULL) {
  link <- shiny::tags$a(name)
  shiny::tags$li(icon, link)
}
#' A box widget container
#'
#' @param ... slot for any widget
#' @param title widget title
#' @param width widget width
#'
#'@examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' box(
#' width = 6,
#' title = "Box Widget",
#' fluidRow(
#' column(
#' width = 3,
#' align = "center",
#' sliderInput(
#' "obs",
#' "Number of observations:",
#' min = 0,
#' max = 1000,
#' value = 500
#' )
#' ),
#' column(
#' width = 9,
#' boxWidget(
#' title = "Widget",
#' plotOutput("distPlot")
#' )
#' )
#' )
#' )
#' )
#' ),
#' server <- function(input, output) {
#' output$distPlot <- renderPlot({
#' hist(rnorm(input$obs))
#' })
#' }
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Dashboard widget frame: an outer content div wrapping a fixed-width inner
# widget with an <h4> header. Note a NULL 'width' yields the inline style
# "width: px;" exactly as before (harmless in browsers).
boxWidget <- function(..., title = NULL, width = NULL) {
  inner <- shiny::tags$div(
    class = "sidebar-widget",
    style = paste0("width: ", width, "px;"),
    shiny::tags$h4(title),
    ...
  )
  shiny::tags$div(class = "dashboard-widget-content", inner)
}
#' A user list block
#'
#' @param ... slot for userListItem
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' box(
#' width = 3,
#' title = "user List",
#' userList(
#' userListItem(
#' user_img = "https://image.flaticon.com/icons/svg/145/145862.svg",
#' user_url = "http:://www.google.com",
#' title = "user 1",
#' subtitle = "2 Sales Today",
#' "$2300. Agent Avarage Sales."
#' ),
#' userListItem(
#' user_img = "https://image.flaticon.com/icons/svg/145/145864.svg",
#' user_url = "http:://www.google.com",
#' title = "user 2",
#' subtitle = "4 Sales Today",
#' "$4600. Agent Avarage Sales."
#' )
#' )
#' )
#' )
#' ),
#' server <- function(input, output) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Message-style <ul> container; children come from userListItem().
userList <- function(...) {
  entries <- list(...)
  do.call(shiny::tags$ul, c(list(class = "list-unstyled msg_list"), entries))
}
#' A user list item
#'
#' @param ... Any content
#' @param user_img User image
#' @param title item title
#' @param subtitle item subtitle
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# One entry (<li>) for userList(): avatar image, title/subtitle, message.
#
# ...:      message body content.
# user_img: avatar image source (path or URL).
# title:    main label (e.g. user name).
# subtitle: secondary label styled with the "time" class.
#
# NOTE(review): the title/time and message spans are nested inside the
# "image" span rather than being siblings -- looks unusual; confirm the
# theme's CSS expects this structure before restructuring it.
userListItem <- function(..., user_img = NULL, title = NULL, subtitle = NULL) {
  shiny::tags$li(
    shiny::tags$a(
      shiny::tags$span(
        class = "image",
        shiny::img(src = user_img),
        shiny::tags$span(
          shiny::tags$span(title),
          shiny::tags$span(class = "time", subtitle)
        ),
        shiny::tags$span(
          class = "message",
          ...
        )
      )
    )
  )
}
#' Progress bar
#'
#' Progress bars are scaled from 0 to 100
#'
#' @param value progress value
#' @param side From which side the bar comes: "left" or "right". "left" by default.
#' @param status progress status: "danger", "warning", "info", "success" or "primary".
#' When status is not NULL, color is NULL
#' @param striped Whether the progress bar is striped or not. FALSE by default.
#' @param color Alternative to status: "red", "orange", "green", "blue", "purple".
#' When color is not NULL, status is NULL.
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' box(
#' width = 3,
#' title = "Progress Bars",
#' progressBar(
#' 20,
#' side = "left",
#' status = "danger",
#' striped = FALSE
#' ),
#' progressBar(
#' 70,
#' side = "right",
#' color = "purple",
#' striped = TRUE
#' )
#' )
#' )
#' ),
#' server <- function(input, output) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Bootstrap progress bar (values on a 0-100 scale).
#
# value:   progress value, written to data-transitiongoal / aria-valuenow.
# side:    "left" (default) or "right" (bar anchored to the right edge).
# status:  contextual class ("danger", ...); takes precedence over 'color'.
# striped: add the striped style when TRUE.
# color:   theme colour class ("red", ...); used only when 'status' is NULL.
progressBar <- function(value, side = "left", status = NULL, striped = FALSE,
                        color = NULL){
  progressBarCl <- "progress-bar"
  if (!is.null(status)) {
    # 'status' wins: discard any colour that was also supplied.
    if (!is.null(color)) color <- NULL
    progressBarCl <- paste0(progressBarCl, " progress-bar-", status)
  }
  if (!is.null(color)) {
    status <- NULL
    progressBarCl <- paste0(progressBarCl, " bg-", color)
  }
  if (side == "left") progressCl <- "progress" else progressCl <- "progress right"
  if (striped) progressCl <- paste0(progressCl, " progress-striped")
  shiny::tags$div(
    class = progressCl,
    shiny::tags$div(
      class = progressBarCl,
      `data-transitiongoal` = value,
      `aria-valuenow` = value,
      # NOTE(review): inline width is hard-coded to 25%; presumably the
      # bootstrap-progressbar JS animates toward data-transitiongoal and
      # overrides this -- confirm, otherwise width should track 'value'.
      style = "width: 25%;"
    )
  )
}
#' A jumbotron
#'
#' @param ... Any UI element or text
#' @param title jumbotron title
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' jumbotron(
#' title = "Hello, world!",
#' "This is a simple hero unit, a simple jumbotron-style
#' component for calling extra attention to featured
#' content or information."
#' )
#' )
#' ),
#' server <- function(input, output) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Bootstrap jumbotron: an <h1> heading followed by a body paragraph, wrapped
# in the theme's "bs-example" container.
jumbotron <- function(..., title = NULL) {
  hero <- shiny::tags$div(
    class = "jumbotron",
    shiny::tags$h1(title),
    shiny::tags$p(...)
  )
  shiny::tags$div(
    class = "bs-example",
    `data-example-id` = "simple-jumbotron",
    hero
  )
}
#' An alert
#'
#' @param ... Alert text
#' @param title Alert title
#' @param status Alert status: "danger", "warning", "info", "success" or "primary"
#' @param dismissible Whether the alert is closable or not. TRUE by default.
#' @param width Alert width. 3 by default.
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' alert(
#' status = "warning",
#' title = "An alert",
#' "Best check yo self,
#' you're not looking too good."
#' )
#' )
#' ),
#' server <- function(input, output) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Bootstrap alert box wrapped in a shiny::column.
#
# ...:         alert body content.
# title:       bold leading text.
# status:      contextual class ("primary", "danger", ...); NULL for none.
# dismissible: add a close ("x") button when TRUE.
# width:       bootstrap column width the alert occupies.
alert <- function(..., title = NULL, status = "primary", dismissible = TRUE, width = 3) {
  # Build the class string incrementally from the chosen options.
  alertCl <- "alert fade in"
  if (!is.null(status)) alertCl <- paste0(alertCl, " alert-", status)
  if (dismissible) alertCl <- paste0(alertCl, " alert-dismissible")
  shiny::column(
    width = width,
    shiny::tags$div(
      class = alertCl,
      role = "alert",
      # Close button only rendered for dismissible alerts.
      if (dismissible) shiny::tags$button(
        type = "button",
        class = "close",
        `data-dismiss` = "alert",
        `aria-label` = "Close",
        shiny::tags$span(`aria-hidden` = "true", "x")
      ),
      shiny::tags$strong(title),
      shiny::br(),
      ...
    )
  )
}
# #' A rating tag
# #'
# #' @param value value between 0 and 5
# #'
# #' @examples
# #' if (interactive()) {
# #' library(shiny)
# #' library(gentelellaShiny)
# #' shinyApp(
# #' ui = gentelellaPageCustom(
# #' gentelellaBody(
# #' stars(value = 4)
# #' )
# #' ),
# #' server <- function(input, output) {}
# #' )
# #' }
# #'
# #' @export
# stars <- function(value) {
#
# stop_val <- 5 - value
#
# shiny::tags$p(
# class = "ratings",
# shiny::tags$a(value),
# if (value >= 1) {
# # full stars
# shiny::tagList(
# lapply(X = 1:value, FUN = function(i) {
# shiny::tags$a(shiny::tags$span(class = "fa fa-star"))
# }),
# # empty stars
# lapply(X = 1:stop_val, FUN = function(i) {
# shiny::tags$a(shiny::tags$span(class = "fa fa-star-o"))
# })
# )
# } else {
# lapply(X = 1:5, FUN = function(i) {
# shiny::tags$a(shiny::tags$span(class = "fa fa-star-o"))
# })
# }
# )
# }
#' An activity list
#'
#' @param ... Slot for activityItem
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' box(
#' title = "Activity List",
#' activityList(
#' lapply(X = 1:3, FUN = function(i) {
#' activityItem(
#' title = "Desmond Davison",
#' img = paste0("https://image.flaticon.com/icons/svg/1087/108783", i,".svg"),
#' day = 13,
#' month = "june",
#' url = "http://www.google.com",
#' "Raw denim you probably haven't heard of them jean shorts Austin.
#' Nesciunt tofu stumptown aliqua butcher retro keffiyeh
#' dreamcatcher synth."
#' )
#' })
#' )
#' )
#' )
#' ),
#' server <- function(input, output) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# "messages" <ul> container; children come from activityItem().
activityList <- function(...) {
  entries <- list(...)
  do.call(shiny::tags$ul, c(list(class = "messages"), entries))
}
#' An activity item
#'
#' @param ... item content
#' @param title item title
#' @param img img path or url
#' @param day day of publication
#' @param month month of publication
#' @param url external link
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# One activity entry (<li>) for activityList(): avatar, date block, message.
#
# ...:   quoted message body.
# title: heading shown above the message.
# img:   avatar image path or URL.
# day:   day-of-month shown in the date column.
# month: month label shown under the day.
# url:   external "More" link (opens in a new tab).
activityItem <- function(..., title = NULL, img = NULL,
                         day = NULL, month = NULL, url = NULL) {
  shiny::tags$li(
    shiny::tags$img(src = img, class = "avatar"),
    # Left-hand date column: big day number over the month label.
    shiny::tags$div(
      class = "message_date",
      shiny::tags$h3(class = "date text-info", day),
      shiny::tags$p(class = "month", month)
    ),
    shiny::tags$div(
      class = "message_wrapper",
      shiny::tags$h4(class = "heading", title),
      shiny::tags$blockquote(class = "message", ...),
      shiny::br(),
      shiny::tags$p(
        class = "url",
        shiny::tags$a(
          href = url,
          target = "_blank",
          shiny::tags$span(shiny::icon("info"), " More")
        )
      )
    )
  )
}
#' tileCountRow
#'
#' @param ... tileCountElements
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' tileCountRow(
#' lapply(1:4, tileCountElement)
#' )
#' )
#' ),
#' server <- function(input, output) {}
#' )
#' }
#'
#' @author Mark Edmondson, \email{m@@sunholo.com}
#'
#' @export
# Bootstrap row wrapping one or more tileCountElement() tiles.
tileCountRow <- function(...){
  tiles <- shiny::tagList(...)
  shiny::tags$div(class = "row tile_count", tiles)
}
#' tileCountRow Element
#'
#' @param value Count value
#' @param change_value Change value
#' @param going_well If TRUE then change_value is green, else red
#' @param tile_title Title text
#' @param width Width of tile in bootstrap
#' @param icon_in Icon to show
#' @param from_text Change text
#' @param highlight color to highlight value
#'
#' @return a tileCountRow for use within \link{tileCountRow}
#'
#' @author Mark Edmondson, \email{m@@sunholo.com}
#'
#' @export
# One dashboard "count tile": headline value, trend arrow, caption.
#
# value:        headline count displayed in the tile.
# change_value: change text shown next to the trend arrow (e.g. "4%").
# going_well:   TRUE -> green up arrow, FALSE -> red down arrow.
# tile_title:   caption shown next to 'icon_in' above the count.
# width:        bootstrap md-column width of the tile.
# icon_in:      icon shown before the title.
# from_text:    comparison-period text shown after the arrow.
# highlight:    optional extra CSS class applied to the count value.
tileCountElement <- function(value = 2500, change_value = "4%", going_well = TRUE,
                             tile_title = " Total Users", width = 3,
                             icon_in = shiny::icon("user"), from_text = " From last Week",
                             highlight = NULL){
  # Trend indicator: colour and arrow direction reflect 'going_well'.
  if (going_well) {
    bottom_icon <- shiny::tags$i(class = "green", shiny::icon("sort-asc"), change_value)
  } else {
    bottom_icon <- shiny::tags$i(class = "red", shiny::icon("sort-desc"), change_value)
  }
  htmltools::withTags({
    shiny::div(
      class = paste0("col-md-",width," col-sm-4 col-xs-6 tile_stats_count"),
      shiny::span(class = "count_top", icon_in, tile_title),
      shiny::div(class = paste("count", highlight), value),
      shiny::span(class = "count_bottom", bottom_icon, from_text)
    )
  })
}
#' tileCount UI
#'
#' Shiny Module for use with \link{tileCountElement}
#'
#' @param id Shiny id
#'
#' @return Shiny UI
#'
#' @author Mark Edmondson, \email{m@@sunholo.com}
#'
#' @export
# Module UI: a single namespaced uiOutput placeholder that the paired
# updateTileCount() server module fills with a rendered tile.
tileCountUI <- function(id){
  shiny::uiOutput(shiny::NS(id)("tile_count"))
}
#' updateTileCount
#'
#' Shiny Module for use with \link{tileCountUI}
#'
#' Call via \code{shiny::callModule(updateTileCount, "your_id")}
#'
#' @param input shiny input
#' @param output shiny output
#' @param session shiny session
#' @param value [reactive] Count value
#' @param change_value [reactive] Change value
#' @param going_well [reactive] If TRUE then change_value is green, else red
#' @param tile_title Title text
#' @param width Width of tile in bootstrap
#' @param icon_in Icon to show
#' @param from_text Change text
#' @param highlight [reactive] color to highlight value
#'
#' @return NULL
#'
#' @author Mark Edmondson, \email{m@@sunholo.com}
#'
#' @export
# Module server for tileCountUI(): re-renders the tile whenever any of the
# reactive inputs (value, change_value, going_well, highlight) changes.
#
# value/change_value/going_well/highlight are reactives; the remaining
# arguments are static and forwarded to tileCountElement() unchanged.
# Returns NULL (called for its side effect on 'output').
#
# Cleanup: removed the unused local 'ns <- session$ns'; the output id needs
# no extra namespacing here because assignment goes through output$ directly.
updateTileCount <- function(input, output, session, value, change_value,
                            going_well, tile_title = " Total Users",
                            width = 2, icon_in = shiny::icon("user"),
                            from_text = " From last Week", highlight = shiny::reactive(NULL)){
  output$tile_count <- shiny::renderUI({
    tileCountElement(
      value = value(),
      change_value = change_value(),
      going_well = going_well(),
      tile_title = tile_title,
      width = width,
      icon_in = icon_in,
      from_text = from_text,
      highlight = highlight()
    )
  })
}
#' Create a label or badge
#'
#' @param name label name
#' @param status label status: "danger", "warning", "info", "success" or "primary".
#' If mode is "badge" statuses are "red", "orange", "green", "blue", "purple".
#' @param position label position: NULL by default, "pull-right" or "pull-left".
#' If mode is "badge", position is NULL
#' @param mode Either "label" or "badge". "label" by default.
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(gentelellaShiny)
#' shinyApp(
#' ui = gentelellaPageCustom(
#' gentelellaBody(
#' box(
#' title = "Labels",
#' label(name = "David", status = "warning", mode = "badge"),
#' br(), br(), br(),
#' label(name = "Mark", position = "pull-right"),
#' label(name = "Isabella", status = "danger", position = "pull-left")
#' )
#' )
#' ),
#' server <- function(input, output) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
# Create a label or badge <span>.
#
# name:     text displayed inside the tag.
# status:   colour/status suffix. Labels use bootstrap statuses ("primary",
#           "danger", ...); badges use colour names ("red", "green", ...).
# position: optional "pull-left"/"pull-right" float class (labels only;
#           always dropped for badges, as before).
# mode:     "label" (default) or "badge".
#
# Fix: the original unconditionally forced status to "green" whenever
# mode == "badge", making the documented badge colours unreachable (the
# roxygen example passes status = "warning" with mode = "badge"). The
# "green" default is now applied only when the caller omitted 'status'.
label <- function(name = NULL, status = "primary",
                  position = NULL, mode = "label") {
  if (mode == "badge") {
    position <- NULL                        # badges never float
    if (missing(status)) status <- "green"  # badge-specific default colour
  }
  # Badges use "badge bg-<status>" classes, labels "label label-<status>".
  prefix <- if (mode == "badge") "bg" else mode
  labelCl <- paste0(mode, " ", prefix, "-", status)
  if (!is.null(position)) labelCl <- paste0(labelCl, " ", position)
  shiny::tags$span(class = labelCl, name)
}
|
88bc55fcdef4ca3eccf062c9f1898e0b314d5f66 | 1c45135b0801ec0d39d359b4b8e72b8009bd51e5 | /Ingreso nacional.R | bb04fb6ad6e299d9fc199d32ed7a125958869bc6 | [] | no_license | AdeAlonso/Modelos-discretos | 59eec1f9d010569faea92ade0e2931c786bccf99 | e9c68436ff07b76ac8913c4713e42a0b7cf31ec3 | refs/heads/master | 2020-04-28T19:52:15.128313 | 2019-03-14T01:19:46 | 2019-03-14T01:19:46 | 175,524,861 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 128 | r | Ingreso nacional.R | x=rep(0,21)
n=seq(0,20)
x[1]=1
x[2]=2
a=1/2
b=1
for(j in 3:length(n)) {
x[j]=a*x[j-1]+(a*b*(x[j-1]-x[j-2]))+1
plot(n,x)
}
|
a0d6d6ce01e58179eb1bd7cb54ce2badfc1d26ad | 9d987d82210f6454ca6289c182df2fc24ac8ade3 | /SpaDES-master/SAMPLE/maybe not needed/Eliot-data.table.R | 5f7e07bcbf9fac2823a43a795ff61a9b5a56ba05 | [] | no_license | B-Ron12/RCodeSK | b298d24cf75b03f3e7c590f1b8f69130d810ab47 | 4d1ef7255c297668b9705d77054704991445ce37 | refs/heads/master | 2021-01-21T18:43:53.011945 | 2016-02-03T21:48:31 | 2016-02-03T21:48:31 | 45,698,759 | 0 | 0 | null | 2015-11-06T18:11:11 | 2015-11-06T18:11:09 | null | UTF-8 | R | false | false | 8,869 | r | Eliot-data.table.R | devtools::install_github("lineprof")
devtools::install_github("pryr")
devtools::install_github("shiny-slickgrid", "wch")
library(lineprof)
library(pryr)
library(shiny)
devtools::load_all("c:/Eliot/GitHub/SpaDES")
library(microbenchmark)
library(raster)
library(RColorBrewer)
#library(compiler)
#enableJIT(3)
#library(SpaDES)
a = raster(extent(0,1e2,0,1e2),res=1)
landscape = hab = GaussMap(a,speedup=10)
names(hab)="hab"
cells = loci = b = as.integer(sample(1:ncell(a),1e1))
mask = raster(a)
mask = setValues(mask, 0)
mask[1:50] <- 1
numCol <- ncol(a)
numCell <- ncell(a)
directions=8
cols = list(c("#00000000",brewer.pal(8,"RdYlGn")[8:1]),brewer.pal(9,"Greys"),brewer.pal(8,"Spectral"))
# Transparency involves putting 2 more hex digits on the color code, 00 is fully transparent
simPlot(hab)
dE = drawExtent()
dev(2)
simPlot(crop(hab,dE),col=cols[[2]])
names(hab)<-"hab"
(mb2 = microbenchmark(times = 1L,
hab = habitat[["Age"]]
fire2 <- spread(hab,loci=as.integer(sample(1:ncell(hab),10)),mapFireID=T,
spreadProb = 0.235,0,NULL,1e8,8,1e6,mergeDuplicates = T,
plot.it=F,col=cols[[1]],delete.previous=F,add=F)
dis <- distanceFromPoints(hab,pts)
))
dev(4)
fires = list()
for (fir in 1:10)
fires[[fir]] <- spread(hab,loci=as.integer(sample(1:ncell(hab),10)),
spreadProb = runif(1,0.2,0.3),0,NULL,1e8,8,1e6,mergeDuplicates = T,
plot.it=F,col=cols[[1]],delete.previous=T,add=F,on.which.to.plot="hab")
names(fire2)<-"fire"
vp = viewport(xscale = rangex,yscale= rangey,width=0.8,height=0.8,
name=paste(deparse(substitute(x))))
simPlot(fire2,col=cols[[1]])
upViewport()
grid.raster(as.raster(cols[[1]][9:2] ),
x=0.94,y=0.5,height=0.5,width=0.03,
interpolate=TRUE)
pr = unname(quantile(range(minValue(fire2),maxValue(fire2)),c(0,0.5,1)))
grid.text(pr,x=0.98, y = pr/(2*max(pr,na.rm=T))+0.25,...)
dev(4)
pts = SpatialPoints(xyFromCell(fire2,Which(fire2>0,cells=T)))
simPlot(x=pts,on.which.to.plot="fire",add=T,pch=15,gp=gpar(cex=0.5))
(mb = microbenchmark(
simPlot(fire2,col=cols[[1]]),
simPlot(x=pts,on.which.to.plot="fire",add=T,pch=15,gp=gpar(cex=0.5)),
times=10L
))
# crop
simPlot(crop(hab,dE),col=cols[[2]])
simPlot(crop(fire2,dE),add=T,on.which.to.plot="hab",delete.previous=F,col= cols[[1]])
simPlot(stack(stack(fires),hab),col=cols[c(sample(1:3,15,replace=T),2)])
#
simPlot(fire2,col=cols[[1]],speedup=10,add=T,on.which.to.plot="hab",delete.previous=F)
simPlot(fire2,col=cols[[1]],speedup=10,add=T,on.which.to.plot="fire",delete.previous=F)
newPlot()
simPlot(hab,speedup=15,col=brewer.pal(9,"Accent"))
simPlot(stack(speedup=15,fire0,fire1,hab),col=cols)
mb = list()
for (i in 3:3) {
hab = raster(extent(0,10^i,0,10^i),res=1)
b=as.integer(sample(1:ncell(hab),10))
#library(compiler)
#spread.c = cmpfun(spread)
jmax = 10
maxes = data.frame(matrix(nrow = 2, ncol=jmax))
times = data.frame(matrix(nrow = 2, ncol=jmax))
for (j in 1:jmax) {
mb[[j]] <- microbenchmark(times = 1L,
fire0 <- spread(hab,loci=b,1,0,NULL,1e8,8,1e6),
# fire1 <- spread.adjacent(hab,loci=b,0.235,0,NULL,1e8,8,1e6),
fire2 <- spread.m(hab,loci=b,1,0,NULL,1e8,8,1e6,mergeDuplicates=T)
# fire3 <- spread.c(hab,loci=b,0.235,0,NULL,1e8,8,1e6)
)
maxes[,j]=c(maxValue(fire0),maxValue(fire2))
times[,j] = summary(mb[[j]])[[4]]
try(rm(fire0,fire2))
gc()
print(j)
}
}
print(rowMeans(maxes))
print(rowMeans(times))
#times = cbind(times, times1)
#maxes = cbind(maxes, maxes1)
times1 = times
maxes1 = maxes
dev(4)
r = 1
coefs = data.frame(matrix(ncol = 2))
plot(0,type = "n", xlim = c(0,200),ylim = c(1,600),
ylab="time in seconds",xlab="num iterations",log="y")
for (r in 1:2){
coefs[r,] = coef(lm(as.numeric(times[r,])~as.numeric(maxes[r,])))
points(as.numeric(maxes[r,]), as.numeric(times[r,]),col= r,pch=19)
}
legend("topleft",col=1:2,pch=19, legend =
c("current spread","new spread"))
out <-sapply(mb,function(x) print(x)[[4]])
for (i in 1:3)
out[,i]<-out[,i]/1000
par(mai=c(1, 1, 1, 1))
num.pixels = (10^(1:4))^2
fns = c("original","adj","optimized.adj","recompiled.optimized.adj")
plot(1:4,out[,4],log="y",type="l",ylim = c(0.05,25),xlab="",axes=F,ylab="Time in seconds")
lapply(1:4,function(x) {lines(1:4,out[,x],col=x)})
axis(2)
axis(1,label=fns,at=1:4)
legend("topright",inset=c(-0.1,-0.5),xpd=NA,legend=num.pixels,lty=1,col=1:4,title="num pixels")
out2 = numeric()
for(i in 1:4)
out2[i] <- out[1,i]/out[3,i]
plot(num.pixels,out2,log="x",ylab="speedup factor")
mtext(side=3,"Speedup between spread fn in Raster, and new spread\nas a function of num.pixels in raster")
enableJIT(0)
system.time(fire2 <- spread.adj(hab,loci=b,1,0,NULL,1e3,8,1e6))
enableJIT(3)
system.time(fire3 <- spread.adj.c(hab,loci=b,1,0,NULL,1e3,8,1e6))
adj.c <- compiler::cmpfun(adj)
spread.adj.c <- compiler::cmpfun(spread.adj)
profs <- lineprof(spread(hab,loci=b,0.225,0,NULL,1e2,8,1e6))
shine(profs)
profs3 <- lineprof(adj(numCol=numCol,numCell=numCell,sort=T,as.data.table=T,
cells=cells,directions=8,pairs=T,include=F))
shine(profs3)
library(lineprof)
prof4 <- lineprof(adj2(numCol=numCol,numCell=numCell,sort=T,as.data.table=T,
cells=cells,directions=8,pairs=T,include=F,match.adjacent=F))#,
shine(prof4)
#newPlot();
#dev.set(4)
simPlot(stack(fire1,fire2,fire0),speedup=1)
mb1 = list()
i = 0:4
#library(compiler)
#adj.cmp <- cmpfun(adj)
#enableJIT(0)
for (ind in i) {
numCells = 10^ind
cells = sample(numCell,numCells)
(mb1[[ind+1]] = microbenchmark(times=20L,
adj.orig <- adjacent(a,cells,sort=T,directions=8,include=F,pairs = T),
# adj.new4 <- adj.raw(numCol=numCol,numCell=numCell,sort=F,#,as.data.table=T,
# cells=cells,directions=8,pairs=T,include=F,match.adjacent=F),
adj.new4.1 <- adj(numCol=numCol,numCell=numCell,sort=F,#,as.data.table=T,
cells=cells,directions=8,pairs=T,include=F,match.adjacent=F)
# adj.new4.1 <- adj4(numCol=numCol,numCell=numCell,sort=F,cutoff = 1e4,#,as.data.table=T,
# cells=cells,directions=8,pairs=T,include=F,match.adjacent=F)
# adj.new4.2 <- adj4(numCol=numCol,numCell=numCell,sort=T,cutoff = 1e5,#,as.data.table=T,
# cells=cells,directions=8,pairs=T,include=F,match.adjacent=F)
# adj.new5 <- adj4(numCol=numCol,numCell=numCell,sort=T,#,as.data.table=T,
# cells=cells,directions=8,pairs=T,include=F,match.adjacent=T)
# adj.new <- adj(numCol=numCol,numCell=numCell,sort=T,#as.data.table=T,
# cells=cells,directions=8,pairs=T,include=F)#,
# adj.new2 <- adj2(numCol=numCol,numCell=numCell,sort=T,as.data.table=T,
# cells=cells,directions=8,pairs=T,include=F,match.adjacent=F)#,
# adj.new.m = adj.m(numCol=numCol,numCell=numCell,sort=F,as.data.table=FALSE,cells=cells,directions=8,pairs = F),
# adj.new.m2 = adj.m(numCol=numCol,numCell=numCell,sort=T,cells=cells,directions=8,pairs = F)
#adj.new3 <- adj3(numCol=numCol,numCell=numCell,cells=cells,directions=8),
#adj.new4 <- adj4(numCol=numCol,numCell=numCell,cells=cells,directions=8)
))
}
print(mb1)
print(data.frame(nCells=10^(0:(length(mb1)-1)),
matchF=sapply(lapply(mb1,function(x) summary(x)[[4]]),function(x) x[1]/x[2]),
matchT=sapply(lapply(mb1,function(x) summary(x)[[4]]),function(x) x[1]/x[3])#,
#speedup1e5=sapply(lapply(mb1,function(x) summary(x)[[4]]),function(x) x[1]/x[4])
))
plot(10^(0:(length(mb1)-1)),
sapply(lapply(mb1,function(x) summary(x)[[4]]),
function(x) x[1]/x[3]),log="xy")
plot(mb,horiz=FALSE)
print(all.equal(adj.orig,adj.new))
###################################################################################
<<tapered-pareto, fig=FALSE, eval=FALSE>>=
# Count number of pixels in each fire (removing the pixels with no fires)
fireSizes = sort(unname(table(getValues(habitat[["Fires"]]))[-1]))
probEx = vector(length = length(unique(fireSizes)))
for (i in unique(fireSizes))
probEx[match(i,unique(fireSizes))] <- length(fireSizes[fireSizes>i])
library(PtProcess)
library(parallel)
source("~/GitHub/SpaDES/SAMPLE/taperedPareto.R")
cl <- makePSOCKcluster(rep("localhost", 7))
data = data.frame(Area_ha= fireSizes)
est.tap = wrap.nlminb(data = data)
fireSizes.theor <- rtappareto(1000,lambda=est.tap$par[1],theta=est.tap$par[2],a=1)
fireSizes.theor <- sort(round(fireSizes.theor,0))
probEx.theor = vector(length = length(unique(fireSizes.theor)))
for (i in unique(fireSizes.theor))
probEx.theor[match(i,unique(fireSizes.theor))] <- length(fireSizes.theor[fireSizes.theor>i])
dev(4); plot(unique(fireSizes.theor),probEx.theor,log="xy",type = "l", ylab = "probability of exceeding", main=paste(nFires,"fires in an",nx,"by",ny,"landscape"))
par(new = T)
dev(4); plot(unique(fireSizes),probEx, col = "red",type = "l",log="xy")
#lines(unique(fireSizes),probEx, col = "red")
@
|
676f29a700d2d28b36f73a6e8b6d5ea1326ef7a0 | 7dd7431a7d41b4bda5135a0083e717bf1eaf0d72 | /scripts/generate_random.R | c30233fd2f9a775df8f3c115fd93af10b201a735 | [] | no_license | malmriv/hopfield | 22178c51756395993e01803e014eeba980003420 | ddf9a5c5dbb54072ffa5094c14e64bbbbc1e38f3 | refs/heads/master | 2022-11-08T06:49:44.658946 | 2020-06-19T23:49:03 | 2020-06-19T23:49:03 | 273,083,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 476 | r | generate_random.R | nswitch = function(a) {
if(a==1) a=0
else if(a==0) a=1
}
# Generate 200 fully random pattern files by flipping bits of a template set.
# Output: ../random/1.txt ... ../random/200.txt (one 0/1 value per line).
dir.create("../random")
files = list.files("../sets")
for(l in 1:200) {
  # NOTE(review): files[1] re-reads the same first file every iteration --
  # possibly files[l] was intended; with noise = 0.5 the result is i.i.d.
  # random either way, so confirm before changing.
  data = read.table(paste("../sets/",files[1],sep=""))
  noise = 0.5 #Completely random
  # one uniform draw per row; a row is flipped when its draw falls below noise
  random = runif(length(data[,1]),min=0,max=1)
  for(i in 1:length(data[,1])) {
    if(random[i]<noise) {
      # nswitch toggles 0 <-> 1 (defined above); assumes values are only 0/1
      data[i,1] = nswitch(data[i,1])
    }
  }
  write.table(data,file=paste("../random/",l,".txt",sep=""),col.names=FALSE,row.names=FALSE,quote=FALSE)
644021dd6e16f061532211985e11ba4d1c8a1ef8 | 6c38c5850f822a151b3930a1574d80718876e69c | /RFiles/For Anniete.R | f987cd0a6292ac9648ff7decde7bed8d36ddb776 | [] | no_license | christianmconroy/Georgetown-Coursework | 7eb30f7c3d20a831ade19c21177b0eb7ad30d288 | d301800adc35cb6a509be5c75fc6c46c3263537b | refs/heads/master | 2020-05-09T13:59:33.434366 | 2020-05-07T16:10:16 | 2020-05-07T16:10:16 | 181,173,259 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 370 | r | For Anniete.R | # Set WD
# Reshape a wide skills-by-year CSV into long format with Year and Skill columns.
setwd("~/GeorgetownMPPMSFS/McCourtMPP/RFiles")
# Load in Data
df <- read.csv("SampleForAnn.csv", stringsAsFactors = FALSE)
library(dplyr)
library(tidyr)
# Columns are named like "X1980.Total" ... "X2010.High"; gather() stacks them
# into a key column `var`, then separate(sep = -6) splits the last 6 characters
# (e.g. ".Total") off into Skill, leaving "X1980." style strings in Year.
# NOTE(review): gather()/separate() are superseded by pivot_longer()/
# separate_wider_position(); kept as-is to preserve behavior.
final <- df %>%
  gather(var, Response, X1980.Total:X2010.High) %>% ## Makes wide data long
  separate(var, c("Year", "Skill"), sep = -6) ## Splits up a column
# Strip the leading "X" and trailing "." so Year holds just the 4-digit year.
final$Year <- substring(final$Year, 2, 5)
564949fecb19fc6db46daa6d878addcaa939e20a | b0401403a3259ce2bf43a0d17fdbbf91b9a48c0a | /R/rast.R | 593cdeec74cb53a9ee9933a641bad745d9965c5f | [] | no_license | cran/terra | e619f8d942189daef2f8436d14bdcbe6c89a6d79 | e3ea767455d46d961fc82c8fcb3aef424c9ecad7 | refs/heads/master | 2023-06-23T13:05:17.593647 | 2023-06-23T10:20:02 | 2023-06-23T11:21:46 | 248,760,535 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,557 | r | rast.R | # Author: Robert J. Hijmans
# Date : October 2017
# Version 1.0
# License GPL v3
# Build a SpatRaster from scratch from geometry arguments.
# nrows/ncols/nlyrs : dimensions (rounded; must be >= 1)
# xmin..ymax/extent : extent; `extent` (anything ext() accepts) overrides the
#                     four individual coordinates
# crs               : if missing, lon/lat ("OGC:CRS84") is assumed when the
#                     extent fits geographic coordinates, otherwise ""
# resolution/vals/names/time/units : optional properties applied after creation
# Returns a new SpatRaster (errors are routed through terra's error/messages).
new_rast <- function(nrows=10, ncols=10, nlyrs=1, xmin=0, xmax=1, ymin=0, ymax=1, crs, extent, resolution, vals, names, time, units) {
	ncols <- round(ncols)
	if (ncols < 1) error("rast", "ncols < 1")
	nrows <- round(nrows)
	if (nrows < 1) error("rast", "nrows < 1")
	if (missing(extent)) {
		e <- c(xmin, xmax, ymin, ymax)
	} else {
		extent <- ext(extent)
		e <- as.vector(extent)
	}
	if ((e[1] >= e[2]) || e[3] >= e[4]) {
		error("rast,missing", "invalid extent")
	}
	if (missing(crs)) {
		# guess lon/lat for extents that could plausibly be geographic coordinates
		if (e[1] > -360.01 && e[2] < 360.01 && e[3] > -90.01 && e[4] < 90.01) {
			crs <- "OGC:CRS84"
		} else {
			crs <- ""
		}
	} else {
		crs <- character_crs(crs, "rast")
	}
	#check_proj4_datum(crs)
	r <- methods::new("SpatRaster")
	r@ptr <- SpatRaster$new(c(nrows, ncols, nlyrs), e, crs)
	r <- messages(r, "rast")
	if (!missing(resolution)) {
		# setting the resolution adjusts nrows/ncols to fit the extent
		res(r) <- resolution
	}
	if (!missing(names)) {
		names(r) <- names
	}
	if (!missing(vals)) {
		if (length(vals) == 1) {
			# a bare NA must become numeric NA before being sent to C++
			if (is.na(vals[1])) {
				vals <- as.numeric(NA)
			}
		}
		values(r) <- vals
	}
	if (!missing(time)) {
		time(r) <- time
	}
	if (!missing(units)) {
		# bug fix: previously `time(r) <- units`, which silently overwrote the
		# time stamps with the units value and never set the units at all
		units(r) <- units
	}
	r
}
# rast() with no object: create a SpatRaster purely from geometry arguments.
# Defaults to a 1-degree global lon/lat grid; all work happens in new_rast().
setMethod("rast", signature(x="missing"),
	function(x, nrows=180, ncols=360, nlyrs=1, xmin=-180, xmax=180, ymin=-90, ymax=90, crs, extent, resolution, vals, names, time, units) {
		# missing arguments (crs, extent, ...) propagate lazily to new_rast()
		new_rast(nrows, ncols, nlyrs, xmin, xmax, ymin, ymax, crs, extent, resolution, vals, names, time, units)
	}
)
# rast() on a list of SpatRasters: combine them into a single multi-layer
# SpatRaster. Non-SpatRaster elements are dropped (with a warning); list names
# are propagated to layer names where sensible.
setMethod("rast", signature(x="list"),
	function(x, warn=TRUE) {
		i <- sapply(x, function(i) inherits(i, "SpatRaster"))
		if (!all(i)) {
			if (!any(i)) {
				error("rast,list", "none of the elements of x are a SpatRaster")
			} else {
				# note: `warn` is also the logical argument; calling warn(...) still
				# finds terra's warn() function because R looks up function calls
				# separately from value bindings
				warn("rast", sum(!i), " out of ", length(x), " elements of x are not a SpatRaster")
				x <- x[i]
			}
		}
		# start with an empty raster (alternatively use a deep copy)
		out <- deepcopy(x[[1]])
		if (length(x) == 1) {
			return(out)
		}
		opt <- spatOptions()
		# append the remaining rasters as additional sources on the C++ side
		for (i in 2:length(x)) {
			out@ptr$addSource(x[[i]]@ptr, warn, opt)
		}
		out <- messages(out, "rast")
		lnms <- names(x)
		if (!is.null(lnms)) {
			if (any(lnms != "") && (length(lnms) == nlyr(out))) {
				# one list name per output layer: overwrite only the named ones
				rnms <- names(out)
				rnms[lnms != ""] <- lnms[lnms != ""]
				names(out) <- rnms
			} else if (all(lnms != "")) {
				# one name per input raster: expand to name_1..name_k for
				# multi-layer inputs
				nl <- sapply(x, nlyr)
				rnms <- sapply(1:length(nl), function(i) {
					if (nl[i] > 1) paste0(lnms[i], "_", 1:nl[i]) else lnms[i]
				})
				names(out) <- unlist(rnms)
			}
		}
		out
	}
)
# rast() on a SpatExtent: forward the four extent coordinates (overriding any
# xmin/xmax/ymin/ymax in ...) to new_rast().
setMethod("rast", signature(x="SpatExtent"),
	function(x, ...) {
		dots <- list(...)
		dots$xmin=x[1]
		dots$xmax=x[2]
		dots$ymin=x[3]
		dots$ymax=x[4]
		do.call(new_rast, dots)
	}
)
# rast() on a SpatVector: use the vector's extent, and inherit its crs unless
# the caller supplied one (pmatch also catches partially-matched "crs" names).
setMethod("rast", signature(x="SpatVector"),
	function(x, ...) {
		dots <- list(...)
		e <- ext(x)
		dots$xmin=e[1]
		dots$xmax=e[2]
		dots$ymin=e[3]
		dots$ymax=e[4]
		if (all(is.na(pmatch(names(dots), "crs")))) {
			dots$crs <- crs(x)
		}
		do.call(new_rast, dots)
	}
)
# Normalize user-supplied file paths / URLs for use with GDAL.
# - trims whitespace and drops empty entries
# - "s3://..."  -> "/vsis3/..."
# - "http..."   -> "/vsicurl/http..." when vsi = TRUE
# - otherwise paths are normalized; with mustExist = TRUE the normalized form
#   is kept only where the parent directory exists.
.fullFilename <- function(x, mustExist=TRUE, vsi=FALSE) {
	paths <- trimws(x)
	paths <- paths[paths != ""]
	# s3 URIs always become GDAL virtual-filesystem paths
	isS3 <- substr(paths, 1, 5) == "s3://"
	paths[isS3] <- paste0("/vsis3/", substring(paths[isS3], 6))
	isHttp <- substr(paths, 1, 4) == "http"
	if (vsi) {
		paths[isHttp] <- paste0("/vsicurl/", paths[isHttp])
	}
	# nothing on disk to normalize when every entry is a URL
	if (all(isHttp)) {
		return(paths)
	}
	paths <- enc2utf8(paths)
	normed <- normalizePath(paths, winslash = "/", mustWork = FALSE)
	if (!mustExist) {
		return(normed)
	}
	# keep the normalized path only where its parent directory exists
	keep <- file.exists(dirname(normed))
	paths[keep] <- normed[keep]
	return(paths)
}
# rast() on filename(s): open raster file(s) via GDAL. Also handles .rds files
# holding a wrapped SpatRaster, sub-dataset selection (numeric or by name),
# layer subsetting (lyrs) and a read window (win, aligned with snap).
setMethod("rast", signature(x="character"),
	function(x, subds=0, lyrs=NULL, drivers=NULL, opts=NULL, win=NULL, snap="near", vsi=FALSE) {
		f <- .fullFilename(x, TRUE, vsi=vsi)
		if (length(f) == 0) {
			error("rast", "filename is empty. Provide a valid filename")
		}
		# a single .rds file is treated as a serialized (wrapped) SpatRaster
		if ((length(f) == 1) && grepl("\\.rds$", tolower(f[1]))) {
			r <- unwrap(readRDS(x))
			if (!inherits(r, "SpatRaster")) {
				error("rast", "the rds file does not store a SpatRaster")
			}
			return(r)
		}
		r <- methods::new("SpatRaster")
		#subds <- subds[1]
		# NULL -> zero-length character vectors for the C++ constructor
		if (is.null(opts)) opts <- ""[0]
		if (is.null(drivers)) drivers <- ""[0]
		if (length(subds) == 0) subds = 0
		if (is.character(subds)) {
			#r@ptr <- SpatRaster$new(f, -1, subds, FALSE, 0[])
			r@ptr <- SpatRaster$new(f, -1, subds, FALSE, drivers, opts, 0[])
		} else {
			# C++ side uses 0-based sub-dataset indices
			r@ptr <- SpatRaster$new(f, subds-1, "", FALSE, drivers, opts, 0[])
		}
		r <- messages(r, "rast")
		if (r@ptr$getMessage() == "ncdf extent") {
			# NetCDF files where GDAL could not derive an extent: try to recover
			# it from the file's coordinate variables
			# could have used opts="IGNORE_XY_AXIS_NAME_CHECKS=YES"
			test <- try(r <- .ncdf_extent(r, f), silent=TRUE)
			if (inherits(test, "try-error")) {
				warn("rast", "GDAL did not find an extent. Cells not equally spaced?")
			}
		}
		r <- messages(r, "rast")
		if (crs(r) == "") {
			# guess lon/lat, but not when the extent is just row/col indices
			if (is.lonlat(r, perhaps=TRUE, warn=FALSE)) {
				if (!isTRUE(all(as.vector(ext(r)) == c(0,ncol(r),0,nrow(r))))) {
					crs(r) <- "OGC:CRS84"
				}
			}
		}
		if (!is.null(lyrs)) {
			r <- r[[lyrs]]
		}
		if (!is.null(win)) {
			e <- ext(win)
			e <- align(e, r, snap=snap)
			window(r) <- e
		}
		r
	}
)
# Open file(s) as a multi-dimensional (e.g. NetCDF) raster source.
# xyz gives the order of the x, y and "other" dimensions (1-based; converted
# to 0-based for the C++ constructor).
multi <- function(x, subds=0, xyz=3:1, drivers=NULL, opts=NULL) {
	x <- trimws(x)
	x <- x[x!=""]
	if (length(x) == 0) {
		error("rast,character", "provide a valid filename")
	}
	r <- methods::new("SpatRaster")
	f <- .fullFilename(x)
	# NULL -> zero-length character vectors for the C++ constructor
	if (is.null(opts)) opts <- ""[0]
	if (is.null(drivers)) drivers <- ""[0]
	if (length(subds) == 0) subds = 1
	subds <- subds[1]
	if (is.character(subds)) {
		r@ptr <- SpatRaster$new(f, -1, subds, TRUE, drivers, opts, xyz-1)
	} else {
		r@ptr <- SpatRaster$new(f, subds-1, ""[0], TRUE, drivers, opts, xyz-1)
	}
	if (r@ptr$getMessage() == "ncdf extent") {
		# NOTE(review): called without the filename argument, unlike the
		# rast,character method which uses .ncdf_extent(r, f) -- confirm signature
		test <- try(r <- .ncdf_extent(r), silent=TRUE)
		if (inherits(test, "try-error")) {
			warn("rast", "GDAL did not find an extent. Cells not equally spaced?")
		}
	}
	r <- messages(r, "rast")
	if (crs(r) == "") {
		# guess lon/lat when the extent plausibly is geographic
		if (is.lonlat(r, perhaps=TRUE, warn=FALSE)) {
			crs(r) <- "OGC:CRS84"
		}
	}
	r
}
# rast() on a SpatRaster: return a geometry-only copy (no cell values unless
# vals is supplied), optionally keeping time stamps, units and properties.
setMethod("rast", signature(x="SpatRaster"),
	function(x, nlyrs=nlyr(x), names, vals, keeptime=TRUE, keepunits=FALSE, props=FALSE) {
		# common user mistake: rast(r1, r2) -- catch it with a clear message
		if (inherits(nlyrs, "SpatRaster")) {
			error("rast", "use 'c()' to combine SpatRasters")
		}
		x@ptr <- x@ptr$geometry(nlyrs, props, keeptime, keepunits)
		x <- messages(x, "rast")
		if (!missing(names)) {
			# silently ignored when the length does not match nlyr(x)
			if (length(names) == nlyr(x)) names(x) <- names
		}
		if (!missing(vals)) {
			values(x) <- vals
		}
		x
	}
)
# rast() on a SpatRasterDataset: collapse all sub-datasets into one
# multi-layer SpatRaster, prefixing layer names with the dataset names.
setMethod("rast", signature(x="SpatRasterDataset"),
	function(x) {
		if (length(x) == 0) {
			error("rast", "empty SpatRasterDataset")
		} else if (length(x) == 1) {
			x[1]
		} else {
			r <- methods::new("SpatRaster")
			r@ptr <- x@ptr$collapse()
			nms <- names(x)
			if (any(nms != "")) {
				# rep() relies on each dataset contributing nlyr(x)[i] layers in order
				names(r) <- paste(rep(nms, nlyr(x)), names(r), sep="_")
			}
			r
		}
	}
)
# rast() on an array: rows -> raster rows, columns -> raster columns, third
# dimension -> layers. A 4D array is accepted only if its 4th dimension is 1.
setMethod("rast", signature(x="array"),
	function(x, crs="", extent=NULL) {
		dims <- dim(x)
		if (length(dims) > 3) {
			if (length(dims) == 4) {
				if (dims[4] == 1) {
					# NOTE(review): dims is not recomputed after dropping the 4th
					# dimension, so a length-4 dims is passed to SpatRaster$new below
					# -- confirm the C++ side tolerates this
					x <- x[,,,1]
				} else {
					error("rast,array", "rast cannot handle an array with 4 dimensions (try 'sds')")
				}
			} else {
				error("rast,array", "cannot handle an array with more than 3 dimensions")
			}
		}
		r <- methods::new("SpatRaster")
		if (!is.null(extent)) {
			e <- as.vector(extent)
		} else {
			# default extent: one unit per cell, origin at (0, 0)
			e <- c(0, dims[2], 0, dims[1])
		}
		crs <- character_crs(crs, "rast")
		r@ptr <- SpatRaster$new(dims, e, crs)
		values(r) <- x
		messages(r, "rast")
	}
)
# Fallback rast() method: special-cases sf objects (geometry extent + crs),
# otherwise relies on a registered coercion to SpatRaster.
setMethod("rast", signature(x="ANY"),
	function(x, ...) {
		if (inherits(x, "sf")) {
			out <- rast(ext(x), ...)
			if (is.null(list(...)$crs)) {
				# pull the wkt crs from the active sf geometry column
				sfi <- attr(x, "sf_column")
				crs(out, warn=FALSE) <- attr(x[[sfi]], "crs")$wkt
			}
		} else {
			out <- methods::as(x, "SpatRaster")
		}
		#g <- gc()
		out
	}
)
# Build a SpatRaster from an xyz table: columns 1-2 are cell-center x/y
# coordinates, remaining columns become layers. The cell size is inferred from
# the coordinate spacing; coordinates must lie on a regular grid.
.rastFromXYZ <- function(xyz, digits=6, crs="", extent=NULL) {
	ln <- colnames(xyz)
	## xyz might not have colnames, or might have "" names
	if (is.null(ln)) ln <- rep("", ncol(xyz))
	if (any(nchar(ln) < 1)) ln <- make.names(ln)
	if (inherits(xyz, "data.frame")) {
		xyz <- as.matrix(xyz)
		xyz <- matrix(as.numeric(xyz), ncol=ncol(xyz), nrow=nrow(xyz))
	}
	x <- sort(unique(xyz[,1]))
	if (length(x) == 1) {
		error("rast", "cannot create a raster geometry from a single x coordinate")
	}
	# candidate x resolution: smallest gap between unique x coordinates,
	# refined by successive division until all gaps are integer multiples
	# (rounded to `digits`).
	# NOTE(review): rx <- rx / i divides cumulatively (by 1, 2, 3, ...), i.e.
	# the candidates are min(dx)/1, /2, /6, /24, /120 -- confirm intended.
	dx <- x[-1] - x[-length(x)]
	rx <- min(dx)
	for (i in 1:5) {
		rx <- rx / i
		q <- sum(round(dx / rx, digits=digits) %% 1)
		if ( q == 0 ) {
			break
		}
	}
	if ( q > 0 ) {
		error("raster,matrix(xyz)", "x cell sizes are not regular")
	}
	# same procedure for the y resolution
	y <- sort(unique(xyz[,2]))
	if (length(y) == 1) {
		error("rast", "cannot create a raster geometry from a single y coordinate")
	}
	dy <- y[-1] - y[-length(y)]
	# probably a mistake to use the line below
	# Gareth Davies suggested that it be removed
	# dy <- round(dy, digits)
	ry <- min(dy)
	for (i in 1:5) {
		ry <- ry / i
		q <- sum(round(dy / ry, digits=digits) %% 1)
		if ( q == 0 ) {
			break
		}
	}
	if ( q > 0 ) {
		error("raster,matrix(xyz)", "y cell sizes are not regular")
	}
	# expand half a cell on each side: xyz gives cell centers, not edges
	minx <- min(x) - 0.5 * rx
	maxx <- max(x) + 0.5 * rx
	miny <- min(y) - 0.5 * ry
	maxy <- max(y) + 0.5 * ry
	d <- dim(xyz)
	r <- rast(xmin=minx, xmax=maxx, ymin=miny, ymax=maxy, crs=crs, nlyrs=d[2]-2)
	res(r) <- c(rx, ry)
	ext(r) <- round(ext(r), digits+2)
	cells <- cellFromXY(r, xyz[,1:2])
	if (d[2] > 2) {
		names(r) <- ln[-c(1:2)]
		# allocate the full value matrix up front; may fail for huge rasters
		v <- try( matrix(NA, nrow=ncell(r), ncol= nlyr(r)) )
		if (inherits(v, "try-error")) {
			error(paste("cannot make matrix with ", ncell(r), " rows"))
		}
		v[cells, ] <- xyz[, -c(1:2)]
		values(r) <- v
	}
	if (!is.null(extent)) {
		# force the requested extent by padding and/or trimming
		r <- extend(r, extent)
		r <- crop(r, extent)
	}
	return(r)
}
# rast() on a matrix: either interpret it as an xyz coordinate table
# (type = "xyz") or map matrix cells directly onto raster cells.
setMethod("rast", signature(x="matrix"),
	function(x, type="", crs="", digits=6, extent=NULL) {
		stopifnot(prod(dim(x)) > 0)
		if (type == "xyz") {
			r <- .rastFromXYZ(x, crs=crs, digits=digits, extent=extent)
		} else {
			if (is.null(extent)) {
				# default extent: one unit per cell, origin at (0, 0)
				r <- rast(nrows=nrow(x), ncols=ncol(x), extent=ext(c(0, ncol(x), 0, nrow(x))), crs=crs)
			} else {
				r <- rast(nrows=nrow(x), ncols=ncol(x), crs=crs, extent=extent)
			}
			# raster cell order is row-major, hence t(x)
			values(r) <- as.vector(t(x))
		}
		messages(r, "rast")
	}
)
# rast() on a data.frame: by default treat it as an xyz coordinate table;
# any other `type` is coerced to a matrix and re-dispatched to the matrix method.
setMethod("rast", signature(x="data.frame"),
	function(x, type="xyz", crs="", digits=6, extent=NULL) {
		if (type != "xyz") {
			return(rast(as.matrix(x), type=type, crs=crs, digits=digits, extent=extent))
		}
		.rastFromXYZ(x, crs=crs, digits=digits, extent=extent)
	}
)
# rast() on stars / stars_proxy objects: convert via from_stars(), collapsing
# a resulting SpatRasterDataset into a single SpatRaster.
setMethod("rast", signature(x="stars"),
	function(x) {
		x <- from_stars(x)
		if (inherits(x, "SpatRasterDataset")) {
			rast(x)
		} else {
			x
		}
	}
)

# identical logic for the lazy (proxy) variant
setMethod("rast", signature(x="stars_proxy"),
	function(x) {
		x <- from_stars(x)
		if (inherits(x, "SpatRasterDataset")) {
			rast(x)
		} else {
			x
		}
	}
)
# Set the value that is to be interpreted as NA for a SpatRaster.
setMethod("NAflag<-", signature(x="SpatRaster"),
	function(x, value) {
		value <- as.numeric(value)
		# the C++ setter returns FALSE when the value cannot be used as NA flag
		if (!(x@ptr$setNAflag(value))) {
			error("NAflag<-", "cannot set this value")
		}
		x
	}
)

# Get the current NA flag value of a SpatRaster.
setMethod("NAflag", signature(x="SpatRaster"),
	function(x) {
		x@ptr$getNAflag()
	}
)
|
d8498e336a0fc5767ba9bc14bdaecfbb96a0705d | 9cf1abc8ce339d07859eaa12d6143382bee0431a | /FUZZY_DIST_STRINGS.R | fb60ac9060def7ea4eaed2a037e5afddabd6a2ae | [] | no_license | ccsosa/GIS_ANALYSIS | 043e4d2d8fc76a8c50ea8e174914bf1b5612a2eb | f82a0b75ef67478d058216c84ad3ae78fc83cd59 | refs/heads/master | 2021-06-21T06:24:42.182397 | 2017-05-10T22:59:53 | 2017-05-10T22:59:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,703 | r | FUZZY_DIST_STRINGS.R | require(stringdist);require(psych)
# Pairwise fuzzy string-similarity profiles -----------------------------------
# For each pair of country-name strings, compute ten stringdist similarity
# metrics plus their median and mean, collected in the summary data.frame
# `st_F` (same columns and rows as the original hand-unrolled version).
a<-"United States of America"
b<-"United States of Mexico"
c<-"United States"
d<-"United States of America"
e<-"USA"

# All ten similarity metrics for one pair, in st_F column order (cols 5-14).
# `p` is the Jaro-Winkler prefix-scaling factor used for the first metric only.
simProfile <- function(s1, s2, p) {
  c(stringsim(s1, s2, method = "jw", p = p),
    stringsim(s1, s2, method = "soundex"),
    stringsim(s1, s2, method = "jaccard", q = 2),
    stringsim(s1, s2, method = "dl"),
    stringsim(s1, s2, method = "lv"),
    stringsim(s1, s2, method = "osa"),
    stringsim(s1, s2, method = "lcs"),
    stringsim(s1, s2, method = "cosine", q = 2),
    stringsim(s1, s2, method = "qgram", q = 2),
    stringsim(s1, s2, method = "jw"))
}

# String pairs to compare. The original script used p = 0.1 for the first pair
# but p = 0.2 for all others; kept as-is (TODO: confirm this was intentional).
pairList <- list(
  list(s1 = a, s2 = b, p = 0.1),
  list(s1 = a, s2 = c, p = 0.2),
  list(s1 = a, s2 = d, p = 0.2),
  list(s1 = b, s2 = e, p = 0.2),
  list(s1 = c, s2 = e, p = 0.2)
)

# 5 x 10 matrix: one row per pair, one column per metric
simMat <- t(vapply(pairList, function(pr) simProfile(pr$s1, pr$s2, pr$p), numeric(10)))

st_F <- data.frame(
  STRING_A = vapply(pairList, function(pr) pr$s1, character(1)),
  STRING_B = vapply(pairList, function(pr) pr$s2, character(1)),
  MEDIAN_SIM = apply(simMat, 1, median),
  MEAN_SIM = rowMeans(simMat),
  simMat,
  stringsAsFactors = FALSE, check.names = FALSE
)
# "Jaro-Winkiler" typo retained from the original column names for
# compatibility with any downstream consumers of st_F.
colnames(st_F) <- c("STRING_A","STRING_B","MEDIAN_SIM","MEAN_SIM","Jaro-Winkiler","Soundex","Jaccard","Damerau-Levensthein","Levenshtein","Optimal String Alignment","Longest common substring","Cosine","Q-gram","Jaro")
# Bug fix: the original called quantile((st_F[1,5:14])) *before* st_F was
# created (and on a data.frame row, which quantile() cannot handle), so the
# script errored in a fresh session; that line has been removed.
|
788f3344e73a62f52c9f472267771bb4956348bf | 29585dff702209dd446c0ab52ceea046c58e384e | /smds/R/obj.sph.R | 5f6ac511d0845b7b023168b072520021c95fb865 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 306 | r | obj.sph.R | "obj.sph" <-
function(vecxr,ldiss,udiss, P){
	# Squared-stress objective for spherical interval MDS: compares the given
	# lower/upper dissimilarity bounds (ldiss/udiss) with the interval distance
	# matrix implied by a center+radius (sphere) configuration.
	# vecxr: first N*P entries are coordinates; last N entries are square roots
	#        of the sphere radii (squared here so radii are non-negative).
	nObj <- nrow(ldiss)
	coords <- matrix(vecxr[seq_len(nObj * P)], nrow = nObj, ncol = P)
	radii <- vecxr[nObj * P + seq_len(nObj)]^2
	D <- dist(coords)  # computed but unused; retained from the original
	IDM <- idistSph(coords, radii)
	# stack the bounds into a 2 x N x N array matching idistSph()'s output shape
	bounds <- array(0, dim = c(2, nObj, nObj))
	bounds[1, , ] <- ldiss
	bounds[2, , ] <- udiss
	sum((bounds - IDM)^2) / 2
}
80a50baccde04c5f14afbd6c5e19fbb3fdc693e4 | a830f244abbdea05374475d0199cd2f4282b0c47 | /R/cov.SE.R | ab9d623ce01995b4304d393535008e7e34d58f2a | [] | no_license | HuckleyLab/GRaF | 0b4cdb8682d2779b48aafe9ed473358b90000a63 | 557d3240c26198c0e0f7bb76453e97a7726723fc | refs/heads/master | 2020-05-25T16:12:51.533111 | 2019-05-22T18:07:50 | 2019-05-22T18:07:50 | 187,882,507 | 0 | 0 | null | 2019-05-21T17:16:24 | 2019-05-21T17:16:24 | null | UTF-8 | R | false | false | 1,635 | r | cov.SE.R | cov.SE <- function(x1, x2 = NULL, e1 = NULL, e2 = NULL, l) {
  # Squared-exponential (Gaussian-process) covariance between the rows of x1
  # and x2, with per-column length-scales l and optional per-observation
  # "error" matrices e1/e2 that inflate the effective length-scales.
  # When x2 is NULL, the symmetric kernel K(x1, x1) is computed via dist()
  # objects for speed.
  n1 <- nrow(x1)
  # NOTE(review): ifelse() on a scalar works but plain if/else would be clearer
  n2 <- ifelse(is.null(x2), n1, nrow(x2))
  n3 <- ncol(x1)
  # distance matrices
  if(is.null(x2)) {
    e2 <- e1
    # if no second matrix do with distance matrices for speed up
    dists <- lapply(1:n3, function(i, x) dist(x[, i]) ^ 2, x1)
  } else {
    # per-column squared cross-distances via the expansion
    # (a - b)^2 = a^2 + b^2 - 2ab, built with outer products
    dists <- list()
    for (i in 1:n3) {
      dists[[i]] <- x1[, i] ^ 2 %*% t(rep(1, n2)) +
        rep(1, n1) %*% t(x2[, i] ^ 2) - 2 * x1[, i] %*% t(x2[, i])
    }
  }
  # with error matrices
  if (!is.null(e1)) {
    # E1[[i]] / E2[[i]] broadcast the per-row errors to n1 x n2 matrices
    E1 <- list()
    ones <- t(rep(1, n2))
    for (i in 1:n3) {
      E1[[i]] <- e1[, i] %*% ones
    }
    if (!is.null(e2)) {
      E2 <- list()
      ones <- t(rep(1, n1))
      for (i in 1:n3) {
        E2[[i]] <- t(e2[, i] %*% ones)
      }
    } else {
      E2 <- as.list(rep(0, n3))
    }
    # run through each covariate
    sumdiffs <- 0
    denom <- 1
    lower <- lower.tri(E1[[1]])
    for (i in 1:n3) {
      err <- E1[[i]] + E2[[i]]
      if (is.null(x2)) {
        err <- err[lower] # save only lower portion for speed up
      }
      sumdiffs <- sumdiffs + dists[[i]] / (err + l[i])
      denom <- denom * (1 + err / l[i])
    }
    # inverse kronecker delta
    # NOTE(review): `diag(ikds <- 1)` assigns the scalar 1 to ikds and discards
    # the diag() result, so the kronecker-delta correction computed on the line
    # above is thrown away. `diag(ikds) <- 1` was likely intended -- confirm
    # against upstream before changing, as it alters results.
    ikds <- as.numeric(sumdiffs > 0)
    diag(ikds <- 1)
    denom <- sqrt(denom) * ikds
    K <- exp(-0.5 * sumdiffs) / denom
  } else {
    # without error matrices
    sumdiffs <- 0
    for (i in 1:n3) {
      sumdiffs <- sumdiffs + dists[[i]] / l[i]
    }
    K <- exp(-0.5 * sumdiffs) # to matrix?
  }
  # dist-based branch: expand to a full matrix with unit diagonal
  # (inherits() would be more robust than class() == here)
  if(class(sumdiffs) == 'dist') {
    K <- as.matrix(K)
    diag(K) <- 1
  }
  K
}
|
ba6619cd5e151e8f4d3098befd6ea00bbca29d5b | 8fcd363c8dd5cb712cd8ed88a37f5138dd4cf079 | /R/ArrowUtils.R | 0e8a0d63ce9d1aeadc90a8d69454be2d8063e568 | [
"MIT"
] | permissive | GreenleafLab/ArchR | 2d4fd5b4febf21d0d0315922fc1690ef16a6a2a0 | c61b0645d1482f80dcc24e25fbd915128c1b2500 | refs/heads/master | 2023-09-04T05:04:35.202961 | 2023-05-17T12:47:27 | 2023-05-17T12:47:27 | 216,123,064 | 313 | 132 | MIT | 2023-09-01T16:14:59 | 2019-10-18T23:35:41 | R | UTF-8 | R | false | false | 16,958 | r | ArrowUtils.R | ####################################################################
# Hidden Helper Utils for Arrow Files
####################################################################
# Check that an HDF5 file carries the "Arrow" class marker written at
# /Class. Currently only warns (with a repair recipe) when the marker is
# missing or wrong; returns the file path unchanged either way.
.validArrow <- function(ArrowFile = NULL){
  o <- h5closeAll()
  if(h5read(ArrowFile,"Class")!="Arrow"){
    warning(
      "This file is not a valid ArrowFile, this most likely is a bug with previous function where the class was not added.\n",
      "To fix your ArrowFiles :\n",
      "\tlapply(getArrowFiles(ArchRProj), function(x) h5write(obj = 'Arrow', file = x, name = 'Class'))",
      "\nThis will be an error in future versions."
    )
  }
  o <- h5closeAll()
  return(ArrowFile)
}
# Guard against matrix names that collide with ArchR's reserved matrices
# (case-insensitive). `exclude` lists reserved names to exempt from the check.
# Returns matrixName unchanged when allowed; stops otherwise.
.isProtectedArray <- function(matrixName = NULL, exclude = NULL){
  reserved <- tolower(c("peakmatrix", "tilematrix", "genescorematrix"))
  if(!is.null(exclude)){
    reserved <- reserved[reserved %ni% tolower(exclude)]
  }
  if(tolower(matrixName) %in% reserved){
    stop(sprintf("Error %s cannot be used as this conflicts with another predefined matrix function!", matrixName))
  }
  matrixName
}
# List the matrix groups (top-level HDF5 groups other than Fragments/Metadata)
# present in *every* supplied ArrowFile; the per-file listings are intersected.
.availableArrays <- function(ArrowFiles = NULL, threads = getArchRThreads()){
  threads <- min(threads, length(ArrowFiles))
  o <- h5closeAll()
  availableArrays <- .safelapply(seq_along(ArrowFiles), function(x){
    # keep only direct children of "/" that are HDF5 groups
    groups <- h5ls(ArrowFiles[x]) %>% {.[.$group=="/" & .$otype=="H5I_GROUP","name"]}
    groups <- groups[!grepl("Fragments|Metadata", groups)]
    groups
  }, threads = threads) %>% Reduce("intersect", .)
  o <- h5closeAll()
  return(availableArrays)
}
# Seqnames (chromosome sub-groups) stored under `subGroup` in the ArrowFiles.
# Stops unless every file reports the identical seqname vector.
.availableSeqnames <- function(ArrowFiles = NULL, subGroup = "Fragments", threads = getArchRThreads()){
  threads <- min(threads, length(ArrowFiles))
  o <- h5closeAll()
  seqList <- .safelapply(seq_along(ArrowFiles), function(x){
    # children of /subGroup, minus the "Info" bookkeeping group
    seqnames <- h5ls(ArrowFiles[x]) %>% {.[.$group==paste0("/",subGroup),]$name}
    seqnames <- seqnames[!grepl("Info", seqnames)]
    seqnames
  }, threads = threads)
  if(!all(unlist(lapply(seq_along(seqList), function(x) identical(seqList[[x]],seqList[[1]]))))){
    stop("Not All Seqnames Identical!")
  }
  o <- h5closeAll()
  # paste0 coerces to a plain character vector
  return(paste0(seqList[[1]]))
}
# Chromosome names available in the ArrowFiles; thin wrapper around
# .availableSeqnames that errors on an empty result.
.availableChr <- function(ArrowFiles = NULL, subGroup = "Fragments"){
  seqnames <- .availableSeqnames(ArrowFiles, subGroup)
  # if(getArchRChrPrefix()){
  #   seqnames <- seqnames[grep("chr", seqnames, ignore.case = TRUE)]
  # }
  if(length(seqnames) == 0){
    stop("No Chr Found in ArrowFiles!")
  }
  return(seqnames)
}
# Cell names stored in an ArrowFile, returned as "sampleName#cellName".
# subGroup = NULL reads the file-level Metadata (optionally filtered to cells
# that passed QC); otherwise reads the cells recorded for a specific matrix.
.availableCells <- function(ArrowFile = NULL, subGroup = NULL, passQC = TRUE){
  if(is.null(subGroup)){
    o <- h5closeAll()
    cellNames <- h5read(ArrowFile, "Metadata/CellNames")
    if(passQC){
      # older ArrowFiles may lack Metadata/PassQC; treat all cells as passing
      passQC <- tryCatch({
        h5read(ArrowFile, "Metadata/PassQC")
      }, error = function(x){
        rep(1, length(cellNames))
      })
      cellNames <- cellNames[which(passQC==1)]
    }
    sampleName <- h5read(ArrowFile, paste0("Metadata/Sample"))
    o <- h5closeAll()
  }else{
    o <- h5closeAll()
    cellNames <- h5read(ArrowFile, paste0(subGroup, "/Info/CellNames"))
    sampleName <- h5read(ArrowFile, paste0("Metadata/Sample"))
    o <- h5closeAll()
  }
  return(paste0(sampleName,"#",cellNames))
}
# Sample name recorded at Metadata/Sample in an ArrowFile.
.sampleName <- function(ArrowFile = NULL){
  o <- h5closeAll()
  sampleName <- h5read(ArrowFile, paste0("Metadata/Sample"))
  o <- h5closeAll()
  return(sampleName)
}
# Summarize the HDF5 layout of an ArrowFile as a nested list: one element per
# top-level group; groups with sub-groups become lists of h5ls data.frames
# keyed by sub-group name, flat groups stay a single data.frame.
.summarizeArrowContent <- function(ArrowFile = NULL){
  o <- h5closeAll()
  #Get Contents of ArrowFile
  h5DF <- h5ls(ArrowFile)
  #Re-Organize Content Info
  h5DF <- h5DF[-which(h5DF$group == "/"),]
  # second path component = top-level group name
  groups <- stringr::str_split(h5DF$group, pattern = "/", simplify=TRUE)[,2]
  groupList <- split(h5DF, groups)
  #Split Nested Lists
  groupList2 <- lapply(seq_along(groupList), function(x){
    groupDFx <- groupList[[x]]
    # strip the top-level prefix; "" marks entries directly in this group
    groupx <- gsub(paste0("/", names(groupList)[x]),"",groupDFx$group)
    if(all(groupx=="")){
      groupDFx
    }else{
      # nested content: split the remaining rows by their sub-group name
      subDF <- groupDFx[-which(groupx == ""),]
      split(subDF, stringr::str_split(subDF$group, pattern = "/", simplify=TRUE)[,3])
    }
  })
  names(groupList2) <- names(groupList)
  o <- h5closeAll()
  return(groupList2)
}
# Read per-cell metadata from an ArrowFile into an S4 DataFrame: every
# Metadata dataset with the same length as CellNames becomes a column,
# rownames are "sampleName#cellName", columns sorted alphabetically.
.getMetadata <- function(ArrowFile = NULL){
  o <- h5closeAll()
  #Get Contents of ArrowFile
  sampleName <- h5read(ArrowFile, paste0("Metadata/Sample"))
  arrowMD <- .summarizeArrowContent(ArrowFile)$Metadata
  #Which are same dimensions as cell names
  arrowMD <- arrowMD[which(arrowMD$dim == arrowMD$dim[arrowMD$name=="CellNames"]),]
  #Load these into a S4 DataFrame
  md <- lapply(seq_len(nrow(arrowMD)), function(x){
    dfx <- DataFrame(h5read(ArrowFile, paste0(arrowMD$group[x],"/",arrowMD$name[x])))
    colnames(dfx) <- arrowMD$name[x]
    dfx
  }) %>% Reduce("cbind", .)
  #Correct CellNames
  md$CellNames <- paste0(sampleName,"#",md$CellNames)
  md$Sample <- Rle(sampleName, nrow(md))
  rownames(md) <- md$CellNames
  # CellNames now lives in the rownames; drop the column itself
  md <- md[, -which(colnames(md)=="CellNames")]
  md <- md[,order(colnames(md))]
  o <- h5closeAll()
  return(md)
}
# Read the feature DataFrame (e.g. tile or gene coordinates) stored under
# subGroup/Info/FeatureDF. All ArrowFiles must hold an identical FeatureDF;
# this is verified (in parallel) and violated files trigger an error.
# Rows are returned grouped by seqname so downstream split() calls line up.
.getFeatureDF <- function(ArrowFiles = NULL, subGroup = "TileMatrix", threads = getArchRThreads()){
  threads <- min(threads, length(ArrowFiles))
  # read one file's FeatureDF; seqnames are stored as a run-length Rle
  .helpFeatureDF <- function(ArrowFile = NULL, subGroup = NULL){
    o <- h5closeAll()
    featureDF <- DataFrame(h5read(ArrowFile, paste0(subGroup,"/Info/FeatureDF")))
    featureDF$seqnames <- Rle(as.character(featureDF$seqnames))
    o <- h5closeAll()
    return(featureDF)
  }
  fdf <- .helpFeatureDF(ArrowFiles[1], subGroup = subGroup)
  if(length(ArrowFiles) > 1){
    ArrowFiles <- ArrowFiles[-1]
    checkIdentical <- .safelapply(seq_along(ArrowFiles), function(x){
      fdfx <- .helpFeatureDF(ArrowFiles[x], subGroup = subGroup)
      identical(fdfx, fdf)
    }, threads = threads) %>% unlist %>% all
    if(!checkIdentical){
      # fix: message previously read "asssay"
      stop("Error not all FeatureDF for assay is the same!")
    }
  }
  #Re-Order for Split Check!
  newOrder <- split(seq_len(nrow(fdf)), fdf$seqnames) %>% {lapply(seq_along(.), function(x) .[[x]])} %>% Reduce("c", .)
  fdf[newOrder,]
}
#####################################################################
# Dropping Group From Hdf5 File
#####################################################################
# Create (or, with force = TRUE, wipe and recreate) a top-level HDF5 group in
# an ArrowFile for a matrix such as GeneScoreMatrix. Errors if the group
# already has content and force = FALSE; never allowed for "Fragments".
# Returns 0 invisibly-ish on the paths that create the group.
.createArrowGroup <- function(
  ArrowFile = NULL, 
  group = "GeneScoreMatrix", 
  force = FALSE,
  verbose = FALSE,
  logFile = NULL
  ){
  ArrowInfo <- .summarizeArrowContent(ArrowFile)
  if(group == "Fragments"){ #This shouldnt happen but just in case
    .logMessage(".createArrowGroup : Cannot create Group over Fragments in Arrow!", logFile = logFile)
    stop("Cannot create Group over Fragments in Arrow!")
  }
  if(group %in% names(ArrowInfo)){
    #We Should Check How Big it is if it exists
    # the Info sub-group alone does not count as existing content
    ArrowGroup <- ArrowInfo[[group]]
    ArrowGroup <- ArrowGroup[names(ArrowGroup) %ni% c("Info")]
    if(length(ArrowGroup) > 0){
      if(!force){
        .logMessage(".createArrowGroup : Arrow Group already exists! Set force = TRUE to continue!", logFile = logFile)
        stop("Arrow Group already exists! Set force = TRUE to continue!")
      }else{
        .logMessage(".createArrowGroup : Arrow Group already exists! Dropping Group from ArrowFile! This will take ~10-30 seconds!", logFile = logFile)
        if(verbose) message("Arrow Group already exists! Dropping Group from ArrowFile! This will take ~10-30 seconds!")
        o <- .dropGroupsFromArrow(ArrowFile = ArrowFile, dropGroups = group, verbose = verbose, logFile = logFile)
        # errors ignored: the group may already exist after the rewrite
        tryCatch({h5createGroup(ArrowFile , group)}, error=function(e){})
        # NOTE(review): invisible(return(0)) returns *visibly*; the intended
        # form is return(invisible(0)) -- confirm before changing
        invisible(return(0))
      }
    }
  }else{
    tryCatch({h5createGroup(ArrowFile , group)}, error=function(e){})
    invisible(return(0))
  }
}
#' Remove whole top-level groups from an ArrowFile.
#'
#' HDF5 cannot reclaim space in place, so the file is rebuilt: everything
#' except the groups in `dropGroups` is copied into a temporary Arrow file,
#' which then replaces the original on disk.
#'
#' @param ArrowFile path to the ArrowFile to rewrite.
#' @param dropGroups character vector of group names to drop (case-insensitive).
#' @param level gzip compression level passed to h5write.
#' @param verbose print per-seqname progress.
#' @param logFile ArchR log file.
#' @return the (rewritten) ArrowFile path.
.dropGroupsFromArrow <- function(
  ArrowFile = NULL,
  dropGroups = NULL,
  level = 0,
  verbose = FALSE,
  logFile = NULL
  ){
  tstart <- Sys.time()
  #Summarize Arrow Content
  ArrowInfo <- .summarizeArrowContent(ArrowFile)
  .logMessage(".dropGroupsFromArrow : Initializing Temp ArrowFile", logFile = logFile)
  # Build the replacement file in a temp location first, with the standard
  # Arrow header datasets (Class + ArchRVersion).
  outArrow <- .tempfile(fileext = ".arrow")
  o <- h5closeAll()
  o <- h5createFile(outArrow)
  o <- h5write(obj = "Arrow", file = outArrow, name = "Class")
  o <- h5write(obj = paste0(packageVersion("ArchR")), file = outArrow, name = "ArchRVersion")
  #1. Metadata First
  .logMessage(".dropGroupsFromArrow : Adding Metadata to Temp ArrowFile", logFile = logFile)
  groupName <- "Metadata"
  o <- h5createGroup(outArrow, groupName)
  mData <- ArrowInfo[[groupName]]
  for(i in seq_len(nrow(mData))){
    h5name <- paste0(groupName, "/", mData$name[i])
    h5write(.h5read(ArrowFile, h5name), file = outArrow, name = h5name)
  }
  #2. Other Groups
  .logMessage(".dropGroupsFromArrow : Adding SubGroups to Temp ArrowFile", logFile = logFile)
  groupsToTransfer <- names(ArrowInfo)
  groupsToTransfer <- groupsToTransfer[groupsToTransfer %ni% "Metadata"]
  if(!is.null(dropGroups)){
    # Case-insensitive match so e.g. "genescorematrix" drops "GeneScoreMatrix".
    groupsToTransfer <- groupsToTransfer[tolower(groupsToTransfer) %ni% tolower(dropGroups)]
  }
  for(k in seq_along(groupsToTransfer)){
    .logDiffTime(paste0("Transferring ", groupsToTransfer[k]), tstart, verbose = verbose, logFile = logFile)
    #Create Group
    groupName <- groupsToTransfer[k]
    o <- h5createGroup(outArrow, groupName)
    #Sub Data
    mData <- ArrowInfo[[groupName]]
    # Sub-group order: non-"chr" entries (e.g. "Info") first, then chromosomes.
    seqOrder <- sort(names(mData))
    if(any(grepl("chr", seqOrder))){
      seqOrder <- c(seqOrder[!grepl("chr", seqOrder)], seqOrder[grepl("chr", seqOrder)])
    }
    for(j in seq_along(seqOrder)){
      if(verbose) message(j, " ", appendLF = FALSE)
      #Create Group
      groupJ <- paste0(groupName, "/", seqOrder[j])
      o <- h5createGroup(outArrow, groupJ)
      #Sub mData
      mDataj <- mData[[seqOrder[j]]]
      # Copy each dataset of this sub-group verbatim.
      for(i in seq_len(nrow(mDataj))){
        h5name <- paste0(groupJ, "/", mDataj$name[i])
        .suppressAll(h5write(.h5read(ArrowFile, h5name), file = outArrow, name = h5name, level = level))
      }
    }
    gc()
    if(verbose) message("")
  }
  .logMessage(".dropGroupsFromArrow : Move Temp ArrowFile to ArrowFile", logFile = logFile)
  # Atomically-ish swap the rebuilt file into place.
  rmf <- file.remove(ArrowFile)
  out <- .fileRename(from = outArrow, to = ArrowFile)
  .logDiffTime("Completed Dropping of Group(s)", tstart, logFile = logFile, verbose = verbose)
  ArrowFile
}
#' Copy several ArrowFiles, keeping only the cells in `cellsKeep`.
#'
#' Thin parallel wrapper around .copyArrowSingle: each input file is copied
#' to the corresponding output path.
#'
#' @param inArrows character vector of source ArrowFile paths.
#' @param outArrows character vector of destination paths (same length).
#' @param cellsKeep cell names ("<sample>#<barcode>") to retain.
#' @param level gzip compression level passed through to h5write.
#' @param verbose print per-seqname progress.
#' @param logFile ArchR log file.
#' @param threads number of files to copy in parallel.
#' @return character vector of the output ArrowFile paths.
.copyArrows <- function(
  inArrows = NULL,
  outArrows = NULL,
  cellsKeep = NULL,
  level = 0,
  verbose = FALSE,
  logFile = NULL,
  threads = 1
  ){
  stopifnot(length(inArrows) == length(outArrows))
  # Worker for one input/output pair.
  copyOne <- function(i){
    .copyArrowSingle(
      inArrow = inArrows[i],
      outArrow = outArrows[i],
      cellsKeep = cellsKeep,
      level = level,
      verbose = verbose,
      logFile = logFile
    )
  }
  results <- .safelapply(seq_along(inArrows), copyOne, threads = threads)
  unlist(results)
}
#' Copy one ArrowFile, keeping only a subset of cells.
#'
#' Writes a brand-new Arrow (HDF5) file containing the Metadata, Fragments
#' and matrix groups of `inArrow`, restricted to the cells named in
#' `cellsKeep` (full names of the form "<sample>#<barcode>").
#'
#' @param inArrow path to the source ArrowFile.
#' @param outArrow path for the new ArrowFile (any existing file is removed).
#' @param cellsKeep character vector of cell names to retain.
#' @param level gzip compression level passed to h5write.
#' @param verbose print per-seqname progress.
#' @param logFile ArchR log file.
#' @return the path of the new ArrowFile.
.copyArrowSingle <- function(
  inArrow = NULL,
  outArrow = NULL,
  cellsKeep = NULL,
  level = 0,
  verbose = FALSE,
  logFile = NULL
  ){
  tstart <- Sys.time()
  #Summarize Arrow Content
  ArrowInfo <- .summarizeArrowContent(inArrow)
  sampleName <- .sampleName(inArrow)
  .logMessage(".copyArrow : Initializing Out ArrowFile", logFile = logFile)
  # Start from a clean file with the standard Arrow header datasets.
  o <- .suppressAll(file.remove(outArrow))
  o <- h5closeAll()
  o <- h5createFile(outArrow)
  o <- h5write(obj = "Arrow", file = outArrow, name = "Class")
  o <- h5write(obj = paste0(packageVersion("ArchR")), file = outArrow, name = "ArchRVersion")
  #1. Metadata First
  .logMessage(".copyArrow : Adding Metadata to Out ArrowFile", logFile = logFile)
  groupName <- "Metadata"
  o <- h5createGroup(outArrow, groupName)
  mData <- ArrowInfo[[groupName]]
  # Metadata/CellNames stores bare barcodes; cellsKeep carries "<sample>#<barcode>",
  # so split on "#" before matching.
  cellNames <- .h5read(inArrow, "Metadata/CellNames")
  idx <- which(cellNames %in% stringr::str_split(cellsKeep, pattern="#", simplify=TRUE)[,2])
  if(length(idx)==0){
    stop("No cells matching in arrow file!")
  }
  # Copy every metadata vector; vectors that are per-cell (same length as
  # CellNames) are subset to the kept cells.
  for(i in seq_len(nrow(mData))){
    h5name <- paste0(groupName, "/", mData$name[i])
    mDatai <- .h5read(inArrow, h5name)
    if(length(mDatai)==length(cellNames)){
      mDatai <- mDatai[idx]
    }
    h5write(mDatai, file = outArrow, name = h5name)
  }
  #2. scATAC-Fragments
  .logDiffTime(paste0("Transferring Fragments"), tstart, verbose = verbose, logFile = logFile)
  #Create Group
  groupName <- "Fragments"
  o <- h5createGroup(outArrow, groupName)
  #Sub Data
  mData <- ArrowInfo[[groupName]]
  # Sub-group order: non-"chr" entries first, then chromosomes.
  seqOrder <- sort(names(mData))
  if(any(grepl("chr", seqOrder))){
    seqOrder <- c(seqOrder[!grepl("chr", seqOrder)], seqOrder[grepl("chr", seqOrder)])
  }
  for(j in seq_along(seqOrder)){
    if(verbose) message(j, " ", appendLF = FALSE)
    #Create Group
    groupJ <- paste0(groupName, "/", seqOrder[j])
    o <- h5createGroup(outArrow, groupJ)
    #Sub mData
    mDataj <- mData[[seqOrder[j]]]
    # Fragments are stored run-length encoded by barcode (RGLengths/RGValues).
    RGLengths <- .h5read(inArrow, paste0(groupJ, "/RGLengths"))
    RGValues <- .h5read(inArrow, paste0(groupJ, "/RGValues"))
    RGRle <- Rle(paste0(sampleName, "#", RGValues), RGLengths)
    #Determine Which to Keep
    idxj <- BiocGenerics::which(RGRle %bcin% cellsKeep)
    if(length(idxj) == 0){
      # NOTE(review): when no kept cell has fragments on this seqname, a single
      # placeholder fragment is retained so the group is non-empty — confirm
      # downstream readers tolerate this.
      idxj <- 1
    }
    #Info
    Ranges <- .h5read(inArrow, paste0(groupJ, "/Ranges"))[idxj, ,drop=FALSE]
    RGRle <- RGRle[idxj]
    RGLengths <- RGRle@lengths
    # Strip the "<sample>#" prefix back off before writing barcodes.
    RGValues <- stringr::str_split(RGRle@values, pattern = "#", simplify = TRUE)[,2]
    #Write Barcodes
    o <- .suppressAll(h5write(RGLengths, file = outArrow, name = paste0(groupJ, "/RGLengths"), level = level))
    o <- .suppressAll(h5write(RGValues, file = outArrow, name = paste0(groupJ, "/RGValues"), level = level))
    #Write Ranges
    o <- .suppressAll(
      h5write(
        obj = Ranges,
        file = outArrow,
        name = paste0(groupJ, "/Ranges"),
        level = level
      )
    )
  }
  if(verbose) message("")
  #3. Other Matrices
  .logMessage(".copyArrow : Adding SubMatrices to Out ArrowFile", logFile = logFile)
  groupsToTransfer <- names(ArrowInfo)
  groupsToTransfer <- groupsToTransfer[groupsToTransfer %ni% c("Metadata", "Fragments")]
  for(k in seq_along(groupsToTransfer)){
    .logDiffTime(paste0("Transferring ", groupsToTransfer[k]), tstart, verbose = verbose, logFile = logFile)
    #Create Group
    groupName <- groupsToTransfer[k]
    o <- h5createGroup(outArrow, groupName)
    #Sub Data
    mData <- ArrowInfo[[groupName]]
    #Get Order Of Sub Groups (Mostly Related to Seqnames)
    seqOrder <- sort(names(mData))
    if(any(grepl("chr", seqOrder))){
      seqOrder <- c(seqOrder[!grepl("chr", seqOrder)], seqOrder[grepl("chr", seqOrder)])
    }
    # Matrix groups keep their own per-cell bookkeeping under "Info".
    cellNames <- paste0(sampleName, "#", .h5read(inArrow, paste0(groupName, "/Info/CellNames")))
    featureDF <- .getFeatureDF(ArrowFile = inArrow, subGroup = groupName)
    # Process "Info" first so the output group's bookkeeping exists before
    # the per-seqname matrices are written.
    seqOrder <- c("Info", seqOrder[!grepl("Info", seqOrder)])
    for(j in seq_along(seqOrder)){
      if(verbose) message(j, " ", appendLF = FALSE)
      #Create Group
      groupJ <- paste0(groupName, "/", seqOrder[j])
      if(seqOrder[j] == "Info"){
        o <- h5createGroup(outArrow, groupJ)
        #Sub mData
        mDataj <- mData[[seqOrder[j]]]
        # Datasets whose length matches CellNames are per-cell and get subset;
        # FeatureDF is excluded even if its dim happens to match.
        idxCL <- which(mDataj$dim == mDataj$dim[mDataj$name=="CellNames"])
        idxCL <- idxCL[mDataj$name[idxCL] %ni% "FeatureDF"]
        idxKeep <- which(cellNames %in% cellsKeep)
        #Transfer Components
        for(i in seq_len(nrow(mDataj))){
          h5name <- paste0(groupJ, "/", mDataj$name[i])
          if(i %in% idxCL){
            .suppressAll(h5write(.h5read(inArrow, h5name)[idxKeep], file = outArrow, name = h5name))
          }else{
            .suppressAll(h5write(.h5read(inArrow, h5name), file = outArrow, name = h5name))
          }
        }
      }else{
        #Sub mData
        mDataj <- mData[[seqOrder[j]]]
        # Any dataset beyond the sparse-matrix slots records which summary
        # statistics were pre-computed; they are re-computed on the subset.
        addAnalysis <- mDataj[mDataj$name %ni% c("i", "jLengths", "jValues", "x"), "name"]
        mat <- .getMatFromArrow(
          ArrowFile = inArrow,
          featureDF = featureDF[BiocGenerics::which(featureDF$seqnames %bcin% seqOrder[j]),],
          useMatrix = groupName,
          cellNames = cellNames[cellNames %in% cellsKeep]
        )
        o <- .addMatToArrow(
          mat = mat,
          ArrowFile = outArrow,
          Group = paste0(groupName, "/", seqOrder[j]),
          binarize = all(mat@x == 1),
          addColSums = "colSums" %in% addAnalysis,
          addRowSums = "rowSums" %in% addAnalysis,
          addRowMeans = "rowMeans" %in% addAnalysis,
          addRowVars = "rowVars" %in% addAnalysis,
          addRowVarsLog2 = "rowVarsLog2" %in% addAnalysis
        )
        rm(mat)
      }
    }
    gc()
    if(verbose) message("")
  }
  .logDiffTime("Completed Copying ArrowFile", tstart, logFile = logFile, verbose = verbose)
  outArrow
}
|
5be6183c297608e33933cdc595e28b1172aeb9fc | 9ad4b4acb8bd2b54fd7b82526df75c595bc614f7 | /Integrate All/Entropy.R | 19ba3a3d7e8e04c8072007bf970a5c3a5bc17f80 | [] | no_license | sylvia-science/Ghobrial_EloRD | f27d2ff20bb5bbb90aa6c3a1d789c625540fbc42 | 041da78479433ab73335b09ed69bfdf6982e7acc | refs/heads/master | 2023-03-31T14:46:27.999296 | 2021-04-02T15:09:49 | 2021-04-02T15:09:49 | 301,811,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,945 | r | Entropy.R | library(coop)
#args <-R.utils::commandArgs(asValues=TRUE)
#if (is.null(args[["input"]])) {
# print("Provide a valid input file name (Batch corrected object) --> RDS file")
#}
#if (is.null(args[["output"]])) {
# print("Provide a valid output file name (Per batch and cell type entropy) --> CSV file")
#}
#if (is.null(args[["output_matrix"]])) {
# print("Provide a valid output file name for the matrix. RDS file")
#}
# entropy function
# Normalized Shannon entropy of the labels among one cell's neighbors.
#
# x:             0/1 adjacency row of a kNN graph; entries equal to 1 mark
#                the neighbors of this cell.
# sample_vector: label (e.g. sample / batch id) for every cell, parallel to x.
# N_samples:    number of distinct labels overall; divides by log(N_samples)
#                so the result lies in [0, 1] (1 = perfectly mixed).
shannon_entropy <- function(x, sample_vector, N_samples) {
  neighbor_labels <- sample_vector[x == 1]
  p <- table(neighbor_labels) / length(neighbor_labels)
  p <- p[p > 0]
  -sum(p * log(p)) / log(N_samples)
}
#' Compute per-cell batch-mixing entropies over a kNN graph.
#'
#' For every cell, the normalized Shannon entropy of its neighbors' sample,
#' cell-type and kit labels is computed. High sample/kit entropy indicates
#' well-mixed batches; low cell-type entropy indicates neighbors share a type.
#'
#' @param corrected_space matrix of cells in a batch-corrected space, or a
#'   precomputed kNN adjacency matrix when BBKNN == 1.
#' @param k_num number of nearest neighbors for the kNN graph.
#' @param dim_num number of dimensions used when building the graph.
#' @param bool,x unused; kept so existing call sites keep working.
#' @param sample_vector,N_samples per-cell sample labels and label count.
#' @param cell_type_vector,N_cell_types per-cell cell-type labels and count.
#' @param kit_vector,N_kits per-cell kit labels and label count.
#' @param BBKNN set to 1 (or TRUE) when corrected_space is already a kNN graph.
#' @return numeric matrix (cells x 3) with columns sample_entropy,
#'   celltype_entropy and kit_entropy.
compute_entropy <- function(corrected_space, k_num, dim_num, bool, x,
                            sample_vector, N_samples,
                            cell_type_vector, N_cell_types,
                            kit_vector, N_kits, BBKNN=0){
  print(k_num)
  print(dim_num)
  if(BBKNN == 1){
    # BBKNN already supplies the adjacency matrix directly.
    knn_graph <- as.matrix(corrected_space)
  } else {
    knn_graph <- as.matrix(buildKNNGraph(corrected_space, k = k_num, d = dim_num, transposed = FALSE)[])
  }
  sample_entropy <- apply(knn_graph, 1, function(row) shannon_entropy(row, sample_vector, N_samples))
  print('Done sample_entropy')
  celltype_entropy <- apply(knn_graph, 1, function(row) shannon_entropy(row, cell_type_vector, N_cell_types))
  print('Done celltype_entropy')
  kit_entropy <- apply(knn_graph, 1, function(row) shannon_entropy(row, kit_vector, N_kits))
  print('Done kit_entropy')
  entropy <- cbind(sample_entropy, celltype_entropy, kit_entropy)
  # Bug fix: the original used names(entropy) <- c("sample_entropy",
  # "Cell_type_entropy", "Kit_entropy"), which on a matrix sets a useless
  # NA-padded names attribute and leaves the column names untouched. Set
  # colnames explicitly, keeping the lower-case names cbind() produced and
  # that downstream code (plotEntropy) reads via entropy$celltype_entropy etc.
  colnames(entropy) <- c("sample_entropy", "celltype_entropy", "kit_entropy")
  return(entropy)
}
#' Write an entropy table to "<folder>entropy.csv".
#'
#' @param x matrix or data.frame of per-cell entropies; its own column names
#'   become the CSV header.
#' @param col_names intended column names; currently NOT applied, because the
#'   downstream reader (plotEntropy) relies on x's existing column names
#'   (entropy$celltype_entropy / entropy$kit_entropy).
#'   TODO(review): unify column naming across the pipeline, then apply these.
#' @param folder path prefix (not necessarily a directory) for the output file.
save_results <- function(x, col_names, folder){
  # Bug fix: the original passed sep = "," and col.names = col_names to
  # write.csv(), which ignores both with an "attempt to set ... ignored"
  # warning. Dropping them produces byte-identical output without warnings.
  write.csv(x, file = paste0(folder, 'entropy.csv'), row.names = FALSE)
}
#2) Seurat objects
#' Compute and save batch-mixing entropies for a Seurat object.
#'
#' Extracts per-cell sample / cell-type / kit labels from the object, builds
#' the embedding (or reuses a BBKNN graph), delegates to compute_entropy(),
#' and writes the result via save_results().
#'
#' @param object a Seurat object with meta.data columns 'sample' and 'kit'
#'   and a 'GeneralCellType' column accessible via object$GeneralCellType.
#' @param BBKNN if TRUE, use the precomputed BBKNN_KNN graph instead of a
#'   reduction embedding.
#' @param BBKNN_KNN precomputed (weighted) kNN matrix; binarized here.
#' @param corrected_assay name of the reduction (e.g. "pca", "harmony") whose
#'   cell embeddings define the corrected space.
#' @param folder output path prefix for the entropy CSV.
#' @param k_num number of nearest neighbors.
#' @param dim_num number of dimensions used to build the kNN graph.
compute_entropy_Seurat = function(object, BBKNN = F, BBKNN_KNN = NA,corrected_assay,folder, k_num,dim_num = 10){
  #browser()
  suppressPackageStartupMessages(require(Seurat))
  print("The input object is a Seurat class object")
  # Per-cell labels used for the three entropy computations.
  sample_vector <- object@meta.data[['sample']]
  N_samples <- length(unique(sample_vector))
  cell_type_vector <- object$GeneralCellType # Idents(object) #object@meta.data[[celltype_key]]
  N_cell_types <- length(unique(cell_type_vector))
  kit_vector <- object@meta.data[['kit']]
  N_kits <- length(unique(kit_vector))
  print(unique(sample_vector))
  print(unique(kit_vector))
  print(unique(cell_type_vector))
  #write.csv(kit_vector, file = paste0(folder,'_kit_vector.csv'), sep = ",", row.names = T)
  #write.csv(cell_type_vector, file = paste0(folder,'_cell_type_vector.csv'), sep = ",", row.names = T)
  # Corrected space: either the reduction embedding (transposed to
  # dims x cells) or the binarized BBKNN graph.
  if (!BBKNN){
    tmp = object@reductions[[corrected_assay]]
    tmp = tmp@cell.embeddings
    space <- as.matrix(tmp)
    space = t(space)
  }else{
    space = BBKNN_KNN
    space[space>0] = 1
  }
  folder = paste0(folder,'_k',k_num,'_')
  col_names <- c("sample_entropy", "Cell_type_entropy", "Kit_entropy")
  # NOTE(review): the bare `x` below is an undefined symbol passed to
  # compute_entropy's unused `x` parameter; it is never evaluated (R lazy
  # evaluation), so no error occurs — but it should be cleaned up.
  entropy = compute_entropy(corrected_space = space,
                            k_num = k_num,dim_num = dim_num,
                            bool = F, x,
                            sample_vector = sample_vector, N_samples = N_samples,
                            cell_type_vector = cell_type_vector,N_cell_types = N_cell_types,
                            kit_vector = kit_vector, N_kits = N_kits , BBKNN = BBKNN)
  save_results(entropy,col_names,folder)
  print("Entropy calculated over Seurat object!")
}
#' Save a box plot and per-metric histograms of cell-level entropies as PNGs.
#'
#' @param entropy data.frame with columns sample_entropy, celltype_entropy
#'   and kit_entropy (one row per cell).
#' @param folder_output path prefix for the output PNG files.
#' @param k_num k used when building the kNN graph; embedded in file names.
plotEntropy <- function(entropy, folder_output, k_num){
  # Box plot of the three entropy distributions side by side.
  entropy_melt <- melt(entropy)
  pathName <- paste0(folder_output, '_k', k_num, '_Boxplot_entropy', '.png')
  png(file = pathName, width = 500, height = 500, res = 100)
  title <- paste0('Median sample: ', specify_decimal(median(entropy$sample_entropy), 2),
                  '\n', 'Median CellType: ', specify_decimal(median(entropy$celltype_entropy), 2),
                  '\n', 'Median Kit: ', specify_decimal(median(entropy$kit_entropy), 2))
  plot <- ggplot(entropy_melt, aes(x = variable, y = value)) + geom_boxplot()
  plot <- plot + ggtitle(title) +
    theme_classic() +
    theme(plot.title = element_text(hjust = 0.5))
  print(plot)
  dev.off()
  # Histogram per metric. Bug fix: the titles said "Median" but displayed
  # mean(); they now show the median, consistent with the box plot above.
  pathName <- paste0(folder_output, '_k', k_num, '_Hist_celltype_entropy', '.png')
  png(file = pathName, width = 500, height = 500, res = 100)
  title <- paste0('Median: ', specify_decimal(median(entropy$celltype_entropy), 2))
  plot <- ggplot(entropy, aes(x = celltype_entropy)) + geom_histogram()
  plot <- plot + ggtitle(title) +
    theme_classic() +
    theme(plot.title = element_text(hjust = 0.5))
  print(plot)
  dev.off()
  pathName <- paste0(folder_output, '_k', k_num, '_Hist_kit_entropy', '.png')
  png(file = pathName, width = 500, height = 500, res = 100)
  title <- paste0('Median: ', specify_decimal(median(entropy$kit_entropy), 2))
  plot <- ggplot(entropy, aes(x = kit_entropy)) + geom_histogram()
  plot <- plot + ggtitle(title) +
    theme_classic() +
    theme(plot.title = element_text(hjust = 0.5))
  print(plot)
  dev.off()
  pathName <- paste0(folder_output, '_k', k_num, '_Hist_sample_entropy', '.png')
  png(file = pathName, width = 500, height = 500, res = 100)
  title <- paste0('Median: ', specify_decimal(median(entropy$sample_entropy), 2))
  plot <- ggplot(entropy, aes(x = sample_entropy)) + geom_histogram()
  plot <- plot + ggtitle(title) +
    theme_classic() +
    theme(plot.title = element_text(hjust = 0.5))
  print(plot)
  dev.off()
}
specify_decimal <- function(x, k) trimws(format(round(x, k), nsmall=k)) |
2efc414bc1d3d10c70e8fd47ab57711eb0e8166c | 44cf65e7ab4c487535d8ba91086b66b0b9523af6 | /data/Newspapers/2002.08.29.editorial.95114.0960.r | a51b8894518e66ec19a8ec885a58e11c62f9356e | [] | no_license | narcis96/decrypting-alpha | f14a746ca47088ec3182d610bfb68d0d4d3b504e | 5c665107017922d0f74106c13d097bfca0516e66 | refs/heads/master | 2021-08-22T07:27:31.764027 | 2017-11-29T12:00:20 | 2017-11-29T12:00:20 | 111,142,761 | 0 | 1 | null | null | null | null | ISO-8859-1 | R | false | false | 3,210 | r | 2002.08.29.editorial.95114.0960.r | libertatea de exprimare si economia de piata au devenit , peste noapte , doua valori democratice pe care romanii , cu spiritul lor inventiv , au inceput sa le foloseasca la cu totul altceva decit se face intr - o tara civilizata .
asa se face ca presa romaneasca de azi are o problema grava .
cititorii nu mai stiu care este diferenta intre un ziar de santaj si unul care face eforturi sa respecte normele deontologice .
romanii sint amagiti cu tot felul de campanii zgomotoase care le sint prezentate ca fiind mari demersuri jurnalistice , dar care nu sint decit o tehnica romaneasca de a smulge bani de la marile societati .
aceasta " metodologie " a proliferat , fara sa aiba parte de vreo impotrivire din partea comunitatii profesionale sau a organelor abilitate de lege sa cerceteze operatiunile de santaj si trafic de influenta .
asa s - a format presa de santaj .
ea cerceteaza activitatea unui agent comercial , gaseste o chichita sau o operatiune discutabila , dupa care isi trimite oamenii sa negocieze .
unii patroni cedeaza , altii nu !
si atunci incep sa curga acuzatiile .
sint numeroase cazurile in care cei atacati n - au mai rezistat , s - au temut si de lalaiala justitiei si de confruntari publice , si au cedat santajului , platind prin publicitate .
declinul pietei romanesti de publicitate a dus unele publicatii intr - o situatie disperata .
nu mai ataca doar companiile multinationale sau firmele romanesti puternice .
au inceput sa santajeze si personalitatile .
politice , artistice sau din lumea modei .
evident , e vorba de bani negri , care intra intotdeauna in buzunarele unei echipe restrinse .
ba , s - a ajuns pina acolo incit practica ramasa celebra in istoria presei interbelice din Romania prin expresia " Santajul si etajul " sa fie preluata si de unii aventurieri din strainatate ajunsi in piata romaneasca de media .
e de notorietate deja pagina pe care o mare companie de audit din Romania a inchiriat - o intr - un ziar numai si numai pentru a anunta ca este santajata si supusa presiunilor de o publicatie de buzunar , editata de un derbedeu .
daca pretinde legalitate si moralitate de la oamenii politici , de la functionarii publici , de la comunitatea de afaceri , pentru a fi credibila , presa romaneasca este obligata sa se curete de aventurieri fara scrupule , santajisti , fosti informatori ai Securitatii , persoane cu cazier .
altfel , la gramada , fara nici o delimitare , cu reguli formulate numai dupa ureche si dupa interes , presa romaneasca risca sa devina doar o oglinda murdara a societatii .
geaba au loc dezvaluiri legate de coruptie si ilegalitati , despre santaje si aranjamente daca unele ziare care scriu despre acestea le mai si practica .
pozitia transanta a IAA Romania , organism care include mari investitori in Romania , agentii de publicitate , televiziuni , radiouri , ziare si reviste , e primul semn clar de respingere a practicilor de santaj si presiune la care recurg anumite publicatii .
e chiar semnalul ca in presa romaneasca este necesar sa se faca , in sfirsit , curatenie !
altfel , vom trai intr - o tara obligata sa introduca in limbajul sau cotidian si expresia " Cu ziarul , la drumul mare ! " .
|
61e8ab68e266178d2634a1736922eecedbc9d8ca | 4b457d64d1e40b33010f9a4674a06131b223fcb4 | /R/calibrar-demo-LV.R | 7ab6382a19189ad39e0b61a4da0bd918fd59797a | [] | no_license | roliveros-ramos/calibrar | 96d5727f895dbf80f6881d744f6b515a304315d6 | 90b38e0bb0ca76021edc81d6b6f53b81cd2683a8 | refs/heads/master | 2023-07-20T00:12:59.140976 | 2023-07-07T14:26:36 | 2023-07-07T14:26:36 | 20,023,815 | 12 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,838 | r | calibrar-demo-LV.R | # T = 100
# par = list(r=0.5, l=0.2, K=100, alpha=0.1, gamma=0.1, initial=list(N=10, P=1))
.generatePredatorPreyModel = function(path, r=0.5, l=0.2, alpha=0.1, gamma=0.1, K=100, T=100,
N0=10, P0=1, ...) {
# 'real' parameters
par_real = list(r=r, l=l, K=K, alpha=alpha, gamma=gamma, initial=list(N=N0, P=P0))
pop = .PredatorPreyModel(par=par_real, T=T)
# observed abundances
n = rapply(pop, f=jitter, how = "list")
main.folder = file.path(path, "PredatorPreyDemo")
data.folder = file.path(main.folder, "data")
if(!file.exists(data.folder)) dir.create(data.folder, recursive=TRUE)
for(i in c("prey", "predator")) {
ifile = paste0(i, ".csv")
dat = matrix(n[[i]], ncol=1)
colnames(dat) = i
write.csv(dat, file.path(data.folder, ifile))
}
# parInfo.csv
parInfo = list()
parInfo$guess = list(r=0.1, l=0.1, K=1.1*max(n$prey), alpha=0.05, gamma=0.1, initial=list(N=n$prey[1], P=n$predator[1]))
parInfo$lower = list(r=0, l=0, K=0.25*max(n$prey), alpha=0, gamma=0, initial=list(N=0.5*n$prey[1], P=0.5*n$predator[1]))
parInfo$upper = list(r=2, l=2, K=5*max(n$prey), alpha=1, gamma=1, initial=list(N=1.5*n$prey[1], P=1.5*n$predator[1]))
parInfo$phase = list(r=1, l=1, K=1, alpha=1, gamma=1, initial=list(N=NA, P=NA))
# calibrationInfo.csv
calibrationInfo = list()
calibrationInfo$variable = c("prey", "predator")
calibrationInfo$type = "lnorm2"
calibrationInfo$calibrate = TRUE
calibrationInfo$weight = 1
calibrationInfo$use_data = TRUE
calibrationInfo$file = c("data/prey.csv", "data/predator.csv")
calibrationInfo$varid = c("prey", "predator")
calibrationInfo = as.data.frame(calibrationInfo)
write.csv(calibrationInfo, file.path(main.folder, "calibration_settings.csv"), row.names=FALSE)
constants = list(T=T)
output = c(list(file=file.path(main.folder, "calibration_settings.csv"),
path = main.folder,
par=par_real), constants, parInfo)
return(output)
}
#' Simulate a Lotka-Volterra predator-prey system with deSolve.
#'
#' @param par list with growth rate 'r', predator mortality 'l', carrying
#'   capacity 'K', predation rate 'alpha', conversion efficiency 'gamma',
#'   and 'initial' = list(N = prey0, P = predator0).
#' @param T number of time steps; the system is solved on 0, 1, ..., T.
#' @return list with numeric vectors 'prey' and 'predator' (NAs set to 0).
.PredatorPreyModel = function(par, T) {
  if(!requireNamespace("deSolve", quietly = TRUE))
    stop("You need to install the 'deSolve' package.")
  # Right-hand side of the ODE system: logistic prey growth minus predation,
  # predator decay plus conversion of consumed prey.
  LV = function(t, y, parms, ...) {
    r = parms$r
    l = parms$l
    alpha = parms$alpha
    gamma = parms$gamma
    K = parms$K
    dN = r*y[1]*(1-(y[1]/K)) - alpha*y[1]*y[2]
    dP = -l*y[2] + gamma*alpha*y[1]*y[2]
    return(list(c(dN, dP)))
  }
  times = seq(0, T)
  y0 = c(par$initial$N, par$initial$P)
  sol = deSolve::ode(y=y0, times=times, func=LV, parms=par, method="ode45")
  # Drop the time column; remaining columns are prey (N) and predator (P).
  out = as.list(as.data.frame(sol[,-1]))
  names(out) = c("prey", "predator")
  # Replace solver NAs (e.g. from numerical blow-up) with zero abundance.
  out$prey[is.na(out$prey)] = 0
  out$predator[is.na(out$predator)] = 0
  return(out)
}
|
35b1c2342d2f9479dd433a60ab8813d03080065c | 5346fdbb8f0bba368c79971d1f8ee82f80d72623 | /slides/04-predictions-and-variance-walk-through.R | 03171b5b4709404cf7a66a752d0e7e627750b4b7 | [
"MIT"
] | permissive | pedersen-fisheries-lab/DFO-3day-gam-workshop | f03dff53c16c1e61b6c4ee73c2f4d6a18993cd24 | d240ae4a4909fd884ca256389c1659c845c5acca | refs/heads/master | 2022-12-08T16:58:45.813666 | 2020-08-14T16:23:21 | 2020-08-14T16:23:21 | 281,726,758 | 4 | 1 | MIT | 2020-08-11T19:28:03 | 2020-07-22T16:19:17 | HTML | UTF-8 | R | false | false | 11,363 | r | 04-predictions-and-variance-walk-through.R | ## ----setup, include=FALSE, cache=FALSE----------------------------------------
library('here')
library('mgcv')
library('gratia')
library('ggplot2')
library('sf')
library('dplyr')
## ----load-shrimp--------------------------------------------------------------
shrimp <- read.csv(here('data', 'trawl_nl.csv'))
## ----shrimp-richness----------------------------------------------------------
m_rich <- gam(richness ~ s(year),
family = poisson,
method = "REML",
data = shrimp)
## ----richness-violin, fig.height=5, fig.width=5, echo=FALSE-------------------
ggplot(shrimp) +
geom_violin(aes(x = richness, y = factor(year))) +
labs(x = "Number of species", y = "Year")
## ----biom-space-time-plot, fig.height=8, fig.width=15, echo=FALSE-------------
coast <- read_sf(here("data/nl_coast.shp"))
ggplot(shrimp) +
geom_point(aes(x = long, y = lat, size = shrimp), alpha=.5) +
geom_sf(data = coast) +
facet_wrap(~year, ncol = 5)
## ----fit-shrimp-space-time----------------------------------------------------
m_spt <- gam(shrimp ~ te(x, y, year, d = c(2,1), bs = c('tp', 'cr'), k = c(20, 5)),
data = shrimp,
family = tw,
method = "REML")
## ----plot-richness-model------------------------------------------------------
plot(m_rich)
## ----draw-richness-model------------------------------------------------------
draw(m_rich)
## ----plot-conf-band-plus-posterior-smooths, fig.height = 5--------------------
sm_fit <- evaluate_smooth(m_rich, 's(year)') # tidy data on smooth
sm_post <- smooth_samples(m_rich, 's(year)', n = 20, seed = 42) # more on this later
draw(sm_fit) + geom_line(data = sm_post, aes(x = .x1, y = value, group = draw),
alpha = 0.3, colour = 'red')
## ----predict-newdata----------------------------------------------------------
new_year <- with(shrimp, tibble(year = seq(min(year), max(year), length.out = 100)))
pred <- predict(m_rich, newdata = new_year, se.fit = TRUE, type = 'link')
pred <- bind_cols(new_year, as_tibble(as.data.frame(pred)))
pred
## ----predict-newdata-resp-----------------------------------------------------
ilink <- inv_link(m_rich) # inverse link function
crit <- qnorm((1 - 0.89) / 2, lower.tail = FALSE) # or just `crit <- 2`
pred <- mutate(pred, richness = ilink(fit),
lwr = ilink(fit - (crit * se.fit)), # lower...
upr = ilink(fit + (crit * se.fit))) # upper credible interval
pred
## ----plot-predictions-richness, fig.height = 4--------------------------------
ggplot(pred, aes(x = year)) +
geom_ribbon(aes(ymin = lwr, ymax = upr), alpha = 0.2) +
geom_line(aes(y = richness)) + labs(y = "Species richness", x = NULL)
## ----spt-example-predict------------------------------------------------------
sp_new <- with(shrimp, expand.grid(x = seq_min_max(x, n = 100),
y = seq_min_max(y, n = 100),
year = unique(year)))
sp_pred <- predict(m_spt, newdata = sp_new, se.fit = TRUE) # link scale is default
sp_pred <- bind_cols(as_tibble(sp_new), as_tibble(as.data.frame(sp_pred)))
sp_pred
## ----spt-example-response-scale-----------------------------------------------
ilink <- inv_link(m_spt)
too_far <- exclude.too.far(sp_pred$x, sp_pred$y, shrimp$x, shrimp$y, dist = 0.1)
sp_pred <- sp_pred %>% mutate(biomass = ilink(fit),
biomass = case_when(too_far ~ NA_real_,
TRUE ~ biomass))
sp_pred
## ----spt-example-plot, fig.height = 5.5---------------------------------------
ggplot(sp_pred, aes(x = x, y = y, fill = biomass)) + geom_raster() +
scale_fill_viridis_c(option = "plasma") + facet_wrap(~ year, ncol = 5) +
coord_equal()
## ----plotting the uncertainty-------------------------------------------------
ggplot(sp_pred, aes(x = x, y = y, fill = se.fit)) + geom_raster() +
scale_fill_viridis_c(option = "plasma") + facet_wrap(~ year, ncol = 5) +
coord_equal()
## ----plotting the confidence interval-----------------------------------------
sp_pred <- sp_pred %>% mutate(lwr = ilink(fit - (2 * se.fit)),
upr = ilink(fit + (2 * se.fit)))
ggplot(sp_pred, aes(x = x, y = y, fill = lwr)) + geom_raster() +
scale_fill_viridis_c(option = "plasma") + facet_wrap(~ year, ncol = 5) +
coord_equal() +
labs(title = "Lower 95% interval")
ggplot(sp_pred, aes(x = x, y = y, fill = upr)) + geom_raster() +
scale_fill_viridis_c(option = "plasma") + facet_wrap(~ year, ncol = 5) +
coord_equal() +
labs(title = "Upper 95% interval")
## ----vis.gam------------------------------------------------------------------
vis.gam(m_spt, view = c("x", "y"), type = "response", plot.type = "contour",
asp = 1, too.far = 0.1)
## median year (or year closest to median)
## ----vis.gam 2----------------------------------------------------------------
vis.gam(m_spt, view = c("x", "y"), cond = list(year = 2007),
type = "response", plot.type = "contour", asp = 1,
too.far = 0.1)
## ----show-m-spt---------------------------------------------------------------
m_spt
## ----shrimp-ti-model----------------------------------------------------------
m_ti <- gam(shrimp ~ ti(x, y, year, d = c(2, 1), bs = c("tp", "cr"), k = c(20, 5)) +
s(x, y, bs = "tp", k = 20) +
s(year, bs = "cr", k = 5),
data = shrimp, family = tw, method = "REML")
## ----summary-spt-ti-----------------------------------------------------------
summary(m_ti)
## ----pred-data-ti-model-------------------------------------------------------
ti_new <- with(shrimp, expand.grid(x = mean(x), y = mean(y),
year = seq_min_max(year, n = 100)))
ti_pred <- predict(m_ti, newdata = ti_new, se.fit = TRUE,
exclude = c("ti(x,y,year)", "s(x,y)")) #<<
ti_pred <- bind_cols(as_tibble(ti_new), as_tibble(as.data.frame(ti_pred))) %>%
mutate(biomass = ilink(fit),
lwr = ilink(fit - (crit * se.fit)),
upr = ilink(fit + (crit * se.fit)))
## ----pred-data-ti-model-terms, results = "hide"-------------------------------
predict(m_ti, newdata = ti_new, se.fit = TRUE, terms = "s(year)")
## ----plot-ti-marginal-trend, fig.height = 5-----------------------------------
ggplot(ti_pred, aes(x = year)) +
geom_ribbon(aes(ymin = lwr, ymax = upr), alpha = 0.3) +
geom_line(aes(y = biomass)) +
labs(y = "Biomass", x = NULL)
## ----plot-conf-band-plus-posterior-smooths, fig.height = 5, echo = FALSE------
sm_fit <- evaluate_smooth(m_rich, 's(year)') # tidy data on smooth
sm_post <- smooth_samples(m_rich, 's(year)', n = 20, seed = 42) # more on this later
draw(sm_fit) + geom_line(data = sm_post, aes(x = .x1, y = value, group = draw),
alpha = 0.3, colour = 'red')
## ----richness-coefs-----------------------------------------------------------
sm_year <- get_smooth(m_rich, "s(year)") # extract the smooth object from model
idx <- gratia:::smooth_coefs(sm_year) # indices of the coefs for this smooth
idx
beta <- coef(m_rich) # vector of model parameters
## ----richness-vcov------------------------------------------------------------
Vb <- vcov(m_rich) # default is the bayesian covariance matrix
## ----richness-xp-matrix-------------------------------------------------------
new_year <- with(shrimp, tibble(year = seq_min_max(year, n = 100)))
Xp <- predict(m_rich, newdata = new_year, type = 'lpmatrix')
dim(Xp)
## ----richness-reduce-xp-------------------------------------------------------
Xp <- Xp[, idx, drop = FALSE]
dim(Xp)
## ----richness-simulate-params-------------------------------------------------
set.seed(42)
beta_sim <- rmvn(n = 20, beta[idx], Vb[idx, idx, drop = FALSE])
dim(beta_sim)
## ----richness-posterior-draws, fig.height = 5, fig.show = 'hide'--------------
sm_draws <- Xp %*% t(beta_sim)
dim(sm_draws)
matplot(sm_draws, type = 'l')
## ----richness-posterior-draws, fig.height = 5, fig.width = 5, echo = FALSE, results = 'hide'----
sm_draws <- Xp %*% t(beta_sim)
dim(sm_draws)
matplot(sm_draws, type = 'l')
## ----plot-posterior-smooths, fig.height = 5-----------------------------------
sm_post <- smooth_samples(m_rich, 's(year)', n = 20, seed = 42)
draw(sm_post)
## ----posterior-sim-model------------------------------------------------------
beta <- coef(m_rich) # vector of model parameters
Vb <- vcov(m_rich) # default is the bayesian covariance matrix
Xp <- predict(m_rich, type = 'lpmatrix')
set.seed(42)
beta_sim <- rmvn(n = 1000, beta, Vb) # simulate parameters
eta_p <- Xp %*% t(beta_sim) # form linear predictor values
mu_p <- inv_link(m_rich)(eta_p) # apply inverse link function
mean(mu_p[1, ]) # mean of posterior for the first observation in the data
quantile(mu_p[1, ], probs = c(0.025, 0.975))
## ----posterior-sim-model-hist, fig.height = 5---------------------------------
ggplot(tibble(richness = mu_p[587, ]), aes(x = richness)) +
geom_histogram() + labs(title = "Posterior richness for obs #587")
## ----richness-fitted-samples, fig.height = 4.5--------------------------------
rich_post <- fitted_samples(m_rich, n = 1000, newdata = shrimp, seed = 42)
ggplot(filter(rich_post, row == 587), aes(x = fitted)) +
geom_histogram() +
labs(title = "Posterior richness for obs #587", x = "Richness")
## ----total-biomass-posterior-1------------------------------------------------
sp_new <- with(shrimp, expand.grid(x = seq_min_max(x, n = 100),
y = seq_min_max(y, n = 100),
year = 2007))
Xp <- predict(m_spt, newdata = sp_new, type = "lpmatrix")
## work out now which points are too far now
too_far <- exclude.too.far(sp_new$x, sp_new$y, shrimp$x, shrimp$y, dist = 0.1)
beta <- coef(m_spt) # vector of model parameters
Vb <- vcov(m_spt) # default is the bayesian covariance matrix
set.seed(42)
beta_sim <- rmvn(n = 1000, beta, Vb) # simulate parameters
eta_p <- Xp %*% t(beta_sim) # form linear predictor values
mu_p <- inv_link(m_spt)(eta_p) # apply inverse link function
## ----total-biomass-posterior-2, dependson = -1--------------------------------
mu_copy <- mu_p # copy mu_p
mu_copy[too_far, ] <- NA # set cells too far from data to be NA
total_biomass <- colSums(mu_copy, na.rm = TRUE) # total biomass over the region
mean(total_biomass)
quantile(total_biomass, probs = c(0.025, 0.975))
## ----total-biomass-histogram, echo = FALSE------------------------------------
ggplot(tibble(biomass = total_biomass), aes(x = biomass)) +
geom_histogram()
## ----biomass-fitted-samples-example-------------------------------------------
bio_post <- fitted_samples(m_spt, n = 1000,
newdata = sp_new[!too_far, ],
seed = 42) %>%
group_by(draw) %>%
summarise(total = sum(fitted),
.groups = "drop_last")
with(bio_post, mean(total))
with(bio_post, quantile(total, probs = c(0.025, 0.975)))
## ----biomass-fitted-samples-plot, fig.width = 5, fig.height = 5---------------
ggplot(bio_post, aes(x = total)) +
geom_histogram() +
labs(x = "Total biomass")
|
5d97dc1f8614c5f397c9dfd5e1a437bc13ab5c02 | 0b84d6023413e08965e0ce2cca29c6431ddc3d53 | /codes/covarianceSelection/man/dot-initialize_children.Rd | 7237e942eb7ed8d3f4dcb51c6541c56a60bd5ebf | [
"MIT"
] | permissive | jasa-acs/Covariance-Based-Sample-Selection-for-Heterogeneous-Data-Applications-to-Gene-Expression-and-Auti... | 8d615fe9b73991ebf066bbc545aeebadfd31ec75 | 1ed8f1f509098c8d34b92496b7258b28d2309d4f | refs/heads/master | 2022-08-30T02:09:03.023416 | 2020-05-26T20:27:21 | 2020-05-26T20:27:21 | 266,931,290 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 424 | rd | dot-initialize_children.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clique_selection.R
\name{.initialize_children}
\alias{.initialize_children}
\title{Initialize hash table of nodes}
\usage{
.initialize_children(lis)
}
\arguments{
\item{lis}{list of indices}
}
\value{
a \code{hash} object of \code{node} objects
}
\description{
Given a list of indices, make a node for each element and put it into a
hash table
}
|
be8f4871f4854eb9ccd40e585af8169233c44a4e | 5a1aeb2b2f4d5fa21aa7eb2f9e6f85463af3a70d | /R/calScore.R | 986ba7d61eae78b3813cda56bffb81d5cfc523ec | [] | no_license | seandavi/GenomicSignatures | 5a4b553f2cbb172f12121ccfaabee368b73b22f3 | af2a512918089040b289cd2912774d7caf85dcde | refs/heads/master | 2021-01-03T23:04:40.263678 | 2020-02-13T01:31:11 | 2020-02-13T01:31:11 | 240,273,353 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,695 | r | calScore.R | #' Calculate the score for a new dataset
#'
#' Rownames of dataset and avg.loadings should be gene symbols.
#'
#' @param dataset A SummarizedExperiment, ExpressionSet, or numeric matrix
#' (genes in rows, samples in columns), or a list of such objects.
#' Rownames are in 'symbol' format.
#' @param avg.loadings Output from `avgLoading` function - a data frame of average
#' loadings. Each column represents cluster and rows represent genes used for PCA.
#'
#' @return A list containing the score matrices for input datasets. Scores are
#' assigned to each sample (row) on each cluster (column).
#'
#' @export
calScore <- function(dataset, avg.loadings) {
    # Accept a single object as well as a list of objects.
    if (!is.list(dataset)) {dataset <- list(dataset)}
    lapply(dataset, function(dat) {
        # Extract the expression matrix from the supported container types.
        # inherits()/is.matrix() replace `class(dat) == ...`, which fails for
        # matrices in R >= 4.0 where class() returns c("matrix", "array").
        if (inherits(dat, "ExpressionSet")) {
            count <- exprs(dat)
        } else if (inherits(dat, "SummarizedExperiment")) {
            count <- assay(dat)
        } else if (is.matrix(dat)) {
            count <- dat
        } else {
            stop("'dataset' elements must be SummarizedExperiment, ",
                 "ExpressionSet, or matrix objects.")
        }
        # Keep genes whose values are all finite (drops NA/NaN/Inf rows), then
        # center each gene (row). drop = FALSE keeps 1-row results as matrices.
        count <- count[apply(count, 1, function(x) all(is.finite(x))), , drop = FALSE]
        count <- t(apply(count, 1, function(x) x - mean(x)))
        gene_common <- intersect(rownames(avg.loadings), rownames(count))
        # Unit-normalize each loading column, then project the centered
        # expression of the shared genes onto the normalized loadings.
        loading_norm <- apply(avg.loadings[gene_common, , drop = FALSE], 2,
                              function(x) x / sqrt(sum(x^2, na.rm = TRUE)))
        score <- t(count[gene_common, , drop = FALSE]) %*% loading_norm
        colnames(score) <- colnames(avg.loadings)
        return(score)
    })
}
|
5dcd56b017eb813079a7d774b0732e22eed3e25a | d2ebe890dafca65977919640cb19615dd7fcd8e5 | /c2/3 Models with ratio and montecarlo.R | 9ad1c65a633a4377d222f2a8a242c23fd4131c98 | [] | no_license | ishandas387/ShinyApp | 7590e124ef3f1a3b636a512da1a2cde6a2b86be6 | 3e3d54130adf38db1f8d5d593f6a21d60f974751 | refs/heads/master | 2020-11-27T05:25:46.385776 | 2020-04-05T11:04:52 | 2020-04-05T11:04:52 | 229,321,495 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,253 | r | 3 Models with ratio and montecarlo.R | library(class)
library(e1071)
library(MASS)
# Compare three classifiers (naive Bayes, multinomial logistic regression,
# linear-kernel SVM) on the iris data, averaging test-set accuracy over `mc`
# Monte Carlo train/test splits for each of three training-set ratios.
data("iris")
Tit <- iris
M <- matrix(NA, 3, 3)   # rows = split ratios, columns = NB / MLR / SVM accuracy
ratio <- c(40, 60, 80)  # training-set percentage for each experiment
library(nnet)           # hoisted: was re-loaded on every Monte Carlo iteration
for (j in 1:3) {
  # Running mean of the three accuracies, one slot per classifier.
  # Fix: this was initialised as c(0, 0), which recycled against the
  # length-3 accuracy vector on the first iteration (with a warning).
  acc <- c(0, 0, 0)
  mc <- 1000            # number of Monte Carlo repetitions
  for (i in 1:mc) {
    # Random train/test split at the current ratio.
    n <- nrow(Tit)
    indexes <- sample(n, n * (ratio[j] / 100))
    trainset <- Tit[indexes, ]
    testset <- Tit[-indexes, ]
    # Naive Bayes
    m <- naiveBayes(Species ~ ., data = trainset)
    actual <- testset$Species
    pred <- predict(m, testset)
    tab <- table(pred, actual) # confusion matrix
    accuracy <- mean(pred == actual)
    # Multinomial logistic regression
    mlr <- multinom(Species ~ ., data = trainset)
    pred_mlr <- predict(mlr, testset)
    accuracy_mlr <- mean(pred_mlr == actual)
    # Linear-kernel SVM
    svm_model <- svm(Species ~ ., data = trainset, kernel = 'linear')
    pred_svm <- predict(svm_model, testset)
    accuracy_svm <- mean(pred_svm == actual)
    # Accumulate the running mean of the three accuracies.
    vector_accuracy <- c(accuracy, accuracy_mlr, accuracy_svm)
    acc <- acc + (1 / mc) * vector_accuracy
  }
  M[j, ] <- acc
}
M
# Mean accuracy vs. training ratio, one curve per classifier.
x <- ratio
plot(x, M[, 1], ylim = c(0.93, .98), col = 'red', type = 'b')
lines(x, M[, 2], col = 'blue', type = 'b')
lines(x, M[, 3], col = 'green', type = 'b')
|
05469c7513cb148953b4c6015c9e07bab55cf702 | aa60ef703e17a9c925ff2138efe261d6d1d9f3a1 | /UAS_MLR_MAUDY.R | dfc8450e31d11954b9bc15c8a39836bbc7d2f8f6 | [] | no_license | maudyarynt/UAS_BIGDATA | 2884f97dd3c34ee8d80094983afe4a0bfa5190e8 | c71eed2d74fbb133db22ffc3f8e7baa350435236 | refs/heads/main | 2023-06-17T07:01:17.532519 | 2021-07-18T14:57:00 | 2021-07-18T14:57:00 | 387,194,438 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,963 | r | UAS_MLR_MAUDY.R | library(readxl)
# Multinomial logistic regression on the UAS_PCA_dan_MLR customer data:
# min-max scale the monetary predictors, split 70/30, fit nnet::multinom on
# Product_holding, and report confusion matrices / misclassification rates.
UAS_PCA_dan_MLR <- read_excel("mod/UAS_PCA_dan_MLR.xlsx")
View(UAS_PCA_dan_MLR)
str(UAS_PCA_dan_MLR)
# Min-max normalise each monetary predictor to [0, 1] (replaces five
# copy-pasted near-identical statements).
rescale01 <- function(x) (x - min(x)) / (max(x) - min(x))
for (col in c("Saving", "Deposit", "KK", "Tab_Bisnis", "Limit_Kredit_Mortgage")) {
  UAS_PCA_dan_MLR[[col]] <- rescale01(UAS_PCA_dan_MLR[[col]])
}
# Quick data-quality summaries: treat empty strings as missing, then report
# missingness and cardinality per column.
data.raw <- UAS_PCA_dan_MLR
dim(data.raw)
length(data.raw$Product_holding)
data.raw[data.raw == ""] <- NA
sapply(data.raw, function(x) sum(is.na(x)))
colSums(is.na(data.raw))
sapply(data.raw, function(x) length(unique(x)))
UAS_PCA_dan_MLR$Product_holding <- as.factor(UAS_PCA_dan_MLR$Product_holding)
# Random 70/30 train/test split.
set.seed(733)
ind <- sample(2, nrow(UAS_PCA_dan_MLR), replace = TRUE, prob = c(0.7, 0.3))
training <- UAS_PCA_dan_MLR[ind == 1, ]
testing <- UAS_PCA_dan_MLR[ind == 2, ]
library(nnet)
training$Product_holding <- relevel(training$Product_holding, ref = "1")
mod <- multinom(Product_holding ~ ., data = training)
summary(mod)
# Two-sided Wald z-tests for the model coefficients.
z <- summary(mod)$coefficients / summary(mod)$standard.errors
p <- (1 - pnorm(abs(z), 0, 1)) * 2
p
# Training-set predictions. (Note: this reuses `p`, clobbering the p-values.)
p <- predict(mod, training)
head(p)
head(training$Product_holding)
# Fix: `tab` was referenced below without ever being created, so the training
# misclassification rate errored out. Build the training confusion matrix.
tab <- table(p, training$Product_holding)
tab
1 - sum(diag(tab)) / sum(tab)   # training misclassification rate
# Test-set confusion matrix and misclassification rate.
p1 <- predict(mod, testing)
tab1 <- table(p1, testing$Product_holding)
tab1
1 - sum(diag(tab1)) / sum(tab1)
# Class distribution in the training data.
table(training$Product_holding)
n <- table(training$Product_holding)
n / sum(n)
# NOTE(review): dividing by colSums() recycles down columns, so only the
# diagonal entries are per-class recall; off-diagonal entries are not column
# proportions — confirm this is the intended summary.
tab / colSums(tab)
tab1/colSums(tab1) |
2b6c89c395ac490b1b96663e6e1ad47fe6585524 | fd80186292810bd76c5de42579674a5f7bb0bf8e | /sc_atac_tSNE_perplexity_loop.R | aab5d26eae09f6588e50700391c04ba40f630070 | [] | no_license | resurgo-genetics/scATACseq | 6080e776b2444a646714f4d450e113b204631c17 | 7066c321200e090b953bf478dd0ba35ea43b2be8 | refs/heads/master | 2021-06-20T04:57:20.959310 | 2017-08-02T00:43:33 | 2017-08-02T00:43:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,807 | r | sc_atac_tSNE_perplexity_loop.R | #module load gmp/5.0.2 mpfr/3.1.0 mpc/0.8.2 gcc/4.9.1 R/3.2.1
library(Matrix)
library(limma)
library(RColorBrewer)
library(irlba,lib.loc="/net/shendure/vol1/home/cusanovi/R/x86_64-unknown-linux-gnu-library/3.1/")
library(data.table)
library(Rtsne)
library(densityClust)
library(methods)
library(viridis)
## ---- Run parameters ---------------------------------------------------------
# gz set from 'TRUE' to 'FALSE'
# gz: TRUE reads `inmat` through zcat (gzip-compressed input); FALSE reads it
# directly with fread().
gz = FALSE
# upperandlower: TRUE also trims cells from the UPPER tail of the read-count
# distribution; FALSE trims only the lower tail.
upperandlower = FALSE
# Fraction of cells trimmed from the low (and, optionally, high) end.
cellcutoff=0.1
uppercellcutoff=1-cellcutoff
# sitecutoff is only used by the commented-out "top-N sites" filter below;
# siteperccutoff keeps sites observed in at least this fraction of cells.
sitecutoff=30000
siteperccutoff=0.05
## ---- Input / output paths (commented alternates kept for other datasets) ----
#inmat = "/net/shendure/vol10/projects/mouse_atlas/nobackup/tissues/Spleen_62016/SC_atac_combined.Spleen_62016_P2.true.nodups.peaks.dhsmatrix.txt"
inmat = "/net/shendure/vol10/projects/human_EBs/nobackup/NextSeq_hEB_July_Sept2016_merged/matrix/scatac_hEB.q10.sort.true.nodups_peaks.dhsmatrix.txt"
#cellsout = "/net/shendure/vol10/projects/mouse_atlas/nobackup/tissues/Spleen_62016/SC_atac_combined.Spleen_62016_P2.true.nodups.peaks.densitypeaks.cellclusters."
# cellsout is defined but not referenced later in this script — TODO confirm.
cellsout = "/net/shendure/vol10/projects/human_EBs/nobackup/NextSeq_hEB_July_Sept2016_merged/tSNE/scatac.summits.densitypeaks.cellclusters."
#tsneout = "/net/shendure/vol10/projects/mouse_atlas/nobackup/tissues/Spleen_62016/SC_atac_combined.Spleen_62016_P2.true.nodups.peaks.lesswrong.tsne.0.05cells.0.01cellsites."
# Output PDF prefix encodes the filtering parameters that were applied.
if(upperandlower){
	tsneout = paste0("/net/shendure/vol10/projects/human_EBs/nobackup/NextSeq_hEB_July_Sept2016_merged/tSNE/scatac.summits.tsne.",cellcutoff,"cellsupperandlower.",siteperccutoff,"cellsites")
}else{
	tsneout = paste0("/net/shendure/vol10/projects/human_EBs/nobackup/NextSeq_hEB_July_Sept2016_merged/tSNE/scatac.peaks.lesswrong.tsne.",cellcutoff,"cells.",siteperccutoff,"cellsites")
}
## ---- Metadata tables --------------------------------------------------------
#promwindows = read.table("/net/shendure/vol10/projects/mouse_atlas/nobackup/master_combined_peaks_update.within.2_5kb.of.tss.whitelist.bed")
promwindows = read.table("/net/shendure/vol10/projects/human_EBs/nobackup/NextSeq_hEB_July_Sept2016_merged/sort/macs/master_combined_peaks.within.2.5kb.of.tss.whitelist.bed")
countreport = read.table("/net/shendure/vol10/projects/human_EBs/nobackup/NextSeq_hEB_July_Sept2016_merged/fastq/scatac_hEB.q10.sort.true.nodups.nodups.cells.duplicate_report.txt",header=T)
# Previous cluster assignments (cell id in column 1, cluster id in column 2),
# used below to colour the "Old Clusters" panel.
oldclusters = read.table("/net/shendure/vol10/projects/human_EBs/nobackup/NextSeq_hEB_July_Sept2016_merged/AlphabetExperiments/sc_drone_dal/SCatac_dal.true.nodups.5kbwindows.0.1cells.0.05cellsites.6components.3clusters.cellclusters.txt")
#oldclusters = read.table("/net/shendure/vol10/projects/mouse_atlas/nobackup/tissues/Spleen_62016/SC_atac_combined.Spleen_62016_P2.true.nodups.5kbwindows.cellclusters.5percnew.txt")
#sexcolors = read.table("/net/shendure/vol10/projects/scATAC/nobackup/FlyEmbryogenesis/sc_drone_tha/SCatac_tha.10to12.true.nodups.xycalls.txt")
#sexcounts = read.table("/net/shendure/vol10/projects/scATAC/nobackup/FlyEmbryogenesis/sc_drone_tha/SCatac_tha.10to12.true.nodups.xycounts.txt",header=T)
# Number of previously-assigned clusters; sizes the brewer.pal palette below.
numclust = length(unique(oldclusters[,2]))
#Colors from https://www.r-bloggers.com/the-paul-tol-21-color-salute/
# Paul Tol qualitative / rainbow palettes of increasing size. dc25rainbow
# extends tol21rainbow with four extra colours so up to 25 density-peak
# clusters can be plotted distinctly.
tol2qualitative=c("#4477AA", "#CC6677")
tol3qualitative=c("#4477AA", "#DDCC77", "#CC6677")
tol4qualitative=c("#4477AA", "#117733", "#DDCC77", "#CC6677")
tol5qualitative=c("#332288", "#88CCEE", "#117733", "#DDCC77", "#CC6677")
tol6qualitative=c("#332288", "#88CCEE", "#117733", "#DDCC77", "#CC6677","#AA4499")
tol7qualitative=c("#332288", "#88CCEE", "#44AA99", "#117733", "#DDCC77", "#CC6677","#AA4499")
tol8qualitative=c("#332288", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#CC6677","#AA4499")
tol9qualitative=c("#332288", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#CC6677", "#882255", "#AA4499")
tol10qualitative=c("#332288", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#661100", "#CC6677", "#882255", "#AA4499")
tol11qualitative=c("#332288", "#6699CC", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#661100", "#CC6677", "#882255", "#AA4499")
tol12qualitative=c("#332288", "#6699CC", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#661100", "#CC6677", "#AA4466", "#882255", "#AA4499")
tol14rainbow=c("#882E72", "#B178A6", "#D6C1DE", "#1965B0", "#5289C7", "#7BAFDE", "#4EB265", "#90C987", "#CAE0AB", "#F7EE55", "#F6C141", "#F1932D", "#E8601C", "#DC050C")
tol15rainbow=c("#114477", "#4477AA", "#77AADD", "#117755", "#44AA88", "#99CCBB", "#777711", "#AAAA44", "#DDDD77", "#771111", "#AA4444", "#DD7777", "#771144", "#AA4477", "#DD77AA")
tol18rainbow=c("#771155", "#AA4488", "#CC99BB", "#114477", "#4477AA", "#77AADD", "#117777", "#44AAAA", "#77CCCC", "#777711", "#AAAA44", "#DDDD77", "#774411", "#AA7744", "#DDAA77", "#771122", "#AA4455", "#DD7788")
tol21rainbow= c("#771155", "#AA4488", "#CC99BB", "#114477", "#4477AA", "#77AADD", "#117777", "#44AAAA", "#77CCCC", "#117744", "#44AA77", "#88CCAA", "#777711", "#AAAA44", "#DDDD77", "#774411", "#AA7744", "#DDAA77", "#771122", "#AA4455", "#DD7788")
dc25rainbow = c("#771155", "#AA4488", "#CC99BB", "#114477", "#4477AA", "#77AADD", "#117777", "#44AAAA", "#77CCCC", "#117744", "#44AA77", "#88CCAA", "#777711", "#AAAA44", "#DDDD77", "#774411", "#AA7744", "#DDAA77", "#771122", "#AA4455", "#DD7788","lightgray","darkgray","black","orange")
# Load the site x cell count matrix (columns 1-4 are site annotation, column 4
# being the site id; remaining columns are per-cell counts).
if(gz == TRUE){
bigmat = fread(paste0("zcat ",inmat),header=T)
} else {
bigmat = fread(inmat,header=T)
}
bigmat = as.data.frame(bigmat)
print("Reformatting data...")
mastersites = bigmat[,4]
sortmat = as.matrix(bigmat[,-c(1:4)])
# Drop sites with no counts in any cell.
nonzeroes = rowSums(sortmat) > 0
sortmat.nonzero = sortmat[nonzeroes,]
mastersites.nonzero = mastersites[nonzeroes]
# Build (row, column) index pairs for every nonzero entry: unlist() names each
# row index "<cellname><position>", so stripping digits recovers the cell name
# and namenums maps each entry back to its column.
currnonzs = unlist(apply(sortmat.nonzero,2,function(x) which(x > 0)))
namers = gsub('[[:digit:]]+', '', names(currnonzs))
namenums = c(1:length(unique(namers)))[unlist(as.factor(namers))]
mastermat = cbind(currnonzs,namenums)
namelist = unique(namers)
# Free the dense intermediates before allocating the sparse matrix.
rm(bigmat)
rm(sortmat)
rm(sortmat.nonzero)
gc()
# Binarised sparse accessibility matrix: sites x cells, 1 = any count observed.
bigmat.bin = sparseMatrix(i=mastermat[,1],j=mastermat[,2],x=rep(1,times=dim(mastermat)[1]))
rownames(bigmat.bin) = mastersites.nonzero
colnames(bigmat.bin) = namelist
# Reformat "chr_start_end" site ids as "chr:start-end" labels.
sites = strsplit2(mastersites.nonzero,"_")
label = paste0(sites[,1],":",sites[,2],"-",sites[,3])
annot = data.frame(row.names=label, cell_spec=mastersites.nonzero)
# Keep sites observed in at least siteperccutoff of cells (the commented
# alternative kept the top `sitecutoff` most-observed sites instead).
num_cells_ncounted = rowSums(bigmat.bin)
#annot.ncounts = rownames(annot)[num_cells_ncounted >= num_cells_ncounted[order(num_cells_ncounted,decreasing=T)[sitecutoff]]]
#ncounts = bigmat.bin[num_cells_ncounted >= num_cells_ncounted[order(num_cells_ncounted,decreasing=T)[sitecutoff]],]
annot.ncounts = rownames(annot)[num_cells_ncounted >= dim(bigmat.bin)[2]*siteperccutoff]
ncounts = bigmat.bin[num_cells_ncounted >= dim(bigmat.bin)[2]*siteperccutoff,]
# Trim cells by total site count: lower tail only, or both tails.
new_counts = colSums(ncounts)
if(upperandlower){
	ncounts = ncounts[,new_counts >= quantile(new_counts,probs=cellcutoff) & new_counts <= quantile(new_counts,probs=uppercellcutoff)]
}else{
	ncounts = ncounts[,new_counts >= quantile(new_counts,probs=cellcutoff)]
}
# Drop sites that became empty after the cell filtering.
annot.ncounts = annot.ncounts[rowSums(ncounts) > 0]
ncounts = ncounts[rowSums(ncounts) > 0,]
#	sexsites = c(grep("chrY",rownames(ncounts)),grep("chrX",rownames(ncounts)))
#	ncounts.nosex = ncounts[-sexsites,]
#	annot.ncounts.nosex = annot.ncounts[-sexsites]
print("Normalizing data...")
# TF-IDF weighting: term frequency per cell times log inverse site frequency.
nfreqs <- t(t(ncounts) / colSums(ncounts))
tf_idf_counts <- nfreqs * log(1 + ncol(ncounts) / rowSums(ncounts))
dim(tf_idf_counts)
print("Running tSNE...")
# Reduce the TF-IDF matrix to 50 dimensions with a truncated SVD (irlba) and
# rebuild the component scores (V scaled by the singular values) as tSNE input.
set.seed(0)
SVDtsne = irlba(tf_idf_counts, 50, 50)
d_diagtsne = matrix(0, nrow=length(SVDtsne$d), ncol=length(SVDtsne$d))
diag(d_diagtsne) <- SVDtsne$d
SVDtsne_vd = t(d_diagtsne %*% t(SVDtsne$v))
# Fix: `verbose` was passed to findClusters() below without ever being
# defined, which aborted the loop with "object 'verbose' not found".
verbose = FALSE
# Run tSNE + density-peak clustering at several perplexities and write one
# PDF of diagnostic plots per setting.
for (n in c(5, 50, 100)){
  set.seed(0)
  tsnetfidf = Rtsne(SVDtsne_vd, perplexity = n, pca=F)
  tsnedist = dist(tsnetfidf$Y)
  cell_pal <- brewer.pal(numclust, "Paired")
  dclust = densityClust(tsnedist,gaussian=T)
  # Density (rho) and separation (delta) thresholds for calling cluster peaks.
  rhoer=50
  deltar = 2.5
  dclust = findClusters(dclust, rho = rhoer, delta = deltar, verbose = verbose)
  pdf(paste0(tsneout,".perplexity",n,".rho",rhoer,".delta",deltar,".pdf"))
  plot(tsnetfidf$Y,pch=20,main="No Clusters")
  plot(tsnetfidf$Y,pch=20,col=cell_pal[oldclusters[match(colnames(tf_idf_counts),oldclusters[,1]),2]],main="Old Clusters")
  #plot(tsnetfidf$Y,pch=20,col=c("blue","red")[sexcolors[match(colnames(tf_idf_counts),sexcolors[,1]),2]],main="Sex Coloring")
  plot(tsnetfidf$Y,pch=20,col=dc25rainbow[as.factor(dclust$clusters)],main="New Peak Density Clusters")
  plot(tsnetfidf$Y,pch=20,col=dc25rainbow[as.factor(dclust$clusters)],main=paste0("New Peak Density Clusters\n",dim(tf_idf_counts)[1]," sites\n",dim(tf_idf_counts)[2]," cells"))
  text(tsnetfidf$Y[dclust$peaks,1],tsnetfidf$Y[dclust$peaks,2],labels=dclust$clusters[dclust$peaks],cex=3)
  # Decision plot: rho vs delta with the chosen thresholds and called peaks.
  plot(dclust$rho,dclust$delta,pch=20)
  points(dclust$rho[dclust$peaks],dclust$delta[dclust$peaks],col="red",pch=20)
  text(dclust$rho[dclust$peaks]-2,dclust$delta[dclust$peaks]+2,labels=dclust$clusters[dclust$peaks])
  abline(v=rhoer)
  abline(h=deltar)
  dev.off()
}
|
098c5e8ab679a439cdc4fc276725e641cd732ff6 | 5044e15804789c2f18e1cf82349010e9508ac506 | /plot2.R | 5691c000710df4b860b94575dffe2ca575e036dc | [] | no_license | rigley/ExData_Final | 201001e4a478ea54b9f11bb82d85a7e641be575f | 394d57869f692231f10c20f2c498506271605eaa | refs/heads/master | 2021-01-19T07:36:38.058513 | 2017-04-09T20:20:32 | 2017-04-09T20:20:32 | 87,560,144 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 713 | r | plot2.R | # rds files must be in the same directory as this script
# Plot 2: total PM2.5 emissions per year for Baltimore City (fips "24510").
# Expects the two .rds inputs in the working directory; writes plot2.png.
print("Reading data....")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")  # loaded for parity with the other plots; unused here
# Keep only Baltimore City records and sum the emissions within each year.
baltimore_filter <- "24510"
baltimore_emissions <- subset(NEI, NEI$fips == baltimore_filter)
yearly_totals <- aggregate(baltimore_emissions$Emissions,
                           by = list(year = baltimore_emissions$year),
                           FUN = sum)
png(filename = "./plot2.png", width = 480, height = 480, units = "px")
plot(yearly_totals$year, yearly_totals$x,
     xlim = c(1998, 2009), ylim = c(0, 5000), xaxt = "n",
     main = "PM2.5 Emissions per Year\nBaltimore City, Maryland",
     xlab = "Year", ylab = "PM2.5 Emissions (tons)",
     pch = 20, cex = 2)
axis(1, at = c(1999, 2002, 2005, 2008))
dev.off()
print("Output saved at plot2.png")
|
74dadaf8cbbd5ed873302ee0b6ffc2db1b4642cc | cb015d1ac88fa3ae722f16f3032b6eae6a2252bb | /R/graphviz.R | 81619ff3a46c82391cc90802e59f2021d11da761 | [] | no_license | pedmiston/crotchet | d97fd115edd05d29d3383a2027a47a54cf1fe549 | d3c9fffaf28766c4817c916262420ff30e1892bb | refs/heads/master | 2020-08-05T15:06:31.340565 | 2017-10-10T14:18:14 | 2017-10-10T14:18:14 | 67,830,890 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,901 | r | graphviz.R | #' Returns a path to a graphviz file included in the specified package.
#' @param name Path to a graphviz file, or (with `package`) the basename of
#'   one installed in that package.
#' @param package Optional package searched for `name`.
#' @param inst_dir_name Installed data directory searched within `package`.
#' @return Absolute path to the graphviz file.
#' @export
find_graphviz <- function(name, package, inst_dir_name = "extdata") {
  # With a package, delegate the lookup to the shared extdata helper.
  if (!missing(package)) {
    return(find_extdata(name, ext = ".gv", package = package,
                        inst_dir_name = inst_dir_name))
  }
  # Otherwise `name` must itself be an existing file on disk.
  stopifnot(file.exists(name))
  tools::file_path_as_absolute(name)
}
#' List all graphviz files available in the specified package
#' @param package Package whose installed data directory is searched.
#' @param strip_ext Drop the ".gv" extension from the returned names?
#' @return Character vector of graphviz file names found in the package.
#' @export
list_graphviz <- function(package, strip_ext = TRUE) {
  # NOTE(review): re_files = "*.gv$" mixes glob and regex syntax (a regex
  # cannot start with "*") — confirm how list_extdata() interprets it.
  list_extdata(package, re_files = "*.gv$", strip_ext = strip_ext)
}
#' Read the graphviz files in a package as knitr chunks.
#' @param package Package whose installed graphviz files are loaded, one knitr
#'   chunk per file (side effect only).
#' @export
read_all_graphviz_chunks <- function(package) {
  gv_names <- list_graphviz(package, strip_ext = TRUE)
  lapply(gv_names, function(gv_name) read_graphviz_chunk(gv_name, package))
  invisible(NULL)
}
#' Read a specific graphviz file as a knitr chunk.
#' @param name Path to a graphviz file, or the basename of one installed in
#'   `package`.
#' @param package Optional. Package searched when `name` is not an existing
#'   path.
#' @param new_name Optional label for the loaded chunk. Defaults to the
#'   basename of `name` without its extension.
#' @importFrom knitr read_chunk
#' @export
read_graphviz_chunk <- function(name, package, new_name) {
  gv_path <- find_graphviz(name, package)
  # Derive the chunk label from the file name unless the caller supplied one.
  if (missing(new_name)) {
    new_name <- tools::file_path_sans_ext(basename(name))
  }
  read_chunk(gv_path, labels = new_name)
}
#' Read a graphviz figure from source using DiagrammeR.
#'
#' @param name The name of the graphviz source file, without the
#'   .gv extension.
#' @param package The name of the package where to find the graphviz
#'   file.
#' @param ... Optional arguments passed on to \code{\link{DiagrammeR::grViz}}.
#'
#' @import dplyr
#' @export
diagram_graphviz <- function(name, package, ...) {
  gv_path <- find_graphviz(name, package)
  DiagrammeR::grViz(gv_path, ...)
}
#' Render a graphviz figure as svg and import it.
#'
#' The graph is rendered to a temporary SVG file via DiagrammeR, read back in
#' as a picture grob, and the temporary file is removed.
#' @import dplyr
#' @export
read_graphviz <- function(name, package, ...) {
  svg_path <- tempfile("diagrammer", fileext = ".svg")
  # Build the DiagrammeR graph and export its SVG text to the temp file.
  svg_text <- DiagrammeRsvg::export_svg(diagram_graphviz(name, package, ...))
  write(svg_text, svg_path)
  # Import the SVG as a pictureGrob, then clean up.
  grob <- read_svg(svg_path)
  file.remove(svg_path)
  grob
}
#' Read an svg figure in as a grob.
#'
#' Converts the SVG to Cairo form in a temporary file (required by grImport2)
#' before importing it as a picture grob.
#' @import dplyr
#' @export
read_svg <- function(svg_file) {
  cairo_svg <- tempfile("grconvert", fileext = ".svg")
  grConvert::convertPicture(svg_file, cairo_svg)
  grob <- grImport2::pictureGrob(grImport2::readPicture(cairo_svg))
  file.remove(cairo_svg)
  grob
}
#' Draw an svg file using grid.
#' @import dplyr
#' @import grid
#' @export
draw_svg <- function(svg_file) {
  # Start a fresh page, then draw the imported picture grob onto it.
  grid.newpage()
  grid.draw(read_svg(svg_file))
}
#' Draw a graphviz image using grid.
#' @import magrittr
#' @import grid
#' @export
draw_graphviz <- function(name, package, ...) {
  # New page first (matching the original order), then render and draw.
  grid.newpage()
  graph_grob <- read_graphviz(name, package, ...)
  grid.draw(graph_grob)
}
#' Render a graphviz figure directly with the dot engine.
#'
#' Unlike \code{read_graphviz}, this shells out to the system \code{dot}
#' binary, so graphs that reference image files render correctly.
#'
#' @param name Path to a graphviz file, or the basename of one installed in
#'   `package`.
#' @param package Optional package searched for `name`.
#' @param ... Currently unused; kept for signature parity with
#'   \code{read_graphviz}.
#' @return A raster grob of the rendered graph.
#' @import dplyr
#' @export
read_graphviz_with_images <- function(name, package, ...) {
  dot_source <- find_graphviz(name, package)
  # Render gv -> png with the system dot binary. shQuote() keeps paths that
  # contain spaces (or other shell metacharacters) intact; the original
  # interpolated them unquoted.
  png_path <- tempfile("dot", fileext = ".png")
  status <- system(paste("dot -Tpng -o", shQuote(png_path), shQuote(dot_source)))
  if (status != 0L) {
    stop("'dot' failed (exit status ", status, ") for ", dot_source,
         call. = FALSE)
  }
  # Read the rendered png back in as a grob, then clean up.
  picture_grob <- grid::rasterGrob(png::readPNG(png_path))
  file.remove(png_path)
  picture_grob
}
#' Draw a graphviz figure that has images in it with grid.
#' @import dplyr
#' @import grid
#' @export
draw_graphviz_with_images <- function(name, package, ...) {
  grid.newpage()
  graph_grob <- read_graphviz_with_images(name, package, ...)
  grid.draw(graph_grob)
}
|
fc9004ad52355e057f5a4c0c548a00f3f42d1b3b | abc43d77124cef33e655b1837551e610cdb2ce7f | /man/RmdUpdateOutputs.Rd | 48c003b99c90ad8917f60cc0e7b9d781c0a6cd73 | [
"MIT"
] | permissive | dr-harper/HarpR | 00073b672890e78cab943092d53936c2de222fd5 | f77bf85075992ff01124d9115bdf64650a9e0e75 | refs/heads/master | 2022-04-04T18:35:51.485215 | 2019-11-06T15:26:52 | 2019-11-06T15:26:52 | 120,006,708 | 0 | 0 | MIT | 2018-11-22T16:41:57 | 2018-02-02T16:49:18 | R | UTF-8 | R | false | true | 963 | rd | RmdUpdateOutputs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RmdLastKnitted.R
\name{RmdUpdateOutputs}
\alias{RmdUpdateOutputs}
\title{Updates R Markdown output files}
\usage{
RmdUpdateOutputs(directory = getwd(), recursive = TRUE,
firstKnit = FALSE, minAge = 0, quiet = TRUE)
}
\arguments{
\item{directory}{the directory to search for the files. Defaults to the working directory}
\item{recursive}{should the search include subdirectories?}
\item{firstKnit}{should the function compile files if they have not be knitted before? Default is FALSE}
\item{minAge}{the minimum time difference between the updated R Markdown and the HTML.}
\item{quiet}{should the R Markdown print pandoc messages}
}
\description{
Checks the age of R Markdown files and their respective outputs, and will update
any documents which are out of date. The function will use the `output` options
of the YAML frontmatter of the document.
}
\author{
Michael Harper
}
|
f65e94dd6eb956b3dbbad43481e94aae5d96d436 | f76df79cc236bcdeb4f1d65a3a8fe5dba8a83f13 | /run_analysis.R | 49313548f3e0ec04d30bb3ccc635ca3f7f442b69 | [] | no_license | renato-leite/Getting_Cleanning | 34d0713a92ef0c7b1f63779bdc8578616e2503a5 | 2e9e5eb9a6b90ab6e00605a77879ad000c09db3a | refs/heads/master | 2020-05-16T23:04:43.495120 | 2014-05-23T23:43:17 | 2014-05-23T23:43:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,024 | r | run_analysis.R |
######################################
#
# This script imports and transform the data from the Coursera Project
# into a Tidy data.
# The description is in the README.MD
#
######################################
##Imports accelerometer data
A1=read.table("train/subject_train.txt")
A2=read.table("train/X_train.txt")
A3=read.table("train/y_train.txt")
B1=read.table( "test/subject_test.txt")
B2=read.table("test/X_test.txt")
B3=read.table("test/y_test.txt")
#imports MetaData
L = read.table("activity_labels.txt")
N = read.table("features.txt")
#extracts features names
NM = N[,2]
rm(N)
NM=as.character(NM)
# criates vector that identifies trainning and test data. Just for eventual control
A4 = matrix("A",dim(A1)[1],dim(A1)[2])
B4 = matrix("B",dim(B1)[1],dim(B1)[2])
#agregates columns
A = cbind(A4,A1,A3,A2)
B = cbind(B4,B1,B3,B2)
rm(A1)
rm(A2)
rm(A3)
rm(A4)
rm(B1)
rm(B2)
rm(B3)
rm(B4)
# gives functional names to the fundamental columns
names(A)[1] = "base"
names(A)[2] = "id"
names(A)[3] = "Y"
names(A)[4:dim(A)[2]] = NM
names(B)[1] = "base"
names(B)[2] = "id"
names(B)[3] = "Y"
names(B)[4:dim(B)[2]] = NM
# combines tranning and test data
Base = rbind(A,B)
rm(A)
rm(B)
# Select the mean and standard deviation columns for each measurement.
# Columns 1-3 are base/id/Y; feature i of features.txt sits at column i + 3,
# so e.g. 4..9 are the tBodyAcc mean/std X-Y-Z features.
NewBase = Base[,c(1,2,3,
                  4, 5, 6, 7, 8, 9,
                  44, 45, 46, 47, 48, 49,
                  84, 85, 86, 87, 88, 89,
                  124, 125, 126, 127, 128, 129,
                  164, 165, 166, 167, 168, 169,
                  204, 205,
                  217, 218,
                  230, 231,
                  243, 244,
                  256, 257,
                  269, 270, 271, 272, 273, 274,
                  348, 349, 350, 351, 352, 353,
                  427, 428, 429, 430, 431, 432,
                  506, 507,
                  519, 520,
                  532, 533,
                  545, 546)]
rm(Base)
# Merge the activity labels (L: V1 = code, V2 = name) onto the data by code.
LBase = merge(NewBase,L,by.x = "Y",by.y = "V1")
# Subject ID vector (column 3 of the merged table).
ID = LBase[,3]
# Activity label vector (last column, added by the merge).
Activity = LBase[,dim(LBase)[2]]
# Measurement columns as a numeric matrix.
DT = as.matrix(LBase[4:(dim(NewBase)[2])])
# New data frame of ID, activity, and measurements.
# NOTE(review): cbind() coerces Activity (a factor) to its integer codes
# before as.data.frame — confirm that is intended.
PTD = as.data.frame(cbind(ID,Activity,DT))
# One group key per (subject, activity) pair.
CAT = paste(ID,Activity)
# Split the measurements by (subject, activity) group.
SDT = split(PTD,CAT)
rm(CAT)
rm(DT)
rm(LBase)
# Average every variable within each (subject, activity) group.
TD = do.call(rbind,lapply(SDT, colMeans))
rm(SDT)
# Merge the activity label text back (it was lost in the averaging above),
# then substitute the label for the code and drop the extra column.
Temp = merge(TD,L,by.x = "Activity",by.y = "V1")
rm(TD)
Temp[,1] = Temp[,dim(Temp)[2]]
Temp = Temp[,-dim(Temp)[2]]
# Put the tidy data in its final object.
Tidy = Temp
# Descriptive names for the activities/variables in the data set.
# NOTE(review): several names appear twice (e.g. t.Body.Acceleration.Magnitude
# and t.Body.Gyro.Magnitude), mirroring the duplicated column selection above
# — confirm before relying on name uniqueness.
english.names = c("Activity","ID",
                  "t.Body.Acceleration.Mean.X","t.Body.Acceleration.Mean.Y","t.Body.Acceleration.Mean.Z",
                  "t.Body.Acceleration.Std.X","t.Body.Acceleration.Std.Y","t.Body.Acceleration.Std.Z",
                  "t.Gravity.Acceleration.Mean.X","t.Gravity.Acceleration.Mean.Y","t.Gravity.Acceleration.Mean.Z",
                  "t.Gravity.Acceleration.Std.X","t.Gravity.Acceleration.Std.Y","t.Gravity.Acceleration.Std.Z",
                  "t.Body.Acceleration.Jerk.Mean.X","t.Body.Acceleration.Jerk.Mean.Y","t.Body.Acceleration.Jerk.Mean.Z",
                  "t.Body.Acceleration.Jerk.Std.X","t.Body.Acceleration.Jerk.Std.Y","t.Body.Acceleration.Jerk.Std.Z",
                  "t.Body.Gyro.Mean.X","t.Body.Gyro.Mean.Y","t.Body.Gyro.Mean.Z",
                  "t.Body.Gyro.Std.X","t.Body.Gyro.Std.Y","t.Body.Gyro.Std.Z",
                  "t.Body.Gyro.Jerk.Mean.X","t.Body.Gyro.Jerk.Mean.Y","t.Body.Gyro.Jerk.Mean.Z",
                  "t.Body.Gyro.Jerk.Std.X","t.Body.Gyro.Jerk.Std.Y","t.Body.Gyro.Jerk.Std.Z",
                  "t.Body.Acceleration.Magnitude.Mean","t.Body.Acceleration.Magnitude.Std",
                  "t.Gravity.Acceleration.Magnitude.Mean","t.Gravity.Acceleration.Magnitude.Std",
                  "t.Body.Acceleration.Magnitude.Mean","t.Body.Acceleration.Magnitude.Std",
                  "t.Body.Gyro.Magnitude.Mean","t.Body.Gyro.Magnitude.Std",
                  "t.Body.Gyro.Magnitude.Mean","t.Body.Gyro.Magnitude.Std",
                  "f.Body.Acceleration.Mean.X","f.Body.Acceleration.Mean.Y","f.Body.Acceleration.Mean.Z",
                  "f.Body.Acceleration.Std.X","f.Body.Acceleration.Std.Y","f.Body.Acceleration.Std.Z",
                  "f.Body.Acceleration.Jerk.Mean.X","f.Body.Acceleration.Jerk.Mean.Y","f.Body.Acceleration.Jerk.Mean.Z",
                  "f.Body.Acceleration.Jerk.Std.X","f.Body.Acceleration.Jerk.Std.Y","f.Body.Acceleration.Jerk.Std.Z",
                  "f.Body.Gyro.Mean.X","f.Body.Gyro.Mean.Y","f.Body.Gyro.Mean.Z",
                  "f.Body.Gyro.Std.X","f.Body.Gyro.Std.Y","f.Body.Gyro.Std.Z",
                  "f.Body.Acceleration.Magnitude.Mean","f.Body.Acceleration.Magnitude.Std",
                  "f.Body..Magnitude.Mean","f.Body..Magnitude.Std",
                  "f.Body.Gyro.Magnitude.Mean","f.Body.Gyro.Magnitude.Std",
                  "f.Body.Gyro.Magnitude.Mean","f.Body.Gyro.Magnitude.Std")
# Apply the new names to the datasets.
# NOTE(review): english.names starts c("Activity","ID",...) while PTD was
# built in (ID, Activity, ...) order, so names(PTD) swaps its first two
# labels. Only Tidy (whose first column IS Activity after the merge) is
# written out, so the defect is latent — verify before reusing PTD.
names(Tidy) = english.names
names(PTD) = english.names
# Order the tidy data in a more intuitive form (by subject, then activity).
Tidy = Tidy[ order(Tidy[,2], Tidy[,1]), ]
# Write the tidy data into the working directory.
write.table(Tidy, "TidyData.txt", sep="\t")
|
75c006ac2a6cbc6cc43d2b65794e18107b385c02 | 094f81c31a3cfd560b24280e476d5af4fb52b9e3 | /R/t0.R | 2854abc4cada1f5b3976215456e2b9c19e60ffd0 | [
"MIT"
] | permissive | PJOssenbruggen/Basic | 6c2343dcb135cb364d059160925ded5cb43b5455 | 1885fa40d3318cc554b4dd80154b263baef19ac4 | refs/heads/master | 2021-01-25T11:57:19.583401 | 2019-01-04T13:03:32 | 2019-01-04T13:03:32 | 123,449,454 | 0 | 0 | null | 2018-03-05T12:26:55 | 2018-03-01T14:56:48 | R | UTF-8 | R | false | false | 132 | r | t0.R | #' \code{t0} is the time the analysis is initiated.
#'
#' Starting time used as the reference point for the analysis.
#'
#' @format A time value:
#' \describe{
#' \item{t0}{time, measured in seconds.}
#' }
"t0"
|
1b304b154dde9e02445d66c83ce346810dd7d573 | a62af7d68dba1b91bfc3c67ce612db75b1cf8b5a | /Run_analysis.R | 6cf802cc94f4519258d05589a4c53f22aa95795a | [] | no_license | spsuamin/Making-Tidy-Data | ce5d2d2eceed755884918b557de96d0ef6779bcc | 535b55a069ae46dcb422a113568392e16aa25b5a | refs/heads/master | 2020-05-09T14:54:04.555269 | 2019-04-13T20:35:05 | 2019-04-13T20:35:05 | 181,212,395 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,138 | r | Run_analysis.R | #Ask user to select the directory containing the UCH HAR Dataset
# Prompt for the dataset root directory.
# NOTE(review): choose.dir() is Windows-only, and in a non-interactive
# session `filepath` stays NULL, so setwd() below would fail — confirm the
# intended platforms.
filepath <- if (interactive())
  choose.dir(getwd(),"Select the root UCI HAR Dataset Folder")
setwd(filepath)
# Read metadata: activity labels and measurement feature names.
activity_labels <- read.delim(file = "activity_labels.txt",header = FALSE) #Reads the activity labels
feature_labels <- read.delim(file = "features.txt",header = FALSE) #Reads the measurement feature labels
# Find the features that are mean or std calculations on the measurements.
# (Note: `filter` shadows stats::filter for the rest of the session.)
filter<-grep("-(mean|std)",feature_labels$V1)
#grep("-(mean|std)",feature_labels$V1,value=TRUE) #Uncomment this line to inspect which features are mean or std.
# Read the training dataset.
setwd("./train")
train_subject_data <- read.delim(file = "subject_train.txt",header=FALSE,col.names = "subjectID") #Subject Identifier
train_x_data<-read.delim(file = "X_train.txt",header=FALSE,sep="") #Feature Vector Data
train_y_data<-read.delim(file = "y_train.txt",header=FALSE,col.names = "activityLabel") #Activity Identifier
setwd("..")
# Read the test dataset.
setwd("./test")
test_subject_data <- read.delim(file = "subject_test.txt",header=FALSE,col.names = "subjectID") #subject identifier
test_x_data<-read.delim(file = "X_test.txt",header = FALSE,sep = "") #feature vector data
test_y_data<-read.delim(file = "y_test.txt",header=FALSE,col.names = "activityLabel") # activity identifier
# Merge the training and test datasets.
combine_subject <-rbind(train_subject_data,test_subject_data) #merge the subject identifier data
combine_y_data <-rbind(train_y_data,test_y_data) #merge the activity identifier data
activityLabel <-factor(combine_y_data$activityLabel)
levels(activityLabel) <- activity_labels$V1 #replace activity codes with descriptive activity labels
combine_y_data3 <-as.data.frame(activityLabel)
combine_x_data <-rbind(train_x_data,test_x_data) #merge the feature vector data
# Extract the mean and standard deviation columns for each measurement.
combine_x_data <-combine_x_data[,filter]
names(combine_x_data)<-grep("-(mean|std)",feature_labels$V1,value=TRUE) #apply the feature names as column labels
# Rename the working tables; `Obs` is a temporary row key used to join the
# three tables back together below.
test_subjects <-combine_subject
test_subjects$Obs <- seq.int(nrow(test_subjects))
test_activity <-combine_y_data3
test_activity$Obs <-seq.int(nrow(test_activity))
test_featuredata <- combine_x_data
test_featuredata$Obs <- seq.int(nrow(test_featuredata))
# Combine the variables into one dataframe (joined on Obs), then drop the key.
library(plyr)
dfList=list(test_subjects,test_activity,test_featuredata)
dataset<-join_all(dfList)
dataset$Obs <- NULL
head(dataset) #display the first rows of the dataset
tail(dataset) #display the last rows of the dataset
setwd(filepath)
write.csv(dataset, file = "tidyHRAdataset.csv")
write.table(dataset,file = 'tidyHRAdataset.txt',row.names = FALSE)
# Calculate the average of each feature grouped by activity and subject.
library(dplyr)
dataset2 <- dataset %>%
  group_by(activityLabel,subjectID) %>%
  summarise_all(mean)
setwd(filepath)
write.csv(dataset2, file = "tidy2HRAdataset.csv")
|
29d5331d7b222ed6802c2583b825759f50f044f6 | 0ca78ef5a8670fbdab55409eecda579cec2baf68 | /trade/SNPACKAGE/R/SnReport.R | 591d863d325fc74343cf285d0e6f28980c51cb97 | [] | no_license | zhurui1351/RSTOCK_TRAIL | ab83fdef790778a1e792d08a876522ef13a872e6 | 2396c512c8df81a931ea3ca0c925c151363a2652 | refs/heads/master | 2021-01-23T09:01:36.814253 | 2019-05-17T15:26:19 | 2019-05-17T15:26:19 | 23,482,375 | 9 | 4 | null | null | null | null | UTF-8 | R | false | false | 3,043 | r | SnReport.R | SnReport <- function(priceData,confProp=0.55,conf=0.05,tradeDays=5,prune=5,profitratio=0.2,drawdownratio=-0.1,initEq = 100000,path='',type='iw',computePosition = fixPosition(size=1000))
{
  # Body of SnReport: mines sequence rules from price data, back-tests each
  # candidate trade rule, keeps only those meeting the profit/drawdown
  # thresholds, and optionally writes a per-rule report directory under `path`.
  # Returns a list of list(snrule, perfomance, txns) for the surviving rules,
  # or list(NULL) when no trade rules were generated.
  snrules = SNRules(priceData,confProp=confProp,conf=conf,tradeDays=tradeDays,prune=prune,type=type)
  traderules = getTradeRules(snrules)
  reports = list(NULL)
  reportIndex = 1
  if(length(traderules) == 0)
    return(list(NULL))
  for(i in 1:length(traderules))
  {
    traderule = traderules[[i]]$traderule
    print("now testing: ")
    print(traderule)
    print(type)
    # Back-test this rule; `report$totalStates` holds the aggregate statistics.
    report = backTestBySnRule(traderule$rule,priceData,tradeDays=tradeDays,traderule$buyday,traderule$sellday,short = traderule$short,verbose=FALSE,initEq=initEq,type=type,computePosition=computePosition)
    perfomance = report$totalStates
    # Ratios are relative to initial equity.
    pratio = perfomance$End.Equity / initEq
    ddownRatio = perfomance$Max.Drawdown / initEq
    print(path)
    print(i)
    print("rule:")
    print(traderule)
    print("profit ratio:")
    print(pratio)
    print("max drawndown ratio:")
    print(ddownRatio)
    # Rule fails the acceptance criteria -- skip it.
    if(pratio < profitratio || ddownRatio < drawdownratio)
    {
      next
    }
    else
    {
      reports[[reportIndex]] = list(snrule=snrules[[i]],perfomance = perfomance,txns=report$txns)
      reportIndex = reportIndex + 1
      # Generate the back-test report files when an output path was supplied.
      if(path != '')
      {
        # Must match the portfolio name used inside backTestBySnRule;
        # used here to draw the equity curve.
        strategy = "mydata";
        subdir= paste(path,"/",i,sep="")
        # Create the report directory and the per-rule subdirectory.
        if(!file.exists(path))
        {
          dir.create(path,recursive=T)
        }
        if(!file.exists(subdir))
        {
          dir.create(subdir,recursive=T)
        }
        picname = paste(subdir,"/equitcurve.jpg",sep="")
        jpeg(picname)
        chart.Posn(Portfolio=strategy,Symbol=strategy)
        dev.off()
        # Transaction details.
        txnsName = paste(subdir,"/txns.csv",sep="")
        write.zoo(report$txns,txnsName)
        # Per-trade statistics.
        pername = paste(subdir,"/per.csv",sep="")
        write.csv(report$perState,pername)
        # Summary file.
        sumname = paste(subdir,"/summary.txt",sep="")
        write("conditon: ",sumname)
        write(traderule$rule,sumname,append=T)
        write("buyday: ",sumname,append=T)
        write(traderule$buyday,sumname,append=T)
        write("sellday: ",sumname,append=T)
        write(traderule$sellday,sumname,append=T)
        write("short: ",sumname,append=T)
        write(traderule$short,sumname,append=T)
        write("profit ratio: ",sumname,append=T)
        write(pratio,sumname,append=T)
        write("max drawdown ratio: ",sumname,append=T)
        write(ddownRatio,sumname,append=T)
        write("percent positive: ",sumname,append=T)
        write(perfomance$Percent.Positive,sumname,append=T)
        write.csv(perfomance,paste(subdir,"/performance.csv",sep=""))
        # NOTE(review): .blotter appears to be the blotter package's global
        # environment populated during the back-test -- confirm before relying
        # on its contents here.
        save(.blotter,file = paste(subdir,"/blotter",sep=""))
      }
    }
  }
  return(reports)
}
ea456117ded648e15d224826c25052794d461a3a | edbfaab41041f855e0cd0ef25ce0f0adefac80b6 | /Week-13/Anusha_Week13.R | c08f5f8ca95eac3200ec143309a81681d2aec5fe | [
"MIT"
] | permissive | mlammens/PermuteSeminar-2014 | f41a7226155534cb0cccf536a4cf6f11253b3b73 | a8bb2ecc16b733362ca9f875711ef98019d55e96 | refs/heads/master | 2021-01-25T00:28:41.910569 | 2014-05-13T11:38:03 | 2014-05-13T11:38:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 770 | r | Anusha_Week13.R | library(vegan)
# Set working directory (machine-specific Windows path)
setwd("C://Users\\Anusha\\Documents\\GitHub\\PermuteSeminar-2014\\Week-13")
# Species-by-site table and environmental table; first column used as row names
svob <- read.csv("Svoboda_Supp.T2.csv", row.names=1)
env <- read.csv("Svoboda_T2_csv.csv", row.names=1)
head(svob)
head(env)
# Add column to env data to define swamp vs. bog
# NOTE(review): rows 1 and 10 are dropped before labeling -- presumably
# header/blank rows in the CSV; confirm against the raw file.
env <- env[-1,]
env <- env[-10,]
env$Type <- 0
env$Type[1:9] <- "Swamps"
env$Type[10:18] <- "Bogs"
head(env)
# n is the number of samples being considered
#n <- nrow(svob)
#M <- n*(n-1)/2
#swamps <- svob[,1:9]
#bogs <- svob[,10:18]
## Using vegdist of the vegan package to calculate Bray-Curtis dissimilarity index
## (binary = TRUE: presence/absence, i.e. Sorensen form)
svob_bray <- vegdist(x=svob,method="bray",binary=T, na.rm=T)
# NOTE(review): attach()/detach() are unnecessary here since env$Type is
# referenced explicitly; attach() is generally discouraged.
attach(env)
# ANOSIM: test whether dissimilarity between groups exceeds within groups
sites.ano <- anosim(svob_bray, env$Type)
summary(sites.ano)
detach(env)
plot(sites.ano)
d91263bec0bfdeea60af8e651a56df69ebe61b83 | 2c38fc71287efd16e70eb69cf44127a5f5604a81 | /R/class_fst.R | b88d2d5d2e318806f53c70fd7e0cfd0b3733a42a | [
"MIT",
"Apache-2.0"
] | permissive | ropensci/targets | 4ceef4b2a3cf7305972c171227852338dd4f7a09 | a906886874bc891cfb71700397eb9c29a2e1859c | refs/heads/main | 2023-09-04T02:27:37.366455 | 2023-09-01T15:18:21 | 2023-09-01T15:18:21 | 200,093,430 | 612 | 57 | NOASSERTION | 2023-08-28T16:24:07 | 2019-08-01T17:33:25 | R | UTF-8 | R | false | false | 692 | r | class_fst.R | #' @export
store_class_format.fst <- function(format) {
  # S3 class vector for "fst"-format stores: the fst-specific class first,
  # followed by the generic store class.
  classes <- c("tar_fst", "tar_store")
  classes
}
#' @export
store_assert_format_setting.fst <- function(format) {
  # Intentionally a no-op: the plain "fst" format needs no extra validation.
}
#' @export
store_read_path.tar_fst <- function(store, path) {
  # Read the stored value back from its on-disk fst file as a data frame.
  fst::read_fst(path)
}
#' @export
store_write_path.tar_fst <- function(store, object, path) {
  # Resolve the compression level: format-specific resource setting first,
  # then the generic resource setting, then a default of 50.
  # (%|||% is the package's "first non-null" operator.)
  compress <- store$resources$fst$compress %|||%
    store$resources$compress %|||%
    50
  # compress must be a single double before handing it to fst.
  tar_assert_dbl(compress)
  tar_assert_scalar(compress)
  fst::write_fst(x = object, path = path, compress = compress)
}
#' @export
store_convert_object.tar_fst <- function(store, object) {
  # Coerce the value to a plain data frame so it can be written with
  # fst::write_fst() by the corresponding store_write_path method.
  converted <- as.data.frame(object)
  converted
}
#' @export
store_get_packages.tar_fst <- function(store) {
  # Reading and writing this store requires the fst package at runtime.
  required_pkg <- "fst"
  required_pkg
}
|
359d67d964ed9046a8eea9a957561fe5a91bde07 | e649f291b0937f09b616411ed39d91e4f86218ef | /calculateSteps.r | 75cfe52aab8465dd7ffd5e130e5bdc362573e913 | [] | no_license | moocparticipant/RepData_PeerAssessment1 | 2e02b4faa3d92b284347a6ed2e56f09231a5679f | 2cae23436ca7a0684cd7b98b40c124b598675a4b | refs/heads/master | 2021-01-12T22:38:38.003112 | 2015-07-20T11:30:28 | 2015-07-20T11:30:28 | 39,078,694 | 0 | 0 | null | 2015-07-14T14:12:04 | 2015-07-14T14:12:04 | null | UTF-8 | R | false | false | 759 | r | calculateSteps.r | #This code takes a data table and calculates the total steps
#steps, date,interval are the columns in the data table
library(plyr)
calculateSteps <- function (data) {
  # Compute the total number of steps per day.
  #
  # Args:
  #   data: a data frame with columns steps, date (and optionally interval).
  # Returns:
  #   a data frame with columns date and sum (total steps on that date).
  #
  # Select by name rather than position so the function no longer depends on
  # steps/date being the first two columns (per the file header, the columns
  # are steps, date, interval).
  dateSteps <- data[, c("steps", "date")]
  # Drop rows with a missing value in either column.
  NARemovedSteps <- dateSteps[complete.cases(dateSteps), ]
  # Total steps per date. base::aggregate replaces the previous plyr::ddply
  # call, removing the plyr dependency while keeping the same output columns.
  DailyTotal <- aggregate(steps ~ date, data = NARemovedSteps, FUN = sum)
  names(DailyTotal)[names(DailyTotal) == "steps"] <- "sum"
  # Return the result explicitly (the original returned it invisibly because
  # its last expression was an assignment).
  DailyTotal
}
StepsHistogram <- function (DailyTotal){
  #takes the daily total (output of calculateSteps) and prints a histogram
  #of daily step totals; called purely for its plotting side effect.
  hist(DailyTotal$sum, xlab = "Daily Total", ylab = "Frequency", main = "Histogram of Daily Total of Steps" )
}
calculateMean <- function (DailyTotal){
  # Mean of the daily step totals produced by calculateSteps().
  daily_totals <- DailyTotal$sum
  mean(daily_totals)
}
calculateMedian <- function (DailyTotal) {
  # Median of the daily step totals produced by calculateSteps().
  daily_totals <- DailyTotal$sum
  median(daily_totals)
}
e4b3daa79dd5063376f2e32d331425883c5557bc | 2001bb778b621ff646b2951c3b9e6995f8d40c2f | /below5_sum.R | 12edeb41ec6c047793f7e43867baf69956921971 | [] | no_license | emelaas/landsat_ARD | 15d8238ec04026634eb8a81207cbf33a7a682202 | b2c4beb007349cb80d42be52e3df87e4b38645e7 | refs/heads/master | 2021-04-29T23:46:20.607859 | 2018-08-28T21:56:29 | 2018-08-28T21:56:29 | 121,563,656 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,221 | r | below5_sum.R | ##This script will generate long-term mean and annual 32km maps of spring and autumn phenology
##for a given Landsat overlap scene: classifies each 1km Daymet pixel by how
##many calendar months have long-term (1982-2017) mean temperature below 5 C.
system.time({
library(ncdf4)
require(rgdal)
library(raster)
library(foreach)
library(iterators)
library(doParallel)
#Register the parallel backend with 16 workers
registerDoParallel(16)
# Tile name (e.g. "h18v16") comes from the command line in batch runs.
args = commandArgs(trailingOnly=T)
tile_name = args[1]
#tile_name <- "h18v16"
#Load observed, 1km data
setwd(paste('/projectnb/modislc/projects/landsat_sentinel/ARD/',tile_name,'/PHENO_1KM/',sep=''))
load(file = "landsat2daymet")
#Native Daymet projection (Lambert conformal conic)
daymet_crs <- CRS("+proj=lcc +lon_0=-100 +lat_0=42.5 +x_0=0 +y_0=0 +a=6378137 +rf=298.257223563 +lat_1=25 +lat_2=60")
#Import overlap shapefile
o <- readOGR(paste('/projectnb/modislc/projects/landsat_sentinel/ARD/',
  tile_name,'/SHP/',sep=""),tile_name)
#Reproject overlap shapefile into the Daymet CRS so extents line up
o_reproj <- spTransform(o,daymet_crs)
# One iteration per year: daily mean temperature per pixel within the overlap.
# Each iteration returns a 365 x (2 + npixels) data frame (year, month, pixels);
# rows are stacked with rbind across years.
tmean_all <- foreach(yr = 1982:2017, .combine = rbind) %dopar% {
  print(yr)
  setwd('/projectnb/modislc/data/daymet')
  tmax <- brick(paste('daymet_v3_tmax_',yr,'_na.nc4',sep=''),var='tmax')
  tmin <- brick(paste('daymet_v3_tmin_',yr,'_na.nc4',sep=''),var='tmin')
  tmax.crop <- crop(tmax,extent(o_reproj))
  tmin.crop <- crop(tmin,extent(o_reproj))
  tmean <- getValues((tmax.crop+tmin.crop)/2)
  # Daymet uses a fixed 365-day calendar, so truncate leap years to 365 days.
  time <- seq(as.Date(paste(yr,"/1/1",sep='')), as.Date(paste(yr,"/12/31",sep='')), "days")
  time <- time[1:365]
  month <- as.numeric(substr(time,6,7))
  year <- as.numeric(substr(time,1,4))
  tmean <- data.frame(year,month,t(tmean))
}
# Long-term monthly means across all years, per pixel.
month <- tmean_all[,2]
monthly_tmean <- aggregate(tmean_all,list(month),FUN=mean)
# Drop the grouping, year and month columns, leaving one column per pixel.
monthly_tmean <- monthly_tmean[,-c(1:3)]
# Recode each month: 1 if mean temperature < 5 C, else 0.
monthly_tmean[monthly_tmean < 5] <- 1
monthly_tmean[monthly_tmean >= 5] <- 0
# Per pixel: number of months (0-12) with mean temperature below 5 C.
below5_sum <- as.numeric(apply(monthly_tmean,2,sum))
# Classify pixels: 1 = fewer than 3 cold months, 2 = 3-5, 3 = 6 or more.
group <- matrix(NA,length(below5_sum),1)
group[below5_sum < 3] <- 1
group[below5_sum >= 3 & below5_sum < 6] <- 2
group[below5_sum >=6] <- 3
setwd(paste('/projectnb/modislc/projects/landsat_sentinel/ARD/',tile_name,'/PHENO_1KM/',sep=''))
save(group,below5_sum,file = 'below5_sum')
})
|
331a5d4ba3f7302a56b1d00c9093b8e403dcff7b | f7de34edd958d2a9ae3d39106b97e943bc5c052b | /Imputation_cleaning/hospital_discharge_date_cleaning.R | aadd930e512689beef1425d5f9be00fd20aca4a4 | [] | no_license | georgeannie/VHS-practicum-code | eccf057bdb0d300fd93c6bc81c48440964392d04 | 175de3dc2e109f601c4e42e5c301b10f4ea89d67 | refs/heads/master | 2020-04-27T10:23:21.455876 | 2019-05-22T23:25:15 | 2019-05-22T23:25:15 | 174,251,626 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,046 | r | hospital_discharge_date_cleaning.R | #######################################################################################
# CLEAN UP HOSPITAL DISCHARGE DATE. USE THE HOSPITAL DISCHARGE DATE AND #
# ED ADMISSION DATE TO FIND THE NUMBER OF DAYS IN THE HOSPITAL. #
# IF HOSPITAL DISCHARGE DISPOSITION IS DECEASED THEN REPLACE ED DISPOSITION WITH#
# DECEASED. USE THE NUMBER OF DAYS, ED DISPOSITION AND TIME(MAYBE) TO DETERMINE #
# OUTCOME - NEED A HELICOPTER? #
#######################################################################################
# Load required packages.
library(janitor)
library(lubridate)
library(hms)
library(tidyr)
library(stringr)
library(readr)
library(forcats)
library(RcppRoll)
library(dplyr)
library(tibble)
library('RPostgreSQL')
library(lazyeval)
# aws_rds_access.R defines host/user/password used in dbConnect below.
source("/home/rstudio/R/VHS_github/VHS-practicum-code/aws_rds_access.R")
source("/home/rstudio/R/VHS_github/VHS-practicum-code/Imputation_cleaning/function_impute_zip_ems_facility.R")
pg = dbDriver("PostgreSQL")
con=dbConnect(pg,
              dbname = "vhs",
              host=host,
              user = user,
              password = password)
# Work on a copy (clean_trauma1); the cleaned result is written back at the end.
clean_trauma=dbGetQuery(con, 'select * from clean_trauma')
clean_trauma1=clean_trauma
#------------------------------------------------------------------------#
# CONVERT DISCHARGE DATE, ADMISSION DATE, INCIDENT DATE TO MDY FORMAT #
# POPULATE HOSPITAL DISCHARGE DATE WHICH IS NULL USING HOSPITAL DISCHARGE#
# ORDER WRITTEN #
#------------------------------------------------------------------------#
clean_trauma1 = clean_trauma1 %>%
  mutate(hospital_discharge_date_tr25_34 = mdy(hospital_discharge_date_tr25_34),
         ed_acute_care_discharge_date_tr17_25 = mdy(ed_acute_care_discharge_date_tr17_25),
         ed_acute_care_admission_date_tr18_55 = mdy(ed_acute_care_admission_date_tr18_55),
         incident_date_tr5_1 = mdy(incident_date_tr5_1),
         hospital_discharge_orders_written_date_tr25_93 =
           mdy(hospital_discharge_orders_written_date_tr25_93))
#------------------------------------------------------------------------#
# REPLACE ALL MISSING HOSPITAL DISCHARGE DATE WITH HOSPITAL DISCHARGE #
# ORDERS WRITTEN DATE IF AVAILABLE AND ENSURE THAT IT IS AVAILABLE IN AS #
# CLEAN_TRAUMA #
#------------------------------------------------------------------------#
filter_discharge_date = clean_trauma1 %>%
  filter(is.na(hospital_discharge_date_tr25_34)) %>%
  mutate(hospital_discharge_date_tr25_34=
           coalesce(hospital_discharge_date_tr25_34, hospital_discharge_orders_written_date_tr25_93))
# Swap the repaired rows back into the working table (pattern repeated below).
clean_trauma1= clean_trauma1 %>%
  filter(!incident_id %in% filter_discharge_date$incident_id) %>%
  rbind(filter_discharge_date)
#------------------------------------------------------------------------#
#9695 RECORDS NEEDS TO BE FILLED WITH DISCHARGE DATE TO GET THE NUMBER OF#
# DAYS IN THE HOSPITAL AND IT SHOULD BE ON OR AFTER THE ED ADMISSION DATE#
# (record counts in these banners reflect the dataset at time of writing)#
#------------------------------------------------------------------------#
no_discharge= clean_trauma1 %>%
  filter(is.na(hospital_discharge_date_tr25_34))
#------------------------------------------------------------------------#
#47 OF THESE CASES HAVE ADMISSION DATE BEFORE THE INCIDENT DATE AND SINCE#
# MAJORITY OF THESE CASES ARE TRANSFERS, THE DISCHARGE DATE AND ADMISSION#
# DATE WILL BE SET AS THE INCIDENT DATE #
#------------------------------------------------------------------------#
filter0= no_discharge %>%
  filter(ed_acute_care_admission_date_tr18_55 < incident_date_tr5_1) %>%
  mutate(hospital_discharge_date_tr25_34 = incident_date_tr5_1,
         ed_acute_care_admission_date_tr18_55 = incident_date_tr5_1)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter0$incident_id,]
#------------------------------------------------------------------------#
# 511 OF THESE CASES HAVE ED DISPOSITION OF DECEASED. HENCE SET DISCHARGE#
# DATE AS ADMISSION DATE #
#------------------------------------------------------------------------#
filter1 = no_discharge%>%
  filter(ed_acute_care_disposition_tr17_27 == "Deceased/Expired" ) %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_admission_date_tr18_55)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter1$incident_id,]
#------------------------------------------------------------------------#
#4771 OF THE REMAINING CASES WERE TRANSFERRED TO ANOTHER HOSPITAL WITH ED#
# DISCHARGE DATE OCCURING ON SAME OR AFTER DAY OF ADMISSION OR ON THE #
# SAME DAY AS INCIDENT #
#------------------------------------------------------------------------#
filter2 = no_discharge%>%
  filter(ed_acute_care_disposition_tr17_27 == "Transferred to another hospital" &
           ed_acute_care_discharge_date_tr17_25 >= ed_acute_care_admission_date_tr18_55 &
           ed_acute_care_admission_date_tr18_55 >= incident_date_tr5_1 ) %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_discharge_date_tr17_25)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter2$incident_id,]
#------------------------------------------------------------------------#
#2098 OF THE REMAINING CASES WERE TRANSFERRED TO ANOTHER HOSPITAL WITH NO#
# ED DISCHARGE BUT THE ADMISSION DATE OCCURING ON SAME OR AFTER #
# INCIDENT DATE. SET DISCHARGE DATE AS ADMISSION DATE #
#------------------------------------------------------------------------#
filter3 = no_discharge%>%
  filter(ed_acute_care_disposition_tr17_27 == "Transferred to another hospital" &
           is.na(ed_acute_care_discharge_date_tr17_25) &
           ed_acute_care_admission_date_tr18_55 >= incident_date_tr5_1 ) %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_admission_date_tr18_55)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter3$incident_id,]
#------------------------------------------------------------------------#
#31 OF THE REMAINING CASES WERE DECEASED AND HAD ICU DAYS GREATER THAN #
# EQUAL TO ZERO AND ALSO SATISFIED ADMISSION DATE ON OR AFTER INCIDENT #
# SET DISCHARGE DATE AS ADMISSION PLUS ICU DAYS #
#------------------------------------------------------------------------#
filter4 = no_discharge%>%
  filter(icu_days_total_tr26_9 >= 0 &
           hospital_discharge_disposition_tr25_27 == "Deceased/Expired" &
           ed_acute_care_admission_date_tr18_55 >= incident_date_tr5_1 ) %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_admission_date_tr18_55 +
           as.integer(icu_days_total_tr26_9))
no_discharge=no_discharge[!no_discharge$incident_id %in% filter4$incident_id,]
#------------------------------------------------------------------------#
#15 OF THE REMAINING CASES WERE DECEASED HAD ADMISSION DATE ON OR AFTER #
# THE INCIDENT DATE. SET DISCHARGE DATE AS ADMISSION #
#------------------------------------------------------------------------#
filter5 = no_discharge%>%
  filter(hospital_discharge_disposition_tr25_27 == "Deceased/Expired" &
           ed_acute_care_admission_date_tr18_55 >= incident_date_tr5_1 ) %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_admission_date_tr18_55)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter5$incident_id,]
#------------------------------------------------------------------------#
#272 OF THE REMAINING CASES HAVE SOME ICU DAYS AND ADMISSION DATE ON OR #
# AFTER THE INCIDENT DATE. SET DISCHARGE DATE AS ADMISSION DATE + ICU DAY#
#------------------------------------------------------------------------#
filter6 = no_discharge%>%
  filter(!is.na(icu_days_total_tr26_9)) %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_admission_date_tr18_55 + icu_days_total_tr26_9)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter6$incident_id,]
#------------------------------------------------------------------------#
#246 OF THE REMAINING CASES HAVE ED ACUTE DISCHARGE DATE ON OR AFTER THE #
# ADMISSION DATE. SET DISCHARGE DATE AS ED DISCHARGE DATE #
#------------------------------------------------------------------------#
filter7 = no_discharge%>%
  filter(ed_acute_care_discharge_date_tr17_25 >= ed_acute_care_admission_date_tr18_55) %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_discharge_date_tr17_25)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter7$incident_id,]
#------------------------------------------------------------------------#
#1704 OF THE REMAINING CASES ARE MAJORITY WITH DISPOSITION HOME WITHOUT #
# SERVICES AND AMA AND OTHER. VERY FEW ARE OPERATING ROOM. HENCE SET #
# DISCHARGE DATE AS ADMISSION DATE #
#------------------------------------------------------------------------#
filter8 =no_discharge%>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_admission_date_tr18_55)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter8$incident_id,]
#------------------------------------------------------------------------#
#COMBINE ALL FILTERS AND REPLACE THE ROWS IN CLEAN_TRAUMA #
#------------------------------------------------------------------------#
clean_discharge_date = filter0 %>%
  bind_rows(filter1, filter2, filter3, filter4, filter5, filter6, filter7, filter8)
clean_trauma1 = clean_trauma1 %>%
  filter(!incident_id %in% clean_discharge_date$incident_id) %>%
  rbind(clean_discharge_date)
#PART II: CLEANING HOSPITAL DISCHARGE DATES TO AVOID NEGATIVE DAYS IN HOSPITAL
#------------------------------------------------------------------------#
#28 OBSERVATIONS HAVE DISCHARGE DATE LESS THAN ADMISSION DATE. NEEDS TO #
#BE ADDRESSED TO AVOID NEGATIVE DAYS #
#------------------------------------------------------------------------#
# SOLUTION1: USE YEAR OF ED ADMISSION DATE IN THE DISCHARGE DATE FOR #
# THE DAY AND MONTH FOR THESE RECORDS IS REALISTIC #
#------------------------------------------------------------------------#
# NOTE(review): the rebuilt discharge date is a "YYYY-MM-DD" string compared
# against Date columns below; R coerces when comparing -- confirm intended.
incorrect_discharge_date = clean_trauma1 %>%
  filter(hospital_discharge_date_tr25_34 < ed_acute_care_admission_date_tr18_55) %>%
  separate(hospital_discharge_date_tr25_34,
           into = c("hospital_discharge_date_tr25_34_year",
                    "hospital_discharge_date_tr25_34_month",
                    "hospital_discharge_date_tr25_34_day"),
           convert = FALSE, remove = FALSE) %>%
  separate(ed_acute_care_admission_date_tr18_55,
           into = c("ed_acute_care_admission_date_tr18_55_year",
                    "ed_acute_care_admission_date_tr18_55_month",
                    "ed_acute_care_admission_date_tr18_55_day"),
           convert = FALSE, remove=FALSE) %>%
  mutate(hospital_discharge_date_tr25_34 =
           paste0(ed_acute_care_admission_date_tr18_55_year, "-",
                  str_pad(hospital_discharge_date_tr25_34_month, 2, pad=0), "-",
                  str_pad(hospital_discharge_date_tr25_34_day, 2, pad=0))
  )
#------------------------------------------------------------------------#
# ANALYSIS: 15 RECORDS ARE RECTIFIED #
#------------------------------------------------------------------------#
filter1 = incorrect_discharge_date %>%
  filter(hospital_discharge_date_tr25_34 >= ed_acute_care_admission_date_tr18_55)
#------------------------------------------------------------------------#
# 13 RECORDS STILL NEED TO BE CORRECTED #
# SOLUTION 2: REPLACE ED ADMISSION DATE WITH INCIDENT DATE #
#------------------------------------------------------------------------#
incorrect_discharge_date = incorrect_discharge_date %>%
  filter(hospital_discharge_date_tr25_34 < ed_acute_care_admission_date_tr18_55) %>%
  mutate(ed_acute_care_admission_date_tr18_55 = incident_date_tr5_1)
filter2=incorrect_discharge_date %>%
  filter(hospital_discharge_date_tr25_34 >= ed_acute_care_admission_date_tr18_55)
#------------------------------------------------------------------------#
# RECHECK IF ALL ROWS HAVE BEEN ADDRESSED AND HAS DISCHARGE DATE GREATER #
# THAN OR EQUAL TO ADMISSION DATE. 5 ROWS STILL NEED TO BE ADDRESSED #
# SOLUTION 3: IF HOSPITAL DISCHARGE ORDERS WRITTEN DATE IS ON OR AFTER #
# THE DISCHARGE DATE AND ON OR AFTER THE ADMISSION DATE, THEN USE THE #
# DISCHARGE ORDERS WRITTEN DATE. 2 RECORDS CORRECTED #
#------------------------------------------------------------------------#
incorrect_discharge_date = incorrect_discharge_date %>%
  filter(hospital_discharge_date_tr25_34 < ed_acute_care_admission_date_tr18_55)
filter3=incorrect_discharge_date %>%
  filter(hospital_discharge_orders_written_date_tr25_93 >= hospital_discharge_date_tr25_34 &
           hospital_discharge_orders_written_date_tr25_93 >= ed_acute_care_admission_date_tr18_55) %>%
  mutate(hospital_discharge_date_tr25_34 = hospital_discharge_orders_written_date_tr25_93)
#------------------------------------------------------------------------#
# SOLUTION 4 (FALLBACK): FOR THE ROWS STILL UNRESOLVED, SET THE DISCHARGE#
# DATE EQUAL TO THE ADMISSION DATE SO THE LENGTH OF STAY IS ZERO RATHER #
# THAN NEGATIVE #
#------------------------------------------------------------------------#
incorrect_discharge_date = incorrect_discharge_date %>%
  filter(!incident_id %in% filter3$incident_id)
filter4 = incorrect_discharge_date %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_admission_date_tr18_55)
#------------------------------------------------------------------------#
# REMOVE EXTRA COLUMNS ADDED AND COMBINE WITH ORIGINAL CLEAN_TRAUMA
#------------------------------------------------------------------------#
incorrect_discharge_date = rbind(filter1, filter2, filter3, filter4) %>%
  select(-hospital_discharge_date_tr25_34_year,
         -hospital_discharge_date_tr25_34_month,
         -hospital_discharge_date_tr25_34_day,
         -ed_acute_care_admission_date_tr18_55_year,
         -ed_acute_care_admission_date_tr18_55_month,
         -ed_acute_care_admission_date_tr18_55_day)
clean_trauma1 = clean_trauma1 %>%
  filter(!incident_id %in% incorrect_discharge_date$incident_id) %>%
  rbind(incorrect_discharge_date)
#------------------------------------------------------------------------#
#6 RECORDS STILL HAVE EMPTY DISCHARGE DATE #
#------------------------------------------------------------------------#
no_discharge= clean_trauma1 %>%
  filter(is.na(hospital_discharge_date_tr25_34))
#------------------------------------------------------------------------#
#3 OF THE REMAINING CASES HAVE SOME ICU DAYS AND ADMISSION DATE ON OR #
# AFTER THE INCIDENT DATE. SET DISCHARGE DATE AS ADMISSION DATE + ICU DAY#
#------------------------------------------------------------------------#
filter1 = no_discharge%>%
  filter(!is.na(icu_days_total_tr26_9)) %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_admission_date_tr18_55 + icu_days_total_tr26_9)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter1$incident_id,]
#------------------------------------------------------------------------#
#2 OF THE REMAINING CASES HAVE ED ACUTE DISCHARGE DATE ON OR AFTER THE #
# ADMISSION DATE. SET DISCHARGE DATE AS ED DISCHARGE DATE #
#------------------------------------------------------------------------#
filter2 = no_discharge%>%
  filter(ed_acute_care_discharge_date_tr17_25 >= ed_acute_care_admission_date_tr18_55) %>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_discharge_date_tr17_25)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter2$incident_id,]
#------------------------------------------------------------------------#
#0 OF THE REMAINING CASES ARE MAJORITY WITH DISPOSITION HOME WITHOUT #
# SERVICES AND AMA AND OTHER. VERY FEW ARE OPERATING ROOM. HENCE SET #
# DISCHARGE DATE AS ADMISSION DATE #
#------------------------------------------------------------------------#
filter3 =no_discharge%>%
  mutate(hospital_discharge_date_tr25_34 = ed_acute_care_admission_date_tr18_55)
no_discharge=no_discharge[!no_discharge$incident_id %in% filter3$incident_id,]
#------------------------------------------------------------------------#
#COMBINE ALL FILTERS AND REPLACE THE ROWS IN CLEAN_TRAUMA #
#------------------------------------------------------------------------#
clean_discharge_date = filter1 %>%
  bind_rows(filter2, filter3)
clean_trauma1 = clean_trauma1 %>%
  filter(!incident_id %in% clean_discharge_date$incident_id) %>%
  rbind(clean_discharge_date)
#-------------------------------------------------------------------------------#
# FINALLY CLEAN THE ADMISSION DATE IF IT IS ON OR BEFORE THE INCIDENT DATE BUT #
# DISCHARGE DATE IS ON OR AFTER INCIDENT DATE #
#-------------------------------------------------------------------------------#
filter5=clean_trauma1 %>%
  filter(ed_acute_care_admission_date_tr18_55 <= incident_date_tr5_1 &
           hospital_discharge_date_tr25_34 >=incident_date_tr5_1 ) %>%
  mutate(ed_acute_care_admission_date_tr18_55 = incident_date_tr5_1)
clean_trauma1 = clean_trauma1 %>%
  filter(!incident_id %in% filter5$incident_id) %>%
  rbind(filter5)
# Replace the database table with the cleaned copy and close the connection.
dbSendQuery(con, "drop table clean_trauma")
dbWriteTable(con,c('clean_trauma'), value=clean_trauma1, row.names=FALSE)
dbDisconnect(con)
detach("package:RPostgreSQL", unload=TRUE)
|
fe6221a1c2f560cb591ac34604e71116929aa803 | e6a9d96db33e6cd819e4f1dbd60c80b0eb53a615 | /R/get_real_mgcv.R | 1e887f1833dfd969d352133fdebd63fbe40333ce | [] | no_license | dsjohnson/agTrendTMB | 42384a16831a7f21b4bfe61ad61075b6555cac8f | 403c55da56e891c8126b912ab8958346d7f1d3c8 | refs/heads/master | 2023-01-10T19:30:07.323254 | 2020-11-10T01:19:36 | 2020-11-10T01:19:36 | 307,476,720 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 584 | r | get_real_mgcv.R | #' @title Add realized SSL survey values to the prediction sample
#' @description Overwrite sampled abundance columns with the realized survey
#'   values wherever a survey observation exists. When any multiple-imputation
#'   weight is below 1, oblique-photo counts (\code{obl == 1}) are multiplied
#'   by a stochastic lognormal correction factor; otherwise the observed count
#'   is used as-is for every draw.
#' @param fit A list object from \code{fit_agtrend_ssl}; must contain
#'   \code{mi_data} with column \code{w}, and \code{data} with columns
#'   \code{y} (observed count, NA if unsurveyed) and \code{obl}
#'   (oblique-photo indicator).
#' @param sample A matrix of abundance samples from \code{sample_N}, one
#'   column per survey occasion in the same order as \code{fit$data}.
#' @return \code{sample} with observed columns replaced by realized values.
#' @importFrom stats rnorm
#' @export
get_real_mgcv <- function(fit, sample){
  # Apply the oblique bias correction only when imputation weights show
  # partial/weighted counts are present.
  use_mi <- any(fit$mi_data$w < 1)
  d <- fit$data
  # seq_len() is safe for zero-column matrices (1:ncol would give c(1, 0)).
  for (i in seq_len(ncol(sample))) {
    if (is.na(d$y[i])) {
      # No survey on this occasion: keep the model-based draws.
      next
    }
    if (d$obl[i] == 1 && use_mi) {
      # Oblique count: multiply by a lognormal correction factor per draw.
      # (Constants are log-scale mean/sd of the correction -- TODO confirm
      # their source.)
      sample[, i] <- d$y[i] * exp(rnorm(nrow(sample), 0.03903366, 0.01068773))
    } else {
      # Exact observation: every draw equals the realized count.
      sample[, i] <- d$y[i]
    }
  }
  sample
}
|
c2deacb070d60289ddc0ff0bbd117560d1df7f2f | 494f94e6450afbb0cbeba73ab3e87cfa1eb6d078 | /server/sa_server.R | d3ef16c18470fc51a914fe60fb537ca6dbeb3664 | [] | no_license | cheriels/2017ops_beta | 8b1b134d470686f3f46ccfcc2d6b657b9350fb8d | 337e55007f7c53004e5e1a0b5e05f67296c08284 | refs/heads/master | 2021-01-23T12:45:11.595616 | 2017-12-07T16:20:52 | 2017-12-07T16:20:52 | 93,194,923 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,168 | r | sa_server.R | #----------------------------------------------------------------------------
# "Reset" button: re-select the default set of gage series in the
# situational-awareness checkbox group.
observeEvent(input$reset.sa, {
  updateCheckboxGroupInput(session, "gages.sa",
                           selected = c("por", "mon_jug", "lfalls",
                                        "lfalls_from_upstr", "lfalls_trigger"))
})
#----------------------------------------------------------------------------
# "Clear" button: rebuild the checkbox group with the full list of choices
# but nothing selected (selected = NULL).
observeEvent(input$clear.sa, {
  updateCheckboxGroupInput(session, "gages.sa", "Variables to show:",
                           c("Point of Rocks" = "por",
                             "Monacacy" = "mon_jug",
                             "Little Falls" = "lfalls",
                             "Little Falls (Predicted from upstream gages)" = "lfalls_from_upstr",
                             "Little Falls trigger for drought ops" = "lfalls_trigger"),
                           selected = NULL)
})
#------------------------------------------------------------------------------
# Reactive data frame for the situational-awareness plot: observed flows at
# the three gages plus a Little Falls prediction from lagged upstream gages
# and the drought-ops trigger line. Returns NULL when no data are in range.
sa.df <- reactive({
  # Wait until all upstream reactives are available.
  req(!is.null(daily.reac()),
      !is.null(todays.date()),
      !is.null(start.date()),
      !is.null(end.date()))
  # Pad the window by a week so lag/recession calculations have history.
  start.date <- start.date() - lubridate::days(7)
  # Daily date scaffold (date_frame is a project helper) for the full window.
  date.temp <- date_frame(start.date,
                          end.date(),
                          "days") %>%
    dplyr::mutate(date_time = as.Date(date_time))
  #----------------------------------------------------------------------------
  # Observed daily flows at the three gages up through today.
  sub.df <- daily.reac() %>%
    #select(date_time, lfalls, por, monocacy) %>%
    select(date_time, site, flow) %>%
    dplyr::filter(date_time >= start.date,
                  date_time <= todays.date(),
                  site %in% c("lfalls", "por", "mon_jug"))
  #----------------------------------------------------------------------------
  if (nrow(sub.df) == 0) return(NULL)
  #----------------------------------------------------------------------------
  # Widen, join onto the full date scaffold (so missing days appear as NA),
  # then return to long form.
  sub.df <- sub.df %>%
    tidyr::spread(site, flow) %>%
    dplyr::left_join(date.temp, ., by = "date_time") %>%
    tidyr::gather(site, flow, 2:ncol(.))
  #----------------------------------------------------------------------------
  # recess and lag POR flows (constant_lagk is a project helper; adds a
  # <site>_recess_lag series -- confirm against its definition)
  por.df <- sub.df %>%
    constant_lagk(por, todays.date(), lag.days = 1)
  #----------------------------------------------------------------------------
  # recess and lag Monocacy flows
  mon.df <- por.df %>%
    constant_lagk(mon_jug, todays.date(), lag.days = 1)
  #----------------------------------------------------------------------------
  # Drought-ops trigger: total Potomac withdrawals plus a 100 cfs buffer.
  pot_withdrawals.sub <- withdrawals.reac() %>%
    dplyr::filter(site == "potomac_total") %>%
    dplyr::mutate(flow = flow + 100,
                  site = "lfalls_trigger") %>%
    dplyr::select(site, date_time, flow) %>%
    dplyr::rename(site = site,
                  flow = flow)
  #----------------------------------------------------------------------------
  # Combine, compute the Little Falls prediction as the sum of the lagged
  # upstream series, and return long-form data restricted to non-missing flows.
  final.df <- dplyr::bind_rows(mon.df, pot_withdrawals.sub) %>%
    tidyr::spread(site, flow) %>%
    dplyr::mutate(lfalls_from_upstr = por_recess_lag + mon_jug_recess_lag) %>%
    dplyr::select(date_time, lfalls, por, mon_jug,
                  lfalls_from_upstr, lfalls_trigger) %>%
    tidyr::gather(site, flow, lfalls:lfalls_trigger) %>%
    dplyr::filter(!is.na(flow))
  return(final.df)
})
#----------------------------------------------------------------------------
# Render the SA plot: one line per gage/series, styled per-series below.
output$sa <- renderPlot({
  # Show a friendly message instead of an error when sa.df() has no data.
  validate(
    need(!is.null(sa.df()),
         "No data available for the selected date range. Please select a new date range.")
  )
  #----------------------------------------------------------------------------
  # gen_plots() is a shared helper; the named vectors map series keys to
  # display labels, line sizes, line types, and (colorblind-safe) colors.
  gen_plots(sa.df(),
            start.date(),
            end.date(),
            min.flow = input$min.flow,
            max.flow = input$max.flow,
            gages.checked = input$gages.sa,
            labels.vec = c("lfalls" = "Little Falls",
                           "lfalls_from_upstr" = "Little Falls (Predicted from upstream gages)",
                           "por" = "Point of Rocks",
                           "mon_jug" = "Monacacy",
                           "lfalls_trigger" = "Little Falls trigger for drought ops"),
            linesize.vec = c("lfalls" = 2,
                             "lfalls_from_upstr" = 1.5,
                             "por" = 2,
                             "mon_jug" = 2,
                             "lfalls_trigger" = 1.5),
            linetype.vec = c("lfalls" = "solid",
                             "lfalls_from_upstr" = "dashed",
                             "por" = "solid",
                             "mon_jug" = "solid",
                             "lfalls_trigger" = "dashed"),
            color.vec = c("lfalls" = "#0072B2",
                          "lfalls_from_upstr" = "#56B4E9",
                          "por" = "#E69F00",
                          "mon_jug" = "#9f00e6",
                          "lfalls_trigger" = "#FF0000"),
            x.class = "date",
            y.lab = y.units())
}) # End output$sa
#------------------------------------------------------------------------------
# Notification logic for this tab lives in a separate file.
source("server/sa/sa_notifications.R", local = TRUE)
#------------------------------------------------------------------------------
|
775b20ff20574675549815bc6c8bf68f603d4077 | 9f514382a9a713f2f68ed7a6fad77221283760ee | /Course 4_Wk1_HW.R | 35d4a8f1ee38a58b1fcda85d84133654c60df6e3 | [] | no_license | marends007/ExData_Plotting1 | fca1864baede196fc7416be4923d1c1e4d9ce13c | 935fc2a350db9d82478ac391dedc0a5139b9de08 | refs/heads/master | 2021-03-22T15:08:31.928694 | 2017-10-19T01:07:20 | 2017-10-19T01:07:20 | 107,031,726 | 0 | 0 | null | 2017-10-15T17:04:30 | 2017-10-15T17:04:29 | null | UTF-8 | R | false | false | 2,557 | r | Course 4_Wk1_HW.R | install.packages("readtext")
library(readtext)
readtext("~/Course Trainings/John Hopkins Data Science Boot Camp/JHU Working Directory/household_power_consumption.txt")
powerConsumption = read.table("household_power_consumption.txt")
library(data.table)
file_power <- fread("household_power_consumption.txt", na.strings = "?")
## set time variable
power_Household <- file_power[file_power$Date %in% c("1/2/2007","2/2/2007"),]
SetTime <-strptime(paste(finalData$Date, finalData$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
power_Household <- cbind(SetTime, finalData)
## Plot 1: Global Active Power
png(filename = "plot1")
hist(power_Household$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
## Plot 2:
plot(power_Household$SetTime, power_Household$Global_active_power, type="l", col="black", xlab="", ylab="Global Active Power (kilowatts)")
## Plot 3
par(mar = c(5, 4, 1, 1))
columnlines <- c("black", "red", "blue")
labels <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
plot(power_Household$SetTime, power_Household$Sub_metering_1, type = "l", col = columnlines[1], xlab = "", ylab = "Energy Sub Metering")
lines(power_Household$SetTime, power_Household$Sub_metering_2, col=columnlines[2])
lines(power_Household$SetTime, power_Household$Sub_metering_3, col=columnlines[3])
legend("topright", legend = labels, col=columnlines, lty = "solid")
## Plot 4
par(mar = c(3, 2, 1, 1), mfrow = c(2, 2))
plot(power_Household$SetTime, power_Household$Global_active_power, type="l", col="black", xlab="", ylab="Global Active Power (kilowatts)")
plot(power_Household$SetTime, power_Household$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
columnlines <- c("black", "red", "blue")
labels <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
plot(power_Household$SetTime, power_Household$Sub_metering_1, type = "l", col = columnlines[1], xlab = "", ylab = "Energy Sub Metering")
lines(power_Household$SetTime, power_Household$Sub_metering_2, col=columnlines[2])
lines(power_Household$SetTime, power_Household$Sub_metering_3, col=columnlines[3])
legend("topright", legend = labels, col=columnlines, lty = "solid")
plot(power_Household$SetTime, power_Household$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_Reactive_Power")
file_power[,DateTime := as.Date(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
library(dplyr)
power_Household <- filter(file_power, DateTime >= as.Date("2007-02-01"), DateTime < as.Date("2007-02-02")) |
3ca22d3933e6f867488bd28d6ae18e59cabeecff | 24675907a2f4ecfb109fe9cd97b4afdbe7d0fe78 | /plot4.R | 7ee28e04af1baad3dc68b45696bc96a847f2e5f5 | [] | no_license | peterchsu/ExData_Plotting1 | 75cdeca4202072e5cb064e93090fa6fb08f092c6 | df3b9e3d8d3fee10195b736c041ec8072680ab1d | refs/heads/master | 2021-01-16T19:01:03.484516 | 2014-07-13T03:51:21 | 2014-07-13T03:51:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,253 | r | plot4.R | ## This function plots Global Active Power, Energy sub metering, Voltage, and
## Global Reactive Power for a two day period in February 2007.
##
## The graph is saved as a file in the local directory named "plot3.png"
##
## Appropriate data may be passed to the function. If this does not occur,
## the "readData.R" function as well as the appropriate dataset under the file
## name "household_power_consumption.txt" must be in the local working
## directory.
plot4 <- function(data = NULL, writeFile = TRUE){
#load data
if (is.null(data)){
source("readData.R")
data <- readData()
}
if(writeFile){png(file = "plot4.png")}
par(mfrow = c(2, 2))
with(data, {
plot(DateTime, Global_active_power, type="l", ylab = "Global Active Power", xlab="")
plot(DateTime, Voltage, type="l", xlab="datetime")
plot(DateTime, Sub_metering_1, type="l", xlab = "", ylab = "Energy sub metering")
lines(DateTime, Sub_metering_2, col = 2)
lines(DateTime, Sub_metering_3, col = 4)
legend("topright", lty="solid", col = c(1, 2, 4), bty="n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(DateTime, Global_reactive_power, type="l", xlab="datetime")
})
if(writeFile){dev.off()}
} |
bc9246f18faa81b086f4d8e8f5da23829b44a666 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/TSA/examples/harmonic.Rd.R | 614f1ba5b2de06c22678f4600d43d5ecfa5c3d08 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 327 | r | harmonic.Rd.R | library(TSA)
### Name: harmonic
### Title: Construct harmonic functions for fitting harmonic trend model
### Aliases: harmonic
### Keywords: methods
### ** Examples
data(tempdub)
# first creates the first pair of harmonic functions and then fit the model
har.=harmonic(tempdub,1)
model4=lm(tempdub~har.)
summary(model4)
|
1206deb3f671995fa8a3972172f466552409a161 | d73907fd9853692ec8f8b0d9a67902dc4c5748b0 | /Simulations/7_compile_results.R | 0872445747501f1618528de653b1f2384f9ba9e0 | [] | no_license | foolkenan/lifetimerisk | 3bd23c9064181102f2bd0af1c5d40923c9c3beee | 5adebc62e5ac3207a5f51befbcbf1315a5809095 | refs/heads/master | 2023-07-21T01:01:06.675164 | 2021-09-08T23:03:13 | 2021-09-08T23:03:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,360 | r | 7_compile_results.R | # Assess simulation results and compile into a dataset for nested loop plot
# Updated for revision on 7/9/2021
options(scipen=999)
scenarios.csh <- read.csv('table_scenarios_csh.csv')
scenarios.sdh <- read.csv('table_scenarios_sdh.csv')
pseudocsh <- 'pseudo//cshv2//'
pseudosdh <- 'pseudo//sdhv2//'
fg.csh <- 'finegray//cshv2//'
fg.sdh <- 'finegray//sdhv2//'
fg.logt.csh <- 'finegray_logtime//cshv2//'
fg.logt.sdh <- 'finegray_logtime//sdhv2//'
fp.csh <- 'flex//cshv2_sub55//'
fp.sdh <- 'flex//sdhv2_sub55//'
fp.2df.csh <- 'flex//cshv2_sub55_2df//'
fp.2df.sdh <- 'flex//sdhv2_sub55_2df//'
fp.3df.csh <- 'flex//cshv2_sub55_3df//'
fp.3df.sdh <- 'flex//sdhv2_sub55_3df//'
scenarios.sdh$setting <- scenarios.sdh$setting+4
for(i in 1:32){
# CSH Truth
a01=scenarios.csh$a01[i]; b01=scenarios.csh$b01[i]; a02=scenarios.csh$a02[i]; b02=scenarios.csh$b02[i];
a11=scenarios.csh$a11[i]; b11=scenarios.csh$b11[i]; a12=scenarios.csh$a12[i]; b12=scenarios.csh$b12[i];
integrandz0 <- function(x) {((a01/(b01^a01))*(x^(a01-1))) * exp(-(((x/b01)^a01) + ((x/b02)^a02)))}
integrandz1 <- function(x) {((a11/(b11^a11))*(x^(a11-1))) * exp(-(((x/b11)^a11) + ((x/b12)^a12)))}
scenarios.csh$ltr0[i] <- integrate(integrandz0, lower = 0 , upper = 40)$value
scenarios.csh$ltr1[i] <- integrate(integrandz1, lower = 0 , upper = 40)$value
scenarios.csh$ltrdiff[i] <- scenarios.csh$ltr1[i] - scenarios.csh$ltr0[i]
# SDH Truth
gamma=scenarios.sdh$gamma[i]; rho=scenarios.sdh$rho[i]; psi1=scenarios.sdh$psi1[i]; theta=scenarios.sdh$theta[i];
scenarios.sdh$ltr1[i] <- 1 - exp(gamma*exp(psi1)*(1-exp(rho + theta))/(rho + theta))
scenarios.sdh$ltr0[i] <- 1 - exp(gamma*(1-exp(rho))/rho)
scenarios.sdh$ltrdiff[i] <- scenarios.sdh$ltr1[i] - scenarios.sdh$ltr0[i]
}
scenarios <- rbind(scenarios.csh[, c('setting', 'sampsize', 'probcens', 'ptrunc', 'ltr0', 'ltr1', 'ltrdiff')],
scenarios.sdh[, c('setting', 'sampsize', 'probcens', 'ptrunc', 'ltr0', 'ltr1', 'ltrdiff')])
scenarios <- scenarios[order(scenarios$setting, scenarios$sampsize, scenarios$probcens, scenarios$ptrunc), ]
bias.ltr0 <- matrix(NA,64,ncol=6)
bias.ltr1 <- matrix(NA,64,ncol=6)
bias.ltrdiff <- matrix(NA,64,ncol=6)
relbias.ltr0 <- matrix(NA,64,ncol=6)
relbias.ltr1 <- matrix(NA,64,ncol=6)
relbias.ltrdiff <- matrix(NA,64,ncol=6)
rmse.ltr0 <- matrix(NA,64,ncol=6)
rmse.ltr1 <- matrix(NA,64,ncol=6)
rmse.ltrdiff <- matrix(NA,64,ncol=6)
cov.ltr0 <- matrix(NA,64,ncol=4)
cov.ltr1 <- matrix(NA,64,ncol=4)
cov.ltrdiff <- matrix(NA,64,ncol=4)
power <- rep(NA,64)
effsampsize <- matrix(NA,64,ncol=6)
# For each of the 64 scenarios (1-32 = CSH mechanism, 33-64 = SDH mechanism),
# read every method's replicate-level results, drop failed replicates
# (NA estimates), and compute bias, relative bias, RMSE, CI coverage, and
# (pseudo method only) power, storing each in the matrices allocated above.
for(i in 1:64){
  # Truth
  ltr0 <- scenarios$ltr0[i]
  ltr1 <- scenarios$ltr1[i]
  ltrdiff <- ltr1-ltr0
  # Logit results (pseudo-observation approach) -- method column 1.
  if(i<33){
    res.p <- read.csv(paste0(pseudocsh, 'logitres', i, '.csv'))
  } else {
    res.p <- read.csv(paste0(pseudosdh, 'logitres', i-32, '.csv'))
  }
  res.p <- res.p[!is.na(res.p$ltrdiff),]
  effsampsize[i,1] <- nrow(res.p)
  avgest.ltr0 <- mean(res.p$ltr0)
  avgest.ltr1 <- mean(res.p$ltr1)
  avgest.ltrdiff <- mean(res.p$ltrdiff)
  bias.ltr0[i,1] <- avgest.ltr0-ltr0
  bias.ltr1[i,1] <- avgest.ltr1-ltr1
  bias.ltrdiff[i,1] <- avgest.ltrdiff-ltrdiff
  relbias.ltr0[i,1] <- bias.ltr0[i,1]/ltr0
  relbias.ltr1[i,1] <- bias.ltr1[i,1]/ltr1
  relbias.ltrdiff[i,1] <- bias.ltrdiff[i,1]/ltrdiff
  rmse.ltr0[i,1] <- sqrt(mean((rep(ltr0, effsampsize[i,1])-res.p$ltr0)^2))
  rmse.ltr1[i,1] <- sqrt(mean((rep(ltr1, effsampsize[i,1])-res.p$ltr1)^2))
  rmse.ltrdiff[i,1] <- sqrt(mean((rep(ltrdiff, effsampsize[i,1])-res.p$ltrdiff)^2))
  # Coverage = proportion of replicate CIs containing the truth.
  cov.ltr0[i,1] <- sum((res.p$ltr0_CIL <= ltr0) & (res.p$ltr0_CIU >= ltr0))/effsampsize[i,1]
  cov.ltr1[i,1] <- sum((res.p$ltr1_CIL <= ltr1) & (res.p$ltr1_CIU >= ltr1))/effsampsize[i,1]
  cov.ltrdiff[i,1] <- sum((res.p$ltrdiff_CIL <= ltrdiff) & (res.p$ltrdiff_CIU >= ltrdiff))/effsampsize[i,1]
  power[i] <- sum(res.p$beta1_pval <= .05)/effsampsize[i,1]
  # Fine-Gray results -- method column 2 (no CIs available).
  if(i<33){
    res.fg <- read.csv(paste0(fg.csh, 'res_', i, '.csv'))
  } else {
    res.fg <- read.csv(paste0(fg.sdh, 'res_', i-32, '.csv'))
  }
  res.fg <- res.fg[!is.na(res.fg$ltr.diff),]
  effsampsize[i,2] <- nrow(res.fg)
  avgest.ltr0 <- mean(res.fg$ltr0)
  avgest.ltr1 <- mean(res.fg$ltr1)
  avgest.ltrdiff <- mean(res.fg$ltr.diff)
  bias.ltr0[i,2] <- avgest.ltr0-ltr0
  bias.ltr1[i,2] <- avgest.ltr1-ltr1
  bias.ltrdiff[i,2] <- avgest.ltrdiff-ltrdiff
  relbias.ltr0[i,2] <- bias.ltr0[i,2]/ltr0
  relbias.ltr1[i,2] <- bias.ltr1[i,2]/ltr1
  relbias.ltrdiff[i,2] <- bias.ltrdiff[i,2]/ltrdiff
  rmse.ltr0[i,2] <- sqrt(mean((rep(ltr0, effsampsize[i,2])-res.fg$ltr0)^2))
  rmse.ltr1[i,2] <- sqrt(mean((rep(ltr1, effsampsize[i,2])-res.fg$ltr1)^2))
  rmse.ltrdiff[i,2] <- sqrt(mean((rep(ltrdiff, effsampsize[i,2])-res.fg$ltr.diff)^2))
  # Fine-Gray with logtime results -- method column 3; replicate results are
  # split across four files of 500 replicates each.
  if(i<33){
    res.fg.logt.path1 <- read.csv(paste0(fg.logt.csh, 'res_', i, '_500.csv'))
    res.fg.logt.path2 <- read.csv(paste0(fg.logt.csh, 'res_', i, '_1000.csv'))
    res.fg.logt.path3 <- read.csv(paste0(fg.logt.csh, 'res_', i, '_1500.csv'))
    res.fg.logt.path4 <- read.csv(paste0(fg.logt.csh, 'res_', i, '_2000.csv'))
  } else {
    res.fg.logt.path1 <- read.csv(paste0(fg.logt.sdh, 'res_', i-32, '_500.csv'))
    res.fg.logt.path2 <- read.csv(paste0(fg.logt.sdh, 'res_', i-32, '_1000.csv'))
    res.fg.logt.path3 <- read.csv(paste0(fg.logt.sdh, 'res_', i-32, '_1500.csv'))
    res.fg.logt.path4 <- read.csv(paste0(fg.logt.sdh, 'res_', i-32, '_2000.csv'))
  }
  res.fg.logt <- rbind(res.fg.logt.path1, res.fg.logt.path2, res.fg.logt.path3, res.fg.logt.path4)
  res.fg.logt <- res.fg.logt[!is.na(res.fg.logt$ltr.diff),]
  effsampsize[i,3] <- nrow(res.fg.logt)
  avgest.ltr0 <- mean(res.fg.logt$ltr0)
  avgest.ltr1 <- mean(res.fg.logt$ltr1)
  avgest.ltrdiff <- mean(res.fg.logt$ltr.diff)
  bias.ltr0[i,3] <- avgest.ltr0-ltr0
  bias.ltr1[i,3] <- avgest.ltr1-ltr1
  bias.ltrdiff[i,3] <- avgest.ltrdiff-ltrdiff
  relbias.ltr0[i,3] <- bias.ltr0[i,3]/ltr0
  relbias.ltr1[i,3] <- bias.ltr1[i,3]/ltr1
  relbias.ltrdiff[i,3] <- bias.ltrdiff[i,3]/ltrdiff
  rmse.ltr0[i,3] <- sqrt(mean((rep(ltr0, effsampsize[i,3])-res.fg.logt$ltr0)^2))
  rmse.ltr1[i,3] <- sqrt(mean((rep(ltr1, effsampsize[i,3])-res.fg.logt$ltr1)^2))
  rmse.ltrdiff[i,3] <- sqrt(mean((rep(ltrdiff, effsampsize[i,3])-res.fg.logt$ltr.diff)^2))
  # Flexible parametric results (1 df) -- method column 4, coverage column 2.
  if(i<33){
    res.fp.path1 <- read.csv(paste0(fp.csh, 'res_', i, '_500.csv'))
    res.fp.path2 <- read.csv(paste0(fp.csh, 'res_', i, '_1000.csv'))
    res.fp.path3 <- read.csv(paste0(fp.csh, 'res_', i, '_1500.csv'))
    res.fp.path4 <- read.csv(paste0(fp.csh, 'res_', i, '_2000.csv'))
  } else {
    res.fp.path1 <- read.csv(paste0(fp.sdh, 'res_', i-32, '_500.csv'))
    res.fp.path2 <- read.csv(paste0(fp.sdh, 'res_', i-32, '_1000.csv'))
    res.fp.path3 <- read.csv(paste0(fp.sdh, 'res_', i-32, '_1500.csv'))
    res.fp.path4 <- read.csv(paste0(fp.sdh, 'res_', i-32, '_2000.csv'))
  }
  res.fp <- rbind(res.fp.path1, res.fp.path2, res.fp.path3, res.fp.path4)
  res.fp <- res.fp[!is.na(res.fp$ltr.diff),]
  effsampsize[i,4] <- nrow(res.fp)
  avgest.ltr0 <- mean(res.fp$ltr0)
  avgest.ltr1 <- mean(res.fp$ltr1)
  avgest.ltrdiff <- mean(res.fp$ltr.diff)
  bias.ltr0[i,4] <- avgest.ltr0-ltr0
  bias.ltr1[i,4] <- avgest.ltr1-ltr1
  bias.ltrdiff[i,4] <- avgest.ltrdiff-ltrdiff
  relbias.ltr0[i,4] <- bias.ltr0[i,4]/ltr0
  relbias.ltr1[i,4] <- bias.ltr1[i,4]/ltr1
  relbias.ltrdiff[i,4] <- bias.ltrdiff[i,4]/ltrdiff
  rmse.ltr0[i,4] <- sqrt(mean((rep(ltr0, effsampsize[i,4])-res.fp$ltr0)^2))
  rmse.ltr1[i,4] <- sqrt(mean((rep(ltr1, effsampsize[i,4])-res.fp$ltr1)^2))
  rmse.ltrdiff[i,4] <- sqrt(mean((rep(ltrdiff, effsampsize[i,4])-res.fp$ltr.diff)^2))
  cov.ltr0[i,2] <- sum((res.fp$ltr0.cil <= ltr0) & (res.fp$ltr0.ciu >= ltr0))/effsampsize[i,4]
  cov.ltr1[i,2] <- sum((res.fp$ltr1.cil <= ltr1) & (res.fp$ltr1.ciu >= ltr1))/effsampsize[i,4]
  cov.ltrdiff[i,2] <- sum((res.fp$ltr.diff.cil <= ltrdiff) & (res.fp$ltr.diff.ciu >= ltrdiff))/effsampsize[i,4]
  # Flexible parametric w/ 2df results -- method column 5, coverage column 3.
  if(i<33){
    res.fp.2df.path1 <- read.csv(paste0(fp.2df.csh, 'res_', i, '_500.csv'))
    res.fp.2df.path2 <- read.csv(paste0(fp.2df.csh, 'res_', i, '_1000.csv'))
    res.fp.2df.path3 <- read.csv(paste0(fp.2df.csh, 'res_', i, '_1500.csv'))
    res.fp.2df.path4 <- read.csv(paste0(fp.2df.csh, 'res_', i, '_2000.csv'))
  } else {
    res.fp.2df.path1 <- read.csv(paste0(fp.2df.sdh, 'res_', i-32, '_500.csv'))
    res.fp.2df.path2 <- read.csv(paste0(fp.2df.sdh, 'res_', i-32, '_1000.csv'))
    res.fp.2df.path3 <- read.csv(paste0(fp.2df.sdh, 'res_', i-32, '_1500.csv'))
    res.fp.2df.path4 <- read.csv(paste0(fp.2df.sdh, 'res_', i-32, '_2000.csv'))
  }
  res.fp.2df <- rbind(res.fp.2df.path1, res.fp.2df.path2, res.fp.2df.path3, res.fp.2df.path4)
  res.fp.2df <- res.fp.2df[!is.na(res.fp.2df$ltr.diff),]
  effsampsize[i,5] <- nrow(res.fp.2df)
  avgest.ltr0 <- mean(res.fp.2df$ltr0)
  avgest.ltr1 <- mean(res.fp.2df$ltr1)
  avgest.ltrdiff <- mean(res.fp.2df$ltr.diff)
  bias.ltr0[i,5] <- avgest.ltr0-ltr0
  bias.ltr1[i,5] <- avgest.ltr1-ltr1
  bias.ltrdiff[i,5] <- avgest.ltrdiff-ltrdiff
  relbias.ltr0[i,5] <- bias.ltr0[i,5]/ltr0
  relbias.ltr1[i,5] <- bias.ltr1[i,5]/ltr1
  relbias.ltrdiff[i,5] <- bias.ltrdiff[i,5]/ltrdiff
  rmse.ltr0[i,5] <- sqrt(mean((rep(ltr0, effsampsize[i,5])-res.fp.2df$ltr0)^2))
  rmse.ltr1[i,5] <- sqrt(mean((rep(ltr1, effsampsize[i,5])-res.fp.2df$ltr1)^2))
  rmse.ltrdiff[i,5] <- sqrt(mean((rep(ltrdiff, effsampsize[i,5])-res.fp.2df$ltr.diff)^2))
  cov.ltr0[i,3] <- sum((res.fp.2df$ltr0.cil <= ltr0) & (res.fp.2df$ltr0.ciu >= ltr0))/effsampsize[i,5]
  cov.ltr1[i,3] <- sum((res.fp.2df$ltr1.cil <= ltr1) & (res.fp.2df$ltr1.ciu >= ltr1))/effsampsize[i,5]
  cov.ltrdiff[i,3] <- sum((res.fp.2df$ltr.diff.cil <= ltrdiff) & (res.fp.2df$ltr.diff.ciu >= ltrdiff))/effsampsize[i,5]
  # Flexible parametric w/ 3df results -- method column 6, coverage column 4.
  if(i<33){
    res.fp.3df.path1 <- read.csv(paste0(fp.3df.csh, 'res_', i, '_500.csv'))
    res.fp.3df.path2 <- read.csv(paste0(fp.3df.csh, 'res_', i, '_1000.csv'))
    res.fp.3df.path3 <- read.csv(paste0(fp.3df.csh, 'res_', i, '_1500.csv'))
    res.fp.3df.path4 <- read.csv(paste0(fp.3df.csh, 'res_', i, '_2000.csv'))
  } else {
    res.fp.3df.path1 <- read.csv(paste0(fp.3df.sdh, 'res_', i-32, '_500.csv'))
    res.fp.3df.path2 <- read.csv(paste0(fp.3df.sdh, 'res_', i-32, '_1000.csv'))
    res.fp.3df.path3 <- read.csv(paste0(fp.3df.sdh, 'res_', i-32, '_1500.csv'))
    res.fp.3df.path4 <- read.csv(paste0(fp.3df.sdh, 'res_', i-32, '_2000.csv'))
  }
  res.fp.3df <- rbind(res.fp.3df.path1, res.fp.3df.path2, res.fp.3df.path3, res.fp.3df.path4)
  res.fp.3df <- res.fp.3df[!is.na(res.fp.3df$ltr.diff),]
  effsampsize[i,6] <- nrow(res.fp.3df)
  avgest.ltr0 <- mean(res.fp.3df$ltr0)
  avgest.ltr1 <- mean(res.fp.3df$ltr1)
  avgest.ltrdiff <- mean(res.fp.3df$ltr.diff)
  bias.ltr0[i,6] <- avgest.ltr0-ltr0
  bias.ltr1[i,6] <- avgest.ltr1-ltr1
  bias.ltrdiff[i,6] <- avgest.ltrdiff-ltrdiff
  relbias.ltr0[i,6] <- bias.ltr0[i,6]/ltr0
  relbias.ltr1[i,6] <- bias.ltr1[i,6]/ltr1
  relbias.ltrdiff[i,6] <- bias.ltrdiff[i,6]/ltrdiff
  rmse.ltr0[i,6] <- sqrt(mean((rep(ltr0, effsampsize[i,6])-res.fp.3df$ltr0)^2))
  rmse.ltr1[i,6] <- sqrt(mean((rep(ltr1, effsampsize[i,6])-res.fp.3df$ltr1)^2))
  rmse.ltrdiff[i,6] <- sqrt(mean((rep(ltrdiff, effsampsize[i,6])-res.fp.3df$ltr.diff)^2))
  cov.ltr0[i,4] <- sum((res.fp.3df$ltr0.cil <= ltr0) & (res.fp.3df$ltr0.ciu >= ltr0))/effsampsize[i,6]
  cov.ltr1[i,4] <- sum((res.fp.3df$ltr1.cil <= ltr1) & (res.fp.3df$ltr1.ciu >= ltr1))/effsampsize[i,6]
  cov.ltrdiff[i,4] <- sum((res.fp.3df$ltr.diff.cil <= ltrdiff) & (res.fp.3df$ltr.diff.ciu >= ltrdiff))/effsampsize[i,6]
}
# Bind the performance measures onto the scenario descriptors; column order
# here must match the names assigned in results.cols below.
results <- cbind(scenarios,
                 bias.ltr0, bias.ltr1, bias.ltrdiff,
                 relbias.ltr0, relbias.ltr1, relbias.ltrdiff,
                 rmse.ltr0, rmse.ltr1, rmse.ltrdiff,
                 cov.ltr0, cov.ltr1, cov.ltrdiff,
                 effsampsize, power
)
# Names for the measure columns, in the same metric-then-method order as the
# cbind above (six methods for bias/relbias/rmse/effsampsize, four for
# coverage, one power column for the pseudo method).
results.cols <- c("bias.ltr0.pseudo", "bias.ltr0.fg", "bias.ltr0.fglogt", "bias.ltr0.flex", "bias.ltr0.flex2df", "bias.ltr0.flex3df",
                  "bias.ltr1.pseudo", "bias.ltr1.fg", "bias.ltr1.fglogt", "bias.ltr1.flex", "bias.ltr1.flex2df", "bias.ltr1.flex3df",
                  "bias.ltrdiff.pseudo", "bias.ltrdiff.fg", "bias.ltrdiff.fglogt", "bias.ltrdiff.flex", "bias.ltrdiff.flex2df", "bias.ltrdiff.flex3df",
                  "relbias.ltr0.pseudo", "relbias.ltr0.fg", "relbias.ltr0.fglogt", "relbias.ltr0.flex", "relbias.ltr0.flex2df", "relbias.ltr0.flex3df",
                  "relbias.ltr1.pseudo", "relbias.ltr1.fg", "relbias.ltr1.fglogt", "relbias.ltr1.flex", "relbias.ltr1.flex2df", "relbias.ltr1.flex3df",
                  "relbias.ltrdiff.pseudo", "relbias.ltrdiff.fg", "relbias.ltrdiff.fglogt", "relbias.ltrdiff.flex", "relbias.ltrdiff.flex2df", "relbias.ltrdiff.flex3df",
                  "rmse.ltr0.pseudo", "rmse.ltr0.fg", "rmse.ltr0.fglogt", "rmse.ltr0.flex", "rmse.ltr0.flex2df", "rmse.ltr0.flex3df",
                  "rmse.ltr1.pseudo", "rmse.ltr1.fg", "rmse.ltr1.fglogt", "rmse.ltr1.flex", "rmse.ltr1.flex2df", "rmse.ltr1.flex3df",
                  "rmse.ltrdiff.pseudo", "rmse.ltrdiff.fg", "rmse.ltrdiff.fglogt", "rmse.ltrdiff.flex", "rmse.ltrdiff.flex2df", "rmse.ltrdiff.flex3df",
                  "cov.ltr0.pseudo", "cov.ltr0.flex", "cov.ltr0.flex2df", "cov.ltr0.flex3df",
                  "cov.ltr1.pseudo", "cov.ltr1.flex", "cov.ltr1.flex2df", "cov.ltr1.flex3df",
                  "cov.ltrdiff.pseudo", "cov.ltrdiff.flex", "cov.ltrdiff.flex2df", "cov.ltrdiff.flex3df",
                  "effsampsize.pseudo", "effsampsize.fg", "effsampsize.fglogt", "effsampsize.flex", "effsampsize.flex2df", "effsampsize.flex3df",
                  "power.pseudo")
colnames(results) <- c(colnames(scenarios), results.cols)
write.csv(results, "results_revision.csv", row.names=FALSE)
# Try reordering
# results2 <- results[order(results$setting, results$probcens, results$sampsize, results$ptrunc), ]
# results2 <- results2[, c("setting", "probcens", "sampsize", "ptrunc", results.cols)]
# write.csv(results2, "Y:\\19SCR-lifetimerisk_varselection\\simulation\\sim results\\results order 2.csv", row.names=FALSE)
#
adb3f50e850ab79c8ba5b6cabf19296d0207a786 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /resemble/R/print.local_fit.R | 16682740e7efe537931a8e1ab730196f13fd4929 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 936 | r | print.local_fit.R | #' @title Print method for an object of class \code{local_fit}
#' @description Prints the contents of an object of class \code{local_fit}
#' @usage \method{print}{local_fit}(x, ...)
#' @param x an object of class \code{local_fit}
#' @param ... not yet functional.
#' @author Leonardo Ramirez-Lopez
#' @keywords internal
#' @export
print.local_fit <- function(x, ...) {
if (!x$method %in% c("pls", "wapls", "gpr")) {
message("Method not recognized!")
}
if (x$method == "pls") {
cat("Partial least squares (pls)")
cat("\nNumber of factors:", x$pls_c)
}
if (x$method == "wapls") {
cat("Weighted average partial least squares (wapls)")
cat("\nMin. and max. number of factors: from", x$pls_c[["min_pls_c"]], "to", x$pls_c[["max_pls_c"]])
}
if (x$method == "gpr") {
cat("Gaussian process with linear kernel/dot product (gpr)")
cat("\nNoise:", x$noise_variance)
}
}
|
1cbc9fe1777e30cb2f7d79edb55effde70f5a519 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/proportion/examples/ciWD.Rd.R | 0a6dd443c821813217ab77c4360cb9d23196e919 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 142 | r | ciWD.Rd.R | library(proportion)
### Name: ciWD
### Title: Wald method of CI estimation
### Aliases: ciWD
### ** Examples
n=5; alp=0.05
ciWD(n,alp)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.