content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
### Generic: convert an object (Surv, factor, numeric, ...) into the
### internal "response" representation used to set up (censored)
### likelihood contributions.
R <- function(object, ...)
UseMethod("R")
R.Surv <- function(object, as.R.ordered = FALSE, as.R.interval = FALSE, ...) {
    ### Convert a survival::Surv object into the internal "response"
    ### representation (exact, censored and left-truncated components).
    ### as.R.ordered: recode the response as an ordered factor defined by
    ###   the observed event times (right-censored / counting types only).
    ### as.R.interval: recode as interval-censored on the numeric scale,
    ###   with intervals defined by the observed event times.
    type <- attr(object, "type")
    stopifnot(type %in% c("left", "right", "interval",
                          "interval2", "counting"))
    status <- object[, "status"]
    if (as.R.interval) {
        if (as.R.ordered) warning("argument as.R.ordered is ignored")
        ### first obtain the ordered-factor coding, then translate factor
        ### levels back into numeric interval boundaries
        ret <- R(object, as.R.ordered = TRUE)
        utm <- c(0, attr(ret, "unique_obs"), Inf)
        left <- unclass(ret$cleft) + 1L
        left[is.na(left)] <- 1L
        right <- unclass(ret$cright) + 1L
        right[is.na(right)] <- nlevels(ret$cright) + 2L
        obj <- numeric(length(left))
        obj[] <- NA
        if (!is.ordered(ret$tleft)) {
            ret <- R(object = obj, cleft = utm[left],
                     cright = utm[right])
        } else {
            tleft <- unclass(ret$tleft) + 1L
            tleft[is.na(tleft)] <- 1L
            ret <- R(object = obj, cleft = utm[left],
                     cright = utm[right], tleft = utm[tleft])
        }
        return(ret)
    }
    if (as.R.ordered && !type %in% c("right", "counting"))
        stop("as.R.ordered only implemented for right-censored observations")
    if (as.R.ordered && type %in% c("right", "counting")) {
        ### code response as ordered factor with right-censoring; this
        ### defines the nonparametric likelihood for right-censored data
        ### in terms of the observed event times
        tm <- if(type == "right") object[,"time"] else object[, "stop"]
        ### observed event times
        utm <- sort(unique(tm[status == 1]))
        if (all(tm[status == 0] < utm[length(utm)]))
            utm <- utm[-length(utm)]
        ### convert to ordered factor
        ct <- cut(tm, breaks = c(-Inf, utm, Inf), ordered_result = TRUE)
        ### events in category k contribute Prob(k) - Prob(k - 1)
        lf <- rg <- ct
        lf[status == 1] <- rg[status == 1] <- NA
        ### censored obs in category k contribute 1 - Prob(k - 1)
        rg[status == 0] <- levels(ct)[nlevels(ct)]
        ### Note: Censoring before first event contributes
        ### 1 - 0 = 1 (coded as interval with cleft = NA, cright = NA)
        ### handle censoring times tied to event times separately
        idx <- which(status != 1)
        idx <- idx[!(tm[idx] %in% utm)] ### censoring tied to event
        lf[idx] <- c(NA, levels(ct))[lf[idx]]
        ### left truncation
        tl <- NA
        if (type == "counting")
            ### spell out ordered_result (was partially matched as `ordered`)
            tl <- cut(object[, "start"], breaks = c(-Inf, utm, Inf),
                      ordered_result = TRUE)
        ### a "response" representation of an ordered factor
        ret <- R(object = ct, cleft = lf, cright = rg, tleft = tl)
        attr(ret, "unique_obs") <- utm
        return(ret)
    }
    ret <- switch(type,
        "right" = R(object = ifelse(status == 1, object[, "time"], NA),
                    cleft = ifelse(status != 1, object[, "time"], NA),
                    cright = ifelse(status != 1, Inf, NA)),
        "left" = R(object = ifelse(status == 1, object[, "time"], NA),
                   cleft = ifelse(status != 1, -Inf, NA),
                   cright = ifelse(status != 1, object[, "time"], NA)),
        "interval2" = {
            ### bug fix: the previous code built a matrix with cbind() and
            ### then accessed ret$left / ret$right, which errors for atomic
            ### matrices ("$ operator is invalid for atomic vectors");
            ### operate on plain vectors instead
            lft <- object[, "time1"]
            rgt <- object[, "time2"]
            ### NA boundaries encode open-ended intervals
            lft[is.na(lft)] <- -Inf
            rgt[is.na(rgt)] <- Inf
            R(cleft = lft, cright = rgt)
        },
        "interval" = {
            status <- factor(status, levels = 0:3,
                             labels = c("right", "exact", "left", "interval"))
            tmp <- matrix(NA, nrow = nrow(object), ncol = 2)
            colnames(tmp) <- c("left", "right")
            for (s in levels(status)) {
                idx <- which(status == s)
                tmp[idx, ] <- switch(s,
                    "right" = cbind(object[idx, "time1"], Inf),
                    "exact" = cbind(object[idx, "time1"], NA),
                    "left" = cbind(-Inf, object[idx, "time1"]),
                    "interval" = object[idx, c("time1", "time2")])
            }
            ### an exact observation is coded by right == NA
            R(object = ifelse(is.na(tmp[, "right"]), tmp[, "left"], NA),
              cleft = ifelse(is.na(tmp[, "right"]), NA, tmp[, "left"]),
              cright = tmp[, "right"])
        },
        ### left truncation, right censoring
        "counting" = R(object = ifelse(status == 1, object[, "stop"], NA),
                       cleft = ifelse(status != 1, object[, "stop"], NA),
                       cright = ifelse(status != 1, Inf, NA),
                       tleft = object[, "start"])
    )
    ### distribution evaluator based on a weighted Kaplan-Meier fit
    attr(ret, "prob") <- function(weights) {
        sf <- survival::survfit(object ~ 1, subset = weights > 0, weights = weights)
        function(y) {
            uy <- sort(unique(y))
            s <- summary(sf, times = uy)$surv
            ### survfit summaries stop after the last event; pad with 0
            if (length(s) < length(uy))
                s <- c(s, rep(0, length(uy) - length(s)))
            s[is.na(s)] <- 0
            p <- 1 - s
            p[match(y, uy)]
        }
    }
    ret
}
### Unordered factors are handled like ordered factors after coercion;
### the result then depends on the (arbitrary) level order, hence the warning.
R.factor <- function(object, ...) {
    warning("response is unordered factor;
results may depend on order of levels")
    R(as.ordered(object), ...)
}
### Ordered factor response: an exact observation in category k is
### recoded as the interval (k - 1, k]; the largest category is
### right-open (cright = NA) and the first category is left-open.
R.ordered <- function(object, cleft = NA, cright = NA, ...) {
lev <- levels(object)
ret <- .mkR(exact = object, cleft = cleft, cright = cright, ...)
### right boundary: the observed category itself
ret[is.na(ret$cright), "cright"] <- ret$exact[is.na(ret$cright)]
### left boundary: the preceding category (NA for the first category,
### via exclude = 0)
ret[is.na(ret$cleft), "cleft"] <- factor(unclass(object)[is.na(ret$cleft)] - 1,
levels = 1:length(lev), labels = lev, exclude = 0, ordered = TRUE)
ret$exact <- NA
### observations in the last category contribute 1 - Prob(k - 1)
ret[ret$cright == lev[nlevels(object)], "cright"] <- NA
### weighted empirical distribution over the factor levels
attr(ret, "prob") <- function(weights) {
prt <- cumsum(prop.table(xtabs(weights ~ object)))
function(y) prt[y]
}
ret
}
### <FIXME> is bounds = c(min(object), Inf) the right thing, always?
### </FIXME>
### Integer response: an exact observation x is recoded as the interval
### (x - 1, x]; values at `bounds` become open-ended intervals.
R.integer <- function(object, cleft = NA, cright = NA, bounds = c(min(object), Inf), ...) {
ret <- .mkR(exact = object, cleft = cleft, cright = cright, ...)
ret$cright[is.na(ret$cright)] <- ret$exact[is.na(ret$cright)]
### right-open at the upper bound
ret$cright[ret$cright == bounds[2]] <- NA
ret$cleft[is.na(ret$cleft)] <- ret$exact[is.na(ret$cleft)] - 1
### left-open below the lower bound
ret$cleft[ret$cleft < bounds[1]] <- NA
ret$exact <- NA
### weighted ECDF over the observed integers
attr(ret, "prob") <- function(weights)
.wecdf(object, weights)
ret
}
### An "interval" object stores its breakpoints in the "levels"
### attribute; each observation is interval-censored between the two
### breakpoints that enclose its bin.
R.interval <- function(object, ...) {
    brk <- attr(object, "levels")
    k <- length(brk)
    lower <- brk[-k]
    upper <- brk[-1L]
    R(cleft = lower[object], cright = upper[object], ...)
}
### handle exact integer / factor as interval censored
R.numeric <- function(object = NA, cleft = NA, cright = NA,
                      tleft = NA, tright = NA, tol = sqrt(.Machine$double.eps),
                      as.R.ordered = FALSE, as.R.interval = FALSE, ...) {
    ### Build a "response" object from numeric exact observations
    ### (`object`) and/or censoring (`cleft`/`cright`) and truncation
    ### (`tleft`/`tright`) boundaries.
    ### treat extremely small intervals as `exact' observations
    d <- cright - cleft
    ### simplified condition: is.finite(d) implies !is.na(d), so the old
    ### `!is.na(d) | is.finite(d)` reduced to `!is.na(d)`
    if (any(!is.na(d))) {
        if (any(d < 0, na.rm = TRUE)) stop("cleft > cright")
        i <- (d < tol)
        if (any(i, na.rm = TRUE)) {
            i <- which(i)
            object[i] <- cleft[i]
            cleft[i] <- cright[i] <- NA
        }
    }
    ### bug fix: `||` is scalar-only (an error for vectors in R >= 4.3);
    ### test each censoring component with any() instead
    if (as.R.ordered && (any(!is.na(cleft)) || any(!is.na(cright))))
        warning("as.R.ordered only implemented for exact observations")
    if (as.R.ordered) {
        ### code response as ordered factor; this defines the
        ### nonparametric likelihood in terms of the observed event times
        utm <- sort(unique(object))
        utm <- utm[-length(utm)]
        ### convert to ordered factor (ordered_result spelled out;
        ### previously relied on partial matching of `ordered`)
        ct <- cut(object, breaks = c(-Inf, utm, Inf), ordered_result = TRUE)
        tl <- tr <- NA
        if (!all(is.na(tleft)))
            tl <- cut(tleft, breaks = c(-Inf, utm, Inf), ordered_result = TRUE)
        if (!all(is.na(tright)))
            tr <- cut(tright, breaks = c(-Inf, utm, Inf), ordered_result = TRUE)
        ret <- R(object = ct, tleft = tl, tright = tr)
        attr(ret, "unique_obs") <- utm
        return(ret)
    }
    if (as.R.interval) {
        ### code response as interval-censored defining the nonparametric
        ### likelihood BUT keep data as numeric
        utm <- sort(unique(object))
        ct <- cut(object, breaks = c(-Inf, utm, Inf))
        return(R(Surv(time = c(-Inf, utm)[ct], time2 = utm[ct], type = "interval2")))
    }
    ret <- .mkR(exact = object, cleft = cleft, cright = cright,
                tleft = tleft, tright = tright)
    ### <FIXME>
    ### this fails if is.na(object) and only cleft/cright are given
    # attr(ret, "prob") <- function(weights)
    #     .wecdf(object, weights)
    attr(ret, "prob") <- function(weights)
        .wecdf(ret$approxy, weights)
    ### we want something like survfit(Surv(... type = "interval")
    ### with adjustment to min(obs) = 0
    ### </FIXME>
    ret
}
### for object = NA, ie censored observations only: a logical NA input
### is dispatched to the numeric method
R.logical <- R.numeric
### A "response" object is returned unchanged (R() is idempotent).
### NOTE(review): this method is defined again further down the file;
### the later definition wins when the file is sourced.
R.response <- function(object, ...)
return(object)
### Fallback: informative error for unsupported response classes.
### Fixes the previous message, which had no separator before the class
### name (e.g. "response classcomplex") and pasted multi-element class
### vectors without delimiters.
R.default <- function(object, ...)
    stop("cannot deal with response class ",
         paste(class(object), collapse = ", "))
### Internal constructor for "response" objects: collects exact,
### censored (cleft/cright) and truncation (tleft/tright) components
### into a classed data.frame, adding a numeric `approxy` column that
### gives a meaningful ordering of (possibly censored) observations.
.mkR <- function(...) {
args <- list(...)
### all non-logical components must share a single class
### (logical NA placeholders are exempt from the check)
cls <- unique(sapply(args, function(a) class(a)[1]))
cls <- cls[cls != "logical"]
stopifnot(length(cls) <= 1)
### components must have a common length; length-1 (NA) entries recycle
n <- unique(sapply(args, length))
stopifnot(length(n) <= 2)
if (length(n) == 2) stopifnot(min(n) == 1)
### degenerate case: every component is entirely missing
if (all(sapply(args, function(x) all(is.na(x))))) {
args$approxy <- NA
ret <- do.call("as.data.frame", list(x = args))
class(ret) <- c("response", class(ret))
return(ret[, c("tleft", "cleft", "exact", "cright", "tright", "approxy"), drop = FALSE])
}
ret <- do.call("as.data.frame", list(x = args))
if (is.null(ret$exact)) ret$exact <- NA
### for ordered responses, missing censoring columns are created as
### ordered factors on the same levels as `exact`
if (is.null(ret$cleft) || all(is.na(ret$cleft))) {
ret$cleft <- NA
if (is.ordered(ret$exact))
ret$cleft <- factor(ret$cleft, levels = 1:nlevels(ret$exact),
labels = levels(ret$exact), ordered = TRUE)
}
if (is.null(ret$cright) || all(is.na(ret$cright))) {
ret$cright <- NA
if (is.ordered(ret$exact))
ret$cright <- factor(ret$cright, levels = 1:nlevels(ret$exact),
labels = levels(ret$exact), ordered = TRUE)
}
if (is.null(ret$tleft)) ret$tleft <- NA
if (is.null(ret$tright)) ret$tright <- NA
if (all(is.finite(ret$exact))) {
# ret$rank <- rank(ret$exact, ties.method = "max")
ret$approxy <- ret$exact
} else {
### some meaningful ordering of observations: censored entries are
### replaced by interval midpoints, with open ends mapped to the
### observed finite range
tmpexact <- as.numeric(ret$exact)
tmpleft <- as.numeric(ret$cleft)
tmpright <- as.numeric(ret$cright)
tmpler <- c(tmpleft, tmpexact, tmpright)
tmpleft[!is.finite(tmpleft)] <- min(tmpler[is.finite(tmpler)])
tmpright[!is.finite(tmpright)] <- max(tmpler[is.finite(tmpler)])
tmpexact[is.na(tmpexact)] <-
(tmpleft + ((tmpright - tmpleft) / 2))[is.na(tmpexact)]
# ret$rank <- rank(tmpexact, ties.method = "max")
ret$approxy <- tmpexact
}
class(ret) <- c("response", class(ret))
ret[, c("tleft", "cleft", "exact", "cright", "tright", "approxy"), drop = FALSE]
}
### Row-wise indicators on "response" objects:
### an observation is `exact' iff its $exact entry is non-missing
.exact <- function(object)
!is.na(object$exact)
### finite left / right censoring boundary present
.cleft <- function(object)
is.finite(object$cleft)
.cright <- function(object)
is.finite(object$cright)
### every non-exact observation is treated as (interval) censored
.cinterval <- function(object)
!.exact(object)
# .cleft(object) | .cright(object)
### finite left / right truncation boundary present
.tleft <- function(object)
is.finite(object$tleft)
.tright <- function(object)
is.finite(object$tright)
### truncated on at least one side
.tinterval <- function(object)
.tleft(object) | .tright(object)
.mm_exact <- function(model, data, response, object) {
    ### Model matrix (and its derivative wrt the response) for the
    ### `exact' observations of a response object, plus optional
    ### truncation designs. Returns NULL if there are no exact obs.
    e <- .exact(object)
    if (!any(e)) return(NULL)
    tmp <- data[e,,drop = FALSE]
    tmp[[response]] <- object$exact[e]
    Y <- model.matrix(model, data = tmp)
    ### derivative of the design with respect to the response variable
    deriv <- 1
    names(deriv) <- response
    Yprime <- model.matrix(model, data = tmp, deriv = deriv)
    .matrix <- matrix
    if (inherits(Y, "Matrix"))
        .matrix <- function(...) Matrix(..., sparse = TRUE)
    trunc <- NULL
    if (any(.tinterval(object) & e)) {
        ### truncation designs default to unbounded (-Inf / Inf)
        Ytleft <- .matrix(-Inf, nrow = nrow(Y), ncol = ncol(Y))
        Ytright <- .matrix(Inf, nrow = nrow(Y), ncol = ncol(Y))
        if (any(il <- (.tleft(object) & e))) {
            ### bug fix: drop = FALSE keeps single-column data.frames
            ### intact (was data[il,], inconsistent with the right-hand
            ### truncation branch below)
            tmp <- data[il,,drop = FALSE]
            tmp[[response]] <- object$tleft[il]
            Ytleft[.tleft(object)[e],] <- model.matrix(model, data = tmp)
        }
        if (any(ir <- (.tright(object) & e))) {
            tmp <- data[ir,,drop = FALSE]
            tmp[[response]] <- object$tright[ir]
            Ytright[.tright(object)[e],] <- model.matrix(model, data = tmp)
        }
        trunc <- list(left = Ytleft, right = Ytright)
    }
    list(Y = Y, Yprime = Yprime, trunc = trunc, which = which(e))
}
### Model matrices for the censored (non-exact) observations of a
### response object: designs at the left/right interval boundaries plus
### optional truncation designs. Returns NULL if all obs are exact.
.mm_interval <- function(model, data, response, object) {
i <- .cinterval(object)
if (!any(i)) return(NULL)
tmpdata <- data[i,,drop = FALSE]
object <- object[i,, drop = FALSE]
Yleft <- NULL
### design at the finite left censoring boundaries; rows without a
### finite boundary are filled with -Inf
if (any(il <- .cleft(object))) {
tmp <- tmpdata[il,,drop = FALSE]
tmp[[response]] <- object$cleft[il]
Ytmp <- model.matrix(model, data = tmp)
.matrix <- matrix
if (inherits(Ytmp, "Matrix"))
.matrix <- function(...) Matrix(..., sparse = TRUE)
Yleft <- .matrix(-Inf, nrow = length(il), ncol = ncol(Ytmp))
colnames(Yleft) <- colnames(Ytmp)
rownames(Yleft) <- rownames(tmpdata)
Yleft[il,] <- Ytmp
attr(Yleft, "constraint") <- attr(Ytmp, "constraint")
attr(Yleft, "Assign") <- attr(Ytmp, "Assign")
}
Yright <- NULL
### design at the finite right censoring boundaries (Inf elsewhere)
if (any(ir <- .cright(object))) {
tmp <- tmpdata[ir,, drop = FALSE]
tmp[[response]] <- object$cright[ir]
Ytmp <- model.matrix(model, data = tmp)
.matrix <- matrix
if (inherits(Ytmp, "Matrix"))
.matrix <- function(...) Matrix(..., sparse = TRUE)
Yright <- .matrix(Inf, nrow = length(ir), ncol = ncol(Ytmp))
colnames(Yright) <- colnames(Ytmp)
rownames(Yright) <- rownames(tmpdata)
Yright[ir,] <- Ytmp
attr(Yright, "constraint") <- attr(Ytmp, "constraint")
attr(Yright, "Assign") <- attr(Ytmp, "Assign")
}
### if only one side was observed, synthesize the other as an unbounded
### design with matching shape and attributes
### NOTE(review): these fallbacks use base matrix() even when the
### observed side is a sparse Matrix -- confirm this is intended
if (is.null(Yright)) {
Yright <- matrix(Inf, nrow = nrow(Yleft), ncol = ncol(Yleft))
colnames(Yright) <- colnames(Yleft)
attr(Yright, "constraint") <- attr(Yleft, "constraint")
attr(Yright, "Assign") <- attr(Yleft, "Assign")
}
if (is.null(Yleft)) {
Yleft <- matrix(-Inf, nrow = nrow(Yright), ncol = ncol(Yright))
colnames(Yleft) <- colnames(Yright)
attr(Yleft, "constraint") <- attr(Yright, "constraint")
attr(Yleft, "Assign") <- attr(Yright, "Assign")
}
trunc <- NULL
### truncation designs, only for rows with a finite truncation bound
if (any(.tinterval(object))) {
.matrix <- matrix
if (inherits(Yleft, "Matrix"))
.matrix <- function(...) Matrix(..., sparse = TRUE)
Ytleft <- .matrix(-Inf, nrow = nrow(Yleft), ncol = ncol(Yleft))
Ytright <- .matrix(Inf, nrow = nrow(Yleft), ncol = ncol(Yleft))
colnames(Ytleft) <- colnames(Ytright) <- colnames(Yleft)
if (any(il <- (.tleft(object)))) {
tmp <- tmpdata[il,,drop = FALSE]
tmp[[response]] <- object$tleft[il]
Ytleft[il,] <- model.matrix(model, data = tmp)
}
if (any(ir <- (.tright(object)))) {
tmp <- tmpdata[ir,,drop = FALSE]
tmp[[response]] <- object$tright[ir]
Ytright[ir,] <- model.matrix(model, data = tmp)
}
trunc <- list(left = Ytleft, right = Ytright)
}
list(Yleft = Yleft, Yright = Yright, trunc = trunc, which = which(i))
}
### Weighted empirical cumulative distribution function (adapted from
### spatstat::ewcdf): returns a right-continuous step function F with
### F(t) = sum(weights[x <= t]) / sum(weights).
.wecdf <- function(x, weights) {
    ox <- order(x)
    x <- x[ox]
    w <- weights[ox]
    vals <- sort(unique(x))
    ### aggregate weights of tied observations per unique value
    xmatch <- factor(match(x, vals), levels = seq_along(vals))
    wmatch <- tapply(w, xmatch, sum)
    wmatch[is.na(wmatch)] <- 0
    cumwt <- cumsum(wmatch) / sum(wmatch)
    ### bug fix: beyond the largest observation a normalized CDF is 1;
    ### the previous yright = sum(wmatch) returned the total weight
    approxfun(vals, cumwt, method = "constant", yleft = 0,
              yright = 1, f = 0, ties = "ordered")
}
### Generic: convert a "response" object back into a survival::Surv object.
as.Surv <- function(object)
UseMethod("as.Surv")
as.Surv.response <- function(object) {
    ### Convert a "response" object back to a survival::Surv object.
    ### Right truncation cannot be represented by Surv(), hence the guard.
    ### (A duplicated stopifnot on the same condition was removed.)
    stopifnot(all(!.tright(object)))
    exact <- .exact(object)
    cleft <- .cleft(object)
    cright <- .cright(object)
    tleft <- .tleft(object)
    if (any(tleft)) {
        ### left truncation + right censoring -> counting process
        stopifnot(all(!cright))
        tm <- ifelse(exact, object$exact, object$cleft)
        return(Surv(time = object$tleft, time2 = tm, event = exact,
                    type = "counting"))
    }
    if (any(cleft & cright) || (any(cleft) && any(cright))) {
        ### mixed / genuine interval censoring
        stopifnot(all(!tleft))
        return(Surv(time = ifelse(exact, object$exact, object$cleft),
                    time2 = ifelse(exact, object$exact, object$cright),
                    type = "interval2"))
    }
    ### any()/all() return scalars, so use scalar && here
    if (any(cleft) && all(!cright))
        return(Surv(time = ifelse(exact, object$exact, object$cleft),
                    event = exact, type = "right"))
    Surv(time = ifelse(exact, object$exact, object$cright),
         event = exact, type = "left")
}
### A "response" object is already in final form; return it as-is.
### NOTE(review): duplicate of the earlier R.response definition;
### this one overrides it when the file is sourced.
R.response <- function(object, ...)
object
### Apply R() to each list element and stack the resulting "response"
### data.frames into one response object.
### NOTE(review): do.call(".mkR", <data.frame>) relies on the rbind
### result (a data.frame) being usable as a named argument list --
### verify the column names match .mkR's expected components.
R.list <- function(object, ...) {
ret <- lapply(object, R)
do.call(".mkR", do.call("rbind", ret))
}
"[.response" <- function(x, i, j, ..., drop = FALSE) {
cls <- class(x)
class(x) <- "data.frame"
ret <- x[i,j, drop = FALSE]
class(ret) <- cls
ret
}
"[<-.response" <- function(x, i, j, value) {
cls <- class(x)
class(x) <- "data.frame"
x[i,j] <- value
class(x) <- cls
x
}
### Coerce a "response" object to a plain double vector, discarding the
### censoring structure: exact values are kept, finite intervals are
### replaced by their midpoint, and half-open intervals (-Inf, x] or
### (x, Inf) collapse to x.
as.double.response <- function(x, ...) {
    exact_v <- x$exact
    lo <- x$cleft
    hi <- x$cright
    ### zero out missing / infinite entries so the arithmetic below
    ### only ever combines finite contributions
    e0 <- exact_v
    e0[is.na(exact_v)] <- 0
    l0 <- lo
    l0[is.na(lo) | !is.finite(lo)] <- 0
    r0 <- hi
    r0[is.na(hi) | !is.finite(hi)] <- 0
    both_finite <- is.finite(hi) & is.finite(lo)
    e0 + (l0 + ifelse(both_finite, (r0 - l0) / 2, r0))
}
| /R/R.R | no_license | cran/mlt | R | false | false | 18,354 | r |
R <- function(object, ...)
UseMethod("R")
R.Surv <- function(object, as.R.ordered = FALSE, as.R.interval = FALSE, ...) {
type <- attr(object, "type")
stopifnot(type %in% c("left", "right", "interval",
"interval2", "counting"))
status <- object[, "status"]
if (as.R.interval) {
if (as.R.ordered) warning("argument as.R.ordered is ignored")
ret <- R(object, as.R.ordered = TRUE)
utm <- c(0, attr(ret, "unique_obs"), Inf)
left <- unclass(ret$cleft) + 1L
left[is.na(left)] <- 1L
right <- unclass(ret$cright) + 1L
right[is.na(right)] <- nlevels(ret$cright) + 2L
obj <- numeric(length(left))
obj[] <- NA
if (!is.ordered(ret$tleft)) {
ret <- R(object = obj, cleft = utm[left],
cright = utm[right])
} else {
tleft <- unclass(ret$tleft) + 1L
tleft[is.na(tleft)] <- 1L
ret <- R(object = obj, cleft = utm[left],
cright = utm[right], tleft = utm[tleft])
}
return(ret)
}
if (as.R.ordered && !type %in% c("right", "counting"))
stop("as.R.ordered only implemented for right-censored observations")
if (as.R.ordered && type %in% c("right", "counting")) {
### code response as ordered factor with right-censoring
### this defines the nonparametric likelihood
### for right-censored data in terms of the observed event times
tm <- if(type == "right") object[,"time"] else object[, "stop"]
### observed event times
utm <- sort(unique(tm[status == 1]))
if (all(tm[status == 0] < utm[length(utm)]))
utm <- utm[-length(utm)]
### convert to ordered factor
ct <- cut(tm, breaks = c(-Inf, utm, Inf), ordered_result = TRUE)
### events in category k contribute
### Prob(k) - Prob(k - 1)
lf <- rg <- ct
lf[status == 1] <- rg[status == 1] <- NA
### censored obs in category k contribute
### 1 - Prob(k - 1)
rg[status == 0] <- levels(ct)[nlevels(ct)]
### Note: Censoring before first event contributes
### 1 - 0 = 1 (coded as interval with cleft = NA, cright = NA)
### handle censoring times tied to event times separately
idx <- which(status != 1)
idx <- idx[!(tm[idx] %in% utm)] ### censoring tied to event
lf[idx] <- c(NA, levels(ct))[lf[idx]]
### left truncation
tl <- NA
if (type == "counting")
tl <- cut(object[, "start"], breaks = c(-Inf, utm, Inf), ordered = TRUE)
### is this a "response" representation of an ordered factor now
ret <- R(object = ct, cleft = lf, cright = rg, tleft = tl)
attr(ret, "unique_obs") <- utm
return(ret)
}
ret <- switch(type,
"right" = R(object = ifelse(status == 1, object[, "time"], NA),
cleft = ifelse(status != 1, object[, "time"], NA),
cright = ifelse(status != 1, Inf, NA)),
"left" = R(object = ifelse(status == 1, object[, "time"], NA),
cleft = ifelse(status != 1, -Inf, NA),
cright = ifelse(status != 1, object[, "time"], NA)),
"interval2" = {
ret <- cbind(left = object[, "time1"],
right = object[, "time2"])
ret$left[is.na(ret$left)] <- -Inf
ret$right[is.na(ret$right)] <- Inf
R(cleft = ret$left, cright = ret$right)
},
"interval" = {
status <- factor(status, levels = 0:3,
labels = c("right", "exact", "left", "interval"))
tmp <- matrix(NA, nrow = nrow(object), ncol = 2)
colnames(tmp) <- c("left", "right")
for (s in levels(status)) {
idx <- which(status == s)
tmp[idx, ] <- switch(s,
"right" = cbind(object[idx, "time1"], Inf),
"exact" = cbind(object[idx, "time1"], NA),
"left" = cbind(-Inf, object[idx, "time1"]),
"interval" = object[idx, c("time1", "time2")])
}
R(object = ifelse(is.na(tmp[, "right"]), tmp[, "left"], NA),
cleft = ifelse(is.na(tmp[, "right"]), NA, tmp[, "left"]),
cright = tmp[, "right"])
},
### left truncation, right censoring
"counting" = R(object = ifelse(status == 1, object[, "stop"], NA),
cleft = ifelse(status != 1, object[, "stop"], NA),
cright = ifelse(status != 1, Inf, NA),
tleft = object[, "start"])
)
attr(ret, "prob") <- function(weights) {
sf <- survival::survfit(object ~ 1, subset = weights > 0, weights = weights)
function(y) {
uy <- sort(unique(y))
s <- summary(sf, times = uy)$surv
if (length(s) < length(uy))
s <- c(s, rep(0, length(uy) - length(s)))
s[is.na(s)] <- 0
p <- 1 - s
p[match(y, uy)]
}
}
ret
}
R.factor <- function(object, ...) {
warning("response is unordered factor;
results may depend on order of levels")
return(R(as.ordered(object), ...))
}
R.ordered <- function(object, cleft = NA, cright = NA, ...) {
lev <- levels(object)
ret <- .mkR(exact = object, cleft = cleft, cright = cright, ...)
ret[is.na(ret$cright), "cright"] <- ret$exact[is.na(ret$cright)]
ret[is.na(ret$cleft), "cleft"] <- factor(unclass(object)[is.na(ret$cleft)] - 1,
levels = 1:length(lev), labels = lev, exclude = 0, ordered = TRUE)
ret$exact <- NA
ret[ret$cright == lev[nlevels(object)], "cright"] <- NA
attr(ret, "prob") <- function(weights) {
prt <- cumsum(prop.table(xtabs(weights ~ object)))
function(y) prt[y]
}
ret
}
### <FIXME> is bounds = c(min(object), Inf) the right thing, always?
### </FIXME>
R.integer <- function(object, cleft = NA, cright = NA, bounds = c(min(object), Inf), ...) {
ret <- .mkR(exact = object, cleft = cleft, cright = cright, ...)
ret$cright[is.na(ret$cright)] <- ret$exact[is.na(ret$cright)]
ret$cright[ret$cright == bounds[2]] <- NA
ret$cleft[is.na(ret$cleft)] <- ret$exact[is.na(ret$cleft)] - 1
ret$cleft[ret$cleft < bounds[1]] <- NA
ret$exact <- NA
attr(ret, "prob") <- function(weights)
.wecdf(object, weights)
ret
}
R.interval <- function(object, ...) {
breaks <- attr(object, "levels")
cleft <- breaks[-length(breaks)]
cright <- breaks[-1L]
R(cleft = cleft[object], cright = cright[object], ...)
}
### handle exact integer / factor as interval censored
R.numeric <- function(object = NA, cleft = NA, cright = NA,
tleft = NA, tright = NA, tol = sqrt(.Machine$double.eps),
as.R.ordered = FALSE, as.R.interval = FALSE, ...) {
### treat extremely small intervals as `exact' observations
d <- cright - cleft
if (any(!is.na(d) | is.finite(d))) {
if (any(d < 0, na.rm = TRUE)) stop("cleft > cright")
i <- (d < tol)
if (any(i, na.rm = TRUE)) {
i <- which(i)
object[i] <- cleft[i]
cleft[i] <- cright[i] <- NA
}
}
if (as.R.ordered && any(!is.na(cleft) || !is.na(cright)))
warning("as.R.ordered only implemented for exact observations")
if (as.R.ordered) {
### code response as ordered factor
### this defines the nonparametric likelihood
### in terms of the observed event times
utm <- sort(unique(object))
utm <- utm[-length(utm)]
### convert to ordered factor
ct <- cut(object, breaks = c(-Inf, utm, Inf), ordered = TRUE)
tl <- tr <- NA
if (!all(is.na(tleft)))
tl <- cut(tleft, breaks = c(-Inf, utm, Inf), ordered = TRUE)
if (!all(is.na(tright)))
tr <- cut(tright, breaks = c(-Inf, utm, Inf), ordered = TRUE)
ret <- R(object = ct, tleft = tl, tright = tr)
attr(ret, "unique_obs") <- utm
return(ret)
}
if (as.R.interval) {
### code response as interval-censored defining the nonparametric
### likelihood BUT keep data as numeric
utm <- sort(unique(object))
ct <- cut(object, breaks = c(-Inf, utm, Inf))
return(R(Surv(time = c(-Inf, utm)[ct], time2 = utm[ct], type = "interval2")))
}
ret <- .mkR(exact = object, cleft = cleft, cright = cright,
tleft = tleft, tright = tright)
### <FIXME>
### this fails if is.na(object) and only cleft/cright are given
# attr(ret, "prob") <- function(weights)
# .wecdf(object, weights)
attr(ret, "prob") <- function(weights)
.wecdf(ret$approxy, weights)
### we want something like survfit(Surv(... type = "interval")
### with adjustment to min(obs) = 0
### </FIXME>
ret
}
### for object = NA, ie censored observations only
R.logical <- R.numeric
R.response <- function(object, ...)
return(object)
R.default <- function(object, ...)
stop("cannot deal with response class", class(object))
.mkR <- function(...) {
args <- list(...)
cls <- unique(sapply(args, function(a) class(a)[1]))
cls <- cls[cls != "logical"]
stopifnot(length(cls) <= 1)
n <- unique(sapply(args, length))
stopifnot(length(n) <= 2)
if (length(n) == 2) stopifnot(min(n) == 1)
if (all(sapply(args, function(x) all(is.na(x))))) {
args$approxy <- NA
ret <- do.call("as.data.frame", list(x = args))
class(ret) <- c("response", class(ret))
return(ret[, c("tleft", "cleft", "exact", "cright", "tright", "approxy"), drop = FALSE])
}
ret <- do.call("as.data.frame", list(x = args))
if (is.null(ret$exact)) ret$exact <- NA
if (is.null(ret$cleft) || all(is.na(ret$cleft))) {
ret$cleft <- NA
if (is.ordered(ret$exact))
ret$cleft <- factor(ret$cleft, levels = 1:nlevels(ret$exact),
labels = levels(ret$exact), ordered = TRUE)
}
if (is.null(ret$cright) || all(is.na(ret$cright))) {
ret$cright <- NA
if (is.ordered(ret$exact))
ret$cright <- factor(ret$cright, levels = 1:nlevels(ret$exact),
labels = levels(ret$exact), ordered = TRUE)
}
if (is.null(ret$tleft)) ret$tleft <- NA
if (is.null(ret$tright)) ret$tright <- NA
if (all(is.finite(ret$exact))) {
# ret$rank <- rank(ret$exact, ties.method = "max")
ret$approxy <- ret$exact
} else {
### some meaningful ordering of observations
tmpexact <- as.numeric(ret$exact)
tmpleft <- as.numeric(ret$cleft)
tmpright <- as.numeric(ret$cright)
tmpler <- c(tmpleft, tmpexact, tmpright)
tmpleft[!is.finite(tmpleft)] <- min(tmpler[is.finite(tmpler)])
tmpright[!is.finite(tmpright)] <- max(tmpler[is.finite(tmpler)])
tmpexact[is.na(tmpexact)] <-
(tmpleft + ((tmpright - tmpleft) / 2))[is.na(tmpexact)]
# ret$rank <- rank(tmpexact, ties.method = "max")
ret$approxy <- tmpexact
}
class(ret) <- c("response", class(ret))
ret[, c("tleft", "cleft", "exact", "cright", "tright", "approxy"), drop = FALSE]
}
.exact <- function(object)
!is.na(object$exact)
.cleft <- function(object)
is.finite(object$cleft)
.cright <- function(object)
is.finite(object$cright)
.cinterval <- function(object)
!.exact(object)
# .cleft(object) | .cright(object)
.tleft <- function(object)
is.finite(object$tleft)
.tright <- function(object)
is.finite(object$tright)
.tinterval <- function(object)
.tleft(object) | .tright(object)
.mm_exact <- function(model, data, response, object) {
e <- .exact(object)
if (!any(e)) return(NULL)
tmp <- data[e,,drop = FALSE]
tmp[[response]] <- object$exact[e]
Y <- model.matrix(model, data = tmp)
deriv <- 1
names(deriv) <- response
Yprime <- model.matrix(model, data = tmp, deriv = deriv)
.matrix <- matrix
if (inherits(Y, "Matrix"))
.matrix <- function(...) Matrix(..., sparse = TRUE)
trunc <- NULL
if (any(.tinterval(object) & e)) {
Ytleft <- .matrix(-Inf, nrow = nrow(Y), ncol = ncol(Y))
Ytright <- .matrix(Inf, nrow = nrow(Y), ncol = ncol(Y))
if (any(il <- (.tleft(object) & e))) {
tmp <- data[il,]
tmp[[response]] <- object$tleft[il]
Ytleft[.tleft(object)[e],] <- model.matrix(model, data = tmp)
}
if (any(ir <- (.tright(object) & e))) {
tmp <- data[ir,,drop = FALSE]
tmp[[response]] <- object$tright[ir]
Ytright[.tright(object)[e],] <- model.matrix(model, data = tmp)
}
trunc <- list(left = Ytleft, right = Ytright)
}
list(Y = Y, Yprime = Yprime, trunc = trunc, which = which(e))
}
.mm_interval <- function(model, data, response, object) {
i <- .cinterval(object)
if (!any(i)) return(NULL)
tmpdata <- data[i,,drop = FALSE]
object <- object[i,, drop = FALSE]
Yleft <- NULL
if (any(il <- .cleft(object))) {
tmp <- tmpdata[il,,drop = FALSE]
tmp[[response]] <- object$cleft[il]
Ytmp <- model.matrix(model, data = tmp)
.matrix <- matrix
if (inherits(Ytmp, "Matrix"))
.matrix <- function(...) Matrix(..., sparse = TRUE)
Yleft <- .matrix(-Inf, nrow = length(il), ncol = ncol(Ytmp))
colnames(Yleft) <- colnames(Ytmp)
rownames(Yleft) <- rownames(tmpdata)
Yleft[il,] <- Ytmp
attr(Yleft, "constraint") <- attr(Ytmp, "constraint")
attr(Yleft, "Assign") <- attr(Ytmp, "Assign")
}
Yright <- NULL
if (any(ir <- .cright(object))) {
tmp <- tmpdata[ir,, drop = FALSE]
tmp[[response]] <- object$cright[ir]
Ytmp <- model.matrix(model, data = tmp)
.matrix <- matrix
if (inherits(Ytmp, "Matrix"))
.matrix <- function(...) Matrix(..., sparse = TRUE)
Yright <- .matrix(Inf, nrow = length(ir), ncol = ncol(Ytmp))
colnames(Yright) <- colnames(Ytmp)
rownames(Yright) <- rownames(tmpdata)
Yright[ir,] <- Ytmp
attr(Yright, "constraint") <- attr(Ytmp, "constraint")
attr(Yright, "Assign") <- attr(Ytmp, "Assign")
}
if (is.null(Yright)) {
Yright <- matrix(Inf, nrow = nrow(Yleft), ncol = ncol(Yleft))
colnames(Yright) <- colnames(Yleft)
attr(Yright, "constraint") <- attr(Yleft, "constraint")
attr(Yright, "Assign") <- attr(Yleft, "Assign")
}
if (is.null(Yleft)) {
Yleft <- matrix(-Inf, nrow = nrow(Yright), ncol = ncol(Yright))
colnames(Yleft) <- colnames(Yright)
attr(Yleft, "constraint") <- attr(Yright, "constraint")
attr(Yleft, "Assign") <- attr(Yright, "Assign")
}
trunc <- NULL
if (any(.tinterval(object))) {
.matrix <- matrix
if (inherits(Yleft, "Matrix"))
.matrix <- function(...) Matrix(..., sparse = TRUE)
Ytleft <- .matrix(-Inf, nrow = nrow(Yleft), ncol = ncol(Yleft))
Ytright <- .matrix(Inf, nrow = nrow(Yleft), ncol = ncol(Yleft))
colnames(Ytleft) <- colnames(Ytright) <- colnames(Yleft)
if (any(il <- (.tleft(object)))) {
tmp <- tmpdata[il,,drop = FALSE]
tmp[[response]] <- object$tleft[il]
Ytleft[il,] <- model.matrix(model, data = tmp)
}
if (any(ir <- (.tright(object)))) {
tmp <- tmpdata[ir,,drop = FALSE]
tmp[[response]] <- object$tright[ir]
Ytright[ir,] <- model.matrix(model, data = tmp)
}
trunc <- list(left = Ytleft, right = Ytright)
}
list(Yleft = Yleft, Yright = Yright, trunc = trunc, which = which(i))
}
.wecdf <- function(x, weights) {
### from: spatstat::ewcdf
ox <- order(x)
x <- x[ox]
w <- weights[ox]
vals <- sort(unique(x))
xmatch <- factor(match(x, vals), levels = seq_along(vals))
wmatch <- tapply(w, xmatch, sum)
wmatch[is.na(wmatch)] <- 0
cumwt <- cumsum(wmatch) / sum(wmatch)
approxfun(vals, cumwt, method = "constant", yleft = 0,
yright = sum(wmatch), f = 0, ties = "ordered")
}
as.Surv <- function(object)
UseMethod("as.Surv")
### Convert a "response" object back into a survival::Surv object.
### Right-truncated observations cannot be represented by Surv() and are
### rejected. The Surv 'type' is chosen from the censoring pattern:
### left-truncation -> "counting", mixed censoring -> "interval2",
### only left-open intervals -> "right", otherwise -> "left".
as.Surv.response <- function(object) {
    ### Surv() has no encoding for right truncation
    ### (duplicate of this check further down was removed)
    stopifnot(all(!.tright(object)))
    exact <- .exact(object)
    cleft <- .cleft(object)
    cright <- .cright(object)
    tleft <- .tleft(object)
    ### left-truncated observations: counting-process notation (start, stop]
    if (any(tleft)) {
        stopifnot(all(!cright))
        tm <- ifelse(exact, object$exact, object$cleft)
        return(Surv(time = object$tleft, time2 = tm, event = exact,
                    type = "counting"))
    }
    ### both censoring directions present: interval censoring
    if (any(cleft & cright) || (any(cleft) && any(cright))) {
        stopifnot(all(!tleft))
        return(Surv(time = ifelse(exact, object$exact, object$cleft),
                    time2 = ifelse(exact, object$exact, object$cright),
                    type = "interval2"))
    }
    ### only left-open intervals (x, Inf): right censoring
    ### (scalar condition, hence && instead of elementwise &)
    if (any(cleft) && all(!cright))
        return(Surv(time = ifelse(exact, object$exact, object$cleft),
                    event = exact, type = "right"))
    ### only right-open intervals (-Inf, x]: left censoring
    return(Surv(time = ifelse(exact, object$exact, object$cright),
                event = exact, type = "left"))
}
### R() method for objects that already are a "response": identity.
R.response <- function(object, ...)
    object
### R() method for lists: convert each element to a response object and
### row-bind all of them into one response via .mkR().
### NOTE(review): do.call(".mkR", <data.frame>) forwards the *columns*
### of the rbind() result as named arguments to .mkR -- assumes .mkR's
### formal argument names match the response column names; confirm.
R.list <- function(object, ...) {
    ret <- lapply(object, R)
    do.call(".mkR", do.call("rbind", ret))
}
"[.response" <- function(x, i, j, ..., drop = FALSE) {
cls <- class(x)
class(x) <- "data.frame"
ret <- x[i,j, drop = FALSE]
class(ret) <- cls
ret
}
"[<-.response" <- function(x, i, j, value) {
cls <- class(x)
class(x) <- "data.frame"
x[i,j] <- value
class(x) <- cls
x
}
### Coerce a response to a plain double vector (drop all censoring):
### exact observations keep their value, finite intervals (l, u] collapse
### to their midpoint, half-open intervals to their finite endpoint.
as.double.response <- function(x, ...) {
    exact <- x$exact
    lower <- x$cleft
    upper <- x$cright
    ### zero out NA / +-Inf entries so the three parts can simply be
    ### summed; for each observation only the relevant parts are nonzero
    e <- exact
    l <- lower
    u <- upper
    e[is.na(e)] <- 0
    l[!is.finite(l)] <- 0
    u[!is.finite(u)] <- 0
    ### finite interval (l, u] -> l + (u - l) / 2;
    ### (-Inf, u] -> u and (l, Inf) -> l
    half <- ifelse(is.finite(upper) & is.finite(lower), (u - l) / 2, u)
    e + (l + half)
}
|
#----------------------------------------------------------------------------------------------
# Script: Calculations on 12.11.2014
#
# Script to paper by Langanki, Schulz 2015
# R version 3.1.2 (2014-10-31), RStudio 0.98.1103
#
# Last modified: 22.05.2015
# Package dependencies:
# FGClimatology
#
# Contact: schulz.1@campus.tu-berlin.de
#----------------------------------------------------------------------------------------------
summary(Temp_day2)
#----------------------------------------------------------------------------------------------
# Minimum values on day 2
# (columns 8/9/10 presumably hold the temperatures of the 20/40/50 cm
#  sensors and dwd_day2 column 2 the DWD reference series -- assumed
#  from the variable names, TODO confirm against the data documentation)
#----------------------------------------------------------------------------------------------
# for each series: row index of the minimum, the full observation row,
# and the minimum temperature itself
min_20_row <- which.min(Temp_day2[, 8])
min_20 <- Temp_day2[min_20_row, ]
min_20_temp <- min(Temp_day2[, 8])
min_40_row <- which.min(Temp_day2[, 9])
min_40 <- Temp_day2[min_40_row, ]
min_40_temp <- min(Temp_day2[, 9])
min_50_row <- which.min(Temp_day2[, 10])
min_50 <- Temp_day2[min_50_row, ]
min_50_temp <- min(Temp_day2[, 10])
min_dwd_row <- which.min(dwd_day2[, 2])
min_dwd <- dwd_day2[min_dwd_row, ]
min_dwd_temp <- min(dwd_day2[, 2])
min_values <- c(min_20_temp, min_40_temp, min_50_temp, min_dwd_temp)
#----------------------------------------------------------------------------------------------
# The same for the maximum values
#----------------------------------------------------------------------------------------------
max_20_row <- which.max(Temp_day2[, 8])
max_20 <- Temp_day2[max_20_row, ]
max_20_temp <- max(Temp_day2[, 8])
max_40_row <- which.max(Temp_day2[, 9])
max_40 <- Temp_day2[max_40_row, ]
max_40_temp <- max(Temp_day2[, 9])
max_50_row <- which.max(Temp_day2[, 10])
max_50 <- Temp_day2[max_50_row, ]
max_50_temp <- max(Temp_day2[, 10])
max_dwd_row <- which.max(dwd_day2[, 2])
max_dwd <- dwd_day2[max_dwd_row, ]
max_dwd_temp <- max(dwd_day2[, 2])
# reuse the max_*_temp variables for consistency with min_values above
# (previously the maxima were recomputed inline here)
max_values <- c(max_20_temp, max_40_temp, max_50_temp, max_dwd_temp)
str(Temp_all)
summary(Temp_day2)
| /code/temp.calculations.R | no_license | gavin971/r_dev_messkonzept | R | false | false | 1,984 | r | #----------------------------------------------------------------------------------------------
# Script: Calculations am 12.11.2014
#
# Script to paper by Langanki, Schulz 2015
# R version 3.1.2 (2014-10-31), RStudio 0.98.1103
#
# Last modified: 22.05.2015
# Package dependencies:
# FGClimatology
#
# Contact: schulz.1@campus.tu-berlin.de
#----------------------------------------------------------------------------------------------
summary(Temp_day2)
#----------------------------------------------------------------------------------------------
# Mindestwerte am Tag 2
#----------------------------------------------------------------------------------------------
# Vector position
min_20_row <- which.min(Temp_day2[,8])
min_20 <- Temp_day2[min_20_row,]
min_20_temp <- min(Temp_day2[,8])
min_40_row <- which.min(Temp_day2[,9])
min_40 <- Temp_day2[min_40_row,]
min_40_temp <- min(Temp_day2[,9])
min_50_row <- which.min(Temp_day2[,10])
min_50 <- Temp_day2[min_50_row,]
min_50_temp <- min(Temp_day2[,10])
min_dwd_row <- which.min(dwd_day2[,2])
min_dwd <- dwd_day2[min_dwd_row,]
min_dwd_temp <- min(dwd_day2[,2])
min_values <- c ( min_20_temp, min_40_temp, min_50_temp, min_dwd_temp )
#----------------------------------------------------------------------------------------------
# The same goes for max values
#----------------------------------------------------------------------------------------------
max_20_row <- which.max(Temp_day2[,8])
max_20 <- Temp_day2[max_20_row,]
max_20_temp <- max(Temp_day2[,8])
max_40_row <- which.max(Temp_day2[,9])
max_40 <- Temp_day2[max_40_row,]
max_40_temp <- max(Temp_day2[,9])
max_50_row <- which.max(Temp_day2[,10])
max_50 <- Temp_day2[max_50_row,]
max_50_temp <- max(Temp_day2[,10])
max_dwd_row <- which.max(dwd_day2[,2])
max_dwd <- dwd_day2[max_dwd_row,]
max_dwd_temp <- max(dwd_day2[,2])
max_values <- c (max(Temp_day2[,8]),max(Temp_day2[,9]),max(Temp_day2[,10]),max(dwd_day2[,2]))
str(Temp_all)
summary(Temp_day2)
|
#' @title Analysis of residual sum of squares (AoRSS)
#'
#' @description Returns the test statistic and p-value corresponding to a method of comparing a number of curves called the analysis of residual sum of squares (AoRSS). The null hypothesis of the test is that the K curves can be assumed coincident.
#'
#' @param RSSi a vector containing for each data set the sum of squared residuals calculated after fitting the corresponding curve.
#' @param RSSp the sum of squared residuals for the fit of the curve for the pooled data.
#' @param K the number of curves being compared.
#' @param N the total or pooled sample size.
#' @details The function returns the test statistic and p-value corresponding to the following test. Previously to compute the statistic:\itemize{
#' \item For each data set i, fit a curve and calculate the sum of squared residuals, RSSi.
#' \item Data for all curves are pooled, a new curve is fitted to the combined data, and the total or pooled RSSp is calculated.}
#' Now, all the necessary terms for computing our F-statistic are available and F is equal to:
#' \deqn{((RSSp-sum(RSSi))/(3(K-1)))/((sum(RSSi))/(N-3K)),}
#' where F is the F statistic with 3.(K – 1) and (N – 3.K) degrees of freedom, K is the number of curves being compared, and N is the total or pooled sample size.
#' Remember that the null hypothesis is that the K curves can be assumed coincident.
#' @return The value of the F-statistic and the corresponding p-value.
#' @references Haddon, Malcolm. (2011). Modelling and Quantitative Methods in Fisheries 2nd Edition.
#' @author
#' \itemize{
#' \item{Marta Cousido-Rocha}
#' \item{Santiago Cerviño López}
#' }
#' @examples
#' # An example based on the age length data relating
#' # to Pacific hake with separate data for both males
#' # and females (Example Table 9.3 of Haddon 2011,
#' # Quantitative Methods of Fisheries). The question
#' # is whether the male and female Pacific hake exhibit
#' # different growth throughout their lives. This is, we
#' # testing if the growth curves for males and females
#' # are coincident or not?
#' RSSi=c(28.8003778903944, 19.4233877094241)
#' RSSp=79.7645155773056
#' K=2
#' N=24
#' AoRSS.test(RSSi,RSSp,K,N)
#' @export
AoRSS.test <- function(RSSi, RSSp, K, N) {
    ### F statistic: extra sum of squares of the pooled fit relative to
    ### the separate fits, per degree of freedom (3 parameters per curve).
    ### The local variable is named 'fstat' rather than 'F' -- 'F' masks
    ### the built-in FALSE shorthand and is a classic R footgun.
    numerator <- (RSSp - sum(RSSi)) / (3 * (K - 1))
    denominator <- sum(RSSi) / (N - 3 * K)
    ### abs() guards against tiny negative numerators from round-off;
    ### in theory RSSp >= sum(RSSi) always holds
    fstat <- abs(numerator / denominator)
    ### p-value from the F distribution with 3(K-1) and (N-3K) df
    df1 <- 3 * (K - 1)
    df2 <- N - 3 * K
    p.value <- 1 - stats::pf(fstat, df1, df2)
    ### return list keeps the documented element names
    list(F.statistic = fstat, p.value = p.value)
}
| /R/AoRSS.R | no_license | IMPRESSPROJECT/AoRSS | R | false | false | 2,573 | r | #' @title Analysis of residual sum of squares (AoRSS)
#'
#' @description Returns the test statistic and p-value corresponding to a method of comparing a number of curves called the analysis of residual sum of squares (AoRSS). The null hypothesis of test is that the K curves can be assumed coincident.
#'
#' @param RSSi a vector containing for each data set the sum of squared residuals calculated after fitting the corresponding curve.
#' @param RSSp the sum of squared residuals for the fit of the curve for the pooled data.
#' @param K the number of curves being compared.
#' @param N the total or pooled sample size.
#' @details The function returns the test statistic and p-value corresponding to the following test. Previously to compute the statistic:\itemize{
#' \item For each data set i, fit a curve and calculate the sum of squared residuals, RSSi.
#' \item Data for all curves are pooled, a new curve is fitted to the combined data, and the total or pooled RSSp is calculated.}
#' Now, all the necessary terms for computing our F-statistic are available and F is equal to:
#' \deqn{((RSSp-sum(RSSi))/(3(K-1)))/((sum(RSSi))/(N-3K)),}
#' where F is the F statistic with 3.(K – 1) and (N – 3.K) degrees of freedom, K is the number of curves being compared, and N is the total or pooled sample size.
#' Remember that the null hypothesis is that the K curves can be assumed coincident.
#' @return The value of the F-statistic and the corresponding p-value.
#' @references Haddon, Malcolm. (2011). Modelling and Quantitative Methods in Fisheries 2nd Edition.
#' @author
#' \itemize{
#' \item{Marta Cousido-Rocha}
#' \item{Santiago Cerviño López}
#' }
#' @examples
#' # An example based on the age length data relating
#' # to Pacific hake with separate data for both males
#' # and females (Example Table 9.3 of Haddon 2011,
#' # Quantitative Methods of Fisheries). The question
#' # is whether the male and female Pacific hake exhibit
#' # different growth throughout their lives. This is, we
#' # testing if the growth curves for males and females
#' # are coincident or not?
#' RSSi=c(28.8003778903944, 19.4233877094241)
#' RSSp=79.7645155773056
#' K=2
#' N=24
#' AoRSS.test(RSSi,RSSp,K,N)
#' @export
AoRSS.test<-function(RSSi,RSSp,K,N){
# F statistic
numerator=(RSSp-sum(RSSi))/(3*(K-1))
denominator=(sum(RSSi)/(N-3*K))
F=abs(numerator/denominator)
# p-value
df1=3*(K-1)
df2=N-3*K
p.value=1-stats::pf(F,df1,df2)
# Results
res<-list(F.statistic=F,p.value=p.value)
return(res)
}
|
# Load the Upshot/Siena live-poll results for AZ-06 and tabulate the
# candidate responses by education level.
library(tidyverse)
library(devtools)
# NOTE(review): installing a package at the top of a script re-installs it
# on every run (and requires network access); consider moving this to a
# one-time setup step. 'gt' is not used below.
install_github("rstudio/gt")
# read the raw poll CSV; everything is character except the numeric
# weight/turnout columns and the timestamp
orig <- read_csv(file = "https://raw.githubusercontent.com/TheUpshot/2018-live-poll-results/master/data/elections-poll-az06-3.csv",
                 col_types = cols(
                   .default = col_character(),
                   turnout_scale = col_double(),
                   turnout_score = col_double(),
                   w_LV = col_double(),
                   w_RV = col_double(),
                   final_weight = col_double(),
                   timestamp = col_datetime(format = "")))
# count responses within each education level, drop refusals, and put one
# response per column (result is printed at top level, not assigned)
orig %>% select(response, educ4, final_weight) %>%
  group_by(educ4, response) %>%
  summarize(total = n()) %>%
  filter(!educ4 == "[DO NOT READ] Don't know/Refused") %>%
  spread(key = response, value = total)
| /R.Script-02-21.R | no_license | henryzhu88/class-2019-02-21 | R | false | false | 767 | r | library(tidyverse)
library(devtools)
install_github("rstudio/gt")
orig <- read_csv(file = "https://raw.githubusercontent.com/TheUpshot/2018-live-poll-results/master/data/elections-poll-az06-3.csv",
col_types = cols(
.default = col_character(),
turnout_scale = col_double(),
turnout_score = col_double(),
w_LV = col_double(),
w_RV = col_double(),
final_weight = col_double(),
timestamp = col_datetime(format = "")))
orig%>% select(response,educ4,final_weight)%>%
group_by(educ4, response)%>%
summarize(total=n())%>%
filter(!educ4== "[DO NOT READ] Don't know/Refused")%>%
spread(key=response,value=total)
|
# split_into_root_folder_file_extension ----------------------------------------
#' Split Full Paths into Root, Folder, File and Extension
#'
#' @param paths vector of character representing full file paths
#' @param n_root_parts number of first path segments considered as "root"
#' @return data frame with columns \code{root}, \code{folder}, \code{file},
#' \code{extension}, \code{depth}
#' @export
#' @examples
#' paths <- c(
#' "//always/the/same/root/project-1/intro.doc",
#' "//always/the/same/root/project-1/logo.png",
#' "//always/the/same/root/project-2/intro.txt",
#' "//always/the/same/root/project-2/planning/file-1.doc",
#' "//always/the/same/root/project-2/result/report.pdf"
#' )
#'
#' split_into_root_folder_file_extension(paths)
#' split_into_root_folder_file_extension(paths, n_root_parts = 6)
#' split_into_root_folder_file_extension(paths, n_root_parts = 7)
#'
split_into_root_folder_file_extension <- function(paths, n_root_parts = 0)
{
  ### split each path into its "/"-separated segments
  parts <- split_paths(paths)
  ### every path needs at least n_root_parts segments plus a file name;
  ### otherwise shrink n_root_parts so the shortest path still has a file
  min_depth <- min(lengths(parts))
  if (min_depth < n_root_parts + 1) {
    message(sprintf(
      "Setting n_root_parts to %d (was: %d) due to too few path segments",
      min_depth - 1, n_root_parts
    ))
    n_root_parts <- min_depth - 1
  }
  ### build one character row (root, folder, file, extension) per path
  result <- do.call(rbind, lapply(parts, function(x) {
    ### collapse the selected segments back into a path string
    paste_path <- function(indices) paste(x[indices], collapse = "/")
    first_indices <- seq_len(n_root_parts)
    n_parts <- length(x)
    file <- x[n_parts]
    c(
      root = paste_path(first_indices),
      ### everything between the root segments and the file name
      folder = paste_path(- c(first_indices, n_parts)),
      file = file,
      extension = kwb.utils::fileExtension(file)
    )
  }))
  ### convert to a (non-factor) data frame and append the path depth,
  ### i.e. the number of segments below the root
  kwb.utils::setColumns(
    kwb.utils::asNoFactorDataFrame(result),
    depth = lengths(parts) - n_root_parts,
    dbg = FALSE
  )
}
| /R/split_into_root_folder_file_extension.R | permissive | KWB-R/kwb.file | R | false | false | 1,873 | r | # split_into_root_folder_file_extension ----------------------------------------
#' Split Full Paths into Root, Folder, File and Extension
#'
#' @param paths vector of character representing full file paths
#' @param n_root_parts number of first path segments considered as "root"
#' @return data frame with columns \code{root}, \code{folder}, \code{file},
#' \code{extension}, \code{depth}
#' @export
#' @examples
#' paths <- c(
#' "//always/the/same/root/project-1/intro.doc",
#' "//always/the/same/root/project-1/logo.png",
#' "//always/the/same/root/project-2/intro.txt",
#' "//always/the/same/root/project-2/planning/file-1.doc",
#' "//always/the/same/root/project-2/result/report.pdf"
#' )
#'
#' split_into_root_folder_file_extension(paths)
#' split_into_root_folder_file_extension(paths, n_root_parts = 6)
#' split_into_root_folder_file_extension(paths, n_root_parts = 7)
#'
split_into_root_folder_file_extension <- function(paths, n_root_parts = 0)
{
parts <- split_paths(paths)
# All paths must be at least n_root_parts + 1 segments long
min_depth <- min(lengths(parts))
if (min_depth < n_root_parts + 1) {
message(sprintf(
"Setting n_root_parts to %d (was: %d) due to too few path segments",
min_depth - 1, n_root_parts
))
n_root_parts <- min_depth - 1
}
result <- do.call(rbind, lapply(parts, function(x) {
paste_path <- function(indices) paste(x[indices], collapse = "/")
first_indices <- seq_len(n_root_parts)
n_parts <- length(x)
file <- x[n_parts]
c(
root = paste_path(first_indices),
folder = paste_path(- c(first_indices, n_parts)),
file = file,
extension = kwb.utils::fileExtension(file)
)
}))
kwb.utils::setColumns(
kwb.utils::asNoFactorDataFrame(result),
depth = lengths(parts) - n_root_parts,
dbg = FALSE
)
}
|
#------------------------------------------------------------------------------
# Load the MusicBrainz database dump, join releases, release groups and
# artists into one album-level table plus one artist-level table, and
# write both tables to tab-separated text files.
#------------------------------------------------------------------------------
library(readr)
library(data.table)

########################## CONSTANTS ##########################
address <- "~/Documents/experimento_doutorado/"
### directory holding the raw MusicBrainz dump files (intentionally a
### different location from 'address', as in the original script)
dump_dir <- "~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/"

### All dump files share one layout: tab-separated, no header row,
### missing values coded as "\N" -- factored out of ten identical calls.
read_mb <- function(name) {
    read_delim(paste0(dump_dir, name), "\t", escape_double = FALSE,
               col_names = FALSE, trim_ws = TRUE, na = "\\N")
}

################################### RELEASE ###################################
release <- read_mb("release")
names(release) <- c("id", "gid", "name", "artist_credit", "release_group",
                    "status", "packaging", "language", "script", "barcode",
                    "comment", "edits_pending", "quality", "last_updated")

################################ RELEASE GROUP ################################
release_group <- read_mb("release_group")
names(release_group) <- c("id", "gid", "name", "artist_credit", "type",
                          "comment", "edits_pending", "last_updated")

########################### RELEASE X RELEASE GROUP ###########################
release <- merge(release, release_group, by.x = "release_group", by.y = "id")
### keep release id + language plus the release-group name/credit/type
### (columns selected by position in the merge result, as before)
release <- release[, c(2, 8, 16, 17, 18)]
names(release) <- c("release.id", "release.language", "release.name",
                    "release.artist_credit", "release.type")

################################### ARTIST ####################################
artist <- read_mb("artist")
names(artist) <- c("id", "gid", "name", "sort_name", "begin_date_year",
                   "begin_date_month", "begin_date_day", "end_date_year",
                   "end_date_month", "end_date_day", "type", "area", "gender",
                   "comment", "edits_pending", "last_updated", "ended_char",
                   "begin_area", "end_area")
### recode the "t"/"f" character flag as a logical column
artist$ended <- FALSE
artist[artist[, "ended_char"] == "t", "ended"] <- TRUE
artist <- artist[, c(1, 3, 5, 8, 11, 12, 13, 20)]

############################# ARTIST CREDIT NAME ##############################
artist_credit_name <- read_mb("artist_credit_name")
names(artist_credit_name) <- c("artist_credit", "position", "artist", "name",
                               "join_phrase")

######################### ARTIST X ARTIST CREDIT NAME #########################
artist <- merge(artist, artist_credit_name, by.x = "id", by.y = "artist")
artist <- artist[, c(1:9)]
names(artist) <- c("artist.id", "artist.name", "artist.begin_date_year",
                   "artist.end_date_year", "artist.type", "artist.area",
                   "artist.gender", "artist.ended", "artist.artist_credit")

######################### ALBUMS = ARTIST X RELEASE ###########################
albums <- merge(artist, release,
                by.x = "artist.artist_credit", by.y = "release.artist_credit")
albums$artist.artist_credit <- NULL

### Language: resolve the language id to its name (left join)
language <- read_mb("language")
language <- language[, c(1, 5)]
names(language) <- c("language.id", "language.name")
albums <- merge(albums, language,
                by.x = "release.language", by.y = "language.id", all.x = TRUE)
albums$release.language <- NULL

### Gender: resolve the gender id to its name (left join)
gender <- read_mb("gender")
gender <- gender[, c(1, 2)]
names(gender) <- c("gender.id", "gender.name")
albums <- merge(albums, gender,
                by.x = "artist.gender", by.y = "gender.id", all.x = TRUE)
albums$artist.gender <- NULL

### Release type (inner join, as before: albums without a primary type
### are dropped here)
release_group_primary_type <- read_mb("release_group_primary_type")
release_group_primary_type <- release_group_primary_type[, c(1, 2)]
names(release_group_primary_type) <- c("release_type.id", "release_type.name")
albums <- merge(albums, release_group_primary_type,
                by.x = "release.type", by.y = "release_type.id")
albums$release.type <- NULL

### Artist type (left join)
artist_type <- read_mb("artist_type")
artist_type <- artist_type[, c(1, 2)]
names(artist_type) <- c("artist_type.id", "artist_type.name")
albums <- merge(albums, artist_type,
                by.x = "artist.type", by.y = "artist_type.id", all.x = TRUE)
albums$artist.type <- NULL

########################### RELEASE COUNTRY AND DATE ##########################
release_country <- read_mb("release_country")
names(release_country) <- c("release.id", "country.id", "release.date_year",
                            "release.date_month", "release.date_day")
albums <- merge(albums, release_country, by = "release.id", all.x = TRUE)
albums$release.date_month <- NULL
albums$release.date_day <- NULL

#################################### AREA #####################################
area <- read_mb("area")
names(area) <- c("area.id", "area.gid", "area.name", "area.type",
                 "area.edits_pending", "area.last_updated",
                 "area.begin_date_year", "area.begin_date_month",
                 "area.begin_date_day", "area.end_date_year",
                 "area.end_date_month", "area.end_date_day", "area.ended",
                 "area.comment")
area <- area[, c(1, 3, 4)]
albums <- merge(albums, area, by.x = "country.id", by.y = "area.id",
                all.x = TRUE)
albums$country.id <- NULL

area_type <- read_mb("area_type")
names(area_type) <- c("area_type.id", "area_type.name", "area_type.parent",
                      "area_type.child_order", "area_type.description",
                      "area_type.gid")
area_type <- area_type[, c(1, 2)]
albums <- merge(albums, area_type, by.x = "area.type", by.y = "area_type.id",
                all.x = TRUE)
albums$area.type <- NULL
albums$artist.area <- NULL

################################### WRITING ###################################
### BUGFIX: the separator must be passed as 'sep = "\t"'. Passed
### positionally (as before) it lands on write.table()'s third formal,
### 'append', and the call fails instead of writing a tab-separated file.
write.table(albums,
            "~/Documentos/Experimento Doutorado/bases de dados/experimento/mb_albums.txt",
            row.names = FALSE, col.names = TRUE, sep = "\t")

###############################################################################
########################## Artist Load (no album) #############################
###############################################################################
artist <- fread(paste0(address, "bases de dados/MusicBrainz/mbdump/artist"),
                sep = "\t",
                verbose = TRUE,
                na.strings = "\\N")
names(artist) <- c("id", "gid", "name", "sort_name", "begin_date_year",
                   "begin_date_month", "begin_date_day", "end_date_year",
                   "end_date_month", "end_date_day", "type", "area", "gender",
                   "comment", "edits_pending", "last_updated", "ended_char",
                   "begin_area", "end_area")
artist$ended <- artist[, "ended_char"] == "t"
artist <- artist[, c(1, 3, 5, 8, 11, 12, 13, 20)]
### The area ids are kept as categorical codes; the former commented-out
### blocks that resolved them to area names were removed as dead code.

################################### WRITING ###################################
fwrite(artist,
       paste0(address, "bases de dados/experimento/mb_artists.txt"),
       row.names = FALSE, col.names = TRUE, sep = "\t")
| /Scripts/MusicBrainz_Load.R | no_license | ricooliveira/moad_old | R | false | false | 11,042 | r | library(readr)
library(data.table)
########################## CONSTANTS ##########################
address <- "~/Documents/experimento_doutorado/"
################################### RELEASE ###################################
release <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/release",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
names(release) = c("id", "gid", "name", "artist_credit", "release_group", "status", "packaging", "language", "script", "barcode",
"comment", "edits_pending", "quality", "last_updated")
################################### RELEASE GROUP ###################################
release_group <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/release_group",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
names(release_group) = c("id", "gid", "name", "artist_credit", "type", "comment", "edits_pending", "last_updated")
################################### RELEASE X RELEASE GROUP ###################################
release = merge(release,release_group,by.x = "release_group", by.y = "id")
release = release[,c(2,8,16,17,18)]
names(release) = c("release.id", "release.language", "release.name", "release.artist_credit", "release.type")
################################### ARTIST ###################################
artist <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/artist",
"\t", escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
names(artist) = c("id", "gid", "name", "sort_name", "begin_date_year", "begin_date_month", "begin_date_day", "end_date_year",
"end_date_month", "end_date_day", "type", "area", "gender", "comment", "edits_pending", "last_updated",
"ended_char", "begin_area", "end_area")
artist$ended = FALSE
artist[artist[,"ended_char"] == "t","ended"] = TRUE
artist = artist[,c(1, 3, 5, 8, 11, 12, 13, 20)]
################################### ARTIST CREDIT NAME ###################################
artist_credit_name <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/artist_credit_name",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
names(artist_credit_name) = c("artist_credit", "position", "artist", "name", "join_phrase")
################################### ARTIST X ARTIST CREDIT NAME ###################################
artist = merge(artist, artist_credit_name, by.x = "id", by.y = "artist")
artist = artist[,c(1:9)]
names(artist) = c("artist.id", "artist.name", "artist.begin_date_year", "artist.end_date_year", "artist.type","artist.area", "artist.gender", "artist.ended", "artist.artist_credit")
################################### ALBUMS = ARTIST X RELEASE ###################################
albums = merge(artist, release, by.x = "artist.artist_credit", by.y = "release.artist_credit")
albums$artist.artist_credit = NULL
# Language
language <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/language",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
language = language[,c(1,5)]
names(language) = c("language.id", "language.name")
albums = merge(albums, language, by.x = "release.language", by.y = "language.id", all.x = TRUE)
albums$release.language = NULL
# Gender
gender <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/gender",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
gender = gender[,c(1,2)]
names(gender) = c("gender.id", "gender.name")
albums = merge(albums, gender, by.x = "artist.gender", by.y = "gender.id", all.x = TRUE)
albums$artist.gender = NULL
# Release Type
release_group_primary_type <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/release_group_primary_type",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
release_group_primary_type = release_group_primary_type[,c(1,2)]
names(release_group_primary_type) = c("release_type.id", "release_type.name")
albums = merge(albums, release_group_primary_type, by.x = "release.type", by.y = "release_type.id")
albums$release.type = NULL
# Artist Type
artist_type <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/artist_type",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
artist_type = artist_type[,c(1,2)]
names(artist_type) = c("artist_type.id", "artist_type.name")
albums = merge(albums, artist_type, by.x = "artist.type", by.y = "artist_type.id", all.x = TRUE)
albums$artist.type = NULL
################################### RELEASE COUNTRY AND DATE ###################################
release_country <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/release_country",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
names(release_country) = c("release.id", "country.id", "release.date_year", "release.date_month", "release.date_day")
albums = merge(albums, release_country, by = "release.id", all.x = TRUE)
albums$release.date_month = NULL
albums$release.date_day = NULL
################################### AREA ###################################
area <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/area",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
names(area) = c("area.id", "area.gid", "area.name", "area.type", "area.edits_pending", "area.last_updated", "area.begin_date_year",
"area.begin_date_month", "area.begin_date_day", "area.end_date_year", "area.end_date_month",
"area.end_date_day", "area.ended", "area.comment")
area = area[,c(1, 3, 4)]
albums = merge(albums, area, by.x = "country.id", by.y = "area.id", all.x = TRUE)
albums$country.id = NULL
area_type <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/area_type",
"\t",
escape_double = FALSE,
col_names = FALSE,
trim_ws = TRUE,
na = "\\N")
names(area_type) = c("area_type.id", "area_type.name", "area_type.parent", "area_type.child_order", "area_type.description",
"area_type.gid")
area_type = area_type[,c(1,2)]
albums = merge(albums, area_type, by.x = "area.type", by.y = "area_type.id", all.x = TRUE)
albums$area.type = NULL
albums$artist.area = NULL
################################### WRITING ###################################
write.table(albums, "~/Documentos/Experimento Doutorado/bases de dados/experimento/mb_albums.txt",
row.names = FALSE, col.names = TRUE, "\t")
###############################################################################
########################## Artist Load (no album) #############################
###############################################################################
# Load the raw MusicBrainz artist dump (tab-separated, \N for missing values).
# NOTE(review): `address` is defined earlier in this script -- the base folder
# for the MusicBrainz dumps.
artist = fread(paste0(address,"bases de dados/MusicBrainz/mbdump/artist"),
sep="\t",
verbose = TRUE,
na.strings = "\\N")
# Alternative reader kept for reference (readr::read_delim):
# artist <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/artist",
# "\t",
# escape_double = FALSE,
# col_names = FALSE,
# trim_ws = TRUE,
# na = "\\N")
# Column names follow the MusicBrainz `artist` table schema.
names(artist) = c("id", "gid", "name", "sort_name", "begin_date_year", "begin_date_month", "begin_date_day", "end_date_year",
"end_date_month", "end_date_day", "type", "area", "gender", "comment", "edits_pending", "last_updated",
"ended_char", "begin_area", "end_area")
# The dump encodes booleans as "t"/"f"; derive a logical `ended` column.
# NOTE(review): with data.table, artist[,"ended_char"] returns a one-column
# table rather than a vector -- confirm the comparison yields the intended
# logical column.
artist$ended = artist[,"ended_char"] == "t"
# Keep id, name, begin/end year, type, area, gender and the derived `ended`
# (column 20 = the column appended above).
artist = artist[,c(1, 3, 5, 8, 11, 12, 13, 20)]
################################### AREA ###################################
# The area data is not necessary, the ids will be treated as the classes for the categorical attribute
# area <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/area",
# "\t",
# escape_double = FALSE,
# col_names = FALSE,
# trim_ws = TRUE,
# na = "\\N")
# names(area) = c("area.id", "area.gid", "area.name", "area.type", "area.edits_pending", "area.last_updated", "area.begin_date_year",
# "area.begin_date_month", "area.begin_date_day", "area.end_date_year", "area.end_date_month",
# "area.end_date_day", "area.ended", "area.comment")
# area = area[,c(1, 3, 4)]
# artist = merge(artist, area, by.x = "area", by.y = "area.id", all.x = TRUE)
# area_type <- read_delim("~/Documentos/Experimento Doutorado/bases de dados/MusicBrainz/mbdump/area_type",
# "\t",
# escape_double = FALSE,
# col_names = FALSE,
# trim_ws = TRUE,
# na = "\\N")
# names(area_type) = c("area_type.id", "area_type.name", "area_type.parent", "area_type.child_order", "area_type.description",
# "area_type.gid")
# area_type = area_type[,c(1,2)]
# artist = merge(artist, area_type, by.x = "area.type", by.y = "area_type.id", all.x = TRUE)
# artist$area.type = NULL
# artist$artist.area = NULL
################################### WRITING ###################################
# Persist the trimmed artist table as tab-separated text.
fwrite(artist,
paste0(address,"bases de dados/experimento/mb_artists.txt"),
row.names = FALSE, col.names = TRUE, sep = "\t")
|
#' Mirror Plot -- Regional Association ComparER Plot
#'
#' This function allows you to create a plot of -log10(P-values) for two sets of association data.
#' Mirror plots illustrate the two associations on a common x-axis, with the first association
#' inverted, mirroring the two associations against one another.
#'
#' @param assoc_data1 required. A dataframe that has columns named POS representing the position
#' of the SNP on the chromosome, LOG10P which contains the -log10(P-values), CHR, which contains
#' the chromosome number, RS_ID, which contains LD information. If your data set has been processed
#' using the formatRACER function and the ldRACER function, these columns will be appropriately
#' named in your data.
#' @param assoc_data2 required. identical in format to assoc_data1
#' @param chr required. chromosome you wish to plot
#' @param build optional. indicates the genome build to use to plot the genes below the association plot. default = "hg19", but can be changed to "hg38"
#' @param set optional. default = "protein_coding", however can be set to "all" to plot all RNAs in the genome
#' @param name1 optional. name of association set 1
#' @param name2 optional. name of association set 2
#' @param plotby required. "coord", "gene", or "snp". Which parameter to use to
#' determine the region to be plotted.
#' @param gene_plot optional. If "gene" selected for plotby, then plot will be +/- 500kb of gene, should be a human gene symbol
#' @param snp_plot optional. If "snp" selected for plotby, then plot will be +/- 500kb of snp
#' @param start_plot optional. If "coord" selected for plotby, then this will be lower bound of x-axis
#' @param end_plot optional. If "coord" selected for plotby, then this will be upper bound of x-axis
#' @param label_lead optional. default = FALSE, set = TRUE if you wish to add a label to your graph of the SNP used to calculate LD. If the SNP used to calculate LD is not in your data set, the SNP with the greatest -LOG10(P) will be labeled. Labels both plots.
#'
#' @keywords association plot
#' @concept GWAS
#' @export
#' @import ggplot2
#' @importFrom rlang .data
#' @examples
#' \donttest{
#' data(mark3_bmd_gwas)
#' data(mark3_eqtl)
#'
#' mark3_bmd_gwas_f = RACER::formatRACER(assoc_data = mark3_bmd_gwas, chr_col = 3,
#' pos_col = 4, p_col = 11)
#' mark3_eqtl_f = RACER::formatRACER(assoc_data = mark3_eqtl, chr_col = 10,
#' pos_col = 11, p_col = 7)
#'
#' mark3_bmd_gwas_f_ld = RACER::ldRACER(assoc_data = mark3_bmd_gwas_f,
#' rs_col = 2, pops ="EUR", lead_snp = "rs11623869")
#' mark3_eqtl_f_ld = RACER::ldRACER(assoc_data = mark3_eqtl_f,
#' rs_col = 15, pops = "EUR", lead_snp = "rs11623869")
#' mirrorPlotRACER(assoc_data1 = mark3_bmd_gwas_f_ld, assoc_data2 = mark3_eqtl_f_ld,
#' chr = 14, plotby = "gene", gene_plot = "MARK3")}
mirrorPlotRACER <- function(assoc_data1, assoc_data2, chr, build = "hg19", set = "protein_coding", name1="Association Dataset #1", name2="Association Dataset #2", plotby, gene_plot=NULL, snp_plot=NULL, start_plot=NULL, end_plot=NULL, label_lead = FALSE){
  # --- Validate required columns in both association data sets ---
  reqs = c("CHR", "POS", "LOG10P")
  cols_1 = colnames(assoc_data1)
  cols_2 = colnames(assoc_data2)
  if(sum(reqs %in% cols_1) == 3){
  }else{stop("Association Data Set #1 is missing a required column.")}
  if(sum(reqs %in% cols_2) == 3){
  }else{stop("Association Data Set #2 is missing a required column.")}
  # --- Select gene annotation table for the requested genome build ---
  if(build == "hg38"){
    utils::data(hg38)
    chr_in = chr
    colnames(hg38) = c("GENE_ID", "CHR", "TRX_START", "TRX_END", "LENGTH", "GENE_NAME", "TYPE")
    gene_sub = hg38[hg38$CHR == chr_in,]
  }else if(build == "hg19"){
    utils::data(hg19)
    chr_in = chr
    colnames(hg19) = c("GENE_ID", "CHR", "TRX_START", "TRX_END", "LENGTH", "GENE_NAME", "TYPE")
    gene_sub = hg19[hg19$CHR == chr_in,]
  }
  if(set == "protein_coding"){
    gene_sub = gene_sub[gene_sub$TYPE == "protein_coding",]
  }else{
    gene_sub = gene_sub
  }
  `%>%` <- magrittr::`%>%`
  # --- Determine the plotting window from plotby = "coord" / "gene" / "snp" ---
  if((sum(is.null(plotby)) == 0) == TRUE){
    message("Plotting by...")
    if((plotby == "coord") == TRUE){
      message("coord")
      start = start_plot
      end = end_plot
    }else if((plotby == "gene") == TRUE){
      message(paste("gene:",gene_plot))
      if(sum(is.null(gene_plot)) == 0){
        # Window is +/- 500 kb around the gene's transcription bounds.
        p = subset(gene_sub, gene_sub$GENE_NAME == gene_plot)
        start = min(p$TRX_START) - 500000
        end = max(p$TRX_END) + 500000
      }else{stop("No gene specified.")}
    }else if((plotby == "snp") == TRUE){
      message(paste("snp",snp_plot))
      # SNP position is looked up in data set 1; window is +/- 500 kb.
      q = assoc_data1[assoc_data1$RS_ID == snp_plot,]
      w = q$POS
      w = as.numeric(as.character(w))
      start = w - 500000
      end = w + 500000}
  }else{
    stop("Please specify a parameter to plotby.")
  }
  # reading in gene data (pad window by 5 kb so edge genes are drawn)
  gene_sub = subset(gene_sub, gene_sub$TRX_START > (start-5000))
  gene_sub = subset(gene_sub, gene_sub$TRX_END < (end+5000))
  gene_sub = gene_sub[!duplicated(gene_sub$GENE_ID),]
  gene_sub = gene_sub[,c(3,4,6)]
  # Melt start/end into long form: one line segment per gene in the track.
  gene_sub = reshape2::melt(gene_sub, id.vars = "GENE_NAME")
  gene_sub$y_value = as.numeric(as.factor(gene_sub$GENE_NAME))
  plot_lab = subset(gene_sub, gene_sub$variable == "TRX_END")
  # read in, format, and filter data sets
  message("Reading in association data")
  in.dt <- as.data.frame(assoc_data1)
  in.dt$POS = as.numeric(as.character(in.dt$POS))
  in.dt$LOG10P = as.numeric(as.character(in.dt$LOG10P))
  in.dt$CHR = as.numeric(as.character(in.dt$CHR))
  in.dt = dplyr::filter(in.dt, .data$CHR == chr_in)
  in.dt = dplyr::filter(in.dt, .data$POS > start)%>%
    dplyr::filter(.data$POS < end)
  # Lead-SNP label for panel 1: use the row tagged "LEAD", else the max signal.
  if(label_lead == TRUE){
    lsnp_row_1 = which(in.dt$LABEL == "LEAD")
    label_data_1 = in.dt[lsnp_row_1,]
    if(dim(label_data_1)[1] == 0){
      lsnp_row_1 = in.dt[in.dt$LOG10P == max(in.dt$LOG10P),]
      label_data_1 = lsnp_row_1[1,]
    }
  }
  in.dt.2 <- as.data.frame(assoc_data2)
  in.dt.2$POS = as.numeric(as.character(in.dt.2$POS))
  in.dt.2$LOG10P = as.numeric(as.character(in.dt.2$LOG10P))
  in.dt.2$CHR = as.numeric(as.character(in.dt.2$CHR))
  in.dt.2 = dplyr::filter(in.dt.2, .data$CHR == chr_in)
  in.dt.2 = dplyr::filter(in.dt.2, .data$POS > start)%>%
    dplyr::filter(.data$POS < end)
  # Lead-SNP label for panel 2, same fallback rule as panel 1.
  if(label_lead == TRUE){
    lsnp_row_2 = which(in.dt.2$LABEL == "LEAD")
    label_data_2 = in.dt.2[lsnp_row_2,]
    if(dim(label_data_2)[1] == 0){
      lsnp_row_2 = in.dt.2[in.dt.2$LOG10P == max(in.dt.2$LOG10P),]
      label_data_2 = lsnp_row_2[1,]
    }
  }
  # Digit counts of the two y-axis maxima drive the label precision matching
  # below, so the mirrored panels keep equal axis widths.
  len1 = nchar(trunc(max(in.dt$LOG10P)))
  len2 = nchar(trunc(max(in.dt.2$LOG10P)))
  scaleFUN0 <- function(x) sprintf("%.0f", x)
  scaleFUN1 <- function(x) sprintf("%.1f", x)
  scaleFUN2 <- function(x) sprintf("%.2f", x)
  scaleFUN3 <- function(x) sprintf("%.3f", x)
  scaleFUN4 <- function(x) sprintf("%.4f", x)
  # generate mirror plot
  message("Generating plot.")
  # Panel a: data set 1, inverted y axis, colored by LD bin when available.
  if("LD" %in% cols_1 && "LD_BIN" %in% cols_1){
    a = ggplot2::ggplot(data = in.dt, ggplot2::aes_string(x = "POS", y = "LOG10P", color = "LD_BIN")) +
      ggplot2::geom_point() + ggplot2::scale_colour_manual(
        values = c("1.0-0.8" = "red", "0.8-0.6" = "darkorange1", "0.6-0.4" = "green1",
                   "0.4-0.2" = "skyblue1", "0.2-0.0" = "navyblue", "NA" = "grey"), drop = FALSE) +
      ggplot2::theme_bw() + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position")) + ggplot2::ylab("-log10(p-value)") +
      ggplot2::scale_y_reverse() + ggplot2::theme(axis.title.x=ggplot2::element_blank(),
                                                  axis.text.x=ggplot2::element_blank(),
                                                  axis.ticks.x=ggplot2::element_blank()) +
      ggplot2::theme(legend.position = "none") +
      ggplot2::xlim(start,end) + ggplot2::ggtitle(paste0(name1)) +
      theme(plot.title = element_text(size = 10, vjust = -1)) +
      theme(plot.margin = margin(5.5,5.5,-3,5.5))
  }else{
    message("No LD information for dataset #1.")
    a = ggplot2::ggplot(in.dt, ggplot2::aes_string(x = "POS", y = "LOG10P")) +
      ggplot2::geom_point() + ggplot2::theme_bw() + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position")) +
      ggplot2::ylab("-log10(p-value)") +
      ggplot2::scale_y_reverse() + ggplot2::theme(axis.title.x=ggplot2::element_blank(),
                                                  axis.text.x=ggplot2::element_blank(),
                                                  axis.ticks.x=ggplot2::element_blank()) +
      ggplot2::theme(legend.position = "none") +
      ggplot2::xlim(start,end) + ggplot2::ggtitle(paste0(name1)) +
      theme(plot.title = element_text(size = 10, vjust = -1)) +
      theme(plot.margin = margin(5.5,5.5,-3,5.5))
  }
  # Panel b: data set 2, upright y axis.
  if("LD" %in% cols_2 && "LD_BIN" %in% cols_2){
    b = ggplot2::ggplot(data = in.dt.2, ggplot2::aes_string(x = "POS", y = "LOG10P", color = "LD_BIN")) +
      ggplot2::geom_point() + ggplot2::scale_colour_manual(
        values = c("1.0-0.8" = "red", "0.8-0.6" = "darkorange1", "0.6-0.4" = "green1",
                   "0.4-0.2" = "skyblue1", "0.2-0.0" = "navyblue", "NA" = "grey"), drop = FALSE) +
      ggplot2::theme_bw() + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position (Mbp)")) +
      ggplot2::ylab("-log10(p-value)") + ggplot2::theme(legend.position = "bottom") +
      ggplot2::xlim(start,end) + ggplot2::ylim(min(in.dt.2$LOG10P),max(in.dt.2$LOG10P)) +
      ggplot2::theme(axis.title.x=ggplot2::element_blank(),
                     axis.text.x=ggplot2::element_blank(),
                     axis.ticks.x=ggplot2::element_blank()) + ggplot2::ggtitle(paste0(name2)) +
      theme(plot.title = element_text(size = 10, vjust = -1))
  }else{
    # BUG FIX: this branch plots data set 2 but was titled with name1;
    # it now uses name2 to match the LD branch above.
    b = ggplot2::ggplot(in.dt.2, ggplot2::aes_string(x = "POS", y = "LOG10P")) +
      ggplot2::geom_point() + ggplot2::theme_bw() + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position (Mbp)")) +
      ggplot2::ylab("-log10(p-value)") + ggplot2::theme(legend.position = "bottom") +
      ggplot2::xlim(start,end) + ggplot2::ylim(min(in.dt.2$LOG10P),max(in.dt.2$LOG10P)) +
      ggplot2::theme(axis.title.x=ggplot2::element_blank(),
                     axis.text.x=ggplot2::element_blank(),
                     axis.ticks.x=ggplot2::element_blank()) + ggplot2::ggtitle(paste0(name2)) +
      theme(plot.title = element_text(size = 10, vjust = -1))
  }
  # Panel c: gene track drawn beneath the two association panels.
  c = ggplot2::ggplot(gene_sub, ggplot2::aes_string(x = "value", y = "y_value")) +
    ggplot2::geom_line(ggplot2::aes_string(group = "GENE_NAME"), size = 2) + ggplot2::theme_bw() +
    ggplot2::geom_text(data = plot_lab, ggplot2::aes_string(x = "value", y = "y_value", label = "GENE_NAME"),
                       hjust = -0.1,vjust = 0.3, size = 2.5) + ggplot2::xlim(start,end) +
    ggplot2::theme(axis.title.y = ggplot2::element_text(color = "white", size = 28),
                   axis.text.y = ggplot2::element_blank(),
                   axis.ticks.y = ggplot2::element_blank()) + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position")) +
    ggplot2::ylim(0,(max(gene_sub$y_value)+1))
  # Match axis-label decimal precision so the mirrored panels align visually.
  if(len1 == len2){
    a = a + scale_y_reverse(labels = scaleFUN0)
    b = b + scale_y_continuous(labels = scaleFUN0)
  }else if(len1 > len2){
    a = a + scale_y_reverse(labels = scaleFUN1)
    diff = len1 - len2
    if(diff == 1){
      b = b + scale_y_continuous(labels = scaleFUN2)
    }else if(diff == 2){
      b = b + scale_y_continuous(labels = scaleFUN3)
    }else if(diff == 3){
      b = b + scale_y_continuous(labels = scaleFUN4)
    }
  }else if(len2 > len1){
    b = b + scale_y_continuous(labels = scaleFUN1)
    diff = len2 - len1
    if(diff == 1){
      a = a + scale_y_reverse(labels = scaleFUN2)
    }else if(diff == 2){
      a = a + scale_y_reverse(labels = scaleFUN3)
    }else if(diff == 3){
      a = a + scale_y_reverse(labels = scaleFUN4)
    }
  }
  # Highlight and label the lead SNP on both panels.
  if(label_lead == TRUE){
    a = a + geom_point(data = label_data_1, aes_string(x = "POS", y = "LOG10P"), color = "purple")
    a = a + geom_text(data = label_data_1, aes_string(label = "RS_ID"),
                      color = "black", size = 3, hjust = 1.25)
    b = b + geom_point(data = label_data_2, aes_string(x = "POS", y = "LOG10P"), color = "purple")
    b = b + geom_text(data = label_data_2, aes_string(label = "RS_ID"),
                      color = "black", size = 3, hjust = 1.25)
  }
  # Stack the mirrored association panels over the gene track.
  ggpubr::ggarrange(a, b, c, heights = c(2,2,1), nrow = 3, ncol = 1,
                    common.legend = TRUE, legend = "right")
}
| /R/mirrorPlotRACER.R | no_license | jonaszierer/RACER | R | false | false | 12,377 | r | #' Mirror Plot -- Regional Association ComparER Plot
#'
#' This function allows you to create a plot of -log10(P-values) for two sets of association data.
#' Mirror plots illustrate the two associations on a common x-axis, with the first association
#' inverted, mirroring the two associations against one another.
#'
#' @param assoc_data1 required. A dataframe that has columns named POS representing the position
#' of the SNP on the chromosome, LOG10P which contains the -log10(P-values), CHR, which contains
#' the chromosome number, RS_ID, which contains LD information. If your data set has been processed
#' using the formatRACER function and the ldRACER function, these columns will be appropriately
#' named in your data.
#' @param assoc_data2 required. identical in format to assoc_data1
#' @param chr required. chromosome you wish to plot
#' @param build optional. indicates the genome build to use to plot the genes below the association plot. default = "hg19", but can be changed to "hg38"
#' @param set optional. default = "protein_coding", however can be set to "all" to plot all RNAs in the genome
#' @param name1 optional. name of association set 1
#' @param name2 optional. name of association set 2
#' @param plotby required. "coord", "gene", or "snp". Which parameter to use to
#' determine the region to be plotted.
#' @param gene_plot optional. If "gene" selected for plotby, then plot will be +/- 500kb of gene, should be a human gene symbol
#' @param snp_plot optional. If "snp" selected for plotby, then plot will be +/- 500kb of snp
#' @param start_plot optional. If "coord" selected for plotby, then this will be lower bound of x-axis
#' @param end_plot optional. If "coord" selected for plotby, then this will be upper bound of x-axis
#' @param label_lead optional. default = FALSE, set = TRUE if you wish to add a label to your graph of the SNP used to calculate LD. If the SNP used to calculate LD is not in your data set, the SNP with the greatest -LOG10(P) will be labeled. Labels both plots.
#'
#' @keywords association plot
#' @concept GWAS
#' @export
#' @import ggplot2
#' @importFrom rlang .data
#' @examples
#' \donttest{
#' data(mark3_bmd_gwas)
#' data(mark3_eqtl)
#'
#' mark3_bmd_gwas_f = RACER::formatRACER(assoc_data = mark3_bmd_gwas, chr_col = 3,
#' pos_col = 4, p_col = 11)
#' mark3_eqtl_f = RACER::formatRACER(assoc_data = mark3_eqtl, chr_col = 10,
#' pos_col = 11, p_col = 7)
#'
#' mark3_bmd_gwas_f_ld = RACER::ldRACER(assoc_data = mark3_bmd_gwas_f,
#' rs_col = 2, pops ="EUR", lead_snp = "rs11623869")
#' mark3_eqtl_f_ld = RACER::ldRACER(assoc_data = mark3_eqtl_f,
#' rs_col = 15, pops = "EUR", lead_snp = "rs11623869")
#' mirrorPlotRACER(assoc_data1 = mark3_bmd_gwas_f_ld, assoc_data2 = mark3_eqtl_f_ld,
#' chr = 14, plotby = "gene", gene_plot = "MARK3")}
mirrorPlotRACER <- function(assoc_data1, assoc_data2, chr, build = "hg19", set = "protein_coding", name1="Association Dataset #1", name2="Association Dataset #2", plotby, gene_plot=NULL, snp_plot=NULL, start_plot=NULL, end_plot=NULL, label_lead = FALSE){
  # --- Validate required columns in both association data sets ---
  reqs = c("CHR", "POS", "LOG10P")
  cols_1 = colnames(assoc_data1)
  cols_2 = colnames(assoc_data2)
  if(sum(reqs %in% cols_1) == 3){
  }else{stop("Association Data Set #1 is missing a required column.")}
  if(sum(reqs %in% cols_2) == 3){
  }else{stop("Association Data Set #2 is missing a required column.")}
  # --- Select gene annotation table for the requested genome build ---
  if(build == "hg38"){
    utils::data(hg38)
    chr_in = chr
    colnames(hg38) = c("GENE_ID", "CHR", "TRX_START", "TRX_END", "LENGTH", "GENE_NAME", "TYPE")
    gene_sub = hg38[hg38$CHR == chr_in,]
  }else if(build == "hg19"){
    utils::data(hg19)
    chr_in = chr
    colnames(hg19) = c("GENE_ID", "CHR", "TRX_START", "TRX_END", "LENGTH", "GENE_NAME", "TYPE")
    gene_sub = hg19[hg19$CHR == chr_in,]
  }
  if(set == "protein_coding"){
    gene_sub = gene_sub[gene_sub$TYPE == "protein_coding",]
  }else{
    gene_sub = gene_sub
  }
  `%>%` <- magrittr::`%>%`
  # --- Determine the plotting window from plotby = "coord" / "gene" / "snp" ---
  if((sum(is.null(plotby)) == 0) == TRUE){
    message("Plotting by...")
    if((plotby == "coord") == TRUE){
      message("coord")
      start = start_plot
      end = end_plot
    }else if((plotby == "gene") == TRUE){
      message(paste("gene:",gene_plot))
      if(sum(is.null(gene_plot)) == 0){
        # Window is +/- 500 kb around the gene's transcription bounds.
        p = subset(gene_sub, gene_sub$GENE_NAME == gene_plot)
        start = min(p$TRX_START) - 500000
        end = max(p$TRX_END) + 500000
      }else{stop("No gene specified.")}
    }else if((plotby == "snp") == TRUE){
      message(paste("snp",snp_plot))
      # SNP position is looked up in data set 1; window is +/- 500 kb.
      q = assoc_data1[assoc_data1$RS_ID == snp_plot,]
      w = q$POS
      w = as.numeric(as.character(w))
      start = w - 500000
      end = w + 500000}
  }else{
    stop("Please specify a parameter to plotby.")
  }
  # reading in gene data (pad window by 5 kb so edge genes are drawn)
  gene_sub = subset(gene_sub, gene_sub$TRX_START > (start-5000))
  gene_sub = subset(gene_sub, gene_sub$TRX_END < (end+5000))
  gene_sub = gene_sub[!duplicated(gene_sub$GENE_ID),]
  gene_sub = gene_sub[,c(3,4,6)]
  # Melt start/end into long form: one line segment per gene in the track.
  gene_sub = reshape2::melt(gene_sub, id.vars = "GENE_NAME")
  gene_sub$y_value = as.numeric(as.factor(gene_sub$GENE_NAME))
  plot_lab = subset(gene_sub, gene_sub$variable == "TRX_END")
  # read in, format, and filter data sets
  message("Reading in association data")
  in.dt <- as.data.frame(assoc_data1)
  in.dt$POS = as.numeric(as.character(in.dt$POS))
  in.dt$LOG10P = as.numeric(as.character(in.dt$LOG10P))
  in.dt$CHR = as.numeric(as.character(in.dt$CHR))
  in.dt = dplyr::filter(in.dt, .data$CHR == chr_in)
  in.dt = dplyr::filter(in.dt, .data$POS > start)%>%
    dplyr::filter(.data$POS < end)
  # Lead-SNP label for panel 1: use the row tagged "LEAD", else the max signal.
  if(label_lead == TRUE){
    lsnp_row_1 = which(in.dt$LABEL == "LEAD")
    label_data_1 = in.dt[lsnp_row_1,]
    if(dim(label_data_1)[1] == 0){
      lsnp_row_1 = in.dt[in.dt$LOG10P == max(in.dt$LOG10P),]
      label_data_1 = lsnp_row_1[1,]
    }
  }
  in.dt.2 <- as.data.frame(assoc_data2)
  in.dt.2$POS = as.numeric(as.character(in.dt.2$POS))
  in.dt.2$LOG10P = as.numeric(as.character(in.dt.2$LOG10P))
  in.dt.2$CHR = as.numeric(as.character(in.dt.2$CHR))
  in.dt.2 = dplyr::filter(in.dt.2, .data$CHR == chr_in)
  in.dt.2 = dplyr::filter(in.dt.2, .data$POS > start)%>%
    dplyr::filter(.data$POS < end)
  # Lead-SNP label for panel 2, same fallback rule as panel 1.
  if(label_lead == TRUE){
    lsnp_row_2 = which(in.dt.2$LABEL == "LEAD")
    label_data_2 = in.dt.2[lsnp_row_2,]
    if(dim(label_data_2)[1] == 0){
      lsnp_row_2 = in.dt.2[in.dt.2$LOG10P == max(in.dt.2$LOG10P),]
      label_data_2 = lsnp_row_2[1,]
    }
  }
  # Digit counts of the two y-axis maxima drive the label precision matching
  # below, so the mirrored panels keep equal axis widths.
  len1 = nchar(trunc(max(in.dt$LOG10P)))
  len2 = nchar(trunc(max(in.dt.2$LOG10P)))
  scaleFUN0 <- function(x) sprintf("%.0f", x)
  scaleFUN1 <- function(x) sprintf("%.1f", x)
  scaleFUN2 <- function(x) sprintf("%.2f", x)
  scaleFUN3 <- function(x) sprintf("%.3f", x)
  scaleFUN4 <- function(x) sprintf("%.4f", x)
  # generate mirror plot
  message("Generating plot.")
  # Panel a: data set 1, inverted y axis, colored by LD bin when available.
  if("LD" %in% cols_1 && "LD_BIN" %in% cols_1){
    a = ggplot2::ggplot(data = in.dt, ggplot2::aes_string(x = "POS", y = "LOG10P", color = "LD_BIN")) +
      ggplot2::geom_point() + ggplot2::scale_colour_manual(
        values = c("1.0-0.8" = "red", "0.8-0.6" = "darkorange1", "0.6-0.4" = "green1",
                   "0.4-0.2" = "skyblue1", "0.2-0.0" = "navyblue", "NA" = "grey"), drop = FALSE) +
      ggplot2::theme_bw() + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position")) + ggplot2::ylab("-log10(p-value)") +
      ggplot2::scale_y_reverse() + ggplot2::theme(axis.title.x=ggplot2::element_blank(),
                                                  axis.text.x=ggplot2::element_blank(),
                                                  axis.ticks.x=ggplot2::element_blank()) +
      ggplot2::theme(legend.position = "none") +
      ggplot2::xlim(start,end) + ggplot2::ggtitle(paste0(name1)) +
      theme(plot.title = element_text(size = 10, vjust = -1)) +
      theme(plot.margin = margin(5.5,5.5,-3,5.5))
  }else{
    message("No LD information for dataset #1.")
    a = ggplot2::ggplot(in.dt, ggplot2::aes_string(x = "POS", y = "LOG10P")) +
      ggplot2::geom_point() + ggplot2::theme_bw() + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position")) +
      ggplot2::ylab("-log10(p-value)") +
      ggplot2::scale_y_reverse() + ggplot2::theme(axis.title.x=ggplot2::element_blank(),
                                                  axis.text.x=ggplot2::element_blank(),
                                                  axis.ticks.x=ggplot2::element_blank()) +
      ggplot2::theme(legend.position = "none") +
      ggplot2::xlim(start,end) + ggplot2::ggtitle(paste0(name1)) +
      theme(plot.title = element_text(size = 10, vjust = -1)) +
      theme(plot.margin = margin(5.5,5.5,-3,5.5))
  }
  # Panel b: data set 2, upright y axis.
  if("LD" %in% cols_2 && "LD_BIN" %in% cols_2){
    b = ggplot2::ggplot(data = in.dt.2, ggplot2::aes_string(x = "POS", y = "LOG10P", color = "LD_BIN")) +
      ggplot2::geom_point() + ggplot2::scale_colour_manual(
        values = c("1.0-0.8" = "red", "0.8-0.6" = "darkorange1", "0.6-0.4" = "green1",
                   "0.4-0.2" = "skyblue1", "0.2-0.0" = "navyblue", "NA" = "grey"), drop = FALSE) +
      ggplot2::theme_bw() + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position (Mbp)")) +
      ggplot2::ylab("-log10(p-value)") + ggplot2::theme(legend.position = "bottom") +
      ggplot2::xlim(start,end) + ggplot2::ylim(min(in.dt.2$LOG10P),max(in.dt.2$LOG10P)) +
      ggplot2::theme(axis.title.x=ggplot2::element_blank(),
                     axis.text.x=ggplot2::element_blank(),
                     axis.ticks.x=ggplot2::element_blank()) + ggplot2::ggtitle(paste0(name2)) +
      theme(plot.title = element_text(size = 10, vjust = -1))
  }else{
    # BUG FIX: this branch plots data set 2 but was titled with name1;
    # it now uses name2 to match the LD branch above.
    b = ggplot2::ggplot(in.dt.2, ggplot2::aes_string(x = "POS", y = "LOG10P")) +
      ggplot2::geom_point() + ggplot2::theme_bw() + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position (Mbp)")) +
      ggplot2::ylab("-log10(p-value)") + ggplot2::theme(legend.position = "bottom") +
      ggplot2::xlim(start,end) + ggplot2::ylim(min(in.dt.2$LOG10P),max(in.dt.2$LOG10P)) +
      ggplot2::theme(axis.title.x=ggplot2::element_blank(),
                     axis.text.x=ggplot2::element_blank(),
                     axis.ticks.x=ggplot2::element_blank()) + ggplot2::ggtitle(paste0(name2)) +
      theme(plot.title = element_text(size = 10, vjust = -1))
  }
  # Panel c: gene track drawn beneath the two association panels.
  c = ggplot2::ggplot(gene_sub, ggplot2::aes_string(x = "value", y = "y_value")) +
    ggplot2::geom_line(ggplot2::aes_string(group = "GENE_NAME"), size = 2) + ggplot2::theme_bw() +
    ggplot2::geom_text(data = plot_lab, ggplot2::aes_string(x = "value", y = "y_value", label = "GENE_NAME"),
                       hjust = -0.1,vjust = 0.3, size = 2.5) + ggplot2::xlim(start,end) +
    ggplot2::theme(axis.title.y = ggplot2::element_text(color = "white", size = 28),
                   axis.text.y = ggplot2::element_blank(),
                   axis.ticks.y = ggplot2::element_blank()) + ggplot2::xlab(paste0("Chromosome ", chr_in, " Position")) +
    ggplot2::ylim(0,(max(gene_sub$y_value)+1))
  # Match axis-label decimal precision so the mirrored panels align visually.
  if(len1 == len2){
    a = a + scale_y_reverse(labels = scaleFUN0)
    b = b + scale_y_continuous(labels = scaleFUN0)
  }else if(len1 > len2){
    a = a + scale_y_reverse(labels = scaleFUN1)
    diff = len1 - len2
    if(diff == 1){
      b = b + scale_y_continuous(labels = scaleFUN2)
    }else if(diff == 2){
      b = b + scale_y_continuous(labels = scaleFUN3)
    }else if(diff == 3){
      b = b + scale_y_continuous(labels = scaleFUN4)
    }
  }else if(len2 > len1){
    b = b + scale_y_continuous(labels = scaleFUN1)
    diff = len2 - len1
    if(diff == 1){
      a = a + scale_y_reverse(labels = scaleFUN2)
    }else if(diff == 2){
      a = a + scale_y_reverse(labels = scaleFUN3)
    }else if(diff == 3){
      a = a + scale_y_reverse(labels = scaleFUN4)
    }
  }
  # Highlight and label the lead SNP on both panels.
  if(label_lead == TRUE){
    a = a + geom_point(data = label_data_1, aes_string(x = "POS", y = "LOG10P"), color = "purple")
    a = a + geom_text(data = label_data_1, aes_string(label = "RS_ID"),
                      color = "black", size = 3, hjust = 1.25)
    b = b + geom_point(data = label_data_2, aes_string(x = "POS", y = "LOG10P"), color = "purple")
    b = b + geom_text(data = label_data_2, aes_string(label = "RS_ID"),
                      color = "black", size = 3, hjust = 1.25)
  }
  # Stack the mirrored association panels over the gene track.
  ggpubr::ggarrange(a, b, c, heights = c(2,2,1), nrow = 3, ncol = 1,
                    common.legend = TRUE, legend = "right")
}
|
# ---- Dependencies for corpus loading and n-gram extraction ----
library(ggplot2)
library(tm)
library(qdap)
library(rJava)
library(RWekajars)
library(RWeka) # Install JAVA before installing this
library(dplyr)
library(wordcloud)
library(stringr)
library(bigmemory)
library(slam)
# Open Connection
# NOTE(review): these connections are opened but never read from -- the
# readLines() calls below re-open the files by name, so the file()/close()
# pairs appear redundant; confirm before removing.
con_twitter <- file("en_US.twitter.txt", "r")
con_news <- file("en_US.news.txt", "r")
con_blogs <- file("en_US.blogs.txt", "r")
# Readlines From Connection
# Read each corpus file fully into memory, one character element per line.
news <- readLines("en_US.news.txt")
twitter <- readLines("en_US.twitter.txt")
blogs <- readLines("en_US.blogs.txt")
# Close Connections
close(con_twitter)
close(con_news)
close(con_blogs)
# Sampling Data
# Keep a 50% random sample (without replacement) of each source to bound
# memory and tokenization time.
news_data <- sample(news, length(news)*0.5, replace = F)
twitter_data <- sample(twitter, length(twitter)*0.5, replace = F)
blogs_data <- sample(blogs, length(blogs)*0.5, replace = F)
# Free the full corpora once the samples are taken.
rm("news")
rm("twitter")
rm("blogs")
# Convert into vectorsource
news.vec <- VectorSource(news_data)
twitter.vec <- VectorSource(twitter_data)
blogs.vec <- VectorSource(blogs_data)
# Make a Corpus
news.corpus <- Corpus(news.vec)
twitter.corpus <- Corpus(twitter.vec)
blogs.corpus <- Corpus(blogs.vec)
# Clean a tm corpus: strip punctuation, drop digits, then lower-case every
# document. The three transformations run in that fixed order.
clean <- function(x) {
  corpus <- x
  # content_transformer() wraps plain character functions so tm_map keeps
  # the corpus structure intact while rewriting document contents.
  for (transform in list(removePunctuation, removeNumbers, tolower)) {
    corpus <- tm_map(corpus, content_transformer(transform))
  }
  corpus
}
# Apply the shared cleaning pipeline to each corpus.
news.clean.corpus <- clean(news.corpus)
twitter.clean.corpus <- clean(twitter.corpus)
blogs.clean.corpus <- clean(blogs.corpus)
# Extract N-grams
# RWeka tokenizers producing fixed-length n-grams (1- to 4-word sequences).
UnigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 1, max = 1))
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
TrigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
QuadgramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 4, max = 4))
# This gives the same thing as tdm1gram
#one.g_Tokenizer <- NGramTokenizer(data.corpus, Weka_control(min = 1, max = 1))
#one_gram <- data.frame(table(one.g_Tokenizer))
#sd = TermDocumentMatrix(data.corpus, control = list(one.g_Tokenizer))
#two.g_Tokenizer <- NGramTokenizer(data.corpus, Weka_control(min = 2, max = 2))
#two_gram <- data.frame(table(two.g_Tokenizer))
# Force single-threaded tokenization; presumably works around known
# RWeka/parallel interaction issues -- TODO confirm.
options(mc.cores=1)
#tdm1gram <- TermDocumentMatrix(data.corpus, control = list(tokenize = UnigramTokenizer))
#tdm1gram <- removeSparseTerms(tdm1gram,0.98)
# Build 2-/3-/4-gram term-document matrices per source.
# removeSparseTerms() prunes rare n-grams (thresholds tuned per source and
# n-gram order); slam::rollup() then sums counts across the document
# dimension so each n-gram carries a single corpus-wide frequency.
# NEWS #
news.tdm2gram <- TermDocumentMatrix(news.clean.corpus, control = list(tokenize = BigramTokenizer))
news.tdm2gram <- removeSparseTerms(news.tdm2gram, sparse = 0.9997)
news.tdm2gram <- rollup(news.tdm2gram, 2, na.rm=TRUE, FUN = sum)
news.tdm3gram <- TermDocumentMatrix(news.clean.corpus, control = list(tokenize = TrigramTokenizer))
news.tdm3gram <- removeSparseTerms(news.tdm3gram, sparse = 0.9999)
news.tdm3gram <- rollup(news.tdm3gram, 2, na.rm=TRUE, FUN = sum)
news.tdm4gram <- TermDocumentMatrix(news.clean.corpus, control = list(tokenize = QuadgramTokenizer))
news.tdm4gram <- removeSparseTerms(news.tdm4gram, sparse = 0.99995)
news.tdm4gram <- rollup(news.tdm4gram, 2, na.rm=TRUE, FUN = sum)
# TWITTER #
twitter.tdm2gram <- TermDocumentMatrix(twitter.clean.corpus, control = list(tokenize = BigramTokenizer))
twitter.tdm2gram <- removeSparseTerms(twitter.tdm2gram, sparse = 0.9999)
twitter.tdm2gram <- rollup(twitter.tdm2gram, 2, na.rm=TRUE, FUN = sum)
twitter.tdm3gram <- TermDocumentMatrix(twitter.clean.corpus, control = list(tokenize = TrigramTokenizer))
twitter.tdm3gram<- removeSparseTerms(twitter.tdm3gram, sparse = 0.99993)
twitter.tdm3gram <- rollup(twitter.tdm3gram, 2, na.rm=TRUE, FUN = sum)
twitter.tdm4gram <- TermDocumentMatrix(twitter.clean.corpus, control = list(tokenize = QuadgramTokenizer))
twitter.tdm4gram<- removeSparseTerms(twitter.tdm4gram, sparse = 0.99999)
twitter.tdm4gram <- rollup(twitter.tdm4gram, 2, na.rm=TRUE, FUN = sum)
# BLOGS $
blogs.tdm2gram <- TermDocumentMatrix(blogs.clean.corpus, control = list(tokenize = BigramTokenizer))
blogs.tdm2gram<- removeSparseTerms(blogs.tdm2gram, sparse = 0.9997)
blogs.tdm2gram <- rollup(blogs.tdm2gram, 2, na.rm=TRUE, FUN = sum)
blogs.tdm3gram <- TermDocumentMatrix(blogs.clean.corpus, control = list(tokenize = TrigramTokenizer))
blogs.tdm3gram<- removeSparseTerms(blogs.tdm3gram, sparse = 0.9997)
blogs.tdm3gram <- rollup(blogs.tdm3gram, 2, na.rm=TRUE, FUN = sum)
blogs.tdm4gram <- TermDocumentMatrix(blogs.clean.corpus, control = list(tokenize = QuadgramTokenizer))
blogs.tdm4gram<- removeSparseTerms(blogs.tdm4gram, sparse = 0.9997)
blogs.tdm4gram <- rollup(blogs.tdm4gram, 2, na.rm=TRUE, FUN = sum)
# Convert Term Document Matrix into Data Frame
#
# The original file defined three byte-identical functions (tdm2.to.df,
# tdm3.to.df, tdm4.to.df). They are consolidated into one shared
# implementation; the three public names are kept for callers.
#
# Row names of the rolled-up TDM hold the n-gram strings. The last word
# becomes `prediction` and everything before it `predictor`; rows are sorted
# by predictor, then by descending frequency.
# NOTE(review): inspect() also prints the matrix to the console as a side
# effect; kept for behavioral parity with the original code.
tdm.ngram.to.df <- function(x) {
  df <- as.data.frame(inspect(x))
  colnames(df) <- c("num")
  # Split "w1 ... wn" into predictor = "w1 ... w(n-1)" and prediction = "wn".
  df[c('predictor', 'prediction')] <- subset(str_match(row.names(df), "(.*) ([^ ]*)"), select=c(2,3))
  df <- subset(df, select=c('predictor', 'prediction', 'num'))
  df <- df[order(df$predictor,-df$num),]
  row.names(df) <- NULL
  df
}
tdm2.to.df <- tdm.ngram.to.df
tdm3.to.df <- tdm.ngram.to.df
tdm4.to.df <- tdm.ngram.to.df
# Convert every rolled-up TDM into a predictor/prediction/num data frame.
# NEWS #
news2gram <- tdm2.to.df(news.tdm2gram)
news3gram <- tdm3.to.df(news.tdm3gram)
news4gram <- tdm4.to.df(news.tdm4gram)
# TWITTER #
twitter2gram <- tdm2.to.df(twitter.tdm2gram)
twitter3gram <- tdm3.to.df(twitter.tdm3gram)
twitter4gram <- tdm4.to.df(twitter.tdm4gram)
# BLOGS #
blogs2gram <- tdm2.to.df(blogs.tdm2gram)
blogs3gram <- tdm3.to.df(blogs.tdm3gram)
blogs4gram <- tdm4.to.df(blogs.tdm4gram)
# Merger News, Twitter and Blogs Files #
# Full outer joins on (predictor, prediction): the per-source frequency
# columns arrive as num.x / num.y / num, with NA where a source lacks the
# n-gram -- sum.all.rows() later treats those NAs as zero.
# Two Grams #
test1 <- merge(news2gram,twitter2gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE)
two.gram <- merge(test1, blogs2gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE )
# Three Grams #
test1 <- merge(news3gram,twitter3gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE)
three.gram <- merge(test1, blogs3gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE )
# Quad Grams #
test1 <- merge(news4gram,twitter4gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE)
quad.gram <- merge(test1, blogs4gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE )
# Sum All Rows to get total frequency #
# Combine the per-source frequency columns of a merged n-gram table.
#
# After the three-way outer merge, each row carries three frequency columns
# (num.x, num.y, num); a source that lacked the n-gram contributes NA.
# NAs are treated as zero and the three counts are summed into `total`.
#
# Args:
#   x: data.frame with columns predictor, prediction, num.x, num.y, num.
# Returns:
#   data.frame with columns predictor, prediction, total.
#   (The original ended in an assignment, so the result was returned
#   invisibly; it is now returned normally -- callers that assign the
#   result are unaffected.)
sum.all.rows <- function(x) {
  for (col in c("num.x", "num.y", "num")) {
    x[[col]][is.na(x[[col]])] <- 0
  }
  x$total <- x$num.x + x$num.y + x$num
  x[, c("predictor", "prediction", "total")]
}
# Collapse the per-source counts into a single `total` column per n-gram.
two.gram <- sum.all.rows(two.gram)
three.gram <- sum.all.rows(three.gram)
quad.gram <- sum.all.rows(quad.gram)
# Persist the lookup tables for the prediction app.
save(two.gram, file="two.gram.Rdata")
save(three.gram, file="three.gram.Rdata")
save(quad.gram, file="quad.gram.Rdata")
| /nextword1.R | no_license | sdasadia/NextWord | R | false | false | 7,910 | r | library(ggplot2)
# NOTE(review): this chunk appears to duplicate the n-gram loading script
# above -- confirm whether both copies are intentional.
library(tm)
library(qdap)
library(rJava)
library(RWekajars)
library(RWeka) # Install JAVA before installing this
library(dplyr)
library(wordcloud)
library(stringr)
library(bigmemory)
library(slam)
# Open Connection
# NOTE(review): these connections are opened but never read from -- the
# readLines() calls below re-open the files by name.
con_twitter <- file("en_US.twitter.txt", "r")
con_news <- file("en_US.news.txt", "r")
con_blogs <- file("en_US.blogs.txt", "r")
# Readlines From Connection
# Read each corpus file fully into memory, one character element per line.
news <- readLines("en_US.news.txt")
twitter <- readLines("en_US.twitter.txt")
blogs <- readLines("en_US.blogs.txt")
# Close Connections
close(con_twitter)
close(con_news)
close(con_blogs)
# Sampling Data
# Keep a 50% random sample (without replacement) of each source.
news_data <- sample(news, length(news)*0.5, replace = F)
twitter_data <- sample(twitter, length(twitter)*0.5, replace = F)
blogs_data <- sample(blogs, length(blogs)*0.5, replace = F)
# Free the full corpora once the samples are taken.
rm("news")
rm("twitter")
rm("blogs")
# Convert into vectorsource
news.vec <- VectorSource(news_data)
twitter.vec <- VectorSource(twitter_data)
blogs.vec <- VectorSource(blogs_data)
# Make a Corpus
news.corpus <- Corpus(news.vec)
twitter.corpus <- Corpus(twitter.vec)
blogs.corpus <- Corpus(blogs.vec)
# Clean a tm corpus: strip punctuation, drop digits, then lower-case every
# document. The three transformations run in that fixed order.
clean <- function(x) {
  corpus <- x
  # content_transformer() wraps plain character functions so tm_map keeps
  # the corpus structure intact while rewriting document contents.
  for (transform in list(removePunctuation, removeNumbers, tolower)) {
    corpus <- tm_map(corpus, content_transformer(transform))
  }
  corpus
}
# Apply the shared cleaning pipeline to each corpus.
news.clean.corpus <- clean(news.corpus)
twitter.clean.corpus <- clean(twitter.corpus)
blogs.clean.corpus <- clean(blogs.corpus)
# Extract N-grams
# RWeka tokenizers producing fixed-length n-grams (1- to 4-word sequences).
UnigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 1, max = 1))
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
TrigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
QuadgramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 4, max = 4))
# This gives the same thing as tdm1gram
#one.g_Tokenizer <- NGramTokenizer(data.corpus, Weka_control(min = 1, max = 1))
#one_gram <- data.frame(table(one.g_Tokenizer))
#sd = TermDocumentMatrix(data.corpus, control = list(one.g_Tokenizer))
#two.g_Tokenizer <- NGramTokenizer(data.corpus, Weka_control(min = 2, max = 2))
#two_gram <- data.frame(table(two.g_Tokenizer))
# Force single-threaded tokenization; presumably works around known
# RWeka/parallel interaction issues -- TODO confirm.
options(mc.cores=1)
#tdm1gram <- TermDocumentMatrix(data.corpus, control = list(tokenize = UnigramTokenizer))
#tdm1gram <- removeSparseTerms(tdm1gram,0.98)
# Build 2-/3-/4-gram term-document matrices per source: prune rare n-grams,
# then slam::rollup() sums counts across documents into one total per n-gram.
# NEWS #
news.tdm2gram <- TermDocumentMatrix(news.clean.corpus, control = list(tokenize = BigramTokenizer))
news.tdm2gram <- removeSparseTerms(news.tdm2gram, sparse = 0.9997)
news.tdm2gram <- rollup(news.tdm2gram, 2, na.rm=TRUE, FUN = sum)
news.tdm3gram <- TermDocumentMatrix(news.clean.corpus, control = list(tokenize = TrigramTokenizer))
news.tdm3gram <- removeSparseTerms(news.tdm3gram, sparse = 0.9999)
news.tdm3gram <- rollup(news.tdm3gram, 2, na.rm=TRUE, FUN = sum)
news.tdm4gram <- TermDocumentMatrix(news.clean.corpus, control = list(tokenize = QuadgramTokenizer))
news.tdm4gram <- removeSparseTerms(news.tdm4gram, sparse = 0.99995)
news.tdm4gram <- rollup(news.tdm4gram, 2, na.rm=TRUE, FUN = sum)
# TWITTER #
twitter.tdm2gram <- TermDocumentMatrix(twitter.clean.corpus, control = list(tokenize = BigramTokenizer))
twitter.tdm2gram <- removeSparseTerms(twitter.tdm2gram, sparse = 0.9999)
twitter.tdm2gram <- rollup(twitter.tdm2gram, 2, na.rm=TRUE, FUN = sum)
twitter.tdm3gram <- TermDocumentMatrix(twitter.clean.corpus, control = list(tokenize = TrigramTokenizer))
twitter.tdm3gram<- removeSparseTerms(twitter.tdm3gram, sparse = 0.99993)
twitter.tdm3gram <- rollup(twitter.tdm3gram, 2, na.rm=TRUE, FUN = sum)
twitter.tdm4gram <- TermDocumentMatrix(twitter.clean.corpus, control = list(tokenize = QuadgramTokenizer))
twitter.tdm4gram<- removeSparseTerms(twitter.tdm4gram, sparse = 0.99999)
twitter.tdm4gram <- rollup(twitter.tdm4gram, 2, na.rm=TRUE, FUN = sum)
# BLOGS $
blogs.tdm2gram <- TermDocumentMatrix(blogs.clean.corpus, control = list(tokenize = BigramTokenizer))
blogs.tdm2gram<- removeSparseTerms(blogs.tdm2gram, sparse = 0.9997)
blogs.tdm2gram <- rollup(blogs.tdm2gram, 2, na.rm=TRUE, FUN = sum)
blogs.tdm3gram <- TermDocumentMatrix(blogs.clean.corpus, control = list(tokenize = TrigramTokenizer))
blogs.tdm3gram<- removeSparseTerms(blogs.tdm3gram, sparse = 0.9997)
blogs.tdm3gram <- rollup(blogs.tdm3gram, 2, na.rm=TRUE, FUN = sum)
blogs.tdm4gram <- TermDocumentMatrix(blogs.clean.corpus, control = list(tokenize = QuadgramTokenizer))
blogs.tdm4gram<- removeSparseTerms(blogs.tdm4gram, sparse = 0.9997)
blogs.tdm4gram <- rollup(blogs.tdm4gram, 2, na.rm=TRUE, FUN = sum)
# Convert Term Document Matrix into Data Frame
# Convert an n-gram TermDocumentMatrix into a prediction lookup table.
#
# Each n-gram row name "w1 ... wk" is split into a predictor (all words but
# the last) and a prediction (the last word); rows are ordered by predictor,
# then by descending frequency.
#
# @param x a TermDocumentMatrix whose row names are space-separated n-grams.
# @return data frame with columns predictor, prediction, num.
tdm.to.df <- function(x) {
  df <- as.data.frame(inspect(x))
  colnames(df) <- c("num")
  # "(.*) ([^ ]*)": greedy first group = everything up to the last space.
  df[c('predictor', 'prediction')] <- subset(str_match(row.names(df), "(.*) ([^ ]*)"), select = c(2, 3))
  df <- subset(df, select = c('predictor', 'prediction', 'num'))
  df <- df[order(df$predictor, -df$num), ]
  row.names(df) <- NULL
  df
}
# The three original converters were byte-identical; kept as thin wrappers so
# existing call sites (tdm2.to.df/tdm3.to.df/tdm4.to.df) keep working.
tdm2.to.df <- function(x) tdm.to.df(x)
tdm3.to.df <- function(x) tdm.to.df(x)
tdm4.to.df <- function(x) tdm.to.df(x)
# NEWS #
news2gram <- tdm2.to.df(news.tdm2gram)
news3gram <- tdm3.to.df(news.tdm3gram)
news4gram <- tdm4.to.df(news.tdm4gram)
# TWITTER #
twitter2gram <- tdm2.to.df(twitter.tdm2gram)
twitter3gram <- tdm3.to.df(twitter.tdm3gram)
twitter4gram <- tdm4.to.df(twitter.tdm4gram)
# BLOGS #
blogs2gram <- tdm2.to.df(blogs.tdm2gram)
blogs3gram <- tdm3.to.df(blogs.tdm3gram)
blogs4gram <- tdm4.to.df(blogs.tdm4gram)
# Merger News, Twitter and Blogs Files #
# Two Grams #
test1 <- merge(news2gram,twitter2gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE)
two.gram <- merge(test1, blogs2gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE )
# Three Grams #
test1 <- merge(news3gram,twitter3gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE)
three.gram <- merge(test1, blogs3gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE )
# Quad Grams #
test1 <- merge(news4gram,twitter4gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE)
quad.gram <- merge(test1, blogs4gram, by.x = c("predictor","prediction"),by.y = c("predictor","prediction"),all = TRUE )
# Sum All Rows to get total frequency #
# Combine per-source frequency columns into one total.
#
# After the two merges above, each row carries counts from news (num.x),
# twitter (num.y) and blogs (num); a missing count means the n-gram was
# absent from that source and is treated as zero.
#
# @param x data frame with columns predictor, prediction, num.x, num.y, num.
# @return data frame with columns predictor, prediction, total.
sum.all.rows <- function(x) {
  x$num.x[is.na(x$num.x)] <- 0
  x$num.y[is.na(x$num.y)] <- 0
  x$num[is.na(x$num)] <- 0
  x$total <- x$num.x + x$num.y + x$num
  # Return explicitly: the original relied on the invisible value of a final
  # assignment, which works but is fragile.
  x[, c("predictor", "prediction", "total")]
}
two.gram <- sum.all.rows(two.gram)
three.gram <- sum.all.rows(three.gram)
quad.gram <- sum.all.rows(quad.gram)
save(two.gram, file="two.gram.Rdata")
save(three.gram, file="three.gram.Rdata")
save(quad.gram, file="quad.gram.Rdata")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toggleConstraint.R
\name{toggleConstraint}
\alias{toggleConstraint}
\title{toggleConstraint}
\usage{
toggleConstraint(myCaNmod, constr)
}
\arguments{
\item{myCaNmod}{a CaNmod object with following elements}
\item{constr}{a vector of strings identifying the constraints to be toggled.
The elements will be compared to the beginning of the line names of
matrices A and C (regular expressions).}
}
\value{
an updated CaNmod object
}
\description{
Activate or inactivate a constraint.
}
\examples{
myCaNmod <- buildCaN(system.file("extdata", "CaN_template_mini.xlsx",
package = "RCaNmodel"))
toggleConstraint(myCaNmod, "C02")
}
| /RCaNmodel/man/toggleConstraint.Rd | permissive | inrae/RCaNmodel | R | false | true | 705 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toggleConstraint.R
\name{toggleConstraint}
\alias{toggleConstraint}
\title{toggleConstraint}
\usage{
toggleConstraint(myCaNmod, constr)
}
\arguments{
\item{myCaNmod}{a CaNmod object with following elements}
\item{constr}{a vector of strings identifying the constraints to be toggled.
The elements will be compared to the beginning of the line names of
matrices A and C (regular expressions).}
}
\value{
an updated CaNmod object
}
\description{
Activate or inactivate a constraint.
}
\examples{
myCaNmod <- buildCaN(system.file("extdata", "CaN_template_mini.xlsx",
package = "RCaNmodel"))
toggleConstraint(myCaNmod, "C02")
}
|
source('testingDataset.R')
monStationTemplate <- read_excel('data/tbl_ir_mon_stations_template.xlsx') # from X:\2018_Assessment\StationsDatabase\VRO
# Single station data ----------------------------------------------------------------------
conventionals_HUC<- left_join(conventionals, dplyr::select(stationTable, FDT_STA_ID, SEC, CLASS, SPSTDS, ID305B_1, ID305B_2, ID305B_3), by='FDT_STA_ID')
# Keep stations attached to this AU under any of the three ID305B columns.
# The third clause previously re-tested ID305B_2 (copy-paste slip); the
# stationTable join above carries ID305B_1..ID305B_3.
AUData <- filter(conventionals_HUC, ID305B_1 %in% 'VAW-I04R_JKS04A00' |
                   ID305B_2 %in% 'VAW-I04R_JKS04A00' |
                   ID305B_3 %in% 'VAW-I04R_JKS04A00') %>%
  left_join(WQSvalues, by = 'CLASS')
x <-filter(conventionals_HUC, FDT_STA_ID %in% '2-JKS033.06') #'2-JMS279.41')#
x2 <- filter(conventionals_HUC, FDT_STA_ID %in% '2-JKS028.69')
# Bring in Roger's Metal Assessment
WCmetals <- read_excel('data/WATER_METALS_20170712.xlsx')
Smetals <- read_excel('data/SEDIMENT_20170712.xlsx')
WCmetals[558,97] <- 'NSP'
z <- filter(WCmetals, FDT_STA_ID %in% x$FDT_STA_ID)# %>%
# dplyr::select(FDT_STA_ID:HARDNESS)
z <- dplyr::select(z, `ANTIMONY HUMAN HEALTH PWS`:`ZINC ALL OTHER SURFACE WATERS`)
s <- filter(Smetals, FDT_STA_ID %in% x2$FDT_STA_ID)
# Summarise metals exceedances for one station.
#
# Counts the "NSP" (not supporting) flags across every cell of x and reports
# a violation count plus a status: 'Review' when any violation exists,
# otherwise 'S' (supporting). With no data the count is NA (and so is the
# status, via ifelse on NA). Output column names are prefixed with metalType.
metalsExceedances <- function(x, metalType) {
  violations <- if (nrow(x) > 0) length(which(x == 'NSP')) else NA
  out <- data.frame(VIO = violations,
                    STAT = ifelse(violations > 0, 'Review', 'S'))
  names(out) <- paste(metalType, names(out), sep = '_')
  out
}
metalsExceedances(z, 'WAT_MET')
# Shiny module UI for the metals review tool.
#
# Builds two tabs — water-column metals and sediment metals — each with a
# station picker, a raw-data table, and an assessment table whose failing
# cells are highlighted by the matching server module.
#
# @param id module id; all output/input ids below are namespaced with NS(id).
metalsTableSingleStationUI <- function(id){
  ns <- NS(id)
  tagList(
    tabsetPanel(
      tabPanel('Water Column Metals',
               wellPanel(
                 h4(strong('Single Station Data Visualization')),
                 # station picker is rendered server-side (depends on AU data)
                 uiOutput(ns('WCmetals_oneStationSelectionUI')),
                 h5('All water column metals data available for the ',span(strong('selected site')),' are available below.
If no data is presented, then the station does not have any water column metals data available.'),
                 DT::dataTableOutput(ns('WCmetalsRangeTableSingleSite')),br(), br(), br(),
                 h5('Metals assessments for the ',span(strong('selected site')),' are highlighted below.'),
                 DT::dataTableOutput(ns("WCstationmetalsExceedanceRate")))),
      tabPanel('Sediment Metals',
               wellPanel(
                 h4(strong('Single Station Data Visualization')),
                 uiOutput(ns('Smetals_oneStationSelectionUI')),
                 h5('All sediment metals data available for the ',span(strong('selected site')),' are available below.
If no data is presented, then the station does not have any sediment metals data available.'),
                 DT::dataTableOutput(ns('SmetalsRangeTableSingleSite')),br(), br(), br(),
                 h5('Metals assessments for the ',span(strong('selected site')),' are highlighted below.'),
                 DT::dataTableOutput(ns("SstationmetalsExceedanceRate"))))
    ))
}
# Shiny module server for the metals review tool.
#
# Mirrors metalsTableSingleStationUI: for each of the two tabs it renders a
# station picker, filters the corresponding metals data frame to that station,
# and renders a raw-data table plus an assessment table with failing cells
# highlighted ('NSP' for water column, 'OE' for sediment).
#
# @param AUdata reactive returning the conventionals data for the current AU.
# @param WCmetals,Smetals water-column / sediment metals data frames.
# @param stationSelectedAbove reactive with the station chosen upstream.
metalsTableSingleStation <- function(input,output,session, AUdata, WCmetals ,Smetals, stationSelectedAbove){
  ns <- session$ns
  # Select One station for individual review
  output$WCmetals_oneStationSelectionUI <- renderUI({
    req(stationSelectedAbove)
    selectInput(ns('WCmetals_oneStationSelection'),strong('Select Station to Review'),choices= sort(unique(c(stationSelectedAbove(),AUdata()$FDT_STA_ID))),#unique(AUdata())$FDT_STA_ID,
                width='300px', selected = stationSelectedAbove())})# "2-JMS279.41" )})
  WCmetals_oneStation <- reactive({
    # NOTE(review): req(ns(input$...)) wraps the value in a namespaced string,
    # which is always truthy, so this req() never suspends — presumably
    # req(input$WCmetals_oneStationSelection) was intended; confirm.
    req(ns(input$WCmetals_oneStationSelection))
    filter(WCmetals, FDT_STA_ID %in% input$WCmetals_oneStationSelection)})
  output$WCmetalsRangeTableSingleSite <- DT::renderDataTable({
    req(WCmetals_oneStation())
    z <- dplyr::select(WCmetals_oneStation(), FDT_STA_ID:HARDNESS)
    # render timestamps as plain text so DT does not reformat them
    z$FDT_DATE_TIME <- as.character(as.POSIXct(z$FDT_DATE_TIME, format="%m/%d/%Y %H:%M"))
    DT::datatable(z, rownames = FALSE, options= list(scrollX = TRUE, pageLength = nrow(z), scrollY = "250px", dom='t')) })
  output$WCstationmetalsExceedanceRate <- DT::renderDataTable({
    req(input$WCmetals_oneStationSelection, WCmetals_oneStation())
    z <- dplyr::select(WCmetals_oneStation(), FDT_STA_ID, `FDT_DATE_TIME`,`ANTIMONY HUMAN HEALTH PWS`:`ZINC ALL OTHER SURFACE WATERS`)
    z$FDT_DATE_TIME <- as.character(as.POSIXct(z$FDT_DATE_TIME, format="%m/%d/%Y %H:%M"))
    DT::datatable(z, rownames = FALSE, options= list(scrollX = TRUE, pageLength = nrow(z), scrollY = "250px", dom='t')) %>%
      formatStyle(names(z), backgroundColor = styleEqual(c('NSP'), c('red'))) # highlight cells red if not supporting
  })
  # Sediment Metals
  # Select One station for individual review
  output$Smetals_oneStationSelectionUI <- renderUI({
    req(stationSelectedAbove)
    selectInput(ns('Smetals_oneStationSelection'),strong('Select Station to Review'),choices= sort(unique(c(stationSelectedAbove(),AUdata()$FDT_STA_ID))),#unique(AUdata())$FDT_STA_ID,
                width='300px', selected = stationSelectedAbove())})# "2-JMS279.41" )})
  Smetals_oneStation <- reactive({
    # NOTE(review): same req(ns(...)) pattern as above — confirm intent.
    req(ns(input$Smetals_oneStationSelection))
    filter(Smetals, FDT_STA_ID %in% input$Smetals_oneStationSelection)})
  output$SmetalsRangeTableSingleSite <- DT::renderDataTable({
    req(Smetals_oneStation())
    z <- dplyr::select(Smetals_oneStation(), FDT_STA_ID, FDT_DATE_TIME:`CHLORDANE_TOTAL`)
    z$FDT_DATE_TIME <- as.character(as.POSIXct(z$FDT_DATE_TIME, format="%m/%d/%Y %H:%M"))
    DT::datatable(z, rownames = FALSE, options= list(scrollX = TRUE, pageLength = nrow(z), scrollY = "250px", dom='t')) })
  output$SstationmetalsExceedanceRate <- DT::renderDataTable({
    req(input$Smetals_oneStationSelection, Smetals_oneStation())
    z <- dplyr::select(Smetals_oneStation(), FDT_STA_ID, `FDT_DATE_TIME`,`ACENAPHTHENE`:COMMENT)
    z$FDT_DATE_TIME <- as.character(as.POSIXct(z$FDT_DATE_TIME, format="%m/%d/%Y %H:%M"))
    DT::datatable(z, rownames = FALSE, options= list(scrollX = TRUE, pageLength = nrow(z), scrollY = "250px", dom='t')) %>%
      formatStyle(names(z), backgroundColor = styleEqual(c('OE'), c('red'))) # highlight cells red if not supporting
  })
}
ui <- fluidPage(
helpText('Review each site using the single site visualization section. There are no WQS for Specific Conductivity.'),
metalsTableSingleStationUI('metals')
)
# Test-harness server: wires the metals module to one hard-coded AU.
# The third filter clause previously re-tested ID305B_2 (copy-paste slip);
# it now checks ID305B_3 like the production filter at the top of this file.
server <- function(input,output,session){
  stationData <- eventReactive( input$stationSelection, {
    filter(AUData, FDT_STA_ID %in% input$stationSelection) })
  stationSelected <- reactive({input$stationSelection})
  AUData <- reactive({filter(conventionals_HUC, ID305B_1 %in% 'VAW-I04R_JKS03A00' |
                               ID305B_2 %in% 'VAW-I04R_JKS03A00' |
                               ID305B_3 %in% 'VAW-I04R_JKS03A00') %>%
      left_join(WQSvalues, by = 'CLASS')})
  callModule(metalsTableSingleStation,'metals', AUData, WCmetals ,Smetals, stationSelected)
}
shinyApp(ui,server)
| /R&S_app_v1/appModules_Testing/buildMetalsModule.R | no_license | EmmaVJones/Rivers-StreamsAssessment | R | false | false | 7,055 | r | source('testingDataset.R')
monStationTemplate <- read_excel('data/tbl_ir_mon_stations_template.xlsx') # from X:\2018_Assessment\StationsDatabase\VRO
# Single station data ----------------------------------------------------------------------
conventionals_HUC<- left_join(conventionals, dplyr::select(stationTable, FDT_STA_ID, SEC, CLASS, SPSTDS, ID305B_1, ID305B_2, ID305B_3), by='FDT_STA_ID')
AUData <- filter(conventionals_HUC, ID305B_1 %in% 'VAW-I04R_JKS04A00' |
ID305B_2 %in% 'VAW-I04R_JKS04A00' |
ID305B_2 %in% 'VAW-I04R_JKS04A00')%>%
left_join(WQSvalues, by = 'CLASS')
x <-filter(conventionals_HUC, FDT_STA_ID %in% '2-JKS033.06') #'2-JMS279.41')#
x2 <- filter(conventionals_HUC, FDT_STA_ID %in% '2-JKS028.69')
# Bring in Roger's Metal Assessment
WCmetals <- read_excel('data/WATER_METALS_20170712.xlsx')
Smetals <- read_excel('data/SEDIMENT_20170712.xlsx')
WCmetals[558,97] <- 'NSP'
z <- filter(WCmetals, FDT_STA_ID %in% x$FDT_STA_ID)# %>%
# dplyr::select(FDT_STA_ID:HARDNESS)
z <- dplyr::select(z, `ANTIMONY HUMAN HEALTH PWS`:`ZINC ALL OTHER SURFACE WATERS`)
s <- filter(Smetals, FDT_STA_ID %in% x2$FDT_STA_ID)
# Summarise metals exceedances for one station.
#
# Counts the "NSP" (not supporting) flags across every cell of x and reports
# a violation count plus a status: 'Review' when any violation exists,
# otherwise 'S' (supporting). With no data the count is NA (and so is the
# status, via ifelse on NA). Output column names are prefixed with metalType.
metalsExceedances <- function(x, metalType) {
  violations <- if (nrow(x) > 0) length(which(x == 'NSP')) else NA
  out <- data.frame(VIO = violations,
                    STAT = ifelse(violations > 0, 'Review', 'S'))
  names(out) <- paste(metalType, names(out), sep = '_')
  out
}
metalsExceedances(z, 'WAT_MET')
metalsTableSingleStationUI <- function(id){
ns <- NS(id)
tagList(
tabsetPanel(
tabPanel('Water Column Metals',
wellPanel(
h4(strong('Single Station Data Visualization')),
uiOutput(ns('WCmetals_oneStationSelectionUI')),
h5('All water column metals data available for the ',span(strong('selected site')),' are available below.
If no data is presented, then the station does not have any water column metals data available.'),
DT::dataTableOutput(ns('WCmetalsRangeTableSingleSite')),br(), br(), br(),
h5('Metals assessments for the ',span(strong('selected site')),' are highlighted below.'),
DT::dataTableOutput(ns("WCstationmetalsExceedanceRate")))),
tabPanel('Sediment Metals',
wellPanel(
h4(strong('Single Station Data Visualization')),
uiOutput(ns('Smetals_oneStationSelectionUI')),
h5('All sediment metals data available for the ',span(strong('selected site')),' are available below.
If no data is presented, then the station does not have any sediment metals data available.'),
DT::dataTableOutput(ns('SmetalsRangeTableSingleSite')),br(), br(), br(),
h5('Metals assessments for the ',span(strong('selected site')),' are highlighted below.'),
DT::dataTableOutput(ns("SstationmetalsExceedanceRate"))))
))
}
metalsTableSingleStation <- function(input,output,session, AUdata, WCmetals ,Smetals, stationSelectedAbove){
ns <- session$ns
# Select One station for individual review
output$WCmetals_oneStationSelectionUI <- renderUI({
req(stationSelectedAbove)
selectInput(ns('WCmetals_oneStationSelection'),strong('Select Station to Review'),choices= sort(unique(c(stationSelectedAbove(),AUdata()$FDT_STA_ID))),#unique(AUdata())$FDT_STA_ID,
width='300px', selected = stationSelectedAbove())})# "2-JMS279.41" )})
WCmetals_oneStation <- reactive({
req(ns(input$WCmetals_oneStationSelection))
filter(WCmetals, FDT_STA_ID %in% input$WCmetals_oneStationSelection)})
output$WCmetalsRangeTableSingleSite <- DT::renderDataTable({
req(WCmetals_oneStation())
z <- dplyr::select(WCmetals_oneStation(), FDT_STA_ID:HARDNESS)
z$FDT_DATE_TIME <- as.character(as.POSIXct(z$FDT_DATE_TIME, format="%m/%d/%Y %H:%M"))
DT::datatable(z, rownames = FALSE, options= list(scrollX = TRUE, pageLength = nrow(z), scrollY = "250px", dom='t')) })
output$WCstationmetalsExceedanceRate <- DT::renderDataTable({
req(input$WCmetals_oneStationSelection, WCmetals_oneStation())
z <- dplyr::select(WCmetals_oneStation(), FDT_STA_ID, `FDT_DATE_TIME`,`ANTIMONY HUMAN HEALTH PWS`:`ZINC ALL OTHER SURFACE WATERS`)
z$FDT_DATE_TIME <- as.character(as.POSIXct(z$FDT_DATE_TIME, format="%m/%d/%Y %H:%M"))
DT::datatable(z, rownames = FALSE, options= list(scrollX = TRUE, pageLength = nrow(z), scrollY = "250px", dom='t')) %>%
formatStyle(names(z), backgroundColor = styleEqual(c('NSP'), c('red'))) # highlight cells red if not supporting
})
# Sediment Metals
# Select One station for individual review
output$Smetals_oneStationSelectionUI <- renderUI({
req(stationSelectedAbove)
selectInput(ns('Smetals_oneStationSelection'),strong('Select Station to Review'),choices= sort(unique(c(stationSelectedAbove(),AUdata()$FDT_STA_ID))),#unique(AUdata())$FDT_STA_ID,
width='300px', selected = stationSelectedAbove())})# "2-JMS279.41" )})
Smetals_oneStation <- reactive({
req(ns(input$Smetals_oneStationSelection))
filter(Smetals, FDT_STA_ID %in% input$Smetals_oneStationSelection)})
output$SmetalsRangeTableSingleSite <- DT::renderDataTable({
req(Smetals_oneStation())
z <- dplyr::select(Smetals_oneStation(), FDT_STA_ID, FDT_DATE_TIME:`CHLORDANE_TOTAL`)
z$FDT_DATE_TIME <- as.character(as.POSIXct(z$FDT_DATE_TIME, format="%m/%d/%Y %H:%M"))
DT::datatable(z, rownames = FALSE, options= list(scrollX = TRUE, pageLength = nrow(z), scrollY = "250px", dom='t')) })
output$SstationmetalsExceedanceRate <- DT::renderDataTable({
req(input$Smetals_oneStationSelection, Smetals_oneStation())
z <- dplyr::select(Smetals_oneStation(), FDT_STA_ID, `FDT_DATE_TIME`,`ACENAPHTHENE`:COMMENT)
z$FDT_DATE_TIME <- as.character(as.POSIXct(z$FDT_DATE_TIME, format="%m/%d/%Y %H:%M"))
DT::datatable(z, rownames = FALSE, options= list(scrollX = TRUE, pageLength = nrow(z), scrollY = "250px", dom='t')) %>%
formatStyle(names(z), backgroundColor = styleEqual(c('OE'), c('red'))) # highlight cells red if not supporting
})
}
ui <- fluidPage(
helpText('Review each site using the single site visualization section. There are no WQS for Specific Conductivity.'),
metalsTableSingleStationUI('metals')
)
# Test-harness server: wires the metals module to one hard-coded AU.
# The third filter clause previously re-tested ID305B_2 (copy-paste slip);
# it now checks ID305B_3 like the production filter at the top of this file.
server <- function(input,output,session){
  stationData <- eventReactive( input$stationSelection, {
    filter(AUData, FDT_STA_ID %in% input$stationSelection) })
  stationSelected <- reactive({input$stationSelection})
  AUData <- reactive({filter(conventionals_HUC, ID305B_1 %in% 'VAW-I04R_JKS03A00' |
                               ID305B_2 %in% 'VAW-I04R_JKS03A00' |
                               ID305B_3 %in% 'VAW-I04R_JKS03A00') %>%
      left_join(WQSvalues, by = 'CLASS')})
  callModule(metalsTableSingleStation,'metals', AUData, WCmetals ,Smetals, stationSelected)
}
shinyApp(ui,server)
|
\name{MakeCensWeights}
\alias{MakeCensWeights}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Additive hazard weights for censoring.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
MakeCensWeights(fFit, cfFit, data, startTimeName, stopTimeName,
endStatusName, intStatusName, idName)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{fFit}{
The \code{\link[survival]{aalen}} fit for the graph hazard.
}
\item{cfFit}{
The \code{\link[survival]{aalen}} fit for the counterfactual hazard.
}
\item{data}{
Data frame on long format,
}
\item{startTimeName}{
Name of column with start-time of the at-risk period,
}
\item{stopTimeName}{
Name of column with stop-time of the at-risk period,
}
\item{endStatusName}{
Name of the variable that contains the name of end state for each at-risk interval,
}
\item{intStatusName}{
Name of the variable that contains the initial state for each at-risk interval,
}
\item{idName}{
Name of column in \code{data} that identifies individuals.
}
}
\details{
Uses \code{\link[survival]{aalen}} to perform the two additive hazard models, and then
\code{\link{RefineTimeScale}} to subdivide \code{data} such that the
subdivision is compatible with every change of the weights.
The weights are then computed as a stochastic exponential (product integral), i.e.
}
\value{
Sufficiently expanded version of the data frame \code{data} where the weights
are appended.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Kjetil Røysland <kjetil.roysland@medisin.uio.no>
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/MakeCensWeights.Rd | no_license | kjetilroysland/ahw | R | false | false | 2,197 | rd | \name{MakeCensWeights}
\alias{MakeCensWeights}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Additive hazard weights for censoring.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
MakeCensWeights(fFit, cfFit, data, startTimeName, stopTimeName,
endStatusName, intStatusName, idName)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{fFit}{
The \code{\link[survival]{aalen}} fit for the graph hazard.
}
\item{cfFit}{
The \code{\link[survival]{aalen}} fit for the counterfactual hazard.
}
\item{data}{
Data frame on long format,
}
\item{startTimeName}{
Name of column with start-time of the at-risk period,
}
\item{stopTimeName}{
Name of column with stop-time of the at-risk period,
}
\item{endStatusName}{
Name of the variable that contains the name of end state for each at-risk interval,
}
\item{intStatusName}{
Name of the variable that contains the initial state for each at-risk interval,
}
\item{idName}{
Name of column in \code{data} that identifies individuals.
}
}
\details{
Uses \code{\link[survival]{aalen}} to perform the two additive hazard models, and then
\code{\link{RefineTimeScale}} to subdivide \code{data} such that the
subdivision is compatible with every change of the weights.
The weights are then computed as a stochastic exponential (product integral), i.e.
}
\value{
Sufficiently expanded version of the data frame \code{data} where the weights
are appended.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Kjetil Røysland <kjetil.roysland@medisin.uio.no>
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(BCEA)
#load_all("..")
#load_all("voi")
#load_all("../../../BCEA")
chemo_bcea <- bcea(e=chemo_cea$e, c=chemo_cea$c, wtp=30000)
# EVPPI for a single parameter ("pi1") must agree between BCEA (first wtp
# value, here 30000) and voi (row selected at k == 30000).
test_that("Agrees with BCEA: single parameter, default GAM",{
  expect_equal(
    BCEA::evppi(parameter=c("pi1"), he=chemo_bcea, input=chemo_pars)$evppi[1],
    voi::evppi(outputs=chemo_cea, inputs=chemo_pars, pars=c("pi1"))[chemo_cea$k == 30000,"evppi"]
  )
})
# EVPPI for two parameters ("pi1", "rho") must agree between BCEA and voi.
# The description previously duplicated the single-parameter test's name.
test_that("Agrees with BCEA: two parameters, default GAM",{
  expect_equal(
    BCEA::evppi(parameter=c("pi1","rho"), he=chemo_bcea, input=chemo_pars)$evppi[1],
    voi::evppi(outputs=chemo_cea, inputs=chemo_pars, pars=c("pi1","rho"))[chemo_cea$k == 30000,"evppi"]
  )
})
# Reduced-size inputs for the slow INLA comparison.
chemo_bcea100 <- chemo_bcea
chemo_bcea100$n.sim <- 100
chemo_pars100 <- chemo_pars[1:100,]
## This is slow, should be in extra slow tests file
pars <- c("pi1","rho","gamma","gamma2")
# NOTE(review): BCEA::evppi is called with the full chemo_pars even though
# chemo_pars100 was built above and he=chemo_bcea100 claims n.sim = 100 —
# presumably input=chemo_pars100 was intended; confirm before relying on this.
res_bcea <- BCEA::evppi(parameter=pars, he=chemo_bcea100, input=chemo_pars, method="INLA")$evppi[1]
res_voi <- voi::evppi(outputs=chemo_cea, inputs=chemo_pars,
                      pars=pars, method="inla", nsim=100)[chemo_cea$k == 30000,"evppi"]
test_that("Agrees with BCEA: single parameter, INLA",{
  expect_equal(res_bcea, res_voi)
})
| /tests/tests_slow/test_bcea.R | no_license | chjackson/voi | R | false | false | 1,229 | r | library(BCEA)
#load_all("..")
#load_all("voi")
#load_all("../../../BCEA")
chemo_bcea <- bcea(e=chemo_cea$e, c=chemo_cea$c, wtp=30000)
test_that("Agrees with BCEA: single parameter, default GAM",{
expect_equal(
BCEA::evppi(parameter=c("pi1"), he=chemo_bcea, input=chemo_pars)$evppi[1],
voi::evppi(outputs=chemo_cea, inputs=chemo_pars, pars=c("pi1"))[chemo_cea$k == 30000,"evppi"]
)
})
test_that("Agrees with BCEA: single parameter, default GAM",{
expect_equal(
BCEA::evppi(parameter=c("pi1","rho"), he=chemo_bcea, input=chemo_pars)$evppi[1],
voi::evppi(outputs=chemo_cea, inputs=chemo_pars, pars=c("pi1","rho"))[chemo_cea$k == 30000,"evppi"]
)
})
chemo_bcea100 <- chemo_bcea
chemo_bcea100$n.sim <- 100
chemo_pars100 <- chemo_pars[1:100,]
## This is slow, should be in extra slow tests file
pars <- c("pi1","rho","gamma","gamma2")
res_bcea <- BCEA::evppi(parameter=pars, he=chemo_bcea100, input=chemo_pars, method="INLA")$evppi[1]
res_voi <- voi::evppi(outputs=chemo_cea, inputs=chemo_pars,
pars=pars, method="inla", nsim=100)[chemo_cea$k == 30000,"evppi"]
test_that("Agrees with BCEA: single parameter, INLA",{
expect_equal(res_bcea, res_voi)
})
|
MISSING_VALUE <- "Not available"
#' DatasetSummary, This class contains the information of one particular dataset in OmicsDI
#'
#' String slots default to the MISSING_VALUE sentinel when the API omits them.
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass DatasetSummary
#'
setClass("DatasetSummary",
         slots = c(
           dataset.id = "character",
           description = "character",
           database = "character",
           keywords = "vector",
           publication.date = "character",
           organisms = "vector",
           title = "character",
           visit.count = "numeric"),
         prototype = list(
           dataset.id = MISSING_VALUE,
           description = MISSING_VALUE,
           database = MISSING_VALUE,
           keywords = MISSING_VALUE,
           publication.date = MISSING_VALUE,
           title = MISSING_VALUE,
           organisms = MISSING_VALUE,
           visit.count = 0
         ),
         validity = function(object) {
           # check dataset.id (comment previously said "assay.accession")
           if (!is.character(object@dataset.id) || nchar(object@dataset.id) == 0 || is.na(object@dataset.id))
             return("'dataset.id' must be a single valid string")
           # check database (comment previously said "project.accession")
           if (!is.character(object@database) || nchar(object@database) == 0 || is.na(object@database))
             return("'database' must be a single valid string")
           # check visit.count; message fixed ("none negative" -> "non-negative")
           if (!is.numeric(object@visit.count) || object@visit.count < 0 || is.na(object@visit.count))
             return("'visit.count' must be a non-negative number")
           return(TRUE)
         }
)
#' Organism return an organism entity including its name and accession
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Organism
setClass("Organism",
         slots = c(
           name = "character",
           accession = "character"
         ),
         prototype = list(
           name = MISSING_VALUE,
           accession = MISSING_VALUE
         ),
         validity = function(object){
           # check name
           if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
             return("'name' must be a single valid string")
           # Signal validity explicitly (consistent with DatasetSummary).
           return(TRUE)
         }
)
#' FacetValue provides the information about an specific Facet in the API
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass FacetValue
#'
setClass("FacetValue",
         slots = c(value = "character",
                   count = "character",
                   label = "character"
         ),
         prototype = list( value = MISSING_VALUE,
                           count = MISSING_VALUE,
                           label = MISSING_VALUE
         ),
         validity = function(object){
           # check value
           if (!is.character(object@value) || nchar(object@value) == 0 || is.na(object@value))
             return("'value' must be a single valid string")
           # check count; message fixed (it previously reported 'value')
           if (!is.character(object@count) || nchar(object@count) == 0 || is.na(object@count))
             return("'count' must be a single valid string")
           # check label
           if (!is.character(object@label) || nchar(object@label) == 0 || is.na(object@label))
             return("'label' must be a single valid string")
           # Signal validity explicitly (consistent with DatasetSummary).
           return(TRUE)
         }
)
#' DatasetResult, provides a list of datasets for an specific query
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass DatasetResult
#'
setClass("DatasetResult",
         # slots =/prototype = list() replaces the legacy representation()/
         # prototype() calls for consistency with the other classes here.
         slots = c(
           count = "numeric",
           facets = "vector",
           datasets = "vector"
         ),
         prototype = list(
           count = 0,
           facets = c(MISSING_VALUE),
           datasets = c(MISSING_VALUE)
         )
)
#'Facet provides the information about an specific Facet in the API
#'
#' A facet groups a set of FacetValue entries under a common id/label,
#' with `total` holding the overall count for the facet.
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Facet
#'
setClass("Facet",
         slots = c(
           facet.id = "character",
           total = "numeric",
           facetValues = "vector",  # list of FacetValue objects
           label = "character"),
         prototype = list(
           facet.id = MISSING_VALUE,
           label = MISSING_VALUE,
           total = 0,
           facetValues = list()
         )
)
#' DatasetDetail provides the full metadata record of one dataset in OmicsDI
#'
#' Unlike DatasetSummary this carries the complete detail view: protocols,
#' tissues, diseases, instruments, publications and lab members. Vector
#' slots default to a one-element list holding the MISSING_VALUE sentinel.
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass DatasetDetail
#'
setClass("DatasetDetail",
         slots = c(
           name = "character",
           dataset.id = "character",
           description = "character",
           database = "character",
           protocols = "vector",
           keywords = "vector",
           tissues = "vector",
           diseases = "vector",
           full.dataset.link = "character",
           instruments = "vector",
           experiment.type = "vector",
           publication.date = "character",
           publication.ids = "vector",
           organisms = "vector",
           lab.members = "vector"
         ),
         prototype = list(
           name = MISSING_VALUE,
           dataset.id = MISSING_VALUE,
           description = MISSING_VALUE,
           database = MISSING_VALUE,
           protocols = list(MISSING_VALUE),
           keywords = list(MISSING_VALUE),
           tissues = list(MISSING_VALUE),
           diseases = list(MISSING_VALUE),
           full.dataset.link = MISSING_VALUE,
           instruments = list(MISSING_VALUE),
           experiment.type = list(MISSING_VALUE),
           publication.date = MISSING_VALUE,
           publication.ids = list(MISSING_VALUE),
           organisms = list(MISSING_VALUE),
           lab.members = list(MISSING_VALUE)
         ),
         validity = function(object){
           # check name
           if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
             return("'name' must be a single valid string")
           # check dataset.id
           if (!is.character(object@dataset.id) || nchar(object@dataset.id) == 0 || is.na(object@dataset.id))
             return("'dataset.id' must be a single valid string")
           # check database
           if (!is.character(object@database) || nchar(object@database) == 0 || is.na(object@database))
             return("'database' must be a single valid string")
           # Signal validity explicitly (consistent with DatasetSummary).
           return(TRUE)
         }
)
#' Protocol retrieves the information of a Protocol used in the experiment
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Protocol
#'
setClass("Protocol",
         slots = c(
           name = "character",
           description = "character"
         ),
         prototype = list(
           name = MISSING_VALUE,
           description = MISSING_VALUE
         ),
         validity = function(object){
           # check name
           if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
             return("'name' must be a single valid string")
           # check description
           if (!is.character(object@description) || nchar(object@description) == 0 || is.na(object@description))
             return("'description' must be a single valid string")
           # Signal validity explicitly (consistent with DatasetSummary).
           return(TRUE)
         }
)
#' LabMember retrieves the information of a lab member including affiliation, name, etc.
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass LabMember
# NOTE: the slot name "affilation" (sic) is kept for backward compatibility
# with existing accessors. The prototype must use the same spelling as the
# slot: setClass() fails if a prototype element does not match a slot name
# (the original listed "affiliation" here, which made class creation error).
setClass("LabMember",
slots = c(
name = "character",
role = "character",
affilation = "character",
email = "character"
),
prototype = list(
name = MISSING_VALUE,
role = MISSING_VALUE,
affilation = MISSING_VALUE,
email = MISSING_VALUE)
)
#' StatRecord, This class contains the information of statistics records
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass StatRecord
setClass("StatRecord",
slots = c(
name = "character",
value = "character",
id = "character",
label = "character"),
prototype = list(
name = MISSING_VALUE,
value = MISSING_VALUE,
id = MISSING_VALUE,
label = MISSING_VALUE),
# Validity: every slot must be a non-empty, non-NA string.
validity = function(object){
# check name
if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
return("'name' must be a single valid string")
# check value
if (!is.character(object@value) || nchar(object@value) == 0 || is.na(object@value))
return("'value' must be a single valid string")
# check id
if (!is.character(object@id) || nchar(object@id) == 0 || is.na(object@id))
return("'id' must be a single valid string")
# check label
if (!is.character(object@label) || nchar(object@label) == 0 || is.na(object@label))
return("'label' must be a single valid string")
}
)
#' DictWord, This class contains the information of a list of Dictionary Words
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass DictWord
#'
# total.count is the number of words reported by the service;
# items holds the word entries themselves (empty by default).
setClass("DictWord",
slots = c(
total.count = "numeric",
items = "vector"),
prototype = list(
total.count = 0,
items = list()
)
)
#' Item, This class contains the information of an Item in the Dictionary
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Item
#'
setClass("Item",
slots = c(
name = "character"
),
prototype = list(
name = MISSING_VALUE
),
validity = function(object){
# check name: must be a non-empty, non-NA string
if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
return("'name' must be a single valid string")
}
)
#' Term, This class contains the information of a Term
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Term
# NOTE(review): "frequent" presumably holds the term's frequency as returned
# by the service (stored as character) -- confirm against the API response.
setClass("Term",
slots = c(
label = "character",
frequent = "character"
),
prototype = list(
label = MISSING_VALUE,
frequent = MISSING_VALUE
)
)
#' PublicationResult, This class contains a list of publications for a specific query
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass PublicationResult
setClass("PublicationResult",
slots = c(count = "numeric",
publications = "vector"),
prototype = list(
count = 0,
publications = list()
)
)
#' PublicationDetail, This class contains a Publication Record
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass PublicationDetail
#'
# NOTE: the slot name "affilation" (sic) is kept as-is; renaming it would
# break existing accessors.
setClass("PublicationDetail",
slots = c(
id = "character",
date = "character",
database = "character",
keywords = "vector",
affilation = "vector",
title = "character",
authors = "vector",
abstract = "vector",
journal = "character",
issue = "character",
pagination = "character",
volume = "character"
),
prototype = list(
id = MISSING_VALUE,
date = MISSING_VALUE,
database = MISSING_VALUE,
keywords = list(),
affilation = list(),
title = MISSING_VALUE,
authors = list(),
abstract = list(),
journal = MISSING_VALUE,
issue = MISSING_VALUE,
pagination = MISSING_VALUE,
volume = MISSING_VALUE
),
# Validity: scalar string slots must be non-empty and non-NA;
# list-valued slots (keywords, authors, ...) are not validated.
validity = function(object){
# check id
if (!is.character(object@id) || nchar(object@id) == 0 || is.na(object@id))
return("'id' must be a single valid string")
# check date
if (!is.character(object@date) || nchar(object@date) == 0 || is.na(object@date))
return("'date' must be a single valid string")
# check database
if (!is.character(object@database) || nchar(object@database) == 0 || is.na(object@database))
return("'database' must be a single valid string")
# check title
if (!is.character(object@title) || nchar(object@title) == 0 || is.na(object@title))
return("'title' must be a single valid string")
# check journal
if (!is.character(object@journal) || nchar(object@journal) == 0 || is.na(object@journal))
return("'journal' must be a single valid string")
}
)
| /R/AllClasses.R | no_license | gccong/ddiR-sirius | R | false | false | 13,057 | r | MISSING_VALUE <- "Not available"
#' DatasetSummary, This class contains the information of one particular dataset in OmicsDI
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass DatasetSummary
#'
setClass("DatasetSummary",
slots = c(
dataset.id = "character",
description = "character",
database = "character",
keywords = "vector",
publication.date = "character",
organisms = "vector",
title = "character",
visit.count = "numeric"),
prototype = list(
dataset.id = MISSING_VALUE,
description = MISSING_VALUE,
database = MISSING_VALUE,
keywords = MISSING_VALUE,
publication.date = MISSING_VALUE,
title = MISSING_VALUE,
organisms = MISSING_VALUE,
visit.count = 0
),
# Validity: identifying strings must be non-empty/non-NA and the visit
# counter must be a non-negative number.
validity = function(object) {
# check dataset.id
if (!is.character(object@dataset.id) || nchar(object@dataset.id) == 0 || is.na(object@dataset.id))
return("'dataset.id' must be a single valid string")
# check database
if (!is.character(object@database) || nchar(object@database) == 0 || is.na(object@database))
return("'database' must be a single valid string")
# check visit.count
if (!is.numeric(object@visit.count) || object@visit.count < 0 || is.na(object@visit.count))
return("'visit.count' must be a none negative number")
return(TRUE)
}
)
#' Organism returns an organism entity including its name and accession
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Organism
setClass("Organism",
slots = c(
name = "character",
accession = "character"
),
prototype = list(
name = MISSING_VALUE,
accession = MISSING_VALUE
),
validity = function(object){
# check name: must be a non-empty, non-NA string (accession is not validated)
if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
return("'name' must be a single valid string")
}
)
#' FacetValue provides the information about a specific Facet value in the API
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass FacetValue
#'
setClass("FacetValue",
slots = c(value = "character",
count = "character",
label = "character"
),
prototype = list( value = MISSING_VALUE,
count = MISSING_VALUE,
label = MISSING_VALUE
),
# Validity: all three slots must be non-empty, non-NA strings.
validity = function(object){
# check value
if (!is.character(object@value) || nchar(object@value) == 0 || is.na(object@value))
return("'value' must be a single valid string")
# check count (fixed: the message previously referred to 'value')
if (!is.character(object@count) || nchar(object@count) == 0 || is.na(object@count))
return("'count' must be a single valid string")
# check label
if (!is.character(object@label) || nchar(object@label) == 0 || is.na(object@label))
return("'label' must be a single valid string")
}
)
#' DatasetResult, provides a list of datasets for a specific query
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass DatasetResult
#'
# Uses the legacy representation()/prototype() call style; equivalent to
# slots = / prototype = used by the other classes in this file.
setClass("DatasetResult",
representation(
count = "numeric",
facets = "vector",
datasets = "vector"
),
prototype(
count = 0,
facets = c(MISSING_VALUE),
datasets = c(MISSING_VALUE)
)
)
#' Facet provides the information about a specific Facet in the API
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Facet
#'
# facetValues holds FacetValue entries; total is the number of hits for
# this facet.
setClass("Facet",
slots = c(
facet.id = "character",
total = "numeric",
facetValues = "vector",
label = "character"),
prototype = list(
facet.id = MISSING_VALUE,
label = MISSING_VALUE,
total = 0,
facetValues = list()
)
)
#' DatasetDetail provides the detailed information about a specific dataset in the API
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass DatasetDetail
#'
# Slots mirror the JSON fields of the dataset-detail endpoint; scalar fields
# default to MISSING_VALUE, multi-valued fields to list(MISSING_VALUE).
setClass("DatasetDetail",
slots = c(
name = "character",
dataset.id = "character",
description = "character",
database = "character",
protocols = "vector",
keywords = "vector",
tissues = "vector",
diseases = "vector",
full.dataset.link = "character",
instruments = "vector",
experiment.type = "vector",
publication.date = "character",
publication.ids = "vector",
organisms = "vector",
lab.members = "vector"
),
prototype = list(
name = MISSING_VALUE,
dataset.id = MISSING_VALUE,
description = MISSING_VALUE,
database = MISSING_VALUE,
protocols = list(MISSING_VALUE),
keywords = list(MISSING_VALUE),
tissues = list(MISSING_VALUE),
diseases = list(MISSING_VALUE),
full.dataset.link = MISSING_VALUE,
instruments = list(MISSING_VALUE),
experiment.type = list(MISSING_VALUE),
publication.date = MISSING_VALUE,
publication.ids = list(MISSING_VALUE),
organisms = list(MISSING_VALUE),
lab.members = list(MISSING_VALUE)
),
# Validity: the three identifying string slots must be non-empty and non-NA.
validity = function(object){
# check name
if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
return("'name' must be a single valid string")
# check dataset.id
if (!is.character(object@dataset.id) || nchar(object@dataset.id) == 0 || is.na(object@dataset.id))
return("'dataset.id' must be a single valid string")
# check database
if (!is.character(object@database) || nchar(object@database) == 0 || is.na(object@database))
return("'database' must be a single valid string")
}
)
#' Protocol retrieves the information of a Protocol used in the experiment
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Protocol
#'
setClass("Protocol",
slots = c(
name = "character",
description = "character"
),
prototype = list(
name = MISSING_VALUE,
description = MISSING_VALUE
),
# Validity: both slots must be non-empty, non-NA strings.
validity = function(object){
# check name
if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
return("'name' must be a single valid string")
# check description
if (!is.character(object@description) || nchar(object@description) == 0 || is.na(object@description))
return("'description' must be a single valid string")
}
)
#' LabMember retrieves the information of a lab member including affiliation, name, etc.
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass LabMember
# NOTE: the slot name "affilation" (sic) is kept for backward compatibility
# with existing accessors. The prototype must use the same spelling as the
# slot: setClass() fails if a prototype element does not match a slot name
# (the original listed "affiliation" here, which made class creation error).
setClass("LabMember",
slots = c(
name = "character",
role = "character",
affilation = "character",
email = "character"
),
prototype = list(
name = MISSING_VALUE,
role = MISSING_VALUE,
affilation = MISSING_VALUE,
email = MISSING_VALUE)
)
#' StatRecord, This class contains the information of statistics records
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass StatRecord
setClass("StatRecord",
slots = c(
name = "character",
value = "character",
id = "character",
label = "character"),
prototype = list(
name = MISSING_VALUE,
value = MISSING_VALUE,
id = MISSING_VALUE,
label = MISSING_VALUE),
# Validity: every slot must be a non-empty, non-NA string.
validity = function(object){
# check name
if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
return("'name' must be a single valid string")
# check value
if (!is.character(object@value) || nchar(object@value) == 0 || is.na(object@value))
return("'value' must be a single valid string")
# check id
if (!is.character(object@id) || nchar(object@id) == 0 || is.na(object@id))
return("'id' must be a single valid string")
# check label
if (!is.character(object@label) || nchar(object@label) == 0 || is.na(object@label))
return("'label' must be a single valid string")
}
)
#' DictWord, This class contains the information of a list of Dictionary Words
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass DictWord
#'
# total.count is the number of words reported by the service;
# items holds the word entries themselves (empty by default).
setClass("DictWord",
slots = c(
total.count = "numeric",
items = "vector"),
prototype = list(
total.count = 0,
items = list()
)
)
#' Item, This class contains the information of an Item in the Dictionary
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Item
#'
setClass("Item",
slots = c(
name = "character"
),
prototype = list(
name = MISSING_VALUE
),
validity = function(object){
# check name: must be a non-empty, non-NA string
if (!is.character(object@name) || nchar(object@name) == 0 || is.na(object@name))
return("'name' must be a single valid string")
}
)
#' Term, This class contains the information of a Term
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass Term
# NOTE(review): "frequent" presumably holds the term's frequency as returned
# by the service (stored as character) -- confirm against the API response.
setClass("Term",
slots = c(
label = "character",
frequent = "character"
),
prototype = list(
label = MISSING_VALUE,
frequent = MISSING_VALUE
)
)
#' PublicationResult, This class contains a list of publications for a specific query
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass PublicationResult
setClass("PublicationResult",
slots = c(count = "numeric",
publications = "vector"),
prototype = list(
count = 0,
publications = list()
)
)
#' PublicationDetail, This class contains a Publication Record
#'
#' @importFrom rjson fromJSON
#' @import methods
#' @export
#' @exportClass PublicationDetail
#'
# NOTE: the slot name "affilation" (sic) is kept as-is; renaming it would
# break existing accessors.
setClass("PublicationDetail",
slots = c(
id = "character",
date = "character",
database = "character",
keywords = "vector",
affilation = "vector",
title = "character",
authors = "vector",
abstract = "vector",
journal = "character",
issue = "character",
pagination = "character",
volume = "character"
),
prototype = list(
id = MISSING_VALUE,
date = MISSING_VALUE,
database = MISSING_VALUE,
keywords = list(),
affilation = list(),
title = MISSING_VALUE,
authors = list(),
abstract = list(),
journal = MISSING_VALUE,
issue = MISSING_VALUE,
pagination = MISSING_VALUE,
volume = MISSING_VALUE
),
# Validity: scalar string slots must be non-empty and non-NA;
# list-valued slots (keywords, authors, ...) are not validated.
validity = function(object){
# check id
if (!is.character(object@id) || nchar(object@id) == 0 || is.na(object@id))
return("'id' must be a single valid string")
# check date
if (!is.character(object@date) || nchar(object@date) == 0 || is.na(object@date))
return("'date' must be a single valid string")
# check database
if (!is.character(object@database) || nchar(object@database) == 0 || is.na(object@database))
return("'database' must be a single valid string")
# check title
if (!is.character(object@title) || nchar(object@title) == 0 || is.na(object@title))
return("'title' must be a single valid string")
# check journal
if (!is.character(object@journal) || nchar(object@journal) == 0 || is.na(object@journal))
return("'journal' must be a single valid string")
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gl.ibd.r
\name{gl.ibd}
\alias{gl.ibd}
\title{Isolation by distance}
\usage{
gl.ibd(
gl = NULL,
Dgen = NULL,
Dgeo = NULL,
projected = FALSE,
permutations = 999,
plot = TRUE
)
}
\arguments{
\item{gl}{genlight object. If provided a standard analysis on Fst/1-Fst and log(distance) is performed}
\item{Dgen}{genetic distance matrix if no genlight object with coordinates is provided}
\item{Dgeo}{Euclidean distance matrix if no genlight object is provided}
\item{projected}{Switch to indicate that coordinates are already projected (not in lat long) and therefore no projection is carried out. Default is FALSE, so it is assumed coordinates are in lat/longs.}
\item{permutations}{number of permutations in the mantel test}
\item{plot}{should an isolation by distance plot be returned. Default is plot=TRUE}
}
\value{
returns a list of the following components: Dgen (the genetic distance matrix), Dgeo (the Euclidean distance matrix), mantel (the statistics of the mantel test)
}
\description{
This function performs an isolation by distance analysis based on a Mantel test and also produces an isolation by distance plot. If a genlight object with coordinates is provided, then a Euclidean and a genetic distance matrix are calculated. Currently only pairwise Fst between populations is implemented. Coordinates are expected as lat/long and converted to Google Earth Mercator projection. If coordinates are already projected, set projected=TRUE. If such an object is provided, an isolation by distance analysis and plot is performed on log(Euclidean distance) against population-based pairwise Fst/1-Fst (see Rousset's distance measure. Genetics April 1, 1997 vol. 145 no. 4 1219-1228)
You can provide also your own genetic and Euclidean distance matrix. The function is based on the code provided by the adegenet tutorial (\url{http://adegenet.r-forge.r-project.org/files/tutorial-basics.pdf}), using the functions \link[vegan]{mantel} (package vegan), \link[StAMPP]{stamppFst} (package StAMPP) and Mercator in package dismo.
}
\examples{
\donttest{
ibd <- gl.ibd(bandicoot.gl)
ibd <- gl.ibd(bandicoot.gl,plot = FALSE)
}
}
\references{
Rousset (1997) Genetic Differentiation and Estimation of Gene Flow from F-Statistics Under Isolation by Distance. Genetics 145(4), 1219-1228.
}
\seealso{
\link[vegan]{mantel}, \link[StAMPP]{stamppFst}
}
\author{
Bernd Gruber (bugs? Post to \url{https://groups.google.com/d/forum/dartr})
}
| /man/gl.ibd.Rd | no_license | qitsweauca/dartR | R | false | true | 2,525 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gl.ibd.r
\name{gl.ibd}
\alias{gl.ibd}
\title{Isolation by distance}
\usage{
gl.ibd(
gl = NULL,
Dgen = NULL,
Dgeo = NULL,
projected = FALSE,
permutations = 999,
plot = TRUE
)
}
\arguments{
\item{gl}{genlight object. If provided a standard analysis on Fst/1-Fst and log(distance) is performed}
\item{Dgen}{genetic distance matrix if no genlight object with coordinates is provided}
\item{Dgeo}{Euclidean distance matrix if no genlight object is provided}
\item{projected}{Switch to indicate that coordinates are already projected (not in lat long) and therefore no projection is carried out. Default is FALSE, so it is assumed coordinates are in lat/longs.}
\item{permutations}{number of permutations in the mantel test}
\item{plot}{should an isolation by distance plot be returned. Default is plot=TRUE}
}
\value{
returns a list of the following components: Dgen (the genetic distance matrix), Dgeo (the Euclidean distance matrix), mantel (the statistics of the mantel test)
}
\description{
This function performs an isolation by distance analysis based on a Mantel test and also produces an isolation by distance plot. If a genlight object with coordinates is provided, then a Euclidean and a genetic distance matrix are calculated. Currently only pairwise Fst between populations is implemented. Coordinates are expected as lat/long and converted to Google Earth Mercator projection. If coordinates are already projected, set projected=TRUE. If such an object is provided, an isolation by distance analysis and plot is performed on log(Euclidean distance) against population-based pairwise Fst/1-Fst (see Rousset's distance measure. Genetics April 1, 1997 vol. 145 no. 4 1219-1228)
You can provide also your own genetic and Euclidean distance matrix. The function is based on the code provided by the adegenet tutorial (\url{http://adegenet.r-forge.r-project.org/files/tutorial-basics.pdf}), using the functions \link[vegan]{mantel} (package vegan), \link[StAMPP]{stamppFst} (package StAMPP) and Mercator in package dismo.
}
\examples{
\donttest{
ibd <- gl.ibd(bandicoot.gl)
ibd <- gl.ibd(bandicoot.gl,plot = FALSE)
}
}
\references{
Rousset (1997) Genetic Differentiation and Estimation of Gene Flow from F-Statistics Under Isolation by Distance. Genetics 145(4), 1219-1228.
}
\seealso{
\link[vegan]{mantel}, \link[StAMPP]{stamppFst}
}
\author{
Bernd Gruber (bugs? Post to \url{https://groups.google.com/d/forum/dartr})
}
|
# Fit a cross-validated elastic-net regression (alpha = 0.55) on the
# large_intestine correlation training set and append the fitted model
# summary to a log file.
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/large_intestine.csv",head=T,sep=",")
# column 1 is the response; predictors start at column 4
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# fixed seed so the 10-fold CV split is reproducible
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.55,family="gaussian",standardize=TRUE)
# redirect print output into the model log (appending to earlier runs)
sink('./Model/EN/Correlation/large_intestine/large_intestine_061.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/large_intestine/large_intestine_061.R | no_license | leon1003/QSMART | R | false | false | 387 | r | library(glmnet)
mydata = read.table("./TrainingSet/Correlation/large_intestine.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.55,family="gaussian",standardize=TRUE)
sink('./Model/EN/Correlation/large_intestine/large_intestine_061.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# File: R_1_Basics.R
# Course: Introduction to R
# Section: 1: The basics
# Author: Christopher Solis, uic.edu, @csolisoc
# Date: 2019-04-23
# Based on Mhairi McNeill Base R Cheat Sheet
# Some good introductory guides to R:
# https://rladiessydney.org/courses/ryouwithme/01-basicbasics-2/
# https://moderndive.netlify.app/1-getting-started.html
# 1. Getting Help ########################################################
?mean # Get help of a particular function
help.search("weighted mean") # Search the help files for a word or phrase
help(package = "dplyr") # Find help for a package.
# 2. Using Libraries #####################################################
install.packages("dplyr") # Download and install a package from CRAN
library(dplyr) # Load the package into the session, making all its functions available to use
dplyr::select # Use a particular function from a package.
# Try to find "dplyr" under Packages tab in the bottom-right quadrant
# 3. Working Directory #################################################
getwd() # Find the current working directory (where inputs are found and outputs are sent).
setwd("~/Desktop") # Change the current working directory.
2:6 # just an interger sequence
c(2,4,6) #Join elements into a vector
seq(2, 3, by=0.5)# A complex sequence
rep(1:2, times=3)# Repeat a vector
rep(1:2, each=3) # Repeat elements of a vector
A <- sample(1:10, 12, replace = TRUE)
A # shows content of vector a
print(A) # shows values of vector a
sort(A) # Return A sorted
table(A) # See counts of values (makes a frequency distribution list)
rev(A) # Return x reversed
unique(A) # See unique values
rm(A) # removes A
A
# 5. "<-" vs. "=" #####################################################
#<- is preferred and advised in R Coding style guides:
#https://google.github.io/styleguide/Rguide.xml#assignment
#http://adv-r.had.co.nz/Style.html
# Example of interchangeable orientations with arrows
A <- 12
13 -> B
A
B
A -> B
A <- B
A
B
# Example of how this doesn't apply to "="
A = 12
13 = B # doesn't work!
# 6. Pick elements from vectors ###########################################
A <- sample(1:10, 12, replace = TRUE)
A[4] # Pick the fourth element in A
A[-4] # Pick all but the fourth
A[2:4] # Elements two to four
A[-(2:4)] # All elements except two to four
A[c(1, 5)] # Elements one and five
A[A == 10] # Elements which are equal to 10 (fixed: 'x' was undefined here)
A[A < 4] # All elements less than four
B <- c(George = "apple", Maria = "orange", John = "cherry",Anthony = "apple")
B[2] # Call one element based on its index
B["Maria"] # Call one element based on the name
names(B)[B == "apple"] # Call names that selected apples
# 7. Programming ##############################################################
# For loop. Structure:
#for (variable in sequence){
# Do something
#}
# Example: prints 11, 12, 13, 14
for (i in 1:4){
j <- i + 10
print(j) }
# Functions. Structure:
#function_name <- function(var){
# Do something
# return(new_variable)
#}
# square() returns its argument multiplied by itself (works elementwise on vectors)
square <- function(x){
squared <- x*x
return(squared)
}
square(9)
# 8. Math Functions ############################################################
A <- sample(1:10, 6, replace = TRUE)
A
log(A) # Natural log
exp(A) # Exponential
max(A) # Largest element
min(A) # Smallest element
round(exp(A), 2) # Round to n decimal places
signif(exp(A), 3) # Round to n significant figures
cor(A, rev(A)) # Correlation. Pearson is the default
sum(A) # Sum
mean(A) # Mean
median(A) # Median
quantile(A) # Percentage quantiles
rank(A) # Rank of elements
var(A) # The variance
sqrt(var(A)) # The square root of the variance... (What's that?)
sd(A) # The standard deviation
# 9. Matrices ################################################################
M <- matrix(A, nrow = 3, ncol = 2)
A
M
class(M)
M[2, ] # Select a row
M[ , 1] # Select a column. Notice that it's displayed horizontally in the console
M[3, 2] # Select an element
t(M) # Transpose
M %*% t(M) # Matrix Multiplication
diag(3) # creates an identity matrix (aka I)
N <- solve(diag(3), M) # Find x in: I * x = M
N # since I * x = M, the solution x is just M itself (note: M is 3x2, so it has no inverse)
diag(3) %*% N # identity matrix I times N gives M back (I * M = M)
# 10. Data Frames ################################################################
A <- data.frame(x = 1:8, y = c("a","b","c","d","e","f", "g","h"))
View(A) # See full data frame (opens the data viewer in RStudio)
head(A) # See the first 6 rows
nrow(A) # number of rows (also works with matrices)
ncol(A) # number of columns (also works with matrices)
dim(A)# number of rows and columns (also works with matrices)
A$x # gets "x" subset
A$y # gets "y" subset
A[[2]] # gets "y" subset. Similar to typing A$y
cbind(A,A$y,A$x) # bind columns
rbind(A,A[3:4, ]) # bind rows
rbind(A,A[3:6, ],A[3:6, ]) # bind rows (the same rows can be bound more than once)
# 11. Strings ################################################################
A <- c("a","b","c","d","e","f", "g","h")
B <- c("I","J","K","L","M","N", "O","P")
paste(A,B, sep = ' ') # Join multiple vectors together
paste(A, collapse = ' ') # Join elements of a vector together
toupper(A) # Convert to uppercase
tolower(B) # Convert to lowercase
nchar(A) # Number of characters in a string
A <- paste(A, collapse = ' ')# join elements in A
nchar(A) # when all the elements in A are previously joined
factor(A) # Turn a vector into a factor. Can set the levels of the factor and the order
A <- c(1:4)
cut(A, breaks = 4) # Turn a numeric vector into a factor by 'cutting' it into sections
# 12. Clean up your workspace #################################################
# clear packages
detach("package:dplyr", unload = TRUE) # detaches the previously loaded package "dplyr" (fixed: code detached "datasets" while the comment referred to dplyr, which section 2 loaded)
# Clear plots
dev.off() # But only if there IS a plot
# Clear console
cat("\014") # ctrl+L
rm(list = ls()) # removes variables from local environment (variables displayed on Top right quadrant under Environment)
# FIN! | /IntroR/R_1_Basics.R | no_license | Tafheemmalik/https-github.com-chsolis-Into-to-R | R | false | false | 5,914 | r | # File: R_1_Basics.R
# Course: Introduction to R
# Section: 1: The basics
# Author: Christopher Solis, uic.edu, @csolisoc
# Date: 2019-04-23
# Based on Mhairi McNeill Base R Cheat Sheet
# Some good introductory guides to R:
# https://rladiessydney.org/courses/ryouwithme/01-basicbasics-2/
# https://moderndive.netlify.app/1-getting-started.html
# 1. Getting Help ########################################################
?mean # Get help of a particular function
help.search("weighted mean") # Search the help files for a word or phrase
help(package = "dplyr") #Find help for a package.
# 2. Using Libraries #####################################################
install.packages("dplyr") # Download and install a package from CRAN
library(dplyr) # Load the package into the session, making all its functions available to use
dplyr::select # Use a particular function from a package.
# Try to find "dplyr" under Packages tab in the bottom-rith quadrant
# 3. Working Direactory #################################################
getwd() # Find the current working directory (where inputs are found and outputs are sent).
setwd("~/Desktop") #Change the current working directory.
# 4. Vectors ############################################################
2:6 # just an interger sequence
c(2,4,6) #Join elements into a vector
seq(2, 3, by=0.5)# A complex sequence
rep(1:2, times=3)# Repeat a vector
rep(1:2, each=3) # Repeat elements of a vector
A <- sample(1:10, 12, replace = TRUE)
A # shows content of vector a
print(A) # shows values of vector a
sort(A) # Return A sorted
table(A) # See counts of values (makes a frequency distribution list)
rev(A) # Return x reversed
unique(A) # See unique values
rm(A) # removes A
A
# 5. "<-" vs. "=" #####################################################
#<- is preferred and advised in R Coding style guides:
#https://google.github.io/styleguide/Rguide.xml#assignment
#http://adv-r.had.co.nz/Style.html
# Example of interchangeable orientations with arrows
A <- 12
13 -> B
A
B
A -> B
A <- B
A
B
# Example of how this doesn't apply to "="
A = 12
13 = B # doesn't work!
# 6. Pick elements from vectors ###########################################
A <- sample(1:10, 12, replace = TRUE)
A[4] # Pick the fourth element in A
A[-4] # Pick all but the fourth
A[2:4] # Elements two to four
A[-(2:4)] # All elements except two to four
A[c(1, 5)] # Elements one and five
A[x == 10] # Elements which are equal to 10
A[A < 4] # All elements less than four
B = c(George = "apple", Maria = "orange", John = "cherry",Anthony = "apple")
B[2] # Call one element based on its index
B["Maria"] # Call one element based on the name
names(B)[B == "apple"] # Call names that selected apples
# 7. Programming ##############################################################
# For loop. Structure:
#for (variable in sequence){
# Do something
#}
# Example: prints 11, 12, 13, 14
for (i in 1:4){
j <- i + 10
print(j) }
# Functions. Structure:
#function_name <- function(var){
# Do something
# return(new_variable)
#}
# Square a number (works element-wise on vectors too).
square <- function(x) {
  squared_value <- x * x
  squared_value  # last expression is returned; explicit return() not needed
}
square(9) # 81
# 8. Math Functions ############################################################
A <- sample(1:10, 6, replace = TRUE) # 6 random draws from 1..10
A
log(A) # Natural log
exp(A) # Exponential
max(A) # Largest element
min(A) # Smallest element
round(exp(A), 2) # Round to n decimal places
signif(exp(A), 3) # Round to n significant figures
cor(A, rev(A)) # Correlation. Pearson is the default
sum(A) # Sum
mean(A) # Mean
median(A) # Median
quantile(A) # Percentage quantiles
rank(A) # Rank of elements
var(A) # The variance
sqrt(var(A)) # The square root of the variance... (that is the standard deviation)
sd(A) # The standard deviation
# 9. Matrices ################################################################
M <- matrix(A, nrow = 3, ncol = 2) # fill A column-wise into a 3x2 matrix
A
M
class(M)
M[2, ] # Select a row
M[ , 1] # Select a column. Notice that it's displayed horizontally in the console
M[3, 2] # Select an element
t(M) # Transpose
M %*% t(M) # Matrix Multiplication
diag(3) # creates an identity matrix (aka I)
N <- solve(diag(3), M) # Find x in: I * x = M
N # NOTE(review): solving I * x = M simply yields N == M; M is 3x2 (non-square)
# so it has no inverse -- the original comment claiming N = M^-1 was wrong.
diag(3) %*% N # multiplying by I returns N (= M) unchanged
# 10. Data Frames ################################################################
A <- data.frame(x = 1:8, y = c("a","b","c","d","e","f", "g","h"))
View(A) # See full data frame (opens the RStudio viewer)
head(A) # See the first 6 rows
nrow(A) # number of rows (also works with matrices)
ncol(A) # number of columns (also works with matrices)
dim(A) # number of rows and columns (also works with matrices)
A$x # gets the "x" column as a vector
A$y # gets the "y" column as a vector
A[[2]] # gets the "y" column. Similar to typing A$y
cbind(A,A$y,A$x) # bind columns
rbind(A,A[3:4, ]) # bind rows
rbind(A,A[3:6, ],A[3:6, ]) # bind rows (rows 3-6 appended twice)
# 11. Strings ################################################################
A <- c("a","b","c","d","e","f", "g","h")
B <- c("I","J","K","L","M","N", "O","P")
paste(A,B, sep = ' ') # Join multiple vectors together, element by element
paste(A, collapse = ' ') # Join elements of a vector into one string
toupper(A) # Convert to uppercase
tolower(B) # Convert to lowercase
nchar(A) # Number of characters in each string
A <- paste(A, collapse = ' ') # join elements in A into a single string
nchar(A) # when all the elements in A are previously joined
factor(A) # Turn a vector into a factor. Can set the levels of the factor and the order
A <- c(1:4)
cut(A, breaks = 4) # Turn a numeric vector into a factor by 'cutting' it into sections
# 12. Clean up your workspace #################################################
# clear packages
detach("package:datasets", unload = TRUE) # detaches the "datasets" package; errors if it is not attached
# Clear plots
dev.off() # But only if there IS an open plot device
# Clear console
cat("\014") # same effect as pressing Ctrl+L
rm(list = ls()) # removes all variables from the global environment (the Environment pane)
# FIN! |
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{prop_domain}
\alias{prop_domain}
\title{Property domain.}
\usage{
prop_domain(x, data)
}
\arguments{
\item{x}{property to dispatch on}
\item{data}{name of data set}
}
\description{
Property domain.
}
| /man/prop_domain.Rd | no_license | jjallaire/ggvis | R | false | false | 261 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{prop_domain}
\alias{prop_domain}
\title{Property domain.}
\usage{
prop_domain(x, data)
}
\arguments{
\item{x}{property to dispatch on}
\item{data}{name of data set}
}
\description{
Property domain.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TwoPart_MultiMS.R
\name{convert_log2}
\alias{convert_log2}
\title{Convert values in a matrix to log2 transformed values}
\usage{
convert_log2(mm, use_cols)
}
\arguments{
\item{mm}{a dataframe of raw intensities in format:
(# peptides)x(# samples+possibly peptide & protein information (metadata))}
\item{use_cols}{vector of column indices that make up the intensities
usually in sequential order but do not have to be
user is responsible for making sure that specified columns are
indeed numeric and correspond to intensities for each sample}
}
\value{
matrix of log2 transformed intensities where 0's were
replaced with NA's prior
to transformation
}
\description{
convert_log2 replaces 0's with NA's, then does a log2 transformation
Replacing 0's with NA's is the correct approach to Proteomics data analysis
as 0's are not values that should be left in the data where no
observation was made, see citation below.
Karpievitch et al. 2009 "Normalization of peak intensities in
bottom-up MS-based proteomics using singular value decomposition".
PMID: 19602524
Karpievitch et al. 2009 "A statistical framework for protein
quantitation in bottom-up MS-based proteomics". PMID: 19535538
}
\examples{
data(mm_peptides)
head(mm_peptides)
intsCols = 8:13
metaCols = 1:7
m_logInts = make_intencities(mm_peptides, intsCols)
m_prot.info = make_meta(mm_peptides, metaCols)
m_logInts = convert_log2(m_logInts) # 0's replaced with NAs and
# log2 transform applied
}
| /man/convert_log2.Rd | permissive | YuliyaLab/ProteoMM | R | false | true | 1,593 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TwoPart_MultiMS.R
\name{convert_log2}
\alias{convert_log2}
\title{Convert values in a matrix to log2 transformed values}
\usage{
convert_log2(mm, use_cols)
}
\arguments{
\item{mm}{a dataframe of raw intensities in format:
(# peptides)x(# samples+possibly peptide & protein information (metadata))}
\item{use_cols}{vector of column indecies that make up the intensities
usually in sequential order but do not have to be
user is responsible for making sure that specified columns are
indeed numeric and correspond to intensities for each sample}
}
\value{
matrix of log2 transformed intensities where 0's were
replaced with NA's prior
to transformation
}
\description{
convert_log2 replaces 0's with NA's than does a log2 transformation
Replacing 0's with NA's is the correct approach to Proteomics data analysis
as 0's are not values that should be left in the data where no
observation was made, see citation below.
Karpievitch et al. 2009 "Normalization of peak intensities in
bottom-up MS-based proteomics using singular value decomposition".
PMID: 19602524
Karpievitch et al. 2009 "A statistical framework for protein
quantitation in bottom-up MS-based proteomics". PMID: 19535538
}
\examples{
data(mm_peptides)
head(mm_peptides)
intsCols = 8:13
metaCols = 1:7
m_logInts = make_intencities(mm_peptides, intsCols)
m_prot.info = make_meta(mm_peptides, metaCols)
m_logInts = convert_log2(m_logInts) # 0's replaced with NAs and
# log2 transnform applied
}
|
#Prepare a classification model using SVM for salary data
#Data Description:
# age -- age of a person
#workclass -- A work class is a grouping of work
#education -- Education of an individuals
#maritalstatus -- Marital status of an individulas
#occupation -- occupation of an individuals
#relationship --
# race -- Race of an Individual
#sex -- Gender of an Individual
#capitalgain -- profit received from the sale of an investment
#capitalloss -- A decrease in the value of a capital asset
#hoursperweek -- number of hours work per week
#native -- Native of an individual
#Salary -- salary of an individual
# Loading the required packages
# Install only when missing, then attach. The unconditional
# install.packages() calls re-downloaded every package on every run;
# the guarded pattern below matches the one used elsewhere in this
# codebase (if(!require(...)) install.packages(...)).
if (!require(kernlab)) install.packages("kernlab")
if (!require(ggplot2)) install.packages("ggplot2")
if (!require(caret)) install.packages("caret")
if (!require(psych)) install.packages("psych")
library(kernlab)
library(ggplot2)
library(caret)
library(psych)
# Loading the Salary Dataset
# NOTE(review): absolute Windows paths -- adjust for your machine.
SD_Train <-read.csv("C:/datasciences/asssignments/Support Vector Machine/SalaryData_Train.csv")
SD_Test <-read.csv("C:/datasciences/asssignments/Support Vector Machine/SalaryData_Test.csv")
View(SD_Train)
str(SD_Train)
summary(SD_Train)
# plotting the salary against all fields
# (plotting a factor against a factor produces a spineplot)
plot(factor(SD_Train$Salary),factor(SD_Train$age))
plot(factor(SD_Train$Salary),factor(SD_Train$workclass))
plot(factor(SD_Train$Salary),factor(SD_Train$education))
plot(factor(SD_Train$Salary),factor(SD_Train$educationno))
plot(factor(SD_Train$Salary),factor(SD_Train$maritalstatus))
plot(factor(SD_Train$Salary),factor(SD_Train$occupation))
plot(factor(SD_Train$Salary),factor(SD_Train$relationship))
plot(factor(SD_Train$Salary),factor(SD_Train$race))
plot(factor(SD_Train$Salary),factor(SD_Train$sex))
plot(factor(SD_Train$Salary),factor(SD_Train$capitalgain))
plot(factor(SD_Train$Salary),factor(SD_Train$capitalloss))
plot(factor(SD_Train$Salary),factor(SD_Train$hoursperweek))
plot(factor(SD_Train$Salary),factor(SD_Train$native))
# converting the target (Salary) category to a factor
SD_Train$Salary <- factor(SD_Train$Salary)
table(SD_Train$Salary) # Majority class variable value <=50K
#Training a model on the data ----
# a simple linear SVM
colnames(SD_Train)
model1<- ksvm(Salary ~ .,
data= SD_Train, kernel = "vanilladot")
model1 # Training error - 0.15, Support Vectors - 10593
## Evaluating model performance ----
# predictions on testing dataset
Area_pred <- predict(model1, SD_Test)
confusionMatrix(table(Area_pred,SD_Test$Salary))
# Accuracy is high - 0.84 % however,
# No Information Rate : 0.75 i.e 75% chance of sal <=50K without applying any model
# Sensitivity : 0.93 which means it is giving 93% correct prediction for salary >50K
# Specificity : 0.58 which means it is giving 58% correct prediction for Salary <=50K
#Improving model performance ----
# Gaussian (RBF) kernel
model_rfdot <- ksvm(Salary ~ .,
data= SD_Train,kernel = "rbfdot")
model_rfdot # Training error - 0.13 , Support Vectors - 199
# Model performance evaluation , prediction on test data
pred_rfdot<-predict(model_rfdot,SD_Test)
confusionMatrix(table(pred_rfdot, SD_Test$Salary))
# Accuracy is 0.85
# No Information Rate : 0.75 remains same as above
# Sensitivity : 0.93
# Specificity : 0.59
# Improving model performance ----
# By using the non-linear model "besseldot"
model_besseldot <- ksvm(Salary ~ .,
data= SD_Train,kernel = "besseldot")
model_besseldot # Training error - 0.22 , Number of Support Vectors - 8255
pred_bessel <- predict(model_besseldot,SD_Test)
confusionMatrix(table(pred_bessel,SD_Test$Salary))
# Accuracy is 0.77
# No Information Rate : 0.75 remains same as above
# Sensitivity : 0.85
# Specificity : 0.52
## Further improving model performance by using polydot (polynomial kernel):
model_poly <- ksvm(Salary ~ .,
data= SD_Train,kernel = "polydot")
model_poly # Training error - 0.15 Number of support vectors 10591
pred_poly<-predict(model_poly,SD_Test)
confusionMatrix(table(pred_poly,SD_Test$Salary))
# Accuracy is 0.84
# No Information Rate : 0.75 remains same as above
# Sensitivity : 0.93
# Specificity : 0.58
# Conclusion:
# The RBF kernel (rbfdot) performed best with accuracy 0.85; besseldot was
# the weakest at 0.77. (The original note attributed the 0.85 to besseldot.)
| /SVM/salary_svm_solution.R | no_license | ksanu24/Data-science-assignment | R | false | false | 4,237 | r | #Prepare a classification model using SVM for salary data
#Data Description:
# age -- age of a person
#workclass -- A work class is a grouping of work
#education -- Education of an individuals
#maritalstatus -- Marital status of an individulas
#occupation -- occupation of an individuals
#relationship --
# race -- Race of an Individual
#sex -- Gender of an Individual
#capitalgain -- profit received from the sale of an investment
#capitalloss -- A decrease in the value of a capital asset
#hoursperweek -- number of hours work per week
#native -- Native of an individual
#Salary -- salary of an individual
# Loading the required packages
install.packages("kernlab")
install.packages("ggplot2")
install.packages("caret")
install.packages("psych")
library(kernlab)
library(ggplot2)
library(caret)
library(psych)
# Loading the Salary Dataset
SD_Train <-read.csv("C:/datasciences/asssignments/Support Vector Machine/SalaryData_Train.csv")
SD_Test <-read.csv("C:/datasciences/asssignments/Support Vector Machine/SalaryData_Test.csv")
View(SD_Train)
str(SD_Train)
summary(SD_Train)
# plotting the salary against all fields
plot(factor(SD_Train$Salary),factor(SD_Train$age))
plot(factor(SD_Train$Salary),factor(SD_Train$workclass))
plot(factor(SD_Train$Salary),factor(SD_Train$education))
plot(factor(SD_Train$Salary),factor(SD_Train$educationno))
plot(factor(SD_Train$Salary),factor(SD_Train$maritalstatus))
plot(factor(SD_Train$Salary),factor(SD_Train$occupation))
plot(factor(SD_Train$Salary),factor(SD_Train$relationship))
plot(factor(SD_Train$Salary),factor(SD_Train$race))
plot(factor(SD_Train$Salary),factor(SD_Train$sex))
plot(factor(SD_Train$Salary),factor(SD_Train$capitalgain))
plot(factor(SD_Train$Salary),factor(SD_Train$capitalloss))
plot(factor(SD_Train$Salary),factor(SD_Train$hoursperweek))
plot(factor(SD_Train$Salary),factor(SD_Train$native))
# converting the size category to factor
SD_Train$Salary <- factor(SD_Train$Salary)
table(SD_Train$Salary) # Majority class variable value <=50K
#Training a model on the data ----
# a simple linear SVM
colnames(SD_Train)
model1<- ksvm(Salary ~ .,
data= SD_Train, kernel = "vanilladot")
model1 # Training error - 0.15, Support Vectors - 10593
## Evaluating model performance ----
# predictions on testing dataset
Area_pred <- predict(model1, SD_Test)
confusionMatrix(table(Area_pred,SD_Test$Salary))
# Accuracy is high - 0.84 % however,
# No Information Rate : 0.75 i.e 75% chance of sal <=50K without applying any model
# Sensitivity : 0.93 which means it is giving 93% correct prediction for salary >50K
# Specificity : 0.58 which means it is giving 58% correct prediction for Salary <=50K
#Improving model performance ----
model_rfdot <- ksvm(Salary ~ .,
data= SD_Train,kernel = "rbfdot")
model_rfdot # Training error - 0.13 , Support Vectors - 199
# Model performance evaluation , prediction on test data
pred_rfdot<-predict(model_rfdot,SD_Test)
confusionMatrix(table(pred_rfdot, SD_Test$Salary))
# Accuracy is 0.85
# No Information Rate : 0.75 remains same as above
# Sensitivity : 0.93
# Specificity : 0.59
# Improving model performance ----
# By using the non-linear model "besseldot"
model_besseldot <- ksvm(Salary ~ .,
data= SD_Train,kernel = "besseldot")
model_besseldot # Training error - 0.22 , Number of Support Vectors - 8255
pred_bessel <- predict(model_besseldot,SD_Test)
confusionMatrix(table(pred_bessel,SD_Test$Salary))
# Accuracy is 0.77
# No Information Rate : 0.75 remains same as above
# Sensitivity : 0.85
# Specificity : 0.52
## Further improving model performance by using plydot:
model_poly <- ksvm(Salary ~ .,
data= SD_Train,kernel = "polydot")
model_poly # Training error - 0.15 Number of support vectors 10591
pred_poly<-predict(model_poly,SD_Test)
confusionMatrix(table(pred_poly,SD_Test$Salary))
# Accuracy is 0.84
# No Information Rate : 0.75 remains same as above
# Sensitivity : 0.93
# Specificity : 0.58
# Conclusion:
# Besseldot model is good as accuracy is high 0.85
|
prelim.wt.cat.lines <- function(agg.data, supplier.name){
cat.agg <- aggregate(Total.Sales ~ Categ.By.Year + Category + Year, agg.data, sum)
cats.line <- ggplot(cat.agg, aes(x = Year, color = Category)) +
geom_line(aes(y = Total.Sales), size = 3) +
theme_dark() + labs(y = "Sales", title = paste("Sales by Category for", supplier.name)) +
scale_y_continuous(labels = scales::dollar)
cats.line
} | /supplier.dash.scripts.jan18/prelim.wt.cat.lines.R | no_license | hamnsannah/supp_dash_fix | R | false | false | 413 | r | prelim.wt.cat.lines <- function(agg.data, supplier.name){
cat.agg <- aggregate(Total.Sales ~ Categ.By.Year + Category + Year, agg.data, sum)
cats.line <- ggplot(cat.agg, aes(x = Year, color = Category)) +
geom_line(aes(y = Total.Sales), size = 3) +
theme_dark() + labs(y = "Sales", title = paste("Sales by Category for", supplier.name)) +
scale_y_continuous(labels = scales::dollar)
cats.line
} |
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928677e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, -1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, 
-4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) | /meteor/inst/testfiles/ET0_PriestleyTaylor/AFL_ET0_PriestleyTaylor/ET0_PriestleyTaylor_valgrind_files/1615844203-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,232 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928677e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, 
-1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, -4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) |
testlist <- list(A = structure(c(2.17107980817984e+205, 3.13388079053782e+296 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613122359-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 323 | r | testlist <- list(A = structure(c(2.17107980817984e+205, 3.13388079053782e+296 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#' @title ATAC-seq Quality Control
#'
#' @description ATAC-seq, an assay for Transposase-Accessible Chromatin using
#' sequencing, is a rapid and sensitive method for chromatin accessibility
#' analysis. It was developed as an alternative to MNase-seq, FAIRE-seq
#' and DNase-seq. Compared to those methods, ATAC-seq requires smaller
#' amounts of biological sample and less time to process. While analyzing
#' several ATAC-seq datasets produced in our labs, we learned some of the
#' unique aspects of quality assessment for ATAC-seq data. To help users
#' quickly assess whether their ATAC-seq experiment was successful, we
#' developed the ATACseqQC package, partially following the guideline
#' published in Nature Methods 2013 (Greenleaf et al.), including diagnostic
#' plots of fragment size distribution, proportion of mitochondrial reads,
#' nucleosome positioning pattern, and CTCF or other transcription factor
#' footprints.
"_PACKAGE"
| /R/ATACseqQC.R | no_license | jaime11/Transcription-Factor-Footprinting | R | false | false | 969 | r | #' @title ATAC-seq Quality Control
#'
#' @description ATAC-seq, an assay for Transposase-Accessible Chromatin using
#' sequencing, is a rapid and sensitive method for chromatin accessibility
#' analysis. It was developed as an alternative method to MNase-seq, FAIRE-seq
#' and DNAse-seq. Comparing to the other methods, ATAC-seq requires less amount
#' of the biological samples and time to process. In the process of analyzing
#' several ATAC-seq dataset produced in our labs, we learned some of the unique
#' aspects of the quality assessment for ATAC-seq data.To help users to quickly
#' assess whether their ATAC-seq experiment is successful, we developed
#' ATACseqQC package partially following the guideline published in Nature
#' Method 2013 (Greenleaf et al.), including diagnostic plot of fragment size
#' distribution, proportion of mitochondria reads, nucleosome positioning
#' pattern, and CTCF or other Transcript Factor footprints.
"_PACKAGE"
|
# This script is appended to readcounts2.R and plots the forward and
# reverse strand reads for each category of RNA.
# NOTE(review): "aaa", "bbb", "ccc", "ddd", "xxx" appear to be template
# placeholders substituted by a generating script before this is sourced;
# the file is not runnable as-is. TODO confirm against the generator.
#ddd
fwdrev = readcounts$Vaaa+readcounts$Vbbb #total number of reads (forward + reverse)
rect(base,c(1:nsamples)-0.8,base+fwdrev/readcounts$V2,c(1:nsamples)-0.2,col=colors()[ccc]) # one bar per sample, normalised by column V2
revfrac = readcounts$Vbbb/fwdrev # fraction of reads on the reverse strand
rect(base,c(1:nsamples)-0.8,base+fwdrev/readcounts$V2,c(1:nsamples)-0.8+0.6*revfrac,col=colors()[315]) # overlay encoding the reverse-strand fraction
base = base + fwdrev/readcounts$V2 # advance the stacked-bar baseline for the next category
rect(0.0+x1,nsamples+y0-xxx*yd+y1,0.1+x1,nsamples+y0-xxx*yd-y1,col=colors()[ccc]) # legend colour swatch
text(0.12+x1,nsamples+y0-xxx*yd,adj=0.0,labels="ddd",cex=cc1) # legend label
| /Plots/readcounts_2.R | no_license | a-jartseva/RiboSeq-Analysis | R | false | false | 591 | r | #this script is appended to readcounts2.R, and plots the forward and reverse strand reads for each category of RNA
#ddd
fwdrev = readcounts$Vaaa+readcounts$Vbbb #total number of reads
rect(base,c(1:nsamples)-0.8,base+fwdrev/readcounts$V2,c(1:nsamples)-0.2,col=colors()[ccc])
revfrac = readcounts$Vbbb/fwdrev
rect(base,c(1:nsamples)-0.8,base+fwdrev/readcounts$V2,c(1:nsamples)-0.8+0.6*revfrac,col=colors()[315])
base = base + fwdrev/readcounts$V2
rect(0.0+x1,nsamples+y0-xxx*yd+y1,0.1+x1,nsamples+y0-xxx*yd-y1,col=colors()[ccc])
text(0.12+x1,nsamples+y0-xxx*yd,adj=0.0,labels="ddd",cex=cc1)
|
# Unroot a phylogenetic tree: read the Newick file, remove the root,
# and write the unrooted tree back out.
library(ape)
testtree <- read.tree("10571_2.txt") # input tree in Newick format
unrooted_tr <- unroot(testtree) # collapse the root node
write.tree(unrooted_tr, file="10571_2_unrooted.txt") # save the result
testtree <- read.tree("10571_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10571_2_unrooted.txt") |
# Shiny UI: a radio-button choice of image type plus an image output slot.
library(shiny)
fluidPage(
# page title shown at the top
titlePanel("Image output"),
fluidRow(
# input$type takes the value "cloud" or "map" (labels: wordcloud / mapping)
radioButtons("type","Image type:",c("wordcloud" = "cloud","mapping"="map"))
),
# the server side is expected to render output$image2
imageOutput("image2")
)
| /ui.R | no_license | linlu66/finalproject06 | R | false | false | 212 | r | library(shiny)
fluidPage(
titlePanel("Image output"),
fluidRow(
radioButtons("type","Image type:",c("wordcloud" = "cloud","mapping"="map"))
),
imageOutput("image2")
)
|
# Install any missing dependencies (guarded so nothing is re-downloaded
# on every run), then attach them.
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(e1071)) install.packages("e1071", repos = "http://cran.us.r-project.org")
library(e1071)
library(dplyr)
library(tidyverse)
library(data.table)
library(caret)
# Data input
setwd("~/R") # PLEASE SET THE DIRECTORY ACCORDINGLY
# NOTE(review): setwd() inside a script is fragile -- prefer relative paths.
download.file(url="https://github.com/rahmed2/creditcardData/archive/master.zip",
destfile= "creditcardfraud.zip")
# The archive contains a zipped CSV; unzip both layers before reading.
unzip(zipfile = "creditcardfraud.zip")
setwd("~/R/creditcardData-master") # PLEASE SET THE DIRECTORY ACCORDINGLY
unzip(zipfile = "creditcard.csv.zip")
my_data <- read.csv('creditcard.csv')
str(my_data)
setwd("..") # return to the parent directory
# Report Data
# Per-column mean/sd/max/min summary. (gather() is superseded by
# pivot_longer() in current tidyr, but kept here for behavior.)
data_report<- gather(my_data, factor_key=TRUE) %>% group_by(key)%>%
summarize(mean= mean(value), sd= sd(value), max = max(value),min = min(value))
# Recode Class as a factor with "1" (fraud) as the first level so that
# fraud is treated as the positive class downstream.
my_data$Class <- ifelse(my_data$Class == 1, "1", "0") %>% factor(levels = c("1","0"))
# histogram of non masked data
amount_hist<-my_data %>%
ggplot(aes(Amount)) +
geom_histogram(bins = 10, color = "black")
amount_time<-my_data %>%
ggplot(aes(Time)) +
geom_histogram(bins = 10, color = "black")
bar_prev<-my_data %>%
ggplot(aes(Class)) + geom_bar()
# Prevalence
K<-my_data %>% group_by(Class) %>% count()
pr<-K$n[K$Class==1]/sum(K$n) # fraction of fraud cases in the data
# Scaling non scaled data (the V1..V28 PCA columns are already scaled)
my_data$Amount <- scale(my_data$Amount)
my_data$Time <- scale(my_data$Time)
# Creating train and test set (70/30, stratified on Class)
set.seed(1)
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(my_data$Class, times = 1, p = 0.3, list = FALSE)
train <- my_data[-test_index,]
test <- my_data[test_index,]
## ALGORITHMS
# Logistic Regression
# NOTE(review): glm() with a factor response models the probability of the
# SECOND factor level ("0" here, since levels are c("1","0")); that is why
# p_hat > 0.5 is mapped to "0" below -- confirm this orientation is intended.
glm_fit<- train %>% glm(Class~.,data=.,family = "binomial")
p_hat_logit <- predict(glm_fit, newdata = test, type = "response")
y_hat_logit <- ifelse(p_hat_logit > 0.5, "0", "1") %>% factor(levels = levels(my_data$Class))
acc1<- confusionMatrix(y_hat_logit, test$Class, positive = "1")$overall[["Accuracy"]]
F1<-F_meas(data=y_hat_logit,reference = test$Class)
# Predictors / response for the caret models below. (The data frame named
# `train` does not shadow caret::train() when called as a function.)
x<- train[,!(colnames(my_data)=="Class")]
y<- train$Class
# LDA
fit_lda <- train(x, y, method = "lda")
fit_lda$results["Accuracy"]
y_hat_lda<-predict(fit_lda, newdata = test) %>%
factor(levels = levels(my_data$Class))
acc2<-confusionMatrix(y_hat_lda,test$Class)$overall[["Accuracy"]]
F2<-F_meas(data=y_hat_lda,reference = test$Class)
# QDA
fit_qda <- train(x, y, method = "qda")
fit_qda$results["Accuracy"]
y_hat_qda<-predict(fit_qda, newdata = test) %>%
factor(levels = levels(my_data$Class))
acc3<-confusionMatrix(y_hat_qda,test$Class)$overall[["Accuracy"]]
F3<-F_meas(data=y_hat_qda,reference = test$Class)
# Naive Bayes
fit_nb <- train(x, y, method = "naive_bayes")
fit_nb$results["Accuracy"]
y_hat_nb<-predict(fit_nb, newdata = test) %>%
factor(levels = levels(my_data$Class))
acc4<-confusionMatrix(y_hat_nb,test$Class)$overall[["Accuracy"]]
F4<-F_meas(data=y_hat_nb,reference = test$Class)
# PCA to try to reduce dimensions
pca<- prcomp(x)
# cumulative proportion of variance explained by the principal components
var_explained <- cumsum(pca$sdev^2 / sum(pca$sdev^2))
var_explained
# ANOMALY DETECTION
# Creating train, validation and test set
# a = normal transactions, b = fraud cases. The anomaly detector is fitted
# on normal data only; fraud cases are split between validation and test.
a<- my_data[which(my_data$Class==0),]
b<- my_data[which(my_data$Class==1),]
set.seed(1) # if using R 3.5 or earlier, use `set.seed(1)` instead
# Train set
valid_index_ad <- createDataPartition(a$V1, times = 1, p = 0.4, list = FALSE)
train_ad <- a[-valid_index_ad,]
valid_ad <- a[valid_index_ad,]
# Valid and test set
set.seed(1) # if using R 3.5 or earlier, use `set.seed(1)` instead
test_index_ad <- createDataPartition(b$Amount, times = 1, p = 0.5, list = FALSE)
valid_ad_1 <- b[-test_index_ad,]
test_ad_1 <- b[test_index_ad,]
set.seed(1) # if using R 3.5 or earlier, use `set.seed(1)` instead
test_ad_ad <- createDataPartition(valid_ad$Amount, times = 1, p = 0.5, list = FALSE)
valid_ad_2 <- valid_ad[-test_ad_ad,]
test_ad_2 <- valid_ad[test_ad_ad,]
# Mixed (normal + fraud) validation and test sets
valid_AD<- rbind(valid_ad_2,valid_ad_1)
test_AD<- rbind(test_ad_2,test_ad_1)
# Finding mu and sigma
# Drop the label, then estimate per-feature mean/sd on normal data only.
train_ad$Class<- NULL
report<- gather(train_ad, factor_key=TRUE) %>% group_by(key)%>%
summarize(mean= mean(value), sd= sd(value))
dim(report)
# Per-feature Gaussian density of each validation row, using the mean/sd
# estimated from the (all-normal) training set. vapply + seq_along replaces
# the original grow-by-cbind loop (quadratic copying) and the unsafe
# 1:length(...) pattern. Columns of train_ad line up with rows of `report`
# because gather() preserves column order.
Prob <- vapply(seq_along(train_ad), function(i) {
  dnorm(valid_AD[, i], mean = report$mean[i], sd = report$sd[i])
}, numeric(nrow(valid_AD)))
# Naive-independence joint density: product across features for each row.
jj <- as.vector(apply(Prob, 1, prod))
dat <- data.frame(Prob = jj, data = valid_AD$Class)
p <- dat[dat$data == 1, ]  # densities of the known fraud cases (threshold candidates)
# Threshold search: try the joint density of every known fraud case as a
# candidate epsilon and keep the one that maximises F1 on the validation
# set. Result vectors are preallocated (the original grew them element by
# element) and seq_len(nrow(p)) replaces 1:length(p[,1]), which misbehaves
# when p has no rows.
n_cand <- nrow(p)
acc <- numeric(n_cand)
F5 <- numeric(n_cand)
for (i in seq_len(n_cand)) {
  epsilon <- p$Prob[i]
  # flag a row as fraud ("1") when its joint density is at or below epsilon
  y_hat_ad <- ifelse(jj <= epsilon, "1", "0") %>% factor(levels = levels(valid_AD$Class))
  acc[i] <- confusionMatrix(y_hat_ad, valid_AD$Class)$overall[["Accuracy"]]
  F5[i] <- F_meas(data = y_hat_ad, reference = valid_AD$Class)
}
epsilon <- p$Prob[which.max(F5)]  # threshold with the best validation F1
# on test set
# Per-feature Gaussian densities on the held-out test set, using the same
# training-set mean/sd; vapply + seq_along replaces the original
# grow-by-cbind loop and 1:length(...) indexing.
Prob_new <- vapply(seq_along(train_ad), function(i) {
  dnorm(test_AD[, i], mean = report$mean[i], sd = report$sd[i])
}, numeric(nrow(test_AD)))
# Joint density per test row under the naive-independence assumption.
j <- as.vector(apply(Prob_new, 1, prod))
# Classify the test set with the selected epsilon and score it.
y_hat<- ifelse(j<=epsilon,"1","0") %>% factor(levels = levels(test_AD$Class))
acc5<-confusionMatrix(y_hat,test_AD$Class)$overall[["Accuracy"]]
# NOTE(review): F5 is reused here as a scalar (it previously held the
# vector of candidate F1 scores); fine since the vector is no longer needed.
F5<-F_meas(data=y_hat,reference = test_AD$Class)
#Summary
# Collect the F1 score of every model in one table for comparison.
F1_results <- data.frame(method = c("Logistic Regression","LDA","QDA","Naive Bayes", "Anomaly Detection"),
F1_score = c(F1,F2,F3,F4,F5))
| /project_fraud_detection.R | no_license | rahmed2/Credit-Card-Fraud-Detection | R | false | false | 5,581 | r | if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(e1071)) install.packages("e1071", repos = "http://cran.us.r-project.org")
library(e1071)
library(dplyr)
library(tidyverse)
library(data.table)
library(caret)
# Data input
setwd("~/R") # PLEASE SET THE DIRECTORY ACCORDINGLY
download.file(url="https://github.com/rahmed2/creditcardData/archive/master.zip",
destfile= "creditcardfraud.zip")
unzip(zipfile = "creditcardfraud.zip")
setwd("~/R/creditcardData-master") # PLEASE SET THE DIRECTORY ACCORDINGLY
unzip(zipfile = "creditcard.csv.zip")
my_data <- read.csv('creditcard.csv')
str(my_data)
setwd("..")
# Report Data
data_report<- gather(my_data, factor_key=TRUE) %>% group_by(key)%>%
summarize(mean= mean(value), sd= sd(value), max = max(value),min = min(value))
my_data$Class <- ifelse(my_data$Class == 1, "1", "0") %>% factor(levels = c("1","0"))
# histogram of non masked data
amount_hist<-my_data %>%
ggplot(aes(Amount)) +
geom_histogram(bins = 10, color = "black")
amount_time<-my_data %>%
ggplot(aes(Time)) +
geom_histogram(bins = 10, color = "black")
bar_prev<-my_data %>%
ggplot(aes(Class)) + geom_bar()
# Prevalence
K<-my_data %>% group_by(Class) %>% count()
pr<-K$n[K$Class==1]/sum(K$n)
# Scaling non scaled data
my_data$Amount <- scale(my_data$Amount)
my_data$Time <- scale(my_data$Time)
# Creating train and test set
set.seed(1)
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(my_data$Class, times = 1, p = 0.3, list = FALSE)
train <- my_data[-test_index,]
test <- my_data[test_index,]
## ALGORITHMS
# Logistic Regression
glm_fit<- train %>% glm(Class~.,data=.,family = "binomial")
p_hat_logit <- predict(glm_fit, newdata = test, type = "response")
y_hat_logit <- ifelse(p_hat_logit > 0.5, "0", "1") %>% factor(levels = levels(my_data$Class))
acc1<- confusionMatrix(y_hat_logit, test$Class, positive = "1")$overall[["Accuracy"]]
F1<-F_meas(data=y_hat_logit,reference = test$Class)
x<- train[,!(colnames(my_data)=="Class")]
y<- train$Class
# LDA
fit_lda <- train(x, y, method = "lda")
fit_lda$results["Accuracy"]
y_hat_lda<-predict(fit_lda, newdata = test) %>%
factor(levels = levels(my_data$Class))
acc2<-confusionMatrix(y_hat_lda,test$Class)$overall[["Accuracy"]]
F2<-F_meas(data=y_hat_lda,reference = test$Class)
# QDA
fit_qda <- train(x, y, method = "qda")
fit_qda$results["Accuracy"]
y_hat_qda<-predict(fit_qda, newdata = test) %>%
factor(levels = levels(my_data$Class))
acc3<-confusionMatrix(y_hat_qda,test$Class)$overall[["Accuracy"]]
F3<-F_meas(data=y_hat_qda,reference = test$Class)
# Naive Bayes
fit_nb <- train(x, y, method = "naive_bayes")
fit_nb$results["Accuracy"]
y_hat_nb<-predict(fit_nb, newdata = test) %>%
factor(levels = levels(my_data$Class))
acc4<-confusionMatrix(y_hat_nb,test$Class)$overall[["Accuracy"]]
F4<-F_meas(data=y_hat_nb,reference = test$Class)
# PCA to try to reduce dimensions
pca<- prcomp(x)
var_explained <- cumsum(pca$sdev^2 / sum(pca$sdev^2))
var_explained
# ANOMALY DETECTION
# Creating train, validation and test set
a<- my_data[which(my_data$Class==0),]
b<- my_data[which(my_data$Class==1),]
set.seed(1) # if using R 3.5 or earlier, use `set.seed(1)` instead
# Train set
valid_index_ad <- createDataPartition(a$V1, times = 1, p = 0.4, list = FALSE)
train_ad <- a[-valid_index_ad,]
valid_ad <- a[valid_index_ad,]
# Valid and test set
set.seed(1) # if using R 3.5 or earlier, use `set.seed(1)` instead
test_index_ad <- createDataPartition(b$Amount, times = 1, p = 0.5, list = FALSE)
valid_ad_1 <- b[-test_index_ad,]
test_ad_1 <- b[test_index_ad,]
set.seed(1) # if using R 3.5 or earlier, use `set.seed(1)` instead
test_ad_ad <- createDataPartition(valid_ad$Amount, times = 1, p = 0.5, list = FALSE)
valid_ad_2 <- valid_ad[-test_ad_ad,]
test_ad_2 <- valid_ad[test_ad_ad,]
valid_AD<- rbind(valid_ad_2,valid_ad_1)
test_AD<- rbind(test_ad_2,test_ad_1)
# Finding mu and sigma
# Fit an independent Gaussian (mean/sd) to every predictor of the
# fraud-free training set; Class is dropped so only predictors are modelled.
train_ad$Class <- NULL
report <- gather(train_ad, factor_key = TRUE) %>% group_by(key) %>%
  summarize(mean = mean(value), sd = sd(value))
dim(report)
# Per-feature density of every validation row under the fitted Gaussians.
# vapply() assembles the matrix in one pass instead of the original
# O(n^2) cbind()-growth loop, and seq_along() is safe for zero columns.
# Column i of valid_AD lines up with column i of train_ad (valid_AD's
# extra Class column is last and is never indexed here).
Prob <- vapply(seq_along(train_ad),
               function(i) dnorm(valid_AD[, i], mean = report$mean[i], sd = report$sd[i]),
               numeric(nrow(valid_AD)))
# Joint density under a naive independence assumption: row-wise product.
jj <- as.vector(apply(Prob, 1, prod))
dat <- data.frame(Prob = jj, data = valid_AD$Class)
# Threshold search: try each fraud case's joint density as the cutoff
# epsilon and keep the one that maximises F1 on the validation set.
p <- dat[dat$data == 1, ]
# Preallocate the result vectors instead of growing them element by
# element, and use seq_len(nrow(p)) which degrades gracefully to an
# empty loop when p has zero rows (1:length(p[,1]) would yield c(1, 0)).
acc <- numeric(nrow(p))
F5 <- numeric(nrow(p))
for (i in seq_len(nrow(p))) {
  epsilon <- p$Prob[i]
  # flag rows whose joint density falls at or below the candidate cutoff
  y_hat_ad <- ifelse(jj <= epsilon, "1", "0") %>% factor(levels = levels(valid_AD$Class))
  acc[i] <- confusionMatrix(y_hat_ad, valid_AD$Class)$overall[["Accuracy"]]
  F5[i] <- F_meas(data = y_hat_ad, reference = valid_AD$Class)
}
# Best-F1 threshold, reused on the test set below.
epsilon <- p$Prob[which.max(F5)]
# on test set
# Score the held-out test set with the chosen epsilon. As in the
# validation step, the per-feature density matrix is built with vapply()
# rather than growing a matrix with cbind() inside a loop.
Prob_new <- vapply(seq_along(train_ad),
                   function(i) dnorm(test_AD[, i], mean = report$mean[i], sd = report$sd[i]),
                   numeric(nrow(test_AD)))
j <- as.vector(apply(Prob_new, 1, prod))
y_hat <- ifelse(j <= epsilon, "1", "0") %>% factor(levels = levels(test_AD$Class))
acc5 <- confusionMatrix(y_hat, test_AD$Class)$overall[["Accuracy"]]
# NOTE(review): F5 served as the F1 search vector above and is reused
# here as the final scalar score; kept for compatibility with the summary.
F5 <- F_meas(data = y_hat, reference = test_AD$Class)
#Summary
# F1 score of every model fitted in this script, side by side.
F1_results <- data.frame(method = c("Logistic Regression", "LDA", "QDA", "Naive Bayes", "Anomaly Detection"),
                         F1_score = c(F1, F2, F3, F4, F5))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.r
\name{lsos}
\alias{lsos}
\title{show largest objects}
\usage{
lsos(..., n = 10)
}
\arguments{
\item{...}{arguments passed on to \code{.ls.objects}}
\item{n}{scalar integer, number of objects to show}
}
\description{
List the largest objects in the current R session by memory use.
Adapted from https://gist.github.com/1187166.git and from
http://stackoverflow.com/questions/1358003/tricks-to-manage-the-available-memory-in-an-r-session
}
| /man/lsos.Rd | no_license | jackwasey/jwutil | R | false | true | 454 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.r
\name{lsos}
\alias{lsos}
\title{show largest objects}
\usage{
lsos(..., n = 10)
}
\arguments{
\item{...}{arguments passed on to \code{.ls.objects}}
\item{n}{scalar integer, number of objects to show}
}
\description{
https://gist.github.com/1187166.git Taken from
http://stackoverflow.com/questions/1358003/\
tricks-to-manage-the-available-memory-in-an-r-session
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.url.R
\name{is.url}
\alias{is.url}
\title{Which entries are URLs.}
\usage{
is.url(x)
}
\arguments{
\item{x}{a character vector.}
}
\value{
a logical vector the same length as \code{x}; values are \code{TRUE} if the
entry looks like a URL. Note that the URLs are not checked for 404 (dead link)
errors.
}
\description{
x must start with http://, ftp://, or file://
See also download.file
}
\author{
Mark Cowley, 2009-12-11
}
| /man/is.url.Rd | no_license | drmjc/mjcbase | R | false | true | 481 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.url.R
\name{is.url}
\alias{is.url}
\title{Which entries are URL's.}
\usage{
is.url(x)
}
\arguments{
\item{x}{a character vector.}
}
\value{
a logical vector of length x. values are TRUE if the entry looks
like a URL. NB the URL's are not checked for 404 (dead links) errors.
}
\description{
x must start with http://, ftp://, or file://
See also download.file
}
\author{
Mark Cowley, 2009-12-11
}
|
\name{densum}
\alias{densum}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Compute a binned density sum over a range of positions
}
\description{
Tabulates the values of \code{vin} falling within \code{[from, to]}, optionally
reweights the counts with \code{match.wt.f}, and passes them to compiled C code
(\code{"ccdensum"} or \code{"cdensum"}) that accumulates a smoothed density on a
regular grid with spacing \code{step}. The \code{bw} and \code{dw} parameters
are forwarded to the C routine (presumably bandwidth and window width; confirm
against the C source).
}
\usage{
densum(vin, bw = 5, dw = 3, match.wt.f = NULL, return.x = T,
from = min(vin), to = max(vin), step = 1, new.code = T)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{vin}{
Parameter
}
\item{bw}{
Parameter
}
\item{dw}{
Parameter
}
\item{match.wt.f}{
Parameter
}
\item{return.x}{
Parameter
}
\item{from}{
Parameter
}
\item{to}{
Parameter
}
\item{step}{
Parameter
}
\item{new.code}{
Parameter
}
}
\value{
If \code{return.x} is \code{TRUE}, a list with elements \code{x} (the grid
endpoints), \code{y} (the density values) and \code{step}; otherwise the
numeric density vector alone.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
\dontrun{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (vin, bw = 5, dw = 3, match.wt.f = NULL, return.x = T,
from = min(vin), to = max(vin), step = 1, new.code = T)
{
tc <- table(vin[vin >= from & vin <= to])
pos <- as.numeric(names(tc))
storage.mode(pos) <- "double"
tc <- as.numeric(tc)
storage.mode(tc) <- "double"
n <- length(pos)
if (!is.null(match.wt.f)) {
tc <- tc * match.wt.f(pos)
}
rng <- c(from, to)
if (rng[1] < 0) {
stop("range extends into negative values")
}
if (range(pos)[1] < 0) {
stop("position vector contains negative values")
}
storage.mode(n) <- storage.mode(rng) <- storage.mode(bw)
<- storage.mode(dw) <- storage.mode(step) <- "integer"
spos <- rng[1]
storage.mode(spos) <- "double"
dlength <- floor((rng[2] - rng[1])/step) + 1
if (dlength < 1) {
stop("zero data range")
}
if (new.code) {
storage.mode(step) <- storage.mode(dlength)
<- storage.mode(bw) <- storage.mode(dw) <- "integer"
dout <- .Call("ccdensum", pos, tc, spos, bw, dw, dlength,
step)
}
else {
dout <- numeric(dlength)
storage.mode(dout) <- "double"
storage.mode(dlength) <- "integer"
.C("cdensum", n, pos, tc, spos, bw, dw, dlength, step,
dout)
}
if (return.x) {
return(list(x = c(rng[1], rng[1] + step * (dlength -
1)), y = dout, step = step))
}
else {
return(dout)
}
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/densum.Rd | no_license | fbrundu/spp | R | false | false | 2,572 | rd | \name{densum}
\alias{densum}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Do Something
}
\description{
Densum
}
\usage{
densum(vin, bw = 5, dw = 3, match.wt.f = NULL, return.x = T,
from = min(vin), to = max(vin), step = 1, new.code = T)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{vin}{
Parameter
}
\item{bw}{
Parameter
}
\item{dw}{
Parameter
}
\item{match.wt.f}{
Parameter
}
\item{return.x}{
Parameter
}
\item{from}{
Parameter
}
\item{to}{
Parameter
}
\item{step}{
Parameter
}
\item{new.code}{
Parameter
}
}
\value{
Some sum
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
\dontrun{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (vin, bw = 5, dw = 3, match.wt.f = NULL, return.x = T,
from = min(vin), to = max(vin), step = 1, new.code = T)
{
tc <- table(vin[vin >= from & vin <= to])
pos <- as.numeric(names(tc))
storage.mode(pos) <- "double"
tc <- as.numeric(tc)
storage.mode(tc) <- "double"
n <- length(pos)
if (!is.null(match.wt.f)) {
tc <- tc * match.wt.f(pos)
}
rng <- c(from, to)
if (rng[1] < 0) {
stop("range extends into negative values")
}
if (range(pos)[1] < 0) {
stop("position vector contains negative values")
}
storage.mode(n) <- storage.mode(rng) <- storage.mode(bw)
<- storage.mode(dw) <- storage.mode(step) <- "integer"
spos <- rng[1]
storage.mode(spos) <- "double"
dlength <- floor((rng[2] - rng[1])/step) + 1
if (dlength < 1) {
stop("zero data range")
}
if (new.code) {
storage.mode(step) <- storage.mode(dlength)
<- storage.mode(bw) <- storage.mode(dw) <- "integer"
dout <- .Call("ccdensum", pos, tc, spos, bw, dw, dlength,
step)
}
else {
dout <- numeric(dlength)
storage.mode(dout) <- "double"
storage.mode(dlength) <- "integer"
.C("cdensum", n, pos, tc, spos, bw, dw, dlength, step,
dout)
}
if (return.x) {
return(list(x = c(rng[1], rng[1] + step * (dlength -
1)), y = dout, step = step))
}
else {
return(dout)
}
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(reshape2)
# Work on a local copy of the built-in airquality data set.
airquality <- airquality
View(airquality)
# question 1. Using the melt() function, format "airquality"
# with 1 measurement per Month/Day date.
# Store it as "weatherSurvey".
head(airquality)
str(airquality)
names(airquality) <- tolower(names(airquality))
## convert the variable names above from upper case to lower case
melt_test <- melt(airquality)
melt_test2 <- melt(airquality, id.vars = c("month", "wind"), measure.vars = "ozone")
View(melt_test2)
# Long format: one row per month/day/variable combination.
weatherSurvey <- melt(airquality, id.vars = c("month","day"))
View(weatherSurvey)
# question 2. Specify the name of “weatherSurvey” column 3
# as “Condition“, and the name of column 4 as “Measurement“,
# using the melt() formula in question 1.
aq_melt <- melt(airquality, id.vars = c("month","day"), na.rm =TRUE)
View(aq_melt)
# variable.name / value.name rename the two melted columns.
weatherSurvey2 <- melt(airquality, id.vars = c("month","day"),
variable.name = "condition",
value.name = "measurement")
View(weatherSurvey2)
# question 3. Use dcast() to reshape "weatherSurvey" from long to wide,
# with Month and Day as the first 2 columns.
# Store it as "airqualityEdit".
aq_dcast <- dcast(aq_melt, month + day ~ variable )
View(aq_dcast)
airqulistyEdit <- dcast(weatherSurvey2, month +day ~ condition, value.var = "measurement")
View(airqulistyEdit)
# question 4. Use the dcast() function to get the means of “weatherSurvey”
# measurement variables by month.
# Store the results as "airQualityMean".
# Also, remove not available values.
airqualityMean <- dcast(weatherSurvey2, month ~ condition,
fun.aggregate = mean,
value.var = "measurement", na.rm =TRUE)
View(airqualityMean)
# na.rm = TRUE drops NA values before the mean is taken
| /practice_5_AdsP_reshape_student.R | no_license | MyChoYS/R_ADsp_basic | R | false | false | 1,731 | r | library(reshape2)
airquality <- airquality
View(airquality)
# question 1. Using the melt() function, format "airquality"
# with 1 measurement per Month/Day date.
# Store it as "weatherSurvey".
head(airquality)
str(airquality)
names(airquality) <- tolower(names(airquality))
##위의 변수는 대문자를 소문자로 변환
melt_test <- melt(airquality)
melt_test2 <- melt(airquality, id.vars = c("month", "wind"), measure.vars = "ozone")
View(melt_test2)
weatherSurvey <- melt(airquality, id.vars = c("month","day"))
View(weatherSurvey)
# question 2. Specify the name of “weatherSurvey” column 3
# as “Condition“, and the name of column 4 as “Measurement“,
# using the melt() formula in question 1.
aq_melt <- melt(airquality, id.vars = c("month","day"), na.rm =TRUE)
View(aq_melt)
weatherSurvey2 <- melt(airquality, id.vars = c("month","day"),
variable.name = "condition",
value.name = "measurement")
View(weatherSurvey2)
# question 3. Use dcast() to reshape "weatherSurvey" from long to wide,
# with Month and Day as the first 2 columns.
# Store it as "airqualityEdit".
aq_dcast <- dcast(aq_melt, month + day ~ variable )
View(aq_dcast)
airqulistyEdit <- dcast(weatherSurvey2, month +day ~ condition, value.var = "measurement")
View(airqulistyEdit)
# question 4. Use the dcast() function to get the means of “weatherSurvey”
# measurement variables by month.
# Store the results as "airQualityMean".
# Also, remove not available values.
airqualityMean <- dcast(weatherSurvey2, month ~ condition,
fun.aggregate = mean,
value.var = "measurement", na.rm =TRUE)
View(airqualityMean)
#na.rm = NA delete
|
# install.packages(c("ggplot2", "dplyr", "geojsonio"))
library(leaflet)
library(dplyr)
# Basic leaflet usage ----
# leaflet() Starts a map
# addTiles() Add a basemap so you can see the environment
# addMarkers() Add a marker at a position
leaflet() %>%
addTiles() %>%
addMarkers(lng = -122.30768, lat = 47.65486, popup = "Mary Gates Hall")
# setView() Set center and zoom of map
leaflet() %>%
addTiles() %>%
setView(lng = -122.30768, lat = 47.65486, zoom = 10)
# Example: Leaflet with UW Data ----
# Small hand-made data frame of campus landmarks: coordinates, a label,
# and walking time (minutes) from Mary Gates Hall.
uw_landmarks <- data.frame(
lng = c(
-122.30768, -122.30992, -122.31500
),
lat = c(
47.65486, 47.65584, 47.65246
),
name = c(
"Mary Gates Hall", "Brick Monoliths", "Fisheries"
),
minutes_away = c(
0, 5, 20
),
stringsAsFactors = F
)
# Markers from explicit vectors pulled out of the data frame.
leaflet() %>%
addTiles() %>%
addMarkers(lng = uw_landmarks$lng, lat = uw_landmarks$lat,
popup = uw_landmarks$name)
# tilde character can be considered as "evaluate this variable
# in the context of the data we're using." Similar to when you
# use dplyr, except with leaflet, you have to explicitly include
# tilde.
leaflet(uw_landmarks) %>%
addTiles() %>%
addMarkers(lng = ~lng, lat = ~lat,
popup = ~name)
# Circle markers whose radius (in pixels) encodes walking time.
leaflet(data = uw_landmarks) %>%
addTiles() %>%
addCircleMarkers(lng = ~lng, lat = ~lat,
radius = ~minutes_away,
popup = ~name)
# colorNumeric Creates color palette given a set of colors and a domain
distance_pal <- colorNumeric(
palette = c("green", "red"),
domain = uw_landmarks$minutes_away
)
leaflet(data = uw_landmarks) %>%
addTiles() %>%
addCircleMarkers(lng = ~lng, lat = ~lat,
color = ~distance_pal(minutes_away),
popup = ~name)
# Example: Leaflet Choropleth of State Density
library(geojsonio)
# Read state polygons as a Spatial object; the geojson file must exist
# in the working directory.
states <- geojson_read("us-states.geojson", what = "sp")
dens_pal <- colorNumeric(palette = "Reds", domain = states$density)
leaflet(states) %>%
addTiles() %>%
addPolygons(
fillColor = ~dens_pal(density),
weight = 0,
popup = ~paste0(name, ": ", density, " people per square mile")
)
# That's pretty pale... why??
# Density is heavily skewed, so a linear palette washes most states out;
# binning the domain gives each bin a distinguishable colour.
bins <- c(0, 10, 20, 50, 100, 200, 500, 1000, Inf)
dens_pal <- colorBin(palette = "Reds", domain = states$density, bins = bins)
leaflet(states) %>%
addTiles() %>%
addPolygons(
fillColor = ~dens_pal(density),
weight = 0,
popup = ~paste0(name, ": ", density, " people per square mile")
)
| /leaflet.R | permissive | andreybutenko/leaflet-demo | R | false | false | 2,470 | r | # install.packages(c("ggplot2", "dplyr", "geojsonio"))
library(leaflet)
library(dplyr)
# Basic leaflet usage ----
# leaflet() Starts a map
# addTiles() Add basemap so you can se eenvironment
# addMarkers() Add a marker at a position
leaflet() %>%
addTiles() %>%
addMarkers(lng = -122.30768, lat = 47.65486, popup = "Mary Gates Hall")
# setView() Set center and zoom of map
leaflet() %>%
addTiles() %>%
setView(lng = -122.30768, lat = 47.65486, zoom = 10)
# Example: Leaflet with UW Data ----
uw_landmarks <- data.frame(
lng = c(
-122.30768, -122.30992, -122.31500
),
lat = c(
47.65486, 47.65584, 47.65246
),
name = c(
"Mary Gates Hall", "Brick Monoliths", "Fisheries"
),
minutes_away = c(
0, 5, 20
),
stringsAsFactors = F
)
leaflet() %>%
addTiles() %>%
addMarkers(lng = uw_landmarks$lng, lat = uw_landmarks$lat,
popup = uw_landmarks$name)
# tilde character can be considered as "evaluate this variable
# in the context of the data we're using." Similar to when you
# use dplyr, except with leaflet, you have to explicitly include
# tilde.
leaflet(uw_landmarks) %>%
addTiles() %>%
addMarkers(lng = ~lng, lat = ~lat,
popup = ~name)
leaflet(data = uw_landmarks) %>%
addTiles() %>%
addCircleMarkers(lng = ~lng, lat = ~lat,
radius = ~minutes_away,
popup = ~name)
# colorNumeric Creates color palette given a set of colors and a domain
distance_pal <- colorNumeric(
palette = c("green", "red"),
domain = uw_landmarks$minutes_away
)
leaflet(data = uw_landmarks) %>%
addTiles() %>%
addCircleMarkers(lng = ~lng, lat = ~lat,
color = ~distance_pal(minutes_away),
popup = ~name)
# Example: Leaflet Choropleth of State Density
library(geojsonio)
states <- geojson_read("us-states.geojson", what = "sp")
dens_pal <- colorNumeric(palette = "Reds", domain = states$density)
leaflet(states) %>%
addTiles() %>%
addPolygons(
fillColor = ~dens_pal(density),
weight = 0,
popup = ~paste0(name, ": ", density, " people per square mile")
)
# That's pretty pale... why??
bins <- c(0, 10, 20, 50, 100, 200, 500, 1000, Inf)
dens_pal <- colorBin(palette = "Reds", domain = states$density, bins = bins)
leaflet(states) %>%
addTiles() %>%
addPolygons(
fillColor = ~dens_pal(density),
weight = 0,
popup = ~paste0(name, ": ", density, " people per square mile")
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmnetFlex.R
\name{predict.glmnetfit}
\alias{predict.glmnetfit}
\title{Get predictions from a \code{glmnetfit} fit object}
\usage{
\method{predict}{glmnetfit}(object, newx, s = NULL, type = c("link",
"response", "coefficients", "nonzero"), exact = FALSE, newoffset, ...)
}
\arguments{
\item{object}{Fitted "glmnetfit" object.}
\item{newx}{Matrix of new values for \code{x} at which predictions are to be
made. Must be a matrix. This argument is not used for \code{type =
c("coefficients","nonzero")}.}
\item{s}{Value(s) of the penalty parameter lambda at which predictions are
required. Default is the entire sequence used to create the model.}
\item{type}{Type of prediction required. Type "link" gives the linear
predictors (eta scale); Type "response" gives the fitted values (mu scale).
Type "coefficients" computes the coefficients at the requested values for s.
Type "nonzero" returns a list of the indices of the nonzero coefficients for
each value of s.}
\item{exact}{This argument is relevant only when predictions are made at values
of \code{s} (lambda) \emph{different} from those used in the fitting of the
original model. If \code{exact=FALSE} (default), then the predict function
uses linear interpolation to make predictions for values of \code{s} (lambda)
that do not coincide with those used in the fitting algorithm. While this is
often a good approximation, it can sometimes be a bit coarse. With
\code{exact=TRUE}, these different values of \code{s} are merged (and sorted)
with \code{object$lambda}, and the model is refit before predictions are made.
In this case, it is required to supply the original data x= and y= as additional
named arguments to predict() or coef(). The workhorse \code{predict.glmnet()}
needs to update the model, and so needs the data used to create it. The same
is true of weights, offset, penalty.factor, lower.limits, upper.limits if
these were used in the original call. Failure to do so will result in an error.}
\item{newoffset}{If an offset is used in the fit, then one must be supplied for
making predictions (except for type="coefficients" or type="nonzero").}
\item{...}{This is the mechanism for passing arguments like \code{x=} when
\code{exact=TRUE}; see \code{exact} argument.}
}
\value{
The object returned depends on type.
}
\description{
Gives fitted values, linear predictors, coefficients and number of non-zero
coefficients from a fitted \code{glmnetfit} object.
}
| /man/predict.glmnetfit.Rd | no_license | nfultz/glmnet-mirror | R | false | true | 2,517 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmnetFlex.R
\name{predict.glmnetfit}
\alias{predict.glmnetfit}
\title{Get predictions from a \code{glmnetfit} fit object}
\usage{
\method{predict}{glmnetfit}(object, newx, s = NULL, type = c("link",
"response", "coefficients", "nonzero"), exact = FALSE, newoffset, ...)
}
\arguments{
\item{object}{Fitted "glmnetfit" object.}
\item{newx}{Matrix of new values for \code{x} at which predictions are to be
made. Must be a matrix. This argument is not used for \code{type =
c("coefficients","nonzero")}.}
\item{s}{Value(s) of the penalty parameter lambda at which predictions are
required. Default is the entire sequence used to create the model.}
\item{type}{Type of prediction required. Type "link" gives the linear
predictors (eta scale); Type "response" gives the fitted values (mu scale).
Type "coefficients" computes the coefficients at the requested values for s.
Type "nonzero" returns a list of the indices of the nonzero coefficients for
each value of s.}
\item{exact}{This argument is relevant only when predictions are made at values
of \code{s} (lambda) \emph{different} from those used in the fitting of the
original model. If \code{exact=FALSE} (default), then the predict function
uses linear interpolation to make predictions for values of \code{s} (lambda)
that do not coincide with those used in the fitting algorithm. While this is
often a good approximation, it can sometimes be a bit coarse. With
\code{exact=TRUE}, these different values of \code{s} are merged (and sorted)
with \code{object$lambda}, and the model is refit before predictions are made.
In this case, it is required to supply the original data x= and y= as additional
named arguments to predict() or coef(). The workhorse \code{predict.glmnet()}
needs to update the model, and so needs the data used to create it. The same
is true of weights, offset, penalty.factor, lower.limits, upper.limits if
these were used in the original call. Failure to do so will result in an error.}
\item{newoffset}{If an offset is used in the fit, then one must be supplied for
making predictions (except for type="coefficients" or type="nonzero").}
\item{...}{This is the mechanism for passing arguments like \code{x=} when
\code{exact=TRUE}; see \code{exact} argument.}
}
\value{
The object returned depends on type.
}
\description{
Gives fitted values, linear predictors, coefficients and number of non-zero
coefficients from a fitted \code{glmnetfit} object.
}
|
#' convert phrases into single tokens
#'
#' Replace multi-word phrases in text(s) with a compound version of the phrases
#' concatenated with \code{concatenator} (by default, the "\code{_}" character) to
#' form a single token. This prevents tokenization of the phrases during
#' subsequent processing by eliminating the whitespace delimiter.
#' @param object source texts, a character or character vector
#' @param phrases a \code{\link{dictionary}} object that
#' contains some phrases, defined as multiple words delimited by whitespace,
#' up to 9 words long; or a quanteda collocation object created
#' by \code{\link{collocations}}
#' @param concatenator the concatenation character that will connect the words
#' making up the multi-word phrases. The default \code{_} is highly
#' recommended since it will not be removed during normal cleaning and
#' tokenization (while nearly all other punctuation characters, at least those
#' in the Unicode punctuation class [P], will be removed).
#' @param ... additional arguments passed to the dispatched method
#' @return character or character vector of texts with phrases replaced by
#' compound "words" joined by the concatenator
#' @export
#' @author Kenneth Benoit
#' @examples
#' mytexts <- c("The new law included a capital gains tax, and an inheritance tax.",
#' "New York City has raised a taxes: an income tax and a sales tax.")
#' mydict <- dictionary(list(tax=c("tax", "income tax", "capital gains tax", "inheritance tax")))
#' (cw <- phrasetotoken(mytexts, mydict))
#' dfm(cw, verbose=FALSE)
#'
#' # when used as a dictionary for dfm creation
#' mydfm2 <- dfm(cw, dictionary = lapply(mydict, function(x) gsub(" ", "_", x)))
#' mydfm2
#' # to pick up "taxes" in the second text, set valuetype = "regex"
#' mydfm3 <- dfm(cw, dictionary = lapply(mydict, phrasetotoken, mydict),
#' valuetype = "regex")
#' mydfm3
#' ## one more token counted for "tax" than before
setGeneric("phrasetotoken",
function(object, phrases, ...)
standardGeneric("phrasetotoken"))
#' @rdname phrasetotoken
#' @export
setMethod("phrasetotoken", signature = c("corpus", "ANY"),
function(object, phrases, ...) {
# replace the phrases within every text of the corpus, then
# return the modified corpus object
texts(object) <- phrasetotoken(texts(object), phrases, ...)
object
})
#' @rdname phrasetotoken
#' @export
#' @examples
#' # using a dictionary to pre-process multi-word expressions
#' myDict <- dictionary(list(negative = c("bad* word*", "negative", "awful text"),
#' postiive = c("good stuff", "like? th??")))
#' txt <- c("I liked this, when we can use bad words, in awful text.",
#' "Some damn good stuff, like the text, she likes that too.")
#' phrasetotoken(txt, myDict)
#'
setMethod("phrasetotoken", signature = c("character", "dictionary"),
          function(object, phrases, ...) {
              # flatten the dictionary and keep only multi-word entries,
              # i.e. those containing the dictionary's own concatenator
              sep <- phrases@concatenator
              entries <- unlist(phrases, use.names = FALSE)
              multiword <- entries[stringi::stri_detect_fixed(entries, sep)]
              # rewrite the dictionary concatenator as a plain space so the
              # phrases can be matched against whitespace-delimited text
              multiword <- stringi::stri_replace_all_fixed(multiword, sep, " ")
              # delegate to the character,character workhorse method
              phrasetotoken(object, multiword, ...)
          })
# S4 wrapper class so a collocations object (a data.table) can be used
# for method dispatch
setClass("collocations", contains = "data.table")
#' @rdname phrasetotoken
#' @export
setMethod("phrasetotoken", signature = c("character", "collocations"),
function(object, phrases, ...) {
# bind the NSE column names to silence R CMD check notes
word1 <- word2 <- word3 <- NULL
# concatenate the words
word123 <- phrases[, list(word1, word2, word3)]
mwes <- apply(word123, 1, paste, collapse=" ")
# strip trailing white space (if no word 3)
mwes <- stringi::stri_trim_both(mwes)
# delegate to the character,character workhorse method
phrasetotoken(object, mwes, ...)
})
#' @rdname phrasetotoken
#' @param valuetype how to interpret word matching patterns: \code{"glob"} for
#' "glob"-style wildcarding, \code{fixed} for words as
#' is; \code{"regex"} for regular expressions
#' @param case_insensitive if \code{TRUE}, ignore case when matching
#' @param ... additional arguments passed through to core \code{"character,character"} method
#' @export
#' @examples
#' # on simple text
#' phrasetotoken("This is a simpler version of multi word expressions.", "multi word expression*")
setMethod("phrasetotoken", signature = c("character", "character"),
          function(object, phrases, concatenator = "_", valuetype = c("glob", "regex", "fixed"),
                   case_insensitive = TRUE, ...) {
              valuetype <- match.arg(valuetype)
              # Translate glob wildcards (* and ?) into regular expressions;
              # for valuetype = "regex" the patterns are used as supplied.
              # BUG FIX: the original never assigned compoundPhrases for
              # valuetype = "regex", raising "object 'compoundPhrases' not
              # found"; also use scalar `||` inside the scalar `if`.
              if (valuetype == "glob" || valuetype == "fixed") {
                  compoundPhrases <- stringi::stri_replace_all_fixed(phrases, c("*", "?"),
                                                                    c("[^\\s]*", "[^\\s]"),
                                                                    vectorize_all = FALSE)
              } else {
                  compoundPhrases <- phrases
              }
              # For each phrase, capture each word and rewrite the match with
              # the words joined by the concatenator.
              compoundPhrasesList <- strsplit(compoundPhrases, "\\s")
              for (l in compoundPhrasesList) {
                  re.search <- paste("(\\b", paste(l, collapse = paste0(")\\p{WHITE_SPACE}+(")), "\\b)", sep = "")
                  re.replace <- paste("$", seq_along(l), sep = "", collapse = concatenator)
                  object <- stringi::stri_replace_all_regex(object, re.search, re.replace, case_insensitive = case_insensitive)
              }
              object
          })
| /R/phrases.R | no_license | VIboy/quanteda | R | false | false | 5,571 | r |
#' convert phrases into single tokens
#'
#' Replace multi-word phrases in text(s) with a compound version of the phrases
#' concatenated with \code{concatenator} (by default, the "\code{_}" character) to
#' form a single token. This prevents tokenization of the phrases during
#' subsequent processing by eliminating the whitespace delimiter.
#' @param object source texts, a character or character vector
#' @param phrases a \code{\link{dictionary}} object that
#' contains some phrases, defined as multiple words delimited by whitespace,
#' up to 9 words long; or a quanteda collocation object created
#' by \code{\link{collocations}}
#' @param concatenator the concatenation character that will connect the words
#' making up the multi-word phrases. The default \code{_} is highly
#' recommended since it will not be removed during normal cleaning and
#' tokenization (while nearly all other punctuation characters, at least those
#' in the Unicode punctuation class [P] will be removed.
#' @return character or character vector of texts with phrases replaced by
#' compound "words" joined by the concatenator
#' @export
#' @author Kenneth Benoit
#' @examples
#' mytexts <- c("The new law included a capital gains tax, and an inheritance tax.",
#' "New York City has raised a taxes: an income tax and a sales tax.")
#' mydict <- dictionary(list(tax=c("tax", "income tax", "capital gains tax", "inheritance tax")))
#' (cw <- phrasetotoken(mytexts, mydict))
#' dfm(cw, verbose=FALSE)
#'
#' # when used as a dictionary for dfm creation
#' mydfm2 <- dfm(cw, dictionary = lapply(mydict, function(x) gsub(" ", "_", x)))
#' mydfm2
#' # to pick up "taxes" in the second text, set valuetype = "regex"
#' mydfm3 <- dfm(cw, dictionary = lapply(mydict, phrasetotoken, mydict),
#' valuetype = "regex")
#' mydfm3
#' ## one more token counted for "tax" than before
setGeneric("phrasetotoken",
function(object, phrases, ...)
standardGeneric("phrasetotoken"))
#' @rdname phrasetotoken
#' @export
setMethod("phrasetotoken", signature = c("corpus", "ANY"),
          function(object, phrases, ...) {
              # Compound the phrases in the corpus texts via the character
              # method, then write the result back into the corpus.
              compounded <- phrasetotoken(texts(object), phrases, ...)
              texts(object) <- compounded
              object
          })
#' @rdname phrasetotoken
#' @export
#' @examples
#' # using a dictionary to pre-process multi-word expressions
#' myDict <- dictionary(list(negative = c("bad* word*", "negative", "awful text"),
#'                           postiive = c("good stuff", "like? th??")))
#' txt <- c("I liked this, when we can use bad words, in awful text.",
#'          "Some damn good stuff, like the text, she likes that too.")
#' phrasetotoken(txt, myDict)
#'
setMethod("phrasetotoken", signature = c("character", "dictionary"),
          function(object, phrases, ...) {
              # Multi-word dictionary values are stored with the dictionary's
              # concatenator between the words; recover them as space-delimited
              # phrases and delegate to the character,character method.
              sep <- phrases@concatenator
              values <- unlist(phrases, use.names = FALSE)
              multiword <- values[stringi::stri_detect_fixed(values, sep)]
              # replace the stored concatenator with a simple space
              multiword <- stringi::stri_replace_all_fixed(multiword, sep, " ")
              phrasetotoken(object, multiword, ...)
          })
# Register the data.table-backed collocations class so it can appear in the
# S4 method signature below.
setClass("collocations", contains = "data.table")
#' @rdname phrasetotoken
#' @export
setMethod("phrasetotoken", signature = c("character", "collocations"),
function(object, phrases, ...) {
# NULL bindings silence R CMD check notes about data.table's
# non-standard evaluation of the column names used below
word1 <- word2 <- word3 <- NULL
# concatenate the words of each collocation into one space-delimited phrase
word123 <- phrases[, list(word1, word2, word3)]
mwes <- apply(word123, 1, paste, collapse=" ")
# strip trailing white space (if no word 3)
mwes <- stringi::stri_trim_both(mwes)
# delegate to the character,character method
phrasetotoken(object, mwes, ...)
})
#' @rdname phrasetotoken
#' @param valuetype how to interpret word matching patterns: \code{"glob"} for
#'   "glob"-style wildcarding, \code{"fixed"} for words as
#'   is; \code{"regex"} for regular expressions
#' @param case_insensitive if \code{TRUE}, ignore case when matching
#' @param ... additional arguments passed through to core \code{"character,character"} method
#' @export
#' @examples
#' # on simple text
#' phrasetotoken("This is a simpler version of multi word expressions.", "multi word expression*")
setMethod("phrasetotoken", signature = c("character", "character"),
          function(object, phrases, concatenator = "_", valuetype = c("glob", "regex", "fixed"),
                   case_insensitive = TRUE, ...) {
              valuetype <- match.arg(valuetype)
              if (valuetype == "glob" || valuetype == "fixed") {
                  # translate glob wildcards into regular expressions:
                  # "*" -> any run of non-space chars, "?" -> one non-space char
                  # (note: "fixed" patterns get the same translation, matching
                  # the historical behavior of this method)
                  compoundPhrases <- stringi::stri_replace_all_fixed(phrases, c("*", "?"),
                                                                    c("[^\\s]*", "[^\\s]"),
                                                                    vectorize_all = FALSE)
              } else {
                  # BUG FIX: compoundPhrases was previously never assigned for
                  # valuetype = "regex", causing an "object not found" error
                  compoundPhrases <- phrases
              }
              compoundPhrasesList <- strsplit(compoundPhrases, "\\s")
              for (l in compoundPhrasesList) {
                  # one capture group per word, matched across whitespace runs;
                  # the replacement rejoins the captures with the concatenator
                  re.search <- paste("(\\b", paste(l, collapse = ")\\p{WHITE_SPACE}+("), "\\b)", sep = "")
                  re.replace <- paste("$", seq_along(l), sep = "", collapse = concatenator)
                  object <- stringi::stri_replace_all_regex(object, re.search, re.replace,
                                                            case_insensitive = case_insensitive)
              }
              object
          })
|
#' Maximum likelihood estimate
#'
#' Perform inference of bias and interaction parameters for a single response group
#'
#' Given a numeric data matrix, either pseudo-likelihood maximization
#' or mean-field theory is used to find the maximum likelihood estimate
#' of bias \code{h} and interaction \code{J} parameters. Normally
#' called by \code{\link{bbl}} rather than directly.
#'
#' @param xi Data matrix; expected to be numeric with elements ranging from
#' zero to positive integral upper bound \code{L-1}.
#' @param weights Frequency vector of number of times each row of \code{xi}
#' is to be repeated. If \code{NULL}, defaults to 1. Expected
#' to be non-negative integers.
#' @param qJ Matrix of logicals indicating which predictor pairs are
#' interacting. If \code{NULL}, all are allowed.
#' @param method \code{c('pseudo','mf')} for pseudo-likelihood maximization or
#' mean field inference.
#' @param L Vector of number of factor levels in each predictor. If
#' \code{NULL}, will be inferred from \code{xi}.
#' @param lambda Vector of L2 regularization parameters for
#' \code{method = 'pseudo'}. Applies to interaction parameters \code{J}.
#' @param lambdah L2 parameters for \code{h} in \code{'pseudo'}.
#' If \code{NULL}, it is set equal to \code{lambda}.
#' \code{lambdah = 0} will free \code{h} from penalization.
#' @param symmetrize Enforce the symmetry of interaction parameters by
#' taking mean values of the matrix and its trace:
#' \eqn{J_{ij}^{(y)}(x_1,x_2)=J_{ji}^{(y)}(x_2,x_1)}.
#' @param eps Vector of regularization parameters for \code{mf}. Must be
#' within the range of \eqn{\epsilon \in [0,1]}.
#' @param nprint Frequency of printing iteration progress under \code{'pseudo'}.
#' @param itmax Maximum number of iterations for \code{'pseudo'}.
#' @param tolerance Upper bound for fractional changes in pseudo-likelihood
#' values before terminating iteration in \code{'pseudo'}.
#' @param verbose Verbosity level.
#' @param prior.count Prior count for \code{method = 'mf'} to reduce
#' numerical instability.
#' @param naive Naive Bayes inference. Equivalent to \code{method = 'mf'} together
#' with \code{eps = 0}.
#' @param lz.half Divide interaction term in approximation to \eqn{\ln Z_{iy}}
#' in \code{'pseudo'}.
#' @return List of inferred parameters \code{h} and \code{J}. See
#' \code{\link{bbl}} for parameter structures.
#' @examples
#' set.seed(535)
#' predictors <- list()
#' for(i in 1:5) predictors[[i]] <- c('a','c','g','t')
#' par <- randompar(predictors)
#' par
#' xi <- sample_xi(nsample=5000, predictors=predictors, h=par$h, J=par$J,
#' code_out=TRUE)
#' head(xi)
#' ps <- mlestimate(xi=xi, method='pseudo', lambda=0)
#' ps$h
#' ps$J[[1]]
#' mf <- mlestimate(xi=xi, method='mf', eps=0.9)
#' plot(x=unlist(par$h), y=unlist(ps$h), xlab='True', ylab='Inferred')
#' segments(x0=-2, x1=2, y0=-2, y1=2, lty=2)
#' points(x=unlist(par$J), y=unlist(ps$J), col='red')
#' points(x=unlist(par$h), y=unlist(mf$h), col='blue')
#' points(x=unlist(par$J), y=unlist(mf$J), col='green')
#' @export
mlestimate <- function(xi, weights=NULL, qJ=NULL, method='pseudo',
                       L=NULL, lambda=1e-5, lambdah=0, symmetrize=TRUE, eps=0.9,
                       nprint=100, itmax=10000, tolerance=1e-5, verbose=1,
                       prior.count=1, naive=FALSE, lz.half=FALSE){

  # lambdah = NULL means "penalize h with the same strength as J"
  if(is.null(lambdah))
    lambdah <- lambda

  m <- NCOL(xi)

  # Default interaction mask: every predictor pair interacts except self-pairs
  if(is.null(qJ)){
    qJ <- matrix(TRUE, nrow=m, ncol=m)
    rownames(qJ) <- colnames(qJ) <- colnames(xi)
    diag(qJ) <- FALSE
  }
  if(naive) qJ[which(qJ, arr.ind=TRUE)] <- FALSE   # naive Bayes: drop all interactions
  else naive <- sum(qJ) == 0                       # no interacting pairs implies naive

  # Infer (or validate) per-predictor level counts; data are coded 0..L-1
  La <- apply(xi, 2, max)
  if(is.null(L))
    L <- La
  else{
    if(!all(L >= La))
      stop('Data provided have predictor levels exceeding L')
    L <- L - 1
  }

  xi <- as.matrix(xi)
  # scalar condition: use || (short-circuit) rather than elementwise |
  if(!is.numeric(xi[1,1]) || min(xi) < 0)
    stop('Input data to mlestimate must be numeric and non-negative')
  nsample <- NROW(xi)
  if(is.null(weights)) weights <- rep(1L, nsample)
  else if(length(weights) != nsample)
    stop('Length of weights does not match data')

  # Dispatch to the compiled back ends
  if(method=='pseudo'){
    Lambda <- c(lambda, lambdah)
    Nprint <- c(nprint)
    Itmax <- c(itmax)
    Tol <- c(tolerance)
    Verbose <- c(verbose)
    Lzhalf <- c(lz.half)
    Naive <- c(naive)
    theta <- pseudo_mle(xi, weights, qJ, L, Lambda, Nprint, Itmax, Tol,
                        Naive, Verbose, Lzhalf)
    L <- theta$L
  }
  else if(method=='mf'){
    Eps <- c(eps)
    theta <- mfwrapper(xi, weights, qJ, L, Eps, prior.count)
  }
  else stop('unknown method in mlestimate')

  h <- theta$h

  # Reassemble J as an m x m list of Li x Lj matrices; optionally symmetrize
  J <- vector('list', m)
  for(i in seq_len(m)) J[[i]] <- vector('list', m)
  for(i in seq_len(m)){
    if(naive) next   # no interactions to fill in
    Li <- L[i]
    for(j in seq(i, m)){
      Lj <- L[j]
      x <- matrix(theta$J[[i]][[j]], nrow=Li, ncol=Lj, byrow=TRUE)
      xt <- matrix(theta$J[[j]][[i]], nrow=Lj, ncol=Li, byrow=TRUE)
      if(i < j && symmetrize){
        # average J_ij and t(J_ji) to enforce J_ij(x1,x2) == J_ji(x2,x1)
        x <- (x + t(xt))/2
        xt <- t(x)
      }
      J[[i]][[j]] <- x
      J[[j]][[i]] <- xt
    }
  }
  return(list(h=h, J=J, lkh=theta$lkh, lz=theta$lz))
}
| /bbl/R/mlk.R | no_license | akhikolla/InformationHouse | R | false | false | 5,233 | r | #' Maximum likelihood estimate
#'
#' Perform inference of bias and interaction parameters for a single response group
#'
#' Given a numeric data matrix, either pseudo-likelihood maximization
#' or mean-field theory is used to find the maximum likelihood estimate
#' of bias \code{h} and interaction \code{J} parameters. Normally
#' called by \code{\link{bbl}} rather than directly.
#'
#' @param xi Data matrix; expected to be numeric with elements ranging from
#' zero to positive integral upper bound \code{L-1}.
#' @param weights Frequency vector of number of times each row of \code{xi}
#' is to be repeated. If \code{NULL}, defaults to 1. Expected
#' to be non-negative integers.
#' @param qJ Matrix of logicals indicating which predictor pairs are
#' interacting. If \code{NULL}, all are allowed.
#' @param method \code{c('pseudo','mf')} for pseudo-likelihood maximization or
#' mean field inference.
#' @param L Vector of number of factor levels in each predictor. If
#' \code{NULL}, will be inferred from \code{xi}.
#' @param lambda Vector of L2 regularization parameters for
#' \code{method = 'pseudo'}. Applies to interaction parameters \code{J}.
#' @param lambdah L2 parameters for \code{h} in \code{'pseudo'}.
#' If \code{NULL}, it is set equal to \code{lambda}.
#' \code{lambdah = 0} will free \code{h} from penalization.
#' @param symmetrize Enforce the symmetry of interaction parameters by
#' taking mean values of the matrix and its trace:
#' \eqn{J_{ij}^{(y)}(x_1,x_2)=J_{ji}^{(y)}(x_2,x_1)}.
#' @param eps Vector of regularization parameters for \code{mf}. Must be
#' within the range of \eqn{\epsilon \in [0,1]}.
#' @param nprint Frequency of printing iteration progress under \code{'pseudo'}.
#' @param itmax Maximum number of iterations for \code{'pseudo'}.
#' @param tolerance Upper bound for fractional changes in pseudo-likelihood
#' values before terminating iteration in \code{'pseudo'}.
#' @param verbose Verbosity level.
#' @param prior.count Prior count for \code{method = 'mf'} to reduce
#' numerical instability.
#' @param naive Naive Bayes inference. Equivalent to \code{method = 'mf'} together
#' with \code{eps = 0}.
#' @param lz.half Divide interaction term in approximation to \eqn{\ln Z_{iy}}
#' in \code{'pseudo'}.
#' @return List of inferred parameters \code{h} and \code{J}. See
#' \code{\link{bbl}} for parameter structures.
#' @examples
#' set.seed(535)
#' predictors <- list()
#' for(i in 1:5) predictors[[i]] <- c('a','c','g','t')
#' par <- randompar(predictors)
#' par
#' xi <- sample_xi(nsample=5000, predictors=predictors, h=par$h, J=par$J,
#' code_out=TRUE)
#' head(xi)
#' ps <- mlestimate(xi=xi, method='pseudo', lambda=0)
#' ps$h
#' ps$J[[1]]
#' mf <- mlestimate(xi=xi, method='mf', eps=0.9)
#' plot(x=unlist(par$h), y=unlist(ps$h), xlab='True', ylab='Inferred')
#' segments(x0=-2, x1=2, y0=-2, y1=2, lty=2)
#' points(x=unlist(par$J), y=unlist(ps$J), col='red')
#' points(x=unlist(par$h), y=unlist(mf$h), col='blue')
#' points(x=unlist(par$J), y=unlist(mf$J), col='green')
#' @export
mlestimate <- function(xi, weights=NULL, qJ=NULL, method='pseudo',
                       L=NULL, lambda=1e-5, lambdah=0, symmetrize=TRUE, eps=0.9,
                       nprint=100, itmax=10000, tolerance=1e-5, verbose=1,
                       prior.count=1, naive=FALSE, lz.half=FALSE){

  # lambdah = NULL means "penalize h with the same strength as J"
  if(is.null(lambdah))
    lambdah <- lambda

  m <- NCOL(xi)

  # Default interaction mask: every predictor pair interacts except self-pairs
  if(is.null(qJ)){
    qJ <- matrix(TRUE, nrow=m, ncol=m)
    rownames(qJ) <- colnames(qJ) <- colnames(xi)
    diag(qJ) <- FALSE
  }
  if(naive) qJ[which(qJ, arr.ind=TRUE)] <- FALSE   # naive Bayes: drop all interactions
  else naive <- sum(qJ) == 0                       # no interacting pairs implies naive

  # Infer (or validate) per-predictor level counts; data are coded 0..L-1
  La <- apply(xi, 2, max)
  if(is.null(L))
    L <- La
  else{
    if(!all(L >= La))
      stop('Data provided have predictor levels exceeding L')
    L <- L - 1
  }

  xi <- as.matrix(xi)
  # scalar condition: use || (short-circuit) rather than elementwise |
  if(!is.numeric(xi[1,1]) || min(xi) < 0)
    stop('Input data to mlestimate must be numeric and non-negative')
  nsample <- NROW(xi)
  if(is.null(weights)) weights <- rep(1L, nsample)
  else if(length(weights) != nsample)
    stop('Length of weights does not match data')

  # Dispatch to the compiled back ends
  if(method=='pseudo'){
    Lambda <- c(lambda, lambdah)
    Nprint <- c(nprint)
    Itmax <- c(itmax)
    Tol <- c(tolerance)
    Verbose <- c(verbose)
    Lzhalf <- c(lz.half)
    Naive <- c(naive)
    theta <- pseudo_mle(xi, weights, qJ, L, Lambda, Nprint, Itmax, Tol,
                        Naive, Verbose, Lzhalf)
    L <- theta$L
  }
  else if(method=='mf'){
    Eps <- c(eps)
    theta <- mfwrapper(xi, weights, qJ, L, Eps, prior.count)
  }
  else stop('unknown method in mlestimate')

  h <- theta$h

  # Reassemble J as an m x m list of Li x Lj matrices; optionally symmetrize
  J <- vector('list', m)
  for(i in seq_len(m)) J[[i]] <- vector('list', m)
  for(i in seq_len(m)){
    if(naive) next   # no interactions to fill in
    Li <- L[i]
    for(j in seq(i, m)){
      Lj <- L[j]
      x <- matrix(theta$J[[i]][[j]], nrow=Li, ncol=Lj, byrow=TRUE)
      xt <- matrix(theta$J[[j]][[i]], nrow=Lj, ncol=Li, byrow=TRUE)
      if(i < j && symmetrize){
        # average J_ij and t(J_ji) to enforce J_ij(x1,x2) == J_ji(x2,x1)
        x <- (x + t(xt))/2
        xt <- t(x)
      }
      J[[i]][[j]] <- x
      J[[j]][[i]] <- xt
    }
  }
  return(list(h=h, J=J, lkh=theta$lkh, lz=theta$lz))
}
|
library(ggplot2)

# Read the Pivotal Tracker export and the project budget export.
pivottrackerdata = read.csv("applico_fall_intern_project.csv", header = TRUE, stringsAsFactors = TRUE)
projectbudgetdata = read.csv("Fall_Intern_Project_Budget.csv", header = TRUE, stringsAsFactors = FALSE)
pivottrackerdata$Estimate
sum(pivottrackerdata$Estimate)

# Complexity points remaining at the start of each of the 6 weeks:
# start at 20, subtract each week's completed estimate (init hoisted out of loop).
pointsremaining = numeric(6)
pointsremaining[1] = 20
for (i in 1:5) {
  pointsremaining[i + 1] = pointsremaining[i] - pivottrackerdata$Estimate[i]
}

# Strip "$" and "," so the currency column can be treated as numeric.
projectbudgetdata$Budget_Used = gsub("\\$", "", projectbudgetdata$Budget_Used)
projectbudgetdata$Budget_Used = as.numeric(gsub(",", "", projectbudgetdata$Budget_Used)) # Converting the currency to numeric
projectbudgetdata$Budget_Used

# Cumulative budget consumed at the start of each week.
budgetused = numeric(6)
budgetused[1] = 0
for (i in 1:5) {
  budgetused[i + 1] = budgetused[i] + projectbudgetdata$Budget_Used[i]
}
budgetused

weeks = c(1, 2, 3, 4, 5, 6)
pointsremaining
burnchartdataframe = data.frame(Week_Number = weeks,
                                Budget_Consumed = budgetused,
                                Complexity_Points_Remaining = pointsremaining)
burnchartdataframe

# BUG FIX: the aes() call previously contained the unparseable argument
# "Complexity Points Remaining = ..."; map y to the column directly.
cc = ggplot(burnchartdataframe, aes(x = Budget_Consumed, y = Complexity_Points_Remaining))
cc + geom_point(size = 5, colour = "darkblue", shape = 18) +
  xlab("Budget Consumed ($)") +
  ylab("Complexity Points Remaining") +
  ggtitle("Intern Project Burndown Chart") +
  theme(plot.title = element_text(size=20, face="bold")) +
  coord_cartesian(xlim=c(-500, 10000)) +
  theme(axis.title.x = element_text(face="bold", colour="#990000", size=15)) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=15))
| /Burndownchart.R | no_license | gketchum/Data-Intern-Project | R | false | false | 1,540 | r | library(ggplot2)
# Read the Pivotal Tracker export and the project budget export.
pivottrackerdata = read.csv("applico_fall_intern_project.csv", header = TRUE, stringsAsFactors = TRUE)
projectbudgetdata = read.csv("Fall_Intern_Project_Budget.csv", header = TRUE, stringsAsFactors = FALSE)
pivottrackerdata$Estimate
sum(pivottrackerdata$Estimate)

# Complexity points remaining at the start of each of the 6 weeks:
# start at 20, subtract each week's completed estimate (init hoisted out of loop).
pointsremaining = numeric(6)
pointsremaining[1] = 20
for (i in 1:5) {
  pointsremaining[i + 1] = pointsremaining[i] - pivottrackerdata$Estimate[i]
}

# Strip "$" and "," so the currency column can be treated as numeric.
projectbudgetdata$Budget_Used = gsub("\\$", "", projectbudgetdata$Budget_Used)
projectbudgetdata$Budget_Used = as.numeric(gsub(",", "", projectbudgetdata$Budget_Used)) # Converting the currency to numeric
projectbudgetdata$Budget_Used

# Cumulative budget consumed at the start of each week.
budgetused = numeric(6)
budgetused[1] = 0
for (i in 1:5) {
  budgetused[i + 1] = budgetused[i] + projectbudgetdata$Budget_Used[i]
}
budgetused

weeks = c(1, 2, 3, 4, 5, 6)
pointsremaining
burnchartdataframe = data.frame(Week_Number = weeks,
                                Budget_Consumed = budgetused,
                                Complexity_Points_Remaining = pointsremaining)
burnchartdataframe

# BUG FIX: the aes() call previously contained the unparseable argument
# "Complexity Points Remaining = ..."; map y to the column directly.
cc = ggplot(burnchartdataframe, aes(x = Budget_Consumed, y = Complexity_Points_Remaining))
cc + geom_point(size = 5, colour = "darkblue", shape = 18) +
  xlab("Budget Consumed ($)") +
  ylab("Complexity Points Remaining") +
  ggtitle("Intern Project Burndown Chart") +
  theme(plot.title = element_text(size=20, face="bold")) +
  coord_cartesian(xlim=c(-500, 10000)) +
  theme(axis.title.x = element_text(face="bold", colour="#990000", size=15)) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=15))
|
# plot2: line chart of Global Active Power for 1-2 Feb 2007, saved to PNG.
# NOTE(review): both file paths are machine-specific absolute paths --
# consider parameterizing; confirm the data file location before running.
plot2 <- function(){
# File is read without header = TRUE, so columns are named V1, V2, ...
energy_data <- read.table("c:/users/hugo.m.barros/videos/Data Science/Exploratory Data Analysis/Course Project 1/household_power_consumption.txt",sep = ";")
# Rows whose first field (the date) is exactly 1/2/2007 or 2/2/2007
lines<- grep("^(1|2)/2/2007$",energy_data[,"V1"])
feb2007 <- energy_data[lines,]
# Restore the real column names lost by reading without a header row
measures <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
colnames(feb2007) <- measures
# Combine date and time into one POSIXct timestamp per observation
date_day <- as.character(feb2007$Date)
date_time <- as.character(feb2007$Time)
full_date <- paste(date_day, date_time)
full_date <- as.POSIXct(full_date, format = '%d/%m/%Y %H:%M:%S')
# as.numeric(as.character(...)): presumably the column was read as a factor
# (pre-R-4.0 read.table default) -- TODO confirm
plot(full_date, as.numeric(as.character(feb2007$Global_active_power)), type = "l", ylab = "Global Active Power (Kilowatts)", xlab = "")
# Copy the screen device to a PNG file, then close that device
dev.copy(png, file = "C:/Users/hugo.m.barros/Videos/Data Science/Exploratory Data Analysis/Course Project 1/plot2.png")
dev.off()
} | /plot2.R | no_license | hudafh/ExData_Plotting1 | R | false | false | 987 | r | plot2 <- function(){
# (duplicate plot2 body) Reads the power-consumption file without a header,
# filters to 1-2 Feb 2007, and plots Global Active Power vs time, saving a PNG.
energy_data <- read.table("c:/users/hugo.m.barros/videos/Data Science/Exploratory Data Analysis/Course Project 1/household_power_consumption.txt",sep = ";")
# Rows whose first field (the date) is exactly 1/2/2007 or 2/2/2007
lines<- grep("^(1|2)/2/2007$",energy_data[,"V1"])
feb2007 <- energy_data[lines,]
# Restore the real column names lost by reading without a header row
measures <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
colnames(feb2007) <- measures
# Combine date and time into one POSIXct timestamp per observation
date_day <- as.character(feb2007$Date)
date_time <- as.character(feb2007$Time)
full_date <- paste(date_day, date_time)
full_date <- as.POSIXct(full_date, format = '%d/%m/%Y %H:%M:%S')
# as.numeric(as.character(...)): presumably the column was read as a factor -- TODO confirm
plot(full_date, as.numeric(as.character(feb2007$Global_active_power)), type = "l", ylab = "Global Active Power (Kilowatts)", xlab = "")
# Copy the screen device to a PNG file, then close that device
dev.copy(png, file = "C:/Users/hugo.m.barros/Videos/Data Science/Exploratory Data Analysis/Course Project 1/plot2.png")
dev.off()
} |
# Exploratory analysis: trajectory similarity / clustering of AIS ship tracks.
# NOTE(review): interactive/exploratory code; several calls below are known to
# fail and are flagged inline rather than removed.
# One-time setup (uncomment to install):
#install.packages("trajectorySim", repos="http://R-Forge.R-project.org")
#install.packages("SimilarityMeasures")
#install.packages("vegclust")
#install.packages("RColorBrewer")
#install.packages("smacof")
#install.packages("simmer")
#install.packages("MASS")
#install.packages("ggpubr")
#install.packages("factoextra")
methods(class = "trajectory")
library(vegclust)
library(RColorBrewer)
library(smacof)
library(SimilarityMeasures)
library(trajectorySim)
library(simmer)
library(leaflet)
library(MASS)
library(ggpubr)
library(factoextra)
library(dplyr)
help(SimilarityMeasures)
help(trajectorySim)
help(timeout)

# Interactively pick the AIS export; drop columns not needed for the analysis.
file <- choose.files()
file
df <- read.csv(file, header = TRUE)
as <- subset(df, select = -c(X, Length, Beam, TRGT_SENSOR_KIND, AREA_ID, MSG_ID, LOC_ACCRCY))
head(as)
tail(as)

# "doesn't work" (translated from the original Korean comment):
# simmer's trajectory() builds a simulation recipe, not a geographic track.
t0 <- trajectory() %>%
  seize("res0", 1) %>%
  branch(function() 1, c(TRUE, FALSE),
         trajectory() %>%
           clone(2,
                 trajectory() %>%
                   seize("res1", 1) %>%
                   timeout(1) %>%
                   release("res1", 1),
                 trajectory() %>%
                   trap("signal",
                        handler = trajectory() %>%
                          timeout(1)) %>%
                   timeout(1)),
         trajectory() %>%
           set_attribute("dummy", 1) %>%
           seize("res2", function() 1) %>%
           timeout(function() rnorm(1, 20)) %>%
           release("res2", function() 1) %>%
           release("res0", 1) %>%
           rollback(11)) %>%
  synchronize() %>%
  rollback(2) %>%
  release("res0", 1)

# Build a "lat,lon" string per observation.
as$trj <- paste(as$LAT_VAL, as$LON_VAL, sep = ',')
as
#par(mar=c(4,4,1,1))
#trajectoryPCoA(as, as$stype, surveys, traj.colors = c("black","red", "blue"), lwd = 2)
#legend("topleft", col=c("black","red", "blue"),
#       legend=c("Trajectory 1", "Trajectory 2", "Trajectory 3"), bty="n", lty=1, lwd = 2)
trajectoryLengths(as, as$SHIP_ID, surveys)          # NOTE(review): `surveys` is undefined here
s1 = step1measures(as, example.data$time, ID = TRUE)  # NOTE(review): `example.data` is undefined here
s2 = step2factors(as)
install.packages("traj")
library(traj)
s1 = step1measures(as$trj, as$RECV_DT, ID = TRUE)
s2 = step2factors(as)
###########################################################################
file <- choose.files()
file
df <- read.csv(file, header = TRUE)
as <- subset(df, select = -c(X, Length, Beam, TRGT_SENSOR_KIND, AREA_ID, MSG_ID, LOC_ACCRCY))
head(as)
tail(as)
# Setup data and time
zz <- subset(df, select = -c(X, Length, ShipName, stype, Beam, TRGT_SENSOR_KIND, AREA_ID, MSG_ID, LOC_ACCRCY))
head(zz)
data = zz$SOG_VAL
time = zz$RECV_DT
data
time
# Run step1measures, step2factors and step3clusters
s1 = step1measures(data, time, SHIP_ID = TRUE)   # NOTE(review): traj::step1measures takes `ID=`, not `SHIP_ID=` -- confirm
s2 = step2factors(s1)
s3 = step3clusters(s2)
# Print and plot "traj object"
s3
plot(s3)
###########################################################
a1 <- filter(zz, SHIP_ID == 440004750)          # extract rows for one ship (filter)
a2 <- select(a1, LON_VAL, LAT_VAL, SOG_VAL)     # extract columns (select)
library(tidyr)
a3 <- spread(a2, SHIP_ID)   # cases to variables (spread); NOTE(review): SHIP_ID was dropped by select() above
a1 <- zz[duplicated(zz$SHIP_ID), ]
tail(a1)
############################ https://www.datanovia.com/en/blog/k-means-clustering-visualization-in-r-step-by-step-guide/
data("iris")
dd <- iris
head(zz, 3)
# Compute k-means with k = 3
set.seed(123)
res.km <- kmeans(scale(zz[, 1]), 3, nstart = 25)
# K-means clusters showing the group of each individuals
tail(res.km$cluster)
fviz_cluster(res.km, data = zz[, 1],
             palette = c("#2E9FDF", "#00AFBB"),
             geom = "point",
             ellipse.type = "convex",
             ggtheme = theme_bw()
)
# Dimension reduction using PCA
res.pca <- prcomp(dd[, 1], scale = TRUE)
# Coordinates of individuals
ind.coord <- as.data.frame(get_pca_ind(res.pca)$coord)
# Add clusters obtained using the K-means algorithm
ind.coord$cluster <- factor(res.km$cluster)
# Add Species groups from the original data set
ind.coord$Species <- df$Species
# Data inspection
head(ind.coord)
# Percentage of variance explained by dimensions
eigenvalue <- round(get_eigenvalue(res.pca), 1)
variance.percent <- eigenvalue$variance.percent
head(eigenvalue)
ggscatter(
  ind.coord, x = "Dim.1", y = "Dim.2",
  color = "cluster", palette = "npg", ellipse = TRUE, ellipse.type = "convex",
  shape = "cluster", size = 1.5, legend = "right", ggtheme = theme_bw(),
  xlab = paste0("Dim 1 (", variance.percent[1], "% )"),
  ylab = paste0("Dim 2 (", variance.percent[2], "% )")
) +
  stat_mean(aes(color = cluster), size = 4)
ind.coord
zz
##############################
# Compute k-means with k = 3
set.seed(123)
res.km <- kmeans(scale(zz[, -5]), 3, nstart = 25)
# K-means clusters showing the group of each individual
res.km$cluster   # BUG FIX: was `res.km$cluste`, which silently returns NULL
zz[, -5]
data("iris")
df <- iris
head(df, 3)
df[, -5]
| /0724 trajectory similarity.R | no_license | llim913/2020-R-Trajectory-with-MoveVis-Move | R | false | false | 4,929 | r | #install.packages("trajectorySim", repos="http://R-Forge.R-project.org")
# (duplicate copy) Exploratory analysis: trajectory similarity / clustering of
# AIS ship tracks. Known-broken calls are flagged inline rather than removed.
#install.packages("SimilarityMeasures")
#install.packages("vegclust")
#install.packages("RColorBrewer")
#install.packages("smacof")
#install.packages("simmer")
#install.packages("MASS")
#install.packages("ggpubr")
#install.packages("factoextra")
methods(class = "trajectory")
library(vegclust)
library(RColorBrewer)
library(smacof)
library(SimilarityMeasures)
library(trajectorySim)
library(simmer)
library(leaflet)
library(MASS)
library(ggpubr)
library(factoextra)
library(dplyr)
help(SimilarityMeasures)
help(trajectorySim)
help(timeout)

# Interactively pick the AIS export; drop columns not needed for the analysis.
file <- choose.files()
file
df <- read.csv(file, header = TRUE)
as <- subset(df, select = -c(X, Length, Beam, TRGT_SENSOR_KIND, AREA_ID, MSG_ID, LOC_ACCRCY))
head(as)
tail(as)

# "doesn't work" (translated from the original Korean comment):
# simmer's trajectory() builds a simulation recipe, not a geographic track.
t0 <- trajectory() %>%
  seize("res0", 1) %>%
  branch(function() 1, c(TRUE, FALSE),
         trajectory() %>%
           clone(2,
                 trajectory() %>%
                   seize("res1", 1) %>%
                   timeout(1) %>%
                   release("res1", 1),
                 trajectory() %>%
                   trap("signal",
                        handler = trajectory() %>%
                          timeout(1)) %>%
                   timeout(1)),
         trajectory() %>%
           set_attribute("dummy", 1) %>%
           seize("res2", function() 1) %>%
           timeout(function() rnorm(1, 20)) %>%
           release("res2", function() 1) %>%
           release("res0", 1) %>%
           rollback(11)) %>%
  synchronize() %>%
  rollback(2) %>%
  release("res0", 1)

# Build a "lat,lon" string per observation.
as$trj <- paste(as$LAT_VAL, as$LON_VAL, sep = ',')
as
#par(mar=c(4,4,1,1))
#trajectoryPCoA(as, as$stype, surveys, traj.colors = c("black","red", "blue"), lwd = 2)
#legend("topleft", col=c("black","red", "blue"),
#       legend=c("Trajectory 1", "Trajectory 2", "Trajectory 3"), bty="n", lty=1, lwd = 2)
trajectoryLengths(as, as$SHIP_ID, surveys)          # NOTE(review): `surveys` is undefined here
s1 = step1measures(as, example.data$time, ID = TRUE)  # NOTE(review): `example.data` is undefined here
s2 = step2factors(as)
install.packages("traj")
library(traj)
s1 = step1measures(as$trj, as$RECV_DT, ID = TRUE)
s2 = step2factors(as)
###########################################################################
file <- choose.files()
file
df <- read.csv(file, header = TRUE)
as <- subset(df, select = -c(X, Length, Beam, TRGT_SENSOR_KIND, AREA_ID, MSG_ID, LOC_ACCRCY))
head(as)
tail(as)
# Setup data and time
zz <- subset(df, select = -c(X, Length, ShipName, stype, Beam, TRGT_SENSOR_KIND, AREA_ID, MSG_ID, LOC_ACCRCY))
head(zz)
data = zz$SOG_VAL
time = zz$RECV_DT
data
time
# Run step1measures, step2factors and step3clusters
s1 = step1measures(data, time, SHIP_ID = TRUE)   # NOTE(review): traj::step1measures takes `ID=`, not `SHIP_ID=` -- confirm
s2 = step2factors(s1)
s3 = step3clusters(s2)
# Print and plot "traj object"
s3
plot(s3)
###########################################################
a1 <- filter(zz, SHIP_ID == 440004750)          # extract rows for one ship (filter)
a2 <- select(a1, LON_VAL, LAT_VAL, SOG_VAL)     # extract columns (select)
library(tidyr)
a3 <- spread(a2, SHIP_ID)   # cases to variables (spread); NOTE(review): SHIP_ID was dropped by select() above
a1 <- zz[duplicated(zz$SHIP_ID), ]
tail(a1)
############################ https://www.datanovia.com/en/blog/k-means-clustering-visualization-in-r-step-by-step-guide/
data("iris")
dd <- iris
head(zz, 3)
# Compute k-means with k = 3
set.seed(123)
res.km <- kmeans(scale(zz[, 1]), 3, nstart = 25)
# K-means clusters showing the group of each individuals
tail(res.km$cluster)
fviz_cluster(res.km, data = zz[, 1],
             palette = c("#2E9FDF", "#00AFBB"),
             geom = "point",
             ellipse.type = "convex",
             ggtheme = theme_bw()
)
# Dimension reduction using PCA
res.pca <- prcomp(dd[, 1], scale = TRUE)
# Coordinates of individuals
ind.coord <- as.data.frame(get_pca_ind(res.pca)$coord)
# Add clusters obtained using the K-means algorithm
ind.coord$cluster <- factor(res.km$cluster)
# Add Species groups from the original data set
ind.coord$Species <- df$Species
# Data inspection
head(ind.coord)
# Percentage of variance explained by dimensions
eigenvalue <- round(get_eigenvalue(res.pca), 1)
variance.percent <- eigenvalue$variance.percent
head(eigenvalue)
ggscatter(
  ind.coord, x = "Dim.1", y = "Dim.2",
  color = "cluster", palette = "npg", ellipse = TRUE, ellipse.type = "convex",
  shape = "cluster", size = 1.5, legend = "right", ggtheme = theme_bw(),
  xlab = paste0("Dim 1 (", variance.percent[1], "% )"),
  ylab = paste0("Dim 2 (", variance.percent[2], "% )")
) +
  stat_mean(aes(color = cluster), size = 4)
ind.coord
zz
##############################
# Compute k-means with k = 3
set.seed(123)
res.km <- kmeans(scale(zz[, -5]), 3, nstart = 25)
# K-means clusters showing the group of each individual
res.km$cluster   # BUG FIX: was `res.km$cluste`, which silently returns NULL
zz[, -5]
data("iris")
df <- iris
head(df, 3)
df[, -5]
|
#' Raw Read for data corresponding to the Schuur dataset
#'
#' Download and read in the raw data tables from Hicks-Pries C. and Bonanza Creek LTER (2009).
#' Identifier: knb-lter-bnz.366.16
#'
#' This is not post-processed or QA/QCed by this package.
#'
#' @param dataDir filename for download directory
#' @param download boolean that will download the files from repository
#'
#' @return a list with meta data and raw table reads. This will be ____ KB in size.
#' @export
#' @importFrom readr read_csv
#url provided is stale and cannot be downloaded, see ##TODO under data_reports/107_Schuur.Rmd
#contacting of researchers is necessary
readSchuur2009 <- function(dataDir, download=TRUE, verbose=FALSE){
  # Expected local files and their source URLs. stringsAsFactors = FALSE keeps
  # the paths as character on R < 4.0 (factors would break download.file()).
  # NOTE(review): the download URLs are empty because the repository link is
  # stale (see the comments above); download.file() will fail until working
  # URLs are supplied by the researchers.
  urlTable <- data.frame(fileName = c(file.path(dataDir, 'SchuurData.txt'),
                                      file.path(dataDir, 'SchuurMetaData.xml')),
                         downloadURL = c('',
                                         ''),
                         stringsAsFactors = FALSE)
  # Fetch each file only when it is absent locally and downloading is enabled.
  # (BUG FIX: the `download` and `verbose` arguments were previously ignored.)
  if(download){
    for(ii in seq_len(nrow(urlTable))){
      dataFile <- urlTable$fileName[ii]
      if(!file.exists(dataFile)){
        download.file(urlTable$downloadURL[ii], destfile = dataFile, quiet = !verbose)
      }
    }
  }
  # Read the raw tables; the .xml metadata file is also read with read_csv
  # (no XML parsing is attempted at this raw-read stage).
  readSchuurdata <- readr::read_csv(urlTable$fileName[1])
  readSchuurMetadata <- readr::read_csv(urlTable$fileName[2])
  # Assemble the standard raw-read return structure.
  ans <- list(downloadFiles = c(urlTable$fileName[1],
                                urlTable$fileName[2]),
              file.txt = readSchuurdata,
              file.xml = readSchuurMetadata,
              licenseShort = "",
              licenseFull = "",
              citation = c("Caitlin Elizabeth Hicks-Pries and Bonanza Creek LTER. 2009. The impact of permafrost thaw on ecosystem carbon balance: Eight Mile Lake soil carbon and nitrogen. LTER Network Member Node. knb-lter-bnz.366.16."),
              abstract = c("In this larger study, we are asking the question: Is old carbon that comprises the bulk of the soil organic matter pool released in response to thawing of permafrost? We are answering this question by using a combination of field and laboratory experiments to measure radiocarbon isotope ratios in soil organic matter, soil respiration, and dissolved organic carbon, in tundra ecosystems. The objective of these proposed measurements is to develop a mechanistic understanding of the SOM sources contributing to C losses following permafrost thawing. We are making these measurements at an established tundra field site near Healy, Alaska in the foothills of the Alaska Range. Field measurements center on a natural experiment where permafrost has been observed to warm and thaw over the past several decades. This area represents a gradient of sites each with a different degree of change due to permafrost thawing. As such, this area is unique for addressing questions at the time and spatial scales relevant for change in arctic ecosystems. In this data set, thaw gradient site soil carbon and nitrogen inventories are reported by depth layer for both organic and mineral horizons. The inventories include % Carbon, % Nitrogen, bulk density, gC/m2, gN/m2, 13C, and 14N."))
  return(ans)
} | /R/readSchuur2009.R | permissive | kaleydodson/SOCDRaHR2 | R | false | false | 3,592 | r | #' Raw Read for data corresponding to the Schuur dataset
#'
#' Download and read in the raw data tables from Hicks-Pries C. and Bonanza Creek LTER (2009).
#' Identifier: knb-lter-bnz.366.16
#'
#' This is not post-processed or QA/QCed by this package.
#'
#' @param dataDir filename for download directory
#' @param download boolean that will download the files from repository
#'
#' @return a list with meta data and raw table reads. This will be ____ KB in size.
#' @export
#' @importFrom readr read_csv
#url provided is stale and cannot be downloaded, see ##TODO under data_reports/107_Schuur.Rmd
#contacting of researchers is necessary
readSchuur2009 <- function(dataDir, download=TRUE, verbose=FALSE){
  # Expected local files and their source URLs. stringsAsFactors = FALSE keeps
  # the paths as character on R < 4.0 (factors would break download.file()).
  # NOTE(review): the download URLs are empty because the repository link is
  # stale (see the comments above); download.file() will fail until working
  # URLs are supplied by the researchers.
  urlTable <- data.frame(fileName = c(file.path(dataDir, 'SchuurData.txt'),
                                      file.path(dataDir, 'SchuurMetaData.xml')),
                         downloadURL = c('',
                                         ''),
                         stringsAsFactors = FALSE)
  # Fetch each file only when it is absent locally and downloading is enabled.
  # (BUG FIX: the `download` and `verbose` arguments were previously ignored.)
  if(download){
    for(ii in seq_len(nrow(urlTable))){
      dataFile <- urlTable$fileName[ii]
      if(!file.exists(dataFile)){
        download.file(urlTable$downloadURL[ii], destfile = dataFile, quiet = !verbose)
      }
    }
  }
  # Read the raw tables; the .xml metadata file is also read with read_csv
  # (no XML parsing is attempted at this raw-read stage).
  readSchuurdata <- readr::read_csv(urlTable$fileName[1])
  readSchuurMetadata <- readr::read_csv(urlTable$fileName[2])
  # Assemble the standard raw-read return structure.
  ans <- list(downloadFiles = c(urlTable$fileName[1],
                                urlTable$fileName[2]),
              file.txt = readSchuurdata,
              file.xml = readSchuurMetadata,
              licenseShort = "",
              licenseFull = "",
              citation = c("Caitlin Elizabeth Hicks-Pries and Bonanza Creek LTER. 2009. The impact of permafrost thaw on ecosystem carbon balance: Eight Mile Lake soil carbon and nitrogen. LTER Network Member Node. knb-lter-bnz.366.16."),
              abstract = c("In this larger study, we are asking the question: Is old carbon that comprises the bulk of the soil organic matter pool released in response to thawing of permafrost? We are answering this question by using a combination of field and laboratory experiments to measure radiocarbon isotope ratios in soil organic matter, soil respiration, and dissolved organic carbon, in tundra ecosystems. The objective of these proposed measurements is to develop a mechanistic understanding of the SOM sources contributing to C losses following permafrost thawing. We are making these measurements at an established tundra field site near Healy, Alaska in the foothills of the Alaska Range. Field measurements center on a natural experiment where permafrost has been observed to warm and thaw over the past several decades. This area represents a gradient of sites each with a different degree of change due to permafrost thawing. As such, this area is unique for addressing questions at the time and spatial scales relevant for change in arctic ecosystems. In this data set, thaw gradient site soil carbon and nitrogen inventories are reported by depth layer for both organic and mineral horizons. The inventories include % Carbon, % Nitrogen, bulk density, gC/m2, gN/m2, 13C, and 14N."))
  return(ans)
} |
#' Medians of repeated random samples from a vector
#'
#' Draws `reps` random samples of size `n` from `vec` (one call to
#' `sample_median()` per repetition) and collects the resulting medians.
#'
#' @param vec vector to sample from
#' @param n integer sample size for each draw
#' @param reps integer number of repetitions
#'
#' @return numeric vector of length `reps` holding the sample medians
#'
#' @import stringr
#' @import dplyr
#' @import glue
#' @import purrr
#'
#' @export
many_sample_medians <- function(vec, n, reps){
  # replicate() re-evaluates sample_median(vec, n) `reps` times
  medians <- replicate(reps, sample_median(vec, n))
  return(medians)
}
| /R/many_sample_medians.R | permissive | nadav-levanoni/CLT_Helper | R | false | false | 369 | r | #' gets median of sample
#'
#' @param a vector
#' @param integer sample size
#' @param integer number of repetitions
#'
#'
#' @return medians of the vector samples
#'
#' @import stringr
#' @import dplyr
#' @import glue
#' @import purrr
#'
#' @export
many_sample_medians <- function(vec, n, reps){
medians <- replicate(reps, sample_median(vec, n))
return(medians)
}
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Functions that evaluate model effectiveness and variable importance
VarShare <- function(rf.obj, members) {
  # Share of forest splits attributable to each predictor in `members`
  # See http://stats.stackexchange.com/q/92419/19676
  # table() over bestvar counts splits per predictor index; [-1] drops the
  # first category (terminal nodes)
  split_counts <- table(rf.obj$forest$bestvar)[-1]
  names(split_counts) <- names(rf.obj$forest$ncat)
  member_counts <- split_counts[members]
  member_counts / sum(member_counts)
}
GroupImportance <- function(rf.obj, groups) {
  # Calculates importance of groupings of predictors in a random forests model
  # Adapted from http://stats.stackexchange.com/q/92419/19676
  #
  # Args:
  #   rf.obj: Random forests model object
  #   groups: List of variable grouping vectors, e.g.
  #           list(Sepal=c("Sepal.Width", "Sepal.Length"),
  #                Petal=c("Petal.Width", "Petal.Length"))
  #
  # Returns:
  #   One-column matrix (column "MeanDecreaseGini") with one row per group,
  #   giving each group's split-share-weighted sum of Gini importance
  # vapply (rather than sapply) guarantees a numeric vector regardless of
  # input shape, so as.matrix() always yields a one-column numeric matrix
  var.imp <- as.matrix(vapply(groups, function(g) {
    sum(importance(rf.obj, 2)[g, ] * VarShare(rf.obj, g))
  }, numeric(1)))
  colnames(var.imp) <- "MeanDecreaseGini"
  return(var.imp)
}
BrierScore <- function(observed, pred) {
  # Calculates Brier score, a MSE analog for binomial responses
  #
  # Args:
  #   observed: Vector of 1/0 or T/F indicating observed values
  #   pred: Vector of predicted probabilities
  #
  # Returns:
  #   Number indicating model's Brier score (lower is better); NA pairs are
  #   dropped before averaging
  # TRUE spelled out (was `T`, which is reassignable and thus unsafe)
  return(mean((pred - observed) ^ 2, na.rm = TRUE))
}
| /R/evaluate_models.R | permissive | datasciences/glassbox | R | false | false | 1,947 | r | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Functions that evaluate model effectiveness and variable importance
VarShare <- function(rf.obj, members) {
# Calculates share of splits in the forest involving predictors
# See http://stats.stackexchange.com/q/92419/19676
count <- table(rf.obj$forest$bestvar)[-1]
names(count) <- names(rf.obj$forest$ncat)
share <- count[members] / sum(count[members])
return(share)
}
GroupImportance <- function(rf.obj, groups) {
# Calculates importance of groupings of predictors in a random forests model
# Adapted from http://stats.stackexchange.com/q/92419/19676
#
# Args:
# rf.obj: Random forests model object
# groups: List of variable grouping vectors, e.g.
# list(Sepal=c("Sepal.Width", "Sepal.Length"),
# Petal=c("Petal.Width", "Petal.Length"))
#
# Returns:
# Matrix o
var.imp <- as.matrix(sapply(groups, function(g) {
sum(importance(rf.obj, 2)[g, ] * VarShare(rf.obj, g))
}))
colnames(var.imp) <- "MeanDecreaseGini"
return(var.imp)
}
BrierScore <- function(observed, pred) {
# Calculates Brier score, a MSE analog for binomial responses
#
# Args:
# observed: Vector of 1/0 or T/F indicating observed values
# pred: Vector of predicted probabilities
#
# Returns:
# Number indicating model's Brier score
return(mean((pred - observed) ^ 2, na.rm=T))
}
|
# Helper script: fetch the nhdplusTools sample geopackage into a temp cache.
# utils.R (shipped in the package's extdata) defines download_pkg_data().
source(system.file("extdata", "utils.R", package = "nhdplusTools"))
# Per-session cache directory for downloaded sample data
data_dir <- file.path(tempdir(check = TRUE), "nhdplusTools")
# presumably skips the download when the file is already cached in data_dir;
# confirm against download_pkg_data() in utils.R
download_pkg_data("sample_natseamless.gpkg",
"https://doi-usgs.github.io/nhdplusTools/data/sample_natseamless.gpkg",
data_dir)
# Path to the sample geopackage, consumed by downstream examples/vignettes
sample_data <- file.path(data_dir, "sample_natseamless.gpkg")
| /inst/extdata/sample_data.R | permissive | cran/nhdplusTools | R | false | false | 366 | r | source(system.file("extdata", "utils.R", package = "nhdplusTools"))
data_dir <- file.path(tempdir(check = TRUE), "nhdplusTools")
download_pkg_data("sample_natseamless.gpkg",
"https://doi-usgs.github.io/nhdplusTools/data/sample_natseamless.gpkg",
data_dir)
sample_data <- file.path(data_dir, "sample_natseamless.gpkg")
|
`ep` <-
function(mu, R, rho, n, isd=NULL, nRep=1, seed=NULL, crit=1e-6, maxiter=20)
{
# Simulate correlated multivariate binary responses: build (or reuse) an
# intermediate structure via isd(), then draw nRep replicate response
# matrices via isd.to.y(). Exactly one of `isd`, `rho` (with scalar `mu`
# and cluster size `n`), or `R` (with vector `mu`) must be supplied.
#
# Args:
#   mu: marginal success probabilities in [0, 1] (scalar with `rho`/`n`,
#       vector with `R`); may be omitted when `isd` is supplied
#   R: correlation matrix (used when supplied and `rho` is missing)
#   rho: common exchangeable correlation in [-1, 1]
#   n: cluster size (number of binary variates per observation)
#   isd: precomputed intermediate structure from isd(); bypasses mu/R/rho
#   nRep: number of replicate draws (>= 1)
#   seed: optional RNG seed forwarded to isd.to.y()
#   crit: convergence criterion (> 0), forwarded to isd()/isd.to.y()
#   maxiter: maximum iterations (>= 1), forwarded to isd()/isd.to.y()
#
# Returns:
#   list(y = responses with columns named "y1", "y2", ..., isd = the
#   intermediate structure used to generate them)
if (nRep < 1 ) stop("Number of replications must be at least 1")
if (crit <= 0 ) stop("Precision criterion must be greater than zero")
if (maxiter < 1) stop("Maximum number of iterations must be at least 1")
if ( missing(mu) )
{
# If ISD is present, then use it!
if ( !is.null(isd) )
{
#Making default response names.
# NOTE(review): when isd$n is present this reads the *argument* `n`, which
# errors if `n` was not supplied -- presumably isd$n was intended; confirm.
if ( !is.null(isd$n) ) {lgth <- n }
else {lgth <- length( isd$mu) }
cnames <- rep(NA, lgth)
for (i in 1:lgth){ cnames[i] <- paste("y", i, sep="") }
# isd.to.y() returns draws in a flat layout; reshape to one column per variate
y <- matrix( isd.to.y(isd=isd, nRep=nRep, crit=crit, maxiter=maxiter, seed=seed),ncol=length(isd$mu) )
rownames(y) <- 1:nrow(y)
colnames(y) <- cnames
return( list(y=data.frame(y), isd=isd) )
}
else {stop("mu is a required argument")}
}
# BEGIN: generating default response names .
if ( !missing(n) ) { m <- n }
else { m <- length(mu) }
cnames <- rep(NA, m)
for(i in 1:m)
{
cnames[i] <- paste("y", i, sep="")
}
# END: generating default response names .
# Validate the marginal probabilities before building the structure
if ( (any(mu < 0) ) | (any(mu > 1 ) ) ) stop("The vector `mu' is out of range or degenerate")
if ( (any(mu == 0) ) | (any(mu == 1) ) ) warning("At least one member of mu is degenerate (0 or 1)")
# If rho is specified, then use it!
if ( !missing(rho) )
{
if((rho > 1) | (rho < -1)) { stop("Your correlation must adhere to -1 <= rho <= 1 ") }
if (missing(n)) {stop("The cluster size ``n'' is needed")}
if (length(mu) > 1){stop("``mu'' must be a vector of length 1")}
# Exchangeable case: cluster of size n, common mean mu, common correlation rho
isd1 <- isd( mu=mu, rho=rho, p=n, crit=crit, maxiter=maxiter )
y <- isd.to.y(isd=isd1, nRep=nRep, crit=crit, maxiter=maxiter, seed=seed)
rownames(y) <- 1:nrow(y)
colnames(y) <- cnames
return(list(y=y, isd=isd1))
}
# If R is specified, then use it!
if (!missing(R))
{
# General case: full correlation matrix R with mean vector mu
isd1 <- isd( mu=mu, R=R, crit=crit, maxiter=maxiter )
y <- isd.to.y(isd=isd1, nRep=nRep, crit=crit, maxiter=maxiter, seed=seed)
rownames(y) <- 1:nrow(y)
colnames(y) <- cnames
return(list(y=y, isd=isd1))
}
else
{
stop("You need to provide either `rho', `R', or `isd' ")
}
}
| /R/ep.R | no_license | cran/mvtBinaryEP | R | false | false | 2,505 | r | `ep` <-
function(mu, R, rho, n, isd=NULL, nRep=1, seed=NULL, crit=1e-6, maxiter=20)
{
if (nRep < 1 ) stop("Number of replications must be at least 1")
if (crit <= 0 ) stop("Precision criterion must be greater than zero")
if (maxiter < 1) stop("Maximum number of iterations must be at least 1")
if ( missing(mu) )
{
# If ISD is present, then use it!
if ( !is.null(isd) )
{
#Making default response names.
if ( !is.null(isd$n) ) {lgth <- n }
else {lgth <- length( isd$mu) }
cnames <- rep(NA, lgth)
for (i in 1:lgth){ cnames[i] <- paste("y", i, sep="") }
y <- matrix( isd.to.y(isd=isd, nRep=nRep, crit=crit, maxiter=maxiter, seed=seed),ncol=length(isd$mu) )
rownames(y) <- 1:nrow(y)
colnames(y) <- cnames
return( list(y=data.frame(y), isd=isd) )
}
else {stop("mu is a required argument")}
}
# BEGIN: generating default response names .
if ( !missing(n) ) { m <- n }
else { m <- length(mu) }
cnames <- rep(NA, m)
for(i in 1:m)
{
cnames[i] <- paste("y", i, sep="")
}
# END: generating default response names .
if ( (any(mu < 0) ) | (any(mu > 1 ) ) ) stop("The vector `mu' is out of range or degenerate")
if ( (any(mu == 0) ) | (any(mu == 1) ) ) warning("At least one member of mu is degenerate (0 or 1)")
# If rho is specified, then use it!
if ( !missing(rho) )
{
if((rho > 1) | (rho < -1)) { stop("Your correlation must adhere to -1 <= rho <= 1 ") }
if (missing(n)) {stop("The cluster size ``n'' is needed")}
if (length(mu) > 1){stop("``mu'' must be a vector of length 1")}
isd1 <- isd( mu=mu, rho=rho, p=n, crit=crit, maxiter=maxiter )
y <- isd.to.y(isd=isd1, nRep=nRep, crit=crit, maxiter=maxiter, seed=seed)
rownames(y) <- 1:nrow(y)
colnames(y) <- cnames
return(list(y=y, isd=isd1))
}
# If R is specified, then use it!
if (!missing(R))
{
isd1 <- isd( mu=mu, R=R, crit=crit, maxiter=maxiter )
y <- isd.to.y(isd=isd1, nRep=nRep, crit=crit, maxiter=maxiter, seed=seed)
rownames(y) <- 1:nrow(y)
colnames(y) <- cnames
return(list(y=y, isd=isd1))
}
else
{
stop("You need to provide either `rho', `R', or `isd' ")
}
}
|
## ----global_options, include = FALSE------------------------------------------------------------------------------------------------------------------------------
# Purled knitr chunk: load the project-level .Rprofile when present; try()
# keeps the script running if the file is absent.
try(source("../../.Rprofile"))
## import platform as platform
## print(platform.release())
## # This assums using an EC2 instance where amzn is in platform name
## if 'amzn' in platform.release():
## s3_status = True
## else:
## s3_status = False
## print(s3_status)
## import boto3
## s3 = boto3.client('s3')
## spn_local_path_file_name = "C:/Users/fan/Py4Econ/aws/setup/_data/iris_s3.dta"
## str_bucket_name = "fans3testbucket"
## spn_remote_path_file_name = "_data/iris_s3.dta"
## s3.upload_file(spn_local_path_file_name, str_bucket_name, spn_remote_path_file_name)
## import boto3
## s3 = boto3.client('s3')
## spn_local_path_file_name = "C:/Users/fan/Py4Econ/aws/setup/_data/iris_s3_downloaded.dta"
## str_bucket_name = "fans3testbucket"
## spn_remote_path_file_name = "_data/iris_s3.dta"
## s3.download_file(str_bucket_name, spn_remote_path_file_name, spn_local_path_file_name)
## # ssh into EC2 linux 2 AMI
## ssh -i "G:/repos/ThaiJMP/boto3aws/aws_ec2/pem/fan_wang-key-pair-us_east_nv.pem" ec2-user@3.81.101.142
## # generate data folder
## mkdir data
## # install boto3
## sudo yum install python-pip python3-wheel && Pip install boto3 --user
## # try download file using boto3
## # go into python
## python
## import boto3
## s3 = boto3.client('s3')
## spn_ec2_path_file_name = "/home/ec2-user/data/iris_s3_downloaded.dta"
## str_bucket_name = "fans3testbucket"
## spn_s3_path_file_name = "_data/iris_s3.dta"
## s3.download_file(str_bucket_name, spn_s3_path_file_name, spn_ec2_path_file_name)
## # inside EC2 AMI Linux 2, start dockers
## sudo service docker start
## sudo service docker status
## # see docker images
## docker images
## # run docker container and enter inside
## docker run -t -i fanconda /bin/bash
## # make a data directory and a esti subdirectory
## mkdir data
## cd data
## mkdir esti
## # enter python
## python
## import boto3
## s3 = boto3.client('s3')
## spn_container_path_file_name = "/data/esti/iris_s3_downloaded.dta"
## str_bucket_name = "fans3testbucket"
## spn_s3_path_file_name = "_data/iris_s3.dta"
## s3.download_file(str_bucket_name, spn_s3_path_file_name, spn_container_path_file_name)
## import os
## # This generates a file directly under bucket _data\iris_s3:
## spn_remote_path_file_name_backslash = "_data\\iris_s3_slashbackforward.dta"
## s3.upload_file(spn_local_path_file_name, str_bucket_name, spn_remote_path_file_name_backslash)
## # This allows the folder structure to be clickable:
## spn_remote_path_file_name_forwardslash = spn_remote_path_file_name_backslash.replace(os.sep, '/')
## s3.upload_file(spn_local_path_file_name, str_bucket_name, spn_remote_path_file_name_forwardslash)
## # Print slashs
## print(f'{spn_remote_path_file_name_backslash=}')
## print(f'{spn_remote_path_file_name_forwardslash=}')
## # CD into a directory
## cd /d "G:\S3\fanconda202010\esti"
## # Make a new directory making S3 Directory Name
## mkdir e_20201025x_esr_medtst_list_tKap_mlt_ce1a2
## # cd into the directory just made
## cd /d "G:\S3\thaijmp202010\esti\e_20201025x_esr_medtst_list_tKap_mlt_ce1a2"
## # copy all results from the s3 folder's subfolders including subfolders, excluding images
## aws s3 cp ^
## s3://fanconda202010/esti/e_20201025x_esr_medtst_list_tKap_mlt_ce1a2/ . ^
## --recursive --exclude "*.png"
| /aws/s3/htmlpdfr/fs_aws_s3.R | permissive | FanWangEcon/Py4Econ | R | false | false | 3,532 | r | ## ----global_options, include = FALSE------------------------------------------------------------------------------------------------------------------------------
try(source("../../.Rprofile"))
## import platform as platform
## print(platform.release())
## # This assums using an EC2 instance where amzn is in platform name
## if 'amzn' in platform.release():
## s3_status = True
## else:
## s3_status = False
## print(s3_status)
## import boto3
## s3 = boto3.client('s3')
## spn_local_path_file_name = "C:/Users/fan/Py4Econ/aws/setup/_data/iris_s3.dta"
## str_bucket_name = "fans3testbucket"
## spn_remote_path_file_name = "_data/iris_s3.dta"
## s3.upload_file(spn_local_path_file_name, str_bucket_name, spn_remote_path_file_name)
## import boto3
## s3 = boto3.client('s3')
## spn_local_path_file_name = "C:/Users/fan/Py4Econ/aws/setup/_data/iris_s3_downloaded.dta"
## str_bucket_name = "fans3testbucket"
## spn_remote_path_file_name = "_data/iris_s3.dta"
## s3.download_file(str_bucket_name, spn_remote_path_file_name, spn_local_path_file_name)
## # ssh into EC2 linux 2 AMI
## ssh -i "G:/repos/ThaiJMP/boto3aws/aws_ec2/pem/fan_wang-key-pair-us_east_nv.pem" ec2-user@3.81.101.142
## # generate data folder
## mkdir data
## # install boto3
## sudo yum install python-pip python3-wheel && Pip install boto3 --user
## # try download file using boto3
## # go into python
## python
## import boto3
## s3 = boto3.client('s3')
## spn_ec2_path_file_name = "/home/ec2-user/data/iris_s3_downloaded.dta"
## str_bucket_name = "fans3testbucket"
## spn_s3_path_file_name = "_data/iris_s3.dta"
## s3.download_file(str_bucket_name, spn_s3_path_file_name, spn_ec2_path_file_name)
## # inside EC2 AMI Linux 2, start dockers
## sudo service docker start
## sudo service docker status
## # see docker images
## docker images
## # run docker container and enter inside
## docker run -t -i fanconda /bin/bash
## # make a data directory and a esti subdirectory
## mkdir data
## cd data
## mkdir esti
## # enter python
## python
## import boto3
## s3 = boto3.client('s3')
## spn_container_path_file_name = "/data/esti/iris_s3_downloaded.dta"
## str_bucket_name = "fans3testbucket"
## spn_s3_path_file_name = "_data/iris_s3.dta"
## s3.download_file(str_bucket_name, spn_s3_path_file_name, spn_container_path_file_name)
## import os
## # This generates a file directly under bucket _data\iris_s3:
## spn_remote_path_file_name_backslash = "_data\\iris_s3_slashbackforward.dta"
## s3.upload_file(spn_local_path_file_name, str_bucket_name, spn_remote_path_file_name_backslash)
## # This allows the folder structure to be clickable:
## spn_remote_path_file_name_forwardslash = spn_remote_path_file_name_backslash.replace(os.sep, '/')
## s3.upload_file(spn_local_path_file_name, str_bucket_name, spn_remote_path_file_name_forwardslash)
## # Print slashs
## print(f'{spn_remote_path_file_name_backslash=}')
## print(f'{spn_remote_path_file_name_forwardslash=}')
## # CD into a directory
## cd /d "G:\S3\fanconda202010\esti"
## # Make a new directory making S3 Directory Name
## mkdir e_20201025x_esr_medtst_list_tKap_mlt_ce1a2
## # cd into the directory just made
## cd /d "G:\S3\thaijmp202010\esti\e_20201025x_esr_medtst_list_tKap_mlt_ce1a2"
## # copy all results from the s3 folder's subfolders including subfolders, excluding images
## aws s3 cp ^
## s3://fanconda202010/esti/e_20201025x_esr_medtst_list_tKap_mlt_ce1a2/ . ^
## --recursive --exclude "*.png"
|
#!/applications/R/R-3.5.0/bin/Rscript
# Perform hypergeometric tests to determine whether each HudsonRM_all
# gene quantile is over-represented or under-represented for
# NLR-encoding genes (as defined by Ksenia Krasileva) and, separately,
# meiotic-module genes (as defined in Alabdullah et al. 2019 Front. Plant Sci.)
# (i.e., is the proportion of NLR-encoding genes or meiotic-module genes
# contained within each gene quantile significantly greater or smaller than expected by chance
# based on the hypergeometric distribution?)
# P-value is the probability of drawing >= length(quantile_NLRs) [x] features
# in a sample size of length(quantile_genes) [k] from a total feature set consisting of
# length(genome_NLRs) [m] + ( length(genome_genes) - length(genome_NLRs)) [n]
# Usage
# ./proportion_NLRs_in_gene_quantiles_hypergeometricTest_HudsonRM.R 'HudsonRM_all' 'bodies' 1 2 'genomewide' 'Agenome_Bgenome_Dgenome' 100000
library(methods)
library(plotrix)
library(ggplot2)
library(ggbeeswarm)
library(ggthemes)
library(grid)
library(gridExtra)
library(extrafont)
#libName <- "HudsonRM_all"
#featRegion <- "bodies"
#quantileFirst <- 1
#quantileLast <- 2
#region <- "genomewide"
#genomeName <- "Agenome_Bgenome_Dgenome"
#samplesNum <- 100000
args <- commandArgs(trailingOnly = TRUE)
libName <- args[1]
featRegion <- args[2]
quantileFirst <- as.integer(args[3])
quantileLast <- as.integer(args[4])
region <- args[5]
genomeName <- args[6]
samplesNum <- as.numeric(args[7])
pop_name_plot <- c(
"Africa",
"Middle East",
"Asia",
"Former SU",
"Eastern Europe",
"Western Europe",
"North America",
"Central America",
"South America",
"Oceania"
)
pop_name <- gsub(" ", "", pop_name_plot)
outDir <- paste0("quantiles_by_", libName, "/hypergeometricTests/")
plotDir <- paste0(outDir, "plots/")
system(paste0("[ -d ", outDir, " ] || mkdir ", outDir))
system(paste0("[ -d ", plotDir, " ] || mkdir ", plotDir))
# Define quantile colours
quantileColours <- c("red", "navy")
# Apply an alpha value (0-255, default 180 = mostly opaque) to one or more
# colours, returning #RRGGBBAA hex strings
makeTransparent <- function(thisColour, alpha = 180)
{
  channel_mat <- col2rgb(thisColour)  # 3 x n matrix, rows red/green/blue
  apply(channel_mat, 2, function(channels) {
    rgb(red = channels["red"], green = channels["green"],
        blue = channels["blue"], alpha = alpha, maxColorValue = 255)
  })
}
quantileColours <- makeTransparent(quantileColours)
for(p in 1:length(pop_name)) {
# Load feature quantiles
featuresDF <- read.table(paste0(sub("hypergeometricTests/", "", outDir),
"/", pop_name[p], "/features_", quantileLast, "quantiles_by_",
libName, "_of_genes_in_",
genomeName, "_", region, "_", pop_name[p], ".txt"),
header = T, sep = "\t", row.names = NULL, stringsAsFactors = F)
featuresDF$featureID <- sub(pattern = "\\.\\d+", replacement = "",
x = featuresDF$featureID)
genome_genes <- featuresDF$featureID
quantile_genes_list <- lapply(quantileFirst:quantileLast, function(x) {
featuresDF[featuresDF$quantile == paste0("Quantile ", x),]$featureID
})
rm(featuresDF); gc()
# Load NLRs
#NLRs <- read.table(paste0("/home/ajt200/analysis/wheat/annotation/221118_download/iwgsc_refseqv1.1_genes_2017July06/",
# "NLRs_Krasileva/NB_ARC_genes_IWGSC_v1_Ksenia_Krasileva_representative_mRNA.gff3"),
# header = F, stringsAsFactors = F)
NLRs <- read.table(paste0("/home/ajt200/analysis/wheat/annotation/221118_download/iwgsc_refseqv1.1_genes_2017July06/",
"NLRs_Steuernagel_Wulff_2020_Plant_Physiol/NLR_genes_complete_representative_mRNA.gff3"),
header = F, stringsAsFactors = F)
chrs <- paste0(rep("chr", 21), rep(1:7, 3),
c(rep("A", 7), rep("B", 7), rep("D", 7)))
genomeLetter <- unlist(strsplit(gsub("genome", "", genomeName), split = "_"))
# Subset NLRs to only those within a given subgenome
if(length(genomeLetter) == 1) {
chrs <- chrs[grepl(genomeLetter, chrs)]
NLRs <- NLRs[NLRs$V1 %in% chrs,]
}
# Replace gene model ID decimal suffix (e.g., ".1")
NLRs$V9 <- sub(pattern = "\\.\\d+", replacement = "",
x = NLRs$V9)
genome_NLRs <- as.character(NLRs$V9)
genome_NLRs <- intersect(genome_NLRs, genome_genes)
# Set class for hypergeometric test results object
setClass("hypergeomTest",
representation(alternative = "character",
alpha0.05 = "numeric",
pval = "numeric",
observed = "numeric",
expected = "numeric",
log2obsexp = "numeric",
log2alpha = "numeric",
quantile_genes = "numeric",
proportion_of_quantile = "numeric",
random_proportions_of_quantile = "numeric",
hypergeomDist = "numeric"))
# P-value is the probability of drawing >= length(quantile_NLRs) [x] features
# in a sample size of length(quantile_genes) [k] from a total feature set consisting of
# length(genome_NLRs) [m] + ( length(genome_genes) - length(genome_NLRs)) [n]
# From Karl Broman's answer at
# https://stats.stackexchange.com/questions/16247/calculating-the-probability-of-gene-list-overlap-between-an-rna-seq-and-a-chip-c:
# dhyper(x, m, n, k) gives the probability of drawing exactly x.
# So P-value is given by the sum of the probabilities of drawing
# length(quantile_NLRs) to length(quantile_genes)
#lapply(seq_along(quantile_genes_list), function(z) {
for(z in seq_along(quantile_genes_list)) {
quantile_genes <- quantile_genes_list[[z]]
# Get intersection of gene IDs in quantile z and gene IDs of NLRs
quantile_NLRs <- intersect(quantile_genes, genome_NLRs)
# Calculate the P-values for over-representation and under-representation
# of NLRs among quantile z genes
set.seed(2847502)
# Over-representation:
Pval_overrep <- sum(dhyper(x = length(quantile_NLRs):length(quantile_genes),
m = length(genome_NLRs),
n = length(genome_genes) - length(genome_NLRs),
k = length(quantile_genes)))
print(Pval_overrep)
# Or by 1 minus the sum of the probabilities of drawing 0:(length(quantile_NLRs)-1)
print(1 - sum(dhyper(x = 0:(length(quantile_NLRs)-1),
m = length(genome_NLRs),
n = length(genome_genes) - length(genome_NLRs),
k = length(quantile_genes))))
# Under-representation
Pval_underrep <- phyper(q = length(quantile_NLRs),
m = length(genome_NLRs),
n = length(genome_genes) - length(genome_NLRs),
k = length(quantile_genes))
print(Pval_underrep)
# Sample without replacement
hgDist <- rhyper(nn = samplesNum,
m = length(genome_NLRs),
n = length(genome_genes) - length(genome_NLRs),
k = length(quantile_genes))
# Calculate P-values and significance levels
if(length(quantile_NLRs) > mean(hgDist)) {
Pval <- Pval_overrep
MoreOrLessThanRandom <- "MoreThanRandom"
alpha0.05 <- quantile(hgDist, probs = 0.95)[[1]]
} else {
Pval <- Pval_underrep
MoreOrLessThanRandom <- "LessThanRandom"
alpha0.05 <- quantile(hgDist, probs = 0.05)[[1]]
}
hgTestResults <- new("hypergeomTest",
alternative = MoreOrLessThanRandom,
alpha0.05 = alpha0.05,
pval = Pval,
observed = length(quantile_NLRs),
expected = mean(hgDist),
log2obsexp = log2( length(quantile_NLRs) / mean(hgDist) ),
log2alpha = log2( alpha0.05 / mean(hgDist) ),
quantile_genes = length(quantile_genes),
proportion_of_quantile = length(quantile_NLRs) / length(quantile_genes),
random_proportions_of_quantile = hgDist / length(quantile_genes),
hypergeomDist = hgDist)
save(hgTestResults,
file = paste0(outDir,
"NLR_gene_representation_among_quantile", z, "_of_", quantileLast,
"_by_", libName, "_of_genes_in_",
genomeName, "_", region, "_hypergeomTestRes_", pop_name[p], ".RData"))
# Generate histogram
pdf(paste0(plotDir,
"NLR_gene_representation_among_quantile", z, "_of_", quantileLast,
"_by_", libName, "_of_genes_in_",
genomeName, "_", region, "_hypergeomTestRes_", pop_name[p], "_hist.pdf"),
height = 4.5, width = 5)
par(mar = c(3.1, 3.1, 4.1, 1.1),
mgp = c(1.85, 0.75, 0))
## Disable scientific notation (e.g., 0.0001 rather than 1e-04)
#options(scipen = 100)
# Calculate max density
maxDensityPlus <- max(density(hgTestResults@hypergeomDist)$y)*1.2
if(hgTestResults@alternative == "MoreThanRandom") {
xlim <- c(pmin(0, min(hgTestResults@hypergeomDist)/1.2),
pmax(hgTestResults@observed*1.2, hgTestResults@alpha0.05*1.2))
textX1 <- quantile(xlim, 0.25)[[1]]
# textX1 <- min(hgTestResults@hypergeomDist)/1.15
} else {
xlim <- c(pmin(0, hgTestResults@observed/1.2),
max(hgTestResults@hypergeomDist)*1.2)
textX1 <- quantile(xlim, 0.75)[[1]]
# textX1 <- min(hgTestResults@hypergeomDist)/1.15
}
hist(hgTestResults@hypergeomDist,
breaks = 50,
freq = FALSE,
col = "dodgerblue",
border = NA,
lwd = 2,
xlim = c(pretty(xlim)[1],
pretty(xlim)[length(pretty(xlim))]),
ylim = c(0,
maxDensityPlus),
xaxt = "n", yaxt = "n",
xlab = "", ylab = "", main = "",
axes = FALSE)
axis(side = 2,
at = pretty(density(hgTestResults@hypergeomDist)$y),
lwd = 2)
mtext(side = 2,
text = "Density",
line = 1.85)
axis(side = 1,
at = pretty(xlim),
lwd = 2)
mtext(side = 1,
text = bquote("Genes (" * .(pop_name_plot[p]) * ")"),
line = 1.85)
titleText <- list(bquote("NLR-encoding genes in" ~
.(libName) ~ "Quantile" ~ .(as.character(z)) ~
"(" * .(featRegion) * ") in" ~
.(gsub("_", " ", genomeName)) ~ .(region)),
bquote(italic("P")*" = "*
# .(as.character(round(hgTestResults@pval,
# digits = 6)))),
.(as.character(hgTestResults@pval))),
bquote("Samples (hypergeometric distribution) = "*.(prettyNum(length(hgTestResults@hypergeomDist),
big.mark = ",",
trim = T))))
mtext(do.call(expression, titleText), side = 3, line = 3:1, cex = c(0.7, 1, 1))
lines(density(hgTestResults@hypergeomDist),
col = "dodgerblue3",
lwd = 1.5)
ablineclip(v = hgTestResults@expected,
y1 = 0, y2 = maxDensityPlus*.92, lwd = 2)
ablineclip(v = hgTestResults@observed,
y1 = 0, y2 = maxDensityPlus*.92, lwd = 2, col = "forestgreen")
ablineclip(v = hgTestResults@alpha0.05,
y1 = 0, y2 = maxDensityPlus*.92, lwd = 2, lty = 5, col = "red")
text(x = c(textX1,
hgTestResults@expected,
hgTestResults@observed,
hgTestResults@alpha0.05),
y = c(maxDensityPlus*.95,
maxDensityPlus,
maxDensityPlus,
maxDensityPlus*.95),
labels = c("Simulated",
"Expected",
"Observed",
expression(alpha*" = 0.05")),
col = c("dodgerblue",
"black",
"forestgreen",
"red"),
cex = 0.8)
dev.off()
}
options(scipen = 100)
# Plot bar graph summarising permutation test results
pt_list <- list()
for(z in quantileFirst:quantileLast) {
load(paste0(outDir,
"NLR_gene_representation_among_quantile", z, "_of_", quantileLast,
"_by_", libName, "_of_genes_in_",
genomeName, "_", region, "_hypergeomTestRes_", pop_name[p], ".RData"))
pt_list <- c(pt_list, hgTestResults)
}
bargraph_df <- data.frame(Quantile = paste0("Quantile ", quantileFirst:quantileLast),
log2ObsExp = sapply(seq_along(pt_list), function(x) { pt_list[[x]]@log2obsexp }),
log2alpha0.05 = sapply(seq_along(pt_list), function(x) { pt_list[[x]]@log2alpha }))
bargraph_df$Quantile <- factor(bargraph_df$Quantile,
levels = paste0("Quantile ", quantileFirst:quantileLast))
bp <- ggplot(data = bargraph_df,
mapping = aes(x = Quantile,
y = log2ObsExp,
fill = " ")) +
geom_bar(stat = "identity",
position = position_dodge()) +
scale_fill_manual(name = "",
values = c("dodgerblue3"),
labels = " ") +
geom_point(mapping = aes(x = Quantile,
y = log2alpha0.05),
position = position_dodge(0.9),
shape = "-", colour = "grey80", size = 20) +
labs(y = bquote("Log"[2]*"(observed/expected) genes in quantile")) +
# scale_y_continuous(limits = c(-1.5, 1.5)) +
scale_x_discrete(position = "top") +
guides(fill = guide_legend(direction = "horizontal",
label.position = "top",
label.theme = element_text(size = 20, hjust = 0, vjust = 0.5, angle = 90),
nrow = 1,
byrow = TRUE)) +
theme_bw() +
theme(axis.line.y = element_line(size = 1, colour = "black"),
axis.ticks.y = element_line(size = 1, colour = "black"),
axis.text.y = element_text(size = 20, colour = "black", hjust = 0.5, vjust = 0.5, angle = 90),
axis.title.y = element_text(size = 20, colour = "black"),
axis.ticks.x = element_blank(),
axis.text.x = element_text(size = 20, colour = "black", hjust = 0, vjust = 0.5, angle = 90),
axis.title.x = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
legend.position = "none",
#legend.position = c(0.05, 0.30),
legend.background = element_rect(fill = "transparent"),
legend.key = element_rect(colour = "transparent",
fill = "transparent"),
plot.margin = unit(c(5.5, 5.5, 10.5, 5.5), "pt"),
plot.title = element_text(size = 16, colour = "black", hjust = 0.5)) +
ggtitle(bquote("NLR-encoding genes in" ~
.(libName) ~ "quantiles" ~
"(" * .(pop_name_plot[p]) * ") in" ~
.(gsub("_", " ", genomeName)) ~ .(region) ~
"(" * .(prettyNum(samplesNum,
big.mark = ",",
trim = T)) ~ " samples)"))
ggsave(paste0(plotDir,
"bargraph_NLR_gene_representation_among_", quantileLast,
"quantiles_by_", libName, "_of_genes_in_",
genomeName, "_", region, "_hypergeomTestRes_", pop_name[p], ".pdf"),
plot = bp,
height = 8, width = 12)
}
| /ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/proportion_NLRs_in_gene_quantiles_hypergeometricTest_HudsonRM.R | no_license | ajtock/wheat | R | false | false | 15,624 | r | #!/applications/R/R-3.5.0/bin/Rscript
# Perform hypergeometric tests to determine whether each HudsonRM_all
# gene quantile is over-represented or under-represented for
# NLR-encoding genes (as defined by Ksenia Krasileva) and, separately,
# meiotic-module genes (as defined in Alabdullah et al. 2019 Front. Plant Sci.)
# (i.e., is the proportion of NLR-encoding genes or meiotic-module genes
# contained within each gene quantile significantly greater or smaller than expected by chance
# based on the hypergeometric distribution?)
# P-value is the probability of drawing >= length(quantile_NLRs) [x] features
# in a sample size of length(quantile_genes) [k] from a total feature set consisting of
# length(genome_NLRs) [m] + ( length(genome_genes) - length(genome_NLRs)) [n]
# Usage
# ./proportion_NLRs_in_gene_quantiles_hypergeometricTest_HudsonRM.R 'HudsonRM_all' 'bodies' 1 2 'genomewide' 'Agenome_Bgenome_Dgenome' 100000
library(methods)
library(plotrix)
library(ggplot2)
library(ggbeeswarm)
library(ggthemes)
library(grid)
library(gridExtra)
library(extrafont)
#libName <- "HudsonRM_all"
#featRegion <- "bodies"
#quantileFirst <- 1
#quantileLast <- 2
#region <- "genomewide"
#genomeName <- "Agenome_Bgenome_Dgenome"
#samplesNum <- 100000
# Parse positional command-line arguments:
# 1: library name, 2: feature region, 3-4: first/last quantile,
# 5: region label, 6: genome name, 7: number of random samples
args <- commandArgs(trailingOnly = TRUE)
libName <- args[1]
featRegion <- args[2]
quantileFirst <- as.integer(args[3])
quantileLast <- as.integer(args[4])
region <- args[5]
genomeName <- args[6]
samplesNum <- as.numeric(args[7])
# Population display names; pop_name strips spaces for use in file paths
pop_name_plot <- c(
"Africa",
"Middle East",
"Asia",
"Former SU",
"Eastern Europe",
"Western Europe",
"North America",
"Central America",
"South America",
"Oceania"
)
pop_name <- gsub(" ", "", pop_name_plot)
# Output directories. dir.create() is portable (no POSIX shell dependency,
# unlike the previous system("[ -d ... ] || mkdir ...") call) and
# recursive = TRUE also creates missing parent directories, which plain
# `mkdir` would fail on.
outDir <- paste0("quantiles_by_", libName, "/hypergeometricTests/")
plotDir <- paste0(outDir, "plots/")
dir.create(outDir, showWarnings = FALSE, recursive = TRUE)
dir.create(plotDir, showWarnings = FALSE, recursive = TRUE)
# Define quantile colours
quantileColours <- c("red", "navy")
# Add an alpha (transparency) channel to colours.
#
# thisColour: character vector of R colour names or "#RRGGBB" hex strings.
# alpha:      alpha channel in [0, 255]; default 180 (semi-transparent).
# Returns a character vector of "#RRGGBBAA" hex colours, same length as input.
makeTransparent <- function(thisColour, alpha = 180)
{
  # rgb() accepts a matrix whose first three columns are R, G, B, so the
  # whole vector is converted in one vectorised call instead of an
  # apply() over col2rgb() columns.
  rgb(t(col2rgb(thisColour)), alpha = alpha, maxColorValue = 255)
}
quantileColours <- makeTransparent(quantileColours)
# One full analysis per population (NOTE(review): 1:length(...) would
# misbehave for an empty pop_name; seq_along() is the safe form)
for(p in 1:length(pop_name)) {
# Load feature quantiles
# One row per gene; the `quantile` column assigns each gene to "Quantile k"
featuresDF <- read.table(paste0(sub("hypergeometricTests/", "", outDir),
"/", pop_name[p], "/features_", quantileLast, "quantiles_by_",
libName, "_of_genes_in_",
genomeName, "_", region, "_", pop_name[p], ".txt"),
header = T, sep = "\t", row.names = NULL, stringsAsFactors = F)
# Strip the transcript-model decimal suffix (e.g. ".1") so gene IDs match
# the NLR annotation IDs below
featuresDF$featureID <- sub(pattern = "\\.\\d+", replacement = "",
x = featuresDF$featureID)
genome_genes <- featuresDF$featureID
# Gene IDs split by quantile membership, one list element per quantile
quantile_genes_list <- lapply(quantileFirst:quantileLast, function(x) {
featuresDF[featuresDF$quantile == paste0("Quantile ", x),]$featureID
})
# featuresDF is no longer needed; free the memory before the next steps
rm(featuresDF); gc()
# Load NLRs
#NLRs <- read.table(paste0("/home/ajt200/analysis/wheat/annotation/221118_download/iwgsc_refseqv1.1_genes_2017July06/",
# "NLRs_Krasileva/NB_ARC_genes_IWGSC_v1_Ksenia_Krasileva_representative_mRNA.gff3"),
# header = F, stringsAsFactors = F)
NLRs <- read.table(paste0("/home/ajt200/analysis/wheat/annotation/221118_download/iwgsc_refseqv1.1_genes_2017July06/",
"NLRs_Steuernagel_Wulff_2020_Plant_Physiol/NLR_genes_complete_representative_mRNA.gff3"),
header = F, stringsAsFactors = F)
# All 21 wheat chromosome names: chr1A..chr7A, chr1B..chr7B, chr1D..chr7D
chrs <- paste0(rep("chr", 21), rep(1:7, 3),
c(rep("A", 7), rep("B", 7), rep("D", 7)))
# e.g. "Agenome_Bgenome_Dgenome" -> c("A", "B", "D")
genomeLetter <- unlist(strsplit(gsub("genome", "", genomeName), split = "_"))
# Subset NLRs to only those within a given subgenome
if(length(genomeLetter) == 1) {
chrs <- chrs[grepl(genomeLetter, chrs)]
NLRs <- NLRs[NLRs$V1 %in% chrs,]
}
# Replace gene model ID decimal suffix (e.g., ".1")
NLRs$V9 <- sub(pattern = "\\.\\d+", replacement = "",
x = NLRs$V9)
genome_NLRs <- as.character(NLRs$V9)
# Keep only NLRs that are present in the quantiled gene universe
genome_NLRs <- intersect(genome_NLRs, genome_genes)
# Set class for hypergeometric test results object.
# NOTE: this runs once per population iteration; the definition is
# identical every pass, so redefinition is redundant but harmless.
# slots= replaces the deprecated representation() interface.
setClass("hypergeomTest",
         slots = c(alternative = "character",
                   alpha0.05 = "numeric",
                   pval = "numeric",
                   observed = "numeric",
                   expected = "numeric",
                   log2obsexp = "numeric",
                   log2alpha = "numeric",
                   quantile_genes = "numeric",
                   proportion_of_quantile = "numeric",
                   random_proportions_of_quantile = "numeric",
                   hypergeomDist = "numeric"))
# P-value is the probability of drawing >= length(quantile_NLRs) [x] features
# in a sample size of length(quantile_genes) [k] from a total feature set consisting of
# length(genome_NLRs) [m] + ( length(genome_genes) - length(genome_NLRs)) [n]
# From Karl Broman's answer at
# https://stats.stackexchange.com/questions/16247/calculating-the-probability-of-gene-list-overlap-between-an-rna-seq-and-a-chip-c:
# dhyper(x, m, n, k) gives the probability of drawing exactly x.
# So P-value is given by the sum of the probabilities of drawing
# length(quantile_NLRs) to length(quantile_genes)
#lapply(seq_along(quantile_genes_list), function(z) {
# Test each quantile's NLR content against the hypergeometric null
for(z in seq_along(quantile_genes_list)) {
quantile_genes <- quantile_genes_list[[z]]
# Get intersection of gene IDs in quantile z and gene IDs of NLRs
quantile_NLRs <- intersect(quantile_genes, genome_NLRs)
# Calculate the P-values for over-representation and under-representation
# of NLRs among quantile z genes
# Fixed seed makes the rhyper() null sample below reproducible
set.seed(2847502)
# Over-representation:
# P(X >= observed): sum exact probabilities from the observed count up
# to the maximum possible (the whole quantile)
Pval_overrep <- sum(dhyper(x = length(quantile_NLRs):length(quantile_genes),
m = length(genome_NLRs),
n = length(genome_genes) - length(genome_NLRs),
k = length(quantile_genes)))
print(Pval_overrep)
# Or by 1 minus the sum of the probabilities of drawing 0:(length(quantile_NLRs)-1)
print(1 - sum(dhyper(x = 0:(length(quantile_NLRs)-1),
m = length(genome_NLRs),
n = length(genome_genes) - length(genome_NLRs),
k = length(quantile_genes))))
# Under-representation
# phyper() with default lower.tail = TRUE gives P(X <= observed)
Pval_underrep <- phyper(q = length(quantile_NLRs),
m = length(genome_NLRs),
n = length(genome_genes) - length(genome_NLRs),
k = length(quantile_genes))
print(Pval_underrep)
# Sample without replacement
# Empirical null: NLR counts in samplesNum random draws of
# quantile-sized gene sets from the whole gene universe
hgDist <- rhyper(nn = samplesNum,
m = length(genome_NLRs),
n = length(genome_genes) - length(genome_NLRs),
k = length(quantile_genes))
# Calculate P-values and significance levels
# Direction of departure from the null mean decides which tail's
# P-value and which 5% critical value are reported
if(length(quantile_NLRs) > mean(hgDist)) {
Pval <- Pval_overrep
MoreOrLessThanRandom <- "MoreThanRandom"
alpha0.05 <- quantile(hgDist, probs = 0.95)[[1]]
} else {
Pval <- Pval_underrep
MoreOrLessThanRandom <- "LessThanRandom"
alpha0.05 <- quantile(hgDist, probs = 0.05)[[1]]
}
# Bundle everything (including the full simulated null) for plotting
# and for the summary bar graph after the loop
hgTestResults <- new("hypergeomTest",
alternative = MoreOrLessThanRandom,
alpha0.05 = alpha0.05,
pval = Pval,
observed = length(quantile_NLRs),
expected = mean(hgDist),
log2obsexp = log2( length(quantile_NLRs) / mean(hgDist) ),
log2alpha = log2( alpha0.05 / mean(hgDist) ),
quantile_genes = length(quantile_genes),
proportion_of_quantile = length(quantile_NLRs) / length(quantile_genes),
random_proportions_of_quantile = hgDist / length(quantile_genes),
hypergeomDist = hgDist)
save(hgTestResults,
file = paste0(outDir,
"NLR_gene_representation_among_quantile", z, "_of_", quantileLast,
"_by_", libName, "_of_genes_in_",
genomeName, "_", region, "_hypergeomTestRes_", pop_name[p], ".RData"))
# Generate histogram
pdf(paste0(plotDir,
"NLR_gene_representation_among_quantile", z, "_of_", quantileLast,
"_by_", libName, "_of_genes_in_",
genomeName, "_", region, "_hypergeomTestRes_", pop_name[p], "_hist.pdf"),
height = 4.5, width = 5)
par(mar = c(3.1, 3.1, 4.1, 1.1),
mgp = c(1.85, 0.75, 0))
## Disable scientific notation (e.g., 0.0001 rather than 1e-04)
#options(scipen = 100)
# Calculate max density
maxDensityPlus <- max(density(hgTestResults@hypergeomDist)$y)*1.2
if(hgTestResults@alternative == "MoreThanRandom") {
xlim <- c(pmin(0, min(hgTestResults@hypergeomDist)/1.2),
pmax(hgTestResults@observed*1.2, hgTestResults@alpha0.05*1.2))
textX1 <- quantile(xlim, 0.25)[[1]]
# textX1 <- min(hgTestResults@hypergeomDist)/1.15
} else {
xlim <- c(pmin(0, hgTestResults@observed/1.2),
max(hgTestResults@hypergeomDist)*1.2)
textX1 <- quantile(xlim, 0.75)[[1]]
# textX1 <- min(hgTestResults@hypergeomDist)/1.15
}
hist(hgTestResults@hypergeomDist,
breaks = 50,
freq = FALSE,
col = "dodgerblue",
border = NA,
lwd = 2,
xlim = c(pretty(xlim)[1],
pretty(xlim)[length(pretty(xlim))]),
ylim = c(0,
maxDensityPlus),
xaxt = "n", yaxt = "n",
xlab = "", ylab = "", main = "",
axes = FALSE)
axis(side = 2,
at = pretty(density(hgTestResults@hypergeomDist)$y),
lwd = 2)
mtext(side = 2,
text = "Density",
line = 1.85)
axis(side = 1,
at = pretty(xlim),
lwd = 2)
mtext(side = 1,
text = bquote("Genes (" * .(pop_name_plot[p]) * ")"),
line = 1.85)
titleText <- list(bquote("NLR-encoding genes in" ~
.(libName) ~ "Quantile" ~ .(as.character(z)) ~
"(" * .(featRegion) * ") in" ~
.(gsub("_", " ", genomeName)) ~ .(region)),
bquote(italic("P")*" = "*
# .(as.character(round(hgTestResults@pval,
# digits = 6)))),
.(as.character(hgTestResults@pval))),
bquote("Samples (hypergeometric distribution) = "*.(prettyNum(length(hgTestResults@hypergeomDist),
big.mark = ",",
trim = T))))
mtext(do.call(expression, titleText), side = 3, line = 3:1, cex = c(0.7, 1, 1))
lines(density(hgTestResults@hypergeomDist),
col = "dodgerblue3",
lwd = 1.5)
ablineclip(v = hgTestResults@expected,
y1 = 0, y2 = maxDensityPlus*.92, lwd = 2)
ablineclip(v = hgTestResults@observed,
y1 = 0, y2 = maxDensityPlus*.92, lwd = 2, col = "forestgreen")
ablineclip(v = hgTestResults@alpha0.05,
y1 = 0, y2 = maxDensityPlus*.92, lwd = 2, lty = 5, col = "red")
text(x = c(textX1,
hgTestResults@expected,
hgTestResults@observed,
hgTestResults@alpha0.05),
y = c(maxDensityPlus*.95,
maxDensityPlus,
maxDensityPlus,
maxDensityPlus*.95),
labels = c("Simulated",
"Expected",
"Observed",
expression(alpha*" = 0.05")),
col = c("dodgerblue",
"black",
"forestgreen",
"red"),
cex = 0.8)
dev.off()
}
# Avoid scientific notation in printed/plotted numbers
options(scipen = 100)
# Plot bar graph summarising permutation test results
pt_list <- list()
for(z in quantileFirst:quantileLast) {
# load() restores the object named `hgTestResults` saved above
load(paste0(outDir,
"NLR_gene_representation_among_quantile", z, "_of_", quantileLast,
"_by_", libName, "_of_genes_in_",
genomeName, "_", region, "_hypergeomTestRes_", pop_name[p], ".RData"))
pt_list <- c(pt_list, hgTestResults)
}
# Summary table for the bar graph: one row per quantile.
# vapply() (rather than sapply()) guarantees a numeric vector even for
# degenerate pt_list contents, and iterates the list directly.
bargraph_df <- data.frame(Quantile = paste0("Quantile ", quantileFirst:quantileLast),
                          log2ObsExp = vapply(pt_list, function(x) x@log2obsexp, numeric(1)),
                          log2alpha0.05 = vapply(pt_list, function(x) x@log2alpha, numeric(1)))
bargraph_df$Quantile <- factor(bargraph_df$Quantile,
levels = paste0("Quantile ", quantileFirst:quantileLast))
bp <- ggplot(data = bargraph_df,
mapping = aes(x = Quantile,
y = log2ObsExp,
fill = " ")) +
geom_bar(stat = "identity",
position = position_dodge()) +
scale_fill_manual(name = "",
values = c("dodgerblue3"),
labels = " ") +
geom_point(mapping = aes(x = Quantile,
y = log2alpha0.05),
position = position_dodge(0.9),
shape = "-", colour = "grey80", size = 20) +
labs(y = bquote("Log"[2]*"(observed/expected) genes in quantile")) +
# scale_y_continuous(limits = c(-1.5, 1.5)) +
scale_x_discrete(position = "top") +
guides(fill = guide_legend(direction = "horizontal",
label.position = "top",
label.theme = element_text(size = 20, hjust = 0, vjust = 0.5, angle = 90),
nrow = 1,
byrow = TRUE)) +
theme_bw() +
theme(axis.line.y = element_line(size = 1, colour = "black"),
axis.ticks.y = element_line(size = 1, colour = "black"),
axis.text.y = element_text(size = 20, colour = "black", hjust = 0.5, vjust = 0.5, angle = 90),
axis.title.y = element_text(size = 20, colour = "black"),
axis.ticks.x = element_blank(),
axis.text.x = element_text(size = 20, colour = "black", hjust = 0, vjust = 0.5, angle = 90),
axis.title.x = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
legend.position = "none",
#legend.position = c(0.05, 0.30),
legend.background = element_rect(fill = "transparent"),
legend.key = element_rect(colour = "transparent",
fill = "transparent"),
plot.margin = unit(c(5.5, 5.5, 10.5, 5.5), "pt"),
plot.title = element_text(size = 16, colour = "black", hjust = 0.5)) +
ggtitle(bquote("NLR-encoding genes in" ~
.(libName) ~ "quantiles" ~
"(" * .(pop_name_plot[p]) * ") in" ~
.(gsub("_", " ", genomeName)) ~ .(region) ~
"(" * .(prettyNum(samplesNum,
big.mark = ",",
trim = T)) ~ " samples)"))
ggsave(paste0(plotDir,
"bargraph_NLR_gene_representation_among_", quantileLast,
"quantiles_by_", libName, "_of_genes_in_",
genomeName, "_", region, "_hypergeomTestRes_", pop_name[p], ".pdf"),
plot = bp,
height = 8, width = 12)
}
|
#' Create a pp module which converts numerical features to principal components.
#'
#' @param cols A character vector. Specify the names of features to convert.
#' @param threshold A numeric value in [0, 1). Minimum cumulative proportion
#'   of variance the retained components must exceed.
#' @param desc (optional) Description of the module.
#' @param data Training data used to fit the rotation (only used when
#'   \code{standby = FALSE}).
#' @param standby If TRUE (default), return a function that fits on `data` later.
pp_pca <- function(cols, threshold = .8,
                   desc = NULL, data = NULL, standby = TRUE) {
  if (standby) {
    # Deferred fitting: return a closure that re-invokes this function
    # on the supplied data
    return(function(data) pp_pca(cols, threshold, desc, data, standby = FALSE))
  }
  prcomp_fitted <-
    data %>%
    # all_of() makes the external-vector selection explicit: it errors on
    # missing columns instead of silently matching a data column that
    # happens to be named "cols" (bare-vector selection is deprecated)
    select(all_of(cols)) %>%
    na.omit() %>%
    prcomp()
  # Proportion of variance explained by each component
  pve <- prcomp_fitted$sdev^2 %>% (function(x) {x / sum(x)})
  # Smallest number of leading PCs whose cumulative PVE exceeds threshold
  n_pc <- first(which(cumsum(pve) > threshold))
  # drop = FALSE keeps a one-column matrix when a single PC is retained,
  # so the %*% projection in predict.pp_pca() still works
  loading <- prcomp_fitted$rotation[, 1:n_pc, drop = FALSE]
  pp(
    cols = cols,
    loading = loading,
    desc = desc,
    subclass = "pp_pca"
  )
}
#' Apply a fitted pp_pca module: replace the original columns with PC scores.
#'
#' @param object A fitted "pp_pca" module holding `cols` and `loading`.
#' @param data A data frame containing the columns named in `object$cols`.
#' @return `data` with the original columns removed and the principal-component
#'   score columns appended.
predict.pp_pca <- function(object, data) {
  cols <- object$cols
  x_origin <-
    data %>%
    # all_of() disambiguates the external `cols` vector from any data
    # column of the same name (bare-vector selection is deprecated)
    select(all_of(cols)) %>%
    as.matrix()
  # Project onto the stored principal-component loadings
  x_pc <- as_tibble(x_origin %*% object$loading)
  data %>%
    select(-all_of(cols)) %>%
    bind_cols(x_pc)
}
| /R/pp_pca.R | no_license | jeongnna/dps | R | false | false | 1,042 | r | #' Create a pp module which converts numerical features to principal components.
#'
#' @param cols A character vector. Specify the names of features to convert.
#' @param threshold A numeric value in [0, 1). Minimum cumulative proportion
#'   of variance the retained components must exceed.
#' @param desc (optional) Description of the module.
#' @param data Training data used to fit the rotation (only used when
#'   \code{standby = FALSE}).
#' @param standby If TRUE (default), return a function that fits on `data` later.
pp_pca <- function(cols, threshold = .8,
                   desc = NULL, data = NULL, standby = TRUE) {
  if (standby) {
    # Deferred fitting: return a closure that re-invokes this function
    # on the supplied data
    return(function(data) pp_pca(cols, threshold, desc, data, standby = FALSE))
  }
  prcomp_fitted <-
    data %>%
    # all_of() makes the external-vector selection explicit: it errors on
    # missing columns instead of silently matching a data column that
    # happens to be named "cols" (bare-vector selection is deprecated)
    select(all_of(cols)) %>%
    na.omit() %>%
    prcomp()
  # Proportion of variance explained by each component
  pve <- prcomp_fitted$sdev^2 %>% (function(x) {x / sum(x)})
  # Smallest number of leading PCs whose cumulative PVE exceeds threshold
  n_pc <- first(which(cumsum(pve) > threshold))
  # drop = FALSE keeps a one-column matrix when a single PC is retained,
  # so the %*% projection in predict.pp_pca() still works
  loading <- prcomp_fitted$rotation[, 1:n_pc, drop = FALSE]
  pp(
    cols = cols,
    loading = loading,
    desc = desc,
    subclass = "pp_pca"
  )
}
#' Apply a fitted pp_pca module: replace the original columns with PC scores.
#'
#' @param object A fitted "pp_pca" module holding `cols` and `loading`.
#' @param data A data frame containing the columns named in `object$cols`.
#' @return `data` with the original columns removed and the principal-component
#'   score columns appended.
predict.pp_pca <- function(object, data) {
  cols <- object$cols
  x_origin <-
    data %>%
    # all_of() disambiguates the external `cols` vector from any data
    # column of the same name (bare-vector selection is deprecated)
    select(all_of(cols)) %>%
    as.matrix()
  # Project onto the stored principal-component loadings
  x_pc <- as_tibble(x_origin %*% object$loading)
  data %>%
    select(-all_of(cols)) %>%
    bind_cols(x_pc)
}
|
# Population-pyramid style chart: age bins vs population percentage,
# filled by Gender; coord_flip() below puts age on the vertical axis.
ggplot(df, aes(as.factor(Age_Bin), Pop_Percentage, fill = Gender)) +
geom_bar(stat = "identity") +
# abs() relabels the negative half of the axis so mirrored values read as
# positive percentages (presumably one gender is plotted as negative --
# NOTE(review): confirm upstream); minx/maxx come from the calling scope
scale_y_continuous(breaks=seq(-100,100,5),labels=abs(seq(-100,100,5)),limits = (c(minx,maxx))) +
# Label each bin as a 5-year range, e.g. "20-24"
scale_x_discrete(labels = paste0(df$Age_Bin, "-", df$Age_Bin + 4)) +
coord_flip() +
ylab("Population (%)") +
xlab("Age") +
theme_tufte() +
scale_fill_economist() +
# `input$indicator` / `input$year` suggest a Shiny reactive context --
# NOTE(review): confirm this runs inside a Shiny server function
ggtitle(str_title_case(paste0(input$indicator," cases - ", input$year))) +
theme(plot.title = element_text(hjust = 0.5))
dcd | /css/ggplot-orig.R | no_license | kev-ho/Upskill-The-Essential-Web-Developer-Course | R | false | false | 551 | r | ggplot(df, aes(as.factor(Age_Bin), Pop_Percentage, fill = Gender)) +
geom_bar(stat = "identity") +
# abs() relabels the negative half of the axis so mirrored values read as
# positive percentages (presumably one gender is plotted as negative --
# NOTE(review): confirm upstream); minx/maxx come from the calling scope
scale_y_continuous(breaks=seq(-100,100,5),labels=abs(seq(-100,100,5)),limits = (c(minx,maxx))) +
# Label each bin as a 5-year range, e.g. "20-24"
scale_x_discrete(labels = paste0(df$Age_Bin, "-", df$Age_Bin + 4)) +
coord_flip() +
ylab("Population (%)") +
xlab("Age") +
theme_tufte() +
scale_fill_economist() +
# `input$indicator` / `input$year` suggest a Shiny reactive context --
# NOTE(review): confirm this runs inside a Shiny server function
ggtitle(str_title_case(paste0(input$indicator," cases - ", input$year))) +
theme(plot.title = element_text(hjust = 0.5))
dcd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{t11}
\alias{t11}
\title{97130187.t11}
\format{A data frame with 401 rows and 6 variables:
\describe{
\item{depth}{depth}
\item{temper}{temperature}
\item{salinity}{salinity}
\item{oxygen}{oxygen}
\item{oxygen.sat}{oxygen saturation}
\item{density}{Density}
...
}}
\source{
Supplied by R Thomson
}
\usage{
t11
}
\description{
Data supplied with original FORTRAN code
}
\keyword{datasets}
| /man/t11.Rd | no_license | boshek/limnotools | R | false | true | 507 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{t11}
\alias{t11}
\title{97130187.t11}
\format{A data frame with 401 rows and 6 variables:
\describe{
\item{depth}{depth}
\item{temper}{temperature}
\item{salinity}{salinity}
\item{oxygen}{oxygen}
\item{oxygen.sat}{oxygen saturation}
\item{density}{Density}
...
}}
\source{
Supplied by R Thomson
}
\usage{
t11
}
\description{
Data supplied with original FORTRAN code
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tt_afun_utils.R
\name{in_rows}
\alias{in_rows}
\title{Create multiple rows in analysis or summary functions}
\usage{
in_rows(
...,
.list = NULL,
.names = NULL,
.labels = NULL,
.formats = NULL,
.indent_mods = NULL,
.cell_footnotes = list(NULL),
.row_footnotes = list(NULL)
)
}
\arguments{
\item{...}{single row defining expressions}
\item{.list}{list. list cell content, usually \code{rcells}, the \code{.list} is concatenated to \code{...}}
\item{.names}{character or NULL. Names of the returned list/structure.}
\item{.labels}{character or NULL. labels for the defined rows}
\item{.formats}{character or NULL. Formats for the values}
\item{.indent_mods}{integer or NULL. Indent modifications for the defined rows.}
\item{.cell_footnotes}{list. Referential footnote messages to be associated by name with \emph{cells}}
\item{.row_footnotes}{list. Referential footnote messages to be associated by name with \emph{rows}}
}
\value{
an \code{RowsVerticalSection} object (or \code{NULL}). The details of this object should be considered an internal implementation detail.
}
\description{
define the cells that get placed into multiple rows in \code{afun}
}
\note{
currently the \code{.name} argument is not used
}
\examples{
in_rows(1, 2, 3, .names = c("a", "b", "c"))
in_rows(1, 2, 3, .labels = c("a", "b", "c"))
in_rows(1, 2, 3, .names = c("a", "b", "c"), .labels = c("AAA", "BBB", "CCC"))
in_rows(.list = list(a = 1, b = 2, c = 3))
in_rows(1, 2, .list = list(3), .names = c("a", "b", "c"))
basic_table() \%>\%
split_cols_by("ARM") \%>\%
analyze("AGE", afun = function(x) {
in_rows(
"Mean (sd)" = rcell(c(mean(x), sd(x)), format = "xx.xx (xx.xx)"),
"Range" = rcell(range(x), format = "xx.xx - xx.xx")
)
}) \%>\%
build_table(ex_adsl)
}
\seealso{
\code{analyze}
}
| /man/in_rows.Rd | permissive | jcheng5/rtables | R | false | true | 1,894 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tt_afun_utils.R
\name{in_rows}
\alias{in_rows}
\title{Create multiple rows in analysis or summary functions}
\usage{
in_rows(
...,
.list = NULL,
.names = NULL,
.labels = NULL,
.formats = NULL,
.indent_mods = NULL,
.cell_footnotes = list(NULL),
.row_footnotes = list(NULL)
)
}
\arguments{
\item{...}{single row defining expressions}
\item{.list}{list. list cell content, usually \code{rcells}, the \code{.list} is concatenated to \code{...}}
\item{.names}{character or NULL. Names of the returned list/structure.}
\item{.labels}{character or NULL. labels for the defined rows}
\item{.formats}{character or NULL. Formats for the values}
\item{.indent_mods}{integer or NULL. Indent modifications for the defined rows.}
\item{.cell_footnotes}{list. Referential footnote messages to be associated by name with \emph{cells}}
\item{.row_footnotes}{list. Referential footnote messages to be associated by name with \emph{rows}}
}
\value{
an \code{RowsVerticalSection} object (or \code{NULL}). The details of this object should be considered an internal implementation detail.
}
\description{
define the cells that get placed into multiple rows in \code{afun}
}
\note{
currently the \code{.name} argument is not used
}
\examples{
in_rows(1, 2, 3, .names = c("a", "b", "c"))
in_rows(1, 2, 3, .labels = c("a", "b", "c"))
in_rows(1, 2, 3, .names = c("a", "b", "c"), .labels = c("AAA", "BBB", "CCC"))
in_rows(.list = list(a = 1, b = 2, c = 3))
in_rows(1, 2, .list = list(3), .names = c("a", "b", "c"))
basic_table() \%>\%
split_cols_by("ARM") \%>\%
analyze("AGE", afun = function(x) {
in_rows(
"Mean (sd)" = rcell(c(mean(x), sd(x)), format = "xx.xx (xx.xx)"),
"Range" = rcell(range(x), format = "xx.xx - xx.xx")
)
}) \%>\%
build_table(ex_adsl)
}
\seealso{
\code{analyze}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xGridBuild.R
\name{xGridBuild}
\alias{xGridBuild}
\title{xGridBuild}
\usage{
xGridBuild(xmin, xmax, ymin, ymax, cellsize, CELLCENTER = FALSE)
}
\arguments{
\item{xmin}{xmin}
\item{xmax}{xmax}
\item{ymin}{ymin}
\item{ymax}{ymax}
\item{cellsize}{cellsize}
\item{CELLCENTER}{boolean}
}
\description{
xGridBuild
}
\examples{
\dontrun{
xGridBuild(...)
}
}
\keyword{spatialManip}
| /man/xGridBuild.Rd | no_license | jeanmarie-eu/spatialManip | R | false | true | 458 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xGridBuild.R
\name{xGridBuild}
\alias{xGridBuild}
\title{xGridBuild}
\usage{
xGridBuild(xmin, xmax, ymin, ymax, cellsize, CELLCENTER = FALSE)
}
\arguments{
\item{xmin}{xmin}
\item{xmax}{xmax}
\item{ymin}{ymin}
\item{ymax}{ymax}
\item{cellsize}{cellsize}
\item{CELLCENTER}{boolean}
}
\description{
xGridBuild
}
\examples{
\dontrun{
xGridBuild(...)
}
}
\keyword{spatialManip}
|
###########################################################################/**
# @set "class=Class"
#
# @RdocMethod getRdDeclaration
#
# @title "Gets the class declaraction in Rd format"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @character string.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @keyword documentation
#*/###########################################################################
setMethodS3("getRdDeclaration", "Class", function(this, ...) {
  # Build the "public [abstract] [static] class <Name>" declaration line.
  s <- "public"; # visibility(this);
  if (isAbstract(this))
    s <- paste(s, "abstract");
  if (isStatic(this))
    s <- paste(s, "static");
  if (inherits(this, "Class"))
    s <- paste(s, "class")
  else
    throw(getName(this), " is neither a class nor an interface.");
  s <- paste(s, " \\bold{", getName(this), "}\\cr\n", sep="");
  # Append an "extends <superclass>" line linking to the immediate superclass.
  links <- getSuperclasses(this);
  if (length(links) > 0) {
    name <- links[1];
    link <- name;
    cls <- .getClassByName(name, mustExist=FALSE);
    if (inherits(cls, "Class")) {
      pkg <- getPackage(cls);
      if (is.null(pkg))
        link <- paste("\\link{", link ,"}", sep="")
      else
        link <- paste("\\link[", pkg, "]{", link ,"}", sep="");
      if (isAbstract(cls))
        link <- paste("\\emph{", link, "}", sep="");
    }
    # BUG FIX: the \code{} wrapping below was previously computed but its
    # result discarded (the paste() value was never assigned), so the
    # superclass link appeared without \code{} markup, unlike the
    # equivalent wrapping in getRdHierarchy().
    link <- paste("\\code{", link ,"}", sep="");
    s <- paste(s, "extends ", link, "\\cr\n", sep="");
  }
  s;
}, private=TRUE);
###########################################################################/**
# @RdocMethod getRdMethods
#
# @title "Gets the methods of a class in Rd format"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{visibilities}{A @character string specifying what types of methods
# to return.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @character string.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @keyword documentation
#*/###########################################################################
# Renders the class's method list as an Rd \tabular{rll}{} block: one row
# per visible method, linked to its help page when a title exists.
setMethodS3("getRdMethods", "Class", function(class, visibilities=c("private", "protected", "public"), ...) {
className <- getName(class);
methods <- getMethods(class, private=TRUE); # Excludes empty classes!
methods <- methods[[className]];
methods <- names(methods);
src <- "\\bold{Methods:}\\cr\n";
tmpsrc <- "\\tabular{rll}{\n";
count <- 0;
for (method in methods) {
# S3 method name, e.g. "print.MyClass"
fcnName <- paste(method, className, sep=".");
fcn <- .getS3Method(fcnName);
modifiers <- attr(fcn, "modifiers");
# Only include methods whose modifiers match the requested visibilities
if (Rdoc$isVisible(modifiers, visibilities)) {
helpName <- Rdoc$createName(getName(class), method, escape=TRUE);
label <- method;
title <- Rdoc$getRdTitle(class, method);
package <- attr(title, "package");
if (is.null(package))
package <- Rdoc$package;
# Is there a specific help document for this method or not?
if (!is.null(title)) {
link <- paste("\\link[", package, ":", helpName, "]{", label, "}", sep="");
} else {
link <- label;
}
item <- paste(" \\tab \\code{", link, "} \\tab ", sep="");
# Create the title
if (!is.null(title)) {
if (title != "")
item <- paste(item, title, ".\\cr", sep="");
} else {
item <- paste(item, " -\\cr", sep="");
}
tmpsrc <- paste(tmpsrc, item, "\n", sep="");
count <- count + 1;
} # if(isVisible(...))
}
tmpsrc <- paste(tmpsrc, "}\n", sep=""); # end of \tabular{rll}
# An empty \tabular{rll}{} is invalid Rd, so substitute a placeholder
# when no visible methods were found
if (count == 0)
src <- paste(src, "\\emph{No methods defined}.\n", sep="")
else
src <- paste(src, tmpsrc, sep="");
src;
}, private=TRUE);
###########################################################################/**
# @RdocMethod getRdHierarchy
#
# @title "Gets the class hierarchy in Rd format"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @character string.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @keyword documentation
#*/###########################################################################
# Renders the inheritance chain root-to-leaf as an indented ASCII tree in
# Rd markup (\code{~~+--} connectors), ending with this class itself.
setMethodS3("getRdHierarchy", "Class", function(this, ...) {
package <- getPackage(this);
# NOTE(review): this first paste() uses the default sep=" " unlike the
# sep="" calls below -- possibly intentional spacing; confirm rendering.
s <- paste("Package: ", package, "\\cr\n");
what <- if (inherits(this, "Class")) "Class" else "Interface";
s <- paste(s, "\\bold{", what, " ", getName(this), "}\\cr\n\n", sep="");
indent <- "";
# Walk superclasses from the root down, emitting one tree level each
for (extend in rev(getSuperclasses(this))) {
link <- sapply(extend, FUN=function(name) {
# isAbstract <- FALSE;
link <- name;
cls <- .getClassByName(name, mustExist=FALSE);
if (inherits(cls, "Class")) {
pkg <- getPackage(cls);
if (is.null(pkg)) {
link <- paste("\\link{", link ,"}", sep="")
} else {
link <- paste("\\link[", pkg, "]{", link ,"}", sep="");
}
# if (isAbstract(cls)) {
# link <- paste("\\emph{", link, "}", sep="");
# isAbstract <- TRUE;
}
# Returned value of the sapply FUN: the \code{}-wrapped link
paste("\\code{", link ,"}", sep="");
});
if (indent == "") {
s <- paste(s, link, "\\cr\n", sep="");
indent <- "~~";
} else {
s <- paste(s, "\\code{", indent, "+--}", link, "\\cr\n", sep="");
indent <- paste(indent, "~~~~~", sep="");
}
s <- paste(s, "\\code{", indent, "|}\\cr\n", sep="");
}
# Finally, this class itself (italicised when abstract)
link <- paste("\\code{", getName(this), "}", sep="");
if (isAbstract(this))
link <- paste("\\emph{", link, "}", sep="");
s <- paste(s, "\\code{", indent, "+--}", link, "\\cr\n\n", sep="");
s;
}, private=TRUE);
#########################################################################
# HISTORY:
# 2014-03-30
# o BUG FIX: Now getRdDeclaration(), getRdHierarchy() and getRdMethods()
# for Class handles also non-exported methods and Classes.
# 2006-05-29
# o Added support for visibility of getRdMethods().
# 2005-02-15
# o Added arguments '...' in order to match any generic functions.
# 2004-10-22
# o BUG FIX: getRdMethods() returned empty \tabular{rll}{} if no methods
# exist, but this gives an error in R CMD Rdconv.
# 2004-10-18
# o BUG FIX: Invalid filenames and link names are now escaped.
# 2004-10-17
# o Added Rdoc comments.
#########################################################################
| /R.oo/R/Class.misc.R | no_license | ingted/R-Examples | R | false | false | 6,214 | r | ###########################################################################/**
# @set "class=Class"
#
# @RdocMethod getRdDeclaration
#
# @title "Gets the class declaraction in Rd format"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @character string.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @keyword documentation
#*/###########################################################################
setMethodS3("getRdDeclaration", "Class", function(this, ...) {
s <- "public"; # visibility(this);
if (isAbstract(this))
s <- paste(s, "abstract");
if (isStatic(this))
s <- paste(s, "static");
if (inherits(this, "Class"))
s <- paste(s, "class")
else
throw(getName(this), " is neither a class nor an interface.");
s <- paste(s, " \\bold{", getName(this), "}\\cr\n", sep="");
links <- getSuperclasses(this);
if (length(links) > 0) {
name <- links[1];
link <- name;
cls <- .getClassByName(name, mustExist=FALSE);
if (inherits(cls, "Class")) {
pkg <- getPackage(cls);
if (is.null(pkg))
link <- paste("\\link{", link ,"}", sep="")
else
link <- paste("\\link[", pkg, "]{", link ,"}", sep="");
if (isAbstract(cls))
link <- paste("\\emph{", link, "}", sep="");
}
paste("\\code{", link ,"}", sep="");
s <- paste(s, "extends ", link, "\\cr\n", sep="");
}
s;
}, private=TRUE);
###########################################################################/**
# @RdocMethod getRdMethods
#
# @title "Gets the methods of a class in Rd format"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{visibilities}{A @character string specifying what types of methods
# to return.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @character string.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @keyword documentation
#*/###########################################################################
setMethodS3("getRdMethods", "Class", function(class, visibilities=c("private", "protected", "public"), ...) {
className <- getName(class);
methods <- getMethods(class, private=TRUE); # Excludes empty classes!
methods <- methods[[className]];
methods <- names(methods);
src <- "\\bold{Methods:}\\cr\n";
tmpsrc <- "\\tabular{rll}{\n";
count <- 0;
for (method in methods) {
fcnName <- paste(method, className, sep=".");
fcn <- .getS3Method(fcnName);
modifiers <- attr(fcn, "modifiers");
if (Rdoc$isVisible(modifiers, visibilities)) {
helpName <- Rdoc$createName(getName(class), method, escape=TRUE);
label <- method;
title <- Rdoc$getRdTitle(class, method);
package <- attr(title, "package");
if (is.null(package))
package <- Rdoc$package;
# Is there a specific help document for this method or not?
if (!is.null(title)) {
link <- paste("\\link[", package, ":", helpName, "]{", label, "}", sep="");
} else {
link <- label;
}
item <- paste(" \\tab \\code{", link, "} \\tab ", sep="");
# Create the title
if (!is.null(title)) {
if (title != "")
item <- paste(item, title, ".\\cr", sep="");
} else {
item <- paste(item, " -\\cr", sep="");
}
tmpsrc <- paste(tmpsrc, item, "\n", sep="");
count <- count + 1;
} # if(isVisible(...))
}
tmpsrc <- paste(tmpsrc, "}\n", sep=""); # end of \tabular{rll}
if (count == 0)
src <- paste(src, "\\emph{No methods defined}.\n", sep="")
else
src <- paste(src, tmpsrc, sep="");
src;
}, private=TRUE);
###########################################################################/**
# @RdocMethod getRdHierarchy
#
# @title "Gets the class hierarchy in Rd format"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @character string.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @keyword documentation
#*/###########################################################################
setMethodS3("getRdHierarchy", "Class", function(this, ...) {
package <- getPackage(this);
s <- paste("Package: ", package, "\\cr\n");
what <- if (inherits(this, "Class")) "Class" else "Interface";
s <- paste(s, "\\bold{", what, " ", getName(this), "}\\cr\n\n", sep="");
indent <- "";
for (extend in rev(getSuperclasses(this))) {
link <- sapply(extend, FUN=function(name) {
# isAbstract <- FALSE;
link <- name;
cls <- .getClassByName(name, mustExist=FALSE);
if (inherits(cls, "Class")) {
pkg <- getPackage(cls);
if (is.null(pkg)) {
link <- paste("\\link{", link ,"}", sep="")
} else {
link <- paste("\\link[", pkg, "]{", link ,"}", sep="");
}
# if (isAbstract(cls)) {
# link <- paste("\\emph{", link, "}", sep="");
# isAbstract <- TRUE;
}
paste("\\code{", link ,"}", sep="");
});
if (indent == "") {
s <- paste(s, link, "\\cr\n", sep="");
indent <- "~~";
} else {
s <- paste(s, "\\code{", indent, "+--}", link, "\\cr\n", sep="");
indent <- paste(indent, "~~~~~", sep="");
}
s <- paste(s, "\\code{", indent, "|}\\cr\n", sep="");
}
link <- paste("\\code{", getName(this), "}", sep="");
if (isAbstract(this))
link <- paste("\\emph{", link, "}", sep="");
s <- paste(s, "\\code{", indent, "+--}", link, "\\cr\n\n", sep="");
s;
}, private=TRUE);
#########################################################################
# HISTORY:
# 2014-03-30
# o BUG FIX: Now getRdDeclaration(), getRdHierarchy() and getRdMethods()
# for Class handles also non-exported methods and Classes.
# 2006-05-29
# o Added support for visibility of getRdMethods().
# 2005-02-15
# o Added arguments '...' in order to match any generic functions.
# 2004-10-22
# o BUG FIX: getRdMethods() returned empty \tabular{rll}{} if no methods
# exist, but this gives an error in R CMD Rdconv.
# 2004-10-18
# o BUG FIX: Invalid filenames and link names are now escaped.
# 2004-10-17
# o Added Rdoc comments.
#########################################################################
|
# Train/test split over 9 fold boundaries of data frame `d` (assumed to be
# defined in the calling environment); `folds` holds the starting row
# index of each fold.
rows <- nrow(d)
fold <- floor(rows/10)
folds <- c(1,fold,fold*2,fold*3,fold*4,fold*5,fold*6,fold*7,fold*8,fold*9)
for(j in 1:9){
  print(j)
  if( j == 1){
    # BUG FIX: "folds[j+1]+1:nrow(d)" parsed as folds[j+1] + (1:nrow(d)),
    # which indexed past the last row and filled `train` with NA rows.
    train <- d[(folds[j+1]+1):nrow(d),]
    test <- d[folds[j]:folds[j+1],]
    print(paste("train ", folds[j+1]+1,"-" , nrow(d)))
    print(paste("test ", folds[j],"-" , folds[j+1]))
  }else{
    if(j == 9){
      # "1:folds[j]-1" meant 1:(folds[j]-1); the old form produced index 0,
      # which R silently dropped, so it was correct only by accident.
      train <- d[1:(folds[j]-1),]
      test <- d[folds[j]:nrow(d),]
      print(paste("train ", 1,"-",folds[j]-1))
      print(paste("test ", folds[j],"-" , nrow(d)))
    }else {
      train1 <- d[1:(folds[j]-1),]
      train2 <- d[(folds[j+1]+1):nrow(d),]
      test <- d[folds[j]:folds[j+1],]
      # Bind the two training segments in one call instead of growing
      # `train` one row at a time (the old loop copied train on every
      # iteration, O(n^2) overall).
      train <- rbind(train1, train2)
      print(paste("train ", 1,"-",folds[j]-1))
      print(paste("test ", folds[j],"-" , folds[j+1]))
      print(paste("train ", folds[j+1]+1,"-",nrow(d)))
    }
  }
}
| /folds.R | no_license | gigacardoso/HMMinR | R | false | false | 904 | r | rows <- nrow(d)
# `folds` holds the starting row index of each fold of data frame `d`
# (assumed defined, with `rows <- nrow(d)`, in the calling environment).
fold <- floor(rows/10)
folds <- c(1,fold,fold*2,fold*3,fold*4,fold*5,fold*6,fold*7,fold*8,fold*9)
for(j in 1:9){
  print(j)
  if( j == 1){
    # BUG FIX: "folds[j+1]+1:nrow(d)" parsed as folds[j+1] + (1:nrow(d)),
    # which indexed past the last row and filled `train` with NA rows.
    train <- d[(folds[j+1]+1):nrow(d),]
    test <- d[folds[j]:folds[j+1],]
    print(paste("train ", folds[j+1]+1,"-" , nrow(d)))
    print(paste("test ", folds[j],"-" , folds[j+1]))
  }else{
    if(j == 9){
      # "1:folds[j]-1" meant 1:(folds[j]-1); the old form produced index 0,
      # which R silently dropped, so it was correct only by accident.
      train <- d[1:(folds[j]-1),]
      test <- d[folds[j]:nrow(d),]
      print(paste("train ", 1,"-",folds[j]-1))
      print(paste("test ", folds[j],"-" , nrow(d)))
    }else {
      train1 <- d[1:(folds[j]-1),]
      train2 <- d[(folds[j+1]+1):nrow(d),]
      test <- d[folds[j]:folds[j+1],]
      # Bind the two training segments in one call instead of growing
      # `train` one row at a time (the old loop copied train on every
      # iteration, O(n^2) overall).
      train <- rbind(train1, train2)
      print(paste("train ", 1,"-",folds[j]-1))
      print(paste("test ", folds[j],"-" , folds[j+1]))
      print(paste("train ", folds[j+1]+1,"-",nrow(d)))
    }
  }
}
|
# required_data(c("STG_TRACK_PIECE", "STG_TRACK"))
# input_track <- 2
# Build a wide per-slot track table: one row per track piece occurrence
# (GAME_SLOT_ID), with PIECE_ATTRIBUTE/START/FINISH and one 0-filled
# column per lane ("Lane1".."LaneK", K = widest piece on the track).
#
# input_track:     TRACK_ID to look up in STG_TRACK.
# STG_TRACK_PIECE: data.table of piece definitions (LANES, attributes).
# STG_TRACK:       data.table with TRACK_PIECE_VECTOR, a comma-separated
#                  sequence of TRACK_PIECE_IDs.
create_track_table_wide <- function(input_track, STG_TRACK_PIECE, STG_TRACK) {
  currtrack <- STG_TRACK[input_track == TRACK_ID, TRACK_PIECE_VECTOR]
  splitted_track <- data.table(TRACK_PIECE_ID = strsplit(currtrack, split = ",")[[1]])
  splitted_track[, order := seq_len(.N)]
  sscols <- STG_TRACK_PIECE[, .(TRACK_PIECE_ID, LANES, PIECE_ATTRIBUTE, START, FINISH)]
  # Join piece attributes onto the track sequence, then number the slots
  joinaa <- sscols[splitted_track, on = "TRACK_PIECE_ID"]
  joinaa[, ':=' (GAME_SLOT_ID = seq_len(.N),
                 order = NULL,
                 dcast_value = 0)]
  max_lanes <- joinaa[, max(LANES)]
  laneCols <- paste0("Lane", seq_len(max_lanes))
  # Repeat each slot once per lane and index the repeats within the slot
  row_rep <- joinaa[rep(seq_len(.N), LANES)][, Indx := seq_len(.N), by = GAME_SLOT_ID]
  wide <- dcast.data.table(row_rep, TRACK_PIECE_ID + PIECE_ATTRIBUTE + START + FINISH + GAME_SLOT_ID ~ Indx, value.var = "dcast_value")
  # setnames() renames by reference; the previous colnames()<- call
  # copied the whole table. (A local named `sort` also shadowed base::sort.)
  setnames(wide, (ncol(wide) - max_lanes + 1):ncol(wide), laneCols)
  wide[, TRACK_PIECE_ID := NULL]
  setorder(wide, GAME_SLOT_ID)
  wide[]
}
| /scripts/solution/create_track_table_wide.R | no_license | Laurigit/flAImme | R | false | false | 1,131 | r |
# required_data(c("STG_TRACK_PIECE", "STG_TRACK"))
# input_track <- 2
#
# Build a wide lookup table for one track: one row per game slot (a piece's
# position along the track), with one Lane<i> column per lane plus the piece
# attribute and the START/FINISH flags.
#
# Args (data.tables):
#   input_track     - TRACK_ID to look up in STG_TRACK.
#   STG_TRACK_PIECE - piece master data with TRACK_PIECE_ID, LANES,
#                     PIECE_ATTRIBUTE, START, FINISH.
#   STG_TRACK       - track master data with TRACK_ID and a comma-separated
#                     TRACK_PIECE_VECTOR of piece ids in track order.
# Returns: a data.table ordered by GAME_SLOT_ID.
create_track_table_wide <- function(input_track, STG_TRACK_PIECE, STG_TRACK) {
# Comma-separated list of piece ids for this track, in track order.
currtrack <- STG_TRACK[input_track == TRACK_ID, TRACK_PIECE_VECTOR]
splitted_track <- data.table(TRACK_PIECE_ID = strsplit(currtrack, split = ",")[[1]])
# Remember the track order so the join below can be re-sequenced.
splitted_track[, order := seq_len(.N)]
# Only the piece attributes needed in the output.
sscols <- STG_TRACK_PIECE[, .(TRACK_PIECE_ID, LANES, PIECE_ATTRIBUTE, START, FINISH)]
# Target shape: GAME_SLOT_ID, Lane1..LaneN, attribute, START/FINISH flags.
joinaa <- sscols[splitted_track, on = "TRACK_PIECE_ID"]
# `:=` updates joinaa by reference (data.table semantics): number the slots
# along the track and add a dummy value column for the dcast below.
joinaa[, ':=' (GAME_SLOT_ID = seq_len(.N),
order = NULL,
dcast_value = 0)]
max_lanes <- joinaa[, max(LANES)]
laneCols <- paste0("Lane", 1:max_lanes)
# Repeat each slot once per lane; Indx numbers the lanes within a slot.
row_rep <-joinaa[rep(1:.N,LANES)][,Indx:=1:.N,by=GAME_SLOT_ID]
# Pivot wide: one column per lane index, filled with the dummy 0s.
# NOTE(review): pieces with fewer than max_lanes lanes presumably get the
# dcast fill value (NA) in the surplus lane columns - confirm.
kaadettu <- dcast.data.table(row_rep, TRACK_PIECE_ID + PIECE_ATTRIBUTE + START + FINISH + GAME_SLOT_ID ~ Indx, value.var = "dcast_value")
# The lane columns are the trailing max_lanes columns; give them stable names.
colnames(kaadettu)[(length(kaadettu) - max_lanes + 1):length(kaadettu)] <- laneCols
kaadettu[, ':=' (TRACK_PIECE_ID = NULL)]
sort <- kaadettu[order(GAME_SLOT_ID)]
return(sort)
}
|
library(pracma)
library(xlsx)
library(sf)

# Average the 2015 depth-to-groundwater values (vector DGW2015, one value per
# mesh node, loaded from the .Rdata file below) over each Kern subregion of
# the C2VSim fine-grid mesh, and append the result to an Excel workbook.
load(file = "f:/UCDAVIS/C2VSIM_FG_OR/C2Vsim_FG_v2/ERS_proj/DGW2015.Rdata")
script_path <- "F:/UCDAVIS/C2VSIM_FG_OR/C2Vsim_FG_v2/ERS_proj/"
c2vsim_path <- "f:/UCDAVIS/C2VSIM_FG_OR/C2Vsim_FG_v2/C2VSimFG-BETA_PublicRelease/"

# Read the Kern subregions
kern <- read_sf(paste0(script_path, "gis_data/Kern_only.shp"))
kern_geom <- st_geometry(kern)

# Read the mesh node coordinates (whitespace-delimited file, 90 header lines).
NlinesSkip <- 90
nNodes <- 30179
XY <- read.table(file = paste0(c2vsim_path, "Preprocessor/C2VSimFG_Nodes.dat"),
                 header = FALSE, sep = "", skip = NlinesSkip, nrows = NlinesSkip + nNodes - NlinesSkip,
                 quote = "", fill = TRUE,
                 col.names = c("ID", "X", "Y"))

# Read the mesh elements (142 header lines). nd1..nd4 are node ids; nd4 == 0
# marks a triangle. (The original misleadingly reused the name nNodes here.)
NlinesSkip <- 142
nElems <- 32537
MSH <- read.table(file = paste0(c2vsim_path, "Preprocessor/C2VSimFG_Elements.dat"),
                  header = FALSE, sep = "", skip = NlinesSkip, nrows = nElems,
                  quote = "", fill = TRUE,
                  col.names = c("ID", "nd1", "nd2", "nd3", "nd4", "S"))

# Element barycenters: mean of the 4 corner nodes for quads, 3 for triangles.
quad_el <- which(MSH$nd4 != 0)
tri_el <- which(MSH$nd4 == 0)
cc <- matrix(data = 0, nrow = nrow(MSH), ncol = 2)
cc[quad_el, 1] <- (XY$X[MSH$nd1[quad_el]] + XY$X[MSH$nd2[quad_el]] +
                   XY$X[MSH$nd3[quad_el]] + XY$X[MSH$nd4[quad_el]]) / 4
cc[quad_el, 2] <- (XY$Y[MSH$nd1[quad_el]] + XY$Y[MSH$nd2[quad_el]] +
                   XY$Y[MSH$nd3[quad_el]] + XY$Y[MSH$nd4[quad_el]]) / 4
cc[tri_el, 1] <- (XY$X[MSH$nd1[tri_el]] + XY$X[MSH$nd2[tri_el]] + XY$X[MSH$nd3[tri_el]]) / 3
cc[tri_el, 2] <- (XY$Y[MSH$nd1[tri_el]] + XY$Y[MSH$nd2[tri_el]] + XY$Y[MSH$nd3[tri_el]]) / 3

# For each subregion, find the ids of elements whose barycenter falls inside
# its (first) polygon ring. seq_len() is safe for zero-row inputs, unlike 1:n.
kern_elems <- vector("list", nrow(kern))
for (i in seq_len(nrow(kern))) {
  ring <- kern_geom[[i]][[1]][[1]]   # outer ring of the i-th polygon
  kern_elems[[i]] <- which(inpolygon(cc[, 1], cc[, 2], ring[, 1], ring[, 2]))
}

# Average the nodal depth over the unique nodes touched by each subregion.
AvDGW <- vector(mode = "numeric", length = nrow(kern))
for (i in seq_len(nrow(kern))) {
  el_ids <- kern_elems[[i]]
  subRegNodes <- unique(as.vector(t(MSH[el_ids, 2:5])))
  subRegNodes <- subRegNodes[subRegNodes != 0]   # 0 is triangle padding, not a node id
  AvDGW[i] <- mean(DGW2015[subRegNodes])
}

dgw_df <- data.frame(AvDGW, row.names = kern$KernDistri)
names(dgw_df)[names(dgw_df) == "AvDGW"] <- "DGW2015"
write.xlsx(x = dgw_df, file = paste0(script_path, "Depth2GW_2015.xlsx"), sheetName = "Kern", append = TRUE)
| /ERS_proj/KernDGW.R | no_license | giorgk/C2Vsim_FG_v2 | R | false | false | 2,467 | r | library(pracma)
library(xlsx)
library(sf)

# Average the 2015 depth-to-groundwater values (vector DGW2015, one value per
# mesh node, loaded from the .Rdata file below) over each Kern subregion of
# the C2VSim fine-grid mesh, and append the result to an Excel workbook.
# (pracma, loaded above, provides inpolygon().)
load(file = "f:/UCDAVIS/C2VSIM_FG_OR/C2Vsim_FG_v2/ERS_proj/DGW2015.Rdata")
script_path <- "F:/UCDAVIS/C2VSIM_FG_OR/C2Vsim_FG_v2/ERS_proj/"
c2vsim_path <- "f:/UCDAVIS/C2VSIM_FG_OR/C2Vsim_FG_v2/C2VSimFG-BETA_PublicRelease/"

# Read the Kern subregions
kern <- read_sf(paste0(script_path, "gis_data/Kern_only.shp"))
kern_geom <- st_geometry(kern)

# Read the mesh node coordinates (whitespace-delimited file, 90 header lines).
NlinesSkip <- 90
nNodes <- 30179
XY <- read.table(file = paste0(c2vsim_path, "Preprocessor/C2VSimFG_Nodes.dat"),
                 header = FALSE, sep = "", skip = NlinesSkip, nrows = nNodes,
                 quote = "", fill = TRUE,
                 col.names = c("ID", "X", "Y"))

# Read the mesh elements (142 header lines). nd1..nd4 are node ids; nd4 == 0
# marks a triangle. (The original misleadingly reused the name nNodes here.)
NlinesSkip <- 142
nElems <- 32537
MSH <- read.table(file = paste0(c2vsim_path, "Preprocessor/C2VSimFG_Elements.dat"),
                  header = FALSE, sep = "", skip = NlinesSkip, nrows = nElems,
                  quote = "", fill = TRUE,
                  col.names = c("ID", "nd1", "nd2", "nd3", "nd4", "S"))

# Element barycenters: mean of the 4 corner nodes for quads, 3 for triangles.
quad_el <- which(MSH$nd4 != 0)
tri_el <- which(MSH$nd4 == 0)
cc <- matrix(data = 0, nrow = nrow(MSH), ncol = 2)
cc[quad_el, 1] <- (XY$X[MSH$nd1[quad_el]] + XY$X[MSH$nd2[quad_el]] +
                   XY$X[MSH$nd3[quad_el]] + XY$X[MSH$nd4[quad_el]]) / 4
cc[quad_el, 2] <- (XY$Y[MSH$nd1[quad_el]] + XY$Y[MSH$nd2[quad_el]] +
                   XY$Y[MSH$nd3[quad_el]] + XY$Y[MSH$nd4[quad_el]]) / 4
cc[tri_el, 1] <- (XY$X[MSH$nd1[tri_el]] + XY$X[MSH$nd2[tri_el]] + XY$X[MSH$nd3[tri_el]]) / 3
cc[tri_el, 2] <- (XY$Y[MSH$nd1[tri_el]] + XY$Y[MSH$nd2[tri_el]] + XY$Y[MSH$nd3[tri_el]]) / 3

# For each subregion, find the ids of elements whose barycenter falls inside
# its (first) polygon ring. seq_len() is safe for zero-row inputs, unlike 1:n.
kern_elems <- vector("list", nrow(kern))
for (i in seq_len(nrow(kern))) {
  ring <- kern_geom[[i]][[1]][[1]]   # outer ring of the i-th polygon
  kern_elems[[i]] <- which(inpolygon(cc[, 1], cc[, 2], ring[, 1], ring[, 2]))
}

# Average the nodal depth over the unique nodes touched by each subregion.
AvDGW <- vector(mode = "numeric", length = nrow(kern))
for (i in seq_len(nrow(kern))) {
  el_ids <- kern_elems[[i]]
  subRegNodes <- unique(as.vector(t(MSH[el_ids, 2:5])))
  subRegNodes <- subRegNodes[subRegNodes != 0]   # 0 is triangle padding, not a node id
  AvDGW[i] <- mean(DGW2015[subRegNodes])
}

dgw_df <- data.frame(AvDGW, row.names = kern$KernDistri)
names(dgw_df)[names(dgw_df) == "AvDGW"] <- "DGW2015"
write.xlsx(x = dgw_df, file = paste0(script_path, "Depth2GW_2015.xlsx"), sheetName = "Kern", append = TRUE)
|
library(ggplot2)
# For each space-weather index: read its accident-type correlation table,
# sort the rows by |correlation| (descending), overwrite the CSV with the
# sorted table, and save a horizontal bar chart of the correlations.
#
# The three vectors below are parallel: the column-name token used in file
# names, the directory name, and the human-readable plot title.
cosmicColumnName <- c("Ap", "Dst", "Kp", "Lyman", "Sunspot", "Solar", "Proton10", "Proton30", "Proton60", "Temperature", "Density", "Speed", "PCN", "PCS")
nameDirs <- c("Ap_index", "Dst_index", "Kp_index", "Lyman_alpha_index", "R_sunspot_number", "Solar_index_F10.7", "Proton_flux_over_10_MeV", "Proton_flux_over_30_MeV", "Proton_flux_over_60_MeV", "Solar_wind_temperature", "Solar_wind_density", "Solar_wind_speed", "Polar_cap_north", "Polar_cap_south")
cosmicName <- c("Ap index", "Dst index", "Kp index", "Lyman alpha solar index", "R sunspot number", "Solar index_F10.7", "Proton flux over 10 MeV", "Proton flux over 30 MeV", "Proton flux over 60 MeV", "Solar wind temperature", "Solar wind density", "Solar wind speed", "Polar cap north (Thule)", "Polar cap south (Vostok)")
cosmicCount <- 1
for(nameDir in nameDirs)
{
# Work inside the index's directory; undone by setwd('..') at loop end.
setwd(nameDir)
nameFile <- c('porovnanie',cosmicColumnName[cosmicCount],'.csv')
nameFile <- paste(nameFile, collapse='')
# Sort by absolute correlation and overwrite the CSV in place.
table <- read.csv(nameFile,sep=';')
changeTable <- table[order(abs(table$Correlation),decreasing=TRUE),]
write.table(changeTable,file=nameFile,sep=';',append=FALSE,row.names=FALSE,col.names=TRUE)
# Re-read the file that was just written. NOTE(review): `changeTable`
# already holds this data, so the re-read looks redundant - kept as-is.
nameFile <- c('porovnanie',cosmicColumnName[cosmicCount],'.csv')
nameFile <- paste(nameFile, collapse='')
open <- read.csv(nameFile,sep=';')
Traffic <- open$Traffic
Correlation <- open$Correlation
table <- data.frame(Correlation,Traffic)
# Horizontal bars ordered by |correlation| (abs(-x) == abs(x)), labelled
# with the value; legend suppressed since the axis already names the bars.
ggplot(data=table,aes(x=reorder(Traffic,abs(-Correlation)),y=Correlation, fill=Traffic)) +
geom_bar(stat='identity',position='dodge') +
geom_text(aes(label=sprintf("%.2f",Correlation),y=Correlation),position = position_dodge(width=1),size=3) +
coord_flip() +
xlab('Typ doprávnej nehodovostí') +
labs(title=paste(c(cosmicName[cosmicCount],", 2010-2018"),collapse="")) +
theme(legend.position="none")
# ggsave writes the last plot; the extra `quality` argument is forwarded to
# the jpeg device via `...`. NOTE(review): the ggplot object above is built
# but not printed/assigned; confirm ggsave picks it up when the script is
# run non-interactively.
ggsave(paste(c('porovnanieTypovNehodovosti.jpg'),collapse=""),width=14,quality=50)
# Remove the stray Rplots.pdf a non-interactive session may create.
if(file.exists('Rplots.pdf'))
file.remove('Rplots.pdf')
setwd('..')
cosmicCount <- cosmicCount + 1
}
| /spracovane_data/2010-2018/9-rocna_korelacia/Statistika/porovnanieTypovNehodovosti/vizualizaciaPorovnanieTypovNehodovosti.r | no_license | astronomer93/dopravna_nehodovost | R | false | false | 2,030 | r | library(ggplot2)
# For each space-weather index: read its accident-type correlation table,
# sort the rows by |correlation| (descending), overwrite the CSV with the
# sorted table, and save a horizontal bar chart of the correlations.
#
# The three vectors below are parallel: the column-name token used in file
# names, the directory name, and the human-readable plot title.
cosmicColumnName <- c("Ap", "Dst", "Kp", "Lyman", "Sunspot", "Solar", "Proton10", "Proton30", "Proton60", "Temperature", "Density", "Speed", "PCN", "PCS")
nameDirs <- c("Ap_index", "Dst_index", "Kp_index", "Lyman_alpha_index", "R_sunspot_number", "Solar_index_F10.7", "Proton_flux_over_10_MeV", "Proton_flux_over_30_MeV", "Proton_flux_over_60_MeV", "Solar_wind_temperature", "Solar_wind_density", "Solar_wind_speed", "Polar_cap_north", "Polar_cap_south")
cosmicName <- c("Ap index", "Dst index", "Kp index", "Lyman alpha solar index", "R sunspot number", "Solar index_F10.7", "Proton flux over 10 MeV", "Proton flux over 30 MeV", "Proton flux over 60 MeV", "Solar wind temperature", "Solar wind density", "Solar wind speed", "Polar cap north (Thule)", "Polar cap south (Vostok)")
cosmicCount <- 1
for(nameDir in nameDirs)
{
# Work inside the index's directory; undone by setwd('..') at loop end.
setwd(nameDir)
nameFile <- c('porovnanie',cosmicColumnName[cosmicCount],'.csv')
nameFile <- paste(nameFile, collapse='')
# Sort by absolute correlation and overwrite the CSV in place.
table <- read.csv(nameFile,sep=';')
changeTable <- table[order(abs(table$Correlation),decreasing=TRUE),]
write.table(changeTable,file=nameFile,sep=';',append=FALSE,row.names=FALSE,col.names=TRUE)
# Re-read the file that was just written. NOTE(review): `changeTable`
# already holds this data, so the re-read looks redundant - kept as-is.
nameFile <- c('porovnanie',cosmicColumnName[cosmicCount],'.csv')
nameFile <- paste(nameFile, collapse='')
open <- read.csv(nameFile,sep=';')
Traffic <- open$Traffic
Correlation <- open$Correlation
table <- data.frame(Correlation,Traffic)
# Horizontal bars ordered by |correlation| (abs(-x) == abs(x)), labelled
# with the value; legend suppressed since the axis already names the bars.
ggplot(data=table,aes(x=reorder(Traffic,abs(-Correlation)),y=Correlation, fill=Traffic)) +
geom_bar(stat='identity',position='dodge') +
geom_text(aes(label=sprintf("%.2f",Correlation),y=Correlation),position = position_dodge(width=1),size=3) +
coord_flip() +
xlab('Typ doprávnej nehodovostí') +
labs(title=paste(c(cosmicName[cosmicCount],", 2010-2018"),collapse="")) +
theme(legend.position="none")
# ggsave writes the last plot; the extra `quality` argument is forwarded to
# the jpeg device via `...`. NOTE(review): the ggplot object above is built
# but not printed/assigned; confirm ggsave picks it up when the script is
# run non-interactively.
ggsave(paste(c('porovnanieTypovNehodovosti.jpg'),collapse=""),width=14,quality=50)
# Remove the stray Rplots.pdf a non-interactive session may create.
if(file.exists('Rplots.pdf'))
file.remove('Rplots.pdf')
setwd('..')
cosmicCount <- cosmicCount + 1
}
|
#!/bin/env Rscript
## Utility for mass automated download of QxMD Paper Collections
## utilizing RSelenium, rvest, and good ol' fashioned parsing

## Parse arguments -------------------------------------------------------------
library(optparse)

## Help text
option_list <- list(
  make_option(c("-d", "--dest"), action = "store", type = "character", default = "~/Documents/scraped/",
              help = "Directory where scraped PDFs will be placed. [default: '~/Documents/scraped/']"),
  make_option(c("-s", "--site"), action = "store", type = "character", default = NULL,
              help = "Site where public Read by QxMD collection is held"))

parser <- OptionParser(usage = "%prog [options]", option_list = option_list)
arguments <- parse_args(parser, positional_arguments = 0)
opt <- arguments$options

## Save args
dest <- opt$dest
site <- opt$site
base <- "https://www.readbyqxmd.com"

## Error check
if (is.null(site)) {
  stop("Error: specify collections site. Check your email.")
}

## Create the destination directory if needed. dir.create() is portable and
## safe for paths containing spaces, unlike shelling out to `mkdir -p`.
if (!dir.exists(dest)) {
  dir.create(dest, recursive = TRUE)
}

## Run the pipeline ------------------------------------------------------------
## Load Libraries
library(RSelenium)
library(rvest)
library(stringr)
library(curl)

## Run first pass
source("navSite.R") ## Navigate collection; load all papers in collection
source("extractElements.R") ## Parse each paper's URL
source("grabPDFs.R") ## perform first pass downloading; sets flag = 1 if fails exist

## Run second pass in case of failed downloads (`flag` is set by grabPDFs.R)
if (flag == 1) {
  source("failPDFs.R") ## finds additional links; then tries again to download PDF
}

## Close all windows - user can do this..
## remDr$close()
## remDr$closeWindow()
## remDr$closeServer()

## Define Site - for testing
## site <- "https://www.readbyqxmd.com/collection/6717"
## base <- "https://www.readbyqxmd.com"
## dest <- "~/Documents/dl2"
| /collectionScraper.R | no_license | robertamezquita/collectionScraper | R | false | false | 1,921 | r | #!/bin/env Rscript
## Utility for mass automated download of QxMD Paper Collections
## utilizing RSelenium, rvest, and good ol' fashioned parsing

## Parse arguments -------------------------------------------------------------
library(optparse)

## Help text
option_list <- list(
  make_option(c("-d", "--dest"), action = "store", type = "character", default = "~/Documents/scraped/",
              help = "Directory where scraped PDFs will be placed. [default: '~/Documents/scraped/']"),
  make_option(c("-s", "--site"), action = "store", type = "character", default = NULL,
              help = "Site where public Read by QxMD collection is held"))

parser <- OptionParser(usage = "%prog [options]", option_list = option_list)
arguments <- parse_args(parser, positional_arguments = 0)
opt <- arguments$options

## Save args
dest <- opt$dest
site <- opt$site
base <- "https://www.readbyqxmd.com"

## Error check
if (is.null(site)) {
  stop("Error: specify collections site. Check your email.")
}

## Create the destination directory if needed. dir.create() is portable and
## safe for paths containing spaces, unlike shelling out to `mkdir -p`.
if (!dir.exists(dest)) {
  dir.create(dest, recursive = TRUE)
}

## Run the pipeline ------------------------------------------------------------
## Load Libraries
library(RSelenium)
library(rvest)
library(stringr)
library(curl)

## Run first pass
source("navSite.R") ## Navigate collection; load all papers in collection
source("extractElements.R") ## Parse each paper's URL
source("grabPDFs.R") ## perform first pass downloading; sets flag = 1 if fails exist

## Run second pass in case of failed downloads (`flag` is set by grabPDFs.R)
if (flag == 1) {
  source("failPDFs.R") ## finds additional links; then tries again to download PDF
}

## Close all windows - user can do this..
## remDr$close()
## remDr$closeWindow()
## remDr$closeServer()

## Define Site - for testing
## site <- "https://www.readbyqxmd.com/collection/6717"
## base <- "https://www.readbyqxmd.com"
## dest <- "~/Documents/dl2"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-BootPos.R
\docType{class}
\name{BootPos-class}
\alias{BootPos}
\alias{BootPos-class}
\title{Class for Generation of Bootstrapped Replications of a Time Series.}
\description{
\code{BootPos} is an S4 class that provides a common interface
to different algorithms that can be used for implementation of a block
bootstrap procedure in the time domain.
}
\details{
After initialization the bootstrapping can be performed by applying
\code{getPositions} to the object.
Different block bootstraps are implemented by creating a subclass together
with a \code{getPositions} method that contains the implementation of the
block resampling procedure.
Currently the following implementations are available:
\itemize{
\item \code{\link{MovingBlocks}} and \code{\link{getPositions-MovingBlocks}}.
}
}
\section{Slots}{
\describe{
\item{\code{l}}{the (expected) block length for the block bootstrap methods}
\item{\code{N}}{number of available observations to bootstrap from}
}}
\references{
Lahiri, S. N. (1999). Theoretical Comparisons of Block Bootstrap Methods.
\emph{The Annals of Statistics}, \bold{27}(1), 386--404.
}
\keyword{S4-classes}
| /man/BootPos-class.Rd | no_license | LiangCZhang/quantspec | R | false | true | 1,223 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-BootPos.R
\docType{class}
\name{BootPos-class}
\alias{BootPos}
\alias{BootPos-class}
\title{Class for Generation of Bootstrapped Replications of a Time Series.}
\description{
\code{BootPos} is an S4 class that provides a common interface
to different algorithms that can be used for implementation of a block
bootstrap procedure in the time domain.
}
\details{
After initialization the bootstrapping can be performed by applying
\code{getPositions} to the object.
Different block bootstraps are implemented by creating a subclass together
with a \code{getPositions} method that contains the implementation of the
block resampling procedure.
Currently the following implementations are available:
\itemize{
\item \code{\link{MovingBlocks}} and \code{\link{getPositions-MovingBlocks}}.
}
}
\section{Slots}{
\describe{
\item{\code{l}}{the (expected) block length for the block bootstrap methods}
\item{\code{N}}{number of available observations to bootstrap from}
}}
\references{
Lahiri, S. N. (1999). Theoretical Comparisons of Block Bootstrap Methods.
\emph{The Annals of Statistics}, \bold{27}(1), 386--404.
}
\keyword{S4-classes}
|
# Read the power-consumption file without factors (as.is = TRUE);
# na.strings = "?" maps the file's missing-value marker to NA so the
# measurement columns come in as numeric.
hh_power_cons <- read.table("household_power_consumption.txt",
                            header = TRUE, sep = ";", as.is = TRUE, na.strings = "?")
# convert Date column into date:
hh_power_cons$Date2 <- as.Date(hh_power_cons$Date, "%d/%m/%Y")
# subset to the 2007-02-01 and 2007-02-02 dates (the strings are coerced to Date):
hh_power_cons2007 <- hh_power_cons[hh_power_cons$Date2 >= "2007-02-01" & hh_power_cons$Date2 <= "2007-02-02",]
# Combine Date and Time into one datetime column. Use as.POSIXct rather than
# as.POSIXlt: POSIXlt is a list-based structure and is fragile when stored as
# a data.frame column; plotting behaves the same with POSIXct.
hh_power_cons2007$DateTime <- as.POSIXct(paste(hh_power_cons2007$Date, hh_power_cons2007$Time), format = "%d/%m/%Y %H:%M:%S")
# remove large data frame from memory:
rm(hh_power_cons)
# Defensive conversion: Global_active_power is normally already numeric thanks
# to na.strings above, in which case this is a no-op.
hh_power_cons2007$Global_active_power <- as.numeric(hh_power_cons2007$Global_active_power)
# open PNG device
png("plot2.png", width = 480, height = 480)
# plot:
plot(hh_power_cons2007$DateTime, hh_power_cons2007$Global_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l")
# close png device:
dev.off()
| /plot2.R | no_license | arne7777777/ExData_Plotting1 | R | false | false | 1,019 | r | # read csv file without factors
# Read the power-consumption file without factors (as.is = TRUE);
# na.strings = "?" maps the file's missing-value marker to NA so the
# measurement columns come in as numeric.
hh_power_cons <- read.table("household_power_consumption.txt",
                            header = TRUE, sep = ";", as.is = TRUE, na.strings = "?")
# convert Date column into date:
hh_power_cons$Date2 <- as.Date(hh_power_cons$Date, "%d/%m/%Y")
# subset to the 2007-02-01 and 2007-02-02 dates (the strings are coerced to Date):
hh_power_cons2007 <- hh_power_cons[hh_power_cons$Date2 >= "2007-02-01" & hh_power_cons$Date2 <= "2007-02-02",]
# Combine Date and Time into one datetime column. Use as.POSIXct rather than
# as.POSIXlt: POSIXlt is a list-based structure and is fragile when stored as
# a data.frame column; plotting behaves the same with POSIXct.
hh_power_cons2007$DateTime <- as.POSIXct(paste(hh_power_cons2007$Date, hh_power_cons2007$Time), format = "%d/%m/%Y %H:%M:%S")
# remove large data frame from memory:
rm(hh_power_cons)
# Defensive conversion: Global_active_power is normally already numeric thanks
# to na.strings above, in which case this is a no-op.
hh_power_cons2007$Global_active_power <- as.numeric(hh_power_cons2007$Global_active_power)
# open PNG device
png("plot2.png", width = 480, height = 480)
# plot:
plot(hh_power_cons2007$DateTime, hh_power_cons2007$Global_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l")
# close png device:
dev.off()
|
# taken from https://gist.github.com/kdauria/524eade46135f6348140
# modified to only add r^2, not the equation as well; and b
#
# Drop-in companion to ggplot2::stat_smooth(): same arguments and model
# options, but the stat (StatSmoothFunc) emits a single text annotation
# (second model coefficient and r^2) instead of the smoothed curve.
# xpos/ypos optionally fix where that label is placed.
stat_smooth_func <- function(mapping = NULL, data = NULL,
                             geom = "smooth", position = "identity",
                             ...,
                             method = "auto",
                             formula = y ~ x,
                             se = TRUE,
                             n = 80,
                             span = 0.75,
                             fullrange = FALSE,
                             level = 0.95,
                             method.args = list(),
                             na.rm = FALSE,
                             show.legend = NA,
                             inherit.aes = TRUE,
                             xpos = NULL,
                             ypos = NULL) {
  # Standard ggplot2 layer constructor: all smoothing options are forwarded
  # to the stat through `params`; extra `...` arguments are forwarded too.
  layer(
    data = data,
    mapping = mapping,
    stat = StatSmoothFunc,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      method = method,
      formula = formula,
      se = se,
      n = n,
      fullrange = fullrange,
      level = level,
      na.rm = na.rm,
      method.args = method.args,
      span = span,
      xpos = xpos,
      ypos = ypos,
      ...
    )
  )
}
# ggproto Stat: performs the same model selection and fitting as ggplot2's
# StatSmooth, but compute_group() returns a one-row data frame carrying a
# plotmath label (second model coefficient and r^2) at (xpos, ypos), instead
# of the smoothed curve.
StatSmoothFunc <- ggproto("StatSmooth", Stat,
  setup_params = function(data, params) {
    # Figure out what type of smoothing to do: loess for small datasets,
    # gam with a cubic regression basis for large data
    # This is based on the size of the _largest_ group.
    if (identical(params$method, "auto")) {
      max_group <- max(table(data$group))
      if (max_group < 1000) {
        params$method <- "loess"
      } else {
        params$method <- "gam"
        params$formula <- y ~ s(x, bs = "cs")
      }
    }
    # Resolve the "gam" string to the actual mgcv function.
    if (identical(params$method, "gam")) {
      params$method <- mgcv::gam
    }
    params
  },

  compute_group = function(data, scales, method = "auto", formula = y~x,
                           se = TRUE, n = 80, span = 0.75, fullrange = FALSE,
                           xseq = NULL, level = 0.95, method.args = list(),
                           na.rm = FALSE, xpos=NULL, ypos=NULL) {
    if (length(unique(data$x)) < 2) {
      # Not enough data to perform fit
      return(data.frame())
    }
    if (is.null(data$weight)) data$weight <- 1

    # Evaluation grid carried over from StatSmooth. NOTE(review): xseq is
    # computed but not used below, since only the fitted model's
    # coefficients are reported.
    if (is.null(xseq)) {
      if (is.integer(data$x)) {
        if (fullrange) {
          xseq <- scales$x$dimension()
        } else {
          xseq <- sort(unique(data$x))
        }
      } else {
        if (fullrange) {
          range <- scales$x$dimension()
        } else {
          range <- range(data$x, na.rm = TRUE)
        }
        xseq <- seq(range[1], range[2], length.out = n)
      }
    }

    # Special case span because it's the most commonly used model argument
    if (identical(method, "loess")) {
      method.args$span <- span
    }

    if (is.character(method)) method <- match.fun(method)

    # Fit via do.call with quoted formula/data/weights so the fitting
    # function receives them unevaluated, as in StatSmooth.
    base.args <- list(quote(formula), data = quote(data), weights = quote(weight))
    model <- do.call(method, c(base.args, method.args))

    # Build the plotmath label: 'm' == <second coefficient> , r^2 = <r^2>.
    # For the default y ~ x formula, coef(m)[2] is the slope.
    m = model
    eq <- substitute('m' == b*','~~italic(r)^2~"="~r2,
                     list( b = format(coef(m)[2], digits = 2),
                           r2 = format(summary(m)$r.squared, digits = 2)))
    func_string = as.character(as.expression(eq))

    # Default label position near the data-range corner.
    # NOTE(review): the 0.9 factors assume positive x/y values - confirm.
    if(is.null(xpos)) xpos = min(data$x)*0.9
    if(is.null(ypos)) ypos = max(data$y)*0.9
    data.frame(x=xpos, y=ypos, label=func_string)
  },

  required_aes = c("x", "y")
)
| /functions/stat_smooth_func.R | no_license | jdeines/RS_irrigation | R | false | false | 5,062 | r | # taken from https://gist.github.com/kdauria/524eade46135f6348140
# modified to only add r^2, not the equation as well; and b
#
# Drop-in companion to ggplot2::stat_smooth(): same arguments and model
# options, but the stat (StatSmoothFunc) emits a single text annotation
# (second model coefficient and r^2) instead of the smoothed curve.
# xpos/ypos optionally fix where that label is placed.
stat_smooth_func <- function(mapping = NULL, data = NULL,
                             geom = "smooth", position = "identity",
                             ...,
                             method = "auto",
                             formula = y ~ x,
                             se = TRUE,
                             n = 80,
                             span = 0.75,
                             fullrange = FALSE,
                             level = 0.95,
                             method.args = list(),
                             na.rm = FALSE,
                             show.legend = NA,
                             inherit.aes = TRUE,
                             xpos = NULL,
                             ypos = NULL) {
  # Standard ggplot2 layer constructor: all smoothing options are forwarded
  # to the stat through `params`; extra `...` arguments are forwarded too.
  layer(
    data = data,
    mapping = mapping,
    stat = StatSmoothFunc,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      method = method,
      formula = formula,
      se = se,
      n = n,
      fullrange = fullrange,
      level = level,
      na.rm = na.rm,
      method.args = method.args,
      span = span,
      xpos = xpos,
      ypos = ypos,
      ...
    )
  )
}
# ggproto Stat: performs the same model selection and fitting as ggplot2's
# StatSmooth, but compute_group() returns a one-row data frame carrying a
# plotmath label (second model coefficient and r^2) at (xpos, ypos), instead
# of the smoothed curve.
StatSmoothFunc <- ggproto("StatSmooth", Stat,
  setup_params = function(data, params) {
    # Figure out what type of smoothing to do: loess for small datasets,
    # gam with a cubic regression basis for large data
    # This is based on the size of the _largest_ group.
    if (identical(params$method, "auto")) {
      max_group <- max(table(data$group))
      if (max_group < 1000) {
        params$method <- "loess"
      } else {
        params$method <- "gam"
        params$formula <- y ~ s(x, bs = "cs")
      }
    }
    # Resolve the "gam" string to the actual mgcv function.
    if (identical(params$method, "gam")) {
      params$method <- mgcv::gam
    }
    params
  },

  compute_group = function(data, scales, method = "auto", formula = y~x,
                           se = TRUE, n = 80, span = 0.75, fullrange = FALSE,
                           xseq = NULL, level = 0.95, method.args = list(),
                           na.rm = FALSE, xpos=NULL, ypos=NULL) {
    if (length(unique(data$x)) < 2) {
      # Not enough data to perform fit
      return(data.frame())
    }
    if (is.null(data$weight)) data$weight <- 1

    # Evaluation grid carried over from StatSmooth. NOTE(review): xseq is
    # computed but not used below, since only the fitted model's
    # coefficients are reported.
    if (is.null(xseq)) {
      if (is.integer(data$x)) {
        if (fullrange) {
          xseq <- scales$x$dimension()
        } else {
          xseq <- sort(unique(data$x))
        }
      } else {
        if (fullrange) {
          range <- scales$x$dimension()
        } else {
          range <- range(data$x, na.rm = TRUE)
        }
        xseq <- seq(range[1], range[2], length.out = n)
      }
    }

    # Special case span because it's the most commonly used model argument
    if (identical(method, "loess")) {
      method.args$span <- span
    }

    if (is.character(method)) method <- match.fun(method)

    # Fit via do.call with quoted formula/data/weights so the fitting
    # function receives them unevaluated, as in StatSmooth.
    base.args <- list(quote(formula), data = quote(data), weights = quote(weight))
    model <- do.call(method, c(base.args, method.args))

    # Build the plotmath label: 'm' == <second coefficient> , r^2 = <r^2>.
    # For the default y ~ x formula, coef(m)[2] is the slope.
    m = model
    eq <- substitute('m' == b*','~~italic(r)^2~"="~r2,
                     list( b = format(coef(m)[2], digits = 2),
                           r2 = format(summary(m)$r.squared, digits = 2)))
    func_string = as.character(as.expression(eq))

    # Default label position near the data-range corner.
    # NOTE(review): the 0.9 factors assume positive x/y values - confirm.
    if(is.null(xpos)) xpos = min(data$x)*0.9
    if(is.null(ypos)) ypos = max(data$y)*0.9
    data.frame(x=xpos, y=ypos, label=func_string)
  },

  required_aes = c("x", "y")
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/htmlTable_helpers.R
\name{prAddCells}
\alias{prAddCells}
\title{Add a cell}
\usage{
prAddCells(rowcells, cellcode, align, style, cgroup_spacer_cells, has_rn_col,
col.columns, offset = 1, css.cell)
}
\arguments{
\item{rowcells}{The cells with the values that are to be added}
\item{cellcode}{Type of cell, can either be \code{th} or \code{td}}
\item{align}{A character strings specifying column alignments, defaulting to
\code{\link[base]{paste}(rep('c',ncol(x)),collapse='')} to center. Valid alignments are
l = left, c = center and r = right. You can also specify \code{align='c|c'} and
other LaTeX tabular formatting. If you want to set the alignment of the
rownames this string needs to be \code{ncol(x) + 1}, otherwise it automatically
pads the string with a left alignment for the rownames.}
\item{style}{The cell style}
\item{cgroup_spacer_cells}{The number of cells that occur between
columns due to the cgroup arguments.}
\item{has_rn_col}{Due to the alignment issue we need to keep track
of if there has already been printed a rowname column or not and therefore
we have this has_rn_col that is either 0 or 1.}
\item{col.columns}{Alternating colors for each column.}
\item{offset}{For rgroup rows there may be an offset != 1}
\item{css.cell}{The css.cell but only for this row compared to the htmlTable matrix}
}
\value{
\code{string} Returns the string with the new cell elements
}
\description{
Adds a row of cells <td>val</td><td>...</td> to a table string for
\code{\link{htmlTable}}
}
\seealso{
Other hidden helper functions for \code{\link{htmlTable}}: \code{\link{prAddSemicolon2StrEnd}},
\code{\link{prEscapeHtml}},
\code{\link{prGetCgroupHeader}},
\code{\link{prGetRowlabelPos}}, \code{\link{prGetStyle}},
\code{\link{prPrepareAlign}},
\code{\link{prPrepareCgroup}}, \code{\link{prTblNo}}
}
\keyword{internal}
| /man/prAddCells.Rd | no_license | ghaarsma/htmlTable | R | false | true | 1,927 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/htmlTable_helpers.R
\name{prAddCells}
\alias{prAddCells}
\title{Add a cell}
\usage{
prAddCells(rowcells, cellcode, align, style, cgroup_spacer_cells, has_rn_col,
col.columns, offset = 1, css.cell)
}
\arguments{
\item{rowcells}{The cells with the values that are to be added}
\item{cellcode}{Type of cell, can either be \code{th} or \code{td}}
\item{align}{A character strings specifying column alignments, defaulting to
\code{\link[base]{paste}(rep('c',ncol(x)),collapse='')} to center. Valid alignments are
l = left, c = center and r = right. You can also specify \code{align='c|c'} and
other LaTeX tabular formatting. If you want to set the alignment of the
rownames this string needs to be \code{ncol(x) + 1}, otherwise it automatically
pads the string with a left alignment for the rownames.}
\item{style}{The cell style}
\item{cgroup_spacer_cells}{The number of cells that occur between
columns due to the cgroup arguments.}
\item{has_rn_col}{Due to the alignment issue we need to keep track
of if there has already been printed a rowname column or not and therefore
we have this has_rn_col that is either 0 or 1.}
\item{col.columns}{Alternating colors for each column.}
\item{offset}{For rgroup rows there may be an offset != 1}
\item{css.cell}{The css.cell but only for this row compared to the htmlTable matrix}
}
\value{
\code{string} Returns the string with the new cell elements
}
\description{
Adds a row of cells <td>val</td><td>...</td> to a table string for
\code{\link{htmlTable}}
}
\seealso{
Other hidden helper functions for \code{\link{htmlTable}}: \code{\link{prAddSemicolon2StrEnd}},
\code{\link{prEscapeHtml}},
\code{\link{prGetCgroupHeader}},
\code{\link{prGetRowlabelPos}}, \code{\link{prGetStyle}},
\code{\link{prPrepareAlign}},
\code{\link{prPrepareCgroup}}, \code{\link{prTblNo}}
}
\keyword{internal}
|
#' @export
#' @title Download Southern Oscillation Index data
#'
#'
#' @description The Southern Oscillation Index is defined as the standardized difference between barometric readings at Darwin, Australia and Tahiti.
#'
#' @return
#' \itemize{
#' \item Date: Date object that uses the first of the month as a placeholder. Date formatted as date on the first of the month because R only supports one partial of date time
#' \item Month: Month of record
#' \item Year: Year of record
#' \item SOI: Southern Oscillation Index
#' \item SOI_3MON_AVG: 3 Month Average Southern Oscillation Index
#' }
#' @examples
#' soi <- download_soi()
#' plot(x = soi$Date, y = soi$SOI, type = "l")
#'
#' @references \url{https://www.ncdc.noaa.gov/teleconnections/enso/indicators/soi/}
## Function to bring in SOI data from NOAA (requires network access)
download_soi <- function(){
## Skip the 2 descriptive header lines and name the two columns ourselves
soi = readr::read_csv(
"https://www.ncdc.noaa.gov/teleconnections/enso/indicators/soi/data.csv",
skip = 2, col_names = c("Date","SOI")
)
## Create Date formatted as date (raw value + "01" -> first of the month;
## presumably the raw Date is YYYYMM - verify against the source file)
soi$Date = lubridate::ymd(paste0(soi$Date, "01"))
## Create Year and Month columns
soi$Month = lubridate::month(soi$Date, abbr = TRUE, label = TRUE)
soi$Year = lubridate::year(soi$Date)
## Centered (sides = 2) 3-month moving average; NA at the series ends
soi$SOI_3MON_AVG = as.numeric(stats::filter(soi$SOI,rep(1/3,3), sides=2))
soi = soi[,c("Date", "Month", "Year", "SOI", "SOI_3MON_AVG")]
soi
} | /R/download-soi.R | no_license | pslota/rsoi | R | false | false | 1,430 | r | #' @export
#' @title Download Southern Oscillation Index data
#'
#'
#' @description The Southern Oscillation Index is defined as the standardized difference between barometric readings at Darwin, Australia and Tahiti.
#'
#' @return
#' \itemize{
#' \item Date: Date object that uses the first of the month as a placeholder. Date formatted as date on the first of the month because R only supports one partial of date time
#' \item Month: Month of record
#' \item Year: Year of record
#' \item SOI: Southern Oscillation Index
#' \item SOI_3MON_AVG: 3 Month Average Southern Oscillation Index
#' }
#' @examples
#' soi <- download_soi()
#' plot(x = soi$Date, y = soi$SOI, type = "l")
#'
#' @references \url{https://www.ncdc.noaa.gov/teleconnections/enso/indicators/soi/}
## Function to bring in SOI data from NOAA (requires network access)
download_soi <- function(){
## Skip the 2 descriptive header lines and name the two columns ourselves
soi = readr::read_csv(
"https://www.ncdc.noaa.gov/teleconnections/enso/indicators/soi/data.csv",
skip = 2, col_names = c("Date","SOI")
)
## Create Date formatted as date (raw value + "01" -> first of the month;
## presumably the raw Date is YYYYMM - verify against the source file)
soi$Date = lubridate::ymd(paste0(soi$Date, "01"))
## Create Year and Month columns
soi$Month = lubridate::month(soi$Date, abbr = TRUE, label = TRUE)
soi$Year = lubridate::year(soi$Date)
## Centered (sides = 2) 3-month moving average; NA at the series ends
soi$SOI_3MON_AVG = as.numeric(stats::filter(soi$SOI,rep(1/3,3), sides=2))
soi = soi[,c("Date", "Month", "Year", "SOI", "SOI_3MON_AVG")]
soi
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/id_mapping.R
\name{variant_to_study}
\alias{variant_to_study}
\title{Map a variant id to a study id}
\usage{
variant_to_study(variant_id, verbose = FALSE, warnings = TRUE)
}
\arguments{
\item{variant_id}{A character vector of variant identifiers.}
\item{verbose}{Whether the function should be
verbose about the different queries or not.}
\item{warnings}{Whether to print warnings.}
}
\value{
A dataframe of two identifiers. First column is the variant
identifier and the second column is the study identifier.
}
\description{
Map a variant identifier to a study accession identifier.
}
\examples{
# Map GWAS variant identifiers to study identifiers
variant_to_study(c('rs7904579', 'rs138331350'))
}
| /man/variant_to_study.Rd | permissive | fmadani/gwasrapidd | R | false | true | 783 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/id_mapping.R
\name{variant_to_study}
\alias{variant_to_study}
\title{Map a variant id to a study id}
\usage{
variant_to_study(variant_id, verbose = FALSE, warnings = TRUE)
}
\arguments{
\item{variant_id}{A character vector of variant identifiers.}
\item{verbose}{Whether the function should be
verbose about the different queries or not.}
\item{warnings}{Whether to print warnings.}
}
\value{
A dataframe of two identifiers. First column is the variant
identifier and the second column is the study identifier.
}
\description{
Map a variant identifier to a study accession identifier.
}
\examples{
# Map GWAS variant identifiers to study identifiers
variant_to_study(c('rs7904579', 'rs138331350'))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-aaa.r
\docType{methods}
\name{removeRmlist,plot_base-method}
\alias{removeRmlist,plot_base-method}
\title{Remove Registered Temporary Objects}
\usage{
\S4method{removeRmlist}{plot_base}()
}
\description{
\code{removeRmlist} method removes registered temporary objects.
}
\seealso{
Other plot: \code{\link{checkError,plot_base-method}},
\code{\link{checkTheme,plot_base-method}},
\code{\link{checkVariable,plot_base-method}},
\code{\link{commandDoIt,plot_base-method}},
\code{\link{factorize-class}}, \code{\link{gbox-class}},
\code{\link{gcont-class}}, \code{\link{gdiscbar-class}},
\code{\link{gdist-class}},
\code{\link{getCoord,plot_base-method}},
\code{\link{getFacet,plot_base-method}},
\code{\link{getGeom,plot_base-method}},
\code{\link{getGgplot,plot_base-method}},
\code{\link{getHelp,plot_base-method}},
\code{\link{getMain,plot_base-method}},
\code{\link{getMessage,plot_base-method}},
\code{\link{getOpts,plot_base-method}},
\code{\link{getParms,plot_base-method}},
\code{\link{getPlot,plot_base-method}},
\code{\link{getScale,plot_base-method}},
\code{\link{getTheme,plot_base-method}},
\code{\link{getWindowTitle,plot_base-method}},
\code{\link{getXlab,plot_base-method}},
\code{\link{getYlab,plot_base-method}},
\code{\link{getZlab,plot_base-method}},
\code{\link{ghist-class}}, \code{\link{gkm-class}},
\code{\link{gline-class}}, \code{\link{gpie-class}},
\code{\link{gqq-class}}, \code{\link{gscat-class}},
\code{\link{gscatmat-class}},
\code{\link{plotWindow,plot_base-method}},
\code{\link{plot_base-class}},
\code{\link{registRmlist,plot_base-method}},
\code{\link{savePlot,plot_base-method}},
\code{\link{setBack,plot_base-method}},
\code{\link{setDataframe,plot_base-method}},
\code{\link{setFront,plot_base-method}}
}
\concept{plot}
\keyword{hplot}
| /man/plot-plot_base-removeRmlist.Rd | no_license | cran/RcmdrPlugin.KMggplot2 | R | false | true | 1,971 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-aaa.r
\docType{methods}
\name{removeRmlist,plot_base-method}
\alias{removeRmlist,plot_base-method}
\title{Remove Registered Temporary Objects}
\usage{
\S4method{removeRmlist}{plot_base}()
}
\description{
\code{removeRmlist} method removes registered temporary objects.
}
\seealso{
Other plot: \code{\link{checkError,plot_base-method}},
\code{\link{checkTheme,plot_base-method}},
\code{\link{checkVariable,plot_base-method}},
\code{\link{commandDoIt,plot_base-method}},
\code{\link{factorize-class}}, \code{\link{gbox-class}},
\code{\link{gcont-class}}, \code{\link{gdiscbar-class}},
\code{\link{gdist-class}},
\code{\link{getCoord,plot_base-method}},
\code{\link{getFacet,plot_base-method}},
\code{\link{getGeom,plot_base-method}},
\code{\link{getGgplot,plot_base-method}},
\code{\link{getHelp,plot_base-method}},
\code{\link{getMain,plot_base-method}},
\code{\link{getMessage,plot_base-method}},
\code{\link{getOpts,plot_base-method}},
\code{\link{getParms,plot_base-method}},
\code{\link{getPlot,plot_base-method}},
\code{\link{getScale,plot_base-method}},
\code{\link{getTheme,plot_base-method}},
\code{\link{getWindowTitle,plot_base-method}},
\code{\link{getXlab,plot_base-method}},
\code{\link{getYlab,plot_base-method}},
\code{\link{getZlab,plot_base-method}},
\code{\link{ghist-class}}, \code{\link{gkm-class}},
\code{\link{gline-class}}, \code{\link{gpie-class}},
\code{\link{gqq-class}}, \code{\link{gscat-class}},
\code{\link{gscatmat-class}},
\code{\link{plotWindow,plot_base-method}},
\code{\link{plot_base-class}},
\code{\link{registRmlist,plot_base-method}},
\code{\link{savePlot,plot_base-method}},
\code{\link{setBack,plot_base-method}},
\code{\link{setDataframe,plot_base-method}},
\code{\link{setFront,plot_base-method}}
}
\concept{plot}
\keyword{hplot}
|
\name{concept_names}
\alias{concept_names}
\title{Resolve concept names in the supplied lazy table reference}
\description{
This function accepts a (lazy) tibble and for each variable of the form
\code{XXX_concept_id} adds a corresponding variable
\code{XXX_concept_name} obtained by (left) joining against the
\code{concept} table of the 'CDM'.
}
\usage{
concept_names(tibl, names = NULL, cin = omopr.global$cin,
verb = FALSE, fill = FALSE, copy = FALSE)
}
\arguments{
\item{tibl}{A (lazy) reference to a tibble.}
\item{names}{An optional list of \code{concept_id}s to be
resolved. Defaults to all.}
\item{cin}{A (lazy) reference to a vocabulary tibble with variables
\code{concept_id} and \code{concept_name}, used to resolve the
concepts.}
\item{verb}{If true, print progress to the console.}
\item{fill}{If true, fill non-matching concept names with a string
conversion of the concept ID.}
\item{copy}{\code{copy} arg to be passed to
\code{left_join}. Will need to be \code{TRUE} if the input tibble is not
a lazy reference, but will be very slow in that case. Work with lazy
references as long as possible.}
}
\value{
A named list with elements corresponding to \code{dbplyr} lazy tibble references.
}
\seealso{\code{\link{omopr_init}}, \code{\link{row_counts}}}
\examples{
con = omopr:::dummy_con() # dummy connection to allow example to run
tRefs = omopr_init(con)
tRefs[["measurement"]] \%>\% concept_names()
}
\keyword{dbplyr}
| /man/concept_names.Rd | no_license | cran/omopr | R | false | false | 1,500 | rd | \name{concept_names}
\alias{concept_names}
\title{Resolve concept names in the supplied lazy table reference}
\description{
This function accepts a (lazy) tibble and for each variable of the form
\code{XXX_concept_id} adds a corresponding variable
\code{XXX_concept_name} obtained by (left) joining against the
\code{concept} table of the 'CDM'.
}
\usage{
concept_names(tibl, names = NULL, cin = omopr.global$cin,
verb = FALSE, fill = FALSE, copy = FALSE)
}
\arguments{
\item{tibl}{A (lazy) reference to a tibble.}
\item{names}{An optional list of \code{concept_id}s to be
resolved. Defaults to all.}
\item{cin}{A (lazy) reference to a vocabulary tibble with variables
\code{concept_id} and \code{concept_name}, used to resolve the
concepts.}
\item{verb}{If true, print progress to the console.}
\item{fill}{If true, fill non-matching concept names with a string
conversion of the concept ID.}
\item{copy}{\code{copy} arg to be passed to
\code{left_join}. Will need to be \code{TRUE} if the input tibble is not
a lazy reference, but will be very slow in that case. Work with lazy
references as long as possible.}
}
\value{
A named list with elements corresponding to \code{dbplyr} lazy tibble references.
}
\seealso{\code{\link{omopr_init}}, \code{\link{row_counts}}}
\examples{
con = omopr:::dummy_con() # dummy connection to allow example to run
tRefs = omopr_init(con)
tRefs[["measurement"]] \%>\% concept_names()
}
\keyword{dbplyr}
|
## Step 1. Data preparation
#(i) loading
#load IO data
IOT_b=read.csv(file="IOT_b.csv",header=T, as.is=T)
#load index mapping
sector_ind=read.csv(file="indcode.csv",header=T, as.is=T)
#(ii)preparing index
row_ind=sector_ind[,1:2]
col_ind=sector_ind[,3:4]
sec_group=sector_ind[,5:8]
va_ind=sector_ind[,9:10]
fd_ind=sector_ind[,12:13]
sec_BR=sector_ind[,c(5,6,15,16)]
#getting rid of NA
row_ind=row_ind[!is.na(row_ind[,1]),]
sec_group=sec_group[!is.na(sec_group[,1]),]
sec_BR=sec_BR[!is.na(sec_BR[,1]),]
va_ind=va_ind[!is.na(va_ind[,1]),]
fd_ind=fd_ind[!is.na(fd_ind[,1]),]
nsector=dim(sec_group)[1]
nBR=length(unique(sec_BR[,3]))
nva=dim(va_ind)[1]
nfd=dim(fd_ind)[1]
#(iii)preparing IO for merging with mapping
#set NA observations in IO as 0
IOT_b[is.na(IOT_b)]=0
#add row index to be used in merging with mapping data to IO
IOT_b$basecode_row=IOT_b$X
#get dimension of IO
dim_IOT_b=dim(IOT_b)
## Step 2. Rowsum: merge and obtain rowsum using aggregate function
IOT_b_sec=merge(IOT_b,row_ind, by="basecode_row", all=T)
IOT_b_36=aggregate(IOT_b_sec[,4:(dim_IOT_b[2])],list(IOT_b_sec$sector_row),FUN=sum)
## Step 3. Column sum
#(i) Traspose rowsum
T_IOT_b_36=data.frame(t(IOT_b_36))
#(ii) add column names for transposed data
colnames(T_IOT_b_36)[1:nsector]=sec_group[(1:nsector),2]
colnames(T_IOT_b_36)[(nsector+1):(nsector+nva)]=va_ind[,2]
#(iii) drop Group indicator used in rowsum
T_IOT_b_36=T_IOT_b_36[-1,]
#(iv) add index to be used in column sum
T_IOT_b_36$basecode_col=col_ind[,2]
#(v) take column sum using aggregate function
T_IOT_36_col=aggregate(T_IOT_b_36[,1:(nsector+nva)],list(T_IOT_b_36$basecode_col),FUN=sum)
## Step 4. obtain IO table
#(i)obtain transpose of column sum
IOT_36=data.frame(t(T_IOT_36_col))
#(ii) add column names
colnames(IOT_36)[1:nsector]=sec_group[(1:36),2]
colnames(IOT_36)[(nsector+1):(nsector+nfd)]=fd_ind[,2]
#(iii) drop aggregation indicator
IOT_36=IOT_36[-1,]
#Step 5. checking balance
# total input + Resout = Total Demand
check1=as.numeric(IOT_36["Tinput",(1:nsector)])+IOT_36$Resout[(1:nsector)]+IOT_36$Imp[(1:nsector)]-IOT_36$Dtotal[(1:nsector)]
check2=IOT_36$Qtotal[(1:nsector)]+IOT_36$Qself[(1:nsector)]+IOT_36$Resout[(1:nsector)]+IOT_36$Imp[(1:nsector)]-IOT_36$Dtotal[(1:nsector)]
check1
check2
write.csv(IOT_36, file="IO_model.csv")
###### Group IO #####
#Step.1 Preparing data
#(i)set aside industry data IOT_36
IOT_36_Group=IOT_36
#(ii)preparing index to sort after merging
IOT_36_Group$index=(1:(dim(IOT_36)[1]))
#(iii)preparing index to merge with sector-group mapping
IOT_36_Group$sector=rownames(IOT_36)
#(iv) merge with sector-group mapping and sort to original order
IOT_36_Group=merge(IOT_36_Group,sec_group, by.x="sector", by.y="sector_name", all=T)
IOT_36_Group=IOT_36_Group[order(IOT_36_Group$index),]
#(v) give row names
rownames(IOT_36_Group)=IOT_36_Group$sector
#(vi)preparing Group name and Group index to be used in aggregation. The VA part
IOT_36_Group$Group_name[is.na(IOT_36_Group$Group_name)]=IOT_36_Group$sector[is.na(IOT_36_Group$Group_name)]
Gimax=max(IOT_36_Group$Group_ind,na.rm=T)
Giblank=length(IOT_36_Group$Group_ind[is.na(IOT_36_Group$Group_ind)])
IOT_36_Group$Group_ind[is.na(IOT_36_Group$Group_ind)]=((Gimax+1):(Gimax+Giblank))
#step 2. rowsum by aggregate function: take row sum
IOT_7=aggregate(IOT_36_Group[,2:(nsector+nfd+1)],list(IOT_36_Group$Group_ind),FUN=sum)
#step 3. prepare for column sum
#(i)add group name and index to rowsum data
## (i-1) prepare group_name and index to add to rowsum data b/c character vector with group name was excluded in aggregation
Group_ind=data.frame(unique(cbind(IOT_36_Group$Group_ind,IOT_36_Group$Group_name)))
colnames(Group_ind)=c("Group_ind","Group_name")
##(i-2) add group_name and change rowname
IOT_7_row=merge(IOT_7, Group_ind, by.x="Group.1", by.y="Group_ind", all=T)
rownames(IOT_7_row)=IOT_7_row$Group_name
##(i-3) getting rid of aggregate group indicator (1st variable) and Group_name variable, so that we can apply aggregate function
IOT_7_row=IOT_7_row[,-1*c(1, dim(IOT_7_row)[2])]
##(i-4)Transpose
T_IOT_7_col=data.frame(t(IOT_7_row))
#step 4. colsum by aggregate function
#(i) prepare to merge with mapping index
##(i-1) prepare merging varible to merge with sector_group mapping
T_IOT_7_col$sector=rownames(T_IOT_7_col)
##(i-2) prepare index varible to sort after merging
T_IOT_7_col$index=(1:dim(T_IOT_7_col)[1])
##(i-3) merge with sector_group mapping
T_IOT_7_Group=merge(T_IOT_7_col,sec_group, by.x="sector", by.y="sector_name", all=T,sort=F)
##(i-4) sort merged data to 'before-merge' order
T_IOT_7_Group=T_IOT_7_Group[order(T_IOT_7_Group$index),]
##(i-5) give rownames
rownames(T_IOT_7_Group)=T_IOT_7_Group$sector
#(ii) prepare variable for column sum aggregation: prepare Group name and group index to use in aggregate function
## Group name =Group name + Final demand elements
T_IOT_7_Group$Group_name[is.na(T_IOT_7_Group$Group_name)]=T_IOT_7_Group$sector[is.na(T_IOT_7_Group$Group_name)]
Gimax2=max(T_IOT_7_Group$Group_ind,na.rm=T)
Giblank2=length(T_IOT_7_Group$Group_ind[is.na(T_IOT_7_Group$Group_ind)])
T_IOT_7_Group$Group_ind[is.na(T_IOT_7_Group$Group_ind)]=((Gimax2+1):(Gimax2+Giblank2))
ngroup=8
#(iii) column sum by aggregate function
T_IOT_7=aggregate(T_IOT_7_Group[,2:(ngroup+nva+1)],list(T_IOT_7_Group$Group_ind),FUN=sum)
#step 5. management after aggregation
#(i) add row names to aggregated data
##(i-1) prepare row names =group name + final demand elements
Group_ind_7=data.frame(unique(cbind(T_IOT_7_Group$Group_ind,T_IOT_7_Group$Group_name)))
colnames(Group_ind_7)=c("Group_ind","Group_name")
##(i-2) merge row name data
T_IOT_7=merge(T_IOT_7, Group_ind_7, by.x="Group.1", by.y="Group_ind", all=T)
##(i-3) change row name
rownames(T_IOT_7)=T_IOT_7$Group_name
#(ii) Take transpose again to obtain Group IO data
IOT_7=data.frame(t(T_IOT_7[,-1*c(1,dim(T_IOT_7)[2])]))
#checking balance
# total input + Resout = Total Demand
check3=as.numeric(IOT_7["Tinput",(1:ngroup)])+IOT_7$Resout[(1:ngroup)]+IOT_7$Imp[(1:ngroup)]-IOT_7$Dtotal[(1:ngroup)]
check4=IOT_7$Qtotal[(1:ngroup)]+IOT_7$Qself[(1:ngroup)]+IOT_7$Resout[(1:ngroup)]+IOT_7$Imp[(1:ngroup)]-IOT_7$Dtotal[(1:ngroup)]
check3
check4
write.csv(IOT_7, file="IO_group.csv")
## Boehringer and Rutherford toy model sector index
# Originally, BR has 6 sectors: ELEC, OIL, COAL, GAS, X (energy intensive), Y (non energy intensive)
# We separate agriculture to link with the agriculture bottom-up model, which gives us seven sectors:
# ELEC, OIL, COAL, GASHEAT, EINT, NEINT, AGRI
# GAS and Heat are bundled as GASHEAT; ROIL and OIL are integrated into OIL
#Step.1 Preparing data
#(i)set aside industry data IOT_36
IOT_36_BR=IOT_36
#(ii)preparing index to sort after merging
IOT_36_BR$index=(1:(dim(IOT_36)[1]))
#(iii)preparing index to merge with sector-group mapping
IOT_36_BR$sector=rownames(IOT_36_BR)
#(iv) merge with sector-BR index mapping and sort to original order
IOT_36_BR=merge(IOT_36_BR,sec_BR, by.x="sector", by.y="sector_name", all=T)
IOT_36_BR=IOT_36_BR[order(IOT_36_BR$index),]
#(v) give row names
rownames(IOT_36_BR)=IOT_36_BR$sector
#(vi)preparing Group name and Group index to be used in aggregation. The VA part
IOT_36_BR$BR_name[is.na(IOT_36_BR$BR_name)]=IOT_36_BR$sector[is.na(IOT_36_BR$BR_name)]
BRimax=max(IOT_36_BR$BR_ind,na.rm=T)
BRiblank=length(IOT_36_BR$BR_ind[is.na(IOT_36_BR$BR_ind)])
IOT_36_BR$BR_ind[is.na(IOT_36_BR$BR_ind)]=((BRimax+1):(BRimax+BRiblank))
#step 2. rowsum by aggregate function: take row sum
IOT_BR_row=aggregate(IOT_36_BR[,2:(nsector+nfd+1)],list(IOT_36_BR$BR_ind),FUN=sum)
#step 3. prepare for column sum
#(i)add group name and index to rowsum data
## (i-1) prepare BR_name and index to add to rowsum data b/c character vector with group name was excluded in aggregation
BR_ind=data.frame(unique(cbind(IOT_36_BR$BR_ind,IOT_36_BR$BR_name)))
colnames(BR_ind)=c("BR_ind","BR_name")
##(i-2) add BR_name and change rowname
IOT_BR_row=merge(IOT_BR_row, BR_ind, by.x="Group.1", by.y="BR_ind", all=T)
rownames(IOT_BR_row)=IOT_BR_row$BR_name
##(i-3) getting rid of aggregate group indicator (1st variable) and Group_name variable, so that we can apply aggregate function
IOT_BR_row=IOT_BR_row[,-1*c(1, dim(IOT_BR_row)[2])]
##(i-4)Transpose
T_IOT_BR_row=data.frame(t(IOT_BR_row))
#step 4. colsum by aggregate function
#(i) prepare to merge with mapping index
##(i-1) prepare merging varible to merge with sector_group mapping
T_IOT_BR_row$sector=rownames(T_IOT_BR_row)
##(i-2) prepare index varible to sort after merging
T_IOT_BR_row$index=(1:dim(T_IOT_BR_row)[1])
##(i-3) merge with sector_BR mapping
T_IOT_BR_row=merge(T_IOT_BR_row,sec_BR, by.x="sector", by.y="sector_name", all=T,sort=F)
##(i-4) sort merged data to 'before-merge' order
T_IOT_BR_row=T_IOT_BR_row[order(T_IOT_BR_row$index),]
##(i-5) give rownames
rownames(T_IOT_BR_row)=T_IOT_BR_row$sector
#(ii) prepare variable for colum sum aggregation: prepare BR name and BR index to use in aggregate funciton
## Group name =Group name + Final demand elements
T_IOT_BR_row$BR_name[is.na(T_IOT_BR_row$BR_name)]=T_IOT_BR_row$sector[is.na(T_IOT_BR_row$BR_name)]
BRimax2=max(T_IOT_BR_row$BR_ind,na.rm=T)
BRiblank2=length(T_IOT_BR_row$BR_ind[is.na(T_IOT_BR_row$BR_ind)])
T_IOT_BR_row$BR_ind[is.na(T_IOT_BR_row$BR_ind)]=((BRimax2+1):(BRimax2+BRiblank2))
#ngroup=8
#(iii) column sum by aggregate function
T_IOT_BR=aggregate(T_IOT_BR_row[,2:(nBR+nva+1)],list(T_IOT_BR_row$BR_ind),FUN=sum)
#step 5. management after aggregation
#(i) add row names to aggregated data
##(i-1) prepare row names =BR name + final demand elements
BR_ind_name=data.frame(unique(cbind(T_IOT_BR_row$BR_ind,T_IOT_BR_row$BR_name)))
colnames(BR_ind_name)=c("BR_ind","BR_name")
##(i-2) merge row name data
T_IOT_BR=merge(T_IOT_BR, BR_ind_name, by.x="Group.1", by.y="BR_ind", all=T)
##(i-3) change row name
rownames(T_IOT_BR)=T_IOT_BR$BR_name
#(ii) Take transpose again to obtain BR IO data
IOT_BR=data.frame(t(T_IOT_BR[,-1*c(1,dim(T_IOT_BR)[2])]))
#checking balance
# total input + Resout = Total Demand
check5=as.numeric(IOT_BR["Tinput",(1:nBR)])+IOT_BR$Resout[(1:nBR)]+IOT_BR$Imp[(1:nBR)]-IOT_BR$Dtotal[(1:nBR)]
check6=IOT_BR$Qtotal[(1:nBR)]+IOT_BR$Qself[(1:nBR)]+IOT_BR$Resout[(1:nBR)]+IOT_BR$Imp[(1:nBR)]-IOT_BR$Dtotal[(1:nBR)]
check5
check6
write.csv(IOT_BR, file="IO_B.csv") | /SAM/IO/agg.r | no_license | katto2/CGE_2016 | R | false | false | 10,280 | r | ## Step 1. Data preperation
#(i) loading
#load IO data
IOT_b=read.csv(file="IOT_b.csv",header=T, as.is=T)
#load index mapping
sector_ind=read.csv(file="indcode.csv",header=T, as.is=T)
#(ii)preparing index
row_ind=sector_ind[,1:2]
col_ind=sector_ind[,3:4]
sec_group=sector_ind[,5:8]
va_ind=sector_ind[,9:10]
fd_ind=sector_ind[,12:13]
sec_BR=sector_ind[,c(5,6,15,16)]
#getting rid of NA
row_ind=row_ind[!is.na(row_ind[,1]),]
sec_group=sec_group[!is.na(sec_group[,1]),]
sec_BR=sec_BR[!is.na(sec_BR[,1]),]
va_ind=va_ind[!is.na(va_ind[,1]),]
fd_ind=fd_ind[!is.na(fd_ind[,1]),]
nsector=dim(sec_group)[1]
nBR=length(unique(sec_BR[,3]))
nva=dim(va_ind)[1]
nfd=dim(fd_ind)[1]
#(iii)preparing IO for merging with mapping
#set NA observations in IO as 0
IOT_b[is.na(IOT_b)]=0
#add row index to be used in merging with mapping data to IO
IOT_b$basecode_row=IOT_b$X
#get dimension of IO
dim_IOT_b=dim(IOT_b)
## Step 2. Rowsum: merge and obtain rowsum using aggregate function
IOT_b_sec=merge(IOT_b,row_ind, by="basecode_row", all=T)
IOT_b_36=aggregate(IOT_b_sec[,4:(dim_IOT_b[2])],list(IOT_b_sec$sector_row),FUN=sum)
## Step 3. Column sum
#(i) Traspose rowsum
T_IOT_b_36=data.frame(t(IOT_b_36))
#(ii) add column names for transposed data
colnames(T_IOT_b_36)[1:nsector]=sec_group[(1:nsector),2]
colnames(T_IOT_b_36)[(nsector+1):(nsector+nva)]=va_ind[,2]
#(iii) drop Group indicator used in rowsum
T_IOT_b_36=T_IOT_b_36[-1,]
#(iv) add index to be used in column sum
T_IOT_b_36$basecode_col=col_ind[,2]
#(v) take column sum using aggregate function
T_IOT_36_col=aggregate(T_IOT_b_36[,1:(nsector+nva)],list(T_IOT_b_36$basecode_col),FUN=sum)
## Step 4. obtain IO table
#(i)obtain transpose of column sum
IOT_36=data.frame(t(T_IOT_36_col))
#(ii) add column names
colnames(IOT_36)[1:nsector]=sec_group[(1:36),2]
colnames(IOT_36)[(nsector+1):(nsector+nfd)]=fd_ind[,2]
#(iii) drop aggregation indicator
IOT_36=IOT_36[-1,]
#Step 5. checking balance
# total input + Resout = Total Demand
check1=as.numeric(IOT_36["Tinput",(1:nsector)])+IOT_36$Resout[(1:nsector)]+IOT_36$Imp[(1:nsector)]-IOT_36$Dtotal[(1:nsector)]
check2=IOT_36$Qtotal[(1:nsector)]+IOT_36$Qself[(1:nsector)]+IOT_36$Resout[(1:nsector)]+IOT_36$Imp[(1:nsector)]-IOT_36$Dtotal[(1:nsector)]
check1
check2
write.csv(IOT_36, file="IO_model.csv")
###### Group IO #####
#Step.1 Preparing data
#(i)set aside industry data IOT_36
IOT_36_Group=IOT_36
#(ii)preparing index to sort after merging
IOT_36_Group$index=(1:(dim(IOT_36)[1]))
#(iii)preparing index to merge with sector-group mapping
IOT_36_Group$sector=rownames(IOT_36)
#(iv) merge with sector-group mapping and sort to original order
IOT_36_Group=merge(IOT_36_Group,sec_group, by.x="sector", by.y="sector_name", all=T)
IOT_36_Group=IOT_36_Group[order(IOT_36_Group$index),]
#(v) give row names
rownames(IOT_36_Group)=IOT_36_Group$sector
#(vi)preparing Group name and Group index to be used in aggregation. The VA part
IOT_36_Group$Group_name[is.na(IOT_36_Group$Group_name)]=IOT_36_Group$sector[is.na(IOT_36_Group$Group_name)]
Gimax=max(IOT_36_Group$Group_ind,na.rm=T)
Giblank=length(IOT_36_Group$Group_ind[is.na(IOT_36_Group$Group_ind)])
IOT_36_Group$Group_ind[is.na(IOT_36_Group$Group_ind)]=((Gimax+1):(Gimax+Giblank))
#step 2. rowsum by aggregate function: take row sum
IOT_7=aggregate(IOT_36_Group[,2:(nsector+nfd+1)],list(IOT_36_Group$Group_ind),FUN=sum)
#step 3. prepare for column sum
#(i)add group name and index to rowsum data
## (i-1) prepare group_name and index to add to rowsum data b/c character vector with group name was excluded in aggregation
Group_ind=data.frame(unique(cbind(IOT_36_Group$Group_ind,IOT_36_Group$Group_name)))
colnames(Group_ind)=c("Group_ind","Group_name")
##(i-2) add group_name and change rowname
IOT_7_row=merge(IOT_7, Group_ind, by.x="Group.1", by.y="Group_ind", all=T)
rownames(IOT_7_row)=IOT_7_row$Group_name
##(i-3) getting rid of aggregate group indicator (1st variable) and Group_name variable, so that we can apply aggregate function
IOT_7_row=IOT_7_row[,-1*c(1, dim(IOT_7_row)[2])]
##(i-4)Transpose
T_IOT_7_col=data.frame(t(IOT_7_row))
#step 4. colsum by aggregate function
#(i) prepare to merge with mapping index
##(i-1) prepare merging varible to merge with sector_group mapping
T_IOT_7_col$sector=rownames(T_IOT_7_col)
##(i-2) prepare index varible to sort after merging
T_IOT_7_col$index=(1:dim(T_IOT_7_col)[1])
##(i-3) merge with sector_group mapping
T_IOT_7_Group=merge(T_IOT_7_col,sec_group, by.x="sector", by.y="sector_name", all=T,sort=F)
##(i-4) sort merged data to 'before-merge' order
T_IOT_7_Group=T_IOT_7_Group[order(T_IOT_7_Group$index),]
##(i-5) give rownames
rownames(T_IOT_7_Group)=T_IOT_7_Group$sector
#(ii) prepare variable for colum sum aggregation: prepare Group name and group index to use in aggregate funciton
## Group name =Group name + Final demand elements
T_IOT_7_Group$Group_name[is.na(T_IOT_7_Group$Group_name)]=T_IOT_7_Group$sector[is.na(T_IOT_7_Group$Group_name)]
Gimax2=max(T_IOT_7_Group$Group_ind,na.rm=T)
Giblank2=length(T_IOT_7_Group$Group_ind[is.na(T_IOT_7_Group$Group_ind)])
T_IOT_7_Group$Group_ind[is.na(T_IOT_7_Group$Group_ind)]=((Gimax2+1):(Gimax2+Giblank2))
ngroup=8
#(iii) column sum by aggregate function
T_IOT_7=aggregate(T_IOT_7_Group[,2:(ngroup+nva+1)],list(T_IOT_7_Group$Group_ind),FUN=sum)
#step 5. management after aggregation
#(i) add row names to aggregated data
##(i-1) prepare row names =group name + final demand elements
Group_ind_7=data.frame(unique(cbind(T_IOT_7_Group$Group_ind,T_IOT_7_Group$Group_name)))
colnames(Group_ind_7)=c("Group_ind","Group_name")
##(i-2) merge row name data
T_IOT_7=merge(T_IOT_7, Group_ind_7, by.x="Group.1", by.y="Group_ind", all=T)
##(i-3) change row name
rownames(T_IOT_7)=T_IOT_7$Group_name
#(ii) Take transpose again to obtain Group IO data
IOT_7=data.frame(t(T_IOT_7[,-1*c(1,dim(T_IOT_7)[2])]))
#checking balance
# total input + Resout = Total Demand
check3=as.numeric(IOT_7["Tinput",(1:ngroup)])+IOT_7$Resout[(1:ngroup)]+IOT_7$Imp[(1:ngroup)]-IOT_7$Dtotal[(1:ngroup)]
check4=IOT_7$Qtotal[(1:ngroup)]+IOT_7$Qself[(1:ngroup)]+IOT_7$Resout[(1:ngroup)]+IOT_7$Imp[(1:ngroup)]-IOT_7$Dtotal[(1:ngroup)]
check3
check4
write.csv(IOT_7, file="IO_group.csv")
## Boehringer and Rutherford toy model sector index
# Originally, BR has 6 sectors: ELEC, OIL, COAL, GAS, X (energy intensive), Y (non energy intensive)
# We separate agriculture to link with the agriculture bottom-up model, which gives us seven sectors:
# ELEC, OIL, COAL, GASHEAT, EINT, NEINT, AGRI
# GAS and Heat are bundled as GASHEAT; ROIL and OIL are integrated into OIL
#Step.1 Preparing data
#(i)set aside industry data IOT_36
IOT_36_BR=IOT_36
#(ii)preparing index to sort after merging
IOT_36_BR$index=(1:(dim(IOT_36)[1]))
#(iii)preparing index to merge with sector-group mapping
IOT_36_BR$sector=rownames(IOT_36_BR)
#(iv) merge with sector-BR index mapping and sort to original order
IOT_36_BR=merge(IOT_36_BR,sec_BR, by.x="sector", by.y="sector_name", all=T)
IOT_36_BR=IOT_36_BR[order(IOT_36_BR$index),]
#(v) give row names
rownames(IOT_36_BR)=IOT_36_BR$sector
#(vi)preparing Group name and Group index to be used in aggregation. The VA part
IOT_36_BR$BR_name[is.na(IOT_36_BR$BR_name)]=IOT_36_BR$sector[is.na(IOT_36_BR$BR_name)]
BRimax=max(IOT_36_BR$BR_ind,na.rm=T)
BRiblank=length(IOT_36_BR$BR_ind[is.na(IOT_36_BR$BR_ind)])
IOT_36_BR$BR_ind[is.na(IOT_36_BR$BR_ind)]=((BRimax+1):(BRimax+BRiblank))
#step 2. rowsum by aggregate function: take row sum
IOT_BR_row=aggregate(IOT_36_BR[,2:(nsector+nfd+1)],list(IOT_36_BR$BR_ind),FUN=sum)
#step 3. prepare for column sum
#(i)add group name and index to rowsum data
## (i-1) prepare BR_name and index to add to rowsum data b/c character vector with group name was excluded in aggregation
BR_ind=data.frame(unique(cbind(IOT_36_BR$BR_ind,IOT_36_BR$BR_name)))
colnames(BR_ind)=c("BR_ind","BR_name")
##(i-2) add BR_name and change rowname
IOT_BR_row=merge(IOT_BR_row, BR_ind, by.x="Group.1", by.y="BR_ind", all=T)
rownames(IOT_BR_row)=IOT_BR_row$BR_name
##(i-3) getting rid of aggregate group indicator (1st variable) and Group_name variable, so thea we can apply aggregate function
IOT_BR_row=IOT_BR_row[,-1*c(1, dim(IOT_BR_row)[2])]
##(i-4)Transpose
T_IOT_BR_row=data.frame(t(IOT_BR_row))
#step 4. colsum by aggregate function
#(i) prepare to merge with mapping index
##(i-1) prepare merging varible to merge with sector_group mapping
T_IOT_BR_row$sector=rownames(T_IOT_BR_row)
##(i-2) prepare index varible to sort after merging
T_IOT_BR_row$index=(1:dim(T_IOT_BR_row)[1])
##(i-3) merge with sector_BR mapping
T_IOT_BR_row=merge(T_IOT_BR_row,sec_BR, by.x="sector", by.y="sector_name", all=T,sort=F)
##(i-4) sort merged data to 'before-merge' order
T_IOT_BR_row=T_IOT_BR_row[order(T_IOT_BR_row$index),]
##(i-5) give rownames
rownames(T_IOT_BR_row)=T_IOT_BR_row$sector
#(ii) prepare variable for colum sum aggregation: prepare BR name and BR index to use in aggregate funciton
## Group name =Group name + Final demand elements
T_IOT_BR_row$BR_name[is.na(T_IOT_BR_row$BR_name)]=T_IOT_BR_row$sector[is.na(T_IOT_BR_row$BR_name)]
BRimax2=max(T_IOT_BR_row$BR_ind,na.rm=T)
BRiblank2=length(T_IOT_BR_row$BR_ind[is.na(T_IOT_BR_row$BR_ind)])
T_IOT_BR_row$BR_ind[is.na(T_IOT_BR_row$BR_ind)]=((BRimax2+1):(BRimax2+BRiblank2))
#ngroup=8
#(iii) column sum by aggregate function
T_IOT_BR=aggregate(T_IOT_BR_row[,2:(nBR+nva+1)],list(T_IOT_BR_row$BR_ind),FUN=sum)
#step 5. management after aggregation
#(i) add row names to aggregated data
##(i-1) prepare row names =BR name + final demand elements
BR_ind_name=data.frame(unique(cbind(T_IOT_BR_row$BR_ind,T_IOT_BR_row$BR_name)))
colnames(BR_ind_name)=c("BR_ind","BR_name")
##(i-2) merge row name data
T_IOT_BR=merge(T_IOT_BR, BR_ind_name, by.x="Group.1", by.y="BR_ind", all=T)
##(i-3) change row name
rownames(T_IOT_BR)=T_IOT_BR$BR_name
#(ii) Take transpose again to obtain BR IO data
IOT_BR=data.frame(t(T_IOT_BR[,-1*c(1,dim(T_IOT_BR)[2])]))
#checking balance
# total input + Resout = Total Demand
check5=as.numeric(IOT_BR["Tinput",(1:nBR)])+IOT_BR$Resout[(1:nBR)]+IOT_BR$Imp[(1:nBR)]-IOT_BR$Dtotal[(1:nBR)]
check6=IOT_BR$Qtotal[(1:nBR)]+IOT_BR$Qself[(1:nBR)]+IOT_BR$Resout[(1:nBR)]+IOT_BR$Imp[(1:nBR)]-IOT_BR$Dtotal[(1:nBR)]
check5
check6
write.csv(IOT_BR, file="IO_B.csv") |
<html>
<head>
<meta name="TextLength" content="SENT_NUM:5, WORD_NUM:97">
</head>
<body bgcolor="white">
<a href="#0" id="0">These children will soon be adults."</a>
<a href="#1" id="1">They will live this case until the day they die."</a>
<a href="#2" id="2">Many of the children who attended McMartin are now entering their early teens, but the youngest of those who attended the school then is now 8 years old.</a>
<a href="#3" id="3">The indictment was superseded by a criminal complaint of 208 counts of molestation and conspiracy.</a>
<a href="#4" id="4">Though the historic McMartin Pre-School criminal trial finally has ended in acquittals, a host of civil lawsuits generated by the Manhattan Beach molestation case conceivably could keep the issue alive in the courts for decades, legal experts say.</a>
</body>
</html> | /DUC-Dataset/Summary_p100_R/D080.LA012090-0110.html.R | no_license | Angela7126/SLNSumEval | R | false | false | 827 | r | <html>
<head>
<meta name="TextLength" content="SENT_NUM:5, WORD_NUM:97">
</head>
<body bgcolor="white">
<a href="#0" id="0">These children will soon be adults."</a>
<a href="#1" id="1">They will live this case until the day they die."</a>
<a href="#2" id="2">Many of the children who attended McMartin are now entering their early teens, but the youngest of those who attended the school then is now 8 years old.</a>
<a href="#3" id="3">The indictment was superseded by a criminal complaint of 208 counts of molestation and conspiracy.</a>
<a href="#4" id="4">Though the historic McMartin Pre-School criminal trial finally has ended in acquittals, a host of civil lawsuits generated by the Manhattan Beach molestation case conceivably could keep the issue alive in the courts for decades, legal experts say.</a>
</body>
</html> |
#' Plot anisotropy
#'
#' @param object An object from [sdmTMB()].
#'
#' @export
#' @rdname plot_anisotropy
#' @examples
#' \donttest{
#' d <- pcod
#' m <- sdmTMB(data = d,
#' formula = density ~ 0 + as.factor(year),
#' time = "year", spde = make_mesh(d, c("X", "Y"), n_knots = 80, type = "kmeans"),
#' family = tweedie(link = "log"), anisotropy = TRUE,
#' include_spatial = FALSE)
#' plot_anisotropy(m)
#' }
plot_anisotropy <- function(object) {
  ## Validate input with inherits() rather than comparing class() directly:
  ## `identical(class(x), "sdmTMB")` fails for objects that carry additional
  ## (sub)classes, which is the documented way to test S3 class membership.
  if (!inherits(object, "sdmTMB")) {
    stop("`object` must be a fitted sdmTMB model.", call. = FALSE)
  }
  ## Pull the anisotropy matrix H from the TMB report and decompose it;
  ## eigenvectors give the principal directions, eigenvalues their ranges.
  report <- object$tmb_obj$report()
  eig <- eigen(report$H)
  ## Arrow endpoints: each eigenvector scaled by its eigenvalue, anchored
  ## at the origin.
  dat <- data.frame(
    x0 = c(0, 0),
    y0 = c(0, 0),
    x1 = eig$vectors[1, , drop = TRUE] * eig$values,
    y1 = eig$vectors[2, , drop = TRUE] * eig$values
  )
  ## Empty plot sized to the arrows, aspect ratio 1 so directions are true.
  plot(0, xlim = range(c(dat$x0, dat$x1)),
    ylim = range(c(dat$y0, dat$y1)), type = "n", asp = 1, xlab = "", ylab = "")
  graphics::arrows(dat$x0, dat$y0, dat$x1, dat$y1)
  ## Return the decomposition invisibly so calls don't auto-print.
  invisible(list(eig = eig, dat = dat, H = report$H))
}
| /R/plot.R | no_license | Kotkot/sdmTMB | R | false | false | 960 | r | #' Plot anisotropy
#'
#' @param object An object from [sdmTMB()].
#'
#' @export
#' @rdname plot_anisotropy
#' @examples
#' \donttest{
#' d <- pcod
#' m <- sdmTMB(data = d,
#' formula = density ~ 0 + as.factor(year),
#' time = "year", spde = make_mesh(d, c("X", "Y"), n_knots = 80, type = "kmeans"),
#' family = tweedie(link = "log"), anisotropy = TRUE,
#' include_spatial = FALSE)
#' plot_anisotropy(m)
#' }
plot_anisotropy <- function(object) {
  ## Class check via inherits(): comparing class() with identical() is the
  ## classic `class(x) == "cls"` anti-pattern and rejects subclassed objects.
  if (!inherits(object, "sdmTMB")) {
    stop("`object` must be a fitted sdmTMB model.", call. = FALSE)
  }
  ## Eigendecompose the anisotropy matrix H reported by the TMB object.
  report <- object$tmb_obj$report()
  eig <- eigen(report$H)
  ## Build arrows from the origin along each eigenvalue-scaled eigenvector.
  dat <- data.frame(
    x0 = c(0, 0),
    y0 = c(0, 0),
    x1 = eig$vectors[1, , drop = TRUE] * eig$values,
    y1 = eig$vectors[2, , drop = TRUE] * eig$values
  )
  plot(0, xlim = range(c(dat$x0, dat$x1)),
    ylim = range(c(dat$y0, dat$y1)), type = "n", asp = 1, xlab = "", ylab = "")
  graphics::arrows(dat$x0, dat$y0, dat$x1, dat$y1)
  ## Invisible return keeps interactive use quiet while exposing the pieces.
  invisible(list(eig = eig, dat = dat, H = report$H))
}
|
# reactivation_tranches: end-to-end job that builds a lagged user-month
# activity dataset, fits a logistic model of monthly activity, scores
# recently inactive users, splits them into tranches by predicted
# reactivation probability, randomizes treatment within the selection,
# stores the fitted model, and appends tranche assignments to the database.
#
# Args:
#   connection  - DB connection argument. NOTE(review): currently ignored --
#                 the function unconditionally reconnects via db_connect();
#                 confirm whether a supplied connection should be honored.
#   month.min   - minimum number of observed months required to keep a user.
#   active.var  - event-count columns defining a user-month as active
#                 (active = 1 iff any of these columns is non-zero).
#   lag.var     - event-count columns lagged as model predictors.
#   lags        - number of monthly lags to build (must be 1 to 4).
#   tranche.min - minimum number of users required per tranche.
#
# Called for its side effects (model storage and a DB write to
# datasci_projects.tranche_assignments).
#
# Depends on helpers defined elsewhere in the project: db_connect,
# anti_join (dplyr), thresh_iter, model_storage, strip_model.
reactivation_tranches <- function(connection, month.min = 3, active.var = c('confirmed', 'manually_created', 'shared'),
lag.var = c('confirmed', 'rejected', 'manually_created', 'auto_created', 'shared'), lags = 3, tranche.min = 550){
#Connect to Local PSQL Database and Pull Data
print(noquote("Connecting to local PostgreSQL Database"))
connection <- db_connect()
data <- dbGetQuery(connection, "SELECT * FROM datasci_modeling.vw_event_counts_user_month")
# basic argument validation before any processing
if(is.null(lag.var)){stop(noquote("No lag variables provided"))}
if(!lags %in% c(1:4)){stop(noquote("Number of lags must be between 1 and 4"))}
umon <- data
user_ids <- data[, c('external_account_id', 'user_id')]
umon$user_id <- NULL
print(noquote("Processing user event count data"))
# subsetting only users with minimum number of months
tmp <- aggregate(umon$month, by = list(umon$external_account_id), 'max')
colnames(tmp) <- c('external_account_id', 'month')
tmp <- tmp[tmp$month >= month.min,]
umon <- umon[umon$external_account_id %in% tmp[, c('external_account_id')],]
# empty data frame of rows = n users, columns = n months
mths <- setNames(data.frame(matrix(ncol = length(unique(umon$month)), nrow = nrow(umon))),
paste('mth_', unique(umon$month), sep=""))
# merge with user month data
umon <- cbind(umon, mths)
cols <- colnames(umon)
# populate new columns with binary based on the row's month value
# NOTE: the row-wise apply() coerces every value to character; original
# column types are restored immediately below.
umon <- data.frame(t(apply(umon, 1, function(x){
mth <- as.numeric(x['month'])
x <- t(x)
i <- grep('mth_', colnames(x))
x[i] <- ifelse(gsub("mth_", "", colnames(x))==as.numeric(mth), 1, 0)[i]
return(x)
})))
colnames(umon) <- cols
# restore column types after the coercion above (all columns except the
# character-valued 'start_date' and 'type' become numeric again)
i <- sapply(umon,is.factor)
umon[i] <- lapply(umon[i], as.character)
i <- which(!colnames(umon) %in% c('start_date', 'type'))
umon[,i] <- lapply(umon[,i], as.numeric)
# creating binary active variable based on active.var argument
# (active = 0 when every active.var count is zero, otherwise 1)
print(noquote(paste("Creating binary active variable based on ", paste(active.var, sep = ' ', collapse = ', '), sep = '')))
umon$active <- ifelse(eval(parse(text = paste('umon$', active.var, ' == 0', sep = '', collapse = ' & '))), 0, 1)
# removing and storing current (incomplete) month
current_month <- aggregate(umon$month, list(umon$external_account_id), FUN = 'max')
colnames(current_month) <- c('external_account_id', 'month')
current_month <- merge(current_month, umon, by = c('external_account_id', 'month'))
umon <- anti_join(umon, current_month, by = c('external_account_id', 'month'))
# creating dataset with lags
print(noquote("Creating lag variables"))
# y side (response rows: every user-month except month 0)
ylag <- umon[umon$month!=0, which(!colnames(umon) %in% lag.var)]
# x side: shift month forward so month m's predictors are month m-1's counts
xlag <- umon[, c('external_account_id', 'month', lag.var)]
xlag$month <- xlag$month + 1
colnames(xlag)[which(colnames(xlag) %in% lag.var)] <- paste(colnames(xlag)[which(colnames(xlag) %in% lag.var)], '_1', sep='')
# merge
umon.lag <- merge(xlag, ylag, by = c('external_account_id', 'month'))
if(lags > 1){
# second lag
xlag <- umon[, c('external_account_id', 'month', lag.var)]
xlag$month <- xlag$month + 2
colnames(xlag)[which(colnames(xlag) %in% lag.var)] <- paste(colnames(xlag)[which(colnames(xlag) %in% lag.var)], '_2', sep='')
# merge
umon.lag <- merge(xlag, umon.lag, by = c('external_account_id', 'month'))}
if(lags > 2){
# third lag
xlag <- umon[, c('external_account_id', 'month', lag.var)]
xlag$month <- xlag$month + 3
colnames(xlag)[which(colnames(xlag) %in% lag.var)] <- paste(colnames(xlag)[which(colnames(xlag) %in% lag.var)], '_3', sep='')
# merge
umon.lag <- merge(xlag, umon.lag, by = c('external_account_id', 'month'))}
if(lags > 3){
# fourth lag
xlag <- umon[, c('external_account_id', 'month', lag.var)]
xlag$month <- xlag$month + 4
colnames(xlag)[which(colnames(xlag) %in% lag.var)] <- paste(colnames(xlag)[which(colnames(xlag) %in% lag.var)], '_4', sep='')
# merge
umon.lag <- merge(xlag, umon.lag, by = c('external_account_id', 'month'))}
# reconcile signup_method (method 2 is folded into desktop = 6) and add
# one-hot indicator columns for the three remaining methods
umon.lag <- umon.lag[umon.lag$signup_method %in% c(2,4,5,6),]
umon.lag$signup_method[umon.lag$signup_method == 2] <- 6
umon.lag$signup_chrome_ext <- ifelse(umon.lag$signup_method == 4, 1, 0)
umon.lag$signup_mobile <- ifelse(umon.lag$signup_method == 5, 1, 0)
umon.lag$signup_desktop <- ifelse(umon.lag$signup_method == 6, 1, 0)
umon.lag$signup_method <- as.factor(umon.lag$signup_method)
# removing and storing most recent completed month (held out for scoring)
last_month <- aggregate(umon.lag$month, list(umon.lag$external_account_id), FUN = 'max')
colnames(last_month) <- c('external_account_id', 'month')
last_month <- merge(last_month, umon.lag, by = c('external_account_id', 'month'))
umon.lag <- anti_join(umon.lag, last_month, by = c('external_account_id', 'month'))
## ggplot output
#mean.data <- aggregate(umon.lag$active.y, by=list(umon.lag$month, umon.lag$signup_method, umon.lag$subscribed), mean)
#colnames(mean.data) <- c('month', 'signup_method', 'subscribed', 'prob_active')
#mean.data$signup_method <- as.character(mean.data$signup_method)
#mean.data <- mean.data[mean.data$signup_method %in% c(4,5,6),]
#mean.data$signup_method[mean.data$signup_method == 4 & mean.data$subscribed == 0] <- 'not subscribed: chrome_ext'
#mean.data$signup_method[mean.data$signup_method == 5 & mean.data$subscribed == 0] <- 'not subscribed: mobile'
#mean.data$signup_method[mean.data$signup_method == 6 & mean.data$subscribed == 0] <- 'not subscribed: desktop'
#mean.data$signup_method[mean.data$signup_method == 4 & mean.data$subscribed == 1] <- 'subscribed: chrome_ext'
#mean.data$signup_method[mean.data$signup_method == 5 & mean.data$subscribed == 1] <- 'subscribed: mobile'
#mean.data$signup_method[mean.data$signup_method == 6 & mean.data$subscribed == 1] <- 'subscribed: desktop'
#mean.data$signup_method <- as.factor(mean.data$signup_method)
#ggplot(mean.data[mean.data$month < 6,], aes(x=month, y=prob_active, colour=signup_method)) + geom_point() + geom_line()
#mean.data <- aggregate(umon$active, by=list(umon$month, umon$signup_method), mean)
#colnames(mean.data) <- c('month', 'signup_method', 'prob_active')
#mean.data$signup_method <- as.character(mean.data$signup_method)
#mean.data <- mean.data[mean.data$signup_method %in% c(4,5,6),]
#mean.data$signup_method[mean.data$signup_method == 4] <- 'chrome_ext'
#mean.data$signup_method[mean.data$signup_method == 5] <- 'mobile'
#mean.data$signup_method[mean.data$signup_method == 6] <- 'desktop'
#mean.data$signup_method <- as.factor(mean.data$signup_method)
#ggplot(mean.data[mean.data$signup_method!='mobile',], aes(x=month, y=prob_active, colour=signup_method)) + geom_point() + geom_line() + ylim(0, 1)
#mean.data <- aggregate(umon$active, by=list(umon$month, umon$subscribed), mean)
#colnames(mean.data) <- c('month', 'subscribed', 'prob_active')
#mean.data$subscribed[mean.data$subscribed == 1] <- 'subscribed'
#mean.data$subscribed[mean.data$subscribed == 0] <- 'not subscribed'
#mean.data$subscribed <- as.factor(mean.data$subscribed)
#ggplot(mean.data, aes(x=month, y=prob_active, colour=subscribed)) + geom_point() + geom_line() + ylim(0, 1) + scale_color_brewer(palette="Paired")
# compile formula and run model
# (constant columns and bookkeeping columns in no.rhs are excluded from the
# right-hand side; '-1' fits the model without an intercept)
print(noquote("Calculating logit model"))
no.rhs <- c('external_account_id', 'month', 'active', 'start_date', 'signup_method', 'signup_chrome_ext')
formula <-
as.formula(paste('active ~', paste(
colnames(umon.lag[which(!colnames(umon.lag)
%in% no.rhs)])[apply(umon.lag[which(!colnames(umon.lag) %in% no.rhs)], 2,
function(x) {length(unique(x)) >1})], collapse = ' + '), '-1'))
# full model with lag data
logit_user_event_count <- suppressWarnings((glm(formula, family = binomial(link = 'logit'), data = umon.lag)))
print(noquote("Evaluating model strength"))
diff <- cbind(umon.lag, fitted = logit_user_event_count$fitted.values)
diff <- cbind(diff, residuals = logit_user_event_count$residuals)
tmp <- aggregate(diff$month, by = list(diff$external_account_id), 'max')
colnames(tmp) <- c('external_account_id', 'month')
diff <- merge(diff, tmp, by = c('external_account_id', 'month'))
#two_class_stats(diff, 'fitted', 'active', .5)
# classification threshold chosen by maximizing inverse distance to the
# ideal ROC corner over a 0-1 grid in steps of 0.01
tmp <- thresh_iter(0, 1, 0.01, diff, 'fitted', 'active')
best.thresh <- tmp$threshold[which.max(tmp$inverse_distance)]
print(noquote(paste('Model AUC: ', round(unique(tmp$auc), digits = 4), sep = '')))
# using model estimates to create fitted values for last_month
print(noquote("Generating predicted probabilities for most recent completed month"))
estimates <- as.data.frame(t(logit_user_event_count$coefficients[!is.na(logit_user_event_count$coefficients)]))
last_month$fitted <- suppressWarnings(predict.glm(logit_user_event_count, last_month, type = 'response'))
# separating into tranches: inactive, unsubscribed users whose predicted
# probability clears the classification threshold
print(noquote("Separating recently inactive users into tranches"))
intv <- last_month[last_month$active == 0 & last_month$subscribed == 0 & last_month$fitted >= best.thresh,]
# removing users active in the current month
intv <- intv[!intv$external_account_id %in% current_month$external_account_id[current_month$active == 1],]
# t1: score cutoff such that the top tranche.min users form tranche 1
# NOTE(review): assumes at least tranche.min eligible users remain;
# otherwise t1 is NA -- confirm upstream volumes guarantee this.
t1 <- sort(intv$fitted, decreasing = T)[tranche.min]
intv$tranche[intv$fitted >= t1] <- 1
# widen each subsequent score band downward in 0.01 steps until it holds
# more than tranche.min users
t2 <- t1 - (((1-t1)/(1-best.thresh)) * 0.25)
while (nrow(intv[intv$fitted < t1 & intv$fitted >= t2,]) <= tranche.min){
t2 <- t2 - 0.01}
intv$tranche[intv$fitted < t1 & intv$fitted >= t2] <- 2
t3 <- t2 - (((1-t1)/(1-best.thresh)) * 0.25)
while (nrow(intv[intv$fitted < t2 & intv$fitted >= t3,]) <= tranche.min){
t3 <- t3 - 0.01}
intv$tranche[intv$fitted < t2 & intv$fitted >= t3] <- 3
intv$tranche[intv$fitted < t3] <- 4
print(noquote('Tranche n-size:'))
print(table(intv$tranche))
# randomize 50/50 treatment assignment across selected users
print(noquote('Assigning treatment'))
intv$treatment <- sample(0:1, nrow(intv), replace = T)
intv <- intv[, c('external_account_id', 'fitted', 'tranche', 'treatment')]
intv <- merge(intv, user_ids, by = 'external_account_id', all.x = T)
print(noquote('Storing model and threshold iterations'))
model_storage(model = logit_user_event_count, location_folder = "reactivation", model_script = "Reactivation_lag_logit.R",
strip_model_fn = strip_model, model_grain = "user", model_response = "Binary", model_outcome = "binary", model_type = "Logistic Regression",
model_test = diff, model_test_cols = c('fitted', 'active'))
# writing tranche assignment data frame to datasci_projects.tranche_assignments - **NEED TO OVERWRITE TABLE TO INCLUDE USER_ID COLUMN**
dbWriteTable(connection, c('datasci_projects','tranche_assignments'), value = as.data.frame(intv), overwrite = FALSE, append = TRUE, row.names = FALSE)
}
| /datascience-master/functions/reactivation_tranches.R | no_license | goughgorski/clusteryourself | R | false | false | 10,472 | r | reactivation_tranches <- function(connection, month.min = 3, active.var = c('confirmed', 'manually_created', 'shared'),
lag.var = c('confirmed', 'rejected', 'manually_created', 'auto_created', 'shared'), lags = 3, tranche.min = 550){
#Connect to Local PSQL Database and Pull Data
print(noquote("Connecting to local PostgreSQL Database"))
connection <- db_connect()
data <- dbGetQuery(connection, "SELECT * FROM datasci_modeling.vw_event_counts_user_month")
if(is.null(lag.var)){stop(noquote("No lag variables provided"))}
if(!lags %in% c(1:4)){stop(noquote("Number of lags must be between 1 and 4"))}
umon <- data
user_ids <- data[, c('external_account_id', 'user_id')]
umon$user_id <- NULL
print(noquote("Processing user event count data"))
# subsetting only users with minimum number of months
tmp <- aggregate(umon$month, by = list(umon$external_account_id), 'max')
colnames(tmp) <- c('external_account_id', 'month')
tmp <- tmp[tmp$month >= month.min,]
umon <- umon[umon$external_account_id %in% tmp[, c('external_account_id')],]
# empty data frame of rows = n users, columns = n months
mths <- setNames(data.frame(matrix(ncol = length(unique(umon$month)), nrow = nrow(umon))),
paste('mth_', unique(umon$month), sep=""))
# merge with user month data
umon <- cbind(umon, mths)
cols <- colnames(umon)
# populate new columns with binary based on the row's month value
umon <- data.frame(t(apply(umon, 1, function(x){
mth <- as.numeric(x['month'])
x <- t(x)
i <- grep('mth_', colnames(x))
x[i] <- ifelse(gsub("mth_", "", colnames(x))==as.numeric(mth), 1, 0)[i]
return(x)
})))
colnames(umon) <- cols
i <- sapply(umon,is.factor)
umon[i] <- lapply(umon[i], as.character)
i <- which(!colnames(umon) %in% c('start_date', 'type'))
umon[,i] <- lapply(umon[,i], as.numeric)
# creating binary active variable based on active.var argument
print(noquote(paste("Creating binary active variable based on ", paste(active.var, sep = ' ', collapse = ', '), sep = '')))
umon$active <- ifelse(eval(parse(text = paste('umon$', active.var, ' == 0', sep = '', collapse = ' & '))), 0, 1)
# removing and storing current (incomplete) month
current_month <- aggregate(umon$month, list(umon$external_account_id), FUN = 'max')
colnames(current_month) <- c('external_account_id', 'month')
current_month <- merge(current_month, umon, by = c('external_account_id', 'month'))
umon <- anti_join(umon, current_month, by = c('external_account_id', 'month'))
# creating dataset with lags
print(noquote("Creating lag variables"))
# y side
ylag <- umon[umon$month!=0, which(!colnames(umon) %in% lag.var)]
# x side
xlag <- umon[, c('external_account_id', 'month', lag.var)]
xlag$month <- xlag$month + 1
colnames(xlag)[which(colnames(xlag) %in% lag.var)] <- paste(colnames(xlag)[which(colnames(xlag) %in% lag.var)], '_1', sep='')
# merge
umon.lag <- merge(xlag, ylag, by = c('external_account_id', 'month'))
if(lags > 1){
# second lag
xlag <- umon[, c('external_account_id', 'month', lag.var)]
xlag$month <- xlag$month + 2
colnames(xlag)[which(colnames(xlag) %in% lag.var)] <- paste(colnames(xlag)[which(colnames(xlag) %in% lag.var)], '_2', sep='')
# merge
umon.lag <- merge(xlag, umon.lag, by = c('external_account_id', 'month'))}
if(lags > 2){
# third lag
xlag <- umon[, c('external_account_id', 'month', lag.var)]
xlag$month <- xlag$month + 3
colnames(xlag)[which(colnames(xlag) %in% lag.var)] <- paste(colnames(xlag)[which(colnames(xlag) %in% lag.var)], '_3', sep='')
# merge
umon.lag <- merge(xlag, umon.lag, by = c('external_account_id', 'month'))}
if(lags > 3){
# fourth lag
xlag <- umon[, c('external_account_id', 'month', lag.var)]
xlag$month <- xlag$month + 4
colnames(xlag)[which(colnames(xlag) %in% lag.var)] <- paste(colnames(xlag)[which(colnames(xlag) %in% lag.var)], '_4', sep='')
# merge
umon.lag <- merge(xlag, umon.lag, by = c('external_account_id', 'month'))}
# reconcile signup_method
umon.lag <- umon.lag[umon.lag$signup_method %in% c(2,4,5,6),]
umon.lag$signup_method[umon.lag$signup_method == 2] <- 6
umon.lag$signup_chrome_ext <- ifelse(umon.lag$signup_method == 4, 1, 0)
umon.lag$signup_mobile <- ifelse(umon.lag$signup_method == 5, 1, 0)
umon.lag$signup_desktop <- ifelse(umon.lag$signup_method == 6, 1, 0)
umon.lag$signup_method <- as.factor(umon.lag$signup_method)
# removing and storing most recent completed month
last_month <- aggregate(umon.lag$month, list(umon.lag$external_account_id), FUN = 'max')
colnames(last_month) <- c('external_account_id', 'month')
last_month <- merge(last_month, umon.lag, by = c('external_account_id', 'month'))
umon.lag <- anti_join(umon.lag, last_month, by = c('external_account_id', 'month'))
## ggplot output
#mean.data <- aggregate(umon.lag$active.y, by=list(umon.lag$month, umon.lag$signup_method, umon.lag$subscribed), mean)
#colnames(mean.data) <- c('month', 'signup_method', 'subscribed', 'prob_active')
#mean.data$signup_method <- as.character(mean.data$signup_method)
#mean.data <- mean.data[mean.data$signup_method %in% c(4,5,6),]
#mean.data$signup_method[mean.data$signup_method == 4 & mean.data$subscribed == 0] <- 'not subscribed: chrome_ext'
#mean.data$signup_method[mean.data$signup_method == 5 & mean.data$subscribed == 0] <- 'not subscribed: mobile'
#mean.data$signup_method[mean.data$signup_method == 6 & mean.data$subscribed == 0] <- 'not subscribed: desktop'
#mean.data$signup_method[mean.data$signup_method == 4 & mean.data$subscribed == 1] <- 'subscribed: chrome_ext'
#mean.data$signup_method[mean.data$signup_method == 5 & mean.data$subscribed == 1] <- 'subscribed: mobile'
#mean.data$signup_method[mean.data$signup_method == 6 & mean.data$subscribed == 1] <- 'subscribed: desktop'
#mean.data$signup_method <- as.factor(mean.data$signup_method)
#ggplot(mean.data[mean.data$month < 6,], aes(x=month, y=prob_active, colour=signup_method)) + geom_point() + geom_line()
#mean.data <- aggregate(umon$active, by=list(umon$month, umon$signup_method), mean)
#colnames(mean.data) <- c('month', 'signup_method', 'prob_active')
#mean.data$signup_method <- as.character(mean.data$signup_method)
#mean.data <- mean.data[mean.data$signup_method %in% c(4,5,6),]
#mean.data$signup_method[mean.data$signup_method == 4] <- 'chrome_ext'
#mean.data$signup_method[mean.data$signup_method == 5] <- 'mobile'
#mean.data$signup_method[mean.data$signup_method == 6] <- 'desktop'
#mean.data$signup_method <- as.factor(mean.data$signup_method)
#ggplot(mean.data[mean.data$signup_method!='mobile',], aes(x=month, y=prob_active, colour=signup_method)) + geom_point() + geom_line() + ylim(0, 1)
#mean.data <- aggregate(umon$active, by=list(umon$month, umon$subscribed), mean)
#colnames(mean.data) <- c('month', 'subscribed', 'prob_active')
#mean.data$subscribed[mean.data$subscribed == 1] <- 'subscribed'
#mean.data$subscribed[mean.data$subscribed == 0] <- 'not subscribed'
#mean.data$subscribed <- as.factor(mean.data$subscribed)
#ggplot(mean.data, aes(x=month, y=prob_active, colour=subscribed)) + geom_point() + geom_line() + ylim(0, 1) + scale_color_brewer(palette="Paired")
# compile formula and run model
print(noquote("Calculating logit model"))
no.rhs <- c('external_account_id', 'month', 'active', 'start_date', 'signup_method', 'signup_chrome_ext')
formula <-
as.formula(paste('active ~', paste(
colnames(umon.lag[which(!colnames(umon.lag)
%in% no.rhs)])[apply(umon.lag[which(!colnames(umon.lag) %in% no.rhs)], 2,
function(x) {length(unique(x)) >1})], collapse = ' + '), '-1'))
# full model with lag data
logit_user_event_count <- suppressWarnings((glm(formula, family = binomial(link = 'logit'), data = umon.lag)))
print(noquote("Evaluating model strength"))
diff <- cbind(umon.lag, fitted = logit_user_event_count$fitted.values)
diff <- cbind(diff, residuals = logit_user_event_count$residuals)
tmp <- aggregate(diff$month, by = list(diff$external_account_id), 'max')
colnames(tmp) <- c('external_account_id', 'month')
diff <- merge(diff, tmp, by = c('external_account_id', 'month'))
#two_class_stats(diff, 'fitted', 'active', .5)
tmp <- thresh_iter(0, 1, 0.01, diff, 'fitted', 'active')
best.thresh <- tmp$threshold[which.max(tmp$inverse_distance)]
print(noquote(paste('Model AUC: ', round(unique(tmp$auc), digits = 4), sep = '')))
# using model estimates to create fitted values for last_month
print(noquote("Generating predicted probabilities for most recent completed month"))
estimates <- as.data.frame(t(logit_user_event_count$coefficients[!is.na(logit_user_event_count$coefficients)]))
last_month$fitted <- suppressWarnings(predict.glm(logit_user_event_count, last_month, type = 'response'))
# separating into tranches
print(noquote("Separating recently inactive users into tranches"))
intv <- last_month[last_month$active == 0 & last_month$subscribed == 0 & last_month$fitted >= best.thresh,]
# removing users active in the current month
intv <- intv[!intv$external_account_id %in% current_month$external_account_id[current_month$active == 1],]
t1 <- sort(intv$fitted, decreasing = T)[tranche.min]
intv$tranche[intv$fitted >= t1] <- 1
t2 <- t1 - (((1-t1)/(1-best.thresh)) * 0.25)
while (nrow(intv[intv$fitted < t1 & intv$fitted >= t2,]) <= tranche.min){
t2 <- t2 - 0.01}
intv$tranche[intv$fitted < t1 & intv$fitted >= t2] <- 2
t3 <- t2 - (((1-t1)/(1-best.thresh)) * 0.25)
while (nrow(intv[intv$fitted < t2 & intv$fitted >= t3,]) <= tranche.min){
t3 <- t3 - 0.01}
intv$tranche[intv$fitted < t2 & intv$fitted >= t3] <- 3
intv$tranche[intv$fitted < t3] <- 4
print(noquote('Tranche n-size:'))
print(table(intv$tranche))
print(noquote('Assigning treatment'))
intv$treatment <- sample(0:1, nrow(intv), replace = T)
intv <- intv[, c('external_account_id', 'fitted', 'tranche', 'treatment')]
intv <- merge(intv, user_ids, by = 'external_account_id', all.x = T)
print(noquote('Storing model and threshold iterations'))
model_storage(model = logit_user_event_count, location_folder = "reactivation", model_script = "Reactivation_lag_logit.R",
strip_model_fn = strip_model, model_grain = "user", model_response = "Binary", model_outcome = "binary", model_type = "Logistic Regression",
model_test = diff, model_test_cols = c('fitted', 'active'))
# writing tranche assignment data frame to datasci_projects.tranche_assignments - **NEED TO OVERWRITE TABLE TO INCLUDE USER_ID COLUMN**
dbWriteTable(connection, c('datasci_projects','tranche_assignments'), value = as.data.frame(intv), overwrite = FALSE, append = TRUE, row.names = FALSE)
}
|
\name{Biodetection}
\alias{Biodetection-class}
\alias{Biodetection}
\alias{show,Biodetection-method}
\alias{explo.plot,Biodetection-method}
\alias{dat2save,Biodetection-method}
\docType{class}
\title{Biodetection class}
\description{
Biodetection class generated from dat() function with type="biodetection". This object contains the percentage of each biological
class (e.g. biotype) in the genome (i.e. in the whole set of features provided), the corresponding percentage detected by the sample
and the percentage of the biotype within the sample.
}
\usage{
\S4method{explo.plot}{Biodetection}(object, samples = c(1, 2), plottype = c("persample", "comparison"), toplot = "protein_coding", ...)
\S4method{dat2save}{Biodetection}(object)
}
\arguments{
\item{object}{
Object generated from \code{dat()} function.
}
\item{samples}{
Samples or conditions to be plotted. If NULL, the first two samples are plotted, because the plot for this object admits a maximum of two samples.
}
\item{plottype}{
If plottype="persample", each sample is plotted in a separate plot displaying the abundance of each biotype in the genome, the percentage of each biotype detected in the sample, and the abundance of each biotype within the sample.
If plottype="comparison", two samples can be compared in the same plot. Two plots are generated, one for the percentage of biotype detected by each of the compared samples, and the other
for the abundance of the biotypes within the compared samples.
}
\item{toplot}{
If plottype="comparison" and a biotype is specified in this argument (by default toplot="protein_coding"), a proportion test is performed to test if the abundance of that biotype
is significantly different for the two samples being compared.
}
\item{...}{
Any argument from \code{par}.
}
}
\section{Slots/List Components}{
An object of this class contains an element (dat) which is a list with the following components:
\code{genome}: Vector containing the percentage of features per biotype in the genome.
\code{biotables}: List with as many elements as samples or conditions. For each biotype, each element of the list contains the percentage of genome features of that biotype detected in the sample or condition, and the percentage of the detected features in that sample or condition that belong to the biotype.
}
\section{Methods}{
This class has a specific \code{show} method to print
a summary of the elements it contains, and a \code{dat2save} method
to save the relevant information in an object in a clean way. It also has an
\code{explo.plot} method to plot the data contained in the object.
}
\author{Sonia Tarazona}
\keyword{classes}
| /man/Biodetection.Rd | no_license | SoniaTC/NOISeq | R | false | false | 2,679 | rd | \name{Biodetection}
\alias{Biodetection-class}
\alias{Biodetection}
\alias{show,Biodetection-method}
\alias{explo.plot,Biodetection-method}
\alias{dat2save,Biodetection-method}
\docType{class}
\title{Biodetection class}
\description{
Biodetection class generated from dat() function with type="biodetection". This object contains the percentage of each biological
class (e.g. biotype) in the genome (i.e. in the whole set of features provided), the corresponding percentage detected by the sample
and the percentage of the biotype within the sample.
}
\usage{
\S4method{explo.plot}{Biodetection}(object, samples = c(1, 2), plottype = c("persample", "comparison"), toplot = "protein_coding", ...)
\S4method{dat2save}{Biodetection}(object)
}
\arguments{
\item{object}{
Object generated from \code{dat()} function.
}
\item{samples}{
Samples or conditions to be plotted. If NULL, the first two samples are plotted, because the plot for this object admits a maximum of two samples.
}
\item{plottype}{
If plottype="persample", each sample is plotted in a separate plot displaying the abundance of each biotype in the genome, the percentage of each biotype detected in the sample, and the abundance of each biotype within the sample.
If plottype="comparison", two samples can be compared in the same plot. Two plots are generated, one for the percentage of biotype detected by each of the compared samples, and the other
for the abundance of the biotypes within the compared samples.
}
\item{toplot}{
If plottype="comparison" and a biotype is specified in this argument (by default toplot="protein_coding"), a proportion test is performed to test if the abundance of that biotype
is significantly different for the two samples being compared.
}
\item{...}{
Any argument from \code{par}.
}
}
\section{Slots/List Components}{
An object of this class contains an element (dat) which is a list with the following components:
\code{genome}: Vector containing the percentage of features per biotype in the genome.
\code{biotables}: List with as many elements as samples or conditions. For each biotype, each element of the list contains the percentage of genome features of that biotype detected in the sample or condition, and the percentage of the detected features in that sample or condition that belong to the biotype.
}
\section{Methods}{
This class has a specific \code{show} method to print
a summary of the elements it contains, and a \code{dat2save} method
to save the relevant information in an object in a clean way. It also has an
\code{explo.plot} method to plot the data contained in the object.
}
\author{Sonia Tarazona}
\keyword{classes}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createFLT.R
\name{createFLT}
\alias{createFLT}
\title{Create cells' FLT}
\usage{
createFLT(cell_list, Ncols)
}
\arguments{
\item{cell_list}{A list containing all the cell instants of the movie.}
\item{Ncols}{Number of colonies in the movie, a non-zero positive integer value.}
}
\value{
A named list with the following components:
\item{LTmain}{The main part of the overall FLT,
a connected lineage tree containing the imaginary \emph{root} cells (object of class \code{"igraph"}).
\cr\cr
Attribute \code{"colony"} in the \code{LTmain} depicts the starting colony of each cell instant
(i.e. the colony of the corresponding colony's \emph{root} cell from which a cell instant emanated).
This approach is necessary in order to keep track of merging colonies.}
\item{branches}{A list with the motherless branches of the overall FLT.
Each branch (element of the list) is a connected lineage tree (object of class \code{"igraph"}).
Motherless branches arise from tracking errors, when a cell instant (the root of the branch) fails to be
connected to any cell instant of the previous frame,
or when a cell instant (the root of the branch) has just entered the field of view.}
}
\description{
Creates the cells' Forest of Lineage Trees (FLT) given a list containing all the cell instants of the movie.
}
\details{
Apart from the cell instants of the movie contained in the \code{cell_list},
FLT nodes include an imaginary main \emph{root} cell as well as imaginary \emph{root} cells for each colony.
Colonies' \emph{root} cells are daughters of the main \emph{root} cell.
Cell instants of the first frame of the movie are daughters of the corresponding colony's \emph{root} cell.
The imaginary \emph{root} cells are used to facilitate the tree representation of the movie and the colony tracking
and are automatically excluded from the analysis.
\cr\cr
Each node of the FLT has as attributes all numeric and boolean values existing as components
in the corresponding element of the \code{cell_list}.
The imaginary \emph{root} cells have value \code{-1} in all numeric attributes and value \code{FALSE} in all boolean attributes.
The following character string values also form attributes of each FLT node:
\itemize{
\item \code{"name"} is the label of the cell in the FLT, a non-zero positive integer number stored as a character string.
Value \code{"1"} corresponds to the main \emph{root} cell.
Values \code{"1+<i>"} correspond to the colonies' \emph{root} cells, where \code{"<i>"} is the colony ID.
The rest values correspond to the cell instants in the \code{cell_list} (1-1 correspondence).
\item \code{"cellName"}, as in the \code{cell_list}.
Value \code{"root"} is used for the main \emph{root} cell.
Values \code{"colony<i>"} are used for the colonies' \emph{root} cells, where \code{"<i>"} is the colony ID.
\item \code{"colId"}, as in the \code{cell_list}, but here stored as a character string.
Value \code{"-1"} is used for the imaginary \emph{root} cells.
\cr\cr
NOTE: This attribute is stored if and only if it exists as a component in the elements of the \code{cell_list}.
}
}
\seealso{
\code{\link{save_tree}} for saving a tree on disc,
\code{\link{add_branch}} for connecting a motherless branch to a lineage tree.
}
| /man/createFLT.Rd | no_license | vicstefanou/ViSCA | R | false | true | 3,285 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createFLT.R
\name{createFLT}
\alias{createFLT}
\title{Create cells' FLT}
\usage{
createFLT(cell_list, Ncols)
}
\arguments{
\item{cell_list}{A list containing all the cell instants of the movie.}
\item{Ncols}{Number of colonies in the movie, a non-zero positive integer value.}
}
\value{
A named list with the following components:
\item{LTmain}{The main part of the overall FLT,
a connected lineage tree containing the imaginary \emph{root} cells (object of class \code{"igraph"}).
\cr\cr
Attribute \code{"colony"} in the \code{LTmain} depicts the starting colony of each cell instant
(i.e. the colony of the corresponding colony's \emph{root} cell from which a cell instant emanated).
This approach is necessary in order to keep track of merging colonies.}
\item{branches}{A list with the motherless branches of the overall FLT.
Each branch (element of the list) is a connected lineage tree (object of class \code{"igraph"}).
Motherless branches arise from tracking errors, when a cell instant (the root of the branch) fails to be
connected to any cell instant of the previous frame,
or when a cell instant (the root of the branch) has just entered the field of view.}
}
\description{
Creates the cells' Forest of Lineage Trees (FLT) given a list containing all the cell instants of the movie.
}
\details{
Apart from the cell instants of the movie contained in the \code{cell_list},
FLT nodes include an imaginary main \emph{root} cell as well as imaginary \emph{root} cells for each colony.
Colonies' \emph{root} cells are daughters of the main \emph{root} cell.
Cell instants of the first frame of the movie are daughters of the corresponding colony's \emph{root} cell.
The imaginary \emph{root} cells are used to facilitate the tree representation of the movie and the colony tracking
and are automatically excluded from the analysis.
\cr\cr
Each node of the FLT has as attributes all numeric and boolean values existing as components
in the corresponding element of the \code{cell_list}.
The imaginary \emph{root} cells have value \code{-1} in all numeric attributes and value \code{FALSE} in all boolean attributes.
The following character string values also form attributes of each FLT node:
\itemize{
\item \code{"name"} is the label of the cell in the FLT, a non-zero positive integer number stored as a character string.
Value \code{"1"} corresponds to the main \emph{root} cell.
Values \code{"1+<i>"} correspond to the colonies' \emph{root} cells, where \code{"<i>"} is the colony ID.
The rest values correspond to the cell instants in the \code{cell_list} (1-1 correspondence).
\item \code{"cellName"}, as in the \code{cell_list}.
Value \code{"root"} is used for the main \emph{root} cell.
Values \code{"colony<i>"} are used for the colonies' \emph{root} cells, where \code{"<i>"} is the colony ID.
\item \code{"colId"}, as in the \code{cell_list}, but here stored as a character string.
Value \code{"-1"} is used for the imaginary \emph{root} cells.
\cr\cr
NOTE: This attribute is stored if and only if it exists as a component in the elements of the \code{cell_list}.
}
}
\seealso{
\code{\link{save_tree}} for saving a tree on disc,
\code{\link{add_branch}} for connecting a motherless branch to a lineage tree.
}
|
#NOTE: this code requires that the two scripts be saved in the same directory.
# Repeatedly runs the simulation script "stochastic_freq-depend.r" and records,
# for each of the 3 populations, the time to extinction (last time step with a
# positive abundance) and the mean abundance over the run.
# Assumes the sourced script defines, in this environment:
#   N       - matrix of abundances (time steps x 3 populations) -- TODO confirm
#   TotTime - total number of time steps in one run -- TODO confirm
iterations=100
# xt[j, k]: time to extinction of population k in replicate j
xt=matrix(NA, nrow=iterations, ncol=3)
# mt[j, k]: mean abundance of population k in replicate j
mt=matrix(NA,nrow=iterations, ncol=3)
for(j in 1:iterations) {
# each source() call performs one fresh stochastic run, redefining N
source("stochastic_freq-depend.r")
# last time step at which each population was still present
# (max(which(...)) warns and yields -Inf if a population never appears)
xt[j,1] = max(which(N[,1] > 0))
xt[j,2] = max(which(N[,2] > 0))
xt[j,3] = max(which(N[,3] > 0))
mt[j,1] = mean(N[,1])
mt[j,2] = mean(N[,2])
mt[j,3] = mean(N[,3])
}
#mean and standard deviation of time to extinction for each population
#(filter out all time-to-extinction values that equal the total run-time)
#pop1MTE = mean(xt[which(xt[,1] != TotTime),1])
#pop2MTE = mean(xt[which(xt[,2] != TotTime),2])
#pop3MTE = mean(xt[which(xt[,3] != TotTime),3])
#pop1SDTE = sd(xt[which(xt[,1] != TotTime),1])
#pop2SDTE = sd(xt[which(xt[,2] != TotTime),2])
#pop3SDTE = sd(xt[which(xt[,3] != TotTime),3])
#number of times each species survives to the end of the simulation
#(a population "wins" a replicate when it is still extant at time TotTime)
wins1 = length(which(xt[,1] == TotTime))
wins2 = length(which(xt[,2] == TotTime))
wins3 = length(which(xt[,3] == TotTime))
#expected=c(1/3,1/3,1/3)
#wins=c(wins1,wins2,wins3)
#mean and sd times to extinction for each population
#pop1MTE
#pop2MTE
#pop3MTE
#pop1SDTE
#pop2SDTE
#pop3SDTE
#mean and SD population abundance
#mean(mt[,1]) #mean pop 1
#mean(mt[,2]) #mean pop 2
#mean(mt[,3]) #mean pop 3
#sd(mt[,1]) #sd pop 1
#sd(mt[,2]) #sd pop 2
#sd(mt[,3]) #sd pop 3
#No. simulations:
#iterations
#number of times that Pop1 survives to the end of the simulation
#wins1
#number of times that Pop2 survives to the end of the simulation
#wins2
#number of times that Pop3 survives to the end of the simulation
#wins3
#Chi-Square test on survival numbers
#chisq.test(wins,p=expected) | /Community Original/iterations.r | no_license | gmyenni/RareStabilizationSimulation | R | false | false | 1,698 | r | #NOTE: this code requires that the two scripts be saved in the same directory.
iterations=100
xt=matrix(NA, nrow=iterations, ncol=3)
mt=matrix(NA,nrow=iterations, ncol=3)
for(j in 1:iterations) {
source("stochastic_freq-depend.r")
xt[j,1] = max(which(N[,1] > 0))
xt[j,2] = max(which(N[,2] > 0))
xt[j,3] = max(which(N[,3] > 0))
mt[j,1] = mean(N[,1])
mt[j,2] = mean(N[,2])
mt[j,3] = mean(N[,3])
}
#mean and standard deviation of time to extinction for each population
#(filter out all time-to-extinction values that equal the total run-time)
#pop1MTE = mean(xt[which(xt[,1] != TotTime),1])
#pop2MTE = mean(xt[which(xt[,2] != TotTime),2])
#pop3MTE = mean(xt[which(xt[,3] != TotTime),3])
#pop1SDTE = sd(xt[which(xt[,1] != TotTime),1])
#pop2SDTE = sd(xt[which(xt[,2] != TotTime),2])
#pop3SDTE = sd(xt[which(xt[,3] != TotTime),3])
#number of times each species survives to the end of the simulation
wins1 = length(which(xt[,1] == TotTime))
wins2 = length(which(xt[,2] == TotTime))
wins3 = length(which(xt[,3] == TotTime))
#expected=c(1/3,1/3,1/3)
#wins=c(wins1,wins2,wins3)
#mean and sd times to extinction for each population
#pop1MTE
#pop2MTE
#pop3MTE
#pop1SDTE
#pop2SDTE
#pop3SDTE
#mean and SD population abundance
#mean(mt[,1]) #mean pop 1
#mean(mt[,2]) #mean pop 1
#mean(mt[,3]) #mean pop 1
#sd(mt[,1]) #sd pop 1
#sd(mt[,2]) #sd pop 2
#sd(mt[,3]) #sd pop 3
#No. simulations:
#iterations
#number of times that Pop1 survives to the end of the simulation
#wins1
#number of times that Pop2 survives to the end of the simulation
#wins2
#number of times that Pop2 survives to the end of the simulation
#wins3
#Chi-Square test on survival numbers
#chisq.test(wins,p=expected) |
#EXERCISES ON ARRAYS AND FACTORS.
#----------------------------------------
#array with 3 dimensions: 2 rows, 2 columns and 2 layers of depth
a <- array(1:8, dim=c(2, 2, 2))
'FACTORES'
'------------------------------------------------------'
#Creating an unordered factor
gender_vector <- c('M', 'F', 'F', 'M', 'M', 'F')
gender_factor <- factor(gender_vector)
gender_factor
#Creating an ordered factor (without specifying the order) - alphabetical
size_vector <- c('S', 'L', 'M', 'L', 'S', 'M')
size_factor <- factor(size_vector, ordered = TRUE) # L < M < S: levels are sorted alphabetically
size_factor
#Creating an ordered factor (specifying the order explicitly)
size_vector_2 <- c('S', 'L', 'M', 'L', 'S', 'M')
size_factor_2 <- factor(size_vector_2, ordered = TRUE, levels = c("S", "M", "L")) # S < M < L
size_factor_2
#Factors: operations
'Comprobaciones en factores sin orden solo se puede usar =='
gender_factor[1] == gender_factor[2] #FALSE: the two values differ ('M' vs 'F')
gender_factor[1] == size_factor[2] # ERROR: factors can only be compared when they share the same level set.
#Ordered factors additionally support >, < ...
size_factor[1] > size_factor[2] # TRUE ('S' > 'L' under the alphabetical ordering)
#Getting the levels
levels(size_factor)
levels(size_factor)[1] #slicing also works on the levels vector
#Checking whether certain levels exist
any(levels(size_factor) %in% c('L', 'S')) #is any of these levels present?
#Adding new levels
levels(size_factor)[length(levels(size_factor)) + 1] <- 'XL'
levels(size_factor) <- c(levels(size_factor), 'XS')
# Both forms append a new level at the end.
#Reordering levels: index the LEVELS vector, not the factor itself.
# (The previous version wrote levels(size_factor[c(5, 3:1, 4)]), which subsets
#  the factor's values and leaves the level order unchanged; subsetting
#  levels(size_factor) yields the intended order XS < S < M < L < XL.)
size_factor <- factor(size_factor, ordered = TRUE, levels = levels(size_factor)[c(5, 3:1, 4)])
size_factor
#FACTOR EXERCISES
#---------------------------------
'Crea dos factores con los siguientes vectores
1. Animales sin orden
2. Temperatura con orden'
animals_vector <- c("Elephant", "Giraffe", "Donkey", "Horse")
temperature_vector <- c("High", "Low", "High","Low", "Medium")
animals_factor <- factor(animals_vector)
temperature_factor <- factor(temperature_vector, ordered = TRUE, levels = c("Low", "Medium", "High"))
'Modifica los niveles del factor para que sean: "Female" y "Male"'
survey_vector <- c("M", "F", "F", "M", "M")
#Method 1: rewrite the original vector, then build the factor.
for(nivel in 1:length(survey_vector)){
  if(survey_vector[nivel] == "M"){
    survey_vector[nivel] <- "Male"
  }else{
    survey_vector[nivel] <- "Female"
  }
}
survey_vector
survey_factor <- factor(survey_vector)
survey_factor
#Method 2: build the factor first, then rename its levels in place.
survey_vector <- c("M", "F", "F", "M", "M")
length(levels(survey_factor)) # still the factor from method 1 (2 levels)
survey_factor <- factor(survey_vector)
for(nivel in 1:length(levels(survey_factor))){
  if(levels(survey_factor)[nivel] == "M"){
    levels(survey_factor)[nivel] <- "Male"
  }else{
    levels(survey_factor)[nivel] <- "Female"
  }
}
'Crea un factor ordenado para el siguiente vector'
speed_vector <- c("Fast", "Slow", "Slow", "Fast", "Ultra-fast")
# Levels must match the data exactly ("Ultra-fast", not "Ultra-Fast"),
# otherwise that observation silently becomes NA in the factor.
speed_factor <- factor(speed_vector, ordered = TRUE, levels = c("Slow","Fast","Ultra-fast"))
speed_factor
| /Scripts/Ejercicios_Arrays_Factores1.R | no_license | WadieAC/R-CODES | R | false | false | 3,200 | r | #EJERCICIOS DE ARRAYS Y FACTORES.
#----------------------------------------
#este vector con 3 dimensiones 2 filas 2 columnas y 2 capas de profundidad
a <- array(1:8, dim=c(2, 2, 2))
'FACTORES'
'------------------------------------------------------'
#Creación de factor sin orden
gender_vector <- c('M', 'F', 'F', 'M', 'M', 'F')
gender_factor <- factor(gender_vector)
gender_factor
#Creación de factor con orden (sin especificar qué orden) - alfabéticamente
size_vector <- c('S', 'L', 'M', 'L', 'S', 'M')
size_factor <- factor(size_vector, ordered = TRUE) # L < M < S ordena alfabeticamente
size_factor
#Creación de factor con orden (especificando el orden)
size_vector_2 <- c('S', 'L', 'M', 'L', 'S', 'M')
size_factor_2 <- factor(size_vector_2, ordered = TRUE, levels = c("S", "M", "L")) # S < M < L
size_factor_2
#Factores: Operaciones
'Comprobaciones en factores sin orden solo se puede usar =='
gender_factor[1] == gender_factor[2] #Devuelve FALSE porque no están en el mismo orden
gender_factor[1] == size_factor[2] # Da ERROR: solo se pueden comparar factores si son del mismo tipo.
#En factores con orden se puede usar >, < …
size_factor[1] > size_factor[2] # Da True.
#Obtener los niveles
levels(size_factor)
levels(size_factor)[1] #podemos aplicar slicing con los niveles.
#Comprobar la existencia de niveles
any(levels(size_factor) %in% c('L', 'S')) #comprobar si existe un factor
#Añadir nuevos niveles
levels(size_factor)[length(levels(size_factor)) + 1] <- 'XL'
levels(size_factor) <- c(levels(size_factor), 'XS')
# En ambos casos, añadimos un nivel nuevo al final del todo.
#Reordenar niveles- pero lo hará por orden alfabético.
size_factor <- factor(size_factor, ordered = TRUE, levels(size_factor[c(5, 3:1, 4)]))
size_factor
#EJERCICIOS DE FACTORES
#---------------------------------
'Crea dos factores con los siguientes vectores
1. Animales sin orden
2. Temperatura con orden'
animals_vector <- c("Elephant", "Giraffe", "Donkey", "Horse")
temperature_vector <- c("High", "Low", "High","Low", "Medium")
animals_factor <- factor(animals_vector)
temperature_factor <- factor(temperature_vector, ordered = TRUE, levels = c("Low", "Medium", "High"))
'Modifica los niveles del factor para que sean: "Female" y "Male"'
survey_vector <- c("M", "F", "F", "M", "M")
#Modo 1, modifico vector inicial y genero factores.
for(nivel in 1:length(survey_vector)){
if(survey_vector[nivel] == "M"){
survey_vector[nivel] <- "Male"
}else{
survey_vector[nivel] <- "Female"
}
}
survey_vector
survey_factor <- factor(survey_vector)
survey_factor
#Modo 2. Genero los factores y hago el mismo for pero con el slicing al factor
survey_vector <- c("M", "F", "F", "M", "M")
length(levels(survey_factor))
survey_factor <- factor(survey_vector)
for(nivel in 1:length(levels(survey_factor))){
if(levels(survey_factor)[nivel] == "M"){
levels(survey_factor)[nivel] <- "Male"
}else{
levels(survey_factor)[nivel] <- "Female"
}
}
'Crea un factor ordenado para el siguiente vector'
speed_vector <- c("Fast", "Slow", "Slow", "Fast", "Ultra-fast")
speed_factor <- factor(speed_vector, ordered = TRUE, levels = c("Slow","Fast","Ultra-Fast"))
speed_factor
|
# load source files
LoadSource<-function(bamchop.path) {
source.path<-paste(bamchop.path, '/source', sep='');
sources<-dir(source.path);
sources<-sources[grep('R$', sources)];
sapply(paste(source.path, sources, sep='/'), source)->x;
} | /source/LoadSource.R | no_license | BioinformaticsArchive/bamchop | R | false | false | 242 | r | # load source files
# Source every file ending in "R" found in <bamchop.path>/source.
# Scripts are evaluated with source()'s defaults (local = FALSE), so the
# objects they create land in the global environment.  The sapply() results
# are returned invisibly, matching the original behaviour.
LoadSource <- function(bamchop.path) {
  source.path <- paste(bamchop.path, '/source', sep = '')
  script.names <- grep('R$', dir(source.path), value = TRUE)
  invisible(sapply(paste(source.path, script.names, sep = '/'), source))
}
# RPackageBuildDemo.R
# Do these first
# install.packages(c("devtools", "roxygen2", "testthat", "usethis"))
# update.packages(ask = FALSE, checkBuilt = TRUE)
# You need the #'@export before any local functions you want visible
#
#' Print a greeting to the console (no trailing newline).
#' @export
hello <- function() {
  cat('Hello ...')
}
#' Print a farewell to the console, followed by a newline.
#' @export
goodbye <- function() {
  cat('Goodbye\n')
}
# Do this: usethis::create_package("~/Dropbox/RStudioPackageBuild20MinFolder/RPackageBuildDemoPkgFolder")
# If you rely on functions func1 func2 and func3 from pkgX
# you also need to do this in command line:
# > usethis::use_package("pkgX")
# and in the source code:
# #'@importFrom <pkgX> <func1> <func2> <func3> for any
# and if you want users to be able to access func1 and func3
# you need to add this to source:
# #' @export
# pkgX::func1
# #' @export
# pkgX::func3
| /R/RPackageBuildDemo.R | no_license | tnearey/RPackageCreationDemo | R | false | false | 807 | r | # RPackageBuildDemo.R
# Do these first
# install.packages(c("devtools", "roxygen2", "testthat", "usethis"))
# update.packages(ask = FALSE, checkBuilt = TRUE)
# You need the #'@export before any local functions you want visible
#
#' @export
hello = function (){cat('Hello ...')}
#' @export
goodbye = function (){cat('Goodbye\n')}
# Do this: usethis::create_package("~/Dropbox/RStudioPackageBuild20MinFolder/RPackageBuildDemoPkgFolder")
# If you rely on functions func1 func2 and func3 from pkgX
# you also need to do this in command line:
# > usethis: use_package("pkgX")
# and in the source code:
# #'@importFrom <pkgX> <func1> <func2> <func3> for any
# and if you want users to be able to access func1 and func3
# you need to add this to source:
# #' @export
# pkgX::func1
# #' @export
# pkgX::func3
|
#' @title A-priori data information class
#' @description
#' Construct an \code{apriori} object holding the a-priori mean and variance
#' for the given type of logger data.
#' @param data_type character, the type of data ("air pressure" or
#'   "hydrostatic pressure"); validated via \code{match.arg}.
#' @param units character, the measurement units (currently only "cmH2O").
#' @return An object of class \code{apriori}.
#' @examples
#' # For example:
#' ap <- apriori("air pressure", "cmH2O")
#'
#' @export
#'
apriori <- function(data_type = c("air pressure", "hydrostatic pressure"),
                    units = c("cmH2O")) {
  data_type <- match.arg(data_type)
  units <- match.arg(units)
  # NOTE(review): the same prior mean/variance is used for every data_type --
  # confirm this is intentional rather than an air-pressure-only prior.
  prior <- list("mean" = 1033.317,
                "var" = 9.586297^2,
                "data_type" = data_type,
                "units" = units)
  class(prior) <- "apriori"
  prior
}
# Register the S3 class so it can appear in S4 method signatures.
setOldClass("apriori")
| /gwloggeR/R/apriori.R | permissive | afronhoffs/groundwater-logger-validation | R | false | false | 683 | r | #' @title A-priori data information class
#' @description
#' This function creates an apriori class based on input.
#' @param data_type character, specifying type of data
#' @param units character, specifying units of data
#' @return A-priori object.
#' @examples
#' # For example:
#' ap <- apriori("air pressure", "cmH2O")
#'
#' @export
#'
apriori <- function(data_type = c("air pressure", "hydrostatic pressure"), units = c("cmH2O")) {
data_type <- match.arg(data_type)
units <- match.arg(units)
structure(
list("mean" = 1033.317,
"var" = 9.586297^2,
"data_type" = data_type,
"units" = units),
class = "apriori")
}
setOldClass("apriori")
|
# Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.15.3
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ApiResponseSecurityDonchianChannel Class
#'
#' Generated response wrapper for the Intrinio Donchian Channel security
#' endpoint.  Bundles the technical values with indicator / security
#' metadata and the pagination token, and provides JSON (de)serialization.
#'
#' @field technicals list of Donchian Channel technical-value objects
#' @field indicator technical-indicator metadata object
#' @field security summary object for the security the values refer to
#' @field next_page token for the next page of results (NA when absent)
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ApiResponseSecurityDonchianChannel <- R6::R6Class(
'ApiResponseSecurityDonchianChannel',
public = list(
`technicals` = NA,
# data.frame view of `technicals`; populated only by setFromList()
`technicals_data_frame` = NULL,
`indicator` = NA,
`security` = NA,
`next_page` = NA,
# Constructor: every argument is optional; omitted fields keep their
# NA/NULL defaults above.
initialize = function(`technicals`, `indicator`, `security`, `next_page`){
if (!missing(`technicals`)) {
self$`technicals` <- `technicals`
}
if (!missing(`indicator`)) {
self$`indicator` <- `indicator`
}
if (!missing(`security`)) {
self$`security` <- `security`
}
if (!missing(`next_page`)) {
self$`next_page` <- `next_page`
}
},
# Convert this object to a list suitable for jsonlite::toJSON().
# Fields holding (lists of) R6 objects are delegated to their own toJSON();
# anything else is serialized directly.
toJSON = function() {
ApiResponseSecurityDonchianChannelObject <- list()
if (!is.null(self$`technicals`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`technicals`) && ((length(self$`technicals`) == 0) || ((length(self$`technicals`) != 0 && R6::is.R6(self$`technicals`[[1]]))))) {
ApiResponseSecurityDonchianChannelObject[['technicals']] <- lapply(self$`technicals`, function(x) x$toJSON())
} else {
ApiResponseSecurityDonchianChannelObject[['technicals']] <- jsonlite::toJSON(self$`technicals`, auto_unbox = TRUE)
}
}
if (!is.null(self$`indicator`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`indicator`) && ((length(self$`indicator`) == 0) || ((length(self$`indicator`) != 0 && R6::is.R6(self$`indicator`[[1]]))))) {
ApiResponseSecurityDonchianChannelObject[['indicator']] <- lapply(self$`indicator`, function(x) x$toJSON())
} else {
ApiResponseSecurityDonchianChannelObject[['indicator']] <- jsonlite::toJSON(self$`indicator`, auto_unbox = TRUE)
}
}
if (!is.null(self$`security`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`security`) && ((length(self$`security`) == 0) || ((length(self$`security`) != 0 && R6::is.R6(self$`security`[[1]]))))) {
ApiResponseSecurityDonchianChannelObject[['security']] <- lapply(self$`security`, function(x) x$toJSON())
} else {
ApiResponseSecurityDonchianChannelObject[['security']] <- jsonlite::toJSON(self$`security`, auto_unbox = TRUE)
}
}
if (!is.null(self$`next_page`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`next_page`) && ((length(self$`next_page`) == 0) || ((length(self$`next_page`) != 0 && R6::is.R6(self$`next_page`[[1]]))))) {
ApiResponseSecurityDonchianChannelObject[['next_page']] <- lapply(self$`next_page`, function(x) x$toJSON())
} else {
ApiResponseSecurityDonchianChannelObject[['next_page']] <- jsonlite::toJSON(self$`next_page`, auto_unbox = TRUE)
}
}
ApiResponseSecurityDonchianChannelObject
},
# Populate fields from a JSON string.
# NOTE(review): unlike fromJSONString(), this assigns the raw parsed lists
# directly instead of building typed objects via setFromList() -- confirm
# whether this asymmetry is intended by the code generator.
fromJSON = function(ApiResponseSecurityDonchianChannelJson) {
ApiResponseSecurityDonchianChannelObject <- jsonlite::fromJSON(ApiResponseSecurityDonchianChannelJson)
if (!is.null(ApiResponseSecurityDonchianChannelObject$`technicals`)) {
self$`technicals` <- ApiResponseSecurityDonchianChannelObject$`technicals`
}
if (!is.null(ApiResponseSecurityDonchianChannelObject$`indicator`)) {
self$`indicator` <- ApiResponseSecurityDonchianChannelObject$`indicator`
}
if (!is.null(ApiResponseSecurityDonchianChannelObject$`security`)) {
self$`security` <- ApiResponseSecurityDonchianChannelObject$`security`
}
if (!is.null(ApiResponseSecurityDonchianChannelObject$`next_page`)) {
self$`next_page` <- ApiResponseSecurityDonchianChannelObject$`next_page`
}
},
# Serialize this object to a pretty-printed JSON string.
toJSONString = function() {
jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
},
# Parse a JSON string (without data.frame simplification) and populate
# typed fields via setFromList().
fromJSONString = function(ApiResponseSecurityDonchianChannelJson) {
ApiResponseSecurityDonchianChannelObject <- jsonlite::fromJSON(ApiResponseSecurityDonchianChannelJson, simplifyDataFrame = FALSE)
self$setFromList(ApiResponseSecurityDonchianChannelObject)
},
# Populate fields from a plain list, constructing the typed member objects
# (DonchianChannelTechnicalValue, TechnicalIndicator, SecuritySummary) and
# building the technicals_data_frame convenience view.
setFromList = function(listObject) {
self$`technicals` <- lapply(listObject$`technicals`, function(x) {
DonchianChannelTechnicalValueObject <- DonchianChannelTechnicalValue$new()
DonchianChannelTechnicalValueObject$setFromList(x)
return(DonchianChannelTechnicalValueObject)
})
technicals_list <- lapply(self$`technicals`, function(x) {
return(x$getAsList())
})
self$`technicals_data_frame` <- do.call(rbind, lapply(technicals_list, data.frame))
self$`indicator` <- TechnicalIndicator$new()
self$`indicator`$setFromList(listObject$`indicator`)
self$`security` <- SecuritySummary$new()
self$`security`$setFromList(listObject$`security`)
if (!is.null(listObject$`next_page`)) {
self$`next_page` <- listObject$`next_page`
}
else {
self$`next_page` <- NA
}
},
# Flatten this object to a one-level list: indicator/security fields are
# prefixed with "indicator_" / "security_".  The technicals themselves are
# deliberately excluded (see the generator's commented-out code below).
getAsList = function() {
listObject = list()
# listObject[["technicals"]] <- lapply(self$`technicals`, function(o) {
# return(o$getAsList())
# })
indicator_list <- self$`indicator`$getAsList()
for (x in names(indicator_list)) {
listObject[[paste("indicator_",x, sep = "")]] <- self$`indicator`[[x]]
}
security_list <- self$`security`$getAsList()
for (x in names(security_list)) {
listObject[[paste("security_",x, sep = "")]] <- self$`security`[[x]]
}
listObject[["next_page"]] <- self$`next_page`
return(listObject)
}
)
)
| /R/ApiResponseSecurityDonchianChannel.r | no_license | federico-700/r-sdk | R | false | false | 6,550 | r | # Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.15.3
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ApiResponseSecurityDonchianChannel Class
#'
#' @field technicals
#' @field indicator
#' @field security
#' @field next_page
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ApiResponseSecurityDonchianChannel <- R6::R6Class(
'ApiResponseSecurityDonchianChannel',
public = list(
`technicals` = NA,
`technicals_data_frame` = NULL,
`indicator` = NA,
`security` = NA,
`next_page` = NA,
initialize = function(`technicals`, `indicator`, `security`, `next_page`){
if (!missing(`technicals`)) {
self$`technicals` <- `technicals`
}
if (!missing(`indicator`)) {
self$`indicator` <- `indicator`
}
if (!missing(`security`)) {
self$`security` <- `security`
}
if (!missing(`next_page`)) {
self$`next_page` <- `next_page`
}
},
toJSON = function() {
ApiResponseSecurityDonchianChannelObject <- list()
if (!is.null(self$`technicals`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`technicals`) && ((length(self$`technicals`) == 0) || ((length(self$`technicals`) != 0 && R6::is.R6(self$`technicals`[[1]]))))) {
ApiResponseSecurityDonchianChannelObject[['technicals']] <- lapply(self$`technicals`, function(x) x$toJSON())
} else {
ApiResponseSecurityDonchianChannelObject[['technicals']] <- jsonlite::toJSON(self$`technicals`, auto_unbox = TRUE)
}
}
if (!is.null(self$`indicator`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`indicator`) && ((length(self$`indicator`) == 0) || ((length(self$`indicator`) != 0 && R6::is.R6(self$`indicator`[[1]]))))) {
ApiResponseSecurityDonchianChannelObject[['indicator']] <- lapply(self$`indicator`, function(x) x$toJSON())
} else {
ApiResponseSecurityDonchianChannelObject[['indicator']] <- jsonlite::toJSON(self$`indicator`, auto_unbox = TRUE)
}
}
if (!is.null(self$`security`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`security`) && ((length(self$`security`) == 0) || ((length(self$`security`) != 0 && R6::is.R6(self$`security`[[1]]))))) {
ApiResponseSecurityDonchianChannelObject[['security']] <- lapply(self$`security`, function(x) x$toJSON())
} else {
ApiResponseSecurityDonchianChannelObject[['security']] <- jsonlite::toJSON(self$`security`, auto_unbox = TRUE)
}
}
if (!is.null(self$`next_page`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`next_page`) && ((length(self$`next_page`) == 0) || ((length(self$`next_page`) != 0 && R6::is.R6(self$`next_page`[[1]]))))) {
ApiResponseSecurityDonchianChannelObject[['next_page']] <- lapply(self$`next_page`, function(x) x$toJSON())
} else {
ApiResponseSecurityDonchianChannelObject[['next_page']] <- jsonlite::toJSON(self$`next_page`, auto_unbox = TRUE)
}
}
ApiResponseSecurityDonchianChannelObject
},
fromJSON = function(ApiResponseSecurityDonchianChannelJson) {
ApiResponseSecurityDonchianChannelObject <- jsonlite::fromJSON(ApiResponseSecurityDonchianChannelJson)
if (!is.null(ApiResponseSecurityDonchianChannelObject$`technicals`)) {
self$`technicals` <- ApiResponseSecurityDonchianChannelObject$`technicals`
}
if (!is.null(ApiResponseSecurityDonchianChannelObject$`indicator`)) {
self$`indicator` <- ApiResponseSecurityDonchianChannelObject$`indicator`
}
if (!is.null(ApiResponseSecurityDonchianChannelObject$`security`)) {
self$`security` <- ApiResponseSecurityDonchianChannelObject$`security`
}
if (!is.null(ApiResponseSecurityDonchianChannelObject$`next_page`)) {
self$`next_page` <- ApiResponseSecurityDonchianChannelObject$`next_page`
}
},
toJSONString = function() {
jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
},
fromJSONString = function(ApiResponseSecurityDonchianChannelJson) {
ApiResponseSecurityDonchianChannelObject <- jsonlite::fromJSON(ApiResponseSecurityDonchianChannelJson, simplifyDataFrame = FALSE)
self$setFromList(ApiResponseSecurityDonchianChannelObject)
},
setFromList = function(listObject) {
self$`technicals` <- lapply(listObject$`technicals`, function(x) {
DonchianChannelTechnicalValueObject <- DonchianChannelTechnicalValue$new()
DonchianChannelTechnicalValueObject$setFromList(x)
return(DonchianChannelTechnicalValueObject)
})
technicals_list <- lapply(self$`technicals`, function(x) {
return(x$getAsList())
})
self$`technicals_data_frame` <- do.call(rbind, lapply(technicals_list, data.frame))
self$`indicator` <- TechnicalIndicator$new()
self$`indicator`$setFromList(listObject$`indicator`)
self$`security` <- SecuritySummary$new()
self$`security`$setFromList(listObject$`security`)
if (!is.null(listObject$`next_page`)) {
self$`next_page` <- listObject$`next_page`
}
else {
self$`next_page` <- NA
}
},
getAsList = function() {
listObject = list()
# listObject[["technicals"]] <- lapply(self$`technicals`, function(o) {
# return(o$getAsList())
# })
indicator_list <- self$`indicator`$getAsList()
for (x in names(indicator_list)) {
listObject[[paste("indicator_",x, sep = "")]] <- self$`indicator`[[x]]
}
security_list <- self$`security`$getAsList()
for (x in names(security_list)) {
listObject[[paste("security_",x, sep = "")]] <- self$`security`[[x]]
}
listObject[["next_page"]] <- self$`next_page`
return(listObject)
}
)
)
|
# Split an argument list into its positional (unnamed) and named parts.
# Returns a list with elements `unamed` (sic -- kept for caller
# compatibility) and `named`.  Positional arguments are returned with
# their (empty) names stripped.
separate_arguments <- function(arglist) {
  arg_names <- names(arglist)
  if (is.null(arg_names)) {
    # Nothing is named at all.
    return(list(unamed = arglist, named = list()))
  }
  has_name <- arg_names != ""
  list(unamed = unname(arglist[!has_name]),
       named = arglist[has_name])
}
| /R/argument.R | permissive | Non-Contradiction/JuliaCall | R | false | false | 281 | r | separate_arguments <- function(arglist){
if (is.null(names(arglist))) {
return(list(unamed = arglist, named = list()))
}
idx <- names(arglist) != ""
unamed <- arglist[!idx]
names(unamed) <- NULL
return(list(unamed = unamed, named = arglist[idx]))
}
|
# functions get_Splinebasis to get x spline basis
# functions get_TimeSplinebasis to get time spline basis
get_Splinebasis <- function(objterm,
data=parent.frame(),
specials="NPHNLL",
all.vars.func=all_specials_vars,
unique=TRUE,
order=c("formula", "specials")){
# Build one covariate (x) spline-basis object per "special" term found in a
# terms object.
# input
#   objterm  : a terms object carrying a "specials" attribute
#   data     : environment/data frame in which variable ranges are evaluated
#   specials : names of the special functions to scan for (default "NPHNLL")
#   order    : return bases in formula order or in specials order
# output : named list of "SplineBasis" objects (one per special term),
#          named after the corresponding x variable; NULL if no special term.
order <- match.arg(order)
indxvar <- attr(objterm, "specials")[specials]
nvars <- length(unlist(indxvar))
if(nvars==0){
# no "specials" vars
return(NULL)
}
else{
if(order=="specials"){
oindxvar <- 1:nvars
}
else {
oindxvar <- order(unlist(indxvar))
}
var_list <- NULL
Spline_list <- NULL
for(is in specials){
# look up the special function itself so match.call() can name arguments
fun <- mget(is,
mode = "function",
envir = parent.frame(), inherits=TRUE,
ifnotfound=list(NULL))[[1]]
for( i in indxvar[[is]]){
# normalize the term's call so arguments can be read by name
thecall <- match.call(fun, attr(objterm,"variables")[[i+1]])
thevar <- thecall[["x"]]
Knots <- eval(as.expression(thecall[["Knots"]]))
if( !is.null(thecall[["Boundary.knots"]]) ){
therange <- eval(as.expression(thecall[["Boundary.knots"]]))
}
else {
# compute the range of the variable
therange <- eval(call("range", thevar), envir=data)
}
# fall back to the function's default Spline type when none was given
# NOTE(review): thecall[["Spline"]] is scalar here so ifelse() works, but a
# plain if/else would be clearer and safer.
thecall[["Spline"]] <- ifelse(is.null(thecall[["Spline"]]),
eval(formals(fun)$Spline)[1],
thecall[["Spline"]])
if( is.null(thecall[["Spline"]])){
# default is b-spline
# NOTE(review): after the ifelse() above this NULL branch can only trigger
# if the formal default itself is NULL -- confirm whether it is reachable.
thespline <- MSplineBasis(knots=c(therange[1],
eval(as.expression(thecall[["Knots"]])),
therange[2]),
degree=ifelse(is.null(thecall[["Degree"]]),
formals(fun)[["Degree"]],
thecall[["Degree"]]),
keep.duplicates=FALSE)
}
else if( thecall[["Spline"]]== "tp-spline" ){
# truncated-power spline, standard (unconstrained) form
thespline <- TPSplineBasis(knots=eval(as.expression(thecall[["Knots"]])),
degree=ifelse(is.null(thecall[["Degree"]]),
formals(fun)[["Degree"]],
thecall[["Degree"]]),
min=therange[1],
max=therange[2],
type="standard")
}
else if( thecall[["Spline"]]== "tpi-spline" ){
# truncated-power spline constrained to be increasing
thespline <- TPSplineBasis(knots=eval(as.expression(thecall[["Knots"]])),
degree=ifelse(is.null(thecall[["Degree"]]),
formals(fun)[["Degree"]],
thecall[["Degree"]]),
min=therange[1],
max=therange[2],
type="increasing")
}
else if( thecall[["Spline"]]== "b-spline" ){
# Degree is forced to 3 when missing, so the ifelse() below always
# takes its 'no' branch (thecall[["Degree"]] is non-NULL here).
if (is.null(thecall[["Degree"]])) {thecall[["Degree"]]<-3}
thespline <- MSplineBasis(knots=c(therange[1],
eval(as.expression(thecall[["Knots"]])),
therange[2]),
degree=ifelse(is.null(thecall[["Degree"]]),
formals(fun)[["Degree"]],
thecall[["Degree"]]),
keep.duplicates=FALSE)
}
else {
stop("wrong type of spline specification", attr(objterm,"variables")[[i+1]])
}
var_list <- c( var_list, thevar)
Spline_list <- c( Spline_list, thespline)
}
}
names(Spline_list) <- var_list
return(Spline_list[oindxvar])
}
}
get_TimeSplinebasis <- function(objterm,
data=parent.frame(),
specials="NPHNLL",
all.vars.func=all_specials_vars,
unique=TRUE,
order=c("formula", "specials")){
# Build one TIME spline-basis object per "special" term found in a terms
# object.  Mirrors get_Splinebasis() but reads the time-specific arguments
# (timevar, Knots.t, Boundary.knots.t, Degree.t).
# input
#   objterm  : a terms object carrying a "specials" attribute
#   data     : environment/data frame in which the time range is evaluated
#   specials : names of the special functions to scan for (default "NPHNLL")
#   order    : return bases in formula order or in specials order
# output : named list of "SplineBasis" objects, named after the time
#          variable of each special term; NULL if no special term.
order <- match.arg(order)
indxvar <- attr(objterm, "specials")[specials]
nvars <- length(unlist(indxvar))
if(nvars==0){
# no "specials" vars
return(NULL)
}
else{
if(order=="specials"){
oindxvar <- 1:nvars
}
else {
oindxvar <- order(unlist(indxvar))
}
var_list <- NULL
Spline_list <- NULL
for(is in specials){
# look up the special function itself so match.call() can name arguments
fun <- mget(is,
mode = "function",
envir = parent.frame(), inherits=TRUE,
ifnotfound=list(NULL))[[1]]
for( i in indxvar[[is]]){
# normalize the term's call so arguments can be read by name
thecall <- match.call(fun, attr(objterm,"variables")[[i+1]])
thevar <- thecall[["timevar"]]
Knots <- eval(as.expression(thecall[["Knots.t"]]))
if( !is.null(thecall[["Boundary.knots.t"]]) ){
therange <- eval(as.expression(thecall[["Boundary.knots.t"]]))
}
else {
# compute the range of the variable
therange <- eval(call("range", thevar), envir=data)
}
# fall back to the function's default Spline type when none was given
thecall[["Spline"]] <- ifelse(is.null(thecall[["Spline"]]),
eval(formals(fun)$Spline)[1],
thecall[["Spline"]])
if( is.null(thecall[["Spline"]])){
# default is b-spline
# NOTE(review): after the ifelse() above this NULL branch can only trigger
# if the formal default itself is NULL -- confirm whether it is reachable.
thespline <- MSplineBasis(knots=c(therange[1],
eval(as.expression(thecall[["Knots.t"]])),
therange[2]),
degree=ifelse(is.null(thecall[["Degree.t"]]),
formals(fun)[["Degree.t"]],
thecall[["Degree.t"]]),
keep.duplicates=FALSE)
}
else if( thecall[["Spline"]]== "tp-spline" ){
# truncated-power spline, standard (unconstrained) form
thespline <- TPSplineBasis(knots=eval(as.expression(thecall[["Knots.t"]])),
degree=ifelse(is.null(thecall[["Degree.t"]]),
formals(fun)[["Degree.t"]],
thecall[["Degree.t"]]),
min=therange[1],
max=therange[2],
type="standard")
}
else if( thecall[["Spline"]]== "tpi-spline" ){
# NOTE(review): unlike get_Splinebasis(), this "tpi-spline" branch passes
# type="standard" instead of type="increasing" -- possible copy-paste bug;
# confirm against the intended flexrsurv semantics before changing.
thespline <- TPSplineBasis(knots=eval(as.expression(thecall[["Knots.t"]])),
degree=ifelse(is.null(thecall[["Degree.t"]]),
formals(fun)[["Degree.t"]],
thecall[["Degree.t"]]),
min=therange[1],
max=therange[2],
type="standard")
}
else if( thecall[["Spline"]]== "b-spline" ){
# Degree.t is forced to 3 when missing, so the ifelse() below always
# takes its 'no' branch (thecall[["Degree.t"]] is non-NULL here).
if (is.null(thecall[["Degree.t"]])) {thecall[["Degree.t"]]<-3}
thespline <- MSplineBasis(knots=c(therange[1],
eval(as.expression(thecall[["Knots.t"]])),
therange[2]),
degree=ifelse(is.null(thecall[["Degree.t"]]),
formals(fun)[["Degree.t"]],
thecall[["Degree.t"]]),
keep.duplicates=FALSE)
}
else {
stop("wrong type of spline specification", attr(objterm,"variables")[[i+1]])
}
var_list <- c( var_list, thevar)
Spline_list <- c( Spline_list, thespline)
}
}
names(Spline_list) <- var_list
return(Spline_list[oindxvar])
}
}
| /flexrsurv/R/get_Splinebasis.R | no_license | ingted/R-Examples | R | false | false | 8,756 | r | # functions get_Splinebasis to get x spline basis
# functions get_TimeSplinebasis to get time spline basis
get_Splinebasis <- function(objterm,
                            data=parent.frame(),
                            specials="NPHNLL",
                            all.vars.func=all_specials_vars,
                            unique=TRUE,
                            order=c("formula", "specials")){
  # get spline parameters of each NPHNLL terms
  # input
  #      terms : a term object
  # output : list of "SplineBasis" objects, named by variable, ordered either
  #          by position in the formula ("formula") or by position within the
  #          special ("specials"); NULL when no special term is present.
  order <- match.arg(order)
  # Positions of the special terms (e.g. NPHNLL) in the terms object.
  indxvar <- attr(objterm, "specials")[specials]
  nvars <- length(unlist(indxvar))
  if(nvars==0){
    # no "specials" vars
    return(NULL)
  }
  else{
    if(order=="specials"){
      oindxvar <- 1:nvars
    }
    else {
      # Reorder so the returned list follows the order of appearance in the formula.
      oindxvar <- order(unlist(indxvar))
    }
    var_list <- NULL
    Spline_list <- NULL
    for(is in specials){
      # Look up the special's own function so match.call() can name its arguments.
      fun <- mget(is,
                  mode = "function",
                  envir = parent.frame(), inherits=TRUE,
                  ifnotfound=list(NULL))[[1]]
      for( i in indxvar[[is]]){
        # Expand the unevaluated call of the i-th special term.
        thecall <- match.call(fun, attr(objterm,"variables")[[i+1]])
        thevar <- thecall[["x"]]
        # NOTE(review): 'Knots' is assigned here but never read afterwards
        # (each branch below re-evaluates thecall[["Knots"]] itself).
        Knots <- eval(as.expression(thecall[["Knots"]]))
        if( !is.null(thecall[["Boundary.knots"]]) ){
          therange <- eval(as.expression(thecall[["Boundary.knots"]]))
        }
        else {
          # compute the range of the variable
          therange <- eval(call("range", thevar), envir=data)
        }
        # Fill in the default spline type from the special's formals when the
        # user did not supply one.
        # NOTE(review): ifelse() on a call component returns element [1] of the
        # chosen branch; confirm this is intended for non-NULL 'Spline' values.
        thecall[["Spline"]] <- ifelse(is.null(thecall[["Spline"]]),
                                      eval(formals(fun)$Spline)[1],
                                      thecall[["Spline"]])
        if( is.null(thecall[["Spline"]])){
          # default is b-spline
          thespline <- MSplineBasis(knots=c(therange[1],
                                            eval(as.expression(thecall[["Knots"]])),
                                            therange[2]),
                                    degree=ifelse(is.null(thecall[["Degree"]]),
                                                  formals(fun)[["Degree"]],
                                                  thecall[["Degree"]]),
                                    keep.duplicates=FALSE)
        }
        else if( thecall[["Spline"]]== "tp-spline" ){
          # Truncated power basis, unconstrained.
          thespline <- TPSplineBasis(knots=eval(as.expression(thecall[["Knots"]])),
                                     degree=ifelse(is.null(thecall[["Degree"]]),
                                                   formals(fun)[["Degree"]],
                                                   thecall[["Degree"]]),
                                     min=therange[1],
                                     max=therange[2],
                                     type="standard")
        }
        else if( thecall[["Spline"]]== "tpi-spline" ){
          # Truncated power basis constrained to be increasing.
          thespline <- TPSplineBasis(knots=eval(as.expression(thecall[["Knots"]])),
                                     degree=ifelse(is.null(thecall[["Degree"]]),
                                                   formals(fun)[["Degree"]],
                                                   thecall[["Degree"]]),
                                     min=therange[1],
                                     max=therange[2],
                                     type="increasing")
        }
        else if( thecall[["Spline"]]== "b-spline" ){
          # B-spline with boundary knots appended; default degree 3 (cubic).
          if (is.null(thecall[["Degree"]])) {thecall[["Degree"]]<-3}
          thespline <- MSplineBasis(knots=c(therange[1],
                                            eval(as.expression(thecall[["Knots"]])),
                                            therange[2]),
                                    degree=ifelse(is.null(thecall[["Degree"]]),
                                                  formals(fun)[["Degree"]],
                                                  thecall[["Degree"]]),
                                    keep.duplicates=FALSE)
        }
        else {
          stop("wrong type of spline specification", attr(objterm,"variables")[[i+1]])
        }
        var_list <- c( var_list, thevar)
        Spline_list <- c( Spline_list, thespline)
      }
    }
    names(Spline_list) <- var_list
    return(Spline_list[oindxvar])
  }
}
get_TimeSplinebasis <- function(objterm,
                            data=parent.frame(),
                            specials="NPHNLL",
                            all.vars.func=all_specials_vars,
                            unique=TRUE,
                            order=c("formula", "specials")){
  # get spline parameters of each NPHNLL terms
  # input
  #      terms : a term object
  # output : list of "SplineBasis" objects for the TIME dimension of each
  #          special term (reads 'timevar', 'Knots.t', 'Degree.t',
  #          'Boundary.knots.t' instead of the x-dimension arguments used
  #          by get_Splinebasis); NULL when no special term is present.
  order <- match.arg(order)
  indxvar <- attr(objterm, "specials")[specials]
  nvars <- length(unlist(indxvar))
  if(nvars==0){
    # no "specials" vars
    return(NULL)
  }
  else{
    if(order=="specials"){
      oindxvar <- 1:nvars
    }
    else {
      # Reorder so the returned list follows the order of appearance in the formula.
      oindxvar <- order(unlist(indxvar))
    }
    var_list <- NULL
    Spline_list <- NULL
    for(is in specials){
      # Look up the special's own function so match.call() can name its arguments.
      fun <- mget(is,
                  mode = "function",
                  envir = parent.frame(), inherits=TRUE,
                  ifnotfound=list(NULL))[[1]]
      for( i in indxvar[[is]]){
        thecall <- match.call(fun, attr(objterm,"variables")[[i+1]])
        # Time variable of the special term (not the covariate 'x').
        thevar <- thecall[["timevar"]]
        # NOTE(review): 'Knots' is assigned here but never read afterwards
        # (each branch below re-evaluates thecall[["Knots.t"]] itself).
        Knots <- eval(as.expression(thecall[["Knots.t"]]))
        if( !is.null(thecall[["Boundary.knots.t"]]) ){
          therange <- eval(as.expression(thecall[["Boundary.knots.t"]]))
        }
        else {
          # compute the range of the variable
          therange <- eval(call("range", thevar), envir=data)
        }
        thecall[["Spline"]] <- ifelse(is.null(thecall[["Spline"]]),
                                      eval(formals(fun)$Spline)[1],
                                      thecall[["Spline"]])
        if( is.null(thecall[["Spline"]])){
          # default is b-spline
          thespline <- MSplineBasis(knots=c(therange[1],
                                            eval(as.expression(thecall[["Knots.t"]])),
                                            therange[2]),
                                    degree=ifelse(is.null(thecall[["Degree.t"]]),
                                                  formals(fun)[["Degree.t"]],
                                                  thecall[["Degree.t"]]),
                                    keep.duplicates=FALSE)
        }
        else if( thecall[["Spline"]]== "tp-spline" ){
          thespline <- TPSplineBasis(knots=eval(as.expression(thecall[["Knots.t"]])),
                                     degree=ifelse(is.null(thecall[["Degree.t"]]),
                                                   formals(fun)[["Degree.t"]],
                                                   thecall[["Degree.t"]]),
                                     min=therange[1],
                                     max=therange[2],
                                     type="standard")
        }
        else if( thecall[["Spline"]]== "tpi-spline" ){
          # NOTE(review): unlike get_Splinebasis, this "tpi-spline" branch
          # passes type="standard" rather than type="increasing", making it
          # identical to the "tp-spline" branch above — confirm whether the
          # monotonicity constraint was intentionally dropped for the time
          # basis or whether this is a copy-paste oversight.
          thespline <- TPSplineBasis(knots=eval(as.expression(thecall[["Knots.t"]])),
                                     degree=ifelse(is.null(thecall[["Degree.t"]]),
                                                   formals(fun)[["Degree.t"]],
                                                   thecall[["Degree.t"]]),
                                     min=therange[1],
                                     max=therange[2],
                                     type="standard")
        }
        else if( thecall[["Spline"]]== "b-spline" ){
          # B-spline with boundary knots appended; default degree 3 (cubic).
          if (is.null(thecall[["Degree.t"]])) {thecall[["Degree.t"]]<-3}
          thespline <- MSplineBasis(knots=c(therange[1],
                                            eval(as.expression(thecall[["Knots.t"]])),
                                            therange[2]),
                                    degree=ifelse(is.null(thecall[["Degree.t"]]),
                                                  formals(fun)[["Degree.t"]],
                                                  thecall[["Degree.t"]]),
                                    keep.duplicates=FALSE)
        }
        else {
          stop("wrong type of spline specification", attr(objterm,"variables")[[i+1]])
        }
        var_list <- c( var_list, thevar)
        Spline_list <- c( Spline_list, thespline)
      }
    }
    names(Spline_list) <- var_list
    return(Spline_list[oindxvar])
  }
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor closures sharing the matrix `x` and a
## cached inverse `inv`:
##   set(y)        : replace the stored matrix and invalidate the cache
##   get()         : return the stored matrix (NOT its inverse)
##   setinverse(i) : store a computed inverse in the cache
##   getinverse()  : return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # new matrix, so any previously cached inverse is stale
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" `x` made by makeCacheMatrix().
## If an inverse is already cached it is returned directly (with a message);
## otherwise it is computed with solve(), stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    ## Cache hit: skip the (potentially expensive) solve() call.
    message("getting cached inverse of matrix")
    return(cached)
  }
  inv <- solve(x$get(), ...)  # compute the inverse from the stored matrix
  x$setinverse(inv)           # memoize for subsequent calls
  inv
}
| /cachematrix.R | no_license | thirupathip7/ProgrammingAssignment2 | R | false | false | 1,223 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL # set the inverse of the Matrix
}
get <- function() x # get the inverse of the Matrix
setinverse <- function(inverse) m <<- inverse # set the inverse of the matrix
getinverse <- function() m # get the inverse of the matrix
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse() # get the inverse of the matrix
if(!is.null(m)) { #if inverse of matrix is exit,then retruns message "getting cached inverse of matrix" and returns inverse of matrix
message("getting cached inverse of matrix")
return(m)
}
data <- x$get()
m <- solve(data, ...) # calculating inverse of matrix
x$setinverse(m)
m
}
|
library(shiny)
# Define UI for dataset viewer application
# Layout: a sidebar with a single animated slider (input$starve) and a main
# panel with three tabs bound to server outputs output$plot_predicted,
# output$summary and output$raw_data.
# NOTE(review): shinyUI()/pageWithSidebar()/headerPanel() are legacy-style
# shiny constructs — confirm the shiny version in use before modernizing
# to fluidPage()/titlePanel().
shinyUI(pageWithSidebar(
  # Application title.
  headerPanel(""),
  sidebarPanel(
    # Slider drives the starvation-period covariate; 'animate' lets the user
    # step through 2..24 hours automatically (one step every 800 ms, looping).
    sliderInput(inputId = "starve",
                label = "Starvation period (hrs)",
                min = 2,
                max = 24,
                value = 2,
                animate = animationOptions(interval=800, loop=TRUE)
    )
  ),
  mainPanel(
    # Three-tab main area: predicted-survival plot, model summary text,
    # and the raw data table.
    tabsetPanel(
      tabPanel("Plot", plotOutput("plot_predicted")),
      tabPanel("Model Summary", verbatimTextOutput("summary")),
      tabPanel("Data", tableOutput("raw_data")),
      id = "tabs"
    )
  )
))
| /ui.R | no_license | NatashaDukach/shiny-survival-covariate | R | false | false | 627 | r | library(shiny)
# Define UI for dataset viewer application
shinyUI(pageWithSidebar(
# Application title.
headerPanel(""),
sidebarPanel(
sliderInput(inputId = "starve",
label = "Starvation period (hrs)",
min = 2,
max = 24,
value = 2,
animate = animationOptions(interval=800, loop=TRUE)
)
),
mainPanel(
tabsetPanel(
tabPanel("Plot", plotOutput("plot_predicted")),
tabPanel("Model Summary", verbatimTextOutput("summary")),
tabPanel("Data", tableOutput("raw_data")),
id = "tabs"
)
)
))
|
#input mirBase_allNTA.txt
if (!require("ggplot2")){
install.packages("ggplot2", repos='http://cran.us.r-project.org')
}
if (!require("scales")){
install.packages("scales", repos='http://cran.us.r-project.org')
}
args <- commandArgs(TRUE)
STATFILE <- args[1]
GRAPHFILE <- args[2]
th<-theme_bw()+theme(axis.title.x = element_text(size=18), axis.text.x = element_text(size=12, angle=0), axis.title.y=element_text(size=18), axis.text.y = element_text(size=12), panel.border=element_rect(linetype="dashed"))+theme(plot.title=element_text( size=24, vjust = 2.5))
table_data <- read.table(STATFILE, header = TRUE, sep = "\t", na.strings="---", colClasses=c("character","numeric","numeric","numeric","numeric","numeric"))
top<-table_data
# Arrange multiple ggplot objects on one page using the grid package.
# Plots may be given via ... and/or as a list in 'plotlist'. 'cols' sets the
# number of columns; alternatively an explicit 'layout' matrix can be
# supplied, where cell value i marks the panel occupied by plot i.
# NOTE(review): the 'file' argument is accepted but never used in the body —
# confirm whether it can be removed from the signature.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  require(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                    ncol = cols, nrow = ceiling(numPlots/cols))
  }
 if (numPlots==1) {
    # Single plot: print it directly, no grid layout needed.
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      # Print into a viewport spanning all rows/cols assigned to plot i.
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Open the PNG device for the output graph (1200x1200 px at 90 dpi).
png(
    filename = GRAPHFILE,
    width=1200,
    height=1200,
    res=90
)
# Panel 1: weighted mean NTA fraction (column wMean) per category 'name'.
p1<-ggplot(top, aes(x=name, y=wMean)) + geom_bar(stat="identity",color="black", fill="#009E73") +ylab("weighted isomiR fraction")+xlab("") +th+ggtitle("Fraction of non-templated additions")
# Panel 2: unweighted mean isomiR fraction per microRNA (column mean).
p2<-ggplot(top, aes(x=name, y=mean)) + geom_bar(stat="identity",color="black", fill="#045FB4") +ylab("mean isomiR fraction per microRNA")+xlab("")+th
# Earlier variants kept for reference (explicit x ordering with comma labels):
# p1<-ggplot(top, aes(x=name, y=wMean)) + geom_bar(stat="identity",color="black", fill="#009E73")+ylab("weighted isomiR fraction")+xlab("")+scale_x_discrete(limits=(as.vector(top$name)), label=comma)+th+ggtitle("Fraction of non-templated additions")
# p2<-ggplot(top, aes(x=name, y=mean)) + geom_bar(stat="identity",color="black", fill="#045FB4")+ylab("mean isomiR fraction per microRNA")+xlab("")+scale_x_discrete(limits=(as.vector(top$name)), label=comma)+th
# Stack both panels vertically in the single output file, then close the device.
multiplot(p1, p2)
dev.off()
| /isomiR.r | no_license | rlebron88/sRNAde_Ranalysis | R | false | false | 2,748 | r | #input mirBase_allNTA.txt
if (!require("ggplot2")){
install.packages("ggplot2", repos='http://cran.us.r-project.org')
}
if (!require("scales")){
install.packages("scales", repos='http://cran.us.r-project.org')
}
args <- commandArgs(TRUE)
STATFILE <- args[1]
GRAPHFILE <- args[2]
th<-theme_bw()+theme(axis.title.x = element_text(size=18), axis.text.x = element_text(size=12, angle=0), axis.title.y=element_text(size=18), axis.text.y = element_text(size=12), panel.border=element_rect(linetype="dashed"))+theme(plot.title=element_text( size=24, vjust = 2.5))
table_data <- read.table(STATFILE, header = TRUE, sep = "\t", na.strings="---", colClasses=c("character","numeric","numeric","numeric","numeric","numeric"))
top<-table_data
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
require(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
png(
filename = GRAPHFILE,
width=1200,
height=1200,
res=90
)
p1<-ggplot(top, aes(x=name, y=wMean)) + geom_bar(stat="identity",color="black", fill="#009E73") +ylab("weighted isomiR fraction")+xlab("") +th+ggtitle("Fraction of non-templated additions")
p2<-ggplot(top, aes(x=name, y=mean)) + geom_bar(stat="identity",color="black", fill="#045FB4") +ylab("mean isomiR fraction per microRNA")+xlab("")+th
# p1<-ggplot(top, aes(x=name, y=wMean)) + geom_bar(stat="identity",color="black", fill="#009E73")+ylab("weighted isomiR fraction")+xlab("")+scale_x_discrete(limits=(as.vector(top$name)), label=comma)+th+ggtitle("Fraction of non-templated additions")
# p2<-ggplot(top, aes(x=name, y=mean)) + geom_bar(stat="identity",color="black", fill="#045FB4")+ylab("mean isomiR fraction per microRNA")+xlab("")+scale_x_discrete(limits=(as.vector(top$name)), label=comma)+th
multiplot(p1, p2)
dev.off()
|
# Smoke test: drawing on the ragg PPM device must produce a non-empty file.
test_that("agg_ppm generates file", {
  file <- tempfile(fileext = '.ppm')
  agg_ppm(file)  # open the PPM graphics device
  plot(1:10, 1:10)  # draw something so the device has content to write
  dev.off()  # close the device, flushing output to disk
  expect_gt(file.info(file)$size, 0)
  unlink(file)  # clean up the temporary file
})
| /tests/testthat/test-ppm.R | permissive | r-lib/ragg | R | false | false | 183 | r | test_that("agg_ppm generates file", {
file <- tempfile(fileext = '.ppm')
agg_ppm(file)
plot(1:10, 1:10)
dev.off()
expect_gt(file.info(file)$size, 0)
unlink(file)
})
|
################################################################################
# TODO LIST
# TODO: Re-write to simplify and for efficiency.
################################################################################
# CHANGE LOG (last 20 changes)
# 24.08.2018: Removed unused variables.
# 06.08.2017: Added audit trail.
# 06.08.2017: Fixed warning "if (!is.na(need)):the condition has length > 1".
# 18.09.2016: Fixed attribute saved dataset, and kit now saved correctly.
# 09.01.2016: Added attributes to result.
# 28.08.2015: Added importFrom
# 17.12.2014: Fixed error NA Dye for e.g. Yfiler Plus (added 'toupper' in 'match' calls).
# 11.05.2014: Added 'orange' and 'purple'.
# 27.04.2014: Added option to ignore case in marker names.
# 15.12.2013: Fixed check for 'have' and 'need' when converting vector.
# 27.11.2013: Added option 'overwrite'.
# 04.10.2013: Added some debug information.
# 18.09.2013: Added support for vector conversion.
# 17.09.2013: First version.
#' @title Add Color Information.
#'
#' @description
#' Add color information 'Color', 'Dye' or 'R Color'.
#'
#' @details
#' Primers in forensic STR typing kits are labeled with a fluorescent
#' dye. The dyes are represented with single letters (Dye) in exported result
#' files or with strings (Color) in 'panels' files.
#' For visualization in R the R color names are used (R.Color).
#' The function can add new color schemes matched to the existing, or
#' it can convert a vector containing one scheme to another.
#'
#' @param data data frame or vector.
#' @param kit string representing the forensic STR kit used.
#' Default is NA, in which case 'have' must contain a valid column.
#' @param have character string to specify color column to be matched.
#' Default is NA, in which case color information is derived from 'kit' and added
#' to a column named 'Color'.
#' If 'data' is a vector 'have' must be a single string.
#' @param need character string or string vector to specify color columns to be added.
#' Default is NA, in which case all columns will be added.
#' If 'data' is a vector 'need' must be a single string.
#' @param overwrite logical if TRUE and column exist it will be overwritten.
#' @param ignore.case logical if TRUE case in marker names will be ignored.
#' @param debug logical indicating printing debug information.
#'
#' @return data.frame with additional columns for added colors,
#' or vector with converted values.
#'
#' @export
#'
#' @importFrom utils str
#'
#' @examples
#' # Get marker and colors for SGM Plus.
#' df <- getKit("SGMPlus", what = "Color")
#' # Add dye color.
#' dfDye <- addColor(data = df, need = "Dye")
#' # Add all color alternatives.
#' dfAll <- addColor(data = df)
#' # Convert a dye vector to R colors
#' addColor(data = c("R", "G", "Y", "B"), have = "dye", need = "r.color")
addColor <- function(data, kit = NA, have = NA, need = NA, overwrite = FALSE,
                     ignore.case = FALSE, debug = FALSE) {
  # Dispatches on the type of 'data': a vector is converted in place from the
  # 'have' scheme to the 'need' scheme; a data.frame gets new color columns.
  if (debug) {
    print(paste("IN:", match.call()[[1]]))
  }
  # Names:
  colorSchemes <- toupper(c("Color", "Dye", "R.Color"))
  # Definitions: the three parallel lookup tables; element i of one scheme
  # corresponds to element i of the others.
  schemeColor <- c("black", "blue", "green", "yellow", "red", "orange", "purple")
  schemeDye <- c("X", "B", "G", "Y", "R", "O", "P")
  schemeRColor <- c("black", "blue", "green3", "black", "red", "orange", "purple")
  # Numeric values corresponding to color abreviations:
  # NB! There are 8 colors that can be represented by a single number character, palette():
  # 1="black", 2="red", 3="green3", 4="blue", 5="cyan", 6="magenta", 7="yellow", 8="gray"
  if (debug) {
    print("data")
    print(str(data))
    print("kit")
    print(kit)
    print("have")
    print(have)
    print("need")
    print(need)
    print("overwrite")
    print(overwrite)
    print("ignore.case")
    print(ignore.case)
  }
  # Check if overwrite: drop any pre-existing color columns so they are rebuilt.
  if (overwrite) {
    if ("R.COLOR" %in% toupper(names(data))) {
      message("Column 'R.Color' will be overwritten!")
      data$R.Color <- NULL
    }
    if ("COLOR" %in% toupper(names(data))) {
      message("Column 'Color' will be overwritten!")
      data$Color <- NULL
    }
    if ("DYE" %in% toupper(names(data))) {
      message("Column 'Dye' will be overwritten!")
      data$Dye <- NULL
    }
  }
  # A vector with factors gives 'FALSE' for is.vector but dim always gives 'NULL'.
  if (is.vector(data) | is.null(dim(data))) {
    if (debug) {
      print("data is vector OR dim is NULL")
    }
    # Vector conversion: translate every element from scheme 'have' to 'need'
    # via positional match() against the parallel lookup tables.
    if (any(is.na(have)) | any(is.na(need))) {
      warning("For vector conversion 'have' and 'need' must be provided!")
    } else {
      if (toupper(have) == "COLOR") {
        if (toupper(need) == "DYE") {
          data <- schemeDye[match(toupper(data), toupper(schemeColor))]
        }
        if (toupper(need) == "R.COLOR") {
          data <- schemeRColor[match(toupper(data), toupper(schemeColor))]
        }
      }
      if (toupper(have) == "DYE") {
        if (toupper(need) == "COLOR") {
          data <- schemeColor[match(toupper(data), toupper(schemeDye))]
        }
        if (toupper(need) == "R.COLOR") {
          data <- schemeRColor[match(toupper(data), toupper(schemeDye))]
        }
      }
      if (toupper(have) == "R.COLOR") {
        if (toupper(need) == "COLOR") {
          data <- schemeColor[match(toupper(data), toupper(schemeRColor))]
        }
        if (toupper(need) == "DYE") {
          data <- schemeDye[match(toupper(data), toupper(schemeRColor))]
        }
      }
    }
  } else if (is.data.frame(data)) {
    if (debug) {
      print("data is data.frame")
    }
    # Add color if not exist and kit is provided.
    # NOTE(review): scalar '&&' is conventional here; 'is.na(have)' also warns
    # if 'have' has length > 1 — confirm callers always pass a scalar.
    if (is.na(have) & !is.na(kit)) {
      # Check if exist.
      if (!"COLOR" %in% toupper(names(data))) {
        # Get markers and their color.
        kitInfo <- getKit(kit, what = "Color")
        marker <- kitInfo$Marker
        # NB! Color case is not consistent between kits, so use lower case.
        mColor <- tolower(kitInfo$Color)
        if (debug) {
          print("marker")
          print(str(marker))
          print("mColor")
          print(str(mColor))
        }
        if (ignore.case) {
          # Loop over all markers.
          for (m in seq(along = marker)) {
            # Add new column and colors per marker.
            data$Color[toupper(data$Marker) == toupper(marker[m])] <- mColor[m]
          }
        } else {
          # Loop over all markers.
          for (m in seq(along = marker)) {
            # Add new column and colors per marker.
            data$Color[data$Marker == marker[m]] <- mColor[m]
          }
        }
      }
      # Add to have.
      have <- "Color"
    }
    # Find existing colors and convert to upper case.
    if (is.na(have)) {
      have <- toupper(names(data)[toupper(names(data)) %in% colorSchemes])
    } else {
      have <- toupper(have)
    }
    # Convert to upper case. NA 'need' means: add every scheme.
    if (!is.na(need[1])) {
      need <- toupper(need)
    } else {
      need <- colorSchemes
    }
    # Check if supported.
    if (!any(need %in% colorSchemes)) {
      warning(paste(paste(need, collapse = ","), "not supported!"))
    }
    count <- 1
    # The repeat loop runs at most twice (see the 'count > 1' exit below), so
    # a column created in pass one (e.g. Dye derived from Color) can seed the
    # remaining conversions in pass two.
    repeat{
      if ("COLOR" %in% need) {
        # Check if exist.
        if ("COLOR" %in% toupper(names(data))) {
          message("A column 'Color' already exist in data frame!")
        } else {
          # Convert using Dye.
          if ("DYE" %in% have) {
            if ("DYE" %in% toupper(names(data))) {
              # Convert dye to color.
              data$Color <- schemeColor[match(toupper(data$Dye), toupper(schemeDye))]
            } else {
              warning("Can't find column 'Dye'!\n'Color' was not added!")
            }
          }
          # Convert using R color.
          if ("R.COLOR" %in% have) {
            if ("R.COLOR" %in% toupper(names(data))) {
              # Convert R color to color.
              data$Color <- schemeColor[match(toupper(data$R.Color), toupper(schemeRColor))]
            } else {
              warning("Can't find column 'R.Color'!\n'Color' was not added!")
            }
          }
        }
        # Remove from need.
        need <- need[need != "COLOR"]
      }
      if ("DYE" %in% need) {
        # Check if exist.
        if ("DYE" %in% toupper(names(data))) {
          message("A column 'Dye' already exist in data frame!")
        } else {
          # Convert using Color.
          if ("COLOR" %in% have) {
            if ("COLOR" %in% toupper(names(data))) {
              # Convert color to dye.
              data$Dye <- schemeDye[match(toupper(data$Color), toupper(schemeColor))]
            } else {
              warning("Can't find column 'Color'!\n'Dye' was not added!")
            }
          }
          # Convert using R color.
          if ("R.COLOR" %in% have) {
            if ("R.COLOR" %in% toupper(names(data))) {
              # Convert R color to dye.
              data$Dye <- schemeDye[match(toupper(data$R.Color), toupper(schemeRColor))]
            } else {
              warning("Can't find column 'R.Color'!\n'Dye' was not added!")
            }
          }
        }
        # Remove from need.
        need <- need[need != "DYE"]
      }
      if ("R.COLOR" %in% need) {
        # Check if exist.
        if ("R.COLOR" %in% toupper(names(data))) {
          message("A column 'R.Color' already exist in data frame!")
        } else {
          # Convert using Color.
          if ("COLOR" %in% have) {
            if ("COLOR" %in% toupper(names(data))) {
              # Convert color to R color.
              data$R.Color <- schemeRColor[match(toupper(data$Color), toupper(schemeColor))]
            } else {
              warning("Can't find column 'Color'!\n'R.Color' was not added!")
            }
          }
          # Convert using Dye.
          if ("DYE" %in% have) {
            if ("DYE" %in% toupper(names(data))) {
              # Convert dye to R color.
              data$R.Color <- schemeRColor[match(toupper(data$Dye), toupper(schemeDye))]
            } else {
              warning("Can't find column 'Dye'! \n'R.Color' was not added!")
            }
          }
        }
        # Remove from need.
        need <- need[need != "R.COLOR"]
      }
      # Exit loop.
      if (length(need) == 0 | count > 1) {
        break
      }
      # Increase loop counter.
      count <- count + 1
    } # End repeat.
  } else {
    warning("Unsupported data type!\n No color was added!")
  }
  # Add attributes to result.
  attr(data, which = "kit") <- kit
  # Update audit trail.
  data <- auditTrail(obj = data, f.call = match.call(), package = "strvalidator")
  if (debug) {
    print("Return")
    print(str(data))
  }
  return(data)
}
| /R/addColor.r | no_license | OskarHansson/strvalidator | R | false | false | 10,737 | r | ################################################################################
# TODO LIST
# TODO: Re-write to simplify and for efficiency.
################################################################################
# CHANGE LOG (last 20 changes)
# 24.08.2018: Removed unused variables.
# 06.08.2017: Added audit trail.
# 06.08.2017: Fixed warning "if (!is.na(need)):the condition has length > 1".
# 18.09.2016: Fixed attribute saved dataset, and kit now saved correctly.
# 09.01.2016: Added attributes to result.
# 28.08.2015: Added importFrom
# 17.12.2014: Fixed error NA Dye for e.g. Yfiler Plus (added 'toupper' in 'match' calls).
# 11.05.2014: Added 'orange' and 'purple'.
# 27.04.2014: Added option to ignore case in marker names.
# 15.12.2013: Fixed check for 'have' and 'need' when converting vector.
# 27.11.2013: Added option 'overwrite'.
# 04.10.2013: Added some debug information.
# 18.09.2013: Added support for vector conversion.
# 17.09.2013: First version.
#' @title Add Color Information.
#'
#' @description
#' Add color information 'Color', 'Dye' or 'R Color'.
#'
#' @details
#' Primers in forensic STR typing kits are labeled with a fluorescent
#' dye. The dyes are represented with single letters (Dye) in exported result
#' files or with strings (Color) in 'panels' files.
#' For visualization in R the R color names are used (R.Color).
#' The function can add new color schemes matched to the existing, or
#' it can convert a vector containing one scheme to another.
#'
#' @param data data frame or vector.
#' @param kit string representing the forensic STR kit used.
#' Default is NA, in which case 'have' must contain a valid column.
#' @param have character string to specify color column to be matched.
#' Default is NA, in which case color information is derived from 'kit' and added
#' to a column named 'Color'.
#' If 'data' is a vector 'have' must be a single string.
#' @param need character string or string vector to specify color columns to be added.
#' Default is NA, in which case all columns will be added.
#' If 'data' is a vector 'need' must be a single string.
#' @param overwrite logical if TRUE and column exist it will be overwritten.
#' @param ignore.case logical if TRUE case in marker names will be ignored.
#' @param debug logical indicating printing debug information.
#'
#' @return data.frame with additional columns for added colors,
#' or vector with converted values.
#'
#' @export
#'
#' @importFrom utils str
#'
#' @examples
#' # Get marker and colors for SGM Plus.
#' df <- getKit("SGMPlus", what = "Color")
#' # Add dye color.
#' dfDye <- addColor(data = df, need = "Dye")
#' # Add all color alternatives.
#' dfAll <- addColor(data = df)
#' # Convert a dye vector to R colors
#' addColor(data = c("R", "G", "Y", "B"), have = "dye", need = "r.color")
addColor <- function(data, kit = NA, have = NA, need = NA, overwrite = FALSE,
ignore.case = FALSE, debug = FALSE) {
if (debug) {
print(paste("IN:", match.call()[[1]]))
}
# Names:
colorSchemes <- toupper(c("Color", "Dye", "R.Color"))
# Definitions:
schemeColor <- c("black", "blue", "green", "yellow", "red", "orange", "purple")
schemeDye <- c("X", "B", "G", "Y", "R", "O", "P")
schemeRColor <- c("black", "blue", "green3", "black", "red", "orange", "purple")
# Numeric values corresponding to color abreviations:
# NB! There are 8 colors that can be represented by a single number character, palette():
# 1="black", 2="red", 3="green3", 4="blue", 5="cyan", 6="magenta", 7="yellow", 8="gray"
if (debug) {
print("data")
print(str(data))
print("kit")
print(kit)
print("have")
print(have)
print("need")
print(need)
print("overwrite")
print(overwrite)
print("ignore.case")
print(ignore.case)
}
# Check if overwrite.
if (overwrite) {
if ("R.COLOR" %in% toupper(names(data))) {
message("Column 'R.Color' will be overwritten!")
data$R.Color <- NULL
}
if ("COLOR" %in% toupper(names(data))) {
message("Column 'Color' will be overwritten!")
data$Color <- NULL
}
if ("DYE" %in% toupper(names(data))) {
message("Column 'Dye' will be overwritten!")
data$Dye <- NULL
}
}
# A vector with factors gives 'FALSE' for is.vector but dim always gives 'NULL'.
if (is.vector(data) | is.null(dim(data))) {
if (debug) {
print("data is vector OR dim is NULL")
}
# Add color if not exist and kit is provided.
if (any(is.na(have)) | any(is.na(need))) {
warning("For vector conversion 'have' and 'need' must be provided!")
} else {
if (toupper(have) == "COLOR") {
if (toupper(need) == "DYE") {
data <- schemeDye[match(toupper(data), toupper(schemeColor))]
}
if (toupper(need) == "R.COLOR") {
data <- schemeRColor[match(toupper(data), toupper(schemeColor))]
}
}
if (toupper(have) == "DYE") {
if (toupper(need) == "COLOR") {
data <- schemeColor[match(toupper(data), toupper(schemeDye))]
}
if (toupper(need) == "R.COLOR") {
data <- schemeRColor[match(toupper(data), toupper(schemeDye))]
}
}
if (toupper(have) == "R.COLOR") {
if (toupper(need) == "COLOR") {
data <- schemeColor[match(toupper(data), toupper(schemeRColor))]
}
if (toupper(need) == "DYE") {
data <- schemeDye[match(toupper(data), toupper(schemeRColor))]
}
}
}
} else if (is.data.frame(data)) {
if (debug) {
print("data is data.frame")
}
# Add color if not exist and kit is provided.
if (is.na(have) & !is.na(kit)) {
# Check if exist.
if (!"COLOR" %in% toupper(names(data))) {
# Get markers and their color.
kitInfo <- getKit(kit, what = "Color")
marker <- kitInfo$Marker
# NB! Color case is not consistent between kits, so use lower case.
mColor <- tolower(kitInfo$Color)
if (debug) {
print("marker")
print(str(marker))
print("mColor")
print(str(mColor))
}
if (ignore.case) {
# Loop over all markers.
for (m in seq(along = marker)) {
# Add new column and colors per marker.
data$Color[toupper(data$Marker) == toupper(marker[m])] <- mColor[m]
}
} else {
# Loop over all markers.
for (m in seq(along = marker)) {
# Add new column and colors per marker.
data$Color[data$Marker == marker[m]] <- mColor[m]
}
}
}
# Add to have.
have <- "Color"
}
# Find existing colors and convert to upper case.
if (is.na(have)) {
have <- toupper(names(data)[toupper(names(data)) %in% colorSchemes])
} else {
have <- toupper(have)
}
# Convert to upper case.
if (!is.na(need[1])) {
need <- toupper(need)
} else {
need <- colorSchemes
}
# Check if supported.
if (!any(need %in% colorSchemes)) {
warning(paste(paste(need, collapse = ","), "not supported!"))
}
count <- 1
repeat{
if ("COLOR" %in% need) {
# Check if exist.
if ("COLOR" %in% toupper(names(data))) {
message("A column 'Color' already exist in data frame!")
} else {
# Convert using Dye.
if ("DYE" %in% have) {
if ("DYE" %in% toupper(names(data))) {
# Convert dye to color.
data$Color <- schemeColor[match(toupper(data$Dye), toupper(schemeDye))]
} else {
warning("Can't find column 'Dye'!\n'Color' was not added!")
}
}
# Convert using R color.
if ("R.COLOR" %in% have) {
if ("R.COLOR" %in% toupper(names(data))) {
# Convert dye to color.
data$Color <- schemeColor[match(toupper(data$R.Color), toupper(schemeRColor))]
} else {
warning("Can't find column 'R.Color'!\n'Color' was not added!")
}
}
}
# Remove from need.
need <- need[need != "COLOR"]
}
if ("DYE" %in% need) {
# Check if exist.
if ("DYE" %in% toupper(names(data))) {
message("A column 'Dye' already exist in data frame!")
} else {
# Convert using Color.
if ("COLOR" %in% have) {
if ("COLOR" %in% toupper(names(data))) {
# Convert color to dye.
data$Dye <- schemeDye[match(toupper(data$Color), toupper(schemeColor))]
} else {
warning("Can't find column 'Color'!\n'Dye' was not added!")
}
}
# Convert using R color.
if ("R.COLOR" %in% have) {
if ("R.COLOR" %in% toupper(names(data))) {
# Convert R color to dye.
data$Dye <- schemeDye[match(toupper(data$R.Color), toupper(schemeRColor))]
} else {
warning("Can't find column 'R.Color'!\n'Dye' was not added!")
}
}
}
# Remove from need.
need <- need[need != "DYE"]
}
if ("R.COLOR" %in% need) {
# Check if exist.
if ("R.COLOR" %in% toupper(names(data))) {
message("A column 'R.Color' already exist in data frame!")
} else {
# Convert using Color.
if ("COLOR" %in% have) {
if ("COLOR" %in% toupper(names(data))) {
# Convert color to R color.
data$R.Color <- schemeRColor[match(toupper(data$Color), toupper(schemeColor))]
} else {
warning("Can't find column 'Color'!\n'R.Color' was not added!")
}
}
# Convert using Dye.
if ("DYE" %in% have) {
if ("DYE" %in% toupper(names(data))) {
# Convert dye to R color.
data$R.Color <- schemeRColor[match(toupper(data$Dye), toupper(schemeDye))]
} else {
warning("Can't find column 'Dye'! \n'R.Color' was not added!")
}
}
}
# Remove from need.
need <- need[need != "R.COLOR"]
}
# Exit loop.
if (length(need) == 0 | count > 1) {
break
}
# Increase loop counter.
count <- count + 1
} # End repeat.
} else {
warning("Unsupported data type!\n No color was added!")
}
# Add attributes to result.
attr(data, which = "kit") <- kit
# Update audit trail.
data <- auditTrail(obj = data, f.call = match.call(), package = "strvalidator")
if (debug) {
print("Return")
print(str(data))
}
return(data)
}
|
#' @include internal.R
NULL
#' Coerce object to another object
#'
#' Coerce an object.
#'
#' @param x Object.
#'
#' @param ... unused arguments.
#'
#' @return An object.
#'
#' @name as
NULL
#' @rdname as
#' @method as.list Parameters
#' @export
as.list.Parameters <- function(x, ...) {
  # Collect the value of every parameter, keeping the parameter names on the
  # result and recording the parameter ids in an "id" attribute.
  ids <- x$ids()
  values <- lapply(ids, function(i) x[[i]]$value)
  structure(values, .Names = x$names(), id = ids)
}
#' @rdname as
#' @method as.list Zones
#' @export
as.list.Zones <- function(x, ...) {
  # Strip every attribute (including names) and tag the result as a plain
  # list, matching the original implementation exactly.
  out <- x
  attributes(out) <- NULL
  class(out) <- "list"
  out
}
#' Is it?
#'
#' Test if an object inherits from a class.
#'
#' @param x Object.
#'
#' @return `logical` indicating if it inherits from the class.
#'
#' @name is
NULL
| /R/misc.R | no_license | bbest/prioritizr | R | false | false | 692 | r | #' @include internal.R
NULL
#' Coerce object to another object
#'
#' Coerce an object.
#'
#' @param x Object.
#'
#' @param ... unused arguments.
#'
#' @return An object.
#'
#' @name as
NULL
#' @rdname as
#' @method as.list Parameters
#' @export
as.list.Parameters <- function(x, ...) {
  # Collect each parameter's value under its display name, and record
  # the parameter ids in an "id" attribute.
  out <- setNames(lapply(x$ids(), function(key) x[[key]]$value), x$names())
  structure(out, id = x$ids())
}
#' @rdname as
#' @method as.list Zones
#' @export
as.list.Zones <- function(x, ...) {
  # Remove all attributes so only the bare list data remains, then mark
  # the result as a plain list.
  plain <- x
  attributes(plain) <- NULL
  class(plain) <- "list"
  plain
}
#' Is it?
#'
#' Test if an object inherits from a class.
#'
#' @param x Object.
#'
#' @return `logical` indicating if it inherits from the class.
#'
#' @name is
NULL
|
# Extracted example script for timereg::two.stage: fits the
# Clayton-Oakes-Glidden two-stage frailty model on the diabetes data,
# illustrating several marginal-model choices (cox.aalen, coxph, aalen)
# and stratified/common variance parameters via theta.des.
library(timereg)
### Name: two.stage
### Title: Fit Clayton-Oakes-Glidden Two-Stage model
### Aliases: two.stage
### Keywords: survival
### ** Examples
library(timereg)
data(diabetes)
# Marginal Cox model with treat as covariate
marg <- cox.aalen(Surv(time,status)~prop(treat)+prop(adult)+
cluster(id),data=diabetes,resample.iid=1)
fit<-two.stage(marg,data=diabetes,theta=1.0,Nit=40)
summary(fit)
# Using coxph and giving clusters, but SE without Cox uncertainty
# (the iid decomposition of the marginal fit is not available here).
margph <- coxph(Surv(time,status)~treat,data=diabetes)
fit<-two.stage(margph,data=diabetes,theta=1.0,Nit=40,clusters=diabetes$id)
# Stratification after adult: separate dependence parameter per stratum
theta.des<-model.matrix(~-1+factor(adult),diabetes);
des.t<-model.matrix(~-1+factor(treat),diabetes);
design.treat<-cbind(des.t[,-1]*(diabetes$adult==1),
des.t[,-1]*(diabetes$adult==2))
# test for common baselines included here
marg1<-cox.aalen(Surv(time,status)~-1+factor(adult)+prop(design.treat)+cluster(id),
data=diabetes,resample.iid=1,Nit=50)
fit.s<-two.stage(marg1,data=diabetes,Nit=40,theta=1,theta.des=theta.des)
summary(fit.s)
# with common baselines and common treatment effect (although test rejects this)
fit.s2<-two.stage(marg,data=diabetes,Nit=40,theta=1,theta.des=theta.des)
summary(fit.s2)
# test for same variance among the two strata
theta.des<-model.matrix(~factor(adult),diabetes);
fit.s3<-two.stage(marg,data=diabetes,Nit=40,theta=1,theta.des=theta.des)
summary(fit.s3)
# to fit model without covariates, use beta.fixed=1 and prop or aalen function
marg <- aalen(Surv(time,status)~+1+cluster(id),
data=diabetes,resample.iid=1,n.sim=0)
fita<-two.stage(marg,data=diabetes,theta=0.95,detail=0)
summary(fita)
# same model but se's without variation from marginal model to speed up computations
marg <- aalen(Surv(time,status) ~+1+cluster(id),data=diabetes,
resample.iid=0,n.sim=0)
fit<-two.stage(marg,data=diabetes,theta=0.95,detail=0)
summary(fit)
# same model but se's now with fewer time-points for approx of iid decomp of marginal
# model to speed up computations
marg <- cox.aalen(Surv(time,status) ~+prop(treat)+cluster(id),data=diabetes,
resample.iid=1,n.sim=0,max.timepoint.sim=5,beta.fixed=1,beta=0)
fit<-two.stage(marg,data=diabetes,theta=0.95,detail=0)
summary(fit)
| /data/genthat_extracted_code/timereg/examples/two.stage.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,279 | r | library(timereg)
### Name: two.stage
### Title: Fit Clayton-Oakes-Glidden Two-Stage model
### Aliases: two.stage
### Keywords: survival
### ** Examples
# Duplicate of the two.stage example above (dataset artifact): fits the
# two-stage frailty model on the diabetes data with several marginal models.
library(timereg)
data(diabetes)
# Marginal Cox model with treat as covariate
marg <- cox.aalen(Surv(time,status)~prop(treat)+prop(adult)+
cluster(id),data=diabetes,resample.iid=1)
fit<-two.stage(marg,data=diabetes,theta=1.0,Nit=40)
summary(fit)
# Using coxph and giving clusters, but SE without Cox uncertainty
margph <- coxph(Surv(time,status)~treat,data=diabetes)
fit<-two.stage(margph,data=diabetes,theta=1.0,Nit=40,clusters=diabetes$id)
# Stratification after adult: separate dependence parameter per stratum
theta.des<-model.matrix(~-1+factor(adult),diabetes);
des.t<-model.matrix(~-1+factor(treat),diabetes);
design.treat<-cbind(des.t[,-1]*(diabetes$adult==1),
des.t[,-1]*(diabetes$adult==2))
# test for common baselines included here
marg1<-cox.aalen(Surv(time,status)~-1+factor(adult)+prop(design.treat)+cluster(id),
data=diabetes,resample.iid=1,Nit=50)
fit.s<-two.stage(marg1,data=diabetes,Nit=40,theta=1,theta.des=theta.des)
summary(fit.s)
# with common baselines and common treatment effect (although test rejects this)
fit.s2<-two.stage(marg,data=diabetes,Nit=40,theta=1,theta.des=theta.des)
summary(fit.s2)
# test for same variance among the two strata
theta.des<-model.matrix(~factor(adult),diabetes);
fit.s3<-two.stage(marg,data=diabetes,Nit=40,theta=1,theta.des=theta.des)
summary(fit.s3)
# to fit model without covariates, use beta.fixed=1 and prop or aalen function
marg <- aalen(Surv(time,status)~+1+cluster(id),
data=diabetes,resample.iid=1,n.sim=0)
fita<-two.stage(marg,data=diabetes,theta=0.95,detail=0)
summary(fita)
# same model but se's without variation from marginal model to speed up computations
marg <- aalen(Surv(time,status) ~+1+cluster(id),data=diabetes,
resample.iid=0,n.sim=0)
fit<-two.stage(marg,data=diabetes,theta=0.95,detail=0)
summary(fit)
# same model but se's now with fewer time-points for approx of iid decomp of marginal
# model to speed up computations
marg <- cox.aalen(Surv(time,status) ~+prop(treat)+cluster(id),data=diabetes,
resample.iid=1,n.sim=0,max.timepoint.sim=5,beta.fixed=1,beta=0)
fit<-two.stage(marg,data=diabetes,theta=0.95,detail=0)
summary(fit)
|
# Automatically generated from all.nw using noweb
#
# Predictions from a fitted Cox proportional hazards model.
#
# Arguments:
#   object    a coxph fit.
#   newdata   optional data frame of new observations; when absent,
#             predictions are for the data used in the original fit.
#   type      "lp" = linear predictor, "risk" = exp(lp),
#             "expected" = expected number of events given follow-up,
#             "terms" = matrix of per-term contributions to the lp.
#   se.fit    also return standard errors?
#   na.action missing-value action applied to newdata.
#   terms     which terms to report when type = "terms".
#   collapse  optional grouping vector (e.g. subject id) used to sum
#             predictions over multiple rows per subject.
#   reference centering for lp/risk/terms: per-stratum means ("strata")
#             or overall sample means ("sample").
#
# Value: a vector (matrix for type = "terms"), or list(fit, se.fit)
# when se.fit = TRUE.
#
# Review fixes relative to the original:
#   * varh2/xbar2 for (start, stop] data with newdata were indexed by
#     j1 (entry times) instead of j2 (exit times);
#   * se for newdata was scaled by risk[indx2] (old-data risk scores)
#     instead of newrisk[indx2];
#   * a dead recomputation of chaz2 via approx on negated time was
#     removed (its value was immediately overwritten);
#   * object$coefficient partial match spelled out as coefficients.
predict.coxph <- function(object, newdata,
                          type = c("lp", "risk", "expected", "terms"),
                          se.fit = FALSE, na.action = na.pass,
                          terms = names(object$assign), collapse,
                          reference = c("strata", "sample"), ...) {
    if (!inherits(object, "coxph"))
        stop("Primary argument must be a coxph object")
    Call <- match.call()
    type <- match.arg(type)
    n <- object$n
    Terms <- object$terms
    if (!missing(terms)) {
        if (is.numeric(terms)) {
            if (any(terms != floor(terms) |
                    terms > length(object$assign) |
                    terms < 1)) stop("Invalid terms argument")
        }
        else if (any(is.na(match(terms, names(object$assign)))))
            stop("a name given in the terms argument not found in the model")
    }
    # The cluster() term is never needed for prediction; drop it.
    # Terms2 are the terms needed for newdata; the response is only
    # needed there when type == "expected".
    if (length(attr(Terms, "specials")$cluster)) {
        temp <- untangle.specials(Terms, "cluster", 1)
        Terms <- object$terms[-temp$terms]
    }
    else Terms <- object$terms
    if (type != "expected") Terms2 <- delete.response(Terms)
    else Terms2 <- Terms
    has.strata <- !is.null(attr(Terms, "specials")$strata)
    has.offset <- !is.null(attr(Terms, "offset"))
    has.weights <- any(names(object$call) == "weights")
    na.action.used <- object$na.action
    n <- length(object$residuals)
    if (missing(reference) && type == "terms") reference <- "sample"
    else reference <- match.arg(reference)
    have.mf <- FALSE
    if (type == "expected") {
        y <- object[["y"]]
        if (is.null(y)) { # very rare case: fit saved without y
            mf <- model.frame(object)
            y <- model.extract(mf, "response")
            have.mf <- TRUE # avoid rebuilding the model frame below
        }
    }
    # Decide whether the original X matrix (plus strata/weights/offset)
    # must be reconstructed from the model frame.
    if (se.fit || type == "terms" || (!missing(newdata) && type == "expected") ||
        (has.strata && (reference == "strata") || type == "expected")) {
        use.x <- TRUE
        if (is.null(object[["x"]]) || has.weights || has.offset ||
            (has.strata && is.null(object$strata))) {
            # need the original model frame
            if (!have.mf) mf <- model.frame(object)
            if (nrow(mf) != n)
                stop("Data is not the same size as it was in the original fit")
            x <- model.matrix(object, data = mf)
            if (has.strata) {
                if (!is.null(object$strata)) oldstrat <- object$strata
                else {
                    stemp <- untangle.specials(Terms, "strata")
                    if (length(stemp$vars) == 1) oldstrat <- mf[[stemp$vars]]
                    else oldstrat <- strata(mf[, stemp$vars], shortlabel = TRUE)
                }
            }
            else oldstrat <- rep(0L, n)
            weights <- model.weights(mf)
            if (is.null(weights)) weights <- rep(1.0, n)
            offset <- model.offset(mf)
            if (is.null(offset)) offset <- 0
        }
        else {
            x <- object[["x"]]
            if (has.strata) oldstrat <- object$strata
            else oldstrat <- rep(0L, n)
            weights <- rep(1., n)
            offset <- 0
        }
    }
    else {
        # strata are not needed in this case either
        if (has.strata) {
            stemp <- untangle.specials(Terms, "strata", 1)
            Terms2 <- Terms2[-stemp$terms]
            has.strata <- FALSE # remaining routine never needs to look
        }
        oldstrat <- rep(0L, n)
        offset <- 0
        use.x <- FALSE
    }
    if (!missing(newdata)) {
        use.x <- TRUE # we do use an X matrix later
        tcall <- Call[c(1, match(c("newdata", "collapse"), names(Call), nomatch = 0))]
        names(tcall)[2] <- "data"    # rename newdata to data
        tcall$formula <- Terms2      # version with no response
        tcall$na.action <- na.action # always present, since there is a default
        tcall[[1]] <- as.name("model.frame") # change the function called
        if (!is.null(attr(Terms, "specials")$strata) && !has.strata) {
            temp.lev <- object$xlevels
            temp.lev[[stemp$vars]] <- NULL
            tcall$xlev <- temp.lev
        }
        else tcall$xlev <- object$xlevels
        mf2 <- eval(tcall, parent.frame())
        collapse <- model.extract(mf2, "collapse")
        n2 <- nrow(mf2)
        if (has.strata) {
            if (length(stemp$vars) == 1) newstrat <- mf2[[stemp$vars]]
            else newstrat <- strata(mf2[, stemp$vars], shortlabel = TRUE)
            if (any(is.na(match(newstrat, oldstrat))))
                stop("New data has a strata not found in the original model")
            else newstrat <- factor(newstrat, levels = levels(oldstrat)) # give it all levels
            if (length(stemp$terms))
                newx <- model.matrix(Terms2[-stemp$terms], mf2,
                                     contr = object$contrasts)[, -1, drop = FALSE]
            else newx <- model.matrix(Terms2, mf2,
                                      contr = object$contrasts)[, -1, drop = FALSE]
        }
        else {
            newx <- model.matrix(Terms2, mf2,
                                 contr = object$contrasts)[, -1, drop = FALSE]
            newstrat <- rep(0L, nrow(mf2))
        }
        newoffset <- model.offset(mf2)
        if (is.null(newoffset)) newoffset <- 0
        if (type == "expected") {
            newy <- model.response(mf2)
            if (attr(newy, "type") != attr(y, "type"))
                stop("New data has a different survival type than the model")
        }
        na.action.used <- attr(mf2, "na.action")
    }
    else n2 <- n
    if (type == "expected") {
        if (missing(newdata))
            pred <- y[, ncol(y)] - object$residuals
        if (!missing(newdata) || se.fit) {
            ustrata <- unique(oldstrat)
            risk <- exp(object$linear.predictors)
            x <- x - rep(object$means, each = nrow(x)) # center each column
            if (missing(newdata)) # se.fit must be true
                se <- double(n)
            else {
                pred <- se <- double(nrow(mf2))
                newx <- newx - rep(object$means, each = nrow(newx))
                newrisk <- c(exp(newx %*% object$coef))
            }
            survtype <- ifelse(object$method == "efron", 3, 2)
            for (i in ustrata) {
                indx <- which(oldstrat == i)
                afit <- agsurv(y[indx, , drop = FALSE], x[indx, , drop = FALSE],
                               weights[indx], risk[indx],
                               survtype, survtype)
                afit.n <- length(afit$time)
                if (missing(newdata)) {
                    # In this case we need se.fit, nothing else
                    j1 <- approx(afit$time, 1:afit.n, y[indx, 1], method = "constant",
                                 f = 0, yleft = 0, yright = afit.n)$y
                    chaz <- c(0, afit$cumhaz)[j1 + 1]
                    varh <- c(0, cumsum(afit$varhaz))[j1 + 1]
                    xbar <- rbind(0, afit$xbar)[j1 + 1, , drop = FALSE]
                    if (ncol(y) == 2) {
                        dt <- (chaz * x[indx, ]) - xbar
                        se[indx] <- sqrt(varh + rowSums((dt %*% object$var) * dt)) *
                            risk[indx]
                    }
                    else {
                        j2 <- approx(afit$time, 1:afit.n, y[indx, 2], method = "constant",
                                     f = 0, yleft = 0, yright = afit.n)$y
                        chaz2 <- c(0, afit$cumhaz)[j2 + 1]
                        varh2 <- c(0, cumsum(afit$varhaz))[j2 + 1]
                        xbar2 <- rbind(0, afit$xbar)[j2 + 1, , drop = FALSE]
                        dt <- (chaz * x[indx, ]) - xbar
                        v1 <- varh + rowSums((dt %*% object$var) * dt)
                        dt2 <- (chaz2 * x[indx, ]) - xbar2
                        v2 <- varh2 + rowSums((dt2 %*% object$var) * dt2)
                        se[indx] <- sqrt(v2 - v1) * risk[indx]
                    }
                }
                else {
                    # there is new data
                    use.x <- TRUE
                    indx2 <- which(newstrat == i)
                    j1 <- approx(afit$time, 1:afit.n, newy[indx2, 1],
                                 method = "constant", f = 0, yleft = 0, yright = afit.n)$y
                    chaz <- c(0, afit$cumhaz)[j1 + 1]
                    pred[indx2] <- chaz * newrisk[indx2]
                    if (se.fit) {
                        varh <- c(0, cumsum(afit$varhaz))[j1 + 1]
                        xbar <- rbind(0, afit$xbar)[j1 + 1, , drop = FALSE]
                    }
                    if (ncol(y) == 2) {
                        if (se.fit) {
                            dt <- (chaz * newx[indx2, ]) - xbar
                            se[indx2] <- sqrt(varh + rowSums((dt %*% object$var) * dt)) *
                                newrisk[indx2]
                        }
                    }
                    else {
                        j2 <- approx(afit$time, 1:afit.n, newy[indx2, 2],
                                     method = "constant", f = 0, yleft = 0, yright = afit.n)$y
                        chaz2 <- c(0, afit$cumhaz)[j2 + 1]
                        pred[indx2] <- (chaz2 - chaz) * newrisk[indx2]
                        if (se.fit) {
                            # FIX: evaluate at the exit times (j2), not j1
                            varh2 <- c(0, cumsum(afit$varhaz))[j2 + 1]
                            xbar2 <- rbind(0, afit$xbar)[j2 + 1, , drop = FALSE]
                            dt <- (chaz * newx[indx2, ]) - xbar
                            dt2 <- (chaz2 * newx[indx2, ]) - xbar2
                            v2 <- varh2 + rowSums((dt2 %*% object$var) * dt2)
                            v1 <- varh + rowSums((dt %*% object$var) * dt)
                            # FIX: scale by the new subjects' risk scores
                            se[indx2] <- sqrt(v2 - v1) * newrisk[indx2]
                        }
                    }
                }
            }
        }
    }
    else {
        if (is.null(object$coefficients))
            coef <- numeric(0)
        else {
            # Replace any NA coefs with 0, to stop NA in the linear predictor
            coef <- ifelse(is.na(object$coefficients), 0, object$coefficients)
        }
        if (missing(newdata)) {
            offset <- offset - mean(offset)
            if (has.strata && reference == "strata") {
                # We can't use as.integer(oldstrat) as an index: a factor
                # with unrepresented levels could give 1, 2, 5 for instance.
                xmeans <- rowsum(x * weights, oldstrat) / c(rowsum(weights, oldstrat))
                newx <- x - xmeans[match(oldstrat, row.names(xmeans)), ]
            }
            else if (use.x) newx <- x - rep(object$means, each = nrow(x))
        }
        else {
            offset <- newoffset - mean(offset)
            if (has.strata && reference == "strata") {
                xmeans <- rowsum(x * weights, oldstrat) / c(rowsum(weights, oldstrat))
                newx <- newx - xmeans[match(newstrat, row.names(xmeans)), ]
            }
            else newx <- newx - rep(object$means, each = nrow(newx))
        }
        if (type == "lp" || type == "risk") {
            if (use.x) pred <- drop(newx %*% coef) + offset
            else pred <- object$linear.predictors
            if (se.fit) se <- sqrt(rowSums((newx %*% object$var) * newx))
            if (type == "risk") {
                pred <- exp(pred)
                if (se.fit) se <- se * sqrt(pred) # first-order Taylor approx
            }
        }
        else if (type == "terms") {
            asgn <- object$assign
            nterms <- length(asgn)
            pred <- matrix(ncol = nterms, nrow = NROW(newx))
            dimnames(pred) <- list(rownames(newx), names(asgn))
            if (se.fit) se <- pred
            for (i in 1:nterms) {
                tt <- asgn[[i]]
                tt <- tt[!is.na(object$coefficients[tt])]
                xtt <- newx[, tt, drop = FALSE]
                pred[, i] <- xtt %*% object$coefficients[tt]
                if (se.fit)
                    se[, i] <- sqrt(rowSums((xtt %*% object$var[tt, tt]) * xtt))
            }
            pred <- pred[, terms, drop = FALSE]
            if (se.fit) se <- se[, terms, drop = FALSE]
            attr(pred, "constant") <- sum(object$coefficients * object$means, na.rm = TRUE)
        }
    }
    if (type != "terms") {
        pred <- drop(pred)
        if (se.fit) se <- drop(se)
    }
    if (!is.null(na.action.used)) {
        pred <- napredict(na.action.used, pred)
        if (is.matrix(pred)) n <- nrow(pred)
        else n <- length(pred)
        if (se.fit) se <- napredict(na.action.used, se)
    }
    if (!missing(collapse) && !is.null(collapse)) {
        if (length(collapse) != n2) stop("Collapse vector is the wrong length")
        pred <- rowsum(pred, collapse) # in R, rowsum is a matrix, always
        if (se.fit) se <- sqrt(rowsum(se^2, collapse))
        if (type != "terms") {
            pred <- drop(pred)
            if (se.fit) se <- drop(se)
        }
    }
    if (se.fit) list(fit = pred, se.fit = se)
    else pred
}
| /survival/R/predict.coxph.R | no_license | fuentesdt/viewsurvivalsource | R | false | false | 13,579 | r | # Automatically generated from all.nw using noweb
# Predictions from a fitted Cox proportional hazards model (duplicate
# copy of predict.coxph in this dump; same fixes applied).
#
# type: "lp" = linear predictor, "risk" = exp(lp), "expected" =
# expected number of events given follow-up, "terms" = per-term lp
# contributions. Returns a vector/matrix, or list(fit, se.fit) when
# se.fit = TRUE.
#
# Review fixes relative to the original:
#   * varh2/xbar2 for (start, stop] data with newdata were indexed by
#     j1 (entry times) instead of j2 (exit times);
#   * se for newdata was scaled by risk[indx2] instead of newrisk[indx2];
#   * a dead recomputation of chaz2 via approx on negated time removed;
#   * object$coefficient partial match spelled out as coefficients.
predict.coxph <- function(object, newdata,
                          type = c("lp", "risk", "expected", "terms"),
                          se.fit = FALSE, na.action = na.pass,
                          terms = names(object$assign), collapse,
                          reference = c("strata", "sample"), ...) {
    if (!inherits(object, "coxph"))
        stop("Primary argument must be a coxph object")
    Call <- match.call()
    type <- match.arg(type)
    n <- object$n
    Terms <- object$terms
    if (!missing(terms)) {
        if (is.numeric(terms)) {
            if (any(terms != floor(terms) |
                    terms > length(object$assign) |
                    terms < 1)) stop("Invalid terms argument")
        }
        else if (any(is.na(match(terms, names(object$assign)))))
            stop("a name given in the terms argument not found in the model")
    }
    # The cluster() term is never needed for prediction; drop it.
    if (length(attr(Terms, "specials")$cluster)) {
        temp <- untangle.specials(Terms, "cluster", 1)
        Terms <- object$terms[-temp$terms]
    }
    else Terms <- object$terms
    if (type != "expected") Terms2 <- delete.response(Terms)
    else Terms2 <- Terms
    has.strata <- !is.null(attr(Terms, "specials")$strata)
    has.offset <- !is.null(attr(Terms, "offset"))
    has.weights <- any(names(object$call) == "weights")
    na.action.used <- object$na.action
    n <- length(object$residuals)
    if (missing(reference) && type == "terms") reference <- "sample"
    else reference <- match.arg(reference)
    have.mf <- FALSE
    if (type == "expected") {
        y <- object[["y"]]
        if (is.null(y)) { # very rare case: fit saved without y
            mf <- model.frame(object)
            y <- model.extract(mf, "response")
            have.mf <- TRUE # avoid rebuilding the model frame below
        }
    }
    # Decide whether the original X matrix (plus strata/weights/offset)
    # must be reconstructed from the model frame.
    if (se.fit || type == "terms" || (!missing(newdata) && type == "expected") ||
        (has.strata && (reference == "strata") || type == "expected")) {
        use.x <- TRUE
        if (is.null(object[["x"]]) || has.weights || has.offset ||
            (has.strata && is.null(object$strata))) {
            # need the original model frame
            if (!have.mf) mf <- model.frame(object)
            if (nrow(mf) != n)
                stop("Data is not the same size as it was in the original fit")
            x <- model.matrix(object, data = mf)
            if (has.strata) {
                if (!is.null(object$strata)) oldstrat <- object$strata
                else {
                    stemp <- untangle.specials(Terms, "strata")
                    if (length(stemp$vars) == 1) oldstrat <- mf[[stemp$vars]]
                    else oldstrat <- strata(mf[, stemp$vars], shortlabel = TRUE)
                }
            }
            else oldstrat <- rep(0L, n)
            weights <- model.weights(mf)
            if (is.null(weights)) weights <- rep(1.0, n)
            offset <- model.offset(mf)
            if (is.null(offset)) offset <- 0
        }
        else {
            x <- object[["x"]]
            if (has.strata) oldstrat <- object$strata
            else oldstrat <- rep(0L, n)
            weights <- rep(1., n)
            offset <- 0
        }
    }
    else {
        # strata are not needed in this case either
        if (has.strata) {
            stemp <- untangle.specials(Terms, "strata", 1)
            Terms2 <- Terms2[-stemp$terms]
            has.strata <- FALSE # remaining routine never needs to look
        }
        oldstrat <- rep(0L, n)
        offset <- 0
        use.x <- FALSE
    }
    if (!missing(newdata)) {
        use.x <- TRUE # we do use an X matrix later
        tcall <- Call[c(1, match(c("newdata", "collapse"), names(Call), nomatch = 0))]
        names(tcall)[2] <- "data"    # rename newdata to data
        tcall$formula <- Terms2      # version with no response
        tcall$na.action <- na.action # always present, since there is a default
        tcall[[1]] <- as.name("model.frame") # change the function called
        if (!is.null(attr(Terms, "specials")$strata) && !has.strata) {
            temp.lev <- object$xlevels
            temp.lev[[stemp$vars]] <- NULL
            tcall$xlev <- temp.lev
        }
        else tcall$xlev <- object$xlevels
        mf2 <- eval(tcall, parent.frame())
        collapse <- model.extract(mf2, "collapse")
        n2 <- nrow(mf2)
        if (has.strata) {
            if (length(stemp$vars) == 1) newstrat <- mf2[[stemp$vars]]
            else newstrat <- strata(mf2[, stemp$vars], shortlabel = TRUE)
            if (any(is.na(match(newstrat, oldstrat))))
                stop("New data has a strata not found in the original model")
            else newstrat <- factor(newstrat, levels = levels(oldstrat)) # give it all levels
            if (length(stemp$terms))
                newx <- model.matrix(Terms2[-stemp$terms], mf2,
                                     contr = object$contrasts)[, -1, drop = FALSE]
            else newx <- model.matrix(Terms2, mf2,
                                      contr = object$contrasts)[, -1, drop = FALSE]
        }
        else {
            newx <- model.matrix(Terms2, mf2,
                                 contr = object$contrasts)[, -1, drop = FALSE]
            newstrat <- rep(0L, nrow(mf2))
        }
        newoffset <- model.offset(mf2)
        if (is.null(newoffset)) newoffset <- 0
        if (type == "expected") {
            newy <- model.response(mf2)
            if (attr(newy, "type") != attr(y, "type"))
                stop("New data has a different survival type than the model")
        }
        na.action.used <- attr(mf2, "na.action")
    }
    else n2 <- n
    if (type == "expected") {
        if (missing(newdata))
            pred <- y[, ncol(y)] - object$residuals
        if (!missing(newdata) || se.fit) {
            ustrata <- unique(oldstrat)
            risk <- exp(object$linear.predictors)
            x <- x - rep(object$means, each = nrow(x)) # center each column
            if (missing(newdata)) # se.fit must be true
                se <- double(n)
            else {
                pred <- se <- double(nrow(mf2))
                newx <- newx - rep(object$means, each = nrow(newx))
                newrisk <- c(exp(newx %*% object$coef))
            }
            survtype <- ifelse(object$method == "efron", 3, 2)
            for (i in ustrata) {
                indx <- which(oldstrat == i)
                afit <- agsurv(y[indx, , drop = FALSE], x[indx, , drop = FALSE],
                               weights[indx], risk[indx],
                               survtype, survtype)
                afit.n <- length(afit$time)
                if (missing(newdata)) {
                    # In this case we need se.fit, nothing else
                    j1 <- approx(afit$time, 1:afit.n, y[indx, 1], method = "constant",
                                 f = 0, yleft = 0, yright = afit.n)$y
                    chaz <- c(0, afit$cumhaz)[j1 + 1]
                    varh <- c(0, cumsum(afit$varhaz))[j1 + 1]
                    xbar <- rbind(0, afit$xbar)[j1 + 1, , drop = FALSE]
                    if (ncol(y) == 2) {
                        dt <- (chaz * x[indx, ]) - xbar
                        se[indx] <- sqrt(varh + rowSums((dt %*% object$var) * dt)) *
                            risk[indx]
                    }
                    else {
                        j2 <- approx(afit$time, 1:afit.n, y[indx, 2], method = "constant",
                                     f = 0, yleft = 0, yright = afit.n)$y
                        chaz2 <- c(0, afit$cumhaz)[j2 + 1]
                        varh2 <- c(0, cumsum(afit$varhaz))[j2 + 1]
                        xbar2 <- rbind(0, afit$xbar)[j2 + 1, , drop = FALSE]
                        dt <- (chaz * x[indx, ]) - xbar
                        v1 <- varh + rowSums((dt %*% object$var) * dt)
                        dt2 <- (chaz2 * x[indx, ]) - xbar2
                        v2 <- varh2 + rowSums((dt2 %*% object$var) * dt2)
                        se[indx] <- sqrt(v2 - v1) * risk[indx]
                    }
                }
                else {
                    # there is new data
                    use.x <- TRUE
                    indx2 <- which(newstrat == i)
                    j1 <- approx(afit$time, 1:afit.n, newy[indx2, 1],
                                 method = "constant", f = 0, yleft = 0, yright = afit.n)$y
                    chaz <- c(0, afit$cumhaz)[j1 + 1]
                    pred[indx2] <- chaz * newrisk[indx2]
                    if (se.fit) {
                        varh <- c(0, cumsum(afit$varhaz))[j1 + 1]
                        xbar <- rbind(0, afit$xbar)[j1 + 1, , drop = FALSE]
                    }
                    if (ncol(y) == 2) {
                        if (se.fit) {
                            dt <- (chaz * newx[indx2, ]) - xbar
                            se[indx2] <- sqrt(varh + rowSums((dt %*% object$var) * dt)) *
                                newrisk[indx2]
                        }
                    }
                    else {
                        j2 <- approx(afit$time, 1:afit.n, newy[indx2, 2],
                                     method = "constant", f = 0, yleft = 0, yright = afit.n)$y
                        chaz2 <- c(0, afit$cumhaz)[j2 + 1]
                        pred[indx2] <- (chaz2 - chaz) * newrisk[indx2]
                        if (se.fit) {
                            # FIX: evaluate at the exit times (j2), not j1
                            varh2 <- c(0, cumsum(afit$varhaz))[j2 + 1]
                            xbar2 <- rbind(0, afit$xbar)[j2 + 1, , drop = FALSE]
                            dt <- (chaz * newx[indx2, ]) - xbar
                            dt2 <- (chaz2 * newx[indx2, ]) - xbar2
                            v2 <- varh2 + rowSums((dt2 %*% object$var) * dt2)
                            v1 <- varh + rowSums((dt %*% object$var) * dt)
                            # FIX: scale by the new subjects' risk scores
                            se[indx2] <- sqrt(v2 - v1) * newrisk[indx2]
                        }
                    }
                }
            }
        }
    }
    else {
        if (is.null(object$coefficients))
            coef <- numeric(0)
        else {
            # Replace any NA coefs with 0, to stop NA in the linear predictor
            coef <- ifelse(is.na(object$coefficients), 0, object$coefficients)
        }
        if (missing(newdata)) {
            offset <- offset - mean(offset)
            if (has.strata && reference == "strata") {
                # We can't use as.integer(oldstrat) as an index: a factor
                # with unrepresented levels could give 1, 2, 5 for instance.
                xmeans <- rowsum(x * weights, oldstrat) / c(rowsum(weights, oldstrat))
                newx <- x - xmeans[match(oldstrat, row.names(xmeans)), ]
            }
            else if (use.x) newx <- x - rep(object$means, each = nrow(x))
        }
        else {
            offset <- newoffset - mean(offset)
            if (has.strata && reference == "strata") {
                xmeans <- rowsum(x * weights, oldstrat) / c(rowsum(weights, oldstrat))
                newx <- newx - xmeans[match(newstrat, row.names(xmeans)), ]
            }
            else newx <- newx - rep(object$means, each = nrow(newx))
        }
        if (type == "lp" || type == "risk") {
            if (use.x) pred <- drop(newx %*% coef) + offset
            else pred <- object$linear.predictors
            if (se.fit) se <- sqrt(rowSums((newx %*% object$var) * newx))
            if (type == "risk") {
                pred <- exp(pred)
                if (se.fit) se <- se * sqrt(pred) # first-order Taylor approx
            }
        }
        else if (type == "terms") {
            asgn <- object$assign
            nterms <- length(asgn)
            pred <- matrix(ncol = nterms, nrow = NROW(newx))
            dimnames(pred) <- list(rownames(newx), names(asgn))
            if (se.fit) se <- pred
            for (i in 1:nterms) {
                tt <- asgn[[i]]
                tt <- tt[!is.na(object$coefficients[tt])]
                xtt <- newx[, tt, drop = FALSE]
                pred[, i] <- xtt %*% object$coefficients[tt]
                if (se.fit)
                    se[, i] <- sqrt(rowSums((xtt %*% object$var[tt, tt]) * xtt))
            }
            pred <- pred[, terms, drop = FALSE]
            if (se.fit) se <- se[, terms, drop = FALSE]
            attr(pred, "constant") <- sum(object$coefficients * object$means, na.rm = TRUE)
        }
    }
    if (type != "terms") {
        pred <- drop(pred)
        if (se.fit) se <- drop(se)
    }
    if (!is.null(na.action.used)) {
        pred <- napredict(na.action.used, pred)
        if (is.matrix(pred)) n <- nrow(pred)
        else n <- length(pred)
        if (se.fit) se <- napredict(na.action.used, se)
    }
    if (!missing(collapse) && !is.null(collapse)) {
        if (length(collapse) != n2) stop("Collapse vector is the wrong length")
        pred <- rowsum(pred, collapse) # in R, rowsum is a matrix, always
        if (se.fit) se <- sqrt(rowsum(se^2, collapse))
        if (type != "terms") {
            pred <- drop(pred)
            if (se.fit) se <- drop(se)
        }
    }
    if (se.fit) list(fit = pred, se.fit = se)
    else pred
}
|
# Exploratory plotting of manual chlorophyll-a samples (2014-2021) for
# several reservoirs (FCR, BVR, CCR, SNP).
# NOTE(review): setwd() in a script is fragile -- consider here::here()
# or a project-relative path. TODO confirm the file name
# 'manual_chlorophyll_2014_2021' (no .csv extension) is correct.
library(tidyverse)
library(dplyr)
library(ggplot2)
library(lubridate)
setwd('~/Dropbox/chlorophyll_processing/data/')
chla_master_df <- read_csv('manual_chlorophyll_2014_2021')
# NOTE(review): `snp` is created but not used in the visible code below
# (the SNP plot filters chla_long_year instead) -- confirm it is needed.
snp <- filter(chla_master_df, Reservoir == "SNP")
#################################################################
########################## Plotting #############################
#################################################################
# Facetted plot of the datapoints within each reservoir over the entire dataset
ggplot(subset(chla_master_df), aes(x = DateTime, y = Chla_ugL, col = Reservoir)) +
geom_point(size = 1) +
facet_grid(Reservoir ~., scales = "free_y") +
ggtitle("Entire Dataset Timeseries")
# Reshape to long format and add year/month columns for summary plots.
# NOTE(review): gather() is superseded by pivot_longer() in tidyr.
chla_long_year <- chla_master_df %>%
ungroup(.) %>%
select(-(Flag_Chla)) %>%
gather(metric, value, Chla_ugL) %>%
mutate(year = year(DateTime)) %>%
mutate(month = month(DateTime))
# Facetted plot of the range of each reservoir for each year and the mean of the range
# NOTE(review): stat_summary(fun.y = ...) is deprecated in ggplot2 >= 3.3;
# use `fun =` instead.
ggplot(subset(chla_long_year), aes(x = year, y = value, col = Reservoir))+
geom_point(size = 1) +
stat_summary( fun.y = "mean", geom = "point", pch = 21, size = 3, fill = 'black') +
facet_grid(metric ~ Reservoir, scales = 'free_y') +
scale_x_continuous("DateTime", breaks = seq(2014, 2021, 1)) +
scale_y_continuous("Concentration (ugL)") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = 'none') +
ggtitle("Range and Mean of Range of Each Reservoir Over Time")
# Facetted plot of the range of each reservoir for each year and the median of the range
ggplot(subset(chla_long_year, Site == 50), aes(x = year, y = value, col = Reservoir))+
geom_point(size = 1) +
stat_summary( fun.y = "median", geom = "point", pch = 21, size = 3, fill = 'black') +
facet_grid(metric ~ Reservoir, scales = 'free_y') +
scale_x_continuous("DateTime", breaks = seq(2014,2021, 1)) +
scale_y_continuous("Concentration (ugL)") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = 'none') +
ggtitle("Range and Median of Range of Each Reservoir Over Time")
# Custom palette; NOTE(review): not used by the plots visible below --
# confirm whether it should be passed to scale_colour_gradientn().
jet.colors <- c("#00007F", "#00007F", "blue", "blue", "#007FFF", "cyan", "#7FFF7F", "#7FFF7F",
"yellow","yellow", "#FF7F00", "#FF7F00", "red", "#7F0000")
# Per-reservoir monthly timeseries, one facet row per year, coloured by depth.
ggplot(subset(chla_long_year, Reservoir == 'FCR' & Site == 50), aes(x = month, y = value, col = as.factor(Depth_m))) +
geom_point(cex = 2) +
facet_grid(year ~., scales = 'free_y') +
scale_x_continuous("Month") +
scale_y_continuous("Concentration (ugL)") +
ggtitle("FCR Timeseries")
# ggplot(subset(chla_long_year, Reservoir == 'FCR' & year == 2019 & Depth_m == 0.1), aes(x = DateTime, y = value, col = as.factor(Site))) +
#   geom_point() +
#   scale_y_continuous("Concentration (ugL)") +
#   ggtitle("FCR Multisite 0.1m (2019)")
ggplot(subset(chla_long_year, Reservoir == 'BVR' & Site == 50), aes(x = month, y = value, col = as.factor(Depth_m))) +
geom_point(cex = 2) +
facet_grid(year ~., scales = 'free_y') +
scale_x_continuous("Month") +
scale_y_continuous("Concentration (ugL)") +
ggtitle ("BVR Timeseries")
ggplot(subset(chla_long_year, Reservoir == 'CCR'), aes(x = month, y = value, col = as.factor(Depth_m))) +
geom_point(cex = 2) +
facet_grid(year ~., scales = 'free_y') +
scale_x_continuous("Month") +
scale_y_continuous("Concentration (ugL)") +
ggtitle ("CCR Timeseries")
ggplot(subset(chla_long_year, Reservoir == 'SNP'), aes(x = month, y = value, col = as.factor(Depth_m))) +
geom_point(cex = 2) +
facet_grid(year ~.) +
scale_x_continuous("Month") +
scale_y_continuous("Concentration (ugL)") +
ggtitle ("SNP Timeseries")
# ggplot(subset(chla_long_year, Reservoir == 'BVR' & year == 2019 & Depth_m == 0.1), aes(x = DateTime, y = value, col = as.factor(Site))) +
# geom_point() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle("BVR Multisite 0.1m (2019)")
# #2014 plots
#
# ggplot(subset(chla_long_year, Site == 50 & year == 2014), aes(x = month, y = value, col = Reservoir))+
# geom_point(size = 1) +
# stat_summary( fun.y = "mean", geom = "point", pch = 21, size = 3, fill = 'black') +
# facet_grid(metric ~ Reservoir, scales = 'free_y') +
# scale_x_continuous("Month", breaks = seq(1, 12, 1)) +
# scale_y_continuous("Concentration (ugL)") +
# theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = 'none') +
# ggtitle ("Range and Mean of Range of 2014 Data")
#
#
# # Facetted plot of the range of each reservoir for each year and the median of the range
# ggplot(subset(chla_long_year, Site == 50 & year == 2014), aes(x = month, y = value, col = Reservoir))+
# geom_point(size = 1) +
# stat_summary( fun.y = "median", geom = "point", pch = 21, size = 3, fill = 'black') +
# facet_grid(metric ~ Reservoir, scales = 'free_y') +
# scale_x_continuous("Month", breaks = seq(1, 12, 1)) +
# scale_y_continuous("Concentration (ugL)") +
# theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = 'none') +
# ggtitle ("Range and Median of Range of 2014 Data")
#
#
#
#
# ggplot(subset(chla_long_year, year == 2014), aes(x = DateTime, y = value, col = as.factor(Depth_m))) +
# geom_point() +
# facet_grid(Reservoir ~., scales = "free_y") +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle("2014 Timeseries")
#
#
#
# # BVR plots
#
# bvr_data <- filter(chla_long_year, Reservoir == 'BVR')
#
#
# unique(bvr_data$Depth_m)
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 0.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 0.0 m')
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 3.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 3.0 m')
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 6.0 m')
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 9.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 9.0 m ')
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 12.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 12.0 m')
#
#
#
#
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 3.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 3.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 4.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 4.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 5.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 5.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 6.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 7.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 7.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 7.5), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 7.5 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 9.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 9.0 m')
#
#
#
#
#
#
# ggplot(subset(bvr_data, year == 2018 & Depth_m == 0.1), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 BVR at 0.1 m')
#
# ggplot(subset(bvr_data, year == 2018 & Depth_m == 3.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 BVR at 3.0 m')
#
# ggplot(subset(bvr_data, year == 2018 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 BVR at 6.0 m')
#
#
#
#
#
#
#
# ggplot(subset(bvr_data, year == 2019 & Depth_m == 0.1 & Site == 50), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2019 BVR at 0.1 m')
#
# ggplot(subset(bvr_data, year == 2019 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2019 BVR at 6.0 m')
#
# ggplot(subset(bvr_data, year == 2019 & Depth_m == 9.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2019 BVR at 9.0 m')
#
#
#
#
# ccr_data <- filter(chla_long_year, Reservoir == 'CCR')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 0.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 0.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 5.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 5.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 6.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 14.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 14.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 19.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 19.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 20.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 20.0 m')
#
#
#
#
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 0.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 0.0 m')
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 6.0 m')
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 19.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 19.0 m')
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 20.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 20.0 m')
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 21.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 21.0 m')
#
#
#
#
#
#
# ggplot(subset(ccr_data, year == 2018 & Depth_m == 0.1), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 CCR at 0.1 m')
#
# ggplot(subset(ccr_data, year == 2018 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 CCR at 6.0 m')
#
# ggplot(subset(ccr_data, year == 2018 & Depth_m == 12.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 CCR at 12.0 m')
m | /Data/DataAlreadyUploadedToEDI/EDIProductionFiles/MakeEMLFilteredChlorophyll/2021/chla_pub_plots.R | no_license | CareyLabVT/Reservoirs | R | false | false | 11,841 | r | library(tidyverse)
## NOTE(review): this section assumes library(tidyverse) was loaded above
## (read_csv from readr, gather from tidyr) -- confirm against the file header.
library(dplyr)
library(ggplot2)
library(lubridate)
## NOTE(review): hard-coded user path -- this only runs on the original
## author's machine; consider a relative path or here::here().
setwd('~/Dropbox/chlorophyll_processing/data/')
## NOTE(review): the file name has no extension -- confirm it is intentional.
chla_master_df <- read_csv('manual_chlorophyll_2014_2021')
snp <- filter(chla_master_df, Reservoir == "SNP")
#################################################################
########################## Plotting #############################
#################################################################
# Facetted plot of the datapoints within each reservoir over the entire dataset
ggplot(subset(chla_master_df), aes(x = DateTime, y = Chla_ugL, col = Reservoir)) +
geom_point(size = 1) +
facet_grid(Reservoir ~., scales = "free_y") +
ggtitle("Entire Dataset Timeseries")
# Altering dataset in order to plot stats:
# drop the QA flag column, reshape Chla_ugL to long (metric/value) form, and
# add year/month columns for grouping in the summary plots below.
chla_long_year <- chla_master_df %>%
ungroup(.) %>%
select(-(Flag_Chla)) %>%
gather(metric, value, Chla_ugL) %>%
mutate(year = year(DateTime)) %>%
mutate(month = month(DateTime))
# Facetted plot of the range of each reservoir for each year and the mean of the range
# NOTE(review): stat_summary(fun.y = ...) is deprecated in ggplot2 >= 3.3
# (use fun = ...) -- confirm the ggplot2 version in use.
ggplot(subset(chla_long_year), aes(x = year, y = value, col = Reservoir))+
geom_point(size = 1) +
stat_summary( fun.y = "mean", geom = "point", pch = 21, size = 3, fill = 'black') +
facet_grid(metric ~ Reservoir, scales = 'free_y') +
scale_x_continuous("DateTime", breaks = seq(2014, 2021, 1)) +
scale_y_continuous("Concentration (ugL)") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = 'none') +
ggtitle("Range and Mean of Range of Each Reservoir Over Time")
# Facetted plot of the range of each reservoir for each year and the median of
# the range (site 50 only)
ggplot(subset(chla_long_year, Site == 50), aes(x = year, y = value, col = Reservoir))+
geom_point(size = 1) +
stat_summary( fun.y = "median", geom = "point", pch = 21, size = 3, fill = 'black') +
facet_grid(metric ~ Reservoir, scales = 'free_y') +
scale_x_continuous("DateTime", breaks = seq(2014,2021, 1)) +
scale_y_continuous("Concentration (ugL)") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = 'none') +
ggtitle("Range and Median of Range of Each Reservoir Over Time")
# Colour ramp (dark blue -> red).  NOTE(review): jet.colors is defined here
# but not used by the plots below -- presumably intended for a depth colour
# scale; confirm before removing.
jet.colors <- c("#00007F", "#00007F", "blue", "blue", "#007FFF", "cyan", "#7FFF7F", "#7FFF7F",
"yellow","yellow", "#FF7F00", "#FF7F00", "red", "#7F0000")
# Monthly chlorophyll at FCR site 50, faceted by year, coloured by depth
ggplot(subset(chla_long_year, Reservoir == 'FCR' & Site == 50), aes(x = month, y = value, col = as.factor(Depth_m))) +
geom_point(cex = 2) +
facet_grid(year ~., scales = 'free_y') +
scale_x_continuous("Month") +
scale_y_continuous("Concentration (ugL)") +
ggtitle("FCR Timeseries")
# ggplot(subset(chla_long_year, Reservoir == 'FCR' & year == 2019 & Depth_m == 0.1), aes(x = DateTime, y = value, col = as.factor(Site))) +
# geom_point() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle("FCR Multisite 0.1m (2019)")
# Monthly chlorophyll at BVR site 50, faceted by year, coloured by depth
ggplot(subset(chla_long_year, Reservoir == 'BVR' & Site == 50), aes(x = month, y = value, col = as.factor(Depth_m))) +
geom_point(cex = 2) +
facet_grid(year ~., scales = 'free_y') +
scale_x_continuous("Month") +
scale_y_continuous("Concentration (ugL)") +
ggtitle ("BVR Timeseries")
# Monthly chlorophyll at CCR (all sites), faceted by year, coloured by depth
ggplot(subset(chla_long_year, Reservoir == 'CCR'), aes(x = month, y = value, col = as.factor(Depth_m))) +
geom_point(cex = 2) +
facet_grid(year ~., scales = 'free_y') +
scale_x_continuous("Month") +
scale_y_continuous("Concentration (ugL)") +
ggtitle ("CCR Timeseries")
# Monthly chlorophyll at SNP (all sites), faceted by year, coloured by depth
ggplot(subset(chla_long_year, Reservoir == 'SNP'), aes(x = month, y = value, col = as.factor(Depth_m))) +
geom_point(cex = 2) +
facet_grid(year ~.) +
scale_x_continuous("Month") +
scale_y_continuous("Concentration (ugL)") +
ggtitle ("SNP Timeseries")
# ggplot(subset(chla_long_year, Reservoir == 'BVR' & year == 2019 & Depth_m == 0.1), aes(x = DateTime, y = value, col = as.factor(Site))) +
# geom_point() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle("BVR Multisite 0.1m (2019)")
# #2014 plots
#
# ggplot(subset(chla_long_year, Site == 50 & year == 2014), aes(x = month, y = value, col = Reservoir))+
# geom_point(size = 1) +
# stat_summary( fun.y = "mean", geom = "point", pch = 21, size = 3, fill = 'black') +
# facet_grid(metric ~ Reservoir, scales = 'free_y') +
# scale_x_continuous("Month", breaks = seq(1, 12, 1)) +
# scale_y_continuous("Concentration (ugL)") +
# theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = 'none') +
# ggtitle ("Range and Mean of Range of 2014 Data")
#
#
# # Facetted plot of the range of each reservoir for each year and the median of the range
# ggplot(subset(chla_long_year, Site == 50 & year == 2014), aes(x = month, y = value, col = Reservoir))+
# geom_point(size = 1) +
# stat_summary( fun.y = "median", geom = "point", pch = 21, size = 3, fill = 'black') +
# facet_grid(metric ~ Reservoir, scales = 'free_y') +
# scale_x_continuous("Month", breaks = seq(1, 12, 1)) +
# scale_y_continuous("Concentration (ugL)") +
# theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = 'none') +
# ggtitle ("Range and Median of Range of 2014 Data")
#
#
#
#
# ggplot(subset(chla_long_year, year == 2014), aes(x = DateTime, y = value, col = as.factor(Depth_m))) +
# geom_point() +
# facet_grid(Reservoir ~., scales = "free_y") +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle("2014 Timeseries")
#
#
#
# # BVR plots
#
# bvr_data <- filter(chla_long_year, Reservoir == 'BVR')
#
#
# unique(bvr_data$Depth_m)
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 0.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 0.0 m')
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 3.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 3.0 m')
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 6.0 m')
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 9.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 9.0 m ')
#
# ggplot(subset(bvr_data, year == 2015 & Depth_m == 12.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 BVR at 12.0 m')
#
#
#
#
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 3.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 3.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 4.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 4.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 5.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 5.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 6.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 7.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 7.0 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 7.5), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 7.5 m')
#
# ggplot(subset(bvr_data, year == 2016 & Depth_m == 9.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2016 BVR at 9.0 m')
#
#
#
#
#
#
# ggplot(subset(bvr_data, year == 2018 & Depth_m == 0.1), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 BVR at 0.1 m')
#
# ggplot(subset(bvr_data, year == 2018 & Depth_m == 3.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 BVR at 3.0 m')
#
# ggplot(subset(bvr_data, year == 2018 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 BVR at 6.0 m')
#
#
#
#
#
#
#
# ggplot(subset(bvr_data, year == 2019 & Depth_m == 0.1 & Site == 50), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2019 BVR at 0.1 m')
#
# ggplot(subset(bvr_data, year == 2019 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2019 BVR at 6.0 m')
#
# ggplot(subset(bvr_data, year == 2019 & Depth_m == 9.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2019 BVR at 9.0 m')
#
#
#
#
# ccr_data <- filter(chla_long_year, Reservoir == 'CCR')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 0.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 0.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 5.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 5.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 6.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 14.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 14.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 19.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 19.0 m')
#
# ggplot(subset(ccr_data, year == 2014 & Depth_m == 20.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2014 CCR at 20.0 m')
#
#
#
#
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 0.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 0.0 m')
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 6.0 m')
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 19.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 19.0 m')
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 20.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 20.0 m')
#
# ggplot(subset(ccr_data, year == 2015 & Depth_m == 21.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2015 CCR at 21.0 m')
#
#
#
#
#
#
# ggplot(subset(ccr_data, year == 2018 & Depth_m == 0.1), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 CCR at 0.1 m')
#
# ggplot(subset(ccr_data, year == 2018 & Depth_m == 6.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 CCR at 6.0 m')
#
# ggplot(subset(ccr_data, year == 2018 & Depth_m == 12.0), aes(x = DateTime, y = value)) + geom_line() +
# scale_y_continuous("Concentration (ugL)") +
# ggtitle('2018 CCR at 12.0 m')
m |
####################################################
##### ADMMnet (ADMM-L0) #####
##### Penalty: L1, L2, Laplacian #####
##### Algorithm: one-step coordinate descent #####
####################################################
ADMMnet <- function(x, y, family=c("gaussian", "cox"), penalty=c("Lasso","Enet", "Net"), Omega=NULL, alpha=1.0, lambda=NULL, nlambda=50, rlambda=NULL, nfolds=1, foldid=NULL, inzero=TRUE, adaptive=c(FALSE, TRUE), aini=NULL, isd=FALSE, keep.beta=FALSE, ifast=TRUE, thresh=1e-7, maxit=1e+5) {
  ## Front-end dispatcher: resolves the family/penalty combination and hands
  ## the work off to the matching fitter (EnetLm/NetLm for "gaussian",
  ## EnetCox/NetCox for "cox"), then tags the result with its family/class.
  #fcall=match.call()
  family <- match.arg(family)
  penalty <- match.arg(penalty)
  ## "Lasso" is elastic net with the mixing parameter fixed at 1
  if (penalty == "Lasso") {
    penalty <- "Enet"
    alpha <- 1.0
  }
  ## A network ("Net") penalty needs the Laplacian/adjacency matrix Omega;
  ## without it only the elastic net can be fitted.  Use && (scalar,
  ## short-circuiting) rather than &: 'if' requires a length-one condition.
  ## message() (instead of cat() without a newline) signals the notice on
  ## stderr where callers can suppress it.
  if (penalty == "Net" && is.null(Omega)) {
    penalty <- "Enet"
    message("Enet was performed as no input of Omega")
  }
  if (family == "gaussian") {
    ## NOTE(review): the Enet fitters take a single adaptive flag
    ## (adaptive[1]) while the Net fitters receive the full two-element
    ## vector -- presumably (adaptive in alpha, adaptive in Omega); confirm.
    fit <- switch(penalty,
                  "Enet"=EnetLm(x,y,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive[1],aini,isd,keep.beta,thresh,maxit),
                  "Net"=NetLm(x,y,Omega,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive,aini,isd,keep.beta,thresh,maxit))
    fit$family <- "gaussian"
  } else if (family == "cox") {
    fit <- switch(penalty,
                  "Enet"=EnetCox(x,y,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive[1],aini,isd,keep.beta,ifast,thresh,maxit),
                  "Net"=NetCox(x,y,Omega,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive,aini,isd,keep.beta,ifast,thresh,maxit))
    fit$family <- "cox"
  }
  #fit$call=fcall
  class(fit) <- "ADMMnet"
  fit
}
| /fuzzedpackages/ADMMnet/R/ADMMnet.R | no_license | akhikolla/testpackages | R | false | false | 1,628 | r |
####################################################
##### ADMMnet (ADMM-L0) #####
##### Penalty: L1, L2, Laplacian #####
##### Algorithm: one-step coordinate descent #####
####################################################
ADMMnet=function(x, y, family=c("gaussian", "cox"), penalty=c("Lasso","Enet", "Net"), Omega=NULL, alpha=1.0, lambda=NULL, nlambda=50, rlambda=NULL, nfolds=1, foldid=NULL, inzero=TRUE, adaptive=c(FALSE, TRUE), aini=NULL, isd=FALSE, keep.beta=FALSE, ifast=TRUE, thresh=1e-7, maxit=1e+5) {
#fcall=match.call()
family=match.arg(family)
penalty=match.arg(penalty)
if (penalty=="Lasso") {
penalty="Enet"
alpha=1.0
}
if (penalty=="Net" & is.null(Omega)) {
penalty="Enet"
cat("Enet was performed as no input of Omega")
}
if (family == "gaussian") {
fit=switch(penalty,
"Enet"=EnetLm(x,y,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive[1],aini,isd,keep.beta,thresh,maxit),
"Net"=NetLm(x,y,Omega,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive,aini,isd,keep.beta,thresh,maxit))
fit$family="gaussian"
} else if (family == "cox") {
fit=switch(penalty,
"Enet"=EnetCox(x,y,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive[1],aini,isd,keep.beta,ifast,thresh,maxit),
"Net"=NetCox(x,y,Omega,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive,aini,isd,keep.beta,ifast,thresh,maxit))
fit$family="cox"
}
#fit$call=fcall
class(fit)="ADMMnet"
return(fit)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alteration.R
\name{early_or_late}
\alias{early_or_late}
\title{Determine if timing metrics are early, late, or in range}
\usage{
early_or_late(value, early_value, late_value, days_in_water_year)
}
\description{
Properly rolls over the calendar at 365 days, but can tell you if a metric is early, late, or "within range"
based on the modeled early_value, modeled late_value, and the actual value.
}
\details{
It returns within range (0) if the value lies between early_value and late_value.
Otherwise, it splits the distance between late_value and early_value in two,
rolling over at the end of the calendar year, and determines whether the value
is closer to late_value (returning late (1)) or to early_value (returning early (-1)).
This function is currently not used in the package - instead, a simpler evaluation that does not roll
over the calendar year is used.
}
| /ffcAPIClient/man/early_or_late.Rd | no_license | ceff-tech/ffc_api_client | R | false | true | 948 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alteration.R
\name{early_or_late}
\alias{early_or_late}
\title{Determine if timing metrics are early, late, or in range}
\usage{
early_or_late(value, early_value, late_value, days_in_water_year)
}
\description{
Properly rolls over the calendar at 365 days, but can tell you if a metric is early, late, or "within range"
based on the modeled early_value, modeled late_value, and the actual value.
}
\details{
It returns within range (0)
if the value is between early_value and late_value. If not, it splits the distance between late_value and
early_value in two, rolling over at the end of the calendar year, and assesses if the value is closer to
the late_value (then returns late (1)), or the early value (then returns early (-1)).
This function is currently not used in the package - instead, a simpler evaluation that does not roll
over the calendar year is used.
}
|
### ===== File part of R package expm =====
###
### Function to compute the matrix logarithm
###
## Compute the matrix logarithm of a square matrix 'x'.
##
## method "Higham08": inverse scaling-and-squaring (Higham, 2008), the default.
## method "Eigen":    spectral decomposition, with a Ward (1977) fallback in C
##                    when 'x' is numerically non-diagonalisable.
## 'tol' is only consulted by the "Eigen" method.
logm <- function(x, method = c("Higham08", "Eigen"),
                 ## order = 8, trySym = TRUE,
                 tol = .Machine$double.eps)
{
    ## dim() also works for "Matrix" objects, so no coercion is needed here
    dm <- dim(x)
    if (length(dm) != 2 || dm[1] != dm[2])
        stop(gettextf("'%s' must be a square matrix", "x"), domain = NA)
    method <- match.arg(method)
    if (method == "Higham08") {
        logm.Higham08(x)
    } else {               # method == "Eigen"
        ## AUTHOR: Christophe Dutang
        .Call(do_logm_eigen, x, tol)
    }
}
| /R/logm.R | no_license | cran/expm | R | false | false | 734 | r | ### ===== File part of R package expm =====
###
### Function to compute the matrix logarithm
###
logm <- function(x, method = c("Higham08", "Eigen"),
## order = 8, trySym = TRUE,
tol = .Machine$double.eps)
{
## work with "Matrix" too: A<-as.matrix(A)
d <- dim(x)
if(length(d) != 2 || d[1] != d[2])
stop(gettextf("'%s' must be a square matrix", "x"), domain=NA)
method <- match.arg(method)
switch(method,
"Higham08" = logm.Higham08(x)
,
"Eigen" = {
## AUTHOR: Christophe Dutang
## matrix exponential using eigenvalues / spectral decomposition and
## Ward(1977) algorithm if x is numerically non diagonalisable
.Call(do_logm_eigen, x, tol)
})
}
|
## Now using K-nearest neighbours
kn3<-knn3(as.factor(Attrition_Flag)~.,data =train_data,k=7,prob=TRUE)
summary(kn3)
## we predict using kn3
kn3_predict<-predict(kn3,test_data,type = "class")
plot(kn3_predict)
confusionMatrix(data = as.factor(kn3_predict), reference = test_AF)
##Calculating accuracy
mean(kn3_predict==test_data$Attrition_Flag)
###SVM model
# Fitting model SVM
svm_model <-svm(Attrition_Flag ~., data = train_data)
summary(svm_model)
#Predict Output
predicted_SVM<- predict(svm_model,test_data,type="class")
plot(predicted_SVM)
## Here we define any value less than 0.5 to be predicted as 0 and
##greater than 0.5 to be predicted as 1.
test_data$pred1[predicted_SVM<0.5]<-0
test_data$pred1[predicted_SVM>0.5]<-1
table(a1=test_data$pred1,b=test_data$Attrition_Flag)
##ACcuracy of data
mean(test_data$pred1==test_data$Attrition_Flag)
##Confusion Matrix of the model.
tp_svm<-test_data$pred1%>% factor()
confusionMatrix(data= tp_svm ,reference = as.factor(test_data$Attrition_Flag ))
| /Knn-model.R | no_license | ashvin14/Credit_Card_customers | R | false | false | 1,058 | r |
## Now using K-nearest neighbours
kn3<-knn3(as.factor(Attrition_Flag)~.,data =train_data,k=7,prob=TRUE)
summary(kn3)
## we predict using kn3
kn3_predict<-predict(kn3,test_data,type = "class")
plot(kn3_predict)
confusionMatrix(data = as.factor(kn3_predict), reference = test_AF)
##Calculating accuracy
mean(kn3_predict==test_data$Attrition_Flag)
###SVM model
# Fitting model SVM
svm_model <-svm(Attrition_Flag ~., data = train_data)
summary(svm_model)
#Predict Output
predicted_SVM<- predict(svm_model,test_data,type="class")
plot(predicted_SVM)
## Here we define any value less than 0.5 to be predicted as 0 and
##greater than 0.5 to be predicted as 1.
test_data$pred1[predicted_SVM<0.5]<-0
test_data$pred1[predicted_SVM>0.5]<-1
table(a1=test_data$pred1,b=test_data$Attrition_Flag)
##ACcuracy of data
mean(test_data$pred1==test_data$Attrition_Flag)
##Confusion Matrix of the model.
tp_svm<-test_data$pred1%>% factor()
confusionMatrix(data= tp_svm ,reference = as.factor(test_data$Attrition_Flag ))
|
# This file contains decision functions (not exported)
# to use, pass cmfilter:::corMinusPartCor to cmf function
#' The difference-in-coefficients decision function (correlation - partial cor)
#'
#' Decides whether \code{m} mediates the x -> y relationship by testing
#' whether the simple correlation cor(x, y) differs from the partial
#' correlation cor(x, y | m), using the Olkin-Finn large-sample standard
#' error (delta method).
#'
#' @param x numeric vector, the predictor.
#' @param m numeric vector, the candidate mediator.
#' @param y numeric vector, the outcome.
#' @param p.value two-sided significance level for the z-test.
#'
#' @return A single logical: \code{TRUE} when the difference is significant.
#'
#' @importFrom stats cor qnorm
#'
#' @examples # generate data
#' dat <- generateMed(n = 100, a = 0.4, b = -0.8)
#' cmfilter:::corMinusPartCor(dat$x, dat$M, dat$y)
#'
#' @keywords internal
corMinusPartCor <- function(x, m, y, p.value = 0.1) {
  n <- length(x)
  rxy <- cor(x, y)
  rym <- cor(y, m)
  rxm <- cor(x, m)
  rxy2 <- rxy^2
  rym2 <- rym^2
  rxm2 <- rxm^2
  ## partial correlation of x and y given m
  rxy.m <- (rxy - rym*rxm) / sqrt((1 - rym2)*(1 - rxm2))
  rdif <- rxy - rxy.m
  if (rdif == 0) return(FALSE)
  ## partial derivatives of rdif w.r.t. the three sample correlations
  ## (delta method)
  partder <- c(
    (rym - rxm*rxy) / (sqrt(1 - rym2) * (1 - rxm2)^(3/2)),
    1 - 1/sqrt((1 - rym2)*(1 - rxm2)),
    (rxm - rxy*rym) / (sqrt(1 - rxm2) * (1 - rym2)^(3/2))
  )
  ## large-sample variances of the sample correlations
  rvars <- c(
    (1 - rxm2)^2 / n,  # var(rxm)
    (1 - rxy2)^2 / n,  # var(rxy)
    (1 - rym2)^2 / n   # var(rmy)
  )
  ## variance-covariance matrix of the three correlations
  vcov <- diag(rvars)
  c <- (1 - rym2 - rxm2 - rxy2) / 2 # constant term in all covs
  vcov[1,2] <- vcov[2,1] <- ((2*rym - rxm*rxy)*c + rym^3) / n
  vcov[1,3] <- vcov[3,1] <- ((2*rxy - rxm*rym)*c + rxy^3) / n
  vcov[2,3] <- vcov[3,2] <- ((2*rxm - rxy*rym)*c + rxm^3) / n
  ## drop() collapses the 1x1 matrix from %*% so the function returns a
  ## plain logical scalar (the original returned a 1x1 logical matrix)
  seOlkinFinn <- sqrt(drop(partder %*% vcov %*% partder))
  ## FALSE, not F: T/F are ordinary (reassignable) bindings in R
  abs(rdif/seOlkinFinn) > qnorm(p.value/2, lower.tail = FALSE)
}
#' The product of coefficients decision function
#'
#' Tests mediation via the Sobel-type product of the x -> m coefficient
#' (alpha) and the m -> y coefficient (beta), both estimated by least squares
#' without intercepts.
#'
#' @param x numeric vector, the predictor.
#' @param m numeric vector, the candidate mediator.
#' @param y numeric vector, the outcome.
#' @param p.value two-sided significance level for the z-test.
#' @param dir if \code{TRUE}, the outcome model adjusts for x (y ~ x + m);
#'   if \code{FALSE}, it uses m alone (y ~ m).
#'
#' @return A single logical: \code{TRUE} when |alpha*beta| is significantly
#'   different from zero.
#'
#' @examples # generate data
#' dat <- generateMed(n = 100, a = 0.4, b = -0.8)
#' cmfilter:::prodCoef(dat$x, dat$M, dat$y)
#'
#' @keywords internal
prodCoef <- function(x, m, y, p.value = 0.1, dir = TRUE) {
  n <- length(x)
  ## alpha path: regression of m on x (no intercept)
  cpx <- drop(crossprod(x))                  # cross product of x
  alpha <- drop(crossprod(x, m)) / cpx       # alpha path
  res_m <- m - x * alpha                     # residual of m~x+0
  var_m <- drop(crossprod(res_m)) / (n - 1)  # rss variance
  var_a <- var_m / cpx                       # variance of alpha
  ## beta path: regression of y on (x, m) or on m alone
  mm <- if (dir) cbind(x, m) else cbind(m)   # model matrix
  cpm <- crossprod(mm)                       # cross product of mm
  beta <- solve(cpm, crossprod(mm, y))       # beta
  res_y <- y - mm %*% beta                   # residual of y~...+0
  var_y <- drop(crossprod(res_y)) / (n - 1)  # rss variance
  var_b <- diag(var_y * chol2inv(chol(cpm))) # variances of beta
  ## BUG FIX: the original always used beta[2]/var_b[2]; with dir = FALSE
  ## beta has length 1, so beta[2] was NA and the function always returned
  ## FALSE.  Select the coefficient of m by its actual position instead.
  k <- if (dir) 2L else 1L
  stat <- alpha * beta[k]                    # product of coefficients
  se <- sqrt(alpha^2 * var_b[k] + beta[k]^2 * var_a)
  if (is.na(stat) || is.na(se) || !is.numeric(stat) || !is.numeric(se)) {
    return(FALSE)
  }
  ## FALSE, not F; plain scalar logical is returned
  abs(stat / se) > qnorm(p.value / 2, lower.tail = FALSE)
}
| /R/decisionFunctions.R | no_license | cran/cmfilter | R | false | false | 2,990 | r | # This file contains decision functions (not exported)
# to use, pass cmfilter:::corMinusPartCor to cmf function
#' the difference in coefficients decision function (correlation - partial cor)
#'
#' @importFrom stats cor qnorm
#'
#' @examples # generate data
#' dat <- generateMed(n = 100, a = 0.4, b = -0.8)
#' cmfilter:::corMinusPartCor(dat$x, dat$M, dat$y)
#'
#' @keywords internal
corMinusPartCor <- function(x, m, y, p.value = 0.1) {
n <- length(x)
rxy <- cor(x, y)
rym <- cor(y, m)
rxm <- cor(x, m)
rxy2 <- rxy^2
rym2 <- rym^2
rxm2 <- rxm^2
rxy.m <- (rxy - rym*rxm) / sqrt((1 - rym2)*(1 - rxm2))
rdif <- rxy - rxy.m
if (rdif == 0) return(FALSE)
partder <- c(
(rym - rxm*rxy) / (sqrt(1 - rym2) * (1 - rxm2)^(3/2)),
1 - 1/sqrt((1 - rym2)*(1 - rxm2)),
(rxm - rxy*rym) / (sqrt(1 - rxm2) * (1 - rym2)^(3/2))
)
# Then the large-sample variances
rvars <- c(
(1 - rxm2)^2 / n, # var(rxm)
(1 - rxy2)^2 / n, # var(rxy)
(1 - rym2)^2 / n # var(rmy)
)
# Create the variance-covariance matrix
vcov <- diag(rvars)
c <- (1 - rym2 - rxm2 - rxy2) / 2 # constant term in all covs
vcov[1,2] <- vcov[2,1] <- ((2*rym - rxm*rxy)*c + rym^3) / n
vcov[1,3] <- vcov[3,1] <- ((2*rxy - rxm*rym)*c + rxy^3) / n
vcov[2,3] <- vcov[3,2] <- ((2*rxm - rxy*rym)*c + rxm^3) / n
seOlkinFinn <- sqrt(partder %*% vcov %*% partder)
return(abs(rdif/seOlkinFinn) > qnorm(p.value/2, lower.tail = F))
}
#' the product of coefficients decision function
#'
#' @examples # generate data
#' dat <- generateMed(n = 100, a = 0.4, b = -0.8)
#' cmfilter:::prodCoef(dat$x, dat$M, dat$y)
#'
#' @keywords internal
prodCoef <- function(x, m, y, p.value = 0.1, dir = TRUE) {
n <- length(x)
# first the alpha path
cpx <- crossprod(x) # cross product of x
alpha <- solve(cpx, crossprod(x, m)) # alpha path
res_m <- m - x * c(alpha) # residual of m~x+0
var_m <- as.numeric(crossprod(res_m) / (n - 1)) # rss variance
var_a <- var_m/cpx # variance of alpha
# then the beta path
if (dir) {
mm <- cbind(x, m) # model matrix
} else {
mm <- cbind(m)
}
cpm <- crossprod(mm) # cross product of mm
beta <- solve(cpm, crossprod(mm, y)) # beta
res_y <- y - mm %*% c(beta) # residual of y~m+x+0
var_y <- as.numeric(crossprod(res_y) / (n - 1)) # rss variance
var_b <- diag(var_y * chol2inv(chol(cpm))) # variance of beta
stat <- alpha * beta[2] # product of coefficients
se <- sqrt(alpha^2 * var_b[2] + beta[2]^2 * var_a) #- var_a * var_b[2])
if (is.na(stat) || is.na(se) || !is.numeric(stat) || !is.numeric(se)) {
return(FALSE)
} else {
return(abs(stat/se) > qnorm(p.value/2, lower.tail = F))
}
}
|
#' makeDir - draw from multiple Dirichlet distributions.
#'
#' Thin wrapper around \code{gtools::rdirichlet} that generates \code{n}
#' random draws from a Dirichlet distribution with parameter vector
#' \code{vec}.
#'
#' @param n A number corresponding to the number of draws (rows) to generate.
#' @param vec A vector of (positive) parameter values for the Dirichlet.
#' @return Per \code{gtools::rdirichlet}, a matrix with \code{n} rows and
#'   \code{length(vec)} columns; each row is one random draw summing to 1.
#' @seealso Note that this calls the \code{gtools} package for the
#'   \code{rdirichlet} function.
#' @examples
#' makeDir(1, c(2, 4, 3))
#' makeDir(2, c(2, 4, 3))
makeDir <- function(n, vec){
  gtools::rdirichlet(n, vec)
}
| /R/simMultDir.R | no_license | robschick/eliciteg | R | false | false | 422 | r | #' simMultDir - creates multiple Dirichlet distributions.
#'
#' @param n A number corresponding to the number of distributions to generate.
#' @param vec A vector of parameter values for the Dirichlet
#' @seealso Note that this loads the \code{gtools} package for the \code{rdirichlet} function.
#' @examples
#' makeDir(1, c(2, 4, 3))
#' makeDir(2, c(2, 4, 3))
makeDir <- function(n, vec){
gtools::rdirichlet(n, vec)
}
|
incerc sa - mi imaginez ce face un parlamentar roman in singuratatea biroului sau .
isi suna nevasta sau amanta pentru un " ce mai faci " , urmat de doua - trei detalii marunte .
mergem acolo , ai luat aia , ai vorbit cu cutare , ce nota a luat copilu' , ne vedem la sapte !
pe urma , suna un coleg de partid .
ai vorbit , ai mobilizat , ai rezolvat ( daca e devotat partidului ) sau daca e un fel de autostopist , o trage repede pe " ai auzit ce - a zis ala , ai citit ce - a declarat alalalt , ce facem ?
" astea sint cazuri banale , aproape reflexe . Fac parte din cotidian .
ca sa nu fiu acuzat de terfelirea imaginii institutiei , precizez ca doar unii mai pun mina pe telefon si zic , ba , daca vrei sa iei contractul pentru asfaltarea strazii cutare sau a drumului cutare sau daca vrei statia de apa de nu stiu unde , dai atita la partid si mai pui si tu cinci la suta pentru tescherea !
sau , ba , desteptule , noi aranjam sa - ti plateasca datoriile alea de o suta de miliarde , dar ne dai si noua zece !
mai sint si obligatiile de circumscriptie .
se pling marile societati comerciale cu gauri , si parlamentarul trebuie sa clopoteasca pe la ministere pentru a gasi o solutie .
sa - i ajutam cumva pe nenorocitii aia ca - s de - ai nostri !
dar nu poate suna de seara pina dimineata pe la tot felul de colegi de partid , pe la ministere sau pe la alegatori . Vine si la el lenea de dupa masa , cind i se aduna singele in stomac si rinza incepe sa lucreze din greu .
sau seara apasatoare , cind nu sint receptii si sedinte de partid .
si ce face atunci alesul poporului ?
il lovesc singuratatea si angoasa !
e cuprins de un vid interior cit toata viata lui Bacovia .
de vorbarie s - a saturat , de sprit i s - a umflat burta , de idei ii vijiie creierul .
natia asta e contradictorie .
nu stii ce vrea pentru ca nici ea nu e convinsa , e galagioasa si obositoare .
de televizor s - a saturat , de talkshow - uri asijderea , pe nevasta o stie pina la unghiile crapate de spalat vase si haine .
asta reprezinta tristetea si singuratatea activitatii de parlamentar , un fel de inundatie in valuri a subconstientului .
atunci incepe oftica pe cei mai mari , atunci urla ambitia de putere ( daca eram mai mare , o armata de trepadusi ar fi tras momele si bezele in jurul meu ) si tot atunci dau in clocot refularile sexuale .
e chiar o drama , o nefericire , sa fii scapatat de culmea barbatiei si sa stai toata saptamina in Bucuresti numai cu ochii pe microfon si , in pauze , pe secretara si consilieri .
cum sa mai iubesti pe cineva , cum sa mai scapi intr - o aventura ?
cum sa faci curte unei adolescente cind nesimtitii aia de ziaristi te vineaza ca pe singurul iepure de pe cimp ?
ce sa faci ?
cititul e chiar mai dificil decit privitul la televizor , tara e departe si complicata , Europa e si mai confuza , nu e mai odihnitor sa dai un telefon la 89.99.69 . si sa asculte caseta cu vino , iubitule , te astept , sint fierbinte !
e asa de bine !
tara si nevasta sint departe .
vede scene dubioase din filme de dragoste pe care le - a visat de atitea ori cu el in rolul principal si mai formeaza un 89.99.69 . unde e , in direct , live , adica nu e cu caseta , si aude , spune iubitule , te simti singur ?
si el nu stie ca sta de vorba cu o gonflabila telefonica si nici nu - i pasa ca natia plateste pentru acest giumbusluc al destinului tulburat .
si nu e numai tehnica parlamentarului . E si a bietului functionar apasat de griji si de acreala nevestei , si a paznicului de noapte , uniti cu totii intr - o mica parte din natia erotica defulabila prin telefon .
numai ca in alte cazuri plateste familia sau patronul fraier , sau statul , si mai fraier , care baga si el pierderile la gramada , cu credite si telefoane erotice , cu energie electrica si suna si cistigi , mai ales ca li s - a virit sub ochi o mura in gura si aproape ca li s - a spus , suna , ma boule , nu vezi ca ai si tu ocazia sa te alegi cu o masina sau un frigider !
si pentru ca s - a prins ce face Jim cu capra , suna repede si isi satisface rapid instinctul de cistigator intr - o cursa de doua minute care costa ca porcul de Craciun .
pentru a mai tunde din bugetul natiei , nu ne ramine decit sa acceptam parlamentarilor romani o tombola in holul institutiei si o seara de strip - tease pe saptamina .
gratis !
altfel , bugetul Romaniei va fi handicapat de pornirile erotice ale politicienilor insingurati si ale portarilor fara somn si fara bani de - o bere .
| /data/Newspapers/1999.05.02.editorial.46925.0025.r | no_license | narcis96/decrypting-alpha | R | false | false | 4,488 | r | incerc sa - mi imaginez ce face un parlamentar roman in singuratatea biroului sau .
isi suna nevasta sau amanta pentru un " ce mai faci " , urmat de doua - trei detalii marunte .
mergem acolo , ai luat aia , ai vorbit cu cutare , ce nota a luat copilu' , ne vedem la sapte !
pe urma , suna un coleg de partid .
ai vorbit , ai mobilizat , ai rezolvat ( daca e devotat partidului ) sau daca e un fel de autostopist , o trage repede pe " ai auzit ce - a zis ala , ai citit ce - a declarat alalalt , ce facem ?
" astea sint cazuri banale , aproape reflexe . Fac parte din cotidian .
ca sa nu fiu acuzat de terfelirea imaginii institutiei , precizez ca doar unii mai pun mina pe telefon si zic , ba , daca vrei sa iei contractul pentru asfaltarea strazii cutare sau a drumului cutare sau daca vrei statia de apa de nu stiu unde , dai atita la partid si mai pui si tu cinci la suta pentru tescherea !
sau , ba , desteptule , noi aranjam sa - ti plateasca datoriile alea de o suta de miliarde , dar ne dai si noua zece !
mai sint si obligatiile de circumscriptie .
se pling marile societati comerciale cu gauri , si parlamentarul trebuie sa clopoteasca pe la ministere pentru a gasi o solutie .
sa - i ajutam cumva pe nenorocitii aia ca - s de - ai nostri !
dar nu poate suna de seara pina dimineata pe la tot felul de colegi de partid , pe la ministere sau pe la alegatori . Vine si la el lenea de dupa masa , cind i se aduna singele in stomac si rinza incepe sa lucreze din greu .
sau seara apasatoare , cind nu sint receptii si sedinte de partid .
si ce face atunci alesul poporului ?
il lovesc singuratatea si angoasa !
e cuprins de un vid interior cit toata viata lui Bacovia .
de vorbarie s - a saturat , de sprit i s - a umflat burta , de idei ii vijiie creierul .
natia asta e contradictorie .
nu stii ce vrea pentru ca nici ea nu e convinsa , e galagioasa si obositoare .
de televizor s - a saturat , de talkshow - uri asijderea , pe nevasta o stie pina la unghiile crapate de spalat vase si haine .
asta reprezinta tristetea si singuratatea activitatii de parlamentar , un fel de inundatie in valuri a subconstientului .
atunci incepe oftica pe cei mai mari , atunci urla ambitia de putere ( daca eram mai mare , o armata de trepadusi ar fi tras momele si bezele in jurul meu ) si tot atunci dau in clocot refularile sexuale .
e chiar o drama , o nefericire , sa fii scapatat de culmea barbatiei si sa stai toata saptamina in Bucuresti numai cu ochii pe microfon si , in pauze , pe secretara si consilieri .
cum sa mai iubesti pe cineva , cum sa mai scapi intr - o aventura ?
cum sa faci curte unei adolescente cind nesimtitii aia de ziaristi te vineaza ca pe singurul iepure de pe cimp ?
ce sa faci ?
cititul e chiar mai dificil decit privitul la televizor , tara e departe si complicata , Europa e si mai confuza , nu e mai odihnitor sa dai un telefon la 89.99.69 . si sa asculte caseta cu vino , iubitule , te astept , sint fierbinte !
e asa de bine !
tara si nevasta sint departe .
vede scene dubioase din filme de dragoste pe care le - a visat de atitea ori cu el in rolul principal si mai formeaza un 89.99.69 . unde e , in direct , live , adica nu e cu caseta , si aude , spune iubitule , te simti singur ?
si el nu stie ca sta de vorba cu o gonflabila telefonica si nici nu - i pasa ca natia plateste pentru acest giumbusluc al destinului tulburat .
si nu e numai tehnica parlamentarului . E si a bietului functionar apasat de griji si de acreala nevestei , si a paznicului de noapte , uniti cu totii intr - o mica parte din natia erotica defulabila prin telefon .
numai ca in alte cazuri plateste familia sau patronul fraier , sau statul , si mai fraier , care baga si el pierderile la gramada , cu credite si telefoane erotice , cu energie electrica si suna si cistigi , mai ales ca li s - a virit sub ochi o mura in gura si aproape ca li s - a spus , suna , ma boule , nu vezi ca ai si tu ocazia sa te alegi cu o masina sau un frigider !
si pentru ca s - a prins ce face Jim cu capra , suna repede si isi satisface rapid instinctul de cistigator intr - o cursa de doua minute care costa ca porcul de Craciun .
pentru a mai tunde din bugetul natiei , nu ne ramine decit sa acceptam parlamentarilor romani o tombola in holul institutiei si o seara de strip - tease pe saptamina .
gratis !
altfel , bugetul Romaniei va fi handicapat de pornirile erotice ale politicienilor insingurati si ale portarilor fara somn si fara bani de - o bere .
|
# Challenge 313 Intermediate ----------------------------------------------
library(stringr)
library(RCurl)
library(readr)
# Apply a sequence of flips/rotations to an ASCII PGM image and write the
# result to "earth-<op>.pgm".
#
# Args:
#   img: full text of a PGM file; line 1 is treated as the header
#        (split on " ", with dim[2]/dim[3] used as width/height), and the
#        remaining numeric tokens as pixel values.
#   op:  string of single-character operations -- "H" (horizontal flip),
#        "V" (vertical flip), "R" (rotate one way), "L" (rotate the other).
#
# The operation string is first reduced: an even number of identical flips
# cancels out, and R/L rotations cancel each other pairwise.
img_mani <- function(img, op) {
  all <- str_split(img, "\n")[[1]]
  # drop the header line; non-numeric leftovers become NA and are removed
  data <- as.numeric(all[c(-1)])
  data <- data[!is.na(data)]
  dim <- str_split(all[1], " ")[[1]]
  # fill row-wise so the matrix layout matches the image raster order
  img <- matrix(data, nrow = as.numeric(dim[3]), ncol = as.numeric(dim[2]), byrow = T)
  op <- str_split(op, "")[[1]]
  h <- sum(op == "H"); v <- sum(op == "V")
  r <- sum(op == "R"); l <- sum(op == "L")
  # build the reduced operation list
  ops <- vector()
  if(h%%2 == 1)
    ops <- c(ops, "H")
  if(v%%2 == 1)
    ops <- c(ops, "V")
  if(r != l) {
    if(r > l)
      ops <- c(ops, rep("R", (r-l)%%4))
    else
      ops <- c(ops, rep("L", (l-r)%%4))
  }
  for(i in seq_along(ops)) {
    # a rotation swaps the width/height recorded in the header
    if(any(ops[i] == c("R", "L")))
      dim[2:3] <- dim[3:2]
    # NOTE(review): apply(X, MARGIN, rev) returns its results bound as
    # columns, so the margin choice plus the conditional t() below together
    # implement the flip/rotation -- verify against a sample image.
    rc <- ifelse(any(ops[i] == c("R", "V")), 2, 1)
    img <- apply(img, rc, rev)
    if(any(ops[i] == c("R", "H")))
      img <- t(img)
  }
  dim <- str_c(dim, collapse = " ")
  op <- str_c(op, collapse = "")
  # write the header followed by one pixel value per line (valid PGM whitespace)
  write(c(dim, as.vector(t(img))), sprintf("earth-%s.pgm", op))
}
input <- getURL("https://raw.githubusercontent.com/cosmologicon/problems/master/pgm/earth.pgm")
| /313 Intermediate.R | no_license | albcab/-r-dailyprogrammer- | R | false | false | 1,219 | r |
# Challenge 313 Intermediate ----------------------------------------------
library(stringr)
library(RCurl)
library(readr)
# Apply a sequence of flips/rotations to an ASCII PGM image and write the
# result to "earth-<op>.pgm".
#
# Args:
#   img: full text of a PGM file; line 1 is treated as the header
#        (split on " ", with dim[2]/dim[3] used as width/height), and the
#        remaining numeric tokens as pixel values.
#   op:  string of single-character operations -- "H" (horizontal flip),
#        "V" (vertical flip), "R" (rotate one way), "L" (rotate the other).
#
# The operation string is first reduced: an even number of identical flips
# cancels out, and R/L rotations cancel each other pairwise.
img_mani <- function(img, op) {
  all <- str_split(img, "\n")[[1]]
  # drop the header line; non-numeric leftovers become NA and are removed
  data <- as.numeric(all[c(-1)])
  data <- data[!is.na(data)]
  dim <- str_split(all[1], " ")[[1]]
  # fill row-wise so the matrix layout matches the image raster order
  img <- matrix(data, nrow = as.numeric(dim[3]), ncol = as.numeric(dim[2]), byrow = T)
  op <- str_split(op, "")[[1]]
  h <- sum(op == "H"); v <- sum(op == "V")
  r <- sum(op == "R"); l <- sum(op == "L")
  # build the reduced operation list
  ops <- vector()
  if(h%%2 == 1)
    ops <- c(ops, "H")
  if(v%%2 == 1)
    ops <- c(ops, "V")
  if(r != l) {
    if(r > l)
      ops <- c(ops, rep("R", (r-l)%%4))
    else
      ops <- c(ops, rep("L", (l-r)%%4))
  }
  for(i in seq_along(ops)) {
    # a rotation swaps the width/height recorded in the header
    if(any(ops[i] == c("R", "L")))
      dim[2:3] <- dim[3:2]
    # NOTE(review): apply(X, MARGIN, rev) returns its results bound as
    # columns, so the margin choice plus the conditional t() below together
    # implement the flip/rotation -- verify against a sample image.
    rc <- ifelse(any(ops[i] == c("R", "V")), 2, 1)
    img <- apply(img, rc, rev)
    if(any(ops[i] == c("R", "H")))
      img <- t(img)
  }
  dim <- str_c(dim, collapse = " ")
  op <- str_c(op, collapse = "")
  # write the header followed by one pixel value per line (valid PGM whitespace)
  write(c(dim, as.vector(t(img))), sprintf("earth-%s.pgm", op))
}
input <- getURL("https://raw.githubusercontent.com/cosmologicon/problems/master/pgm/earth.pgm")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation_functions.R
\name{simulate_answer}
\alias{simulate_answer}
\title{Simulate answer}
\usage{
simulate_answer(theta, model, alpha, beta, guessing, item_keys)
}
\arguments{
\item{theta}{Vector with true theta.}
\item{model}{One of \code{"3PLM"}, \code{"GPCM"}, \code{"SM"} or \code{"GRM"}, for the three-parameter logistic, generalized partial credit, sequential or graded response model, respectively.}
\item{alpha}{Matrix of alpha parameters, one column per dimension, one row per item. Row names should contain the item keys.
Note that so called within-dimensional models still use an alpha matrix, they simply have only one non-zero loading per item.}
\item{beta}{Matrix of beta parameters, one column per item step, one row per item. Row names should contain the item keys.
Note that \code{shadowcat} expects answer categories to be sequential, and without gaps. That is, the weight parameter in the GPCM model is assumed to be sequential,
and equal to the position of the 'location' of the beta parameter in the beta matrix.
The matrix should have a number of columns equal to the largest number of item steps over items, items with fewer answer categories should be
right-padded with \code{NA}. \code{NA} values between answer categories are not allowed, and will lead to errors.}
\item{guessing}{Matrix with one column of guessing parameters per item. Row names should contain the item keys. Optionally used in 3PLM model, ignored for all others.}
\item{item_keys}{Character vector of item keys for which answers should be simulated.}
}
\value{
Vector with responses.
}
\description{
Simulate answer on specified items, given true theta.
}
\examples{
alpha_beta <- simulate_testbank(model = "3PLM", number_items = 50,
number_dimensions = 1, number_itemsteps = 1)
guessing <- matrix(rep(.5, 50), dimnames = list(rownames(alpha_beta$alpha), NULL))
# Without guessing parameter
simulate_answer(theta = .3, model = "3PLM", alpha = alpha_beta$alpha,
beta = alpha_beta$beta, guessing = NULL, item_keys = "item3")
# With guessing parameter
simulate_answer(theta = .3, model = "3PLM", alpha = alpha_beta$alpha,
beta = alpha_beta$beta, guessing = guessing, item_keys = "item3")
# Simulate answers for more than one item
simulate_answer(theta = .3, model = "3PLM", alpha = alpha_beta$alpha,
beta = alpha_beta$beta, guessing = NULL,
item_keys = c("item5", "item2", "item8", "item1", "item18"))
}
| /man/simulate_answer.Rd | no_license | Karel-Kroeze/ShadowCAT | R | false | true | 2,591 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation_functions.R
\name{simulate_answer}
\alias{simulate_answer}
\title{Simulate answer}
\usage{
simulate_answer(theta, model, alpha, beta, guessing, item_keys)
}
\arguments{
\item{theta}{Vector with true theta.}
\item{model}{One of \code{"3PLM"}, \code{"GPCM"}, \code{"SM"} or \code{"GRM"}, for the three-parameter logistic, generalized partial credit, sequential or graded response model, respectively.}
\item{alpha}{Matrix of alpha parameters, one column per dimension, one row per item. Row names should contain the item keys.
Note that so called within-dimensional models still use an alpha matrix, they simply have only one non-zero loading per item.}
\item{beta}{Matrix of beta parameters, one column per item step, one row per item. Row names should contain the item keys.
Note that \code{shadowcat} expects answer categories to be sequential, and without gaps. That is, the weight parameter in the GPCM model is assumed to be sequential,
and equal to the position of the 'location' of the beta parameter in the beta matrix.
The matrix should have a number of columns equal to the largest number of item steps over items, items with fewer answer categories should be
right-padded with \code{NA}. \code{NA} values between answer categories are not allowed, and will lead to errors.}
\item{guessing}{Matrix with one column of guessing parameters per item. Row names should contain the item keys. Optionally used in 3PLM model, ignored for all others.}
\item{item_keys}{Character vector of item keys for which answers should be simulated.}
}
\value{
Vector with responses.
}
\description{
Simulate answer on specified items, given true theta.
}
\examples{
alpha_beta <- simulate_testbank(model = "3PLM", number_items = 50,
number_dimensions = 1, number_itemsteps = 1)
guessing <- matrix(rep(.5, 50), dimnames = list(rownames(alpha_beta$alpha), NULL))
# Without guessing parameter
simulate_answer(theta = .3, model = "3PLM", alpha = alpha_beta$alpha,
beta = alpha_beta$beta, guessing = NULL, item_keys = "item3")
# With guessing parameter
simulate_answer(theta = .3, model = "3PLM", alpha = alpha_beta$alpha,
beta = alpha_beta$beta, guessing = guessing, item_keys = "item3")
# Simulate answers for more than one item
simulate_answer(theta = .3, model = "3PLM", alpha = alpha_beta$alpha,
beta = alpha_beta$beta, guessing = NULL,
item_keys = c("item5", "item2", "item8", "item1", "item18"))
}
|
# Auto-generated fuzz/regression input for meteor:::ET0_Makkink (presumably
# Makkink reference evapotranspiration -- confirm against the meteor package).
# Exercises the function with zero-length Rs/relh/temp vectors and a
# 41-element all-zero atmp vector, then prints the structure of the result.
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = numeric(0), temp = numeric(0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/terrain_analysis.R
\name{wbt_slope}
\alias{wbt_slope}
\title{Slope}
\usage{
wbt_slope(
dem,
output,
zfactor = NULL,
units = "degrees",
wd = NULL,
verbose_mode = FALSE,
compress_rasters = FALSE
)
}
\arguments{
\item{dem}{Input raster DEM file.}
\item{output}{Output raster file.}
\item{zfactor}{Optional multiplier for when the vertical and horizontal units are not the same.}
\item{units}{Units of output raster; options include 'degrees', 'radians', 'percent'.}
\item{wd}{Changes the working directory.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
\item{compress_rasters}{Sets the flag used by WhiteboxTools to determine whether to use compression for output rasters.}
}
\value{
Returns the tool text outputs.
}
\description{
Calculates a slope raster from an input DEM.
}
| /man/wbt_slope.Rd | permissive | dondealban/whiteboxR | R | false | true | 932 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/terrain_analysis.R
\name{wbt_slope}
\alias{wbt_slope}
\title{Slope}
\usage{
wbt_slope(
dem,
output,
zfactor = NULL,
units = "degrees",
wd = NULL,
verbose_mode = FALSE,
compress_rasters = FALSE
)
}
\arguments{
\item{dem}{Input raster DEM file.}
\item{output}{Output raster file.}
\item{zfactor}{Optional multiplier for when the vertical and horizontal units are not the same.}
\item{units}{Units of output raster; options include 'degrees', 'radians', 'percent'.}
\item{wd}{Changes the working directory.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
\item{compress_rasters}{Sets the flag used by WhiteboxTools to determine whether to use compression for output rasters.}
}
\value{
Returns the tool text outputs.
}
\description{
Calculates a slope raster from an input DEM.
}
|
# Run the coevolutionary model with interactions shifting in time.
# Based on a probability prob_change (g), in each timestep of the simulation, an interaction
# shifts (MM -> AM and AM -> MM).
#
# This script returns a simple graph with species traits changing in time due to coevolution.
# The asterisks in the graph mark the timesteps in which an interaction shift occurs.
# loading packages and functions
# NOTE(review): absolute, user-specific paths -- the script only runs as-is on
# the original author's machine.
setwd("~/Dropbox/Master/Code/coevo_mut_antag/R/data/")
source("~/Dropbox/Master/Code/coevo_mut_antag/R/functions/Antagonize.R")
source("~/Dropbox/Master/Code/coevo_mut_antag/R/functions/ConDepCoevoMutAntNet.R")
library(ggplot2)
library(cowplot)
library(reshape2)
# initial parameters
antprob = 0.8 # current probability value
prob_change = 0.01 # g or current probability of an interaction shift
n_sp = 10 # defining number of species
M = matrix(1, ncol = n_sp, nrow = n_sp) # building matrix M of positive effects
diag(M) = 0 # no intraespecific interactions
# Antagonize M (transform positive links in negative)
antagonize = Antagonize(M, antprob)
M = antagonize[[1]]
V = antagonize[[2]]
# coevolutionary model parameters (semantics defined in ConDepCoevoMutAntNet.R)
phi = 0.2
alpha = 0.2
theta = runif(n_sp, 0, 10)
init = runif(n_sp, 0, 10)
p = 0.1
epsilon = 5
eq_dif = 0.0001
t_max = 1000
# running coevolution simulation
simulation = ConDepCoevoMutAntNet(n_sp, M, V, phi, alpha,
                                  theta, init, p, epsilon, eq_dif, t_max, prob_change)
traits = simulation[[1]] # species traits
w_time = as.data.frame(simulation[[2]]) # which time interactions shifted
# prepare data frame with tracked timesteps (x positions for the "*" markers)
colnames(w_time) = "xplace"
w_time$yplace = 1
# building data frame to plot results (long format: one row per species per timestep)
traits = as.data.frame(traits)
n_sp = ncol(traits)
traits_vec = c(as.matrix(traits))
traits_df = data.frame(species = rep(paste("sp", 1:n_sp, sep = ""), each = nrow(traits)),
                       time = rep(1:nrow(traits), times = n_sp),
                       trait = traits_vec)
# plotting traits through time
plotar = ggplot() +
  geom_path(data=traits_df, aes(x = time, y = trait, group=species,
                                color = species),size = 1.8, alpha = 0.7) +
  geom_text(data = w_time, aes(x=xplace, y=yplace),label = "*", size = 7) +
  ggtitle(paste("g =", prob_change, ", p = ", antprob)) +
  xlab("Time") +
  ylab("Mean species trait (z)") +
  theme(axis.text.x = element_text(size = 11),
        axis.text.y = element_text(size = 11),
        axis.title = element_text(size = 14),
        legend.key.size = unit(0.6, "cm"),
        legend.text = element_text(size = 12))
ggsave(plotar, filename = "ConDep_Basic_Traits.pdf", width = 19, height = 11, units = "cm") | /R/scripts/ConDep_Basic_Traits.R | no_license | lucascamacho/coevo_mut_antag | R | false | false | 2,681 | r | # Run the coevolutionary model with interactions shifting in time.
# Based on a probability prob_change (g), in each timestep of the simulation, an interaction
# shifts (MM -> AM and AM -> MM).
#
# This script returns a simple graph with species traits changing in time due to coevolution.
# The asterisks in the graph mark the timesteps in which an interaction shift occurs.
# loading packages and functions
# NOTE(review): absolute, user-specific paths -- the script only runs as-is on
# the original author's machine.
setwd("~/Dropbox/Master/Code/coevo_mut_antag/R/data/")
source("~/Dropbox/Master/Code/coevo_mut_antag/R/functions/Antagonize.R")
source("~/Dropbox/Master/Code/coevo_mut_antag/R/functions/ConDepCoevoMutAntNet.R")
library(ggplot2)
library(cowplot)
library(reshape2)
# initial parameters
antprob = 0.8 # current probability value
prob_change = 0.01 # g or current probability of an interaction shift
n_sp = 10 # defining number of species
M = matrix(1, ncol = n_sp, nrow = n_sp) # building matrix M of positive effects
diag(M) = 0 # no intraespecific interactions
# Antagonize M (transform positive links in negative)
antagonize = Antagonize(M, antprob)
M = antagonize[[1]]
V = antagonize[[2]]
# coevolutionary model parameters (semantics defined in ConDepCoevoMutAntNet.R)
phi = 0.2
alpha = 0.2
theta = runif(n_sp, 0, 10)
init = runif(n_sp, 0, 10)
p = 0.1
epsilon = 5
eq_dif = 0.0001
t_max = 1000
# running coevolution simulation
simulation = ConDepCoevoMutAntNet(n_sp, M, V, phi, alpha,
                                  theta, init, p, epsilon, eq_dif, t_max, prob_change)
traits = simulation[[1]] # species traits
w_time = as.data.frame(simulation[[2]]) # which time interactions shifted
# prepare data frame with tracked timesteps (x positions for the "*" markers)
colnames(w_time) = "xplace"
w_time$yplace = 1
# building data frame to plot results (long format: one row per species per timestep)
traits = as.data.frame(traits)
n_sp = ncol(traits)
traits_vec = c(as.matrix(traits))
traits_df = data.frame(species = rep(paste("sp", 1:n_sp, sep = ""), each = nrow(traits)),
                       time = rep(1:nrow(traits), times = n_sp),
                       trait = traits_vec)
# plotting traits through time
plotar = ggplot() +
  geom_path(data=traits_df, aes(x = time, y = trait, group=species,
                                color = species),size = 1.8, alpha = 0.7) +
  geom_text(data = w_time, aes(x=xplace, y=yplace),label = "*", size = 7) +
  ggtitle(paste("g =", prob_change, ", p = ", antprob)) +
  xlab("Time") +
  ylab("Mean species trait (z)") +
  theme(axis.text.x = element_text(size = 11),
        axis.text.y = element_text(size = 11),
        axis.title = element_text(size = 14),
        legend.key.size = unit(0.6, "cm"),
        legend.text = element_text(size = 12))
ggsave(plotar, filename = "ConDep_Basic_Traits.pdf", width = 19, height = 11, units = "cm")
# Kaggle "Bike Sharing Demand": data loading and feature engineering.
library(caret)
# NOTE(review): install.packages() inside a script re-installs on every run;
# better done once interactively.
install.packages("party")
library(rpart)
library(randomForest)
library(dplyr)
library(caTools)
install.packages("gbm")
library(gbm)
library(plyr)
# load data
train <- read.csv("train.csv")
test <- read.csv("test.csv")
# combine train and test data
# set the response columns aside so train and test have identical columns
casual <- train$casual
registered <- train$registered
count <- train$count
train <- train[,-c(10:12)]
bike <- rbind(train,test)
# separate date and time variable
# NOTE(review): `Hours` is computed but never used afterwards; its "%d/%m/%Y"
# format also looks inconsistent with the as.POSIXct() parse below -- verify
# against the CSV before relying on it.
Hours <- format(as.POSIXct(strptime(bike$datetime,"%d/%m/%Y %H:%M:%S",tz="")) ,format = "%H:%M:%S")
date <- as.POSIXct(bike$datetime)
# expand the timestamp into numeric Year/Month/Day/Hour components
df_date <- data.frame(date = date, Year = as.numeric(format(date, format = "%Y")), Month = as.numeric(format(date, format = "%m")), Day = as.numeric(format(date, format = "%d")), Hour = as.numeric(format(date, format = "%H")))
# create weekday variable
df_date$weekday <- as.factor(weekdays(df_date$date))
df_date <- df_date[,2:6]
bike <- cbind(bike, df_date)
str(bike)
# create weekend variable
bike$weekend <- ifelse(bike$weekday == "Saturday" | bike$weekday == "Sunday", "1", "0")
# distribution of numerical variable (quick EDA histograms)
bike$season <- as.numeric(bike$season)
hist(bike$season)
bike$holiday <- as.numeric(bike$holiday)
hist(bike$holiday)
bike$workingday <- as.numeric(bike$workingday)
hist(bike$workingday)
hist(bike$temp)
bike$weather <- as.numeric(bike$weather)
hist(bike$weather)
bike$weekend <- as.numeric(bike$weekend)
hist(bike$weekend)
hist(bike$humidity)
# transform variables: recode categorical predictors as factors for modelling
table(bike$season)
bike$season <- as.factor(bike$season)
bike$Hour <- as.factor(bike$Hour)
bike$holiday <- as.factor(bike$holiday)
bike$workingday <- as.factor(bike$workingday)
bike$weather <- as.factor(bike$weather)
bike$Year <- as.factor(bike$Year)
bike$Month <- as.factor(bike$Month)
bike$weekend <- as.factor(bike$weekend)
str(bike)
# group Hour variable into 4 groups: 0-6, 7-15, 16-19, 20-23
# BUG FIX: bike$Hour is a factor at this point, and as.numeric() on a factor
# returns the level index (1..24), not the underlying hour (0..23), which
# shifted every bin boundary by one (e.g. hour 6 fell into "0-6"'s neighbour).
# Converting through as.character() recovers the true hour values.
bike$Hour <- as.numeric(as.character(bike$Hour))
hour <- ifelse(bike$Hour < 7, "0-6", ifelse( bike$Hour >= 7 & bike$Hour < 16, "7-15", ifelse(bike$Hour >= 16 & bike$Hour < 20, "16-19", "20-23")))
bike <- cbind(bike, hour)
table(bike$hour)
# drop the raw Hour column (column 13), keep the binned factor
bike <- bike[,-13]
bike$hour <- as.factor(bike$hour)
str(bike)
# group Month variable into 3 groups: 1-5, 6-10, 11-12
# Same factor-to-numeric pitfall; for months the level index (1..12) happens
# to equal the month value, but convert via as.character() anyway for safety.
bike$Month <- as.numeric(as.character(bike$Month))
month <- ifelse(bike$Month < 6, "1-5", ifelse(bike$Month >= 6 & bike$Month < 11, "6-10", "11-12"))
bike <- cbind(bike, month)
table(bike$month)
# drop the raw Month column (column 11), keep the binned factor
bike <- bike[,-11]
bike$month <- as.factor(bike$month)
# normalize variables: temp, atemp, humidity, windspeed
# NOTE(review): the scaled result `dat` is never used below (models are fit on
# the unscaled `bike`); mutate_each_() is also deprecated in current dplyr.
dat <- bike %>% mutate_each_(funs(scale), vars = c("temp", "atemp", "humidity", "windspeed"))
# data preparation, we split train data into 2 sets to evaluate the model before submit
train <- bike[1:10886,]
test <- bike[10887:17379,]
# reattach the responses to the training rows
train_dat <- cbind(train, casual)
train_dat <- cbind(train_dat, registered)
train_dat <- cbind(train_dat, count)
# split data
set.seed(1)
split = sample.split(train_dat$count, SplitRatio = 0.7)
subTrain = subset(train_dat, split == TRUE)
subTest = subset(train_dat, split == FALSE)
### building model
# create formular
# NOTE(review): these assignments overwrite the `casual`/`registered` response
# vectors defined earlier with formula objects of the same name.
casual <- casual ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + Year + Day + weekday + weekend + hour + month
registered <- registered ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + Year + Day + weekday + weekend + hour + month
##### simple tree model
casual_rpart <- rpart(casual, data = subTrain)
casual_pred <- round(predict(casual_rpart, newdata = subTest))
registered_rpart <- rpart(registered, data = subTrain)
registered_pred <- round(predict(registered_rpart, newdata = subTest))
# total demand = casual + registered riders
count_rpart <- casual_pred + registered_pred
# calculate RMSE
rmse_rpart <- sqrt(mean((count_rpart - subTest$count)^2))
rmse_rpart
# rmse = 115
# simple tree model with "caret" package (10-fold CV over the cp grid)
set.seed(1)
fitControl1 <- trainControl(method = "cv", number = 10)
Grid <- expand.grid(cp = seq(0,0.05,0.005))
# predict casual and registered
casual_rpartcv <- train(casual, data = subTrain, method = "rpart", trControl = fitControl1, tuneGrid = Grid, metric = "RMSE", maximize = FALSE)
casual_predcv <- predict(casual_rpartcv, newdata = subTest)
registered_rpartcv <- train(registered, data = subTrain, method = "rpart", trControl = fitControl1, tuneGrid = Grid, metric = "RMSE", maximize = FALSE)
registered_predcv <- predict(registered_rpartcv, newdata = subTest)
count_rpartcv <- round(casual_predcv + registered_predcv)
# calculate RMSE
rmse_rpartcv <- sqrt(mean((count_rpartcv - subTest$count)^2))
rmse_rpartcv
# rmse = 113
# using caret package for CART tree does not improve the result much
##### try random forest (10-fold CV over mtry = 4, 8, 12, 16)
set.seed(1)
fitControl2 <- trainControl(method = "cv", number = 10)
Grid1 <- expand.grid(mtry = seq(4,16,4))
# predict casual and registered
casual_rf <- train(casual, data = subTrain, method = "rf", trControl = fitControl2, metric = "RMSE", maximize = FALSE, tuneGrid = Grid1, ntree = 250, verbose = FALSE)
casual_rf
casual_predrf <- predict(casual_rf, newdata = subTest)
registered_rf <- train(registered, data = subTrain, method = "rf", trControl = fitControl2, metric = "RMSE", maximize = FALSE, tuneGrid = Grid1, ntree = 250)
registered_rf
registered_predrf <- predict(registered_rf, newdata = subTest)
count_rf <- round(casual_predrf + registered_predrf)
# calculate RMSE
rmse_rf <- sqrt(mean((count_rf - subTest$count)^2))
# BUG FIX: was `rmse_rfrf`, an undefined object (typo) that errored at run time
rmse_rf
# rmse = 100
# compared with the CART model, random forest does better (rmse 100 vs 113); the
# two most important random forest parameters are mtry and ntree -- only mtry is
# tuned above because of run time.
##### try gradient boosting algorithm
set.seed(1)
fitControl3 <- trainControl(method = "cv", number = 3)
# single fixed hyperparameter combination (no grid search)
Grid3 <- expand.grid(shrinkage = 0.01, interaction.depth = 8, n.minobsinnode = 10, n.trees = 2500)
# predict casual and registered
casual_gbm <- train(casual ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + Year + Day + weekday + weekend + hour + month, data = subTrain, method = "gbm", trControl = fitControl3, metric = "RMSE", maximize = FALSE, tuneGrid = Grid3 )
casual_gbm
casual_predgbm <- predict(casual_gbm, newdata = subTest)
registered_gbm <- train(registered ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + Year + Day + weekday + weekend + hour + month, data = subTrain, method = "gbm", trControl = fitControl3, metric = "RMSE", maximize = FALSE, tuneGrid = Grid3)
registered_gbm
registered_predgbm <- predict(registered_gbm, newdata = subTest)
count_gbm <- casual_predgbm + registered_predgbm
# calculate RMSE
rmse_gbm <- sqrt(mean((count_gbm - subTest$count)^2))
rmse_gbm
| /Code/Bike.R | no_license | TrungLe205/Bike-Sharing | R | false | false | 6,682 | r | library(caret)
# NOTE(review): install.packages() inside a script re-installs on every run;
# better done once interactively.
install.packages("party")
library(rpart)
library(randomForest)
library(dplyr)
library(caTools)
install.packages("gbm")
library(gbm)
library(plyr)
# load data
train <- read.csv("train.csv")
test <- read.csv("test.csv")
# combine train and test data
# set the response columns aside so train and test have identical columns
casual <- train$casual
registered <- train$registered
count <- train$count
train <- train[,-c(10:12)]
bike <- rbind(train,test)
# separate date and time variable
# NOTE(review): `Hours` is computed but never used afterwards; its "%d/%m/%Y"
# format also looks inconsistent with the as.POSIXct() parse below -- verify
# against the CSV before relying on it.
Hours <- format(as.POSIXct(strptime(bike$datetime,"%d/%m/%Y %H:%M:%S",tz="")) ,format = "%H:%M:%S")
date <- as.POSIXct(bike$datetime)
# expand the timestamp into numeric Year/Month/Day/Hour components
df_date <- data.frame(date = date, Year = as.numeric(format(date, format = "%Y")), Month = as.numeric(format(date, format = "%m")), Day = as.numeric(format(date, format = "%d")), Hour = as.numeric(format(date, format = "%H")))
# create weekday variable
df_date$weekday <- as.factor(weekdays(df_date$date))
df_date <- df_date[,2:6]
bike <- cbind(bike, df_date)
str(bike)
# create weekend variable
bike$weekend <- ifelse(bike$weekday == "Saturday" | bike$weekday == "Sunday", "1", "0")
# distribution of numerical variable (quick EDA histograms)
bike$season <- as.numeric(bike$season)
hist(bike$season)
bike$holiday <- as.numeric(bike$holiday)
hist(bike$holiday)
bike$workingday <- as.numeric(bike$workingday)
hist(bike$workingday)
hist(bike$temp)
bike$weather <- as.numeric(bike$weather)
hist(bike$weather)
bike$weekend <- as.numeric(bike$weekend)
hist(bike$weekend)
hist(bike$humidity)
# transform variables: recode categorical predictors as factors for modelling
table(bike$season)
bike$season <- as.factor(bike$season)
bike$Hour <- as.factor(bike$Hour)
bike$holiday <- as.factor(bike$holiday)
bike$workingday <- as.factor(bike$workingday)
bike$weather <- as.factor(bike$weather)
bike$Year <- as.factor(bike$Year)
bike$Month <- as.factor(bike$Month)
bike$weekend <- as.factor(bike$weekend)
str(bike)
# group Hour variable into 4 groups: 0-6, 7-15, 16-19, 20-23
# BUG FIX: bike$Hour is a factor at this point, and as.numeric() on a factor
# returns the level index (1..24), not the underlying hour (0..23), which
# shifted every bin boundary by one (e.g. hour 6 fell into "0-6"'s neighbour).
# Converting through as.character() recovers the true hour values.
bike$Hour <- as.numeric(as.character(bike$Hour))
hour <- ifelse(bike$Hour < 7, "0-6", ifelse( bike$Hour >= 7 & bike$Hour < 16, "7-15", ifelse(bike$Hour >= 16 & bike$Hour < 20, "16-19", "20-23")))
bike <- cbind(bike, hour)
table(bike$hour)
# drop the raw Hour column (column 13), keep the binned factor
bike <- bike[,-13]
bike$hour <- as.factor(bike$hour)
str(bike)
# group Month variable into 3 groups: 1-5, 6-10, 11-12
# Same factor-to-numeric pitfall; for months the level index (1..12) happens
# to equal the month value, but convert via as.character() anyway for safety.
bike$Month <- as.numeric(as.character(bike$Month))
month <- ifelse(bike$Month < 6, "1-5", ifelse(bike$Month >= 6 & bike$Month < 11, "6-10", "11-12"))
bike <- cbind(bike, month)
table(bike$month)
# drop the raw Month column (column 11), keep the binned factor
bike <- bike[,-11]
bike$month <- as.factor(bike$month)
# normalize variables: temp, atemp, humidity, windspeed
# NOTE(review): the scaled result `dat` is never used below (models are fit on
# the unscaled `bike`); mutate_each_() is also deprecated in current dplyr.
dat <- bike %>% mutate_each_(funs(scale), vars = c("temp", "atemp", "humidity", "windspeed"))
# data preparation, we split train data into 2 sets to evaluate the model before submit
train <- bike[1:10886,]
test <- bike[10887:17379,]
# reattach the responses to the training rows
train_dat <- cbind(train, casual)
train_dat <- cbind(train_dat, registered)
train_dat <- cbind(train_dat, count)
# split data
set.seed(1)
split = sample.split(train_dat$count, SplitRatio = 0.7)
subTrain = subset(train_dat, split == TRUE)
subTest = subset(train_dat, split == FALSE)
### building model
# create formular
# NOTE(review): these assignments overwrite the `casual`/`registered` response
# vectors defined earlier with formula objects of the same name.
casual <- casual ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + Year + Day + weekday + weekend + hour + month
registered <- registered ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + Year + Day + weekday + weekend + hour + month
##### simple tree model
casual_rpart <- rpart(casual, data = subTrain)
casual_pred <- round(predict(casual_rpart, newdata = subTest))
registered_rpart <- rpart(registered, data = subTrain)
registered_pred <- round(predict(registered_rpart, newdata = subTest))
# total demand = casual + registered riders
count_rpart <- casual_pred + registered_pred
# calculate RMSE
rmse_rpart <- sqrt(mean((count_rpart - subTest$count)^2))
rmse_rpart
# rmse = 115
# simple tree model with "caret" package (10-fold CV over the cp grid)
set.seed(1)
fitControl1 <- trainControl(method = "cv", number = 10)
Grid <- expand.grid(cp = seq(0,0.05,0.005))
# predict casual and registered
casual_rpartcv <- train(casual, data = subTrain, method = "rpart", trControl = fitControl1, tuneGrid = Grid, metric = "RMSE", maximize = FALSE)
casual_predcv <- predict(casual_rpartcv, newdata = subTest)
registered_rpartcv <- train(registered, data = subTrain, method = "rpart", trControl = fitControl1, tuneGrid = Grid, metric = "RMSE", maximize = FALSE)
registered_predcv <- predict(registered_rpartcv, newdata = subTest)
count_rpartcv <- round(casual_predcv + registered_predcv)
# calculate RMSE
rmse_rpartcv <- sqrt(mean((count_rpartcv - subTest$count)^2))
rmse_rpartcv
# rmse = 113
# using caret package for CART tree does not improve the result much
##### Random forest ----
# 10-fold CV over mtry (number of variables tried at each split);
# ntree is fixed at 250 to keep runtime manageable.
set.seed(1)
fitControl2 <- trainControl(method = "cv", number = 10)
Grid1 <- expand.grid(mtry = seq(4, 16, 4))
# Fit the casual- and registered-rider models.
casual_rf <- train(casual, data = subTrain, method = "rf", trControl = fitControl2, metric = "RMSE", maximize = FALSE, tuneGrid = Grid1, ntree = 250, verbose = FALSE)
casual_rf
casual_predrf <- predict(casual_rf, newdata = subTest)
registered_rf <- train(registered, data = subTrain, method = "rf", trControl = fitControl2, metric = "RMSE", maximize = FALSE, tuneGrid = Grid1, ntree = 250)
registered_rf
registered_predrf <- predict(registered_rf, newdata = subTest)
count_rf <- round(casual_predrf + registered_predrf)
# Hold-out RMSE for the random forest.
rmse_rf <- sqrt(mean((count_rf - subTest$count)^2))
# BUG FIX: was `rmse_rfrf`, an undefined object — the script errored here
# instead of printing the RMSE.
rmse_rf
# rmse = 100
# Compared with the CART model (113), the random forest does better
# (100). The two most important parameters are mtry and ntree; only mtry
# is tuned here because of runtime constraints.
##### Gradient boosting (gbm) ----
# 3-fold CV with a single fixed hyper-parameter combination (no tuning):
# small shrinkage, deep interactions, many trees.
set.seed(1)
fitControl3 <- trainControl(method = "cv", number = 3)
Grid3 <- expand.grid(shrinkage = 0.01, interaction.depth = 8, n.minobsinnode = 10, n.trees = 2500)
# Fit the casual- and registered-rider models. Reuse the shared formula
# objects for consistency with the rpart/rf sections (identical terms to
# the previously inlined formulas, so the fits are unchanged).
casual_gbm <- train(casual, data = subTrain, method = "gbm", trControl = fitControl3, metric = "RMSE", maximize = FALSE, tuneGrid = Grid3)
casual_gbm
casual_predgbm <- predict(casual_gbm, newdata = subTest)
registered_gbm <- train(registered, data = subTrain, method = "gbm", trControl = fitControl3, metric = "RMSE", maximize = FALSE, tuneGrid = Grid3)
registered_gbm
registered_predgbm <- predict(registered_gbm, newdata = subTest)
# NOTE(review): unlike the rpart/rf sections, this total is not rounded —
# confirm whether round() was intentionally omitted here.
count_gbm <- casual_predgbm + registered_predgbm
# Hold-out RMSE for the boosted model.
rmse_gbm <- sqrt(mean((count_gbm - subTest$count)^2))
rmse_gbm
# (The lines below are non-code artifacts appended during file extraction;
# commented out so the script parses.)
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.