content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
library(glmnet)
#' Fit a 10-fold cross-validated LASSO (alpha = 1) without an intercept and
#' predict responses for new observations at the MSE-minimising lambda.
#' Requires the glmnet package (loaded above).
#'
#' @param X Numeric predictor matrix (rows = observations).
#' @param Y Numeric response vector.
#' @param new_X Matrix of new observations to predict for.
#' @return Predictions at s = "lambda.min" (column matrix from glmnet).
LASSO <- function(X, Y, new_X) {
# Use `<-` for assignment and spell out FALSE (T/F are reassignable).
cvfit <- cv.glmnet(X, Y, alpha = 1, type.measure = "mse", nfolds = 10, intercept = FALSE)
predict(cvfit, newx = new_X, s = "lambda.min")
}
#' Estimate the number of factors K by an information criterion: log of the
#' mean-squared reconstruction error of the rank-k PCA fit plus a Bai-Ng
#' style penalty k * (n+p)/(np) * log(np/(n+p)).
#'
#' @param X Numeric data matrix (n x p).
#' @param K_max Largest rank considered; must not exceed min(n, p).
#' @return The k in 1..K_max minimising the criterion.
Est_K <- function(X, K_max = 30) {
n <- nrow(X); p <- ncol(X)
U <- svd(X, nv = 0)$u                   # left singular vectors only
penalty <- (n + p) / n / p * log(p * n / (p + n))
PCs <- t(U) %*% X                       # principal component scores (rows)
loss <- vector("numeric", K_max)
for (k in seq_len(K_max)) {             # seq_len() is safe if K_max == 0
# drop = FALSE keeps matrix shape for k == 1 (the original relied on
# as.matrix() to undo the accidental drop to a vector)
recon <- U[, seq_len(k), drop = FALSE] %*% PCs[seq_len(k), , drop = FALSE]
loss[k] <- log(norm(X - recon, "F")^2 / n / p) + k * penalty
}
which.min(loss)
}
#' Estimate the number of factors by the eigenvalue-ratio criterion: pick
#' the k maximising lambda_k / lambda_(k+1) among the top K_max squared
#' singular values of X.
#'
#' @param X Numeric data matrix.
#' @param K_max Number of leading eigenvalues to inspect.
#' @return Index (1-based) of the largest consecutive-eigenvalue ratio.
Est_K_ratio <- function(X, K_max = 30) {
  sing_vals <- svd(X, nu = 0, nv = 0)$d[1:K_max]
  eig_vals <- sing_vals^2
  n_eig <- length(eig_vals)
  consec_ratio <- eig_vals[-n_eig] / eig_vals[-1]
  which.max(consec_ratio)
}
#' Principal component regression.
#'
#' When K is NULL the number of components is selected by Est_K (option
#' "additive") or Est_K_ratio (any other option), capped at min(n, p) %/% 2;
#' a selection of 0 is bumped to 1.
#'
#' @param X Numeric predictor matrix (n x p).
#' @param Y Numeric response vector.
#' @param K Number of components; NULL to select automatically.
#' @param option Selection rule when K is NULL: "additive" or ratio-based.
#' @return List with theta (p x 1 coefficients), fit (fitted values),
#'   K (number of components used), Z (scores) and A (scaled loadings).
PCR <- function(X, Y, K = NULL, option = "additive") {
  n_obs <- nrow(X)
  n_var <- ncol(X)
  rank_cap <- min(n_obs, n_var) %/% 2
  if (is.null(K)) {
    K_hat <- if (option == "additive") Est_K(X, rank_cap) else Est_K_ratio(X, rank_cap)
  } else {
    K_hat <- K
  }
  if (K_hat == 0) K_hat <- 1  # never use a zero-dimensional factor space
  loadings <- svd(X, nu = 0, nv = K_hat)$v
  Z_hat <- X %*% loadings / n_var
  beta_hat <- solve(crossprod(Z_hat), crossprod(Z_hat, Y))
  list(theta = loadings %*% beta_hat / n_var,
       fit = Z_hat %*% beta_hat,
       K = K_hat,
       Z = Z_hat,
       A = loadings / n_var)
}
# Partial least squares prediction (requires the `pls` package: plsr(),
# selectNcomp()). Fits PLS on (trainX, trainY) with 5-segment CV, picks the
# number of components by the randomization test (forcing at least 1) and
# returns predictions for validX.
PLS <- function(trainX, trainY, validX) {
# Stack training and validation rows so both share one model frame;
# validation responses are NA placeholders.
whole_data <- data.frame(Y = c(trainY, rep(NA, nrow(validX))), X = rbind(trainX, validX), row.names = NULL)
training_index <- 1:nrow(trainX)
fit_pls <- plsr(Y ~ ., data = whole_data[training_index, ], validation = "CV", segments = 5)
n_comp <- selectNcomp(fit_pls, method = "randomization")
n_comp <- ifelse(n_comp == 0, 1, n_comp) # never predict with 0 components
res_pred <- predict(fit_pls, comps = n_comp, newdata = whole_data[-training_index, ],
type = "response")
return(res_pred)
}
|
/code/Other_algorithms.R
|
no_license
|
jishnu-lab/ER
|
R
| false
| false
| 1,853
|
r
|
# LASSO regression via glmnet: 10-fold CV, no intercept; predictions are
# taken at the MSE-minimising lambda.
library(glmnet)
LASSO <- function(X, Y, new_X) {
cvfit = cv.glmnet(X, Y, alpha = 1, type.measure = "mse", nfolds = 10, intercept = F)
predict(cvfit, newx = new_X, s = "lambda.min")
}
# Select the number of factors K by an information criterion: log MSE of the
# rank-k PCA reconstruction plus a Bai-Ng style penalty
# k*(n+p)/(np)*log(np/(n+p)). K_max must not exceed min(n, p).
Est_K <- function(X, K_max = 30) {
n <- nrow(X); p <- ncol(X)
U <- svd(X, nv = 0)$u
penalty <- (n + p) / n / p * log(p * n / (p + n))
PCs <- t(U) %*% X
loss <- vector("numeric", K_max)
for (k in 1:K_max) {
loss[k] <- log(norm(X - as.matrix(U[,1:k]) %*% PCs[1:k,], "F") ** 2 / n / p) + k * penalty
}
which.min(loss)
}
# Select K by the eigenvalue-ratio criterion: the k maximising the ratio of
# consecutive squared singular values of X.
Est_K_ratio <- function(X, K_max = 30) {
eigens <- (svd(X, nu = 0, nv = 0)$d[1:K_max]) ** 2
L <- length(eigens)
ratios <- eigens[1:(L-1)] / eigens[2:L]
which.max(ratios)
}
# Principal component regression. If K is not given it is selected by Est_K
# ("additive") or Est_K_ratio (otherwise), capped at min(n, p) %/% 2.
# Returns coefficients (theta), fitted values, chosen K, scores Z and
# scaled loadings A.
PCR <- function(X, Y, K = NULL, option = "additive") {
n <- nrow(X); p <- ncol(X)
K_max <- min(n, p) %/% 2
if (is.null(K)) {
if (option == "additive")
K_hat <- Est_K(X, K_max)
else
K_hat <- Est_K_ratio(X, K_max)
} else
K_hat <- K
K_hat <- ifelse(K_hat == 0, 1, K_hat) # if the selected K is 0, set it to 1 instead
U <- svd(X, nu = 0, nv = K_hat)$v
Z_hat <- X %*% U / p
beta_hat <- solve(crossprod(Z_hat), crossprod(Z_hat, Y))
return(list(theta = U %*% beta_hat / p, fit = Z_hat %*% beta_hat, K = K_hat,
Z = Z_hat, A = U / p))
}
# Partial least squares prediction (pls package). Components are chosen by
# selectNcomp's randomization test, forced to at least 1.
PLS <- function(trainX, trainY, validX) {
# Stack training and validation rows; validation responses are NA placeholders.
whole_data <- data.frame(Y = c(trainY, rep(NA, nrow(validX))), X = rbind(trainX, validX), row.names = NULL)
training_index <- 1:nrow(trainX)
fit_pls <- plsr(Y ~ ., data = whole_data[training_index, ], validation = "CV", segments = 5)
n_comp <- selectNcomp(fit_pls, method = "randomization")
n_comp <- ifelse(n_comp == 0, 1, n_comp)
res_pred <- predict(fit_pls, comps = n_comp, newdata = whole_data[-training_index, ],
type = "response")
return(res_pred)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getExportedValue.R
\name{getExportedValue}
\alias{getExportedValue}
\title{fun_name}
\usage{
getExportedValue(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
kolejna funkcja podmieniona
}
\keyword{Gruba}
\keyword{Przy}
\keyword{boski}
\keyword{chillout}
\keyword{piwerku}
\keyword{rozkmina}
\keyword{sie}
\keyword{toczy}
|
/man/getExportedValue.Rd
|
no_license
|
granatb/RapeR
|
R
| false
| true
| 412
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getExportedValue.R
\name{getExportedValue}
\alias{getExportedValue}
\title{fun_name}
\usage{
getExportedValue(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
kolejna funkcja podmieniona
}
\keyword{Gruba}
\keyword{Przy}
\keyword{boski}
\keyword{chillout}
\keyword{piwerku}
\keyword{rozkmina}
\keyword{sie}
\keyword{toczy}
|
## if(getRversion() < "2.13") {
## nobs <- function (object, ...) UseMethod("nobs")
## ## also used for mlm fits *and* lmrob :
## nobs.lm <- function(object, ...)
## if(!is.null(w <- object$weights)) sum(w != 0) else NROW(object$residuals)
## ## for glmrob :
## nobs.glm <- function(object, ...) sum(!is.na(object$residuals))
## }
## Here and in NAMESPACE:
## Backports for R < 3.1.0: format.perc(), a t-based confint.lm() and
## dummy.coef.lm(), copied from R's own sources so newer-R behaviour is
## available on old installations.
if(getRversion() < "3.1.0") {
## cut'n'paste from R's source src/library/stats/R/confint.R
format.perc <- function(probs, digits)
## Not yet exported, maybe useful in other contexts:
## quantile.default() sometimes uses a version of it
paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits),
"%")
## Confidence intervals for lm coefficients using the t distribution with
## the model's residual degrees of freedom.
confint.lm <- function(object, parm, level = 0.95, ...)
{
cf <- coef(object)
pnames <- names(cf)
if(missing(parm)) parm <- pnames
else if(is.numeric(parm)) parm <- pnames[parm]
a <- (1 - level)/2
a <- c(a, 1 - a)
fac <- qt(a, object$df.residual) # difference from default method
pct <- format.perc(a, 3)
ci <- array(NA, dim = c(length(parm), 2L),
dimnames = list(parm, pct))
ses <- sqrt(diag(vcov(object)))[parm] # gives NA for aliased parms
ci[] <- cf[parm] + ses %o% fac
ci
}
## cut'n'paste from R's source src/library/stats/R/dummy.coef.R
## Expand lm coefficients to one entry per factor level / interaction cell.
dummy.coef.lm <- function(object, use.na=FALSE, ...)
{
Terms <- terms(object)
tl <- attr(Terms, "term.labels")
int <- attr(Terms, "intercept")
facs <- attr(Terms, "factors")[-1, , drop=FALSE]
Terms <- delete.response(Terms)
vars <- all.vars(Terms)
xl <- object$xlevels
if(!length(xl)) { # no factors in model
return(as.list(coef(object)))
}
nxl <- setNames(rep.int(1, length(vars)), vars)
tmp <- unlist(lapply(xl, length)) ## ?? vapply(xl, length, 1L)
nxl[names(tmp)] <- tmp
lterms <- apply(facs, 2L, function(x) prod(nxl[x > 0]))
nl <- sum(lterms)
## Build a synthetic data frame with one row per dummy-coefficient cell.
args <- setNames(vector("list", length(vars)), vars)
for(i in vars)
args[[i]] <- if(nxl[[i]] == 1) rep.int(1, nl)
else factor(rep.int(xl[[i]][1L], nl), levels = xl[[i]])
dummy <- do.call("data.frame", args)
pos <- 0
rn <- rep.int(tl, lterms)
rnn <- rep.int("", nl)
for(j in tl) {
i <- vars[facs[, j] > 0]
ifac <- i[nxl[i] > 1]
if(length(ifac) == 0L) { # quantitative factor
rnn[pos+1] <- j
} else if(length(ifac) == 1L) { # main effect
dummy[ pos+1L:lterms[j], ifac ] <- xl[[ifac]]
rnn[ pos+1L:lterms[j] ] <- as.character(xl[[ifac]])
} else { # interaction
tmp <- expand.grid(xl[ifac])
dummy[ pos+1L:lterms[j], ifac ] <- tmp
rnn[ pos+1L:lterms[j] ] <-
apply(as.matrix(tmp), 1L, function(x) paste(x, collapse=":"))
}
pos <- pos + lterms[j]
}
## some terms like poly(x,1) will give problems here, so allow
## NaNs and set to NA afterwards.
mf <- model.frame(Terms, dummy, na.action=function(x)x, xlev=xl)
mm <- model.matrix(Terms, mf, object$contrasts, xl)
if(any(is.na(mm))) {
warning("some terms will have NAs due to the limits of the method")
mm[is.na(mm)] <- NA
}
coef <- object$coefficients
if(!use.na) coef[is.na(coef)] <- 0
asgn <- attr(mm,"assign")
## Multiply out the model matrix term by term to get per-level effects.
res <- setNames(vector("list", length(tl)), tl)
for(j in seq_along(tl)) {
keep <- asgn == j
ij <- rn == tl[j]
res[[j]] <-
setNames(drop(mm[ij, keep, drop=FALSE] %*% coef[keep]), rnn[ij])
}
if(int > 0) {
res <- c(list("(Intercept)" = coef[int]), res)
}
class(res) <- "dummy_coef"
res
}
}# if R <= 3.1.0
## Gate for extra (slow) checks: run them when interactive or when one of
## the opt-in environment variables is set. Not exported; keeps CRAN
## checks fast.
doExtras <- function() {
  in_session <- interactive()
  extra_flag <- nzchar(Sys.getenv("R_robustbase_check_extra"))
  pkg_flag <- identical("true", unname(Sys.getenv("R_PKG_CHECKING_doExtras")))
  in_session || extra_flag || pkg_flag
}
## Backport for R < 3.3: stats::sigma() only exists from R 3.3.0, so define
## the generic (and an nls method) here for older installations.
if(getRversion() < "3.3") {
sigma <- function(object, ...) UseMethod("sigma")
## For completeness, and when comparing with nlrob() results:
## (residual standard error of an nls fit)
sigma.nls <- function(object, ...)
## sqrt ( sum( R_i ^ 2) / (n - p) ) :
sqrt( deviance(object) / (nobs(object) - length(coef(object))) )
}
## Comma-join helper, used by the print() methods and friends.
pasteK <- function(...) {
  paste(..., collapse = ", ")
}
## TRUE iff `x` is a length-one numeric vector; handy inside stopifnot().
is.1num <- function(x) {
  is.numeric(x) && length(x) == 1L
}
|
/pkgs/robustbase/R/AAA.R
|
no_license
|
vaguiar/EDAV_Project_2017
|
R
| false
| false
| 4,260
|
r
|
## if(getRversion() < "2.13") {
## nobs <- function (object, ...) UseMethod("nobs")
## ## also used for mlm fits *and* lmrob :
## nobs.lm <- function(object, ...)
## if(!is.null(w <- object$weights)) sum(w != 0) else NROW(object$residuals)
## ## for glmrob :
## nobs.glm <- function(object, ...) sum(!is.na(object$residuals))
## }
## Here and in NAMESPACE:
## Backports for R < 3.1.0: format.perc(), a t-based confint.lm() and
## dummy.coef.lm(), copied from R's own sources so newer-R behaviour is
## available on old installations.
if(getRversion() < "3.1.0") {
## cut'n'paste from R's source src/library/stats/R/confint.R
format.perc <- function(probs, digits)
## Not yet exported, maybe useful in other contexts:
## quantile.default() sometimes uses a version of it
paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits),
"%")
## Confidence intervals for lm coefficients using the t distribution with
## the model's residual degrees of freedom.
confint.lm <- function(object, parm, level = 0.95, ...)
{
cf <- coef(object)
pnames <- names(cf)
if(missing(parm)) parm <- pnames
else if(is.numeric(parm)) parm <- pnames[parm]
a <- (1 - level)/2
a <- c(a, 1 - a)
fac <- qt(a, object$df.residual) # difference from default method
pct <- format.perc(a, 3)
ci <- array(NA, dim = c(length(parm), 2L),
dimnames = list(parm, pct))
ses <- sqrt(diag(vcov(object)))[parm] # gives NA for aliased parms
ci[] <- cf[parm] + ses %o% fac
ci
}
## cut'n'paste from R's source src/library/stats/R/dummy.coef.R
## Expand lm coefficients to one entry per factor level / interaction cell.
dummy.coef.lm <- function(object, use.na=FALSE, ...)
{
Terms <- terms(object)
tl <- attr(Terms, "term.labels")
int <- attr(Terms, "intercept")
facs <- attr(Terms, "factors")[-1, , drop=FALSE]
Terms <- delete.response(Terms)
vars <- all.vars(Terms)
xl <- object$xlevels
if(!length(xl)) { # no factors in model
return(as.list(coef(object)))
}
nxl <- setNames(rep.int(1, length(vars)), vars)
tmp <- unlist(lapply(xl, length)) ## ?? vapply(xl, length, 1L)
nxl[names(tmp)] <- tmp
lterms <- apply(facs, 2L, function(x) prod(nxl[x > 0]))
nl <- sum(lterms)
## Build a synthetic data frame with one row per dummy-coefficient cell.
args <- setNames(vector("list", length(vars)), vars)
for(i in vars)
args[[i]] <- if(nxl[[i]] == 1) rep.int(1, nl)
else factor(rep.int(xl[[i]][1L], nl), levels = xl[[i]])
dummy <- do.call("data.frame", args)
pos <- 0
rn <- rep.int(tl, lterms)
rnn <- rep.int("", nl)
for(j in tl) {
i <- vars[facs[, j] > 0]
ifac <- i[nxl[i] > 1]
if(length(ifac) == 0L) { # quantitative factor
rnn[pos+1] <- j
} else if(length(ifac) == 1L) { # main effect
dummy[ pos+1L:lterms[j], ifac ] <- xl[[ifac]]
rnn[ pos+1L:lterms[j] ] <- as.character(xl[[ifac]])
} else { # interaction
tmp <- expand.grid(xl[ifac])
dummy[ pos+1L:lterms[j], ifac ] <- tmp
rnn[ pos+1L:lterms[j] ] <-
apply(as.matrix(tmp), 1L, function(x) paste(x, collapse=":"))
}
pos <- pos + lterms[j]
}
## some terms like poly(x,1) will give problems here, so allow
## NaNs and set to NA afterwards.
mf <- model.frame(Terms, dummy, na.action=function(x)x, xlev=xl)
mm <- model.matrix(Terms, mf, object$contrasts, xl)
if(any(is.na(mm))) {
warning("some terms will have NAs due to the limits of the method")
mm[is.na(mm)] <- NA
}
coef <- object$coefficients
if(!use.na) coef[is.na(coef)] <- 0
asgn <- attr(mm,"assign")
## Multiply out the model matrix term by term to get per-level effects.
res <- setNames(vector("list", length(tl)), tl)
for(j in seq_along(tl)) {
keep <- asgn == j
ij <- rn == tl[j]
res[[j]] <-
setNames(drop(mm[ij, keep, drop=FALSE] %*% coef[keep]), rnn[ij])
}
if(int > 0) {
res <- c(list("(Intercept)" = coef[int]), res)
}
class(res) <- "dummy_coef"
res
}
}# if R <= 3.1.0
## Gate for extra (slow) checks: run them when interactive or when one of
## the opt-in environment variables is set. Not exported; keeps CRAN
## checks fast.
doExtras <- function() {
  in_session <- interactive()
  extra_flag <- nzchar(Sys.getenv("R_robustbase_check_extra"))
  pkg_flag <- identical("true", unname(Sys.getenv("R_PKG_CHECKING_doExtras")))
  in_session || extra_flag || pkg_flag
}
## Backport for R < 3.3: stats::sigma() only exists from R 3.3.0, so define
## the generic (and an nls method) here for older installations.
if(getRversion() < "3.3") {
sigma <- function(object, ...) UseMethod("sigma")
## For completeness, and when comparing with nlrob() results:
## (residual standard error of an nls fit)
sigma.nls <- function(object, ...)
## sqrt ( sum( R_i ^ 2) / (n - p) ) :
sqrt( deviance(object) / (nobs(object) - length(coef(object))) )
}
## Comma-join helper, used by the print() methods and friends.
pasteK <- function(...) {
  paste(..., collapse = ", ")
}
## TRUE iff `x` is a length-one numeric vector; handy inside stopifnot().
is.1num <- function(x) {
  is.numeric(x) && length(x) == 1L
}
|
#' Example breast cancer RNA editing dataset.
#'
#' @description A subset of the TCGA breast cancer RNA editing dataset for 272
#' edited sites on genes PHACTR4, CCR5, METTL7A and a few randomly sampled
#' sites for 221 subjects.
#'
#' @format A data frame containing RNA editing levels for 272 sites (in the
#' rows) for 221 subjects (in the columns). Row names are site IDs and column
#' names are sample IDs.
#'
#' @source Synapse database ID: syn2374375.
#'
"rnaedit_df"
|
/R/data_rnaedit_df.R
|
no_license
|
TransBioInfoLab/rnaEditr
|
R
| false
| false
| 485
|
r
|
#' Example breast cancer RNA editing dataset.
#'
#' @description A subset of the TCGA breast cancer RNA editing dataset for 272
#' edited sites on genes PHACTR4, CCR5, METTL7A and a few randomly sampled
#' sites for 221 subjects.
#'
#' @format A data frame containing RNA editing levels for 272 sites (in the
#' rows) for 221 subjects (in the columns). Row names are site IDs and column
#' names are sample IDs.
#'
#' @source Synapse database ID: syn2374375.
#'
"rnaedit_df"
|
### Principal component analysis
### Example with artificial two-dimensional data
set.seed(123)
n <- 100 # number of observations
(a <- c(1, 2)/sqrt(5)) # principal component direction (unit vector)
mydata <- data.frame(runif(n,-1,1) %o% a + rnorm(2*n, sd=0.2))
names(mydata) <- paste0("x",1:2) # observed data
## The data are scalar multiples of `a` plus Gaussian noise, so the
## essential information is concentrated along the direction of `a`.
head(mydata) # show the first rows of the data
plot(mydata, asp=1, # scatter plot with aspect ratio 1
pch=4, col="blue") # point symbol (x) and colour (blue)
abline(0, a[2]/a[1], col="red", lwd=2) # draw the principal component direction
## Estimate the principal component direction
(est <- prcomp(mydata))
## The first principal component direction is very close to `a` (opposite sign)
abline(0, est$rotation[2,1]/est$rotation[1,1], # draw the estimate
col="orange", lty="dotted", lwd=2)
## Compute the principal component scores
head(predict(est)) # show the first few scores
pc1 <- predict(est)[,1] # extract the first principal component scores
points(pc1 %o% est$rotation[,1], # plot them back in the original scatter plot
pch=18, col="purple")
plot(predict(est), asp=1, # scatter plot of the principal component scores
pch=4, col="blue")
|
/docs/autumn/code/07-toy.r
|
no_license
|
noboru-murata/sda
|
R
| false
| false
| 1,188
|
r
|
### Principal component analysis
### Example with artificial two-dimensional data
set.seed(123)
n <- 100 # number of observations
(a <- c(1, 2)/sqrt(5)) # principal component direction (unit vector)
mydata <- data.frame(runif(n,-1,1) %o% a + rnorm(2*n, sd=0.2))
names(mydata) <- paste0("x",1:2) # observed data
## The data are scalar multiples of `a` plus Gaussian noise, so the
## essential information is concentrated along the direction of `a`.
head(mydata) # show the first rows of the data
plot(mydata, asp=1, # scatter plot with aspect ratio 1
pch=4, col="blue") # point symbol (x) and colour (blue)
abline(0, a[2]/a[1], col="red", lwd=2) # draw the principal component direction
## Estimate the principal component direction
(est <- prcomp(mydata))
## The first principal component direction is very close to `a` (opposite sign)
abline(0, est$rotation[2,1]/est$rotation[1,1], # draw the estimate
col="orange", lty="dotted", lwd=2)
## Compute the principal component scores
head(predict(est)) # show the first few scores
pc1 <- predict(est)[,1] # extract the first principal component scores
points(pc1 %o% est$rotation[,1], # plot them back in the original scatter plot
pch=18, col="purple")
plot(predict(est), asp=1, # scatter plot of the principal component scores
pch=4, col="blue")
|
#' Compute the risk set for recurrence data
#'
#' Builds the vector of unique event times and, via the compiled RISKSET
#' routine, the per-time counts used in recurrence (MCF) analyses.
#'
#' @param data.rdu A recurrence-data object (e.g. from frame.to.rdu())
#'   carrying "time.column", "event.column" and "WindowInfo" attributes.
#' @param kdebug1 Debug flag forwarded (as integer) to the compiled routine.
#' @param JustEvent If TRUE the time grid is the unique recurrence-event
#'   times only; otherwise 0 and the window endpoints are included too.
#'
#' @return A list with Times (unique sorted times), Counts (delta values
#'   filled in by the compiled routine) and NumberUnits.
#' @export
#' @name RiskSet
#' @rdname RiskSet_r
#'
#' @examples
#' \dontrun{
#'
#' halfbeak.rdu <- frame.to.rdu(halfbeak,
#' ID.column = "unit",
#' time.column = "hours" ,
#' event.column = "event",
#' data.title = "Halfbeak Data",
#' time.units = "Thousands of Hours of Operation")
#'
#' RiskSet(halfbeak.rdu)
#'
#' }
RiskSet <-
function (data.rdu,
kdebug1 = F,
JustEvent = T)
{
# Column names and window metadata travel as attributes on the rdu object.
time.column <- attr(data.rdu, "time.column")
event.column <- attr(data.rdu, "event.column")
WindowInfo <- attr(data.rdu, "WindowInfo")
event <- data.rdu[, event.column]
Times <- data.rdu[, time.column]
# "start"/"end" (and "mstart"/"mend") markers delimit observation windows;
# everything else counts as a recurrence event.
EndPoints <- is.element(casefold(event), c("end", "mend"))
StartPoints <- is.element(casefold(event), c("start", "mstart"))
Cevent <- !(EndPoints | StartPoints)
# `if`() called as a function: choose the time grid depending on JustEvent.
`if`(JustEvent,
tuniq <- unique(sort(Times[Cevent])),
tuniq <- unique(sort(c(0, Times[Cevent], WindowInfo$WindowL, WindowInfo$WindowU))))
# Delegate the counting to the compiled RISKSET routine (defined
# elsewhere); iordl/iordu/delta/iscrat are output/scratch buffers it fills.
zout <- RISKSET(muniqrecurr = as.integer(length(tuniq)),
tuniq = as.double(tuniq),
nwindows = as.integer(length(WindowInfo$WindowU)),
twindowsl = as.double(WindowInfo$WindowL),
twindowsu = as.double(WindowInfo$WindowU),
wcounts = as.integer(WindowInfo$WindowCounts),
iordl = integer(length(WindowInfo$WindowL)),
iordu = integer(length(WindowInfo$WindowL)),
delta = integer(length(tuniq)),
kdebug = as.integer(kdebug1),
iscrat = integer(length(WindowInfo$WindowL)))
return(list(Times = zout$tuniq,
Counts = zout$delta,
NumberUnits = length(unique(get.UnitID(data.rdu)))))
}
|
/R/RiskSet.R
|
no_license
|
anhnguyendepocen/SMRD
|
R
| false
| false
| 2,036
|
r
|
#' Compute the risk set for recurrence data
#'
#' Builds the vector of unique event times and, via the compiled RISKSET
#' routine, the per-time counts used in recurrence (MCF) analyses.
#'
#' @param data.rdu A recurrence-data object (e.g. from frame.to.rdu())
#'   carrying "time.column", "event.column" and "WindowInfo" attributes.
#' @param kdebug1 Debug flag forwarded (as integer) to the compiled routine.
#' @param JustEvent If TRUE the time grid is the unique recurrence-event
#'   times only; otherwise 0 and the window endpoints are included too.
#'
#' @return A list with Times (unique sorted times), Counts (delta values
#'   filled in by the compiled routine) and NumberUnits.
#' @export
#' @name RiskSet
#' @rdname RiskSet_r
#'
#' @examples
#' \dontrun{
#'
#' halfbeak.rdu <- frame.to.rdu(halfbeak,
#' ID.column = "unit",
#' time.column = "hours" ,
#' event.column = "event",
#' data.title = "Halfbeak Data",
#' time.units = "Thousands of Hours of Operation")
#'
#' RiskSet(halfbeak.rdu)
#'
#' }
RiskSet <-
function (data.rdu,
kdebug1 = F,
JustEvent = T)
{
# Column names and window metadata travel as attributes on the rdu object.
time.column <- attr(data.rdu, "time.column")
event.column <- attr(data.rdu, "event.column")
WindowInfo <- attr(data.rdu, "WindowInfo")
event <- data.rdu[, event.column]
Times <- data.rdu[, time.column]
# "start"/"end" (and "mstart"/"mend") markers delimit observation windows;
# everything else counts as a recurrence event.
EndPoints <- is.element(casefold(event), c("end", "mend"))
StartPoints <- is.element(casefold(event), c("start", "mstart"))
Cevent <- !(EndPoints | StartPoints)
# `if`() called as a function: choose the time grid depending on JustEvent.
`if`(JustEvent,
tuniq <- unique(sort(Times[Cevent])),
tuniq <- unique(sort(c(0, Times[Cevent], WindowInfo$WindowL, WindowInfo$WindowU))))
# Delegate the counting to the compiled RISKSET routine (defined
# elsewhere); iordl/iordu/delta/iscrat are output/scratch buffers it fills.
zout <- RISKSET(muniqrecurr = as.integer(length(tuniq)),
tuniq = as.double(tuniq),
nwindows = as.integer(length(WindowInfo$WindowU)),
twindowsl = as.double(WindowInfo$WindowL),
twindowsu = as.double(WindowInfo$WindowU),
wcounts = as.integer(WindowInfo$WindowCounts),
iordl = integer(length(WindowInfo$WindowL)),
iordu = integer(length(WindowInfo$WindowL)),
delta = integer(length(tuniq)),
kdebug = as.integer(kdebug1),
iscrat = integer(length(WindowInfo$WindowL)))
return(list(Times = zout$tuniq,
Counts = zout$delta,
NumberUnits = length(unique(get.UnitID(data.rdu)))))
}
|
# Load the dataset and impute missing numeric values with the column mean.
datasets <- read.csv("Data.csv")

# Replace NA entries in a numeric column by that column's mean.
# (Equivalent to the ifelse(is.na(.), ave(., FUN = mean), .) pattern, but
# without computing the full ave() vector twice per column.)
impute_mean <- function(x) {
  ifelse(is.na(x), mean(x, na.rm = TRUE), x)
}

datasets$Age <- impute_mean(datasets$Age)
datasets$Salary <- impute_mean(datasets$Salary)
|
/Part 1 - Data Preprocessing/test_R_1.R
|
no_license
|
taimurIslam/Machine-Learning-A-Z
|
R
| false
| false
| 369
|
r
|
# Load the dataset and impute missing numeric values with the column mean.
datasets <- read.csv("Data.csv")

# Replace NA entries in a numeric column by that column's mean.
# (Equivalent to the ifelse(is.na(.), ave(., FUN = mean), .) pattern, but
# without computing the full ave() vector twice per column.)
impute_mean <- function(x) {
  ifelse(is.na(x), mean(x, na.rm = TRUE), x)
}

datasets$Age <- impute_mean(datasets$Age)
datasets$Salary <- impute_mean(datasets$Salary)
|
#' Plot exploitation indices from the three-year aggregated cohort analyses.
#'
#' For each of LFAs 27, 29, 32 and 33, overlays the exploitation-index time
#' series from up to three sources (at-sea samples, port samples, FSRS) and
#' writes the figure to a PNG in the project figure directory.
#'
#' @param atSea,port,fsrs Data frames with columns LFA, Year.min, Year.max
#'   and expl; any may be NULL to omit that source.
#' @param out.dir Project name passed to project.figuredirectory().
#' @param subset If TRUE, prefix the output file names with "subset".
#' @export
CAplotsMultDataSets <- function(atSea = NULL, port = NULL, fsrs = NULL,
                                out.dir = 'bio.lobster', subset = FALSE) {
  # using the three year aggregated LCAs
  fd <- file.path(project.figuredirectory(out.dir), 'CohortAnalysisPlots')
  dir.create(fd, recursive = TRUE, showWarnings = FALSE)
  lf <- c(27, 29, 32, 33)
  for (i in lf) {
    fname <- paste0('CombinedDataCohortAnalysisExploitationPlotsLFA', i, '.png')
    if (subset) fname <- paste0('subset', fname)
    # Accumulate legend labels/colours/line types and the pooled axis ranges.
    cols <- c(); nn <- c(); yt <- c(); xp <- c(); lt <- c()
    if (!is.null(atSea)) {
      o <- subset(atSea, LFA == i)
      o$YEAR <- apply(o[, c('Year.min', 'Year.max')], 1, mean)  # window midpoint
      cols <- c(cols, 'black'); nn <- c(nn, 'AtSea')
      yt <- c(yt, o$YEAR); xp <- c(xp, o$expl); lt <- c(lt, 1)
    }
    if (!is.null(port)) {
      p <- subset(port, LFA == i)
      p$YEAR <- apply(p[, c('Year.min', 'Year.max')], 1, mean)
      cols <- c(cols, 'red'); nn <- c(nn, 'Port')
      yt <- c(yt, p$YEAR); xp <- c(xp, p$expl); lt <- c(lt, 2)
    }
    if (!is.null(fsrs)) {
      r <- subset(fsrs, LFA == i)
      if (nrow(r) > 0) {
        r$YEAR <- apply(r[, c('Year.min', 'Year.max')], 1, mean)
        cols <- c(cols, 'blue'); nn <- c(nn, 'FSRS')
        yt <- c(yt, r$YEAR); xp <- c(xp, r$expl); lt <- c(lt, 3)
      }
    }
    png(file = file.path(fd, fname), units = 'in', width = 15, height = 12,
        pointsize = 24, res = 300, type = 'cairo')
    xr <- range(yt)
    yr <- range(xp)
    plot(1, 1, type = 'n', xlab = 'Year', ylab = 'Exploitation Index',
         main = paste('LFA', i), xlim = xr, ylim = yr)
    if (!is.null(atSea)) with(o, lines(YEAR, expl, lty = 1, col = 'black', lwd = 2, pch = 16, type = 'b'))
    if (!is.null(port)) with(p, lines(YEAR, expl, lty = 2, col = 'red', lwd = 2, pch = 16, type = 'b'))
    # BUG FIX: use short-circuit && -- the original `&` evaluated nrow(r)
    # even when fsrs was NULL, crashing on the then-undefined object `r`.
    if (!is.null(fsrs) && nrow(r) > 0) with(r, lines(YEAR, expl, lty = 3, col = 'blue', lwd = 2, pch = 16, type = 'b'))
    legend('bottomright', legend = nn, col = cols, lty = lt, lwd = 2, bty = 'n', cex = 0.8)
    dev.off()
  }
}
|
/R/CAplotsMultDataSets.r
|
no_license
|
LobsterScience/bio.lobster
|
R
| false
| false
| 1,754
|
r
|
#' Plot exploitation indices from the three-year aggregated cohort analyses.
#'
#' For each of LFAs 27, 29, 32 and 33, overlays the exploitation-index time
#' series from up to three sources (at-sea samples, port samples, FSRS) and
#' writes the figure to a PNG in the project figure directory.
#'
#' @param atSea,port,fsrs Data frames with columns LFA, Year.min, Year.max
#'   and expl; any may be NULL to omit that source.
#' @param out.dir Project name passed to project.figuredirectory().
#' @param subset If TRUE, prefix the output file names with "subset".
#' @export
CAplotsMultDataSets <- function(atSea = NULL, port = NULL, fsrs = NULL,
                                out.dir = 'bio.lobster', subset = FALSE) {
  # using the three year aggregated LCAs
  fd <- file.path(project.figuredirectory(out.dir), 'CohortAnalysisPlots')
  dir.create(fd, recursive = TRUE, showWarnings = FALSE)
  lf <- c(27, 29, 32, 33)
  for (i in lf) {
    fname <- paste0('CombinedDataCohortAnalysisExploitationPlotsLFA', i, '.png')
    if (subset) fname <- paste0('subset', fname)
    # Accumulate legend labels/colours/line types and the pooled axis ranges.
    cols <- c(); nn <- c(); yt <- c(); xp <- c(); lt <- c()
    if (!is.null(atSea)) {
      o <- subset(atSea, LFA == i)
      o$YEAR <- apply(o[, c('Year.min', 'Year.max')], 1, mean)  # window midpoint
      cols <- c(cols, 'black'); nn <- c(nn, 'AtSea')
      yt <- c(yt, o$YEAR); xp <- c(xp, o$expl); lt <- c(lt, 1)
    }
    if (!is.null(port)) {
      p <- subset(port, LFA == i)
      p$YEAR <- apply(p[, c('Year.min', 'Year.max')], 1, mean)
      cols <- c(cols, 'red'); nn <- c(nn, 'Port')
      yt <- c(yt, p$YEAR); xp <- c(xp, p$expl); lt <- c(lt, 2)
    }
    if (!is.null(fsrs)) {
      r <- subset(fsrs, LFA == i)
      if (nrow(r) > 0) {
        r$YEAR <- apply(r[, c('Year.min', 'Year.max')], 1, mean)
        cols <- c(cols, 'blue'); nn <- c(nn, 'FSRS')
        yt <- c(yt, r$YEAR); xp <- c(xp, r$expl); lt <- c(lt, 3)
      }
    }
    png(file = file.path(fd, fname), units = 'in', width = 15, height = 12,
        pointsize = 24, res = 300, type = 'cairo')
    xr <- range(yt)
    yr <- range(xp)
    plot(1, 1, type = 'n', xlab = 'Year', ylab = 'Exploitation Index',
         main = paste('LFA', i), xlim = xr, ylim = yr)
    if (!is.null(atSea)) with(o, lines(YEAR, expl, lty = 1, col = 'black', lwd = 2, pch = 16, type = 'b'))
    if (!is.null(port)) with(p, lines(YEAR, expl, lty = 2, col = 'red', lwd = 2, pch = 16, type = 'b'))
    # BUG FIX: use short-circuit && -- the original `&` evaluated nrow(r)
    # even when fsrs was NULL, crashing on the then-undefined object `r`.
    if (!is.null(fsrs) && nrow(r) > 0) with(r, lines(YEAR, expl, lty = 3, col = 'blue', lwd = 2, pch = 16, type = 'b'))
    legend('bottomright', legend = nn, col = cols, lty = lt, lwd = 2, bty = 'n', cex = 0.8)
    dev.off()
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vca.R
\name{vca}
\alias{vca}
\title{Title}
\usage{
vca(R, p, SNR = NULL, verbose = F)
}
\arguments{
\item{R}{matrix describing points (possibly lying in a simplex) in high dimensional space}
\item{p}{number of endpoints to find}
\item{SNR}{signal to noise ratio, NULL by default}
\item{verbose}{verbosity, default value is FALSE}
}
\value{
matrix of columns from R which are considered to be endpoints
}
\description{
Title
}
|
/man/vca.Rd
|
no_license
|
ctlab/ClusDec
|
R
| false
| true
| 504
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vca.R
\name{vca}
\alias{vca}
\title{Title}
\usage{
vca(R, p, SNR = NULL, verbose = F)
}
\arguments{
\item{R}{matrix describing points (possibly lying in a simplex) in high dimensional space}
\item{p}{number of endpoints to find}
\item{SNR}{signal to noise ratio, NULL by default}
\item{verbose}{verbosity, default value is FALSE}
}
\value{
matrix of columns from R which are considered to be endpoints
}
\description{
Title
}
|
#
# litvals.R, 24 Apr 18
# Data from:
#
# The New C Standard
# Derek M. Jones
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
# Two colours: one for decimal literals, one for hexadecimal.
pal_col=rainbow(2)
# Occurrence counts of integer literal values in C source code.
int_lit=read.csv(paste0(ESEUR_dir, "sourcecode/intlitvals.csv.xz"), as.is=TRUE)
hex_lit=read.csv(paste0(ESEUR_dir, "sourcecode/hexlitvals.csv.xz"), as.is=TRUE)
# Log-log scatter of how often each numeric value appears as a literal.
plot(int_lit$value, int_lit$occurrences, log="xy", col=pal_col[1],
xlab="Numeric value", ylab="Occurrences\n",
xlim=c(1, 1024))
points(hex_lit$value, hex_lit$occurrences, col=pal_col[2])
legend(x="topright", legend=c("Decimal", "Hexadecimal"),
bty="n", fill=pal_col, cex=1.2)
|
/sourcecode/litvals.R
|
no_license
|
alanponce/ESEUR-code-data
|
R
| false
| false
| 648
|
r
|
#
# litvals.R, 24 Apr 18
# Data from:
#
# The New C Standard
# Derek M. Jones
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
# Two colours: one for decimal literals, one for hexadecimal.
pal_col=rainbow(2)
# Occurrence counts of integer literal values in C source code.
int_lit=read.csv(paste0(ESEUR_dir, "sourcecode/intlitvals.csv.xz"), as.is=TRUE)
hex_lit=read.csv(paste0(ESEUR_dir, "sourcecode/hexlitvals.csv.xz"), as.is=TRUE)
# Log-log scatter of how often each numeric value appears as a literal.
plot(int_lit$value, int_lit$occurrences, log="xy", col=pal_col[1],
xlab="Numeric value", ylab="Occurrences\n",
xlim=c(1, 1024))
points(hex_lit$value, hex_lit$occurrences, col=pal_col[2])
legend(x="topright", legend=c("Decimal", "Hexadecimal"),
bty="n", fill=pal_col, cex=1.2)
|
# ExData plot4: read two days (2007-02-01/02) of the UCI household power
# consumption data and draw a 2x2 panel of time series, then copy the
# screen device to plot4.png.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", nrows = 69516, as.is = TRUE);
data <- rbind( data[data$Date == "1/2/2007" , ] , data[ data$Date == "2/2/2007" , ]);
library(dplyr);
windows();  # Windows-only screen graphics device
par(mfrow = c(2,2))
# First plot (top-left)
data <- mutate(data, Global_active_power = as.numeric(Global_active_power));
data <- mutate(data, Date = as.Date(Date, format = "%d/%m/%Y"));
plot.ts(data$Global_active_power, ylab = "Global Active Power", xlab = "", xaxt = "n");
# Tick positions for a hand-made day axis (start, middle, end of series).
p <- c(1, length(data$Global_active_power)/2, length(data$Global_active_power));
axis(1, at = p, labels = c("Thu", "Fri", "Sat"));
# Second plot (top-right)
plot.ts(data$Voltage, ylab = "Voltage", xlab = "datetime", xaxt = "n");
axis(1, at = p, labels = c("Thu", "Fri", "Sat"));
# Third plot (bottom-left)
time_serie <- select(data, Date, Time, Sub_metering_1, Sub_metering_2, Sub_metering_3);
time_serie <- mutate(time_serie, Time = paste(Date, Time));
time_serie <- select(time_serie, -Date);
pars = list(xlab = "", xaxt = "n", ylab = "Energy sub metering", col = c("blue", "black", "red"))
ts.plot(time_serie, gpars = pars)
# NOTE(review): legend colours c("black","red","blue") do not match the
# series colours c("blue","black","red") set via gpars -- confirm mapping.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
p <- c(1, length(data$Global_active_power)/2, length(data$Global_active_power));
axis(1, at = p, labels = c("Thu", "Fri", "Sat"));
# Forth plot (bottom-right)
plot.ts(data$Global_reactive_power, ylab = "Global_reactive_power", xlab = "datetime", xaxt = "n");
axis(1, at = p, labels = c("Thu", "Fri", "Sat"));
# Copy the on-screen figure to plot4.png, then close the copy device.
dev.copy(png, "plot4.png");
dev.off();
|
/plot4.R
|
no_license
|
marcossf82/ExData_Plotting1
|
R
| false
| false
| 1,605
|
r
|
# ExData plot4: read two days (2007-02-01/02) of the UCI household power
# consumption data and draw a 2x2 panel of time series, then copy the
# screen device to plot4.png.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", nrows = 69516, as.is = TRUE);
data <- rbind( data[data$Date == "1/2/2007" , ] , data[ data$Date == "2/2/2007" , ]);
library(dplyr);
windows();  # Windows-only screen graphics device
par(mfrow = c(2,2))
# First plot (top-left)
data <- mutate(data, Global_active_power = as.numeric(Global_active_power));
data <- mutate(data, Date = as.Date(Date, format = "%d/%m/%Y"));
plot.ts(data$Global_active_power, ylab = "Global Active Power", xlab = "", xaxt = "n");
# Tick positions for a hand-made day axis (start, middle, end of series).
p <- c(1, length(data$Global_active_power)/2, length(data$Global_active_power));
axis(1, at = p, labels = c("Thu", "Fri", "Sat"));
# Second plot (top-right)
plot.ts(data$Voltage, ylab = "Voltage", xlab = "datetime", xaxt = "n");
axis(1, at = p, labels = c("Thu", "Fri", "Sat"));
# Third plot (bottom-left)
time_serie <- select(data, Date, Time, Sub_metering_1, Sub_metering_2, Sub_metering_3);
time_serie <- mutate(time_serie, Time = paste(Date, Time));
time_serie <- select(time_serie, -Date);
pars = list(xlab = "", xaxt = "n", ylab = "Energy sub metering", col = c("blue", "black", "red"))
ts.plot(time_serie, gpars = pars)
# NOTE(review): legend colours c("black","red","blue") do not match the
# series colours c("blue","black","red") set via gpars -- confirm mapping.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
p <- c(1, length(data$Global_active_power)/2, length(data$Global_active_power));
axis(1, at = p, labels = c("Thu", "Fri", "Sat"));
# Forth plot (bottom-right)
plot.ts(data$Global_reactive_power, ylab = "Global_reactive_power", xlab = "datetime", xaxt = "n");
axis(1, at = p, labels = c("Thu", "Fri", "Sat"));
# Copy the on-screen figure to plot4.png, then close the copy device.
dev.copy(png, "plot4.png");
dev.off();
|
# Density of a finite mixture of truncated binomials: evaluates
# dtbino(x, size, Q[i, 1]) for each mixture component (rows of Q) and
# returns the weighted sum using the weights in Q[, 2].
# NOTE(review): relies on dtbino() defined elsewhere; Q[, 1] is passed as
# dtbino's first unmatched argument -- confirm against dtbino's signature.
dmtbino <- function(x,size,Q) {
drop(sapply(Q[,1],dtbino,x=x,size=size)%*%Q[,2])
}
|
/R/dmtbino.R
|
no_license
|
leandroroser/Ares_1.2-4
|
R
| false
| false
| 85
|
r
|
# Density of a finite mixture of truncated binomials: evaluates
# dtbino(x, size, Q[i, 1]) for each mixture component (rows of Q) and
# returns the weighted sum using the weights in Q[, 2].
# NOTE(review): relies on dtbino() defined elsewhere; Q[, 1] is passed as
# dtbino's first unmatched argument -- confirm against dtbino's signature.
dmtbino <- function(x,size,Q) {
drop(sapply(Q[,1],dtbino,x=x,size=size)%*%Q[,2])
}
|
# Inverse-CDF transform for a Pareto(alpha) tail: maps u in (0, 1] to the
# quantile 1 / u^(1/alpha) of P(X > x) = x^(-alpha), x >= 1.
reversePareto <- function(u, alpha = 3) {
  alpha_root <- u^(1 / alpha)
  1 / alpha_root
}
# Density of the block maximum of a Pareto(alpha) parent with block size
# 1/prob: d/dz of F(z)^(1/prob), where F(z) = 1 - z^(-alpha).
paretoProb <- function(z, prob = 1e-6, alpha = 3) {
  n_block <- 1 / prob
  pdf_z <- alpha * z^(-alpha - 1)   # Pareto density
  cdf_z <- 1 - z^(-alpha)           # Pareto CDF
  n_block * pdf_z * cdf_z^(n_block - 1)
}
# Coverage probability of the prediction interval PI = c(lower, upper) for
# the future block maximum (block size 1/prob) of a Pareto(alpha) variable.
futureCIcontain <- function(prob = 1e-6, alpha = 3, PI) {
  max_cdf <- function(z) (1 - z^(-alpha))^(1 / prob)
  max_cdf(PI[2]) - max_cdf(PI[1])
}
# From the ACER package: invert the general 5-parameter ACER fit, returning
# the level at which the fitted function equals eps. `est` holds the five
# fitted coefficients in the package's order.
reverseGeneral <- function(eps, est) {
  scaled <- (eps / est[4])^(-est[5]) - 1
  est[2] + (scaled / (est[1] * est[5]))^(1 / est[3])
}
# Forward general 5-parameter ACER fit evaluated at levels `eta`.
# For a negative shape (est[5] < 0) the fit has a finite upper endpoint;
# levels beyond it are returned as NA.
General <- function(eta, est) {
  if (est[5] < 0) {
    upper_end <- est[2] - 1 / (est[1] * est[5])
    eta[eta > upper_end] <- NA
  }
  est[4] * (1 + est[1] * est[5] * (eta - est[2])^est[3])^(-1 / est[5])
}
# Return level z_p per MCMC draw of a POT/GPD fit: MCMC$theta rows are the
# posterior draws of (shape, scale, rate) and MCMC$u is the threshold.
reversePOTmcmc <- function(prob, MCMC) {
  shape <- MCMC$theta[1, ]
  scale <- MCMC$theta[2, ]
  rate <- MCMC$theta[3, ]
  scale / shape * ((prob / rate)^(-shape) - 1) + MCMC$u
}
# Exceedance probability at level z per MCMC draw of a POT/GPD fit
# (inverse of reversePOTmcmc).
POTmcmc <- function(z, MCMC) {
  shape <- MCMC$theta[1, ]
  scale <- MCMC$theta[2, ]
  rate <- MCMC$theta[3, ]
  rate * (1 + shape * (z - MCMC$u) / scale)^(-1 / shape)
}
# Narrowest 100*alpha% prediction interval for the future extreme value at
# exceedance probability `prob`.
# - ACER fits: scan nACER candidate (lower, upper) quantile pairs of the
#   fitted maximum distribution and keep the shortest interval.
# - MCMC fits: sample the predictive distribution (predDist) and take the
#   shortest window covering a fraction alpha of the draws (distCI).
predCI <- function(mod, prob = 1e-6, alpha = 0.9, nACER = 100, nmcmc = 10000) {
  minCI <- c(0, .Machine$integer.max)
  tempCI <- c(NA, NA)
  # inherits() instead of class(mod) == '...': robust for multi-class
  # objects (the equality test errors/warns there in modern R).
  if (inherits(mod, 'ACER')) {
    # Grid of interval start probabilities in (0, 1 - alpha).
    rseq <- seq(0, 1 - alpha, length.out = nACER + 2)[2:(nACER + 1)]
    for (i in seq_len(nACER)) {
      tempCI[1] <- reverseGeneral(eps = 1 - rseq[i]^prob, est = mod$coef[1:5])
      tempCI[2] <- reverseGeneral(eps = 1 - (rseq[i] + alpha)^prob, est = mod$coef[1:5])
      if (diff(minCI) > diff(tempCI)) {
        minCI <- tempCI
      }
    }
  } else if (inherits(mod, 'mcmc')) {
    dist <- predDist(mod = mod, prob = prob, nmcmc = nmcmc)
    minCI <- distCI(dist = dist, alpha = alpha)
  }
  return(minCI)
}
# Shortest interval containing a fraction `alpha` of the values in `dist`
# (narrowest empirical interval over consecutive order statistics).
#
# @param dist Numeric sample (need not be sorted).
# @param alpha Target coverage fraction in (0, 1].
# @return c(lower, upper), the narrowest window spanning
#   round(length(dist) * alpha) order statistics.
distCI <- function(dist, alpha = 0.9) {
  minCI <- c(0, .Machine$integer.max)
  dist <- sort(dist)
  ndist <- length(dist)
  interval <- round(ndist * alpha)
  # Edge case: alpha covers the whole sample; the original 1:(ndist -
  # interval) loop then iterated over c(1, 0) and crashed comparing NA.
  if (interval >= ndist) {
    return(range(dist))
  }
  for (i in seq_len(ndist - interval)) {
    tempCI <- c(dist[i], dist[i + interval])
    if (diff(minCI) > diff(tempCI)) {
      minCI <- tempCI
    }
  }
  minCI
}
# Predictive distribution of the future extreme at exceedance probability
# `prob`.
# - ACER: deterministic grid of nACER quantiles of the fitted distribution.
# - MCMC: roughly nmcmc random draws, recycling the posterior sample in
#   blocks of its own length.
predDist <- function(mod, prob = 1e-6, nACER = 10000, nmcmc = 10000) {
  # inherits() instead of class(mod) == '...' (robust for multi-class objects).
  if (inherits(mod, 'ACER')) {
    u <- seq(0, 1, length.out = nACER + 2)[2:(nACER + 1)]
    return(reverseGeneral(eps = 1 - u^prob, est = mod$coef[1:5]))
  } else if (inherits(mod, 'mcmc')) {
    n <- length(mod$theta[1, ])
    nloop <- ceiling(nmcmc / n)
    dist <- rep(NA_real_, nloop * n)   # preallocate nloop blocks of n draws
    for (i in seq_len(nloop)) {
      dist[(1 + (i - 1) * n):(i * n)] <- reversePOTmcmc(prob = 1 - runif(n)^prob, MCMC = mod)
    }
    return(dist)
  }
}
# Overlay an ACER fit (central curve plus stored upper/lower CI
# coefficient sets, via General()) and a POT MCMC fit (posterior mean and
# shortest alpha-interval per level, via POTmcmc()/distCI()) on one
# log-y plot over `n` levels spanning xlim.
acerMcmcPlot<-function(acerMod,mcmcMod,acerCol,mcmcCol,ylim,xlim,n=200,xlab,ylab,alpha=0.95){
eta<-seq(xlim[1],xlim[2],length.out=n)
plot(eta,General(eta,acerMod$coef),type='l',col=acerCol,ylim=ylim,xlim=xlim,xlab=xlab,ylab=ylab,log='y')
lines(eta,General(eta,acerMod$upperCIcoef),lty=2,col=acerCol)
lines(eta,General(eta,acerMod$lowerCIcoef),lty=2,col=acerCol)
# Row 1/3: CI bounds; row 2: posterior mean -- one column per level.
meanCI<-matrix(NA,3,n)
for(i in 1:n){
dist<-POTmcmc(eta[i],mcmcMod)
meanCI[2,i]<-mean(dist)
meanCI[c(1,3),i]<-distCI(dist,alpha=alpha)
}
lines(eta,meanCI[2,],col=mcmcCol)
lines(eta,meanCI[1,],lty=2,col=mcmcCol)
lines(eta,meanCI[3,],lty=2,col=mcmcCol)
}
###################################################
############## Pareto analysis ####################
###################################################
# Simulate 25 years of daily Pareto(alpha = 3) observations via inverse CDF.
day<-365
year<-25
set.seed(5000)
u<-runif(day*year)
a<-3
data<-matrix((1/(u))^(1/a),year,day,byrow=TRUE) # 25 realisations (one row per year, one column per day)
plot(as.vector(data),type='h',ylab=bquote(paste(x[i])),xlab='i')
#points((1:(365*25))[as.vector(data)>4],as.vector(data)[as.vector(data)>4],pch=16,cex=0.5)
#points(as.vector(data),pch=16,cex=0.5)
# True tail probability P(X > z) = z^(-alpha) on a log scale
plot(1:200,1/(1:200)^(a),log='y',type='l',ylim=c(1e-7,1),ylab='P(X>z)', xlab='z')
# Daily exceedance probabilities for the four return periods of interest
pYearly<-1/365
pDecade<-1/(10*365)
pCentury<-1/(100*365)
pMillennium<-1/(1000*365)
library(ACER)
# Fit both models above the same threshold (eta1 = u = 1.5)
acerMod<-ACER(data,k=1,eta1=1.5,CI=0.95,stationary=TRUE,method='general', check.weibull = FALSE)
mcmcMod<-mcmc.gpd(as.vector(data),u=1.5,n=10000,start=c(1,1),a=0.35)
# Trace plots of the first 1000 MCMC iterations (visual burn-in check)
old.par <- par(mfrow=c(2, 1))
plot(mcmcMod$theta[1,1:1000],type='l',ylab=expression(xi^t),xlab='t',mgp=c(2,1,0))
plot(mcmcMod$theta[2,1:1000],type='l',ylab=expression(phi^t),xlab='t',mgp=c(2,1,0))
par(old.par)
aRate(mcmcMod$theta[2,350:10000])
# Refit with default acceptance tuning and drop 500 burn-in iterations
mcmcMod<-mcmc.gpd(as.vector(data),u=1.5,n=10000)
mcmcMod$theta=mcmcMod$theta[1:3,500:10000]
effsampSize(mcmcMod$theta[1,])
effsampSize(mcmcMod$theta[2,])
# plot est Pr(X>z) against the known Pareto tail
acerMcmcPlot(acerMod=acerMod,mcmcMod=mcmcMod,acerCol='blue',mcmcCol='red',ylim=c(1e-8,1e-3),xlim=c(10,150),n=200,ylab='Pr(Z>z)', xlab='z',alpha=0.95)
lines(seq(10,150,length.out=200),1/(seq(10,150,length.out=200))^(3))
legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE)
# Posterior marginal densities of the three GPD parameters
old.par <- par(mfrow=c(1, 3))
plot(density(mcmcMod$theta[1,],bw=0.005),xlab=expression(xi),main='')
plot(density(mcmcMod$theta[2,],bw=0.005),xlab=expression(sigma),main='')
plot(density(mcmcMod$theta[3,],bw=0.005),xlab=expression(beta),main='')
par(old.par)
# plot year est Pr(M_n=z) # '#FF003322' first 6 is color, last two is transparency.
# 2x2 grid of panels plus a full-width legend strip at the bottom
m <- matrix(c(1,2,3,4,5,5),nrow = 3,ncol = 2,byrow = TRUE)
layout(mat = m,heights = c(0.4,0.4,0.2))
#old.par<-par(mar = c(2,2,1,1))
# Panel 1: density of the yearly maximum (truth vs ACER vs MCMC, dashed = 90% PI)
xlim=c(1,30)
ylim=NULL
z=seq(xlim[1],xlim[2],length.out=200)
plot(z,paretoProb(z=z,prob=pYearly,alpha=a),type='l',col=1,ylim=ylim,ylab=bquote(paste('Pr(',M[year],' = z)')),xlab='z',mgp=c(2,1,0),cex.lab=1.8)
lines(density(predDist(acerMod,prob=pYearly,nACER=10000),bw=0.25,from=xlim[1],to=xlim[2]),col='blue')
abline(v=predCI(acerMod,prob=pYearly,alpha=0.90,nACER=100),lty=2,col='blue')
lines(density(predDist(mcmcMod,prob=pYearly,nmcmc=99501),bw=0.25,from=xlim[1],to=xlim[2]),col='red')
abline(v=predCI(mcmcMod,prob=pYearly,alpha=0.90,nmcmc=99501),lty=2,col='red')
#legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE,cex = 0.75)
# Panel 2: decade maximum
xlim=c(1,60)
ylim=NULL
z=seq(xlim[1],xlim[2],length.out=200)
plot(z,paretoProb(z=z,prob=pDecade,alpha=a),type='l',col=1,ylim=ylim,ylab=bquote(paste('Pr(',M[decade],' = z)')),xlab='z',mgp=c(2,1,0),cex.lab=1.8)
lines(density(predDist(acerMod,prob=pDecade,nACER=10000),bw=0.25,from=xlim[1],to=xlim[2]),col='blue')
abline(v=predCI(acerMod,prob=pDecade,alpha=0.90,nACER=100),lty=2,col='blue')
lines(density(predDist(mcmcMod,prob=pDecade,nmcmc=99501),bw=0.25,from=xlim[1],to=xlim[2]),col='red')
abline(v=predCI(mcmcMod,prob=pDecade,alpha=0.90,nmcmc=99501),lty=2,col='red')
#legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE,cex = 0.75)
#par(old.par)
#old.par <- par(mfrow=c(2, 1))
# Panel 3: century maximum
xlim=c(1,150)
ylim=NULL
z=seq(xlim[1],xlim[2],length.out=200)
plot(z,paretoProb(z=z,prob=pCentury,alpha=a),type='l',col=1,ylim=ylim,ylab=bquote(paste('Pr(',M[century],' = z)')),xlab='z',mgp=c(2,1,0),cex.lab=1.8)
lines(density(predDist(acerMod,prob=pCentury,nACER=10000),bw=0.25,from=xlim[1],to=xlim[2]),col='blue')
abline(v=predCI(acerMod,prob=pCentury,alpha=0.90,nACER=100),lty=2,col='blue')
lines(density(predDist(mcmcMod,prob=pCentury,nmcmc=99501),bw=0.25,from=xlim[1],to=xlim[2]),col='red')
abline(v=predCI(mcmcMod,prob=pCentury,alpha=0.90,nmcmc=99501),lty=2,col='red')
#legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE)
# Panel 4: millennium maximum
xlim=c(1,300)
ylim=NULL
z=seq(xlim[1],xlim[2],length.out=200)
plot(z,paretoProb(z=z,prob=pMillennium,alpha=a),type='l',col=1,ylim=ylim,ylab=bquote(paste('Pr(',M[millennium],' = z)')),xlab='z',mgp=c(2,1,0),cex.lab=1.8)
lines(density(predDist(acerMod,prob=pMillennium,nACER=10000),bw=0.25,from=xlim[1],to=xlim[2]),col='blue')
abline(v=predCI(acerMod,prob=pMillennium,alpha=0.90,nACER=100),lty=2,col='blue')
lines(density(predDist(mcmcMod,prob=pMillennium,nmcmc=99501),bw=0.25,from=xlim[1],to=xlim[2]),col='red')
abline(v=predCI(mcmcMod,prob=pMillennium,alpha=0.90,nmcmc=99501),lty=2,col='red')
#legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE)
#par(old.par)
# Panel 5: shared legend strip
par(mar = c(2,2,1,1))
plot(1, type = "n", axes=FALSE, xlab="", ylab="")
legend(x="top", inset=0,legend=c('Pareto Dist ','ACER ','POT MCMC '), fill=c('black','blue','red'),lwd=1,horiz = TRUE)
par(mar=c(5.1,4.1,4.1,2.1))
# Actual coverage of each estimated 90% prediction interval under the known
# Pareto(3) law — first the MCMC intervals, then the ACER intervals.
futureCIcontain(prob=pYearly,alpha=3,PI=predCI(mcmcMod,prob=pYearly,alpha=0.90,nmcmc=99501))
futureCIcontain(prob=pDecade,alpha=3,PI=predCI(mcmcMod,prob=pDecade,alpha=0.90,nmcmc=99501))
futureCIcontain(prob=pCentury,alpha=3,PI=predCI(mcmcMod,prob=pCentury,alpha=0.90,nmcmc=99501))
futureCIcontain(prob=pMillennium,alpha=3,PI=predCI(mcmcMod,prob=pMillennium,alpha=0.90,nmcmc=99501))
futureCIcontain(prob=pYearly,alpha=3,PI=predCI(acerMod,prob=pYearly,alpha=0.90,nACER=100))
futureCIcontain(prob=pDecade,alpha=3,PI=predCI(acerMod,prob=pDecade,alpha=0.90,nACER=100))
futureCIcontain(prob=pCentury,alpha=3,PI=predCI(acerMod,prob=pCentury,alpha=0.90,nACER=100))
futureCIcontain(prob=pMillennium,alpha=3,PI=predCI(acerMod,prob=pMillennium,alpha=0.90,nACER=100))
# Widths of the MCMC prediction intervals
diff(predCI(mcmcMod,prob=pYearly,alpha=0.90,nmcmc=99501))
diff(predCI(mcmcMod,prob=pDecade,alpha=0.90,nmcmc=99501))
diff(predCI(mcmcMod,prob=pCentury,alpha=0.90,nmcmc=99501))
diff(predCI(mcmcMod,prob=pMillennium,alpha=0.90,nmcmc=99501))
print('ACER START')
# Widths of the ACER prediction intervals
diff(predCI(acerMod,prob=pYearly,alpha=0.90,nACER=100))
diff(predCI(acerMod,prob=pDecade,alpha=0.90,nACER=100))
diff(predCI(acerMod,prob=pCentury,alpha=0.90,nACER=100))
diff(predCI(acerMod,prob=pMillennium,alpha=0.90,nACER=100))
###################################################
############### SIM ANALYSE #######################
###################################################
# Fit APARCH(1,1) models with increasing AR order to the coil series and
# compare them via information criteria and likelihood-ratio statistics.
coil<-read.table("plotcode/coil.txt",header=TRUE)
plot(coil[,2],type='l')
n<-length(coil[,2])
noMeanCoil<-coil[,2]-mean(coil[,2])
#gamma<-(diff(coil[,2])>0)*1
library(fGarch)
testA0<-garchFit(formula=~aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)
testA1<-garchFit(formula=~arma(1,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
# NOTE(review): 'test0' is never defined — presumably 'testA0' (or 'testA1')
# was intended; verify before running.
test0@fit$ics
test0@fit$llh
testA2<-garchFit(formula=~arma(2,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA2@fit$ics
testA2@fit$llh
testA3<-garchFit(formula=~arma(3,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA3@fit$ics
testA3@fit$llh
testA4<-garchFit(formula=~arma(4,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA4@fit$ics
testA4@fit$llh
testA5<-garchFit(formula=~arma(5,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA6<-garchFit(formula=~arma(6,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA7<-garchFit(formula=~arma(7,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA15<-garchFit(formula=~arma(15,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
# Likelihood-ratio statistics between successive AR orders (llh is stored
# as the NEGATIVE log-likelihood, hence the sign convention)
2*(-testA4@fit$llh+testA3@fit$llh)
2*(-testA5@fit$llh+testA4@fit$llh)
2*(-testA6@fit$llh+testA5@fit$llh)
2*(-testA7@fit$llh+testA6@fit$llh)
# Model selected by each criterion:
# AIC=>A3
# BIC=>A2
# SIC=>A4
# HQIC=>A? (original read 'A$' — looks like a typo; verify)
testA3<-garchFit(formula=~arma(3,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
# Hard-coded coefficients of the selected ARMA(3,0)+APARCH(1,1) fit,
# used to simulate a synthetic series from the fitted model
model3=list(ar=c(-2.546e-02,-4.335e-02,-1.723e-02),beta=0.95,mu=4.126e-04,omega=9.769e-05,alpha=5.545e-02,gamma=2.333e-01,delta=1.127e+00,skew=9.617e-01,shape=7.342e+00)
garchSpecModel3<-garchSpec(model=model3,cond.dist = "sstd",presample = cbind(z=testA3@fit$series$z[1:10],h=testA3@fit$series$h[1:10],y=testA3@fit$series$x[1:10]))
plot(garchSim(garchSpecModel3,n=length(coil[,2]))$garch,type='l',xlab='t',ylab=expression(z[t]),mgp=c(2,1,0),cex.lab=1.4)
# NOTE(review): 'test' is never defined — presumably 'testA3'; verify.
garchSim(garchSpec(model=test@fit$params$params,cond.dist="sstd"),n=100)
# NOTE(review): trailing 'ylab=' passes a missing argument; likely unfinished.
plot(garchSim(garchSpec(model=testA3@fit$params$params,cond.dist="sstd"),n=5000),type='l',ylab=)
# residuals:
# (standardize=FALSE) difference between Yt and its conditional expectation (est \hat{a}_t)
# (standardize=TRUE) \hat{eps}_t=\hat{a}_t / \hat{\sigma}_t
# NOTE(review): 'test0' undefined here as well (see above).
plot(residuals(test0, standardize = TRUE),type='l')
#EKSTRA?? — scratch lines exercising the stepwise ACER interface.
# NOTE(review): 'alpha[i]' references loop variables not defined in this
# scope, and 'metgid' looks like a typo for 'method' — verify before use.
acermod<-ACERm(data,k=1,stationary=TRUE)
acermod<-CI(acermod,level=alpha[i])
acermod<-updatecond(acermod,eta1=2)
acermod<-MSEoptimization(acermod,metgid='general')
# Effective sample size vs. acceptance rate a.
# For each target acceptance rate a in {0.20, 0.21, ..., 0.50}, run two
# independent MCMC chains on fresh Pareto(3) data and record the average
# effective sample size of the xi-chain (after 500 burn-in iterations).
eff <- rep(NA, 31)
for (i in 1:31) {
  print(i)  # progress indicator; each iteration runs two 10000-step chains
  u <- runif(day * year)
  data <- (1 / (u))^(1 / 3)  # fresh iid Pareto(alpha = 3) sample
  temp1 <- effsampSize(mcmc.gpd(as.vector(data), u = 2, n = 10000, start = c(1, 1), a = 0.19 + i / 100, cpp = TRUE)$theta[1, 500:10000])
  u <- runif(day * year)
  data <- (1 / (u))^(1 / 3)
  temp2 <- effsampSize(mcmc.gpd(as.vector(data), u = 2, n = 10000, start = c(1, 1), a = 0.19 + i / 100, cpp = TRUE)$theta[1, 500:10000])
  # BUG FIX: the original computed (temp1+temp1)/2, silently discarding the
  # second replicate; average the two independent replicates instead.
  eff[i] <- (temp1 + temp2) / 2
}
plot(20:50, eff, type = 'p')  # x-axis is 100 * acceptance rate a
|
/apgarchtest.R
|
no_license
|
rodvei/ACER-vs-MCMC-GEV.R
|
R
| false
| false
| 12,702
|
r
|
# Inverse-CDF sampler for a Pareto(alpha) distribution: maps a uniform draw
# u in (0, 1] to the corresponding Pareto quantile u^(-1/alpha).
reversePareto <- function(u, alpha = 3) {
  u^(-1 / alpha)
}
# Density of the maximum of m = 1/prob iid Pareto(alpha) variables:
# d/dz [(1 - z^-alpha)^m] = m * alpha * z^(-alpha-1) * (1 - z^-alpha)^(m-1).
paretoProb <- function(z, prob = 1e-6, alpha = 3) {
  m <- 1 / prob
  single_cdf <- 1 - z^(-alpha)
  m * alpha * z^(-alpha - 1) * single_cdf^(m - 1)
}
# Actual probability mass the prediction interval PI = c(lower, upper)
# captures for the maximum of m = 1/prob iid Pareto(alpha) variables:
# P(lower < M <= upper) = F(upper)^m - F(lower)^m, F(z) = 1 - z^(-alpha).
futureCIcontain <- function(prob = 1e-6, alpha = 3, PI) {
  m <- 1 / prob
  cdf <- function(z) 1 - z^(-alpha)
  cdf(PI[2])^m - cdf(PI[1])^m
}
# From ACER package.
# Inverse of the general ACER extrapolation function General(): solves
# eps(eta) = q * (1 + a*c*(eta - b)^xi)^(-1/c) for eta,
# with est = c(a, b, xi, q, c).
reverseGeneral <- function(eps, est) {
  a <- est[1]
  b <- est[2]
  xi <- est[3]
  q <- est[4]
  cc <- est[5]
  b + (((eps / q)^(-cc) - 1) / (a * cc))^(1 / xi)
}
# General ACER extrapolation function
# eps(eta) = q * (1 + a*c*(eta - b)^xi)^(-1/c), with est = c(a, b, xi, q, c).
# For c < 0 the support has a finite upper endpoint b - 1/(a*c); barrier
# levels beyond it are returned as NA.
General <- function(eta, est) {
  a <- est[1]
  b <- est[2]
  xi <- est[3]
  q <- est[4]
  cc <- est[5]
  if (cc < 0) {
    upper_end <- b - 1 / (a * cc)
    eta[eta > upper_end] <- NA
  }
  q * (1 + a * cc * (eta - b)^xi)^(-1 / cc)
}
# z_p POT MCMC: return level z_p evaluated across an entire posterior sample,
# z_p = sigma/xi * ((p/zeta)^(-xi) - 1) + u,
# where the rows of MCMC$theta are (xi, sigma, zeta) and MCMC$u is the threshold.
reversePOTmcmc <- function(prob, MCMC) {
  xi <- MCMC$theta[1, ]
  sigma <- MCMC$theta[2, ]
  zeta <- MCMC$theta[3, ]
  sigma / xi * ((prob / zeta)^(-xi) - 1) + MCMC$u
}
# Posterior sample of the exceedance probability P(X > z) under the POT/GPD
# model: zeta * (1 + xi*(z - u)/sigma)^(-1/xi), theta rows = (xi, sigma, zeta).
POTmcmc <- function(z, MCMC) {
  xi <- MCMC$theta[1, ]
  sigma <- MCMC$theta[2, ]
  zeta <- MCMC$theta[3, ]
  zeta * (1 + xi * (z - MCMC$u) / sigma)^(-1 / xi)
}
# Narrowest CI for future predicted value
#
# For an ACER fit, scans nACER candidate intervals on the quantile scale and
# keeps the narrowest one with coverage `alpha`; for an MCMC fit, delegates
# to distCI() on a simulated predictive sample.
#
# Args:
#   mod:   fitted model of class 'ACER' or 'mcmc'.
#   prob:  exceedance probability defining the prediction horizon.
#   alpha: coverage level of the interval.
#   nACER: number of candidate intervals scanned (ACER branch).
#   nmcmc: number of predictive draws (MCMC branch).
#
# Returns:
#   length-2 numeric vector c(lower, upper).
predCI<-function(mod,prob=1e-6,alpha=0.9,nACER=100,nmcmc=10000){
  minCI<-c(0,.Machine$integer.max)
  tempCI<-c(NA,NA)
  # inherits() instead of class(mod)=='ACER': robust to multi-class objects.
  # (Unused locals 'low'/'up' from the original were removed.)
  if(inherits(mod,'ACER')){
    # Candidate lower tail probabilities r in (0, 1 - alpha); each candidate
    # interval is [Q(r), Q(r + alpha)] of the predictive distribution.
    rseq<-seq(0,1-alpha,length.out=nACER+2)[2:(nACER+1)]
    for(i in seq_len(nACER)){
      tempCI[1]<-reverseGeneral(eps=1-rseq[i]^prob,est=mod$coef[1:5])
      tempCI[2]<-reverseGeneral(eps=1-(rseq[i]+alpha)^prob,est=mod$coef[1:5])
      if(diff(minCI)>diff(tempCI)){
        minCI<-tempCI
      }
    }
  }else if(inherits(mod,'mcmc')){
    dist<-predDist(mod=mod,prob=prob,nmcmc=nmcmc)
    minCI<-distCI(dist=dist,alpha=alpha)
  }
  return(minCI)
}
# Narrowest CI from a numerical distribution
#
# Finds the shortest interval containing a fraction `alpha` of the empirical
# sample `dist` (a highest-density-style interval from order statistics).
#
# Args:
#   dist:  numeric vector of draws from the distribution.
#   alpha: coverage level of the interval (default 0.9).
#
# Returns:
#   length-2 numeric vector c(lower, upper) of the narrowest interval.
distCI<-function(dist,alpha=0.9){
  dist<-sort(dist)
  ndist<-length(dist)
  interval<-round(ndist*alpha)
  # Degenerate case: requested coverage spans the whole sample, so the
  # narrowest interval is the full range.  (BUG FIX: the original looped
  # over 1:(ndist-interval), which iterates c(1, 0) when interval >= ndist
  # and then indexes dist[0]/dist[ndist+interval].)
  if(ndist-interval<1){
    return(c(dist[1],dist[ndist]))
  }
  minCI<-c(0,.Machine$integer.max)
  # Slide a window spanning `interval` order statistics and keep the
  # narrowest candidate.
  for(i in seq_len(ndist-interval)){
    tempCI<-c(dist[i],dist[i+interval])
    if(diff(minCI)>diff(tempCI)){
      minCI<-tempCI
    }
  }
  return(minCI)
}
# Distribution of future predicted value
#
# Draws an (approximate) sample from the distribution of the maximum over a
# horizon of 1/prob future observations, from either a fitted ACER model or
# the posterior sample of a POT MCMC fit.
#
# Args:
#   mod:   fitted model; object of class 'ACER' or 'mcmc'.
#   prob:  exceedance probability defining the prediction horizon.
#   nACER: number of quantile-grid evaluations (ACER branch).
#   nmcmc: minimum number of draws (MCMC branch).
#
# Returns:
#   numeric vector of predictive draws.
predDist<-function(mod,prob=1e-6,nACER=10000,nmcmc=10000){
  # inherits() is robust to objects carrying more than one class, unlike
  # class(mod)=='ACER' (which errors inside if() on R >= 4.2 for such objects).
  if(inherits(mod,'ACER')){
    # Evenly spaced interior grid of probabilities in (0, 1).
    u<-seq(0,1,length.out=nACER+2)[2:(nACER+1)]
    return(reverseGeneral(eps=1-u^prob,est=mod$coef[1:5]))
  }else if(inherits(mod,'mcmc')){
    n<-length(mod$theta[1,])
    # Recycle the posterior sample enough times to reach at least nmcmc draws.
    nloop<-ceiling(nmcmc/n)
    dist<-rep(NA_real_,nloop*n)
    for(i in seq_len(nloop)){
      dist[(1+(i-1)*n):(i*n)]<-reversePOTmcmc(prob=1-runif(n)^prob,MCMC=mod)
    }
    return(dist)
  }
  # Previously an unsupported class silently returned NULL; fail loudly instead.
  stop("predDist: 'mod' must be of class 'ACER' or 'mcmc'",call.=FALSE)
}
# Plot ACER and POT-MCMC exceedance-probability estimates on one log-scale figure.
# acerMod/mcmcMod: fitted models; acerCol/mcmcCol: line colours;
# xlim/ylim: axis limits; n: grid size; alpha: CI coverage level.
# Solid lines are point estimates, dashed lines are CI bounds.
# Relies on sibling helpers General(), POTmcmc() and distCI().
acerMcmcPlot<-function(acerMod,mcmcMod,acerCol,mcmcCol,ylim,xlim,n=200,xlab,ylab,alpha=0.95){
# Barrier-level grid
eta<-seq(xlim[1],xlim[2],length.out=n)
# ACER point estimate (solid) with its CI bounds (dashed), log y-axis
plot(eta,General(eta,acerMod$coef),type='l',col=acerCol,ylim=ylim,xlim=xlim,xlab=xlab,ylab=ylab,log='y')
lines(eta,General(eta,acerMod$upperCIcoef),lty=2,col=acerCol)
lines(eta,General(eta,acerMod$lowerCIcoef),lty=2,col=acerCol)
# Rows: CI lower bound, posterior mean, CI upper bound at each eta
meanCI<-matrix(NA,3,n)
for(i in 1:n){
# Posterior sample of P(X > eta[i]) under the POT model
dist<-POTmcmc(eta[i],mcmcMod)
meanCI[2,i]<-mean(dist)
meanCI[c(1,3),i]<-distCI(dist,alpha=alpha)
}
# MCMC posterior mean (solid) and narrowest CI (dashed)
lines(eta,meanCI[2,],col=mcmcCol)
lines(eta,meanCI[1,],lty=2,col=mcmcCol)
lines(eta,meanCI[3,],lty=2,col=mcmcCol)
}
###################################################
############## Pareto analysis ####################
###################################################
day<-365
year<-25
set.seed(5000)
u<-runif(day*year)
a<-3
data<-matrix((1/(u))^(1/a),year,day,byrow=TRUE) #10 realisations (one for each day over 10 years)
plot(as.vector(data),type='h',ylab=bquote(paste(x[i])),xlab='i')
#points((1:(365*25))[as.vector(data)>4],as.vector(data)[as.vector(data)>4],pch=16,cex=0.5)
#points(as.vector(data),pch=16,cex=0.5)
plot(1:200,1/(1:200)^(a),log='y',type='l',ylim=c(1e-7,1),ylab='P(X>z)', xlab='z')
pYearly<-1/365
pDecade<-1/(10*365)
pCentury<-1/(100*365)
pMillennium<-1/(1000*365)
library(ACER)
acerMod<-ACER(data,k=1,eta1=1.5,CI=0.95,stationary=TRUE,method='general', check.weibull = FALSE)
mcmcMod<-mcmc.gpd(as.vector(data),u=1.5,n=10000,start=c(1,1),a=0.35)
old.par <- par(mfrow=c(2, 1))
plot(mcmcMod$theta[1,1:1000],type='l',ylab=expression(xi^t),xlab='t',mgp=c(2,1,0))
plot(mcmcMod$theta[2,1:1000],type='l',ylab=expression(phi^t),xlab='t',mgp=c(2,1,0))
par(old.par)
aRate(mcmcMod$theta[2,350:10000])
mcmcMod<-mcmc.gpd(as.vector(data),u=1.5,n=10000)
mcmcMod$theta=mcmcMod$theta[1:3,500:10000]
effsampSize(mcmcMod$theta[1,])
effsampSize(mcmcMod$theta[2,])
# plot est Pr(X>z)
acerMcmcPlot(acerMod=acerMod,mcmcMod=mcmcMod,acerCol='blue',mcmcCol='red',ylim=c(1e-8,1e-3),xlim=c(10,150),n=200,ylab='Pr(Z>z)', xlab='z',alpha=0.95)
lines(seq(10,150,length.out=200),1/(seq(10,150,length.out=200))^(3))
legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE)
old.par <- par(mfrow=c(1, 3))
plot(density(mcmcMod$theta[1,],bw=0.005),xlab=expression(xi),main='')
plot(density(mcmcMod$theta[2,],bw=0.005),xlab=expression(sigma),main='')
plot(density(mcmcMod$theta[3,],bw=0.005),xlab=expression(beta),main='')
par(old.par)
# plot year est Pr(M_n=z) # '#FF003322' first 6 is color, last two is transparancy.
m <- matrix(c(1,2,3,4,5,5),nrow = 3,ncol = 2,byrow = TRUE)
layout(mat = m,heights = c(0.4,0.4,0.2))
#old.par<-par(mar = c(2,2,1,1))
xlim=c(1,30)
ylim=NULL
z=seq(xlim[1],xlim[2],length.out=200)
plot(z,paretoProb(z=z,prob=pYearly,alpha=a),type='l',col=1,ylim=ylim,ylab=bquote(paste('Pr(',M[year],' = z)')),xlab='z',mgp=c(2,1,0),cex.lab=1.8)
lines(density(predDist(acerMod,prob=pYearly,nACER=10000),bw=0.25,from=xlim[1],to=xlim[2]),col='blue')
abline(v=predCI(acerMod,prob=pYearly,alpha=0.90,nACER=100),lty=2,col='blue')
lines(density(predDist(mcmcMod,prob=pYearly,nmcmc=99501),bw=0.25,from=xlim[1],to=xlim[2]),col='red')
abline(v=predCI(mcmcMod,prob=pYearly,alpha=0.90,nmcmc=99501),lty=2,col='red')
#legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE,cex = 0.75)
xlim=c(1,60)
ylim=NULL
z=seq(xlim[1],xlim[2],length.out=200)
plot(z,paretoProb(z=z,prob=pDecade,alpha=a),type='l',col=1,ylim=ylim,ylab=bquote(paste('Pr(',M[decade],' = z)')),xlab='z',mgp=c(2,1,0),cex.lab=1.8)
lines(density(predDist(acerMod,prob=pDecade,nACER=10000),bw=0.25,from=xlim[1],to=xlim[2]),col='blue')
abline(v=predCI(acerMod,prob=pDecade,alpha=0.90,nACER=100),lty=2,col='blue')
lines(density(predDist(mcmcMod,prob=pDecade,nmcmc=99501),bw=0.25,from=xlim[1],to=xlim[2]),col='red')
abline(v=predCI(mcmcMod,prob=pDecade,alpha=0.90,nmcmc=99501),lty=2,col='red')
#legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE,cex = 0.75)
#par(old.par)
#old.par <- par(mfrow=c(2, 1))
xlim=c(1,150)
ylim=NULL
z=seq(xlim[1],xlim[2],length.out=200)
plot(z,paretoProb(z=z,prob=pCentury,alpha=a),type='l',col=1,ylim=ylim,ylab=bquote(paste('Pr(',M[century],' = z)')),xlab='z',mgp=c(2,1,0),cex.lab=1.8)
lines(density(predDist(acerMod,prob=pCentury,nACER=10000),bw=0.25,from=xlim[1],to=xlim[2]),col='blue')
abline(v=predCI(acerMod,prob=pCentury,alpha=0.90,nACER=100),lty=2,col='blue')
lines(density(predDist(mcmcMod,prob=pCentury,nmcmc=99501),bw=0.25,from=xlim[1],to=xlim[2]),col='red')
abline(v=predCI(mcmcMod,prob=pCentury,alpha=0.90,nmcmc=99501),lty=2,col='red')
#legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE)
xlim=c(1,300)
ylim=NULL
z=seq(xlim[1],xlim[2],length.out=200)
plot(z,paretoProb(z=z,prob=pMillennium,alpha=a),type='l',col=1,ylim=ylim,ylab=bquote(paste('Pr(',M[millennium],' = z)')),xlab='z',mgp=c(2,1,0),cex.lab=1.8)
lines(density(predDist(acerMod,prob=pMillennium,nACER=10000),bw=0.25,from=xlim[1],to=xlim[2]),col='blue')
abline(v=predCI(acerMod,prob=pMillennium,alpha=0.90,nACER=100),lty=2,col='blue')
lines(density(predDist(mcmcMod,prob=pMillennium,nmcmc=99501),bw=0.25,from=xlim[1],to=xlim[2]),col='red')
abline(v=predCI(mcmcMod,prob=pMillennium,alpha=0.90,nmcmc=99501),lty=2,col='red')
#legend("topright", inset=0,c('Pareto Dist','ACER','POT MCMC'), fill=c('black','blue','red'), horiz=FALSE)
#par(old.par)
par(mar = c(2,2,1,1))
plot(1, type = "n", axes=FALSE, xlab="", ylab="")
legend(x="top", inset=0,legend=c('Pareto Dist ','ACER ','POT MCMC '), fill=c('black','blue','red'),lwd=1,horiz = TRUE)
par(mar=c(5.1,4.1,4.1,2.1))
futureCIcontain(prob=pYearly,alpha=3,PI=predCI(mcmcMod,prob=pYearly,alpha=0.90,nmcmc=99501))
futureCIcontain(prob=pDecade,alpha=3,PI=predCI(mcmcMod,prob=pDecade,alpha=0.90,nmcmc=99501))
futureCIcontain(prob=pCentury,alpha=3,PI=predCI(mcmcMod,prob=pCentury,alpha=0.90,nmcmc=99501))
futureCIcontain(prob=pMillennium,alpha=3,PI=predCI(mcmcMod,prob=pMillennium,alpha=0.90,nmcmc=99501))
futureCIcontain(prob=pYearly,alpha=3,PI=predCI(acerMod,prob=pYearly,alpha=0.90,nACER=100))
futureCIcontain(prob=pDecade,alpha=3,PI=predCI(acerMod,prob=pDecade,alpha=0.90,nACER=100))
futureCIcontain(prob=pCentury,alpha=3,PI=predCI(acerMod,prob=pCentury,alpha=0.90,nACER=100))
futureCIcontain(prob=pMillennium,alpha=3,PI=predCI(acerMod,prob=pMillennium,alpha=0.90,nACER=100))
diff(predCI(mcmcMod,prob=pYearly,alpha=0.90,nmcmc=99501))
diff(predCI(mcmcMod,prob=pDecade,alpha=0.90,nmcmc=99501))
diff(predCI(mcmcMod,prob=pCentury,alpha=0.90,nmcmc=99501))
diff(predCI(mcmcMod,prob=pMillennium,alpha=0.90,nmcmc=99501))
print('ACER START')
diff(predCI(acerMod,prob=pYearly,alpha=0.90,nACER=100))
diff(predCI(acerMod,prob=pDecade,alpha=0.90,nACER=100))
diff(predCI(acerMod,prob=pCentury,alpha=0.90,nACER=100))
diff(predCI(acerMod,prob=pMillennium,alpha=0.90,nACER=100))
###################################################
############### SIM ANALYSE #######################
###################################################
coil<-read.table("plotcode/coil.txt",header=TRUE)
plot(coil[,2],type='l')
n<-length(coil[,2])
noMeanCoil<-coil[,2]-mean(coil[,2])
#gamma<-(diff(coil[,2])>0)*1
library(fGarch)
testA0<-garchFit(formula=~aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)
testA1<-garchFit(formula=~arma(1,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
test0@fit$ics
test0@fit$llh
testA2<-garchFit(formula=~arma(2,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA2@fit$ics
testA2@fit$llh
testA3<-garchFit(formula=~arma(3,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA3@fit$ics
testA3@fit$llh
testA4<-garchFit(formula=~arma(4,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA4@fit$ics
testA4@fit$llh
testA5<-garchFit(formula=~arma(5,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA6<-garchFit(formula=~arma(6,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA7<-garchFit(formula=~arma(7,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
testA15<-garchFit(formula=~arma(15,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
2*(-testA4@fit$llh+testA3@fit$llh)
2*(-testA5@fit$llh+testA4@fit$llh)
2*(-testA6@fit$llh+testA5@fit$llh)
2*(-testA7@fit$llh+testA6@fit$llh)
# AIC=>A3
# BIC=>A2
# SIC=>A4
# HQIC=>A$
testA3<-garchFit(formula=~arma(3,0)+aparch(1,1),cond.dist="sstd",data=coil[,2],leverage=TRUE)#,include.mean=FALSE)
model3=list(ar=c(-2.546e-02,-4.335e-02,-1.723e-02),beta=0.95,mu=4.126e-04,omega=9.769e-05,alpha=5.545e-02,gamma=2.333e-01,delta=1.127e+00,skew=9.617e-01,shape=7.342e+00)
garchSpecModel3<-garchSpec(model=model3,cond.dist = "sstd",presample = cbind(z=testA3@fit$series$z[1:10],h=testA3@fit$series$h[1:10],y=testA3@fit$series$x[1:10]))
plot(garchSim(garchSpecModel3,n=length(coil[,2]))$garch,type='l',xlab='t',ylab=expression(z[t]),mgp=c(2,1,0),cex.lab=1.4)
garchSim(garchSpec(model=test@fit$params$params,cond.dist="sstd"),n=100)
plot(garchSim(garchSpec(model=testA3@fit$params$params,cond.dist="sstd"),n=5000),type='l',ylab=)
# residuals:
# (standardize=FALSE) difference between Yt and its conditional expectation (est \hat{a}_t)
# (standardize=TRUE) \hat{eps}_t=\hat{a}_t / \hat{\sigma}_t
plot(residuals(test0, standardize = TRUE),type='l')
#EKSTRA??
acermod<-ACERm(data,k=1,stationary=TRUE)
acermod<-CI(acermod,level=alpha[i])
acermod<-updatecond(acermod,eta1=2)
acermod<-MSEoptimization(acermod,metgid='general')
# Effective sample size vs. acceptance rate a.
# For each target acceptance rate a in {0.20, 0.21, ..., 0.50}, run two
# independent MCMC chains on fresh Pareto(3) data and record the average
# effective sample size of the xi-chain (after 500 burn-in iterations).
eff <- rep(NA, 31)
for (i in 1:31) {
  print(i)  # progress indicator; each iteration runs two 10000-step chains
  u <- runif(day * year)
  data <- (1 / (u))^(1 / 3)  # fresh iid Pareto(alpha = 3) sample
  temp1 <- effsampSize(mcmc.gpd(as.vector(data), u = 2, n = 10000, start = c(1, 1), a = 0.19 + i / 100, cpp = TRUE)$theta[1, 500:10000])
  u <- runif(day * year)
  data <- (1 / (u))^(1 / 3)
  temp2 <- effsampSize(mcmc.gpd(as.vector(data), u = 2, n = 10000, start = c(1, 1), a = 0.19 + i / 100, cpp = TRUE)$theta[1, 500:10000])
  # BUG FIX: the original computed (temp1+temp1)/2, silently discarding the
  # second replicate; average the two independent replicates instead.
  eff[i] <- (temp1 + temp2) / 2
}
plot(20:50, eff, type = 'p')  # x-axis is 100 * acceptance rate a
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_sg.R
\name{summary.sg}
\alias{summary.sg}
\title{sg summary}
\usage{
\method{summary}{sg}(object, ...)
}
\arguments{
\item{object}{sg object}
\item{...}{ignored}
}
\description{
sg summary
}
|
/man/summary.sg.Rd
|
no_license
|
antiphon/spatgraphs
|
R
| false
| true
| 276
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_sg.R
\name{summary.sg}
\alias{summary.sg}
\title{sg summary}
\usage{
\method{summary}{sg}(object, ...)
}
\arguments{
\item{object}{sg object}
\item{...}{ignored}
}
\description{
sg summary
}
|
context("Rle")
# Compare the C-level asRle() against the reference IRanges/S4Vectors Rle().
test_that("Rle construction works", {
x1 <- c(1, 1, 1, 1, 2, 2, 3, 4, 4, 4, 5, 6, 8, 8, 8.01)
expect_true(all(biosignals:::asRle(x1) == Rle(x1)))
})
test_that("Rle constructor handles weird input", {
## An Rle with uniform input killed my entire sunday
# Constant input collapses to a single run.
r1 <- biosignals:::asRle(rep(10, 5))
ir1 <- Rle(rep(10, 5))
expect_true(all(r1 == ir1))
# Zero-length input must round-trip to an empty Rle.
r2 <- biosignals:::asRle(numeric())
ir2 <- Rle(numeric())
expect_true(all(r2 == ir2))
})
# Both expansion entry points must invert Rle compression exactly.
test_that("Rle expansion works", {
x <- c(0, 0, 1, 1, 1, 0.1, 0.1, 0.2, 3, 3, 4)
r <- Rle(x)
expect_equal(biosignals:::expandRle(r), x)
expect_equal(biosignals:::expandRleS4(r), x)
})
# Convolution over selected coverage islands must change only those islands.
test_that("Convolution over sparse Rle is bueno", {
cvr <- readRDS(system.file('extdata', 'coverage.rds', package="biosignals"))
all.islands <- slice(cvr, lower=0, includeLower=FALSE, rangesOnly=TRUE)
## these look like "normal" signals
smooth.idx <- c(801, 9021, 9022)
normal.islands <- all.islands[smooth.idx]
normal.smooth <- convolve1d(Views(cvr, normal.islands))
cvrs <- subject(normal.smooth)
expect_equal(length(cvrs), length(cvr))
expect_equal(length(normal.smooth), length(normal.islands))
## expect that a range that wasn't convolved isn't different
## (FIX: seq_along() instead of 1:length() guards the empty edge case, and
## the sampled indices are now actually used — the original drew ns.idx and
## s.idx but then tested hard-coded islands 100 and 801.)
not.smooth <- setdiff(seq_along(all.islands), smooth.idx)
ns.idx <- sample(not.smooth, 1)
expect_equal(cvrs[all.islands[ns.idx]], cvr[all.islands[ns.idx]])
## data in range that was convolved should be different
s.idx <- sample(smooth.idx, 1)
expect_false(all(cvrs[all.islands[s.idx]] == cvr[all.islands[s.idx]]))
})
|
/inst/tests/test-Rle.R
|
no_license
|
lianos/biosignals
|
R
| false
| false
| 1,572
|
r
|
context("Rle")
test_that("Rle construction works", {
x1 <- c(1, 1, 1, 1, 2, 2, 3, 4, 4, 4, 5, 6, 8, 8, 8.01)
expect_true(all(biosignals:::asRle(x1) == Rle(x1)))
})
test_that("Rle constructor handles weird input", {
## An Rle with uniform input killed my entire sunday
r1 <- biosignals:::asRle(rep(10, 5))
ir1 <- Rle(rep(10, 5))
expect_true(all(r1 == ir1))
r2 <- biosignals:::asRle(numeric())
ir2 <- Rle(numeric())
expect_true(all(r2 == ir2))
})
test_that("Rle expansion works", {
x <- c(0, 0, 1, 1, 1, 0.1, 0.1, 0.2, 3, 3, 4)
r <- Rle(x)
expect_equal(biosignals:::expandRle(r), x)
expect_equal(biosignals:::expandRleS4(r), x)
})
# Convolution over selected coverage islands must change only those islands.
test_that("Convolution over sparse Rle is bueno", {
cvr <- readRDS(system.file('extdata', 'coverage.rds', package="biosignals"))
all.islands <- slice(cvr, lower=0, includeLower=FALSE, rangesOnly=TRUE)
## these look like "normal" signals
smooth.idx <- c(801, 9021, 9022)
normal.islands <- all.islands[smooth.idx]
normal.smooth <- convolve1d(Views(cvr, normal.islands))
cvrs <- subject(normal.smooth)
expect_equal(length(cvrs), length(cvr))
expect_equal(length(normal.smooth), length(normal.islands))
## expect that a range that wasn't convolved isn't different
## (FIX: seq_along() instead of 1:length() guards the empty edge case, and
## the sampled indices are now actually used — the original drew ns.idx and
## s.idx but then tested hard-coded islands 100 and 801.)
not.smooth <- setdiff(seq_along(all.islands), smooth.idx)
ns.idx <- sample(not.smooth, 1)
expect_equal(cvrs[all.islands[ns.idx]], cvr[all.islands[ns.idx]])
## data in range that was convolved should be different
s.idx <- sample(smooth.idx, 1)
expect_false(all(cvrs[all.islands[s.idx]] == cvr[all.islands[s.idx]]))
})
|
\name{regcor}
\alias{regcor}
\title{
Regularized correlation matrix estimation
}
\description{
\code{regcor} is a function that determines the optimal penalty value and, subsequently, the optimal Ledoit-Wolf type regularized correlation matrix using K-fold cross validation of the negative log-likelihood.
}
\usage{
regcor(X, fold = 5, verbose = TRUE)
}
\arguments{
\item{X}{
A (possibly centered and scaled and possibly subsetted) data \code{matrix}.
}
\item{fold}{
A \code{numeric} integer or \code{integer} indicating the number of folds to use in cross-validation.
}
\item{verbose}{
A \code{logical} indicating if function should run silently.\cr
Runs silently when \code{verbose = FALSE}.
}
}
\details{
This function estimates a Ledoit-Wolf-type (Ledoit & Wolf, 2004) regularized correlation matrix.
The optimal penalty-value is determined internally by \emph{K}-fold cross-validation of the negative log-likelihood function.
The procedure is efficient as it makes use of the Brent root-finding procedure (Brent, 1971).
The value at which the \emph{K}-fold cross-validated negative log-likelihood score is minimized is deemed optimal.
The function employs the Brent algorithm as implemented in the \href{https://stat.ethz.ch/R-manual/R-devel/library/stats/html/optim.html}{optim} function.
It outputs the optimal value for the penalty parameter and the regularized correlation matrix under this optimal penalty value.
See Peeters \emph{et al.} (2019) for further details.
The optimal penalty-value can be used to assess the conditioning of the estimated regularized correlation matrix using, for example, a condition number plot (Peeters, van de Wiel, van Wieringen, 2016).
The regularized correlation matrix under the optimal penalty can serve as the input to functions that assess factorability (\code{\link{SA}}), evaluate optimal choices of the latent common factor dimensionality (e.g., \code{\link{dimGB}}), and perform maximum likelihood factor analysis (\code{\link{mlFA}}).
}
\value{
The function returns an object of class \code{list}:
\item{$optPen}{A \code{numeric} scalar representing the optimal value for the penalty parameter.}
\item{$optCor}{A \code{matrix} representing the regularized correlation matrix under the optimal penalty-value.}
}
\references{
Brent, R.P. (1971). An Algorithm with Guaranteed Convergence for Finding a Zero of a Function. Computer Journal 14: 422--425.
Ledoit, O, & Wolf, M. (2004). A well-conditioned estimator for large-dimensional covariance matrices. Journal of Multivariate Analysis, 88:365--411.
Peeters, C.F.W. \emph{et al.} (2019). Stable prediction with radiomics data.
\href{https://arxiv.org/abs/1903.11696}{arXiv:1903.11696 [stat.ML]}.
Peeters, C.F.W., van de Wiel, M.A., & van Wieringen, W.N. (2016). The spectral condition number plot for regularization parameter determination, arXiv:1608.04123v1 [stat.CO].
}
\author{
Carel F.W. Peeters <cf.peeters@vumc.nl>
}
\note{
Note that, for argument \code{X}, the observations are expected to be in the rows and the features are expected to be in the columns.
}
\seealso{
\code{\link{RF}}, \code{\link{subSet}}, \code{\link{SA}}, \code{\link{dimGB}}, \code{\link{mlFA}}
}
\examples{
## Generate some (high-dimensional) data
## Get correlation matrix
p = 25
n = 10
set.seed(333)
X = matrix(rnorm(n*p), nrow = n, ncol = p)
colnames(X)[1:25] = letters[1:25]
R <- cor(X)
## Redundancy visualization, at threshold value .9
radioHeat(R, diag = FALSE, threshold = TRUE, threshvalue = .9)
## Redundancy-filtering of correlation matrix
Rfilter <- RF(R, t = .9)
dim(Rfilter)
## Subsetting data
DataSubset <- subSet(X, Rfilter)
dim(DataSubset)
## Obtain regularized correlation matrix
RegR <- regcor(DataSubset, fold = 5, verbose = TRUE)
RegR$optPen ## optimal penalty-value
}
\concept{regularized correlation}
|
/man/regcor.Rd
|
no_license
|
CFWP/FMradio
|
R
| false
| false
| 3,845
|
rd
|
\name{regcor}
\alias{regcor}
\title{
Regularized correlation matrix estimation
}
\description{
\code{regcor} is a function that determines the optimal penalty value and, subsequently, the optimal Ledoit-Wolf type regularized correlation matrix using K-fold cross validation of the negative log-likelihood.
}
\usage{
regcor(X, fold = 5, verbose = TRUE)
}
\arguments{
\item{X}{
A (possibly centered and scaled and possibly subsetted) data \code{matrix}.
}
\item{fold}{
A \code{numeric} integer or \code{integer} indicating the number of folds to use in cross-validation.
}
\item{verbose}{
A \code{logical} indicating if function should run silently.\cr
Runs silently when \code{verbose = FALSE}.
}
}
\details{
This function estimates a Ledoit-Wolf-type (Ledoit & Wolf, 2004) regularized correlation matrix.
The optimal penalty-value is determined internally by \emph{K}-fold cross-validation of the negative log-likelihood function.
The procedure is efficient as it makes use of the Brent root-finding procedure (Brent, 1971).
The value at which the \emph{K}-fold cross-validated negative log-likelihood score is minimized is deemed optimal.
The function employs the Brent algorithm as implemented in the \href{https://stat.ethz.ch/R-manual/R-devel/library/stats/html/optim.html}{optim} function.
It outputs the optimal value for the penalty parameter and the regularized correlation matrix under this optimal penalty value.
See Peeters \emph{et al.} (2019) for further details.
The optimal penalty-value can be used to assess the conditioning of the estimated regularized correlation matrix using, for example, a condition number plot (Peeters, van de Wiel, van Wieringen, 2016).
The regularized correlation matrix under the optimal penalty can serve as the input to functions that assess factorability (\code{\link{SA}}), evaluate optimal choices of the latent common factor dimensionality (e.g., \code{\link{dimGB}}), and perform maximum likelihood factor analysis (\code{\link{mlFA}}).
}
\value{
The function returns an object of class \code{list}:
\item{$optPen}{A \code{numeric} scalar representing the optimal value for the penalty parameter.}
\item{$optCor}{A \code{matrix} representing the regularized correlation matrix under the optimal penalty-value.}
}
\references{
Brent, R.P. (1971). An Algorithm with Guaranteed Convergence for Finding a Zero of a Function. Computer Journal 14: 422--425.
Ledoit, O, & Wolf, M. (2004). A well-conditioned estimator for large-dimensional covariance matrices. Journal of Multivariate Analysis, 88:365--411.
Peeters, C.F.W. \emph{et al.} (2019). Stable prediction with radiomics data.
\href{https://arxiv.org/abs/1903.11696}{arXiv:1903.11696 [stat.ML]}.
Peeters, C.F.W., van de Wiel, M.A., & van Wieringen, W.N. (2016). The spectral condition number plot for regularization parameter determination, arXiv:1608.04123v1 [stat.CO].
}
\author{
Carel F.W. Peeters <cf.peeters@vumc.nl>
}
\note{
Note that, for argument \code{X}, the observations are expected to be in the rows and the features are expected to be in the columns.
}
\seealso{
\code{\link{RF}}, \code{\link{subSet}}, \code{\link{SA}}, \code{\link{dimGB}}, \code{\link{mlFA}}
}
\examples{
## Generate some (high-dimensional) data
## Get correlation matrix
p = 25
n = 10
set.seed(333)
X = matrix(rnorm(n*p), nrow = n, ncol = p)
colnames(X)[1:25] = letters[1:25]
R <- cor(X)
## Redundancy visualization, at threshold value .9
radioHeat(R, diag = FALSE, threshold = TRUE, threshvalue = .9)
## Redundancy-filtering of correlation matrix
Rfilter <- RF(R, t = .9)
dim(Rfilter)
## Subsetting data
DataSubset <- subSet(X, Rfilter)
dim(DataSubset)
## Obtain regularized correlation matrix
RegR <- regcor(DataSubset, fold = 5, verbose = TRUE)
RegR$optPen ## optimal penalty-value
}
\concept{regularized correlation}
|
# Logistic regression project
# External root resorption (ERR) after replantation of permanent teeth.
# Goal: identify the association of the outcome (ERR) with age at the time
# of trauma and with clinical variables related to the emergency management
# and treatment of the avulsed tooth.
# Clear the workspace (deliberate script-style reset)
rm(list = ls())
# Packages (pacman bootstraps itself, then loads everything else)
if (!require(pacman)) install.packages('pacman')
library(pacman)
pacman::p_load(dplyr, psych, car, ggplot2, janitor, mfx, caret, stargazer, faraway, ResourceSelection, hnp, rms)
# Data set (decimal comma format)
dados <- read.table('http://www.est.ufmg.br/~enricoc/pdf/avancados_medicina/ProjetoEnrico.txt',
                    header = TRUE, dec = ',')
View(dados)
glimpse(dados)
# Check for missing data
# BUG FIX: is.null(dados) is always FALSE for a data.frame and says nothing
# about missing values; count NAs instead.
sum(is.na(dados))
# Drop variables that will not be used
dados <- subset(dados, select = -c(Indice1, Splintd, NumRegistro))
glimpse(dados)
# Fix variable classes: factors for categorical predictors, numeric for the
# elapsed-time variable.
dados$Idade11 <- as.factor(dados$Idade11)
dados$Idde16 <- as.factor(dados$Idde16)
dados$Pereob15 <- as.factor(dados$Pereob15)
dados$Meio3 <- as.factor(dados$Meio3)
dados$TempoTERd <- as.numeric(dados$TempoTERd)
dados$Ind1gbin <- as.factor(dados$Ind1gbin)
glimpse(dados)
# Recode the response variable as 0/1 (original levels 1/2)
dados$Ind1gbin_cat <- Recode(dados$Ind1gbin, '1 = 0; 2 = 1', as.factor = TRUE)
# Descriptive analysis: absolute and relative frequency tables for each
# categorical predictor, plus summaries and a boxplot for the numeric one.

# Age group (<= 11 vs > 11 years)
data.frame(table(dados$Idade11)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Idade11 = Var1)
# Age group (<= 16 vs > 16 years)
data.frame(table(dados$Idde16)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Idde16 = Var1)
# Time variable Pereob15 (<= 15 vs > 15 minutes; see univariate comments below)
data.frame(table(dados$Pereob15)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Pereob15 = Var1)
# Storage medium of the avulsed tooth (milk / dry / wet media)
data.frame(table(dados$Meio3)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Meio3 = Var1)
# Numeric predictor: days between replantation and the root canal
# treatment (TER)
describe(dados$TempoTERd)
summary(dados$TempoTERd)
ggplot(dados, aes(x = '', y = TempoTERd))+
  geom_boxplot(fill = 'gray', colour = 'black',
               outlier.colour = 'red', outlier.size = 2)+
  labs(x = '', y = 'Tempo TER',
       subtitle = 'Medido em dias',
       title = 'Tempo decorrido entre o reimplante dentário
       e a realização do TER (Tratamento Endodontico Radical)')+
  theme_classic()
# Distribution of the recoded response variable
data.frame(table(dados$Ind1gbin_cat)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Ind1gbin_cat = Var1)
# Collinearity? (assessed later with VIF in the multiple model)
# Univariate analysis: frequency cross-tables with column percentages and
# chi-squared tests of association (Fisher's exact test is also run for
# some variables).
dados %>%
  tabyl(Idade11, Ind1gbin_cat) %>%
  adorn_totals(c('row', 'col')) %>%
  adorn_percentages("col") %>%
  adorn_pct_formatting(digits = 1) %>%
  adorn_ns %>%
  adorn_title('combined') %>%
  knitr::kable()
chisq.test(dados$Idade11, dados$Ind1gbin_cat, correct = F)
dados %>%
  tabyl(Idde16, Ind1gbin_cat) %>%
  adorn_totals(c('row', 'col')) %>%
  adorn_percentages("col") %>%
  adorn_pct_formatting(digits = 1) %>%
  adorn_ns %>%
  adorn_title('combined') %>%
  knitr::kable()
chisq.test(dados$Idde16, dados$Ind1gbin_cat, correct = F)
fisher.test(dados$Idde16, dados$Ind1gbin_cat)
dados %>%
  tabyl(Pereob15, Ind1gbin_cat) %>%
  adorn_totals(c('row', 'col')) %>%
  adorn_percentages("col") %>%
  adorn_pct_formatting(digits = 1) %>%
  adorn_ns %>%
  adorn_title('combined') %>%
  knitr::kable()
chisq.test(dados$Pereob15, dados$Ind1gbin_cat, correct = F)
fisher.test(dados$Pereob15, dados$Ind1gbin_cat)
dados %>%
  tabyl(Meio3, Ind1gbin_cat) %>%
  adorn_totals(c('row', 'col')) %>%
  adorn_percentages("col") %>%
  adorn_pct_formatting(digits = 1) %>%
  adorn_ns %>%
  adorn_title('combined') %>%
  knitr::kable()
chisq.test(dados$Meio3, dados$Ind1gbin_cat, correct = F)
# Group-wise summary of TempoTERd by outcome and a scatter plot with a
# fitted logistic curve
describeBy(dados$TempoTERd, group = dados$Ind1gbin_cat, digits = 3)
ggplot(dados, aes(x=TempoTERd, y=Ind1gbin_cat)) +
  geom_point() +
  stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)
# NOTE(review): Fisher's exact test on a continuous variable treats every
# distinct value as its own category -- confirm this is intended.
fisher.test(dados$TempoTERd, dados$Ind1gbin_cat)
# Univariate analysis (logistic regression, odds ratios and confidence intervals)
ajusteIdade11 <- glm(Ind1gbin_cat ~ Idade11, family = binomial(link = "logit"), data = dados)
summary(ajusteIdade11)
anova(ajusteIdade11, test = "Chisq")
# Individuals older than 11 years show on average 0.9% less resorption
# observed at the first visit of the root canal treatment (TER)
# compared with individuals aged <= 11 years.
# Statistical significance at p = 0.009 for including this variable.
logitor(Ind1gbin_cat ~ Idade11, data = dados)
exp(cbind(OR=coef(ajusteIdade11), confint(ajusteIdade11)))
# The odds of resorption observed at the first TER visit for individuals
# older than 11 years are about 0.38 times the odds of the <= 11 group.
ajusteIdde16 <- glm(Ind1gbin_cat ~ Idde16, family = binomial(link = "logit"), data = dados)
summary(ajusteIdde16)
anova(ajusteIdde16, test = "Chisq")
# Individuals older than 16 years show on average 2.4% less resorption
# observed at the first TER visit compared with individuals aged <= 16 years.
# Statistical significance at p = 0.023 for including this variable.
logitor(Ind1gbin_cat ~ Idde16, data = dados)
exp(cbind(OR=coef(ajusteIdde16), confint(ajusteIdde16)))
# The odds of resorption observed at the first TER visit for individuals
# older than 16 years are about 0.94 times the odds of the <= 16 group.
ajustePereob15 <- glm(Ind1gbin_cat ~ Pereob15, family = binomial(link = "logit"), data = dados)
summary(ajustePereob15)
anova(ajustePereob15, test = "Chisq")
# Times > 15 min show on average 0.6% more resorption observed at the
# first TER visit compared with times <= 15 min.
# No statistical significance (p = 0.423) for including this variable.
logitor(Ind1gbin_cat ~ Pereob15, data = dados)
exp(cbind(OR=coef(ajustePereob15), confint(ajustePereob15)))
# The odds of resorption observed at the first TER visit for times
# > 15 min are about 1.89 times the odds of the <= 15 min group.
ajusteMeio3 <- glm(Ind1gbin_cat ~ Meio3, family = binomial(link = "logit"), data = dados)
summary(ajusteMeio3)
anova(ajusteMeio3, test = "Chisq")
# Milk shows on average 0.08% less resorption observed at the first TER
# visit compared with wet media.
# Dry storage shows on average 0.14% more resorption observed at the
# first TER visit compared with wet media.
# No statistical significance (p = 0.867 and p = 0.722) for these levels.
logitor(Ind1gbin_cat ~ Meio3, data = dados)
exp(cbind(OR=coef(ajusteMeio3), confint(ajusteMeio3)))
# The odds of resorption at the first TER visit for milk storage are
# about 0.92 times the odds for wet media.
# The odds of resorption at the first TER visit for dry storage are
# about 1.15 times the odds for wet media.
ajusteTempoTERd <- glm(Ind1gbin_cat ~ TempoTERd, family = binomial(link = "logit"), data = dados)
summary(ajusteTempoTERd)
anova(ajusteTempoTERd, test = "Chisq")
# A 1-day increase in the time between replantation and the TER raises
# the average resorption index observed at the first TER visit by 0.004%.
# Statistical significance at p = 0.000 for including this variable.
logitor(Ind1gbin_cat ~ TempoTERd, data = dados)
exp(cbind(OR=coef(ajusteTempoTERd), confint(ajusteTempoTERd)))
# For each unit change in TempoTERd the odds of Ind1gbin_cat are
# multiplied by about 1.00.
# Confusion matrix: classify with a 0.5 probability cutoff and compare
# the predicted class with the observed response
dados$pdata <- as.factor(
  ifelse(
    predict(ajusteTempoTERd,
            newdata = dados,
            type = "response")
    >0.5,"1","0"))
confusionMatrix(dados$pdata, dados$Ind1gbin_cat, positive="1")
# Multiple logistic regression: age group + time until endodontic treatment
ajuste1 <- glm(Ind1gbin_cat ~ Idade11 + TempoTERd, family = binomial(link = "logit"), data = dados)
summary(ajuste1)
anova(ajuste1, test = "Chisq")
# Open questions from the original analysis:
# Is TempoTERd linearly related to the logit? Is there an interaction?
stargazer(ajuste1, title = "Resultados", type = "text")

# Odds ratios and 95% confidence intervals
logitor(Ind1gbin_cat ~ Idade11 + TempoTERd, data = dados)
exp(coef(ajuste1))
exp(cbind(OR = coef(ajuste1), confint(ajuste1)))

# Multicollinearity check (variance inflation factors)
vif(ajuste1)

# Hosmer-Lemeshow goodness-of-fit test
# BUG FIX: hoslem.test() expects a numeric 0/1 outcome vector; passing the
# factor Ind1gbin_cat directly compares its internal 1/2 codes with the
# fitted probabilities and invalidates the test. Convert level labels
# ("0"/"1") back to numeric first.
hl <- hoslem.test(as.numeric(as.character(dados$Ind1gbin_cat)), fitted(ajuste1), g = 10)
hl
# Normal Q-Q plot of standardized deviance residuals with a simulated
# envelope -- a standard goodness-of-fit diagnostic for binomial GLMs.
fit.model <- ajuste1
par(mfrow = c(1, 1))
X <- model.matrix(fit.model)
n <- nrow(X)
p <- ncol(X)
# Hat matrix of the IWLS fit: H = W^(1/2) X (X'WX)^(-1) X' W^(1/2)
w <- fit.model$weights
W <- diag(w)
H <- solve(t(X) %*% W %*% X)
H <- sqrt(W) %*% X %*% H %*% t(X) %*% sqrt(W)
h <- diag(H)
# Standardized deviance residuals of the fitted model
td <- resid(fit.model, type = "deviance") / sqrt(1 - h)
# Simulate 100 binary response vectors from the fitted probabilities,
# refit the model on each, and store the sorted standardized deviance
# residuals of every simulated fit (one column per simulation).
e <- matrix(0, n, 100)
for(i in 1:100){
  # Bernoulli draw: nresp[j] = 1 when runif < fitted probability
  dif <- runif(n) - fitted(fit.model)
  dif[dif >= 0 ] <- 0
  dif[dif < 0] <- 1
  nresp <- dif
  fit <- glm(nresp ~ X, family = binomial)
  w <- fit$weights
  W <- diag(w)
  H <- solve(t(X) %*% W %*% X)
  H <- sqrt(W) %*% X %*% H %*% t(X) %*% sqrt(W)
  h <- diag(H)
  e[,i] <- sort(resid(fit, type = "deviance") / sqrt(1 - h))
}
# Pointwise envelope limits: the 5th and 95th order statistics of the
# 100 simulated residuals at each position (an approximate 90% band)
e1 <- numeric(n)
e2 <- numeric(n)
for(i in 1:n){
  eo <- sort(e[i,])
  e1[i] <- eo[5]
  e2[i] <- eo[95]
}
med <- apply(e, 1, mean)  # envelope midline
faixa <- range(td, e1, e2)
# Overlay: observed residuals (points), envelope limits (solid lines)
# and the simulated mean (dashed line), all on the same axes
par(pty = "s")
qqnorm(td, xlab = "Percentis da N(0,1)", ylab = "Componente da deviance", ylim = faixa, pch = 16)
par(new = T)
qqnorm(e1, axes = F, xlab = "", ylab = "", type = "l", ylim = faixa, lty = 1)
par(new = T)
qqnorm(e2, axes = F, xlab = "", ylab = "", type = "l", ylim = faixa, lty = 1)
par(new = T)
qqnorm(med, axes = F, xlab = "", ylab = "", type = "l", ylim = faixa, lty = 2)
# Nomogram: graphical scoring chart for the multiple logistic model.
# nomogram() requires an rms-style fit, so the model is refitted with lrm();
# datadist() supplies variable ranges to the rms machinery.
ddist <- datadist(dados)
options(datadist='ddist')
ajuste1r<-lrm(Ind1gbin_cat ~ Idade11 + TempoTERd, data = dados)
nom<-nomogram(ajuste1r,fun=plogis,funlabel="probabilidade",
              fun.at=c(.01,.05,.1,.25,.5,.75,.90,.95,.99))
plot(nom,xfrac=0.45)
|
/7. Projeto Regressão Logística.R
|
no_license
|
amandasmagalhaes/metodos-estatisticos-epidemio
|
R
| false
| false
| 10,322
|
r
|
# Logistic Regression Project
# External root resorption (ERR) after replantation of permanent teeth
# The goal of the study was to identify the association of the outcome (ERR)
# with age at the time of trauma and with clinical variables related to the
# emergency management and treatment of the avulsed tooth.

# Clear the workspace so the analysis starts from a clean state
rm(list = ls())

# Packages: pacman installs (if needed) and attaches everything in one call
if (!require(pacman)) install.packages('pacman')
library(pacman)
pacman::p_load(dplyr, psych, car, ggplot2, janitor, mfx, caret, stargazer, faraway, ResourceSelection, hnp, rms)

# Data set (remote text file; comma is the decimal separator)
dados <- read.table('http://www.est.ufmg.br/~enricoc/pdf/avancados_medicina/ProjetoEnrico.txt',
                    header = TRUE, dec = ',')
View(dados)
glimpse(dados)

# Check for missing values.
# BUG FIX: is.null(dados) is always FALSE for an existing object and says
# nothing about missing cells; anyNA() actually detects NA entries.
anyNA(dados)

# Drop variables that will not be used
dados <- subset(dados, select = -c(Indice1, Splintd, NumRegistro))
glimpse(dados)

# Convert variable classes
# character/factor for categorical data;
# integer/numeric (double)/complex for numbers
dados$Idade11 <- as.factor(dados$Idade11)
dados$Idde16 <- as.factor(dados$Idde16)
dados$Pereob15 <- as.factor(dados$Pereob15)
dados$Meio3 <- as.factor(dados$Meio3)
dados$TempoTERd <- as.numeric(dados$TempoTERd)
dados$Ind1gbin <- as.factor(dados$Ind1gbin)
glimpse(dados)

# Recode the response variable to 0/1 (level 1 -> 0, level 2 -> 1)
dados$Ind1gbin_cat <- Recode(dados$Ind1gbin, '1 = 0; 2 = 1', as.factor = TRUE)
# Descriptive analysis: absolute and relative frequency tables for each
# categorical predictor, plus summaries and a boxplot for the numeric one.

# Age group (<= 11 vs > 11 years)
data.frame(table(dados$Idade11)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Idade11 = Var1)
# Age group (<= 16 vs > 16 years)
data.frame(table(dados$Idde16)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Idde16 = Var1)
# Time variable Pereob15 (<= 15 vs > 15 minutes; see univariate comments below)
data.frame(table(dados$Pereob15)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Pereob15 = Var1)
# Storage medium of the avulsed tooth (milk / dry / wet media)
data.frame(table(dados$Meio3)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Meio3 = Var1)
# Numeric predictor: days between replantation and the root canal
# treatment (TER)
describe(dados$TempoTERd)
summary(dados$TempoTERd)
ggplot(dados, aes(x = '', y = TempoTERd))+
  geom_boxplot(fill = 'gray', colour = 'black',
               outlier.colour = 'red', outlier.size = 2)+
  labs(x = '', y = 'Tempo TER',
       subtitle = 'Medido em dias',
       title = 'Tempo decorrido entre o reimplante dentário
       e a realização do TER (Tratamento Endodontico Radical)')+
  theme_classic()
# Distribution of the recoded response variable
data.frame(table(dados$Ind1gbin_cat)) %>%
  mutate(Rel_Freq = Freq/sum(Freq)) %>%
  rename(Ind1gbin_cat = Var1)
# Collinearity? (assessed later with VIF in the multiple model)
# Univariate analysis: frequency cross-tables with column percentages and
# chi-squared tests of association (Fisher's exact test is also run for
# some variables).
dados %>%
  tabyl(Idade11, Ind1gbin_cat) %>%
  adorn_totals(c('row', 'col')) %>%
  adorn_percentages("col") %>%
  adorn_pct_formatting(digits = 1) %>%
  adorn_ns %>%
  adorn_title('combined') %>%
  knitr::kable()
chisq.test(dados$Idade11, dados$Ind1gbin_cat, correct = F)
dados %>%
  tabyl(Idde16, Ind1gbin_cat) %>%
  adorn_totals(c('row', 'col')) %>%
  adorn_percentages("col") %>%
  adorn_pct_formatting(digits = 1) %>%
  adorn_ns %>%
  adorn_title('combined') %>%
  knitr::kable()
chisq.test(dados$Idde16, dados$Ind1gbin_cat, correct = F)
fisher.test(dados$Idde16, dados$Ind1gbin_cat)
dados %>%
  tabyl(Pereob15, Ind1gbin_cat) %>%
  adorn_totals(c('row', 'col')) %>%
  adorn_percentages("col") %>%
  adorn_pct_formatting(digits = 1) %>%
  adorn_ns %>%
  adorn_title('combined') %>%
  knitr::kable()
chisq.test(dados$Pereob15, dados$Ind1gbin_cat, correct = F)
fisher.test(dados$Pereob15, dados$Ind1gbin_cat)
dados %>%
  tabyl(Meio3, Ind1gbin_cat) %>%
  adorn_totals(c('row', 'col')) %>%
  adorn_percentages("col") %>%
  adorn_pct_formatting(digits = 1) %>%
  adorn_ns %>%
  adorn_title('combined') %>%
  knitr::kable()
chisq.test(dados$Meio3, dados$Ind1gbin_cat, correct = F)
# Group-wise summary of TempoTERd by outcome and a scatter plot with a
# fitted logistic curve
describeBy(dados$TempoTERd, group = dados$Ind1gbin_cat, digits = 3)
ggplot(dados, aes(x=TempoTERd, y=Ind1gbin_cat)) +
  geom_point() +
  stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)
# NOTE(review): Fisher's exact test on a continuous variable treats every
# distinct value as its own category -- confirm this is intended.
fisher.test(dados$TempoTERd, dados$Ind1gbin_cat)
# Univariate analysis (logistic regression, odds ratios and confidence intervals)
ajusteIdade11 <- glm(Ind1gbin_cat ~ Idade11, family = binomial(link = "logit"), data = dados)
summary(ajusteIdade11)
anova(ajusteIdade11, test = "Chisq")
# Individuals older than 11 years show on average 0.9% less resorption
# observed at the first visit of the root canal treatment (TER)
# compared with individuals aged <= 11 years.
# Statistical significance at p = 0.009 for including this variable.
logitor(Ind1gbin_cat ~ Idade11, data = dados)
exp(cbind(OR=coef(ajusteIdade11), confint(ajusteIdade11)))
# The odds of resorption observed at the first TER visit for individuals
# older than 11 years are about 0.38 times the odds of the <= 11 group.
ajusteIdde16 <- glm(Ind1gbin_cat ~ Idde16, family = binomial(link = "logit"), data = dados)
summary(ajusteIdde16)
anova(ajusteIdde16, test = "Chisq")
# Individuals older than 16 years show on average 2.4% less resorption
# observed at the first TER visit compared with individuals aged <= 16 years.
# Statistical significance at p = 0.023 for including this variable.
logitor(Ind1gbin_cat ~ Idde16, data = dados)
exp(cbind(OR=coef(ajusteIdde16), confint(ajusteIdde16)))
# The odds of resorption observed at the first TER visit for individuals
# older than 16 years are about 0.94 times the odds of the <= 16 group.
ajustePereob15 <- glm(Ind1gbin_cat ~ Pereob15, family = binomial(link = "logit"), data = dados)
summary(ajustePereob15)
anova(ajustePereob15, test = "Chisq")
# Times > 15 min show on average 0.6% more resorption observed at the
# first TER visit compared with times <= 15 min.
# No statistical significance (p = 0.423) for including this variable.
logitor(Ind1gbin_cat ~ Pereob15, data = dados)
exp(cbind(OR=coef(ajustePereob15), confint(ajustePereob15)))
# The odds of resorption observed at the first TER visit for times
# > 15 min are about 1.89 times the odds of the <= 15 min group.
ajusteMeio3 <- glm(Ind1gbin_cat ~ Meio3, family = binomial(link = "logit"), data = dados)
summary(ajusteMeio3)
anova(ajusteMeio3, test = "Chisq")
# Milk shows on average 0.08% less resorption observed at the first TER
# visit compared with wet media.
# Dry storage shows on average 0.14% more resorption observed at the
# first TER visit compared with wet media.
# No statistical significance (p = 0.867 and p = 0.722) for these levels.
logitor(Ind1gbin_cat ~ Meio3, data = dados)
exp(cbind(OR=coef(ajusteMeio3), confint(ajusteMeio3)))
# The odds of resorption at the first TER visit for milk storage are
# about 0.92 times the odds for wet media.
# The odds of resorption at the first TER visit for dry storage are
# about 1.15 times the odds for wet media.
ajusteTempoTERd <- glm(Ind1gbin_cat ~ TempoTERd, family = binomial(link = "logit"), data = dados)
summary(ajusteTempoTERd)
anova(ajusteTempoTERd, test = "Chisq")
# A 1-day increase in the time between replantation and the TER raises
# the average resorption index observed at the first TER visit by 0.004%.
# Statistical significance at p = 0.000 for including this variable.
logitor(Ind1gbin_cat ~ TempoTERd, data = dados)
exp(cbind(OR=coef(ajusteTempoTERd), confint(ajusteTempoTERd)))
# For each unit change in TempoTERd the odds of Ind1gbin_cat are
# multiplied by about 1.00.
# Confusion matrix: classify with a 0.5 probability cutoff and compare
# the predicted class with the observed response
dados$pdata <- as.factor(
  ifelse(
    predict(ajusteTempoTERd,
            newdata = dados,
            type = "response")
    >0.5,"1","0"))
confusionMatrix(dados$pdata, dados$Ind1gbin_cat, positive="1")
# Multiple logistic regression: age group + time until endodontic treatment
ajuste1 <- glm(Ind1gbin_cat ~ Idade11 + TempoTERd, family = binomial(link = "logit"), data = dados)
summary(ajuste1)
anova(ajuste1, test = "Chisq")
# Open questions from the original analysis:
# Is TempoTERd linearly related to the logit? Is there an interaction?
stargazer(ajuste1, title = "Resultados", type = "text")

# Odds ratios and 95% confidence intervals
logitor(Ind1gbin_cat ~ Idade11 + TempoTERd, data = dados)
exp(coef(ajuste1))
exp(cbind(OR = coef(ajuste1), confint(ajuste1)))

# Multicollinearity check (variance inflation factors)
vif(ajuste1)

# Hosmer-Lemeshow goodness-of-fit test
# BUG FIX: hoslem.test() expects a numeric 0/1 outcome vector; passing the
# factor Ind1gbin_cat directly compares its internal 1/2 codes with the
# fitted probabilities and invalidates the test. Convert level labels
# ("0"/"1") back to numeric first.
hl <- hoslem.test(as.numeric(as.character(dados$Ind1gbin_cat)), fitted(ajuste1), g = 10)
hl
# Normal Q-Q plot of standardized deviance residuals with a simulated
# envelope -- a standard goodness-of-fit diagnostic for binomial GLMs.
fit.model <- ajuste1
par(mfrow = c(1, 1))
X <- model.matrix(fit.model)
n <- nrow(X)
p <- ncol(X)
# Hat matrix of the IWLS fit: H = W^(1/2) X (X'WX)^(-1) X' W^(1/2)
w <- fit.model$weights
W <- diag(w)
H <- solve(t(X) %*% W %*% X)
H <- sqrt(W) %*% X %*% H %*% t(X) %*% sqrt(W)
h <- diag(H)
# Standardized deviance residuals of the fitted model
td <- resid(fit.model, type = "deviance") / sqrt(1 - h)
# Simulate 100 binary response vectors from the fitted probabilities,
# refit the model on each, and store the sorted standardized deviance
# residuals of every simulated fit (one column per simulation).
e <- matrix(0, n, 100)
for(i in 1:100){
  # Bernoulli draw: nresp[j] = 1 when runif < fitted probability
  dif <- runif(n) - fitted(fit.model)
  dif[dif >= 0 ] <- 0
  dif[dif < 0] <- 1
  nresp <- dif
  fit <- glm(nresp ~ X, family = binomial)
  w <- fit$weights
  W <- diag(w)
  H <- solve(t(X) %*% W %*% X)
  H <- sqrt(W) %*% X %*% H %*% t(X) %*% sqrt(W)
  h <- diag(H)
  e[,i] <- sort(resid(fit, type = "deviance") / sqrt(1 - h))
}
# Pointwise envelope limits: the 5th and 95th order statistics of the
# 100 simulated residuals at each position (an approximate 90% band)
e1 <- numeric(n)
e2 <- numeric(n)
for(i in 1:n){
  eo <- sort(e[i,])
  e1[i] <- eo[5]
  e2[i] <- eo[95]
}
med <- apply(e, 1, mean)  # envelope midline
faixa <- range(td, e1, e2)
# Overlay: observed residuals (points), envelope limits (solid lines)
# and the simulated mean (dashed line), all on the same axes
par(pty = "s")
qqnorm(td, xlab = "Percentis da N(0,1)", ylab = "Componente da deviance", ylim = faixa, pch = 16)
par(new = T)
qqnorm(e1, axes = F, xlab = "", ylab = "", type = "l", ylim = faixa, lty = 1)
par(new = T)
qqnorm(e2, axes = F, xlab = "", ylab = "", type = "l", ylim = faixa, lty = 1)
par(new = T)
qqnorm(med, axes = F, xlab = "", ylab = "", type = "l", ylim = faixa, lty = 2)
# Nomogram: graphical scoring chart for the multiple logistic model.
# nomogram() requires an rms-style fit, so the model is refitted with lrm();
# datadist() supplies variable ranges to the rms machinery.
ddist <- datadist(dados)
options(datadist='ddist')
ajuste1r<-lrm(Ind1gbin_cat ~ Idade11 + TempoTERd, data = dados)
nom<-nomogram(ajuste1r,fun=plogis,funlabel="probabilidade",
              fun.at=c(.01,.05,.1,.25,.5,.75,.90,.95,.99))
plot(nom,xfrac=0.45)
|
#Random Forest Model
library(randomForest)
library('dplyr')
library("caret")
# Split the data into train and test sets
# Fix the RNG seed so the same sample is drawn on every run
set.seed(123)
# NOTE(review): `data` is not created in this script; it is assumed to be
# already present in the session (loaded by an earlier step) -- confirm.
train = data[sample(nrow(data), 20000, replace = F), ]
test = data[!(1:nrow(data)) %in% as.numeric(row.names(train)), ]
# Random forest regression of X1 on the selected predictors
rf_model <- randomForest(X1 ~ X4 + X8 + X9 + X13 + X21 + X22 + X24+ X27 + X28 + X29 + X30 +
                           X31 + credit_limit , data = train,
                         importance = TRUE , ntree = 100)
## Predict using the test set
prediction = predict(rf_model, test)
importance = importance(rf_model, type = 1)
# Calculate MAPE (mean absolute percentage error); 1 - MAPE is reported
# as an accuracy-style figure
mape = function(y, yhat)
  mean(abs((y - yhat)/y))
1-mape(test[,1], prediction)
# Get importance (the '%IncMSE' column is used below)
importance = importance(rf_model)
varImportance = data.frame(Variables = row.names(importance),
                           Importance = round(importance[ , '%IncMSE'], 2))
# Create a rank variable based on importance
rankImportance = varImportance %>%
  mutate(Rank = paste0('#',dense_rank(desc(Importance))))
# Use ggplot2 to visualize the relative importance of variables
# NOTE(review): ggplot() needs ggplot2 and theme_few() needs ggthemes;
# neither is attached explicitly in this script -- confirm they are
# loaded upstream, otherwise this plot call fails.
ggplot(rankImportance, aes(x = reorder(Variables, Importance),
                           y = Importance, fill = Importance)) +
  geom_bar(stat='identity') +
  geom_text(aes(x = Variables, y = 0.5, label = Rank),
            hjust=0, vjust=0.55, size = 6, colour = 'yellow') +
  labs(x = 'Variables') + coord_flip() + theme_few()
|
/4.Random Forest Evaluation.R
|
no_license
|
rakesh-analytics/interest-rate-project
|
R
| false
| false
| 1,497
|
r
|
#Random Forest Model
library(randomForest)
library('dplyr')
library("caret")
# Split the data into train and test sets
# Fix the RNG seed so the same sample is drawn on every run
set.seed(123)
# NOTE(review): `data` is not created in this script; it is assumed to be
# already present in the session (loaded by an earlier step) -- confirm.
train = data[sample(nrow(data), 20000, replace = F), ]
test = data[!(1:nrow(data)) %in% as.numeric(row.names(train)), ]
# Random forest regression of X1 on the selected predictors
rf_model <- randomForest(X1 ~ X4 + X8 + X9 + X13 + X21 + X22 + X24+ X27 + X28 + X29 + X30 +
                           X31 + credit_limit , data = train,
                         importance = TRUE , ntree = 100)
## Predict using the test set
prediction = predict(rf_model, test)
importance = importance(rf_model, type = 1)
# Calculate MAPE (mean absolute percentage error); 1 - MAPE is reported
# as an accuracy-style figure
mape = function(y, yhat)
  mean(abs((y - yhat)/y))
1-mape(test[,1], prediction)
# Get importance (the '%IncMSE' column is used below)
importance = importance(rf_model)
varImportance = data.frame(Variables = row.names(importance),
                           Importance = round(importance[ , '%IncMSE'], 2))
# Create a rank variable based on importance
rankImportance = varImportance %>%
  mutate(Rank = paste0('#',dense_rank(desc(Importance))))
# Use ggplot2 to visualize the relative importance of variables
# NOTE(review): ggplot() needs ggplot2 and theme_few() needs ggthemes;
# neither is attached explicitly in this script -- confirm they are
# loaded upstream, otherwise this plot call fails.
ggplot(rankImportance, aes(x = reorder(Variables, Importance),
                           y = Importance, fill = Importance)) +
  geom_bar(stat='identity') +
  geom_text(aes(x = Variables, y = 0.5, label = Rank),
            hjust=0, vjust=0.55, size = 6, colour = 'yellow') +
  labs(x = 'Variables') + coord_flip() + theme_few()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jglmm.r
\name{jglmm}
\alias{jglmm}
\title{Fitting Generalized Linear Mixed-Effects Models in Julia}
\usage{
jglmm(
formula,
data,
family = "normal",
link = NULL,
weights = NULL,
contrasts = NULL,
return_val = c("jglmm", "julia_model_str")
)
}
\arguments{
\item{formula}{A two-sided linear formula object describing both the
fixed-effects and random-effects part of the model, with the response on
the left of a ~ operator and the terms, separated by + operators, on the
right. Random-effects terms are distinguished by vertical bars ("|")
separating expressions for design matrices from grouping factors.}
\item{data}{A data frame containing the variables named in formula.}
\item{family}{(optional) The distribution family for the response variable
(defaults to "normal").}
\item{link}{(optional) The model link function (defaults to "identity").}
\item{weights}{(optional) A vector of prior case weights.}
\item{contrasts}{(optional) A named list mapping column names of categorical
variables in data to coding schemes (defaults to dummy coding all
categorical variables).}
\item{return_val}{return fitted model ("jglmm") or just Julia model string ("julia_model_str")?}
}
\value{
An object of class `jglmm`.
}
\description{
Fitting Generalized Linear Mixed-Effects Models in Julia
}
\examples{
\dontrun{
# linear model
lm1 <- jglmm(Reaction ~ Days + (Days | Subject), lme4::sleepstudy)
# logistic model
cbpp <- dplyr::mutate(lme4::cbpp, prop = incidence / size)
gm1 <- jglmm(prop ~ period + (1 | herd), data = cbpp, family = "binomial",
weights = cbpp$size)
gm2 <- jglmm(prop ~ period + (1 | herd), data = cbpp, family = "binomial",
weights = cbpp$size, contrasts = list(period = "effects"))
}
}
|
/man/jglmm.Rd
|
no_license
|
bbolker/jglmm
|
R
| false
| true
| 1,823
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jglmm.r
\name{jglmm}
\alias{jglmm}
\title{Fitting Generalized Linear Mixed-Effects Models in Julia}
\usage{
jglmm(
formula,
data,
family = "normal",
link = NULL,
weights = NULL,
contrasts = NULL,
return_val = c("jglmm", "julia_model_str")
)
}
\arguments{
\item{formula}{A two-sided linear formula object describing both the
fixed-effects and random-effects part of the model, with the response on
the left of a ~ operator and the terms, separated by + operators, on the
right. Random-effects terms are distinguished by vertical bars ("|")
separating expressions for design matrices from grouping factors.}
\item{data}{A data frame containing the variables named in formula.}
\item{family}{(optional) The distribution family for the response variable
(defaults to "normal").}
\item{link}{(optional) The model link function (defaults to "identity").}
\item{weights}{(optional) A vector of prior case weights.}
\item{contrasts}{(optional) A named list mapping column names of categorical
variables in data to coding schemes (defaults to dummy coding all
categorical variables).}
\item{return_val}{return fitted model ("jglmm") or just Julia model string ("julia_model_str")?}
}
\value{
An object of class `jglmm`.
}
\description{
Fitting Generalized Linear Mixed-Effects Models in Julia
}
\examples{
\dontrun{
# linear model
lm1 <- jglmm(Reaction ~ Days + (Days | Subject), lme4::sleepstudy)
# logistic model
cbpp <- dplyr::mutate(lme4::cbpp, prop = incidence / size)
gm1 <- jglmm(prop ~ period + (1 | herd), data = cbpp, family = "binomial",
weights = cbpp$size)
gm2 <- jglmm(prop ~ period + (1 | herd), data = cbpp, family = "binomial",
weights = cbpp$size, contrasts = list(period = "effects"))
}
}
|
\docType{methods}
\name{dbListResults,SQLiteConnection-method}
\alias{dbListResults,SQLiteConnection-method}
\title{List available SQLite result sets.}
\usage{
\S4method{dbListResults}{SQLiteConnection}(conn, ...)
}
\arguments{
\item{conn}{An existing
\code{\linkS4class{SQLiteConnection}}}
\item{...}{Ignored. Included for compatibility with
generic.}
}
\description{
List available SQLite result sets.
}
|
/man/dbListResults-SQLiteConnection-method.Rd
|
permissive
|
snowdj/RSQLite
|
R
| false
| false
| 416
|
rd
|
\docType{methods}
\name{dbListResults,SQLiteConnection-method}
\alias{dbListResults,SQLiteConnection-method}
\title{List available SQLite result sets.}
\usage{
\S4method{dbListResults}{SQLiteConnection}(conn, ...)
}
\arguments{
\item{conn}{An existing
\code{\linkS4class{SQLiteConnection}}}
\item{...}{Ignored. Included for compatibility with
generic.}
}
\description{
List available SQLite result sets.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correlation_clique.R
\name{correlation_set_module_size}
\alias{correlation_set_module_size}
\title{correlation_set_module_size}
\usage{
correlation_set_module_size(size, correlation_module)
}
\arguments{
\item{size}{The desired size of the module; the returned module is the one
closest to this size}
\item{correlation_module}{Module object that has been produced by \code{correlation_clique} function}
}
\value{
\code{correlation_clique} module object
}
\description{
Returns a correlation_clique module closest to \code{size}
}
\details{
The function will find the frequency cutoff that results
in a \code{correlation_clique} module object closest to \code{size}
}
\seealso{
\code{\link{correlation_clique}}
}
\author{
Dirk de Weerd
}
|
/man/correlation_set_module_size.Rd
|
no_license
|
ddeweerd/MODifieRDev
|
R
| false
| true
| 820
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correlation_clique.R
\name{correlation_set_module_size}
\alias{correlation_set_module_size}
\title{correlation_set_module_size}
\usage{
correlation_set_module_size(size, correlation_module)
}
\arguments{
\item{size}{The desired size of the module; the returned module is the one
closest to this size}
\item{correlation_module}{Module object that has been produced by \code{correlation_clique} function}
}
\value{
\code{correlation_clique} module object
}
\description{
Returns a correlation_clique module closest to \code{size}
}
\details{
The function will find the frequency cutoff that results
in a \code{correlation_clique} module object closest to \code{size}
}
\seealso{
\code{\link{correlation_clique}}
}
\author{
Dirk de Weerd
}
|
Response <- function(fit, x, trans, alpha, ...) {
  # Compute, on the response scale, everything needed to draw a visreg plot:
  # fitted values with a (1 - alpha) confidence band over the grid x$DD, and
  # partial residuals over the original data x$D, all transformed by trans().
  #
  # Args:
  #   fit:   fitted model object (lm, glm, mlm, lme, merMod, randomForest,
  #          rms, svyglm, ...)
  #   x:     list holding D (data used to fit) and DD (prediction grid)
  #   trans: transformation applied to fit, bounds and residuals
  #   alpha: 1 - confidence level of the band
  #   ...:   extra arguments forwarded to predict()
  # Returns: list with fit, lwr, upr, r (residuals), name, pos, n.

  is_mlm <- class(fit)[1] == "mlm"  # multi-response lm gets matrix handling

  ## Residuals: random forests have no residuals() method, so derive them
  ## from the OOB predictions (regression) or OOB probabilities for the
  ## second class (classification).
  if ("randomForest" %in% class(fit)) {
    if (fit$type == "regression") rr <- fit$y - fit$predicted
    if (fit$type == "classification") {
      P <- predict(fit, type = "prob")
      rr <- (fit$y == colnames(P)[2]) - P[, 2]
    }
  } else {
    rr <- residuals(fit)
  }
  if (!is_mlm) rr <- rr[!is.na(rr)]
  nr <- if (is.matrix(rr)) nrow(rr) else length(rr)
  if (nrow(x$D) != nr) warning("Residuals do not match data; have you changed the original data set? If so, visreg is probably not displaying the residuals for the data set that was actually used to fit the model.")

  ## Predictions at the observed data (fitted part of the partial residuals)
  predict.args <- list(object = fit, newdata = x$D)
  if ("lme" %in% class(fit)) predict.args$level <- 0       # population level
  if (inherits(fit, "merMod")) predict.args$re.form <- NA  # fixed effects only
  dots <- list(...)
  if (length(dots)) predict.args[names(dots)] <- dots
  if ("randomForest" %in% class(fit) && fit$type == "classification") {
    r <- P[, 2] + rr
  } else {
    r <- suppressWarnings(do.call("predict", predict.args)) + rr
  }

  ## Predictions (with standard errors where available) over the grid x$DD
  predict.args$newdata <- x$DD
  if (is_mlm) {
    p <- list(fit = suppressWarnings(do.call("predict", predict.args)),
              se.fit = se.mlm(fit, newdata = x$DD))
  } else if ("randomForest" %in% class(fit) && fit$type == "classification") {
    predict.args$type <- "prob"
    P <- suppressWarnings(do.call("predict", predict.args))
    p <- list(fit = P[, 2], se.fit = NA)  # forests provide no SE
  } else {
    predict.args$se.fit <- TRUE  # note: se.fit required by some; add $se on case-by-case basis
    p <- suppressWarnings(do.call("predict", predict.args))
  }

  ## Normalize the various predict() return shapes to list(fit, se.fit).
  ## BUG FIX: test the class with inherits() instead of class(p)=="svystat";
  ## class() can return a vector, and a length > 1 condition in if() is an
  ## error on R >= 4.2.
  if (inherits(p, "svystat")) p <- list(fit = as.numeric(p), se.fit = sqrt(attr(p, "var")))
  if ("rms" %in% class(fit)) p$fit <- p$linear.predictors
  if (is.numeric(p)) p <- list(fit = p, se.fit = NA)

  ## Confidence band multiplier: exact t quantile for a plain lm,
  ## normal quantile otherwise (scalar if/else, not vectorized ifelse)
  m <- if (identical(class(fit), "lm")) qt(1 - alpha/2, fit$df.residual) else qnorm(1 - alpha/2)
  upr <- p$fit + m * p$se.fit
  lwr <- p$fit - m * p$se.fit

  ## Assemble transformed output (one column per response for mlm)
  if (is_mlm) {
    val <- list(fit = matrix(trans(p$fit), ncol = ncol(p$fit)),
                lwr = matrix(trans(lwr), ncol = ncol(p$fit)),
                upr = matrix(trans(upr), ncol = ncol(p$fit)),
                r = matrix(trans(r), ncol = ncol(p$fit)))
    val$name <- colnames(val$fit) <- colnames(p$fit)
  } else {
    val <- list(fit = as.numeric(trans(p$fit)),
                lwr = as.numeric(trans(lwr)),
                upr = as.numeric(trans(upr)),
                r = as.numeric(trans(r)),
                name = as.character(formula(fit)[2]))
  }
  val$pos <- rr > 0                           # residuals above the fit
  val$n <- if (is_mlm) ncol(p$fit) else 1
  val
}
|
/R/Response.R
|
no_license
|
oldi/visreg
|
R
| false
| false
| 2,511
|
r
|
Response <- function(fit, x, trans, alpha, ...) {
  # Compute, on the response scale, everything needed to draw a visreg plot:
  # fitted values with a (1 - alpha) confidence band over the grid x$DD, and
  # partial residuals over the original data x$D, all transformed by trans().
  #
  # Args:
  #   fit:   fitted model object (lm, glm, mlm, lme, merMod, randomForest,
  #          rms, svyglm, ...)
  #   x:     list holding D (data used to fit) and DD (prediction grid)
  #   trans: transformation applied to fit, bounds and residuals
  #   alpha: 1 - confidence level of the band
  #   ...:   extra arguments forwarded to predict()
  # Returns: list with fit, lwr, upr, r (residuals), name, pos, n.

  is_mlm <- class(fit)[1] == "mlm"  # multi-response lm gets matrix handling

  ## Residuals: random forests have no residuals() method, so derive them
  ## from the OOB predictions (regression) or OOB probabilities for the
  ## second class (classification).
  if ("randomForest" %in% class(fit)) {
    if (fit$type == "regression") rr <- fit$y - fit$predicted
    if (fit$type == "classification") {
      P <- predict(fit, type = "prob")
      rr <- (fit$y == colnames(P)[2]) - P[, 2]
    }
  } else {
    rr <- residuals(fit)
  }
  if (!is_mlm) rr <- rr[!is.na(rr)]
  nr <- if (is.matrix(rr)) nrow(rr) else length(rr)
  if (nrow(x$D) != nr) warning("Residuals do not match data; have you changed the original data set? If so, visreg is probably not displaying the residuals for the data set that was actually used to fit the model.")

  ## Predictions at the observed data (fitted part of the partial residuals)
  predict.args <- list(object = fit, newdata = x$D)
  if ("lme" %in% class(fit)) predict.args$level <- 0       # population level
  if (inherits(fit, "merMod")) predict.args$re.form <- NA  # fixed effects only
  dots <- list(...)
  if (length(dots)) predict.args[names(dots)] <- dots
  if ("randomForest" %in% class(fit) && fit$type == "classification") {
    r <- P[, 2] + rr
  } else {
    r <- suppressWarnings(do.call("predict", predict.args)) + rr
  }

  ## Predictions (with standard errors where available) over the grid x$DD
  predict.args$newdata <- x$DD
  if (is_mlm) {
    p <- list(fit = suppressWarnings(do.call("predict", predict.args)),
              se.fit = se.mlm(fit, newdata = x$DD))
  } else if ("randomForest" %in% class(fit) && fit$type == "classification") {
    predict.args$type <- "prob"
    P <- suppressWarnings(do.call("predict", predict.args))
    p <- list(fit = P[, 2], se.fit = NA)  # forests provide no SE
  } else {
    predict.args$se.fit <- TRUE  # note: se.fit required by some; add $se on case-by-case basis
    p <- suppressWarnings(do.call("predict", predict.args))
  }

  ## Normalize the various predict() return shapes to list(fit, se.fit).
  ## BUG FIX: test the class with inherits() instead of class(p)=="svystat";
  ## class() can return a vector, and a length > 1 condition in if() is an
  ## error on R >= 4.2.
  if (inherits(p, "svystat")) p <- list(fit = as.numeric(p), se.fit = sqrt(attr(p, "var")))
  if ("rms" %in% class(fit)) p$fit <- p$linear.predictors
  if (is.numeric(p)) p <- list(fit = p, se.fit = NA)

  ## Confidence band multiplier: exact t quantile for a plain lm,
  ## normal quantile otherwise (scalar if/else, not vectorized ifelse)
  m <- if (identical(class(fit), "lm")) qt(1 - alpha/2, fit$df.residual) else qnorm(1 - alpha/2)
  upr <- p$fit + m * p$se.fit
  lwr <- p$fit - m * p$se.fit

  ## Assemble transformed output (one column per response for mlm)
  if (is_mlm) {
    val <- list(fit = matrix(trans(p$fit), ncol = ncol(p$fit)),
                lwr = matrix(trans(lwr), ncol = ncol(p$fit)),
                upr = matrix(trans(upr), ncol = ncol(p$fit)),
                r = matrix(trans(r), ncol = ncol(p$fit)))
    val$name <- colnames(val$fit) <- colnames(p$fit)
  } else {
    val <- list(fit = as.numeric(trans(p$fit)),
                lwr = as.numeric(trans(lwr)),
                upr = as.numeric(trans(upr)),
                r = as.numeric(trans(r)),
                name = as.character(formula(fit)[2]))
  }
  val$pos <- rr > 0                           # residuals above the fit
  val$n <- if (is_mlm) ncol(p$fit) else 1
  val
}
|
#### Include library
library(psych)
library(MASS)
library(ggplot2)
library(plotly)
# Accumulate ankle-sensor activity counts into each annotation window
# [HEADER_START_TIME, HEADER_STOP_TIME] of annotationFile.
# NOTE(review): assumes annotationFile and spadesAnkle exist in the
# workspace before this script runs -- confirm against the calling pipeline.
countsInWindows <- 0
annotationFile$ANKLE_COUNTS_ADDED <- NA
annotationFile$WRIST_COUNTS_ADDED <- NA
annotationFile$TOTAL_ROWS_TEMP_ANKLE <- NA
annotationFile$TOTAL_ROWS_TEMP_WRIST <- NA
for (i in seq_len(nrow(annotationFile))) {
  print(paste0("At the annotation number: ", i))
  # Ankle rows falling inside this annotation's time window
  tempSpadesFrame <- spadesAnkle[spadesAnkle$DATE_TIME_ANKLE >= annotationFile$HEADER_START_TIME[i] &
                                   spadesAnkle$DATE_TIME_ANKLE <= annotationFile$HEADER_STOP_TIME[i], ]
  countsInWindows <- sum(tempSpadesFrame$Vector.Magnitude)
  print(paste0("Total counts found: ", countsInWindows))
  annotationFile$ANKLE_COUNTS_ADDED[i] <- countsInWindows
  annotationFile$TOTAL_ROWS_TEMP_ANKLE[i] <- nrow(tempSpadesFrame)
  print("Counts have been added")
}
### Commenting it out for now
# countsInWindows = 0
#
# for (i in 1:nrow(annotationFile)){
# print(paste0("At the annotation number: ", i))
#
# tempSpadesFrame <- spadesWrist[spadesWrist$DATE_TIME_ANKLE >= annotationFile$HEADER_START_TIME[i] &
# spadesWrist$DATE_TIME_ANKLE <= annotationFile$HEADER_STOP_TIME[i],]
#
# countsInWindows = sum(tempSpadesFrame$Vector.Magnitude)
#
# print(paste0("Total counts found: ", countsInWindows))
#
# annotationFile$WRIST_COUNTS_ADDED[i] <- countsInWindows
# annotationFile$TOTAL_ROWS_TEMP_WRIST[i] <- nrow(tempSpadesFrame)
#
# print("Counts have been added")
#
# }
# Drop the free-text annotation column with base R: the original called
# select(), which needs dplyr (never attached here); with MASS attached,
# select() would dispatch to MASS::select() and fail.
annotationFile$ANNOTATION <- NULL
head(annotationFile)
outPathSummary <- "C:/Users/Dharam/Downloads/microEMA/StudyFiles/SPADES_ACTIVITY_COUNT/SPADES_1/LabCountsCombined.csv"
# write.csv() fixes col.names and sep itself; passing them is rejected
write.csv(file = outPathSummary, x = annotationFile, quote = FALSE, row.names = FALSE)
|
/microEMAReferenceCountsManager/getCountsSummaryForLabels.R
|
no_license
|
adityaponnada/microEMA-Preprocessing
|
R
| false
| false
| 1,854
|
r
|
#### Include library
library(psych)
library(MASS)
library(ggplot2)
library(plotly)
# Accumulate ankle-sensor activity counts into each annotation window
# [HEADER_START_TIME, HEADER_STOP_TIME] of annotationFile.
# NOTE(review): assumes annotationFile and spadesAnkle exist in the
# workspace before this script runs -- confirm against the calling pipeline.
countsInWindows <- 0
annotationFile$ANKLE_COUNTS_ADDED <- NA
annotationFile$WRIST_COUNTS_ADDED <- NA
annotationFile$TOTAL_ROWS_TEMP_ANKLE <- NA
annotationFile$TOTAL_ROWS_TEMP_WRIST <- NA
for (i in seq_len(nrow(annotationFile))) {
  print(paste0("At the annotation number: ", i))
  # Ankle rows falling inside this annotation's time window
  tempSpadesFrame <- spadesAnkle[spadesAnkle$DATE_TIME_ANKLE >= annotationFile$HEADER_START_TIME[i] &
                                   spadesAnkle$DATE_TIME_ANKLE <= annotationFile$HEADER_STOP_TIME[i], ]
  countsInWindows <- sum(tempSpadesFrame$Vector.Magnitude)
  print(paste0("Total counts found: ", countsInWindows))
  annotationFile$ANKLE_COUNTS_ADDED[i] <- countsInWindows
  annotationFile$TOTAL_ROWS_TEMP_ANKLE[i] <- nrow(tempSpadesFrame)
  print("Counts have been added")
}
### Commenting it out for now
# countsInWindows = 0
#
# for (i in 1:nrow(annotationFile)){
# print(paste0("At the annotation number: ", i))
#
# tempSpadesFrame <- spadesWrist[spadesWrist$DATE_TIME_ANKLE >= annotationFile$HEADER_START_TIME[i] &
# spadesWrist$DATE_TIME_ANKLE <= annotationFile$HEADER_STOP_TIME[i],]
#
# countsInWindows = sum(tempSpadesFrame$Vector.Magnitude)
#
# print(paste0("Total counts found: ", countsInWindows))
#
# annotationFile$WRIST_COUNTS_ADDED[i] <- countsInWindows
# annotationFile$TOTAL_ROWS_TEMP_WRIST[i] <- nrow(tempSpadesFrame)
#
# print("Counts have been added")
#
# }
# Drop the free-text annotation column with base R: the original called
# select(), which needs dplyr (never attached here); with MASS attached,
# select() would dispatch to MASS::select() and fail.
annotationFile$ANNOTATION <- NULL
head(annotationFile)
outPathSummary <- "C:/Users/Dharam/Downloads/microEMA/StudyFiles/SPADES_ACTIVITY_COUNT/SPADES_1/LabCountsCombined.csv"
# write.csv() fixes col.names and sep itself; passing them is rejected
write.csv(file = outPathSummary, x = annotationFile, quote = FALSE, row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acf.R
\name{plot.theo_arma}
\alias{autoplot.theo_arma}
\alias{plot.theo_arma}
\title{Plot Theoretical Autocorrelation (ACF) for ARMA Models}
\usage{
\method{plot}{theo_arma}(x, ...)
\method{autoplot}{theo_arma}(object, ...)
}
\arguments{
\item{x, object}{An \code{"theo_arma"} object from \code{\link{theo_acf}}
or \code{\link{theo_pacf}}.}
\item{...}{Additional parameters}
}
\value{
An \code{array} of dimensions \eqn{N \times S \times S}{N x S x S}.
}
\description{
Displays the theoretical autocorrelation for ARMA Models
}
\examples{
# Compute Theoretical ACF
m = theo_acf(ARMA(ar = -0.25, ma = NULL), lag.max = 7)
# Compute Theoretical PACF
m2 = theo_pacf(ARMA(ar = -0.25, ma = NULL), lag.max = 7)
# Plot either the theoretical ACF or PACF
plot(m); plot(m2)
}
|
/man/plot.theo_arma.Rd
|
no_license
|
SMAC-Group/exts
|
R
| false
| true
| 850
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acf.R
\name{plot.theo_arma}
\alias{autoplot.theo_arma}
\alias{plot.theo_arma}
\title{Plot Theoretical Autocorrelation (ACF) for ARMA Models}
\usage{
\method{plot}{theo_arma}(x, ...)
\method{autoplot}{theo_arma}(object, ...)
}
\arguments{
\item{x, object}{An \code{"theo_arma"} object from \code{\link{theo_acf}}
or \code{\link{theo_pacf}}.}
\item{...}{Additional parameters}
}
\value{
An \code{array} of dimensions \eqn{N \times S \times S}{N x S x S}.
}
\description{
Displays the theoretical autocorrelation for ARMA Models
}
\examples{
# Compute Theoretical ACF
m = theo_acf(ARMA(ar = -0.25, ma = NULL), lag.max = 7)
# Compute Theoretical PACF
m2 = theo_pacf(ARMA(ar = -0.25, ma = NULL), lag.max = 7)
# Plot either the theoretical ACF or PACF
plot(m); plot(m2)
}
|
fpiter <- function(par, fixptfn, objfn=NULL, control=list( ), ...) {
  ## Plain fixed-point iteration: repeatedly apply fixptfn until the step
  ## length drops below tol or maxiter sweeps have been performed.
  ##
  ## par     - starting parameter vector
  ## fixptfn - fixed-point mapping; fixptfn(par, ...) yields the next iterate
  ## objfn   - optional objective function (printed when tracing, evaluated once at the end)
  ## control - list overriding the defaults tol, maxiter, trace
  ##
  ## Returns: list(par, value.objfn, fpevals, objfevals, convergence)
  defaults <- list(tol=1.e-07, maxiter=5000, trace=FALSE)
  given <- names(control)
  unknown <- !(given %in% names(defaults))
  if (any(unknown))
    stop("unknown names in control: ", given[unknown])
  opts <- modifyList(defaults, control)
  #
  # method = reduced-rank ("rre") or minimal-polynomial ("mpe") extrapolation
  # K = order of extrapolation scheme; K=2,3,4 are typical choices
  # square = a logical variable indicating whether or not "squaring" is used
  if (opts$trace) cat("fpiter \n")
  k <- 1
  n.obj <- 0
  converged <- FALSE
  while (k < opts$maxiter) {
    nxt <- fixptfn(par, ...)
    step <- sqrt(crossprod(nxt - par))
    if (step < opts$tol) {
      # converged: return the pre-update iterate, matching the break
      # placement before par is advanced
      converged <- TRUE
      break
    }
    if (opts$trace) {
      if (!is.null(objfn)) {
        cat("Iter: ", k, "Objective fn: ", objfn(par, ...), "\n")
        n.obj <- n.obj + 1
      } else {
        cat("Iter: ", k, "Residual: ", step, "\n")
      }
    }
    par <- nxt
    k <- k + 1
  }
  final.obj <- if (!is.null(objfn)) objfn(par, ...) else NA
  list(par=par, value.objfn=final.obj, fpevals=k,
       objfevals=n.obj, convergence=converged)
}
|
/R/fpiter.R
|
no_license
|
cran/daarem
|
R
| false
| false
| 1,268
|
r
|
fpiter <- function(par, fixptfn, objfn=NULL, control=list( ), ...) {
  ## Plain fixed-point iteration: repeatedly apply fixptfn until the step
  ## length drops below tol or maxiter sweeps have been performed.
  ##
  ## par     - starting parameter vector
  ## fixptfn - fixed-point mapping; fixptfn(par, ...) yields the next iterate
  ## objfn   - optional objective function (printed when tracing, evaluated once at the end)
  ## control - list overriding the defaults tol, maxiter, trace
  ##
  ## Returns: list(par, value.objfn, fpevals, objfevals, convergence)
  defaults <- list(tol=1.e-07, maxiter=5000, trace=FALSE)
  given <- names(control)
  unknown <- !(given %in% names(defaults))
  if (any(unknown))
    stop("unknown names in control: ", given[unknown])
  opts <- modifyList(defaults, control)
  #
  # method = reduced-rank ("rre") or minimal-polynomial ("mpe") extrapolation
  # K = order of extrapolation scheme; K=2,3,4 are typical choices
  # square = a logical variable indicating whether or not "squaring" is used
  if (opts$trace) cat("fpiter \n")
  k <- 1
  n.obj <- 0
  converged <- FALSE
  while (k < opts$maxiter) {
    nxt <- fixptfn(par, ...)
    step <- sqrt(crossprod(nxt - par))
    if (step < opts$tol) {
      # converged: return the pre-update iterate, matching the break
      # placement before par is advanced
      converged <- TRUE
      break
    }
    if (opts$trace) {
      if (!is.null(objfn)) {
        cat("Iter: ", k, "Objective fn: ", objfn(par, ...), "\n")
        n.obj <- n.obj + 1
      } else {
        cat("Iter: ", k, "Residual: ", step, "\n")
      }
    }
    par <- nxt
    k <- k + 1
  }
  final.obj <- if (!is.null(objfn)) objfn(par, ...) else NA
  list(par=par, value.objfn=final.obj, fpevals=k,
       objfevals=n.obj, convergence=converged)
}
|
#------------------ Classification -----------------#
# features <- c("session_id",
# "day",
# "hour",
# "course",
# "wind_speed",
# "temperature",
# "period",
# "heading",
# "middle_lat",
# "middle_lon",
# "flow")
#------------------ Regression -----------------#
# NOTE: each assignment below overwrites the previous one, so only the
# last uncommented `features` vector is in effect when this file is
# sourced. The earlier variants are kept as a record of models tried.
# Model 0
features <- c("session_id",
              "speed",
              "wday",
              "hour",
              "heading",
              "middle_lat",
              "middle_lon")
# Marcios model
features <- c("session_id",
              "speed",
              "wday",
              "hour",
              "wind_speed",
              "temperature",
              "heading",
              "middle_lat",
              "middle_lon")
features <- c("session_id",
              "speed",
              "time",
              "wind_speed",
              "temperature",
              "middle_lat",
              "middle_lon")
# Model 2
features <- c("session_id",
              "speed",
              "hour",
              "wday",
              "period",
              "heading",
              "middle_lat",
              "middle_lon",
              "wind_speed",
              "temperature",
              "Conditions")
# Model 4
features <- c("session_id",
              "speed",
              "hour",
              "wday",
              "heading",
              "middle_lat",
              "middle_lon",
              "chuva")
# Model 5
features <- c("session_id",
              "speed",
              "wday",
              "hour",
              "wind_speed",
              "temperature",
              "heading",
              "chuva")
features <- c("session_id",
              "speed",
              "wday",
              "chuva",
              "wind_speed",
              "temperature",
              "period",
              "heading",
              "middle_lat",
              "middle_lon")
# **** hour  (was a bare "****hour" line, which is a syntax error and
# prevented this file from being sourced; converted to a comment)
features <- c("session_id",
              "speed",
              "wday",
              "chuva",
              "wind_speed",
              "temperature",
              "hour",
              "heading",
              "middle_lat",
              "middle_lon")
# Marcios model _ 2
features <- c("session_id",
              "speed",
              "wday",
              "hour",
              "wind_speed",
              "temperature",
              "heading")
/sensemy/rscripts/features.R
|
no_license
|
danielasocas/it
|
R
| false
| false
| 2,481
|
r
|
#------------------ Classification -----------------#
# features <- c("session_id",
# "day",
# "hour",
# "course",
# "wind_speed",
# "temperature",
# "period",
# "heading",
# "middle_lat",
# "middle_lon",
# "flow")
#------------------ Regression -----------------#
# NOTE: each assignment below overwrites the previous one, so only the
# last uncommented `features` vector is in effect when this file is
# sourced. The earlier variants are kept as a record of models tried.
# Model 0
features <- c("session_id",
              "speed",
              "wday",
              "hour",
              "heading",
              "middle_lat",
              "middle_lon")
# Marcios model
features <- c("session_id",
              "speed",
              "wday",
              "hour",
              "wind_speed",
              "temperature",
              "heading",
              "middle_lat",
              "middle_lon")
features <- c("session_id",
              "speed",
              "time",
              "wind_speed",
              "temperature",
              "middle_lat",
              "middle_lon")
# Model 2
features <- c("session_id",
              "speed",
              "hour",
              "wday",
              "period",
              "heading",
              "middle_lat",
              "middle_lon",
              "wind_speed",
              "temperature",
              "Conditions")
# Model 4
features <- c("session_id",
              "speed",
              "hour",
              "wday",
              "heading",
              "middle_lat",
              "middle_lon",
              "chuva")
# Model 5
features <- c("session_id",
              "speed",
              "wday",
              "hour",
              "wind_speed",
              "temperature",
              "heading",
              "chuva")
features <- c("session_id",
              "speed",
              "wday",
              "chuva",
              "wind_speed",
              "temperature",
              "period",
              "heading",
              "middle_lat",
              "middle_lon")
# **** hour  (was a bare "****hour" line, which is a syntax error and
# prevented this file from being sourced; converted to a comment)
features <- c("session_id",
              "speed",
              "wday",
              "chuva",
              "wind_speed",
              "temperature",
              "hour",
              "heading",
              "middle_lat",
              "middle_lon")
# Marcios model _ 2
features <- c("session_id",
              "speed",
              "wday",
              "hour",
              "wind_speed",
              "temperature",
              "heading")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_columns.R
\name{column_categorical_with_identity}
\alias{column_categorical_with_identity}
\title{Construct a Categorical Column that Returns Identity Values}
\usage{
column_categorical_with_identity(..., num_buckets, default_value = NULL)
}
\arguments{
\item{...}{Expression(s) identifying input feature(s). Used as the column
name and the dictionary key for feature parsing configs, feature tensors,
and feature columns.}
\item{num_buckets}{Number of unique values.}
\item{default_value}{If \code{NULL}, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range \code{[0, num_buckets)}, and out-of-range inputs will be replaced with this value.}
}
\value{
A categorical column that returns identity values.
}
\description{
Use this when your inputs are integers in the range \code{[0, num_buckets)}, and
you want to use the input value itself as the categorical ID. Values outside
this range will result in \code{default_value} if specified, otherwise it will
fail.
}
\details{
Typically, this is used for contiguous ranges of integer indexes, but it
doesn't have to be. This might be inefficient, however, if many of IDs are
unused. Consider \code{categorical_column_with_hash_bucket} in that case.
For input dictionary \code{features}, \code{features$key} is either tensor or sparse
tensor object. If it's tensor object, missing values can be represented by \code{-1} for
int and \code{''} for string. Note that these values are independent of the
\code{default_value} argument.
}
\section{Raises}{
\itemize{
\item ValueError: if \code{num_buckets} is less than one.
\item ValueError: if \code{default_value} is not in range \code{[0, num_buckets)}.
}
}
\seealso{
Other feature column constructors: \code{\link{column_bucketized}},
\code{\link{column_categorical_weighted}},
\code{\link{column_categorical_with_hash_bucket}},
\code{\link{column_categorical_with_vocabulary_file}},
\code{\link{column_categorical_with_vocabulary_list}},
\code{\link{column_crossed}},
\code{\link{column_embedding}},
\code{\link{column_numeric}}, \code{\link{input_layer}}
}
|
/man/column_categorical_with_identity.Rd
|
no_license
|
MhAmine/tfestimators
|
R
| false
| true
| 2,197
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_columns.R
\name{column_categorical_with_identity}
\alias{column_categorical_with_identity}
\title{Construct a Categorical Column that Returns Identity Values}
\usage{
column_categorical_with_identity(..., num_buckets, default_value = NULL)
}
\arguments{
\item{...}{Expression(s) identifying input feature(s). Used as the column
name and the dictionary key for feature parsing configs, feature tensors,
and feature columns.}
\item{num_buckets}{Number of unique values.}
\item{default_value}{If \code{NULL}, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range \code{[0, num_buckets)}, and out-of-range inputs will be replaced with this value.}
}
\value{
A categorical column that returns identity values.
}
\description{
Use this when your inputs are integers in the range \code{[0, num_buckets)}, and
you want to use the input value itself as the categorical ID. Values outside
this range will result in \code{default_value} if specified, otherwise it will
fail.
}
\details{
Typically, this is used for contiguous ranges of integer indexes, but it
doesn't have to be. This might be inefficient, however, if many of IDs are
unused. Consider \code{categorical_column_with_hash_bucket} in that case.
For input dictionary \code{features}, \code{features$key} is either tensor or sparse
tensor object. If it's tensor object, missing values can be represented by \code{-1} for
int and \code{''} for string. Note that these values are independent of the
\code{default_value} argument.
}
\section{Raises}{
\itemize{
\item ValueError: if \code{num_buckets} is less than one.
\item ValueError: if \code{default_value} is not in range \code{[0, num_buckets)}.
}
}
\seealso{
Other feature column constructors: \code{\link{column_bucketized}},
\code{\link{column_categorical_weighted}},
\code{\link{column_categorical_with_hash_bucket}},
\code{\link{column_categorical_with_vocabulary_file}},
\code{\link{column_categorical_with_vocabulary_list}},
\code{\link{column_crossed}},
\code{\link{column_embedding}},
\code{\link{column_numeric}}, \code{\link{input_layer}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pffr-methods.R
\name{qq.pffr}
\alias{qq.pffr}
\title{QQ plots for pffr model residuals}
\usage{
qq.pffr(object, rep = 0, level = 0.9, s.rep = 10, type = c("deviance",
"pearson", "response"), pch = ".", rl.col = 2, rep.col = "gray80", ...)
}
\arguments{
\item{object}{a fitted \code{\link{pffr}}-object}
\item{rep}{How many replicate datasets to generate to simulate quantiles
of the residual distribution. \code{0} results in an efficient
simulation free method for direct calculation, if this is possible for
the object family.}
\item{level}{If simulation is used for the quantiles, then reference intervals can be provided for the QQ-plot, this specifies the level.
0 or less for no intervals, 1 or more to simply plot the QQ plot for each replicate generated.}
\item{s.rep}{how many times to randomize uniform quantiles to data under direct computation.}
\item{type}{what sort of residuals should be plotted? See
\code{\link{residuals.gam}}.}
\item{pch}{plot character to use. 19 is good.}
\item{rl.col}{color for the reference line on the plot.}
\item{rep.col}{color for reference bands or replicate reference plots.}
\item{...}{extra graphics parameters to pass to plotting functions.}
}
\description{
This is simply a wrapper for \code{\link[mgcv]{qq.gam}()}.
}
|
/man/qq.pffr.Rd
|
no_license
|
dill/refund
|
R
| false
| true
| 1,380
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pffr-methods.R
\name{qq.pffr}
\alias{qq.pffr}
\title{QQ plots for pffr model residuals}
\usage{
qq.pffr(object, rep = 0, level = 0.9, s.rep = 10, type = c("deviance",
"pearson", "response"), pch = ".", rl.col = 2, rep.col = "gray80", ...)
}
\arguments{
\item{object}{a fitted \code{\link{pffr}}-object}
\item{rep}{How many replicate datasets to generate to simulate quantiles
of the residual distribution. \code{0} results in an efficient
simulation free method for direct calculation, if this is possible for
the object family.}
\item{level}{If simulation is used for the quantiles, then reference intervals can be provided for the QQ-plot, this specifies the level.
0 or less for no intervals, 1 or more to simply plot the QQ plot for each replicate generated.}
\item{s.rep}{how many times to randomize uniform quantiles to data under direct computation.}
\item{type}{what sort of residuals should be plotted? See
\code{\link{residuals.gam}}.}
\item{pch}{plot character to use. 19 is good.}
\item{rl.col}{color for the reference line on the plot.}
\item{rep.col}{color for reference bands or replicate reference plots.}
\item{...}{extra graphics parameters to pass to plotting functions.}
}
\description{
This is simply a wrapper for \code{\link[mgcv]{qq.gam}()}.
}
|
[
{
"title": "Statistics: Losing Ground to CS, Losing Image Among Students",
"href": "https://matloff.wordpress.com/2014/08/26/statistics-losing-ground-to-cs-losing-image-among-students/"
},
{
"title": "Revolution Newsletter: October 2011",
"href": "http://blog.revolutionanalytics.com/2011/10/revolution-newsletter-october-2011.html"
},
{
"title": "In case you missed it: May 2012 Roundup",
"href": "http://blog.revolutionanalytics.com/2012/06/in-case-you-missed-it-may-2012-roundup.html"
},
{
"title": "Two tips: adding title for graph with multiple plots; add significance asterix onto a boxplot",
"href": "http://onetipperday.sterding.com/2012/06/two-tips-adding-title-for-graph-with.html"
},
{
"title": "Create Word documents from R with R2DOCX",
"href": "http://blog.revolutionanalytics.com/2013/06/create-word-documents-from-r-with-r2docx.html"
},
{
"title": "Computing on the Language",
"href": "http://simplystatistics.tumblr.com/post/11988685443/computing-on-the-language"
},
{
"title": "Messy matters explores the probability of winning of basketball…",
"href": "https://web.archive.org/web/http://blog.ggplot2.org/post/24401184979"
},
{
"title": "Lots of data != \"Big Data\"",
"href": "http://blog.revolutionanalytics.com/2013/03/lots-of-data-big-data.html"
},
{
"title": "News about speeding R up",
"href": "https://xianblog.wordpress.com/2011/05/24/news-about-speeding-r-up/"
},
{
"title": "Review: Kölner R Meeting 26 Feburary 2014",
"href": "http://www.magesblog.com/2014/03/review-kolner-r-meeting-26-feburary-2014.html"
},
{
"title": "Update for Backtesting Asset Allocation Portfolios post",
"href": "https://systematicinvestor.wordpress.com/2013/10/24/update-for-backtesting-asset-allocation-portfolios-post/"
},
{
"title": "taskscheduleR: R package to schedule R scripts with the Windows task manager",
"href": "http://www.bnosac.be/index.php/blog/56-taskscheduler-r-package-to-schedule-r-scripts-with-the-windows-task-manager"
},
{
"title": "R progress indicators",
"href": "https://binfalse.de/2011/06/19/r-progress-indicators/"
},
{
"title": "A million ? what are the odds…",
"href": "http://blog.free.fr/"
},
{
"title": "Merging Multiple Data Files into One Data Frame",
"href": "https://feedproxy.google.com/~r/CoffeeAndEconometricsInTheMorning/~3/T9s9bnTAAmw/merging-multiple-data-files-into-one.html"
},
{
"title": "Giving a Damn About Statistics: Baseball, Shark Attacks, and Green M&M’s",
"href": "http://spatioanalytics.com/2015/03/13/giving-a-damn-about-statistics-baseball-shark-attacks-and-green-mms/"
},
{
"title": "R 3.0 released; ggplot2 stat_summary bug fixed!",
"href": "http://mindingthebrain.blogspot.com/2013/04/r-30-released-ggplot2-statsummary-bug.html"
},
{
"title": "Stats in the Court Room Hands on Tutorial",
"href": "http://plausibel.blogspot.com/2013/04/stats-in-court-room-hands-on-tutorial.html"
},
{
"title": "Reverse Engineering with Correlated Features",
"href": "http://freakonometrics.hypotheses.org/47979"
},
{
"title": "how to download and install r",
"href": "http://www.twotorials.com/2012/03/how-to-download-and-install-r.html"
},
{
"title": "Timer progress bar added to pbapply package",
"href": "http://peter.solymos.org/code/2016/03/04/timer-progress-bar-added-to-pbapply-package.html"
},
{
"title": "How to make a rough check to see if your data is normally distributed",
"href": "http://firsttimeprogrammer.blogspot.com/2015/07/how-to-make-rough-check-to-see-if-your.html"
},
{
"title": "With Size, Does Risk–>Return?",
"href": "http://timelyportfolio.blogspot.com/2011/12/with-size-does-risk.html"
},
{
"title": "ROracle support for TimesTen In-Memory Database",
"href": "https://blogs.oracle.com/R/entry/roracle_support_times_ten_in"
},
{
"title": "R and SAS in the curriculum: getting students to \"think with data\"",
"href": "https://feedproxy.google.com/~r/SASandR/~3/dhaUlhF7kTQ/r-and-sas-in-curriculum-getting.html"
},
{
"title": "Who are the pollinators? (with R plot)",
"href": "https://bartomeuslab.com/2012/12/17/who-are-the-pollinators/"
},
{
"title": "covr: A Victory for Open Source",
"href": "http://www.mango-solutions.com/wp/2016/07/covr-a-victory-for-open-source/"
},
{
"title": "FastCompany on telling stories with data",
"href": "http://blog.revolutionanalytics.com/2011/08/fastcompany-on-telling-stories-with-data.html"
},
{
"title": "Revised market prediction distributions",
"href": "https://feedproxy.google.com/~r/PortfolioProbeRLanguage/~3/il93MmFLb8I/"
},
{
"title": "Software tools for data analysis – an overview",
"href": "https://feedproxy.google.com/~r/RUserGroups/~3/U-fUeqHqeRM/"
},
{
"title": "Using neural networks for credit scoring: a simple example",
"href": "https://web.archive.org/web/http://fishyoperations.com/tag_feeds/fishyoperations.com/fishyoperations.com/neural-network-for-credit-scoring.html"
},
{
"title": "Shiny cheat sheet",
"href": "https://blog.rstudio.org/2014/06/30/shiny-cheat-sheet/"
},
{
"title": "Key Driver vs. Network Analysis in R",
"href": "http://joelcadwell.blogspot.com/2013/11/key-driver-vs-network-analysis-in-r.html"
},
{
"title": "How to make a scientific result disappear",
"href": "https://politicalmethodology.wordpress.com/2013/02/27/how-to-make-a-scientific-result-disappear/"
},
{
"title": "Creating inset maps using spatial objects",
"href": "https://jannesm.wordpress.com/2016/07/05/inset-maps-using-spatial-objects/"
},
{
"title": "The New Microsoft Data Science User Group Program",
"href": "http://blog.revolutionanalytics.com/2015/09/the-new-microsoft-data-science-user-group-program.html"
},
{
"title": "High incidence in Measles Data in Project Tycho",
"href": "http://wiekvoet.blogspot.com/2014/04/high-incidence-in-measles-data-in.html"
},
{
"title": "The avalanche of publications mentioning GO",
"href": "https://web.archive.org/web/http://www.cwcity.de/fehler/404.php"
},
{
"title": "Bootstrap, strap-on, anal-yzing… statistics is getting weirder by the moment",
"href": "https://danganothererror.wordpress.com/2010/07/29/bootstrap-strap-on-anal-yzing-statistics-is-getting-weirder-by-the-moment/"
},
{
"title": "Where have all the Hacker News old-timers gone?",
"href": "http://blog.revolutionanalytics.com/2010/08/where-have-all-the-hacker-news-oldtimers-gone.html"
},
{
"title": "Gauge Chart in R",
"href": "https://gastonsanchez.wordpress.com/2013/01/10/gauge-chart-in-r/"
},
{
"title": "Evolve your own beats — automatically generating music via algorithms",
"href": "https://web.archive.org/web/http://www.vikparuchuri.com/blog/categories/r/www.vikparuchuri.com/blog/evolve-your-own-beats-automatically-generating-music/"
},
{
"title": "A central hub for R bloggers",
"href": "https://feedproxy.google.com/~r/OneRTipADay/~3/vVXUXc-hRvU/central-hub-for-r-bloggers.html"
},
{
"title": "Profile Likelihood",
"href": "http://freakonometrics.hypotheses.org/20573"
},
{
"title": "Simple template for scientific manuscripts in R markdown",
"href": "http://www.petrkeil.com/?p=2401"
},
{
"title": "Call for Presentations – EARL Conference, London",
"href": "https://www.r-users.com/jobs/call-for-presentations-earl-conference-london/"
},
{
"title": "Introduction to R: Installation, Using R as a Calculator, Operators",
"href": "https://r-norberg.blogspot.com/2012/10/introduction-to-r-installation-using-r.html"
},
{
"title": "Because it’s Friday: Asteroids",
"href": "http://blog.revolutionanalytics.com/2010/11/because-its-friday-asteroids.html"
},
{
"title": "NIPS 2010: Monte Carlo workshop",
"href": "https://xianblog.wordpress.com/2010/09/03/nips-2010-monte-carlo-workshop/"
},
{
"title": "analyze the national survey on drug use and health (nsduh) with r",
"href": "http://www.asdfree.com/2012/11/analyze-national-survey-on-drug-use-and.html"
}
]
|
/json/218.r
|
no_license
|
rweekly/rweekly.org
|
R
| false
| false
| 8,334
|
r
|
[
{
"title": "Statistics: Losing Ground to CS, Losing Image Among Students",
"href": "https://matloff.wordpress.com/2014/08/26/statistics-losing-ground-to-cs-losing-image-among-students/"
},
{
"title": "Revolution Newsletter: October 2011",
"href": "http://blog.revolutionanalytics.com/2011/10/revolution-newsletter-october-2011.html"
},
{
"title": "In case you missed it: May 2012 Roundup",
"href": "http://blog.revolutionanalytics.com/2012/06/in-case-you-missed-it-may-2012-roundup.html"
},
{
"title": "Two tips: adding title for graph with multiple plots; add significance asterix onto a boxplot",
"href": "http://onetipperday.sterding.com/2012/06/two-tips-adding-title-for-graph-with.html"
},
{
"title": "Create Word documents from R with R2DOCX",
"href": "http://blog.revolutionanalytics.com/2013/06/create-word-documents-from-r-with-r2docx.html"
},
{
"title": "Computing on the Language",
"href": "http://simplystatistics.tumblr.com/post/11988685443/computing-on-the-language"
},
{
"title": "Messy matters explores the probability of winning of basketball…",
"href": "https://web.archive.org/web/http://blog.ggplot2.org/post/24401184979"
},
{
"title": "Lots of data != \"Big Data\"",
"href": "http://blog.revolutionanalytics.com/2013/03/lots-of-data-big-data.html"
},
{
"title": "News about speeding R up",
"href": "https://xianblog.wordpress.com/2011/05/24/news-about-speeding-r-up/"
},
{
"title": "Review: Kölner R Meeting 26 Feburary 2014",
"href": "http://www.magesblog.com/2014/03/review-kolner-r-meeting-26-feburary-2014.html"
},
{
"title": "Update for Backtesting Asset Allocation Portfolios post",
"href": "https://systematicinvestor.wordpress.com/2013/10/24/update-for-backtesting-asset-allocation-portfolios-post/"
},
{
"title": "taskscheduleR: R package to schedule R scripts with the Windows task manager",
"href": "http://www.bnosac.be/index.php/blog/56-taskscheduler-r-package-to-schedule-r-scripts-with-the-windows-task-manager"
},
{
"title": "R progress indicators",
"href": "https://binfalse.de/2011/06/19/r-progress-indicators/"
},
{
"title": "A million ? what are the odds…",
"href": "http://blog.free.fr/"
},
{
"title": "Merging Multiple Data Files into One Data Frame",
"href": "https://feedproxy.google.com/~r/CoffeeAndEconometricsInTheMorning/~3/T9s9bnTAAmw/merging-multiple-data-files-into-one.html"
},
{
"title": "Giving a Damn About Statistics: Baseball, Shark Attacks, and Green M&M’s",
"href": "http://spatioanalytics.com/2015/03/13/giving-a-damn-about-statistics-baseball-shark-attacks-and-green-mms/"
},
{
"title": "R 3.0 released; ggplot2 stat_summary bug fixed!",
"href": "http://mindingthebrain.blogspot.com/2013/04/r-30-released-ggplot2-statsummary-bug.html"
},
{
"title": "Stats in the Court Room Hands on Tutorial",
"href": "http://plausibel.blogspot.com/2013/04/stats-in-court-room-hands-on-tutorial.html"
},
{
"title": "Reverse Engineering with Correlated Features",
"href": "http://freakonometrics.hypotheses.org/47979"
},
{
"title": "how to download and install r",
"href": "http://www.twotorials.com/2012/03/how-to-download-and-install-r.html"
},
{
"title": "Timer progress bar added to pbapply package",
"href": "http://peter.solymos.org/code/2016/03/04/timer-progress-bar-added-to-pbapply-package.html"
},
{
"title": "How to make a rough check to see if your data is normally distributed",
"href": "http://firsttimeprogrammer.blogspot.com/2015/07/how-to-make-rough-check-to-see-if-your.html"
},
{
"title": "With Size, Does Risk–>Return?",
"href": "http://timelyportfolio.blogspot.com/2011/12/with-size-does-risk.html"
},
{
"title": "ROracle support for TimesTen In-Memory Database",
"href": "https://blogs.oracle.com/R/entry/roracle_support_times_ten_in"
},
{
"title": "R and SAS in the curriculum: getting students to \"think with data\"",
"href": "https://feedproxy.google.com/~r/SASandR/~3/dhaUlhF7kTQ/r-and-sas-in-curriculum-getting.html"
},
{
"title": "Who are the pollinators? (with R plot)",
"href": "https://bartomeuslab.com/2012/12/17/who-are-the-pollinators/"
},
{
"title": "covr: A Victory for Open Source",
"href": "http://www.mango-solutions.com/wp/2016/07/covr-a-victory-for-open-source/"
},
{
"title": "FastCompany on telling stories with data",
"href": "http://blog.revolutionanalytics.com/2011/08/fastcompany-on-telling-stories-with-data.html"
},
{
"title": "Revised market prediction distributions",
"href": "https://feedproxy.google.com/~r/PortfolioProbeRLanguage/~3/il93MmFLb8I/"
},
{
"title": "Software tools for data analysis – an overview",
"href": "https://feedproxy.google.com/~r/RUserGroups/~3/U-fUeqHqeRM/"
},
{
"title": "Using neural networks for credit scoring: a simple example",
"href": "https://web.archive.org/web/http://fishyoperations.com/tag_feeds/fishyoperations.com/fishyoperations.com/neural-network-for-credit-scoring.html"
},
{
"title": "Shiny cheat sheet",
"href": "https://blog.rstudio.org/2014/06/30/shiny-cheat-sheet/"
},
{
"title": "Key Driver vs. Network Analysis in R",
"href": "http://joelcadwell.blogspot.com/2013/11/key-driver-vs-network-analysis-in-r.html"
},
{
"title": "How to make a scientific result disappear",
"href": "https://politicalmethodology.wordpress.com/2013/02/27/how-to-make-a-scientific-result-disappear/"
},
{
"title": "Creating inset maps using spatial objects",
"href": "https://jannesm.wordpress.com/2016/07/05/inset-maps-using-spatial-objects/"
},
{
"title": "The New Microsoft Data Science User Group Program",
"href": "http://blog.revolutionanalytics.com/2015/09/the-new-microsoft-data-science-user-group-program.html"
},
{
"title": "High incidence in Measles Data in Project Tycho",
"href": "http://wiekvoet.blogspot.com/2014/04/high-incidence-in-measles-data-in.html"
},
{
"title": "The avalanche of publications mentioning GO",
"href": "https://web.archive.org/web/http://www.cwcity.de/fehler/404.php"
},
{
"title": "Bootstrap, strap-on, anal-yzing… statistics is getting weirder by the moment",
"href": "https://danganothererror.wordpress.com/2010/07/29/bootstrap-strap-on-anal-yzing-statistics-is-getting-weirder-by-the-moment/"
},
{
"title": "Where have all the Hacker News old-timers gone?",
"href": "http://blog.revolutionanalytics.com/2010/08/where-have-all-the-hacker-news-oldtimers-gone.html"
},
{
"title": "Gauge Chart in R",
"href": "https://gastonsanchez.wordpress.com/2013/01/10/gauge-chart-in-r/"
},
{
"title": "Evolve your own beats — automatically generating music via algorithms",
"href": "https://web.archive.org/web/http://www.vikparuchuri.com/blog/categories/r/www.vikparuchuri.com/blog/evolve-your-own-beats-automatically-generating-music/"
},
{
"title": "A central hub for R bloggers",
"href": "https://feedproxy.google.com/~r/OneRTipADay/~3/vVXUXc-hRvU/central-hub-for-r-bloggers.html"
},
{
"title": "Profile Likelihood",
"href": "http://freakonometrics.hypotheses.org/20573"
},
{
"title": "Simple template for scientific manuscripts in R markdown",
"href": "http://www.petrkeil.com/?p=2401"
},
{
"title": "Call for Presentations – EARL Conference, London",
"href": "https://www.r-users.com/jobs/call-for-presentations-earl-conference-london/"
},
{
"title": "Introduction to R: Installation, Using R as a Calculator, Operators",
"href": "https://r-norberg.blogspot.com/2012/10/introduction-to-r-installation-using-r.html"
},
{
"title": "Because it’s Friday: Asteroids",
"href": "http://blog.revolutionanalytics.com/2010/11/because-its-friday-asteroids.html"
},
{
"title": "NIPS 2010: Monte Carlo workshop",
"href": "https://xianblog.wordpress.com/2010/09/03/nips-2010-monte-carlo-workshop/"
},
{
"title": "analyze the national survey on drug use and health (nsduh) with r",
"href": "http://www.asdfree.com/2012/11/analyze-national-survey-on-drug-use-and.html"
}
]
|
# 安装package ---------------------------------------------------------------------
#
# packages=c("shiny","ggprism","htmltools","thematic","tidyverse","ggpubr","ggthemes","rstatix","DT","ggpubr", "ggsci", "agricolae")
# ipak <- function(pkg){
# new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
# if (length(new.pkg))
# install.packages(new.pkg, dependencies = TRUE, repos='https://mirrors.tuna.tsinghua.edu.cn/CRAN/' )
# sapply(pkg, require, character.only = TRUE)
# }
# ipak(packages)
# devtools::install_github("RinteRface/bs4Dash")
# 读入package ---------------------------------------------------------------
library(shiny)
library(bs4Dash)
library(tidyverse)
library(ggpubr)
library(ggthemes)
library(rstatix)
library(ggprism)
library(ggsci)
library(DT)
library(agricolae)
# UI界面 --------------------------------------------------------------------
# UI: bs4Dash page with a sidebar (data import, chart type, theme/palette,
# bar settings, facet settings) and a body (label/size panel, main barplot,
# pooled barplot, summary table). Input IDs are consumed by `server` below.
ui <- bs4DashPage(
  dark = FALSE,
  header = dashboardHeader(
    title = dashboardBrand(
      title = "Make a barplot",
      color = "primary"
    )
  ),
  # Sidebar -------------------------------------------------------------
  sidebar = bs4DashSidebar(
    width = "350px",
    skin = "light",
    status = "primary",
    collapsed = FALSE,
    fixed = TRUE,
    bs4SidebarMenu(
      id = "test",
      bs4SidebarMenuItem(
        text = "Data Import",
        icon = icon("table"),
        startExpanded = FALSE,
        fileInput(
          "import_data",
          "Choose CSV Data",
          accept = c("text/csv",
                     "text/comma-separated-values,text/plain",
                     ".csv")
        ),
        # "True" means the uploaded file is already in long
        # (group/key/value) format; see the data() reactive.
        selectInput(
          "pivot_long",
          "Long Pivot",
          choices = c("FALSE" = "False", "TRUE" = "True"),
          selected = "FALSE"
        ),
        # Which column holds the grouping variable; choices are replaced
        # by the uploaded file's column names on upload.
        selectInput(
          "name",
          "Group name",
          choices = colnames(iris),
          selected = "Species"
        )
      ),
      menuItem(
        "Charts",
        icon = icon("chart-bar"),
        startExpanded = FALSE,
        selectInput(
          "position",
          "Position",
          choices = c("Group" = "group", "Stack" = "stack")
        ),
        selectInput(
          "label",
          "Label:",
          choices = c("Asterisk", "Letter"),
          selected = "Asterisk"
        )
      ),
      menuItem(
        "Theme and Palette",
        icon = icon("palette"),
        startExpanded = FALSE,
        # Positional args are min/max/step; the named `value` (matched
        # first by R) sets the default, so these are valid calls.
        sliderInput("slider_theme", "Theme:", 1, 7, 1, value = 7),
        sliderInput("slider_palette", "Palette:", 1, 10, 1, value = 10)
      ),
      menuItem(
        "Set Bars",
        icon = icon("signal"),
        sliderInput("bar_width", "Bar width:", 0, 1, .1, value = 0.6),
        sliderInput("bar_gap", "Bar gap:", 0, 1, .1, value = 0.8),
        sliderInput("y_limit", label = "Y limits", 0, 10, .5, value = 0),
        selectInput(
          "line_color",
          "Line color:",
          c("Grey10", "Grey30", "Grey50", "Grey70", "Grey90", "black"),
          selected = "black"
        ),
        selectInput(
          "bar_fill",
          "Fill",
          c("group", "key"),
          selected = "key")
      ),
      menuItem(
        "Facet",
        icon = icon("border-all"),
        # Label typo fixed: "warp" -> "wrap". The input IDs are kept
        # unchanged so the server code continues to work.
        selectInput("facet_warp", "Facet wrap", c("Yes", "No")),
        selectInput("facet_scale", "Facet scale", c("free", "fixed")),
        # "Null" means "let ggplot decide" (handled in the server).
        selectInput("facet_row", "Facet row", c("Null", 1:10)),
        selectInput("facet_col", "Facet column", c("Null", 1:10))
      ))
  ),
  # Body ----------------------------------------------------------------
  bs4DashBody(
    # Main layout -----------------------------------------------------------
    fluidRow(
      box(solidHeader = T, collapsible = T, status = "primary",
          title = "Figure Label and Size",
          textInput("x_title", "X title:"),
          textInput("y_title", "Y title:"),
          textInput("fill_lab", "Legend:"),
          sliderInput("fig.width",
                      "Figure width:",
                      100, 900, 50, value = 800),
          sliderInput("fig.length",
                      "Figure length:",
                      100, 900, 50, value = 600),
          sliderInput("font_size",
                      "Label font size",
                      6, 32, 1, value = 16),
          sliderInput("label_size",
                      "Asterisk/letter size",
                      1, 20, 1, value = 6),
          selectInput(
            "legend_position",
            "Legend position",
            c("right","top","bottom","none")
          ),
          selectInput("label_angle",
                      "X lab angle:",
                      choices = c(0, 45, 90, -45),
                      selected = 0),
          downloadButton(outputId = "downloader_pdf",
                         label = "Download PDF"),
          downloadButton(outputId = "downloader_jpeg",
                         label = "Download JPEG"), width = 3),
      box(
        title = "Barplot",
        status = "success",
        solidHeader = T,
        plotOutput("barplot"),
        width = 9,
        height = 1000
      )),
    fluidRow(
      box(plotOutput("barplot_total"), solidHeader = T, collapsible = T,
          title = "Barplot total", status = "warning", height = 600),
      box(DT::dataTableOutput("table"), solidHeader = T, collapsible = T,
          title = "Table total", status = "danger", height = 600)
    )))
# server ------------------------------------------------------------------
# Server: reshapes the input data into long group/key/value format, runs the
# appropriate significance analysis (pairwise t-tests with asterisks, or
# ANOVA + LSD post-hoc letters via agricolae), and renders the summary table
# plus the main and pooled barplots. Also pre-writes plot.pdf / plot.jpeg so
# the download handlers can simply copy them.
server <- function(input, output, session) {
  # Map the "Null"/"1".."10" facet selectors onto facet_wrap()'s nrow/ncol:
  # "Null" (the default choice) means NULL, i.e. let ggplot2 decide.
  # Fixes a crash: facet_wrap(nrow = "Null") is not a valid dimension.
  facet_dim <- function(x) {
    if (identical(x, "Null")) NULL else as.integer(x)
  }
  # Sync the UI controls with a freshly uploaded file ------------------------
  observeEvent(input$import_data, {
    dta <- read_csv(input$import_data$datapath)
    updateSelectInput(session, "name", label = "Select", choices = colnames(dta))
    # Named `value` is matched first, so the positional 0 / max(...) / .5
    # fill the min / max / step arguments.
    updateSliderInput(session, "y_limit",label = "Y limits", 0, max(dta %>% select(where(is.numeric))),.5, value = 0)
  })
  # Load data -----------------------------------------------------------------
  data <- reactive({
    if (is.null(input$import_data)) { # no upload yet: demo data
      data <- iris
    } else { # read the uploaded CSV
      data <- read_csv(input$import_data$datapath)
    }
    if (input$pivot_long == "True") { # file is already in long format
      colnames(data) <- c("group","key","value") # just relabel the columns
    } else { # wide format: pivot to long
      data <- data %>%
        gather(.,
               key,
               value,
               -input$name) %>% # everything except the grouping column
        rename("group" = input$name) # normalise the grouping column name
    }
    # Deduplicate the level sets: factor() errors on duplicated levels
    # (R >= 3.4), and the raw columns repeat each level once per row.
    # unique() preserves first-appearance order.
    data$group <- factor(data$group, levels = unique(data$group))
    data$key <- factor(data$key, levels = unique(data$key))
    data <- data %>%
      drop_na(value) # drop incomplete rows, just in case
  })
  # Summary table --------------------------------------------------------------
  output$table <- DT::renderDataTable({
    dta_table <- data() # data() always yields a value (iris fallback)
    dta_table <- dta_table %>%
      group_by(group, key) %>%
      # summarise_each()/funs() are deprecated but kept deliberately: they
      # produce exactly the `mean`/`sd` column names the plots rely on.
      summarise_each(funs(mean,
                          sd))
    dta_table
  })
  # Main plot -------------------------------------------------------------
  # Pre-processing ----------------------------------------------------------
  plotInput <- reactive({
    dta_barplot <- data() # the actual input data
    if (dta_barplot %>% distinct(group) %>% nrow() > 2) {
      # >2 groups: per-key one-way ANOVA with LSD post-hoc letters.
      key_name <- dta_barplot %>% distinct(key) %>% .$key
      posthoc <- data.frame(
        value = double(),
        groups = character(),
        key = character(),
        group = character(),
        stringsAsFactors = FALSE
      )
      for (i in 1:length(key_name)) {
        anova <- dta_barplot %>%
          filter(key == key_name[i]) %>%
          aov(value ~ group, .)
        posthoc.test <- anova %>%
          LSD.test(., "group", p.adj = 'bonferroni')
        posthoc <- posthoc.test$groups %>%
          mutate(key = key_name[i],
                 group = rownames(.)) %>%
          bind_rows(., posthoc)
      } # accumulate the per-key results
    } else if (dta_barplot %>% distinct(key) %>% nrow() > 2) {
      # 2 groups but >2 keys: two-way ANOVA on group + key.
      anova <- aov(value ~ group + key, dta_barplot) # ANOVA
      posthoc.test <-
        LSD.test(anova, c('group', 'key'), p.adj = 'bonferroni')
      posthoc <-
        posthoc.test$groups %>%
        mutate(name = row.names(.)) %>%
        separate(name, into = c("group", "key"), sep = ":") # split "group:key"
    }
    # NOTE(review): when there are <=2 groups AND <=2 keys, `posthoc` is never
    # defined and the "stack"/"Letter" branches below will error -- confirm
    # whether that combination is expected to be unreachable.
    # set theme (switch() on a numeric input picks the n-th alternative)
    switch(
      input$slider_theme,
      theme_set(theme_few()),
      theme_set(theme_bw()),
      theme_set(theme_classic()),
      theme_set(theme_pubclean()),
      theme_set(theme_pubr()),
      theme_set(theme_minimal()),
      theme_set(theme_prism())
    )
    # set palette
    mypal <- switch(
      input$slider_palette,
      pal_npg()(9),
      pal_jco()(9),
      pal_lancet()(9),
      pal_locuszoom()(9),
      prism_fill_pal(palette = "prism_light")(9),
      prism_fill_pal(palette = "floral")(12),
      prism_fill_pal(palette = "prism_dark")(10),
      prism_fill_pal(palette = "viridis")(6),
      prism_fill_pal(palette = "warm_and_sunny")(10),
      prism_fill_pal(palette = "black_and_white")(9)
    )
    # hjust/vjust pair for the x tick labels, chosen per rotation angle
    label_just = case_when(
      input$label_angle == 0 ~ c(.5,.5),
      input$label_angle == 45 ~ c(1,1),
      input$label_angle == -45 ~ c(0,1),
      input$label_angle == 90 ~ c(1,.5)
    )
    # Draw ----------------------------------------------------------------
    if (input$position == "stack") {
      # stacked bars
      p <- dta_barplot %>%
        group_by(group, key) %>%
        summarise_each(funs(mean,
                            sd)) %>% # mean and sd per group/key cell
        group_by(group) %>%
        mutate(SDPos = cumsum(rev(mean))) %>% # rev + cumsum: top y of each stacked segment
        left_join(., posthoc, by = c("group","key")) %>%
        ggplot(aes(x = group, y = mean, fill = key)) +
        geom_bar(
          color = input$line_color,
          stat = "identity",
          width = input$bar_width,
          position = position_stack(input$bar_gap)
        ) + # bars
        geom_text(aes(label = groups),
                  position = position_stack(vjust = 0.5),
                  vjust = 0.5,
                  size = input$label_size,
                  fontface = "bold") + # significance letters
        geom_errorbar(
          aes(ymax = SDPos + sd, ymin = SDPos - sd),
          width = input$bar_width / 4,
          position = "identity"
        ) + # error bars
        scale_fill_manual(values = mypal) + # colours
        scale_y_continuous(expand = expansion(mult = c(0, .1))) + # bars start at y = 0
        labs(x = input$x_title,
             y = input$y_title,
             fill = input$fill_lab) + # axis / legend titles
        theme(axis.text.x = element_text(
          angle = as.numeric(input$label_angle),
          hjust = as.numeric(label_just[1]),
          vjust = as.numeric(label_just[2])
        ),
        legend.position = input$legend_position,
        text = element_text(size = input$font_size)) # final touches
    } else {
      if (input$label == "Asterisk") {
        # faceted barplot annotated with t-test asterisks
        stat.test <- dta_barplot %>%
          group_by(key) %>%
          t_test(value ~ group) # pairwise t-tests per key
        stat.test <- stat.test %>%
          adjust_pvalue(method = "bonferroni") %>%
          add_significance("p.adj") %>%
          mutate(p.signif = p,
                 p.signif = case_when(
                   p.signif <= 0.0001 ~ "****",
                   p.signif <= 0.001 ~ "***",
                   p.signif <= 0.01 ~ "**",
                   p.signif <= 0.05 ~ "*",
                   p.signif > 0.05~"ns"
                 )
          ) # with only two groups p.signif can be missing, so rebuild it by hand
        stat.test <- stat.test %>%
          add_xy_position(fun = "mean_sd",
                          x = "group",
                          dodge = input$bar_gap) # attach label coordinates
        # facet bars with asterisks (duplicate `color` argument removed)
        p <- dta_barplot %>%
          group_by(group, key) %>% # same summary as the stacked plot
          summarise_each(funs(mean,
                              sd)) %>%
          ungroup() %>%
          ggplot(aes(x = group, y = mean)) +
          geom_bar(
            color = input$line_color,
            aes_string(fill = input$bar_fill),
            stat = "identity",
            width = input$bar_width,
            position = position_dodge(input$bar_gap)
          ) +
          geom_errorbar(
            aes(ymax = mean + sd, ymin = mean - sd),
            position = "identity",
            width = input$bar_width / 3
          ) +
          scale_y_continuous(expand = expansion(mult = c(0, .1))) +
          scale_fill_manual(values = mypal) +
          coord_cartesian(ylim = c(as.double(input$y_limit), NA)) +
          labs(x = input$x_title,
               y = input$y_title,
               fill = input$fill_lab) +
          stat_pvalue_manual(
            stat.test,
            label = "p.signif",
            size = input$label_size,
            tip.length = 0.01,
            hide.ns = TRUE
          ) +
          theme(axis.text.x = element_text(
            angle = as.numeric(input$label_angle),
            hjust = as.numeric(label_just[1]),
            vjust = as.numeric(label_just[2])
          ),
          legend.position = input$legend_position,
          text = element_text(size = input$font_size))
        if (input$facet_warp == "Yes") {
          p +
            facet_wrap(
              ~ key,
              scales = input$facet_scale,
              nrow = facet_dim(input$facet_row),
              ncol = facet_dim(input$facet_col)
            )
        }
        else {
          p + facet_wrap(~ key,
                         scales = "fixed",
                         nrow = 1)
        }
      } else {
        # grouped (dodged) bars with significance letters from `posthoc`
        # (duplicate `color` argument removed)
        p <- dta_barplot %>%
          group_by(group, key) %>%
          summarise_each(funs(mean,
                              sd)) %>%
          left_join(., posthoc, by = c("group","key")) %>%
          ggplot(aes_string(x = "group", y = "mean",group = "key", fill = input$bar_fill)) +
          geom_bar(
            color = input$line_color,
            stat = "identity",
            position = position_dodge(input$bar_gap),
            width = input$bar_width
          ) +
          geom_text(
            aes(label = groups,
                y = mean + sd * 2),
            position = position_dodge(input$bar_gap),
            vjust = 0,
            size = input$label_size,
            fontface = "bold"
          )+
          geom_errorbar(
            aes(ymax = mean + sd, ymin = mean - sd),
            position = position_dodge(input$bar_gap),
            width = input$bar_width / 3
          ) +
          scale_fill_manual(values = mypal) +
          coord_cartesian(ylim = c(as.double(input$y_limit), NA)) +
          labs(x = input$x_title,
               y = input$y_title,
               fill = input$fill_lab) +
          scale_y_continuous(expand = expansion(mult = c(0, .1))) +
          theme(axis.text.x = element_text(
            angle = as.numeric(input$label_angle),
            hjust = as.numeric(label_just[1]),
            vjust = as.numeric(label_just[2])
          ),
          legend.position = input$legend_position,
          text = element_text(size = input$font_size))
        if (input$facet_warp == "Yes") {
          p +
            facet_wrap(
              ~ key,
              scales = input$facet_scale,
              nrow = facet_dim(input$facet_row),
              ncol = facet_dim(input$facet_col)
            )
        } else { p }
      }
    }
  })
  output$barplot <- renderPlot({ # render the main plot
    print(plotInput())
    # Side effect: the download handlers below only copy these two files,
    # so both are (re)written on every render.
    ggsave("plot.pdf",
           plotInput(),
           width = input$fig.width/72,
           height = input$fig.length/72)
    ggsave("plot.jpeg",
           plotInput(),
           width = input$fig.width/72,
           height = input$fig.length/72, dpi = 300)
  }, width = function() {input$fig.width},
  height = function() {input$fig.length})
  # Pooled barplot ------------------------------------------------------------
  output$barplot_total <- renderPlot({
    dta_total <- data()
    # Both the long and wide paths collapsed to this same pooled grouping
    # in the original, so the redundant if/else was folded into one call.
    dta_total <- dta_total %>%
      transmute(group = paste0(group," ", key), # fuse group and key
                value = value)
    anova <- aov(value ~ group, dta_total) # ANOVA
    posthoc.test <- LSD.test(anova, 'group', p.adj = 'bonferroni')
    # Interpret with care: this pools measurements of different dimensions.
    # set theme
    switch(
      input$slider_theme,
      theme_set(theme_few()),
      theme_set(theme_bw()),
      theme_set(theme_classic()),
      theme_set(theme_pubclean()),
      theme_set(theme_pubr()),
      theme_set(theme_minimal()),
      theme_set(theme_prism())
    )
    # set palette
    mypal <- switch(
      input$slider_palette,
      pal_npg()(9),
      pal_jco()(9),
      pal_lancet()(9),
      pal_locuszoom()(9),
      prism_fill_pal(palette = "prism_light")(9),
      prism_fill_pal(palette = "floral")(12),
      prism_fill_pal(palette = "prism_dark")(10),
      prism_fill_pal(palette = "viridis")(6),
      prism_fill_pal(palette = "warm_and_sunny")(10),
      prism_fill_pal(palette = "black_and_white")(9)
    )
    dta_sum <- dta_total %>%
      group_by(group) %>%
      summarise_each(funs(mean,   # deprecated; kept for the `mean`/`sd` names
                          sd)) %>%
      ungroup()
    out <- posthoc.test$groups %>% mutate(group = row.names(.)) %>% select(-value)
    dta_sum %>% left_join(.,out, by = "group") %>%
      ggplot(aes(x = group, y = mean)) + # same recipe as the main plot
      geom_bar(
        color = input$line_color,
        aes_string(fill = input$bar_fill),
        stat = "identity",
        width = input$bar_width,
        fill = mypal[1], # NOTE(review): static fill overrides the aes mapping above
        position = position_dodge(input$bar_gap)
      ) +
      geom_errorbar(aes(ymax = mean + sd, ymin = mean - sd),
                    position = "identity",
                    width = input$bar_width / 3) +
      scale_y_continuous(expand = expansion(mult = c(0, .1))) +
      coord_cartesian(ylim=c(as.double(input$y_limit),NA)) +
      labs(x = input$x_title, y = input$y_title, fill = input$fill_lab) +
      geom_text(aes(label = groups, y = (mean + 2 * sd) * 1.05),
                position = position_dodge(input$bar_gap),
                size = 6,
                vjust = 0,
                fontface = "bold") +
      theme(
        axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),
        text = element_text(size=16)
      )
  }, height = 500
  )
  # Download ----------------------------------------------------------
  output$downloader_pdf <- downloadHandler( # serve the pre-rendered PDF
    filename = function() {
      "plot.pdf"
    },
    content = function(file) {
      file.copy("plot.pdf", file, overwrite=TRUE)
    }
  )
  output$downloader_jpeg <- downloadHandler( # serve the pre-rendered JPEG
    filename = function() {
      "plot.jpeg"
    },
    content = function(file) {
      file.copy("plot.jpeg", file, overwrite=TRUE)
    }
  )
}
# Entry point: assemble the UI and server above into a runnable Shiny app.
shinyApp(ui, server)
|
/bar_plot/app.R
|
no_license
|
barnett874/barplot_bs4Dash
|
R
| false
| false
| 23,263
|
r
|
# 安装package ---------------------------------------------------------------------
#
# packages=c("shiny","ggprism","htmltools","thematic","tidyverse","ggpubr","ggthemes","rstatix","DT","ggpubr", "ggsci", "agricolae")
# ipak <- function(pkg){
# new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
# if (length(new.pkg))
# install.packages(new.pkg, dependencies = TRUE, repos='https://mirrors.tuna.tsinghua.edu.cn/CRAN/' )
# sapply(pkg, require, character.only = TRUE)
# }
# ipak(packages)
# devtools::install_github("RinteRface/bs4Dash")
# 读入package ---------------------------------------------------------------
library(shiny)
library(bs4Dash)
library(tidyverse)
library(ggpubr)
library(ggthemes)
library(rstatix)
library(ggprism)
library(ggsci)
library(DT)
library(agricolae)
# UI界面 --------------------------------------------------------------------
# UI: bs4Dash page with a sidebar (data import, chart type, theme/palette,
# bar settings, facet settings) and a body (label/size panel, main barplot,
# pooled barplot, summary table). Input IDs are consumed by `server` below.
ui <- bs4DashPage(
  dark = FALSE,
  header = dashboardHeader(
    title = dashboardBrand(
      title = "Make a barplot",
      color = "primary"
    )
  ),
  # Sidebar -------------------------------------------------------------
  sidebar = bs4DashSidebar(
    width = "350px",
    skin = "light",
    status = "primary",
    collapsed = FALSE,
    fixed = TRUE,
    bs4SidebarMenu(
      id = "test",
      bs4SidebarMenuItem(
        text = "Data Import",
        icon = icon("table"),
        startExpanded = FALSE,
        fileInput(
          "import_data",
          "Choose CSV Data",
          accept = c("text/csv",
                     "text/comma-separated-values,text/plain",
                     ".csv")
        ),
        # "True" means the uploaded file is already in long
        # (group/key/value) format; see the data() reactive.
        selectInput(
          "pivot_long",
          "Long Pivot",
          choices = c("FALSE" = "False", "TRUE" = "True"),
          selected = "FALSE"
        ),
        # Which column holds the grouping variable; choices are replaced
        # by the uploaded file's column names on upload.
        selectInput(
          "name",
          "Group name",
          choices = colnames(iris),
          selected = "Species"
        )
      ),
      menuItem(
        "Charts",
        icon = icon("chart-bar"),
        startExpanded = FALSE,
        selectInput(
          "position",
          "Position",
          choices = c("Group" = "group", "Stack" = "stack")
        ),
        selectInput(
          "label",
          "Label:",
          choices = c("Asterisk", "Letter"),
          selected = "Asterisk"
        )
      ),
      menuItem(
        "Theme and Palette",
        icon = icon("palette"),
        startExpanded = FALSE,
        # Positional args are min/max/step; the named `value` (matched
        # first by R) sets the default, so these are valid calls.
        sliderInput("slider_theme", "Theme:", 1, 7, 1, value = 7),
        sliderInput("slider_palette", "Palette:", 1, 10, 1, value = 10)
      ),
      menuItem(
        "Set Bars",
        icon = icon("signal"),
        sliderInput("bar_width", "Bar width:", 0, 1, .1, value = 0.6),
        sliderInput("bar_gap", "Bar gap:", 0, 1, .1, value = 0.8),
        sliderInput("y_limit", label = "Y limits", 0, 10, .5, value = 0),
        selectInput(
          "line_color",
          "Line color:",
          c("Grey10", "Grey30", "Grey50", "Grey70", "Grey90", "black"),
          selected = "black"
        ),
        selectInput(
          "bar_fill",
          "Fill",
          c("group", "key"),
          selected = "key")
      ),
      menuItem(
        "Facet",
        icon = icon("border-all"),
        # Label typo fixed: "warp" -> "wrap". The input IDs are kept
        # unchanged so the server code continues to work.
        selectInput("facet_warp", "Facet wrap", c("Yes", "No")),
        selectInput("facet_scale", "Facet scale", c("free", "fixed")),
        # "Null" means "let ggplot decide" (handled in the server).
        selectInput("facet_row", "Facet row", c("Null", 1:10)),
        selectInput("facet_col", "Facet column", c("Null", 1:10))
      ))
  ),
  # Body ----------------------------------------------------------------
  bs4DashBody(
    # Main layout -----------------------------------------------------------
    fluidRow(
      box(solidHeader = T, collapsible = T, status = "primary",
          title = "Figure Label and Size",
          textInput("x_title", "X title:"),
          textInput("y_title", "Y title:"),
          textInput("fill_lab", "Legend:"),
          sliderInput("fig.width",
                      "Figure width:",
                      100, 900, 50, value = 800),
          sliderInput("fig.length",
                      "Figure length:",
                      100, 900, 50, value = 600),
          sliderInput("font_size",
                      "Label font size",
                      6, 32, 1, value = 16),
          sliderInput("label_size",
                      "Asterisk/letter size",
                      1, 20, 1, value = 6),
          selectInput(
            "legend_position",
            "Legend position",
            c("right","top","bottom","none")
          ),
          selectInput("label_angle",
                      "X lab angle:",
                      choices = c(0, 45, 90, -45),
                      selected = 0),
          downloadButton(outputId = "downloader_pdf",
                         label = "Download PDF"),
          downloadButton(outputId = "downloader_jpeg",
                         label = "Download JPEG"), width = 3),
      box(
        title = "Barplot",
        status = "success",
        solidHeader = T,
        plotOutput("barplot"),
        width = 9,
        height = 1000
      )),
    fluidRow(
      box(plotOutput("barplot_total"), solidHeader = T, collapsible = T,
          title = "Barplot total", status = "warning", height = 600),
      box(DT::dataTableOutput("table"), solidHeader = T, collapsible = T,
          title = "Table total", status = "danger", height = 600)
    )))
# server ------------------------------------------------------------------
server <- function(input, output,session) {
df <- iris %>%
gather(.,
key,
value,
-Species) %>%
rename("group" = Species)
# 更新参数 --------------------------------------------------------------------
observeEvent(input$import_data, {
dta <- read_csv(input$import_data$datapath)
updateSelectInput(session, "name", label = "Select", choices = colnames(dta))
updateSliderInput(session, "y_limit",label = "Y limits", 0, max(dta %>% select(where(is.numeric))),.5, value = 0)
})
# 读取文件 --------------------------------------------------------------------
data <- reactive({
if (is.null(input$import_data)) { #默认数据
data <- iris
} else { #读取文件
data <- read_csv(input$import_data$datapath)
}
if (input$pivot_long == "True") { #如果你已经调好格式了
colnames(data) <- c("group","key","value") #改个名就好啦
} else { #正常人的选择
data <- data %>%
gather(.,
key,
value,
-input$name) %>% #边长
rename("group" = input$name) #改个名
}
data$group <- factor(data$group, levels = data$group) #以防万一
data$key <- factor(data$key, levels = data$key) #以防万一 again
data <- data %>%
drop_na(value) #以防万一 again and again
})
# 画表格 ----------------------------------------------------------------------
output$table <- DT::renderDataTable({
if(is.null(data())){dta_table <- df} #不忍直视了
dta_table <- data()
dta_table <- dta_table %>% #过于好用,不需要解释了。
group_by(group, key) %>% #这参数快被淘汰了,下次可能用across写了
summarise_each(funs(mean,
sd))
dta_table
})
# 最主要的图 -------------------------------------------------------------------
# 数据预处理 -------------------------------------------------------------------
plotInput <- reactive({
if(is.null(data())) {dta_barplot <- df} #又不是不能用
dta_barplot <- data() #真正的读入数据
if (dta_barplot %>% distinct(group) %>% nrow() > 2) {
key_name <- dta_barplot %>% distinct(key) %>% .$key
posthoc <- data.frame(
value = double(),
groups = character(),
key = character(),
group = character(),
stringsAsFactors = FALSE
)
for (i in 1:length(key_name)) {
anova <- dta_barplot %>%
filter(key == key_name[i]) %>%
aov(value ~ group, .)
posthoc.test <- anova %>%
LSD.test(., "group", p.adj = 'bonferroni')
posthoc <- posthoc.test$groups %>%
mutate(key = key_name[i],
group = rownames(.)) %>%
bind_rows(., posthoc)
} #合并结果
} else if (dta_barplot %>% distinct(key) %>% nrow() > 2) {
anova <- aov(value ~ group + key, dta_barplot) #ANOVA
posthoc.test <-
LSD.test(anova, c('group', 'key'), p.adj = 'bonferroni')
posthoc <-
posthoc.test$groups %>%
mutate(name = row.names(.)) %>%
separate(name, into = c("group", "key"), sep = ":") #合并结果
}
#set theme
switch(
input$slider_theme,
theme_set(theme_few()),
theme_set(theme_bw()),
theme_set(theme_classic()),
theme_set(theme_pubclean()),
theme_set(theme_pubr()),
theme_set(theme_minimal()),
theme_set(theme_prism())
)
#set palette
mypal <- switch(
input$slider_palette,
pal_npg()(9),
pal_jco()(9),
pal_lancet()(9),
pal_locuszoom()(9),
prism_fill_pal(palette = "prism_light")(9),
prism_fill_pal(palette = "floral")(12),
prism_fill_pal(palette = "prism_dark")(10),
prism_fill_pal(palette = "viridis")(6),
prism_fill_pal(palette = "warm_and_sunny")(10),
prism_fill_pal(palette = "black_and_white")(9)
)
label_just = case_when(
input$label_angle == 0 ~ c(.5,.5),
input$label_angle == 45 ~ c(1,1),
input$label_angle == -45 ~ c(0,1),
input$label_angle == 90 ~ c(1,.5)
)
# 开始画图 --------------------------------------------------------------------
if (input$position == "stack") {
# stacked bars
p <- dta_barplot %>%
group_by(group, key) %>% #分组
summarise_each(funs(mean,
sd)) %>% #计算均值,标准差
group_by(group) %>% #分组
mutate(SDPos = cumsum(rev(mean))) %>% #精髓的reverse + cumsum
left_join(., posthoc, by = c("group","key")) %>%
ggplot(aes(x = group, y = mean, fill = key)) + #做图
geom_bar(
color = input$line_color,
stat = "identity",
width = input$bar_width,
position = position_stack(input$bar_gap)
) + # 柱状图
geom_text(aes(label = groups),
position = position_stack(vjust = 0.5),
vjust = 0.5,
size = input$label_size,
fontface = "bold") + #标签
geom_errorbar(
aes(ymax = SDPos + sd, ymin = SDPos - sd),
width = input$bar_width / 4,
position = "identity"
) + #误差线
scale_fill_manual(values = mypal) + #颜色
scale_y_continuous(expand = expansion(mult = c(0, .1))) + #设置柱状图从(0,0)点开始
labs(x = input$x_title,
y = input$y_title,
fill = input$fill_lab) + #xy title name
theme(axis.text.x = element_text(
angle = as.numeric(input$label_angle),
hjust = as.numeric(label_just[1]),
vjust = as.numeric(label_just[2])
),
legend.position = input$legend_position,
text = element_text(size = input$font_size)) #一点细节
} else {
if (input$label == "Asterisk") {
#带星号标记t检验的facet barplot
stat.test <- dta_barplot %>%
group_by(key) %>%
t_test(value ~ group) #遇事不决t检验
stat.test <- stat.test %>%
adjust_pvalue(method = "bonferroni") %>%
add_significance("p.adj") %>%
mutate(p.signif = p,
p.signif = case_when(
p.signif <= 0.0001 ~ "****",
p.signif <= 0.001 ~ "***",
p.signif <= 0.01 ~ "**",
p.signif <= 0.05 ~ "*",
p.signif > 0.05~"ns"
)
) #不知道为什么当数据只有两组会没有p signif,这里手动写一下。
stat.test <- stat.test %>%
add_xy_position(fun = "mean_sd",
x = "group",
dodge = input$bar_gap) #给标签加上位置信息
# facet bars with asterisks
p <- dta_barplot %>%
group_by(group, key) %>% #嗨呀,跟上个图一样
summarise_each(funs(mean,
sd)) %>%
ungroup() %>%
ggplot(aes(x = group, y = mean)) +
geom_bar(
color = input$line_color,
aes_string(fill = input$bar_fill),
stat = "identity",
width = input$bar_width,
color = input$line_color,
position = position_dodge(input$bar_gap)
) +
geom_errorbar(
aes(ymax = mean + sd, ymin = mean - sd),
position = "identity",
width = input$bar_width / 3
) +
scale_y_continuous(expand = expansion(mult = c(0, .1))) +
scale_fill_manual(values = mypal) +
coord_cartesian(ylim = c(as.double(input$y_limit), NA)) +
labs(x = input$x_title,
y = input$y_title,
fill = input$fill_lab) +
stat_pvalue_manual(
stat.test,
label = "p.signif",
size = input$label_size,
tip.length = 0.01,
hide.ns = TRUE
) +
theme(axis.text.x = element_text(
angle = as.numeric(input$label_angle),
hjust = as.numeric(label_just[1]),
vjust = as.numeric(label_just[2])
),
legend.position = input$legend_position,
text = element_text(size = input$font_size))
if (input$facet_warp == "Yes") {
p +
facet_wrap(
~ key,
scales = input$facet_scale,
nrow = input$facet_row,
ncol = input$facet_col
)
}
else {
p + facet_wrap(~ key,
scales = "fixed",
nrow = 1)
}
} else {
# group bars with label
p <- dta_barplot %>%
group_by(group, key) %>%
summarise_each(funs(mean,
sd)) %>%
left_join(., posthoc, by = c("group","key")) %>% #懒得注释了,连字母都是从前面扒的
ggplot(aes_string(x = "group", y = "mean",group = "key", fill = input$bar_fill)) +
geom_bar(
color = input$line_color,
stat = "identity",
position = position_dodge(input$bar_gap),
width = input$bar_width,
color = input$line_color
) +
geom_text(
aes(label = groups,
y = mean + sd * 2),
position = position_dodge(input$bar_gap),
vjust = 0,
size = input$label_size,
fontface = "bold"
)+
geom_errorbar(
aes(ymax = mean + sd, ymin = mean - sd),
position = position_dodge(input$bar_gap),
width = input$bar_width / 3
) +
scale_fill_manual(values = mypal) +
coord_cartesian(ylim = c(as.double(input$y_limit), NA)) +
labs(x = input$x_title,
y = input$y_title,
fill = input$fill_lab) +
scale_y_continuous(expand = expansion(mult = c(0, .1))) +
theme(axis.text.x = element_text(
angle = as.numeric(input$label_angle),
hjust = as.numeric(label_just[1]),
vjust = as.numeric(label_just[2])
),
legend.position = input$legend_position,
text = element_text(size = input$font_size))
if (input$facet_warp == "Yes") {
p +
facet_wrap(
~ key,
scales = input$facet_scale,
nrow = input$facet_row,
ncol = input$facet_col
)
} else { p }
}
}
})
output$barplot <- renderPlot({ #没这个就看不到图了
print(plotInput())
ggsave("plot.pdf",
plotInput(),
width = input$fig.width/72,
height = input$fig.length/72)
ggsave("plot.jpeg",
plotInput(),
width = input$fig.width/72,
height = input$fig.length/72, dpi = 300)
}, width = function() {input$fig.width},
height = function() {input$fig.length}) #其实可以等用户点了再生成图片的,直接先偷偷生成两个吧
# 另一个柱状图 ------------------------------------------------------------------
# Overall bar plot: every (group, key) combination becomes one bar, with
# ANOVA + LSD post-hoc significance letters drawn above the bars.
output$barplot_total <- renderPlot({
  # Fall back to the bundled demo data when nothing has been uploaded.
  # BUG FIX: the original assigned data() unconditionally right after the
  # is.null() check, so the fallback never took effect.
  if (is.null(data())) {
    dta_total <- df
  } else {
    dta_total <- data()
  }
  # Merge group and key into one factor so each combination gets its own bar.
  # NOTE(review): both branches of the original pivot_long if/else were
  # identical, so a single transmute preserves behaviour for both settings.
  dta_total <- dta_total %>%
    transmute(group = paste0(group, " ", key),
              value = value)
  anova <- aov(value ~ group, dta_total)  # one-way ANOVA across all bars
  posthoc.test <- LSD.test(anova, 'group', p.adj = 'bonferroni')
  # The post-hoc letters are only indicative, especially when measurements of
  # different dimensions are thrown into one comparison.
  # Apply the theme chosen in the UI.
  switch(
    input$slider_theme,
    theme_set(theme_few()),
    theme_set(theme_bw()),
    theme_set(theme_classic()),
    theme_set(theme_pubclean()),
    theme_set(theme_pubr()),
    theme_set(theme_minimal()),
    theme_set(theme_prism())
  )
  # Pick the colour palette chosen in the UI.
  mypal <- switch(
    input$slider_palette,
    pal_npg()(9),
    pal_jco()(9),
    pal_lancet()(9),
    pal_locuszoom()(9),
    prism_fill_pal(palette = "prism_light")(9),
    prism_fill_pal(palette = "floral")(12),
    prism_fill_pal(palette = "prism_dark")(10),
    prism_fill_pal(palette = "viridis")(6),
    prism_fill_pal(palette = "warm_and_sunny")(10),
    prism_fill_pal(palette = "black_and_white")(9)
  )
  # Per-bar mean and standard deviation.  summarise_each()/funs() are
  # deprecated; with a single value column this summarise() produces the
  # same `mean` and `sd` columns.
  dta_sum <- dta_total %>%
    group_by(group) %>%
    summarise(mean = mean(value), sd = sd(value)) %>%
    ungroup()
  # Attach the significance letters from the post-hoc test.
  out <- posthoc.test$groups %>% mutate(group = row.names(.)) %>% select(-value)
  dta_sum %>% left_join(., out, by = "group") %>%
    ggplot(aes(x = group, y = mean)) +
    # BUG FIX: the original passed `color` twice and combined a mapped fill
    # (aes_string) with a fixed fill; ggplot2 lets the fixed parameter win,
    # so only the fixed values are kept here.
    geom_bar(
      stat = "identity",
      width = input$bar_width,
      color = input$line_color,
      fill = mypal[1],
      position = position_dodge(input$bar_gap)
    ) +
    geom_errorbar(aes(ymax = mean + sd, ymin = mean - sd),
                  position = "identity",
                  width = input$bar_width / 3) +
    scale_y_continuous(expand = expansion(mult = c(0, .1))) +
    coord_cartesian(ylim = c(as.double(input$y_limit), NA)) +
    labs(x = input$x_title, y = input$y_title, fill = input$fill_lab) +
    geom_text(aes(label = groups, y = (mean + 2 * sd) * 1.05),
              position = position_dodge(input$bar_gap),
              size = 6,
              vjust = 0,
              fontface = "bold") +
    theme(
      axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),
      text = element_text(size = 16)
    )
}, height = 500
)
# Download ----------------------------------------------------------
# Download handler for the PDF version of the plot.  The file itself is
# written ahead of time by the barplot renderPlot() observer, so the handler
# only has to copy it to the path Shiny supplies.
output$downloader_pdf <- downloadHandler(
  filename = function() "plot.pdf",
  content = function(file) {
    file.copy("plot.pdf", file, overwrite = TRUE)
  }
)
# Download handler for the JPEG version of the plot; like the PDF handler,
# it just hands over the pre-generated file.
output$downloader_jpeg <- downloadHandler(
  filename = function() "plot.jpeg",
  content = function(file) {
    file.copy("plot.jpeg", file, overwrite = TRUE)
  }
)
}
shinyApp(ui, server)
|
######################
library("gridExtra")
library("ggplot2")
library("grid")
#####################
# Upper-panel function for pairs(): writes the correlation of x and y
# (Spearman by default) in the centre of the panel.  Text size grows with
# the magnitude of the correlation; it turns red when |r| > 0.9.
panel.correlation <- function(x, y, corMethod = "spearman", digits = 2,
                              prefix = "", cex.cor, col = "black", ...) {
  # Temporarily switch the panel to unit coordinates; restore on exit.
  usr <- par("usr")
  on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- cor(x, y, method = corMethod)
  # Formatting r alongside a fixed reference value keeps the digit count stable.
  label <- paste(prefix, format(c(r, 0.123456789), digits = digits)[1], sep = "")
  if (missing(cex.cor)) {
    cex.cor <- 0.8 / strwidth(label)
  }
  if (abs(r) > .8) {
    cex.cor <- cex.cor + .5
  }
  if (abs(r) > .9) {
    col <- "red"
  }
  text(0.5, 0.5, label, cex = cex.cor, col = col)
}
# Lower-panel function for pairs(): scatter points plus a lowess smoother
# fitted through the finite (x, y) pairs only.
panel.smooth <- function(x, y, col = par("col"), bg = NA, pch = par("pch"),
                         cex = 1, col.smooth = "red", span = 2/3, iter = 3, ...) {
  points(x, y, pch = pch, col = col, bg = bg, cex = cex)
  finite <- is.finite(x) & is.finite(y)
  if (any(finite)) {
    lines(stats::lowess(x[finite], y[finite], f = span, iter = iter),
          col = col.smooth, ...)
  }
}
# Scatterplot matrix with correlations in the upper triangle and lowess
# smoothers in the lower triangle.  Rows containing any NA are dropped first.
#
# data_matrix : a numeric matrix or data frame
# ...         : further arguments passed on to pairs()
corPlot <- function(data_matrix, ...) {
  m <- as.matrix(data_matrix)
  # complete.cases() is the idiomatic (and faster) equivalent of the original
  # !apply(m, 1, function(x) any(is.na(x))); drop = FALSE keeps m a matrix
  # even when a single row survives.
  m <- m[stats::complete.cases(m), , drop = FALSE]
  pairs(m, upper.panel = panel.correlation, lower.panel = panel.smooth, ...)
}
|
/R/corPlot.R
|
no_license
|
apratap/rbundle
|
R
| false
| false
| 1,182
|
r
|
######################
library("gridExtra")
library("ggplot2")
library("grid")
#####################
# Upper-panel function for pairs(): writes the correlation of x and y
# (Spearman by default) in the centre of the panel.  Text size grows with
# the magnitude of the correlation; it turns red when |r| > 0.9.
panel.correlation <- function(x, y, corMethod = "spearman", digits = 2,
                              prefix = "", cex.cor, col = "black", ...) {
  # Temporarily switch the panel to unit coordinates; restore on exit.
  usr <- par("usr")
  on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- cor(x, y, method = corMethod)
  # Formatting r alongside a fixed reference value keeps the digit count stable.
  label <- paste(prefix, format(c(r, 0.123456789), digits = digits)[1], sep = "")
  if (missing(cex.cor)) {
    cex.cor <- 0.8 / strwidth(label)
  }
  if (abs(r) > .8) {
    cex.cor <- cex.cor + .5
  }
  if (abs(r) > .9) {
    col <- "red"
  }
  text(0.5, 0.5, label, cex = cex.cor, col = col)
}
# Lower-panel function for pairs(): scatter points plus a lowess smoother
# fitted through the finite (x, y) pairs only.
panel.smooth <- function(x, y, col = par("col"), bg = NA, pch = par("pch"),
                         cex = 1, col.smooth = "red", span = 2/3, iter = 3, ...) {
  points(x, y, pch = pch, col = col, bg = bg, cex = cex)
  finite <- is.finite(x) & is.finite(y)
  if (any(finite)) {
    lines(stats::lowess(x[finite], y[finite], f = span, iter = iter),
          col = col.smooth, ...)
  }
}
# Scatterplot matrix with correlations in the upper triangle and lowess
# smoothers in the lower triangle.  Rows containing any NA are dropped first.
#
# data_matrix : a numeric matrix or data frame
# ...         : further arguments passed on to pairs()
corPlot <- function(data_matrix, ...) {
  m <- as.matrix(data_matrix)
  # complete.cases() is the idiomatic (and faster) equivalent of the original
  # !apply(m, 1, function(x) any(is.na(x))); drop = FALSE keeps m a matrix
  # even when a single row survives.
  m <- m[stats::complete.cases(m), , drop = FALSE]
  pairs(m, upper.panel = panel.correlation, lower.panel = panel.smooth, ...)
}
|
# ggplot2 homework: density plots, facetted scatterplots and grouped line
# plots using the birthwt and Sitka datasets from the MASS package.
#1 Use data in the birthwt dataset in the MASS library
library(MASS)
View(birthwt)
#1a Construct three density plots of birthweight (bwt) grouping by race
#in the same plotting pane
library(ggplot2)
# NOTE(review): race is still numeric at this point, so fill = race is
# treated as a continuous variable; the factor() conversion further down is
# what makes the grouped densities work.
f<- ggplot(birthwt, aes(x=bwt, fill=race))
f+ geom_density(color="red",alpha=.4)+theme_bw()+
scale_fill_brewer(palette = "OrRd")
#*****Correct
# change race to a factor so that we can separate the density plots based on this factor
birthwt$race <- factor(birthwt$race)
# these are the four plotting steps: p1, p2, p3, p4
# ** Breaking plots down into these steps isn't necessary. You could just do a bunch
# of ggplot(...) + ggtitle('...') + geom_point() etc. But this might be clearer.**
p1 <- ggplot(data = birthwt, aes(x = bwt))
# (use ?birthwt to see which columns correspond to birth weight and mother's race)
p2 <- p1 + ggtitle('Smoothed Birth Weight Density, by Mother\'s Race')
p3 <- p2 + geom_density(aes(group = race, fill = race), color = 'black', alpha = 0.4)
# (can experiment with the above color and alpha parameters to see what's clearest for you)
p4 <- p3 + theme_classic()
# need to "run" the final step of the graph to actually plot it, if using the step-by-step method:
p4
#*****
#1b Construct a multipane scatterplot of mother's weight (lwt) versus
#birthweigth with a separate pane/facet for smoking status.
# NOTE(review): color=smoke is given outside aes(), so it is not an aesthetic
# mapping — presumably aes(..., color = smoke) was intended; confirm.
p<- ggplot(data =birthwt, aes(x=lwt, y=bwt), color=smoke)
p+facet_grid(.~smoke)+geom_point()
#2 See the dataset 'Sitka'
View(Sitka)
#2a Produce a plot of size (y) versus Time (x) for tree 1.
tree1<- subset(Sitka, tree==1)
p1<- ggplot(tree1, aes(x=Time, y= size))
p1+geom_line()
#2b Produce a plot of size (y) versus Time (x) for all 79 trees with a
#separate line for each tree. Use the color of each line to denote the
#condition (control or ozone-rich).
g2<-ggplot(data=Sitka, aes(x=Time, y=size,color=treat))
g2+geom_line (aes(group=tree))
#2c Same as previous graph in part (b) but facetted based on control or
#ozone-rich. Based on this graph, does there appear to be a difference
#in growth between the two conditions?
g2+geom_line(aes(group=tree))+facet_grid(.~treat)
#yes it does show the difference in growth.
#The control trees have more variance in growth than the ozone trees
|
/5. ggplot2.R
|
no_license
|
nhinguyen23/Homework
|
R
| false
| false
| 2,265
|
r
|
# ggplot2 homework: density plots, facetted scatterplots and grouped line
# plots using the birthwt and Sitka datasets from the MASS package.
#1 Use data in the birthwt dataset in the MASS library
library(MASS)
View(birthwt)
#1a Construct three density plots of birthweight (bwt) grouping by race
#in the same plotting pane
library(ggplot2)
# NOTE(review): race is still numeric at this point, so fill = race is
# treated as a continuous variable; the factor() conversion further down is
# what makes the grouped densities work.
f<- ggplot(birthwt, aes(x=bwt, fill=race))
f+ geom_density(color="red",alpha=.4)+theme_bw()+
scale_fill_brewer(palette = "OrRd")
#*****Correct
# change race to a factor so that we can separate the density plots based on this factor
birthwt$race <- factor(birthwt$race)
# these are the four plotting steps: p1, p2, p3, p4
# ** Breaking plots down into these steps isn't necessary. You could just do a bunch
# of ggplot(...) + ggtitle('...') + geom_point() etc. But this might be clearer.**
p1 <- ggplot(data = birthwt, aes(x = bwt))
# (use ?birthwt to see which columns correspond to birth weight and mother's race)
p2 <- p1 + ggtitle('Smoothed Birth Weight Density, by Mother\'s Race')
p3 <- p2 + geom_density(aes(group = race, fill = race), color = 'black', alpha = 0.4)
# (can experiment with the above color and alpha parameters to see what's clearest for you)
p4 <- p3 + theme_classic()
# need to "run" the final step of the graph to actually plot it, if using the step-by-step method:
p4
#*****
#1b Construct a multipane scatterplot of mother's weight (lwt) versus
#birthweigth with a separate pane/facet for smoking status.
# NOTE(review): color=smoke is given outside aes(), so it is not an aesthetic
# mapping — presumably aes(..., color = smoke) was intended; confirm.
p<- ggplot(data =birthwt, aes(x=lwt, y=bwt), color=smoke)
p+facet_grid(.~smoke)+geom_point()
#2 See the dataset 'Sitka'
View(Sitka)
#2a Produce a plot of size (y) versus Time (x) for tree 1.
tree1<- subset(Sitka, tree==1)
p1<- ggplot(tree1, aes(x=Time, y= size))
p1+geom_line()
#2b Produce a plot of size (y) versus Time (x) for all 79 trees with a
#separate line for each tree. Use the color of each line to denote the
#condition (control or ozone-rich).
g2<-ggplot(data=Sitka, aes(x=Time, y=size,color=treat))
g2+geom_line (aes(group=tree))
#2c Same as previous graph in part (b) but facetted based on control or
#ozone-rich. Based on this graph, does there appear to be a difference
#in growth between the two conditions?
g2+geom_line(aes(group=tree))+facet_grid(.~treat)
#yes it does show the difference in growth.
#The control trees have more variance in growth than the ozone trees
|
#Word Cloud
#(http://www.sthda.com/english/wiki/word-cloud-generator-in-r-one-killer-function-to-do-everything-you-need)
#WordCloud From : an R object containing plain text; a txt file containing plain text. It works with local and online hosted txt files; A URL of a web page
#Install Packages
library(wordcloud)
# NOTE(review): install.packages() without arguments fails in a
# non-interactive session; these install lines should be run once by hand,
# not on every execution of the script.
install.packages()
install.packages(c("tm", "SnowballC", "RColorBrewer","RCurl", "XML"))
#Load these libraries
library(wordcloud)
library(RColorBrewer)
library(SnowballC)
library(XML)
library(tm)
install.packages("RCurl")
library(RCurl)
# NOTE(review): this bare text() call has no arguments and will error unless
# an active plot exists; it looks like a leftover and can likely be removed.
text()
# Pull in the rquery.wordcloud() helper from sthda.com.
source('http://www.sthda.com/upload/rquery_wordcloud.r')
filePath <- ('https://en.wikipedia.org/wiki/MS_Dhoni')
# NOTE(review): filePath is a URL but type = "file" is passed below;
# presumably type = "url" was intended — confirm against rquery.wordcloud().
# The stray positional 'red' after color='blue' also looks accidental.
res<-rquery.wordcloud(filePath, type ="file", lang = "english", color='blue','red')
#to plot more words
res<-rquery.wordcloud(filePath, type ="file", lang = "english", min.freq = 5, max.words = 50)
#change colors
# Reds color palette
res<-rquery.wordcloud(filePath, type ="file", lang = "english", colorPalette = "Reds")
# RdBu color palette
res<-rquery.wordcloud(filePath, type ="file", lang = "english",colorPalette = "RdBu")
# use unique color
res<-rquery.wordcloud(filePath, type ="file", lang = "english", colorPalette = "black")
# Inspect the term-document matrix and the frequency table of the last run.
(tdm <- res$tdm)
(freqTable <- res$freqTable)
# Show the top10 words and their frequency
head(freqTable, 10)
# Bar plot of the frequency for the top10
barplot(freqTable[1:10,]$freq, las = 2, names.arg = freqTable[1:10,]$word, col ="lightblue", main ="Most frequent words", ylab = "Word frequencies")
findFreqTerms(tdm, lowfreq = 4) #terms occurring at least 4 times
#words that are associated with "freedom" in the I Have a Dream speech:
findAssocs(tdm, terms = "freedom", corlimit = 0.3)
#Wordcloud of webpage
url = "http://www.sthda.com/english/wiki/create-and-format-powerpoint-documents-from-r-software"
#clear your plot area before plotting new by clicking on paintbrush in plots
rquery.wordcloud(x=url, type="url")
# NOTE(review): the two lines below duplicate the assignment and call above.
url = "http://www.sthda.com/english/wiki/create-and-format-powerpoint-documents-from-r-software"
rquery.wordcloud(x=url, type="url")
|
/Word cloud.R
|
no_license
|
miliraj/analytics1
|
R
| false
| false
| 2,071
|
r
|
#Word Cloud
#(http://www.sthda.com/english/wiki/word-cloud-generator-in-r-one-killer-function-to-do-everything-you-need)
#WordCloud From : an R object containing plain text; a txt file containing plain text. It works with local and online hosted txt files; A URL of a web page
#Install Packages
library(wordcloud)
# NOTE(review): install.packages() without arguments fails in a
# non-interactive session; these install lines should be run once by hand,
# not on every execution of the script.
install.packages()
install.packages(c("tm", "SnowballC", "RColorBrewer","RCurl", "XML"))
#Load these libraries
library(wordcloud)
library(RColorBrewer)
library(SnowballC)
library(XML)
library(tm)
install.packages("RCurl")
library(RCurl)
# NOTE(review): this bare text() call has no arguments and will error unless
# an active plot exists; it looks like a leftover and can likely be removed.
text()
# Pull in the rquery.wordcloud() helper from sthda.com.
source('http://www.sthda.com/upload/rquery_wordcloud.r')
filePath <- ('https://en.wikipedia.org/wiki/MS_Dhoni')
# NOTE(review): filePath is a URL but type = "file" is passed below;
# presumably type = "url" was intended — confirm against rquery.wordcloud().
# The stray positional 'red' after color='blue' also looks accidental.
res<-rquery.wordcloud(filePath, type ="file", lang = "english", color='blue','red')
#to plot more words
res<-rquery.wordcloud(filePath, type ="file", lang = "english", min.freq = 5, max.words = 50)
#change colors
# Reds color palette
res<-rquery.wordcloud(filePath, type ="file", lang = "english", colorPalette = "Reds")
# RdBu color palette
res<-rquery.wordcloud(filePath, type ="file", lang = "english",colorPalette = "RdBu")
# use unique color
res<-rquery.wordcloud(filePath, type ="file", lang = "english", colorPalette = "black")
# Inspect the term-document matrix and the frequency table of the last run.
(tdm <- res$tdm)
(freqTable <- res$freqTable)
# Show the top10 words and their frequency
head(freqTable, 10)
# Bar plot of the frequency for the top10
barplot(freqTable[1:10,]$freq, las = 2, names.arg = freqTable[1:10,]$word, col ="lightblue", main ="Most frequent words", ylab = "Word frequencies")
findFreqTerms(tdm, lowfreq = 4) #terms occurring at least 4 times
#words that are associated with "freedom" in the I Have a Dream speech:
findAssocs(tdm, terms = "freedom", corlimit = 0.3)
#Wordcloud of webpage
url = "http://www.sthda.com/english/wiki/create-and-format-powerpoint-documents-from-r-software"
#clear your plot area before plotting new by clicking on paintbrush in plots
rquery.wordcloud(x=url, type="url")
# NOTE(review): the two lines below duplicate the assignment and call above.
url = "http://www.sthda.com/english/wiki/create-and-format-powerpoint-documents-from-r-software"
rquery.wordcloud(x=url, type="url")
|
# Visualisation of crimes-against-women data for Indian states/UTs using
# rCharts (nPlot/rPlot) — interactive charts saved as standalone HTML files.
library(rCharts)
library(reshape2)
library(plyr)
library(scales)
CWFull=read.csv("CAWomen.csv")
# Long format: one row per (Year, StateUT, crime-type) combination.
TotCrimesmelt=melt(CWFull,id=c("Year","StateUT"))
save(TotCrimesmelt,file="TotCrimesmelt.rda")
# Country-wide totals over time, one line per crime type.
TCrimeplot=nPlot(value~Year, group="variable", data=TotCrimesmelt[which(TotCrimesmelt$StateUT=="TOTAL"),], type="lineWithFocusChart",
height=450,width=750)
TCrimeplot
TCrimeplot$save("TCrimeplot.html",cdn=T)
########################### % of crimes over years
# Express each crime type as a percentage of the yearly TotalCrimes column.
PCTYears=CWFull
PCTYears$RapePercent=PCTYears$Rape*100/PCTYears$TotalCrimes
PCTYears$KidnappingAndAbductionPercent=PCTYears$KidnappingAndAbduction*100/PCTYears$TotalCrimes
PCTYears$DowryDeathsPercent =PCTYears$DowryDeaths*100/PCTYears$TotalCrimes
PCTYears$AssaultWithIntentToOutrageModestyPercent=PCTYears$AssaultWithIntentToOutrageModesty*100/PCTYears$TotalCrimes
PCTYears$InsultToModestyPercent=PCTYears$InsultToModesty*100/PCTYears$TotalCrimes
PCTYears$CrueltyByHusbandOrHisRelativesPercent=PCTYears$CrueltyByHusbandOrHisRelatives *100/PCTYears$TotalCrimes
PCTYears$ImportationOfGirlsFromForeignCountryPercent=PCTYears$ImportationOfGirlsFromForeignCountry*100/PCTYears$TotalCrimes
PCTYears$ImmoralTrafficPActPercent=PCTYears$ImmoralTrafficPAct*100/PCTYears$TotalCrimes
PCTYears$DowryProhibitionActPercent=PCTYears$DowryProhibitionAct*100/PCTYears$TotalCrimes
PCTYears$IndecentRepresentationOfWomenPActPercent=PCTYears$IndecentRepresentationOfWomenPAct*100/PCTYears$TotalCrimes
PCTYears$CommissionOfSatiPreventionActPercent=PCTYears$CommissionOfSatiPreventionAct*100/PCTYears$TotalCrimes
PCTYears$TotalCrimesPercent=PCTYears$TotalCrimes*100/PCTYears$TotalCrimes
# Keep Year/StateUT plus the percentage (resp. raw-count) columns only.
onlypct=PCTYears[c(1,15:25,14)]
onlyraw=CWFull[c(1:12,14)]
onlypctmelt=melt (onlypct,id=c("Year","StateUT"))
onlyrawmelt=melt(onlyraw,id=c("Year","StateUT"))
names(onlypctmelt)=c("v1","v2","v3","percval")
# NOTE(review): cbind() assumes both melts produce rows in exactly the same
# order — TODO confirm; a merge on (Year, StateUT, variable) would be safer.
meltpcdata=cbind(onlyrawmelt,onlypctmelt$percval)
names(meltpcdata)=c("Year","StateUT","variable","value","percval")
meltpcdata$percval=paste(round(meltpcdata$percval,digits=2),"% of Total Crimes for the year")
save(meltpcdata,file="meltpcdata.rda")
# Stacked area chart of raw counts with the percentage shown in the tooltip.
TCrimeplot1=nPlot(value~Year, group="variable", data=meltpcdata[which(meltpcdata$StateUT=="TOTAL"),], type="stackedAreaChart",
height=600,width=750)
TCrimeplot1$chart(tooltip = "#! function(key, x, y, e, graph) {
return '<h3>' + key + '</h3>' +
'<p>' + y + ' on ' + x + '</p>' +
'<p>' + e.point.percval + '</p>'
}!#")
TCrimeplot1$save("TCrimeplot1.html",cdn=T)
TCrimeplot1
#############################################
# Focus on 2014 data only now and remove the row with Total values and columns for Total Crimes and Year
CW2014=CWFull[which(CWFull$Year==2014),]
CW2014=CW2014[which(CW2014$StateUT!="TOTAL"),]
CW2014=CW2014[c(-13,-14)]
# Since all values under the Commision of Sati Prevention Act variable are 0, let's drop that column as well
CW2014=CW2014[c(-12)]
# Correlation matrix of variables
corrmatrix<-cor(CW2014[c(-1)]) #store corr matrix
corrdata=as.data.frame(corrmatrix)
corrdata$Variable1=names(corrdata)
corrdatamelt=melt(corrdata,id="Variable1")
names(corrdatamelt)=c("Variable1","Variable2","CorrelationCoefficient")
# Tile plot of the correlation matrix with a diverging palette centred at 0.
corrmatplot = rPlot(Variable2 ~ Variable1, color = 'CorrelationCoefficient', data = corrdatamelt, type = 'tile', height = 600)
corrmatplot$addParams(height = 450, width=1000)
corrmatplot$guides("{color: {scale: {type: gradient2, lower: 'red', middle: 'white', upper: 'blue',midpoint: 0}}}")
corrmatplot$guides(y = list(numticks = length(unique(corrdatamelt$Variable1))))
corrmatplot$guides(x = list(numticks = 3))
corrmatplot$addParams(staggerLabels=TRUE)
corrmatplot$save("corrmatplotstate.html",cdn=T)
corrmatplot
# heatmap of variables and State UTs
# Each crime column is rescaled to [0, 1] so states are comparable per crime.
stateutmelt=ddply(melt(CW2014),.(variable),transform,rescale=rescale(value))
names(stateutmelt)=c("StateUT","Crime","value","rescale")
hmap <- rPlot(StateUT ~ Crime, color = 'rescale', data = stateutmelt, type = 'tile')
hmap$addParams(height = 600, width=1000)
hmap$guides(reduceXTicks = FALSE)
hmap$guides("{color: {scale: {type: gradient, lower: white, upper: red}}}")
hmap$guides(y = list(numticks = length(unique(stateutmelt$StateUT))))
hmap$guides(x = list(numticks = 3))
hmap$save("heatmapstate.html",cdn=T)
hmap
################ Clustering (Quick reference: Quick-R, Kabacoff, http://www.statmethods.net/advstats/cluster.html)
set.seed(123)
kmeansdata=kmeans(CW2014[c(-1)],5) # Decided on 5 for interpretation
# get cluster means
# BUG FIX: this result was stored as `meanvarsw`, but everything below
# (the names()<- call and the parallel plot) reads `meanvars`, which was
# undefined and made the parallel-coordinates section fail.
meanvars=aggregate(CW2014[c(-1)],by=list(kmeansdata$cluster),FUN=mean)
meanvarsw=meanvars # keep the old name as an alias in case it is used elsewhere
# append cluster assignment
CW2014clust <- data.frame(CW2014, kmeansdata$cluster)
# plotting states/uts by cluster number
stategpplot=dPlot(x="StateUT", y="kmeansdata.cluster",groups="kmeansdata.cluster",data=CW2014clust,
type="bar",height=475,width=700,bounds = list(x=50, y=10, width=600, height=300))
stategpplot$yAxis(type="addCategoryAxis")
stategpplot$xAxis(type="addCategoryAxis",orderRule="kmeansdata.cluster")
stategpplot$save("stategpplot.html",cdn=T)
stategpplot
############## Parallel Plot#############
# Shorten the cluster-mean column names for the parallel-coordinates axes.
names(meanvars)=c("Group","Rape","KidnapAbduct","DowryDeath","AssaultModesty","InsultModesty","CrueltyHusband",
"Importation","ImmoralTraffic","DowryProhibit","IndecentRep")
parrstateut <- rCharts$new()
parrstateut$field('lib', 'parcoords')
parrstateut$set(padding = list(top = 25, left = 5, bottom = 10, right = 0), width=1080, height=400)
parrstateut$set(data = toJSONArray(meanvars, json = F),
colorby = 'Rape',
range = range(meanvars$Rape),
colors = c('red','green')
)
parrstateut$setLib("parcoords")
# parrstateut$save("parallelplotstate.html", cdn=T)
parrstateut
|
/state1.R
|
no_license
|
tush9011/tush9011.github.io
|
R
| false
| false
| 5,902
|
r
|
# Visualisation of crimes-against-women data for Indian states/UTs using
# rCharts (nPlot/rPlot) — interactive charts saved as standalone HTML files.
library(rCharts)
library(reshape2)
library(plyr)
library(scales)
CWFull=read.csv("CAWomen.csv")
# Long format: one row per (Year, StateUT, crime-type) combination.
TotCrimesmelt=melt(CWFull,id=c("Year","StateUT"))
save(TotCrimesmelt,file="TotCrimesmelt.rda")
# Country-wide totals over time, one line per crime type.
TCrimeplot=nPlot(value~Year, group="variable", data=TotCrimesmelt[which(TotCrimesmelt$StateUT=="TOTAL"),], type="lineWithFocusChart",
height=450,width=750)
TCrimeplot
TCrimeplot$save("TCrimeplot.html",cdn=T)
########################### % of crimes over years
# Express each crime type as a percentage of the yearly TotalCrimes column.
PCTYears=CWFull
PCTYears$RapePercent=PCTYears$Rape*100/PCTYears$TotalCrimes
PCTYears$KidnappingAndAbductionPercent=PCTYears$KidnappingAndAbduction*100/PCTYears$TotalCrimes
PCTYears$DowryDeathsPercent =PCTYears$DowryDeaths*100/PCTYears$TotalCrimes
PCTYears$AssaultWithIntentToOutrageModestyPercent=PCTYears$AssaultWithIntentToOutrageModesty*100/PCTYears$TotalCrimes
PCTYears$InsultToModestyPercent=PCTYears$InsultToModesty*100/PCTYears$TotalCrimes
PCTYears$CrueltyByHusbandOrHisRelativesPercent=PCTYears$CrueltyByHusbandOrHisRelatives *100/PCTYears$TotalCrimes
PCTYears$ImportationOfGirlsFromForeignCountryPercent=PCTYears$ImportationOfGirlsFromForeignCountry*100/PCTYears$TotalCrimes
PCTYears$ImmoralTrafficPActPercent=PCTYears$ImmoralTrafficPAct*100/PCTYears$TotalCrimes
PCTYears$DowryProhibitionActPercent=PCTYears$DowryProhibitionAct*100/PCTYears$TotalCrimes
PCTYears$IndecentRepresentationOfWomenPActPercent=PCTYears$IndecentRepresentationOfWomenPAct*100/PCTYears$TotalCrimes
PCTYears$CommissionOfSatiPreventionActPercent=PCTYears$CommissionOfSatiPreventionAct*100/PCTYears$TotalCrimes
PCTYears$TotalCrimesPercent=PCTYears$TotalCrimes*100/PCTYears$TotalCrimes
# Keep Year/StateUT plus the percentage (resp. raw-count) columns only.
onlypct=PCTYears[c(1,15:25,14)]
onlyraw=CWFull[c(1:12,14)]
onlypctmelt=melt (onlypct,id=c("Year","StateUT"))
onlyrawmelt=melt(onlyraw,id=c("Year","StateUT"))
names(onlypctmelt)=c("v1","v2","v3","percval")
# NOTE(review): cbind() assumes both melts produce rows in exactly the same
# order — TODO confirm; a merge on (Year, StateUT, variable) would be safer.
meltpcdata=cbind(onlyrawmelt,onlypctmelt$percval)
names(meltpcdata)=c("Year","StateUT","variable","value","percval")
meltpcdata$percval=paste(round(meltpcdata$percval,digits=2),"% of Total Crimes for the year")
save(meltpcdata,file="meltpcdata.rda")
# Stacked area chart of raw counts with the percentage shown in the tooltip.
TCrimeplot1=nPlot(value~Year, group="variable", data=meltpcdata[which(meltpcdata$StateUT=="TOTAL"),], type="stackedAreaChart",
height=600,width=750)
TCrimeplot1$chart(tooltip = "#! function(key, x, y, e, graph) {
return '<h3>' + key + '</h3>' +
'<p>' + y + ' on ' + x + '</p>' +
'<p>' + e.point.percval + '</p>'
}!#")
TCrimeplot1$save("TCrimeplot1.html",cdn=T)
TCrimeplot1
#############################################
# Focus on 2014 data only now and remove the row with Total values and columns for Total Crimes and Year
CW2014=CWFull[which(CWFull$Year==2014),]
CW2014=CW2014[which(CW2014$StateUT!="TOTAL"),]
CW2014=CW2014[c(-13,-14)]
# Since all values under the Commision of Sati Prevention Act variable are 0, let's drop that column as well
CW2014=CW2014[c(-12)]
# Correlation matrix of variables
corrmatrix<-cor(CW2014[c(-1)]) #store corr matrix
corrdata=as.data.frame(corrmatrix)
corrdata$Variable1=names(corrdata)
corrdatamelt=melt(corrdata,id="Variable1")
names(corrdatamelt)=c("Variable1","Variable2","CorrelationCoefficient")
# Tile plot of the correlation matrix with a diverging palette centred at 0.
corrmatplot = rPlot(Variable2 ~ Variable1, color = 'CorrelationCoefficient', data = corrdatamelt, type = 'tile', height = 600)
corrmatplot$addParams(height = 450, width=1000)
corrmatplot$guides("{color: {scale: {type: gradient2, lower: 'red', middle: 'white', upper: 'blue',midpoint: 0}}}")
corrmatplot$guides(y = list(numticks = length(unique(corrdatamelt$Variable1))))
corrmatplot$guides(x = list(numticks = 3))
corrmatplot$addParams(staggerLabels=TRUE)
corrmatplot$save("corrmatplotstate.html",cdn=T)
corrmatplot
# heatmap of variables and State UTs
# Each crime column is rescaled to [0, 1] so states are comparable per crime.
stateutmelt=ddply(melt(CW2014),.(variable),transform,rescale=rescale(value))
names(stateutmelt)=c("StateUT","Crime","value","rescale")
hmap <- rPlot(StateUT ~ Crime, color = 'rescale', data = stateutmelt, type = 'tile')
hmap$addParams(height = 600, width=1000)
hmap$guides(reduceXTicks = FALSE)
hmap$guides("{color: {scale: {type: gradient, lower: white, upper: red}}}")
hmap$guides(y = list(numticks = length(unique(stateutmelt$StateUT))))
hmap$guides(x = list(numticks = 3))
hmap$save("heatmapstate.html",cdn=T)
hmap
################ Clustering (Quick reference: Quick-R, Kabacoff, http://www.statmethods.net/advstats/cluster.html)
set.seed(123)
kmeansdata=kmeans(CW2014[c(-1)],5) # Decided on 5 for interpretation
# get cluster means
# BUG FIX: this result was stored as `meanvarsw`, but everything below
# (the names()<- call and the parallel plot) reads `meanvars`, which was
# undefined and made the parallel-coordinates section fail.
meanvars=aggregate(CW2014[c(-1)],by=list(kmeansdata$cluster),FUN=mean)
meanvarsw=meanvars # keep the old name as an alias in case it is used elsewhere
# append cluster assignment
CW2014clust <- data.frame(CW2014, kmeansdata$cluster)
# plotting states/uts by cluster number
stategpplot=dPlot(x="StateUT", y="kmeansdata.cluster",groups="kmeansdata.cluster",data=CW2014clust,
type="bar",height=475,width=700,bounds = list(x=50, y=10, width=600, height=300))
stategpplot$yAxis(type="addCategoryAxis")
stategpplot$xAxis(type="addCategoryAxis",orderRule="kmeansdata.cluster")
stategpplot$save("stategpplot.html",cdn=T)
stategpplot
############## Parallel Plot#############
# Shorten the cluster-mean column names for the parallel-coordinates axes.
names(meanvars)=c("Group","Rape","KidnapAbduct","DowryDeath","AssaultModesty","InsultModesty","CrueltyHusband",
"Importation","ImmoralTraffic","DowryProhibit","IndecentRep")
parrstateut <- rCharts$new()
parrstateut$field('lib', 'parcoords')
parrstateut$set(padding = list(top = 25, left = 5, bottom = 10, right = 0), width=1080, height=400)
parrstateut$set(data = toJSONArray(meanvars, json = F),
colorby = 'Rape',
range = range(meanvars$Rape),
colors = c('red','green')
)
parrstateut$setLib("parcoords")
# parrstateut$save("parallelplotstate.html", cdn=T)
parrstateut
|
# The implementation of improved EE.
# Pay attention that we haven't consider about storage cost of these functions.
#-------------------------------------------------------------------------------
library("igraph")
library('magic')
library("matlab")
library('foreach')
library('doParallel')
source('thresholding.r')
#Improved Elementary Estimator (EE) for a sparse precision matrix.
#
# S        : p x p sample covariance matrix
# lambda   : thresholding level passed to thr_func
# p        : dimension of S
# thr_func : thresholding function, defaults to hardThreshold
# core_num : number of parallel workers; with core_num == 1 the diagonal
#            blocks are inverted sequentially
#
# Returns a list with
#   solution : the block-diagonal estimate (in the permuted variable order)
#   soliution: alias kept for backward compatibility with the original
#              misspelled element name
#   var_seq  : permutation of 1:p that makes the thresholded matrix
#              block diagonal
improvedEE<-function(S,lambda,p,thr_func=hardThreshold,core_num=1){
  #Initialization
  Omega<-zeros(p)
  #Threshold the covariance matrix
  S_lambda<-thr_func(S,lambda,p)
  #Connected components of the graph induced by the thresholded matrix
  graph<-graph_from_adjacency_matrix(S_lambda,mode="undirected",weighted=TRUE)
  comps<-components(graph)
  num<-comps[[3]]    # number of components
  bins<-comps[[1]]   # component membership of each variable
  csize<-comps[[2]]  # size of each component
  print(sprintf('%d components',num))
  #Permute the variables so that the matrix becomes block diagonal
  var_seq<-zeros(p,2)
  var_seq[,1]<-1:p
  var_seq[,2]<-bins
  var_seq<-var_seq[order(var_seq[,2]),]
  var_seq<-var_seq[,1]
  S_lambda<-S_lambda[var_seq,]
  S_lambda<-S_lambda[,var_seq]
  if(core_num>1){
    #Invert a list of (start, end) blocks and glue them block-diagonally.
    each_inv<-function(pairs){
      library('magic')
      res<-list()
      for(i in seq_along(pairs)){
        pair<-pairs[[i]]
        start<-pair[[1]]
        end<-pair[[2]]
        res[i]<-list(solve(S_lambda[start:end,start:end]))
      }
      return (do.call('adiag',res))
    }
    #(start, end) index pair of every diagonal block
    seq_pair<-list()
    tmp<-1
    for(i in seq_along(csize)){
      seq_pair[[i]]<-list(tmp,csize[i]+tmp-1)
      tmp<-tmp+csize[i]
    }
    # BUG FIX: the original computed size <- length(seq_pair)/core_num, which
    # is fractional unless core_num divides the number of blocks, and then
    # silently dropped or duplicated blocks.  Distribute the blocks into (at
    # most) core_num contiguous, near-equal chunks instead.
    n_chunk<-min(core_num,length(seq_pair))
    node_seq<-split(seq_pair,sort(rep_len(1:n_chunk,length(seq_pair))))
    cl <- makeCluster(core_num)
    registerDoParallel(cl)
    inner_par<-list(S_lambda,node_seq)
    Omega <- foreach(each=1:n_chunk, .combine='adiag', .export='inner_par') %dopar%
    {
      S_lambda<-inner_par[[1]]
      node_seq<-inner_par[[2]]
      each_inv(node_seq[[each]])
    }
    # BUG FIX: the cluster was created with makeCluster() but released with
    # stopImplicitCluster(), leaking the workers; stopCluster() matches.
    stopCluster(cl)
  }
  else if(core_num==1){
    #Sequential path: invert each diagonal block in place.
    Omega<-zeros(p)
    tmp<-1
    for(i in seq_along(csize)){
      Omega[tmp:(csize[i]+tmp-1),tmp:(csize[i]+tmp-1)]<-
        +solve(S_lambda[tmp:(csize[i]+tmp-1),tmp:(csize[i]+tmp-1)])
      tmp<-tmp+csize[i]
    }
  }
  return (list(solution=Omega,soliution=Omega,var_seq=var_seq))
}
|
/improved_EE.r
|
no_license
|
ZJQxxn/Improved-EE
|
R
| false
| false
| 2,565
|
r
|
# The implementation of improved EE.
# Pay attention that we haven't consider about storage cost of these functions.
#-------------------------------------------------------------------------------
library("igraph")
library('magic')
library("matlab")
library('foreach')
library('doParallel')
source('thresholding.r')
#Improved Elementary Estimator (EE) for a sparse precision matrix.
#
# S        : p x p sample covariance matrix
# lambda   : thresholding level passed to thr_func
# p        : dimension of S
# thr_func : thresholding function, defaults to hardThreshold
# core_num : number of parallel workers; with core_num == 1 the diagonal
#            blocks are inverted sequentially
#
# Returns a list with
#   solution : the block-diagonal estimate (in the permuted variable order)
#   soliution: alias kept for backward compatibility with the original
#              misspelled element name
#   var_seq  : permutation of 1:p that makes the thresholded matrix
#              block diagonal
improvedEE<-function(S,lambda,p,thr_func=hardThreshold,core_num=1){
  #Initialization
  Omega<-zeros(p)
  #Threshold the covariance matrix
  S_lambda<-thr_func(S,lambda,p)
  #Connected components of the graph induced by the thresholded matrix
  graph<-graph_from_adjacency_matrix(S_lambda,mode="undirected",weighted=TRUE)
  comps<-components(graph)
  num<-comps[[3]]    # number of components
  bins<-comps[[1]]   # component membership of each variable
  csize<-comps[[2]]  # size of each component
  print(sprintf('%d components',num))
  #Permute the variables so that the matrix becomes block diagonal
  var_seq<-zeros(p,2)
  var_seq[,1]<-1:p
  var_seq[,2]<-bins
  var_seq<-var_seq[order(var_seq[,2]),]
  var_seq<-var_seq[,1]
  S_lambda<-S_lambda[var_seq,]
  S_lambda<-S_lambda[,var_seq]
  if(core_num>1){
    #Invert a list of (start, end) blocks and glue them block-diagonally.
    each_inv<-function(pairs){
      library('magic')
      res<-list()
      for(i in seq_along(pairs)){
        pair<-pairs[[i]]
        start<-pair[[1]]
        end<-pair[[2]]
        res[i]<-list(solve(S_lambda[start:end,start:end]))
      }
      return (do.call('adiag',res))
    }
    #(start, end) index pair of every diagonal block
    seq_pair<-list()
    tmp<-1
    for(i in seq_along(csize)){
      seq_pair[[i]]<-list(tmp,csize[i]+tmp-1)
      tmp<-tmp+csize[i]
    }
    # BUG FIX: the original computed size <- length(seq_pair)/core_num, which
    # is fractional unless core_num divides the number of blocks, and then
    # silently dropped or duplicated blocks.  Distribute the blocks into (at
    # most) core_num contiguous, near-equal chunks instead.
    n_chunk<-min(core_num,length(seq_pair))
    node_seq<-split(seq_pair,sort(rep_len(1:n_chunk,length(seq_pair))))
    cl <- makeCluster(core_num)
    registerDoParallel(cl)
    inner_par<-list(S_lambda,node_seq)
    Omega <- foreach(each=1:n_chunk, .combine='adiag', .export='inner_par') %dopar%
    {
      S_lambda<-inner_par[[1]]
      node_seq<-inner_par[[2]]
      each_inv(node_seq[[each]])
    }
    # BUG FIX: the cluster was created with makeCluster() but released with
    # stopImplicitCluster(), leaking the workers; stopCluster() matches.
    stopCluster(cl)
  }
  else if(core_num==1){
    #Sequential path: invert each diagonal block in place.
    Omega<-zeros(p)
    tmp<-1
    for(i in seq_along(csize)){
      Omega[tmp:(csize[i]+tmp-1),tmp:(csize[i]+tmp-1)]<-
        +solve(S_lambda[tmp:(csize[i]+tmp-1),tmp:(csize[i]+tmp-1)])
      tmp<-tmp+csize[i]
    }
  }
  return (list(solution=Omega,soliution=Omega,var_seq=var_seq))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wm_id2name.R
\name{wm_id2name}
\alias{wm_id2name}
\alias{wm_id2name_}
\title{Get taxonomic name for an AphiaID}
\usage{
wm_id2name(id, ...)
wm_id2name_(id, ...)
}
\arguments{
\item{id}{(numeric/integer) an AphiaID, required. For \code{wm_id2name}
must be \code{length(id) = 1}, but for \code{wm_id2name_} can be
\code{length(id) >= 1}}
\item{...}{named curl options. see \code{curl::curl_options}}
}
\value{
A character string that is the taxonomic name. When using underscore
method, a list, named by the input IDs
}
\description{
Get taxonomic name for an AphiaID
}
\section{Singular vs. plural}{
Of the two sister functions, the one without the underscore is the original
function that wraps the relevant WoRMS API method - and only accepts
one thing (i.e., name or AphiaID) per request.
The sister function with the underscore at the end is the plural version,
accepting more than one input. Internally this function loops over
the non-underscore method, and labels output (whether it's a list or
data.frame rows) with the input names or IDs so that you can easily
parse output by your inputs.
}
\examples{
\dontrun{
wm_id2name(id = 105706)
wm_id2name_(id = c(105706, 126436))
}
}
|
/man/wm_id2name.Rd
|
permissive
|
shivam11/worrms
|
R
| false
| true
| 1,268
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wm_id2name.R
\name{wm_id2name}
\alias{wm_id2name}
\alias{wm_id2name_}
\title{Get taxonomic name for an AphiaID}
\usage{
wm_id2name(id, ...)
wm_id2name_(id, ...)
}
\arguments{
\item{id}{(numeric/integer) an AphiaID, required. For \code{wm_id2name}
must be \code{length(id) = 1}, but for \code{wm_id2name_} can be
\code{length(id) >= 1}}
\item{...}{named curl options. see \code{curl::curl_options}}
}
\value{
A character string that is the taxonomic name. When using underscore
method, a list, named by the input IDs
}
\description{
Get taxonomic name for an AphiaID
}
\section{Singular vs. plural}{
Of the two sister functions, the one without the underscore is the original
function that wraps the relevant WoRMS API method - and only accepts
one thing (i.e., name or AphiaID) per request.
The sister function with the underscore at the end is the plural version,
accepting more than one input. Internally this function loops over
the non-underscore method, and labels output (whether it's a list or
data.frame rows) with the input names or IDs so that you can easily
parse output by your inputs.
}
\examples{
\dontrun{
wm_id2name(id = 105706)
wm_id2name_(id = c(105706, 126436))
}
}
|
# utility for drawing labeled vectors
# TODO: handle xpd=TRUE somewhere so labels aren't cut off
# DONE: allow origin to be a two-col matrix like x
# TODO: calculate default length in terms of par("usr")
# Draw labeled vectors (arrows) from a common origin, e.g. for biplot-style
# variable displays.
#
# x      : two-column matrix of arrow end points
# origin : common start point, or a two-column matrix of start points
# labels : text drawn at the arrow tips (NULL suppresses labels)
# scale  : multiplier applied to x before drawing
# pos    : text position passed to text(); by default chosen per arrow so
#          the label sits outside the tip
vectors <- function(x, origin=c(0,0), labels=rownames(x),
                    scale=1,
                    col="blue",
                    lwd=1,
                    cex=1,
                    length=.1, angle=13,
                    pos=NULL, ...) {
  x <- scale * x
  if (is.vector(origin)) {
    origin <- matrix(origin, ncol = 2)
  }
  .arrows(origin[, 1], origin[, 2], x[, 1], x[, 2],
          lwd = lwd, col = col, length = length, angle = angle, ...)
  if (!is.null(labels)) {
    # Right of the tip (pos = 4) for arrows pointing right, left (pos = 2)
    # otherwise, unless the caller supplied pos explicitly.
    if (missing(pos)) {
      pos <- ifelse(x[, 1] > 0, 4, 2)
    }
    text(x[, 1], x[, 2], labels, pos = pos, cex = cex, col = col, ...)
  }
}
# Internal helper (not exported): draws arrows() repeatedly with a fan of
# head angles (1, 4, 7, ... up to `angle`) so the arrow head looks filled.
.arrows <- function(..., angle = 13) {
  for (a in seq(1, angle, by = 3)) {
    arrows(..., angle = a)
  }
}
|
/R/vectors.R
|
no_license
|
cran/candisc
|
R
| false
| false
| 906
|
r
|
# utility for drawing labeled vectors
# TODO: handle xpd=TRUE somewhere so labels aren't cut off
# DONE: allow origin to be a two-col matrix like x
# TODO: calculate default length in terms of par("usr")
# Draw labeled vectors (arrows) from a common origin, e.g. for biplot-style
# variable displays.
#
# x      : two-column matrix of arrow end points
# origin : common start point, or a two-column matrix of start points
# labels : text drawn at the arrow tips (NULL suppresses labels)
# scale  : multiplier applied to x before drawing
# pos    : text position passed to text(); by default chosen per arrow so
#          the label sits outside the tip
vectors <- function(x, origin=c(0,0), labels=rownames(x),
                    scale=1,
                    col="blue",
                    lwd=1,
                    cex=1,
                    length=.1, angle=13,
                    pos=NULL, ...) {
  x <- scale * x
  if (is.vector(origin)) {
    origin <- matrix(origin, ncol = 2)
  }
  .arrows(origin[, 1], origin[, 2], x[, 1], x[, 2],
          lwd = lwd, col = col, length = length, angle = angle, ...)
  if (!is.null(labels)) {
    # Right of the tip (pos = 4) for arrows pointing right, left (pos = 2)
    # otherwise, unless the caller supplied pos explicitly.
    if (missing(pos)) {
      pos <- ifelse(x[, 1] > 0, 4, 2)
    }
    text(x[, 1], x[, 2], labels, pos = pos, cex = cex, col = col, ...)
  }
}
# Internal helper (not exported): draws arrows() repeatedly with a fan of
# head angles (1, 4, 7, ... up to `angle`) so the arrow head looks filled.
.arrows <- function(..., angle = 13) {
  for (a in seq(1, angle, by = 3)) {
    arrows(..., angle = a)
  }
}
|
# This script visualizes our Data Gathering, Merging and Manipulating Process with the example of Turkey
source("0 - Loading Packages.R")
# Pre-processed Global Terrorism Database (GTD) extract.
PreGTD <- read.csv("TerrorData/pregtd.csv")
#Finding 2 basic maps of Turkey for visualisation
Turkey <- qmap("Davulga", zoom = 7, extent = "device", legend = "topleft")
BaseMap <- qmap("Davulga", zoom = 7, extent = "device", source="stamen", maptype="toner")
#Creating our PreGTD subset with example attacks
# Seven example attacks picked by their GTD event ids; short summaries of
# the incidents follow in the comments below.
ExA <- subset(PreGTD, eventid == 200608280001 | eventid == 199811270002 | eventid == 199903050002
| eventid == 200308010004 | eventid == 199907300003 | eventid == 199809090001
| eventid == 200311140004 )
ExA$inUC <- as.factor(ExA$inUC)
# NOTE(review): coordinates read from CSV should already be numeric; these
# coercions are defensive — but if the columns were ever factors,
# as.numeric() on a factor returns level codes, not the coordinates. Confirm.
ExA$latitude <- as.numeric(ExA$latitude)
ExA$longitude <- as.numeric(ExA$longitude)
# Attack Summaries:
# 200807270020: On Sunday, two bombs exploded minutes apart in one of Istanbul's busy shopping districts on the European side. Though the Kurdistan Workers Party denied responsibility for the attack, officials continually accuse them of direct involvement.
# 200608280001: Four people were killed and approximately 65 were injured when a bomb attached to a motorcycle detonated in Antalya, Turkey. No one claimed responsibility for the attack.
# 199811270002: A bomb exploded on a bus near Kirikkale, Turkey. The explosion killed four passengers and injured 20 others. The bomb was placed in the luggage department of the bus. There was no claim of responsibility for this attack.
# 200608280002: A car filled with explosives exploded 80 meters from the Russian Consulate General in Antalya, Turkey, killing three people and wounding at least 20 more. No one claimed responsibility for the attack, but authorities believed the Kurdish separatists were to blame.
# 199903050002: A car bomb attack targeted the Cankiri Provincial Governor, Ayhan Cevik, while he was driving in Cankiri, Turkey. Four people were killed and the Governor and nine others were wounded. The Turkish Workers and Peasants Liberation Army (TKP/ML-TIKKO) claimed responsibility for the attack
# 200308010004: An unnamed leftist organization detonated an explosive device in the garden of the Justice Ministry's Center of Education for Judge and Prosecutor Candidates in Ankara, Turkey. Eleven people, including policemen and judges, were injured in the explosion. Although a group claimed responsibility for the attack, Turkey's Interior Minister, Abdulkadir Aksu, did not publicly reveal the perpetrator group's name
# 199907300003: A total of four village guards were killed in Gurpinar, Turkey, when a group of perpetrators attacked the guards who were protecting the Telekom employees. Officials reported that the employees where the target of this attack.
# 199809090001: In one of two related attacks, rebels from the Kurdistan Workers Party (PKK) attacked the Imamli settlement unit in the village of Kuyucak, Turkey, killing one person and injuring three (Mustafa, Hilmi and Nuriye Atici). The perpetrators fled after this incident.
# 200311140004: Two car bombings by the militant Islamic group, Great East Islamic Raiders Front (IBDA-C - Islami Buyuk Dogu Akincilar Cephesi), on synagogues in Istanbul, Turkey, killed at least twenty people and injured 302 others. One of the two almost simultaneous blasts occurred at the Beth Israel Synagogue, damaging the building and several cars. Numerous people were reportedly killed and injured in the bombing. The group identified itself in a telephone call to the Anadolu News Agency.
# Format the attack table so it displays nicely in Rmarkdown and shows the right columns
Rmdattacks <- subset(ExA, select = c(Date, inUC, HUMscale, access, access.MAX, light, light.MAX, density, density.MAX, coast.dist, coast.dist.MAX, original.city, latitude))
# order attacks from North to South
Rmdattacks <- Rmdattacks[order(-Rmdattacks$latitude), ]
# recode the urban-center indicator (factor levels "0"/"1") into readable labels
Rmdattacks$inUC <- as.character(Rmdattacks$inUC)
Rmdattacks$inUC[Rmdattacks$inUC == 0] <- "no"
Rmdattacks$inUC[Rmdattacks$inUC == 1] <- "yes"
# express each covariate relative to its observed maximum (in percent);
# access and coast distance are inverted so 100 = best access / closest to coast
Rmdattacks$Rel.Access <- round((((Rmdattacks$access/Rmdattacks$access.MAX)-1)*-100), digits = 2 )
Rmdattacks$Rel.Light <- round(((Rmdattacks$light/Rmdattacks$light.MAX)*100), digits = 2)
Rmdattacks$Rel.Density <- round(((Rmdattacks$density/Rmdattacks$density.MAX)*100), digits = 2)
Rmdattacks$Rel.Coast.Dist <- round((((Rmdattacks$coast.dist/Rmdattacks$coast.dist.MAX)-1)*-100), digits = 2)
Rmdattacks$Date.Place <- paste(Rmdattacks$Date, Rmdattacks$original.city, sep=", ")
# drop the raw columns now that the relative measures have been computed
# (replaces eleven individual `$col <- NULL` assignments)
drop_cols <- c("latitude", "access", "access.MAX", "density", "density.MAX",
               "light", "light.MAX", "coast.dist", "coast.dist.MAX",
               "Date", "original.city")
Rmdattacks <- Rmdattacks[, !(names(Rmdattacks) %in% drop_cols)]
colnames(Rmdattacks) <- c("On.UC", "Kill.Wound", "Access", "Light", "Dens", "Prox.Coast", "Date.Place")
Rmdattacks <- Rmdattacks[c(7,1,2,4,3,6,5) ]
# BUG FIX: the original `Rmdattacks$row.names <- NULL` was a no-op (removing a
# column that never existed); resetting the data frame's row names is what the
# North-to-South reordering above calls for
rownames(Rmdattacks) <- NULL
# show example attacks on map, distinguishing whether they took place on an urban center (UC), with date labels
# NOTE(review): `fontsize` is not a known ggplot2 aesthetic (text size is `size`),
# so mapping it inside aes() likely has no effect -- confirm intended
AttackMap <- BaseMap + geom_point(aes(x=longitude, y=latitude, shape = inUC, color = inUC), data = ExA, size = 7 ) +
  theme(legend.position="none")+
  geom_text(aes(x=longitude+0.3, y=latitude+0.08, fontface="bold", color = inUC, fontsize = 7), data=ExA, label=ExA$Date) +
  scale_colour_manual(values = c("firebrick","pink"))
# Show lightmap
# re-extract the raster archive, then delete every layer except the night-lights raster (LNMDMS2a)
unzip("Downloaded_Data/Downloaded_Raster_Data.zip", exdir="Downloaded_Data")
unlink("Downloaded_Data/NLDI_2006_0p25_rev20111230.tif")
unlink("Downloaded_Data/GDP_grid_flt.tif")
unlink("Downloaded_Data/DICGSH1a.tif")
unlink("Downloaded_Data/GACGEM2a.tif")
unlink("Downloaded_Data/G19ESA3a.tif")
RASTERlight<- raster("Downloaded_Data/LNMDMS2a.tif")
#crop Raster to Turkey maps boundaries
e <- extent(27.9, 34.9, 36.2, 41.7)
LightLayer <- crop(RASTERlight, e)
#convert to usable dataframe including eliminating minor light values and aggregating the rest from 63 to only 6 levels
LightLayer <- as(LightLayer, "SpatialPixelsDataFrame")
LightLayer <- data.frame(LightLayer)
# shift tiles ~0.09 degrees south -- presumably to align raster cells with the basemap; TODO confirm offset
LightLayer$y <- LightLayer$y-0.09
colnames(LightLayer) <- c("light", "longitude", "latitude")
LightLayer$light <- as.numeric(LightLayer$light)
#LightLayer <- subset(LightLayer, light >60)
# bin raw light values into 7 coarse brightness levels (car::recode-style spec;
# NOTE(review): ranges "0:4" and "4:10" overlap at 4 -- the first rule wins, confirm intended)
LightLayer$light <- recode (LightLayer$light, "0:4 = 0; 4:10 = 1 ; 11:18 = 2; 19:30 = 3; 31:40 = 4; 41:50 = 5; 51:63 = 6", as.numeric.result=TRUE)
# light raster as tiles, attacks as white-halo points with date labels
LightMap <- BaseMap + geom_tile(aes(x = longitude, y = latitude, fill=light), data = LightLayer, alpha = 0.6) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="white"), data = ExA, size = 5 ) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="red"), data = ExA, size = 4 ) +
  geom_point(aes(x=longitude, shape= inUC, color=inUC, y=latitude-0.1), data = ExA, size = 8 ) +
  geom_text(aes(x=longitude+0.3, y=latitude+0.19, fontface="bold", colour="firebrick", fontsize = 7), data=ExA, label=ExA$Date) +
  scale_fill_gradient(low = "darkblue", high= "yellow") +
  scale_shape_manual(values=c(10, 10, 13, 13)) +
  scale_colour_manual(values = c("white","white", "white","black", "firebrick", "firebrick")) +
  theme(legend.position="none")
# clean up the extracted raster and intermediates
unlink("Downloaded_Data/LNMDMS2a.tif")
rm(LightLayer, RASTERlight, e)
# Show Accessmap
# re-extract the archive, then delete every layer except the accessibility raster (GACGEM2a)
unzip("Downloaded_Data/Downloaded_Raster_Data.zip", exdir="Downloaded_Data")
unlink("Downloaded_Data/NLDI_2006_0p25_rev20111230.tif")
unlink("Downloaded_Data/GDP_grid_flt.tif")
unlink("Downloaded_Data/DICGSH1a.tif")
unlink("Downloaded_Data/G19ESA3a.tif")
unlink("Downloaded_Data/LNMDMS2a.tif")
RASTERAccess<- raster("Downloaded_Data/GACGEM2a.tif")
#crop Raster to Turkey maps boundaries
e <- extent(27.9, 34.9, 36.2, 41.7)
AccessLayer <- crop(RASTERAccess, e)
#aggregating cells because else, way too much values
#AccessLayer <- raster::aggregate(AccessLayer, fact=1, fun=mean, na.rm=TRUE)
#convert to usable dataframe including eliminating largest Access time values (in minutes ) and aggregating the rest to 10 min intervals
AccessLayer <- as(AccessLayer, "SpatialPixelsDataFrame")
AccessLayer <- data.frame(AccessLayer)
# shift tiles ~0.085 degrees south -- presumably to align raster cells with the basemap; TODO confirm offset
AccessLayer$y <- AccessLayer$y-0.085
colnames(AccessLayer) <- c("AccessTime", "longitude", "latitude")
AccessLayer$AccessTime <- as.numeric(AccessLayer$AccessTime)
#subsetting and binning Access Time values to 10 minute bins to reduce observations. N
#ow, only displaying areas that have Access Time < 120 minutes
#AccessLayer <- subset(AccessLayer, AccessTime < 120)
# convert minutes to 10-minute bins, capped at bin 18 (i.e. >= 180 minutes)
AccessLayer$AccessTime <- AccessLayer$AccessTime/10
AccessLayer$AccessTime <- ifelse(AccessLayer$AccessTime >= 18, 18, AccessLayer$AccessTime)
AccessLayer$AccessTime <- round(AccessLayer$AccessTime)
# access-time raster as tiles, attacks as white-halo points with date labels
AccessMap <- BaseMap + geom_tile(aes(x = longitude, y = latitude, fill=AccessTime), data = AccessLayer, alpha = 0.75) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="white"), data = ExA, size = 5 ) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="red"), data = ExA, size = 4 ) +
  geom_point(aes(x=longitude, shape= inUC, color=inUC, y=latitude-0.1), data = ExA, size = 8 ) +
  geom_text(aes(x=longitude+0.3, y=latitude+0.19, fontface="bold", colour="firebrick", fontsize = 7), data=ExA, label=ExA$Date) +
  scale_fill_gradient(low = "yellow", high= "darkgreen") +
  scale_shape_manual(values=c(10, 10, 13, 13)) +
  scale_colour_manual(values = c("white","white", "white","black", "firebrick", "firebrick")) +
  theme(legend.position="none")
# clean up the extracted raster and intermediates
unlink("Downloaded_Data/GACGEM2a.tif")
rm(RASTERAccess, e)
# Proximity to Coast
# re-extract the archive, then delete every layer except the coast-distance raster (DICGSH1a)
unzip("Downloaded_Data/Downloaded_Raster_Data.zip", exdir="Downloaded_Data")
unlink("Downloaded_Data/NLDI_2006_0p25_rev20111230.tif")
unlink("Downloaded_Data/GDP_grid_flt.tif")
unlink("Downloaded_Data/GACGEM2a.tif")
unlink("Downloaded_Data/G19ESA3a.tif")
unlink("Downloaded_Data/LNMDMS2a.tif")
RASTERCoast<- raster("Downloaded_Data/DICGSH1a.tif")
#crop Raster to Turkey maps boundaries
e <- extent(27.9, 34.9, 36.2, 41.7)
CoastLayer <- crop(RASTERCoast, e)
CoastLayer <- as(CoastLayer, "SpatialPixelsDataFrame")
CoastLayer <- data.frame(CoastLayer)
# shift tiles ~0.09 degrees south -- presumably to align raster cells with the basemap; TODO confirm offset
CoastLayer$y <- CoastLayer$y-0.09
colnames(CoastLayer) <- c("Coastdist", "longitude", "latitude")
CoastLayer$Coastdist <- as.numeric(CoastLayer$Coastdist)
#reducing No of observations to on-coast and less than a 100 km from coast
# NOTE(review): the raster seems to encode signed distance (kept values are <= 0,
# then negated); the cap below is 120, not 100 as the comment above says -- confirm
CoastLayer <- subset(CoastLayer, Coastdist <=0)
CoastLayer$Coastdist <- CoastLayer$Coastdist*-1
CoastLayer$Coastdist <- ifelse(CoastLayer$Coastdist>= 120, 120, CoastLayer$Coastdist)
# coast-distance raster as tiles, attacks as white-halo points with date labels
CoastMap <- BaseMap + geom_tile(aes(x = longitude, y = latitude, fill=Coastdist), data = CoastLayer, alpha=0.85) +
  scale_fill_gradient(low = "lightblue", high= "darkblue") +
  geom_point(aes(x=longitude, y=latitude-0.1, color="white"), data = ExA, size = 5 ) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="red"), data = ExA, size = 4 ) +
  geom_point(aes(x=longitude, shape= inUC, color=inUC, y=latitude-0.1), data = ExA, size = 8 ) +
  geom_text(aes(x=longitude+0.3, y=latitude+0.19, fontface="bold", colour="firebrick", fontsize = 7), data=ExA, label=ExA$Date) +
  scale_shape_manual(values=c(10, 10, 13, 13)) +
  scale_colour_manual(values = c("white","white", "white","black", "firebrick", "firebrick")) +
  theme(legend.position="none")
plot(CoastMap)
rm(RASTERCoast, e)
unlink("Downloaded_Data/DICGSH1a.tif")
# Population Density
# Data sourced from http://neo.sci.gsfc.nasa.gov/servlet/RenderData?si=875430&cs=rgb&format=TIFF&width=3600&height=1800 and saved to Downloaded_Data
RASTERDens <- raster("Downloaded_Data/SEDAC_POP_2000-01-01_rgb_3600x1800.TIFF")
#crop Raster to Turkey maps boundaries
e <- extent(27.9, 34.9, 36.2, 41.7)
DensLayer <- crop(RASTERDens, e)
DensLayer <- as(DensLayer, "SpatialPixelsDataFrame")
DensLayer <- data.frame(DensLayer)
# shift tiles ~0.09 degrees south -- presumably to align raster cells with the basemap; TODO confirm offset
DensLayer$y <- DensLayer$y-0.09
colnames(DensLayer) <- c("Density", "longitude", "latitude")
DensLayer$Density <- as.numeric(DensLayer$Density)
# 255 is like NA, e.g. the sea
DensLayer <- subset(DensLayer, Density<255)
# floor values at 100 so the low end of the color scale is not dominated by near-empty cells
DensLayer$Density <- ifelse(DensLayer$Density <= 100, 100, DensLayer$Density )
# density raster as tiles, attacks as white-halo points with date labels
DensMap <- BaseMap + geom_tile(aes(x = longitude, y = latitude, fill=Density), data = DensLayer, alpha=0.75) +
  scale_fill_gradient(low = "lightgreen", high= "firebrick") +
  geom_point(aes(x=longitude, y=latitude-0.1, color="white"), data = ExA, size = 5 ) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="red"), data = ExA, size = 4 ) +
  geom_point(aes(x=longitude, shape= inUC, color=inUC, y=latitude-0.1), data = ExA, size = 8 ) +
  geom_text(aes(x=longitude+0.3, y=latitude+0.19, fontface="bold", colour="firebrick", fontsize = 7), data=ExA, label=ExA$Date) +
  scale_shape_manual(values=c(10, 10, 13, 13)) +
  scale_colour_manual(values = c("white","white", "white","black", "firebrick", "firebrick")) +
  theme(legend.position="none")
plot(DensMap)
rm(RASTERDens, e)
|
/AttackVis.R
|
no_license
|
Leonardo2011/UrbanTerror
|
R
| false
| false
| 12,892
|
r
|
# This script visualizes our Data Gathering, Merging and Manipulating Process with the example of Turkey
source("0 - Loading Packages.R")
PreGTD <- read.csv("TerrorData/pregtd.csv")
#Finding 2 basic maps of Turkey for visualisation
# NOTE(review): qmap() fetches basemap tiles over the network (ggmap), so this
# script requires internet access when run -- confirm package/provider still works
Turkey <- qmap("Davulga", zoom = 7, extent = "device", legend = "topleft")
BaseMap <- qmap("Davulga", zoom = 7, extent = "device", source="stamen", maptype="toner")
#Creating our PreGTD subset with example attacks
ExA <- subset(PreGTD, eventid == 200608280001 | eventid == 199811270002 | eventid == 199903050002
              | eventid == 200308010004 | eventid == 199907300003 | eventid == 199809090001
              | eventid == 200311140004 )
# inUC flags whether an attack falls within an urban center -- presumably 0/1; verify upstream
ExA$inUC <- as.factor(ExA$inUC)
ExA$latitude <- as.numeric(ExA$latitude)
ExA$longitude <- as.numeric(ExA$longitude)
# Attack Summaries:
# NOTE(review): 200807270020 and 200608280002 below are NOT in the ExA subset
# above -- confirm which event ids are intended to be plotted
# 200807270020: On Sunday, two bombs exploded minutes apart in one of Istanbul's busy shopping districts on the European side. Though the Kurdistan Workers Party denied responsibility for the attack, officials continually accuse them of direct involvement.
# 200608280001: Four people were killed and approximately 65 were injured when a bomb attached to a motorcycle detonated in Antalya, Turkey. No one claimed responsibility for the attack.
# 199811270002: A bomb exploded on a bus near Kirikkale, Turkey. The explosion killed four passengers and injured 20 others. The bomb was placed in the luggage department of the bus. There was no claim of responsibility for this attack.
# 200608280002: A car filled with explosives exploded 80 meters from the Russian Consulate General in Antalya, Turkey, killing three people and wounding at least 20 more. No one claimed responsibility for the attack, but authorities believed the Kurdish separatists were to blame.
# 199903050002: A car bomb attack targeted the Cankiri Provincial Governor, Ayhan Cevik, while he was driving in Cankiri, Turkey. Four people were killed and the Governor and nine others were wounded. The Turkish Workers and Peasants Liberation Army (TKP/ML-TIKKO) claimed responsibility for the attack
# 200308010004: An unnamed leftist organization detonated an explosive device in the garden of the Justice Ministry's Center of Education for Judge and Prosecutor Candidates in Ankara, Turkey. Eleven people, including policemen and judges, were injured in the explosion. Although a group claimed responsibility for the attack, Turkey's Interior Minister, Abdulkadir Aksu, did not publicly reveal the perpetrator group's name
# 199907300003: A total of four village guards were killed in Gurpinar, Turkey, when a group of perpetrators attacked the guards who were protecting the Telekom employees. Officials reported that the employees where the target of this attack.
# 199809090001: In one of two related attacks, rebels from the Kurdistan Workers Party (PKK) attacked the Imamli settlement unit in the village of Kuyucak, Turkey, killing one person and injuring three (Mustafa, Hilmi and Nuriye Atici). The perpetrators fled after this incident.
# 200311140004: Two car bombings by the militant Islamic group, Great East Islamic Raiders Front (IBDA-C - Islami Buyuk Dogu Akincilar Cephesi), on synagogues in Istanbul, Turkey, killed at least twenty people and injured 302 others. One of the two almost simultaneous blasts occurred at the Beth Israel Synagogue, damaging the building and several cars. Numerous people were reportedly killed and injured in the bombing. The group identified itself in a telephone call to the Anadolu News Agency.
# Format the attack table so it displays nicely in Rmarkdown and shows the right columns
Rmdattacks <- subset(ExA, select = c(Date, inUC, HUMscale, access, access.MAX, light, light.MAX, density, density.MAX, coast.dist, coast.dist.MAX, original.city, latitude))
# order attacks from North to South
Rmdattacks <- Rmdattacks[order(-Rmdattacks$latitude), ]
# recode the urban-center indicator (factor levels "0"/"1") into readable labels
Rmdattacks$inUC <- as.character(Rmdattacks$inUC)
Rmdattacks$inUC[Rmdattacks$inUC == 0] <- "no"
Rmdattacks$inUC[Rmdattacks$inUC == 1] <- "yes"
# express each covariate relative to its observed maximum (in percent);
# access and coast distance are inverted so 100 = best access / closest to coast
Rmdattacks$Rel.Access <- round((((Rmdattacks$access/Rmdattacks$access.MAX)-1)*-100), digits = 2 )
Rmdattacks$Rel.Light <- round(((Rmdattacks$light/Rmdattacks$light.MAX)*100), digits = 2)
Rmdattacks$Rel.Density <- round(((Rmdattacks$density/Rmdattacks$density.MAX)*100), digits = 2)
Rmdattacks$Rel.Coast.Dist <- round((((Rmdattacks$coast.dist/Rmdattacks$coast.dist.MAX)-1)*-100), digits = 2)
Rmdattacks$Date.Place <- paste(Rmdattacks$Date, Rmdattacks$original.city, sep=", ")
# drop the raw columns now that the relative measures have been computed
# (replaces eleven individual `$col <- NULL` assignments)
drop_cols <- c("latitude", "access", "access.MAX", "density", "density.MAX",
               "light", "light.MAX", "coast.dist", "coast.dist.MAX",
               "Date", "original.city")
Rmdattacks <- Rmdattacks[, !(names(Rmdattacks) %in% drop_cols)]
colnames(Rmdattacks) <- c("On.UC", "Kill.Wound", "Access", "Light", "Dens", "Prox.Coast", "Date.Place")
Rmdattacks <- Rmdattacks[c(7,1,2,4,3,6,5) ]
# BUG FIX: the original `Rmdattacks$row.names <- NULL` was a no-op (removing a
# column that never existed); resetting the data frame's row names is what the
# North-to-South reordering above calls for
rownames(Rmdattacks) <- NULL
# show example attacks on map, distinguishing whether they took place on an urban center (UC), with date labels
# NOTE(review): `fontsize` is not a known ggplot2 aesthetic (text size is `size`),
# so mapping it inside aes() likely has no effect -- confirm intended
AttackMap <- BaseMap + geom_point(aes(x=longitude, y=latitude, shape = inUC, color = inUC), data = ExA, size = 7 ) +
  theme(legend.position="none")+
  geom_text(aes(x=longitude+0.3, y=latitude+0.08, fontface="bold", color = inUC, fontsize = 7), data=ExA, label=ExA$Date) +
  scale_colour_manual(values = c("firebrick","pink"))
# Show lightmap
# re-extract the raster archive, then delete every layer except the night-lights raster (LNMDMS2a)
unzip("Downloaded_Data/Downloaded_Raster_Data.zip", exdir="Downloaded_Data")
unlink("Downloaded_Data/NLDI_2006_0p25_rev20111230.tif")
unlink("Downloaded_Data/GDP_grid_flt.tif")
unlink("Downloaded_Data/DICGSH1a.tif")
unlink("Downloaded_Data/GACGEM2a.tif")
unlink("Downloaded_Data/G19ESA3a.tif")
RASTERlight<- raster("Downloaded_Data/LNMDMS2a.tif")
#crop Raster to Turkey maps boundaries
e <- extent(27.9, 34.9, 36.2, 41.7)
LightLayer <- crop(RASTERlight, e)
#convert to usable dataframe including eliminating minor light values and aggregating the rest from 63 to only 6 levels
LightLayer <- as(LightLayer, "SpatialPixelsDataFrame")
LightLayer <- data.frame(LightLayer)
# shift tiles ~0.09 degrees south -- presumably to align raster cells with the basemap; TODO confirm offset
LightLayer$y <- LightLayer$y-0.09
colnames(LightLayer) <- c("light", "longitude", "latitude")
LightLayer$light <- as.numeric(LightLayer$light)
#LightLayer <- subset(LightLayer, light >60)
# bin raw light values into 7 coarse brightness levels (car::recode-style spec;
# NOTE(review): ranges "0:4" and "4:10" overlap at 4 -- the first rule wins, confirm intended)
LightLayer$light <- recode (LightLayer$light, "0:4 = 0; 4:10 = 1 ; 11:18 = 2; 19:30 = 3; 31:40 = 4; 41:50 = 5; 51:63 = 6", as.numeric.result=TRUE)
# light raster as tiles, attacks as white-halo points with date labels
LightMap <- BaseMap + geom_tile(aes(x = longitude, y = latitude, fill=light), data = LightLayer, alpha = 0.6) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="white"), data = ExA, size = 5 ) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="red"), data = ExA, size = 4 ) +
  geom_point(aes(x=longitude, shape= inUC, color=inUC, y=latitude-0.1), data = ExA, size = 8 ) +
  geom_text(aes(x=longitude+0.3, y=latitude+0.19, fontface="bold", colour="firebrick", fontsize = 7), data=ExA, label=ExA$Date) +
  scale_fill_gradient(low = "darkblue", high= "yellow") +
  scale_shape_manual(values=c(10, 10, 13, 13)) +
  scale_colour_manual(values = c("white","white", "white","black", "firebrick", "firebrick")) +
  theme(legend.position="none")
# clean up the extracted raster and intermediates
unlink("Downloaded_Data/LNMDMS2a.tif")
rm(LightLayer, RASTERlight, e)
# Show Accessmap
# re-extract the archive, then delete every layer except the accessibility raster (GACGEM2a)
unzip("Downloaded_Data/Downloaded_Raster_Data.zip", exdir="Downloaded_Data")
unlink("Downloaded_Data/NLDI_2006_0p25_rev20111230.tif")
unlink("Downloaded_Data/GDP_grid_flt.tif")
unlink("Downloaded_Data/DICGSH1a.tif")
unlink("Downloaded_Data/G19ESA3a.tif")
unlink("Downloaded_Data/LNMDMS2a.tif")
RASTERAccess<- raster("Downloaded_Data/GACGEM2a.tif")
#crop Raster to Turkey maps boundaries
e <- extent(27.9, 34.9, 36.2, 41.7)
AccessLayer <- crop(RASTERAccess, e)
#aggregating cells because else, way too much values
#AccessLayer <- raster::aggregate(AccessLayer, fact=1, fun=mean, na.rm=TRUE)
#convert to usable dataframe including eliminating largest Access time values (in minutes ) and aggregating the rest to 10 min intervals
AccessLayer <- as(AccessLayer, "SpatialPixelsDataFrame")
AccessLayer <- data.frame(AccessLayer)
# shift tiles ~0.085 degrees south -- presumably to align raster cells with the basemap; TODO confirm offset
AccessLayer$y <- AccessLayer$y-0.085
colnames(AccessLayer) <- c("AccessTime", "longitude", "latitude")
AccessLayer$AccessTime <- as.numeric(AccessLayer$AccessTime)
#subsetting and binning Access Time values to 10 minute bins to reduce observations. N
#ow, only displaying areas that have Access Time < 120 minutes
#AccessLayer <- subset(AccessLayer, AccessTime < 120)
# convert minutes to 10-minute bins, capped at bin 18 (i.e. >= 180 minutes)
AccessLayer$AccessTime <- AccessLayer$AccessTime/10
AccessLayer$AccessTime <- ifelse(AccessLayer$AccessTime >= 18, 18, AccessLayer$AccessTime)
AccessLayer$AccessTime <- round(AccessLayer$AccessTime)
# access-time raster as tiles, attacks as white-halo points with date labels
AccessMap <- BaseMap + geom_tile(aes(x = longitude, y = latitude, fill=AccessTime), data = AccessLayer, alpha = 0.75) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="white"), data = ExA, size = 5 ) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="red"), data = ExA, size = 4 ) +
  geom_point(aes(x=longitude, shape= inUC, color=inUC, y=latitude-0.1), data = ExA, size = 8 ) +
  geom_text(aes(x=longitude+0.3, y=latitude+0.19, fontface="bold", colour="firebrick", fontsize = 7), data=ExA, label=ExA$Date) +
  scale_fill_gradient(low = "yellow", high= "darkgreen") +
  scale_shape_manual(values=c(10, 10, 13, 13)) +
  scale_colour_manual(values = c("white","white", "white","black", "firebrick", "firebrick")) +
  theme(legend.position="none")
# clean up the extracted raster and intermediates
unlink("Downloaded_Data/GACGEM2a.tif")
rm(RASTERAccess, e)
# Proximity to Coast
# re-extract the archive, then delete every layer except the coast-distance raster (DICGSH1a)
unzip("Downloaded_Data/Downloaded_Raster_Data.zip", exdir="Downloaded_Data")
unlink("Downloaded_Data/NLDI_2006_0p25_rev20111230.tif")
unlink("Downloaded_Data/GDP_grid_flt.tif")
unlink("Downloaded_Data/GACGEM2a.tif")
unlink("Downloaded_Data/G19ESA3a.tif")
unlink("Downloaded_Data/LNMDMS2a.tif")
RASTERCoast<- raster("Downloaded_Data/DICGSH1a.tif")
#crop Raster to Turkey maps boundaries
e <- extent(27.9, 34.9, 36.2, 41.7)
CoastLayer <- crop(RASTERCoast, e)
CoastLayer <- as(CoastLayer, "SpatialPixelsDataFrame")
CoastLayer <- data.frame(CoastLayer)
# shift tiles ~0.09 degrees south -- presumably to align raster cells with the basemap; TODO confirm offset
CoastLayer$y <- CoastLayer$y-0.09
colnames(CoastLayer) <- c("Coastdist", "longitude", "latitude")
CoastLayer$Coastdist <- as.numeric(CoastLayer$Coastdist)
#reducing No of observations to on-coast and less than a 100 km from coast
# NOTE(review): the raster seems to encode signed distance (kept values are <= 0,
# then negated); the cap below is 120, not 100 as the comment above says -- confirm
CoastLayer <- subset(CoastLayer, Coastdist <=0)
CoastLayer$Coastdist <- CoastLayer$Coastdist*-1
CoastLayer$Coastdist <- ifelse(CoastLayer$Coastdist>= 120, 120, CoastLayer$Coastdist)
# coast-distance raster as tiles, attacks as white-halo points with date labels
CoastMap <- BaseMap + geom_tile(aes(x = longitude, y = latitude, fill=Coastdist), data = CoastLayer, alpha=0.85) +
  scale_fill_gradient(low = "lightblue", high= "darkblue") +
  geom_point(aes(x=longitude, y=latitude-0.1, color="white"), data = ExA, size = 5 ) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="red"), data = ExA, size = 4 ) +
  geom_point(aes(x=longitude, shape= inUC, color=inUC, y=latitude-0.1), data = ExA, size = 8 ) +
  geom_text(aes(x=longitude+0.3, y=latitude+0.19, fontface="bold", colour="firebrick", fontsize = 7), data=ExA, label=ExA$Date) +
  scale_shape_manual(values=c(10, 10, 13, 13)) +
  scale_colour_manual(values = c("white","white", "white","black", "firebrick", "firebrick")) +
  theme(legend.position="none")
plot(CoastMap)
rm(RASTERCoast, e)
unlink("Downloaded_Data/DICGSH1a.tif")
# Population Density
# Data sourced from http://neo.sci.gsfc.nasa.gov/servlet/RenderData?si=875430&cs=rgb&format=TIFF&width=3600&height=1800 and saved to Downloaded_Data
RASTERDens <- raster("Downloaded_Data/SEDAC_POP_2000-01-01_rgb_3600x1800.TIFF")
#crop Raster to Turkey maps boundaries
e <- extent(27.9, 34.9, 36.2, 41.7)
DensLayer <- crop(RASTERDens, e)
DensLayer <- as(DensLayer, "SpatialPixelsDataFrame")
DensLayer <- data.frame(DensLayer)
# shift tiles ~0.09 degrees south -- presumably to align raster cells with the basemap; TODO confirm offset
DensLayer$y <- DensLayer$y-0.09
colnames(DensLayer) <- c("Density", "longitude", "latitude")
DensLayer$Density <- as.numeric(DensLayer$Density)
# 255 is like NA, e.g. the sea
DensLayer <- subset(DensLayer, Density<255)
# floor values at 100 so the low end of the color scale is not dominated by near-empty cells
DensLayer$Density <- ifelse(DensLayer$Density <= 100, 100, DensLayer$Density )
# density raster as tiles, attacks as white-halo points with date labels
DensMap <- BaseMap + geom_tile(aes(x = longitude, y = latitude, fill=Density), data = DensLayer, alpha=0.75) +
  scale_fill_gradient(low = "lightgreen", high= "firebrick") +
  geom_point(aes(x=longitude, y=latitude-0.1, color="white"), data = ExA, size = 5 ) +
  geom_point(aes(x=longitude, y=latitude-0.1, color="red"), data = ExA, size = 4 ) +
  geom_point(aes(x=longitude, shape= inUC, color=inUC, y=latitude-0.1), data = ExA, size = 8 ) +
  geom_text(aes(x=longitude+0.3, y=latitude+0.19, fontface="bold", colour="firebrick", fontsize = 7), data=ExA, label=ExA$Date) +
  scale_shape_manual(values=c(10, 10, 13, 13)) +
  scale_colour_manual(values = c("white","white", "white","black", "firebrick", "firebrick")) +
  theme(legend.position="none")
plot(DensMap)
rm(RASTERDens, e)
|
#load the tidyverse. enough constituent packages were used to warrant loading the
#whole thing
library(tidyverse)
#download and unzip the data
# NOTE(review): this re-downloads the ~60MB archive on every run -- consider
# guarding with file.exists() before downloading
download.file(url = 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip',
              destfile = 'getdata_projectfiles_UCI HAR Dataset.zip')
unzip('getdata_projectfiles_UCI HAR Dataset.zip')
# NOTE(review): setwd() permanently changes the session working directory; all
# relative paths below depend on it
setwd('UCI HAR Dataset')
#lines 15-34 do the following:
#load the data, label the data set with descriptive variable names (#4), and
#merge test train sets (#1). the resulting dataset is a tibble called 'testrain'.
# features.txt has two space-separated columns; '_c' skips the index and keeps the name
features <- read_delim('features.txt', delim = ' ', col_names = FALSE, col_types = '_c') %>% pull()
# read the test and train measurement matrices, name columns by feature, stack rows
X <- c('test/X_test.txt', 'train/X_train.txt') %>%
  map(read_delim, delim = ' ', col_names = features,
      col_types = cols(.default = 'd'), trim_ws = TRUE) %>%
  reduce(rbind)
#NB: this introduces warnings because some of the feature names are EXACT duplicates
#of one another. I am not sure how to handle this. In fact I don't really know why
#there are two identical features in the first place.
# activity labels, read as a factor so fct_recode() works below
y <- c('test/y_test.txt', 'train/y_train.txt') %>%
  map(read_table, col_names = 'activity', col_types = 'f') %>%
  reduce(rbind)
# subject ids, read as integers
subject <- c('test/subject_test.txt', 'train/subject_train.txt') %>%
  map(read_table, col_names = 'subject', col_types = 'i') %>%
  reduce(rbind)
# column-bind into one tibble: subject | activity | 561 feature columns
testrain <- bind_cols(subject, y, X)
#extract mean and standard deviation measurements (#2)
# keeps only features whose name literally contains "mean()" or "std()"
testrain <- testrain %>% select(matches('(subject|activity|mean\\(\\)|std\\(\\))'))
#make activity names descriptive (#3)
testrain <- testrain %>% mutate(activity = fct_recode(activity,
                                                      'walking' = '1',
                                                      'walking upstairs' = '2',
                                                      'walking downstairs' = '3',
                                                      'sitting' = '4',
                                                      'standing' = '5',
                                                      'laying' = '6'))
#create a tidy dataset with the average of each variable for each activity and subject
# NOTE(review): summarize_all() is superseded in dplyr >= 1.0; across(everything(), mean) is the modern form
tidy <- testrain %>% group_by(subject, activity) %>% summarize_all(mean)
#NB while i wish the assignment's instructions could've been followed in the numerical
#order they were given (#1-#4), efforts to make that happen resulted in ugly code.
|
/run_analysis.R
|
no_license
|
LeanMC/UCI-HAR-summary
|
R
| false
| false
| 2,455
|
r
|
#load the tidyverse. enough constituent packages were used to warrant loading the
#whole thing
library(tidyverse)
#download and unzip the data
# NOTE(review): this re-downloads the ~60MB archive on every run -- consider
# guarding with file.exists() before downloading
download.file(url = 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip',
              destfile = 'getdata_projectfiles_UCI HAR Dataset.zip')
unzip('getdata_projectfiles_UCI HAR Dataset.zip')
# NOTE(review): setwd() permanently changes the session working directory; all
# relative paths below depend on it
setwd('UCI HAR Dataset')
#lines 15-34 do the following:
#load the data, label the data set with descriptive variable names (#4), and
#merge test train sets (#1). the resulting dataset is a tibble called 'testrain'.
# features.txt has two space-separated columns; '_c' skips the index and keeps the name
features <- read_delim('features.txt', delim = ' ', col_names = FALSE, col_types = '_c') %>% pull()
# read the test and train measurement matrices, name columns by feature, stack rows
X <- c('test/X_test.txt', 'train/X_train.txt') %>%
  map(read_delim, delim = ' ', col_names = features,
      col_types = cols(.default = 'd'), trim_ws = TRUE) %>%
  reduce(rbind)
#NB: this introduces warnings because some of the feature names are EXACT duplicates
#of one another. I am not sure how to handle this. In fact I don't really know why
#there are two identical features in the first place.
# activity labels, read as a factor so fct_recode() works below
y <- c('test/y_test.txt', 'train/y_train.txt') %>%
  map(read_table, col_names = 'activity', col_types = 'f') %>%
  reduce(rbind)
# subject ids, read as integers
subject <- c('test/subject_test.txt', 'train/subject_train.txt') %>%
  map(read_table, col_names = 'subject', col_types = 'i') %>%
  reduce(rbind)
# column-bind into one tibble: subject | activity | 561 feature columns
testrain <- bind_cols(subject, y, X)
#extract mean and standard deviation measurements (#2)
# keeps only features whose name literally contains "mean()" or "std()"
testrain <- testrain %>% select(matches('(subject|activity|mean\\(\\)|std\\(\\))'))
#make activity names descriptive (#3)
testrain <- testrain %>% mutate(activity = fct_recode(activity,
                                                      'walking' = '1',
                                                      'walking upstairs' = '2',
                                                      'walking downstairs' = '3',
                                                      'sitting' = '4',
                                                      'standing' = '5',
                                                      'laying' = '6'))
#create a tidy dataset with the average of each variable for each activity and subject
# NOTE(review): summarize_all() is superseded in dplyr >= 1.0; across(everything(), mean) is the modern form
tidy <- testrain %>% group_by(subject, activity) %>% summarize_all(mean)
#NB while i wish the assignment's instructions could've been followed in the numerical
#order they were given (#1-#4), efforts to make that happen resulted in ugly code.
|
# Select only the height and mass columns from dplyr's built-in starwars dataset
starwars %>%
  dplyr::select(height, mass)
|
/ex_ans/ts_error4a.R
|
no_license
|
psy218/r_tutorial
|
R
| false
| false
| 44
|
r
|
# Select only the height and mass columns from dplyr's built-in starwars dataset
starwars %>%
  dplyr::select(height, mass)
|
# Check that the first element returned by ListSpecies() matches the first
# entry of the package's Species data object.
test_that("List of Speices matches the data", {
  species_list <- ListSpecies()
  expect_identical(species_list[1], Species[1])
})
|
/tests/testthat/test-ListSpecies.R
|
permissive
|
Tsmnbx/ZooGVT
|
R
| false
| false
| 106
|
r
|
# Check that the first element returned by ListSpecies() matches the first
# entry of the package's Species data object.
test_that("List of Speices matches the data", {
  species_list <- ListSpecies()
  expect_identical(species_list[1], Species[1])
})
|
# clean and compile phytometer biomass and in situ stem count
# authors: LMH, CTW
# created: Jan 2019
# script purpose:
# calculate average individual phytometer weights per species per competition plot
# > necessitates making data QA decisions..
# (TO DO [later, extra]: create additional cleaned up dataset for just AVFA that includes culm and pct greenness data)
# script steps:
## read in phytometer biomass and shelter key
## check phytometer biomass sensitivity to position and, for forbs, later season clip date (i.e. sample2 == 1)
## calculate individual plant wgts from (sample drymass / sample stem count)
## join stems counted in field and shelter key data
## write out to competition cleaned data folder
# note:
# should probably keep notes from anpp and stems datasets so can make decisions pre-analysis about how to treat browsed samples and confirm missing samples
# but CTW too lazy to write that in right now, just want to get all scripts working together first
# -- SETUP ----
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- generally discouraged
# in shared scripts; consider removing when run non-interactively
rm(list=ls()) # clean environment
library(readxl)
library(dplyr)
library(tidyr)
library(ggplot2)
options(stringsAsFactors = F)
theme_set(theme_bw())
# strings to treat as missing when reading the spreadsheets
na_vals <- c(" ", "", NA, "NA")
# set path to main level competition data folder
datpath <- "~/Dropbox/ClimVar/Competition/Data/"
# read in data
# phytometer metadata (for variable definition reference)
phyto.meta <- read_excel(paste0(datpath, "Competition_EnteredData/Competition_phytometers_spring2017.xlsx"),
                         sheet="metadata", na = na_vals, trim_ws = T, skip = 20)
# phytometer biomass
phyto.dat <- read_excel(paste0(datpath, "Competition_EnteredData/Competition_phytometers_spring2017.xlsx"),
                        sheet="phyto_anpp", na = na_vals, trim_ws = T)
# phytometer in situ stem count
phyto.stems <- read_excel(paste0(datpath, "Competition_EnteredData/Competition_phytometers_spring2017.xlsx"),
                         sheet="phyto_stems", na = na_vals, trim_ws = T)
# shelter key
shelter.key <- read.csv(paste0(datpath,"Shelter_key.csv"), na.strings = na_vals, strip.white = T)
# -- SENSITIVITY CHECKS ON RAW DATA -----
## PHYTOMETER POSITION ##
# noticeable differences in wgt by sp per competitor by position?
# don't expect any differences but just in case...
# one panel grid per phytometer species; sample2 == 0 keeps first-clip samples,
# is.na(disturbed) drops flagged rows
for(i in sort(unique(phyto.dat$phytometer))){
  p <- subset(phyto.dat, phytometer == i & sample2 == 0 & is.na(disturbed)) %>%
    ggplot(aes(as.factor(position), dry_wgt_g)) +
    geom_point(alpha = 0.6) +
    ggtitle(paste(i, "biomass sensitivity to phytometer position")) +
    facet_wrap(~background, scales = "free_y")
  print(p)
}
# plot all together
# NOTE(review): stat_summary(fun.y = ...) is deprecated in ggplot2 >= 3.3 (use `fun`) -- confirm ggplot2 version
subset(phyto.dat, sample2 == 0 & is.na(disturbed)) %>%
  ggplot(aes(as.factor(position), dry_wgt_g, col = phytometer)) +
  stat_summary(fun.y = mean, geom = "point", alpha = 0.6, pch =1) +
  ggtitle(paste("Phytometer position sensitivity: mean biomass per species per position")) +
  facet_wrap(~background)
# plot phytometer weights against all background competitors and densities in same panel
subset(phyto.dat, sample2 == 0 & is.na(disturbed)) %>%
  ggplot(aes(as.factor(position), dry_wgt_g, group = background)) +
  stat_summary(fun.y = mean, geom = "point", alpha = 0.6) +
  ggtitle(paste("Phytometer position sensitivity: mean biomass per species per position")) +
  facet_wrap(~phytometer, scales = "free_y")
# check stem count by position
for(i in sort(unique(phyto.stems$phytometer))){
  p <- subset(phyto.stems, phytometer == i & is.na(disturbed)) %>%
    ggplot(aes(as.factor(position), stems)) +
    geom_point(alpha = 0.6) +
    ggtitle(paste(i, "stem count sensitivity to phytometer position")) +
    facet_wrap(~background, scales = "free_y")
  print(p)
}
# > no visually noticeable influences in position. good!
## LATE SEASON CLIP FOR FORBS ##
# e.g. did we clip ESCA or TRHI too early? how to adjust if so?
# Check whether the later (May) clip dates bias ESCA/TRHI biomass relative to
# the April clips before pooling them.
unique(phyto.dat$phytometer[phyto.dat$sample2==1]) #ESCA, TRHI and VUMY (ignore VUMY, all way clipped in May)
data.frame(phyto.dat[phyto.dat$phytometer == "VUMY" & phyto.dat$sample2 == 1,])
# looked at data, definitely ignore this row, rep 2 of VUMY at the same position in same plot, and significantly more stems.. anomaly
#plot ESCA and TRHI
subset(phyto.dat, phytometer %in% c("ESCA", "TRHI")) %>%
# collapse exact clip dates to April vs May: "pril" catches "April"/"april"
# spelling variants; everything else is assumed to be a May date (TODO confirm)
mutate(clip_date = ifelse(!grepl("pril", clip_date), "May", "April")) %>%
ggplot(aes(clip_date, dry_wgt_g)) +
geom_boxplot() +
#geom_point(alpha = 0.5) +
facet_wrap(phytometer ~ background, scales = "free_y")
# esca seems to have developed more in may (i.e. heavier) *IF* it competed (e.g. april-may difference not the same in AVFA_HI vs AVFA_LO)
# control may esca also heavier .. ESCA is a later season forb tho
# trhi more consistently pooped out by may, except for in low density inferior grasses (VUMY ANd BRHO... but VUMY AND BRHO are also earlier season grasses than AVFA, and TRHI can hold on longer)
# > best we can do is acknowledge esca could have been clipped in may (since mid-later season forb) if results prove funky
## LOOK FOR OUTLIERS IN BIOMASS OR STEM COUNTS ##
# Scan raw biomass and stem counts per phytometer x background for extreme
# values before computing per-individual weights.
#biomass
ggplot(subset(phyto.dat, sample2 == 0), aes(background, dry_wgt_g)) +
geom_point(alpha = 0.5) +
theme(axis.text.x = element_text(angle = 90)) +
facet_wrap(~ phytometer, scales = "free_y")
# BRHO (laca_hi), ESCA (vumy_hi), and LACA (trhi_lo) all have 1 outlier..
# check ranges
# min/max biomass per phytometer species (NAs dropped)
with(subset(phyto.dat, sample2 == 0), lapply(split(dry_wgt_g, phytometer), function(x) range(x, na.rm=T)))
# range for values greater than 0
with(subset(phyto.dat, sample2 == 0 & dry_wgt_g>0), lapply(split(dry_wgt_g, phytometer), function(x) range(x, na.rm=T)))
#stem counts
ggplot(subset(phyto.dat, sample2 == 0), aes(background, stems)) +
geom_point(alpha = 0.5) +
theme(axis.text.x = element_text(angle = 90)) +
facet_wrap(~ phytometer, scales = "free_y")
# BRHO (laca_hi), ESCA (vumy_hi), and LACA (trhi_lo) all have 1 outlier..
# check ranges
# min/max stem count per phytometer species (NAs dropped)
with(subset(phyto.dat, sample2 == 0), lapply(split(stems, phytometer), function(x) range(x, na.rm=T)))
# range for values greater than 0
with(subset(phyto.dat, sample2 == 0 & stems>0), lapply(split(stems, phytometer), function(x) range(x, na.rm=T)))
# not as bad
# check boxplots
ggplot(subset(phyto.dat, sample2 == 0), aes(background, stems)) +
geom_boxplot() +
theme(axis.text.x = element_text(angle = 90)) +
facet_wrap(~ phytometer, scales = "free_y") #meh.. one AVFA outlier in BRHO_LO.. AV competitive.. metadata notes up to roughly 10 AV seeded, so outlier number is possible
# > general conclusions about data quality:
## > exclude sample2
## > no worries about clip date or phyto position
## > check how outliers in ESCA, TRHI, and, LACA biomass play out in individual weights
# -- CLEAN UP AND AGGREGATE DATA -----
# Get individual weights
phyto.dat2 <- as.data.frame(subset(phyto.dat, sample2 == 0)) # exclude 2nd rep samples
# mean per-individual weight = sample dry mass / stems in the sample
phyto.dat2$p.ind.wgt.g <- with(phyto.dat2, round(dry_wgt_g/stems,6)) # round to 6 decimal places (altho our analytic scale could only measure out to 4..)
# check NaNs generated (is it all from 0 biomass wgt? [i.e. nothing grew])
# (0 g / 0 stems divides to NaN)
phyto.dat2$dry_wgt_g[is.nan(phyto.dat2$p.ind.wgt.g)] # yes
# change NaNs to 0
phyto.dat2$p.ind.wgt.g[is.nan(phyto.dat2$p.ind.wgt.g)] <- 0
# split background competition and background seeding density
# background values look like "AVFA_HI": first 4 chars = species code,
# remainder after "XXXX_" = density code -- TODO confirm against raw sheet
phyto.dat2$backgroundspp <- substr(phyto.dat2$background,1,4)
phyto.dat2$backgrounddensity <- gsub("[A-Z]{4}_", "", phyto.dat2$background)
# clean up background control names
# grepl("Co") matches any background containing "Co" -- presumably only the
# Control plots do; confirm no species/density label contains "Co"
phyto.dat2$backgroundspp[grepl("Co", phyto.dat2$background)] <- "Control"
phyto.dat2$backgrounddensity[grepl("Co", phyto.dat2$background)] <- NA
# change NA in disturbed to 0 (so true binary var)
phyto.dat2$disturbed[is.na(phyto.dat2$disturbed)] <- 0
# prep stem dataset for merging with biomass
# The stems sheet labels species with full genus names and lowercase density
# codes, unlike the ANPP sheet; recode both to match before joining.
phyto.stems2 <- dplyr::select(phyto.stems, plot, background:stems, disturbed) %>%
rename(insitu_pstems = stems,
insitu_pdisturbed = disturbed) %>%
mutate(insitu_pdisturbed = ifelse(is.na(insitu_pdisturbed), 0, insitu_pdisturbed),
# split background species from density treatment
backgroundspp = gsub("_.*", "", background),
backgrounddensity = ifelse(grepl("lo", background), "LO",
ifelse(grepl("hi", background), "HI", NA)),
# change genera to 4-letter code
backgroundspp = recode(backgroundspp, Avena = "AVFA", Bromus = "BRHO", Lasthenia = "LACA",
Eschscholzia = "ESCA", Vulpia = "VUMY", Trifolium = "TRHI"),
backgroundspp = ifelse(grepl("Cont", backgroundspp), "Control", backgroundspp),
# change genera to 4-letter code in phytometer col
phytometer = recode(phytometer, Avena = "AVFA", Bromus = "BRHO", Lasthenia = "LACA",
Eschscholzia = "ESCA", Vulpia = "VUMY", Trifolium = "TRHI"))
# combine biomass and in situ stem count datasets, select final cols for clean dataset
phyto.dat3 <- phyto.dat2 %>%
dplyr::select(plot, backgroundspp, backgrounddensity, phytometer, dry_wgt_g, stems, p.ind.wgt.g, disturbed) %>%
# rename stem column in ANPP dataset to not confused with in situ stem column
rename(pdry_wgt_g = dry_wgt_g,
pANPP_stems = stems,
pANPP_disturbed = disturbed) %>%
# join stem counts
# NOTE(review): no `by` given, so left_join() matches on every shared column
# name (dplyr prints which ones it used) -- make the keys explicit once confirmed
left_join(phyto.stems2[!colnames(phyto.stems2) %in% c("background", "position")]) %>%
# plot-level total weight = mean individual weight x stems counted in the field
mutate(p_totwgt = round(p.ind.wgt.g*insitu_pstems,4))
# logic check: do any ANPP stem counts exceed field stem counts? (should not)
summary(phyto.dat3$pANPP_stems > phyto.dat3$insitu_pstems)
# what are the NAs?
View(phyto.dat3[is.na(phyto.dat3$pANPP_stems > phyto.dat3$insitu_pstems),])
# these are true NAs for ANPP:
# 1) VUMY was planted in place of BRHO
# 2) ESCA wasn't clipped, but stems counted
# 3) TRHI was missing (didn't see it in plot photo, no sample for it in lab.. but since stem count exists, must have not been clipped)
# rename cols to lmh's preferred names -- not yet
# colnames(phyto.dat3)[colnames(phyto.dat3) %in% c("phytometer", "dry_wgt_g", "ANPP_stems", "field_stems")] <- c("phyto", "drywgt.g", "ANPP.no.plants", "field.no.stems")
# -- FINISHING ----
# join with shelter key and clean up
# falltreatment collapses the 4 shelter treatments by fall rainfall: fallDry
# and consistentDry were dry in fall, everything else received ambient/wet
phyto.bmass <-left_join(phyto.dat3, shelter.key, by = "plot") %>%
mutate(backgrounddensity = recode(backgrounddensity, LO = "low", HI = "high"),
falltreatment = ifelse(treatment %in% c("fallDry", "consistentDry"), "dry", "wet")) #springDry received ambient rainfall in fall
# write out to dropbox competition cleaned data folder
#write.csv(phyto.bmass, paste0(datpath, "Competition_CleanedData/ClimVar_Comp_phytometer-biomass-2.csv"), row.names = F)
# give more informative name
write.csv(phyto.bmass, paste0(datpath, "Competition_CleanedData/Competition_phytometers_clean.csv"), row.names = F)
|
/Competition/Data-cleaning/Phytometer-stem-biomass_datacleaning.R
|
no_license
|
HallettLab/usda-climvar
|
R
| false
| false
| 10,763
|
r
|
# clean and compile phytometer biomass and in situ stem count
# authors: LMH, CTW
# created: Jan 2019
# script purpose:
# calculate average individual phytometer weights per species per competition plot
# > necessitates making data QA decisions..
# (TO DO [later, extra]: create additional cleaned-up dataset for just AVFA that includes culm and pct greenness data)
# script steps:
## read in phytometer biomass and shelter key
## check phytometer biomass sensitivity to position and, for forbs, later season clip date (i.e. sample2 == 1)
## calculate individual plant wgts from (sample drymass / sample stem count)
## join stems counted in field and shelter key data
## write out to competition cleaned data folder
# note:
# should probably keep notes from anpp and stems datasets so can make decisions pre-analysis about how to treat browsed samples and confirm missing samples
# but CTW too lazy to write that in right now, just want to get all scripts working together first
# -- SETUP ----
rm(list=ls()) # clean environment
library(readxl)  # read_excel(): Excel sheets
library(dplyr)
library(tidyr)
library(ggplot2)
options(stringsAsFactors = F)
theme_set(theme_bw()) # global ggplot theme for the whole session
# cells to treat as missing: blanks, a single space, and literal "NA" text
na_vals <- c(" ", "", NA, "NA")
# set path to main level competition data folder
datpath <- "~/Dropbox/ClimVar/Competition/Data/"
# read in data
# phytometer metadata (for variable definition reference)
# skip = 20 jumps past header/description rows on the metadata sheet -- TODO
# confirm if the workbook layout changes
phyto.meta <- read_excel(paste0(datpath, "Competition_EnteredData/Competition_phytometers_spring2017.xlsx"),
sheet="metadata", na = na_vals, trim_ws = T, skip = 20)
# phytometer biomass
phyto.dat <- read_excel(paste0(datpath, "Competition_EnteredData/Competition_phytometers_spring2017.xlsx"),
sheet="phyto_anpp", na = na_vals, trim_ws = T)
# phytometer in situ stem count
phyto.stems <- read_excel(paste0(datpath, "Competition_EnteredData/Competition_phytometers_spring2017.xlsx"),
sheet="phyto_stems", na = na_vals, trim_ws = T)
# shelter key
shelter.key <- read.csv(paste0(datpath,"Shelter_key.csv"), na.strings = na_vals, strip.white = T)
# -- SENSITIVITY CHECKS ON RAW DATA -----
## PHYTOMETER POSITION ##
# noticeable differences in wgt by sp per competitor by position?
# don't expect any differences but just in case...
# One figure per phytometer species: biomass by planting position, faceted by
# competition background. 2nd-rep samples (sample2 == 1) and disturbed samples
# are excluded (at this point disturbed is NA when undisturbed).
for (i in sort(unique(phyto.dat$phytometer))) {
  p <- subset(phyto.dat, phytometer == i & sample2 == 0 & is.na(disturbed)) %>%
    ggplot(aes(as.factor(position), dry_wgt_g)) +
    geom_point(alpha = 0.6) +
    ggtitle(paste(i, "biomass sensitivity to phytometer position")) +
    facet_wrap(~background, scales = "free_y")
  print(p) # ggplot objects are not auto-printed inside a loop
}
# plot all together
subset(phyto.dat, sample2 == 0 & is.na(disturbed)) %>%
  ggplot(aes(as.factor(position), dry_wgt_g, col = phytometer)) +
  # `fun.y` was deprecated in ggplot2 3.3.0; `fun` is the current argument
  stat_summary(fun = mean, geom = "point", alpha = 0.6, pch = 1) +
  ggtitle(paste("Phytometer position sensitivity: mean biomass per species per position")) +
  facet_wrap(~background)
# plot phytometer weights against all background competitors and densities in same panel
subset(phyto.dat, sample2 == 0 & is.na(disturbed)) %>%
  ggplot(aes(as.factor(position), dry_wgt_g, group = background)) +
  stat_summary(fun = mean, geom = "point", alpha = 0.6) +
  ggtitle(paste("Phytometer position sensitivity: mean biomass per species per position")) +
  facet_wrap(~phytometer, scales = "free_y")
# check stem count by position
# Same position check, but on the raw field stem counts.
for (i in sort(unique(phyto.stems$phytometer))) {
  p <- subset(phyto.stems, phytometer == i & is.na(disturbed)) %>%
    ggplot(aes(as.factor(position), stems)) +
    geom_point(alpha = 0.6) +
    ggtitle(paste(i, "stem count sensitivity to phytometer position")) +
    facet_wrap(~background, scales = "free_y")
  print(p)
}
# > no visually noticeable influences in position. good!
## LATE SEASON CLIP FOR FORBS ##
# e.g. did we clip ESCA or TRHI too early? how to adjust if so?
# Check whether the later (May) clip dates bias ESCA/TRHI biomass relative to
# the April clips before pooling them.
unique(phyto.dat$phytometer[phyto.dat$sample2==1]) #ESCA, TRHI and VUMY (ignore VUMY, all way clipped in May)
data.frame(phyto.dat[phyto.dat$phytometer == "VUMY" & phyto.dat$sample2 == 1,])
# looked at data, definitely ignore this row, rep 2 of VUMY at the same position in same plot, and significantly more stems.. anomaly
#plot ESCA and TRHI
subset(phyto.dat, phytometer %in% c("ESCA", "TRHI")) %>%
# collapse exact clip dates to April vs May: "pril" catches "April"/"april"
# spelling variants; everything else is assumed to be a May date (TODO confirm)
mutate(clip_date = ifelse(!grepl("pril", clip_date), "May", "April")) %>%
ggplot(aes(clip_date, dry_wgt_g)) +
geom_boxplot() +
#geom_point(alpha = 0.5) +
facet_wrap(phytometer ~ background, scales = "free_y")
# esca seems to have developed more in may (i.e. heavier) *IF* it competed (e.g. april-may difference not the same in AVFA_HI vs AVFA_LO)
# control may esca also heavier .. ESCA is a later season forb tho
# trhi more consistently pooped out by may, except for in low density inferior grasses (VUMY ANd BRHO... but VUMY AND BRHO are also earlier season grasses than AVFA, and TRHI can hold on longer)
# > best we can do is acknowledge esca could have been clipped in may (since mid-later season forb) if results prove funky
## LOOK FOR OUTLIERS IN BIOMASS OR STEM COUNTS ##
# Scan raw biomass and stem counts per phytometer x background for extreme
# values before computing per-individual weights.
#biomass
ggplot(subset(phyto.dat, sample2 == 0), aes(background, dry_wgt_g)) +
geom_point(alpha = 0.5) +
theme(axis.text.x = element_text(angle = 90)) +
facet_wrap(~ phytometer, scales = "free_y")
# BRHO (laca_hi), ESCA (vumy_hi), and LACA (trhi_lo) all have 1 outlier..
# check ranges
# min/max biomass per phytometer species (NAs dropped)
with(subset(phyto.dat, sample2 == 0), lapply(split(dry_wgt_g, phytometer), function(x) range(x, na.rm=T)))
# range for values greater than 0
with(subset(phyto.dat, sample2 == 0 & dry_wgt_g>0), lapply(split(dry_wgt_g, phytometer), function(x) range(x, na.rm=T)))
#stem counts
ggplot(subset(phyto.dat, sample2 == 0), aes(background, stems)) +
geom_point(alpha = 0.5) +
theme(axis.text.x = element_text(angle = 90)) +
facet_wrap(~ phytometer, scales = "free_y")
# BRHO (laca_hi), ESCA (vumy_hi), and LACA (trhi_lo) all have 1 outlier..
# check ranges
# min/max stem count per phytometer species (NAs dropped)
with(subset(phyto.dat, sample2 == 0), lapply(split(stems, phytometer), function(x) range(x, na.rm=T)))
# range for values greater than 0
with(subset(phyto.dat, sample2 == 0 & stems>0), lapply(split(stems, phytometer), function(x) range(x, na.rm=T)))
# not as bad
# check boxplots
ggplot(subset(phyto.dat, sample2 == 0), aes(background, stems)) +
geom_boxplot() +
theme(axis.text.x = element_text(angle = 90)) +
facet_wrap(~ phytometer, scales = "free_y") #meh.. one AVFA outlier in BRHO_LO.. AV competitive.. metadata notes up to roughly 10 AV seeded, so outlier number is possible
# > general conclusions about data quality:
## > exclude sample2
## > no worries about clip date or phyto position
## > check how outliers in ESCA, TRHI, and, LACA biomass play out in individual weights
# -- CLEAN UP AND AGGREGATE DATA -----
# Get individual weights
phyto.dat2 <- as.data.frame(subset(phyto.dat, sample2 == 0)) # exclude 2nd rep samples
# mean per-individual weight = sample dry mass / stems in the sample
phyto.dat2$p.ind.wgt.g <- with(phyto.dat2, round(dry_wgt_g/stems,6)) # round to 6 decimal places (altho our analytic scale could only measure out to 4..)
# check NaNs generated (is it all from 0 biomass wgt? [i.e. nothing grew])
# (0 g / 0 stems divides to NaN)
phyto.dat2$dry_wgt_g[is.nan(phyto.dat2$p.ind.wgt.g)] # yes
# change NaNs to 0
phyto.dat2$p.ind.wgt.g[is.nan(phyto.dat2$p.ind.wgt.g)] <- 0
# split background competition and background seeding density
# background values look like "AVFA_HI": first 4 chars = species code,
# remainder after "XXXX_" = density code -- TODO confirm against raw sheet
phyto.dat2$backgroundspp <- substr(phyto.dat2$background,1,4)
phyto.dat2$backgrounddensity <- gsub("[A-Z]{4}_", "", phyto.dat2$background)
# clean up background control names
# grepl("Co") matches any background containing "Co" -- presumably only the
# Control plots do; confirm no species/density label contains "Co"
phyto.dat2$backgroundspp[grepl("Co", phyto.dat2$background)] <- "Control"
phyto.dat2$backgrounddensity[grepl("Co", phyto.dat2$background)] <- NA
# change NA in disturbed to 0 (so true binary var)
phyto.dat2$disturbed[is.na(phyto.dat2$disturbed)] <- 0
# prep stem dataset for merging with biomass
# The stems sheet labels species with full genus names and lowercase density
# codes, unlike the ANPP sheet; recode both to match before joining.
phyto.stems2 <- dplyr::select(phyto.stems, plot, background:stems, disturbed) %>%
rename(insitu_pstems = stems,
insitu_pdisturbed = disturbed) %>%
mutate(insitu_pdisturbed = ifelse(is.na(insitu_pdisturbed), 0, insitu_pdisturbed),
# split background species from density treatment
backgroundspp = gsub("_.*", "", background),
backgrounddensity = ifelse(grepl("lo", background), "LO",
ifelse(grepl("hi", background), "HI", NA)),
# change genera to 4-letter code
backgroundspp = recode(backgroundspp, Avena = "AVFA", Bromus = "BRHO", Lasthenia = "LACA",
Eschscholzia = "ESCA", Vulpia = "VUMY", Trifolium = "TRHI"),
backgroundspp = ifelse(grepl("Cont", backgroundspp), "Control", backgroundspp),
# change genera to 4-letter code in phytometer col
phytometer = recode(phytometer, Avena = "AVFA", Bromus = "BRHO", Lasthenia = "LACA",
Eschscholzia = "ESCA", Vulpia = "VUMY", Trifolium = "TRHI"))
# combine biomass and in situ stem count datasets, select final cols for clean dataset
phyto.dat3 <- phyto.dat2 %>%
dplyr::select(plot, backgroundspp, backgrounddensity, phytometer, dry_wgt_g, stems, p.ind.wgt.g, disturbed) %>%
# rename stem column in ANPP dataset to not confused with in situ stem column
rename(pdry_wgt_g = dry_wgt_g,
pANPP_stems = stems,
pANPP_disturbed = disturbed) %>%
# join stem counts
# NOTE(review): no `by` given, so left_join() matches on every shared column
# name (dplyr prints which ones it used) -- make the keys explicit once confirmed
left_join(phyto.stems2[!colnames(phyto.stems2) %in% c("background", "position")]) %>%
# plot-level total weight = mean individual weight x stems counted in the field
mutate(p_totwgt = round(p.ind.wgt.g*insitu_pstems,4))
# logic check: do any ANPP stem counts exceed field stem counts? (should not)
summary(phyto.dat3$pANPP_stems > phyto.dat3$insitu_pstems)
# what are the NAs?
View(phyto.dat3[is.na(phyto.dat3$pANPP_stems > phyto.dat3$insitu_pstems),])
# these are true NAs for ANPP:
# 1) VUMY was planted in place of BRHO
# 2) ESCA wasn't clipped, but stems counted
# 3) TRHI was missing (didn't see it in plot photo, no sample for it in lab.. but since stem count exists, must have not been clipped)
# rename cols to lmh's preferred names -- not yet
# colnames(phyto.dat3)[colnames(phyto.dat3) %in% c("phytometer", "dry_wgt_g", "ANPP_stems", "field_stems")] <- c("phyto", "drywgt.g", "ANPP.no.plants", "field.no.stems")
# -- FINISHING ----
# join with shelter key and clean up
# falltreatment collapses the 4 shelter treatments by fall rainfall: fallDry
# and consistentDry were dry in fall, everything else received ambient/wet
phyto.bmass <-left_join(phyto.dat3, shelter.key, by = "plot") %>%
mutate(backgrounddensity = recode(backgrounddensity, LO = "low", HI = "high"),
falltreatment = ifelse(treatment %in% c("fallDry", "consistentDry"), "dry", "wet")) #springDry received ambient rainfall in fall
# write out to dropbox competition cleaned data folder
#write.csv(phyto.bmass, paste0(datpath, "Competition_CleanedData/ClimVar_Comp_phytometer-biomass-2.csv"), row.names = F)
# give more informative name
write.csv(phyto.bmass, paste0(datpath, "Competition_CleanedData/Competition_phytometers_clean.csv"), row.names = F)
|
##Reading in results from non-economic simulation on all
##livestock exploitations in Flanders
#Including dynamic simulation 2016-2035
#Required packages + defining GAMS directory
library(gdxrrw)   # rgdx.param(): read parameters from GAMS .gdx files
library(reshape2) # melt()/dcast()
library(ggplot2)
library(scales)
library(qdap)     # mgsub(): multi-pattern string substitution
# point gdxrrw at the local GAMS installation (machine-specific path)
igdx("C:/GAMS/win64/24.7/")
#Clear environment
rm(list = ls())
Stables <- read.csv("C:/Users/ddpue/Documents/Spatial Optimization Flanders/DataHandling_VLM/StablesS1.csv")
# NOTE(review): this Stables copy is wiped by the rm(list = ls()) in the next
# section and re-read later; this read looks redundant -- confirm before removing
#Set new working directory
setwd("C:/Users/ddpue/Documents/Spatial Optimization Flanders/ResultsNonEconomic")
################################################################################
#First: effect of random allocation (NIS based or totally random)
################################################################################
#Clear environment
rm(list = ls())
# Hand-transcribed aggregate outcomes for the NIS-based allocation runs:
# three random seeds x three scenarios (9 rows, seed-major order).
AggregateResults <- data.frame(
  Seed          = paste0("NISbased_seed", rep(1:3, each = 3)),
  Scenario      = paste0("scenario", rep(1:3, times = 3)),
  Emission      = c(30.80, 31.60, 30.80, 30.86, 31.64, 30.86,
                    30.73, 31.57, 30.73),
  ClosedStables = c(856, 2081, 3287, 825, 2033, 3220, 837, 2193, 3401),
  Impact        = c(20122, 20122, 17590, 20204, 20204, 17706,
                    20105, 20105, 17471),
  Allocation    = "NISbased"
)
# Hand-transcribed aggregate outcomes for the fully random allocation runs
# (same layout as the NIS-based table: 3 seeds x 3 scenarios).
AggregateResultsRandom <- data.frame(matrix(nrow=9, ncol=6))
colnames(AggregateResultsRandom) <- c("Seed", "Scenario", "Emission", "ClosedStables", "Impact", "Allocation")
AggregateResultsRandom$Seed <- paste("FullyRandom_seed", c(rep(1,3), rep(2,3), rep(3,3)), sep="")
AggregateResultsRandom$Scenario <- paste("scenario", rep(c(1,2,3), 3), sep="")
AggregateResultsRandom$Emission <- c(30.46, 31.37, 30.46, 30.48, 31.40, 30.48,
30.52, 31.39, 30.52)
AggregateResultsRandom$ClosedStables <- c(1113, 1954, 3864, 977, 1851, 3165,
1093, 1940, 3085)
AggregateResultsRandom$Impact <- c(23138, 23138, 19937, 23351, 23351, 20110,
23159, 23159, 20063)
AggregateResultsRandom$Allocation <- "FullyRandom"
# stack both allocation methods, then go long: melt() keeps the non-numeric
# columns (Seed, Scenario, Allocation) as id variables and gathers
# Emission/ClosedStables/Impact into variable/value pairs
AggregateResults <- rbind(AggregateResults, AggregateResultsRandom)
AggregateResults <- melt(AggregateResults)
# replace scenario codes with descriptive labels
AggregateResults$Scenario <- mgsub(c("scenario1", "scenario2", "scenario3"),
c("<5% SS", "SO max NH3", "SO min ADS"),
AggregateResults$Scenario)
# Compare the two allocation methods, seed by seed, for each outcome metric.
# Total emission: only the scenarios where emission differs meaningfully
ggplot(data=subset(AggregateResults, variable=="Emission" & Scenario %in% c("<5% SS", "SO max NH3"))
, aes(x=Scenario, y=value, group=Seed, colour=Seed, shape=Allocation))+
geom_line(aes(linetype=Allocation)) +
geom_point() +
theme(legend.title=element_blank())+
ylab("Total Ammonia Emission (kton/yr)") + xlab(NULL)
# Number of closed (empty) stables: all three scenarios
ggplot(data=subset(AggregateResults, variable=="ClosedStables")
, aes(x=Scenario, y=value, group=Seed, colour=Seed, shape=Allocation))+
geom_line(aes(linetype=Allocation)) +
geom_point()+
theme(legend.title=element_blank())+
ylab("Number of empty stables") + xlab(NULL)
# Deposition impact: the two scenarios where impact differs
ggplot(data=subset(AggregateResults, variable=="Impact" & Scenario %in% c("<5% SS", "SO min ADS"))
, aes(x=Scenario, y=value, group=Seed, colour=Seed, shape=Allocation))+
geom_line(aes(linetype=Allocation)) +
geom_point()+
theme(legend.title=element_blank())+
ylab("Impact (total aggregate deposition score)") + xlab(NULL)
################################################################################
# Results: non-dynamic simulation
################################################################################
# Read in results (NIS-based allocation, seed 1)
# Names of the GAMS parameters to pull out of results.gdx.
Pars <- c("dSignificanceScore", "dADS", "dTotalADS", "dPercentageOccupied", "dPercentageOccupiedRegion",
          "dClosedStables", "dClosedExploitations","dEmissionStable",
          "dEmissionExploitation", "dEmissionRegion", "dAnimals",
          "dStableTypesNIS", "dAnimalsNIS", "dAnimalGroupNIS",
          "dEmissionNIS", "dADSNIS", "dEmissionAnimal", "dEmissionStableType", "dEmissionAnimalGroup",
          "dMarginalSignificanceScore", "dMaxEmissionStable", "dMaxEmissionExploitation", "dMaxEmissionNIS",
          "dMaxEmissionAnimalCategory","dMaxEmissionSector", "dMaxEmissionStableType", "dMaxAnimalsGroup", "dAnimalsGroup")
# Read each parameter into a named list of data frames. lapply() + setNames()
# replaces sapply(): it always returns a list and names it explicitly, instead
# of relying on sapply's input-dependent simplification/naming behavior.
AllData <- lapply(setNames(Pars, Pars), function(p) {
  rgdx.param("results.gdx", p, squeeze = FALSE)
})
# Unpack every data frame into the global environment by parameter name
# (script-style convenience; the rest of the script refers to them directly).
list2env(AllData, envir = .GlobalEnv)
##Emission per sector (cattle, pigs, poultry, others, horses)
# Harmonize column names and stack the theoretical maximum (tagged as the
# pseudo-scenario "max") on top of the per-scenario emissions so they can be
# plotted together.
colnames(dMaxEmissionSector) <- c("Sector", "Emission")
dMaxEmissionSector$Scenario <- "max"
colnames(dEmissionAnimalGroup) <- c("Sector","Scenario","Emission")
dEmissionAnimalGroup <- rbind.data.frame(dMaxEmissionSector, dEmissionAnimalGroup)
# Translate the Dutch sector levels to English, one level at a time.
sector_translation <- c(Runderen = "Cattle", Varkens = "Pigs", Pluimvee = "Poultry",
                        Andere = "Others", Paarden = "Horses")
for (dutch_level in names(sector_translation)) {
  levels(dEmissionAnimalGroup$Sector)[levels(dEmissionAnimalGroup$Sector) == dutch_level] <-
    sector_translation[[dutch_level]]
}
# Replace scenario codes with descriptive labels.
dEmissionAnimalGroup$Scenario <- mgsub(c("sc1", "sc2", "sc3", "sc4"),
                                       c("<5% SS", "SO", "PAN", "BAT"),
                                       dEmissionAnimalGroup$Scenario)
# Palette for the five sectors. The original called brewer.pal(), which lives
# in RColorBrewer -- a package this script never attaches; scales (attached at
# the top) exposes the same Brewer "Accent" palette via brewer_pal().
SectorCols <- brewer_pal(palette = "Accent")(5)
#All sectors together
# Line/point view: absolute NH3 emission per sector across the four scenarios.
ggplot(data = subset(dEmissionAnimalGroup, Scenario != "max"),
       aes(x = Scenario, y = Emission, group = Sector, colour = Sector, shape = Sector)) +
  geom_line(lty = "dotted") +
  geom_point(size = 3) +
  # legend.title was specified twice (element_blank + element_text); keep a
  # single blank spec, matching the script's other figures
  theme(legend.title = element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18)) +
  ylab("Total Ammonia Emission (kgNH3/yr)") + xlab(NULL)
# Stacked-bar view of the same data.
ggplot(data = subset(dEmissionAnimalGroup, Scenario != "max"),
       aes(x = Scenario, y = Emission, fill = Sector)) +
  geom_bar(colour = "black", stat = "identity") +
  scale_fill_manual(values = SectorCols)
#Relative to maximum
# Each sector's emission as a fraction of its theoretical maximum (1 = max).
EmissionAnimalGroupRelative <- subset(dEmissionAnimalGroup, Scenario != "max")
# Vectorized lookup via match() replaces the old row-wise apply(), which
# coerced every mixed-type row to character and abused print() for its return
# value (echoing each ratio to the console as a side effect).
max_rows <- subset(dEmissionAnimalGroup, Scenario == "max")
EmissionAnimalGroupRelative$EmissionRelative <- EmissionAnimalGroupRelative$Emission /
  max_rows$Emission[match(EmissionAnimalGroupRelative$Sector, max_rows$Sector)]
ggplot(data = EmissionAnimalGroupRelative,
       aes(x = Scenario, y = EmissionRelative, group = Sector, colour = Sector, shape = Sector)) +
  geom_line(lty = "dotted") +
  geom_point(size = 3) +
  # legend.title was specified twice (element_blank + element_text); keep blank
  theme(legend.title = element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18)) +
  ylab("Relative NH3 emission (1=maximum)") + xlab(NULL)
# Grouped-bar view of the same ratios.
ggplot(data = EmissionAnimalGroupRelative, aes(x = Scenario, y = EmissionRelative, fill = Sector)) +
  geom_bar(stat = "identity", position = position_dodge(), colour = "black") +
  scale_fill_manual(values = SectorCols)
#ggsave("EmissionAnimalGroup_relative.png", dpi=400)
##Animal per sector
# Harmonize column names, relabel the scenario codes, then stack the
# theoretical maximum ("max" pseudo-scenario) on top of the per-scenario
# animal counts.
colnames(dAnimalsGroup) <- c("Sector", "Scenario", "Number")
dMaxAnimalsGroup$Scenario <- "max"
colnames(dMaxAnimalsGroup) <- c("Sector", "Number", "Scenario")
# relabel before the rbind so the "max" rows keep their tag
dAnimalsGroup$Scenario <- mgsub(c("sc1", "sc2", "sc3", "sc4"),
                                c("<5% SS", "SO", "PAN", "BAT"),
                                dAnimalsGroup$Scenario)
dAnimalsGroup <- rbind.data.frame(dMaxAnimalsGroup, dAnimalsGroup)
# Translate the Dutch sector levels to English, one level at a time.
sector_labels <- c(Runderen = "Cattle", Varkens = "Pigs", Pluimvee = "Poultry",
                   Andere = "Others", Paarden = "Horses")
for (dutch_level in names(sector_labels)) {
  levels(dAnimalsGroup$Sector)[levels(dAnimalsGroup$Sector) == dutch_level] <-
    sector_labels[[dutch_level]]
}
# Per-sector totals of animal numbers across scenarios, each compared against
# the sector's theoretical maximum ("max" pseudo-scenario) drawn as a dashed
# red reference line.
# Shared plot builder (replaces five nearly identical copy-pasted blocks that
# each left a global `max` variable shadowing base::max). `point_size`
# preserves the slightly larger markers originally used for the pigs figure.
plot_sector_numbers <- function(sector, y_label, point_size = 2) {
  sector_max <- as.numeric(subset(dAnimalsGroup, Sector == sector & Scenario == "max")[2])
  ggplot(data = subset(dAnimalsGroup, Sector == sector & Scenario != "max"),
         aes(x = Scenario, y = Number, group = 1)) +
    geom_line(lty = "dotted") +
    geom_point(size = point_size) +
    # legend.title was specified twice (element_blank + element_text); keep blank
    theme(legend.title = element_blank(),
          plot.title = element_text(size = 15, face = "bold"),
          axis.title = element_text(size = 18),
          legend.text = element_text(size = 18),
          axis.text = element_text(size = 18)) +
    # constant reference line: pass yintercept directly instead of via aes()
    geom_hline(yintercept = sector_max, lty = "dashed", colour = "red") +
    ylab(y_label) + xlab(NULL)
}
#Pigs
print(plot_sector_numbers("Pigs", "Total number of pigs", point_size = 3))
#Cattle
print(plot_sector_numbers("Cattle", "Total number of cattle"))
#Poultry
print(plot_sector_numbers("Poultry", "Total number of poultry"))
#Other
print(plot_sector_numbers("Others", "Total number of others"))
#Horses
print(plot_sector_numbers("Horses", "Total number of horses"))
#Relative to maximum
# Each sector's animal numbers as a fraction of its theoretical maximum
# (1 = maximum).
AnimalGroupRelative <- subset(dAnimalsGroup, Scenario != "max")
# Vectorized lookup via match() replaces the old row-wise apply(), which
# coerced mixed-type rows to character and abused print() for its return value.
max_animal_rows <- subset(dAnimalsGroup, Scenario == "max")
AnimalGroupRelative$NumberRelative <- AnimalGroupRelative$Number /
  max_animal_rows$Number[match(AnimalGroupRelative$Sector, max_animal_rows$Sector)]
ggplot(data = AnimalGroupRelative,
       aes(x = Scenario, y = NumberRelative, group = Sector, colour = Sector, shape = Sector)) +
  geom_line(lty = 'dotted') +
  geom_point(size = 3) +
  # legend.title was specified twice (element_blank + element_text); keep blank
  theme(legend.title = element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18)) +
  ylab("Relative number of animals (1=maximum)") + xlab(NULL)
#ClosedStables per stable Type
# Wide table: one row per stable, one emission column per scenario. Stables
# absent from a scenario come out of dcast() as NA and are treated as zero
# emission (i.e. closed).
EmissionStable <- dcast(dEmissionStable, sStable~sScen)
EmissionStable[is.na(EmissionStable)] <- 0
# re-read the stable attribute table (the earlier copy was wiped by rm(list = ls()))
Stables <- read.csv("C:/Users/ddpue/Documents/Spatial Optimization Flanders/DataHandling_VLM/StablesS1.csv")
StablesClosed <- Stables[c("StableType", "Name")]
colnames(StablesClosed) <- c("StableType", "sStable")
EmissionStable <- merge(EmissionStable, StablesClosed, by="sStable")
# a stable with zero emission in a scenario is interpreted as closed; count
# closures per stable type for scenarios 1-3
EmissionStableZeroSc1 <- subset(EmissionStable, sc1 == 0)
Sc1Closed <- as.data.frame(table(EmissionStableZeroSc1$StableType))
EmissionStableZeroSc2 <- subset(EmissionStable, sc2 == 0)
Sc2Closed <- as.data.frame(table(EmissionStableZeroSc2$StableType))
EmissionStableZeroSc3 <- subset(EmissionStable, sc3 == 0)
Sc3Closed <- as.data.frame(table(EmissionStableZeroSc3$StableType))
##Make files for QGIS
# Export wide per-stable and per-municipality (NIS) tables for mapping.
#Stables
StablesQGIS <- dcast(dEmissionStable, sStable~sScen)
StablesQGIS <- merge(StablesQGIS, dMaxEmissionStable, by="sStable")
# attach coordinates and scores from the stable attribute table
Stables_Select <- Stables[c("Name", "X", "Y", "ADS", "SS")]
colnames(Stables_Select) <- c("sStable", "X", "Y", "ADS", "SS")
StablesQGIS <- merge(StablesQGIS, Stables_Select, by="sStable")
StablesQGIS[is.na(StablesQGIS)] <- 0
write.csv(StablesQGIS, "C:/Users/ddpue/Documents/Spatial Optimization Flanders/GIS/StablesQGIS.csv")
##NIS
# per-NIS deposition scores and emissions, one column per scenario
ADSNIS <- dcast(dADSNIS, sNIS~sScen)
colnames(ADSNIS) <- c("sNIS", "ADS_scen1", "ADs_scen2", "ADS_scen3", "ADS_scen4")
EmissionNIS <- dcast(dEmissionNIS, sNIS~sScen)
colnames(EmissionNIS) <- c("sNIS", "EmSc1", "EmSc2", "EmSc3", "EmSc4")
EmissionNIS <- merge(EmissionNIS, dMaxEmissionNIS, by = "sNIS")
DataNISQGIS <- merge(EmissionNIS, ADSNIS, by="sNIS")
# NOTE(review): mid-script library() call; consider moving to the setup block
library(stringi)
# drop the first character of the NIS code -- presumably a non-numeric prefix
# added on the GAMS side; confirm against the gdx set definition
DataNISQGIS$sNIS <- stri_sub(DataNISQGIS$sNIS, 2)
DataNISQGIS[is.na(DataNISQGIS)] <- 0
write.csv(DataNISQGIS, "C:/Users/ddpue/Documents/Spatial Optimization Flanders/GIS/DataNISQGIS.csv")
################################################################################
#Dynamic simulation (2016-2035)
################################################################################
#Clear environment
rm(list = ls())
# GAMS parameters to pull out of the dynamic-run gdx.
Pars <- c("dAmmoniaEmissionExploitation", "dAmmoniaEmissionRegion", "dImpactExploitation",
          "dImpactRegion", "dAnimalGroup", "dLivestockUnits")
# lapply() + setNames() instead of sapply(): always returns a named list,
# independent of the shapes of the individual results.
DynamicData <- lapply(setNames(Pars, Pars), function(p) {
  rgdx.param("resultsDynamicS1.gdx", p, squeeze = FALSE)
})
# Unpack the data frames into the global environment by parameter name.
list2env(DynamicData, envir = .GlobalEnv)
# Region-level ammonia emission per scenario and year.
colnames(dAmmoniaEmissionRegion) <- c("Scenario", "Year", "Emission")
# Year is read back as a factor/character; convert via character so we get the
# year value, not a factor level code.
dAmmoniaEmissionRegion$Year <- as.numeric(as.character(dAmmoniaEmissionRegion$Year))
# ScenarioN code -> descriptive label lookup (reused for the other dynamic
# tables below).
original <- paste("Scenario", c(1:6), sep="")
new <- c("<5% SS", "PAN", "BAT", "SO max NH3", "SO ref", "SO NEC")
dAmmoniaEmissionRegion$Scenario <- mgsub(original, new, dAmmoniaEmissionRegion$Scenario)
ggplot(data = subset(dAmmoniaEmissionRegion, Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
       aes(x = Year, y = Emission, group = Scenario, colour = Scenario, shape = Scenario)) +
  geom_line(size = 1) +
  geom_point(size = 3) +
  #scale_colour_discrete(labels = c(1:6))+
  # legend.title was specified twice (element_blank + element_text); keep blank
  theme(legend.title = element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18)) +
  # fixed reference levels (kg NH3/yr) -- presumably the baseline and the
  # reduction target; TODO confirm their provenance
  geom_hline(yintercept = 32155570, colour = "DarkBlue", lty = "dashed") +
  geom_hline(yintercept = 28546271, colour = "Red", lty = "dashed") +
  ylab("Total ammonia emission kg NH3/yr") + xlab(NULL)
colnames(dImpactRegion) <- c("Scenario", "Year", "TotalADS")
dImpactRegion$Year <- as.character(dImpactRegion$Year)
dImpactRegion$Year <- as.numeric(dImpactRegion$Year)
dImpactRegion$Scenario <- mgsub(original, new, dImpactRegion$Scenario)
ggplot(data=subset(dImpactRegion, Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
aes(x=Year, y=TotalADS, group=Scenario, colour=Scenario, shape=Scenario))+
geom_line(size=1) +
geom_point(size=3)+
theme(legend.title=element_blank(),
plot.title = element_text(size = 15, face = "bold"),
axis.title = element_text(size = 18),
legend.title = element_text(size = 18),
legend.text = element_text(size = 18),
axis.text = element_text(size = 18))+
ylab("Total ADS") + xlab(NULL)
Ammonia_Impact <- dAmmoniaEmissionRegion[,1:2]
Ammonia_Impact$Ratio <- dImpactRegion$TotalADS/dAmmoniaEmissionRegion$Emission * 10^6
Ammonia_Impact$Scenario <- mgsub(original, new, Ammonia_Impact$Scenario)
ggplot(data=subset(Ammonia_Impact, Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
aes(x=Year, y=Ratio, group=Scenario, colour=Scenario, shape=Scenario))+
geom_line(size=1) +
geom_point(size=3)+
theme(legend.title=element_blank(),
plot.title = element_text(size = 15, face = "bold"),
axis.title = element_text(size = 18),
legend.title = element_text(size = 18),
legend.text = element_text(size = 18),
axis.text = element_text(size = 18))+
ylab("ADS/kton NH3") + xlab(NULL)
colnames(dLivestockUnits) <- c("Scenario", "Year", "TotalLSU")
dLivestockUnits$Year <- as.character(dLivestockUnits$Year)
dLivestockUnits$Year <- as.numeric(dLivestockUnits$Year)
dLivestockUnits$Scenario <- mgsub(original, new, dLivestockUnits$Scenario)
ggplot(data=subset(dLivestockUnits, Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
aes(x=Year, y=TotalLSU, group=Scenario, colour=Scenario, shape=Scenario))+
geom_line(size=1) +
geom_point(size=3)+
theme(legend.title=element_blank(),
plot.title = element_text(size = 15, face = "bold"),
axis.title = element_text(size = 18),
legend.title = element_text(size = 18),
legend.text = element_text(size = 18),
axis.text = element_text(size = 18))+
ylab("Total LSU") + xlab(NULL)
colnames(dAnimalGroup) <- c("Sector", "Scenario", "Year", "Number")
dAnimalGroup$Year <- as.character(dAnimalGroup$Year)
dAnimalGroup$Year <- as.numeric(dAnimalGroup$Year)
dAnimalGroup$Scenario <- mgsub(original, new, dAnimalGroup$Scenario)
ggplot(data=subset(dAnimalGroup, Sector=="Runderen" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC") ),
aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
geom_line(size=1) +
geom_point(size=3)+
theme(legend.title=element_blank(),
plot.title = element_text(size = 15, face = "bold"),
axis.title = element_text(size = 18),
legend.title = element_text(size = 18),
legend.text = element_text(size = 18),
axis.text = element_text(size = 18))+
ylab("Total number of Cattle") + xlab(NULL)
ggplot(data=subset(dAnimalGroup, Sector=="Varkens" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC") ),
aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
geom_line(size=1) +
geom_point(size=3)+
theme(legend.title=element_blank(),
plot.title = element_text(size = 15, face = "bold"),
axis.title = element_text(size = 18),
legend.title = element_text(size = 18),
legend.text = element_text(size = 18),
axis.text = element_text(size = 18))+
ylab("Total number of Pigs") + xlab(NULL)
ggplot(data=subset(dAnimalGroup, Sector=="Pluimvee" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC") ),
aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
geom_line(size=1) +
geom_point(size=3)+
theme(legend.title=element_blank(),
plot.title = element_text(size = 15, face = "bold"),
axis.title = element_text(size = 18),
legend.title = element_text(size = 18),
legend.text = element_text(size = 18),
axis.text = element_text(size = 18))+
ylab("Total number of Poultry") + xlab(NULL)
ggplot(data=subset(dAnimalGroup, Sector=="Paarden" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC") ),
aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
geom_line(size=1) +
geom_point(size=3)+
theme(legend.title=element_blank(),
plot.title = element_text(size = 15, face = "bold"),
axis.title = element_text(size = 18),
legend.title = element_text(size = 18),
legend.text = element_text(size = 18),
axis.text = element_text(size = 18))+
ylab("Total number of Horses") + xlab(NULL)
ggplot(data=subset(dAnimalGroup, Sector=="Andere" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
geom_line(size=1) +
geom_point(size=3)+
theme(legend.title=element_blank(),
plot.title = element_text(size = 15, face = "bold"),
axis.title = element_text(size = 18),
legend.title = element_text(size = 18),
legend.text = element_text(size = 18),
axis.text = element_text(size = 18))+
ylab("Total number of Other") + xlab(NULL)
################################################################################
#Effect random draw year of permit (3 iterations)
################################################################################
#Clear environment
rm(list = ls())
Pars <- c("dAmmoniaEmissionRegion",
          "dImpactRegion", "dLivestockUnits", "dAmmoniaEmissionExploitation")
# Helper: read every parameter in Pars from one GDX file into a named list
read_seed <- function(gdx) {
  sapply(Pars, function(par) rgdx.param(gdx, par, squeeze = FALSE))
}
# Seed 1: initialise the pooled tables
DataS1 <- read_seed("resultsDynamicS1.gdx")
list2env(DataS1, envir = .GlobalEnv)
dAmmoniaEmissionRegion$Iteration <- "Seed1"
Ammonia <- dAmmoniaEmissionRegion
dImpactRegion$Iteration <- "Seed1"
Impact <- dImpactRegion
dLivestockUnits$Iteration <- "Seed1"
Livestock <- dLivestockUnits
# Seed 2: append
DataS2 <- read_seed("resultsDynamicS2.gdx")
list2env(DataS2, envir = .GlobalEnv)
dAmmoniaEmissionRegion$Iteration <- "Seed2"
dImpactRegion$Iteration <- "Seed2"
dLivestockUnits$Iteration <- "Seed2"
Ammonia <- rbind(Ammonia, dAmmoniaEmissionRegion)
Impact <- rbind(Impact, dImpactRegion)
Livestock <- rbind(Livestock, dLivestockUnits)
# Seed 3: append
DataS3 <- read_seed("resultsDynamicS3.gdx")
list2env(DataS3, envir = .GlobalEnv)
dAmmoniaEmissionRegion$Iteration <- "Seed3"
dImpactRegion$Iteration <- "Seed3"
dLivestockUnits$Iteration <- "Seed3"
Ammonia <- rbind(Ammonia, dAmmoniaEmissionRegion)
Impact <- rbind(Impact, dImpactRegion)
Livestock <- rbind(Livestock, dLivestockUnits)
# Human-readable scenario labels (note: Scenario5 is "SO min ADS" here)
original <- paste0("Scenario", 1:6)
new <- c("<5% SS", "PAN", "BAT", "SO max NH3", "SO min ADS", "SO NEC")
colnames(Ammonia) <- c("Scenario", "Year", "Emission", "Iteration")
Ammonia$Scenario <- mgsub(original, new, Ammonia$Scenario)
Ammonia$Year <- as.numeric(as.character(Ammonia$Year))
# NOTE(review): this section maps Scenario5 to "SO min ADS", but the subsets
# below filtered on "SO ref" (the label of the previous section), so scenario 5
# was silently dropped from every plot. Fixed by filtering on "SO min ADS".
# Dead duplicate legend.title theme entries (element_text after element_blank,
# which ggplot2 ignores) have also been removed.
ggplot(data=subset(Ammonia, Scenario %in% c("<5% SS", "PAN", "BAT", "SO min ADS", "SO NEC")),
       aes(x=Year, y=Emission, group=Scenario, colour=Scenario, shape=Iteration))+
  geom_point(size=3)+
  #scale_colour_discrete(labels = c(1:6))+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  geom_hline(yintercept=32155570, colour="DarkBlue", lty="dashed")+
  geom_hline(yintercept=28546271, colour="Red", lty="dashed")+
  ylab("Total ammonia emission kg NH3/yr") + xlab(NULL)
colnames(Impact) <- c("Scenario", "Year", "ADS", "Iteration")
Impact$Scenario <- mgsub(original, new, Impact$Scenario)
Impact$Year <- as.character(Impact$Year)
Impact$Year <- as.numeric(Impact$Year)
ggplot(data=subset(Impact, Scenario %in% c("<5% SS", "PAN", "BAT", "SO min ADS", "SO NEC")),
       aes(x=Year, y=ADS, group=Scenario, colour=Scenario, shape=Iteration))+
  geom_point()+
  #scale_colour_discrete(labels = c(1:6))+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total ADS") + xlab(NULL)
# Impact intensity per iteration; assumes Impact and Ammonia are row-aligned
# (same GDX record order in every seed) -- TODO confirm
Ammonia_Impact <- Ammonia[,c("Scenario", "Year", "Iteration")]
Ammonia_Impact$Ratio <- Impact$ADS/Ammonia$Emission * 10^6
Ammonia_Impact$Scenario <- mgsub(original, new, Ammonia_Impact$Scenario)  # no-op: labels already replaced
ggplot(data=subset(Ammonia_Impact, Scenario %in% c("<5% SS", "PAN", "BAT", "SO min ADS", "SO NEC")),
       aes(x=Year, y=Ratio, group=Scenario, colour=Scenario, shape=Iteration))+
  geom_point()+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("ADS/kton NH3") + xlab(NULL)
colnames(Livestock) <- c("Scenario", "Year", "LSU", "Iteration")
Livestock$Scenario <- mgsub(original, new, Livestock$Scenario)
Livestock$Year <- as.character(Livestock$Year)
Livestock$Year <- as.numeric(Livestock$Year)
ggplot(data=subset(Livestock, Scenario %in% c("<5% SS", "PAN", "BAT", "SO min ADS", "SO NEC")),
       aes(x=Year, y=LSU, group=Scenario, colour=Scenario, shape=Iteration))+
  geom_point()+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total LSU") + xlab(NULL)
#Plot variance in impact
# VarRatio <- aggregate(LSU ~ Scenario + Year, data=Livestock, var)
#
# ggplot(data=VarRatio,
#        aes(x=Year, y=LSU, group=Scenario, colour=Scenario, shape=Scenario))+
#   geom_line()+
#   geom_point()+
#   theme(legend.title=element_blank(),
#         plot.title = element_text(size = 15, face = "bold"),
#         axis.title = element_text(size = 18),
#         legend.title = element_text(size = 18),
#         legend.text = element_text(size = 18),
#         axis.text = element_text(size = 18))+
#   ylab("Var ADS/kt NH3") + xlab(NULL)
|
/PostProcessing_NonEconomic.R
|
no_license
|
DaafDP/SO_Flanders
|
R
| false
| false
| 28,595
|
r
|
##Reading in results from non-economic simulation on all
##livestock exploitations in Flanders
#Including dynamic simulation 2016-2035
#Required packages + defining GAMS directory
library(gdxrrw)    # rgdx.param(): read parameters from GAMS .gdx result files
library(reshape2)  # dcast()/melt() for wide/long reshaping
library(ggplot2)   # plotting
library(scales)
library(qdap)      # mgsub(): multi-pattern string replacement for scenario labels
# NOTE(review): brewer.pal() is called further down but RColorBrewer is never
# attached here -- confirm it is loaded elsewhere or add library(RColorBrewer).
igdx("C:/GAMS/win64/24.7/")
#Clear environment
rm(list = ls())
# Stable-level input data (coordinates, ADS, SS, stable types)
# NOTE(review): the next section immediately runs rm(list = ls()), discarding
# this Stables object; it is re-read later where it is actually used.
Stables <- read.csv("C:/Users/ddpue/Documents/Spatial Optimization Flanders/DataHandling_VLM/StablesS1.csv")
#Set new working directory
setwd("C:/Users/ddpue/Documents/Spatial Optimization Flanders/ResultsNonEconomic")
################################################################################
#First: effect of random allocation (NIS based or totally random)
################################################################################
#Clear environment
rm(list = ls())
# Aggregate outcomes per seed/scenario for the NIS-based allocation
# (values transcribed from the optimisation runs)
AggregateResults <- data.frame(
  Seed          = paste("NISbased_seed", c(rep(1,3), rep(2,3), rep(3,3)), sep=""),
  Scenario      = paste("scenario", rep(c(1,2,3), 3), sep=""),
  Emission      = c(30.8, 31.60, 30.80, 30.86, 31.64, 30.86,
                    30.73, 31.57, 30.73),
  ClosedStables = c(856, 2081, 3287, 825, 2033,
                    3220, 837, 2193, 3401),
  Impact        = c(20122, 20122, 17590, 20204, 20204, 17706,
                    20105, 20105, 17471),
  Allocation    = "NISbased"
)
# Same outcomes for the fully random allocation
AggregateResultsRandom <- data.frame(
  Seed          = paste("FullyRandom_seed", c(rep(1,3), rep(2,3), rep(3,3)), sep=""),
  Scenario      = paste("scenario", rep(c(1,2,3), 3), sep=""),
  Emission      = c(30.46, 31.37, 30.46, 30.48, 31.40, 30.48,
                    30.52, 31.39, 30.52),
  ClosedStables = c(1113, 1954, 3864, 977, 1851, 3165,
                    1093, 1940, 3085),
  Impact        = c(23138, 23138, 19937, 23351, 23351, 20110,
                    23159, 23159, 20063),
  Allocation    = "FullyRandom"
)
AggregateResults <- rbind(AggregateResults, AggregateResultsRandom)
# Long format: one row per (Seed, Scenario, Allocation, variable)
AggregateResults <- melt(AggregateResults)
AggregateResults$Scenario <- mgsub(c("scenario1", "scenario2", "scenario3"),
                                   c("<5% SS", "SO max NH3", "SO min ADS"),
                                   AggregateResults$Scenario)
# One comparison plot per outcome variable: value by scenario, one line per
# seed, with line type / point shape distinguishing the allocation methods
plot_allocation <- function(dat, y_label) {
  ggplot(data=dat,
         aes(x=Scenario, y=value, group=Seed, colour=Seed, shape=Allocation))+
    geom_line(aes(linetype=Allocation)) +
    geom_point()+
    theme(legend.title=element_blank())+
    ylab(y_label) + xlab(NULL)
}
plot_allocation(subset(AggregateResults, variable=="Emission" & Scenario %in% c("<5% SS", "SO max NH3")),
                "Total Ammonia Emission (kton/yr)")
plot_allocation(subset(AggregateResults, variable=="ClosedStables"),
                "Number of empty stables")
plot_allocation(subset(AggregateResults, variable=="Impact" & Scenario %in% c("<5% SS", "SO min ADS")),
                "Impact (total aggregate deposition score)")
################################################################################
#Results non-dynamic simulation
################################################################################
#Read in results (NIS-based allocation, seed 1)
Pars <- c("dSignificanceScore", "dADS", "dTotalADS", "dPercentageOccupied", "dPercentageOccupiedRegion",
          "dClosedStables", "dClosedExploitations","dEmissionStable",
          "dEmissionExploitation", "dEmissionRegion", "dAnimals",
          "dStableTypesNIS", "dAnimalsNIS", "dAnimalGroupNIS",
          "dEmissionNIS", "dADSNIS", "dEmissionAnimal", "dEmissionStableType", "dEmissionAnimalGroup",
          "dMarginalSignificanceScore", "dMaxEmissionStable", "dMaxEmissionExploitation", "dMaxEmissionNIS",
          "dMaxEmissionAnimalCategory","dMaxEmissionSector", "dMaxEmissionStableType", "dMaxAnimalsGroup", "dAnimalsGroup")
# Read each parameter from results.gdx into a named list, then promote every
# element to a data frame in the global environment
AllData <- sapply(Pars, function(par) rgdx.param("results.gdx", par, squeeze = FALSE))
list2env(AllData, envir = .GlobalEnv)
##Emission per sector (cattle, pigs, poultry, others, horses)
colnames(dMaxEmissionSector) <- c("Sector", "Emission")
# Tag the theoretical-maximum records so they can sit in the same table
dMaxEmissionSector$Scenario <- "max"
colnames(dEmissionAnimalGroup) <- c("Sector","Scenario","Emission")
dEmissionAnimalGroup <- rbind.data.frame(dMaxEmissionSector, dEmissionAnimalGroup)
# Translate the Dutch sector levels to English in one pass
sector_map <- c(Runderen = "Cattle", Varkens = "Pigs", Pluimvee = "Poultry",
                Andere = "Others", Paarden = "Horses")
lv <- levels(dEmissionAnimalGroup$Sector)
levels(dEmissionAnimalGroup$Sector) <- ifelse(lv %in% names(sector_map),
                                              sector_map[lv], lv)
# Human-readable scenario labels
dEmissionAnimalGroup$Scenario <- mgsub(c("sc1", "sc2", "sc3", "sc4"),
                                       c("<5% SS", "SO", "PAN", "BAT"),
                                       dEmissionAnimalGroup$Scenario)
# brewer.pal() lives in RColorBrewer, which was never attached (the header only
# loads gdxrrw/reshape2/ggplot2/scales/qdap) -- without this the next line fails
library(RColorBrewer)
SectorCols <- brewer.pal(5, name='Accent')
#All sectors together
ggplot(data=subset(dEmissionAnimalGroup, Scenario != "max"),
       aes(x=Scenario, y=Emission, group=Sector, colour=Sector, shape=Sector))+
  geom_line(lty="dotted") +
  geom_point(size=3)+
  theme(legend.title=element_blank(),  # dead duplicate legend.title entry removed
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total Ammonia Emission (kgNH3/yr)") + xlab(NULL)
ggplot(data=subset(dEmissionAnimalGroup, Scenario != "max"),
       aes(x=Scenario, y=Emission, fill=Sector))+
  geom_bar(colour="black", stat="identity")+
  scale_fill_manual(values=SectorCols)
#Relative to maximum
EmissionAnimalGroupRelative <- subset(dEmissionAnimalGroup, Scenario != "max")
# Row-wise ratio to the "max" scenario. After the earlier rbind the column
# order is (Sector, Emission, Scenario), so x[1] = Sector and x[2] = Emission.
# print() both echoes and returns the ratio.
EmissionAnimalGroupRelative$EmissionRelative <- apply(EmissionAnimalGroupRelative, 1, function(x){
  max <- as.numeric(subset(dEmissionAnimalGroup, Sector == x[1] & Scenario== "max")[2])
  print(as.numeric(x[2])/max)
})
ggplot(data=EmissionAnimalGroupRelative,
       aes(x=Scenario, y=EmissionRelative, group=Sector, colour=Sector, shape=Sector))+
  geom_line(lty="dotted") +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Relative NH3 emission (1=maximum)") + xlab(NULL)
ggplot(data=EmissionAnimalGroupRelative, aes(x=Scenario, y=EmissionRelative, fill=Sector))+
  geom_bar(stat="identity", position=position_dodge(), colour="black")+
  scale_fill_manual(values=SectorCols)
#ggsave("EmissionAnimalGroup_relative.png", dpi=400)
##Animal numbers per sector
colnames(dAnimalsGroup) <- c("Sector", "Scenario", "Number")
# Tag the theoretical-maximum records and align their column names for rbind
dMaxAnimalsGroup$Scenario <- "max"
colnames(dMaxAnimalsGroup) <- c("Sector", "Number", "Scenario")
# Human-readable scenario labels (applied before the "max" rows are appended)
dAnimalsGroup$Scenario <- mgsub(c("sc1", "sc2", "sc3", "sc4"),
                                c("<5% SS", "SO", "PAN", "BAT"),
                                dAnimalsGroup$Scenario)
dAnimalsGroup <- rbind.data.frame(dMaxAnimalsGroup, dAnimalsGroup)
# Translate the Dutch sector levels to English in one pass
sector_map <- c(Runderen = "Cattle", Varkens = "Pigs", Pluimvee = "Poultry",
                Andere = "Others", Paarden = "Horses")
lv <- levels(dAnimalsGroup$Sector)
levels(dAnimalsGroup$Sector) <- ifelse(lv %in% names(sector_map),
                                       sector_map[lv], lv)
# Animal numbers per sector under each scenario, with the theoretical maximum
# as a dashed red reference line. The reference value was stored in a variable
# named `max` (shadowing base::max) and reused for every plot; renamed to
# per-sector names and assigned with <-. Dead duplicate legend.title theme
# entries (ignored by ggplot2 after element_blank()) removed.
#Pigs
max_pigs <- as.numeric(subset(dAnimalsGroup, Sector=="Pigs" & Scenario=="max")[2])
ggplot(data=subset(dAnimalsGroup, Sector=="Pigs" & Scenario != "max"),
       aes(x=Scenario, y=Number, group=1))+
  geom_line(lty="dotted") +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  geom_hline(aes(yintercept=max_pigs), lty="dashed", colour="red")+
  ylab("Total number of pigs") + xlab(NULL)
#Cattle
max_cattle <- as.numeric(subset(dAnimalsGroup, Sector=="Cattle" & Scenario=="max")[2])
ggplot(data=subset(dAnimalsGroup, Sector=="Cattle" & Scenario != "max"),
       aes(x=Scenario, y=Number, group=1))+
  geom_line(lty="dotted") +
  geom_point(size=2)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  geom_hline(aes(yintercept=max_cattle), lty="dashed", colour="red")+
  ylab("Total number of cattle") + xlab(NULL)
#Poultry
max_poultry <- as.numeric(subset(dAnimalsGroup, Sector=="Poultry" & Scenario=="max")[2])
ggplot(data=subset(dAnimalsGroup, Sector=="Poultry" & Scenario != "max"),
       aes(x=Scenario, y=Number, group=1))+
  geom_line(lty="dotted") +
  geom_point(size=2)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  geom_hline(aes(yintercept=max_poultry), lty="dashed", colour="red")+
  ylab("Total number of poultry") + xlab(NULL)
#Other
max_others <- as.numeric(subset(dAnimalsGroup, Sector=="Others" & Scenario=="max")[2])
ggplot(data=subset(dAnimalsGroup, Sector=="Others" & Scenario != "max"),
       aes(x=Scenario, y=Number, group=1))+
  geom_line(lty="dotted") +
  geom_point(size=2)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  geom_hline(aes(yintercept=max_others), lty="dashed", colour="red")+
  ylab("Total number of others") + xlab(NULL)
#Horses
max_horses <- as.numeric(subset(dAnimalsGroup, Sector=="Horses" & Scenario=="max")[2])
ggplot(data=subset(dAnimalsGroup, Sector=="Horses" & Scenario != "max"),
       aes(x=Scenario, y=Number, group=1))+
  geom_line(lty="dotted") +
  geom_point(size=2)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  geom_hline(aes(yintercept=max_horses), lty="dashed", colour="red")+
  ylab("Total number of horses") + xlab(NULL)
#Relative to maximum
AnimalGroupRelative <- subset(dAnimalsGroup, Scenario != "max")
# Row-wise ratio to the "max" scenario. After the earlier rbind the column
# order is (Sector, Number, Scenario), so x[1] = Sector and x[2] = Number.
# print() both echoes and returns the computed ratio.
AnimalGroupRelative$NumberRelative <- apply(AnimalGroupRelative, 1, function(x){
  max <- as.numeric(subset(dAnimalsGroup, Sector == x[1] & Scenario== "max")[2])
  print(as.numeric(x[2])/max)
})
ggplot(data=AnimalGroupRelative,
       aes(x=Scenario, y=NumberRelative, group=Sector, colour=Sector, shape=Sector))+
  geom_line(lty='dotted') +
  geom_point(size=3)+
  theme(legend.title=element_blank(),  # dead duplicate legend.title entry removed
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Relative number of animals (1=maximum)") + xlab(NULL)
#Closed stables per stable type
# A stable is "closed" when its emission is zero in a scenario; count the
# closed stables per stable type, for each of the three scenarios
EmissionStable <- dcast(dEmissionStable, sStable~sScen)
EmissionStable[is.na(EmissionStable)] <- 0
Stables <- read.csv("C:/Users/ddpue/Documents/Spatial Optimization Flanders/DataHandling_VLM/StablesS1.csv")
StablesClosed <- Stables[c("StableType", "Name")]
colnames(StablesClosed) <- c("StableType", "sStable")
EmissionStable <- merge(EmissionStable, StablesClosed, by="sStable")
Sc1Closed <- as.data.frame(table(subset(EmissionStable, sc1 == 0)$StableType))
Sc2Closed <- as.data.frame(table(subset(EmissionStable, sc2 == 0)$StableType))
Sc3Closed <- as.data.frame(table(subset(EmissionStable, sc3 == 0)$StableType))
##Make files for QGIS
#Stables
# Per-stable emission per scenario plus the theoretical maximum, merged with
# coordinates and deposition/significance scores, zero-filled and written out
StablesQGIS <- dcast(dEmissionStable, sStable~sScen)
StablesQGIS <- merge(StablesQGIS, dMaxEmissionStable, by="sStable")
Stables_Select <- Stables[c("Name", "X", "Y", "ADS", "SS")]
colnames(Stables_Select) <- c("sStable", "X", "Y", "ADS", "SS")
StablesQGIS <- merge(StablesQGIS, Stables_Select, by="sStable")
StablesQGIS[is.na(StablesQGIS)] <- 0
write.csv(StablesQGIS, "C:/Users/ddpue/Documents/Spatial Optimization Flanders/GIS/StablesQGIS.csv")
##NIS
# Municipality (NIS) level ADS and emission per scenario in wide format
ADSNIS <- dcast(dADSNIS, sNIS~sScen)
# Fix: second column was misspelled "ADs_scen2"; all four now share one naming scheme
colnames(ADSNIS) <- c("sNIS", "ADS_scen1", "ADS_scen2", "ADS_scen3", "ADS_scen4")
EmissionNIS <- dcast(dEmissionNIS, sNIS~sScen)
colnames(EmissionNIS) <- c("sNIS", "EmSc1", "EmSc2", "EmSc3", "EmSc4")
EmissionNIS <- merge(EmissionNIS, dMaxEmissionNIS, by = "sNIS")
DataNISQGIS <- merge(EmissionNIS, ADSNIS, by="sNIS")
library(stringi)
# Drops the first character of sNIS (presumably a set-label prefix such as "n"
# so the code matches numeric NIS codes in QGIS -- TODO confirm)
DataNISQGIS$sNIS <- stri_sub(DataNISQGIS$sNIS, 2)
DataNISQGIS[is.na(DataNISQGIS)] <- 0
write.csv(DataNISQGIS, "C:/Users/ddpue/Documents/Spatial Optimization Flanders/GIS/DataNISQGIS.csv")
################################################################################
#Dynamic simulation
################################################################################
#Clear environment
rm(list = ls())
Pars <- c("dAmmoniaEmissionExploitation", "dAmmoniaEmissionRegion", "dImpactExploitation",
          "dImpactRegion", "dAnimalGroup", "dLivestockUnits")
# Read all dynamic-simulation parameters from the GDX file into a named list,
# then promote each element to a data frame in the global environment
DynamicData <- sapply(Pars, function(par) rgdx.param("resultsDynamicS1.gdx", par, squeeze = FALSE))
list2env(DynamicData, envir = .GlobalEnv)
colnames(dAmmoniaEmissionRegion) <- c("Scenario", "Year", "Emission")
# Year comes out of the GDX read as factor/character; go via character to
# recover the numeric year value
dAmmoniaEmissionRegion$Year <- as.numeric(as.character(dAmmoniaEmissionRegion$Year))
# Human-readable scenario labels
original <- paste0("Scenario", 1:6)
new <- c("<5% SS", "PAN", "BAT", "SO max NH3", "SO ref", "SO NEC")
dAmmoniaEmissionRegion$Scenario <- mgsub(original, new, dAmmoniaEmissionRegion$Scenario)
# --- Region-level time series per scenario ------------------------------------
# NOTE(review): every theme() in this section originally listed legend.title
# twice (element_blank() first, element_text(size = 18) later). ggplot2 uses
# the first entry, so the later duplicates were dead code and have been removed.
ggplot(data=subset(dAmmoniaEmissionRegion, Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
       aes(x=Year, y=Emission, group=Scenario, colour=Scenario, shape=Scenario))+
  geom_line(size=1) +
  geom_point(size=3)+
  #scale_colour_discrete(labels = c(1:6))+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  # Dashed reference lines: hard-coded emission ceilings in kg NH3/yr
  # (presumably policy targets -- TODO confirm the source of these constants)
  geom_hline(yintercept=32155570, colour="DarkBlue", lty="dashed")+
  geom_hline(yintercept=28546271, colour="Red", lty="dashed")+
  ylab("Total ammonia emission kg NH3/yr") + xlab(NULL)
# Total aggregate deposition score (ADS) per scenario over time
colnames(dImpactRegion) <- c("Scenario", "Year", "TotalADS")
dImpactRegion$Year <- as.character(dImpactRegion$Year)
dImpactRegion$Year <- as.numeric(dImpactRegion$Year)
dImpactRegion$Scenario <- mgsub(original, new, dImpactRegion$Scenario)
ggplot(data=subset(dImpactRegion, Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
       aes(x=Year, y=TotalADS, group=Scenario, colour=Scenario, shape=Scenario))+
  geom_line(size=1) +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total ADS") + xlab(NULL)
# Impact intensity: ADS per kton NH3. The element-wise division assumes the two
# frames are row-aligned (same GDX record order) -- TODO confirm
Ammonia_Impact <- dAmmoniaEmissionRegion[,1:2]
Ammonia_Impact$Ratio <- dImpactRegion$TotalADS/dAmmoniaEmissionRegion$Emission * 10^6
Ammonia_Impact$Scenario <- mgsub(original, new, Ammonia_Impact$Scenario)  # no-op: labels already replaced above
ggplot(data=subset(Ammonia_Impact, Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
       aes(x=Year, y=Ratio, group=Scenario, colour=Scenario, shape=Scenario))+
  geom_line(size=1) +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("ADS/kton NH3") + xlab(NULL)
# Total livestock units per scenario over time
colnames(dLivestockUnits) <- c("Scenario", "Year", "TotalLSU")
dLivestockUnits$Year <- as.character(dLivestockUnits$Year)
dLivestockUnits$Year <- as.numeric(dLivestockUnits$Year)
dLivestockUnits$Scenario <- mgsub(original, new, dLivestockUnits$Scenario)
ggplot(data=subset(dLivestockUnits, Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
       aes(x=Year, y=TotalLSU, group=Scenario, colour=Scenario, shape=Scenario))+
  geom_line(size=1) +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total LSU") + xlab(NULL)
# Animal numbers per sector (Dutch sector names retained: they match the GDX data)
colnames(dAnimalGroup) <- c("Sector", "Scenario", "Year", "Number")
dAnimalGroup$Year <- as.character(dAnimalGroup$Year)
dAnimalGroup$Year <- as.numeric(dAnimalGroup$Year)
dAnimalGroup$Scenario <- mgsub(original, new, dAnimalGroup$Scenario)
ggplot(data=subset(dAnimalGroup, Sector=="Runderen" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC") ),
       aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
  geom_line(size=1) +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total number of Cattle") + xlab(NULL)
ggplot(data=subset(dAnimalGroup, Sector=="Varkens" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC") ),
       aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
  geom_line(size=1) +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total number of Pigs") + xlab(NULL)
ggplot(data=subset(dAnimalGroup, Sector=="Pluimvee" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC") ),
       aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
  geom_line(size=1) +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total number of Poultry") + xlab(NULL)
ggplot(data=subset(dAnimalGroup, Sector=="Paarden" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC") ),
       aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
  geom_line(size=1) +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total number of Horses") + xlab(NULL)
ggplot(data=subset(dAnimalGroup, Sector=="Andere" & Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
       aes(x=Year, y=Number, group=Scenario, colour=Scenario, shape=Scenario))+
  geom_line(size=1) +
  geom_point(size=3)+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total number of Other") + xlab(NULL)
################################################################################
#Effect random draw year of permit (3 iterations)
################################################################################
#Clear environment
rm(list = ls())
Pars <- c("dAmmoniaEmissionRegion",
          "dImpactRegion", "dLivestockUnits", "dAmmoniaEmissionExploitation")
# Helper: read every parameter in Pars from one GDX file into a named list
read_seed <- function(gdx) {
  sapply(Pars, function(par) rgdx.param(gdx, par, squeeze = FALSE))
}
# Seed 1: initialise the pooled tables
DataS1 <- read_seed("resultsDynamicS1.gdx")
list2env(DataS1, envir = .GlobalEnv)
dAmmoniaEmissionRegion$Iteration <- "Seed1"
Ammonia <- dAmmoniaEmissionRegion
dImpactRegion$Iteration <- "Seed1"
Impact <- dImpactRegion
dLivestockUnits$Iteration <- "Seed1"
Livestock <- dLivestockUnits
# Seed 2: append
DataS2 <- read_seed("resultsDynamicS2.gdx")
list2env(DataS2, envir = .GlobalEnv)
dAmmoniaEmissionRegion$Iteration <- "Seed2"
dImpactRegion$Iteration <- "Seed2"
dLivestockUnits$Iteration <- "Seed2"
Ammonia <- rbind(Ammonia, dAmmoniaEmissionRegion)
Impact <- rbind(Impact, dImpactRegion)
Livestock <- rbind(Livestock, dLivestockUnits)
# Seed 3: append
DataS3 <- read_seed("resultsDynamicS3.gdx")
list2env(DataS3, envir = .GlobalEnv)
dAmmoniaEmissionRegion$Iteration <- "Seed3"
dImpactRegion$Iteration <- "Seed3"
dLivestockUnits$Iteration <- "Seed3"
Ammonia <- rbind(Ammonia, dAmmoniaEmissionRegion)
Impact <- rbind(Impact, dImpactRegion)
Livestock <- rbind(Livestock, dLivestockUnits)
# Human-readable scenario labels (note: Scenario5 is "SO min ADS" here)
original <- paste0("Scenario", 1:6)
new <- c("<5% SS", "PAN", "BAT", "SO max NH3", "SO min ADS", "SO NEC")
colnames(Ammonia) <- c("Scenario", "Year", "Emission", "Iteration")
Ammonia$Scenario <- mgsub(original, new, Ammonia$Scenario)
Ammonia$Year <- as.numeric(as.character(Ammonia$Year))
# NOTE(review): this section maps Scenario5 to "SO min ADS", but the subsets
# below filtered on "SO ref" (the label of the previous section), so scenario 5
# was silently dropped from every plot. Fixed by filtering on "SO min ADS".
# Dead duplicate legend.title theme entries (element_text after element_blank,
# which ggplot2 ignores) have also been removed.
ggplot(data=subset(Ammonia, Scenario %in% c("<5% SS", "PAN", "BAT", "SO min ADS", "SO NEC")),
       aes(x=Year, y=Emission, group=Scenario, colour=Scenario, shape=Iteration))+
  geom_point(size=3)+
  #scale_colour_discrete(labels = c(1:6))+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  geom_hline(yintercept=32155570, colour="DarkBlue", lty="dashed")+
  geom_hline(yintercept=28546271, colour="Red", lty="dashed")+
  ylab("Total ammonia emission kg NH3/yr") + xlab(NULL)
colnames(Impact) <- c("Scenario", "Year", "ADS", "Iteration")
Impact$Scenario <- mgsub(original, new, Impact$Scenario)
Impact$Year <- as.character(Impact$Year)
Impact$Year <- as.numeric(Impact$Year)
ggplot(data=subset(Impact, Scenario %in% c("<5% SS", "PAN", "BAT", "SO min ADS", "SO NEC")),
       aes(x=Year, y=ADS, group=Scenario, colour=Scenario, shape=Iteration))+
  geom_point()+
  #scale_colour_discrete(labels = c(1:6))+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("Total ADS") + xlab(NULL)
# Impact intensity per iteration; assumes Impact and Ammonia are row-aligned
# (same GDX record order in every seed) -- TODO confirm
Ammonia_Impact <- Ammonia[,c("Scenario", "Year", "Iteration")]
Ammonia_Impact$Ratio <- Impact$ADS/Ammonia$Emission * 10^6
Ammonia_Impact$Scenario <- mgsub(original, new, Ammonia_Impact$Scenario)  # no-op: labels already replaced
ggplot(data=subset(Ammonia_Impact, Scenario %in% c("<5% SS", "PAN", "BAT", "SO min ADS", "SO NEC")),
       aes(x=Year, y=Ratio, group=Scenario, colour=Scenario, shape=Iteration))+
  geom_point()+
  theme(legend.title=element_blank(),
        plot.title = element_text(size = 15, face = "bold"),
        axis.title = element_text(size = 18),
        legend.text = element_text(size = 18),
        axis.text = element_text(size = 18))+
  ylab("ADS/kton NH3") + xlab(NULL)
colnames(Livestock) <- c("Scenario", "Year", "LSU", "Iteration")
Livestock$Scenario <- mgsub(original, new, Livestock$Scenario)
Livestock$Year <- as.character(Livestock$Year)
Livestock$Year <- as.numeric(Livestock$Year)
ggplot(data=subset(Livestock, Scenario %in% c("<5% SS", "PAN", "BAT", "SO ref", "SO NEC")),
aes(x=Year, y=LSU, group=Scenario, colour=Scenario, shape=Iteration))+
geom_point()+
theme(legend.title=element_blank(),
plot.title = element_text(size = 15, face = "bold"),
axis.title = element_text(size = 18),
legend.title = element_text(size = 18),
legend.text = element_text(size = 18),
axis.text = element_text(size = 18))+
ylab("Total LSU") + xlab(NULL)
#Plot variance in impact
# VarRatio <- aggregate(LSU ~ Scenario + Year, data=Livestock, var)
#
# ggplot(data=VarRatio,
# aes(x=Year, y=LSU, group=Scenario, colour=Scenario, shape=Scenario))+
# geom_line()+
# geom_point()+
# theme(legend.title=element_blank(),
# plot.title = element_text(size = 15, face = "bold"),
# axis.title = element_text(size = 18),
# legend.title = element_text(size = 18),
# legend.text = element_text(size = 18),
# axis.text = element_text(size = 18))+
# ylab("Var ADS/kt NH3") + xlab(NULL)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scale_WMRR.R
\name{scaleWMRR}
\alias{scaleWMRR}
\title{Scaling by wavelet multiresolution regression (WMRR)}
\usage{
scaleWMRR(
formula,
family,
data,
coord,
scale = 1,
detail = TRUE,
wavelet = "haar",
wtrafo = "dwt",
b.ini = NULL,
pad = list(),
control = list(),
moran.params = list(),
trace = FALSE
)
}
\arguments{
\item{formula}{With specified notation according to names in data frame.}
\item{family}{\code{gaussian}, \code{binomial}, or \code{poisson}.}
\item{data}{Data frame.}
\item{coord}{Corresponding coordinates which have to be integer.}
\item{scale}{0 (which is equivalent to GLM) or
higher integers possible (limit depends on sample size).}
\item{detail}{Remove smooth wavelets? If \code{TRUE}, only detail components are analyzed.
If set to \code{FALSE}, smooth and detail components are analyzed. Default is \code{TRUE}.}
\item{wavelet}{Type of wavelet: \code{haar} or \code{d4} or \code{la8}}
\item{wtrafo}{Type of wavelet transform: \code{dwt} or \code{modwt}.}
\item{b.ini}{Initial parameter values. Default is \code{NULL}.}
\item{pad}{A list of parameters for padding wavelet coefficients.
\itemize{
\item{padform} - 0, 1, and 2 are possible.
\code{padform} is automatically set to
0 when either \code{level}=0 or
the \code{formula} includes an intercept and has a non-\code{gaussian}
\code{family}.
\itemize{
\item{0} - Padding with 0s.
\item{1} - Padding with mean values.
\item{2} - Padding with mirror values.
}
\item{padzone} - Factor for expanding the padding zone
}}
\item{control}{A list of parameters for controlling the fitting process.
\itemize{
\item{\code{eps}} - Positive convergence tolerance. Smaller values of
\code{eps} provide better parameter estimates, but also reduce the probability
of the iterations converging. In case of issues with convergence, test larger
values of \code{eps}. Default is 10^-5.
\item{\code{denom.eps}} - Default is 10^-20.
\item{\code{itmax}} - Integer giving the maximum number of iterations.
Default is 200.
}}
\item{moran.params}{A list of parameters for calculating Moran's I.
\itemize{
\item\code{lim1} - Lower limit for first bin. Default is 0.
\item\code{increment} - Step size for calculating Moran's I. Default is 1.
}}
\item{trace}{A logical value indicating whether to print parameter estimates
to the console}
}
\value{
scaleWMRR returns a list containing the following elements
\describe{
\item{\code{call}}{Model call}
\item{\code{b}}{Estimates of regression parameters}
\item{\code{s.e.}}{Standard errors of the parameter estimates}
\item{\code{z}}{Z values (or corresponding values for statistics)}
\item{\code{p}}{p-values for each parameter estimate}
\item{\code{df}}{Degrees of freedom}
\item{\code{fitted}}{Fitted values}
\item{\code{resid}}{Pearson residuals}
\item{\code{converged}}{Logical value whether the procedure converged}
\item{\code{trace}}{Logical. If TRUE:}
\itemize{
\item\code{ac.glm} Autocorrelation of glm.residuals
\item\code{ac} Autocorrelation of wavelet.residuals
}
}
}
\description{
scaleWMRR performs a scale-specific regression based on a
wavelet multiresolution analysis.
}
\details{
This function fits generalized linear models while taking the
two-dimensional grid structure of
datasets into account. The following error distributions (in
conjunction with appropriate link functions) are allowed: \code{gaussian},
\code{binomial}, or \code{poisson}. The model provides scale-specific
results for data sampled on a contiguous geographical area. The
dataset is assumed to be regular gridded and the grid cells are
assumed to be square. A function from the package 'waveslim' is used
for the wavelet transformations (Whitcher, 2005).
Furthermore, this function requires that \strong{all predictor variables
be continuous}.
}
\examples{
data(carlinadata)
coords <- carlinadata[ ,4:5]
\dontrun{
# scaleWMRR at scale = 0 is equivalent to GLM
ms0 <- scaleWMRR(carlina.horrida ~ aridity + land.use,
family = "poisson",
data = carlinadata,
coord = coords,
scale = 0,
trace = TRUE)
# scale-specific regressions for detail components
ms1 <- scaleWMRR(carlina.horrida ~ aridity + land.use,
family = "poisson",
data = carlinadata,
coord = coords,
scale = 1,
trace = TRUE)
ms2 <- scaleWMRR(carlina.horrida ~ aridity + land.use,
family = "poisson",
data = carlinadata,
coord = coords,
scale = 2,
trace = TRUE)
ms3<- scaleWMRR(carlina.horrida ~ aridity + land.use,
family = "poisson",
data = carlinadata,
coord = coords,
scale = 3,
trace = TRUE)
}
}
\references{
Carl G, Doktor D, Schweiger O, Kuehn I (2016)
Assessing relative variable importance across different spatial
scales: a two-dimensional wavelet analysis.
Journal of Biogeography 43: 2502-2512.
Whitcher, B. (2005) Waveslim: basic wavelet routines for one-, two-
and three-dimensional signal processing. R package version 1.5.
}
\seealso{
\pkg{waveslim},\code{\link[waveslim]{mra.2d}}
}
\author{
Gudrun Carl
}
|
/man/scaleWMRR.Rd
|
no_license
|
cran/spind
|
R
| false
| true
| 5,637
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scale_WMRR.R
\name{scaleWMRR}
\alias{scaleWMRR}
\title{Scaling by wavelet multiresolution regression (WMRR)}
\usage{
scaleWMRR(
formula,
family,
data,
coord,
scale = 1,
detail = TRUE,
wavelet = "haar",
wtrafo = "dwt",
b.ini = NULL,
pad = list(),
control = list(),
moran.params = list(),
trace = FALSE
)
}
\arguments{
\item{formula}{With specified notation according to names in data frame.}
\item{family}{\code{gaussian}, \code{binomial}, or \code{poisson}.}
\item{data}{Data frame.}
\item{coord}{Corresponding coordinates which have to be integer.}
\item{scale}{0 (which is equivalent to GLM) or
higher integers possible (limit depends on sample size).}
\item{detail}{Remove smooth wavelets? If \code{TRUE}, only detail components are analyzed.
If set to \code{FALSE}, smooth and detail components are analyzed. Default is \code{TRUE}.}
\item{wavelet}{Type of wavelet: \code{haar} or \code{d4} or \code{la8}}
\item{wtrafo}{Type of wavelet transform: \code{dwt} or \code{modwt}.}
\item{b.ini}{Initial parameter values. Default is \code{NULL}.}
\item{pad}{A list of parameters for padding wavelet coefficients.
\itemize{
\item{padform} - 0, 1, and 2 are possible.
\code{padform} is automatically set to
0 when either \code{level}=0 or
the \code{formula} includes an intercept and has a non-\code{gaussian}
\code{family}.
\itemize{
\item{0} - Padding with 0s.
\item{1} - Padding with mean values.
\item{2} - Padding with mirror values.
}
\item{padzone} - Factor for expanding the padding zone
}}
\item{control}{A list of parameters for controlling the fitting process.
\itemize{
\item{\code{eps}} - Positive convergence tolerance. Smaller values of
\code{eps} provide better parameter estimates, but also reduce the probability
of the iterations converging. In case of issues with convergence, test larger
values of \code{eps}. Default is 10^-5.
\item{\code{denom.eps}} - Default is 10^-20.
\item{\code{itmax}} - Integer giving the maximum number of iterations.
Default is 200.
}}
\item{moran.params}{A list of parameters for calculating Moran's I.
\itemize{
\item\code{lim1} - Lower limit for first bin. Default is 0.
\item\code{increment} - Step size for calculating Moran's I. Default is 1.
}}
\item{trace}{A logical value indicating whether to print parameter estimates
to the console}
}
\value{
scaleWMRR returns a list containing the following elements
\describe{
\item{\code{call}}{Model call}
\item{\code{b}}{Estimates of regression parameters}
\item{\code{s.e.}}{Standard errors of the parameter estimates}
\item{\code{z}}{Z values (or corresponding values for statistics)}
\item{\code{p}}{p-values for each parameter estimate}
\item{\code{df}}{Degrees of freedom}
\item{\code{fitted}}{Fitted values}
\item{\code{resid}}{Pearson residuals}
\item{\code{converged}}{Logical value whether the procedure converged}
\item{\code{trace}}{Logical. If TRUE:}
\itemize{
\item\code{ac.glm} Autocorrelation of glm.residuals
\item\code{ac} Autocorrelation of wavelet.residuals
}
}
}
\description{
scaleWMRR performs a scale-specific regression based on a
wavelet multiresolution analysis.
}
\details{
This function fits generalized linear models while taking the
two-dimensional grid structure of
datasets into account. The following error distributions (in
conjunction with appropriate link functions) are allowed: \code{gaussian},
\code{binomial}, or \code{poisson}. The model provides scale-specific
results for data sampled on a contiguous geographical area. The
dataset is assumed to be regular gridded and the grid cells are
assumed to be square. A function from the package 'waveslim' is used
for the wavelet transformations (Whitcher, 2005).
Furthermore, this function requires that \strong{all predictor variables
be continuous}.
}
\examples{
data(carlinadata)
coords <- carlinadata[ ,4:5]
\dontrun{
# scaleWMRR at scale = 0 is equivalent to GLM
ms0 <- scaleWMRR(carlina.horrida ~ aridity + land.use,
family = "poisson",
data = carlinadata,
coord = coords,
scale = 0,
trace = TRUE)
# scale-specific regressions for detail components
ms1 <- scaleWMRR(carlina.horrida ~ aridity + land.use,
family = "poisson",
data = carlinadata,
coord = coords,
scale = 1,
trace = TRUE)
ms2 <- scaleWMRR(carlina.horrida ~ aridity + land.use,
family = "poisson",
data = carlinadata,
coord = coords,
scale = 2,
trace = TRUE)
ms3<- scaleWMRR(carlina.horrida ~ aridity + land.use,
family = "poisson",
data = carlinadata,
coord = coords,
scale = 3,
trace = TRUE)
}
}
\references{
Carl G, Doktor D, Schweiger O, Kuehn I (2016)
Assessing relative variable importance across different spatial
scales: a two-dimensional wavelet analysis.
Journal of Biogeography 43: 2502-2512.
Whitcher, B. (2005) Waveslim: basic wavelet routines for one-, two-
and three-dimensional signal processing. R package version 1.5.
}
\seealso{
\pkg{waveslim},\code{\link[waveslim]{mra.2d}}
}
\author{
Gudrun Carl
}
|
# Read the logged temperatures (raw values in hundredths of a degree C).
# header = FALSE: the log file has no header row.
temps <- read.csv("temp_log", header = FALSE, sep = ",")

# Flatten to a plain numeric vector so the script works whether the log is
# one value per line (single column) or one comma-separated line (single
# row). The original used length(temps), which counts data-frame COLUMNS
# and was therefore wrong for the one-value-per-line layout.
vals <- as.numeric(unlist(temps, use.names = FALSE)) / 100

# Create an image device on which to draw the plot
png(filename = "temp_log.png", height = 800, width = 800, res = 72)

# Plot the data points against a 0-based measurement index
plot(seq_along(vals) - 1, vals,
     ylab = "Temperature (degrees Celsius)",
     xlab = "Time (in measurement intervals)",
     type = "o", col = "black")

# Add a title to the plot
title(main = "Recorded Temperature Data")

# Close the device so the PNG file is written
dev.off()
|
/ADC/generatePlot.R
|
no_license
|
bilodeau/ECE477PROJECTS
|
R
| false
| false
| 461
|
r
|
# read in the CSV file
temps = read.csv("temp_log", header = FALSE, sep = ",");
# create an image device on which to draw the plot
png(filename="temp_log.png",height=800,width=800,res=72);
# plot the data points
plot(0:(length(temps)-1),temps/100,ylab="Temperature (degrees Celsius)",xlab="Time (in measurement intervals)",type='o',col='black');
# add a title to the plot
title(main="Recorded Temperature Data");
# close the device
dev.off();
|
# Functions used only for testing
# Step Size Expectation ---------------------------------------------------
# Assert that a line-search result `actual` matches the expected step.
# actual    - result list with $step (par, f, df, alpha) and $nfn.
# x, f, df  - expected parameters, objective value, and gradient.
# alpha     - expected step length (defaults to x).
# nfev      - expected number of function evaluations (compared exactly).
# tolerance - numeric tolerance for the approximate comparisons.
# NOTE: relies on testthat's expect_equal() being attached.
expect_step <- function(actual, x, f, df, alpha = x, nfev, tolerance = 1e-4) {
expect_equal(actual$step$par, x, tolerance = tolerance)
expect_equal(actual$step$f, f, tolerance = tolerance)
expect_equal(actual$step$df, df, tolerance = tolerance)
expect_equal(actual$step$alpha, alpha, tolerance = tolerance)
expect_equal(actual$nfn, nfev)
}
# Finite Difference -------------------------------------------------------

# Numerical gradient of `fn` at `par` via central differences.
#
# par     - numeric parameter vector.
# fn      - scalar-valued objective function of `par`.
# rel_eps - relative step size for nonzero coordinates; an absolute step
#           of 1e-3 is used where a coordinate is exactly zero.
#
# Returns a numeric vector the same length as `par`.
gfd <- function(par, fn, rel_eps = sqrt(.Machine$double.eps)) {
  grad <- numeric(length(par))
  for (idx in seq_along(par)) {
    orig_val <- par[idx]
    # Scale the step to the coordinate's magnitude, with an absolute
    # fallback when the coordinate is zero.
    step <- if (orig_val != 0) orig_val * rel_eps else 1e-3
    par[idx] <- orig_val + step
    f_hi <- fn(par)
    par[idx] <- orig_val - step
    f_lo <- fn(par)
    par[idx] <- orig_val
    grad[idx] <- (f_hi - f_lo) / (2 * step)
  }
  grad
}
# Build a gradient function approximating the gradient of `fn` via gfd().
# NOTE(review): `eps` is forwarded as gfd()'s *relative* step (rel_eps),
# so the 1e-3 default is a relative, not absolute, step size.
make_gfd <- function(fn, eps = 1.e-3) {
  function(par) gfd(par, fn, eps)
}
# Numerical Hessian of `fn` at `par` by finite differences.
#
# par     - numeric parameter vector.
# fn      - scalar-valued objective function of `par`.
# rel_eps - relative step size for nonzero coordinates; an absolute step
#           of 1e-3 is used when either coordinate is zero.
#
# Off-diagonal entries use the 4-point cross formula
#   (f(+,+) - f(+,-) - f(-,+) + f(-,-)) / (4 eps^2)
# filled symmetrically; diagonal entries use a 5-point stencil.
#
# NOTE(review): eps is min(oldxi, oldxj) * rel_eps -- a signed minimum,
# not a minimum of magnitudes, so the step is negative for negative
# coordinates. The formulas below are symmetric in eps so the result is
# unchanged, but the intent was presumably min(abs(...)) -- confirm.
hfd <- function(par, fn, rel_eps = sqrt(.Machine$double.eps)) {
hs <- matrix(0, nrow = length(par), ncol = length(par))
for (i in 1:length(par)) {
# Upper triangle only; values are mirrored into the lower triangle.
for (j in i:length(par)) {
oldxi <- par[i]
oldxj <- par[j]
if (oldxi != 0 && oldxj != 0) {
eps <- min(oldxi, oldxj) * rel_eps
}
else {
eps <- 1e-3
}
if (i != j) {
# Mixed partial d2f / (dxi dxj) via the 4-point cross formula.
par[i] <- par[i] + eps
par[j] <- par[j] + eps
fpp <- fn(par)
par[j] <- oldxj - eps
fpm <- fn(par)
par[i] <- oldxi - eps
par[j] <- oldxj + eps
fmp <- fn(par)
par[j] <- oldxj - eps
fmm <- fn(par)
par[i] <- oldxi
par[j] <- oldxj
val <- (fpp - fpm - fmp + fmm) / (4 * eps * eps)
hs[i, j] <- val
hs[j, i] <- val
}
else {
# Pure second derivative d2f / dxi^2 via a 5-point stencil.
f <- fn(par)
oldxi <- par[i]
par[i] <- oldxi + 2 * eps
fpp <- fn(par)
par[i] <- oldxi + eps
fp <- fn(par)
par[i] <- oldxi - 2 * eps
fmm <- fn(par)
par[i] <- oldxi - eps
fm <- fn(par)
par[i] <- oldxi
hs[i, i] <- (-fpp + 16 * fp - 30 * f + 16 * fm - fmm) / (12 * eps * eps)
}
}
}
hs
}
# Build a Hessian function approximating the Hessian of `fn` via hfd().
# NOTE(review): `eps` is forwarded as hfd()'s *relative* step (rel_eps).
make_hfd <- function(fn, eps = 1.e-3) {
  function(par) hfd(par, fn, eps)
}
# Bundle an objective with gradient and Hessian functions into the list
# form used by the tests. Missing gradient/Hessian functions are filled
# in with finite-difference approximations.
make_fg <- function(fn, gr = NULL, hs = NULL) {
  list(
    fn = fn,
    gr = if (is.null(gr)) make_gfd(fn) else gr,
    hs = if (is.null(hs)) make_hfd(fn) else hs
  )
}
# Rosenbrock ---------------------------------------------------------------

# Standard starting point for the Rosenbrock function.
rb0 <- c(-1.2, 1)

# Rosenbrock banana function (see the optim man page): objective,
# gradient, Hessian, reciprocal Hessian diagonal, and a combined
# fn+gr evaluator that shares intermediate terms.
rosenbrock_fg <- list(
  fn = function(x) {
    u <- x[2] - x[1] ^ 2
    v <- 1 - x[1]
    100 * u ^ 2 + v ^ 2
  },
  gr = function(x) {
    u <- x[2] - x[1] ^ 2
    c(-400 * x[1] * u - 2 * (1 - x[1]),
      200 * u)
  },
  hs = function(x) {
    d2xx <- 1200 * x[1] ^ 2 - 400 * x[2] + 2
    d2xy <- -400 * x[1]
    matrix(c(d2xx, d2xy, d2xy, 200), nrow = 2)
  },
  # Reciprocal of the Hessian diagonal.
  hi = function(x) {
    1 / c(1200 * x[1] ^ 2 - 400 * x[2] + 2, 200)
  },
  # Evaluate objective and gradient together.
  fg = function(x) {
    u <- x[2] - x[1] ^ 2
    v <- 1 - x[1]
    list(
      fn = 100 * u ^ 2 + v ^ 2,
      gr = c(-400 * x[1] * u - 2 * v,
             200 * u)
    )
  },
  n = 2
)

# Variant of the Rosenbrock bundle with no Hessian information supplied.
rosen_no_hess <- rosenbrock_fg
rosen_no_hess$hs <- NULL
rosen_no_hess$hi <- NULL
# log-sum-exp -------------------------------------------------------------
# http://papers.nips.cc/paper/5322-a-differential-equation-for-modeling-nesterovs-accelerated-gradient-method-theory-and-insights.pdf

# Generate a random log-sum-exp test problem.
# n    - number of parameters; m - number of linear terms;
# rho  - smoothing parameter; bvar - variance of the offset vector b.
# NOTE: stochastic -- the problem depends on the current RNG state.
log_sum_exp_fg <- function(n = 50, m = 200, rho = 20, bvar = 2) {
  amat <- matrix(stats::rnorm(m * n), nrow = m)
  offsets <- stats::rnorm(n = m, mean = 0, sd = sqrt(bvar))
  lse_fg(A = amat, b = offsets, rho = rho)
}
# Smooth and convex, but not strongly convex
# Log-sum-exp objective rho * log(sum(exp((Ax - b) / rho))) with its
# analytic gradient; a finite-difference gradient is attached as $grr
# for cross-checking.
lse_fg <- function(A, b, rho) {
  objective <- function(x) {
    rho * log(sum(exp((A %*% x - b) / rho)))
  }
  gradient <- function(x) {
    w <- exp((A %*% x - b) / rho)
    # Weighted column sums of A, normalized by the total weight.
    weighted <- sweep(A, 1, w, "*")
    as.vector(colSums(weighted) / sum(w))
  }
  out <- list(
    fn = objective,
    gr = gradient,
    A = A,
    b = b,
    rho = rho
  )
  out$grr <- make_gfd(fn = out$fn)
  out
}
# Function with unhelpful Hessian -----------------------------------------
# Beale's function (constants 1.5, 2.25, 2.625); its Hessian is indefinite
# over much of the plane, which makes naive Newton steps unreliable.
# Gradient and Hessian are supplied by finite differences.
tricky_fg <- function() {
  beale <- function(par) {
    p <- par[1]
    q <- par[2]
    (1.5 - p + p * q) ^ 2 +
      (2.25 - p + p * q ^ 2) ^ 2 +
      (2.625 - p + p * q ^ 3) ^ 2
  }
  list(fn = beale, gr = make_gfd(beale), hs = make_hfd(beale))
}
# Line Search Util --------------------------------------------------------
# Create Initial Step Value
#
# Given start parameters `x` and a search direction `pv`, build the step
# record for alpha = 0 used by the line-search tests. `d` is the
# directional derivative along `pv` (via `dot`, defined elsewhere in the
# package). `f` and `df` default to evaluating fg at `x`.
make_step0 <- function(fg, x, pv, f = fg$fn(x), df = fg$gr(x)) {
  list(
    x = x,
    alpha = 0,
    f = f,
    df = df,
    d = dot(pv, df)
  )
}
# More'-Thuente test functions --------------------------------------------
# Test Function 1 ---------------------------------------------------------
# phi(a) = -a / (a^2 + b),  phi'(a) = (a^2 - b) / (a^2 + b)^2,  b = 2
fn1 <- function(alpha, beta = 2) {
  denom <- alpha ^ 2 + beta
  -alpha / denom
}
gr1 <- function(alpha, beta = 2) {
  denom <- alpha ^ 2 + beta
  (alpha ^ 2 - beta) / denom ^ 2
}
# Combined value/gradient evaluator for test function 1.
fcn1 <- function(x) {
  list(f = fn1(x), g = gr1(x))
}
# Test Function 2 ---------------------------------------------------------
# phi(a) = (a + b)^5 - 2(a + b)^4,  phi'(a) = (a + b)^3 (5a + 5b - 8),  b = 0.004
fn2 <- function(alpha, beta = 0.004) {
  s <- alpha + beta
  s ^ 5 - 2 * s ^ 4
}
gr2 <- function(alpha, beta = 0.004) {
  s <- alpha + beta
  s ^ 3 * (5 * alpha + 5 * beta - 8)
}
# Combined value/gradient evaluator for test function 2.
fcn2 <- function(x) {
  list(f = fn2(x), g = gr2(x))
}
# Test Function 3 ---------------------------------------------------------
# phi(a)  = phi_0(a) + [2(1 - b) / (l pi)] sin(l pi a / 2)
# phi'(a) = phi_0'(a) + (1 - b) cos(l pi a / 2)
# where phi_0 is a smoothed absolute-value kink at a = 1:
#   phi_0(a) = 1 - a                      if a <= 1 - b   (phi_0' = -1)
#            = a - 1                      if a >= 1 + b   (phi_0' =  1)
#            = (a - 1)^2 / 2b + b / 2     otherwise       (phi_0' = (a - 1) / b)
# Defaults: b = 0.01, l = 39.
fn3 <- function(alpha, beta = 0.01, l = 39) {
  if (alpha <= 1 - beta) {
    base <- 1 - alpha
  } else if (alpha >= 1 + beta) {
    base <- alpha - 1
  } else {
    base <- (alpha - 1) ^ 2 / (2 * beta) + beta / 2
  }
  base + 2 * (1 - beta) * sin(l * pi * alpha * 0.5) / (l * pi)
}
gr3 <- function(alpha, beta = 0.01, l = 39) {
  dbase <- if (alpha <= 1 - beta) {
    -1
  } else if (alpha >= 1 + beta) {
    1
  } else {
    (alpha - 1) / beta
  }
  dbase + (1 - beta) * cos(l * pi * alpha * 0.5)
}
# Combined value/gradient evaluator for test function 3.
fcn3 <- function(x) {
  list(f = fn3(x), g = gr3(x))
}
# Utils for Test Functions 4-6 --------------------------------------------
# gamma(b) = sqrt(1 + b^2) - b
yanaig <- function(beta) {
  sqrt(1 + beta ^ 2) - beta
}
# sqrt((1 - a)^2 + b^2)
yanai1 <- function(alpha, beta) {
  sqrt((1 - alpha) ^ 2 + beta ^ 2)
}
# d/da of yanai1
gryanai1 <- function(alpha, beta) {
  (alpha - 1) / yanai1(alpha, beta)
}
# sqrt(a^2 + b^2)
yanai2 <- function(alpha, beta) {
  sqrt(alpha ^ 2 + beta ^ 2)
}
# d/da of yanai2
gryanai2 <- function(alpha, beta) {
  alpha / yanai2(alpha, beta)
}
# phi(a) = gamma(b1) sqrt((1 - a)^2 + b2^2) + gamma(b2) sqrt(a^2 + b1^2)
yanai <- function(alpha, beta1, beta2) {
  yanaig(beta1) * yanai1(alpha, beta2) +
    yanaig(beta2) * yanai2(alpha, beta1)
}
# phi'(a), assembled from the two branch derivatives above.
gryanai <- function(alpha, beta1, beta2) {
  yanaig(beta1) * gryanai1(alpha, beta2) +
    yanaig(beta2) * gryanai2(alpha, beta1)
}
# Test Function 4: b1 = b2 = 0.001 -----------------------------------------
fn4 <- function(alpha) {
  yanai(alpha, beta1 = 0.001, beta2 = 0.001)
}
gr4 <- function(alpha) {
  gryanai(alpha, beta1 = 0.001, beta2 = 0.001)
}
fcn4 <- function(x) {
  list(f = fn4(x), g = gr4(x))
}
# Test Function 5: b1 = 0.01, b2 = 0.001 -----------------------------------
fn5 <- function(alpha) {
  yanai(alpha, beta1 = 0.01, beta2 = 0.001)
}
gr5 <- function(alpha) {
  gryanai(alpha, beta1 = 0.01, beta2 = 0.001)
}
fcn5 <- function(x) {
  list(f = fn5(x), g = gr5(x))
}
# Test Function 6: b1 = 0.001, b2 = 0.01 -----------------------------------
fn6 <- function(alpha) {
  yanai(alpha, beta1 = 0.001, beta2 = 0.01)
}
gr6 <- function(alpha) {
  gryanai(alpha, beta1 = 0.001, beta2 = 0.01)
}
fcn6 <- function(x) {
  list(f = fn6(x), g = gr6(x))
}
# Convenience fn/gr bundles for the six More'-Thuente line-search test
# functions defined above.
f1 <- list(fn = fn1, gr = gr1)
f2 <- list(fn = fn2, gr = gr2)
f3 <- list(fn = fn3, gr = gr3)
f4 <- list(fn = fn4, gr = gr4)
f5 <- list(fn = fn5, gr = gr5)
f6 <- list(fn = fn6, gr = gr6)
|
/data/genthat_extracted_code/mize/tests/helper_util.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 9,170
|
r
|
# Functions used only for testing
# Step Size Expectation ---------------------------------------------------
expect_step <- function(actual, x, f, df, alpha = x, nfev, tolerance = 1e-4) {
expect_equal(actual$step$par, x, tolerance = tolerance)
expect_equal(actual$step$f, f, tolerance = tolerance)
expect_equal(actual$step$df, df, tolerance = tolerance)
expect_equal(actual$step$alpha, alpha, tolerance = tolerance)
expect_equal(actual$nfn, nfev)
}
# Finite Difference -------------------------------------------------------
gfd <- function(par, fn, rel_eps = sqrt(.Machine$double.eps)) {
g <- rep(0, length(par))
for (i in 1:length(par)) {
oldx <- par[i]
if (oldx != 0) {
eps <- oldx * rel_eps
}
else {
eps <- 1e-3
}
par[i] <- oldx + eps
fplus <- fn(par)
par[i] <- oldx - eps
fminus <- fn(par)
par[i] <- oldx
g[i] <- (fplus - fminus) / (2 * eps)
}
g
}
make_gfd <- function(fn, eps = 1.e-3) {
function(par) {
gfd(par, fn, eps)
}
}
hfd <- function(par, fn, rel_eps = sqrt(.Machine$double.eps)) {
hs <- matrix(0, nrow = length(par), ncol = length(par))
for (i in 1:length(par)) {
for (j in i:length(par)) {
oldxi <- par[i]
oldxj <- par[j]
if (oldxi != 0 && oldxj != 0) {
eps <- min(oldxi, oldxj) * rel_eps
}
else {
eps <- 1e-3
}
if (i != j) {
par[i] <- par[i] + eps
par[j] <- par[j] + eps
fpp <- fn(par)
par[j] <- oldxj - eps
fpm <- fn(par)
par[i] <- oldxi - eps
par[j] <- oldxj + eps
fmp <- fn(par)
par[j] <- oldxj - eps
fmm <- fn(par)
par[i] <- oldxi
par[j] <- oldxj
val <- (fpp - fpm - fmp + fmm) / (4 * eps * eps)
hs[i, j] <- val
hs[j, i] <- val
}
else {
f <- fn(par)
oldxi <- par[i]
par[i] <- oldxi + 2 * eps
fpp <- fn(par)
par[i] <- oldxi + eps
fp <- fn(par)
par[i] <- oldxi - 2 * eps
fmm <- fn(par)
par[i] <- oldxi - eps
fm <- fn(par)
par[i] <- oldxi
hs[i, i] <- (-fpp + 16 * fp - 30 * f + 16 * fm - fmm) / (12 * eps * eps)
}
}
}
hs
}
make_hfd <- function(fn, eps = 1.e-3) {
function(par) {
hfd(par, fn, eps)
}
}
make_fg <- function(fn, gr = NULL, hs = NULL) {
if (is.null(gr)) {
gr <- make_gfd(fn)
}
if (is.null(hs)) {
hs <- make_hfd(fn)
}
list(
fn = fn,
gr = gr,
hs = hs
)
}
# Rosenbrock ---------------------------------------------------------------
rb0 <- c(-1.2, 1)
# taken from the optim man page
rosenbrock_fg <- list(
fn = function(x) {
x1 <- x[1]
x2 <- x[2]
100 * (x2 - x1 * x1) ^ 2 + (1 - x1) ^ 2
},
gr = function(x) {
x1 <- x[1]
x2 <- x[2]
c(
-400 * x1 * (x2 - x1 * x1) - 2 * (1 - x1),
200 * (x2 - x1 * x1))
},
hs = function(x) {
xx <- 1200 * x[1] * x[1] - 400 * x[2] + 2
xy <- x[1] * -400
yy <- 200
matrix(c(xx, xy, xy, yy), nrow = 2)
},
hi = function(x) {
1 / c(1200 * x[1] * x[1] - 400 * x[2] + 2, 200)
},
fg = function(x) {
x1 <- x[1]
x2 <- x[2]
a <- (x2 - x1 * x1)
b <- 1 - x1
list(
fn = 100 * a * a + b * b,
gr = c(
-400 * x1 * a - 2 * b,
200 * a
)
)
},
n = 2
)
rosen_no_hess <- rosenbrock_fg
rosen_no_hess$hs <- NULL
rosen_no_hess$hi <- NULL
# log-sum-exp -------------------------------------------------------------
# http://papers.nips.cc/paper/5322-a-differential-equation-for-modeling-nesterovs-accelerated-gradient-method-theory-and-insights.pdf
log_sum_exp_fg <- function(n = 50, m = 200, rho = 20, bvar = 2) {
A <- matrix(stats::rnorm(m * n), nrow = m)
b <- stats::rnorm(n = m, mean = 0, sd = sqrt(bvar))
lse_fg(A = A, b = b, rho = rho)
}
# Smooth and convex, but not strongly convex
lse_fg <- function(A, b, rho) {
fn <- function(x) {
rho * log(sum(exp((A %*% x - b) / rho)))
}
gr <- function(x) {
rAxb <- exp((A %*% x - b) / rho)
mult <- sweep(A, 1, rAxb, "*")
num <- colSums(mult)
as.vector(num / sum(rAxb))
}
res <- list(
fn = fn,
gr = gr,
A = A,
b = b,
rho = rho
)
res$grr <- make_gfd(fn = res$fn)
res
}
# Function with unhelpful Hessian -----------------------------------------
tricky_fg <- function() {
res <- list(
fn = function(par) {
x1 <- par[1]
x2 <- par[2]
(1.5 - x1 + x1 * x2)^2 +
(2.25 - x1 + x1 * x2 * x2)^2 +
(2.625 - x1 + x1 * x2 * x2 * x2)^2
}
)
res$gr <- make_gfd(res$fn)
res$hs <- make_hfd(res$fn)
res
}
# Line Search Util --------------------------------------------------------
# Create Initial Step Value
#
# Given a set of start parameters and a search direction, initializes the
# step data. Utility function for testing.
make_step0 <- function(fg, x, pv, f = fg$fn(x), df = fg$gr(x)) {
list(
x = x,
alpha = 0,
f = f,
df = df,
d = dot(pv, df)
)
}
# More'-Thuente test functions --------------------------------------------
# Test Function 1 ---------------------------------------------------------
# 1 phi(a) = -(a) / (a^2 + b) phi'(a) = (a^2 - b) / (a^2 + b)^2 b = 2
fn1 <- function(alpha, beta = 2) {
-alpha / (alpha ^ 2 + beta)
}
gr1 <- function(alpha, beta = 2) {
(alpha ^ 2 - beta) / ((alpha ^ 2 + beta) ^ 2)
}
fcn1 <- function(x) {
list(f = fn1(x), g = gr1(x))
}
# Test Function 2 ---------------------------------------------------------
# 2 phi(a) = (a + b)^5 - 2(a + b)^4 phi'(a) = (a+b)^3*(5a+5b-8) b = 0.004
fn2 <- function(alpha, beta = 0.004) {
(alpha + beta) ^ 5 - 2 * (alpha + beta) ^ 4
}
gr2 <- function(alpha, beta = 0.004) {
(alpha + beta) ^ 3 * (5 * alpha + 5 * beta - 8)
}
fcn2 <- function(x) {
list(f = fn2(x), g = gr2(x))
}
# Test Function 3 ---------------------------------------------------------
# 3 phi(a) = phi_0(a) + [2(1-b)/(l*pi)]sin(l*pi*alpha*0.5)
# phi'(a) = phi_0'(a) + (1-b)cos(l*pi*alpha*0.5)
# phi_0(a) = 1 - a if a <= 1 - b phi_0'(a) = -1
# = a - 1 if a >= 1 + b phi_0'(a) = 1
# = (a-1)^2/2b + b/2 if a in [1-b,1+b] phi_0'(a) = a - 1 /b
# b = 0.01 l = 39
fn3 <- function(alpha, beta = 0.01, l = 39) {
if (alpha <= 1 - beta) {
phi_0 <- 1 - alpha
} else if (alpha >= 1 + beta) {
phi_0 <- alpha - 1
} else {
phi_0 <- (alpha - 1) ^ 2 / (2 * beta) + (beta / 2)
}
phi_0 + (2 * (1 - beta) * sin(l * pi * alpha * 0.5)) / (l * pi)
}
# phi'(a) = phi_0'(a) + (1-b)cos(l*pi*alpha*0.5)
gr3 <- function(alpha, beta = 0.01, l = 39) {
if (alpha <= 1 - beta) {
dphi_0 <- -1
} else if (alpha >= 1 + beta) {
dphi_0 <- 1
} else {
dphi_0 <- (alpha - 1) / beta
}
dphi_0 + (1 - beta) * cos(l * pi * alpha * 0.5)
}
fcn3 <- function(x) {
list(f = fn3(x), g = gr3(x))
}
# Utils for Test Functions 4-6 --------------------------------------------
# gamma(b) = (1+b^2)^1/2 - b
yanaig <- function(beta) {
sqrt(1 + beta ^ 2) - beta
}
yanai1 <- function(alpha, beta) {
sqrt((1 - alpha) ^ 2 + beta ^ 2)
}
gryanai1 <- function(alpha, beta) {
(alpha - 1) / yanai1(alpha, beta)
}
yanai2 <- function(alpha, beta) {
sqrt(alpha ^ 2 + beta ^ 2)
}
gryanai2 <- function(alpha, beta) {
alpha / yanai2(alpha, beta)
}
# phi(a) = gamma(b_1)[(1-a)^2 + b_2^2]^1/2 + gamma(b_2)[a^2 + b_1^2]^1/2
yanai <- function(alpha, beta1, beta2) {
yanaig(beta1) * yanai1(alpha, beta2) +
yanaig(beta2) * yanai2(alpha, beta1)
}
# phi'(a) = -[gamma(b_1)(1-a)]/sqrt[(1-a)^2 + b_2^2] + gamma(b_2)a/sqrt([a^2 + b_1^2])
gryanai <- function(alpha, beta1, beta2) {
(yanaig(beta1) * gryanai1(alpha, beta2)) + (yanaig(beta2) * gryanai2(alpha, beta1))
}
# Test Function 4 ---------------------------------------------------------
fn4 <- function(alpha) {
yanai(alpha, beta1 = 0.001, beta2 = 0.001)
}
gr4 <- function(alpha) {
gryanai(alpha, beta1 = 0.001, beta2 = 0.001)
}
fcn4 <- function(x) {
list(f = fn4(x), g = gr4(x))
}
# Test Function 5 ---------------------------------------------------------
fn5 <- function(alpha) {
yanai(alpha, beta1 = 0.01, beta2 = 0.001)
}
gr5 <- function(alpha) {
gryanai(alpha, beta1 = 0.01, beta2 = 0.001)
}
fcn5 <- function(x) {
list(f = fn5(x), g = gr5(x))
}
# Test Function 6 ---------------------------------------------------------
fn6 <- function(alpha) {
yanai(alpha, beta1 = 0.001, beta2 = 0.01)
}
gr6 <- function(alpha) {
gryanai(alpha, beta1 = 0.001, beta2 = 0.01)
}
fcn6 <- function(x) {
list(f = fn6(x), g = gr6(x))
}
f1 <- list(fn = fn1, gr = gr1)
f2 <- list(fn = fn2, gr = gr2)
f3 <- list(fn = fn3, gr = gr3)
f4 <- list(fn = fn4, gr = gr4)
f5 <- list(fn = fn5, gr = gr5)
f6 <- list(fn = fn6, gr = gr6)
|
#=================================================================================================#
# PLOT 4: Multiple plots
# Load packages used in script
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
require(dplyr)
require(lubridate)
require(reshape2)
# Set working directory
setwd("./Working_directory/Exploratory data analysis")
# Read data and convert to tbl_df for dplyr usage
hhpowerdata <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE)
hhpowerdata <- tbl_df(hhpowerdata)
# Convert first variable from factor to date
hhpowerdata$Date <- as.Date(hhpowerdata$Date, format = "%d/%m/%Y")
# Subset dataframe based on required dates only and save as new dataset
hhpowerdatasubset <- filter(hhpowerdata, Date =="2007-02-01" | Date== "2007-02-02")
# Create 2 rows and 2 columns for plots
# NOTE(review): mfrow fills panels ROW-wise (TL, TR, BL, BR). The third
# plot drawn below therefore lands bottom-LEFT, so the "PLOT BOTTOM RIGHT"
# and "PLOT BOTTOM LEFT" section labels are swapped relative to the actual
# panel positions -- confirm against the expected figure.
par(mfrow = c(2, 2))
# PLOT TOP LEFT
# Combine date and time and convert this variable to date and time format
hhpowerdatasubset$newdatetime <- paste(hhpowerdatasubset$Date, hhpowerdatasubset$Time)
hhpowerdatasubset$newdatetime <- ymd_hms(hhpowerdatasubset$newdatetime)
# Convert Global Active Power to numeric variable to enable plotting
hhpowerdatasubset$Global_active_power <- as.numeric(as.character(hhpowerdatasubset$Global_active_power))
# Plot the GAP by date and time (pch = NA suppresses point symbols)
plot(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Global_active_power, pch=NA_integer_,
xlab ="",ylab = "Global Active Power")
# Add lines to the graph
lines(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Global_active_power)
# PLOT TOP RIGHT
# Convert Voltage to numeric variable to enable plotting
hhpowerdatasubset$Voltage <- as.numeric(as.character(hhpowerdatasubset$Voltage))
# Plot the Voltage by date and time
plot(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Voltage, pch=NA_integer_,
xlab ="datetime",ylab = "Voltage")
# Add lines to the graph
lines(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Voltage)
# PLOT BOTTOM RIGHT
# Combine date and time and convert this variable to date and time format
# NOTE(review): newdatetime was already created above; these two lines
# recompute the same column and could be removed.
hhpowerdatasubset$newdatetime <- paste(hhpowerdatasubset$Date, hhpowerdatasubset$Time)
hhpowerdatasubset$newdatetime <- ymd_hms(hhpowerdatasubset$newdatetime)
# Subset data to contain only datetime and values of sub metering data (3 variables)
hhpowersub2 <- hhpowerdatasubset[,grep(("^Sub|newdatetime"), colnames(hhpowerdatasubset))]
# Melt subset data from wide to long for easier plotting
melteddata <- melt(hhpowersub2, id = "newdatetime")
# Plot melted data: empty frame first, then one line per sub-meter
with(melteddata, plot(newdatetime,value, type = "n", pch=NA_integer_,
xlab ="",ylab = "Energy sub metering"))
with(subset(melteddata, variable == "Sub_metering_1"), lines(newdatetime, value,
col = "black"))
with(subset(melteddata, variable == "Sub_metering_2"), lines(newdatetime, value,
col = "red"))
with(subset(melteddata, variable == "Sub_metering_3"), lines(newdatetime, value,
col = "blue"))
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),
lty=c(1,1), bty="n", cex=.5, y.intersp = 0.25)
# PLOT BOTTOM LEFT
# Convert Global reactive power to numeric variable to enable plotting
hhpowerdatasubset$Global_reactive_power <- as.numeric(as.character(hhpowerdatasubset$Global_reactive_power))
# Plot the Global reactive power by date and time
plot(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Global_reactive_power, pch=NA_integer_,
xlab ="datetime",ylab = "Global_reactive_power")
# Add lines to the graph
lines(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Global_reactive_power)
# Copy plot to PNG file
# NOTE(review): dev.copy() re-renders the screen device at the PNG device's
# default size; drawing directly to png() would give more predictable output.
dev.copy(png, file = "plot4.png")
dev.off()
|
/Plot4.R
|
no_license
|
HenkPret/ExData_Plotting1
|
R
| false
| false
| 3,904
|
r
|
#=================================================================================================#
# PLOT 4: 2x2 panel of household power consumption graphs.
# Load packages used in script.
# Fixed: library() instead of require() -- require() returns FALSE on a
# missing package rather than erroring, which surfaces later as confusing
# "could not find function" failures instead of failing loudly here.
library(dplyr)
library(lubridate)
library(reshape2)
# Set working directory
# NOTE(review): setwd() assumes this relative path exists under the caller's
# current working directory -- fragile outside the original author's machine.
setwd("./Working_directory/Exploratory data analysis")
# Read semicolon-separated data and convert to tbl_df for dplyr usage
# NOTE(review): tbl_df() is deprecated in current dplyr; as_tibble() is the
# modern replacement.
hhpowerdata <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE)
hhpowerdata <- tbl_df(hhpowerdata)
# Convert first variable from factor to Date (input format is day/month/year)
hhpowerdata$Date <- as.Date(hhpowerdata$Date, format = "%d/%m/%Y")
# Keep only the two required dates (1-2 Feb 2007) and save as a new dataset
hhpowerdatasubset <- filter(hhpowerdata, Date =="2007-02-01" | Date== "2007-02-02")
# Lay out a 2x2 grid so the four plots share one device
par(mfrow = c(2, 2))
# PLOT TOP LEFT: Global Active Power vs date-time
# Combine date and time, then parse the result as a date-time with lubridate
hhpowerdatasubset$newdatetime <- paste(hhpowerdatasubset$Date, hhpowerdatasubset$Time)
hhpowerdatasubset$newdatetime <- ymd_hms(hhpowerdatasubset$newdatetime)
# Convert Global Active Power to numeric (factor -> character -> numeric)
hhpowerdatasubset$Global_active_power <- as.numeric(as.character(hhpowerdatasubset$Global_active_power))
# Draw an empty frame first (pch = NA suppresses the point symbols)
plot(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Global_active_power, pch=NA_integer_,
     xlab ="",ylab = "Global Active Power")
# Overlay the line trace
lines(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Global_active_power)
# PLOT TOP RIGHT: Voltage vs date-time
# Convert Voltage to numeric (factor -> character -> numeric)
hhpowerdatasubset$Voltage <- as.numeric(as.character(hhpowerdatasubset$Voltage))
# Empty frame, then line trace, as above
plot(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Voltage, pch=NA_integer_,
     xlab ="datetime",ylab = "Voltage")
# Add lines to the graph
lines(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Voltage)
# PLOT BOTTOM RIGHT: the three sub-metering series on one axis
# Recombine date and time (newdatetime already holds this value from the
# first panel, so this recomputation is redundant but harmless)
hhpowerdatasubset$newdatetime <- paste(hhpowerdatasubset$Date, hhpowerdatasubset$Time)
hhpowerdatasubset$newdatetime <- ymd_hms(hhpowerdatasubset$newdatetime)
# Subset data to datetime plus the three Sub_metering_* columns
hhpowersub2 <- hhpowerdatasubset[,grep(("^Sub|newdatetime"), colnames(hhpowerdatasubset))]
# Melt subset data from wide to long so each series is keyed by `variable`
melteddata <- melt(hhpowersub2, id = "newdatetime")
# Empty frame (type = "n"), then one colored line per sub-metering series
with(melteddata, plot(newdatetime,value, type = "n", pch=NA_integer_,
                      xlab ="",ylab = "Energy sub metering"))
with(subset(melteddata, variable == "Sub_metering_1"), lines(newdatetime, value,
                                                             col = "black"))
with(subset(melteddata, variable == "Sub_metering_2"), lines(newdatetime, value,
                                                             col = "red"))
with(subset(melteddata, variable == "Sub_metering_3"), lines(newdatetime, value,
                                                             col = "blue"))
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),
       lty=c(1,1), bty="n", cex=.5, y.intersp = 0.25)
# PLOT BOTTOM LEFT: Global reactive power vs date-time
# Convert Global reactive power to numeric (factor -> character -> numeric)
hhpowerdatasubset$Global_reactive_power <- as.numeric(as.character(hhpowerdatasubset$Global_reactive_power))
# Empty frame, then line trace
plot(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Global_reactive_power, pch=NA_integer_,
     xlab ="datetime",ylab = "Global_reactive_power")
# Add lines to the graph
lines(hhpowerdatasubset$newdatetime, hhpowerdatasubset$Global_reactive_power)
# Copy the completed on-screen plot to a PNG file and close the PNG device
dev.copy(png, file = "plot4.png")
dev.off()
|
# Zero-based analogue of seq_len(): returns the integer sequence
# 0L, 1L, ..., len - 1L (an empty integer vector when len is 0), so it can
# be used for iterating through 0-indexed C vectors.
seq_len_0 <- function(len) {
  one_based <- seq_len(len)
  one_based - 1L
}
# seq_along() analogue for many COPASI vectors, which use 0-based indices.
# Returns integer(0) for an empty vector, otherwise 0L .. size-1L.
seq_along_v <- function(c_copasivector) {
  n <- c_copasivector$size()
  if (n > 0L) seq_len(n) - 1L else integer()
}
# Return the requested items of a wrapped std_vector as an R vector/list.
# `indices` are 1-based R positions (the 0-based COPASI positions shifted up
# by one) and default to every element of the vector.
get_sv <- function(c_vector, indices = seq_along_v(c_vector) + 1L) {
  selection <- c_vector[indices]
  selection
}
# Attempt to guess which object type a CDataVector yields from $get() and
# return the requested items, each cast to that type, in a list.
get_cdv <- function(c_copasivector, indices = seq_along_v(c_copasivector)) {
  # Class name of the vector wrapper, e.g. "_p_CDataVectorN_CMetab_t".
  vec_class <- is(c_copasivector)[1L]
  # Excise the item class from the vector's classname and rebuild the item
  # pointer type, e.g. "_p_CDataVectorN_CMetab_t" -> "_p_CMetab".
  item_class <- stringr::str_match(vec_class, "^_p_CDataVector\\w+_(\\w+)_t$")[2L]
  item_type <- paste0("_p_", item_class)
  # Fetch each item (0-based index) and typecast the result.
  map(indices, function(i) as(c_copasivector$get(i), item_type))
}
# Get items of C vectors (CVectorT<double>) as a numeric vector.
#
# `indices` follow the 0-based COPASI convention. Each element is fetched as
# FloatVectorCore_get(self = c_vector, <index>).
# NOTE(review): unclass() strips the class attribute from the SWIG accessor
# function -- presumably to call the raw binding directly without dispatch;
# confirm against the SWIG-generated bindings.
get_cv <- function(c_vector, indices = seq_along_v(c_vector)) {
  assert_that(inherits(c_vector, "_p_CVectorT_double_t"))
  map_dbl(indices, unclass(FloatVectorCore_get), self = c_vector)
}
|
/R/utils_copasi_vector.R
|
permissive
|
jpahle/CoRC
|
R
| false
| false
| 1,097
|
r
|
# Like seq_len() but 0-based: returns 0L .. len-1L (empty integer vector
# when len is 0), so it can be used for iterating through 0-indexed C vectors.
seq_len_0 <- function(len) {
  seq_len(len) - 1L
}
# Works like seq_along() for many COPASI vectors (0-based index).
# Returns integer(0) for an empty vector so callers can iterate safely.
seq_along_v <- function(c_copasivector) {
  len <- c_copasivector$size()
  if (len == 0L)
    return(integer())
  seq_len_0(len)
}
# Get items of a std_vector wrapper in a list/vector via 1-based R indexing.
# `indices` defaults to all elements (0-based positions shifted to 1-based).
get_sv <- function(c_vector, indices = seq_along_v(c_vector) + 1L) {
  c_vector[indices]
}
# Attempts to guess what object a CDataVector returns when $get() is called,
# and returns the requested items cast to that type in a list.
get_cdv <- function(c_copasivector, indices = seq_along_v(c_copasivector)) {
  type <- is(c_copasivector)[1L]
  # excise the item's class from the classname of the vector and rebuild the
  # item pointer type, e.g. "_p_CDataVectorN_CMetab_t" -> "_p_CMetab"
  type <- paste0("_p_", stringr::str_match(type, "^_p_CDataVector\\w+_(\\w+)_t$")[2L])
  # fetch each item (0-based index) and typecast the result
  map(indices, ~ as(c_copasivector$get(.x), type))
}
# Get items of C vectors (CVectorT<double>) as a numeric vector; `indices`
# follow the 0-based COPASI convention.
# NOTE(review): unclass() strips the class attribute from the SWIG accessor,
# presumably to call the raw binding directly -- confirm against the
# SWIG-generated bindings. Called as FloatVectorCore_get(self = c_vector, i).
get_cv <- function(c_vector, indices = seq_along_v(c_vector)) {
  assert_that(inherits(c_vector, "_p_CVectorT_double_t"))
  map_dbl(indices, unclass(FloatVectorCore_get), self = c_vector)
}
|
#' @name spsurml
#' @rdname spsurml
#' @title Maximum likelihood estimation of spatial SUR model.
#' @description This function estimates spatial SUR models using
#'   maximum-likelihood methods. The number of equations, time periods
#'   and cross-sectional units is not restricted. The user can choose
#' between different spatial specifications as described below.
#' The estimation procedure allows for the introduction of linear
#' restrictions on the \eqn{\beta} parameters associated to the
#' regressors.
#' @usage spsurml(formula = NULL, data = NULL, na.action,
#' listw = NULL, type = "sim", Durbin = NULL,
#' method = "eigen", zero.policy = NULL, interval = NULL,
#' trs = NULL, R = NULL, b = NULL, X = NULL, Y = NULL,
#' G = NULL, N = NULL, Tm = NULL,p = NULL,
#' control = list() )
#' @param formula An object type \code{\link[Formula]{Formula}}
#' similar to objects created with the package \pkg{Formula}
#' describing the equations to be estimated in the model.
#' This model may contain several responses (explained
#' variables) and a varying number of regressors in each equation.
#' @param data An object of class data.frame or a matrix.
#' @param na.action A function (default \code{options("na.action")}),
#' can also be \code{na.omit} or \code{na.exclude} with consequences
#' for residuals and fitted values. It may be necessary to set
#' \code{zero.policy} to \code{TRUE} because this subsetting may
#' create no-neighbour observations.
#' @param listw A \code{listw} object created for example by
#'   \code{\link[spdep]{nb2listw}} from the \pkg{spdep} package. It can
#'   also be a spatial weighting matrix of order \emph{(NxN)} instead of
#'   a \code{listw} object. Default = \code{NULL}.
#' @param method Similar to the corresponding parameter of
#' \code{\link[spatialreg]{lagsarlm}} function in \pkg{spatialreg} package.
#' "eigen" (default) - the Jacobian is computed as the product of
#' (1 - rho*eigenvalue) using \code{\link[spatialreg]{eigenw}}, and
#' "spam" or "Matrix_J" for strictly symmetric weights lists of
#' styles "B" and "C", or made symmetric by similarity
#' (Ord, 1975, Appendix C) if possible for styles "W" and "S",
#' using code from the spam or Matrix packages to calculate the
#' determinant; "Matrix" and "spam_update" provide updating Cholesky
#' decomposition methods; "LU" provides an alternative sparse matrix
#' decomposition approach. In addition, there are "Chebyshev" and
#' Monte Carlo "MC" approximate log-determinant methods;
#' the Smirnov/Anselin (2009) trace approximation is available
#' as "moments". Three methods: "SE_classic", "SE_whichMin",
#' and "SE_interp" are provided experimentally, the first to
#' attempt to emulate the behaviour of Spatial Econometrics
#' toolbox ML fitting functions. All use grids of log determinant
#' values, and the latter two attempt to ameliorate some features
#' of "SE_classic".
#' @param interval Search interval for autoregressive parameter.
#' Default = \code{NULL}.
#' @param trs Similar to the corresponding parameter of
#' \code{\link[spatialreg]{lagsarlm}} function in \pkg{spatialreg} package.
#' Default \code{NULL}, if given, a vector of powered spatial weights
#' matrix traces output by \code{\link[spdep]{trW}}.
#' @param zero.policy Similar to the corresponding parameter of
#' \code{\link[spatialreg]{lagsarlm}} function in \pkg{spatialreg} package.
#' If \code{TRUE} assign zero to the lagged value of zones without
#' neighbours, if \code{FALSE} assign \code{NA} - causing
#' \code{spsurml()} to terminate with an error. Default = \code{NULL}.
#' @param Durbin If a formula object and model is type "sdm", "sdem"
#' or "slx" the subset of explanatory variables to lag for each equation.
#' @param Y A column vector of order \emph{(NTmGx1)}, with the
#' observations of the explained variables. The ordering of the data
#' must be (first) equation, (second) time dimension and (third)
#' cross-sectional/spatial units. The specification of \emph{Y} is
#' only necessary if not available a \code{\link[Formula]{Formula}}
#' and a data frame. Default = \code{NULL}.
#' @param X A data matrix of order \emph{(NTmGxp)} with the observations
#' of the regressors. The number of covariates in the SUR model is
#' \emph{p} = \eqn{sum(p_{g})} where \emph{\eqn{p_{g}}} is the number
#' of regressors (including the intercept) in the g-th equation,
#' \emph{g = 1,...,G}). The specification of "X" is only
#' necessary if not available a \code{\link[Formula]{Formula}} and a
#' data frame. Default = \code{NULL}.
#' @param p Number of regressors by equation, including the intercept.
#' \emph{p} can be a row vector of order \emph{(1xG)}, if the number
#' of regressors is not the same for all the equations, or a scalar,
#' if the \emph{G} equations have the same number of regressors. The
#' specification of \emph{p} is only necessary if not available a
#' \code{\link[Formula]{Formula}} and a data frame.
#' @param G Number of equations.
#' @param N Number of cross-section or spatial units
#' @param Tm Number of time periods.
#' @param type Type of spatial model specification: "sim",
#' "slx", "slm", "sem", "sdm",
#' "sdem", "sarar" or "gnm". Default = "sim".
#' @param R A row vector of order \emph{(1xpr)} with the set of
#' \emph{r} linear constraints on the \emph{beta} parameters. The
#' \emph{first} restriction appears in the first \emph{p} terms,
#' the second restriction in the next \emph{p} terms and so on.
#' Default = \code{NULL}.
#' @param b A column vector of order \emph{(rx1)} with the values of
#' the linear restrictions on the \emph{beta} parameters.
#' Default = \code{NULL}.
#' @param control List of additional control arguments.
#' @details
#' The list of (spatial) models that can be estimated with the \emph{spsurml} function are:
#' \itemize{
#' \item "sim": SUR model with no spatial effects
#' \deqn{ y_{tg} = X_{tg} \beta_{g} + \epsilon_{tg} }
#' \item "slx": SUR model with spatial lags of the regressors
#' \deqn{ y_{tg} = X_{tg} \beta_{g} + WX_{tg} \theta_{g} + \epsilon_{tg} }
#' \item "slm": SUR model with spatial lags of the explained variables
#' \deqn{y_{tg} = \rho_{g} Wy_{tg} + X_{tg} \beta_{g} + \epsilon_{tg} }
#' \item "sem": SUR model with spatial errors
#' \deqn{ y_{tg} = X_{tg} \beta_{g} + u_{tg} }
#' \deqn{ u_{tg} = \lambda_{g} Wu_{tg} + \epsilon_{tg} }
#' \item "sdm": SUR model of the Spatial Durbin type
#' \deqn{ y_{tg} = \rho_{g} Wy_{tg} + X_{tg} \beta_{g} + WX_{tg} \theta_{g} + \epsilon_{tg} }
#' \item "sdem": SUR model with spatial lags of the regressors and spatial errors
#' \deqn{ y_{tg} = X_{tg} \beta_{g} + WX_{tg} \theta_{g} + u_{tg} }
#' \deqn{ u_{tg} = \lambda_{g} W u_{tg} + \epsilon_{tg} }
#' \item "sarar": SUR model with spatial lags of the explained variables and spatial
#' errors
#' \deqn{ y_{tg} = \rho_{g} Wy_{tg} + X_{tg} \beta_{g} + u_{tg} }
#' \deqn{ u_{tg} = \lambda_{g} W u_{tg} + \epsilon_{tg} }
#' \item "gnm": SUR model with spatial lags of the explained variables,
#' regressors and spatial errors
#' \deqn{ y_{tg} = \rho_{g} Wy_{tg} + X_{tg} \beta_{g} +
#' WX_{tg} \theta_{g} + u_{tg} }
#' \deqn{ u_{tg} = \lambda_{g} W u_{tg} + \epsilon_{tg} }
#' }
#'
#' @return Object of \code{spsur} class with the output of the
#' maximum-likelihood estimation of the specified spatial SUR model.
#' A list with:
#' \tabular{ll}{
#' \code{call} \tab Matched call. \cr
#' \code{type} \tab Type of model specified. \cr
#' \code{method} \tab Value of \code{method} argument to compute the
#' Jacobian \cr
#' \code{Durbin} \tab Value of \code{Durbin} argument. \cr
#' \code{coefficients} \tab Estimated coefficients for the regressors. \cr
#' \code{deltas} \tab Estimated spatial coefficients. \cr
#' \code{rest.se} \tab Estimated standard errors for the
#' estimates of \emph{beta}. \cr
#' \code{deltas.se} \tab Estimated standard errors for the estimates of
#' the spatial coefficients (\code{deltas}). \cr
#' \code{resvar} \tab Estimated covariance matrix for the estimates of
#' \emph{beta's} and spatial coefficients (\code{deltas}).\cr
#' \code{LL} \tab Value of the likelihood function at the
#' maximum-likelihood estimates. \cr
#' \code{R2} \tab Coefficient of determination for each equation,
#' obtained as the squared of the correlation coefficient between the
#' corresponding explained variable and its estimate.
#' \code{spsurml} also shows a \emph{global} coefficient of
#' determination obtained, in the same manner, for the set of
#' the \emph{G} equations. \cr
#' \code{Sigma} \tab Estimated covariance matrix for the residuals of
#' the \emph{G} equations. \cr
#' \code{fdHess} \tab Logical value of \code{fdHess} argument when
#' computing numerical covariances. \cr
#' \code{residuals} \tab Residuals of the model. \cr
#' \code{df.residuals} \tab Degrees of freedom for the residuals. \cr
#' \code{fitted.values} \tab Estimated values for the dependent
#' variables. \cr
#' \code{BP} \tab Value of the Breusch-Pagan statistic to test the
#' null hypothesis of diagonality among the errors of the \emph{G}
#' equations. \cr
#' \code{LMM} \tab Marginal Lagrange Multipliers,
#' LM(\eqn{\rho}|\eqn{\lambda}) and
#' LM(\eqn{\lambda}|\eqn{\rho}), to test for omitted spatial effects
#' in the specification. \cr
#' \code{G} \tab Number of equations. \cr
#' \code{N} \tab Number of cross-sections or spatial units. \cr
#' \code{Tm} \tab Number of time periods. \cr
#' \code{p} \tab Number of regressors by equation (including intercepts). \cr
#' \code{Y} \tab Vector \emph{Y} of the explained variables of the
#' SUR model. \cr
#' \code{X} \tab Matrix \emph{X} of the regressors of the SUR model. \cr
#' \code{W} \tab Spatial weighting matrix. \cr
#' \code{zero.policy} \tab Logical value of \code{zero.policy} . \cr
#' \code{interval} \tab Search interval for spatial parameter. \cr
#' \code{listw_style} \tab Style of neighborhood matrix \code{W}. \cr
#' \code{trs} \tab Either \code{NULL} or vector of powered spatial weights
#' matrix traces output by \code{trW}. \cr
#' \code{insert} \tab Logical value to check if \code{is.null(trs)}. \cr
#' }
#'
#' @section Control arguments:
#' \tabular{ll}{
#' \code{tol} \tab Numerical value for the tolerance for the estimation
#' algorithm until convergence. Default = 1e-3. \cr
#' \code{maxit} \tab Maximum number of iterations until convergence;
#' it must be an integer value. Default = 200. \cr
#' \code{trace} \tab A logical value to show intermediate results during
#' the estimation process. Default = \code{TRUE}. \cr
#' \code{fdHess} \tab Compute variance-covariance matrix using the numerical
#' hessian. Suited for large samples. Default = \code{FALSE} \cr
#' \code{Imult} \tab default 2; used for preparing the Cholesky
#' decompositions for updating in the Jacobian function \cr
#' \code{super} \tab if \code{NULL} (default), set to \code{FALSE} to use
#' a simplicial decomposition for the sparse Cholesky decomposition and
#' method "Matrix_J", set to as.logical(NA) for method "Matrix", if
#' \code{TRUE}, use a supernodal decomposition \cr
#' \code{cheb_q} \tab default 5; highest power of the approximating
#' polynomial for the Chebyshev approximation \cr
#' \code{MC_p} \tab default 16; number of random variates \cr
#' \code{MC_m} \tab default 30; number of products of random variates
#' matrix and spatial weights matrix \cr
#' \code{spamPivot} \tab default "MMD", alternative "RCM" \cr
#' \code{in_coef} \tab default 0.1, coefficient value for initial Cholesky
#' decomposition in "spam_update" \cr
#' \code{type} \tab default "MC", used with method "moments"; alternatives
#' "mult" and "moments", for use if trs is missing \cr
#' \code{correct} \tab default \code{TRUE}, used with method "moments" to
#' compute the Smirnov/Anselin correction term \cr
#' \code{trunc} \tab default \code{TRUE}, used with method "moments" to
#' truncate the Smirnov/Anselin correction term \cr
#' \code{SE_method} \tab default "LU", may be "MC" \cr
#' \code{nrho} \tab default 200, as in SE toolbox; the size of the first
#' stage lndet grid; it may be reduced to for example 40 \cr
#' \code{interpn} \tab default 2000, as in SE toolbox; the size of the
#' second stage lndet grid \cr
#' \code{SElndet} \tab default \code{NULL}, may be used to pass a
#' pre-computed SE toolbox style matrix of coefficients and their lndet
#' values to the "SE_classic" and "SE_whichMin" methods \cr
#' \code{LU_order} \tab default \code{FALSE}; used in "LU_prepermutate",
#' note warnings given for lu method \cr
#' \code{pre_eig} \tab default \code{NULL}; may be used to pass a
#' pre-computed vector of eigenvalues \cr
#' }
#'
#' @author
#' \tabular{ll}{
#' Fernando López \tab \email{fernando.lopez@@upct.es} \cr
#' Román Mínguez \tab \email{roman.minguez@@uclm.es} \cr
#' Jesús Mur \tab \email{jmur@@unizar.es} \cr
#' }
#'
#' @references
#' \itemize{
#' \item Anselin, L. (1988). \emph{Spatial econometrics: methods and models.}
#' Dordrecht: Kluwer
#' \item Bivand, R.S. and Piras G. (2015). Comparing Implementations of
#' Estimation Methods for Spatial Econometrics. \emph{Journal of
#' Statistical Software}, 63(18), 1-36.
#' \url{https://www.jstatsoft.org/v63/i18/}.
#' \item Bivand, R. S., Hauke, J., and Kossowski, T. (2013).
#' Computing the Jacobian in Gaussian spatial autoregressive models: An
#' illustrated comparison of available methods. \emph{ Geographical
#' Analysis}, 45(2), 150-179.
#' \item Breusch T., Pagan A. (1980). The Lagrange multiplier test and its
#' applications to model specification in econometrics.
#' \emph{Rev Econ Stud} 47: 239-254
#' \item Cliff, A.D. and Ord, J.K. (1981). \emph{Spatial processes: Models
#' and applications}, Pion.
#' \item LeSage J and Pace, R.K. (2009). \emph{Introduction to Spatial
#' Econometrics.} CRC Press, Boca Raton.
#' \item López, F.A., Mur, J., and Angulo, A. (2014). Spatial model
#' selection strategies in a SUR framework. The case of regional
#' productivity in EU. \emph{Annals of Regional Science}, 53(1), 197-220.
#' \item Mur, J., López, F., and Herrera, M. (2010). Testing for spatial
#' effects in seemingly unrelated regressions.
#' \emph{Spatial Economic Analysis}, 5(4), 399-440.
#' \item Ord, J.K. (1975). Estimation methods for models of spatial
#' interaction, \emph{Journal of the American Statistical Association},
#' 70, 120-126;
#' }
#'
#' @seealso
#' \code{\link{spsur3sls}}, \code{\link[spatialreg]{lagsarlm}},
#' \code{\link{lmtestspsur}}, \code{\link{wald_betas}},
#' \code{\link{lrtest}}
#'
#' @examples
#'
#' #################################################
#' ######## CROSS SECTION DATA (G>1; Tm=1) ########
#' #################################################
#'
#' #### Example 1: Spatial Phillips-Curve. Anselin (1988, p. 203)
#' rm(list = ls()) # Clean memory
#' data(spc)
#' Tformula <- WAGE83 | WAGE81 ~ UN83 + NMR83 + SMSA | UN80 + NMR80 + SMSA
#' spcsur.sim <- spsurml(formula = Tformula, data = spc, type = "sim")
#' summary(spcsur.sim)
#' # All the coefficients in a single table.
#' print(spcsur.sim)
#' # Plot of the coefficients of each equation in different graphs
#' plot(spcsur.sim)
#'
#' ## A SUR-SLX model
#' ## (listw argument can be either a matrix or a listw object )
#' spcsur.slx <- spsurml(formula = Tformula, data = spc, type = "slx",
#' listw = Wspc)
#' summary(spcsur.slx)
#' # All the coefficients in a single table.
#' print(spcsur.slx)
#' # Plot of the coefficients in a single graph
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.slx, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' nrow = 2)
#' }
#'
#' ## VIP: The output of the whole set of the examples can be examined
#' ## by executing demo(demo_spsurml, package="spsur")
#'
#' \donttest{
#' ### A SUR-SLM model
#' spcsur.slm <- spsurml(formula = Tformula, data = spc, type = "slm",
#' listw = Wspc)
#' summary(spcsur.slm)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.slm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ### A SUR-SEM model
#' spcsur.sem <- spsurml(formula = Tformula, data = spc, type = "sem",
#' listw = Wspc)
#' summary(spcsur.sem)
#' print(spcsur.sem)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ### A SUR-SDM model
#' spcsur.sdm <- spsurml(formula = Tformula, data = spc, type = "sdm",
#' listw = Wspc)
#' summary(spcsur.sdm)
#' print(spcsur.sdm)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sdm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ## A SUR-SDM model with different spatial lags in each equation
#' TformulaD <- ~ UN83 + NMR83 + SMSA | UN80
#' spcsur.sdm2 <- spsurml(formula = Tformula, data = spc, type = "sdm",
#' listw = Wspc, Durbin = TformulaD)
#' summary(spcsur.sdm2)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sdm2, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#' ### A SUR-SDEM model
#' spcsur.sdem <- spsurml(formula = Tformula, data = spc, type = "sdem",
#' listw = Wspc)
#' print(spcsur.sdem)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sdem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ### A SUR-SARAR model
#' spcsur.sarar <- spsurml(formula = Tformula, data = spc, type = "sarar",
#' listw = Wspc, control = list(tol = 0.1))
#' print(spcsur.sarar)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sarar, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ### A SUR-GNM model
#' spcsur.gnm <- spsurml(formula = Tformula, data = spc, type = "gnm",
#' listw = Wspc, control = list(tol = 0.1))
#' print(spcsur.gnm)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.gnm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ## A A SUR-GNM model model with different spatial lags in each equation
#' TformulaD <- ~ UN83 + NMR83 + SMSA | UN80
#' spcsur.gnm2 <-spsurml(formula = Tformula, data = spc, type = "gnm",
#' listw = Wspc, Durbin = TformulaD,
#' control = list(tol = 0.1))
#' print(spcsur.gnm2)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.gnm2, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' }
#'
#' ##################################################
#' ######### CLASSIC PANEL DATA G=1; Tm>1 ########
#' ##################################################
#' #
#' ##### Example 2: Homicides + Socio-Economics (1960-90)
#' ## Homicides and selected socio-economic characteristics for continental
#' ## U.S. counties.
#' ## Data for four decennial census years: 1960, 1970, 1980 and 1990.
#' ## \url{https://geodacenter.github.io/data-and-lab/ncovr/}
#'
#'\donttest{
#' ### It usually requires 1-2 minutes maximum...
#' rm(list = ls()) # Clean memory
#' ### Read NCOVR.sf object
#' data(NCOVR, package = "spsur")
#' nbncovr <- spdep::poly2nb(NCOVR.sf, queen = TRUE)
#' ### Some regions with no links...
#' lwncovr <- spdep::nb2listw(nbncovr, style = "W", zero.policy = TRUE)
#' Tformula <- HR80 | HR90 ~ PS80 + UE80 | PS90 + UE90
#' ### A SUR-SIM model
#' NCOVRSUR.sim <- spsurml(formula = Tformula, data = NCOVR.sf, type = "sim")
#' summary(NCOVRSUR.sim)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.sim, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]], nrow = 3)
#' }
#' ### A SUR-SLX model
#' NCOVRSUR.slx <- spsurml(formula = Tformula, data = NCOVR.sf, type = "slx",
#' listw = lwncovr, zero.policy = TRUE)
#' print(NCOVRSUR.slx)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.slx, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]], nrow = 2)
#' }
#'
#' ### A SUR-SLM model
#' ### method = "Matrix" (Cholesky) instead of "eigen"
#' ### (fdHess = TRUE to compute numerical covariances )
#' NCOVRSUR.slm <- spsurml(formula = Tformula, data = NCOVR.sf,
#' type = "slm", listw = lwncovr, method = "Matrix",
#' zero.policy = TRUE, control = list(fdHess = TRUE))
#' summary(NCOVRSUR.slm)
#'
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.slm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' # LR test for nested models
#' anova(NCOVRSUR.sim, NCOVRSUR.slm)
#'
#' ### A SUR-SDM model with different spatial lags in each equation
#' ### Analytical covariances (default)
#' TformulaD <- ~ PS80 + UE80 | PS90
#' NCOVRSUR.sdm <- spsurml(formula = Tformula, data = NCOVR.sf,
#' type = "sdm", listw = lwncovr, method = "Matrix",
#' Durbin = TformulaD, zero.policy = TRUE)
#' print(NCOVRSUR.sdm)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.sdm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#' ### A SUR-SEM model
#' NCOVRSUR.sem <- spsurml(formula = Tformula, data = NCOVR.sf,
#' type = "sem", listw = lwncovr, method = "Matrix",
#' zero.policy = TRUE, control = list(fdHess = TRUE))
#' print(NCOVRSUR.sem)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.sem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ### A SUR-SDEM model
#' NCOVRSUR.sdem <-spsurml(formula = Tformula, data = NCOVR.sf,
#' type = "sdem", listw = lwncovr, method = "Matrix",
#' zero.policy = TRUE, control = list(fdHess = TRUE))
#' print(NCOVRSUR.sdem)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.sdem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'}
#'
#' ###############################################
#' ## MULTI-DIMENSIONAL SUR PANEL G>1; Tm>1 ###
#' ###############################################
#' ##### Reshape NCOVR in panel format
#' \donttest{
#' N <- nrow(NCOVR.sf)
#' Tm <- 4
#' index_time <- rep(1:Tm, each = N)
#' index_indiv <- rep(1:N, Tm)
#' pHR <- c(NCOVR.sf$HR60, NCOVR.sf$HR70, NCOVR.sf$HR80, NCOVR.sf$HR90)
#' pPS <- c(NCOVR.sf$PS60, NCOVR.sf$PS70, NCOVR.sf$PS80, NCOVR.sf$PS90)
#' pUE <- c(NCOVR.sf$UE60, NCOVR.sf$UE70, NCOVR.sf$UE80, NCOVR.sf$UE90)
#' pDV <- c(NCOVR.sf$DV60, NCOVR.sf$DV70, NCOVR.sf$DV80, NCOVR.sf$DV90)
#' pFP <- c(NCOVR.sf$FP59, NCOVR.sf$FP70, NCOVR.sf$FP80, NCOVR.sf$FP90)
#' pSOUTH <- rep(NCOVR.sf$SOUTH, Tm)
#' pNCOVR <- data.frame(indiv = index_indiv, time = index_time,
#' HR = pHR, PS = pPS, UE = pUE, DV = pDV,
#' FP = pFP, SOUTH = pSOUTH)
#' pform <- HR | DV | FP ~ PS + UE | PS + UE + SOUTH | PS
#' ### SIM
#' ### Remark: It is necessary to provide Tm value as argument
#' ### when G>1 && Tm>1
#' pNCOVRSUR.sim <- spsurml(formula = pform, data = pNCOVR,
#' type = "sim", Tm = Tm)
#' print(pNCOVRSUR.sim)
#' if (require(gridExtra)) {
#' pl <- plot(pNCOVRSUR.sim, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$lplbetas[[3]], nrow = 3)
#' }
#' # SLM
#' pNCOVRSUR.slm <- spsurml(formula = pform, data = pNCOVR,
#' listw = lwncovr, type = "slm", method = "Matrix", Tm = Tm,
#' zero.policy = TRUE, control= list(fdHess = TRUE))
#' print(pNCOVRSUR.slm)
#' if (require(gridExtra)) {
#' pl <- plot(pNCOVRSUR.slm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$lplbetas[[3]], pl$pldeltas, nrow = 4)
#' }
#'
#' pNCOVRSUR.sem <- spsurml(formula = pform, data = pNCOVR,
#' listw = lwncovr, type = "sem", method = "Matrix", Tm = Tm,
#' zero.policy = TRUE, control= list(fdHess = TRUE))
#' print(pNCOVRSUR.sem)
#' if (require(gridExtra)) {
#' pl <- plot(pNCOVRSUR.sem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$lplbetas[[3]], pl$pldeltas, nrow = 4)
#' }
#' }
#' @export
spsurml <- function(formula = NULL, data = NULL, na.action,
                    listw = NULL, type = "sim", Durbin = NULL,
                    method = "eigen",
                    zero.policy = NULL,
                    interval = NULL,
                    trs = NULL,
                    R = NULL, b = NULL,
                    X = NULL, Y = NULL,
                    G = NULL, N = NULL, Tm = NULL,
                    p = NULL, control = list() ) {
  ## Maximum-likelihood estimation of a spatial SUR model.
  ## Input can come either as formula + data (Y, X, G, N, Tm, p are then
  ## derived) or directly as matrices with explicit dimensions. See the
  ## roxygen block above for the full argument and return documentation.
  # Merge user-supplied `control` entries into the defaults; warn on any
  # unrecognized control names instead of failing.
  con <- list(tol = 0.001, maxit = 200, trace = TRUE,
              fdHess = NULL,
              Imult = 2, cheb_q = 5, MC_p = 16L, MC_m = 30L, super = NULL,
              spamPivot = "MMD", in_coef = 0.1, type = "MC", correct = TRUE,
              trunc = TRUE, SE_method = "LU", nrho = 200, interpn = 2000,
              SElndet = NULL, LU_order = FALSE,
              pre_eig = NULL)
  nmsC <- names(con)
  con[(namc <- names(control))] <- control
  if (length(noNms <- namc[!namc %in% nmsC]))
    warning("unknown names in control: ", paste(noNms, collapse = ", "))
  # Spatial specifications require weights. Accept a spdep `listw`, a base
  # matrix, or a Matrix object, and keep both representations available:
  # `W` (dense Matrix) and `listw` (spdep neighbour list).
  if (!(type == "sim")) {
    if (is.null(listw) || !inherits(listw,c("listw","Matrix","matrix")))
      stop("listw format unknown or NULL")
    if (inherits(listw, "listw")) {
      if (is.null(formula) || is.null(data)) {
        W <- Matrix::Matrix(spdep::listw2mat(listw))
      }
    }
    if (inherits(listw, "matrix")) {
      W <- Matrix::Matrix(listw)
      listw <- spdep::mat2listw(W)
    }
    if (inherits(listw, "Matrix")) {
      W <- listw
      listw <- spdep::mat2listw(as.matrix(W))
    }
  } else W <- NULL
  # Default the zero policy to the global spatialreg option when not given.
  if (is.null(zero.policy))
    zero.policy <- spatialreg::get.ZeroPolicyOption()
  # For row-standardized ("W") or variance-stabilized ("S") styles, record
  # whether the weights can be brought to symmetry by similarity.
  can.sim <- FALSE
  if (!(is.null(listw)) && listw$style %in% c("W", "S")) {
    can.sim <- spatialreg::can.be.simmed(listw)
  }
  if (!is.null(Tm) && !is.null(G) && Tm > 1 && G == 1){
    # Change dimensions (assumption: matrix as data)
    G <- Tm
    Tm <- 1
  }
  # Multi-part formulas (one part per equation) are handled via Formula.
  if (!is.null(formula) && (!inherits(formula, "Formula")))
    formula <- Formula::Formula(formula)
  cl <- match.call()
  if (!is.null(formula) && !is.null(data)) {
    # ---- Formula + data path: build Y, X and the model dimensions ----
    mt <- terms(formula, data = data)
    mf <- lm(formula, data = data, na.action = na.action,
             method = "model.frame")
    mf$drop.unused.levels <- TRUE
    na.act <- attr(mf, "na.action")
    if (!(type == "sim")) {
      # Rows dropped by na.action must also be dropped from the weights
      # list; the dense W is rebuilt afterwards to stay consistent.
      if (!is.null(na.act)) {
        subset <- !(1:length(listw$neighbours) %in% na.act)
        listw <- subset(listw, subset, zero.policy = zero.policy)
      }
      W <- Matrix::Matrix(spdep::listw2mat(listw))
    }
    # Durbin terms only make sense for lagged-X specifications; default to
    # lagging every regressor unless a subset formula was supplied.
    if (any(type == c("gnm", "sdm", "sdem", "slx"))) {
      if(!inherits(Durbin, "formula")) Durbin <- TRUE
    } else { Durbin <- FALSE }
    get_XY <- get_data_spsur(formula = formula, mf = mf,
                             Durbin = Durbin,
                             listw = listw,
                             zero.policy = zero.policy,
                             N = N, Tm = Tm)
    Y <- get_XY$Y
    X <- get_XY$X
    G <- get_XY$G
    N <- get_XY$N
    Tm <- get_XY$Tm
    p <- get_XY$p
    dvars <- get_XY$dvars
    if (Tm > 1 && G == 1) {
      # Change dimensions in this case with Matrix Data
      G <- Tm
      Tm <- 1
    }
    rm(get_XY)
    if (length(p) == 1) p <- rep(p,G)
  } else {# Input data in matrix form...
    # dvars records, per equation, the number of own regressors and the
    # number of Durbin (lagged) regressors (none with matrix input).
    dvars <- vector("list", G)
    for (i in 1:G) {
      dvars[[i]] <- c(p[i], 0L)
    }
  }
  # A scalar p means every equation has the same number of regressors.
  if (length(p) == 1) p <- rep(p,G)
  names(p) <- NULL
  # Linear restrictions R*beta = b are imposed by transforming the design
  # matrix; keep the originals to map the estimates back at the end.
  if (!is.null(R) && !is.null(b)) {
    Xorig <- X
    porig <- p
    restr <- X_restr(X = X, R = R, b = b, p = p)
    X <- restr$Xstar
    p <- restr$pstar
  }
  #### Assign the data to a new environment shared with the fitting and
  #### covariance helper routines.
  similar <- FALSE
  env <- new.env()
  assign("Y", Y, envir = env)
  assign("X", X, envir = env)
  assign("N", N, envir = env)
  assign("G", G, envir = env)
  assign("Tm", Tm, envir = env)
  assign("p", p, envir = env)
  assign("dvars", dvars, envir = env)
  # Set up the Jacobian (log-determinant) machinery in `env`, following
  # the spatialreg conventions.
  if (!(is.null(listw))) {
    assign("listw", listw, envir = env)
    assign("n", length(listw$neighbours), envir = env)
    assign("similar", FALSE, envir = env)
    assign("can.sim", can.sim, envir = env)
    assign("verbose", con$trace, envir = env)
    assign("family", "SAR", envir = env) # TODO: check other family options
    # sim/slx have no spatial parameter, so no Jacobian is needed.
    if (!(type == "sim" || type == "slx")) {
      interval <- spatialreg::jacobianSetup(method, env, con,
                                            pre_eig = con$pre_eig,
                                            trs = trs,
                                            interval = interval)
      assign("interval", interval, envir = env)
    }
  }
  # Resolve the internal fitting routine by type. The Durbin variants
  # reuse the likelihood of their non-Durbin counterparts: sdm -> slm,
  # sdem -> sem, slx -> sim, gnm -> sarar.
  if (any(type == c("sim","slm","sem","sarar")))
    name_fit <- paste("fit_spsur", type, sep = "")
  if (type == "sdm") name_fit <- "fit_spsurslm"
  if (type == "sdem") name_fit <- "fit_spsursem"
  if (type == "slx") name_fit <- "fit_spsursim"
  if (type == "gnm") name_fit <- "fit_spsursarar"
  fit <- get(name_fit)
  if (con$trace) start_fit <- proc.time()[3]
  # Maximize the concentrated likelihood.
  z <- fit(env = env, con = con)
  if (con$trace) {
    end_fit <- proc.time()[3]
    cat("Time to fit the model: ",
        end_fit-start_fit," seconds\n")
  }
  coefficients <- z$coefficients
  deltas <- z$deltas
  Sigma <- z$Sigma
  # Label the G x G residual covariance matrix (sigma1, sigma2, ...).
  names_sigma <- NULL
  for (i in 1:G){
    new_name <- paste0("sigma",i,sep="")
    names_sigma <- c(names_sigma,new_name)
  }
  colnames(Sigma) <- rownames(Sigma) <- names_sigma
  LL <- z$LL
  # Total parameter count: betas + spatial deltas + the G(G+1)/2 free
  # elements of the symmetric Sigma.
  parameters <- length(coefficients) +
    length(deltas) + G*(G + 1)/2
  df.residual <- G*N*Tm - parameters
  dn <- colnames(X)
  if (is.null(dn)) dn <- paste0("x", 1L:(G*sum(p)), sep = "")
  names(coefficients) <- dn
  # Spatial coefficients: rho for lag models, lambda for error models,
  # both for sarar/gnm (rhos first, then lambdas).
  names_deltas <- NULL
  for (i in 1:G) {
    if (any(type == c("slm","sdm")))
      names_deltas[i] <- paste("rho", i, sep = "_")
    if (any(type == c("sem","sdem")))
      names_deltas[i] <- paste("lambda", i, sep = "_")
    if (any(type == c("sarar", "gnm"))) {
      names_deltas[i] <- paste("rho", i, sep = "_")
      names_deltas[G + i] <- paste("lambda", i, sep = "_")
    }
  }
  names(deltas) <- names_deltas
  # Make the estimates visible to the covariance helpers through `env`.
  assign("Sigma", Matrix::Matrix(z$Sigma), envir = env)
  if (!(type == "sim" || type == "slx")) {
    assign("deltas",Matrix::Diagonal(length(deltas),deltas),
           envir = env)
  }
  if (con$trace) start_cov <- proc.time()[3]
  fdHess <- con$fdHess
  # Covariance matrix: analytical by default (also the only option for
  # sim/slx); numerical (finite-difference Hessian) when fdHess = TRUE or
  # when the analytical computation fails.
  if (is.null(fdHess) || !(fdHess) ||
      any(type == c("sim","slx"))) {
    # ANALYTICAL VARIANCE-COVARIANCE MATRIX
    if (any(type == c("sim","slx")))
      name_cov_fit <- "cov_spsursim_f"
    if (any(type == c("slm","sem","sarar")))
      name_cov_fit <- paste("cov_spsur", type, "_f", sep = "")
    if (type == "sdm") name_cov_fit <- "cov_spsurslm_f"
    if (type == "sdem") name_cov_fit <- "cov_spsursem_f"
    if (type == "gnm") name_cov_fit <- "cov_spsursarar_f"
    cov_fit <- get(name_cov_fit)
    allcov <- try( cov_fit(env = env) )
    if (inherits(allcov, "try-error")) {
      # Fall back to the numerical Hessian below.
      cat("Impossible to compute analytical covariances ","\n")
      fdHess <- TRUE
    } else {
      fdHess <- FALSE
      rest.se <- allcov$rest.se
      names(rest.se) <- names(coefficients)
      deltas.se <- allcov$deltas.se
      names(deltas.se) <- names(deltas)
      # LMM (marginal LM tests) and BP (Breusch-Pagan diagonality test)
      # are only available from the analytical computation.
      if (!is.null(allcov$LMM)) LMM <- allcov$LMM else LMM <- NULL
      if (!is.null(allcov$BP)) BP <- allcov$BP else BP <- NULL
      if (any(type == c("sim","slx"))) {
        resvar <- allcov$vcov
        colnames(resvar) <- rownames(resvar) <-
          names(coefficients)
      } else {
        resvar <- allcov$vcov
        # Label the Sigma entries (upper triangle, row-wise): sigma_kl.
        names_var_Sigma <- NULL
        for (k in 1:G) {
          for (l in k:G) {
            new_name <- paste0("sigma",k,l,sep="")
            names_var_Sigma <- c(names_var_Sigma,new_name)
          }
        }
        colnames(resvar) <- rownames(resvar) <-
          c(names(coefficients),
            names(deltas),names_var_Sigma)
        ## VIP: reorder the covariance matrix so it matches the ordering
        ## used by the spdep and spatialreg packages...
        resvar <- resvar[c(names_var_Sigma,names(deltas),names(coefficients)),
                         c(names_var_Sigma,names(deltas),names(coefficients))]
      }
    }
  }
  if (fdHess) {
    # NUMERICAL VARIANCE-COVARIANCE MATRIX (suited for large samples).
    if (con$trace){
      cat("Computing numerical covariances...","\n")
    }
    # Concentrated likelihood in the deltas, per model family.
    if (any(type == c("slm","sdm"))) name_cov_fit <- "f_sur_lag"
    if (any(type == c("sem","sdem"))) name_cov_fit <- "f_sur_sem"
    if (any(type == c("sarar","gnm"))) name_cov_fit <- "f_sur_sarar"
    cov_fit <- get(name_cov_fit)
    vardeltas <- solve(numDeriv::hessian(func = cov_fit,
                                         x = deltas, env = env))
    deltas.se <- as.vector(sqrt(diag(vardeltas)))
    names(deltas.se) <- names(deltas)
    # GLS-type covariance of the betas: (X' Omega^{-1} X)^{-1} with
    # Omega^{-1} = I_Tm (x) Sigma^{-1} (x) I_N.
    IT <- Matrix::Diagonal(Tm)
    IR <- Matrix::Diagonal(N)
    Sigmainv <- Matrix::solve(z$Sigma)
    OMEinv <- kronecker(kronecker(IT, Sigmainv), IR)
    varbetas <- Matrix::solve(Matrix::crossprod(X, OMEinv %*% X))
    rest.se <- sqrt(diag(as.matrix(varbetas)))
    names(rest.se) <- names(coefficients)
    rm(IT,IR,OMEinv)
    # Deltas and betas are treated as uncorrelated (block-diagonal).
    resvar <- as.matrix(Matrix::bdiag(list(vardeltas, varbetas)))
    colnames(resvar) <- c(names(deltas), names(coefficients))
    rownames(resvar) <- colnames(resvar)
    ## Added by Fernando 16/03/2020:
    # Include the Breusch-Pagan statistic for diagonality of Sigma.
    Sigma_corr <- stats::cov2cor(Sigma)
    index_ltri <- lower.tri(Sigma_corr)
    BP <- N*Tm*sum(Sigma_corr[index_ltri]^2)
    # Computing the marginal tests requires the covariance matrix of all
    # the model parameters. The numerical path does not compute the
    # covariances of the sigmas (only deltas and betas), so LMM is not
    # available here; a warning suggesting the analytical covariances
    # could be added for users who need the marginal tests.
    LMM <- NULL
  }
  if (con$trace) {
    end_cov <- proc.time()[3]
    cat("Time to compute covariances: ",
        end_cov - start_cov," seconds \n")
  }
  # Compute R^2 general and for each equation
  Yhat <- z$fitted.values
  R2_pool <- as.numeric((cor(Y,Yhat))^2)
  names(R2_pool) <- c("R2_pool")
  arrYhat <- array(Yhat,c(N,G,Tm))
  arrY <- array(Y,c(N,G,Tm))
  R2_eq <- rep(0,G)
  for (i in 1:G) {
    R2_eq[i] <- cor(matrix(arrY[,i,], ncol = 1),
                    matrix(arrYhat[,i,], ncol = 1))^2
  }
  names(R2_eq) <- paste0("R2_eq", 1:G)
  # Map the restricted estimates back to the original (unrestricted)
  # design, duplicating entries for pure equality constraints.
  if (!is.null(R) && !is.null(b)) {
    namesXorig <- colnames(Xorig)
    coefforig <- seorig <- rep(0, ncol(Xorig))
    names(coefforig) <- names(seorig) <- colnames(Xorig)
    coefforig[names(coefficients)] <- coefficients
    seorig[names(rest.se)] <- rest.se
    b <- as.numeric(b)
    for (i in 1:nrow(R)) {
      lidxRi <- R[i,] != 0
      widxRi <- which(lidxRi)
      vidxRi <- R[i,lidxRi]
      ## Check if the constraint is the equality between coefficients
      if ((length(widxRi) == 2) && (sum(vidxRi) == 0)
          && (b[i] == 0)) {
        coefforig[widxRi[2]] <- coefforig[widxRi[1]]
        seorig[widxRi[2]] <- seorig[widxRi[1]]
        # Updates covariance matrix to include constrained coefficient
        name1 <- names(coefforig)[widxRi[1]]
        name2 <- names(coefforig)[widxRi[2]]
        pr1 <- rbind(resvar, resvar[name1, ])
        rownames(pr1) <- c(rownames(resvar), name2)
        pr2 <- cbind(pr1, c(resvar[, name1], resvar[name1, name1]))
        colnames(pr2) <- rownames(pr2)
        resvar <- pr2
        rm(pr1, pr2)
      }
      # ## Check if the constraint is individual coefficient = 0
      # if ((length(widxRi) == 1) && (vidxRi == 0)&& (b[i] == 0)) {
      #  coefforig[widxRi] <- seorig[widxRi] <- 0
      # }
    }
    X <- Xorig
    p <- porig
    coefficients <- coefforig
    rest.se <- seorig
  }
  # Assemble the spsur object returned to the caller.
  ret <- new_spsur(list(call = cl, type = type,
                        method = method, Durbin = Durbin,
                        G = G, N = N, Tm = Tm,
                        deltas = deltas, deltas.se = deltas.se,
                        coefficients = coefficients, rest.se = rest.se,
                        resvar = resvar, fdHess = fdHess,
                        p = p, dvars = dvars,
                        parameters = parameters,
                        LL = LL, R2 = c(R2_pool,R2_eq),
                        Sigma = Sigma,
                        BP = BP, LMM = LMM,
                        residuals = z$residuals, df.residual = df.residual,
                        fitted.values = z$fitted.values, se.fit = NULL,
                        Y = Y, X = X, W = W,
                        similar = similar, can.sim = can.sim,
                        zero.policy = zero.policy, listw_style = listw$style,
                        interval = interval,
                        insert = !is.null(trs)))
  # Record no-neighbour regions when the zero policy allowed them.
  if (zero.policy) {
    zero.regs <- attr(listw$neighbours, "region.id")[which(
      spdep::card(listw$neighbours) == 0)]
    if (length(zero.regs) > 0L)
      attr(ret, "zero.regs") <- zero.regs
  }
  if(exists("na.act")) { # It may not exist with matrix data
    if (!is.null(na.act)) ret$na.action <- na.act
  }
  ret
}
|
/R/spsurml.R
|
no_license
|
shizelong1985/spsur
|
R
| false
| false
| 39,802
|
r
|
#' @name spsurml
#' @rdname spsurml
#' @title Maximum likelihood estimation of spatial SUR model.
#' @description This function estimates spatial SUR models using
#' maximum-likelihood methods. The number of equations, time periods
#' and cross-sectional units is not restricted. The user can choose
#' between different spatial specifications as described below.
#' The estimation procedure allows for the introduction of linear
#' restrictions on the \eqn{\beta} parameters associated to the
#' regressors.
#' @usage spsurml(formula = NULL, data = NULL, na.action,
#' listw = NULL, type = "sim", Durbin = NULL,
#' method = "eigen", zero.policy = NULL, interval = NULL,
#' trs = NULL, R = NULL, b = NULL, X = NULL, Y = NULL,
#' G = NULL, N = NULL, Tm = NULL,p = NULL,
#' control = list() )
#' @param formula An object type \code{\link[Formula]{Formula}}
#' similar to objects created with the package \pkg{Formula}
#' describing the equations to be estimated in the model.
#' This model may contain several responses (explained
#' variables) and a varying number of regressors in each equation.
#' @param data An object of class data.frame or a matrix.
#' @param na.action A function (default \code{options("na.action")}),
#' can also be \code{na.omit} or \code{na.exclude} with consequences
#' for residuals and fitted values. It may be necessary to set
#' \code{zero.policy} to \code{TRUE} because this subsetting may
#' create no-neighbour observations.
#' @param listw A \code{listw} object created for example by
#'   \code{\link[spdep]{nb2listw}} from the \pkg{spdep} package. It can
#'   also be a spatial weighting matrix of order \emph{(NxN)} instead of
#'   a \code{listw} object. Default = \code{NULL}.
#' @param method Similar to the corresponding parameter of
#' \code{\link[spatialreg]{lagsarlm}} function in \pkg{spatialreg} package.
#' "eigen" (default) - the Jacobian is computed as the product of
#' (1 - rho*eigenvalue) using \code{\link[spatialreg]{eigenw}}, and
#' "spam" or "Matrix_J" for strictly symmetric weights lists of
#' styles "B" and "C", or made symmetric by similarity
#' (Ord, 1975, Appendix C) if possible for styles "W" and "S",
#' using code from the spam or Matrix packages to calculate the
#' determinant; "Matrix" and "spam_update" provide updating Cholesky
#' decomposition methods; "LU" provides an alternative sparse matrix
#' decomposition approach. In addition, there are "Chebyshev" and
#' Monte Carlo "MC" approximate log-determinant methods;
#' the Smirnov/Anselin (2009) trace approximation is available
#' as "moments". Three methods: "SE_classic", "SE_whichMin",
#' and "SE_interp" are provided experimentally, the first to
#' attempt to emulate the behaviour of Spatial Econometrics
#' toolbox ML fitting functions. All use grids of log determinant
#' values, and the latter two attempt to ameliorate some features
#' of "SE_classic".
#' @param interval Search interval for autoregressive parameter.
#' Default = \code{NULL}.
#' @param trs Similar to the corresponding parameter of
#' \code{\link[spatialreg]{lagsarlm}} function in \pkg{spatialreg} package.
#' Default \code{NULL}, if given, a vector of powered spatial weights
#' matrix traces output by \code{\link[spdep]{trW}}.
#' @param zero.policy Similar to the corresponding parameter of
#' \code{\link[spatialreg]{lagsarlm}} function in \pkg{spatialreg} package.
#' If \code{TRUE} assign zero to the lagged value of zones without
#' neighbours, if \code{FALSE} assign \code{NA} - causing
#' \code{spsurml()} to terminate with an error. Default = \code{NULL}.
#' @param Durbin If a formula object and model is type "sdm", "sdem"
#' or "slx" the subset of explanatory variables to lag for each equation.
#' @param Y A column vector of order \emph{(NTmGx1)}, with the
#' observations of the explained variables. The ordering of the data
#' must be (first) equation, (second) time dimension and (third)
#' cross-sectional/spatial units. The specification of \emph{Y} is
#' only necessary if not available a \code{\link[Formula]{Formula}}
#' and a data frame. Default = \code{NULL}.
#' @param X A data matrix of order \emph{(NTmGxp)} with the observations
#' of the regressors. The number of covariates in the SUR model is
#' \emph{p} = \eqn{sum(p_{g})} where \emph{\eqn{p_{g}}} is the number
#' of regressors (including the intercept) in the g-th equation,
#' \emph{g = 1,...,G}). The specification of "X" is only
#' necessary if not available a \code{\link[Formula]{Formula}} and a
#' data frame. Default = \code{NULL}.
#' @param p Number of regressors by equation, including the intercept.
#' \emph{p} can be a row vector of order \emph{(1xG)}, if the number
#' of regressors is not the same for all the equations, or a scalar,
#' if the \emph{G} equations have the same number of regressors. The
#' specification of \emph{p} is only necessary if not available a
#' \code{\link[Formula]{Formula}} and a data frame.
#' @param G Number of equations.
#' @param N Number of cross-section or spatial units
#' @param Tm Number of time periods.
#' @param type Type of spatial model specification: "sim",
#' "slx", "slm", "sem", "sdm",
#' "sdem", "sarar" or "gnm". Default = "sim".
#' @param R A row vector of order \emph{(1xpr)} with the set of
#' \emph{r} linear constraints on the \emph{beta} parameters. The
#' \emph{first} restriction appears in the first \emph{p} terms,
#' the second restriction in the next \emph{p} terms and so on.
#' Default = \code{NULL}.
#' @param b A column vector of order \emph{(rx1)} with the values of
#' the linear restrictions on the \emph{beta} parameters.
#' Default = \code{NULL}.
#' @param control List of additional control arguments.
#' @details
#' The list of (spatial) models that can be estimated with the \emph{spsurml} function are:
#' \itemize{
#' \item "sim": SUR model with no spatial effects
#' \deqn{ y_{tg} = X_{tg} \beta_{g} + \epsilon_{tg} }
#' \item "slx": SUR model with spatial lags of the regressors
#' \deqn{ y_{tg} = X_{tg} \beta_{g} + WX_{tg} \theta_{g} + \epsilon_{tg} }
#' \item "slm": SUR model with spatial lags of the explained variables
#' \deqn{y_{tg} = \rho_{g} Wy_{tg} + X_{tg} \beta_{g} + \epsilon_{tg} }
#' \item "sem": SUR model with spatial errors
#' \deqn{ y_{tg} = X_{tg} \beta_{g} + u_{tg} }
#' \deqn{ u_{tg} = \lambda_{g} Wu_{tg} + \epsilon_{tg} }
#' \item "sdm": SUR model of the Spatial Durbin type
#'   \deqn{ y_{tg} = \rho_{g} Wy_{tg} + X_{tg} \beta_{g} + WX_{tg} \theta_{g} + \epsilon_{tg} }
#' \item "sdem": SUR model with spatial lags of the regressors and spatial errors
#' \deqn{ y_{tg} = X_{tg} \beta_{g} + WX_{tg} \theta_{g} + u_{tg} }
#' \deqn{ u_{tg} = \lambda_{g} W u_{tg} + \epsilon_{tg} }
#' \item "sarar": SUR model with spatial lags of the explained variables and spatial
#' errors
#' \deqn{ y_{tg} = \rho_{g} Wy_{tg} + X_{tg} \beta_{g} + u_{tg} }
#' \deqn{ u_{tg} = \lambda_{g} W u_{tg} + \epsilon_{tg} }
#' \item "gnm": SUR model with spatial lags of the explained variables,
#' regressors and spatial errors
#' \deqn{ y_{tg} = \rho_{g} Wy_{tg} + X_{tg} \beta_{g} +
#' WX_{tg} \theta_{g} + u_{tg} }
#' \deqn{ u_{tg} = \lambda_{g} W u_{tg} + \epsilon_{tg} }
#' }
#'
#' @return Object of \code{spsur} class with the output of the
#' maximum-likelihood estimation of the specified spatial SUR model.
#' A list with:
#' \tabular{ll}{
#' \code{call} \tab Matched call. \cr
#' \code{type} \tab Type of model specified. \cr
#' \code{method} \tab Value of \code{method} argument to compute the
#' Jacobian \cr
#' \code{Durbin} \tab Value of \code{Durbin} argument. \cr
#' \code{coefficients} \tab Estimated coefficients for the regressors. \cr
#' \code{deltas} \tab Estimated spatial coefficients. \cr
#' \code{rest.se} \tab Estimated standard errors for the
#' estimates of \emph{beta}. \cr
#' \code{deltas.se} \tab Estimated standard errors for the estimates of
#' the spatial coefficients (\code{deltas}). \cr
#' \code{resvar} \tab Estimated covariance matrix for the estimates of
#' \emph{beta's} and spatial coefficients (\code{deltas}).\cr
#' \code{LL} \tab Value of the likelihood function at the
#' maximum-likelihood estimates. \cr
#' \code{R2} \tab Coefficient of determination for each equation,
#' obtained as the squared of the correlation coefficient between the
#' corresponding explained variable and its estimate.
#' \code{spsurml} also shows a \emph{global} coefficient of
#' determination obtained, in the same manner, for the set of
#' the \emph{G} equations. \cr
#' \code{Sigma} \tab Estimated covariance matrix for the residuals of
#' the \emph{G} equations. \cr
#' \code{fdHess} \tab Logical value of \code{fdHess} argument when
#' computing numerical covariances. \cr
#' \code{residuals} \tab Residuals of the model. \cr
#' \code{df.residuals} \tab Degrees of freedom for the residuals. \cr
#' \code{fitted.values} \tab Estimated values for the dependent
#' variables. \cr
#' \code{BP} \tab Value of the Breusch-Pagan statistic to test the
#' null hypothesis of diagonality among the errors of the \emph{G}
#' equations. \cr
#' \code{LMM} \tab Marginal Lagrange Multipliers,
#' LM(\eqn{\rho}|\eqn{\lambda}) and
#' LM(\eqn{\lambda}|\eqn{\rho}), to test for omitted spatial effects
#' in the specification. \cr
#' \code{G} \tab Number of equations. \cr
#' \code{N} \tab Number of cross-sections or spatial units. \cr
#' \code{Tm} \tab Number of time periods. \cr
#' \code{p} \tab Number of regressors by equation (including intercepts). \cr
#' \code{Y} \tab Vector \emph{Y} of the explained variables of the
#' SUR model. \cr
#' \code{X} \tab Matrix \emph{X} of the regressors of the SUR model. \cr
#' \code{W} \tab Spatial weighting matrix. \cr
#' \code{zero.policy} \tab Logical value of \code{zero.policy} . \cr
#' \code{interval} \tab Search interval for spatial parameter. \cr
#' \code{listw_style} \tab Style of neighborhood matrix \code{W}. \cr
#' \code{trs} \tab Either \code{NULL} or vector of powered spatial weights
#' matrix traces output by \code{trW}. \cr
#' \code{insert} \tab Logical value to check if \code{is.null(trs)}. \cr
#' }
#'
#' @section Control arguments:
#' \tabular{ll}{
#' \code{tol} \tab Numerical value for the tolerance for the estimation
#' algorithm until convergence. Default = 1e-3. \cr
#' \code{maxit} \tab Maximum number of iterations until convergence;
#' it must be an integer value. Default = 200. \cr
#' \code{trace} \tab A logical value to show intermediate results during
#' the estimation process. Default = \code{TRUE}. \cr
#' \code{fdHess} \tab Compute variance-covariance matrix using the numerical
#' hessian. Suited for large samples. Default = \code{FALSE} \cr
#' \code{Imult} \tab default 2; used for preparing the Cholesky
#' decompositions for updating in the Jacobian function \cr
#' \code{super} \tab if \code{NULL} (default), set to \code{FALSE} to use
#' a simplicial decomposition for the sparse Cholesky decomposition and
#' method "Matrix_J", set to as.logical(NA) for method "Matrix", if
#' \code{TRUE}, use a supernodal decomposition \cr
#' \code{cheb_q} \tab default 5; highest power of the approximating
#' polynomial for the Chebyshev approximation \cr
#' \code{MC_p} \tab default 16; number of random variates \cr
#' \code{MC_m} \tab default 30; number of products of random variates
#' matrix and spatial weights matrix \cr
#' \code{spamPivot} \tab default "MMD", alternative "RCM" \cr
#' \code{in_coef} \tab default 0.1, coefficient value for initial Cholesky
#' decomposition in "spam_update" \cr
#' \code{type} \tab default "MC", used with method "moments"; alternatives
#' "mult" and "moments", for use if trs is missing \cr
#' \code{correct} \tab default \code{TRUE}, used with method "moments" to
#' compute the Smirnov/Anselin correction term \cr
#' \code{trunc} \tab default \code{TRUE}, used with method "moments" to
#' truncate the Smirnov/Anselin correction term \cr
#' \code{SE_method} \tab default "LU", may be "MC" \cr
#' \code{nrho} \tab default 200, as in SE toolbox; the size of the first
#' stage lndet grid; it may be reduced to for example 40 \cr
#' \code{interpn} \tab default 2000, as in SE toolbox; the size of the
#' second stage lndet grid \cr
#' \code{SElndet} \tab default \code{NULL}, may be used to pass a
#' pre-computed SE toolbox style matrix of coefficients and their lndet
#' values to the "SE_classic" and "SE_whichMin" methods \cr
#' \code{LU_order} \tab default \code{FALSE}; used in "LU_prepermutate",
#' note warnings given for lu method \cr
#' \code{pre_eig} \tab default \code{NULL}; may be used to pass a
#' pre-computed vector of eigenvalues \cr
#' }
#'
#' @author
#' \tabular{ll}{
#' Fernando López \tab \email{fernando.lopez@@upct.es} \cr
#' Román Mínguez \tab \email{roman.minguez@@uclm.es} \cr
#' Jesús Mur \tab \email{jmur@@unizar.es} \cr
#' }
#'
#' @references
#' \itemize{
#' \item Anselin, L. (1988). \emph{Spatial econometrics: methods and models.}
#' Dordrecht: Kluwer
#' \item Bivand, R.S. and Piras G. (2015). Comparing Implementations of
#' Estimation Methods for Spatial Econometrics. \emph{Journal of
#' Statistical Software}, 63(18), 1-36.
#' \url{https://www.jstatsoft.org/v63/i18/}.
#' \item Bivand, R. S., Hauke, J., and Kossowski, T. (2013).
#' Computing the Jacobian in Gaussian spatial autoregressive models: An
#' illustrated comparison of available methods. \emph{ Geographical
#' Analysis}, 45(2), 150-179.
#' \item Breusch T., Pagan A. (1980). The Lagrange multiplier test and its
#' applications to model specification in econometrics.
#' \emph{Rev Econ Stud} 47: 239-254
#' \item Cliff, A.D. and Ord, J.K. (1981). \emph{Spatial processes: Models
#' and applications}, Pion.
#' \item LeSage J and Pace, R.K. (2009). \emph{Introduction to Spatial
#' Econometrics.} CRC Press, Boca Raton.
#' \item López, F.A., Mur, J., and Angulo, A. (2014). Spatial model
#' selection strategies in a SUR framework. The case of regional
#' productivity in EU. \emph{Annals of Regional Science}, 53(1), 197-220.
#' \item Mur, J., López, F., and Herrera, M. (2010). Testing for spatial
#' effects in seemingly unrelated regressions.
#' \emph{Spatial Economic Analysis}, 5(4), 399-440.
#' \item Ord, J.K. (1975). Estimation methods for models of spatial
#' interaction, \emph{Journal of the American Statistical Association},
#' 70, 120-126;
#' }
#'
#' @seealso
#' \code{\link{spsur3sls}}, \code{\link[spatialreg]{lagsarlm}},
#' \code{\link{lmtestspsur}}, \code{\link{wald_betas}},
#' \code{\link{lrtest}}
#'
#' @examples
#'
#' #################################################
#' ######## CROSS SECTION DATA (G>1; Tm=1) ########
#' #################################################
#'
#' #### Example 1: Spatial Phillips-Curve. Anselin (1988, p. 203)
#' rm(list = ls()) # Clean memory
#' data(spc)
#' Tformula <- WAGE83 | WAGE81 ~ UN83 + NMR83 + SMSA | UN80 + NMR80 + SMSA
#' spcsur.sim <- spsurml(formula = Tformula, data = spc, type = "sim")
#' summary(spcsur.sim)
#' # All the coefficients in a single table.
#' print(spcsur.sim)
#' # Plot of the coefficients of each equation in different graphs
#' plot(spcsur.sim)
#'
#' ## A SUR-SLX model
#' ## (listw argument can be either a matrix or a listw object )
#' spcsur.slx <- spsurml(formula = Tformula, data = spc, type = "slx",
#' listw = Wspc)
#' summary(spcsur.slx)
#' # All the coefficients in a single table.
#' print(spcsur.slx)
#' # Plot of the coefficients in a single graph
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.slx, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' nrow = 2)
#' }
#'
#' ## VIP: The output of the whole set of the examples can be examined
#' ## by executing demo(demo_spsurml, package="spsur")
#'
#' \donttest{
#' ### A SUR-SLM model
#' spcsur.slm <- spsurml(formula = Tformula, data = spc, type = "slm",
#' listw = Wspc)
#' summary(spcsur.slm)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.slm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' #' ### A SUR-SEM model
#' spcsur.sem <- spsurml(formula = Tformula, data = spc, type = "sem",
#' listw = Wspc)
#' summary(spcsur.sem)
#' print(spcsur.sem)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ### A SUR-SDM model
#' spcsur.sdm <- spsurml(formula = Tformula, data = spc, type = "sdm",
#' listw = Wspc)
#' summary(spcsur.sdm)
#' print(spcsur.sdm)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sdm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ## A SUR-SDM model with different spatial lags in each equation
#' TformulaD <- ~ UN83 + NMR83 + SMSA | UN80
#' spcsur.sdm2 <- spsurml(formula = Tformula, data = spc, type = "sdm",
#' listw = Wspc, Durbin = TformulaD)
#' summary(spcsur.sdm2)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sdm2, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#' ### A SUR-SDEM model
#' spcsur.sdem <- spsurml(formula = Tformula, data = spc, type = "sdem",
#' listw = Wspc)
#' print(spcsur.sdem)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sdem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ### A SUR-SARAR model
#' spcsur.sarar <- spsurml(formula = Tformula, data = spc, type = "sarar",
#' listw = Wspc, control = list(tol = 0.1))
#' print(spcsur.sarar)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.sarar, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ### A SUR-GNM model
#' spcsur.gnm <- spsurml(formula = Tformula, data = spc, type = "gnm",
#' listw = Wspc, control = list(tol = 0.1))
#' print(spcsur.gnm)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.gnm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ## A A SUR-GNM model model with different spatial lags in each equation
#' TformulaD <- ~ UN83 + NMR83 + SMSA | UN80
#' spcsur.gnm2 <-spsurml(formula = Tformula, data = spc, type = "gnm",
#' listw = Wspc, Durbin = TformulaD,
#' control = list(tol = 0.1))
#' print(spcsur.gnm2)
#' if (require(gridExtra)) {
#' pl <- plot(spcsur.gnm2, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' }
#'
#' ##################################################
#' ######### CLASSIC PANEL DATA G=1; Tm>1 ########
#' ##################################################
#' #
#' ##### Example 2: Homicides + Socio-Economics (1960-90)
#' ## Homicides and selected socio-economic characteristics for continental
#' ## U.S. counties.
#' ## Data for four decennial census years: 1960, 1970, 1980 and 1990.
#' ## \url{https://geodacenter.github.io/data-and-lab/ncovr/}
#'
#'\donttest{
#' ### It usually requires 1-2 minutes maximum...
#' rm(list = ls()) # Clean memory
#' ### Read NCOVR.sf object
#' data(NCOVR, package = "spsur")
#' nbncovr <- spdep::poly2nb(NCOVR.sf, queen = TRUE)
#' ### Some regions with no links...
#' lwncovr <- spdep::nb2listw(nbncovr, style = "W", zero.policy = TRUE)
#' Tformula <- HR80 | HR90 ~ PS80 + UE80 | PS90 + UE90
#' ### A SUR-SIM model
#' NCOVRSUR.sim <- spsurml(formula = Tformula, data = NCOVR.sf, type = "sim")
#' summary(NCOVRSUR.sim)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.sim, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]], nrow = 3)
#' }
#' ### A SUR-SLX model
#' NCOVRSUR.slx <- spsurml(formula = Tformula, data = NCOVR.sf, type = "slx",
#' listw = lwncovr, zero.policy = TRUE)
#' print(NCOVRSUR.slx)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.slx, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]], nrow = 2)
#' }
#'
#' ### A SUR-SLM model
#' ### method = "Matrix" (Cholesky) instead of "eigen"
#' ### (fdHess = TRUE to compute numerical covariances )
#' NCOVRSUR.slm <- spsurml(formula = Tformula, data = NCOVR.sf,
#' type = "slm", listw = lwncovr, method = "Matrix",
#' zero.policy = TRUE, control = list(fdHess = TRUE))
#' summary(NCOVRSUR.slm)
#'
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.slm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' # LR test for nested models
#' anova(NCOVRSUR.sim, NCOVRSUR.slm)
#'
#' ### A SUR-SDM model with different spatial lags in each equation
#' ### Analytical covariances (default)
#' TformulaD <- ~ PS80 + UE80 | PS90
#' NCOVRSUR.sdm <- spsurml(formula = Tformula, data = NCOVR.sf,
#' type = "sdm", listw = lwncovr, method = "Matrix",
#' Durbin = TformulaD, zero.policy = TRUE)
#' print(NCOVRSUR.sdm)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.sdm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#' ### A SUR-SEM model
#' NCOVRSUR.sem <- spsurml(formula = Tformula, data = NCOVR.sf,
#' type = "sem", listw = lwncovr, method = "Matrix",
#' zero.policy = TRUE, control = list(fdHess = TRUE))
#' print(NCOVRSUR.sem)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.sem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'
#' ### A SUR-SDEM model
#' NCOVRSUR.sdem <-spsurml(formula = Tformula, data = NCOVR.sf,
#' type = "sdem", listw = lwncovr, method = "Matrix",
#' zero.policy = TRUE, control = list(fdHess = TRUE))
#' print(NCOVRSUR.sdem)
#' if (require(gridExtra)) {
#' pl <- plot(NCOVRSUR.sdem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$pldeltas, nrow = 3)
#' }
#'}
#'
#' ###############################################
#' ## MULTI-DIMENSIONAL SUR PANEL G>1; Tm>1 ###
#' ###############################################
#' ##### Reshape NCOVR in panel format
#' \donttest{
#' N <- nrow(NCOVR.sf)
#' Tm <- 4
#' index_time <- rep(1:Tm, each = N)
#' index_indiv <- rep(1:N, Tm)
#' pHR <- c(NCOVR.sf$HR60, NCOVR.sf$HR70, NCOVR.sf$HR80, NCOVR.sf$HR90)
#' pPS <- c(NCOVR.sf$PS60, NCOVR.sf$PS70, NCOVR.sf$PS80, NCOVR.sf$PS90)
#' pUE <- c(NCOVR.sf$UE60, NCOVR.sf$UE70, NCOVR.sf$UE80, NCOVR.sf$UE90)
#' pDV <- c(NCOVR.sf$DV60, NCOVR.sf$DV70, NCOVR.sf$DV80, NCOVR.sf$DV90)
#' pFP <- c(NCOVR.sf$FP59, NCOVR.sf$FP70, NCOVR.sf$FP80, NCOVR.sf$FP90)
#' pSOUTH <- rep(NCOVR.sf$SOUTH, Tm)
#' pNCOVR <- data.frame(indiv = index_indiv, time = index_time,
#' HR = pHR, PS = pPS, UE = pUE, DV = pDV,
#' FP = pFP, SOUTH = pSOUTH)
#' pform <- HR | DV | FP ~ PS + UE | PS + UE + SOUTH | PS
#' ### SIM
#' ### Remark: It is necessary to provide Tm value as argument
#' ### when G>1 && Tm>1
#' pNCOVRSUR.sim <- spsurml(formula = pform, data = pNCOVR,
#' type = "sim", Tm = Tm)
#' print(pNCOVRSUR.sim)
#' if (require(gridExtra)) {
#' pl <- plot(pNCOVRSUR.sim, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$lplbetas[[3]], nrow = 3)
#' }
#' # SLM
#' pNCOVRSUR.slm <- spsurml(formula = pform, data = pNCOVR,
#' listw = lwncovr, type = "slm", method = "Matrix", Tm = Tm,
#' zero.policy = TRUE, control= list(fdHess = TRUE))
#' print(pNCOVRSUR.slm)
#' if (require(gridExtra)) {
#' pl <- plot(pNCOVRSUR.slm, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$lplbetas[[3]], pl$pldeltas, nrow = 4)
#' }
#'
#' pNCOVRSUR.sem <- spsurml(formula = pform, data = pNCOVR,
#' listw = lwncovr, type = "sem", method = "Matrix", Tm = Tm,
#' zero.policy = TRUE, control= list(fdHess = TRUE))
#' print(pNCOVRSUR.sem)
#' if (require(gridExtra)) {
#' pl <- plot(pNCOVRSUR.sem, viewplot = FALSE)
#' grid.arrange(pl$lplbetas[[1]], pl$lplbetas[[2]],
#' pl$lplbetas[[3]], pl$pldeltas, nrow = 4)
#' }
#' }
#' @export
spsurml <- function(formula = NULL, data = NULL, na.action,
                    listw = NULL, type = "sim", Durbin = NULL,
                    method = "eigen",
                    zero.policy = NULL,
                    interval = NULL,
                    trs = NULL,
                    R = NULL, b = NULL,
                    X = NULL, Y = NULL,
                    G = NULL, N = NULL, Tm = NULL,
                    p = NULL, control = list() ) {
  # Maximum-likelihood estimation of spatial SUR models.
  # Input can be given either as formula + data (model-frame route) or as
  # pre-built X/Y matrices together with the dimensions G (equations),
  # N (cross-sectional units), Tm (time periods) and p (regressors/equation).
  #
  # Merge user-supplied control options into the defaults; warn about any
  # option name that is not recognized.
  con <- list(tol = 0.001, maxit = 200, trace = TRUE,
              fdHess = NULL,
              Imult = 2, cheb_q = 5, MC_p = 16L, MC_m = 30L, super = NULL,
              spamPivot = "MMD", in_coef = 0.1, type = "MC", correct = TRUE,
              trunc = TRUE, SE_method = "LU", nrho = 200, interpn = 2000,
              SElndet = NULL, LU_order = FALSE,
              pre_eig = NULL)
  nmsC <- names(con)
  con[(namc <- names(control))] <- control
  if (length(noNms <- namc[!namc %in% nmsC]))
    warning("unknown names in control: ", paste(noNms, collapse = ", "))
  # Normalize the spatial weights argument: accept a listw object, a base
  # matrix, or a Matrix, and derive both the dense W and the listw form.
  # Spatial types other than "sim" require a weights specification.
  if (!(type == "sim")) {
    if (is.null(listw) || !inherits(listw,c("listw","Matrix","matrix")))
      stop("listw format unknown or NULL")
    if (inherits(listw, "listw")) {
      if (is.null(formula) || is.null(data)) {
        W <- Matrix::Matrix(spdep::listw2mat(listw))
      }
    }
    if (inherits(listw, "matrix")) {
      W <- Matrix::Matrix(listw)
      listw <- spdep::mat2listw(W)
    }
    if (inherits(listw, "Matrix")) {
      W <- listw
      listw <- spdep::mat2listw(as.matrix(W))
    }
  } else W <- NULL
  # Fall back to the spatialreg global option when zero.policy is not given.
  if (is.null(zero.policy))
    zero.policy <- spatialreg::get.ZeroPolicyOption()
  # Row-standardised ("W") or variance-stabilised ("S") weights may admit a
  # similar-to-symmetric transformation, which speeds up some methods.
  can.sim <- FALSE
  if (!(is.null(listw)) && listw$style %in% c("W", "S")) {
    can.sim <- spatialreg::can.be.simmed(listw)
  }
  if (!is.null(Tm) && !is.null(G) && Tm > 1 && G == 1){
    # Change dimensions (assumption: matrix as data)
    G <- Tm
    Tm <- 1
  }
  if (!is.null(formula) && (!inherits(formula, "Formula")))
    formula <- Formula::Formula(formula)
  cl <- match.call()
  # ---- Formula + data route: build Y, X and the model dimensions ----------
  if (!is.null(formula) && !is.null(data)) {
    mt <- terms(formula, data = data)
    mf <- lm(formula, data = data, na.action = na.action,
             method = "model.frame")
    mf$drop.unused.levels <- TRUE
    na.act <- attr(mf, "na.action")
    if (!(type == "sim")) {
      # Drop the neighbours of observations removed by na.action so the
      # weights stay aligned with the estimation sample.
      if (!is.null(na.act)) {
        subset <- !(1:length(listw$neighbours) %in% na.act)
        listw <- subset(listw, subset, zero.policy = zero.policy)
      }
      W <- Matrix::Matrix(spdep::listw2mat(listw))
    }
    # Durbin terms only make sense for the Durbin-type specifications.
    if (any(type == c("gnm", "sdm", "sdem", "slx"))) {
      if(!inherits(Durbin, "formula")) Durbin <- TRUE
    } else { Durbin <- FALSE }
    get_XY <- get_data_spsur(formula = formula, mf = mf,
                             Durbin = Durbin,
                             listw = listw,
                             zero.policy = zero.policy,
                             N = N, Tm = Tm)
    Y <- get_XY$Y
    X <- get_XY$X
    G <- get_XY$G
    N <- get_XY$N
    Tm <- get_XY$Tm
    p <- get_XY$p
    dvars <- get_XY$dvars
    if (Tm > 1 && G == 1) {
      # Change dimensions in this case with Matrix Data
      G <- Tm
      Tm <- 1
    }
    rm(get_XY)
    if (length(p) == 1) p <- rep(p,G)
  } else {# Input data in matrix form...
    dvars <- vector("list", G)
    for (i in 1:G) {
      dvars[[i]] <- c(p[i], 0L)
    }
  }
  if (length(p) == 1) p <- rep(p,G)
  names(p) <- NULL
  # Linear restrictions R*beta = b: re-parameterize X accordingly, keeping
  # the originals to map the results back at the end.
  if (!is.null(R) && !is.null(b)) {
    Xorig <- X
    porig <- p
    restr <- X_restr(X = X, R = R, b = b, p = p)
    X <- restr$Xstar
    p <- restr$pstar
  }
  #### ASSIGN DATA TO A NEW ENVIRONMENT ##############
  # The fitting and covariance routines communicate through this environment.
  similar <- FALSE
  env <- new.env()
  assign("Y", Y, envir = env)
  assign("X", X, envir = env)
  assign("N", N, envir = env)
  assign("G", G, envir = env)
  assign("Tm", Tm, envir = env)
  assign("p", p, envir = env)
  assign("dvars", dvars, envir = env)
  # Example code for the Jacobian determinant
  if (!(is.null(listw))) {
    assign("listw", listw, envir = env)
    assign("n", length(listw$neighbours), envir = env)
    assign("similar", FALSE, envir = env)
    assign("can.sim", can.sim, envir = env)
    assign("verbose", con$trace, envir = env)
    assign("family", "SAR", envir = env) # TODO: check other options
    # Pre-compute the log-determinant machinery for the chosen method;
    # not needed for "sim" or "slx" (no spatial parameter to profile).
    if (!(type == "sim" || type == "slx")) {
      interval <- spatialreg::jacobianSetup(method, env, con,
                                            pre_eig = con$pre_eig,
                                            trs = trs,
                                            interval = interval)
      assign("interval", interval, envir = env)
    }
  }
  # ---- Dispatch to the type-specific fitting routine ----------------------
  # Durbin variants reuse the corresponding non-Durbin fitters (the Durbin
  # terms are already folded into X by get_data_spsur).
  if (any(type == c("sim","slm","sem","sarar")))
    name_fit <- paste("fit_spsur", type, sep = "")
  if (type == "sdm") name_fit <- "fit_spsurslm"
  if (type == "sdem") name_fit <- "fit_spsursem"
  if (type == "slx") name_fit <- "fit_spsursim"
  if (type == "gnm") name_fit <- "fit_spsursarar"
  fit <- get(name_fit)
  if (con$trace) start_fit <- proc.time()[3]
  # Maximize concentrate likelihood
  z <- fit(env = env, con = con)
  if (con$trace) {
    end_fit <- proc.time()[3]
    cat("Time to fit the model: ",
        end_fit-start_fit," seconds\n")
  }
  # ---- Label the estimates and compute bookkeeping quantities -------------
  coefficients <- z$coefficients
  deltas <- z$deltas
  Sigma <- z$Sigma
  names_sigma <- NULL
  for (i in 1:G){
    new_name <- paste0("sigma",i,sep="")
    names_sigma <- c(names_sigma,new_name)
  }
  colnames(Sigma) <- rownames(Sigma) <- names_sigma
  LL <- z$LL
  # Parameter count: betas + spatial deltas + free elements of Sigma.
  parameters <- length(coefficients) +
    length(deltas) + G*(G + 1)/2
  df.residual <- G*N*Tm - parameters
  dn <- colnames(X)
  if (is.null(dn)) dn <- paste0("x", 1L:(G*sum(p)), sep = "")
  names(coefficients) <- dn
  # Spatial parameter names: rho_* for lag models, lambda_* for error
  # models, and both for sarar/gnm.
  names_deltas <- NULL
  for (i in 1:G) {
    if (any(type == c("slm","sdm")))
      names_deltas[i] <- paste("rho", i, sep = "_")
    if (any(type == c("sem","sdem")))
      names_deltas[i] <- paste("lambda", i, sep = "_")
    if (any(type == c("sarar", "gnm"))) {
      names_deltas[i] <- paste("rho", i, sep = "_")
      names_deltas[G + i] <- paste("lambda", i, sep = "_")
    }
  }
  names(deltas) <- names_deltas
  assign("Sigma", Matrix::Matrix(z$Sigma), envir = env)
  if (!(type == "sim" || type == "slx")) {
    assign("deltas",Matrix::Diagonal(length(deltas),deltas),
           envir = env)
  }
  # ---- Variance-covariance matrix: analytical first, numerical fallback ---
  if (con$trace) start_cov <- proc.time()[3]
  fdHess <- con$fdHess
  if (is.null(fdHess) || !(fdHess) ||
      any(type == c("sim","slx"))) {
    # ANALYTICAL VARIANCE-COVARIANCE MATRIX
    if (any(type == c("sim","slx")))
      name_cov_fit <- "cov_spsursim_f"
    if (any(type == c("slm","sem","sarar")))
      name_cov_fit <- paste("cov_spsur", type, "_f", sep = "")
    if (type == "sdm") name_cov_fit <- "cov_spsurslm_f"
    if (type == "sdem") name_cov_fit <- "cov_spsursem_f"
    if (type == "gnm") name_cov_fit <- "cov_spsursarar_f"
    cov_fit <- get(name_cov_fit)
    allcov <- try( cov_fit(env = env) )
    if (inherits(allcov, "try-error")) {
      # Analytical route failed: fall through to the numerical Hessian below.
      cat("Impossible to compute analytical covariances ","\n")
      fdHess <- TRUE
    } else {
      fdHess <- FALSE
      rest.se <- allcov$rest.se
      names(rest.se) <- names(coefficients)
      deltas.se <- allcov$deltas.se
      names(deltas.se) <- names(deltas)
      if (!is.null(allcov$LMM)) LMM <- allcov$LMM else LMM <- NULL
      if (!is.null(allcov$BP)) BP <- allcov$BP else BP <- NULL
      if (any(type == c("sim","slx"))) {
        resvar <- allcov$vcov
        colnames(resvar) <- rownames(resvar) <-
          names(coefficients)
      } else {
        resvar <- allcov$vcov
        # Names for the free elements of Sigma (upper triangle, row-major).
        names_var_Sigma <- NULL
        for (k in 1:G) {
          for (l in k:G) {
            new_name <- paste0("sigma",k,l,sep="")
            names_var_Sigma <- c(names_var_Sigma,new_name)
          }
        }
        colnames(resvar) <- rownames(resvar) <-
          c(names(coefficients),
            names(deltas),names_var_Sigma)
        ## VIP: reorder the covariance matrix so it uses
        ## the same ordering as the spdep and spatialreg packages...
        resvar <- resvar[c(names_var_Sigma,names(deltas),names(coefficients)),
                         c(names_var_Sigma,names(deltas),names(coefficients))]
      }
    }
  }
  if (fdHess) {
    # NUMERICAL VARIANCE-COVARIANCE MATRIX (finite-difference Hessian of the
    # concentrated likelihood for the deltas; GLS formula for the betas).
    if (con$trace){
      cat("Computing numerical covariances...","\n")
    }
    if (any(type == c("slm","sdm"))) name_cov_fit <- "f_sur_lag"
    if (any(type == c("sem","sdem"))) name_cov_fit <- "f_sur_sem"
    if (any(type == c("sarar","gnm"))) name_cov_fit <- "f_sur_sarar"
    cov_fit <- get(name_cov_fit)
    vardeltas <- solve(numDeriv::hessian(func = cov_fit,
                                         x = deltas, env = env))
    deltas.se <- as.vector(sqrt(diag(vardeltas)))
    names(deltas.se) <- names(deltas)
    IT <- Matrix::Diagonal(Tm)
    IR <- Matrix::Diagonal(N)
    Sigmainv <- Matrix::solve(z$Sigma)
    # Inverse of the full error covariance Omega = I_T (x) Sigma (x) I_N.
    OMEinv <- kronecker(kronecker(IT, Sigmainv), IR)
    varbetas <- Matrix::solve(Matrix::crossprod(X, OMEinv %*% X))
    rest.se <- sqrt(diag(as.matrix(varbetas)))
    names(rest.se) <- names(coefficients)
    rm(IT,IR,OMEinv)
    resvar <- as.matrix(Matrix::bdiag(list(vardeltas, varbetas)))
    colnames(resvar) <- c(names(deltas), names(coefficients))
    rownames(resvar) <- colnames(resvar)
    ## Added by Fernando 2020-03-16:
    # Include the Breusch-Pagan diagonality statistic
    Sigma_corr <- stats::cov2cor(Sigma)
    index_ltri <- lower.tri(Sigma_corr)
    BP <- N*Tm*sum(Sigma_corr[index_ltri]^2)
    # Computing the marginal tests requires the variance-covariance matrix of
    # ALL model parameters. When covariances are computed numerically, the
    # covariances of the sigmas are not available (only deltas and betas).
    # Suggestion: warn the user that marginal tests need the analytical covariances.
    LMM <- NULL
  }
  if (con$trace) {
    end_cov <- proc.time()[3]
    cat("Time to compute covariances: ",
        end_cov - start_cov," seconds \n")
  }
  # Compute R^2 general and for each equation
  Yhat <- z$fitted.values
  R2_pool <- as.numeric((cor(Y,Yhat))^2)
  names(R2_pool) <- c("R2_pool")
  arrYhat <- array(Yhat,c(N,G,Tm))
  arrY <- array(Y,c(N,G,Tm))
  R2_eq <- rep(0,G)
  for (i in 1:G) {
    R2_eq[i] <- cor(matrix(arrY[,i,], ncol = 1),
                    matrix(arrYhat[,i,], ncol = 1))^2
  }
  names(R2_eq) <- paste0("R2_eq", 1:G)
  # ---- Map restricted estimates back onto the original parameterization ---
  if (!is.null(R) && !is.null(b)) {
    namesXorig <- colnames(Xorig)
    coefforig <- seorig <- rep(0, ncol(Xorig))
    names(coefforig) <- names(seorig) <- colnames(Xorig)
    coefforig[names(coefficients)] <- coefficients
    seorig[names(rest.se)] <- rest.se
    b <- as.numeric(b)
    for (i in 1:nrow(R)) {
      lidxRi <- R[i,] != 0
      widxRi <- which(lidxRi)
      vidxRi <- R[i,lidxRi]
      ## Check if the constraint is the equality between coefficients
      if ((length(widxRi) == 2) && (sum(vidxRi) == 0)
          && (b[i] == 0)) {
        coefforig[widxRi[2]] <- coefforig[widxRi[1]]
        seorig[widxRi[2]] <- seorig[widxRi[1]]
        # Updates covariance matrix to include constrained coefficient
        name1 <- names(coefforig)[widxRi[1]]
        name2 <- names(coefforig)[widxRi[2]]
        pr1 <- rbind(resvar, resvar[name1, ])
        rownames(pr1) <- c(rownames(resvar), name2)
        pr2 <- cbind(pr1, c(resvar[, name1], resvar[name1, name1]))
        colnames(pr2) <- rownames(pr2)
        resvar <- pr2
        rm(pr1, pr2)
      }
      # ## Check if the constraint is individual coefficient = 0
      # if ((length(widxRi) == 1) && (vidxRi == 0)&& (b[i] == 0)) {
      #   coefforig[widxRi] <- seorig[widxRi] <- 0
      # }
    }
    X <- Xorig
    p <- porig
    coefficients <- coefforig
    rest.se <- seorig
  }
  # ---- Assemble the spsur object ------------------------------------------
  ret <- new_spsur(list(call = cl, type = type,
                        method = method, Durbin = Durbin,
                        G = G, N = N, Tm = Tm,
                        deltas = deltas, deltas.se = deltas.se,
                        coefficients = coefficients, rest.se = rest.se,
                        resvar = resvar, fdHess = fdHess,
                        p = p, dvars = dvars,
                        parameters = parameters,
                        LL = LL, R2 = c(R2_pool,R2_eq),
                        Sigma = Sigma,
                        BP = BP, LMM = LMM,
                        residuals = z$residuals, df.residual = df.residual,
                        fitted.values = z$fitted.values, se.fit = NULL,
                        Y = Y, X = X, W = W,
                        similar = similar, can.sim = can.sim,
                        zero.policy = zero.policy, listw_style = listw$style,
                        interval = interval,
                        insert = !is.null(trs)))
  # Record the regions with no neighbours when zero.policy allowed them.
  if (zero.policy) {
    zero.regs <- attr(listw$neighbours, "region.id")[which(
      spdep::card(listw$neighbours) == 0)]
    if (length(zero.regs) > 0L)
      attr(ret, "zero.regs") <- zero.regs
  }
  if(exists("na.act")) { # may not exist when data are supplied as matrices
    if (!is.null(na.act)) ret$na.action <- na.act
  }
  ret
}
|
\name{amn}
\alias{amn}
\alias{18.5.7}
\alias{18.5.8}
\title{matrix a on page 637}
\description{
Matrix of coefficients of the Taylor series for
\eqn{\sigma(z)}{sigma(z)} as described on page 636 and tabulated on page
637.
}
\usage{
amn(u)
}
\arguments{
\item{u}{Integer specifying size of output matrix}
}
\details{
Reproduces the coefficients \eqn{a_{mn}}{a_mn} on page 637 according to
recurrence formulae 18.5.7 and 18.5.8, p636. Used in equation
18.5.6.
}
\author{Robin K. S. Hankin}
\examples{
amn(12) #page 637
}
\keyword{math}
|
/man/amn.Rd
|
no_license
|
RobinHankin/elliptic
|
R
| false
| false
| 551
|
rd
|
\name{amn}
\alias{amn}
\alias{18.5.7}
\alias{18.5.8}
\title{matrix a on page 637}
\description{
Matrix of coefficients of the Taylor series for
\eqn{\sigma(z)}{sigma(z)} as described on page 636 and tabulated on page
637.
}
\usage{
amn(u)
}
\arguments{
\item{u}{Integer specifying size of output matrix}
}
\details{
Reproduces the coefficients \eqn{a_{mn}}{a_mn} on page 637 according to
recurrence formulae 18.5.7 and 18.5.8, p636. Used in equation
18.5.6.
}
\author{Robin K. S. Hankin}
\examples{
amn(12) #page 637
}
\keyword{math}
|
# description -------------------------------------------------------------
# TidyTuesday week 37 Formula 1: alluvial chart linking F1 drivers to the
# constructors with which they scored their points (1950-2021).
# set up -----------------------------------------------------------------
# Fix: the base function is install.packages(); install.package() does not
# exist and would error before pacman could be bootstrapped.
if(!require(pacman)) install.packages("pacman")
devtools::install_github("davidsjoberg/ggsankey")
pacman::p_load(tidyverse,
               showtext,
               ggsankey,
               ggalluvial)
# NOTE(review): an empty family name is passed here, so no Google font is
# registered -- the intended font name appears to be missing. TODO confirm.
font_add_google("")
showtext_auto()
# One colour per constructor stratum, with matching label colours.
col_pallette <- c("#C30201",
                  "#06CFBA",
                  "#07007D",
                  "#FF7B09",
                  "#FFFFFF",
                  "#FEB800",
                  "#B5323C",
                  "#388748",
                  "#5B819D")
font_col <- c("white",
              "black",
              "white",
              "white",
              "black",
              "black",
              "white",
              "white",
              "white")
# load data --------------------------------------------------------------
tuesdata <- tidytuesdayR::tt_load(2021, week = 37)
# join results, with drivers and constructors
df_results_dr_co <- tuesdata$results %>%
  left_join(tuesdata$races %>% select("raceId", "year"), by = "raceId") %>%
  left_join(tuesdata$drivers, by = "driverId") %>%
  left_join(tuesdata$constructors, by = "constructorId")
# get the number of points by driver and constructor, keep pairs with at
# least 300 points, then pivot to long form (one row per node)
df_sankey_data <- df_results_dr_co %>%
  mutate(driver = paste(forename, surname)) %>%
  group_by(driverId, constructorId, driver, name) %>%
  rename(constructor = name) %>%
  summarise(total_points = sum(points)) %>%
  ungroup() %>%
  group_by(driverId) %>%
  filter(total_points >= 300) %>%
  group_by(constructorId) %>%
  mutate(total_constuctor_points = sum(total_points)) %>%
  arrange(desc(total_constuctor_points), desc(total_points)) %>%
  ungroup() %>%
  select(driver, constructor, total_points, total_constuctor_points) %>%
  pivot_longer(cols = c("driver", "constructor"), names_to = "what", values_to = "name")
# constructor display order, used to index into the colour palettes
fct_levels <- data.frame(constructor = levels((factor(df_sankey_data$name[df_sankey_data$what == "constructor"], ordered = TRUE, levels = unique(df_sankey_data$name[df_sankey_data$what == "constructor"]))))) %>%
  mutate(position = 1:n())
df_sankey_data2 <- df_sankey_data %>%
  merge(fct_levels, by.x = "name", by.y = "constructor", all.x = TRUE) %>%
  arrange(desc(total_constuctor_points), desc(total_points), desc(what)) %>%
  mutate(cohort = rep(1:(n()/2), each = 2)) %>%
  mutate(colour = ifelse(what == "driver", col_pallette[lead(position)], "grey60")) %>%
  mutate(colour2 = ifelse(what == "driver", "grey60", col_pallette[position])) %>%
  mutate(font_col = ifelse(what == "driver", "white", font_col[position]))
# Fix: dropped the dangling empty `scaling =` argument (ragg's default is
# used either way, but an empty named argument is a latent error).
ragg::agg_png("week37.png", width = 5, height = 5, units = "in", res = 300)
df_sankey_data2 %>%
  ggplot(aes(x = fct_rev(what), stratum = name, alluvium = cohort, y = total_points)) +
  geom_alluvium(aes(fill = colour), decreasing = FALSE) +
  scale_fill_identity() +
  geom_stratum(aes(fill= colour2), color = "black", decreasing = FALSE, size = 0.1) +
  geom_text(stat = "stratum", aes(label = name, color = font_col), decreasing = FALSE, size = 5) +
  scale_color_identity() +
  theme_void(base_size = 20) +
  theme(legend.position = "none",
        plot.background = element_rect(fill = "#353535"),
        plot.title = element_text(hjust = 0.5, color = "white"),
        plot.subtitle = element_text(hjust = 0.5, color = "white"),
        plot.caption = element_text(hjust = 0.5, color = "white"),
        text = element_text(lineheight = 0.3)) +
  labs(title = "History of Formula 1 (1950 - 2021)",
       subtitle = str_wrap("This alluvial graph links drivers to constructors by looking at where each driver scored their points. Only driver-constructor scores of >= 300 are included for visibility.",60),
       caption = paste("\n\n #TidyTuesday week 37 | dataviz by @kayleahaynes | Source: Ergast API by the way of Data is Plural \n\n"))
invisible(dev.off())
|
/2021/week37/rscript.r
|
no_license
|
kayleahaynes/TidyTuesday
|
R
| false
| false
| 3,989
|
r
|
# description -------------------------------------------------------------
# TidyTuesday week 37 Formula 1: alluvial chart linking F1 drivers to the
# constructors with which they scored their points (1950-2021).
# set up -----------------------------------------------------------------
# Fix: the base function is install.packages(); install.package() does not
# exist and would error before pacman could be bootstrapped.
if(!require(pacman)) install.packages("pacman")
devtools::install_github("davidsjoberg/ggsankey")
pacman::p_load(tidyverse,
               showtext,
               ggsankey,
               ggalluvial)
# NOTE(review): an empty family name is passed here, so no Google font is
# registered -- the intended font name appears to be missing. TODO confirm.
font_add_google("")
showtext_auto()
# One colour per constructor stratum, with matching label colours.
col_pallette <- c("#C30201",
                  "#06CFBA",
                  "#07007D",
                  "#FF7B09",
                  "#FFFFFF",
                  "#FEB800",
                  "#B5323C",
                  "#388748",
                  "#5B819D")
font_col <- c("white",
              "black",
              "white",
              "white",
              "black",
              "black",
              "white",
              "white",
              "white")
# load data --------------------------------------------------------------
tuesdata <- tidytuesdayR::tt_load(2021, week = 37)
# join results, with drivers and constructors
df_results_dr_co <- tuesdata$results %>%
  left_join(tuesdata$races %>% select("raceId", "year"), by = "raceId") %>%
  left_join(tuesdata$drivers, by = "driverId") %>%
  left_join(tuesdata$constructors, by = "constructorId")
# get the number of points by driver and constructor, keep pairs with at
# least 300 points, then pivot to long form (one row per node)
df_sankey_data <- df_results_dr_co %>%
  mutate(driver = paste(forename, surname)) %>%
  group_by(driverId, constructorId, driver, name) %>%
  rename(constructor = name) %>%
  summarise(total_points = sum(points)) %>%
  ungroup() %>%
  group_by(driverId) %>%
  filter(total_points >= 300) %>%
  group_by(constructorId) %>%
  mutate(total_constuctor_points = sum(total_points)) %>%
  arrange(desc(total_constuctor_points), desc(total_points)) %>%
  ungroup() %>%
  select(driver, constructor, total_points, total_constuctor_points) %>%
  pivot_longer(cols = c("driver", "constructor"), names_to = "what", values_to = "name")
# constructor display order, used to index into the colour palettes
fct_levels <- data.frame(constructor = levels((factor(df_sankey_data$name[df_sankey_data$what == "constructor"], ordered = TRUE, levels = unique(df_sankey_data$name[df_sankey_data$what == "constructor"]))))) %>%
  mutate(position = 1:n())
df_sankey_data2 <- df_sankey_data %>%
  merge(fct_levels, by.x = "name", by.y = "constructor", all.x = TRUE) %>%
  arrange(desc(total_constuctor_points), desc(total_points), desc(what)) %>%
  mutate(cohort = rep(1:(n()/2), each = 2)) %>%
  mutate(colour = ifelse(what == "driver", col_pallette[lead(position)], "grey60")) %>%
  mutate(colour2 = ifelse(what == "driver", "grey60", col_pallette[position])) %>%
  mutate(font_col = ifelse(what == "driver", "white", font_col[position]))
# Fix: dropped the dangling empty `scaling =` argument (ragg's default is
# used either way, but an empty named argument is a latent error).
ragg::agg_png("week37.png", width = 5, height = 5, units = "in", res = 300)
df_sankey_data2 %>%
  ggplot(aes(x = fct_rev(what), stratum = name, alluvium = cohort, y = total_points)) +
  geom_alluvium(aes(fill = colour), decreasing = FALSE) +
  scale_fill_identity() +
  geom_stratum(aes(fill= colour2), color = "black", decreasing = FALSE, size = 0.1) +
  geom_text(stat = "stratum", aes(label = name, color = font_col), decreasing = FALSE, size = 5) +
  scale_color_identity() +
  theme_void(base_size = 20) +
  theme(legend.position = "none",
        plot.background = element_rect(fill = "#353535"),
        plot.title = element_text(hjust = 0.5, color = "white"),
        plot.subtitle = element_text(hjust = 0.5, color = "white"),
        plot.caption = element_text(hjust = 0.5, color = "white"),
        text = element_text(lineheight = 0.3)) +
  labs(title = "History of Formula 1 (1950 - 2021)",
       subtitle = str_wrap("This alluvial graph links drivers to constructors by looking at where each driver scored their points. Only driver-constructor scores of >= 300 are included for visibility.",60),
       caption = paste("\n\n #TidyTuesday week 37 | dataviz by @kayleahaynes | Source: Ergast API by the way of Data is Plural \n\n"))
invisible(dev.off())
|
## Regression test for CDS(): pricing of a Tokyo Electric Power Co. Inc.
## standard East-Asia contract (STEC), JPY, 5Y tenor.
library(CDS)
## The reference object `truth1` was generated once with the code kept below
## (retained for provenance) and serialised to the .RData file loaded later.
## truth1 <- data.frame(TDate = as.Date("2014-04-15"),
##                      maturity = "5Y",
##                      contract ="STEC",
##                      parSpread = round(250.00, digits=2),
##                      upfront = round(701502, digits=-4),
##                      IRDV01 = round(-184.69, digits=0),
##                      price = 92.91,
##                      principal = round(709002, digits=-3),
##                      RecRisk01 = round(-1061.74, digits=-3),
##                      defaultExpo = round(5790998, digits=-3),
##                      spreadDV01 = round(4448.92, digits=0),
##                      currency = "JPY",
##                      ptsUpfront = round(0.0709, digits=2),
##                      freqCDS = "Q",
##                      pencouponDate = as.Date("2019-03-20"),
##                      backstopDate = as.Date("2014-02-14"),
##                      coupon = 100,
##                      recoveryRate = 0.35,
##                      defaultProb = round(0.1830, digits=2),
##                      notional = 1e7)
## save(truth1, file = "CDS.TokyoElectricPower.test.RData")
## Restore the reference data frame `truth1` from disk.
load("CDS.TokyoElectricPower.test.RData")
## Re-price the same contract and check it matches the stored truth.
tepco_cds <- CDS(notional = 1e7,
                 TDate = "2014-04-15",
                 maturity = "5Y",
                 contract = "STEC",
                 parSpread = 250,
                 currency = "JPY",
                 coupon = 100,
                 recoveryRate = 0.35,
                 isPriceClean = FALSE)
stopifnot(all.equal(truth1, CDSdf(tepco_cds)))
|
/pkg/tests/CDS.TokyoElectricPower.test.R
|
no_license
|
bdivet/CDS
|
R
| false
| false
| 1,563
|
r
|
## Regression test for CDS(): pricing of a Tokyo Electric Power Co. Inc.
## standard East-Asia contract (STEC), JPY, 5Y tenor.
library(CDS)
## The reference object `truth1` was generated once with the code kept below
## (retained for provenance) and serialised to the .RData file loaded later.
## truth1 <- data.frame(TDate = as.Date("2014-04-15"),
##                      maturity = "5Y",
##                      contract ="STEC",
##                      parSpread = round(250.00, digits=2),
##                      upfront = round(701502, digits=-4),
##                      IRDV01 = round(-184.69, digits=0),
##                      price = 92.91,
##                      principal = round(709002, digits=-3),
##                      RecRisk01 = round(-1061.74, digits=-3),
##                      defaultExpo = round(5790998, digits=-3),
##                      spreadDV01 = round(4448.92, digits=0),
##                      currency = "JPY",
##                      ptsUpfront = round(0.0709, digits=2),
##                      freqCDS = "Q",
##                      pencouponDate = as.Date("2019-03-20"),
##                      backstopDate = as.Date("2014-02-14"),
##                      coupon = 100,
##                      recoveryRate = 0.35,
##                      defaultProb = round(0.1830, digits=2),
##                      notional = 1e7)
## save(truth1, file = "CDS.TokyoElectricPower.test.RData")
## Restore the reference data frame `truth1` from disk.
load("CDS.TokyoElectricPower.test.RData")
## Re-price the same contract and check it matches the stored truth.
tepco_cds <- CDS(notional = 1e7,
                 TDate = "2014-04-15",
                 maturity = "5Y",
                 contract = "STEC",
                 parSpread = 250,
                 currency = "JPY",
                 coupon = 100,
                 recoveryRate = 0.35,
                 isPriceClean = FALSE)
stopifnot(all.equal(truth1, CDSdf(tepco_cds)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CentSim2D.R
\name{Idom.numCSup.bnd.tri}
\alias{Idom.numCSup.bnd.tri}
\title{Indicator for an upper bound for the domination number of Central Similarity Proximity Catch Digraph
(CS-PCD) by the exact algorithm - one triangle case}
\usage{
Idom.numCSup.bnd.tri(Xp, k, tri, t, M = c(1, 1, 1))
}
\arguments{
\item{Xp}{A set of 2D points which constitute the vertices of CS-PCD.}
\item{k}{A positive integer to be tested for an upper bound for the domination number of CS-PCDs.}
\item{tri}{A \eqn{3 \times 2} matrix with each row representing a vertex of the triangle.}
\item{t}{A positive real number which serves as the expansion parameter in CS proximity region in the
triangle \code{tri}.}
\item{M}{A 2D point in Cartesian coordinates or a 3D point in barycentric coordinates which serves as a
center in the interior of the triangle \code{tri}; default is \eqn{M=(1,1,1)}, i.e.
the center of mass of \code{tri}.}
}
\value{
A \code{list} with three elements
\item{domUB}{The upper bound \code{k} (to be checked) for the domination number of CS-PCD. It is prespecified
as \code{k} in the function arguments.}
\item{Idom.num.up.bnd}{The indicator for the upper bound for domination number of CS-PCD being the
specified value \code{k} or not. It returns 1 if the upper bound is \code{k}, and 0 otherwise.}
\item{ind.domset}{The vertices (i.e., data points) in the dominating set of size \code{k} if it exists,
otherwise it is \code{NULL}.}
}
\description{
Returns \eqn{I(}domination number of CS-PCD is less than or equal to \code{k}\eqn{)} where the vertices of the CS-PCD are the data points \code{Xp},
that is, returns 1 if the domination number of CS-PCD is less than or equal to the prespecified value \code{k}, returns 0
otherwise. It also provides the vertices (i.e., data points) in a dominating set of size \code{k} of CS-PCD.
CS proximity region is constructed with respect to the triangle \code{tri}\eqn{=T(A,B,C)} with
expansion parameter \eqn{t>0} and edge regions are based on the center \eqn{M=(m_1,m_2)}
in Cartesian coordinates or \eqn{M=(\alpha,\beta,\gamma)} in barycentric coordinates in the interior of \code{tri};
default is \eqn{M=(1,1,1)} i.e., the center of mass of \code{tri}.
Edges of \code{tri}, \eqn{AB}, \eqn{BC}, \eqn{AC}, are also labeled as 3, 1, and 2, respectively.
Loops are allowed in the digraph.
See also (\insertCite{ceyhan:mcap2012;textual}{pcds}).
Caveat: It takes a long time for large number of vertices (i.e., large number of row numbers).
}
\examples{
\dontrun{
A<-c(1,1); B<-c(2,0); C<-c(1.5,2);
Tr<-rbind(A,B,C);
n<-10
set.seed(1)
Xp<-runif.tri(n,Tr)$gen.points
M<-as.numeric(runif.tri(1,Tr)$g) #try also M<-c(1.6,1.0)
t<-.5
Idom.numCSup.bnd.tri(Xp,1,Tr,t,M)
for (k in 1:n)
print(c(k,Idom.numCSup.bnd.tri(Xp,k,Tr,t,M)))
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{Idom.numCSup.bnd.std.tri}}, \code{\link{Idom.num.up.bnd}}, \code{\link{Idom.numASup.bnd.tri}},
and \code{\link{dom.num.exact}}
}
\author{
Elvan Ceyhan
}
|
/man/Idom.numCSup.bnd.tri.Rd
|
no_license
|
elvanceyhan/pcds
|
R
| false
| true
| 3,056
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CentSim2D.R
\name{Idom.numCSup.bnd.tri}
\alias{Idom.numCSup.bnd.tri}
\title{Indicator for an upper bound for the domination number of Central Similarity Proximity Catch Digraph
(CS-PCD) by the exact algorithm - one triangle case}
\usage{
Idom.numCSup.bnd.tri(Xp, k, tri, t, M = c(1, 1, 1))
}
\arguments{
\item{Xp}{A set of 2D points which constitute the vertices of CS-PCD.}
\item{k}{A positive integer to be tested for an upper bound for the domination number of CS-PCDs.}
\item{tri}{A \eqn{3 \times 2} matrix with each row representing a vertex of the triangle.}
\item{t}{A positive real number which serves as the expansion parameter in CS proximity region in the
triangle \code{tri}.}
\item{M}{A 2D point in Cartesian coordinates or a 3D point in barycentric coordinates which serves as a
center in the interior of the triangle \code{tri}; default is \eqn{M=(1,1,1)}, i.e.
the center of mass of \code{tri}.}
}
\value{
A \code{list} with three elements
\item{domUB}{The upper bound \code{k} (to be checked) for the domination number of CS-PCD. It is prespecified
as \code{k} in the function arguments.}
\item{Idom.num.up.bnd}{The indicator for the upper bound for domination number of CS-PCD being the
specified value \code{k} or not. It returns 1 if the upper bound is \code{k}, and 0 otherwise.}
\item{ind.domset}{The vertices (i.e., data points) in the dominating set of size \code{k} if it exists,
otherwise it is \code{NULL}.}
}
\description{
Returns \eqn{I(}domination number of CS-PCD is less than or equal to \code{k}\eqn{)} where the vertices of the CS-PCD are the data points \code{Xp},
that is, returns 1 if the domination number of CS-PCD is less than or equal to the prespecified value \code{k}, returns 0
otherwise. It also provides the vertices (i.e., data points) in a dominating set of size \code{k} of CS-PCD.
CS proximity region is constructed with respect to the triangle \code{tri}\eqn{=T(A,B,C)} with
expansion parameter \eqn{t>0} and edge regions are based on the center \eqn{M=(m_1,m_2)}
in Cartesian coordinates or \eqn{M=(\alpha,\beta,\gamma)} in barycentric coordinates in the interior of \code{tri};
default is \eqn{M=(1,1,1)} i.e., the center of mass of \code{tri}.
Edges of \code{tri}, \eqn{AB}, \eqn{BC}, \eqn{AC}, are also labeled as 3, 1, and 2, respectively.
Loops are allowed in the digraph.
See also (\insertCite{ceyhan:mcap2012;textual}{pcds}).
Caveat: It takes a long time for large number of vertices (i.e., large number of row numbers).
}
\examples{
\dontrun{
A<-c(1,1); B<-c(2,0); C<-c(1.5,2);
Tr<-rbind(A,B,C);
n<-10
set.seed(1)
Xp<-runif.tri(n,Tr)$gen.points
M<-as.numeric(runif.tri(1,Tr)$g) #try also M<-c(1.6,1.0)
t<-.5
Idom.numCSup.bnd.tri(Xp,1,Tr,t,M)
for (k in 1:n)
print(c(k,Idom.numCSup.bnd.tri(Xp,k,Tr,t,M)))
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{Idom.numCSup.bnd.std.tri}}, \code{\link{Idom.num.up.bnd}}, \code{\link{Idom.numASup.bnd.tri}},
and \code{\link{dom.num.exact}}
}
\author{
Elvan Ceyhan
}
|
## Fit a cross-validated elastic net (alpha = 0.45, MAE loss) on the
## endometrium average-rank training set and append the fitted glmnet
## path to the model log file.
library(glmnet)
## Fix: spell out header = TRUE instead of the partially-matched `head = T`
## (`T` is a reassignable alias and `head` only works via partial matching).
mydata <- read.table("./TrainingSet/AvgRank/endometrium.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictors: columns 4..end
y <- as.matrix(mydata[, 1])               # response: first column
set.seed(123)                             # reproducible CV fold assignment
## Fix: renamed the result from `glm` (which shadowed stats::glm) to cv_fit.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.45, family = "gaussian", standardize = FALSE)
sink('./Model/EN/AvgRank/endometrium/endometrium_056.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
/Model/EN/AvgRank/endometrium/endometrium_056.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 368
|
r
|
## Fit a cross-validated elastic net (alpha = 0.45, MAE loss) on the
## endometrium average-rank training set and append the fitted glmnet
## path to the model log file.
library(glmnet)
## Fix: spell out header = TRUE instead of the partially-matched `head = T`
## (`T` is a reassignable alias and `head` only works via partial matching).
mydata <- read.table("./TrainingSet/AvgRank/endometrium.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictors: columns 4..end
y <- as.matrix(mydata[, 1])               # response: first column
set.seed(123)                             # reproducible CV fold assignment
## Fix: renamed the result from `glm` (which shadowed stats::glm) to cv_fit.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.45, family = "gaussian", standardize = FALSE)
sink('./Model/EN/AvgRank/endometrium/endometrium_056.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
## Exploratory Data Analysis -- Plot 1.
## Downloads the UCI household power consumption data, subsets it to
## 2007-02-01..2007-02-02, and writes a histogram of Global Active Power
## to ./data/plot1.png.
## Loading required libraries
library(dplyr)
## Loading data into R session
if (!file.exists("./data"))
{
  dir.create("./data")
}
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "./data/exdata_household_power_consumption.zip")
dataset <- read.csv(unz("./data/exdata_household_power_consumption.zip", "household_power_consumption.txt"), sep = ';', header = TRUE, quote = "", na.strings = "?", stringsAsFactors = FALSE)
## Fix: tbl_df() is defunct in current dplyr; as_tibble() is the supported
## replacement and produces the same tibble.
dataset <- as_tibble(dataset)
## Fix: refer to columns directly inside mutate()/filter() rather than
## reaching back into the original frame via dataset$Date, and compare
## against explicit Date objects instead of relying on implicit coercion.
dataset <- dataset %>%
  mutate(Date = as.Date(Date, format = "%d/%m/%Y")) %>%
  filter(Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02")) %>%
  mutate(DateTime = as.POSIXct(strptime(paste(Date, Time, sep = " "), format = "%Y-%m-%d %H:%M:%S")))
## Histogram of Global Active Power
png(file = "./data/plot1.png", width = 480, height = 480)
hist(dataset$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
|
/plot1.R
|
no_license
|
DmitryBaranov1986/ExData_Plotting1
|
R
| false
| false
| 991
|
r
|
## Exploratory Data Analysis -- Plot 1.
## Downloads the UCI household power consumption data, subsets it to
## 2007-02-01..2007-02-02, and writes a histogram of Global Active Power
## to ./data/plot1.png.
## Loading required libraries
library(dplyr)
## Loading data into R session
if (!file.exists("./data"))
{
  dir.create("./data")
}
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "./data/exdata_household_power_consumption.zip")
dataset <- read.csv(unz("./data/exdata_household_power_consumption.zip", "household_power_consumption.txt"), sep = ';', header = TRUE, quote = "", na.strings = "?", stringsAsFactors = FALSE)
## Fix: tbl_df() is defunct in current dplyr; as_tibble() is the supported
## replacement and produces the same tibble.
dataset <- as_tibble(dataset)
## Fix: refer to columns directly inside mutate()/filter() rather than
## reaching back into the original frame via dataset$Date, and compare
## against explicit Date objects instead of relying on implicit coercion.
dataset <- dataset %>%
  mutate(Date = as.Date(Date, format = "%d/%m/%Y")) %>%
  filter(Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02")) %>%
  mutate(DateTime = as.POSIXct(strptime(paste(Date, Time, sep = " "), format = "%Y-%m-%d %H:%M:%S")))
## Histogram of Global Active Power
png(file = "./data/plot1.png", width = 480, height = 480)
hist(dataset$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
|
#' @title flatten_dimension_all
#'
#' @description Repeatedly applies \code{flatten_dimension()} until the input
#'   has been reduced to a single dimension; mapping over the intermediate
#'   list column is parallelised with furrr.
#'
#' @param data multi-dimensional data to completely flattened/reduced to a single dimension
#'
#' @return data that is flattened to a single dimension
#' @export
#' @import dplyr tidyr purrr furrr
#' @examples
#' # to be added
flatten_dimension_all <- function(data) {
  # Helper: number of dimensions of `x`; dimensionless objects count as 1.
  # (Replaces three copies of the same dim()/is.null() logic.)
  n_dim_of <- function(x) {
    d <- dim(x)
    if (is.null(d)) 1L else length(d)
  }
  # Fix: `multiprocess` is deprecated in the future package; `multisession`
  # is its modern equivalent. The previous plan is restored on exit so the
  # function no longer leaks a global side effect to the caller.
  old_plan <- plan(multisession)
  on.exit(plan(old_plan), add = TRUE)
  n_dim <- n_dim_of(data)
  df <- flatten_dimension(data)
  while (n_dim > 1) {
    # NOTE(review): unnest() without `cols` is deprecated; the list column
    # appears to be `data`, but confirm before pinning it down.
    df <- df %>%
      mutate(data = future_map(data, flatten_dimension)) %>%
      unnest()
    n_dim <- n_dim_of(df$data[[1]])
  }
  # One final flatten + double unnest to fully collapse the result.
  df <- df %>%
    mutate(data = future_map(data, flatten_dimension)) %>%
    unnest() %>%
    unnest()
  return(df)
}
|
/R/flatten_dimension_all.R
|
permissive
|
epongpipat/eepR
|
R
| false
| false
| 854
|
r
|
#' @title flatten_dimension_all
#'
#' @param data multi-dimensional data to completely flattened/reduced to a single dimension
#'
#' @return data that is flattened to a single dimension
#' @export
#' @import dplyr tidyr purrr furrr
#' @examples
#' # to be added
flatten_dimension_all <- function(data) {
  # Parallel backend for future_map().
  # NOTE(review): "multiprocess" is deprecated in the future package in
  # favour of "multisession"/"multicore" — confirm the pinned version
  # before changing.
  plan(multiprocess)
  # dim() returns NULL for plain vectors; treat that as one dimension.
  data_dim <- data %>% dim()
  if (is.null(data_dim)) {
    n_dim <- 1
  } else {
    n_dim <- length(data_dim)
  }
  df <- flatten_dimension(data)
  # Peel off one dimension per iteration until the nested pieces are 1-D.
  while(n_dim > 1) {
    df <- df %>%
      mutate(data = future_map(data, flatten_dimension)) %>%
      unnest()
    # Re-inspect the first nested element to decide whether to keep looping.
    data_dim <- df$data[[1]] %>% dim()
    if (is.null(data_dim)) {
      n_dim <- 1
    } else {
      n_dim <- length(data_dim)
    }
  }
  # Final pass: flatten the remaining 1-D pieces and unnest twice to fully
  # expand. NOTE(review): unnest() without `cols` is deprecated in
  # tidyr >= 1.0 — verify against the version this package targets.
  df <- df %>%
    mutate(data = future_map(data, flatten_dimension)) %>%
    unnest() %>%
    unnest()
  return(df)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/diff.R
\name{deletions}
\alias{deletions}
\title{Compute a patch of deletions on a recursive object.}
\usage{
deletions(old_object, new_object)
}
\arguments{
\item{old_object}{ANY. The "before" object.}
\item{new_object}{ANY. The "new" object. These are usually a data.frame or
an environment.}
}
\description{
Compute a patch of deletions on a recursive object.
}
|
/man/deletions.Rd
|
permissive
|
kirillseva/objectdiff
|
R
| false
| false
| 453
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/diff.R
\name{deletions}
\alias{deletions}
\title{Compute a patch of deletions on a recursive object.}
\usage{
deletions(old_object, new_object)
}
\arguments{
\item{old_object}{ANY. The "before" object.}
\item{new_object}{ANY. The "new" object. These are usually a data.frame or
an environment.}
}
\description{
Compute a patch of deletions on a recursive object.
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 53466
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 53466
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_10_14_9.unsat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 18142
c no.of clauses 53466
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 53466
c
c QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_10_14_9.unsat.qdimacs 18142 53466 E1 [] 0 90 18052 53466 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Tentrup/mult-matrix/mult_bool_matrix_10_14_9.unsat/mult_bool_matrix_10_14_9.unsat.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 670
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 53466
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 53466
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_10_14_9.unsat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 18142
c no.of clauses 53466
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 53466
c
c QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_10_14_9.unsat.qdimacs 18142 53466 E1 [] 0 90 18052 53466 NONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UserIdOnly.r
\name{UserIdOnly}
\alias{UserIdOnly}
\title{UserIdOnly Class}
\description{
UserIdOnly Class
UserIdOnly Class
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{id}}{}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{UserIdOnly$new()}}
\item \href{#method-toJSON}{\code{UserIdOnly$toJSON()}}
\item \href{#method-fromJSON}{\code{UserIdOnly$fromJSON()}}
\item \href{#method-toJSONString}{\code{UserIdOnly$toJSONString()}}
\item \href{#method-fromJSONString}{\code{UserIdOnly$fromJSONString()}}
\item \href{#method-clone}{\code{UserIdOnly$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$new(id)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$toJSON()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$fromJSON(UserIdOnlyJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$toJSONString()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$fromJSONString(UserIdOnlyJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/man/UserIdOnly.Rd
|
permissive
|
grepinsight/lookr
|
R
| false
| true
| 2,650
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UserIdOnly.r
\name{UserIdOnly}
\alias{UserIdOnly}
\title{UserIdOnly Class}
\description{
UserIdOnly Class
UserIdOnly Class
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{id}}{}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{UserIdOnly$new()}}
\item \href{#method-toJSON}{\code{UserIdOnly$toJSON()}}
\item \href{#method-fromJSON}{\code{UserIdOnly$fromJSON()}}
\item \href{#method-toJSONString}{\code{UserIdOnly$toJSONString()}}
\item \href{#method-fromJSONString}{\code{UserIdOnly$fromJSONString()}}
\item \href{#method-clone}{\code{UserIdOnly$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$new(id)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$toJSON()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$fromJSON(UserIdOnlyJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$toJSONString()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$fromJSONString(UserIdOnlyJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{UserIdOnly$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
#' Geographical detectors: a one-step function.
#'
#' @description A one-step function for optimal discretization and geographical detectors for
#' multiple variables and visualization.
#'
#' @usage gdm(formula, continuous_variable = NULL, data = NULL, discmethod, discitv)
#' \method{print}{gdm}(x, ...)
#' \method{plot}{gdm}(x, ...)
#'
#' @aliases gdm print.gdm plot.gdm
#'
#' @param formula A formula of response and explanatory variables
#' @param continuous_variable A vector of continuous variable names
#' @param data A data.frame includes response and explanatory variables
#' @param discmethod A character vector of discretization methods
#' @param discitv A numeric vector of numbers of intervals
#' @param x A list of \code{gdm} result
#' @param ... Ignore
#'
#' @importFrom grid grid.newpage pushViewport viewport grid.layout
#'
#' @examples
#' ###############
#' ## NDVI: ndvi_40
#' ###############
#' ## set optional parameters of optimal discretization
#' ## optional methods: equal, natural, quantile, geometric, sd and manual
#' discmethod <- c("equal","quantile")
#' discitv <- c(4:5)
#' ## "gdm" function
#' ndvigdm <- gdm(NDVIchange ~ Climatezone + Mining + Tempchange,
#' continuous_variable = c("Tempchange"),
#' data = ndvi_40,
#' discmethod = discmethod, discitv = discitv)
#' ndvigdm
#' plot(ndvigdm)
#' \dontrun{
#' #############
#' ## H1N1: h1n1_100
#' #############
#' ## set optional parameters of optimal discretization
#' discmethod <- c("equal","natural","quantile")
#' discitv <- c(4:6)
#' continuous_variable <- colnames(h1n1_100)[-c(1,11)]
#' ## "gdm" function
#' h1n1gdm <- gdm(H1N1 ~ .,
#' continuous_variable = continuous_variable,
#' data = h1n1_100,
#' discmethod = discmethod, discitv = discitv)
#' h1n1gdm
#' plot(h1n1gdm)
#' }
#'
#' @export
gdm <- function(formula, continuous_variable = NULL, data = NULL, discmethod, discitv){
  formula <- as.formula(formula)
  formula.vars <- all.vars(formula)
  # Response kept as a one-column data.frame (drop = FALSE) so cbind() below
  # preserves its column name.
  response <- data[, formula.vars[1], drop = FALSE]
  # "y ~ ." : every non-response column is explanatory.
  if (formula.vars[2] == "."){
    explanatory <- data[, !(colnames(data) %in% formula.vars[1]), drop = FALSE]
  } else {
    explanatory <- data[, formula.vars[-1], drop = FALSE]
  }
  ### result of optimal discretization
  # Continuous variables are optimally discretized first, then substituted
  # back into the explanatory set as factors.
  if (!is.null(continuous_variable)){
    explanatory_continuous <- data[,match(continuous_variable, colnames(data)),drop=FALSE]
    n.continuous <- ncol(explanatory_continuous)
    data.ctn <- cbind(y = response[,1,drop=TRUE], explanatory_continuous)
    # debug: use new optidisc function and lapply
    odc1 <- optidisc(y ~ ., data.ctn, discmethod, discitv)
    explanatory_stra <- explanatory_continuous
    for (j in 1:n.continuous){
      # unique() guards against duplicated break points, which would make
      # cut() fail.
      breakj <- unique(odc1[[j]]$itv)
      explanatory_stra[,j] <- cut(explanatory_continuous[, j, drop = TRUE], breakj, include.lowest = TRUE)
    }
    explanatory[, match(continuous_variable, colnames(explanatory))] <- explanatory_stra
  }
  newdata <- cbind(response, explanatory)
  ### geographical detectors
  ### factor detectors
  gd1 <- gd(formula, newdata)
  ### risk detectors
  gdrm1 <- riskmean(formula, newdata)
  gdr1 <- gdrisk(formula, newdata)
  # Interaction/ecological detectors need at least two explanatory variables.
  if (ncol(explanatory) == 1){
    ### interaction and ecological detectors
    cat("Factor and risk detectors are computed.
        At least two explanatory variables are required for computing
        interaction and ecological detectors.\n")
    gdi1 <- c()
    gde1 <- c()
  } else {
    ### interaction detectors
    gdi1 <- gdinteract(formula, newdata)
    ### ecological detectors
    gde1 <- gdeco(formula, newdata)
  }
  ### output
  # Discretization results are included only when continuous variables exist.
  if (is.null(continuous_variable)){
    result <- list("Factor.detector" = gd1,"Risk.mean" = gdrm1,"Risk.detector" = gdr1,
                   "Interaction.detector" = gdi1,"Ecological.detector" = gde1)
  } else {
    result <- list("Discretization" = odc1,"Factor.detector" = gd1,
                   "Risk.mean" = gdrm1,"Risk.detector" = gdr1,
                   "Interaction.detector" = gdi1,"Ecological.detector" = gde1)
  }
  ## define class
  class(result) <- "gdm"
  result
}
# S3 print method for "gdm" results: discretization summary first, then
# each detector table. Returns `x` invisibly (standard print contract).
print.gdm <- function(x, ...) {
  n_disc <- length(x$Discretization)
  # Discretization is present only when continuous variables were used.
  if (n_disc == 0) {
    cat("Explanatory variables are categorical variables.\n\n")
  } else {
    cat("Explanatory variables include", n_disc, "continuous variables.\n\n")
    print(x$Discretization)
  }
  # Detector results.
  cat("Geographical detectors results:\n")
  cat("\nFactor detector:\n")
  print(x$Factor.detector)
  cat("\nRisk detector:\n")
  print(x$Risk.mean)
  print(x$Risk.detector)
  # Interaction/ecological detectors exist only for >= 2 explanatory variables.
  if (length(x$Interaction.detector) > 0) {
    print(x$Interaction.detector)
    cat("\n")
    print(x$Ecological.detector)
  }
  invisible(x)
}
# S3 plot method for "gdm" results: plots the discretization (when present)
# followed by each detector component, announcing each step via cat().
plot.gdm <- function(x, ...) {
  # Discretization plots exist only when continuous variables were discretized.
  if (length(x$Discretization) == 0) {
    cat("\n\nall explanatory variables are categorical variables ...\n\n")
  } else {
    plot(x$Discretization)
  }
  cat("plot factor detectors ...\n\n")
  plot(x$Factor.detector)
  cat("plot risk mean values ...\n\n")
  plot(x$Risk.mean)
  cat("plot risk detectors ...\n\n")
  plot(x$Risk.detector)
  # Interaction/ecological plots require >= 2 explanatory variables.
  if (length(x$Interaction.detector) > 0) {
    cat("plot interaction detectors ...\n\n")
    plot(x$Interaction.detector)
    cat("plot ecological detectors ...\n")
    plot(x$Ecological.detector)
  }
}
|
/R/gdm.R
|
no_license
|
cran/GD
|
R
| false
| false
| 5,453
|
r
|
#' Geographical detectors: a one-step function.
#'
#' @description A one-step function for optimal discretization and geographical detectors for
#' multiple variables and visualization.
#'
#' @usage gdm(formula, continuous_variable = NULL, data = NULL, discmethod, discitv)
#' \method{print}{gdm}(x, ...)
#' \method{plot}{gdm}(x, ...)
#'
#' @aliases gdm print.gdm plot.gdm
#'
#' @param formula A formula of response and explanatory variables
#' @param continuous_variable A vector of continuous variable names
#' @param data A data.frame includes response and explanatory variables
#' @param discmethod A character vector of discretization methods
#' @param discitv A numeric vector of numbers of intervals
#' @param x A list of \code{gdm} result
#' @param ... Ignore
#'
#' @importFrom grid grid.newpage pushViewport viewport grid.layout
#'
#' @examples
#' ###############
#' ## NDVI: ndvi_40
#' ###############
#' ## set optional parameters of optimal discretization
#' ## optional methods: equal, natural, quantile, geometric, sd and manual
#' discmethod <- c("equal","quantile")
#' discitv <- c(4:5)
#' ## "gdm" function
#' ndvigdm <- gdm(NDVIchange ~ Climatezone + Mining + Tempchange,
#' continuous_variable = c("Tempchange"),
#' data = ndvi_40,
#' discmethod = discmethod, discitv = discitv)
#' ndvigdm
#' plot(ndvigdm)
#' \dontrun{
#' #############
#' ## H1N1: h1n1_100
#' #############
#' ## set optional parameters of optimal discretization
#' discmethod <- c("equal","natural","quantile")
#' discitv <- c(4:6)
#' continuous_variable <- colnames(h1n1_100)[-c(1,11)]
#' ## "gdm" function
#' h1n1gdm <- gdm(H1N1 ~ .,
#' continuous_variable = continuous_variable,
#' data = h1n1_100,
#' discmethod = discmethod, discitv = discitv)
#' h1n1gdm
#' plot(h1n1gdm)
#' }
#'
#' @export
gdm <- function(formula, continuous_variable = NULL, data = NULL, discmethod, discitv){
  formula <- as.formula(formula)
  formula.vars <- all.vars(formula)
  # Response kept as a one-column data.frame (drop = FALSE) so cbind() below
  # preserves its column name.
  response <- data[, formula.vars[1], drop = FALSE]
  # "y ~ ." : every non-response column is explanatory.
  if (formula.vars[2] == "."){
    explanatory <- data[, !(colnames(data) %in% formula.vars[1]), drop = FALSE]
  } else {
    explanatory <- data[, formula.vars[-1], drop = FALSE]
  }
  ### result of optimal discretization
  # Continuous variables are optimally discretized first, then substituted
  # back into the explanatory set as factors.
  if (!is.null(continuous_variable)){
    explanatory_continuous <- data[,match(continuous_variable, colnames(data)),drop=FALSE]
    n.continuous <- ncol(explanatory_continuous)
    data.ctn <- cbind(y = response[,1,drop=TRUE], explanatory_continuous)
    # debug: use new optidisc function and lapply
    odc1 <- optidisc(y ~ ., data.ctn, discmethod, discitv)
    explanatory_stra <- explanatory_continuous
    for (j in 1:n.continuous){
      # unique() guards against duplicated break points, which would make
      # cut() fail.
      breakj <- unique(odc1[[j]]$itv)
      explanatory_stra[,j] <- cut(explanatory_continuous[, j, drop = TRUE], breakj, include.lowest = TRUE)
    }
    explanatory[, match(continuous_variable, colnames(explanatory))] <- explanatory_stra
  }
  newdata <- cbind(response, explanatory)
  ### geographical detectors
  ### factor detectors
  gd1 <- gd(formula, newdata)
  ### risk detectors
  gdrm1 <- riskmean(formula, newdata)
  gdr1 <- gdrisk(formula, newdata)
  # Interaction/ecological detectors need at least two explanatory variables.
  if (ncol(explanatory) == 1){
    ### interaction and ecological detectors
    cat("Factor and risk detectors are computed.
        At least two explanatory variables are required for computing
        interaction and ecological detectors.\n")
    gdi1 <- c()
    gde1 <- c()
  } else {
    ### interaction detectors
    gdi1 <- gdinteract(formula, newdata)
    ### ecological detectors
    gde1 <- gdeco(formula, newdata)
  }
  ### output
  # Discretization results are included only when continuous variables exist.
  if (is.null(continuous_variable)){
    result <- list("Factor.detector" = gd1,"Risk.mean" = gdrm1,"Risk.detector" = gdr1,
                   "Interaction.detector" = gdi1,"Ecological.detector" = gde1)
  } else {
    result <- list("Discretization" = odc1,"Factor.detector" = gd1,
                   "Risk.mean" = gdrm1,"Risk.detector" = gdr1,
                   "Interaction.detector" = gdi1,"Ecological.detector" = gde1)
  }
  ## define class
  class(result) <- "gdm"
  result
}
# S3 print method for "gdm" results; prints discretization summary and all
# detector tables, then returns `x` invisibly (standard print contract).
print.gdm <- function(x, ...){
  ### print optimal discretization
  # Discretization is present only when continuous variables were used.
  if (length(x$Discretization)==0){
    cat("Explanatory variables are categorical variables.\n\n")
  } else {
    cat("Explanatory variables include", length(x$Discretization), "continuous variables.\n\n")
    print(x$Discretization)
  }
  ### print geographical detectors
  cat("Geographical detectors results:\n")
  cat("\nFactor detector:\n")
  print(x$Factor.detector)
  cat("\nRisk detector:\n")
  print(x$Risk.mean)
  print(x$Risk.detector)
  # Interaction/ecological detectors exist only for >= 2 explanatory variables.
  if (length(x$Interaction.detector) > 0){
    print(x$Interaction.detector)
    cat("\n")
    print(x$Ecological.detector)
  }
  invisible(x)
}
# S3 plot method for "gdm" results; plots the discretization (when present)
# followed by each detector component, announcing each step via cat().
plot.gdm <- function(x, ...){
  ### plot optimal discretization
  # Discretization plots exist only when continuous variables were discretized.
  lrd <- length(x$Discretization)
  if (lrd == 0){
    cat("\n\nall explanatory variables are categorical variables ...\n\n")
  } else {
    plot(x$Discretization)
  }
  ### plot geographical detectors
  cat("plot factor detectors ...\n\n")
  plot(x$Factor.detector)
  cat("plot risk mean values ...\n\n")
  plot(x$Risk.mean)
  cat("plot risk detectors ...\n\n")
  plot(x$Risk.detector)
  # Interaction/ecological plots require >= 2 explanatory variables.
  if (length(x$Interaction.detector) > 0){
    cat("plot interaction detectors ...\n\n")
    plot(x$Interaction.detector)
    cat("plot ecological detectors ...\n")
    plot(x$Ecological.detector)
  }
}
|
# S4 method stub: declares "abs" for db.Rquery objects, but the body is a
# placeholder that unconditionally errors.
# NOTE(review): needs a real implementation before use — every caller will
# hit stop().
setMethod("abs",
          signature(x = "db.Rquery"),
          function (x)
          {
              stop("need a definition for the method here")
          }
          )
|
/rwrapper/abs_db.Rquery.R
|
no_license
|
walkingsparrow/tests
|
R
| false
| false
| 135
|
r
|
# S4 method stub: declares "abs" for db.Rquery objects, but the body is a
# placeholder that unconditionally errors.
# NOTE(review): needs a real implementation before use — every caller will
# hit stop().
setMethod("abs",
          signature(x = "db.Rquery"),
          function (x)
          {
              stop("need a definition for the method here")
          }
          )
|
# Read a CSV file with a header row, keeping character columns as strings.
# Additional arguments are forwarded to read.csv().
file_read <- function (fileName, ...) {
  utils::read.csv(
    file = fileName,
    header = TRUE,
    stringsAsFactors = FALSE,
    ...
  )
}
# Keep only the rows whose lnGdpPercap equals the minimum or maximum
# observed value (ties included).
summarise_data <- function (data) {
  extremes <- c(min(data$lnGdpPercap), max(data$lnGdpPercap))
  data[data$lnGdpPercap %in% extremes, ]
}
# Scatter of log life expectancy vs log GDP per capita coloured by continent,
# with model-predicted lines from `lines_data` drawn on top.
# NOTE(review): requires ggplot2 to be attached by the caller.
makeFigure <- function (data, lines_data) {
  ggplot() +
    # Raw observations; translucent so overplotting stays readable.
    geom_point(data = data, aes(x = lnGdpPercap, y = lnLifeExp, color = continent), alpha = 0.3) +
    # Predicted values (one line per continent), drawn over the points.
    geom_line(data = lines_data, aes(x = lnGdpPercap, y = predicted, color = continent)) +
    labs(title = 'Whole time series', x = 'log(gdpPercap)', y = 'log(lifeExp)') +
    theme_bw() +
    # Centre the plot title.
    theme(plot.title = element_text(hjust = 0.5))
}
|
/R/functions.R
|
no_license
|
dbarneche/gapminder
|
R
| false
| false
| 652
|
r
|
# Read a CSV file with a header row, keeping character columns as strings.
# Additional arguments are forwarded to read.csv().
file_read <- function (fileName, ...) {
  utils::read.csv(
    file = fileName,
    header = TRUE,
    stringsAsFactors = FALSE,
    ...
  )
}
# Keep only the rows whose lnGdpPercap equals the minimum or maximum
# observed value (ties included).
summarise_data <- function (data) {
  extremes <- c(min(data$lnGdpPercap), max(data$lnGdpPercap))
  data[data$lnGdpPercap %in% extremes, ]
}
# Scatter of log life expectancy vs log GDP per capita coloured by continent,
# with model-predicted lines from `lines_data` drawn on top.
# NOTE(review): requires ggplot2 to be attached by the caller.
makeFigure <- function (data, lines_data) {
  ggplot() +
    # Raw observations; translucent so overplotting stays readable.
    geom_point(data = data, aes(x = lnGdpPercap, y = lnLifeExp, color = continent), alpha = 0.3) +
    # Predicted values (one line per continent), drawn over the points.
    geom_line(data = lines_data, aes(x = lnGdpPercap, y = predicted, color = continent)) +
    labs(title = 'Whole time series', x = 'log(gdpPercap)', y = 'log(lifeExp)') +
    theme_bw() +
    # Centre the plot title.
    theme(plot.title = element_text(hjust = 0.5))
}
|
source("utils.R")
source("webapi.R")
# The display option 'Show Raster Overlay' can be enabled by setting the
# environment variable "ENABLE_RASTER_OVERLAY" to 'true'.
# With unset = FALSE, Sys.getenv() returns the string "FALSE" when the
# variable is absent, so the option defaults to off.
enableRasterOverlay <- as.logical(Sys.getenv("ENABLE_RASTER_OVERLAY", unset = FALSE))
#' Adds an on-click listener to a specified layer and
#' triggers an input message if the layer is clicked.
#'
#' @param map the \code{leaflet} or \code{leafletProxy} object
#' @param category the category the listener should be applied to, e.g. shape
#' @param layerId the layer the listener should be attached to
#' @param inputId the id of the input triggered on click
addOnClickListner <- function(map, category, layerId, inputId) {
  flog.debug(
    "addOnClickListner: category = %s, layerId = %s, inputId = %s",
    category,
    layerId,
    inputId,
    data = list()
  )
  # invokes the addOnClickListner JavaScript method defined in www/message-handler.js
  # NOTE(review): `data` below is not defined in this function — the
  # `data = list()` above is an argument to flog.debug(), not an assignment —
  # so it resolves from the search path. It probably should be NULL; confirm
  # against leaflet::invokeMethod()'s expectations.
  leaflet::invokeMethod(map,
                        data,
                        "addOnClickListner",
                        category,
                        layerId,
                        inputId)
}
#' Wrap an element in a \code{div} rendered as an inline block.
#'
#' @param element the element to wrap
#' @param width optional CSS width of the div element
#' @param class optional CSS class of the div element
inlineBlock <- function(element, width = NULL, class = NULL) {
  # htmltools drops NULL attributes, so the optional width style and class
  # simply disappear when not supplied.
  width_style <- if (!is.null(width)) {
    paste0("width: ", validateCssUnit(width), ";")
  }
  div(
    style = "display: inline-block;",
    style = width_style,
    class = class,
    element
  )
}
#' An \code{actionButton} displayed inline with an other input widget, e.g. an \code{textInput}.
#'
#' @param inputId the \code{inputId} of the \code{actionButton}
#' @param label the label
#' @param icon the icon
#' @param width the width of the \code{actionButton}
#' @param ... forwarded to \code{shiny::actionButton}
#'
#' @seealso shiny::actionButton
#' @note http://stackoverflow.com/questions/20637248/shiny-4-small-textinput-boxes-side-by-side/21132918#21132918
inlineActionButton <-
  function(inputId,
           label,
           icon = NULL,
           width = NULL,
           ...) {
    div(
      # A hack to make sure that the button has the same position as a text input placed side by side
      # and to avoid the different handling of 'vertical-align: middle' in firefox and chrome
      # TODO: find a better solution
      class = "inline-action-button-container",
      style = "display:inline-block",
      actionButton(
        inputId = inputId,
        label = label,
        icon = icon,
        width = width,
        ...
      ))
  }
#' A timeInput widget to be placed inline.
#' @param inputId the input id
#' @param label the label
#' @param value the initial value
#' @param seconds display an input field for seconds?
#' @param class the html class to be applied on the element
#' @param width the width of the input field, or NULL (currently unused;
#'   see the commented-out inlineBlock() wrapper below)
#' @seealso shinyTime::timeInput()
inlineTimeInput <- function(inputId,
                            label,
                            value = NULL,
                            seconds = TRUE,
                            class = "form-control",
                            width = NULL) {
  time_input <- timeInput(
    inputId = inputId,
    label = label,
    value = value,
    seconds = seconds
  )
  # Add the class 'form-control', so that form-control styles are applied
  # to the time input fields.
  # NOTE(review): this walks shinyTime's internal tag tree
  # (time_input[[2]]$children[[2]]$children) — fragile across shinyTime
  # versions; re-check on upgrade.
  # TODO: find a better solution
  for (i in seq_along(time_input[[2]]$children[[2]]$children)) {
    if (any(time_input[[2]]$children[[2]]$children[[i]]$name == "input")) {
      time_input[[2]]$children[[2]]$children[[i]]$attribs$class <- class
    }
  }
  # return(inlineBlock(time_input, width = width, class = "inline-time-input-container"))
  return(time_input)
}
#' Creates an html table to be shown in the route or marker pop ups.
#'
#' @param vals a named list of key-value-pairs to be displayed in the pop up
#' @param class a class to be applied to the table tag
#' @param digits the number of digits to be displayed, e.g. 5.34
#' @param key.sep separator used between key and value, e.g. ':'
#' @param fun an optional callback applied to each key-value-pair; it must return a htmltools::tags$tr with two htmltools::tags$td
popupTable <-
  function(vals,
           class = "marker-popup",
           digits = 2,
           key.sep = ":",
           fun = NULL) {
    # withTags() makes table/tr/td below resolve to htmltools tag builders.
    htmltools::withTags(table(class = class,
                              Map(function(key, val) {
                                if (!is.null(fun) && is.function(fun)) {
                                  # Custom renderer takes precedence.
                                  res <- fun(key, val)
                                  res
                                } else {
                                  # Default rendering: round numerics, then
                                  # emit a "key<sep> value" row.
                                  if (is.numeric(val) && !is.null(digits))
                                    val <- round(val, digits = digits)
                                  tr(td(paste0(key, key.sep)),
                                     td(val))
                                }
                              }, names(vals), vals)))
  }
# Creates the popup label displayed when the user clicks on a route.
# `dt` is a data.table of route metrics; `cols` names (and orders) the
# columns to show. Each key gets a custom rendering: distances are suffixed
# with " m", relative values become signed percentages, and improvements /
# regressions are wrapped in "good" / "poor" spans.
popupTableRouting <- function(dt, cols) {
  stopifnot(is.data.table(dt))
  # Restrict and order the columns in one step (replaces the previous
  # redundant names(dt) %in% cols pre-filter plus a second subset, and the
  # re-subset in the popupTable() call); FALSE is spelled out instead of F,
  # which can be reassigned.
  dt <- dt[, cols, with = FALSE]
  popupTable(
    dt,
    fun = function(key, val) {
      if (key == "distance") {
        val <- paste0(round(val, digits = 2), " m")
      } else if (key == "diff_distance") {
        # Negative difference = shorter route = good.
        if (val < 0)
          val <- span(class = "good", paste0(round(val, digits = 2), " m"))
        else if (val > 0)
          val <- span(class = "poor", paste0("+", round(val, digits = 2), " m"))
        else
          val <- val
      } else if (key == "diff_duration") {
        # Duration differences arrive pre-formatted as strings.
        if (startsWith(val, "-"))
          val <- span(class = "good", val)
        else if (!any(grepl("0:00:00", val)))
          val <- span(class = "poor", paste0("+", val))
        else
          val <- val
      } else if (key == "rel_distance") {
        # Sign is flipped so that a saving shows as negative.
        val <- -round(val * 100, 2)
        if (val < 0)
          val <- span(class = "good", paste0(val, "%"))
        else if (val > 0)
          val <- span(class = "poor", paste0("+", val, "%"))
        else
          val <- paste0(val, "%")
      } else if (grepl(".per.distance", key)) {
        # e.g. "route_weight.temperature.per.distance" -> "temperature / distance"
        key <- sub("route_weight\\.([[:alpha:]]+)\\.per\\.distance", "\\1 / distance", key)
        val <- paste0(round(val, digits = 2), "°C")
      } else if (grepl("rel.route_weight", key)) {
        key <- sub("rel.route_weight\\.([[:alpha:]]+)", "difference heat stress (\\1)", key)
        val <- -round(val * 100, 2)
        if (val < 0)
          val <- span(class = "good", paste0(val, "%"))
        else if (val > 0)
          val <- span(class = "poor", paste0("+", val, "%"))
        else
          val <- paste0(val, "%")
      } else if (lubridate::is.duration(val)){
        val <- formatDurationSecs(as.integer(val))
      }
      # NULL values are dropped from the popup table entirely.
      if (!is.null(val)) {
        return(tags$tr(
          tags$td(paste0(key, ":")),
          tags$td(val)
        ))
      } else {
        return(NULL)
      }
    }
  )
}
# Creates the popup label for the "optimal time" results. `dt` is a
# data.table; `cols` names (and orders) the columns to show.
popupTableOptimalTime <- function(dt, cols) {
  stopifnot(is.data.table(dt))
  # Restrict and order the columns in one step (replaces the previous
  # redundant double subsetting); FALSE/TRUE spelled out instead of F/T,
  # which can be reassigned.
  dt <- dt[, cols, with = FALSE]
  popupTable(
    dt,
    fun = function(key, val) {
      if (key == "distance") {
        val <- paste0(round(val, digits = 2), " m")
      } else if (key == "duration") {
        # val <- lubridate::as.duration(val / 1000)
        val <- formatDurationMillis(val, print.millis = FALSE)
      } else if (key == "optimal_time") {
        # ISO timestamp -> "HH:MM" display.
        val <- format(strptime(val, format = "%Y-%m-%dT%H:%M"), format = "%H:%M")
      } else if (key == "optimal_value") {
        val <- round(val, digits = 2)
      }
      # NULL values are dropped from the popup table entirely.
      if (!is.null(val)) {
        return(tags$tr(
          tags$td(paste0(key, ":")),
          tags$td(val)
        ))
      } else {
        return(NULL)
      }
    }
  )
}
|
/shiny-frontend/HeatStressRouting-Frontend/global.R
|
permissive
|
biggis-project/path-optimizer
|
R
| false
| false
| 7,988
|
r
|
source("utils.R")
source("webapi.R")
# The display option 'Show Raster Overlay' can be enabled by setting the
# environment variable "ENABLE_RASTER_OVERLAY" to 'true'.
# With unset = FALSE, Sys.getenv() returns the string "FALSE" when the
# variable is absent, so the option defaults to off.
enableRasterOverlay <- as.logical(Sys.getenv("ENABLE_RASTER_OVERLAY", unset = FALSE))
#' Adds an on-click listener to a specified layer and
#' triggers an input message if the layer is clicked.
#'
#' @param map the \code{leaflet} or \code{leafletProxy} object
#' @param category the category the listener should be applied to, e.g. shape
#' @param layerId the layer the listener should be attached to
#' @param inputId the id of the input triggered on click
addOnClickListner <- function(map, category, layerId, inputId) {
  flog.debug(
    "addOnClickListner: category = %s, layerId = %s, inputId = %s",
    category,
    layerId,
    inputId,
    data = list()
  )
  # invokes the addOnClickListner JavaScript method defined in www/message-handler.js
  # NOTE(review): `data` below is not defined in this function — the
  # `data = list()` above is an argument to flog.debug(), not an assignment —
  # so it resolves from the search path. It probably should be NULL; confirm
  # against leaflet::invokeMethod()'s expectations.
  leaflet::invokeMethod(map,
                        data,
                        "addOnClickListner",
                        category,
                        layerId,
                        inputId)
}
#' Wrap an element in a \code{div} rendered as an inline block.
#'
#' @param element the element to wrap
#' @param width optional CSS width of the div element
#' @param class optional CSS class of the div element
inlineBlock <- function(element, width = NULL, class = NULL) {
  # htmltools drops NULL attributes, so the optional width style and class
  # simply disappear when not supplied.
  width_style <- if (!is.null(width)) {
    paste0("width: ", validateCssUnit(width), ";")
  }
  div(
    style = "display: inline-block;",
    style = width_style,
    class = class,
    element
  )
}
#' An \code{actionButton} displayed inline with an other input widget, e.g. an \code{textInput}.
#'
#' @param inputId the \code{inputId} of the \code{actionButton}
#' @param label the label
#' @param icon the icon
#' @param width the width of the \code{actionButton}
#' @param ... forwarded to \code{shiny::actionButton}
#'
#' @seealso shiny::actionButton
#' @note http://stackoverflow.com/questions/20637248/shiny-4-small-textinput-boxes-side-by-side/21132918#21132918
inlineActionButton <-
  function(inputId,
           label,
           icon = NULL,
           width = NULL,
           ...) {
    div(
      # A hack to make sure that the button has the same position as a text input placed side by side
      # and to avoid the different handling of 'vertical-align: middle' in firefox and chrome
      # TODO: find a better solution
      class = "inline-action-button-container",
      style = "display:inline-block",
      actionButton(
        inputId = inputId,
        label = label,
        icon = icon,
        width = width,
        ...
      ))
  }
#' A timeInput widget to be placed inline.
#' @param inputId the input id
#' @param label the label
#' @param value the initial value
#' @param seconds display an input field for seconds?
#' @param class the html class to be applied on the element
#' @param width the width of the input field, or NULL (currently unused;
#'   see the commented-out inlineBlock() wrapper below)
#' @seealso shinyTime::timeInput()
inlineTimeInput <- function(inputId,
                            label,
                            value = NULL,
                            seconds = TRUE,
                            class = "form-control",
                            width = NULL) {
  time_input <- timeInput(
    inputId = inputId,
    label = label,
    value = value,
    seconds = seconds
  )
  # Add the class 'form-control', so that form-control styles are applied
  # to the time input fields.
  # NOTE(review): this walks shinyTime's internal tag tree
  # (time_input[[2]]$children[[2]]$children) — fragile across shinyTime
  # versions; re-check on upgrade.
  # TODO: find a better solution
  for (i in seq_along(time_input[[2]]$children[[2]]$children)) {
    if (any(time_input[[2]]$children[[2]]$children[[i]]$name == "input")) {
      time_input[[2]]$children[[2]]$children[[i]]$attribs$class <- class
    }
  }
  # return(inlineBlock(time_input, width = width, class = "inline-time-input-container"))
  return(time_input)
}
#' Creates an html table to be shown in the route or marker pop ups.
#'
#' @param vals a named list of key-value-pairs to be displayed in the pop up
#' @param class a class to be applied to the table tag
#' @param digits the number of digits to be displayed, e.g. 5.34
#' @param key.sep separator used between key and value, e.g. ':'
#' @param fun an optional callback applied to each key-value-pair; it must return a htmltools::tags$tr with two htmltools::tags$td
popupTable <-
  function(vals,
           class = "marker-popup",
           digits = 2,
           key.sep = ":",
           fun = NULL) {
    # withTags() makes table/tr/td below resolve to htmltools tag builders.
    htmltools::withTags(table(class = class,
                              Map(function(key, val) {
                                if (!is.null(fun) && is.function(fun)) {
                                  # Custom renderer takes precedence.
                                  res <- fun(key, val)
                                  res
                                } else {
                                  # Default rendering: round numerics, then
                                  # emit a "key<sep> value" row.
                                  if (is.numeric(val) && !is.null(digits))
                                    val <- round(val, digits = digits)
                                  tr(td(paste0(key, key.sep)),
                                     td(val))
                                }
                              }, names(vals), vals)))
  }
# Creates the popup label displayed when the user clicks on a route.
#
# @param dt   a data.table holding the route attributes
# @param cols the columns of `dt` to display, in display order
popupTableRouting <- function(dt, cols) {
  stopifnot(is.data.table(dt))
  # Select the requested columns in `cols` order in one step. The previous
  # implementation subset the table three times (an %in% filter, a reorder,
  # and another %in% filter inside the popupTable() call) with the same
  # net effect.
  dt <- dt[, cols, with = FALSE]
  popupTable(
    dt,
    fun = function(key, val) {
      if (key == "distance") {
        val <- paste0(round(val, digits = 2), " m")
      } else if (key == "diff_distance") {
        # Negative difference = shorter route ("good"), positive = longer ("poor").
        if (val < 0)
          val <- span(class = "good", paste0(round(val, digits = 2), " m"))
        else if (val > 0)
          val <- span(class = "poor", paste0("+", round(val, digits = 2), " m"))
        else
          val <- val
      } else if (key == "diff_duration") {
        # assumes val is a pre-formatted duration string like "0:05:30" —
        # TODO confirm against the caller
        if (startsWith(val, "-"))
          val <- span(class = "good", val)
        else if (!any(grepl("0:00:00", val)))
          val <- span(class = "poor", paste0("+", val))
        else
          val <- val
      } else if (key == "rel_distance") {
        # Fraction -> signed percentage; sign is flipped so that a
        # reduction shows as negative ("good").
        val <- -round(val * 100, 2)
        if (val < 0)
          val <- span(class = "good", paste0(val, "%"))
        else if (val > 0)
          val <- span(class = "poor", paste0("+", val, "%"))
        else
          val <- paste0(val, "%")
      } else if (grepl(".per.distance", key)) {
        # e.g. "route_weight.tmrt.per.distance" -> "tmrt / distance"
        key <- sub("route_weight\\.([[:alpha:]]+)\\.per\\.distance", "\\1 / distance", key)
        val <- paste0(round(val, digits = 2), "°C")
      } else if (grepl("rel.route_weight", key)) {
        # e.g. "rel.route_weight.tmrt" -> "difference heat stress (tmrt)"
        key <- sub("rel.route_weight\\.([[:alpha:]]+)", "difference heat stress (\\1)", key)
        val <- -round(val * 100, 2)
        if (val < 0)
          val <- span(class = "good", paste0(val, "%"))
        else if (val > 0)
          val <- span(class = "poor", paste0("+", val, "%"))
        else
          val <- paste0(val, "%")
      } else if (lubridate::is.duration(val)){
        val <- formatDurationSecs(as.integer(val))
      }
      # A NULL value suppresses the row entirely.
      if (!is.null(val)) {
        return(tags$tr(
          tags$td(paste0(key, ":")),
          tags$td(val)
        ))
      } else {
        return(NULL)
      }
    }
  )
}
# Creates the popup label displayed for the optimal-time markers.
#
# @param dt   a data.table holding the marker attributes
# @param cols the columns of `dt` to display, in display order
popupTableOptimalTime <- function(dt, cols) {
  stopifnot(is.data.table(dt))
  # Select the requested columns in `cols` order in one step; the former
  # %in% pre-filter was redundant with this reordering subset.
  dt <- dt[, cols, with = FALSE]
  popupTable(
    dt,
    fun = function(key, val) {
      if (key == "distance") {
        val <- paste0(round(val, digits = 2), " m")
      } else if (key == "duration") {
        # val <- lubridate::as.duration(val / 1000)
        val <- formatDurationMillis(val, print.millis = F)
      } else if (key == "optimal_time") {
        # ISO-like timestamp -> "HH:MM" for display.
        val <- format(strptime(val, format = "%Y-%m-%dT%H:%M"), format = "%H:%M")
      } else if (key == "optimal_value") {
        val <- round(val, digits = 2)
      }
      # A NULL value suppresses the row entirely.
      if (!is.null(val)) {
        return(tags$tr(
          tags$td(paste0(key, ":")),
          tags$td(val)
        ))
      } else {
        return(NULL)
      }
    }
  )
}
|
\name{Goodness of Fit - Coefficient of Variation}
\alias{gofCV}
\title{
Coefficient of Variation.
}
\description{
Calculates and returns goodness of fit - coefficient of variation (CV).
}
\usage{
gofCV(Obs, Prd, dgt=3)
}
\arguments{
\item{Obs}{
Observed or measured values or target vector.
}
\item{Prd}{
Predicted or fitted values by the model. Values produced by approximation or regression.
}
\item{dgt}{
Number of digits in decimal places. Default is 3.
}
}
\value{
\item{CoeficientOfVariation}{Goodness of fit - coefficient of variation (CV).}
}
\references{
Comparison of Different Data Mining Algorithms for Prediction of Body Weight From Several Morphological Measurements in Dogs - S Celik, O Yilmaz
}
\author{
Prof. Dr. Ecevit Eyduran,
TA. Alper Gulbe
}
\examples{
# dummy inputs, independent variable
# integers from 0 to 19
inputs <- 0:19
# dummy targets/observed values, dependent variable
# 1.2 times the inputs, minus 5, with some normal noise
targets <- -5 + inputs*1.2 + rnorm(20)
# linear regression model
model<-lm(targets~inputs)
# model's predicted values against targets
predicted<-model$fitted.values
# using library ehaGoF for goodness of fit.
library(ehaGoF)
# Goodness of fit - coefficient of variation (CV)
gofCV(targets, predicted)
}
|
/man/gofCV.Rd
|
no_license
|
cran/ehaGoF
|
R
| false
| false
| 1,353
|
rd
|
\name{Goodness of Fit - Coefficient of Variation}
\alias{gofCV}
\title{
Coefficient of Variation.
}
\description{
Calculates and returns goodness of fit - coefficient of variation (CV).
}
\usage{
gofCV(Obs, Prd, dgt=3)
}
\arguments{
\item{Obs}{
Observed or measured values or target vector.
}
\item{Prd}{
Predicted or fitted values by the model. Values produced by approximation or regression.
}
\item{dgt}{
Number of digits in decimal places. Default is 3.
}
}
\value{
\item{CoeficientOfVariation}{Goodness of fit - coefficient of variation (CV).}
}
\references{
Comparison of Different Data Mining Algorithms for Prediction of Body Weight From Several Morphological Measurements in Dogs - S Celik, O Yilmaz
}
\author{
Prof. Dr. Ecevit Eyduran,
TA. Alper Gulbe
}
\examples{
# dummy inputs, independent variable
# integers from 0 to 19
inputs <- 0:19
# dummy targets/observed values, dependent variable
# 1.2 times the inputs, minus 5, with some normal noise
targets <- -5 + inputs*1.2 + rnorm(20)
# linear regression model
model<-lm(targets~inputs)
# model's predicted values against targets
predicted<-model$fitted.values
# using library ehaGoF for goodness of fit.
library(ehaGoF)
# Goodness of fit - coefficient of variation (CV)
gofCV(targets, predicted)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxtr.R
\name{taxtr}
\alias{taxtr}
\title{Conversion of microbe NCBI id and ScientificName}
\usage{
taxtr(Input, Type, Level)
}
\arguments{
\item{Input}{a query vector of microbe ncbi ids or ScientificName}
\item{Type}{The Type of Input, should be "TaxId" or "ScientificName"}
\item{Level}{The taxon level of Input, only "species" and "genus" are accepted.}
}
\value{
}
\description{
Conversion of microbe NCBI id and ScientificName
}
\examples{
\dontrun{
taxtr(Input = c("2840314","2839514","2839126","2794228"), Type = "TaxId", Level = "genus")
}
}
|
/man/taxtr.Rd
|
no_license
|
swcyo/MicrobiomeProfiler
|
R
| false
| true
| 632
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxtr.R
\name{taxtr}
\alias{taxtr}
\title{Conversion of microbe NCBI id and ScientificName}
\usage{
taxtr(Input, Type, Level)
}
\arguments{
\item{Input}{a query vector of microbe ncbi ids or ScientificName}
\item{Type}{The Type of Input, should be "TaxId" or "ScientificName"}
\item{Level}{The taxon level of Input, only "species" and "genus" are accepted.}
}
\value{
}
\description{
Conversion of microbe NCBI id and ScientificName
}
\examples{
\dontrun{
taxtr(Input = c("2840314","2839514","2839126","2794228"), Type = "TaxId", Level = "genus")
}
}
|
### what is 90% of pi?
almost.pi <- 90/100 * pi
## percents should be escaped neither in inlinedocs, nor in
## Documentation Lists, but will be escaped when written to Rd files.
.result <- list(almost.pi=list(description="what is 90% of pi?",
definition="almost.pi <- 90/100 * pi",
format="",
title="almost pi"))
|
/inst/testfiles/percent.R
|
no_license
|
tdhock/inlinedocs
|
R
| false
| false
| 362
|
r
|
### what is 90% of pi?
almost.pi <- 90/100 * pi
## percents should be escaped neither in inlinedocs, nor in
## Documentation Lists, but will be escaped when written to Rd files.
.result <- list(almost.pi=list(description="what is 90% of pi?",
definition="almost.pi <- 90/100 * pi",
format="",
title="almost pi"))
|
# August 2015 Reporting Challenge
# Summarises article share counts by site section for three (fake) news
# domains and plots the average number of shares per section.
# read.xlsx() lives in openxlsx (swap for library(xlsx) if that is the
# package this project uses).
library(openxlsx)
a<-read.xlsx("db.xlsx")
FeaturesNames<-as.character(a$url)
#calculate summary of domain 1 :tvnewonline.fake
news<-FeaturesNames[grep("http://www.tvnewsonline.fake/news",FeaturesNames)]
music<-FeaturesNames[grep("http://www.tvnewsonline.fake/music",FeaturesNames)]
gallery<-FeaturesNames[grep("http://www.tvnewsonline.fake/gallery",FeaturesNames)]
tv<-FeaturesNames[grep("http://www.tvnewsonline.fake/tv",FeaturesNames)]
media_video<-FeaturesNames[grep("http://www.tvnewsonline.fake/media/video",FeaturesNames)]
f1<-subset(a,a$url %in% news)
f2<-subset(a,a$url %in% music)
f3<-subset(a,a$url %in% gallery)
f4<-subset(a,a$url %in% tv)
f5<-subset(a,a$url %in% media_video)
# summary()[c(1,4,6)] keeps the Min., Mean and Max. of the share counts
m1<-summary(f1$shares)[c(1,4,6)]
m2<-summary(f2$shares)[c(1,4,6)]
m3<-summary(f3$shares)[c(1,4,6)]
m4<-summary(f4$shares)[c(1,4,6)]
m5<-summary(f5$shares)[c(1,4,6)]
#calculate summary of domain 2 :celebritynew.fake
headlines<-FeaturesNames[grep("http://www.celebritynews.fake/headlines",FeaturesNames)]
entertainment<-FeaturesNames[grep("http://www.celebritynews.fake/entertainment",FeaturesNames)]
f6<-subset(a,a$url %in% headlines)
m6<-summary(f6$shares)[c(1,4,6)]
f7<-subset(a,a$url %in% entertainment)
m7<-summary(f7$shares)[c(1,4,6)]
#calculate summary of domain 3 :cookingshow.fake
recipes<-FeaturesNames[grep("http://www.cookingshow.fake/food/recipe",FeaturesNames)]
f8<-subset(a,a$url %in% recipes)
m8<-summary(f8$shares)[c(1,4,6)]
#plotting the shares for all domain
# BUG FIX: `x` and `mean_set` were used below but never defined; build them
# from the per-section mean shares (element 2 of each m* is the Mean).
mean_set <- c(m1[2], m2[2], m3[2], m4[2], m5[2], m6[2], m7[2], m8[2])
x <- seq_along(mean_set)
plot(x,mean_set,xlab=c("Domains"),ylab="Average Shares",type = "n")
points(x[1:5],mean_set[1:5],col=4,pch=16)
points(x[6:7],mean_set[6:7],col=3,pch=16)
points(x[8],mean_set[8],col=2,pch=16)
# BUG FIX: label only the first 7 points here; the original recycled the
# 7 labels across all 8 coordinates, so the 8th point was labelled "news"
# on top of the separate "recipes" label below.
text(x[1:7],mean_set[1:7]+100,c("news","music","gallery","tv","media_video","headlines","entertainment"))
text(x[8]-0.2,mean_set[8]-100,"recipes")
title(main="August 2015 Reporting Challenge")
legend("topleft",legend = c("tvnewonline.fake","celebritynew.fake","cookingshow.fake"),pch=16,cex=0.8,col = c(4,3,2))
dev.copy(png,file="naina.png", width=1024, height=1024)
dev.off()
|
/August 2015 Reporting Challenge /August 2015 Reporting Challenge .R
|
no_license
|
SankhlaDushyant/Data-analysis
|
R
| false
| false
| 2,043
|
r
|
#August 2015 Reporting Challenge
# Summarises article share counts by site section for three (fake) news
# domains and plots the average number of shares per section.
# NOTE(review): read.xlsx() requires openxlsx (or xlsx) to be attached;
# no library() call appears in this script.
a<-read.xlsx("db.xlsx")
FeaturesNames<-as.character(a$url)
#calculate summary of domain 1 :tvnewonline.fake
news<-FeaturesNames[grep("http://www.tvnewsonline.fake/news",FeaturesNames)]
music<-FeaturesNames[grep("http://www.tvnewsonline.fake/music",FeaturesNames)]
gallery<-FeaturesNames[grep("http://www.tvnewsonline.fake/gallery",FeaturesNames)]
tv<-FeaturesNames[grep("http://www.tvnewsonline.fake/tv",FeaturesNames)]
media_video<-FeaturesNames[grep("http://www.tvnewsonline.fake/media/video",FeaturesNames)]
f1<-subset(a,a$url %in% news)
f2<-subset(a,a$url %in% music)
f3<-subset(a,a$url %in% gallery)
f4<-subset(a,a$url %in% tv)
f5<-subset(a,a$url %in% media_video)
# summary()[c(1,4,6)] keeps the Min., Mean and Max. of the share counts
m1<-summary(f1$shares)[c(1,4,6)]
m2<-summary(f2$shares)[c(1,4,6)]
m3<-summary(f3$shares)[c(1,4,6)]
m4<-summary(f4$shares)[c(1,4,6)]
m5<-summary(f5$shares)[c(1,4,6)]
#calculate summary of domain 2 :celebritynew.fake
headlines<-FeaturesNames[grep("http://www.celebritynews.fake/headlines",FeaturesNames)]
entertainment<-FeaturesNames[grep("http://www.celebritynews.fake/entertainment",FeaturesNames)]
f6<-subset(a,a$url %in% headlines)
m6<-summary(f6$shares)[c(1,4,6)]
f7<-subset(a,a$url %in% entertainment)
m7<-summary(f7$shares)[c(1,4,6)]
#calculate summary of domain 3 :cookingshow.fake
recipes<-FeaturesNames[grep("http://www.cookingshow.fake/food/recipe",FeaturesNames)]
f8<-subset(a,a$url %in% recipes)
m8<-summary(f8$shares)[c(1,4,6)]
#plotting the shares for all domain
# NOTE(review): `x` and `mean_set` are used below but never defined in this
# script, so it errors as written — presumably x <- 1:8 and mean_set holds
# the 8 per-section means (m1[2]..m8[2]); confirm the intent.
# NOTE(review): the first text() call passes 8 coordinates but only 7
# labels; R recycles them, so the 8th point also gets "news" in addition
# to the separate "recipes" label below.
plot(x,mean_set,xlab=c("Domains"),ylab="Average Shares",type = "n")
points(x[1:5],mean_set[1:5],col=4,pch=16)
points(x[6:7],mean_set[6:7],col=3,pch=16)
points(x[8],mean_set[8],col=2,pch=16)
text(x,mean_set+100,c("news","music","gallery","tv","media_video","headlines","entertainment"))
text(x[8]-0.2,mean_set[8]-100,"recipes")
title(main="August 2015 Reporting Challenge")
legend("topleft",legend = c("tvnewonline.fake","celebritynew.fake","cookingshow.fake"),pch=16,cex=0.8,col = c(4,3,2))
dev.copy(png,file="naina.png", width=1024, height=1024)
dev.off()
|
## SVM model with hyperparameter tuning
## removes periods when the power plant is not operating
## from the night hours, keeps only a few random hours per day
library(e1071)
library(xts)
## directory containing the helper functions
OkoljeFunkcije <- 'C:/Users/Podlogar/Documents/Projekt Elektro/Funkcije'
## directory with the feature matrices (train)
OkoljeFM <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/featureMatrix/Train'
## directory with the feature matrices (validation)
OkoljeFM_valid <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/featureMatrix/Valid'
## directory with the realisations (train)
OkoljeReal <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/realizacija/Train'
## directory with the realisations (validation)
OkoljeReal_valid <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/realizacija/Valid'
## directory in which a folder for the fitted models is created
OkoljeModel <- 'C:/Users/Podlogar/Documents/Projekt Elektro/03_Ucenje modela/Sonce/SVM/Model'
## directory in which a folder for the validation-set results is created
OkoljeValidacija <- 'C:/Users/Podlogar/Documents/Projekt Elektro/03_Ucenje modela/Sonce/SVM/Validacija'
## source every helper function in the functions directory
setwd(OkoljeFunkcije)
for(fun in dir()){
  print(fun)
  source(fun)
}
## create a <date>_<mode> folder for saving the models
Okolje1 <- paste0(OkoljeModel, '/', Sys.Date(), '_brezNEDELA')
dir.create(Okolje1, showWarnings = TRUE, recursive = FALSE, mode = "0777")
## create a <date>_<mode> folder for saving the validation results
Okolje2 <- paste0(OkoljeValidacija, '/', Sys.Date(), '_brezNEDELA')
dir.create(Okolje2, showWarnings = TRUE, recursive = FALSE, mode = "0777")
## NOTE(review): cost/epsilon are only interpolated into the output file
## names below; the actual hyperparameters are chosen by tune() — confirm
## that the file names are meant to record these fixed values.
cost = 8
epsilon = 0.1
setwd(OkoljeFM)
for(kraj in dir()){
  print(kraj)
  ## load the weather forecast for this location
  if(substr(kraj, 1, 5) == 'vreme'){
    ## training matrix
    featureMatrix <- readRDS(kraj)
    ## validation matrix
    setwd(OkoljeFM_valid)
    krajValid <- paste0(substr(kraj, 1, nchar(kraj)-9), 'Valid.rds')
    featureMatrixValid <- readRDS(krajValid)
    setwd(OkoljeReal)
    ## use this location's weather forecast for every nearby power plant
    for(elekt in dir()){
      if (gsub(".*vreme_|_FM.*", "", kraj) == gsub(".*K]|_.*", "", elekt) &
          gsub(".*T]|_.*", "", elekt) == 'Sonce'){
        print(elekt)
        realizacija <- readRDS(elekt)
        ## data cleaning
        ## remove periods when the plant is not operating
        realizacija <- nedelovanje(realizacija, 1)
        ## drop part of the night hours
        realizacija <- odstNoc(realizacija, 2)
        ## drop rows containing NAs
        X <- as.matrix(featureMatrix)
        Y <- as.vector(realizacija)
        A <- cbind(X,Y)
        A <- na.omit(A)
        Y <- A[, ncol(A)]
        X <- A[,1:(ncol(A)-1)]
        ## hyperparameter tuning and model fitting (3-fold CV)
        tc <- tune.control(cross = 3)
        print('tune')
        op <- tune(svm, X, Y, ranges = list(epsilon = seq(0,0.5,0.1), cost = c(2,4,6,8,12,14,16,18,20)), tunecontrol = tc)
        model <- op$best.model
        ## model validation
        print('validacija')
        newdata <- data.frame(X = I(featureMatrixValid))
        napovedSVM <- predict(model, newdata)
        napovedSVM <- xts(napovedSVM, index(featureMatrixValid))
        ## save the tuning result (includes the best model)
        setwd(Okolje1)
        ## model file name
        imeModel <- paste0(substr(elekt, 1, 8), '_model_', epsilon, '_', cost,'_SVM_cist.rds')
        saveRDS(op, imeModel)
        ## save the validation predictions
        setwd(Okolje2)
        ## validation file name
        imeValid <- paste0(substr(elekt, 1, 8), '_validacija_', epsilon, '_', cost,'_SVM_cist.rds')
        saveRDS(napovedSVM, imeValid)
        setwd(OkoljeReal)
      }
    }
  }
  setwd(OkoljeFM)
}
## scratch: timing of two single fits at different cost values
setwd(OkoljeFM)
kraj <- dir()[2]
featureMatrix <- readRDS(kraj)
setwd(OkoljeReal)
elekt <- dir()[116]
realizacija <- readRDS(elekt)
X <- featureMatrix
start1 <- Sys.time()
model <- svm(realizacija ~ X, cost = 2)
stop1 <- Sys.time()
cas1 <- stop1 - start1
start2 <- Sys.time()
model <- svm(realizacija ~ X, cost = 5)
stop2 <- Sys.time()
cas2 <- stop2 - start2
print(cas1)
print(cas2)
## scratch: validate the last fitted model against the validation set
setwd(OkoljeFM_valid)
kraj <- dir()[2]
featureMatrixValid <- readRDS(kraj)
setwd(OkoljeReal_valid)
elekt <- dir()[116]
realizacijaValid <- readRDS(elekt)
newdata <- data.frame(X = I(featureMatrixValid))
napovedSVR <- predict(model, newdata)
kakoDobro <- xts(napovedSVR, index(realizacijaValid))
plot(kakoDobro[1:500])
lines(realizacijaValid[1:500], col = 'red')
## scratch: tuning on the non-zero observations only
neNic <- (realizacija != 0)
Y <- realizacija[neNic]
X <- featureMatrix[neNic, ]
tc <- tune.control(cross = 3)
start <- Sys.time()
op <- tune(svm, Y ~ X, ranges = list(epsilon = seq(0,1,0.3), cost = 2^(c(0,2,4,6))), tunecontrol = tc)
stop <- Sys.time()
cas <- stop - start
setwd(OkoljeModel)
saveRDS(cas, 'casTunanja.rds')
saveRDS(op, 'testniSVMmodel.rds')
##op <- tune(svm, realizacija ~ featureMatrix, ranges = list(epsilon = seq(0,1,0.3), cost = 2^(2:4)), tunecontrol = tc)
print(op)
plot(op)
bestModel <- op$best.model
model <- bestModel
|
/03UcenjeModela/Sonce/SVM/svm_model_cistiPodatki_tune.r
|
no_license
|
JureP/Napove-elektro
|
R
| false
| false
| 4,853
|
r
|
## SVM model with hyperparameter tuning
## removes periods when the power plant is not operating
## from the night hours, keeps only a few random hours per day
library(e1071)
library(xts)
## directory containing the helper functions
OkoljeFunkcije <- 'C:/Users/Podlogar/Documents/Projekt Elektro/Funkcije'
## directory with the feature matrices (train)
OkoljeFM <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/featureMatrix/Train'
## directory with the feature matrices (validation)
OkoljeFM_valid <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/featureMatrix/Valid'
## directory with the realisations (train)
OkoljeReal <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/realizacija/Train'
## directory with the realisations (validation)
OkoljeReal_valid <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/realizacija/Valid'
## directory in which a folder for the fitted models is created
OkoljeModel <- 'C:/Users/Podlogar/Documents/Projekt Elektro/03_Ucenje modela/Sonce/SVM/Model'
## directory in which a folder for the validation-set results is created
OkoljeValidacija <- 'C:/Users/Podlogar/Documents/Projekt Elektro/03_Ucenje modela/Sonce/SVM/Validacija'
## source every helper function in the functions directory
setwd(OkoljeFunkcije)
for(fun in dir()){
  print(fun)
  source(fun)
}
## create a <date>_<mode> folder for saving the models
Okolje1 <- paste0(OkoljeModel, '/', Sys.Date(), '_brezNEDELA')
dir.create(Okolje1, showWarnings = TRUE, recursive = FALSE, mode = "0777")
## create a <date>_<mode> folder for saving the validation results
Okolje2 <- paste0(OkoljeValidacija, '/', Sys.Date(), '_brezNEDELA')
dir.create(Okolje2, showWarnings = TRUE, recursive = FALSE, mode = "0777")
## NOTE(review): cost/epsilon are only interpolated into the output file
## names below; the actual hyperparameters are chosen by tune() — confirm
## that the file names are meant to record these fixed values.
cost = 8
epsilon = 0.1
setwd(OkoljeFM)
for(kraj in dir()){
  print(kraj)
  ## load the weather forecast for this location
  if(substr(kraj, 1, 5) == 'vreme'){
    ## training matrix
    featureMatrix <- readRDS(kraj)
    ## validation matrix
    setwd(OkoljeFM_valid)
    krajValid <- paste0(substr(kraj, 1, nchar(kraj)-9), 'Valid.rds')
    featureMatrixValid <- readRDS(krajValid)
    setwd(OkoljeReal)
    ## use this location's weather forecast for every nearby power plant
    for(elekt in dir()){
      if (gsub(".*vreme_|_FM.*", "", kraj) == gsub(".*K]|_.*", "", elekt) &
          gsub(".*T]|_.*", "", elekt) == 'Sonce'){
        print(elekt)
        realizacija <- readRDS(elekt)
        ## data cleaning
        ## remove periods when the plant is not operating
        realizacija <- nedelovanje(realizacija, 1)
        ## drop part of the night hours
        realizacija <- odstNoc(realizacija, 2)
        ## drop rows containing NAs
        X <- as.matrix(featureMatrix)
        Y <- as.vector(realizacija)
        A <- cbind(X,Y)
        A <- na.omit(A)
        Y <- A[, ncol(A)]
        X <- A[,1:(ncol(A)-1)]
        ## hyperparameter tuning and model fitting (3-fold CV)
        tc <- tune.control(cross = 3)
        print('tune')
        op <- tune(svm, X, Y, ranges = list(epsilon = seq(0,0.5,0.1), cost = c(2,4,6,8,12,14,16,18,20)), tunecontrol = tc)
        model <- op$best.model
        ## model validation
        print('validacija')
        newdata <- data.frame(X = I(featureMatrixValid))
        napovedSVM <- predict(model, newdata)
        napovedSVM <- xts(napovedSVM, index(featureMatrixValid))
        ## save the tuning result (includes the best model)
        setwd(Okolje1)
        ## model file name
        imeModel <- paste0(substr(elekt, 1, 8), '_model_', epsilon, '_', cost,'_SVM_cist.rds')
        saveRDS(op, imeModel)
        ## save the validation predictions
        setwd(Okolje2)
        ## validation file name
        imeValid <- paste0(substr(elekt, 1, 8), '_validacija_', epsilon, '_', cost,'_SVM_cist.rds')
        saveRDS(napovedSVM, imeValid)
        setwd(OkoljeReal)
      }
    }
  }
  setwd(OkoljeFM)
}
## scratch: timing of two single fits at different cost values
setwd(OkoljeFM)
kraj <- dir()[2]
featureMatrix <- readRDS(kraj)
setwd(OkoljeReal)
elekt <- dir()[116]
realizacija <- readRDS(elekt)
X <- featureMatrix
start1 <- Sys.time()
model <- svm(realizacija ~ X, cost = 2)
stop1 <- Sys.time()
cas1 <- stop1 - start1
start2 <- Sys.time()
model <- svm(realizacija ~ X, cost = 5)
stop2 <- Sys.time()
cas2 <- stop2 - start2
print(cas1)
print(cas2)
## scratch: validate the last fitted model against the validation set
setwd(OkoljeFM_valid)
kraj <- dir()[2]
featureMatrixValid <- readRDS(kraj)
setwd(OkoljeReal_valid)
elekt <- dir()[116]
realizacijaValid <- readRDS(elekt)
newdata <- data.frame(X = I(featureMatrixValid))
napovedSVR <- predict(model, newdata)
kakoDobro <- xts(napovedSVR, index(realizacijaValid))
plot(kakoDobro[1:500])
lines(realizacijaValid[1:500], col = 'red')
## scratch: tuning on the non-zero observations only
neNic <- (realizacija != 0)
Y <- realizacija[neNic]
X <- featureMatrix[neNic, ]
tc <- tune.control(cross = 3)
start <- Sys.time()
op <- tune(svm, Y ~ X, ranges = list(epsilon = seq(0,1,0.3), cost = 2^(c(0,2,4,6))), tunecontrol = tc)
stop <- Sys.time()
cas <- stop - start
setwd(OkoljeModel)
saveRDS(cas, 'casTunanja.rds')
saveRDS(op, 'testniSVMmodel.rds')
##op <- tune(svm, realizacija ~ featureMatrix, ranges = list(epsilon = seq(0,1,0.3), cost = 2^(2:4)), tunecontrol = tc)
print(op)
plot(op)
bestModel <- op$best.model
model <- bestModel
|
# a quick script template for reading in the files produced by the nest_read.py script
# by Drew Hill, UC Berkeley
# June 2016
# bash command to transfer file from RasPi
# scp lawson@192.168.29.157:~/nest_datalog.txt ~/Desktop
library(data.table)
library(plyr)
library(lubridate)
filename <- "~/Dropbox/Aerie/Nest Protect Teardown/early nest pats tests/HillHouse - 13June/datafiles/nest_datalog.txt"
col.names <- c('led','datetime','voltage','pi_tempC','tempC','rh')
dt1 <- fread(filename, col.names=col.names)
# truncate and update datetime (parse, then round to whole seconds)
dt1[,datetime := ymd_hms(datetime, tz="America/Los_Angeles")]
dt1[, datetime := round_date(datetime, unit = c("second"))]
# convert voltage to mV
dt1[, mv := voltage*1000]
# drop the original volts column ("with = F" alongside := is deprecated
# in data.table, so the plain form is used)
dt1[, voltage := NULL]
# create separate voltage columns by IR type
# assumes pi_temp is always the same for ir and blue at datetime X
dt <- dcast.data.table(dt1, datetime + pi_tempC + tempC +rh ~ led, value.var = c("mv"))
# dataset where readings averaged by minute
# BUG FIX: data.tables are modified by reference, so `dt1_min <- dt1`
# aliased the two tables and the minute-rounding below clobbered
# dt1$datetime as well. Take an explicit copy instead.
dt1_min <- copy(dt1)
dt1_min[,datetime := round_date(datetime, unit = c("minute"))]
dt1_min[,avg_mv:=mean(mv, na.rm=T),by="led,datetime"]
dt1_min[,avg_pi_tempC:=mean(pi_tempC, na.rm=T),by="led,datetime"]
dt1_min[,avg_tempC:=mean(tempC, na.rm=T),by="led,datetime"]
dt1_min[,avg_rh:=mean(rh, na.rm=T),by="led,datetime"]
dt1_min[,c("mv","pi_tempC","tempC","rh"):=NULL]
dt1_min <- unique(dt1_min)
dt_min <- dcast.data.table(dt1_min, datetime + avg_pi_tempC + avg_tempC + avg_rh ~ led, value.var = c("avg_mv"))
# average tempC and rh will differ between IR and Blue due to the frequency of measurement (Blue LEd is less frequent than IR LED), so replace average temp for Blue measurements with that of IR measurements
# NOTE(review): that replacement is described but not implemented here.
|
/Nest Protect as PM25 Monitor/Code drafting/R Code/read_nest_log_v2.R
|
no_license
|
drew-hill/dissertation
|
R
| false
| false
| 1,744
|
r
|
# a quick script template for reading in the files produced by the nest_read.py script
# by Drew Hill, UC Berkeley
# June 2016
# bash command to transfer file from RasPi
# scp lawson@192.168.29.157:~/nest_datalog.txt ~/Desktop
library(data.table)
library(plyr)
library(lubridate)
filename <- "~/Dropbox/Aerie/Nest Protect Teardown/early nest pats tests/HillHouse - 13June/datafiles/nest_datalog.txt"
col.names <- c('led','datetime','voltage','pi_tempC','tempC','rh')
dt1 <- fread(filename, col.names=col.names)
# truncate and update datetime (parse, then round to whole seconds)
dt1[,datetime := ymd_hms(datetime, tz="America/Los_Angeles")]
dt1[, datetime := round_date(datetime, unit = c("second"))]
# convert voltage to mV
dt1[, mv := voltage*1000]
#drop regular voltage
dt1[, c('voltage') := NULL, with=F]
# create separate voltage columns by IR type
# assumes pi_temp is always the same for ir and blue at datetime X
dt <- dcast.data.table(dt1, datetime + pi_tempC + tempC +rh ~ led, value.var = c("mv"))
# dataset where readings averaged by minute
# NOTE(review): data.tables are modified by reference, so `dt1_min <- dt1`
# aliases the two tables — the minute-rounding below also clobbers
# dt1$datetime. This should be `dt1_min <- copy(dt1)`.
dt1_min <- dt1
dt1_min[,datetime := round_date(datetime, unit = c("minute"))]
dt1_min[,avg_mv:=mean(mv, na.rm=T),by="led,datetime"]
dt1_min[,avg_pi_tempC:=mean(pi_tempC, na.rm=T),by="led,datetime"]
dt1_min[,avg_tempC:=mean(tempC, na.rm=T),by="led,datetime"]
dt1_min[,avg_rh:=mean(rh, na.rm=T),by="led,datetime"]
dt1_min[,c("mv","pi_tempC","tempC","rh"):=NULL, with=F]
dt1_min <- unique(dt1_min)
dt_min <- dcast.data.table(dt1_min, datetime + avg_pi_tempC + avg_tempC + avg_rh ~ led, value.var = c("avg_mv"))
# average tempC and rh will differ between IR and Blue due to the frequency of measurement (Blue LEd is less frequent than IR LED), so replace average temp for Blue measurements with that of IR measurements
# NOTE(review): that replacement is described but not implemented here.
|
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007.
# Load the household power data (';'-separated, '?' marks missing values),
# keep the two target days, and add a combined POSIXct DateTime column.
power <- read.table("household_power_consumption.txt",
                    sep = ";", header = TRUE, na.strings = "?")
power <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]
stamp <- as.POSIXct(paste(power$Date, power$Time),
                    format = "%d/%m/%Y %H:%M:%S")
power <- cbind(power, DateTime = stamp)
# Write a 480x480 PNG with the histogram.
png(filename = "plot1.png", height = 480, width = 480)
hist(power$Global_active_power,
     main = "Global Active Power",
     col = "red",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
Luis-A/ExData_Plotting1
|
R
| false
| false
| 536
|
r
|
# Code for load data file and subset the properly data
# Reads the household power file (';'-separated, '?' = NA), keeps only
# 1-2 Feb 2007, and adds a combined POSIXct DateTime column.
# NOTE(review): `na.string` relies on partial argument matching for
# `na.strings` — spell it out.
dat <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.string="?")
dat <- subset(dat, Date=="1/2/2007" | Date=="2/2/2007")
DateTime <- as.POSIXct(paste(dat$Date, dat$Time), format = "%d/%m/%Y %H:%M:%S")
dat <- cbind(dat, DateTime)
# Instructions to plot figure 1
# Writes a 480x480 PNG histogram of Global_active_power to plot1.png.
png(filename="plot1.png",
    height=480, width=480)
hist(dat$Global_active_power,
     main="Global Active Power",
     col="red",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eodplot.R
\name{findLandmarks}
\alias{findLandmarks}
\title{Plot EOD signal with landmarks}
\usage{
findLandmarks(plotdata)
}
\arguments{
\item{plotdata}{The EOD matrix from getEODMatrix}
}
\description{
Plot EOD signal with landmarks
}
|
/man/findLandmarks.Rd
|
no_license
|
jasongallant/eodplotter
|
R
| false
| true
| 315
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eodplot.R
\name{findLandmarks}
\alias{findLandmarks}
\title{Plot EOD signal with landmarks}
\usage{
findLandmarks(plotdata)
}
\arguments{
\item{plotdata}{The EOD matrix from getEODMatrix}
}
\description{
Plot EOD signal with landmarks
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rice.R
\name{generate_passphrase}
\alias{generate_passphrase}
\title{Generates a passphrase}
\usage{
generate_passphrase(tokens = generate_token(7), verbose = TRUE, ...)
}
\arguments{
\item{tokens}{a vector of character representing the tokens to be
used to generate the passphrase. By default, 7 are randomly
generated using \code{generate_token(7)}.}
\item{verbose}{if \code{TRUE} the passphrase is displayed as a message}
\item{...}{additional parameters to be passed to \code{\link{match_token}}}
}
\value{
a character string representing the passphrase
}
\description{
Generates a passphrase.
}
\details{
Given a wordlist and a number of words, this function generates a
passphrase. You can control the wordlist you choose and whether
the passphrase uses title case by providing additional arguments
that will be passed to \code{\link{match_token}}.
}
\examples{
generate_passphrase(tokens = generate_token(7, "pseudorandom"),
verbose = FALSE)
}
\seealso{
\code{\link{match_token}}, \code{\link{generate_token}}
}
\author{
Francois Michonneau
}
|
/man/generate_passphrase.Rd
|
no_license
|
fmichonneau/riceware
|
R
| false
| true
| 1,154
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rice.R
\name{generate_passphrase}
\alias{generate_passphrase}
\title{Generates a passphrase}
\usage{
generate_passphrase(tokens = generate_token(7), verbose = TRUE, ...)
}
\arguments{
\item{tokens}{a vector of character representing the tokens to be
used to generate the passphrase. By default, 7 are randomly
generated using \code{generate_token(7)}.}
\item{verbose}{if \code{TRUE} the passphrase is displayed as a message}
\item{...}{additional parameters to be passed to \code{\link{match_token}}}
}
\value{
a character string representing the passphrase
}
\description{
Generates a passphrase.
}
\details{
Given a wordlist and a number of words, this function generates a
passphrase. You can control the wordlist you choose and whether
the passphrase uses title case by providing additional arguments
that will be passed to \code{\link{match_token}}.
}
\examples{
generate_passphrase(tokens = generate_token(7, "pseudorandom"),
verbose = FALSE)
}
\seealso{
\code{\link{match_token}}, \code{\link{generate_token}}
}
\author{
Francois Michonneau
}
|
# Second test of update B
# Create the Phi matrix using the code from the SSGL package example
library(Rcpp)
library(RcppArmadillo)
library(MASS)
library(splines)
set.seed(129)
# n observations, p predictors
n <- 100
p <- 200
X_raw <- matrix(runif(n*p, 0,1), nrow = n, ncol= p)
# D basis functions per predictor; Phi[, , j] holds predictor j's basis
D <- 2
Phi <- array(dim = c(n, D, p))
for(j in 1:p){
  splineTemp <- splines::ns(X_raw[,j], df = D)
  tmp_Phi <- matrix(nrow = n, ncol= D)
  tmp_Phi[,1] <- splineTemp[,1]
  # Orthogonalise each subsequent basis column against the previous ones by
  # regressing on them and keeping the residuals.
  # NOTE(review): 2:D assumes D >= 2; with D == 1 this loop would run
  # backwards (2:1) and fail.
  for(jj in 2:D){
    tmpY <- splineTemp[,jj]
    tmpX <- tmp_Phi[,1:(jj-1)]
    modX <- lm(tmpY ~ tmpX)
    tmp_Phi[,jj] <- modX$residuals
  }
  # center and scale tmp_Phi (each column to mean 0, scaled by sd * sqrt(n-1))
  tmp_col_means <- apply(tmp_Phi, FUN = mean, MARGIN = 2)
  tmp_col_sd <- apply(tmp_Phi, FUN = sd, MARGIN = 2)
  for(jj in 1:D){
    tmp_Phi[,jj] <- (tmp_Phi[,jj] - tmp_col_means[jj])/(tmp_col_sd[jj] * sqrt(n-1))
  }
  Phi[,,j] <- tmp_Phi
}
####
# Generate data
####
# Sparse additive truth: only predictors 1, 3, 4 and 5 enter the response.
sigma <- 0.75
R <- sin(pi*X_raw[,1]) + 2.5 * (X_raw[,3]^2 - 0.5) +
  exp(X_raw[,4]) + 3 * X_raw[,5] + sigma * rnorm(n,0,1)
xi1 <- 1
# Compiled implementation under test (project-local C++ file).
sourceCpp("src/test_update_B.cpp")
B_init <- matrix(0, nrow = D, ncol = p)
theta <- c(0.25,0.5,0.1, 0.15)
# Euclidean norm of a coefficient group.
# NOTE(review): this shadows base::norm within the script.
norm <- function(x){sqrt(sum(x*x))}
# Run the update with two penalty settings and report which coefficient
# groups come out non-zero.
test_0 <- test_update_B(B_init, R, Phi,sigma*sigma, xi1, 2 * sqrt(D) * xi1, theta, verbose = FALSE, max_iter = 10000)
B_norm0 <- apply(test_0$B, MARGIN = 2, FUN = norm)
which(B_norm0 != 0)
test_1 <- test_update_B(B_init, R, Phi, sigma*sigma, xi1, 10 * sqrt(D) * xi1, theta, verbose = FALSE, max_iter = 10000)
B_norm1 <- apply(test_1$B, MARGIN = 2, FUN = norm)
which(B_norm1 != 0)
|
/testing_scripts/scripts/test_updateB2.R
|
no_license
|
skdeshpande91/GAM_SSL_SSGL
|
R
| false
| false
| 1,537
|
r
|
# Second test of update B
# Create the Phi matrix using the code from the SSGL package example
library(Rcpp)
library(RcppArmadillo)
library(MASS)
library(splines)
set.seed(129)
# n observations, p predictors
n <- 100
p <- 200
X_raw <- matrix(runif(n*p, 0,1), nrow = n, ncol= p)
# D basis functions per predictor; Phi[, , j] holds predictor j's basis
D <- 2
Phi <- array(dim = c(n, D, p))
for(j in 1:p){
  splineTemp <- splines::ns(X_raw[,j], df = D)
  tmp_Phi <- matrix(nrow = n, ncol= D)
  tmp_Phi[,1] <- splineTemp[,1]
  # Orthogonalise each subsequent basis column against the previous ones by
  # regressing on them and keeping the residuals.
  # NOTE(review): 2:D assumes D >= 2; with D == 1 this loop would run
  # backwards (2:1) and fail.
  for(jj in 2:D){
    tmpY <- splineTemp[,jj]
    tmpX <- tmp_Phi[,1:(jj-1)]
    modX <- lm(tmpY ~ tmpX)
    tmp_Phi[,jj] <- modX$residuals
  }
  # center and scale tmp_Phi (each column to mean 0, scaled by sd * sqrt(n-1))
  tmp_col_means <- apply(tmp_Phi, FUN = mean, MARGIN = 2)
  tmp_col_sd <- apply(tmp_Phi, FUN = sd, MARGIN = 2)
  for(jj in 1:D){
    tmp_Phi[,jj] <- (tmp_Phi[,jj] - tmp_col_means[jj])/(tmp_col_sd[jj] * sqrt(n-1))
  }
  Phi[,,j] <- tmp_Phi
}
####
# Generate data
####
# Sparse additive truth: only predictors 1, 3, 4 and 5 enter the response.
sigma <- 0.75
R <- sin(pi*X_raw[,1]) + 2.5 * (X_raw[,3]^2 - 0.5) +
  exp(X_raw[,4]) + 3 * X_raw[,5] + sigma * rnorm(n,0,1)
xi1 <- 1
# Compiled implementation under test (project-local C++ file).
sourceCpp("src/test_update_B.cpp")
B_init <- matrix(0, nrow = D, ncol = p)
theta <- c(0.25,0.5,0.1, 0.15)
# Euclidean norm of a coefficient group.
# NOTE(review): this shadows base::norm within the script.
norm <- function(x){sqrt(sum(x*x))}
# Run the update with two penalty settings and report which coefficient
# groups come out non-zero.
test_0 <- test_update_B(B_init, R, Phi,sigma*sigma, xi1, 2 * sqrt(D) * xi1, theta, verbose = FALSE, max_iter = 10000)
B_norm0 <- apply(test_0$B, MARGIN = 2, FUN = norm)
which(B_norm0 != 0)
test_1 <- test_update_B(B_init, R, Phi, sigma*sigma, xi1, 10 * sqrt(D) * xi1, theta, verbose = FALSE, max_iter = 10000)
B_norm1 <- apply(test_1$B, MARGIN = 2, FUN = norm)
which(B_norm1 != 0)
|
#' Print Method for Evaluation of Covariate-Adaptive Randomization
#'
#' Prints the parameters of a covariate-adaptive randomization procedure
#'
#' @export
#' @rdname print
#' @method print careval
#' @param x objects of class \code{careval}.
#' @param digits number of significant digits to be used.
#' @param prefix string, passed to \code{\link{strwrap}} for displaying the \code{method} component of the \code{carandom} object.
#' @param ... further arguments to be passed to or from methods.
#' @return \code{x}, invisibly.
#' @seealso \code{\link{evalRand}}, \code{\link{evalRand.sim}}.
print.careval = function(x, digits = getOption("digits"), prefix = "\t", ...){
  cat("\n")
  # Map the abbreviation stored in x$method to its full display name.
  abb = c("HuHuCAR", "PocSimMIN", "StrBCD", "StrPBR", "DoptBCD", "BayesBCD", "AdjBCD")
  com = c("Hu and Hu's General CAR", "Pocock and Simon's Procedure with Two Arms",
          "Shao's Procedure", "Stratified Randomization with Two Arms",
          "Atkinson's Optimum Biased Coin Design", "Bayesian Biased Coin Design",
          "Covariate-adjusted Biased Coin Design")
  ind = which(abb == x$method)  # arr.ind is meaningless on a vector; dropped
  meth = com[ind]
  cat(strwrap(meth, prefix = prefix), sep = "\n")
  cat("\n")
  cat("call:\n",
      paste("evalRand.sim(", "method = ", x$method, ")\n", sep = ""));
  cat("\n");
  cat("group", "=", LETTERS[1 : 2], "\n", sep = " ")
  cat("N", "=", x$N, "\n", sep = " ")
  cat("iteration", "=", x$iteration, "\n", sep = " ")
  cat("cov_num", "=", x$cov_num, "\n", sep = " ")
  cat("level_num", "=", as.character(x$level_num), "\n", sep = " ")
  # Method-specific parameters.
  if(x$method == "BayesBCD"){
    cat("Categor class numbers", "=", x$J, "\n", sep = " ")
  }
  if(x$method == "StrPBR"){
    cat("block", "=", x$bsize, "\n", sep = " ")
  }
  cat("Data type: ", x$`Data Type`, "\n");
  if(x$`Data Type` == "Simulated"){
    cat("Data generation mode: ", x$DataGeneration, "\n", sep = " ")
  }
  cat("\n")
  # Show at most the first 7 patients and the first 3 iterations.
  K <- if (x$N <= 7) x$N else 7
  I <- if (x$iteration <= 3) x$iteration else 3
  cat("assignments of the first", I, "iterations for the first", K,
      "patients", ":", "\n", sep = " ")
  ass = as.data.frame(t(x$Assig[1 : K, 1 : I]))
  for(l in seq_len(I)){
    ass[l, ] = LETTERS[as.numeric(ass[l, ])]  # recode group 1/2 as A/B
  }
  ass$' ' = rep("...", times = I)
  print(ass)
  cat("\n")
  cat("evaluation by imbalances: \n");
  cat("absolute overall imbalances:\n")
  print(x$Imb[1, ], digits = 3);
  cat("\n");
  s <- if (x$strt_num <= 3) x$strt_num else 3
  cat("absolute within-strt. imbalances for the first", s, "strata:", "\n", sep = " ");
  print(x$Imb[2 : (s + 1), ], digits = 3)
  cat("\n");
  cat("absolute marginal imbalances for", x$cov_num, "margins:", "\n", sep = " ");
  # Rows 1..(1 + strt_num) of x$Imb hold overall + per-stratum imbalances;
  # the margin rows follow, one block of level_num[i] rows per covariate.
  # v collects the first row of each covariate's block (preallocated).
  v <- integer(x$cov_num)
  r <- 1 + x$strt_num + 1
  for(i in seq_len(x$cov_num)){
    v[i] <- r
    r <- r + x$level_num[i]
  }
  print(x$Imb[v, ], digits = 3);
  cat("\n")
  invisible(x)
}
|
/caratOMP/R/careval.R
|
no_license
|
zhenxuanzhang/carat
|
R
| false
| false
| 2,899
|
r
|
#' Print Method for Evaluation of Covariate-Adaptive Randomization
#'
#' Prints the parameters of a covariate-adaptive randomization procedure
#'
#' @export
#' @rdname print
#' @method print careval
#' @param x objects of class \code{careval}.
#' @param digits number of significant digits to be used.
#' @param prefix string, passed to \code{\link{strwrap}} for displaying the \code{method} component of the \code{carandom} object.
#' @param ... further arguments to be passed to or from methods.
#' @seealso \code{\link{evalRand}}, \code{\link{evalRand.sim}}.
print.careval = function(x, digits = getOption("digits"), prefix = "\t", ...){
  cat("\n")
  # Map the abbreviation stored in x$method to its full display name.
  abb = c("HuHuCAR", "PocSimMIN", "StrBCD", "StrPBR", "DoptBCD", "BayesBCD", "AdjBCD")
  com = c("Hu and Hu's General CAR", "Pocock and Simon's Procedure with Two Arms",
          "Shao's Procedure", "Stratified Randomization with Two Arms",
          "Atkinson's Optimum Biased Coin Design", "Bayesian Biased Coin Design",
          "Covariate-adjusted Biased Coin Design")
  ind = which(abb == x$method, arr.ind = T)
  meth = com[ind]
  cat(strwrap(meth, prefix = prefix), sep = "\n")
  cat("\n")
  cat("call:\n",
      paste("evalRand.sim(", "method = ", x$method, ")\n", sep = ""));
  cat("\n");
  cat("group", "=", LETTERS[1 : 2], "\n", sep = " ")
  cat("N", "=", x$N, "\n", sep = " ")
  cat("iteration", "=", x$iteration, "\n", sep = " ")
  cat("cov_num", "=", x$cov_num, "\n", sep = " ")
  cat("level_num", "=", as.character(x$level_num), "\n", sep = " ")
  # Method-specific parameters.
  if(x$method == "BayesBCD"){
    cat("Categor class numbers", "=", x$J, "\n", sep = " ")
  }
  if(x$method == "StrPBR"){
    cat("block", "=", x$bsize, "\n", sep = " ")
  }
  cat("Data type: ", x$`Data Type`, "\n");
  if(x$`Data Type` == "Simulated"){
    cat("Data generation mode: ", x$DataGeneration, "\n", sep = " ")
  }
  cat("\n")
  # Show at most the first 7 patients and the first 3 iterations.
  if(x$N <= 7){K = x$N}else{K = 7}
  if(x$iteration <= 3){I = x$iteration}else{I = 3}
  cat("assignments of the first", I, "iterations for the first", K,
      "patients", ":", "\n", sep = " ")
  ass = as.data.frame(t(x$Assig[1 : K, 1 : I]))
  for(l in 1 : I){
    ass[l, ] = LETTERS[as.numeric(ass[l, ])]  # recode group 1/2 as A/B
  }
  ass$' ' = rep("...", times = I)
  print(ass)
  cat("\n")
  cat("evaluation by imbalances: \n");
  cat("absolute overall imbalances:\n")
  print(x$Imb[1, ], digits = 3);
  cat("\n");
  if(x$strt_num <= 3){s = x$strt_num}else{s = 3}
  cat("absolute within-strt. imbalances for the first", s, "strata:", "\n", sep = " ");
  print(x$Imb[2 : (s + 1), ], digits = 3)
  cat("\n");
  cat("absolute marginal imbalances for", x$cov_num, "margins:", "\n", sep = " ");
  # Rows 1..(1 + strt_num) of x$Imb hold overall + per-stratum imbalances;
  # margin rows follow, one block of level_num[i] rows per covariate.
  # v collects the first row index of each covariate's block.
  v = vector();
  r = 1 + x$strt_num + 1;
  for(i in 1 :x$cov_num){
    v[i] = r;
    r = r + x$level_num[i];
  }
  print(x$Imb[v, ], digits = 3);
  cat("\n")
  invisible(x)
}
|
# Fuzzer-generated regression input for DLMtool::LBSPRgen: the mostly
# degenerate argument values reproduce a crash previously caught under
# valgrind; str() merely exercises the returned object.
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 4.12843075136762e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615827487-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 487
|
r
|
# Fuzzer-generated regression input for DLMtool::LBSPRgen (valgrind
# reproduction case); str() merely exercises the returned object.
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 4.12843075136762e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
library(R.utils)
### Name: Options
### Title: The Options class
### Aliases: Options
### Keywords: classes programming
### ** Examples
# Demonstrates R.utils::Options: a tree of options that is queried and
# modified with getOption()/setOption()/hasOption() (note that R.utils
# overloads base getOption() with methods for Options objects).
local <- Options()
# Query a missing option
cex <- getOption(local, "graphics/cex")
cat("graphics/cex =", cex, "\n") # Returns NULL
# Query a missing option with default value
cex <- getOption(local, "graphics/cex", defaultValue=1)
cat("graphics/cex =", cex, "\n") # Returns NULL
# Set option and get previous value
oldCex <- setOption(local, "graphics/cex", 2)
cat("previous graphics/cex =", oldCex, "\n") # Returns NULL
# Set option again and get previous value
oldCex <- setOption(local, "graphics/cex", 3)
cat("previous graphics/cex =", oldCex, "\n") # Returns 2
# Query a missing option with default value, which is ignored
cex <- getOption(local, "graphics/cex", defaultValue=1)
cat("graphics/cex =", cex, "\n") # Returns 3
# Query multiple options with multiple default values
multi <- getOption(local, c("graphics/cex", "graphics/pch"), c(1,2))
print(multi);
# Check existence of multiple options
has <- hasOption(local, c("graphics/cex", "graphics/pch"))
print(has);
# Get a subtree of options
graphics <- getOption(local, "graphics")
print(graphics)
# Get the complete tree of options
all <- getOption(local)
print(all)
|
/data/genthat_extracted_code/R.utils/examples/Options.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,288
|
r
|
library(R.utils)
### Name: Options
### Title: The Options class
### Aliases: Options
### Keywords: classes programming
### ** Examples
# Example script extracted from the R.utils Options class documentation.
local <- Options()
# Query a missing option
cex <- getOption(local, "graphics/cex")
cat("graphics/cex =", cex, "\n") # Returns NULL
# Query a missing option with default value
cex <- getOption(local, "graphics/cex", defaultValue=1)
cat("graphics/cex =", cex, "\n") # Returns NULL
# Set option and get previous value
oldCex <- setOption(local, "graphics/cex", 2)
cat("previous graphics/cex =", oldCex, "\n") # Returns NULL
# Set option again and get previous value
oldCex <- setOption(local, "graphics/cex", 3)
cat("previous graphics/cex =", oldCex, "\n") # Returns 2
# Query a missing option with default value, which is ignored
cex <- getOption(local, "graphics/cex", defaultValue=1)
cat("graphics/cex =", cex, "\n") # Returns 3
# Query multiple options with multiple default values
multi <- getOption(local, c("graphics/cex", "graphics/pch"), c(1,2))
print(multi);
# Check existence of multiple options
has <- hasOption(local, c("graphics/cex", "graphics/pch"))
print(has);
# Get a subtree of options
graphics <- getOption(local, "graphics")
print(graphics)
# Get the complete tree of options
all <- getOption(local)
print(all)
|
### link CSC to GDP_PM25
#
# Left-joins country-level CSC data (from readCSC()) onto the GDP_PM25
# panel by country code and year, prints a quick NA-by-year table so the
# match rate can be eyeballed, and drops the redundant Country.Name column.
#
# Args:
#   GDP_PM25: data frame with at least Country.Code, Year and Country.Name.
# Returns:
#   GDP_PM25 with the CSC columns appended (clashing names suffixed "_CSC").
link_CSC <- function(GDP_PM25){
  library(dplyr)
  source("code/Functions/readCSC.R")
  CSC <- readCSC()
  # NOTE(review): load() restores objects into this function's frame and
  # presumably (re)creates GDP_PM25 here, shadowing the argument — confirm
  # this is intended before relying on the passed-in value.
  load("RData/GDP_PM25.RData")
  GDP_PM25_CSC <- left_join(GDP_PM25, CSC,
                            by = c("Country.Code", "Year" = "time"),
                            suffix = c("", "_CSC"))
  ### check matching result: table of missing Country.Name by year
  print(with(GDP_PM25_CSC, table(is.na(Country.Name), Year)))
  GDP_PM25_CSC$Country.Name <- NULL
  # Use TRUE, not the reassignable shorthand T.
  detach("package:dplyr", unload = TRUE)
  return(GDP_PM25_CSC)
}
|
/Functions/link_CSC.R
|
no_license
|
Danny1127/Pollution_Growth
|
R
| false
| false
| 473
|
r
|
### link CSC to GDP_PM25
# Left-joins country-level CSC data (from readCSC()) onto the GDP_PM25
# panel by country code and year, prints an NA-by-year table to check the
# match rate, and drops the redundant Country.Name column.
link_CSC <- function(GDP_PM25){
  library(dplyr)
  source("code/Functions/readCSC.R")
  CSC <- readCSC()
  # NOTE(review): load() restores objects into this frame and may shadow
  # the GDP_PM25 argument — verify this is intended.
  load("RData/GDP_PM25.RData")
  GDP_PM25_CSC <- left_join(GDP_PM25,CSC,by=c("Country.Code","Year"="time"),suffix=c("","_CSC"))
  ### check matching result
  print(with(GDP_PM25_CSC,table(is.na(Country.Name),Year)))
  GDP_PM25_CSC$Country.Name <- NULL
  detach("package:dplyr",unload=T)
  return(GDP_PM25_CSC)
}
|
\name{FTR.makeBidLimits}
\alias{FTR.makeBidLimits}
\title{Create the max and min bid price for a set of paths.}
\description{Create the max and min bid price for a set of paths. It
compares the monthly settle prices and monthly cleared prices for the
past ... auctions, and has a simple algo built in. This is supposed
to be a high throughput function. All paths are supposed to be buys
(bids). If a simulated settle is provided
\code{is.null(sSP)==FALSE}, the min(max) bid will be taken
as the minimum(cut.quantile) of the simulated settle price distribution.}
\usage{
FTR.makeBidLimits(paths, sSP, cut.quantile=list(c(0,0.25)))
}
\arguments{
\item{paths}{The usual data.frame that defines the path. It has at
least c("path.no", "class.type", "source.ptid",
"sink.ptid") column names.}
\item{sSP}{Simulated settle prices. As returned by
\code{FTR.simulate.SP}.}
\item{cut.quantile}{What quantile of the simulated settle price
distribution is to be used as the max bid. A list with length equal
to the number of paths. The elements of the list are the min/max
quantiles of the simulated prices.}
}
\value{
A list with two data.frames. The second one has what you want
with at least columns c("path.no", "min.bid", "max.bid").
}
\author{Adrian Dragulescu}
%\seealso{\code{\link{FTR.get.CP.for.paths}}}
\examples{
Paths <- data.frame(path.no=1:2, source.ptid = c(326, 4000),
sink.ptid = c(4001, 4002), class.type=c("ONPEAK","OFFPEAK"))
sSP <- FTR.simulate.SP( hSP, options, noSims=5000, noEns=50 )
res <- FTR.makeBidLimits( Paths, sSP, cut.quantile=list(0,0.1) )
# path.no CP.min CP.enh.p5 CP.enh.p25 CP.enh.p50 SP.min SP.enh.p25 SP.nby.p25 min.bid max.bid
#1 1 -7.0502976 -7.0502976 -4.8218342 -3.5018750 -9.2487216 -4.785093 -4.603363 -9.2487216 -4.785093
#2 2 -0.6349734 -0.4417015 -0.2531378 -0.1555875 -0.6383468 -0.272164 -0.278682 -0.6383468 -0.272164
}
|
/R Extension/RMG/Utilities/Interfaces/FTR/man/FTR.makeBidLimits.Rd
|
no_license
|
uhasan1/QLExtension-backup
|
R
| false
| false
| 1,958
|
rd
|
\name{FTR.makeBidLimits}
\alias{FTR.makeBidLimits}
\title{Create the max and min bid price for a set of paths.}
\description{Create the max and min bid price for a set of paths. It
compares the monthly settle prices and monthly cleared prices for the
past ... auctions, and has a simple algo built in. This is supposed
to be a high throughput function. All paths are supposed to be buys
(bids). If a simulated settle is provided
\code{is.null(sSP)==FALSE}, the min(max) bid will be taken
as the minimum(cut.quantile) of the simulated settle price distribution.}
\usage{
FTR.makeBidLimits(paths, sSP, cut.quantile=list(c(0,0.25)))
}
\arguments{
\item{paths}{The usual data.frame that defines the path. It has at
least c("path.no", "class.type", "source.ptid",
"sink.ptid") column names.}
\item{sSP}{Simulated settle prices. As returned by
\code{FTR.simulate.SP}.}
\item{cut.quantile}{What quantile of the simulated settle price
distribution is to be used as the max bid. A list with length equal
to the number of paths. The elements of the list are the min/max
quantiles of the simulated prices.}
}
\value{
A list with two data.frames. The second one has what you want
with at least columns c("path.no", "min.bid", "max.bid").
}
\author{Adrian Dragulescu}
%\seealso{\code{\link{FTR.get.CP.for.paths}}}
\examples{
Paths <- data.frame(path.no=1:2, source.ptid = c(326, 4000),
sink.ptid = c(4001, 4002), class.type=c("ONPEAK","OFFPEAK"))
sSP <- FTR.simulate.SP( hSP, options, noSims=5000, noEns=50 )
res <- FTR.makeBidLimits( Paths, sSP, cut.quantile=list(0,0.1) )
# path.no CP.min CP.enh.p5 CP.enh.p25 CP.enh.p50 SP.min SP.enh.p25 SP.nby.p25 min.bid max.bid
#1 1 -7.0502976 -7.0502976 -4.8218342 -3.5018750 -9.2487216 -4.785093 -4.603363 -9.2487216 -4.785093
#2 2 -0.6349734 -0.4417015 -0.2531378 -0.1555875 -0.6383468 -0.272164 -0.278682 -0.6383468 -0.272164
}
|
library(dplyr, warn.conflicts = FALSE)
# Download 2016 Plan selections by ZIP Code for the 38 states -------------
# Source: https://aspe.hhs.gov/basic-report/plan-selections-zip-code-and-county-health-insurance-marketplace-march-2016
# (November 1, 2015 – February 1, 2016), including SEP activity through Feb. 22, 2015)
# Count: 9.63 million plan selections for 38 states
url = "https://aspe.hhs.gov/sites/default/files/aspe-files/187796/mar2016marketplacezipcode_1.xlsx"
lcl = "data-raw/mar2016marketplacezipcode_1.xlsx"
if (!file.exists(lcl)) download.file(url, lcl)
enrollment2016 = readxl::read_excel(lcl, skip = 21, sheet = 1)
enrollment2016_county = readxl::read_excel(lcl, skip = 21, sheet = 2)
# Clean column names: lowercase -> Title Case -> remove spaces (CamelCase).
enrollment2016 %>%
  names() %>%
  tolower() %>%
  stringi::stri_trans_totitle() %>%
  stringr::str_replace_all(" ", "") ->
  names(enrollment2016)
enrollment2016 %>%
  mutate(
    ZipCode = as.integer(ZipCode),
    PlanSelections = as.integer(PlanSelections)
  ) -> enrollment2016
enrollment2016
# Zip code to county link file --------------------------------------------
# devtools::install_github("jjchern/zcta")
# devtools::install_github("jjchern/gaze")
# devtools::install_github("jjchern/zipzcta")
# For each ZCTA keep the single county with the largest population share.
zctacounty = zcta::zcta_county_rel_10 %>%
  select(zcta5, state, county, geoid, poppt, zpoppct, copop) %>%
  group_by(zcta5) %>%
  slice(which.max(zpoppct)) %>%
  left_join(gaze::county10, by = "geoid") %>%
  select(zcta5, state, usps, county, geoid, name, copop)
zctacounty
zipcounty = zipzcta::zipzcta %>%
  left_join(zctacounty, by = c("zcta" = "zcta5")) %>%
  select(zip, zcta, state = usps, countygeoid = geoid, countyname = name, copop) %>%
  arrange(zip)
zipcounty
# Add county and county names ---------------------------------------------
enrollment2016 %>%
  left_join(zipcounty, by = c("ZipCode" = "zip")) -> enrollment2016
# Save --------------------------------------------------------------------
enrollment2016
# NOTE(review): devtools::use_data() is deprecated in current devtools;
# usethis::use_data() is the replacement — confirm before upgrading deps.
devtools::use_data(enrollment2016, overwrite = TRUE)
|
/data-raw/prep_enrollment2016.R
|
no_license
|
wander99/qhp
|
R
| false
| false
| 2,006
|
r
|
library(dplyr, warn.conflicts = FALSE)
# Download 2016 Plan selections by ZIP Code for the 38 states -------------
# Source: https://aspe.hhs.gov/basic-report/plan-selections-zip-code-and-county-health-insurance-marketplace-march-2016
# (November 1, 2015 – February 1, 2016), including SEP activity through Feb. 22, 2015)
# Count: 9.63 million plan selections for 38 states
url = "https://aspe.hhs.gov/sites/default/files/aspe-files/187796/mar2016marketplacezipcode_1.xlsx"
lcl = "data-raw/mar2016marketplacezipcode_1.xlsx"
if (!file.exists(lcl)) download.file(url, lcl)
enrollment2016 = readxl::read_excel(lcl, skip = 21, sheet = 1)
enrollment2016_county = readxl::read_excel(lcl, skip = 21, sheet = 2)
# Clean column names: lowercase -> Title Case -> remove spaces (CamelCase).
enrollment2016 %>%
  names() %>%
  tolower() %>%
  stringi::stri_trans_totitle() %>%
  stringr::str_replace_all(" ", "") ->
  names(enrollment2016)
enrollment2016 %>%
  mutate(
    ZipCode = as.integer(ZipCode),
    PlanSelections = as.integer(PlanSelections)
  ) -> enrollment2016
enrollment2016
# Zip code to county link file --------------------------------------------
# devtools::install_github("jjchern/zcta")
# devtools::install_github("jjchern/gaze")
# devtools::install_github("jjchern/zipzcta")
# For each ZCTA keep the single county with the largest population share.
zctacounty = zcta::zcta_county_rel_10 %>%
  select(zcta5, state, county, geoid, poppt, zpoppct, copop) %>%
  group_by(zcta5) %>%
  slice(which.max(zpoppct)) %>%
  left_join(gaze::county10, by = "geoid") %>%
  select(zcta5, state, usps, county, geoid, name, copop)
zctacounty
zipcounty = zipzcta::zipzcta %>%
  left_join(zctacounty, by = c("zcta" = "zcta5")) %>%
  select(zip, zcta, state = usps, countygeoid = geoid, countyname = name, copop) %>%
  arrange(zip)
zipcounty
# Add county and county names ---------------------------------------------
enrollment2016 %>%
  left_join(zipcounty, by = c("ZipCode" = "zip")) -> enrollment2016
# Save --------------------------------------------------------------------
enrollment2016
# NOTE(review): devtools::use_data() is deprecated in current devtools;
# usethis::use_data() is the replacement — confirm before upgrading deps.
devtools::use_data(enrollment2016, overwrite = TRUE)
|
## makeCacheMatrix() builds a "cache-aware" matrix object: a list of
## closures sharing the matrix `x` and its cached inverse `inv`.
##   set(y)       replaces the matrix and invalidates the cached inverse
##   get()        retrieves the matrix
##   setinv(inverse)  stores the inverse of the matrix
##   getinv()     retrieves the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # invalidate cache whenever the matrix changes
  }
  get <- function() x
  # FIX: parameter renamed from `solve` to `inverse` — the old name
  # shadowed base::solve(), which is confusing and error-prone.
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve() returns the inverse of the special "matrix" created by
## makeCacheMatrix(). If the inverse is already cached (and the matrix is
## unchanged), the cached value is returned; otherwise it is computed with
## solve(), cached, and returned. Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)  # early exit: cache hit
  }
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setinv(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
mitenshah/ProgrammingAssignment2
|
R
| false
| false
| 933
|
r
|
## Constructor for a matrix wrapper that can cache its own inverse.
## Returns a list of accessor closures over the shared environment:
## set()/get() for the matrix itself, setinv()/getinv() for the cached
## inverse. The cache starts as NULL and is reset whenever set() is called.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinv <- function(solve) cached_inverse <<- solve
  getinv <- function() cached_inverse
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## Compute (or fetch from cache) the inverse of a makeCacheMatrix object.
## On a cache hit a message is emitted and the stored inverse returned;
## otherwise solve() is called, the result cached, and then returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
    cached
  } else {
    message("getting cached data")
    cached
  }
}
|
## Script for building the plot of energy sub metering vs time
## FIX: fread() comes from data.table, which was never attached; without
## this the script fails with "could not find function \"fread\"".
library(data.table)
## 1. Read the data file ("?" marks missing values in the raw file)
DT <- fread("../household_power_consumption.txt", na.strings = c("?"))
##Get the two-month subset of the data
DTs <- subset(DT, Date=="1/2/2007" | Date == "2/2/2007")
## Concatenate the data in the Date and Time columns
datetime <- with( DTs, (paste(Date, Time, sep="*")))
## Convert the list of concatenated strings to the list of time objects
time<-strptime(datetime,"%d/%m/%Y*%H:%M:%S")
## Open the PNG device
png(filename = "Plot3.png",width=480, height=480, units="px")
## Draw a plot of the data with type="n" (no points yet) and default box
plot(time, DTs$Sub_metering_1, ann=FALSE, type="n")
box()
## Add lines
lines(time, DTs$Sub_metering_1)
lines(time, DTs$Sub_metering_2, col="red")
lines(time, DTs$Sub_metering_3, col="blue")
## Add a y label
title(ylab="Energy sub metering")
## add a legend (columns 7:9 are the three sub-metering series)
legend("topright", names(DTs)[7:9], col=c("black", "red", "blue"), lty = 1, lwd=2)
## 5. Close the graphical device to write the file
dev.off()
## Clear the workspace of the data
rm(DT, DTs, datetime, time)
|
/Plot3.R
|
no_license
|
Aytakatya/ExData_Plotting1
|
R
| false
| false
| 1,085
|
r
|
## Script for building the plot of energy sub metering vs time
## NOTE(review): fread() is provided by data.table, but no library() call
## appears here — the script presumably relies on it being attached
## already; verify before running standalone.
## 1. Read the data file
DT <- fread("../household_power_consumption.txt", na.strings = c("?"))
##Get the two-month subset of the data
DTs <- subset(DT, Date=="1/2/2007" | Date == "2/2/2007")
## Concatenate the data in the Date and Time columns
datetime <- with( DTs, (paste(Date, Time, sep="*")))
## Convert the list of concatenated strings to the list of time objects
time<-strptime(datetime,"%d/%m/%Y*%H:%M:%S")
## Open the PNG device
png(filename = "Plot3.png",width=480, height=480, units="px")
## Draw a plot of the data with type="n" (no points yet) and default box
plot(time, DTs$Sub_metering_1, ann=FALSE, type="n")
box()
## Add lines
lines(time, DTs$Sub_metering_1)
lines(time, DTs$Sub_metering_2, col="red")
lines(time, DTs$Sub_metering_3, col="blue")
## Add a y label
title(ylab="Energy sub metering")
## add a legend (columns 7:9 are the three sub-metering series)
legend("topright", names(DTs)[7:9], col=c("black", "red", "blue"), lty = 1, lwd=2)
## 5. Close the graphical device to write the file
dev.off()
## Clear the workspace of the data
rm(DT, DTs, datetime, time)
|
# Stack the January and February 2018 CSVs vertically into one file.
library(gtools)
library(tidyverse)
df1 <- read.csv2(file = "janeiro-2018.csv")
df2 <- read.csv2(file = "fevereiro-2018.csv")
df3 = smartbind(df1, df2) # vertically joins two or more tables
write.csv(df3, file = "jan-fev-2018.csv")
|
/scripts/empilhar_tabelas.R
|
permissive
|
herbertizidro/r_scripts
|
R
| false
| false
| 236
|
r
|
# Stack the January and February 2018 CSVs vertically into one file.
library(gtools)
library(tidyverse)
df1 <- read.csv2(file = "janeiro-2018.csv")
df2 <- read.csv2(file = "fevereiro-2018.csv")
df3 = smartbind(df1, df2) # vertically joins two or more tables
write.csv(df3, file = "jan-fev-2018.csv")
|
#' Calculate mutual information between vectors
#'
#' @param x,y Vectors to be compared using mutual information.
#' @param mitype Type of mutual information estimator to be used. One of
#'   "kde", "jackknife" or "none". See details.
#' @param minorm Should mutual information be normalized? See details.
#' @param autoswitch If KDE fails, should estimator be switched to Jackknife?.
#' See details.
#' @param lc Should linear correlation between x and y be calculated?
#'
#' @details Two types of estimators of mutual information can be chosen by the
#' user: KDE and Jackknife. The latter tends to give consistent results, but
#' it is slower.
#'
#' When normalization is applied, mutual information is divided by the average
#' of the entropies of signals x and y.
#'
#' Sometimes KDE fails to produce a mutual information estimate. A common case
#' is when having a sample size too low for KDE estimation.
#'
#' Results of mutual information are expressed in nats.
#'
#' @return A one-row data frame with columns \code{mi}, \code{nlc} (the
#'   nonlinear correlation \code{sqrt(1 - exp(-2 * mi))}), \code{lc},
#'   \code{minorm} and \code{mitype}.
#'
#' @export
mi_vector <- function(x, y, mitype = "kde",
                      minorm = FALSE, autoswitch = FALSE, lc = TRUE) {
  # Linear (Pearson) correlation, if requested.
  if (lc) {
    cor <- cor(x, y)
  } else {
    cor <- NA
  }
  if (mitype == "kde") {
    # mikde() can error out (e.g. too few samples); map errors to NA.
    mi <- tryCatch(expr = {
      mikde(x, y)
    }, error = function(e) {
      return(NA)
    })
    # BUG FIX: (1) a failed KDE estimate shows up as NA as well as Inf, so
    # both now trigger the autoswitch; (2) the label was misspelled
    # "jacknife", which made the normalization branch below match neither
    # "kde" nor "jackknife", leaving h1/h2 undefined and erroring when
    # minorm = TRUE.
    if ((is.na(mi) || is.infinite(mi)) && autoswitch) {
      mi <- JMI::JMI(x, y, BN = 10)$mi
      mitype <- "jackknife"
      print("Not suitable for KDE. Switching to Jackknife")
    }
  } else if (mitype == "jackknife") {
    mi <- JMI::JMI(x, y, BN = 10)$mi
  } else if (mitype == "none") {
    mi <- NA
    minorm <- FALSE
  }
  if (minorm) {
    # Normalize by the average self-information (entropy estimate) of x, y.
    if (mitype == "kde") {
      h1 <- tryCatch(expr = {
        mikde(x, x)
      }, error = function(e) {
        return(NA)
      })
      h2 <- tryCatch(expr = {
        mikde(y, y)
      }, error = function(e) {
        return(NA)
      })
    } else if (mitype == "jackknife") {
      h1 <- JMI::JMI(x, x, BN = 10)$mi
      h2 <- JMI::JMI(y, y, BN = 10)$mi
    }
    minorm <- 2 * mi / (h1 + h2)
  } else {
    minorm <- NA
  }
  # Nonlinear correlation implied by mi; NA when mi is unusable.
  nlc <- ifelse(
    test = is.infinite(mi) | is.na(mi),
    yes = NA,
    no = sqrt(1 - exp(-2 * mi))
  )
  return(data.frame(mi = mi, nlc = nlc, lc = cor, minorm = minorm, mitype = mitype))
}
|
/R/mi_vector.R
|
no_license
|
crodriguez-saltos/misound
|
R
| false
| false
| 2,175
|
r
|
#' Calculate mutual information between vectors
#'
#' @param x,y Vectors to be compared using mutual information.
#' @param mitype Type of mutual information estimator to be used. See details.
#' @param minorm Should mutual information be normalized? See details.
#' @param autoswitch If KDE fails, should estimator be switched to Jackknife?.
#' See details.
#' @param lc Should linear correlation between x and y be calculated?
#'
#' @details Two types of estimators of mutual information can be chosen by the
#' user: KDE and Jackknife. The latter tends to give consistent results, but
#' it is slower.
#'
#' When normalization is applied, mutual information is divided by the average
#' of the entropies of signals x and y.
#'
#' Sometimes KDE fails to produce a mutual information estimate. A common case
#' is when having a sample size too low for KDE estimation.
#'
#' Results of mutual information are expressed in nats.
#'
#' @export
mi_vector <- function(x,y, mitype= "kde",
                      minorm= F, autoswitch= F, lc= T){
  # Linear (Pearson) correlation, if requested.
  if (lc){
    cor <- cor(x, y)
  }else{
    cor <- NA
  }
  if (mitype == "kde"){
    # mikde() can error out (e.g. too few samples); errors map to NA.
    mi <- tryCatch(expr = {
      mikde(x, y)
    }, error= function(e){
      return(NA)
    })
    if (is.infinite(mi) & autoswitch){
      mi <- JMI::JMI(x, y, BN = 10)$mi
      # NOTE(review): "jacknife" is misspelled — the normalization branch
      # below checks for "jackknife", so after an autoswitch with
      # minorm = TRUE, h1/h2 are never assigned and the function errors.
      mitype <- "jacknife"
      print("Not suitable for KDE. Switching to Jackknife")
    }
  }else if (mitype == "jackknife"){
    mi <- JMI::JMI(x, y, BN = 10)$mi
  }else if (mitype == "none"){
    mi <- NA
    minorm <- F
  }
  if (minorm){
    # Normalize by the average self-information (entropy estimate) of x, y.
    if (mitype == "kde"){
      h1 <- tryCatch(expr = {
        mikde(x, x)
      }, error= function(e){
        return(NA)
      })
      h2 <- tryCatch(expr = {
        mikde(y, y)
      }, error= function(e){
        return(NA)
      })
    }else if (mitype == "jackknife"){
      h1 <- JMI::JMI(x, x, BN = 10)$mi
      h2 <- JMI::JMI(y, y, BN = 10)$mi
    }
    minorm <- 2 * mi / (h1 + h2)
  }else{
    minorm <- NA
  }
  # Nonlinear correlation implied by mi; NA when mi is unusable.
  nlc <- ifelse(
    test= is.infinite(mi) | is.na(mi),
    yes = NA,
    no = sqrt(1 - exp(-2 * mi))
  )
  return(data.frame(mi= mi, nlc= nlc, lc= cor, minorm= minorm, mitype= mitype))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/last.R
\name{last}
\alias{last}
\title{A letter counting function}
\usage{
last(x)
}
\arguments{
\item{x}{The string whose letters should be counted.}
}
\description{
This function counts letters
}
\examples{
last()
}
\keyword{count}
\keyword{string}
|
/man/last.Rd
|
no_license
|
louischaman/Rstartup
|
R
| false
| true
| 335
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/last.R
\name{last}
\alias{last}
\title{A letter counting function}
\usage{
last(x)
}
\arguments{
\item{x}{The string whose letters should be counted.}
}
\description{
This function counts letters
}
\examples{
last()
}
\keyword{count}
\keyword{string}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_215}
\alias{movie_215}
\title{The Distinguished Gentleman}
\format{igraph object}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt0104114
}
\usage{
movie_215
}
\description{
Interactions of characters in the movie "The Distinguished Gentleman" (1992)
}
\details{
The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
|
/man/movie_215.Rd
|
permissive
|
kjhealy/networkdata
|
R
| false
| true
| 1,035
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_215}
\alias{movie_215}
\title{The Distinguished Gentleman}
\format{igraph object}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt0104114
}
\usage{
movie_215
}
\description{
Interactions of characters in the movie "The Distinguished Gentleman" (1992)
}
\details{
The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
|
#Regression Template
#
# Generic template: fit a regression model on the Position_Salaries data
# and visualise the fit. The `regressor` object must be created in the
# "Fitting Regression Model" section below before the prediction and
# plotting code will run.
# Importing the dataset
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]   # keep only the Level and Salary columns
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
#library(caTools)
#set.seed(123)
#split = sample.split(dataset$DependentVariable, SplitRatio = 0.8)
#training_set = subset(dataset, split == TRUE)
#test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
#Fitting Regression Model
#Creating the regressor
#Predicting results using Regression Model
y_pred = predict(regressor, data.frame(Level = 6.5))
# ggplot2 is required for the plots below (was used without being loaded).
library(ggplot2)
#Visualising Regression Model results
ggplot() +
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             colour = 'red') +
  geom_line(aes(x = dataset$Level, y = predict(regressor, newdata = dataset)),
            colour = 'blue') +
  ggtitle("Truth or Bluff (Polynomial Regression)") +
  xlab("Level") +
  ylab("Salary")
#Visualising Regression Model results (Higher precision)
x_grid = seq(min(dataset$Level),max(dataset$Level), 0.1)
ggplot() +
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             colour = 'red') +
  # BUG FIX: x_grid is an atomic vector, so x_grid$Level raised
  # "$ operator is invalid for atomic vectors"; use x_grid directly.
  geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
            colour = 'blue') +
  ggtitle("Truth or Bluff (Polynomial Regression)") +
  xlab("Level") +
  ylab("Salary")
|
/Machine Learning Template Folder/Part 2 - Regression/Polynomial Regression/regression_template.R
|
no_license
|
lionadis/Data-Science
|
R
| false
| false
| 1,470
|
r
|
#Regression Template
# Generic template: fit a regression model on the Position_Salaries data
# and visualise the fit. The `regressor` object must be created in the
# "Fitting Regression Model" section before the code below will run.
# Importing the dataset
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
#library(caTools)
#set.seed(123)
#split = sample.split(dataset$DependentVariable, SplitRatio = 0.8)
#training_set = subset(dataset, split == TRUE)
#test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
#Fitting Regression Model
#Creating the regressor
#Predecting results using Regression Model
y_pred = predict(regressor, data.frame(Level = 6.5))
#Visualising Regression Model results
ggplot() +
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             colour = 'red') +
  geom_line(aes(x = dataset$Level, y = predict(regressor, newdata = dataset)),
            colour = 'blue') +
  ggtitle("Truth or Bluff (Polynomial Regression)") +
  xlab("Level") +
  ylab("Salary")
#Visualising Regression Model results (Higher precision)
x_grid = seq(min(dataset$Level),max(dataset$Level), 0.1)
ggplot() +
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             colour = 'red') +
  # NOTE(review): x_grid is an atomic vector, so x_grid$Level below will
  # error ("$ operator is invalid for atomic vectors"); it should read
  # x = x_grid.
  geom_line(aes(x = x_grid$Level, y = predict(regressor, newdata = data.frame(Level = x_grid))),
            colour = 'blue') +
  ggtitle("Truth or Bluff (Polynomial Regression)") +
  xlab("Level") +
  ylab("Salary")
|
library(discreteRV)
### Name: rsim
### Title: Simulate n independent trials from a random variable X:
### Aliases: rsim
### ** Examples

# A fair Bernoulli variable: outcomes 1 and 0, each with probability 0.5.
fair_coin <- RV(c(1, 0), c(.5, .5))
fair_coin_draws <- rsim(fair_coin, 100)

# A loaded six-sided die, specified via odds rather than probabilities.
loaded_die <- RV(1:6, odds = c(1, 1, 1, 1, 2, 4))
loaded_die_draws <- rsim(loaded_die, 100)

# The function 'rsim()' attaches the probabilities as names to the random draws.
# To get the values only, use 'as.vector()':
as.vector(fair_coin_draws)
as.vector(loaded_die_draws)
|
/data/genthat_extracted_code/discreteRV/examples/rsim.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 487
|
r
|
library(discreteRV)
### Name: rsim
### Title: Simulate n independent trials from a random variable X:
### Aliases: rsim
### ** Examples

# A fair Bernoulli variable: outcomes 1 and 0, each with probability 0.5.
fair_coin <- RV(c(1, 0), c(.5, .5))
fair_coin_draws <- rsim(fair_coin, 100)

# A loaded six-sided die, specified via odds rather than probabilities.
loaded_die <- RV(1:6, odds = c(1, 1, 1, 1, 2, 4))
loaded_die_draws <- rsim(loaded_die, 100)

# The function 'rsim()' attaches the probabilities as names to the random draws.
# To get the values only, use 'as.vector()':
as.vector(fair_coin_draws)
as.vector(loaded_die_draws)
|
melanomendata <- function(directory) {
  # Load the 2015 and 2016 melanoma databases and print descriptive
  # statistics (mean / median / SD), sex and TNM-stage frequency tables,
  # year-on-year comparisons, and one-sided tests against national
  # guideline targets (14 and 42 days).
  #
  # directory: must be exactly "MelanomenData"; any other value aborts.
  if (directory == "MelanomenData") {
    # FIX: remember and restore the caller's working directory instead of
    # permanently changing it as a hidden side effect.
    old_wd <- setwd("/Users/lukas/Documents/Uni Maas/Medicine/Jaar 6/Onderzoek_Dermatologie/MelanomenData/")
    on.exit(setwd(old_wd), add = TRUE)
    library(gdata)
    library(plyr)
    print("Getting the results of the 2015-2016 Databases")
    melanomen2015 <- read.xls("2015.xlsx", method = "tab", na.strings = "-")
    melanomen2016 <- read.xls("2016.xlsx", method = "tab", na.strings = "-")
  } else {
    # BUG FIX: the original only print()ed a message here and fell through,
    # so the function later failed with "object 'melanomen2015' not found".
    stop("Directory not found", call. = FALSE)
  }
  ## sample size n for 2015: non-missing values in column 14 (access times)
  nna <- !is.na(melanomen2015[, 14])
  n <- sum(nna)  # simplified from sum(nna == TRUE)
  print(n)
  ## 2015 means; columns 14:22 and 27 hold the numeric timing variables
  listmean2015 <- lapply(melanomen2015[, c(14:22, 27)], mean, na.rm = TRUE)
  print("2015 Data Mean")
  str(listmean2015, digits.d = 4)
  ## 2016 means
  listmean2016 <- lapply(melanomen2016[, c(14:22, 27)], mean, na.rm = TRUE)
  print("2016 Data Mean")
  str(listmean2016, digits.d = 4)
  ## 2015 medians
  listmedian2015 <- lapply(melanomen2015[, c(14:22, 27)], median, na.rm = TRUE)
  print("2015 Data median")
  str(listmedian2015, digits.d = 4)
  ## 2016 medians
  listmedian2016 <- lapply(melanomen2016[, c(14:22, 27)], median, na.rm = TRUE)
  print("2016 Data median")
  str(listmedian2016, digits.d = 4)
  ## 2015 standard deviations
  listsd2015 <- lapply(melanomen2015[, c(14:22, 27)], sd, na.rm = TRUE)
  print("2015 Data SD")
  str(listsd2015, digits.d = 4)
  ## 2016 standard deviations
  listsd2016 <- lapply(melanomen2016[, c(14:22, 27)], sd, na.rm = TRUE)
  print("2016 Data SD")
  str(listsd2016, digits.d = 4)
  ## sex distribution 2015 (column 25); labels are German (men / women)
  meltable15 <- table(melanomen2015[, 25])
  names(meltable15) <- c("Männer", "Frauen")
  frequencies <- prop.table(meltable15)
  print("Prozentsätze, Männer und Frauen in der 2015 Studie:")
  print(frequencies)
  ## manwomen <- summary(melanomen2015[,25], na.rm = TRUE)
  ## print(manwomen)
  ## sex distribution 2016
  meltable16 <- table(melanomen2016[, 25])
  names(meltable16) <- c("Männer", "Frauen")
  frequencies <- prop.table(meltable16)
  print("Prozentsätze, Männer und Frauen in der 2016 Studie:")
  print(frequencies)
  ## 2015 TNM tumour stage counts (column 26)
  tnmtable15 <- table(melanomen2015[, 26])
  print(tnmtable15)
  ## 2016 TNM tumour stage counts
  tnmtable16 <- table(melanomen2016[, 26])
  print(tnmtable16)
  ## Compare each timing column between years: t-test when both years pass
  ## the Shapiro-Wilk normality check, Wilcoxon rank-sum test otherwise.
  print("Die Ergebnisse der statistischen Tests:")
  testresult <- list()
  for (i in c(14:16, 18:22)) {
    shapiro2016 <- shapiro.test(melanomen2016[, i])
    shapiro2016p <- shapiro2016$p.value
    shapiro2015 <- shapiro.test(melanomen2015[, i])
    shapiro2015p <- shapiro2015$p.value
    hist(melanomen2015[, i])
    hist(melanomen2016[, i])
    # FIX: scalar condition, so use short-circuit && rather than elementwise &
    if (shapiro2016p > 0.05 && shapiro2015p > 0.05) {
      # NOTE(review): na.rm is not a t.test()/wilcox.test() argument; it is
      # silently absorbed by `...` (NAs are dropped by na.action anyway).
      testresult[[i]] <- t.test(melanomen2015[, i], melanomen2016[, i], na.rm = TRUE)
    } else {
      testresult[[i]] <- wilcox.test(melanomen2015[, i], melanomen2016[, i], na.rm = TRUE, conf.int = TRUE, exact = FALSE)
    }
  }
  ## drop the NULL slots left at the skipped indices (1:13 and 17)
  nulltest <- Filter(Negate(is.null), testresult)
  print(nulltest)
  ## One-sided Wilcoxon test: 2015 access times vs 14-day national guideline
  ttrichtlijn2015 <- wilcox.test(melanomen2015[, 14], alternative = "less", mu = 14)
  print("2015 resultat vergleich mit landelijker richtlijn")
  print(ttrichtlijn2015)
  ## One-sided Wilcoxon test: 2016 access times vs 14-day guideline
  ttrichtlijn2016 <- wilcox.test(melanomen2016[, 14], alternative = "less", mu = 14, exact = FALSE)
  print("2016 resultat vergleich mit landelijker richtlijn")
  print(ttrichtlijn2016)
  ## One-sided Wilcoxon test: 2015 time to results interview vs 14 days
  ugrichtlijn2015 <- wilcox.test(melanomen2015[, 19], alternative = "less", mu = 14)
  print("2015 resultat vergleich mit landelijker richtlijn doorlooptijden uitslaggesprek")
  print(ugrichtlijn2015)
  ## One-sided Wilcoxon test: 2016 time to results interview vs 14 days
  ugrichtlijn2016 <- wilcox.test(melanomen2016[, 19], alternative = "less", mu = 14, exact = FALSE)
  print("2016 resultat vergleich mit landelijker richtlijn doorlooptijden uitslaggesprek")
  print(ugrichtlijn2016)
  ## One-sided Wilcoxon test: 2015 time to therapeutic excision vs 42 days
  exrichtlijn2015 <- wilcox.test(melanomen2015[, 21], alternative = "less", mu = 42)
  print("2015 resultat vergleich mit landelijker richtlijn doorlooptijden uitslaggesprek")
  print(exrichtlijn2015)
  ## One-sided Wilcoxon test: 2016 time to therapeutic excision vs 42 days
  exrichtlijn2016 <- wilcox.test(melanomen2016[, 21], alternative = "less", mu = 42, exact = FALSE)
  print("2016 resultat vergleich mit landelijker richtlijn doorlooptijden uitslaggesprek")
  print(exrichtlijn2016)
  ## One-sided t-test: 2015 sentinel-node procedure times vs 42 days
  snrichtlijn2015 <- t.test(melanomen2015[, 22], alternative = "less", mu = 42)
  print("2016 resultat vergleich mit landelijker richtlijn doorlooptijden sn")
  print(snrichtlijn2015)
  ## One-sided t-test: 2016 sentinel-node procedure times vs 42 days
  snrichtlijn2016 <- t.test(melanomen2016[, 22], alternative = "less", mu = 42)
  print("2016 resultat vergleich mit landelijker richtlijn doorlooptijden sn")
  print(snrichtlijn2016)
}
|
/MDataBase.R
|
no_license
|
Lukas2010/accesstime_public_hospital
|
R
| false
| false
| 5,095
|
r
|
melanomendata <- function(directory) {
  # Load the 2015 and 2016 melanoma databases and print descriptive
  # statistics (mean / median / SD), sex and TNM-stage frequency tables,
  # year-on-year comparisons, and one-sided tests against national
  # guideline targets (14 and 42 days).
  #
  # directory: must be exactly "MelanomenData"; any other value aborts.
  if (directory == "MelanomenData") {
    # FIX: remember and restore the caller's working directory instead of
    # permanently changing it as a hidden side effect.
    old_wd <- setwd("/Users/lukas/Documents/Uni Maas/Medicine/Jaar 6/Onderzoek_Dermatologie/MelanomenData/")
    on.exit(setwd(old_wd), add = TRUE)
    library(gdata)
    library(plyr)
    print("Getting the results of the 2015-2016 Databases")
    melanomen2015 <- read.xls("2015.xlsx", method = "tab", na.strings = "-")
    melanomen2016 <- read.xls("2016.xlsx", method = "tab", na.strings = "-")
  } else {
    # BUG FIX: the original only print()ed a message here and fell through,
    # so the function later failed with "object 'melanomen2015' not found".
    stop("Directory not found", call. = FALSE)
  }
  ## sample size n for 2015: non-missing values in column 14 (access times)
  nna <- !is.na(melanomen2015[, 14])
  n <- sum(nna)  # simplified from sum(nna == TRUE)
  print(n)
  ## 2015 means; columns 14:22 and 27 hold the numeric timing variables
  listmean2015 <- lapply(melanomen2015[, c(14:22, 27)], mean, na.rm = TRUE)
  print("2015 Data Mean")
  str(listmean2015, digits.d = 4)
  ## 2016 means
  listmean2016 <- lapply(melanomen2016[, c(14:22, 27)], mean, na.rm = TRUE)
  print("2016 Data Mean")
  str(listmean2016, digits.d = 4)
  ## 2015 medians
  listmedian2015 <- lapply(melanomen2015[, c(14:22, 27)], median, na.rm = TRUE)
  print("2015 Data median")
  str(listmedian2015, digits.d = 4)
  ## 2016 medians
  listmedian2016 <- lapply(melanomen2016[, c(14:22, 27)], median, na.rm = TRUE)
  print("2016 Data median")
  str(listmedian2016, digits.d = 4)
  ## 2015 standard deviations
  listsd2015 <- lapply(melanomen2015[, c(14:22, 27)], sd, na.rm = TRUE)
  print("2015 Data SD")
  str(listsd2015, digits.d = 4)
  ## 2016 standard deviations
  listsd2016 <- lapply(melanomen2016[, c(14:22, 27)], sd, na.rm = TRUE)
  print("2016 Data SD")
  str(listsd2016, digits.d = 4)
  ## sex distribution 2015 (column 25); labels are German (men / women)
  meltable15 <- table(melanomen2015[, 25])
  names(meltable15) <- c("Männer", "Frauen")
  frequencies <- prop.table(meltable15)
  print("Prozentsätze, Männer und Frauen in der 2015 Studie:")
  print(frequencies)
  ## manwomen <- summary(melanomen2015[,25], na.rm = TRUE)
  ## print(manwomen)
  ## sex distribution 2016
  meltable16 <- table(melanomen2016[, 25])
  names(meltable16) <- c("Männer", "Frauen")
  frequencies <- prop.table(meltable16)
  print("Prozentsätze, Männer und Frauen in der 2016 Studie:")
  print(frequencies)
  ## 2015 TNM tumour stage counts (column 26)
  tnmtable15 <- table(melanomen2015[, 26])
  print(tnmtable15)
  ## 2016 TNM tumour stage counts
  tnmtable16 <- table(melanomen2016[, 26])
  print(tnmtable16)
  ## Compare each timing column between years: t-test when both years pass
  ## the Shapiro-Wilk normality check, Wilcoxon rank-sum test otherwise.
  print("Die Ergebnisse der statistischen Tests:")
  testresult <- list()
  for (i in c(14:16, 18:22)) {
    shapiro2016 <- shapiro.test(melanomen2016[, i])
    shapiro2016p <- shapiro2016$p.value
    shapiro2015 <- shapiro.test(melanomen2015[, i])
    shapiro2015p <- shapiro2015$p.value
    hist(melanomen2015[, i])
    hist(melanomen2016[, i])
    # FIX: scalar condition, so use short-circuit && rather than elementwise &
    if (shapiro2016p > 0.05 && shapiro2015p > 0.05) {
      # NOTE(review): na.rm is not a t.test()/wilcox.test() argument; it is
      # silently absorbed by `...` (NAs are dropped by na.action anyway).
      testresult[[i]] <- t.test(melanomen2015[, i], melanomen2016[, i], na.rm = TRUE)
    } else {
      testresult[[i]] <- wilcox.test(melanomen2015[, i], melanomen2016[, i], na.rm = TRUE, conf.int = TRUE, exact = FALSE)
    }
  }
  ## drop the NULL slots left at the skipped indices (1:13 and 17)
  nulltest <- Filter(Negate(is.null), testresult)
  print(nulltest)
  ## One-sided Wilcoxon test: 2015 access times vs 14-day national guideline
  ttrichtlijn2015 <- wilcox.test(melanomen2015[, 14], alternative = "less", mu = 14)
  print("2015 resultat vergleich mit landelijker richtlijn")
  print(ttrichtlijn2015)
  ## One-sided Wilcoxon test: 2016 access times vs 14-day guideline
  ttrichtlijn2016 <- wilcox.test(melanomen2016[, 14], alternative = "less", mu = 14, exact = FALSE)
  print("2016 resultat vergleich mit landelijker richtlijn")
  print(ttrichtlijn2016)
  ## One-sided Wilcoxon test: 2015 time to results interview vs 14 days
  ugrichtlijn2015 <- wilcox.test(melanomen2015[, 19], alternative = "less", mu = 14)
  print("2015 resultat vergleich mit landelijker richtlijn doorlooptijden uitslaggesprek")
  print(ugrichtlijn2015)
  ## One-sided Wilcoxon test: 2016 time to results interview vs 14 days
  ugrichtlijn2016 <- wilcox.test(melanomen2016[, 19], alternative = "less", mu = 14, exact = FALSE)
  print("2016 resultat vergleich mit landelijker richtlijn doorlooptijden uitslaggesprek")
  print(ugrichtlijn2016)
  ## One-sided Wilcoxon test: 2015 time to therapeutic excision vs 42 days
  exrichtlijn2015 <- wilcox.test(melanomen2015[, 21], alternative = "less", mu = 42)
  print("2015 resultat vergleich mit landelijker richtlijn doorlooptijden uitslaggesprek")
  print(exrichtlijn2015)
  ## One-sided Wilcoxon test: 2016 time to therapeutic excision vs 42 days
  exrichtlijn2016 <- wilcox.test(melanomen2016[, 21], alternative = "less", mu = 42, exact = FALSE)
  print("2016 resultat vergleich mit landelijker richtlijn doorlooptijden uitslaggesprek")
  print(exrichtlijn2016)
  ## One-sided t-test: 2015 sentinel-node procedure times vs 42 days
  snrichtlijn2015 <- t.test(melanomen2015[, 22], alternative = "less", mu = 42)
  print("2016 resultat vergleich mit landelijker richtlijn doorlooptijden sn")
  print(snrichtlijn2015)
  ## One-sided t-test: 2016 sentinel-node procedure times vs 42 days
  snrichtlijn2016 <- t.test(melanomen2016[, 22], alternative = "less", mu = 42)
  print("2016 resultat vergleich mit landelijker richtlijn doorlooptijden sn")
  print(snrichtlijn2016)
}
|
# Exercise 1: calling built-in functions

# Create a variable `my_name` that contains your name
my_name <- "Emily"

# Create a variable `name_length` that holds how many letters (including spaces)
# are in your name (use the `nchar()` function)
# FIX: measure `my_name` itself rather than repeating the literal string,
# so the script stays correct if the name changes.
name_length <- nchar(my_name)

# Print the number of letters in your name
print(name_length)
name_length

# Create a variable `now_doing` that is your name followed by "is programming!"
# (use the `paste()` function)
# FIX: paste() already separates its arguments with one space; the original
# paste("Emily", " is programming!") produced a double space.
now_doing <- paste(my_name, "is programming!")
now_doing

# Make the `now_doing` variable upper case
now_doing <- toupper(now_doing)
print(now_doing)

### Bonus

# Pick two of your favorite numbers (between 1 and 100) and assign them to
# variables `fav_1` and `fav_2`
fav_1 <- 23
fav_2 <- 8

# Divide each number by the square root of 201 and save the new value in the
# original variable
fav_1 <- fav_1 / sqrt(201)
fav_2 <- fav_2 / sqrt(201)  # FIX: the original never divided fav_2
fav_1

# Create a variable `raw_sum` that is the sum of the two variables. Use the
# `sum()` function for practice.
raw_sum <- sum(fav_1, fav_2)

# Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place.
# Use the `round()` function.
round_sum <- round(raw_sum, 1)

# Create two new variables `round_1` and `round_2` that are your `fav_1` and
# `fav_2` variables rounded to 1 decimal places
round_1 <- round(fav_1, 1)
round_2 <- round(fav_2, 1)

# Create a variable `sum_round` that is the sum of the rounded values
sum_round <- sum(round_1, round_2)

# Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!)
max(round_sum, sum_round)
|
/exercise-1/exercise.R
|
permissive
|
aemelialialia/ch6-functions
|
R
| false
| false
| 1,399
|
r
|
# Exercise 1: calling built-in functions

# Create a variable `my_name` that contains your name
my_name <- "Emily"

# Create a variable `name_length` that holds how many letters (including spaces)
# are in your name (use the `nchar()` function)
# FIX: measure `my_name` itself rather than repeating the literal string,
# so the script stays correct if the name changes.
name_length <- nchar(my_name)

# Print the number of letters in your name
print(name_length)
name_length

# Create a variable `now_doing` that is your name followed by "is programming!"
# (use the `paste()` function)
# FIX: paste() already separates its arguments with one space; the original
# paste("Emily", " is programming!") produced a double space.
now_doing <- paste(my_name, "is programming!")
now_doing

# Make the `now_doing` variable upper case
now_doing <- toupper(now_doing)
print(now_doing)

### Bonus

# Pick two of your favorite numbers (between 1 and 100) and assign them to
# variables `fav_1` and `fav_2`
fav_1 <- 23
fav_2 <- 8

# Divide each number by the square root of 201 and save the new value in the
# original variable
fav_1 <- fav_1 / sqrt(201)
fav_2 <- fav_2 / sqrt(201)  # FIX: the original never divided fav_2
fav_1

# Create a variable `raw_sum` that is the sum of the two variables. Use the
# `sum()` function for practice.
raw_sum <- sum(fav_1, fav_2)

# Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place.
# Use the `round()` function.
round_sum <- round(raw_sum, 1)

# Create two new variables `round_1` and `round_2` that are your `fav_1` and
# `fav_2` variables rounded to 1 decimal places
round_1 <- round(fav_1, 1)
round_2 <- round(fav_2, 1)

# Create a variable `sum_round` that is the sum of the rounded values
sum_round <- sum(round_1, round_2)

# Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!)
max(round_sum, sum_round)
|
# seedChangeEval: evaluate a sequence of R expressions in order and partition
# them into groups according to when the RNG state changes.
#
# After each expression is evaluated, the current .Random.seed is compared
# with its previous value; a new group starts at every expression that
# advanced the seed (i.e. consumed random numbers).
#
# code    a file path (parsed via parse(file)), a character string of R code
#         (parsed via parse(text=)), or an already-parsed expression object.
#         A bare call is rejected: an expression *container* is required so
#         the elements can be iterated over.
# envir   environment in which the expressions are evaluated.
# verbose if TRUE, print each deparsed expression before evaluating it.
# ...     currently unused.
#
# Value: a named list of lists of calls, split on the cumulative count of
# seed changes; names are the deparsed source of each expression.
seedChangeEval =
function(code, envir = globalenv(), verbose = TRUE, ...)
{
    if(is.character(code)) {
        # An existing file name is treated as a script path; otherwise the
        # string itself is parsed as R source code.
        if(file.exists(code))
            code = parse(code)
        else
            code = parse(text = code)
    }
    if(!is.language(code) || is.call(code))
        stop("need a language object")
    # Label each element with its deparsed source so groups are identifiable.
    names(code) = sapply(code, function(x) paste(deparse(x), collapse = " "))
    # Ensure .Random.seed exists before comparing against it.
    # NOTE(review): set.seed(Sys.time()) relies on coercing POSIXct to an
    # integer seed -- confirm this is intended.
    if(!exists(".Random.seed"))
        set.seed(Sys.time())
    curSeed = .Random.seed
    changes = sapply(code, function(x) {
        if(verbose)
            print(deparse(x))
        # The timing from system.time() is discarded; the call exists only to
        # evaluate x (for its effect, including any RNG use).
        system.time(eval(x, envir))
        ans = !identical(curSeed, .Random.seed)
        if(ans)
            curSeed <<- .Random.seed  # remember the new seed for later comparisons
        ans
    })
    # cumsum(changes) yields a group id that increments at each seed change.
    split(as.list(code), cumsum(changes))
}
|
/R/seedChangeEval.R
|
no_license
|
duncantl/CallCounter
|
R
| false
| false
| 878
|
r
|
# seedChangeEval: evaluate a sequence of R expressions in order and partition
# them into groups according to when the RNG state changes.
#
# After each expression is evaluated, the current .Random.seed is compared
# with its previous value; a new group starts at every expression that
# advanced the seed (i.e. consumed random numbers).
#
# code    a file path (parsed via parse(file)), a character string of R code
#         (parsed via parse(text=)), or an already-parsed expression object.
#         A bare call is rejected: an expression *container* is required so
#         the elements can be iterated over.
# envir   environment in which the expressions are evaluated.
# verbose if TRUE, print each deparsed expression before evaluating it.
# ...     currently unused.
#
# Value: a named list of lists of calls, split on the cumulative count of
# seed changes; names are the deparsed source of each expression.
seedChangeEval =
function(code, envir = globalenv(), verbose = TRUE, ...)
{
    if(is.character(code)) {
        # An existing file name is treated as a script path; otherwise the
        # string itself is parsed as R source code.
        if(file.exists(code))
            code = parse(code)
        else
            code = parse(text = code)
    }
    if(!is.language(code) || is.call(code))
        stop("need a language object")
    # Label each element with its deparsed source so groups are identifiable.
    names(code) = sapply(code, function(x) paste(deparse(x), collapse = " "))
    # Ensure .Random.seed exists before comparing against it.
    # NOTE(review): set.seed(Sys.time()) relies on coercing POSIXct to an
    # integer seed -- confirm this is intended.
    if(!exists(".Random.seed"))
        set.seed(Sys.time())
    curSeed = .Random.seed
    changes = sapply(code, function(x) {
        if(verbose)
            print(deparse(x))
        # The timing from system.time() is discarded; the call exists only to
        # evaluate x (for its effect, including any RNG use).
        system.time(eval(x, envir))
        ans = !identical(curSeed, .Random.seed)
        if(ans)
            curSeed <<- .Random.seed  # remember the new seed for later comparisons
        ans
    })
    # cumsum(changes) yields a group id that increments at each seed change.
    split(as.list(code), cumsum(changes))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BP_pred_MGWRSAR.R
\name{BP_pred_MGWRSAR}
\alias{BP_pred_MGWRSAR}
\title{BP_pred_MGWRSAR
to be documented}
\usage{
BP_pred_MGWRSAR(YS,X,W,e,beta_hat,lambda_hat,S,O,coord,type='BPN',k=16,Wk=NULL)
}
\arguments{
\item{YS}{to be documented}
\item{X}{to be documented}
\item{W}{to be documented}
\item{e}{to be documented}
\item{beta_hat}{to be documented}
\item{lambda_hat}{to be documented}
\item{S}{to be documented}
\item{O}{to be documented}
\item{coord}{to be documented}
\item{type}{to be documented}
\item{k}{to be documented}
\item{Wk}{to be documented}
}
\value{
to be documented
}
\description{
BP_pred_MGWRSAR
to be documented
}
\keyword{internal}
|
/man/BP_pred_MGWRSAR.Rd
|
no_license
|
shepherdmeng/mgwrsar
|
R
| false
| true
| 743
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BP_pred_MGWRSAR.R
\name{BP_pred_MGWRSAR}
\alias{BP_pred_MGWRSAR}
\title{BP_pred_MGWRSAR
to be documented}
\usage{
BP_pred_MGWRSAR(YS,X,W,e,beta_hat,lambda_hat,S,O,coord,type='BPN',k=16,Wk=NULL)
}
\arguments{
\item{YS}{to be documented}
\item{X}{to be documented}
\item{W}{to be documented}
\item{e}{to be documented}
\item{beta_hat}{to be documented}
\item{lambda_hat}{to be documented}
\item{S}{to be documented}
\item{O}{to be documented}
\item{coord}{to be documented}
\item{type}{to be documented}
\item{k}{to be documented}
\item{Wk}{to be documented}
}
\value{
to be documented
}
\description{
BP_pred_MGWRSAR
to be documented
}
\keyword{internal}
|
# Load the package (after installation, see above).
library(GenSA) # GenSA is better than optimx (although somewhat slower)
library(FD) # for FD::maxent() (make sure this is up-to-date)
library(snow) # (if you want to use multicore functionality; some systems/R versions prefer library(parallel), try either)
library(parallel)
#######################################################
# 2018-10-10 update: I have been putting the
# updates on CRAN/GitHub
# You should use:
# rexpokit version 0.26.6 from CRAN
# cladoRcpp version 0.15 from CRAN
# BioGeoBEARS version 1.1 from GitHub, install with:
# library(devtools)
# devtools::install_github(repo="nmatzke/BioGeoBEARS")
#######################################################
library(rexpokit)
library(cladoRcpp)
library(BioGeoBEARS)
#######################################################
# CUT: The old instructions to source() online upgrade .R files have been deleted,
# all updates are now on the GitHub version of the package, version 1.1+
#######################################################
#######################################################
# (This local-sourcing is mostly useful for Nick, while actively developing)
# Local source()-ing method -- uses BioGeoBEARS sourceall() function
# on a directory of .R files, so you don't have to type them out.
# The directories here are on my machine, you would have to make a
# directory, save the .R files there, and refer to them.
#
# NOTE: it's best to source the "cladoRcpp.R" update first, to avoid warnings like this:
##
## Note: possible error in 'rcpp_calc_anclikes_sp_COOweights_faster(Rcpp_leftprobs = tmpca_1, ':
## unused arguments (m = m, m_null_range = include_null_range, jts_matrix = jts_matrix)
##
#
# TO USE: Delete or comment out the 'source("http://...")' commands above, and un-comment
# the below...
########################################################################
# Un-comment (and fix directory paths) to use:
#library(BioGeoBEARS)
#source("/drives/Dropbox/_njm/__packages/cladoRcpp_setup/cladoRcpp.R")
#sourceall("/drives/Dropbox/_njm/__packages/BioGeoBEARS_setup/")
#calc_loglike_sp = compiler::cmpfun(calc_loglike_sp_prebyte) # crucial to fix bug in uppass calculations
#calc_independent_likelihoods_on_each_branch = compiler::cmpfun(calc_independent_likelihoods_on_each_branch_prebyte)
########################################################################
#######################################################
# SETUP: YOUR WORKING DIRECTORY
#######################################################
# You will need to set your working directory to match your local system
# Note these very handy functions!
# Command "setwd(x)" sets your working directory
# Command "getwd()" gets your working directory and tells you what it is.
# Command "list.files()" lists the files in your working directory
# To get help on any command, use "?". E.g., "?list.files"
# Set your working directory for output files
# default here is your home directory ("~")
# Change this as you like
wd = "/GitHub/BioGeoBEARS/inst/extdata/examples/check_strat5_ML/M0/"
setwd(wd)
# Double-check your working directory with getwd()
getwd()
#######################################################
# SETUP: Extension data directory
#######################################################
# When R packages contain extra files, they are stored in the "extdata" directory
# inside the installed package.
#
# BioGeoBEARS contains various example files and scripts in its extdata directory.
#
# Each computer operating system might install BioGeoBEARS in a different place,
# depending on your OS and settings.
#
# However, you can find the extdata directory like this:
extdata_dir = np(system.file("extdata", package="BioGeoBEARS"))
extdata_dir
list.files(extdata_dir)
# "system.file" looks in the directory of a specified package (in this case BioGeoBEARS)
# The function "np" is just a shortcut for normalizePath(), which converts the
# path to the format appropriate for your system (e.g., Mac/Linux use "/", but
# Windows uses "\\", if memory serves).
# Even when using your own data files, you should KEEP these commands in your
# script, since the plot_BioGeoBEARS_results function needs a script from the
# extdata directory to calculate the positions of "corners" on the plot. This cannot
# be made into a straight up BioGeoBEARS function because it uses C routines
# from the package APE which do not pass R CMD check for some reason.
#######################################################
# SETUP: YOUR TREE FILE AND GEOGRAPHY FILE
#######################################################
# Example files are given below. To run your own data,
# make the below lines point to your own files, e.g.
# trfn = "/mydata/frogs/frogBGB/tree.newick"
# geogfn = "/mydata/frogs/frogBGB/geog.data"
#######################################################
# Phylogeny file
# Notes:
# 1. Must be binary/bifurcating: no polytomies
# 2. No negative branchlengths (e.g. BEAST MCC consensus trees sometimes have negative branchlengths)
# 3. Be careful of very short branches, as BioGeoBEARS will interpret ultrashort branches as direct ancestors
# 4. You can use non-ultrametric trees, but BioGeoBEARS will interpret any tips significantly below the
# top of the tree as fossils! This is only a good idea if you actually do have fossils in your tree,
# as in e.g. Wood, Matzke et al. (2013), Systematic Biology.
# 5. The default settings of BioGeoBEARS make sense for trees where the branchlengths are in units of
# millions of years, and the tree is 1-1000 units tall. If you have a tree with a total height of
# e.g. 0.00001, you will need to adjust e.g. the max values of d and e, or (simpler) multiply all
# your branchlengths to get them into reasonable units.
# 6. DON'T USE SPACES IN SPECIES NAMES, USE E.G. "_"
#######################################################
# This is the example Newick file for Hawaiian 3taxa
# (from Ree & Smith 2008)
# "trfn" = "tree file name"
trfn = "tree.newick"
# Look at the raw Newick file:
moref(trfn)
# Look at your phylogeny (plots to a PDF, which avoids issues with multiple graphics in same window):
pdffn = "tree.pdf"
pdf(file=pdffn, width=9, height=12)
tr = read.tree(trfn)
tr
plot(tr)
title("Example 3taxa phylogeny")
axisPhylo() # plots timescale
dev.off()
cmdstr = paste0("open ", pdffn)
system(cmdstr)
#######################################################
# Geography file
# Notes:
# 1. This is a PHYLIP-formatted file. This means that in the
# first line,
# - the 1st number equals the number of rows (species)
# - the 2nd number equals the number of columns (number of areas)
# - after a tab, put the areas in parentheses, with spaces: (A B C D)
#
# 1.5. Example first line:
# 10 4 (A B C D)
#
# 2. The second line, and subsequent lines:
# speciesA 0110
# speciesB 0111
# speciesC 0001
# ...
#
# 2.5a. This means a TAB between the species name and the area 0/1s
# 2.5b. This also means NO SPACE AND NO TAB between the area 0/1s.
#
# 3. See example files at:
# http://phylo.wikidot.com/biogeobears#files
#
# 4. Make you understand what a PLAIN-TEXT EDITOR is:
# http://phylo.wikidot.com/biogeobears#texteditors
#
# 3. The PHYLIP format is the same format used for C++ LAGRANGE geography files.
#
# 4. All names in the geography file must match names in the phylogeny file.
#
# 5. DON'T USE SPACES IN SPECIES NAMES, USE E.G. "_"
#
# 6. Operational taxonomic units (OTUs) should ideally be phylogenetic lineages,
# i.e. genetically isolated populations. These may or may not be identical
# with species. You would NOT want to just use specimens, as each specimen
# automatically can only live in 1 area, which will typically favor DEC+J
# models. This is fine if the species/lineages really do live in single areas,
# but you wouldn't want to assume this without thinking about it at least.
# In summary, you should collapse multiple specimens into species/lineages if
# data indicates they are the same genetic population.
######################################################
# This is the example geography file for Hawaiian 3taxa
# (from Ree & Smith 2008)
geogfn = "geog.data"
# Look at the raw geography text file:
moref(geogfn)
# Look at your geographic range data:
tipranges = getranges_from_LagrangePHYLIP(lgdata_fn=geogfn)
tipranges
# Maximum range size observed:
max(rowSums(dfnums_to_numeric(tipranges@df)))
# Set the maximum number of areas any species may occupy; this cannot be larger
# than the number of areas you set up, but it can be smaller.
max_range_size = 4
####################################################
####################################################
# KEY HINT: The number of states (= number of different possible geographic ranges)
# depends on (a) the number of areas and (b) max_range_size.
# If you have more than about 500-600 states, the calculations will get REALLY slow,
# since the program has to exponentiate a matrix of e.g. 600x600. Often the computer
# will just sit there and crunch, and never get through the calculation of the first
# likelihood.
#
# (this is also what is usually happening when LAGRANGE hangs: you have too many states!)
#
# To check the number of states for a given number of ranges, try:
numstates_from_numareas(numareas=4, maxareas=4, include_null_range=TRUE)
numstates_from_numareas(numareas=4, maxareas=4, include_null_range=FALSE)
numstates_from_numareas(numareas=4, maxareas=3, include_null_range=TRUE)
numstates_from_numareas(numareas=4, maxareas=2, include_null_range=TRUE)
# Large numbers of areas have problems:
numstates_from_numareas(numareas=10, maxareas=10, include_null_range=TRUE)
# ...unless you limit the max_range_size:
numstates_from_numareas(numareas=10, maxareas=2, include_null_range=TRUE)
####################################################
####################################################
#######################################################
#######################################################
# DEC AND DEC+J ANALYSIS
#######################################################
#######################################################
# NOTE: The BioGeoBEARS "DEC" model is identical with
# the Lagrange DEC model, and should return identical
# ML estimates of parameters, and the same
# log-likelihoods, for the same datasets.
#
# Ancestral state probabilities at nodes will be slightly
# different, since BioGeoBEARS is reporting the
# ancestral state probabilities under the global ML
# model, and Lagrange is reporting ancestral state
# probabilities after re-optimizing the likelihood
# after fixing the state at each node. These will
# be similar, but not identical. See Matzke (2014),
# Systematic Biology, for discussion.
#
# Also see Matzke (2014) for presentation of the
# DEC+J model.
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
# Run DEC
#######################################################
# Intitialize a default model (DEC model)
BioGeoBEARS_run_object = define_BioGeoBEARS_run()
# Give BioGeoBEARS the location of the phylogeny Newick file
BioGeoBEARS_run_object$trfn = trfn
# Give BioGeoBEARS the location of the geography text file
BioGeoBEARS_run_object$geogfn = geogfn
# Input the maximum range size
BioGeoBEARS_run_object$max_range_size = max_range_size
BioGeoBEARS_run_object$min_branchlength = 0.000001 # Min to treat tip as a direct ancestor (no speciation event)
BioGeoBEARS_run_object$include_null_range = TRUE # set to FALSE for e.g. DEC* model, DEC*+J, etc.
# (For DEC* and other "*" models, please cite: Massana, Kathryn A.; Beaulieu,
# Jeremy M.; Matzke, Nicholas J.; O’Meara, Brian C. (2015). Non-null Effects of
# the Null Range in Biogeographic Models: Exploring Parameter Estimation in the
# DEC Model. bioRxiv, http://biorxiv.org/content/early/2015/09/16/026914 )
# Also: search script on "include_null_range" for other places to change
# Set up a time-stratified analysis:
# 1. Here, un-comment ONLY the files you want to use.
# 2. Also un-comment "BioGeoBEARS_run_object = section_the_tree(...", below.
# 3. For example files see (a) extdata_dir,
# or (b) http://phylo.wikidot.com/biogeobears#files
# and BioGeoBEARS Google Group posts for further hints)
#
# Uncomment files you wish to use in time-stratified analyses:
#BioGeoBEARS_run_object$timesfn = "timeperiods.txt"
#BioGeoBEARS_run_object$dispersal_multipliers_fn = "manual_dispersal_multipliers.txt"
#BioGeoBEARS_run_object$areas_allowed_fn = "areas_allowed.txt"
#BioGeoBEARS_run_object$areas_adjacency_fn = "areas_adjacency.txt"
#BioGeoBEARS_run_object$distsfn = "distances_matrix.txt"
# See notes on the distances model on PhyloWiki's BioGeoBEARS updates page.
# Speed options and multicore processing if desired
BioGeoBEARS_run_object$on_NaN_error = -1e50 # returns very low lnL if parameters produce NaN error (underflow check)
BioGeoBEARS_run_object$speedup = TRUE # shorcuts to speed ML search; use FALSE if worried (e.g. >3 params)
BioGeoBEARS_run_object$use_optimx = "GenSA" # if FALSE, use optim() instead of optimx()
BioGeoBEARS_run_object$num_cores_to_use = 1
# (use more cores to speed it up; this requires
# library(parallel) and/or library(snow). The package "parallel"
# is now default on Macs in R 3.0+, but apparently still
# has to be typed on some Windows machines. Note: apparently
# parallel works on Mac command-line R, but not R.app.
# BioGeoBEARS checks for this and resets to 1
# core with R.app)
# Sparse matrix exponentiation is an option for huge numbers of ranges/states (600+)
# I have experimented with sparse matrix exponentiation in EXPOKIT/rexpokit,
# but the results are imprecise and so I haven't explored it further.
# In a Bayesian analysis, it might work OK, but the ML point estimates are
# not identical.
# Also, I have not implemented all functions to work with force_sparse=TRUE.
# Volunteers are welcome to work on it!!
BioGeoBEARS_run_object$force_sparse = FALSE # force_sparse=TRUE causes pathology & isn't much faster at this scale
# This function loads the dispersal multiplier matrix etc. from the text files into the model object. Required for these to work!
# (It also runs some checks on these inputs for certain errors.)
BioGeoBEARS_run_object = readfiles_BioGeoBEARS_run(BioGeoBEARS_run_object)
# Divide the tree up by timeperiods/strata (uncomment this for stratified analysis)
#BioGeoBEARS_run_object = section_the_tree(inputs=BioGeoBEARS_run_object, make_master_table=TRUE, plot_pieces=FALSE)
# The stratified tree is described in this table:
#BioGeoBEARS_run_object$master_table
# Good default settings to get ancestral states
BioGeoBEARS_run_object$return_condlikes_table = TRUE
BioGeoBEARS_run_object$calc_TTL_loglike_from_condlikes_table = TRUE
BioGeoBEARS_run_object$calc_ancprobs = TRUE # get ancestral states from optim run
# Set up DEC model
# (nothing to do; defaults)
# Look at the BioGeoBEARS_run_object; it's just a list of settings etc.
BioGeoBEARS_run_object
# This contains the model object
BioGeoBEARS_run_object$BioGeoBEARS_model_object
# This table contains the parameters of the model
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table
# Run this to check inputs. Read the error messages if you get them!
check_BioGeoBEARS_run(BioGeoBEARS_run_object)
# For a slow analysis, run once, then set runslow=FALSE to just
# load the saved result.
runslow = TRUE
resfn = "3taxa_DEC_M0_unconstrained_v1.Rdata"
if (runslow)
{
res = bears_optim_run(BioGeoBEARS_run_object)
res
save(res, file=resfn)
resDEC = res
} else {
# Loads to "res"
load(resfn)
resDEC = res
}
#######################################################
# Run DEC+J
#######################################################
BioGeoBEARS_run_object = define_BioGeoBEARS_run()
BioGeoBEARS_run_object$trfn = trfn
BioGeoBEARS_run_object$geogfn = geogfn
BioGeoBEARS_run_object$max_range_size = max_range_size
BioGeoBEARS_run_object$min_branchlength = 0.000001 # Min to treat tip as a direct ancestor (no speciation event)
BioGeoBEARS_run_object$include_null_range = TRUE # set to FALSE for e.g. DEC* model, DEC*+J, etc.
# (For DEC* and other "*" models, please cite: Massana, Kathryn A.; Beaulieu,
# Jeremy M.; Matzke, Nicholas J.; O’Meara, Brian C. (2015). Non-null Effects of
# the Null Range in Biogeographic Models: Exploring Parameter Estimation in the
# DEC Model. bioRxiv, http://biorxiv.org/content/early/2015/09/16/026914 )
# Also: search script on "include_null_range" for other places to change
# Set up a time-stratified analysis:
#BioGeoBEARS_run_object$timesfn = "timeperiods.txt"
#BioGeoBEARS_run_object$dispersal_multipliers_fn = "manual_dispersal_multipliers.txt"
#BioGeoBEARS_run_object$areas_allowed_fn = "areas_allowed.txt"
#BioGeoBEARS_run_object$areas_adjacency_fn = "areas_adjacency.txt"
#BioGeoBEARS_run_object$distsfn = "distances_matrix.txt"
# See notes on the distances model on PhyloWiki's BioGeoBEARS updates page.
# Speed options and multicore processing if desired
BioGeoBEARS_run_object$on_NaN_error = -1e50 # returns very low lnL if parameters produce NaN error (underflow check)
BioGeoBEARS_run_object$speedup = TRUE # shortcuts to speed ML search; use FALSE if worried (e.g. >3 params)
BioGeoBEARS_run_object$use_optimx = "GenSA" # if FALSE, use optim() instead of optimx()
BioGeoBEARS_run_object$num_cores_to_use = 1 # single core; raising this requires library(parallel)/library(snow)
BioGeoBEARS_run_object$force_sparse = FALSE # force_sparse=TRUE causes pathology & isn't much faster at this scale
# This function loads the dispersal multiplier matrix etc. from the text files into the model object. Required for these to work!
# (It also runs some checks on these inputs for certain errors.)
BioGeoBEARS_run_object = readfiles_BioGeoBEARS_run(BioGeoBEARS_run_object)
# Divide the tree up by timeperiods/strata (uncomment this for stratified analysis)
#BioGeoBEARS_run_object = section_the_tree(inputs=BioGeoBEARS_run_object, make_master_table=TRUE, plot_pieces=FALSE)
# The stratified tree is described in this table:
#BioGeoBEARS_run_object$master_table
# Good default settings to get ancestral states
BioGeoBEARS_run_object$return_condlikes_table = TRUE
BioGeoBEARS_run_object$calc_TTL_loglike_from_condlikes_table = TRUE
BioGeoBEARS_run_object$calc_ancprobs = TRUE # get ancestral states from optim run
# Set up DEC+J model
# Get the ML parameter values from the 2-parameter nested model
# (this will ensure that the 3-parameter model always does at least as good)
# Starting values: d and e come from the fitted 2-parameter DEC run; j starts
# just above zero, so the 3-parameter DEC+J search begins essentially at the
# nested DEC optimum and its lnL can only match or improve on DEC's.
dstart = resDEC$outputs@params_table["d","est"]
estart = resDEC$outputs@params_table["e","est"]
jstart = 0.0001
# Input starting values for d, e (both "init" and "est" columns are seeded)
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["d","init"] = dstart
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["d","est"] = dstart
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["e","init"] = estart
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["e","est"] = estart
# Add j as a free parameter (it is fixed at 0 in plain DEC)
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["j","type"] = "free"
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["j","init"] = jstart
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["j","est"] = jstart
check_BioGeoBEARS_run(BioGeoBEARS_run_object)
resfn = "3taxa_DEC+J_M0_unconstrained_v1.Rdata"
runslow = TRUE
if (runslow)
{
#sourceall("/Dropbox/_njm/__packages/BioGeoBEARS_setup/")
res = bears_optim_run(BioGeoBEARS_run_object)
res
save(res, file=resfn)
resDECj = res
} else {
# Loads to "res"
load(resfn)
resDECj = res
}
#######################################################
# PDF plots
#######################################################
pdffn = "3taxa_DEC_vs_DEC+J_M0_unconstrained_v1.pdf"
pdf(pdffn, width=6, height=6)
#######################################################
# Plot ancestral states - DEC
#######################################################
analysis_titletxt ="BioGeoBEARS DEC on 3taxa M0_unconstrained"
# Setup
results_object = resDEC
scriptdir = np(system.file("extdata/a_scripts", package="BioGeoBEARS"))
# States
res2 = plot_BioGeoBEARS_results(results_object, analysis_titletxt, addl_params=list("j"), plotwhat="text", label.offset=0.45, tipcex=0.7, statecex=0.7, splitcex=0.6, titlecex=0.8, plotsplits=TRUE, cornercoords_loc=scriptdir, include_null_range=TRUE, tr=tr, tipranges=tipranges)
# Pie chart
plot_BioGeoBEARS_results(results_object, analysis_titletxt, addl_params=list("j"), plotwhat="pie", label.offset=0.45, tipcex=0.7, statecex=0.7, splitcex=0.6, titlecex=0.8, plotsplits=TRUE, cornercoords_loc=scriptdir, include_null_range=TRUE, tr=tr, tipranges=tipranges)
#######################################################
# Plot ancestral states - DECJ
#######################################################
analysis_titletxt ="BioGeoBEARS DEC+J on 3taxa M0_unconstrained"
# Setup
results_object = resDECj
scriptdir = np(system.file("extdata/a_scripts", package="BioGeoBEARS"))
# States
res1 = plot_BioGeoBEARS_results(results_object, analysis_titletxt, addl_params=list("j"), plotwhat="text", label.offset=0.45, tipcex=0.7, statecex=0.7, splitcex=0.6, titlecex=0.8, plotsplits=TRUE, cornercoords_loc=scriptdir, include_null_range=TRUE, tr=tr, tipranges=tipranges)
# Pie chart
plot_BioGeoBEARS_results(results_object, analysis_titletxt, addl_params=list("j"), plotwhat="pie", label.offset=0.45, tipcex=0.7, statecex=0.7, splitcex=0.6, titlecex=0.8, plotsplits=TRUE, cornercoords_loc=scriptdir, include_null_range=TRUE, tr=tr, tipranges=tipranges)
dev.off() # Turn off PDF
# Display the finished PDF. "open" is the macOS file opener; this line is
# Mac-specific (Linux would use "xdg-open", Windows shell.exec()).
cmdstr = paste0("open ", pdffn)
system(cmdstr) # Plot it
|
/inst/extdata/examples/check_strat5_ML/M0/script_v1.R
|
no_license
|
nmatzke/BioGeoBEARS
|
R
| false
| false
| 22,121
|
r
|
# Load the package (after installation, see above).
library(GenSA) # GenSA is better than optimx (although somewhat slower)
library(FD) # for FD::maxent() (make sure this is up-to-date)
library(snow) # (if you want to use multicore functionality; some systems/R versions prefer library(parallel), try either)
library(parallel)
#######################################################
# 2018-10-10 update: I have been putting the
# updates on CRAN/GitHub
# You should use:
# rexpokit version 0.26.6 from CRAN
# cladoRcpp version 0.15 from CRAN
# BioGeoBEARS version 1.1 from GitHub, install with:
# library(devtools)
# devtools::install_github(repo="nmatzke/BioGeoBEARS")
#######################################################
library(rexpokit)
library(cladoRcpp)
library(BioGeoBEARS)
#######################################################
# CUT: The old instructions to source() online upgrade .R files have been deleted,
# all updates are now on the GitHub version of the package, version 1.1+
#######################################################
#######################################################
# (This local-sourcing is mostly useful for Nick, while actively developing)
# Local source()-ing method -- uses BioGeoBEARS sourceall() function
# on a directory of .R files, so you don't have to type them out.
# The directories here are on my machine, you would have to make a
# directory, save the .R files there, and refer to them.
#
# NOTE: it's best to source the "cladoRcpp.R" update first, to avoid warnings like this:
##
## Note: possible error in 'rcpp_calc_anclikes_sp_COOweights_faster(Rcpp_leftprobs = tmpca_1, ':
## unused arguments (m = m, m_null_range = include_null_range, jts_matrix = jts_matrix)
##
#
# TO USE: Delete or comment out the 'source("http://...")' commands above, and un-comment
# the below...
########################################################################
# Un-comment (and fix directory paths) to use:
#library(BioGeoBEARS)
#source("/drives/Dropbox/_njm/__packages/cladoRcpp_setup/cladoRcpp.R")
#sourceall("/drives/Dropbox/_njm/__packages/BioGeoBEARS_setup/")
#calc_loglike_sp = compiler::cmpfun(calc_loglike_sp_prebyte) # crucial to fix bug in uppass calculations
#calc_independent_likelihoods_on_each_branch = compiler::cmpfun(calc_independent_likelihoods_on_each_branch_prebyte)
########################################################################
#######################################################
# SETUP: YOUR WORKING DIRECTORY
#######################################################
# You will need to set your working directory to match your local system
# Note these very handy functions!
# Command "setwd(x)" sets your working directory
# Command "getwd()" gets your working directory and tells you what it is.
# Command "list.files()" lists the files in your working directory
# To get help on any command, use "?". E.g., "?list.files"
# Set your working directory for output files
# default here is your home directory ("~")
# Change this as you like
wd = "/GitHub/BioGeoBEARS/inst/extdata/examples/check_strat5_ML/M0/"
setwd(wd)
# Double-check your working directory with getwd()
getwd()
#######################################################
# SETUP: Extension data directory
#######################################################
# When R packages contain extra files, they are stored in the "extdata" directory
# inside the installed package.
#
# BioGeoBEARS contains various example files and scripts in its extdata directory.
#
# Each computer operating system might install BioGeoBEARS in a different place,
# depending on your OS and settings.
#
# However, you can find the extdata directory like this:
extdata_dir = np(system.file("extdata", package="BioGeoBEARS"))
extdata_dir
list.files(extdata_dir)
# "system.file" looks in the directory of a specified package (in this case BioGeoBEARS)
# The function "np" is just a shortcut for normalizePath(), which converts the
# path to the format appropriate for your system (e.g., Mac/Linux use "/", but
# Windows uses "\\", if memory serves).
# Even when using your own data files, you should KEEP these commands in your
# script, since the plot_BioGeoBEARS_results function needs a script from the
# extdata directory to calculate the positions of "corners" on the plot. This cannot
# be made into a straight up BioGeoBEARS function because it uses C routines
# from the package APE which do not pass R CMD check for some reason.
#######################################################
# SETUP: YOUR TREE FILE AND GEOGRAPHY FILE
#######################################################
# Example files are given below. To run your own data,
# make the below lines point to your own files, e.g.
# trfn = "/mydata/frogs/frogBGB/tree.newick"
# geogfn = "/mydata/frogs/frogBGB/geog.data"
#######################################################
# Phylogeny file
# Notes:
# 1. Must be binary/bifurcating: no polytomies
# 2. No negative branchlengths (e.g. BEAST MCC consensus trees sometimes have negative branchlengths)
# 3. Be careful of very short branches, as BioGeoBEARS will interpret ultrashort branches as direct ancestors
# 4. You can use non-ultrametric trees, but BioGeoBEARS will interpret any tips significantly below the
# top of the tree as fossils! This is only a good idea if you actually do have fossils in your tree,
# as in e.g. Wood, Matzke et al. (2013), Systematic Biology.
# 5. The default settings of BioGeoBEARS make sense for trees where the branchlengths are in units of
# millions of years, and the tree is 1-1000 units tall. If you have a tree with a total height of
# e.g. 0.00001, you will need to adjust e.g. the max values of d and e, or (simpler) multiply all
# your branchlengths to get them into reasonable units.
# 6. DON'T USE SPACES IN SPECIES NAMES, USE E.G. "_"
#######################################################
# This is the example Newick file for Hawaiian 3taxa
# (from Ree & Smith 2008)
# "trfn" = "tree file name"
trfn = "tree.newick"
# Look at the raw Newick file:
moref(trfn)
# Look at your phylogeny (plots to a PDF, which avoids issues with multiple graphics in same window):
pdffn = "tree.pdf"
pdf(file=pdffn, width=9, height=12)
tr = read.tree(trfn)
tr
plot(tr)
title("Example 3taxa phylogeny")
axisPhylo() # plots timescale
dev.off()
cmdstr = paste0("open ", pdffn)
system(cmdstr)
#######################################################
# Geography file
# Notes:
# 1. This is a PHYLIP-formatted file. This means that in the
# first line,
# - the 1st number equals the number of rows (species)
# - the 2nd number equals the number of columns (number of areas)
# - after a tab, put the areas in parentheses, with spaces: (A B C D)
#
# 1.5. Example first line:
# 10 4 (A B C D)
#
# 2. The second line, and subsequent lines:
# speciesA 0110
# speciesB 0111
# speciesC 0001
# ...
#
# 2.5a. This means a TAB between the species name and the area 0/1s
# 2.5b. This also means NO SPACE AND NO TAB between the area 0/1s.
#
# 3. See example files at:
#    http://phylo.wikidot.com/biogeobears#files
#
# 4. Make sure you understand what a PLAIN-TEXT EDITOR is:
#    http://phylo.wikidot.com/biogeobears#texteditors
#
# 5. The PHYLIP format is the same format used for C++ LAGRANGE geography files.
#
# 6. All names in the geography file must match names in the phylogeny file.
#
# 7. DON'T USE SPACES IN SPECIES NAMES, USE E.G. "_"
#
# 8. Operational taxonomic units (OTUs) should ideally be phylogenetic lineages,
# i.e. genetically isolated populations. These may or may not be identical
# with species. You would NOT want to just use specimens, as each specimen
# automatically can only live in 1 area, which will typically favor DEC+J
# models. This is fine if the species/lineages really do live in single areas,
# but you wouldn't want to assume this without thinking about it at least.
# In summary, you should collapse multiple specimens into species/lineages if
# data indicates they are the same genetic population.
######################################################
# This is the example geography file for Hawaiian 3taxa
# (from Ree & Smith 2008)
geogfn = "geog.data"
# Look at the raw geography text file:
moref(geogfn)
# Look at your geographic range data:
tipranges = getranges_from_LagrangePHYLIP(lgdata_fn=geogfn)
tipranges
# Maximum range size observed:
max(rowSums(dfnums_to_numeric(tipranges@df)))
# Set the maximum number of areas any species may occupy; this cannot be larger
# than the number of areas you set up, but it can be smaller.
max_range_size = 4
####################################################
####################################################
# KEY HINT: The number of states (= number of different possible geographic ranges)
# depends on (a) the number of areas and (b) max_range_size.
# If you have more than about 500-600 states, the calculations will get REALLY slow,
# since the program has to exponentiate a matrix of e.g. 600x600. Often the computer
# will just sit there and crunch, and never get through the calculation of the first
# likelihood.
#
# (this is also what is usually happening when LAGRANGE hangs: you have too many states!)
#
# To check the number of states for a given number of ranges, try:
numstates_from_numareas(numareas=4, maxareas=4, include_null_range=TRUE)
numstates_from_numareas(numareas=4, maxareas=4, include_null_range=FALSE)
numstates_from_numareas(numareas=4, maxareas=3, include_null_range=TRUE)
numstates_from_numareas(numareas=4, maxareas=2, include_null_range=TRUE)
# Large numbers of areas have problems:
numstates_from_numareas(numareas=10, maxareas=10, include_null_range=TRUE)
# ...unless you limit the max_range_size:
numstates_from_numareas(numareas=10, maxareas=2, include_null_range=TRUE)
####################################################
####################################################
#######################################################
#######################################################
# DEC AND DEC+J ANALYSIS
#######################################################
#######################################################
# NOTE: The BioGeoBEARS "DEC" model is identical with
# the Lagrange DEC model, and should return identical
# ML estimates of parameters, and the same
# log-likelihoods, for the same datasets.
#
# Ancestral state probabilities at nodes will be slightly
# different, since BioGeoBEARS is reporting the
# ancestral state probabilities under the global ML
# model, and Lagrange is reporting ancestral state
# probabilities after re-optimizing the likelihood
# after fixing the state at each node. These will
# be similar, but not identical. See Matzke (2014),
# Systematic Biology, for discussion.
#
# Also see Matzke (2014) for presentation of the
# DEC+J model.
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
# Run DEC
#######################################################
# Initialize a default model (DEC model)
BioGeoBEARS_run_object = define_BioGeoBEARS_run()
# Give BioGeoBEARS the location of the phylogeny Newick file
BioGeoBEARS_run_object$trfn = trfn
# Give BioGeoBEARS the location of the geography text file
BioGeoBEARS_run_object$geogfn = geogfn
# Input the maximum range size
BioGeoBEARS_run_object$max_range_size = max_range_size
BioGeoBEARS_run_object$min_branchlength = 0.000001 # Min to treat tip as a direct ancestor (no speciation event)
BioGeoBEARS_run_object$include_null_range = TRUE # set to FALSE for e.g. DEC* model, DEC*+J, etc.
# (For DEC* and other "*" models, please cite: Massana, Kathryn A.; Beaulieu,
# Jeremy M.; Matzke, Nicholas J.; O’Meara, Brian C. (2015). Non-null Effects of
# the Null Range in Biogeographic Models: Exploring Parameter Estimation in the
# DEC Model. bioRxiv, http://biorxiv.org/content/early/2015/09/16/026914 )
# Also: search script on "include_null_range" for other places to change
# Set up a time-stratified analysis:
# 1. Here, un-comment ONLY the files you want to use.
# 2. Also un-comment "BioGeoBEARS_run_object = section_the_tree(...", below.
# 3. For example files see (a) extdata_dir,
# or (b) http://phylo.wikidot.com/biogeobears#files
# and BioGeoBEARS Google Group posts for further hints)
#
# Uncomment files you wish to use in time-stratified analyses:
#BioGeoBEARS_run_object$timesfn = "timeperiods.txt"
#BioGeoBEARS_run_object$dispersal_multipliers_fn = "manual_dispersal_multipliers.txt"
#BioGeoBEARS_run_object$areas_allowed_fn = "areas_allowed.txt"
#BioGeoBEARS_run_object$areas_adjacency_fn = "areas_adjacency.txt"
#BioGeoBEARS_run_object$distsfn = "distances_matrix.txt"
# See notes on the distances model on PhyloWiki's BioGeoBEARS updates page.
# Speed options and multicore processing if desired
BioGeoBEARS_run_object$on_NaN_error = -1e50 # returns very low lnL if parameters produce NaN error (underflow check)
BioGeoBEARS_run_object$speedup = TRUE # shortcuts to speed ML search; use FALSE if worried (e.g. >3 params)
BioGeoBEARS_run_object$use_optimx = "GenSA" # if FALSE, use optim() instead of optimx()
BioGeoBEARS_run_object$num_cores_to_use = 1 # single core; raising this requires library(parallel)/library(snow) -- see notes below
# (use more cores to speed it up; this requires
# library(parallel) and/or library(snow). The package "parallel"
# is now default on Macs in R 3.0+, but apparently still
# has to be typed on some Windows machines. Note: apparently
# parallel works on Mac command-line R, but not R.app.
# BioGeoBEARS checks for this and resets to 1
# core with R.app)
# Sparse matrix exponentiation is an option for huge numbers of ranges/states (600+)
# I have experimented with sparse matrix exponentiation in EXPOKIT/rexpokit,
# but the results are imprecise and so I haven't explored it further.
# In a Bayesian analysis, it might work OK, but the ML point estimates are
# not identical.
# Also, I have not implemented all functions to work with force_sparse=TRUE.
# Volunteers are welcome to work on it!!
BioGeoBEARS_run_object$force_sparse = FALSE # force_sparse=TRUE causes pathology & isn't much faster at this scale
# This function loads the dispersal multiplier matrix etc. from the text files into the model object. Required for these to work!
# (It also runs some checks on these inputs for certain errors.)
BioGeoBEARS_run_object = readfiles_BioGeoBEARS_run(BioGeoBEARS_run_object)
# Divide the tree up by timeperiods/strata (uncomment this for stratified analysis)
#BioGeoBEARS_run_object = section_the_tree(inputs=BioGeoBEARS_run_object, make_master_table=TRUE, plot_pieces=FALSE)
# The stratified tree is described in this table:
#BioGeoBEARS_run_object$master_table
# Good default settings to get ancestral states
BioGeoBEARS_run_object$return_condlikes_table = TRUE
BioGeoBEARS_run_object$calc_TTL_loglike_from_condlikes_table = TRUE
BioGeoBEARS_run_object$calc_ancprobs = TRUE # get ancestral states from optim run
# Set up DEC model
# (nothing to do; defaults)
# Look at the BioGeoBEARS_run_object; it's just a list of settings etc.
BioGeoBEARS_run_object
# This contains the model object
BioGeoBEARS_run_object$BioGeoBEARS_model_object
# This table contains the parameters of the model
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table
# Run this to check inputs. Read the error messages if you get them!
check_BioGeoBEARS_run(BioGeoBEARS_run_object)
# For a slow analysis, run once, then set runslow=FALSE to just
# load the saved result.
runslow = TRUE
resfn = "3taxa_DEC_M0_unconstrained_v1.Rdata"
if (runslow)
{
res = bears_optim_run(BioGeoBEARS_run_object)
res
save(res, file=resfn)
resDEC = res
} else {
# Loads to "res"
load(resfn)
resDEC = res
}
#######################################################
# Run DEC+J
#######################################################
BioGeoBEARS_run_object = define_BioGeoBEARS_run()
BioGeoBEARS_run_object$trfn = trfn
BioGeoBEARS_run_object$geogfn = geogfn
BioGeoBEARS_run_object$max_range_size = max_range_size
BioGeoBEARS_run_object$min_branchlength = 0.000001 # Min to treat tip as a direct ancestor (no speciation event)
BioGeoBEARS_run_object$include_null_range = TRUE # set to FALSE for e.g. DEC* model, DEC*+J, etc.
# (For DEC* and other "*" models, please cite: Massana, Kathryn A.; Beaulieu,
# Jeremy M.; Matzke, Nicholas J.; O’Meara, Brian C. (2015). Non-null Effects of
# the Null Range in Biogeographic Models: Exploring Parameter Estimation in the
# DEC Model. bioRxiv, http://biorxiv.org/content/early/2015/09/16/026914 )
# Also: search script on "include_null_range" for other places to change
# Set up a time-stratified analysis:
#BioGeoBEARS_run_object$timesfn = "timeperiods.txt"
#BioGeoBEARS_run_object$dispersal_multipliers_fn = "manual_dispersal_multipliers.txt"
#BioGeoBEARS_run_object$areas_allowed_fn = "areas_allowed.txt"
#BioGeoBEARS_run_object$areas_adjacency_fn = "areas_adjacency.txt"
#BioGeoBEARS_run_object$distsfn = "distances_matrix.txt"
# See notes on the distances model on PhyloWiki's BioGeoBEARS updates page.
# Speed options and multicore processing if desired
BioGeoBEARS_run_object$on_NaN_error = -1e50 # returns very low lnL if parameters produce NaN error (underflow check)
BioGeoBEARS_run_object$speedup = TRUE # shortcuts to speed ML search; use FALSE if worried (e.g. >3 params)
BioGeoBEARS_run_object$use_optimx = "GenSA" # if FALSE, use optim() instead of optimx()
BioGeoBEARS_run_object$num_cores_to_use = 1 # single core; raising this requires library(parallel)/library(snow)
BioGeoBEARS_run_object$force_sparse = FALSE # force_sparse=TRUE causes pathology & isn't much faster at this scale
# This function loads the dispersal multiplier matrix etc. from the text files into the model object. Required for these to work!
# (It also runs some checks on these inputs for certain errors.)
BioGeoBEARS_run_object = readfiles_BioGeoBEARS_run(BioGeoBEARS_run_object)
# Divide the tree up by timeperiods/strata (uncomment this for stratified analysis)
#BioGeoBEARS_run_object = section_the_tree(inputs=BioGeoBEARS_run_object, make_master_table=TRUE, plot_pieces=FALSE)
# The stratified tree is described in this table:
#BioGeoBEARS_run_object$master_table
# Good default settings to get ancestral states
BioGeoBEARS_run_object$return_condlikes_table = TRUE
BioGeoBEARS_run_object$calc_TTL_loglike_from_condlikes_table = TRUE
BioGeoBEARS_run_object$calc_ancprobs = TRUE # get ancestral states from optim run
# Set up DEC+J model
# Get the ML parameter values from the 2-parameter nested model
# (this will ensure that the 3-parameter model always does at least as good)
# Seed the DEC+J search at the DEC maximum-likelihood estimates of d and e,
# plus a tiny nonzero jump-dispersal weight j, so the 3-parameter fit starts
# from the nested 2-parameter optimum (its lnL can only match or beat DEC's).
dstart = resDEC$outputs@params_table["d","est"]
estart = resDEC$outputs@params_table["e","est"]
jstart = 0.0001
# Write the starting values into both the "init" and "est" columns at once
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["d", c("init","est")] = dstart
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["e", c("init","est")] = estart
# Release j (fixed at 0 under plain DEC) and give it its starting value
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["j","type"] = "free"
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table["j", c("init","est")] = jstart
check_BioGeoBEARS_run(BioGeoBEARS_run_object)
resfn = "3taxa_DEC+J_M0_unconstrained_v1.Rdata"
runslow = TRUE
if (runslow)
{
#sourceall("/Dropbox/_njm/__packages/BioGeoBEARS_setup/")
res = bears_optim_run(BioGeoBEARS_run_object)
res
save(res, file=resfn)
resDECj = res
} else {
# Loads to "res"
load(resfn)
resDECj = res
}
#######################################################
# PDF plots
#######################################################
pdffn = "3taxa_DEC_vs_DEC+J_M0_unconstrained_v1.pdf"
pdf(pdffn, width=6, height=6)
#######################################################
# Plot ancestral states - DEC
#######################################################
analysis_titletxt ="BioGeoBEARS DEC on 3taxa M0_unconstrained"
# Setup
results_object = resDEC
scriptdir = np(system.file("extdata/a_scripts", package="BioGeoBEARS"))
# States
res2 = plot_BioGeoBEARS_results(results_object, analysis_titletxt, addl_params=list("j"), plotwhat="text", label.offset=0.45, tipcex=0.7, statecex=0.7, splitcex=0.6, titlecex=0.8, plotsplits=TRUE, cornercoords_loc=scriptdir, include_null_range=TRUE, tr=tr, tipranges=tipranges)
# Pie chart
plot_BioGeoBEARS_results(results_object, analysis_titletxt, addl_params=list("j"), plotwhat="pie", label.offset=0.45, tipcex=0.7, statecex=0.7, splitcex=0.6, titlecex=0.8, plotsplits=TRUE, cornercoords_loc=scriptdir, include_null_range=TRUE, tr=tr, tipranges=tipranges)
#######################################################
# Plot ancestral states - DECJ
#######################################################
analysis_titletxt ="BioGeoBEARS DEC+J on 3taxa M0_unconstrained"
# Setup
results_object = resDECj
scriptdir = np(system.file("extdata/a_scripts", package="BioGeoBEARS"))
# States
res1 = plot_BioGeoBEARS_results(results_object, analysis_titletxt, addl_params=list("j"), plotwhat="text", label.offset=0.45, tipcex=0.7, statecex=0.7, splitcex=0.6, titlecex=0.8, plotsplits=TRUE, cornercoords_loc=scriptdir, include_null_range=TRUE, tr=tr, tipranges=tipranges)
# Pie chart
plot_BioGeoBEARS_results(results_object, analysis_titletxt, addl_params=list("j"), plotwhat="pie", label.offset=0.45, tipcex=0.7, statecex=0.7, splitcex=0.6, titlecex=0.8, plotsplits=TRUE, cornercoords_loc=scriptdir, include_null_range=TRUE, tr=tr, tipranges=tipranges)
dev.off() # Turn off PDF
# Display the finished PDF. "open" is the macOS file opener; this line is
# Mac-specific (Linux would use "xdg-open", Windows shell.exec()).
cmdstr = paste0("open ", pdffn)
system(cmdstr) # Plot it
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.R
\name{simulate_double_tanh}
\alias{simulate_double_tanh}
\title{Simulate data according to the dht model}
\usage{
simulate_double_tanh(z, n_groups, sigma2_y, mu_alpha, sigma2_alpha,
mu_beta, sigma2_beta)
}
\arguments{
\item{z}{Depths at which to evaluate the density.}
\item{n_groups}{Number of groups to simulate.}
\item{sigma2_y}{Variance about the mean curve.}
\item{mu_alpha}{Mean value of the alpha's.}
\item{sigma2_alpha}{Variance of the alpha's.}
\item{mu_beta}{Mean value of the beta's.}
\item{sigma2_beta}{Variance of the beta's.}
}
\value{
A matrix of simulated data. Rows are the individual density curves.
}
\description{
Simulate data according to the dht model
}
|
/dhtdensity/man/simulate_double_tanh.Rd
|
no_license
|
connor-duffin/dhtdensity
|
R
| false
| true
| 774
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.R
\name{simulate_double_tanh}
\alias{simulate_double_tanh}
\title{Simulate data according to the dht model}
\usage{
simulate_double_tanh(z, n_groups, sigma2_y, mu_alpha, sigma2_alpha,
mu_beta, sigma2_beta)
}
\arguments{
\item{z}{Depths at which to evaluate the density.}
\item{n_groups}{Number of groups to simulate.}
\item{sigma2_y}{Variance about the mean curve.}
\item{mu_alpha}{Mean value of the alpha's.}
\item{sigma2_alpha}{Variance of the alpha's.}
\item{mu_beta}{Mean value of the beta's.}
\item{sigma2_beta}{Variance of the beta's.}
}
\value{
A matrix of simulated data. Rows are the individual density curves.
}
\description{
Simulate data according to the dht model
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rdataretriever.R
\name{install_retriever}
\alias{install_retriever}
\title{install the python module `retriever`}
\usage{
install_retriever(method = "auto", conda = "auto")
}
\arguments{
\item{method}{Installation method. By default, "auto" automatically finds a
method that will work in the local environment. Change the default to force
a specific installation method. Note that the "virtualenv" method is not
available on Windows.}
\item{conda}{The path to a \code{conda} executable. Use \code{"auto"} to allow \code{reticulate} to
automatically find an appropriate \code{conda} binary. See \strong{Finding Conda} for more details.}
}
\description{
install the python module `retriever`
}
|
/man/install_retriever.Rd
|
permissive
|
fboehm/rdataretriever
|
R
| false
| true
| 771
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rdataretriever.R
\name{install_retriever}
\alias{install_retriever}
\title{install the python module `retriever`}
\usage{
install_retriever(method = "auto", conda = "auto")
}
\arguments{
\item{method}{Installation method. By default, "auto" automatically finds a
method that will work in the local environment. Change the default to force
a specific installation method. Note that the "virtualenv" method is not
available on Windows.}
\item{conda}{The path to a \code{conda} executable. Use \code{"auto"} to allow \code{reticulate} to
automatically find an appropriate \code{conda} binary. See \strong{Finding Conda} for more details.}
}
\description{
install the python module `retriever`
}
|
if (1==0) {
# # HOW TO PLOT WEIGHTED HISTOGRAMS, OVERLAID 1 DEMOG GROUP VS ALL OTHERS:
#
# # ALSO SEE 'https://medium.com/@nickmartin812/how-to-r-visualizing-distributions-49ea4141fb32'
# # for overlapping boxplots or density plots (pdf), not histograms.
# # and ggridges:: package
# # and maybe https://plot.ly/ggplot2/geom_histogram/
#
# # see pop.cdf2
# # see pop.cdf
# # see pop.cdf.density.R
# # see pop.ecdf
#
#
# # OVERLAY OF 2 histograms
#
require(ejanalysis)
# # ?"ejanalysis-package"
bg <- ejscreen::bg22[, c(ejscreen::names.d, 'pop', ejscreen::names.e, 'REGION')]
e <- bg$pm[!is.na(bg$pm)]
dpct <- bg$pctmin
dcount <- bg$pop[!is.na(bg$pm)] * dpct[!is.na(bg$pm)]
refcount <- bg$pop[!is.na(bg$pm)] * (1 - dpct[!is.na(bg$pm)])
brks <- 0:17
etxt <- 'PM2.5'
dtxt <- 'Minorities'
pop.cdf( e, pcts = dpct, pops = bg$pop)
pop.cdf2( e, dcount, refcount, etxt, dtxt, brks)
pop.cdf.density(e, dcount, refcount, etxt, dtxt )
# e = log10(bg$proximity.rmp); e[is.infinite(e)] <- NA
pop.ecdf(e, bg$pctmin, bg$pop, col='red', allothers=FALSE, main = 'RMP proximity scores within each group')
pop.ecdf(e, bg$pctlowinc, bg$pop, col='green', allothers=FALSE, add=TRUE)
pop.ecdf(e, 1-bg$pctmin, bg$pop, col='black', allothers=FALSE, add=TRUE)
pop.ecdf(e, 1-bg$pctlowinc, bg$pop, col='gray', allothers=FALSE, add=TRUE)
legend(x = 'bottomright',
legend = c('Non-POC', 'Non-Low-Income', 'POC', 'Low income' ),
fill = c('black', 'gray', 'red', 'green'))
pop.cdf.density( e = log10(bg$proximity.tsdf), dcount = bg$pop * bg[, c( "pctmin")], refcount = bg$pop * (1 - bg$pctmin), etxt = 'TSDF', dtxt = 'People of Color')
pop.cdf(bg$proximity.tsdf, bg$pctmin, bg$pop, main = "Histogram of TSDF scores in POC and non-POC")
# Demog suscept for each REGION (can't see if use vs others)
pop.ecdf(bg$traffic.score, bg$VSI.eo, bg$pop, log='x', subtitles=FALSE,
group=bg$REGION, allothers=FALSE,
xlab='Traffic score (log scale)', ylab='%ile of population',
main='Distribution of scores by EPA Region')
pop.ecdf(bg$pm, bg$pctmin, 1000, xlab='Tract air pollution levels (vertical lines are group means)',
main = 'PM2.5 levels among minorities (red curve) vs rest of US pop.')
abline(v=wtd.mean(bg$pm, weights = bg$pctmin * bg$pop), col='red')
abline(v=wtd.mean(bg$pm, weights = (1-bg$pctmin) * bg$pop), col='black')
#?plot
axis(side = 1, at = 4:14 )
}
|
/R/pop.cdf2-and-density-notes-OVERLAY-2-WTD-HISTOS.R
|
no_license
|
ejanalysis/ejanalysis
|
R
| false
| false
| 2,546
|
r
|
# NOTE: this whole block is wrapped in `if (1==0)` so it never executes —
# it is a scratchpad of worked examples for plotting weighted histograms,
# CDFs, and density overlays (one demographic group vs. all others) using
# the ejanalysis and ejscreen packages. Run lines interactively as needed.
if (1==0) {
# # HOW TO PLOT WEIGHTED HISTOGRAMS, OVERLAID 1 DEMOG GROUP VS ALL OTHERS:
#
# # ALSO SEE 'https://medium.com/@nickmartin812/how-to-r-visualizing-distributions-49ea4141fb32'
# # for overlapping boxplots or density plots (pdf), not histograms.
# # and ggridges:: package
# # and maybe https://plot.ly/ggplot2/geom_histogram/
#
# # see pop.cdf2
# # see pop.cdf
# # see pop.cdf.density.R
# # see pop.ecdf
#
#
# # OVERLAY OF 2 histograms
#
require(ejanalysis)
# # ?"ejanalysis-package"
# Pull demographics, population, environmental indicators, and EPA Region.
bg <- ejscreen::bg22[, c(ejscreen::names.d, 'pop', ejscreen::names.e, 'REGION')]
# Drop rows with missing PM2.5 so e, dcount, refcount stay aligned.
e <- bg$pm[!is.na(bg$pm)]
dpct <- bg$pctmin
dcount <- bg$pop[!is.na(bg$pm)] * dpct[!is.na(bg$pm)]
refcount <- bg$pop[!is.na(bg$pm)] * (1 - dpct[!is.na(bg$pm)])
brks <- 0:17
etxt <- 'PM2.5'
dtxt <- 'Minorities'
pop.cdf( e, pcts = dpct, pops = bg$pop)
pop.cdf2( e, dcount, refcount, etxt, dtxt, brks)
pop.cdf.density(e, dcount, refcount, etxt, dtxt )
# e = log10(bg$proximity.rmp); e[is.infinite(e)] <- NA
pop.ecdf(e, bg$pctmin, bg$pop, col='red', allothers=FALSE, main = 'RMP proximity scores within each group')
pop.ecdf(e, bg$pctlowinc, bg$pop, col='green', allothers=FALSE, add=TRUE)
pop.ecdf(e, 1-bg$pctmin, bg$pop, col='black', allothers=FALSE, add=TRUE)
pop.ecdf(e, 1-bg$pctlowinc, bg$pop, col='gray', allothers=FALSE, add=TRUE)
legend(x = 'bottomright',
 legend = c('Non-POC', 'Non-Low-Income', 'POC', 'Low income' ),
 fill = c('black', 'gray', 'red', 'green'))
pop.cdf.density( e = log10(bg$proximity.tsdf), dcount = bg$pop * bg[, c( "pctmin")], refcount = bg$pop * (1 - bg$pctmin), etxt = 'TSDF', dtxt = 'People of Color')
pop.cdf(bg$proximity.tsdf, bg$pctmin, bg$pop, main = "Histogram of TSDF scores in POC and non-POC")
# Demog suscept for each REGION (can't see if use vs others)
pop.ecdf(bg$traffic.score, bg$VSI.eo, bg$pop, log='x', subtitles=FALSE,
 group=bg$REGION, allothers=FALSE,
 xlab='Traffic score (log scale)', ylab='%ile of population',
 main='Distribution of scores by EPA Region')
pop.ecdf(bg$pm, bg$pctmin, 1000, xlab='Tract air pollution levels (vertical lines are group means)',
 main = 'PM2.5 levels among minorities (red curve) vs rest of US pop.')
# Vertical reference lines at the population-weighted group means.
abline(v=wtd.mean(bg$pm, weights = bg$pctmin * bg$pop), col='red')
abline(v=wtd.mean(bg$pm, weights = (1-bg$pctmin) * bg$pop), col='black')
#?plot
axis(side = 1, at = 4:14 )
}
|
source("complete.R")
# Compute the nitrate/sulfate correlation for each monitor with enough data.
#
# directory: path to the folder holding monitor files named 001.csv, 002.csv, ...
# threshold: minimum number of completely observed cases a monitor must have
#            for its correlation to be included (default 0).
#
# Returns a numeric vector of correlations, one per qualifying monitor
# (numeric(0) when no monitor qualifies).
#
# Fixes vs. original: the per-monitor correlation was stored in a variable
# named `c`, shadowing base::c; the result vector was grown with append()
# from a dummy 0 seed and then sliced with correlations[2:length(...)],
# which returns c(NA, 0) when no monitor passes the threshold; the
# three-branch zero-padding is replaced by sprintf("%03d").
corr <- function(directory, threshold = 0) {
  compl <- complete(directory)
  # ids of monitors whose count of complete cases exceeds the threshold
  use <- compl$id[compl$nobs > threshold]

  # preallocate instead of growing the vector inside the loop
  correlations <- numeric(length(use))
  for (j in seq_along(use)) {
    # zero-pad the id to three digits to build the file name, e.g. "007.csv"
    fname <- sprintf("%s/%03d.csv", directory, use[j])
    data <- read.csv(fname)
    # keep only rows where both pollutants are observed
    ok <- !is.na(data$nitrate) & !is.na(data$sulfate)
    correlations[j] <- cor(data$nitrate[ok], data$sulfate[ok])
  }
  correlations
}
|
/rprogramming/assign1/corr.R
|
no_license
|
NormanBenbrahim/datasciencecoursera
|
R
| false
| false
| 728
|
r
|
source("complete.R")
# Compute the nitrate/sulfate correlation for each monitor with enough data.
#
# directory: path to the folder holding monitor files named 001.csv, 002.csv, ...
# threshold: minimum number of completely observed cases a monitor must have
#            for its correlation to be included (default 0).
#
# Returns a numeric vector of correlations, one per qualifying monitor
# (numeric(0) when no monitor qualifies).
#
# Fixes vs. original: the per-monitor correlation was stored in a variable
# named `c`, shadowing base::c; the result vector was grown with append()
# from a dummy 0 seed and then sliced with correlations[2:length(...)],
# which returns c(NA, 0) when no monitor passes the threshold; the
# three-branch zero-padding is replaced by sprintf("%03d").
corr <- function(directory, threshold = 0) {
  compl <- complete(directory)
  # ids of monitors whose count of complete cases exceeds the threshold
  use <- compl$id[compl$nobs > threshold]

  # preallocate instead of growing the vector inside the loop
  correlations <- numeric(length(use))
  for (j in seq_along(use)) {
    # zero-pad the id to three digits to build the file name, e.g. "007.csv"
    fname <- sprintf("%s/%03d.csv", directory, use[j])
    data <- read.csv(fname)
    # keep only rows where both pollutants are observed
    ok <- !is.na(data$nitrate) & !is.na(data$sulfate)
    correlations[j] <- cor(data$nitrate[ok], data$sulfate[ok])
  }
  correlations
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.R
\name{files_to_rebuild}
\alias{files_to_rebuild}
\title{Figure out which files need to be rebuilt}
\usage{
files_to_rebuild(files)
}
\arguments{
\item{files}{A character vector of paths to source files (e.g., \code{.Rmd}).}
}
\value{
A character vector of files that need to be rebuilt.
}
\description{
\code{files_to_rebuild} returns a vector of files that need to be rebuilt.
}
\details{
This function accepts a vector of source files and
returns a vector of files that need to be rebuilt because the source file is
new or has changed since the last time the site was built.
}
\seealso{
\code{\link{get_current_digests}()}, \code{\link{digests}}.
}
\keyword{internal}
|
/man/files_to_rebuild.Rd
|
permissive
|
jonathan-g/blogdownDigest
|
R
| false
| true
| 756
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.R
\name{files_to_rebuild}
\alias{files_to_rebuild}
\title{Figure out which files need to be rebuilt}
\usage{
files_to_rebuild(files)
}
\arguments{
\item{files}{A character vector of paths to source files (e.g., \code{.Rmd}).}
}
\value{
A character vector of files that need to be rebuilt.
}
\description{
\code{files_to_rebuild} returns a vector of files that need to be rebuilt.
}
\details{
This function accepts a vector of source files and
returns a vector of files that need to be rebuilt because the source file is
new or has changed since the last time the site was built.
}
\seealso{
\code{\link{get_current_digests}()}, \code{\link{digests}}.
}
\keyword{internal}
|
#' Update an existing doc in a CouchDB database
#'
#' A revision corresponding to the document's '_id' has to be provided for
#' an update. If no '_rev' is present in the \code{cdb} list, the function
#' fetches the doc's current revision from the database and uses that for
#' the update.
#'
#' Updating a doc at couchdb means executing a http "PUT" request. The
#' \code{cdb} list must contain \code{cdb$serverName}, \code{cdb$port},
#' \code{cdb$DBName} and \code{cdb$id}. Since v0.6 the revision of the
#' document should exist at the intended place: \code{cdb$dataList$'_rev'}.
#'
#' \code{getURL()} with \code{customrequest = "PUT"} does the work. If a
#' needed \code{cdb$} list entry is not provided \code{cdb$error} maybe says
#' something about the R side.
#'
#' @usage cdbUpdateDoc(cdb)
#' @param cdb the cdb connection configuration list must contain the
#' \code{cdb$serverName}, \code{cdb$port}, \code{cdb$DBName} and \code{cdb$id}.
#' The data which updates the data stored in the doc is provided in
#' \code{cdb$dataList}
#' @return \item{cdb }{The response of the request is stored in \code{cdb$res}
#' after converting the answer by means of \code{fromJSON()}. The revision
#' provided by the response is used for updating the \code{cdb$dataList$'_rev'}.
#' }
#' @author wactbprot
#' @export
#' @examples
#' \dontrun{
#' ccc <- cdbIni()
#' # I assume a database at localhost:5984 already exists
#' ccc$DBName <- "r4couchdb_db"
#' ccc$dataList <- list(normalDistRand = rnorm(20))
#' ccc <- cdbAddDoc(ccc)
#'
#' ccc$dataList$Date <- date()
#' ccc <- cdbUpdateDoc(ccc)
#'}
#'
#' @seealso \code{cdbInit()}
#' @keywords misc
#'
cdbUpdateDoc <- function( cdb){
  # validate the connection list; checkCdb records problems in cdb$error
  callName <- deparse(match.call()[[1]])
  cdb <- cdb$checkCdb(cdb, callName)

  # ensure the payload carries the document's current revision
  docRev <- cdb$getDocRev(cdb)
  if(!is.na(docRev)){
    cdb$dataList[["_rev"]] <- docRev
  }

  # guard clause: abort before issuing the request if validation failed
  if(cdb$error != ""){
    stop(cdb$error)
  }

  # target URL: <base>/<db>/<docid>
  docUrl <- paste(cdb$baseUrl(cdb),
                  cdb$DBName,"/",
                  cdb$id,
                  sep="")
  answer <- getURL(utils::URLencode(docUrl),
                   customrequest = "PUT",
                   postfields = cdb$toJSON(cdb$dataList),
                   httpheader = c('Content-Type: application/json;charset=utf-8'),
                   curl = cdb$curl,
                   .opts = cdb$opts(cdb))
  cdb <- cdb$checkRes(cdb, answer)

  # on success the server reports the new revision; keep it for later calls
  if(length(cdb$res$ok) > 0){
    cdb$dataList[["_rev"]] <- cdb$res$rev
    cdb$rev <- cdb$res$rev
  }
  cdb
}
|
/R4CouchDB/R/cdbUpdateDoc.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,662
|
r
|
#' This function updates an existing doc
#'
#' This essentially means that a
#' revision, corresponding to the '_id' has to be provided. If no '_rev' is
#' given in the \code{cdb} list the function gets the doc from the db
#' and takes the rev number for the update
#'
#' Updating a doc at couchdb means executing a http "PUT" request. The
#' \code{cdb} list must contain the \code{cdb$serverName}, \code{cdb$port},
#' \code{cdb$DBName}, \code{cdb$id}. Since v0.6 the revision of the document
#' should exist at the intended place: \code{cdb$dataList$'_rev'}.
#'
#' \code{getURL()} with \code{customrequest = "PUT"} does the work. If a
#' needed \code{cdb$} list entry is not provided \code{cdb$error} maybe says
#' something about the R side.
#'
#' @usage cdbUpdateDoc(cdb)
#' @param cdb the cdb connection configuration list must contain the
#' \code{cdb$serverName}, \code{cdb$port}, \code{cdb$DBName} and \code{cdb$id}.
#' The data which updates the data stored in the doc is provided in
#' \code{cdb$dataList}
#' @return \item{cdb }{The response of the request is stored in \code{cdb$res}
#' after converting the answer by means of \code{fromJSON()}. The revision
#' provided by the response is used for updating the \code{cdb$dataList$'_rev'}.
#' }
#' @author wactbprot
#' @export
#' @examples
#' \dontrun{
#' ccc <- cdbIni()
#' # I assume a database at localhost:5984 already exists
#' ccc$DBName <- "r4couchdb_db"
#' ccc$dataList <- list(normalDistRand = rnorm(20))
#' ccc <- cdbAddDoc(ccc)
#'
#' ccc$dataList$Date <- date()
#' ccc <- cdbUpdateDoc(ccc)
#'}
#'
#' @seealso \code{cdbInit()}
#' @keywords misc
#'
cdbUpdateDoc <- function( cdb){
  # validate the connection list; checkCdb records problems in cdb$error
  fname <- deparse(match.call()[[1]])
  cdb <- cdb$checkCdb(cdb,fname)
  # look up the doc's current revision so the PUT is not rejected as a conflict
  rev <- cdb$getDocRev(cdb)
  if(!is.na(rev)){
    cdb$dataList[["_rev"]] <- rev
  }
  if(cdb$error == ""){
    # target URL: <base>/<db>/<docid>
    adrString <- paste(cdb$baseUrl(cdb),
                       cdb$DBName,"/",
                       cdb$id,
                       sep="")
    res <- getURL(utils::URLencode(adrString),
                  customrequest = "PUT",
                  postfields = cdb$toJSON(cdb$dataList),
                  httpheader = c('Content-Type: application/json;charset=utf-8'),
                  curl = cdb$curl,
                  .opts = cdb$opts(cdb))
    cdb <- cdb$checkRes(cdb,res)
    # on success the server reports the new revision; keep it for later calls
    if((length(cdb$res$ok)) > 0 ){
      cdb$dataList[["_rev"]] <- cdb$res$rev
      cdb$rev <- cdb$res$rev
    }
    return(cdb)
  }else{
    stop(cdb$error)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenEval.R
\name{mkPED}
\alias{mkPED}
\title{Pedigree relationship matrix}
\usage{
mkPED(pop.info)
}
\arguments{
\item{pop.info}{The population info, in the same format generated by \code{simPopInfo}.}
}
\value{
The pedigree relationship matrix.
}
\description{
This function builds the pedigree relationship matrix based on population info in the same format as that generated by the function \code{simPopInfo}. If you are generating the population info using \code{simPopInfo}, the pedigree relationship matrix can be created directly from \code{simPopInfo} by setting the parameter \code{pedigree=TRUE}.
}
\examples{
mkPED(simPopInfo(10,0))
}
|
/man/mkPED.Rd
|
no_license
|
bcuyabano/GenEval
|
R
| false
| true
| 720
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenEval.R
\name{mkPED}
\alias{mkPED}
\title{Pedigree relationship matrix}
\usage{
mkPED(pop.info)
}
\arguments{
\item{pop.info}{The population info, in the same format generated by \code{simPopInfo}.}
}
\value{
The pedigree relationship matrix.
}
\description{
This function builds the pedigree relationship matrix based on population info in the same format as that generated by the function \code{simPopInfo}. If you are generating the population info using \code{simPopInfo}, the pedigree relationship matrix can be created directly from \code{simPopInfo} by setting the parameter \code{pedigree=TRUE}.
}
\examples{
mkPED(simPopInfo(10,0))
}
|
# Bar charts of settlement counts per state (urban / semi-urban / rural),
# ordered descending by count. Assumes asentamiento_*_df data frames with
# columns `estado` and `asentamientos` already exist in the workspace.
library(ggplot2)
barras_urbanos<-ggplot(data=asentamiento_urbano_df, aes(x=reorder(estado, -asentamientos), y=asentamientos)) +
labs(title = "ASENTAMIENTOS URBANOS") +
xlab("Estados") +
ylab("Asentamientos") +
geom_bar(stat="sum", fill = "steelblue") +
geom_text(aes(label=asentamientos), angle = 45) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(angle = 90, hjust = 1))
barras_urbanos
barras_semiurbanos<-ggplot(data=asentamiento_semiurbano_df, aes(x=reorder(estado, -asentamientos), y=asentamientos)) +
labs(title = "ASENTAMIENTOS SEMIURBANOS") +
xlab("Estados") +
ylab("Asentamientos") +
geom_bar(stat="sum", fill = "steelblue") +
geom_text(aes(label=asentamientos), angle = 45) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(angle = 90, hjust = 1))
barras_semiurbanos
# NOTE(review): this chart is titled "RURALES" but plots
# asentamiento_semiurbano_df again — looks like a copy-paste slip; confirm
# whether an asentamiento_rural_df was intended here.
barras_rurales<-ggplot(data=asentamiento_semiurbano_df, aes(x=reorder(estado, -asentamientos), y=asentamientos)) +
labs(title = "ASENTAMIENTOS RURALES") +
xlab("Estados") +
ylab("Asentamientos") +
geom_bar(stat="sum", fill = "steelblue") +
geom_text(aes(label=asentamientos), angle = 45) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(angle = 90, hjust = 1))
barras_rurales
|
/graficos-asentamientos.r
|
permissive
|
mmdelc/r-cp-mx
|
R
| false
| false
| 1,303
|
r
|
# Bar charts of settlement counts per state (urban / semi-urban / rural),
# ordered descending by count. Assumes asentamiento_*_df data frames with
# columns `estado` and `asentamientos` already exist in the workspace.
library(ggplot2)
barras_urbanos<-ggplot(data=asentamiento_urbano_df, aes(x=reorder(estado, -asentamientos), y=asentamientos)) +
labs(title = "ASENTAMIENTOS URBANOS") +
xlab("Estados") +
ylab("Asentamientos") +
geom_bar(stat="sum", fill = "steelblue") +
geom_text(aes(label=asentamientos), angle = 45) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(angle = 90, hjust = 1))
barras_urbanos
barras_semiurbanos<-ggplot(data=asentamiento_semiurbano_df, aes(x=reorder(estado, -asentamientos), y=asentamientos)) +
labs(title = "ASENTAMIENTOS SEMIURBANOS") +
xlab("Estados") +
ylab("Asentamientos") +
geom_bar(stat="sum", fill = "steelblue") +
geom_text(aes(label=asentamientos), angle = 45) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(angle = 90, hjust = 1))
barras_semiurbanos
# NOTE(review): this chart is titled "RURALES" but plots
# asentamiento_semiurbano_df again — looks like a copy-paste slip; confirm
# whether an asentamiento_rural_df was intended here.
barras_rurales<-ggplot(data=asentamiento_semiurbano_df, aes(x=reorder(estado, -asentamientos), y=asentamientos)) +
labs(title = "ASENTAMIENTOS RURALES") +
xlab("Estados") +
ylab("Asentamientos") +
geom_bar(stat="sum", fill = "steelblue") +
geom_text(aes(label=asentamientos), angle = 45) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(angle = 90, hjust = 1))
barras_rurales
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.