content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ismevExtension.R
\name{print.gpd.fit}
\alias{print.gpd.fit}
\title{Nicer print of gpd.fit results}
\usage{
\method{print}{gpd.fit}(x, ...)
}
\arguments{
\item{x}{a fitted object of the class gpd.fit.}
\item{...}{further arguments passed to \code{print}.}
}
\description{
This function prints the MLE, se, convergence info and negative log-likelihood value.
}
\examples{
y <- c(53, 52, 49, 58, 50, 48, 47, 50, 46, 46, 49, 51, 47, 49, 50)
a <- ismev::gpd.fit(y, threshold = 46, show=FALSE)
a
}
\keyword{gpd.fit}
|
/man/print.gpd.fit.Rd
|
no_license
|
ilapros/ilaprosUtils
|
R
| false
| true
| 589
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ismevExtension.R
\name{print.gpd.fit}
\alias{print.gpd.fit}
\title{Nicer print of gpd.fit results}
\usage{
\method{print}{gpd.fit}(x, ...)
}
\arguments{
\item{x}{a fitted object of the class gpd.fit.}
\item{...}{further arguments passed to \code{print}.}
}
\description{
This function prints the MLE, se, convergence info and negative log-likelihood value.
}
\examples{
y <- c(53, 52, 49, 58, 50, 48, 47, 50, 46, 46, 49, 51, 47, 49, 50)
a <- ismev::gpd.fit(y, threshold = 46, show=FALSE)
a
}
\keyword{gpd.fit}
|
makeCacheMatrix <- function(x = matrix()) {
  ## Creates a special "matrix" object that can cache its inverse.
  ## (Bug fix: the original repeated the function header on the next line,
  ## nesting a second definition and leaving the outer brace unclosed.)
  ##
  ## x: the matrix to wrap (default: an empty matrix)
  ## Returns: a list of accessor functions (set/get/setinverse/getinverse).
  inv <- NULL                                     ## cached inverse; NULL until computed
  set <- function(y) {                            ## replace the stored matrix
    x <<- y                                       ## assign new value in the enclosing environment
    inv <<- NULL                                  ## a new matrix invalidates the cached inverse
  }
  get <- function() x                             ## return the stored matrix
  setinverse <- function(inverse) inv <<- inverse ## store a computed inverse in the enclosing environment
  getinverse <- function() inv                    ## retrieve the cached inverse (or NULL)
  ## Return the accessors as a named list so callers can use the $ operator.
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" `x` created by
  ## makeCacheMatrix(), reusing the cached inverse when one exists.
  ## Extra arguments in ... are forwarded to solve().
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute the inverse and store it for next time.
  mat <- x$get()
  result <- solve(mat, ...)
  x$setinverse(result)
  result
}
|
/Cachematrix.R
|
no_license
|
schmiderek/cachematrix.R
|
R
| false
| false
| 1,347
|
r
|
makeCacheMatrix <- function(x = matrix()) {
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) { ## define the argument with default mode of "matrix"
inv <- NULL ## initialize inv as NULL; will hold value of matrix inverse
set <- function(y) { ## define the set function to assign new
x <<- y ## value of matrix in parent environment
inv <<- NULL ## if there is a new matrix, reset inv to NULL
}
get <- function() x ## define the get fucntion - returns value of the matrix argument
setinverse <- function(inverse) inv <<- inverse ## assigns value of inv in parent environment
getinverse <- function() inv ## gets the value of inv where called
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) ## you need this in order to refer
## to the functions with the $ operator
}
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
#' @rdname pgs
#' @export
pgs <- function(...) {
  ## S3 generic: dispatch to pgs.<class> based on the first argument.
  ## (Fix: the roxygen tags were inside the function body, where roxygen2
  ## does not parse them; they must immediately precede the definition.)
  UseMethod("pgs")
}
|
/R/pgs.R
|
permissive
|
RogerZou0108/lassosum
|
R
| false
| false
| 75
|
r
|
pgs <- function(...) {
#' @rdname pgs
#' @export
UseMethod("pgs")
}
|
##' add horizontal align lines
##'
##' Adds short dotted segments at the right-hand edge of the panel, one per
##' tip row, so tip labels can be visually aligned with the tree.
##'
##' @title geom_aline
##' @param mapping aes mapping; entries here override the default mapping
##' @param linetype line type
##' @param size line size
##' @param ... additional parameters passed on to geom_segment2
##' @return aline layer
##' @export
##' @author Yu Guangchuang
geom_aline <- function(mapping=NULL, linetype="dotted", size=1, ...) {
    ## Bind the aesthetic names to NULL so they are not flagged as undefined
    ## globals (they are evaluated later by ggplot2 non-standard evaluation).
    x <- y <- isTip <- NULL
    ## Default mapping: a segment of length diff(range(x))/200 starting at the
    ## panel's maximum x, drawn only for tip rows (subset = isTip).
    dot_mapping <- aes(xend=x+diff(range(x))/200, x=max(x), yend=y, subset=isTip)
    if (!is.null(mapping)) {
        ## User-supplied aesthetics take precedence over the defaults.
        dot_mapping <- modifyList(dot_mapping, mapping)
    }
    geom_segment2(dot_mapping,
                  linetype=linetype,
                  size=size, stat = StatTreeData, ...)
}
##' geom_segment2 support aes(subset) via setup_data
##'
##' Like \code{ggplot2::geom_segment()}, but the mapping may contain a logical
##' \code{subset} aesthetic; rows where it is not TRUE are dropped by the
##' geom's setup_data() step.
##'
##' @title geom_segment2
##' @param mapping aes mapping
##' @param data data
##' @param stat Name of stat to modify data
##' @param position position
##' @param lineend lineend
##' @param na.rm logical
##' @param show.legend logical
##' @param inherit.aes logical
##' @param nudge_x horizontal adjustment of x
##' @param arrow specification for arrow heads, as created by arrow().
##' @param arrow.fill fill color to use for the arrow head (if closed). `NULL` means use `colour` aesthetic.
##' @param ... additional parameter
##' @importFrom ggplot2 layer
##' @export
##' @seealso
##' \link[ggplot2]{geom_segment}
##' @return add segment layer
##' @author Guangchuang Yu
geom_segment2 <- function(mapping = NULL, data = NULL, stat = "identity",
                          position = "identity", lineend = "butt",
                          na.rm = FALSE, show.legend = NA, inherit.aes = TRUE,
                          nudge_x = 0, arrow = NULL, arrow.fill = NULL,
                          ...) {
    ## Always map `node`; aes_() with a formula defers evaluation to plot time.
    default_aes <- aes_(node=~node)
    if (is.null(mapping)) {
        mapping <- default_aes
    } else {
        ## NOTE(review): modifyList(mapping, default_aes) lets the default
        ## `node` mapping override a user-supplied one — the reverse order of
        ## geom_aline(); confirm this asymmetry is intentional.
        mapping <- modifyList(mapping, default_aes)
    }
    layer(
        data = data,
        mapping = mapping,
        stat = stat,
        geom = GeomSegmentGGtree,
        position = position,
        show.legend = show.legend,
        inherit.aes = inherit.aes,
        params = list(
            arrow = arrow,
            lineend = lineend,
            na.rm = na.rm,
            nudge_x = nudge_x,
            ...
        ),
        ## `subset` is not a real aesthetic, so skip ggplot2's aes validation.
        check.aes = FALSE
    )
}
##' @importFrom ggplot2 GeomSegment
##' @importFrom ggplot2 draw_key_path
## ggproto Geom extending GeomSegment with two tweaks:
##  - setup_data() honours an optional logical `subset` column, keeping only
##    rows where it is TRUE (NA rows are also dropped, since which() skips NA).
##  - draw_panel() shifts x by `nudge_x` before delegating the actual drawing
##    to GeomSegment$draw_panel().
GeomSegmentGGtree <- ggproto("GeomSegmentGGtree", GeomSegment,
                             setup_data = function(data, params) {
                                 if (is.null(data$subset))
                                     return(data)
                                 data[which(data$subset),]
                             },
                             draw_panel = function(data, panel_params, coord, arrow = NULL, arrow.fill = NULL,
                                                   lineend = "butt", linejoin = "round", na.rm = FALSE, nudge_x = 0) {
                                 ## Apply the horizontal nudge, then delegate drawing.
                                 data$x <- data$x + nudge_x
                                 ## data$x <- data$x - sapply(data$label, function(x) convertWidth(grobWidth(textGrob(x, gp=gpar(fontsize=.04* .pt))), "native", TRUE))
                                 GeomSegment$draw_panel(data = data, panel_params = panel_params, coord = coord,
                                                        arrow = arrow, arrow.fill = arrow.fill,
                                                        lineend = lineend, linejoin = linejoin, na.rm = na.rm)
                             }
)
|
/R/geom_segment.R
|
no_license
|
smyang2018/ggtree
|
R
| false
| false
| 3,534
|
r
|
##' add horizontal align lines
##'
##'
##' @title geom_aline
##' @param mapping aes mapping
##' @param linetype line type
##' @param size line size
##' @param ... additional parameter
##' @return aline layer
##' @export
##' @author Yu Guangchuang
geom_aline <- function(mapping=NULL, linetype="dotted", size=1, ...) {
x <- y <- isTip <- NULL
dot_mapping <- aes(xend=x+diff(range(x))/200, x=max(x), yend=y, subset=isTip)
if (!is.null(mapping)) {
dot_mapping <- modifyList(dot_mapping, mapping)
}
geom_segment2(dot_mapping,
linetype=linetype,
size=size, stat = StatTreeData, ...)
}
##' geom_segment2 support aes(subset) via setup_data
##'
##'
##' @title geom_segment2
##' @param mapping aes mapping
##' @param data data
##' @param stat Name of stat to modify data
##' @param position position
##' @param lineend lineend
##' @param na.rm logical
##' @param show.legend logical
##' @param inherit.aes logical
##' @param nudge_x horizontal adjustment of x
##' @param arrow specification for arrow heads, as created by arrow().
##' @param arrow.fill fill color to use for the arrow head (if closed). `NULL` means use `colour` aesthetic.
##' @param ... additional parameter
##' @importFrom ggplot2 layer
##' @export
##' @seealso
##' \link[ggplot2]{geom_segment}
##' @return add segment layer
##' @author Guangchuang Yu
geom_segment2 <- function(mapping = NULL, data = NULL, stat = "identity",
position = "identity", lineend = "butt",
na.rm = FALSE, show.legend = NA, inherit.aes = TRUE,
nudge_x = 0, arrow = NULL, arrow.fill = NULL,
...) {
default_aes <- aes_(node=~node)
if (is.null(mapping)) {
mapping <- default_aes
} else {
mapping <- modifyList(mapping, default_aes)
}
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomSegmentGGtree,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
arrow = arrow,
lineend = lineend,
na.rm = na.rm,
nudge_x = nudge_x,
...
),
check.aes = FALSE
)
}
##' @importFrom ggplot2 GeomSegment
##' @importFrom ggplot2 draw_key_path
GeomSegmentGGtree <- ggproto("GeomSegmentGGtree", GeomSegment,
setup_data = function(data, params) {
if (is.null(data$subset))
return(data)
data[which(data$subset),]
},
draw_panel = function(data, panel_params, coord, arrow = NULL, arrow.fill = NULL,
lineend = "butt", linejoin = "round", na.rm = FALSE, nudge_x = 0) {
data$x <- data$x + nudge_x
## data$x <- data$x - sapply(data$label, function(x) convertWidth(grobWidth(textGrob(x, gp=gpar(fontsize=.04* .pt))), "native", TRUE))
GeomSegment$draw_panel(data = data, panel_params = panel_params, coord = coord,
arrow = arrow, arrow.fill = arrow.fill,
lineend = lineend, linejoin = linejoin, na.rm = na.rm)
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colour_palettes.R
\name{gg_fill_customs}
\alias{gg_fill_customs}
\title{Custom colours to use in ggplot as scale_fill_manual}
\usage{
gg_fill_customs()
}
\description{
This function lets the user use pre-defined default colours
}
\seealso{
Other Auxiliary:
\code{\link{gg_colour_customs}()},
\code{\link{gg_text_customs}()},
\code{\link{lares_pal}()},
\code{\link{plot_palette}()}
}
\concept{Auxiliary}
|
/man/gg_fill_customs.Rd
|
no_license
|
Hong-Sung-Hyun/lares
|
R
| false
| true
| 482
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colour_palettes.R
\name{gg_fill_customs}
\alias{gg_fill_customs}
\title{Custom colours to use in ggplot as scale_fill_manual}
\usage{
gg_fill_customs()
}
\description{
This function lets the user use pre-defined default colours
}
\seealso{
Other Auxiliary:
\code{\link{gg_colour_customs}()},
\code{\link{gg_text_customs}()},
\code{\link{lares_pal}()},
\code{\link{plot_palette}()}
}
\concept{Auxiliary}
|
library(tidyverse)
library(modelr)
options(na.action = na.warn)
library(nycflights13)
library(lubridate)
ggplot(diamonds, aes(cut, price)) + geom_boxplot()
ggplot(diamonds, aes(color, price)) + geom_boxplot()
ggplot(diamonds, aes(clarity, price)) + geom_boxplot()
ggplot(diamonds, aes(carat, price)) +
geom_hex(bins = 50)
diamonds2 <- diamonds %>%
filter(carat <= 2.5) %>%
mutate(lprice = log2(price), lcarat = log2(carat))
ggplot(diamonds2, aes(lcarat, lprice)) +
geom_hex(bins = 50)
mod_diamond <- lm(lprice ~ lcarat, data = diamonds2)
grid <- diamonds2 %>%
data_grid(carat = seq_range(carat, 20)) %>%
mutate(lcarat = log2(carat)) %>%
add_predictions(mod_diamond, "lprice") %>%
mutate(price = 2 ^ lprice)
ggplot(diamonds2, aes(carat, price)) +
geom_hex(bins = 50) +
geom_line(data = grid, color = "red", size = 1)
diamonds2 <- diamonds2 %>%
add_residuals(mod_diamond, "lresid")
ggplot(diamonds2, aes(lcarat, lresid)) +
geom_hex(bins = 50)
ggplot(diamonds2, aes(cut, lresid)) + geom_boxplot()
ggplot(diamonds2, aes(color, lresid)) + geom_boxplot()
ggplot(diamonds2, aes(clarity, lresid)) + geom_boxplot()
mod_diamond2 <- lm(
lprice ~ lcarat + color + cut + clarity,
data = diamonds2
)
grid <- diamonds2 %>%
data_grid(cut, .model = mod_diamond2) %>%
add_predictions(mod_diamond2)
grid
ggplot(grid, aes(cut, pred)) +
geom_point()
diamonds2 <- diamonds2 %>%
add_residuals(mod_diamond2, "lresid2")
ggplot(diamonds2, aes(lcarat, lresid2)) +
geom_hex(bins = 50)
diamonds2 %>%
filter(abs(lresid2) > 1) %>%
add_predictions(mod_diamond2) %>%
mutate(pred = round(2 ^ pred)) %>%
select(price, pred, carat:table, x:z) %>%
arrange(price)
daily <- flights %>%
mutate(date = make_date(year, month, day)) %>%
group_by(date) %>%
summarize(n = n())
daily
ggplot(daily, aes(date, n)) +
geom_line()
daily <- daily %>%
mutate(wday = wday(date, label = TRUE))
ggplot(daily, aes(wday, n)) +
geom_boxplot()
mod <- lm(n ~ wday, data = daily)
grid <- daily %>%
data_grid(wday) %>%
add_predictions(mod, "n")
ggplot(daily, aes(wday, n)) +
geom_boxplot() +
geom_point(data = grid, color = "red", size = 4)
daily <- daily %>%
add_residuals(mod)
daily %>%
ggplot(aes(date, resid)) +
geom_ref_line(h = 0) +
geom_line()
ggplot(daily, aes(date, resid, color = wday)) +
geom_ref_line(h = 0) +
geom_line()
daily %>%
filter(resid < -100)
daily %>%
ggplot(aes(date, resid)) +
geom_ref_line(h = 0) +
geom_line(color = "grey50") +
geom_smooth(se = FALSE, span = 0.20)
## Saturday-only flight counts over the year with monthly x-axis breaks.
## Bug fix: scale_x_dat() was a typo — the ggplot2 function is scale_x_date().
daily %>%
  filter(wday == "Sat") %>%
  ggplot(aes(date, n)) +
  geom_point() +
  geom_line() +
  scale_x_date(
    NULL,
    date_breaks = "1 month",
    date_labels = "%b"
  )
term <- function(date) {
  ## Assign each 2013 date to a school-term period (spring/summer/fall),
  ## using cut() over fixed term-boundary dates.
  term_boundaries <- ymd(20130101, 20130605, 20130825, 20140101)
  cut(date, breaks = term_boundaries, labels = c("spring", "summer", "fall"))
}
daily <- daily %>%
mutate(term = term(date))
daily %>%
filter(wday == "Sat") %>%
ggplot(aes(date, n, color = term)) +
geom_point(alpha = 1/3) +
geom_line() +
scale_x_date(
NULL,
date_breaks = "1 month",
date_labels = "%b"
)
daily %>%
ggplot(aes(wday, n, color = term)) +
geom_boxplot()
mod1 <- lm(n ~ wday, data = daily)
mod2 <- lm(n ~ wday * term, data = daily)
daily %>%
gather_residuals(without_term = mod1, with_term = mod2) %>%
ggplot(aes(date, resid, color = model)) +
geom_line(alpha = 0.75)
grid <- daily %>%
data_grid(wday, term) %>%
add_predictions(mod2, "n")
ggplot(daily, aes(wday, n)) +
geom_boxplot() +
geom_point(data = grid, color = "red") +
facet_wrap(~ term)
mod3 <- MASS::rlm(n ~ wday * term, data = daily)
daily %>%
add_residuals(mod3, "resid") %>%
ggplot(aes(date, resid)) +
geom_hline(yintercept = 0, size = 2, color = "white") +
geom_line()
compute_vars <- function(data) {
data %>%
mutate(
term = term(date),
wday = wday(date, label = TRUE)
)
}
wday2 <- function(x) wday(x, label = TRUE)
mod3 <- lm(n ~ wday2(date) * term(date), data = daily)
library(splines)
mod <- MASS::rlm(n ~ wday * ns(date, 5), data = daily)
daily %>%
data_grid(wday, date = seq_range(date, n = 13)) %>%
add_predictions(mod) %>%
ggplot(aes(date, pred, color = wday)) +
geom_line() +
geom_point()
|
/Chapter19.R
|
no_license
|
chor-nyan/r4ds
|
R
| false
| false
| 4,285
|
r
|
library(tidyverse)
library(modelr)
options(na.action = na.warn)
library(nycflights13)
library(lubridate)
ggplot(diamonds, aes(cut, price)) + geom_boxplot()
ggplot(diamonds, aes(color, price)) + geom_boxplot()
ggplot(diamonds, aes(clarity, price)) + geom_boxplot()
ggplot(diamonds, aes(carat, price)) +
geom_hex(bins = 50)
diamonds2 <- diamonds %>%
filter(carat <= 2.5) %>%
mutate(lprice = log2(price), lcarat = log2(carat))
ggplot(diamonds2, aes(lcarat, lprice)) +
geom_hex(bins = 50)
mod_diamond <- lm(lprice ~ lcarat, data = diamonds2)
grid <- diamonds2 %>%
data_grid(carat = seq_range(carat, 20)) %>%
mutate(lcarat = log2(carat)) %>%
add_predictions(mod_diamond, "lprice") %>%
mutate(price = 2 ^ lprice)
ggplot(diamonds2, aes(carat, price)) +
geom_hex(bins = 50) +
geom_line(data = grid, color = "red", size = 1)
diamonds2 <- diamonds2 %>%
add_residuals(mod_diamond, "lresid")
ggplot(diamonds2, aes(lcarat, lresid)) +
geom_hex(bins = 50)
ggplot(diamonds2, aes(cut, lresid)) + geom_boxplot()
ggplot(diamonds2, aes(color, lresid)) + geom_boxplot()
ggplot(diamonds2, aes(clarity, lresid)) + geom_boxplot()
mod_diamond2 <- lm(
lprice ~ lcarat + color + cut + clarity,
data = diamonds2
)
grid <- diamonds2 %>%
data_grid(cut, .model = mod_diamond2) %>%
add_predictions(mod_diamond2)
grid
ggplot(grid, aes(cut, pred)) +
geom_point()
diamonds2 <- diamonds2 %>%
add_residuals(mod_diamond2, "lresid2")
ggplot(diamonds2, aes(lcarat, lresid2)) +
geom_hex(bins = 50)
diamonds2 %>%
filter(abs(lresid2) > 1) %>%
add_predictions(mod_diamond2) %>%
mutate(pred = round(2 ^ pred)) %>%
select(price, pred, carat:table, x:z) %>%
arrange(price)
daily <- flights %>%
mutate(date = make_date(year, month, day)) %>%
group_by(date) %>%
summarize(n = n())
daily
ggplot(daily, aes(date, n)) +
geom_line()
daily <- daily %>%
mutate(wday = wday(date, label = TRUE))
ggplot(daily, aes(wday, n)) +
geom_boxplot()
mod <- lm(n ~ wday, data = daily)
grid <- daily %>%
data_grid(wday) %>%
add_predictions(mod, "n")
ggplot(daily, aes(wday, n)) +
geom_boxplot() +
geom_point(data = grid, color = "red", size = 4)
daily <- daily %>%
add_residuals(mod)
daily %>%
ggplot(aes(date, resid)) +
geom_ref_line(h = 0) +
geom_line()
ggplot(daily, aes(date, resid, color = wday)) +
geom_ref_line(h = 0) +
geom_line()
daily %>%
filter(resid < -100)
daily %>%
ggplot(aes(date, resid)) +
geom_ref_line(h = 0) +
geom_line(color = "grey50") +
geom_smooth(se = FALSE, span = 0.20)
## Saturday-only flight counts over the year with monthly x-axis breaks.
## Bug fix: scale_x_dat() was a typo — the ggplot2 function is scale_x_date().
daily %>%
  filter(wday == "Sat") %>%
  ggplot(aes(date, n)) +
  geom_point() +
  geom_line() +
  scale_x_date(
    NULL,
    date_breaks = "1 month",
    date_labels = "%b"
  )
term <- function(date) {
cut(date,
breaks = ymd(20130101, 20130605, 20130825, 20140101),
labels = c("spring", "summer", "fall")
)
}
daily <- daily %>%
mutate(term = term(date))
daily %>%
filter(wday == "Sat") %>%
ggplot(aes(date, n, color = term)) +
geom_point(alpha = 1/3) +
geom_line() +
scale_x_date(
NULL,
date_breaks = "1 month",
date_labels = "%b"
)
daily %>%
ggplot(aes(wday, n, color = term)) +
geom_boxplot()
mod1 <- lm(n ~ wday, data = daily)
mod2 <- lm(n ~ wday * term, data = daily)
daily %>%
gather_residuals(without_term = mod1, with_term = mod2) %>%
ggplot(aes(date, resid, color = model)) +
geom_line(alpha = 0.75)
grid <- daily %>%
data_grid(wday, term) %>%
add_predictions(mod2, "n")
ggplot(daily, aes(wday, n)) +
geom_boxplot() +
geom_point(data = grid, color = "red") +
facet_wrap(~ term)
mod3 <- MASS::rlm(n ~ wday * term, data = daily)
daily %>%
add_residuals(mod3, "resid") %>%
ggplot(aes(date, resid)) +
geom_hline(yintercept = 0, size = 2, color = "white") +
geom_line()
compute_vars <- function(data) {
data %>%
mutate(
term = term(date),
wday = wday(date, label = TRUE)
)
}
wday2 <- function(x) wday(x, label = TRUE)
mod3 <- lm(n ~ wday2(date) * term(date), data = daily)
library(splines)
mod <- MASS::rlm(n ~ wday * ns(date, 5), data = daily)
daily %>%
data_grid(wday, date = seq_range(date, n = 13)) %>%
add_predictions(mod) %>%
ggplot(aes(date, pred, color = wday)) +
geom_line() +
geom_point()
|
## Auto-generated regression/fuzz input for the internal compiled routine
## dann:::calc_distance_C (testX: length-88 double vector; trainX: 5x7 double
## matrix with extreme magnitudes to stress the numerics). Presumably produced
## by an RcppDeepState/valgrind harness — TODO confirm. Do not hand-edit the
## numeric payload.
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.15028899341864e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
## Invoke the internal C routine with the stored arguments.
result <- do.call(dann:::calc_distance_C,testlist)
## Print the result structure so harness runs produce visible output.
str(result)
|
/dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609867301-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 1,199
|
r
|
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.15028899341864e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result)
|
#!/usr/bin/env Rscript
## Intersect circRNA calls from three detection tools (CIRI2, CIRCexplorer2,
## DCC) and write circRNAs supported by at least 2 / at least 3 tools.
## Usage: venn.R <read_cutoff> <sample_cutoff>
#install.packages("VennDiagram")
#setwd("/public/home/nong/pro/npc/process2/results/plot_dir")
arg = commandArgs(trailingOnly = TRUE)
if (length(arg) < 2) stop("Usage: venn.R <read_cutoff> <sample_cutoff>", call. = FALSE)
## Bug fix: commandArgs() returns character strings; without as.numeric() the
## `>=` comparisons below coerce the counts to character and compare
## lexicographically, giving wrong filtering results.
read_cutoff = as.numeric(arg[1])
sample_cutfoff = as.numeric(arg[2])
if (anyNA(c(read_cutoff, sample_cutfoff))) stop("Both cutoffs must be numeric.", call. = FALSE)
library(data.table)
ciri2 = fread('ciri2.table.txt', sep="\t")
ce2 = fread('circexplorer2.table.txt', sep="\t")
dcc = fread('dcc.table.txt', sep="\t")
## Keep circRNAs (rows) with >= read_cutoff reads in >= sample_cutfoff samples;
## column 1 (V1) holds the circRNA identifier, remaining columns are counts.
ciri2_f = ciri2[rowSums(ciri2[,-1] >= read_cutoff) >=sample_cutfoff,]
ce2_f = ce2[rowSums(ce2[,-1] >= read_cutoff) >=sample_cutfoff, ]
dcc_f = dcc[rowSums(dcc[,-1] >= read_cutoff) >=sample_cutfoff, ]
ciri2Name = ciri2_f$V1
ce2Name = ce2_f$V1
dccName = dcc_f$V1
allName = unique(c(ciri2Name, ce2Name, dccName))
## For each circRNA, count how many of the three tools retained it.
ciri2_in = allName %in% ciri2Name
ce2_in = allName %in% ce2Name
dcc_in = allName %in% dccName
all_in = ciri2_in + ce2_in + dcc_in
table(all_in)  ## autoprinted by Rscript: distribution of tool support
write(allName[all_in>=2], 'at_least_2.circRNA')
write(allName[all_in>=3], 'at_least_3.circRNA')
|
/scripts/venn.R
|
permissive
|
zhaoljunproject/PipeOne
|
R
| false
| false
| 953
|
r
|
#!/usr/bin/env Rscript
#upload library
#install.packages("VennDiagram")
#setwd("/public/home/nong/pro/npc/process2/results/plot_dir")
arg = commandArgs(trailingOnly = TRUE)
read_cutoff = arg[1]
sample_cutfoff = arg[2]
library(data.table)
ciri2 = fread('ciri2.table.txt', sep="\t")
ce2 = fread('circexplorer2.table.txt', sep="\t")
dcc = fread('dcc.table.txt', sep="\t")
ciri2_f = ciri2[rowSums(ciri2[,-1] >= read_cutoff) >=sample_cutfoff,]
ce2_f = ce2[rowSums(ce2[,-1] >= read_cutoff) >=sample_cutfoff, ]
dcc_f = dcc[rowSums(dcc[,-1] >= read_cutoff) >=sample_cutfoff, ]
ciri2Name = ciri2_f$V1
ce2Name = ce2_f$V1
dccName = dcc_f$V1
allName = c(ciri2Name,ce2Name,dccName)
allName = unique(allName)
ciri2_in = allName %in% ciri2Name
ce2_in = allName %in% ce2Name
dcc_in = allName %in% dccName
all_in = ciri2_in + ce2_in + dcc_in
table(all_in)
write(allName[all_in>=2], 'at_least_2.circRNA')
write(allName[all_in>=3], 'at_least_3.circRNA')
|
context('test regex2fixed.R')
## These tests exercise regex2fixed(), which expands regex/glob/fixed patterns
## into the concrete type strings they match. Assertions use
## setdiff(actual, expected) == list() so that list element order is ignored.
## NOTE(review): setdiff() only verifies actual is a subset of expected;
## patterns that fail to match anything would not be caught — consider also
## checking setdiff(expected, actual).
test_that("regex2fixed converts regex patterns correctly", {
    ## Multi-element pattern vectors (e.g. c('^a$', '^b')) describe sequences.
    regex <- list(c('^a$', '^b'), c('c'), c('d'), c('b$'))
    types <- c('A', 'AA', 'B', 'BB', 'C', 'CC', 'a', 'aa', 'b', 'bb', 'c', 'cc')
    ## valuetype 'fixed': patterns are literal strings, so only 'c' matches.
    expect_identical(setdiff(
        regex2fixed(regex, types, 'fixed', case_insensitive=TRUE),
        list('C', 'c')
    ), list())
    expect_identical(setdiff(
        regex2fixed(regex, types, 'fixed', case_insensitive=FALSE),
        list('c')
    ), list())
    ## valuetype 'regex': anchors and sequences expand to type combinations.
    expect_identical(setdiff(
        regex2fixed(regex, types, 'regex', case_insensitive=TRUE),
        list(c("A", "B"), c("a", "B"), c("A", "BB"), c("a", "BB"),
             c("A", "b"), c("a", "b"), c("A", "bb"), c("a", "bb"),
             "C", "CC", "c", "cc", "B", "BB", "b", "bb")
    ), list())
    expect_identical(setdiff(
        regex2fixed(regex, types, 'regex', case_insensitive=FALSE),
        list(c("a", "b"), c("a", "bb"), "c", "cc", "b", "bb")
    ), list())
})
test_that("regex2fixed converts complex regex patterns correctly", {
    ## Wildcards, quantifiers, character classes, and anchored groups.
    regex <- list(c('a...b'), c('c.*d'), c('e.+f'), c('^g[xyz]+h$'), c('z'), c('[0-9]'))
    types <- c('axxxb', 'cxxxd', 'exxxf', 'gyyyh', 'azzzb', 'a999b')
    expect_identical(setdiff(
        regex2fixed(regex, types, 'regex', case_insensitive=TRUE),
        list('axxxb', 'cxxxd', 'exxxf', 'gyyyh', 'azzzb', 'a999b')
    ), list())
})
test_that("regex2fixed converts emoji correctly", {
    ## ':)' contains regex metacharacters; glob and fixed valuetypes must
    ## treat it literally and match only the exact ':)' type.
    regex <- ':)'
    types <- c(';)', ':(', ':)', ':/', '(;')
    expect_identical(
        unlist(regex2fixed(regex, types, 'glob', case_insensitive=TRUE)),
        ':)'
    )
    expect_identical(
        unlist(regex2fixed(regex, types, 'fixed', case_insensitive=TRUE)),
        ':)'
    )
})
|
/tests/testthat/test-regex2fixed.R
|
no_license
|
strategist922/quanteda
|
R
| false
| false
| 1,783
|
r
|
context('test regex2fixed.R')
test_that("regex2fixed converts regex patterns correctly", {
regex <- list(c('^a$', '^b'), c('c'), c('d'), c('b$'))
types <- c('A', 'AA', 'B', 'BB', 'C', 'CC', 'a', 'aa', 'b', 'bb', 'c', 'cc')
expect_identical(setdiff(
regex2fixed(regex, types, 'fixed', case_insensitive=TRUE),
list('C', 'c')
), list())
expect_identical(setdiff(
regex2fixed(regex, types, 'fixed', case_insensitive=FALSE),
list('c')
), list())
expect_identical(setdiff(
regex2fixed(regex, types, 'regex', case_insensitive=TRUE),
list(c("A", "B"), c("a", "B"), c("A", "BB"), c("a", "BB"),
c("A", "b"), c("a", "b"), c("A", "bb"), c("a", "bb"),
"C", "CC", "c", "cc", "B", "BB", "b", "bb")
), list())
expect_identical(setdiff(
regex2fixed(regex, types, 'regex', case_insensitive=FALSE),
list(c("a", "b"), c("a", "bb"), "c", "cc", "b", "bb")
), list())
})
test_that("regex2fixed converts complex regex patterns correctly", {
regex <- list(c('a...b'), c('c.*d'), c('e.+f'), c('^g[xyz]+h$'), c('z'), c('[0-9]'))
types <- c('axxxb', 'cxxxd', 'exxxf', 'gyyyh', 'azzzb', 'a999b')
expect_identical(setdiff(
regex2fixed(regex, types, 'regex', case_insensitive=TRUE),
list('axxxb', 'cxxxd', 'exxxf', 'gyyyh', 'azzzb', 'a999b')
), list())
})
test_that("regex2fixed converts emoji correctly", {
regex <- ':)'
types <- c(';)', ':(', ':)', ':/', '(;')
expect_identical(
unlist(regex2fixed(regex, types, 'glob', case_insensitive=TRUE)),
':)'
)
expect_identical(
unlist(regex2fixed(regex, types, 'fixed', case_insensitive=TRUE)),
':)'
)
})
|
#' Ancestral Character State Estimation
#'
#' @description
#'
#' Given a tree and a cladistic matrix uses likelihood to estimate the ancestral states for every character.
#'
#' @param cladistic_matrix A character-taxon matrix in the format imported by \link{read_nexus_matrix}.
#' @param time_tree A tree (phylo object) with branch lengths that represents the relationships of the taxa in \code{cladistic_matrix}.
#' @param estimate_all_nodes Logical that allows the user to make estimates for all ancestral values. The default (\code{FALSE}) will only make estimates for nodes that link coded terminals (recommended).
#' @param estimate_tip_values Logical that allows the user to make estimates for tip values. The default (\code{FALSE}) will only makes estimates for internal nodes (recommended).
#' @param inapplicables_as_missing Logical that decides whether or not to treat inapplicables as missing (TRUE) or not (FALSE, the default and recommended option).
#' @param polymorphism_behaviour One of either "equalp" or "treatasmissing".
#' @param uncertainty_behaviour One of either "equalp" or "treatasmissing".
#' @param threshold The threshold value to use when collapsing marginal likelihoods to discrete state(s).
#' @param all_missing_allowed Logical to allow all missing character values (generally not recommended, hence default is FALSE).
#'
#' @details
#'
#' At its' core the function uses either the \link{rerootingMethod} (Yang et al. 1995) as implemented in the \link{phytools} package (for discrete characters) or the \link{ace} function in the \link{ape} package (for continuous characters) to make ancestral state estimates. For discrete characters these are collapsed to the most likely state (or states, given equal likelihoods or likelihood within a defined \code{threshold} value). In the latter case the resulting states are represented as an uncertainty (i.e., states separated by a slash, e.g., 0/1). This is the method developed for Brusatte et al. (2014).
#'
#' The function can deal with ordered or unordered characters and does so by allowing only indirect transitions (from 0 to 2 must pass through 1) or direct transitions (from 0 straight to 2), respectively. However, more complex step matrix transitions are not currently supported.
#'
#' Ancestral state estimation is complicated where polymorphic or uncertain tip values exist. These are not currently well handled here, although see the \code{fitpolyMk} function in \link{phytools} for a way these could be dealt with in future. The only available options right now are to either treat multiple states as being equally probable of the "true" tip state (i.e., a uniform prior) or to avoid dealing with them completely by treating them as missing (NA) values.
#'
#' It is also possible to try to use phylogenetic information to infer missing states, both for internal nodes (e.g., those leading to missing tip states) and for tips. This is captured by the \code{estimate_all_nodes} and \code{estimate_tip_values} options. These have been partially explored by Lloyd (2018), who cautioned against their use.
#'
#' @return
#'
#' The function will return the same \code{cladistic_matrix}, but with two key additions: 1. Internal nodes (numbered by \link{ape} formatting) will appear after taxa in each matrix block with estimated states coded for them, and 2. The time-scaled tree used will be added to \code{cladistic_matrix} as \code{cladistic_matrix$topper$tree}. Note that if using the \code{estimate_tip_values = TRUE} option then tip values may also be changed from those provided as input.
#'
#' @author Graeme T. Lloyd \email{graemetlloyd@@gmail.com} and Thomas Guillerme \email{guillert@@tcd.ie}
#'
#' @references
#'
#' Brusatte, S. L., Lloyd, G. T., Wang, S. C. and Norell, M. A., 2014. Gradual assembly of avian body plan culminated in rapid rates of evolution across dinosaur-bird transition. \emph{Current Biology}, 24, 2386-2392.
#'
#' Lloyd, G. T., 2018. Journeys through discrete-character morphospace: synthesizing phylogeny, tempo, and disparity. \emph{Palaeontology}, \bold{61}, 637-645.
#'
#' Yang, Z., Kumar, S. and Nei, M., 1995. A new method of inference of ancestral nucleotide and amino acid sequences. \emph{Genetics}, \bold{141}, 1641-1650.
#'
#' @examples
#'
#' # Set random seed:
#' set.seed(4)
#'
#' # Generate a random tree for the Day data set:
#' time_tree <- ape::rtree(n = nrow(day_2016$matrix_1$matrix))
#'
#' # Update taxon names to match those in the data matrix:
#' time_tree$tip.label <- rownames(x = day_2016$matrix_1$matrix)
#'
#' # Set root time by making youngest taxon extant:
#' time_tree$root.time <- max(diag(x = ape::vcv(phy = time_tree)))
#'
#' # Use Day matrix as cladistic matrix:
#' cladistic_matrix <- day_2016
#'
#' # Prune most characters out to make example run fast:
#' cladistic_matrix <- prune_cladistic_matrix(cladistic_matrix,
#' characters2prune = c(2:3, 5:37)
#' )
#'
#' # Estimate ancestral states:
#' estimate_ancestral_states(
#' cladistic_matrix = cladistic_matrix,
#' time_tree = time_tree
#' )
#' @export estimate_ancestral_states
estimate_ancestral_states <- function(cladistic_matrix, time_tree, estimate_all_nodes = FALSE, estimate_tip_values = FALSE, inapplicables_as_missing = FALSE, polymorphism_behaviour = "equalp", uncertainty_behaviour = "equalp", threshold = 0.01, all_missing_allowed = FALSE) {
  # How to get predicted tip states for a continuous character? (Phytools answer: http://blog.phytools.org/2013/11/reconstructed-ancestral-tip-states-for.html)
  # - So basically under ML just inherit state from ancestral node (really this is mean of distribution where sd would grow with duration of branch so to allow the possibility of variance this could also be sampled stochastically
  # How to deal with step matrices?
  # How to deal with models where intermediate tip states are not even in sample
  # Change help file to explain interactions between all options, e.g., if doing all chars then polymorphisms used for discrete, midpoint for continuous etc.
  # Handle all missing/inapplicable case properly
  # Handle only two tips case properly
  # Add Liam Revell polymorphism options
  # Check cladistic_matrix has class cladisticMatrix and stop and warn user if not:
  if (!inherits(x = cladistic_matrix, what = "cladisticMatrix")) stop("cladistic_matrix must be an object of class \"cladisticMatrix\".")
  # Get number of tips in tree:
  n_tips <- ape::Ntip(phy = time_tree)
  # Get number of nodes in tree:
  n_nodes <- ape::Nnode(phy = time_tree)
  # Catch problem with trees with no branch lengths:
  if (is.null(time_tree$edge.length)) stop("time_tree must have branch lengths.")
  # Catch problem with polytomies:
  if (time_tree$Nnode < (n_tips - 1)) stop("time_tree must be fully bifurcating.")
  # Catch problem with zero-length branches:
  if (any(time_tree$edge.length == 0)) stop("time_tree must not have zero-length branches.")
  # Check for step matrices and stop and warn if found:
  if (length(x = cladistic_matrix$topper$step_matrices) > 0) stop("Function can not currently deal with step matrices.")
  # Check estimate_all_nodes is a logical:
  if (!is.logical(estimate_all_nodes)) stop("estimate_all_nodes must be a logical (TRUE or FALSE).")
  # Check estimate_tip_values is a logical:
  if (!is.logical(estimate_tip_values)) stop("estimate_tip_values must be a logical (TRUE or FALSE).")
  # Check inapplicables_as_missing is a logical:
  if (!is.logical(inapplicables_as_missing)) stop("inapplicables_as_missing must be a logical (TRUE or FALSE).")
  # Check polymorphism_behaviour is a single allowable value:
  if (length(x = polymorphism_behaviour) != 1 || !any(c("equalp", "treatasmissing") == polymorphism_behaviour)) stop("polymorphism_behaviour must be a single value of either, \"equalp\" or \"treatasmissing\".")
  # Check uncertainty_behaviour is a single allowable value:
  if (length(x = uncertainty_behaviour) != 1 || !any(c("equalp", "treatasmissing") == uncertainty_behaviour)) stop("uncertainty_behaviour must be a single value of either, \"equalp\" or \"treatasmissing\".")
  # Check threshold is a numeric value between the limits of zero and one:
  if (!is.numeric(threshold) || threshold > 0.5 || threshold < 0) stop("threshold must be a numeric value between 0 and 0.5.")
  # Isolate ordering elements:
  ordering <- unlist(x = lapply(X = cladistic_matrix[2:length(x = cladistic_matrix)], "[[", "ordering"))
  # Isolate minimum values:
  minimum_values <- unlist(x = lapply(X = cladistic_matrix[2:length(x = cladistic_matrix)], "[[", "minimum_values"))
  # Isolate maximum values:
  maximum_values <- unlist(x = lapply(X = cladistic_matrix[2:length(x = cladistic_matrix)], "[[", "maximum_values"))
  # Store raw original matrix:
  raw_cladistic_matrix <- cladistic_matrix
  # Combine matrix blocks into a single matrix:
  cladistic_matrix <- original_matrix <- do.call(what = cbind, args = lapply(X = cladistic_matrix[2:length(x = cladistic_matrix)], "[[", "matrix"))
  # Find any failed name matches:
  failed_name_matches <- c(setdiff(x = rownames(x = cladistic_matrix), y = time_tree$tip.label), setdiff(x = time_tree$tip.label, y = rownames(x = cladistic_matrix)))
  # Check there are no failed name matches and stop and report if found:
  if (length(x = failed_name_matches) > 0) stop(paste("The following names do not match between the tree and matrix: ", paste(sort(x = failed_name_matches), collapse = ", "), ". Check spelling and try again.", sep = ""))
  # If treating inapplicables as missing (and there is at least one inapplicable) replace with NA:
  if (inapplicables_as_missing && length(x = which(x = cladistic_matrix == "")) > 0) cladistic_matrix[which(x = cladistic_matrix == "")] <- NA
  # If treating polymorphisms as missing:
  if (polymorphism_behaviour == "treatasmissing" && length(x = grep("&", cladistic_matrix)) > 0) cladistic_matrix[grep("&", cladistic_matrix)] <- NA
  # If treating uncertainties as missing:
  if (uncertainty_behaviour == "treatasmissing" && length(x = grep("/", cladistic_matrix)) > 0) cladistic_matrix[grep("/", cladistic_matrix)] <- NA
  # Get vector of character numbers where all values are NA:
  dataless_characters <- which(x = apply(cladistic_matrix, 2, function(x) all(is.na(x))))
  # Look for all missing characters and stop and warn user if found:
  if (!all_missing_allowed && length(x = dataless_characters) > 0) stop(paste0("The following characters are coded as missing across all tips: ", paste0(dataless_characters, collapse = ", "), ". This can arise either because of the input data (in which case it is recommended that the user prune these characters using prune_cladistic_matrix) or because of the chosen options for inapplicables_as_missing, polymorphism_behaviour, and/or uncertainty_behaviour (in which case the user may wish to chose different values for these)."))
  # Convert tip states into a list:
  data_list <- apply(cladistic_matrix, 2, function(x) list(tip_states = x))
  # For each character:
  for (i in seq_along(data_list)) {
    # Add minimum value to list:
    data_list[[i]]$minimum_values <- unname(minimum_values[i])
    # Add maximum value to list:
    data_list[[i]]$maximum_values <- unname(maximum_values[i])
    # Add ordering to list:
    data_list[[i]]$ordering <- unname(ordering[i])
    # Add tree to list:
    data_list[[i]]$tree <- time_tree
  }
  # If estimating values for all characters (need to set dummy tip states for missing values):
  if (estimate_all_nodes) {
    # Subfunction to fill missing values (and inapplicables if desired):
    fill_missing <- function(tip_states) {
      # Find which rows correspond to missing states:
      missing_rows <- which(x = is.na(tip_states$tip_states))
      # If missing states found:
      if (length(x = missing_rows) > 0) {
        # Build missing state by either forming a polymorphism of all possible tip states, or if continuous the midpoint value:
        fill_states <- ifelse(tip_states$ordering == "cont", (tip_states$minimum_values + tip_states$maximum_values) / 2, paste(tip_states$minimum_values:tip_states$maximum_values, collapse = "/"))
        # Insert missing values:
        tip_states$tip_states[missing_rows] <- fill_states
      }
      # Return tip states with missing values replaced:
      tip_states
    }
    # Apply fill missing function across all characters:
    data_list <- lapply(X = data_list, fill_missing)
  }
  # Subfunction to prune tips with missing or inapplicable values:
  prune_tips <- function(x) {
    # Find all missing or inapplicable value tip names:
    missing <- names(sort(x = c(which(x = x$tip_states == ""), which(x = is.na(x$tip_states)))))
    # Work out how many tips will be left after pruning:
    n_tips_remaining <- length(x = setdiff(x = names(x$tip_states), y = missing))
    # If there is at least one missing value:
    if (length(x = missing) > 0) {
      # If less than two tips will remain then set tree as NULL:
      if (n_tips_remaining < 2) x$tree <- NULL
      # If at least two tips will remain prune missing values from tree:
      if (n_tips_remaining > 1) x$tree <- ape::drop.tip(phy = x$tree, tip = missing)
      # Collapse tip states:
      x$tip_states <- x$tip_states[setdiff(x = names(x$tip_states), y = missing)]
    }
    # Return pruned output:
    x
  }
  # Prune out missing and inapplicable tips:
  data_list <- lapply(X = data_list, prune_tips)
  # Subfunction to build tip state matrices:
  convert_tip_states_to_matrix <- function(x) {
    # As long as there is at least one tip state:
    if (length(x = x$tip_states) > 0) {
      # If the character is not continuous (i.e., it is some form of discrete character):
      if (x$ordering != "cont") {
        # Temporarily store tip states so matrix format can overwrite the stored version below:
        tip_states <- x$tip_states
        # Create matrix of tip state probabilities:
        x$tip_states <- matrix(0, nrow = length(x = x$tip_states), ncol = x$maximum_values - x$minimum_values + 1, dimnames = list(names(x$tip_states), x$minimum_values:x$maximum_values))
        # For each character state if a single state is coded store probability as 1:
        for (i in colnames(x = x$tip_states)) x$tip_states[tip_states == i, i] <- 1
        # If there are polymorphisms and/or uncertainties:
        if (length(x = grep("&|/", tip_states)) > 0) {
          # Get polymorphism locations:
          polymorphisms <- grep("&", tip_states)
          # Get uncertainty locations:
          uncertainties <- grep("/", tip_states)
          # If there are polymorphisms and using the "equalp" (equal probability of each state) option:
          if (length(x = polymorphisms) > 0 && polymorphism_behaviour == "equalp") {
            # For each polymorphisms set each state as equally probable:
            for (i in polymorphisms) x$tip_states[i, strsplit(tip_states[i], split = "&")[[1]]] <- 1 / length(x = strsplit(tip_states[i], split = "&")[[1]])
          }
          # If there are uncertainties and using the "equalp" (equal probability of each state) option:
          if (length(x = uncertainties) > 0 && uncertainty_behaviour == "equalp") {
            # For each uncertainty set each state as equally probable:
            for (i in uncertainties) x$tip_states[i, strsplit(tip_states[i], split = "/")[[1]]] <- 1 / length(x = strsplit(tip_states[i], split = "/")[[1]])
          }
        }
        # If a continuous character:
      } else {
        # Simply make tip states the numeric values (should never be a polymorphism) as a vector:
        x$tip_states <- as.numeric(x$tip_states)
      }
      # If tip state has no length (all values are missing):
    } else {
      # Create row-less tip states matrix:
      x$tip_states <- matrix(nrow = 0, ncol = 1, dimnames = list(c(), "0"))
    }
    # Return the revised input in the same list format:
    x
  }
  # Reformat tip states ready for ancestral estimation:
  data_list <- lapply(X = data_list, convert_tip_states_to_matrix)
  # Subfunction to build character model:
  build_character_model <- function(x) {
    # Set default model to equal rates (works for all binary or unordered characters):
    x$model <- "ER"
    # If a character is both ordered and has at least three states:
    if ((x$maximum_values - x$minimum_values) > 1 && x$ordering == "ord") {
      # Get number of states:
      n_states <- (x$maximum_values - x$minimum_values) + 1
      # Build all zero matrix to begin with:
      x$model <- matrix(0, nrow = n_states, ncol = n_states, dimnames = list(x$minimum_values:x$maximum_values, x$minimum_values:x$maximum_values))
      # for each (just) off-diagonal value store 1 (i.e., N steps to move between adjacent states):
      for (i in 2:n_states) x$model[(i - 1), i] <- x$model[i, (i - 1)] <- 1
    }
    # Return full output:
    x
  }
  # Add ancestral state model for each character:
  data_list <- lapply(X = data_list, build_character_model)
  # Subfunction to get ancestral states:
  estimate_ancestral_state <- function(x, estimate_tip_values, threshold) {
    # As long as there is a tree:
    if (!is.null(x$tree)) {
      # If character is continuous:
      if (x$ordering == "cont") {
        # Get ancestral states using ace (namespace-qualified so ape need not be attached):
        x$ancestral_states <- ape::ace(x = x$tip_states, phy = x$tree)$ace
        # If character is discrete:
      } else {
        # If invariant character:
        if (ncol(x$tip_states) == 1) {
          # Get number of tips:
          n_tips <- ape::Ntip(phy = x$tree)
          # Set ancestral states as all the same:
          x$ancestral_states <- matrix(rep(x = 1, times = (n_tips + x$tree$Nnode)), ncol = 1, dimnames = list(c(x$tree$tip.label, (n_tips + 1):(n_tips + x$tree$Nnode)), colnames(x = x$tip_states)))
        }
        # If variant character then get ancestral states using rerooting method:
        if (ncol(x$tip_states) > 1) x$ancestral_states <- phytools::rerootingMethod(tree = x$tree, x = x$tip_states, model = x$model)$marginal.anc
        # Reformat to most likely state
        x$ancestral_states <- unlist(x = lapply(X = lapply(X = apply(x$ancestral_states, 1, list), unlist), function(x) {
          paste(names(x[x > (max(x) - threshold)]), collapse = "/")
        }))
        # If not estimating tip values then prune these:
        if (!estimate_tip_values) x$ancestral_states <- x$ancestral_states[-match(x$tree$tip.label, names(x$ancestral_states))]
      }
      # If no tree:
    } else {
      # Set ancestral states as NULL:
      x$ancestral_states <- vector(mode = "character")
    }
    # Return full output of x:
    x
  }
  # Get ancestral states for each character:
  data_list <- lapply(X = data_list, estimate_ancestral_state, estimate_tip_values, threshold)
  # Get Newick strings of all sampled subtrees (to use to avoid redundancy in tree node mapping):
  newick_strings <- unlist(x = lapply(X = data_list, function(x) ifelse(is.null(x$tree), NA, ape::write.tree(x$tree))))
  # Get just unique strings (i.e., the minimum set needded to map everything to the full tree):
  unique_newick_strings <- unique(x = newick_strings[!is.na(newick_strings)])
  # Convert unique Newick strings to unique trees:
  unique_trees <- ape::read.tree(text = unique_newick_strings)
  # If only a single tree reformat as a list:
  if (inherits(unique_trees, what = "phylo")) unique_trees <- list(unique_trees)
  # Subfunction to map nodes from pruned tree to full tree:
  map_to_full_tree <- function(pruned_tree, full_tree) {
    # Get number of tips of pruned tree:
    n_tips <- ape::Ntip(phy = pruned_tree)
    # Get number of nodes of pruned tree:
    n_nodes <- ape::Nnode(phy = pruned_tree)
    # Get all internal node numbers for pruned tree:
    node_numbers <- (n_tips + 1):(n_tips + n_nodes)
    # If the pruned tree is different to the full tree (namespace-qualified write.tree for consistency with the rest of the function):
    if (ape::write.tree(pruned_tree) != ape::write.tree(full_tree)) {
      # Get descendants of each node in pruned tree:
      node_descendants <- lapply(X = as.list(x = node_numbers), function(x) pruned_tree$tip.label[strap::FindDescendants(n = x, tree = pruned_tree)])
      # Get corresponding ancestral node in full tree:
      ancestral_nodes <- unlist(x = lapply(X = node_descendants, function(x) find_mrca(descendant_names = x, tree = full_tree)))
      # If pruned tree is identical to full tree (not pruned at all):
    } else {
      # Set ancestors as node numbers:
      ancestral_nodes <- node_numbers
    }
    # Output matrix matching node numbers of pruned tree to full tree:
    matrix(c(node_numbers, ancestral_nodes), ncol = 2, dimnames = list(c(), c("pruned_node", "full_node")))
  }
  # Get pruned node to full node for each unique tree:
  node_maps <- lapply(X = unique_trees, map_to_full_tree, full_tree = time_tree)
  # Build out for all trees (adds in any duplicated trees):
  node_maps <- node_maps[match(newick_strings, unique_newick_strings)]
  # Add node maps to data list:
  for (i in seq_along(data_list)) data_list[[i]]$node_maps <- node_maps[[i]]
  # Get all node names and numbers:
  nodes <- c(rownames(x = original_matrix), (n_tips + 1):(n_tips + n_nodes))
  # Renumber nodes of ancestral states:
  data_list <- lapply(X = data_list, function(x) {
    # Renumber nodes:
    names(x$ancestral_states)[match(as.character(x$node_maps[, "pruned_node"]), names(x$ancestral_states))] <- as.character(x$node_maps[, "full_node"])
    # Return renumbered nodes:
    x
  })
  # Collapse down to an ancestral state matrix ready for output:
  ancestral_state_matrix <- do.call(what = cbind, args = lapply(X = data_list, function(x) {
    # Get ancestral states for nodes:
    x$ancestral_states <- x$ancestral_states[nodes]
    # Add node names:
    names(x$ancestral_states) <- nodes
    # Return output:
    x$ancestral_states
  }))
  # Isolate estimated tip values (drop = FALSE guards the single-character case from collapsing to a vector):
  tip_matrix <- ancestral_state_matrix[rownames(x = original_matrix), , drop = FALSE]
  # If there are any missing values:
  if (any(is.na(tip_matrix))) {
    # Isolate missing values:
    missing_tip_states <- which(x = is.na(tip_matrix))
    # Replace missing values with original (unmodified) input values:
    tip_matrix[missing_tip_states] <- original_matrix[missing_tip_states]
    # Add tip values back into full output:
    ancestral_state_matrix[rownames(x = original_matrix), ] <- tip_matrix
  }
  # Get column (character) count for each matrix block:
  matrix_columns <- unlist(x = lapply(X = lapply(X = raw_cladistic_matrix[2:length(x = raw_cladistic_matrix)], "[[", "matrix"), ncol))
  # For each matrix block:
  for (i in seq_along(matrix_columns)) {
    # Insert portion of ancestral state estimate into block:
    raw_cladistic_matrix[[(i + 1)]]$matrix <- ancestral_state_matrix[, 1:matrix_columns[i], drop = FALSE]
    # Remove that portion from the block:
    ancestral_state_matrix <- ancestral_state_matrix[, -(1:matrix_columns[i]), drop = FALSE]
  }
  # Overwrite ancestral state output with updated raw input:
  ancestral_state_matrix <- raw_cladistic_matrix
  # Add tree to output:
  ancestral_state_matrix$topper$tree <- time_tree
  # Return ancestral state matrix:
  ancestral_state_matrix
}
|
/R/estimate_ancestral_states.R
|
no_license
|
cran/Claddis
|
R
| false
| false
| 23,515
|
r
|
#' Ancestral Character State Estimation
#'
#' @description
#'
#' Given a tree and a cladistic matrix uses likelihood to estimate the ancestral states for every character.
#'
#' @param cladistic_matrix A character-taxon matrix in the format imported by \link{read_nexus_matrix}.
#' @param time_tree A tree (phylo object) with branch lengths that represents the relationships of the taxa in \code{cladistic_matrix}.
#' @param estimate_all_nodes Logical that allows the user to make estimates for all ancestral values. The default (\code{FALSE}) will only make estimates for nodes that link coded terminals (recommended).
#' @param estimate_tip_values Logical that allows the user to make estimates for tip values. The default (\code{FALSE}) will only makes estimates for internal nodes (recommended).
#' @param inapplicables_as_missing Logical that decides whether or not to treat inapplicables as missing (TRUE) or not (FALSE, the default and recommended option).
#' @param polymorphism_behaviour One of either "equalp" or "treatasmissing".
#' @param uncertainty_behaviour One of either "equalp" or "treatasmissing".
#' @param threshold The threshold value to use when collapsing marginal likelihoods to discrete state(s).
#' @param all_missing_allowed Logical to allow all missing character values (generally not recommended, hence default is FALSE).
#'
#' @details
#'
#' At its core the function uses either the \link{rerootingMethod} (Yang et al. 1995) as implemented in the \link{phytools} package (for discrete characters) or the \link{ace} function in the \link{ape} package (for continuous characters) to make ancestral state estimates. For discrete characters these are collapsed to the most likely state (or states, given equal likelihoods or likelihood within a defined \code{threshold} value). In the latter case the resulting states are represented as an uncertainty (i.e., states separated by a slash, e.g., 0/1). This is the method developed for Brusatte et al. (2014).
#'
#' The function can deal with ordered or unordered characters and does so by allowing only indirect transitions (from 0 to 2 must pass through 1) or direct transitions (from 0 straight to 2), respectively. However, more complex step matrix transitions are not currently supported.
#'
#' Ancestral state estimation is complicated where polymorphic or uncertain tip values exist. These are not currently well handled here, although see the \code{fitpolyMk} function in \link{phytools} for a way these could be dealt with in future. The only available options right now are to either treat multiple states as being equally probable of the "true" tip state (i.e., a uniform prior) or to avoid dealing with them completely by treating them as missing (NA) values.
#'
#' It is also possible to try to use phylogenetic information to infer missing states, both for internal nodes (e.g., those leading to missing tip states) and for tips. This is captured by the \code{estimate_all_nodes} and \code{estimate_tip_values} options. These have been partially explored by Lloyd (2018), who cautioned against their use.
#'
#' @return
#'
#' The function will return the same \code{cladistic_matrix}, but with two key additions: 1. Internal nodes (numbered by \link{ape} formatting) will appear after taxa in each matrix block with estimated states coded for them, and 2. The time-scaled tree used will be added to \code{cladistic_matrix} as \code{cladistic_matrix$topper$tree}. Note that if using the \code{estimate_tip_values = TRUE} option then tip values may also be changed from those provided as input.
#'
#' @author Graeme T. Lloyd \email{graemetlloyd@@gmail.com} and Thomas Guillerme \email{guillert@@tcd.ie}
#'
#' @references
#'
#' Brusatte, S. L., Lloyd, G. T., Wang, S. C. and Norell, M. A., 2014. Gradual assembly of avian body plan culminated in rapid rates of evolution across dinosaur-bird transition. \emph{Current Biology}, 24, 2386-2392.
#'
#' Lloyd, G. T., 2018. Journeys through discrete-character morphospace: synthesizing phylogeny, tempo, and disparity. \emph{Palaeontology}, \bold{61}, 637-645.
#'
#' Yang, Z., Kumar, S. and Nei, M., 1995. A new method of inference of ancestral nucleotide and amino acid sequences. \emph{Genetics}, \bold{141}, 1641-1650.
#'
#' @examples
#'
#' # Set random seed:
#' set.seed(4)
#'
#' # Generate a random tree for the Day data set:
#' time_tree <- ape::rtree(n = nrow(day_2016$matrix_1$matrix))
#'
#' # Update taxon names to match those in the data matrix:
#' time_tree$tip.label <- rownames(x = day_2016$matrix_1$matrix)
#'
#' # Set root time by making youngest taxon extant:
#' time_tree$root.time <- max(diag(x = ape::vcv(phy = time_tree)))
#'
#' # Use Day matrix as cladistic matrix:
#' cladistic_matrix <- day_2016
#'
#' # Prune most characters out to make example run fast:
#' cladistic_matrix <- prune_cladistic_matrix(cladistic_matrix,
#' characters2prune = c(2:3, 5:37)
#' )
#'
#' # Estimate ancestral states:
#' estimate_ancestral_states(
#' cladistic_matrix = cladistic_matrix,
#' time_tree = time_tree
#' )
#' @export estimate_ancestral_states
estimate_ancestral_states <- function(cladistic_matrix, time_tree, estimate_all_nodes = FALSE, estimate_tip_values = FALSE, inapplicables_as_missing = FALSE, polymorphism_behaviour = "equalp", uncertainty_behaviour = "equalp", threshold = 0.01, all_missing_allowed = FALSE) {
# How to get predicted tip states for a continuous character? (Phytools answer: http://blog.phytools.org/2013/11/reconstructed-ancestral-tip-states-for.html)
# - So basically under ML just inherit state from ancestral node (really this is mean of distribution where sd would grow with duration of branch so to allow the possibility of variance this could also be sampled stochastically
# How to deal with step matrices?
# How to deal with models where intermediate tip states are not even in sample
# Change help file to explain interactions between all options, e.g., if doing all chars then polymorphisms used for discrete, midpoint for continuous etc.
# Handle all missing/inapplicable case properly
# Handle only two tips case properly
# Add Liam Revell polymorphism options
# Check cladistic_matrix has class cladisticMatrix and stop and warn user if not:
if (!inherits(x = cladistic_matrix, what = "cladisticMatrix")) stop("cladistic_matrix must be an object of class \"cladisticMatrix\".")
# Get number of tips in tree:
n_tips <- ape::Ntip(phy = time_tree)
# Get number of nodes in tree:
n_nodes <- ape::Nnode(phy = time_tree)
# Catch problem with trees with no branch lengths:
if (is.null(time_tree$edge.length)) stop("time_tree must have branch lengths.")
# Catch problem with polytomies:
if (time_tree$Nnode < (n_tips - 1)) stop("time_tree must be fully bifurcating.")
# Catch problem with zero-length branches:
if (any(time_tree$edge.length == 0)) stop("time_tree must not have zero-length branches.")
# Check for step matrices and stop and warn if found:
if (length(x = cladistic_matrix$topper$step_matrices) > 0) stop("Function can not currently deal with step matrices.")
# Check estimate_all_nodes is a logical:
if (!is.logical(estimate_all_nodes)) stop("estimate_all_nodes must be a logical (TRUE or FALSE).")
# Check estimate_tip_values is a logical:
if (!is.logical(estimate_tip_values)) stop("estimate_tip_values must be a logical (TRUE or FALSE).")
# Check inapplicables_as_missing is a logical:
if (!is.logical(inapplicables_as_missing)) stop("inapplicables_as_missing must be a logical (TRUE or FALSE).")
# Check polymorphism_behaviour is a single allowable value:
if (length(x = polymorphism_behaviour) != 1 || !any(c("equalp", "treatasmissing") == polymorphism_behaviour)) stop("polymorphism_behaviour must be a single value of either, \"equalp\" or \"treatasmissing\".")
# Check uncertainty_behaviour is a single allowable value:
if (length(x = uncertainty_behaviour) != 1 || !any(c("equalp", "treatasmissing") == uncertainty_behaviour)) stop("uncertainty_behaviour must be a single value of either, \"equalp\" or \"treatasmissing\".")
# Check threshold is a numeric value between the limits of zero and one:
if (!is.numeric(threshold) || threshold > 0.5 || threshold < 0) stop("threshold must be a numeric value between 0 and 0.5.")
# Collapse matrix to vectors for each character (state and ordering combination):
collapsed_matrix <- unname(unlist(x = lapply(X = cladistic_matrix[2:length(x = cladistic_matrix)], function(x) apply(rbind(x$matrix, x$ordering), 2, paste, collapse = ""))))
# Isolate ordering elements:
ordering <- unlist(x = lapply(X = cladistic_matrix[2:length(x = cladistic_matrix)], "[[", "ordering"))
# Isolate minimum values:
minimum_values <- unlist(x = lapply(X = cladistic_matrix[2:length(x = cladistic_matrix)], "[[", "minimum_values"))
# Isolate maximum values:
maximum_values <- unlist(x = lapply(X = cladistic_matrix[2:length(x = cladistic_matrix)], "[[", "maximum_values"))
# Store raw original matrix:
raw_cladistic_matrix <- cladistic_matrix
# Combine matrix blocks into a single matrix:
cladistic_matrix <- original_matrix <- do.call(what = cbind, args = lapply(X = cladistic_matrix[2:length(x = cladistic_matrix)], "[[", "matrix"))
# Find any failed name matches:
failed_name_matches <- c(setdiff(x = rownames(x = cladistic_matrix), y = time_tree$tip.label), setdiff(x = time_tree$tip.label, y = rownames(x = cladistic_matrix)))
# Check there are no failed name matches and stop and report if found:
if (length(x = failed_name_matches) > 0) stop(paste("The following names do not match between the tree and matrix: ", paste(sort(x = failed_name_matches), collapse = ", "), ". Check spelling and try again.", sep = ""))
# If treating inapplicables as missing (and there is at least one inapplicable) replace with NA:
if (inapplicables_as_missing && length(x = which(x = cladistic_matrix == "")) > 0) cladistic_matrix[which(x = cladistic_matrix == "")] <- NA
# If treating polymorphisms as missing:
if (polymorphism_behaviour == "treatasmissing" && length(x = grep("&", cladistic_matrix)) > 0) cladistic_matrix[grep("&", cladistic_matrix)] <- NA
# If treating uncertainties as missing:
if (uncertainty_behaviour == "treatasmissing" && length(x = grep("/", cladistic_matrix)) > 0) cladistic_matrix[grep("/", cladistic_matrix)] <- NA
# Get vector of character numbers where all values are NA:
dataless_characters <- which(x = apply(cladistic_matrix, 2, function(x) all(is.na(x))))
# Look for all missing characters and stop and wanr user if found:
if (!all_missing_allowed && length(x = dataless_characters) > 0) stop(paste0("The following characters are coded as missing across all tips: ", paste0(dataless_characters, collapse = ", "), ". This can arise either because of the input data (in which case it is recommended that the user prune these characters using prune_cladistic_matrix) or because of the chosen options for inapplicables_as_missing, polymorphism_behaviour, and/or uncertainty_behaviour (in which case the user may wish to chose different values for these)."))
# Convert tip states into a list:
data_list <- apply(cladistic_matrix, 2, function(x) list(tip_states = x))
# For each character:
for (i in 1:length(x = data_list)) {
# Add minimum value to list:
data_list[[i]]$minimum_values <- unname(minimum_values[i])
# Add maximum value to list:
data_list[[i]]$maximum_values <- unname(maximum_values[i])
# Add ordering to list:
data_list[[i]]$ordering <- unname(ordering[i])
# Add tree to list:
data_list[[i]]$tree <- time_tree
}
# If estimating values for all characters (need to set dummy tip states for missing values):
if (estimate_all_nodes) {
# Subfunction to fill missing values (and inapplicables if desired):
fill_missing <- function(tip_states) {
# Find which rows correspond to missing states:
missingRows <- which(x = is.na(tip_states$tip_states))
# If missing states found:
if (length(x = missingRows) > 0) {
# Build missing state by either forming a polymorphism of all possible tip states, or if continuous the midpoint value:
fill_states <- ifelse(tip_states$ordering == "cont", (tip_states$minimum_values + tip_states$maximum_values) / 2, paste(tip_states$minimum_values:tip_states$maximum_values, collapse = "/"))
# Insert missing values:
tip_states$tip_states[missingRows] <- fill_states
}
# Return tip states with missing values replaced:
tip_states
}
# Apply fill missing function across all characters:
data_list <- lapply(X = data_list, fill_missing)
}
# Subfunction to prune tips with missing or inapplicable values:
prune_tips <- function(x) {
# Find all missing or inapplicable value tip names:
missing <- names(sort(x = c(which(x = x$tip_states == ""), which(x = is.na(x$tip_states)))))
# Work out how many tips will be left after pruning:
n_tips_remaining <- length(x = setdiff(x = names(x$tip_states), y = missing))
# If there is at least one missing value:
if (length(x = missing) > 0) {
# If less than two tips will remain then set tree as NULL:
if (n_tips_remaining < 2) x$tree <- NULL
# If at least two tips will remain prune missing values from tree:
if (n_tips_remaining > 1) x$tree <- ape::drop.tip(phy = x$tree, tip = missing)
# Collapse tip states:
x$tip_states <- x$tip_states[setdiff(x = names(x$tip_states), y = missing)]
}
# Return pruned output:
x
}
# Prune out missing and inapplicable tips:
data_list <- lapply(X = data_list, prune_tips)
# Subfunction to build tip state matrices:
convert_tip_states_to_matrix <- function(x) {
# As long asthere is at least one tip state:
if (length(x = x$tip_states) > 0) {
# If the character is not continuous (i.e., it is some form of discrete character):
if (x$ordering != "cont") {
# Temporarily store tip states so matrix format can overwrite the stored version below:
tip_states <- x$tip_states
# Create matrix of tip state probabilities:
x$tip_states <- matrix(0, nrow = length(x = x$tip_states), ncol = x$maximum_values - x$minimum_values + 1, dimnames = list(names(x$tip_states), x$minimum_values:x$maximum_values))
# For each character state if a single state is coded store probability as 1:
for (i in colnames(x = x$tip_states)) x$tip_states[tip_states == i, i] <- 1
# If there are polymorphisms and/or uncertainties:
if (length(x = grep("&|/", tip_states)) > 0) {
# Get polymorphism locations:
polymorphisms <- grep("&", tip_states)
# Get uncertainty locations:
uncertainties <- grep("/", tip_states)
# If there are polymorphisms and using the "equalp" (equal probability of each state) option:
if (length(x = polymorphisms) > 0 && polymorphism_behaviour == "equalp") {
# For each polymorphisms set each state as equally probable:
for (i in polymorphisms) x$tip_states[i, strsplit(tip_states[i], split = "&")[[1]]] <- 1 / length(x = strsplit(tip_states[i], split = "&")[[1]])
}
# If there are uncertainties and using the "equalp" (equal probability of each state) option:
if (length(x = uncertainties) > 0 && uncertainty_behaviour == "equalp") {
# For each uncertainty set each state as equally probable:
for (i in uncertainties) x$tip_states[i, strsplit(tip_states[i], split = "/")[[1]]] <- 1 / length(x = strsplit(tip_states[i], split = "/")[[1]])
}
}
# If a continuous character:
} else {
# Simply make tip states the numeric values (should never be a polymorphism) as a vector:
x$tip_states <- as.numeric(x$tip_states)
}
# If tip state has no length (all values are missing):
} else {
# Create row-less tip states matrix:
x$tip_states <- matrix(nrow = 0, ncol = 1, dimnames = list(c(), "0"))
}
# Return the revised input in the same list format:
x
}
# Reformat tip states ready for ancestral estimation:
data_list <- lapply(X = data_list, convert_tip_states_to_matrix)
# Subfunction to build character model:
build_character_model <- function(x) {
# Set default model to equal rates (works for all binary or unordered characters):
x$model <- "ER"
# If a character is both ordered and has at least three states:
if ((x$maximum_values - x$minimum_values) > 1 && x$ordering == "ord") {
# Get number of states:
n_states <- (x$maximum_values - x$minimum_values) + 1
# Build all zero matrix to begin with:
x$model <- matrix(0, nrow = n_states, ncol = n_states, dimnames = list(x$minimum_values:x$maximum_values, x$minimum_values:x$maximum_values))
# for each (just) off-diagonal value store 1 (i.e., N steps to move between adjacent states):
for (i in 2:n_states) x$model[(i - 1), i] <- x$model[i, (i - 1)] <- 1
}
# Return full output:
x
}
# Add ancestral state model for each character:
data_list <- lapply(X = data_list, build_character_model)
# Subfunction to get ancestral states:
estimate_ancestral_state <- function(x, estimate_tip_values, threshold) {
# As long as there is a tree:
if (!is.null(x$tree)) {
# If character is continuous:
if (x$ordering == "cont") {
# Get ancestral states using ace:
x$ancestral_states <- ace(x = x$tip_states, phy = x$tree)$ace
# If character is discrete:
} else {
# If invariant character:
if (ncol(x$tip_states) == 1) {
# Get number of tips:
n_tips <- ape::Ntip(phy = x$tree)
# Set ancestral states as all the same:
x$ancestral_states <- matrix(rep(x = 1, times = (n_tips + x$tree$Nnode)), ncol = 1, dimnames = list(c(x$tree$tip.label, (n_tips + 1):(n_tips + x$tree$Nnode)), colnames(x = x$tip_states)))
}
# If variant character then get ancestral states using rerooting method:
if (ncol(x$tip_states) > 1) x$ancestral_states <- phytools::rerootingMethod(tree = x$tree, x = x$tip_states, model = x$model)$marginal.anc
# Reformat to most likely state
x$ancestral_states <- unlist(x = lapply(X = lapply(X = apply(x$ancestral_states, 1, list), unlist), function(x) {
paste(names(x[x > (max(x) - threshold)]), collapse = "/")
}))
# If not estimating tip values then prune these:
if (!estimate_tip_values) x$ancestral_states <- x$ancestral_states[-match(x$tree$tip.label, names(x$ancestral_states))]
}
# If no tree:
} else {
# Set ancestral states as NULL:
x$ancestral_states <- vector(mode = "character")
}
# Return full output of x:
x
}
# Get ancestral states for each character:
data_list <- lapply(X = data_list, estimate_ancestral_state, estimate_tip_values, threshold)
# Get Newick strings of all sampled subtrees (to use to avoid redundancy in tree node mapping):
newick_strings <- unlist(x = lapply(X = data_list, function(x) ifelse(is.null(x$tree), NA, ape::write.tree(x$tree))))
# Get just unique strings (i.e., the minimum set needded to map everything to the full tree):
unique_newick_strings <- unique(x = newick_strings[!is.na(newick_strings)])
# Convert unique Newick strings to unique trees:
unique_trees <- ape::read.tree(text = unique_newick_strings)
# If only a single tree reformat as a list:
if (inherits(unique_trees, what = "phylo")) unique_trees <- list(unique_trees)
# Subfunction to map nodes from pruned tree to full tree:
map_to_full_tree <- function(pruned_tree, full_tree) {
# Get number of tips of pruned tree:
n_tips <- ape::Ntip(phy = pruned_tree)
# Get number of nodes of pruned tree:
n_nodes <- ape::Nnode(phy = pruned_tree)
# Get all internal node numbers for pruned tree:
node_numbers <- (n_tips + 1):(n_tips + n_nodes)
# If the pruned tree is different to the full tree:
if (write.tree(pruned_tree) != write.tree(full_tree)) {
# Get descendants of each node in pruned tree:
node_descendants <- lapply(X = as.list(x = node_numbers), function(x) pruned_tree$tip.label[strap::FindDescendants(n = x, tree = pruned_tree)])
# Get corresponding ancestral node in full tree:
ancestral_nodes <- unlist(x = lapply(X = node_descendants, function(x) find_mrca(descendant_names = x, tree = full_tree)))
# If pruned tree is identical to full tree (not pruned at all):
} else {
# Set ancestors as node numbers:
ancestral_nodes <- node_numbers
}
# Output matrix matching node numbers of pruned tree to full tree:
matrix(c(node_numbers, ancestral_nodes), ncol = 2, dimnames = list(c(), c("pruned_node", "full_node")))
}
# Get pruned node to full node for each unique tree:
node_maps <- lapply(X = unique_trees, map_to_full_tree, full_tree = time_tree)
# Build out for all trees (adds in any duplicated trees):
node_maps <- node_maps[match(newick_strings, unique_newick_strings)]
# Add node maps to data list:
for (i in 1:length(x = data_list)) data_list[[i]]$node_maps <- node_maps[[i]]
# Get all node names and numbers:
nodes <- c(rownames(x = original_matrix), (n_tips + 1):(n_tips + n_nodes))
# Renumber nodes of ancestral states:
data_list <- lapply(X = data_list, function(x) {
# Renumber nodes:
names(x$ancestral_states)[match(as.character(x$node_maps[, "pruned_node"]), names(x$ancestral_states))] <- as.character(x$node_maps[, "full_node"])
# Return renumbered nodes:
x
})
# Collapse down to an ancestral state matrix ready for output:
ancestral_state_matrix <- do.call(what = cbind, args = lapply(X = data_list, function(x) {
# Get ancestral states for nodes:
x$ancestral_states <- x$ancestral_states[nodes]
# Add node names:
names(x$ancestral_states) <- nodes
# Return output:
x$ancestral_states
}))
# Isolate estimated tip values:
tip_matrix <- ancestral_state_matrix[rownames(x = original_matrix), ]
# If there are any missing values:
if (any(is.na(tip_matrix))) {
# Isolate missing values:
missing_tip_states <- which(x = is.na(tip_matrix))
# Replace missing values with original (unmodified) input values:
tip_matrix[missing_tip_states] <- original_matrix[missing_tip_states]
# Add tip values back into full output:
ancestral_state_matrix[rownames(x = original_matrix), ] <- tip_matrix
}
# Get column (character) count for each matrix block:
matrix_columns <- unlist(x = lapply(X = lapply(X = raw_cladistic_matrix[2:length(x = raw_cladistic_matrix)], "[[", "matrix"), ncol))
# For each matrix block:
for (i in 1:length(x = matrix_columns)) {
# Insert portion of ancestral state estimate into block:
raw_cladistic_matrix[[(i + 1)]]$matrix <- ancestral_state_matrix[, 1:matrix_columns[i], drop = FALSE]
# Remove that portion from the block:
ancestral_state_matrix <- ancestral_state_matrix[, -(1:matrix_columns[i]), drop = FALSE]
}
# Overwrite ancestral state output with updated raw input:
ancestral_state_matrix <- raw_cladistic_matrix
# Add tree to output:
ancestral_state_matrix$topper$tree <- time_tree
# Return ancestral state matrix:
ancestral_state_matrix
}
|
## Week 1, Course Project 1
# NOTE(review): setwd() inside a script is fragile -- kept to preserve the
# original workflow, but prefer running from the project directory.
setwd("~/R Projects/Coursera/04_ExpDataAnalysis/Week 1")
# Load the full dataset; "?" marks missing observations in the raw file.
df <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings = "?")
# Combine the Date and Time columns into a single timestamp. Wrap strptime in
# as.POSIXct: strptime returns POSIXlt, which is a list-of-9 and misbehaves
# when stored as a data frame column; POSIXct is the safe column type.
df$DateTime <- as.POSIXct(strptime(paste(df$Date, df$Time), "%d/%m/%Y %T", tz="GMT"), tz="GMT")
# Subset the data to 2007-02-01 through 2007-02-02 (both days inclusive).
set <- subset(df, as.Date(DateTime)>="2007-02-01" & as.Date(DateTime)<="2007-02-02")
## Plot #3
# Energy sub metering over time: one line per sub meter, written to PNG.
png("plot3.png",
    width = 480,
    height = 480,
    units = "px")
plot(x = set$DateTime,
     y = set$Sub_metering_1,
     type="l",
     xlab="",
     ylab="Energy sub metering")
points(x = set$DateTime,
       y = set$Sub_metering_2,
       type = "l",
       col = "red")
points(x = set$DateTime,
       y = set$Sub_metering_3,
       type = "l",
       col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = "solid",
       col=c("black","red","blue"))
dev.off()
|
/plot3.R
|
no_license
|
dzastera/ExData_Plotting1
|
R
| false
| false
| 1,059
|
r
|
## Week 1, Course Project 1
# NOTE(review): setwd() inside a script is fragile -- kept to preserve the
# original workflow, but prefer running from the project directory.
setwd("~/R Projects/Coursera/04_ExpDataAnalysis/Week 1")
# Load the full dataset; "?" marks missing observations in the raw file.
df <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings = "?")
# Combine the Date and Time columns into a single timestamp. Wrap strptime in
# as.POSIXct: strptime returns POSIXlt, which is a list-of-9 and misbehaves
# when stored as a data frame column; POSIXct is the safe column type.
df$DateTime <- as.POSIXct(strptime(paste(df$Date, df$Time), "%d/%m/%Y %T", tz="GMT"), tz="GMT")
# Subset the data to 2007-02-01 through 2007-02-02 (both days inclusive).
set <- subset(df, as.Date(DateTime)>="2007-02-01" & as.Date(DateTime)<="2007-02-02")
## Plot #3
# Energy sub metering over time: one line per sub meter, written to PNG.
png("plot3.png",
    width = 480,
    height = 480,
    units = "px")
plot(x = set$DateTime,
     y = set$Sub_metering_1,
     type="l",
     xlab="",
     ylab="Energy sub metering")
points(x = set$DateTime,
       y = set$Sub_metering_2,
       type = "l",
       col = "red")
points(x = set$DateTime,
       y = set$Sub_metering_3,
       type = "l",
       col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = "solid",
       col=c("black","red","blue"))
dev.off()
|
plot.PCA <- function (x, axes = c(1, 2), choix = c("ind","var","varcor"),
ellipse = NULL, xlim = NULL, ylim = NULL, habillage = "none",
col.hab = NULL, col.ind = "black", col.ind.sup = "blue",
col.quali = "magenta", col.quanti.sup = "blue",
col.var = "black", label=c("all","none","ind", "ind.sup", "quali", "var", "quanti.sup"),
invisible = c("none","ind", "ind.sup", "quali","var", "quanti.sup"), lim.cos2.var = 0.,
title = NULL, palette=NULL, autoLab=c("auto","yes","no"),new.plot=FALSE,
select=NULL, unselect = 0.7,shadowtext = FALSE, legend = list(bty = "y", x = "topleft"),
graph.type = c("ggplot","classic"), ggoptions = NULL, ...){
res.pca <- x
argument <- list(...)
if (!is.null(argument[["cex"]]) & is.null(ggoptions["size"])) ggoptions["size"] <- 4*argument$cex
ggoptions_default <- list(size = 4, point.shape = 19, line.lty = 2, line.lwd = 0.5, line.color = "black", segment.lty = 1, segment.lwd = 0.5, circle.lty = 1, circle.lwd = 0.5, circle.color = "black", low.col.quanti = "blue", high.col.quanti = "red3")
if (!is.null(ggoptions[1])) ggoptions_default[names(ggoptions)] = ggoptions[names(ggoptions)]
if (!inherits(res.pca, "PCA")) stop("non convenient data")
if (is.numeric(unselect)) if ((unselect>1)|(unselect<0)) stop("unselect should be betwwen 0 and 1")
autoLab <- match.arg(autoLab,c("auto","yes","no"))
if (autoLab == "yes") autoLab=TRUE
if (autoLab == "no") autoLab=FALSE
old.palette <- palette()
if (is.null(palette)) palette <- c("black", "red", "green3", "blue", "magenta", "darkgoldenrod","darkgray", "orange", "cyan", "violet", "lightpink", "lavender", "yellow", "darkgreen","turquoise", "lightgrey", "lightblue", "darkkhaki","darkmagenta","lightgreen", "darkolivegreen", "lightcyan", "darkorange","darkorchid", "darkred", "darksalmon", "darkseagreen","darkslateblue", "darkslategray", "darkslategrey","darkturquoise", "darkviolet", "lightgray", "lightsalmon","lightyellow", "maroon")
palette(palette) # that is necessary
label <- match.arg(label,c("all","none","ind", "ind.sup", "quali", "var", "quanti.sup"),several.ok=TRUE)
invisible <- match.arg(invisible,c("none","ind", "ind.sup", "quali","var", "quanti.sup"),several.ok=TRUE)
if ("none"%in%invisible) invisible = NULL
choix <- match.arg(choix,c("ind","var","varcor"))
graph.type <- match.arg(graph.type[1],c("ggplot","classic"))
lab.ind <- lab.quali <- lab.var <- lab.quanti <- lab.ind.sup <- FALSE
if (length(label)==1 && label=="all") lab.ind <- lab.quali <- lab.var <- lab.quanti <- lab.ind.sup <-TRUE
if ("ind" %in% label) lab.ind<-TRUE
if ("quali" %in% label) lab.quali<-TRUE
if ("var" %in% label) lab.var<-TRUE
if ("quanti.sup" %in% label) lab.quanti<-TRUE
if ("ind.sup" %in% label) lab.ind.sup<-TRUE
lab.x <- paste("Dim ",axes[1]," (",format(res.pca$eig[axes[1],2],nsmall=2,digits=2),"%)",sep="")
lab.y <- paste("Dim ",axes[2]," (",format(res.pca$eig[axes[2],2],nsmall=2,digits=2),"%)",sep="")
if (graph.type == "ggplot"){
if(!is.null(col.hab)) palette <- col.hab
theme <- theme(
axis.title = element_text(hjust = 1, size = if (is.null(argument[["cex.axis"]])) {10} else {10*argument$cex.axis},face = 2),
plot.title = element_text(hjust = 0.5, size = if (is.null(argument[["cex.main"]])) {11} else {11*argument$cex.main},face = 2),
legend.position = ifelse(legend$x %in% c("bottom","up","right","left"), legend$x, "right"),
legend.box.spacing=unit(0.1, 'cm'),legend.margin=margin()
)
liste.quali <- colnames(res.pca$call$quali.sup$quali.sup)
liste.quanti <- colnames(res.pca$call$X)[which(!(colnames(res.pca$call$X) %in% liste.quali))]
hab_2 <- c(colnames(res.pca$call$X), "contrib", "cos2")
if((habillage != "none") && !(habillage[1] %in% hab_2) && (habillage != "ind")) habillage[1] = colnames(res.pca$call$X)[as.numeric(habillage[1])]
if(habillage[1] != "none" && length(habillage) == 2){
if(!habillage[2] %in% hab_2) habillage[2] = colnames(res.pca$call$X)[as.numeric(habillage[2])]
if (length(habillage) > 2) {
warning("Habillage must be either length 1 or 2 : only 2 first arguments will be used")
habillage = habillage[1:2]
}
if ((length(habillage) == 2) & !("cos2" %in% habillage) & !("contrib" %in% habillage)){
if(!(habillage[2] %in% liste.quali)){
if (!(habillage[1] %in% liste.quali)){
habillage = habillage[1]
}
else{
habillage = habillage[2:1]
}
}}
if(length(habillage) == 1 && !(habillage %in% hab_2)) habillage = as.numeric(habillage)
if((length(habillage) == 2) & (habillage[2] %in% c("contrib","cos2"))) habillage = habillage[2:1]
if((length(habillage) == 2) & (habillage[1] %in% c("contrib","cos2")) & !(habillage[2] %in% hab_2)) habillage[2] = colnames(res.pca$call$X)[as.integer(habillage[2])]
if(class(habillage[1]) %in% c("numeric","integer") && class(habillage[2]) %in% c("numeric","integer")) habillage = c(colnames(res.pca$call$X)[habillage[1]],colnames(res.pca$call$X)[habillage[2]])
if(("cos2" %in% habillage) || ("contrib" %in% habillage)){
if((habillage[2] %in% liste.quanti) || (habillage[1] %in% liste.quanti)) habillage = habillage[1]
}
if(("cos2" %in% habillage) && ("contrib" %in% habillage)) habillage = habillage[1]
}
}
if (choix == "ind") {
if (is.null(title)) titre <- "PCA graph of individuals"
else titre <- title
coord.actif <- res.pca$ind$coord[, axes,drop=FALSE]
coord.illu <- coord.quali <- coord.ellipse <- NULL
if (!is.null(res.pca$ind.sup)) coord.illu <- res.pca$ind.sup$coord[, axes,drop=FALSE]
if (!is.null(res.pca$quali.sup)) coord.quali <- res.pca$quali.sup$coord[, axes,drop=FALSE]
if (!is.null(ellipse)) coord.ellipse <- ellipse$res
test.invisible <- vector(length = 2)
if (!is.null(invisible)) {
test.invisible[1] <- match("ind", invisible)
test.invisible[2] <- match("ind.sup", invisible)
test.invisible[3] <- match("quali", invisible)
}
else test.invisible <- rep(NA, 3)
nullxlimylim <- (is.null(xlim) & is.null(ylim))
if (is.null(xlim)) {
xmin <- xmax <- 0
if(is.na(test.invisible[1])) xmin <- min(xmin, coord.actif[,1])
if(is.na(test.invisible[1])) xmax <- max(xmax, coord.actif[,1])
if(!is.null(coord.illu)&is.na(test.invisible[2])) xmin <- min(xmin, coord.illu[, 1])
if(!is.null(coord.illu)&is.na(test.invisible[2])) xmax <- max(xmax, coord.illu[, 1])
if(!is.null(coord.quali)&is.na(test.invisible[3])) xmin <- min(xmin, coord.quali[, 1])
if(!is.null(coord.quali)&is.na(test.invisible[3])) xmax <- max(xmax, coord.quali[, 1])
if(!is.null(coord.ellipse)&is.na(test.invisible[3])) xmin <- min(xmin, coord.ellipse[, 2])
if(!is.null(coord.ellipse)&is.na(test.invisible[3])) xmax <- max(xmax, coord.ellipse[, 2])
# xlim <- c(xmin, xmax) * 1.2
xlim <- c(xmin, xmax)
xlim <- (xlim-mean(xlim))*1.2 + mean(xlim)
}
if (is.null(ylim)) {
ymin <- ymax <- 0
if(is.na(test.invisible[1])) ymin <- min(ymin, coord.actif[,2])
if(is.na(test.invisible[1])) ymax <- max(ymax, coord.actif[,2])
if(!is.null(coord.illu)&is.na(test.invisible[2])) ymin <- min(ymin, coord.illu[, 2])
if(!is.null(coord.illu)&is.na(test.invisible[2])) ymax <- max(ymax, coord.illu[, 2])
if(!is.null(coord.quali)&is.na(test.invisible[3])) ymin <- min(ymin, coord.quali[, 2])
if(!is.null(coord.quali)&is.na(test.invisible[3])) ymax <- max(ymax, coord.quali[, 2])
if(!is.null(coord.ellipse)&is.na(test.invisible[3])) ymin <- min(ymin, coord.ellipse[, 3])
if(!is.null(coord.ellipse)&is.na(test.invisible[3])) ymax <- max(ymax, coord.ellipse[, 3])
ylim <- c(ymin, ymax)
ylim <- (ylim-mean(ylim))*1.2 + mean(ylim)
}
if (nullxlimylim & diff(xlim)/diff(ylim)>3) ylim <- (ylim-mean(ylim))*diff(xlim)/diff(ylim)/3 + mean(ylim)
if (nullxlimylim & diff(xlim)/diff(ylim)<1/2) xlim <- (xlim-mean(xlim))*diff(ylim)/diff(xlim)/2 + mean(xlim)
if(graph.type=="ggplot") nudge_y <- (ylim[2] - ylim[1])*0.03
selection <- NULL
if (!is.null(select)) {
if (mode(select)=="numeric") selection <- select
else {
if (sum(rownames(res.pca$ind$coord)%in%select)+sum(rownames(res.pca$ind.sup$coord)%in%select)!=0) selection <- which(rownames(res.pca$ind$coord)%in%select)
else {
if (grepl("contrib",select[1])) selection <- (rev(order(res.pca$ind$contrib[,axes[1],drop=FALSE]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2],drop=FALSE]*res.pca$eig[axes[2],1])))[1:min(nrow(res.pca$ind$coord),sum(as.integer(unlist(strsplit(select,"contrib"))),na.rm=T))]
# if (grepl("contrib",select)) selection <- (rev(order(apply(res.pca$ind$contrib[,axes],1,sum))))[1:min(nrow(res.pca$ind$coord),sum(as.integer(unlist(strsplit(select,"contrib"))),na.rm=T))]
if (grepl("dist",select[1])) selection <- (rev(order(res.pca$ind$dist)))[1:min(nrow(res.pca$ind$coord),sum(as.integer(unlist(strsplit(select,"dist"))),na.rm=T))]
if (grepl("coord",select[1])) selection <- (rev(order(apply(res.pca$ind$coord[,axes]^2,1,sum))))[1:min(nrow(res.pca$ind$coord),sum(as.integer(unlist(strsplit(select,"coord"))),na.rm=T))]
if (grepl("cos2",select[1])) {
if (sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T)>=1) selection <- (rev(order(apply(res.pca$ind$cos2[,axes],1,sum))))[1:min(nrow(res.pca$ind$coord),sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))]
else selection <- which(apply(res.pca$ind$cos2[,axes],1,sum)>sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))
}
if (is.integer(select)) selection <- select
}
}
}
selectionS <- NULL
if ((!is.null(select))&(!is.null(res.pca$ind.sup$coord))&is.na(test.invisible[2])) {
if (mode(select)=="numeric") selectionS <- select
else {
if (sum(rownames(res.pca$ind$coord)%in%select)+sum(rownames(res.pca$ind.sup$coord)%in%select)!=0) selectionS <- which(rownames(res.pca$ind.sup$coord)%in%select)
else {
if (grepl("dist",select[1])) selectionS <- (rev(order(res.pca$ind.sup$dist)))[1:min(nrow(res.pca$ind.sup$coord),sum(as.integer(unlist(strsplit(select,"dist"))),na.rm=T))]
if (grepl("coord",select[1])) selectionS <- (rev(order(apply(res.pca$ind.sup$coord[,axes]^2,1,sum))))[1:min(nrow(res.pca$ind.sup$coord),sum(as.integer(unlist(strsplit(select,"coord"))),na.rm=T))]
if (grepl("cos2",select[1])) {
if (sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T)>=1) selectionS <- (rev(order(apply(res.pca$ind.sup$cos2[,axes,drop=FALSE],1,sum))))[1:min(nrow(res.pca$ind.sup$coord),sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))]
else selectionS <- which(apply(res.pca$ind.sup$cos2[,axes,drop=FALSE],1,sum)>sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))
}
if (is.integer(select)) selectionS <- select
}
}
}
## PARTIE GRAPHIQUE
if (graph.type =="ggplot") color.ind <- NULL
if ((new.plot)&!nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY"))) dev.new(width=min(14,8*diff(xlim)/diff(ylim)),height=8)
if (is.null(palette)) palette = (c("black","red","green3","blue","cyan","magenta","darkgray","darkgoldenrod","darkgreen","violet","turquoise","orange","lightpink","lavender","yellow","lightgreen","lightgrey","lightblue","darkkhaki", "darkmagenta","darkolivegreen","lightcyan", "darkorange", "darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkslategrey","darkturquoise","darkviolet", "lightgray","lightsalmon","lightyellow", "maroon"))
if (habillage[1] == "none") {
color.ind <- rep(col.ind,nrow(coord.actif))
color.mod <- col.quali
if (!is.null(res.pca$ind.sup)&is.na(test.invisible[2])) color.ind.sup <- rep(col.ind.sup,nrow(res.pca$ind.sup$coord))
}
if (habillage[1] == "ind") {
if (is.null(coord.illu)){
if(length(col.hab) == nrow(coord.actif)) color.ind <- col.hab
else color.ind <- c(1:nrow(coord.actif))
} else{
if (length(col.hab)== nrow(coord.actif)+nrow(coord.illu)){
color.ind <- col.hab[-res.pca$call$ind.sup]
color.ind.sup <- col.hab[res.pca$call$ind.sup]
} else {
color.ind <- c(1:nrow(coord.actif))
color.ind.sup <- c((nrow(coord.actif)+1):(nrow(coord.actif)+nrow(coord.illu)))
}
}
color.mod <- "darkred"
}
liste.quali <- NULL
if ((habillage[1] != "none")&(habillage[1] != "ind")&(habillage[1] != "cos2")&(habillage[1] != "contrib")){
liste.quali <- colnames(res.pca$call$quali.sup$quali.sup)
if(!(class(res.pca$call$X[,habillage[1]])[1] %in% c("numeric","double","integer"))) {
if (is.numeric(habillage)) nom.quali <- colnames(res.pca$call$X)[habillage[1]]
else nom.quali <- habillage[1]
if (!(nom.quali %in% liste.quali)) stop("The variable ", habillage[1], " is not qualitative")
n.mod <- res.pca$call$quali.sup$modalite[liste.quali == nom.quali]
if (length(col.hab) != n.mod) {
color.mod <- c(1:n.mod)
color.ind <- as.numeric(as.factor(res.pca$call$X[, nom.quali]))
color.ind.sup <- color.ind[res.pca$call$ind.sup]
if (!is.null(res.pca$call$ind.sup)) color.ind <- color.ind[-res.pca$call$ind.sup]
}
else {
color.mod <- col.hab
color.ind <- as.factor(res.pca$call$X[, nom.quali])
levels(color.ind) <- col.hab
color.ind.sup <- color.ind[res.pca$call$ind.sup]
if (!is.null(res.pca$call$ind.sup)) color.ind <- color.ind[-res.pca$call$ind.sup]
color.ind <- as.character(color.ind)
}
}
if(class(res.pca$call$X[,habillage[1]])[1] %in% c("numeric","double","integer")){
if (graph.type == "classic") stop("The variable ", habillage[1], "is not qualitative")
liste.quanti <- colnames(res.pca$call$X[which(!(colnames(res.pca$call$X) %in% colnames(res.pca$call$quali.sup$quali.sup)))])
if (is.numeric(habillage[1])) nom.quanti <- colnames(res.pca$call$X)[habillage[1]]
else nom.quanti <- habillage[1]
if (!(nom.quanti %in% liste.quanti)) stop("The variable ", habillage[1], " is not quantitative")
}}
color.sup <- col.ind.sup
if (graph.type == "classic" & ((habillage == "cos2") || (habillage == "contrib"))) stop("The variable is not qualitative")
# graphe individuals factor map
if (graph.type == "classic") {
plot(0, 0, main = titre, xlab = lab.x, ylab = lab.y, xlim = xlim, ylim = ylim, col = "white", asp=1, ...)
abline(v=0,lty=2, ...)
abline(h=0,lty=2, ...)
#
}
coo <- labe <- coll <- ipch <- fonte <- NULL
df_ind2 <- df_ind_sup <- df_quali.sup <- NULL
if (is.na(test.invisible[1])) {
coo <- rbind(coo,coord.actif)
if (lab.ind){ labe <- c(labe,rownames(coord.actif))
} else labe <- c(labe,rep("",nrow(coord.actif)))
coll <- c(coll,color.ind)
ipch <- c(ipch,rep(20,nrow(coord.actif)))
fonte <- c(fonte,rep(1,nrow(coord.actif)))
if (!is.null(selection)){
if (is.numeric(unselect)) coll[!((1:length(coll))%in%selection)] = rgb(t(col2rgb(coll[!((1:length(coll))%in%selection)])),alpha=255*(1-unselect),maxColorValue=255)
else coll[!((1:length(coll))%in%selection)] = unselect
labe[!((1:length(coll))%in%selection)] <- ""
}
df_ind2 <- data.frame(labe,coord.actif,ipch,fonte)
}
if(graph.type == "ggplot") coll2 <- NULL
if (!is.null(res.pca$ind.sup) & is.na(test.invisible[2])) {
coo <- rbind(coo,res.pca$ind.sup$coord[,axes])
if (lab.ind.sup){ labe2 <- rownames(res.pca$ind.sup$coord)
} else labe2 <- rep("",nrow(res.pca$ind.sup$coord))
if (length(color.sup)>1) coll2 <- color.sup
else coll2 <- rep(color.sup,nrow(res.pca$ind.sup$coord))
if (!is.null(selectionS)){
if (is.numeric(unselect)) coll2[!((1:length(coll2))%in%selectionS)] = rgb(t(col2rgb(coll2[!((1:length(coll2))%in%selectionS)])),alpha=255*(1-unselect),maxColorValue=255)
else coll2[!((1:length(coll2))%in%selectionS)] <- unselect
labe2[!((1:length(coll2))%in%selectionS)] <- ""
}
if (length(select)==1){
if (grepl("contrib",select)){
if (is.numeric(unselect)) coll2[1:length(coll2)] = rgb(t(col2rgb(coll2[1:length(coll2)])),alpha=255*(1-unselect),maxColorValue=255)
else coll2[1:length(coll2)] = unselect
labe2[1:length(coll2)] <- ""
}}
df_ind_sup <- data.frame(labe2, res.pca$ind.sup$coord[,axes], coll2, rep(1,nrow(res.pca$ind.sup$coord)), rep(3,nrow(res.pca$ind.sup$coord)))
coll <- c(coll,coll2)
labe <- c(labe,labe2)
ipch <- c(ipch,rep(1,nrow(res.pca$ind.sup$coord)))
fonte <- c(fonte,rep(3,nrow(res.pca$ind.sup$coord)))
}
if (!is.null(coord.quali) & is.na(test.invisible[3])) {
modalite <- res.pca$call$quali.sup$modalite
if (graph.type == "ggplot") col.quali <- col.quali
else{col.quali<-rep(col.quali, length(modalite))}
num.li <- 0
coo <- rbind(coo,coord.quali)
ipch <- c(ipch,rep(22,sum(modalite)))
if (lab.quali){ labe2 <- rownames(coord.quali)
} else labe2 <- rep("",sum(modalite))
labe <- c(labe,labe2)
fonte <- c(fonte,rep(3,sum(modalite)))
for (q in 1:length(modalite)) {
if ((habillage[1] != "none")&(habillage[1] != "ind")&(habillage[1] != "cos2")&(habillage[1] != "contrib")) {
if(!(class(res.pca$call$X[,habillage[1]])[1] %in% c("numeric","double","integer"))){
if (q == match(nom.quali, liste.quali)) coll2 <- color.mod
else coll2 <- rep(col.quali[1],modalite[q])
}} else coll2 <- rep(col.quali,modalite[q])
num.li <- num.li + modalite[q]
}
coll <- c(coll,coll2)
df_quali.sup <- data.frame(labe2, coord.quali, rep(22,nrow(coord.quali)), rep(3,nrow(coord.quali)))
}
# graphe individuals factor map, ajout des points
if (graph.type == "classic") {
points(coo[, 1], y = coo[, 2], pch = ipch, col = coll, ...)
}
if (graph.type == "ggplot") {
if (autoLab=="auto") autoLab = (length(which(labe!=""))<50)
df_ind <- data.frame(labe,coo,ipch,fonte)
gg_graph <- ggplot() +
coord_fixed(ratio = 1) +
xlim(xlim) + ylim(ylim) +
geom_hline(yintercept = 0,lty=ggoptions_default$line.lty, lwd = ggoptions_default$line.lwd, color=ggoptions_default$line.color) +
geom_vline(xintercept = 0,lty=ggoptions_default$line.lty, lwd = ggoptions_default$line.lwd, color=ggoptions_default$line.color) +
theme_light() +
labs(title = titre, x = lab.x, y= lab.y)
if(!is.null(select)) df_ind2[,1] <- ifelse(rownames(df_ind2) %in% rownames(df_ind2)[selection], rownames(df_ind2), "")
transparency_ind <- col2rgb(col.ind,alpha=TRUE)[4]/255
if (!is.null(select)) transparency_ind <- ifelse(rownames(res.pca$ind$coord) %in% rownames(res.pca$ind$coord)[selection], transparency_ind, transparency_ind*(1-unselect))
if((!is.na(test.invisible[1])) & (habillage[1] != "none") & (is.null(legend["title"][[1]]))) legend["title"][[1]] = habillage[1]
if (is.na(test.invisible[1])){
if (habillage[1] == "none" | habillage[1]=="ind"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3]), color=color.ind, shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind)
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1]), size = ggoptions_default$size, color = color.ind)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1]), size = ggoptions_default$size, color = color.ind, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (length(habillage) == 1 & (habillage[1] != "none" | habillage[1]!="ind")){
if ((habillage %in% colnames(res.pca$call$X)) & !(habillage %in% liste.quali)){
df_ind2 <- data.frame(df_ind2, (res.pca$call$X)[rownames(df_ind2),habillage])
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = df_ind2[,6]), shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], habillage))
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = df_ind2[,6]), size = ggoptions_default$size, show.legend = FALSE)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = df_ind2[,6]), size = ggoptions_default$size, show.legend = FALSE, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage %in% liste.quali){
df_ind2 <- data.frame(df_ind2, (res.pca$call$X)[rownames(df_ind2),habillage])
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = (res.pca$call$X)[rownames(df_ind2),habillage]), shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_manual(values = palette[1:length(levels((res.pca$call$X)[,habillage]))]) +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], habillage))
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage == "cos2"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], "cos2"))
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), size = ggoptions_default$size, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage == "contrib"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], "Ctr"))
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), size = ggoptions_default$size, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)
}
}}
if(length(habillage) == 2 & habillage[1] != "none"){
if (!(habillage[1] %in% liste.quali)){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], shape = res.pca$call$X[rownames(res.pca$ind$coord),habillage[2]], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = habillage[1], shape = habillage[2])
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage[1] %in% liste.quali){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], shape = res.pca$call$X[rownames(res.pca$ind$coord),habillage[2]], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_manual(values = palette[1:length(levels((res.pca$call$X)[,habillage[1]]))]) +
labs(color = habillage[1], shape = habillage[2])
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage[1] == "cos2"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]], shape = res.pca$call$X[rownames(res.pca$ind$coord),habillage[2]]), size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = habillage[1], shape = habillage[2])
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), size = ggoptions_default$size, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage[1] == "contrib"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1]), shape = res.pca$call$X[rownames(res.pca$ind$coord),habillage[2]]), size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = habillage[1], shape = habillage[2])
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), size = ggoptions_default$size, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)
}
}
}
}
if (!is.na(test.invisible[1])){
gg_graph <- gg_graph +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], habillage[1]))
}
gg_graph <- gg_graph + theme
if (is.na(test.invisible[1]) & isTRUE(lab.ind)) gg_graph <- gg_graph + text
if ((!is.null(res.pca$ind.sup)) && (is.na(test.invisible[2]))){
if(!is.null(select)) df_ind_sup[,1] <- ifelse(rownames(df_ind_sup) %in% rownames(df_ind_sup)[selectionS], rownames(df_ind_sup), "")
if(nrow(res.pca$ind.sup$coord) > 1){
if (isTRUE(lab.ind.sup)){
if (habillage[1] == "none"){ gg_graph <- gg_graph + geom_point(aes(x = df_ind_sup[,2], y = df_ind_sup[,3]), size = ggoptions_default$size/3, color = color.ind.sup, shape = 1)
if (autoLab) text_ind.sup <- ggrepel::geom_text_repel(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1]), color = color.ind.sup, size = ggoptions_default$size, fontface = "italic")
else{text_ind.sup <- geom_text(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1]), color = color.ind.sup, size = ggoptions_default$size, fontface = "italic",hjust = (-sign(df_ind_sup[,2])+1)/2, vjust = -sign(df_ind_sup[,3])*0.75+0.25)}
}
else{ if (habillage[1] == "cos2"){ gg_graph <- gg_graph + geom_point(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size/3, shape = 1)
if (autoLab) text_ind.sup <- ggrepel::geom_text_repel(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1], color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size, fontface = "italic")
else{text_ind.sup <- geom_text(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1], color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size, fontface = "italic", hjust = (-sign(df_ind_sup[,2])+1)/2, vjust = -sign(df_ind_sup[,3])*0.75+0.25)}
}
else{ if (habillage[1] == "contrib") text_ind.sup <- NULL #+ geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], color = res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1]), size = ggoptions_default$size/3, shape = 1)
else{gg_graph <- gg_graph + geom_point(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size/3, shape = 1)
if (autoLab) text_ind.sup <- ggrepel::geom_text_repel(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1], color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size, fontface = "italic", show.legend = FALSE)
else{text_ind.sup <- geom_text(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1], color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size, fontface = "italic", show.legend = FALSE, hjust = (-sign(df_ind_sup[,2])+1)/2, vjust = -sign(df_ind_sup[,3])*0.75+0.25)
}}}}
gg_graph <- gg_graph + text_ind.sup
}
else{
gg_graph <- gg_graph +
geom_point(aes(x = df_ind_sup[,2], y = df_ind_sup[,3]), size = ggoptions_default$size/3, color = color.ind.sup)
}
}
else{ if(dim(res.pca$ind.sup$coord)[1] == 1){
if (is.null(select)) selectionS = 1
if (isTRUE(lab.ind.sup)){
if (habillage[1] == "none") gg_graph <- gg_graph + geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]]), size = ggoptions_default$size/3, color = color.ind.sup, shape = 1) + ggrepel::geom_text_repel(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], label=ifelse(!is.null(selectionS), rownames(res.pca$ind.sup$coord), "")), color = color.ind.sup, size = ggoptions_default$size, fontface = "italic")
else{ if (habillage[1] == "cos2") gg_graph <- gg_graph + geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size/3, shape = 1) + ggrepel::geom_text_repel(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], label=ifelse(!is.null(selectionS), rownames(res.pca$ind.sup$coord), ""), color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size, fontface = "italic")
else{ if (habillage[1] == "contrib") gg_graph <- gg_graph #+ geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], color = res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1]), size = ggoptions_default$size/3, shape = 1)
else{gg_graph <- gg_graph + geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size/3, shape = 1) + ggrepel::geom_text_repel(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], label=ifelse(!is.null(selectionS), rownames(res.pca$ind.sup$coord), ""), color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size, fontface = "italic", show.legend = FALSE)
}}}
}
else{
gg_graph <- gg_graph +
geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]]), size = ggoptions_default$size/3, color = color.ind.sup)
}
}
}
}
if ((!is.null(res.pca$quali.sup)) && (is.na(test.invisible[3]))){
if (isTRUE(lab.quali)){
if((habillage[1] == "none") || !(habillage[1] %in% liste.quali)){
gg_graph <- gg_graph +
geom_point(aes(x = df_quali.sup[,2], y = df_quali.sup[,3]), size = ggoptions_default$size/2.8, color = col.quali, shape = 0)
if (autoLab) text_quali.sup <- ggrepel::geom_text_repel(aes(x = df_quali.sup[,2], y = df_quali.sup[,3], label=df_quali.sup[,1]), color = col.quali, size = ggoptions_default$size, fontface = "italic")
else{text_quali.sup <- geom_text(aes(x = df_quali.sup[,2], y = df_quali.sup[,3], label=df_quali.sup[,1]), color = col.quali, size = ggoptions_default$size, fontface = "italic",hjust = (-sign(df_quali.sup[,2])+1)/2, vjust = -sign(df_quali.sup[,3])*0.75+0.25)}
gg_graph <- gg_graph + text_quali.sup
}
else{
if ((habillage[1] %in% liste.quali) || (colnames(res.pca$call$X))[habillage[1]] %in% liste.quali) {
gg_graph <- gg_graph +
geom_point(aes(x = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[1]], y = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[2]]), size = ggoptions_default$size/2.8, color = palette[1:length(levels(res.pca$call$X[,habillage[1]]))], shape = 0)
if (autoLab) text_quali.sup <- ggrepel::geom_text_repel(aes(x = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[1]], y = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[2]], label=levels(res.pca$call$X[,habillage[1]])), color = palette[1:length(levels(res.pca$call$X[,habillage[1]]))], size = ggoptions_default$size, fontface = "italic")
else{text_quali.sup <- geom_text(aes(x = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[1]], y = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[2]], label=levels(res.pca$call$X[,habillage[1]])), color = palette[1:length(levels(res.pca$call$X[,habillage[1]]))], size = ggoptions_default$size, fontface = "italic",nudge_y=nudge_y,hjust = (-sign(df_quali.sup[,2])+1)/2, vjust = -sign(df_quali.sup[,3])*0.75+0.25)}
gg_graph <- gg_graph + text_quali.sup
}
if(length(liste.quali) > 1){
gg_graph <- gg_graph +
geom_point(aes(x = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[1]], y = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[2]]), size = ggoptions_default$size/2.8, color = col.quali, shape = 0)
if (autoLab) text_quali.sup <- ggrepel::geom_text_repel(aes(x = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[1]], y = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[2]], label = rownames(res.pca$quali.sup$coord)[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]])))]), color = col.quali, size = ggoptions_default$size, fontface = "italic")
else{text_quali.sup <- geom_text(aes(x = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[1]], y = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[2]], label = rownames(res.pca$quali.sup$coord)[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]])))]), color = col.quali, size = ggoptions_default$size, fontface = "italic",nudge_y=nudge_y,hjust = (-sign(df_quali.sup[,2])+1)/2, vjust = -sign(df_quali.sup[,3])*0.75+0.25)}
gg_graph <- gg_graph + text_quali.sup
}
}
}
else{
gg_graph <- gg_graph +
geom_point(aes(x = df_quali.sup[,2], y = df_quali.sup[,3]), size = ggoptions_default$size/2.8, color = col.quali, shape = 0)
}
}
}
if (graph.type == "classic"){
if (any(labe!="")){
if (autoLab=="auto") autoLab = (length(which(labe!=""))<50)
if (autoLab ==TRUE) autoLab(coo[labe!="", 1], y = coo[labe!="", 2], labels = labe[labe!=""], col = coll[labe!=""], font=fonte[labe!=""],shadotext=shadowtext,...)
if (autoLab ==FALSE) text(coo[labe!="", 1], y = coo[labe!="", 2], labels = labe[labe!=""], col = coll[labe!=""], font=fonte[labe!=""],pos=3,...)
}}
if (!is.null(ellipse)) {
nbre.ellipse <- nlevels(coord.ellipse[, 1])
for (e in 1:nbre.ellipse) {
data.elli <- coord.ellipse[ellipse$res[, 1] == levels(coord.ellipse[, 1])[e], -1]
if(graph.type=="classic"){
if ((habillage[1] != "none")&(habillage[1] != "ind")) lines(x=data.elli[, 1], y = data.elli[, 2], col = palette[color.mod[e]],...)
else lines(x=data.elli[, 1], y = data.elli[, 2], col = palette[col.quali],...)}
else{
if(graph.type=="ggplot"){
if (habillage[1] != "none"){
gg_graph <- gg_graph + geom_path(aes_string(x=data.elli[,1],y=data.elli[,2]), color = palette[color.mod[e]])
}
else {
gg_graph <- gg_graph + geom_path(aes_string(x=data.elli[,1],y=data.elli[,2]), color = palette[col.quali])
}
}
}
}
}
# if ((habillage != "none")&(habillage != "ind")) legend("topleft",legend= levels(res.pca$call$X[,habillage]),text.col= color.mod,cex=par("cex")*0.8)
if ((habillage[1] != "none") & (habillage[1] != "ind") & (habillage[1] != "cos2") & (habillage[1] != "contrib") & (graph.type == "classic")) {
L <- list(x="topleft", legend = levels(res.pca$call$X[, habillage[1]]), text.col = color.mod, cex = par("cex") * 0.8)
L <- modifyList(L, legend)
do.call(graphics::legend, L)
}
}
if (choix == "varcor") {
sauv <- res.pca$var$coord
res.pca$var$coord <- res.pca$var$cor
if (!is.null(res.pca$quanti.sup)) res.pca$quanti.sup$coord <- res.pca$quanti.sup$cor
res.pca$call$scale.unit <- TRUE
}
if ((choix == "var")||(choix == "varcor")) {
if (is.null(title)) titre <- "PCA graph of variables"
else titre <- title
selection <- selectionS <- NULL
if (!is.null(select)) {
if (mode(select)=="numeric") selection <- select
else {
if (sum(rownames(res.pca$var$coord)%in%select)+sum(rownames(res.pca$quanti.sup$coord)%in%select)!=0) selection <- which(rownames(res.pca$var$coord)%in%select)
else {
if (grepl("contrib",select[1])) selection <- (rev(order(res.pca$var$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$var$contrib[,axes[2]]*res.pca$eig[axes[2],1])))[1:min(nrow(res.pca$var$coord),sum(as.integer(unlist(strsplit(select,"contrib"))),na.rm=T))]
if (grepl("coord",select[1])) selection <- (rev(order(apply(res.pca$var$coord[,axes]^2,1,sum))))[1:min(nrow(res.pca$var$coord),sum(as.integer(unlist(strsplit(select,"coord"))),na.rm=T))]
if (grepl("cos2",select[1])) {
if (sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T)>=1) selection <- (rev(order(apply(res.pca$var$cos2[,axes],1,sum))))[1:min(nrow(res.pca$var$coord),sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))]
else selection <- which(apply(res.pca$var$cos2[,axes],1,sum)>sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))
}
if (is.integer(select)) selection <- select
}
}
}
if ((!is.null(select))&(!is.null(res.pca$quanti.sup))) {
if (mode(select)=="numeric") selectionS <- select
else {
if (sum(rownames(res.pca$var$coord)%in%select)+sum(rownames(res.pca$quanti.sup$coord)%in%select)!=0) selectionS <- which(rownames(res.pca$quanti.sup$coord)%in%select)
else {
if (grepl("contrib",select[1])) selectionS <- NULL
if (grepl("coord",select[1])) selectionS <- (rev(order(apply(res.pca$quanti.sup$coord[,axes]^2,1,sum))))[1:min(nrow(res.pca$quanti.sup$coord),sum(as.integer(unlist(strsplit(select,"coord"))),na.rm=T))]
if (grepl("cos2",select[1])) {
if (sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T)>=1) selectionS <- (rev(order(apply(res.pca$quanti.sup$cos2[,axes],1,sum))))[1:min(nrow(res.pca$quanti.sup$coord),sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))]
else selectionS <- which(apply(res.pca$quanti.sup$cos2[,axes],1,sum)>sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))
}
if (is.integer(select)) selectionS <- select
}
}
}
test.invisible <- vector(length = 2)
if (!is.null(invisible)) {
test.invisible[1] <- match("var", invisible)
test.invisible[2] <- match("quanti.sup", invisible)
}
else test.invisible <- rep(NA, 2)
scale.unit <- res.pca$call$scale.unit
coord.var <- res.pca$var$coord[, axes,drop=FALSE]
if (!is.null(res.pca$quanti.sup)) coord.quanti <- res.pca$quanti.sup$coord[, axes, drop=FALSE]
else coord.quanti <- NULL
if (scale.unit) xlim <- ylim <- c(-1, 1)
else {
xmin <- min(0,coord.var[, 1], coord.quanti[, 1])
xmax <- max(0,coord.var[, 1], coord.quanti[, 1])
ymin <- min(0,coord.var[, 2], coord.quanti[, 2])
ymax <- max(0,coord.var[, 2], coord.quanti[, 2])
xlim <- c(xmin, xmax) * 1.2
ylim <- c(ymin, ymax) * 1.2
}
if ((new.plot)&!nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY"))) dev.new()
if (is.null(palette)) palette(c("black","red","green3","blue","cyan","magenta","darkgray","darkgoldenrod","darkgreen","violet","turquoise","orange","lightpink","lavender","yellow","lightgreen","lightgrey","lightblue","darkkhaki", "darkmagenta","darkolivegreen","lightcyan", "darkorange", "darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkslategrey","darkturquoise","darkviolet", "lightgray","lightsalmon","lightyellow", "maroon"))
# cercle variables factor map
if (graph.type == "classic") {
if (scale.unit) {
plot(0, 0, xlab = lab.x, ylab = lab.y, xlim = xlim, ylim = ylim, col = "white", asp=1, main=titre,...)
x.cercle <- seq(-1, 1, by = 0.01)
y.cercle <- sqrt(1 - x.cercle^2)
lines(x.cercle, y = y.cercle,...)
lines(x.cercle, y = -y.cercle,...)
}
else {
plot(0, 0, main = titre, xlab = lab.x, ylab = lab.y, xlim = xlim, ylim = ylim, col = "white", asp=1, ...)
}
abline(v=0,lty=2,...)
abline(h=0,lty=2,...)
}
#
coll <- coo <- labe <- posi <- NULL
if (!is.null(coord.var[ which(apply(res.pca$var$cos2[, axes,drop=FALSE],1,sum, na.rm = TRUE) >= lim.cos2.var),])&is.na(test.invisible[1])&(nrow(coord.var)>0)){
coord.var <- coord.var[ which(apply(res.pca$var$cos2[, axes,drop=FALSE],1,sum, na.rm = TRUE) >= lim.cos2.var),,drop=FALSE]
coo <- coord.var
if (length(col.var)==1) coll <- c(coll,rep(col.var,nrow(coord.var)))
else coll <- col.var
if (!is.null(col.hab)) coll <- col.hab[which(colnames(res.pca$call$X)%in%rownames(res.pca$var$coord))]
if (lab.var){ labe <- c(labe,rownames(coord.var))
} else labe <- c(labe,rep("",nrow(coord.var)))
if (!is.null(selection)){
if (is.numeric(unselect)) coll[!((1:length(coll))%in%selection)] = rgb(t(col2rgb(coll[!((1:length(coll))%in%selection)])),alpha=255*(1-unselect),maxColorValue=255)
else coll[!((1:length(coll))%in%selection)] = unselect
labe[!((1:length(coll))%in%selection)] <- ""
}
if(graph.type == "ggplot"){
df_var <- df_quanti.sup <- NULL
df_var <- data.frame(labe,coord.var,coll)
} else {
for (v in 1:nrow(coord.var)) {
arrows(0, 0, coord.var[v, 1], coord.var[v, 2], length = 0.1, angle = 15, code = 2, col = coll[v])
if (lab.var) {
if (abs(coord.var[v,1])>abs(coord.var[v,2])){
if (coord.var[v,1]>=0) posi<-c(posi,4)
else posi<-c(posi,2)
}
else {
if (coord.var[v,2]>=0) posi<-c(posi,3)
else posi<-c(posi,1)
}
}
}
}
}
#
if (!is.null(coord.quanti)){
if (!is.null(coord.quanti[ which(apply(res.pca$quanti.sup$cos2[, axes,drop=FALSE],1,sum, na.rm = TRUE) >= lim.cos2.var),])& is.na(test.invisible[2]) & (nrow(coord.quanti)>0)) {
coord.quanti <- coord.quanti[ which(apply(res.pca$quanti.sup$cos2[, axes,drop=FALSE],1,sum, na.rm = TRUE) >= lim.cos2.var),,drop=FALSE]
coo <- rbind(coo,coord.quanti)
if (length(col.quanti.sup)==1) col.quanti.sup<-rep(col.quanti.sup, nrow(coord.quanti))
if (is.null(col.hab)) coll2 <- col.quanti.sup
else coll2 <- col.hab[which(colnames(res.pca$call$X)%in%colnames(res.pca$call$quanti.sup))]
if (lab.quanti){ labe2 <- rownames(coord.quanti)
} else labe2 <- rep("",nrow(coord.quanti))
if (length(select)==1){
if (grepl("contrib",select)){
if (is.numeric(unselect)) coll2[1:length(coll2)] = rgb(t(col2rgb(coll2[1:length(coll2)])),alpha=255*(1-unselect),maxColorValue=255)
else coll2[1:length(coll2)] = unselect
labe2[1:length(coll2)] <- ""
}}
if (!is.null(selectionS)){
if (is.numeric(unselect)) coll2[!((1:length(coll2))%in%selectionS)] = rgb(t(col2rgb(coll2[!((1:length(coll2))%in%selectionS)])),alpha=255*(1-unselect),maxColorValue=255)
else coll2[!((1:length(coll2))%in%selectionS)] = unselect
labe2[!((1:length(coll2))%in%selectionS)] <- ""
}
if (graph.type == "ggplot") df_quanti.sup <- data.frame(labe2,coord.quanti,coll2)
#
if (graph.type == "classic"){
for (q in 1:nrow(coord.quanti)) {
arrows(0, 0, coord.quanti[q, 1], coord.quanti[q, 2], length = 0.1, angle = 15, code = 2, lty = 2, col=coll2[q])
#
if (lab.quanti) {
if (abs(coord.quanti[q,1])>abs(coord.quanti[q,2])){
if (coord.quanti[q,1]>=0) posi<-c(posi,4)
else posi<-c(posi,2)
}
else {
if (coord.quanti[q,2]>=0) posi<-c(posi,3)
else posi<-c(posi,1)
}
}
}}
labe <- c(labe,labe2)
coll <- c(coll,coll2)
}
}
if (graph.type == "classic"){
if (any(labe!="")){
if (autoLab=="auto") autoLab = (length(which(labe!=""))<50)
if (autoLab==FALSE) text(coo[labe!="", 1], y = coo[labe!="", 2], labels = labe[labe!=""], pos = posi[labe!=""], col = coll[labe!=""],...)
if (autoLab==TRUE) autoLab(coo[labe!="", 1], y = coo[labe!="", 2], labels = labe[labe!=""], col = coll[labe!=""], shadotext=shadowtext,...)
}
} else {
if((!is.na(test.invisible[1])) & (habillage[1] != "none") & (is.null(legend["title"][[1]]))){
legend["title"][[1]] = habillage[1]}
if (autoLab=="auto") autoLab = (length(which(labe!=""))<50)
if (length(habillage) != 1){
warning("Habillage must be length 1")
habillage <- habillage[1]
}
if (!(habillage[1] %in% c("contrib","cos2","none"))){
warning("Habillage must be in c('contrib','cos2','none')")
habillage <- "none"
}
circle <- annotate("path",
x=0+1*cos(seq(0,2*pi,length.out=100)),
y=0+1*sin(seq(0,2*pi,length.out=100)),
lty = ggoptions_default$circle.lty,
lwd = ggoptions_default$circle.lwd,
color = ggoptions_default$circle.color)
transparency_var <- ifelse(rownames(res.pca$var$coord) %in% labe, 1, 1-unselect)
gg_graph <- ggplot() +
coord_fixed(ratio = 1) +
geom_line(aes(x=x, y=y), data=data.frame(x=-1:1,y=0),lty=ggoptions_default$line.lty, lwd = ggoptions_default$line.lwd, color=ggoptions_default$line.color) +
geom_line(aes(x=x, y=y), data=data.frame(x=0,y=-1:1),lty=ggoptions_default$line.lty, lwd = ggoptions_default$line.lwd, color=ggoptions_default$line.color) +
theme_light()
if (is.na(test.invisible[1])){
if (((habillage[1] != "contrib") & (habillage[1] != "cos2"))) {
gg_graph <- gg_graph +
aes(x=df_var[,2], y=df_var[,3]) +
geom_segment(aes(x=0,y=0,xend=df_var[,2], yend=df_var[,3]),arrow=arrow(length=unit(0.2,"cm")),alpha = transparency_var, lty = ggoptions_default$segment.lty, lwd = ggoptions_default$segment.lwd, color=col.var)
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_var[,2], y=df_var[,3],label=df_var[,1]), size = ggoptions_default$size, color = col.var)
else{text <- geom_text(aes(x=df_var[,2], y=df_var[,3],label=df_var[,1]), size = ggoptions_default$size, color = col.var, hjust = (-sign(df_var[,2])+1)/2, vjust = -sign(df_var[,3])*0.75+0.25)}
}
if (habillage[1] == "cos2" || habillage[1] == "contrib"){
if (habillage[1] == "cos2") df_var[,4] <- res.pca$var$cos2[,axes[1]] + res.pca$var$cos2[,axes[2]]
if (habillage[1] == "contrib") df_var[,4] <- (res.pca$var$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$var$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])
gg_graph <- gg_graph +
aes(x=df_var[,2], y=df_var[,3],color = df_var[,4]) +
geom_segment(aes(x=0,y=0,xend=df_var[,2], yend=df_var[,3],col = df_var[,4]),arrow=arrow(length=unit(0.2,"cm")), alpha = transparency_var, lty = ggoptions_default$segment.lty, lwd = ggoptions_default$segment.lwd) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti)
if (habillage[1] == "cos2") gg_graph <- gg_graph + labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], "cos2"))
if (habillage[1] == "contrib") gg_graph <- gg_graph + labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], "Ctr"))
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_var[,2], y=df_var[,3],label=df_var[,1], color = df_var[,4]), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_var[,2], y=df_var[,3],label=df_var[,1], color = df_var[,4]), size = ggoptions_default$size, hjust = (-sign(df_var[,2])+1)/2, vjust = -sign(df_var[,3])*0.75+0.25)}
}
}
if (!is.na(test.invisible[1])){
gg_graph <- gg_graph +
labs(color = legend["title"][[1]])
}
if ((!is.null(res.pca$quanti.sup))&(is.na(test.invisible[2]))){
transparency_quanti <- ifelse(rownames(res.pca$quanti.sup$coord) %in% labe, 1, 1-unselect)
if (isTRUE(lab.quanti)){
if(habillage[1] == "contrib") text_quanti.sup <- NULL
if (habillage[1] == "none"){ gg_graph <- gg_graph + geom_segment(aes(x=0,y=0,xend=df_quanti.sup[,2], yend=df_quanti.sup[,3]),arrow=arrow(length=unit(0.2,"cm")),lty = 2, color = col.quanti.sup,alpha = transparency_quanti)
if (autoLab) text_quanti.sup <- ggrepel::geom_text_repel(aes(x = df_quanti.sup[,2], y = df_quanti.sup[,3], label=df_quanti.sup[,1]), color = col.quanti.sup, size = ggoptions_default$size,alpha = transparency_quanti)
else{text_quanti.sup <- geom_text(aes(x = df_quanti.sup[,2], y = df_quanti.sup[,3], label=df_quanti.sup[,1]), color = col.quanti.sup, size = ggoptions_default$size,hjust = (-sign(df_quanti.sup[,2])+1)/2, vjust = -sign(df_quanti.sup[,3])*0.75+0.25,alpha = transparency_quanti)}
}
if (habillage[1] == "cos2"){gg_graph <- gg_graph + geom_segment(aes(x=0,y=0,xend=df_quanti.sup[,2], yend=df_quanti.sup[,3], color = res.pca$quanti.sup$cos2[,axes[1]] + res.pca$quanti.sup$cos2[,axes[2]]),arrow=arrow(length=unit(0.2,"cm")),lty = 2,alpha = transparency_quanti)
if (autoLab) text_quanti.sup <- ggrepel::geom_text_repel(aes(x = df_quanti.sup[,2], y = df_quanti.sup[,3], label=df_quanti.sup[,1], color = res.pca$quanti.sup$cos2[,axes[1]] + res.pca$quanti.sup$cos2[,axes[2]]), size = ggoptions_default$size,alpha = transparency_quanti)
else{text_quanti.sup <- geom_text(aes(x = df_quanti.sup[,2], y = df_quanti.sup[,3], label=df_quanti.sup[,1], color = res.pca$quanti.sup$cos2[,axes[1]] + res.pca$quanti.sup$cos2[,axes[2]]), size = ggoptions_default$size,hjust = (-sign(df_quanti.sup[,2])+1)/2, vjust = -sign(df_quanti.sup[,3])*0.75+0.25,alpha = transparency_quanti)}
}
gg_graph <- gg_graph + text_quanti.sup
} else{
gg_graph <- gg_graph +
geom_segment(aes(x=0,y=0,xend=df_quanti.sup[,2], yend=df_quanti.sup[,3]),arrow=arrow(length=unit(0.2,"cm")),lty = 2, color = col.quanti.sup)
}
}
gg_graph <- gg_graph + theme + circle + labs(title = titre, x = lab.x, y= lab.y)
if (is.na(test.invisible[1]) & (isTRUE(lab.var))) gg_graph <- gg_graph + text
}
}
palette(old.palette)
if(graph.type == "ggplot") return(gg_graph)
}
|
/R/plot.PCA.R
|
no_license
|
yyleon/FactoMineR
|
R
| false
| false
| 57,032
|
r
|
plot.PCA <- function (x, axes = c(1, 2), choix = c("ind","var","varcor"),
ellipse = NULL, xlim = NULL, ylim = NULL, habillage = "none",
col.hab = NULL, col.ind = "black", col.ind.sup = "blue",
col.quali = "magenta", col.quanti.sup = "blue",
col.var = "black", label=c("all","none","ind", "ind.sup", "quali", "var", "quanti.sup"),
invisible = c("none","ind", "ind.sup", "quali","var", "quanti.sup"), lim.cos2.var = 0.,
title = NULL, palette=NULL, autoLab=c("auto","yes","no"),new.plot=FALSE,
select=NULL, unselect = 0.7,shadowtext = FALSE, legend = list(bty = "y", x = "topleft"),
graph.type = c("ggplot","classic"), ggoptions = NULL, ...){
res.pca <- x
argument <- list(...)
if (!is.null(argument[["cex"]]) & is.null(ggoptions["size"])) ggoptions["size"] <- 4*argument$cex
ggoptions_default <- list(size = 4, point.shape = 19, line.lty = 2, line.lwd = 0.5, line.color = "black", segment.lty = 1, segment.lwd = 0.5, circle.lty = 1, circle.lwd = 0.5, circle.color = "black", low.col.quanti = "blue", high.col.quanti = "red3")
if (!is.null(ggoptions[1])) ggoptions_default[names(ggoptions)] = ggoptions[names(ggoptions)]
if (!inherits(res.pca, "PCA")) stop("non convenient data")
if (is.numeric(unselect)) if ((unselect>1)|(unselect<0)) stop("unselect should be betwwen 0 and 1")
autoLab <- match.arg(autoLab,c("auto","yes","no"))
if (autoLab == "yes") autoLab=TRUE
if (autoLab == "no") autoLab=FALSE
old.palette <- palette()
if (is.null(palette)) palette <- c("black", "red", "green3", "blue", "magenta", "darkgoldenrod","darkgray", "orange", "cyan", "violet", "lightpink", "lavender", "yellow", "darkgreen","turquoise", "lightgrey", "lightblue", "darkkhaki","darkmagenta","lightgreen", "darkolivegreen", "lightcyan", "darkorange","darkorchid", "darkred", "darksalmon", "darkseagreen","darkslateblue", "darkslategray", "darkslategrey","darkturquoise", "darkviolet", "lightgray", "lightsalmon","lightyellow", "maroon")
palette(palette) # that is necessary
label <- match.arg(label,c("all","none","ind", "ind.sup", "quali", "var", "quanti.sup"),several.ok=TRUE)
invisible <- match.arg(invisible,c("none","ind", "ind.sup", "quali","var", "quanti.sup"),several.ok=TRUE)
if ("none"%in%invisible) invisible = NULL
choix <- match.arg(choix,c("ind","var","varcor"))
graph.type <- match.arg(graph.type[1],c("ggplot","classic"))
lab.ind <- lab.quali <- lab.var <- lab.quanti <- lab.ind.sup <- FALSE
if (length(label)==1 && label=="all") lab.ind <- lab.quali <- lab.var <- lab.quanti <- lab.ind.sup <-TRUE
if ("ind" %in% label) lab.ind<-TRUE
if ("quali" %in% label) lab.quali<-TRUE
if ("var" %in% label) lab.var<-TRUE
if ("quanti.sup" %in% label) lab.quanti<-TRUE
if ("ind.sup" %in% label) lab.ind.sup<-TRUE
lab.x <- paste("Dim ",axes[1]," (",format(res.pca$eig[axes[1],2],nsmall=2,digits=2),"%)",sep="")
lab.y <- paste("Dim ",axes[2]," (",format(res.pca$eig[axes[2],2],nsmall=2,digits=2),"%)",sep="")
if (graph.type == "ggplot"){
if(!is.null(col.hab)) palette <- col.hab
theme <- theme(
axis.title = element_text(hjust = 1, size = if (is.null(argument[["cex.axis"]])) {10} else {10*argument$cex.axis},face = 2),
plot.title = element_text(hjust = 0.5, size = if (is.null(argument[["cex.main"]])) {11} else {11*argument$cex.main},face = 2),
legend.position = ifelse(legend$x %in% c("bottom","up","right","left"), legend$x, "right"),
legend.box.spacing=unit(0.1, 'cm'),legend.margin=margin()
)
liste.quali <- colnames(res.pca$call$quali.sup$quali.sup)
liste.quanti <- colnames(res.pca$call$X)[which(!(colnames(res.pca$call$X) %in% liste.quali))]
hab_2 <- c(colnames(res.pca$call$X), "contrib", "cos2")
if((habillage != "none") && !(habillage[1] %in% hab_2) && (habillage != "ind")) habillage[1] = colnames(res.pca$call$X)[as.numeric(habillage[1])]
if(habillage[1] != "none" && length(habillage) == 2){
if(!habillage[2] %in% hab_2) habillage[2] = colnames(res.pca$call$X)[as.numeric(habillage[2])]
if (length(habillage) > 2) {
warning("Habillage must be either length 1 or 2 : only 2 first arguments will be used")
habillage = habillage[1:2]
}
if ((length(habillage) == 2) & !("cos2" %in% habillage) & !("contrib" %in% habillage)){
if(!(habillage[2] %in% liste.quali)){
if (!(habillage[1] %in% liste.quali)){
habillage = habillage[1]
}
else{
habillage = habillage[2:1]
}
}}
if(length(habillage) == 1 && !(habillage %in% hab_2)) habillage = as.numeric(habillage)
if((length(habillage) == 2) & (habillage[2] %in% c("contrib","cos2"))) habillage = habillage[2:1]
if((length(habillage) == 2) & (habillage[1] %in% c("contrib","cos2")) & !(habillage[2] %in% hab_2)) habillage[2] = colnames(res.pca$call$X)[as.integer(habillage[2])]
if(class(habillage[1]) %in% c("numeric","integer") && class(habillage[2]) %in% c("numeric","integer")) habillage = c(colnames(res.pca$call$X)[habillage[1]],colnames(res.pca$call$X)[habillage[2]])
if(("cos2" %in% habillage) || ("contrib" %in% habillage)){
if((habillage[2] %in% liste.quanti) || (habillage[1] %in% liste.quanti)) habillage = habillage[1]
}
if(("cos2" %in% habillage) && ("contrib" %in% habillage)) habillage = habillage[1]
}
}
if (choix == "ind") {
# --- Graph of individuals: titles, coordinates, invisibility, axis limits ---
if (is.null(title)) titre <- "PCA graph of individuals"
else titre <- title
coord.actif <- res.pca$ind$coord[, axes,drop=FALSE]
coord.illu <- coord.quali <- coord.ellipse <- NULL
if (!is.null(res.pca$ind.sup)) coord.illu <- res.pca$ind.sup$coord[, axes,drop=FALSE]
if (!is.null(res.pca$quali.sup)) coord.quali <- res.pca$quali.sup$coord[, axes,drop=FALSE]
if (!is.null(ellipse)) coord.ellipse <- ellipse$res
# test.invisible[k] is NA when the element must be drawn:
# 1 = active individuals, 2 = supplementary individuals, 3 = suppl. categories
# NOTE(review): created with length 2 but index 3 is assigned below; R silently
# extends the vector so this works, yet length = 3 would be clearer.
test.invisible <- vector(length = 2)
if (!is.null(invisible)) {
test.invisible[1] <- match("ind", invisible)
test.invisible[2] <- match("ind.sup", invisible)
test.invisible[3] <- match("quali", invisible)
}
else test.invisible <- rep(NA, 3)
nullxlimylim <- (is.null(xlim) & is.null(ylim))
# default x-limits: span of every drawn element (always including 0 so the
# origin stays visible), then widened by 20% around the midpoint
if (is.null(xlim)) {
xmin <- xmax <- 0
if(is.na(test.invisible[1])) xmin <- min(xmin, coord.actif[,1])
if(is.na(test.invisible[1])) xmax <- max(xmax, coord.actif[,1])
if(!is.null(coord.illu)&is.na(test.invisible[2])) xmin <- min(xmin, coord.illu[, 1])
if(!is.null(coord.illu)&is.na(test.invisible[2])) xmax <- max(xmax, coord.illu[, 1])
if(!is.null(coord.quali)&is.na(test.invisible[3])) xmin <- min(xmin, coord.quali[, 1])
if(!is.null(coord.quali)&is.na(test.invisible[3])) xmax <- max(xmax, coord.quali[, 1])
if(!is.null(coord.ellipse)&is.na(test.invisible[3])) xmin <- min(xmin, coord.ellipse[, 2])
if(!is.null(coord.ellipse)&is.na(test.invisible[3])) xmax <- max(xmax, coord.ellipse[, 2])
# xlim <- c(xmin, xmax) * 1.2
xlim <- c(xmin, xmax)
xlim <- (xlim-mean(xlim))*1.2 + mean(xlim)
}
# default y-limits: same rule on the second axis (the ellipse data keeps its
# x in column 2 and its y in column 3, column 1 being the category)
if (is.null(ylim)) {
ymin <- ymax <- 0
if(is.na(test.invisible[1])) ymin <- min(ymin, coord.actif[,2])
if(is.na(test.invisible[1])) ymax <- max(ymax, coord.actif[,2])
if(!is.null(coord.illu)&is.na(test.invisible[2])) ymin <- min(ymin, coord.illu[, 2])
if(!is.null(coord.illu)&is.na(test.invisible[2])) ymax <- max(ymax, coord.illu[, 2])
if(!is.null(coord.quali)&is.na(test.invisible[3])) ymin <- min(ymin, coord.quali[, 2])
if(!is.null(coord.quali)&is.na(test.invisible[3])) ymax <- max(ymax, coord.quali[, 2])
if(!is.null(coord.ellipse)&is.na(test.invisible[3])) ymin <- min(ymin, coord.ellipse[, 3])
if(!is.null(coord.ellipse)&is.na(test.invisible[3])) ymax <- max(ymax, coord.ellipse[, 3])
ylim <- c(ymin, ymax)
ylim <- (ylim-mean(ylim))*1.2 + mean(ylim)
}
# when both limits were computed automatically, cap the aspect ratio between
# 1/2 and 3 so the cloud does not degenerate into a thin strip
if (nullxlimylim & diff(xlim)/diff(ylim)>3) ylim <- (ylim-mean(ylim))*diff(xlim)/diff(ylim)/3 + mean(ylim)
if (nullxlimylim & diff(xlim)/diff(ylim)<1/2) xlim <- (xlim-mean(xlim))*diff(ylim)/diff(xlim)/2 + mean(xlim)
# vertical offset used by geom_text labels (3% of the plotted height)
if(graph.type=="ggplot") nudge_y <- (ylim[2] - ylim[1])*0.03
# --- Parse 'select' -----------------------------------------------------------
# select may be numeric indices, row names, or a keyword string such as
# "contrib 10", "cos2 0.8", "coord 5" or "dist 5".  'selection' holds the
# chosen ACTIVE individuals, 'selectionS' the chosen SUPPLEMENTARY ones.
selection <- NULL
if (!is.null(select)) {
if (mode(select)=="numeric") selection <- select
else {
# row names given explicitly
if (sum(rownames(res.pca$ind$coord)%in%select)+sum(rownames(res.pca$ind.sup$coord)%in%select)!=0) selection <- which(rownames(res.pca$ind$coord)%in%select)
else {
# "contrib k": the k individuals with the largest eigenvalue-weighted
# contribution on the two plotted axes
if (grepl("contrib",select[1])) selection <- (rev(order(res.pca$ind$contrib[,axes[1],drop=FALSE]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2],drop=FALSE]*res.pca$eig[axes[2],1])))[1:min(nrow(res.pca$ind$coord),sum(as.integer(unlist(strsplit(select,"contrib"))),na.rm=T))]
# if (grepl("contrib",select)) selection <- (rev(order(apply(res.pca$ind$contrib[,axes],1,sum))))[1:min(nrow(res.pca$ind$coord),sum(as.integer(unlist(strsplit(select,"contrib"))),na.rm=T))]
if (grepl("dist",select[1])) selection <- (rev(order(res.pca$ind$dist)))[1:min(nrow(res.pca$ind$coord),sum(as.integer(unlist(strsplit(select,"dist"))),na.rm=T))]
if (grepl("coord",select[1])) selection <- (rev(order(apply(res.pca$ind$coord[,axes]^2,1,sum))))[1:min(nrow(res.pca$ind$coord),sum(as.integer(unlist(strsplit(select,"coord"))),na.rm=T))]
# "cos2 v": v >= 1 means "the v best", v < 1 means "cos2 above threshold v"
if (grepl("cos2",select[1])) {
if (sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T)>=1) selection <- (rev(order(apply(res.pca$ind$cos2[,axes],1,sum))))[1:min(nrow(res.pca$ind$coord),sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))]
else selection <- which(apply(res.pca$ind$cos2[,axes],1,sum)>sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))
}
if (is.integer(select)) selection <- select
}
}
}
# same parsing for supplementary individuals (no "contrib" option: they have
# no contribution by construction)
selectionS <- NULL
if ((!is.null(select))&(!is.null(res.pca$ind.sup$coord))&is.na(test.invisible[2])) {
if (mode(select)=="numeric") selectionS <- select
else {
if (sum(rownames(res.pca$ind$coord)%in%select)+sum(rownames(res.pca$ind.sup$coord)%in%select)!=0) selectionS <- which(rownames(res.pca$ind.sup$coord)%in%select)
else {
if (grepl("dist",select[1])) selectionS <- (rev(order(res.pca$ind.sup$dist)))[1:min(nrow(res.pca$ind.sup$coord),sum(as.integer(unlist(strsplit(select,"dist"))),na.rm=T))]
if (grepl("coord",select[1])) selectionS <- (rev(order(apply(res.pca$ind.sup$coord[,axes]^2,1,sum))))[1:min(nrow(res.pca$ind.sup$coord),sum(as.integer(unlist(strsplit(select,"coord"))),na.rm=T))]
if (grepl("cos2",select[1])) {
if (sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T)>=1) selectionS <- (rev(order(apply(res.pca$ind.sup$cos2[,axes,drop=FALSE],1,sum))))[1:min(nrow(res.pca$ind.sup$coord),sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))]
else selectionS <- which(apply(res.pca$ind.sup$cos2[,axes,drop=FALSE],1,sum)>sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))
}
if (is.integer(select)) selectionS <- select
}
}
}
## Graphical part
# --- Device, palette and per-point colors -----------------------------------
if (graph.type =="ggplot") color.ind <- NULL
# open a new device only outside RStudio; its width follows the aspect ratio
if ((new.plot)&!nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY"))) dev.new(width=min(14,8*diff(xlim)/diff(ylim)),height=8)
if (is.null(palette)) palette = (c("black","red","green3","blue","cyan","magenta","darkgray","darkgoldenrod","darkgreen","violet","turquoise","orange","lightpink","lavender","yellow","lightgreen","lightgrey","lightblue","darkkhaki", "darkmagenta","darkolivegreen","lightcyan", "darkorange", "darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkslategrey","darkturquoise","darkviolet", "lightgray","lightsalmon","lightyellow", "maroon"))
# habillage == "none": a single color for every active / supplementary point
if (habillage[1] == "none") {
color.ind <- rep(col.ind,nrow(coord.actif))
color.mod <- col.quali
if (!is.null(res.pca$ind.sup)&is.na(test.invisible[2])) color.ind.sup <- rep(col.ind.sup,nrow(res.pca$ind.sup$coord))
}
# habillage == "ind": one color per individual, either user-supplied
# (col.hab of matching length) or the default integer palette
if (habillage[1] == "ind") {
if (is.null(coord.illu)){
if(length(col.hab) == nrow(coord.actif)) color.ind <- col.hab
else color.ind <- c(1:nrow(coord.actif))
} else{
if (length(col.hab)== nrow(coord.actif)+nrow(coord.illu)){
color.ind <- col.hab[-res.pca$call$ind.sup]
color.ind.sup <- col.hab[res.pca$call$ind.sup]
} else {
color.ind <- c(1:nrow(coord.actif))
color.ind.sup <- c((nrow(coord.actif)+1):(nrow(coord.actif)+nrow(coord.illu)))
}
}
color.mod <- "darkred"
}
liste.quali <- NULL
# habillage names a variable: color by its categories when it is qualitative,
# otherwise (ggplot only) a continuous gradient is used further below
if ((habillage[1] != "none")&(habillage[1] != "ind")&(habillage[1] != "cos2")&(habillage[1] != "contrib")){
liste.quali <- colnames(res.pca$call$quali.sup$quali.sup)
if(!(class(res.pca$call$X[,habillage[1]])[1] %in% c("numeric","double","integer"))) {
if (is.numeric(habillage)) nom.quali <- colnames(res.pca$call$X)[habillage[1]]
else nom.quali <- habillage[1]
if (!(nom.quali %in% liste.quali)) stop("The variable ", habillage[1], " is not qualitative")
n.mod <- res.pca$call$quali.sup$modalite[liste.quali == nom.quali]
if (length(col.hab) != n.mod) {
color.mod <- c(1:n.mod)
color.ind <- as.numeric(as.factor(res.pca$call$X[, nom.quali]))
color.ind.sup <- color.ind[res.pca$call$ind.sup]
if (!is.null(res.pca$call$ind.sup)) color.ind <- color.ind[-res.pca$call$ind.sup]
}
else {
color.mod <- col.hab
color.ind <- as.factor(res.pca$call$X[, nom.quali])
levels(color.ind) <- col.hab
color.ind.sup <- color.ind[res.pca$call$ind.sup]
if (!is.null(res.pca$call$ind.sup)) color.ind <- color.ind[-res.pca$call$ind.sup]
color.ind <- as.character(color.ind)
}
}
if(class(res.pca$call$X[,habillage[1]])[1] %in% c("numeric","double","integer")){
if (graph.type == "classic") stop("The variable ", habillage[1], "is not qualitative")
liste.quanti <- colnames(res.pca$call$X[which(!(colnames(res.pca$call$X) %in% colnames(res.pca$call$quali.sup$quali.sup)))])
if (is.numeric(habillage[1])) nom.quanti <- colnames(res.pca$call$X)[habillage[1]]
else nom.quanti <- habillage[1]
if (!(nom.quanti %in% liste.quanti)) stop("The variable ", habillage[1], " is not quantitative")
}}
color.sup <- col.ind.sup
# "cos2"/"contrib" gradients are only meaningful for the ggplot backend
if (graph.type == "classic" & ((habillage == "cos2") || (habillage == "contrib"))) stop("The variable is not qualitative")
# individuals factor map graph
# --- Assemble coordinates / labels / colors / symbols for every drawn point --
# coo, labe, coll, ipch, fonte are parallel vectors accumulated across active
# individuals, supplementary individuals and supplementary categories; the
# df_* data frames carry the same content for the ggplot backend.
if (graph.type == "classic") {
plot(0, 0, main = titre, xlab = lab.x, ylab = lab.y, xlim = xlim, ylim = ylim, col = "white", asp=1, ...)
abline(v=0,lty=2, ...)
abline(h=0,lty=2, ...)
#
}
coo <- labe <- coll <- ipch <- fonte <- NULL
df_ind2 <- df_ind_sup <- df_quali.sup <- NULL
# active individuals: pch 20 (filled dot), regular font; non-selected points
# are faded (rgb alpha) or recolored to 'unselect', and lose their label
if (is.na(test.invisible[1])) {
coo <- rbind(coo,coord.actif)
if (lab.ind){ labe <- c(labe,rownames(coord.actif))
} else labe <- c(labe,rep("",nrow(coord.actif)))
coll <- c(coll,color.ind)
ipch <- c(ipch,rep(20,nrow(coord.actif)))
fonte <- c(fonte,rep(1,nrow(coord.actif)))
if (!is.null(selection)){
if (is.numeric(unselect)) coll[!((1:length(coll))%in%selection)] = rgb(t(col2rgb(coll[!((1:length(coll))%in%selection)])),alpha=255*(1-unselect),maxColorValue=255)
else coll[!((1:length(coll))%in%selection)] = unselect
labe[!((1:length(coll))%in%selection)] <- ""
}
df_ind2 <- data.frame(labe,coord.actif,ipch,fonte)
}
if(graph.type == "ggplot") coll2 <- NULL
# supplementary individuals: pch 1 (open circle), italic font (fonte 3)
if (!is.null(res.pca$ind.sup) & is.na(test.invisible[2])) {
coo <- rbind(coo,res.pca$ind.sup$coord[,axes])
if (lab.ind.sup){ labe2 <- rownames(res.pca$ind.sup$coord)
} else labe2 <- rep("",nrow(res.pca$ind.sup$coord))
if (length(color.sup)>1) coll2 <- color.sup
else coll2 <- rep(color.sup,nrow(res.pca$ind.sup$coord))
if (!is.null(selectionS)){
if (is.numeric(unselect)) coll2[!((1:length(coll2))%in%selectionS)] = rgb(t(col2rgb(coll2[!((1:length(coll2))%in%selectionS)])),alpha=255*(1-unselect),maxColorValue=255)
else coll2[!((1:length(coll2))%in%selectionS)] <- unselect
labe2[!((1:length(coll2))%in%selectionS)] <- ""
}
# a "contrib" selection never applies to supplementary individuals
# (they have no contribution): fade all of them
if (length(select)==1){
if (grepl("contrib",select)){
if (is.numeric(unselect)) coll2[1:length(coll2)] = rgb(t(col2rgb(coll2[1:length(coll2)])),alpha=255*(1-unselect),maxColorValue=255)
else coll2[1:length(coll2)] = unselect
labe2[1:length(coll2)] <- ""
}}
df_ind_sup <- data.frame(labe2, res.pca$ind.sup$coord[,axes], coll2, rep(1,nrow(res.pca$ind.sup$coord)), rep(3,nrow(res.pca$ind.sup$coord)))
coll <- c(coll,coll2)
labe <- c(labe,labe2)
ipch <- c(ipch,rep(1,nrow(res.pca$ind.sup$coord)))
fonte <- c(fonte,rep(3,nrow(res.pca$ind.sup$coord)))
}
# supplementary categories: pch 22 (open square), italic; when coloring by a
# qualitative variable, only the categories of THAT variable get color.mod
if (!is.null(coord.quali) & is.na(test.invisible[3])) {
modalite <- res.pca$call$quali.sup$modalite
if (graph.type == "ggplot") col.quali <- col.quali
else{col.quali<-rep(col.quali, length(modalite))}
num.li <- 0
coo <- rbind(coo,coord.quali)
ipch <- c(ipch,rep(22,sum(modalite)))
if (lab.quali){ labe2 <- rownames(coord.quali)
} else labe2 <- rep("",sum(modalite))
labe <- c(labe,labe2)
fonte <- c(fonte,rep(3,sum(modalite)))
for (q in 1:length(modalite)) {
if ((habillage[1] != "none")&(habillage[1] != "ind")&(habillage[1] != "cos2")&(habillage[1] != "contrib")) {
if(!(class(res.pca$call$X[,habillage[1]])[1] %in% c("numeric","double","integer"))){
if (q == match(nom.quali, liste.quali)) coll2 <- color.mod
else coll2 <- rep(col.quali[1],modalite[q])
}} else coll2 <- rep(col.quali,modalite[q])
num.li <- num.li + modalite[q]
}
coll <- c(coll,coll2)
df_quali.sup <- data.frame(labe2, coord.quali, rep(22,nrow(coord.quali)), rep(3,nrow(coord.quali)))
}
# individuals factor map graph, adding the points
if (graph.type == "classic") {
points(coo[, 1], y = coo[, 2], pch = ipch, col = coll, ...)
}
if (graph.type == "ggplot") {
# --- ggplot backend: base plot + active individuals --------------------------
# One branch per habillage mode; each branch adds a geom_point layer and
# builds 'text' (the matching label layer, repelled or nudged), which is added
# later so labels always sit on top.
if (autoLab=="auto") autoLab = (length(which(labe!=""))<50)
df_ind <- data.frame(labe,coo,ipch,fonte)
gg_graph <- ggplot() +
coord_fixed(ratio = 1) +
xlim(xlim) + ylim(ylim) +
geom_hline(yintercept = 0,lty=ggoptions_default$line.lty, lwd = ggoptions_default$line.lwd, color=ggoptions_default$line.color) +
geom_vline(xintercept = 0,lty=ggoptions_default$line.lty, lwd = ggoptions_default$line.lwd, color=ggoptions_default$line.color) +
theme_light() +
labs(title = titre, x = lab.x, y= lab.y)
if(!is.null(select)) df_ind2[,1] <- ifelse(rownames(df_ind2) %in% rownames(df_ind2)[selection], rownames(df_ind2), "")
# transparency encodes selection: unselected points keep alpha*(1-unselect)
transparency_ind <- col2rgb(col.ind,alpha=TRUE)[4]/255
if (!is.null(select)) transparency_ind <- ifelse(rownames(res.pca$ind$coord) %in% rownames(res.pca$ind$coord)[selection], transparency_ind, transparency_ind*(1-unselect))
if((!is.na(test.invisible[1])) & (habillage[1] != "none") & (is.null(legend["title"][[1]]))) legend["title"][[1]] = habillage[1]
if (is.na(test.invisible[1])){
# no coloring variable: plain points in color.ind
if (habillage[1] == "none" | habillage[1]=="ind"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3]), color=color.ind, shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind)
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1]), size = ggoptions_default$size, color = color.ind)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1]), size = ggoptions_default$size, color = color.ind, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
# a single coloring variable: quantitative -> gradient, qualitative -> manual
# palette, "cos2"/"contrib" -> gradient on the corresponding statistic
if (length(habillage) == 1 & (habillage[1] != "none" | habillage[1]!="ind")){
if ((habillage %in% colnames(res.pca$call$X)) & !(habillage %in% liste.quali)){
df_ind2 <- data.frame(df_ind2, (res.pca$call$X)[rownames(df_ind2),habillage])
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = df_ind2[,6]), shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], habillage))
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = df_ind2[,6]), size = ggoptions_default$size, show.legend = FALSE)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = df_ind2[,6]), size = ggoptions_default$size, show.legend = FALSE, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage %in% liste.quali){
df_ind2 <- data.frame(df_ind2, (res.pca$call$X)[rownames(df_ind2),habillage])
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = (res.pca$call$X)[rownames(df_ind2),habillage]), shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_manual(values = palette[1:length(levels((res.pca$call$X)[,habillage]))]) +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], habillage))
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage == "cos2"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], "cos2"))
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), size = ggoptions_default$size, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
# "contrib" gradient: eigenvalue-weighted mean of the two axis contributions
if (habillage == "contrib"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), shape = ggoptions_default$point.shape, size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], "Ctr"))
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), size = ggoptions_default$size, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)
}
}}
# two habillage variables: the first drives color (same four modes as above),
# the second drives the point shape
if(length(habillage) == 2 & habillage[1] != "none"){
if (!(habillage[1] %in% liste.quali)){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], shape = res.pca$call$X[rownames(res.pca$ind$coord),habillage[2]], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = habillage[1], shape = habillage[2])
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage[1] %in% liste.quali){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], shape = res.pca$call$X[rownames(res.pca$ind$coord),habillage[2]], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_manual(values = palette[1:length(levels((res.pca$call$X)[,habillage[1]]))]) +
labs(color = habillage[1], shape = habillage[2])
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$call$X)[rownames(res.pca$ind$coord),habillage[1]]), size = ggoptions_default$size, show.legend = FALSE, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage[1] == "cos2"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]], shape = res.pca$call$X[rownames(res.pca$ind$coord),habillage[2]]), size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = habillage[1], shape = habillage[2])
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = res.pca$ind$cos2[,axes[1]] + res.pca$ind$cos2[,axes[2]]), size = ggoptions_default$size, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)}
}
if (habillage[1] == "contrib"){
gg_graph <- gg_graph +
geom_point(aes(x=df_ind2[,2], y=df_ind2[,3], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1]), shape = res.pca$call$X[rownames(res.pca$ind$coord),habillage[2]]), size = ggoptions_default$size/3, alpha = transparency_ind) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti) +
labs(color = habillage[1], shape = habillage[2])
if (autoLab) text <- ggrepel::geom_text_repel(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_ind2[,2], y=df_ind2[,3], label=df_ind2[,1], color = (res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])), size = ggoptions_default$size, hjust = (-sign(df_ind2[,2])+1)/2, vjust = -sign(df_ind2[,3])*0.75+0.25)
}
}
}
}
if (!is.na(test.invisible[1])){
gg_graph <- gg_graph +
labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], habillage[1]))
}
gg_graph <- gg_graph + theme
# label layer is added last so the labels sit on top of the points
if (is.na(test.invisible[1]) & isTRUE(lab.ind)) gg_graph <- gg_graph + text
# --- Supplementary individuals (open circles, italic labels) ------------------
# Drawn only when present and not made invisible; colored consistently with
# the current habillage mode.  The single-row case is handled separately
# because one-row data frames lose their dimensions in the general code path.
if ((!is.null(res.pca$ind.sup)) && (is.na(test.invisible[2]))){
if(!is.null(select)) df_ind_sup[,1] <- ifelse(rownames(df_ind_sup) %in% rownames(df_ind_sup)[selectionS], rownames(df_ind_sup), "")
if(nrow(res.pca$ind.sup$coord) > 1){
if (isTRUE(lab.ind.sup)){
if (habillage[1] == "none"){ gg_graph <- gg_graph + geom_point(aes(x = df_ind_sup[,2], y = df_ind_sup[,3]), size = ggoptions_default$size/3, color = color.ind.sup, shape = 1)
if (autoLab) text_ind.sup <- ggrepel::geom_text_repel(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1]), color = color.ind.sup, size = ggoptions_default$size, fontface = "italic")
else{text_ind.sup <- geom_text(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1]), color = color.ind.sup, size = ggoptions_default$size, fontface = "italic",hjust = (-sign(df_ind_sup[,2])+1)/2, vjust = -sign(df_ind_sup[,3])*0.75+0.25)}
}
else{ if (habillage[1] == "cos2"){ gg_graph <- gg_graph + geom_point(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size/3, shape = 1)
if (autoLab) text_ind.sup <- ggrepel::geom_text_repel(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1], color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size, fontface = "italic")
else{text_ind.sup <- geom_text(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1], color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size, fontface = "italic", hjust = (-sign(df_ind_sup[,2])+1)/2, vjust = -sign(df_ind_sup[,3])*0.75+0.25)}
}
# "contrib" habillage: supplementary individuals have no contribution,
# so no label layer is produced for them
else{ if (habillage[1] == "contrib") text_ind.sup <- NULL #+ geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], color = res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1]), size = ggoptions_default$size/3, shape = 1)
else{gg_graph <- gg_graph + geom_point(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size/3, shape = 1)
if (autoLab) text_ind.sup <- ggrepel::geom_text_repel(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1], color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size, fontface = "italic", show.legend = FALSE)
else{text_ind.sup <- geom_text(aes(x = df_ind_sup[,2], y = df_ind_sup[,3], label=df_ind_sup[,1], color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size, fontface = "italic", show.legend = FALSE, hjust = (-sign(df_ind_sup[,2])+1)/2, vjust = -sign(df_ind_sup[,3])*0.75+0.25)
}}}}
gg_graph <- gg_graph + text_ind.sup
}
else{
gg_graph <- gg_graph +
geom_point(aes(x = df_ind_sup[,2], y = df_ind_sup[,3]), size = ggoptions_default$size/3, color = color.ind.sup)
}
}
# exactly one supplementary individual: index directly into the coord matrix
else{ if(dim(res.pca$ind.sup$coord)[1] == 1){
if (is.null(select)) selectionS = 1
if (isTRUE(lab.ind.sup)){
if (habillage[1] == "none") gg_graph <- gg_graph + geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]]), size = ggoptions_default$size/3, color = color.ind.sup, shape = 1) + ggrepel::geom_text_repel(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], label=ifelse(!is.null(selectionS), rownames(res.pca$ind.sup$coord), "")), color = color.ind.sup, size = ggoptions_default$size, fontface = "italic")
else{ if (habillage[1] == "cos2") gg_graph <- gg_graph + geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size/3, shape = 1) + ggrepel::geom_text_repel(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], label=ifelse(!is.null(selectionS), rownames(res.pca$ind.sup$coord), ""), color = res.pca$ind.sup$cos2[,axes[1]] + res.pca$ind.sup$cos2[,axes[2]]), size = ggoptions_default$size, fontface = "italic")
else{ if (habillage[1] == "contrib") gg_graph <- gg_graph #+ geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], color = res.pca$ind$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$ind$contrib[,axes[2]]*res.pca$eig[axes[2],1]), size = ggoptions_default$size/3, shape = 1)
else{gg_graph <- gg_graph + geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size/3, shape = 1) + ggrepel::geom_text_repel(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]], label=ifelse(!is.null(selectionS), rownames(res.pca$ind.sup$coord), ""), color = (res.pca$call$X)[rownames(res.pca$ind.sup$coord),habillage[1]]), size = ggoptions_default$size, fontface = "italic", show.legend = FALSE)
}}}
}
else{
gg_graph <- gg_graph +
geom_point(aes(x = res.pca$ind.sup$coord[,axes[1]], y = res.pca$ind.sup$coord[,axes[2]]), size = ggoptions_default$size/3, color = color.ind.sup)
}
}
}
}
# --- Supplementary categories (open squares, italic labels) -------------------
# When habillage is a qualitative variable, the categories of THAT variable
# are colored with the same palette as the individuals; any other
# supplementary categories keep the default col.quali.
if ((!is.null(res.pca$quali.sup)) && (is.na(test.invisible[3]))){
if (isTRUE(lab.quali)){
if((habillage[1] == "none") || !(habillage[1] %in% liste.quali)){
gg_graph <- gg_graph +
geom_point(aes(x = df_quali.sup[,2], y = df_quali.sup[,3]), size = ggoptions_default$size/2.8, color = col.quali, shape = 0)
if (autoLab) text_quali.sup <- ggrepel::geom_text_repel(aes(x = df_quali.sup[,2], y = df_quali.sup[,3], label=df_quali.sup[,1]), color = col.quali, size = ggoptions_default$size, fontface = "italic")
else{text_quali.sup <- geom_text(aes(x = df_quali.sup[,2], y = df_quali.sup[,3], label=df_quali.sup[,1]), color = col.quali, size = ggoptions_default$size, fontface = "italic",hjust = (-sign(df_quali.sup[,2])+1)/2, vjust = -sign(df_quali.sup[,3])*0.75+0.25)}
gg_graph <- gg_graph + text_quali.sup
}
else{
# categories of the habillage variable: one palette color per level
if ((habillage[1] %in% liste.quali) || (colnames(res.pca$call$X))[habillage[1]] %in% liste.quali) {
gg_graph <- gg_graph +
geom_point(aes(x = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[1]], y = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[2]]), size = ggoptions_default$size/2.8, color = palette[1:length(levels(res.pca$call$X[,habillage[1]]))], shape = 0)
if (autoLab) text_quali.sup <- ggrepel::geom_text_repel(aes(x = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[1]], y = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[2]], label=levels(res.pca$call$X[,habillage[1]])), color = palette[1:length(levels(res.pca$call$X[,habillage[1]]))], size = ggoptions_default$size, fontface = "italic")
else{text_quali.sup <- geom_text(aes(x = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[1]], y = res.pca$quali.sup$coord[levels(res.pca$call$X[,habillage[1]]),axes[2]], label=levels(res.pca$call$X[,habillage[1]])), color = palette[1:length(levels(res.pca$call$X[,habillage[1]]))], size = ggoptions_default$size, fontface = "italic",nudge_y=nudge_y,hjust = (-sign(df_quali.sup[,2])+1)/2, vjust = -sign(df_quali.sup[,3])*0.75+0.25)}
gg_graph <- gg_graph + text_quali.sup
}
# remaining categories (from other qualitative variables): default color
if(length(liste.quali) > 1){
gg_graph <- gg_graph +
geom_point(aes(x = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[1]], y = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[2]]), size = ggoptions_default$size/2.8, color = col.quali, shape = 0)
if (autoLab) text_quali.sup <- ggrepel::geom_text_repel(aes(x = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[1]], y = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[2]], label = rownames(res.pca$quali.sup$coord)[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]])))]), color = col.quali, size = ggoptions_default$size, fontface = "italic")
else{text_quali.sup <- geom_text(aes(x = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[1]], y = res.pca$quali.sup$coord[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]]))),axes[2]], label = rownames(res.pca$quali.sup$coord)[which(!(rownames(res.pca$quali.sup$coord) %in% levels(res.pca$call$X[,habillage[1]])))]), color = col.quali, size = ggoptions_default$size, fontface = "italic",nudge_y=nudge_y,hjust = (-sign(df_quali.sup[,2])+1)/2, vjust = -sign(df_quali.sup[,3])*0.75+0.25)}
gg_graph <- gg_graph + text_quali.sup
}
}
}
else{
# no labels requested: points only
gg_graph <- gg_graph +
geom_point(aes(x = df_quali.sup[,2], y = df_quali.sup[,3]), size = ggoptions_default$size/2.8, color = col.quali, shape = 0)
}
}
}
# --- classic backend: labels, then confidence ellipses, then legend ----------
if (graph.type == "classic"){
if (any(labe!="")){
# autoLab "auto": use the repelling label placer only for < 50 labels
if (autoLab=="auto") autoLab = (length(which(labe!=""))<50)
if (autoLab ==TRUE) autoLab(coo[labe!="", 1], y = coo[labe!="", 2], labels = labe[labe!=""], col = coll[labe!=""], font=fonte[labe!=""],shadotext=shadowtext,...)
if (autoLab ==FALSE) text(coo[labe!="", 1], y = coo[labe!="", 2], labels = labe[labe!=""], col = coll[labe!=""], font=fonte[labe!=""],pos=3,...)
}}
# confidence ellipses: one path per category level, colored to match the
# category when habillage uses a qualitative variable
if (!is.null(ellipse)) {
nbre.ellipse <- nlevels(coord.ellipse[, 1])
for (e in 1:nbre.ellipse) {
data.elli <- coord.ellipse[ellipse$res[, 1] == levels(coord.ellipse[, 1])[e], -1]
if(graph.type=="classic"){
if ((habillage[1] != "none")&(habillage[1] != "ind")) lines(x=data.elli[, 1], y = data.elli[, 2], col = palette[color.mod[e]],...)
else lines(x=data.elli[, 1], y = data.elli[, 2], col = palette[col.quali],...)}
else{
if(graph.type=="ggplot"){
if (habillage[1] != "none"){
gg_graph <- gg_graph + geom_path(aes_string(x=data.elli[,1],y=data.elli[,2]), color = palette[color.mod[e]])
}
else {
gg_graph <- gg_graph + geom_path(aes_string(x=data.elli[,1],y=data.elli[,2]), color = palette[col.quali])
}
}
}
}
}
# if ((habillage != "none")&(habillage != "ind")) legend("topleft",legend= levels(res.pca$call$X[,habillage]),text.col= color.mod,cex=par("cex")*0.8)
# classic legend for a qualitative habillage; user-supplied 'legend' options
# override the defaults via modifyList()
if ((habillage[1] != "none") & (habillage[1] != "ind") & (habillage[1] != "cos2") & (habillage[1] != "contrib") & (graph.type == "classic")) {
L <- list(x="topleft", legend = levels(res.pca$call$X[, habillage[1]]), text.col = color.mod, cex = par("cex") * 0.8)
L <- modifyList(L, legend)
do.call(graphics::legend, L)
}
}
# --- choix == "varcor": plot correlations instead of coordinates --------------
# Temporarily substitute the correlation matrices for the coordinate matrices
# so the shared "var" code below draws correlations; scale.unit is forced so
# the unit correlation circle is drawn.  ('sauv' keeps the original coords.)
if (choix == "varcor") {
sauv <- res.pca$var$coord
res.pca$var$coord <- res.pca$var$cor
if (!is.null(res.pca$quanti.sup)) res.pca$quanti.sup$coord <- res.pca$quanti.sup$cor
res.pca$call$scale.unit <- TRUE
}
# --- Graph of variables: title and 'select' parsing ---------------------------
# Same select grammar as for individuals ("contrib k", "coord k", "cos2 v",
# names, or indices); 'selection' = active variables, 'selectionS' =
# supplementary quantitative variables.
if ((choix == "var")||(choix == "varcor")) {
if (is.null(title)) titre <- "PCA graph of variables"
else titre <- title
selection <- selectionS <- NULL
if (!is.null(select)) {
if (mode(select)=="numeric") selection <- select
else {
if (sum(rownames(res.pca$var$coord)%in%select)+sum(rownames(res.pca$quanti.sup$coord)%in%select)!=0) selection <- which(rownames(res.pca$var$coord)%in%select)
else {
if (grepl("contrib",select[1])) selection <- (rev(order(res.pca$var$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$var$contrib[,axes[2]]*res.pca$eig[axes[2],1])))[1:min(nrow(res.pca$var$coord),sum(as.integer(unlist(strsplit(select,"contrib"))),na.rm=T))]
if (grepl("coord",select[1])) selection <- (rev(order(apply(res.pca$var$coord[,axes]^2,1,sum))))[1:min(nrow(res.pca$var$coord),sum(as.integer(unlist(strsplit(select,"coord"))),na.rm=T))]
if (grepl("cos2",select[1])) {
if (sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T)>=1) selection <- (rev(order(apply(res.pca$var$cos2[,axes],1,sum))))[1:min(nrow(res.pca$var$coord),sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))]
else selection <- which(apply(res.pca$var$cos2[,axes],1,sum)>sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))
}
if (is.integer(select)) selection <- select
}
}
}
# supplementary quantitative variables: "contrib" is meaningless for them
if ((!is.null(select))&(!is.null(res.pca$quanti.sup))) {
if (mode(select)=="numeric") selectionS <- select
else {
if (sum(rownames(res.pca$var$coord)%in%select)+sum(rownames(res.pca$quanti.sup$coord)%in%select)!=0) selectionS <- which(rownames(res.pca$quanti.sup$coord)%in%select)
else {
if (grepl("contrib",select[1])) selectionS <- NULL
if (grepl("coord",select[1])) selectionS <- (rev(order(apply(res.pca$quanti.sup$coord[,axes]^2,1,sum))))[1:min(nrow(res.pca$quanti.sup$coord),sum(as.integer(unlist(strsplit(select,"coord"))),na.rm=T))]
if (grepl("cos2",select[1])) {
if (sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T)>=1) selectionS <- (rev(order(apply(res.pca$quanti.sup$cos2[,axes],1,sum))))[1:min(nrow(res.pca$quanti.sup$coord),sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))]
else selectionS <- which(apply(res.pca$quanti.sup$cos2[,axes],1,sum)>sum(as.numeric(unlist(strsplit(select,"cos2"))),na.rm=T))
}
if (is.integer(select)) selectionS <- select
}
}
}
test.invisible <- vector(length = 2)
if (!is.null(invisible)) {
test.invisible[1] <- match("var", invisible)
test.invisible[2] <- match("quanti.sup", invisible)
}
else test.invisible <- rep(NA, 2)
scale.unit <- res.pca$call$scale.unit
coord.var <- res.pca$var$coord[, axes,drop=FALSE]
if (!is.null(res.pca$quanti.sup)) coord.quanti <- res.pca$quanti.sup$coord[, axes, drop=FALSE]
else coord.quanti <- NULL
if (scale.unit) xlim <- ylim <- c(-1, 1)
else {
xmin <- min(0,coord.var[, 1], coord.quanti[, 1])
xmax <- max(0,coord.var[, 1], coord.quanti[, 1])
ymin <- min(0,coord.var[, 2], coord.quanti[, 2])
ymax <- max(0,coord.var[, 2], coord.quanti[, 2])
xlim <- c(xmin, xmax) * 1.2
ylim <- c(ymin, ymax) * 1.2
}
if ((new.plot)&!nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY"))) dev.new()
if (is.null(palette)) palette(c("black","red","green3","blue","cyan","magenta","darkgray","darkgoldenrod","darkgreen","violet","turquoise","orange","lightpink","lavender","yellow","lightgreen","lightgrey","lightblue","darkkhaki", "darkmagenta","darkolivegreen","lightcyan", "darkorange", "darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkslategrey","darkturquoise","darkviolet", "lightgray","lightsalmon","lightyellow", "maroon"))
# cercle variables factor map
if (graph.type == "classic") {
if (scale.unit) {
plot(0, 0, xlab = lab.x, ylab = lab.y, xlim = xlim, ylim = ylim, col = "white", asp=1, main=titre,...)
x.cercle <- seq(-1, 1, by = 0.01)
y.cercle <- sqrt(1 - x.cercle^2)
lines(x.cercle, y = y.cercle,...)
lines(x.cercle, y = -y.cercle,...)
}
else {
plot(0, 0, main = titre, xlab = lab.x, ylab = lab.y, xlim = xlim, ylim = ylim, col = "white", asp=1, ...)
}
abline(v=0,lty=2,...)
abline(h=0,lty=2,...)
}
#
coll <- coo <- labe <- posi <- NULL
if (!is.null(coord.var[ which(apply(res.pca$var$cos2[, axes,drop=FALSE],1,sum, na.rm = TRUE) >= lim.cos2.var),])&is.na(test.invisible[1])&(nrow(coord.var)>0)){
coord.var <- coord.var[ which(apply(res.pca$var$cos2[, axes,drop=FALSE],1,sum, na.rm = TRUE) >= lim.cos2.var),,drop=FALSE]
coo <- coord.var
if (length(col.var)==1) coll <- c(coll,rep(col.var,nrow(coord.var)))
else coll <- col.var
if (!is.null(col.hab)) coll <- col.hab[which(colnames(res.pca$call$X)%in%rownames(res.pca$var$coord))]
if (lab.var){ labe <- c(labe,rownames(coord.var))
} else labe <- c(labe,rep("",nrow(coord.var)))
if (!is.null(selection)){
if (is.numeric(unselect)) coll[!((1:length(coll))%in%selection)] = rgb(t(col2rgb(coll[!((1:length(coll))%in%selection)])),alpha=255*(1-unselect),maxColorValue=255)
else coll[!((1:length(coll))%in%selection)] = unselect
labe[!((1:length(coll))%in%selection)] <- ""
}
if(graph.type == "ggplot"){
df_var <- df_quanti.sup <- NULL
df_var <- data.frame(labe,coord.var,coll)
} else {
for (v in 1:nrow(coord.var)) {
arrows(0, 0, coord.var[v, 1], coord.var[v, 2], length = 0.1, angle = 15, code = 2, col = coll[v])
if (lab.var) {
if (abs(coord.var[v,1])>abs(coord.var[v,2])){
if (coord.var[v,1]>=0) posi<-c(posi,4)
else posi<-c(posi,2)
}
else {
if (coord.var[v,2]>=0) posi<-c(posi,3)
else posi<-c(posi,1)
}
}
}
}
}
#
if (!is.null(coord.quanti)){
if (!is.null(coord.quanti[ which(apply(res.pca$quanti.sup$cos2[, axes,drop=FALSE],1,sum, na.rm = TRUE) >= lim.cos2.var),])& is.na(test.invisible[2]) & (nrow(coord.quanti)>0)) {
coord.quanti <- coord.quanti[ which(apply(res.pca$quanti.sup$cos2[, axes,drop=FALSE],1,sum, na.rm = TRUE) >= lim.cos2.var),,drop=FALSE]
coo <- rbind(coo,coord.quanti)
if (length(col.quanti.sup)==1) col.quanti.sup<-rep(col.quanti.sup, nrow(coord.quanti))
if (is.null(col.hab)) coll2 <- col.quanti.sup
else coll2 <- col.hab[which(colnames(res.pca$call$X)%in%colnames(res.pca$call$quanti.sup))]
if (lab.quanti){ labe2 <- rownames(coord.quanti)
} else labe2 <- rep("",nrow(coord.quanti))
if (length(select)==1){
if (grepl("contrib",select)){
if (is.numeric(unselect)) coll2[1:length(coll2)] = rgb(t(col2rgb(coll2[1:length(coll2)])),alpha=255*(1-unselect),maxColorValue=255)
else coll2[1:length(coll2)] = unselect
labe2[1:length(coll2)] <- ""
}}
if (!is.null(selectionS)){
if (is.numeric(unselect)) coll2[!((1:length(coll2))%in%selectionS)] = rgb(t(col2rgb(coll2[!((1:length(coll2))%in%selectionS)])),alpha=255*(1-unselect),maxColorValue=255)
else coll2[!((1:length(coll2))%in%selectionS)] = unselect
labe2[!((1:length(coll2))%in%selectionS)] <- ""
}
if (graph.type == "ggplot") df_quanti.sup <- data.frame(labe2,coord.quanti,coll2)
#
if (graph.type == "classic"){
for (q in 1:nrow(coord.quanti)) {
arrows(0, 0, coord.quanti[q, 1], coord.quanti[q, 2], length = 0.1, angle = 15, code = 2, lty = 2, col=coll2[q])
#
if (lab.quanti) {
if (abs(coord.quanti[q,1])>abs(coord.quanti[q,2])){
if (coord.quanti[q,1]>=0) posi<-c(posi,4)
else posi<-c(posi,2)
}
else {
if (coord.quanti[q,2]>=0) posi<-c(posi,3)
else posi<-c(posi,1)
}
}
}}
labe <- c(labe,labe2)
coll <- c(coll,coll2)
}
}
if (graph.type == "classic"){
if (any(labe!="")){
if (autoLab=="auto") autoLab = (length(which(labe!=""))<50)
if (autoLab==FALSE) text(coo[labe!="", 1], y = coo[labe!="", 2], labels = labe[labe!=""], pos = posi[labe!=""], col = coll[labe!=""],...)
if (autoLab==TRUE) autoLab(coo[labe!="", 1], y = coo[labe!="", 2], labels = labe[labe!=""], col = coll[labe!=""], shadotext=shadowtext,...)
}
} else {
if((!is.na(test.invisible[1])) & (habillage[1] != "none") & (is.null(legend["title"][[1]]))){
legend["title"][[1]] = habillage[1]}
if (autoLab=="auto") autoLab = (length(which(labe!=""))<50)
if (length(habillage) != 1){
warning("Habillage must be length 1")
habillage <- habillage[1]
}
if (!(habillage[1] %in% c("contrib","cos2","none"))){
warning("Habillage must be in c('contrib','cos2','none')")
habillage <- "none"
}
circle <- annotate("path",
x=0+1*cos(seq(0,2*pi,length.out=100)),
y=0+1*sin(seq(0,2*pi,length.out=100)),
lty = ggoptions_default$circle.lty,
lwd = ggoptions_default$circle.lwd,
color = ggoptions_default$circle.color)
transparency_var <- ifelse(rownames(res.pca$var$coord) %in% labe, 1, 1-unselect)
gg_graph <- ggplot() +
coord_fixed(ratio = 1) +
geom_line(aes(x=x, y=y), data=data.frame(x=-1:1,y=0),lty=ggoptions_default$line.lty, lwd = ggoptions_default$line.lwd, color=ggoptions_default$line.color) +
geom_line(aes(x=x, y=y), data=data.frame(x=0,y=-1:1),lty=ggoptions_default$line.lty, lwd = ggoptions_default$line.lwd, color=ggoptions_default$line.color) +
theme_light()
if (is.na(test.invisible[1])){
if (((habillage[1] != "contrib") & (habillage[1] != "cos2"))) {
gg_graph <- gg_graph +
aes(x=df_var[,2], y=df_var[,3]) +
geom_segment(aes(x=0,y=0,xend=df_var[,2], yend=df_var[,3]),arrow=arrow(length=unit(0.2,"cm")),alpha = transparency_var, lty = ggoptions_default$segment.lty, lwd = ggoptions_default$segment.lwd, color=col.var)
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_var[,2], y=df_var[,3],label=df_var[,1]), size = ggoptions_default$size, color = col.var)
else{text <- geom_text(aes(x=df_var[,2], y=df_var[,3],label=df_var[,1]), size = ggoptions_default$size, color = col.var, hjust = (-sign(df_var[,2])+1)/2, vjust = -sign(df_var[,3])*0.75+0.25)}
}
if (habillage[1] == "cos2" || habillage[1] == "contrib"){
if (habillage[1] == "cos2") df_var[,4] <- res.pca$var$cos2[,axes[1]] + res.pca$var$cos2[,axes[2]]
if (habillage[1] == "contrib") df_var[,4] <- (res.pca$var$contrib[,axes[1]]*res.pca$eig[axes[1],1]+res.pca$var$contrib[,axes[2]]*res.pca$eig[axes[2],1])/(res.pca$eig[axes[1],1]+res.pca$eig[axes[2],1])
gg_graph <- gg_graph +
aes(x=df_var[,2], y=df_var[,3],color = df_var[,4]) +
geom_segment(aes(x=0,y=0,xend=df_var[,2], yend=df_var[,3],col = df_var[,4]),arrow=arrow(length=unit(0.2,"cm")), alpha = transparency_var, lty = ggoptions_default$segment.lty, lwd = ggoptions_default$segment.lwd) +
scale_color_gradient(low=ggoptions_default$low.col.quanti, high=ggoptions_default$high.col.quanti)
if (habillage[1] == "cos2") gg_graph <- gg_graph + labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], "cos2"))
if (habillage[1] == "contrib") gg_graph <- gg_graph + labs(color = ifelse(legend["title"] %in% legend, legend["title"][[1]], "Ctr"))
if(autoLab) text <- ggrepel::geom_text_repel(aes(x=df_var[,2], y=df_var[,3],label=df_var[,1], color = df_var[,4]), size = ggoptions_default$size)
else{text <- geom_text(aes(x=df_var[,2], y=df_var[,3],label=df_var[,1], color = df_var[,4]), size = ggoptions_default$size, hjust = (-sign(df_var[,2])+1)/2, vjust = -sign(df_var[,3])*0.75+0.25)}
}
}
if (!is.na(test.invisible[1])){
gg_graph <- gg_graph +
labs(color = legend["title"][[1]])
}
if ((!is.null(res.pca$quanti.sup))&(is.na(test.invisible[2]))){
transparency_quanti <- ifelse(rownames(res.pca$quanti.sup$coord) %in% labe, 1, 1-unselect)
if (isTRUE(lab.quanti)){
if(habillage[1] == "contrib") text_quanti.sup <- NULL
if (habillage[1] == "none"){ gg_graph <- gg_graph + geom_segment(aes(x=0,y=0,xend=df_quanti.sup[,2], yend=df_quanti.sup[,3]),arrow=arrow(length=unit(0.2,"cm")),lty = 2, color = col.quanti.sup,alpha = transparency_quanti)
if (autoLab) text_quanti.sup <- ggrepel::geom_text_repel(aes(x = df_quanti.sup[,2], y = df_quanti.sup[,3], label=df_quanti.sup[,1]), color = col.quanti.sup, size = ggoptions_default$size,alpha = transparency_quanti)
else{text_quanti.sup <- geom_text(aes(x = df_quanti.sup[,2], y = df_quanti.sup[,3], label=df_quanti.sup[,1]), color = col.quanti.sup, size = ggoptions_default$size,hjust = (-sign(df_quanti.sup[,2])+1)/2, vjust = -sign(df_quanti.sup[,3])*0.75+0.25,alpha = transparency_quanti)}
}
if (habillage[1] == "cos2"){gg_graph <- gg_graph + geom_segment(aes(x=0,y=0,xend=df_quanti.sup[,2], yend=df_quanti.sup[,3], color = res.pca$quanti.sup$cos2[,axes[1]] + res.pca$quanti.sup$cos2[,axes[2]]),arrow=arrow(length=unit(0.2,"cm")),lty = 2,alpha = transparency_quanti)
if (autoLab) text_quanti.sup <- ggrepel::geom_text_repel(aes(x = df_quanti.sup[,2], y = df_quanti.sup[,3], label=df_quanti.sup[,1], color = res.pca$quanti.sup$cos2[,axes[1]] + res.pca$quanti.sup$cos2[,axes[2]]), size = ggoptions_default$size,alpha = transparency_quanti)
else{text_quanti.sup <- geom_text(aes(x = df_quanti.sup[,2], y = df_quanti.sup[,3], label=df_quanti.sup[,1], color = res.pca$quanti.sup$cos2[,axes[1]] + res.pca$quanti.sup$cos2[,axes[2]]), size = ggoptions_default$size,hjust = (-sign(df_quanti.sup[,2])+1)/2, vjust = -sign(df_quanti.sup[,3])*0.75+0.25,alpha = transparency_quanti)}
}
gg_graph <- gg_graph + text_quanti.sup
} else{
gg_graph <- gg_graph +
geom_segment(aes(x=0,y=0,xend=df_quanti.sup[,2], yend=df_quanti.sup[,3]),arrow=arrow(length=unit(0.2,"cm")),lty = 2, color = col.quanti.sup)
}
}
gg_graph <- gg_graph + theme + circle + labs(title = titre, x = lab.x, y= lab.y)
if (is.na(test.invisible[1]) & (isTRUE(lab.var))) gg_graph <- gg_graph + text
}
}
palette(old.palette)
if(graph.type == "ggplot") return(gg_graph)
}
|
# PAGE=239
# Upper-tail chi-square critical values at significance level 0.05,
# rounded to one decimal place (Schaum's Statistics, Ex 11.11.11).
#
# The original recomputed `1 - alpha` before every lookup and reused
# `a` as a scratch variable that was clobbered three times; compute the
# cumulative probability once and give each result its own name.
alpha <- 0.05
p <- 1 - alpha  # cumulative probability of the upper-tail critical value
a <- round(qchisq(p, df = 15), digits = 1)  # df = 15
a
b <- round(qchisq(p, df = 21), digits = 1)  # df = 21
b
c <- round(qchisq(p, df = 50), digits = 1)  # df = 50
c
|
/Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH11/EX11.11.11/Ex11_11_11.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false
| false
| 210
|
r
|
# PAGE=239
# Upper-tail chi-square critical values at significance level 0.05,
# rounded to one decimal place (Schaum's Statistics, Ex 11.11.11).
#
# The original recomputed `1 - alpha` before every lookup and reused
# `a` as a scratch variable that was clobbered three times; compute the
# cumulative probability once and give each result its own name.
alpha <- 0.05
p <- 1 - alpha  # cumulative probability of the upper-tail critical value
a <- round(qchisq(p, df = 15), digits = 1)  # df = 15
a
b <- round(qchisq(p, df = 21), digits = 1)  # df = 21
b
c <- round(qchisq(p, df = 50), digits = 1)  # df = 50
c
|
# Cluster samples by kinship and assign genotype groups.
#
# Command-line arguments:
#   args[1] - input/output file prefix
#   args[2] - cut height for the dendrogram (numeric)
#
# Reads "<prefix>-<height>.kinf" (a tab-separated kinship matrix),
# performs average-linkage hierarchical clustering, cuts the tree at the
# requested height, and writes the group ids to "<prefix>_<height>.genotype".
args <- commandArgs(TRUE)
in_file <- paste(args[1], '-', args[2], '.kinf', sep = "")
data <- read.table(in_file, sep = '\t', header = TRUE)
d <- as.dist(data)
out.hclust <- hclust(d, method = "average")
# BUG FIX: cutree() needs a numeric height; commandArgs() returns
# character, and comparing numeric heights against a string silently
# falls back to lexicographic comparison.
out.id <- cutree(out.hclust, h = as.numeric(args[2]))
out_file <- paste(args[1], '_', args[2], '.genotype', sep = "")
write.table(out.id, sep = "\t", col.names = FALSE, quote = FALSE, file = out_file)
|
/Diploid-Outcrossing/diploid_cross_cluster.R
|
no_license
|
Tomcxf/OutcrossSeq
|
R
| false
| false
| 350
|
r
|
# Cluster samples by kinship and assign genotype groups.
#
# Command-line arguments:
#   args[1] - input/output file prefix
#   args[2] - cut height for the dendrogram (numeric)
#
# Reads "<prefix>-<height>.kinf" (a tab-separated kinship matrix),
# performs average-linkage hierarchical clustering, cuts the tree at the
# requested height, and writes the group ids to "<prefix>_<height>.genotype".
args <- commandArgs(TRUE)
in_file <- paste(args[1], '-', args[2], '.kinf', sep = "")
data <- read.table(in_file, sep = '\t', header = TRUE)
d <- as.dist(data)
out.hclust <- hclust(d, method = "average")
# BUG FIX: cutree() needs a numeric height; commandArgs() returns
# character, and comparing numeric heights against a string silently
# falls back to lexicographic comparison.
out.id <- cutree(out.hclust, h = as.numeric(args[2]))
out_file <- paste(args[1], '_', args[2], '.genotype', sep = "")
write.table(out.id, sep = "\t", col.names = FALSE, quote = FALSE, file = out_file)
|
# Combine the test-set MSE of every fitted model into one tibble and
# plot them side by side.
#
# NOTE(review): MSE_lsr, MSE_ridge, MSE_lasso, MSE_pcr, MSE_pls and
# MSE_boost_reg are created by earlier scripts in this pipeline and are
# assumed to each hold a single test MSE value -- confirm against the
# upstream scripts. The label vector below must stay in the same order
# as the objects passed to rbind().
combined_linear_mse <- as_tibble(rbind(MSE_lsr, MSE_ridge, MSE_lasso, MSE_pcr, MSE_pls, MSE_boost_reg)) %>%
mutate(Fit_Technique = c("Least Squares", "Ridge", "Lasso", "PCR", "PLS", "Boosted Regression"))
# Rename both columns: the bound MSE values and the technique label.
colnames(combined_linear_mse)<-c("MSE", "Model")
# Pre-round the MSE so the bar labels stay readable.
combined_linear_mse<- combined_linear_mse %>%
mutate(MSE_round = round (MSE, 3))
# Bar chart of MSE per model; the outer parentheses force the plot to
# print even when this assignment runs inside a sourced script.
(MSE_plot <-ggplot(combined_linear_mse, aes (x= Model, y = MSE))+
geom_bar(stat = "identity")+
labs(
title = "Overall MSE for Each Regression Type",
subtitle = "Predicting the Quality Asssement"
)+
geom_label(aes(x= Model, y = MSE, label = MSE_round ), position = "dodge"))
|
/src/09_summary_MSE.R
|
no_license
|
medewitt/wine_analysis
|
R
| false
| false
| 684
|
r
|
# Combine the test-set MSE of every fitted model into one tibble and
# plot them side by side.
#
# NOTE(review): MSE_lsr, MSE_ridge, MSE_lasso, MSE_pcr, MSE_pls and
# MSE_boost_reg are created by earlier scripts in this pipeline and are
# assumed to each hold a single test MSE value -- confirm against the
# upstream scripts. The label vector below must stay in the same order
# as the objects passed to rbind().
combined_linear_mse <- as_tibble(rbind(MSE_lsr, MSE_ridge, MSE_lasso, MSE_pcr, MSE_pls, MSE_boost_reg)) %>%
mutate(Fit_Technique = c("Least Squares", "Ridge", "Lasso", "PCR", "PLS", "Boosted Regression"))
# Rename both columns: the bound MSE values and the technique label.
colnames(combined_linear_mse)<-c("MSE", "Model")
# Pre-round the MSE so the bar labels stay readable.
combined_linear_mse<- combined_linear_mse %>%
mutate(MSE_round = round (MSE, 3))
# Bar chart of MSE per model; the outer parentheses force the plot to
# print even when this assignment runs inside a sourced script.
(MSE_plot <-ggplot(combined_linear_mse, aes (x= Model, y = MSE))+
geom_bar(stat = "identity")+
labs(
title = "Overall MSE for Each Regression Type",
subtitle = "Predicting the Quality Asssement"
)+
geom_label(aes(x= Model, y = MSE, label = MSE_round ), position = "dodge"))
|
#' Imputation by robust matrix decomposition
#'
#' Decomposes an expression matrix Y into a low-rank part L (denoised
#' expression) plus a sparse non-negative part S (candidate dropouts)
#' with ADMM, solving
#'   minimize 1/2 ||Y - (Z - S)||_F^2 + lambda ||L||_* + tau ||S||_1
#'   subject to Z = L, Z >= 0, P_Omega(S) = 0, P_{Omega^c}(S) >= 0,
#' where Omega is the set of entries judged reliably observed.
#'
#' @param Y A single cell RNA data matrix; rows representing cells.
#' @param tau Tuning parameter to penalize the sparsity of S; estimated
#'   from the data when NULL.
#' @param lambda Tuning parameter to penalize the rank of L; estimated
#'   from the data when NULL.
#' @param initL The initialization of L; defaults to singular value
#'   thresholding of Y.
#' @param initS The initialization of S.
#' @param initLambda The initialization of the dual variable Lambda.
#' @param maxiter Maximum number of ADMM iterations.
#' @param abstol Absolute convergence tolerance.
#' @param reltol Relative convergence tolerance.
#' @param rho Initial ADMM penalty parameter; adapted during the run.
#' @param overrelax Over-relaxation factor for the ADMM updates.
#' @param candidate Cutoff below which an entry is treated as a
#'   candidate dropout.
#' @param econ Use the economical (truncated) SVD when 1; disabled
#'   automatically for large matrices.
#' @param verbose Report the iteration counter while running
#'   (default FALSE; replaces an unconditional debug print).
#' @return A list with L, S, the rank r, the dual variable Lambda, the
#'   imputed matrix exprs, the tuning parameters used, and the ADMM
#'   convergence history.
#' @export
#' @import RSpectra
#' @import corpcor
#' @author Chong Chen, \email{cheung1990@126.com}
rmd <- function(Y, tau = NULL, lambda = NULL, initL = NULL, initS = NULL, initLambda = NULL, maxiter = 100,
                abstol = 1e-3, reltol = 1e-3, rho = 1, overrelax = 1.5, candidate = 0.05, econ = 1,
                verbose = FALSE) {
  n <- dim(Y)[1]  # n and p are exchangeable here
  p <- dim(Y)[2]
  if (min(p, n) > 1000) {
    econ <- 0  # the truncated SVD path is only used for small matrices
  }
  # Entries above the cutoff are treated as observed (no dropout allowed).
  Omega <- (Y > candidate)
  if (is.null(lambda)) {
    lambda <- (sqrt(n) + sqrt(p)) * sd(Y)
  }
  if (is.null(initL)) {
    initL <- svt(Y, lambda, econ)$A.svt
  }
  if (is.null(initS)) {
    initS <- matrix(0, n, p)
    initS[!Omega] <- initL[!Omega]
  }
  if (is.null(tau)) {
    tau <- sd(Y[Omega] - initL[Omega])
  }
  if (is.null(initLambda)) {
    initLambda <- matrix(0, n, p)
  }
  L <- initL; S <- initS; Z <- L; Lambda <- initLambda; alpha <- overrelax
  history <- list(rho = c(), s_norm = c(), r_norm = c(), tol_pri = c(), tol_dual = c())
  for (k in seq_len(maxiter)) {
    if (verbose) message("rmd iteration ", k)
    # -- update (Z, S) --
    # On Omega: S is constrained to 0; Z is a non-negative projected
    # average of Y and the current L.
    S[Omega] <- 0
    tmp <- (Y[Omega] + rho * L[Omega] - Lambda[Omega]) / (1 + rho)
    Z[Omega] <- pmax(tmp, 0)
    # On !Omega: closed-form split between the two cases s = 0 and
    # s = z - tau, chosen entrywise via `index`.
    index <- L[!Omega] < (1 + rho) * tau / rho + Lambda[!Omega] / rho
    tmp1 <- pmax((rho * L[!Omega] - Lambda[!Omega]) / (1 + rho), 0)  # case s = 0
    tmp2 <- pmax(L[!Omega] - Lambda[!Omega] / rho - tau / rho, 0)    # case s = z - tau
    tmpS <- pmax(tmp2 - tau, 0); tmpS[index] <- 0
    tmpZ <- tmp2; tmpZ[index] <- tmp1[index]
    S[!Omega] <- tmpS
    Z[!Omega] <- tmpZ
    # -- over-relaxation --
    Z_hat <- alpha * Z + (1 - alpha) * L
    # -- update L: singular value thresholding, projected onto >= 0 --
    L_old <- L
    tmp <- Z_hat + Lambda / rho
    svts <- svt(tmp, lambda / rho, econ)
    L <- svts$A.svt
    L[L < 0] <- 0
    r <- svts$r
    # -- dual update --
    Lambda <- Lambda + rho * (Z_hat - L)
    # -- convergence diagnostics: standard ADMM primal/dual residuals --
    history$rho[k] <- rho
    history$r_norm[k] <- norm(Z - L, "F")
    history$s_norm[k] <- norm((L - L_old), "F") * rho
    history$tol_pri[k] <- sqrt(n * p) * abstol + reltol * max(norm(Z, "F"), norm(L, "F"))
    history$tol_dual[k] <- sqrt(p * n) * abstol + reltol * norm(Lambda, "F")
    if (history$r_norm[k] < history$tol_pri[k] && history$s_norm[k] < history$tol_dual[k]) break
    # Adapt rho to keep the primal and dual residuals balanced.
    if (history$r_norm[k] > 10 * history$s_norm[k]) {
      rho <- rho * 2
    } else if (history$s_norm[k] > 10 * history$r_norm[k]) {
      rho <- max(rho / 2, 1e-4)
    }
  }
  # Impute: replace detected dropouts (S > 0) with the low-rank
  # estimate, then clip negative expression values to zero.
  exprs <- Y
  exprs[S > 0] <- L[S > 0]
  exprs[exprs < 0] <- 0
  return(list(L = L, S = S, r = r, Lambda = Lambda, exprs = exprs, tau = tau, lambda = lambda, history = history))
}
|
/R/rmd.R
|
no_license
|
ChongC1990/scRMD
|
R
| false
| false
| 3,358
|
r
|
#' Imputation by robust matrix decomposition
#'
#' Decomposes an expression matrix Y into a low-rank part L (denoised
#' expression) plus a sparse non-negative part S (candidate dropouts)
#' with ADMM, solving
#'   minimize 1/2 ||Y - (Z - S)||_F^2 + lambda ||L||_* + tau ||S||_1
#'   subject to Z = L, Z >= 0, P_Omega(S) = 0, P_{Omega^c}(S) >= 0,
#' where Omega is the set of entries judged reliably observed.
#'
#' @param Y A single cell RNA data matrix; rows representing cells.
#' @param tau Tuning parameter to penalize the sparsity of S; estimated
#'   from the data when NULL.
#' @param lambda Tuning parameter to penalize the rank of L; estimated
#'   from the data when NULL.
#' @param initL The initialization of L; defaults to singular value
#'   thresholding of Y.
#' @param initS The initialization of S.
#' @param initLambda The initialization of the dual variable Lambda.
#' @param maxiter Maximum number of ADMM iterations.
#' @param abstol Absolute convergence tolerance.
#' @param reltol Relative convergence tolerance.
#' @param rho Initial ADMM penalty parameter; adapted during the run.
#' @param overrelax Over-relaxation factor for the ADMM updates.
#' @param candidate Cutoff below which an entry is treated as a
#'   candidate dropout.
#' @param econ Use the economical (truncated) SVD when 1; disabled
#'   automatically for large matrices.
#' @param verbose Report the iteration counter while running
#'   (default FALSE; replaces an unconditional debug print).
#' @return A list with L, S, the rank r, the dual variable Lambda, the
#'   imputed matrix exprs, the tuning parameters used, and the ADMM
#'   convergence history.
#' @export
#' @import RSpectra
#' @import corpcor
#' @author Chong Chen, \email{cheung1990@126.com}
rmd <- function(Y, tau = NULL, lambda = NULL, initL = NULL, initS = NULL, initLambda = NULL, maxiter = 100,
                abstol = 1e-3, reltol = 1e-3, rho = 1, overrelax = 1.5, candidate = 0.05, econ = 1,
                verbose = FALSE) {
  n <- dim(Y)[1]  # n and p are exchangeable here
  p <- dim(Y)[2]
  if (min(p, n) > 1000) {
    econ <- 0  # the truncated SVD path is only used for small matrices
  }
  # Entries above the cutoff are treated as observed (no dropout allowed).
  Omega <- (Y > candidate)
  if (is.null(lambda)) {
    lambda <- (sqrt(n) + sqrt(p)) * sd(Y)
  }
  if (is.null(initL)) {
    initL <- svt(Y, lambda, econ)$A.svt
  }
  if (is.null(initS)) {
    initS <- matrix(0, n, p)
    initS[!Omega] <- initL[!Omega]
  }
  if (is.null(tau)) {
    tau <- sd(Y[Omega] - initL[Omega])
  }
  if (is.null(initLambda)) {
    initLambda <- matrix(0, n, p)
  }
  L <- initL; S <- initS; Z <- L; Lambda <- initLambda; alpha <- overrelax
  history <- list(rho = c(), s_norm = c(), r_norm = c(), tol_pri = c(), tol_dual = c())
  for (k in seq_len(maxiter)) {
    if (verbose) message("rmd iteration ", k)
    # -- update (Z, S) --
    # On Omega: S is constrained to 0; Z is a non-negative projected
    # average of Y and the current L.
    S[Omega] <- 0
    tmp <- (Y[Omega] + rho * L[Omega] - Lambda[Omega]) / (1 + rho)
    Z[Omega] <- pmax(tmp, 0)
    # On !Omega: closed-form split between the two cases s = 0 and
    # s = z - tau, chosen entrywise via `index`.
    index <- L[!Omega] < (1 + rho) * tau / rho + Lambda[!Omega] / rho
    tmp1 <- pmax((rho * L[!Omega] - Lambda[!Omega]) / (1 + rho), 0)  # case s = 0
    tmp2 <- pmax(L[!Omega] - Lambda[!Omega] / rho - tau / rho, 0)    # case s = z - tau
    tmpS <- pmax(tmp2 - tau, 0); tmpS[index] <- 0
    tmpZ <- tmp2; tmpZ[index] <- tmp1[index]
    S[!Omega] <- tmpS
    Z[!Omega] <- tmpZ
    # -- over-relaxation --
    Z_hat <- alpha * Z + (1 - alpha) * L
    # -- update L: singular value thresholding, projected onto >= 0 --
    L_old <- L
    tmp <- Z_hat + Lambda / rho
    svts <- svt(tmp, lambda / rho, econ)
    L <- svts$A.svt
    L[L < 0] <- 0
    r <- svts$r
    # -- dual update --
    Lambda <- Lambda + rho * (Z_hat - L)
    # -- convergence diagnostics: standard ADMM primal/dual residuals --
    history$rho[k] <- rho
    history$r_norm[k] <- norm(Z - L, "F")
    history$s_norm[k] <- norm((L - L_old), "F") * rho
    history$tol_pri[k] <- sqrt(n * p) * abstol + reltol * max(norm(Z, "F"), norm(L, "F"))
    history$tol_dual[k] <- sqrt(p * n) * abstol + reltol * norm(Lambda, "F")
    if (history$r_norm[k] < history$tol_pri[k] && history$s_norm[k] < history$tol_dual[k]) break
    # Adapt rho to keep the primal and dual residuals balanced.
    if (history$r_norm[k] > 10 * history$s_norm[k]) {
      rho <- rho * 2
    } else if (history$s_norm[k] > 10 * history$r_norm[k]) {
      rho <- max(rho / 2, 1e-4)
    }
  }
  # Impute: replace detected dropouts (S > 0) with the low-rank
  # estimate, then clip negative expression values to zero.
  exprs <- Y
  exprs[S > 0] <- L[S > 0]
  exprs[exprs < 0] <- 0
  return(list(L = L, S = S, r = r, Lambda = Lambda, exprs = exprs, tau = tau, lambda = lambda, history = history))
}
|
#
# STEP 1
#
# read the data; column 1 is the response (MPG), the remaining
# columns are the predictors
#
cars <- read.csv("cars.csv")
#
# STEP 2
#
# split data into "in-sample data" and "out-of-sample" data
# (also known as "train" and "test" sets)
#
# BUG FIX: the split point must be derived from the cars data being
# modelled, not from the unrelated built-in mtcars dataset
#
split <- floor(nrow(cars) * .75)
in_sample_data <- cars[1:split, ]
out_sample_data <- cars[(split+1):nrow(cars), ]
#
# STEP 3
#
# build the response vector y and the design matrix X from the
# in-sample data. a leading column of ones is bound onto X so the
# model intercept is included in the prediction
#
y <- in_sample_data[1]
y <- t(matrix(unlist(y), ncol=nrow(y), byrow=TRUE))
X <- in_sample_data[2:ncol(in_sample_data)]
X <- t(matrix(unlist(X), ncol=nrow(X), byrow=TRUE))
X <- cbind(1, X)
#
# STEP 4
#
# ordinary least squares: beta = (X'X)^-1 X'y minimises the sum of
# squared residuals
#
# note: in R, `t()` is a matrix transpose,
#       and `solve()` is a matrix inverse
#
beta <- solve((t(X) %*% X)) %*% t(X) %*% y
cat("Model parameters: \n")
cat("\t", t(beta), "\n\n")
#
# STEP 5
#
# apply the fitted parameters to any matching vector of predictors:
#
#   B0 + B1*X1 + B2*X2 + ... + Bk*Xk
#
predict.mlr <- function(beta, xs) {
  beta[1] + sum(beta[2:length(beta)] * xs)
}
#
# STEP 6
#
# predict the response for each row of the out-of-sample dataset
# (preallocate instead of growing the vector inside the loop)
#
predictions <- numeric(nrow(out_sample_data))
for(i in seq_len(nrow(out_sample_data))) {
  xs <- unlist(out_sample_data[i, 2:ncol(out_sample_data)], use.names=FALSE)
  predictions[i] <- predict.mlr(beta, xs)
}
#
# STEP 7
#
# out-of-sample R^2 = 1 - (SSE/SST), where SSE is the Sum of Squared
# Regression Error and SST is the Sum of Squared Total Error
#
actuals <- out_sample_data[, 1]
SSE <- sum((actuals - predictions)^2)
SST <- sum((actuals - mean(actuals))^2)
R2 <- (1 - (SSE/SST))
cat("Out-of-Sample R-Squared: \n")
cat("\t", R2, "\n\n")
|
/mlr.r
|
no_license
|
dbusteed/hello-regression
|
R
| false
| false
| 2,350
|
r
|
#
# STEP 1
#
# read the data; column 1 is the response (MPG), the remaining
# columns are the predictors
#
cars <- read.csv("cars.csv")
#
# STEP 2
#
# split data into "in-sample data" and "out-of-sample" data
# (also known as "train" and "test" sets)
#
# BUG FIX: the split point must be derived from the cars data being
# modelled, not from the unrelated built-in mtcars dataset
#
split <- floor(nrow(cars) * .75)
in_sample_data <- cars[1:split, ]
out_sample_data <- cars[(split+1):nrow(cars), ]
#
# STEP 3
#
# build the response vector y and the design matrix X from the
# in-sample data. a leading column of ones is bound onto X so the
# model intercept is included in the prediction
#
y <- in_sample_data[1]
y <- t(matrix(unlist(y), ncol=nrow(y), byrow=TRUE))
X <- in_sample_data[2:ncol(in_sample_data)]
X <- t(matrix(unlist(X), ncol=nrow(X), byrow=TRUE))
X <- cbind(1, X)
#
# STEP 4
#
# ordinary least squares: beta = (X'X)^-1 X'y minimises the sum of
# squared residuals
#
# note: in R, `t()` is a matrix transpose,
#       and `solve()` is a matrix inverse
#
beta <- solve((t(X) %*% X)) %*% t(X) %*% y
cat("Model parameters: \n")
cat("\t", t(beta), "\n\n")
#
# STEP 5
#
# apply the fitted parameters to any matching vector of predictors:
#
#   B0 + B1*X1 + B2*X2 + ... + Bk*Xk
#
predict.mlr <- function(beta, xs) {
  beta[1] + sum(beta[2:length(beta)] * xs)
}
#
# STEP 6
#
# predict the response for each row of the out-of-sample dataset
# (preallocate instead of growing the vector inside the loop)
#
predictions <- numeric(nrow(out_sample_data))
for(i in seq_len(nrow(out_sample_data))) {
  xs <- unlist(out_sample_data[i, 2:ncol(out_sample_data)], use.names=FALSE)
  predictions[i] <- predict.mlr(beta, xs)
}
#
# STEP 7
#
# out-of-sample R^2 = 1 - (SSE/SST), where SSE is the Sum of Squared
# Regression Error and SST is the Sum of Squared Total Error
#
actuals <- out_sample_data[, 1]
SSE <- sum((actuals - predictions)^2)
SST <- sum((actuals - mean(actuals))^2)
R2 <- (1 - (SSE/SST))
cat("Out-of-Sample R-Squared: \n")
cat("\t", R2, "\n\n")
|
#' open_repo
#'
#' @description Open the git repository located at the current working
#'   directory.
#' @param repo_obj An initialized repository (a git2r repository
#'   object); only consulted to decide whether opening is allowed.
#' @return
#' Summary of the repository.
#' @details NOTE(review): the guard opens the repository only when
#'   \code{repo_obj} is *empty* (has no commits) and errors otherwise,
#'   which looks inverted relative to the error message -- confirm the
#'   intended condition against the callers before changing it.
#' @export
open_repo <- function(repo_obj) {
  if (git2r::is_empty(repo_obj)) {
    repo <- git2r::repository(normalizePath(getwd()))
  } else {
    # The paste0() wrapper around a single literal was redundant;
    # the message itself is unchanged.
    stop("There is no git repository to open.")
  }
  return(summary(repo))
}
|
/R/open_repo.R
|
no_license
|
AngelosPsy/pssr
|
R
| false
| false
| 407
|
r
|
#' open_repo
#'
#' @description Open the git repository located at the current working
#'   directory.
#' @param repo_obj An initialized repository (a git2r repository
#'   object); only consulted to decide whether opening is allowed.
#' @return
#' Summary of the repository.
#' @details NOTE(review): the guard opens the repository only when
#'   \code{repo_obj} is *empty* (has no commits) and errors otherwise,
#'   which looks inverted relative to the error message -- confirm the
#'   intended condition against the callers before changing it.
#' @export
open_repo <- function(repo_obj) {
  if (git2r::is_empty(repo_obj)) {
    repo <- git2r::repository(normalizePath(getwd()))
  } else {
    # The paste0() wrapper around a single literal was redundant;
    # the message itself is unchanged.
    stop("There is no git repository to open.")
  }
  return(summary(repo))
}
|
#
# Title: Web Scraping In R
# Purpose: (Knowledge Development) Learning to webscrape with R
# Author: Billy Caughey
# Date: 2020.11.19 - Initial build
#
##### Introduction to HTML #####
library(rvest)
# <h[1-5]> are header tacks
# <p> are paragraph tacks
# <ul> <li> is how to open a bulleted list
# <a href = 'site.com'>link</a> is how to set up a link in R, <a> is the tack and href is the attribute
# library(rvest) is the work horse from Tidyverse
# xml_structure(html) look at the html structure/basic outline of the document
# read_html(html) helps r read HTML in an understable way
##### Web Scraping is Cool #####
## Part 1: Read in the excerpt into R
html_excerpt_raw <- '
<html>
<body>
<h1>Web scraping is cool</h1>
<p>It involves writing code – be it R or Python.</p>
<p>DataCamp has courses on it.</p>
</body>
</html>'
# Turn the raw excerpt into an HTML document R understands
html_excerpt <- read_html(html_excerpt_raw)
html_excerpt
## Part 2: Alter the html so it points to https://datacamp.com
html_excerpt_raw <- '
<html>
<body>
<h1>Web scraping is cool</h1>
<p>It involves writing code – be it R or Python.</p>
<p><a href="https://datacamp.com">DataCamp</a>
has courses on it.</p>
</body>
</html>'
# Turn the raw excerpt into an HTML document R understands
html_excerpt <- read_html(html_excerpt_raw)
html_excerpt
## Part 3: Use the xml_structure() function to get a better overview of the tag hierarchy of the HTML excerpt.
html_excerpt_raw <- '
<html>
<body>
<h1>Web scraping is cool</h1>
<p>It involves writing code – be it R or Python.</p>
<p><a href="https://datacamp.com">DataCamp</a>
has courses on it.</p>
</body>
</html>'
# Turn the raw excerpt into an HTML document R understands
html_excerpt <- read_html(html_excerpt_raw)
html_excerpt
# Print the HTML excerpt with the xml_structure() function
xml_structure(html_excerpt)
##### Beware of syntax errors #####
broken_html_raw <- "\n
<html> \n
<h1Web scraping is cool</h1>\n
<p>It involves writing code – be it R or Python.</p>\n
<p><a href=\"https://datacamp.com\">DataCamp</a> \n
\t\thas courses on it.</p>\n
</html>"
# missing ">" on the <h1 tack
##### Navigating HTML #####
## Tree Structure ##
# <html> <--- Root Node, the child is <body>
# <body>
# <div> <--- Child of <body> tack
# <p>The first paragraph.</p>
# </div>
# <div> <--- Child of <body> tack
# Not an actual paragraph, but with a <a ref = "#">link</a>.
# </div>
# <p> A paragraph without an enclosing div. </p>
# </body>
# </html>
# html <- read_html(html_document):
# html_children(html): Takes an xml structure and returns the nodeset (or children) of the root node
# html_text(): extracts all the text from the children of the root node
# html_node(): Returns the first node that matches my selection
# html_nodes(): can take an html document or an html selector
# html selector: Defines a bath through the html tree
# html_nodes("div p"): returns the paragraphs of a div node
# html_nodes("p"): selects all the paragraphs in the html tree
# html_nodes("div p") == html %>% html_nodes("div") %>% html_nodes("p")
# html_attr: Extracts attributes from the html tree
# html %>% html_node("a") %>% html_attr("href"): returns the href attribute from the "a" node
# html_attrs(): returns all the attributes of a node as a named vector
##### Select all children of a list #####
# Part 1: Read in the corresponding HTML string
list_html <- read_html(list_html_raw)
# Part 2: # Extract the ol node
ol_node <- list_html %>% html_node("ol")
# Part 3: Extract and print the nodeset of all the children of ol_node
html_children(ol_node)
##### Parse hyperlinks into a data frame #####
# Part 1:
# Extract all the a nodes from the bulleted list
links <- hyperlink_html %>%
read_html() %>%
html_nodes("ul a") # Remember, I am looking for the bullet list!
# Parse the nodes into a data frame
link_df <- tibble(
domain = links %>% html_attr("href"),
name = links %>% html_text()
)
link_df
##### Scrape your first table #####
## Basic Outline of a table ##
# <table> <-- Designates table
# <tr> <-- Designates a table row
# <th>Name</td><td>Profession</td><td>Age</td><td>Country</th> <-- Table header tacks
# </tr>
# <tr>
# <td>Dillon Arroyo</td><td>Carpenter</td><td>54</td><td>UK</td>
# </tr>
# <tr>
# <td>Rebecca Douglas></td><td>Developer</td><td>32</td><td>USA</td>
# </td>
# </table>
# html <- read_html(table_html) # table with <th> header cells
# html %>% html_table() : reads in table, converts the table for R to read
# html <- read_html(table_html) # table without <th> header cells
# html %>% html_table(header = TRUE) : reads in table, converts the table for R to read, first row is header
# html %>% html_table(header = TRUE, fill = TRUE) : reads in table, converts the table for R to read, first row is header, fills in missing entries with NA
##### Turn a table into a data frame with html_table() #####
# Part 1: Extract the "clean" table into a data frame
mountains <- mountains_html %>%
html_node("table#clean") %>%
html_table()
mountains
# Part 2: Extract the "dirty" table into a data frame
mountains <- mountains_html %>%
html_node("table#dirty") %>%
html_table(header = T, fill = T)
mountains
##### Introduction to CSS #####
# CSS = Cascading Style Sheets
# Selectors I have seen:
# Type Selector:
type {
key:value;
}
html %>% html_nodes('type') # e.g., 'h1', 'a', or 'span'
type1, type2 {
key: value;
}
html %>% html_nodes("type1, type2")
# Universal Selector
* {
key: value;
}
html %>% html_nodes("*")
##### Select multiple HTML types #####
languages_raw_html <- "
<html>
<body>
<div>Python is perfect for programming.</div>
<p>Still, R might be better suited for data analysis.</p>
<small>(And has prettier charts, too.)</small>
</body>
</html>"
# Read in the HTML
languages_html <- read_html(languages_raw_html)
# Select the div and p tags and print their text
languages_html %>%
html_nodes("div, p") %>%
html_text()
##### CSS Classes and IDs #####
# Classes can categorize HTML elements into certain style groups
# Class is just another HTML attribute - similar to the 'href' attribute of the 'a' element
# Classes are specified with a '.' at the front of it
# If I looking for a specific class node, I write:
html %>% html_nodes(".alert")
# If I am looking for multiple classes, I can write:
html %>% html_nodes(".alert.emph") # Notice the lack of spaces
# If I write:
html %>% html_nodes(".alert, .emph")
# Then I am selecting elements with the ".alert", ".emph", and ".alert.emph" classes
# IDs are special class of classes.
# IDs are UNIQUE! This means only ONE element should have an ID
# In CSS, IDs have a '#' at the beginning like '#special'
# In R, this is done in the following way:
html %>% html_nodes("#special")
# If I want a specific element-class type, I use the following:
html %>% html_nodes("a.alert")
# If I want a specific element-ID type, I use the following: (I can do this with use the ID as they are unique)
html %>% html_nodes("div#special") # Remember this from scraping tables???
# There are pseudo classes as well. There are three basic classes:
# first-child
# nth-child
# last-child
# Consider the following HTML code:
<ol>
<li>First element.</li> # html %>% html_nodes("li:first-child")
<li>Second element.</li> # html %>% html_nodes("li:nth-child")
<li>Third element.</li> # html %>% html_nodes("li:last-child") OR html %>% html_nodes("li:nth-child(3)")
</ol>
Selector Type HTML CSS Selector
Type <p>...</p> p
Multiple Types <p>...</p><div>...</div> p, div
Class <p class = 'x'>...<p> .x
Multiple Classes <p class = 'x y'>...<p> .x.y
Type + Class             <p class = 'x'>...</p>                        p.x
ID                       <p id = 'x'>...</p>                           #x
Type + Pseudo Class <p>...</p><p>...</p> p:first-child
##### Leverage the uniqueness of IDs #####
structured_html_raw <- "
<html>
<body>
<div id = 'first'>
<h1 class = 'big'>Joe Biden</h1>
<p class = 'first blue'>Democrat</p>
<p class = 'second blue'>Male</p>
</div>
<div id = 'second'>...</div>
<div id = 'third'>
<h1 class = 'big'>Donald Trump</h1>
<p class = 'first red'>Republican</p>
<p class = 'second red'>Male</p>
</div>
</body>
</html>"
structured_html <- read_html(structured_html_raw)
# Select the first div
structured_html %>%
html_nodes("div#first")
# Select the last child of each p group
nested_html_raw <- "
<html>
<body>
<div>
<p class = 'text'>A sophisticated text [...]</p>
<p class = 'text'>Another paragraph following [...]</p>
<p class = 'text'>Author: T.G.</p>
</div>
<p>Copyright: DC</p>
</body>
</html>"
nested_html <- read_html(nested_html_raw)
nested_html %>%
html_nodes("p:last-child")
# This time for real: Select only the last node of the p's wrapped by the div
nested_html %>%
html_nodes("div p.text:last-child")
##### CSS combinators #####
# There are four common combinators: [space], >, +, ~
# Basic structure: h2#someid {space | > | + | ~ } .someclass
# [space]: descendant combinator (parent to any nested descendant)
html %>% html_nodes("div.first a")
# '>': child combinator - only selects DIRECT descendants of the parent
html %>% html_nodes("div.first > a")  # fixed typo: html_ndoes -> html_nodes
# '+': adjacent sibling combinator - selects THE NEXT direct sibling (oldest to youngest)
html %>% html_nodes("div.first + div")
# '~': general sibling combinator - selects all the siblings of the first which match the element specified
html %>% html_nodes("div.first ~ div")
# I can use the wildcard to select ALL siblings of the first no matter the element type
html %>% html_nodes("div.first ~ *")
# So why use combinators? Because there are websites without classes and ids!
##### Select direct descendents with the child combinator #####
# Extract the text of all list elements
languages_html %>%
html_nodes("li") %>%
html_text()
# Extract only the text of the computer languages (without the sub lists)
languages_html %>%
html_nodes('ul#languages li') %>%
html_text()
# Extract only the text of the computer languages (without the sub lists)
languages_html %>%
html_nodes('ul > li') %>%
html_text()
# Extract only the text of the computer languages (without the sub lists)
languages_html %>%
html_nodes('ul#languages li') %>%
html_text()
# Extract only the text of the computer languages (without the sub lists)
languages_html %>%
html_nodes('ul#languages > li') %>%
html_text()
##### Simply the best #####
complicated_html_raw <- '<html>
<body>
<div class="first section">
A text with a <a href="#">link</a>.
</div>
<div class="second section">
Some text with <a href="#">another link</a>.
<div class="first paragraph">Some text.</div>
<div class="second paragraph">Some more text.
<div>...</div>
</div>
</div>
</body>
</html>'
complicated_html <- read_html(complicated_html_raw)
# Select the three divs with a simple selector
complicated_html %>%
html_nodes("div div")
##### Not every sibling is the same #####
code_html_raw <- "
<html>
<body>
<h2 class = 'first'>First example:</h2>
<code>some = code(2)</code>
<span>will compile to...</span>
<code>some = more_code()</code>
<h2 class = 'second'>Second example:</h2>
<code>another = code(3)</code>
<span>will compile to...</span>
<code>another = more_code()</code>
</body>
</html>"
code_html <- read_html(code_html_raw)
# Select only the first code element in the second example
code_html %>%
html_nodes("h2.second + code")
# Select all code elements in the second example
code_html %>%
html_nodes("h2.second ~ code")
##### Introduction to XPATH #####
# XPATH = XML path language
# This is a different pathway than the CSS pathway
# Can go down AND up the HTML tree
# Suppose I want to select the element 'p' nodes...
# CSS: html %>% html_nodes('p')
# XPATH: html %>% html_nodes(xpath = "//p")
# Using the same example... the path would be...
# CSS: html %>% html_nodes("body p") OR html %>% html_nodes("html > body p")
# XPATH: html %>% html_nodes("//body//p") OR html %>% html_nodes("/html/body//p")
# Suppose I want to select 'p' tacks that direct children of 'div' tacks...
# CSS: html %>% html_nodes("div > p")
# XPATH: html %>% html_nodes("//div/p")
# Suppose I want to select 'div' tacks that only have an 'a' tack child...
# CSS: There is not a way to do this...
# XPATH: html %>% html_nodes("div[a]")
# Syntax of XPATH
# Axes: '/' or '//'; '/' is direct child; '//' general descendent relationship
# Steps: HTML types like 'span' and 'a'
# Predicates: [...]
##### Select by class and ID with XPATH #####
weather_html_raw <- "
<html>
<body>
<div id = 'first'>
<h1 class = 'big'>Berlin Weather Station</h1>
<p class = 'first'>Temperature: 20°C</p>
<p class = 'second'>Humidity: 45%</p>
</div>
<div id = 'second'>...</div>
<div id = 'third'>
<p class = 'first'>Sunshine: 5hrs</p>
<p class = 'second'>Precipitation: 0mm</p>
</div>
</body>
</html>"
weather_html <- read_html(weather_html_raw)
# Select all p elements
weather_html %>%
html_nodes(xpath = '//p')
# Select p elements with the second class
weather_html %>%
html_nodes(xpath = "//p[@class = 'second']")
# Select p elements that are children of "#third"
weather_html %>%
html_nodes(xpath = "//div[@id = 'third']")
# Select p elements with class "second" that are children of "#third"
weather_html %>%
html_nodes(xpath = "//div[@id = 'third']//p[@class = 'second']")
##### Use predicates to select nodes based on their children #####
# Select all divs
weather_html %>%
html_nodes(xpath = "//div")
# Select all divs with p descendants
weather_html %>%
html_nodes(xpath = '//div[p]')
# Select all divs with p descendants having the "third" class
weather_html %>%
html_nodes(xpath = "//div[p[@class = 'third']]") # This reaches down TWO layers
##### XPATH functions and advanced predicates #####
# position() : selects the nth element
# CSS: html %>% html_nodes(css = "ol > li:nth-child(2)")
# XPATH: html %>% html_nodes(xpath = "//ol/li[position() = 2]")
# Now suppose I want to get the first two elements... This can't be done in CSS, but with XPATH...
# html %>% html_nodes(xpath = "//ol/li[position() < 3]")
# Now suppose i want all the elements except for the third... again, CSS can't do this... but with XPATH...
# html %>% html_nodes(xpath = "//ol/li[position() != 3]")
# Now suppose I want all the elements except for the third BUT they need to have class blue...
# html %>% html_nodes(xpath = "//ol/li[position() != 3 and @class = 'blue']")
# count() : we can select the nodes with a specific number of children
# html %>% html_nodes(xpath = "//ol[count(li) = 2]")
##### Get to know the position() function #####
rules_html_raw <- "
<body>
<div>
<h2>Today's rules</h2>
<p>Wear a mask</p>
<p>Wash your hands</p>
</div>
<div>
<h2>Tomorrow's rules</h2>
<p>Wear a mask</p>
<p>Wash your hands</p>
<p>Bring hand sanitizer with you</p>
</div>
</body>"
rules_html <- read_html(rules_html_raw)
# Select the text of the second p in every div
rules_html %>%
html_nodes(xpath = "//div/p[position() = 2]") %>%
html_text()
# Select every p except the second from every div
rules_html %>%
html_nodes(xpath = "//div/p[position() != 2]") %>%
html_text()
# Select the text of the last three nodes of the second div
rules_html %>%
html_nodes(xpath = "//div[count(p) = 3]/p") %>%
html_text()
##### Extract nodes based on the number of their children #####
forecast_html_raw <- "
<body>
<div>
<h1>Tomorrow</h1>
</div>
<div>
<h2>Berlin</h2>
<p>Temperature: 20°C</p>
<p>Humidity: 50%</p>
</div>
<div>
<h2>London</h2>
<p>Temperature: 15°C</p>
</div>
<div>
<h2>Zurich</h2>
<p>Temperature: 22°C</p>
<p>Humidity: 60%</p>
</div>
</body>"
forecast_html <- read_html(forecast_html_raw)
# Select only divs with one header and at least one paragraph
forecast_html %>%
html_nodes(xpath = "//div[count(h2) = 1 and count(p) > 1]")
##### The XPATH text() function #####
actor_html_raw <-'
<html>
<body>
<table id = "cast">
<tr><td class = "actor">Arnold S.</td><td class = "role"><em>1</em> (Voice)</td></tr>
<tr><td class = "actor">Burt R.</td><td class = "role"><em>2</em> (Choreo)</td></tr>
<tr><td class = "actor">Charlize T.</td><td class = "role"><em>3</em> (Voice)</td></tr>
</table>
</body>
</html>
'
actor_html <- read_html(actor_html_raw)
# suppose I want to access the emphasis elements and not the role. This is impossible in CSS.
# The closest I can get is:
actor_html %>%
html_nodes("#cast td.role") %>%
html_text()
# this can be done in XPATH
actor_html %>%
html_nodes(xpath = '//*[@id = "cast"]//td[@class = "role"]') %>% # equal to '#cast td.role'
html_nodes(xpath = "./text()") %>% # This only selects text from the "td" element pulled in the line previous
html_text(trim = T)
# The text() function can do something else: act as a selector by text
# Suppose I am only interested in pulling the rows where the actor gave the character its voice
actor_html %>%
html_nodes(xpath = '//*[@id = "cast"]//td[@class = "role" and text() = " (Voice)"]')
# Now, I want to select the entire row... I can use the 'parent' filter to pull the entire 'tr' element
actor_html %>%
html_nodes(xpath = '//*[@id = "cast"]//td[@class = "role" and text() = " (Voice)"]') %>%
html_nodes(xpath = '..')
##### The shortcomings of html_table() with badly structured tables #####
roles_html_raw <- '
<table>
<tr>
<th>Actor</th>
<th>Role</th>
</tr>
<tr>
<td class = "actor">Jayden Carpenter</td>
<td class = "role"><em>Mickey Mouse</em> (Voice)</td>
</tr>
...
</table>'
roles_html <- read_html(roles_html_raw)
# Extract the data frame from the table using a known function from rvest
roles <- roles_html %>%
html_node(xpath = "//table") %>%
html_table()
# Print the contents of the role data frame
print(roles)
##### Select directly from a parent element with XPATHs text() #####
# Extract the actors in the cells having class "actor"
actors <- roles_html %>%
html_nodes(xpath = '//table//td[@class = "actor"]') %>%
html_text()
actors
# Extract the roles in the cells having class "role"
roles <- roles_html %>%
html_nodes(xpath = '//table//td[@class = "role"]/em') %>%
html_text()
roles
# Extract the functions using the appropriate XPATH function
functions <- roles_html %>%
html_nodes(xpath = '//table//td[@class = "role"]/text()') %>%
html_text(trim = TRUE)
functions
##### Combine extracted data into a data frame #####
# Create a new data frame from the extracted vectors
cast <- tibble(
Actor = actors,
Role = roles,
Function = functions)
cast
##### Scrape an element based on its text ####
programming_html_raw <- "
<body>
<h3>The rules of programming</h3>
<ol>
<li>Have <em>fun</em>.</li>
<li><strong>Don't</strong> repeat yourself.</li>
<li>Think <em>twice</em> when naming variables.</li>
</ol>
</body>"
programming_html <- read_html(programming_html_raw)
# Select all li elements
programming_html %>%
html_nodes(xpath = '//li')
# Select all li elements
programming_html %>%
html_nodes(xpath = '//li') %>%
# Select all em elements within li elements that have "twice" as text
html_nodes(xpath = '//em[text() = "twice"]')
# Select all li elements
programming_html %>%
html_nodes(xpath = '//li') %>%
# Select all em elements within li elements that have "twice" as text
html_nodes(xpath = 'em[text() = "twice"]') %>%
# Wander up the tree to select the parent of the em
html_nodes(xpath = "..")
##### The nature of HTTP requests #####
# GET() : used to fetch a resource without submitting data
# POST() : Used to send data to a server, e.g. after filling out a form on a page
# With the httr library, I can send HTTP requests from my R session
library(httr)
GET('https://httpbin.org')
# What I got from the website was the HTML code from the website. I can save this in a variable 'response'.
# Using the 'content' function, I can get the HTML code of the website
response <- GET('https://httpbin.org')
content(response)
##### Do it the httr way #####
# Get the HTML document from Wikipedia using httr
wikipedia_response <- GET('https://en.wikipedia.org/wiki/Varigotti')
# Check the status code of the response
status_code(wikipedia_response)
# Parse the response into an HTML doc
wikipedia_page <- content(wikipedia_response)
wikipedia_page
# Extract the altitude with XPATH
wikipedia_page %>%
html_nodes(xpath = '//table//tr[count(preceding-sibling::*)=8]/td') %>%
html_text()
wikipedia_page %>%
html_nodes(xpath = '//table//tr[position()=9]/td') %>%
html_text()
##### Houston, we got a 404 #####
response <- GET('https://en.wikipedia.org/wiki/Varigott')
# Print status code of inexistent page
status_code(response)
##### Telling who you are with custom user agents #####
# When scraping the web, use my email address so they know who scrapes it.
# Modify headers with httr: user_agent() returns a request-config object, so
# it is passed to GET() as an unnamed config argument — `user_agent = "..."`
# is NOT a valid GET() argument and would be silently ignored via `...`.
response <- GET('http://example.com',
                user_agent("Hey, its me, Timo! Reach me at timo@timogrossenbacher.ch."))
# I can make the user agent 'standard' for the whole session. Note the HTTP
# header field is spelled "User-Agent" (with a hyphen); "User Agent" would
# send a nonstandard header that servers ignore. (Also fixed the mistyped
# contact address ".chc." -> ".ch.")
set_config(add_headers(`User-Agent` = "Hey, it's me, Timo! Reach me at timo@timogrossenbacher.ch."))
response <- GET("http://example.com")
##### Check out your user agent #####
# Access https://httpbin.org/headers with httr
response <- GET("https://httpbin.org/headers")
# Print its content
print(content(response))
##### Add a custom user agent #####
# Pass a custom user agent to a GET query to the mentioned URL
# (string fixed to "A request from ..." to match the intent stated below)
response <- GET("https://httpbin.org/user-agent",
                config = user_agent("A request from a DataCamp course on scraping"))
# Print the response content
print(content(response))
# Globally set the user agent to "A request from a DataCamp course on scraping"
set_config(add_headers(`User-Agent` = "A request from a DataCamp course on scraping"))
# Pass a custom user agent to a GET query to the mentioned URL
response <- GET("https://httpbin.org/user-agent")
# Print the response content
content(response)
##### How to be gentle and slow down your requests #####
# Throttling: pausing between successive requests instead of hammering the
# server. Without throttling, a loop like this fires requests back to back:
# while(TRUE){
#   print(Sys.time())
#   response <- GET("https://httpbin.org")
#   print(status_code(response))
# }
# A nicer way of requesting data from websites:
# use the 'purrr' library to slow down requests
library(purrr)
throttled_GET <- slowly(~ GET("https://httpbin.org"),
                        rate = rate_delay(3)) # Delays each call by 3 seconds
# This version is hard coded and can't adjust very well. To make the URL an
# argument, pass the function itself to slowly() — it wraps GET and keeps its
# signature. (The original `slowly(GET(.), ...)` is a bug: GET(.) is evaluated
# immediately and errors, since slowly() expects a function, not a call.)
throttled_GET <- slowly(GET,
                        rate = rate_delay(3))
# I can now call throttled_GET(website) and interchange websites without hardcoding
##### Custom arguments for throttled functions #####
# Why is wikipedia printed instead of Google?
throttled_read_html <- slowly(~ read_html("https://wikipedia.org"),
rate = rate_delay(0.5))
for(i in c(1, 2, 3)){
throttled_read_html("https://google.com") %>%
html_node("title") %>%
html_text() %>%
print()
}
##### Apply throttling to a multipage crawler #####
# Define a throttled read_html() function with a delay of 0.5s.
# slowly() must wrap the function itself (or a lambda that forwards its
# argument). Hard-coding a URL inside the lambda — as the original did with
# the K2 page — makes read_html_delayed(page_url) silently ignore page_url
# and fetch the same page on every iteration.
read_html_delayed <- slowly(read_html,
                            rate = rate_delay(0.5))
# Construct a loop that goes over all page urls
for(page_url in mountain_wiki_pages){
  # Read in the html of each URL with a delay of 0.5s
  html <- read_html_delayed(page_url)
  # Extract the name of the peak and its coordinates
  peak <- html %>%
    html_nodes("h1.firstHeading") %>% html_text()
  coords <- html %>%
    html_nodes("#coordinates .geo-dms") %>% html_text()
  print(paste(peak, coords, sep = ": "))
}
|
/Web Scraping In R/Web Scraping in R.R
|
no_license
|
wcaughey1984/datacamp
|
R
| false
| false
| 24,819
|
r
|
#
# Title: Web Scraping In R
# Purpose: (Knowledge Development) Learning to webscrape with R
# Author: Billy Caughey
# Date: 2020.11.19 - Initial build
#
##### Introduction to HTML #####
library(rvest)
# <h[1-5]> are header tacks
# <p> are paragraph tacks
# <ul> <li> is how to open a bulleted list
# <a href = 'site.com'>link</a> is how to set up a link in R, <a> is the tack and href is the attribute
# library(rvest) is the work horse from Tidyverse
# xml_structure(html) look at the html structure/basic outline of the document
# read_html(html) helps R read HTML in an understandable way
##### Web Scraping is Cool #####
## Part 1: Read in the excerpt into R
html_excerpt_raw <- '
<html>
<body>
<h1>Web scraping is cool</h1>
<p>It involves writing code – be it R or Python.</p>
<p>DataCamp has courses on it.</p>
</body>
</html>'
# Turn the raw excerpt into an HTML document R understands
html_excerpt <- read_html(html_excerpt_raw)
html_excerpt
## Part 2: Alter the html so it points to https://datacamp.com
html_excerpt_raw <- '
<html>
<body>
<h1>Web scraping is cool</h1>
<p>It involves writing code – be it R or Python.</p>
<p><a href="https://datacamp.com">DataCamp</a>
has courses on it.</p>
</body>
</html>'
# Turn the raw excerpt into an HTML document R understands
html_excerpt <- read_html(html_excerpt_raw)
html_excerpt
## Part 3: Use the xml_structure() function to get a better overview of the tag hierarchy of the HTML excerpt.
html_excerpt_raw <- '
<html>
<body>
<h1>Web scraping is cool</h1>
<p>It involves writing code – be it R or Python.</p>
<p><a href="https://datacamp.com">DataCamp</a>
has courses on it.</p>
</body>
</html>'
# Turn the raw excerpt into an HTML document R understands
html_excerpt <- read_html(html_excerpt_raw)
html_excerpt
# Print the HTML excerpt with the xml_structure() function
xml_structure(html_excerpt)
##### Beware of syntax errors #####
broken_html_raw <- "\n
<html> \n
<h1Web scraping is cool</h1>\n
<p>It involves writing code – be it R or Python.</p>\n
<p><a href=\"https://datacamp.com\">DataCamp</a> \n
\t\thas courses on it.</p>\n
</html>"
# missing ">" on the <h1 tack
##### Navigating HTML #####
## Tree Structure ##
# <html> <--- Root Node, the child is <body>
# <body>
# <div> <--- Child of <body> tack
# <p>The first paragraph.</p>
# </div>
# <div> <--- Child of <body> tack
# Not an actual paragraph, but with a <a ref = "#">link</a>.
# </div>
# <p> A paragraph without an enclosing div. </p>
# </body>
# </html>
# html <- read_html(html_document):
# html_children(html): Takes an xml structure and returns the nodeset (or children) of the root node
# html_text(): extracts all the text from the children of the root node
# html_node(): Returns the first node that matches my selection
# html_nodes(): can take an html document or an html selector
# html selector: Defines a path through the html tree
# html_nodes("div p"): returns the paragraphs of a div node
# html_nodes("p"): selects all the paragraphs in the html tree
# html_nodes("div p") == html %>% html_nodes("div") %>% html_nodes("p")
# html_attr: Extracts attributes from the html tree
# html %>% html_node("a") %>% html_attr("href"): returns the href attribute from the "a" node
# html_attrs(): returns all the attributes of a node as a named vector
##### Select all children of a list #####
# Part 1: Read in the corresponding HTML string
list_html <- read_html(list_html_raw)
# Part 2: # Extract the ol node
ol_node <- list_html %>% html_node("ol")
# Part 3: Extract and print the nodeset of all the children of ol_node
html_children(ol_node)
##### Parse hyperlinks into a data frame #####
# Part 1:
# Extract all the a nodes from the bulleted list
links <- hyperlink_html %>%
read_html() %>%
html_nodes("ul a") # Remember, I am looking for the bullet list!
# Parse the nodes into a data frame
link_df <- tibble(
domain = links %>% html_attr("href"),
name = links %>% html_text()
)
link_df
##### Scrape your first table #####
## Basic Outline of a table ##
# <table> <-- Designates table
# <tr> <-- Designates a table row
# <th>Name</td><td>Profession</td><td>Age</td><td>Country</th> <-- Table header tacks
# </tr>
# <tr>
# <td>Dillon Arroyo</td><td>Carpenter</td><td>54</td><td>UK</td>
# </tr>
# <tr>
# <td>Rebecca Douglas></td><td>Developer</td><td>32</td><td>USA</td>
# </td>
# </table>
# html <- read_html(table_html) # table with <th> header cells
# html %>% html_table() : reads in table, converts the table for R to read
# html <- read_html(table_html) # table without <th> header cells
# html %>% html_table(header = TRUE) : reads in table, converts the table for R to read, first row is header
# html %>% html_table(header = TRUE, fill = TRUE) : reads in table, converts the table for R to read, first row is header, fills in missing entries with NA
##### Turn a table into a data frame with html_table() #####
# Part 1: Extract the "clean" table into a data frame
mountains <- mountains_html %>%
html_node("table#clean") %>%
html_table()
mountains
# Part 2: Extract the "dirty" table into a data frame
mountains <- mountains_html %>%
html_node("table#dirty") %>%
html_table(header = T, fill = T)
mountains
##### Introduction to CSS #####
# CSS = Cascading Style Sheets
# Selectors I have seen:
# Type Selector:
type {
key:value;
}
html %>% html_nodes('type') # e.g., 'h1', 'a', or 'span'
type1, type2 {
key: value;
}
html %>% html_nodes("type1, type2")
# Universal Selector
* {
key: value;
}
html %>% html_nodes("*")
##### Select multiple HTML types #####
languages_raw_html <- "
<html>
<body>
<div>Python is perfect for programming.</div>
<p>Still, R might be better suited for data analysis.</p>
<small>(And has prettier charts, too.)</small>
</body>
</html>"
# Read in the HTML
languages_html <- read_html(languages_raw_html)
# Select the div and p tags and print their text
languages_html %>%
html_nodes("div, p") %>%
html_text()
##### CSS Classes and IDs #####
# Classes can categorize HTML elements into certain style groups
# Class is just another HTML attribute - similar to the 'href' attribute of the 'a' element
# Classes are specified with a '.' at the front of it
# If I looking for a specific class node, I write:
html %>% html_nodes(".alert")
# If I am looking for multiple classes, I can write:
html %>% html_nodes(".alert.emph") # Notice the lack of spaces
# If I write:
html %>% html_nodes(".alert, .emph")
# Then I am selecting elements with the ".alert", ".emph", and ".alert.emph" classes
# IDs are special class of classes.
# IDs are UNIQUE! This means only ONE element should have an ID
# In CSS, IDs have a '#' at the beginning like '#special'
# In R, this is done in the following way:
html %>% html_nodes("#special")
# If I want a specific element-class type, I use the following:
html %>% html_nodes("a.alert")
# If I want a specific element-ID type, I use the following: (I can do this with use the ID as they are unique)
html %>% html_nodes("div#special") # Remember this from scraping tables???
# There are pseudo classes as well. There are three basic classes:
# first-child
# nth-child
# last-child
# Consider the following HTML code:
<ol>
<li>First element.</li> # html %>% html_nodes("li:first-child")
<li>Second element.</li> # html %>% html_nodes("li:nth-child")
<li>Third element.</li> # html %>% html_nodes("li:last-child") OR html %>% html_nodes("li:nth-child(3)")
</ol>
Selector Type HTML CSS Selector
Type <p>...</p> p
Multiple Types <p>...</p><div>...</div> p, div
Class <p class = 'x'>...<p> .x
Multiple Classes <p class = 'x y'>...<p> .x.y
Type + Class             <p class = 'x'>...</p>                        p.x
ID                       <p id = 'x'>...</p>                           #x
Type + Pseudo Class <p>...</p><p>...</p> p:first-child
##### Leverage the uniqueness of IDs #####
structured_html_raw <- "
<html>
<body>
<div id = 'first'>
<h1 class = 'big'>Joe Biden</h1>
<p class = 'first blue'>Democrat</p>
<p class = 'second blue'>Male</p>
</div>
<div id = 'second'>...</div>
<div id = 'third'>
<h1 class = 'big'>Donald Trump</h1>
<p class = 'first red'>Republican</p>
<p class = 'second red'>Male</p>
</div>
</body>
</html>"
structured_html <- read_html(structured_html_raw)
# Select the first div
structured_html %>%
html_nodes("div#first")
# Select the last child of each p group
nested_html_raw <- "
<html>
<body>
<div>
<p class = 'text'>A sophisticated text [...]</p>
<p class = 'text'>Another paragraph following [...]</p>
<p class = 'text'>Author: T.G.</p>
</div>
<p>Copyright: DC</p>
</body>
</html>"
nested_html <- read_html(nested_html_raw)
nested_html %>%
html_nodes("p:last-child")
# This time for real: Select only the last node of the p's wrapped by the div
nested_html %>%
html_nodes("div p.text:last-child")
##### CSS combinators #####
# There are four common combinators: [space], >, +, ~
# Basic structure: h2#someid {space | > | + | ~ } .someclass
# [space]: descendant combinator (parent to any nested descendant)
html %>% html_nodes("div.first a")
# '>': child combinator - only selects DIRECT descendants of the parent
html %>% html_nodes("div.first > a")  # fixed typo: html_ndoes -> html_nodes
# '+': adjacent sibling combinator - selects THE NEXT direct sibling (oldest to youngest)
html %>% html_nodes("div.first + div")
# '~': general sibling combinator - selects all the siblings of the first which match the element specified
html %>% html_nodes("div.first ~ div")
# I can use the wildcard to select ALL siblings of the first no matter the element type
html %>% html_nodes("div.first ~ *")
# So why use combinators? Because there are websites without classes and ids!
##### Select direct descendents with the child combinator #####
# Extract the text of all list elements
languages_html %>%
html_nodes("li") %>%
html_text()
# Extract only the text of the computer languages (without the sub lists)
languages_html %>%
html_nodes('ul#languages li') %>%
html_text()
# Extract only the text of the computer languages (without the sub lists)
languages_html %>%
html_nodes('ul > li') %>%
html_text()
# Extract only the text of the computer languages (without the sub lists)
languages_html %>%
html_nodes('ul#languages li') %>%
html_text()
# Extract only the text of the computer languages (without the sub lists)
languages_html %>%
html_nodes('ul#languages > li') %>%
html_text()
##### Simply the best #####
complicated_html_raw <- '<html>
<body>
<div class="first section">
A text with a <a href="#">link</a>.
</div>
<div class="second section">
Some text with <a href="#">another link</a>.
<div class="first paragraph">Some text.</div>
<div class="second paragraph">Some more text.
<div>...</div>
</div>
</div>
</body>
</html>'
complicated_html <- read_html(complicated_html_raw)
# Select the three divs with a simple selector
complicated_html %>%
html_nodes("div div")
##### Not every sibling is the same #####
code_html_raw <- "
<html>
<body>
<h2 class = 'first'>First example:</h2>
<code>some = code(2)</code>
<span>will compile to...</span>
<code>some = more_code()</code>
<h2 class = 'second'>Second example:</h2>
<code>another = code(3)</code>
<span>will compile to...</span>
<code>another = more_code()</code>
</body>
</html>"
code_html <- read_html(code_html_raw)
# Select only the first code element in the second example
code_html %>%
html_nodes("h2.second + code")
# Select all code elements in the second example
code_html %>%
html_nodes("h2.second ~ code")
##### Introduction to XPATH #####
# XPATH = XML path language
# This is a different pathway than the CSS pathway
# Can go down AND up the HTML tree
# Suppose I want to select the element 'p' nodes...
# CSS: html %>% html_nodes('p')
# XPATH: html %>% html_nodes(xpath = "//p")
# Using the same example... the path would be...
# CSS: html %>% html_nodes("body p") OR html %>% html_nodes("html > body p")
# XPATH: html %>% html_nodes("//body//p") OR html %>% html_nodes("/html/body//p")
# Suppose I want to select 'p' tacks that direct children of 'div' tacks...
# CSS: html %>% html_nodes("div > p")
# XPATH: html %>% html_nodes("//div/p")
# Suppose I want to select 'div' tacks that only have an 'a' tack child...
# CSS: There is not a way to do this...
# XPATH: html %>% html_nodes("div[a]")
# Syntax of XPATH
# Axes: '/' or '//'; '/' is direct child; '//' general descendent relationship
# Steps: HTML types like 'span' and 'a'
# Predicates: [...]
##### Select by class and ID with XPATH #####
weather_html_raw <- "
<html>
<body>
<div id = 'first'>
<h1 class = 'big'>Berlin Weather Station</h1>
<p class = 'first'>Temperature: 20°C</p>
<p class = 'second'>Humidity: 45%</p>
</div>
<div id = 'second'>...</div>
<div id = 'third'>
<p class = 'first'>Sunshine: 5hrs</p>
<p class = 'second'>Precipitation: 0mm</p>
</div>
</body>
</html>"
weather_html <- read_html(weather_html_raw)
# Select all p elements
weather_html %>%
html_nodes(xpath = '//p')
# Select p elements with the second class
weather_html %>%
html_nodes(xpath = "//p[@class = 'second']")
# Select p elements that are children of "#third"
weather_html %>%
html_nodes(xpath = "//div[@id = 'third']")
# Select p elements with class "second" that are children of "#third"
weather_html %>%
html_nodes(xpath = "//div[@id = 'third']//p[@class = 'second']")
##### Use predicates to select nodes based on their children #####
# Select all divs
weather_html %>%
html_nodes(xpath = "//div")
# Select all divs with p descendants
weather_html %>%
html_nodes(xpath = '//div[p]')
# Select all divs with p descendants having the "third" class
weather_html %>%
html_nodes(xpath = "//div[p[@class = 'third']]") # This reaches down TWO layers
##### XPATH functions and advanced predicates #####
# position() : selects the nth element
# CSS: html %>% html_nodes(css = "ol > li:nth-child(2)")
# XPATH: html %>% html_nodes(xpath = "//ol/li[position() = 2]")
# Now suppose I want to get the first two elements... This can't be done in CSS, but with XPATH...
# html %>% html_nodes(xpath = "//ol/li[position() < 3]")
# Now suppose i want all the elements except for the third... again, CSS can't do this... but with XPATH...
# html %>% html_nodes(xpath = "//ol/li[position() != 3]")
# Now suppose I want all the elements except for the third BUT they need to have class blue...
# html %>% html_nodes(xpath = "//ol/li[position() != 3 and @class = 'blue']")
# count() : we can select the nodes with a specific number of children
# html %>% html_nodes(xpath = "//ol[count(li) = 2]")
##### Get to know the position() function #####
rules_html_raw <- "
<body>
<div>
<h2>Today's rules</h2>
<p>Wear a mask</p>
<p>Wash your hands</p>
</div>
<div>
<h2>Tomorrow's rules</h2>
<p>Wear a mask</p>
<p>Wash your hands</p>
<p>Bring hand sanitizer with you</p>
</div>
</body>"
rules_html <- read_html(rules_html_raw)
# Select the text of the second p in every div
rules_html %>%
html_nodes(xpath = "//div/p[position() = 2]") %>%
html_text()
# Select every p except the second from every div
rules_html %>%
html_nodes(xpath = "//div/p[position() != 2]") %>%
html_text()
# Select the text of the last three nodes of the second div
rules_html %>%
html_nodes(xpath = "//div[count(p) = 3]/p") %>%
html_text()
##### Extract nodes based on the number of their children #####
forecast_html_raw <- "
<body>
<div>
<h1>Tomorrow</h1>
</div>
<div>
<h2>Berlin</h2>
<p>Temperature: 20°C</p>
<p>Humidity: 50%</p>
</div>
<div>
<h2>London</h2>
<p>Temperature: 15°C</p>
</div>
<div>
<h2>Zurich</h2>
<p>Temperature: 22°C</p>
<p>Humidity: 60%</p>
</div>
</body>"
forecast_html <- read_html(forecast_html_raw)
# Select only divs with one header and at least one paragraph
# (NOTE: `count(p) > 1` actually requires MORE than one paragraph, so London,
# which has a single p, is excluded; "at least one" would be `count(p) >= 1`)
forecast_html %>%
html_nodes(xpath = "//div[count(h2) = 1 and count(p) > 1]")
##### The XPATH text() function #####
actor_html_raw <-'
<html>
<body>
<table id = "cast">
<tr><td class = "actor">Arnold S.</td><td class = "role"><em>1</em> (Voice)</td></tr>
<tr><td class = "actor">Burt R.</td><td class = "role"><em>2</em> (Choreo)</td></tr>
<tr><td class = "actor">Charlize T.</td><td class = "role"><em>3</em> (Voice)</td></tr>
</table>
</body>
</html>
'
actor_html <- read_html(actor_html_raw)
# Suppose I want to access the emphasis elements and not the role. This is impossible in CSS.
# The closest I can get is:
actor_html %>%
html_nodes("#cast td.role") %>%
html_text()
# This can be done in XPATH
actor_html %>%
html_nodes(xpath = '//*[@id = "cast"]//td[@class = "role"]') %>% # equal to '#cast td.role'
html_nodes(xpath = "./text()") %>% # './text()' selects only the td's own text nodes, skipping the <em> child
html_text(trim = T)
# The text() function can do something else: act as a selector by text
# Suppose I am only interested in pulling the rows where the actor gave the character its voice
actor_html %>%
html_nodes(xpath = '//*[@id = "cast"]//td[@class = "role" and text() = " (Voice)"]')
# Now, I want to select the entire row... the parent axis '..' walks up to the enclosing 'tr' element
actor_html %>%
html_nodes(xpath = '//*[@id = "cast"]//td[@class = "role" and text() = " (Voice)"]') %>%
html_nodes(xpath = '..')
##### The shortcomings of html_table() with badly structured tables #####
roles_html_raw <- '
<table>
<tr>
<th>Actor</th>
<th>Role</th>
</tr>
<tr>
<td class = "actor">Jayden Carpenter</td>
<td class = "role"><em>Mickey Mouse</em> (Voice)</td>
</tr>
...
</table>'
roles_html <- read_html(roles_html_raw)
# Extract the data frame from the table using a known function from rvest
# (html_table() flattens every cell to plain text, so the role name and the
# function, e.g. "(Voice)", end up fused into a single column)
roles <- roles_html %>%
html_node(xpath = "//table") %>%
html_table()
# Print the contents of the roles data frame
print(roles)
##### Select directly from a parent element with XPATHs text() #####
# Extract the actors in the cells having class "actor"
actors <- roles_html %>%
html_nodes(xpath = '//table//td[@class = "actor"]') %>%
html_text()
actors
# Extract the roles in the cells having class "role" (only the <em> content)
roles <- roles_html %>%
html_nodes(xpath = '//table//td[@class = "role"]/em') %>%
html_text()
roles
# Extract the functions using the appropriate XPATH function
# (text() grabs only the td's own text nodes, skipping the <em> child)
functions <- roles_html %>%
html_nodes(xpath = '//table//td[@class = "role"]/text()') %>%
html_text(trim = TRUE)
functions
##### Combine extracted data into a data frame #####
# Create a new data frame from the extracted vectors
cast <- tibble(
Actor = actors,
Role = roles,
Function = functions)
cast
##### Scrape an element based on its text ####
programming_html_raw <- "
<body>
<h3>The rules of programming</h3>
<ol>
<li>Have <em>fun</em>.</li>
<li><strong>Don't</strong> repeat yourself.</li>
<li>Think <em>twice</em> when naming variables.</li>
</ol>
</body>"
programming_html <- read_html(programming_html_raw)
# Select all li elements
programming_html %>%
html_nodes(xpath = '//li')
# Select all li elements
programming_html %>%
html_nodes(xpath = '//li') %>%
# Select all em elements within li elements that have "twice" as text
# (NOTE: the leading '//' restarts the search at the document root, so the
# preceding li selection has no effect here; compare the relative 'em[...]'
# path used in the next pipeline)
html_nodes(xpath = '//em[text() = "twice"]')
# Select all li elements
programming_html %>%
html_nodes(xpath = '//li') %>%
# Select all em elements within li elements that have "twice" as text
html_nodes(xpath = 'em[text() = "twice"]') %>%
# Wander up the tree to select the parent of the em
html_nodes(xpath = "..")
##### The nature of HTTP requests #####
# GET() : used to fetch a resource without submitting data
# POST() : used to send data to a server, e.g. after filling out a form on a page
# With the httr library, I can send HTTP requests from my R session
library(httr)
GET('https://httpbin.org')
# What I got from the website was the HTML code from the website. I can save this in a variable 'response'.
# Using the 'content' function, I can get the HTML code of the website
response <- GET('https://httpbin.org')
content(response)
##### Do it the httr way #####
# Get the HTML document from Wikipedia using httr
wikipedia_response <- GET('https://en.wikipedia.org/wiki/Varigotti')
# Check the status code of the response (200 means OK)
status_code(wikipedia_response)
# Parse the response into an HTML doc
wikipedia_page <- content(wikipedia_response)
wikipedia_page
# Extract the altitude with XPATH (two equivalent ways to address the 9th row)
wikipedia_page %>%
html_nodes(xpath = '//table//tr[count(preceding-sibling::*)=8]/td') %>%
html_text()
wikipedia_page %>%
html_nodes(xpath = '//table//tr[position()=9]/td') %>%
html_text()
##### Houston, we got a 404 #####
response <- GET('https://en.wikipedia.org/wiki/Varigott')
# Print status code of inexistent page
status_code(response)
##### Telling who you are with custom user agents #####
# When scraping the web, use my email address so they know who scrapes it.
# FIX: user_agent() builds a config object that must be passed positionally;
# the original passed `user_agent = "<string>"`, a named argument that GET()
# silently swallows via ..., so no user agent was actually set.
response <- GET('http://example.com',
user_agent("Hey, its me, Timo! Reach me at timo@timogrossenbacher.ch."))
# I can make the user agent 'standard' for the whole session.
# FIX: the HTTP request header is spelled "User-Agent" (with a hyphen); the
# original `User Agent` set a non-standard header that servers do not read.
set_config(add_headers(`User-Agent` = "Hey, it's me, Timo! Reach me at timo@timogrossenbacher.chc."))
response <- GET("http://example.com")
##### Check out your user agent #####
# Access https://httpbin.org/headers with httr
response <- GET("https://httpbin.org/headers")
# Print its content
print(content(response))
##### Add a custom user agent #####
# Pass a custom user agent to a GET query to the mentioned URL
response <- GET("https://httpbin.org/user-agent",
config = user_agent("A request form a DataCamp course on scraping"))
# Print the response content
print(content(response))
# Globally set the user agent to "A request from a DataCamp course on scraping"
set_config(add_headers(`User-Agent` = "A request form a DataCamp course on scraping"))
# Pass a custom user agent to a GET query to the mentioned URL
response <- GET("https://httpbin.org/user-agent")
# Print the response content
content(response)
##### How to be gentle and slow down your requests #####
# Throttling: pausing between successive requests instead of hammering the server
# while(T){
# print(Sys.time())
# response <- GET("https://httpbin.org")
# print(status_code(response))
# }
# a nicer way of requesting data from websites:
# use the 'purrr' library to slow down requests
library(purrr)
throttled_GET <- slowly(~ GET("https://httpbin.org"),
rate = rate_delay(3)) # Delays each call by 3 seconds
# This call is hard coded and can't adjust very well. So, to adjust, wrap a
# one-argument lambda instead.
# FIX: the original `slowly(GET(.), ...)` evaluated GET(.) immediately
# instead of handing slowly() a function to wrap.
throttled_GET <- slowly(~ GET(.x),
rate = rate_delay(3))
# The lambda's .x argument will allow me to do this: throttled_GET(website).
# I can now interchange websites without hardcoding
##### Custom arguments for throttled functions #####
# Why is wikipedia printed instead of Google?
# (Because the URL is hard coded inside the lambda; the argument passed to
# throttled_read_html() is ignored.)
throttled_read_html <- slowly(~ read_html("https://wikipedia.org"),
rate = rate_delay(0.5))
for(i in c(1, 2, 3)){
throttled_read_html("https://google.com") %>%
html_node("title") %>%
html_text() %>%
print()
}
##### Apply throttling to a multipage crawler #####
# Define a throttled read_html() function with a delay of 0.5s
# FIX: pass the URL through via .x instead of hardcoding the K2 page,
# otherwise every iteration of the crawler re-reads the same page.
read_html_delayed <- slowly(~ read_html(.x),
rate = rate_delay(0.5))
# Construct a loop that goes over all page urls
for(page_url in mountain_wiki_pages){
# Read in the html of each URL with a delay of 0.5s
html <- read_html_delayed(page_url)
}
# Construct a loop that goes over all page urls
for(page_url in mountain_wiki_pages){
# Read in the html of each URL with a delay of 0.5s
html <- read_html_delayed(page_url)
# Extract the name of the peak and its coordinates
peak <- html %>%
html_nodes("h1.firstHeading") %>% html_text()
coords <- html %>%
html_nodes("#coordinates .geo-dms") %>% html_text()
print(paste(peak, coords, sep = ": "))
}
|
#'Build a simulated distribution for No. of Shared Alleles
#'@details This function generates a multinomial distribution for loci given the allele frequency and the expected probability of sharing 2, 1 or 0 alleles. For each simulated individual, the number of shared alleles is drawn per locus from c(0, 1, 2) with the probabilities in the corresponding row of \code{e}, and the per-locus draws are summed.
#'@usage Simulate_DistX(e,m,t)
#'@param e a matrix of probabilities of sharing 0, 1 or 2 alleles at each locus. Each row denotes one locus; the three columns denote sharing 0, 1 or 2 alleles (each row should sum to 1).
#'@param m the sample size you want, usually similar to the real sample size.
#'@param t the number of samples you want to build / the times to generate a sample.
#'@return a matrix of frequencies of No. of shared alleles. Each row denotes one simulated sample; each column denotes a possible total No. of shared alleles, from 0 to 2*nrow(e).
#'@export
#'@examples
#'e0<-data.frame("P0"=runif(5,min = 0,max = 0.5),"P1"=runif(5,0,0.5))
#'e<-data.frame(e0,"P2"=1-rowSums(e0))
#'Simulate_DistX(e,500,10)
#'
Simulate_DistX <- function(e,m,t){
# NOTE: the parameter `t` shadows base::t here, but calls such as t(e)
# still resolve to the transpose function, because R skips non-function
# bindings when looking up a name used in a call position.
# Draw one simulated sample of m individuals and tabulate the totals of
# shared alleles across all loci.
OneDist <- function(e,m){
# One column per locus: m draws from {0,1,2} with that locus' probabilities
OneSample <- sapply(data.frame(t(e)),function(x){sample(c(0,1,2),m,replace = T,prob = x)})
s<-2*nrow(e)  # maximum possible total of shared alleles
# counta() is defined elsewhere in the package -- presumably it counts the
# occurrences of each total in z; TODO confirm against its definition
return(sapply(c(0:s), counta,z=rowSums(OneSample)))
}
output<-t(replicate(t,OneDist(e,m)))  # one row per simulated sample
s<-2*nrow(e)
colnames(output) <- c(0:s)  # columns labelled by No. of shared alleles
return(output)
}
|
/R/Simulate_DistX.R
|
no_license
|
ice4prince/mixIndependR
|
R
| false
| false
| 1,225
|
r
|
#'Build a simulated distribution for No. of Shared Alleles
#'@details This function generates a multinomial distribution for loci given the allele frequency and the expected probability of sharing 2, 1 or 0 alleles. For each simulated individual, the number of shared alleles is drawn per locus from c(0, 1, 2) with the probabilities in the corresponding row of \code{e}, and the per-locus draws are summed.
#'@usage Simulate_DistX(e,m,t)
#'@param e a matrix of probabilities of sharing 0, 1 or 2 alleles at each locus. Each row denotes one locus; the three columns denote sharing 0, 1 or 2 alleles (each row should sum to 1).
#'@param m the sample size you want, usually similar to the real sample size.
#'@param t the number of samples you want to build / the times to generate a sample.
#'@return a matrix of frequencies of No. of shared alleles. Each row denotes one simulated sample; each column denotes a possible total No. of shared alleles, from 0 to 2*nrow(e).
#'@export
#'@examples
#'e0<-data.frame("P0"=runif(5,min = 0,max = 0.5),"P1"=runif(5,0,0.5))
#'e<-data.frame(e0,"P2"=1-rowSums(e0))
#'Simulate_DistX(e,500,10)
#'
Simulate_DistX <- function(e,m,t){
# NOTE: the parameter `t` shadows base::t here, but calls such as t(e)
# still resolve to the transpose function, because R skips non-function
# bindings when looking up a name used in a call position.
# Draw one simulated sample of m individuals and tabulate the totals of
# shared alleles across all loci.
OneDist <- function(e,m){
# One column per locus: m draws from {0,1,2} with that locus' probabilities
OneSample <- sapply(data.frame(t(e)),function(x){sample(c(0,1,2),m,replace = T,prob = x)})
s<-2*nrow(e)  # maximum possible total of shared alleles
# counta() is defined elsewhere in the package -- presumably it counts the
# occurrences of each total in z; TODO confirm against its definition
return(sapply(c(0:s), counta,z=rowSums(OneSample)))
}
output<-t(replicate(t,OneDist(e,m)))  # one row per simulated sample
s<-2*nrow(e)
colnames(output) <- c(0:s)  # columns labelled by No. of shared alleles
return(output)
}
|
# Plot GBIF occurrence records of three species on a map of India.
library(rgbif)
library(spocc)
library(maps)
library(mapdata)
map('worldHires','India')
# Occurrences of Tetraogallus himalayensis (labelled "Himalayan Tiger" here)
tiger<-occ(query="Tetraogallus himalayensis",from="gbif",has_coords = TRUE,limit=10) # for Himalayan Tiger
datatig<-tiger$gbif$data$Tetraogallus_himalayensis
# longitude and latitude of Himalayan Tiger
# FIX: the original read from `data` (the base R function) instead of the
# `datatig` data frame extracted above.
datatigx<-as.numeric(datatig$longitude)
datatigy<-as.numeric(datatig$latitude)
# plotting tiger's points on map
points(datatigx,datatigy,col="yellow",pch=3)
# occurrences of Indian Rhinoceros
rhino<-occ(query="Rhinoceros unicornis",from="gbif",has_coords = TRUE,limit=10)
# longitude and latitude of Indian Rhinoceros
datarhino<-rhino$gbif$data$Rhinoceros_unicornis
datarhinox<-as.numeric(datarhino$longitude)
datarhinoy<-as.numeric(datarhino$latitude)
# plotting Indian Rhinoceros points on map
points(datarhinox,datarhinoy,col="red",pch=7)
# occurrences of Black Buck
blackbuck<-occ(query ="Antilope cervicapra",from="gbif",has_coords = TRUE,limit=10)
# longitude and latitude of BlackBuck
databuck<-blackbuck$gbif$data$Antilope_cervicapra
databuckx<-as.numeric(databuck$longitude)
databucky<-as.numeric(databuck$latitude)
# plotting BlackBuck on map
points(databuckx,databucky,col="green",pch=5)
# adding legend to map
legend('topright',c("Himalayan Tiger","Indian rhinoceros","Blackbuck"),
lty=1, col=c('yellow', 'red', 'green'), bty='n', cex=.75)
|
/Easy1.R
|
no_license
|
jatinrajani/BiodiversityDataCleaning
|
R
| false
| false
| 1,366
|
r
|
# Plot GBIF occurrence records of three species on a map of India.
library(rgbif)
library(spocc)
library(maps)
library(mapdata)
map('worldHires','India')
# Occurrences of Tetraogallus himalayensis (labelled "Himalayan Tiger" here)
tiger<-occ(query="Tetraogallus himalayensis",from="gbif",has_coords = TRUE,limit=10) # for Himalayan Tiger
datatig<-tiger$gbif$data$Tetraogallus_himalayensis
# longitude and latitude of Himalayan Tiger
# FIX: the original read from `data` (the base R function) instead of the
# `datatig` data frame extracted above.
datatigx<-as.numeric(datatig$longitude)
datatigy<-as.numeric(datatig$latitude)
# plotting tiger's points on map
points(datatigx,datatigy,col="yellow",pch=3)
# occurrences of Indian Rhinoceros
rhino<-occ(query="Rhinoceros unicornis",from="gbif",has_coords = TRUE,limit=10)
# longitude and latitude of Indian Rhinoceros
datarhino<-rhino$gbif$data$Rhinoceros_unicornis
datarhinox<-as.numeric(datarhino$longitude)
datarhinoy<-as.numeric(datarhino$latitude)
# plotting Indian Rhinoceros points on map
points(datarhinox,datarhinoy,col="red",pch=7)
# occurrences of Black Buck
blackbuck<-occ(query ="Antilope cervicapra",from="gbif",has_coords = TRUE,limit=10)
# longitude and latitude of BlackBuck
databuck<-blackbuck$gbif$data$Antilope_cervicapra
databuckx<-as.numeric(databuck$longitude)
databucky<-as.numeric(databuck$latitude)
# plotting BlackBuck on map
points(databuckx,databucky,col="green",pch=5)
# adding legend to map
legend('topright',c("Himalayan Tiger","Indian rhinoceros","Blackbuck"),
lty=1, col=c('yellow', 'red', 'green'), bty='n', cex=.75)
|
\name{df2scidb}
\alias{df2scidb}
\title{Copy a data frame into a SciDB array.}
\description{
Copy a data frame into a new 1-D SciDB array.
}
\usage{
df2scidb(X, name = tmpnam(), dimlabel = "row", chunkSize,
rowOverlap = 0L, types = NULL, nullable, schema_only = FALSE, gc, start)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{A data frame. }
\item{name}{The SciDB array name, defaults to an automatically-generated name.}
\item{dimlabel}{Name the SciDB dimension.}
\item{chunkSize}{The SciDB chunk size.}
\item{rowOverlap}{The SciDB chunk overlap.}
\item{types}{An optional vector explicitly specifying the SciDB attribute types. Length must match the number of columns of the data frame.}
\item{nullable}{An optional vector indicating the SciDB nullable property of each attribute. Length must match the number of columns of the data frame.}
\item{schema_only}{If TRUE, return a string that would represent the SciDB array schema without uploading data to SciDB.}
\item{gc}{Optional logical value. If TRUE, then resulting SciDB array will be garbage-collected when the R variable referencing it is. The default value is FALSE.}
\item{start}{Optional integer starting index value. Defaults to 1.}
}
\details{
\code{df2scidb} is a workhorse utility function that transfers an R data frame
into a 1-D SciDB array via intermediate CSV formatting.
The columns of the data frame correspond to attributes in the SciDB array.
The \code{iquery} function returns query results using a similar method as R
data frames.
}
\value{
NULL is invisibly returned. SciDB errors are propagated as R error conditions.
}
\author{
B. W. Lewis <blewis@paradigm4.com>
}
\examples{
\dontrun{
df2scidb(iris)
scidblist()
head(iquery("scan(iris)", return=TRUE))
}
}
|
/man/df2scidb.Rd
|
no_license
|
anukat2015/SciDBR
|
R
| false
| false
| 1,803
|
rd
|
\name{df2scidb}
\alias{df2scidb}
\title{Copy a data frame into a SciDB array.}
\description{
Copy a data frame into a new 1-D SciDB array.
}
\usage{
df2scidb(X, name = tmpnam(), dimlabel = "row", chunkSize,
rowOverlap = 0L, types = NULL, nullable, schema_only = FALSE, gc, start)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{A data frame. }
\item{name}{The SciDB array name, defaults to an automatically-generated name.}
\item{dimlabel}{Name the SciDB dimension.}
\item{chunkSize}{The SciDB chunk size.}
\item{rowOverlap}{The SciDB chunk overlap.}
\item{types}{An optional vector explicitly specifying the SciDB attribute types. Length must match the number of columns of the data frame.}
\item{nullable}{An optional vector indicating the SciDB nullable property of each attribute. Length must match the number of columns of the data frame.}
\item{schema_only}{If TRUE, return a string that would represent the SciDB array schema without uploading data to SciDB.}
\item{gc}{Optional logical value. If TRUE, then resulting SciDB array will be garbage-collected when the R variable referencing it is. The default value is FALSE.}
\item{start}{Optional integer starting index value. Defaults to 1.}
}
\details{
\code{df2scidb} is a workhorse utility function that transfers an R data frame
into a 1-D SciDB array via intermediate CSV formatting.
The columns of the data frame correspond to attributes in the SciDB array.
The \code{iquery} function returns query results using a similar method as R
data frames.
}
\value{
NULL is invisibly returned. SciDB errors are propagated as R error conditions.
}
\author{
B. W. Lewis <blewis@paradigm4.com>
}
\examples{
\dontrun{
df2scidb(iris)
scidblist()
head(iquery("scan(iris)", return=TRUE))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dprime.R
\name{dprime}
\alias{dprime}
\title{Dprime and Other Signal Detection Theory indices.}
\usage{
dprime(n_hit, n_fa, n_miss = NULL, n_cr = NULL, n_targets = NULL,
n_distractors = NULL, adjusted = TRUE)
}
\arguments{
\item{n_hit}{Number of hits.}
\item{n_fa}{Number of false alarms.}
\item{n_miss}{Number of misses.}
\item{n_cr}{Number of correct rejections.}
\item{n_targets}{Number of targets (n_hit + n_miss).}
\item{n_distractors}{Number of distractors (n_fa + n_cr).}
\item{adjusted}{Should it use the Hautus (1995) adjustments for extreme values.}
}
\value{
Calculates the d', the beta, the A' and the B''D based on the signal detection theory (SRT). See Pallier (2002) for the algorithms.
Returns a list containing 5 objects:
\itemize{
\item{dprime (d'): }{The sensitivity. Reflects the distance between the two distributions: signal, and signal+noise and corresponds to the Z value of the hit-rate minus that of the false-alarm rate.}
\item{beta: }{The bias (criterion). The value for beta is the ratio of the normal density functions at the criterion of the Z values used in the computation of d'. This reflects an observer's bias to say 'yes' or 'no' with the unbiased observer having a value around 1.0. As the bias to say 'yes' increases (liberal), resulting in a higher hit-rate and false-alarm-rate, beta approaches 0.0. As the bias to say 'no' increases (conservative), resulting in a lower hit-rate and false-alarm rate, beta increases over 1.0 on an open-ended scale.}
\item{c: }{Another index of bias. The number of standard deviations from the midpoint between these two distributions, i.e., a measure on a continuum from "conservative" to "liberal".}
\item{aprime (A'): }{Non-parametric estimate of discriminability. An A' near 1.0 indicates good discriminability, while a value near 0.5 means chance performance.}
\item{bppd (B''D): }{Non-parametric estimate of bias. A B''D equal to 0.0 indicates no bias, positive numbers represent conservative bias (i.e., a tendency to answer 'no'), negative numbers represent liberal bias (i.e. a tendency to answer 'yes'). The maximum absolute value is 1.0.}
}
Note that for d' and beta, adjustments for extreme values are made following the recommendations of Hautus (1995).
}
\description{
Computes Signal Detection Theory indices (d', beta, A', B''D, c).
}
\examples{
library(psycho)
n_hit <- 9
n_fa <- 2
n_miss <- 1
n_cr <- 7
indices <- psycho::dprime(n_hit, n_fa, n_miss, n_cr)
df <- data.frame(Participant = c("A", "B", "C"),
n_hit = c(1, 2, 5),
n_fa = c(6, 8, 1))
indices <- psycho::dprime(n_hit=df$n_hit,
n_fa=df$n_fa,
n_targets=10,
n_distractors=10,
adjusted=FALSE)
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
/man/dprime.Rd
|
permissive
|
hectorabonza/psycho.R
|
R
| false
| true
| 2,839
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dprime.R
\name{dprime}
\alias{dprime}
\title{Dprime and Other Signal Detection Theory indices.}
\usage{
dprime(n_hit, n_fa, n_miss = NULL, n_cr = NULL, n_targets = NULL,
n_distractors = NULL, adjusted = TRUE)
}
\arguments{
\item{n_hit}{Number of hits.}
\item{n_fa}{Number of false alarms.}
\item{n_miss}{Number of misses.}
\item{n_cr}{Number of correct rejections.}
\item{n_targets}{Number of targets (n_hit + n_miss).}
\item{n_distractors}{Number of distractors (n_fa + n_cr).}
\item{adjusted}{Should it use the Hautus (1995) adjustments for extreme values.}
}
\value{
Calculates the d', the beta, the A' and the B''D based on the signal detection theory (SRT). See Pallier (2002) for the algorithms.
Returns a list containing 5 objects:
\itemize{
\item{dprime (d'): }{The sensitivity. Reflects the distance between the two distributions: signal, and signal+noise and corresponds to the Z value of the hit-rate minus that of the false-alarm rate.}
\item{beta: }{The bias (criterion). The value for beta is the ratio of the normal density functions at the criterion of the Z values used in the computation of d'. This reflects an observer's bias to say 'yes' or 'no' with the unbiased observer having a value around 1.0. As the bias to say 'yes' increases (liberal), resulting in a higher hit-rate and false-alarm-rate, beta approaches 0.0. As the bias to say 'no' increases (conservative), resulting in a lower hit-rate and false-alarm rate, beta increases over 1.0 on an open-ended scale.}
\item{c: }{Another index of bias. The number of standard deviations from the midpoint between these two distributions, i.e., a measure on a continuum from "conservative" to "liberal".}
\item{aprime (A'): }{Non-parametric estimate of discriminability. An A' near 1.0 indicates good discriminability, while a value near 0.5 means chance performance.}
\item{bppd (B''D): }{Non-parametric estimate of bias. A B''D equal to 0.0 indicates no bias, positive numbers represent conservative bias (i.e., a tendency to answer 'no'), negative numbers represent liberal bias (i.e. a tendency to answer 'yes'). The maximum absolute value is 1.0.}
}
Note that for d' and beta, adjustments for extreme values are made following the recommendations of Hautus (1995).
}
\description{
Computes Signal Detection Theory indices (d', beta, A', B''D, c).
}
\examples{
library(psycho)
n_hit <- 9
n_fa <- 2
n_miss <- 1
n_cr <- 7
indices <- psycho::dprime(n_hit, n_fa, n_miss, n_cr)
df <- data.frame(Participant = c("A", "B", "C"),
n_hit = c(1, 2, 5),
n_fa = c(6, 8, 1))
indices <- psycho::dprime(n_hit=df$n_hit,
n_fa=df$n_fa,
n_targets=10,
n_distractors=10,
adjusted=FALSE)
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
# Download raw MODIS NDVI and VI-quality subsets around a point and cache
# them to disk.
#
#   product     : MODIS product name (default "MOD13Q1").
#   lat, lon    : centre of the subset (defaults to the Portal, AZ site).
#   buffer_size : buffer around the point in km, applied left/right (km_lr)
#                 and above/below (km_ab).
#
# Side effects: writes data/modis_ndvi_raw.RDS and data/modis_vi_quality.RDS.
# NOTE(review): the original also called mt_products(), mt_bands() and
# mt_dates() and discarded the results; those unused network calls have been
# removed to avoid three needless round trips.
retrieve_modis_raw_data <- function(product = "MOD13Q1",
                                    lat = 31.938, lon = -109.080,
                                    buffer_size = 1)
{
  modis_ndvi <- mt_subset(product = product,
                          lat = lat,
                          lon = lon,
                          band = "250m_16_days_NDVI",
                          start = "2000-02-18",
                          end = "2018-12-19",
                          km_lr = buffer_size,
                          km_ab = buffer_size,
                          site_name = "portal")
  modis_vi_quality <- mt_subset(product = product,
                                lat = lat,
                                lon = lon,
                                band = "250m_16_days_VI_Quality",
                                start = "2000-02-18",
                                end = "2018-12-19",
                                km_lr = buffer_size,
                                km_ab = buffer_size,
                                site_name = "portal")
  saveRDS(modis_ndvi, file = "data/modis_ndvi_raw.RDS")
  saveRDS(modis_vi_quality, file = "data/modis_vi_quality.RDS")
}
# this map is taken from Table 5, describing the bit code for the VI Quality
# scientific data set, for MOD13Q1 / MOD13A1 in the
# "MODIS Vegetation Index User's Guide", retrieved from
# https://vip.arizona.edu/documents/MODIS/MODIS_VI_UsersGuide_June_2015_C6.pdf
#
# Decode raw MODIS VI Quality pixel values into human-readable labels.
#
#   pixel_value : integer vector of raw VI Quality values.
#
# Returns a data.frame with one row per input value and one column per
# decoded quality field.
map_vi_quality <- function(pixel_value)
{
  vi_quality_map <- c("VI produced with good quality",
                      "VI produced, but check other QA",
                      "Pixel produced, but most probably cloudy",
                      "Pixel not produced due to other reasons than clouds")
  vi_usefulness_map <- c("Highest quality",
                         "Lower quality",
                         "Decreasing quality 2",
                         "Decreasing quality 3",
                         "Decreasing quality 4",
                         "Decreasing quality 5",
                         "Decreasing quality 6",
                         "Decreasing quality 7",
                         "Decreasing quality 8",
                         "Decreasing quality 9",
                         "Decreasing quality 10",
                         "Decreasing quality 11",
                         "Lowest quality",
                         "Quality so low that it is not useful",
                         "L1B data faulty",
                         "Not useful for any other reason / not processed")
  aerosol_quantity_map <- c("Climatology",
                            "Low",
                            "Intermediate",
                            "High")
  adjacent_cloud_detected_map <- c("No",
                                   "Yes")
  atmosphere_BRDF_correction_map <- c("No",
                                      "Yes")
  mixed_clouds_map <- c("No",
                        "Yes")
  land_water_mask_map <- c("Shallow ocean",
                           "Land (Nothing else but land)",
                           "Ocean coastlines and lake shorelines",
                           "Shallow inland water",
                           "Ephemeral water",
                           "Deep inland water",
                           "Moderate or continental ocean",
                           "Deep ocean")
  possible_snow_ice_map <- c("No",
                             "Yes")
  possible_shadow_map <- c("No",
                           "Yes")
  # Bits of each pixel value, least-significant bit first: one row per value.
  pixel_bits <- matrix(as.numeric(intToBits(pixel_value)), ncol = 32, byrow = TRUE)
  # Decode the little-endian bit field occupying bit columns `cols` into a
  # 1-based index into one of the lookup tables above (replaces the original
  # copy-pasted `1 + bits * powers-of-two` arithmetic for every field).
  field_index <- function(cols) {
    weights <- 2^(seq_along(cols) - 1)
    1 + as.vector(pixel_bits[, cols, drop = FALSE] %*% weights)
  }
  out <- data.frame(
    vi_quality                 = vi_quality_map[field_index(1:2)],
    vi_usefulness              = vi_usefulness_map[field_index(3:6)],
    aerosol_quantity           = aerosol_quantity_map[field_index(7:8)],
    adjacent_cloud_detected    = adjacent_cloud_detected_map[field_index(9)],
    atmosphere_BRDF_correction = atmosphere_BRDF_correction_map[field_index(10)],
    mixed_clouds               = mixed_clouds_map[field_index(11)],
    land_water_mask            = land_water_mask_map[field_index(12:14)],
    possible_snow_ice          = possible_snow_ice_map[field_index(15)],
    possible_shadow            = possible_shadow_map[field_index(16)]
  )
  return(out)
}
# Combine the cached raw NDVI and VI-quality MODIS subsets, keep only the
# high-quality observations, and save a per-date mean NDVI series to
# data/modis_ndvi_processed.RDS. Reads the files written by
# retrieve_modis_raw_data(); does nothing if the two tables do not align.
process_modis_raw_data <- function()
{
  modis_ndvi_raw <- readRDS("data/modis_ndvi_raw.RDS")
  modis_vi_quality <- readRDS("data/modis_vi_quality.RDS")
  # Metadata columns shared by both subsets; used to check row alignment.
  cols <- c("xllcorner", "yllcorner", "cellsize", "latitude", "longitude",
            "start", "end", "modis_date", "calendar_date", "tile")
  # Replace the raw quality integer with its decoded fields.
  modis_vi_quality <- dplyr::bind_cols(dplyr::select(modis_vi_quality, dplyr::all_of(cols)),
                                       map_vi_quality(modis_vi_quality$value))
  # FIX: all.equal() returns a character vector (not FALSE) on mismatch, so
  # the original `&& all.equal(...)` would error instead of skipping; wrap
  # it in isTRUE().
  if (NROW(modis_ndvi_raw) == NROW(modis_vi_quality) &&
      isTRUE(all.equal(modis_ndvi_raw[, cols], modis_vi_quality[, cols], check.attributes = FALSE)))
  {
    # FIX: `-cols` with a bare external character vector is deprecated
    # tidyselect behaviour; wrap the vector in all_of().
    modis_ndvi_raw <- dplyr::bind_cols(modis_ndvi_raw,
                                       dplyr::select(modis_vi_quality, -dplyr::all_of(cols)))
    modis_ndvi_processed <- modis_ndvi_raw %>%
      dplyr::filter(vi_usefulness == "Highest quality",
                    aerosol_quantity == "Low",
                    adjacent_cloud_detected == "No",
                    mixed_clouds == "No") %>%
      dplyr::mutate(ndvi = value * as.numeric(scale)) %>%  # apply the product's scale factor column
      dplyr::group_by(calendar_date) %>%
      dplyr::summarize(ndvi = mean(ndvi)) %>%
      dplyr::mutate(date = as.Date(calendar_date)) %>%
      dplyr::select(date, ndvi) %>%
      dplyr::mutate(sensor = "MODIS", source = "MODISTools")
    saveRDS(modis_ndvi_processed, file = "data/modis_ndvi_processed.RDS")
  }
}
|
/R/modis_NDVI_functions.R
|
permissive
|
PaulESantos/NDVIning
|
R
| false
| false
| 6,621
|
r
|
# Download raw MODIS NDVI and VI-quality subsets around a point and cache
# them to disk.
#
#   product     : MODIS product name (default "MOD13Q1").
#   lat, lon    : centre of the subset (defaults to the Portal, AZ site).
#   buffer_size : buffer around the point in km, applied left/right (km_lr)
#                 and above/below (km_ab).
#
# Side effects: writes data/modis_ndvi_raw.RDS and data/modis_vi_quality.RDS.
# NOTE(review): the original also called mt_products(), mt_bands() and
# mt_dates() and discarded the results; those unused network calls have been
# removed to avoid three needless round trips.
retrieve_modis_raw_data <- function(product = "MOD13Q1",
                                    lat = 31.938, lon = -109.080,
                                    buffer_size = 1)
{
  modis_ndvi <- mt_subset(product = product,
                          lat = lat,
                          lon = lon,
                          band = "250m_16_days_NDVI",
                          start = "2000-02-18",
                          end = "2018-12-19",
                          km_lr = buffer_size,
                          km_ab = buffer_size,
                          site_name = "portal")
  modis_vi_quality <- mt_subset(product = product,
                                lat = lat,
                                lon = lon,
                                band = "250m_16_days_VI_Quality",
                                start = "2000-02-18",
                                end = "2018-12-19",
                                km_lr = buffer_size,
                                km_ab = buffer_size,
                                site_name = "portal")
  saveRDS(modis_ndvi, file = "data/modis_ndvi_raw.RDS")
  saveRDS(modis_vi_quality, file = "data/modis_vi_quality.RDS")
}
# this map is taken from Table 5, describing the bit code for the VI Quality
# scientific data set, for MOD13Q1 / MOD13A1 in the
# "MODIS Vegetation Index User's Guide", retrieved from
# https://vip.arizona.edu/documents/MODIS/MODIS_VI_UsersGuide_June_2015_C6.pdf
#
# Decode raw MODIS VI Quality pixel values into human-readable labels.
#
#   pixel_value : integer vector of raw VI Quality values.
#
# Returns a data.frame with one row per input value and one column per
# decoded quality field.
map_vi_quality <- function(pixel_value)
{
  vi_quality_map <- c("VI produced with good quality",
                      "VI produced, but check other QA",
                      "Pixel produced, but most probably cloudy",
                      "Pixel not produced due to other reasons than clouds")
  vi_usefulness_map <- c("Highest quality",
                         "Lower quality",
                         "Decreasing quality 2",
                         "Decreasing quality 3",
                         "Decreasing quality 4",
                         "Decreasing quality 5",
                         "Decreasing quality 6",
                         "Decreasing quality 7",
                         "Decreasing quality 8",
                         "Decreasing quality 9",
                         "Decreasing quality 10",
                         "Decreasing quality 11",
                         "Lowest quality",
                         "Quality so low that it is not useful",
                         "L1B data faulty",
                         "Not useful for any other reason / not processed")
  aerosol_quantity_map <- c("Climatology",
                            "Low",
                            "Intermediate",
                            "High")
  adjacent_cloud_detected_map <- c("No",
                                   "Yes")
  atmosphere_BRDF_correction_map <- c("No",
                                      "Yes")
  mixed_clouds_map <- c("No",
                        "Yes")
  land_water_mask_map <- c("Shallow ocean",
                           "Land (Nothing else but land)",
                           "Ocean coastlines and lake shorelines",
                           "Shallow inland water",
                           "Ephemeral water",
                           "Deep inland water",
                           "Moderate or continental ocean",
                           "Deep ocean")
  possible_snow_ice_map <- c("No",
                             "Yes")
  possible_shadow_map <- c("No",
                           "Yes")
  # Bits of each pixel value, least-significant bit first: one row per value.
  pixel_bits <- matrix(as.numeric(intToBits(pixel_value)), ncol = 32, byrow = TRUE)
  # Decode the little-endian bit field occupying bit columns `cols` into a
  # 1-based index into one of the lookup tables above (replaces the original
  # copy-pasted `1 + bits * powers-of-two` arithmetic for every field).
  field_index <- function(cols) {
    weights <- 2^(seq_along(cols) - 1)
    1 + as.vector(pixel_bits[, cols, drop = FALSE] %*% weights)
  }
  out <- data.frame(
    vi_quality                 = vi_quality_map[field_index(1:2)],
    vi_usefulness              = vi_usefulness_map[field_index(3:6)],
    aerosol_quantity           = aerosol_quantity_map[field_index(7:8)],
    adjacent_cloud_detected    = adjacent_cloud_detected_map[field_index(9)],
    atmosphere_BRDF_correction = atmosphere_BRDF_correction_map[field_index(10)],
    mixed_clouds               = mixed_clouds_map[field_index(11)],
    land_water_mask            = land_water_mask_map[field_index(12:14)],
    possible_snow_ice          = possible_snow_ice_map[field_index(15)],
    possible_shadow            = possible_shadow_map[field_index(16)]
  )
  return(out)
}
# Combine the cached raw NDVI and VI-quality MODIS subsets, keep only the
# high-quality observations, and save a per-date mean NDVI series to
# data/modis_ndvi_processed.RDS. Reads the files written by
# retrieve_modis_raw_data(); does nothing if the two tables do not align.
process_modis_raw_data <- function()
{
  modis_ndvi_raw <- readRDS("data/modis_ndvi_raw.RDS")
  modis_vi_quality <- readRDS("data/modis_vi_quality.RDS")
  # Metadata columns shared by both subsets; used to check row alignment.
  cols <- c("xllcorner", "yllcorner", "cellsize", "latitude", "longitude",
            "start", "end", "modis_date", "calendar_date", "tile")
  # Replace the raw quality integer with its decoded fields.
  modis_vi_quality <- dplyr::bind_cols(dplyr::select(modis_vi_quality, dplyr::all_of(cols)),
                                       map_vi_quality(modis_vi_quality$value))
  # FIX: all.equal() returns a character vector (not FALSE) on mismatch, so
  # the original `&& all.equal(...)` would error instead of skipping; wrap
  # it in isTRUE().
  if (NROW(modis_ndvi_raw) == NROW(modis_vi_quality) &&
      isTRUE(all.equal(modis_ndvi_raw[, cols], modis_vi_quality[, cols], check.attributes = FALSE)))
  {
    # FIX: `-cols` with a bare external character vector is deprecated
    # tidyselect behaviour; wrap the vector in all_of().
    modis_ndvi_raw <- dplyr::bind_cols(modis_ndvi_raw,
                                       dplyr::select(modis_vi_quality, -dplyr::all_of(cols)))
    modis_ndvi_processed <- modis_ndvi_raw %>%
      dplyr::filter(vi_usefulness == "Highest quality",
                    aerosol_quantity == "Low",
                    adjacent_cloud_detected == "No",
                    mixed_clouds == "No") %>%
      dplyr::mutate(ndvi = value * as.numeric(scale)) %>%  # apply the product's scale factor column
      dplyr::group_by(calendar_date) %>%
      dplyr::summarize(ndvi = mean(ndvi)) %>%
      dplyr::mutate(date = as.Date(calendar_date)) %>%
      dplyr::select(date, ndvi) %>%
      dplyr::mutate(sensor = "MODIS", source = "MODISTools")
    saveRDS(modis_ndvi_processed, file = "data/modis_ndvi_processed.RDS")
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot-utils.R
\name{gg_watermark}
\alias{gg_watermark}
\title{Add in a watermark to plots}
\usage{
gg_watermark(
lab = "DRAFT",
disable = getOption("ggrrr.disable.watermark", default = FALSE)
)
}
\arguments{
\item{lab}{the watermark label (DRAFT)}
\item{disable}{- global option to disable all watermarks options("ggrrr.disable.watermark"=TRUE)}
}
\value{
a watermark layer
}
\description{
Add in a watermark to plots
}
|
/man/gg_watermark.Rd
|
permissive
|
terminological/ggrrr
|
R
| false
| true
| 504
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot-utils.R
\name{gg_watermark}
\alias{gg_watermark}
\title{Add in a watermark to plots}
\usage{
gg_watermark(
lab = "DRAFT",
disable = getOption("ggrrr.disable.watermark", default = FALSE)
)
}
\arguments{
\item{lab}{the watermark label (DRAFT)}
\item{disable}{- global option to disable all watermarks options("ggrrr.disable.watermark"=TRUE)}
}
\value{
a watermark layer
}
\description{
Add in a watermark to plots
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_areas_middle.R
\name{get_areas_middle}
\alias{get_areas_middle}
\title{Implements the Acquire Area Middle Master API.}
\usage{
get_areas_middle(api_key = "eca7388c8a3c6332eb702a21bcc63b46", lang = "en")
}
\arguments{
\item{api_key}{string, your own Gurunavi API key. Ours has been provided
for convenience.}
\item{lang}{string, language of the returned data. Default = "en", can
also take values of "ja", "zh_cn", "zh_tw", "ko".}
}
\value{
A data frame of middle and large area codes and names and pref codes and names.
}
\description{
\url{http://api.gnavi.co.jp/api/manual_e.html#api05}
}
\examples{
get_areas_middle(lang = "en")
get_areas_middle(lang = "ja")
get_areas_middle(lang = "zh_cn")
get_areas_middle(lang = "zh_tw")
get_areas_middle(lang = "ko")
}
\seealso{
\code{\link{query_data}}, \code{\link{extract_nested}}
}
|
/man/get_areas_middle.Rd
|
permissive
|
gmlang/RGurunavi
|
R
| false
| true
| 912
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_areas_middle.R
\name{get_areas_middle}
\alias{get_areas_middle}
\title{Implements the Acquire Area Middle Master API.}
\usage{
get_areas_middle(api_key = "eca7388c8a3c6332eb702a21bcc63b46", lang = "en")
}
\arguments{
\item{api_key}{string, your own Gurunavi API key. Ours has been provided
for convenience.}
\item{lang}{string, language of the returned data. Default = "en", can
also take values of "ja", "zh_cn", "zh_tw", "ko".}
}
\value{
A data frame of middle and large area codes and names and pref codes and names.
}
\description{
\url{http://api.gnavi.co.jp/api/manual_e.html#api05}
}
\examples{
get_areas_middle(lang = "en")
get_areas_middle(lang = "ja")
get_areas_middle(lang = "zh_cn")
get_areas_middle(lang = "zh_tw")
get_areas_middle(lang = "ko")
}
\seealso{
\code{\link{query_data}}, \code{\link{extract_nested}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quick_design.R
\name{fill_out}
\alias{fill_out}
\alias{redesign}
\title{Declare a Design via a Template Function}
\usage{
fill_out(template, expand = TRUE, ...)
redesign(design, expand = TRUE, ...)
}
\arguments{
\item{template}{a function which yields a design.}
\item{expand}{boolean - if true, form the crossproduct of the ..., otherwise recycle them}
\item{...}{Options sent to the template.}
\item{design}{a design}
}
\value{
if set of designs is size one, the design, otherwise a `by`-list of designs.
}
\description{
\code{fill_out} easily generates a set of designs from a template function.
\code{redesign} quickly generates a design from an existing one by resetting design variables internally. (Advanced).
}
\examples{
design_template <- function(N = 100) {
population <- declare_population(N = N)
return(declare_design(population))
}
# returns list of three designs
vary_n <- fill_out(design_template, N = seq(30, 100, 10))
\dontrun{
# diagnose a list of designs created by fill_out or redesign
diagnose_vary_n <- diagnose_design(vary_n)
}
# returns a single design
large_design <- fill_out(design_template, N = 200)
\dontrun{
diagnose_large_design <- diagnose_design(large_design)
}
my_population <- declare_population(N = 100)
my_design <- declare_design(my_population)
# returns a single, modified design
design_large_N <- redesign(my_design, N = 1000)
# returns a list of five modified designs
design_vary_N <- redesign(my_design, N = seq(500, 1000, 100))
}
|
/man/fill_out.Rd
|
no_license
|
antoshachekhonte/DeclareDesign
|
R
| false
| true
| 1,575
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quick_design.R
\name{fill_out}
\alias{fill_out}
\alias{redesign}
\title{Declare a Design via a Template Function}
\usage{
fill_out(template, expand = TRUE, ...)
redesign(design, expand = TRUE, ...)
}
\arguments{
\item{template}{a function which yields a design.}
\item{expand}{boolean - if true, form the crossproduct of the ..., otherwise recycle them}
\item{...}{Options sent to the template.}
\item{design}{a design}
}
\value{
if set of designs is size one, the design, otherwise a `by`-list of designs.
}
\description{
\code{fill_out} easily generates a set of designs from a template function.
\code{redesign} quickly generates a design from an existing one by resetting design variables internally. (Advanced).
}
\examples{
design_template <- function(N = 100) {
population <- declare_population(N = N)
return(declare_design(population))
}
# returns list of three designs
vary_n <- fill_out(design_template, N = seq(30, 100, 10))
\dontrun{
# diagnose a list of designs created by fill_out or redesign
diagnose_vary_n <- diagnose_design(vary_n)
}
# returns a single design
large_design <- fill_out(design_template, N = 200)
\dontrun{
diagnose_large_design <- diagnose_design(large_design)
}
my_population <- declare_population(N = 100)
my_design <- declare_design(my_population)
# returns a single, modified design
design_large_N <- redesign(my_design, N = 1000)
# returns a list of five modified designs
design_vary_N <- redesign(my_design, N = seq(500, 1000, 100))
}
|
# Locate the "demo" example app shipped inside the installed shinyjs package
dir <- system.file("examples", "demo", package = "shinyjs")
# Run the app from its own directory so relative resource paths resolve
setwd(dir)
shiny::shinyAppDir(".")
|
/shinyjs-demo/app.R
|
permissive
|
ycalvinner/shiny-server
|
R
| false
| false
| 95
|
r
|
# Locate the "demo" example app shipped inside the installed shinyjs package
dir <- system.file("examples", "demo", package = "shinyjs")
# Run the app from its own directory so relative resource paths resolve
setwd(dir)
shiny::shinyAppDir(".")
|
#' List of persons in Priestley's A Chart of Biography
#'
#' Table with biographical information: names, birth dates, death dates,
#' ages, occupation, category, and the lifespan segments to use in the
#' timeline for all individuals in the 1st (1764) and 7th (1778) editions
#' of Joseph Priestley's *A Chart of Biography*.
#'
#' @source Davis, S. B. (2010) "Names from Desc Chart 1764 OCRcorrected.pdf" <https://drive.google.com/file/d/0B4KIGf4GncycZGRmNWY4Y2QtZjNjNS00OGEzLWE0MjctMzY0NzFhM2I2YjFj/view?authkey=CPGfreEB>.
#'
#' @references
#' Priestley, J. (1765) *A chart of biography to the Right Honourable Hugh Lord Willoughby of Parham this chart is with the greatest respect and gratitude inscribed by his Lordship's most obedient and most humble servant Joseph Priestley*. 1st ed. London. <http://explore.bl.uk/BLVU1:LSCOP-ALL:BLLSFX3360000000234303>
#' Priestley, J. (1764) *A Description of a Chart of Biography* 1st ed. Warrington.
#' Priestley, J. (1778). *A Description of a Chart of Biography; with a Catalogue of All the Names Inserted in It, and the Dates Annexed to Them*. 7th ed. London: J. Johnson.
#'
#' @format A data frame with 2418 rows and 29 columns.
#' \describe{
#' \item{text}{(string) Original text from name index.}
#' \item{name}{(string) Name of the individual}
#' \item{description}{(string) Natural language description of the individual.}
#' \item{in_1764}{(boolean) Is this entry in the 1764 edition?}
#' \item{in_1778}{(boolean) Is this entry in the 1778 edition?}
#' \item{in_names_omitted}{(boolean) Is this entry in the "Names Omitted" section of the 1764 edition?}
#' \item{division}{(string) The *Chart* divides individuals into six categories of occupations, which are referred to as "divisions" in the text.}
#' \item{occupation_abbr}{(string) Abbreviation of the occupation, as used in the name index.}
#' \item{occupation}{(string) Full name of the occupation. These values are generally the same as the provided documentation from the *Description*, but in a few cases they were modernized to avoid archaic or offensive language.}
#' \item{sect_abbr}{(string) Abbreviation of the particular sect/school of Greek philosophy to which the individual belonged, as used in the name index.}
#' \item{sect}{(string) Name of the particular sect/school of Greek philosophy to which the individual belonged.}
#' \item{born_min}{(integer) Lower estimate of the birth year of an individual. This is used as the start of the uncertain segment of a lifespan on the timeline. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{born_max}{(integer) Upper estimate of the birth year of an individual. This is used as the start of the certain segment of a lifespan on the timeline. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{died_min}{(integer) Lower estimate of the death year of an individual. This is used as the end of the certain segment of a lifespan on the timeline. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{died_max}{(integer) Upper estimate of the death year of an individual. This is used as the end of the uncertain segment of a lifespan on the timeline. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{born}{(integer) Birth year of an individual. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{born_about}{(boolean) Indicator for whether the birth year is approximate: "born about".}
#' \item{died}{(integer) Death year of an individual. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{died_about}{(boolean) Indicator for whether the death year is approximate: "died about".}
#' \item{died_after}{(boolean) Indicator for whether the death year should be interpreted as the lower value of a death, "died after".}
#' \item{age}{(integer) Age at which an individual died.}
#' \item{age_about}{(boolean) Is the age listed in `age` an approximation, "age about"?}
#' \item{flourished}{(integer) Year in which an individual was flourishing, meaning that the individual was active in their occupation. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{flourished_about}{(boolean) Indicator for whether `flourished` is approximate, "flourished about".}
#' \item{flourished_before}{(boolean) Indicator for whether `flourished` means "flourished before".}
#' \item{flourished_after}{(boolean) Indicator for whether `flourished` means "flourished after".}
#' \item{flourished_century}{(boolean) Indicator for whether the year in `flourished` refers to the century.}
#' \item{lived}{(integer) Year that an individual lived after. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{lifetype}{(list) A vector describing which of born, died, flourished, etc. is available for this entry.}
#'}
#'
#' @docType data
"Biographies"
|
/R/Biographies.R
|
no_license
|
jrnold/priestley
|
R
| false
| false
| 4,791
|
r
|
#' List of persons in Priestley's A Chart of Biography
#'
#' Table with biographical information: names, birth dates, death dates,
#' ages, occupation, category, and the lifespan segments to use in the
#' timeline for all individuals in the 1st (1764) and 7th (1778) editions
#' of Joseph Priestley's *A Chart of Biography*.
#'
#' @source Davis, S. B. (2010) "Names from Desc Chart 1764 OCRcorrected.pdf" <https://drive.google.com/file/d/0B4KIGf4GncycZGRmNWY4Y2QtZjNjNS00OGEzLWE0MjctMzY0NzFhM2I2YjFj/view?authkey=CPGfreEB>.
#'
#' @references
#' Priestley, J. (1765) *A chart of biography to the Right Honourable Hugh Lord Willoughby of Parham this chart is with the greatest respect and gratitude inscribed by his Lordship's most obedient and most humble servant Joseph Priestley*. 1st ed. London. <http://explore.bl.uk/BLVU1:LSCOP-ALL:BLLSFX3360000000234303>
#' Priestley, J. (1764) *A Description of a Chart of Biography* 1st ed. Warrington.
#' Priestley, J. (1778). *A Description of a Chart of Biography; with a Catalogue of All the Names Inserted in It, and the Dates Annexed to Them*. 7th ed. London: J. Johnson.
#'
#' @format A data frame with 2418 rows and 29 columns.
#' \describe{
#' \item{text}{(string) Original text from name index.}
#' \item{name}{(string) Name of the individual}
#' \item{description}{(string) Natural language description of the individual.}
#' \item{in_1764}{(boolean) Is this entry in the 1764 edition?}
#' \item{in_1778}{(boolean) Is this entry in the 1778 edition?}
#' \item{in_names_omitted}{(boolean) Is this entry in the "Names Omitted" section of the 1764 edition?}
#' \item{division}{(string) The *Chart* divides individuals into six categories of occupations, which are referred to as "divisions" in the text.}
#' \item{occupation_abbr}{(string) Abbreviation of the occupation, as used in the name index.}
#' \item{occupation}{(string) Full name of the occupation. These values are generally the same as the provided documentation from the *Description*, but in a few cases they were modernized to avoid archaic or offensive language.}
#' \item{sect_abbr}{(string) Abbreviation of the particular sect/school of Greek philosophy to which the individual belonged, as used in the name index.}
#' \item{sect}{(string) Name of the particular sect/school of Greek philosophy to which the individual belonged.}
#' \item{born_min}{(integer) Lower estimate of the birth year of an individual. This is used as the start of the uncertain segment of a lifespan on the timeline. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{born_max}{(integer) Upper estimate of the birth year of an individual. This is used as the start of the certain segment of a lifespan on the timeline. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{died_min}{(integer) Lower estimate of the death year of an individual. This is used as the end of the certain segment of a lifespan on the timeline. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{died_max}{(integer) Upper estimate of the death year of an individual. This is used as the end of the uncertain segment of a lifespan on the timeline. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{born}{(integer) Birth year of an individual. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{born_about}{(boolean) Indicator for whether the birth year is approximate: "born about".}
#' \item{died}{(integer) Death year of an individual. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{died_about}{(boolean) Indicator for whether the death year is approximate: "died about".}
#' \item{died_after}{(boolean) Indicator for whether the death year should be interpreted as the lower value of a death, "died after".}
#' \item{age}{(integer) Age at which an individual died.}
#' \item{age_about}{(boolean) Is the age listed in `age` an approximation, "age about"?}
#' \item{flourished}{(integer) Year in which an individual was flourishing, meaning that the individual was active in their occupation. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{flourished_about}{(boolean) Indicator for whether `flourished` is approximate, "flourished about".}
#' \item{flourished_before}{(boolean) Indicator for whether `flourished` means "flourished before".}
#' \item{flourished_after}{(boolean) Indicator for whether `flourished` means "flourished after".}
#' \item{flourished_century}{(boolean) Indicator for whether the year in `flourished` refers to the century.}
#' \item{lived}{(integer) Year that an individual lived after. Negative numbers are years BCE (0 = 1 BCE, -1 = 2 BCE).}
#' \item{lifetype}{(list) A vector describing which of born, died, flourished, etc. is available for this entry.}
#'}
#'
#' @docType data
"Biographies"
|
## Santiago Mota
## Exploratory Data Analysis
## Course Project 1
## https://class.coursera.org/exdata-002/human_grading/view/courses/972082/assessments/3/submissions
## https://github.com/santiagomota/ExData_Plotting1
# URL of the zipped household power consumption data set
urls <- ("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip")
# Create data directory
if(!file.exists("./data")){dir.create("./data")}
# Download zip file
download.file(urls, "./data/power_consumption.zip")
# Change to data directory
setwd("./data/")
# Unzip file
unzip("power_consumption.zip")
# Load dataset
energy <- read.csv("household_power_consumption.txt", header=TRUE, sep=";",
                   stringsAsFactors=FALSE)
# Convert Date to date format
energy$Date <- as.Date(strptime(energy$Date, "%d/%m/%Y"))
# Convert Time to POSIXlt format
energy$Time <- strptime(paste(energy$Date, energy$Time), "%Y-%m-%d %H:%M:%S")
# Replace the "?" missing-value marker with real NA (not the string "NA")
for(i in 3:8) {
    energy[energy[, i]=="?", i] <- NA
}
# Convert character to numeric
for(i in 3:8) {
    energy[, i] <- as.double(energy[, i])
}
# Subset the two days of interest
selection <- energy[(energy$Date=="2007-02-01" | energy$Date=="2007-02-02"), ]
par(mfrow = c(1, 1))
plot(selection$Time, selection$Sub_metering_1, type="l",
     col="black", ylab="Energy sub metering", xlab="")
lines(selection$Time, selection$Sub_metering_2, type="l", col="red")
lines(selection$Time, selection$Sub_metering_3, type="l", col="blue")
# Legend colours must follow the plotted line colours:
# Sub_metering_1 = black, Sub_metering_2 = red, Sub_metering_3 = blue
legend("topright", col=c("black", "red", "blue"), lty=1, xjust=0,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Return to the main directory (we are still inside ./data)
setwd("..")
# Create figure directory
if(!file.exists("./figure")){dir.create("./figure")}
# Print to png file
dev.copy(png, file="./figure/plot3.png", width=480, height=480) ## Copy my plot to a PNG file
dev.off()
|
/plot3.R
|
no_license
|
santiagomota/ExData_Plotting1
|
R
| false
| false
| 1,838
|
r
|
## Santiago Mota
## Exploratory Data Analysis
## Course Project 1
## https://class.coursera.org/exdata-002/human_grading/view/courses/972082/assessments/3/submissions
## https://github.com/santiagomota/ExData_Plotting1
# URL of the zipped household power consumption data set
urls <- ("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip")
# Create data directory
if(!file.exists("./data")){dir.create("./data")}
# Download zip file
download.file(urls, "./data/power_consumption.zip")
# Change to data directory
setwd("./data/")
# Unzip file
unzip("power_consumption.zip")
# Load dataset
energy <- read.csv("household_power_consumption.txt", header=TRUE, sep=";",
                   stringsAsFactors=FALSE)
# Convert Date to date format
energy$Date <- as.Date(strptime(energy$Date, "%d/%m/%Y"))
# Convert Time to POSIXlt format
energy$Time <- strptime(paste(energy$Date, energy$Time), "%Y-%m-%d %H:%M:%S")
# Replace the "?" missing-value marker with real NA (not the string "NA")
for(i in 3:8) {
    energy[energy[, i]=="?", i] <- NA
}
# Convert character to numeric
for(i in 3:8) {
    energy[, i] <- as.double(energy[, i])
}
# Subset the two days of interest
selection <- energy[(energy$Date=="2007-02-01" | energy$Date=="2007-02-02"), ]
par(mfrow = c(1, 1))
plot(selection$Time, selection$Sub_metering_1, type="l",
     col="black", ylab="Energy sub metering", xlab="")
lines(selection$Time, selection$Sub_metering_2, type="l", col="red")
lines(selection$Time, selection$Sub_metering_3, type="l", col="blue")
# Legend colours must follow the plotted line colours:
# Sub_metering_1 = black, Sub_metering_2 = red, Sub_metering_3 = blue
legend("topright", col=c("black", "red", "blue"), lty=1, xjust=0,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Return to the main directory (we are still inside ./data)
setwd("..")
# Create figure directory
if(!file.exists("./figure")){dir.create("./figure")}
# Print to png file
dev.copy(png, file="./figure/plot3.png", width=480, height=480) ## Copy my plot to a PNG file
dev.off()
|
#' Gets beta prior parameters
#'
#' Computes the shape parameters of a beta distribution from its mean and
#' coefficient of variation, and optionally plots the resulting density.
#' Uses the closed-form moment solution instead of the original grid search,
#' which relied on float-equality selection over a ~10^6-element grid and
#' was only approximate.
#'
#' @param mu mean of the beta distribution (0 < mu < 1)
#' @param CV coefficient of variation of the beta distribution
#' @param Min min of x-axis range (default = 0)
#' @param Prior x-axis label used when plotting
#' @param Plot logical; if TRUE, draw the density (default FALSE)
#' @return numeric vector c(a, b): shape1 and shape2 parameters
#' @export
get_beta <- function(mu, CV, Min = 0, Prior = "x", Plot = FALSE) {
  # Moment matching: mean = a/(a+b), var = mu*(1-mu)/(a+b+1), so
  # CV^2 = (1-mu)/(mu*(a+b+1))  =>  a+b = (1-mu)/(mu*CV^2) - 1
  ab <- (1 - mu) / (mu * CV^2) - 1
  a <- mu * ab
  b <- ab - a
  if (Plot == TRUE) {
    x <- seq(Min, 1, 0.001)
    pdf <- dbeta(x, a, b)
    plot(x, pdf, type = "l", xlim = range(x[pdf > 0.01]),
         xlab = paste(Prior), ylab = "", yaxt = "n")
    polygon(c(x, rev(x)),
            c(rep(0, length(x)), rev(ifelse(pdf == Inf, 100000, pdf))),
            col = "grey")
  }
  return(c(a, b))
}
#' Gets gamma prior parameters
#'
#' Computes the shape and rate parameters of a gamma distribution from its
#' mean and coefficient of variation, and optionally plots the density.
#' Uses the closed form (CV = 1/sqrt(shape)) instead of the original
#' ~10^8-element grid search, which was slow, memory-hungry and approximate.
#'
#' @param mu mean of the gamma distribution
#' @param CV coefficient of variation of the gamma distribution
#' @param Prior x-axis label used when plotting
#' @param Plot logical; if TRUE, draw the density (default FALSE)
#' @return numeric vector c(a, b): shape and rate parameters
#' @export
get_gamma <- function(mu, CV, Prior = "x", Plot = FALSE) {
  # For a gamma distribution: mean = a/b, var = a/b^2, so CV = 1/sqrt(a)
  a <- 1 / CV^2
  b <- a / mu
  if (Plot == TRUE) {
    # Deterministic plotting range from distribution quantiles
    # (the original drew 1000 random deviates just to set the range)
    x <- seq(qgamma(0.001, a, b), qgamma(0.999, a, b), length.out = 1000)
    pdf <- dgamma(x, a, b)
    plot(x, pdf, type = "l", xlim = range(x[pdf > 0.01]),
         xlab = paste(Prior), ylab = "", yaxt = "n")
    polygon(c(x, rev(x)),
            c(rep(0, length(x)), rev(ifelse(pdf == Inf, 100000, pdf))),
            col = "grey")
  }
  return(c(a, b))
}
#' Gets lognormal prior parameters
#'
#' Computes the log-scale standard deviation of a lognormal distribution
#' from its mean and coefficient of variation, and optionally plots the
#' density. NOTE(review): an identical definition appears again later in
#' this file and overrides this one when the file is sourced.
#'
#' @param mu mean of the lognormal distribution
#' @param CV coefficient of variation of the lognormal distribution
#' @param Prior x-axis label used when plotting
#' @param Plot logical; if TRUE, draw the density (default FALSE)
#' @return numeric vector c(mu, sdev): mean and log-scale sd
#' @export
plot_lnorm <- function(mu, CV, Prior = "x", Plot = FALSE) {
  sdev <- sqrt(log(CV^2 + 1))
  if (Plot == TRUE) {
    # Deterministic plotting range from distribution quantiles; the
    # original drew 1000 random deviates (perturbing the RNG state even
    # when Plot = FALSE) just to choose the axis range.
    x <- seq(qlnorm(0.001, log(mu), sdev), qlnorm(0.995, log(mu), sdev),
             length.out = 500)
    pdf <- dlnorm(x, log(mu), sdev)
    plot(x, pdf, type = "l", xlim = range(x),
         xlab = paste(Prior), ylab = "", yaxt = "n")
    polygon(c(x, rev(x)),
            c(rep(0, length(x)), rev(ifelse(pdf == Inf, 100000, pdf))),
            col = "grey")
  }
  return(c(mu, sdev))
}
#' Gets lognormal prior parameters
#'
#' Computes the log-scale standard deviation of a lognormal distribution
#' from its mean and coefficient of variation, and optionally plots the
#' density.
#'
#' @param mu mean of the lognormal distribution
#' @param CV coefficient of variation of the lognormal distribution
#' @param Prior x-axis label used when plotting
#' @param Plot logical; if TRUE, draw the density (default FALSE)
#' @return numeric vector c(mu, sdev): mean and log-scale sd
#' @export
plot_lnorm <- function(mu, CV, Prior = "x", Plot = FALSE) {
  sdev <- sqrt(log(CV^2 + 1))
  if (Plot == TRUE) {
    # Deterministic plotting range from distribution quantiles; the
    # original drew 1000 random deviates (perturbing the RNG state even
    # when Plot = FALSE) just to choose the axis range.
    x <- seq(qlnorm(0.001, log(mu), sdev), qlnorm(0.995, log(mu), sdev),
             length.out = 500)
    pdf <- dlnorm(x, log(mu), sdev)
    plot(x, pdf, type = "l", xlim = range(x),
         xlab = paste(Prior), ylab = "", yaxt = "n")
    polygon(c(x, rev(x)),
            c(rep(0, length(x)), rev(ifelse(pdf == Inf, 100000, pdf))),
            col = "grey")
  }
  return(c(mu, sdev))
}
#' Function kobeJabba for FLR
#'
#' Converts a kobe posterior array into a KOBE FLR input data frame.
#' @param x posterior array dims(iter,year,stock,harvest)
#' @export
kobeJabba <- function(x) {
  stock_long <- reshape::melt(x[, , 2])
  res <- cbind(stock_long, c(x[, , 3]))
  names(res) <- c("iter", "year", "stock", "harvest")
  res
}
#' Function to convert JABBA projections into FLR object
#'
#' Converts a kobe projection-matrix posterior array into a Kobe FLR
#' input data frame.
#' @param x posterior array dims(iter,year,tac,stock,harvest)
#' @export
kobeJabbaProj <- function(x) {
  stock_long <- reshape::melt(x[, , , 2])
  res <- cbind(stock_long, c(x[, , , 3]))
  names(res) <- c("iter", "year", "tac", "stock", "harvest")
  res
}
#' Runs test and 3-sigma limits for residual diagnostics
#'
#' Conducts a runs test (via the snpar package) and computes the 3-sigma
#' control limits of an individuals chart for a residual series.
#' @param x residuals from CPUE fits
#' @param type one of c("resid","observations"); default "resid"
#' @return list with the 3-sigma limits (sig3lim) and runs-test p value (p.runs)
#' @export
runs_sig3 <- function(x, type = NULL) {
  if (is.null(type)) type <- "resid"
  # Centre: zero for residuals, otherwise the series mean
  if (type == "resid") {
    mu <- 0
  } else {
    mu <- mean(x, na.rm = TRUE)
  }
  # Average moving range of the centred series
  mr <- abs(diff(x - mu))
  amr <- mean(mr, na.rm = TRUE)
  # Upper limit for moving ranges
  ulmr <- 3.267 * amr
  # Drop moving ranges above ulmr and recompute (Nelson 1982)
  mr <- mr[mr < ulmr]
  amr <- mean(mr, na.rm = TRUE)
  # Standard deviation estimate (Montgomery, eq. 6.33)
  stdev <- amr / 1.128
  # 3-sigma control limits
  lcl <- mu - 3 * stdev
  ucl <- mu + 3 * stdev
  # The runs test needs both signs present; otherwise report 0.001
  if (nlevels(factor(sign(x))) > 1) {
    pvalue <- round(snpar::runs.test(x)$p.value, 3)
  } else {
    pvalue <- 0.001
  }
  return(list(sig3lim = c(lcl, ucl), p.runs = pvalue))
}
|
/R/jabba_utils.R
|
no_license
|
Henning-Winker/JABBApkg
|
R
| false
| false
| 4,845
|
r
|
#' Gets beta prior parameters
#'
#' Computes the shape parameters of a beta distribution from its mean and
#' coefficient of variation, and optionally plots the resulting density.
#' Uses the closed-form moment solution instead of the original grid search,
#' which relied on float-equality selection over a ~10^6-element grid and
#' was only approximate.
#'
#' @param mu mean of the beta distribution (0 < mu < 1)
#' @param CV coefficient of variation of the beta distribution
#' @param Min min of x-axis range (default = 0)
#' @param Prior x-axis label used when plotting
#' @param Plot logical; if TRUE, draw the density (default FALSE)
#' @return numeric vector c(a, b): shape1 and shape2 parameters
#' @export
get_beta <- function(mu, CV, Min = 0, Prior = "x", Plot = FALSE) {
  # Moment matching: mean = a/(a+b), var = mu*(1-mu)/(a+b+1), so
  # CV^2 = (1-mu)/(mu*(a+b+1))  =>  a+b = (1-mu)/(mu*CV^2) - 1
  ab <- (1 - mu) / (mu * CV^2) - 1
  a <- mu * ab
  b <- ab - a
  if (Plot == TRUE) {
    x <- seq(Min, 1, 0.001)
    pdf <- dbeta(x, a, b)
    plot(x, pdf, type = "l", xlim = range(x[pdf > 0.01]),
         xlab = paste(Prior), ylab = "", yaxt = "n")
    polygon(c(x, rev(x)),
            c(rep(0, length(x)), rev(ifelse(pdf == Inf, 100000, pdf))),
            col = "grey")
  }
  return(c(a, b))
}
#' Gets gamma prior parameters
#'
#' Computes the shape and rate parameters of a gamma distribution from its
#' mean and coefficient of variation, and optionally plots the density.
#' Uses the closed form (CV = 1/sqrt(shape)) instead of the original
#' ~10^8-element grid search, which was slow, memory-hungry and approximate.
#'
#' @param mu mean of the gamma distribution
#' @param CV coefficient of variation of the gamma distribution
#' @param Prior x-axis label used when plotting
#' @param Plot logical; if TRUE, draw the density (default FALSE)
#' @return numeric vector c(a, b): shape and rate parameters
#' @export
get_gamma <- function(mu, CV, Prior = "x", Plot = FALSE) {
  # For a gamma distribution: mean = a/b, var = a/b^2, so CV = 1/sqrt(a)
  a <- 1 / CV^2
  b <- a / mu
  if (Plot == TRUE) {
    # Deterministic plotting range from distribution quantiles
    # (the original drew 1000 random deviates just to set the range)
    x <- seq(qgamma(0.001, a, b), qgamma(0.999, a, b), length.out = 1000)
    pdf <- dgamma(x, a, b)
    plot(x, pdf, type = "l", xlim = range(x[pdf > 0.01]),
         xlab = paste(Prior), ylab = "", yaxt = "n")
    polygon(c(x, rev(x)),
            c(rep(0, length(x)), rev(ifelse(pdf == Inf, 100000, pdf))),
            col = "grey")
  }
  return(c(a, b))
}
#' Gets lognormal prior parameters
#'
#' Computes the log-scale standard deviation of a lognormal distribution
#' from its mean and coefficient of variation, and optionally plots the
#' density. NOTE(review): an identical definition appears again later in
#' this file and overrides this one when the file is sourced.
#'
#' @param mu mean of the lognormal distribution
#' @param CV coefficient of variation of the lognormal distribution
#' @param Prior x-axis label used when plotting
#' @param Plot logical; if TRUE, draw the density (default FALSE)
#' @return numeric vector c(mu, sdev): mean and log-scale sd
#' @export
plot_lnorm <- function(mu, CV, Prior = "x", Plot = FALSE) {
  sdev <- sqrt(log(CV^2 + 1))
  if (Plot == TRUE) {
    # Deterministic plotting range from distribution quantiles; the
    # original drew 1000 random deviates (perturbing the RNG state even
    # when Plot = FALSE) just to choose the axis range.
    x <- seq(qlnorm(0.001, log(mu), sdev), qlnorm(0.995, log(mu), sdev),
             length.out = 500)
    pdf <- dlnorm(x, log(mu), sdev)
    plot(x, pdf, type = "l", xlim = range(x),
         xlab = paste(Prior), ylab = "", yaxt = "n")
    polygon(c(x, rev(x)),
            c(rep(0, length(x)), rev(ifelse(pdf == Inf, 100000, pdf))),
            col = "grey")
  }
  return(c(mu, sdev))
}
#' Gets lognormal prior parameters
#'
#' Computes the log-scale standard deviation of a lognormal distribution
#' from its mean and coefficient of variation, and optionally plots the
#' density.
#'
#' @param mu mean of the lognormal distribution
#' @param CV coefficient of variation of the lognormal distribution
#' @param Prior x-axis label used when plotting
#' @param Plot logical; if TRUE, draw the density (default FALSE)
#' @return numeric vector c(mu, sdev): mean and log-scale sd
#' @export
plot_lnorm <- function(mu, CV, Prior = "x", Plot = FALSE) {
  sdev <- sqrt(log(CV^2 + 1))
  if (Plot == TRUE) {
    # Deterministic plotting range from distribution quantiles; the
    # original drew 1000 random deviates (perturbing the RNG state even
    # when Plot = FALSE) just to choose the axis range.
    x <- seq(qlnorm(0.001, log(mu), sdev), qlnorm(0.995, log(mu), sdev),
             length.out = 500)
    pdf <- dlnorm(x, log(mu), sdev)
    plot(x, pdf, type = "l", xlim = range(x),
         xlab = paste(Prior), ylab = "", yaxt = "n")
    polygon(c(x, rev(x)),
            c(rep(0, length(x)), rev(ifelse(pdf == Inf, 100000, pdf))),
            col = "grey")
  }
  return(c(mu, sdev))
}
#' Function kobeJabba for FLR
#'
#' Converts a kobe posterior array into a KOBE FLR input data frame.
#' @param x posterior array dims(iter,year,stock,harvest)
#' @export
kobeJabba <- function(x) {
  stock_long <- reshape::melt(x[, , 2])
  res <- cbind(stock_long, c(x[, , 3]))
  names(res) <- c("iter", "year", "stock", "harvest")
  res
}
#' Function to convert JABBA projections into FLR object
#'
#' Converts a kobe projection-matrix posterior array into a Kobe FLR
#' input data frame.
#' @param x posterior array dims(iter,year,tac,stock,harvest)
#' @export
kobeJabbaProj <- function(x) {
  stock_long <- reshape::melt(x[, , , 2])
  res <- cbind(stock_long, c(x[, , , 3]))
  names(res) <- c("iter", "year", "tac", "stock", "harvest")
  res
}
#' Runs test and 3-sigma limits for residual diagnostics
#'
#' Conducts a runs test (via the snpar package) and computes the 3-sigma
#' control limits of an individuals chart for a residual series.
#' @param x residuals from CPUE fits
#' @param type one of c("resid","observations"); default "resid"
#' @return list with the 3-sigma limits (sig3lim) and runs-test p value (p.runs)
#' @export
runs_sig3 <- function(x, type = NULL) {
  if (is.null(type)) type <- "resid"
  # Centre: zero for residuals, otherwise the series mean
  if (type == "resid") {
    mu <- 0
  } else {
    mu <- mean(x, na.rm = TRUE)
  }
  # Average moving range of the centred series
  mr <- abs(diff(x - mu))
  amr <- mean(mr, na.rm = TRUE)
  # Upper limit for moving ranges
  ulmr <- 3.267 * amr
  # Drop moving ranges above ulmr and recompute (Nelson 1982)
  mr <- mr[mr < ulmr]
  amr <- mean(mr, na.rm = TRUE)
  # Standard deviation estimate (Montgomery, eq. 6.33)
  stdev <- amr / 1.128
  # 3-sigma control limits
  lcl <- mu - 3 * stdev
  ucl <- mu + 3 * stdev
  # The runs test needs both signs present; otherwise report 0.001
  if (nlevels(factor(sign(x))) > 1) {
    pvalue <- round(snpar::runs.test(x)$p.value, 3)
  } else {
    pvalue <- 0.001
  }
  return(list(sig3lim = c(lcl, ucl), p.runs = pvalue))
}
|
# -------------------------------------------------------------------------------
# This file is part of Ranger.
#
# Ranger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ranger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ranger. If not, see <http://www.gnu.org/licenses/>.
#
# Written by:
#
# Marvin N. Wright
# Institut fuer Medizinische Biometrie und Statistik
# Universitaet zu Luebeck
# Ratzeburger Allee 160
# 23562 Luebeck
# Germany
#
# http://www.imbs-luebeck.de
# -------------------------------------------------------------------------------
##' Prediction with new data and a saved forest from Ranger.
##'
##' For \code{type = 'response'} (the default), the predicted classes (classification), predicted numeric values (regression), predicted probabilities (probability estimation) or survival probabilities (survival) are returned.
##' For \code{type = 'se'}, the standard error of the predictions are returned (regression only). The jackknife-after-bootstrap or infinitesimal jackknife for bagging is used to estimate the standard errors based on out-of-bag predictions. See Wager et al. (2014) for details.
##' For \code{type = 'terminalNodes'}, the IDs of the terminal node in each tree for each observation in the given dataset are returned.
##'
##' If \code{type = 'se'} is selected, the method to estimate the variances can be chosen with \code{se.method}. Set \code{se.method = 'jack'} for jackknife after bootstrap and \code{se.method = 'infjack'} for the infinitesimal jackknife for bagging.
##'
##' For classification and \code{predict.all = TRUE}, a factor levels are returned as numerics.
##' To retrieve the corresponding factor levels, use \code{rf$forest$levels}, if \code{rf} is the ranger object.
##'
##' @title Ranger prediction
##' @param object Ranger \code{ranger.forest} object.
##' @param data New test data of class \code{data.frame} or \code{gwaa.data} (GenABEL).
##' @param predict.all Return individual predictions for each tree instead of aggregated predictions for all trees. Return a matrix (sample x tree) for classification and regression, a 3d array for probability estimation (sample x class x tree) and survival (sample x time x tree).
##' @param num.trees Number of trees used for prediction. The first \code{num.trees} in the forest are used.
##' @param type Type of prediction. One of 'response', 'se', 'terminalNodes', 'quantiles' with default 'response'. See below for details.
##' @param se.method Method to compute standard errors. One of 'jack', 'infjack' with default 'infjack'. Only applicable if type = 'se'. See below for details.
##' @param seed Random seed. Default is \code{NULL}, which generates the seed from \code{R}. Set to \code{0} to ignore the \code{R} seed. The seed is used in case of ties in classification mode.
##' @param num.threads Number of threads. Default is number of CPUs available.
##' @param verbose Verbose output on or off.
##' @param inbag.counts Number of times the observations are in-bag in the trees.
##' @param ... further arguments passed to or from other methods.
##' @return Object of class \code{ranger.prediction} with elements
##' \tabular{ll}{
##' \code{predictions} \tab Predicted classes/values (only for classification and regression) \cr
##' \code{unique.death.times} \tab Unique death times (only for survival). \cr
##' \code{chf} \tab Estimated cumulative hazard function for each sample (only for survival). \cr
##' \code{survival} \tab Estimated survival function for each sample (only for survival). \cr
##' \code{num.trees} \tab Number of trees. \cr
##' \code{num.independent.variables} \tab Number of independent variables. \cr
##' \code{treetype} \tab Type of forest/tree. Classification, regression or survival. \cr
##' \code{num.samples} \tab Number of samples.
##' }
##' @references
##' \itemize{
##' \item Wright, M. N. & Ziegler, A. (2017). ranger: A Fast Implementation of Random Forests for High Dimensional Data in C++ and R. J Stat Softw 77:1-17. \url{https://doi.org/10.18637/jss.v077.i01}.
##' \item Wager, S., Hastie T., & Efron, B. (2014). Confidence Intervals for Random Forests: The Jackknife and the Infinitesimal Jackknife. J Mach Learn Res 15:1625-1651. \url{http://jmlr.org/papers/v15/wager14a.html}.
##' }
##' @seealso \code{\link{ranger}}
##' @author Marvin N. Wright
##' @importFrom Matrix Matrix
##' @export
predict.ranger.forest <- function(object, data, predict.all = FALSE,
                                  num.trees = object$num.trees,
                                  type = "response", se.method = "infjack",
                                  seed = NULL, num.threads = NULL,
                                  verbose = TRUE, inbag.counts = NULL, ...) {
  ## GenABEL GWA data: split a gwaa.data object into phenotype data and the
  ## raw SNP matrix; the SNP part is passed to C++ separately.
  ## inherits() is used instead of "x" %in% class(data): equivalent here and
  ## the recommended S3 class test.
  if (inherits(data, "gwaa.data")) {
    snp.names <- snp.names(data)
    snp.data <- data@gtdata@gtps@.Data
    data <- data@phdata[, -1, drop = FALSE]
    gwa.mode <- TRUE
    variable.names <- c(names(data), snp.names)
  } else {
    snp.data <- as.matrix(0)
    gwa.mode <- FALSE
    variable.names <- colnames(data)
  }

  ## Check forest argument. inherits() is robust to objects carrying more than
  ## one class; class(object) != "..." errors inside if() for R >= 4.2 in that
  ## case.
  if (!inherits(object, "ranger.forest")) {
    stop("Error: Invalid class of input object.")
  } else {
    forest <- object
  }
  ## A usable forest must carry tree structure and metadata.
  if (is.null(forest$dependent.varID) || is.null(forest$num.trees) ||
      is.null(forest$child.nodeIDs) || is.null(forest$split.varIDs) ||
      is.null(forest$split.values) || is.null(forest$independent.variable.names) ||
      is.null(forest$treetype)) {
    stop("Error: Invalid forest object.")
  }
  if (forest$treetype == "Survival" && (is.null(forest$status.varID) ||
                                        is.null(forest$chf) || is.null(forest$unique.death.times))) {
    stop("Error: Invalid forest object.")
  }

  ## Check for old ranger version (child.nodeIDs layout changed in 0.3.9)
  if (length(forest$child.nodeIDs) != forest$num.trees || length(forest$child.nodeIDs[[1]]) != 2) {
    stop("Error: Invalid forest object. Is the forest grown in ranger version <0.3.9? Try to predict with the same version the forest was grown.")
  }

  ## Prediction type: 1 = response/se (aggregated values), 2 = terminal node IDs
  if (type == "response" || type == "se") {
    prediction.type <- 1
  } else if (type == "terminalNodes") {
    prediction.type <- 2
  } else if (type == "quantiles") {
    stop("Error: Apply predict() to the ranger object instead of the $forest object to predict quantiles.")
  } else {
    stop("Error: Invalid value for 'type'. Use 'response', 'se', 'terminalNodes', or 'quantiles'.")
  }

  ## Type "se" only for certain tree types
  if (type == "se" && se.method == "jack" && forest$treetype != "Regression") {
    stop("Error: Jackknife standard error prediction currently only available for regression.")
  }
  if (type == "se" && se.method == "infjack") {
    if (forest$treetype == "Survival") {
      stop("Error: Infinitesimal jackknife standard error prediction not yet available for survival.")
    } else if (forest$treetype == "Classification") {
      stop("Error: Not a probability forest. Set probability=TRUE to use the infinitesimal jackknife standard error prediction for classification.")
    }
  }

  ## Type "se" requires keep.inbag=TRUE (inbag counts define the OOB sets)
  if (type == "se" && is.null(inbag.counts)) {
    stop("Error: No saved inbag counts in ranger object. Please set keep.inbag=TRUE when calling ranger.")
  }

  ## Standard errors need per-tree predictions, so force predict.all
  if (type == "se") {
    predict.all <- TRUE
  }

  ## Create final data: the C++ layer expects the original column layout, so
  ## dummy outcome columns (zeros) are inserted where time/status/dependent
  ## variables sat during training.
  if (forest$treetype == "Survival") {
    if (forest$dependent.varID > 0 && forest$status.varID > 1) {
      if (ncol(data) == length(forest$independent.variable.names)+2) {
        ## If alternative interface used and same data structure, don't subset data
        data.used <- data
      } else if (ncol(data) == length(forest$independent.variable.names)) {
        data.selected <- data[, forest$independent.variable.names, drop = FALSE]
        data.used <- cbind(0, 0, data.selected)
        variable.names <- c("time", "status", forest$independent.variable.names)
        forest$dependent.varID <- 0
        forest$status.varID <- 1
      } else {
        stop("Invalid prediction data. Include both time and status variable or none.")
      }
    } else {
      ## If formula interface used, subset data
      data.selected <- data[, forest$independent.variable.names, drop = FALSE]

      ## Arrange data as in original data
      data.used <- cbind(0, 0, data.selected)
      variable.names <- c("time", "status", forest$independent.variable.names)
    }

    ## Index of no-recode variables (skip the outcome columns when recoding)
    idx.norecode <- c(-(forest$dependent.varID+1), -(forest$status.varID+1))
  } else {
    ## No survival
    if (ncol(data) == length(forest$independent.variable.names)+1 && forest$dependent.varID > 0) {
      ## If alternative interface used and same data structure, don't subset data
      data.used <- data
    } else {
      ## If formula interface used, subset data
      data.selected <- data[, forest$independent.variable.names, drop = FALSE]

      ## Arrange data as in original data: re-insert a dummy dependent column
      ## at its training-time position.
      if (forest$dependent.varID == 0) {
        data.used <- cbind(0, data.selected)
        variable.names <- c("dependent", forest$independent.variable.names)
      } else if (forest$dependent.varID >= ncol(data)) {
        data.used <- cbind(data.selected, 0)
        variable.names <- c(forest$independent.variable.names, "dependent")
      } else {
        data.used <- cbind(data.selected[, 1:forest$dependent.varID],
                           0,
                           data.selected[, (forest$dependent.varID+1):ncol(data.selected)])
        variable.names <- c(forest$independent.variable.names[1:forest$dependent.varID],
                            "dependent",
                            forest$independent.variable.names[(forest$dependent.varID+1):length(forest$independent.variable.names)])
      }
    }

    ## Index of no-recode variables
    idx.norecode <- -(forest$dependent.varID+1)
  }

  ## Recode characters to factors (data.matrix below needs numeric codes)
  if (!is.matrix(data.used) && !inherits(data.used, "Matrix")) {
    char.columns <- sapply(data.used, is.character)
    data.used[char.columns] <- lapply(data.used[char.columns], factor)
  }

  ## Recode factors if forest grown in 'order' mode: align factor levels with
  ## the training levels; previously unseen levels are appended.
  if (!is.null(forest$covariate.levels) && !all(sapply(forest$covariate.levels, is.null))) {
    data.used[, idx.norecode] <- mapply(function(x, y) {
      if(is.null(y)) {
        x
      } else {
        new.levels <- setdiff(levels(x), y)
        factor(x, levels = c(y, new.levels), exclude = NULL)
      }
    }, data.used[, idx.norecode], forest$covariate.levels, SIMPLIFY = !is.data.frame(data.used[, idx.norecode]))
  }

  ## Convert to data matrix (sparse and dense matrices pass through unchanged)
  if (is.matrix(data.used) || inherits(data.used, "Matrix")) {
    data.final <- data.used
  } else {
    data.final <- data.matrix(data.used)
  }

  ## If gwa mode, add snp variable names
  if (gwa.mode) {
    variable.names <- c(variable.names, snp.names)
  }

  ## Check missing values; report the offending columns explicitly
  if (any(is.na(data.final))) {
    offending_columns <- colnames(data.final)[colSums(is.na(data.final)) > 0]
    stop("Missing data in columns: ",
         paste0(offending_columns, collapse = ", "), ".", call. = FALSE)
  }

  ## All training predictors must be present in the prediction data
  if (!all(forest$independent.variable.names %in% variable.names)) {
    stop("Error: One or more independent variables not found in data.")
  }

  ## Num threads
  ## Default 0 -> detect from system in C++.
  if (is.null(num.threads)) {
    num.threads <- 0
  } else if (!is.numeric(num.threads) || num.threads < 0) {
    stop("Error: Invalid value for num.threads")
  }

  ## Seed: draw from R's RNG if not supplied (used for tie breaking)
  if (is.null(seed)) {
    seed <- runif(1, 0, .Machine$integer.max)
  }

  ## Map tree type to the numeric codes expected by rangerCpp
  if (forest$treetype == "Classification") {
    treetype <- 1
  } else if (forest$treetype == "Regression") {
    treetype <- 3
  } else if (forest$treetype == "Survival") {
    treetype <- 5
  } else if (forest$treetype == "Probability estimation") {
    treetype <- 9
  } else {
    stop("Error: Unknown tree type.")
  }

  ## Defaults for variables not needed in prediction mode (rangerCpp requires
  ## the full argument list regardless).
  dependent.variable.name <- ""
  mtry <- 0
  importance <- 0
  min.node.size <- 0
  split.select.weights <- list(c(0, 0))
  use.split.select.weights <- FALSE
  always.split.variables <- c("0", "0")
  use.always.split.variables <- FALSE
  status.variable.name <- "status"
  prediction.mode <- TRUE
  write.forest <- FALSE
  replace <- TRUE
  probability <- FALSE
  unordered.factor.variables <- c("0", "0")
  use.unordered.factor.variables <- FALSE
  save.memory <- FALSE
  splitrule <- 1
  alpha <- 0
  minprop <- 0
  case.weights <- c(0, 0)
  use.case.weights <- FALSE
  class.weights <- c(0, 0)
  keep.inbag <- FALSE
  sample.fraction <- 1
  holdout <- FALSE
  num.random.splits <- 1
  order.snps <- FALSE
  oob.error <- FALSE
  max.depth <- 0
  inbag <- list(c(0, 0))
  use.inbag <- FALSE
  bootstrap.ts.num <- 1
  by.end <- TRUE
  block.size <- 10
  period <- 1

  ## Use sparse matrix: pass the sparse data separately, replace the dense
  ## matrix with a dummy (and vice versa).
  if (inherits(data.final, "dgCMatrix")) {
    sparse.data <- data.final
    data.final <- matrix(c(0, 0))
    use.sparse.data <- TRUE
  } else {
    sparse.data <- Matrix(matrix(c(0, 0)))
    use.sparse.data <- FALSE
  }

  ## Call Ranger
  result <- rangerCpp(treetype, dependent.variable.name, data.final, variable.names, mtry,
                      num.trees, verbose, seed, num.threads, write.forest, importance,
                      min.node.size, split.select.weights, use.split.select.weights,
                      always.split.variables, use.always.split.variables,
                      status.variable.name, prediction.mode, forest, snp.data, replace, probability,
                      unordered.factor.variables, use.unordered.factor.variables, save.memory, splitrule,
                      case.weights, use.case.weights, class.weights,
                      predict.all, keep.inbag, sample.fraction, alpha, minprop, holdout,
                      prediction.type, num.random.splits, sparse.data, use.sparse.data,
                      order.snps, oob.error, max.depth, inbag, use.inbag,
                      bootstrap.ts.num, by.end, block.size, period)

  if (length(result) == 0) {
    stop("User interrupt or internal error.")
  }

  ## Prepare results: normalize the per-tree predictions returned from C++
  ## into matrices (sample x tree) or 3d arrays (sample x class/time x tree).
  result$num.samples <- nrow(data.final)
  result$treetype <- forest$treetype

  if (predict.all) {
    if (forest$treetype %in% c("Classification", "Regression")) {
      if (is.list(result$predictions)) {
        result$predictions <- do.call(rbind, result$predictions)
      } else {
        result$predictions <- array(result$predictions, dim = c(1, length(result$predictions)))
      }
    } else {
      ## TODO: Better solution for this?
      result$predictions <- aperm(array(unlist(result$predictions),
                                        dim = rev(c(length(result$predictions),
                                                    length(result$predictions[[1]]),
                                                    length(result$predictions[[1]][[1]])))))
    }
  } else {
    if (is.list(result$predictions)) {
      result$predictions <- do.call(rbind, result$predictions)
    }
  }

  if (type == "response") {
    if (forest$treetype == "Classification" && !is.null(forest$levels)) {
      if (!predict.all) {
        result$predictions <- integer.to.factor(result$predictions, forest$levels)
      }
    } else if (forest$treetype == "Regression") {
      ## Empty
    } else if (forest$treetype == "Survival") {
      result$unique.death.times <- forest$unique.death.times
      result$chf <- result$predictions
      result$predictions <- NULL
      result$survival <- exp(-result$chf)
    } else if (forest$treetype == "Probability estimation" && !is.null(forest$levels)) {
      if (!predict.all) {
        if (is.vector(result$predictions)) {
          result$predictions <- matrix(result$predictions, nrow = 1)
        }

        ## Set colnames and sort by levels
        colnames(result$predictions) <- forest$levels[forest$class.values]
        result$predictions <- result$predictions[, forest$levels[sort(forest$class.values)], drop = FALSE]
      }
    }
  } else if (type == "terminalNodes") {
    if (is.vector(result$predictions)) {
      result$predictions <- matrix(result$predictions, nrow = 1)
    }
  }

  ## Compute Jackknife standard errors (Wager et al. 2014)
  if (type == "se") {
    ## Aggregated predictions (mean over trees)
    if (length(dim(result$predictions)) > 2) {
      yhat <- apply(result$predictions, c(1, 2), mean)
    } else {
      yhat <- rowMeans(result$predictions)
    }

    ## Get inbag counts, keep only observations that are OOB at least once
    inbag.counts <- simplify2array(inbag.counts)
    if (is.vector(inbag.counts)) {
      inbag.counts <- t(as.matrix(inbag.counts))
    }
    inbag.counts <- inbag.counts[rowSums(inbag.counts == 0) > 0, , drop = FALSE]
    n <- nrow(inbag.counts)
    oob <- inbag.counts == 0
    if (num.trees != object$num.trees) {
      ## drop = FALSE keeps the matrix shape when only one observation is OOB
      oob <- oob[, seq_len(num.trees), drop = FALSE]
    }

    if (all(!oob)) {
      stop("Error: No OOB observations found, consider increasing num.trees or reducing sample.fraction.")
    }

    if (se.method == "jack") {
      ## Compute jackknife-after-bootstrap estimate with bias correction
      oob.count <- rowSums(oob)
      jack.n <- sweep(tcrossprod(result$predictions, oob),
                      2, oob.count, "/", check.margin = FALSE)
      if (is.vector(jack.n)) {
        jack.n <- t(as.matrix(jack.n))
      }
      if (any(oob.count == 0)) {
        n <- sum(oob.count > 0)
        ## drop = FALSE: a single-row jack.n must stay a matrix for rowSums below
        jack.n <- jack.n[, oob.count > 0, drop = FALSE]
      }

      jack <- (n - 1) / n * rowSums((jack.n - yhat)^2)
      bias <- (exp(1) - 1) * n / result$num.trees^2 * rowSums((result$predictions - yhat)^2)
      jab <- pmax(jack - bias, 0)
      result$se <- sqrt(jab)
    } else if (se.method == "infjack") {
      if (forest$treetype == "Regression") {
        infjack <- rInfJack(pred = result$predictions, inbag = inbag.counts, used.trees = seq_len(num.trees))
        result$se <- sqrt(infjack$var.hat)
      } else if (forest$treetype == "Probability estimation") {
        infjack <- apply(result$predictions, 2, function(x) {
          rInfJack(x, inbag.counts)$var.hat
        })
        result$se <- sqrt(infjack)
      }
    } else {
      stop("Error: Unknown standard error method (se.method).")
    }

    ## Response as predictions
    result$predictions <- yhat

    if (forest$treetype == "Probability estimation") {
      ## Set colnames and sort by levels
      colnames(result$predictions) <- forest$levels[forest$class.values]
      result$predictions <- result$predictions[, forest$levels, drop = FALSE]
    }
  }

  class(result) <- "ranger.prediction"
  return(result)
}
##' Prediction with new data and a saved forest from Ranger.
##'
##' For \code{type = 'response'} (the default), the predicted classes (classification), predicted numeric values (regression), predicted probabilities (probability estimation) or survival probabilities (survival) are returned.
##' For \code{type = 'se'}, the standard error of the predictions are returned (regression only). The jackknife-after-bootstrap or infinitesimal jackknife for bagging is used to estimate the standard errors based on out-of-bag predictions. See Wager et al. (2014) for details.
##' For \code{type = 'terminalNodes'}, the IDs of the terminal node in each tree for each observation in the given dataset are returned.
##' For \code{type = 'quantiles'}, the selected quantiles for each observation are estimated. See Meinshausen (2006) for details.
##'
##' If \code{type = 'se'} is selected, the method to estimate the variances can be chosen with \code{se.method}. Set \code{se.method = 'jack'} for jackknife-after-bootstrap and \code{se.method = 'infjack'} for the infinitesimal jackknife for bagging.
##'
##' For classification and \code{predict.all = TRUE}, factor levels are returned as numerics.
##' To retrieve the corresponding factor levels, use \code{rf$forest$levels}, if \code{rf} is the ranger object.
##'
##' @title Ranger prediction
##' @param object Ranger \code{ranger} object.
##' @param data New test data of class \code{data.frame} or \code{gwaa.data} (GenABEL).
##' @param predict.all Return individual predictions for each tree instead of aggregated predictions for all trees. Return a matrix (sample x tree) for classification and regression, a 3d array for probability estimation (sample x class x tree) and survival (sample x time x tree).
##' @param num.trees Number of trees used for prediction. The first \code{num.trees} in the forest are used.
##' @param type Type of prediction. One of 'response', 'se', 'terminalNodes', 'quantiles' with default 'response'. See below for details.
##' @param se.method Method to compute standard errors. One of 'jack', 'infjack' with default 'infjack'. Only applicable if type = 'se'. See below for details.
##' @param quantiles Vector of quantiles for quantile prediction. Set \code{type = 'quantiles'} to use.
##' @param seed Random seed. Default is \code{NULL}, which generates the seed from \code{R}. Set to \code{0} to ignore the \code{R} seed. The seed is used in case of ties in classification mode.
##' @param num.threads Number of threads. Default is number of CPUs available.
##' @param verbose Verbose output on or off.
##' @param ... further arguments passed to or from other methods.
##' @return Object of class \code{ranger.prediction} with elements
##' \tabular{ll}{
##' \code{predictions} \tab Predicted classes/values (only for classification and regression) \cr
##' \code{unique.death.times} \tab Unique death times (only for survival). \cr
##' \code{chf} \tab Estimated cumulative hazard function for each sample (only for survival). \cr
##' \code{survival} \tab Estimated survival function for each sample (only for survival). \cr
##' \code{num.trees} \tab Number of trees. \cr
##' \code{num.independent.variables} \tab Number of independent variables. \cr
##' \code{treetype} \tab Type of forest/tree. Classification, regression or survival. \cr
##' \code{num.samples} \tab Number of samples.
##' }
##' @references
##' \itemize{
##' \item Wright, M. N. & Ziegler, A. (2017). ranger: A Fast Implementation of Random Forests for High Dimensional Data in C++ and R. J Stat Softw 77:1-17. \url{https://doi.org/10.18637/jss.v077.i01}.
##' \item Wager, S., Hastie T., & Efron, B. (2014). Confidence Intervals for Random Forests: The Jackknife and the Infinitesimal Jackknife. J Mach Learn Res 15:1625-1651. \url{http://jmlr.org/papers/v15/wager14a.html}.
##' \item Meinshausen (2006). Quantile Regression Forests. J Mach Learn Res 7:983-999. \url{http://www.jmlr.org/papers/v7/meinshausen06a.html}.
##' }
##' @seealso \code{\link{ranger}}
##' @author Marvin N. Wright
##' @export
predict.ranger <- function(object, data = NULL, predict.all = FALSE,
                           num.trees = object$num.trees,
                           type = "response", se.method = "infjack",
                           quantiles = c(0.1, 0.5, 0.9),
                           seed = NULL, num.threads = NULL,
                           verbose = TRUE, ...) {
  ## Any prediction requires the forest to have been saved at training time
  forest <- object$forest
  if (is.null(forest)) {
    stop("Error: No saved forest in ranger object. Please set write.forest to TRUE when calling ranger.")
  }

  if (object$importance.mode %in% c("impurity_corrected", "impurity_unbiased")) {
    warning("Forest was grown with 'impurity_corrected' variable importance. For prediction it is advised to grow another forest without this importance setting.")
  }

  if (type == "quantiles") {
    ## Quantile prediction (quantile regression forests, Meinshausen 2006)
    if (object$treetype != "Regression") {
      stop("Error: Quantile prediction implemented only for regression outcomes.")
    }
    if (is.null(object$random.node.values)) {
      stop("Error: Set quantreg=TRUE in ranger(...) for quantile prediction.")
    }

    if (is.null(data)) {
      ## OOB prediction: use the pre-computed out-of-bag node values
      if (is.null(object$random.node.values.oob)) {
        stop("Error: Set keep.inbag=TRUE in ranger(...) for out-of-bag quantile prediction or provide new data in predict(...).")
      }
      node.values <- object$random.node.values.oob
    } else {
      ## New data prediction: for each tree, look up the random training
      ## response stored for the terminal node each sample falls into.
      ## +1 converts C++ 0-based node IDs to R 1-based indices.
      terminal.nodes <- predict(object, data, type = "terminalNodes")$predictions + 1
      node.values <- 0 * terminal.nodes
      for (tree in seq_len(num.trees)) {
        node.values[, tree] <- object$random.node.values[terminal.nodes[, tree], tree]
      }
    }

    ## Prepare results
    result <- list(num.samples = nrow(node.values),
                   treetype = object$treetype,
                   num.independent.variables = object$num.independent.variables,
                   num.trees = num.trees)
    class(result) <- "ranger.prediction"

    ## Compute quantiles of the per-sample node value distribution
    result$predictions <- t(apply(node.values, 1, quantile, quantiles, na.rm = TRUE))
    if (nrow(result$predictions) != result$num.samples) {
      ## Fix result for single quantile (apply() returns a vector, t() flips it)
      result$predictions <- t(result$predictions)
    }
    colnames(result$predictions) <- paste("quantile=", quantiles)
    result
  } else {
    ## Non-quantile prediction: delegate to predict.ranger.forest
    if (is.null(data)) {
      stop("Error: Argument 'data' is required for non-quantile prediction.")
    }
    predict(forest, data, predict.all, num.trees, type, se.method, seed, num.threads, verbose, object$inbag.counts, ...)
  }
}
|
/rangerts/R/predict.R
|
no_license
|
BenjaminGoehry/BlocRF
|
R
| false
| false
| 25,752
|
r
|
# -------------------------------------------------------------------------------
# This file is part of Ranger.
#
# Ranger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ranger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ranger. If not, see <http://www.gnu.org/licenses/>.
#
# Written by:
#
# Marvin N. Wright
# Institut fuer Medizinische Biometrie und Statistik
# Universitaet zu Luebeck
# Ratzeburger Allee 160
# 23562 Luebeck
# Germany
#
# http://www.imbs-luebeck.de
# -------------------------------------------------------------------------------
##' Prediction with new data and a saved forest from Ranger.
##'
##' For \code{type = 'response'} (the default), the predicted classes (classification), predicted numeric values (regression), predicted probabilities (probability estimation) or survival probabilities (survival) are returned.
##' For \code{type = 'se'}, the standard error of the predictions are returned (regression only). The jackknife-after-bootstrap or infinitesimal jackknife for bagging is used to estimate the standard errors based on out-of-bag predictions. See Wager et al. (2014) for details.
##' For \code{type = 'terminalNodes'}, the IDs of the terminal node in each tree for each observation in the given dataset are returned.
##'
##' If \code{type = 'se'} is selected, the method to estimate the variances can be chosen with \code{se.method}. Set \code{se.method = 'jack'} for jackknife after bootstrap and \code{se.method = 'infjack'} for the infinitesimal jackknife for bagging.
##'
##' For classification and \code{predict.all = TRUE}, factor levels are returned as numerics.
##' To retrieve the corresponding factor levels, use \code{rf$forest$levels}, if \code{rf} is the ranger object.
##'
##' @title Ranger prediction
##' @param object Ranger \code{ranger.forest} object.
##' @param data New test data of class \code{data.frame} or \code{gwaa.data} (GenABEL).
##' @param predict.all Return individual predictions for each tree instead of aggregated predictions for all trees. Return a matrix (sample x tree) for classification and regression, a 3d array for probability estimation (sample x class x tree) and survival (sample x time x tree).
##' @param num.trees Number of trees used for prediction. The first \code{num.trees} in the forest are used.
##' @param type Type of prediction. One of 'response', 'se', 'terminalNodes', 'quantiles' with default 'response'. See below for details.
##' @param se.method Method to compute standard errors. One of 'jack', 'infjack' with default 'infjack'. Only applicable if type = 'se'. See below for details.
##' @param seed Random seed. Default is \code{NULL}, which generates the seed from \code{R}. Set to \code{0} to ignore the \code{R} seed. The seed is used in case of ties in classification mode.
##' @param num.threads Number of threads. Default is number of CPUs available.
##' @param verbose Verbose output on or off.
##' @param inbag.counts Number of times the observations are in-bag in the trees.
##' @param ... further arguments passed to or from other methods.
##' @return Object of class \code{ranger.prediction} with elements
##' \tabular{ll}{
##' \code{predictions} \tab Predicted classes/values (only for classification and regression) \cr
##' \code{unique.death.times} \tab Unique death times (only for survival). \cr
##' \code{chf} \tab Estimated cumulative hazard function for each sample (only for survival). \cr
##' \code{survival} \tab Estimated survival function for each sample (only for survival). \cr
##' \code{num.trees} \tab Number of trees. \cr
##' \code{num.independent.variables} \tab Number of independent variables. \cr
##' \code{treetype} \tab Type of forest/tree. Classification, regression or survival. \cr
##' \code{num.samples} \tab Number of samples.
##' }
##' @references
##' \itemize{
##' \item Wright, M. N. & Ziegler, A. (2017). ranger: A Fast Implementation of Random Forests for High Dimensional Data in C++ and R. J Stat Softw 77:1-17. \url{https://doi.org/10.18637/jss.v077.i01}.
##' \item Wager, S., Hastie T., & Efron, B. (2014). Confidence Intervals for Random Forests: The Jackknife and the Infinitesimal Jackknife. J Mach Learn Res 15:1625-1651. \url{http://jmlr.org/papers/v15/wager14a.html}.
##' }
##' @seealso \code{\link{ranger}}
##' @author Marvin N. Wright
##' @importFrom Matrix Matrix
##' @export
predict.ranger.forest <- function(object, data, predict.all = FALSE,
num.trees = object$num.trees,
type = "response", se.method = "infjack",
seed = NULL, num.threads = NULL,
verbose = TRUE, inbag.counts = NULL, ...) {
## GenABEL GWA data
if ("gwaa.data" %in% class(data)) {
snp.names <- snp.names(data)
snp.data <- data@gtdata@gtps@.Data
data <- data@phdata[, -1, drop = FALSE]
gwa.mode <- TRUE
variable.names <- c(names(data), snp.names)
} else {
snp.data <- as.matrix(0)
gwa.mode <- FALSE
variable.names <- colnames(data)
}
## Check forest argument
if (class(object) != "ranger.forest") {
stop("Error: Invalid class of input object.")
} else {
forest <- object
}
if (is.null(forest$dependent.varID) || is.null(forest$num.trees) ||
is.null(forest$child.nodeIDs) || is.null(forest$split.varIDs) ||
is.null(forest$split.values) || is.null(forest$independent.variable.names) ||
is.null(forest$treetype)) {
stop("Error: Invalid forest object.")
}
if (forest$treetype == "Survival" && (is.null(forest$status.varID) ||
is.null(forest$chf) || is.null(forest$unique.death.times))) {
stop("Error: Invalid forest object.")
}
## Check for old ranger version
if (length(forest$child.nodeIDs) != forest$num.trees || length(forest$child.nodeIDs[[1]]) != 2) {
stop("Error: Invalid forest object. Is the forest grown in ranger version <0.3.9? Try to predict with the same version the forest was grown.")
}
## Prediction type
if (type == "response" || type == "se") {
prediction.type <- 1
} else if (type == "terminalNodes") {
prediction.type <- 2
} else if (type == "quantiles") {
stop("Error: Apply predict() to the ranger object instead of the $forest object to predict quantiles.")
} else {
stop("Error: Invalid value for 'type'. Use 'response', 'se', 'terminalNodes', or 'quantiles'.")
}
## Type "se" only for certain tree types
if (type == "se" && se.method == "jack" && forest$treetype != "Regression") {
stop("Error: Jackknife standard error prediction currently only available for regression.")
}
if (type == "se" && se.method == "infjack") {
if (forest$treetype == "Survival") {
stop("Error: Infinitesimal jackknife standard error prediction not yet available for survival.")
} else if (forest$treetype == "Classification") {
stop("Error: Not a probability forest. Set probability=TRUE to use the infinitesimal jackknife standard error prediction for classification.")
}
}
## Type "se" requires keep.inbag=TRUE
if (type == "se" && is.null(inbag.counts)) {
stop("Error: No saved inbag counts in ranger object. Please set keep.inbag=TRUE when calling ranger.")
}
## Set predict.all if type is "se"
if (type == "se") {
predict.all <- TRUE
}
## Create final data
if (forest$treetype == "Survival") {
if (forest$dependent.varID > 0 && forest$status.varID > 1) {
if (ncol(data) == length(forest$independent.variable.names)+2) {
## If alternative interface used and same data structure, don't subset data
data.used <- data
} else if (ncol(data) == length(forest$independent.variable.names)) {
data.selected <- data[, forest$independent.variable.names, drop = FALSE]
data.used <- cbind(0, 0, data.selected)
variable.names <- c("time", "status", forest$independent.variable.names)
forest$dependent.varID <- 0
forest$status.varID <- 1
} else {
stop("Invalid prediction data. Include both time and status variable or none.")
}
} else {
## If formula interface used, subset data
data.selected <- data[, forest$independent.variable.names, drop = FALSE]
## Arange data as in original data
data.used <- cbind(0, 0, data.selected)
variable.names <- c("time", "status", forest$independent.variable.names)
}
## Index of no-recode variables
idx.norecode <- c(-(forest$dependent.varID+1), -(forest$status.varID+1))
} else {
## No survival
if (ncol(data) == length(forest$independent.variable.names)+1 && forest$dependent.varID > 0) {
## If alternative interface used and same data structure, don't subset data
data.used <- data
} else {
## If formula interface used, subset data
data.selected <- data[, forest$independent.variable.names, drop = FALSE]
## Arange data as in original data
if (forest$dependent.varID == 0) {
data.used <- cbind(0, data.selected)
variable.names <- c("dependent", forest$independent.variable.names)
} else if (forest$dependent.varID >= ncol(data)) {
data.used <- cbind(data.selected, 0)
variable.names <- c(forest$independent.variable.names, "dependent")
} else {
data.used <- cbind(data.selected[, 1:forest$dependent.varID],
0,
data.selected[, (forest$dependent.varID+1):ncol(data.selected)])
variable.names <- c(forest$independent.variable.names[1:forest$dependent.varID],
"dependent",
forest$independent.variable.names[(forest$dependent.varID+1):length(forest$independent.variable.names)])
}
}
## Index of no-recode variables
idx.norecode <- -(forest$dependent.varID+1)
}
## Recode characters
if (!is.matrix(data.used) && !inherits(data.used, "Matrix")) {
char.columns <- sapply(data.used, is.character)
data.used[char.columns] <- lapply(data.used[char.columns], factor)
}
## Recode factors if forest grown 'order' mode
if (!is.null(forest$covariate.levels) && !all(sapply(forest$covariate.levels, is.null))) {
data.used[, idx.norecode] <- mapply(function(x, y) {
if(is.null(y)) {
x
} else {
new.levels <- setdiff(levels(x), y)
factor(x, levels = c(y, new.levels), exclude = NULL)
}
}, data.used[, idx.norecode], forest$covariate.levels, SIMPLIFY = !is.data.frame(data.used[, idx.norecode]))
}
## Convert to data matrix
if (is.matrix(data.used) || inherits(data.used, "Matrix")) {
data.final <- data.used
} else {
data.final <- data.matrix(data.used)
}
## If gwa mode, add snp variable names
if (gwa.mode) {
variable.names <- c(variable.names, snp.names)
}
## Check missing values
if (any(is.na(data.final))) {
offending_columns <- colnames(data.final)[colSums(is.na(data.final)) > 0]
stop("Missing data in columns: ",
paste0(offending_columns, collapse = ", "), ".", call. = FALSE)
}
if (sum(!(forest$independent.variable.names %in% variable.names)) > 0) {
stop("Error: One or more independent variables not found in data.")
}
## Num threads
## Default 0 -> detect from system in C++.
if (is.null(num.threads)) {
num.threads = 0
} else if (!is.numeric(num.threads) || num.threads < 0) {
stop("Error: Invalid value for num.threads")
}
## Seed
if (is.null(seed)) {
seed <- runif(1 , 0, .Machine$integer.max)
}
if (forest$treetype == "Classification") {
treetype <- 1
} else if (forest$treetype == "Regression") {
treetype <- 3
} else if (forest$treetype == "Survival") {
treetype <- 5
} else if (forest$treetype == "Probability estimation") {
treetype <- 9
} else {
stop("Error: Unknown tree type.")
}
## Defaults for variables not needed
dependent.variable.name <- ""
mtry <- 0
importance <- 0
min.node.size <- 0
split.select.weights <- list(c(0, 0))
use.split.select.weights <- FALSE
always.split.variables <- c("0", "0")
use.always.split.variables <- FALSE
status.variable.name <- "status"
prediction.mode <- TRUE
write.forest <- FALSE
replace <- TRUE
probability <- FALSE
unordered.factor.variables <- c("0", "0")
use.unordered.factor.variables <- FALSE
save.memory <- FALSE
splitrule <- 1
alpha <- 0
minprop <- 0
case.weights <- c(0, 0)
use.case.weights <- FALSE
class.weights <- c(0, 0)
keep.inbag <- FALSE
sample.fraction <- 1
holdout <- FALSE
num.random.splits <- 1
order.snps <- FALSE
oob.error <- FALSE
max.depth <- 0
inbag <- list(c(0,0))
use.inbag <- FALSE
bootstrap.ts.num <- 1
by.end <- TRUE
block.size <- 10
period <- 1
## Use sparse matrix
if ("dgCMatrix" %in% class(data.final)) {
sparse.data <- data.final
data.final <- matrix(c(0, 0))
use.sparse.data <- TRUE
} else {
sparse.data <- Matrix(matrix(c(0, 0)))
use.sparse.data <- FALSE
}
## Call Ranger
result <- rangerCpp(treetype, dependent.variable.name, data.final, variable.names, mtry,
num.trees, verbose, seed, num.threads, write.forest, importance,
min.node.size, split.select.weights, use.split.select.weights,
always.split.variables, use.always.split.variables,
status.variable.name, prediction.mode, forest, snp.data, replace, probability,
unordered.factor.variables, use.unordered.factor.variables, save.memory, splitrule,
case.weights, use.case.weights, class.weights,
predict.all, keep.inbag, sample.fraction, alpha, minprop, holdout,
prediction.type, num.random.splits, sparse.data, use.sparse.data,
order.snps, oob.error, max.depth, inbag, use.inbag,
bootstrap.ts.num, by.end, block.size, period)
if (length(result) == 0) {
stop("User interrupt or internal error.")
}
## Prepare results
result$num.samples <- nrow(data.final)
result$treetype <- forest$treetype
if (predict.all) {
if (forest$treetype %in% c("Classification", "Regression")) {
if (is.list(result$predictions)) {
result$predictions <- do.call(rbind, result$predictions)
} else {
result$predictions <- array(result$predictions, dim = c(1, length(result$predictions)))
}
} else {
## TODO: Better solution for this?
result$predictions <- aperm(array(unlist(result$predictions),
dim = rev(c(length(result$predictions),
length(result$predictions[[1]]),
length(result$predictions[[1]][[1]])))))
}
} else {
if (is.list(result$predictions)) {
result$predictions <- do.call(rbind, result$predictions)
}
}
if (type == "response") {
if (forest$treetype == "Classification" && !is.null(forest$levels)) {
if (!predict.all) {
result$predictions <- integer.to.factor(result$predictions, forest$levels)
}
} else if (forest$treetype == "Regression") {
## Empty
} else if (forest$treetype == "Survival") {
result$unique.death.times <- forest$unique.death.times
result$chf <- result$predictions
result$predictions <- NULL
result$survival <- exp(-result$chf)
} else if (forest$treetype == "Probability estimation" && !is.null(forest$levels)) {
if (!predict.all) {
if (is.vector(result$predictions)) {
result$predictions <- matrix(result$predictions, nrow = 1)
}
## Set colnames and sort by levels
colnames(result$predictions) <- forest$levels[forest$class.values]
result$predictions <- result$predictions[, forest$levels[sort(forest$class.values)], drop = FALSE]
}
}
} else if (type == "terminalNodes") {
if (is.vector(result$predictions)) {
result$predictions <- matrix(result$predictions, nrow = 1)
}
}
## Compute Jackknife
if (type == "se") {
## Aggregated predictions
if (length(dim(result$predictions)) > 2) {
yhat <- apply(result$predictions, c(1, 2), mean)
} else {
yhat <- rowMeans(result$predictions)
}
## Get inbag counts, keep only observations that are OOB at least once
inbag.counts <- simplify2array(inbag.counts)
if (is.vector(inbag.counts)) {
inbag.counts <- t(as.matrix(inbag.counts))
}
inbag.counts <- inbag.counts[rowSums(inbag.counts == 0) > 0, , drop = FALSE]
n <- nrow(inbag.counts)
oob <- inbag.counts == 0
if (num.trees != object$num.trees) {
oob <- oob[, 1:num.trees]
}
if (all(!oob)) {
stop("Error: No OOB observations found, consider increasing num.trees or reducing sample.fraction.")
}
if (se.method == "jack") {
## Compute Jackknife
oob.count <- rowSums(oob)
jack.n <- sweep(tcrossprod(result$predictions, oob),
2, oob.count, "/", check.margin = FALSE)
if (is.vector(jack.n)) {
jack.n <- t(as.matrix(jack.n))
}
if (any(oob.count == 0)) {
n <- sum(oob.count > 0)
jack.n <- jack.n[, oob.count > 0]
}
jack <- (n - 1) / n * rowSums((jack.n - yhat)^2)
bias <- (exp(1) - 1) * n / result$num.trees^2 * rowSums((result$predictions - yhat)^2)
jab <- pmax(jack - bias, 0)
result$se <- sqrt(jab)
} else if (se.method == "infjack") {
if (forest$treetype == "Regression") {
infjack <- rInfJack(pred = result$predictions, inbag = inbag.counts, used.trees = 1:num.trees)
result$se <- sqrt(infjack$var.hat)
} else if (forest$treetype == "Probability estimation") {
infjack <- apply(result$predictions, 2, function(x) {
rInfJack(x, inbag.counts)$var.hat
})
result$se <- sqrt(infjack)
}
} else {
stop("Error: Unknown standard error method (se.method).")
}
## Response as predictions
result$predictions <- yhat
if (forest$treetype == "Probability estimation") {
## Set colnames and sort by levels
colnames(result$predictions) <- forest$levels[forest$class.values]
result$predictions <- result$predictions[, forest$levels, drop = FALSE]
}
}
class(result) <- "ranger.prediction"
return(result)
}
##' Prediction with new data and a saved forest from Ranger.
##'
##' For \code{type = 'response'} (the default), the predicted classes (classification), predicted numeric values (regression), predicted probabilities (probability estimation) or survival probabilities (survival) are returned.
##' For \code{type = 'se'}, the standard error of the predictions are returned (regression only). The jackknife-after-bootstrap or infinitesimal jackknife for bagging is used to estimate the standard errors based on out-of-bag predictions. See Wager et al. (2014) for details.
##' For \code{type = 'terminalNodes'}, the IDs of the terminal node in each tree for each observation in the given dataset are returned.
##' For \code{type = 'quantiles'}, the selected quantiles for each observation are estimated. See Meinshausen (2006) for details.
##'
##' If \code{type = 'se'} is selected, the method to estimate the variances can be chosen with \code{se.method}. Set \code{se.method = 'jack'} for jackknife-after-bootstrap and \code{se.method = 'infjack'} for the infinitesimal jackknife for bagging.
##'
##' For classification and \code{predict.all = TRUE}, a factor levels are returned as numerics.
##' To retrieve the corresponding factor levels, use \code{rf$forest$levels}, if \code{rf} is the ranger object.
##'
##' @title Ranger prediction
##' @param object Ranger \code{ranger} object.
##' @param data New test data of class \code{data.frame} or \code{gwaa.data} (GenABEL).
##' @param predict.all Return individual predictions for each tree instead of aggregated predictions for all trees. Return a matrix (sample x tree) for classification and regression, a 3d array for probability estimation (sample x class x tree) and survival (sample x time x tree).
##' @param num.trees Number of trees used for prediction. The first \code{num.trees} in the forest are used.
##' @param type Type of prediction. One of 'response', 'se', 'terminalNodes', 'quantiles' with default 'response'. See below for details.
##' @param se.method Method to compute standard errors. One of 'jack', 'infjack' with default 'infjack'. Only applicable if type = 'se'. See below for details.
##' @param quantiles Vector of quantiles for quantile prediction. Set \code{type = 'quantiles'} to use.
##' @param seed Random seed. Default is \code{NULL}, which generates the seed from \code{R}. Set to \code{0} to ignore the \code{R} seed. The seed is used in case of ties in classification mode.
##' @param num.threads Number of threads. Default is number of CPUs available.
##' @param verbose Verbose output on or off.
##' @param ... further arguments passed to or from other methods.
##' @return Object of class \code{ranger.prediction} with elements
##' \tabular{ll}{
##' \code{predictions} \tab Predicted classes/values (only for classification and regression) \cr
##' \code{unique.death.times} \tab Unique death times (only for survival). \cr
##' \code{chf} \tab Estimated cumulative hazard function for each sample (only for survival). \cr
##' \code{survival} \tab Estimated survival function for each sample (only for survival). \cr
##' \code{num.trees} \tab Number of trees. \cr
##' \code{num.independent.variables} \tab Number of independent variables. \cr
##' \code{treetype} \tab Type of forest/tree. Classification, regression or survival. \cr
##' \code{num.samples} \tab Number of samples.
##' }
##' @references
##' \itemize{
##' \item Wright, M. N. & Ziegler, A. (2017). ranger: A Fast Implementation of Random Forests for High Dimensional Data in C++ and R. J Stat Softw 77:1-17. \url{https://doi.org/10.18637/jss.v077.i01}.
##' \item Wager, S., Hastie T., & Efron, B. (2014). Confidence Intervals for Random Forests: The Jackknife and the Infinitesimal Jackknife. J Mach Learn Res 15:1625-1651. \url{http://jmlr.org/papers/v15/wager14a.html}.
##' \item Meinshausen (2006). Quantile Regression Forests. J Mach Learn Res 7:983-999. \url{http://www.jmlr.org/papers/v7/meinshausen06a.html}.
##' }
##' @seealso \code{\link{ranger}}
##' @author Marvin N. Wright
##' @export
predict.ranger <- function(object, data = NULL, predict.all = FALSE,
                           num.trees = object$num.trees,
                           type = "response", se.method = "infjack",
                           quantiles = c(0.1, 0.5, 0.9),
                           seed = NULL, num.threads = NULL,
                           verbose = TRUE, ...) {
  ## Dispatch prediction for a fitted ranger object. Quantile prediction is
  ## handled here directly; every other prediction type is delegated to the
  ## predict method of the saved forest object.
  forest <- object$forest
  if (is.null(forest)) {
    stop("Error: No saved forest in ranger object. Please set write.forest to TRUE when calling ranger.")
  }
  if (object$importance.mode %in% c("impurity_corrected", "impurity_unbiased")) {
    warning("Forest was grown with 'impurity_corrected' variable importance. For prediction it is advised to grow another forest without this importance setting.")
  }
  if (type == "quantiles") {
    ## Quantile prediction (quantile regression forests, Meinshausen 2006).
    if (object$treetype != "Regression") {
      stop("Error: Quantile prediction implemented only for regression outcomes.")
    }
    if (is.null(object$random.node.values)) {
      stop("Error: Set quantreg=TRUE in ranger(...) for quantile prediction.")
    }
    if (is.null(data)) {
      ## OOB prediction: reuse the per-tree node values recorded at training.
      if (is.null(object$random.node.values.oob)) {
        stop("Error: Set keep.inbag=TRUE in ranger(...) for out-of-bag quantile prediction or provide new data in predict(...).")
      }
      node.values <- object$random.node.values.oob
    } else {
      ## New data: find each observation's terminal node in every tree.
      ## The +1 shifts the 0-based C++ node IDs to R's 1-based indexing.
      terminal.nodes <- predict(object, data, type = "terminalNodes")$predictions + 1
      node.values <- 0 * terminal.nodes
      ## seq_len() instead of 1:num.trees: avoids iterating c(1, 0) should
      ## num.trees ever be 0.
      for (tree in seq_len(num.trees)) {
        node.values[, tree] <- object$random.node.values[terminal.nodes[, tree], tree]
      }
    }
    ## Prepare results
    result <- list(num.samples = nrow(node.values),
                   treetype = object$treetype,
                   num.independent.variables = object$num.independent.variables,
                   num.trees = num.trees)
    class(result) <- "ranger.prediction"
    ## Per-observation quantiles of the node-value distribution; NAs (trees in
    ## which the sample was in-bag, for OOB prediction) are dropped.
    result$predictions <- t(apply(node.values, 1, quantile, quantiles, na.rm=TRUE))
    if (nrow(result$predictions) != result$num.samples) {
      ## apply() collapses to a vector for a single quantile; undo the transpose.
      result$predictions <- t(result$predictions)
    }
    colnames(result$predictions) <- paste("quantile=", quantiles)
    result
  } else {
    ## Non-quantile prediction is delegated to the forest's predict method.
    if (is.null(data)) {
      stop("Error: Argument 'data' is required for non-quantile prediction.")
    }
    predict(forest, data, predict.all, num.trees, type, se.method, seed, num.threads, verbose, object$inbag.counts, ...)
  }
}
|
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips == "24510") from 1999 to 2008? Use the base plotting system to make
# a plot answering this question.

# Load the NEI observations and the source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Total emissions per year within each fips region, then keep Baltimore City.
yearlyTotals <- aggregate(Emissions ~ year + fips, data = NEI, FUN = sum)
baltimore <- yearlyTotals[yearlyTotals$fips == "24510", ]

# Render the yearly trend as a line plot to a PNG device.
png("plot2.png")
with(baltimore, plot(year, Emissions, type = "l",
                     ylab = expression('PM'[2.5]*' emission from all sources (in tons) in Baltimore City'),
                     xlab = "year"))
dev.off()
|
/plot2.R
|
no_license
|
yencarnacion/ExData_Plotting2
|
R
| false
| false
| 566
|
r
|
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips == "24510") from 1999 to 2008? Use the base plotting system to make
# a plot answering this question.

# Load the NEI observations and the source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Total emissions per year within each fips region, then keep Baltimore City.
yearlyTotals <- aggregate(Emissions ~ year + fips, data = NEI, FUN = sum)
baltimore <- yearlyTotals[yearlyTotals$fips == "24510", ]

# Render the yearly trend as a line plot to a PNG device.
png("plot2.png")
with(baltimore, plot(year, Emissions, type = "l",
                     ylab = expression('PM'[2.5]*' emission from all sources (in tons) in Baltimore City'),
                     xlab = "year"))
dev.off()
|
#' Check DEVILS directory structure and print warnings for wrong file types
#'
#' Walks the DEVILS data directory tree and checks that every FITS file in the
#' biases, darks and rawdata night directories (a) carries the expected frame
#' type in its header and (b) was observed on a date consistent with the
#' directory it is stored in. Problems are always written to the log and,
#' when \code{stopError=TRUE}, abort the check.
#'
#' @param workingDir Path to top level of DEVILS directory structure
#' @param stopError If TRUE (default), return NULL on the first problem found
#' @param logName log filename to write progress to
#' @param verbose tell me whats going on: 0=nothing, 1=somethings, 2=everything
#' @return 1 on success, or NULL if a problem was found and stopError=TRUE.
#' @examples
#' bizCheck(workingDir='.', logName='tempLog.txt', verbose=1)
#' @export
bizCheck<-function(workingDir='.', stopError=TRUE, logName=logName, verbose=verbose){
    if (verbose>0){cat('*** Running bizCheck on DEVILS data structure', '\n')}
    write('*** Running bizCheck on DEVILS data structure', file=logName, append=TRUE)
    origDir<-getwd()
    setwd(workingDir)
    ## Bug fix: the previous version captured origDir but never restored it,
    ## leaving the caller in workingDir on every exit path.
    on.exit(setwd(origDir), add=TRUE)

    ## Log an informational message (console only when verbose>1).
    note <- function(msg){
        if (verbose>1){cat(msg, '\n')}
        write(msg, file=logName, append=TRUE)
    }

    ## Report a problem frame. Returns TRUE when the caller should abort
    ## (stopError set), after writing the exit message to the log.
    problem <- function(msg){
        if (verbose>1){cat(msg, '\n')}
        if (verbose>1 && stopError){cat('Exiting TAZ, please check data locations and re-run', '\n')}
        write(msg, file=logName, append=TRUE)
        if (stopError){
            write('Exiting TAZ, please check data locations and re-run', file=logName, append=TRUE)
            return(TRUE)
        }
        FALSE
    }

    ## Split a 'UTDATE' header value ('YYYY:MM:DD') into c(year, month, day).
    splitDate <- function(date){
        as.numeric(strsplit(date, ':')[[1]][1:3])
    }

    ## Expected OBJECT header values for the calibration directories.
    calTypes  <- c(biases='Bias Frame', darks='Dark Frame')
    calLabels <- c(biases='Bias', darks='Dark')

    runs<-list.files(path='data/rawdata', pattern='*')
    for (run in runs){
        note(paste0(' - Checking raw files for run:', run))

        ## Run directories are named like 'run_YYYY_MM'.
        folderYear  <- as.numeric(strsplit(run, '_')[[1]][2])
        folderMonth <- as.numeric(strsplit(run, '_')[[1]][3])

        ## --- Calibration frames (biases and darks) -------------------------
        for (calDir in names(calTypes)){
            calFiles <- list.files(path=paste0('data/', calDir, '/', run), pattern='*.fits')
            if (calDir == 'biases'){
                note(paste0(' - ', length(calFiles), ' FITS files found in biases directory'))
            }
            for (calFile in calFiles){
                name <- paste0('data/', calDir, '/', run, '/', calFile)
                hdr  <- read.fitshdr(name)
                date <- get.fitskey('UTDATE', hdr)
                ymd  <- splitDate(date)
                typeF <- get.fitskey('OBJECT', hdr)
                ## Bug fix: the dark-frame loop previously printed biases[j]
                ## instead of the dark file name.
                note(paste0(' - ', calLabels[[calDir]], ' File:', calFile,
                            ', date taken: ', date, ', Type:', typeF))
                if (typeF != calTypes[[calDir]]){
                    if (problem(paste0('*** WARNING FRAME ', name, ' DOES NOT APPEAR TO BE A ',
                                       toupper(calLabels[[calDir]]), ' ***'))) return(NULL)
                }
                ## Year may differ only across the Dec -> Jan boundary.
                if (folderYear != ymd[1] && (ymd[2] != 1 || folderMonth != 12)){
                    if (problem(paste0('*** WARNING FRAME ', name,
                                       ' WAS NOT TAKEN IN THE SAME YEAR AS ITS DIRECTORY ***'))) return(NULL)
                }
                ## Calibration frames may be taken in the folder month or the month after.
                if (folderMonth != ymd[2] && folderMonth != (ymd[2]-1)){
                    if (problem(paste0('*** WARNING FRAME ', name,
                                       ' WAS NOT TAKEN IN THE SAME OR FOLLOWING MONTH AS ITS DIRECTORY ***'))) return(NULL)
                }
            }
        }

        ## --- Science/raw frames, organised per night -----------------------
        nights<-list.files(path=paste0('data/rawdata/', run), pattern='*')
        for (night in nights){
            note(paste0(' - Checking raw files for night:', night))
            ## Night directories are named like 'night_YYYY_MM_DD'; only the
            ## day component is needed in addition to the run's year/month.
            folderDay <- as.numeric(strsplit(night, '_')[[1]][3])
            targets <- list.files(path=paste0('data/rawdata/', run, '/', night), pattern='*.fits')
            for (target in targets){
                name <- paste0('data/rawdata/', run, '/', night, '/', target)
                hdr  <- read.fitshdr(name)
                date <- get.fitskey('UTDATE', hdr)
                ymd  <- splitDate(date)
                ## Science frames carry the command type in RUNCMD, not OBJECT.
                typeF <- get.fitskey('RUNCMD', hdr)
                note(paste0(' - Target File:', target, ', date taken: ', date, ', Type:', typeF))
                if (typeF != 'FLAT' && typeF != 'ARC' && typeF != 'RUN'){
                    if (problem(paste0('*** WARNING FRAME ', name,
                                       ' DOES NOT APPEAR TO BE AN ARC, FLAT OR RUN (TARGET) FILE ***'))) return(NULL)
                }
                ## Raw frames must match the night directory date exactly.
                if (!(folderYear == ymd[1] && folderMonth == ymd[2] && folderDay == ymd[3])){
                    if (problem(paste0('*** WARNING FRAME ', name,
                                       ' WAS NOT TAKEN ON THE SAME DAY AS ITS DIRECTORY ***'))) return(NULL)
                }
            }
        }
    }
    return(1)
}
|
/DEVILSTAZ/R/bizCheck.R
|
no_license
|
ICRAR/DEVILS-TAZ
|
R
| false
| false
| 9,593
|
r
|
#' Check DEVILS directory structure and print warnings for wrong file types
#'
#' Walks the DEVILS data directory tree and checks that every FITS file in the
#' biases, darks and rawdata night directories (a) carries the expected frame
#' type in its header and (b) was observed on a date consistent with the
#' directory it is stored in. Problems are always written to the log and,
#' when \code{stopError=TRUE}, abort the check.
#'
#' @param workingDir Path to top level of DEVILS directory structure
#' @param stopError If TRUE (default), return NULL on the first problem found
#' @param logName log filename to write progress to
#' @param verbose tell me whats going on: 0=nothing, 1=somethings, 2=everything
#' @return 1 on success, or NULL if a problem was found and stopError=TRUE.
#' @examples
#' bizCheck(workingDir='.', logName='tempLog.txt', verbose=1)
#' @export
bizCheck<-function(workingDir='.', stopError=TRUE, logName=logName, verbose=verbose){
    if (verbose>0){cat('*** Running bizCheck on DEVILS data structure', '\n')}
    write('*** Running bizCheck on DEVILS data structure', file=logName, append=TRUE)
    origDir<-getwd()
    setwd(workingDir)
    ## Bug fix: the previous version captured origDir but never restored it,
    ## leaving the caller in workingDir on every exit path.
    on.exit(setwd(origDir), add=TRUE)

    ## Log an informational message (console only when verbose>1).
    note <- function(msg){
        if (verbose>1){cat(msg, '\n')}
        write(msg, file=logName, append=TRUE)
    }

    ## Report a problem frame. Returns TRUE when the caller should abort
    ## (stopError set), after writing the exit message to the log.
    problem <- function(msg){
        if (verbose>1){cat(msg, '\n')}
        if (verbose>1 && stopError){cat('Exiting TAZ, please check data locations and re-run', '\n')}
        write(msg, file=logName, append=TRUE)
        if (stopError){
            write('Exiting TAZ, please check data locations and re-run', file=logName, append=TRUE)
            return(TRUE)
        }
        FALSE
    }

    ## Split a 'UTDATE' header value ('YYYY:MM:DD') into c(year, month, day).
    splitDate <- function(date){
        as.numeric(strsplit(date, ':')[[1]][1:3])
    }

    ## Expected OBJECT header values for the calibration directories.
    calTypes  <- c(biases='Bias Frame', darks='Dark Frame')
    calLabels <- c(biases='Bias', darks='Dark')

    runs<-list.files(path='data/rawdata', pattern='*')
    for (run in runs){
        note(paste0(' - Checking raw files for run:', run))

        ## Run directories are named like 'run_YYYY_MM'.
        folderYear  <- as.numeric(strsplit(run, '_')[[1]][2])
        folderMonth <- as.numeric(strsplit(run, '_')[[1]][3])

        ## --- Calibration frames (biases and darks) -------------------------
        for (calDir in names(calTypes)){
            calFiles <- list.files(path=paste0('data/', calDir, '/', run), pattern='*.fits')
            if (calDir == 'biases'){
                note(paste0(' - ', length(calFiles), ' FITS files found in biases directory'))
            }
            for (calFile in calFiles){
                name <- paste0('data/', calDir, '/', run, '/', calFile)
                hdr  <- read.fitshdr(name)
                date <- get.fitskey('UTDATE', hdr)
                ymd  <- splitDate(date)
                typeF <- get.fitskey('OBJECT', hdr)
                ## Bug fix: the dark-frame loop previously printed biases[j]
                ## instead of the dark file name.
                note(paste0(' - ', calLabels[[calDir]], ' File:', calFile,
                            ', date taken: ', date, ', Type:', typeF))
                if (typeF != calTypes[[calDir]]){
                    if (problem(paste0('*** WARNING FRAME ', name, ' DOES NOT APPEAR TO BE A ',
                                       toupper(calLabels[[calDir]]), ' ***'))) return(NULL)
                }
                ## Year may differ only across the Dec -> Jan boundary.
                if (folderYear != ymd[1] && (ymd[2] != 1 || folderMonth != 12)){
                    if (problem(paste0('*** WARNING FRAME ', name,
                                       ' WAS NOT TAKEN IN THE SAME YEAR AS ITS DIRECTORY ***'))) return(NULL)
                }
                ## Calibration frames may be taken in the folder month or the month after.
                if (folderMonth != ymd[2] && folderMonth != (ymd[2]-1)){
                    if (problem(paste0('*** WARNING FRAME ', name,
                                       ' WAS NOT TAKEN IN THE SAME OR FOLLOWING MONTH AS ITS DIRECTORY ***'))) return(NULL)
                }
            }
        }

        ## --- Science/raw frames, organised per night -----------------------
        nights<-list.files(path=paste0('data/rawdata/', run), pattern='*')
        for (night in nights){
            note(paste0(' - Checking raw files for night:', night))
            ## Night directories are named like 'night_YYYY_MM_DD'; only the
            ## day component is needed in addition to the run's year/month.
            folderDay <- as.numeric(strsplit(night, '_')[[1]][3])
            targets <- list.files(path=paste0('data/rawdata/', run, '/', night), pattern='*.fits')
            for (target in targets){
                name <- paste0('data/rawdata/', run, '/', night, '/', target)
                hdr  <- read.fitshdr(name)
                date <- get.fitskey('UTDATE', hdr)
                ymd  <- splitDate(date)
                ## Science frames carry the command type in RUNCMD, not OBJECT.
                typeF <- get.fitskey('RUNCMD', hdr)
                note(paste0(' - Target File:', target, ', date taken: ', date, ', Type:', typeF))
                if (typeF != 'FLAT' && typeF != 'ARC' && typeF != 'RUN'){
                    if (problem(paste0('*** WARNING FRAME ', name,
                                       ' DOES NOT APPEAR TO BE AN ARC, FLAT OR RUN (TARGET) FILE ***'))) return(NULL)
                }
                ## Raw frames must match the night directory date exactly.
                if (!(folderYear == ymd[1] && folderMonth == ymd[2] && folderDay == ymd[3])){
                    if (problem(paste0('*** WARNING FRAME ', name,
                                       ' WAS NOT TAKEN ON THE SAME DAY AS ITS DIRECTORY ***'))) return(NULL)
                }
            }
        }
    }
    return(1)
}
|
###############
###############
###############
# prevalence
# Per-taxon prevalence: the percentage of samples (columns) in which each
# taxon (row) has a non-zero count. Returns a data.frame with columns
# Taxon (row names of `counts`) and Prevalence (0-100).
pprev <- function(counts){
  n.samples <- ncol(counts)
  detected <- counts > 0
  prevalence <- 100 * rowSums(detected) / n.samples
  data.frame(Taxon = rownames(counts), Prevalence = prevalence)
}
###############
###############
###############
# plotting
# Plot a prevalence table produced by pprev().
# as.is = TRUE: per-taxon bar chart, taxa ordered by decreasing prevalence.
# as.is = FALSE: histogram of the prevalence distribution.
plotPrev <- function(prev.df, as.is=TRUE){
  if (as.is==TRUE){
    ordered.df <- prev.df[order(prev.df$Prevalence, decreasing = TRUE), ]
    plt <- ggplot(ordered.df, aes(x = Taxon, y = Prevalence)) +
      geom_bar(stat = "identity") +
      theme_bw() +
      theme(axis.text.x = element_text(angle = 90))
  } else {
    plt <- ggplot(prev.df, aes(x = Prevalence)) +
      geom_histogram() +
      theme_bw()
  }
  return(plt)
}
|
/R/prevalence.R
|
permissive
|
nickilott/PSC_UC_Host_Microbiome
|
R
| false
| false
| 818
|
r
|
###############
###############
###############
# prevalence
# Per-taxon prevalence: the percentage of samples (columns) in which each
# taxon (row) has a non-zero count. Returns a data.frame with columns
# Taxon (row names of `counts`) and Prevalence (0-100).
pprev <- function(counts){
  n.samples <- ncol(counts)
  detected <- counts > 0
  prevalence <- 100 * rowSums(detected) / n.samples
  data.frame(Taxon = rownames(counts), Prevalence = prevalence)
}
###############
###############
###############
# plotting
# Plot a prevalence table produced by pprev().
# as.is = TRUE: per-taxon bar chart, taxa ordered by decreasing prevalence.
# as.is = FALSE: histogram of the prevalence distribution.
plotPrev <- function(prev.df, as.is=TRUE){
  if (as.is==TRUE){
    ordered.df <- prev.df[order(prev.df$Prevalence, decreasing = TRUE), ]
    plt <- ggplot(ordered.df, aes(x = Taxon, y = Prevalence)) +
      geom_bar(stat = "identity") +
      theme_bw() +
      theme(axis.text.x = element_text(angle = 90))
  } else {
    plt <- ggplot(prev.df, aes(x = Prevalence)) +
      geom_histogram() +
      theme_bw()
  }
  return(plt)
}
|
# Per-patch population counts for run 48112; presumably c(patch 1, patch 2)
# counts from the simulation that generated this file — TODO confirm.
numPerPatch48112 <- c(2479,2521)
|
/NatureEE-data-archive/Run203021/JAFSdata/JAFSnumPerPatch48112.R
|
no_license
|
flaxmans/NatureEE2017
|
R
| false
| false
| 33
|
r
|
# Per-patch population counts for run 48112; presumably c(patch 1, patch 2)
# counts from the simulation that generated this file — TODO confirm.
numPerPatch48112 <- c(2479,2521)
|
library(dplyr)
library(ggplot2)
library(gridExtra)
library(lubridate)
library(tidyverse) # data manipulation
library(cluster) # clustering algorithms
library(factoextra) # clustering algorithms & visualization
orders = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_orders_dataset.csv",header = TRUE)
# "order_id" unique identifier of the order.
# "customer_id" key to the customer dataset. Each order has a unique customer_id.
# "order_status" Reference to the order status (delivered, shipped, etc).
# "order_purchase_timestamp" Shows the purchase timestamp.
# "order_approved_at" Shows the payment approval timestamp.
# "order_delivered_carrier_date" Shows the order posting timestamp. When it was handled to the logistic partner.
# "order_delivered_customer_date" Shows the actual order delivery date to the customer.
# "order_estimated_delivery_date" Shows the estimated delivery date that was informed to customer at the purchase moment.
customers = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_customers_dataset.csv",header = TRUE)
# "customer_id" key to the orders dataset. Each order has a unique customer_id.
# "customer_unique_id" unique identifier of a customer.
# "customer_zip_code_prefix" first five digits of customer zip code
# "customer_city" customer city name
# "customer_state" customer state
payments = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_order_payments_dataset.csv",header = TRUE)
# "order_id" unique identifier of an order.
# "payment_sequential" a customer may pay an order with more than one payment method. If he does so, a sequence will be created to accommodate all payments.
# "payment_type" method of payment chosen by the customer.
# "payment_installments" number of installments chosen by the customer.
# "payment_value" transaction value.
items = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_order_items_dataset.csv",header = TRUE)
# "order_id" order unique identifier
# "order_item_id" sequential number identifying number of items included in the same order.
# "product_id" product unique identifier
# "seller_id" seller unique identifier
# "shipping_limit_date" Shows the seller shipping limit date for handling the order over to the logistic partner.
# "price" item price
# "freight_value" item freight value item (if an order has more than one item the freight value is splitted between items)
products = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_products_dataset.csv",header = TRUE)
# "product_id" unique product identifier
# "product_category_name" root category of product, in Portuguese.
# "product_name_lenght" number of characters extracted from the product name.
# "product_description_lenght" number of characters extracted from the product description.
# "product_photos_qty" number of product published photos
# "product_weight_g" product weight measured in grams.
# "product_length_cm" product length measured in centimeters.
# "product_height_cm" product height measured in centimeters.
# "product_width_cm" product width measured in centimeters.
#Creating dataset of Recency, Frequency, & Payment via joins
#obtaining frequency of purchases by customer_unique_id
freq = customers %>% inner_join(orders, by = "customer_id") %>%
group_by(customer_unique_id) %>%
summarise(
freq = n()
)
#obtaining payment sums of purchases by customer_id
payments2 = orders %>% inner_join(payments, by = "order_id") %>%
group_by(customer_id) %>%
summarise(
payments = sum(payment_value)
)
#adding recency: days elapsed from 2016-09-04 (dmy("4/9/2016")) to each order's purchase date
orders$recency = difftime(date(orders$order_purchase_timestamp),dmy("4/9/2016"),units = "days")
#obtaining minimum recency of purchase by customer_unique_id
rec = customers %>% inner_join(orders, by = "customer_id") %>%
group_by(customer_unique_id) %>%
summarise(
recency = min(recency)
)
# save(rec, file = "C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/rec.RData")
#obtaining total payment value by customer_unique_id
pymt = customers %>% inner_join(payments2, by = "customer_id") %>%
group_by(customer_unique_id) %>%
summarise(
pymnt = sum(payments)
)
#creating RFM data set from freq, rec, and pymt by customer_unique_id
RFM1 = freq %>% inner_join(rec) %>% inner_join(pymt) %>%
group_by(customer_unique_id) %>%
summarise(
recency = sum(recency)
,frequency = sum(freq)
,pymt = sum(pymnt)
)
RFM1 = RFM1[-which(RFM1$pymt == 0), ]
RFM1 = as.data.frame(RFM1)
#EDA
#Recency visual
a1 = ggplot(RFM1, aes( y = recency))+geom_boxplot()+ggtitle("Recency Unscaled")
a2 = ggplot(RFM1, aes( x = recency))+geom_histogram(binwidth = 1)+
geom_vline(xintercept = c(min(RFM1$recency),quantile(RFM1$recency,0.25),median(RFM1$recency),quantile(RFM1$recency,0.75),max(RFM1$recency)))+
ggtitle("Recency Unscaled")
grid.arrange(a1,a2,nrow = 1)
summary(RFM1$recency)
#Frequency visual
c1 = ggplot(RFM1, aes( x = frequency))+geom_histogram(binwidth = 1)+ggtitle("Frequency Unscaled")
c2 = ggplot(RFM1, aes( y = frequency))+geom_boxplot()+ggtitle("Frequency Unscaled")
grid.arrange(c1,c2,nrow = 1)
summary(RFM1$frequency)
#Payment visual
b1 = ggplot(RFM1, aes( y = pymt))+geom_boxplot()+ggtitle("Pymt (Monetary) Unscaled")
b2 = ggplot(RFM1, aes( x = pymt))+geom_histogram(binwidth = 1)+
ggtitle("Pymt (Monetary) Unscaled")
grid.arrange(b1,b2,nrow = 1)
#Min-max normalization
RFM1$recency[which(RFM1$recency == 0)] = 0.5
RFM1$recency_inv = 1/RFM1$recency
RFM1$recency_inv_scaled = scale(RFM1$recency_inv)
RFM1$frequency_scaled = scale(RFM1$frequency)
RFM1$pymt_scaled = scale(RFM1$pymt)
RFM1$recency_inv_minmax = (RFM1$recency_inv - min(RFM1$recency_inv))/(max(RFM1$recency_inv)-min(RFM1$recency_inv))
RFM1$frequency_minmax = (RFM1$frequency-min(RFM1$frequency))/(max(RFM1$frequency)-min(RFM1$frequency))
RFM1$pymt_minmax = (RFM1$pymt-min(RFM1$pymt))/(max(RFM1$pymt)-min(RFM1$pymt))
RFM1$recency_minmax = (RFM1$recency - min(RFM1$recency))/(max(RFM1$recency)-min(RFM1$recency))
#Kmeans Clustering
#recency_minmax
ggplot(RFM1, aes(x =recency, y = 0 ))+geom_point()
set.seed(1)
k3 <- kmeans(RFM1[,c(13)], centers = 3, nstart = 25)
p3 <- fviz_cluster(k3, geom = "point", data = RFM1[,c(13)]) + ggtitle("k = 3")
finalz <- kmeans(RFM1[,c(13)], 3, nstart = 25)
print(finalz)
fviz_cluster(finalz, data = RFM1[,c(13)])
RFM1$recency_minmax_cluster = finalz$cluster
#Extract the clusters and add to our initial data to do some descriptive statistics at the cluster level:
t1 = RFM1 %>%
group_by(recency_minmax_cluster) %>%
summarise(
min = min(recency_minmax)
,med = median(recency_minmax)
,mean = mean(recency_minmax)
,max = max(recency_minmax)
)
RFM1$recency_minmax_cluster = 0
RFM1$recency_minmax_cluster = ifelse(RFM1$recency_minmax >= max(t1$min),1, ifelse(RFM1$recency_minmax <= min(t1$max),3,2))
RFM1 %>%
group_by(recency_minmax_cluster) %>%
summarise(
min = min(recency)
,med = median(recency)
,mean = mean(recency)
,max = max(recency)
)
ggplot(RFM1, aes(x = recency_minmax, y = 0, color = as.factor(recency_minmax_cluster)))+geom_point()+ggtitle("Recency_minmax Clustered") + theme(legend.position = "none")
ggplot(RFM1, aes(x = recency, y = 0, color = recency_minmax_cluster))+geom_point()
#frequency_minmax
set.seed(1)
summary(RFM1$frequency_minmax)
k3 <- kmeans(RFM1[,c(11)], centers = 3, nstart = 25)
p3 <- fviz_cluster(k3, geom = "point", data = RFM1[,c(11)]) + ggtitle("k = 3")
finalz <- kmeans(RFM1[,c(11)], 3, nstart = 25)
print(finalz)
fviz_cluster(finalz, data = RFM1[,c(11)])
RFM1$frequency_minmax_cluster = finalz$cluster
#Extract the clusters and add to our initial data to do some descriptive statistics at the cluster level:
t1 = RFM1 %>%
group_by(frequency_minmax_cluster) %>%
summarise(
min = min(frequency_minmax)
,med = median(frequency_minmax)
,mean = mean(frequency_minmax)
,max = max(frequency_minmax)
)
RFM1$frequency_minmax_cluster = 0
RFM1$frequency_minmax_cluster = ifelse(RFM1$frequency_minmax >= max(t1$min), 3, ifelse(RFM1$frequency_minmax <= min(t1$max),1,2))
RFM1 %>%
group_by(frequency_minmax_cluster) %>%
summarise(
min = min(frequency)
,med = median(frequency)
,mean = mean(frequency)
,max = max(frequency)
)
ggplot(RFM1, aes(x = frequency_minmax, y = 0, color = as.factor(frequency_minmax_cluster)))+geom_point()+ggtitle("Frequency_minmax Clustered")
ggplot(RFM1, aes(x = frequency, y = 0, color = as.factor(frequency_minmax_cluster)))+geom_point()
#pymt_minmax
set.seed(1)
summary(RFM1$pymt_minmax)
k4 <- kmeans(RFM1[,c(12)], centers = 4, nstart = 25)
p4 <- fviz_cluster(k4, geom = "point", data = RFM1[,c(12)]) + ggtitle("k = 4")
finalz <- kmeans(RFM1[,c(12)], 4, nstart = 25)
print(finalz)
fviz_cluster(finalz, data = RFM1[,c(12)])
RFM1$pymt_minmax_cluster = as.factor(finalz$cluster)
#Extract the clusters and add to our initial data to do some descriptive statistics at the cluster level:
t1 = RFM1 %>%
group_by(pymt_minmax_cluster) %>%
summarise(
min = min(pymt_minmax)
,med = median(pymt_minmax)
,mean = mean(pymt_minmax)
,max = max(pymt_minmax)
)
RFM1$pymt_minmax_cluster = 0
RFM1$pymt_minmax_cluster = ifelse(RFM1$pymt_minmax >= max(t1$min), 4, ifelse(RFM1$pymt_minmax <= min(t1$max), 1,ifelse(RFM1$pymt_minmax >= t1[[order(t1$min)[c(2)],2]] & RFM1$pymt_minmax<=t1[[order(t1$max)[c(2)],5]], 2,3)))
RFM1 %>%
group_by(pymt_minmax_cluster) %>%
summarise(
min = min(pymt)
,med = median(pymt)
,mean = mean(pymt)
,max = max(pymt)
)
ggplot(RFM1, aes(x = pymt_minmax, y = 0, color = as.factor(pymt_minmax_cluster)))+geom_point()+ggtitle("Pymt_minmax Clustered")
ggplot(RFM1, aes(x = pymt, y = 0, color = pymt_minmax_cluster))+geom_point()
#Create RFM_Score
RFM1$RFM_score = RFM1$recency_minmax_cluster + RFM1$frequency_minmax_cluster + RFM1$pymt_minmax_cluster
#Summarize customer base by RFM_Score
RFM1 %>% group_by(RFM_score) %>%
summarise(
count = n(),
min_rec = min(recency),
mean_rec = mean(recency),
med_rec = median(recency),
max_rec = max(recency),
min_freq = min(frequency),
mean_freq = mean(frequency),
med_freq = median(frequency),
max_freq = max(frequency),
min_pymt = min(pymt),
mean_pymt = mean(pymt),
med_pymt = median(pymt),
max_pymt = max(pymt)
)
#Creating segments from RFM_scores
RFM1$Segment = ifelse(RFM1$RFM_score <= 4, 1, ifelse(RFM1$RFM_score <= 6, 2, ifelse(RFM1$RFM_score == 7, 3, 4)))
#Summarize customer base by Segment
RFM1 %>% group_by(Segment) %>%
summarise(
count = n(),
min_rec = min(recency),
mean_rec = mean(recency),
med_rec = median(recency),
max_rec = max(recency),
min_freq = min(frequency),
mean_freq = mean(frequency),
med_freq = median(frequency),
max_freq = max(frequency),
min_pymt = min(pymt),
mean_pymt = mean(pymt),
med_pymt = median(pymt),
max_pymt = max(pymt)
)
#Customer proportions by RFM_Score & visualize
table(RFM1$RFM_score)
round((table(RFM1$RFM_score)/nrow(RFM1))*100,1)
ggplot(RFM1, aes(x= RFM_score)) +
geom_bar(aes(y = ..prop.., fill = factor(..x..)), stat="count") +
# geom_text(aes( label = scales::percent(..prop..),
# y= ..prop.. ), stat= "count", vjust = -.5) +
geom_text(aes(label = scales::percent(round((..count..)/sum(..count..),3)),
y= ((..count..)/sum(..count..))), stat="count",
vjust = -.25) +
labs(y = "Percent", fill="RFM_score") +
scale_y_continuous(labels = scales::percent) +
scale_x_continuous(labels = as.character(RFM1$RFM_score),breaks = RFM1$RFM_score) +
ggtitle("RFM Score")
#Customer proportions by Segment & visualize
ggplot(RFM1, aes(x= Segment)) +
geom_bar(aes(y = ..prop.., fill = factor(..x..)), stat="count") +
geom_text(aes( label = scales::percent(..prop..),
y= ..prop.. ), stat= "count", vjust = -.5) +
labs(y = "Percent", fill="RFM_cat") +
scale_y_continuous(labels = scales::percent) +
scale_fill_discrete(name = "Segment", labels = c("At Risk", "Needs Attention", "Promising","Champions")) +
ggtitle("Targeted Marketing Segments")
|
/Customer Segmentation RFM Clustering.R
|
no_license
|
oelghira/Customer-Segmentation-from-RFM-Clustering
|
R
| false
| false
| 12,779
|
r
|
library(dplyr)
library(ggplot2)
library(gridExtra)
library(lubridate)
library(tidyverse) # data manipulation
library(cluster) # clustering algorithms
library(factoextra) # clustering algorithms & visualization
orders = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_orders_dataset.csv",header = TRUE)
# "order_id" unique identifier of the order.
# "customer_id" key to the customer dataset. Each order has a unique customer_id.
# "order_status" Reference to the order status (delivered, shipped, etc).
# "order_purchase_timestamp" Shows the purchase timestamp.
# "order_approved_at" Shows the payment approval timestamp.
# "order_delivered_carrier_date" Shows the order posting timestamp. When it was handled to the logistic partner.
# "order_delivered_customer_date" Shows the actual order delivery date to the customer.
# "order_estimated_delivery_date" Shows the estimated delivery date that was informed to customer at the purchase moment.
customers = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_customers_dataset.csv",header = TRUE)
# "customer_id" key to the orders dataset. Each order has a unique customer_id.
# "customer_unique_id" unique identifier of a customer.
# "customer_zip_code_prefix" first five digits of customer zip code
# "customer_city" customer city name
# "customer_state" customer state
payments = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_order_payments_dataset.csv",header = TRUE)
# "order_id" unique identifier of an order.
# "payment_sequential" a customer may pay an order with more than one payment method. If he does so, a sequence will be created to accommodate all payments.
# "payment_type" method of payment chosen by the customer.
# "payment_installments" number of installments chosen by the customer.
# "payment_value" transaction value.
items = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_order_items_dataset.csv",header = TRUE)
# "order_id" order unique identifier
# "order_item_id" sequential number identifying number of items included in the same order.
# "product_id" product unique identifier
# "seller_id" seller unique identifier
# "shipping_limit_date" Shows the seller shipping limit date for handling the order over to the logistic partner.
# "price" item price
# "freight_value" item freight value item (if an order has more than one item the freight value is splitted between items)
products = read.csv("C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/olist_products_dataset.csv",header = TRUE)
# "product_id" unique product identifier
# "product_category_name" root category of product, in Portuguese.
# "product_name_lenght" number of characters extracted from the product name.
# "product_description_lenght" number of characters extracted from the product description.
# "product_photos_qty" number of product published photos
# "product_weight_g" product weight measured in grams.
# "product_length_cm" product length measured in centimeters.
# "product_height_cm" product height measured in centimeters.
# "product_width_cm" product width measured in centimeters.
#Creating dataset of Recency, Frequency, & Payment via joins
#obtaining frequency of purchases by customer_unique_id
freq = customers %>% inner_join(orders, by = "customer_id") %>%
group_by(customer_unique_id) %>%
summarise(
freq = n()
)
#obtaining payment sums of purchases by customer_id
payments2 = orders %>% inner_join(payments, by = "order_id") %>%
group_by(customer_id) %>%
summarise(
payments = sum(payment_value)
)
#adding recency: days elapsed from 2016-09-04 (dmy("4/9/2016")) to each order's purchase date
orders$recency = difftime(date(orders$order_purchase_timestamp),dmy("4/9/2016"),units = "days")
#obtaining minimum recency of purchase by customer_unique_id
rec = customers %>% inner_join(orders, by = "customer_id") %>%
group_by(customer_unique_id) %>%
summarise(
recency = min(recency)
)
# save(rec, file = "C:/Users/OJElGhiran/Desktop/Candidate Test/Brazil/rec.RData")
#obtaining total payment value by customer_unique_id
pymt = customers %>% inner_join(payments2, by = "customer_id") %>%
group_by(customer_unique_id) %>%
summarise(
pymnt = sum(payments)
)
#creating RFM data set from freq, rec, and pymt by customer_unique_id
RFM1 = freq %>% inner_join(rec) %>% inner_join(pymt) %>%
group_by(customer_unique_id) %>%
summarise(
recency = sum(recency)
,frequency = sum(freq)
,pymt = sum(pymnt)
)
RFM1 = RFM1[-which(RFM1$pymt == 0), ]
RFM1 = as.data.frame(RFM1)
#EDA
#Recency visual
a1 = ggplot(RFM1, aes( y = recency))+geom_boxplot()+ggtitle("Recency Unscaled")
a2 = ggplot(RFM1, aes( x = recency))+geom_histogram(binwidth = 1)+
geom_vline(xintercept = c(min(RFM1$recency),quantile(RFM1$recency,0.25),median(RFM1$recency),quantile(RFM1$recency,0.75),max(RFM1$recency)))+
ggtitle("Recency Unscaled")
grid.arrange(a1,a2,nrow = 1)
summary(RFM1$recency)
#Frequency visual
c1 = ggplot(RFM1, aes( x = frequency))+geom_histogram(binwidth = 1)+ggtitle("Frequency Unscaled")
c2 = ggplot(RFM1, aes( y = frequency))+geom_boxplot()+ggtitle("Frequency Unscaled")
grid.arrange(c1,c2,nrow = 1)
summary(RFM1$frequency)
#Payment visual
b1 = ggplot(RFM1, aes( y = pymt))+geom_boxplot()+ggtitle("Pymt (Monetary) Unscaled")
b2 = ggplot(RFM1, aes( x = pymt))+geom_histogram(binwidth = 1)+
ggtitle("Pymt (Monetary) Unscaled")
grid.arrange(b1,b2,nrow = 1)
#Min-max normalization
RFM1$recency[which(RFM1$recency == 0)] = 0.5
RFM1$recency_inv = 1/RFM1$recency
RFM1$recency_inv_scaled = scale(RFM1$recency_inv)
RFM1$frequency_scaled = scale(RFM1$frequency)
RFM1$pymt_scaled = scale(RFM1$pymt)
RFM1$recency_inv_minmax = (RFM1$recency_inv - min(RFM1$recency_inv))/(max(RFM1$recency_inv)-min(RFM1$recency_inv))
RFM1$frequency_minmax = (RFM1$frequency-min(RFM1$frequency))/(max(RFM1$frequency)-min(RFM1$frequency))
RFM1$pymt_minmax = (RFM1$pymt-min(RFM1$pymt))/(max(RFM1$pymt)-min(RFM1$pymt))
RFM1$recency_minmax = (RFM1$recency - min(RFM1$recency))/(max(RFM1$recency)-min(RFM1$recency))
#Kmeans Clustering
#recency_minmax
ggplot(RFM1, aes(x =recency, y = 0 ))+geom_point()
set.seed(1)
k3 <- kmeans(RFM1[,c(13)], centers = 3, nstart = 25)
p3 <- fviz_cluster(k3, geom = "point", data = RFM1[,c(13)]) + ggtitle("k = 3")
finalz <- kmeans(RFM1[,c(13)], 3, nstart = 25)
print(finalz)
fviz_cluster(finalz, data = RFM1[,c(13)])
RFM1$recency_minmax_cluster = finalz$cluster
#Extract the clusters and add to our initial data to do some descriptive statistics at the cluster level:
t1 = RFM1 %>%
group_by(recency_minmax_cluster) %>%
summarise(
min = min(recency_minmax)
,med = median(recency_minmax)
,mean = mean(recency_minmax)
,max = max(recency_minmax)
)
RFM1$recency_minmax_cluster = 0
RFM1$recency_minmax_cluster = ifelse(RFM1$recency_minmax >= max(t1$min),1, ifelse(RFM1$recency_minmax <= min(t1$max),3,2))
RFM1 %>%
group_by(recency_minmax_cluster) %>%
summarise(
min = min(recency)
,med = median(recency)
,mean = mean(recency)
,max = max(recency)
)
ggplot(RFM1, aes(x = recency_minmax, y = 0, color = as.factor(recency_minmax_cluster)))+geom_point()+ggtitle("Recency_minmax Clustered") + theme(legend.position = "none")
ggplot(RFM1, aes(x = recency, y = 0, color = recency_minmax_cluster))+geom_point()
#frequency_minmax
set.seed(1)
summary(RFM1$frequency_minmax)
k3 <- kmeans(RFM1[,c(11)], centers = 3, nstart = 25)
p3 <- fviz_cluster(k3, geom = "point", data = RFM1[,c(11)]) + ggtitle("k = 3")
finalz <- kmeans(RFM1[,c(11)], 3, nstart = 25)
print(finalz)
fviz_cluster(finalz, data = RFM1[,c(11)])
RFM1$frequency_minmax_cluster = finalz$cluster
#Extract the clusters and add to our initial data to do some descriptive statistics at the cluster level:
t1 = RFM1 %>%
group_by(frequency_minmax_cluster) %>%
summarise(
min = min(frequency_minmax)
,med = median(frequency_minmax)
,mean = mean(frequency_minmax)
,max = max(frequency_minmax)
)
RFM1$frequency_minmax_cluster = 0
RFM1$frequency_minmax_cluster = ifelse(RFM1$frequency_minmax >= max(t1$min), 3, ifelse(RFM1$frequency_minmax <= min(t1$max),1,2))
RFM1 %>%
group_by(frequency_minmax_cluster) %>%
summarise(
min = min(frequency)
,med = median(frequency)
,mean = mean(frequency)
,max = max(frequency)
)
ggplot(RFM1, aes(x = frequency_minmax, y = 0, color = as.factor(frequency_minmax_cluster)))+geom_point()+ggtitle("Frequency_minmax Clustered")
ggplot(RFM1, aes(x = frequency, y = 0, color = as.factor(frequency_minmax_cluster)))+geom_point()
#pymt_minmax
set.seed(1)
summary(RFM1$pymt_minmax)
k4 <- kmeans(RFM1[,c(12)], centers = 4, nstart = 25)
p4 <- fviz_cluster(k4, geom = "point", data = RFM1[,c(12)]) + ggtitle("k = 4")
finalz <- kmeans(RFM1[,c(12)], 4, nstart = 25)
print(finalz)
fviz_cluster(finalz, data = RFM1[,c(12)])
RFM1$pymt_minmax_cluster = as.factor(finalz$cluster)
#Extract the clusters and add to our initial data to do some descriptive statistics at the cluster level:
t1 = RFM1 %>%
group_by(pymt_minmax_cluster) %>%
summarise(
min = min(pymt_minmax)
,med = median(pymt_minmax)
,mean = mean(pymt_minmax)
,max = max(pymt_minmax)
)
RFM1$pymt_minmax_cluster = 0
RFM1$pymt_minmax_cluster = ifelse(RFM1$pymt_minmax >= max(t1$min), 4, ifelse(RFM1$pymt_minmax <= min(t1$max), 1,ifelse(RFM1$pymt_minmax >= t1[[order(t1$min)[c(2)],2]] & RFM1$pymt_minmax<=t1[[order(t1$max)[c(2)],5]], 2,3)))
RFM1 %>%
group_by(pymt_minmax_cluster) %>%
summarise(
min = min(pymt)
,med = median(pymt)
,mean = mean(pymt)
,max = max(pymt)
)
ggplot(RFM1, aes(x = pymt_minmax, y = 0, color = as.factor(pymt_minmax_cluster)))+geom_point()+ggtitle("Pymt_minmax Clustered")
ggplot(RFM1, aes(x = pymt, y = 0, color = pymt_minmax_cluster))+geom_point()
#Create RFM_Score
RFM1$RFM_score = RFM1$recency_minmax_cluster + RFM1$frequency_minmax_cluster + RFM1$pymt_minmax_cluster
#Summarize customer base by RFM_Score
RFM1 %>% group_by(RFM_score) %>%
summarise(
count = n(),
min_rec = min(recency),
mean_rec = mean(recency),
med_rec = median(recency),
max_rec = max(recency),
min_freq = min(frequency),
mean_freq = mean(frequency),
med_freq = median(frequency),
max_freq = max(frequency),
min_pymt = min(pymt),
mean_pymt = mean(pymt),
med_pymt = median(pymt),
max_pymt = max(pymt)
)
#Creating segments from RFM_scores
RFM1$Segment = ifelse(RFM1$RFM_score <= 4, 1, ifelse(RFM1$RFM_score <= 6, 2, ifelse(RFM1$RFM_score == 7, 3, 4)))
#Summarize customer base by Segment
RFM1 %>% group_by(Segment) %>%
summarise(
count = n(),
min_rec = min(recency),
mean_rec = mean(recency),
med_rec = median(recency),
max_rec = max(recency),
min_freq = min(frequency),
mean_freq = mean(frequency),
med_freq = median(frequency),
max_freq = max(frequency),
min_pymt = min(pymt),
mean_pymt = mean(pymt),
med_pymt = median(pymt),
max_pymt = max(pymt)
)
#Customer proportions by RFM_Score & visualize
table(RFM1$RFM_score)
round((table(RFM1$RFM_score)/nrow(RFM1))*100,1)
ggplot(RFM1, aes(x= RFM_score)) +
geom_bar(aes(y = ..prop.., fill = factor(..x..)), stat="count") +
# geom_text(aes( label = scales::percent(..prop..),
# y= ..prop.. ), stat= "count", vjust = -.5) +
geom_text(aes(label = scales::percent(round((..count..)/sum(..count..),3)),
y= ((..count..)/sum(..count..))), stat="count",
vjust = -.25) +
labs(y = "Percent", fill="RFM_score") +
scale_y_continuous(labels = scales::percent) +
scale_x_continuous(labels = as.character(RFM1$RFM_score),breaks = RFM1$RFM_score) +
ggtitle("RFM Score")
#Customer proportions by Segment & visualize
ggplot(RFM1, aes(x= Segment)) +
geom_bar(aes(y = ..prop.., fill = factor(..x..)), stat="count") +
geom_text(aes( label = scales::percent(..prop..),
y= ..prop.. ), stat= "count", vjust = -.5) +
labs(y = "Percent", fill="RFM_cat") +
scale_y_continuous(labels = scales::percent) +
scale_fill_discrete(name = "Segment", labels = c("At Risk", "Needs Attention", "Promising","Champions")) +
ggtitle("Targeted Marketing Segments")
|
suppressPackageStartupMessages(library(oro.nifti))
suppressPackageStartupMessages(library(plyr))
source("source_headerFunc.R")
source("plotter/source_2dplot.R")
|
/plotter/header_plotter.R
|
no_license
|
felixxiao/brain-imaging
|
R
| false
| false
| 162
|
r
|
suppressPackageStartupMessages(library(oro.nifti))
suppressPackageStartupMessages(library(plyr))
source("source_headerFunc.R")
source("plotter/source_2dplot.R")
|
#Figure 4.9
write.csv(gen, "data/genesFromAtas.csv")
gen <- read.csv("data/genesFromAtas.csv")
# perform pca using prcomp func, choosing the top n number of genes
ntop=2000
Pvars <- rowVars(assay(vsd))
select <- order(Pvars, decreasing = TRUE)[seq_len(min(ntop,
length(Pvars)))]
PCA <- prcomp(t(assay(vsd)[select, ]), scale = T)
top_genesHM <- PCA %>%
# extract variable (gene) loadings
tidy(matrix = "variables") %>%
# retain only PC1 and PC2
dplyr::filter(PC == "1" | PC == "2") %>%
# for each PC
group_by(PC) %>%
# sort descending value
dplyr::arrange(desc(abs(value))) %>%
# take top 5 rows of each PC group
dplyr::slice(1:2000) %>%
# extract the column (gene name) from the table
pull(column) %>%
# retain unique gene names only
unique()
mew <- as.data.frame(assay(vsd)) %>%
as_tibble(rownames = "gene") %>%
filter(gene %in% top_genesHM)
mew <- as.data.frame(mew)
rownames(mew) <- mew$gene
mew <- mew[,-1]
colnames(mew) <- paste(vsd$group, vsd$specification, sep = " - " )
# Pairwise correlation between samples (columns)
cols.cor <- cor(mew,
use = "pairwise.complete.obs",
method = "pearson")
# Pairwise correlation between rows (genes)
rows.cor <- cor(t(mew),
use = "pairwise.complete.obs",
method = "pearson")
#rownames(mew) <- NULL
# Plot the heatmap
# Pairwise correlation between samples (columns)
forhc <- mew
colnames(forhc) <- paste(vsd$group, vsd$specification, sep = " - " )
cols.cor1 <- cor((forhc),
use = "pairwise.complete.obs",
method = "pearson")
library(dendsort)
# Reorder an hclust tree with dendsort (tighter clusters first), then convert back to hclust.
sort_hclust <- function(...) {
  dend <- as.dendrogram(...)
  as.hclust(dendsort(dend))
}
hc.complete = hclust(dist(t(mew)), method="complete")
hc.complete$label <- paste(vsd$group, vsd$specification, sep = " - " )
hc.complete.rows = hclust(dist((mew)), method="complete")
# Compute x-positions and cell width for n column labels on the npc scale,
# inserting a 4-bigpts offset after every column index listed in `gaps`
# (helper for the pheatmap draw_colnames override defined alongside it).
#
# n    - total number of columns.
# gaps - integer vector of column indices after which a visual gap is drawn;
#        every value must be <= n.
# m    - column indices to locate (defaults to all of 1:n).
#
# Returns a list with `coord` (right edge of each requested cell as grid
# units) and `size` (width of one cell as a grid unit).
find_coordinates = function(n, gaps, m = 1:n){
  # No gaps: cells split the npc range evenly.
  if(length(gaps) == 0){
    return(list(coord = unit(m / n, "npc"), size = unit(1 / n, "npc") ))
  }
  if(max(gaps) > n){
    stop("Gaps do not match with matrix size")
  }
  # Shrink each cell so the gap offsets still fit inside one npc unit.
  # Pass numeric 4 to unit(); the original passed the string "4" and relied
  # on implicit character-to-numeric coercion.
  size = (1 / n) * (unit(1, "npc") - length(gaps) * unit(4, "bigpts"))
  # gaps2[i] = number of gaps occurring before column m[i].
  gaps2 = apply(sapply(gaps, function(gap, x){x > gap}, m), 1, sum)
  coord = m * size + (gaps2 * unit(4, "bigpts"))
  return(list(coord = coord, size = size))
}
# Replacement for pheatmap's internal draw_colnames: renders the column
# labels at a custom angle/justification, using find_coordinates() to place
# each label within its cell.
draw_colnames = function(coln, gaps, vjust_col, hjust_col, angle_col, ...){
  pos = find_coordinates(length(coln), gaps)
  # Center each label horizontally within its cell.
  label_x = pos$coord - 0.5 * pos$size
  # Anchor labels 3 bigpts below the top of the viewport.
  label_y = unit(1, "npc") - unit(3, "bigpts")
  textGrob(
    coln,
    x = label_x,
    y = label_y,
    vjust = vjust_col,
    hjust = hjust_col,
    rot = angle_col,
    gp = gpar(...)
  )
}
# Inject the custom draw_colnames defined above into the pheatmap namespace
# so pheatmap() uses it when drawing column labels.
# BUG FIX: `value` must be the function object itself; the original passed
# the string "draw_colnames", which would have replaced pheatmap's internal
# function with a character vector and broken column-label drawing.
assignInNamespace(
  x = "draw_colnames",
  value = draw_colnames,
  ns = asNamespace("pheatmap")
)
tiff("heatMapThesis.tiff",units="in", width=10,height=10, res=300, compression = 'lzw')
pheatmap(
mew,
scale = "row",
cluster_cols=hc.complete,
cluster_rows =hc.complete.rows,
clustering_distance_cols = as.dist(1 - cols.cor),
clustering_distance_rows = as.dist(1 - rows.cor),
cutree_cols = 6,
cutree_rows = 6,
annotation_colors = heatmapColScale,
#annotation_col = colAn,
#annotation_row = rowAn,
#show_rownames = FALSE,
#show_colnames = FALSE,
labels_col = hc.complete$label,
treeheight_row = 0,
angle_col = 315,
fontsize_col = 7,
col=inferno(100)
# col = inferno(length(breaksList)),
# breaks = breaksList
)
newForCorr <- newForCorr %>%
as_tibble(rownames = "gene") %>%
select("gene","CM","EHM","Fetal_Heart","Adult_Heart")
library(ggplotify)
x <- as.ggplot(pheatmap(newForCorr[2:5],
show_rownames = FALSE,
cluster_cols = FALSE,
scale="row",
color=viridis(100)))
x | huh
newForCorr <- sapply(split(seq_len(ncol(forCorr)),colnames(forCorr)),function(cis) rowMeans(forCorr[,cis,drop=F]))
corrplot(cols.cor1,
method = "color",
tl.col='grey30',
addrect = 3,
order = 'hclust',
tl.cex = 0.5,
number.cex = .7,
col=inferno(200),
#type="upper",
# bg = "black",
diag = FALSE)
forCorr <- mew
colnames(forCorr) <- vsd$group
r <- cor(forCorr, method="pearson")
round(r,2)
s <- r[rownames(r)=="Adult_Heart",]
t <- as.data.frame(colMeans(s))
t$group <- colnames(s)
t$group <- as.factor(t$group)
colnames(t) <-c("corr", "group")
u <- t[t$group!= "Adult_Heart",]
# Using median
tiff("PearsonCorr-thesis.tiff",units="in", width=4,height=4, res=300, compression = 'lzw')
huh <- u %>%
mutate(class = fct_reorder(group, corr, .fun='median')) %>%
ggplot( aes(x=reorder(group, corr), y=corr)) +
geom_boxplot(aes(fill = stage(group, after_scale = alpha(fill, 0.9))))+
# geom_boxplot(fill=after_stat(),alpha=0.4) +
ylab("Pearson Corelation\n") +
theme(legend.position="none") +
scale_fill_manual(values = colscale)+
#scale_fill_tableau(palette = "Superfishel Stone") +
xlab("")+
theme_few()+
guides(fill=FALSE)
dev.off()
library(ggplotify)
################################################
onnu <- as.data.frame(assay(vsd)) %>%
as_tibble(rownames = "gene") %>%
filter(gene %in% top_genesHM)
onnu <- as.data.frame(onnu)
rownames(onnu) <- onnu$gene
onnu <- onnu[,-1]
colnames(onnu) <- vsd$group
onnuPh <- sapply(split(seq_len(ncol(onnu)),colnames(onnu)),function(cis) rowMeans(onnu[,cis,drop=F]))
onnuPh <- onnuPh %>%
as_tibble(rownames = "gene") %>%
select("gene","CM","EHM","Fetal_Heart","Adult_Heart")
ph1000 <- as.ggplot(pheatmap(onnuPh[2:5],
show_rownames = FALSE,
cluster_cols = FALSE,
treeheight_row = 0,
#scale="row",
color=viridis(100),
fontsize = 15,
angle_col = 45))
colnames(onnu) <- vsd$group
r <- cor(onnu, method="pearson")
round(r,2)
s <- r[rownames(r)=="Adult_Heart",]
t <- as.data.frame(colMeans(s))
t$group <- colnames(s)
t$group <- as.factor(t$group)
colnames(t) <-c("corr", "group")
u <- t[t$group!= "Adult_Heart",]
cor1500 <- u %>%
mutate(class = fct_reorder(group, corr, .fun='median')) %>%
ggplot( aes(x=reorder(group, corr), y=corr)) +
geom_boxplot(aes(fill = stage(group, after_scale = alpha(fill, 0.9))))+
# geom_boxplot(fill=after_stat(),alpha=0.4) +
ylab("Pearson Corelation\n") +
theme(legend.position="none") +
scale_fill_manual(values = colscale)+
#scale_fill_tableau(palette = "Superfishel Stone") +
xlab("")+
theme_few()+
scale_y_continuous(limits=c(0.20,0.80), breaks = seq(0.20,0.80,.10))+
theme(axis.text.y = element_text(size=15, colour="black"),
axis.text.x = element_text(color="black", angle=45, hjust = 1, face="bold",size=15))+
guides(fill=FALSE)
rendu <- as.data.frame(assay(vsd)) %>%
as_tibble(rownames = "gene") %>%
filter(gene %in% gen$x)
rendu <- as.data.frame(rendu)
rownames(rendu) <- rendu$gene
rendu <- rendu[,-1]
colnames(rendu) <- vsd$group
renduPh <- sapply(split(seq_len(ncol(rendu)),colnames(rendu)),function(cis) rowMeans(rendu[,cis,drop=F]))
renduPh <- renduPh %>%
as_tibble(rownames = "gene") %>%
select("gene","CM","EHM","Fetal_Heart","Adult_Heart")
ph380 <- as.ggplot(pheatmap(renduPh[2:5],
show_rownames = FALSE,
cluster_cols = FALSE,
treeheight_row = 0,
#scale="row",
color=viridis(100),
fontsize = 15,
angle_col = 45))
colnames(rendu) <- vsd$group
r <- cor(rendu, method="pearson")
round(r,2)
s <- r[rownames(r)=="Adult_Heart",]
t <- as.data.frame(colMeans(s))
t$group <- colnames(s)
t$group <- as.factor(t$group)
colnames(t) <-c("corr", "group")
u <- t[t$group!= "Adult_Heart",]
cor380 <- u %>%
mutate(class = fct_reorder(group, corr, .fun='median')) %>%
ggplot( aes(x=reorder(group, corr), y=corr)) +
geom_boxplot(aes(fill = stage(group, after_scale = alpha(fill, 0.9))))+
# geom_boxplot(fill=after_stat(),alpha=0.4) +
ylab("Pearson Corelation\n") +
theme(legend.position="none") +
scale_fill_manual(values = colscale)+
#scale_fill_tableau(palette = "Superfishel Stone") +
xlab("")+
theme_few()+
scale_y_continuous(limits=c(0.20,0.80), breaks = seq(0.20,0.80,.10))+
theme(axis.text.y = element_text(size=15, colour = "black"),
axis.text.x = element_text(color="black", angle=45, hjust = 1, face="bold", size=15))+
guides(fill=FALSE)
#patchwork magick!
p1 <- (ph1000 / cor1500 )+
plot_layout(heights = c(2,1))
p2 <- (ph380 / cor380) +
plot_layout(heights = c(2,1))
tiff("01Corr.tiff",units="in", width=6,height=6, res=300, compression = 'lzw')
cor1500
dev.off()
library(patchwork)
patchwork <- (p1 | p2)
patchwork[[1]] <- patchwork[[1]] + plot_layout(tag_level = 'new')
patchwork[[2]] <- patchwork[[2]] + plot_layout(tag_level = 'new')
tiff("multiCorr-thesis.tiff",units="in", width=6,height=6, res=300, compression = 'lzw')
patchwork + plot_annotation(tag_levels = c('A', '1'),
tag_sep = '.')
dev.off()
############################
eig <- get_eigenvalue(PCA)
eig <- eig[1:min(8, nrow(eig)), , drop = FALSE]
trying <- seq(1:8)
text_labels <- round(eig, 1)
df.eig <- as.data.frame(eig)
df.eig$dimension <- seq(1:8)
ggplot(df.eig, aes(dimension, cumulative.variance.percent))+
geom_bar(stat = "identity", fill="beige", colour="grey")+
geom_line(color = "grey") +
geom_point(shape = 19, color = "red")+
geom_text(label = text_labels$cumulative.variance.percent, vjust = -0.8,
hjust = 1, size=4.5)+
scale_x_continuous(breaks=seq(1,8,1))+
labs(y="Percentage of Cummulative Variance Explained",
x="Principal Components")+
theme_few()+
theme(axis.text.y = element_text(size=12, colour = "black"),
axis.text.x = element_text(color="black"))
####################################
|
/R/03_heatMapCOrr.R
|
no_license
|
Harithaa-Anandakumar/masters.github.io
|
R
| false
| false
| 10,052
|
r
|
# ---------------------------------------------------------------------------
# Figure 4.9: heat-map / sample-correlation analysis on the most variable
# genes of a variance-stabilised expression matrix.
# NOTE(review): relies on objects defined elsewhere in the project
# (`vsd`, `gen`, `heatmapColScale`, `colscale`) and on attached packages
# (DESeq2 for assay(), matrixStats for rowVars(), broom for tidy(),
# tidyverse, pheatmap, viridis) -- confirm against the project setup script.
# ---------------------------------------------------------------------------
#Figure 4.9
# NOTE(review): `gen` is written out *before* it is re-read on the next
# line, so it must already exist in the workspace; ordering looks leftover.
write.csv(gen, "data/genesFromAtas.csv")
gen <- read.csv("data/genesFromAtas.csv")
# perform pca using prcomp func, choosing the top n number of genes
ntop=2000
# Per-gene variance across all samples.
Pvars <- rowVars(assay(vsd))
# Row indices of the (up to) `ntop` most variable genes.
select <- order(Pvars, decreasing = TRUE)[seq_len(min(ntop,
length(Pvars)))]
# PCA over samples; genes are the variables, each scaled to unit variance.
PCA <- prcomp(t(assay(vsd)[select, ]), scale = T)
# Names of genes with the largest absolute loadings on PC1 or PC2.
top_genesHM <- PCA %>%
# extract variable (gene) loadings
tidy(matrix = "variables") %>%
# retain only PC1 and PC2
dplyr::filter(PC == "1" | PC == "2") %>%
# for each PC
group_by(PC) %>%
# sort descending value
dplyr::arrange(desc(abs(value))) %>%
# take top 5 rows of each PC group
# NOTE(review): comment above says "top 5" but the code takes 2000 per PC.
dplyr::slice(1:2000) %>%
# extract the column (gene name) from the table
pull(column) %>%
# retain unique gene names only
unique()
# Expression sub-matrix restricted to those genes: genes as rownames,
# samples as columns labelled "<group> - <specification>".
mew <- as.data.frame(assay(vsd)) %>%
as_tibble(rownames = "gene") %>%
filter(gene %in% top_genesHM)
mew <- as.data.frame(mew)
rownames(mew) <- mew$gene
mew <- mew[,-1]
colnames(mew) <- paste(vsd$group, vsd$specification, sep = " - " )
# Pairwise correlation between samples (columns)
cols.cor <- cor(mew,
                use = "pairwise.complete.obs",
                method = "pearson")
# Pairwise correlation between rows (genes)
rows.cor <- cor(t(mew),
                use = "pairwise.complete.obs",
                method = "pearson")
#rownames(mew) <- NULL
# Plot the heatmap
# Pairwise correlation between samples (columns)
# NOTE(review): `forhc` copies `mew` with the same column labels, so
# `cols.cor1` duplicates `cols.cor`; kept for the later corrplot() call.
forhc <- mew
colnames(forhc) <- paste(vsd$group, vsd$specification, sep = " - " )
cols.cor1 <- cor((forhc),
                 use = "pairwise.complete.obs",
                 method = "pearson")
# dendsort re-orders dendrogram branches for nicer-looking heat maps.
library(dendsort)
# Helper: re-sort an hclust object via dendsort (defined but not used below).
sort_hclust <- function(...) as.hclust(dendsort(as.dendrogram(...)))
# Complete-linkage clustering of samples (columns) and genes (rows) on
# Euclidean distance; the column labels are reused by pheatmap later.
hc.complete = hclust(dist(t(mew)), method="complete")
hc.complete$label <- paste(vsd$group, vsd$specification, sep = " - " )
hc.complete.rows = hclust(dist((mew)), method="complete")
# Helper adapted from pheatmap internals: computes the horizontal grid
# coordinates for `n` heatmap columns, leaving a 4-bigpt visual gap after
# every column index listed in `gaps`.
#
# Args:
#   n:    total number of columns.
#   gaps: integer vector of column indices after which a gap is drawn;
#         length 0 (e.g. integer(0)) means no gaps.
#   m:    column indices to place. Default seq_len(n) covers all columns
#         and, unlike the original 1:n, is safe for n == 0.
#
# Returns: list(coord = right edge of each column (grid units),
#               size  = width of a single column (grid units)).
find_coordinates <- function(n, gaps, m = seq_len(n)){
  if(length(gaps) == 0){
    # No gaps: columns evenly partition the npc width.
    return(list(coord = unit(m / n, "npc"), size = unit(1 / n, "npc") ))
  }
  if(max(gaps) > n){
    stop("Gaps do not match with matrix size")
  }
  # Column width after reserving 4 bigpts per gap.
  # (unit(4, ...) replaces the original unit("4", ...): numeric is what
  # grid::unit() documents; as.numeric("4") made the string work by accident.)
  size <- (1 / n) * (unit(1, "npc") - length(gaps) * unit(4, "bigpts"))
  # Number of gaps to the left of each requested column index.
  # rowSums(outer(...)) replaces apply(sapply(...), 1, sum), which errored
  # when length(gaps) == 1 because sapply() then returned a vector, not a
  # matrix; outer() always yields a length(m) x length(gaps) matrix.
  gaps2 <- rowSums(outer(m, gaps, ">"))
  coord <- m * size + (gaps2 * unit(4, "bigpts"))
  return(list(coord = coord, size = size))
}
# Replacement for pheatmap's internal draw_colnames(): renders the column
# labels centred under each heatmap column, honouring column gaps and the
# requested justification/rotation. Extra arguments are forwarded to gpar().
draw_colnames <- function(coln, gaps, vjust_col, hjust_col, angle_col, ...) {
  pos <- find_coordinates(length(coln), gaps)
  # Centre of each column = right edge minus half a column width.
  label_x <- pos$coord - 0.5 * pos$size
  # Anchor labels 3 bigpts below the top of the viewport.
  label_y <- unit(1, "npc") - unit(3, "bigpts")
  textGrob(
    coln,
    x = label_x,
    y = label_y,
    vjust = vjust_col,
    hjust = hjust_col,
    rot = angle_col,
    gp = gpar(...)
  )
}
# Patch pheatmap's internal column-label renderer with the local
# draw_colnames() defined above (centred, rotatable labels).
# BUG FIX: the original passed the *string* "draw_colnames" as `value`,
# which replaces pheatmap's function with a character vector and makes
# every subsequent pheatmap() call fail with "attempt to apply
# non-function". assignInNamespace() must receive the function object.
assignInNamespace(
  x = "draw_colnames",
  value = draw_colnames,
  ns = asNamespace("pheatmap")
)
# Figure: clustered heat map of `mew` (top-variance genes) written to a
# 10x10 in, 300 dpi TIFF. Rows and columns use the precomputed
# complete-linkage trees with (1 - Pearson correlation) distances; both
# dendrograms are cut into 6 groups.
tiff("heatMapThesis.tiff",units="in", width=10,height=10, res=300, compression = 'lzw')
pheatmap(
  mew,
  scale = "row",                     # z-score each gene across samples
  cluster_cols=hc.complete,
  cluster_rows =hc.complete.rows,
  clustering_distance_cols = as.dist(1 - cols.cor),
  clustering_distance_rows = as.dist(1 - rows.cor),
  cutree_cols = 6,
  cutree_rows = 6,
  annotation_colors = heatmapColScale,
  #annotation_col = colAn,
  #annotation_row = rowAn,
  #show_rownames = FALSE,
  #show_colnames = FALSE,
  labels_col = hc.complete$label,
  treeheight_row = 0,
  angle_col = 315,
  fontsize_col = 7,
  col=inferno(100)
  # col = inferno(length(breaksList)),
  # breaks = breaksList
)
# BUG FIX: the tiff() device opened above was never closed, so the file
# was left incomplete and later plots could land on the wrong device.
# Every other tiff() call in this script is paired with dev.off().
dev.off()
# ---------------------------------------------------------------------------
# Group-mean correlation heat map + corrplot of the sample correlations.
# NOTE(review): several statements here appear out of execution order
# (likely a paste/extraction artefact): `newForCorr` is piped before it is
# created below, `forCorr` is used before it is assigned, and `huh` is
# referenced before it is defined -- re-order before running top to bottom.
# ---------------------------------------------------------------------------
newForCorr <- newForCorr %>%
as_tibble(rownames = "gene") %>%
select("gene","CM","EHM","Fetal_Heart","Adult_Heart")
library(ggplotify)
# Heat map of group-mean expression (gene column dropped via [2:5]),
# wrapped into a ggplot object so it can be composed later.
x <- as.ggplot(pheatmap(newForCorr[2:5],
show_rownames = FALSE,
cluster_cols = FALSE,
scale="row",
color=viridis(100)))
# NOTE(review): this line looks corrupted or left over ("x | huh"); `|`
# would be an elementwise OR of two plot objects, which is not meaningful
# here -- verify against the original script.
x | huh
# Collapse replicate columns of forCorr to one mean column per group name.
newForCorr <- sapply(split(seq_len(ncol(forCorr)),colnames(forCorr)),function(cis) rowMeans(forCorr[,cis,drop=F]))
# Correlation-matrix plot of the sample-level correlations from above,
# with 3 hclust-ordered rectangles drawn around clusters.
corrplot(cols.cor1,
method = "color",
tl.col='grey30',
addrect = 3,
order = 'hclust',
tl.cex = 0.5,
number.cex = .7,
col=inferno(200),
#type="upper",
# bg = "black",
diag = FALSE)
# Per-sample expression with plain group names as column labels.
forCorr <- mew
colnames(forCorr) <- vsd$group
r <- cor(forCorr, method="pearson")
# Result not assigned; auto-prints only at top level.
round(r,2)
# Mean correlation of each sample against the Adult_Heart samples.
# NOTE(review): `t` here masks base::t() for the rest of the session.
s <- r[rownames(r)=="Adult_Heart",]
t <- as.data.frame(colMeans(s))
t$group <- colnames(s)
t$group <- as.factor(t$group)
colnames(t) <-c("corr", "group")
u <- t[t$group!= "Adult_Heart",]
# Using median
tiff("PearsonCorr-thesis.tiff",units="in", width=4,height=4, res=300, compression = 'lzw')
# NOTE(review): the plot is assigned to `huh` but never printed before
# dev.off(), so this TIFF device captures nothing -- add print(huh).
# ("Corelation" typo below is in the rendered axis label itself.)
huh <- u %>%
mutate(class = fct_reorder(group, corr, .fun='median')) %>%
ggplot( aes(x=reorder(group, corr), y=corr)) +
geom_boxplot(aes(fill = stage(group, after_scale = alpha(fill, 0.9))))+
# geom_boxplot(fill=after_stat(),alpha=0.4) +
ylab("Pearson Corelation\n") +
theme(legend.position="none") +
scale_fill_manual(values = colscale)+
#scale_fill_tableau(palette = "Superfishel Stone") +
xlab("")+
theme_few()+
guides(fill=FALSE)
dev.off()
library(ggplotify)
################################################
# Mean expression per biological group for the top-variance genes
# (`top_genesHM` computed above): a heat map of the group means plus a
# boxplot of per-sample Pearson correlations against Adult_Heart.
onnu <- as.data.frame(assay(vsd)) %>%
as_tibble(rownames = "gene") %>%
filter(gene %in% top_genesHM)
onnu <- as.data.frame(onnu)
rownames(onnu) <- onnu$gene
onnu <- onnu[,-1]
colnames(onnu) <- vsd$group
# Collapse replicate columns to one mean column per group name.
# NOTE(review): sapply()'s return type depends on the input shape;
# vapply()/lapply() would be safer, left unchanged here.
onnuPh <- sapply(split(seq_len(ncol(onnu)),colnames(onnu)),function(cis) rowMeans(onnu[,cis,drop=F]))
onnuPh <- onnuPh %>%
as_tibble(rownames = "gene") %>%
select("gene","CM","EHM","Fetal_Heart","Adult_Heart")
# Heat map of group means; columns kept in the fixed order selected above.
ph1000 <- as.ggplot(pheatmap(onnuPh[2:5],
show_rownames = FALSE,
cluster_cols = FALSE,
treeheight_row = 0,
#scale="row",
color=viridis(100),
fontsize = 15,
angle_col = 45))
colnames(onnu) <- vsd$group
# Sample-by-sample Pearson correlation matrix.
r <- cor(onnu, method="pearson")
# Result not assigned; auto-prints only at top level.
round(r,2)
# Correlations of every sample against Adult_Heart samples, averaged.
# NOTE(review): `t` masks base::t() from here on.
s <- r[rownames(r)=="Adult_Heart",]
t <- as.data.frame(colMeans(s))
t$group <- colnames(s)
t$group <- as.factor(t$group)
colnames(t) <-c("corr", "group")
u <- t[t$group!= "Adult_Heart",]
# Boxplot of correlation-to-adult-heart per group, ordered by median.
# ("Corelation" typo below is in the rendered axis label itself.)
cor1500 <- u %>%
mutate(class = fct_reorder(group, corr, .fun='median')) %>%
ggplot( aes(x=reorder(group, corr), y=corr)) +
geom_boxplot(aes(fill = stage(group, after_scale = alpha(fill, 0.9))))+
# geom_boxplot(fill=after_stat(),alpha=0.4) +
ylab("Pearson Corelation\n") +
theme(legend.position="none") +
scale_fill_manual(values = colscale)+
#scale_fill_tableau(palette = "Superfishel Stone") +
xlab("")+
theme_few()+
scale_y_continuous(limits=c(0.20,0.80), breaks = seq(0.20,0.80,.10))+
theme(axis.text.y = element_text(size=15, colour="black"),
axis.text.x = element_text(color="black", angle=45, hjust = 1, face="bold",size=15))+
guides(fill=FALSE)
# Same analysis as the block above, but restricted to the curated gene
# list `gen$x` (read from data/genesFromAtas.csv) instead of the
# top-variance genes. NOTE(review): this duplicates the previous section
# almost verbatim and re-uses/overwrites r, s, t, u -- a shared helper
# function would remove the duplication.
rendu <- as.data.frame(assay(vsd)) %>%
as_tibble(rownames = "gene") %>%
filter(gene %in% gen$x)
rendu <- as.data.frame(rendu)
rownames(rendu) <- rendu$gene
rendu <- rendu[,-1]
colnames(rendu) <- vsd$group
# Collapse replicate columns to one mean column per group name.
renduPh <- sapply(split(seq_len(ncol(rendu)),colnames(rendu)),function(cis) rowMeans(rendu[,cis,drop=F]))
renduPh <- renduPh %>%
as_tibble(rownames = "gene") %>%
select("gene","CM","EHM","Fetal_Heart","Adult_Heart")
# Heat map of group means for the curated gene set.
ph380 <- as.ggplot(pheatmap(renduPh[2:5],
show_rownames = FALSE,
cluster_cols = FALSE,
treeheight_row = 0,
#scale="row",
color=viridis(100),
fontsize = 15,
angle_col = 45))
colnames(rendu) <- vsd$group
r <- cor(rendu, method="pearson")
# Result not assigned; auto-prints only at top level.
round(r,2)
# Mean correlation of each sample against Adult_Heart samples.
s <- r[rownames(r)=="Adult_Heart",]
t <- as.data.frame(colMeans(s))
t$group <- colnames(s)
t$group <- as.factor(t$group)
colnames(t) <-c("corr", "group")
u <- t[t$group!= "Adult_Heart",]
# Boxplot of correlation-to-adult-heart per group, ordered by median.
cor380 <- u %>%
mutate(class = fct_reorder(group, corr, .fun='median')) %>%
ggplot( aes(x=reorder(group, corr), y=corr)) +
geom_boxplot(aes(fill = stage(group, after_scale = alpha(fill, 0.9))))+
# geom_boxplot(fill=after_stat(),alpha=0.4) +
ylab("Pearson Corelation\n") +
theme(legend.position="none") +
scale_fill_manual(values = colscale)+
#scale_fill_tableau(palette = "Superfishel Stone") +
xlab("")+
theme_few()+
scale_y_continuous(limits=c(0.20,0.80), breaks = seq(0.20,0.80,.10))+
theme(axis.text.y = element_text(size=15, colour = "black"),
axis.text.x = element_text(color="black", angle=45, hjust = 1, face="bold", size=15))+
guides(fill=FALSE)
#patchwork magick!
# Compose each heat map above its correlation boxplot (2:1 height ratio).
p1 <- (ph1000 / cor1500 )+
plot_layout(heights = c(2,1))
p2 <- (ph380 / cor380) +
plot_layout(heights = c(2,1))
tiff("01Corr.tiff",units="in", width=6,height=6, res=300, compression = 'lzw')
cor1500
dev.off()
library(patchwork)
# Side-by-side composite of the two panels, each with its own tag level.
patchwork <- (p1 | p2)
patchwork[[1]] <- patchwork[[1]] + plot_layout(tag_level = 'new')
patchwork[[2]] <- patchwork[[2]] + plot_layout(tag_level = 'new')
tiff("multiCorr-thesis.tiff",units="in", width=6,height=6, res=300, compression = 'lzw')
patchwork + plot_annotation(tag_levels = c('A', '1'),
tag_sep = '.')
dev.off()
############################
# Scree plot: cumulative variance explained by the first 8 principal
# components (get_eigenvalue is from factoextra).
eig <- get_eigenvalue(PCA)
eig <- eig[1:min(8, nrow(eig)), , drop = FALSE]
# NOTE(review): seq(1:8) is equivalent to 1:8 (seq() of a vector), and
# `trying` is never used afterwards.
trying <- seq(1:8)
text_labels <- round(eig, 1)
df.eig <- as.data.frame(eig)
df.eig$dimension <- seq(1:8)
# Bar + line + point scree plot with cumulative-variance labels.
# ("Cummulative" typo below is in the rendered axis label itself.)
ggplot(df.eig, aes(dimension, cumulative.variance.percent))+
geom_bar(stat = "identity", fill="beige", colour="grey")+
geom_line(color = "grey") +
geom_point(shape = 19, color = "red")+
geom_text(label = text_labels$cumulative.variance.percent, vjust = -0.8,
hjust = 1, size=4.5)+
scale_x_continuous(breaks=seq(1,8,1))+
labs(y="Percentage of Cummulative Variance Explained",
x="Principal Components")+
theme_few()+
theme(axis.text.y = element_text(size=12, colour = "black"),
axis.text.x = element_text(color="black"))
####################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{add_blueprint}
\alias{add_blueprint}
\title{Add blueprint to a recipe}
\usage{
add_blueprint(base_recipe, blueprint)
}
\arguments{
\item{base_recipe}{The basic recipe specified by the user. See (TODO reference).}
\item{blueprint}{A blueprint id. Use \code{\link{TODO function}} for a list of available blueprints.}
}
\description{
This function binds together the base definition of the recipe
specified by the user with a selected blueprint id.
}
\examples{
TODO examples
}
|
/man/add_blueprint.Rd
|
permissive
|
konradsemsch/cookbook
|
R
| false
| true
| 574
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{add_blueprint}
\alias{add_blueprint}
\title{Add blueprint to a recipe}
\usage{
add_blueprint(base_recipe, blueprint)
}
\arguments{
\item{base_recipe}{The basic recipe specified by the user. See (TODO reference).}
\item{blueprint}{A blueprint id. Use \code{\link{TODO function}} for a list of available blueprints.}
}
\description{
This function binds together the base definition of the recipe
specified by the user with a selected blueprint id.
}
\examples{
TODO examples
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{model-method-variational}
\alias{model-method-variational}
\alias{variational}
\title{Run Stan's variational approximation algorithms}
\usage{
variational(
data = NULL,
seed = NULL,
refresh = NULL,
init = NULL,
save_latent_dynamics = FALSE,
output_dir = NULL,
output_basename = NULL,
sig_figs = NULL,
threads = NULL,
opencl_ids = NULL,
algorithm = NULL,
iter = NULL,
grad_samples = NULL,
elbo_samples = NULL,
eta = NULL,
adapt_engaged = NULL,
adapt_iter = NULL,
tol_rel_obj = NULL,
eval_elbo = NULL,
output_samples = NULL
)
}
\arguments{
\item{data}{(multiple options) The data to use for the variables specified in
the data block of the Stan program. One of the following:
\itemize{
\item A named list of \R objects with the names corresponding to variables
declared in the data block of the Stan program. Internally this list is then
written to JSON for CmdStan using \code{\link[=write_stan_json]{write_stan_json()}}. See
\code{\link[=write_stan_json]{write_stan_json()}} for details on the conversions performed on \R objects
before they are passed to Stan.
\item A path to a data file compatible with CmdStan (JSON or \R dump). See the
appendices in the CmdStan guide for details on using these formats.
\item \code{NULL} or an empty list if the Stan program has no data block.
}}
\item{seed}{(positive integer(s)) A seed for the (P)RNG to pass to CmdStan.
In the case of multi-chain sampling the single \code{seed} will automatically be
augmented by the run (chain) ID so that each chain uses a different
seed. The exception is the transformed data block, which defaults to
using same seed for all chains so that the same data is generated for all
chains if RNG functions are used. The only time \code{seed} should be specified
as a vector (one element per chain) is if RNG functions are used in
transformed data and the goal is to generate \emph{different} data for each
chain.}
\item{refresh}{(non-negative integer) The number of iterations between
printed screen updates. If \code{refresh = 0}, only error messages will be
printed.}
\item{init}{(multiple options) The initialization method to use for the
variables declared in the parameters block of the Stan program. One of
the following:
\itemize{
\item A real number \code{x>0}. This initializes \emph{all} parameters randomly between
\verb{[-x,x]} on the \emph{unconstrained} parameter space.;
\item The number \code{0}. This initializes \emph{all} parameters to \code{0};
\item A character vector of paths (one per chain) to JSON or Rdump files
containing initial values for all or some parameters. See
\code{\link[=write_stan_json]{write_stan_json()}} to write \R objects to JSON files compatible with
CmdStan.
\item A list of lists containing initial values for all or some parameters. For
MCMC the list should contain a sublist for each chain. For optimization and
variational inference there should be just one sublist. The sublists should
have named elements corresponding to the parameters for which you are
specifying initial values. See \strong{Examples}.
\item A function that returns a single list with names corresponding to the
parameters for which you are specifying initial values. The function can
take no arguments or a single argument \code{chain_id}. For MCMC, if the function
has argument \code{chain_id} it will be supplied with the chain id (from 1 to
number of chains) when called to generate the initial values. See
\strong{Examples}.
}}
\item{save_latent_dynamics}{(logical) Should auxiliary diagnostic information
about the latent dynamics be written to temporary diagnostic CSV files?
This argument replaces CmdStan's \code{diagnostic_file} argument and the content
written to CSV is controlled by the user's CmdStan installation and not
CmdStanR (for some algorithms no content may be written). The default
is \code{FALSE}, which is appropriate for almost every use case. To save the
temporary files created when \code{save_latent_dynamics=TRUE} see the
\code{\link[=fit-method-save_latent_dynamics_files]{$save_latent_dynamics_files()}}
method.}
\item{output_dir}{(string) A path to a directory where CmdStan should write
its output CSV files. For interactive use this can typically be left at
\code{NULL} (temporary directory) since CmdStanR makes the CmdStan output
(posterior draws and diagnostics) available in \R via methods of the fitted
model objects. The behavior of \code{output_dir} is as follows:
\itemize{
\item If \code{NULL} (the default), then the CSV files are written to a temporary
directory and only saved permanently if the user calls one of the \verb{$save_*}
methods of the fitted model object (e.g.,
\code{\link[=fit-method-save_output_files]{$save_output_files()}}). These temporary
files are removed when the fitted model object is
\link[base:gc]{garbage collected} (manually or automatically).
\item If a path, then the files are created in \code{output_dir} with names
corresponding to the defaults used by \verb{$save_output_files()}.
}}
\item{output_basename}{(string) A string to use as a prefix for the names of
the output CSV files of CmdStan. If \code{NULL} (the default), the basename of
the output CSV files will be composed of the model name, timestamp, and
5 random characters.}
\item{sig_figs}{(positive integer) The number of significant figures used
when storing the output values. By default, CmdStan represents the output
values with 6 significant figures. The upper limit for \code{sig_figs} is 18.
Increasing this value will result in larger output CSV files and thus an
increased usage of disk space.}
\item{threads}{(positive integer) If the model was
\link[=model-method-compile]{compiled} with threading support, the number of
threads to use in parallelized sections (e.g., when using the Stan
functions \code{reduce_sum()} or \code{map_rect()}).}
\item{opencl_ids}{(integer vector of length 2) The platform and
device IDs of the OpenCL device to use for fitting. The model must
be compiled with \code{cpp_options = list(stan_opencl = TRUE)} for this
argument to have an effect.}
\item{algorithm}{(string) The algorithm. Either \code{"meanfield"} or
\code{"fullrank"}.}
\item{iter}{(positive integer) The \emph{maximum} number of iterations.}
\item{grad_samples}{(positive integer) The number of samples for Monte Carlo
estimate of gradients.}
\item{elbo_samples}{(positive integer) The number of samples for Monte Carlo
estimate of ELBO (objective function).}
\item{eta}{(positive real) The step size weighting parameter for adaptive
step size sequence.}
\item{adapt_engaged}{(logical) Do warmup adaptation?}
\item{adapt_iter}{(positive integer) The \emph{maximum} number of adaptation
iterations.}
\item{tol_rel_obj}{(positive real) Convergence tolerance on the relative norm
of the objective.}
\item{eval_elbo}{(positive integer) Evaluate ELBO every Nth iteration.}
\item{output_samples}{(positive integer) Number of approximate posterior
samples to draw and save.}
}
\value{
A \code{\link{CmdStanVB}} object.
}
\description{
The \verb{$variational()} method of a \code{\link{CmdStanModel}} object runs
Stan's variational Bayes (ADVI) algorithms.
Any argument left as \code{NULL} will default to the default value used by the
installed version of CmdStan. See the
\href{https://mc-stan.org/docs/cmdstan-guide/}{CmdStan User’s Guide}
for more details.
}
\details{
CmdStan can fit a variational approximation to the posterior. The
approximation is a Gaussian in the unconstrained variable space. Stan
implements two variational algorithms. The \code{algorithm="meanfield"} option
uses a fully factorized Gaussian for the approximation. The
\code{algorithm="fullrank"} option uses a Gaussian with a full-rank covariance
matrix for the approximation.
-- \href{https://github.com/stan-dev/cmdstan/releases/latest}{\emph{CmdStan Interface User's Guide}}
}
\examples{
\dontrun{
library(cmdstanr)
library(posterior)
library(bayesplot)
color_scheme_set("brightblue")
# Set path to CmdStan
# (Note: if you installed CmdStan via install_cmdstan() with default settings
# then setting the path is unnecessary but the default below should still work.
# Otherwise use the `path` argument to specify the location of your
# CmdStan installation.)
set_cmdstan_path(path = NULL)
# Create a CmdStanModel object from a Stan program,
# here using the example model that comes with CmdStan
file <- file.path(cmdstan_path(), "examples/bernoulli/bernoulli.stan")
mod <- cmdstan_model(file)
mod$print()
# Data as a named list (like RStan)
stan_data <- list(N = 10, y = c(0,1,0,0,0,0,0,0,0,1))
# Run MCMC using the 'sample' method
fit_mcmc <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
parallel_chains = 2
)
# Use 'posterior' package for summaries
fit_mcmc$summary()
# Get posterior draws
draws <- fit_mcmc$draws()
print(draws)
# Convert to data frame using posterior::as_draws_df
as_draws_df(draws)
# Plot posterior using bayesplot (ggplot2)
mcmc_hist(fit_mcmc$draws("theta"))
# Call CmdStan's diagnose and stansummary utilities
fit_mcmc$cmdstan_diagnose()
fit_mcmc$cmdstan_summary()
# For models fit using MCMC, if you like working with RStan's stanfit objects
# then you can create one with rstan::read_stan_csv()
# stanfit <- rstan::read_stan_csv(fit_mcmc$output_files())
# Run 'optimize' method to get a point estimate (default is Stan's LBFGS algorithm)
# and also demonstrate specifying data as a path to a file instead of a list
my_data_file <- file.path(cmdstan_path(), "examples/bernoulli/bernoulli.data.json")
fit_optim <- mod$optimize(data = my_data_file, seed = 123)
fit_optim$summary()
# Run 'variational' method to approximate the posterior (default is meanfield ADVI)
fit_vb <- mod$variational(data = stan_data, seed = 123)
fit_vb$summary()
# Plot approximate posterior using bayesplot
mcmc_hist(fit_vb$draws("theta"))
# Specifying initial values as a function
fit_mcmc_w_init_fun <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = function() list(theta = runif(1))
)
fit_mcmc_w_init_fun_2 <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = function(chain_id) {
# silly but demonstrates optional use of chain_id
list(theta = 1 / (chain_id + 1))
}
)
fit_mcmc_w_init_fun_2$init()
# Specifying initial values as a list of lists
fit_mcmc_w_init_list <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = list(
list(theta = 0.75), # chain 1
list(theta = 0.25) # chain 2
)
)
fit_optim_w_init_list <- mod$optimize(
data = stan_data,
seed = 123,
init = list(
list(theta = 0.75)
)
)
fit_optim_w_init_list$init()
}
}
\seealso{
The CmdStanR website
(\href{https://mc-stan.org/cmdstanr/}{mc-stan.org/cmdstanr}) for online
documentation and tutorials.
The Stan and CmdStan documentation:
\itemize{
\item Stan documentation: \href{https://mc-stan.org/users/documentation/}{mc-stan.org/users/documentation}
\item CmdStan User’s Guide: \href{https://mc-stan.org/docs/cmdstan-guide/}{mc-stan.org/docs/cmdstan-guide}
}
Other CmdStanModel methods:
\code{\link{model-method-check_syntax}},
\code{\link{model-method-compile}},
\code{\link{model-method-diagnose}},
\code{\link{model-method-format}},
\code{\link{model-method-generate-quantities}},
\code{\link{model-method-optimize}},
\code{\link{model-method-sample_mpi}},
\code{\link{model-method-sample}},
\code{\link{model-method-variables}}
}
\concept{CmdStanModel methods}
|
/man/model-method-variational.Rd
|
permissive
|
mike-lawrence/cmdstanr
|
R
| false
| true
| 11,586
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{model-method-variational}
\alias{model-method-variational}
\alias{variational}
\title{Run Stan's variational approximation algorithms}
\usage{
variational(
data = NULL,
seed = NULL,
refresh = NULL,
init = NULL,
save_latent_dynamics = FALSE,
output_dir = NULL,
output_basename = NULL,
sig_figs = NULL,
threads = NULL,
opencl_ids = NULL,
algorithm = NULL,
iter = NULL,
grad_samples = NULL,
elbo_samples = NULL,
eta = NULL,
adapt_engaged = NULL,
adapt_iter = NULL,
tol_rel_obj = NULL,
eval_elbo = NULL,
output_samples = NULL
)
}
\arguments{
\item{data}{(multiple options) The data to use for the variables specified in
the data block of the Stan program. One of the following:
\itemize{
\item A named list of \R objects with the names corresponding to variables
declared in the data block of the Stan program. Internally this list is then
written to JSON for CmdStan using \code{\link[=write_stan_json]{write_stan_json()}}. See
\code{\link[=write_stan_json]{write_stan_json()}} for details on the conversions performed on \R objects
before they are passed to Stan.
\item A path to a data file compatible with CmdStan (JSON or \R dump). See the
appendices in the CmdStan guide for details on using these formats.
\item \code{NULL} or an empty list if the Stan program has no data block.
}}
\item{seed}{(positive integer(s)) A seed for the (P)RNG to pass to CmdStan.
In the case of multi-chain sampling the single \code{seed} will automatically be
augmented by the run (chain) ID so that each chain uses a different
seed. The exception is the transformed data block, which defaults to
using same seed for all chains so that the same data is generated for all
chains if RNG functions are used. The only time \code{seed} should be specified
as a vector (one element per chain) is if RNG functions are used in
transformed data and the goal is to generate \emph{different} data for each
chain.}
\item{refresh}{(non-negative integer) The number of iterations between
printed screen updates. If \code{refresh = 0}, only error messages will be
printed.}
\item{init}{(multiple options) The initialization method to use for the
variables declared in the parameters block of the Stan program. One of
the following:
\itemize{
\item A real number \code{x>0}. This initializes \emph{all} parameters randomly between
\verb{[-x,x]} on the \emph{unconstrained} parameter space.;
\item The number \code{0}. This initializes \emph{all} parameters to \code{0};
\item A character vector of paths (one per chain) to JSON or Rdump files
containing initial values for all or some parameters. See
\code{\link[=write_stan_json]{write_stan_json()}} to write \R objects to JSON files compatible with
CmdStan.
\item A list of lists containing initial values for all or some parameters. For
MCMC the list should contain a sublist for each chain. For optimization and
variational inference there should be just one sublist. The sublists should
have named elements corresponding to the parameters for which you are
specifying initial values. See \strong{Examples}.
\item A function that returns a single list with names corresponding to the
parameters for which you are specifying initial values. The function can
take no arguments or a single argument \code{chain_id}. For MCMC, if the function
has argument \code{chain_id} it will be supplied with the chain id (from 1 to
number of chains) when called to generate the initial values. See
\strong{Examples}.
}}
\item{save_latent_dynamics}{(logical) Should auxiliary diagnostic information
about the latent dynamics be written to temporary diagnostic CSV files?
This argument replaces CmdStan's \code{diagnostic_file} argument and the content
written to CSV is controlled by the user's CmdStan installation and not
CmdStanR (for some algorithms no content may be written). The default
is \code{FALSE}, which is appropriate for almost every use case. To save the
temporary files created when \code{save_latent_dynamics=TRUE} see the
\code{\link[=fit-method-save_latent_dynamics_files]{$save_latent_dynamics_files()}}
method.}
\item{output_dir}{(string) A path to a directory where CmdStan should write
its output CSV files. For interactive use this can typically be left at
\code{NULL} (temporary directory) since CmdStanR makes the CmdStan output
(posterior draws and diagnostics) available in \R via methods of the fitted
model objects. The behavior of \code{output_dir} is as follows:
\itemize{
\item If \code{NULL} (the default), then the CSV files are written to a temporary
directory and only saved permanently if the user calls one of the \verb{$save_*}
methods of the fitted model object (e.g.,
\code{\link[=fit-method-save_output_files]{$save_output_files()}}). These temporary
files are removed when the fitted model object is
\link[base:gc]{garbage collected} (manually or automatically).
\item If a path, then the files are created in \code{output_dir} with names
corresponding to the defaults used by \verb{$save_output_files()}.
}}
\item{output_basename}{(string) A string to use as a prefix for the names of
the output CSV files of CmdStan. If \code{NULL} (the default), the basename of
the output CSV files will be composed of the model name, timestamp, and
5 random characters.}
\item{sig_figs}{(positive integer) The number of significant figures used
when storing the output values. By default, CmdStan represents the output
values with 6 significant figures. The upper limit for \code{sig_figs} is 18.
Increasing this value will result in larger output CSV files and thus an
increased usage of disk space.}
\item{threads}{(positive integer) If the model was
\link[=model-method-compile]{compiled} with threading support, the number of
threads to use in parallelized sections (e.g., when using the Stan
functions \code{reduce_sum()} or \code{map_rect()}).}
\item{opencl_ids}{(integer vector of length 2) The platform and
device IDs of the OpenCL device to use for fitting. The model must
be compiled with \code{cpp_options = list(stan_opencl = TRUE)} for this
argument to have an effect.}
\item{algorithm}{(string) The algorithm. Either \code{"meanfield"} or
\code{"fullrank"}.}
\item{iter}{(positive integer) The \emph{maximum} number of iterations.}
\item{grad_samples}{(positive integer) The number of samples for Monte Carlo
estimate of gradients.}
\item{elbo_samples}{(positive integer) The number of samples for Monte Carlo
estimate of ELBO (objective function).}
\item{eta}{(positive real) The step size weighting parameter for adaptive
step size sequence.}
\item{adapt_engaged}{(logical) Do warmup adaptation?}
\item{adapt_iter}{(positive integer) The \emph{maximum} number of adaptation
iterations.}
\item{tol_rel_obj}{(positive real) Convergence tolerance on the relative norm
of the objective.}
\item{eval_elbo}{(positive integer) Evaluate ELBO every Nth iteration.}
\item{output_samples}{(positive integer) Number of approximate posterior
samples to draw and save.}
}
\value{
A \code{\link{CmdStanVB}} object.
}
\description{
The \verb{$variational()} method of a \code{\link{CmdStanModel}} object runs
Stan's variational Bayes (ADVI) algorithms.
Any argument left as \code{NULL} will default to the default value used by the
installed version of CmdStan. See the
\href{https://mc-stan.org/docs/cmdstan-guide/}{CmdStan User’s Guide}
for more details.
}
\details{
CmdStan can fit a variational approximation to the posterior. The
approximation is a Gaussian in the unconstrained variable space. Stan
implements two variational algorithms. The \code{algorithm="meanfield"} option
uses a fully factorized Gaussian for the approximation. The
\code{algorithm="fullrank"} option uses a Gaussian with a full-rank covariance
matrix for the approximation.
-- \href{https://github.com/stan-dev/cmdstan/releases/latest}{\emph{CmdStan Interface User's Guide}}
}
\examples{
\dontrun{
library(cmdstanr)
library(posterior)
library(bayesplot)
color_scheme_set("brightblue")
# Set path to CmdStan
# (Note: if you installed CmdStan via install_cmdstan() with default settings
# then setting the path is unnecessary but the default below should still work.
# Otherwise use the `path` argument to specify the location of your
# CmdStan installation.)
set_cmdstan_path(path = NULL)
# Create a CmdStanModel object from a Stan program,
# here using the example model that comes with CmdStan
file <- file.path(cmdstan_path(), "examples/bernoulli/bernoulli.stan")
mod <- cmdstan_model(file)
mod$print()
# Data as a named list (like RStan)
stan_data <- list(N = 10, y = c(0,1,0,0,0,0,0,0,0,1))
# Run MCMC using the 'sample' method
fit_mcmc <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
parallel_chains = 2
)
# Use 'posterior' package for summaries
fit_mcmc$summary()
# Get posterior draws
draws <- fit_mcmc$draws()
print(draws)
# Convert to data frame using posterior::as_draws_df
as_draws_df(draws)
# Plot posterior using bayesplot (ggplot2)
mcmc_hist(fit_mcmc$draws("theta"))
# Call CmdStan's diagnose and stansummary utilities
fit_mcmc$cmdstan_diagnose()
fit_mcmc$cmdstan_summary()
# For models fit using MCMC, if you like working with RStan's stanfit objects
# then you can create one with rstan::read_stan_csv()
# stanfit <- rstan::read_stan_csv(fit_mcmc$output_files())
# Run 'optimize' method to get a point estimate (default is Stan's LBFGS algorithm)
# and also demonstrate specifying data as a path to a file instead of a list
my_data_file <- file.path(cmdstan_path(), "examples/bernoulli/bernoulli.data.json")
fit_optim <- mod$optimize(data = my_data_file, seed = 123)
fit_optim$summary()
# Run 'variational' method to approximate the posterior (default is meanfield ADVI)
fit_vb <- mod$variational(data = stan_data, seed = 123)
fit_vb$summary()
# Plot approximate posterior using bayesplot
mcmc_hist(fit_vb$draws("theta"))
# Specifying initial values as a function
fit_mcmc_w_init_fun <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = function() list(theta = runif(1))
)
fit_mcmc_w_init_fun_2 <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = function(chain_id) {
# silly but demonstrates optional use of chain_id
list(theta = 1 / (chain_id + 1))
}
)
fit_mcmc_w_init_fun_2$init()
# Specifying initial values as a list of lists
fit_mcmc_w_init_list <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = list(
list(theta = 0.75), # chain 1
list(theta = 0.25) # chain 2
)
)
fit_optim_w_init_list <- mod$optimize(
data = stan_data,
seed = 123,
init = list(
list(theta = 0.75)
)
)
fit_optim_w_init_list$init()
}
}
\seealso{
The CmdStanR website
(\href{https://mc-stan.org/cmdstanr/}{mc-stan.org/cmdstanr}) for online
documentation and tutorials.
The Stan and CmdStan documentation:
\itemize{
\item Stan documentation: \href{https://mc-stan.org/users/documentation/}{mc-stan.org/users/documentation}
\item CmdStan User’s Guide: \href{https://mc-stan.org/docs/cmdstan-guide/}{mc-stan.org/docs/cmdstan-guide}
}
Other CmdStanModel methods:
\code{\link{model-method-check_syntax}},
\code{\link{model-method-compile}},
\code{\link{model-method-diagnose}},
\code{\link{model-method-format}},
\code{\link{model-method-generate-quantities}},
\code{\link{model-method-optimize}},
\code{\link{model-method-sample_mpi}},
\code{\link{model-method-sample}},
\code{\link{model-method-variables}}
}
\concept{CmdStanModel methods}
|
## --------------------------------
##
## Script name: makeCompleteFiles.R
##
## Purpose of script: Make stitched CSVs -- one for trackerholder (the real hand),
## and one for cursorObject (the object)
##
## Author: Shanaa Modchalingam
##
## Date created: 2019-12-16
##
## Email: s.modcha@gmail.com
##
## --------------------------------
##
## Notes: For each participant, make a "complete" file for hand, and one for object
##
## --------------------------------
## Load packages
library(data.table)
library(tidyverse)

## Setup ----
# Raw per-trial CSVs live under data/raw/<expVersion>/<participant>/<session>.
# For each session we stitch the per-trial tracker files into one CSV per
# tracker and copy over the trial-results file, mirroring the folder tree
# under data/complete/.
path <- "data/raw"
expVersions <- c("reachToTarget_distance_20_EI", "reachToTarget_distance_20_IE")

# showWarnings = FALSE: re-running the script over existing directories
# should not spam warnings.
dir.create(file.path("data", "complete"), showWarnings = FALSE)

for (expVersion in expVersions) {
  dir.create(file.path("data", "complete", expVersion), showWarnings = FALSE)

  for (ppt in list.files(path = file.path(path, expVersion))) {
    dir.create(file.path("data", "complete", expVersion, ppt), showWarnings = FALSE)

    for (session in list.files(path = file.path(path, expVersion, ppt))) {
      dir.create(file.path("data", "complete", expVersion, ppt, session), showWarnings = FALSE)

      # One stitched CSV per tracker: the object ("cursorobjecttracker")
      # and the real hand ("trackerholderobject").
      for (trackerTag in c("cursorobjecttracker", "trackerholderobject")) {
        # Full paths of every per-trial file for this tracker.
        filesToLoad <- list.files(
          path = file.path(path, expVersion, ppt, session),
          pattern = glob2rx(paste0("*", trackerTag, "*")),
          full.names = TRUE
        )

        # Read every trial file and stitch them row-wise.
        # lapply + rbindlist replaces the original grow-a-list index loop.
        datalist <- lapply(filesToLoad, fread, stringsAsFactors = FALSE)
        complete_df <- rbindlist(datalist)

        fileName <- paste0(trackerTag, "_complete.csv")
        fwrite(complete_df, file = file.path("data", "complete", expVersion, ppt, session, fileName))
      }

      # Copy over the trial results file.
      # NOTE(review): assumes exactly one "*_results*" file per session;
      # fread() would error if list.files() matched several -- confirm.
      trialResultPath <- list.files(
        path = file.path(path, expVersion, ppt, session),
        pattern = glob2rx("*_results*"),
        full.names = TRUE
      )
      trialResult <- fread(trialResultPath, stringsAsFactors = FALSE)
      fwrite(trialResult, file = file.path("data", "complete", expVersion, ppt, session, "trial_results.csv"))
    }
  }
}
|
/preprocessing/makeCompleteFiles.R
|
no_license
|
shanaam/reachToTarget_distances
|
R
| false
| false
| 2,637
|
r
|
## --------------------------------
##
## Script name: makeCompleteFiles.R
##
## Purpose of script: Make stitched CSVs -- one for trackerholder (the real hand),
## and one for cursorObject (the object)
##
## Author: Shanaa Modchalingam
##
## Date created: 2019-12-16
##
## Email: s.modcha@gmail.com
##
## --------------------------------
##
## Notes: For each participant, make a "complete" file for hand, and one for object
##
## --------------------------------
## Load packages
library(data.table)
library(tidyverse)
## make into a function
# setup: raw data location and the experiment versions to process
path <- "data/raw"
expVersions <- c("reachToTarget_distance_20_EI", "reachToTarget_distance_20_IE")
# Mirror the raw directory tree under data/complete
dir.create(paste("data", "complete", sep = "/"))
for(expVersion in expVersions){
dir.create(paste("data", "complete", expVersion, sep = "/"))
# One subdirectory per participant, per session
for (ppt in list.files(path = paste(path, expVersion, sep = '/'))){
dir.create(paste("data", "complete", expVersion, ppt, sep = '/'))
for (session in list.files(path = paste(path, expVersion, ppt, sep = '/'))){
dir.create(paste("data", "complete", expVersion, ppt, session, sep = '/'))
# Stitch the per-trial files for each of the two trackers
for(trackerTag in c("cursorobjecttracker", "trackerholderobject")){
# make a vector of filenames to load (these are entire paths)
filesToLoad <- list.files(path = paste(path, expVersion, ppt, session, sep = '/'),
pattern = glob2rx(paste("*",trackerTag,"*", sep = "")),
full.names = TRUE)
datalist <- list()
i <- 1
# fill up datalist: one data.table per trial file
for (eachFilePath in filesToLoad){
eachFile <- fread(eachFilePath, stringsAsFactors = FALSE)
# save this one df to datalist
datalist[[i]] <- eachFile
i <- i+1
}
# stack all trials into a single "complete" table and write it out
complete_df <- do.call(rbind, datalist)
fileName <- paste(trackerTag, "_complete.csv", sep = "")
# print(head(complete_df))
fwrite(complete_df, file = paste("data", "complete", expVersion, ppt, session, fileName, sep = '/'))
}
#copy over the trial results file unchanged
trialResultPath <- list.files(path = paste(path, expVersion, ppt, session, sep = '/'),
pattern = glob2rx("*_results*"),
full.names = TRUE)
trialResult <- fread(trialResultPath, stringsAsFactors = FALSE)
fwrite(trialResult, file = paste("data", "complete", expVersion, ppt, session, "trial_results.csv", sep = '/'))
}
}
}
|
# Entry point for running a Shiny app on a PaaS (e.g. cloud.gov / Cloud Foundry).
library(shiny)
# NOTE(review): installing packages at app start-up is slow and fragile;
# consider baking dependencies into the buildpack/image instead.
install.packages("praise",repos = "https://cran.r-project.org")
#this doesn't appear to be working yet...
#install.packages("rgdal", type = "source",
#	repos = "https://cran.r-project.org",
#	configure.args=c('--with-proj-include=/home/vcap/deps/0/apt/usr/include',
#	'--with-proj-lib=/home/vcap/deps/0/apt/usr/lib',
#	'--with-gdal-config=/home/vcap/deps/0/apt/usr/bin'))
# maybe some info here: https://github.com/cloudfoundry/python-buildpack/issues/25
# Listen on all interfaces, on the port the platform supplies via the PORT env var.
runApp(host="0.0.0.0", port=strtoi(Sys.getenv("PORT")))
|
/contributedCode/cloud_gov_shiny/rgdal/shiny.R
|
permissive
|
USEPA/R-User-Group
|
R
| false
| false
| 626
|
r
|
# Entry point for running a Shiny app on a PaaS (e.g. cloud.gov / Cloud Foundry).
library(shiny)
# NOTE(review): installing packages at app start-up is slow and fragile;
# consider baking dependencies into the buildpack/image instead.
install.packages("praise",repos = "https://cran.r-project.org")
#this doesn't appear to be working yet...
#install.packages("rgdal", type = "source",
#	repos = "https://cran.r-project.org",
#	configure.args=c('--with-proj-include=/home/vcap/deps/0/apt/usr/include',
#	'--with-proj-lib=/home/vcap/deps/0/apt/usr/lib',
#	'--with-gdal-config=/home/vcap/deps/0/apt/usr/bin'))
# maybe some info here: https://github.com/cloudfoundry/python-buildpack/issues/25
# Listen on all interfaces, on the port the platform supplies via the PORT env var.
runApp(host="0.0.0.0", port=strtoi(Sys.getenv("PORT")))
|
\name{BayesPen}
\alias{BayesPen}
\title{
Bayesian Penalized Credible Regions
}
\description{
Fits the Bayesian penalized credible regions method from the posterior mean and covariance of a Bayesian model.
The function performs variable selection (Bondell and Reich 2012) and confounder selection (Wilson and Reich 2014+).
The default is variable selection and confounder selection is only performed if confounder weights are provided.
}
\usage{
BayesPen(beta, beta_cov, joint, force = NULL, confounder.weights, max.steps = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{beta}{
p-vector of posterior means from the fitting of the full regression model. For confounder selection this is the outcome model.
}
\item{beta_cov}{
Posterior covariance matrix corresponding to beta.
}
\item{joint}{
Indicator if joint credible regions approach should be used.
If joint=FALSE the marginal approach of Bondell and Reich (2012) will be used.
The marginal approach is only available for variable selection, not confounder selection.
}
\item{force}{
A vector of columns corresponding to variables that are forced to be included in the model. For example, this may include an intercept.
For confounder selection the exposure(s) of interest should be forced into the model.}
\item{confounder.weights}{
The posterior mean from the exposure model for confounder selection.
For a single exposure this is a p-vector with exposure model regression coefficients in the same order as in beta.
For multiple exposures this is a matrix with p rows and a column for each exposure.
The locations corresponding to exposure(s) in beta can be set to any numeric value; they are not used.
For variable selection this is omitted.
}
\item{max.steps}{
Maximum number of steps to be performed in the LARS algorithm (Hastie and Efron 2013).
}
}
\value{
\item{joint.path}{A complete solution path for the joint credible regions approach. Each row is a model in the solution path with a 1 indicating a variable is included and a 0 indicating it is not included.}
\item{marginal.path}{A complete solution path for the marginal credible regions approach. The p-vector denotes the step at which each covariate is included in the model.}
\item{order.path}{The action returned from lars that shows when each covariate is added to the model.}
\item{order.marg}{The covariate added at each step.}
\item{joint}{Returns the logical joint.}
\item{force}{Returns a vector indicating which variables are forced into the model.}
}
\references{
Bondell, H. D. and Reich, B. J. (2012). Consistent high-dimensional Bayesian variable selection via penalized credible regions. \emph{J. Am. Statist. Assoc}. 107, 1610-1624.
Trevor Hastie and Brad Efron (2013). lars: Least Angle Regression, Lasso and Forward Stagewise. R package version 1.2. http://CRAN.R-project.org/package=lars
Wilson, A. and Reich, B.J. (2014+). Confounder selection via penalized credible regions.
}
\author{
Ander Wilson, Howard D. Bondell, and Brian J. Reich
}
\seealso{
\code{\link{BayesPen.lm}}, \code{\link{BayesPen.lm.confounders}}
}
\examples{
######################
#Variable Selection
set.seed(1234)
dat <- SimExample(500,model="BR1")
X <- dat$X
y <- dat$y
#fit the full model assuming flat priors on beta
fit1 <- lm(y~X-1)
betahat <- coef(fit1)
cov <- vcov(fit1)
#find solution path
fit.BayesPen <- BayesPen(beta=betahat, beta_cov=cov)
#refit the model
refit <- BayesPen.refit(y,X,fit.BayesPen)
#plot it
BayesPen.plot(refit)
######################
#Confounder Selection
set.seed(1234)
dat <- SimExample(500,model="WPD2")
X <- dat$X
U <- dat$U
W <- cbind(X,U)
y <- dat$y
#fit the full outcome model assuming flat priors on beta
fit1 <- lm(y~W-1)
betahat <- coef(fit1)
cov <- vcov(fit1)
#fit the full exposure model assuming flat priors on beta
fit2 <- lm(X~U-1)
gammahat <- coef(fit2)
#find solution path
fit.BayesPen <- BayesPen(beta=betahat, beta_cov=cov, confounder.weights=c(0,gammahat), force=1)
#refit the model
refit <- BayesPen.refit(y,W,fit.BayesPen)
#plot it
BayesPen.plot(refit)
}
\keyword{ regression }
|
/man/BayesPen.Rd
|
no_license
|
cran/BayesPen
|
R
| false
| false
| 4,119
|
rd
|
\name{BayesPen}
\alias{BayesPen}
\title{
Bayesian Penalized Credible Regions
}
\description{
Fits the Bayesian penalized credible regions method from the posterior mean and covariance of a Bayesian model.
The function performs variable selection (Bondell and Reich 2012) and confounder selection (Wilson and Reich 2014+).
The default is variable selection and confounder selection is only performed if confounder weights are provided.
}
\usage{
BayesPen(beta, beta_cov, joint, force = NULL, confounder.weights, max.steps = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{beta}{
p-vector of posterior means from the fitting of the full regression model. For confounder selection this is the outcome model.
}
\item{beta_cov}{
Posterior covariance matrix corresponding to beta.
}
\item{joint}{
Indicator if joint credible regions approach should be used.
If joint=FALSE the marginal approach of Bondell and Reich (2012) will be used.
The marginal approach is only available for variable selection, not confounder selection.
}
\item{force}{
A vector of columns corresponding to variables that are forced to be included in the model. For example, this may include an intercept.
For confounder selection the exposure(s) of interest should be forced into the model.}
\item{confounder.weights}{
The posterior mean from the exposure model for confounder selection.
For a single exposure this is a p-vector with exposure model regression coefficients in the same order as in beta.
For multiple exposures this is a matrix with p rows and a column for each exposure.
The locations corresponding to exposure(s) in beta can be set to any numeric value; they are not used.
For variable selection this is omitted.
}
\item{max.steps}{
Maximum number of steps to be performed in the LARS algorithm (Hastie and Efron 2013).
}
}
\value{
\item{joint.path}{A complete solution path for the joint credible regions approach. Each row is a model in the solution path with a 1 indicating a variable is included and a 0 indicating it is not included.}
\item{marginal.path}{A complete solution path for the marginal credible regions approach. The p-vector denotes the step at which each covariate is included in the model.}
\item{order.path}{The action returned from lars that shows when each covariate is added to the model.}
\item{order.marg}{The covariate added at each step.}
\item{joint}{Returns the logical joint.}
\item{force}{Returns a vector indicating which variables are forced into the model.}
}
\references{
Bondell, H. D. and Reich, B. J. (2012). Consistent high-dimensional Bayesian variable selection via penalized credible regions. \emph{J. Am. Statist. Assoc}. 107, 1610-1624.
Trevor Hastie and Brad Efron (2013). lars: Least Angle Regression, Lasso and Forward Stagewise. R package version 1.2. http://CRAN.R-project.org/package=lars
Wilson, A. and Reich, B.J. (2014+). Confounder selection via penalized credible regions.
}
\author{
Ander Wilson, Howard D. Bondell, and Brian J. Reich
}
\seealso{
\code{\link{BayesPen.lm}}, \code{\link{BayesPen.lm.confounders}}
}
\examples{
######################
#Variable Selection
set.seed(1234)
dat <- SimExample(500,model="BR1")
X <- dat$X
y <- dat$y
#fit the full model assuming flat priors on beta
fit1 <- lm(y~X-1)
betahat <- coef(fit1)
cov <- vcov(fit1)
#find solution path
fit.BayesPen <- BayesPen(beta=betahat, beta_cov=cov)
#refit the model
refit <- BayesPen.refit(y,X,fit.BayesPen)
#plot it
BayesPen.plot(refit)
######################
#Confounder Selection
set.seed(1234)
dat <- SimExample(500,model="WPD2")
X <- dat$X
U <- dat$U
W <- cbind(X,U)
y <- dat$y
#fit the full outcome model assuming flat priors on beta
fit1 <- lm(y~W-1)
betahat <- coef(fit1)
cov <- vcov(fit1)
#fit the full exposure model assuming flat priors on beta
fit2 <- lm(X~U-1)
gammahat <- coef(fit2)
#find solution path
fit.BayesPen <- BayesPen(beta=betahat, beta_cov=cov, confounder.weights=c(0,gammahat), force=1)
#refit the model
refit <- BayesPen.refit(y,W,fit.BayesPen)
#plot it
BayesPen.plot(refit)
}
\keyword{ regression }
|
#' salamu
#'
#' Generates a greeting message given someone's name.
#' (The literal words "Title" and "Description" in the original roxygen
#' header would have rendered into the generated documentation.)
#'
#' @param name A character vector of one or more names.
#'
#' @return A character vector of greeting messages, one per name.
#'
#' @export
#'
#' @examples
#' salamu("Shel")
#'
salamu <- function(name){
  paste0("Hi ", name, ", I hope you are well.")
}
|
/R/salamu.R
|
permissive
|
Shelmith-Kariuki/rdarasa
|
R
| false
| false
| 250
|
r
|
#' Greet a person by name
#'
#' Builds a short, friendly greeting for the supplied name.
#'
#' @param name Name
#'
#' @return A message
#'
#' @export
#'
#' @examples
#' salamu("Shel")
#'
salamu <- function(name) {
  sprintf("Hi %s, I hope you are well.", name)
}
|
#' Find Trading Frequency
#'
#' This helper function looks for the trading frequency of the input data set.
#' It can identify daily, weekly, monthly or quarterly trading frequency in
#' the data set. (The 80-100 day band detects quarterly spacing; the original
#' docs and error message incorrectly called it "yearly".)
#'
#' @param x The input data set whose trading period we are interested in.
#'   Must contain a \code{date} column -- assumed to be of class \code{Date}
#'   (TODO: confirm against callers).
#'
#' @return trade.freq The number that indicates the trading frequency:
#'   1 = daily, 7 = weekly, 30 = monthly, 90 = quarterly.
trade_freq <- function(x){
  message(paste0("trade_freq with parameter: \n", head(x), "\n\n"))
  date.unique <- sort(unique(x$date))

  ## Create two vectors of dates by cutting off the last and the first trading
  ## dates respectively, then subtract one from the other to get the gap (in
  ## days) between every pair of neighboring trading dates.
  date1 <- date.unique[-length(date.unique)]
  date2 <- date.unique[-1]
  date.diff <- date2 - date1

  ## Classify the frequency from the gaps: a band wins when more than 30% of
  ## the gaps fall inside it. Bands: <= 3 days -> daily, 5-10 -> weekly,
  ## 28-31 -> monthly, 80-100 -> quarterly. Anything else is an error.
  if (length(which(date.diff <= 3)) > 0.3 * length(date.diff)) {
    trade.freq <- 1
  } else if (length(which(date.diff >= 5 & date.diff <= 10)) > 0.3 * length(date.diff)) {
    trade.freq <- 7
  } else if (length(which(date.diff >= 28 & date.diff <= 31)) > 0.3 * length(date.diff)) {
    trade.freq <- 30
  } else if (length(which(date.diff >= 80 & date.diff <= 100)) > 0.3 * length(date.diff)) {
    trade.freq <- 90
  } else {
    stop("It seems that the trading frequency of the input data set is not daily
         or weekly or monthly or quarterly. Please specify that manually.")
  }
  return(trade.freq)
}
|
/R/trade_freq.R
|
no_license
|
rynkwn/backtestGraphics
|
R
| false
| false
| 1,637
|
r
|
#' Find Trading Frequency
#'
#' This helper function looks for the trading frequency of the input data set.
#' It can identify daily, weekly, monthly or quarterly trading frequency in
#' the data set (the 80-100 day band detects quarterly, not yearly, spacing).
#'
#' @param x The input data set whose trading period we are interested in.
#'   Assumed to contain a `date` column of class Date -- TODO confirm.
#'
#' @return trade.freq The number that indicates the trading frequency:
#'   1 = daily, 7 = weekly, 30 = monthly, 90 = quarterly.
trade_freq <- function(x){
message(paste0("trade_freq with parameter: \n", head(x), "\n\n"))
date.unique <- sort(unique(x$date))
## Create two lists of dates by cutting the first and the last trading dates.
## Then subtract one list from the other to calculate the difference (in days)
## between any two neighboring trading dates.
date1 <- date.unique[-length(date.unique)]
date2 <- date.unique[-1]
date.diff <- date2 - date1
## Classify the trading period from the gaps: a band wins when more than 30%
## of the gaps fall inside it.
## NOTE(review): the error message below says "yearly", but the 80-100 day
## band actually detects quarterly spacing (trade.freq = 90).
if(length(which(date.diff <= 3)) > 0.3 * length(date.diff)){
trade.freq <- 1
}
else if(length(which(date.diff >= 5 & date.diff <= 10)) > 0.3 * length(date.diff)){
trade.freq <- 7
}
else if(length(which(date.diff >= 28 & date.diff <= 31)) > 0.3 * length(date.diff)){
trade.freq <- 30
}
else if(length(which(date.diff >= 80 & date.diff <= 100)) > 0.3 * length(date.diff)){
trade.freq <- 90
}
else{
stop("It seems that the trading frequency of the input data set is not daily
or weekly or monthly or yearly. Please specify that manually.")
}
return(trade.freq)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/single_marker_test.R
\name{glma}
\alias{glma}
\title{Single marker association analysis using linear models or linear mixed models}
\usage{
glma(
y = NULL,
X = NULL,
W = NULL,
Glist = NULL,
chr = NULL,
fit = NULL,
verbose = FALSE,
statistic = "mastor",
ids = NULL,
rsids = NULL,
msize = 100,
scale = TRUE
)
}
\arguments{
\item{y}{vector or matrix of phenotypes}
\item{X}{design matrix for factors modeled as fixed effects}
\item{W}{matrix of centered and scaled genotypes}
\item{Glist}{list of information about genotype matrix stored on disk}
\item{chr}{chromosome for which summary statistics are computed}
\item{fit}{list of information about linear mixed model fit (output from greml)}
\item{verbose}{is a logical; if TRUE it prints more details during optimization}
\item{statistic}{single marker test statistic used (currently based on the "mastor" statistics).}
\item{ids}{vector of individuals used in the analysis}
\item{rsids}{vector of marker rsids used in the analysis}
\item{msize}{number of genotype markers used for batch processing}
\item{scale}{logical if TRUE the genotypes have been scaled to mean zero and variance one}
}
\value{
Returns a dataframe (if number of traits = 1) else a list including
\item{coef}{single marker coefficients}
\item{se}{standard error of coefficients}
\item{stat}{single marker test statistic}
\item{p}{p-value}
}
\description{
The function glma performs single marker association analysis between genotype markers and the phenotype
either based on linear model analysis (LMA) or mixed linear model analysis (MLMA).
The basic MLMA approach involves 1) building a genetic relationship matrix (GRM) that models genome-wide
sample structure, 2) estimating the contribution of the GRM to phenotypic variance using a random effects model
(with or without additional fixed effects) and 3) computing association statistics that account for this component
on phenotypic variance.
MLMA methods are the method of choice when conducting association mapping in the presence of sample structure,
including geographic population structure, family relatedness and/or cryptic relatedness. MLMA methods prevent
false positive associations and increase power. The general recommendation when using MLMA is to exclude candidate
markers from the GRM. This can be efficiently implemented via a leave-one-chromosome-out analysis.
Further, it is recommended that analyses of randomly ascertained quantitative traits should include all markers
(except for the candidate marker and markers in LD with the candidate marker) in the GRM, except as follows.
First, the set of markers included in the GRM can be pruned by LD to reduce running time (with association
statistics still computed for all markers). Second, genome-wide significant markers of large effect should be
conditioned out as fixed effects or as an additional random effect (if a large number of associated markers).
Third, when population stratification is less of a concern, it may be useful to use the top associated markers
selected based on the global maximum from out-of sample predictive accuracy.
}
\examples{
# Simulate data
W <- matrix(rnorm(1000000), ncol = 1000)
colnames(W) <- as.character(1:ncol(W))
rownames(W) <- as.character(1:nrow(W))
y <- rowSums(W[, 1:10]) + rowSums(W[, 501:510]) + rnorm(nrow(W))
# Create model
data <- data.frame(y = y, mu = 1)
fm <- y ~ 0 + mu
X <- model.matrix(fm, data = data)
# Linear model analyses and single marker association test
stat <- glma(y=y,X=X,W = W)
head(stat)
\donttest{
# Compute GRM
GRM <- grm(W = W)
# Estimate variance components using REML analysis
fit <- greml(y = y, X = X, GRM = list(GRM), verbose = TRUE)
# Single marker association test
stat <- glma(fit = fit, W = W)
head(stat)
}
}
\references{
Chen, W. M., & Abecasis, G. R. (2007). Family-based association tests for genomewide association scans. The American Journal of Human Genetics, 81(5), 913-926.
Loh, P. R., Tucker, G., Bulik-Sullivan, B. K., Vilhjalmsson, B. J., Finucane, H. K., Salem, R. M., ... & Patterson, N. (2015). Efficient Bayesian mixed-model analysis increases association power in large cohorts. Nature genetics, 47(3), 284-290.
Kang, H. M., Sul, J. H., Zaitlen, N. A., Kong, S. Y., Freimer, N. B., Sabatti, C., & Eskin, E. (2010). Variance component model to account for sample structure in genome-wide association studies. Nature genetics, 42(4), 348-354.
Lippert, C., Listgarten, J., Liu, Y., Kadie, C. M., Davidson, R. I., & Heckerman, D. (2011). FaST linear mixed models for genome-wide association studies. Nature methods, 8(10), 833-835.
Listgarten, J., Lippert, C., Kadie, C. M., Davidson, R. I., Eskin, E., & Heckerman, D. (2012). Improved linear mixed models for genome-wide association studies. Nature methods, 9(6), 525-526.
Listgarten, J., Lippert, C., & Heckerman, D. (2013). FaST-LMM-Select for addressing confounding from spatial structure and rare variants. Nature Genetics, 45(5), 470-471.
Lippert, C., Quon, G., Kang, E. Y., Kadie, C. M., Listgarten, J., & Heckerman, D. (2013). The benefits of selecting phenotype-specific variants for applications of mixed models in genomics. Scientific reports, 3.
Zhou, X., & Stephens, M. (2012). Genome-wide efficient mixed-model analysis for association studies. Nature genetics, 44(7), 821-824.
Svishcheva, G. R., Axenovich, T. I., Belonogova, N. M., van Duijn, C. M., & Aulchenko, Y. S. (2012). Rapid variance components-based method for whole-genome association analysis. Nature genetics, 44(10), 1166-1170.
Yang, J., Zaitlen, N. A., Goddard, M. E., Visscher, P. M., & Price, A. L. (2014). Advantages and pitfalls in the application of mixed-model association methods. Nature genetics, 46(2), 100-106.
Bulik-Sullivan, B. K., Loh, P. R., Finucane, H. K., Ripke, S., Yang, J., Patterson, N., ... & Schizophrenia Working Group of the Psychiatric Genomics Consortium. (2015). LD Score regression distinguishes confounding from polygenicity in genome-wide association studies. Nature genetics, 47(3), 291-295.
}
\author{
Peter Soerensen
}
|
/man/glma.Rd
|
no_license
|
psoerensen/qgg
|
R
| false
| true
| 6,158
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/single_marker_test.R
\name{glma}
\alias{glma}
\title{Single marker association analysis using linear models or linear mixed models}
\usage{
glma(
y = NULL,
X = NULL,
W = NULL,
Glist = NULL,
chr = NULL,
fit = NULL,
verbose = FALSE,
statistic = "mastor",
ids = NULL,
rsids = NULL,
msize = 100,
scale = TRUE
)
}
\arguments{
\item{y}{vector or matrix of phenotypes}
\item{X}{design matrix for factors modeled as fixed effects}
\item{W}{matrix of centered and scaled genotypes}
\item{Glist}{list of information about genotype matrix stored on disk}
\item{chr}{chromosome for which summary statistics are computed}
\item{fit}{list of information about linear mixed model fit (output from greml)}
\item{verbose}{is a logical; if TRUE it prints more details during optimization}
\item{statistic}{single marker test statistic used (currently based on the "mastor" statistics).}
\item{ids}{vector of individuals used in the analysis}
\item{rsids}{vector of marker rsids used in the analysis}
\item{msize}{number of genotype markers used for batch processing}
\item{scale}{logical if TRUE the genotypes have been scaled to mean zero and variance one}
}
\value{
Returns a dataframe (if number of traits = 1) else a list including
\item{coef}{single marker coefficients}
\item{se}{standard error of coefficients}
\item{stat}{single marker test statistic}
\item{p}{p-value}
}
\description{
The function glma performs single marker association analysis between genotype markers and the phenotype
either based on linear model analysis (LMA) or mixed linear model analysis (MLMA).
The basic MLMA approach involves 1) building a genetic relationship matrix (GRM) that models genome-wide
sample structure, 2) estimating the contribution of the GRM to phenotypic variance using a random effects model
(with or without additional fixed effects) and 3) computing association statistics that account for this component
on phenotypic variance.
MLMA methods are the method of choice when conducting association mapping in the presence of sample structure,
including geographic population structure, family relatedness and/or cryptic relatedness. MLMA methods prevent
false positive associations and increase power. The general recommendation when using MLMA is to exclude candidate
markers from the GRM. This can be efficiently implemented via a leave-one-chromosome-out analysis.
Further, it is recommended that analyses of randomly ascertained quantitative traits should include all markers
(except for the candidate marker and markers in LD with the candidate marker) in the GRM, except as follows.
First, the set of markers included in the GRM can be pruned by LD to reduce running time (with association
statistics still computed for all markers). Second, genome-wide significant markers of large effect should be
conditioned out as fixed effects or as an additional random effect (if a large number of associated markers).
Third, when population stratification is less of a concern, it may be useful to use the top associated markers
selected based on the global maximum from out-of sample predictive accuracy.
}
\examples{
# Simulate data
W <- matrix(rnorm(1000000), ncol = 1000)
colnames(W) <- as.character(1:ncol(W))
rownames(W) <- as.character(1:nrow(W))
y <- rowSums(W[, 1:10]) + rowSums(W[, 501:510]) + rnorm(nrow(W))
# Create model
data <- data.frame(y = y, mu = 1)
fm <- y ~ 0 + mu
X <- model.matrix(fm, data = data)
# Linear model analyses and single marker association test
stat <- glma(y=y,X=X,W = W)
head(stat)
\donttest{
# Compute GRM
GRM <- grm(W = W)
# Estimate variance components using REML analysis
fit <- greml(y = y, X = X, GRM = list(GRM), verbose = TRUE)
# Single marker association test
stat <- glma(fit = fit, W = W)
head(stat)
}
}
\references{
Chen, W. M., & Abecasis, G. R. (2007). Family-based association tests for genomewide association scans. The American Journal of Human Genetics, 81(5), 913-926.
Loh, P. R., Tucker, G., Bulik-Sullivan, B. K., Vilhjalmsson, B. J., Finucane, H. K., Salem, R. M., ... & Patterson, N. (2015). Efficient Bayesian mixed-model analysis increases association power in large cohorts. Nature genetics, 47(3), 284-290.
Kang, H. M., Sul, J. H., Zaitlen, N. A., Kong, S. Y., Freimer, N. B., Sabatti, C., & Eskin, E. (2010). Variance component model to account for sample structure in genome-wide association studies. Nature genetics, 42(4), 348-354.
Lippert, C., Listgarten, J., Liu, Y., Kadie, C. M., Davidson, R. I., & Heckerman, D. (2011). FaST linear mixed models for genome-wide association studies. Nature methods, 8(10), 833-835.
Listgarten, J., Lippert, C., Kadie, C. M., Davidson, R. I., Eskin, E., & Heckerman, D. (2012). Improved linear mixed models for genome-wide association studies. Nature methods, 9(6), 525-526.
Listgarten, J., Lippert, C., & Heckerman, D. (2013). FaST-LMM-Select for addressing confounding from spatial structure and rare variants. Nature Genetics, 45(5), 470-471.
Lippert, C., Quon, G., Kang, E. Y., Kadie, C. M., Listgarten, J., & Heckerman, D. (2013). The benefits of selecting phenotype-specific variants for applications of mixed models in genomics. Scientific reports, 3.
Zhou, X., & Stephens, M. (2012). Genome-wide efficient mixed-model analysis for association studies. Nature genetics, 44(7), 821-824.
Svishcheva, G. R., Axenovich, T. I., Belonogova, N. M., van Duijn, C. M., & Aulchenko, Y. S. (2012). Rapid variance components-based method for whole-genome association analysis. Nature genetics, 44(10), 1166-1170.
Yang, J., Zaitlen, N. A., Goddard, M. E., Visscher, P. M., & Price, A. L. (2014). Advantages and pitfalls in the application of mixed-model association methods. Nature genetics, 46(2), 100-106.
Bulik-Sullivan, B. K., Loh, P. R., Finucane, H. K., Ripke, S., Yang, J., Patterson, N., ... & Schizophrenia Working Group of the Psychiatric Genomics Consortium. (2015). LD Score regression distinguishes confounding from polygenicity in genome-wide association studies. Nature genetics, 47(3), 291-295.
}
\author{
Peter Soerensen
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/method-results.R
\docType{methods}
\name{results}
\alias{results}
\alias{results,}
\alias{results.OutriderDataSet}
\alias{results,OutriderDataSet-method}
\title{Accessor function for the 'results' object in an OutriderDataSet object.}
\usage{
results(object, ...)
\S4method{results}{OutriderDataSet}(
object,
padjCutoff = 0.05,
zScoreCutoff = 0,
round = 2,
all = FALSE,
...
)
}
\arguments{
\item{object}{An OutriderDataSet}
\item{...}{Additional arguments, currently not used}
\item{padjCutoff}{The significance threshold to be applied}
\item{zScoreCutoff}{If provided, an additional z-score threshold is applied}
\item{round}{Can be TRUE, defaults to 2, or an integer used for rounding
with \code{\link[base]{round}} to make the output
more user friendly}
\item{all}{By default FALSE, only significant read counts are listed in the
results. If TRUE all results are assembled resulting in a
data.table of length samples x genes}
}
\value{
A data.table where each row is an outlier event and the columns
contain additional information about this event. Eg padj, l2fc
}
\description{
This function assembles a results table of significant outlier events based
on the given filter criteria. The table contains various information
accumulated over the analysis pipeline.
}
\examples{
ods <- makeExampleOutriderDataSet()
\dontshow{
ods <- ods[1:10,1:10]
}
ods <- OUTRIDER(ods)
res <- results(ods, all=TRUE)
res
}
|
/man/results.Rd
|
permissive
|
loipf/OUTRIDER
|
R
| false
| true
| 1,525
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/method-results.R
\docType{methods}
\name{results}
\alias{results}
\alias{results,}
\alias{results.OutriderDataSet}
\alias{results,OutriderDataSet-method}
\title{Accessor function for the 'results' object in an OutriderDataSet object.}
\usage{
results(object, ...)
\S4method{results}{OutriderDataSet}(
object,
padjCutoff = 0.05,
zScoreCutoff = 0,
round = 2,
all = FALSE,
...
)
}
\arguments{
\item{object}{An OutriderDataSet}
\item{...}{Additional arguments, currently not used}
\item{padjCutoff}{The significance threshold to be applied}
\item{zScoreCutoff}{If provided, an additional z-score threshold is applied}
\item{round}{Can be TRUE, defaults to 2, or an integer used for rounding
with \code{\link[base]{round}} to make the output
more user friendly}
\item{all}{By default FALSE, only significant read counts are listed in the
results. If TRUE all results are assembled resulting in a
data.table of length samples x genes}
}
\value{
A data.table where each row is an outlier event and the columns
contain additional information about this event. Eg padj, l2fc
}
\description{
This function assembles a results table of significant outlier events based
on the given filter criteria. The table contains various information
accumulated over the analysis pipeline.
}
\examples{
ods <- makeExampleOutriderDataSet()
\dontshow{
ods <- ods[1:10,1:10]
}
ods <- OUTRIDER(ods)
res <- results(ods, all=TRUE)
res
}
|
# 5일차 # 엑셀파일 읽는 방법, 별도로 설치하는 환경 있어야.#텍스트, 엑셀파일 읽기함.
# 범주형, 연속형. 두가지의 유형 데이터. 단일 변수형태냐, 단일 변수형태냐. 0#다변 일변량이냐 단수량이냐? 문제정의,데이터수집, 데이터전처리, 탐색적, 데이터분석, 보고서, 발표 #이 절차는 순차적이긴 하지만 계속 피드백하는 과정을 거쳐.
#분석절차 1. 문제정의 2. 자료수집 3. 자료 전처리 4. 자료 탐색(EDA) "오늘 18,내일 19 자료 탐색에 대해서 배울 것. 단일 변수 범주형 자료, 단일 변수 범주형, 연속형. 전처리라는 것은 수집한 데이터가 완벽한 자료가 아니므로 완벽한 형태로 수정하는 작업을 대이터 전처리라고 한다. 데이터 전처리 하는 과정에 더 많은 시간 걸릴 수 있어. 3,4, 핵심. 우리의 실습도 이것이 될 것. 정해진 데이터를 주겠다. 직접 수집하는 것은 다음주에 하겠다.
#GIT을 사용하는 것에 대하여서. #관리자권한으로 실행하면 오류가 줄 것이다. #패스명령에 익숙해지도록 합시다. 안되면 단계단계 끊어서 할 수 있도록 합니다.
#명령하고 한칸 뛰고 쓰는 습관을 가지도록 합니다. []있으면 디렉토리, 없으면 파일.
#git status 추적하지 않은 목록의 파일이 나옵니다. #git hub desktop
# 명령을 내리고, 화면에 나타나는 메시지를 잘 읽어야. 결정을 해야.
#git으로 관리할 필요가 있는 파일들은 어떤 파일인가? git창엔 관리안하고 있는 것들이 주욱 나 와. 관리할 것만 선택하면 된다. git add, git commit 했던 동작들을 git hub desktop으로 간 편 히 할 수 있어. # history에 내용들. # file - new repository - " 이것이 깃 인 잇과 같은 것입니다.
#여기서 사용한 것들을 집에서 가져다 쓸 수 있는 방법인 것. git hub 파일 사이즈에 제한은 있다. 25mb넘으면 안됩니다. 그럴 땐 파일을 분할해서 올려야. # 집에서 깃허브, 피일- 클론 리파지토리, 클론이란 가지고 오는 것. 사이트에서. #clone 복사본 만들기 #클론 리파지토리 이용해서, push 다운로드, # 푸시에러 풀에라 날수, 이때는 업로드 파일. 동기화를 잘할 수 있어야.
#2.1 txt/excel 파일 읽기
#2.2 자료의 종류
#2.3 단일 변수 범주형 자료 탐색
#2.4 단일 변수 연속형 자료 탐색
setwd("C:\\tomwaitz\\WorkspaceR")
setwd("C:/tomwaitz/WorkspaceR")
# "directory 경로 : directory 변경 함수. 스크립트파일 위치 디렉토리 데이타위치를 분리해서 저장하는 경우가 왕왕있어. 데이터랑 스크립트가 다를 떄. 이 때 setwd로 변경해주는 것이다. 더블 "" 포테이션? 이 안에다가 넣어야. 그리고 '\' 두개씩 써, 그러니까 하나씩 추가.
#text 파일 읽기
# Read the delimited text file; header = T treats the first row as column names.
df <- read.table(file = "airquality.txt", header = T)
df  # fixed: was 'd f' (a syntax error); print the data frame
class(df)
#Excel 파일 읽기
#Excel 파일 읽기
install.packages("xlsx") # excel 파일 읽을 때 패키지
install.packages("rJava") # Java 실행 패키지
# 기본 패키지 외에 설치된 패키지 사용 - library load 작업.
library(rJava) # 라이브러리 순서 이거대로 지켜야한다.
library(xlsx) # 껐다 키면 로드작업만 다시
df.xlsx <- read.xlsx(file = "airquality.xlsx",
sheetIndex = 1,
encoding = "UTF-8")
df.xlsx # r의 장점 내가 기능이 필요하면 공개용으로 제공이 된다. # change
class(df.xlsx); str(df.xlsx);head(df.xlsx);tail(df.xlsx)
# 파일을 왜 못 읽을까 위치가 안 맞기 때문. # 스크립트에 남겨놓은 것을 중요하게 생각해야.
setwd("C:\\tomwaitz\\WorkspaceR")
score <- c(76, 84, 69, 5, 95, 6, 85, 71, 88, 84)
which(score ==69)
which(score>=85)
max(score)
which.max(score) #최고값의 인덱스
min(score) #최저값의 인덱스
which.min(score)
idx <- which(score>=60)
score[idx] <- 61
score
idx <- which(df.xlsx[,1:2]=="NA", arr.ind = T)
# arr.ind=T : 해당조건의 행/열값을 확인 할때
idx
#Text File 읽기
# Read the text file again and inspect its structure with the usual helpers.
df.txt <- read.table(file="airquality.txt",
                     header=T, # first line holds the column names; verify against the raw file
                     encoding = "UTF-8")  # fixed: was "UTP-8" (typo)
df.txt
class(df.txt)
str(df.txt)
head(df.txt)
tail(df.txt)
#엑셀, 탭으로 분류된 텍스트!? read.table
#csv, text, excel 파일 읽기. 전부다 read.csv, read.table read.xlsx
#XML(exfended Markup Language), JSON(Java Script Object Notation) : jason 많이 써
# 자바 스크립트, 라고 해서 웹 프로그래밍 만들 때 쓰는 언어. 파이썬아는 사람은 jason을 이해하기가 더 용이하다고 한다.
#자료의 종류, 자료의 종류에 따라 적용할 수 있는 분석 방법이 다르다. 분석을 해야할 자료를 가지고 있을 떄 1차적으로 해야 할 일은 해당 자료가 어떤 분류에 속하는지를 파악하는 일이다. -자료 특성에 따른 분류 분석 대상 자료의 특성에 따라 범주형 자료와 연속형 자료로 분류한다. 1. 범주형 자료 (categorical data) : -범주 또는 그룹으로 구분할 수 있는 값 - 범주형 자료의 값은 기본적으로 숫자로 표현할 수 없고, 대소비교나 산술연산이 적용되지 않는다. -범주형 자료는 대체로 문자형 값을 갖는데, 숫자로 표기할 수 있으나 계산이 가능한 연속형 자료가 되는 것은 아니다. 자료분류할 수 있어야.-------------팩터형 #2.연속형자료(이산형자료):크기가 있는 숫자들로 구성된 자료,연속형자료의 값들은 대소비교,산술연산이가능하기때문에 다양한 분석방법이 존재한다.
#자료(data) : 어떤 주제를 가지고 값을 모아 놓은 것 전체 관측값 (observation) : 자료에 포함된 값들 통계학에서의 변수는 "연구, 조사, 관찰하고 싶은 대상의 특성(feature)" 변수 개수에 따라 1. 단일변수 자료 ; 일변량 자료 하나의 변수로 구성된 자료, 2. 다중변수자료, 다변량 자료, 두개 이상의 변수로 구성된 자료 특별히 두개의 변수로 구성된 자료를 이변량 자료 단일 변수 자료는 vector에 저장하여 분석 다중변수자료는 matrix, dataframe에 저장하여 분석
#변수의 개수와 자료의 특성에 따른 분류 1. 단일변수 범주형자료 2. 단일변수 연속형 자료 3. 다중변수 범주형자료 4. 다중변수 연속형 자료
#2.3 단일 변수 범주형 자료 탐색 # 단일 변수 범주형 자료 : 특성이 하나이면서 자료의 특성이 범주형인 자료, 범주형 자료에 할 수 있는 기본 작업 : 자료에 포함된 관측값들의 종류별로 개수를 세는 것, 중류별로 비율을 구하는 것 시각화는 막대/원 그래프를 사용 테이블함수 통해서 몇개 정도인지나 그정도 알수 있다. 도수분표표 <- 종류별로 개수를 세는 것, 종류별로 비율을 구하는 것, 시각화는 막대/원 그래프를 사용. 시각화라고 하는 것은 GUI, 그림으로 보여주겠다는 것. 지도로 표현해주겠다고. 대부분은 그래프를 이용한다. #ML은 빅데이터. 머신 러닝, 딥 러닝, 데이터분석이 없으면 수행하기 어려워. 하나의 변수.#ML은 빅데이터. 머신 러닝 딥 러닝, 데이터분석이 없으면 수행하기 어려워.
favorite <- c('winter','summer','spring', 'summer', 'summer','fall',
'fall','summer', 'spring', 'spring'); favorite
class(favorite) ; str(favorite) ; dim(favorite)
# : 봄, 여름, 가을, 겨울로 그룹화가 가능하다. 이런 내용을 범주형 데이터라고 함. R이 팩터라고 이야기해줄 수 있지만. 우리서 여기는 백터 형식으로 문자만 넣었을 뿐이야.
as.factor(favorite)
favorite
#################### 단일변수의 범주형 <- 도수분포표
table(favorite)#도수분포표작성
table(favorite)/length(favorite)#도수분포비율
ds <- table(favorite);
ds
class(ds)
str(ds)
dim(ds)
barplot(ds, main = 'favorite season') #기본 막대 그래프
#ds 도수분포표, maind은 제목인 것이다
ds.new <- ds[c(2,3,1,4)] # 내가 원하는 순서로 만들 수 있어
ds.new
barplot(ds.new, main = 'favorite season')
pie(ds.new, main = 'favorite season')
# Frequency table of favorite colours coded as integers (1..3), then label
# the categories with colour names and draw a bar chart and a pie chart.
favorite.color <- c(2, 3, 2, 1, 1, 2, 2, 1, 3, 2, 1, 3, 2, 1, 2)  # fixed typo: was 'favoirte.color'
str(favorite.color)
ds <- table(favorite.color)
ds
barplot(ds, main = 'favorite season')
# fixed: was assigned to 'color' while 'colors' was used below, so
# 'names(ds) <- colors' picked up the base function grDevices::colors and errored
colors <- c('green', 'red', 'blue')
names(ds) <- colors
ds
barplot(ds, main = 'favorite season',
        col = colors)
pie(ds, main = 'favorite season', col = colors)
#############################################################
favorite.color <- c(2, 3, 2, 1, 1, 2, 2,
1, 3, 2, 1, 3, 2, 1, 2)
ds <- table( favorite.color )
ds
barplot( ds, main = 'favorite season' )
colors <- c('black', 'red', 'blue') ############### 색깔 어디서부터 어디까지 있나.
names(ds) <- colors;
ds
barplot(ds, main = 'favorite season',
col = colors)
pie(ds, main = 'favorite season',
col = colors)
#
# 단일 변수 연속형 자료 탐색
# 단일변수연속형 자료 : 특성이 하나이면서 자료의 특성이 연속형인 자료 연속형 자료는 관측값들이 크기를 가지므로 다양한 분석방법 존재 #1. 평균 : 하나의 값으로 전체를 대표할 수 있는 값, 이상 값에 영향을 받는다. 2. 중앙값 / 자료의 값들을 크기순으로 정려하였을 떄 가장 중앙에 위한 값, 이상값에 영향을 받지 않는다. 3. 절사평균 자료의 관측 값들 중에서 작은 값들의 하위 n%와 큰 값의 상위 n%를 제외하고 중간에 있는 나머지값들만 가지고 평균을 계산하는 방식 4. 사분위수/ 주어진 자료에 있는 값들을 크기순으로 나열했을 때 4등분하는 지점에 있는 값 1사분위수(Q1), 2사분위수(Q2, 중앙값과 동일), 3사분위수(Q3), 전체 자료를 4개로 나누므로 4개 구간은 25%의 자료 가 존재 5. 산포(distribution) : 주어진 자료에 있는 값들이 퍼져 있는정도분산(variance) : 주어진 자료의 각각의 값이 평균으로부터 떨어져있는 정도를 게산하여 합산한 후 값들의 개수로 나누어 계산 #표준편차(standard deviation) : 분산의 제곱근으로 계산####################3 계산이 가능한 것은 연속형이라고 볼 수.#### 어떤 범위를 벗어나면은 '이상치'라고 한다. 이상치 때문에 분석에 영향을 미칠 수 있어. 전처리 때에 이상치를 제거해준다. 탐색적데이터 분석에서 우리가 하는 일.
###############R은 도구일 뿐이다. - 연속형 자료의 시각화 따라빠빠빠
# 시각화는 히스토그램(Histogram)과 상자 그래프(box plot)를 사용
weight <- c(60, 62,64,65,68,69)
weight
weight.heavy <- c(weight, 120)
weight.heavy
# "120"은 이상치라고 할 수. #이상치는 어떤 기준으로 세워야할까.
mean(weight)
mean(weight.heavy)
median(weight)
median(weight.heavy)
mean(weight, trim=0.2) #상하위 20% 떼어 내겠다.
mean(weight.heavy, trim = 0.2) #
####################기계적으로 보면 안된다는 말.
#사분위수
quantile(weight.heavy)
quantile(weight.heavy, (0:10)/10)
#(0:10)구간을 몇개로 나눌것인지를 지정
#0.1~1.0 사이 백분율로 10%~100%
summary(weight.heavy)#summary를 많이 사용할 듯
# 산포
var(weight) # 분산
sd(weight)#표준편차
range(weight)#값의 범위(최소값과 최대값)
diff(range(weight))#최대값과 최소값의 차이
#연속형 자료 시각화
#histogram : 연속형 자료의 분포를 시각화하는 도구 연속형 자료에서 구간을 나누고 구간에 속한 값들의 개수를 세는 방법으로 사용
# Explore the built-in 'cars' data set: extract the stopping-distance column
# (by position and by name) and compute boxplot summary statistics.
class(cars)
str(cars)
dist <- cars[,2]     # second column by position ...
dist <- cars$dist    # ... same column by name
dist
# fixed: was 'boxplot.st(ats(dist)' -- garbled call with unbalanced parentheses
boxplot.stats(dist)  # $stats: quartiles, $n: obs count, $conf: median CI, $out: outliers
cars$dist
a <- cars$dist
a
sort(a, decreasing = T)
############################### 히스토그램 그리기
hist(dist, main = "Histogram for 제동거리",
xlab = "제동거리", ylab = "빈도수",
border="blue", col = 'green',
las=1, breaks =5 ) # las x축 바로할건지 눕힐건지
hist
rm(list=ls())
cars
#상자 그래프(boxplot, 상자 수염 그래프) # 사분위수를 그래프형태로 시각화하는 도구#상자그래프는 하나의 그래프로 데이터의 분포형태를 포함한 다양한 정보를 전달 - 자료의 전반적인 분포를 이해하는데 도움-구체적인 최소/최대/중앙값을 알기는 어렵다.
boxplot (dist, main = '자동차 제동거리')
boxplot.stats(dist)
########################## 우리가 하는 모든 작업은 데이터를 이해하는 작업을 하고 있는 것, 이미 발생된 데이터를 가지고 한다. 그리고 이면의 데이터를 파악, 유추하는 것이다. 이전에는 분석할 수 있을만한 자원들이 없었던 것이다. 이제는 모든 것을 할 수. 모든 것을 파악할 수 있다는 말인 듯. ^^ <- 데이터의 이면을 찾기 위해서.
|
/M20200518.R
|
no_license
|
peolgok/WorkspaceR
|
R
| false
| false
| 13,489
|
r
|
# 5일차 # 엑셀파일 읽는 방법, 별도로 설치하는 환경 있어야.#텍스트, 엑셀파일 읽기함.
# 범주형, 연속형. 두가지의 유형 데이터. 단일 변수형태냐, 단일 변수형태냐. 0#다변 일변량이냐 단수량이냐? 문제정의,데이터수집, 데이터전처리, 탐색적, 데이터분석, 보고서, 발표 #이 절차는 순차적이긴 하지만 계속 피드백하는 과정을 거쳐.
#분석절차 1. 문제정의 2. 자료수집 3. 자료 전처리 4. 자료 탐색(EDA) "오늘 18,내일 19 자료 탐색에 대해서 배울 것. 단일 변수 범주형 자료, 단일 변수 범주형, 연속형. 전처리라는 것은 수집한 데이터가 완벽한 자료가 아니므로 완벽한 형태로 수정하는 작업을 대이터 전처리라고 한다. 데이터 전처리 하는 과정에 더 많은 시간 걸릴 수 있어. 3,4, 핵심. 우리의 실습도 이것이 될 것. 정해진 데이터를 주겠다. 직접 수집하는 것은 다음주에 하겠다.
#GIT을 사용하는 것에 대하여서. #관리자권한으로 실행하면 오류가 줄 것이다. #패스명령에 익숙해지도록 합시다. 안되면 단계단계 끊어서 할 수 있도록 합니다.
#명령하고 한칸 뛰고 쓰는 습관을 가지도록 합니다. []있으면 디렉토리, 없으면 파일.
#git status 추적하지 않은 목록의 파일이 나옵니다. #git hub desktop
# 명령을 내리고, 화면에 나타나는 메시지를 잘 읽어야. 결정을 해야.
#git으로 관리할 필요가 있는 파일들은 어떤 파일인가? git창엔 관리안하고 있는 것들이 주욱 나 와. 관리할 것만 선택하면 된다. git add, git commit 했던 동작들을 git hub desktop으로 간 편 히 할 수 있어. # history에 내용들. # file - new repository - " 이것이 깃 인 잇과 같은 것입니다.
#여기서 사용한 것들을 집에서 가져다 쓸 수 있는 방법인 것. git hub 파일 사이즈에 제한은 있다. 25mb넘으면 안됩니다. 그럴 땐 파일을 분할해서 올려야. # 집에서 깃허브, 피일- 클론 리파지토리, 클론이란 가지고 오는 것. 사이트에서. #clone 복사본 만들기 #클론 리파지토리 이용해서, push 다운로드, # 푸시에러 풀에라 날수, 이때는 업로드 파일. 동기화를 잘할 수 있어야.
#2.1 txt/excel 파일 읽기
#2.2 자료의 종류
#2.3 단일 변수 범주형 자료 탐색
#2.4 단일 변수 연속형 자료 탐색
setwd("C:\\tomwaitz\\WorkspaceR")
setwd("C:/tomwaitz/WorkspaceR")
# "directory 경로 : directory 변경 함수. 스크립트파일 위치 디렉토리 데이타위치를 분리해서 저장하는 경우가 왕왕있어. 데이터랑 스크립트가 다를 떄. 이 때 setwd로 변경해주는 것이다. 더블 "" 포테이션? 이 안에다가 넣어야. 그리고 '\' 두개씩 써, 그러니까 하나씩 추가.
#text 파일 읽기
# Read the delimited text file; header = T treats the first row as column names.
df <- read.table(file = "airquality.txt", header = T)
df  # fixed: was 'd f' (a syntax error); print the data frame
class(df)
#Excel 파일 읽기
#Excel 파일 읽기
install.packages("xlsx") # excel 파일 읽을 때 패키지
install.packages("rJava") # Java 실행 패키지
# 기본 패키지 외에 설치된 패키지 사용 - library load 작업.
library(rJava) # 라이브러리 순서 이거대로 지켜야한다.
library(xlsx) # 껐다 키면 로드작업만 다시
df.xlsx <- read.xlsx(file = "airquality.xlsx",
sheetIndex = 1,
encoding = "UTF-8")
df.xlsx # r의 장점 내가 기능이 필요하면 공개용으로 제공이 된다. # change
class(df.xlsx); str(df.xlsx);head(df.xlsx);tail(df.xlsx)
# 파일을 왜 못 읽을까 위치가 안 맞기 때문. # 스크립트에 남겨놓은 것을 중요하게 생각해야.
setwd("C:\\tomwaitz\\WorkspaceR")
score <- c(76, 84, 69, 5, 95, 6, 85, 71, 88, 84)
which(score ==69)
which(score>=85)
max(score)
which.max(score) #최고값의 인덱스
min(score) #최저값의 인덱스
which.min(score)
idx <- which(score>=60)
score[idx] <- 61
score
idx <- which(df.xlsx[,1:2]=="NA", arr.ind = T)
# arr.ind=T : 해당조건의 행/열값을 확인 할때
idx
#Text File 읽기
# Read the text file again and inspect its structure with the usual helpers.
df.txt <- read.table(file="airquality.txt",
                     header=T, # first line holds the column names; verify against the raw file
                     encoding = "UTF-8")  # fixed: was "UTP-8" (typo)
df.txt
class(df.txt)
str(df.txt)
head(df.txt)
tail(df.txt)
#엑셀, 탭으로 분류된 텍스트!? read.table
#csv, text, excel 파일 읽기. 전부다 read.csv, read.table read.xlsx
#XML(exfended Markup Language), JSON(Java Script Object Notation) : jason 많이 써
# 자바 스크립트, 라고 해서 웹 프로그래밍 만들 때 쓰는 언어. 파이썬아는 사람은 jason을 이해하기가 더 용이하다고 한다.
#자료의 종류, 자료의 종류에 따라 적용할 수 있는 분석 방법이 다르다. 분석을 해야할 자료를 가지고 있을 떄 1차적으로 해야 할 일은 해당 자료가 어떤 분류에 속하는지를 파악하는 일이다. -자료 특성에 따른 분류 분석 대상 자료의 특성에 따라 범주형 자료와 연속형 자료로 분류한다. 1. 범주형 자료 (categorical data) : -범주 또는 그룹으로 구분할 수 있는 값 - 범주형 자료의 값은 기본적으로 숫자로 표현할 수 없고, 대소비교나 산술연산이 적용되지 않는다. -범주형 자료는 대체로 문자형 값을 갖는데, 숫자로 표기할 수 있으나 계산이 가능한 연속형 자료가 되는 것은 아니다. 자료분류할 수 있어야.-------------팩터형 #2.연속형자료(이산형자료):크기가 있는 숫자들로 구성된 자료,연속형자료의 값들은 대소비교,산술연산이가능하기때문에 다양한 분석방법이 존재한다.
#자료(data) : 어떤 주제를 가지고 값을 모아 놓은 것 전체 관측값 (observation) : 자료에 포함된 값들 통계학에서의 변수는 "연구, 조사, 관찰하고 싶은 대상의 특성(feature)" 변수 개수에 따라 1. 단일변수 자료 ; 일변량 자료 하나의 변수로 구성된 자료, 2. 다중변수자료, 다변량 자료, 두개 이상의 변수로 구성된 자료 특별히 두개의 변수로 구성된 자료를 이변량 자료 단일 변수 자료는 vector에 저장하여 분석 다중변수자료는 matrix, dataframe에 저장하여 분석
#변수의 개수와 자료의 특성에 따른 분류 1. 단일변수 범주형자료 2. 단일변수 연속형 자료 3. 다중변수 범주형자료 4. 다중변수 연속형 자료
#2.3 단일 변수 범주형 자료 탐색 # 단일 변수 범주형 자료 : 특성이 하나이면서 자료의 특성이 범주형인 자료, 범주형 자료에 할 수 있는 기본 작업 : 자료에 포함된 관측값들의 종류별로 개수를 세는 것, 중류별로 비율을 구하는 것 시각화는 막대/원 그래프를 사용 테이블함수 통해서 몇개 정도인지나 그정도 알수 있다. 도수분표표 <- 종류별로 개수를 세는 것, 종류별로 비율을 구하는 것, 시각화는 막대/원 그래프를 사용. 시각화라고 하는 것은 GUI, 그림으로 보여주겠다는 것. 지도로 표현해주겠다고. 대부분은 그래프를 이용한다. #ML은 빅데이터. 머신 러닝, 딥 러닝, 데이터분석이 없으면 수행하기 어려워. 하나의 변수.#ML은 빅데이터. 머신 러닝 딥 러닝, 데이터분석이 없으면 수행하기 어려워.
favorite <- c('winter','summer','spring', 'summer', 'summer','fall',
'fall','summer', 'spring', 'spring'); favorite
class(favorite) ; str(favorite) ; dim(favorite)
# : 봄, 여름, 가을, 겨울로 그룹화가 가능하다. 이런 내용을 범주형 데이터라고 함. R이 팩터라고 이야기해줄 수 있지만. 우리서 여기는 백터 형식으로 문자만 넣었을 뿐이야.
as.factor(favorite)
favorite
#################### 단일변수의 범주형 <- 도수분포표
table(favorite)#도수분포표작성
table(favorite)/length(favorite)#도수분포비율
ds <- table(favorite);
ds
class(ds)
str(ds)
dim(ds)
barplot(ds, main = 'favorite season') #기본 막대 그래프
#ds 도수분포표, maind은 제목인 것이다
ds.new <- ds[c(2,3,1,4)] # 내가 원하는 순서로 만들 수 있어
ds.new
barplot(ds.new, main = 'favorite season')
pie(ds.new, main = 'favorite season')
# Frequency table of favorite colours coded as integers (1..3), then label
# the categories with colour names and draw a bar chart and a pie chart.
favorite.color <- c(2, 3, 2, 1, 1, 2, 2, 1, 3, 2, 1, 3, 2, 1, 2)  # fixed typo: was 'favoirte.color'
str(favorite.color)
ds <- table(favorite.color)
ds
barplot(ds, main = 'favorite season')
# fixed: was assigned to 'color' while 'colors' was used below, so
# 'names(ds) <- colors' picked up the base function grDevices::colors and errored
colors <- c('green', 'red', 'blue')
names(ds) <- colors
ds
barplot(ds, main = 'favorite season',
        col = colors)
pie(ds, main = 'favorite season', col = colors)
#############################################################
favorite.color <- c(2, 3, 2, 1, 1, 2, 2,
1, 3, 2, 1, 3, 2, 1, 2)
ds <- table( favorite.color )
ds
barplot( ds, main = 'favorite season' )
colors <- c('black', 'red', 'blue') ############### 색깔 어디서부터 어디까지 있나.
names(ds) <- colors;
ds
barplot(ds, main = 'favorite season',
col = colors)
pie(ds, main = 'favorite season',
col = colors)
#
# 단일 변수 연속형 자료 탐색
# 단일변수연속형 자료 : 특성이 하나이면서 자료의 특성이 연속형인 자료 연속형 자료는 관측값들이 크기를 가지므로 다양한 분석방법 존재 #1. 평균 : 하나의 값으로 전체를 대표할 수 있는 값, 이상 값에 영향을 받는다. 2. 중앙값 / 자료의 값들을 크기순으로 정려하였을 떄 가장 중앙에 위한 값, 이상값에 영향을 받지 않는다. 3. 절사평균 자료의 관측 값들 중에서 작은 값들의 하위 n%와 큰 값의 상위 n%를 제외하고 중간에 있는 나머지값들만 가지고 평균을 계산하는 방식 4. 사분위수/ 주어진 자료에 있는 값들을 크기순으로 나열했을 때 4등분하는 지점에 있는 값 1사분위수(Q1), 2사분위수(Q2, 중앙값과 동일), 3사분위수(Q3), 전체 자료를 4개로 나누므로 4개 구간은 25%의 자료 가 존재 5. 산포(distribution) : 주어진 자료에 있는 값들이 퍼져 있는정도분산(variance) : 주어진 자료의 각각의 값이 평균으로부터 떨어져있는 정도를 게산하여 합산한 후 값들의 개수로 나누어 계산 #표준편차(standard deviation) : 분산의 제곱근으로 계산####################3 계산이 가능한 것은 연속형이라고 볼 수.#### 어떤 범위를 벗어나면은 '이상치'라고 한다. 이상치 때문에 분석에 영향을 미칠 수 있어. 전처리 때에 이상치를 제거해준다. 탐색적데이터 분석에서 우리가 하는 일.
###############R은 도구일 뿐이다. - 연속형 자료의 시각화 따라빠빠빠
# 시각화는 히스토그램(Histogram)과 상자 그래프(box plot)를 사용
weight <- c(60, 62,64,65,68,69)
weight
weight.heavy <- c(weight, 120)
weight.heavy
# "120"은 이상치라고 할 수. #이상치는 어떤 기준으로 세워야할까.
mean(weight)
mean(weight.heavy)
median(weight)
median(weight.heavy)
mean(weight, trim=0.2) #상하위 20% 떼어 내겠다.
mean(weight.heavy, trim = 0.2) #
####################기계적으로 보면 안된다는 말.
#사분위수
quantile(weight.heavy)
quantile(weight.heavy, (0:10)/10)
#(0:10)구간을 몇개로 나눌것인지를 지정
#0.1~1.0 사이 백분율로 10%~100%
summary(weight.heavy)#summary를 많이 사용할 듯
# 산포
var(weight) # 분산
sd(weight)#표준편차
range(weight)#값의 범위(최소값과 최대값)
diff(range(weight))#최대값과 최소값의 차이
#연속형 자료 시각화
#histogram : 연속형 자료의 분포를 시각화하는 도구 연속형 자료에서 구간을 나누고 구간에 속한 값들의 개수를 세는 방법으로 사용
# Explore the built-in 'cars' data set: extract the stopping-distance column
# (by position and by name) and compute boxplot summary statistics.
class(cars)
str(cars)
dist <- cars[,2]     # second column by position ...
dist <- cars$dist    # ... same column by name
dist
# fixed: was 'boxplot.st(ats(dist)' -- garbled call with unbalanced parentheses
boxplot.stats(dist)  # $stats: quartiles, $n: obs count, $conf: median CI, $out: outliers
cars$dist
a <- cars$dist
a
sort(a, decreasing = T)
############################### 히스토그램 그리기
hist(dist, main = "Histogram for 제동거리",
xlab = "제동거리", ylab = "빈도수",
border="blue", col = 'green',
las=1, breaks =5 ) # las x축 바로할건지 눕힐건지
hist
rm(list=ls())
cars
#상자 그래프(boxplot, 상자 수염 그래프) # 사분위수를 그래프형태로 시각화하는 도구#상자그래프는 하나의 그래프로 데이터의 분포형태를 포함한 다양한 정보를 전달 - 자료의 전반적인 분포를 이해하는데 도움-구체적인 최소/최대/중앙값을 알기는 어렵다.
boxplot (dist, main = '자동차 제동거리')
boxplot.stats(dist)
########################## 우리가 하는 모든 작업은 데이터를 이해하는 작업을 하고 있는 것, 이미 발생된 데이터를 가지고 한다. 그리고 이면의 데이터를 파악, 유추하는 것이다. 이전에는 분석할 수 있을만한 자원들이 없었던 것이다. 이제는 모든 것을 할 수. 모든 것을 파악할 수 있다는 말인 듯. ^^ <- 데이터의 이면을 찾기 위해서.
|
# Fuzzer-generated regression input (valgrind harness): calls
# dynutils::project_to_segments() with a 5x7 point matrix containing extreme
# double values and degenerate 1x1 segment endpoint matrices, then prints the
# structure of the result.
# NOTE(review): 'end' and 'start' are NULL -- presumably exercising NULL-input
# handling; confirm against the project_to_segments() signature.
testlist <- list(end = NULL, start = NULL, x = structure(c(4.65661649758392e-10, 1.39067099575524e-309, 2.32903286132618e+96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), segment_end = structure(0, .Dim = c(1L, 1L)), segment_start = structure(0, .Dim = c(1L, 1L)))
result <- do.call(dynutils::project_to_segments,testlist)
str(result)
|
/dynutils/inst/testfiles/project_to_segments/AFL_project_to_segments/project_to_segments_valgrind_files/1609871734-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 409
|
r
|
# Fuzzer-generated regression input (valgrind harness): calls
# dynutils::project_to_segments() with a 5x7 point matrix containing extreme
# double values and degenerate 1x1 segment endpoint matrices, then prints the
# structure of the result.
# NOTE(review): 'end' and 'start' are NULL -- presumably exercising NULL-input
# handling; confirm against the project_to_segments() signature.
testlist <- list(end = NULL, start = NULL, x = structure(c(4.65661649758392e-10, 1.39067099575524e-309, 2.32903286132618e+96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), segment_end = structure(0, .Dim = c(1L, 1L)), segment_start = structure(0, .Dim = c(1L, 1L)))
result <- do.call(dynutils::project_to_segments,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myfuncs-package.R
\docType{package}
\name{myfuncs-package}
\alias{myfuncs-package}
\alias{myfuncs}
\title{My collection of utility functions}
\description{
A collection of utility functions
}
\details{
A collection of utility functions
}
\section{Shortcuts}{
\itemize{
\item \code{\link{catf}} and \code{\link{printf}} implement printf() like functions
\item \code{\link{os}} display memory usage
\item \link{shortcuts}: \code{l}, \code{h} and \code{p}: wrappers for \code{length}, \code{head} and \code{paste}
\item \code{\link{msd}} and \code{\link{ss}} mean and sd; sum of squares
\item \code{\link{zz}} save everything
\item \code{\link{myfuncs.reload}} reload the myfuncs package
}
}
\section{Graphics and plotting}{
\itemize{
\item \code{\link{all.ablines}} Draw three lines (horizontal at 0, vertical at 0, and diagonal)
\item \code{\link{arrayplot}} Show image representation of a microarray
\item \code{\link{showgene}} beeswarm + boxplot for a gene
\item \code{\link{myfaces}} My version of Chernoff faces
\item \code{\link{plotGenes}} Show expression for a number of genes in all arrays
\item \code{\link{dotpair}} A version of \code{pairs} showing also correlation coefficients
\item \code{\link{genePairs}} A version of \code{pairs} showing also correlation coefficients for large numbers of data points (using smoothScatter)
\item \code{\link{myPlotDensities}} A replacement for
\code{plotDensities} from \code{limma} for single-color arrays and for
coloring array groups
\item \code{\link{pairedswarm}} Beeswarm with lines linking pairs
\item \code{\link{clustCompPlot}} compare different clusterings in an overview plot
\item \code{\link{mypalette}} Nice color palette
\item \code{\link{pcaR2plot}} Calculate and visualize PCA regression
\item \code{\link{ttplot}} Plot a top table
\item \code{\link{copyan}} Produce an annotated PDF copy of a plot
}
}
\section{Analysis}{
\itemize{
\item \code{\link{getGOrilla}} run GOrilla from R
\item \code{\link{topTableAll}} produce a topTable-like output for all contrasts in a fit object
\item \code{\link{ttmake}} produce a topTable-like output including the msd measure
\item \code{\link{confuMat}} confusion matrix based on reality vs predictions
}
}
\section{Data manipulation}{
\itemize{
\item \code{\link{calcDif}} calculate pair differences in a limma object (for paired samples analysis)
\item \code{\link{grep.df}} Grep all columns of a data frame
\item \code{\link{repsapply}} general version of \code{avereps}
}
}
\section{Others}{
\itemize{
\item \code{\link{centers.predict}}
\item \code{\link{clustsizes}}
\item \code{\link{likelihood.test}}
\item \code{\link{lineq}}
\item \code{\link{matrixReshuffle}}
\item \code{\link{pca2var}}
\item \code{\link{pvals}}
\item \code{\link{rfvalid}}
\item \code{\link{ringfence}}
\item \code{\link{smoothPalette}}
\item \code{\link{twofacreshape}}
}
}
\author{
January Weiner <january.weiner@gmail.com>
}
|
/man/myfuncs-package.Rd
|
no_license
|
january3/myfuncs
|
R
| false
| true
| 3,116
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myfuncs-package.R
\docType{package}
\name{myfuncs-package}
\alias{myfuncs-package}
\alias{myfuncs}
\title{My collection of utility functions}
\description{
A collection of utility functions
}
\details{
A collection of utility functions
}
\section{Shortcuts}{
\itemize{
\item \code{\link{catf}} and \code{\link{printf}} implement printf() like functions
\item \code{\link{os}} display memory usage
\item \link{shortcuts}: \code{l}, \code{h} and \code{p}: wrappers for \code{length}, \code{head} and \code{paste}
\item \code{\link{msd}} and \code{\link{ss}} mean and sd; sum of squares
\item \code{\link{zz}} save everything
\item \code{\link{myfuncs.reload}} reload the myfuncs package
}
}
\section{Graphics and plotting}{
\itemize{
\item \code{\link{all.ablines}} Draw three lines (horizontal at 0, vertical at 0, and diagonal)
\item \code{\link{arrayplot}} Show image representation of a microarray
\item \code{\link{showgene}} beeswarm + boxplot for a gene
\item \code{\link{myfaces}} My version of Chernoff faces
\item \code{\link{plotGenes}} Show expression for a number of genes in all arrays
\item \code{\link{dotpair}} A version of \code{pairs} showing also correlation coefficients
\item \code{\link{genePairs}} A version of \code{pairs} showing also correlation coefficients for large numbers of data points (using smoothScatter)
\item \code{\link{myPlotDensities}} A replacement for
\code{plotDensities} from \code{limma} for single-color arrays and for
coloring array groups
\item \code{\link{pairedswarm}} Beeswarm with lines linking pairs
\item \code{\link{clustCompPlot}} compare different clusterings in an overview plot
\item \code{\link{mypalette}} Nice color palette
\item \code{\link{pcaR2plot}} Calculate and visualize PCA regression
\item \code{\link{ttplot}} Plot a top table
\item \code{\link{copyan}} Produce an annotated PDF copy of a plot
}
}
\section{Analysis}{
\itemize{
\item \code{\link{getGOrilla}} run GOrilla from R
\item \code{\link{topTableAll}} produce a topTable-like output for all contrasts in a fit object
\item \code{\link{ttmake}} produce a topTable-like output including the msd measure
\item \code{\link{confuMat}} confusion matrix based on reality vs predictions
}
}
\section{Data manipulation}{
\itemize{
\item \code{\link{calcDif}} calculate pair differences in a limma object (for paired samples analysis)
\item \code{\link{grep.df}} Grep all columns of a data frame
\item \code{\link{repsapply}} general version of \code{avereps}
}
}
\section{Others}{
\itemize{
\item \code{\link{centers.predict}}
\item \code{\link{clustsizes}}
\item \code{\link{likelihood.test}}
\item \code{\link{lineq}}
\item \code{\link{matrixReshuffle}}
\item \code{\link{pca2var}}
\item \code{\link{pvals}}
\item \code{\link{rfvalid}}
\item \code{\link{ringfence}}
\item \code{\link{smoothPalette}}
\item \code{\link{twofacreshape}}
}
}
\author{
January Weiner <january.weiner@gmail.com>
}
|
# plot4.R: draws a 2x2 panel (Global Active Power, Sub Metering, Voltage,
# Global Reactive Power) for 2007-02-01/02 and saves it to plot4.png.

# Read in the dataset
power <- read.table('../Course4_ExploratoryDataAnalysis/data/household_power_consumption.txt', header = TRUE, sep = ';', dec ='.' )

# Restrict data to the dates 2007-02-01 to 2007-02-02
power$Date <- strptime(power$Date,"%d/%m/%Y")
power$Date <- as.Date(power$Date)
power_clean <- subset(power,Date == '2007-02-01' | Date == '2007-02-02')
rownames(power_clean) <- NULL # reset row numbers

# Create new datetime variable from the separate Date and Time columns
library(lubridate)
power_clean$DateTime <- with(power_clean, ymd(Date)+hms(Time))

# Convert Global_active_power and Sub_metering columns to numeric
# (read.table imported them as factors; paste() recovers the label text,
# not the underlying level code)
power_clean$Global_active_power <- as.numeric(paste(power_clean$Global_active_power))
power_clean$Sub_metering_1 <- as.numeric(paste(power_clean$Sub_metering_1))
power_clean$Sub_metering_2 <- as.numeric(paste(power_clean$Sub_metering_2))
power_clean$Sub_metering_3 <- as.numeric(paste(power_clean$Sub_metering_3))

png(file = 'plot4.png', width=480, height = 480) # open plotting device
par(mfcol = c(2,2)) # set plot window to be 2x2, adding plots by column

# Panel 1: Global Active Power line plot (as in plot2.R)
with(power_clean, plot(x=DateTime, y = Global_active_power, xlab ="", ylab = "Global Active Power (kilowatts)", type = "l",lwd = 1))

# Panel 2: Sub Metering line plot (as in plot3.R)
# fixed: dropped xlab/ylab arguments from the lines() calls below -- they are
# not graphical parameters and only triggered warnings
with(power_clean, plot(DateTime, Sub_metering_1, type = "n", xlab = "", ylab = "Energy to sub metering"))
with(power_clean, lines(x=DateTime, y = Sub_metering_1, type = "l",lwd = 1))
with(power_clean, lines(x=DateTime, y = Sub_metering_2, type = "l",lwd = 1, col = "red"))
with(power_clean, lines(x=DateTime, y = Sub_metering_3, type = "l",lwd = 1, col = "blue"))
legend("topright", lwd = 1, col = c("black", "red","blue"), legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), bty = "n")

# Panel 3: Voltage line plot
with(power_clean, plot(x=DateTime, y = Voltage, xlab ="datetime", ylab = "Voltage", type = "l",lwd = 1))

# Panel 4: Global Reactive Power line plot
with(power_clean, plot(x=DateTime, y = Global_reactive_power, xlab ="datetime", ylab = "Global_reactive_power", type = "l",lwd = 1))

dev.off()
|
/plot4.R
|
no_license
|
regfish7/ExData_Plotting1
|
R
| false
| false
| 2,215
|
r
|
# plot4.R: draws a 2x2 panel (Global Active Power, Sub Metering, Voltage,
# Global Reactive Power) for 2007-02-01/02 and saves it to plot4.png.

# Read in the dataset
power <- read.table('../Course4_ExploratoryDataAnalysis/data/household_power_consumption.txt', header = TRUE, sep = ';', dec ='.' )

# Restrict data to the dates 2007-02-01 to 2007-02-02
power$Date <- strptime(power$Date,"%d/%m/%Y")
power$Date <- as.Date(power$Date)
power_clean <- subset(power,Date == '2007-02-01' | Date == '2007-02-02')
rownames(power_clean) <- NULL # reset row numbers

# Create new datetime variable from the separate Date and Time columns
library(lubridate)
power_clean$DateTime <- with(power_clean, ymd(Date)+hms(Time))

# Convert Global_active_power and Sub_metering columns to numeric
# (read.table imported them as factors; paste() recovers the label text,
# not the underlying level code)
power_clean$Global_active_power <- as.numeric(paste(power_clean$Global_active_power))
power_clean$Sub_metering_1 <- as.numeric(paste(power_clean$Sub_metering_1))
power_clean$Sub_metering_2 <- as.numeric(paste(power_clean$Sub_metering_2))
power_clean$Sub_metering_3 <- as.numeric(paste(power_clean$Sub_metering_3))

png(file = 'plot4.png', width=480, height = 480) # open plotting device
par(mfcol = c(2,2)) # set plot window to be 2x2, adding plots by column

# Panel 1: Global Active Power line plot (as in plot2.R)
with(power_clean, plot(x=DateTime, y = Global_active_power, xlab ="", ylab = "Global Active Power (kilowatts)", type = "l",lwd = 1))

# Panel 2: Sub Metering line plot (as in plot3.R)
# fixed: dropped xlab/ylab arguments from the lines() calls below -- they are
# not graphical parameters and only triggered warnings
with(power_clean, plot(DateTime, Sub_metering_1, type = "n", xlab = "", ylab = "Energy to sub metering"))
with(power_clean, lines(x=DateTime, y = Sub_metering_1, type = "l",lwd = 1))
with(power_clean, lines(x=DateTime, y = Sub_metering_2, type = "l",lwd = 1, col = "red"))
with(power_clean, lines(x=DateTime, y = Sub_metering_3, type = "l",lwd = 1, col = "blue"))
legend("topright", lwd = 1, col = c("black", "red","blue"), legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), bty = "n")

# Panel 3: Voltage line plot
with(power_clean, plot(x=DateTime, y = Voltage, xlab ="datetime", ylab = "Voltage", type = "l",lwd = 1))

# Panel 4: Global Reactive Power line plot
with(power_clean, plot(x=DateTime, y = Global_reactive_power, xlab ="datetime", ylab = "Global_reactive_power", type = "l",lwd = 1))

dev.off()
|
##' @title Make tables
##' @param effortdat data.frame containing annual estimates of hectares
##' incentivized for each program
##' @param habitatdat data.frame containing the annual estimates of hectares of
##' habitat available by land cover type, including lcl and ucl from Monte
##' Carlo
##' @param shortfalltotals data.frame containing annual estimates of total
##' energy shortfalls, including lcl and ucl from Monte Carlo
##' @param shortfallseason data.frame containing annual and seasonal estimates
##' of total energy shortfalls, including lcl and ucl from Monte Carlo
##' @param scale factor by which to divide values in the table
##' @param pathout name of filepath to be passed to save_as_docx
##' @description Functions to produce near-publication-ready tables of results
make_effort_table <- function(effortdat, scale = 1000000, pathout) {
  # Build a flextable of annual incentivized hectares by program (BirdReturns
  # fall/spring, WHEP fall flooding/variable drawdown) plus the overall total,
  # save it as a .docx at `pathout`, and return the flextable object.
  #
  # NOTE(review): `scale` is unused by the active code below; it is retained
  # for interface compatibility (it was only referenced by the commented-out
  # prototype that previously lived in this function and has been removed).
  #
  # Shared column formatter: whole numbers with a thousands separator
  # (replaces five identical anonymous functions).
  fmt_int <- function(x) format(round(x, digits = 0),
                                nsmall = 0, big.mark = ',')
  table <- flextable(effortdat %>% filter(label == 'total'),
                     col_keys = c('group',
                                  'br_fall',
                                  'br_spring',
                                  ' ',
                                  'whep_fall',
                                  'whep_vardd',
                                  'incentives')) %>%
    set_formatter(br_fall = fmt_int,
                  br_spring = fmt_int,
                  whep_fall = fmt_int,
                  whep_vardd = fmt_int,
                  incentives = fmt_int) %>%
    set_header_labels(group = '',
                      br_fall = 'fall',
                      br_spring = 'spring',
                      whep_fall = 'fall flooding',
                      whep_vardd = 'variable drawdown',
                      incentives = 'Total') %>%
    # Two-row header: program names on top, season labels underneath.
    add_header_row(values = c('', 'BirdReturns', 'BirdReturns', ' ',
                              'WHEP', 'WHEP', 'Total')) %>%
    merge_at(i = 1, j = 2:3, part = 'header') %>%
    merge_at(i = 1, j = 5:6, part = 'header') %>%
    merge_at(j = 1, i = 1:2, part = 'header') %>%
    merge_at(j = 4, i = 1:2, part = 'header') %>%
    merge_at(j = 7, i = 1:2, part = 'header') %>%
    theme_vanilla() %>%
    font(part = 'all', fontname = 'Times') %>%
    bold(part = 'header') %>%
    set_table_properties(width = 1, layout = 'autofit') %>%
    align(align = 'center', part = 'header') %>%
    border_inner_h(border = officer::fp_border(width = 0))
  save_as_docx(table, path = pathout)
  return(table)
}
# Build a table of annual habitat availability by land cover type, grouped
# into unincentivized habitat, incentivized habitat and a grand total, with
# a (lcl-ucl) interval beside each annual estimate. Values are divided by
# `scale` (default 1e6) for display. The table is written to `pathout` as a
# .docx file and also returned.
make_habitat_table <- function(habitatdat, scale = 1000000, pathout) {
  # Reshape to one row per habitat category with paired total/CI columns per
  # year; `group` becomes the display section each category belongs to.
  tabledat <- habitatdat %>%
    select(year = group, habitat, total, lcl, ucl) %>%
    pivot_longer(total:ucl) %>%
    mutate(group = case_when(habitat %in% c('wetlands', 'rice', 'corn',
                                            'other', 'totalfree') ~
                               'Unincentivized habitat',
                             habitat %in% c('br_fall', 'br_spring',
                                            'whep_fall', 'whep_vardd',
                                            'incentives') ~
                               'Incentivized habitat',
                             TRUE ~ 'Grand Total'),
           group = factor(group, levels = c('Unincentivized habitat',
                                            'Incentivized habitat',
                                            'Grand Total')),
           # recode machine-readable habitat codes to display labels
           habitat = case_when(habitat == 'incentives' ~ 'Total incentivized',
                               habitat == 'totalfree' ~ 'Total unincentivized',
                               habitat == 'br_fall' ~ 'BirdReturns fall',
                               habitat == 'br_spring' ~ 'BirdReturns spring',
                               habitat == 'whep_fall' ~ 'WHEP fall flooding',
                               habitat == 'whep_vardd' ~ 'WHEP variable drawdown',
                               habitat == 'other' ~ 'Other crops',
                               habitat == 'grandtotal' ~ 'Grand Total',
                               TRUE ~ str_to_sentence(habitat)),
           # factor levels fix the display row order; the two section names
           # are included as levels so complete() below creates header rows
           habitat = factor(habitat,
                            levels = c('Unincentivized habitat',
                                       'Wetlands', 'Rice', 'Corn',
                                       'Other crops', 'Total unincentivized',
                                       'Incentivized habitat',
                                       'BirdReturns fall', 'BirdReturns spring',
                                       'WHEP fall flooding',
                                       'WHEP variable drawdown',
                                       'Total incentivized',
                                       'Grand Total')),
           name = factor(name, levels = c('total', 'lcl', 'ucl')),
           # '2013-14' -> '201314' so the year can be used in column names
           year = gsub('-', '', year),
           value = value/scale) %>%
    arrange(group, habitat, year, name) %>%
    unite(col = 'label', name, year) %>%
    pivot_wider(names_from = label, values_from = value) %>%
    complete(habitat) %>%
    # give the (otherwise all-NA) section-header rows their group label
    mutate(group = case_when(habitat == 'Unincentivized habitat' ~
                               'Unincentivized habitat',
                             habitat == 'Incentivized habitat' ~
                               'Incentivized habitat',
                             TRUE ~ as.character(group)))
  # One total column plus one composed (lcl-ucl) column per year.
  table <- flextable(tabledat,
                     col_keys = c('group', 'habitat',
                                  'total_201314', 'ci1',
                                  'total_201415', 'ci2',
                                  'total_201516', 'ci3',
                                  'total_201617', 'ci4')) %>%
    set_formatter(total_201314 = function(x) sprintf("%.03f", x),
                  total_201415 = function(x) sprintf("%.03f", x),
                  total_201516 = function(x) sprintf("%.03f", x),
                  total_201617 = function(x) sprintf("%.03f", x)) %>%
    # \U2012 is a figure dash between the CI bounds
    flextable::compose(j = 'ci1',
                       value = as_paragraph('(',
                                            sprintf("%.03f", lcl_201314),
                                            "\U2012",
                                            sprintf("%.03f", ucl_201314), ')')) %>%
    flextable::compose(j = 'ci2',
                       value = as_paragraph('(',
                                            sprintf("%.03f", lcl_201415),
                                            "\U2012",
                                            sprintf("%.03f", ucl_201415), ')')) %>%
    flextable::compose(j = 'ci3',
                       value = as_paragraph('(',
                                            sprintf("%.03f", lcl_201516),
                                            "\U2012",
                                            sprintf("%.03f", ucl_201516), ')')) %>%
    flextable::compose(j = 'ci4',
                       value = as_paragraph('(',
                                            sprintf("%.03f", lcl_201617),
                                            "\U2012",
                                            sprintf("%.03f", ucl_201617), ')')) %>%
    # each year label spans its total + CI column pair (merged below)
    set_header_labels(group = '',
                      habitat = '',
                      total_201314 = '2013-14',
                      ci1 = '2013-14',
                      total_201415 = '2014-15',
                      ci2 = '2014-15',
                      total_201516 = '2015-16',
                      ci3 = '2015-16',
                      total_201617 = '2016-17',
                      ci4 = '2016-17') %>%
    merge_at(i = 1, j = 3:4, part = 'header') %>%
    merge_at(i = 1, j = 5:6, part = 'header') %>%
    merge_at(i = 1, j = 7:8, part = 'header') %>%
    merge_at(i = 1, j = 9:10, part = 'header') %>%
    # NOTE(review): body row indices (1, 7, 13) assume the fixed habitat
    # factor ordering above puts the section-header rows there -- confirm
    # if habitat levels ever change.
    # void(i = 2:5, j = 1, part = 'body') %>%
    merge_at(i = 1, j = 1:10, part = 'body') %>%
    merge_at(i = 7, j = 1:10, part = 'body') %>%
    merge_at(i = 13, j = 1:2, part = 'body') %>%
    theme_vanilla() %>%
    font(part = 'all', fontname = 'Times New Roman') %>%
    fontsize(part = 'all', size = 12) %>%
    bold(part = 'header') %>%
    bold(i = c(1, 6, 7, 12, 13), part = 'body') %>%
    set_table_properties(width = 1, layout = 'autofit') %>%
    align(align = 'center', part = 'header') %>%
    align(align = 'left', part = 'body', j = c(2, 4, 6, 8, 10)) %>%
    valign(valign = 'top', part = 'body', j = 1) %>%
    border_inner_h(border = officer::fp_border(width = 0)) %>%
    border(i = c(7, 13), border.top = officer::fp_border())
  save_as_docx(table, path = pathout)
  return(table)
}
# Build a table comparing estimated energy shortfalls with vs. without
# incentive programs for two population scenarios (baseline and objectives),
# by season (fall, spring, total) and year. Shortfall values are divided by
# `scale` (default 1e9) for display; the percent difference attributable to
# incentives is shown alongside. Written to `pathout` as a .docx and returned.
make_shortfall_table <- function(shortfalltotals, shortfallseason,
                                 scale = 1000000000, pathout) {
  # Stack annual totals (tagged season = 'total') with the seasonal estimates.
  values <- bind_rows(shortfalltotals %>% mutate(season = 'total') %>% select(-habitat),
                      shortfallseason) %>%
    select(population, season, group, incentives, total, lcl, ucl)
  # Percent change in shortfall attributable to the incentive programs.
  diff <- values %>% select(-lcl, -ucl) %>%
    pivot_wider(names_from = incentives, values_from = total) %>%
    mutate(diff = with - without,
           perc = diff / without * 100) %>%
    select(population, season, group, perc)
  # Reshape to one row per season/year with one column block per
  # population x incentives combination.
  tabledat <- left_join(values, diff, by = c('population', 'season', 'group')) %>%
    mutate_at(vars(total:ucl), ~./scale) %>%
    pivot_longer(total:perc) %>%
    mutate(incentives = factor(incentives, levels = c('without', 'with')),
           population = factor(population, levels = c('baseline', 'objectives')),
           name = factor(name, levels = c('total', 'lcl', 'ucl', 'perc')),
           season = factor(season, levels = c('fall', 'spring', 'total')),
           group = factor(group, levels = c(' ', '2013-14', '2014-15',
                                            '2015-16', '2016-17'))) %>%
    arrange(population, incentives, name) %>%
    unite(col = 'label', population, incentives, name) %>%
    pivot_wider(names_from = label, values_from = value) %>%
    complete(group, season) %>%
    distinct() %>%
    arrange(season, group)
  table <- flextable(tabledat,
                     col_keys = c('season', 'group',
                                  'baseline_without_total', 'baseline_without_ci',
                                  'baseline_with_total', 'baseline_with_ci',
                                  'baseline_with_perc', ' ',
                                  'objectives_without_total', 'objectives_without_ci',
                                  'objectives_with_total', 'objectives_with_ci',
                                  'objectives_with_perc')) %>%
    set_formatter(baseline_without_total = function(x) sprintf("%.02f", x),
                  baseline_with_total = function(x) sprintf("%.02f", x),
                  objectives_without_total = function(x) sprintf("%.02f", x),
                  objectives_with_total = function(x) sprintf("%.02f", x),
                  baseline_with_perc = function(x) sprintf("%.01f%%", x),
                  objectives_with_perc = function(x) sprintf("%.01f%%", x)) %>%
    # \U2012 is a figure dash between the CI bounds
    flextable::compose(j = "baseline_without_ci",
                       value = as_paragraph('(',
                                            sprintf("%.02f", baseline_without_lcl),
                                            "\U2012",
                                            sprintf("%.02f", baseline_without_ucl), ')')) %>%
    flextable::compose(j = "baseline_with_ci",
                       value = as_paragraph('(',
                                            sprintf("%.02f", baseline_with_lcl),
                                            "\U2012",
                                            sprintf("%.02f", baseline_with_ucl), ')')) %>%
    flextable::compose(j = "objectives_without_ci",
                       value = as_paragraph('(',
                                            sprintf("%.02f", objectives_without_lcl),
                                            "\U2012",
                                            sprintf("%.02f", objectives_without_ucl), ')')) %>%
    flextable::compose(j = "objectives_with_ci",
                       value = as_paragraph('(',
                                            sprintf("%.02f", objectives_with_lcl),
                                            "\U2012",
                                            sprintf("%.02f", objectives_with_ucl), ')')) %>%
    set_header_labels(baseline_without_total = 'excluding incentive\nprograms',
                      baseline_without_ci = 'excluding incentive\nprograms',
                      baseline_with_total = 'including incentive\nprograms',
                      baseline_with_ci = 'including incentive\nprograms',
                      baseline_with_perc = 'difference\n(%)',
                      objectives_without_total = 'excluding incentive\nprograms',
                      objectives_without_ci = 'excluding incentive\nprograms',
                      objectives_with_total = 'including incentive\nprograms',
                      objectives_with_ci = 'including incentive\nprograms',
                      objectives_with_perc = 'difference\n(%)') %>%
    merge_at(i = 1, j = 3:4, part = 'header') %>%
    merge_at(i = 1, j = 5:6, part = 'header') %>%
    merge_at(i = 1, j = 9:10, part = 'header') %>%
    merge_at(i = 1, j = 11:12, part = 'header') %>%
    # second header row spans the population scenarios
    add_header_row(values = c('season', 'group',
                              'Baseline population', 'Baseline population',
                              'Baseline population', 'Baseline population',
                              'Baseline population', ' ',
                              'Population objectives', 'Population objectives',
                              'Population objectives', 'Population objectives',
                              'Population objectives')) %>%
    merge_at(i = 1, j = 3:7, part = 'header') %>%
    merge_at(i = 1, j = 9:13, part = 'header') %>%
    merge_at(j = 1, i = 1:2, part = 'header') %>%
    merge_at(j = 2, i = 1:2, part = 'header') %>%
    merge_at(j = 8, i = 1:2, part = 'header') %>%
    # NOTE(review): body rows 1, 6 and 11 are the season-header rows; the
    # indices assume exactly four years per season -- confirm if years change.
    merge_at(i = 1, j = 1:13, part = 'body') %>%
    merge_at(i = 6, j = 1:13, part = 'body') %>%
    merge_at(i = 11, j = 1:13, part = 'body') %>%
    theme_vanilla() %>%
    font(part = 'all', fontname = 'Times New Roman') %>%
    fontsize(part = 'all', size = 12) %>%
    bold(part = 'header') %>%
    bold(i = c(1, 6, 11), part = 'body') %>%
    set_table_properties(width = 1, layout = 'autofit') %>%
    # autofit(part = 'header') %>%
    # autofit(add_w = 0, add_h = 0, part = 'body') %>%
    align(align = 'center', part = 'header') %>%
    border_inner_h(border = officer::fp_border(width = 0)) %>%
    border(i = c(6, 11), border.top = officer::fp_border())
  save_as_docx(table, path = pathout)
  return(table)
}
# Build a table of habitat need filled within each program interval, by
# population scenario and year, with Fall/Spring subtotal rows appended.
# Intervals are read from the csv at `newhabitatpath` (columns start, end,
# start.date, end.date) and joined to `filldat` on start/end. Written to
# `pathout` as a .docx and returned.
make_habitatneed_table <- function(filldat, newhabitatpath, pathout) {
  tabledat <- left_join(read_csv(newhabitatpath, col_types = cols()),
                        filldat, by = c('start', 'end'))
  # Append per-season total rows; start/end = 320 places them after the
  # individual intervals when sorted (assumes all starts < 320 -- confirm),
  # and start.date carries the subtotal row label.
  tabledat_format = bind_rows(
    tabledat,
    tabledat %>%
      mutate(season = if_else(end < 185, 'fall', 'spring')) %>%
      group_by(group, population, season) %>%
      summarize(filled = sum(filled),
                .groups = 'drop') %>%
      mutate(start.date = case_when(season == 'fall' ~ 'Fall Total',
                                    season == 'spring' ~ 'Spring Total'),
             start = 320, end = 320) %>%
      select(-season)
  ) %>%
    mutate(filled = format(filled, big.mark = ',')) %>%
    pivot_wider(names_from = group, values_from = filled) %>%
    arrange(population, start) %>%
    mutate(start.date = factor(start.date, levels = c(' ', unique(start.date)))) %>%
    select(-start, -end) %>%
    complete(start.date, population) %>%
    arrange(population, start.date) %>%
    # blank out the population label except on its (all-NA) section row
    mutate(population = case_when(!is.na(`2013-14`) ~ '',
                                  TRUE ~ population))
  # NOTE(review): require() returns FALSE instead of erroring when the
  # package is missing; library() at the top of the script would be safer.
  require(flextable)
  table <- flextable(tabledat_format,
                     col_keys = c('population', 'dates',
                                  '2013-14', '2014-15', '2015-16', '2016-17')) %>%
    flextable::compose(j = "dates",
                       value = as_paragraph(start.date, " \U2012 ", end.date)) %>%
    set_header_labels(population = '',
                      dates = 'program interval') %>%
    # NOTE(review): body row indices (1, 22, 24, 45) assume a fixed number
    # of intervals per population scenario -- revisit if the csv changes.
    merge_at(i = 1, j = 1:6, part = 'body') %>%
    merge_at(i = 24, j = 1:6, part = 'body') %>%
    theme_vanilla() %>%
    font(part = 'all', fontname = 'Times New Roman') %>%
    fontsize(part = 'all', size = 12) %>%
    bold(part = 'header') %>%
    bold(i = c(1, 24), part = 'body') %>%
    set_table_properties(width = 1, layout = 'autofit') %>%
    # autofit(part = 'header') %>%
    # autofit(add_w = 0, add_h = 0, part = 'body') %>%
    align(align = 'center', part = 'header') %>%
    align(j = 1, align = 'left', part = 'body') %>%
    align(j = 2, align = 'center', part = 'body') %>%
    border_inner_h(border = officer::fp_border(width = 0)) %>%
    border(i = 22, border.top = officer::fp_border()) %>%
    border(i = 24, border.top = officer::fp_border()) %>%
    border(i = 45, border.top = officer::fp_border())
  save_as_docx(table, path = pathout)
  return(table)
}
|
/R/make_tables.R
|
no_license
|
pointblue/wetland-incentives-bioenergetics
|
R
| false
| false
| 23,702
|
r
|
##' @title Make tables
##' @param effortdat data.frame containing annual estimates of hectares
##' incentivized for each program
##' @param habitatdat data.frame containing the annual estimates of hectares of
##' habitat available by land cover type, including lcl and ucl from Monte
##' Carlo
##' @param shortfalltotals data.frame containing annual estimates of total
##' energy shortfalls, including lcl and ucl from Monte Carlo
##' @param shortfallseason data.frame containing annual and seasonal estimates
##' of total energy shortfalls, including lcl and ucl from Monte Carlo
##' @param filldat data.frame containing the amount of habitat need filled
##' within each program interval (joined to the interval csv by start and end)
##' @param newhabitatpath path to a csv file of program intervals (columns
##' start, end, start.date, end.date) read with read_csv
##' @param scale factor by which to divide values in the table
##' @param pathout name of filepath to be passed to save_as_docx
##' @description Functions to produce near-publication-ready tables of results
make_effort_table <- function(effortdat, scale = 1000000, pathout) {
  # Builds a two-row-header table of annual incentivized hectares:
  # BirdReturns (fall, spring) | spacer | WHEP (fall flooding, variable
  # drawdown) | Total. Writes the table to `pathout` as a .docx and
  # returns the flextable object.
  # NOTE(review): `scale` is currently unused in this function; it is kept
  # so all make_*_table functions share the same signature.
  table <- flextable(effortdat %>% filter(label == 'total'),
                     col_keys = c('group',
                                  'br_fall',
                                  'br_spring',
                                  ' ',
                                  'whep_fall',
                                  'whep_vardd',
                                  'incentives')) %>%
    # display hectares as whole numbers with thousands separators
    set_formatter(br_fall = function(x) format(round(x, digits = 0),
                                               nsmall = 0, big.mark = ','),
                  br_spring = function(x) format(round(x, digits = 0),
                                                 nsmall = 0, big.mark = ','),
                  whep_fall = function(x) format(round(x, digits = 0),
                                                 nsmall = 0, big.mark = ','),
                  whep_vardd = function(x) format(round(x, digits = 0),
                                                  nsmall = 0, big.mark = ','),
                  incentives = function(x) format(round(x, digits = 0),
                                                  nsmall = 0, big.mark = ',')) %>%
    set_header_labels(group = '',
                      br_fall = 'fall',
                      br_spring = 'spring',
                      whep_fall = 'fall flooding',
                      whep_vardd = 'variable drawdown',
                      incentives = 'Total') %>%
    # top header row groups the season columns by program
    add_header_row(values = c('', 'BirdReturns', 'BirdReturns', ' ',
                              'WHEP', 'WHEP', 'Total')) %>%
    merge_at(i = 1, j = 2:3, part = 'header') %>%
    merge_at(i = 1, j = 5:6, part = 'header') %>%
    merge_at(j = 1, i = 1:2, part = 'header') %>%
    merge_at(j = 4, i = 1:2, part = 'header') %>%  # blank spacer column
    merge_at(j = 7, i = 1:2, part = 'header') %>%
    theme_vanilla() %>%
    # 'Times New Roman' at 12 pt for consistency with the other table
    # functions in this file (was 'Times' with the default size)
    font(part = 'all', fontname = 'Times New Roman') %>%
    fontsize(part = 'all', size = 12) %>%
    bold(part = 'header') %>%
    set_table_properties(width = 1, layout = 'autofit') %>%
    align(align = 'center', part = 'header') %>%
    border_inner_h(border = officer::fp_border(width = 0))
  save_as_docx(table, path = pathout)
  return(table)
}
# Build a table of annual habitat availability by land cover type, grouped
# into unincentivized habitat, incentivized habitat and a grand total, with
# a (lcl-ucl) interval beside each annual estimate. Values are divided by
# `scale` (default 1e6) for display. The table is written to `pathout` as a
# .docx file and also returned.
make_habitat_table <- function(habitatdat, scale = 1000000, pathout) {
  # Reshape to one row per habitat category with paired total/CI columns per
  # year; `group` becomes the display section each category belongs to.
  tabledat <- habitatdat %>%
    select(year = group, habitat, total, lcl, ucl) %>%
    pivot_longer(total:ucl) %>%
    mutate(group = case_when(habitat %in% c('wetlands', 'rice', 'corn',
                                            'other', 'totalfree') ~
                               'Unincentivized habitat',
                             habitat %in% c('br_fall', 'br_spring',
                                            'whep_fall', 'whep_vardd',
                                            'incentives') ~
                               'Incentivized habitat',
                             TRUE ~ 'Grand Total'),
           group = factor(group, levels = c('Unincentivized habitat',
                                            'Incentivized habitat',
                                            'Grand Total')),
           # recode machine-readable habitat codes to display labels
           habitat = case_when(habitat == 'incentives' ~ 'Total incentivized',
                               habitat == 'totalfree' ~ 'Total unincentivized',
                               habitat == 'br_fall' ~ 'BirdReturns fall',
                               habitat == 'br_spring' ~ 'BirdReturns spring',
                               habitat == 'whep_fall' ~ 'WHEP fall flooding',
                               habitat == 'whep_vardd' ~ 'WHEP variable drawdown',
                               habitat == 'other' ~ 'Other crops',
                               habitat == 'grandtotal' ~ 'Grand Total',
                               TRUE ~ str_to_sentence(habitat)),
           # factor levels fix the display row order; the two section names
           # are included as levels so complete() below creates header rows
           habitat = factor(habitat,
                            levels = c('Unincentivized habitat',
                                       'Wetlands', 'Rice', 'Corn',
                                       'Other crops', 'Total unincentivized',
                                       'Incentivized habitat',
                                       'BirdReturns fall', 'BirdReturns spring',
                                       'WHEP fall flooding',
                                       'WHEP variable drawdown',
                                       'Total incentivized',
                                       'Grand Total')),
           name = factor(name, levels = c('total', 'lcl', 'ucl')),
           # '2013-14' -> '201314' so the year can be used in column names
           year = gsub('-', '', year),
           value = value/scale) %>%
    arrange(group, habitat, year, name) %>%
    unite(col = 'label', name, year) %>%
    pivot_wider(names_from = label, values_from = value) %>%
    complete(habitat) %>%
    # give the (otherwise all-NA) section-header rows their group label
    mutate(group = case_when(habitat == 'Unincentivized habitat' ~
                               'Unincentivized habitat',
                             habitat == 'Incentivized habitat' ~
                               'Incentivized habitat',
                             TRUE ~ as.character(group)))
  # One total column plus one composed (lcl-ucl) column per year.
  table <- flextable(tabledat,
                     col_keys = c('group', 'habitat',
                                  'total_201314', 'ci1',
                                  'total_201415', 'ci2',
                                  'total_201516', 'ci3',
                                  'total_201617', 'ci4')) %>%
    set_formatter(total_201314 = function(x) sprintf("%.03f", x),
                  total_201415 = function(x) sprintf("%.03f", x),
                  total_201516 = function(x) sprintf("%.03f", x),
                  total_201617 = function(x) sprintf("%.03f", x)) %>%
    # \U2012 is a figure dash between the CI bounds
    flextable::compose(j = 'ci1',
                       value = as_paragraph('(',
                                            sprintf("%.03f", lcl_201314),
                                            "\U2012",
                                            sprintf("%.03f", ucl_201314), ')')) %>%
    flextable::compose(j = 'ci2',
                       value = as_paragraph('(',
                                            sprintf("%.03f", lcl_201415),
                                            "\U2012",
                                            sprintf("%.03f", ucl_201415), ')')) %>%
    flextable::compose(j = 'ci3',
                       value = as_paragraph('(',
                                            sprintf("%.03f", lcl_201516),
                                            "\U2012",
                                            sprintf("%.03f", ucl_201516), ')')) %>%
    flextable::compose(j = 'ci4',
                       value = as_paragraph('(',
                                            sprintf("%.03f", lcl_201617),
                                            "\U2012",
                                            sprintf("%.03f", ucl_201617), ')')) %>%
    # each year label spans its total + CI column pair (merged below)
    set_header_labels(group = '',
                      habitat = '',
                      total_201314 = '2013-14',
                      ci1 = '2013-14',
                      total_201415 = '2014-15',
                      ci2 = '2014-15',
                      total_201516 = '2015-16',
                      ci3 = '2015-16',
                      total_201617 = '2016-17',
                      ci4 = '2016-17') %>%
    merge_at(i = 1, j = 3:4, part = 'header') %>%
    merge_at(i = 1, j = 5:6, part = 'header') %>%
    merge_at(i = 1, j = 7:8, part = 'header') %>%
    merge_at(i = 1, j = 9:10, part = 'header') %>%
    # NOTE(review): body row indices (1, 7, 13) assume the fixed habitat
    # factor ordering above puts the section-header rows there -- confirm
    # if habitat levels ever change.
    # void(i = 2:5, j = 1, part = 'body') %>%
    merge_at(i = 1, j = 1:10, part = 'body') %>%
    merge_at(i = 7, j = 1:10, part = 'body') %>%
    merge_at(i = 13, j = 1:2, part = 'body') %>%
    theme_vanilla() %>%
    font(part = 'all', fontname = 'Times New Roman') %>%
    fontsize(part = 'all', size = 12) %>%
    bold(part = 'header') %>%
    bold(i = c(1, 6, 7, 12, 13), part = 'body') %>%
    set_table_properties(width = 1, layout = 'autofit') %>%
    align(align = 'center', part = 'header') %>%
    align(align = 'left', part = 'body', j = c(2, 4, 6, 8, 10)) %>%
    valign(valign = 'top', part = 'body', j = 1) %>%
    border_inner_h(border = officer::fp_border(width = 0)) %>%
    border(i = c(7, 13), border.top = officer::fp_border())
  save_as_docx(table, path = pathout)
  return(table)
}
# Build a table comparing estimated energy shortfalls with vs. without
# incentive programs for two population scenarios (baseline and objectives),
# by season (fall, spring, total) and year. Shortfall values are divided by
# `scale` (default 1e9) for display; the percent difference attributable to
# incentives is shown alongside. Written to `pathout` as a .docx and returned.
make_shortfall_table <- function(shortfalltotals, shortfallseason,
                                 scale = 1000000000, pathout) {
  # Stack annual totals (tagged season = 'total') with the seasonal estimates.
  values <- bind_rows(shortfalltotals %>% mutate(season = 'total') %>% select(-habitat),
                      shortfallseason) %>%
    select(population, season, group, incentives, total, lcl, ucl)
  # Percent change in shortfall attributable to the incentive programs.
  diff <- values %>% select(-lcl, -ucl) %>%
    pivot_wider(names_from = incentives, values_from = total) %>%
    mutate(diff = with - without,
           perc = diff / without * 100) %>%
    select(population, season, group, perc)
  # Reshape to one row per season/year with one column block per
  # population x incentives combination.
  tabledat <- left_join(values, diff, by = c('population', 'season', 'group')) %>%
    mutate_at(vars(total:ucl), ~./scale) %>%
    pivot_longer(total:perc) %>%
    mutate(incentives = factor(incentives, levels = c('without', 'with')),
           population = factor(population, levels = c('baseline', 'objectives')),
           name = factor(name, levels = c('total', 'lcl', 'ucl', 'perc')),
           season = factor(season, levels = c('fall', 'spring', 'total')),
           group = factor(group, levels = c(' ', '2013-14', '2014-15',
                                            '2015-16', '2016-17'))) %>%
    arrange(population, incentives, name) %>%
    unite(col = 'label', population, incentives, name) %>%
    pivot_wider(names_from = label, values_from = value) %>%
    complete(group, season) %>%
    distinct() %>%
    arrange(season, group)
  table <- flextable(tabledat,
                     col_keys = c('season', 'group',
                                  'baseline_without_total', 'baseline_without_ci',
                                  'baseline_with_total', 'baseline_with_ci',
                                  'baseline_with_perc', ' ',
                                  'objectives_without_total', 'objectives_without_ci',
                                  'objectives_with_total', 'objectives_with_ci',
                                  'objectives_with_perc')) %>%
    set_formatter(baseline_without_total = function(x) sprintf("%.02f", x),
                  baseline_with_total = function(x) sprintf("%.02f", x),
                  objectives_without_total = function(x) sprintf("%.02f", x),
                  objectives_with_total = function(x) sprintf("%.02f", x),
                  baseline_with_perc = function(x) sprintf("%.01f%%", x),
                  objectives_with_perc = function(x) sprintf("%.01f%%", x)) %>%
    # \U2012 is a figure dash between the CI bounds
    flextable::compose(j = "baseline_without_ci",
                       value = as_paragraph('(',
                                            sprintf("%.02f", baseline_without_lcl),
                                            "\U2012",
                                            sprintf("%.02f", baseline_without_ucl), ')')) %>%
    flextable::compose(j = "baseline_with_ci",
                       value = as_paragraph('(',
                                            sprintf("%.02f", baseline_with_lcl),
                                            "\U2012",
                                            sprintf("%.02f", baseline_with_ucl), ')')) %>%
    flextable::compose(j = "objectives_without_ci",
                       value = as_paragraph('(',
                                            sprintf("%.02f", objectives_without_lcl),
                                            "\U2012",
                                            sprintf("%.02f", objectives_without_ucl), ')')) %>%
    flextable::compose(j = "objectives_with_ci",
                       value = as_paragraph('(',
                                            sprintf("%.02f", objectives_with_lcl),
                                            "\U2012",
                                            sprintf("%.02f", objectives_with_ucl), ')')) %>%
    set_header_labels(baseline_without_total = 'excluding incentive\nprograms',
                      baseline_without_ci = 'excluding incentive\nprograms',
                      baseline_with_total = 'including incentive\nprograms',
                      baseline_with_ci = 'including incentive\nprograms',
                      baseline_with_perc = 'difference\n(%)',
                      objectives_without_total = 'excluding incentive\nprograms',
                      objectives_without_ci = 'excluding incentive\nprograms',
                      objectives_with_total = 'including incentive\nprograms',
                      objectives_with_ci = 'including incentive\nprograms',
                      objectives_with_perc = 'difference\n(%)') %>%
    merge_at(i = 1, j = 3:4, part = 'header') %>%
    merge_at(i = 1, j = 5:6, part = 'header') %>%
    merge_at(i = 1, j = 9:10, part = 'header') %>%
    merge_at(i = 1, j = 11:12, part = 'header') %>%
    # second header row spans the population scenarios
    add_header_row(values = c('season', 'group',
                              'Baseline population', 'Baseline population',
                              'Baseline population', 'Baseline population',
                              'Baseline population', ' ',
                              'Population objectives', 'Population objectives',
                              'Population objectives', 'Population objectives',
                              'Population objectives')) %>%
    merge_at(i = 1, j = 3:7, part = 'header') %>%
    merge_at(i = 1, j = 9:13, part = 'header') %>%
    merge_at(j = 1, i = 1:2, part = 'header') %>%
    merge_at(j = 2, i = 1:2, part = 'header') %>%
    merge_at(j = 8, i = 1:2, part = 'header') %>%
    # NOTE(review): body rows 1, 6 and 11 are the season-header rows; the
    # indices assume exactly four years per season -- confirm if years change.
    merge_at(i = 1, j = 1:13, part = 'body') %>%
    merge_at(i = 6, j = 1:13, part = 'body') %>%
    merge_at(i = 11, j = 1:13, part = 'body') %>%
    theme_vanilla() %>%
    font(part = 'all', fontname = 'Times New Roman') %>%
    fontsize(part = 'all', size = 12) %>%
    bold(part = 'header') %>%
    bold(i = c(1, 6, 11), part = 'body') %>%
    set_table_properties(width = 1, layout = 'autofit') %>%
    # autofit(part = 'header') %>%
    # autofit(add_w = 0, add_h = 0, part = 'body') %>%
    align(align = 'center', part = 'header') %>%
    border_inner_h(border = officer::fp_border(width = 0)) %>%
    border(i = c(6, 11), border.top = officer::fp_border())
  save_as_docx(table, path = pathout)
  return(table)
}
make_habitatneed_table <- function(filldat, newhabitatpath, pathout) {
  # Builds a table of habitat need filled within each program interval, by
  # population scenario and year, with Fall/Spring subtotal rows appended,
  # and writes it to `pathout` as a .docx.
  #
  # filldat: data.frame with columns start, end, group (year), population
  #   and filled. newhabitatpath: csv of program intervals with columns
  #   start, end, start.date, end.date (joined to filldat on start/end).
  #
  # library() rather than require(): fail loudly up front if flextable is
  # missing instead of silently getting FALSE and erroring later.
  library(flextable)
  tabledat <- left_join(read_csv(newhabitatpath, col_types = cols()),
                        filldat, by = c('start', 'end'))
  tabledat_format <- bind_rows(
    tabledat,
    # Append per-season total rows; start/end = 320 places them after the
    # individual intervals when sorted (assumes all starts < 320 -- confirm),
    # and start.date carries the subtotal row label.
    tabledat %>%
      mutate(season = if_else(end < 185, 'fall', 'spring')) %>%
      group_by(group, population, season) %>%
      summarize(filled = sum(filled),
                .groups = 'drop') %>%
      mutate(start.date = case_when(season == 'fall' ~ 'Fall Total',
                                    season == 'spring' ~ 'Spring Total'),
             start = 320, end = 320) %>%
      select(-season)
  ) %>%
    mutate(filled = format(filled, big.mark = ',')) %>%
    pivot_wider(names_from = group, values_from = filled) %>%
    arrange(population, start) %>%
    mutate(start.date = factor(start.date, levels = c(' ', unique(start.date)))) %>%
    select(-start, -end) %>%
    complete(start.date, population) %>%
    arrange(population, start.date) %>%
    # blank out the population label except on its (all-NA) section row
    mutate(population = case_when(!is.na(`2013-14`) ~ '',
                                  TRUE ~ population))
  table <- flextable(tabledat_format,
                     col_keys = c('population', 'dates',
                                  '2013-14', '2014-15', '2015-16', '2016-17')) %>%
    flextable::compose(j = "dates",
                       value = as_paragraph(start.date, " \U2012 ", end.date)) %>%
    set_header_labels(population = '',
                      dates = 'program interval') %>%
    # NOTE(review): row indices below (1, 22, 24, 45) assume a fixed number
    # of intervals per population scenario -- revisit if the csv changes.
    merge_at(i = 1, j = 1:6, part = 'body') %>%
    merge_at(i = 24, j = 1:6, part = 'body') %>%
    theme_vanilla() %>%
    font(part = 'all', fontname = 'Times New Roman') %>%
    fontsize(part = 'all', size = 12) %>%
    bold(part = 'header') %>%
    bold(i = c(1, 24), part = 'body') %>%
    set_table_properties(width = 1, layout = 'autofit') %>%
    align(align = 'center', part = 'header') %>%
    align(j = 1, align = 'left', part = 'body') %>%
    align(j = 2, align = 'center', part = 'body') %>%
    border_inner_h(border = officer::fp_border(width = 0)) %>%
    border(i = 22, border.top = officer::fp_border()) %>%
    border(i = 24, border.top = officer::fp_border()) %>%
    border(i = 45, border.top = officer::fp_border())
  save_as_docx(table, path = pathout)
  return(table)
}
|
##### Defining global objects ####
# Load helper functions and the packages used across the app.
source("2_load_libraries.R")
library(tidyverse)
library(plotly)

# National bycatch data, 2010-2015. Drop metadata columns, keep only rows
# that are complete in columns 6, 9 and 10 (positional -- presumably the key
# metric columns; TODO confirm against the csv header), and add a row id.
master <- read.csv("data/All_bycatch_data_2010_2015.csv") %>%
  select(-c(CV, FOOTNOTE.S., FISHERY.TYPE.GENERAL, FISHERY.TYPE.SPECIFIC)) %>%
  .[complete.cases(.[, c(6, 9, 10)]), ] %>%
  mutate(NUM.FISH = seq_len(nrow(.)))

# Harmonize coding across all character columns: lower-case the "Pot" gear
# label and collapse the NW/SW regions into a single "WC" (west coast) region.
master[master == "Pot"] <- "pot"
master[master == "NW"] <- "WC"
master[master == "SW"] <- "WC"
# Shiny UI: shinydashboard page with three sidebar tabs -- visualize by
# species group, visualize by fishing type, and explore/download raw data.
# Conditional panels expose per-tab controls (how to subdivide the data and
# which metric to show).
ui <- dashboardPage(skin = "black",
                    dashboardHeader(
                      title = "National Bycatch Database Explorer",
                      titleWidth = 200
                    ),
                    dashboardSidebar(
                      width = 200,
                      sidebarMenu(id = 'sidebarmenu',
                                  menuItem("Visualize by species group", tabName='species',icon=icon("clock-o",lib='font-awesome')),
                                  # controls shown only while the species tab is selected
                                  conditionalPanel("input.sidebarmenu ==='species'",
                                                   #checkboxInput("sp_region", "Subdivide by region",value=FALSE),
                                                   radioButtons(inputId="choice_sp", label="How would you like to subdivide the data?", selected = "Don't subdivide",
                                                                choices=c("Region","Fishery type", "Don't subdivide"))),
                                  #checkboxInput("sp_region", "Subdivide by fishing type",value=FALSE)),
                                  menuItem("Visualize by fishing type", tabName='fishing',icon=icon("clock-o",lib='font-awesome')),
                                  # controls shown only while the fishing tab is selected;
                                  # radio values map to column names in `master`
                                  conditionalPanel("input.sidebarmenu ==='fishing'",
                                                   radioButtons(inputId="choice_gear", label="How would you like to subdivide the data?", selected = "Don't subdivide",
                                                                choices=c("Region", "Don't subdivide")),
                                                   radioButtons(inputId="choice_metric", label="What metric would you like to see?", selected = "FISHERY.BYCATCH.RATIO",
                                                                choices=c("Bycatch ratio"="FISHERY.BYCATCH.RATIO",
                                                                          "Total catch"="TOTAL.CATCH",
                                                                          "Total landings"="TOTAL.FISHERY.LANDINGS",
                                                                          "Number of fisheries"="NUM.FISH"))),
                                  menuItem("Explore raw data", tabName='raw',icon=icon("clock-o",lib='font-awesome')),
                                  div(style="text-align:center",downloadButton("downloadData", label = h6(style="color:black","Download dataset")))
                      )),
                    dashboardBody(
                      tabItems(
                        # species tab: one plot per species group
                        tabItem(tabName = "species",
                                fluidRow(
                                  column(h5("Fish and invertebrates"),width=4,plotOutput("Fish")),
                                  column(h5("Mammals"),width=4,plotOutput("Mammals")),
                                  column(h5("Seabirds and sea turtles"),width=4,plotOutput("SBST"))
                                )),
                        # fishing tab: a single full-width plot (the per-gear
                        # plot grid below is retained but disabled)
                        tabItem(tabName = "fishing",
                                fluidRow(
                                  # column(h5("Catch:Bycatch Ratio"),width=4,plotOutput("gear_BR")),
                                  # column(h5("Total Catch"),width=4,plotOutput("gear_TC")),
                                  # column(h5("Total Landings"),width=4,plotOutput("gear_TL"))
                                  column(h5(""),width=12,plotOutput("gear_ll")))
                                # column(h5("Trawl"),width=4,plotOutput("gear_tr")),
                                # column(h5("Pot"),width=4,plotOutput("gear_pt"))),
                                # fluidRow(
                                #   column(h5("Gillnet"),width=4,plotOutput("gear_gn")),
                                #   column(h5("Jig"),width=4,plotOutput("gear_jg")),
                                #   column(h5("Troll"),width=4,plotOutput("gear_trol"))),
                                # fluidRow(
                                #   column(h5("Line"),width=4,plotOutput("gear_ln")),
                                #   column(h5("Dredge"),width=4,plotOutput("gear_dr")),
                                #   column(h5("Seine"),width=4,plotOutput("gear_se")))
                        ),
                        # raw tab: interactive data table of `master`
                        tabItem(tabName = "raw",
                                fluidRow(
                                  column(h5(""),width=10,DT::dataTableOutput("rawTable"))
                                ))
                      ))
)
# Server: renders the three species-group charts, the fishing-type chart,
# the raw data table, and the CSV download, all driven by `master`.
server <- shinyServer(function(input, output) {
# Fish & invertebrate bycatch totals per year (2010-2015), optionally
# stacked by region or fishery type per input$choice_sp.
output$Fish<-renderPlot({
a=master %>% filter(GROUP=="invertebrate" | GROUP=="fish") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015)
value=input$choice_sp
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.FISH.INVERT),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Region"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.FISH.INVERT,fill=REGION),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Fishery type"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.FISH.INVERT,fill=FISHERY.TYPE),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
b
})
# Marine mammal bycatch totals. NOTE(review): unlike the other two
# species plots this one applies no YEAR filter -- confirm intended.
output$Mammals<-renderPlot({
a=master %>% filter(GROUP=="marine mammal")
value=input$choice_sp
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.MM),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Region"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.MM,fill=REGION),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Fishery type"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.MM,fill=FISHERY.TYPE),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
b
})
# Seabird and sea turtle bycatch totals per year (2010-2015), with the
# same optional subdivisions as the fish plot.
output$SBST<-renderPlot({
a=master %>% filter(GROUP=="seabird"|GROUP=="sea turtle") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015)
value=input$choice_sp
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.SBST),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Region"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.SBST,fill=REGION),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Fishery type"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.SBST,fill=FISHERY.TYPE),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
b
})
# Resolves input$choice_metric to matching column name(s) of `master`.
# NOTE(review): grep() runs without fixed=TRUE, so the dots in the choice
# strings act as regex wildcards -- confirm each choice matches exactly
# one column, otherwise the == comparisons in gear_ll get a vector.
metric=reactive({
a=input$choice_metric
b=grep(a,colnames(master),value=T)
return(b)
})
# Fishing-type panel: faceted bar chart of the chosen metric per fishery
# type, optionally stacked by region. One branch per metric; each branch
# pre-computes both the ungrouped (a) and region-grouped (aa) summaries.
output$gear_ll<-renderPlot({
value=input$choice_gear
if(metric()=="FISHERY.BYCATCH.RATIO"){
a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=mean(FISHERY.BYCATCH.RATIO,na.rm=T))
aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=mean(FISHERY.BYCATCH.RATIO,na.rm=T))
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Bycatch ratio")+xlab("Year")
}
if(value=="Region"){
b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Bycatch ratio")+xlab("Year")
}
}
if(metric()=="TOTAL.FISHERY.LANDINGS"){
a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=mean(TOTAL.FISHERY.LANDINGS,na.rm=T))
aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=mean(TOTAL.FISHERY.LANDINGS,na.rm=T))
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total landings")+xlab("Year")
}
if(value=="Region"){
b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total landings")+xlab("Year")
}
}
if(metric()=="TOTAL.CATCH"){
a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=mean(TOTAL.CATCH,na.rm=T))
aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=mean(TOTAL.CATCH,na.rm=T))
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total catch")+xlab("Year")
}
if(value=="Region"){
b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total catch")+xlab("Year")
}
}
# NUM.FISH is a row id, so n() counts fishery records rather than averaging.
if(metric()=="NUM.FISH"){
a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=n())
aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=n())
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Number of fisheries")+xlab("Year")
}
if(value=="Region"){
b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Number of fisheries")+xlab("Year")
}
}
b
})
# Interactive raw-data table.
output$rawTable<-DT::renderDataTable({
datatable(master)
})
# Serves the full cleaned dataset as a CSV download.
output$downloadData <- downloadHandler(
filename = function() {
paste("National_Bycatch_Database", ".csv", sep = "")
},
content = function(file) {
write.csv(master, file, row.names = FALSE)
})
})
# Launch the application.
shinyApp(ui = ui, server = server)
|
/NBR_bycatch_explorer/app_V2.R
|
no_license
|
mssavoca/NOP_NBR_bycatch_analysis
|
R
| false
| false
| 14,119
|
r
|
##### Defining global objects####
# Source helper functions and attach the packages the app relies on.
# source functions
source("2_load_libraries.R")
library(tidyverse)
library(plotly)
# Load the 2010-2015 national bycatch table, drop metadata columns, keep
# only rows complete in columns 6, 9 and 10 (positional indices -- TODO
# confirm these are the intended key columns), and add NUM.FISH as a
# simple row id. seq_len() replaces the original seq(1:nrow(.)) so an
# empty table yields an empty id vector instead of c(1, 0); `<-` replaces
# `=` for top-level assignment per R convention.
master <- read.csv("data/All_bycatch_data_2010_2015.csv") %>% select(-c(CV,FOOTNOTE.S.,FISHERY.TYPE.GENERAL,FISHERY.TYPE.SPECIFIC)) %>% .[complete.cases(.[,c(6,9,10)]),] %>% mutate(NUM.FISH=seq_len(nrow(.)))
# Normalise inconsistent codings anywhere in the table: "Pot" -> "pot",
# and fold the NW and SW regions into a single "WC" (west coast) region.
master[master=="Pot"]<-"pot"
master[master=="NW"]<-"WC"
master[master=="SW"]<-"WC"
# UI: shinydashboard layout with a sidebar menu of three tabs (species
# group plots, fishing-type plots, raw data) and a dataset download button.
ui <- dashboardPage(skin = "black",
dashboardHeader(
title = "National Bycatch Database Explorer",
titleWidth = 200
),
dashboardSidebar(
width = 200,
sidebarMenu(id = 'sidebarmenu',
menuItem("Visualize by species group", tabName='species',icon=icon("clock-o",lib='font-awesome')),
# Radio buttons shown only while the species tab is active.
conditionalPanel("input.sidebarmenu ==='species'",
#checkboxInput("sp_region", "Subdivide by region",value=FALSE),
radioButtons(inputId="choice_sp", label="How would you like to subdivide the data?", selected = "Don't subdivide",
choices=c("Region","Fishery type", "Don't subdivide"))),
#checkboxInput("sp_region", "Subdivide by fishing type",value=FALSE)),
menuItem("Visualize by fishing type", tabName='fishing',icon=icon("clock-o",lib='font-awesome')),
# Subdivision and metric selectors for the fishing-type tab; the metric
# values are column-name fragments consumed by the metric() reactive.
conditionalPanel("input.sidebarmenu ==='fishing'",
radioButtons(inputId="choice_gear", label="How would you like to subdivide the data?", selected = "Don't subdivide",
choices=c("Region", "Don't subdivide")),
radioButtons(inputId="choice_metric", label="What metric would you like to see?", selected = "FISHERY.BYCATCH.RATIO",
choices=c("Bycatch ratio"="FISHERY.BYCATCH.RATIO",
"Total catch"="TOTAL.CATCH",
"Total landings"="TOTAL.FISHERY.LANDINGS",
"Number of fisheries"="NUM.FISH"))),
menuItem("Explore raw data", tabName='raw',icon=icon("clock-o",lib='font-awesome')),
div(style="text-align:center",downloadButton("downloadData", label = h6(style="color:black","Download dataset")))
)),
dashboardBody(
tabItems(
# Three side-by-side species-group plots.
tabItem(tabName = "species",
fluidRow(
column(h5("Fish and invertebrates"),width=4,plotOutput("Fish")),
column(h5("Mammals"),width=4,plotOutput("Mammals")),
column(h5("Seabirds and sea turtles"),width=4,plotOutput("SBST"))
)),
# Single full-width faceted plot; per-gear plots below are retired.
tabItem(tabName = "fishing",
fluidRow(
# column(h5("Catch:Bycatch Ratio"),width=4,plotOutput("gear_BR")),
# column(h5("Total Catch"),width=4,plotOutput("gear_TC")),
# column(h5("Total Landings"),width=4,plotOutput("gear_TL"))
column(h5(""),width=12,plotOutput("gear_ll")))
# column(h5("Trawl"),width=4,plotOutput("gear_tr")),
# column(h5("Pot"),width=4,plotOutput("gear_pt"))),
# fluidRow(
# column(h5("Gillnet"),width=4,plotOutput("gear_gn")),
# column(h5("Jig"),width=4,plotOutput("gear_jg")),
# column(h5("Troll"),width=4,plotOutput("gear_trol"))),
# fluidRow(
# column(h5("Line"),width=4,plotOutput("gear_ln")),
# column(h5("Dredge"),width=4,plotOutput("gear_dr")),
# column(h5("Seine"),width=4,plotOutput("gear_se")))
),
# Interactive raw-data table (DT).
tabItem(tabName = "raw",
fluidRow(
column(h5(""),width=10,DT::dataTableOutput("rawTable"))
))
))
)
# Server: renders the three species-group charts, the fishing-type chart,
# the raw data table, and the CSV download, all driven by `master`.
server <- shinyServer(function(input, output) {
# Fish & invertebrate bycatch totals per year (2010-2015), optionally
# stacked by region or fishery type per input$choice_sp.
output$Fish<-renderPlot({
a=master %>% filter(GROUP=="invertebrate" | GROUP=="fish") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015)
value=input$choice_sp
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.FISH.INVERT),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Region"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.FISH.INVERT,fill=REGION),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Fishery type"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.FISH.INVERT,fill=FISHERY.TYPE),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
b
})
# Marine mammal bycatch totals. NOTE(review): unlike the other two
# species plots this one applies no YEAR filter -- confirm intended.
output$Mammals<-renderPlot({
a=master %>% filter(GROUP=="marine mammal")
value=input$choice_sp
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.MM),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Region"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.MM,fill=REGION),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Fishery type"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.MM,fill=FISHERY.TYPE),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
b
})
# Seabird and sea turtle bycatch totals per year (2010-2015), with the
# same optional subdivisions as the fish plot.
output$SBST<-renderPlot({
a=master %>% filter(GROUP=="seabird"|GROUP=="sea turtle") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015)
value=input$choice_sp
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.SBST),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Region"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.SBST,fill=REGION),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
if(value=="Fishery type"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=TOTAL.FISHERY.BYCATCH.SBST,fill=FISHERY.TYPE),stat="identity")+ theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total bycatch")+xlab("Year")
}
b
})
# Resolves input$choice_metric to matching column name(s) of `master`.
# NOTE(review): grep() runs without fixed=TRUE, so the dots in the choice
# strings act as regex wildcards -- confirm each choice matches exactly
# one column, otherwise the == comparisons in gear_ll get a vector.
metric=reactive({
a=input$choice_metric
b=grep(a,colnames(master),value=T)
return(b)
})
# Fishing-type panel: faceted bar chart of the chosen metric per fishery
# type, optionally stacked by region. One branch per metric; each branch
# pre-computes both the ungrouped (a) and region-grouped (aa) summaries.
output$gear_ll<-renderPlot({
value=input$choice_gear
if(metric()=="FISHERY.BYCATCH.RATIO"){
a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=mean(FISHERY.BYCATCH.RATIO,na.rm=T))
aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=mean(FISHERY.BYCATCH.RATIO,na.rm=T))
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Bycatch ratio")+xlab("Year")
}
if(value=="Region"){
b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Bycatch ratio")+xlab("Year")
}
}
if(metric()=="TOTAL.FISHERY.LANDINGS"){
a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=mean(TOTAL.FISHERY.LANDINGS,na.rm=T))
aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=mean(TOTAL.FISHERY.LANDINGS,na.rm=T))
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total landings")+xlab("Year")
}
if(value=="Region"){
b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total landings")+xlab("Year")
}
}
if(metric()=="TOTAL.CATCH"){
a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=mean(TOTAL.CATCH,na.rm=T))
aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=mean(TOTAL.CATCH,na.rm=T))
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total catch")+xlab("Year")
}
if(value=="Region"){
b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Total catch")+xlab("Year")
}
}
# NUM.FISH is a row id, so n() counts fishery records rather than averaging.
if(metric()=="NUM.FISH"){
a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=n())
aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=n())
if(value=="Don't subdivide"){
b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Number of fisheries")+xlab("Year")
}
if(value=="Region"){
b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity")+facet_wrap(~FISHERY.TYPE)+ theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"))+ylab("Number of fisheries")+xlab("Year")
}
}
b
})
# Interactive raw-data table.
output$rawTable<-DT::renderDataTable({
datatable(master)
})
# Serves the full cleaned dataset as a CSV download.
output$downloadData <- downloadHandler(
filename = function() {
paste("National_Bycatch_Database", ".csv", sep = "")
},
content = function(file) {
write.csv(master, file, row.names = FALSE)
})
})
# Launch the application.
shinyApp(ui = ui, server = server)
|
#install.packages('tabulizer')
#install.packages('rJava')
#install.packages('devtools')
#install.packages('pdftools')
#install.packages('data.table')
library(rvest)
library(tidyverse)
library(data.table)
library(tidytext)
library(stringr)
library(ggplot2)
library(shiny)
library(data.table)
library(leaflet)
library(geojsonio)
library(reshape2)
#getwd()
#setwd("C:/Users/veedu/Desktop/Spring 2019/Math 216/HW 3/HW3-2")
# Load six single-series CSVs for LA county (education, median age, net
# migration, per-capita income, population, unemployment rate); each file
# is presumably a two-column DATE/value series -- verify against the CSVs.
education <- read_csv("education.csv")
median.age <- read_csv("median_age.csv")
net.migration <- read_csv("net_mig.csv")
per.capita.personal.income <- read_csv("per_capita_income.csv")
resident.pop <- read_csv("resident_pop.csv")
ur <- read_csv("ur.csv")
# Rename columns
colnames(education) <- c("DATE", "Education")
colnames(median.age) <- c("DATE", "Median_Age")
colnames(net.migration) <- c("DATE", "Net_Migration")
colnames(per.capita.personal.income) <- c("DATE", "Per_Capital_Personal_Income")
colnames(resident.pop) <- c("DATE", "Population")
colnames(ur) <- c("DATE", "Unemployment_Rate")
# Change to numerical values
education$Education <- as.numeric(education$Education)
median.age$Median_Age <- as.numeric(median.age$Median_Age)
net.migration$Net_Migration <- as.numeric(net.migration$Net_Migration)
per.capita.personal.income$Per_Capital_Personal_Income <- as.numeric(per.capita.personal.income$Per_Capital_Personal_Income)
resident.pop$Population <- as.numeric(resident.pop$Population)
ur$Unemployment_Rate <- as.numeric(ur$Unemployment_Rate)
# Merge all six series into one wide table keyed on DATE.
la.joined.data <- left_join(resident.pop,
ur,
by = c("DATE"))
la.joined.data <- left_join(la.joined.data,
education,
by = c("DATE"))
la.joined.data <- left_join(la.joined.data,
median.age,
by = c("DATE"))
la.joined.data <- left_join(la.joined.data,
net.migration,
by = c("DATE"))
la.joined.data <- left_join(la.joined.data,
per.capita.personal.income,
by = c("DATE"))
# Drop row 9 by position -- presumably an incomplete/unwanted year;
# TODO confirm which observation this removes.
la.joined.data <- la.joined.data[-c(9), ]
#### R Shiny App
# Zip-level 2010 census data for LA county; the first row is dropped
# (presumably a header/summary row -- confirm against the CSV).
la.cities <- read_csv("2010_Census_Populations_by_Zip_Code.csv") #use the census data here
la.cities <- la.cities[-c(1), ]
colnames(la.cities) <- c("zipcode", "city",
"latitude", "longitude",
"population", "age", #age = median age
"males", "females",
"total_household", "household_size")
# Coerce every analysis column to numeric.
la.cities$zipcode <- as.numeric(la.cities$zipcode)
la.cities$latitude <- as.numeric(la.cities$latitude)
la.cities$longitude <- as.numeric(la.cities$longitude)
la.cities$population <- as.numeric(la.cities$population)
la.cities$age <- as.numeric(la.cities$age)
la.cities$males <- as.numeric(la.cities$males)
la.cities$females <- as.numeric(la.cities$females)
la.cities$total_household <- as.numeric(la.cities$total_household)
la.cities$household_size <- as.numeric(la.cities$household_size)
# Parallel character copies (".2" suffix) for display in text popups.
la.cities$population.2 <- as.character(la.cities$population)
la.cities$age.2 <- as.character(la.cities$age)
la.cities$males.2 <- as.character(la.cities$males)
la.cities$females.2 <- as.character(la.cities$females)
la.cities$total_household.2 <- as.character(la.cities$total_household)
# National zip/lat/lng lookup, reduced to California rows.
cities.long.lat <- read_csv("uszips.csv") #use the uszips file here
#this data set contains all cities in CA
ca.cities.long.lat <- cities.long.lat %>%
filter(state_id == "CA") %>%
select(zip, lat, lng, city, state_id)
colnames(ca.cities.long.lat) <- c("zipcode", "latitude",
"longitude", "city", "state")
ca.cities.long.lat$zipcode <- as.numeric(ca.cities.long.lat$zipcode)
ca.cities.long.lat$latitude <- as.numeric(ca.cities.long.lat$latitude)
ca.cities.long.lat$longitude <- as.numeric(ca.cities.long.lat$longitude)
# Narrow the census table to the columns needed for the map join.
la.cities.2 <- la.cities %>%
select(zipcode, city, population,
age, males, females,
total_household, household_size)
#join data so we can get stats such as med age
# Left join on zipcode; city appears in both tables, yielding city.x
# (uszips) and city.y (census) columns.
la.cities.long.lat.joined <- left_join(ca.cities.long.lat,
la.cities.2,
by = c("zipcode"))
la.cities.long.lat.joined.2 <- la.cities.long.lat.joined %>%
select(zipcode, latitude, longitude,
city.x, city.y, population,
age, males, females,
total_household, household_size) %>%
na.omit()
la.cities.long.lat.joined.2$population <-
as.character(la.cities.long.lat.joined.2$population)
# ".3" copy: zipcode as character (marker label), population back to numeric.
la.cities.long.lat.joined.3 <- la.cities.long.lat.joined.2
la.cities.long.lat.joined.3$zipcode <- as.character(la.cities.long.lat.joined.3$zipcode)
la.cities.long.lat.joined.3$population <- as.numeric(la.cities.long.lat.joined.3$population)
#make a map of LA with the zipcodes
# Clustered leaflet map of all zip codes; several popup fields are still
# placeholders ("avg education level?", etc.).
la.millenial.pop <- la.cities.long.lat.joined.3 %>%
leaflet() %>%
addTiles() %>%
addCircleMarkers(lat = ~latitude,
lng = ~longitude,
clusterOptions = markerClusterOptions(),
popup = paste(la.cities.long.lat.joined.3$city.x, "<br>",
"population of millennials:", la.cities.long.lat.joined.3$population, "<br>",
"median age:", la.cities.long.lat.joined.3$age, "<br>",
"avg household size:", la.cities.long.lat.joined.3$household_size, "<br>",
"avg education level?", "<br>",
"avg income?", "<br>",
"# of public schools?", "<br>",
"marriage stats?", "<br>",
"avg cost of living"), #pop needs to be a string to show
label = ~la.cities.long.lat.joined.3$zipcode) #zipcode also needs to be a string
#Gender dataset and graph
# Build a per-city gender table by dropping columns positionally.
# NOTE(review): the -1:-3 / -2:-4 / -4:-5 drops depend on the exact
# column order of la.cities.long.lat.joined.3 -- fragile; confirm that
# the surviving first column really is the city name.
la.cities.pie <- la.cities.long.lat.joined.3
la.cities.pie <- la.cities.pie[-1:-3]
la.cities.pie <- la.cities.pie[-2:-4]
la.cities.pie <- la.cities.pie[-4:-5]
names(la.cities.pie)[1] <-"city"
#Create dataset for barchart
# Mean of each remaining column per city.
gender.list <- aggregate(la.cities.pie, by=list(la.cities.pie$city),
FUN=mean, na.rm=TRUE)
gender.list <- gender.list[,-2]
names(gender.list)[1] <- "city"
#Reshape
gender.list.t=t(gender.list)
names(gender.list.t)[1] <- "gender"
#Barchart
# Wide -> long: one row per (city, gender) with the count in `gender`.
data.new <- reshape(gender.list,
varying = c("males", "females"),
v.names = "gender",
timevar = "gen",
times = c("males", "females"),
direction = "long")
colors <- c("pink", "skyblue")
#education and income dataset
# Keep only ten named LA-area cities, then drop columns positionally to
# isolate the education fields (same fragility caveat as above).
income.race <- read_csv("income_race.csv")
newdata <- income.race[ which(income.race$GEO_NAME=='City of Los Angeles' | income.race$GEO_NAME=='Inglewood' |
income.race$GEO_NAME=='Santa Monica' | income.race$GEO_NAME=='Torrance' |
income.race$GEO_NAME=='Whittier' | income.race$GEO_NAME=='Long Beach' |
income.race$GEO_NAME=='Pasadena' | income.race$GEO_NAME=='Glendale' |
income.race$GEO_NAME=='Burbank' | income.race$GEO_NAME=='Alhambra'
), ]
newdata <- newdata[-1:-2]
newdata <- newdata[-16:-17]
newdata <- newdata[-2:-7]
edu <- newdata[-5:-9]
# Wide -> long over the three education-attainment columns.
new.edu <- reshape(edu,
varying = c("LESS_THAN_HIGH_SCHOOL", "AT_LEAST_BACHELORS", "GRADUATE_DEGREE"),
v.names = "education",
timevar = "edu",
times = c("LESS_THAN_HIGH_SCHOOL", "AT_LEAST_BACHELORS", "GRADUATE_DEGREE"),
direction = "long")
colorsedu <- c("red", "skyblue", "yellow")
# Dropdown choices for the UI.
city.choices <- sort(unique(la.cities.long.lat.joined.3$city.x))
gender.choices <- c("female-heavy", "male-heavy", "more or less equal (5% error)")
#personal income data
# Percentage of each zip's population that is female.
la.cities.long.lat.joined.3 <- la.cities.long.lat.joined.3 %>%
mutate(gender.equality = females/(males+females)*100)
# UI: sidebar carries the app description; main panel holds three tabs
# (summary chart, animated annual trend slider, per-city leaflet map).
ui <- fluidPage(
titlePanel("The millennial trend in LA county - by Vee Duong, Veronica Estudillo, and Jennifer Ko"),
sidebarLayout(
sidebarPanel(
tags$head(
tags$style("description {white-space: nowrap;}")),
fluidRow(offset = 0, (textOutput(outputId = "description")))
),
mainPanel(
tabsetPanel(
tabPanel("Summary",
verbatimTextOutput(outputId = "la.millenials.graph.description"),
plotOutput(outputId = "la.millenials.graph")),
# Animated year slider (2009-2016) driving the trend bubble chart.
tabPanel("The Annual Trend",
sliderInput(inputId = "slide",
label = "slide to preferred year",
min = 2009,
max = 2016,
value = 2009,
animate = TRUE),
plotOutput(outputId = "annual.trend"),
textOutput(outputId = "warning"),
br()),
# City selector feeding the filtered leaflet map.
tabPanel("Your unique city",
selectInput(inputId = "select",
label = "Select city to see its unique demographics!",
choices = city.choices,
selected = la.cities.long.lat.joined.3$city.x[1]),
leafletOutput(outputId = "la.millenial.pop"),
br())
)
)
)
)
# Server logic for the three tabs: summary bar chart, animated annual
# trend, and the per-city leaflet map, plus the static text outputs.
server <- function(input, output, session) {
# NOTE(review): these reactives pass the selected city name (a string)
# to sample_n(), which takes a row count, and none of them are referenced
# by any output below -- they look like leftover code; confirm and remove.
cities.subset <- reactive(la.cities.long.lat.joined.3 %>%
sample_n(input$select))
##
cities.subset.3 <- reactive(gender.list.t %>%
sample_n(input$select))
cities.subset.4 <- reactive(la.cities.long.lat.joined.3 %>%
sample_n(input$select))
# Leaflet map restricted to zip codes whose city matches input$select.
output$la.millenial.pop <- renderLeaflet({
input.city <- la.cities.long.lat.joined.3 %>%
mutate(is.selected.city =
ifelse(la.cities.long.lat.joined.3$city.x == input$select,
"city",
"not.city")) %>%
filter(is.selected.city == "city")
input.city %>%
leaflet() %>%
addTiles() %>%
addCircleMarkers(lat = ~latitude,
lng = ~longitude,
clusterOptions = markerClusterOptions(),
popup = paste(input.city$city.x, "<br>",
"population of millennials:", input.city$population, "<br>",
"median age:", input.city$age, "<br>",
"avg household size:", input.city$household_size, "<br>",
"number of males", input.city$males, "<br>",
"number of females", input.city$females, "<br>"), #pop needs to be a string to show
label = input.city$zipcode)
})
# Summary chart: mean age per city, flipped bars colored by whether the
# city's mean age falls in the 23-38 "millennial" band.
# NOTE(review): the on-screen description says ages 22-37, but the code
# filters on mean.age > 23 & < 38 -- confirm which range is intended.
output$la.millenials.graph <- renderPlot({
la.millenials <- la.cities %>%
group_by(city) %>%
summarize(mean.age = mean(age)) %>%
filter(mean.age > 23 & mean.age < 38) %>%
select(city)
la.cities %>%
group_by(city) %>%
summarize(mean.age = mean(age)) %>%
mutate(Millennials = ifelse(mean.age > 23 & mean.age < 38,
"Millennial city",
"Non-milennial city")) %>%
ggplot(aes(x = city,
y = mean.age)) +
geom_bar(aes(fill = Millennials),
stat = "identity") +
ylab("Mean Age")+
ggtitle("Mean age in Non-Millennial & Millennial Cities") +
coord_flip()})
# These statements run once per client session and mutate the session's
# view of the app-level data frames (factor releveling, year extraction).
la.cities$city <- factor(la.cities$city,
levels = rev(levels(factor(la.cities$city))))
# NOTE(review): cities.subset.5 also appears unused -- confirm.
cities.subset.5 <- reactive(la.cities %>%
sample_n(input$slide))
la.joined.data$DATE <- as.numeric(str_sub(la.joined.data$DATE,1,4))
la.joined.data$Population <- as.numeric(la.joined.data$Population)
# Bubble chart: median age vs year; size = population, color = education.
# The slider reveals years from 2009 up to the selected year.
output$annual.trend <- renderPlot({
la.joined.data %>%
filter(between(DATE, 2009, input$slide)) %>%
ggplot(aes(x = DATE,
y = Median_Age)) +
scale_x_continuous("Year",
limits = c(2009,2016),
breaks = c(2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016)) +
scale_y_continuous("Median Age",
limits = c(33, 37)) +
geom_point(aes(size = Population,
color = Education)) +
ggtitle("Median Age, Population, and Education in LA over time") +
xlab("Year") +
ylab("Median Age")
})
# Static caption/description text outputs.
output$warning <- renderText(
paste("** Note: Education = proportion of people aged 25 or older with associate's degree or higher")
)
output$description <- renderText(
paste("The county of Los Angeles has become populated due to its rising popularity amongst millennials today. Our app is aimed towards people (specifically, the millennials) who are interested in moving to LA and would like to know more about unique cities in the county. We will examine population across different cities and any potential trend in education and population size across the years. Our sources include various census data.")
)
output$la.millenials.graph.description <- renderText(
paste('The "millennials" as described in the graph below are individuals aged between 22 and 37. "Non-millennials" are those whose ages do not lie within the range.')
)
}
# Launch the application.
shinyApp(ui = ui, server = server)
|
/hw3_u_final.R
|
no_license
|
keunyoungko/millennial_migration_los_angeles
|
R
| false
| false
| 13,604
|
r
|
#install.packages('tabulizer')
#install.packages('rJava')
#install.packages('devtools')
#install.packages('pdftools')
#install.packages('data.table')
library(rvest)
library(tidyverse)
library(data.table)
library(tidytext)
library(stringr)
library(ggplot2)
library(shiny)
library(data.table)
library(leaflet)
library(geojsonio)
library(reshape2)
#getwd()
#setwd("C:/Users/veedu/Desktop/Spring 2019/Math 216/HW 3/HW3-2")
# ---- County-level time series for LA (one CSV per indicator, FRED-style:
# a DATE column plus a single value column). ----
education <- read_csv("education.csv")
median.age <- read_csv("median_age.csv")
net.migration <- read_csv("net_mig.csv")
per.capita.personal.income <- read_csv("per_capita_income.csv")
resident.pop <- read_csv("resident_pop.csv")
ur <- read_csv("ur.csv")
# Rename columns
colnames(education) <- c("DATE", "Education")
colnames(median.age) <- c("DATE", "Median_Age")
colnames(net.migration) <- c("DATE", "Net_Migration")
colnames(per.capita.personal.income) <- c("DATE", "Per_Capital_Personal_Income")
colnames(resident.pop) <- c("DATE", "Population")
colnames(ur) <- c("DATE", "Unemployment_Rate")
# Change to numerical values
education$Education <- as.numeric(education$Education)
median.age$Median_Age <- as.numeric(median.age$Median_Age)
net.migration$Net_Migration <- as.numeric(net.migration$Net_Migration)
per.capita.personal.income$Per_Capital_Personal_Income <- as.numeric(per.capita.personal.income$Per_Capital_Personal_Income)
resident.pop$Population <- as.numeric(resident.pop$Population)
ur$Unemployment_Rate <- as.numeric(ur$Unemployment_Rate)
# Merge all indicators into one table keyed on DATE.
la.joined.data <- left_join(resident.pop,
                            ur,
                            by = c("DATE"))
la.joined.data <- left_join(la.joined.data,
                            education,
                            by = c("DATE"))
la.joined.data <- left_join(la.joined.data,
                            median.age,
                            by = c("DATE"))
la.joined.data <- left_join(la.joined.data,
                            net.migration,
                            by = c("DATE"))
la.joined.data <- left_join(la.joined.data,
                            per.capita.personal.income,
                            by = c("DATE"))
# Drop row 9 -- presumably an incomplete/extra year; TODO confirm against the CSVs.
la.joined.data <- la.joined.data[-c(9), ]
#### R Shiny App
# ---- City-level 2010 census data by zip code. ----
la.cities <- read_csv("2010_Census_Populations_by_Zip_Code.csv") #use the census data here
# Drop the first row -- presumably a junk/header row in the raw file; TODO confirm.
la.cities <- la.cities[-c(1), ]
colnames(la.cities) <- c("zipcode", "city",
                         "latitude", "longitude",
                         "population", "age", #age = median age
                         "males", "females",
                         "total_household", "household_size")
# Raw columns are read as character; coerce the numeric ones.
la.cities$zipcode <- as.numeric(la.cities$zipcode)
la.cities$latitude <- as.numeric(la.cities$latitude)
la.cities$longitude <- as.numeric(la.cities$longitude)
la.cities$population <- as.numeric(la.cities$population)
la.cities$age <- as.numeric(la.cities$age)
la.cities$males <- as.numeric(la.cities$males)
la.cities$females <- as.numeric(la.cities$females)
la.cities$total_household <- as.numeric(la.cities$total_household)
la.cities$household_size <- as.numeric(la.cities$household_size)
# Character duplicates of the numeric columns (leaflet popups need strings).
la.cities$population.2 <- as.character(la.cities$population)
la.cities$age.2 <- as.character(la.cities$age)
la.cities$males.2 <- as.character(la.cities$males)
la.cities$females.2 <- as.character(la.cities$females)
la.cities$total_household.2 <- as.character(la.cities$total_household)
# ---- Zip -> latitude/longitude lookup, restricted to California. ----
cities.long.lat <- read_csv("uszips.csv") #use the uszips file here
#this data set contains all cities in CA
ca.cities.long.lat <- cities.long.lat %>%
  filter(state_id == "CA") %>%
  select(zip, lat, lng, city, state_id)
colnames(ca.cities.long.lat) <- c("zipcode", "latitude",
                                  "longitude", "city", "state")
ca.cities.long.lat$zipcode <- as.numeric(ca.cities.long.lat$zipcode)
ca.cities.long.lat$latitude <- as.numeric(ca.cities.long.lat$latitude)
ca.cities.long.lat$longitude <- as.numeric(ca.cities.long.lat$longitude)
# Keep only the columns needed for the join with the coordinate lookup.
la.cities.2 <- la.cities %>%
  select(zipcode, city, population,
         age, males, females,
         total_household, household_size)
#join data so we can get stats such as med age
la.cities.long.lat.joined <- left_join(ca.cities.long.lat,
                                       la.cities.2,
                                       by = c("zipcode"))
# city.x = name from the zip lookup, city.y = name from the census file;
# rows without census data are dropped by na.omit().
la.cities.long.lat.joined.2 <- la.cities.long.lat.joined %>%
  select(zipcode, latitude, longitude,
         city.x, city.y, population,
         age, males, females,
         total_household, household_size) %>%
  na.omit()
la.cities.long.lat.joined.2$population <-
  as.character(la.cities.long.lat.joined.2$population)
la.cities.long.lat.joined.3 <- la.cities.long.lat.joined.2
# zipcode as character for leaflet labels; population back to numeric.
la.cities.long.lat.joined.3$zipcode <- as.character(la.cities.long.lat.joined.3$zipcode)
la.cities.long.lat.joined.3$population <- as.numeric(la.cities.long.lat.joined.3$population)
#make a map of LA with the zipcodes
# NOTE(review): this map object is built at load time but the app's
# output$la.millenial.pop is rendered separately in server() -- this global
# appears unused by the UI; confirm before relying on it.
la.millenial.pop <- la.cities.long.lat.joined.3 %>%
  leaflet() %>%
  addTiles() %>%
  addCircleMarkers(lat = ~latitude,
                   lng = ~longitude,
                   clusterOptions = markerClusterOptions(),
                   popup = paste(la.cities.long.lat.joined.3$city.x, "<br>",
                                 "population of millennials:", la.cities.long.lat.joined.3$population, "<br>",
                                 "median age:", la.cities.long.lat.joined.3$age, "<br>",
                                 "avg household size:", la.cities.long.lat.joined.3$household_size, "<br>",
                                 "avg education level?", "<br>",
                                 "avg income?", "<br>",
                                 "# of public schools?", "<br>",
                                 "marriage stats?", "<br>",
                                 "avg cost of living"), #pop needs to be a string to show
                   label = ~la.cities.long.lat.joined.3$zipcode) #zipcode also needs to be a string
#Gender dataset and graph
# Positional column drops below depend on the exact column order of
# la.cities.long.lat.joined.3; the result keeps city plus the gender counts.
la.cities.pie <- la.cities.long.lat.joined.3
la.cities.pie <- la.cities.pie[-1:-3]
la.cities.pie <- la.cities.pie[-2:-4]
la.cities.pie <- la.cities.pie[-4:-5]
names(la.cities.pie)[1] <-"city"
#Create dataset for barchart
# NOTE(review): aggregate() also averages the character `city` column
# (yielding NA with warnings); that column is discarded by [,-2] below.
gender.list <- aggregate(la.cities.pie, by=list(la.cities.pie$city),
                         FUN=mean, na.rm=TRUE)
gender.list <- gender.list[,-2]
names(gender.list)[1] <- "city"
#Reshape
gender.list.t=t(gender.list)
names(gender.list.t)[1] <- "gender"
#Barchart
# Wide -> long: one row per (city, gen) with the count stored in `gender`.
data.new <- reshape(gender.list,
                    varying = c("males", "females"),
                    v.names = "gender",
                    timevar = "gen",
                    times = c("males", "females"),
                    direction = "long")
colors <- c("pink", "skyblue")
#education and income dataset
income.race <- read_csv("income_race.csv")
# Keep ten selected LA-county cities.
newdata <- income.race[ which(income.race$GEO_NAME=='City of Los Angeles' | income.race$GEO_NAME=='Inglewood' |
                              income.race$GEO_NAME=='Santa Monica' | income.race$GEO_NAME=='Torrance' |
                              income.race$GEO_NAME=='Whittier' | income.race$GEO_NAME=='Long Beach' |
                              income.race$GEO_NAME=='Pasadena' | income.race$GEO_NAME=='Glendale' |
                              income.race$GEO_NAME=='Burbank' | income.race$GEO_NAME=='Alhambra'
), ]
# Positional column drops -- depend on the exact layout of income_race.csv.
newdata <- newdata[-1:-2]
newdata <- newdata[-16:-17]
newdata <- newdata[-2:-7]
edu <- newdata[-5:-9]
# Wide -> long over the three education-attainment columns.
# NOTE(review): new.edu/colorsedu/data.new do not appear to be used by the
# app's outputs below -- confirm they are not dead code.
new.edu <- reshape(edu,
                   varying = c("LESS_THAN_HIGH_SCHOOL", "AT_LEAST_BACHELORS", "GRADUATE_DEGREE"),
                   v.names = "education",
                   timevar = "edu",
                   times = c("LESS_THAN_HIGH_SCHOOL", "AT_LEAST_BACHELORS", "GRADUATE_DEGREE"),
                   direction = "long")
colorsedu <- c("red", "skyblue", "yellow")
# Choices for the city selectInput in the UI.
city.choices <- sort(unique(la.cities.long.lat.joined.3$city.x))
gender.choices <- c("female-heavy", "male-heavy", "more or less equal (5% error)")
#personal income data
# Percentage of females per row.
la.cities.long.lat.joined.3 <- la.cities.long.lat.joined.3 %>%
  mutate(gender.equality = females/(males+females)*100)
# Shiny UI: sidebar holds the app description; main panel has three tabs
# (summary bar chart, year slider + scatter plot, city picker + leaflet map).
ui <- fluidPage(
  titlePanel("The millennial trend in LA county - by Vee Duong, Veronica Estudillo, and Jennifer Ko"),
  sidebarLayout(
    sidebarPanel(
      tags$head(
        tags$style("description {white-space: nowrap;}")),
      fluidRow(offset = 0, (textOutput(outputId = "description")))
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Summary",
                 verbatimTextOutput(outputId = "la.millenials.graph.description"),
                 plotOutput(outputId = "la.millenials.graph")),
        tabPanel("The Annual Trend",
                 # input$slide drives the year filter of output$annual.trend.
                 sliderInput(inputId = "slide",
                             label = "slide to preferred year",
                             min = 2009,
                             max = 2016,
                             value = 2009,
                             animate = TRUE),
                 plotOutput(outputId = "annual.trend"),
                 textOutput(outputId = "warning"),
                 br()),
        tabPanel("Your unique city",
                 # input$select drives the map filter of output$la.millenial.pop.
                 selectInput(inputId = "select",
                             label = "Select city to see its unique demographics!",
                             choices = city.choices,
                             selected = la.cities.long.lat.joined.3$city.x[1]),
                 leafletOutput(outputId = "la.millenial.pop"),
                 br())
      )
    )
  )
)
# Shiny server: wires the three UI tabs to their outputs.
# Fixes vs. the original:
#  * removed the dead reactives cities.subset / cities.subset.3 /
#    cities.subset.4 / cities.subset.5 -- they were never invoked, and would
#    error if they ever were (sample_n() was given input$select, a city name
#    string, or input$slide, a year, as the sample size);
#  * removed the unused la.millenials intermediate inside the summary plot;
#  * fixed the user-facing typo "Non-milennial city" -> "Non-millennial city".
server <- function(input, output, session) {
  # Leaflet map restricted to the city chosen in the "Your unique city" tab.
  output$la.millenial.pop <- renderLeaflet({
    input.city <- la.cities.long.lat.joined.3 %>%
      mutate(is.selected.city =
               ifelse(la.cities.long.lat.joined.3$city.x == input$select,
                      "city",
                      "not.city")) %>%
      filter(is.selected.city == "city")
    input.city %>%
      leaflet() %>%
      addTiles() %>%
      addCircleMarkers(lat = ~latitude,
                       lng = ~longitude,
                       clusterOptions = markerClusterOptions(),
                       popup = paste(input.city$city.x, "<br>",
                                     "population of millennials:", input.city$population, "<br>",
                                     "median age:", input.city$age, "<br>",
                                     "avg household size:", input.city$household_size, "<br>",
                                     "number of males", input.city$males, "<br>",
                                     "number of females", input.city$females, "<br>"), #pop needs to be a string to show
                       label = input.city$zipcode)
  })
  # Summary tab: mean age per city, coloured by millennial status (mean age
  # strictly between 23 and 38).
  output$la.millenials.graph <- renderPlot({
    la.cities %>%
      group_by(city) %>%
      summarize(mean.age = mean(age)) %>%
      mutate(Millennials = ifelse(mean.age > 23 & mean.age < 38,
                                  "Millennial city",
                                  "Non-millennial city")) %>%
      ggplot(aes(x = city,
                 y = mean.age)) +
      geom_bar(aes(fill = Millennials),
               stat = "identity") +
      ylab("Mean Age")+
      ggtitle("Mean age in Non-Millennial & Millennial Cities") +
      coord_flip()})
  # Reverse the alphabetical factor order so coord_flip() lists cities A-Z
  # top to bottom. This modifies a server-local copy of la.cities, which the
  # renderPlot closure above picks up via lexical scoping.
  la.cities$city <- factor(la.cities$city,
                           levels = rev(levels(factor(la.cities$city))))
  # Extract the year from DATE for filtering/plotting (server-local copies).
  la.joined.data$DATE <- as.numeric(str_sub(la.joined.data$DATE,1,4))
  la.joined.data$Population <- as.numeric(la.joined.data$Population)
  # Annual-trend tab: scatter of median age by year, sized by population and
  # coloured by education, cumulative up to the slider year.
  output$annual.trend <- renderPlot({
    la.joined.data %>%
      filter(between(DATE, 2009, input$slide)) %>%
      ggplot(aes(x = DATE,
                 y = Median_Age)) +
      scale_x_continuous("Year",
                         limits = c(2009,2016),
                         breaks = c(2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016)) +
      scale_y_continuous("Median Age",
                         limits = c(33, 37)) +
      geom_point(aes(size = Population,
                     color = Education)) +
      ggtitle("Median Age, Population, and Education in LA over time") +
      xlab("Year") +
      ylab("Median Age")
  })
  # Static text outputs.
  output$warning <- renderText(
    paste("** Note: Education = proportion of people aged 25 or older with associate's degree or higher")
  )
  output$description <- renderText(
    paste("The county of Los Angeles has become populated due to its rising popularity amongst millennials today. Our app is aimed towards people (specifically, the millennials) who are interested in moving to LA and would like to know more about unique cities in the county. We will examine population across different cities and any potential trend in education and population size across the years. Our sources include various census data.")
  )
  output$la.millenials.graph.description <- renderText(
    paste('The "millennials" as described in the graph below are individuals aged between 22 and 37. "Non-millennials" are those whose ages do not lie within the range.')
  )
}
shinyApp(ui = ui, server = server)
|
# Overlay a regular grid on the simulation domain and average the true
# intensity surface within each grid square (for comparison with grid-level
# predictions).
#
# Arguments:
#   resolution - c(nx, ny): size of one grid square, in domain cells.
#   dat1       - list with element rf.s, the true intensity surface, indexed
#                linearly (a dimensions[2] x dimensions[1] matrix, or a
#                vector of that length).
#   dimensions - c(width, height) of the domain in cells; each entry must be
#                a multiple of the corresponding resolution entry.
#   type       - 'truth' returns the per-square mean intensity vector,
#                'grid' returns the square-ID matrix itself.
#   absolute   - if FALSE, centre rf.s by its overall mean first (relative
#                rather than absolute intensity).
make_truth_grid <- function(resolution,
                            dat1,
                            dimensions,
                            type=c('truth', 'grid'),
                            absolute = TRUE){
  # Validate type up front; the original compared the full default vector in
  # if(), which errors on R >= 4.2 and silently returned NULL on a typo.
  type <- match.arg(type)

  n.x <- dimensions[1] / resolution[1]  # grid squares per row
  n.y <- dimensions[2] / resolution[2]  # grid squares per column

  # Square-ID matrix: rows index y cells, columns index x cells.
  grid <- matrix(NA, nrow = dimensions[2], ncol = dimensions[1])
  grid_numbers <- seq_len(n.x * n.y)

  # Fill the grid band by band, numbering squares row-major. (The original
  # mixed the x and y counts -- it used dimensions[2]/resolution[2] for the
  # per-row square count and resolution[1] for the band height -- so it only
  # worked for square setups; this form also handles rectangular grids and
  # is identical on square ones.)
  for (j in seq_len(n.y)) {
    band_ids <- grid_numbers[((j - 1) * n.x + 1):(j * n.x)]
    rows <- ((j - 1) * resolution[2] + 1):(j * resolution[2])
    for (i in seq_len(n.x)) {
      cols <- ((i - 1) * resolution[1] + 1):(i * resolution[1])
      grid[rows, cols] <- band_ids[i]
    }
  }

  # Average the (optionally centred) truth surface within each square.
  data <- if (absolute) dat1$rf.s else dat1$rf.s - mean(dat1$rf.s)
  output <- rep(NA_real_, max(grid))
  for (i in seq_len(max(grid))) {
    output[i] <- mean(data[grid == i])
  }

  if (type == 'truth') {
    return(output)
  }
  grid
}
|
/make_truth_grid.R
|
no_license
|
ssarahas/MScProject_ISDM
|
R
| false
| false
| 1,378
|
r
|
# function to make a grid of the truth data to compare to predicted
# will be the average intensity of each grid square
#
# resolution: c(nx, ny) size of one grid square in domain cells.
# dat1:       list with element rf.s, the true intensity surface (indexed
#             linearly against the dimensions[2] x dimensions[1] grid).
# dimensions: c(width, height) of the domain in cells.
# type:       'truth' -> per-square mean intensity; 'grid' -> square-ID matrix.
# absolute:   if FALSE, centre rf.s by its mean before averaging.
#
# NOTE(review): the y-loop indexes grid_numbers with dimensions[2]/resolution[2]
# and the band rows with resolution[1]; this is only consistent when the layout
# is square (equal per-axis square counts and resolution[1] == resolution[2]).
# Confirm callers only use square setups.
make_truth_grid <- function(resolution,
                            dat1,
                            dimensions,
                            type=c('truth', 'grid'),
                            absolute = TRUE){
  minnum <- resolution[1]-1
  # Square-ID matrix: rows index y cells, columns index x cells.
  grid = matrix(NA, nrow=dimensions[2], ncol=dimensions[1])
  grid_numbers <- 1:prod(dimensions/resolution)
  # loop for y values
  for(j in 1:(dimensions[2]/resolution[2])){
    index.y <- seq(((j-1)*(dimensions[2]/resolution[2]))+1,(j*dimensions[2]/resolution[2]),1)
    temp_grid_numbers <- grid_numbers[index.y]
    row_nos <- seq((j*resolution[1]-minnum),(j*resolution[1]),1)
    # loop for x values
    for(i in 1:(dimensions[1]/resolution[1])){
      index.x <- seq((i*resolution[1]-minnum),(i*resolution[1]),1)
      grid[row_nos,index.x] <- temp_grid_numbers[i]
    }
  }
  # sum average abundance by grid square for truth
  output <- rep(NA, length(1:max(grid)))
  if(absolute == TRUE){
    data <- dat1$rf.s
  }
  if(absolute == FALSE){
    # Relative intensity: centre by the overall mean.
    data <- dat1$rf.s-mean(dat1$rf.s)
  }
  for(i in 1:max(grid)){
    marker <- which(grid==i)
    output[i] <- mean(data[marker])
  }
  if(type=='truth'){return(output)}
  if(type=='grid'){return(grid)}
}
|
library(qtl)
library(snowfall)
# Load the F2 cross object (43 traits) produced by the upstream
# crossover-cleaning step.
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
# Impute genotypes and compute genotype probabilities on a 1 cM step grid.
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
# Start a 16-CPU snowfall cluster and ship the cross object to the workers.
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
# Fixed seed so the permutation thresholds are reproducible.
set.seed(7)
system.time(
  scantwo.perm.imp.5.7 <-
    scantwo(LG.f2.after.crossover,pheno.col=10:11,method="hk",n.perm=10,n.cluster = 16)
)
# NOTE(review): this assigns ALL phenotype column names to a permutation
# object computed from only pheno.col 10:11 -- confirm the intended labels.
names(scantwo.perm.imp.5.7) <- colnames(LG.f2.after.crossover$pheno)
sfStop()
# save output
save(scantwo.perm.imp.5.7, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.5.7.Rdata")
|
/F2/scantwo/scantwo_perm_5.7.R
|
no_license
|
leejimmy93/KIAT_cabernet
|
R
| false
| false
| 746
|
r
|
library(qtl)
library(snowfall)
# Load the F2 cross object (43 traits) produced by the upstream
# crossover-cleaning step.
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
# Impute genotypes and compute genotype probabilities on a 1 cM step grid.
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
# Start a 16-CPU snowfall cluster and ship the cross object to the workers.
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
# Fixed seed so the permutation thresholds are reproducible.
set.seed(7)
system.time(
  scantwo.perm.imp.5.7 <-
    scantwo(LG.f2.after.crossover,pheno.col=10:11,method="hk",n.perm=10,n.cluster = 16)
)
# NOTE(review): this assigns ALL phenotype column names to a permutation
# object computed from only pheno.col 10:11 -- confirm the intended labels.
names(scantwo.perm.imp.5.7) <- colnames(LG.f2.after.crossover$pheno)
sfStop()
# save output
save(scantwo.perm.imp.5.7, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.5.7.Rdata")
|
#' @include MariaDBConnection.R
NULL
#' Connect/disconnect to a MariaDB DBMS
#'
#' These methods are straight-forward implementations of the corresponding
#' generic functions.
#'
#' @section Time zones:
#' MySQL and MariaDB support named time zones,
#' they must be installed on the server.
#' See <https://dev.mysql.com/doc/mysql-g11n-excerpt/8.0/en/time-zone-support.html>
#' for more details.
#' Without installation, time zone support is restricted to UTC offset,
#' which cannot take into account DST offsets.
#'
#' @section Secure passwords:
#' Avoid storing passwords hard-coded in the code, use e.g. the \pkg{keyring}
#' package to store and retrieve passwords in a secure way.
#'
#' The MySQL client library (but not MariaDB) supports a `.mylogin.cnf` file
#' that can be passed in the `default.file` argument.
#' This file can contain an obfuscated password, which is not a secure way
#' to store passwords but may be acceptable if the user is aware of the
#' restrictions.
#' The availability of this feature depends on the client library used
#' for compiling the \pkg{RMariaDB} package.
#' Windows and macOS binaries on CRAN are compiled against the MariaDB Connector/C
#' client library which do not support this feature.
#'
#' @param drv an object of class [MariaDBDriver-class] or
#' [MariaDBConnection-class].
#' @param username,password Username and password. If username omitted,
#' defaults to the current user. If password is omitted, only users
#' without a password can log in.
#' @param dbname string with the database name or NULL. If not NULL, the
#' connection sets the default database to this value.
#' @param host string identifying the host machine running the MariaDB server or
#' NULL. If NULL or the string `"localhost"`, a connection to the local
#' host is assumed.
#' @param unix.socket (optional) string of the unix socket or named pipe.
#' @param port (optional) integer of the TCP/IP default port.
#' @param client.flag (optional) integer setting various MariaDB client flags,
#' see [Client-flags] for details.
#' @param groups string identifying a section in the `default.file` to use
#' for setting authentication parameters (see [MariaDB()]).
#' @param default.file string of the filename with MariaDB client options,
#' only relevant if `groups` is given. The default value depends on the
#' operating system (see references), on Linux and OS X the files
#' `~/.my.cnf` and `~/.mylogin.cnf` are used. Expanded with [normalizePath()].
#' @param ssl.key (optional) string of the filename of the SSL key file to use.
#' Expanded with [normalizePath()].
#' @param ssl.cert (optional) string of the filename of the SSL certificate to
#' use. Expanded with [normalizePath()].
#' @param ssl.ca (optional) string of the filename of an SSL certificate
#' authority file to use. Expanded with [normalizePath()].
#' @param ssl.capath (optional) string of the path to a directory containing
#' the trusted SSL CA certificates in PEM format. Expanded with
#' [normalizePath()].
#' @param ssl.cipher (optional) string list of permitted ciphers to use for SSL
#' encryption.
#' @param ... Unused, needed for compatibility with generic.
#' @param bigint The R type that 64-bit integer types should be mapped to,
#' default is [bit64::integer64], which allows the full range of 64 bit
#' integers.
#' @param timeout Connection timeout, in seconds. Use `Inf` or a negative value
#' for no timeout.
#' @param timezone (optional) time zone for the connection,
#' the default corresponds to UTC.
#' Set this argument if your server or database is configured with a different
#' time zone than UTC.
#' Set to `NULL` to automatically determine the server time zone.
#' @param timezone_out The time zone returned to R.
#' The default is to use the value of the `timezone` argument,
#' `"+00:00"` is converted to `"UTC"`
#' If you want to display datetime values in the local timezone,
#' set to [Sys.timezone()] or `""`.
#' This setting does not change the time values returned, only their display.
#' @references
#' Configuration files: https://mariadb.com/kb/en/library/configuring-mariadb-with-mycnf/
#' @export
#' @examples
#' \dontrun{
#' # Connect to a MariaDB database running locally
#' con <- dbConnect(RMariaDB::MariaDB(), dbname = "mydb")
#' # Connect to a remote database with username and password
#' con <- dbConnect(RMariaDB::MariaDB(), host = "mydb.mycompany.com",
#' user = "abc", password = "def")
#' # But instead of supplying the username and password in code, it's usually
#' # better to set up a group in your .my.cnf (usually located in your home
#' # directory). Then it's less likely you'll inadvertently share them.
#' con <- dbConnect(RMariaDB::MariaDB(), group = "test")
#'
#' # Always cleanup by disconnecting the database
#' dbDisconnect(con)
#' }
#'
#' # All examples use the rs-dbi group by default.
#' if (mariadbHasDefault()) {
#' con <- dbConnect(RMariaDB::MariaDB(), dbname = "test")
#' con
#' dbDisconnect(con)
#' }
#' @export
setMethod("dbConnect", "MariaDBDriver",
function(drv, dbname = NULL, username = NULL, password = NULL, host = NULL,
unix.socket = NULL, port = 0, client.flag = 0,
groups = "rs-dbi", default.file = NULL, ssl.key = NULL, ssl.cert = NULL,
ssl.ca = NULL, ssl.capath = NULL, ssl.cipher = NULL, ...,
bigint = c("integer64", "integer", "numeric", "character"),
timeout = 10, timezone = "+00:00", timezone_out = NULL) {
bigint <- match.arg(bigint)
if (is.infinite(timeout)) {
timeout <- -1L
} else {
timeout <- as.integer(timeout)
}
# Make sure that `~` is resolved correctly:
if (!is.null(default.file)) {
default.file <- normalizePath(default.file)
}
if (!is.null(ssl.key)) {
ssl.key <- normalizePath(ssl.key)
}
if (!is.null(ssl.cert)) {
ssl.cert <- normalizePath(ssl.cert)
}
if (!is.null(ssl.ca)) {
ssl.ca <- normalizePath(ssl.ca)
}
if (!is.null(ssl.capath)) {
ssl.capath <- normalizePath(ssl.capath)
}
ptr <- connection_create(
host, username, password, dbname, as.integer(port), unix.socket,
as.integer(client.flag), groups, default.file,
ssl.key, ssl.cert, ssl.ca, ssl.capath, ssl.cipher,
timeout
)
info <- connection_info(ptr)
conn <- new("MariaDBConnection",
ptr = ptr,
host = info$host,
db = info$dbname,
bigint = bigint
)
on.exit(dbDisconnect(conn))
if (!is.null(timezone)) {
# Side effect: check if time zone valid
dbExecute(conn, paste0("SET time_zone = ", dbQuoteString(conn, timezone)))
} else {
timezone <- dbGetQuery(conn, "SELECT @@SESSION.time_zone")[[1]]
}
# Check if this is a valid time zone in R:
timezone <- check_tz(timezone)
if (is.null(timezone_out)) {
timezone_out <- timezone
} else {
timezone_out <- check_tz(timezone_out)
}
conn@timezone <- timezone
conn@timezone_out <- timezone_out
dbExecute(conn, "SET autocommit = 0")
on.exit(NULL)
conn
}
)
# Validate a time zone string and translate it into a name R recognizes.
# Returns the (possibly translated) zone, or "" ("local time") with a warning
# if the zone is invalid.
check_tz <- function(timezone) {
  # Name of the caller's argument (e.g. "timezone" or "timezone_out"),
  # used to point at the right argument in the warning below.
  arg_name <- deparse(substitute(timezone))
  # MySQL reports UTC as the offset "+00:00"; map it to the name R knows.
  if (timezone == "+00:00") {
    timezone <- "UTC"
  }
  # Validate by attempting to use the zone. BUG FIX: the previous version
  # discarded the tryCatch() result, so the `timezone <- ""` assignment inside
  # the error handler only bound a local variable in the handler and the
  # invalid zone was returned unchanged; capture the result instead.
  timezone <- tryCatch(
    {
      lubridate::force_tz(as.POSIXct("2021-03-01 10:40"), timezone)
      timezone
    },
    error = function(e) {
      warning(
        "Invalid time zone '", timezone, "', ",
        "falling back to local time.\n",
        "Set the `", arg_name, "` argument to a valid time zone.\n",
        conditionMessage(e),
        call. = FALSE
      )
      # "" is interpreted downstream as the local time zone.
      ""
    }
  )
  timezone
}
#' @export
#' @import methods DBI
#' @importFrom hms hms
#' @importFrom bit64 integer64
#' @rdname dbConnect-MariaDBDriver-method
#' @examples
#' if (mariadbHasDefault()) {
#' # connect to a database and load some data
#' con <- dbConnect(RMariaDB::MariaDB(), dbname = "test")
#' dbWriteTable(con, "USArrests", datasets::USArrests, temporary = TRUE)
#'
#' # query
#' rs <- dbSendQuery(con, "SELECT * FROM USArrests")
#' d1 <- dbFetch(rs, n = 10) # extract data in chunks of 10 rows
#' dbHasCompleted(rs)
#' d2 <- dbFetch(rs, n = -1) # extract all remaining data
#' dbHasCompleted(rs)
#' dbClearResult(rs)
#' dbListTables(con)
#'
#' # clean up
#' dbDisconnect(con)
#' }
MariaDB <- function() {
  # Construct the S4 driver object; all connection details are supplied
  # later through dbConnect().
  new("MariaDBDriver")
}
#' Client flags
#'
#' Use for the `client.flag` argument to [dbConnect()], multiple flags can be
#' combined with a bitwise or (see [Logic]). The flags are provided for
#' completeness.
#'
#' @seealso
#' The `flags` argument at https://mariadb.com/kb/en/library/mysql_real_connect.
#'
#' @examples
#' \dontrun{
#' library(DBI)
#' library(RMariaDB)
#' con1 <- dbConnect(MariaDB(), client.flag = CLIENT_COMPRESS)
#' con2 <- dbConnect(
#' MariaDB(),
#' client.flag = CLIENT_COMPRESS | CLIENT_SECURE_CONNECTION
#' )
#' }
#'
#' @aliases CLIENT_LONG_PASSWORD CLIENT_FOUND_ROWS CLIENT_LONG_FLAG
#' CLIENT_CONNECT_WITH_DB CLIENT_NO_SCHEMA CLIENT_COMPRESS CLIENT_ODBC
#' CLIENT_LOCAL_FILES CLIENT_IGNORE_SPACE CLIENT_PROTOCOL_41 CLIENT_INTERACTIVE
#' CLIENT_SSL CLIENT_IGNORE_SIGPIPE CLIENT_TRANSACTIONS CLIENT_RESERVED
#' CLIENT_SECURE_CONNECTION CLIENT_MULTI_STATEMENTS CLIENT_MULTI_RESULTS
#' @name Client-flags
NULL
## The following client flags were copied from mysql_com.h (version 4.1.13)
## but it may not make sense to set some of this from RMariaDB.
## Combine multiple flags with a bitwise OR for the `client.flag` argument of
## dbConnect(), e.g. CLIENT_COMPRESS | CLIENT_SECURE_CONNECTION.
#' @export
CLIENT_LONG_PASSWORD <- 1 # new more secure passwords
#' @export
CLIENT_FOUND_ROWS <- 2 # Found instead of affected rows
#' @export
CLIENT_LONG_FLAG <- 4 # Get all column flags
#' @export
CLIENT_CONNECT_WITH_DB <- 8 # One can specify db on connect
#' @export
CLIENT_NO_SCHEMA <- 16 # Don't allow database.table.column
#' @export
CLIENT_COMPRESS <- 32 # Can use compression protocol
#' @export
CLIENT_ODBC <- 64 # Odbc client
#' @export
CLIENT_LOCAL_FILES <- 128 # Can use LOAD DATA LOCAL
#' @export
CLIENT_IGNORE_SPACE <- 256 # Ignore spaces before '('
#' @export
CLIENT_PROTOCOL_41 <- 512 # New 4.1 protocol
#' @export
CLIENT_INTERACTIVE <- 1024 # This is an interactive client
#' @export
CLIENT_SSL <- 2048 # Switch to SSL after handshake
#' @export
CLIENT_IGNORE_SIGPIPE <- 4096 # IGNORE sigpipes
#' @export
CLIENT_TRANSACTIONS <- 8192 # Client knows about transactions
#' @export
CLIENT_RESERVED <- 16384 # Old flag for 4.1 protocol
#' @export
CLIENT_SECURE_CONNECTION <- 32768 # New 4.1 authentication
#' @export
CLIENT_MULTI_STATEMENTS <- 65536 # Enable/disable multi-stmt support
#' @export
CLIENT_MULTI_RESULTS <- 131072 # Enable/disable multi-results
|
/R/connect.R
|
permissive
|
bpvgoncalves/RMariaDB
|
R
| false
| false
| 10,688
|
r
|
#' @include MariaDBConnection.R
NULL
#' Connect/disconnect to a MariaDB DBMS
#'
#' These methods are straight-forward implementations of the corresponding
#' generic functions.
#'
#' @section Time zones:
#' MySQL and MariaDB support named time zones,
#' they must be installed on the server.
#' See <https://dev.mysql.com/doc/mysql-g11n-excerpt/8.0/en/time-zone-support.html>
#' for more details.
#' Without installation, time zone support is restricted to UTC offset,
#' which cannot take into account DST offsets.
#'
#' @section Secure passwords:
#' Avoid storing passwords hard-coded in the code, use e.g. the \pkg{keyring}
#' package to store and retrieve passwords in a secure way.
#'
#' The MySQL client library (but not MariaDB) supports a `.mylogin.cnf` file
#' that can be passed in the `default.file` argument.
#' This file can contain an obfuscated password, which is not a secure way
#' to store passwords but may be acceptable if the user is aware of the
#' restrictions.
#' The availability of this feature depends on the client library used
#' for compiling the \pkg{RMariaDB} package.
#' Windows and macOS binaries on CRAN are compiled against the MariaDB Connector/C
#' client library which do not support this feature.
#'
#' @param drv an object of class [MariaDBDriver-class] or
#' [MariaDBConnection-class].
#' @param username,password Username and password. If username omitted,
#' defaults to the current user. If password is omitted, only users
#' without a password can log in.
#' @param dbname string with the database name or NULL. If not NULL, the
#' connection sets the default database to this value.
#' @param host string identifying the host machine running the MariaDB server or
#' NULL. If NULL or the string `"localhost"`, a connection to the local
#' host is assumed.
#' @param unix.socket (optional) string of the unix socket or named pipe.
#' @param port (optional) integer of the TCP/IP default port.
#' @param client.flag (optional) integer setting various MariaDB client flags,
#' see [Client-flags] for details.
#' @param groups string identifying a section in the `default.file` to use
#' for setting authentication parameters (see [MariaDB()]).
#' @param default.file string of the filename with MariaDB client options,
#' only relevant if `groups` is given. The default value depends on the
#' operating system (see references), on Linux and OS X the files
#' `~/.my.cnf` and `~/.mylogin.cnf` are used. Expanded with [normalizePath()].
#' @param ssl.key (optional) string of the filename of the SSL key file to use.
#' Expanded with [normalizePath()].
#' @param ssl.cert (optional) string of the filename of the SSL certificate to
#' use. Expanded with [normalizePath()].
#' @param ssl.ca (optional) string of the filename of an SSL certificate
#' authority file to use. Expanded with [normalizePath()].
#' @param ssl.capath (optional) string of the path to a directory containing
#' the trusted SSL CA certificates in PEM format. Expanded with
#' [normalizePath()].
#' @param ssl.cipher (optional) string list of permitted ciphers to use for SSL
#' encryption.
#' @param ... Unused, needed for compatibility with generic.
#' @param bigint The R type that 64-bit integer types should be mapped to,
#' default is [bit64::integer64], which allows the full range of 64 bit
#' integers.
#' @param timeout Connection timeout, in seconds. Use `Inf` or a negative value
#' for no timeout.
#' @param timezone (optional) time zone for the connection,
#' the default corresponds to UTC.
#' Set this argument if your server or database is configured with a different
#' time zone than UTC.
#' Set to `NULL` to automatically determine the server time zone.
#' @param timezone_out The time zone returned to R.
#' The default is to use the value of the `timezone` argument,
#' `"+00:00"` is converted to `"UTC"`
#' If you want to display datetime values in the local timezone,
#' set to [Sys.timezone()] or `""`.
#' This setting does not change the time values returned, only their display.
#' @references
#' Configuration files: https://mariadb.com/kb/en/library/configuring-mariadb-with-mycnf/
#' @export
#' @examples
#' \dontrun{
#' # Connect to a MariaDB database running locally
#' con <- dbConnect(RMariaDB::MariaDB(), dbname = "mydb")
#' # Connect to a remote database with username and password
#' con <- dbConnect(RMariaDB::MariaDB(), host = "mydb.mycompany.com",
#' user = "abc", password = "def")
#' # But instead of supplying the username and password in code, it's usually
#' # better to set up a group in your .my.cnf (usually located in your home
#' # directory). Then it's less likely you'll inadvertently share them.
#' con <- dbConnect(RMariaDB::MariaDB(), group = "test")
#'
#' # Always cleanup by disconnecting the database
#' dbDisconnect(con)
#' }
#'
#' # All examples use the rs-dbi group by default.
#' if (mariadbHasDefault()) {
#' con <- dbConnect(RMariaDB::MariaDB(), dbname = "test")
#' con
#' dbDisconnect(con)
#' }
#' @export
setMethod("dbConnect", "MariaDBDriver",
function(drv, dbname = NULL, username = NULL, password = NULL, host = NULL,
unix.socket = NULL, port = 0, client.flag = 0,
groups = "rs-dbi", default.file = NULL, ssl.key = NULL, ssl.cert = NULL,
ssl.ca = NULL, ssl.capath = NULL, ssl.cipher = NULL, ...,
bigint = c("integer64", "integer", "numeric", "character"),
timeout = 10, timezone = "+00:00", timezone_out = NULL) {
bigint <- match.arg(bigint)
if (is.infinite(timeout)) {
timeout <- -1L
} else {
timeout <- as.integer(timeout)
}
# Make sure that `~` is resolved correctly:
if (!is.null(default.file)) {
default.file <- normalizePath(default.file)
}
if (!is.null(ssl.key)) {
ssl.key <- normalizePath(ssl.key)
}
if (!is.null(ssl.cert)) {
ssl.cert <- normalizePath(ssl.cert)
}
if (!is.null(ssl.ca)) {
ssl.ca <- normalizePath(ssl.ca)
}
if (!is.null(ssl.capath)) {
ssl.capath <- normalizePath(ssl.capath)
}
ptr <- connection_create(
host, username, password, dbname, as.integer(port), unix.socket,
as.integer(client.flag), groups, default.file,
ssl.key, ssl.cert, ssl.ca, ssl.capath, ssl.cipher,
timeout
)
info <- connection_info(ptr)
conn <- new("MariaDBConnection",
ptr = ptr,
host = info$host,
db = info$dbname,
bigint = bigint
)
on.exit(dbDisconnect(conn))
if (!is.null(timezone)) {
# Side effect: check if time zone valid
dbExecute(conn, paste0("SET time_zone = ", dbQuoteString(conn, timezone)))
} else {
timezone <- dbGetQuery(conn, "SELECT @@SESSION.time_zone")[[1]]
}
# Check if this is a valid time zone in R:
timezone <- check_tz(timezone)
if (is.null(timezone_out)) {
timezone_out <- timezone
} else {
timezone_out <- check_tz(timezone_out)
}
conn@timezone <- timezone
conn@timezone_out <- timezone_out
dbExecute(conn, "SET autocommit = 0")
on.exit(NULL)
conn
}
)
# Validate a time zone string and translate it into a name R recognizes.
# Returns the (possibly translated) zone, or "" ("local time") with a warning
# if the zone is invalid.
check_tz <- function(timezone) {
  # Name of the caller's argument (e.g. "timezone" or "timezone_out"),
  # used to point at the right argument in the warning below.
  arg_name <- deparse(substitute(timezone))
  # MySQL reports UTC as the offset "+00:00"; map it to the name R knows.
  if (timezone == "+00:00") {
    timezone <- "UTC"
  }
  # Validate by attempting to use the zone. BUG FIX: the previous version
  # discarded the tryCatch() result, so the `timezone <- ""` assignment inside
  # the error handler only bound a local variable in the handler and the
  # invalid zone was returned unchanged; capture the result instead.
  timezone <- tryCatch(
    {
      lubridate::force_tz(as.POSIXct("2021-03-01 10:40"), timezone)
      timezone
    },
    error = function(e) {
      warning(
        "Invalid time zone '", timezone, "', ",
        "falling back to local time.\n",
        "Set the `", arg_name, "` argument to a valid time zone.\n",
        conditionMessage(e),
        call. = FALSE
      )
      # "" is interpreted downstream as the local time zone.
      ""
    }
  )
  timezone
}
#' @export
#' @import methods DBI
#' @importFrom hms hms
#' @importFrom bit64 integer64
#' @rdname dbConnect-MariaDBDriver-method
#' @examples
#' if (mariadbHasDefault()) {
#' # connect to a database and load some data
#' con <- dbConnect(RMariaDB::MariaDB(), dbname = "test")
#' dbWriteTable(con, "USArrests", datasets::USArrests, temporary = TRUE)
#'
#' # query
#' rs <- dbSendQuery(con, "SELECT * FROM USArrests")
#' d1 <- dbFetch(rs, n = 10) # extract data in chunks of 10 rows
#' dbHasCompleted(rs)
#' d2 <- dbFetch(rs, n = -1) # extract all remaining data
#' dbHasCompleted(rs)
#' dbClearResult(rs)
#' dbListTables(con)
#'
#' # clean up
#' dbDisconnect(con)
#' }
MariaDB <- function() {
  # The driver object carries no visible state; a fresh S4 instance is
  # created on every call.
  new("MariaDBDriver")
}
#' Client flags
#'
#' Use for the `client.flag` argument to [dbConnect()], multiple flags can be
#' combined with a bitwise or (see [Logic]). The flags are provided for
#' completeness.
#'
#' @seealso
#' The `flags` argument at https://mariadb.com/kb/en/library/mysql_real_connect.
#'
#' @examples
#' \dontrun{
#' library(DBI)
#' library(RMariaDB)
#' con1 <- dbConnect(MariaDB(), client.flag = CLIENT_COMPRESS)
#' con2 <- dbConnect(
#' MariaDB(),
#' client.flag = CLIENT_COMPRESS | CLIENT_SECURE_CONNECTION
#' )
#' }
#'
#' @aliases CLIENT_LONG_PASSWORD CLIENT_FOUND_ROWS CLIENT_LONG_FLAG
#' CLIENT_CONNECT_WITH_DB CLIENT_NO_SCHEMA CLIENT_COMPRESS CLIENT_ODBC
#' CLIENT_LOCAL_FILES CLIENT_IGNORE_SPACE CLIENT_PROTOCOL_41 CLIENT_INTERACTIVE
#' CLIENT_SSL CLIENT_IGNORE_SIGPIPE CLIENT_TRANSACTIONS CLIENT_RESERVED
#' CLIENT_SECURE_CONNECTION CLIENT_MULTI_STATEMENTS CLIENT_MULTI_RESULTS
#' @name Client-flags
NULL
## The following client flags were copied from mysql_com.h (version 4.1.13)
## but it may not make sense to set some of this from RMariaDB.
## Each constant is a distinct power of two, so several flags can be
## combined with a bitwise "or" in the `client.flag` argument of dbConnect().
#' @export
CLIENT_LONG_PASSWORD <- 1 # new more secure passwords
#' @export
CLIENT_FOUND_ROWS <- 2 # Found instead of affected rows
#' @export
CLIENT_LONG_FLAG <- 4 # Get all column flags
#' @export
CLIENT_CONNECT_WITH_DB <- 8 # One can specify db on connect
#' @export
CLIENT_NO_SCHEMA <- 16 # Don't allow database.table.column
#' @export
CLIENT_COMPRESS <- 32 # Can use compression protocol
#' @export
CLIENT_ODBC <- 64 # Odbc client
#' @export
CLIENT_LOCAL_FILES <- 128 # Can use LOAD DATA LOCAL
#' @export
CLIENT_IGNORE_SPACE <- 256 # Ignore spaces before '('
#' @export
CLIENT_PROTOCOL_41 <- 512 # New 4.1 protocol
#' @export
CLIENT_INTERACTIVE <- 1024 # This is an interactive client
#' @export
CLIENT_SSL <- 2048 # Switch to SSL after handshake
#' @export
CLIENT_IGNORE_SIGPIPE <- 4096 # IGNORE sigpipes
#' @export
CLIENT_TRANSACTIONS <- 8192 # Client knows about transactions
#' @export
CLIENT_RESERVED <- 16384 # Old flag for 4.1 protocol
#' @export
CLIENT_SECURE_CONNECTION <- 32768 # New 4.1 authentication
#' @export
CLIENT_MULTI_STATEMENTS <- 65536 # Enable/disable multi-stmt support
#' @export
CLIENT_MULTI_RESULTS <- 131072 # Enable/disable multi-results
|
#' Reads a NetCDF file
#'
#' Reads the full content of a NetCDF file, given only the file name
#'
#' @param filn A character string specifying the full path of the NetCDF
#' file to be read.
#' @param varnam A character string specifying the variable name of the NetCDF file.
#' Can also be a vector of character strings. Defaults to \code{NA}, that is: all
#' available variables are read.
#' @param date_origin A character string of format \code{"YYYY-MM-DD"}
#' specifying the origin, day 0, of the time values provided in the NetCDF file.
#' @param time_is_years A logical specifying whether the values provided by dimension
#' \code{'time'} is years. Defaults to \code{FALSE}.
#' @param ignore_time A logical specifying whether the time dimension should
#' be ignored, e.g. when the file has a degenerate time dimension of length 1.
#' Defaults to \code{FALSE}.
#' @param check_flip A logical specifying whether order of latitude values should be checked.
#'
#' @return A list, containing \code{"lon"} (vector of longitudes of
#' gridcell mid-points), \code{"lat"} (vector of latitudes of gridcell
#' mid-points), \code{"time"} (vector of lubridate::ymd dates),
#' \code{"varnams"} (a vector of all variable names as strings), and a
#' named (nested) list of the data arrays (lon x lat x time) for each
#' variable.
#' @export
#'
read_nc_onefile <- function(filn, varnam = NA, date_origin = NA, time_is_years = FALSE, ignore_time = FALSE,
  check_flip = FALSE){

  nc <- ncdf4::nc_open(filn)

  ## Always release the file handle, also on error. (Previously the file
  ## was never closed, leaking one handle per call.)
  on.exit(ncdf4::nc_close(nc), add = TRUE)

  ## Resolve the names of the longitude, latitude and time dimensions:
  ## different NetCDF products use different spellings/capitalisations.
  ## The first candidate present in the file wins; NA if none is present.
  dimnames <- ls(nc$dim)
  lonname  <- intersect(c("lon", "LON", "longitude", "Longitude", "LONGITUDE", "x"), dimnames)[1]
  latname  <- intersect(c("lat", "LAT", "latitude", "Latitude", "LATITUDE", "y"), dimnames)[1]
  timename <- intersect(c("time", "TIME", "Time"), dimnames)[1]

  if (is.na(timename) || ignore_time){

    ## no time dimension (or time deliberately ignored)
    out <- list(
      lon = ncdf4::ncvar_get(nc, nc$dim[[lonname]]$name),
      lat = ncdf4::ncvar_get(nc, nc$dim[[latname]]$name)
    )

  } else {

    ## with time dimension
    out <- list(
      lon = ncdf4::ncvar_get(nc, nc$dim[[lonname]]$name),
      lat = ncdf4::ncvar_get(nc, nc$dim[[latname]]$name),
      time = ncdf4::ncvar_get(nc, nc$dim[[timename]]$name)
    )

    ## Conversion to ymd object requires out$time to be integer. Usually it
    ## is, but in some cases it's not (e.g. when cdo timmean is applied
    ## before). Round down.
    out$time <- floor(out$time)

    ## convert to date
    if (time_is_years){

      ## interpret time as a year, pinned to the first of January
      out$time <- lubridate::ymd(paste0(out$time, "-01-01"))

    } else if (!is.na(date_origin)){

      ## interpret time as days since the user-provided origin
      out$time <- lubridate::ymd(date_origin) + lubridate::days(out$time)

    } else {

      ## infer the origin from the "units" attribute of the time dimension
      atts <- ncmeta::nc_atts(filn, timename)
      atts <- tidyr::unnest(atts, cols = c(value))
      atts <- dplyr::filter(atts, name == "units")
      units_long <- dplyr::pull(atts, value)

      if (stringr::str_detect(units_long, "days since")){

        ## e.g. "days since 2000-01-01 00:00:00" or "days since 2001-1-1 0:0:0"
        origin_string <- stringr::str_remove(units_long, "days since ")
        origin_string <- stringr::str_remove(origin_string, " 00:00:00")
        origin_string <- stringr::str_remove(origin_string, " 0:0:0")
        time_origin <- lubridate::ymd(origin_string)
        out$time <- time_origin + lubridate::days(out$time)

      } else if (stringr::str_detect(units_long, "seconds since")){

        time_origin <- lubridate::ymd_hms(stringr::str_remove(units_long, "seconds since "))
        out$time <- time_origin + lubridate::seconds(out$time)

      }
    }
  }

  ## get variables; the default (NA) reads all variables in the file.
  ## all() guards against `varnam` being a vector of names (a length > 1
  ## condition in if() is an error in R >= 4.2).
  if (all(is.na(varnam))) varnam <- ls(nc$var)

  out[["vars"]] <- stats::setNames(
    lapply(varnam, function(v) ncdf4::ncvar_get(nc, v)),
    varnam
  )
  out[["varnams"]] <- varnam

  ## optionally make the latitude axis ascending
  if (check_flip){
    if (length(out$lat) > 1 && out$lat[1] > out$lat[2]){
      ## Flip latitudes
      out <- nc_flip_lat(out)
    }
  }

  return(out)
}
# Reverse the latitude axis of an object returned by read_nc_onefile():
# flips the `lat` coordinate vector and the 2nd (latitude) dimension of
# every data array in `nc$vars`.
nc_flip_lat <- function(nc){
  nc$lat <- rev(nc$lat)

  # Flip the latitude (2nd) dimension of one data array. Supports both
  # 2-D (lon x lat) and 3-D (lon x lat x time) arrays; the previous
  # `arr[, nlat:1]` errored on 3-D arrays.
  arr_flip_lat <- function(arr){
    nlat <- dim(arr)[2]
    if (length(dim(arr)) == 3){
      arr[, nlat:1, , drop = FALSE]
    } else {
      arr[, nlat:1, drop = FALSE]
    }
  }

  # Flip every variable, not just the first one (previously `nc$vars[1]`
  # silently dropped all other variables).
  nc$vars <- lapply(nc$vars, arr_flip_lat)
  return(nc)
}
|
/R/read_nc_onefile.R
|
no_license
|
stineb/rbeni
|
R
| false
| false
| 6,707
|
r
|
#' Reads a NetCDF file
#'
#' Reads the full content of a NetCDF file, given only the file name
#'
#' @param filn A character string specifying the full path of the NetCDF
#' file to be read.
#' @param varnam A character string specifying the variable name of the NetCDF file.
#' Can also be a vector of character strings. Defaults to \code{NA}, that is: all
#' available variables are read.
#' @param date_origin A character string of format \code{"YYYY-MM-DD"}
#' specifying the origin, day 0, of the time values provided in the NetCDF file.
#' @param time_is_years A logical specifying whether the values provided by dimension
#' \code{'time'} is years. Defaults to \code{FALSE}.
#' @param ignore_time A logical specifying whether the time dimension should
#' be ignored, e.g. when the file has a degenerate time dimension of length 1.
#' Defaults to \code{FALSE}.
#' @param check_flip A logical specifying whether order of latitude values should be checked.
#'
#' @return A list, containing \code{"lon"} (vector of longitudes of
#' gridcell mid-points), \code{"lat"} (vector of latitudes of gridcell
#' mid-points), \code{"time"} (vector of lubridate::ymd dates),
#' \code{"varnams"} (a vector of all variable names as strings), and a
#' named (nested) list of the data arrays (lon x lat x time) for each
#' variable.
#' @export
#'
read_nc_onefile <- function(filn, varnam = NA, date_origin = NA, time_is_years = FALSE, ignore_time = FALSE,
  check_flip = FALSE){

  nc <- ncdf4::nc_open(filn)

  ## Always release the file handle, also on error. (Previously the file
  ## was never closed, leaking one handle per call.)
  on.exit(ncdf4::nc_close(nc), add = TRUE)

  ## Resolve the names of the longitude, latitude and time dimensions:
  ## different NetCDF products use different spellings/capitalisations.
  ## The first candidate present in the file wins; NA if none is present.
  dimnames <- ls(nc$dim)
  lonname  <- intersect(c("lon", "LON", "longitude", "Longitude", "LONGITUDE", "x"), dimnames)[1]
  latname  <- intersect(c("lat", "LAT", "latitude", "Latitude", "LATITUDE", "y"), dimnames)[1]
  timename <- intersect(c("time", "TIME", "Time"), dimnames)[1]

  if (is.na(timename) || ignore_time){

    ## no time dimension (or time deliberately ignored)
    out <- list(
      lon = ncdf4::ncvar_get(nc, nc$dim[[lonname]]$name),
      lat = ncdf4::ncvar_get(nc, nc$dim[[latname]]$name)
    )

  } else {

    ## with time dimension
    out <- list(
      lon = ncdf4::ncvar_get(nc, nc$dim[[lonname]]$name),
      lat = ncdf4::ncvar_get(nc, nc$dim[[latname]]$name),
      time = ncdf4::ncvar_get(nc, nc$dim[[timename]]$name)
    )

    ## Conversion to ymd object requires out$time to be integer. Usually it
    ## is, but in some cases it's not (e.g. when cdo timmean is applied
    ## before). Round down.
    out$time <- floor(out$time)

    ## convert to date
    if (time_is_years){

      ## interpret time as a year, pinned to the first of January
      out$time <- lubridate::ymd(paste0(out$time, "-01-01"))

    } else if (!is.na(date_origin)){

      ## interpret time as days since the user-provided origin
      out$time <- lubridate::ymd(date_origin) + lubridate::days(out$time)

    } else {

      ## infer the origin from the "units" attribute of the time dimension
      atts <- ncmeta::nc_atts(filn, timename)
      atts <- tidyr::unnest(atts, cols = c(value))
      atts <- dplyr::filter(atts, name == "units")
      units_long <- dplyr::pull(atts, value)

      if (stringr::str_detect(units_long, "days since")){

        ## e.g. "days since 2000-01-01 00:00:00" or "days since 2001-1-1 0:0:0"
        origin_string <- stringr::str_remove(units_long, "days since ")
        origin_string <- stringr::str_remove(origin_string, " 00:00:00")
        origin_string <- stringr::str_remove(origin_string, " 0:0:0")
        time_origin <- lubridate::ymd(origin_string)
        out$time <- time_origin + lubridate::days(out$time)

      } else if (stringr::str_detect(units_long, "seconds since")){

        time_origin <- lubridate::ymd_hms(stringr::str_remove(units_long, "seconds since "))
        out$time <- time_origin + lubridate::seconds(out$time)

      }
    }
  }

  ## get variables; the default (NA) reads all variables in the file.
  ## all() guards against `varnam` being a vector of names (a length > 1
  ## condition in if() is an error in R >= 4.2).
  if (all(is.na(varnam))) varnam <- ls(nc$var)

  out[["vars"]] <- stats::setNames(
    lapply(varnam, function(v) ncdf4::ncvar_get(nc, v)),
    varnam
  )
  out[["varnams"]] <- varnam

  ## optionally make the latitude axis ascending
  if (check_flip){
    if (length(out$lat) > 1 && out$lat[1] > out$lat[2]){
      ## Flip latitudes
      out <- nc_flip_lat(out)
    }
  }

  return(out)
}
# Reverse the latitude axis of an object returned by read_nc_onefile():
# flips the `lat` coordinate vector and the 2nd (latitude) dimension of
# every data array in `nc$vars`.
nc_flip_lat <- function(nc){
  nc$lat <- rev(nc$lat)

  # Flip the latitude (2nd) dimension of one data array. Supports both
  # 2-D (lon x lat) and 3-D (lon x lat x time) arrays; the previous
  # `arr[, nlat:1]` errored on 3-D arrays.
  arr_flip_lat <- function(arr){
    nlat <- dim(arr)[2]
    if (length(dim(arr)) == 3){
      arr[, nlat:1, , drop = FALSE]
    } else {
      arr[, nlat:1, drop = FALSE]
    }
  }

  # Flip every variable, not just the first one (previously `nc$vars[1]`
  # silently dropped all other variables).
  nc$vars <- lapply(nc$vars, arr_flip_lat)
  return(nc)
}
|
## The makeCacheMatrix function creates a special "matrix" object that can
## cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until a value is stored via setInv().
  cached <- NULL

  # Replace the stored matrix and invalidate any previously cached inverse.
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }

  # Accessors for the matrix itself and for its cached inverse.
  get <- function() {
    x
  }
  setInv <- function(i) {
    cached <<- i
  }
  getInv <- function() {
    cached
  }

  # Expose the closure-backed interface as a named list.
  list(
    set = set,
    get = get,
    setInv = setInv,
    getInv = getInv
  )
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix function. If the inverse has
## already been calculated (and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  # Serve the cached inverse when one is already available.
  cached <- x$getInv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  # Cache miss: compute the inverse, store it for next time, and return it.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setInv(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
szetov/ProgrammingAssignment2
|
R
| false
| false
| 816
|
r
|
## The makeCacheMatrix function creates a special "matrix" object that can
## cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until a value is stored via setInv().
  cached <- NULL

  # Replace the stored matrix and invalidate any previously cached inverse.
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }

  # Accessors for the matrix itself and for its cached inverse.
  get <- function() {
    x
  }
  setInv <- function(i) {
    cached <<- i
  }
  getInv <- function() {
    cached
  }

  # Expose the closure-backed interface as a named list.
  list(
    set = set,
    get = get,
    setInv = setInv,
    getInv = getInv
  )
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix function. If the inverse has
## already been calculated (and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  # Serve the cached inverse when one is already available.
  cached <- x$getInv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  # Cache miss: compute the inverse, store it for next time, and return it.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setInv(inv)
  inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drug_common_utilities.R
\name{get_xml_db_rows}
\alias{get_xml_db_rows}
\title{Reads drug bank xml database and set it in memory.}
\usage{
get_xml_db_rows(xml_db_name)
}
\arguments{
\item{xml_db_name}{string, full path for the drug bank xml or zip file.}
}
\value{
TRUE when sets the db tree in memory to be used by parser
methods and FALSE otherwise
}
\description{
\code{get_xml_db_rows} sets drug bank db xml full tree in memory
}
\details{
This function reads the DrugBank XML database and sets the full tree
in memory directly, without returning it. It takes one single optional
argument to save the tree in a predefined database via the
\code{\link{open_db}} method.
It must be called once before using the parser functions; once
\code{\link{get_xml_db_rows}} has been called, there is no need to call
it again.
}
\examples{
\donttest{
get_xml_db_rows("db_full_path")
get_xml_db_rows(xml_db_name = "db_full_path")
}
}
|
/man/get_xml_db_rows.Rd
|
no_license
|
Sparklingredstar/dbparser
|
R
| false
| true
| 1,014
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drug_common_utilities.R
\name{get_xml_db_rows}
\alias{get_xml_db_rows}
\title{Reads drug bank xml database and set it in memory.}
\usage{
get_xml_db_rows(xml_db_name)
}
\arguments{
\item{xml_db_name}{string, full path for the drug bank xml or zip file.}
}
\value{
TRUE when sets the db tree in memory to be used by parser
methods and FALSE otherwise
}
\description{
\code{get_xml_db_rows} sets drug bank db xml full tree in memory
}
\details{
This function reads the DrugBank XML database and sets the full tree
in memory directly, without returning it. It takes one single optional
argument to save the tree in a predefined database via the
\code{\link{open_db}} method.
It must be called once before using the parser functions; once
\code{\link{get_xml_db_rows}} has been called, there is no need to call
it again.
}
\examples{
\donttest{
get_xml_db_rows("db_full_path")
get_xml_db_rows(xml_db_name = "db_full_path")
}
}
|
# library(googleCharts)
# library(shinythemes)
# Use global max/min for axes so the view window stays
# constant as the user moves between years
# Fixed axis ranges: one horizontal slot per metric (plus padding), and
# accuracy on the vertical axis with 10% headroom above 1.
xlim <- list(min = 0, max = length(unique(meta_frame$metric)) + 1)
ylim <- list(min = 0, max = 1.1)
# Top-level UI: three main tabs (Overview bubble chart, per-metric Graphs,
# raw Table) plus a "More" menu, on a dark theme.
navbarPage(title=div(#img(src="logo.png",width = "100px", height = "35px"),
  "KPI Monitoring"), theme = shinytheme("darkly"),

  ############# OVERVIEW PANEL ###########
  tabPanel("Overview",
    sidebarLayout(
      mainPanel(
        googleChartsInit(),

        # Use the Google webfont "Source Sans Pro"
        tags$link(
          href=paste0("http://fonts.googleapis.com/css?",
            "family=Source+Sans+Pro:300,600,300italic"),
          rel="stylesheet", type="text/css"),
        tags$style(type="text/css",
          "body {font-family: 'Source Sans Pro'}"
        ),

        # Bubble chart: one bubble per metric, accuracy on the y axis.
        googleBubbleChart("chart",
          width="100%", height = "475px",
          # Set the default options for this chart; they can be
          # overridden in server.R on a per-update basis. See
          # https://developers.google.com/chart/interactive/docs/gallery/bubblechart
          # for option documentation.
          options = list(
            fontName = "Source Sans Pro",
            backgroundColor='#222222',
            # legend.textStyle={color: 'black',
            # fontName: 'Source Sans Pro', fontSize: 8},
            legend=list(textStyle= list(color= 'grey', fontSize= 13)),
            fontSize = 13,
            # Set axis labels and ranges
            hAxis = list(
              title = "Metric",
              viewWindow = xlim,
              # One labelled tick per metric: numeric position `v`, label `f`.
              ticks=lapply(unique(meta_frame$metric), function(x) list(v=as.numeric(unique(x)),f=unique(as.character(x))))
              , titleTextStyle= list(color= 'white'),
              textStyle=list(color= 'white')
            ),
            vAxis = list(
              title = "Prediction accuracy",
              viewWindow = ylim,
              titleTextStyle= list(color= 'white'),
              textStyle=list(color= 'white')
            ),
            # The default padding is a little too spaced out
            chartArea = list(
              top = 50, left = 75, right=100,
              height = "75%", width = "75%"
            ),
            # Allow pan/zoom
            explorer = list(),
            # Set bubble visual props
            bubble = list(
              opacity = 0.4, stroke = "none",
              # Hide bubble label
              textStyle = list(
                color = "none"
              )
            ),
            # Set fonts
            titleTextStyle = list(
              fontSize = 16,
              color='white'
            ),
            tooltip = list(
              textStyle = list(
                fontSize = 12
              )
            )
          )
        )
      ),
      # Filter controls: alert level, server-rendered dimension inputs, and
      # a minimum-volume slider.
      fluidRow(
        wellPanel(
          tags$style(type="text/css", '#rightPanel { width:400px; float:left;}'),
          id = "rightPanel",
          textOutput('title_alert'),
          br(),
          uiOutput("alert_level_input"),
          br(),
          br(),
          textOutput('title_dim'),
          br(),
          uiOutput("input_ui"),
          width=2
        ),
        shiny::column(4, offset = 3,
          sliderInput("min_vol", "Minimum volume",
            min = 0, max = 10,
            value = 0, animate = FALSE)
        )
      )
    )
  ),

  ############## Graphs PANEL ###################
  tabPanel("Graphs",
    sidebarLayout(
      sidebarPanel(
        selectInput("metric", label = h5("Metric"),
          choices =as.list(unique(test$metric))
          ,
          selected = 1),
        selectInput("partition", label = h5("Partition"),
          choices =as.list(unique(test$partition))),
        br(),br(),
        textOutput('plot_alert'),br(),
        textOutput('plot_real'),br(),
        textOutput('plot_upper'),br(),
        textOutput('plot_under'),br()
      ),
      mainPanel(
        textOutput('plot_title1'),
        br(),
        plotlyOutput("plotos"),
        br(),br(),
        textOutput('plot_title2'),
        br(),
        plotOutput("original_ts"),
        br(),br()
      )
    )
  ),

  ############### Table PANEL ###################
  tabPanel("Table",
    dataTableOutput('tablea')
  ),

  ##### More
  navbarMenu("More",
    tabPanel("About",
      # NOTE(review): print() here runs when the UI is built and writes to
      # the console as a side effect; a plain string (or shiny::p()) would
      # render the same content without that side effect -- confirm intent.
      print("Anomaly detection is based upon automated model selection based on AIC. \n
            The candidate models for each time series are ARIMA models of different
            orders (seasonal and non seasonal)"),
      print("Code available in github: ----------")
    ),
    tabPanel("References",
      fluidRow(
        column(6,
          'asd' # includeMarkdown("about.md")
        ),
        column(3,
          tags$small(
            "Work in Progress"
          )
        )
      )
    )
  )
)
|
/ui.R
|
permissive
|
ATL64/anomaly-detection
|
R
| false
| false
| 7,347
|
r
|
# library(googleCharts)
# library(shinythemes)
# Use global max/min for axes so the view window stays
# constant as the user moves between years
# Fixed axis ranges: one horizontal slot per metric (plus padding), and
# accuracy on the vertical axis with 10% headroom above 1.
xlim <- list(min = 0, max = length(unique(meta_frame$metric)) + 1)
ylim <- list(min = 0, max = 1.1)
# Top-level UI: three main tabs (Overview bubble chart, per-metric Graphs,
# raw Table) plus a "More" menu, on a dark theme.
navbarPage(title=div(#img(src="logo.png",width = "100px", height = "35px"),
  "KPI Monitoring"), theme = shinytheme("darkly"),

  ############# OVERVIEW PANEL ###########
  tabPanel("Overview",
    sidebarLayout(
      mainPanel(
        googleChartsInit(),

        # Use the Google webfont "Source Sans Pro"
        tags$link(
          href=paste0("http://fonts.googleapis.com/css?",
            "family=Source+Sans+Pro:300,600,300italic"),
          rel="stylesheet", type="text/css"),
        tags$style(type="text/css",
          "body {font-family: 'Source Sans Pro'}"
        ),

        # Bubble chart: one bubble per metric, accuracy on the y axis.
        googleBubbleChart("chart",
          width="100%", height = "475px",
          # Set the default options for this chart; they can be
          # overridden in server.R on a per-update basis. See
          # https://developers.google.com/chart/interactive/docs/gallery/bubblechart
          # for option documentation.
          options = list(
            fontName = "Source Sans Pro",
            backgroundColor='#222222',
            # legend.textStyle={color: 'black',
            # fontName: 'Source Sans Pro', fontSize: 8},
            legend=list(textStyle= list(color= 'grey', fontSize= 13)),
            fontSize = 13,
            # Set axis labels and ranges
            hAxis = list(
              title = "Metric",
              viewWindow = xlim,
              # One labelled tick per metric: numeric position `v`, label `f`.
              ticks=lapply(unique(meta_frame$metric), function(x) list(v=as.numeric(unique(x)),f=unique(as.character(x))))
              , titleTextStyle= list(color= 'white'),
              textStyle=list(color= 'white')
            ),
            vAxis = list(
              title = "Prediction accuracy",
              viewWindow = ylim,
              titleTextStyle= list(color= 'white'),
              textStyle=list(color= 'white')
            ),
            # The default padding is a little too spaced out
            chartArea = list(
              top = 50, left = 75, right=100,
              height = "75%", width = "75%"
            ),
            # Allow pan/zoom
            explorer = list(),
            # Set bubble visual props
            bubble = list(
              opacity = 0.4, stroke = "none",
              # Hide bubble label
              textStyle = list(
                color = "none"
              )
            ),
            # Set fonts
            titleTextStyle = list(
              fontSize = 16,
              color='white'
            ),
            tooltip = list(
              textStyle = list(
                fontSize = 12
              )
            )
          )
        )
      ),
      # Filter controls: alert level, server-rendered dimension inputs, and
      # a minimum-volume slider.
      fluidRow(
        wellPanel(
          tags$style(type="text/css", '#rightPanel { width:400px; float:left;}'),
          id = "rightPanel",
          textOutput('title_alert'),
          br(),
          uiOutput("alert_level_input"),
          br(),
          br(),
          textOutput('title_dim'),
          br(),
          uiOutput("input_ui"),
          width=2
        ),
        shiny::column(4, offset = 3,
          sliderInput("min_vol", "Minimum volume",
            min = 0, max = 10,
            value = 0, animate = FALSE)
        )
      )
    )
  ),

  ############## Graphs PANEL ###################
  tabPanel("Graphs",
    sidebarLayout(
      sidebarPanel(
        selectInput("metric", label = h5("Metric"),
          choices =as.list(unique(test$metric))
          ,
          selected = 1),
        selectInput("partition", label = h5("Partition"),
          choices =as.list(unique(test$partition))),
        br(),br(),
        textOutput('plot_alert'),br(),
        textOutput('plot_real'),br(),
        textOutput('plot_upper'),br(),
        textOutput('plot_under'),br()
      ),
      mainPanel(
        textOutput('plot_title1'),
        br(),
        plotlyOutput("plotos"),
        br(),br(),
        textOutput('plot_title2'),
        br(),
        plotOutput("original_ts"),
        br(),br()
      )
    )
  ),

  ############### Table PANEL ###################
  tabPanel("Table",
    dataTableOutput('tablea')
  ),

  ##### More
  navbarMenu("More",
    tabPanel("About",
      # NOTE(review): print() here runs when the UI is built and writes to
      # the console as a side effect; a plain string (or shiny::p()) would
      # render the same content without that side effect -- confirm intent.
      print("Anomaly detection is based upon automated model selection based on AIC. \n
            The candidate models for each time series are ARIMA models of different
            orders (seasonal and non seasonal)"),
      print("Code available in github: ----------")
    ),
    tabPanel("References",
      fluidRow(
        column(6,
          'asd' # includeMarkdown("about.md")
        ),
        column(3,
          tags$small(
            "Work in Progress"
          )
        )
      )
    )
  )
)
|
#' Build a data frame or list
#'
#' @description
#' `tibble()` is a trimmed down version of [data.frame()] that:
#'
#' * Never coerces inputs (i.e. strings stay as strings!).
#' * Never adds `row.names`.
#' * Never munges column names.
#' * Only recycles length 1 inputs.
#' * Evaluates its arguments lazily and in order.
#' * Adds `tbl_df` class to output.
#' * Automatically adds column names.
#'
#' `data_frame()` is an alias to `tibble()`.
#'
#' `tibble_()` and its alias `data_frame_()` use lazy evaluation and are
#' deprecated. New code should use `tibble()` or `data_frame()` with
#' [quasiquotation].
#'
#' @param ... A set of name-value pairs. Arguments are evaluated sequentially,
#' so you can refer to previously created variables. These arguments are
#' processed with [rlang::quos()] and support unquote via `!!` and
#' unquote-splice via `!!!`.
#' @seealso [as_tibble()] to turn an existing list into
#' a data frame.
#' @export
#' @examples
#' a <- 1:5
#' tibble(a, b = a * 2)
#' tibble(a, b = a * 2, c = 1)
#' tibble(x = runif(10), y = x * 2)
#'
#' lst(n = 5, x = runif(n))
#'
#' # tibble never coerces its inputs
#' str(tibble(letters))
#' str(tibble(x = list(diag(1), diag(2))))
#'
#' # or munges column names
#' tibble(`a + b` = 1:5)
#'
#' # You can splice-unquote a list of quotes and formulas
#' tibble(!!! list(x = rlang::quo(1:10), y = quote(x * 2)))
#'
#' # data frames can only contain 1d atomic vectors and lists
#' # and can not contain POSIXlt
#' \dontrun{
#' tibble(x = tibble(1, 2, 3))
#' tibble(y = strptime("2000/01/01", "%x"))
#' }
tibble <- function(...) {
  # Capture the dots as quosures (auto-naming unnamed arguments), then build
  # the columns sequentially so later columns can refer to earlier ones.
  cols <- quos(..., .named = TRUE)
  as_tibble(lst_quos(cols, expand = TRUE))
}
#' @export
#' @usage NULL
#' @rdname tibble
tibble_ <- function(xs) {
  # Deprecated lazyeval interface: translate the lazy dots to quosures
  # (evaluated in the caller's environment), then delegate to tibble().
  xs <- compat_lazy_dots(xs, caller_env())
  tibble(!!! xs)
}
# Historical aliases: data_frame() and data_frame_() are kept for backward
# compatibility with code written before the tibble() rename.
#' @export
#' @rdname tibble
data_frame <- tibble
#' @export
#' @rdname tibble
#' @usage NULL
data_frame_ <- tibble_
#' Test if the object is a tibble
#'
#' This function returns `FALSE` for regular data frames and `TRUE` for tibbles.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the `tbl_df` class.
#' @export
is_tibble <- function(x) {
  # inherits() is the idiomatic (and equivalent) way to test class
  # membership, compared to matching against class(x) with %in%.
  inherits(x, "tbl_df")
}
# Dotted-name alias retained for backward compatibility with older code.
#' @rdname is_tibble
#' @usage NULL
#' @export
is.tibble <- is_tibble
# Validity checks --------------------------------------------------------------

# Validate the columns of a prospective tibble: every column must be named,
# names must be unique, each column must be 1-dimensional, and POSIXlt
# columns are rejected. Aborts on the first violation; otherwise returns
# `x` (with dimensions stripped from columns -- see strip_dim()).
check_tibble <- function(x) {
  # Names
  names_x <- names2(x)
  bad_name <- which(is.na(names_x) | names_x == "")
  if (has_length(bad_name)) {
    abort(error_column_must_be_named(bad_name))
  }
  dups <- which(duplicated(names_x))
  if (has_length(dups)) {
    abort(error_column_must_have_unique_name(names_x[dups]))
  }

  # Types: every column must be a 1d vector or list (per is_1d()).
  is_xd <- which(!map_lgl(x, is_1d))
  if (has_length(is_xd)) {
    classes <- map_chr(x[is_xd], function(x) class(x)[[1]])
    abort(error_column_must_be_vector(names_x[is_xd], classes))
  }
  x[] <- map(x, strip_dim)

  # POSIXlt is list-based and not supported as a column type; POSIXct
  # should be used instead.
  posixlt <- which(map_lgl(x, inherits, "POSIXlt"))
  if (has_length(posixlt)) {
    abort(error_time_column_must_be_posixct(names_x[posixlt]))
  }
  x
}
recycle_columns <- function(x) {
  # Column lengths; NROW() handles vectors and data-frame columns alike.
  col_lengths <- map_int(x, NROW)

  # Nothing to do when every column already shares one length (including
  # the all-zero-length case).
  if (all(col_lengths == col_lengths[1L])) {
    return(x)
  }

  # Target length is the longest non-scalar column (0 if there is none);
  # only length-1 columns are recycled up to it.
  max <- max(col_lengths[col_lengths != 1L], 0L)
  is_scalar <- col_lengths == 1
  if (max > 1L && any(is_scalar)) {
    x[is_scalar] <- map(x[is_scalar], rep, max)
  }
  x
}
|
/R/tibble.R
|
no_license
|
jeffreyhanson/tibble
|
R
| false
| false
| 3,453
|
r
|
#' Build a data frame or list
#'
#' @description
#' `tibble()` is a trimmed down version of [data.frame()] that:
#'
#' * Never coerces inputs (i.e. strings stay as strings!).
#' * Never adds `row.names`.
#' * Never munges column names.
#' * Only recycles length 1 inputs.
#' * Evaluates its arguments lazily and in order.
#' * Adds `tbl_df` class to output.
#' * Automatically adds column names.
#'
#' `data_frame()` is an alias to `tibble()`.
#'
#' `tibble_()` and its alias `data_frame_()` use lazy evaluation and are
#' deprecated. New code should use `tibble()` or `data_frame()` with
#' [quasiquotation].
#'
#' @param ... A set of name-value pairs. Arguments are evaluated sequentially,
#' so you can refer to previously created variables. These arguments are
#' processed with [rlang::quos()] and support unquote via `!!` and
#' unquote-splice via `!!!`.
#' @seealso [as_tibble()] to turn an existing list into
#' a data frame.
#' @export
#' @examples
#' a <- 1:5
#' tibble(a, b = a * 2)
#' tibble(a, b = a * 2, c = 1)
#' tibble(x = runif(10), y = x * 2)
#'
#' lst(n = 5, x = runif(n))
#'
#' # tibble never coerces its inputs
#' str(tibble(letters))
#' str(tibble(x = list(diag(1), diag(2))))
#'
#' # or munges column names
#' tibble(`a + b` = 1:5)
#'
#' # You can splice-unquote a list of quotes and formulas
#' tibble(!!! list(x = rlang::quo(1:10), y = quote(x * 2)))
#'
#' # data frames can only contain 1d atomic vectors and lists
#' # and can not contain POSIXlt
#' \dontrun{
#' tibble(x = tibble(1, 2, 3))
#' tibble(y = strptime("2000/01/01", "%x"))
#' }
tibble <- function(...) {
  # Capture the dots as quosures (auto-naming unnamed arguments), then build
  # the columns sequentially so later columns can refer to earlier ones.
  cols <- quos(..., .named = TRUE)
  as_tibble(lst_quos(cols, expand = TRUE))
}
#' @export
#' @usage NULL
#' @rdname tibble
tibble_ <- function(xs) {
  # Deprecated lazyeval interface: translate the lazy dots to quosures
  # (evaluated in the caller's environment), then delegate to tibble().
  xs <- compat_lazy_dots(xs, caller_env())
  tibble(!!! xs)
}
# Historical aliases: data_frame() and data_frame_() are kept for backward
# compatibility with code written before the tibble() rename.
#' @export
#' @rdname tibble
data_frame <- tibble
#' @export
#' @rdname tibble
#' @usage NULL
data_frame_ <- tibble_
#' Test if the object is a tibble
#'
#' This function returns `FALSE` for regular data frames and `TRUE` for tibbles.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the `tbl_df` class.
#' @export
is_tibble <- function(x) {
  # inherits() is the idiomatic (and equivalent) way to test class
  # membership, compared to matching against class(x) with %in%.
  inherits(x, "tbl_df")
}
# Dotted-name alias retained for backward compatibility with older code.
#' @rdname is_tibble
#' @usage NULL
#' @export
is.tibble <- is_tibble
# Validity checks --------------------------------------------------------------

# Validate the columns of a prospective tibble: every column must be named,
# names must be unique, each column must be 1-dimensional, and POSIXlt
# columns are rejected. Aborts on the first violation; otherwise returns
# `x` (with dimensions stripped from columns -- see strip_dim()).
check_tibble <- function(x) {
  # Names
  names_x <- names2(x)
  bad_name <- which(is.na(names_x) | names_x == "")
  if (has_length(bad_name)) {
    abort(error_column_must_be_named(bad_name))
  }
  dups <- which(duplicated(names_x))
  if (has_length(dups)) {
    abort(error_column_must_have_unique_name(names_x[dups]))
  }

  # Types: every column must be a 1d vector or list (per is_1d()).
  is_xd <- which(!map_lgl(x, is_1d))
  if (has_length(is_xd)) {
    classes <- map_chr(x[is_xd], function(x) class(x)[[1]])
    abort(error_column_must_be_vector(names_x[is_xd], classes))
  }
  x[] <- map(x, strip_dim)

  # POSIXlt is list-based and not supported as a column type; POSIXct
  # should be used instead.
  posixlt <- which(map_lgl(x, inherits, "POSIXlt"))
  if (has_length(posixlt)) {
    abort(error_time_column_must_be_posixct(names_x[posixlt]))
  }
  x
}
recycle_columns <- function(x) {
  # Column lengths; NROW() handles vectors and data-frame columns alike.
  col_lengths <- map_int(x, NROW)

  # Nothing to do when every column already shares one length (including
  # the all-zero-length case).
  if (all(col_lengths == col_lengths[1L])) {
    return(x)
  }

  # Target length is the longest non-scalar column (0 if there is none);
  # only length-1 columns are recycled up to it.
  max <- max(col_lengths[col_lengths != 1L], 0L)
  is_scalar <- col_lengths == 1
  if (max > 1L && any(is_scalar)) {
    x[is_scalar] <- map(x[is_scalar], rep, max)
  }
  x
}
|
#' Transform a quantitative variable into a qualitative variable
#'
#' This function transforms a quantitative variable into a qualitative
#' one by breaking it into classes with the same frequencies.
#'
#' @param var variable to transform
#' @param nbclass number of classes
#' @param include.lowest argument passed to the \code{cut} function
#' @param right argument passed to the \code{cut} function
#' @param dig.lab argument passed to the \code{cut} function
#' @param ... arguments passed to the \code{cut} function
#' @details
#' This is just a simple wrapper around the \code{cut} and \code{quantile}
#' functions.
#' @return
#' The result is a factor.
#' @seealso
#' \code{\link{cut}}, \code{\link{quantile}}
#' @examples
#' data(iris)
#' sepal.width3cl <- quant.cut(iris$Sepal.Width,3)
#' table(sepal.width3cl)
#' @export
`quant.cut` <-
  function (var, nbclass, include.lowest=TRUE, right=FALSE, dig.lab=5, ...) {
    # Class boundaries at equally spaced quantiles; unique() guards against
    # duplicated quantiles (ties), which cut() would reject.
    probs <- seq(0, 1, length.out = nbclass + 1)
    breaks <- unique(stats::quantile(var, probs = probs, na.rm = TRUE))
    # Delegate the actual binning to cut(); the result is a factor.
    cut(var, breaks = breaks, include.lowest = include.lowest,
        right = right, dig.lab = dig.lab, ...)
  }
#' Recode values of a variable to missing values, using exact or regular expression matching.
#'
#' This function recodes selected values of a quantitative or qualitative
#' variable by matching its levels to exact or regular expression matches.
#'
#' @param x variable to recode. The variable is coerced to a factor if necessary.
#' @param ... levels to recode as missing in the variable. The values are coerced to character strings, meaning that you can pass numeric values to the function.
#' @param verbose print a table of missing levels before recoding them as missing. Defaults to \code{FALSE}.
#' @param regex use regular expressions to match values that include the "*" or "|" wildcards. Defaults to \code{TRUE}.
#' @param as.numeric coerce the recoded variable to \code{numeric}. The function recommends the option when the recode returns only numeric values. Defaults to FALSE.
#' @return
#' The result is a factor with properly encoded missing values. If the recoded variable contains only numeric values, it is converted to an object of class \code{numeric}.
#' @seealso
#' \code{\link{regex}}
#' @author François Briatte <f.briatte@@gmail.com>
#' @examples
#' data(hdv2003)
#' ## With exact string matches.
#' hdv2003$nivetud = recode.na(hdv2003$nivetud, "Inconnu")
#' ## With regular expressions.
#' hdv2003$relig = recode.na(hdv2003$relig, "[A|a]ppartenance", "Rejet|NSP")
#' ## Showing missing values.
#' hdv2003$clso = recode.na(hdv2003$clso, "Ne sait pas", verbose = TRUE)
#' ## Test results with freq.
#' freq(recode.na(hdv2003$trav.satisf, "Equilibre"))
#' ## Truncate a count variable (recommends numeric conversion).
#' freq(recode.na(hdv2003$freres.soeurs, 5:22))
#' @export
recode.na <- function(x, ..., verbose = FALSE, regex = TRUE, as.numeric = FALSE) {
  if (!is.factor(x)) x <- factor(x)

  values <- as.character(c(...))

  # Values containing the "*" or "|" wildcards are treated as regular
  # expressions when `regex = TRUE`.
  wildcard_idx <- which(grepl("\\*|\\|", values))
  patterns <- values[wildcard_idx]

  # Level indices matched through regular expressions.
  regex_matches <- NULL
  if (length(patterns) > 0 && regex) {
    regex_matches <- unlist(lapply(patterns, function(p) {
      # "*" is stripped so that e.g. "foo*" behaves as a substring match.
      which(grepl(gsub("\\*", "", p), levels(x)))
    }))
  }

  # Level indices matched exactly. NOTE(review): as in the original code,
  # exact matching is attempted on *all* supplied values, including the
  # wildcard ones (a dead assignment `m = m[-r]` suggested the wildcard
  # values were perhaps meant to be excluded) -- confirm intended.
  exact_matches <- NULL
  if (length(values) > 0) {
    exact_matches <- unlist(lapply(values, function(v) which(levels(x) %in% v)))
  }

  # Levels to recode as missing, and the values they currently cover.
  na_levels <- levels(x)[unique(c(regex_matches, exact_matches))]
  matched <- factor(x[x %in% na_levels])

  # Count the recoded values per level and report.
  counts <- matrix(table(matched))
  rownames(counts) <- levels(matched)
  colnames(counts) <- "n"
  message("Recoded ", sum(counts), " values to NA.")
  if (sum(counts) > 0 && verbose) print(counts)

  # Recode and drop the now-empty levels.
  if (sum(counts) > 0) {
    x[which(x %in% na_levels)] <- NA
    x <- factor(x, levels = levels(x)[-which(levels(x) %in% levels(matched))])
  }

  # Suggest numeric conversion when only numeric characters remain.
  numbers_only <- !grepl("\\D", gsub("\\.\\s", "", paste0(levels(x), collapse = "")))
  if (numbers_only && !as.numeric)
    message("Recoded variable contains only numeric characters. ",
            "Consider using as.numeric = TRUE.")

  # BUG FIX: as.numeric() on a factor returns the internal level codes,
  # not the level labels; convert via as.character() to recover values.
  if (as.numeric) x <- as.numeric(as.character(x))
  x
}
|
/R/recode.R
|
no_license
|
gdutz/questionr
|
R
| false
| false
| 4,019
|
r
|
#' Transform a quantitative variable into a qualitative variable
#'
#' This function transforms a quantitative variable into a qualitative
#' one by breaking it into classes with the same frequencies.
#'
#' @param var variable to transform
#' @param nbclass number of classes
#' @param include.lowest argument passed to the \code{cut} function
#' @param right argument passed to the \code{cut} function
#' @param dig.lab argument passed to the \code{cut} function
#' @param ... arguments passed to the \code{cut} function
#' @details
#' This is just a simple wrapper around the \code{cut} and \code{quantile}
#' functions. Breaks are the \code{nbclass + 1} evenly spaced quantiles of
#' \code{var}; duplicated quantiles are collapsed, so the result can have
#' fewer than \code{nbclass} classes when \code{var} has many tied values.
#' @return
#' The result is a factor.
#' @seealso
#' \code{\link{cut}}, \code{\link{quantile}}
#' @examples
#' data(iris)
#' sepal.width3cl <- quant.cut(iris$Sepal.Width,3)
#' table(sepal.width3cl)
#' @export
quant.cut <- function(var, nbclass, include.lowest = TRUE, right = FALSE,
                      dig.lab = 5, ...) {
  # `length.out` is spelled out in full: the original relied on partial
  # matching of the `length` argument name, which lintr flags and which
  # breaks if seq() ever gains an argument starting with "length".
  breaks <- unique(
    stats::quantile(var, probs = seq(0, 1, length.out = nbclass + 1), na.rm = TRUE)
  )
  cut(var, breaks = breaks, dig.lab = dig.lab, right = right,
      include.lowest = include.lowest, ...)
}
#' Recode values of a variable to missing values, using exact or regular expression matching.
#'
#' This function recodes selected values of a quantitative or qualitative
#' variable by matching its levels to exact or regular expression matches.
#'
#' @param x variable to recode. The variable is coerced to a factor if necessary.
#' @param ... levels to recode as missing in the variable. The values are coerced to character strings, meaning that you can pass numeric values to the function.
#' @param verbose print a table of missing levels before recoding them as missing. Defaults to \code{FALSE}.
#' @param regex use regular expressions to match values that include the "*" or "|" wildcards. Defaults to \code{TRUE}.
#' @param as.numeric coerce the recoded variable to \code{numeric}. The function recommends the option when the recode returns only numeric values. Defaults to FALSE.
#' @return
#' The result is a factor with properly encoded missing values. If the recoded variable contains only numeric values, it is converted to an object of class \code{numeric}.
#' @seealso
#' \code{\link{regex}}
#' @author François Briatte <f.briatte@@gmail.com>
#' @examples
#' data(hdv2003)
#' ## With exact string matches.
#' hdv2003$nivetud = recode.na(hdv2003$nivetud, "Inconnu")
#' ## With regular expressions.
#' hdv2003$relig = recode.na(hdv2003$relig, "[A|a]ppartenance", "Rejet|NSP")
#' ## Showing missing values.
#' hdv2003$clso = recode.na(hdv2003$clso, "Ne sait pas", verbose = TRUE)
#' ## Test results with freq.
#' freq(recode.na(hdv2003$trav.satisf, "Equilibre"))
#' ## Truncate a count variable (recommends numeric conversion).
#' freq(recode.na(hdv2003$freres.soeurs, 5:22))
#' @export
recode.na <- function(x, ..., verbose = FALSE, regex = TRUE, as.numeric = FALSE) {
  if (!is.factor(x)) x <- factor(x)

  values <- as.character(c(...))

  # Values containing the "*" or "|" wildcards are treated as regular
  # expressions when `regex = TRUE`.
  wildcard_idx <- which(grepl("\\*|\\|", values))
  patterns <- values[wildcard_idx]

  # Level indices matched through regular expressions.
  regex_matches <- NULL
  if (length(patterns) > 0 && regex) {
    regex_matches <- unlist(lapply(patterns, function(p) {
      # "*" is stripped so that e.g. "foo*" behaves as a substring match.
      which(grepl(gsub("\\*", "", p), levels(x)))
    }))
  }

  # Level indices matched exactly. NOTE(review): as in the original code,
  # exact matching is attempted on *all* supplied values, including the
  # wildcard ones (a dead assignment `m = m[-r]` suggested the wildcard
  # values were perhaps meant to be excluded) -- confirm intended.
  exact_matches <- NULL
  if (length(values) > 0) {
    exact_matches <- unlist(lapply(values, function(v) which(levels(x) %in% v)))
  }

  # Levels to recode as missing, and the values they currently cover.
  na_levels <- levels(x)[unique(c(regex_matches, exact_matches))]
  matched <- factor(x[x %in% na_levels])

  # Count the recoded values per level and report.
  counts <- matrix(table(matched))
  rownames(counts) <- levels(matched)
  colnames(counts) <- "n"
  message("Recoded ", sum(counts), " values to NA.")
  if (sum(counts) > 0 && verbose) print(counts)

  # Recode and drop the now-empty levels.
  if (sum(counts) > 0) {
    x[which(x %in% na_levels)] <- NA
    x <- factor(x, levels = levels(x)[-which(levels(x) %in% levels(matched))])
  }

  # Suggest numeric conversion when only numeric characters remain.
  numbers_only <- !grepl("\\D", gsub("\\.\\s", "", paste0(levels(x), collapse = "")))
  if (numbers_only && !as.numeric)
    message("Recoded variable contains only numeric characters. ",
            "Consider using as.numeric = TRUE.")

  # BUG FIX: as.numeric() on a factor returns the internal level codes,
  # not the level labels; convert via as.character() to recover values.
  if (as.numeric) x <- as.numeric(as.character(x))
  x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gamelift_operations.R
\name{gamelift_start_match_backfill}
\alias{gamelift_start_match_backfill}
\title{Finds new players to fill open slots in an existing game session}
\usage{
gamelift_start_match_backfill(TicketId, ConfigurationName,
GameSessionArn, Players)
}
\arguments{
\item{TicketId}{A unique identifier for a matchmaking ticket. If no ticket ID is
specified here, Amazon GameLift will generate one in the form of a UUID.
Use this identifier to track the match backfill ticket status and
retrieve match results.}
\item{ConfigurationName}{[required] Name of the matchmaker to use for this request. You can use either the
configuration name or ARN value. The ARN of the matchmaker that was used
with the original game session is listed in the GameSession object,
\code{MatchmakerData} property.}
\item{GameSessionArn}{Amazon Resource Name
(\href{https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html}{ARN})
that is assigned to a game session and uniquely identifies it. This is
the same as the game session ID.}
\item{Players}{[required] Match information on all players that are currently assigned to the game
session. This information is used by the matchmaker to find new players
and add them to the existing game.
\itemize{
\item PlayerID, PlayerAttributes, Team -\\\\- This information is
maintained in the GameSession object, \code{MatchmakerData} property, for
all players who are currently assigned to the game session. The
matchmaker data is in JSON syntax, formatted as a string. For more
details, see \href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data}{Match Data}.
\item LatencyInMs -\\\\- If the matchmaker uses player latency, include a
latency value, in milliseconds, for the Region that the game session
is currently in. Do not include latency values for any other Region.
}}
}
\description{
Finds new players to fill open slots in an existing game session. This
operation can be used to add players to matched games that start with
fewer than the maximum number of players or to replace players when they
drop out. By backfilling with the same matchmaker used to create the
original match, you ensure that new players meet the match criteria and
maintain a consistent experience throughout the game session. You can
backfill a match anytime after a game session has been created.
To request a match backfill, specify a unique ticket ID, the existing
game session's ARN, a matchmaking configuration, and a set of data that
describes all current players in the game session. If successful, a
match backfill ticket is created and returned with status set to QUEUED.
The ticket is placed in the matchmaker's ticket pool and processed.
Track the status of the ticket to respond as needed.
The process of finding backfill matches is essentially identical to the
initial matchmaking process. The matchmaker searches the pool and groups
tickets together to form potential matches, allowing only one backfill
ticket per potential match. Once a match is formed, the matchmaker
creates player sessions for the new players. All tickets in the match
are updated with the game session's connection information, and the
GameSession object is updated to include matchmaker data on the new
players. For more detail on how match backfill requests are processed,
see \href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html}{How Amazon GameLift FlexMatch Works}.
\strong{Learn more}
\href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html}{Backfill Existing Games with FlexMatch}
\href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html}{How GameLift FlexMatch Works}
\strong{Related operations}
\itemize{
\item StartMatchmaking
\item DescribeMatchmaking
\item StopMatchmaking
\item AcceptMatch
\item StartMatchBackfill
}
}
\section{Request syntax}{
\preformatted{svc$start_match_backfill(
TicketId = "string",
ConfigurationName = "string",
GameSessionArn = "string",
Players = list(
list(
PlayerId = "string",
PlayerAttributes = list(
list(
S = "string",
N = 123.0,
SL = list(
"string"
),
SDM = list(
123.0
)
)
),
Team = "string",
LatencyInMs = list(
123
)
)
)
)
}
}
\keyword{internal}
|
/paws/man/gamelift_start_match_backfill.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 4,503
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gamelift_operations.R
\name{gamelift_start_match_backfill}
\alias{gamelift_start_match_backfill}
\title{Finds new players to fill open slots in an existing game session}
\usage{
gamelift_start_match_backfill(TicketId, ConfigurationName,
GameSessionArn, Players)
}
\arguments{
\item{TicketId}{A unique identifier for a matchmaking ticket. If no ticket ID is
specified here, Amazon GameLift will generate one in the form of a UUID.
Use this identifier to track the match backfill ticket status and
retrieve match results.}
\item{ConfigurationName}{[required] Name of the matchmaker to use for this request. You can use either the
configuration name or ARN value. The ARN of the matchmaker that was used
with the original game session is listed in the GameSession object,
\code{MatchmakerData} property.}
\item{GameSessionArn}{Amazon Resource Name
(\href{https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html}{ARN})
that is assigned to a game session and uniquely identifies it. This is
the same as the game session ID.}
\item{Players}{[required] Match information on all players that are currently assigned to the game
session. This information is used by the matchmaker to find new players
and add them to the existing game.
\itemize{
\item PlayerID, PlayerAttributes, Team -\\\\- This information is
maintained in the GameSession object, \code{MatchmakerData} property, for
all players who are currently assigned to the game session. The
matchmaker data is in JSON syntax, formatted as a string. For more
details, see \href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data}{Match Data}.
\item LatencyInMs -\\\\- If the matchmaker uses player latency, include a
latency value, in milliseconds, for the Region that the game session
is currently in. Do not include latency values for any other Region.
}}
}
\description{
Finds new players to fill open slots in an existing game session. This
operation can be used to add players to matched games that start with
fewer than the maximum number of players or to replace players when they
drop out. By backfilling with the same matchmaker used to create the
original match, you ensure that new players meet the match criteria and
maintain a consistent experience throughout the game session. You can
backfill a match anytime after a game session has been created.
To request a match backfill, specify a unique ticket ID, the existing
game session's ARN, a matchmaking configuration, and a set of data that
describes all current players in the game session. If successful, a
match backfill ticket is created and returned with status set to QUEUED.
The ticket is placed in the matchmaker's ticket pool and processed.
Track the status of the ticket to respond as needed.
The process of finding backfill matches is essentially identical to the
initial matchmaking process. The matchmaker searches the pool and groups
tickets together to form potential matches, allowing only one backfill
ticket per potential match. Once a match is formed, the matchmaker
creates player sessions for the new players. All tickets in the match
are updated with the game session's connection information, and the
GameSession object is updated to include matchmaker data on the new
players. For more detail on how match backfill requests are processed,
see \href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html}{How Amazon GameLift FlexMatch Works}.
\strong{Learn more}
\href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html}{Backfill Existing Games with FlexMatch}
\href{https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html}{How GameLift FlexMatch Works}
\strong{Related operations}
\itemize{
\item StartMatchmaking
\item DescribeMatchmaking
\item StopMatchmaking
\item AcceptMatch
\item StartMatchBackfill
}
}
\section{Request syntax}{
\preformatted{svc$start_match_backfill(
TicketId = "string",
ConfigurationName = "string",
GameSessionArn = "string",
Players = list(
list(
PlayerId = "string",
PlayerAttributes = list(
list(
S = "string",
N = 123.0,
SL = list(
"string"
),
SDM = list(
123.0
)
)
),
Team = "string",
LatencyInMs = list(
123
)
)
)
)
}
}
\keyword{internal}
|
# Checks the construction of the SNN graph.
# require(scran); require(testthat); source("test-snn.R")
# Constructing a reference value.
library(igraph)
library(FNN)
check <- function(vals, k=10) {
  # Reference check: rebuild the shared-nearest-neighbour (SNN) graph by
  # brute force and compare every row of its weighted adjacency against
  # the output of buildSNNGraph().
  g <- buildSNNGraph(vals, k=k, d=NA) # turning off PCA.
  nn.out <- get.knn(t(vals), k=k)
  # Each row of IDX: the cell index itself followed by the indices of its
  # k nearest neighbours (so a cell is rank-1 neighbour of itself).
  IDX <- cbind(seq_len(ncol(vals)), nn.out$nn.index)
  ncells <- ncol(vals)
  expect_identical(seq_len(ncells), as.vector(V(g)))
  for (i in seq_len(ncells)) {
    inn <- IDX[i,]
    collected <- numeric(ncells)
    for (j in seq_len(ncells)) {
      jnn <- IDX[j,]
      shared <- intersect(inn, jnn)
      if (length(shared)==0) next
      # Weight contribution of each shared neighbour: k + 1 minus the
      # average of its ranks in the two neighbour lists; keep the best.
      s <- k + 1 - 0.5*(match(shared, inn) + match(shared, jnn))
      collected[j] <- max(s)
    }
    collected[i] <- 0  # no self-loops
    expect_equal(collected, g[i])  # g[i] is the weighted adjacency row of cell i
  }
  return(NULL)
}
set.seed(20000)
ncells <- 200
ngenes <- 1000
means <- 2^runif(ngenes, -1, 5)
# Run the brute-force comparison for several choices of k on fresh
# Gaussian expression matrices (genes x cells).
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
check(dummy, k=10)
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
check(dummy, k=20)
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
check(dummy, k=5)
# Checking that the value is sensible with subset.row.
are_graphs_same <- function(g1, g2) {
  # Two graphs are considered identical when their full (weighted)
  # adjacency matrices agree.
  expect_equal(g1[], g2[])
  TRUE
}
# subset.row should be equivalent to subsetting the matrix up front.
selected <- sample(ngenes, 50)
g <- buildSNNGraph(dummy[selected,])
g2 <- buildSNNGraph(dummy, subset.row=selected)
are_graphs_same(g, g2)
# Checking SCESet construction.
suppressWarnings(sce <- newSCESet(countData=2^dummy))
g <- buildSNNGraph(sce)
g2 <- buildSNNGraph(exprs(sce))
are_graphs_same(g, g2)
g <- buildSNNGraph(sce, assay="counts")
g2 <- buildSNNGraph(2^dummy)
are_graphs_same(g, g2)
g <- buildSNNGraph(sce, subset.row=selected)
g2 <- buildSNNGraph(sce[selected,])
are_graphs_same(g, g2)
# Spike-in features should be excluded automatically.
sce <- calculateQCMetrics(sce, feature_controls=list(ERCC=selected))
setSpike(sce) <- "ERCC"
g <- buildSNNGraph(sce)
g2 <- buildSNNGraph(sce[-selected,])
are_graphs_same(g, g2)
# Checking multi-core processing works.
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
ref <- scran:::.find_knn(dummy, k=10, BPPARAM=SerialParam())
alt <- scran:::.find_knn(dummy, k=10, BPPARAM=SerialParam(), force=TRUE)
expect_equal(ref, alt)
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
ref <- scran:::.find_knn(dummy, k=5, BPPARAM=SerialParam())
alt <- scran:::.find_knn(dummy, k=5, BPPARAM=SerialParam(), force=TRUE)
expect_equal(ref, alt)
# ref <- scran:::.find_knn(dummy, k=5)
# alt <- scran:::.find_knn(dummy, k=5, BPPARAM=MulticoreParam(3))
# expect_equal(ref, alt)
# Checking PCA was working.
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
pc <- prcomp(t(dummy))
ref <- buildSNNGraph(t(pc$x[,1:20]), k=10, d=NA)
alt <- buildSNNGraph(dummy, k=10, d=20)
are_graphs_same(ref, alt)
ref <- buildSNNGraph(t(pc$x[,1:50]), k=10, d=NA)
alt <- buildSNNGraph(dummy, k=10, d=50)
are_graphs_same(ref, alt)
|
/tests/testthat/test-snn.R
|
no_license
|
jan-glx/scran
|
R
| false
| false
| 2,994
|
r
|
# Checks the construction of the SNN graph.
# require(scran); require(testthat); source("test-snn.R")
# Constructing a reference value.
library(igraph)
library(FNN)
check <- function(vals, k=10) {
  # Reference check: rebuild the shared-nearest-neighbour (SNN) graph by
  # brute force and compare every row of its weighted adjacency against
  # the output of buildSNNGraph().
  g <- buildSNNGraph(vals, k=k, d=NA) # turning off PCA.
  nn.out <- get.knn(t(vals), k=k)
  # Each row of IDX: the cell index itself followed by the indices of its
  # k nearest neighbours (so a cell is rank-1 neighbour of itself).
  IDX <- cbind(seq_len(ncol(vals)), nn.out$nn.index)
  ncells <- ncol(vals)
  expect_identical(seq_len(ncells), as.vector(V(g)))
  for (i in seq_len(ncells)) {
    inn <- IDX[i,]
    collected <- numeric(ncells)
    for (j in seq_len(ncells)) {
      jnn <- IDX[j,]
      shared <- intersect(inn, jnn)
      if (length(shared)==0) next
      # Weight contribution of each shared neighbour: k + 1 minus the
      # average of its ranks in the two neighbour lists; keep the best.
      s <- k + 1 - 0.5*(match(shared, inn) + match(shared, jnn))
      collected[j] <- max(s)
    }
    collected[i] <- 0  # no self-loops
    expect_equal(collected, g[i])  # g[i] is the weighted adjacency row of cell i
  }
  return(NULL)
}
set.seed(20000)
ncells <- 200
ngenes <- 1000
means <- 2^runif(ngenes, -1, 5)
# Run the brute-force comparison for several choices of k on fresh
# Gaussian expression matrices (genes x cells).
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
check(dummy, k=10)
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
check(dummy, k=20)
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
check(dummy, k=5)
# Checking that the value is sensible with subset.row.
are_graphs_same <- function(g1, g2) {
  # Two graphs are considered identical when their full (weighted)
  # adjacency matrices agree.
  expect_equal(g1[], g2[])
  TRUE
}
# subset.row should be equivalent to subsetting the matrix up front.
selected <- sample(ngenes, 50)
g <- buildSNNGraph(dummy[selected,])
g2 <- buildSNNGraph(dummy, subset.row=selected)
are_graphs_same(g, g2)
# Checking SCESet construction.
suppressWarnings(sce <- newSCESet(countData=2^dummy))
g <- buildSNNGraph(sce)
g2 <- buildSNNGraph(exprs(sce))
are_graphs_same(g, g2)
g <- buildSNNGraph(sce, assay="counts")
g2 <- buildSNNGraph(2^dummy)
are_graphs_same(g, g2)
g <- buildSNNGraph(sce, subset.row=selected)
g2 <- buildSNNGraph(sce[selected,])
are_graphs_same(g, g2)
# Spike-in features should be excluded automatically.
sce <- calculateQCMetrics(sce, feature_controls=list(ERCC=selected))
setSpike(sce) <- "ERCC"
g <- buildSNNGraph(sce)
g2 <- buildSNNGraph(sce[-selected,])
are_graphs_same(g, g2)
# Checking multi-core processing works.
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
ref <- scran:::.find_knn(dummy, k=10, BPPARAM=SerialParam())
alt <- scran:::.find_knn(dummy, k=10, BPPARAM=SerialParam(), force=TRUE)
expect_equal(ref, alt)
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
ref <- scran:::.find_knn(dummy, k=5, BPPARAM=SerialParam())
alt <- scran:::.find_knn(dummy, k=5, BPPARAM=SerialParam(), force=TRUE)
expect_equal(ref, alt)
# ref <- scran:::.find_knn(dummy, k=5)
# alt <- scran:::.find_knn(dummy, k=5, BPPARAM=MulticoreParam(3))
# expect_equal(ref, alt)
# Checking PCA was working.
dummy <- matrix(rnorm(ngenes*ncells), ncol=ncells, nrow=ngenes)
pc <- prcomp(t(dummy))
ref <- buildSNNGraph(t(pc$x[,1:20]), k=10, d=NA)
alt <- buildSNNGraph(dummy, k=10, d=20)
are_graphs_same(ref, alt)
ref <- buildSNNGraph(t(pc$x[,1:50]), k=10, d=NA)
alt <- buildSNNGraph(dummy, k=10, d=50)
are_graphs_same(ref, alt)
|
# Redirect all console output of this step into a log file.
sink("logs/2_analyze.txt")
cat("This file conducts the analysis")
print(Sys.time())
# Prepare environment
library(tidyverse)
library(mgcv)
library(foreach)
library(doParallel)
library(doRNG)
# Register a 5-worker parallel backend for the foreach/%dorng% loops below.
cl <- makeCluster(5)
registerDoParallel(cl)
# Set seed for replicability
set.seed(90095)
# Load prepared data
prepared <- readRDS(file = "intermediate/prepared.RDS")
# Set several parameters
## Number of times to replicate 5-fold cross-validation
n_replicates <- 20
## Number of draws when simulating uncertainty of coefficient estimates
n_sims <- 100
## Increment to income for the first-difference estimator.
## This is the amount we counterfactually add to each person's income.
delta <- 25e3
####################
# HELPER FUNCTIONS #
####################
# Load a helper function to make weighted quantiles
source("code/weighted.quantile.R")
# Function to conduct cross-validation for candidate learners
# Fit one candidate learner (a weighted binomial GAM/GLM for enrollment)
# to `data`.
#   data    -- data frame containing the outcome `enrolled`, the model
#              predictors, and a weight column `w` (referenced by mgcv's
#              non-standard evaluation in `weights = w`)
#   formula -- model formula passed to mgcv::gam()
# Returns the fitted mgcv model object.
fit_learner <- function(data, formula) {
  # Note here explains one non-default choice.
  # The gamma parameter controls the preference for smoothness.
  # mgcv package default is 1.
  # We see that 1 gives very wiggle curves when used with family = binomial.
  # We are increasing gamma to a higher value to prefer more smoothness.
  # When there is an interaction() in the model formula because income is
  # interacted with two or more variables, we strongly need smoothness.
  # In that case, set to 3.
  # Otherwise, we set to 1.5.
  gamma_value <- ifelse(any(grepl("interaction",formula)),
                        3, 1.5)
  gam(formula,
      data = data,
      family = binomial,
      weights = w,
      method = "REML",
      gamma = gamma_value)
}
# Run k-fold cross-validation for one learner: for each fold in `data$set`,
# fit on the remaining folds and predict the held-out fold. Returns the
# row-bound held-out sets with an added out-of-fold prediction column `yhat`.
# NOTE(review): the `gamma_value` argument is accepted (and supplied by the
# caller in the CV loop) but never used here -- fit_learner() derives its
# own gamma from the formula. Confirm whether it should be forwarded.
mutate_cv_fit <- function(data, formula, gamma_value = 1) {
  foreach(i = 1:max(data$set), .combine = "rbind") %do% {
    # Training data: all folds except fold i
    train <- data %>%
      filter(set != i)
    # Held-out data: fold i
    test <- data %>%
      filter(set == i)
    fit <- fit_learner(data = train,
                       formula = formula)
    test %>%
      mutate(yhat = predict(fit, type = "response",
                            newdata = test))
  }
}
# Function to combine standard errors of individual learners
# into a standard error for the ensemble.
# This is based on V(aA + bB) = a^2V(A) + b^2V(B) + 2abCov(A,B)
# under the conservative assumption that Cor(A,B) = 1.
combine_se <- function(se, w) {
  # Under Cor = 1:
  #   V(sum_i w_i X_i) = sum_i w_i^2 se_i^2 + 2 * sum_{i<j} w_i w_j se_i se_j
  # (which algebraically equals (sum_i w_i se_i)^2).
  # BUG FIX: the original summed each i < j pair only once, omitting the
  # factor of 2 on the covariance terms and understating the variance;
  # the commented-out double loop below shows the intended full i x j sum.
  variance_terms <- w ^ 2 * se ^ 2
  if (length(se) < 2) {
    # A single learner has no covariance terms; also avoids the error
    # combn(1, 2) would raise.
    return(sqrt(sum(variance_terms)))
  }
  index_pairs <- combn(seq_along(se), 2)
  covariance_terms <- apply(index_pairs, 2, function(x) prod(w[x] * se[x]))
  total_variance <- sum(variance_terms) + 2 * sum(covariance_terms)
  total_se <- sqrt(total_variance)
  return(total_se)
  #sqrt(sum(foreach(i = 1:length(se), .combine = "c") %do% {
  #  foreach(j = 1:length(se), .combine = "c") %do% {
  #    se[i]*se[j]*w[i]*w[j]
  #  }
  #}))
}
##########################################
# DEFINE FORMULAS FOR CANDIDATE LEARNERS #
##########################################
# Candidate learners
# The first six entries are parametric ("glm_*": log-income enters linearly);
# the last six replace log-income with cubic regression splines ("gam_*").
# Within each set, income is additive or interacted with education, wealth
# tercile, race, or education x wealth / education x race jointly.
learners <- list(formula(enrolled ~ log(income) + race + educJoint + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*wealthTercile + educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*race + educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*wealthTercile*educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*race*educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5) + race + educJoint + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = educJoint) + educJoint + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = wealthTercile) + educJoint + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = race) + educJoint + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = interaction(educJoint, wealthTercile)) + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = interaction(educJoint, race)) + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile))
names(learners) <- c(paste("glm",c("additive","educ","wealth","race","educWealth","educRace"),sep = "_"),
                     paste("gam",c("additive","educ","wealth","race","educWealth","educRace"),sep = "_"))
####################
# CROSS VALIDATION #
####################
# Create a data frame of cross-validated out-of-fold predictions
# from each base learner
# Structure of CV loops:
# 2. Loop over replicates
# 1. Loop over learners within each replicate
# 3. Conduct 5-fold CV within each replicate
cv_yhat_out_of_fold <- foreach(replicate_index = 1:n_replicates, .combine = "rbind", .packages = c("tidyverse","mgcv","foreach")) %dorng% {
  # Define folded data for cross-validation
  folded <- prepared %>%
    # Create folds within strata of categorical covariates
    group_by(educJoint, wealthTercile, race) %>%
    # Randomly sample folds 1:5.
    # BUG FIX: the original drew fold labels from 1:n_replicates (= 20),
    # contradicting the intended 5-fold design documented above and the
    # ceiling(n() / 5) fold sizing; draw from 1:5 instead.
    mutate(set = sample(rep(1:5, ceiling(n() / 5))[1:n()])) %>%
    ungroup()
  foreach(learner = names(learners), .combine = "rbind") %do% {
    # Within this learner, loop over replicates
    mutate_cv_fit(data = folded,
                  formula = learners[[learner]],
                  # If this fit interacts income with two other variables, increase the
                  # preference for smoothness with a higher gamma value
                  gamma_value = ifelse(grepl("educWealth|educRace",learner),3,1.5)) %>%
      # Record the learner and the index of this replicate
      mutate(learner = learner,
             replicate_index = replicate_index)
  }
}
saveRDS(cv_yhat_out_of_fold,
        file = "intermediate/cv_yhat_out_of_fold.RDS")
##################
# LEARN ENSEMBLE #
##################
# This code learns a weighted average of the base learners
# which optimally predicts out-of-fold cases from cross-validation
# Prepare a dataset for learning the ensemble
data_for_ensemble <- cv_yhat_out_of_fold %>%
  rename(y = enrolled) %>%
  select(PUBID, educJoint, wealthTercile, replicate_index, y, learner, yhat) %>%
  # Make the prediction from each base learner appear in its own column;
  # in this ensembling stage, these columns are now the predictors
  pivot_wider(names_from = "learner", values_from = "yhat") %>%
  group_by(educJoint, wealthTercile)
# Create inputs for quadprog::solve.QP()
# Define the number of learners
p <- length(learners)
# Create a matrix for constraints: all greater than 0 and sum to 1
# (first column: equality constraint sum(weights) = 1; identity block:
# weights >= 0)
Amat <- cbind(rep(1,p),
              diag(1,p))
# Create a model matrix for prediction (one column per base learner)
X <- as.matrix(
  data_for_ensemble %>%
    ungroup() %>%
    select(starts_with("gam"),starts_with("glm"))
)
# Learn the optimal weighting of the base learners
# (constrained least-squares stacking: minimize ||y - X %*% weights||^2)
QP.out <- quadprog::solve.QP(Dmat = t(X) %*% X,
                             dvec = t(data_for_ensemble$y) %*% X,
                             Amat = Amat,
                             bvec = c(1,rep(0,p)),
                             meq = 1)
# Create a data frame with the learned weight on each learner
ensemble_weights <- data.frame(learner = colnames(X),
                               weight = QP.out$solution)
saveRDS(ensemble_weights, file = "intermediate/ensemble_weights.RDS")
###########################################
# ESTIMATE EACH BASE LEARNER ON FULL DATA #
###########################################
# Refit every candidate learner on the full (un-folded) prepared data;
# these fits are used below for prediction and coefficient simulation.
base_learners_estimated <- foreach(learner_case = names(learners)) %do% {
  fit_learner(data = prepared,
              formula = learners[[learner_case]])
}
names(base_learners_estimated) <- names(learners)
###################################
# PREDICT THE DOSE RESPONSE CURVE #
###################################
# We will estimate dose response curve
# - x education x wealth
# - x education x race
# Define shorthand for each of those
grouping_names <- c("educWealth","educRace")
# For each grouping, estimate the dose-response curve with each base learner
dose_response_each_learner <- foreach(grouping_name = grouping_names, .combine = "rbind") %do% {
  # Choose the grouping variables for this run
  if (grouping_name == "educWealth") {
    byVars <- c("educJoint","wealthTercile")
  } else if (grouping_name == "educRace") {
    byVars <- c("educJoint","race")
  }
  estimate_this_grouping <- foreach(learner = names(learners), .combine = "rbind") %do% {
    # Create a data frame to predict
    toPredict <- prepared %>%
      # Determine the 5th and 95th percentiles that will bound the counterfactual income values considered
      group_by(across(all_of(byVars))) %>%
      mutate(p05 = weighted.quantile(income, q = .05, w = w),
             p95 = weighted.quantile(income, q = .95, w = w)) %>%
      # Nest to have a dataset within each group
      nest() %>%
      # Within each dataset, expand so that each row appears 20 times
      # taking the sequence of counterfactual income values
      mutate(data = map(data, function(data_case) {
        a_seq <- seq(data_case$p05[1],data_case$p95[1],length.out = 20)
        foreach(a_val = a_seq, .combine = "rbind") %do% {
          data_case %>%
            mutate(income = a_val)
        }
      })) %>%
      # Unnest to create a more typical data frame
      unnest(cols = "data")
    # Extract model objects
    beta_hat <- coef(base_learners_estimated[[learner]])
    # Linear-predictor matrix for the counterfactual rows
    X <- predict(base_learners_estimated[[learner]], newdata = toPredict, type = "lpmatrix")
    # Predict for point estimate (inverse-logit of the linear predictor)
    yhat <- plogis(X %*% beta_hat)
    point <- toPredict %>%
      ungroup() %>%
      mutate(yhat = yhat) %>%
      group_by(across(all_of(byVars)), income) %>%
      summarize(estimate = weighted.mean(yhat, w = w),
                .groups = "drop")
    # Predict over simulated coefficients for estimation uncertainty
    # (draws from the asymptotic normal of the coefficient estimates)
    Sigma_hat <- vcov(base_learners_estimated[[learner]])
    beta_star <- mvtnorm::rmvnorm(n_sims, beta_hat, Sigma_hat)
    yhat_star <- plogis(X %*% t(beta_star))
    colnames(yhat_star) <- paste0("yhat_star",1:ncol(yhat_star))
    # Aggregate those to create a standard error
    # (SD across simulation draws of the group-level weighted mean)
    se <- toPredict %>%
      ungroup() %>%
      bind_cols(yhat_star) %>%
      select(all_of(byVars), income, w, starts_with('yhat_star')) %>%
      pivot_longer(cols = starts_with("yhat_star")) %>%
      group_by(across(all_of(byVars)), income, name) %>%
      summarize(estimate_star = weighted.mean(value, w = w),
                .groups = "drop_last") %>%
      summarize(se = sd(estimate_star), .groups = "drop")
    # Return the point estimate and standard errors
    point %>%
      left_join(se, by = c(byVars,"income")) %>%
      mutate(learner = learner)
  }
  # Create a nested tibble to return for clean output
  clean_tibble <- estimate_this_grouping %>%
    mutate(grouping_name = grouping_name) %>%
    group_by(grouping_name, learner) %>%
    nest()
  return(clean_tibble)
}
saveRDS(dose_response_each_learner,
        file = "intermediate/dose_response_each_learner.RDS")
# Combine the base-learner dose-response curves into one ensemble curve
# per grouping, using the stacking weights learned above.
dose_response_ensemble <- foreach(grouping_name_case = unique(dose_response_each_learner$grouping_name), .combine = "rbind") %do% {
  # Restrict to the dose-response curves for this grouping
  dose_response_each_learner %>%
    filter(grouping_name == grouping_name_case) %>%
    # Unnest to work with a data frame
    unnest(cols = "data") %>%
    # Merge in the ensemble weights
    left_join(ensemble_weights, by = "learner") %>%
    # Take weighted average with ensemble weights
    group_by(grouping_name, across(any_of(c("educJoint","wealthTercile","race"))), income) %>%
    summarize(estimate = weighted.mean(estimate, w = weight),
              # Conservative SE combining via combine_se() (assumes
              # perfectly correlated learner errors)
              se = combine_se(se = se, w = weight),
              .groups = "drop") %>%
    # Prepare the tibble to return
    group_by(grouping_name) %>%
    nest()
}
saveRDS(dose_response_ensemble,
        file = "intermediate/dose_response_ensemble.RDS")
##############################
# FIRST DIFFERENCE ESTIMATES #
##############################
# Create datasets to predict for the first-difference estimator
d0 <- prepared
d1 <- prepared %>% mutate(income = income + delta)
# For each learner, calculate the first difference estimates
first_difference_each_learner <- foreach(learner = names(learners), .combine = "rbind") %do% {
# Extract model objects
beta_hat <- coef(base_learners_estimated[[learner]])
X0 <- predict(base_learners_estimated[[learner]], newdata = d0, type = "lpmatrix")
X1 <- predict(base_learners_estimated[[learner]], newdata = d1, type = "lpmatrix")
# Predict the conditional effect estimate for each unit
effect <- plogis(X1 %*% beta_hat) - plogis(X0 %*% beta_hat)
# Predict over simulated estimation uncertainty
Sigma_hat <- vcov(base_learners_estimated[[learner]])
beta_star <- mvtnorm::rmvnorm(n_sims, beta_hat, Sigma_hat)
effect_star <- plogis(X1 %*% t(beta_star)) - plogis(X0 %*% t(beta_star))
colnames(effect_star) <- paste0("effect_star_",1:ncol(effect_star))
# Function to make aggregate first-difference estimate and standard error
# for a given vector of observation indices in the prepared data frame
make_aggregate <- function(indices) {
# Create the point estimate
estimate <- weighted.mean(effect[indices], w = prepared$w[indices])
# Create the simulated estimates
point_star <- apply(effect_star, 2, function(x) {
weighted.mean(x[indices], w = prepared$w[indices])
})
# Calculate the standard error over simulations
se <- sd(point_star)
return(data.frame(estimate = estimate,
se = se))
}
# Note the unique values of each predictor within which we will aggregate
race_vals <- unique(prepared$race)
educ_vals <- unique(prepared$educJoint)
wealth_vals <- unique(prepared$wealthTercile)
# Create aggregated results
first_difference_estimates <- list(
individual = prepared %>%
select(PUBID) %>%
mutate(estimate = effect[,1],
se = apply(effect_star,1,sd)),
overall = make_aggregate(1:nrow(prepared)),
byRace = foreach(x = race_vals, .combine = "rbind") %do% make_aggregate(which(prepared$race == x)) %>% mutate(race = race_vals),
byEduc = foreach(x = educ_vals, .combine = "rbind") %do% make_aggregate(which(prepared$educJoint == x)) %>% mutate(educJoint = educ_vals),
byWealth = foreach(x = wealth_vals, .combine = "rbind") %do% make_aggregate(which(prepared$wealthTercile == x)) %>% mutate(wealthTercile = wealth_vals),
byEducRace = foreach(x = educ_vals, .combine = "rbind") %do% {
foreach(y = race_vals, .combine = "rbind") %do% {
make_aggregate(which(prepared$educJoint == x & prepared$race == y)) %>% mutate(educJoint = x, race = y)
}
},
byEducWealth = foreach(x = educ_vals, .combine = "rbind") %do% {
foreach(y = wealth_vals, .combine = "rbind") %do% {
make_aggregate(which(prepared$educJoint == x & prepared$wealthTercile == y)) %>% mutate(educJoint = x, wealthTercile = y)
}
},
byEducWealthRace = foreach(x = educ_vals, .combine = "rbind") %do% {
foreach(y = wealth_vals, .combine = "rbind") %do% {
foreach(z = race_vals, .combine = "rbind") %do% {
make_aggregate(which(prepared$educJoint == x & prepared$wealthTercile == y & prepared$race == z)) %>% mutate(educJoint = x, wealthTercile = y, race = z)
}
}
}
)
return(tibble(learner = learner,
quantity = names(first_difference_estimates),
estimate = first_difference_estimates))
}
saveRDS(first_difference_each_learner,
file = "intermediate/first_difference_each_learner.RDS")
# Create ensemble first difference estimates
first_difference_ensemble <- first_difference_each_learner %>%
left_join(ensemble_weights, by = "learner") %>%
group_by(quantity) %>%
nest() %>%
# Work with data within each target quantity
mutate(data = map(data, function(x) {
x %>%
# Un-nest learner-specific estimates
unnest(cols = "estimate") %>%
# Weighted average by ensemble weights
group_by(across(-all_of(c("learner","estimate","se","weight")))) %>%
summarize(estimate = weighted.mean(estimate, w = weight),
se = combine_se(se = se, w = weight),
.groups = "drop")
}))
saveRDS(first_difference_ensemble,
file = "intermediate/first_difference_ensemble.RDS")
print(Sys.time())
print(sessionInfo())
sink()
|
/continuous_treatments/code/2_analyze.R
|
no_license
|
ilundberg/replication
|
R
| false
| false
| 16,946
|
r
|
# Redirect console output to a log file for this run
sink("logs/2_analyze.txt")
cat("This file conducts the analysis")
print(Sys.time())
# Prepare environment
library(tidyverse)
library(mgcv)
library(foreach)
library(doParallel)
library(doRNG)
# Register a 5-worker parallel backend used by %dorng% below
cl <- makeCluster(5)
registerDoParallel(cl)
# Set seed for replicability
set.seed(90095)
# Load prepared data
prepared <- readRDS(file = "intermediate/prepared.RDS")
# Set several parameters
## Number of times to replicate 5-fold cross-validation
n_replicates <- 20
## Number of draws when simulating uncertainty of coefficient estimates
n_sims <- 100
## Increment to income for the first-difference estimator.
## This is the amount we counterfactually add to each person's income.
delta <- 25e3
####################
# HELPER FUNCTIONS #
####################
# Load a helper function to make weighted quantiles
source("code/weighted.quantile.R")
# Fit a single candidate learner (a weighted binomial GAM/GLM) to data.
#
# data: data frame holding the outcome `enrolled`, the predictors used by
#   `formula`, and sampling weights in column `w`.
# formula: model formula passed to mgcv::gam().
# Returns the fitted mgcv gam object.
fit_learner <- function(data, formula) {
  # The mgcv `gamma` parameter controls the preference for smoothness
  # (package default is 1). With family = binomial the default produced
  # very wiggly curves, so smoothing is strengthened. Formulas in which
  # income is interacted with two or more variables contain an
  # interaction() term and need the strongest smoothing (gamma = 3);
  # all others use gamma = 1.5.
  if (any(grepl("interaction", formula))) {
    gamma_value <- 3
  } else {
    gamma_value <- 1.5
  }
  gam(formula,
      data = data,
      family = binomial,
      weights = w,
      method = "REML",
      gamma = gamma_value)
}
# Produce out-of-fold predictions via K-fold cross-validation.
#
# For each fold index 1..max(data$set), fit `formula` on all remaining
# folds with fit_learner() and predict the held-out fold; the held-out
# rows from every fold are stacked and returned with a `yhat` column of
# predicted probabilities.
#
# data: folded data frame containing an integer fold label column `set`.
# formula: model formula forwarded to fit_learner().
# gamma_value: retained for backward compatibility with existing callers
#   but NOT used here -- fit_learner() derives its own gamma from the
#   formula.
mutate_cv_fit <- function(data, formula, gamma_value = 1) {
  # seq_len() is safe if max(data$set) were ever 0 (1:0 would loop twice)
  foreach(i = seq_len(max(data$set)), .combine = "rbind") %do% {
    train <- data %>%
      filter(set != i)
    test <- data %>%
      filter(set == i)
    fit <- fit_learner(data = train,
                       formula = formula)
    test %>%
      mutate(yhat = predict(fit, type = "response",
                            newdata = test))
  }
}
# Combine the standard errors of the individual learners into a standard
# error for the weighted ensemble estimate.
# Based on V(aA + bB) = a^2 V(A) + b^2 V(B) + 2ab Cov(A,B)
# under the conservative assumption that Cor(A,B) = 1 for every pair.
# With perfect positive correlation the variance of the weighted sum
# collapses to (sum_i w_i se_i)^2, so the combined SE is sum_i w_i * se_i.
#
# se: vector of learner-specific standard errors (non-negative).
# w: vector of ensemble weights (non-negative), same length as se.
# Returns a single non-negative combined standard error.
#
# BUG FIX: the previous implementation summed each covariance pair only
# once (dropping the factor of 2 from the formula above) and errored when
# length(se) == 1 because combn(1, 2) is undefined; the closed form below
# fixes both.
combine_se <- function(se, w) {
  sum(w * se)
}
##########################################
# DEFINE FORMULAS FOR CANDIDATE LEARNERS #
##########################################
# Candidate learners: six parametric (GLM-style) and six smooth (GAM-style)
# specifications. Each varies how log(income) is moderated: not at all
# (additive), or interacted with education, wealth tercile, race,
# education x wealth, or education x race. All include a wealth spline/slope
# that varies by wealth tercile.
learners <- list(formula(enrolled ~ log(income) + race + educJoint + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*wealthTercile + educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*race + educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*wealthTercile*educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ log(income)*race*educJoint + race + log(wealth)*wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5) + race + educJoint + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = educJoint) + educJoint + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = wealthTercile) + educJoint + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = race) + educJoint + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = interaction(educJoint, wealthTercile)) + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile),
                 formula(enrolled ~ s(log(income), bs = "cr", k = 5, by = interaction(educJoint, race)) + race + s(log(wealth), bs = "cr", k = 5, by = wealthTercile) + wealthTercile))
# Name each learner "glm_*" or "gam_*" by family and moderator; the order
# here must match the order of the formulas above.
names(learners) <- c(paste("glm",c("additive","educ","wealth","race","educWealth","educRace"),sep = "_"),
                     paste("gam",c("additive","educ","wealth","race","educWealth","educRace"),sep = "_"))
####################
# CROSS VALIDATION #
####################
# Create a data frame of cross-validated out-of-fold predictions
# from each base learner
# Structure of CV loops:
# 2. Loop over replicates
# 1. Loop over learners within each replicate
# 3. Conduct 5-fold CV within each replicate
# %dorng% parallelizes over replicates with reproducible RNG streams
cv_yhat_out_of_fold <- foreach(replicate_index = 1:n_replicates, .combine = "rbind", .packages = c("tidyverse","mgcv","foreach")) %dorng% {
  # Define folded data for cross-validation
  folded <- prepared %>%
    # Create folds within strata of categorical covariates
    group_by(educJoint, wealthTercile, race) %>%
    # Randomly sample folds 1:5
    # NOTE(review): rep(1:n_replicates, ...) with n_replicates = 20 labels
    # folds 1:20, not 1:5 as the comment above claims (while ceiling(n()/5)
    # sizes the pool for 5 folds); confirm whether rep(1:5, ...) was intended.
    mutate(set = sample(rep(1:n_replicates, ceiling(n() / 5))[1:n()])) %>%
    ungroup()
  foreach(learner = names(learners), .combine = "rbind") %do% {
    # Within this learner, loop over replicates
    # NOTE(review): the gamma_value passed here is ignored by mutate_cv_fit(),
    # which lets fit_learner() derive gamma from the formula instead.
    mutate_cv_fit(data = folded,
                  formula = learners[[learner]],
                  # If this fit interacts income with two other variables, increase the
                  # preference for smoothness with a higher gamma value
                  gamma_value = ifelse(grepl("educWealth|educRace",learner),3,1.5)) %>%
      # Record the learner and the index of this replicate
      mutate(learner = learner,
             replicate_index = replicate_index)
  }
}
saveRDS(cv_yhat_out_of_fold,
        file = "intermediate/cv_yhat_out_of_fold.RDS")
##################
# LEARN ENSEMBLE #
##################
# This code learns a weighted average of the base learners
# which optimally predicts out-of-fold cases from cross-validation.
# Formally: minimize ||y - X w||^2 subject to sum(w) = 1 and w >= 0,
# solved as a quadratic program.
# Prepare a dataset for learning the ensemble
data_for_ensemble <- cv_yhat_out_of_fold %>%
  rename(y = enrolled) %>%
  select(PUBID, educJoint, wealthTercile, replicate_index, y, learner, yhat) %>%
  # Make the prediction from each base learner appear in its own column;
  # in this ensembling stage, these columns are now the predictors
  pivot_wider(names_from = "learner", values_from = "yhat") %>%
  group_by(educJoint, wealthTercile)
# Create inputs for quadprog::solve.QP()
# Define the number of learners
p <- length(learners)
# Create a matrix for constraints: all greater than 0 and sum to 1
# (first column enforces the equality sum(w) = 1; the identity enforces w >= 0)
Amat <- cbind(rep(1,p),
              diag(1,p))
# Create a model matrix for prediction: one column per learner's yhat
X <- as.matrix(
  data_for_ensemble %>%
    ungroup() %>%
    select(starts_with("gam"),starts_with("glm"))
)
# Learn the optimal weighting of the base learners
# (Dmat/dvec encode the least-squares objective; meq = 1 makes the first
# constraint an equality)
QP.out <- quadprog::solve.QP(Dmat = t(X) %*% X,
                             dvec = t(data_for_ensemble$y) %*% X,
                             Amat = Amat,
                             bvec = c(1,rep(0,p)),
                             meq = 1)
# Create a data frame with the learned weight on each learner
ensemble_weights <- data.frame(learner = colnames(X),
                               weight = QP.out$solution)
saveRDS(ensemble_weights, file = "intermediate/ensemble_weights.RDS")
###########################################
# ESTIMATE EACH BASE LEARNER ON FULL DATA #
###########################################
# Refit every candidate learner on the complete analysis sample; the
# result is a named list of fitted model objects keyed by learner name.
base_learners_estimated <- lapply(names(learners), function(learner_case) {
  fit_learner(data = prepared, formula = learners[[learner_case]])
})
names(base_learners_estimated) <- names(learners)
###################################
# PREDICT THE DOSE RESPONSE CURVE #
###################################
# We will estimate dose response curve
# - x education x wealth
# - x education x race
# Define shorthand for each of those
grouping_names <- c("educWealth","educRace")
# For each grouping, estimate the dose-response curve with each base learner.
# Output: one nested row per (grouping, learner), whose data holds the curve
# of estimated enrollment probability over counterfactual income values.
dose_response_each_learner <- foreach(grouping_name = grouping_names, .combine = "rbind") %do% {
  # Translate the grouping shorthand into the grouping variables used below
  if (grouping_name == "educWealth") {
    byVars <- c("educJoint","wealthTercile")
  } else if (grouping_name == "educRace") {
    byVars <- c("educJoint","race")
  }
  estimate_this_grouping <- foreach(learner = names(learners), .combine = "rbind") %do% {
    # Create a data frame to predict
    toPredict <- prepared %>%
      # Determine the 5th and 95th percentiles that will bound the counterfactual income values considered
      group_by(across(all_of(byVars))) %>%
      mutate(p05 = weighted.quantile(income, q = .05, w = w),
             p95 = weighted.quantile(income, q = .95, w = w)) %>%
      # Nest to have a dataset within each group
      nest() %>%
      # Within each dataset, expand so that each row appears 20 times
      # taking the sequence of counterfactual income values
      mutate(data = map(data, function(data_case) {
        a_seq <- seq(data_case$p05[1],data_case$p95[1],length.out = 20)
        foreach(a_val = a_seq, .combine = "rbind") %do% {
          data_case %>%
            mutate(income = a_val)
        }
      })) %>%
      # Unnest to create a more typical data frame
      unnest(cols = "data")
    # Extract model objects
    beta_hat <- coef(base_learners_estimated[[learner]])
    # lpmatrix: design matrix of the linear predictor, so yhat = plogis(X b)
    X <- predict(base_learners_estimated[[learner]], newdata = toPredict, type = "lpmatrix")
    # Predict for point estimate
    yhat <- plogis(X %*% beta_hat)
    # Average predictions within group x counterfactual income value
    point <- toPredict %>%
      ungroup() %>%
      mutate(yhat = yhat) %>%
      group_by(across(all_of(byVars)), income) %>%
      summarize(estimate = weighted.mean(yhat, w = w),
                .groups = "drop")
    # Predict over simulated coefficients for estimation uncertainty
    Sigma_hat <- vcov(base_learners_estimated[[learner]])
    beta_star <- mvtnorm::rmvnorm(n_sims, beta_hat, Sigma_hat)
    yhat_star <- plogis(X %*% t(beta_star))
    colnames(yhat_star) <- paste0("yhat_star",1:ncol(yhat_star))
    # Aggregate those to create a standard error:
    # within each group x income, take the SD across the simulated
    # weighted means (one per coefficient draw)
    se <- toPredict %>%
      ungroup() %>%
      bind_cols(yhat_star) %>%
      select(all_of(byVars), income, w, starts_with('yhat_star')) %>%
      pivot_longer(cols = starts_with("yhat_star")) %>%
      group_by(across(all_of(byVars)), income, name) %>%
      summarize(estimate_star = weighted.mean(value, w = w),
                .groups = "drop_last") %>%
      summarize(se = sd(estimate_star), .groups = "drop")
    # Return the point estimate and standard errors
    point %>%
      left_join(se, by = c(byVars,"income")) %>%
      mutate(learner = learner)
  }
  # Create a nested tibble to return for clean output
  clean_tibble <- estimate_this_grouping %>%
    mutate(grouping_name = grouping_name) %>%
    group_by(grouping_name, learner) %>%
    nest()
  return(clean_tibble)
}
saveRDS(dose_response_each_learner,
        file = "intermediate/dose_response_each_learner.RDS")
# Aggregate the learner-specific dose-response curves into one ensemble
# curve per grouping, using the quadratic-programming weights learned
# earlier (`ensemble_weights`).
dose_response_ensemble <- foreach(grouping_name_case = unique(dose_response_each_learner$grouping_name), .combine = "rbind") %do% {
  # Restrict to the dose-response curves for this grouping
  dose_response_each_learner %>%
    filter(grouping_name == grouping_name_case) %>%
    # Unnest to work with a data frame
    unnest(cols = "data") %>%
    # Merge in the ensemble weights
    left_join(ensemble_weights, by = "learner") %>%
    # Take weighted average with ensemble weights
    group_by(grouping_name, across(any_of(c("educJoint","wealthTercile","race"))), income) %>%
    summarize(estimate = weighted.mean(estimate, w = weight),
              se = combine_se(se = se, w = weight),
              .groups = "drop") %>%
    # Prepare the tibble to return
    group_by(grouping_name) %>%
    nest()
}
saveRDS(dose_response_ensemble,
        file = "intermediate/dose_response_ensemble.RDS")
##############################
# FIRST DIFFERENCE ESTIMATES #
##############################
# Create datasets to predict for the first-difference estimator:
# d0 is the observed data; d1 is the counterfactual in which every
# person's income is increased by `delta`.
d0 <- prepared
d1 <- prepared %>% mutate(income = income + delta)
# For each learner, calculate the first difference estimates
first_difference_each_learner <- foreach(learner = names(learners), .combine = "rbind") %do% {
  # Extract model objects
  beta_hat <- coef(base_learners_estimated[[learner]])
  # type = "lpmatrix" returns the design matrix of the linear predictor,
  # so predictions are plogis(X %*% beta); this makes simulation over
  # coefficient draws a single matrix multiplication.
  X0 <- predict(base_learners_estimated[[learner]], newdata = d0, type = "lpmatrix")
  X1 <- predict(base_learners_estimated[[learner]], newdata = d1, type = "lpmatrix")
  # Predict the conditional effect estimate for each unit:
  # change in predicted enrollment probability under the income increment
  effect <- plogis(X1 %*% beta_hat) - plogis(X0 %*% beta_hat)
  # Predict over simulated estimation uncertainty
  Sigma_hat <- vcov(base_learners_estimated[[learner]])
  beta_star <- mvtnorm::rmvnorm(n_sims, beta_hat, Sigma_hat)
  effect_star <- plogis(X1 %*% t(beta_star)) - plogis(X0 %*% t(beta_star))
  colnames(effect_star) <- paste0("effect_star_",1:ncol(effect_star))
  # Function to make aggregate first-difference estimate and standard error
  # for a given vector of observation indices in the prepared data frame
  make_aggregate <- function(indices) {
    # Create the point estimate
    estimate <- weighted.mean(effect[indices], w = prepared$w[indices])
    # Create the simulated estimates
    point_star <- apply(effect_star, 2, function(x) {
      weighted.mean(x[indices], w = prepared$w[indices])
    })
    # Calculate the standard error over simulations
    se <- sd(point_star)
    return(data.frame(estimate = estimate,
                      se = se))
  }
  # Note the unique values of each predictor within which we will aggregate
  race_vals <- unique(prepared$race)
  educ_vals <- unique(prepared$educJoint)
  wealth_vals <- unique(prepared$wealthTercile)
  # Create aggregated results.
  # NOTE(review): in the one-line by* entries below, %do% and %>% have equal
  # precedence and associate left, so mutate() is applied to the rbind-ed
  # result of the whole foreach loop; this relies on foreach iterating in
  # the order of *_vals so the appended label column lines up row-wise.
  first_difference_estimates <- list(
    individual = prepared %>%
      select(PUBID) %>%
      mutate(estimate = effect[,1],
             se = apply(effect_star,1,sd)),
    overall = make_aggregate(1:nrow(prepared)),
    byRace = foreach(x = race_vals, .combine = "rbind") %do% make_aggregate(which(prepared$race == x)) %>% mutate(race = race_vals),
    byEduc = foreach(x = educ_vals, .combine = "rbind") %do% make_aggregate(which(prepared$educJoint == x)) %>% mutate(educJoint = educ_vals),
    byWealth = foreach(x = wealth_vals, .combine = "rbind") %do% make_aggregate(which(prepared$wealthTercile == x)) %>% mutate(wealthTercile = wealth_vals),
    byEducRace = foreach(x = educ_vals, .combine = "rbind") %do% {
      foreach(y = race_vals, .combine = "rbind") %do% {
        make_aggregate(which(prepared$educJoint == x & prepared$race == y)) %>% mutate(educJoint = x, race = y)
      }
    },
    byEducWealth = foreach(x = educ_vals, .combine = "rbind") %do% {
      foreach(y = wealth_vals, .combine = "rbind") %do% {
        make_aggregate(which(prepared$educJoint == x & prepared$wealthTercile == y)) %>% mutate(educJoint = x, wealthTercile = y)
      }
    },
    byEducWealthRace = foreach(x = educ_vals, .combine = "rbind") %do% {
      foreach(y = wealth_vals, .combine = "rbind") %do% {
        foreach(z = race_vals, .combine = "rbind") %do% {
          make_aggregate(which(prepared$educJoint == x & prepared$wealthTercile == y & prepared$race == z)) %>% mutate(educJoint = x, wealthTercile = y, race = z)
        }
      }
    }
  )
  # One row per target quantity; the estimate column is a list-column
  return(tibble(learner = learner,
                quantity = names(first_difference_estimates),
                estimate = first_difference_estimates))
}
saveRDS(first_difference_each_learner,
        file = "intermediate/first_difference_each_learner.RDS")
# Create ensemble first difference estimates by averaging the
# learner-specific estimates with the learned ensemble weights
first_difference_ensemble <- first_difference_each_learner %>%
  left_join(ensemble_weights, by = "learner") %>%
  group_by(quantity) %>%
  nest() %>%
  # Work with data within each target quantity
  mutate(data = map(data, function(x) {
    x %>%
      # Un-nest learner-specific estimates
      unnest(cols = "estimate") %>%
      # Weighted average by ensemble weights
      group_by(across(-all_of(c("learner","estimate","se","weight")))) %>%
      summarize(estimate = weighted.mean(estimate, w = weight),
                se = combine_se(se = se, w = weight),
                .groups = "drop")
  }))
saveRDS(first_difference_ensemble,
        file = "intermediate/first_difference_ensemble.RDS")
# Log completion time and session state, then close the log sink
print(Sys.time())
print(sessionInfo())
sink()
|
###########################################################
### Author:
### Adapted from Code written by
### Date: 12/9/2016
### Project: GBD Nonfatal Estimation
### Purpose: CSMR and Prevalence by location and sex- Alzheimer's Disease
###
### For each country, computes the age-standardized cause-specific
### mortality rate (CSMR), the age-standardized prevalence, and their
### ratio, separately by sex, and writes one CSV per sex.
###########################################################
#Setup
rm(list=ls())
if (Sys.info()["sysname"] == "Linux") {
  j.root <- "/home/j/"
  h.root <- "/homes/USERNAME/"
} else {
  j.root <- "J:"
  h.root <- "H:"
}
#load packages (library() errors loudly if a package is missing,
#unlike require(), which only returns FALSE)
library(data.table)
library(RMySQL)
##connection string for epi database
con <- dbConnect(MySQL(), user= USERNAME, password= PASSWORD,
                 host = HOSTNAME, dbname='cod')
##set directories
central.function <- FILEPATH
output.dir <- FILEPATH
#get central functions
source(paste0(j.root, central.function, "get_location_metadata.R"))
source(paste0(j.root, central.function, "get_model_results.R"))
source(paste0(j.root, central.function, "get_population.R"))
source(paste0(j.root, FILEPATH, ".r"))
##Get gbd standard age-weights, restricted to the 40+ age groups used below
age_weights <- as.data.table(get_age_weights())
age_weights <- age_weights[gbd_round_id==4 & age_group_weight_description=="IHME standard age weight",]
age_weights <- age_weights[age_group_id %in% c(13:20, 30:32, 235),]
##renormalize so the retained weights sum to 1
age_weights <- age_weights[, sum := sum(age_group_weight_value)]
age_weights <- age_weights[, new_weight := age_group_weight_value/sum]
##pull locations
locations <- as.data.table(get_location_metadata(location_set_id=35))
locations <- locations[,c("location_id", "ihme_loc_id", "location_name", "is_estimate", "level"), with=F] ##keep what we need
##pull age-standardized prevalence (age_group_id 27), one model per sex
prevalence.f <- as.data.table(get_model_results(gbd_team = "epi", model_version_id = 95694,
                                                measure_id = 5, year_id = 2016, sex_id = 2, age_group_id = 27))
prevalence.m <- as.data.table(get_model_results(gbd_team = "epi", model_version_id = 95694,
                                                measure_id = 5, year_id = 2016, sex_id = 1, age_group_id = 27))
setnames(prevalence.f, "mean", "as_prev")
setnames(prevalence.m,"mean","as_prev")
## BUG FIX: the mean column was renamed to "as_prev" just above, so it must
## be selected as "as_prev" here (selecting the nonexistent "prev" column
## raised "object 'prev' not found")
prevalence.m <- prevalence.m[,.(age_group_id, sex_id, location_id, as_prev)]
prevalence.f <- prevalence.f[,.(age_group_id, sex_id, location_id, as_prev)]
##pull csmr (one CoD model per sex)
csmr.m <- as.data.table(get_model_results(gbd_team = "cod", model_version_id = 92756,
                                          age_group_id = c(2:20, 30:32, 235), year_id = 2016))
csmr.f <- as.data.table(get_model_results(gbd_team = "cod", model_version_id = 92753,
                                          age_group_id = c(2:20, 30:32, 235), year_id = 2016))
csmr.m[,deaths := mean_cf * mean_env] ##generate number of deaths (cause fraction x envelope)
csmr.f[,deaths := mean_cf * mean_env] ##generate number of deaths (cause fraction x envelope)
csmr.m <- csmr.m[,.(deaths, population, age_group_id, sex_id, location_id)]
csmr.f <- csmr.f[,.(deaths, population, age_group_id, sex_id, location_id)]
##for both csmr sets, compute the age-standardized CSMR
all_data <- list(csmr.f = csmr.f, csmr.m = csmr.m)
for (x in names(all_data)){
  ##age standardized csmr
  ##(the inner merge on age_weights also drops age groups outside 40+)
  data <- merge(all_data[[x]], age_weights, by = c("age_group_id"))
  data[,as_csmr := deaths*new_weight/population] ##age standardize
  data[,as_csmr := sum(as_csmr), by = "location_id"] ##sum over age
  data <- unique(data, by= "location_id") ##only unique
  assign(x, data)
}
##combine CSMR and prevalence by sex; location_id is the only join key
male <- merge(csmr.m, prevalence.m, by = c("location_id"))
female <- merge(csmr.f, prevalence.f, by= c("location_id"))
##for each sex total manipulations
all_data <- list(male = male, female = female)
for (x in names(all_data)){
  data <- all_data[[x]]
  data[,ratio := as_csmr/as_prev] ##generate ratio
  data <- merge(data, locations, by= "location_id") ## merge to get location names
  data <- data[level==3,] ##only countries
  data <- data[,.(location_id, location_name, ihme_loc_id, as_csmr, as_prev, ratio)]
  assign(x, data)
}
write.csv(male, paste0(j.root, FILEPATH, ".csv"))
write.csv(female, paste0(j.root, FILEPATH, ".csv"))
|
/cod_code/neuro_dementia/dementia_country_selection_clean.R
|
no_license
|
Nermin-Ghith/ihme-modeling
|
R
| false
| false
| 4,071
|
r
|
###########################################################
### Author:
### Adapted from Code written by
### Date: 12/9/2016
### Project: GBD Nonfatal Estimation
### Purpose: CSMR and Prevalence by location and sex- Alzheimer's Disease
###
### For each country, computes the age-standardized cause-specific
### mortality rate (CSMR), the age-standardized prevalence, and their
### ratio, separately by sex, and writes one CSV per sex.
###########################################################
#Setup
rm(list=ls())
if (Sys.info()["sysname"] == "Linux") {
  j.root <- "/home/j/"
  h.root <- "/homes/USERNAME/"
} else {
  j.root <- "J:"
  h.root <- "H:"
}
#load packages (library() errors loudly if a package is missing,
#unlike require(), which only returns FALSE)
library(data.table)
library(RMySQL)
##connection string for epi database
con <- dbConnect(MySQL(), user= USERNAME, password= PASSWORD,
                 host = HOSTNAME, dbname='cod')
##set directories
central.function <- FILEPATH
output.dir <- FILEPATH
#get central functions
source(paste0(j.root, central.function, "get_location_metadata.R"))
source(paste0(j.root, central.function, "get_model_results.R"))
source(paste0(j.root, central.function, "get_population.R"))
source(paste0(j.root, FILEPATH, ".r"))
##Get gbd standard age-weights, restricted to the 40+ age groups used below
age_weights <- as.data.table(get_age_weights())
age_weights <- age_weights[gbd_round_id==4 & age_group_weight_description=="IHME standard age weight",]
age_weights <- age_weights[age_group_id %in% c(13:20, 30:32, 235),]
##renormalize so the retained weights sum to 1
age_weights <- age_weights[, sum := sum(age_group_weight_value)]
age_weights <- age_weights[, new_weight := age_group_weight_value/sum]
##pull locations
locations <- as.data.table(get_location_metadata(location_set_id=35))
locations <- locations[,c("location_id", "ihme_loc_id", "location_name", "is_estimate", "level"), with=F] ##keep what we need
##pull age-standardized prevalence (age_group_id 27), one model per sex
prevalence.f <- as.data.table(get_model_results(gbd_team = "epi", model_version_id = 95694,
                                                measure_id = 5, year_id = 2016, sex_id = 2, age_group_id = 27))
prevalence.m <- as.data.table(get_model_results(gbd_team = "epi", model_version_id = 95694,
                                                measure_id = 5, year_id = 2016, sex_id = 1, age_group_id = 27))
setnames(prevalence.f, "mean", "as_prev")
setnames(prevalence.m,"mean","as_prev")
## BUG FIX: the mean column was renamed to "as_prev" just above, so it must
## be selected as "as_prev" here (selecting the nonexistent "prev" column
## raised "object 'prev' not found")
prevalence.m <- prevalence.m[,.(age_group_id, sex_id, location_id, as_prev)]
prevalence.f <- prevalence.f[,.(age_group_id, sex_id, location_id, as_prev)]
##pull csmr (one CoD model per sex)
csmr.m <- as.data.table(get_model_results(gbd_team = "cod", model_version_id = 92756,
                                          age_group_id = c(2:20, 30:32, 235), year_id = 2016))
csmr.f <- as.data.table(get_model_results(gbd_team = "cod", model_version_id = 92753,
                                          age_group_id = c(2:20, 30:32, 235), year_id = 2016))
csmr.m[,deaths := mean_cf * mean_env] ##generate number of deaths (cause fraction x envelope)
csmr.f[,deaths := mean_cf * mean_env] ##generate number of deaths (cause fraction x envelope)
csmr.m <- csmr.m[,.(deaths, population, age_group_id, sex_id, location_id)]
csmr.f <- csmr.f[,.(deaths, population, age_group_id, sex_id, location_id)]
##for both csmr sets, compute the age-standardized CSMR
all_data <- list(csmr.f = csmr.f, csmr.m = csmr.m)
for (x in names(all_data)){
  ##age standardized csmr
  ##(the inner merge on age_weights also drops age groups outside 40+)
  data <- merge(all_data[[x]], age_weights, by = c("age_group_id"))
  data[,as_csmr := deaths*new_weight/population] ##age standardize
  data[,as_csmr := sum(as_csmr), by = "location_id"] ##sum over age
  data <- unique(data, by= "location_id") ##only unique
  assign(x, data)
}
##combine CSMR and prevalence by sex; location_id is the only join key
male <- merge(csmr.m, prevalence.m, by = c("location_id"))
female <- merge(csmr.f, prevalence.f, by= c("location_id"))
##for each sex total manipulations
all_data <- list(male = male, female = female)
for (x in names(all_data)){
  data <- all_data[[x]]
  data[,ratio := as_csmr/as_prev] ##generate ratio
  data <- merge(data, locations, by= "location_id") ## merge to get location names
  data <- data[level==3,] ##only countries
  data <- data[,.(location_id, location_name, ihme_loc_id, as_csmr, as_prev, ratio)]
  assign(x, data)
}
write.csv(male, paste0(j.root, FILEPATH, ".csv"))
write.csv(female, paste0(j.root, FILEPATH, ".csv"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runSimulation.R
\name{runSimulation}
\alias{runSimulation}
\alias{summary.SimDesign}
\alias{print.SimDesign}
\title{Run a Monte Carlo simulation given a data.frame of conditions and simulation functions}
\usage{
runSimulation(
design,
replications,
generate,
analyse,
summarise,
fixed_objects = NULL,
packages = NULL,
filename = NULL,
debug = "none",
load_seed = NULL,
save_results = FALSE,
parallel = FALSE,
ncores = parallel::detectCores(),
cl = NULL,
notification = NULL,
boot_method = "none",
boot_draws = 1000L,
CI = 0.95,
seed = rint(nrow(design), min = 1L, max = 2147483647L),
save_seeds = FALSE,
save = TRUE,
store_results = FALSE,
warnings_as_errors = FALSE,
max_errors = 50L,
allow_na = FALSE,
allow_nan = FALSE,
stop_on_fatal = FALSE,
MPI = FALSE,
save_details = list(),
progress = TRUE,
verbose = TRUE
)
\method{summary}{SimDesign}(object, ...)
\method{print}{SimDesign}(x, list2char = TRUE, ...)
}
\arguments{
\item{design}{a \code{tibble} or \code{data.frame} object containing the Monte Carlo simulation
conditions to be studied, where each row represents a unique condition and each column a factor
to be varied. See \code{\link{createDesign}} for the standard approach
to create this simulation design object}
\item{replications}{number of independent replications to perform per condition (i.e., each row in \code{design}).
Must be greater than 0}
\item{generate}{user-defined data and parameter generating function.
See \code{\link{Generate}} for details. Note that this argument may be omitted by the
user if they wish to generate the data with the \code{analyse} step, but for real-world
simulations this is generally not recommended}
\item{analyse}{user-defined analysis function that acts on the data generated from
\code{\link{Generate}} (or, if \code{generate} was omitted, can be used to generate and
analyses the simulated data). See \code{\link{Analyse}} for details}
\item{summarise}{optional (but highly recommended) user-defined summary function from \code{\link{Summarise}}
to be used to compute meta-statistical summary information after all the replications have completed within
each \code{design} condition.
Omitting this function will return a list of \code{data.frame}s (or a single \code{data.frame}, if only one row in
\code{design} is supplied) or, for more general objects (such as \code{list}s), a \code{list}
containing the results returned form \code{\link{Analyse}}.
Alternatively, the value \code{NA} can be passed to let the generate-analyse-summarise process to run as usual,
where the summarise components are instead included only as a placeholder.
Omitting this input is only recommended for didactic purposes because it leaves out a large amount of
information (e.g., try-errors, warning messages, saving files, etc), can witness memory related issues,
and generally is not as flexible internally.
If users do not wish to supply a summarise function then it
is is recommended to pass \code{NA} to this argument while also supplying passing \code{save_results = TRUE} to
save the results to the hard-drive during the simulation. This provides a
more RAM friendly alternative to storing all the Generate-Analyse results in the working environment, where
the Analysis results can be summarised at a later time}
\item{fixed_objects}{(optional) an object (usually a named \code{list})
containing additional user-defined objects
that should remain fixed across conditions. This is useful when including
large vectors/matrices of population parameters, fixed data information
that should be used across all conditions and replications (e.g., including a common design matrix
for linear regression models), or simply control constant global elements (e.g., a constant for sample size)}
\item{packages}{a character vector of external packages to be used during the simulation (e.g.,
\code{c('MASS', 'extraDistr', 'simsem')} ). Use this input when \code{parallel = TRUE} or
\code{MPI = TRUE} to use non-standard functions from additional packages,
otherwise the functions must be made available by using explicit
\code{\link{library}} or \code{\link{require}} calls within the provided simulation functions.
Alternatively, functions can be called explicitly without attaching the package with the \code{::} operator
(e.g., \code{extraDistr::rgumbel()})}
\item{filename}{(optional) the name of the \code{.rds} file to save the final simulation results to. If the extension
\code{.rds} is not included in the file name (e.g. \code{"mysimulation"} versus \code{"mysimulation.rds"}) then the
\code{.rds} extension will be automatically added to the file name to ensure the file extension is correct.
Note that if the same file name already exists in the working
directory at the time of saving then a new
file will be generated instead and a warning will be thrown. This helps to avoid accidentally overwriting
existing files. Default is \code{NULL}, indicating no file will be saved by default}
\item{debug}{a string indicating where to initiate a \code{browser()} call for editing and debugging, and pairs
particularly well with the \code{load_seed} argument for precise debugging.
General options are \code{'none'} (default; no debugging), \code{'error'}, which starts the debugger
when any error in the code is detected in one of three generate-analyse-summarise functions,
and \code{'all'}, which debugs all the user defined functions regardless of whether an error was thrown
or not. Specific options include: \code{'generate'}
to debug the data simulation function, \code{'analyse'} to debug the computational function, and
\code{'summarise'} to debug the aggregation function.
Alternatively, users may place \code{\link{browser}} calls within the respective functions for
debugging at specific lines, which is useful when debugging based on conditional evaluations (e.g.,
\code{if(this == 'problem') browser()}). Note that parallel computation flags
will automatically be disabled when a \code{browser()} is detected or when a debugging argument other than
\code{'none'} is supplied}
\item{load_seed}{used to replicate an exact simulation state, which is primarily useful for debugging purposes.
Input can be a character object indicating which file to load from when the \code{.Random.seed}s have
been saved (after a call with \code{save_seeds = TRUE}), or an integer vector indicating the actual
\code{.Random.seed} values. E.g., \code{load_seed = 'design-row-2/seed-1'}
will load the first seed in the second row of the \code{design} input, or explicitly passing the
elements from \code{.Random.seed} (see \code{\link{SimExtract}} to extract the seeds associated explicitly
with errors during the simulation, where each column represents a unique seed).
If the input is a character vector then it is important NOT
to modify the \code{design} input object, otherwise the path may not point to the correct saved location, while
if the input is an integer vector (or single column \code{tbl} object)
then it WILL be important to modify the \code{design} input in order to load this
exact seed for the corresponding design row. Default is \code{NULL}}
\item{save_results}{logical; save the results returned from \code{\link{Analyse}} to external
\code{.rds} files located in the defined \code{save_results_dirname} directory/folder?
Use this if you would like to keep track of the individual parameters returned from the \code{analysis} function.
Each saved object will contain a list of three elements containing the condition (row from \code{design}),
results (as a \code{list} or \code{matrix}), and try-errors.
See \code{\link{SimResults}} for an example of how to read these \code{.rds} files back into R
after the simulation is complete. Default is \code{FALSE}.
WARNING: saving results to your hard-drive can fill up space very quickly for larger simulations. Be sure to
test this option using a smaller number of replications before the full Monte Carlo simulation is performed.
See also \code{\link{reSummarise}} for applying summarise functions from saved
simulation results}
\item{parallel}{logical; use parallel processing from the \code{parallel} package over each
unique condition?}
\item{ncores}{number of cores to be used in parallel execution. Default uses all available}
\item{cl}{cluster object defined by \code{\link{makeCluster}} used to run code in parallel.
If \code{NULL} and \code{parallel = TRUE}, a local cluster object will be defined which
selects the maximum number of cores available
and will be stopped when the simulation is complete. Note that supplying a \code{cl}
object will automatically set the \code{parallel} argument to \code{TRUE}}
\item{notification}{an optional, empty argument function to be executed upon completion of the simulation. This can be used, for
instance, to trigger email or SMS notifications that indicate the simulation has been completed. For example,
to utilize the \code{RPushbullet} package (and assuming users have previously a) registered for a Pushbullet account,
and b) installed the application on their mobile device and computer), use the following:
\describe{
\item{Prior Setup}{Prior to defining \code{notification}, load the \code{RPushbullet} library via \code{library(RPushbullet)}. If
this is the first time you have used the package then a suitable \code{rpushbullet.json} file will not exist on your computer,
and you'll need to supply a suitable token and the device to push the notification to via the \code{pbSetup()} function}
\item{Execution}{Supply a definition of \code{notification} that utilizes the \code{pbPost} function. E.g.,
\code{runSimulation(...,
notification = function() pbPost(type = "note", title = "SimDesign", body = "Simulation Complete"))}}
}
Alternatively, if users wish to have an email sent upon completion then the following template that uses the \code{sendmailR}
package could be used:
\describe{
\item{Using \code{sendmailR}}{
\code{runSimulation(...,
notification = function() sendmailR::sendmail(from="<sendmailR@your.computer>",
to="<your.email@address>", subject="SimDesign", msg="Simulation Complete",
control=list(smtpServer="ASPMX.L.GOOGLE.COM")))}.
}
}
However, note that this may be less reliable since the email message could be directed to a spam folder.}
\item{boot_method}{method for performing non-parametric bootstrap confidence intervals
for the respective meta-statistics computed by the \code{Summarise} function.
Can be \code{'basic'} for the empirical bootstrap CI, \code{'percentile'}
for percentile CIs, \code{'norm'} for normal approximation CIs, or \code{'studentized'}
for Studentized CIs (should only be used for simulations with lower replications due to its
computational intensity). Default is \code{'none'}, which performs no bootstrapping}
\item{boot_draws}{number of non-parametric bootstrap draws to sample for the \code{summarise}
function after the generate-analyse replications are collected. Default is 1000}
\item{CI}{bootstrap confidence interval level (default is 95\%)}
\item{seed}{a vector of integers to be used for reproducibility.
The length of the vector must be equal to the number of rows in \code{design}.
This argument calls \code{\link{set.seed}} or
\code{\link{clusterSetRNGStream}} for each condition, respectively,
but will not be run when \code{MPI = TRUE}.
Default randomly generates seeds within the range 1 to 2147483647 for each condition.}
\item{save_seeds}{logical; save the \code{.Random.seed} states prior to performing each replication into
plain text files located in the defined \code{save_seeds_dirname} directory/folder?
Use this if you would like to keep track of every simulation state within each replication and design
condition. This can be used to completely replicate any cell in the simulation if need be.
As well, see the \code{load_seed} input
to load a given \code{.Random.seed} to exactly replicate the generated data and analysis state (mostly useful
for debugging). When \code{TRUE}, temporary files will also be saved
to the working directory (in the same way as when \code{save = TRUE}). Default is \code{FALSE}.
Note, however, that this option is not typically necessary or recommended since the \code{.Random.seed} states for simulation
replications that throw errors during the execution are automatically stored within the final simulation
object, and can be extracted and investigated using \code{\link{SimExtract}}. Hence, this option is only of
interest when \emph{all} of the replications must be reproducible (which occurs very rarely),
otherwise the defaults to \code{runSimulation} should be sufficient}
\item{save}{logical; save the temporary simulation state to the hard-drive? This is useful
for simulations which require an extended amount of time, though for shorter simulations
can be disabled to slightly improve computational efficiency. When \code{TRUE}, a temp file
will be created in the working directory which allows the simulation state to be saved
and recovered (in case of power outages, crashes, etc). As well, triggering this flag will
save any fatal \code{.Random.seed} states when conditions unexpectedly crash (where each seed
is stored row-wise in an external .rds file), which provides a much easier mechanism
to debug issues (see \code{load_seed} for details). Upon completion, this temp file will be removed.
To recover your simulation at the last known location (having patched the issues in the previous execution code)
simply re-run the code you used to
initially define the simulation and the external file will automatically be detected and read-in.
Default is \code{TRUE}}
\item{store_results}{logical; store the complete tables of simulation results
in the returned object? This is \code{FALSE} by default to help avoid RAM
issues (see \code{save_results} as a more suitable alternative). To extract these results
pass the returned object to \code{SimExtract(..., what = 'results')}, which will return a named list
of all the simulation results for each condition}
\item{warnings_as_errors}{logical; treat warning messages as error messages during the simulation? Default is FALSE,
therefore warnings are only collected and not used to restart the data generation step, and the seeds associated with
the warning message conditions are not stored within the final simulation object}
\item{max_errors}{the simulation will terminate when more than this number of consecutive errors are thrown in any
given condition, causing the simulation to continue to the next unique \code{design} condition.
This is included to avoid getting stuck in infinite re-draws, and to indicate that something fatally problematic
is going wrong in the generate-analyse phases. Default is 50}
\item{allow_na}{logical; should \code{NA}s be allowed in the analyse step as a valid result from the simulation
analysis? Default is FALSE}
\item{allow_nan}{logical; should \code{NaN}s be allowed in the analyse step as a valid result from the simulation
analysis? Default is FALSE}
\item{stop_on_fatal}{logical; should the simulation be terminated immediately when
the maximum number of consecutive errors (\code{max_errors}) is reached? If \code{FALSE},
the simulation will continue as though errors did not occur, however a column
\code{FATAL_TERMINATION} will be included in the resulting object indicating the final
error message observed, and \code{NA} placeholders will be placed in all other row-elements. Default is
\code{FALSE}}
\item{MPI}{logical; use the \code{foreach} package in a form usable by MPI to run simulation
in parallel on a cluster? Default is \code{FALSE}}
\item{save_details}{a list pertaining to information regarding how and where files should be saved
when the \code{save} or \code{save_results} flags are triggered.
\describe{
\item{\code{safe}}{logical; trigger whether safe-saving should be performed. When \code{TRUE} files
will never be overwritten accidentally, and where appropriate the program will either stop or generate
new files with unique names. Default is \code{TRUE}}
\item{\code{compname}}{name of the computer running the simulation. Normally this doesn't need
to be modified, but in the event that a manual node breaks down while running a simulation the
results from the temp files may be resumed on another computer by changing the name of the
node to match the broken computer. Default is the result of evaluating \code{unname(Sys.info()['nodename'])}}
\item{\code{out_rootdir}}{root directory to save all files to. Default uses the
current working directory}
\item{\code{save_results_dirname}}{a string indicating the name of the folder to save
result objects to when \code{save_results = TRUE}. If a directory/folder does not exist
in the current working directory then a unique one will be created automatically. Default is
\code{'SimDesign-results_'} with the associated \code{compname} appended}
\item{\code{save_seeds_dirname}}{a string indicating the name of the folder to save
\code{.Random.seed} objects to when \code{save_seeds = TRUE}. If a directory/folder does not exist
in the current working directory then one will be created automatically. Default is
\code{'SimDesign-seeds_'} with the associated \code{compname} appended}
}}
\item{progress}{logical; display a progress bar (using the \code{pbapply} package) for each simulation condition?
This is useful when simulation conditions take a long time to run (see also the \code{notification} argument).
Default is \code{TRUE}}
\item{verbose}{logical; print messages to the R console? Default is \code{TRUE}}
\item{object}{SimDesign object returned from \code{\link{runSimulation}}}
\item{...}{additional arguments}
\item{x}{SimDesign object returned from \code{\link{runSimulation}}}
\item{list2char}{logical; for \code{tibble} object re-evaluate list elements
as character vectors for better printing of the levels? Note that this
does not change the original classes of the object, just how they are printed.
Default is TRUE}
}
\value{
a \code{tibble} from the \code{dplyr} package (also of class \code{'SimDesign'})
with the original \code{design} conditions in the left-most columns,
simulation results in the middle columns, and additional information in the right-most columns (see below).
The right-most column information for each condition are:
\code{REPLICATIONS} to indicate the number of Monte Carlo replications,
\code{SIM_TIME} to indicate how long (in seconds) it took to complete
all the Monte Carlo replications for each respective design condition,
\code{COMPLETED} to indicate the date in which the given simulation condition completed,
\code{SEED} for the integer values in the \code{seed} argument, and, if applicable,
\code{ERRORS} and \code{WARNINGS} which contain counts for the number of error or warning
messages that were caught (if no errors/warnings were observed these columns will be omitted).
Note that to extract the specific error and warnings messages see
\code{\link{SimExtract}}. Finally,
if \code{boot_method} was a valid input other than 'none' then the final right-most columns will contain the labels
\code{BOOT_} followed by the name of the associated meta-statistic defined in \code{summarise()} and
the bootstrapped confidence interval location for the meta-statistics.
}
\description{
This function runs a Monte Carlo simulation study given a set of predefined simulation functions,
design conditions, and number of replications. Results can be saved as temporary files in case of interruptions
and may be restored by re-running \code{runSimulation}, provided that the respective temp
file can be found in the working directory. \code{runSimulation} supports parallel
and cluster computing, global and local debugging, error handling (including fail-safe
stopping when functions fail too often, even across nodes), provides bootstrap estimates of the
sampling variability (optional), and automatic tracking of error and warning messages and their associated
\code{.Random.seed} states.
For convenience, all functions available in the R work-space are exported across all computational nodes
so that they are more easily accessible (however, other R objects are not, and therefore
must be passed to the \code{fixed_objects} input to become available across nodes).
For an in-depth tutorial of the package please refer to Chalmers and Adkins (2020;
\doi{10.20982/tqmp.16.4.p248}).
For an earlier didactic presentation of the package refer to Sigal and Chalmers
(2016; \doi{10.1080/10691898.2016.1246953}). Finally, see the associated
wiki on Github (\url{https://github.com/philchalmers/SimDesign/wiki})
for tutorial material, examples, and applications of \code{SimDesign} to real-world simulation experiments.
}
\details{
The strategy for organizing the Monte Carlo simulation work-flow is to
\describe{
\item{1)}{Define a suitable \code{Design} object (a \code{tibble} or \code{data.frame})
containing fixed conditional
information about the Monte Carlo simulations. Each row of this \code{design} object pertains
to a unique simulation condition to study, while each column represents a simulation factor under investigation (e.g., sample size,
distribution types, etc). This is often expedited by using the
\code{\link{createDesign}} function, and if necessary the argument \code{subset}
can be used to remove redundant or non-applicable rows}
\item{2)}{Define the three step functions to generate the data (\code{\link{Generate}}; see also
\url{https://CRAN.R-project.org/view=Distributions} for a list of distributions in R),
analyse the generated data by computing the respective parameter estimates, detection rates,
etc (\code{\link{Analyse}}), and finally summarise the results across the total
number of replications (\code{\link{Summarise}}).
}
\item{3)}{Pass the \code{design} object and three defined R functions to \code{runSimulation}, and declare the
number of replications to perform with the \code{replications} input. This function will return a suitable
\code{tibble} object with the complete simulation results and execution details}
\item{4)}{Analyze the output from \code{runSimulation}, possibly using ANOVA techniques
(\code{\link{SimAnova}}) and generating suitable plots and tables}
}
Expressing the above more succinctly, the functions to be called have the following form,
with the exact functional arguments listed:
\describe{
\item{\code{Design <- createDesign(...)}}{}
\item{\code{Generate <- function(condition, fixed_objects = NULL) \{...\} }}{}
\item{\code{Analyse <- function(condition, dat, fixed_objects = NULL) \{...\} }}{}
\item{\code{Summarise <- function(condition, results, fixed_objects = NULL) \{...\} }}{}
\item{\code{res <- runSimulation(design=Design, replications, generate=Generate,
analyse=Analyse, summarise=Summarise)}}{}
}
The \code{condition} object above represents a single row from the \code{design} object, indicating
a unique Monte Carlo simulation condition. The \code{condition} object also contains two
additional elements to help track the simulation's state: an \code{ID} variable, indicating
the respective row number in the \code{design} object, and a \code{REPLICATION} element
indicating the replication iteration number (an integer value between 1 and \code{replication}).
This setup allows users to easily locate the \code{r}th replication (e.g., \code{REPLICATION == 500})
within the \code{j}th row in the simulation design (e.g., \code{ID == 2}). The
\code{REPLICATION} input is also useful when temporarily saving files to the hard-drive
when calling external command line utilities (see examples on the wiki).
For a template-based version of the work-flow, which is often useful when initially defining a simulation,
use the \code{\link{SimFunctions}} function. This function will write a template simulation
to one/two files so that modifying the required functions and objects can begin immediately.
This means that users can focus on their Monte Carlo simulation details right away rather
than worrying about the repetitive administrative code-work required to organize the simulation's execution flow.
Finally, examples, presentation files, and tutorials can be found on the package wiki located at
\url{https://github.com/philchalmers/SimDesign/wiki}.
}
\section{Saving data, results, seeds, and the simulation state}{
To conserve RAM, temporary objects (such as data generated across conditions and replications)
are discarded; however, these can be saved to the hard-disk by passing the appropriate flags.
For longer simulations it is recommended to use the \code{save_results} flag to write the analysis results
to the hard-drive.
The use of the \code{save_seeds} option can be evoked to save R's \code{.Random.seed} state to allow
for complete reproducibility of each replication within each condition. These
individual \code{.Random.seed} terms can then be read in with the
\code{load_seed} input to reproduce the exact simulation state at any given replication. Most often though,
\code{save_seeds} is less useful since problematic seeds are automatically stored in the final
simulation object to allow for easier replicability of potentially problematic errors (which incidentally
can be extracted using \code{SimExtract(res, 'error_seeds')} and passed to the \code{load_seed} argument). Finally,
providing a vector of \code{seeds} is also possible to ensure
that each simulation condition is macro reproducible under the single/multi-core method selected.
Finally, when the Monte Carlo simulation is complete
it is recommended to write the results to a hard-drive for safe keeping, particularly with the
\code{filename} argument provided (for reasons that are more obvious in the parallel computation
descriptions below). Using the \code{filename} argument supplied is safer than using, for instance,
\code{\link{saveRDS}} directly because files will never accidentally be overwritten,
and instead a new file name will be created when a conflict arises; this type of implementation safety
is prevalent in many locations in the package to help avoid unrecoverable (yet surprisingly common) mistakes
during the process of designing and executing Monte Carlo simulations.
}
\section{Resuming temporary results}{
In the event of a computer crash, power outage, etc, if \code{save = TRUE} was used (the default)
then the original code used to execute \code{runSimulation()} need only be re-run to resume the simulation.
The saved temp file will be read into the function automatically, and the simulation will continue
from the condition where it left off before the simulation state was terminated. If users wish to remove this temporary
simulation state entirely so as to start anew then simply pass \code{SimClean(temp = TRUE)} in the R console to remove any
previously saved temporary objects.
}
\section{A note on parallel computing}{
When running simulations in parallel (either with \code{parallel = TRUE} or \code{MPI = TRUE})
R objects defined in the global environment will generally \emph{not} be visible across nodes.
Hence, you may see errors such as \code{Error: object 'something' not found} if you try to use an object
that is defined in the workspace but is not passed to \code{runSimulation}.
To avoid this type of error, simply pass additional objects to the
\code{fixed_objects} input (usually it's convenient to supply a named list of these objects).
Fortunately, however, \emph{custom functions defined in the global environment are exported across
nodes automatically}. This makes it convenient when writing code because custom functions will
always be available across nodes if they are visible in the R workspace. As well, note the
\code{packages} input to declare packages which must be loaded via \code{library()} in order to make
specific non-standard R functions available across nodes.
}
\examples{
#-------------------------------------------------------------------------------
# Example 1: Sampling distribution of mean
# This example demonstrate some of the simpler uses of SimDesign,
# particularly for classroom settings. The only factor varied in this simulation
# is sample size.
# skeleton functions to be saved and edited
SimFunctions()
#### Step 1 --- Define your conditions under study and create design data.frame
Design <- createDesign(N = c(10, 20, 30))
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 2 --- Define generate, analyse, and summarise functions
# help(Generate)
Generate <- function(condition, fixed_objects = NULL) {
dat <- with(condition, rnorm(N, 10, 5)) # distributed N(10, 5)
dat
}
# help(Analyse)
Analyse <- function(condition, dat, fixed_objects = NULL) {
ret <- mean(dat) # mean of the sample data vector
ret
}
# help(Summarise)
Summarise <- function(condition, results, fixed_objects = NULL) {
ret <- c(mu=mean(results), SE=sd(results)) # mean and SD summary of the sample means
ret
}
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 3 --- Collect results by looping over the rows in design
# run the simulation
Final <- runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise)
Final
# reproduce exact simulation
Final_rep <- runSimulation(design=Design, replications=1000, seed=Final$SEED,
generate=Generate, analyse=Analyse, summarise=Summarise)
Final_rep
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Extras
\dontrun{
# compare SEs estimates to the true SEs from the formula sigma/sqrt(N)
5 / sqrt(Design$N)
# To store the results from the analyse function either
# a) omit a definition of summarise(), or
# b) pass save_results = TRUE to runSimulation() and read the results in with SimResults()
# Note that the latter method should be adopted for longer simulations
# e.g., the a) approach
res <- runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse)
str(res)
head(res[[1]])
# or b) approach
Final <- runSimulation(design=Design, replications=1000, save_results=TRUE,
generate=Generate, analyse=Analyse, summarise=Summarise)
res <- SimResults(Final)
str(res)
head(res[[1]]$results)
# obtain empirical bootstrapped CIs during an initial run
# the simulation was completed (necessarily requires save_results = TRUE)
res <- runSimulation(design=Design, replications=1000, boot_method = 'basic',
generate=Generate, analyse=Analyse, summarise=Summarise)
res
# alternative bootstrapped CIs that uses saved results via reSummarise().
# Default directory save to:
dirname <- paste0('SimDesign-results_', unname(Sys.info()['nodename']), "/")
res <- reSummarise(summarise=Summarise, dir=dirname, boot_method = 'basic')
res
# remove the saved results from the hard-drive if you no longer want them
SimClean(results = TRUE)
}
#-------------------------------------------------------------------------------
# Example 2: t-test and Welch test when varying sample size, group sizes, and SDs
# skeleton functions to be saved and edited
SimFunctions()
\dontrun{
# in real-world simulations it's often better/easier to save
# these functions directly to your hard-drive with
SimFunctions('my-simulation')
}
#### Step 1 --- Define your conditions under study and create design data.frame
Design <- createDesign(sample_size = c(30, 60, 90, 120),
group_size_ratio = c(1, 4, 8),
standard_deviation_ratio = c(.5, 1, 2))
Design
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 2 --- Define generate, analyse, and summarise functions
Generate <- function(condition, fixed_objects = NULL) {
N <- condition$sample_size # alternatively, could use Attach() to make objects available
grs <- condition$group_size_ratio
sd <- condition$standard_deviation_ratio
if(grs < 1){
N2 <- N / (1/grs + 1)
N1 <- N - N2
} else {
N1 <- N / (grs + 1)
N2 <- N - N1
}
group1 <- rnorm(N1)
group2 <- rnorm(N2, sd=sd)
dat <- data.frame(group = c(rep('g1', N1), rep('g2', N2)), DV = c(group1, group2))
dat
}
Analyse <- function(condition, dat, fixed_objects = NULL) {
welch <- t.test(DV ~ group, dat)
ind <- t.test(DV ~ group, dat, var.equal=TRUE)
# In this function the p values for the t-tests are returned,
# and make sure to name each element, for future reference
ret <- c(welch = welch$p.value, independent = ind$p.value)
ret
}
Summarise <- function(condition, results, fixed_objects = NULL) {
#find results of interest here (e.g., alpha < .1, .05, .01)
ret <- EDR(results, alpha = .05)
ret
}
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 3 --- Collect results by looping over the rows in design
# first, test to see if it works
res <- runSimulation(design=Design, replications=5,
generate=Generate, analyse=Analyse, summarise=Summarise)
res
\dontrun{
# complete run with 1000 replications per condition
res <- runSimulation(design=Design, replications=1000, parallel=TRUE,
generate=Generate, analyse=Analyse, summarise=Summarise)
res
View(res)
## save final results to a file upon completion (not run)
runSimulation(design=Design, replications=1000, parallel=TRUE, filename = 'mysim',
generate=Generate, analyse=Analyse, summarise=Summarise)
## Debug the generate function. See ?browser for help on debugging
## Type help to see available commands (e.g., n, c, where, ...),
## ls() to see what has been defined, and type Q to quit the debugger
runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise,
parallel=TRUE, debug='generate')
## Alternatively, place a browser() within the desired function line to
## jump to a specific location
# Debugging variant of Summarise: identical contract, but drops into the
# interactive debugger on every call so the collected results can be inspected
Summarise <- function(condition, results, fixed_objects = NULL) {
    #find results of interest here (e.g., alpha < .1, .05, .01)
    browser()  # pause here; use n/c/ls()/Q inside the debugger (see ?browser)
    # NOTE(review): 'nms' is not defined anywhere in this example -- presumably
    # a character vector of column names to keep; confirm intended columns
    ret <- EDR(results[,nms], alpha = .05)
    ret
}
runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise,
parallel=TRUE)
## EXTRA: To run the simulation on a MPI cluster, use the following setup on each node (not run)
# library(doMPI)
# cl <- startMPIcluster()
# registerDoMPI(cl)
# Final <- runSimulation(design=Design, replications=1000, MPI=TRUE,
# generate=Generate, analyse=Analyse, summarise=Summarise)
# saveRDS(Final, 'mysim.rds')
# closeCluster(cl)
# mpi.quit()
## Similarly, run simulation on a network linked via ssh
## (two way ssh key-paired connection must be possible between master and slave nodes)
##
## define IP addresses, including primary IP
# primary <- '192.168.2.20'
# IPs <- list(
# list(host=primary, user='phil', ncore=8),
# list(host='192.168.2.17', user='phil', ncore=8)
# )
# spec <- lapply(IPs, function(IP)
# rep(list(list(host=IP$host, user=IP$user)), IP$ncore))
# spec <- unlist(spec, recursive=FALSE)
#
# cl <- parallel::makeCluster(type='PSOCK', master=primary, spec=spec)
# res <- runSimulation(design=Design, replications=1000, parallel = TRUE,
# generate=Generate, analyse=Analyse, summarise=Summarise, cl=cl)
#~~~~~~~~~~~~~~~~~~~~~~~~
###### Post-analysis: Analyze the results via functions like lm() or SimAnova(), and create
###### tables(dplyr) or plots (ggplot2) to help visualize the results.
###### This is where you get to be a data analyst!
library(dplyr)
res \%>\% summarise(mean(welch), mean(independent))
res \%>\% group_by(standard_deviation_ratio, group_size_ratio) \%>\%
summarise(mean(welch), mean(independent))
# quick ANOVA analysis method with all two-way interactions
SimAnova( ~ (sample_size + group_size_ratio + standard_deviation_ratio)^2, res,
rates = TRUE)
# or more specific ANOVAs
SimAnova(independent ~ (group_size_ratio + standard_deviation_ratio)^2,
res, rates = TRUE)
# make some plots
library(ggplot2)
library(tidyr)
dd <- res \%>\%
select(group_size_ratio, standard_deviation_ratio, welch, independent) \%>\%
pivot_longer(cols=c('welch', 'independent'), names_to = 'stats')
dd
ggplot(dd, aes(factor(group_size_ratio), value)) + geom_boxplot() +
geom_abline(intercept=0.05, slope=0, col = 'red') +
geom_abline(intercept=0.075, slope=0, col = 'red', linetype='dotted') +
geom_abline(intercept=0.025, slope=0, col = 'red', linetype='dotted') +
facet_wrap(~stats)
ggplot(dd, aes(factor(group_size_ratio), value, fill = factor(standard_deviation_ratio))) +
geom_boxplot() + geom_abline(intercept=0.05, slope=0, col = 'red') +
geom_abline(intercept=0.075, slope=0, col = 'red', linetype='dotted') +
geom_abline(intercept=0.025, slope=0, col = 'red', linetype='dotted') +
facet_grid(stats~standard_deviation_ratio) +
theme(legend.position = 'none')
}
}
\references{
Chalmers, R. P., & Adkins, M. C. (2020). Writing Effective and Reliable Monte Carlo Simulations
with the SimDesign Package. \code{The Quantitative Methods for Psychology, 16}(4), 248-280.
\doi{10.20982/tqmp.16.4.p248}
Sigal, M. J., & Chalmers, R. P. (2016). Play it again: Teaching statistics with Monte
Carlo simulation. \code{Journal of Statistics Education, 24}(3), 136-156.
\doi{10.1080/10691898.2016.1246953}
}
\seealso{
\code{\link{SimFunctions}}, \code{\link{createDesign}},
\code{\link{Generate}}, \code{\link{Analyse}}, \code{\link{Summarise}},
\code{\link{SimExtract}},
\code{\link{reSummarise}}, \code{\link{SimClean}}, \code{\link{SimAnova}}, \code{\link{SimResults}},
\code{\link{aggregate_simulations}}, \code{\link{Attach}},
\code{\link{SimShiny}}
}
\author{
Phil Chalmers \email{rphilip.chalmers@gmail.com}
}
|
/man/runSimulation.Rd
|
no_license
|
phdtai/SimDesign
|
R
| false
| true
| 38,382
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runSimulation.R
\name{runSimulation}
\alias{runSimulation}
\alias{summary.SimDesign}
\alias{print.SimDesign}
\title{Run a Monte Carlo simulation given a data.frame of conditions and simulation functions}
\usage{
runSimulation(
design,
replications,
generate,
analyse,
summarise,
fixed_objects = NULL,
packages = NULL,
filename = NULL,
debug = "none",
load_seed = NULL,
save_results = FALSE,
parallel = FALSE,
ncores = parallel::detectCores(),
cl = NULL,
notification = NULL,
boot_method = "none",
boot_draws = 1000L,
CI = 0.95,
seed = rint(nrow(design), min = 1L, max = 2147483647L),
save_seeds = FALSE,
save = TRUE,
store_results = FALSE,
warnings_as_errors = FALSE,
max_errors = 50L,
allow_na = FALSE,
allow_nan = FALSE,
stop_on_fatal = FALSE,
MPI = FALSE,
save_details = list(),
progress = TRUE,
verbose = TRUE
)
\method{summary}{SimDesign}(object, ...)
\method{print}{SimDesign}(x, list2char = TRUE, ...)
}
\arguments{
\item{design}{a \code{tibble} or \code{data.frame} object containing the Monte Carlo simulation
conditions to be studied, where each row represents a unique condition and each column a factor
to be varied. See \code{\link{createDesign}} for the standard approach
to create this simulation design object}
\item{replications}{number of independent replications to perform per condition (i.e., each row in \code{design}).
Must be greater than 0}
\item{generate}{user-defined data and parameter generating function.
See \code{\link{Generate}} for details. Note that this argument may be omitted by the
user if they wish to generate the data with the \code{analyse} step, but for real-world
simulations this is generally not recommended}
\item{analyse}{user-defined analysis function that acts on the data generated from
\code{\link{Generate}} (or, if \code{generate} was omitted, can be used to generate and
analyses the simulated data). See \code{\link{Analyse}} for details}
\item{summarise}{optional (but highly recommended) user-defined summary function from \code{\link{Summarise}}
to be used to compute meta-statistical summary information after all the replications have completed within
each \code{design} condition.
Omitting this function will return a list of \code{data.frame}s (or a single \code{data.frame}, if only one row in
\code{design} is supplied) or, for more general objects (such as \code{list}s), a \code{list}
containing the results returned from \code{\link{Analyse}}.
Alternatively, the value \code{NA} can be passed to let the generate-analyse-summarise process to run as usual,
where the summarise components are instead included only as a placeholder.
Omitting this input is only recommended for didactic purposes because it leaves out a large amount of
information (e.g., try-errors, warning messages, saving files, etc), can witness memory related issues,
and generally is not as flexible internally.
If users do not wish to supply a summarise function then it
is recommended to pass \code{NA} to this argument while also passing \code{save_results = TRUE} to
save the results to the hard-drive during the simulation. This provides a
more RAM friendly alternative to storing all the Generate-Analyse results in the working environment, where
the Analysis results can be summarised at a later time}
\item{fixed_objects}{(optional) an object (usually a named \code{list})
containing additional user-defined objects
that should remain fixed across conditions. This is useful when including
large vectors/matrices of population parameters, fixed data information
that should be used across all conditions and replications (e.g., including a common design matrix
for linear regression models), or simply control constant global elements (e.g., a constant for sample size)}
\item{packages}{a character vector of external packages to be used during the simulation (e.g.,
\code{c('MASS', 'extraDistr', 'simsem')} ). Use this input when \code{parallel = TRUE} or
\code{MPI = TRUE} to use non-standard functions from additional packages,
otherwise the functions must be made available by using explicit
\code{\link{library}} or \code{\link{require}} calls within the provided simulation functions.
Alternatively, functions can be called explicitly without attaching the package with the \code{::} operator
(e.g., \code{extraDistr::rgumbel()})}
\item{filename}{(optional) the name of the \code{.rds} file to save the final simulation results to. If the extension
\code{.rds} is not included in the file name (e.g. \code{"mysimulation"} versus \code{"mysimulation.rds"}) then the
\code{.rds} extension will be automatically added to the file name to ensure the file extension is correct.
Note that if the same file name already exists in the working
directly at the time of saving then a new
file will be generated instead and a warning will be thrown. This helps to avoid accidentally overwriting
existing files. Default is \code{NULL}, indicating no file will be saved by default}
\item{debug}{a string indicating where to initiate a \code{browser()} call for editing and debugging, and pairs
particularly well with the \code{load_seed} argument for precise debugging.
General options are \code{'none'} (default; no debugging), \code{'error'}, which starts the debugger
when any error in the code is detected in one of three generate-analyse-summarise functions,
and \code{'all'}, which debugs all the user defined functions regardless of whether an error was thrown
or not. Specific options include: \code{'generate'}
to debug the data simulation function, \code{'analyse'} to debug the computational function, and
\code{'summarise'} to debug the aggregation function.
Alternatively, users may place \code{\link{browser}} calls within the respective functions for
debugging at specific lines, which is useful when debugging based on conditional evaluations (e.g.,
\code{if(this == 'problem') browser()}). Note that parallel computation flags
will automatically be disabled when a \code{browser()} is detected or when a debugging argument other than
\code{'none'} is supplied}
\item{load_seed}{used to replicate an exact simulation state, which is primarily useful for debugging purposes.
Input can be a character object indicating which file to load from when the \code{.Random.seed}s have
be saved (after a call with \code{save_seeds = TRUE}), or an integer vector indicating the actual
\code{.Random.seed} values. E.g., \code{load_seed = 'design-row-2/seed-1'}
will load the first seed in the second row of the \code{design} input, or explicitly passing the
elements from \code{.Random.seed} (see \code{\link{SimExtract}} to extract the seeds associated explicitly
with errors during the simulation, where each column represents a unique seed).
If the input is a character vector then it is important NOT
to modify the \code{design} input object, otherwise the path may not point to the correct saved location, while
if the input is an integer vector (or single column \code{tbl} object)
then it WILL be important to modify the \code{design} input in order to load this
exact seed for the corresponding design row. Default is \code{NULL}}
\item{save_results}{logical; save the results returned from \code{\link{Analyse}} to external
\code{.rds} files located in the defined \code{save_results_dirname} directory/folder?
Use this if you would like to keep track of the individual parameters returned from the \code{analysis} function.
Each saved object will contain a list of three elements containing the condition (row from \code{design}),
results (as a \code{list} or \code{matrix}), and try-errors.
See \code{\link{SimResults}} for an example of how to read these \code{.rds} files back into R
after the simulation is complete. Default is \code{FALSE}.
WARNING: saving results to your hard-drive can fill up space very quickly for larger simulations. Be sure to
test this option using a smaller number of replications before the full Monte Carlo simulation is performed.
See also \code{\link{reSummarise}} for applying summarise functions from saved
simulation results}
\item{parallel}{logical; use parallel processing from the \code{parallel} package over each
unique condition?}
\item{ncores}{number of cores to be used in parallel execution. Default uses all available}
\item{cl}{cluster object defined by \code{\link{makeCluster}} used to run code in parallel.
If \code{NULL} and \code{parallel = TRUE}, a local cluster object will be defined which
selects the maximum number of cores available
and will be stopped when the simulation is complete. Note that supplying a \code{cl}
object will automatically set the \code{parallel} argument to \code{TRUE}}
\item{notification}{an optional, empty argument function to be executed upon completion of the simulation. This can be used, for
instance, to trigger email or SMS notifications that indicate the simulation has been completed. For example,
to utilize the \code{RPushbullet} package (and assuming users have previously a) registered for a Pushbullet account,
and b) installed the application on their mobile device and computer), use the following:
\describe{
\item{Prior Setup}{Prior to defining \code{notification}, load the \code{RPushbullet} library via \code{library(RPushbullet)}. If
this is the first time you have used the package then a suitable \code{rpushbullet.json} file will not exist on your computer,
and you'll need to supply a suitable token and the device to push the notification to via the \code{pbSetup()} setup}
\item{Execution}{Supply a definition of \code{notification} that utilizes the \code{pbPost} function. E.g.,
\code{runSimulation(...,
notification = function() pbPost(type = "note", title = "SimDesign", body = "Simulation Complete"))}}
}
Alternatively, if users wish to have an email sent upon completion then the following template that uses the \code{sendmailR}
package could be used:
\describe{
\item{Using \code{sendmailR}}{
\code{runSimulation(...,
notification = function() sendmailR::sendmail(from="<sendmailR@your.computer>",
to="<your.email@address>", subject="SimDesign", msg="Simulation Complete",
control=list(smtpServer="ASPMX.L.GOOGLE.COM")))}.
}
}
However, note that this may be less reliable since the email message could be directed to a spam folder.}
\item{boot_method}{method for performing non-parametric bootstrap confidence intervals
for the respective meta-statistics computed by the \code{Summarise} function.
Can be \code{'basic'} for the empirical bootstrap CI, \code{'percentile'}
for percentile CIs, \code{'norm'} for normal approximations CIs, or \code{'studentized'}
for Studentized CIs (should only be used for simulations with lower replications due to its
computational intensity). Default is \code{'none'}, which performs no bootstrapping}
\item{boot_draws}{number of non-parametric bootstrap draws to sample for the \code{summarise}
function after the generate-analyse replications are collected. Default is 1000}
\item{CI}{bootstrap confidence interval level (default is 95\%)}
\item{seed}{a vector of integers to be used for reproducibility.
The length of the vector must be equal the number of rows in \code{design}.
This argument calls \code{\link{set.seed}} or
\code{\link{clusterSetRNGStream}} for each condition, respectively,
but will not be run when \code{MPI = TRUE}.
Default randomly generates seeds within the range 1 to 2147483647 for each condition.}
\item{save_seeds}{logical; save the \code{.Random.seed} states prior to performing each replication into
plain text files located in the defined \code{save_seeds_dirname} directory/folder?
Use this if you would like to keep track of every simulation state within each replication and design
condition. This can be used to completely replicate any cell in the simulation if need be.
As well, see the \code{load_seed} input
to load a given \code{.Random.seed} to exactly replicate the generated data and analysis state (mostly useful
for debugging). When \code{TRUE}, temporary files will also be saved
to the working directory (in the same way as when \code{save = TRUE}). Default is \code{FALSE}
Note, however, that this option is not typically necessary or recommended since the \code{.Random.seed} states for simulation
replications that throw errors during the execution are automatically stored within the final simulation
object, and can be extracted and investigated using \code{\link{SimExtract}}. Hence, this option is only of
interest when \emph{all} of the replications must be reproducible (which occurs very rarely),
otherwise the defaults to \code{runSimulation} should be sufficient}
\item{save}{logical; save the temporary simulation state to the hard-drive? This is useful
for simulations which require an extended amount of time, though for shorter simulations
can be disabled to slightly improve computational efficiency. When \code{TRUE}, a temp file
will be created in the working directory which allows the simulation state to be saved
and recovered (in case of power outages, crashes, etc). As well, triggering this flag will
save any fatal \code{.Random.seed} states when conditions unexpectedly crash (where each seed
is stored row-wise in an external .rds file), which provides a much easier mechanism
to debug issues (see \code{load_seed} for details). Upon completion, this temp file will be removed.
To recover your simulation at the last known location (having patched the issues in the previous execution code)
simply re-run the code you used to
initially define the simulation and the external file will automatically be detected and read-in.
Default is \code{TRUE}}
\item{store_results}{logical; store the complete tables of simulation results
in the returned object? This is \code{FALSE} by default to help avoid RAM
issues (see \code{save_results} as a more suitable alternative). To extract these results
pass the returned object to \code{SimExtract(..., what = 'results')}, which will return a named list
of all the simulation results for each condition}
\item{warnings_as_errors}{logical; treat warning messages as error messages during the simulation? Default is FALSE,
therefore warnings are only collected and not used to restart the data generation step, and the seeds associated with
the warning message conditions are not stored within the final simulation object}
\item{max_errors}{the simulation will terminate when more than this number of consecutive errors are thrown in any
given condition, causing the simulation to continue to the next unique \code{design} condition.
This is included to avoid getting stuck in infinite re-draws, and to indicate that something fatally problematic
is going wrong in the generate-analyse phases. Default is 50}
\item{allow_na}{logical; should \code{NA}s be allowed in the analyse step as a valid result from the simulation
analysis? Default is FALSE}
\item{allow_nan}{logical; should \code{NaN}s be allowed in the analyse step as a valid result from the simulation
analysis? Default is FALSE}
\item{stop_on_fatal}{logical; should the simulation be terminated immediately when
the maximum number of consecutive errors (\code{max_errors}) is reached? If \code{FALSE},
the simulation will continue as though errors did not occur, however a column
\code{FATAL_TERMINATION} will be included in the resulting object indicating the final
error message observed, and \code{NA} placeholders will be placed in all other row-elements. Default is
\code{FALSE}}
\item{MPI}{logical; use the \code{foreach} package in a form usable by MPI to run simulation
in parallel on a cluster? Default is \code{FALSE}}
\item{save_details}{a list pertaining to information regarding how and where files should be saved
when the \code{save} or \code{save_results} flags are triggered.
\describe{
\item{\code{safe}}{logical; trigger whether safe-saving should be performed. When \code{TRUE} files
will never be overwritten accidentally, and where appropriate the program will either stop or generate
new files with unique names. Default is \code{TRUE}}
\item{\code{compname}}{name of the computer running the simulation. Normally this doesn't need
to be modified, but in the event that a manual node breaks down while running a simulation the
results from the temp files may be resumed on another computer by changing the name of the
node to match the broken computer. Default is the result of evaluating \code{unname(Sys.info()['nodename'])}}
\item{\code{out_rootdir}}{root directory to save all files to. Default uses the
current working directory}
\item{\code{save_results_dirname}}{a string indicating the name of the folder to save
result objects to when \code{save_results = TRUE}. If a directory/folder does not exist
in the current working directory then a unique one will be created automatically. Default is
\code{'SimDesign-results_'} with the associated \code{compname} appended}
\item{\code{save_seeds_dirname}}{a string indicating the name of the folder to save
\code{.Random.seed} objects to when \code{save_seeds = TRUE}. If a directory/folder does not exist
in the current working directory then one will be created automatically. Default is
\code{'SimDesign-seeds_'} with the associated \code{compname} appended}
}}
\item{progress}{logical; display a progress bar (using the \code{pbapply} package) for each simulation condition?
This is useful when simulations conditions take a long time to run (see also the \code{notification} argument).
Default is \code{TRUE}}
\item{verbose}{logical; print messages to the R console? Default is \code{TRUE}}
\item{object}{SimDesign object returned from \code{\link{runSimulation}}}
\item{...}{additional arguments}
\item{x}{SimDesign object returned from \code{\link{runSimulation}}}
\item{list2char}{logical; for \code{tibble} object re-evaluate list elements
as character vectors for better printing of the levels? Note that this
does not change the original classes of the object, just how they are printed.
Default is TRUE}
}
\value{
a \code{tibble} from the \code{dplyr} package (also of class \code{'SimDesign'})
with the original \code{design} conditions in the left-most columns,
simulation results in the middle columns, and additional information in the right-most columns (see below).
The right-most column information for each condition are:
\code{REPLICATIONS} to indicate the number of Monte Carlo replications,
\code{SIM_TIME} to indicate how long (in seconds) it took to complete
all the Monte Carlo replications for each respective design condition,
\code{COMPLETED} to indicate the date in which the given simulation condition completed,
\code{SEED} for the integer values in the \code{seed} argument, and, if applicable,
\code{ERRORS} and \code{WARNINGS} which contain counts for the number of error or warning
messages that were caught (if no errors/warnings were observed these columns will be omitted).
Note that to extract the specific error and warnings messages see
\code{\link{SimExtract}}. Finally,
if \code{boot_method} was a valid input other than 'none' then the final right-most columns will contain the labels
\code{BOOT_} followed by the name of the associated meta-statistic defined in \code{summarise()} and
the bootstrapped confidence interval location for the meta-statistics.
}
\description{
This function runs a Monte Carlo simulation study given a set of predefined simulation functions,
design conditions, and number of replications. Results can be saved as temporary files in case of interruptions
and may be restored by re-running \code{runSimulation}, provided that the respective temp
file can be found in the working directory. \code{runSimulation} supports parallel
and cluster computing, global and local debugging, error handling (including fail-safe
stopping when functions fail too often, even across nodes), provides bootstrap estimates of the
sampling variability (optional), and automatic tracking of error and warning messages and their associated
\code{.Random.seed} states.
For convenience, all functions available in the R work-space are exported across all computational nodes
so that they are more easily accessible (however, other R objects are not, and therefore
must be passed to the \code{fixed_objects} input to become available across nodes).
For an in-depth tutorial of the package please refer to Chalmers and Adkins (2020;
\doi{10.20982/tqmp.16.4.p248}).
For an earlier didactic presentation of the package refer to Sigal and Chalmers
(2016; \doi{10.1080/10691898.2016.1246953}). Finally, see the associated
wiki on Github (\url{https://github.com/philchalmers/SimDesign/wiki})
for tutorial material, examples, and applications of \code{SimDesign} to real-world simulation experiments.
}
\details{
The strategy for organizing the Monte Carlo simulation work-flow is to
\describe{
\item{1)}{Define a suitable \code{Design} object (a \code{tibble} or \code{data.frame})
containing fixed conditional
information about the Monte Carlo simulations. Each row of this \code{design} object pertains
to a unique set of simulation conditions to study, while each column represents a simulation factor under investigation (e.g., sample size,
distribution types, etc). This is often expedited by using the
\code{\link{createDesign}} function, and if necessary the argument \code{subset}
can be used to remove redundant or non-applicable rows}
\item{2)}{Define the three step functions to generate the data (\code{\link{Generate}}; see also
\url{https://CRAN.R-project.org/view=Distributions} for a list of distributions in R),
analyse the generated data by computing the respective parameter estimates, detection rates,
etc (\code{\link{Analyse}}), and finally summarise the results across the total
number of replications (\code{\link{Summarise}}).
}
\item{3)}{Pass the \code{design} object and three defined R functions to \code{runSimulation}, and declare the
number of replications to perform with the \code{replications} input. This function will return a suitable
\code{tibble} object with the complete simulation results and execution details}
\item{4)}{Analyze the output from \code{runSimulation}, possibly using ANOVA techniques
(\code{\link{SimAnova}}) and generating suitable plots and tables}
}
Expressing the above more succinctly, the functions to be called have the following form,
with the exact functional arguments listed:
\describe{
\item{\code{Design <- createDesign(...)}}{}
\item{\code{Generate <- function(condition, fixed_objects = NULL) \{...\} }}{}
\item{\code{Analyse <- function(condition, dat, fixed_objects = NULL) \{...\} }}{}
\item{\code{Summarise <- function(condition, results, fixed_objects = NULL) \{...\} }}{}
\item{\code{res <- runSimulation(design=Design, replications, generate=Generate,
analyse=Analyse, summarise=Summarise)}}{}
}
The \code{condition} object above represents a single row from the \code{design} object, indicating
a unique Monte Carlo simulation condition. The \code{condition} object also contains two
additional elements to help track the simulation's state: an \code{ID} variable, indicating
the respective row number in the \code{design} object, and a \code{REPLICATION} element
indicating the replication iteration number (an integer value between 1 and \code{replications}).
This setup allows users to easily locate the \code{r}th replication (e.g., \code{REPLICATION == 500})
within the \code{j}th row in the simulation design (e.g., \code{ID == 2}). The
\code{REPLICATION} input is also useful when temporarily saving files to the hard-drive
when calling external command line utilities (see examples on the wiki).
For a template-based version of the work-flow, which is often useful when initially defining a simulation,
use the \code{\link{SimFunctions}} function. This function will write a template simulation
to one/two files so that modifying the required functions and objects can begin immediately.
This means that users can focus on their Monte Carlo simulation details right away rather
than worrying about the repetitive administrative code-work required to organize the simulation's execution flow.
Finally, examples, presentation files, and tutorials can be found on the package wiki located at
\url{https://github.com/philchalmers/SimDesign/wiki}.
}
\section{Saving data, results, seeds, and the simulation state}{
To conserve RAM, temporary objects (such as data generated across conditions and replications)
are discarded; however, these can be saved to the hard-disk by passing the appropriate flags.
For longer simulations it is recommended to use the \code{save_results} flag to write the analysis results
to the hard-drive.
The use of the \code{save_seeds} option can be evoked to save R's \code{.Random.seed} state to allow
for complete reproducibility of each replication within each condition. These
individual \code{.Random.seed} terms can then be read in with the
\code{load_seed} input to reproduce the exact simulation state at any given replication. Most often though,
\code{save_seeds} is less useful since problematic seeds are automatically stored in the final
simulation object to allow for easier replicability of potentially problematic errors (which incidentally
can be extracted using \code{SimExtract(res, 'error_seeds')} and passed to the \code{load_seed} argument). Finally,
providing a vector of \code{seeds} is also possible to ensure
that each simulation condition is macro reproducible under the single/multi-core method selected.
Finally, when the Monte Carlo simulation is complete
it is recommended to write the results to a hard-drive for safe keeping, particularly with the
\code{filename} argument provided (for reasons that are more obvious in the parallel computation
descriptions below). Using the \code{filename} argument supplied is safer than using, for instance,
\code{\link{saveRDS}} directly because files will never accidentally be overwritten,
and instead a new file name will be created when a conflict arises; this type of implementation safety
is prevalent in many locations in the package to help avoid unrecoverable (yet surprisingly common) mistakes
during the process of designing and executing Monte Carlo simulations.
}
\section{Resuming temporary results}{
In the event of a computer crash, power outage, etc, if \code{save = TRUE} was used (the default)
then the original code used to execute \code{runSimulation()} need only be re-run to resume the simulation.
The saved temp file will be read into the function automatically, and the simulation will continue
on the condition where it left off before the simulation state was terminated. If users wish to remove this temporary
simulation state entirely so as to start anew then simply pass \code{SimClean(temp = TRUE)} in the R console to remove any
previously saved temporary objects.
}
\section{A note on parallel computing}{
When running simulations in parallel (either with \code{parallel = TRUE} or \code{MPI = TRUE})
R objects defined in the global environment will generally \emph{not} be visible across nodes.
Hence, you may see errors such as \code{Error: object 'something' not found} if you try to use an object
that is defined in the workspace but is not passed to \code{runSimulation}.
To avoid this type of error, simply pass additional objects to the
\code{fixed_objects} input (usually it's convenient to supply a named list of these objects).
Fortunately, however, \emph{custom functions defined in the global environment are exported across
nodes automatically}. This makes it convenient when writing code because custom functions will
always be available across nodes if they are visible in the R workspace. As well, note the
\code{packages} input to declare packages which must be loaded via \code{library()} in order to make
specific non-standard R functions available across nodes.
}
\examples{
#-------------------------------------------------------------------------------
# Example 1: Sampling distribution of mean
# This example demonstrate some of the simpler uses of SimDesign,
# particularly for classroom settings. The only factor varied in this simulation
# is sample size.
# skeleton functions to be saved and edited
SimFunctions()
#### Step 1 --- Define your conditions under study and create design data.frame
Design <- createDesign(N = c(10, 20, 30))
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 2 --- Define generate, analyse, and summarise functions
# help(Generate)
Generate <- function(condition, fixed_objects = NULL) {
dat <- with(condition, rnorm(N, 10, 5)) # distributed N(10, 5)
dat
}
# help(Analyse)
Analyse <- function(condition, dat, fixed_objects = NULL) {
ret <- mean(dat) # mean of the sample data vector
ret
}
# help(Summarise)
Summarise <- function(condition, results, fixed_objects = NULL) {
ret <- c(mu=mean(results), SE=sd(results)) # mean and SD summary of the sample means
ret
}
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 3 --- Collect results by looping over the rows in design
# run the simulation
Final <- runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise)
Final
# reproduce exact simulation
Final_rep <- runSimulation(design=Design, replications=1000, seed=Final$SEED,
generate=Generate, analyse=Analyse, summarise=Summarise)
Final_rep
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Extras
\dontrun{
# compare SEs estimates to the true SEs from the formula sigma/sqrt(N)
5 / sqrt(Design$N)
# To store the results from the analyse function either
# a) omit a definition of of summarise(), or
# b) pass save_results = TRUE to runSimulation() and read the results in with SimResults()
# Note that the latter method should be adopted for longer simulations
# e.g., the a) approach
res <- runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse)
str(res)
head(res[[1]])
# or b) approach
Final <- runSimulation(design=Design, replications=1000, save_results=TRUE,
generate=Generate, analyse=Analyse, summarise=Summarise)
res <- SimResults(Final)
str(res)
head(res[[1]]$results)
# obtain empirical bootstrapped CIs during an initial run
# the simulation was completed (necessarily requires save_results = TRUE)
res <- runSimulation(design=Design, replications=1000, boot_method = 'basic',
generate=Generate, analyse=Analyse, summarise=Summarise)
res
# alternative bootstrapped CIs that uses saved results via reSummarise().
# Default directory save to:
dirname <- paste0('SimDesign-results_', unname(Sys.info()['nodename']), "/")
res <- reSummarise(summarise=Summarise, dir=dirname, boot_method = 'basic')
res
# remove the saved results from the hard-drive if you no longer want them
SimClean(results = TRUE)
}
#-------------------------------------------------------------------------------
# Example 2: t-test and Welch test when varying sample size, group sizes, and SDs
# skeleton functions to be saved and edited
SimFunctions()
\dontrun{
# in real-world simulations it's often better/easier to save
# these functions directly to your hard-drive with
SimFunctions('my-simulation')
}
#### Step 1 --- Define your conditions under study and create design data.frame
Design <- createDesign(sample_size = c(30, 60, 90, 120),
group_size_ratio = c(1, 4, 8),
standard_deviation_ratio = c(.5, 1, 2))
Design
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 2 --- Define generate, analyse, and summarise functions
Generate <- function(condition, fixed_objects = NULL) {
N <- condition$sample_size # alternatively, could use Attach() to make objects available
grs <- condition$group_size_ratio
sd <- condition$standard_deviation_ratio
if(grs < 1){
N2 <- N / (1/grs + 1)
N1 <- N - N2
} else {
N1 <- N / (grs + 1)
N2 <- N - N1
}
group1 <- rnorm(N1)
group2 <- rnorm(N2, sd=sd)
dat <- data.frame(group = c(rep('g1', N1), rep('g2', N2)), DV = c(group1, group2))
dat
}
Analyse <- function(condition, dat, fixed_objects = NULL) {
welch <- t.test(DV ~ group, dat)
ind <- t.test(DV ~ group, dat, var.equal=TRUE)
# In this function the p values for the t-tests are returned,
# and make sure to name each element, for future reference
ret <- c(welch = welch$p.value, independent = ind$p.value)
ret
}
Summarise <- function(condition, results, fixed_objects = NULL) {
#find results of interest here (e.g., alpha < .1, .05, .01)
ret <- EDR(results, alpha = .05)
ret
}
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 3 --- Collect results by looping over the rows in design
# first, test to see if it works
res <- runSimulation(design=Design, replications=5,
generate=Generate, analyse=Analyse, summarise=Summarise)
res
\dontrun{
# complete run with 1000 replications per condition
res <- runSimulation(design=Design, replications=1000, parallel=TRUE,
generate=Generate, analyse=Analyse, summarise=Summarise)
res
View(res)
## save final results to a file upon completion (not run)
runSimulation(design=Design, replications=1000, parallel=TRUE, filename = 'mysim',
generate=Generate, analyse=Analyse, summarise=Summarise)
## Debug the generate function. See ?browser for help on debugging
## Type help to see available commands (e.g., n, c, where, ...),
## ls() to see what has been defined, and type Q to quit the debugger
runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise,
parallel=TRUE, debug='generate')
## Alternatively, place a browser() within the desired function line to
## jump to a specific location
Summarise <- function(condition, results, fixed_objects = NULL) {
#find results of interest here (e.g., alpha < .1, .05, .01)
browser()
ret <- EDR(results[,nms], alpha = .05)
ret
}
runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise,
parallel=TRUE)
## EXTRA: To run the simulation on a MPI cluster, use the following setup on each node (not run)
# library(doMPI)
# cl <- startMPIcluster()
# registerDoMPI(cl)
# Final <- runSimulation(design=Design, replications=1000, MPI=TRUE,
# generate=Generate, analyse=Analyse, summarise=Summarise)
# saveRDS(Final, 'mysim.rds')
# closeCluster(cl)
# mpi.quit()
## Similarly, run simulation on a network linked via ssh
## (two way ssh key-paired connection must be possible between master and slave nodes)
##
## define IP addresses, including primary IP
# primary <- '192.168.2.20'
# IPs <- list(
# list(host=primary, user='phil', ncore=8),
# list(host='192.168.2.17', user='phil', ncore=8)
# )
# spec <- lapply(IPs, function(IP)
# rep(list(list(host=IP$host, user=IP$user)), IP$ncore))
# spec <- unlist(spec, recursive=FALSE)
#
# cl <- parallel::makeCluster(type='PSOCK', master=primary, spec=spec)
# res <- runSimulation(design=Design, replications=1000, parallel = TRUE,
# generate=Generate, analyse=Analyse, summarise=Summarise, cl=cl)
#~~~~~~~~~~~~~~~~~~~~~~~~
###### Post-analysis: Analyze the results via functions like lm() or SimAnova(), and create
###### tables(dplyr) or plots (ggplot2) to help visualize the results.
###### This is where you get to be a data analyst!
library(dplyr)
res \%>\% summarise(mean(welch), mean(independent))
res \%>\% group_by(standard_deviation_ratio, group_size_ratio) \%>\%
summarise(mean(welch), mean(independent))
# quick ANOVA analysis method with all two-way interactions
SimAnova( ~ (sample_size + group_size_ratio + standard_deviation_ratio)^2, res,
rates = TRUE)
# or more specific ANOVAs
SimAnova(independent ~ (group_size_ratio + standard_deviation_ratio)^2,
res, rates = TRUE)
# make some plots
library(ggplot2)
library(tidyr)
dd <- res \%>\%
select(group_size_ratio, standard_deviation_ratio, welch, independent) \%>\%
pivot_longer(cols=c('welch', 'independent'), names_to = 'stats')
dd
ggplot(dd, aes(factor(group_size_ratio), value)) + geom_boxplot() +
geom_abline(intercept=0.05, slope=0, col = 'red') +
geom_abline(intercept=0.075, slope=0, col = 'red', linetype='dotted') +
geom_abline(intercept=0.025, slope=0, col = 'red', linetype='dotted') +
facet_wrap(~stats)
ggplot(dd, aes(factor(group_size_ratio), value, fill = factor(standard_deviation_ratio))) +
geom_boxplot() + geom_abline(intercept=0.05, slope=0, col = 'red') +
geom_abline(intercept=0.075, slope=0, col = 'red', linetype='dotted') +
geom_abline(intercept=0.025, slope=0, col = 'red', linetype='dotted') +
facet_grid(stats~standard_deviation_ratio) +
theme(legend.position = 'none')
}
}
\references{
Chalmers, R. P., & Adkins, M. C. (2020). Writing Effective and Reliable Monte Carlo Simulations
with the SimDesign Package. \code{The Quantitative Methods for Psychology, 16}(4), 248-280.
\doi{10.20982/tqmp.16.4.p248}
Sigal, M. J., & Chalmers, R. P. (2016). Play it again: Teaching statistics with Monte
Carlo simulation. \code{Journal of Statistics Education, 24}(3), 136-156.
\doi{10.1080/10691898.2016.1246953}
}
\seealso{
\code{\link{SimFunctions}}, \code{\link{createDesign}},
\code{\link{Generate}}, \code{\link{Analyse}}, \code{\link{Summarise}},
\code{\link{SimExtract}},
\code{\link{reSummarise}}, \code{\link{SimClean}}, \code{\link{SimAnova}}, \code{\link{SimResults}},
\code{\link{aggregate_simulations}}, \code{\link{Attach}},
\code{\link{SimShiny}}
}
\author{
Phil Chalmers \email{rphilip.chalmers@gmail.com}
}
|
# Combine .RDA files from "combine_dim.R"
# into single dataframe
# This runs 1 time

# Packages
#library(tidyverse, lib.loc = "/ifs/home/msph/ehs/eag2186/local/hpc/")

## Get functions
source("/ifs/scratch/msph/ehs/eag2186/npbnmf/compare_functions.R")
#source("/ifs/scratch/msph/ehs/eag2186/npbnmf/factor_correspondence.R")

# Read in metrics for PCA, FA, NMF
# (one .RDA file per simulated data set; each file defines `output_all`)
n_sims <- 13500

dim_r_metrics <- tibble()
for (sim_id in seq_len(n_sims)) {
  load(paste0("/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/combo_r_dim/dim_out_", sim_id, ".RDA"))
  # output_all = output_all %>% mutate(id = sim_id)
  dim_r_metrics <- bind_rows(dim_r_metrics, output_all)
  if (sim_id %% 100 == 0) print(sim_id)  # progress marker
}
save(dim_r_metrics, file = "/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_r_metrics.RDA")

# Read in BN2MF metrics
# (each iteration loads three .RDA files defining `vci_metrics`,
#  `dim_rank`, and `dim_metrics`, respectively)
bn2mf_rank <- tibble()
bn2mf_metrics <- tibble()
bn2mf_prop <- tibble()
for (sim_id in seq_len(n_sims)) {
  load(paste0("/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_prop_m/dim_prop_m_", sim_id, ".RDA"))
  bn2mf_prop <- bind_rows(bn2mf_prop, vci_metrics)

  load(paste0("/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_rank_m/dim_rank_m_", sim_id, ".RDA"))
  bn2mf_rank <- bind_rows(bn2mf_rank, dim_rank)

  load(paste0("/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_metrics_m/dim_metrics_m_", sim_id, ".RDA"))
  bn2mf_metrics <- bind_rows(bn2mf_metrics, dim_metrics)

  if (sim_id %% 100 == 0) print(sim_id)  # progress marker
}
save(bn2mf_metrics, file = "/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_m_metrics.RDA")
save(bn2mf_prop, file = "/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_m_prop.RDA")
save(bn2mf_rank, file = "/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_m_rank.RDA")
|
/misc/Dim/aggregate_dim.R
|
permissive
|
lizzyagibson/BN2MF
|
R
| false
| false
| 1,581
|
r
|
# Combine .RDA files from "combine_dim.R"
# into single dataframe
# This runs 1 time

# Packages
#library(tidyverse, lib.loc = "/ifs/home/msph/ehs/eag2186/local/hpc/")

## Get functions
source("/ifs/scratch/msph/ehs/eag2186/npbnmf/compare_functions.R")
#source("/ifs/scratch/msph/ehs/eag2186/npbnmf/factor_correspondence.R")

# Read in metrics for PCA, FA, NMF
# (one .RDA file per simulated data set; each file defines `output_all`)
n_sims <- 13500

dim_r_metrics <- tibble()
for (sim_id in seq_len(n_sims)) {
  load(paste0("/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/combo_r_dim/dim_out_", sim_id, ".RDA"))
  # output_all = output_all %>% mutate(id = sim_id)
  dim_r_metrics <- bind_rows(dim_r_metrics, output_all)
  if (sim_id %% 100 == 0) print(sim_id)  # progress marker
}
save(dim_r_metrics, file = "/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_r_metrics.RDA")

# Read in BN2MF metrics
# (each iteration loads three .RDA files defining `vci_metrics`,
#  `dim_rank`, and `dim_metrics`, respectively)
bn2mf_rank <- tibble()
bn2mf_metrics <- tibble()
bn2mf_prop <- tibble()
for (sim_id in seq_len(n_sims)) {
  load(paste0("/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_prop_m/dim_prop_m_", sim_id, ".RDA"))
  bn2mf_prop <- bind_rows(bn2mf_prop, vci_metrics)

  load(paste0("/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_rank_m/dim_rank_m_", sim_id, ".RDA"))
  bn2mf_rank <- bind_rows(bn2mf_rank, dim_rank)

  load(paste0("/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_metrics_m/dim_metrics_m_", sim_id, ".RDA"))
  bn2mf_metrics <- bind_rows(bn2mf_metrics, dim_metrics)

  if (sim_id %% 100 == 0) print(sim_id)  # progress marker
}
save(bn2mf_metrics, file = "/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_m_metrics.RDA")
save(bn2mf_prop, file = "/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_m_prop.RDA")
save(bn2mf_rank, file = "/ifs/scratch/msph/ehs/eag2186/npbnmf/dim/dim_m_rank.RDA")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{remove_empty_lines}
\alias{remove_empty_lines}
\title{Parse config file and remove empty lines}
\usage{
remove_empty_lines(lines)
}
\arguments{
\item{lines}{A list of config file lines.}
}
\value{
A list of config file lines where empty lines have been filtered out.
}
\description{
Parse config file and remove empty lines
}
|
/man/remove_empty_lines.Rd
|
permissive
|
durandmorgan/DecoupleR
|
R
| false
| true
| 418
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{remove_empty_lines}
\alias{remove_empty_lines}
\title{Parse config file and remove empty lines}
\usage{
remove_empty_lines(lines)
}
\arguments{
\item{lines}{A list of config file lines.}
}
\value{
A list of config file lines where empty lines have been filtered out.
}
\description{
Parse config file and remove empty lines
}
|
library(ggplot2)
data(diamonds)
# Convert the ordered factors to their underlying integer codes so that
# lm() fits a single numeric slope for each of cut/color/clarity.
diamonds$cut <- unclass(diamonds$cut)
diamonds$color <- unclass(diamonds$color)
diamonds$clarity <- unclass(diamonds$clarity)
fit <- lm(price ~ ., data = diamonds)

shinyServer(
  function(input, output) {
    output$carat <- renderPrint({input$carat})
    # Translate the numeric cut code (1-5) back to its label for display.
    output$cut <- renderText({
      if (input$cut == 1) "Fair"
      else if (input$cut == 2) "Good"
      else if (input$cut == 3) "Very Good"
      else if (input$cut == 4) "Premium"
      else "Ideal"
    })
    # Translate the numeric color code (1-7) back to its letter grade.
    output$color <- renderText({
      if (input$color == 1) "D"
      else if (input$color == 2) "E"
      else if (input$color == 3) "F"
      else if (input$color == 4) "G"
      else if (input$color == 5) "H"
      else if (input$color == 6) "I"
      else "J"
    })
    # Translate the numeric clarity code (1-8) back to its label.
    output$clarity <- renderText({
      if (input$clarity == 1) "I1"
      else if (input$clarity == 2) "SI2"
      else if (input$clarity == 3) "SI1"
      else if (input$clarity == 4) "VS2"
      else if (input$clarity == 5) "VS1"
      else if (input$clarity == 6) "VVS2"
      else if (input$clarity == 7) "VVS1"
      else "IF"
    })
    output$x <- renderPrint({input$x})
    output$y <- renderPrint({input$y})
    output$z <- renderPrint({input$z})
    output$depth <- renderPrint({input$depth})
    output$table <- renderPrint({input$table})
    # Predicted price. BUG FIX: the original multiplied coefficients by hand
    # and mis-mapped the indices -- with price ~ . on diamonds the coefficient
    # order is (Intercept), carat, cut, color, clarity, depth, table, x, y, z,
    # but coefficients 6-10 were paired with x, y, z, depth, table.
    # predict() with a named newdata frame removes the index bookkeeping.
    output$prediction <- renderPrint({
      newdata <- data.frame(
        carat   = as.numeric(input$carat),
        cut     = as.numeric(input$cut),
        color   = as.numeric(input$color),
        clarity = as.numeric(input$clarity),
        depth   = as.numeric(input$depth),
        table   = as.numeric(input$table),
        x       = as.numeric(input$x),
        y       = as.numeric(input$y),
        z       = as.numeric(input$z)
      )
      unname(predict(fit, newdata = newdata))
    })
    output$modelfit <- renderPrint({summary(fit)})
    output$Modelplot <- renderPlot(plot(fit))
  }
)
|
/server.R
|
no_license
|
Gabegao/DataProductProject
|
R
| false
| false
| 1,861
|
r
|
library(ggplot2)
data(diamonds)
# Convert the ordered factors to their underlying integer codes so that
# lm() fits a single numeric slope for each of cut/color/clarity.
diamonds$cut <- unclass(diamonds$cut)
diamonds$color <- unclass(diamonds$color)
diamonds$clarity <- unclass(diamonds$clarity)
fit <- lm(price ~ ., data = diamonds)

shinyServer(
  function(input, output) {
    output$carat <- renderPrint({input$carat})
    # Translate the numeric cut code (1-5) back to its label for display.
    output$cut <- renderText({
      if (input$cut == 1) "Fair"
      else if (input$cut == 2) "Good"
      else if (input$cut == 3) "Very Good"
      else if (input$cut == 4) "Premium"
      else "Ideal"
    })
    # Translate the numeric color code (1-7) back to its letter grade.
    output$color <- renderText({
      if (input$color == 1) "D"
      else if (input$color == 2) "E"
      else if (input$color == 3) "F"
      else if (input$color == 4) "G"
      else if (input$color == 5) "H"
      else if (input$color == 6) "I"
      else "J"
    })
    # Translate the numeric clarity code (1-8) back to its label.
    output$clarity <- renderText({
      if (input$clarity == 1) "I1"
      else if (input$clarity == 2) "SI2"
      else if (input$clarity == 3) "SI1"
      else if (input$clarity == 4) "VS2"
      else if (input$clarity == 5) "VS1"
      else if (input$clarity == 6) "VVS2"
      else if (input$clarity == 7) "VVS1"
      else "IF"
    })
    output$x <- renderPrint({input$x})
    output$y <- renderPrint({input$y})
    output$z <- renderPrint({input$z})
    output$depth <- renderPrint({input$depth})
    output$table <- renderPrint({input$table})
    # Predicted price. BUG FIX: the original multiplied coefficients by hand
    # and mis-mapped the indices -- with price ~ . on diamonds the coefficient
    # order is (Intercept), carat, cut, color, clarity, depth, table, x, y, z,
    # but coefficients 6-10 were paired with x, y, z, depth, table.
    # predict() with a named newdata frame removes the index bookkeeping.
    output$prediction <- renderPrint({
      newdata <- data.frame(
        carat   = as.numeric(input$carat),
        cut     = as.numeric(input$cut),
        color   = as.numeric(input$color),
        clarity = as.numeric(input$clarity),
        depth   = as.numeric(input$depth),
        table   = as.numeric(input$table),
        x       = as.numeric(input$x),
        y       = as.numeric(input$y),
        z       = as.numeric(input$z)
      )
      unname(predict(fit, newdata = newdata))
    })
    output$modelfit <- renderPrint({summary(fit)})
    output$Modelplot <- renderPlot(plot(fit))
  }
)
|
#################################################################
#
# File: read.cdb.R
# Purpose: Read a cdb database
#
# Created: 20130416
# Authors: Emilio Torres Manzanera
#
# Modifications:
#
#################################################################
read.cdb <- function(file, type = c("cdb", "txt")) {
  ## Read a cdb (constant database) file and return a data.frame with
  ## columns `key` and `value`.
  ##   file - path to the database
  ##   type - "cdb" for the binary on-disk format, "txt" for the
  ##          cdbmake/cdbdump text format ("+klen,vlen:key->value\n")
  data <- data.frame(key = as.character(NULL), value = as.character(NULL))
  if (type[1] == "cdb") {
    ## Decode a 4-byte little-endian unsigned integer from a raw vector.
    cdbunpack <- function(buffer) {
      if (!is.raw(buffer))    # was `class(buffer) != "raw"`; is.raw is robust
        stop("Not raw data")
      if (length(buffer) != 4)
        stop("No length 4")
      if (!requireNamespace("bitops", quietly = TRUE))
        stop("Package 'bitops' is required to read binary cdb files")
      n <- buffer[4]
      n <- bitops::bitShiftL(n, 8)
      n <- bitops::bitOr(n, buffer[3])
      n <- bitops::bitShiftL(n, 8)
      n <- bitops::bitOr(n, buffer[2])
      n <- bitops::bitShiftL(n, 8)
      n <- bitops::bitOr(n, buffer[1])
      n
    }
    zz <- file(file, "rb")
    on.exit(close(zz), add = TRUE)  # guarantee cleanup even on error
    ## The first pointer entry gives the end of the record area; the rest of
    ## the 2048-byte pointer table is skipped.
    pointerend <- cdbunpack(readBin(zz, "raw", n = 4, size = 1, signed = FALSE))
    dummy <- readChar(zz, 2048 - 4, useBytes = TRUE)
    pos <- 2048
    while (pos < pointerend) {
      klen  <- cdbunpack(readBin(zz, "raw", n = 4, size = 1, signed = FALSE))
      vlen  <- cdbunpack(readBin(zz, "raw", n = 4, size = 1, signed = FALSE))
      key   <- readChar(zz, klen, useBytes = TRUE)
      value <- readChar(zz, vlen, useBytes = TRUE)
      pos <- pos + 8 + klen + vlen
      data <- rbind(data, cbind(key, value))
    }
  } else {
    ## Read one "+klen,vlen:key->value\n" record; NULL when no record remains.
    readalineofcdbtxtformat <- function(zz) {
      dummy <- readChar(zz, 1)  # expect the leading "+"
      # readChar() returns character(0) at end of file. BUG FIX: the original
      # used the non-short-circuiting `|`, which made this condition
      # zero-length at EOF and crashed if() when the file lacked a trailing
      # blank line; `||` short-circuits on the length test.
      if (length(dummy) != 1 || dummy != "+")
        return(NULL)
      klenstring <- ""
      while ((z <- readChar(zz, 1, useBytes = TRUE)) != ",") {
        klenstring <- paste(klenstring, z, sep = "")
      }
      vlenstring <- ""
      while ((z <- readChar(zz, 1, useBytes = TRUE)) != ":") {
        vlenstring <- paste(vlenstring, z, sep = "")
      }
      key <- readChar(zz, strtoi(klenstring), useBytes = TRUE)
      dummy <- readChar(zz, 2)  # read the "->"
      value <- readChar(zz, strtoi(vlenstring), useBytes = TRUE)
      dummy <- readChar(zz, 1)  # read the "\n"
      if (strtoi(klenstring) != nchar(key, type = "bytes") ||
          strtoi(vlenstring) != nchar(value, type = "bytes"))
        stop("Error in length of key or value!")
      cbind(key, value)
    }
    zz <- file(file, "r")
    if (!isOpen(zz))
      stop(paste("File ", file, " does not exist"))
    on.exit(close(zz), add = TRUE)  # guarantee cleanup even on error
    while (length(register <- readalineofcdbtxtformat(zz)) == 2) {
      data <- rbind(data, register)
    }
  }
  data
}
|
/R/read.cdb.R
|
no_license
|
cran/cdb
|
R
| false
| false
| 2,530
|
r
|
#################################################################
#
# File: read.cdb.R
# Purpose: Read a cdb database
#
# Created: 20130416
# Authors: Emilio Torres Manzanera
#
# Modifications:
#
#################################################################
read.cdb <- function(file, type = c("cdb", "txt")) {
  ## Read a cdb (constant database) file and return a data.frame with
  ## columns `key` and `value`.
  ##   file - path to the database
  ##   type - "cdb" for the binary on-disk format, "txt" for the
  ##          cdbmake/cdbdump text format ("+klen,vlen:key->value\n")
  data <- data.frame(key = as.character(NULL), value = as.character(NULL))
  if (type[1] == "cdb") {
    ## Decode a 4-byte little-endian unsigned integer from a raw vector.
    cdbunpack <- function(buffer) {
      if (!is.raw(buffer))    # was `class(buffer) != "raw"`; is.raw is robust
        stop("Not raw data")
      if (length(buffer) != 4)
        stop("No length 4")
      if (!requireNamespace("bitops", quietly = TRUE))
        stop("Package 'bitops' is required to read binary cdb files")
      n <- buffer[4]
      n <- bitops::bitShiftL(n, 8)
      n <- bitops::bitOr(n, buffer[3])
      n <- bitops::bitShiftL(n, 8)
      n <- bitops::bitOr(n, buffer[2])
      n <- bitops::bitShiftL(n, 8)
      n <- bitops::bitOr(n, buffer[1])
      n
    }
    zz <- file(file, "rb")
    on.exit(close(zz), add = TRUE)  # guarantee cleanup even on error
    ## The first pointer entry gives the end of the record area; the rest of
    ## the 2048-byte pointer table is skipped.
    pointerend <- cdbunpack(readBin(zz, "raw", n = 4, size = 1, signed = FALSE))
    dummy <- readChar(zz, 2048 - 4, useBytes = TRUE)
    pos <- 2048
    while (pos < pointerend) {
      klen  <- cdbunpack(readBin(zz, "raw", n = 4, size = 1, signed = FALSE))
      vlen  <- cdbunpack(readBin(zz, "raw", n = 4, size = 1, signed = FALSE))
      key   <- readChar(zz, klen, useBytes = TRUE)
      value <- readChar(zz, vlen, useBytes = TRUE)
      pos <- pos + 8 + klen + vlen
      data <- rbind(data, cbind(key, value))
    }
  } else {
    ## Read one "+klen,vlen:key->value\n" record; NULL when no record remains.
    readalineofcdbtxtformat <- function(zz) {
      dummy <- readChar(zz, 1)  # expect the leading "+"
      # readChar() returns character(0) at end of file. BUG FIX: the original
      # used the non-short-circuiting `|`, which made this condition
      # zero-length at EOF and crashed if() when the file lacked a trailing
      # blank line; `||` short-circuits on the length test.
      if (length(dummy) != 1 || dummy != "+")
        return(NULL)
      klenstring <- ""
      while ((z <- readChar(zz, 1, useBytes = TRUE)) != ",") {
        klenstring <- paste(klenstring, z, sep = "")
      }
      vlenstring <- ""
      while ((z <- readChar(zz, 1, useBytes = TRUE)) != ":") {
        vlenstring <- paste(vlenstring, z, sep = "")
      }
      key <- readChar(zz, strtoi(klenstring), useBytes = TRUE)
      dummy <- readChar(zz, 2)  # read the "->"
      value <- readChar(zz, strtoi(vlenstring), useBytes = TRUE)
      dummy <- readChar(zz, 1)  # read the "\n"
      if (strtoi(klenstring) != nchar(key, type = "bytes") ||
          strtoi(vlenstring) != nchar(value, type = "bytes"))
        stop("Error in length of key or value!")
      cbind(key, value)
    }
    zz <- file(file, "r")
    if (!isOpen(zz))
      stop(paste("File ", file, " does not exist"))
    on.exit(close(zz), add = TRUE)  # guarantee cleanup even on error
    while (length(register <- readalineofcdbtxtformat(zz)) == 2) {
      data <- rbind(data, register)
    }
  }
  data
}
|
#' getPretrainedNetwork
#'
#' Downloads antsrnet pretrained network. \url{10.6084/m9.figshare.7246985}
#'
#' @param fileid one of the permitted file ids or pass "show" to list all
#' valid possibilities. Note that most require internet access to download.
#' @param targetFileName optional target filename; when left at its default
#' (\code{FALSE}) the file is downloaded to a temporary .h5 file.
#' @param verbose If \code{TRUE}, show download.file progress output;
#' otherwise the download is quiet.
#' @return filename string
#' @author Avants BB
#' @examples
#'
#' net <- getPretrainedNetwork( "dbpn4x" )
#'
#' @export getPretrainedNetwork
getPretrainedNetwork <- function(fileid,
                                 targetFileName = FALSE,
                                 verbose = FALSE) {
  myusage <- "usage: getPretrainedNetwork(fileid = whatever , usefixedlocation = TRUE )"
  # No id supplied: print usage and bail out.
  if (missing(fileid)) {
    print(myusage)
    return(NULL)
  }
  validlist <- c("dbpn4x", "brainExtraction", "protonLungMri", "show")
  if (!fileid %in% validlist) {   # was sum(validlist == fileid) == 0
    message("Try:")
    print(validlist)
    stop("no data with the id you passed - try show to get list of valid ids")
  }
  if (fileid == "show")
    return(validlist)
  # Map the id to its figshare download URL.
  myurl <- switch(fileid,
    dbpn4x = "https://ndownloader.figshare.com/files/13347617",
    brainExtraction = "https://ndownloader.figshare.com/files/13606802",
    protonLungMri = "https://ndownloader.figshare.com/files/13606799"
  )
  # BUG FIX: the original tested missing(targetFileName) even though the
  # argument has a default, so an explicitly passed FALSE fell through to
  # file.exists(FALSE); test the value itself instead.
  if (identical(targetFileName, FALSE)) {
    targetFileName <- tempfile(fileext = ".h5")  # tmpdir defaults to tempdir()
  }
  # Download only once; verbose (previously ignored) now drives the
  # download.file progress display.
  if (!file.exists(targetFileName)) {
    download.file(myurl, targetFileName, quiet = !verbose)
  }
  return(targetFileName)
}
|
/R/getPretrainedNetwork.R
|
no_license
|
neuroimaginador/ANTsRNet
|
R
| false
| false
| 1,751
|
r
|
#' getPretrainedNetwork
#'
#' Downloads antsrnet pretrained network. \url{10.6084/m9.figshare.7246985}
#'
#' @param fileid one of the permitted file ids or pass "show" to list all
#' valid possibilities. Note that most require internet access to download.
#' @param targetFileName optional target filename; when left at its default
#' (\code{FALSE}) the file is downloaded to a temporary .h5 file.
#' @param verbose If \code{TRUE}, show download.file progress output;
#' otherwise the download is quiet.
#' @return filename string
#' @author Avants BB
#' @examples
#'
#' net <- getPretrainedNetwork( "dbpn4x" )
#'
#' @export getPretrainedNetwork
getPretrainedNetwork <- function(fileid,
                                 targetFileName = FALSE,
                                 verbose = FALSE) {
  myusage <- "usage: getPretrainedNetwork(fileid = whatever , usefixedlocation = TRUE )"
  # No id supplied: print usage and bail out.
  if (missing(fileid)) {
    print(myusage)
    return(NULL)
  }
  validlist <- c("dbpn4x", "brainExtraction", "protonLungMri", "show")
  if (!fileid %in% validlist) {   # was sum(validlist == fileid) == 0
    message("Try:")
    print(validlist)
    stop("no data with the id you passed - try show to get list of valid ids")
  }
  if (fileid == "show")
    return(validlist)
  # Map the id to its figshare download URL.
  myurl <- switch(fileid,
    dbpn4x = "https://ndownloader.figshare.com/files/13347617",
    brainExtraction = "https://ndownloader.figshare.com/files/13606802",
    protonLungMri = "https://ndownloader.figshare.com/files/13606799"
  )
  # BUG FIX: the original tested missing(targetFileName) even though the
  # argument has a default, so an explicitly passed FALSE fell through to
  # file.exists(FALSE); test the value itself instead.
  if (identical(targetFileName, FALSE)) {
    targetFileName <- tempfile(fileext = ".h5")  # tmpdir defaults to tempdir()
  }
  # Download only once; verbose (previously ignored) now drives the
  # download.file progress display.
  if (!file.exists(targetFileName)) {
    download.file(myurl, targetFileName, quiet = !verbose)
  }
  return(targetFileName)
}
|
##-------
## Plot 3
##-------
# Renders the three energy sub-metering series as overlaid line charts and
# writes the result to plot3.png (default 480x480 px device).
# NOTE(review): assumes DateTime, Sub_metering_1, Sub_metering_2 and
# Sub_metering_3 already exist in the workspace (created by an earlier
# data-preparation step) -- confirm before running standalone.
png("plot3.png")
plot(DateTime,Sub_metering_1,type="l",ylab="",xlab="")
lines(DateTime,Sub_metering_2,type="l",col="red")
lines(DateTime,Sub_metering_3,type="l",col="blue")
title(ylab="Energy sub metering")
legend("topright",lty=1, col = c("black", "red","blue"), legend = c("Sub metering 1", "Sub metering 2", "Sub metering 3"))
dev.off()
|
/plot3.R
|
no_license
|
tsimonso/ExData_Plotting1
|
R
| false
| false
| 371
|
r
|
##-------
## Plot 3
##-------
# Renders the three energy sub-metering series as overlaid line charts and
# writes the result to plot3.png (default 480x480 px device).
# NOTE(review): assumes DateTime, Sub_metering_1, Sub_metering_2 and
# Sub_metering_3 already exist in the workspace (created by an earlier
# data-preparation step) -- confirm before running standalone.
png("plot3.png")
plot(DateTime,Sub_metering_1,type="l",ylab="",xlab="")
lines(DateTime,Sub_metering_2,type="l",col="red")
lines(DateTime,Sub_metering_3,type="l",col="blue")
title(ylab="Energy sub metering")
legend("topright",lty=1, col = c("black", "red","blue"), legend = c("Sub metering 1", "Sub metering 2", "Sub metering 3"))
dev.off()
|
# This project looks at EPL season collected from seasons 93/94 to 17/18
# The goal of this project is to tell a story with data relating to
# current season title holders; Liverpool FC.
# We want to pave the way for insight that can be looked back upon when
# performing a detailed analysis of the impact Jurgen Klopp has had on the club
# and it's rise to championship.

# NOTE(review): setwd() with an absolute, machine-specific path makes this
# script non-portable; prefer a project-relative path.
setwd("C:/Users/Abdul Ghaffar/Desktop/Data Mini Projects/Soccer Analytics")
# Load all EPL match results (seasons 1993/94 - 2017/18) from CSV.
AllEPLData <- read.csv("EPLResults_93_18.csv")
summary(AllEPLData)  # quick structural overview of every column

# Planned analyses (not yet implemented below):
# Analysis 1: Liverpool performance before and after klopp relative to league - using simple statistical analysis and viz
# Analysis 2: Can we predict match success based on half time goals better or worse after klopp joined - Regression or Random Forest
# Analysis 3: Can we cluster the clubs that had similar success to liverpool - PCA Analysis or K-means
# Analysis 4: Deep learning
|
/KloppStatisticalAnalysis.R
|
no_license
|
agjamali/Jurgen-Klopp-Liverpool-FC-Statistical-Analysis
|
R
| false
| false
| 908
|
r
|
# This project looks at EPL season collected from seasons 93/94 to 17/18
# The goal of this project is to tell a story with data relating to
# current season title holders; Liverpool FC.
# We want to pave the way for insight that can be looked back upon when
# performing a detailed analysis of the impact Jurgen Klopp has had on the club
# and it's rise to championship.

# NOTE(review): setwd() with an absolute, machine-specific path makes this
# script non-portable; prefer a project-relative path.
setwd("C:/Users/Abdul Ghaffar/Desktop/Data Mini Projects/Soccer Analytics")
# Load all EPL match results (seasons 1993/94 - 2017/18) from CSV.
AllEPLData <- read.csv("EPLResults_93_18.csv")
summary(AllEPLData)  # quick structural overview of every column

# Planned analyses (not yet implemented below):
# Analysis 1: Liverpool performance before and after klopp relative to league - using simple statistical analysis and viz
# Analysis 2: Can we predict match success based on half time goals better or worse after klopp joined - Regression or Random Forest
# Analysis 3: Can we cluster the clubs that had similar success to liverpool - PCA Analysis or K-means
# Analysis 4: Deep learning
|
#======================================================
# Example 9.2.2 on Page 436
# Test for Homogeneity
#------------------------------------------------------
# Twenty observations per brand.
U <- c(25, 31, 20, 42, 39, 19, 35, 36, 44, 26,
       38, 31, 29, 41, 43, 36, 28, 31, 25, 38)
V <- c(28, 17, 33, 25, 31, 21, 16, 19, 31, 27,
       23, 19, 25, 22, 29, 32, 24, 20, 34, 26)

# Make tally table: bin each brand into four class intervals A1-A4.
BrandU <- table(cut(U, breaks = c(-Inf, 23.5, 28.5, 34.5, Inf)))
BrandV <- table(cut(V, breaks = c(-Inf, 23.5, 28.5, 34.5, Inf)))
Data <- rbind(BrandU, BrandV)
Data
rownames(Data) <- c("Brand U", "Brand V")  # fixed typos ("Braud"/"Bruan")
colnames(Data) <- c("A1", "A2", "A3", "A4")

# Let's follow the textbook Data (not needed though).
# Continuity correction is disabled to match the textbook computation
# (Yates's correction would only apply to a 2x2 table anyway; this is 2x4).
chisq.test(Data, correct = FALSE)
|
/Stat/R/Example-9-2-2.r
|
no_license
|
AppliedStat/class
|
R
| false
| false
| 792
|
r
|
#======================================================
# Example 9.2.2 on Page 436
# Test for Homogeneity
#------------------------------------------------------
# Twenty observations per brand.
U <- c(25, 31, 20, 42, 39, 19, 35, 36, 44, 26,
       38, 31, 29, 41, 43, 36, 28, 31, 25, 38)
V <- c(28, 17, 33, 25, 31, 21, 16, 19, 31, 27,
       23, 19, 25, 22, 29, 32, 24, 20, 34, 26)

# Make tally table: bin each brand into four class intervals A1-A4.
BrandU <- table(cut(U, breaks = c(-Inf, 23.5, 28.5, 34.5, Inf)))
BrandV <- table(cut(V, breaks = c(-Inf, 23.5, 28.5, 34.5, Inf)))
Data <- rbind(BrandU, BrandV)
Data
rownames(Data) <- c("Brand U", "Brand V")  # fixed typos ("Braud"/"Bruan")
colnames(Data) <- c("A1", "A2", "A3", "A4")

# Let's follow the textbook Data (not needed though).
# Continuity correction is disabled to match the textbook computation
# (Yates's correction would only apply to a 2x2 table anyway; this is 2x4).
chisq.test(Data, correct = FALSE)
|
################################
## Split a long peptide sequence into pieces of length n (default n = 8),
## joined with "\n" so that plot labels wrap onto multiple lines.
## Returns x unchanged when nchar(x) <= n.
.spltpep <- function(x, n = 8) {
    nc <- nchar(x)
    if (nc > n) {
        # Candidate break positions: every multiple of n within the string.
        pos <- seq(nc)[(seq(nc) %% n) == 0]
        # Drop a final break that coincides with the end of the string,
        # otherwise substring() would emit an empty trailing piece.
        # BUG FIX: the original tested `pos[-1] == nc`, a vector condition
        # that errors in if() whenever length(pos) > 2 (R >= 4.2).
        if (length(pos) > 1 && tail(pos, 1) == nc) {
            pos <- head(pos, -1)
        }
        from <- c(1, pos + 1)
        to <- c(pos, nc)
        st <- paste(substring(x, from, to), collapse = "\n")
    } else { st <- x }
    return(st)
}
################################
###################################
## change Character and Integer columns to Factor
## (in place, by reference: `dt` must be a data.table; the same object is returned)
char2fact <- function(dt){
# Columns to convert: every character column plus every integer column.
changeCols<- c(names(Filter(is.character, dt)), names(Filter(is.integer, dt)))
# := modifies dt by reference.
dt[,(changeCols):=lapply(.SD, as.factor),.SDcols=changeCols]
return(dt)
}
int2fact <- function(dt){
  # Convert the integer columns of a data.table to factor, in place
  # (by reference); returns the same object.
  # NOTE(review): the original body was a verbatim copy of char2fact()
  # and converted character columns as well, despite the name. Restricted
  # here to integer columns as the name implies -- confirm no caller
  # relied on the old, broader behaviour.
  changeCols <- names(Filter(is.integer, dt))
  if (length(changeCols) > 0L) {
    dt[, (changeCols) := lapply(.SD, as.factor), .SDcols = changeCols]
  }
  return(dt)
}
###################################
################################
## function: median polish
## Summarise a wide (features x channels) abundance table into one abundance
## value per channel using Tukey's median polish (stats::medpolish).
## dws : data.frame/data.table of abundances, one column per channel.
## para: "medres" -> Abundance = overall median + mean residual per column;
##       anything else -> Abundance = overall median + column effect.
## Returns list(result = data.table(Channel, Abundance), residual matrix).
f.medpol <- function(dws, para) {
  clnm <- colnames(dws)
  # (The original also built `dw <- as.matrix(dws)` but never used it;
  # medpolish() coerces its input to a matrix itself, so the copy was dropped.)
  mp <- stats::medpolish(dws, na.rm=TRUE, trace.iter = FALSE, maxiter = 30)
  if (toupper(para)=="MEDRES") {
    tmp <- mp$overall + apply(mp$residuals, 2, function(x) mean(x, na.rm = TRUE)) # Abundance = overall median + mean(residual)
  } else { tmp <- mp$overall + mp$col } # Abundance = overall median + column effect
  result <- data.table(Channel=clnm, Abundance=tmp)
  res <- mp$residual
  return(list(result,res))
}
# Median-polish summarisation per Protein across ALL mixtures at once:
# casts to wide (one column per Mixture_Channel key), polishes each Protein
# block via f.medpol(), then splits the combined key back into Mixture/Channel.
MedPolAll <- function(dw, MPpara="coleff") {
dw.w <- data.table::dcast(dw, Protein + Peptide + Charge ~ Mixture + Channel, value.var = "Abundance")
# All columns except the identifier columns are abundance columns.
cols <- names(dw.w)[(!colnames(dw.w) %in% c("Protein", "Peptide", "Charge", "Run"))]
dw.mp <- dw.w[ , f.medpol(.SD, para=MPpara)[[1]] , by = .(Protein), .SDcols= cols ] # MPpara = "coleff" or "medres"
dw.mp[, c("Mixture", "Channel") := tstrsplit(Channel, "_", fixed=TRUE)]
# work.mp$MPmethod <- "allMix"
return(dw.mp)
}
# Median-polish summarisation per Protein WITHIN each Mixture separately
# (contrast with MedPolAll, which polishes across all mixtures at once).
MedPolInd <- function(dw, MPpara="coleff") {
dw.w <- data.table::dcast(dw, Protein + Peptide + Charge + Mixture ~ Channel, value.var = "Abundance")
# All columns except the identifier columns are abundance columns.
cols <- names(dw.w)[(!colnames(dw.w) %in% c("Protein", "Peptide", "Charge", "Mixture"))]
dw.mp <- dw.w[ , f.medpol(.SD, para=MPpara)[[1]] , by = .(Protein, Mixture), .SDcols= cols ] # MPpara = "coleff" or "medres"
# work.mp$MPmethod <- "indMix"
return(dw.mp)
}
################################
################################
## VSN normalization
## Applies variance-stabilising normalisation (vsn2/predict from the vsn
## package) to the abundance table and returns
## list(normalised long data.table, meanSd QC ggplot).
## wdn   : long data.table with an Abundance column on log2 scale
##         (back-transformed below via 2^Abundance); modified by reference.
## calib : passed through to vsn2()'s `calib` argument.
## NOTE(review): depends on the global `FractComb` (not a parameter) to
## decide whether Runs were already combined -- confirm the calling script
## defines it before this function runs.
VSNnorm <- function(wdn, calib) {
wdn[, Intensity := 2^Abundance]
if (toupper(FractComb) != "NONE") { # Fractions (Runs) are combined, continue with Mixtures
wdn.w <- data.table::dcast(wdn, Protein + Peptide + Charge ~ Mixture + Channel, value.var = "Intensity")
cols.ch <- c("Protein","Peptide","Charge")
cols <- names(wdn.w[,-cols.ch, with=FALSE])
} else { # Fractions (Runs) are not combined
wdn.w <- data.table::dcast(wdn, Protein + Peptide + Charge + Run ~ Mixture + Channel, value.var = "Intensity")
cols.ch <- c("Protein","Peptide","Charge", "Run")
cols <- names(wdn.w[,-cols.ch, with=FALSE])
}
## apply VSN
mat <- as.matrix(wdn.w[, cols, with=FALSE])
fit <- vsn2(mat, calib=calib)
pred <- predict(fit, newdata = mat, useDataInFit = TRUE)
# QC figure: mean vs sd of the normalised data should be roughly flat.
vsntest <- meanSdPlot(pred, plot=FALSE)$gg + theme_bw() + scale_fill_distiller(palette = "Spectral")
# Melt back to long format, recover Mixture/Channel from the cast key,
# re-join the original metadata, and drop the temporary Intensity column.
as.data.table(pred) %>%
cbind(wdn.w[,cols.ch, with=FALSE],.) %>%
melt(., id.vars = cols.ch,
variable.name="factors",
value.name="Abundance") %>%
.[, c("Mixture", "Channel") := tstrsplit(factors, "_", fixed=TRUE)] %>%
.[, factors := NULL] %>%
.[wdn, on=names(.)[!names(.) %in% "Abundance"]] %>%
.[, Intensity := NULL] -> work
return(list(work, vsntest))
}
################################
################################
## Function to get the median value
which.medX <- function(x) {
  # Position(s) in x of the smallest value that is >= median(x).
  upper <- x >= median(x, na.rm = TRUE)
  cutoff <- min(x[upper], na.rm = TRUE)
  which(x == cutoff)
}
## gives the median if length(x)=2*n+1 or the larger of the two middle values if length(x)=2*n.
################################
################################
## function to i. calculate PSM ratios
##             ii. impute NA for PSM within Peptide, Mixture, Run and Channel
##             iii. return original Abundance, imputed Abundance & median Abundance
## note: median abundance inserted in datatable for both charge states, meaning median abundance is duplicated for each peptide.
## This is useful for ggplot to have all three levels of Abundance in the same datatable.
f.psm2ppt <- function(ww) {
# NOTE(review): `ww` is overwritten on the next line and then "Feature" is
# re-read from the already-transposed object when setting colnames -- this
# looks suspicious; verify against the expected input layout (presumably a
# two-charge-state wide table keyed by Feature) before relying on it.
ww <- t(ww[,"Feature"])
colnames(ww) <- t(ww[,"Feature"])
# Cross-impute between the two charge-state columns using the median offset.
ww[is.na(ww[,2]),2] <- ww[is.na(ww[,2]),1] - median(ww[,1]-ww[,2], na.rm=TRUE) #filling NAs of 2nd Charge by nonNAs of 1st Charge
ww[is.na(ww[,1]),1] <- ww[is.na(ww[,1]),2] + median(ww[,1]-ww[,2], na.rm=TRUE) #filling NAs of 1st Charge by nonNAs of 2nd Charge
ww.med <- data.table(nm=rownames(ww), ww, medPeptide=apply(ww, 1, function(x) median(x, na.rm=TRUE))) #table of imputed and mean PSM
PSM.imp <- melt.data.table( ww.med[,-4], id.vars = "nm", variable.name="Feature", value.name="impAbundance") #to wide format
PSM.imp[, c("Mixture", "Run", "Channel") := tstrsplit(nm, "_", fixed=TRUE)] # table of imputed abundance
PSM.imp[, c("Protein", "Peptide", "Charge") := tstrsplit(Feature, "_", fixed=TRUE)]
PSM.med <- melt.data.table( ww.med[,c(1,4)], id.vars = "nm", value.name="medAbundance")
PPT.dt <- PSM.imp[PSM.med[,-"variable"], on=c("nm")] # add median abundance
return(PPT.dt)
}
################################
|
/scripts/tmtFuncs.R
|
no_license
|
fredrious/CblbTMT
|
R
| false
| false
| 5,892
|
r
|
################################
## Function to split long peptide sequences into smaller pieces of length n (by default n=8)
## and insert \n at the split locations. This is for having shorter texts on plots. In
## practice, \n breaks peptide names into multiple lines.
## x : a single character string.  n : maximum piece length.
## Returns x unchanged when nchar(x) <= n, otherwise the pieces joined by "\n".
.spltpep <- function(x, n=8) {
  nc <- nchar(x)
  if (nc > n) {
    # Candidate split positions: every multiple of n within the string.
    pos <- seq(nc)[(seq(nc) %% n) == 0]
    # Drop the last split point when it coincides with the end of the string,
    # otherwise an empty trailing piece (and a stray "\n") is produced.
    # BUGFIX: the original tested `pos[-1] == nc` ("all but the first"),
    # which only works when length(pos) == 2; for nchar(x) >= 3n it yields
    # a length > 1 condition (error in R >= 4.3, wrong answer before).
    if (pos[length(pos)] == nc) pos <- head(pos, -1)
    from <- c(1, pos + 1)
    to   <- c(pos, nc)
    xx <- substring(x, from, to)
    st <- do.call(paste, c(as.list(xx), sep = "\n"))
  } else { st <- x }
  return(st)
}
################################
###################################
## change Character and Integer columns to Factor
## (in place, by reference: `dt` must be a data.table; the same object is returned)
char2fact <- function(dt){
changeCols<- c(names(Filter(is.character, dt)), names(Filter(is.integer, dt)))
dt[,(changeCols):=lapply(.SD, as.factor),.SDcols=changeCols]
return(dt)
}
int2fact <- function(dt){
# NOTE(review): body is a verbatim copy of char2fact() -- it converts
# character columns as well as integer ones, despite the name. Likely a
# copy-paste slip; confirm intent before changing.
changeCols<- c(names(Filter(is.character, dt)), names(Filter(is.integer, dt)))
dt[,(changeCols):=lapply(.SD, as.factor),.SDcols=changeCols]
return(dt)
}
###################################
################################
## function: median polish
## Summarise a wide (features x channels) abundance table into one abundance
## value per channel using Tukey's median polish (stats::medpolish).
## para: "medres" -> overall median + mean residual per column;
##       anything else -> overall median + column effect.
f.medpol <- function(dws, para) {
clnm <- colnames(dws)
# NOTE(review): `dw` is assigned but never used below (medpolish coerces
# its input to a matrix itself).
dw <- as.matrix(dws)
mp <- stats::medpolish(dws, na.rm=TRUE, trace.iter = FALSE, maxiter = 30)
if (toupper(para)=="MEDRES") {
tmp <- mp$overall + apply(mp$residuals, 2, function(x) mean(x, na.rm = TRUE)) # Abundance = overall median + mean(residual)
} else { tmp <- mp$overall + mp$col } # Abundance = overall median + column effect
result <- data.table(Channel=clnm, Abundance=tmp)
res <- mp$residual
# Returns list(result = data.table(Channel, Abundance), residual matrix).
return(list(result,res))
}
# Median-polish summarisation per Protein across ALL mixtures at once:
# casts to wide (one column per Mixture_Channel key), polishes each Protein
# block via f.medpol(), then splits the combined key back into Mixture/Channel.
MedPolAll <- function(dw, MPpara="coleff") {
dw.w <- data.table::dcast(dw, Protein + Peptide + Charge ~ Mixture + Channel, value.var = "Abundance")
cols <- names(dw.w)[(!colnames(dw.w) %in% c("Protein", "Peptide", "Charge", "Run"))]
dw.mp <- dw.w[ , f.medpol(.SD, para=MPpara)[[1]] , by = .(Protein), .SDcols= cols ] # MPpara = "coleff" or "medres"
dw.mp[, c("Mixture", "Channel") := tstrsplit(Channel, "_", fixed=TRUE)]
# work.mp$MPmethod <- "allMix"
return(dw.mp)
}
# Median-polish summarisation per Protein WITHIN each Mixture separately
# (contrast with MedPolAll, which polishes across all mixtures at once).
MedPolInd <- function(dw, MPpara="coleff") {
dw.w <- data.table::dcast(dw, Protein + Peptide + Charge + Mixture ~ Channel, value.var = "Abundance")
cols <- names(dw.w)[(!colnames(dw.w) %in% c("Protein", "Peptide", "Charge", "Mixture"))]
dw.mp <- dw.w[ , f.medpol(.SD, para=MPpara)[[1]] , by = .(Protein, Mixture), .SDcols= cols ] # MPpara = "coleff" or "medres"
# work.mp$MPmethod <- "indMix"
return(dw.mp)
}
################################
################################
## VSN normalization
## Applies variance-stabilising normalisation (vsn2/predict from the vsn
## package) and returns list(normalised long data.table, meanSd QC ggplot).
## NOTE(review): depends on the global `FractComb` (not a parameter) --
## confirm the calling script defines it before this function runs.
VSNnorm <- function(wdn, calib) {
wdn[, Intensity := 2^Abundance]
if (toupper(FractComb) != "NONE") { # Fractions (Runs) are combined, continue with Mixtures
wdn.w <- data.table::dcast(wdn, Protein + Peptide + Charge ~ Mixture + Channel, value.var = "Intensity")
cols.ch <- c("Protein","Peptide","Charge")
cols <- names(wdn.w[,-cols.ch, with=FALSE])
} else { # Fractions (Runs) are not combined
wdn.w <- data.table::dcast(wdn, Protein + Peptide + Charge + Run ~ Mixture + Channel, value.var = "Intensity")
cols.ch <- c("Protein","Peptide","Charge", "Run")
cols <- names(wdn.w[,-cols.ch, with=FALSE])
}
## apply VSN
mat <- as.matrix(wdn.w[, cols, with=FALSE])
fit <- vsn2(mat, calib=calib)
pred <- predict(fit, newdata = mat, useDataInFit = TRUE)
# QC figure: mean vs sd of the normalised data should be roughly flat.
vsntest <- meanSdPlot(pred, plot=FALSE)$gg + theme_bw() + scale_fill_distiller(palette = "Spectral")
# Melt back to long format, recover Mixture/Channel from the cast key,
# re-join the original metadata, and drop the temporary Intensity column.
as.data.table(pred) %>%
cbind(wdn.w[,cols.ch, with=FALSE],.) %>%
melt(., id.vars = cols.ch,
variable.name="factors",
value.name="Abundance") %>%
.[, c("Mixture", "Channel") := tstrsplit(factors, "_", fixed=TRUE)] %>%
.[, factors := NULL] %>%
.[wdn, on=names(.)[!names(.) %in% "Abundance"]] %>%
.[, Intensity := NULL] -> work
return(list(work, vsntest))
}
################################
################################
## Function to get the median value
which.medX <- function(x) {
  # Position(s) in x of the smallest value that is >= median(x).
  upper <- x >= median(x, na.rm = TRUE)
  cutoff <- min(x[upper], na.rm = TRUE)
  which(x == cutoff)
}
## gives the median if length(x)=2*n+1 or the larger of the two middle values if length(x)=2*n.
################################
################################
## function to i. calculate PSM ratios
##             ii. impute NA for PSM within Peptide, Mixture, Run and Channel
##             iii. return original Abundance, imputed Abundance & median Abundance
## note: median abundance inserted in datatable for both charge states, meaning median abundance is duplicated for each peptide.
## This is useful for ggplot to have all three levels of Abundance in the same datatable.
f.psm2ppt <- function(ww) {
# NOTE(review): `ww` is overwritten here and then "Feature" is re-read from
# the transposed object when setting colnames -- looks suspicious; verify
# against the expected input layout before relying on this function.
ww <- t(ww[,"Feature"])
colnames(ww) <- t(ww[,"Feature"])
# Cross-impute between the two charge-state columns using the median offset.
ww[is.na(ww[,2]),2] <- ww[is.na(ww[,2]),1] - median(ww[,1]-ww[,2], na.rm=TRUE) #filling NAs of 2nd Charge by nonNAs of 1st Charge
ww[is.na(ww[,1]),1] <- ww[is.na(ww[,1]),2] + median(ww[,1]-ww[,2], na.rm=TRUE) #filling NAs of 1st Charge by nonNAs of 2nd Charge
ww.med <- data.table(nm=rownames(ww), ww, medPeptide=apply(ww, 1, function(x) median(x, na.rm=TRUE))) #table of imputed and mean PSM
PSM.imp <- melt.data.table( ww.med[,-4], id.vars = "nm", variable.name="Feature", value.name="impAbundance") #to wide format
PSM.imp[, c("Mixture", "Run", "Channel") := tstrsplit(nm, "_", fixed=TRUE)] # table of imputed abundance
PSM.imp[, c("Protein", "Peptide", "Charge") := tstrsplit(Feature, "_", fixed=TRUE)]
PSM.med <- melt.data.table( ww.med[,c(1,4)], id.vars = "nm", value.name="medAbundance")
PPT.dt <- PSM.imp[PSM.med[,-"variable"], on=c("nm")] # add median abundance
return(PPT.dt)
}
################################
|
# Auto-generated fuzzing harness (file path suggests AFL/valgrind testing):
# feeds an extreme numeric vector (one huge value, one denormal, the rest
# zeros) to the internal myTAI:::cpp_geom_mean() and prints the result's
# structure to check it does not crash.
testlist <- list(x = c(5.25663347308138e+83, 1.02849645494772e-319, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(myTAI:::cpp_geom_mean,testlist)
str(result)
|
/myTAI/inst/testfiles/cpp_geom_mean/AFL_cpp_geom_mean/cpp_geom_mean_valgrind_files/1615839307-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 194
|
r
|
# Auto-generated fuzzing harness (file path suggests AFL/valgrind testing):
# feeds an extreme numeric vector (one huge value, one denormal, the rest
# zeros) to the internal myTAI:::cpp_geom_mean() and prints the result's
# structure to check it does not crash.
testlist <- list(x = c(5.25663347308138e+83, 1.02849645494772e-319, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(myTAI:::cpp_geom_mean,testlist)
str(result)
|
\name{CTMC_PO_1-class}
\Rdversion{1.1}
\docType{class}
\alias{CTMC_PO_1-class}
\title{Class "CTMC_PO_1"}
\description{Partially observed CTMC.}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("CTMC_PO_1", ...)}.
Like CTMCs but don't have an ending time; the final observation time
serves that purpose.
}
\section{Slots}{
\describe{
\item{\code{states}:}{Object of class \code{"numeric"} ~~ }
\item{\code{times}:}{Object of class \code{"numeric"} ~~ }
}
}
\section{Methods}{
\describe{
\item{BDsummaryStats.PO}{\code{signature(dat = "CTMC_PO_1")}: ... }
\item{getStates}{\code{signature(object = "CTMC_PO_1")}: ... }
\item{getTimes}{\code{signature(object = "CTMC_PO_1")}: ... }
}
}
\author{Charles Doss}
\examples{
showClass("CTMC_PO_1")
}
\keyword{classes}
|
/man/CTMC_PO_1-class.Rd
|
no_license
|
cran/DOBAD
|
R
| false
| false
| 833
|
rd
|
\name{CTMC_PO_1-class}
\Rdversion{1.1}
\docType{class}
\alias{CTMC_PO_1-class}
\title{Class "CTMC_PO_1"}
\description{Partially observed CTMC.}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("CTMC_PO_1", ...)}.
Like CTMCs but don't have an ending time; the final observation time
serves that purpose.
}
\section{Slots}{
\describe{
\item{\code{states}:}{Object of class \code{"numeric"} ~~ }
\item{\code{times}:}{Object of class \code{"numeric"} ~~ }
}
}
\section{Methods}{
\describe{
\item{BDsummaryStats.PO}{\code{signature(dat = "CTMC_PO_1")}: ... }
\item{getStates}{\code{signature(object = "CTMC_PO_1")}: ... }
\item{getTimes}{\code{signature(object = "CTMC_PO_1")}: ... }
}
}
\author{Charles Doss}
\examples{
showClass("CTMC_PO_1")
}
\keyword{classes}
|
#' read.soilcarbon
#'
#' This function imports data from xlsx format matching the standard soil carbon data template
#'
#' @param file directory to data file
#' @param template set to TRUE if reading in a template file (skips the
#'   empty-row clean-up applied to real data files)
#' @return A named list with one data.frame per template sheet
#'   (metadata, site, profile, layer, fraction); the source file name is
#'   attached as the \code{file_name} attribute.
#' @import openxlsx
#' @import utils
#' @export
read.soilcarbon <- function(file, template = FALSE){
  # Fail early and loudly if the dependency is missing; the original
  # discarded requireNamespace()'s return value.
  if (!requireNamespace("openxlsx", quietly = TRUE)) {
    stop("Package 'openxlsx' is required to read xlsx files.", call. = FALSE)
  }
  # compare sheets found in the data file to the sheets required by the
  # standard data template
  sheets_found <- getSheetNames(file)
  sheets_needed <- c("metadata", "site", "profile", "layer", "fraction")
  if (!all(sheets_needed %in% sheets_found)) {
    sheets_missing <- setdiff(sheets_needed, sheets_found)
    # Collapse so several missing sheets produce one readable error message
    # (the original passed a vector to paste(), yielding a garbled message).
    stop(paste0("Sheet(s) '", paste(sheets_missing, collapse = "', '"),
                "' missing from data file"), call. = FALSE)
  }
  metadata <- read.xlsx(file, sheet = "metadata")
  site     <- read.xlsx(file, sheet = "site")
  profile  <- read.xlsx(file, sheet = "profile")
  layer    <- read.xlsx(file, sheet = "layer", check.names = TRUE)
  fraction <- read.xlsx(file, sheet = "fraction")
  data_workbook <- list(metadata = metadata, site = site, profile = profile,
                        layer = layer, fraction = fraction)
  # Drop the first two rows of every sheet (template description rows).
  data_workbook <- lapply(data_workbook, function(x) x[-1:-2, ])
  # remove rows that are empty or whitespace-only (skipped for templates)
  if (!template) {
    for (i in seq_along(data_workbook)) {
      data <- data_workbook[[i]]
      for (j in seq_len(ncol(data))) {
        data[, j][grep("^[ ]+$", data[, j])] <- NA
      }
      data_workbook[[i]] <- data
      data_workbook[[i]] <- data_workbook[[i]][rowSums(is.na(data_workbook[[i]])) != ncol(data_workbook[[i]]), ]
    }
  }
  # Re-infer column types: everything is first coerced to character, then
  # type.convert() guesses numeric/logical/factor per column.
  # NOTE(review): type.convert()'s `as.is` default changed in R 4.0 -- confirm
  # whether factors or character columns are expected downstream.
  data_workbook <- lapply(data_workbook, function(x) lapply(x, as.character))
  data_workbook <- lapply(data_workbook, function(x) lapply(x, type.convert))
  data_workbook <- lapply(data_workbook, as.data.frame)
  attributes(data_workbook)$file_name <- file
  return(data_workbook)
}
|
/R/read.soilcarbon.R
|
no_license
|
jb388/soilcarbon
|
R
| false
| false
| 1,823
|
r
|
#' read.soilcarbon
#'
#' This function imports data from xlsx format matching the standard soil carbon data template
#'
#' @param file directory to data file
#' @param template set to TRUE if reading in a template file (skips the
#'   empty-row clean-up applied to real data files)
#' @return A named list with one data.frame per template sheet
#'   (metadata, site, profile, layer, fraction); the source file name is
#'   attached as the \code{file_name} attribute.
#' @import openxlsx
#' @import utils
#' @export
read.soilcarbon<-function(file, template=F){
# NOTE(review): requireNamespace()'s return value is ignored; a missing
# openxlsx would fail later with a less helpful error.
requireNamespace("openxlsx")
# compare sheets found in datafile to the necessary sheets in the standard data template
sheets_found<-getSheetNames(file)
sheets_needed<-c("metadata","site","profile","layer", "fraction")
if (F %in% (sheets_needed %in% sheets_found)){
sheets_missing<-setdiff(sheets_needed, sheets_found)
# NOTE(review): paste() is vectorized here; several missing sheets yield a
# garbled multi-part message.
stop(paste("Sheet(s) '",sheets_missing,"' missing from data file", sep="")) # if sheets are missing, return error message with the missing sheets
}
metadata<-read.xlsx(file , sheet="metadata")
site<-read.xlsx(file , sheet="site")
profile<-read.xlsx(file , sheet="profile")
layer<-read.xlsx(file , sheet="layer", check.names=T)
fraction<-read.xlsx(file , sheet="fraction")
data_workbook=list(metadata=metadata, site=site, profile=profile, layer=layer, fraction=fraction)
# Drop the first two rows of every sheet (template description rows).
data_workbook<-lapply(data_workbook, function(x) x<-x[-1:-2,])
#remove empty rows
if (template==F){
for (i in 1:length(data_workbook)){
data<-data_workbook[[i]]
for (j in 1:ncol(data)){
# Whitespace-only cells become NA so fully-empty rows can be dropped below.
data[,j][grep("^[ ]+$", data[,j])]<-NA
}
data_workbook[[i]]<-data
data_workbook[[i]]<-data_workbook[[i]][rowSums(is.na(data_workbook[[i]])) != ncol(data_workbook[[i]]),]
}
}
# Re-infer column types via character coercion + type.convert().
data_workbook<-lapply(data_workbook, function(x) lapply(x, as.character))
data_workbook<-lapply(data_workbook, function(x) lapply(x, type.convert))
data_workbook<-lapply(data_workbook, as.data.frame)
attributes(data_workbook)$file_name<-file
return(data_workbook)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/visHierarchicalLayout.R
\name{visHierarchicalLayout}
\alias{visHierarchicalLayout}
\title{Network visualization Hierarchical layout options}
\usage{
visHierarchicalLayout(graph, enabled = TRUE, levelSeparation = NULL,
direction = NULL, sortMethod = NULL)
}
\arguments{
\item{graph}{: a visNetwork object}
\item{enabled}{: Boolean. Default to TRUE when calling this function. Enable or disable the hierarchical layout.}
\item{levelSeparation}{: Number. Default to 150. The distance between the different levels.}
\item{direction}{: String. Default to 'UD'. The direction of the hierarchical layout. The available options are: UD, DU, LR, RL. To simplify: up-down, down-up, left-right, right-left.}
\item{sortMethod}{: String. Default to 'hubsize'. The algorithm used to ascertain the levels of the nodes based on the data. The possible options are: hubsize, directed.}
}
\description{
Network visualization Hierarchical layout options. For full documentation, have a look at \link{visDocumentation}.
}
\examples{
nodes <- data.frame(id = 1:10)
edges <- data.frame(from = round(runif(8)*10), to = round(runif(8)*10))
visNetwork(nodes, edges) \%>\%
visHierarchicalLayout()
visNetwork(nodes, edges) \%>\%
visHierarchicalLayout(direction = "LR")
}
\seealso{
\link{visNodes} for nodes options, \link{visEdges} for edges options, \link{visGroups} for groups options,
\link{visLayout} & \link{visHierarchicalLayout} for layout, \link{visPhysics} for physics, \link{visInteraction} for interaction, ...
}
|
/man/visHierarchicalLayout.Rd
|
no_license
|
hnbeck/visNetwork
|
R
| false
| false
| 1,598
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/visHierarchicalLayout.R
\name{visHierarchicalLayout}
\alias{visHierarchicalLayout}
\title{Network visualization Hierarchical layout options}
\usage{
visHierarchicalLayout(graph, enabled = TRUE, levelSeparation = NULL,
direction = NULL, sortMethod = NULL)
}
\arguments{
\item{graph}{: a visNetwork object}
\item{enabled}{: Boolean. Default to TRUE when calling this function. Enable or disable the hierarchical layout.}
\item{levelSeparation}{: Number. Default to 150. The distance between the different levels.}
\item{direction}{: String. Default to 'UD'. The direction of the hierarchical layout. The available options are: UD, DU, LR, RL. To simplify: up-down, down-up, left-right, right-left.}
\item{sortMethod}{: String. Default to 'hubsize'. The algorithm used to ascertain the levels of the nodes based on the data. The possible options are: hubsize, directed.}
}
\description{
Network visualization Hierarchical layout options. For full documentation, have a look at \link{visDocumentation}.
}
\examples{
nodes <- data.frame(id = 1:10)
edges <- data.frame(from = round(runif(8)*10), to = round(runif(8)*10))
visNetwork(nodes, edges) \%>\%
visHierarchicalLayout()
visNetwork(nodes, edges) \%>\%
visHierarchicalLayout(direction = "LR")
}
\seealso{
\link{visNodes} for nodes options, \link{visEdges} for edges options, \link{visGroups} for groups options,
\link{visLayout} & \link{visHierarchicalLayout} for layout, \link{visPhysics} for physics, \link{visInteraction} for interaction, ...
}
|
#My first R code
# Plot flight routes over Africa as great-circle arcs on a dark world map,
# coloured by route frequency.
# NOTE(review): requires the `maps` package (map()) and `geosphere`
# (gcIntermediate()) to be attached before sourcing -- confirm the caller
# loads them.
xlim <- c(-16.738281, 56.601563)
ylim <- c(-35.039321, 36.856229)
map("world", col="#191919", fill=TRUE, bg="#000000", lwd=0.05, xlim=xlim, ylim=ylim)
pal <- colorRampPalette(c("#333333", "white", "#1292db"))
colors <- pal(100)
mydata <- read.csv("/Users/newuser/Downloads/flights/the_File2.csv")
# Draw the least-frequent routes first so the busiest end up on top.
fsub <- mydata[order(mydata$number.of.routes), ]
#maxcnt<-max(fsub$number.of.routes)
maxcnt <- 16
for (j in seq_len(nrow(fsub))) {  # seq_len() is safe when fsub has 0 rows
  inter <- gcIntermediate(c(fsub[j,]$long..departure..decimal., fsub[j,]$lat..departure..decimal.),
                          c(fsub[j,]$long..departure..decimal..1, fsub[j,]$lat..departure..decimal..1),
                          n=200, addStartEnd=TRUE)
  colindex <- round((fsub[j,]$number.of.routes / maxcnt) * length(colors))
  # Clamp to 1..length(colors): round() yields 0 for rare routes (and the
  # hard-coded maxcnt=16 lets busy routes exceed the palette), so the
  # original could index colors[0] (empty) or past the end (NA colour).
  colindex <- min(max(colindex, 1L), length(colors))
  lines(inter, col=colors[colindex], lwd=0.8)
}
|
/Flights.Rd
|
no_license
|
connor11528/Africa-flight-routes
|
R
| false
| false
| 782
|
rd
|
#My first R code
# Plot flight routes over Africa as great-circle arcs on a dark world map,
# coloured by route frequency. Requires `maps` (map()) and `geosphere`
# (gcIntermediate()) to be attached before sourcing.
xlim <- c(-16.738281, 56.601563)
ylim <- c(-35.039321, 36.856229)
map("world", col="#191919", fill=TRUE, bg="#000000", lwd=0.05, xlim=xlim, ylim=ylim)
pal <- colorRampPalette(c("#333333", "white", "#1292db"))
colors <- pal(100)
mydata=read.csv("/Users/newuser/Downloads/flights/the_File2.csv")
# Draw the least-frequent routes first so the busiest end up on top.
fsub<-mydata[order(mydata$number.of.routes),]
#maxcnt<-max(fsub$number.of.routes)
maxcnt<-16
for(j in 1:length(fsub$long..departure..decimal..1)){
inter <- gcIntermediate(c(fsub[j,]$long..departure..decimal., fsub[j,]$lat..departure..decimal.), c(fsub[j,]$long..departure..decimal..1, fsub[j,]$lat..departure..decimal..1), n=200, addStartEnd=TRUE)
# NOTE(review): round() can yield 0 here (colors[0] selects nothing) and
# counts above maxcnt=16 index past the palette -- consider clamping.
colindex <- round( (fsub[j,]$number.of.routes / maxcnt) * length(colors) )
lines(inter, col=colors[colindex], lwd=0.8)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lookup_species.R
\name{lookup_species}
\alias{lookup_species}
\title{Match species origin, growth form and type}
\usage{
lookup_species(raw_field_data, species_lookup_data)
}
\arguments{
\item{raw_field_data}{A data_frame of raw percentage cover counts at the quadrat level, grouped by transect. Should contain the columns: \code{transect_number}, \code{quadrat}, \code{species}, and \code{percent_cover}.}
\item{species_lookup_data}{A data_frame containing a key column of species names and associated value columns: \code{type}, \code{origin}, and \code{growth_form} for each species. The full list of species observed during a whole season's field work should be present in the \code{species} column.}
}
\value{
raw_field_data_w_species_type The new data frame containing the \code{type}, \code{origin}, and \code{growth_form} of the matched species.
}
\description{
lookup_species searches for matching species in a species look up table \code{species_lookup_data} for all observed species in the raw field data frame \code{raw_field data} and adds three new columns containing the \code{type}, \code{origin}, and \code{growth_form} of the matched species.
}
|
/man/lookup_species.Rd
|
no_license
|
egouldo/testpackage
|
R
| false
| true
| 1,274
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lookup_species.R
\name{lookup_species}
\alias{lookup_species}
\title{Match species origin, growth form and type}
\usage{
lookup_species(raw_field_data, species_lookup_data)
}
\arguments{
\item{raw_field_data}{A data_frame of raw percentage cover counts at the quadrat level, grouped by transect. Should contain the columns: \code{transect_number}, \code{quadrat}, \code{species}, and \code{percent_cover}.}
\item{species_lookup_data}{A data_frame containing a key column of species names and associated value columns: \code{type}, \code{origin}, and \code{growth_form} for each species. The full list of species observed during a whole season's field work should be present in the \code{species} column.}
}
\value{
raw_field_data_w_species_type The new data frame containing the \code{type}, \code{origin}, and \code{growth_form} of the matched species.
}
\description{
lookup_species searches for matching species in a species look up table \code{species_lookup_data} for all observed species in the raw field data frame \code{raw_field data} and adds three new columns containing the \code{type}, \code{origin}, and \code{growth_form} of the matched species.
}
|
# Plot 1: red histogram of household Global Active Power, written to
# plot1.png (480x480 px). `df` is produced by loadData.R (see structure below).
if (!exists("df", mode = "list")) source("loadData.R")
# `df` is a dataframe loaded by `loadData.R` if not already present in the
# current environment. It has the following structure:
# Classes ‘tbl_df’, ‘tbl’ and 'data.frame': 2880 obs. of 8 variables:
# $ DateTime : POSIXct, format: "2007-02-01 00:00:00" "2007-02-01 00:01:00" "2007-02-01 00:02:00" "2007-02-01 00:03:00" ...
# $ Global_active_power : num 0.326 0.326 0.324 0.324 0.322 0.32 0.32 0.32 0.32 0.236 ...
# $ Global_reactive_power: num 0.128 0.13 0.132 0.134 0.13 0.126 0.126 0.126 0.128 0 ...
# $ Voltage : num 243 243 244 244 243 ...
# $ Global_intensity : num 1.4 1.4 1.4 1.4 1.4 1.4 1.4 1.4 1.4 1 ...
# $ Sub_metering_1 : num 0 0 0 0 0 0 0 0 0 0 ...
# $ Sub_metering_2 : num 0 0 0 0 0 0 0 0 0 0 ...
# $ Sub_metering_3 : num 0 0 0 0 0 0 0 0 0 0 ...
# Reset to a single-panel layout in case an earlier plot changed it.
par(mfrow=c(1,1))
png(filename="plot1.png", width=480, height=480)
# For whatever reason the plots are 504x504 px, but the directions
# specify 480x480 px.
with(data = df,
     hist(Global_active_power, col="red",
          xlab="Global Active Power (kilowatts)",
          main = "Global Active Power"
     )
)
dev.off()
|
/plot1.R
|
no_license
|
Randall-Data-Science/ExData_Plotting1
|
R
| false
| false
| 1,211
|
r
|
# Plot 1: red histogram of household Global Active Power, written to
# plot1.png (480x480 px). `df` is produced by loadData.R (see structure below).
if (!exists("df", mode = "list")) source("loadData.R")
# `df` is a dataframe loaded by `loadData.R` if not already present in the
# current environment. It has the following structure:
# Classes ‘tbl_df’, ‘tbl’ and 'data.frame': 2880 obs. of 8 variables:
# $ DateTime : POSIXct, format: "2007-02-01 00:00:00" "2007-02-01 00:01:00" "2007-02-01 00:02:00" "2007-02-01 00:03:00" ...
# $ Global_active_power : num 0.326 0.326 0.324 0.324 0.322 0.32 0.32 0.32 0.32 0.236 ...
# $ Global_reactive_power: num 0.128 0.13 0.132 0.134 0.13 0.126 0.126 0.126 0.128 0 ...
# $ Voltage : num 243 243 244 244 243 ...
# $ Global_intensity : num 1.4 1.4 1.4 1.4 1.4 1.4 1.4 1.4 1.4 1 ...
# $ Sub_metering_1 : num 0 0 0 0 0 0 0 0 0 0 ...
# $ Sub_metering_2 : num 0 0 0 0 0 0 0 0 0 0 ...
# $ Sub_metering_3 : num 0 0 0 0 0 0 0 0 0 0 ...
# Reset to a single-panel layout in case an earlier plot changed it.
par(mfrow=c(1,1))
png(filename="plot1.png", width=480, height=480)
# For whatever reason the plots are 504x504 px, but the directions
# specify 480x480 px.
with(data = df,
     hist(Global_active_power, col="red",
          xlab="Global Active Power (kilowatts)",
          main = "Global Active Power"
     )
)
dev.off()
|
#' @title ind_genotyped_helper
#' @description Figure helper: number of individuals blacklisted at each
#' missing-genotype threshold, per population, for the mean of populations
#' and overall.
#' @param x data frame with columns \code{INDIVIDUALS}, \code{POP_ID} and
#' \code{PERCENT} (percent of missing genotypes per individual).
#' @return a ggplot object faceted by POP_ID (plus MEAN_POP and OVERALL).
#' @rdname ind_genotyped_helper
#' @export
#' @keywords internal
ind_genotyped_helper <- function(x) {
  # x <- res$missing.genotypes.ind
  # Set the breaks for the figure
  max.ind <- dplyr::n_distinct(x$INDIVIDUALS)
  # For each candidate threshold, count individuals whose missingness PERCENT
  # is at or above it (i.e. that would be blacklisted) -- over all samples...
  threshold.helper.overall <- x %>%
    dplyr::summarise(
      `0` = length(PERCENT[PERCENT >= 0]),
      `10` = length(PERCENT[PERCENT >= 10]),
      `20` = length(PERCENT[PERCENT >= 20]),
      `30` = length(PERCENT[PERCENT >= 30]),
      `40` = length(PERCENT[PERCENT >= 40]),
      `50` = length(PERCENT[PERCENT >= 50]),
      `60` = length(PERCENT[PERCENT >= 60]),
      `70` = length(PERCENT[PERCENT >= 70]),
      `80` = length(PERCENT[PERCENT >= 80]),
      `90` = length(PERCENT[PERCENT >= 90]),
      `100` = length(PERCENT[PERCENT == 100])
    ) %>%
    tidyr::pivot_longer(
      data = .,
      cols = tidyselect::everything(),
      names_to = "GENOTYPED_THRESHOLD",
      values_to = "NUMBER_INDIVIDUALS"
    ) %>%
    dplyr::mutate(POP_ID = rep("OVERALL", n()))
  # ...and per population.
  threshold.helper.pop <- x %>%
    dplyr::group_by(POP_ID) %>%
    dplyr::summarise(
      `0` = length(PERCENT[PERCENT >= 0]),
      `10` = length(PERCENT[PERCENT >= 10]),
      `20` = length(PERCENT[PERCENT >= 20]),
      `30` = length(PERCENT[PERCENT >= 30]),
      `40` = length(PERCENT[PERCENT >= 40]),
      `50` = length(PERCENT[PERCENT >= 50]),
      `60` = length(PERCENT[PERCENT >= 60]),
      `70` = length(PERCENT[PERCENT >= 70]),
      `80` = length(PERCENT[PERCENT >= 80]),
      `90` = length(PERCENT[PERCENT >= 90]),
      `100` = length(PERCENT[PERCENT == 100])
    ) %>%
    tidyr::pivot_longer(
      data = .,
      cols = -POP_ID,
      names_to = "GENOTYPED_THRESHOLD",
      values_to = "NUMBER_INDIVIDUALS"
    )
  # Average of the per-population counts, shown as its own facet.
  mean.pop <- threshold.helper.pop %>%
    dplyr::group_by(GENOTYPED_THRESHOLD) %>%
    dplyr::summarise(
      NUMBER_INDIVIDUALS = round(mean(NUMBER_INDIVIDUALS), 0)
    ) %>%
    dplyr::mutate(POP_ID = rep("MEAN_POP", n()))
  threshold.helper <- suppressWarnings(
    dplyr::bind_rows(threshold.helper.pop, mean.pop, threshold.helper.overall) %>%
      dplyr::mutate(
        GENOTYPED_THRESHOLD = as.numeric(GENOTYPED_THRESHOLD),
        POP_ID = factor(POP_ID, levels = c(levels(x$POP_ID), "MEAN_POP", "OVERALL"), ordered = TRUE)
      ))
  threshold.helper.pop <- mean.pop <- threshold.helper.overall <- x <- NULL
  # Function to replace plyr::round_any
  rounder <- function(x, accuracy, f = round) {
    f(x / accuracy) * accuracy
  }
  # Choose y-axis breaks on a coarser grid for large sample sizes.
  if (max.ind >= 1000) {
    y.breaks.by <- rounder(max.ind/10, 100, ceiling)
    y.breaks.max <- rounder(max.ind, 1000, ceiling)
    y.breaks <- seq(0, y.breaks.max, by = y.breaks.by)
  } else {
    y.breaks.by <- rounder(max.ind/10, 10, ceiling)
    y.breaks.max <- rounder(max.ind, 100, ceiling)
    y.breaks <- seq(0, y.breaks.max, by = y.breaks.by)
  }
  axis.title.element.text.fig <- ggplot2::element_text(
    size = 12, family = "Helvetica", face = "bold")
  axis.text.element.text.fig <- ggplot2::element_text(
    size = 10, family = "Helvetica")
  plot.ind.geno.threshold <- ggplot2::ggplot(
    threshold.helper,
    ggplot2::aes(x = GENOTYPED_THRESHOLD, y = NUMBER_INDIVIDUALS)) +
    ggplot2::geom_line() +
    ggplot2::geom_point(size = 2, shape = 21, fill = "white") +
    ggplot2::scale_x_continuous(name = "Individual's missing genotyped threshold (percent)", breaks = c(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100)) +
    ggplot2::scale_y_continuous(name = "Individuals\n(blacklisted number)", breaks = y.breaks, limits = c(0, y.breaks.max)) +
    # BUGFIX: theme_bw() is a COMPLETE theme and must be added BEFORE
    # theme(); the original added it after, which silently discarded the
    # axis-text customisation below.
    ggplot2::theme_bw() +
    ggplot2::theme(
      axis.title.x = axis.title.element.text.fig,
      axis.title.y = axis.title.element.text.fig,
      axis.text.x = axis.text.element.text.fig,
      axis.text.y = axis.text.element.text.fig
    ) +
    ggplot2::facet_grid(~POP_ID)
  # plot.ind.geno.threshold
  return(plot.ind.geno.threshold)
}#End ind_genotyped_helper
#' @title blacklists_id_generator
#' @description Generate a blacklist of individual ids whose missing-genotype
#' PERCENT is at or above the threshold \code{x}; the blacklist is written to
#' \code{path.folder} as a TSV and returned in a named list
#' (\code{NULL} when no individual exceeds the threshold).
#' @rdname blacklists_id_generator
#' @export
#' @keywords internal
blacklists_id_generator <- function(x, y, path.folder) {
blacklist <- list()
# y: per-individual missingness table with INDIVIDUALS and PERCENT columns.
blacklist.id.missing.geno <- y %>%
dplyr::filter(PERCENT >= x) %>%
dplyr::ungroup(.) %>%
dplyr::select(INDIVIDUALS)
if (length(blacklist.id.missing.geno$INDIVIDUALS) > 0) {
# File/list entry is named after the threshold, e.g. blacklist.id.missing.30
blacklist.name <- stringi::stri_join("blacklist.id.missing.", x)
readr::write_tsv(
blacklist.id.missing.geno,
file.path(path.folder, paste0(as.name(blacklist.name), ".tsv")))
blacklist[[blacklist.name]] <- blacklist.id.missing.geno
} else {
blacklist <- NULL
}
return(blacklist)
}#End blacklists_id_generator
#' @title whitelists_markers_generator
#' @description Generate a whitelist of markers whose missing-genotype
#' proportion is at or below the threshold \code{x}; written to
#' \code{path.folder} as a TSV and returned in a named list. Returns
#' \code{NULL} when the whitelist would be empty or would contain every
#' marker (i.e. the threshold filters nothing).
#' @rdname whitelists_markers_generator
#' @export
#' @keywords internal
whitelists_markers_generator <- function(x, y, path.folder) {
whitelist <- list()
tidy.col <- colnames(y)
# Keep only the marker metadata columns that are present in y.
markers.meta <- purrr::keep(
.x = tidy.col,
.p = tidy.col %in% c("MARKERS", "CHROM", "LOCUS", "POS"))
whitelist.missing.geno <- dplyr::ungroup(y) %>%
dplyr::filter(MISSING_GENOTYPE_PROP <= x) %>%
dplyr::select(dplyr::one_of(markers.meta)) %>%
dplyr::distinct(MARKERS, .keep_all = TRUE)
n.whitelisted.markers <- nrow(whitelist.missing.geno)
n.markers <- nrow(y)
if (n.whitelisted.markers > 0 && n.whitelisted.markers < n.markers) {
# File/list entry is named after the threshold.
whitelist.name <- stringi::stri_join("whitelist.markers.missing.max.", x)
readr::write_tsv(
whitelist.missing.geno,
stringi::stri_join(path.folder, "/", as.name(whitelist.name), ".tsv"))
whitelist[[whitelist.name]] <- whitelist.missing.geno
} else {
whitelist <- NULL
}
return(whitelist)
}#End whitelists_markers_generator
#' @title markers_genotyped_helper
#' @description Figure helper: number of markers whitelisted at each
#' missing-genotype threshold, per population, for the mean of populations
#' and overall.
#' @param x per-population marker missingness (columns \code{MARKERS},
#' \code{POP_ID}, \code{PERCENT}).
#' @param y overall marker missingness (column \code{PERCENT}).
#' @return a ggplot object faceted by POP_ID (plus MEAN_POP and OVERALL).
#' @rdname markers_genotyped_helper
#' @export
#' @keywords internal
markers_genotyped_helper <- function(x, y) {
  # x <- res$missing.genotypes.markers.pop
  # Set the breaks for the figure
  max.markers <- dplyr::n_distinct(x$MARKERS)
  # For each candidate threshold, count markers whose missingness PERCENT is
  # at or below it (i.e. that would be whitelisted) -- overall...
  threshold.helper.overall <- y %>%
    dplyr::ungroup(.) %>%
    dplyr::summarise(
      `0` = length(PERCENT[PERCENT == 0]),
      `10` = length(PERCENT[PERCENT <= 10]),
      `20` = length(PERCENT[PERCENT <= 20]),
      `30` = length(PERCENT[PERCENT <= 30]),
      `40` = length(PERCENT[PERCENT <= 40]),
      `50` = length(PERCENT[PERCENT <= 50]),
      `60` = length(PERCENT[PERCENT <= 60]),
      `70` = length(PERCENT[PERCENT <= 70]),
      `80` = length(PERCENT[PERCENT <= 80]),
      `90` = length(PERCENT[PERCENT <= 90]),
      `100` = length(PERCENT[PERCENT <= 100])
    ) %>%
    tidyr::pivot_longer(
      data = .,
      cols = tidyselect::everything(),
      names_to = "GENOTYPED_THRESHOLD",
      values_to = "NUMBER_MARKERS"
    ) %>%
    dplyr::mutate(POP_ID = rep("OVERALL", n()))
  # ...and per population.
  threshold.helper.pop <- x %>%
    dplyr::group_by(POP_ID) %>%
    dplyr::summarise(
      `0` = length(PERCENT[PERCENT == 0]),
      `10` = length(PERCENT[PERCENT <= 10]),
      `20` = length(PERCENT[PERCENT <= 20]),
      `30` = length(PERCENT[PERCENT <= 30]),
      `40` = length(PERCENT[PERCENT <= 40]),
      `50` = length(PERCENT[PERCENT <= 50]),
      `60` = length(PERCENT[PERCENT <= 60]),
      `70` = length(PERCENT[PERCENT <= 70]),
      `80` = length(PERCENT[PERCENT <= 80]),
      `90` = length(PERCENT[PERCENT <= 90]),
      `100` = length(PERCENT[PERCENT <= 100])
    ) %>%
    tidyr::pivot_longer(
      data = .,
      cols = -POP_ID,
      names_to = "GENOTYPED_THRESHOLD",
      values_to = "NUMBER_MARKERS"
    )
  # Average of the per-population counts, shown as its own facet.
  mean.pop <- threshold.helper.pop %>%
    dplyr::group_by(GENOTYPED_THRESHOLD) %>%
    dplyr::summarise(NUMBER_MARKERS = round(mean(NUMBER_MARKERS), 0)) %>%
    dplyr::mutate(POP_ID = rep("MEAN_POP", n()))
  threshold.helper <- suppressWarnings(
    dplyr::bind_rows(threshold.helper.pop, mean.pop, threshold.helper.overall) %>%
      dplyr::mutate(
        GENOTYPED_THRESHOLD = as.numeric(GENOTYPED_THRESHOLD),
        POP_ID = factor(POP_ID, levels = c(levels(x$POP_ID), "MEAN_POP", "OVERALL"), ordered = TRUE)
      ))
  threshold.helper.pop <- mean.pop <- threshold.helper.overall <- x <- y <- NULL
  # Function to replace plyr::round_any
  rounder <- function(x, accuracy, f = round) {
    f(x / accuracy) * accuracy
  }
  # Choose y-axis breaks on a coarser grid for large marker counts.
  if (max.markers >= 1000) {
    y.breaks.by <- rounder(max.markers / 10, 100, ceiling)
    y.breaks.max <- rounder(max.markers, 1000, ceiling)
    y.breaks <- seq(0, y.breaks.max, by = y.breaks.by)
  } else {
    y.breaks.by <- rounder(max.markers / 10, 10, ceiling)
    y.breaks.max <- rounder(max.markers, 100, ceiling)
    y.breaks <- seq(0, y.breaks.max, by = y.breaks.by)
  }
  plot.markers.geno.threshold <- ggplot2::ggplot(
    threshold.helper,
    ggplot2::aes(x = GENOTYPED_THRESHOLD, y = NUMBER_MARKERS)) +
    ggplot2::geom_line() +
    ggplot2::geom_point(size = 2, shape = 21, fill = "white") +
    ggplot2::scale_x_continuous(name = "Marker's missing genotyped threshold (percent)", breaks = c(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100)) +
    ggplot2::scale_y_continuous(name = "Markers\n(whitelisted number)", breaks = y.breaks, limits = c(0, y.breaks.max)) +
    # BUGFIX: theme_bw() is a COMPLETE theme and must be added BEFORE
    # theme(); the original added it after, which silently discarded the
    # axis/strip text customisation below.
    ggplot2::theme_bw() +
    ggplot2::theme(
      axis.title.x = ggplot2::element_text(size = 10, family = "Helvetica", face = "bold"),
      axis.title.y = ggplot2::element_text(size = 10, family = "Helvetica", face = "bold"),
      axis.text.x = ggplot2::element_text(size = 8, family = "Helvetica"),#, angle = 90, hjust = 1, vjust = 0.5),
      strip.text.x = ggplot2::element_text(size = 10, family = "Helvetica", face = "bold")
    ) +
    ggplot2::facet_grid(~POP_ID)
  # plot.markers.geno.threshold
  return(plot.markers.geno.threshold)
}#End markers_genotyped_helper
#' @title generate_pcoa_plot
#' @description Generate the PCoA (Identity-By-Missingness) plots for one
#' stratification column: one scatterplot per pair of principal coordinates,
#' arranged on a single page with a common legend and optionally written
#' to a pdf file.
#' @param strata.select name (string) of the strata column used for colours.
#' @param pc.to.do list of length-2 vectors: the PC pairs to plot.
#' @param vectors PCoA vectors (with a MISSING_GENOTYPE_PERCENT column).
#' @param variance.component table of relative eigenvalues used for axis labels.
#' @param path.folder where the pdf is written.
#' @param write.plot logical, write the combined figure to disk?
#' @rdname generate_pcoa_plot
#' @keywords internal
#' @export
generate_pcoa_plot <- function(
  strata.select,
  pc.to.do,
  vectors,
  variance.component,
  path.folder,
  write.plot
) {
  pcoa.plots <- list()
  # Build one PCo-x vs PCo-y scatterplot: point size = individual's
  # missingness, colour = the selected strata column.
  pcoa_plot <- function(
    pc.to.do,
    vectors,
    variance.component) {
    pcoa.plots <- list()
    pcx <- pc.to.do[1]
    pcy <- pc.to.do[2]
    element.text.fig <- ggplot2::element_text(
      size = 12, family = "Helvetica", face = "bold")
    ibm.plot <- ggplot2::ggplot(
      vectors,
      ggplot2::aes_string(
        x = stringi::stri_join("Axis.", pcx),
        y = stringi::stri_join("Axis.", pcy), size = vectors$MISSING_GENOTYPE_PERCENT),
      environment = environment()) +
      ggplot2::geom_point(ggplot2::aes_string(colour = strata.select), alpha = 0.5) +
      ggplot2::labs(x = stringi::stri_join("PCo", pcx, " [", variance.component[pcx,2], "]")) +
      ggplot2::labs(y = stringi::stri_join("PCo", pcy, " [", variance.component[pcy,2], "]")) +
      ggplot2::scale_size_area(name = "Individual's\nmissing genotypes\n(percent)", max_size = 4) +
      # theme_bw() is a complete theme and must come BEFORE theme(),
      # otherwise the text-element customisation below is discarded.
      ggplot2::theme_bw() +
      ggplot2::theme(
        axis.title.x = element.text.fig,
        axis.title.y = element.text.fig,
        legend.title = element.text.fig,
        legend.text = element.text.fig
      )
    ibm_plot_name <- stringi::stri_join(
      "ibm.plot.pco", pcx, ".pco", pcy, ".strata.", strata.select)
    pcoa.plots[[ibm_plot_name]] <- ibm.plot
    return(pcoa.plots)
  }#End pcoa_plot
  pcoa.plots.strata <- purrr::map(
    .x = pc.to.do, .f = pcoa_plot,
    vectors = vectors,
    variance.component = variance.component) %>%
    purrr::flatten(.)
  # Use plotlist so the number of PC pairs is not hard-coded to 6:
  # the previous pcoa.plots.strata[[1]]..[[6]] indexing errored when
  # fewer pairs were requested and silently dropped extra ones.
  pcoa.plots.strata <- ggpubr::ggarrange(
    plotlist = pcoa.plots.strata,
    ncol = 2,
    nrow = 3,
    legend = "right",
    common.legend = TRUE
  )
  if (write.plot) {
    plot.name <- stringi::stri_join("ibm.plots.strata.", strata.select, ".pdf")
    ggplot2::ggsave(
      filename = file.path(path.folder, plot.name),
      plot = pcoa.plots.strata,
      width = 20, height = 15,
      dpi = 600, units = "cm",
      useDingbats = FALSE)
  }
  ibm_strata_name <- stringi::stri_join("ibm.strata.", strata.select)
  pcoa.plots[[ibm_strata_name]] <- pcoa.plots.strata
  return(pcoa.plots)
}#End generate_pcoa_plot
# tested alternative to ggpubr or cowplot package to reduce installed packages...
# the problem is my limited understanding of grid and grid extra
# note to myself: you managed to write the combined plots but you failed in
# keeping the combined plots in an object that can be return in the result list
# arrange_plots_legend <- function(plots, ncol = length(list(...)), nrow = 1, position = c("bottom", "right")) {
# # plots <- list(...)
# # plots <- unlist(plots)
# position <- match.arg(position)
#
# g <- ggplot2::ggplotGrob(plots[[1]] + ggplot2::theme(legend.position = position))$grobs
# legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
# lheight <- sum(legend$height)
# lwidth <- sum(legend$width)
#
# # gridExtra::grid.arrange(
# # do.call(gridExtra::arrangeGrob, lapply(plots, function(x)
# # x + ggplot2::theme(legend.position="none"))),
# # legend,
# # ncol = 1,
# # heights = grid::unit.c(grid::unit(1, "npc") - lheight, lheight))
# gl <- lapply(plots, function(x) x + ggplot2::theme(legend.position="none"))
# gl <- c(gl, ncol = ncol, nrow = nrow)
# combined <- switch(
# position,
# "bottom" = gridExtra::arrangeGrob(
# do.call(gridExtra::arrangeGrob, gl),
# legend,
# ncol = 1,
# heights = grid::unit.c(grid::unit(1, "npc") - lheight, lheight)),
# "right" = gridExtra::arrangeGrob(
# do.call(gridExtra::arrangeGrob, gl),
# legend,
# ncol = 2,
# widths = grid::unit.c(grid::unit(1, "npc") - lwidth, lwidth)))
#
# grid::grid.newpage()
# grid::grid.draw(combined)
# return(combined)
# }
#' @title pct_missing_by_total
#' @description Generates plot missing by total: for one stratification
#' column, the mean percentage of missing genotypes attributable to each
#' stratum is plotted against the total number of missing genotypes per
#' marker, with a quantile interval and a weighted log-linear fit per stratum.
#' @rdname pct_missing_by_total
#' @keywords internal
#' @export
pct_missing_by_total <- function(
    strata.select, data, ci = 0.95, path.folder, write.plot = TRUE) {
  # strata.select: string naming the strata column of `data` to analyse.
  # data: tidy genotypes; must contain MARKERS, GT_MISSING_BINARY
  #       (0 = missing genotype) and the column named by strata.select.
  # ci: width of the quantile interval drawn around the mean percent missing.
  # Returns a list holding the tidy lm results and the ggplot object;
  # also writes the figure to path.folder when write.plot = TRUE.

  # rename strata column and convert GT_MISSING_BINARY to missing or not
  data %<>%
    # dplyr::rename(STRATA_SELECT = data[[!!(strata.select)]]) %>%
    dplyr::rename(STRATA_SELECT = !! strata.select) %>%
    dplyr::mutate(is.missing = GT_MISSING_BINARY == 0)
  # count number of strata
  n.strata <- dplyr::n_distinct(data$STRATA_SELECT)
  # summarize missingness by locus and factor column
  miss.smry <- data %>%
    dplyr::group_by(MARKERS, STRATA_SELECT) %>%
    dplyr::summarise(num.missing.col = sum(is.missing)) %>%
    dplyr::left_join(
      data %>%
        dplyr::group_by(MARKERS) %>%
        dplyr::summarise(num.missing.total = sum(is.missing)),
      by = "MARKERS"
    ) %>%
    # markers with no missing genotypes carry no information here
    dplyr::filter(num.missing.total > 0) %>%
    dplyr::mutate(
      pct.missing = num.missing.col / num.missing.total
    ) %>%
    dplyr::ungroup(.)
  # summarize overall percent missing by factor column
  pct.miss.col <- miss.smry %>%
    dplyr::group_by(STRATA_SELECT) %>%
    dplyr::summarise(
      pct.missing = sum(num.missing.col) / sum(num.missing.total)
    ) %>%
    dplyr::ungroup(.) %>%
    dplyr::arrange(dplyr::desc(pct.missing)) %>%
    dplyr::mutate(STRATA_SELECT = as.character(STRATA_SELECT))
  # Keep string to reorder factor based on marginal % missingness
  level.miss <- pct.miss.col$STRATA_SELECT
  pct.miss.col <- pct.miss.col %>%
    dplyr::mutate(
      STRATA_SELECT = factor(STRATA_SELECT, levels = level.miss, ordered = TRUE)
    )
  # summarize % missing for each total number missing in each factor level (label)
  miss.smry %<>%
    dplyr::group_by(STRATA_SELECT, num.missing.total) %>%
    dplyr::summarize(
      n = length(pct.missing),
      mean.miss = mean(pct.missing),
      lci = stats::quantile(pct.missing, (1 - ci) / 2),
      uci = stats::quantile(pct.missing, (1 + ci) / 2)
    ) %>%
    dplyr::ungroup(.) %>%
    dplyr::mutate(
      STRATA_SELECT = factor(STRATA_SELECT, levels = level.miss, ordered = TRUE)
    )
  # to store results
  res <- list()
  # Tidy result of lm: one regression of mean % missing on
  # log10(total missing) per stratum, weighted by the number of markers.
  lm.res.name <- stringi::stri_join("pct.missing.lm.", "strata.", strata.select)
  res[[lm.res.name]] <- miss.smry %>%
    split(x = ., f = .$STRATA_SELECT) %>%
    purrr::map_df(
      ~ broom::tidy(
        stats::lm(
          mean.miss ~ log10(num.missing.total),
          weights = n,
          data = .
        )
      ),
      .id = "STRATA_SELECT"
    )
  # Wide table of intercept (a) / slope (b) per stratum for geom_abline below.
  miss.lm.coefs <- res[[lm.res.name]] %>%
    dplyr::select(-c(std.error, statistic, p.value)) %>%
    grur::rad_wide(x = ., names_from = term, values_from = estimate, tidy = TRUE) %>%
    dplyr::rename(a = `(Intercept)`, b = `log10(num.missing.total)`) %>%
    dplyr::mutate(
      STRATA_SELECT = factor(STRATA_SELECT, levels = level.miss, ordered = TRUE)
    ) %>%
    dplyr::arrange(STRATA_SELECT)
  axis.title.element <- ggplot2::element_text(
    size = 12, family = "Helvetica", face = "bold"
  )
  axis.text.element <- ggplot2::element_text(size = 10, family = "Helvetica")
  # generate figure
  fig.name <- stringi::stri_join("pct.missing.plot", ".strata.", strata.select)
  res[[fig.name]] <- ggplot2::ggplot(
    miss.smry, ggplot2::aes_string(x = "num.missing.total")) +
    ggplot2::geom_segment(
      ggplot2::aes_string(xend = "num.missing.total", y = "lci", yend = "uci"),
      color = "gray50"
    ) +
    ggplot2::geom_point(ggplot2::aes_string(y = "mean.miss", size = "n")) +
    ggplot2::scale_size(breaks = c(0, 10, 100, 1000, 10000)) +
    ggplot2::geom_abline(
      data = miss.lm.coefs,
      ggplot2::aes(intercept = a, slope = b),
      color = "yellow"
    ) +
    # the expected % missing by random (1 / number of factor levels)
    ggplot2::geom_hline(yintercept = 1 / dplyr::n_distinct(data$STRATA_SELECT)) +
    ggplot2::geom_hline(
      data = pct.miss.col,
      ggplot2::aes_string(yintercept = "pct.missing"),
      color = "red"
    ) +
    ggplot2::scale_x_log10() +
    ggplot2::labs(
      title = stringi::stri_join("Strata: ", strata.select),
      x = expression(
        paste("Total number of missing genotypes (", log[10], ")")
      ),
      y = "Missing genotypes (mean percentage)",
      size = "Markers missing (number)"
    ) +
    ggplot2::theme(
      axis.title.x = axis.title.element,
      axis.text.x = axis.text.element,
      axis.title.y = axis.title.element,
      axis.text.y = axis.text.element,
      legend.title = axis.title.element,
      legend.text = axis.text.element
    ) +
    # NOTE(review): theme_bw() is a complete theme and resets the theme()
    # customisation applied just above — confirm whether theme_bw() was
    # meant to come first so the custom text elements actually apply.
    ggplot2::theme_bw() +
    ggplot2::facet_wrap(~ STRATA_SELECT)
  # fig
  if (write.plot) {
    ggplot2::ggsave(
      filename = stringi::stri_join(path.folder, "/", fig.name, ".pdf"),
      plot = res[[fig.name]],
      width = n.strata * 3,
      height = n.strata * 3,
      dpi = 600,
      units = "cm",
      useDingbats = FALSE,
      limitsize = FALSE
    )
  }
  return(res)
}#End pct_missing_by_total
#' @title missing_rda
#' @description The function uses Permutation test Anova and
#' Redundancy Analysis to highlight potential patterns of missingness in the data
#' based on the strata provided. Each strata column is tested separately and,
#' when more than one is available, also combined in a single RDA model.
#' @param data missingness data (or the path to a .rad file holding it), one
#' row per individual matching \code{strata$INDIVIDUALS}.
#' @param strata data frame with an INDIVIDUALS column plus one column per
#' stratification to test.
#' @param permutations number of permutations used by \code{vegan::anova.cca}.
#' @param parallel.core number of cores used by the permutation test.
#' @return a list with the PCoA object and, per strata, the RDA and anova results.
#' @rdname missing_rda
#' @keywords internal
#' @export
# IMPORT FROM ------------------------------------------------------------------
#' @importFrom dplyr distinct rename arrange mutate select summarise group_by ungroup filter inner_join left_join
#' @importFrom ggpubr ggarrange
#' @importFrom ape pcoa
#' @importFrom radiator tidy_genomic_data change_pop_names ibdg_fh detect_all_missing write_rad
#' @importFrom cowplot plot_grid align_plots
#' @importFrom Matrix Matrix
#' @importFrom vegan anova.cca rda
#' @importFrom adespatial dist.ldc
#' @importFrom data.table as.data.table dcast.data.table melt.data.table
missing_rda <- function(
  data,
  strata,
  permutations = 1000,
  parallel.core = parallel::detectCores() - 1
) {
  res <- list()
  # `data` may be a path to a .rad file or an in-memory object.
  if (is.vector(data)) {
    res$data.pcoa <- radiator::read_rad(data)
  } else {
    res$data.pcoa <- data
  }
  data <- NULL
  res$data.pcoa %<>% dplyr::ungroup(.)
  # PCoA on Jaccard distance (binary)
  res$data.pcoa <- ape::pcoa(
    D = sqrt(adespatial::dist.ldc(
      Y = res$data.pcoa,
      method = "jaccard",
      binary = TRUE, silent = TRUE)),
    rn = strata$INDIVIDUALS)
  # D is Euclidean because the function outputs D[jk] = sqrt(1-S[jk])
  # Anova on the pcoa data
  # rda_anova: run one RDA + permutation anova for the given strata column(s).
  rda_anova <- function(
    strata.select,
    data.pcoa,
    strata,
    permutations = 100,
    parallel.core = parallel::detectCores() - 1) {
    # strata.select <- "POP_ID" #test
    # data.pcoa <- res$data.pcoa #test
    res.rda <- list()#to store results
    # Generate the function formula --------------------------------------------
    # based on increasing levels for available strata
    # (terms with fewer levels enter the model first)
    grur_count <- function(x) length(unique(x))
    strata.formula <- dplyr::select(strata, dplyr::one_of(strata.select)) %>%
      dplyr::summarise_all(.tbl = ., .funs = grur_count) %>%
      tidyr::pivot_longer(
        data = .,
        cols = tidyselect::everything(),
        names_to = "TERMS",
        values_to = "LEVELS"
      ) %>%
      dplyr::arrange(LEVELS) %>%
      dplyr::select(TERMS) %>%
      purrr::flatten_chr(.)
    formula.grur <- stats::as.formula(
      paste("data.pcoa$vectors ~ ", paste(strata.formula, collapse= "+")))
    # Check how many strata.select are used and messages -----------------------
    # if (length(strata.select) > 1) {
    message("Redundancy Analysis using strata: ",
            stringi::stri_join(strata.select, collapse = ", "))
    rda_strata_name <- stringi::stri_join("rda.strata", strata.select, collapse = "_", sep = "_")
    anova_strata_name <- stringi::stri_join("anova.strata.", strata.select, collapse = "_", sep = "_")
    # } else {
    #   rda_strata_name <- stringi::stri_join("rda.strata.", strata.select)
    #   anova_strata_name <- stringi::stri_join("anova.strata.", strata.select)
    #   strata.select <- rlang::sym(strata.select)
    #   message("Redundancy Analysis using strata: ", strata.select)
    # }
    message("RDA model formula: ", format(formula.grur))
    #RDA -----------------------------------------------------------------------
    data.rda <- vegan::rda(formula.grur, strata)
    # data.rda <- vegan::rda(rlang::`f_rhs<-`(data.pcoa$vectors ~ ., strata.select), strata)
    # data.rda <- vegan::rda(rlang::`f_rhs<-`(data.pcoa$vectors ~ ., stats::reformulate(termlabels = strata.select)), strata)
    # data.rda <- vegan::rda(stats::reformulate(termlabels = strata.select, response = data.pcoa$vectors), data = strata)
    # data.rda <- vegan::rda(res$data.pcoa$vectors ~ POP_ID + POP_TYPE + ECOTYPE, strata)
    # ANOVA overall test (per-term, sequential from first to last)
    message("Permutation test for Redundancy Analysis using strata: ", stringi::stri_join(strata.select, collapse = ", "))
    # data.anova <- vegan::anova.cca(object = data.rda, permutations = permutations, parallel = parallel.core)
    # data.anova <- suppressWarnings(broom::tidy(vegan::anova.cca(object = data.rda, permutations = permutations, parallel = parallel.core)))
    data.anova <- suppressWarnings(broom::tidy(vegan::anova.cca(object = data.rda, by = "terms", model = "direct", permutations = permutations, parallel = parallel.core)))
    p.value.message <- dplyr::select(data.anova, STRATA = term, VARIANCE = Variance, P_VALUE = p.value) %>%
      dplyr::filter(STRATA %in% strata.formula)
    message("\nHypothesis based on the strata provided")
    message("    Null Hypothesis (H0): No pattern of missingness in the data between strata")
    message("    Alternative Hypothesis (H1): Presence of pattern(s) of missingness in the data between strata\n")
    # message("    p-value: ", data.anova$p.value[1], "\n")
    print(p.value.message)
    message("    note: low p-value -> reject the null hypothesis\n")
    #return results
    res.rda[[rda_strata_name]] <- data.rda
    res.rda[[anova_strata_name]] <- data.anova
    return(res.rda)
  }
  # All strata columns except the individual ids.
  strata.select <- purrr::keep(.x = colnames(strata),
                               .p = !colnames(strata) %in% "INDIVIDUALS")
  if (length(strata.select) > 1) {
    message("\nSeparate analysis of strata\n")
    res$rda_separate_strata <- purrr::map(
      .x = strata.select,
      .f = rda_anova,
      data.pcoa = res$data.pcoa,
      strata = strata,
      permutations = permutations,
      parallel.core = parallel.core) %>%
      purrr::flatten(.)
    message("\nCombined strata analysis\n")
    res$rda_combined_strata <- rda_anova(
      strata.select = strata.select,
      data.pcoa = res$data.pcoa,
      strata = strata,
      permutations = permutations,
      parallel.core = parallel.core)
  } else {
    res$rda_combined_strata <- rda_anova(
      strata.select = strata.select,
      data.pcoa = res$data.pcoa,
      strata = strata,
      permutations = permutations,
      parallel.core = parallel.core)
  }
  return(res)
}#End missing_rda
# parallel_core_opt ------------------------------------------------------------
#' @title parallel_core_opt
#' @description Optimization of parallel core argument for radiator.
#' Returns an even number of cores whenever possible, at least 1 and at most
#' all available cores minus 2, optionally capped by \code{max.core}.
#' @param parallel.core requested number of cores (\code{NULL} = autodetect).
#' @param max.core optional hard ceiling on the number of cores.
#' @keywords internal
#' @export
parallel_core_opt <- function(parallel.core = NULL, max.core = NULL) {
  # strategy:
  # - floor the request to an even number of cores,
  # - clamp between 1 and parallel::detectCores() - 2.
  if (is.null(parallel.core)) {
    # Clamp to >= 1: on machines with 1 or 2 cores, detectCores() - 2
    # would otherwise yield 0 or -1 cores.
    parallel.core <- max(1, parallel::detectCores() - 2)
  } else {
    parallel.core <- floor(parallel.core / 2) * 2
    parallel.core <- max(1, min(parallel.core, parallel::detectCores() - 2))
  }
  if (is.null(max.core)) {
    parallel.core.opt <- parallel.core
  } else {
    # Honour the requested ceiling (also floored to an even value),
    # never dropping below 1 core (max.core = 1 previously returned 0).
    parallel.core.opt <- max(1, min(parallel.core, floor(max.core / 2) * 2))
  }
  return(parallel.core.opt)
}#End parallel_core_opt
# using future and future.apply -------------------------------------------------
#' @name grur_future
#' @title grur parallel function
#' @description Updating grur to use future: map `.f` over `.x` with
#' \code{furrr}, optionally splitting `.x` into chunks first, and flatten the
#' result according to \code{flat.future}.
#' @param .x data to iterate over (vector, list, or data frame).
#' @param .f function applied to each element/chunk of `.x`.
#' @param flat.future which furrr mapper to use (controls the output type).
#' @param split.vec split `.x` into \code{split.chunks} chunks before mapping?
#' @param split.with optional column name of `.x` used to define the groups.
#' @param split.chunks number of chunks when splitting.
#' @param parallel.core number of workers (1 = sequential).
# @inheritParams future::plan
# @inheritParams future::availableCores
#' @inheritParams future.apply::future_apply
#' @rdname grur_future
#' @export
#' @keywords internal
grur_future <- function(
  .x,
  .f,
  flat.future = c("int", "chr", "dfr", "dfc", "walk", "drop"),
  split.vec = FALSE,
  split.with = NULL,
  split.chunks = 4L,
  parallel.core = parallel::detectCores() - 1,
  ...
) {
  # Narrow console width for cleaner progress output; restored on exit,
  # along with a sequential future plan when workers were spawned.
  opt.change <- getOption("width")
  options(width = 70)
  on.exit(options(width = opt.change), add = TRUE)
  on.exit(if (parallel.core > 1L) future::plan(strategy = "sequential"), add = TRUE)
  # argument for flattening the results
  flat.future <- match.arg(
    arg = flat.future,
    choices = c("int", "chr", "dfr", "dfc", "walk", "drop"),
    several.ok = FALSE
  )
  # splitting into chunks-------------------------------------------------------
  if (split.vec && is.null(split.with)) {
    # d: data, data length, data size
    # sv: split vector
    d <- .x
    df <- FALSE
    if (any(class(d) %in% c("tbl_df","tbl","data.frame"))) {
      d <- nrow(d)
      df <- TRUE
    }
    if (length(d) > 1L) d <- length(d)
    stopifnot(is.integer(d))
    # Assign each row/element to one of split.chunks roughly equal chunks.
    sv <- as.integer(floor((split.chunks * (seq_len(d) - 1) / d) + 1))
    # sv <- as.integer(floor((parallel.core * cpu.rounds * (seq_len(d) - 1) / d) + 1))
    stopifnot(length(sv) == d)
    # split
    if (df) {
      .x$SPLIT_VEC <- sv
      # NOTE(review): group_split with the string literal "SPLIT_VEC" groups
      # by a constant, not by the SPLIT_VEC column — confirm the intent was
      # dplyr::group_split(.x, SPLIT_VEC, .keep = FALSE).
      .x %<>% dplyr::group_split(.tbl = ., "SPLIT_VEC", .keep = FALSE)
    } else {
      .x %<>% split(x = ., f = sv)
    }
  }
  if (!is.null(split.with)) {
    # check
    if (length(split.with) != 1 || !is.character(split.with)) {
      rlang::abort(message = "Contact author: problem with parallel computation")
    }
    .data <- NULL
    stopifnot(rlang::has_name(.x, split.with))
    if (split.vec) {
      # Chunk the distinct values of split.with, then split rows by chunk.
      sv <- dplyr::distinct(.x, .data[[split.with]])
      d <- nrow(sv)
      sv$SPLIT_VEC <- as.integer(floor((split.chunks * (seq_len(d) - 1) / d) + 1))
      .x %<>%
        dplyr::left_join(sv, by = split.with) %>%
        dplyr::group_split(.tbl = ., "SPLIT_VEC", .keep = FALSE)
    } else {
      .x %<>% dplyr::group_split(.tbl = ., .data[[split.with]], .keep = TRUE)
    }
  }
  # Choose the future plan: sequential for 1 core, multisession otherwise;
  # never spawn more workers than there are chunks.
  if (parallel.core == 1L) {
    future::plan(strategy = "sequential")
  } else {
    parallel.core <- parallel_core_opt(parallel.core = parallel.core)
    lx <- length(.x)
    if (lx < parallel.core) {
      future::plan(strategy = "multisession", workers = lx)
    } else {
      future::plan(strategy = "multisession", workers = parallel.core)
    }
  }
  # .x <- future.apply::future_apply(X = .x, FUN = .f, ...)
  # capture dots
  # d <- rlang::dots_list(..., .ignore_empty = "all", .preserve_empty = TRUE, .homonyms = "first")
  # if (bind.rows) .x %<>% dplyr::bind_rows(.)
  # Run the function in parallel and account for dots-dots-dots argument
  rad_map <- switch(flat.future,
                    int = {furrr::future_map_int},
                    chr = {furrr::future_map_chr},
                    dfr = {furrr::future_map_dfr},
                    dfc = {furrr::future_map_dfc},
                    walk = {furrr::future_walk},
                    drop = {furrr::future_map}
  )
  opts <- furrr::furrr_options(globals = FALSE)
  if (length(list(...)) == 0) {
    .x %<>% rad_map(.x = ., .f = .f, .options = opts)
  } else {
    .x %<>% rad_map(.x = ., .f = .f, ..., .options = opts)
  }
  return(.x)
}#End grur_future
# PIVOT-GATHER-CAST ------------------------------------------------------------
# rationale for doing this is that i'm tired of using tidyverse or data.table semantics
# tidyr changed from gather/spread to pivot_ functions but their are still very slow compared
# to 1. the original gather/spread and data.table equivalent...
#' @title rad_long
#' @description Reshape a wide table to long format, through either
#' \code{tidyr::pivot_longer} (\code{tidy = TRUE}) or
#' \code{data.table::melt.data.table} (default, faster on large data).
#' The result is always returned as a tibble on the data.table path.
#' @rdname rad_long
#' @keywords internal
#' @export
rad_long <- function(
  x,
  cols = NULL,
  measure_vars = NULL,
  names_to = NULL,
  values_to = NULL,
  variable_factor = TRUE,
  keep_rownames = FALSE,
  tidy = FALSE
){
  # tidyr path: `cols` holds the id columns, so pivot everything BUT them.
  if (tidy) {
    return(
      tidyr::pivot_longer(
        data = x,
        cols = -cols,
        names_to = names_to,
        values_to = values_to
      )
    )
  }
  # data.table path: melt with `cols` as id.vars, then back to a tibble.
  long <- data.table::melt.data.table(
    data = data.table::as.data.table(x, keep.rownames = keep_rownames),
    id.vars = cols,
    measure.vars = measure_vars,
    variable.name = names_to,
    value.name = values_to,
    variable.factor = variable_factor
  )
  tibble::as_tibble(long)
}#rad_long
#' @title rad_wide
#' @description Reshape a long table to wide format, through either
#' \code{tidyr::pivot_wider} (\code{tidy = TRUE}) or
#' \code{data.table::dcast.data.table} (default, faster on large data).
#' The result is always returned as a tibble on the data.table path.
#' @rdname rad_wide
#' @keywords internal
#' @export
rad_wide <- function(
  x ,
  formula = NULL,
  names_from = NULL,
  values_from = NULL,
  values_fill = NULL,
  sep = "_",
  tidy = FALSE
){
  if (tidy) {
    # tidyr path. The assignment is the last expression on purpose: like the
    # original `%<>%`, the value is returned invisibly.
    x <- tidyr::pivot_wider(
      data = x,
      names_from = names_from,
      values_from = values_from,
      values_fill = values_fill
    )
  } else {
    # data.table path: cast with the supplied formula, back to a tibble.
    tibble::as_tibble(
      data.table::dcast.data.table(
        data = data.table::as.data.table(x),
        formula = formula,
        value.var = values_from,
        sep = sep,
        fill = values_fill
      )
    )
  }
}#rad_wide
|
/R/internal.R
|
no_license
|
thierrygosselin/grur
|
R
| false
| false
| 32,467
|
r
|
#' @title ind_genotyped_helper
#' @description Help individual's genotyped threshold: summarise, for each
#' 10-percent missingness threshold, how many individuals would be
#' blacklisted (per population, population mean and overall), and plot the
#' result.
#' @param x tibble of individual missingness with columns INDIVIDUALS,
#' POP_ID and PERCENT.
#' @return a ggplot object faceted by POP_ID plus MEAN_POP and OVERALL panels.
#' @rdname ind_genotyped_helper
#' @export
#' @keywords internal
ind_genotyped_helper <- function(x) {
  # Set the breaks for the figure
  max.ind <- dplyr::n_distinct(x$INDIVIDUALS)

  # Count individuals whose missingness is at or above each 10 % threshold,
  # across all populations combined.
  threshold.helper.overall <- x %>%
    dplyr::summarise(
      `0` = length(PERCENT[PERCENT >= 0]),
      `10` = length(PERCENT[PERCENT >= 10]),
      `20` = length(PERCENT[PERCENT >= 20]),
      `30` = length(PERCENT[PERCENT >= 30]),
      `40` = length(PERCENT[PERCENT >= 40]),
      `50` = length(PERCENT[PERCENT >= 50]),
      `60` = length(PERCENT[PERCENT >= 60]),
      `70` = length(PERCENT[PERCENT >= 70]),
      `80` = length(PERCENT[PERCENT >= 80]),
      `90` = length(PERCENT[PERCENT >= 90]),
      `100` = length(PERCENT[PERCENT == 100])
    ) %>%
    tidyr::pivot_longer(
      data = .,
      cols = tidyselect::everything(),
      names_to = "GENOTYPED_THRESHOLD",
      values_to = "NUMBER_INDIVIDUALS"
    ) %>%
    dplyr::mutate(POP_ID = rep("OVERALL", n()))

  # Same counts, computed separately for each population.
  threshold.helper.pop <- x %>%
    dplyr::group_by(POP_ID) %>%
    dplyr::summarise(
      `0` = length(PERCENT[PERCENT >= 0]),
      `10` = length(PERCENT[PERCENT >= 10]),
      `20` = length(PERCENT[PERCENT >= 20]),
      `30` = length(PERCENT[PERCENT >= 30]),
      `40` = length(PERCENT[PERCENT >= 40]),
      `50` = length(PERCENT[PERCENT >= 50]),
      `60` = length(PERCENT[PERCENT >= 60]),
      `70` = length(PERCENT[PERCENT >= 70]),
      `80` = length(PERCENT[PERCENT >= 80]),
      `90` = length(PERCENT[PERCENT >= 90]),
      `100` = length(PERCENT[PERCENT == 100])
    ) %>%
    tidyr::pivot_longer(
      data = .,
      cols = -POP_ID,
      names_to = "GENOTYPED_THRESHOLD",
      values_to = "NUMBER_INDIVIDUALS"
    )

  # Average of the per-population counts, shown as its own MEAN_POP panel.
  mean.pop <- threshold.helper.pop %>%
    dplyr::group_by(GENOTYPED_THRESHOLD) %>%
    dplyr::summarise(
      NUMBER_INDIVIDUALS = round(mean(NUMBER_INDIVIDUALS), 0)
    ) %>%
    dplyr::mutate(POP_ID = rep("MEAN_POP", n()))

  threshold.helper <- suppressWarnings(
    dplyr::bind_rows(threshold.helper.pop, mean.pop, threshold.helper.overall) %>%
      dplyr::mutate(
        GENOTYPED_THRESHOLD = as.numeric(GENOTYPED_THRESHOLD),
        POP_ID = factor(POP_ID, levels = c(levels(x$POP_ID), "MEAN_POP", "OVERALL"), ordered = TRUE)
      ))
  threshold.helper.pop <- mean.pop <- threshold.helper.overall <- x <- NULL

  # Function to replace plyr::round_any
  rounder <- function(x, accuracy, f = round) {
    f(x / accuracy) * accuracy
  }

  # Choose y-axis breaks on a scale adapted to the number of individuals.
  if (max.ind >= 1000) {
    y.breaks.by <- rounder(max.ind/10, 100, ceiling)
    y.breaks.max <- rounder(max.ind, 1000, ceiling)
  } else {
    y.breaks.by <- rounder(max.ind/10, 10, ceiling)
    y.breaks.max <- rounder(max.ind, 100, ceiling)
  }
  y.breaks <- seq(0, y.breaks.max, by = y.breaks.by)

  axis.title.element.text.fig <- ggplot2::element_text(
    size = 12, family = "Helvetica", face = "bold")
  axis.text.element.text.fig <- ggplot2::element_text(
    size = 10, family = "Helvetica")
  plot.ind.geno.threshold <- ggplot2::ggplot(
    threshold.helper,
    ggplot2::aes(x = GENOTYPED_THRESHOLD, y = NUMBER_INDIVIDUALS)) +
    ggplot2::geom_line() +
    ggplot2::geom_point(size = 2, shape = 21, fill = "white") +
    ggplot2::scale_x_continuous(name = "Individual's missing genotyped threshold (percent)", breaks = c(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100)) +
    ggplot2::scale_y_continuous(name = "Individuals\n(blacklisted number)", breaks = y.breaks, limits = c(0, y.breaks.max)) +
    # theme_bw() is a *complete* theme: it must be applied BEFORE theme(),
    # otherwise the text-element customisation below is discarded.
    ggplot2::theme_bw() +
    ggplot2::theme(
      axis.title.x = axis.title.element.text.fig,
      axis.title.y = axis.title.element.text.fig,
      axis.text.x = axis.text.element.text.fig,
      axis.text.y = axis.text.element.text.fig
    ) +
    ggplot2::facet_grid(~POP_ID)
  # plot.ind.geno.threshold
  return(plot.ind.geno.threshold)
}#End ind_genotyped_helper
#' @title blacklists_id_generator
#' @description Generate blacklist of ids: the individuals whose missing
#' genotype percentage is at or above the threshold. The blacklist is written
#' to disk and returned in a named list; NULL when no individual qualifies.
#' @rdname blacklists_id_generator
#' @export
#' @keywords internal
blacklists_id_generator <- function(x, y, path.folder) {
  # x: missingness threshold (percent); y: tibble with INDIVIDUALS + PERCENT.
  flagged <- y %>%
    dplyr::filter(PERCENT >= x) %>%
    dplyr::ungroup(.) %>%
    dplyr::select(INDIVIDUALS)
  # Nothing at or above the threshold: signal with NULL.
  if (length(flagged$INDIVIDUALS) == 0) {
    return(NULL)
  }
  out.name <- stringi::stri_join("blacklist.id.missing.", x)
  readr::write_tsv(
    flagged,
    file.path(path.folder, paste0(out.name, ".tsv")))
  res <- list()
  res[[out.name]] <- flagged
  return(res)
}#End blacklists_id_generator
#' @title whitelists_markers_generator
#' @description Generate whitelists of markers whose missing genotype
#' proportion is at or below the threshold. The whitelist is written to disk
#' and returned in a named list; NULL when it would be empty or would contain
#' every marker (a no-op whitelist).
#' @rdname whitelists_markers_generator
#' @export
#' @keywords internal
whitelists_markers_generator <- function(x, y, path.folder) {
  # x: max missing proportion; y: tidy data with MISSING_GENOTYPE_PROP and
  # marker metadata columns.
  # Marker metadata columns actually present in the tidy data.
  meta.cols <- intersect(colnames(y), c("MARKERS", "CHROM", "LOCUS", "POS"))
  keepers <- y %>%
    dplyr::ungroup(.) %>%
    dplyr::filter(MISSING_GENOTYPE_PROP <= x) %>%
    dplyr::select(dplyr::one_of(meta.cols)) %>%
    dplyr::distinct(MARKERS, .keep_all = TRUE)
  n.kept <- nrow(keepers)
  # Only informative whitelists are kept: skip when nothing passes the
  # threshold or when every marker does.
  if (n.kept == 0 || n.kept >= nrow(y)) {
    return(NULL)
  }
  wl.name <- stringi::stri_join("whitelist.markers.missing.max.", x)
  readr::write_tsv(
    keepers,
    stringi::stri_join(path.folder, "/", wl.name, ".tsv"))
  out <- list()
  out[[wl.name]] <- keepers
  return(out)
}#End whitelists_markers_generator
#' @title markers_genotyped_helper
#' @description Help individual's genotyped threshold: summarise, for each
#' 10-percent missingness threshold, how many markers would be whitelisted
#' (per population, population mean and overall), and plot the result.
#' @param x per-population marker missingness (MARKERS, POP_ID, PERCENT).
#' @param y overall marker missingness (MARKERS, PERCENT).
#' @return a ggplot faceted by POP_ID plus MEAN_POP and OVERALL panels.
#' @rdname markers_genotyped_helper
#' @export
#' @keywords internal
markers_genotyped_helper <- function(x, y) {
  # x <- res$missing.genotypes.markers.pop
  # Set the breaks for the figure
  max.markers <- dplyr::n_distinct(x$MARKERS)
  # Markers with missingness at or below each threshold, all pops combined.
  threshold.helper.overall <- y %>%
    dplyr::ungroup(.) %>%
    dplyr::summarise(
      `0` = length(PERCENT[PERCENT == 0]),
      `10` = length(PERCENT[PERCENT <= 10]),
      `20` = length(PERCENT[PERCENT <= 20]),
      `30` = length(PERCENT[PERCENT <= 30]),
      `40` = length(PERCENT[PERCENT <= 40]),
      `50` = length(PERCENT[PERCENT <= 50]),
      `60` = length(PERCENT[PERCENT <= 60]),
      `70` = length(PERCENT[PERCENT <= 70]),
      `80` = length(PERCENT[PERCENT <= 80]),
      `90` = length(PERCENT[PERCENT <= 90]),
      `100` = length(PERCENT[PERCENT <= 100])
    ) %>%
    tidyr::pivot_longer(
      data = .,
      cols = tidyselect::everything(),
      names_to = "GENOTYPED_THRESHOLD",
      values_to = "NUMBER_MARKERS"
    ) %>%
    dplyr::mutate(POP_ID = rep("OVERALL", n()))
  # Same counts, computed separately for each population.
  threshold.helper.pop <- x %>%
    dplyr::group_by(POP_ID) %>%
    dplyr::summarise(
      `0` = length(PERCENT[PERCENT == 0]),
      `10` = length(PERCENT[PERCENT <= 10]),
      `20` = length(PERCENT[PERCENT <= 20]),
      `30` = length(PERCENT[PERCENT <= 30]),
      `40` = length(PERCENT[PERCENT <= 40]),
      `50` = length(PERCENT[PERCENT <= 50]),
      `60` = length(PERCENT[PERCENT <= 60]),
      `70` = length(PERCENT[PERCENT <= 70]),
      `80` = length(PERCENT[PERCENT <= 80]),
      `90` = length(PERCENT[PERCENT <= 90]),
      `100` = length(PERCENT[PERCENT <= 100])
    ) %>%
    tidyr::pivot_longer(
      data = .,
      cols = -POP_ID,
      names_to = "GENOTYPED_THRESHOLD",
      values_to = "NUMBER_MARKERS"
    )
  # Average of the per-population counts, shown as its own MEAN_POP panel.
  mean.pop <- threshold.helper.pop %>%
    dplyr::group_by(GENOTYPED_THRESHOLD) %>%
    dplyr::summarise(NUMBER_MARKERS = round(mean(NUMBER_MARKERS), 0)) %>%
    dplyr::mutate(POP_ID = rep("MEAN_POP", n()))
  threshold.helper <- suppressWarnings(
    dplyr::bind_rows(threshold.helper.pop, mean.pop, threshold.helper.overall) %>%
      dplyr::mutate(
        GENOTYPED_THRESHOLD = as.numeric(GENOTYPED_THRESHOLD),
        POP_ID = factor(POP_ID, levels = c(levels(x$POP_ID), "MEAN_POP", "OVERALL"), ordered = TRUE)
      ))
  threshold.helper.pop <- mean.pop <- threshold.helper.overall <- x <- y <- NULL
  #Function to replace plyr::round_any
  rounder <- function(x, accuracy, f = round) {
    f(x / accuracy) * accuracy
  }
  # y-axis breaks on a scale adapted to the number of markers.
  if (max.markers >= 1000) {
    y.breaks.by <- rounder(max.markers / 10, 100, ceiling)
    y.breaks.max <- rounder(max.markers, 1000, ceiling)
    y.breaks <- seq(0, y.breaks.max, by = y.breaks.by)
  } else {
    y.breaks.by <- rounder(max.markers / 10, 10, ceiling)
    y.breaks.max <- rounder(max.markers, 100, ceiling)
    y.breaks <- seq(0, y.breaks.max, by = y.breaks.by)
  }
  plot.markers.geno.threshold <- ggplot2::ggplot(
    threshold.helper,
    ggplot2::aes(x = GENOTYPED_THRESHOLD, y = NUMBER_MARKERS)) +
    ggplot2::geom_line() +
    ggplot2::geom_point(size = 2, shape = 21, fill = "white") +
    ggplot2::scale_x_continuous(name = "Marker's missing genotyped threshold (percent)", breaks = c(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100)) +
    ggplot2::scale_y_continuous(name = "Markers\n(whitelisted number)", breaks = y.breaks, limits = c(0, y.breaks.max)) +
    ggplot2::theme(
      axis.title.x = ggplot2::element_text(size = 10, family = "Helvetica", face = "bold"),
      axis.title.y = ggplot2::element_text(size = 10, family = "Helvetica", face = "bold"),
      axis.text.x = ggplot2::element_text(size = 8, family = "Helvetica"),#, angle = 90, hjust = 1, vjust = 0.5),
      strip.text.x = ggplot2::element_text(size = 10, family = "Helvetica", face = "bold")
    ) +
    # NOTE(review): theme_bw() is a complete theme and resets the theme()
    # customisation applied just above — confirm whether theme_bw() was
    # meant to come first so the custom text elements actually apply.
    ggplot2::theme_bw() +
    ggplot2::facet_grid(~POP_ID)
  # plot.markers.geno.threshold
  return(plot.markers.geno.threshold)
}#End markers_genotyped_helper
#' @title generate_pcoa_plot
#' @description Generate the PCoA plots (identity-by-missingness, IBM) for one
#' strata column: one scatterplot per pair of principal coordinates, combined
#' into a single figure and optionally written to disk.
#' @param strata.select name (string) of the strata column used to colour points.
#' @param pc.to.do list of length-2 vectors of principal coordinate indices to plot.
#' @param vectors data frame of PCoA vectors (columns \code{Axis.*}) with a
#' \code{MISSING_GENOTYPE_PERCENT} column used for point size.
#' @param variance.component data frame whose second column holds the variance
#' explained label for each axis.
#' @param path.folder output folder for the combined pdf.
#' @param write.plot logical: write the combined figure to disk?
#' @return list with one element (named \code{ibm.strata.<strata>}) holding the
#' combined ggarrange figure.
#' @rdname generate_pcoa_plot
#' @keywords internal
#' @export
generate_pcoa_plot <- function(
  strata.select,
  pc.to.do,
  vectors,
  variance.component,
  path.folder,
  write.plot
) {
  pcoa.plots <- list()

  # Build one IBM scatterplot for a single pair of PCo axes.
  pcoa_plot <- function(
    pc.to.do,
    vectors,
    variance.component) {
    pcoa.plots <- list()
    pcx <- pc.to.do[1]
    pcy <- pc.to.do[2]
    element.text.fig <- ggplot2::element_text(
      size = 12, family = "Helvetica", face = "bold")
    # NOTE: ggplot2::aes_string is soft-deprecated; kept because
    # strata.select and the axis names are character strings.
    ibm.plot <- ggplot2::ggplot(
      vectors,
      ggplot2::aes_string(
        x = stringi::stri_join("Axis.", pcx),
        y = stringi::stri_join("Axis.", pcy), size = vectors$MISSING_GENOTYPE_PERCENT),
      environment = environment()) +
      ggplot2::geom_point(ggplot2::aes_string(colour = strata.select), alpha = 0.5) +
      ggplot2::labs(x = stringi::stri_join("PCo", pcx, " [", variance.component[pcx,2], "]")) +
      ggplot2::labs(y = stringi::stri_join("PCo", pcy, " [", variance.component[pcy,2], "]")) +
      ggplot2::scale_size_area(name = "Individual's\nmissing genotypes\n(percent)", max_size = 4) +
      # FIX: theme_bw() is a complete theme; apply it first so the custom
      # text elements below are not overwritten.
      ggplot2::theme_bw() +
      ggplot2::theme(
        axis.title.x = element.text.fig,
        axis.title.y = element.text.fig,
        legend.title = element.text.fig,
        legend.text = element.text.fig
      )
    ibm_plot_name <- stringi::stri_join(
      "ibm.plot.pco", pcx, ".pco", pcy, ".strata.", strata.select)
    pcoa.plots[[ibm_plot_name]] <- ibm.plot
    return(pcoa.plots)
  }#End pcoa_plot

  pcoa.plots.strata <- purrr::map(
    .x = pc.to.do, .f = pcoa_plot,
    vectors = vectors,
    variance.component = variance.component) %>%
    purrr::flatten(.)

  # FIX: combine all generated panels via plotlist instead of hard-coding
  # pcoa.plots.strata[[1]]..[[6]]; the original assumed exactly 6 plots and
  # errored for any other number of axis pairs in pc.to.do. With 6 plots the
  # layout (2 columns x 3 rows) is identical to the original.
  n.plots <- length(pcoa.plots.strata)
  pcoa.plots.strata <- ggpubr::ggarrange(
    plotlist = pcoa.plots.strata,
    ncol = 2,
    nrow = ceiling(n.plots / 2),
    legend = "right",
    common.legend = TRUE
  )

  if (write.plot) {
    plot.name <- stringi::stri_join("ibm.plots.strata.", strata.select, ".pdf")
    ggplot2::ggsave(
      filename = file.path(path.folder, plot.name),
      plot = pcoa.plots.strata,
      width = 20, height = 15,
      dpi = 600, units = "cm",
      useDingbats = FALSE)
  }
  ibm_strata_name <- stringi::stri_join("ibm.strata.", strata.select)
  pcoa.plots[[ibm_strata_name]] <- pcoa.plots.strata
  return(pcoa.plots)
}#End generate_pcoa_plot
# tested alternative to ggpubr or cowplot package to reduce installed packages...
# the problem is my limited understanding of grid and grid extra
# note to myself: you managed to write the combined plots but you failed in
# keeping the combined plots in an object that can be return in the result list
# arrange_plots_legend <- function(plots, ncol = length(list(...)), nrow = 1, position = c("bottom", "right")) {
# # plots <- list(...)
# # plots <- unlist(plots)
# position <- match.arg(position)
#
# g <- ggplot2::ggplotGrob(plots[[1]] + ggplot2::theme(legend.position = position))$grobs
# legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
# lheight <- sum(legend$height)
# lwidth <- sum(legend$width)
#
# # gridExtra::grid.arrange(
# # do.call(gridExtra::arrangeGrob, lapply(plots, function(x)
# # x + ggplot2::theme(legend.position="none"))),
# # legend,
# # ncol = 1,
# # heights = grid::unit.c(grid::unit(1, "npc") - lheight, lheight))
# gl <- lapply(plots, function(x) x + ggplot2::theme(legend.position="none"))
# gl <- c(gl, ncol = ncol, nrow = nrow)
# combined <- switch(
# position,
# "bottom" = gridExtra::arrangeGrob(
# do.call(gridExtra::arrangeGrob, gl),
# legend,
# ncol = 1,
# heights = grid::unit.c(grid::unit(1, "npc") - lheight, lheight)),
# "right" = gridExtra::arrangeGrob(
# do.call(gridExtra::arrangeGrob, gl),
# legend,
# ncol = 2,
# widths = grid::unit.c(grid::unit(1, "npc") - lwidth, lwidth)))
#
# grid::grid.newpage()
# grid::grid.draw(combined)
# return(combined)
# }
#' @title pct_missing_by_total
#' @description For one strata column, plots the mean percentage of missing
#' genotypes attributable to each stratum against the total number of missing
#' genotypes per marker, with a weighted linear fit per stratum. Returns both
#' the tidied lm coefficients and the ggplot object.
#' @param strata.select name (string) of the strata column in \code{data}.
#' @param data tidy genotype tibble with columns \code{MARKERS},
#' \code{GT_MISSING_BINARY} (0 = missing genotype) and the
#' \code{strata.select} column.
#' @param ci confidence level for the per-point quantile interval.
#' @param path.folder folder where the pdf is written when
#' \code{write.plot = TRUE}.
#' @param write.plot logical: write the figure to disk?
#' @return a list with two elements named after \code{strata.select}: the
#' tidied lm results and the ggplot figure.
#' @rdname pct_missing_by_total
#' @keywords internal
#' @export
pct_missing_by_total <- function(
  strata.select, data, ci = 0.95, path.folder, write.plot = TRUE) {
  # rename strata column and flag missing genotypes
  # (GT_MISSING_BINARY == 0 is treated as missing)
  data %<>%
    # dplyr::rename(STRATA_SELECT = data[[!!(strata.select)]]) %>%
    dplyr::rename(STRATA_SELECT = !! strata.select) %>%
    dplyr::mutate(is.missing = GT_MISSING_BINARY == 0)
  # count number of strata (drives the output figure size)
  n.strata <- dplyr::n_distinct(data$STRATA_SELECT)
  # summarize missingness by locus and factor column:
  # per marker, missing count within each stratum and overall; markers with
  # no missing genotypes are dropped.
  miss.smry <- data %>%
    dplyr::group_by(MARKERS, STRATA_SELECT) %>%
    dplyr::summarise(num.missing.col = sum(is.missing)) %>%
    dplyr::left_join(
      data %>%
        dplyr::group_by(MARKERS) %>%
        dplyr::summarise(num.missing.total = sum(is.missing)),
      by = "MARKERS"
    ) %>%
    dplyr::filter(num.missing.total > 0) %>%
    dplyr::mutate(
      pct.missing = num.missing.col / num.missing.total
    ) %>%
    dplyr::ungroup(.)
  # summarize overall (marginal) percent missing by factor column
  pct.miss.col <- miss.smry %>%
    dplyr::group_by(STRATA_SELECT) %>%
    dplyr::summarise(
      pct.missing = sum(num.missing.col) / sum(num.missing.total)
    ) %>%
    dplyr::ungroup(.) %>%
    dplyr::arrange(dplyr::desc(pct.missing)) %>%
    dplyr::mutate(STRATA_SELECT = as.character(STRATA_SELECT))
  # Keep string to reorder factor based on marginal % missingness
  level.miss <- pct.miss.col$STRATA_SELECT
  pct.miss.col <- pct.miss.col %>%
    dplyr::mutate(
      STRATA_SELECT = factor(STRATA_SELECT, levels = level.miss, ordered = TRUE)
    )
  # summarize % missing for each total number missing in each factor level:
  # n points per total, their mean and a (1 - ci)/2 .. (1 + ci)/2 quantile
  # interval.
  miss.smry %<>%
    dplyr::group_by(STRATA_SELECT, num.missing.total) %>%
    dplyr::summarize(
      n = length(pct.missing),
      mean.miss = mean(pct.missing),
      lci = stats::quantile(pct.missing, (1 - ci) / 2),
      uci = stats::quantile(pct.missing, (1 + ci) / 2)
    ) %>%
    dplyr::ungroup(.) %>%
    dplyr::mutate(
      STRATA_SELECT = factor(STRATA_SELECT, levels = level.miss, ordered = TRUE)
    )
  # to store results
  res <- list()
  # Tidy result of lm: one weighted regression of mean % missing on
  # log10(total missing), fitted separately per stratum (weights = number of
  # markers contributing to each point).
  lm.res.name <- stringi::stri_join("pct.missing.lm.", "strata.", strata.select)
  res[[lm.res.name]] <- miss.smry %>%
    split(x = ., f = .$STRATA_SELECT) %>%
    purrr::map_df(
      ~ broom::tidy(
        stats::lm(
          mean.miss ~ log10(num.missing.total),
          weights = n,
          data = .
        )
      ),
      .id = "STRATA_SELECT"
    )
  # intercept (a) and slope (b) per stratum, used to draw the fitted lines
  miss.lm.coefs <- res[[lm.res.name]] %>%
    dplyr::select(-c(std.error, statistic, p.value)) %>%
    grur::rad_wide(x = ., names_from = term, values_from = estimate, tidy = TRUE) %>%
    dplyr::rename(a = `(Intercept)`, b = `log10(num.missing.total)`) %>%
    dplyr::mutate(
      STRATA_SELECT = factor(STRATA_SELECT, levels = level.miss, ordered = TRUE)
    ) %>%
    dplyr::arrange(STRATA_SELECT)
  axis.title.element <- ggplot2::element_text(
    size = 12, family = "Helvetica", face = "bold"
  )
  axis.text.element <- ggplot2::element_text(size = 10, family = "Helvetica")
  # generate figure
  fig.name <- stringi::stri_join("pct.missing.plot", ".strata.", strata.select)
  res[[fig.name]] <- ggplot2::ggplot(
    miss.smry, ggplot2::aes_string(x = "num.missing.total")) +
    ggplot2::geom_segment(
      ggplot2::aes_string(xend = "num.missing.total", y = "lci", yend = "uci"),
      color = "gray50"
    ) +
    ggplot2::geom_point(ggplot2::aes_string(y = "mean.miss", size = "n")) +
    ggplot2::scale_size(breaks = c(0, 10, 100, 1000, 10000)) +
    # per-stratum weighted lm fit (yellow)
    ggplot2::geom_abline(
      data = miss.lm.coefs,
      ggplot2::aes(intercept = a, slope = b),
      color = "yellow"
    ) +
    # the expected % missing by random (1 / number of factor levels)
    ggplot2::geom_hline(yintercept = 1 / dplyr::n_distinct(data$STRATA_SELECT)) +
    # observed marginal % missing per stratum (red)
    ggplot2::geom_hline(
      data = pct.miss.col,
      ggplot2::aes_string(yintercept = "pct.missing"),
      color = "red"
    ) +
    ggplot2::scale_x_log10() +
    ggplot2::labs(
      title = stringi::stri_join("Strata: ", strata.select),
      x = expression(
        paste("Total number of missing genotypes (", log[10], ")")
      ),
      y = "Missing genotypes (mean percentage)",
      size = "Markers missing (number)"
    ) +
    # NOTE(review): theme_bw() is a complete theme; placed after theme() it
    # overrides the custom text elements set just above -- confirm the
    # intended look (same pattern appears in sibling plot helpers).
    ggplot2::theme(
      axis.title.x = axis.title.element,
      axis.text.x = axis.text.element,
      axis.title.y = axis.title.element,
      axis.text.y = axis.text.element,
      legend.title = axis.title.element,
      legend.text = axis.text.element
    ) +
    ggplot2::theme_bw() +
    ggplot2::facet_wrap(~ STRATA_SELECT)
  # fig
  if (write.plot) {
    ggplot2::ggsave(
      filename = stringi::stri_join(path.folder, "/", fig.name, ".pdf"),
      plot = res[[fig.name]],
      width = n.strata * 3,
      height = n.strata * 3,
      dpi = 600,
      units = "cm",
      useDingbats = FALSE,
      limitsize = FALSE
    )
  }
  return(res)
}#End pct_missing_by_total
#' @title missing_rda
#' @description The function uses Permutation test Anova and
#' Redundancy Analysis to highlight potential patterns of missingness in the data
#' based on the strata provided
#' @param data missingness data (or, if a vector, the path of a \code{.rad}
#' file read with \code{radiator::read_rad}) used as input of the PCoA.
#' @param strata data frame with an \code{INDIVIDUALS} column plus one column
#' per stratum to test.
#' @param permutations number of permutations used by the anova test.
#' @param parallel.core number of cores passed to \code{vegan::anova.cca}.
#' @return a list with the PCoA object (\code{data.pcoa}) and, per analysis,
#' the RDA fit(s) and tidied permutation anova table(s)
#' (\code{rda_separate_strata} when several strata, and
#' \code{rda_combined_strata}).
#' @rdname missing_rda
#' @keywords internal
#' @export
# IMPORT FROM ------------------------------------------------------------------
#' @importFrom dplyr distinct rename arrange mutate select summarise group_by ungroup filter inner_join left_join
#' @importFrom ggpubr ggarrange
#' @importFrom ape pcoa
#' @importFrom radiator tidy_genomic_data change_pop_names ibdg_fh detect_all_missing write_rad
#' @importFrom cowplot plot_grid align_plots
#' @importFrom Matrix Matrix
#' @importFrom vegan anova.cca rda
#' @importFrom ape pcoa
#' @importFrom adespatial dist.ldc
#' @importFrom data.table as.data.table dcast.data.table melt.data.table
missing_rda <- function(
  data,
  strata,
  permutations = 1000,
  parallel.core = parallel::detectCores() - 1
) {
  res <- list()
  # accept either an in-memory object or a file path (vector) to a .rad file
  if (is.vector(data)) {
    res$data.pcoa <- radiator::read_rad(data)
  } else {
    res$data.pcoa <- data
  }
  data <- NULL
  res$data.pcoa %<>% dplyr::ungroup(.)
  # PCoA on Jaccard distance (binary)
  res$data.pcoa <- ape::pcoa(
    D = sqrt(adespatial::dist.ldc(
      Y = res$data.pcoa,
      method = "jaccard",
      binary = TRUE, silent = TRUE)),
    rn = strata$INDIVIDUALS)
  # D is Euclidean because the function outputs D[jk] = sqrt(1-S[jk])
  # Anova on the pcoa data
  # rda_anova: RDA of the PCoA axes on one or several strata columns,
  # followed by a permutation anova (by model terms). Returns a list with
  # the rda fit and the tidied anova table, named after the strata used.
  rda_anova <- function(
    strata.select,
    data.pcoa,
    strata,
    permutations = 100,
    parallel.core = parallel::detectCores() - 1) {
    # strata.select <- "POP_ID" #test
    # data.pcoa <- res$data.pcoa #test
    res.rda <- list()#to store results
    # Generate the function formula --------------------------------------------
    # based on increasing levels for available strata
    # (terms ordered by their number of distinct levels, ascending)
    grur_count <- function(x) length(unique(x))
    strata.formula <- dplyr::select(strata, dplyr::one_of(strata.select)) %>%
      dplyr::summarise_all(.tbl = ., .funs = grur_count) %>%
      tidyr::pivot_longer(
        data = .,
        cols = tidyselect::everything(),
        names_to = "TERMS",
        values_to = "LEVELS"
      ) %>%
      dplyr::arrange(LEVELS) %>%
      dplyr::select(TERMS) %>%
      purrr::flatten_chr(.)
    formula.grur <- stats::as.formula(
      paste("data.pcoa$vectors ~ ", paste(strata.formula, collapse= "+")))
    # Check how many strata.select are used and messages -----------------------
    # if (length(strata.select) > 1) {
    message("Redundancy Analysis using strata: ",
            stringi::stri_join(strata.select, collapse = ", "))
    rda_strata_name <- stringi::stri_join("rda.strata", strata.select, collapse = "_", sep = "_")
    anova_strata_name <- stringi::stri_join("anova.strata.", strata.select, collapse = "_", sep = "_")
    # } else {
    #   rda_strata_name <- stringi::stri_join("rda.strata.", strata.select)
    #   anova_strata_name <- stringi::stri_join("anova.strata.", strata.select)
    #   strata.select <- rlang::sym(strata.select)
    #   message("Redundancy Analysis using strata: ", strata.select)
    # }
    message("RDA model formula: ", format(formula.grur))
    #RDA -----------------------------------------------------------------------
    data.rda <- vegan::rda(formula.grur, strata)
    # data.rda <- vegan::rda(rlang::`f_rhs<-`(data.pcoa$vectors ~ ., strata.select), strata)
    # data.rda <- vegan::rda(rlang::`f_rhs<-`(data.pcoa$vectors ~ ., stats::reformulate(termlabels = strata.select)), strata)
    # data.rda <- vegan::rda(stats::reformulate(termlabels = strata.select, response = data.pcoa$vectors), data = strata)
    # data.rda <- vegan::rda(res$data.pcoa$vectors ~ POP_ID + POP_TYPE + ECOTYPE, strata)
    # ANOVA overall test (permutation test by model terms, sequential)
    message("Permutation test for Redundancy Analysis using strata: ", stringi::stri_join(strata.select, collapse = ", "))
    # data.anova <- vegan::anova.cca(object = data.rda, permutations = permutations, parallel = parallel.core)
    # data.anova <- suppressWarnings(broom::tidy(vegan::anova.cca(object = data.rda, permutations = permutations, parallel = parallel.core)))
    data.anova <- suppressWarnings(broom::tidy(vegan::anova.cca(object = data.rda, by = "terms", model = "direct", permutations = permutations, parallel = parallel.core)))
    # keep only the rows corresponding to model terms (drops residual row)
    p.value.message <- dplyr::select(data.anova, STRATA = term, VARIANCE = Variance, P_VALUE = p.value) %>%
      dplyr::filter(STRATA %in% strata.formula)
    message("\nHypothesis based on the strata provided")
    message("    Null Hypothesis (H0): No pattern of missingness in the data between strata")
    message("    Alternative Hypothesis (H1): Presence of pattern(s) of missingness in the data between strata\n")
    # message("    p-value: ", data.anova$p.value[1], "\n")
    print(p.value.message)
    message("    note: low p-value -> reject the null hypothesis\n")
    #return results
    res.rda[[rda_strata_name]] <- data.rda
    res.rda[[anova_strata_name]] <- data.anova
    return(res.rda)
  }
  # every strata column except the individual ids
  strata.select <- purrr::keep(.x = colnames(strata),
                               .p = !colnames(strata) %in% "INDIVIDUALS")
  if (length(strata.select) > 1) {
    # with several strata: one RDA per stratum, then all strata combined
    message("\nSeparate analysis of strata\n")
    res$rda_separate_strata <- purrr::map(
      .x = strata.select,
      .f = rda_anova,
      data.pcoa = res$data.pcoa,
      strata = strata,
      permutations = permutations,
      parallel.core = parallel.core) %>%
      purrr::flatten(.)
    message("\nCombined strata analysis\n")
    res$rda_combined_strata <- rda_anova(
      strata.select = strata.select,
      data.pcoa = res$data.pcoa,
      strata = strata,
      permutations = permutations,
      parallel.core = parallel.core)
  } else {
    res$rda_combined_strata <- rda_anova(
      strata.select = strata.select,
      data.pcoa = res$data.pcoa,
      strata = strata,
      permutations = permutations,
      parallel.core = parallel.core)
  }
  return(res)
}#End missing_rda
# parallel_core_opt ------------------------------------------------------------
#' @title parallel_core_opt
#' @description Optimization of parallel core argument for radiator.
#' Strategy: use an even number of workers, at least 1, and never more than
#' the available cores minus 2. \code{max.core} optionally caps the result
#' (also floored to an even number, minimum 1).
#' @param parallel.core requested number of cores (NULL = auto-detect).
#' @param max.core optional hard cap on the number of cores (NULL = no cap).
#' @return the number of cores to use (always >= 1).
#' @keywords internal
#' @export
parallel_core_opt <- function(parallel.core = NULL, max.core = NULL) {
  if (is.null(parallel.core)) {
    # FIX: guard against machines with <= 2 cores, where detectCores() - 2
    # previously returned 0 or a negative worker count.
    parallel.core <- max(1, parallel::detectCores() - 2)
  } else {
    # Round down to an even number of workers, then clamp to [1, cores - 2].
    parallel.core <- floor(parallel.core / 2) * 2
    parallel.core <- max(1, min(parallel.core, parallel::detectCores() - 2))
  }
  if (is.null(max.core)) {
    parallel.core.opt <- parallel.core
  } else {
    # FIX: max.core = 1 previously yielded 0 workers (floor(1/2) * 2 == 0);
    # clamp the capped value to at least 1.
    parallel.core.opt <- max(1, min(parallel.core, floor(max.core / 2) * 2))
  }
  return(parallel.core.opt)
}#End parallel_core_opt
# using future and future.apply -------------------------------------------------
#' @name grur_future
#' @title grur parallel function
#' @description Run a function over the elements of \code{.x} in parallel with
#' future/furrr, optionally splitting \code{.x} into chunks first.
#' @param .x data (vector, list or data frame) to iterate over.
#' @param .f function applied to each element/chunk.
#' @param flat.future which furrr map variant to use to flatten the results.
#' @param split.vec split \code{.x} into \code{split.chunks} chunks first?
#' @param split.with optional column name used to group/split a data frame.
#' @param split.chunks number of chunks generated when splitting.
#' @param parallel.core number of workers (1 = sequential).
#' @param ... forwarded to \code{.f}.
#' @inheritParams future.apply::future_apply
#' @rdname grur_future
#' @export
#' @keywords internal
grur_future <- function(
  .x,
  .f,
  flat.future = c("int", "chr", "dfr", "dfc", "walk", "drop"),
  split.vec = FALSE,
  split.with = NULL,
  split.chunks = 4L,
  parallel.core = parallel::detectCores() - 1,
  ...
) {
  # keep console width predictable during parallel runs; restore on exit and
  # always fall back to a sequential plan
  opt.change <- getOption("width")
  options(width = 70)
  on.exit(options(width = opt.change), add = TRUE)
  on.exit(if (parallel.core > 1L) future::plan(strategy = "sequential"), add = TRUE)
  # argument for flattening the results
  flat.future <- match.arg(
    arg = flat.future,
    choices = c("int", "chr", "dfr", "dfc", "walk", "drop"),
    several.ok = FALSE
  )
  # splitting into chunks-------------------------------------------------------
  if (split.vec && is.null(split.with)) {
    # d: data length (rows for a data frame); sv: chunk id per element/row
    d <- .x
    df <- FALSE
    if (any(class(d) %in% c("tbl_df","tbl","data.frame"))) {
      d <- nrow(d)
      df <- TRUE
    }
    if (length(d) > 1L) d <- length(d)
    stopifnot(is.integer(d))
    sv <- as.integer(floor((split.chunks * (seq_len(d) - 1) / d) + 1))
    stopifnot(length(sv) == d)
    # split
    if (df) {
      .x$SPLIT_VEC <- sv
      # FIX: group_split was called with the string "SPLIT_VEC", which groups
      # by a constant character value (a single group) instead of by the
      # SPLIT_VEC column, so the data frame was never actually split.
      .x %<>% dplyr::group_split(.tbl = ., SPLIT_VEC, .keep = FALSE)
    } else {
      .x %<>% split(x = ., f = sv)
    }
  }
  if (!is.null(split.with)) {
    # check
    if (length(split.with) != 1 || !is.character(split.with)) {
      rlang::abort(message = "Contact author: problem with parallel computation")
    }
    .data <- NULL
    stopifnot(rlang::has_name(.x, split.with))
    if (split.vec) {
      # assign chunk ids to the distinct values of the split.with column,
      # then split the joined data by chunk
      sv <- dplyr::distinct(.x, .data[[split.with]])
      d <- nrow(sv)
      sv$SPLIT_VEC <- as.integer(floor((split.chunks * (seq_len(d) - 1) / d) + 1))
      .x %<>%
        dplyr::left_join(sv, by = split.with) %>%
        # FIX: same string-literal grouping bug as above.
        dplyr::group_split(.tbl = ., SPLIT_VEC, .keep = FALSE)
    } else {
      .x %<>% dplyr::group_split(.tbl = ., .data[[split.with]], .keep = TRUE)
    }
  }
  # choose the plan: sequential for a single core, otherwise one multisession
  # worker per chunk, capped at parallel.core
  if (parallel.core == 1L) {
    future::plan(strategy = "sequential")
  } else {
    parallel.core <- parallel_core_opt(parallel.core = parallel.core)
    lx <- length(.x)
    if (lx < parallel.core) {
      future::plan(strategy = "multisession", workers = lx)
    } else {
      future::plan(strategy = "multisession", workers = parallel.core)
    }
  }
  # Run the function in parallel with the requested flattening behaviour,
  # forwarding any dots-dots-dots arguments to .f
  rad_map <- switch(flat.future,
                    int = {furrr::future_map_int},
                    chr = {furrr::future_map_chr},
                    dfr = {furrr::future_map_dfr},
                    dfc = {furrr::future_map_dfc},
                    walk = {furrr::future_walk},
                    drop = {furrr::future_map}
  )
  opts <- furrr::furrr_options(globals = FALSE)
  if (length(list(...)) == 0) {
    .x %<>% rad_map(.x = ., .f = .f, .options = opts)
  } else {
    .x %<>% rad_map(.x = ., .f = .f, ..., .options = opts)
  }
  return(.x)
}#End grur_future
# PIVOT-GATHER-CAST ------------------------------------------------------------
# Rationale: avoid committing to either tidyverse or data.table semantics.
# tidyr moved from gather/spread to the pivot_ functions, but they are still
# slow compared to (1) the original gather/spread and (2) the data.table equivalents.
#' @title rad_long
#' @description Reshape a data frame from wide to long, using either
#' \code{tidyr::pivot_longer} (\code{tidy = TRUE}) or
#' \code{data.table::melt.data.table} (\code{tidy = FALSE}, the default).
#' @param x data frame to reshape.
#' @param cols id columns kept as-is (tidyr branch pivots everything
#' \emph{except} these; data.table branch passes them to \code{id.vars}).
#' @param measure_vars columns to melt (data.table branch only).
#' @param names_to,values_to names of the output key/value columns.
#' @param variable_factor keep the key column as a factor (data.table branch).
#' @param keep_rownames passed to \code{data.table::as.data.table}.
#' @param tidy use tidyr (TRUE) or data.table (FALSE).
#' @return a tibble in long format.
#' @rdname rad_long
#' @keywords internal
#' @export
rad_long <- function(
  x,
  cols = NULL,
  measure_vars = NULL,
  names_to = NULL,
  values_to = NULL,
  variable_factor = TRUE,
  keep_rownames = FALSE,
  tidy = FALSE
){
  # tidyr
  if (tidy) {
    x %>%
      tidyr::pivot_longer(
        data = .,
        # FIX: negating a bare external character vector is ambiguous and
        # deprecated in tidyselect; all_of() makes the intent explicit while
        # keeping the same selection.
        cols = -tidyselect::all_of(cols),
        names_to = names_to,
        values_to = values_to
      )
  } else {# data.table
    x %>%
      data.table::as.data.table(., keep.rownames = keep_rownames) %>%
      data.table::melt.data.table(
        data = .,
        id.vars = cols,
        measure.vars = measure_vars,
        variable.name = names_to,
        value.name = values_to,
        variable.factor = variable_factor
      ) %>%
      tibble::as_tibble(.)
  }
}#rad_long
#' @title rad_wide
#' @description Reshape a data frame from long to wide, using either
#' \code{tidyr::pivot_wider} (\code{tidy = TRUE}) or
#' \code{data.table::dcast.data.table} (\code{tidy = FALSE}, the default).
#' @param x data frame to reshape.
#' @param formula casting formula (data.table branch only).
#' @param names_from,values_from columns providing the new column names and
#' their values.
#' @param values_fill value used to fill missing combinations.
#' @param sep separator used when composing new column names (data.table).
#' @param tidy use tidyr (TRUE) or data.table (FALSE).
#' @return a tibble in wide format.
#' @rdname rad_wide
#' @keywords internal
#' @export
rad_wide <- function(
  x ,
  formula = NULL,
  names_from = NULL,
  values_from = NULL,
  values_fill = NULL,
  sep = "_",
  tidy = FALSE
){
  # tidyr
  if (tidy) {
    # FIX: use %>% instead of %<>% so the reshaped tibble is returned
    # visibly, consistent with the data.table branch and with rad_long().
    x %>%
      tidyr::pivot_wider(
        data = .,
        names_from = names_from,
        values_from = values_from,
        values_fill = values_fill
      )
  } else {# data.table
    x %>%
      data.table::as.data.table(.) %>%
      data.table::dcast.data.table(
        data = .,
        formula = formula,
        value.var = values_from,
        sep = sep,
        fill = values_fill
      ) %>%
      tibble::as_tibble(.)
  }
}#rad_wide
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VST.R
\name{distBioCond}
\alias{distBioCond}
\title{Quantify the Distance between Each Pair of Samples of a \code{bioCond}}
\usage{
distBioCond(x, subset = NULL, method = c("prior", "posterior", "none"),
min.var = 0, p = 2, diag = FALSE, upper = FALSE)
}
\arguments{
\item{x}{A \code{\link{bioCond}} object.}
\item{subset}{An optional vector specifying a subset of genomic intervals to
be used for deducing the distances between samples of \code{x}. In
practice, you may want to use only the intervals associated with large
variations across the samples to calculate the distances, as such
intervals are most helpful for distinguishing between the samples (see
\code{\link{varTestBioCond}} and "Examples" below).}
\item{method}{A character string indicating the method to be used for
calculating the variances of individual intervals. Must be one of
\code{"prior"} (default), \code{"posterior"} and \code{"none"}. Can be
abbreviated. Note that the \code{"none"} method does not consider the
mean-variance trend associated with \code{x} (see "Details").}
\item{min.var}{Lower bound of variances read from the mean-variance
curve associated with \code{x}. Any variance read from the curve less
than \code{min.var} will be adjusted to this value. It's primarily used
for safely reading positive values from the curve and taking into
account the practical significance of a signal variation. Ignored if
\code{method} is set to \code{"none"}.}
\item{p}{The power used to calculate the \emph{p}-norm distance between
each pair of samples (see "Details" for the specific formula).
Any positive real could be
specified, though setting \code{p} to a value other than 1
and 2 makes little sense. The default corresponds to the Euclidean
distance.}
\item{diag, upper}{Two arguments to be passed to
\code{\link[stats]{as.dist}}.}
}
\value{
A \code{\link[stats]{dist}} object quantifying the distance between
each pair of samples of \code{x}.
}
\description{
Given a \code{\link{bioCond}} object, \code{distBioCond} deduces, for each
pair of samples contained in it, the average absolute difference in signal
intensities of genomic intervals between them. Specifically, the function
calculates a weighted minkowski (i.e., \emph{p}-norm) distance between each
pair of vectors of signal intensities, with the weights being inversely
proportional to variances of individual intervals (see also
"Details"). \code{distBioCond} returns a \code{\link[stats]{dist}} object
recording the deduced average \eqn{|M|} values. The object effectively
quantifies the distance between each pair of samples and can be passed to
\code{\link[stats]{hclust}} to perform a clustering analysis (see
"Examples" below).
}
\details{
Variance of signal intensity varies considerably
across genomic intervals, due to
the heteroscedasticity inherent to count data as well as most of their
transformations. On this account, separately scaling the signal intensities
of each interval in a \code{\link{bioCond}} should lead to a more
reasonable measure of distances between its samples.
Suppose that \eqn{X} and \eqn{Y} are two vectors of signal intensities
representing two samples of a \code{bioCond} and that \eqn{xi}, \eqn{yi}
are their \eqn{i}th elements corresponding to the \eqn{i}th interval.
\code{distBioCond} calculates the distance between \eqn{X} and \eqn{Y} as
follows: \deqn{d(X, Y) = (sum(wi * |yi - xi| ^ p) / sum(wi)) ^ (1 / p)}
where \eqn{wi} is the reciprocal of the scaled variance (see below)
of interval \eqn{i}, and \eqn{p} defaults to 2.
Since the weights of intervals are normalized to have a sum of 1,
the resulting distance could be interpreted as an average absolute
difference in signal intensities of intervals between the two samples.
Since there typically exists a clear mean-variance dependence across genomic
intervals, \code{distBioCond} takes advantage of the mean-variance curve
associated with the \code{bioCond} to improve estimates of variances of
individual intervals. By default, prior variances, which are the ones read
from the curve, are used to deduce the weights of intervals for calculating
the distances. Alternatively, one can choose to use posterior variances of
intervals by setting \code{method} to \code{"posterior"}, which are weighted
averages of prior and observed variances, with the weights being
proportional to their respective numbers of degrees of freedom (see
\code{\link{fitMeanVarCurve}} for details). Since the observed variances of
intervals are associated with large uncertainty when the total number of
samples is small, it is not recommended to use posterior variances in such
cases. To be noted, if \code{method} is set to \code{"none"},
\code{distBioCond} will consider all genomic intervals to be associated with
a constant variance. In this case, neither the prior variance nor the
observed variance of each interval is used
to deduce its weight for calculating the distances.
This method is particularly suited to \code{bioCond} objects
that have gone through a variance-stabilizing transformation (see
\code{\link{vstBioCond}} for details and "Examples" below) as well as
\code{bioCond}s whose structure matrices have been specifically
designed (see below and "References" also).
Another point deserving special attention is that \code{distBioCond} has
considered the possibility that
genomic intervals in the supplied \code{bioCond}
are associated with different structure matrices. In order to objectively
compare signal variation levels between genomic intervals,
\code{distBioCond} further scales the variance of each interval
(deduced by using whichever method is selected) by
multiplying it with the geometric mean of diagonal
elements of the interval's structure matrix. See \code{\link{bioCond}} and
\code{\link{setWeight}} for a detailed description of structure matrix.
Given a set of \code{bioCond} objects,
\code{distBioCond} could also be used to quantify the distance between
each pair of them, by first combining the \code{bioCond}s into a
single \code{bioCond} and fitting a mean-variance curve for
it (see \code{\link{cmbBioCond}} and "Examples" below).
}
\examples{
data(H3K27Ac, package = "MAnorm2")
attr(H3K27Ac, "metaInfo")
## Cluster a set of ChIP-seq samples from different cell lines (i.e.,
## individuals).
# Perform MA normalization and construct a bioCond.
norm <- normalize(H3K27Ac, 4:8, 9:13)
cond <- bioCond(norm[4:8], norm[9:13], name = "all")
# Fit a mean-variance curve.
cond <- fitMeanVarCurve(list(cond), method = "local",
occupy.only = FALSE)[[1]]
plotMeanVarCurve(list(cond), subset = "all")
# Measure the distance between each pair of samples and accordingly perform
# a hierarchical clustering. Note that biological replicates of each cell
# line are clustered together.
d1 <- distBioCond(cond, method = "prior")
plot(hclust(d1, method = "average"), hang = -1)
# Measure the distances using only hypervariable genomic intervals. Note the
# change of scale of the distances.
res <- varTestBioCond(cond)
f <- res$fold.change > 1 & res$pval < 0.05
d2 <- distBioCond(cond, subset = f, method = "prior")
plot(hclust(d2, method = "average"), hang = -1)
# Apply a variance-stabilizing transformation and associate a constant
# function with the resulting bioCond as its mean-variance curve.
vst_cond <- vstBioCond(cond)
vst_cond <- setMeanVarCurve(list(vst_cond), function(x)
rep_len(1, length(x)), occupy.only = FALSE,
method = "constant prior")[[1]]
plotMeanVarCurve(list(vst_cond), subset = "all")
# Repeat the clustering analyses on the VSTed bioCond.
d3 <- distBioCond(vst_cond, method = "none")
plot(hclust(d3, method = "average"), hang = -1)
res <- varTestBioCond(vst_cond)
f <- res$fold.change > 1 & res$pval < 0.05
d4 <- distBioCond(vst_cond, subset = f, method = "none")
plot(hclust(d4, method = "average"), hang = -1)
## Cluster a set of individuals.
# Perform MA normalization and construct bioConds to represent individuals.
norm <- normalize(H3K27Ac, 4, 9)
norm <- normalize(norm, 5:6, 10:11)
norm <- normalize(norm, 7:8, 12:13)
conds <- list(GM12890 = bioCond(norm[4], norm[9], name = "GM12890"),
GM12891 = bioCond(norm[5:6], norm[10:11], name = "GM12891"),
GM12892 = bioCond(norm[7:8], norm[12:13], name = "GM12892"))
conds <- normBioCond(conds)
# Group the individuals into a single bioCond and fit a mean-variance curve
# for it.
cond <- cmbBioCond(conds, name = "all")
cond <- fitMeanVarCurve(list(cond), method = "local",
occupy.only = FALSE)[[1]]
plotMeanVarCurve(list(cond), subset = "all")
# Measure the distance between each pair of individuals and accordingly
# perform a hierarchical clustering. Note that GM12891 and GM12892 are
# actually a couple and they are clustered together.
d1 <- distBioCond(cond, method = "prior")
plot(hclust(d1, method = "average"), hang = -1)
# Measure the distances using only hypervariable genomic intervals. Note the
# change of scale of the distances.
res <- varTestBioCond(cond)
f <- res$fold.change > 1 & res$pval < 0.05
d2 <- distBioCond(cond, subset = f, method = "prior")
plot(hclust(d2, method = "average"), hang = -1)
}
\references{
Law, C.W., et al., \emph{voom: Precision weights unlock linear
model analysis tools for RNA-seq read counts}. Genome Biol, 2014.
\strong{15}(2): p. R29.
}
\seealso{
\code{\link{bioCond}} for creating a \code{bioCond} object;
\code{\link{fitMeanVarCurve}} for fitting a mean-variance curve;
\code{\link{cmbBioCond}} for combining a set of \code{bioCond} objects
into a single one; \code{\link[stats]{hclust}} for performing a
hierarchical clustering on a \code{\link[stats]{dist}} object;
\code{\link{vstBioCond}} for applying a variance-stabilizing
transformation to signal intensities of samples of a \code{bioCond}.
}
|
/man/distBioCond.Rd
|
no_license
|
wwang-chcn/MAnorm2
|
R
| false
| true
| 10,160
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VST.R
\name{distBioCond}
\alias{distBioCond}
\title{Quantify the Distance between Each Pair of Samples of a \code{bioCond}}
\usage{
distBioCond(x, subset = NULL, method = c("prior", "posterior", "none"),
min.var = 0, p = 2, diag = FALSE, upper = FALSE)
}
\arguments{
\item{x}{A \code{\link{bioCond}} object.}
\item{subset}{An optional vector specifying a subset of genomic intervals to
be used for deducing the distances between samples of \code{x}. In
practice, you may want to use only the intervals associated with large
variations across the samples to calculate the distances, as such
intervals are most helpful for distinguishing between the samples (see
\code{\link{varTestBioCond}} and "Examples" below).}
\item{method}{A character string indicating the method to be used for
calculating the variances of individual intervals. Must be one of
\code{"prior"} (default), \code{"posterior"} and \code{"none"}. Can be
abbreviated. Note that the \code{"none"} method does not consider the
mean-variance trend associated with \code{x} (see "Details").}
\item{min.var}{Lower bound of variances read from the mean-variance
curve associated with \code{x}. Any variance read from the curve less
than \code{min.var} will be adjusted to this value. It's primarily used
for safely reading positive values from the curve and taking into
account the practical significance of a signal variation. Ignored if
\code{method} is set to \code{"none"}.}
\item{p}{The power used to calculate the \emph{p}-norm distance between
each pair of samples (see "Details" for the specific formula).
Any positive real could be
specified, though setting \code{p} to a value other than 1
and 2 makes little sense. The default corresponds to the Euclidean
distance.}
\item{diag, upper}{Two arguments to be passed to
\code{\link[stats]{as.dist}}.}
}
\value{
A \code{\link[stats]{dist}} object quantifying the distance between
each pair of samples of \code{x}.
}
\description{
Given a \code{\link{bioCond}} object, \code{distBioCond} deduces, for each
pair of samples contained in it, the average absolute difference in signal
intensities of genomic intervals between them. Specifically, the function
calculates a weighted Minkowski (i.e., \emph{p}-norm) distance between each
pair of vectors of signal intensities, with the weights being inversely
proportional to variances of individual intervals (see also
"Details"). \code{distBioCond} returns a \code{\link[stats]{dist}} object
recording the deduced average \eqn{|M|} values. The object effectively
quantifies the distance between each pair of samples and can be passed to
\code{\link[stats]{hclust}} to perform a clustering analysis (see
"Examples" below).
}
\details{
Variance of signal intensity varies considerably
across genomic intervals, due to
the heteroscedasticity inherent to count data as well as most of their
transformations. On this account, separately scaling the signal intensities
of each interval in a \code{\link{bioCond}} should lead to a more
reasonable measure of distances between its samples.
Suppose that \eqn{X} and \eqn{Y} are two vectors of signal intensities
representing two samples of a \code{bioCond} and that \eqn{xi}, \eqn{yi}
are their \eqn{i}th elements corresponding to the \eqn{i}th interval.
\code{distBioCond} calculates the distance between \eqn{X} and \eqn{Y} as
follows: \deqn{d(X, Y) = (sum(wi * |yi - xi| ^ p) / sum(wi)) ^ (1 / p)}
where \eqn{wi} is the reciprocal of the scaled variance (see below)
of interval \eqn{i}, and \eqn{p} defaults to 2.
Since the weights of intervals are normalized to have a sum of 1,
the resulting distance could be interpreted as an average absolute
difference in signal intensities of intervals between the two samples.
Since there typically exists a clear mean-variance dependence across genomic
intervals, \code{distBioCond} takes advantage of the mean-variance curve
associated with the \code{bioCond} to improve estimates of variances of
individual intervals. By default, prior variances, which are the ones read
from the curve, are used to deduce the weights of intervals for calculating
the distances. Alternatively, one can choose to use posterior variances of
intervals by setting \code{method} to \code{"posterior"}, which are weighted
averages of prior and observed variances, with the weights being
proportional to their respective numbers of degrees of freedom (see
\code{\link{fitMeanVarCurve}} for details). Since the observed variances of
intervals are associated with large uncertainty when the total number of
samples is small, it is not recommended to use posterior variances in such
cases. To be noted, if \code{method} is set to \code{"none"},
\code{distBioCond} will consider all genomic intervals to be associated with
a constant variance. In this case, neither the prior variance nor the
observed variance of each interval is used
to deduce its weight for calculating the distances.
This method is particularly suited to \code{bioCond} objects
that have gone through a variance-stabilizing transformation (see
\code{\link{vstBioCond}} for details and "Examples" below) as well as
\code{bioCond}s whose structure matrices have been specifically
designed (see below and "References" also).
Another point deserving special attention is that \code{distBioCond} has
considered the possibility that
genomic intervals in the supplied \code{bioCond}
are associated with different structure matrices. In order to objectively
compare signal variation levels between genomic intervals,
\code{distBioCond} further scales the variance of each interval
(deduced by using whichever method is selected) by
multiplying it with the geometric mean of diagonal
elements of the interval's structure matrix. See \code{\link{bioCond}} and
\code{\link{setWeight}} for a detailed description of structure matrix.
Given a set of \code{bioCond} objects,
\code{distBioCond} could also be used to quantify the distance between
each pair of them, by first combining the \code{bioCond}s into a
single \code{bioCond} and fitting a mean-variance curve for
it (see \code{\link{cmbBioCond}} and "Examples" below).
}
\examples{
data(H3K27Ac, package = "MAnorm2")
attr(H3K27Ac, "metaInfo")
## Cluster a set of ChIP-seq samples from different cell lines (i.e.,
## individuals).
# Perform MA normalization and construct a bioCond.
norm <- normalize(H3K27Ac, 4:8, 9:13)
cond <- bioCond(norm[4:8], norm[9:13], name = "all")
# Fit a mean-variance curve.
cond <- fitMeanVarCurve(list(cond), method = "local",
occupy.only = FALSE)[[1]]
plotMeanVarCurve(list(cond), subset = "all")
# Measure the distance between each pair of samples and accordingly perform
# a hierarchical clustering. Note that biological replicates of each cell
# line are clustered together.
d1 <- distBioCond(cond, method = "prior")
plot(hclust(d1, method = "average"), hang = -1)
# Measure the distances using only hypervariable genomic intervals. Note the
# change of scale of the distances.
res <- varTestBioCond(cond)
f <- res$fold.change > 1 & res$pval < 0.05
d2 <- distBioCond(cond, subset = f, method = "prior")
plot(hclust(d2, method = "average"), hang = -1)
# Apply a variance-stabilizing transformation and associate a constant
# function with the resulting bioCond as its mean-variance curve.
vst_cond <- vstBioCond(cond)
vst_cond <- setMeanVarCurve(list(vst_cond), function(x)
rep_len(1, length(x)), occupy.only = FALSE,
method = "constant prior")[[1]]
plotMeanVarCurve(list(vst_cond), subset = "all")
# Repeat the clustering analyses on the VSTed bioCond.
d3 <- distBioCond(vst_cond, method = "none")
plot(hclust(d3, method = "average"), hang = -1)
res <- varTestBioCond(vst_cond)
f <- res$fold.change > 1 & res$pval < 0.05
d4 <- distBioCond(vst_cond, subset = f, method = "none")
plot(hclust(d4, method = "average"), hang = -1)
## Cluster a set of individuals.
# Perform MA normalization and construct bioConds to represent individuals.
norm <- normalize(H3K27Ac, 4, 9)
norm <- normalize(norm, 5:6, 10:11)
norm <- normalize(norm, 7:8, 12:13)
conds <- list(GM12890 = bioCond(norm[4], norm[9], name = "GM12890"),
GM12891 = bioCond(norm[5:6], norm[10:11], name = "GM12891"),
GM12892 = bioCond(norm[7:8], norm[12:13], name = "GM12892"))
conds <- normBioCond(conds)
# Group the individuals into a single bioCond and fit a mean-variance curve
# for it.
cond <- cmbBioCond(conds, name = "all")
cond <- fitMeanVarCurve(list(cond), method = "local",
occupy.only = FALSE)[[1]]
plotMeanVarCurve(list(cond), subset = "all")
# Measure the distance between each pair of individuals and accordingly
# perform a hierarchical clustering. Note that GM12891 and GM12892 are
# actually a couple and they are clustered together.
d1 <- distBioCond(cond, method = "prior")
plot(hclust(d1, method = "average"), hang = -1)
# Measure the distances using only hypervariable genomic intervals. Note the
# change of scale of the distances.
res <- varTestBioCond(cond)
f <- res$fold.change > 1 & res$pval < 0.05
d2 <- distBioCond(cond, subset = f, method = "prior")
plot(hclust(d2, method = "average"), hang = -1)
}
\references{
Law, C.W., et al., \emph{voom: Precision weights unlock linear
model analysis tools for RNA-seq read counts}. Genome Biol, 2014.
\strong{15}(2): p. R29.
}
\seealso{
\code{\link{bioCond}} for creating a \code{bioCond} object;
\code{\link{fitMeanVarCurve}} for fitting a mean-variance curve;
\code{\link{cmbBioCond}} for combining a set of \code{bioCond} objects
into a single one; \code{\link[stats]{hclust}} for performing a
hierarchical clustering on a \code{\link[stats]{dist}} object;
\code{\link{vstBioCond}} for applying a variance-stabilizing
transformation to signal intensities of samples of a \code{bioCond}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xx.Deprecated.functions.R
\name{loadTocc}
\alias{loadTocc}
\title{Load filtered occurrence data}
\usage{
loadTocc(...)
}
\arguments{
\item{...}{additional arguments}
}
\description{
Load filtered occurrence data from object returned by "thin_b" function
}
|
/man/loadTocc.Rd
|
no_license
|
HemingNM/ENMwizard
|
R
| false
| true
| 334
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xx.Deprecated.functions.R
\name{loadTocc}
\alias{loadTocc}
\title{Load filtered occurrence data}
\usage{
loadTocc(...)
}
\arguments{
\item{...}{additional arguments}
}
\description{
Load filtered occurrence data from object returned by "thin_b" function
}
|
# pdx.compare.dae.to.pca
#
# Collect, for every tumor type and every number-of-genes setting, the best
# (minimum) denoising-autoencoder reconstruction error reported in the
# per-combination summary CSVs, together with the PCA error from the same row,
# and save the two resulting matrices as .rda files.
#
# NOTE(review): 'tumorlist' and 'ngeneslist' are not defined in this script;
# presumably they are created by a sourced setup file -- confirm before
# running standalone.
# Directory where RF code is stored
codedir <- "~/school/research/pdx/code"
# Directory where data object split.cm.data and biomarker datasets are stored
datadir <- "~/school/research/pdx/data"
# Directory where final data objects should be sent
outdir <- "~/school/research/pdx/results"

# One row per tumor type, one column per gene-count setting.
DAE_errors <- matrix(nrow = length(tumorlist), ncol = length(ngeneslist))
rownames(DAE_errors) <- tumorlist
colnames(DAE_errors) <- ngeneslist
PCA_errors <- matrix(nrow = length(tumorlist), ncol = length(ngeneslist))
rownames(PCA_errors) <- tumorlist
colnames(PCA_errors) <- ngeneslist

# seq_along() (rather than 1:length()) stays correct if a list is empty.
for (q in seq_along(tumorlist)) {
  for (r in seq_along(ngeneslist)) {
    # Summary file produced upstream for this tumor / gene-count combination.
    summ <- read.csv(sprintf("%s/%s.%s.biomarkers.rna.summary.csv",
                             outdir, tumorlist[q], ngeneslist[r]))
    # which.min() returns a single index (first minimum), so ties or NA
    # values in AE_error cannot break the scalar assignments below
    # (which(x == min(x)) could return several rows).
    chosen.row <- which.min(summ$AE_error)
    DAE_errors[q, r] <- summ[chosen.row, "AE_error"]
    PCA_errors[q, r] <- summ[chosen.row, "PCA_error"]
  }
}
save(DAE_errors, file = sprintf("%s/DAE_errors.rda", outdir))
save(PCA_errors, file = sprintf("%s/PCA_errors.rda", outdir))
|
/PDX.Code/RNA/pdx.compare.dae.to.pca.R
|
no_license
|
jasa-acs/High-Dimensional-Precision-Medicine-From-Patient-Derived-Xenografts
|
R
| false
| false
| 1,074
|
r
|
# pdx.compare.dae.to.pca
#
# Collect, for every tumor type and every number-of-genes setting, the best
# (minimum) denoising-autoencoder reconstruction error reported in the
# per-combination summary CSVs, together with the PCA error from the same row,
# and save the two resulting matrices as .rda files.
#
# NOTE(review): 'tumorlist' and 'ngeneslist' are not defined in this script;
# presumably they are created by a sourced setup file -- confirm before
# running standalone.
# Directory where RF code is stored
codedir <- "~/school/research/pdx/code"
# Directory where data object split.cm.data and biomarker datasets are stored
datadir <- "~/school/research/pdx/data"
# Directory where final data objects should be sent
outdir <- "~/school/research/pdx/results"

# One row per tumor type, one column per gene-count setting.
DAE_errors <- matrix(nrow = length(tumorlist), ncol = length(ngeneslist))
rownames(DAE_errors) <- tumorlist
colnames(DAE_errors) <- ngeneslist
PCA_errors <- matrix(nrow = length(tumorlist), ncol = length(ngeneslist))
rownames(PCA_errors) <- tumorlist
colnames(PCA_errors) <- ngeneslist

# seq_along() (rather than 1:length()) stays correct if a list is empty.
for (q in seq_along(tumorlist)) {
  for (r in seq_along(ngeneslist)) {
    # Summary file produced upstream for this tumor / gene-count combination.
    summ <- read.csv(sprintf("%s/%s.%s.biomarkers.rna.summary.csv",
                             outdir, tumorlist[q], ngeneslist[r]))
    # which.min() returns a single index (first minimum), so ties or NA
    # values in AE_error cannot break the scalar assignments below
    # (which(x == min(x)) could return several rows).
    chosen.row <- which.min(summ$AE_error)
    DAE_errors[q, r] <- summ[chosen.row, "AE_error"]
    PCA_errors[q, r] <- summ[chosen.row, "PCA_error"]
  }
}
save(DAE_errors, file = sprintf("%s/DAE_errors.rda", outdir))
save(PCA_errors, file = sprintf("%s/PCA_errors.rda", outdir))
|
# Perform differential expression from limma-voom (with eBayes)
#
# Pipeline: load read counts -> filter/normalize with edgeR -> voom transform
# -> limma linear model + eBayes moderation -> annotate ENSEMBL IDs with gene
# symbols -> write annotated result tables and a logFC-ranked list for GSEA.
#
# NOTE(review): rm(list = ls()) and save.image() make this script depend on
# and clobber the global environment; acceptable for a one-off analysis
# script, but it cannot be sourced safely from another session.
rm(list = ls())
load("readcounts_dfs.RData")
library(edgeR)
# use edgeR to remove lowly expressed genes and normalize reads for
# sequencing depth ; see code chunks above
# Two groups of three replicates each; "4C" is the reference level so the
# fitted coefficient measures 4C11+ relative to 4C.
info_e.edger <- factor(c(rep("4C", 3) , rep("4C11+", 3)))
info_e.edger <- relevel(info_e.edger , ref = "4C")
edgeR.DGElist <- DGEList(counts = df_e , group = info_e.edger)
cpm <- cpm(edgeR.DGElist) # log took off because of Diogo's script
# Keep genes with CPM > 1 in at least 35% of samples.
# NOTE(review): 'counts' is not defined anywhere in this script -- the
# threshold presumably should be based on ncol(df_e); as written this either
# errors or silently picks up an unrelated 'counts' object from the loaded
# .RData file. Confirm.
keep <- rowSums(cpm > 1) >= 0.35 * length(colnames(counts)) # this was changed according to the script of Diogo Pessoa on August 20th 2018
edgeR.DGElist <- edgeR.DGElist[keep ,]
edgeR.DGElist <- calcNormFactors(edgeR.DGElist, method = "TMM")
# limma also needs a design matrix , just like edgeR
design <- model.matrix(~info_e.edger)
# transform the count data to log2 -counts -per - million and estimate
# the mean - variance relationship , which is used to compute weights
# for each count -- this is supposed to make the read counts
# amenable to be used with linear models
rownames(design) <- colnames(edgeR.DGElist)
voomTransformed <- voom(edgeR.DGElist , design, plot = FALSE )
# fit a linear model for each gene
voomed.fitted <- lmFit(voomTransformed, design = design)
# compute moderated t- statistics , moderated F- statistics ,
# and log - odds of differential expression
voomed.fitted <- eBayes(voomed.fitted )
# extract gene list with logFC and statistical measures
colnames(design) # check how the coefficient is named
DGE.results_limma <- topTable(voomed.fitted, coef = "info_e.edger4C11+",
                              number = Inf , adjust.method = "BH", sort.by = "logFC")
# remove the dot containing the version of ENSEMBL GENE (with the dot the gene converter does not recognize the ENSEMBL gene id)
row.names(DGE.results_limma) <- gsub(pattern = "\\..{1,}", "", row.names(DGE.results_limma))
# get the gene symbol annotation of the ENSEMBLGENE genes
library(Mus.musculus)
# separate only the ENSEMBLGENE
DGEgenes <- rownames(DGE.results_limma)
# get gene symbol using mapIds, because with this it get only uniq IDs
anno <- select(org.Mm.eg.db, keys = DGEgenes, keytype = "ENSEMBL", columns = c("SYMBOL", "GENENAME"))
# transform anno in data frame
anno.df <- as.data.frame(anno)
# bind anno.df and DGE.results_limma by ENSEMBLGENE using "merge" = more accurate than cbind
DGE.results_limma_anno <- merge(anno.df, DGE.results_limma, by.x = "ENSEMBL",by.y = "row.names")
# order according to logFC (for the preranked list)
DGE.results_limma_anno <- DGE.results_limma_anno[order(DGE.results_limma_anno$logFC), ]
# save one version with NAs
write.table(DGE.results_limma_anno, "DGE.results_limma_anno_e.txt", quote = F,
            row.names = F, sep = "\t")
# remove NAs
# I installed the package rgr to get this function
DGE.results_notNA <- rgr::remove.na(DGE.results_limma_anno)
DGE.results_notNA <- DGE.results_notNA$x
# NOTE(review): the re-sorted table below is computed but never written out;
# the unsorted DGE.results_notNA is what gets saved. Confirm this is intended.
DGE.results.sorted_notNA <- DGE.results_notNA[order(DGE.results_notNA$logFC), ]
# save version without NAs
write.table(DGE.results_notNA, "DGE.results_limma_anno_notNA_e.txt",
            quote = F,
            row.names = F, sep = "\t")
# save subset only with gene symbols and logFC (for GSEA preranked)
preranked <- subset(DGE.results_notNA, select= c("SYMBOL", "logFC"))
write.table(preranked, "preranked_geneexpr_e.txt", quote = F,
            row.names = F, sep = "\t")
save.image("limma-voom-e.RData")
# venn diagram for the pairwise comparisons
# DE_list <- list(a = rownames(subset(DGE.results_limma , adj.P.Val <= 0.05)),
#                 b = rownames(subset(DGE.results_limma , adj.P.Val <= 0.05)),
#                 c = rownames(subset(DGE.results_limma , adj.P.Val <= 0.05)))
#
# gplots::venn(DE_list)
#
# DE_gns <- UpSetR :: fromList(DE_list)
# UpSetR::upset (DE_gns , order.by = "freq")
|
/scripts/R/rnaseq/limma-voom-e-new-log.R
|
no_license
|
flaviaerius/methylation-data
|
R
| false
| false
| 3,869
|
r
|
# Perform differential expression from limma-voom (with eBayes)
#
# Pipeline: load read counts -> filter/normalize with edgeR -> voom transform
# -> limma linear model + eBayes moderation -> annotate ENSEMBL IDs with gene
# symbols -> write annotated result tables and a logFC-ranked list for GSEA.
#
# NOTE(review): rm(list = ls()) and save.image() make this script depend on
# and clobber the global environment; acceptable for a one-off analysis
# script, but it cannot be sourced safely from another session.
rm(list = ls())
load("readcounts_dfs.RData")
library(edgeR)
# use edgeR to remove lowly expressed genes and normalize reads for
# sequencing depth ; see code chunks above
# Two groups of three replicates each; "4C" is the reference level so the
# fitted coefficient measures 4C11+ relative to 4C.
info_e.edger <- factor(c(rep("4C", 3) , rep("4C11+", 3)))
info_e.edger <- relevel(info_e.edger , ref = "4C")
edgeR.DGElist <- DGEList(counts = df_e , group = info_e.edger)
cpm <- cpm(edgeR.DGElist) # log took off because of Diogo's script
# Keep genes with CPM > 1 in at least 35% of samples.
# NOTE(review): 'counts' is not defined anywhere in this script -- the
# threshold presumably should be based on ncol(df_e); as written this either
# errors or silently picks up an unrelated 'counts' object from the loaded
# .RData file. Confirm.
keep <- rowSums(cpm > 1) >= 0.35 * length(colnames(counts)) # this was changed according to the script of Diogo Pessoa on August 20th 2018
edgeR.DGElist <- edgeR.DGElist[keep ,]
edgeR.DGElist <- calcNormFactors(edgeR.DGElist, method = "TMM")
# limma also needs a design matrix , just like edgeR
design <- model.matrix(~info_e.edger)
# transform the count data to log2 -counts -per - million and estimate
# the mean - variance relationship , which is used to compute weights
# for each count -- this is supposed to make the read counts
# amenable to be used with linear models
rownames(design) <- colnames(edgeR.DGElist)
voomTransformed <- voom(edgeR.DGElist , design, plot = FALSE )
# fit a linear model for each gene
voomed.fitted <- lmFit(voomTransformed, design = design)
# compute moderated t- statistics , moderated F- statistics ,
# and log - odds of differential expression
voomed.fitted <- eBayes(voomed.fitted )
# extract gene list with logFC and statistical measures
colnames(design) # check how the coefficient is named
DGE.results_limma <- topTable(voomed.fitted, coef = "info_e.edger4C11+",
                              number = Inf , adjust.method = "BH", sort.by = "logFC")
# remove the dot containing the version of ENSEMBL GENE (with the dot the gene converter does not recognize the ENSEMBL gene id)
row.names(DGE.results_limma) <- gsub(pattern = "\\..{1,}", "", row.names(DGE.results_limma))
# get the gene symbol annotation of the ENSEMBLGENE genes
library(Mus.musculus)
# separate only the ENSEMBLGENE
DGEgenes <- rownames(DGE.results_limma)
# get gene symbol using mapIds, because with this it get only uniq IDs
anno <- select(org.Mm.eg.db, keys = DGEgenes, keytype = "ENSEMBL", columns = c("SYMBOL", "GENENAME"))
# transform anno in data frame
anno.df <- as.data.frame(anno)
# bind anno.df and DGE.results_limma by ENSEMBLGENE using "merge" = more accurate than cbind
DGE.results_limma_anno <- merge(anno.df, DGE.results_limma, by.x = "ENSEMBL",by.y = "row.names")
# order according to logFC (for the preranked list)
DGE.results_limma_anno <- DGE.results_limma_anno[order(DGE.results_limma_anno$logFC), ]
# save one version with NAs
write.table(DGE.results_limma_anno, "DGE.results_limma_anno_e.txt", quote = F,
            row.names = F, sep = "\t")
# remove NAs
# I installed the package rgr to get this function
DGE.results_notNA <- rgr::remove.na(DGE.results_limma_anno)
DGE.results_notNA <- DGE.results_notNA$x
# NOTE(review): the re-sorted table below is computed but never written out;
# the unsorted DGE.results_notNA is what gets saved. Confirm this is intended.
DGE.results.sorted_notNA <- DGE.results_notNA[order(DGE.results_notNA$logFC), ]
# save version without NAs
write.table(DGE.results_notNA, "DGE.results_limma_anno_notNA_e.txt",
            quote = F,
            row.names = F, sep = "\t")
# save subset only with gene symbols and logFC (for GSEA preranked)
preranked <- subset(DGE.results_notNA, select= c("SYMBOL", "logFC"))
write.table(preranked, "preranked_geneexpr_e.txt", quote = F,
            row.names = F, sep = "\t")
save.image("limma-voom-e.RData")
# venn diagram for the pairwise comparisons
# DE_list <- list(a = rownames(subset(DGE.results_limma , adj.P.Val <= 0.05)),
#                 b = rownames(subset(DGE.results_limma , adj.P.Val <= 0.05)),
#                 c = rownames(subset(DGE.results_limma , adj.P.Val <= 0.05)))
#
# gplots::venn(DE_list)
#
# DE_gns <- UpSetR :: fromList(DE_list)
# UpSetR::upset (DE_gns , order.by = "freq")
|
# Fuzzer-derived regression input for the internal doubleCenterBiasCorrected()
# routine of the 'multivariance' package: a 4x3 matrix mixing NaN, extreme
# magnitudes, and zeros. Only the structure of the result is inspected.
fuzz_values <- c(NaN, 2.54166853232633e+117, 7.27917493709237e-95,
                 5.72778147612284e+250, 3.33836074328217e-28,
                 0, 0, 0, 0, 0, 0, 0)
testlist <- list(x = matrix(fuzz_values, nrow = 4L, ncol = 3L))
result <- do.call(multivariance:::doubleCenterBiasCorrected, testlist)
str(result)
|
/multivariance/inst/testfiles/doubleCenterBiasCorrected/libFuzzer_doubleCenterBiasCorrected/doubleCenterBiasCorrected_valgrind_files/1612884236-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 246
|
r
|
# Fuzzer-derived regression input for the internal doubleCenterBiasCorrected()
# routine of the 'multivariance' package: a 4x3 matrix mixing NaN, extreme
# magnitudes, and zeros. Only the structure of the result is inspected.
fuzz_values <- c(NaN, 2.54166853232633e+117, 7.27917493709237e-95,
                 5.72778147612284e+250, 3.33836074328217e-28,
                 0, 0, 0, 0, 0, 0, 0)
testlist <- list(x = matrix(fuzz_values, nrow = 4L, ncol = 3L))
result <- do.call(multivariance:::doubleCenterBiasCorrected, testlist)
str(result)
|
# making table data sets
library(dplyr)
library(tidyr)
library(MorpheusData)
library(data.table)
#############benchmark 85
# Simulated read-count table: six vials, two categorical band variables and a
# normally distributed read measurement. The seed fixes the sample()/rnorm()
# draws (order matters: band, then non_spec, then reads).
set.seed(14592)
dat <- data.frame(
  vial_id = 1:6,
  band = sample(0:2, 6, replace = TRUE),
  non_spec = sample(0:2, 6, replace = TRUE),
  reads = rnorm(6)
)
write.csv(dat, "data-raw/r73_input1.csv", row.names = FALSE)

# Expected benchmark output: mean reads per combined band/non_spec group.
df_out <- dat %>%
  unite(group_id, band, non_spec) %>%
  group_by(group_id) %>%
  summarize(group_mean = mean(reads))
write.csv(df_out, "data-raw/r73_output1.csv", row.names = FALSE)

# Round-trip a CSV and normalize column types (factor -> character,
# integer -> double) before storing the benchmark .rdata objects.
normalize_benchmark_csv <- function(csv_path) {
  df <- read.csv(csv_path, check.names = FALSE)
  fctr.cols <- sapply(df, is.factor)
  int.cols <- sapply(df, is.integer)
  df[, fctr.cols] <- sapply(df[, fctr.cols], as.character)
  df[, int.cols] <- sapply(df[, int.cols], as.numeric)
  df
}

r73_output1 <- normalize_benchmark_csv("data-raw/r73_output1.csv")
save(r73_output1, file = "data/r73_output1.rdata")

r73_input1 <- normalize_benchmark_csv("data-raw/r73_input1.csv")
save(r73_input1, file = "data/r73_input1.rdata")
|
/R73.R
|
permissive
|
boyland-pf/MorpheusData
|
R
| false
| false
| 1,304
|
r
|
# making table data sets
library(dplyr)
library(tidyr)
library(MorpheusData)
library(data.table)
#############benchmark 85
# Simulated read-count table: six vials, two categorical band variables and a
# normally distributed read measurement. The seed fixes the sample()/rnorm()
# draws (order matters: band, then non_spec, then reads).
set.seed(14592)
dat <- data.frame(
  vial_id = 1:6,
  band = sample(0:2, 6, replace = TRUE),
  non_spec = sample(0:2, 6, replace = TRUE),
  reads = rnorm(6)
)
write.csv(dat, "data-raw/r73_input1.csv", row.names = FALSE)

# Expected benchmark output: mean reads per combined band/non_spec group.
df_out <- dat %>%
  unite(group_id, band, non_spec) %>%
  group_by(group_id) %>%
  summarize(group_mean = mean(reads))
write.csv(df_out, "data-raw/r73_output1.csv", row.names = FALSE)

# Round-trip a CSV and normalize column types (factor -> character,
# integer -> double) before storing the benchmark .rdata objects.
normalize_benchmark_csv <- function(csv_path) {
  df <- read.csv(csv_path, check.names = FALSE)
  fctr.cols <- sapply(df, is.factor)
  int.cols <- sapply(df, is.integer)
  df[, fctr.cols] <- sapply(df[, fctr.cols], as.character)
  df[, int.cols] <- sapply(df[, int.cols], as.numeric)
  df
}

r73_output1 <- normalize_benchmark_csv("data-raw/r73_output1.csv")
save(r73_output1, file = "data/r73_output1.rdata")

r73_input1 <- normalize_benchmark_csv("data-raw/r73_input1.csv")
save(r73_input1, file = "data/r73_input1.rdata")
|
"nEvents" <- function(hr = .6, alpha = .025, beta = .1, ratio = 1, sided = 1, hr0 = 1, n = 0, tbl = FALSE)
{ if (sided != 1 && sided != 2) stop("sided must be 1 or 2")
c <- sqrt(ratio) / (1 + ratio)
delta <- -c * (log(hr) - log(hr0))
if (n[1] == 0)
{ n <- (qnorm(1-alpha/sided)+qnorm(1-beta))^2/delta^2
if (tbl) n <- data.frame(cbind(hr = hr, n = ceiling(n), alpha = alpha,
sided=sided, beta = beta,
Power = 1-beta, delta = delta, ratio = ratio,
hr0 = hr0, se = 1/c/sqrt(ceiling(n))))
return(n)
}
else
{ pwr <- pnorm(-(qnorm(1-alpha/sided)-sqrt(n) * delta))
if (tbl) pwr <- data.frame(cbind(hr = hr, n = n, alpha = alpha,
sided=sided, beta = 1-pwr,
Power = pwr, delta = delta, ratio = ratio,
hr0 = hr0, se = sqrt(1/n)/c))
return(pwr)
}
}
"zn2hr" <- function (z, n , ratio = 1, hr0=1, hr1=.7)
{ c <- 1/(1 + ratio)
psi <- c * (1 - c)
exp(z * sign(hr1-hr0)/sqrt(n * psi)) * hr0
}
"hrn2z" <- function(hr, n, ratio=1, hr0=1, hr1=.7)
{ c <- 1/(1 + ratio)
psi <- c * (1 - c)
log(hr/hr0) * sqrt(n * psi) * sign(hr0-hr1)
}
"hrz2n" <- function(hr, z, ratio=1, hr0=1)
{ c <- 1 / (1 + ratio)
psi <- c * (1 - c)
(z / log(hr/hr0))^2 / psi
}
|
/gsDesign/R/nEvents.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,399
|
r
|
"nEvents" <- function(hr = .6, alpha = .025, beta = .1, ratio = 1, sided = 1, hr0 = 1, n = 0, tbl = FALSE)
{ if (sided != 1 && sided != 2) stop("sided must be 1 or 2")
c <- sqrt(ratio) / (1 + ratio)
delta <- -c * (log(hr) - log(hr0))
if (n[1] == 0)
{ n <- (qnorm(1-alpha/sided)+qnorm(1-beta))^2/delta^2
if (tbl) n <- data.frame(cbind(hr = hr, n = ceiling(n), alpha = alpha,
sided=sided, beta = beta,
Power = 1-beta, delta = delta, ratio = ratio,
hr0 = hr0, se = 1/c/sqrt(ceiling(n))))
return(n)
}
else
{ pwr <- pnorm(-(qnorm(1-alpha/sided)-sqrt(n) * delta))
if (tbl) pwr <- data.frame(cbind(hr = hr, n = n, alpha = alpha,
sided=sided, beta = 1-pwr,
Power = pwr, delta = delta, ratio = ratio,
hr0 = hr0, se = sqrt(1/n)/c))
return(pwr)
}
}
"zn2hr" <- function (z, n , ratio = 1, hr0=1, hr1=.7)
{ c <- 1/(1 + ratio)
psi <- c * (1 - c)
exp(z * sign(hr1-hr0)/sqrt(n * psi)) * hr0
}
"hrn2z" <- function(hr, n, ratio=1, hr0=1, hr1=.7)
{ c <- 1/(1 + ratio)
psi <- c * (1 - c)
log(hr/hr0) * sqrt(n * psi) * sign(hr0-hr1)
}
"hrz2n" <- function(hr, z, ratio=1, hr0=1)
{ c <- 1 / (1 + ratio)
psi <- c * (1 - c)
(z / log(hr/hr0))^2 / psi
}
|
# Page 58
# Plot sin(1/x) near the origin, where it oscillates infinitely often;
# n = 10000 evaluation points are needed to resolve the rapid oscillation.
# BUG FIX: the original passed add=TRUE, which fails with "plot.new has not
# been called yet" when the script is run on a fresh graphics device; drawing
# a new plot makes the example self-contained.
curve( sin(1/x), from=-4/pi, to=4/pi, col="blue", n=10000)
# Dashed vertical reference lines at x = +/-1/pi and +/-2/pi.
abline(v = c(-2/pi,-1/pi, 1/pi, 2/pi), lty=2, col="red" )
# Horizontal axis through y = 0.
abline(h=0, col="black")
|
/Calculus/R/Example-02-1-07.r
|
no_license
|
AppliedStat/class
|
R
| false
| false
| 169
|
r
|
# Page 58
# Plot sin(1/x) near the origin, where it oscillates infinitely often;
# n = 10000 evaluation points are needed to resolve the rapid oscillation.
# BUG FIX: the original passed add=TRUE, which fails with "plot.new has not
# been called yet" when the script is run on a fresh graphics device; drawing
# a new plot makes the example self-contained.
curve( sin(1/x), from=-4/pi, to=4/pi, col="blue", n=10000)
# Dashed vertical reference lines at x = +/-1/pi and +/-2/pi.
abline(v = c(-2/pi,-1/pi, 1/pi, 2/pi), lty=2, col="red" )
# Horizontal axis through y = 0.
abline(h=0, col="black")
|
# Run from the script's own directory so the relative setup path resolves.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")

# Runit wrapper around the AutoML R example from the API documentation:
# train for up to 30 seconds on the bundled prostate data set and fetch the
# head of the resulting leaderboard.
test.automl.documentation.suite <- function() {
  r_demo <- function() {
    data_path <- system.file("extdata", "prostate.csv", package = "h2o")
    prostate <- h2o.uploadFile(path = data_path, header = TRUE)
    response <- "CAPSULE"
    # Binary classification: the response column must be a factor.
    prostate[, response] <- as.factor(prostate[, response])
    aml <- h2o.automl(y = response, training_frame = prostate, max_runtime_secs = 30)
    head(h2o.get_leaderboard(aml))
  }
  makeSuite(r_demo)
}
doSuite("Test for AutoML's R code examples", test.automl.documentation.suite(), time_monitor=TRUE)
|
/h2o-r/tests/testdir_algos/automl/runit_automl_demo_for_api_documentation.R
|
permissive
|
h2oai/h2o-3
|
R
| false
| false
| 676
|
r
|
# Run from the script's own directory so the relative setup path resolves.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")

# Runit wrapper around the AutoML R example from the API documentation:
# train for up to 30 seconds on the bundled prostate data set and fetch the
# head of the resulting leaderboard.
test.automl.documentation.suite <- function() {
  r_demo <- function() {
    data_path <- system.file("extdata", "prostate.csv", package = "h2o")
    prostate <- h2o.uploadFile(path = data_path, header = TRUE)
    response <- "CAPSULE"
    # Binary classification: the response column must be a factor.
    prostate[, response] <- as.factor(prostate[, response])
    aml <- h2o.automl(y = response, training_frame = prostate, max_runtime_secs = 30)
    head(h2o.get_leaderboard(aml))
  }
  makeSuite(r_demo)
}
doSuite("Test for AutoML's R code examples", test.automl.documentation.suite(), time_monitor=TRUE)
|
#'Calculate the analytic steady state concentration for model pbtk.
#'
#'This function calculates the analytic steady state plasma or venous blood
#'concentrations as a result of infusion dosing.
#'
#'@param chem.name Either the chemical name, CAS number, or the parameters must
#'be specified.
#'@param chem.cas Either the chemical name, CAS number, or the parameters must
#'be specified.
#'@param parameters Chemical parameters from parameterize_pbtk (for model =
#''pbtk'), parameterize_3comp (for model = '3compartment),
#'parmeterize_1comp(for model = '1compartment') or parameterize_steadystate
#'(for model = '3compartmentss'), overrides chem.name and chem.cas.
#'@param hourly.dose Hourly dose rate mg/kg BW/h.
#'@param concentration Desired concentration type, 'blood', 'tissue', or default 'plasma'.
#'@param suppress.messages Whether or not the output message is suppressed.
#'@param recalc.blood2plasma Recalculates the ratio of the amount of chemical
#'in the blood to plasma using the input parameters. Use this if you have
#'altered hematocrit, Funbound.plasma, or Krbc2pu.
#'@param tissue Desired tissue concentration (defaults to whole body
#'concentration.)
#'@param restrictive.clearance If TRUE (default), then only the fraction of
#' chemical not bound to protein is available for metabolism in the liver. If
#' FALSE, then all chemical in the liver is metabolized (faster metabolism due
#' to rapid off-binding).
#'@param bioactive.free.invivo If FALSE (default), then the total concentration is treated
#' as bioactive in vivo. If TRUE, then the unbound (free) plasma concentration is treated as
#' bioactive in vivo. Only works with tissue = NULL in current implementation.
#'@param ... Additional parameters passed to parameterize function if
#'parameters is NULL.
#'
#'@return Steady state concentration in uM units
#'
#'@author Robert Pearce and John Wambaugh
calc_analytic_css_pbtk <- function(chem.name=NULL,
                                   chem.cas = NULL,
                                   parameters=NULL,
                                   hourly.dose=1/24,
                                   concentration='plasma',
                                   suppress.messages=FALSE,
                                   recalc.blood2plasma=FALSE,
                                   tissue=NULL,
                                   restrictive.clearance=TRUE,
                                   bioactive.free.invivo = FALSE,
                                   ...)
{
  # Either a chemical identifier (used to look up parameters) or an explicit
  # parameter list is required:
  if (is.null(chem.cas) & is.null(chem.name) & is.null(parameters))
  {
    stop('Parameters, chem.name, or chem.cas must be specified.')
  }
  if (is.null(parameters))
  {
    parameters <- parameterize_pbtk(chem.cas=chem.cas,
                                    chem.name=chem.name,
                                    suppress.messages=suppress.messages,
                                    ...)
    if (recalc.blood2plasma)
    {
      warning("Argument recalc.blood2plasma=TRUE ignored because parameters is NULL.")
    }
  } else {
    # User-supplied parameters must cover the full pbtk model:
    if (!all(param.names.pbtk %in% names(parameters)))
    {
      stop(paste("Missing parameters:",
                 paste(param.names.pbtk[which(!param.names.pbtk %in% names(parameters))],
                       collapse=', '),
                 ". Use parameters from parameterize_pbtk."))
    }
    if (recalc.blood2plasma) {
      # Blood:plasma ratio from hematocrit, the RBC:unbound-plasma partition
      # coefficient, and the fraction unbound in plasma:
      parameters[['Rblood2plasma']] <- 1 -
        parameters[['hematocrit']] +
        parameters[['hematocrit']] * parameters[['Krbc2pu']] * parameters[['Funbound.plasma']]
    }
  }
  # Allometrically scale flows: the *c parameters are per kg BW^0.75, so
  # dividing by BW^0.25 gives flow per kg BW.
  Qcardiac <- parameters[["Qcardiacc"]] / parameters[['BW']]^0.25
  Qgfr <- parameters[["Qgfrc"]] / parameters[['BW']]^0.25
  Clmetabolism <- parameters[["Clmetabolismc"]]
  Kliver2pu <- parameters[['Kliver2pu']]
  # Tissue blood flows as fractions of cardiac output:
  Qgut <- parameters[["Qgutf"]] * Qcardiac
  Qliver <- parameters[["Qliverf"]] * Qcardiac
  Qkidney <- parameters[['Qkidneyf']] * Qcardiac
  Qrest <- Qcardiac - Qgut - Qliver - Qkidney
  Rblood2plasma <- parameters[['Rblood2plasma']]
  fup <- parameters[["Funbound.plasma"]]
  # Non-restrictive clearance: all chemical in the liver (not just the unbound
  # fraction) is available for metabolism:
  if (!restrictive.clearance) Clmetabolism <- Clmetabolism / fup
  # Only the absorbed fraction of the oral dose reaches circulation:
  hourly.dose <- hourly.dose * parameters$Fgutabs
  # Calculate steady-state plasma Css:
  Css <- (hourly.dose * (Qliver + Qgut) /
          (fup * Clmetabolism / Rblood2plasma + (Qliver + Qgut))) /
         (Qcardiac - (Qliver + Qgut)**2 /
          (fup * Clmetabolism / Rblood2plasma + (Qliver + Qgut)) -
          Qkidney**2 / (Qgfr * fup / Rblood2plasma + Qkidney) - Qrest)
  # Check to see if a specific tissue was asked for:
  if (!is.null(tissue))
  {
    # Need to convert to schmitt parameters:
    pcs <- predict_partitioning_schmitt(parameters=parameters[param.names.schmitt[param.names.schmitt%in%names(parameters)]])
    if (!paste0('K',tolower(tissue)) %in%
        substr(names(pcs),1,nchar(names(pcs))-3))
    {
      stop(paste("Tissue",tissue,"is not available."))
    }
    # Tissues with sources (gut) or sinks (liver,kidney) need to be calculated
    # taking the change of mass into account:
    if (tissue == 'gut')
    {
      Qgut <- parameters$Qgutf * parameters$Qcardiacc / parameters$BW^0.25
      # BUG FIX: this branch previously used 'dose', a NULL placeholder
      # declared only to appease R CMD CHECK, which made Css a zero-length
      # vector; the absorbed hourly dose is the intended oral input here.
      Css <- parameters[['Kgut2pu']] * parameters[['Funbound.plasma']] *
        (Css + hourly.dose / (Qgut * parameters[['Rblood2plasma']]))
    } else if (tissue == 'liver') {
      # The liver receives both the hepatic artery/gut flow and the absorbed
      # dose, and loses chemical to metabolism:
      Qliver <- (parameters$Qgutf + parameters$Qliverf) * parameters$Qcardiacc /
        parameters$BW^0.25
      Clmetabolism <- parameters$Clmetabolismc
      if (!restrictive.clearance) Clmetabolism <- Clmetabolism / fup
      Css <- parameters[['Kliver2pu']] * fup * (hourly.dose +
             Qliver * Css * Rblood2plasma) /
             (Clmetabolism * fup + Qliver * Rblood2plasma)
    } else if (tissue == 'kidney') {
      # The kidney loses chemical to glomerular filtration:
      Qkidney <- parameters$Qkidneyf * parameters$Qcardiacc / parameters$BW^0.25
      Css <- parameters[['Kkidney2pu']] * fup * Qkidney * Css * Rblood2plasma /
             (Qkidney * Rblood2plasma + parameters$Qgfrc * fup)
    # All other tissues are proportional based on the partition coefficient:
    } else {
      Css <- Css * pcs[[names(pcs)[substr(names(pcs),2,nchar(names(pcs))-3)==tissue]]] * fup
    }
  }
  # Convert the plasma concentration to the requested type. (If a tissue was
  # requested above, Css already is the tissue concentration.)
  # BUG FIX: this condition was previously written
  # 'tolower(concentration != "tissue")', which only worked because if()
  # coerces the strings "true"/"false" to logicals.
  if (tolower(concentration) != "tissue") {
    if (tolower(concentration) == 'plasma')
    {
      concentration <- "Plasma"
      if (bioactive.free.invivo)
      {
        # Treat only the unbound (free) fraction as bioactive:
        Css <- Css * parameters[['Funbound.plasma']]
      }
    } else if (tolower(concentration) == 'blood')
    {
      Css <- Css * Rblood2plasma
      concentration <- "Blood"
    } else {
      stop("Only blood and plasma concentrations are calculated.")
    }
  }
  return(Css)
}
|
/R/calc_analytic_css_pbtk.R
|
no_license
|
bellsha/CompTox-ExpoCast-httk
|
R
| false
| false
| 6,997
|
r
|
#'Calculate the analytic steady state concentration for model pbtk.
#'
#'This function calculates the analytic steady state plasma or venous blood
#'concentrations as a result of infusion dosing.
#'
#'@param chem.name Either the chemical name, CAS number, or the parameters must
#'be specified.
#'@param chem.cas Either the chemical name, CAS number, or the parameters must
#'be specified.
#'@param parameters Chemical parameters from parameterize_pbtk (for model =
#''pbtk'), parameterize_3comp (for model = '3compartment),
#'parmeterize_1comp(for model = '1compartment') or parameterize_steadystate
#'(for model = '3compartmentss'), overrides chem.name and chem.cas.
#'@param hourly.dose Hourly dose rate mg/kg BW/h.
#'@param concentration Desired concentration type, 'blood', 'tissue', or default 'plasma'.
#'@param suppress.messages Whether or not the output message is suppressed.
#'@param recalc.blood2plasma Recalculates the ratio of the amount of chemical
#'in the blood to plasma using the input parameters. Use this if you have
#''altered hematocrit, Funbound.plasma, or Krbc2pu.
#'@param tissue Desired tissue conentration (defaults to whole body
#'concentration.)
#'@param restrictive.clearance If TRUE (default), then only the fraction of
#' chemical not bound to protein is available for metabolism in the liver. If
#' FALSE, then all chemical in the liver is metabolized (faster metabolism due
#' to rapid off-binding).
#'@param bioactive.free.invivo If FALSE (default), then the total concentration is treated
#' as bioactive in vivo. If TRUE, the the unbound (free) plasma concentration is treated as
#' bioactive in vivo. Only works with tissue = NULL in current implementation.
#'@param ... Additional parameters passed to parameterize function if
#'parameters is NULL.
#'
#'@return Steady state concentration in uM units
#'
#'@author Robert Pearce and John Wambaugh
calc_analytic_css_pbtk <- function(chem.name=NULL,
                                   chem.cas = NULL,
                                   parameters=NULL,
                                   hourly.dose=1/24,
                                   concentration='plasma',
                                   suppress.messages=FALSE,
                                   recalc.blood2plasma=FALSE,
                                   tissue=NULL,
                                   restrictive.clearance=TRUE,
                                   bioactive.free.invivo = FALSE,
                                   ...)
{
  # Analytic steady-state concentration (uM) for the 'pbtk' model.
  # Either chem.name/chem.cas or a complete `parameters` list must be
  # supplied.  When `tissue` is given, the plasma Css is converted to the
  # requested tissue concentration; otherwise plasma (or blood) Css is
  # returned.
  if (is.null(chem.cas) && is.null(chem.name) && is.null(parameters))
  {
    stop('Parameters, chem.name, or chem.cas must be specified.')
  }
  if (is.null(parameters))
  {
    parameters <- parameterize_pbtk(chem.cas=chem.cas,
                                    chem.name=chem.name,
                                    suppress.messages=suppress.messages,
                                    ...)
    if (recalc.blood2plasma)
    {
      warning("Argument recalc.blood2plasma=TRUE ignored because parameters is NULL.")
    }
  } else {
    if (!all(param.names.pbtk %in% names(parameters)))
    {
      stop(paste("Missing parameters:",
                 paste(param.names.pbtk[which(!param.names.pbtk %in% names(parameters))],
                   collapse=', '),
                 ". Use parameters from parameterize_pbtk."))
    }
    if (recalc.blood2plasma) {
      # Rblood2plasma = 1 - hematocrit + hematocrit * Krbc2pu * fup
      parameters[['Rblood2plasma']] <- 1 -
        parameters[['hematocrit']] +
        parameters[['hematocrit']] * parameters[['Krbc2pu']] * parameters[['Funbound.plasma']]
    }
  }

  # Allometric scaling of the flow parameters by body weight:
  Qcardiac <- parameters[["Qcardiacc"]] / parameters[['BW']]^0.25
  Qgfr <- parameters[["Qgfrc"]] / parameters[['BW']]^0.25
  Clmetabolism <- parameters[["Clmetabolismc"]]
  # Tissue-specific flows are fractions of cardiac output:
  Qgut <- parameters[["Qgutf"]] * Qcardiac
  Qliver <- parameters[["Qliverf"]] * Qcardiac
  Qkidney <- parameters[['Qkidneyf']] * Qcardiac
  Qrest <- Qcardiac - Qgut - Qliver - Qkidney
  Rblood2plasma <- parameters[['Rblood2plasma']]
  fup <- parameters[["Funbound.plasma"]]
  # Non-restrictive clearance: all (not just unbound) hepatic chemical is
  # available for metabolism.
  if (!restrictive.clearance) Clmetabolism <- Clmetabolism / fup

  # Only the absorbed fraction of the oral dose enters the gut:
  hourly.dose <- hourly.dose * parameters$Fgutabs

  # Analytic steady-state plasma concentration:
  Css <- (hourly.dose * (Qliver + Qgut) /
          (fup * Clmetabolism / Rblood2plasma + (Qliver + Qgut))) /
         (Qcardiac - (Qliver + Qgut)^2 /
          (fup * Clmetabolism / Rblood2plasma + (Qliver + Qgut)) -
          Qkidney^2 / (Qgfr * fup / Rblood2plasma + Qkidney) - Qrest)

  # Check to see if a specific tissue was asked for:
  if (!is.null(tissue))
  {
    # Need to convert to schmitt parameters for the partition coefficients:
    pcs <- predict_partitioning_schmitt(parameters=parameters[param.names.schmitt[param.names.schmitt %in% names(parameters)]])
    if (!paste0('K',tolower(tissue)) %in%
        substr(names(pcs),1,nchar(names(pcs))-3))
    {
      stop(paste("Tissue",tissue,"is not available."))
    }
    # Tissues with sources (gut) or sinks (liver, kidney) need to be
    # calculated taking the change of mass into account:
    if (tissue == 'gut')
    {
      Qgut <- parameters$Qgutf * parameters$Qcardiacc / parameters$BW^0.25
      # Bug fix: the oral input rate here is `hourly.dose` (already scaled
      # by Fgutabs above).  The previous code referenced `dose`, a variable
      # that was explicitly set to NULL, which silently collapsed Css to
      # numeric(0).
      Css <- parameters[['Kgut2pu']] * parameters[['Funbound.plasma']] *
             (Css + hourly.dose / (Qgut * parameters[['Rblood2plasma']]))
    } else if (tissue == 'liver') {
      Qliver <- (parameters$Qgutf + parameters$Qliverf) * parameters$Qcardiacc /
                parameters$BW^0.25
      Clmetabolism <- parameters$Clmetabolismc
      if (!restrictive.clearance) Clmetabolism <- Clmetabolism / fup
      Css <- parameters[['Kliver2pu']] * fup * (hourly.dose +
             Qliver * Css * Rblood2plasma) /
             (Clmetabolism * fup + Qliver * Rblood2plasma)
    } else if (tissue == 'kidney') {
      Qkidney <- parameters$Qkidneyf * parameters$Qcardiacc / parameters$BW^0.25
      Css <- parameters[['Kkidney2pu']] * fup * Qkidney * Css * Rblood2plasma /
             (Qkidney * Rblood2plasma + parameters$Qgfrc * fup)
    # All other tissues are proportional based on the partition coefficient:
    } else {
      Css <- Css * pcs[[names(pcs)[substr(names(pcs),2,nchar(names(pcs))-3)==tissue]]] * fup
    }
  }

  # Convert plasma Css to the requested concentration type (skipped when a
  # tissue concentration was produced above).
  # Bug fix: the closing parenthesis was misplaced -- the original tested
  # tolower(concentration != "tissue"), which only worked by accident.
  if (tolower(concentration) != "tissue") {
    if (tolower(concentration) == 'plasma')
    {
      concentration <- "Plasma"
      # Optionally treat only the free (unbound) plasma fraction as
      # bioactive in vivo:
      if (isTRUE(bioactive.free.invivo)) {
        Css <- Css * parameters[['Funbound.plasma']]
      }
    } else if (tolower(concentration) == 'blood')
    {
      Css <- Css * Rblood2plasma
      concentration <- "Blood"
    } else {
      stop("Only blood and plasma concentrations are calculated.")
    }
  }
  return(Css)
}
|
#' Equation 7
#'
#' @description Equation 7 from Montagnes et al (2008) citing Schoolfield et al. (1981)
#'
#' @param temp temperature (in Celsius)
#' @param rate rate measurement
#' @param augment logical; whether the dataset with fits should be returned instead of the parameter values
#' @param return_fit logical; whether the model fit object should be returned
#'
#' @return a data frame of, depending on augment argument, if FALSE, parameters, if TRUE, data with predicted values
#' @export
#'
#'
equ7 <- function(temp, rate, augment = FALSE, return_fit = FALSE) {
  # Fit the Schoolfield-type temperature-response function to (temp, rate).
  try_test <- try({
    # Boltzmann constant in eV/K
    R <- 8.617343e-5
    # Starting values for the nonlinear least-squares fit:
    b <- 1
    c <- 1.05
    d <- 273.15
    # Scale `a` so the model's maximum matches the observed maximum rate:
    a <- max(rate)/max((((temp+273.15)/293.15)*exp(b/R*(1/293.15-1/(temp+273.15))))/(1+exp(c/R*(1/d-1/(temp+273.15)))))
    fit <- minpack.lm::nlsLM(rate ~ (a*((temp+273.15)/293.15)*exp(b/R*(1/293.15-1/(temp+273.15))))/(1+exp(c/R*(1/d-1/(temp+273.15)))),
                             control = minpack.lm::nls.lm.control(maxiter = 10^20),
                             start = list(a = a, b = b, c = c, d = d))
    # Extract the fitted coefficients from the tidied model summary:
    output <- broom::tidy(fit)
    a <- output$estimate[output$term == "a"]
    b <- output$estimate[output$term == "b"]
    c <- output$estimate[output$term == "c"]
    d <- output$estimate[output$term == "d"]
    # Fitted curve as a function of temperature t (Celsius) - for use with optim
    f_equ <- function(t) {
      (a*((t+273.15)/293.15)*exp(b/R*(1/293.15-1/(t+273.15))))/(1+exp(c/R*(1/d-1/(t+273.15))))
    }
  })
  # NOTE(review): if the fit above fails, `output`, `fit` and `f_equ` are
  # undefined here; amend_output() is assumed to inspect `try_test` before
  # evaluating them -- confirm against temperatureresponse::amend_output.
  output <- temperatureresponse::amend_output(output,
                                              fit,
                                              f_equ,
                                              temp,
                                              rate,
                                              try_test,
                                              augment,
                                              return_fit)
  output$model <- "equ07"
  print("equ07")
  return(output)
}
|
/R/equ7.R
|
no_license
|
padpadpadpad/temperatureresponse
|
R
| false
| false
| 2,164
|
r
|
#' Equation 7
#'
#' @description Equation 7 from Montagnes et al (2008) citing Schoolfield et al. (1981)
#'
#' @param temp temperature (in Celsius)
#' @param rate rate measurement
#' @param augment logical whether the dataset with fits should be returned instead of the parameter values
#' @param return_fit logical whether the model fit object should be returned
#'
#' @return a data frame of, depending on augment argument, if FALSE, parameters, if TRUE, data with predicted values
#' @export
#'
#'
equ7 <- function(temp,rate, augment=F, return_fit=F){
  #Fit the function
  try_test <- try({
    # Boltzmann constant in eV/K
    R<-8.617343e-5
    # Starting values for the nonlinear least-squares fit
    b <- 1
    c <- 1.05
    d <- 273.15
    # Scale `a` so the model's maximum matches the observed maximum rate
    a <- max(rate)/max((((temp+273.15)/293.15)*exp(b/R*(1/293.15-1/(temp+273.15))))/(1+exp(c/R*(1/d-1/(temp+273.15)))))
    fit = minpack.lm::nlsLM(rate ~ (a*((temp+273.15)/293.15)*exp(b/R*(1/293.15-1/(temp+273.15))))/(1+exp(c/R*(1/d-1/(temp+273.15)))),
                            control = minpack.lm::nls.lm.control(maxiter = 10^20),start = list(a=a,
                                                                                               b=b,
                                                                                               c=c,
                                                                                               d=d))
    # Extract the fitted coefficients from the tidied model summary
    output <- broom::tidy(fit)
    a <- output$estimate[output$term=="a"]
    b <- output$estimate[output$term=="b"]
    c <- output$estimate[output$term=="c"]
    d <- output$estimate[output$term=="d"]
    f_equ= function(t){(a*((t+273.15)/293.15)*exp(b/R*(1/293.15-1/(t+273.15))))/(1+exp(c/R*(1/d-1/(t+273.15))))} #- for use with optim
  })
  # NOTE(review): if the try() block fails, `output`, `fit` and `f_equ` are
  # undefined here; amend_output() presumably checks `try_test` before
  # evaluating them -- confirm against temperatureresponse::amend_output.
  output <- temperatureresponse::amend_output(output,
                                              fit,
                                              f_equ,
                                              temp,
                                              rate,
                                              try_test,
                                              augment,
                                              return_fit)
  # Tag the results with this model's identifier
  output$model <- "equ07"
  print("equ07")
  return(output)
}
|
check_ia <- function(data) {
  # Validate the interest-area (IA) coding of an eye-tracking dataset.
  # For each eye this checks that:
  #   1) every IA ID is between 0 and 8, and
  #   2) each IA label maps to exactly one IA ID.
  # The observed ID/label mappings are always printed via message(); all
  # passing checks are reported via message() and all failing checks are
  # collected and raised together via stop().
  #
  # Args:
  #   data: a data frame with the columns RIGHT_INTEREST_AREA_ID,
  #         RIGHT_INTEREST_AREA_LABEL, LEFT_INTEREST_AREA_ID and
  #         LEFT_INTEREST_AREA_LABEL.
  # Returns: NULL, invisibly; called for its message()/stop() side effects.
  # NOTE(review): relies on dplyr verbs (rename, group_by, summarise,
  # arrange, mutate, filter) and %>% being attached -- confirm imports.

  # Check if PupilPre is installed
  #.check_for_PupilPre(type="NotAvailable")

  # Check right eye: one row per observed (label, ID) pairing; after
  # summarise() the result is grouped by label, so N counts how many
  # distinct IDs share the same label.
  Rias <- data %>%
    rename(RIGHT_IA_ID = RIGHT_INTEREST_AREA_ID,
           RIGHT_IA_LABEL = RIGHT_INTEREST_AREA_LABEL) %>%
    group_by(RIGHT_IA_LABEL, RIGHT_IA_ID) %>%
    summarise() %>% arrange(RIGHT_IA_ID) %>%
    mutate(N = n())
  # Count IDs outside the allowed 0..8 range (vectorized; replaces a
  # seq-unsafe `for (x in 1:nrow(...))` loop with an empty if-branch).
  R_bad_id <- sum(!(Rias$RIGHT_IA_ID >= 0 & Rias$RIGHT_IA_ID <= 8))
  R_bad_labels <- filter(Rias, N > 1)

  # Check left eye (same logic as above).
  Lias <- data %>%
    rename(LEFT_IA_ID = LEFT_INTEREST_AREA_ID,
           LEFT_IA_LABEL = LEFT_INTEREST_AREA_LABEL) %>%
    group_by(LEFT_IA_LABEL, LEFT_IA_ID) %>%
    summarise() %>% arrange(LEFT_IA_ID) %>%
    mutate(N = n())
  L_bad_id <- sum(!(Lias$LEFT_IA_ID >= 0 & Lias$LEFT_IA_ID <= 8))
  L_bad_labels <- filter(Lias, N > 1)

  # Print the observed ID -> label mappings for both eyes.
  message(paste(utils::capture.output(print(as.data.frame(Rias[,2:1]), row.names=FALSE)), collapse = "\n"))
  message(paste(utils::capture.output(print(as.data.frame(Lias[,2:1]), row.names=FALSE)), collapse = "\n"))

  # Collect one confirmation line per passing check, in the same order the
  # original code assembled them (right range, left range, right labels,
  # left labels).  `if (cond) x` yields NULL when FALSE, which c() drops.
  msgs <- c(
    if (R_bad_id == 0) "Interest Area IDs for the right eye are coded appropriately between 0 and 8.",
    if (L_bad_id == 0) "Interest Area IDs for the left eye are coded appropriately between 0 and 8.",
    if (nrow(R_bad_labels) == 0) "Interest Area ID and label mapping combinations for the right eye are consistent.",
    if (nrow(L_bad_labels) == 0) "Interest Area ID and label mapping combinations for the left eye are consistent."
  )
  if (length(msgs) > 0) {
    message(paste(msgs, collapse = "\n"))
  }

  # Collect one error line per failing check and abort if any check failed.
  stps <- c(
    if (R_bad_id > 0) "Interest Area IDs for the right eye are not between 0 and 8. Please recode before proceeding with data processing.",
    if (L_bad_id > 0) "Interest Area IDs for the left eye are not between 0 and 8. Please recode before proceeding with data processing.",
    if (nrow(R_bad_labels) > 0) "Interest Area ID and label combinations for the right eye are not consistent. Please correct before proceeding with data processing.",
    if (nrow(L_bad_labels) > 0) "Interest Area ID and label combinations for the left eye are not consistent. Please correct before proceeding with data processing."
  )
  if (length(stps) > 0) {
    stop(paste(stps, collapse = "\n"))
  }
  invisible(NULL)
}
|
/R/check_ia.R
|
no_license
|
Turner-JA/EyetrackingWA
|
R
| false
| false
| 3,814
|
r
|
check_ia <- function(data) {
  # Validate the interest-area (IA) coding of an eye-tracking dataset.
  # For each eye this checks that (1) every IA ID is between 0 and 8 and
  # (2) each IA label maps to exactly one IA ID.  The observed ID/label
  # mappings are printed via message(); passing checks are reported via
  # message() and all failing checks are raised together via stop().
  #
  # Args:
  #   data: a data frame with RIGHT_INTEREST_AREA_ID,
  #         RIGHT_INTEREST_AREA_LABEL, LEFT_INTEREST_AREA_ID and
  #         LEFT_INTEREST_AREA_LABEL columns.
  # Returns: NULL; called for its message()/stop() side effects.
  # NOTE(review): relies on dplyr verbs and %>% being attached -- confirm.
  # Check if PupilPre is installed
  #.check_for_PupilPre(type="NotAvailable")
  # Check right eye
  # One row per observed (label, ID) pair; after summarise() the result is
  # grouped by label, so N counts distinct IDs sharing the same label.
  Rias <- data %>%
    rename(RIGHT_IA_ID = RIGHT_INTEREST_AREA_ID,
           RIGHT_IA_LABEL = RIGHT_INTEREST_AREA_LABEL) %>%
    group_by(RIGHT_IA_LABEL, RIGHT_IA_ID) %>%
    summarise() %>% arrange(RIGHT_IA_ID) %>%
    mutate(N = n())
  # Count IDs outside the allowed 0..8 range (if-branch intentionally empty).
  R_bad_id <- 0
  for (x in 1:nrow(Rias)) {
    if (Rias[x, "RIGHT_IA_ID"] >= 0 & Rias[x, "RIGHT_IA_ID"] <= 8) {
    } else {
      R_bad_id <- R_bad_id + 1
    }
  }
  # Labels that map to more than one ID:
  R_bad_labels <- filter(Rias, N > 1)
  # Check left eye (same logic as the right eye above)
  Lias <- data %>%
    rename(LEFT_IA_ID = LEFT_INTEREST_AREA_ID,
           LEFT_IA_LABEL = LEFT_INTEREST_AREA_LABEL) %>%
    group_by(LEFT_IA_LABEL, LEFT_IA_ID) %>%
    summarise() %>% arrange(LEFT_IA_ID) %>%
    mutate(N = n())
  L_bad_id <- 0
  for (x in 1:nrow(Lias)) {
    if (Lias[x, "LEFT_IA_ID"] >= 0 & Lias[x, "LEFT_IA_ID"] <= 8) {
    } else {
      L_bad_id <- L_bad_id + 1
    }
  }
  L_bad_labels <- filter(Lias, N > 1)
  # Print mappings
  message(paste(utils::capture.output(print(as.data.frame(Rias[,2:1]), row.names=FALSE)), collapse = "\n"))
  message(paste(utils::capture.output(print(as.data.frame(Lias[,2:1]), row.names=FALSE)), collapse = "\n"))
  # Determine messages: for each check, set either a confirmation (Rmsg...)
  # or an error string (Rstop...), leaving the other NULL.
  if (R_bad_id > 0) {
    Rstop <- "Interest Area IDs for the right eye are not between 0 and 8. Please recode before proceeding with data processing."
    Rmsg <- NULL
  } else {
    Rmsg <- "Interest Area IDs for the right eye are coded appropriately between 0 and 8."
    Rstop <- NULL
  }
  if (L_bad_id > 0) {
    Lstop <- "Interest Area IDs for the left eye are not between 0 and 8. Please recode before proceeding with data processing."
    Lmsg <- NULL
  } else {
    Lmsg <- "Interest Area IDs for the left eye are coded appropriately between 0 and 8."
    Lstop <- NULL
  }
  if(nrow(R_bad_labels) > 0) {
    Rstop2 <- "Interest Area ID and label combinations for the right eye are not consistent. Please correct before proceeding with data processing."
    Rmsg2 <- NULL
  } else {
    Rmsg2 <- "Interest Area ID and label mapping combinations for the right eye are consistent."
    Rstop2 <- NULL
  }
  if(nrow(L_bad_labels) > 0) {
    Lstop2 <- "Interest Area ID and label combinations for the left eye are not consistent. Please correct before proceeding with data processing."
    Lmsg2 <- NULL
  } else {
    Lmsg2 <- "Interest Area ID and label mapping combinations for the left eye are consistent."
    Lstop2 <- NULL
  }
  # Print messages: concatenate the non-NULL confirmations with newlines.
  if(!is.null(Rmsg)){
    msg <- Rmsg
  } else{
    msg <- character()
  }
  if(!is.null(Lmsg)){
    if(length(msg)>0){
      msg <- paste(msg, Lmsg, sep = "\n")
    } else{
      msg <- Lmsg
    }
  }
  if(!is.null(Rmsg2)){
    if(length(msg)>0){
      msg <- paste(msg, Rmsg2, sep = "\n")
    } else{
      msg <- Rmsg2
    }
  }
  if(!is.null(Lmsg2)){
    if(length(msg)>0){
      msg <- paste(msg, Lmsg2, sep = "\n")
    } else{
      msg <- Lmsg2
    }
  }
  if(length(msg)==0){
    msg <- NULL
  } else {message(msg)
  }
  # Print errors: concatenate the non-NULL error strings and stop() if any.
  if(!is.null(Rstop)){
    stp <- Rstop
  } else{
    stp <- character()
  }
  if(!is.null(Lstop)){
    if(length(stp)>0){
      stp <- paste(stp, Lstop, sep = "\n")
    } else{
      stp <- Lstop
    }
  }
  if(!is.null(Rstop2)){
    if(length(stp)>0){
      stp <- paste(stp, Rstop2, sep = "\n")
    } else{
      stp <- Rstop2
    }
  }
  if(!is.null(Lstop2)){
    if(length(stp)>0){
      stp <- paste(stp, Lstop2, sep = "\n")
    } else{
      stp <- Lstop2
    }
  }
  if(length(stp)==0){
    stp <- NULL
  } else {stop(stp)
  }
}
|
head1 <- function(filename, rows) {
  # Read a comma-separated file (with a header row) and print/return its
  # first `rows` rows.
  #
  # Args:
  #   filename: path to a CSV file.
  #   rows:     number of leading rows to show.
  # Returns: a data.frame of the first `rows` rows (also printed).
  data <- read.table(file = filename, sep = ",", header = TRUE,
                     stringsAsFactors = FALSE)
  # Bug fix: the original assigned `hrows` but returned the undefined
  # variable `h_rows`, so every call errored.  seq_len() also keeps
  # rows = 0 safe (1:0 would select rows 1 and 0).
  h_rows <- data[seq_len(rows), , drop = FALSE]
  return(print(h_rows))
}
# Show the first 5 rows of the wages dataset.
head1("wages.csv", 5)
#Part 2
# Load the iris dataset (strings kept as character, not factors).
iris_data <- read.table(file = "iris.csv", sep= ",", header = TRUE, stringsAsFactors = FALSE)
# Last two rows of the final two columns.
print(tail(iris_data[, (ncol(iris_data)-1):ncol(iris_data)], 2))
# Count the observations of each species.
nrow(iris_data[iris_data$Species == "setosa", ])
nrow(iris_data[iris_data$Species == "versicolor", ])
nrow(iris_data[iris_data$Species == "virginica", ])
# Rows whose sepal width exceeds 3.5.
iris_data[iris_data$Sepal.Width > 3.5, ]
# Save the setosa subset to its own CSV file.
write.table(x = iris_data[iris_data$Species == "setosa", ], file = "setosa.csv", sep = ",", col.names = TRUE)
# Mean / min / max petal length for virginica.
mean(iris_data[iris_data$Species == "virginica", ][, "Petal.Length"])
min(iris_data[iris_data$Species == "virginica", ][, "Petal.Length"])
max(iris_data[iris_data$Species == "virginica", ][, "Petal.Length"])
|
/ex7.R
|
no_license
|
aliyevND/Biocomp_tutorial9
|
R
| false
| false
| 879
|
r
|
# Read a comma-separated file and print/return its first `rows` rows.
# NOTE(review): the local variable is assigned as `hrows` but the return
# references `h_rows`, which is undefined -- this function errors whenever
# it is called; the names should be made consistent.
head1 = function(filename, rows) {
  data = read.table(file = filename, sep= ",", header = TRUE, stringsAsFactors = FALSE)
  hrows = data[1:rows, ]
  return(print(h_rows))
}
# Show the first 5 rows of the wages dataset.
head1("wages.csv", 5)
#Part 2
# Load the iris dataset (strings kept as character, not factors).
iris_data <- read.table(file = "iris.csv", sep= ",", header = TRUE, stringsAsFactors = FALSE)
# Last two rows of the final two columns.
print(tail(iris_data[, (ncol(iris_data)-1):ncol(iris_data)], 2))
# Count the observations of each species.
nrow(iris_data[iris_data$Species == "setosa", ])
nrow(iris_data[iris_data$Species == "versicolor", ])
nrow(iris_data[iris_data$Species == "virginica", ])
# Rows whose sepal width exceeds 3.5.
iris_data[iris_data$Sepal.Width > 3.5, ]
# Save the setosa subset to its own CSV file.
write.table(x = iris_data[iris_data$Species == "setosa", ], file = "setosa.csv", sep = ",", col.names = TRUE)
# Mean / min / max petal length for virginica.
mean(iris_data[iris_data$Species == "virginica", ][, "Petal.Length"])
min(iris_data[iris_data$Species == "virginica", ][, "Petal.Length"])
max(iris_data[iris_data$Species == "virginica", ][, "Petal.Length"])
|
# HH MA example - customer
#install.packages("amap")
library(amap)

## Download the customer data from a shared Google Sheet
url <- 'https://docs.google.com/spreadsheets/d/1PWWoMqE5o3ChwJbpexeeYkW6p4BHL9hubVb1fkKSBgA/edit#gid=2073914016'
library(gsheet)
cust_data <- as.data.frame(gsheet2tbl(url))

## Exploratory inspection of the downloaded table
str(cust_data)
head(cust_data)
names(cust_data)
summary(cust_data)
str(cust_data)

## Verify the column types and dimensions
colnames(cust_data)
sapply(cust_data, class)
class(cust_data$Age)
apply(cust_data, 2, FUN = class)  # are all numeric
dim(cust_data)
head(cust_data)
summary(cust_data)

## Run K-means (3 clusters) on all columns except the first (id) column
#?amap::Kmeans
names(cust_data)
nrow(cust_data)
kfit <- amap::Kmeans(cust_data[, -1], centers = 3, iter.max = 200)
# NOTE(review): NbClust() is called but the NbClust package is never
# attached with library(NbClust) -- this line errors unless the package is
# loaded elsewhere; confirm intent.
nbc <- NbClust(cust_data[-1], distance = "euclidean", min.nc = 2, max.nc = 15, method = "average")
str(cust_data)

## Inspect the fitted clustering
kfit$centers   # group means
kfit$size      # size/n of obs for the groups
kfit$withinss  # within-group sum of squares
#kfit$cluster
kfit$cluster   # cluster assignment for each obs
kfit$centers
kfit$cluster[9000:9800]
table(kfit$cluster)
kfit$size

## Pull out the observations assigned to cluster 2
cluster2_df <- cust_data[kfit$cluster == 2, ]
(cluster2_df)
mean(cluster2_df$Age)
cluster2_df$Cust_id

# Write CSV
write.csv(cluster2_df[, 1], file = "./data/data_clus_2.csv")
|
/clusteringLive.R
|
no_license
|
abani013/analytics1
|
R
| false
| false
| 1,123
|
r
|
# HH MA example - customer
#install.packages("amap")
library(amap)
##Read the data in the file
# Customer data is pulled from a shared Google Sheet.
url = 'https://docs.google.com/spreadsheets/d/1PWWoMqE5o3ChwJbpexeeYkW6p4BHL9hubVb1fkKSBgA/edit#gid=2073914016'
library(gsheet)
data = as.data.frame(gsheet2tbl(url))
# Exploratory inspection of the downloaded table.
str(data)
head(data)
names(data)
summary(data)
str(data)
###Verify the data
colnames(data)
sapply(data, class)
class(data$Age)
apply(data, 2, FUN= class) #are all numeric
dim(data)
head(data)
summary(data)
###Run the kmeans algorithm to generate the clusters
#?amap::Kmeans
names(data)
nrow(data)
# K-means with 3 centers on all columns except the first (id) column.
k1 <- amap::Kmeans(data[,-1],centers=3, iter.max = 200)
# NOTE(review): NbClust() is called but the NbClust package is never
# attached with library(NbClust) -- this line errors unless the package is
# loaded elsewhere; confirm intent.
nc = NbClust(data[-1], distance="euclidean",min.nc=2, max.nc=15, method="average")
str(data)
k1$centers # group means
###Fetch size/n of obs for the groups
k1$size
###Fetch sum of squared for the groups
k1$withinss
###Fetch the cluster for each obs
#k1$cluster
k1$cluster
k1$centers
k1$cluster[9000:9800]
table(k1$cluster)
k1$size
# Subset the observations assigned to cluster 2.
data_clus_2 <- data[ k1$cluster == 2,]
(data_clus_2)
mean(data_clus_2$Age)
data_clus_2$Cust_id
# Write CSV
write.csv(data_clus_2[,1], file = "./data/data_clus_2.csv")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.