content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
context("Add daily volume")

# Data inputs
# Verify add_daily_volume() accepts a user-supplied data frame whose value
# column is not named "Value", via the `values` argument.
test_that("dataframe can be provided and using different column names", {
  skip_on_cran()
  skip_on_ci()
  flowdata <- tidyhydat::hy_daily_flows("08NM116") %>%
    dplyr::rename(Flows = Value)
  flowdata <- add_daily_volume(flowdata, values = Flows)
  # Scalar conditions: use short-circuit && rather than elementwise &
  # (&& also errors on length > 1 inputs in R >= 4.3, catching mistakes)
  expect_true(all(c("Flows") %in% colnames(flowdata)) &&
                nrow(flowdata) >= 1)
})
# Verify add_daily_volume() can fetch data itself from a single
# station_number and that the requested station appears in the result.
test_that("station_number can be provided", {
  skip_on_cran()
  skip_on_ci()
  stns <- "08NM003"
  stations_data <- add_daily_volume(station_number = stns)
  # Scalar conditions: && short-circuits (and errors on vectors, R >= 4.3)
  expect_true(stns %in% unique(stations_data$STATION_NUMBER) &&
                nrow(stations_data) >= 1)
})
# Verify add_daily_volume() accepts a vector of station_numbers and returns
# only data for those stations.
test_that("multiple station_numbers can be provided", {
  skip_on_cran()
  skip_on_ci()
  stns <- c("08NM003", "08NM116")
  stations_data <- add_daily_volume(station_number = stns)
  # Scalar conditions: use short-circuit && rather than elementwise &
  expect_true(all(unique(stations_data$STATION_NUMBER) %in% stns) &&
                nrow(stations_data) >= 1)
})
# Function results
# The core contract: the function appends a Volume_m3 column.
test_that("add_daily_volume actually adds a column called Volume_m3", {
  skip_on_cran()
  skip_on_ci()
  result <- add_daily_volume(station_number = "08NM003")
  expect_true("Volume_m3" %in% names(result))
})
| /tests/testthat/test-add_daily_volume.R | no_license | cran/fasstr | R | false | false | 1,246 | r | context("Add daily volume")
# Data inputs
test_that("dataframe can be provided and using different column names",{
skip_on_cran()
skip_on_ci()
flowdata <- tidyhydat::hy_daily_flows("08NM116") %>%
dplyr::rename(Flows = Value)
flowdata <- add_daily_volume(flowdata, values = Flows)
expect_true(all(c("Flows") %in% colnames(flowdata)) &
nrow(flowdata) >= 1)
})
test_that("station_number can be provided",{
skip_on_cran()
skip_on_ci()
stns <- "08NM003"
stations_data <- add_daily_volume(station_number = stns)
expect_true(stns %in% unique(stations_data$STATION_NUMBER) &
nrow(stations_data) >= 1)
})
test_that("multiple station_numbers can be provided",{
skip_on_cran()
skip_on_ci()
stns <- c("08NM003","08NM116")
stations_data <- add_daily_volume(station_number = stns)
expect_true(all(unique(stations_data$STATION_NUMBER) %in% stns) &
nrow(stations_data) >= 1)
})
# Function results
test_that("add_daily_volume actually adds a column called Volume_m3",{
skip_on_cran()
skip_on_ci()
stns <- "08NM003"
data_col <- add_daily_volume(station_number = stns)
expect_true("Volume_m3" %in% names(data_col))
})
|
\name{hist_prices}
\alias{hist_prices}
\title{
Historical Price Series.
}
\description{
Retrieve historical data series and merge them.
}
\usage{
hist_prices(ticker, start, end = Sys.Date() - 1,
adjust.return = NULL, return.class = "zoo")
}
\arguments{
\item{ticker}{
a character vector
}
\item{start}{
\code{\link{Date}}, a character string of format
\code{"YYYY-MM-DD"}, or a year \code{"YYYY"}, which
will be translated into \code{"YYYY-01-01"}
}
\item{end}{
\code{\link{Date}}, a character string of format
\code{"YYYY-MM-DD"}, or a year \code{"YYYY"}, which
will be translated into \code{"YYYY-12-31"}
}
\item{adjust.return}{
if NULL (the default), \code{PX_LAST} will be
loaded. Other valid values are \sQuote{net} or
\sQuote{gross}.
}
\item{return.class}{
character
}
}
\details{
  The function calls \code{\link[Rblpapi]{bdh}} once
  for every element of \code{ticker} and merges the results.
Currency tickers of the form \code{ABC Curncy}
(i.e. cash) or \code{ABCABC Curncy} (i.e. base and
counter currency are the same) always receive a price
of 1. If data for only such tickers are requested,
the returned data contain only two timestamps
(\code{start} and \code{end}).
}
\value{
  A series of the class specified by
  \code{return.class}. The default, and currently the only
  supported option, is \code{zoo}.
}
\author{
Enrico Schumann
}
\seealso{
\code{\link{closing_price}}
}
\examples{
\dontrun{
hist_prices("SPI Index", start = 2015, end = 2015)
}
}
| /man/hist_prices.Rd | no_license | enricoschumann/Rblpapi.utils | R | false | false | 1,546 | rd | \name{hist_prices}
\alias{hist_prices}
\title{
Historical Price Series.
}
\description{
Retrieve historical data series and merge them.
}
\usage{
hist_prices(ticker, start, end = Sys.Date() - 1,
adjust.return = NULL, return.class = "zoo")
}
\arguments{
\item{ticker}{
a character vector
}
\item{start}{
\code{\link{Date}}, a character string of format
\code{"YYYY-MM-DD"}, or a year \code{"YYYY"}, which
will be translated into \code{"YYYY-01-01"}
}
\item{end}{
\code{\link{Date}}, a character string of format
\code{"YYYY-MM-DD"}, or a year \code{"YYYY"}, which
will be translated into \code{"YYYY-12-31"}
}
\item{adjust.return}{
if NULL (the default), \code{PX_LAST} will be
loaded. Other valid values are \sQuote{net} or
\sQuote{gross}.
}
\item{return.class}{
character
}
}
\details{
The function calls \code{\link[Rblpapi]{bdh}} once
for every ticker in ticker and merges the results.
Currency tickers of the form \code{ABC Curncy}
(i.e. cash) or \code{ABCABC Curncy} (i.e. base and
counter currency are the same) always receive a price
of 1. If data for only such tickers are requested,
the returned data contain only two timestamps
(\code{start} and \code{end}).
}
\value{
A series of class defined in
\code{return.class}. Default and currently only
supported option is \code{zoo}.
}
\author{
Enrico Schumann
}
\seealso{
\code{\link{closing_price}}
}
\examples{
\dontrun{
hist_prices("SPI Index", start = 2015, end = 2015)
}
}
|
library(ggrepel)
library(ggsci)
library(lmodel2)
library(scales)
library(tidyverse)
# Utility functions ----------------------------------------------------------
# Abbreviate a binomial, e.g. "Balaenoptera musculus" -> "B. musculus".
# The genus initial is joined by "." to the epithet, which keeps its
# leading space from the regex match.
abbr_binom = function(binom) {
  genus_initial <- str_sub(binom, 1, 1)
  epithet <- str_extract(binom, " .*")
  paste(genus_initial, epithet, sep = ".")
}
# Labels for logarithmic scales: render axis ticks as 10^x (plotmath)
log_labels <- trans_format("log10", math_format(10^.x))

# Color palette keyed by family grouping ("cbf" presumably = colorblind
# friendly -- the comments give the hue names; TODO confirm source)
cbf_palette <- c("Phocoenidae and Delphinidae" = rgb(0, 114, 178, maxColorValue = 255),  # blue
                 "Physeteridae and Ziphiidae" = rgb(213, 94, 0, maxColorValue = 255),    # vermillion
                 "Balaenopteridae" = rgb(0, 158, 115, maxColorValue = 255))              # bluish green
# Data ------------------------------------------------------------------------

# Morphological data: one row per species with body length (m), mass (kg),
# clade, family, an ordered factor `binomial` (levels sorted by body
# length), and a short label `abbr`. Orcinus orca and Berardius bairdii
# are excluded at the end.
morphologies <- read_csv("data/foragestats_combined_ko2.csv") %>%
  mutate(Species = str_replace(Species, "_", " ")) %>%
  group_by(Species) %>%
  # Collapse to one row per species, keeping the first length/mass record
  summarize(Length_m = first(Body_length_m),
            Mass_kg = first(Body_mass_kg)) %>%
  mutate(Clade = ifelse(str_detect(Species, ".*ptera.*"),
                        "Mysticete",
                        "Odontocete"),
         Family = recode(Species,
                         `Balaenoptera bonaerensis` = "Balaenopteridae",
                         `Balaenoptera musculus` = "Balaenopteridae",
                         `Balaenoptera physalus` = "Balaenopteridae",
                         `Berardius bairdii` = "Ziphiidae",
                         `Globicephala macrorhynchus` = "Delphinidae",
                         `Globicephala melas` = "Delphinidae",
                         `Grampus griseus` = "Delphinidae",
                         `Megaptera novaeangliae` = "Balaenopteridae",
                         `Mesoplodon densirostris` = "Ziphiidae",
                         `Orcinus orca` = "Delphinidae",
                         `Phocoena phocoena` = "Phocoenidae",
                         `Physeter macrocephalus` = "Physeteridae",
                         `Ziphius cavirostris` = "Ziphiidae")) %>%
  # binomial is a factor ordered by species length
  arrange(Length_m) %>%
  mutate(binomial = factor(Species,
                           levels = unique(Species)),
         # Genus + species initials, e.g. "Balaenoptera musculus" -> "Bm"
         abbr = str_split(binomial, " ") %>%
           map_chr(~ paste0(str_sub(.x[1], 1, 1), str_sub(.x[2], 1, 1))),
         # Disambiguate the two Globicephala species
         abbr = case_when(binomial == "Globicephala melas" ~ "Gme",
                          binomial == "Globicephala macrorhynchus" ~ "Gma",
                          TRUE ~ abbr)) %>%
  filter(!binomial %in% c("Orcinus orca", "Berardius bairdii"))

# Species factor levels (ordered by body length), reused throughout
binom_levels <- levels(morphologies$binomial)
# Prey: loads `prey_tbl` (per-feeding-event energy content by species)
load("data/prey_tbl.RData")

# Feeding rates: loads `buzz_rf`, `Md_buzz_rf`, and `lunge_rf`
load("data/buzz_rf.RData")
load("data/Md_buzz_rf.RData")
load("data/lunge_rf.RData")

# Consumption power (Pin) -----------------------------------------------------

# Per-species prey energy (Ep) summaries: mean, lognormal parameters, and
# first/third quartiles, with `binomial` releveled to the common ordering
Ep_tbl <- prey_tbl %>%
  select(binomial,
         meanEp_kJ,
         meanlnEp_lnkJ,
         sdlnEp_lnkJ,
         firstqEp_kJ,
         thirdqEp_kJ) %>%
  ungroup %>%
  mutate(binomial = factor(binomial, levels = binom_levels))
# Ep figure: mean energy per feeding event (log y) with interquartile
# error bars, one point per species
prey_tbl %>%
  filter(!binomial %in% c("Orcinus orca", "Berardius bairdii")) %>%
  mutate(binomial = factor(binomial, levels = binom_levels),
         grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae")) %>%
  ggplot(aes(binomial, meanEp_kJ, color = grouping)) +
  geom_point() +
  geom_errorbar(aes(ymin = firstqEp_kJ, ymax = thirdqEp_kJ),
                width = 0.4) +
  # Break binomials onto two lines for the x axis
  scale_x_discrete(labels = function(lbl) str_replace(lbl, " ", "\n")) +
  scale_y_log10(labels = log_labels) +
  scale_color_aaas() +
  labs(y = "Energy per feeding event (kJ)") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        axis.title.x = element_blank(),
        legend.position = "none")
ggsave("figs/Ep.pdf",
       width = 9,
       height = 6)

# Ep/m figure: mass-specific energy per feeding event vs body mass
# (log-log), labeled with species abbreviations
prey_tbl %>%
  filter(!binomial %in% c("Orcinus orca", "Berardius bairdii")) %>%
  mutate(binomial = factor(binomial, levels = binom_levels),
         grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae")) %>%
  left_join(select(morphologies, binomial, abbr, Mass_kg),
            by = "binomial") %>%
  mutate(meanEp_kJkg = meanEp_kJ / Mass_kg,
         firstqEp_kJkg = firstqEp_kJ / Mass_kg,
         thirdqEp_kJkg = thirdqEp_kJ / Mass_kg) %>%
  ggplot(aes(Mass_kg, meanEp_kJkg, color = grouping, shape = grouping)) +
  geom_pointrange(aes(ymin = firstqEp_kJkg, ymax = thirdqEp_kJkg),
                  fatten = 3,
                  size = 0.75) +
  geom_text_repel(aes(label = abbr),
                  size = 3) +
  scale_x_log10(labels = log_labels) +
  scale_y_log10() +
  scale_color_manual(values = cbf_palette) +
  labs(x = "Body mass (kg)",
       y = expression("Mass-specific " * E[p] ~ (kJ ~ kg ^ -1))) +
  theme_classic(base_size = 12) +
  theme(axis.title = element_text(size = 10),
        legend.position = "none")
# Print-resolution figure sized in mm
ggsave("figs/Ep2.pdf",
       width = 80,
       height = 65,
       units = "mm",
       dpi = 600)
# Feeding-rate summaries across the three sources (buzzes, lunges, and the
# Mesoplodon buzz set), with the shared species factor ordering.
# `q_rf_fun` is a per-species quantile function for the feeding rate.
rf_tbl <- bind_rows(buzz_rf, lunge_rf, Md_buzz_rf) %>%
  select(binomial,
         rf_h = mean_rf,
         firstq_rf,
         thirdq_rf,
         q_rf_fun) %>%
  ungroup %>%
  mutate(binomial = factor(binomial, levels = binom_levels))

# rf figure: mean feeding rate with interquartile bars, by species
bind_rows(buzz_rf, lunge_rf, Md_buzz_rf) %>%
  select(binomial, mean_rf, firstq_rf, thirdq_rf) %>%
  left_join(select(prey_tbl, binomial, Family), by = "binomial") %>%
  mutate(binomial = factor(binomial, levels = binom_levels),
         grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae")) %>%
  filter(binomial != "Orcinus orca") %>%
  ggplot(aes(binomial, mean_rf, color = grouping, shape = grouping)) +
  geom_pointrange(aes(ymin = firstq_rf, ymax = thirdq_rf),
                  fatten = 3,
                  size = 0.75) +
  # NOTE(review): assumes morphologies$abbr aligns one-to-one with the
  # plotted factor levels -- verify after any filtering change
  scale_x_discrete(labels = morphologies$abbr) +
  scale_color_manual(values = cbf_palette) +
  labs(y = expression("Feeding rate " * (hr^-1))) +
  theme_classic(base_size = 12) +
  theme(axis.text.x = element_text(size = 8,
                                   angle = 45,
                                   hjust = 1),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10,
                                    margin = margin(0, 0, 0, 0)),
        legend.position = "none",
        plot.margin = margin(0.5, 0.5, 0.5, 0.5))
ggsave("figs/rf.pdf",
       width = 80,
       height = 65,
       units = "mm",
       dpi = 600)

# Mean consumption power per species: Pin (kJ/hr) = mean Ep * mean rf
Pin_tbl <- inner_join(Ep_tbl, rf_tbl, by = "binomial") %>%
  mutate(Pin_kJ_h = meanEp_kJ * rf_h) %>%
  left_join(select(morphologies, binomial, Family, Mass_kg), by = "binomial")
# Mass-specific consumption power

# Latin hypercube sample (default n = 1000 draws) of consumption power
# Pin = rf * Ep (kJ/hr). `rf_q` is a quantile function for the feeding
# rate; Ep is lognormal with log-scale mean `meanlnEp` and sd `sdlnEp`.
# Returns the numeric vector of sampled Pin values.
sample_Pin <- function(rf_q, meanlnEp, sdlnEp, n = 1e3) {
  pse::LHS(function(data) data$rf * data$Ep,
           factors = c("rf", "Ep"),
           N = n,
           q = list(rf = rf_q,
                    Ep = qlnorm),
           q.arg = list(rf = list(),
                        Ep = list(meanlog = meanlnEp,
                                  sdlog = sdlnEp)),
           res.names = "Pin_kJhr")$res[,,1]
}
# Figure 1, Pc is bimodally distributed
# Using Bw as example

# Inset 1: blue whale Ep distribution (lognormal density), x in 10^5 kJ
bw_ep <- filter(prey_tbl, binomial == "Balaenoptera musculus")
ep_inset <- ggplot(tibble(x = qlnorm(c(0.001, 0.99),
                                     meanlog = bw_ep$meanlnEp_lnkJ,
                                     sdlog = bw_ep$sdlnEp_lnkJ)),
                   aes(x)) +
  stat_function(fun = dlnorm,
                args = list(meanlog = bw_ep$meanlnEp_lnkJ,
                            sdlog = bw_ep$sdlnEp_lnkJ)) +
  scale_x_continuous(breaks = seq(0, 1e6, by = 250e3),
                     labels = function(x) x / 10^5) +
  labs(x = expression(italic(E[p]) ~ (10^5 ~ kJ))) +
  theme_classic(base_size = 10) +
  theme(axis.text.y = element_blank(),
        axis.ticks.y = element_blank(),
        axis.title.y = element_blank(),
        axis.title.x = element_text(size = 8))

# Inset 2: blue whale feeding-rate distribution (histogram of 1000
# evenly spaced quantile draws from the empirical quantile function)
bw_rf <- filter(rf_tbl, binomial == "Balaenoptera musculus")
rf_inset <- ggplot(tibble(x = bw_rf$q_rf_fun[[1]](seq(0, 1, length.out = 1000))),
                   aes(x)) +
  geom_histogram(binwidth = 1,
                 boundary = 0,
                 fill = "light gray",
                 color = "black",
                 size = 0.2) +
  labs(x = expression(italic(r[f]) ~ ("hr"^{-1}))) +
  theme_classic(base_size = 10) +
  theme(axis.text.y = element_blank(),
        axis.ticks.y = element_blank(),
        axis.title.y = element_blank(),
        axis.title.x = element_text(size = 8))

# LHS sample of blue whale consumption power (Pc) draws
bw_Pc <- filter(Pin_tbl, binomial == "Balaenoptera musculus") %>%
  group_by(binomial) %>%
  group_modify(function(data, key) {
    with(data,
         tibble(Pc = sample_Pin(q_rf_fun[[1]],
                                meanlnEp_lnkJ[1],
                                sdlnEp_lnkJ[1])))
  }) %>%
  ungroup

# Main panel: histogram of sampled Pc with the mean marked
Pc_plot <- ggplot(bw_Pc, aes(Pc)) +
  geom_histogram(bins = 30,
                 boundary = 0,
                 fill = "light gray",
                 color = "black") +
  geom_vline(aes(xintercept = mean(Pc)),
             linetype = "dashed") +
  scale_x_continuous(breaks = seq(0, 4e7, by = 1e7),
                     limits = c(0, 4e7),
                     labels = c(0,
                                expression(1 %*% 10^7),
                                expression(2 %*% 10^7),
                                expression(3 %*% 10^7),
                                expression(4 %*% 10^7)),
                     name = expression(italic(frac(dE[a], dt)) ~ ("kJ" ~ "hr"^{-1}))) +
  theme_classic(base_size = 12) +
  theme(axis.text.y = element_blank(),
        axis.ticks.y = element_blank(),
        axis.title.y = element_blank(),
        axis.title.x = element_text(size = 8))

# Compose the main panel with the two insets (positions in data units)
Pc_plot +
  annotation_custom(ggplotGrob(ep_inset),
                    xmin = 0.7e7, xmax = 2.45e7,
                    ymin = 90, ymax = 360) +
  annotation_custom(ggplotGrob(rf_inset),
                    xmin = 2.5e7, xmax = 4.25e7,
                    ymin = 90, ymax = 360)
# Dimensions and DPI for Conservation Letters
# https://authorservices.wiley.com/asset/photos/electronic_artwork_guidelines.pdf
ggsave("figs/Pc.pdf",
       width = 80,
       height = 65,
       units = "mm",
       dpi = 600)
# Pc table: per-species summaries of prey energy (Ep), feeding rate (rf),
# and consumption power (Pc) -- means, mass-specific values, and
# interquartile ranges -- written to data/output/Pc.csv.
Pc_tbl <- rf_tbl %>%
  left_join(Ep_tbl, by = "binomial") %>%
  # Explicit join key: the implicit natural join only emitted a message,
  # but is fragile if column sets ever change
  left_join(select(morphologies, binomial, Mass_kg), by = "binomial") %>%
  group_by(binomial) %>%
  group_modify(function(data, key) {
    # LHS sample of Pc = rf * Ep for this species
    Pc <- sample_Pin(data$q_rf_fun[[1]],
                     data$meanlnEp_lnkJ,
                     data$sdlnEp_lnkJ)
    tibble(meanEp_kJ = data$meanEp_kJ,
           meanEp_kJkg = meanEp_kJ / data$Mass_kg,
           iqr1Ep_kJ = data$firstqEp_kJ,
           iqr3Ep_kJ = data$thirdqEp_kJ,
           meanrf_h = data$rf_h,
           meanrf_hkg = meanrf_h / data$Mass_kg,
           iqr1rf_h = data$firstq_rf,
           iqr3rf_h = data$thirdq_rf,
           meanPc_kJh = mean(Pc),
           meanPc_kJhkg = meanPc_kJh / data$Mass_kg,
           iqr1Pc_kJh = quantile(Pc, 0.25),
           iqr3Pc_kJh = quantile(Pc, 0.75))
  })
write_csv(Pc_tbl, "data/output/Pc.csv")
# Locomotion power (Pout) -----------------------------------------------------

# Fluking (stroke) frequency implied by the Strouhal relation.
# St = A * f / U with peak-to-peak amplitude A = La * L, which rearranges
# to f = St * U / L / La.
#
#   U  -- swim speed (m/s); vectorized
#   L  -- body length (m)
#   La -- amplitude-to-length ratio (default 0.2)
#   St -- Strouhal number (default 0.3)
#
# Returns the fluking frequency f (Hz).
fs_fun <- function(U, L, La = 0.2, St = 0.3) {
  fluke_freq_hz <- St * U / L / La
  fluke_freq_hz
}
# Reference (basal/routine) swim speed in m/s
U_b_ms <- 1.5

# Locomotor cost as a linear function of body mass m (kg).
# NOTE(review): units appear to be J/stroke (cf. the CL density plot axis
# label below) -- confirm against the source of 1.46 + 0.0005*m.
CL_fun <- function(m) 1.46 + 0.0005 * m

# Locomotion power above basal: extra fluking frequency at speed u (m/s)
# relative to U_b_ms, times per-stroke cost, times mass.
# NOTE(review): appears unreferenced elsewhere in this script (the
# scenario code recomputes the same quantities inline) -- confirm.
Pout_fun <- function(u, l, m) {
  f_f <- fs_fun(u, l)       # fluking frequency at speed u
  f_b <- fs_fun(U_b_ms, l)  # fluking frequency at basal speed
  (f_f - f_b) * CL_fun(m) * m
}
# Sensitivity -----------------------------------------------------------------

# Vectorized function for calculating Esonar for sensitivity analysis

# Energy intake lost during a feeding cessation of `td_min` minutes.
# Returns a function of feeding rate (rf_h, events/hr) and energy per
# feeding event (Ep_kJ): Ein_kJ = rf_h * Ep_kJ * td_min / 60.
Ein_fun <- function(td_min) {
  function(rf_h, Ep_kJ) {
    # Consumption power (kJ/hr)
    Pin_kJh <- rf_h * Ep_kJ
    # Forgone intake over td_min minutes (kJ); the original also computed
    # an unused watts conversion (Pin_W), removed here
    Ein_kJ <- Pin_kJh * td_min / 60
    Ein_kJ
  }
}
# Extra locomotor energy for a flight response lasting `tf_min` minutes
# by an animal of mass `m` (kg). Returns a function of the increase in
# fluking frequency (delta_ff, Hz) and per-stroke locomotor cost (CL).
Eout_fun <- function(tf_min, m) {
  function(delta_ff, CL) {
    # Locomotion power in W (J/s)
    Pout_W <- delta_ff * CL * m
    # Convert J -> kJ, then integrate over tf_min minutes (= tf_min * 60 s)
    Eout_kJ <- Pout_W / 1000 * tf_min * 60
    Eout_kJ
  }
}
# Total sonar-response cost: forgone intake (Ein) plus extra locomotion
# (Eout). Returns a one-argument function of a data frame with columns
# rf_h, Ep_kJ, delta_ff, and CL, as required by pse::LHS.
Esonar_fun <- function(td_min, tf_min, m) {
  intake_cost <- Ein_fun(td_min)
  locomotion_cost <- Eout_fun(tf_min, m)
  function(data) {
    intake_cost(data$rf_h, data$Ep_kJ) +
      locomotion_cost(data$delta_ff, data$CL)
  }
}
# Scenarios -------------------------------------------------------------------
# Extreme flight
# Behavioral responses

# One row per (scenario x species): cessation duration t_d (min), flight
# duration t_f (min), flight speed U_f (m/s), plus each species' feeding
# rate quantile function and Ep lognormal parameters
scenario_tbl <-
  tribble(~scenario, ~t_d_min, ~t_f_min, ~U_f_ms,
          "flight", 60, 30, 5,
          "consumption", 240, 10, 3.5) %>%
  crossing(select(morphologies,
                  binomial,
                  Mass_kg,
                  Length_m) %>%
             filter(!binomial %in% c("Orcinus orca",
                                     "Berardius bairdii"))) %>%
  # rf probabilities
  left_join(select(rf_tbl, binomial, q_rf_fun), by = "binomial") %>%
  # Ep probabilities
  left_join(select(Ep_tbl, binomial, meanlnEp_lnkJ, sdlnEp_lnkJ),
            by = "binomial")
# Per-scenario, per-species sensitivity analysis of the sonar-response
# cost model. For each (scenario, species): build the four parameter
# distributions, Latin-hypercube sample the model, save diagnostic plots,
# fit a normalized multiple regression, and return its coefficients /
# CIs / p-values alongside summary statistics of the sampled costs.
Esonar_tbl <- scenario_tbl %>%
  group_by(scenario, binomial) %>%
  group_modify(function(data, key) {
    param <- c("rf_h", "Ep_kJ", "delta_ff", "CL")
    # Quantile functions for the four model parameters
    q <- list(rf_h = data$q_rf_fun[[1]],
              Ep_kJ = qlnorm,
              delta_ff = qgamma,
              CL = qgamma)
    # List of distribution function parameters
    ff_flight <- fs_fun(data$U_f_ms, data$Length_m)
    ff_basal <- fs_fun(U_b_ms, data$Length_m)
    delta_ff <- ff_flight - ff_basal
    # Gamma with shape 4, scaled so the mean equals delta_ff
    ff_shape <- 4
    ff_scale <- delta_ff / ff_shape
    # CL mean from the mass allometry (cf. CL_fun); the original comment
    # "squared to get units of J/stroke" did not match the linear code
    CL <- 1.46 + 0.0005 * data$Mass_kg
    CL_shape <- 4
    CL_scale <- CL / CL_shape
    q_arg <- list(rf_h = list(),
                  Ep_kJ = list(meanlog = data$meanlnEp_lnkJ,
                               sdlog = data$sdlnEp_lnkJ),
                  delta_ff = list(shape = ff_shape, scale = ff_scale),
                  CL = list(shape = CL_shape, scale = CL_scale))
    # (Removed an unused `param_args` tibble that collected the same
    # distribution parameters but was never referenced.)
    # Plots of delta_ff, CL distributions. Assigned and passed to ggsave()
    # explicitly: inside group_modify() the plots are never auto-printed,
    # so relying on ggsave()'s default last_plot() could save a stale plot
    # in non-interactive runs.
    ff_density_plot <- ggplot(data.frame(x = c(0, qgamma(0.99,
                                                         shape = ff_shape,
                                                         scale = ff_scale))),
                              aes(x)) +
      stat_function(fun = dgamma,
                    args = q_arg[[3]]) +
      geom_vline(xintercept = delta_ff,
                 linetype = "dashed") +
      labs(x = "Change in fluking frequency (Hz)",
           y = "Probability density",
           title = key$binomial,
           caption = sprintf("U_b = %.1f m/s, U_f = %.1f m/s, f_b = %.2f Hz, f_f = %.2f Hz",
                             U_b_ms,
                             data$U_f_ms,
                             ff_basal,
                             ff_flight)) +
      theme_minimal()
    ggsave(sprintf("figs/ff_density/%s.pdf", key$binomial),
           ff_density_plot,
           width = 9,
           height = 6)
    CL_density_plot <- ggplot(data.frame(x = c(0, qgamma(0.99,
                                                         shape = CL_shape,
                                                         scale = CL_scale))),
                              aes(x)) +
      stat_function(fun = dgamma,
                    args = q_arg[[4]]) +
      geom_vline(xintercept = CL,
                 linetype = "dashed") +
      labs(x = "Locomotor cost (J/stroke)",
           y = "density",
           title = key$binomial) +
      theme_minimal()
    ggsave(sprintf("figs/CL_density/%s.pdf", key$binomial),
           CL_density_plot,
           width = 9,
           height = 6)
    # Latin hypercube sample of parameter space
    model <- Esonar_fun(data$t_d_min,
                        data$t_f_min,
                        data$Mass_kg)
    # Fail loudly with context rather than dropping into the interactive
    # debugger: the original browser() was a debugging leftover and left
    # esonar_LHS undefined on error in non-interactive runs
    esonar_LHS <- tryCatch(
      pse::LHS(model, param, 1e2, q, q_arg),
      error = function(e) {
        stop("LHS sampling failed for ", key$binomial, ": ",
             conditionMessage(e), call. = FALSE)
      }
    )
    sens_result <- esonar_LHS$data %>%
      mutate(Esonar_kJ = esonar_LHS$res[,1,1],
             Eout_kJ = Eout_fun(data$t_f_min, data$Mass_kg)(delta_ff, CL),
             Ein_kJ = Ein_fun(data$t_d_min)(rf_h, Ep_kJ),
             inout_ratio = Ein_kJ / Eout_kJ)
    # ECDF of model outputs
    esonar_ecdf <- ggplot(sens_result, aes(Esonar_kJ)) +
      stat_ecdf() +
      labs(x = "Energetic cost (kJ)",
           y = "Cumulative probability",
           title = key$binomial) +
      theme_minimal()
    # Scatter of model outputs w.r.t. parameters
    esonar_scatter <- sens_result %>%
      gather(parameter, value, rf_h:CL) %>%
      ggplot(aes(value, Esonar_kJ)) +
      geom_point(size = 0.5) +
      geom_smooth(method = "lm",
                  se = FALSE) +
      labs(x = "",
           y = "Energetic cost (kJ)",
           title = key$binomial) +
      facet_wrap(~ parameter,
                 scales = "free_x",
                 strip.position = "bottom") +
      theme_minimal() +
      theme(strip.placement = "outside")
    # Save plots
    ggsave(sprintf("figs/esonar_ecdfs/%s_%s.pdf", key$scenario, key$binomial),
           esonar_ecdf,
           width = 9,
           height = 6)
    ggsave(sprintf("figs/esonar_scatters/%s_%s.pdf", key$scenario, key$binomial),
           esonar_scatter,
           width = 9,
           height = 6)
    # Linear model results
    esonar_linear <- sens_result %>%
      # Normalize values using z-scores so coefficients are comparable
      mutate_at(vars(Esonar_kJ, rf_h, Ep_kJ, CL, delta_ff),
                function(x) (x - mean(x)) / sd(x)) %>%
      # Multiple regression
      lm(Esonar_kJ ~ rf_h + Ep_kJ + CL + delta_ff, data = .)
    # Extract coefficients, p-values, and confidence intervals
    esonar_coef <- coef(esonar_linear)
    esonar_pval <- summary(esonar_linear)$coefficients[,4]
    esonar_ci <- as_tibble(confint(esonar_linear, level = 0.95),
                           rownames = "param")
    colnames(esonar_ci)[2:3] <- c("ci_min", "ci_max")
    # Combine and drop info for the intercept
    lm_results <- cbind(esonar_ci, esonar_coef, esonar_pval)[-1,]
    esonar_results <- summarize(sens_result,
                                mean_Esonar = mean(Esonar_kJ),
                                median_Esonar = median(Esonar_kJ),
                                iqr_Esonar = IQR(Esonar_kJ),
                                median_inout = median(inout_ratio),
                                inout_25 = quantile(inout_ratio, 0.25),
                                inout_75 = quantile(inout_ratio, 0.75),
                                mean_inout = mean(inout_ratio),
                                se_inout = sd(inout_ratio)/sqrt(n()))
    cbind(lm_results, esonar_results)
  })
# A plot with the normalized linear model coefficients

# Sensitivity coefficients with plotmath parameter labels and family
# groupings (ordered factor) for coloring
coef_data <- Esonar_tbl %>%
  ungroup %>%
  mutate(param = factor(param,
                        levels = c("CL",
                                   "delta_ff",
                                   "Ep_kJ",
                                   "rf_h"),
                        labels = c("C[L]",
                                   "Delta*f[f]",
                                   "E[p]",
                                   "r[f]"))) %>%
  left_join(select(morphologies, binomial, Family),
            by = "binomial") %>%
  mutate(grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae") %>%
           factor(levels = c("Phocoenidae and Delphinidae",
                             "Physeteridae and Ziphiidae",
                             "Balaenopteridae")))
# For presentations -----------------------------------------------------------

consumption_coef <- filter(coef_data, scenario == "consumption")

# Consumption scenario, all species pooled: boxplots of sensitivity
# coefficients per parameter (flipped so parameters read down the y axis)
ggplot(consumption_coef, aes(x = param, y = esonar_coef)) +
  geom_boxplot() +
  coord_flip() +
  scale_x_discrete(labels = parse(text = levels(consumption_coef$param))) +
  scale_y_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  labs(y = "Sensitivity") +
  theme_classic() +
  theme(axis.text.x = element_blank(),
        axis.ticks = element_blank(),
        axis.title.y = element_blank())
ggsave("figs/consumption_allsp.pdf",
       width = 4.5,
       height = 3)

# Consumption, by grouping (colored by family grouping)
consumption_grouped <- ggplot(
  consumption_coef,
  aes(x = param, y = esonar_coef, color = grouping)
) +
  geom_boxplot() +
  coord_flip() +
  scale_x_discrete(labels = parse(text = levels(consumption_coef$param))) +
  scale_y_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  scale_color_manual(values = cbf_palette) +
  labs(y = "Sensitivity") +
  theme_classic(base_size = 12) +
  theme(axis.text.x = element_blank(),
        axis.text.y = element_text(size = 12),
        axis.ticks = element_blank(),
        axis.title.y = element_blank(),
        legend.position = "none")
# Print before saving so ggsave()'s default last_plot() is this plot
consumption_grouped
ggsave("figs/consumption_groups.pdf",
       width = 4.5,
       height = 3)

flight_coef <- filter(coef_data, scenario == "flight")

# Flight scenario, all species pooled
ggplot(flight_coef, aes(x = param, y = esonar_coef)) +
  geom_boxplot() +
  coord_flip() +
  scale_x_discrete(labels = parse(text = levels(flight_coef$param))) +
  scale_y_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  labs(y = "Sensitivity") +
  theme_classic() +
  theme(axis.text.x = element_blank(),
        axis.ticks = element_blank(),
        axis.title.y = element_blank())
ggsave("figs/flight_allsp.pdf",
       width = 4.5,
       height = 3)

# Flight, by grouping
flight_grouped <- ggplot(flight_coef,
                         aes(x = param, y = esonar_coef, color = grouping)) +
  geom_boxplot() +
  coord_flip() +
  scale_x_discrete(labels = parse(text = levels(flight_coef$param))) +
  scale_y_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  scale_color_manual(values = cbf_palette) +
  labs(y = "Sensitivity") +
  theme_classic(base_size = 12) +
  theme(axis.text.x = element_blank(),
        axis.text.y = element_text(size = 12),
        axis.ticks = element_blank(),
        axis.title.y = element_blank(),
        legend.position = "none")
flight_grouped
ggsave("figs/flight_groups.pdf",
       width = 4.5,
       height = 3)

# Figure for paper: the two grouped panels side by side (patchwork),
# with ticks and x text restored for print
consumption_grouped +
  theme(axis.ticks = element_line(),
        axis.text.x = element_text()) +
  flight_grouped +
  theme(axis.ticks = element_line(),
        axis.text.x = element_text()) +
  patchwork::plot_layout(nrow = 1) +
  patchwork::plot_annotation(tag_levels = "A")
ggsave("figs/sensitivity.pdf",
       width = 180,
       height = 80,
       units = "mm",
       dpi = 600)
# Ratio of in/out
# Mean sensitivity of intake-side (rf, Ep) vs output-side (delta_ff, CL)
# parameters, by scenario (printed, not saved)
coef_data %>%
  mutate(inout = ifelse(param %in% c("r[f]", "E[p]"), "in", "out")) %>%
  group_by(inout, scenario) %>%
  summarize(mean_coef = mean(esonar_coef))

# NOTE(review): this plot looks broken -- `binomial` on x is discrete, so
# scale_x_continuous() should raise "Discrete value supplied to continuous
# scale". The blanked x text and "Sensitivity" label suggest a missing
# coord_flip() (cf. the grouped plots above) or that x was meant to be
# esonar_coef. Left unchanged pending confirmation of intent.
ggplot(consumption_coef, aes(x = binomial, y = esonar_coef)) +
  # geom_vline(aes(xintercept = mean_coef),
  #            summarize(group_by(consumption_coef, param),
  #                      mean_coef = mean(esonar_coef)),
  #            linetype = "dashed") +
  geom_boxplot() +
  facet_grid(param ~ .,
             labeller = label_parsed,
             switch = "y") +
  scale_x_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  labs(x = "Sensitivity") +
  theme_classic() +
  theme(axis.text.x = element_blank(),
        axis.ticks.x = element_blank(),
        axis.title.y = element_blank(),
        legend.position = "none",
        legend.title = element_blank(),
        strip.background = element_blank(),
        strip.placement = "outside")

# A table of the ratio of energy in to energy out: one row per
# (species, scenario) -- the lm coefficient rows carry duplicated summary
# columns, so keep just the first
Esonar_tbl %>%
  group_by(binomial, scenario) %>%
  slice(1) %>%
  ungroup %>%
  select(binomial, scenario, mean_inout, se_inout) %>%
  write_csv("figs/flight_inout.csv")
# Energy in vs energy out

# One Monte Carlo draw of the sonar-response energy balance for one
# species: draws rf, Ep, delta_ff, and CL from their distributions via
# independent runif() quantiles (the call order matters for
# reproducibility under set.seed) and returns a one-row tibble with the
# quantiles, the drawn values, and Ein, Eout, Esonar (kJ).
inout_fun <- function(t_d_min, t_f_min, U_f_ms, binom) {
  morph_row <- filter(morphologies, binomial == binom)
  M_kg <- morph_row$Mass_kg
  L_m <- morph_row$Length_m
  # Parameter distributions
  rf_fun <- filter(rf_tbl, binomial == binom)$q_rf_fun[[1]]
  Ep_fun <- function(p) {
    row <- filter(prey_tbl, binomial == binom)
    qlnorm(p, meanlog = row$meanlnEp_lnkJ, sdlog = row$sdlnEp_lnkJ)
  }
  ff_fun <- function(p) {
    # No flight (U_f = 0) means no extra fluking
    if (U_f_ms == 0) {
      0
    } else {
      # Gamma with shape 4 and mean delta_ff (cf. the sensitivity analysis)
      ff_flight <- fs_fun(U_f_ms, L_m)
      ff_basal <- fs_fun(U_b_ms, L_m)
      delta_ff <- ff_flight - ff_basal
      ff_shape <- 4
      ff_scale <- delta_ff / ff_shape
      qgamma(p, ff_shape, scale = ff_scale)
    }
  }
  CL_fun <- function(p) {
    # NOTE(review): shadows the top-level CL_fun; same 1.46 + 0.0005*m
    # relation, here used as the mean of a gamma distribution
    CL <- 1.46 + 0.0005 * M_kg
    CL_shape <- 4
    CL_scale <- CL / CL_shape
    qgamma(p, CL_shape, scale = CL_scale)
  }
  # Draw each parameter from an independent uniform quantile
  rf_q <- runif(1)
  rf_h <- rf_fun(rf_q)
  Ep_q <- runif(1)
  Ep_kJ <- Ep_fun(Ep_q)
  Ein <- Ein_fun(t_d_min)(rf_h, Ep_kJ)
  ff_q <- runif(1)
  delta_ff <- ff_fun(ff_q)
  CL_q <- runif(1)
  CL <- CL_fun(CL_q)
  Eout <- Eout_fun(t_f_min, M_kg)(delta_ff, CL)
  Esonar <- Ein + Eout
  tibble(rf_q, rf_h, Ep_q, Ep_kJ, ff_q, delta_ff, CL_q, CL, Ein, Eout, Esonar)
}
# Monte Carlo energy balance (500 draws) for each species under four
# response scenarios of increasing severity.
Einout <- tribble(
  ~scenario, ~t_d_min, ~t_f_min, ~U_f_ms,
  "cessation", 60, 0, 0,
  "mild_flight", 0, 5, 2.5,
  "strong_flight", 0, 15, 3.5,
  "extreme_flight", 0, 30, 5
) %>%
  mutate(scenario = factor(scenario,
                           levels = c("cessation",
                                      "mild_flight",
                                      "strong_flight",
                                      "extreme_flight"),
                           labels = c("Cessation only",
                                      "Mild flight",
                                      "Strong flight",
                                      "Extreme flight"))) %>%
  # All but Oo and Bb
  crossing(binomial = binom_levels[c(-6, -9)]) %>%
  mutate(binomial = factor(binomial, levels = binom_levels)) %>%
  # (removed a duplicated `binomial` from the grouping variables;
  # the resulting grouping is identical)
  group_by(binomial, scenario, t_d_min, t_f_min, U_f_ms) %>%
  group_modify(function(data, key) {
    map_dfr(1:500, ~ inout_fun(key$t_d_min, key$t_f_min, key$U_f_ms, key$binomial))
  })
# Boxplots of simulated total cost (Esonar) by scenario, faceted by species
Einout %>%
  ungroup %>%
  left_join(morphologies, by = "binomial") %>%
  mutate(grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae"),
         # Wrap scenario labels onto two lines for the x axis
         scenario = fct_relabel(scenario, ~str_replace(.x, " ", "\n"))) %>%
  ggplot(aes(scenario, Esonar, color = grouping)) +
  geom_boxplot() +
  # NOTE(review): uses the Dark2 brewer palette rather than cbf_palette
  # used in the other grouped figures -- confirm this is intentional
  scale_color_brewer(palette = "Dark2") +
  labs(y = "Energy cost (kJ)") +
  facet_wrap(~ binomial,
             scales = "free_y") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        axis.title.x = element_blank(),
        legend.position = "none")
ggsave("figs/inout.pdf",
       width = 9,
       height = 6)
# Critical thresholds ---------------------------------------------------------

# Flight scenarios crossed with species, FMR multiplier (beta), and BMR
# allometry. NOTE: this reuses (overwrites) the earlier `scenario_tbl`.
scenario_tbl <- tribble(
  ~scenario, ~t_f_min, ~U_f_ms,
  "no_flight", 0, 0,
  "mild_flight", 5, 2.5,
  "strong_flight", 15, 3.5,
  "extreme_flight", 30, 5
) %>%
  mutate(scenario = factor(scenario,
                           levels = c("no_flight",
                                      "mild_flight",
                                      "strong_flight",
                                      "extreme_flight"),
                           labels = c("No flight",
                                      "Mild flight",
                                      "Strong flight",
                                      "Extreme flight"))) %>%
  # All but Oo and Bb
  crossing(binomial = binom_levels[c(-6, -9)],
           beta = c(2.5, 3, 5),
           bmr = factor(c("Kleiber", "Maresh")))
# Simulate how long feeding must cease (td, minutes) before the cumulative
# sonar-response cost reaches one day's field metabolic rate
# (FMR = beta * BMR). Repeats the stochastic accumulation 1000 times and
# returns mean, median, and quartiles of td for one species/scenario.
thr_calculator <- function(U_f_ms, tf_min, binom, beta, bmr) {
  # Morphologies
  morph_row <- filter(morphologies, binomial == binom)
  if (nrow(morph_row) != 1) stop("Imprecise binomial")
  L_m = morph_row$Length_m
  M_kg = morph_row$Mass_kg
  # Parameter distributions (same construction as inout_fun)
  rf_fun <- filter(rf_tbl, binomial == binom)$q_rf_fun[[1]]
  Ep_fun <- function(p) {
    row <- filter(prey_tbl, binomial == binom)
    qlnorm(p, meanlog = row$meanlnEp_lnkJ, sdlog = row$sdlnEp_lnkJ)
  }
  ff_fun <- function(p) {
    # No flight (U_f = 0) means no extra fluking
    if (U_f_ms == 0) {
      0
    } else {
      ff_flight <- fs_fun(U_f_ms, L_m)
      ff_basal <- fs_fun(U_b_ms, L_m)
      delta_ff <- ff_flight - ff_basal
      ff_shape <- 4
      ff_scale <- delta_ff / ff_shape
      qgamma(p, ff_shape, scale = ff_scale)
    }
  }
  CL_fun <- function(p) {
    # Shadows the top-level CL_fun; gamma with mean 1.46 + 0.0005*m
    CL <- 1.46 + 0.0005 * M_kg
    CL_shape <- 4
    CL_scale <- CL / CL_shape
    qgamma(p, CL_shape, scale = CL_scale)
  }
  # Daily FMR: BMR allometry (presumably kJ/day -- confirm units of the
  # Kleiber/Maresh coefficients) times the multiplier beta
  daily_bmr = if (bmr == "Kleiber") {
    293.1 * M_kg ^ 0.75
  } else if (bmr == "Maresh") {
    581 * M_kg ^ 0.68
  } else {
    stop("Unspecified BMR calculation")
  }
  daily_FMR <- beta * daily_bmr
  # Accumulate hour-long cessation costs (t_d = 60 min per draw) until
  # daily FMR is exceeded; the final partial hour is linearly interpolated
  thr_fun <- function() {
    Esonar <- 0
    model <- Esonar_fun(60, tf_min, M_kg)
    td_min <- 0
    #print(sprintf("%s: tf_min=%.1f, beta=%.1f, bmr=%s", binom, tf_min, beta, bmr))
    while (Esonar < daily_FMR) {
      rf_h <- rf_fun(runif(1))
      Ep_kJ <- Ep_fun(runif(1))
      delta_ff <- ff_fun(runif(1))
      CL <- CL_fun(runif(1))
      result <- model(tibble(rf_h, Ep_kJ, delta_ff, CL))
      if (Esonar + result < daily_FMR) {
        td_min <- td_min + 60
      } else {
        td_min <- td_min + 60 * (daily_FMR - Esonar) / result
      }
      Esonar <- Esonar + result
    }
    td_min
  }
  td_min <- map_dbl(1:1e3, ~ thr_fun())
  tibble(mean_thr = mean(td_min),
         med_thr = median(td_min),
         firstq_thr = quantile(td_min, 0.25),
         thirdq_thr = quantile(td_min, 0.75))
}
# One row of threshold statistics per scenario x species x beta x BMR
# combination (bare `group_by_all` without parentheses is valid inside a
# magrittr pipe).
thr_tbl <- scenario_tbl %>%
  mutate(binomial = factor(binomial, levels = binom_levels)) %>%
  group_by_all %>%
  group_modify(function(data, key) {
    with(key, thr_calculator(U_f_ms, t_f_min, binomial, beta, bmr))
  }) %>%
  arrange(binomial, t_f_min)
# Threshold figure: cessation time to reach daily FMR, log2 y-axis so the
# 30 min .. 3 day range is readable; point = mean, range = IQR.
# filter(thr_tbl, beta == 3, bmr == "Maresh") %>%
filter(thr_tbl, beta == 3) %>%
  ggplot(aes(binomial,
             mean_thr,
             color = scenario,
             shape = bmr)) +
  geom_pointrange(aes(ymin = firstq_thr,
                      ymax = thirdq_thr),
                  fatten = 2,
                  position = position_dodge(width = 0.6)) +
  scale_x_discrete(labels = morphologies$abbr) +
  scale_y_continuous(breaks = c(30, 60, 60 * 4, 60 * 12, 60*24, 60*48, 60*72),
                     minor_breaks = NULL,
                     labels = c("30 min", "1 hour", "4 hours", "12 hours", "1 day", "2 days", "3 days"),
                     trans = "log2") +
  scale_color_brewer(palette = "RdYlBu", direction = -1) +
  labs(y = "Feeding cessation") +
  theme_classic(base_size = 12) +
  theme(axis.title.x = element_blank(),
        legend.position = "bottom",
        legend.title = element_blank(),
        legend.margin = margin(0, 0, 0, -10))
ggsave("figs/critical_threshold.pdf",
       width = 180,
       height = 120,
       units = "mm",
       dpi = 600)
# Threshold table
thr_tbl %>%
  filter(beta == 3) %>%
  write_csv("data/output/thresholds.csv")
| /src/sonar-response.R | no_license | FlukeAndFeather/sonar-response | R | false | false | 32,569 | r | library(ggrepel)
library(ggsci)
library(lmodel2)
library(scales)
library(tidyverse)
# Utility functions ----------------------------------------------------------
# Abbreviate a binomial: "Balaenoptera musculus" -> "B. musculus".
# Keeps the first letter of the genus and glues it, with a ".", onto the
# " species" tail (str_extract keeps the leading space, which provides the
# space after the period). Vectorized over `binom`.
abbr_binom <- function(binom) {
  genus_initial <- str_sub(binom, 1, 1)
  species_tail <- str_extract(binom, " .*")
  paste(genus_initial, species_tail, sep = ".")
}
# Labels for logarithmic scales
log_labels <- trans_format("log10", math_format(10^.x))
# Colorblind-friendly palette keyed by the family groupings used throughout.
# NOTE(review): the descent/ascent/surface notes look copied from another
# figure's legend -- confirm before reusing them as documentation.
cbf_palette <- c("Phocoenidae and Delphinidae" = rgb(0, 114, 178, maxColorValue = 255), # blue (descent)
                 "Physeteridae and Ziphiidae" = rgb(213, 94, 0, maxColorValue = 255), # vermillion (ascent)
                 "Balaenopteridae" = rgb(0, 158, 115, maxColorValue = 255)) # bluish green (surface)
# Data ------------------------------------------------------------------------
# Morphological data
# One row per species (length, mass, clade, family); `binomial` becomes a
# factor ordered by body length so downstream plots sort small -> large.
morphologies <- read_csv("data/foragestats_combined_ko2.csv") %>%
  mutate(Species = str_replace(Species, "_", " ")) %>%
  group_by(Species) %>%
  summarize(Length_m = first(Body_length_m),
            Mass_kg = first(Body_mass_kg)) %>%
  mutate(Clade = ifelse(str_detect(Species, ".*ptera.*"),
                        "Mysticete",
                        "Odontocete"),
         Family = recode(Species,
                         `Balaenoptera bonaerensis` = "Balaenopteridae",
                         `Balaenoptera musculus` = "Balaenopteridae",
                         `Balaenoptera physalus` = "Balaenopteridae",
                         `Berardius bairdii` = "Ziphiidae",
                         `Globicephala macrorhynchus` = "Delphinidae",
                         `Globicephala melas` = "Delphinidae",
                         `Grampus griseus` = "Delphinidae",
                         `Megaptera novaeangliae` = "Balaenopteridae",
                         `Mesoplodon densirostris` = "Ziphiidae",
                         `Orcinus orca` = "Delphinidae",
                         `Phocoena phocoena` = "Phocoenidae",
                         `Physeter macrocephalus` = "Physeteridae",
                         `Ziphius cavirostris` = "Ziphiidae")) %>%
  # binomial is a factor ordered by species length
  arrange(Length_m) %>%
  mutate(binomial = factor(Species,
                           levels = unique(Species)),
         # Two-letter abbreviation (genus + species initials); the two
         # Globicephala species would collide, so they get 3-letter codes
         abbr = str_split(binomial, " ") %>%
           map_chr(~ paste0(str_sub(.x[1], 1, 1), str_sub(.x[2], 1, 1))),
         abbr = case_when(binomial == "Globicephala melas" ~ "Gme",
                          binomial == "Globicephala macrorhynchus" ~ "Gma",
                          TRUE ~ abbr)) %>%
  filter(!binomial %in% c("Orcinus orca", "Berardius bairdii"))
binom_levels <- levels(morphologies$binomial)
# Prey
# load() restores prey_tbl, buzz_rf, Md_buzz_rf and lunge_rf into the
# workspace (objects saved by upstream scripts).
load("data/prey_tbl.RData")
# Feeding rates
load("data/buzz_rf.RData")
load("data/Md_buzz_rf.RData")
load("data/lunge_rf.RData")
# Consumption power (Pin) -----------------------------------------------------
# Per-species energy-per-feeding-event (Ep) summary: mean, lognormal
# parameters, and interquartile range.
Ep_tbl <- prey_tbl %>%
  select(binomial,
         meanEp_kJ,
         meanlnEp_lnkJ,
         sdlnEp_lnkJ,
         firstqEp_kJ,
         thirdqEp_kJ) %>%
  ungroup %>%
  mutate(binomial = factor(binomial, levels = binom_levels))
# Ep figure
# Mean +/- IQR of energy per feeding event by species, log10 y-axis.
prey_tbl %>%
  filter(!binomial %in% c("Orcinus orca", "Berardius bairdii")) %>%
  mutate(binomial = factor(binomial, levels = binom_levels),
         grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae")) %>%
  ggplot(aes(binomial, meanEp_kJ, color = grouping)) +
  geom_point() +
  geom_errorbar(aes(ymin = firstqEp_kJ, ymax = thirdqEp_kJ),
                width = 0.4) +
  scale_x_discrete(labels = function(lbl) str_replace(lbl, " ", "\n")) +
  scale_y_log10(labels = log_labels) +
  scale_color_aaas() +
  labs(y = "Energy per feeding event (kJ)") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        axis.title.x = element_blank(),
        legend.position = "none")
ggsave("figs/Ep.pdf",
       width = 9,
       height = 6)
# Ep/m figure
# Mass-specific Ep against body mass, log-log, labeled with species codes.
prey_tbl %>%
  filter(!binomial %in% c("Orcinus orca", "Berardius bairdii")) %>%
  mutate(binomial = factor(binomial, levels = binom_levels),
         grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae")) %>%
  left_join(select(morphologies, binomial, abbr, Mass_kg),
            by = "binomial") %>%
  mutate(meanEp_kJkg = meanEp_kJ / Mass_kg,
         firstqEp_kJkg = firstqEp_kJ / Mass_kg,
         thirdqEp_kJkg = thirdqEp_kJ / Mass_kg) %>%
  ggplot(aes(Mass_kg, meanEp_kJkg, color = grouping, shape = grouping)) +
  geom_pointrange(aes(ymin = firstqEp_kJkg, ymax = thirdqEp_kJkg),
                  fatten = 3,
                  size = 0.75) +
  geom_text_repel(aes(label = abbr),
                  size = 3) +
  scale_x_log10(labels = log_labels) +
  scale_y_log10() +
  scale_color_manual(values = cbf_palette) +
  labs(x = "Body mass (kg)",
       y = expression("Mass-specific " * E[p] ~ (kJ ~ kg ^ -1))) +
  theme_classic(base_size = 12) +
  theme(axis.title = element_text(size = 10),
        legend.position = "none")
ggsave("figs/Ep2.pdf",
       width = 80,
       height = 65,
       units = "mm",
       dpi = 600)
# Per-species feeding-rate summary (events/hr): mean, IQR, and the empirical
# quantile function q_rf_fun used for random draws downstream.
rf_tbl <- bind_rows(buzz_rf, lunge_rf, Md_buzz_rf) %>%
  select(binomial,
         rf_h = mean_rf,
         firstq_rf,
         thirdq_rf,
         q_rf_fun) %>%
  ungroup %>%
  mutate(binomial = factor(binomial, levels = binom_levels))
# rf figure
bind_rows(buzz_rf, lunge_rf, Md_buzz_rf) %>%
  select(binomial, mean_rf, firstq_rf, thirdq_rf) %>%
  left_join(select(prey_tbl, binomial, Family), by = "binomial") %>%
  mutate(binomial = factor(binomial, levels = binom_levels),
         grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae")) %>%
  filter(binomial != "Orcinus orca") %>%
  ggplot(aes(binomial, mean_rf, color = grouping, shape = grouping)) +
  geom_pointrange(aes(ymin = firstq_rf, ymax = thirdq_rf),
                  fatten = 3,
                  size = 0.75) +
  scale_x_discrete(labels = morphologies$abbr) +
  scale_color_manual(values = cbf_palette) +
  labs(y = expression("Feeding rate " * (hr^-1))) +
  theme_classic(base_size = 12) +
  theme(axis.text.x = element_text(size = 8,
                                   angle = 45,
                                   hjust = 1),
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 10,
                                    margin = margin(0, 0, 0, 0)),
        legend.position = "none",
        plot.margin = margin(0.5, 0.5, 0.5, 0.5))
ggsave("figs/rf.pdf",
       width = 80,
       height = 65,
       units = "mm",
       dpi = 600)
# Mean consumption power Pin (kJ/hr) = mean Ep * mean feeding rate
Pin_tbl <- inner_join(Ep_tbl, rf_tbl, by = "binomial") %>%
  mutate(Pin_kJ_h = meanEp_kJ * rf_h) %>%
  left_join(select(morphologies, binomial, Family, Mass_kg), by = "binomial")
# Latin hypercube sample (n draws) of consumption power Pc = rf * Ep (kJ/hr).
# rf_q:     quantile function for feeding rate (events/hr)
# meanlnEp: lognormal meanlog of energy per feeding event (ln kJ)
# sdlnEp:   lognormal sdlog of energy per feeding event
# Returns a numeric vector of length n.
sample_Pin <- function(rf_q, meanlnEp, sdlnEp, n = 1e3) {
  quantile_funs <- list(rf = rf_q, Ep = qlnorm)
  quantile_args <- list(rf = list(),
                        Ep = list(meanlog = meanlnEp, sdlog = sdlnEp))
  lhs <- pse::LHS(function(data) data$rf * data$Ep,
                  factors = c("rf", "Ep"),
                  N = n,
                  q = quantile_funs,
                  q.arg = quantile_args,
                  res.names = "Pin_kJhr")
  # Drop the single-result third dimension of the res array
  lhs$res[, , 1]
}
# Figure 1, Pc is bimodally distributed
# Using Bw as example
bw_ep <- filter(prey_tbl, binomial == "Balaenoptera musculus")
# Inset: lognormal density of energy per feeding event (Ep), x in 10^5 kJ
ep_inset <- ggplot(tibble(x = qlnorm(c(0.001, 0.99),
                                     meanlog = bw_ep$meanlnEp_lnkJ,
                                     sdlog = bw_ep$sdlnEp_lnkJ)),
                   aes(x)) +
  stat_function(fun = dlnorm,
                args = list(meanlog = bw_ep$meanlnEp_lnkJ,
                            sdlog = bw_ep$sdlnEp_lnkJ)) +
  scale_x_continuous(breaks = seq(0, 1e6, by = 250e3),
                     labels = function(x) x / 10^5) +
  labs(x = expression(italic(E[p]) ~ (10^5 ~ kJ))) +
  theme_classic(base_size = 10) +
  theme(axis.text.y = element_blank(),
        axis.ticks.y = element_blank(),
        axis.title.y = element_blank(),
        axis.title.x = element_text(size = 8))
bw_rf <- filter(rf_tbl, binomial == "Balaenoptera musculus")
# Inset: histogram of the empirical feeding-rate distribution (1000 quantiles)
rf_inset <- ggplot(tibble(x = bw_rf$q_rf_fun[[1]](seq(0, 1, length.out = 1000))),
                   aes(x)) +
  geom_histogram(binwidth = 1,
                 boundary = 0,
                 fill = "light gray",
                 color = "black",
                 size = 0.2) +
  labs(x = expression(italic(r[f]) ~ ("hr"^{-1}))) +
  theme_classic(base_size = 10) +
  theme(axis.text.y = element_blank(),
        axis.ticks.y = element_blank(),
        axis.title.y = element_blank(),
        axis.title.x = element_text(size = 8))
# Latin hypercube sample of blue whale consumption power
bw_Pc <- filter(Pin_tbl, binomial == "Balaenoptera musculus") %>%
  group_by(binomial) %>%
  group_modify(function(data, key) {
    with(data,
         tibble(Pc = sample_Pin(q_rf_fun[[1]],
                                meanlnEp_lnkJ[1],
                                sdlnEp_lnkJ[1])))
  }) %>%
  ungroup
# Main panel: histogram of Pc with its mean as a dashed line
Pc_plot <- ggplot(bw_Pc, aes(Pc)) +
  geom_histogram(bins = 30,
                 boundary = 0,
                 fill = "light gray",
                 color = "black") +
  geom_vline(aes(xintercept = mean(Pc)),
             linetype = "dashed") +
  scale_x_continuous(breaks = seq(0, 4e7, by = 1e7),
                     limits = c(0, 4e7),
                     labels = c(0,
                                expression(1 %*% 10^7),
                                expression(2 %*% 10^7),
                                expression(3 %*% 10^7),
                                expression(4 %*% 10^7)),
                     name = expression(italic(frac(dE[a], dt)) ~ ("kJ" ~ "hr"^{-1}))) +
  theme_classic(base_size = 12) +
  theme(axis.text.y = element_blank(),
        axis.ticks.y = element_blank(),
        axis.title.y = element_blank(),
        axis.title.x = element_text(size = 8))
# Compose: main panel with the two insets placed in data coordinates;
# the auto-printed result is what ggsave() picks up below.
Pc_plot +
  annotation_custom(ggplotGrob(ep_inset),
                    xmin = 0.7e7, xmax = 2.45e7,
                    ymin = 90, ymax = 360) +
  annotation_custom(ggplotGrob(rf_inset),
                    xmin = 2.5e7, xmax = 4.25e7,
                    ymin = 90, ymax = 360)
# Dimensions and DPI for Conservation Letters
# https://authorservices.wiley.com/asset/photos/electronic_artwork_guidelines.pdf
ggsave("figs/Pc.pdf",
       width = 80,
       height = 65,
       units = "mm",
       dpi = 600)
# Pc table: per-species summary of prey energy, feeding rate, and consumption
# power (means, mass-specific means, and interquartile ranges), written to
# CSV immediately below.
Pc_tbl <- rf_tbl %>%
  left_join(Ep_tbl, by = "binomial") %>%
  # Join key made explicit (was implicit, which only emitted a message);
  # consistent with every other join in this script.
  left_join(select(morphologies, binomial, Mass_kg), by = "binomial") %>%
  group_by(binomial) %>%
  group_modify(function(data, key) {
    # Latin hypercube sample of consumption power for this species
    Pc <- sample_Pin(data$q_rf_fun[[1]],
                     data$meanlnEp_lnkJ,
                     data$sdlnEp_lnkJ)
    # tibble() evaluates arguments sequentially, so columns defined earlier
    # (meanEp_kJ, meanrf_h, meanPc_kJh) can be reused in later ones.
    tibble(meanEp_kJ = data$meanEp_kJ,
           meanEp_kJkg = meanEp_kJ / data$Mass_kg,
           iqr1Ep_kJ = data$firstqEp_kJ,
           iqr3Ep_kJ = data$thirdqEp_kJ,
           meanrf_h = data$rf_h,
           meanrf_hkg = meanrf_h / data$Mass_kg,
           iqr1rf_h = data$firstq_rf,
           iqr3rf_h = data$thirdq_rf,
           meanPc_kJh = mean(Pc),
           meanPc_kJhkg = meanPc_kJh / data$Mass_kg,
           iqr1Pc_kJh = quantile(Pc, 0.25),
           iqr3Pc_kJh = quantile(Pc, 0.75))
  })
write_csv(Pc_tbl, "data/output/Pc.csv")
# Locomotion power (Pout) -----------------------------------------------------
# Fluking (stroke) frequency from swimming speed via the Strouhal relation:
# St = A * f / U, with peak-to-peak amplitude A = La * L, hence
# f = St * U / L / La.
# U: speed (m/s); L: body length (m); La: amplitude as a fraction of body
# length; St: Strouhal number. Returns stroke frequency (Hz). Vectorized.
fs_fun <- function(U, L, La = 0.2, St = 0.3) {
  freq_hz <- St * U / L / La
  freq_hz
}
# Basal (routine) swimming speed in m/s, used as the flight baseline
U_b_ms <- 1.5
# Locomotor cost as a linear function of body mass m (kg).
# NOTE(review): used downstream with a "J/stroke" label -- confirm units and
# source of the 1.46 + 0.0005*m allometry before relying on them.
CL_fun <- function(m) 1.46 + 0.0005 * m
# Locomotion power above baseline: the extra strokes/s at flight speed `u`
# relative to the basal speed U_b_ms, times per-stroke cost and body mass.
# u: flight speed (m/s); l: body length (m); m: body mass (kg).
Pout_fun <- function(u, l, m) {
  extra_strokes_hz <- fs_fun(u, l) - fs_fun(U_b_ms, l)
  extra_strokes_hz * CL_fun(m) * m
}
# Sensitivity -----------------------------------------------------------------
# Vectorized function for calculating Esonar for sensitivity analysis
# Factory: energy (kJ) foregone by ceasing to feed for `td_min` minutes.
# Returns a function of feeding rate rf_h (events/hr) and energy per feeding
# event Ep_kJ (kJ); vectorized over both arguments.
Ein_fun <- function(td_min) {
  function(rf_h, Ep_kJ) {
    # Consumption power (kJ/hr)
    Pin_kJh <- rf_h * Ep_kJ
    # Energy not consumed over td_min minutes. (An unused Pin_W intermediate
    # was removed; its /3600 conversion yielded kW, not W, anyway.)
    Pin_kJh * td_min / 60
  }
}
# Factory: extra locomotor energy (kJ) of fleeing for `tf_min` minutes by an
# animal of mass `m` (kg). Returns a function of the increase in fluking
# frequency delta_ff (strokes/s) and locomotor cost CL; vectorized over both.
Eout_fun <- function(tf_min, m) {
  function(delta_ff, CL) {
    # Extra power in W, integrated over the flight duration;
    # /1000 converts J -> kJ, *60 converts minutes -> seconds.
    watts <- delta_ff * CL * m
    seconds <- tf_min * 60
    watts * seconds / 1000
  }
}
# Factory: total sonar-response cost (kJ) combining feeding cessation
# (td_min minutes) and flight (tf_min minutes) for an animal of mass m (kg).
# Returns a function of a data frame with columns rf_h, Ep_kJ, delta_ff, CL.
Esonar_fun <- function(td_min, tf_min, m) {
  cessation_cost <- Ein_fun(td_min)
  flight_cost <- Eout_fun(tf_min, m)
  function(data) {
    cessation_cost(data$rf_h, data$Ep_kJ) +
      flight_cost(data$delta_ff, data$CL)
  }
}
# Scenarios -------------------------------------------------------------------
# Extreme flight
# Behavioral responses
# Two response scenarios (cessation duration t_d_min, flight duration
# t_f_min, flight speed U_f_ms) crossed with all species except Oo and Bb,
# then joined to each species' feeding-rate quantile function and Ep
# lognormal parameters.
scenario_tbl <-
  tribble(~scenario, ~t_d_min, ~t_f_min, ~U_f_ms,
          "flight", 60, 30, 5,
          "consumption", 240, 10, 3.5) %>%
  crossing(select(morphologies,
                  binomial,
                  Mass_kg,
                  Length_m) %>%
             filter(!binomial %in% c("Orcinus orca",
                                     "Berardius bairdii"))) %>%
  # rf probabilities
  left_join(select(rf_tbl, binomial, q_rf_fun), by = "binomial") %>%
  # Ep probabilities
  left_join(select(Ep_tbl, binomial, meanlnEp_lnkJ, sdlnEp_lnkJ),
            by = "binomial")
# Latin hypercube sensitivity analysis of E_sonar, one group per
# scenario x species. For each group: draws a 100-point LHS over the four
# parameters, fits a z-scored multiple regression of E_sonar on them
# (standardized coefficients = sensitivities), and returns the coefficients,
# CIs, p-values and summary statistics. Also writes per-group diagnostic
# plots (parameter densities, ECDF, scatter) as side effects.
Esonar_tbl <- scenario_tbl %>%
  group_by(scenario, binomial) %>%
  group_modify(function(data, key) {
    param <- c("rf_h", "Ep_kJ", "delta_ff", "CL")
    q <- list(rf_h = data$q_rf_fun[[1]],
              Ep_kJ = qlnorm,
              delta_ff = qgamma,
              CL = qgamma)
    # List of distribution function parameters
    ff_flight <- fs_fun(data$U_f_ms, data$Length_m)
    ff_basal <- fs_fun(U_b_ms, data$Length_m)
    delta_ff <- ff_flight - ff_basal
    ff_shape <- 4
    ff_scale <- delta_ff / ff_shape
    # Locomotor cost (J/stroke) from the mass allometry
    CL <- 1.46 + 0.0005 * data$Mass_kg
    CL_shape <- 4
    CL_scale <- CL / CL_shape
    q_arg <- list(rf_h = list(),
                  Ep_kJ = list(meanlog = data$meanlnEp_lnkJ,
                               sdlog = data$sdlnEp_lnkJ),
                  delta_ff = list(shape = ff_shape, scale = ff_scale),
                  CL = list(shape = CL_shape, scale = CL_scale))
    # (An unused `param_args` tibble duplicating the values above was removed.)
    # Plots of delta_ff, CL distributions. The plot objects are passed to
    # ggsave() explicitly: inside a function they are never auto-printed, so
    # relying on ggsave()'s last_plot() default would save the wrong plot.
    ff_density_plot <- ggplot(data.frame(x = c(0, qgamma(0.99,
                                                         shape = ff_shape,
                                                         scale = ff_scale))),
                              aes(x)) +
      stat_function(fun = dgamma,
                    args = q_arg[[3]]) +
      geom_vline(xintercept = delta_ff,
                 linetype = "dashed") +
      labs(x = "Change in fluking frequency (Hz)",
           y = "Probability density",
           title = key$binomial,
           caption = sprintf("U_b = %.1f m/s, U_f = %.1f m/s, f_b = %.2f Hz, f_f = %.2f Hz",
                             U_b_ms,
                             data$U_f_ms,
                             ff_basal,
                             ff_flight)) +
      theme_minimal()
    ggsave(sprintf("figs/ff_density/%s.pdf", key$binomial),
           ff_density_plot,
           width = 9,
           height = 6)
    CL_density_plot <- ggplot(data.frame(x = c(0, qgamma(0.99,
                                                         shape = CL_shape,
                                                         scale = CL_scale))),
                              aes(x)) +
      stat_function(fun = dgamma,
                    args = q_arg[[4]]) +
      geom_vline(xintercept = CL,
                 linetype = "dashed") +
      labs(x = "Locomotor cost (J/stroke)",
           y = "density",
           title = key$binomial) +
      theme_minimal()
    ggsave(sprintf("figs/CL_density/%s.pdf", key$binomial),
           CL_density_plot,
           width = 9,
           height = 6)
    # Latin hypercube sample of parameter space.
    # (Replaced a tryCatch whose handler called browser() -- a debugging
    # leftover that hangs interactive runs and left esonar_LHS undefined in
    # batch mode -- with an informative error.)
    model <- Esonar_fun(data$t_d_min,
                        data$t_f_min,
                        data$Mass_kg)
    esonar_LHS <- tryCatch(
      pse::LHS(model, param, 1e2, q, q_arg),
      error = function(e) stop("pse::LHS failed for ", key$binomial, ": ",
                               conditionMessage(e), call. = FALSE)
    )
    # Inside mutate(), delta_ff/CL/rf_h/Ep_kJ refer to the SAMPLED columns of
    # the LHS design (data masking), not the scalar means computed above.
    sens_result <- esonar_LHS$data %>%
      mutate(Esonar_kJ = esonar_LHS$res[,1,1],
             Eout_kJ = Eout_fun(data$t_f_min, data$Mass_kg)(delta_ff, CL),
             Ein_kJ = Ein_fun(data$t_d_min)(rf_h, Ep_kJ),
             inout_ratio = Ein_kJ / Eout_kJ)
    # ECDF of model outputs
    esonar_ecdf <- ggplot(sens_result, aes(Esonar_kJ)) +
      stat_ecdf() +
      labs(x = "Energetic cost (kJ)",
           y = "Cumulative probability",
           title = key$binomial) +
      theme_minimal()
    # Scatter of model outputs w.r.t. parameters
    esonar_scatter <- sens_result %>%
      gather(parameter, value, rf_h:CL) %>%
      ggplot(aes(value, Esonar_kJ)) +
      geom_point(size = 0.5) +
      geom_smooth(method = "lm",
                  se = FALSE) +
      labs(x = "",
           y = "Energetic cost (kJ)",
           title = key$binomial) +
      facet_wrap(~ parameter,
                 scales = "free_x",
                 strip.position = "bottom") +
      theme_minimal() +
      theme(strip.placement = "outside")
    # Save plots
    ggsave(sprintf("figs/esonar_ecdfs/%s_%s.pdf", key$scenario, key$binomial),
           esonar_ecdf,
           width = 9,
           height = 6)
    ggsave(sprintf("figs/esonar_scatters/%s_%s.pdf", key$scenario, key$binomial),
           esonar_scatter,
           width = 9,
           height = 6)
    # Linear model results
    esonar_linear <- sens_result %>%
      # Normalize values using z-scores
      mutate_at(vars(Esonar_kJ, rf_h, Ep_kJ, CL, delta_ff),
                function(x) (x - mean(x)) / sd(x)) %>%
      # Multiple regression
      lm(Esonar_kJ ~ rf_h + Ep_kJ + CL + delta_ff, data = .)
    # Extract coefficients, p-values, and confidence intervals
    esonar_coef <- coef(esonar_linear)
    esonar_pval <- summary(esonar_linear)$coefficients[,4]
    esonar_ci <- as_tibble(confint(esonar_linear, level = 0.95),
                           rownames = "param")
    colnames(esonar_ci)[2:3] <- c("ci_min", "ci_max")
    # Combine and drop info for the intercept
    lm_results <- cbind(esonar_ci, esonar_coef, esonar_pval)[-1,]
    esonar_results <- summarize(sens_result,
                                mean_Esonar = mean(Esonar_kJ),
                                median_Esonar = median(Esonar_kJ),
                                iqr_Esonar = IQR(Esonar_kJ),
                                median_inout = median(inout_ratio),
                                inout_25 = quantile(inout_ratio, 0.25),
                                inout_75 = quantile(inout_ratio, 0.75),
                                mean_inout = mean(inout_ratio),
                                se_inout = sd(inout_ratio)/sqrt(n()))
    cbind(lm_results, esonar_results)
  })
# A plot with the normalized linear model coefficients
# Relabel the parameters with plotmath strings (parsed into math notation on
# axis labels) and attach the family grouping used for coloring.
coef_data <- Esonar_tbl %>%
  ungroup %>%
  mutate(param = factor(param,
                        levels = c("CL",
                                   "delta_ff",
                                   "Ep_kJ",
                                   "rf_h"),
                        labels = c("C[L]",
                                   "Delta*f[f]",
                                   "E[p]",
                                   "r[f]"))) %>%
  left_join(select(morphologies, binomial, Family),
            by = "binomial") %>%
  mutate(grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae") %>%
           factor(levels = c("Phocoenidae and Delphinidae",
                             "Physeteridae and Ziphiidae",
                             "Balaenopteridae")))
# For presentations -----------------------------------------------------------
consumption_coef <- filter(coef_data, scenario == "consumption")
# Consumption, all species
# Boxplots of standardized sensitivity coefficients, one box per parameter
# (coord_flip puts parameters on the y axis; labels are parsed plotmath).
ggplot(consumption_coef, aes(x = param, y = esonar_coef)) +
  geom_boxplot() +
  coord_flip() +
  scale_x_discrete(labels = parse(text = levels(consumption_coef$param))) +
  scale_y_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  labs(y = "Sensitivity") +
  theme_classic() +
  theme(axis.text.x = element_blank(),
        axis.ticks = element_blank(),
        axis.title.y = element_blank())
ggsave("figs/consumption_allsp.pdf",
       width = 4.5,
       height = 3)
# Consumption, by grouping
consumption_grouped <- ggplot(
  consumption_coef,
  aes(x = param, y = esonar_coef, color = grouping)
) +
  geom_boxplot() +
  coord_flip() +
  scale_x_discrete(labels = parse(text = levels(consumption_coef$param))) +
  scale_y_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  scale_color_manual(values = cbf_palette) +
  labs(y = "Sensitivity") +
  theme_classic(base_size = 12) +
  theme(axis.text.x = element_blank(),
        axis.text.y = element_text(size = 12),
        axis.ticks = element_blank(),
        axis.title.y = element_blank(),
        legend.position = "none")
# Print so ggsave()'s last_plot() default picks it up
consumption_grouped
ggsave("figs/consumption_groups.pdf",
       width = 4.5,
       height = 3)
flight_coef <- filter(coef_data, scenario == "flight")
# Flight, all species
ggplot(flight_coef, aes(x = param, y = esonar_coef)) +
  geom_boxplot() +
  coord_flip() +
  scale_x_discrete(labels = parse(text = levels(flight_coef$param))) +
  scale_y_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  labs(y = "Sensitivity") +
  theme_classic() +
  theme(axis.text.x = element_blank(),
        axis.ticks = element_blank(),
        axis.title.y = element_blank())
ggsave("figs/flight_allsp.pdf",
       width = 4.5,
       height = 3)
# Flight, by grouping
flight_grouped <- ggplot(flight_coef,
                         aes(x = param, y = esonar_coef, color = grouping)) +
  geom_boxplot() +
  coord_flip() +
  scale_x_discrete(labels = parse(text = levels(flight_coef$param))) +
  scale_y_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  scale_color_manual(values = cbf_palette) +
  labs(y = "Sensitivity") +
  theme_classic(base_size = 12) +
  theme(axis.text.x = element_blank(),
        axis.text.y = element_text(size = 12),
        axis.ticks = element_blank(),
        axis.title.y = element_blank(),
        legend.position = "none")
flight_grouped
ggsave("figs/flight_groups.pdf",
       width = 4.5,
       height = 3)
# Figure for paper
# Combine the two grouped panels side by side with patchwork, restoring the
# ticks/labels that were blanked for the presentation versions.
consumption_grouped +
  theme(axis.ticks = element_line(),
        axis.text.x = element_text()) +
  flight_grouped +
  theme(axis.ticks = element_line(),
        axis.text.x = element_text()) +
  patchwork::plot_layout(nrow = 1) +
  patchwork::plot_annotation(tag_levels = "A")
ggsave("figs/sensitivity.pdf",
       width = 180,
       height = 80,
       units = "mm",
       dpi = 600)
# Ratio of in/out
# Console summary: mean sensitivity of the consumption-side (r_f, E_p) vs
# locomotion-side (C_L, delta f_f) parameters per scenario.
coef_data %>%
  mutate(inout = ifelse(param %in% c("r[f]", "E[p]"), "in", "out")) %>%
  group_by(inout, scenario) %>%
  summarize(mean_coef = mean(esonar_coef))
# NOTE(review): this plot looks like broken exploratory leftovers -- x is the
# discrete factor `binomial` but scale_x_continuous() is applied, which
# errors at print time ("Discrete value supplied to continuous scale");
# the commented-out geom_vline suggests an earlier aes mapping. Confirm
# intent before fixing or deleting.
ggplot(consumption_coef, aes(x = binomial, y = esonar_coef)) +
  # geom_vline(aes(xintercept = mean_coef),
  #            summarize(group_by(consumption_coef, param),
  #                      mean_coef = mean(esonar_coef)),
  #            linetype = "dashed") +
  geom_boxplot() +
  facet_grid(param ~ .,
             labeller = label_parsed,
             switch = "y") +
  scale_x_continuous(limits = c(-0.25, 1.0),
                     breaks = seq(-0.25, 1.0, by = 0.25)) +
  labs(x = "Sensitivity") +
  theme_classic() +
  theme(axis.text.x = element_blank(),
        axis.ticks.x = element_blank(),
        axis.title.y = element_blank(),
        legend.position = "none",
        legend.title = element_blank(),
        strip.background = element_blank(),
        strip.placement = "outside")
# A table of the ratio of energy in to energy out
# mean_inout/se_inout are repeated on every coefficient row, so keep one row
# per species x scenario before writing.
Esonar_tbl %>%
  group_by(binomial, scenario) %>%
  slice(1) %>%
  ungroup %>%
  select(binomial, scenario, mean_inout, se_inout) %>%
  write_csv("figs/flight_inout.csv")
# Energy in vs energy out
# One random draw of the cessation (Ein) and flight (Eout) cost components
# for one species under one scenario. Returns a one-row tibble with the
# uniform draws (for traceability), the realized parameter values, and the
# resulting energies (kJ).
# t_d_min: cessation minutes; t_f_min: flight minutes; U_f_ms: flight speed
# (m/s); binom: species binomial.
inout_fun <- function(t_d_min, t_f_min, U_f_ms, binom) {
  morph_row <- filter(morphologies, binomial == binom)
  M_kg <- morph_row$Mass_kg
  L_m <- morph_row$Length_m
  # Parameter distributions
  rf_fun <- filter(rf_tbl, binomial == binom)$q_rf_fun[[1]]
  Ep_fun <- function(p) {
    row <- filter(prey_tbl, binomial == binom)
    qlnorm(p, meanlog = row$meanlnEp_lnkJ, sdlog = row$sdlnEp_lnkJ)
  }
  # Gamma (shape 4) around the flight-minus-basal fluking frequency; returns
  # exactly 0 (no extra strokes) when there is no flight
  ff_fun <- function(p) {
    if (U_f_ms == 0) {
      0
    } else {
      ff_flight <- fs_fun(U_f_ms, L_m)
      ff_basal <- fs_fun(U_b_ms, L_m)
      delta_ff <- ff_flight - ff_basal
      ff_shape <- 4
      ff_scale <- delta_ff / ff_shape
      qgamma(p, ff_shape, scale = ff_scale)
    }
  }
  # Local quantile-function CL_fun shadows the global deterministic CL_fun
  CL_fun <- function(p) {
    CL <- 1.46 + 0.0005 * M_kg
    CL_shape <- 4
    CL_scale <- CL / CL_shape
    qgamma(p, CL_shape, scale = CL_scale)
  }
  rf_q <- runif(1)
  rf_h <- rf_fun(rf_q)
  Ep_q <- runif(1)
  Ep_kJ <- Ep_fun(Ep_q)
  Ein <- Ein_fun(t_d_min)(rf_h, Ep_kJ)
  ff_q <- runif(1)
  delta_ff <- ff_fun(ff_q)
  CL_q <- runif(1)
  CL <- CL_fun(CL_q)
  Eout <- Eout_fun(t_f_min, M_kg)(delta_ff, CL)
  Esonar <- Ein + Eout
  tibble(rf_q, rf_h, Ep_q, Ep_kJ, ff_q, delta_ff, CL_q, CL, Ein, Eout, Esonar)
}
# 500 Monte Carlo draws of the in/out energy components per scenario x
# species (cessation-only plus three flight intensities).
Einout <- tribble(
  ~scenario, ~t_d_min, ~t_f_min, ~U_f_ms,
  "cessation", 60, 0, 0,
  "mild_flight", 0, 5, 2.5,
  "strong_flight", 0, 15, 3.5,
  "extreme_flight", 0, 30, 5
) %>%
  mutate(scenario = factor(scenario,
                           levels = c("cessation",
                                      "mild_flight",
                                      "strong_flight",
                                      "extreme_flight"),
                           labels = c("Cessation only",
                                      "Mild flight",
                                      "Strong flight",
                                      "Extreme flight"))) %>%
  # All but Oo and Bb
  crossing(binomial = binom_levels[c(-6, -9)]) %>%
  mutate(binomial = factor(binomial, levels = binom_levels)) %>%
  # NOTE(review): `binomial` appears twice in this group_by call -- verify it
  # is harmless on your dplyr version and drop the duplicate if so.
  group_by(binomial, scenario, t_d_min, t_f_min, U_f_ms, binomial) %>%
  group_modify(function(data, key) {
    map_dfr(1:500, ~ inout_fun(key$t_d_min, key$t_f_min, key$U_f_ms, key$binomial))
  })
# In/out figure: total cost per scenario, faceted by species with free y
# scales; newline in scenario labels keeps the x axis compact.
Einout %>%
  ungroup %>%
  left_join(morphologies, by = "binomial") %>%
  mutate(grouping = case_when(Family %in% c("Phocoenidae", "Delphinidae") ~ "Phocoenidae and Delphinidae",
                              Family %in% c("Physeteridae", "Ziphiidae") ~ "Physeteridae and Ziphiidae",
                              Family == "Balaenopteridae" ~ "Balaenopteridae"),
         scenario = fct_relabel(scenario, ~str_replace(.x, " ", "\n"))) %>%
  ggplot(aes(scenario, Esonar, color = grouping)) +
  geom_boxplot() +
  scale_color_brewer(palette = "Dark2") +
  labs(y = "Energy cost (kJ)") +
  facet_wrap(~ binomial,
             scales = "free_y") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        axis.title.x = element_blank(),
        legend.position = "none")
ggsave("figs/inout.pdf",
       width = 9,
       height = 6)
# Critical thresholds ---------------------------------------------------------
# Flight scenarios (t_f_min = flight duration in minutes, U_f_ms = flight
# speed in m/s) crossed with every species except Orcinus orca and Berardius
# bairdii, three metabolic scopes (beta) and two BMR allometries.
scenario_tbl <- tribble(
  ~scenario, ~t_f_min, ~U_f_ms,
  "no_flight", 0, 0,
  "mild_flight", 5, 2.5,
  "strong_flight", 15, 3.5,
  "extreme_flight", 30, 5
) %>%
  mutate(scenario = factor(scenario,
                           levels = c("no_flight",
                                      "mild_flight",
                                      "strong_flight",
                                      "extreme_flight"),
                           labels = c("No flight",
                                      "Mild flight",
                                      "Strong flight",
                                      "Extreme flight"))) %>%
  # All but Oo and Bb
  crossing(binomial = binom_levels[c(-6, -9)],
           beta = c(2.5, 3, 5),
           bmr = factor(c("Kleiber", "Maresh")))
# Feeding-cessation time (min) at which the accumulated sonar-response cost
# reaches one day's field metabolic rate (FMR) for a single species under a
# single flight scenario. Monte Carlo: repeatedly draws the four model
# parameters, accumulates the hourly cost until it crosses daily_FMR, and
# records the elapsed cessation time (interpolating within the final hour).
#
# U_f_ms: flight speed (m/s); tf_min: flight duration (min)
# binom:  species binomial; must match exactly one row of `morphologies`
# beta:   metabolic scope multiplier applied to BMR
# bmr:    BMR allometry to use, "Kleiber" or "Maresh"
# Returns a one-row tibble with mean/median/IQR of the threshold in minutes.
thr_calculator <- function(U_f_ms, tf_min, binom, beta, bmr) {
  # Species morphology -- fail loudly on an ambiguous or missing binomial
  sp <- filter(morphologies, binomial == binom)
  if (nrow(sp) != 1) stop("Imprecise binomial")
  body_length_m <- sp$Length_m
  body_mass_kg <- sp$Mass_kg
  # Quantile functions for the four model parameters
  # Feeding rate: empirical quantile function fitted upstream
  draw_rf <- filter(rf_tbl, binomial == binom)$q_rf_fun[[1]]
  # Energy per feeding event: lognormal fit from prey_tbl
  draw_Ep <- function(p) {
    prey <- filter(prey_tbl, binomial == binom)
    qlnorm(p, meanlog = prey$meanlnEp_lnkJ, sdlog = prey$sdlnEp_lnkJ)
  }
  # Increase in fluking frequency during flight: gamma (shape 4) whose mean
  # equals the deterministic flight-minus-basal frequency difference.
  # Note: when U_f_ms == 0 the argument `p` is never forced, so no random
  # number is consumed (matches the original's lazy-evaluation behavior).
  draw_delta_ff <- function(p) {
    if (U_f_ms == 0) return(0)
    mean_delta <- fs_fun(U_f_ms, body_length_m) - fs_fun(U_b_ms, body_length_m)
    qgamma(p, 4, scale = mean_delta / 4)
  }
  # Locomotor cost: gamma (shape 4) with mean from the mass allometry
  draw_CL <- function(p) {
    qgamma(p, 4, scale = (1.46 + 0.0005 * body_mass_kg) / 4)
  }
  # Daily field metabolic rate = beta * BMR (kJ/day)
  daily_bmr <- switch(
    as.character(bmr),
    Kleiber = 293.1 * body_mass_kg ^ 0.75,
    Maresh = 581 * body_mass_kg ^ 0.68,
    stop("Unspecified BMR calculation")
  )
  daily_FMR <- beta * daily_bmr
  # One Monte Carlo realization of the cessation threshold (minutes)
  one_threshold <- function() {
    accumulated_kJ <- 0
    cost_model <- Esonar_fun(60, tf_min, body_mass_kg)
    elapsed_min <- 0
    while (accumulated_kJ < daily_FMR) {
      hourly_cost <- cost_model(tibble(rf_h = draw_rf(runif(1)),
                                       Ep_kJ = draw_Ep(runif(1)),
                                       delta_ff = draw_delta_ff(runif(1)),
                                       CL = draw_CL(runif(1))))
      if (accumulated_kJ + hourly_cost < daily_FMR) {
        elapsed_min <- elapsed_min + 60
      } else {
        # Crossed the FMR this hour: interpolate within the final hour
        elapsed_min <- elapsed_min + 60 * (daily_FMR - accumulated_kJ) / hourly_cost
      }
      accumulated_kJ <- accumulated_kJ + hourly_cost
    }
    elapsed_min
  }
  thresholds <- vapply(seq_len(1000), function(i) one_threshold(), numeric(1))
  tibble(mean_thr = mean(thresholds),
         med_thr = median(thresholds),
         firstq_thr = quantile(thresholds, 0.25),
         thirdq_thr = quantile(thresholds, 0.75))
}
# One row of threshold statistics per scenario x species x beta x BMR
# combination (bare `group_by_all` without parentheses is valid inside a
# magrittr pipe).
thr_tbl <- scenario_tbl %>%
  mutate(binomial = factor(binomial, levels = binom_levels)) %>%
  group_by_all %>%
  group_modify(function(data, key) {
    with(key, thr_calculator(U_f_ms, t_f_min, binomial, beta, bmr))
  }) %>%
  arrange(binomial, t_f_min)
# Threshold figure: cessation time to reach daily FMR, log2 y-axis so the
# 30 min .. 3 day range is readable; point = mean, range = IQR.
# filter(thr_tbl, beta == 3, bmr == "Maresh") %>%
filter(thr_tbl, beta == 3) %>%
  ggplot(aes(binomial,
             mean_thr,
             color = scenario,
             shape = bmr)) +
  geom_pointrange(aes(ymin = firstq_thr,
                      ymax = thirdq_thr),
                  fatten = 2,
                  position = position_dodge(width = 0.6)) +
  scale_x_discrete(labels = morphologies$abbr) +
  scale_y_continuous(breaks = c(30, 60, 60 * 4, 60 * 12, 60*24, 60*48, 60*72),
                     minor_breaks = NULL,
                     labels = c("30 min", "1 hour", "4 hours", "12 hours", "1 day", "2 days", "3 days"),
                     trans = "log2") +
  scale_color_brewer(palette = "RdYlBu", direction = -1) +
  labs(y = "Feeding cessation") +
  theme_classic(base_size = 12) +
  theme(axis.title.x = element_blank(),
        legend.position = "bottom",
        legend.title = element_blank(),
        legend.margin = margin(0, 0, 0, -10))
ggsave("figs/critical_threshold.pdf",
       width = 180,
       height = 120,
       units = "mm",
       dpi = 600)
# Threshold table
thr_tbl %>%
  filter(beta == 3) %>%
  write_csv("data/output/thresholds.csv")
|
################################################################################
## Exploratory Data Analysis Course- Project 1
## Author : Ralston Fonseca
## Date : 24 Aug 2018
## Version : 1.0
## Description : This script creates Plot3.png to examine how household energy
## usage varies over a 2-day period in February, 2007
################################################################################
# Load packages
library(dplyr)
library(lubridate)
# Read household_power_consumption.txt data file
# ("?" encodes missing values in this dataset)
hpcData_full <- read.table("household_power_consumption.txt",header = TRUE,
                           sep = ";", na.strings = "?") # dim 2075259 9
# Convert Time column to DateTime and rename the column
# NOTE(review): strptime() returns POSIXlt; storing POSIXlt in a data frame
# column is fragile -- POSIXct would be safer. Confirm before changing, since
# plot() accepts either here.
hpcData_full$Time <- strptime(paste(hpcData_full$Date, hpcData_full$Time),
                              "%d/%m/%Y %H:%M:%S")
hpc_col_names <- colnames(hpcData_full)
hpc_col_names[2] <- "DateTime"
names(hpcData_full) <- hpc_col_names
# Convert Date column to Date
hpcData_full$Date <- as.Date(hpcData_full$Date,"%d/%m/%Y")
# Select a subset of observations for 2 days : 2007-02-01 and 2007-02-02
hpcData_subset <- subset(hpcData_full, Date >= ymd("2007/02/01") &
                           Date <= ymd("2007/02/02")) # dim 2880 9
# remove variables not needed to clear memory
rm(hpc_col_names,hpcData_full)
#Set width of 480 pixels and a height of 480 pixels and open png device
png("plot3.png", width=480, height=480)
# plot a line graph - Plot 3: the three sub-metering series over time
plot(hpcData_subset$DateTime,hpcData_subset$Sub_metering_1,type = "l",
     xlab = "",ylab="Energy sub metering")
lines(hpcData_subset$DateTime,hpcData_subset$Sub_metering_2, col = "red")
lines(hpcData_subset$DateTime,hpcData_subset$Sub_metering_3, col = "blue")
legend("topright", col = c("black","red","blue"),
       c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty = 1)
# close device
dev.off()
############################## EOF ############################################# | /plot3.R | no_license | RalstonFonseca/ExData_Plotting1 | R | false | false | 2,067 | r | ################################################################################
## Exploratory Data Analysis Course- Project 1
## Author : Ralston Fonseca
## Date : 24 Aug 2018
## Version : 1.0
## Description : This script creates Plot3.png to examine how household energy
## usage varies over a 2-day period in February, 2007
################################################################################
# Load packages
library(dplyr)
library(lubridate)
# Read household_power_consumption.txt data file
# ("?" encodes missing values in this dataset)
hpcData_full <- read.table("household_power_consumption.txt",header = TRUE,
                           sep = ";", na.strings = "?") # dim 2075259 9
# Convert Time column to DateTime and rename the column
# NOTE(review): strptime() returns POSIXlt; storing POSIXlt in a data frame
# column is fragile -- POSIXct would be safer. Confirm before changing, since
# plot() accepts either here.
hpcData_full$Time <- strptime(paste(hpcData_full$Date, hpcData_full$Time),
                              "%d/%m/%Y %H:%M:%S")
hpc_col_names <- colnames(hpcData_full)
hpc_col_names[2] <- "DateTime"
names(hpcData_full) <- hpc_col_names
# Convert Date column to Date
hpcData_full$Date <- as.Date(hpcData_full$Date,"%d/%m/%Y")
# Select a subset of observations for 2 days : 2007-02-01 and 2007-02-02
hpcData_subset <- subset(hpcData_full, Date >= ymd("2007/02/01") &
                           Date <= ymd("2007/02/02")) # dim 2880 9
# remove variables not needed to clear memory
rm(hpc_col_names,hpcData_full)
#Set width of 480 pixels and a height of 480 pixels and open png device
png("plot3.png", width=480, height=480)
# plot a line graph - Plot 3: the three sub-metering series over time
plot(hpcData_subset$DateTime,hpcData_subset$Sub_metering_1,type = "l",
     xlab = "",ylab="Energy sub metering")
lines(hpcData_subset$DateTime,hpcData_subset$Sub_metering_2, col = "red")
lines(hpcData_subset$DateTime,hpcData_subset$Sub_metering_3, col = "blue")
legend("topright", col = c("black","red","blue"),
       c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty = 1)
# close device
dev.off()
############################## EOF ############################################# |
# Generated fuzz/regression input for the internal eDMA:::PowerSet routine:
# a single extreme negative integer argument (crash/valgrind check).
testlist <- list(iK = -50331648L)
# Invoke the routine with the generated argument list.
result <- do.call(eDMA:::PowerSet,testlist)
str(result) | /eDMA/inst/testfiles/PowerSet/AFL_PowerSet/PowerSet_valgrind_files/1609870241-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 89 | r | testlist <- list(iK = -50331648L)
# Invoke the internal eDMA:::PowerSet routine with the generated argument
# list (testlist is defined on the preceding line of this generated script).
result <- do.call(eDMA:::PowerSet,testlist)
# Print the structure of the result; the script only exercises the native
# code path, it asserts nothing about the value.
str(result)
# Unroot a phylogenetic tree using the ape package.
library(ape)
# Read the tree (Newick format) from disk
testtree <- read.tree("11310_0.txt")
# Remove the root node -- presumably required by a downstream codeml step,
# judging by the project layout; TODO confirm
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11310_0_unrooted.txt") | /codeml_files/newick_trees_processed/11310_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# Read the Newick tree, remove its root, and write the unrooted tree out.
testtree <- read.tree("11310_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11310_0_unrooted.txt")
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatBindot <- ggproto("StatBindot", Stat,

  # Hint at the default binning (30 bins across the range) when the user
  # supplied neither `breaks` nor `binwidth`.
  setup_params = function(data, params) {
    if (is.null(params$breaks) && is.null(params$binwidth)) {
      message("`stat_bindot()` using `bins = 30`. Pick better value with `binwidth`.")
    }
    params
  },

  # Drop rows with missing values on the binning axis or in `weight`
  # before any computation happens.
  setup_data = function(data, params) {
    remove_missing(data, isTRUE(params$na.rm), c(params$binaxis, "weight"),
      name = "stat_bindot", finite = TRUE)
  },
  compute_panel = function(self, data, na.rm = FALSE, binwidth = NULL,
                           binaxis = "x", method = "dotdensity",
                           binpositions = "bygroup", ...) {
    # If using dotdensity and binning over all, we need to find the bin centers
    # for all data before it's split into groups, so that every group in the
    # panel shares the same bin positions.
    if (method == "dotdensity" && binpositions == "all") {
      if (binaxis == "x") {
        newdata <- densitybin(x = data$x, weight = data$weight, binwidth = binwidth,
                     method = method)
        # densitybin() returns rows sorted by the binned values, so sort the
        # original data the same way before copying columns across.
        data <- plyr::arrange(data, x)
        newdata <- plyr::arrange(newdata, x)
      } else if (binaxis == "y") {
        newdata <- densitybin(x = data$y, weight = data$weight, binwidth = binwidth,
                     method = method)
        data <- plyr::arrange(data, y)
        newdata <- plyr::arrange(newdata, x)
      }
      # Attach the panel-wide bin assignments; compute_group() sees these and
      # skips its per-group binning (its binpositions == "bygroup" branch).
      data$bin <- newdata$bin
      data$binwidth <- newdata$binwidth
      data$weight <- newdata$weight
      data$bincenter <- newdata$bincenter
    }
    ggproto_parent(Stat, self)$compute_panel(data, binwidth = binwidth,
      binaxis = binaxis, method = method, binpositions = binpositions, ...)
  },
  compute_group = function(self, data, scales, binwidth = NULL, binaxis = "x",
                           method = "dotdensity", binpositions = "bygroup",
                           origin = NULL, breaks = NULL, width = 0.9, drop = FALSE,
                           right = TRUE, ...) {
    # This function taken from integer help page
    is.wholenumber <- function(x, tol = .Machine$double.eps ^ 0.5) {
        abs(x - round(x)) < tol
    }
    # Dot weights must be whole, nonnegative counts.
    # BUG FIX: the original joined the two checks with `&&`, so a weight that
    # was only fractional (or only negative) slipped through even though the
    # error message promises "nonnegative integers"; either defect alone must
    # reject, hence `||`.
    if (!is.null(data$weight) && (any(!is.wholenumber(data$weight)) ||
        any(data$weight < 0))) {
      stop("Weights for stat_bindot must be nonnegative integers.")
    }
    if (binaxis == "x") {
      range <- scale_dimension(scales$x, c(0, 0))
      values <- data$x
    } else if (binaxis == "y") {
      range <- scale_dimension(scales$y, c(0, 0))
      values <- data$y
      # The middle of each group, on the stack axis
      midline <- mean(range(data$x))
    }
    if (method == "histodot") {
      # Use the function from stat_bin
      data <- bin(x = values, weight = data$weight, binwidth = binwidth, origin = origin,
                  breaks = breaks, range = range, width = width, drop = drop, right = right)
      # Change "width" column to "binwidth" for consistency
      names(data)[names(data) == "width"] <- "binwidth"
      names(data)[names(data) == "x"] <- "bincenter"
    } else if (method == "dotdensity") {
      # If bin centers are found by group instead of by all, find the bin centers
      # (If binpositions=="all", then we'll already have bin centers from
      # compute_panel().)
      if (binpositions == "bygroup")
        data <- densitybin(x = values, weight = data$weight, binwidth = binwidth,
                  method = method, range = range)
      # Collapse each bin and get a count
      data <- plyr::ddply(data, "bincenter", plyr::summarise, binwidth = binwidth[1], count = sum(weight))
      if (sum(data$count, na.rm = TRUE) != 0) {
        data$count[is.na(data$count)] <- 0
        # ncount: counts rescaled to a 0..1 range within the group
        data$ncount <- data$count / max(abs(data$count), na.rm = TRUE)
        if (drop) data <- subset(data, count > 0)
      }
    }
    if (binaxis == "x") {
      names(data)[names(data) == "bincenter"] <- "x"
      # For x binning, the width of the geoms is same as the width of the bin
      data$width <- data$binwidth
    } else if (binaxis == "y") {
      names(data)[names(data) == "bincenter"] <- "y"
      # For y binning, set the x midline. This is needed for continuous x axis
      data$x <- midline
    }
    return(data)
  },
  default_aes = aes(y = ..count..),
  required_aes = "x"
)
# Bin values with the dot-density algorithm, WITHOUT collapsing each bin to a
# single count: every observation keeps its own row.
#
# A new bin is started at the first (sorted) value that lies past the end of
# the previous bin, so bin positions follow the data rather than a fixed grid.
#
# Args:
#   x        values to bin (an empty or all-NA input yields an empty
#            data frame).
#   weight   per-observation weights; NULL means unit weights, and NA
#            weights are treated as 0.
#   binwidth width of each bin; defaults to 1/30 of the data range.
#   method   unused in this function; kept for call-compatibility with the
#            other binning helpers.
#   range    range used to derive the default binwidth; defaults to range(x).
#
# Returns a data frame, ordered by x, with columns: x, bin (1-based bin ID),
# binwidth, weight, and bincenter (midpoint of each bin's extreme values).
densitybin <- function(x, weight = NULL, binwidth = NULL, method = method, range = NULL) {

    if (length(stats::na.omit(x)) == 0) return(data.frame())
    if (is.null(weight))  weight <- rep(1, length(x))
    weight[is.na(weight)] <- 0

    if (is.null(range))    range <- range(x, na.rm = TRUE, finite = TRUE)
    if (is.null(binwidth)) binwidth <- diff(range) / 30

    # Sort weight and x, by x
    ord <- order(x)
    weight <- weight[ord]
    x <- x[ord]

    cbin <- 0                     # Current bin ID
    bin <- rep.int(NA, length(x)) # The bin ID for each observation
    binend <- -Inf                # End position of current bin (scan left to right)

    # Scan the sorted values and assign each one to a bin.
    # seq_along() instead of 1:length(x): safe for zero-length input.
    for (i in seq_along(x)) {
        # If past end of bin, start a new bin at this point
        if (x[i] >= binend) {
            binend <- x[i] + binwidth
            cbin <- cbin + 1
        }
        bin[i] <- cbin
    }

    results <- data.frame(x, bin, binwidth, weight)
    # Midpoint of each bin's extreme values. ave() preserves row order, so
    # this is equivalent to the previous plyr::ddply() split-apply-combine
    # (rows were already sorted by bin) without the plyr round-trip.
    results$bincenter <- ave(results$x, results$bin,
                             FUN = function(v) (min(v) + max(v)) / 2)

    return(results)
}
| /R/stat-bindot.r | no_license | jinyueyang/ggplot2 | R | false | false | 5,687 | r | #' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatBindot <- ggproto("StatBindot", Stat,

  # Hint at the default binning (30 bins across the range) when the user
  # supplied neither `breaks` nor `binwidth`.
  setup_params = function(data, params) {
    if (is.null(params$breaks) && is.null(params$binwidth)) {
      message("`stat_bindot()` using `bins = 30`. Pick better value with `binwidth`.")
    }
    params
  },

  # Drop rows with missing values on the binning axis or in `weight`
  # before any computation happens.
  setup_data = function(data, params) {
    remove_missing(data, isTRUE(params$na.rm), c(params$binaxis, "weight"),
      name = "stat_bindot", finite = TRUE)
  },
  compute_panel = function(self, data, na.rm = FALSE, binwidth = NULL,
                           binaxis = "x", method = "dotdensity",
                           binpositions = "bygroup", ...) {
    # If using dotdensity and binning over all, we need to find the bin centers
    # for all data before it's split into groups, so that every group in the
    # panel shares the same bin positions.
    if (method == "dotdensity" && binpositions == "all") {
      if (binaxis == "x") {
        newdata <- densitybin(x = data$x, weight = data$weight, binwidth = binwidth,
                     method = method)
        # densitybin() returns rows sorted by the binned values, so sort the
        # original data the same way before copying columns across.
        data <- plyr::arrange(data, x)
        newdata <- plyr::arrange(newdata, x)
      } else if (binaxis == "y") {
        newdata <- densitybin(x = data$y, weight = data$weight, binwidth = binwidth,
                     method = method)
        data <- plyr::arrange(data, y)
        newdata <- plyr::arrange(newdata, x)
      }
      # Attach the panel-wide bin assignments; compute_group() sees these and
      # skips its per-group binning (its binpositions == "bygroup" branch).
      data$bin <- newdata$bin
      data$binwidth <- newdata$binwidth
      data$weight <- newdata$weight
      data$bincenter <- newdata$bincenter
    }
    ggproto_parent(Stat, self)$compute_panel(data, binwidth = binwidth,
      binaxis = binaxis, method = method, binpositions = binpositions, ...)
  },
  compute_group = function(self, data, scales, binwidth = NULL, binaxis = "x",
                           method = "dotdensity", binpositions = "bygroup",
                           origin = NULL, breaks = NULL, width = 0.9, drop = FALSE,
                           right = TRUE, ...) {
    # This function taken from integer help page
    is.wholenumber <- function(x, tol = .Machine$double.eps ^ 0.5) {
        abs(x - round(x)) < tol
    }
    # Dot weights must be whole, nonnegative counts.
    # BUG FIX: the original joined the two checks with `&&`, so a weight that
    # was only fractional (or only negative) slipped through even though the
    # error message promises "nonnegative integers"; either defect alone must
    # reject, hence `||`.
    if (!is.null(data$weight) && (any(!is.wholenumber(data$weight)) ||
        any(data$weight < 0))) {
      stop("Weights for stat_bindot must be nonnegative integers.")
    }
    if (binaxis == "x") {
      range <- scale_dimension(scales$x, c(0, 0))
      values <- data$x
    } else if (binaxis == "y") {
      range <- scale_dimension(scales$y, c(0, 0))
      values <- data$y
      # The middle of each group, on the stack axis
      midline <- mean(range(data$x))
    }
    if (method == "histodot") {
      # Use the function from stat_bin
      data <- bin(x = values, weight = data$weight, binwidth = binwidth, origin = origin,
                  breaks = breaks, range = range, width = width, drop = drop, right = right)
      # Change "width" column to "binwidth" for consistency
      names(data)[names(data) == "width"] <- "binwidth"
      names(data)[names(data) == "x"] <- "bincenter"
    } else if (method == "dotdensity") {
      # If bin centers are found by group instead of by all, find the bin centers
      # (If binpositions=="all", then we'll already have bin centers from
      # compute_panel().)
      if (binpositions == "bygroup")
        data <- densitybin(x = values, weight = data$weight, binwidth = binwidth,
                  method = method, range = range)
      # Collapse each bin and get a count
      data <- plyr::ddply(data, "bincenter", plyr::summarise, binwidth = binwidth[1], count = sum(weight))
      if (sum(data$count, na.rm = TRUE) != 0) {
        data$count[is.na(data$count)] <- 0
        # ncount: counts rescaled to a 0..1 range within the group
        data$ncount <- data$count / max(abs(data$count), na.rm = TRUE)
        if (drop) data <- subset(data, count > 0)
      }
    }
    if (binaxis == "x") {
      names(data)[names(data) == "bincenter"] <- "x"
      # For x binning, the width of the geoms is same as the width of the bin
      data$width <- data$binwidth
    } else if (binaxis == "y") {
      names(data)[names(data) == "bincenter"] <- "y"
      # For y binning, set the x midline. This is needed for continuous x axis
      data$x <- midline
    }
    return(data)
  },
  default_aes = aes(y = ..count..),
  required_aes = "x"
)
# Bin values with the dot-density algorithm, WITHOUT collapsing each bin to a
# single count: every observation keeps its own row.
#
# A new bin is started at the first (sorted) value that lies past the end of
# the previous bin, so bin positions follow the data rather than a fixed grid.
#
# Args:
#   x        values to bin (an empty or all-NA input yields an empty
#            data frame).
#   weight   per-observation weights; NULL means unit weights, and NA
#            weights are treated as 0.
#   binwidth width of each bin; defaults to 1/30 of the data range.
#   method   unused in this function; kept for call-compatibility with the
#            other binning helpers.
#   range    range used to derive the default binwidth; defaults to range(x).
#
# Returns a data frame, ordered by x, with columns: x, bin (1-based bin ID),
# binwidth, weight, and bincenter (midpoint of each bin's extreme values).
densitybin <- function(x, weight = NULL, binwidth = NULL, method = method, range = NULL) {

    if (length(stats::na.omit(x)) == 0) return(data.frame())
    if (is.null(weight))  weight <- rep(1, length(x))
    weight[is.na(weight)] <- 0

    if (is.null(range))    range <- range(x, na.rm = TRUE, finite = TRUE)
    if (is.null(binwidth)) binwidth <- diff(range) / 30

    # Sort weight and x, by x
    ord <- order(x)
    weight <- weight[ord]
    x <- x[ord]

    cbin <- 0                     # Current bin ID
    bin <- rep.int(NA, length(x)) # The bin ID for each observation
    binend <- -Inf                # End position of current bin (scan left to right)

    # Scan the sorted values and assign each one to a bin.
    # seq_along() instead of 1:length(x): safe for zero-length input.
    for (i in seq_along(x)) {
        # If past end of bin, start a new bin at this point
        if (x[i] >= binend) {
            binend <- x[i] + binwidth
            cbin <- cbin + 1
        }
        bin[i] <- cbin
    }

    results <- data.frame(x, bin, binwidth, weight)
    # Midpoint of each bin's extreme values. ave() preserves row order, so
    # this is equivalent to the previous plyr::ddply() split-apply-combine
    # (rows were already sorted by bin) without the plyr round-trip.
    results$bincenter <- ave(results$x, results$bin,
                             FUN = function(v) (min(v) + max(v)) / 2)

    return(results)
}
|
## Plot 2: Global Active Power over 1-2 Feb 2007, written to plot2.png.
## Assumes household_power_consumption.txt has been unzipped into the WD.
PW_C <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?", quote = "")
PW_C$Date <- as.Date(PW_C$Date, "%d/%m/%Y")  ## convert to Date class
## extract only the dates of interest
Date1 <- as.Date("2007-02-01")
Date2 <- as.Date("2007-02-02")
## FIX: the original called filter() without library(dplyr); in a fresh
## session that resolves to stats::filter() and errors. Base subset() needs
## no extra package and keeps the same rows here.
PW_C_short <- subset(PW_C, Date >= Date1 & Date <= Date2)
## Merge Date and Time into one POSIXct timestamp for a continuous x axis
## (format includes %S so the seconds field of Time is actually parsed)
PW_C_short$newdate <- with(PW_C_short,
                           as.POSIXct(paste(Date, Time), format = "%Y-%m-%d %H:%M:%S"))
## English locale so the x-axis weekday labels are not localized.
## NOTE(review): "English" is a Windows locale name; on Linux/macOS use
## "en_US.UTF-8" instead.
Sys.setlocale("LC_ALL", "English")
png(file = "plot2.png", width = 480, height = 480)  ## open the png device
with(PW_C_short, plot(Global_active_power ~ newdate, type = "l",
                      ylab = "Global Active Power (kilowatts)",  ## y-axis title
                      xlab = ""))                                ## no x-axis title
dev.off()  ## close the device so the file is written
| /plot2.R | no_license | mray0n/ExData_Plotting1 | R | false | false | 1,070 | r | ## begin with the extraction:
## Assumes household_power_consumption.txt has been unzipped into the WD.
## Produces plot2.png: Global Active Power over 1-2 Feb 2007.
PW_C <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?", quote = "")
PW_C$Date <- as.Date(PW_C$Date, "%d/%m/%Y")  ## convert to Date class
## extract only the dates of interest
Date1 <- as.Date("2007-02-01")
Date2 <- as.Date("2007-02-02")
## FIX: the original called filter() without library(dplyr); in a fresh
## session that resolves to stats::filter() and errors. Base subset() needs
## no extra package and keeps the same rows here.
PW_C_short <- subset(PW_C, Date >= Date1 & Date <= Date2)
## Merge Date and Time into one POSIXct timestamp for a continuous x axis
## (format includes %S so the seconds field of Time is actually parsed)
PW_C_short$newdate <- with(PW_C_short,
                           as.POSIXct(paste(Date, Time), format = "%Y-%m-%d %H:%M:%S"))
## English locale so the x-axis weekday labels are not localized.
## NOTE(review): "English" is a Windows locale name; on Linux/macOS use
## "en_US.UTF-8" instead.
Sys.setlocale("LC_ALL", "English")
png(file = "plot2.png", width = 480, height = 480)  ## open the png device
with(PW_C_short, plot(Global_active_power ~ newdate, type = "l",
                      ylab = "Global Active Power (kilowatts)",  ## y-axis title
                      xlab = ""))                                ## no x-axis title
dev.off()  ## close the device so the file is written
|
#' Builds a random square test matrix.
#'
#' Values are drawn from a standard normal distribution; used as fixture
#' data by the tests in this file.
#'
#' @param size number of rows/columns of the matrix (default 5).
#' @return a \code{size} x \code{size} numeric matrix of random values.
create.sampleMatrix <- function(size = 5) {
    n.values <- size * size
    matrix(rnorm(n.values), nrow = size)
}
# RUnit-style tests for makeCacheMatrix()/cacheSolve(), which are defined in
# cachematrix.R; checkEquals/checkTrue/checkIdentical come from RUnit.
#' makeCacheMatrix() called with no arguments must still return an object.
test.makeCacheMatrix.returnsSomething <- function() {
    checkEquals(FALSE, is.null(makeCacheMatrix()))
}
#' The matrix handed to makeCacheMatrix() must come back unchanged from get().
test.makeCacheMatrix.containsInputData <- function() {
    input <- matrix()
    cm <- makeCacheMatrix(input)
    checkIdentical(input, cm$get())
}
#' setInverse() must store the value so getInverse() returns it.
test.cacheMatrix.canSetInverse <- function() {
    fake <- create.sampleMatrix()
    cm <- makeCacheMatrix()
    cm$setInverse(fake)
    checkEquals(fake, cm$getInverse())
}
#' Replacing the stored matrix with set() must clear the cached inverse,
#' otherwise a stale inverse could be returned for the new data.
test.cacheMatrix.newDataInvalidatesCache <- function() {
    fake <- create.sampleMatrix()
    cm <- makeCacheMatrix()
    cm$setInverse(fake)
    checkEquals(fake, cm$getInverse())
    cm$set(create.sampleMatrix())  # new data -> cache must be wiped
    checkTrue(is.null(cm$getInverse()))
}
#' cacheSolve() must return the same inverse as a direct solve().
test.cacheSolve.calculatesInverse <- function() {
    sample <- create.sampleMatrix()
    sample.inverse = solve(sample)
    cm <- makeCacheMatrix(sample)
    checkEquals(sample.inverse, cacheSolve(cm))
}
#' cacheSolve() must also store its result on the cache-matrix object.
test.cacheSolve.cachesInverse <- function() {
    sample <- create.sampleMatrix()
    sample.inverse = solve(sample)
    cm <- makeCacheMatrix(sample)
    checkEquals(sample.inverse, cacheSolve(cm))
    checkEquals(sample.inverse, cm$getInverse())
    # TODO: It would be nice to somehow verify that further invocations of cacheSolve
    # don't recalculate the inverse, not just verify that the result was saved
}
| /tests/test_cachematrix.R | no_license | jasoma/ProgrammingAssignment2 | R | false | false | 2,067 | r |
#' Creates a square matrix filled with random (normally distributed) data
#' for use in the tests.
#'
#' @param size the number of colums/rows in the created matrix, default = 5.
#' @return a square matrix of the given size with random values.
create.sampleMatrix <- function(size = 5) {
matrix(rnorm(size*size), ncol = size)
}
#' check the function is doing something
test.makeCacheMatrix.returnsSomething <- function() {
checkEquals(FALSE, is.null(makeCacheMatrix()))
}
#' check the data contained in the function matches the input args
test.makeCacheMatrix.containsInputData <- function() {
input <- matrix()
cm <- makeCacheMatrix(input)
checkIdentical(input, cm$get())
}
#' check that `setInverse()` does cache the value when called directly
test.cacheMatrix.canSetInverse <- function() {
fake <- create.sampleMatrix()
cm <- makeCacheMatrix()
cm$setInverse(fake)
checkEquals(fake, cm$getInverse())
}
#' check that setting new data to the matrix wipes the cache so that we don't get
#' old results.
test.cacheMatrix.newDataInvalidatesCache <- function() {
fake <- create.sampleMatrix()
cm <- makeCacheMatrix()
cm$setInverse(fake)
checkEquals(fake, cm$getInverse())
cm$set(create.sampleMatrix())
checkTrue(is.null(cm$getInverse()))
}
#' check that `cacheSolve()` actually does solve correctly
test.cacheSolve.calculatesInverse <- function() {
sample <- create.sampleMatrix()
sample.inverse = solve(sample)
cm <- makeCacheMatrix(sample)
checkEquals(sample.inverse, cacheSolve(cm))
}
#' check that `cacheSolve()` actually caches the result on the matrix instance
test.cacheSolve.cachesInverse <- function() {
sample <- create.sampleMatrix()
sample.inverse = solve(sample)
cm <- makeCacheMatrix(sample)
checkEquals(sample.inverse, cacheSolve(cm))
checkEquals(sample.inverse, cm$getInverse())
# TODO: It would be nice to somehow verify that further invocations of cacheSolve
# don't recalculate the inverse, not just verify that the result was saved
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/applicationautoscaling_operations.R
\name{applicationautoscaling_register_scalable_target}
\alias{applicationautoscaling_register_scalable_target}
\title{Registers or updates a scalable target}
\usage{
applicationautoscaling_register_scalable_target(ServiceNamespace,
ResourceId, ScalableDimension, MinCapacity, MaxCapacity, RoleARN)
}
\arguments{
\item{ServiceNamespace}{[required] The namespace of the AWS service that provides the resource or
\code{custom-resource} for a resource provided by your own application or
service. For more information, see \href{https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces}{AWS Service Namespaces}
in the \emph{Amazon Web Services General Reference}.}
\item{ResourceId}{[required] The identifier of the resource associated with the scalable target. This
string consists of the resource type and unique identifier.
\itemize{
\item ECS service - The resource type is \code{service} and the unique
identifier is the cluster name and service name. Example:
\code{service/default/sample-webapp}.
\item Spot fleet request - The resource type is \code{spot-fleet-request} and
the unique identifier is the Spot fleet request ID. Example:
\code{spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE}.
\item EMR cluster - The resource type is \code{instancegroup} and the unique
identifier is the cluster ID and instance group ID. Example:
\code{instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0}.
\item AppStream 2.0 fleet - The resource type is \code{fleet} and the unique
identifier is the fleet name. Example: \code{fleet/sample-fleet}.
\item DynamoDB table - The resource type is \code{table} and the unique
identifier is the resource ID. Example: \code{table/my-table}.
\item DynamoDB global secondary index - The resource type is \code{index} and
the unique identifier is the resource ID. Example:
\code{table/my-table/index/my-table-index}.
\item Aurora DB cluster - The resource type is \code{cluster} and the unique
identifier is the cluster name. Example: \code{cluster:my-db-cluster}.
\item Amazon SageMaker endpoint variants - The resource type is \code{variant}
and the unique identifier is the resource ID. Example:
\code{endpoint/my-end-point/variant/KMeansClustering}.
\item Custom resources are not supported with a resource type. This
parameter must specify the \code{OutputValue} from the CloudFormation
template stack used to access the resources. The unique identifier
is defined by the service provider. More information is available in
our \href{https://github.com/aws/aws-auto-scaling-custom-resource}{GitHub repository}.
}}
\item{ScalableDimension}{[required] The scalable dimension associated with the scalable target. This string
consists of the service namespace, resource type, and scaling property.
\itemize{
\item \code{ecs:service:DesiredCount} - The desired task count of an ECS
service.
\item \code{ec2:spot-fleet-request:TargetCapacity} - The target capacity of a
Spot fleet request.
\item \code{elasticmapreduce:instancegroup:InstanceCount} - The instance count
of an EMR Instance Group.
\item \code{appstream:fleet:DesiredCapacity} - The desired capacity of an
AppStream 2.0 fleet.
\item \code{dynamodb:table:ReadCapacityUnits} - The provisioned read capacity
for a DynamoDB table.
\item \code{dynamodb:table:WriteCapacityUnits} - The provisioned write capacity
for a DynamoDB table.
\item \code{dynamodb:index:ReadCapacityUnits} - The provisioned read capacity
for a DynamoDB global secondary index.
\item \code{dynamodb:index:WriteCapacityUnits} - The provisioned write capacity
for a DynamoDB global secondary index.
\item \code{rds:cluster:ReadReplicaCount} - The count of Aurora Replicas in an
Aurora DB cluster. Available for Aurora MySQL-compatible edition and
Aurora PostgreSQL-compatible edition.
\item \code{sagemaker:variant:DesiredInstanceCount} - The number of EC2
instances for an Amazon SageMaker model endpoint variant.
\item \code{custom-resource:ResourceType:Property} - The scalable dimension for
a custom resource provided by your own application or service.
}}
\item{MinCapacity}{The minimum value to scale to in response to a scale-in event. This
parameter is required to register a scalable target.}
\item{MaxCapacity}{The maximum value to scale to in response to a scale-out event. This
parameter is required to register a scalable target.}
\item{RoleARN}{Application Auto Scaling creates a service-linked role that grants it
permissions to modify the scalable target on your behalf. For more
information, see \href{https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-service-linked-roles.html}{Service-Linked Roles for Application Auto Scaling}.
For resources that are not supported using a service-linked role, this
parameter is required and must specify the ARN of an IAM role that
allows Application Auto Scaling to modify the scalable target on your
behalf.}
}
\description{
Registers or updates a scalable target. A scalable target is a resource
that Application Auto Scaling can scale out and scale in. Each scalable
target has a resource ID, scalable dimension, and namespace, as well as
values for minimum and maximum capacity.
}
\details{
After you register a scalable target, you do not need to register it
again to use other Application Auto Scaling operations. To see which
resources have been registered, use DescribeScalableTargets. You can
also view the scaling policies for a service namespace using
DescribeScalableTargets.
If you no longer need a scalable target, you can deregister it using
DeregisterScalableTarget.
}
\section{Request syntax}{
\preformatted{svc$register_scalable_target(
ServiceNamespace = "ecs"|"elasticmapreduce"|"ec2"|"appstream"|"dynamodb"|"rds"|"sagemaker"|"custom-resource",
ResourceId = "string",
ScalableDimension = "ecs:service:DesiredCount"|"ec2:spot-fleet-request:TargetCapacity"|"elasticmapreduce:instancegroup:InstanceCount"|"appstream:fleet:DesiredCapacity"|"dynamodb:table:ReadCapacityUnits"|"dynamodb:table:WriteCapacityUnits"|"dynamodb:index:ReadCapacityUnits"|"dynamodb:index:WriteCapacityUnits"|"rds:cluster:ReadReplicaCount"|"sagemaker:variant:DesiredInstanceCount"|"custom-resource:ResourceType:Property",
MinCapacity = 123,
MaxCapacity = 123,
RoleARN = "string"
)
}
}
\examples{
# This example registers a scalable target from an Amazon ECS service
# called web-app that is running on the default cluster, with a minimum
# desired count of 1 task and a maximum desired count of 10 tasks.
\donttest{svc$register_scalable_target(
MaxCapacity = 10L,
MinCapacity = 1L,
ResourceId = "service/default/web-app",
RoleARN = "arn:aws:iam::012345678910:role/ApplicationAutoscalingECSRole",
ScalableDimension = "ecs:service:DesiredCount",
ServiceNamespace = "ecs"
)}
# This example registers a scalable target from an Amazon EC2 Spot fleet
# with a minimum target capacity of 1 and a maximum of 10.
\donttest{svc$register_scalable_target(
MaxCapacity = 10L,
MinCapacity = 1L,
ResourceId = "spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3",
RoleARN = "arn:aws:iam::012345678910:role/ApplicationAutoscalingSpotRole",
ScalableDimension = "ec2:spot-fleet-request:TargetCapacity",
ServiceNamespace = "ec2"
)}
}
\keyword{internal}
| /cran/paws.management/man/applicationautoscaling_register_scalable_target.Rd | permissive | peoplecure/paws | R | false | true | 7,394 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/applicationautoscaling_operations.R
\name{applicationautoscaling_register_scalable_target}
\alias{applicationautoscaling_register_scalable_target}
\title{Registers or updates a scalable target}
\usage{
applicationautoscaling_register_scalable_target(ServiceNamespace,
ResourceId, ScalableDimension, MinCapacity, MaxCapacity, RoleARN)
}
\arguments{
\item{ServiceNamespace}{[required] The namespace of the AWS service that provides the resource or
\code{custom-resource} for a resource provided by your own application or
service. For more information, see \href{https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces}{AWS Service Namespaces}
in the \emph{Amazon Web Services General Reference}.}
\item{ResourceId}{[required] The identifier of the resource associated with the scalable target. This
string consists of the resource type and unique identifier.
\itemize{
\item ECS service - The resource type is \code{service} and the unique
identifier is the cluster name and service name. Example:
\code{service/default/sample-webapp}.
\item Spot fleet request - The resource type is \code{spot-fleet-request} and
the unique identifier is the Spot fleet request ID. Example:
\code{spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE}.
\item EMR cluster - The resource type is \code{instancegroup} and the unique
identifier is the cluster ID and instance group ID. Example:
\code{instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0}.
\item AppStream 2.0 fleet - The resource type is \code{fleet} and the unique
identifier is the fleet name. Example: \code{fleet/sample-fleet}.
\item DynamoDB table - The resource type is \code{table} and the unique
identifier is the resource ID. Example: \code{table/my-table}.
\item DynamoDB global secondary index - The resource type is \code{index} and
the unique identifier is the resource ID. Example:
\code{table/my-table/index/my-table-index}.
\item Aurora DB cluster - The resource type is \code{cluster} and the unique
identifier is the cluster name. Example: \code{cluster:my-db-cluster}.
\item Amazon SageMaker endpoint variants - The resource type is \code{variant}
and the unique identifier is the resource ID. Example:
\code{endpoint/my-end-point/variant/KMeansClustering}.
\item Custom resources are not supported with a resource type. This
parameter must specify the \code{OutputValue} from the CloudFormation
template stack used to access the resources. The unique identifier
is defined by the service provider. More information is available in
our \href{https://github.com/aws/aws-auto-scaling-custom-resource}{GitHub repository}.
}}
\item{ScalableDimension}{[required] The scalable dimension associated with the scalable target. This string
consists of the service namespace, resource type, and scaling property.
\itemize{
\item \code{ecs:service:DesiredCount} - The desired task count of an ECS
service.
\item \code{ec2:spot-fleet-request:TargetCapacity} - The target capacity of a
Spot fleet request.
\item \code{elasticmapreduce:instancegroup:InstanceCount} - The instance count
of an EMR Instance Group.
\item \code{appstream:fleet:DesiredCapacity} - The desired capacity of an
AppStream 2.0 fleet.
\item \code{dynamodb:table:ReadCapacityUnits} - The provisioned read capacity
for a DynamoDB table.
\item \code{dynamodb:table:WriteCapacityUnits} - The provisioned write capacity
for a DynamoDB table.
\item \code{dynamodb:index:ReadCapacityUnits} - The provisioned read capacity
for a DynamoDB global secondary index.
\item \code{dynamodb:index:WriteCapacityUnits} - The provisioned write capacity
for a DynamoDB global secondary index.
\item \code{rds:cluster:ReadReplicaCount} - The count of Aurora Replicas in an
Aurora DB cluster. Available for Aurora MySQL-compatible edition and
Aurora PostgreSQL-compatible edition.
\item \code{sagemaker:variant:DesiredInstanceCount} - The number of EC2
instances for an Amazon SageMaker model endpoint variant.
\item \code{custom-resource:ResourceType:Property} - The scalable dimension for
a custom resource provided by your own application or service.
}}
\item{MinCapacity}{The minimum value to scale to in response to a scale-in event. This
parameter is required to register a scalable target.}
\item{MaxCapacity}{The maximum value to scale to in response to a scale-out event. This
parameter is required to register a scalable target.}
\item{RoleARN}{Application Auto Scaling creates a service-linked role that grants it
permissions to modify the scalable target on your behalf. For more
information, see \href{https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-service-linked-roles.html}{Service-Linked Roles for Application Auto Scaling}.
For resources that are not supported using a service-linked role, this
parameter is required and must specify the ARN of an IAM role that
allows Application Auto Scaling to modify the scalable target on your
behalf.}
}
\description{
Registers or updates a scalable target. A scalable target is a resource
that Application Auto Scaling can scale out and scale in. Each scalable
target has a resource ID, scalable dimension, and namespace, as well as
values for minimum and maximum capacity.
}
\details{
After you register a scalable target, you do not need to register it
again to use other Application Auto Scaling operations. To see which
resources have been registered, use DescribeScalableTargets. You can
also view the scaling policies for a service namespace using
DescribeScalingPolicies.
If you no longer need a scalable target, you can deregister it using
DeregisterScalableTarget.
}
\section{Request syntax}{
\preformatted{svc$register_scalable_target(
ServiceNamespace = "ecs"|"elasticmapreduce"|"ec2"|"appstream"|"dynamodb"|"rds"|"sagemaker"|"custom-resource",
ResourceId = "string",
ScalableDimension = "ecs:service:DesiredCount"|"ec2:spot-fleet-request:TargetCapacity"|"elasticmapreduce:instancegroup:InstanceCount"|"appstream:fleet:DesiredCapacity"|"dynamodb:table:ReadCapacityUnits"|"dynamodb:table:WriteCapacityUnits"|"dynamodb:index:ReadCapacityUnits"|"dynamodb:index:WriteCapacityUnits"|"rds:cluster:ReadReplicaCount"|"sagemaker:variant:DesiredInstanceCount"|"custom-resource:ResourceType:Property",
MinCapacity = 123,
MaxCapacity = 123,
RoleARN = "string"
)
}
}
\examples{
# This example registers a scalable target from an Amazon ECS service
# called web-app that is running on the default cluster, with a minimum
# desired count of 1 task and a maximum desired count of 10 tasks.
\donttest{svc$register_scalable_target(
MaxCapacity = 10L,
MinCapacity = 1L,
ResourceId = "service/default/web-app",
RoleARN = "arn:aws:iam::012345678910:role/ApplicationAutoscalingECSRole",
ScalableDimension = "ecs:service:DesiredCount",
ServiceNamespace = "ecs"
)}
# This example registers a scalable target from an Amazon EC2 Spot fleet
# with a minimum target capacity of 1 and a maximum of 10.
\donttest{svc$register_scalable_target(
MaxCapacity = 10L,
MinCapacity = 1L,
ResourceId = "spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3",
RoleARN = "arn:aws:iam::012345678910:role/ApplicationAutoscalingSpotRole",
ScalableDimension = "ec2:spot-fleet-request:TargetCapacity",
ServiceNamespace = "ec2"
)}
}
\keyword{internal}
|
# Unit tests for the geo_coord_linestring() and geo_coord_multilinestring()
# record classes: class/print behaviour, c()/vec_c() combination, casting
# to and from tibble/data.frame/list, and coercion between the two classes.

test_that("geo_coord_linestring() class works", {
  tbl_linestring <- geo_coord_linestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  expect_output(print(tbl_linestring), "geo_coord_linestring")
  # "tblls" is the abbreviated class label shown in tibble column headers
  expect_output(print(tibble(tbl_linestring)), "tblls")
  expect_is(tbl_linestring, "geo_coord_linestring")
  expect_true(is_geo_coord_linestring(tbl_linestring))
  expect_true(vec_is(tbl_linestring))
})

test_that("geo_coord_multilinestring() class works", {
  tbl_linestring <- geo_coord_multilinestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  expect_output(print(tbl_linestring), "geo_coord_multilinestring")
  # "tblmls" is the abbreviated class label shown in tibble column headers
  expect_output(print(tibble(tbl_linestring)), "tblmls")
  expect_is(tbl_linestring, "geo_coord_multilinestring")
  expect_true(is_geo_coord_multilinestring(tbl_linestring))
  expect_true(vec_is(tbl_linestring))
})

test_that("geo_coord_linestring() c() and vec_c() works", {
  linestring <- geo_coord_linestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  linestring_in_tbl <- tibble(linestring)
  linestring_in_df <- as.data.frame(linestring_in_tbl)
  tbl_linestring <- as_tibble(linestring)
  df_linestring <- as.data.frame(tbl_linestring)
  expect_is(c(linestring, linestring), "geo_coord_linestring")
  expect_length(c(linestring, linestring), 6)
  expect_is(vec_c(linestring, linestring), "geo_coord_linestring")
  expect_length(vec_c(linestring, linestring), 6)
  expect_equal(nrow(vec_rbind(linestring_in_tbl, linestring_in_tbl)), 6)
  expect_is(vec_rbind(linestring_in_tbl, linestring_in_tbl)$linestring, "geo_coord_linestring")

  # check vec_c() with tibble and data frame types
  expect_identical(c(linestring, tbl_linestring), vec_rbind(tbl_linestring, tbl_linestring))
  expect_identical(vec_c(linestring, tbl_linestring), vec_rbind(tbl_linestring, tbl_linestring))
  expect_identical(vec_c(tbl_linestring, linestring), vec_rbind(df_linestring, df_linestring))
  expect_identical(vec_c(linestring, df_linestring), vec_rbind(df_linestring, df_linestring))
  expect_identical(vec_c(df_linestring, linestring), vec_rbind(df_linestring, df_linestring))
})

test_that("geo_coord_linestring() casting works", {
  linestring <- geo_coord_linestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  tbl_linestring <- as_tibble(linestring)
  df_linestring <- as.data.frame(linestring)
  expect_is(tbl_linestring, "tbl_df")
  expect_false(inherits(df_linestring, "tbl_df"))
  expect_identical(vec_cast(tbl_linestring, new_geo_coord_linestring()), linestring)
  expect_identical(vec_cast(linestring, tibble()), as.data.frame(tbl_linestring))
  expect_identical(vec_cast(linestring, list()), vec_data(linestring))
  expect_identical(vec_cast(vec_data(tbl_linestring), new_geo_coord_linestring()), linestring)
  expect_error(
    vec_cast(unname(vec_data(tbl_linestring)), new_geo_coord_linestring()),
    "Can't convert an unnamed list"
  )
  expect_identical(as_geo_coord_linestring(tbl_linestring), linestring)
  expect_identical(as_geo_coord_linestring(df_linestring), linestring)
  expect_identical(as_geo_coord_linestring(unclass(df_linestring)), linestring)
})

test_that("geo_coord_multilinestring() c() and vec_c() works", {
  multilinestring <- geo_coord_multilinestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  multilinestring_in_tbl <- tibble(multilinestring)
  multilinestring_in_df <- as.data.frame(multilinestring_in_tbl)
  tbl_multilinestring <- as_tibble(multilinestring)
  df_multilinestring <- as.data.frame(tbl_multilinestring)
  expect_is(c(multilinestring, multilinestring), "geo_coord_multilinestring")
  expect_length(c(multilinestring, multilinestring), 6)
  expect_is(vec_c(multilinestring, multilinestring), "geo_coord_multilinestring")
  expect_length(vec_c(multilinestring, multilinestring), 6)
  expect_equal(nrow(vec_rbind(multilinestring_in_tbl, multilinestring_in_tbl)), 6)
  expect_is(
    vec_rbind(multilinestring_in_tbl, multilinestring_in_tbl)$multilinestring,
    "geo_coord_multilinestring"
  )

  # check vec_c() with tibble and data frame types
  expect_identical(
    c(multilinestring, tbl_multilinestring),
    vec_rbind(tbl_multilinestring, tbl_multilinestring)
  )
  expect_identical(
    vec_c(multilinestring, tbl_multilinestring),
    vec_rbind(tbl_multilinestring, tbl_multilinestring)
  )
  expect_identical(
    vec_c(tbl_multilinestring, multilinestring),
    vec_rbind(df_multilinestring, df_multilinestring)
  )
  expect_identical(
    vec_c(multilinestring, df_multilinestring),
    vec_rbind(df_multilinestring, df_multilinestring)
  )
  expect_identical(
    vec_c(df_multilinestring, multilinestring),
    vec_rbind(df_multilinestring, df_multilinestring)
  )
})

test_that("geo_coord_multilinestring() casting works", {
  multilinestring <- geo_coord_multilinestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  tbl_multilinestring <- as_tibble(multilinestring)
  df_multilinestring <- as.data.frame(multilinestring)
  expect_is(tbl_multilinestring, "tbl_df")
  expect_false(inherits(df_multilinestring, "tbl_df"))
  expect_identical(
    vec_cast(tbl_multilinestring, new_geo_coord_multilinestring()),
    multilinestring
  )
  expect_identical(vec_cast(multilinestring, tibble()), as.data.frame(tbl_multilinestring))
  expect_identical(vec_cast(multilinestring, list()), vec_data(multilinestring))
  expect_identical(vec_cast(vec_data(tbl_multilinestring), new_geo_coord_multilinestring()), multilinestring)
  expect_error(
    vec_cast(unname(vec_data(tbl_multilinestring)), new_geo_coord_multilinestring()),
    "Can't convert an unnamed list"
  )
  expect_identical(as_geo_coord_multilinestring(tbl_multilinestring), multilinestring)
  expect_identical(as_geo_coord_multilinestring(df_multilinestring), multilinestring)
  expect_identical(as_geo_coord_multilinestring(unclass(df_multilinestring)), multilinestring)
})

# typo fixed in description: "linestring is can be" -> "linestring can be"
test_that("linestring can be coerced to multilinestring", {
  multilinestring <- geo_coord_multilinestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  linestring <- geo_coord_linestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  expect_is(c(multilinestring, linestring), "geo_coord_multilinestring")
  expect_is(c(linestring, multilinestring), "geo_coord_multilinestring")
  expect_is(vec_c(multilinestring, linestring), "geo_coord_multilinestring")
  expect_is(vec_c(linestring, multilinestring), "geo_coord_multilinestring")
})
| /tests/testthat/test-geo-coord-linestring.R | no_license | SymbolixAU/geom | R | false | false | 6,263 | r |
# Unit tests for the geo_coord_linestring() and geo_coord_multilinestring()
# record classes: class/print behaviour, c()/vec_c() combination, casting
# to and from tibble/data.frame/list, and coercion between the two classes.

test_that("geo_coord_linestring() class works", {
  tbl_linestring <- geo_coord_linestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  expect_output(print(tbl_linestring), "geo_coord_linestring")
  # "tblls" is the abbreviated class label shown in tibble column headers
  expect_output(print(tibble(tbl_linestring)), "tblls")
  expect_is(tbl_linestring, "geo_coord_linestring")
  expect_true(is_geo_coord_linestring(tbl_linestring))
  expect_true(vec_is(tbl_linestring))
})

test_that("geo_coord_multilinestring() class works", {
  tbl_linestring <- geo_coord_multilinestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  expect_output(print(tbl_linestring), "geo_coord_multilinestring")
  # "tblmls" is the abbreviated class label shown in tibble column headers
  expect_output(print(tibble(tbl_linestring)), "tblmls")
  expect_is(tbl_linestring, "geo_coord_multilinestring")
  expect_true(is_geo_coord_multilinestring(tbl_linestring))
  expect_true(vec_is(tbl_linestring))
})

test_that("geo_coord_linestring() c() and vec_c() works", {
  linestring <- geo_coord_linestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  linestring_in_tbl <- tibble(linestring)
  linestring_in_df <- as.data.frame(linestring_in_tbl)
  tbl_linestring <- as_tibble(linestring)
  df_linestring <- as.data.frame(tbl_linestring)
  expect_is(c(linestring, linestring), "geo_coord_linestring")
  expect_length(c(linestring, linestring), 6)
  expect_is(vec_c(linestring, linestring), "geo_coord_linestring")
  expect_length(vec_c(linestring, linestring), 6)
  expect_equal(nrow(vec_rbind(linestring_in_tbl, linestring_in_tbl)), 6)
  expect_is(vec_rbind(linestring_in_tbl, linestring_in_tbl)$linestring, "geo_coord_linestring")

  # check vec_c() with tibble and data frame types
  expect_identical(c(linestring, tbl_linestring), vec_rbind(tbl_linestring, tbl_linestring))
  expect_identical(vec_c(linestring, tbl_linestring), vec_rbind(tbl_linestring, tbl_linestring))
  expect_identical(vec_c(tbl_linestring, linestring), vec_rbind(df_linestring, df_linestring))
  expect_identical(vec_c(linestring, df_linestring), vec_rbind(df_linestring, df_linestring))
  expect_identical(vec_c(df_linestring, linestring), vec_rbind(df_linestring, df_linestring))
})

test_that("geo_coord_linestring() casting works", {
  linestring <- geo_coord_linestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  tbl_linestring <- as_tibble(linestring)
  df_linestring <- as.data.frame(linestring)
  expect_is(tbl_linestring, "tbl_df")
  expect_false(inherits(df_linestring, "tbl_df"))
  expect_identical(vec_cast(tbl_linestring, new_geo_coord_linestring()), linestring)
  expect_identical(vec_cast(linestring, tibble()), as.data.frame(tbl_linestring))
  expect_identical(vec_cast(linestring, list()), vec_data(linestring))
  expect_identical(vec_cast(vec_data(tbl_linestring), new_geo_coord_linestring()), linestring)
  expect_error(
    vec_cast(unname(vec_data(tbl_linestring)), new_geo_coord_linestring()),
    "Can't convert an unnamed list"
  )
  expect_identical(as_geo_coord_linestring(tbl_linestring), linestring)
  expect_identical(as_geo_coord_linestring(df_linestring), linestring)
  expect_identical(as_geo_coord_linestring(unclass(df_linestring)), linestring)
})

test_that("geo_coord_multilinestring() c() and vec_c() works", {
  multilinestring <- geo_coord_multilinestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  multilinestring_in_tbl <- tibble(multilinestring)
  multilinestring_in_df <- as.data.frame(multilinestring_in_tbl)
  tbl_multilinestring <- as_tibble(multilinestring)
  df_multilinestring <- as.data.frame(tbl_multilinestring)
  expect_is(c(multilinestring, multilinestring), "geo_coord_multilinestring")
  expect_length(c(multilinestring, multilinestring), 6)
  expect_is(vec_c(multilinestring, multilinestring), "geo_coord_multilinestring")
  expect_length(vec_c(multilinestring, multilinestring), 6)
  expect_equal(nrow(vec_rbind(multilinestring_in_tbl, multilinestring_in_tbl)), 6)
  expect_is(
    vec_rbind(multilinestring_in_tbl, multilinestring_in_tbl)$multilinestring,
    "geo_coord_multilinestring"
  )

  # check vec_c() with tibble and data frame types
  expect_identical(
    c(multilinestring, tbl_multilinestring),
    vec_rbind(tbl_multilinestring, tbl_multilinestring)
  )
  expect_identical(
    vec_c(multilinestring, tbl_multilinestring),
    vec_rbind(tbl_multilinestring, tbl_multilinestring)
  )
  expect_identical(
    vec_c(tbl_multilinestring, multilinestring),
    vec_rbind(df_multilinestring, df_multilinestring)
  )
  expect_identical(
    vec_c(multilinestring, df_multilinestring),
    vec_rbind(df_multilinestring, df_multilinestring)
  )
  expect_identical(
    vec_c(df_multilinestring, multilinestring),
    vec_rbind(df_multilinestring, df_multilinestring)
  )
})

test_that("geo_coord_multilinestring() casting works", {
  multilinestring <- geo_coord_multilinestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  tbl_multilinestring <- as_tibble(multilinestring)
  df_multilinestring <- as.data.frame(multilinestring)
  expect_is(tbl_multilinestring, "tbl_df")
  expect_false(inherits(df_multilinestring, "tbl_df"))
  expect_identical(
    vec_cast(tbl_multilinestring, new_geo_coord_multilinestring()),
    multilinestring
  )
  expect_identical(vec_cast(multilinestring, tibble()), as.data.frame(tbl_multilinestring))
  expect_identical(vec_cast(multilinestring, list()), vec_data(multilinestring))
  expect_identical(vec_cast(vec_data(tbl_multilinestring), new_geo_coord_multilinestring()), multilinestring)
  expect_error(
    vec_cast(unname(vec_data(tbl_multilinestring)), new_geo_coord_multilinestring()),
    "Can't convert an unnamed list"
  )
  expect_identical(as_geo_coord_multilinestring(tbl_multilinestring), multilinestring)
  expect_identical(as_geo_coord_multilinestring(df_multilinestring), multilinestring)
  expect_identical(as_geo_coord_multilinestring(unclass(df_multilinestring)), multilinestring)
})

# typo fixed in description: "linestring is can be" -> "linestring can be"
test_that("linestring can be coerced to multilinestring", {
  multilinestring <- geo_coord_multilinestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  linestring <- geo_coord_linestring(geo_xy(c(0, 1, 6), c(0, 2, 4)))
  expect_is(c(multilinestring, linestring), "geo_coord_multilinestring")
  expect_is(c(linestring, multilinestring), "geo_coord_multilinestring")
  expect_is(vec_c(multilinestring, linestring), "geo_coord_multilinestring")
  expect_is(vec_c(linestring, multilinestring), "geo_coord_multilinestring")
})
|
# shared functions
## method
# Fit a validated points-of-impact model via the fdapoi package.
# y: response vector; X: functional covariate matrix; order and family
# are forwarded unchanged to the estimator.
fit_model = function(y, X, order, family) {
  fdapoi::estimate_poi_validated(y, X, order = order, family = family)
}
# Save a PNG of the fdapoi PoI diagnostic plot for a fitted model to `path`,
# with top-axis tick labels marking the first two estimated impact
# locations (tau-hat).
plot_model = function(y, X, model, path) {
  n_points = nrow(X)
  # evaluation grid on [0, 1] with one point per row of X
  grid = (seq_len(n_points) - 1) / (n_points - 1)
  png(path)
  fdapoi:::PoIMaker(model$k.opt, X, y, plotting = TRUE)
  axis(side = 3, at = grid[model$tau.ind.hat[1]],
       labels = expression(hat(tau)[1]), line = 0)
  axis(side = 3, at = grid[model$tau.ind.hat[2]],
       labels = expression(hat(tau)[2]), line = 0)
  dev.off()
}
# Load a simulated dataset stored as numpy arrays named "<id>-y.npy" and
# "<id>-X.npy" under `directory` (empty string = current directory).
# Returns list(y = ..., X = ...).
read_data = function(directory, id) {
  np = reticulate::import("numpy")
  # append a trailing slash unless directory is empty
  prefix = if (directory == "") "" else paste0(directory, "/")
  list(
    y = np$load(paste0(prefix, id, "-y.npy")),
    X = np$load(paste0(prefix, id, "-X.npy"))
  )
}
# Collect the fitted coefficients (beta0.hat first, then beta.hat, names
# dropped), the estimated PoI location indices, and the selected order
# from a fitted model. Returned as a list when `path` is NULL, otherwise
# written to a YAML file at `path`.
extract_coefficients = function(model, path=NULL) {
  out = list(
    beta = unname(c(model$beta0.hat, model$beta.hat)),
    locations = model$tau.ind.hat,
    k_opt = model$k.opt
  )
  if (is.null(path)) {
    return(out)
  }
  yaml::write_yaml(out, path)
}
## monte carlo
# Read the configured number of Monte Carlo repetitions from the project's
# YAML config. NOTE(review): relies on a global SRC directory defined by
# the calling script.
read_n_sim = function() {
  config_path = file.path(SRC, "config_monte_carlo.yaml")
  yaml::read_yaml(config_path)[["n_sim"]]
}
# Drop a comma that appears immediately before the final character of `str`
# (e.g. "c(1, 2,)" -> "c(1, 2)"); strings without such a comma are returned
# unchanged.
#
# Fixes: the previous character-by-character version errored on strings
# shorter than 2 characters (`s[n - 1]` was length zero, so the if()
# condition failed). The regex form handles those inputs and is also
# vectorised over `str`.
replace_comma = function(str) {
  sub(",(.)$", "\\1", str)
}
# Parse and evaluate a string holding an R literal (used for config fields
# such as `beta` and `kernel_kwargs`).
# NOTE(review): eval(parse(...)) executes arbitrary code; acceptable only
# because inputs come from the project's own config files.
string_to_object = function(str) {
  eval(parse(text = str))
}
# Coerce one row of the Monte Carlo parameter grid into a keyword list:
# the three size fields become integers and the nested `beta` entry is
# flattened to a plain vector. All other fields pass through unchanged.
df_row_to_kwargs = function(row) {
  out = row
  out$n_periods = as.integer(out$n_periods)
  out$n_samples = as.integer(out$n_samples)
  out$n_points = as.integer(out$n_points)
  out = as.list(out)
  out$beta = unlist(out$beta)
  out
}
# Run the inner Monte Carlo loop once per row of the parameter grid, with
# a pbapply progress bar; returns a list of per-row result tibbles.
# NOTE(review): apply() coerces the data frame to a matrix before handing
# rows to df_row_to_kwargs; confirm kwargs_df carries a list column so the
# rows arrive as lists (the `$` accessor fails on atomic vectors).
monte_carlo = function(kwargs_df, n_sim) {
  kwargs_list = apply(kwargs_df, 1, df_row_to_kwargs)
  run_one = function(kwargs) monte_carlo_inner(kwargs, n_sim)
  pbapply::pblapply(kwargs_list, run_one)
}
# Simulate `n_sim` datasets under `kwargs`, fit the PoI model to each, and
# tabulate how often every location index was selected.
#
# kwargs: keyword list produced by df_row_to_kwargs(); its `beta` and
#   `kernel_kwargs` entries are R literals stored as strings.
# n_sim: number of Monte Carlo repetitions; the repetition index doubles
#   as the simulation seed, so results are reproducible.
# Returns a tibble with columns `locations` and `count`.
monte_carlo_inner = function(kwargs, n_sim) {
  order = kwargs$order
  kwargs$order = NULL
  kwargs$beta = string_to_object(kwargs$beta)
  kwargs$kernel_kwargs = string_to_object(kwargs$kernel_kwargs)
  # Preallocate instead of growing `locations` with c() inside the loop
  # (the previous version was quadratic in n_sim).
  per_run = vector("list", n_sim)
  for (k in seq_len(n_sim)) {
    kwargs$seed = k
    data = do.call(simulate_model, kwargs)
    model = fit_model(y = data[[1]], X = data[[2]], order, family = "gaussian")
    per_run[[k]] = extract_coefficients(model)$locations
  }
  locations = unlist(per_run)
  out = tibble::tibble(locations)
  out = dplyr::count(out, locations)
  dplyr::rename(out, count = n)
}
# Persist each Monte Carlo result as BLD/monte_carlo/result<i>.csv, where
# <i> is the zero-based index of the parameter-grid row. Called for its
# side effects; returns NULL invisibly.
#
# Fixes: `1:length(x)` iterated over c(1, 0) for an empty result list and
# crashed on the out-of-bounds subscript; seq_along() handles that case.
write_results = function(monte_carlo_results, BLD) {
  for (k in seq_along(monte_carlo_results)) {
    path = file.path(BLD, "monte_carlo", paste0("result", k - 1, ".csv"))
    readr::write_csv(monte_carlo_results[[k]], path)
  }
  invisible(NULL)
}
| /src/shared.R | no_license | timmens/topics-metrics-2021 | R | false | false | 2,978 | r | # shared functions
## method

# Fit a validated points-of-impact (PoI) model with the fdapoi package.
# y: response vector; X: functional covariate matrix; order and family
# are forwarded unchanged to the estimator.
fit_model = function(y, X, order, family) {
model = fdapoi::estimate_poi_validated(y, X, order=order, family=family)
return(model)
}

# Save a PNG of the fdapoi PoI diagnostic plot for a fitted model to
# `path`, with top-axis tick labels marking the first two estimated
# impact locations (tau-hat).
plot_model = function(y, X, model, path) {
# evaluation grid on [0, 1] with one point per row of X
p = nrow(X)
t_grid = (seq(1, p) - 1) / (p-1)
png(path)
fdapoi:::PoIMaker(model$k.opt, X, y, plotting=TRUE)
axis(side = 3, at = t_grid[model$tau.ind.hat[1]],
labels = expression(hat(tau)[1]), line = 0)
axis(side = 3, at = t_grid[model$tau.ind.hat[2]],
labels = expression(hat(tau)[2]), line = 0)
dev.off()
}

# Load a simulated dataset stored as numpy arrays ("<id>-y.npy" and
# "<id>-X.npy") under `directory` via reticulate; returns list(y=, X=).
read_data = function(directory, id) {
np = reticulate::import("numpy")
# append a trailing slash unless directory is empty (current directory)
directory = ifelse(directory=="", directory, paste0(directory,"/"))
y = np$load(paste0(directory, id, "-y.npy"))
X = np$load(paste0(directory, id, "-X.npy"))
out = list(y=y, X=X)
return(out)
}

# Collect the fitted coefficients (beta0.hat first, then beta.hat, names
# dropped), the estimated PoI location indices, and the selected order.
# Returned as a list when `path` is NULL, otherwise written to a YAML
# file at `path`.
extract_coefficients = function(model, path=NULL) {
beta = unname(c(model$beta0.hat, model$beta.hat))
locations = model$tau.ind.hat
k_opt = model$k.opt
out = list(beta=beta, locations=locations, k_opt=k_opt)
if (is.null(path)) {
return(out)
} else {
yaml::write_yaml(out, path)
}
}

## monte carlo

# Read the configured number of Monte Carlo repetitions from the YAML
# config. NOTE(review): relies on a global SRC directory defined by the
# calling script.
read_n_sim = function() {
config = yaml::read_yaml(file.path(SRC, "config_monte_carlo.yaml"))
return(config[["n_sim"]])
}

# Remove a comma occurring immediately before the last character of `str`
# (e.g. "c(1, 2,)" -> "c(1, 2)").
# NOTE(review): errors on strings shorter than 2 characters -- s[n - 1]
# is then empty and the if() condition has length zero.
replace_comma = function(str) {
s = strsplit(str, "")[[1]]
n = length(s)
if (s[n - 1] == ",") {
s[n - 1] = ""
}
s = paste0(s, collapse="")
return(s)
}

# Parse and evaluate a string holding an R literal (used for config
# fields such as `beta`). NOTE(review): eval(parse(...)) executes
# arbitrary code; only safe for trusted config input.
string_to_object = function(str) {
object = eval(parse(text=str))
return(object)
}

# Coerce one row of the Monte Carlo parameter grid into a keyword list:
# the three size fields become integers and the nested `beta` entry is
# flattened to a plain vector.
df_row_to_kwargs = function(row) {
kwargs = row
kwargs$n_periods = as.integer(kwargs$n_periods)
kwargs$n_samples = as.integer(kwargs$n_samples)
kwargs$n_points = as.integer(kwargs$n_points)
kwargs = as.list(kwargs)
kwargs$beta = unlist(kwargs$beta)
return(kwargs)
}

# Run monte_carlo_inner() for every row of the parameter grid with a
# pbapply progress bar; returns a list of per-row result tibbles.
# NOTE(review): apply() coerces the data frame to a matrix before calling
# df_row_to_kwargs -- confirm rows arrive as lists (e.g. via a list
# column), since the `$` accessor fails on atomic vectors.
monte_carlo = function(kwargs_df, n_sim) {
kwargs_list = apply(kwargs_df, 1, df_row_to_kwargs)
to_parallelize = function(kwargs) monte_carlo_inner(kwargs, n_sim)
results = pbapply::pblapply(kwargs_list, to_parallelize)
return(results)
}

# Simulate n_sim datasets under `kwargs`, fit the PoI model to each
# (seeded by the repetition index, so runs are reproducible), and
# tabulate how often each location index was selected. Returns a tibble
# with columns `locations` and `count`.
monte_carlo_inner = function(kwargs, n_sim) {
order = kwargs$order
kwargs$order = NULL
# beta / kernel_kwargs are stored as strings holding R literals
kwargs$beta = string_to_object(kwargs$beta)
kwargs$kernel_kwargs = string_to_object(kwargs$kernel_kwargs)
locations = c()
for (k in 1:n_sim) {
kwargs$seed = k
data = do.call(simulate_model, kwargs)
model = fit_model(y=data[[1]], X=data[[2]], order, family="gaussian")
coeff = extract_coefficients(model)
.locations = coeff$locations
locations = c(locations, .locations)
}
out = tibble::tibble(locations)
out = dplyr::count(out, locations)
out = dplyr::rename(out, count=n)
return(out)
}

# Persist each Monte Carlo result as BLD/monte_carlo/result<i>.csv,
# where <i> is the zero-based index of the parameter-grid row.
write_results = function(monte_carlo_results, BLD) {
for (k in 1:length(monte_carlo_results)) {
path = file.path(BLD, "monte_carlo", paste0("result", k-1, ".csv"))
readr::write_csv(monte_carlo_results[[k]], path)
}
return(NULL)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FulfillmentService.R
\name{FulfillmentService}
\alias{FulfillmentService}
\alias{getFulfillmentServices}
\alias{createFulfillmentService}
\alias{getFulfillmentService}
\alias{modifyFulfillmentService}
\alias{deleteFulfillmentService}
\title{FulfillmentService Functions}
\usage{
getFulfillmentServices(...)
createFulfillmentService(fulfillmentService, ...)
getFulfillmentService(fulfillmentServiceId, ...)
modifyFulfillmentService(fulfillmentService, ...)
deleteFulfillmentService(fulfillmentServiceId, ...)
}
\arguments{
\item{...}{additional request parameters}
\item{fulfillmentService}{a list containing FulfillmentService fields}
\item{fulfillmentServiceId}{a FulfillmentService id number}
}
\value{
a list containing a FulfillmentService fields or a list of FulfillmentServices
}
\description{
FulfillmentService Functions
}
\references{
Shopify API FulfillmentService documentation: \url{https://help.shopify.com/api/reference/shipping_and_fulfillment/fulfillmentservice}
}
| /man/FulfillmentService.Rd | no_license | charliebone/shopifyr | R | false | true | 1,102 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FulfillmentService.R
\name{FulfillmentService}
\alias{FulfillmentService}
\alias{getFulfillmentServices}
\alias{createFulfillmentService}
\alias{getFulfillmentService}
\alias{modifyFulfillmentService}
\alias{deleteFulfillmentService}
\title{FulfillmentService Functions}
\usage{
getFulfillmentServices(...)
createFulfillmentService(fulfillmentService, ...)
getFulfillmentService(fulfillmentServiceId, ...)
modifyFulfillmentService(fulfillmentService, ...)
deleteFulfillmentService(fulfillmentServiceId, ...)
}
\arguments{
\item{...}{additional request parameters}
\item{fulfillmentService}{a list containing FulfillmentService fields}
\item{fulfillmentServiceId}{a FulfillmentService id number}
}
\value{
a list containing a FulfillmentService fields or a list of FulfillmentServices
}
\description{
FulfillmentService Functions
}
\references{
Shopify API FulfillmentService documentation: \url{https://help.shopify.com/api/reference/shipping_and_fulfillment/fulfillmentservice}
}
|
library(geozoning)
### Name: loopQ1
### Title: loopQ1
### Aliases: loopQ1
### ** Examples
# not run
| /data/genthat_extracted_code/geozoning/examples/loopQ1.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 107 | r | library(geozoning)
### Name: loopQ1
### Title: loopQ1
### Aliases: loopQ1
### ** Examples
# not run
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nfunctions.R
\name{ndoc}
\alias{ndoc}
\alias{nfeature}
\title{count the number of documents or features}
\usage{
ndoc(x)
nfeature(x)
}
\arguments{
\item{x}{a \pkg{quanteda} object: a \link{corpus}, \link{dfm}, or \link{tokens}
object, or a readtext object from the \pkg{readtext} package.}
}
\value{
an integer (count) of the number of documents or features in the
corpus or dfm
}
\description{
Get the number of documents, tokens, types, or features in an object.
}
\details{
\code{ndoc} returns the number of documents in a
quanteda object.
\code{nfeature} returns the number of features from a dfm; it is an
alias for \code{ntype} when applied to dfm objects. This function is only
defined for \link{dfm} objects because only these have "features". (To count
tokens, see \code{\link{ntoken}})
}
\examples{
# number of documents
ndoc(data_corpus_inaugural)
ndoc(corpus_subset(data_corpus_inaugural, Year > 1980))
ndoc(tokens(data_corpus_inaugural))
ndoc(dfm(corpus_subset(data_corpus_inaugural, Year > 1980)))
# number of features
nfeature(dfm(corpus_subset(data_corpus_inaugural, Year > 1980), removePunct = FALSE))
nfeature(dfm(corpus_subset(data_corpus_inaugural, Year > 1980), removePunct = TRUE))
}
\seealso{
\code{\link{ntoken}}
}
| /man/ndoc.Rd | no_license | plablo09/quanteda | R | false | true | 1,347 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nfunctions.R
\name{ndoc}
\alias{ndoc}
\alias{nfeature}
\title{count the number of documents or features}
\usage{
ndoc(x)
nfeature(x)
}
\arguments{
\item{x}{a \pkg{quanteda} object: a \link{corpus}, \link{dfm}, or \link{tokens}
object, or a readtext object from the \pkg{readtext} package.}
}
\value{
an integer (count) of the number of documents or features in the
corpus or dfm
}
\description{
Get the number of documents, tokens, types, or features in an object.
}
\details{
\code{ndoc} returns the number of documents in a
quanteda object.
\code{nfeature} returns the number of features from a dfm; it is an
alias for \code{ntype} when applied to dfm objects. This function is only
defined for \link{dfm} objects because only these have "features". (To count
tokens, see \code{\link{ntoken}})
}
\examples{
# number of documents
ndoc(data_corpus_inaugural)
ndoc(corpus_subset(data_corpus_inaugural, Year > 1980))
ndoc(tokens(data_corpus_inaugural))
ndoc(dfm(corpus_subset(data_corpus_inaugural, Year > 1980)))
# number of features
nfeature(dfm(corpus_subset(data_corpus_inaugural, Year > 1980), removePunct = FALSE))
nfeature(dfm(corpus_subset(data_corpus_inaugural, Year > 1980), removePunct = TRUE))
}
\seealso{
\code{\link{ntoken}}
}
|
## ======================================
## Examples for the chapter
## 'Describing and visualizing sequences'
## in TraMineR User's Guide
## ======================================
require(grDevices); require(graphics)
oask <- devAskNewPage(dev.interactive(orNone = TRUE))
library(TraMineR)
## Set 'graphdir' according to your system
## and uncomment the pdf() and dev.off() commands
## if you want to save the graphics as pdf files
graphdir <- "Graphics/"
## creating sequence objects
data(actcal)
state.lab <- c("> 37 hours", "19-36 hours", "1-18 hours", "no work")
actcal.seq <- seqdef(actcal,13:24,labels=state.lab)
data(biofam)
biofam.lab <- c("Parent", "Left", "Married", "Left+Marr",
"Child", "Left+Child", "Left+Marr+Child", "Divorced")
biofam.seq <- seqdef(biofam, 10:25, labels=biofam.lab)
## ---------------------------------------
## List of states present in sequence data
## ---------------------------------------
alphabet(actcal.seq)
sp.ex1 <- rbind("(000,12)-(0W0,9)-(0WU,5)-(1WU,2)",
"(000,12)-(0W0,14)-(1WU,2)")
sp.ex1
seqstatl(sp.ex1, format='SPS')
## ------------------
## State distribution
## ------------------
## pdf(file=paste(graphdir,"seqdplot-biofam.pdf",sep=""))
seqdplot(biofam.seq)
## dev.off()
## ------------------
## Transversal entropies
## ------------------
## pdf(file=paste(graphdir,"fg_biofam-entropy.pdf",sep=""), width=8, height=6, pointsize=14)
seqHtplot(biofam.seq)
## dev.off()
## --------------------
## Sequence frequencies
## --------------------
## pdf(file=paste(graphdir,"actcal-seqfplot.pdf",sep="")
seqfplot(biofam.seq)
## dev.off()
## For actcal
seqfplot(actcal.seq)
seqtab(actcal.seq, tlim=10)
seqtab(actcal.seq[,7:9], tlim=10)
## --------------------
## Transition rates
## --------------------
tr <- seqtrate(actcal.seq)
round(tr, digits=3)
rowSums(seqtrate(actcal.seq))
## ====================
## Sequence index plots
## ====================
seqiplot(actcal.seq)
## All sequences sorted by age in 2000
## grouped by sex
## using 'border=NA' and 'space=0' options to have a nicer plot
seqiplot(actcal.seq, group=actcal$sex, tlim=0, border=NA, space=0,
sortv=actcal$age00)
## -----------------------------
## Mean time spent in each state
## -----------------------------
seqmtplot(actcal.seq, group=actcal$sex)
## -----------------------------
## Sequence of modal states
## -----------------------------
seqmsplot(actcal.seq, group=actcal$sex)
seqmsplot(biofam.seq, group=actcal$sex)
## -----------------------------
## Displaying sequences in SPS format
## -----------------------------
print(actcal.seq[1:10,],"SPS")
seqdss(actcal.seq[1:10,])
seqdur(actcal.seq[1:10,])
## ---------------
## Sequence length
## ---------------
data(famform)
famform.seq <- seqdef(famform)
famform.seq
seqlength(famform.seq)
## --------------------------------
## Searching for given subsequences
## --------------------------------
seqpm(actcal.seq,"DAAD")
daad <- seqpm(actcal.seq,"DAAD")
actcal.seq[daad$MIndex,]
## -----------------------
## Within sequence entropy
## -----------------------
seqient(actcal.seq[1:10,])
s1 <- c("A","A","A","B","B","B","C","C","C","D","D","D")
s2 <- c("A","D","A","B","C","B","C","B","C","D","A","D")
s3 <- c("A","B","A","B","A","B","C","D","C","D","C","D")
ex1 <- rbind(s1,s2,s3)
ex1
ex1 <- seqdef(ex1)
seqistatd(ex1)
seqient(ex1)
seqient(ex1,norm=FALSE)
actcal.ient <- seqient(actcal.seq)
summary(actcal.ient)
hist(actcal.ient,col="cyan",
main="Entropy for the sequences in the actcal data set",
xlab="Entropy")
## hist(seqient(actcal.seq),col="cyan",
## main="Entropy for the sequences in the actcal data set",
## xlab="Entropy")
max(actcal.ient)
which(actcal.ient==max(actcal.ient))
actcal.seq[1836,]
actcal[actcal.ient==max(actcal.ient),]
## Entropy for the biofam data set
biofam.ient <- seqient(biofam.seq)
hist(biofam.ient,col="cyan",
xlab="Entropy",
main="Entropy for the sequences in the biofam data set")
biofam <- data.frame(biofam, seqient(biofam.seq))
names(biofam)
summary(biofam$Entropy)
q1 <- quantile(biofam$Entropy,0.01)
q49 <- quantile(biofam$Entropy,0.49)
q51 <- quantile(biofam$Entropy,0.51)
q99 <- quantile(biofam$Entropy,0.99)
ient.min <- biofam.seq[biofam$Entropy<=q1,]
ient.med <- biofam.seq[biofam$Entropy>=q49 & biofam$Entropy<=q51,]
ient.max <- biofam.seq[biofam$Entropy>=q99,]
omar <- par(mar=c(5,4,4,2)+.1)
opar <- par(mfrow=c(2,2))
seqiplot(ient.min,
title="10 seq. with low entropy",
withlegend=FALSE)
seqiplot(ient.med,
title="10 seq. with medium entropy",
withlegend=FALSE)
seqiplot(ient.max,
title="10 seq. with high entropy",
withlegend=FALSE)
seqlegend(biofam.seq)
par(opar)
table(biofam$birthyr)
biofam <- data.frame(biofam,
ageg=cut(biofam$birthy,c(1909,1918,1928,1938,1948,1958),
label=c("1909-18","1919-28","1929-38","1939-48","1949-58"),include.lowest=TRUE))
table(biofam$ageg)
boxplot(Entropy ~ ageg,
data=biofam,
main="Boxplot of within entropy by birth cohorts",
xlab="Birth cohort",
ylab="Sequences entropy",
col="cyan")
## -------------------
## Sequence turbulence
## -------------------
data(actcal)
actcal.seq <- seqdef(actcal,13:24)
actcal.seq[2,]
seqdss(actcal.seq[2,])
seqsubsn(actcal.seq[2,],DSS=FALSE)
seqsubsn(actcal.seq[2,],DSS=TRUE)
sp.ex1
sp.ex1 <- seqdef(sp.ex1,informat="SPS")
seqST(sp.ex1)
## biofam data set
biofam <- data.frame(biofam, seqST(biofam.seq))
summary(biofam$Turbulence)
hist(biofam$Turbulence,
col="cyan",
xlab="Turbulence",
main="Turbulences for the sequences in the biofam data set")
max.turb <- max(biofam$Turbulence)
subset(biofam, Turbulence==max.turb)
biofam[biofam$Turbulence==max.turb,]
max.seq <- which(biofam$Turbulence==max.turb)
print(biofam.seq[max.seq,],format='SPS')
## Correlation with entropy
cor(biofam$Turbulence,biofam$Entropy)
cor(biofam$Turbulence,biofam$Entropy, method='spearman')
plot(biofam$Turbulence,biofam$Entropy,
main="Turbulence vs. Entropy",
xlab="Turbulence",
ylab="Entropy")
## Low, medium and high turbulence
q1 <- quantile(biofam$Turbulence,0.01)
q49 <- quantile(biofam$Turbulence,0.49)
q51 <- quantile(biofam$Turbulence,0.51)
q99 <- quantile(biofam$Turbulence,0.99)
turb.min <- biofam.seq[biofam$Turbulence<=q1,]
turb.med <- biofam.seq[biofam$Turbulence>=q49 & biofam$Turbulence<=q51,]
turb.max <- biofam.seq[biofam$Turbulence>=q99,]
opar <- par(mfrow=c(2,2))
seqiplot(turb.min,
title="10 seq. with low turbulence",
withlegend=FALSE)
seqiplot(turb.med,
title="10 seq. with medium turbulence",
withlegend=FALSE)
seqiplot(turb.max,
title="10 seq. with high turbulence",
withlegend=FALSE)
seqlegend(biofam.seq)
par(opar)
par(omar)
devAskNewPage(oask)
| /TraMineR/demo/Rendering.R | no_license | ingted/R-Examples | R | false | false | 7,082 | r | ## ======================================
## Examples for the chapter
## 'Describing and visualizing sequences'
## in TraMineR User's Guide
## ======================================
require(grDevices); require(graphics)
oask <- devAskNewPage(dev.interactive(orNone = TRUE))
library(TraMineR)
## Set 'graphdir' according to your system
## and uncomment the pdf() and dev.off() commands
## if you want to save the graphics as pdf files
graphdir <- "Graphics/"
## creating sequence objects
data(actcal)
state.lab <- c("> 37 hours", "19-36 hours", "1-18 hours", "no work")
actcal.seq <- seqdef(actcal,13:24,labels=state.lab)
data(biofam)
biofam.lab <- c("Parent", "Left", "Married", "Left+Marr",
"Child", "Left+Child", "Left+Marr+Child", "Divorced")
biofam.seq <- seqdef(biofam, 10:25, labels=biofam.lab)
## ---------------------------------------
## List of states present in sequence data
## ---------------------------------------
alphabet(actcal.seq)
sp.ex1 <- rbind("(000,12)-(0W0,9)-(0WU,5)-(1WU,2)",
"(000,12)-(0W0,14)-(1WU,2)")
sp.ex1
seqstatl(sp.ex1, format='SPS')
## ------------------
## State distribution
## ------------------
## pdf(file=paste(graphdir,"seqdplot-biofam.pdf",sep=""))
seqdplot(biofam.seq)
## dev.off()
## ------------------
## Transversal entropies
## ------------------
## pdf(file=paste(graphdir,"fg_biofam-entropy.pdf",sep=""), width=8, height=6, pointsize=14)
seqHtplot(biofam.seq)
## dev.off()
## --------------------
## Sequence frequencies
## --------------------
## pdf(file=paste(graphdir,"actcal-seqfplot.pdf",sep="")
seqfplot(biofam.seq)
## dev.off()
## For actcal
seqfplot(actcal.seq)
seqtab(actcal.seq, tlim=10)
seqtab(actcal.seq[,7:9], tlim=10)
## --------------------
## Transition rates
## --------------------
tr <- seqtrate(actcal.seq)
round(tr, digits=3)
rowSums(seqtrate(actcal.seq))
## ====================
## Sequence index plots
## ====================
seqiplot(actcal.seq)
## All sequences sorted by age in 2000
## grouped by sex
## using 'border=NA' and 'space=0' options to have a nicer plot
seqiplot(actcal.seq, group=actcal$sex, tlim=0, border=NA, space=0,
sortv=actcal$age00)
## -----------------------------
## Mean time spent in each state
## -----------------------------
seqmtplot(actcal.seq, group=actcal$sex)
## -----------------------------
## Sequence of modal states
## -----------------------------
seqmsplot(actcal.seq, group=actcal$sex)
seqmsplot(biofam.seq, group=actcal$sex)
## -----------------------------
## Displaying sequences in SPS format
## -----------------------------
print(actcal.seq[1:10,],"SPS")
seqdss(actcal.seq[1:10,])
seqdur(actcal.seq[1:10,])
## ---------------
## Sequence length
## ---------------
data(famform)
famform.seq <- seqdef(famform)
famform.seq
seqlength(famform.seq)
## --------------------------------
## Searching for given subsequences
## --------------------------------
seqpm(actcal.seq,"DAAD")
daad <- seqpm(actcal.seq,"DAAD")
actcal.seq[daad$MIndex,]
## -----------------------
## Within sequence entropy
## -----------------------
seqient(actcal.seq[1:10,])
s1 <- c("A","A","A","B","B","B","C","C","C","D","D","D")
s2 <- c("A","D","A","B","C","B","C","B","C","D","A","D")
s3 <- c("A","B","A","B","A","B","C","D","C","D","C","D")
ex1 <- rbind(s1,s2,s3)
ex1
ex1 <- seqdef(ex1)
seqistatd(ex1)
seqient(ex1)
seqient(ex1,norm=FALSE)
actcal.ient <- seqient(actcal.seq)
summary(actcal.ient)
hist(actcal.ient,col="cyan",
main="Entropy for the sequences in the actcal data set",
xlab="Entropy")
## hist(seqient(actcal.seq),col="cyan",
## main="Entropy for the sequences in the actcal data set",
## xlab="Entropy")
max(actcal.ient)
which(actcal.ient==max(actcal.ient))
actcal.seq[1836,]
actcal[actcal.ient==max(actcal.ient),]
## Entropy for the biofam data set
biofam.ient <- seqient(biofam.seq)
hist(biofam.ient,col="cyan",
xlab="Entropy",
main="Entropy for the sequences in the biofam data set")
biofam <- data.frame(biofam, seqient(biofam.seq))
names(biofam)
summary(biofam$Entropy)
q1 <- quantile(biofam$Entropy,0.01)
q49 <- quantile(biofam$Entropy,0.49)
q51 <- quantile(biofam$Entropy,0.51)
q99 <- quantile(biofam$Entropy,0.99)
ient.min <- biofam.seq[biofam$Entropy<=q1,]
ient.med <- biofam.seq[biofam$Entropy>=q49 & biofam$Entropy<=q51,]
ient.max <- biofam.seq[biofam$Entropy>=q99,]
omar <- par(mar=c(5,4,4,2)+.1)
opar <- par(mfrow=c(2,2))
seqiplot(ient.min,
title="10 seq. with low entropy",
withlegend=FALSE)
seqiplot(ient.med,
title="10 seq. with medium entropy",
withlegend=FALSE)
seqiplot(ient.max,
title="10 seq. with high entropy",
withlegend=FALSE)
seqlegend(biofam.seq)
par(opar)
table(biofam$birthyr)
biofam <- data.frame(biofam,
ageg=cut(biofam$birthy,c(1909,1918,1928,1938,1948,1958),
label=c("1909-18","1919-28","1929-38","1939-48","1949-58"),include.lowest=TRUE))
table(biofam$ageg)
boxplot(Entropy ~ ageg,
data=biofam,
main="Boxplot of within entropy by birth cohorts",
xlab="Birth cohort",
ylab="Sequences entropy",
col="cyan")
## -------------------
## Sequence turbulence
## -------------------
data(actcal)
actcal.seq <- seqdef(actcal,13:24)
actcal.seq[2,]
seqdss(actcal.seq[2,])
seqsubsn(actcal.seq[2,],DSS=FALSE)
seqsubsn(actcal.seq[2,],DSS=TRUE)
sp.ex1
sp.ex1 <- seqdef(sp.ex1,informat="SPS")
seqST(sp.ex1)
## biofam data set
biofam <- data.frame(biofam, seqST(biofam.seq))
summary(biofam$Turbulence)
hist(biofam$Turbulence,
col="cyan",
xlab="Turbulence",
main="Turbulences for the sequences in the biofam data set")
max.turb <- max(biofam$Turbulence)
subset(biofam, Turbulence==max.turb)
biofam[biofam$Turbulence==max.turb,]
max.seq <- which(biofam$Turbulence==max.turb)
print(biofam.seq[max.seq,],format='SPS')
## Correlation with entropy
cor(biofam$Turbulence,biofam$Entropy)
cor(biofam$Turbulence,biofam$Entropy, method='spearman')
plot(biofam$Turbulence,biofam$Entropy,
main="Turbulence vs. Entropy",
xlab="Turbulence",
ylab="Entropy")
## Low, medium and high turbulence
q1 <- quantile(biofam$Turbulence,0.01)
q49 <- quantile(biofam$Turbulence,0.49)
q51 <- quantile(biofam$Turbulence,0.51)
q99 <- quantile(biofam$Turbulence,0.99)
turb.min <- biofam.seq[biofam$Turbulence<=q1,]
turb.med <- biofam.seq[biofam$Turbulence>=q49 & biofam$Turbulence<=q51,]
turb.max <- biofam.seq[biofam$Turbulence>=q99,]
opar <- par(mfrow=c(2,2))
seqiplot(turb.min,
title="10 seq. with low turbulence",
withlegend=FALSE)
seqiplot(turb.med,
title="10 seq. with medium turbulence",
withlegend=FALSE)
seqiplot(turb.max,
title="10 seq. with high turbulence",
withlegend=FALSE)
seqlegend(biofam.seq)
par(opar)
par(omar)
devAskNewPage(oask)
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(comment = "#>", collapse = TRUE)
## -----------------------------------------------------------------------------
library(lax)
# Column 4 of ow contains 1 for Oxford and -1 for Worthing
large <- gev_refit(ow$temp, ow, mul = 4, sigl = 4, shl = 4, show = FALSE,
method = "BFGS")
# Adjust the loglikelihood and standard errors
adj_large <- alogLik(large, cluster = ow$year, cadjust = FALSE)
# MLEs, SEs and adjusted SEs
t(summary(adj_large))
## -----------------------------------------------------------------------------
confint(adj_large)
confint(adj_large, type = "none")
## ---- fig.align='center', fig.width=7, fig.height=7---------------------------
library(chandwich)
which_pars <- c("scale", "shape")
gev_none <- conf_region(adj_large, which_pars = which_pars, type = "none")
gev_vertical <- conf_region(adj_large, which_pars = which_pars)
plot(gev_none, gev_vertical, lwd = 2, xlim = c(3.1, 4.5), ylim = c(-0.35, -0.05),
xlab = expression(sigma[0]), ylab = expression(xi[0]))
## -----------------------------------------------------------------------------
small <- gev_refit(ow$temp, ow, mul = 4, sigl = 4, show = FALSE,
method = "BFGS")
adj_small <- alogLik(small, cluster = ow$year, cadjust = FALSE)
summary(adj_small)
anova(adj_large, adj_small)
anova(adj_large, adj_small, type = "none")
## ---- echo = FALSE------------------------------------------------------------
got_texmex <- requireNamespace("texmex", quietly = TRUE)
## ---- eval = got_texmex-------------------------------------------------------
library(texmex, quietly = TRUE)
# Note: phi = log(scale)
evm_fit <- evm(temp, ow, gev, mu = ~ loc, phi = ~ loc, xi = ~loc)
adj_evm_fit <- alogLik(evm_fit, cluster = ow$year)
summary(adj_evm_fit)
## ---- echo = FALSE, eval = got_texmex-----------------------------------------
detach("package:texmex")
## ---- echo = FALSE------------------------------------------------------------
got_evd <- requireNamespace("evd", quietly = TRUE)
## ---- eval = got_evd----------------------------------------------------------
library(evd, quietly = TRUE)
fgev_fit <- fgev(ow$temp, nsloc = ow[, "loc"])
adj_fgev_fit <- alogLik(fgev_fit, cluster = ow$year)
summary(adj_fgev_fit)
## ---- echo = FALSE, eval = got_evd--------------------------------------------
detach("package:evd")
## ---- echo = FALSE, message = FALSE, warning = FALSE--------------------------
got_extRemes <- requireNamespace("extRemes", quietly = TRUE)
## ---- eval = got_extRemes, message = FALSE, warning = FALSE-------------------
library(extRemes, quietly = TRUE)
fevd_fit <- fevd(temp, ow, location.fun = ~ ow$loc, scale.fun = ~ ow$loc,
shape.fun = ~ ow$loc)
adj_fevd_fit <- alogLik(fevd_fit, cluster = ow$year)
summary(adj_fevd_fit)
## ---- echo = FALSE, eval = got_extRemes---------------------------------------
detach("package:extRemes")
## ---- echo = FALSE, message = FALSE-------------------------------------------
got_eva <- requireNamespace("eva", quietly = TRUE)
## ---- eval = got_extRemes, message = FALSE, warning = FALSE-------------------
library(eva, quietly = TRUE)
gevr_fit <- gevrFit(ow$temp, information = "observed",
locvars = ow, locform = ~ ow$loc,
scalevars = ow, scaleform = ~ ow$loc,
shapevars = ow, shapeform = ~ ow$loc)
adj_gevr_fit <- alogLik(gevr_fit, cluster = ow$year)
summary(adj_gevr_fit)
## ---- echo = FALSE, eval = got_eva--------------------------------------------
detach("package:eva")
## ---- echo = FALSE------------------------------------------------------------
got_evir <- requireNamespace("evir", quietly = TRUE)
## ---- eval = got_evir, message = FALSE----------------------------------------
library(evir, quietly = TRUE)
gev_fit <- gev(ow$temp)
adj_gev_fit <- alogLik(gev_fit)
summary(adj_gev_fit)
## ---- echo = FALSE, eval = got_evir-------------------------------------------
detach("package:evir")
## ---- echo = FALSE------------------------------------------------------------
got_fExtremes <- requireNamespace("fExtremes", quietly = TRUE)
## ---- eval = got_fExtremes----------------------------------------------------
library(fExtremes, quietly = TRUE)
gevFit_fit <- gevFit(ow$temp)
adj_gevFit_fit <- alogLik(gevFit_fit)
summary(adj_gevFit_fit)
## ---- echo = FALSE, eval = got_fExtremes--------------------------------------
detach("package:fExtremes")
## ---- echo = FALSE, message = FALSE-------------------------------------------
got_mev <- requireNamespace("mev", quietly = TRUE)
## ---- eval = got_mev----------------------------------------------------------
library(mev, quietly = TRUE)
gfit <- fit.gev(ow$temp)
adj_gfit <- alogLik(gfit)
summary(adj_gfit)
## ---- echo = FALSE, eval = got_mev--------------------------------------------
detach("package:mev")
## ---- echo = FALSE, message = FALSE-------------------------------------------
got_POT <- requireNamespace("POT", quietly = TRUE)
## ---- eval = got_POT----------------------------------------------------------
library(POT, quietly = TRUE)
set.seed(24082019)
x <- POT::rgpd(200, 1, 2, 0.25)
fit <- fitgpd(x, 1, "mle")
adj_fit <- alogLik(fit)
summary(adj_fit)
| /inst/doc/lax-vignette.R | no_license | cran/lax | R | false | false | 5,456 | r | ## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(comment = "#>", collapse = TRUE)
## -----------------------------------------------------------------------------
library(lax)
# Column 4 of ow contains 1 for Oxford and -1 for Worthing
large <- gev_refit(ow$temp, ow, mul = 4, sigl = 4, shl = 4, show = FALSE,
method = "BFGS")
# Adjust the loglikelihood and standard errors
adj_large <- alogLik(large, cluster = ow$year, cadjust = FALSE)
# MLEs, SEs and adjusted SEs
t(summary(adj_large))
## -----------------------------------------------------------------------------
confint(adj_large)
confint(adj_large, type = "none")
## ---- fig.align='center', fig.width=7, fig.height=7---------------------------
library(chandwich)
which_pars <- c("scale", "shape")
gev_none <- conf_region(adj_large, which_pars = which_pars, type = "none")
gev_vertical <- conf_region(adj_large, which_pars = which_pars)
plot(gev_none, gev_vertical, lwd = 2, xlim = c(3.1, 4.5), ylim = c(-0.35, -0.05),
xlab = expression(sigma[0]), ylab = expression(xi[0]))
## -----------------------------------------------------------------------------
small <- gev_refit(ow$temp, ow, mul = 4, sigl = 4, show = FALSE,
method = "BFGS")
adj_small <- alogLik(small, cluster = ow$year, cadjust = FALSE)
summary(adj_small)
anova(adj_large, adj_small)
anova(adj_large, adj_small, type = "none")
## ---- echo = FALSE------------------------------------------------------------
got_texmex <- requireNamespace("texmex", quietly = TRUE)
## ---- eval = got_texmex-------------------------------------------------------
library(texmex, quietly = TRUE)
# Note: phi = log(scale)
evm_fit <- evm(temp, ow, gev, mu = ~ loc, phi = ~ loc, xi = ~loc)
adj_evm_fit <- alogLik(evm_fit, cluster = ow$year)
summary(adj_evm_fit)
## ---- echo = FALSE, eval = got_texmex-----------------------------------------
detach("package:texmex")
## ---- echo = FALSE------------------------------------------------------------
got_evd <- requireNamespace("evd", quietly = TRUE)
## ---- eval = got_evd----------------------------------------------------------
library(evd, quietly = TRUE)
fgev_fit <- fgev(ow$temp, nsloc = ow[, "loc"])
adj_fgev_fit <- alogLik(fgev_fit, cluster = ow$year)
summary(adj_fgev_fit)
## ---- echo = FALSE, eval = got_evd--------------------------------------------
detach("package:evd")
## ---- echo = FALSE, message = FALSE, warning = FALSE--------------------------
got_extRemes <- requireNamespace("extRemes", quietly = TRUE)
## ---- eval = got_extRemes, message = FALSE, warning = FALSE-------------------
library(extRemes, quietly = TRUE)
fevd_fit <- fevd(temp, ow, location.fun = ~ ow$loc, scale.fun = ~ ow$loc,
shape.fun = ~ ow$loc)
adj_fevd_fit <- alogLik(fevd_fit, cluster = ow$year)
summary(adj_fevd_fit)
## ---- echo = FALSE, eval = got_extRemes---------------------------------------
detach("package:extRemes")
## ---- echo = FALSE, message = FALSE-------------------------------------------
got_eva <- requireNamespace("eva", quietly = TRUE)
## ---- eval = got_extRemes, message = FALSE, warning = FALSE-------------------
library(eva, quietly = TRUE)
gevr_fit <- gevrFit(ow$temp, information = "observed",
locvars = ow, locform = ~ ow$loc,
scalevars = ow, scaleform = ~ ow$loc,
shapevars = ow, shapeform = ~ ow$loc)
adj_gevr_fit <- alogLik(gevr_fit, cluster = ow$year)
summary(adj_gevr_fit)
## ---- echo = FALSE, eval = got_eva--------------------------------------------
detach("package:eva")
## ---- echo = FALSE------------------------------------------------------------
got_evir <- requireNamespace("evir", quietly = TRUE)
## ---- eval = got_evir, message = FALSE----------------------------------------
library(evir, quietly = TRUE)
gev_fit <- gev(ow$temp)
adj_gev_fit <- alogLik(gev_fit)
summary(adj_gev_fit)
## ---- echo = FALSE, eval = got_evir-------------------------------------------
detach("package:evir")
## ---- echo = FALSE------------------------------------------------------------
got_fExtremes <- requireNamespace("fExtremes", quietly = TRUE)
## ---- eval = got_fExtremes----------------------------------------------------
library(fExtremes, quietly = TRUE)
gevFit_fit <- gevFit(ow$temp)
adj_gevFit_fit <- alogLik(gevFit_fit)
summary(adj_gevFit_fit)
## ---- echo = FALSE, eval = got_fExtremes--------------------------------------
detach("package:fExtremes")
## ---- echo = FALSE, message = FALSE-------------------------------------------
got_mev <- requireNamespace("mev", quietly = TRUE)
## ---- eval = got_mev----------------------------------------------------------
library(mev, quietly = TRUE)
gfit <- fit.gev(ow$temp)
adj_gfit <- alogLik(gfit)
summary(adj_gfit)
## ---- echo = FALSE, eval = got_mev--------------------------------------------
detach("package:mev")
## ---- echo = FALSE, message = FALSE-------------------------------------------
got_POT <- requireNamespace("POT", quietly = TRUE)
## ---- eval = got_POT----------------------------------------------------------
library(POT, quietly = TRUE)
set.seed(24082019)
x <- POT::rgpd(200, 1, 2, 0.25)
fit <- fitgpd(x, 1, "mle")
adj_fit <- alogLik(fit)
summary(adj_fit)
|
# Bootstrap statistic: the sample mean of `x`.
# Kept as a named function so the resampled statistic can be swapped out
# (e.g. for median) without touching the bootstrap replication step below.
# Idiom fix: drop the explicit `return()` -- R returns the last expression,
# and `return()` is conventionally reserved for early exits.
calc_statistic <- function(x) {
  mean(x)
}
# Basic ("pivot") bootstrap 95% confidence interval for the mean of `X`
# (assumed to be a numeric vector already in scope).
mu_star <- mean(X)
# 10,000 bootstrap replicates: recompute the statistic on resamples of `X`
# drawn with replacement.
mu_star_star <- replicate(10000, calc_statistic(sample(X, replace = TRUE)))
# Quantiles of the pivot (point estimate minus bootstrap replicate).
U <- quantile(mu_star - mu_star_star, 0.975)
L <- quantile(mu_star - mu_star_star, 0.025)
# Basic bootstrap interval: shift the point estimate by the pivot quantiles,
# equivalent to (2*mu_star - q0.975(mu_star_star), 2*mu_star - q0.025(mu_star_star)).
CI <- c(mu_star + L, mu_star + U)
| /Tree Based Methods/bootstrap_ci.R | permissive | shilpakancharla/machine-learning-tutorials | R | false | false | 275 | r | calc_statistic <- function(x) {
return(mean(x))
}
mu_star <- mean(X)
mu_star_star <- replicate(10000, calc_statistic(sample(X, replace = TRUE)))
U <- quantile(mu_star - mu_star_star, 0.975)
L <- quantile(mu_star - mu_star_star, 0.025)
CI <- c(mu_star + L, mu_star + U)
|
#A data sample is called qualitative also known as categorical,
#if its values belong to a collection of known defined non-overlapping classes.
#common ex. include student letter grade(A ,B,C, D, E and F)
#commercial bond rating (AAA,AAB........)and consumer clothing shoe sizes(1,2,3........).
iris
x=5
irisdata =iris
firstcolumn= irisdata$Sepal.Length
summary(firstcolumn)
boxplot(firstcolumn)
hist(firstcolumn)
#Data is loaded
iris
dim(irisdata)
#variable names or colum names
names(irisdata)
#structure (How your data looks like)
str(iris)
#Attributes
iris[1:15,]
iris[50:59,]
iris[149:150,]
iris[,4]
iris[,5]
iris[1:3]
#Get sepal.length of the first 10 rows
iris[1:10, "Sepal.Length"]
iris[1:10 ,3]
iris[1:10, iris$Petal.Width]
#the same as above
iris$Sepal.Length[1:10]
#distribution of every variable
summary(iris)
#frequency
table(iris$Species)
#pie chart
pie(table(iris$Species))
#variance of Sepal.Length
var(iris$Sepal.Length)
#covariance of two variables
cov(iris$Sepal.Length,iris$Petal.Length)
#correlation of two variables
cor(iris$Sepal.Length, iris$Petal.Length)
#hist
hist(iris$Sepal.Length)
#density
plot(density(iris$Sepal.Length))
#scatter plot
plot(iris$Sepal.Length, iris$Sepal.Length)
#pair plot
plot(iris)
pairs(iris)
pairs(iris[1:4], pch=21, bg=c("red","green3","blue")[unclass(iris$Species)])
| /learning to deal with data II.R | no_license | Githubarifkhan/cleaning-data-with-R | R | false | false | 1,412 | r | #A data sample is called qualitative also known as categorical,
#if its values belong to a collection of known defined non-overlapping classes.
#common ex. include student letter grade(A ,B,C, D, E and F)
#commercial bond rating (AAA,AAB........)and consumer clothing shoe sizes(1,2,3........).
iris
x=5
irisdata =iris
firstcolumn= irisdata$Sepal.Length
summary(firstcolumn)
boxplot(firstcolumn)
hist(firstcolumn)
#Data is loaded
iris
dim(irisdata)
#variable names or colum names
names(irisdata)
#structure (How your data looks like)
str(iris)
#Attributes
iris[1:15,]
iris[50:59,]
iris[149:150,]
iris[,4]
iris[,5]
iris[1:3]
#Get sepal.length of the first 10 rows
iris[1:10, "Sepal.Length"]
iris[1:10 ,3]
iris[1:10, iris$Petal.Width]
#the same as above
iris$Sepal.Length[1:10]
#distribution of every variable
summary(iris)
#frequency
table(iris$Species)
#pie chart
pie(table(iris$Species))
#variance of Sepal.Length
var(iris$Sepal.Length)
#covariance of two variables
cov(iris$Sepal.Length,iris$Petal.Length)
#correlation of two variables
cor(iris$Sepal.Length, iris$Petal.Length)
#hist
hist(iris$Sepal.Length)
#density
plot(density(iris$Sepal.Length))
#scatter plot
plot(iris$Sepal.Length, iris$Sepal.Length)
#pair plot
plot(iris)
pairs(iris)
pairs(iris[1:4], pch=21, bg=c("red","green3","blue")[unclass(iris$Species)])
|
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.82434500994586e-304, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609868004-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 831 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.82434500994586e-304, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
#' Derive BASETYPE Variable
#'
#' Adds the `BASETYPE` variable to a dataset and duplicates records based upon
#' the provided conditions
#'
#' @param dataset Input dataset
#'
#' The columns specified in the expressions inside `basetypes` are required.
#'
#' @param basetypes A *named* list of expressions created using `exprs()`
#'
#'   The names correspond to the values of the newly created `BASETYPE` variable
#'   and the expressions are used to subset the input dataset.
#'
#' @details
#' For each element of `basetypes` the input dataset is subset based upon
#' the provided expression and the `BASETYPE` variable is set to the name of the
#' expression. Then, all subsets are stacked. Records which do not match any
#' condition are kept and `BASETYPE` is set to `NA`.
#'
#' @author Thomas Neitmann
#'
#' @keywords bds derivation
#'
#' @export
#'
#' @examples
#' bds <- tibble::tribble(
#' ~USUBJID, ~EPOCH, ~PARAMCD, ~ASEQ, ~AVAL,
#' "P01", "RUN-IN", "PARAM01", 1, 10,
#' "P01", "RUN-IN", "PARAM01", 2, 9.8,
#' "P01", "DOUBLE-BLIND", "PARAM01", 3, 9.2,
#' "P01", "DOUBLE-BLIND", "PARAM01", 4, 10.1,
#' "P01", "OPEN-LABEL", "PARAM01", 5, 10.4,
#' "P01", "OPEN-LABEL", "PARAM01", 6, 9.9,
#' "P02", "RUN-IN", "PARAM01", 1, 12.1,
#' "P02", "DOUBLE-BLIND", "PARAM01", 2, 10.2,
#' "P02", "DOUBLE-BLIND", "PARAM01", 3, 10.8,
#' "P02", "OPEN-LABEL", "PARAM01", 4, 11.4,
#' "P02", "OPEN-LABEL", "PARAM01", 5, 10.8
#' )
#'
#' derive_var_basetype(
#' dataset = bds,
#' basetypes = exprs(
#' "RUN-IN" = EPOCH %in% c("RUN-IN", "STABILIZATION", "DOUBLE-BLIND", "OPEN-LABEL"),
#' "DOUBLE-BLIND" = EPOCH %in% c("DOUBLE-BLIND", "OPEN-LABEL"),
#' "OPEN-LABEL" = EPOCH == "OPEN-LABEL"
#' )
#' )
derive_var_basetype <- function(dataset, basetypes) {
  assert_data_frame(dataset)
  assert_named_exprs(basetypes)
  # Build one labeled subset per element of `basetypes`: inject each quoted
  # condition into filter() via `!!` and tag the matching rows with the
  # element's name. A record that satisfies several conditions is
  # intentionally duplicated, once per matching BASETYPE.
  records_with_basetype <- map2(names(basetypes), basetypes, function(label, condition) {
    dataset %>%
      filter(!!condition) %>%
      mutate(BASETYPE = label)
  }) %>%
    bind_rows()
  # OR all conditions together (bquote() splices the quoted expressions into
  # a single `a | b | ...` call) to identify records matched by no condition;
  # those are kept exactly once with BASETYPE = NA (from bind_rows below).
  # NOTE(review): a row for which every condition evaluates to NA is dropped
  # from both branches (filter() discards NA) -- confirm that losing such
  # rows entirely is the intended missing-value behavior.
  complementary_condition <- Reduce(function(x, y) bquote(.(x) | .(y)), basetypes)
  records_without_basetype <- filter(dataset, !(!!complementary_condition))
  # Unmatched records first, then the per-BASETYPE duplicates.
  bind_rows(records_without_basetype, records_with_basetype)
}
| /R/derive_var_basetype.R | no_license | rajkboddu/admiral | R | false | false | 2,377 | r | #' Derive BASETYPE Variable
#'
#' Adds the `BASETYPE` variable to a dataset and duplicates records based upon
#' the provided conditions
#'
#' @param dataset Input dataset
#'
#' The columns specified in the expressions inside `basetypes` are required.
#'
#' @param basetypes A *named* list of expressions created using `exprs()`
#'
#' The names corresponds to the values of the newly created `BASETYPE` variables
#' and the expressions are used to subset the input dataset.
#'
#' @details
#' For each element of `basetypes` the input dataset is subset based upon
#' the provided expression and the `BASETYPE` variable is set to the name of the
#' expression. Then, all subsets are stacked. Records which do not match any
#' condition are kept and `BASETYPE` is set to `NA`.
#'
#' @author Thomas Neitmann
#'
#' @keywords bds derivation
#'
#' @export
#'
#' @examples
#' bds <- tibble::tribble(
#' ~USUBJID, ~EPOCH, ~PARAMCD, ~ASEQ, ~AVAL,
#' "P01", "RUN-IN", "PARAM01", 1, 10,
#' "P01", "RUN-IN", "PARAM01", 2, 9.8,
#' "P01", "DOUBLE-BLIND", "PARAM01", 3, 9.2,
#' "P01", "DOUBLE-BLIND", "PARAM01", 4, 10.1,
#' "P01", "OPEN-LABEL", "PARAM01", 5, 10.4,
#' "P01", "OPEN-LABEL", "PARAM01", 6, 9.9,
#' "P02", "RUN-IN", "PARAM01", 1, 12.1,
#' "P02", "DOUBLE-BLIND", "PARAM01", 2, 10.2,
#' "P02", "DOUBLE-BLIND", "PARAM01", 3, 10.8,
#' "P02", "OPEN-LABEL", "PARAM01", 4, 11.4,
#' "P02", "OPEN-LABEL", "PARAM01", 5, 10.8
#' )
#'
#' derive_var_basetype(
#' dataset = bds,
#' basetypes = exprs(
#' "RUN-IN" = EPOCH %in% c("RUN-IN", "STABILIZATION", "DOUBLE-BLIND", "OPEN-LABEL"),
#' "DOUBLE-BLIND" = EPOCH %in% c("DOUBLE-BLIND", "OPEN-LABEL"),
#' "OPEN-LABEL" = EPOCH == "OPEN-LABEL"
#' )
#' )
derive_var_basetype <- function(dataset, basetypes) {
assert_data_frame(dataset)
assert_named_exprs(basetypes)
records_with_basetype <- map2(names(basetypes), basetypes, function(label, condition) {
dataset %>%
filter(!!condition) %>%
mutate(BASETYPE = label)
}) %>%
bind_rows()
complementary_condition <- Reduce(function(x, y) bquote(.(x) | .(y)), basetypes)
records_without_basetype <- filter(dataset, !(!!complementary_condition))
bind_rows(records_without_basetype, records_with_basetype)
}
|
#' @include AllClasses.R
## Convert a (mean, sd) pair into Gamma(shape, rate) parameters.
## For a Gamma distribution mean = shape/rate and variance = shape/rate^2,
## hence shape = mean^2/var and rate = mean/var.
.parameterizeGammaByMeanSd <- function(mn, sd){
  v <- sd * sd
  c(shape = mn * mn / v, rate = mn / v)
}
## Quantiles of the Gamma prior on the precision 1/tau2, parameterized as
## Gamma(shape = eta.0/2, rate = eta.0*m2.0/2). If both `mn` and `sd` are
## supplied, (eta.0, m2.0) are re-derived from that target mean/sd instead:
## a Gamma(a, b) corresponds to eta.0 = 2a and m2.0 = b/a.
## Returns the 0.001-spaced quantile grid (zero/non-finite values removed)
## together with the hyperparameters and the implied Gamma mean and sd.
qInverseTau2 <- function(eta.0 = 1800, m2.0 = 100, mn, sd) {
  if (!missing(mn) && !missing(sd)) {
    gpar <- .parameterizeGammaByMeanSd(mn, sd)
    eta.0 <- 2 * gpar[["shape"]]
    m2.0 <- gpar[["rate"]] / gpar[["shape"]]
  }
  shp <- 0.5 * eta.0
  rte <- 0.5 * eta.0 * m2.0
  qs <- qgamma(seq(0, 1 - 0.001, 0.001), shape = shp, rate = rte)
  ## Drop the p = 0 quantile (zero) and any non-finite tail values.
  qs <- qs[is.finite(qs) & qs > 0]
  list(quantiles = qs,
       eta.0 = eta.0,
       m2.0 = m2.0,
       mean = shp / rte,
       sd = sqrt(shp / rte^2))
}
#' Create an object of class 'HyperparametersMultiBatch' for the
#' batch mixture model
#'
#' @param k length-one integer vector specifying number of components
#' (typically 1 <= k <= 4)
#' @param mu.0 length-one numeric vector of the mean of the normal prior
#'   for the component means.
#' @param tau2.0 length-one numeric vector of the variance for the normal
#' prior of the component means
#' @param eta.0 length-one numeric vector of the shape parameter for
#' the Inverse Gamma prior of the component variances, tau2_h. The
#' shape parameter is parameterized as 1/2 * eta.0. In the batch
#' model, tau2_h describes the inter-batch heterogeneity of means for
#' component h.
#' @param m2.0 length-one numeric vector of the rate parameter for the
#' Inverse Gamma prior of the component variances, tau2_h. The rate
#' parameter is parameterized as 1/2 * eta.0 * m2.0. In the batch
#' model, tau2_h describes the inter-batch heterogeneity of means for
#' component h.
#' @param alpha length-k numeric vector of the shape parameters for
#' the dirichlet prior on the mixture probabilities
#' @param beta length-one numeric vector for the parameter of the
#' geometric prior for nu.0 (nu.0 is the shape parameter of the
#' Inverse Gamma sampling distribution for the component-specific
#' variances. Together, nu.0 and sigma2.0 model inter-component
#' heterogeneity in variances.). beta is a probability and must be
#' in the interval [0,1].
#' @param a length-one numeric vector of the shape parameter for the
#' Gamma prior used for sigma2.0 (sigma2.0 is the shape parameter of
#' the Inverse Gamma sampling distribution for the component-specific
#' variances).
#' @param b a length-one numeric vector of the rate parameter for the
#' Gamma prior used for sigma2.0 (sigma2.0 is the rate parameter of
#' the Inverse Gamma sampling distribution for the component-specific
#' variances)
#' @param dfr length-one numeric vector for t-distribution degrees of freedom
#' @return An object of class HyperparametersBatch
HyperparametersMultiBatch <- function(k = 3L,
                                      mu.0 = 0,
                                      tau2.0 = 0.4,
                                      eta.0 = 32,
                                      m2.0 = 0.5,
                                      alpha,
                                      beta = 0.1,
                                      a = 1.8,
                                      b = 6,
                                      dfr = 100) {
  ## Default to a flat (all-ones) Dirichlet prior over the k mixture
  ## probabilities when the caller does not supply `alpha`. The default
  ## beta = 0.1 gives the geometric prior on nu.0 a mean of 1/10.
  if (missing(alpha)) {
    alpha <- rep(1, k)
  }
  new("HyperparametersMultiBatch",
      k = as.integer(k),
      mu.0 = mu.0,
      tau2.0 = tau2.0,
      eta.0 = eta.0,
      m2.0 = m2.0,
      alpha = alpha,
      beta = beta,
      a = a,
      b = b,
      dfr = dfr)
}
#' Create an object of class 'HyperparametersSingleBatch' for the
#' single batch mixture model
#'
#' @param k length-one integer vector specifying number of components
#' (typically 1 <= k <= 4)
#' @param mu.0 length-one numeric vector of the mean for the normal
#' prior of the component means
#' @param tau2.0 length-one numeric vector of the variance for the normal
#' prior of the component means
#' @param eta.0 length-one numeric vector of the shape parameter for
#' the Inverse Gamma prior of the component variances. The shape
#' parameter is parameterized as 1/2 * eta.0.
#' @param m2.0 length-one numeric vector of the rate parameter for
#' the Inverse Gamma prior of the component variances. The rate
#' parameter is parameterized as 1/2 * eta.0 * m2.0.
#' @param alpha length-k numeric vector of the shape parameters for
#' the dirichlet prior on the mixture probabilities
#' @param beta length-one numeric vector for the parameter of the
#' geometric prior for nu.0 (nu.0 is the shape parameter of the
#' Inverse Gamma sampling distribution for the component-specific
#' variances). beta is a probability and must be in the interval
#' [0,1].
#' @param a length-one numeric vector of the shape parameter for the
#' Gamma prior used for sigma2.0 (sigma2.0 is the shape parameter of
#' the Inverse Gamma sampling distribution for the component-specific
#' variances)
#' @param b a length-one numeric vector of the rate parameter for the
#' Gamma prior used for sigma2.0 (sigma2.0 is the rate parameter of
#' the Inverse Gamma sampling distribution for the component-specific
#' variances)
#' @param dfr length-one numeric vector for t-distribution degrees of freedom
#'
#' @return An object of class HyperparametersSingleBatch
HyperparametersSingleBatch <- function(k=0L,
mu.0=0,
tau2.0=0.4,
eta.0=32,
m2.0=0.5,
alpha,
beta=0.1, ## mean is 1/10
a=1.8,
b=6,
dfr=100){
if(missing(alpha)) alpha <- rep(1, k)
##if(missing(tau2)) tau2 <- rep(1, k)
new("HyperparametersSingleBatch",
k=as.integer(k),
mu.0=mu.0,
tau2.0=tau2.0,
eta.0=eta.0,
m2.0=m2.0,
alpha=alpha,
beta=beta,
a=a,
b=b,
dfr=dfr)
}
setValidity("Hyperparameters", function(object){
msg <- TRUE
if(k(object) != alpha(object)){
msg <- "alpha vector must be the same length as k"
return(msg)
}
msg
})
#' Create an object of class 'Hyperparameters'
#'
#' @param type specifies 'marginal' or 'batch'
#' @param k number of components
#' @param ... additional arguments are names of the slots of the Hyperparameters class, including `mu.0`, `tau2.0`, `eta.0`, `m2.0`, `alpha`, `beta`, `a`, `b`, and `dfr`).
#' @return An object of class HyperparametersMarginal or HyperparametersBatch
#'
#' @details
#' Additional hyperparameters can be passed to the HyperparametersMarginal and HyperparametersBatch models.
#'
#' @rdname Hyperparameters
#' @export
Hyperparameters <- function(type="batch", k=2L, ...){
if(type=="marginal") return(HyperparametersSingleBatch(k, ...))
if(type=="batch") return(HyperparametersMultiBatch(k, ...))
##if(type=="trios") return(HyperparametersTrios(k, ...))
}
#' Accessor for number of mixture components
#'
#' @rdname k-method
#' @aliases k<-,Hyperparameters-method
#' @param object A Hyperparameters instance
#' @param value An integer specifying the number of mixture components for the mixture model
setReplaceMethod("k", "Hyperparameters", function(object, value){
object@k <- as.integer(value)
object@alpha <- rep(1L, value)
object
})
## #' @rdname k-method
## #' @aliases k<-,HyperparametersTrios-method
## setReplaceMethod("k", "HyperparametersTrios", function(object, value){
## object@k <- as.integer(value)
## object@alpha <- rep(1L, value)
## object
## })
setValidity("Hyperparameters", function(object){
msg <- NULL
if(length(alpha(object)) != k(object)){
msg <- "alpha should be a numeric vector with length equal to the number of components"
return(msg)
}
})
#' @rdname k-method
#' @export
setMethod("k", "Hyperparameters", function(object) object@k)
setMethod("alpha", "Hyperparameters", function(object) object@alpha)
## beta is a base function
betas <- function(object) object@beta
a <- function(object) object@a
b <- function(object) object@b
setReplaceMethod("alpha", "Hyperparameters", function(object, value){
object@alpha <- value
object
})
setMethod("show", "Hyperparameters", function(object){
cat("An object of class 'Hyperparameters'\n")
cat(" k :", k(object), "\n")
cat(" mu.0 :", mu.0(object), "\n")
cat(" tau2.0 :", tau2.0(object), "\n")
cat(" eta.0 :", eta.0(object), "\n")
cat(" m2.0 :", round(m2.0(object), 3), "\n")
cat(" alpha :", alpha(object), "\n")
cat(" beta :", betas(object), "\n")
cat(" a :", a(object), "\n")
cat(" b :", b(object), "\n")
})
# Create a list of hyperparameter objects for each of the four mixture model implmentations
hpList <- function(...){
sbp <- sb <- Hyperparameters(...)
mb <- mbp <- HyperparametersMultiBatch(...)
##tbm <- HyperparametersTrios(...)
list(SB=sb,
MB=mb,
SBP=sbp,
MBP=mbp)
}
setMethod("dfr", "Hyperparameters", function(object) object@dfr)
setReplaceMethod("dfr", c("Hyperparameters", "numeric"),
function(object, value){
object@dfr <- value
object
})
| /R/methods-Hyperparameters.R | no_license | aditharun/CNPBayes | R | false | false | 9,150 | r | #' @include AllClasses.R
.parameterizeGammaByMeanSd <- function(mn, sd){
rate <- mn/sd^2
shape <- mn^2/sd^2
setNames(c(shape, rate), c("shape", "rate"))
}
qInverseTau2 <- function(eta.0=1800, m2.0=100, mn, sd){
if(!missing(mn) && !missing(sd)){
params <- .parameterizeGammaByMeanSd(mn, sd)
a <- params[["shape"]]
b <- params[["rate"]]
eta.0 <- 2*a
m2.0 <- b/a
}
shape <- 0.5*eta.0
rate <- 0.5*eta.0*m2.0
mn <- shape/rate
sd <- sqrt(shape/rate^2)
x <- qgamma(seq(0, 1-0.001, 0.001), shape=0.5*eta.0, rate=0.5*eta.0*m2.0)
x <- x[is.finite(x) & x > 0]
list(quantiles=x, eta.0=eta.0, m2.0=m2.0, mean=mn, sd=sd)
}
#' Create an object of class 'HyperparametersMultiBatch' for the
#' batch mixture model
#'
#' @param k length-one integer vector specifying number of components
#' (typically 1 <= k <= 4)
#' @param mu.0 length-one numeric vector of the of the normal prior
#' for the component means.
#' @param tau2.0 length-one numeric vector of the variance for the normal
#' prior of the component means
#' @param eta.0 length-one numeric vector of the shape parameter for
#' the Inverse Gamma prior of the component variances, tau2_h. The
#' shape parameter is parameterized as 1/2 * eta.0. In the batch
#' model, tau2_h describes the inter-batch heterogeneity of means for
#' component h.
#' @param m2.0 length-one numeric vector of the rate parameter for the
#' Inverse Gamma prior of the component variances, tau2_h. The rate
#' parameter is parameterized as 1/2 * eta.0 * m2.0. In the batch
#' model, tau2_h describes the inter-batch heterogeneity of means for
#' component h.
#' @param alpha length-k numeric vector of the shape parameters for
#' the dirichlet prior on the mixture probabilities
#' @param beta length-one numeric vector for the parameter of the
#' geometric prior for nu.0 (nu.0 is the shape parameter of the
#' Inverse Gamma sampling distribution for the component-specific
#' variances. Together, nu.0 and sigma2.0 model inter-component
#' heterogeneity in variances.). beta is a probability and must be
#' in the interval [0,1].
#' @param a length-one numeric vector of the shape parameter for the
#' Gamma prior used for sigma2.0 (sigma2.0 is the shape parameter of
#' the Inverse Gamma sampling distribution for the component-specific
#' variances).
#' @param b a length-one numeric vector of the rate parameter for the
#' Gamma prior used for sigma2.0 (sigma2.0 is the rate parameter of
#' the Inverse Gamma sampling distribution for the component-specific
#' variances)
#' @param dfr length-one numeric vector for t-distribution degrees of freedom
#' @return An object of class HyperparametersBatch
HyperparametersMultiBatch <- function(k=3L,
mu.0=0,
tau2.0=0.4,
eta.0=32,
m2.0=0.5,
alpha,
beta=0.1, ## mean is 1/10
a=1.8,
b=6,
dfr=100){
if(missing(alpha)) alpha <- rep(1, k)
new("HyperparametersMultiBatch",
k=as.integer(k),
mu.0=mu.0,
tau2.0=tau2.0,
eta.0=eta.0,
m2.0=m2.0,
alpha=alpha,
beta=beta,
a=a,
b=b,
dfr=dfr)
}
#' Create an object of class 'HyperparametersSingleBatch' for the
#' single batch mixture model
#'
#' @param k length-one integer vector specifying number of components
#' (typically 1 <= k <= 4)
#' @param mu.0 length-one numeric vector of the mean for the normal
#' prior of the component means
#' @param tau2.0 length-one numeric vector of the variance for the normal
#' prior of the component means
#' @param eta.0 length-one numeric vector of the shape parameter for
#' the Inverse Gamma prior of the component variances. The shape
#' parameter is parameterized as 1/2 * eta.0.
#' @param m2.0 length-one numeric vector of the rate parameter for
#' the Inverse Gamma prior of the component variances. The rate
#' parameter is parameterized as 1/2 * eta.0 * m2.0.
#' @param alpha length-k numeric vector of the shape parameters for
#' the dirichlet prior on the mixture probabilities
#' @param beta length-one numeric vector for the parameter of the
#' geometric prior for nu.0 (nu.0 is the shape parameter of the
#' Inverse Gamma sampling distribution for the component-specific
#' variances). beta is a probability and must be in the interval
#' [0,1].
#' @param a length-one numeric vector of the shape parameter for the
#' Gamma prior used for sigma2.0 (sigma2.0 is the shape parameter of
#' the Inverse Gamma sampling distribution for the component-specific
#' variances)
#' @param b a length-one numeric vector of the rate parameter for the
#' Gamma prior used for sigma2.0 (sigma2.0 is the rate parameter of
#' the Inverse Gamma sampling distribution for the component-specific
#' variances)
#' @param dfr length-one numeric vector for t-distribution degrees of freedom
#'
#' @return An object of class HyperparametersSingleBatch
HyperparametersSingleBatch <- function(k=0L,
mu.0=0,
tau2.0=0.4,
eta.0=32,
m2.0=0.5,
alpha,
beta=0.1, ## mean is 1/10
a=1.8,
b=6,
dfr=100){
if(missing(alpha)) alpha <- rep(1, k)
##if(missing(tau2)) tau2 <- rep(1, k)
new("HyperparametersSingleBatch",
k=as.integer(k),
mu.0=mu.0,
tau2.0=tau2.0,
eta.0=eta.0,
m2.0=m2.0,
alpha=alpha,
beta=beta,
a=a,
b=b,
dfr=dfr)
}
setValidity("Hyperparameters", function(object){
msg <- TRUE
if(k(object) != alpha(object)){
msg <- "alpha vector must be the same length as k"
return(msg)
}
msg
})
#' Create an object of class 'Hyperparameters'
#'
#' @param type specifies 'marginal' or 'batch'
#' @param k number of components
#' @param ... additional arguments are names of the slots of the Hyperparameters class, including `mu.0`, `tau2.0`, `eta.0`, `m2.0`, `alpha`, `beta`, `a`, `b`, and `dfr`).
#' @return An object of class HyperparametersMarginal or HyperparametersBatch
#'
#' @details
#' Additional hyperparameters can be passed to the HyperparametersMarginal and HyperparametersBatch models.
#'
#' @rdname Hyperparameters
#' @export
Hyperparameters <- function(type="batch", k=2L, ...){
if(type=="marginal") return(HyperparametersSingleBatch(k, ...))
if(type=="batch") return(HyperparametersMultiBatch(k, ...))
##if(type=="trios") return(HyperparametersTrios(k, ...))
}
#' Accessor for number of mixture components
#'
#' @rdname k-method
#' @aliases k<-,Hyperparameters-method
#' @param object A Hyperparameters instance
#' @param value An integer specifying the number of mixture components for the mixture model
setReplaceMethod("k", "Hyperparameters", function(object, value){
object@k <- as.integer(value)
object@alpha <- rep(1L, value)
object
})
## #' @rdname k-method
## #' @aliases k<-,HyperparametersTrios-method
## setReplaceMethod("k", "HyperparametersTrios", function(object, value){
## object@k <- as.integer(value)
## object@alpha <- rep(1L, value)
## object
## })
setValidity("Hyperparameters", function(object){
msg <- NULL
if(length(alpha(object)) != k(object)){
msg <- "alpha should be a numeric vector with length equal to the number of components"
return(msg)
}
})
#' @rdname k-method
#' @export
setMethod("k", "Hyperparameters", function(object) object@k)
setMethod("alpha", "Hyperparameters", function(object) object@alpha)
## beta is a base function
betas <- function(object) object@beta
a <- function(object) object@a
b <- function(object) object@b
setReplaceMethod("alpha", "Hyperparameters", function(object, value){
object@alpha <- value
object
})
setMethod("show", "Hyperparameters", function(object){
cat("An object of class 'Hyperparameters'\n")
cat(" k :", k(object), "\n")
cat(" mu.0 :", mu.0(object), "\n")
cat(" tau2.0 :", tau2.0(object), "\n")
cat(" eta.0 :", eta.0(object), "\n")
cat(" m2.0 :", round(m2.0(object), 3), "\n")
cat(" alpha :", alpha(object), "\n")
cat(" beta :", betas(object), "\n")
cat(" a :", a(object), "\n")
cat(" b :", b(object), "\n")
})
# Create a list of hyperparameter objects for each of the four mixture model implmentations
hpList <- function(...){
sbp <- sb <- Hyperparameters(...)
mb <- mbp <- HyperparametersMultiBatch(...)
##tbm <- HyperparametersTrios(...)
list(SB=sb,
MB=mb,
SBP=sbp,
MBP=mbp)
}
setMethod("dfr", "Hyperparameters", function(object) object@dfr)
setReplaceMethod("dfr", c("Hyperparameters", "numeric"),
function(object, value){
object@dfr <- value
object
})
|
library(ggplot2)
sccPM25 <- readRDS("summarySCC_PM25.rds")
scc <- readRDS("Source_Classification_Code.rds")
baltimore <- sccPM25[sccPM25$fips=="24510",]
losAngeles <- sccPM25[sccPM25$fips=="06037",]
motorVeh <- character()
for(i in 1:length(scc$SCC)) {
if(length(grep("Mobile", as.character(scc$EI.Sector[i]), fixed=T)) > 0) {
motorVeh <- c(motorVeh, as.character(scc$SCC[i]))
}
}
data <- rbind(baltimore, losAngeles)
data <- data[data$SCC %in% motorVeh,]
png(file="question6.png")
qplot(year, Emissions, data=data, geom=c("point", "smooth"),
method="lm", main="LA v. Baltimore", facets=.~fips)
dev.off() | /04 - Exploratory Data Analysis/Week 3/Project/question6.R | permissive | PhillipChaffee/datasciencecoursera | R | false | false | 645 | r | library(ggplot2)
sccPM25 <- readRDS("summarySCC_PM25.rds")
scc <- readRDS("Source_Classification_Code.rds")
baltimore <- sccPM25[sccPM25$fips=="24510",]
losAngeles <- sccPM25[sccPM25$fips=="06037",]
motorVeh <- character()
for(i in 1:length(scc$SCC)) {
if(length(grep("Mobile", as.character(scc$EI.Sector[i]), fixed=T)) > 0) {
motorVeh <- c(motorVeh, as.character(scc$SCC[i]))
}
}
data <- rbind(baltimore, losAngeles)
data <- data[data$SCC %in% motorVeh,]
png(file="question6.png")
qplot(year, Emissions, data=data, geom=c("point", "smooth"),
method="lm", main="LA v. Baltimore", facets=.~fips)
dev.off() |
##########################################R 프로그래밍 기초 ####################################################
############# 1. R 기초
# R 둘러보기 ###
###### 작업 공간 할당하기 ###
getwd()
dir.create("d:/r_class")
setwd("d:/r_class")
getwd()
#### Entering commands ######
1+1
# Demo of an incomplete command: at the console, R shows the '+'
# continuation prompt until the expression is completed.
# BUG FIX: the original line was a literal syntax error
# ("max(4,6,8), max(4,6," followed by ")") and aborted source()-ing
# this entire script, so it is kept here as a comment:
# max(4,6,8), max(4,6,
# )
# Several statements on one line, separated by semicolons
a<-1+1; b<-a/2
# 스크립트 활용하기 #
rnorm(20)
a<-rnorm(20)
# 도움말 기능 활용하기 #
help.start()
help(max)
?max
RSiteSearch("max")
# 명령어 히스토리 활용하기 #
#예를 들어서 아래와 같은 명령어들을 실행시켰다면
ls()
a<-rnorm(20)
b<-hist(a)
history() # 위의 명령어들을 다시 불러들인다
savehistory(file="myhistory") # 명령어 히스토리를 저장(working Directory에 저장됨)
loadhistory(file="myhistory") # 명령어 히스토리 불러오기
# 패키지 활용하기 #
path.package() #현재 활성화된 패키지들이 무엇인지 확인
data(Animals) #하드디스크에 MASS 패키지를 실행하지 않았기 때문에 파일이 없는것으로 나옴
library(MASS) #MASS 패키지를 라이브러리에 설치
data(Animals) #데이터 확인 가능
summary(Animals)
install.packages("boot") #하드디스크에 존재하지 않는 boot 패키지 다운로드 및 설치
library(help=boot) #다운로드 된 boot 패키지의 help 다큐먼트를 보여줌
help(package=boot) #웹을 통해 boot 패키지의 다큐먼트를 보여줌
??boot #웹을 통해 boot 패키지의 다큐먼트를 보여줌
## R 프로그램 파일 실행 ##
source("a.R")
######### 2. 입력과 출력###############
######### 출력하기 ###############
#######################################
pi
sqrt(2)
print(pi)
print(sqrt(2))
print(matrix(c(1,2,3,4,5,6),3,2)) # 3 x 2
print(matrix(c(1,2,3,4,5,6),2,3)) # 2 x 3
print(list("a","b","c","d"))
# print() prints exactly one object. The extra unnamed arguments in the
# call below get matched to print.default()'s other parameters and raise
# a runtime error, which would stop source()-ing this script, so the
# demo call is kept commented out:
# print("THe zoro occur at", 2*pi, "radians.") #error 발생
print("THe zoro occur at");print(2*pi);print("raidans")
#cat함수는 print의 대안으로 여러개의 항목을 묶어서 연결된 결과로 출력해줌
cat("The zoro occur at", 2*pi, "radians.","\n")
#cat함수는 간단한 벡터도 출력 가능
f<-c(0,1,1,2,3,5,8,13)
cat("The Fibonacci numbers are:",f,"....\n")
#cat함수의 한계는 행렬이나 리스트 같은 복잡한 데이터 구조를 출력할 수 없음
cat(list("a","b","c"))
####### 변수 설정하기 ##############
######################################
# 대입연산자 (<-)
x<-3
y<-2
z<-sqrt(x^2+2*y)
z
#R은 동적 타입 언어이다.
x<-c("a", "b", "c", "d")
x
a<<-3
a
b=4
b
5->c
c
#변수 목록 보기
x<-10
y<-30
z<-c("one", "two", "three")
f<-function(n,p) sqrt(p*(1-p)/n)
f(200,0.4)
ls()
#변수 목록 뿐아니라 구조를 보여주는 함수
ls.str()
rm(a) # 변수 삭제 , 한번 변수를 삭제하면 영원히 삭제 됨
rm(b,c,z) # 한꺼번에 변수 삭제 가능
ls()
# warning 변수 목록을 리스트 인자에 지정한 다음 삭제하게 됨에 따라 기존에 있는 모든 변수를 삭제하게 됨 조심해야 함.
rm(list=ls())
ls()
####### 벡터 생성하기 ##############
######################################
#R 프로그램에서 사용하는 가장 기본적인 데이터셋의 형태
c(0,1,1,2,3,5,8,13)
c(1*pi,2*pi,3*pi,4*pi)
c("I","love","you.")
c(TRUE,TRUE,FALSE,FALSE)
d<-c(1,2,3)
e<-c(4,5,6)
f<-c("a","b","c")
#벡터의 인자 자체가 벡터인 경우 벡터를 풀어서 하나로 합친다.
g<-c(d,e,c(7,8,9))
g
#벡터의 인자 중 하나라도 문자가 포함되어 있는 경우 모든 원소들은 문자형으로 정의 된다.
h<-c(d,f)
h
mode(g)
mode(h)
####### 기본적인 통계량 계산하기 ###########
#######################################
a<-c(0,1,1,2,3,5,8,13)
b<-log(a+1)
c<-c(0,1,1,2,3,5,8,13,NA)
#평균
mean(0,1,1,2,3,5,8,13)
mean(a)
#중앙값
median(0,1,1,2,3,5,8,13)
median(a)
#표준편차
sd(0,1,1,2,3,5,8,13)
sd(a)
#분산
var(0,1,1,2,3,5,8,13)
var(a)
#상관계수
cor(a,b)
#공분산
cov(a,b)
#평균, 중간값, 표준편차는 데이터 프레임에서 각 변수들을 기준으로 값을 산출
#분산은 공분산 행렬을 제공, 상관계수는 상관계수 행렬을 제공
## 단하나의 NA 값이 포함되어도 결과는 NA로 나오므로 아래와 같이 활용
mean(c)
sd(c)
mean(c, na.rm=TRUE)
sd(c, na.rm=TRUE)
hei<-c(187,178,176,169,181,172)
wei<-c(82,72,70,65,74,68)
dfm_1<-data.frame(hei,wei)
dfm_1
dfm<-data.frame(height=c(187,178,176,169,181,172),
weight=c(82,72,70,65,74,68))
dfm
mean(dfm)
mean(dfm$height)
sd(dfm$height)
median(dfm$height)
cor(dfm$height,dfm$weight)
cor(dfm)
cov(dfm$height,dfm$weight)
cov(dfm)
####### 수열 생성하기 ##############
##################################
1:10
# by의 디폴트는 1
seq(from=1, to=10, by=1)
rep(1,times=5)
0:9 # 0~9
5:10 # 5~10
9:0 # 9~0
# 수열의 길이를 지정하고자 할때, length.out 사용
seq(from=0, to=50, length.out=5)
seq(from=1.0, to=2.0, length.out=5)
rep(pi, times=5)
############ 벡터의 비교 ##############
####################################
a<-3.1
a==pi
a!=pi
a<pi
a>pi
a<=pi
a>=pi
z<-c(3,pi, 4)
z1<-c(pi,pi,4)
z2=pi
z==z1
z==z2
any(z==z1) #벡터의 값들 중 하나라도 같은가?
all(z==z1) #벡터의 값들이 모두 같은가? all(z==0) 모든 값들이 0인가?는 많이 활용된다.
############ 벡터에 있는 원소 선택하기 ########
###########################################
a<-c(0,1,1,2,3,5,8,13)
a
a[1]
a[2]
a[3]
a[4]
a[1:3]
a[4:7]
a[c(1,3,5,7)]
# 인덱스가 음수인 경우는 1번째 값을 제외하라는 의미
a[-1]
a[-(2:3)]
# 중앙값보다 큰 모든 원소를 선택
a>median(a)
a[a>median(a)]
# 상하위 5% 안에 있는 모든 원소를 선택
a[a<quantile(a,0.05)|a>quantile(a,0.95)]
# 평균에서 + - 표준편차를 넘는 모든 원소 선택
abs(a-mean(a))>2*sd(a)
a[abs(a-mean(a))>2*sd(a)]
# NA나 NULL이 아닌 모든 원소를 선택
abs(a-mean(a))>2*sd(a)
a[!is.na(a) & !is.null(a)]
names(a)<-c("1st","2nd","3th","4th","5th","6th","7th","8th")
a
a["2nd"]
a["4th"]
############ 벡터 연산 수행하기 ########
###########################################
v1<-c(10,11,12,13,14)
v2<-c(1,2,3,4,5)
v1+v2
v1-v2
v1*v2
v1/v2
v1^v2
v1+2
v1-2
v1*2
v1/2
v1^2
mean(v1)
v1-mean(v1)
(v1-mean(v1))/sd(v1)
############ 연산자 우선순위 ########
###########################################
#PPT 참조
############ R 함수 정의 ########
###########################################
#
# function(매개변수1, 매개변수2, 매개변수3,....) expr
#
# function(매개변수1, 매개변수2, 매개변수3,....) {
# expr1
# expr2
# expr3
# .....
# }
#
##############################################?
# Standardize a numeric vector to z-scores (mean 0, standard deviation 1).
f1 <- function(x) {
  centered <- x - mean(x)
  centered / sd(x)
}
f1(1:10)
# Compute the skewness and excess kurtosis of a numeric vector using the
# population (biased) central-moment estimators:
#   skewness        = m3 / m2^(3/2)
#   excess kurtosis = m4 / m2^2 - 3      (0 for a normal distribution)
# where m_k is the k-th central moment mean((x - mean(x))^k).
# Returns a named length-2 numeric vector c(skewness, kurtosis);
# positional access (res[1], res[2]) still works as before.
skew.and.kurto <- function(x)
{
  m <- mean(x)                 # hoisted: the original recomputed mean(x) five times
  m2 <- mean((x - m)^2)        # 2nd central moment (biased variance)
  m3 <- mean((x - m)^3)        # 3rd central moment
  m4 <- mean((x - m)^4)        # 4th central moment
  skew <- m3 / m2^1.5
  kurto <- m4 / m2^2 - 3       # subtract 3 to get EXCESS kurtosis
  c(skewness = skew, kurtosis = kurto)
}
t5 <- 1:1000  # deterministic sequence 1..1000 (NOT random t(df=5) draws, despite the original note)
skew.and.kurto(t5)
######### 데이터 입력하기 ############
####################################
#키보드로 데이터를 입력할 때 conbine 함수를 사용
a<-c(1,2,3,4,5,6,7,8,9)
#빈 데이터 프레임을 만든 뒤 내장된 편집기를 불러와 데이터 입력
temp<-data.frame()
b<-edit(temp)
fix(b)
b
#메뉴->edit->data editor -> object명 입력
# 간단한 문제를 다룰 때는 데이터 파일을 생성하고 읽어오는 것이 귀찮습니다.
# 이럴때는 R에 데이터를 곧바로 입력하는 게 편리하합니다.
# 이러한 방법을 위해 데이터 프레임을 정의하고 c함수로 벡터 생성해서 데이터를 입력해 봅시다.
c<-data.frame(
label=c("low","mid","high"),
lbound=c(0,0.5,1.5),
ubound=c(0.5,1.5,2.5)
)
c
######### 데이터 자리수 조절 ############
####################################
# R을 기본적으로 부동소수점을 포함해 7자리를 표시
pi
# 자리수를 4자리로 표현
print(pi,digits=4)
cat("pi 값은 ", pi,"입니다.", "\n")
# cat 함수는 형식 조정 불가->format 함수 활용
cat("pi 값은 ",format(pi,digits=4),"입니다.", "\n")
# 옵션함수를 활용해서 digit의 기본값을 바꾸기
options(digits=3)
pi
######### 파일에 출력하기 ############
####################################
a<-sqrt(10)
cat("The Answer is", a, "\n", file="fileout1.out")
# sink("fileout2.txt")
#- 결과를 txt 파일과 pdf로 출력함
sink("fileout2.txt", append=TRUE)
pdf("fileout2.pdf")
library(MASS)
data(Animals)
Animals
m1<-lm(log(brain)~log(body),data=Animals)
str(m1)
attach(Animals)
plot(log(brain)~log(body))
abline(m1$coef, lty="dotted")
dev.off()
sink()
getwd()
######### 파일 목록보기 ############
####################################
list.files() #워킹 디렉토리의 파일을 확인할 수 있음.
list.files(recursive=T) #하위 디렉토리의 숨겨놓은 파일(마침표로 시작하는 파일)까지 모두 볼수 있음
list.files(recursive=T, all.files=T)
######### Fixing 'cannot open file' on Windows ############
##########################################################
# Windows Explorer shows the file at d:\r_class\exam1.txt,
# yet the naive read fails:
# f1<-read.table("d:\r_class\exam1.txt")
# Reason: a bare backslash in an R string starts an escape sequence, so
# "\r" is parsed as a carriage-return character and the path no longer
# names the file. (Kept commented out so this script can be sourced.)
# On Windows, R accepts the forward slash (/) as a path separator,
# which sidesteps the problem:
f1<-read.table("d:/r_class/exam1.txt")
# A doubled backslash (\\) is parsed as a single backslash (\),
# which also works:
f1<-read.table("d:\\r_class\\exam1.txt")
############고정데이터 불러오기####################
###############################################
# f2<-read.fwf("exam2.txt", widths=c(w1,w2,w3,w4)) 고정 자리 데이터 읽기
#1째는 과제제출여부, 2째는 점수 사이에는 2칸의 빈칸
f2<-read.fwf("exam2.txt", widths=c(1,-2,2))
f2<-read.fwf("exam2.txt", widths=c(1,-2,2),
col.names=c("report","score")) # col명 지정
f2
###########비고정데이터 불러오기#############################
# (탭 또는 공백으로 구분된 )텍스트 파일로 된 테이블형식의 데이터 불러오기
# 각 줄이 하나의 레코드
f3<-read.table("kings.txt")
f3
# 하나의 레코드 내에서, 공백, 탭, 쉽표 등 한글자 짜리 구분문자
# f3<-read.table("kings.txt", sep= :, )
# 각 레코드ㅡ 동일한 수의 필드를 가짐
class(f3$V1)
# 문자열 레코드값을 factor로 인식하지 않고 문자열로 인식하게 함
f3<-read.table("kings.txt", stringsAsFactor=FALSE)
f3
class(f3$V1)
f3<-read.table("kings.txt", na.strings=".") # SAS의 결측값(.) 형태의 데이터를 NA 형태로 결측값을 변경
f3
# csv 파일은 R, 엑셀, 다른 스프레드시트 프로그램, 데이터베이스 관리자들이 자주 사용하는 형식
f4<-read.csv("frame.csv",header=T, as.is=T)
f4
class(f4$ename)
# Load a data file chosen interactively via the file-explorer dialog.
# BUG FIX: the original assigned the result to a variable named
# 'file.choose', shadowing the base function of the same name;
# use a distinct variable name instead.
chosen_file <- read.csv(file.choose())
head(chosen_file)
########### csv 파일 출력하기 ############################
######################################################
write.csv(f4,file="new_frame.csv", row.names=FALSE)
############ HTML 테이블 읽어오기 ############################
####세계 500대 기업 리스트#
install.packages(c("XML","httr","RCurl"))
library(XML)
library(httr)
library(RCurl)
u<-GET("https://en.wikipedia.org/wiki/List_of_S%26P_500_companies",encoding="UTF-8")
tab1<-readHTMLTable(rawToChar(u$content),encoding="UTF-8")
tab1
############ 복잡한 구조 파일 읽어오기 ############################
# 구조가 복잡하여 10줄만 읽고 멈춤
# l1<-readlines("exam1.txt",n=10)
# 변수를 숫자형으로 인식하고 읽어들임
l2<-scan("numeric.txt",what=numeric(0))
l2
#scan()에 활용되는 인자
#what=numeric(0) 다음 토큰은 숫자로 해석
#what=integer(0) 다음 토큰은 정수로 해석
#what=complex(0) 다음 토큰은 복소수로 해석
#what=character(0) 다음 토큰은 문자열로 해석
#what=logical(0) 다음 토큰은 논리값으로 해석
#n=number 이만큼의 토큰을 읽은 다음에 멈춤
#nlines=number 이만큼의 입력 라인을 읽은 다음 멈춤
#skip=number 데이터를 읽기 전에 건너 뛰는 입력 라인의 개수
#na.strings=list NA로 해석될 문자열 리스트
# 예제 1. 경주시외버스터미널 가격표
# 문자와 숫자가 조합된 형태의 데이터 읽기
l3<-scan("gyeongju_ternimal.csv",
what=list(des=character(0),fare=numeric(0)),
skip=1,sep=",")
gyeongju<-as.data.frame(l3)
gyeongju
########### Saving and transferring objects ######################
######################################################
save(gyeongju, file="myData.RData") # save() writes a binary file; dput()/dump() write ASCII
load("myData.RData")
# BUG FIX: the object in the workspace is named 'gyeongju'; "myData" is
# only the FILE name, so dput(myData)/dump("myData") failed with
# "object 'myData' not found". Reference the actual object:
dput(gyeongju, file="myData.txt")
dump("gyeongju", file="myData.txt") # note: dump() takes the object NAME as a quoted string
# save() keeps files small by using a binary format; use dput()/dump()
# when an ASCII representation is needed (e.g. attaching a reproducible
# data dump to a mailing-list question so others can recreate the data).
# If a program relies on a dataset from a particular package, that
# package must be loaded before the data can be used.
library(MASS)
data("Animals")
Animals
attach(Animals)
plot(log(brain)~log(body))
############# 3. 데이터 구조와 데이터 프레임(1)
######### 데이터 구조 ############
####################################
a<-read.csv("frame.csv", header=T)
b<-as.data.frame(a)
b
#불러들인 csv 데이터를 데이터프레임에 적용
#1번째 리스트를 프린트
b[1]
#1번째 리스트내의 값들을 프린트
b[[1]]
# 항목과 그 값들을 프린트
b[1]
b["empno"]
# 항목명을 통해서 값들을 프린트
b[["empno"]]
b$empno
#1행, 2열에 해당하는 원소
b[1,2]
#2행에 해당하는 모든 원소
b[2, ]
#2열에 해당하는 모든 원소
b[ , 2]
b[ , 3]
###### 단일값 ######
#단일값은 원소가 하나인 벡터로 의미
pi
length(pi)
pi[1]
pi[2]
####### 행렬 #########
#차원을 가지고 있는 벡터로 인식하면 됨
a<-1:9
dim(a)<-c(3,3)
a
####### 배열 #########
b<-1:12
dim(b)<-c(2,3,2)
b
####### 벡터에 데이터 추가와 삽입 ##########
v1<-c(1,2,3)
#벡터에 단일값 삽입
v1<-c(v1, 4,5)
v1
v2<-c(6,7,8,9,10)
#벡터에 다른 벡터를 삽입
v1<-c(v1,v2)
v1
#벡터의 특정위치에 단일값 삽입
v1[11]<-11
v1
#벡터의 특정 위치에 단일값 삽입
append(v1,100, after=5)
#벡터의 특정 위치에 데이터 삽입
append(v1,v2, after=0)
#col 값을 벡터로 연결
c1<-cbind(1:6)
c1
c2<-cbind(1:3)
c2
#두개의 벡터를 col으로 연결
cbind(1:6,1:3)
#두개의 object를 연결할 때는 길이가 같아야 함
cbind(c1,c2)
c3<-cbind(1:6,1:3)
cbind(c3,c1)
###### recycling rule ########
a<-c(1,2,3,4,5,6)
b<-c(7,8,9)
# The shorter vector is recycled: b repeats as c(7,8,9,7,8,9), giving
# c(8,10,12,11,13,15). No warning is issued here because length(a) is
# an exact multiple of length(b).
a+b
# The recycling rule also applies inside functions such as cbind():
# b is repeated to fill the 6 rows implied by a.
cbind(a,b)
########## 요인 생성 #########
###############################
#
#연속형 변수를 범주형변수로 변경
f<-factor(c2)
f
#범주형변수에 데이터 삽입
f<-factor(c("A","B","C","A","B","B","B"))
f
#범주형변수에 레벨을 정의
f<-factor(f,c("A","B","C","D","F"))
f
############## 리스트 생성하기 ######################
# 리스트는 각 원소의 모드가 달라도 되는 특징이 있음
l<-list(3.14, "Tom", c(1,2,3,4), mean)
l
# 빈 리스트를 생성하고 원소를 채워 넣는 방법으로 리스트 생성
l1<-list()
l1[[1]] <- 2.714
l1[[2]] <- "Yoon"
l1[[3]] <- c(5,4,3)
l1[[4]] <- mean
l1[3]
l1[[3]]
l1[c(2,3)]
# 리스트의 원소에 이름을 붙일 수 있음
l2<-list(mid=1.1, far=1.5, more.far=2.0)
l2
# 자리수로 리스트의 원소를 선택하기
l2[[1]]
l2[1]
l2[c(1,2)]
# 리스트의 구조를 확인해 보기->numeric
class(l2[[1]])
mode(l2[[1]])
# 리스트의 구조 확인해 보기 ->list
class(l2[1])
mode(l2[1])
# 원소의 이름으로 리스트의 원소를 선택하기
l2[["mid"]]
l2["mid"]
l2$mid
l2[c("mid","far")]
# 리스트에서 원소를 제거하기
l2[["more.far"]]<-NULL
l2
l3<-list(mid=1.1, far=1.5, more.far=NULL) # 리스트의 원소에 NULL 이 포함되어 있을 때 삭제하기
l3
l3[sapply(l3, is.null)]<-NULL # 리스트의 원소의 값이 NULL인 원소를 삭제
l3
###### Various ways to remove elements from a list #######
l3[l3==0]<- NULL # remove elements equal to 0
l3[is.na(l3)]<- NULL # remove NA elements
# abs() is not defined for lists ("non-numeric argument to mathematical
# function"), so the line below raises a runtime error — as the original
# note itself warned — and is kept commented out so the script can be
# sourced end-to-end:
# l3[abs(l3)<1]<- NULL # remove elements whose absolute value is < 1
########## 행렬의 초기 내용 설정과 연산 #########
##############################################
#벡터를 생성
rowm1<-c(1.1,1.2,1.3,1.4,2.4,2.3)
rowm2<-c(3.1,3.4,3.6,4.1,4.3,4.7)
rowm3<-c(1.1,1.2,1.3,3.1,3.4,3.6,4.1,4.3,4.7)
#벡터의 데이터를 2x3 행렬로 변경,,, 디폴트는 열을 기준으로 입력됨
m1<-matrix(rowm1,2,3)
m1
#벡터의 데이터를 3x2 행렬로 변경,,, 디폴트는 열을 기준으로 입력됨
m2<-matrix(rowm2,3,2)
m2
#벡터의 데이터를 행을 기준으로 2x3 행렬로 변경
m3<-matrix(rowm2,2,3,byrow=T)
m3
# 2x2 행렬을 0으로 초기화한 행렬 생성
m4<-matrix(0,2,2)
m4
# 2x2 행렬을 NA으로 초기화한 행렬 생성
m5<-matrix(NA,2,2)
m5
# 벡터의 구조를 행렬구조로 변경하여 행렬을 생성
dim(rowm3)<-c(3,3)
rowm3
r1<-c(1,2,3,4,5,6)
r2<-c(6,5,4,3,2,1)
r3<-c(1,2,3,4)
r1<-matrix(r1,2,3)
r2<-matrix(r2,3,2)
r3<-matrix(r3,2,2)
r1
r2
r3
###### 행렬의 연산 ##################
# 전치행렬을 구함
t(r3)
# 역행렬을 구함
solve(r3)
# 행렬의 곱 구함
r1%*%r2
# 행렬의 곱 구함
r2%*%r1
#2차 단위행렬을 구함
diag(2)
######### 행렬의 열과 행에 이름 붙이기 ###############
# 행렬의 행에 이름붙이기
rownames(m1)<-c("class A","class B")
m1
# 행렬의 열에 이름붙이기
colnames(m1)<-c("min_d","dis","max_d")
m1
######### 행렬의 열과 행 선택하여 하위행렬 생성 ###############
#m1 행렬에서 1행을 뽑아 벡터 vm1 생성
vm1<-m1[1,]
vm1
#m1 행렬에서 2열을 뽑아 벡터 vm2 생성
vm2<-m1[,2]
vm2
#m1 행렬에서 1행을 뽑아 하위행렬 sm1 생성
sm1<-m1[1,, drop=FALSE]
sm1
#m1 행렬에서 2열을 뽑아 하위행렬 sm2 생성
sm2<-m1[,2, drop=FALSE]
sm2
############# 4. 데이터 구조와 데이터 프레임(2)
######### 데이터 프레임 만들기 ########
# 벡터와 요인이 혼합된 데이터라면 data.frame 함수를 사용해서 조립
## dfm<-data.frame(v1,v2,v3,f1,f2)
# 데이터가 리스트 구조라면 as.data.frame 으로 재설정 가능
# dfm<-as.data.frame(list.of.vectors)
#벡터 생성
level<-c("low","mid","high")
lbound<-c(0.1,0.5,1.5)
ubound<-c(0.5,1.5,2.5)
#벡터를 데이터프레임으로 변환
dfm1<-data.frame(level, lbound,ubound)
dfm1
#외부 데이터를 불러와 어브젝트에 할당
a<-read.csv("frame.csv", header=T)
a
a$ename<-as.character(a$ename)
a$phone<-as.character(a$phone)
# 외부 데이터가 리스트 구조임에 따라 구조 변경으로 데이터를 데이터프레임에 적용
dfm1<-as.data.frame(a)
dfm1
#오브젝트의 내용 중 필요한 변수만 선택해서 data.frame 형식으로 재 구성
dfm2<-data.frame(a$empno, a$ename, a$deptno)
dfm2
# 열의 이름을 적용하여 data.frame 형식으로 재 구성
dfm2<-data.frame(empno=a$empno, ename=a$ename, deptno=a$deptno)
dfm2
# 개별 벡터로 정의된 데이터들을 리스트로 구조를 변경하고 데이터 프레임형식으로 적용
lst<-list(a1=a$empno, a2=a$ename, a3=a$deptno, a4=a$phone)
lst
lst1<-as.data.frame(lst)
lst1
#하나의 데이터를 리스트구조로 셋팅
a5<-list(a1=10011, a2="Jones", a3="20", a4="523-4572")
a5
#기존의 데이터 프레임에 a5를 추가
lst
lst1<-rbind(lst1,a5)
#추가할 행 정보를 데이터프레임 형식으로 정의
newrow<-data.frame(a1=10012, a2="Jone", a3="30", a4="523-4576")
#행 결합을 통해 2개의 데이터 프레임을 결합
lst1<-rbind(lst1, newrow)
lst1
# 다른 표현으로
lst1<-rbind(lst1,
data.frame(a1=10013, a2="Tom", a3="11", a4="523-4577"),
data.frame(a1=10014, a2="Tomy", a3="20", a4="523-4578"))
lst1
# 데이터 프레임 할당하기
#1,000,000개의 행과 3개의 열(2개는 수치형, 1개는 문자형)으로 데이터 프레임을 할당
N<-1000000
dtfm<-data.frame(no=numeric(N), name=character(N), score=numeric(N))
dtfm
#데이터프레임 내 2번째 원소를 선택
lst1[[2]]
#데이터프레임 내 2번째 원소내 리스트를 선택
lst1[2]
# 데이터프레임내 2번째 행원소를 선택
lst1[2,]
lst1[,2]
# 데이터프레임 내, 모든 행과 1,3번째 열을 선택
lst1[,c(1,3)]
## subset(데이터 프레임,select=c(열이름, 열이름,...)) 으로 쉽게 데이터 선택
## subset(데이터 프레임,select=c(열이름, 열이름,...),subset=(열이름>조건))
lst<-subset(lst1,select=-a4)
lst
lst<-subset(lst1,select=c(a1, a3))
lst
lst<-subset(lst1,select=c(a1,a3),subset=(a3==30))
lst
lst<-subset(lst1,subset=(a3==30))
lst
############## 연습 문제 #############
# MASS 라는 패키지 내 Cars93 데이터셋에서 #####
# 1) city 에서 갤런당 30마일 이상 주행하는 연비를 가지는 차를 선택 #
# 2) 고속도로에서 MPG 값이 중앙값이상인 모든 차의 제조사와 모델명 ##
#########################################################
library(MASS)
?Cars93
subset(Cars93, select=Model, subset=(MPG.city>30))
newCars93<-subset(Cars93, select=c(Manufacturer, Model,Type, Price, RPM),
subset=c(MPG.highway>median(MPG.highway)))
newCars93
# Drop a column by name
newCars93<-subset(newCars93, select=-Type)
newCars93
cor(subset(newCars93, select=c(-Manufacturer, -Model)))
cor(newCars93$Price, newCars93$RPM)
# Rename columns of the data frame.
# BUG FIX: after dropping Type the frame still has 4 columns
# (Manufacturer, Model, Price, RPM); assigning a length-2 name vector
# to colnames() fails, so rename only the first two columns:
colnames(newCars93)[1:2]<-c("Manu", "Modelname")
newCars93
####### data editor 에서 데이터 변경 ##########
## 주의)실행 취소 기능이 없다.
##################################################
# 데이터 에디터를 불러들임
temp<-edit(lst1)
# 에디터에서 변경한 내용을 다시 저장
lst1<-temp
# 에디터에서 변경한 내용을 덮어씀
fix(lst1)
lst1
######### NA 값이 있는 행을 삭제하기 ###########
#벡터 생성
x<-c(0.1,-0.5,1.5,1.6,-0.9,NA)
y<-c(0.5,-1.5,NA,NA,1.5,2.5)
#벡터를 데이터프레임으로 변환
NA_example<-data.frame(x,y)
NA_example
cumsum(NA_example)
# NA 값을 가지고 있는 행을 삭제
NA_clean<-na.omit(NA_example)
NA_clean
cumsum(NA_clean)
# 두개의 데이터 프레임을 합칠때
x<-c(1.5,1.3,1.5,1.4,1.8,1.8)
y<-c(2.7,2.8,3.5,3.2,3.4,3.9)
z<-c(5.7,5.8,6.9,5.8,5.1,5.6)
b_dfm1<-data.frame(x,y)
b_dfm2<-as.data.frame(z)
cb_dfm<-cbind(b_dfm1,b_dfm2)
cb_dfm
x<-c(1.5,1.3,1.5)
y<-c(2.7,2.8,3.5)
z<-c(5.7,5.8,6.9)
b_dfm3<-data.frame(x,y,z)
rb_dfm<-rbind(b_dfm3,b_dfm3)
rb_dfm
#recycling rule 주의
x<-c(1.5,1.3,1.5,1.4,1.8,1.8)
y<-c(2.7,2.8,3.5,3.2,3.4,3.9)
z<-c(5.7,5.8,6.9)
b_dfm1<-data.frame(x,y)
b_dfm2<-as.data.frame(z)
cb_dfm<-cbind(b_dfm1,b_dfm2)
cb_dfm
x<-c(1.5,1.3,1.5)
y<-c(2.7,2.8,3.5)
z<-c(5.7,5.8,6.9)
a<-c(8.9,7.9,5.9)
b_dfm3<-data.frame(x,y,z)
b_dfm4<-data.frame(x,y,a)
rb_dfm<-rbind(b_dfm3,b_dfm4)
rb_dfm
######## 두개의 데이타 프레임을 동일한 변수를 중심으로 합치기 #######
T_name<-c("T1","T2","T3","T4","T5","T6")
x<-c(1.5,1.3,1.5,1.4,1.8,1.8)
y<-c(2.7,2.8,3.5,3.2,3.4,3.9)
T_na<-c("T1","T3","T5")
z<-c(5.7,5.8,6.9)
b_dfm1<-data.frame(T_name,x,y)
b_dfm2<-data.frame(T_name=T_na,z)
mg_dfm<-merge(b_dfm1,b_dfm2,by="T_name", all=T)
mg_dfm
?merge
#
#
#한번더 연습해 보자
#
#
a<-read.csv("frame.csv", header=T)
b<-read.csv("frame2.csv", header=T)
merg1<-as.data.frame(a)
merg2<-as.data.frame(b)
merg1
merg2
#empno 변수를 중심으로 데이터 프레임 병합
merg <-merge(merg1,merg2,by="empno")
merg
#enmae.y 가 중복되어 삭제
merg<-subset(merg, select=-ename.y)
merg
#
#데이터 프레임 내용을 더 쉽게 접근하기
#
#
zz<-with(rb_dfm, (x+y-z))
zz
attach(rb_dfm)
zzz<-x+y-z
zzz
detach()
############# 5. 데이터 변형
# 자료형 변환하기
#
#as.charater()
#as.complex()
#as.numeric() 또는 as.double()
#as.integer()
#as.logical()
# 데이터 구조 변환하기
#
#as.data.frame()
#as.list()
#as.matrix()
#as.vector()
######### 데이터 변형 ############
####################################
# apply 함수, (apply, lapply, sapply, tapply, mapply)###
# by함수, split 함수 ###
# 위의 함수들을 통해 단번에 데이터를 변형하고 처리할 수 있다. ###
#########################################################
# 요인을 통해 집단 정의 ###
v<-c(24,23,52,46,75,25)
w<-c(87,86,92,84,77,68)
f<-factor(c("A","A","B","B","C","A"))
dfm<-data.frame(value=v,group=f)
dfm
### 벡터를 여러 집단으로 분할하기 ##
#group<-split(v,f) #벡터를 요인에 따라 분할
#group <-unstack(data.frame(v,f)) #벡터가 동일한 길이인 경우 리스트를 데이터 프레임으로 변환해줌
group<-split(v,f)
group
group<-split(w,f)
group
group <-unstack(data.frame(v,f))
group
group<-unstack(dfm)
group
library(MASS)
head(Cars93)
split(Cars93$MPG.city, Cars93$Origin) # Origin={USA,non-USA}, MPG.city는 도시에서의 연비
g<-split(Cars93$MPG.city, Cars93$Origin)
g
mean(g[[1]])
mean(g[["USA"]])
mean(g[[2]]) #MPG 평균 계산
mean(g[["non-USA"]])
### 리스트의 각 원소에 함수 적용 #####
#list<-lapply(l,func)
#vector<-sapply(l,func)
s1<-c(91,87,95,96,89,87,86,85,84,86,88,92,91,93,92,92,91,93,94,94,95,96,96,96,99,95,98,97,92,86,84,89,87,86,89,85,84)
s2<-c(89,86,85,92,93,91,90,89,81,84,85,89,92,95,96,91,93,92,90,90,92,91,93,92,90,92,92,93,94,99,95,96,94)
s3<-c(89,86,78,89,84,95,87,92,90,90,91,93,93,92,93,94,95,95,96,98,100,85,79,82,89,86,95,89,92,91,90,90,93,90,85,86,84,90)
s4<-c(89,79,85,86,86,85,84,82,82,91,92,100,89,91,92,90,93,91,90,85,86,84,87,89,90,90,89,86,89,85,89,87,84,80,79,90,82)
length(s1)
length(s2)
length(s3)
length(s4)
scores<-list(Koran=s1,English=s2,Matt=s3,Chiness=s4)
scores
lapply(scores,length)
sapply(scores,length)
sapply(scores,mean)
sapply(scores,sd)
sapply(scores,range)
ttest<-lapply(scores,t.test)
ttest
sapply(ttest, function(t) t$conf.int)
##### 모든 행에 함수 적용하기 #####
## results<-apply(met, 1, func)
m1<-c(82.5,88.2,89.2,87.5,89.9,78.3,79.8,80.9,81.9,83.5,85.6,87.2,88.2,89.5,91.5,78.9,79.2,81.5,83.2,82.5)
dim(m1)<-c(4,5)
colnames(m1) <- c("tr1","tr2","tr3","tr4","tr5")
rownames(m1) <- c("Tom","Yoon","Moon","Song")
m1
#apply함수는 행렬의 처리를 위해 만들어졌고 1-행,2-열을 func으로 처리
apply(m1,1,mean)
apply(m1,2,mean)
#lapply는 리스트 형태에서 적용할 수 있으므로 행렬의 모든 원소를 리스트로 인식함
lapply(m1,mean)
#sapply는 모든 행렬의 원소를 벡터로 인식하여 반환함
sapply(m1,mean)
t<-read.csv("test.csv", header=T)
test<-data.frame(t)
test
#데이터 프레임에서 apply를 적용하려면 데이터가 모두 숫자나 문자로 동질적인 경우만 적용가능
apply(test,1,mean)
apply(test,2,mean) #
#데이터 프레임의 각 열들의 클레스를 확인할 때 활용 가능
sapply(test,class)
#데이터 프레임에서 열을 기준으로만 func을 적용할 때 사용가능, lapply는 리스트로 값을 반환
lapply(test,mean)
#데이터 프레임에서 열을 기준으로만 func을 적용할 때 사용가능, sapply는 벡터로 값을 반환
sapply(test,mean)
###### 데이터 집단에 함수 적용하기 ###############
###############################################
###############################################
# tapply(x,f,func) # x는 벡터, f는 집단 분류 요인, func는 함수
Cars93
attach(Cars93)
sum(Weight)
mean(Weight)
Origin
tapply(Weight,Origin,sum) # Cars93 데이터 프레임에서 무게를 생산지 구분별로 합계
tapply(Weight,Origin,mean) # Cars93 데이터 프레임에서 무게를 생산지 구분별로 평균
tapply(Weight,Origin,length) # Cars93 데이터 프레임에서 무게의 객수를 생산지 구분별로 카운트
###### 행 집단에 함수 적용하기 ###############
###############################################
###############################################
library(MASS)
by(Cars93, Origin, summary) # Cars93 데이터 프레임에서 생산지구분별로 요약
head(Cars93)
attach(Cars93)
model<-by(Cars93, Origin, function(df) lm(Price~Weight+EngineSize,data=df))
model
summary(model[[1]])
lapply(model,confint)
# 함수를 생성
gcd<-function(a,b){
if(b==0)return(a)
else return(gcd(b,a%/%b))
}
gcd(c(1,2,3), c(9,6,3))
mapply(gcd, c(1,2,3), c(9,6,3))
#문자열다루기
#문자열 길이 알아내기
nchar("Tom")
nchar("my name is Tom")
n<-c("my", "name", "is", "Tom")
length(n)
#문자열 연결하기
paste("my", "name", "is", "Tom")
paste("the pi is approximatly", pi)
name<-c("Tom", "Moe", "Larry")
paste(name,"loves me.")
paste(name,"loves me", collapse=", and ")
#하위문자열 추출하기
substr("Statistics",3,4)
#구분자로 문자열 분할하기
path<-"/home/dataedu/basic/R"
strsplit(path,"/")
#하위 문자열 대체하기
s<-"Curly is the smart one. Curly is funny, too."
sub("Curly", "Tom",s)
gsub("Curly", "Tom",s)
#문자열의 모든 쌍별 조합 만들기
location<-c("Seoul","Pusan","Inchon")
treatment<-c("T1","T2","T3")
outer(location, treatment, paste, sep="-")
#현재 날짜 알아내기
Sys.Date()
#문자열을 날짜로 변환하기
as.Date("2014-12-25")
as.Date("12/25/2014",format="%m/%d/%Y")
#날짜를 문자열로 변환하기
as.character(Sys.Date())
format(Sys.Date(), format="%m/%d/%Y")
#날짜 일부 추출하기
d<-as.Date("2014-12-25")
p<-as.POSIXlt(d)
p$mday
p$year
p$year+1900
#날짜로 수열 생성하기
start<-as.Date("2014-12-01")
end<-as.Date("2014-12-25")
countdown<-seq(from=start, to=end, by=1)
countdown
seq(from=start, by="month", length.out=12)
seq(from=start, by="3 months", length.out=5)
seq(from=start, by="year", length.out=5)
| /강의용+R+Full+code/1. R 프로그램 기초/2. R code/R 프로그래밍_강의자용_190923.R | no_license | Maphnew/R_edu_201909 | R | false | false | 33,964 | r |
##########################################R 프로그래밍 기초 ####################################################
############# 1. R 기초
# R 둘러보기 ###
###### 작업 공간 할당하기 ###
getwd()
dir.create("d:/r_class")
setwd("d:/r_class")
getwd()
#### 커맨드 입력하기 ######
1+1
# 불완전한 커맨드 입력
max(4,6,8), max(4,6,
)
# 여러줄을 한꺼번에 실행
a<-1+1; b<-a/2
# 스크립트 활용하기 #
rnorm(20)
a<-rnorm(20)
# 도움말 기능 활용하기 #
help.start()
help(max)
?max
RSiteSearch("max")
# 명령어 히스토리 활용하기 #
#예를 들어서 아래와 같은 명령어들을 실행시켰다면
ls()
a<-rnorm(20)
b<-hist(a)
history() # 위의 명령어들을 다시 불러들인다
savehistory(file="myhistory") # 명령어 히스토리를 저장(working Directory에 저장됨)
loadhistory(file="myhistory") # 명령어 히스토리 불러오기
# 패키지 활용하기 #
path.package() #현재 활성화된 패키지들이 무엇인지 확인
data(Animals) #하드디스크에 MASS 패키지를 실행하지 않았기 때문에 파일이 없는것으로 나옴
library(MASS) #MASS 패키지를 라이브러리에 설치
data(Animals) #데이터 확인 가능
summary(Animals)
install.packages("boot") #하드디스크에 존재하지 않는 boot 패키지 다운로드 및 설치
library(help=boot) #다운로드 된 boot 패키지의 help 다큐먼트를 보여줌
help(package=boot) #웹을 통해 boot 패키지의 다큐먼트를 보여줌
??boot #웹을 통해 boot 패키지의 다큐먼트를 보여줌
## R 프로그램 파일 실행 ##
source("a.R")
######### 2. 입력과 출력###############
######### 출력하기 ###############
#######################################
pi
sqrt(2)
print(pi)
print(sqrt(2))
print(matrix(c(1,2,3,4,5,6),3,2)) # 3 x 2
print(matrix(c(1,2,3,4,5,6),2,3)) # 2 x 3
print(list("a","b","c","d"))
#print함수는 오로지 하나의 객체만 프린트
print("THe zoro occur at", 2*pi, "radians.") #error 발생
print("THe zoro occur at");print(2*pi);print("raidans")
#cat함수는 print의 대안으로 여러개의 항목을 묶어서 연결된 결과로 출력해줌
cat("The zoro occur at", 2*pi, "radians.","\n")
#cat함수는 간단한 벡터도 출력 가능
f<-c(0,1,1,2,3,5,8,13)
cat("The Fibonacci numbers are:",f,"....\n")
#cat함수의 한계는 행렬이나 리스트 같은 복잡한 데이터 구조를 출력할 수 없음
cat(list("a","b","c"))
####### 변수 설정하기 ##############
######################################
# 대입연산자 (<-)
x<-3
y<-2
z<-sqrt(x^2+2*y)
z
#R은 동적 타입 언어이다.
x<-c("a", "b", "c", "d")
x
a<<-3
a
b=4
b
5->c
c
#변수 목록 보기
x<-10
y<-30
z<-c("one", "two", "three")
f<-function(n,p) sqrt(p*(1-p)/n)
f(200,0.4)
ls()
#변수 목록 뿐아니라 구조를 보여주는 함수
ls.str()
rm(a) # 변수 삭제 , 한번 변수를 삭제하면 영원히 삭제 됨
rm(b,c,z) # 한꺼번에 변수 삭제 가능
ls()
# warning 변수 목록을 리스트 인자에 지정한 다음 삭제하게 됨에 따라 기존에 있는 모든 변수를 삭제하게 됨 조심해야 함.
rm(list=ls())
ls()
####### 벡터 생성하기 ##############
######################################
#R 프로그램에서 사용하는 가장 기본적인 데이터셋의 형태
c(0,1,1,2,3,5,8,13)
c(1*pi,2*pi,3*pi,4*pi)
c("I","love","you.")
c(TRUE,TRUE,FALSE,FALSE)
d<-c(1,2,3)
e<-c(4,5,6)
f<-c("a","b","c")
#벡터의 인자 자체가 벡터인 경우 벡터를 풀어서 하나로 합친다.
g<-c(d,e,c(7,8,9))
g
#벡터의 인자 중 하나라도 문자가 포함되어 있는 경우 모든 원소들은 문자형으로 정의 된다.
h<-c(d,f)
h
mode(g)
mode(h)
####### 기본적인 통계량 계산하기 ###########
#######################################
a<-c(0,1,1,2,3,5,8,13)
b<-log(a+1)
c<-c(0,1,1,2,3,5,8,13,NA)
#평균
mean(0,1,1,2,3,5,8,13)
mean(a)
#중앙값
median(0,1,1,2,3,5,8,13)
median(a)
#표준편차
sd(0,1,1,2,3,5,8,13)
sd(a)
#분산
var(0,1,1,2,3,5,8,13)
var(a)
#상관계수
cor(a,b)
#공분산
cov(a,b)
#평균, 중간값, 표준편차는 데이터 프레임에서 각 변수들을 기준으로 값을 산출
#분산은 공분산 행렬을 제공, 상관계수는 상관계수 행렬을 제공
## 단하나의 NA 값이 포함되어도 결과는 NA로 나오므로 아래와 같이 활용
mean(c)
sd(c)
mean(c, na.rm=TRUE)
sd(c, na.rm=TRUE)
hei<-c(187,178,176,169,181,172)
wei<-c(82,72,70,65,74,68)
dfm_1<-data.frame(hei,wei)
dfm_1
dfm<-data.frame(height=c(187,178,176,169,181,172),
weight=c(82,72,70,65,74,68))
dfm
mean(dfm)
mean(dfm$height)
sd(dfm$height)
median(dfm$height)
cor(dfm$height,dfm$weight)
cor(dfm)
cov(dfm$height,dfm$weight)
cov(dfm)
####### 수열 생성하기 ##############
##################################
1:10
# by의 디폴트는 1
seq(from=1, to=10, by=1)
rep(1,times=5)
0:9 # 0~9
5:10 # 5~10
9:0 # 9~0
# 수열의 길이를 지정하고자 할때, length.out 사용
seq(from=0, to=50, length.out=5)
seq(from=1.0, to=2.0, length.out=5)
rep(pi, times=5)
############ 벡터의 비교 ##############
####################################
a<-3.1
a==pi
a!=pi
a<pi
a>pi
a<=pi
a>=pi
z<-c(3,pi, 4)
z1<-c(pi,pi,4)
z2=pi
z==z1
z==z2
any(z==z1) #벡터의 값들 중 하나라도 같은가?
all(z==z1) #벡터의 값들이 모두 같은가? all(z==0) 모든 값들이 0인가?는 많이 활용된다.
############ 벡터에 있는 원소 선택하기 ########
###########################################
a<-c(0,1,1,2,3,5,8,13)
a
a[1]
a[2]
a[3]
a[4]
a[1:3]
a[4:7]
a[c(1,3,5,7)]
# 인덱스가 음수인 경우는 1번째 값을 제외하라는 의미
a[-1]
a[-(2:3)]
# 중앙값보다 큰 모든 원소를 선택
a>median(a)
a[a>median(a)]
# 상하위 5% 안에 있는 모든 원소를 선택
a[a<quantile(a,0.05)|a>quantile(a,0.95)]
# 평균에서 + - 표준편차를 넘는 모든 원소 선택
abs(a-mean(a))>2*sd(a)
a[abs(a-mean(a))>2*sd(a)]
# NA나 NULL이 아닌 모든 원소를 선택
abs(a-mean(a))>2*sd(a)
a[!is.na(a) & !is.null(a)]
names(a)<-c("1st","2nd","3th","4th","5th","6th","7th","8th")
a
a["2nd"]
a["4th"]
############ 벡터 연산 수행하기 ########
###########################################
v1<-c(10,11,12,13,14)
v2<-c(1,2,3,4,5)
v1+v2
v1-v2
v1*v2
v1/v2
v1^v2
v1+2
v1-2
v1*2
v1/2
v1^2
mean(v1)
v1-mean(v1)
(v1-mean(v1))/sd(v1)
############ 연산자 우선순위 ########
###########################################
#PPT 참조
############ R 함수 정의 ########
###########################################
#
# function(매개변수1, 매개변수2, 매개변수3,....) expr
#
# function(매개변수1, 매개변수2, 매개변수3,....) {
# expr1
# expr2
# expr3
# .....
# }
#
##############################################?
f1<-function(x) (x-mean(x))/sd(x) #정규분포를 표준정규분포로 표준화
f1(1:10)
skew.and.kurto <-function(x) #왜도와 첨도를 구하는 함수
{
num1 <- mean((x-mean(x))^3)
denom1<-(mean((x-mean(x))^2))^1.5
num2 <-mean((x-mean(x))^4)
denom2<-(mean((x-mean(x))^2))^2
skew<-num1/denom1
kurto<-num2/denom2-3
return(c(skew,kurto))
}
t5<- 1:1000 #1000 random obs from t with df 5
skew.and.kurto(t5)
######### 데이터 입력하기 ############
####################################
#키보드로 데이터를 입력할 때 conbine 함수를 사용
a<-c(1,2,3,4,5,6,7,8,9)
#빈 데이터 프레임을 만든 뒤 내장된 편집기를 불러와 데이터 입력
temp<-data.frame()
b<-edit(temp)
fix(b)
b
#메뉴->edit->data editor -> object명 입력
# 간단한 문제를 다룰 때는 데이터 파일을 생성하고 읽어오는 것이 귀찮습니다.
# 이럴때는 R에 데이터를 곧바로 입력하는 게 편리하합니다.
# 이러한 방법을 위해 데이터 프레임을 정의하고 c함수로 벡터 생성해서 데이터를 입력해 봅시다.
c<-data.frame(
label=c("low","mid","high"),
lbound=c(0,0.5,1.5),
ubound=c(0.5,1.5,2.5)
)
c
######### 데이터 자리수 조절 ############
####################################
# R을 기본적으로 부동소수점을 포함해 7자리를 표시
pi
# 자리수를 4자리로 표현
print(pi,digits=4)
cat("pi 값은 ", pi,"입니다.", "\n")
# cat 함수는 형식 조정 불가->format 함수 활용
cat("pi 값은 ",format(pi,digits=4),"입니다.", "\n")
# 옵션함수를 활용해서 digit의 기본값을 바꾸기
options(digits=3)
pi
######### 파일에 출력하기 ############
####################################
a<-sqrt(10)
cat("The Answer is", a, "\n", file="fileout1.out")
# sink("fileout2.txt")
#- 결과를 txt 파일과 pdf로 출력함
sink("fileout2.txt", append=TRUE)
pdf("fileout2.pdf")
library(MASS)
data(Animals)
Animals
m1<-lm(log(brain)~log(body),data=Animals)
str(m1)
attach(Animals)
plot(log(brain)~log(body))
abline(m1$coef, lty="dotted")
dev.off()
sink()
getwd()
######### 파일 목록보기 ############
####################################
list.files() #워킹 디렉토리의 파일을 확인할 수 있음.
list.files(recursive=T) #하위 디렉토리의 숨겨놓은 파일(마침표로 시작하는 파일)까지 모두 볼수 있음
list.files(recursive=T, all.files=T)
######### 윈도우에서 'cannot open file 해결하기 ############
##########################################################
# d:\dataedu\R\basic\exam1.txt 위도우에서 파일이 있음을 알고 있다.
#하지만
f1<-read.table("d:\r_class\exam1.txt")
# 역슬레쉬(\)가 이름에 포함되어 있으면 문제가 발생
# 역슬레쉬 뒤에 오는 모든 문자를 이스케이프 처리한뒤 d:dataeduRbasicexam1.txt 로 인식
# 윈도우에서 R은 슬러쉬(/)를 역슬레쉬와 똑같이 취급하므로 문제 해결
f1<-read.table("d:/r_class/exam1.txt")
#R에서는 역슬레쉬 (\\)를 역슬레쉬 (\)로 인식하여 취급하므로 문제 해결
f1<-read.table("d:\\r_class\\exam1.txt")
############고정데이터 불러오기####################
###############################################
# f2<-read.fwf("exam2.txt", widths=c(w1,w2,w3,w4)) 고정 자리 데이터 읽기
#1째는 과제제출여부, 2째는 점수 사이에는 2칸의 빈칸
f2<-read.fwf("exam2.txt", widths=c(1,-2,2))
f2<-read.fwf("exam2.txt", widths=c(1,-2,2),
col.names=c("report","score")) # col명 지정
f2
###########비고정데이터 불러오기#############################
# (탭 또는 공백으로 구분된 )텍스트 파일로 된 테이블형식의 데이터 불러오기
# 각 줄이 하나의 레코드
f3<-read.table("kings.txt")
f3
# 하나의 레코드 내에서, 공백, 탭, 쉽표 등 한글자 짜리 구분문자
# f3<-read.table("kings.txt", sep= :, )
# 각 레코드ㅡ 동일한 수의 필드를 가짐
class(f3$V1)
# 문자열 레코드값을 factor로 인식하지 않고 문자열로 인식하게 함
f3<-read.table("kings.txt", stringsAsFactor=FALSE)
f3
class(f3$V1)
f3<-read.table("kings.txt", na.strings=".") # SAS의 결측값(.) 형태의 데이터를 NA 형태로 결측값을 변경
f3
# csv 파일은 R, 엑셀, 다른 스프레드시트 프로그램, 데이터베이스 관리자들이 자주 사용하는 형식
f4<-read.csv("frame.csv",header=T, as.is=T)
f4
class(f4$ename)
#탐색기를 통한 데이터 파일 불러오기
file.choose<-read.csv (file.choose())
head(file.choose)
########### csv 파일 출력하기 ############################
######################################################
write.csv(f4,file="new_frame.csv", row.names=FALSE)
############ HTML 테이블 읽어오기 ############################
####세계 500대 기업 리스트#
install.packages(c("XML","httr","RCurl"))
library(XML)
library(httr)
library(RCurl)
u<-GET("https://en.wikipedia.org/wiki/List_of_S%26P_500_companies",encoding="UTF-8")
tab1<-readHTMLTable(rawToChar(u$content),encoding="UTF-8")
tab1
############ 복잡한 구조 파일 읽어오기 ############################
# 구조가 복잡하여 10줄만 읽고 멈춤
# l1<-readlines("exam1.txt",n=10)
# 변수를 숫자형으로 인식하고 읽어들임
l2<-scan("numeric.txt",what=numeric(0))
l2
#scan()에 활용되는 인자
#what=numeric(0) 다음 토큰은 숫자로 해석
#what=integer(0) 다음 토큰은 정수로 해석
#what=complex(0) 다음 토큰은 복소수로 해석
#what=character(0) 다음 토큰은 문자열로 해석
#what=logical(0) 다음 토큰은 논리값으로 해석
#n=number 이만큼의 토큰을 읽은 다음에 멈춤
#nlines=number 이만큼의 입력 라인을 읽은 다음 멈춤
#skip=number 데이터를 읽기 전에 건너 뛰는 입력 라인의 개수
#na.strings=list NA로 해석될 문자열 리스트
# 예제 1. 경주시외버스터미널 가격표
# 문자와 숫자가 조합된 형태의 데이터 읽기
l3<-scan("gyeongju_ternimal.csv",
what=list(des=character(0),fare=numeric(0)),
skip=1,sep=",")
gyeongju<-as.data.frame(l3)
gyeongju
########### 객체를 저장하고 전송하기 ######################
######################################################
save(gyeongju, file="myData.RData") #save함수는 바이너리 데이터를 작성, ASCII 형식은 dput, dump를 사용
load("myData.RData")
dput(myData, file="myData.txt")
dump("myData", file="myData.txt") #변수 앞뒤의 따움표에 주의
#save함수는 파일의 크기를 작게 유지하기 위해 바이너리 형식으로 저장한다. ASCII 형식은 dput, dump를 사용할 수 있다.
#메일링 리스트에 질의를 보낼때 다른 사람들이 문제를 재현하기 위해서 ASCII 형식의 데이터의 덤프를 첨부 할 수 있다.
#프로그램에서 데이터가 특정 패키지에 있는 데이터를 활용한다면 패키지를 로딩해 두어야 활용할 수있다.
library(MASS)
data("Animals")
Animals
attach(Animals)
plot(log(brain)~log(body))
############# 3. 데이터 구조와 데이터 프레임(1)
######### 데이터 구조 ############
####################################
a<-read.csv("frame.csv", header=T)
b<-as.data.frame(a)
b
#불러들인 csv 데이터를 데이터프레임에 적용
#1번째 리스트를 프린트
b[1]
#1번째 리스트내의 값들을 프린트
b[[1]]
# 항목과 그 값들을 프린트
b[1]
b["empno"]
# 항목명을 통해서 값들을 프린트
b[["empno"]]
b$empno
#1행, 2열에 해당하는 원소
b[1,2]
#2행에 해당하는 모든 원소
b[2, ]
#2열에 해당하는 모든 원소
b[ , 2]
b[ , 3]
###### 단일값 ######
#단일값은 원소가 하나인 벡터로 의미
pi
length(pi)
pi[1]
pi[2]
####### 행렬 #########
#차원을 가지고 있는 벡터로 인식하면 됨
a<-1:9
dim(a)<-c(3,3)
a
####### 배열 #########
b<-1:12
dim(b)<-c(2,3,2)
b
####### 벡터에 데이터 추가와 삽입 ##########
v1<-c(1,2,3)
#벡터에 단일값 삽입
v1<-c(v1, 4,5)
v1
v2<-c(6,7,8,9,10)
#벡터에 다른 벡터를 삽입
v1<-c(v1,v2)
v1
#벡터의 특정위치에 단일값 삽입
v1[11]<-11
v1
#벡터의 특정 위치에 단일값 삽입
append(v1,100, after=5)
#벡터의 특정 위치에 데이터 삽입
append(v1,v2, after=0)
#col 값을 벡터로 연결
c1<-cbind(1:6)
c1
c2<-cbind(1:3)
c2
#두개의 벡터를 col으로 연결
cbind(1:6,1:3)
#두개의 object를 연결할 때는 길이가 같아야 함
cbind(c1,c2)
c3<-cbind(1:6,1:3)
cbind(c3,c1)
###### recycling rule ########
a<-c(1,2,3,4,5,6)
b<-c(7,8,9)
a+b
# 함수에도 재활용 규칙은 적용된다.
cbind(a,b)
########## 요인 생성 #########
###############################
#
#연속형 변수를 범주형변수로 변경
f<-factor(c2)
f
#범주형변수에 데이터 삽입
f<-factor(c("A","B","C","A","B","B","B"))
f
#범주형변수에 레벨을 정의
f<-factor(f,c("A","B","C","D","F"))
f
############## 리스트 생성하기 ######################
# 리스트는 각 원소의 모드가 달라도 되는 특징이 있음
l<-list(3.14, "Tom", c(1,2,3,4), mean)
l
# 빈 리스트를 생성하고 원소를 채워 넣는 방법으로 리스트 생성
l1<-list()
l1[[1]] <- 2.714
l1[[2]] <- "Yoon"
l1[[3]] <- c(5,4,3)
l1[[4]] <- mean
l1[3]
l1[[3]]
l1[c(2,3)]
# 리스트의 원소에 이름을 붙일 수 있음
l2<-list(mid=1.1, far=1.5, more.far=2.0)
l2
# 자리수로 리스트의 원소를 선택하기
l2[[1]]
l2[1]
l2[c(1,2)]
# 리스트의 구조를 확인해 보기->numeric
class(l2[[1]])
mode(l2[[1]])
# 리스트의 구조 확인해 보기 ->list
class(l2[1])
mode(l2[1])
# 원소의 이름으로 리스트의 원소를 선택하기
l2[["mid"]]
l2["mid"]
l2$mid
l2[c("mid","far")]
# 리스트에서 원소를 제거하기
l2[["more.far"]]<-NULL
l2
l3<-list(mid=1.1, far=1.5, more.far=NULL) # 리스트의 원소에 NULL 이 포함되어 있을 때 삭제하기
l3
l3[sapply(l3, is.null)]<-NULL # 리스트의 원소의 값이 NULL인 원소를 삭제
l3
###### 리스트에서 원소 제거하는 다양한 방법 #######
l3[l3==0]<- NULL # 리스트의 원소가 0인 원소를 제거
l3[is.na(l3)]<- NULL # 리스트의 원소가 na인 원소를 제거
l3[abs(l3)<1]<- NULL # 리스트의 원소가 절대값이 1보다 작은 원소를 제거
# abs 함수는 list를 다루지 못하므로 에러 발생
########## 행렬의 초기 내용 설정과 연산 #########
##############################################
#벡터를 생성
rowm1<-c(1.1,1.2,1.3,1.4,2.4,2.3)
rowm2<-c(3.1,3.4,3.6,4.1,4.3,4.7)
rowm3<-c(1.1,1.2,1.3,3.1,3.4,3.6,4.1,4.3,4.7)
#벡터의 데이터를 2x3 행렬로 변경,,, 디폴트는 열을 기준으로 입력됨
m1<-matrix(rowm1,2,3)
m1
#벡터의 데이터를 3x2 행렬로 변경,,, 디폴트는 열을 기준으로 입력됨
m2<-matrix(rowm2,3,2)
m2
#벡터의 데이터를 행을 기준으로 2x3 행렬로 변경
m3<-matrix(rowm2,2,3,byrow=T)
m3
# 2x2 행렬을 0으로 초기화한 행렬 생성
m4<-matrix(0,2,2)
m4
# 2x2 행렬을 NA으로 초기화한 행렬 생성
m5<-matrix(NA,2,2)
m5
# 벡터의 구조를 행렬구조로 변경하여 행렬을 생성
dim(rowm3)<-c(3,3)
rowm3
r1<-c(1,2,3,4,5,6)
r2<-c(6,5,4,3,2,1)
r3<-c(1,2,3,4)
r1<-matrix(r1,2,3)
r2<-matrix(r2,3,2)
r3<-matrix(r3,2,2)
r1
r2
r3
###### 행렬의 연산 ##################
# 전치행렬을 구함
t(r3)
# 역행렬을 구함
solve(r3)
# 행렬의 곱 구함
r1%*%r2
# 행렬의 곱 구함
r2%*%r1
#2차 단위행렬을 구함
diag(2)
######### 행렬의 열과 행에 이름 붙이기 ###############
# 행렬의 행에 이름붙이기
rownames(m1)<-c("class A","class B")
m1
# 행렬의 열에 이름붙이기
colnames(m1)<-c("min_d","dis","max_d")
m1
######### 행렬의 열과 행 선택하여 하위행렬 생성 ###############
#m1 행렬에서 1행을 뽑아 벡터 vm1 생성
vm1<-m1[1,]
vm1
#m1 행렬에서 2열을 뽑아 벡터 vm2 생성
vm2<-m1[,2]
vm2
#m1 행렬에서 1행을 뽑아 하위행렬 sm1 생성
sm1<-m1[1,, drop=FALSE]
sm1
#m1 행렬에서 2열을 뽑아 하위행렬 sm2 생성
sm2<-m1[,2, drop=FALSE]
sm2
############# 4. 데이터 구조와 데이터 프레임(2)
######### 데이터 프레임 만들기 ########
# 벡터와 요인이 혼합된 데이터라면 data.frame 함수를 사용해서 조립
## dfm<-data.frame(v1,v2,v3,f1,f2)
# 데이터가 리스트 구조라면 as.data.frame 으로 재설정 가능
# dfm<-as.data.frame(list.of.vectors)
#벡터 생성
level<-c("low","mid","high")
lbound<-c(0.1,0.5,1.5)
ubound<-c(0.5,1.5,2.5)
#벡터를 데이터프레임으로 변환
dfm1<-data.frame(level, lbound,ubound)
dfm1
#외부 데이터를 불러와 어브젝트에 할당
a<-read.csv("frame.csv", header=T)
a
a$ename<-as.character(a$ename)
a$phone<-as.character(a$phone)
# 외부 데이터가 리스트 구조임에 따라 구조 변경으로 데이터를 데이터프레임에 적용
dfm1<-as.data.frame(a)
dfm1
#오브젝트의 내용 중 필요한 변수만 선택해서 data.frame 형식으로 재 구성
dfm2<-data.frame(a$empno, a$ename, a$deptno)
dfm2
# 열의 이름을 적용하여 data.frame 형식으로 재 구성
dfm2<-data.frame(empno=a$empno, ename=a$ename, deptno=a$deptno)
dfm2
# 개별 벡터로 정의된 데이터들을 리스트로 구조를 변경하고 데이터 프레임형식으로 적용
lst<-list(a1=a$empno, a2=a$ename, a3=a$deptno, a4=a$phone)
lst
lst1<-as.data.frame(lst)
lst1
#하나의 데이터를 리스트구조로 셋팅
a5<-list(a1=10011, a2="Jones", a3="20", a4="523-4572")
a5
#기존의 데이터 프레임에 a5를 추가
lst
lst1<-rbind(lst1,a5)
#추가할 행 정보를 데이터프레임 형식으로 정의
newrow<-data.frame(a1=10012, a2="Jone", a3="30", a4="523-4576")
#행 결합을 통해 2개의 데이터 프레임을 결합
lst1<-rbind(lst1, newrow)
lst1
# 다른 표현으로
lst1<-rbind(lst1,
data.frame(a1=10013, a2="Tom", a3="11", a4="523-4577"),
data.frame(a1=10014, a2="Tomy", a3="20", a4="523-4578"))
lst1
# 데이터 프레임 할당하기
#1,000,000개의 행과 3개의 열(2개는 수치형, 1개는 문자형)으로 데이터 프레임을 할당
N<-1000000
dtfm<-data.frame(no=numeric(N), name=character(N), score=numeric(N))
dtfm
#데이터프레임 내 2번째 원소를 선택
lst1[[2]]
#데이터프레임 내 2번째 원소내 리스트를 선택
lst1[2]
# 데이터프레임내 2번째 행원소를 선택
lst1[2,]
lst1[,2]
# 데이터프레임 내, 모든 행과 1,3번째 열을 선택
lst1[,c(1,3)]
## subset(데이터 프레임,select=c(열이름, 열이름,...)) 으로 쉽게 데이터 선택
## subset(데이터 프레임,select=c(열이름, 열이름,...),subset=(열이름>조건))
lst<-subset(lst1,select=-a4)
lst
lst<-subset(lst1,select=c(a1, a3))
lst
lst<-subset(lst1,select=c(a1,a3),subset=(a3==30))
lst
lst<-subset(lst1,subset=(a3==30))
lst
############## 연습 문제 #############
# MASS 라는 패키지 내 Cars93 데이터셋에서 #####
# 1) city 에서 갤런당 30마일 이상 주행하는 연비를 가지는 차를 선택 #
# 2) 고속도로에서 MPG 값이 중앙값이상인 모든 차의 제조사와 모델명 ##
#########################################################
library(MASS)
?Cars93
subset(Cars93, select=Model, subset=(MPG.city>30))
newCars93<-subset(Cars93, select=c(Manufacturer, Model,Type, Price, RPM),
subset=c(MPG.highway>median(MPG.highway)))
newCars93
#열이름으로 열삭제
newCars93<-subset(newCars93, select=-Type)
newCars93
cor(subset(newCars93, select=c(-Manufacturer, -Model)))
cor(newCars93$Price, newCars93$RPM)
#데이터 프레임의 열이름 바꾸기
colnames(newCars93)<-c("Manu", "Modelname")
newCars93
####### data editor 에서 데이터 변경 ##########
## 주의)실행 취소 기능이 없다.
##################################################
# 데이터 에디터를 불러들임
temp<-edit(lst1)
# 에디터에서 변경한 내용을 다시 저장
lst1<-temp
# 에디터에서 변경한 내용을 덮어씀
fix(lst1)
lst1
######### NA 값이 있는 행을 삭제하기 ###########
#벡터 생성
x<-c(0.1,-0.5,1.5,1.6,-0.9,NA)
y<-c(0.5,-1.5,NA,NA,1.5,2.5)
#벡터를 데이터프레임으로 변환
NA_example<-data.frame(x,y)
NA_example
cumsum(NA_example)
# NA 값을 가지고 있는 행을 삭제
NA_clean<-na.omit(NA_example)
NA_clean
cumsum(NA_clean)
# 두개의 데이터 프레임을 합칠때
x<-c(1.5,1.3,1.5,1.4,1.8,1.8)
y<-c(2.7,2.8,3.5,3.2,3.4,3.9)
z<-c(5.7,5.8,6.9,5.8,5.1,5.6)
b_dfm1<-data.frame(x,y)
b_dfm2<-as.data.frame(z)
cb_dfm<-cbind(b_dfm1,b_dfm2)
cb_dfm
x<-c(1.5,1.3,1.5)
y<-c(2.7,2.8,3.5)
z<-c(5.7,5.8,6.9)
b_dfm3<-data.frame(x,y,z)
rb_dfm<-rbind(b_dfm3,b_dfm3)
rb_dfm
#recycling rule 주의
x<-c(1.5,1.3,1.5,1.4,1.8,1.8)
y<-c(2.7,2.8,3.5,3.2,3.4,3.9)
z<-c(5.7,5.8,6.9)
b_dfm1<-data.frame(x,y)
b_dfm2<-as.data.frame(z)
cb_dfm<-cbind(b_dfm1,b_dfm2)
cb_dfm
x<-c(1.5,1.3,1.5)
y<-c(2.7,2.8,3.5)
z<-c(5.7,5.8,6.9)
a<-c(8.9,7.9,5.9)
b_dfm3<-data.frame(x,y,z)
b_dfm4<-data.frame(x,y,a)
rb_dfm<-rbind(b_dfm3,b_dfm4)
rb_dfm
######## 두개의 데이타 프레임을 동일한 변수를 중심으로 합치기 #######
T_name<-c("T1","T2","T3","T4","T5","T6")
x<-c(1.5,1.3,1.5,1.4,1.8,1.8)
y<-c(2.7,2.8,3.5,3.2,3.4,3.9)
T_na<-c("T1","T3","T5")
z<-c(5.7,5.8,6.9)
b_dfm1<-data.frame(T_name,x,y)
b_dfm2<-data.frame(T_name=T_na,z)
mg_dfm<-merge(b_dfm1,b_dfm2,by="T_name", all=T)
mg_dfm
?merge
#
#
#한번더 연습해 보자
#
#
a<-read.csv("frame.csv", header=T)
b<-read.csv("frame2.csv", header=T)
merg1<-as.data.frame(a)
merg2<-as.data.frame(b)
merg1
merg2
#empno 변수를 중심으로 데이터 프레임 병합
merg <-merge(merg1,merg2,by="empno")
merg
#enmae.y 가 중복되어 삭제
merg<-subset(merg, select=-ename.y)
merg
#
#데이터 프레임 내용을 더 쉽게 접근하기
#
#
zz<-with(rb_dfm, (x+y-z))
zz
attach(rb_dfm)
zzz<-x+y-z
zzz
detach()
############# 5. 데이터 변형
# 자료형 변환하기
#
#as.charater()
#as.complex()
#as.numeric() 또는 as.double()
#as.integer()
#as.logical()
# 데이터 구조 변환하기
#
#as.data.frame()
#as.list()
#as.matrix()
#as.vector()
######### 데이터 변형 ############
####################################
# apply 함수, (apply, lapply, sapply, tapply, mapply)###
# by함수, split 함수 ###
# 위의 함수들을 통해 단번에 데이터를 변형하고 처리할 수 있다. ###
#########################################################
# 요인을 통해 집단 정의 ###
v<-c(24,23,52,46,75,25)
w<-c(87,86,92,84,77,68)
f<-factor(c("A","A","B","B","C","A"))
dfm<-data.frame(value=v,group=f)
dfm
### 벡터를 여러 집단으로 분할하기 ##
#group<-split(v,f) #벡터를 요인에 따라 분할
#group <-unstack(data.frame(v,f)) #벡터가 동일한 길이인 경우 리스트를 데이터 프레임으로 변환해줌
group<-split(v,f)
group
group<-split(w,f)
group
group <-unstack(data.frame(v,f))
group
group<-unstack(dfm)
group
library(MASS)
head(Cars93)
split(Cars93$MPG.city, Cars93$Origin) # Origin={USA,non-USA}, MPG.city는 도시에서의 연비
g<-split(Cars93$MPG.city, Cars93$Origin)
g
mean(g[[1]])
mean(g[["USA"]])
mean(g[[2]]) #MPG 평균 계산
mean(g[["non-USA"]])
### 리스트의 각 원소에 함수 적용 #####
#list<-lapply(l,func)
#vector<-sapply(l,func)
s1<-c(91,87,95,96,89,87,86,85,84,86,88,92,91,93,92,92,91,93,94,94,95,96,96,96,99,95,98,97,92,86,84,89,87,86,89,85,84)
s2<-c(89,86,85,92,93,91,90,89,81,84,85,89,92,95,96,91,93,92,90,90,92,91,93,92,90,92,92,93,94,99,95,96,94)
s3<-c(89,86,78,89,84,95,87,92,90,90,91,93,93,92,93,94,95,95,96,98,100,85,79,82,89,86,95,89,92,91,90,90,93,90,85,86,84,90)
s4<-c(89,79,85,86,86,85,84,82,82,91,92,100,89,91,92,90,93,91,90,85,86,84,87,89,90,90,89,86,89,85,89,87,84,80,79,90,82)
length(s1)
length(s2)
length(s3)
length(s4)
scores<-list(Koran=s1,English=s2,Matt=s3,Chiness=s4)
scores
lapply(scores,length)
sapply(scores,length)
sapply(scores,mean)
sapply(scores,sd)
sapply(scores,range)
ttest<-lapply(scores,t.test)
ttest
sapply(ttest, function(t) t$conf.int)
##### 모든 행에 함수 적용하기 #####
## results<-apply(met, 1, func)
m1<-c(82.5,88.2,89.2,87.5,89.9,78.3,79.8,80.9,81.9,83.5,85.6,87.2,88.2,89.5,91.5,78.9,79.2,81.5,83.2,82.5)
dim(m1)<-c(4,5)
colnames(m1) <- c("tr1","tr2","tr3","tr4","tr5")
rownames(m1) <- c("Tom","Yoon","Moon","Song")
m1
#apply함수는 행렬의 처리를 위해 만들어졌고 1-행,2-열을 func으로 처리
apply(m1,1,mean)
apply(m1,2,mean)
#lapply는 리스트 형태에서 적용할 수 있으므로 행렬의 모든 원소를 리스트로 인식함
lapply(m1,mean)
#sapply는 모든 행렬의 원소를 벡터로 인식하여 반환함
sapply(m1,mean)
t<-read.csv("test.csv", header=T)
test<-data.frame(t)
test
#데이터 프레임에서 apply를 적용하려면 데이터가 모두 숫자나 문자로 동질적인 경우만 적용가능
apply(test,1,mean)
apply(test,2,mean) #
#데이터 프레임의 각 열들의 클레스를 확인할 때 활용 가능
sapply(test,class)
#데이터 프레임에서 열을 기준으로만 func을 적용할 때 사용가능, lapply는 리스트로 값을 반환
lapply(test,mean)
#데이터 프레임에서 열을 기준으로만 func을 적용할 때 사용가능, sapply는 벡터로 값을 반환
sapply(test,mean)
###### 데이터 집단에 함수 적용하기 ###############
###############################################
###############################################
# tapply(x,f,func) # x는 벡터, f는 집단 분류 요인, func는 함수
Cars93
attach(Cars93)
sum(Weight)
mean(Weight)
Origin
tapply(Weight,Origin,sum) # Cars93 데이터 프레임에서 무게를 생산지 구분별로 합계
tapply(Weight,Origin,mean) # Cars93 데이터 프레임에서 무게를 생산지 구분별로 평균
tapply(Weight,Origin,length) # Cars93 데이터 프레임에서 무게의 객수를 생산지 구분별로 카운트
###### 행 집단에 함수 적용하기 ###############
###############################################
###############################################
library(MASS)
by(Cars93, Origin, summary) # Cars93 데이터 프레임에서 생산지구분별로 요약
head(Cars93)
attach(Cars93)
model<-by(Cars93, Origin, function(df) lm(Price~Weight+EngineSize,data=df))
model
summary(model[[1]])
lapply(model,confint)
# 함수를 생성
gcd<-function(a,b){
if(b==0)return(a)
else return(gcd(b,a%/%b))
}
gcd(c(1,2,3), c(9,6,3))  # NOTE: gcd() is scalar -- a length->1 condition in
                         # if() warns/errors in modern R; use mapply() instead
mapply(gcd, c(1,2,3), c(9,6,3))  # elementwise gcd of the two vectors
# --- String handling ---
# Length of a string (number of characters)
nchar("Tom")
nchar("my name is Tom")
n<-c("my", "name", "is", "Tom")
length(n)  # number of elements in the vector, not characters
# Concatenating strings
paste("my", "name", "is", "Tom")
paste("the pi is approximatly", pi)
name<-c("Tom", "Moe", "Larry")
paste(name,"loves me.")
paste(name,"loves me", collapse=", and ")  # collapse joins into one string
# Extracting a substring (characters 3 through 4)
substr("Statistics",3,4)
# Splitting a string on a delimiter
path<-"/home/dataedu/basic/R"
strsplit(path,"/")
# Replacing substrings (sub: first match only, gsub: every match)
s<-"Curly is the smart one. Curly is funny, too."
sub("Curly", "Tom",s)
gsub("Curly", "Tom",s)
# All pairwise combinations of two string vectors
location<-c("Seoul","Pusan","Inchon")
treatment<-c("T1","T2","T3")
outer(location, treatment, paste, sep="-")
# Today's date
Sys.Date()
# Converting a string to a Date
as.Date("2014-12-25")
as.Date("12/25/2014",format="%m/%d/%Y")
# Converting a Date to a string
as.character(Sys.Date())
format(Sys.Date(), format="%m/%d/%Y")
# Extracting date components (POSIXlt fields; $year is the offset from 1900)
d<-as.Date("2014-12-25")
p<-as.POSIXlt(d)
p$mday
p$year
p$year+1900
# Generating a sequence of dates
start<-as.Date("2014-12-01")
end<-as.Date("2014-12-25")
countdown<-seq(from=start, to=end, by=1)
countdown
seq(from=start, by="month", length.out=12)
seq(from=start, by="3 months", length.out=5)
seq(from=start, by="year", length.out=5)
|
context("memo")
library(magrittr)
library(testthat)
# environment used to mark memo execution; `test.env` is a free variable
# resolved at call time -- do.test() installs a fresh one before every call
current.test.env <- function () test.env
# name of the flag set in test.env when the memoised function body runs
key.executed <- "executed"
##
# Helper to insert expression into function to be executed before the current body.
# Operates on unevaluated call objects: both `expr` and the existing body are
# flattened (dropping a leading `{` when present) and re-assembled into a
# single `{` call.  `rest()` presumably drops the first list element (helper
# from the memo package) -- TODO confirm.
#
insert.before <- function (f, expr) {
expr.rest <- function (expr) {
expr.list <- expr %>% as.list()
if (length(expr.list) == 1) expr.list else rest(expr.list)
}
body(f) <- c(`{`, expr.rest(expr), expr.rest(body(f))) %>% as.call()
f
}
##
# marks memo execution in environment (sets the "executed" flag in test.env)
#
mark.executed <- function () assign(key.executed, TRUE, envir=test.env)
##
# inserts code to mark environment with execution flag before memoising the function,
# so tests can tell a real execution apart from a cache hit
#
test.memo <- function (f, ...) f %>% insert.before(quote({mark.executed()})) %>% memo(...)
##
# do memo test: call memo `f` with `params`, assert the result is identical to
# `expected`, and assert whether the wrapped body actually ran (`executed`).
# A fresh test.env is installed first so every call starts with a clean flag.
#
do.test <- function (f, params, expected, executed) {
assign("test.env", test_env(), envir = environment(current.test.env))
expect_true(identical(do.call(f, params), expected))
expect_equal(mget(key.executed, envir=current.test.env(), inherits=FALSE, ifnotfound=FALSE)[[1]], executed)
}
# do.test() arguments below are: (memo, call params, expected result,
# whether the underlying function body was expected to actually execute).
test_that("
Given a simple function which has been memoised,
When I evaluate the memo,
Then the result is cached for the same parameters after the first call", {
memo <- (function (value) value) %>% test.memo()
do.test(memo, list(10), 10, TRUE)
do.test(memo, list(10), 10, FALSE)
do.test(memo, list(10), 10, FALSE)
do.test(memo, list(20), 20, TRUE)
do.test(memo, list(20), 20, FALSE)
})
test_that("
Given a simple function which has been memoised,
When I evaluate the memo and specifiy the force parameter,
Then the memo is executed if force is TRUE
And the new value is cached", {
memo <- (function (value) value) %>% test.memo()
do.test(memo, list(10, memo.force=FALSE), 10, TRUE)
do.test(memo, list(10, memo.force=TRUE), 10, TRUE)
do.test(memo, list(10, memo.force=FALSE), 10, FALSE)
do.test(memo, list(10, memo.force=TRUE), 10, TRUE)
})
# By default NULL results are not cached, so a NULL-returning function
# re-executes on every call.
test_that("
Given a simple function that has no return value
And has been memoised with the default arguments,
When I evaluate the memo,
Then it will always execute", {
memo <- (function (value) return(NULL)) %>% test.memo()
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(20), NULL, TRUE)
do.test(memo, list(10, memo.force=TRUE), NULL, TRUE)
memo <- (function (value) return(NULL)) %>% test.memo(allow.null=FALSE)
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(20), NULL, TRUE)
do.test(memo, list(20), NULL, TRUE)
do.test(memo, list(10, memo.force=TRUE), NULL, TRUE)
})
test_that("
Given a simple function that has no arguments,
And has been memoised,
When I evaluate the memo,
Then it will cache the result as expected", {
memo <- (function () 10) %>% test.memo()
do.test(memo, list(), 10, TRUE)
do.test(memo, list(), 10, FALSE)
})
test_that("
Given a simple function that has no return value
And has been memoised indicating that null results are allowed,
When I evaluate the memo,
Then it will cache NULL results as normal", {
memo <- (function (value) return(NULL)) %>% test.memo(allow.null=TRUE)
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(10), NULL, FALSE)
do.test(memo, list(20), NULL, TRUE)
do.test(memo, list(20), NULL, FALSE)
do.test(memo, list(10, memo.force=TRUE), NULL, TRUE)
})
## TODO what happens if the function returns an invisible value
## TODO what happens if the function returns NA or "" ??
## TODO show that different memos do not share cached values
test_that("
Given a memo,
When I ask for the cache,
Then I get the cache", {
memo <- (function (value) value) %>% memo()
memo %>% memo.cache() %>% is.null() %>% expect_false()
})
test_that("
Given a memo,
When I ask for the function,
Then I get the original function", {
memo <- (function (value) value) %>% memo()
memo %>% memo.function() %>% hash() %>% expect_equal(hash(function (value) value))
})
# Double-memoisation is rejected rather than silently nested.
test_that("
Given a memo,
When I memo the memo,
Then I get an error", {
expect_error((function (value) value) %>% memo() %>% memo())
})
test_that("
Given a memo,
When I call it with the dry run argument set to TRUE,
Then it returns TRUE if the memoed function would be executed and FALSE if the value would have been
retrived from the cache,
And it doesn't store these values in the cache", {
memo <- (function (value) value) %>% memo()
memo(10, memo.dryrun = TRUE) %>% expect_true()
memo(10, memo.dryrun = TRUE) %>% expect_true()
memo(10, memo.dryrun = FALSE) %>% expect_equal(10)
memo(10, memo.dryrun = TRUE) %>% expect_false()
memo(10, memo.force = TRUE, memo.dryrun = TRUE) %>% expect_true()
memo(10, memo.dryrun = FALSE) %>% expect_equal(10)
memo(10, memo.force = TRUE, memo.dryrun = FALSE) %>% expect_equal(10)
memo(10) %>% expect_equal(10)
}) | /tests/testthat/testMemo.R | no_license | cran/memofunc | R | false | false | 5,236 | r |
context("memo")
library(magrittr)
library(testthat)
# environment used to mark memo execution
current.test.env <- function () test.env
# memo execution key
key.executed <- "executed"
##
# Helper to insert expression into function to be executed before the current body.
#
insert.before <- function (f, expr) {
expr.rest <- function (expr) {
expr.list <- expr %>% as.list()
if (length(expr.list) == 1) expr.list else rest(expr.list)
}
body(f) <- c(`{`, expr.rest(expr), expr.rest(body(f))) %>% as.call()
f
}
##
# marks memo execution in environment
#
mark.executed <- function () assign(key.executed, TRUE, envir=test.env)
##
# inserts code to mark environment with execution flag before memoising the function
#
test.memo <- function (f, ...) f %>% insert.before(quote({mark.executed()})) %>% memo(...)
##
# do memo test
#
do.test <- function (f, params, expected, executed) {
assign("test.env", test_env(), envir = environment(current.test.env))
expect_true(identical(do.call(f, params), expected))
expect_equal(mget(key.executed, envir=current.test.env(), inherits=FALSE, ifnotfound=FALSE)[[1]], executed)
}
test_that("
Given a simple function which has been memoised,
When I evaluate the memo,
Then the result is cached for the same parameters after the first call", {
memo <- (function (value) value) %>% test.memo()
do.test(memo, list(10), 10, TRUE)
do.test(memo, list(10), 10, FALSE)
do.test(memo, list(10), 10, FALSE)
do.test(memo, list(20), 20, TRUE)
do.test(memo, list(20), 20, FALSE)
})
test_that("
Given a simple function which has been memoised,
When I evaluate the memo and specifiy the force parameter,
Then the memo is executed if force is TRUE
And the new value is cached", {
memo <- (function (value) value) %>% test.memo()
do.test(memo, list(10, memo.force=FALSE), 10, TRUE)
do.test(memo, list(10, memo.force=TRUE), 10, TRUE)
do.test(memo, list(10, memo.force=FALSE), 10, FALSE)
do.test(memo, list(10, memo.force=TRUE), 10, TRUE)
})
test_that("
Given a simple function that has no return value
And has been memoised with the default arguments,
When I evaluate the memo,
Then it will always execute", {
memo <- (function (value) return(NULL)) %>% test.memo()
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(20), NULL, TRUE)
do.test(memo, list(10, memo.force=TRUE), NULL, TRUE)
memo <- (function (value) return(NULL)) %>% test.memo(allow.null=FALSE)
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(20), NULL, TRUE)
do.test(memo, list(20), NULL, TRUE)
do.test(memo, list(10, memo.force=TRUE), NULL, TRUE)
})
test_that("
Given a simple function that has no arguments,
And has been memoised,
When I evaluate the memo,
Then it will cache the result as expected", {
memo <- (function () 10) %>% test.memo()
do.test(memo, list(), 10, TRUE)
do.test(memo, list(), 10, FALSE)
})
test_that("
Given a simple function that has no return value
And has been memoised indicating that null results are allowed,
When I evaluate the memo,
Then it will cache NULL results as normal", {
memo <- (function (value) return(NULL)) %>% test.memo(allow.null=TRUE)
do.test(memo, list(10), NULL, TRUE)
do.test(memo, list(10), NULL, FALSE)
do.test(memo, list(20), NULL, TRUE)
do.test(memo, list(20), NULL, FALSE)
do.test(memo, list(10, memo.force=TRUE), NULL, TRUE)
})
## TODO what happens if the function returns an invisible value
## TODO what happens if the function returns NA or "" ??
## TODO show that different memos do not share cached values
test_that("
Given a memo,
When I ask for the cache,
Then I get the cache", {
memo <- (function (value) value) %>% memo()
memo %>% memo.cache() %>% is.null() %>% expect_false()
})
test_that("
Given a memo,
When I ask for the function,
Then I get the original function", {
memo <- (function (value) value) %>% memo()
memo %>% memo.function() %>% hash() %>% expect_equal(hash(function (value) value))
})
test_that("
Given a memo,
When I memo the memo,
Then I get an error", {
expect_error((function (value) value) %>% memo() %>% memo())
})
test_that("
Given a memo,
When I call it with the dry run argument set to TRUE,
Then it returns TRUE if the memoed function would be executed and FALSE if the value would have been
retrived from the cache,
And it doesn't store these values in the cache", {
memo <- (function (value) value) %>% memo()
memo(10, memo.dryrun = TRUE) %>% expect_true()
memo(10, memo.dryrun = TRUE) %>% expect_true()
memo(10, memo.dryrun = FALSE) %>% expect_equal(10)
memo(10, memo.dryrun = TRUE) %>% expect_false()
memo(10, memo.force = TRUE, memo.dryrun = TRUE) %>% expect_true()
memo(10, memo.dryrun = FALSE) %>% expect_equal(10)
memo(10, memo.force = TRUE, memo.dryrun = FALSE) %>% expect_equal(10)
memo(10) %>% expect_equal(10)
}) |
############################################################################################################
#
# File: DLfunctions41.R
# Developer: Tinniam V Ganesh
# Date : 26 Feb 2018
#
##########################################################################################################
# ReLU activation, applied elementwise: A = max(0, z).
# Returns list(A = activations, Z = the raw input, kept for the backward pass).
# IMPROVED: pmax() is vectorised and preserves dim/dimnames, replacing the
# much slower per-cell apply(); the unused `cache` local was dropped.
relu <-function(Z){
  A <- pmax(Z, 0)
  retvals <- list("A"=A,"Z"=Z)
  return(retvals)
}
# Softmax activation: turn each column of Z (one sample's class scores)
# into a probability distribution.
# Returns list(A = probabilities laid out samples x classes,
#              Z = the untouched input, kept for the backward pass).
softmax <- function(Z){
  scores <- t(Z)                # samples x classes
  exp_scores <- exp(scores)     # unnormalised probabilities
  # divide every row by its sum so each row sums to 1
  A <- sweep(exp_scores, 1, rowSums(exp_scores), "/")
  list("A" = A, "Z" = Z)
}
# Backward pass through ReLU: the upstream gradient dA flows only where the
# forward pre-activation Z (stashed in `cache`) was strictly positive.
reluDerivative <-function(dA, cache){
  Z <- cache
  # (Z > 0) is a logical mask; multiplying by it zeroes blocked gradients
  dA * (Z > 0)
}
# Backward pass for softmax + cross-entropy: dL/dZ = p_i - y_i.
# Input : dA          - unused (kept for a uniform derivative signature)
#         cache       - Z, the pre-activation saved by the forward pass
#         y           - integer class labels in {0, 1, 2}
#         numTraining - number of training examples (columns of Z)
# Returns: dZ laid out samples x classes (matching softmax()'s A).
# IMPROVED: labels are one-hot encoded positionally, so the function no
# longer silently assumes y is sorted as all 0s, then all 1s, then all 2s.
softmaxDerivative <- function(dA, cache, y, numTraining){
  Z <- cache
  # Recompute the forward softmax probabilities (samples x classes)
  exp_scores <- exp(t(Z))
  probs <- exp_scores / rowSums(exp_scores)
  # One-hot matrix yi: row i has a 1 in column y[i] + 1 (still 3 classes)
  yi <- matrix(0, nrow = numTraining, ncol = 3)
  yi[cbind(seq_len(numTraining), y + 1)] <- 1
  probs - yi
}
# Initialize the 2-layer model's parameters.
# Input : numFeats  - number of input features
#         numHidden - number of hidden units
#         numOutput - number of output units
# Returns: list(W1, b1, W2, b2) of weight matrices and bias column vectors.
# Weights are small Gaussians (scaled by 0.01); biases are unscaled Gaussians.
initializeModel <- function(numFeats,numHidden,numOutput){
  # Draw a rows-x-cols Gaussian matrix, optionally shrunk by `scale`.
  # (The four draws below happen in the same order and with the same counts
  # as before, so the fixed seed reproduces identical parameters.)
  gaussian_matrix <- function(rows, cols, scale = 1) {
    matrix(rnorm(rows * cols) * scale, nrow = rows, ncol = cols)
  }
  set.seed(2)  # fixed seed: the initialization is reproducible
  W1 <- gaussian_matrix(numHidden, numFeats, 0.01)
  b1 <- gaussian_matrix(numHidden, 1)
  W2 <- gaussian_matrix(numOutput, numHidden, 0.01)
  b2 <- gaussian_matrix(numOutput, 1)
  list("W1" = W1, "b1" = b1, "W2" = W2, "b2" = b2)
}
# Forward propagation through one layer of a deep network.
# Input : A_prev         - activations of the previous layer (units x samples)
#         W, b           - this layer's weight matrix and bias column vector
#         activationFunc - one of "sigmoid", "relu", "tanh", "softmax"
# Returns: list(A = this layer's activations,
#               cache = list(forward_cache = list(A_prev, W, b),
#                            activation_cache = pre-activation Z))
# Z = W %*% A_prev + b;  A = activation(Z)
layerActivationForward <- function(A_prev, W, b, activationFunc){
  # Affine step; sweep() adds the bias to every column (every sample)
  Z <- sweep(W %*% A_prev, 1, b, '+')
  forward_cache <- list("A_prev" = A_prev, "W" = W, "b" = b)
  # Non-linearity: every helper returns list(A = activation, Z = pre-activation)
  if (activationFunc == "sigmoid") {
    acts <- sigmoid(Z)
  } else if (activationFunc == "relu") {
    acts <- relu(Z)
  } else if (activationFunc == "tanh") {
    acts <- tanhActivation(Z)
  } else if (activationFunc == "softmax") {
    acts <- softmax(Z)
  }
  cache <- list("forward_cache" = forward_cache, "activation_cache" = acts[["Z"]])
  list("A" = acts[["A"]], "cache" = cache)
}
# Backpropagate through one layer.
# Input : dA    - gradient of the loss w.r.t. this layer's activations
#         cache - list(forward_cache, activation_cache) saved by the forward pass
#         y     - true labels (only used by the softmax branch)
#         activationFunc - one of "relu", "sigmoid", "tanh", "softmax"
# Returns: list(dA_prev, dW, db) - gradients for the previous layer's
#          activations and for this layer's weights/bias.
# dL/dWl = dL/dZl . A_(l-1);  dL/dbl = dL/dZl;  dA_prev = dL/dZl . W
layerActivationBackward <- function(dA, cache, y, activationFunc){
  # Unpack what the forward pass cached: A_prev/W/b plus the pre-activation Z
  forward_cache <-cache[['forward_cache']]
  activation_cache <- cache[['activation_cache']]
  A_prev <- forward_cache[['A_prev']]
  numtraining = dim(A_prev)[2]
  # dZ = dL/dZ via the derivative matching this layer's activation function
  if(activationFunc == "relu"){
    dZ <- reluDerivative(dA, activation_cache)
  } else if(activationFunc == "sigmoid"){
    dZ <- sigmoidDerivative(dA, activation_cache)
  } else if(activationFunc == "tanh"){
    dZ <- tanhDerivative(dA, activation_cache)
  } else if(activationFunc == "softmax"){
    dZ <- softmaxDerivative(dA, activation_cache,y,numtraining)
  }
  # The softmax branch flips the matrix products relative to the hidden-layer
  # branch because softmaxDerivative() returns dZ laid out samples x classes
  # (see softmax()), whereas the other derivatives keep units x samples.
  if (activationFunc == 'softmax'){
    W <- forward_cache[['W']]
    b <- forward_cache[['b']]
    dW = 1/numtraining * A_prev%*%dZ
    db = 1/numtraining* matrix(colSums(dZ),nrow=1,ncol=3)  # NOTE: hard-coded to 3 output classes
    dA_prev = dZ %*%W
  } else {
    W <- forward_cache[['W']]
    b <- forward_cache[['b']]
    numtraining = dim(A_prev)[2]
    dW = 1/numtraining * dZ %*% t(A_prev)
    db = 1/numtraining * rowSums(dZ)
    dA_prev = t(W) %*% dZ
  }
  retvals <- list("dA_prev"=dA_prev,"dW"=dW,"db"=db)
  return(retvals)
}
# Plot a decision boundary for Softmax output activation.
# This function uses ggplot2 (and brewer.pal, presumably from RColorBrewer --
# confirm it is loaded by the calling script).
# Z:  data whose first two columns are the coordinates and third the label.
# W1/b1 feed the relu hidden layer, W2/b2 the softmax output layer.
plotDecisionBoundary <- function(Z,W1,b1,W2,b2){
  # Bounding box of the data
  xmin<-min(Z[,1])
  xmax<-max(Z[,1])
  ymin<-min(Z[,2])
  ymax<-max(Z[,2])
  # Create a grid of points (100 x 100) covering the bounding box
  a=seq(xmin,xmax,length=100)
  b=seq(ymin,ymax,length=100)
  grid <- expand.grid(x=a, y=b)
  colnames(grid) <- c('x1', 'x2')
  grid1 <-t(grid)
  # Predict the output based on the grid of points: relu hidden layer ...
  retvals <- layerActivationForward(grid1,W1,b1,'relu')
  A1 <- retvals[['A']]
  cache1 <- retvals[['cache']]
  # NOTE(review): the cache list uses the key 'forward_cache', so these
  # 'forward_cache1' lookups return NULL; the values are never used below,
  # so this is harmless, but the four lines could be dropped.
  forward_cache1 <- cache1[['forward_cache1']]
  activation_cache <- cache1[['activation_cache']]
  # ... then the softmax output layer
  retvals = layerActivationForward(A1,W2,b2,'softmax')
  A2 <- retvals[['A']]
  cache2 <- retvals[['cache']]
  forward_cache2 <- cache2[['forward_cache1']]
  activation_cache2 <- cache2[['activation_cache']]
  # From the softmax probabilities pick the class with the highest probability
  q= apply(A2,1,which.max)
  q1 <- t(data.frame(q))
  q2 <- as.numeric(q1)
  grid2 <- cbind(grid,q2)
  colnames(grid2) <- c('x1', 'x2','q2')
  z1 <- data.frame(Z)
  names(z1) <- c("x1","x2","y")
  atitle=paste("Decision boundary")
  # Scatter the data points and overlay contours of the predicted class
  ggplot(z1) +
    geom_point(data = z1, aes(x = x1, y = x2, color = y)) +
    stat_contour(data = grid2, aes(x = x1, y = x2, z = q2,color=q2), alpha = 0.9)+
    ggtitle(atitle) + scale_colour_gradientn(colours = brewer.pal(10, "Spectral"))
}
# Score inputs X with a trained network: run the deep forward pass and
# return the final-layer activations (AL) as the predicted scores.
computeScores <- function(parameters, X,hiddenActivationFunc='relu'){
  forwardPropagationDeep(X, parameters, hiddenActivationFunc)$AL
}
| /Chap4-MulticlassDeepLearningNetwork/DLfunctions41.R | no_license | adithirgis/DeepLearningFromFirstPrinciples | R | false | false | 6,775 | r | ############################################################################################################
#
# File: DLfunctions41.R
# Developer: Tinniam V Ganesh
# Date : 26 Feb 2018
#
##########################################################################################################
# ReLU activation, applied elementwise: A = max(0, z).
# Returns list(A = activations, Z = the raw input, kept for the backward pass).
# IMPROVED: pmax() is vectorised and preserves dim/dimnames, replacing the
# much slower per-cell apply(); the unused `cache` local was dropped.
relu <-function(Z){
  A <- pmax(Z, 0)
  retvals <- list("A"=A,"Z"=Z)
  return(retvals)
}
# Compute the softmax of a vector
softmax <- function(Z){
# get unnormalized probabilities
exp_scores = exp(t(Z))
# normalize them for each example
A = exp_scores / rowSums(exp_scores)
retvals <- list("A"=A,"Z"=Z)
return(retvals)
}
# Compute the detivative of Relu
reluDerivative <-function(dA, cache){
Z <- cache
dZ <- dA
# Create a logical matrix of values > 0
a <- Z > 0
# When z <= 0, you should set dz to 0 as well. Perform an element wise multiple
dZ <- dZ * a
return(dZ)
}
# Backward pass for softmax + cross-entropy: dL/dZ = p_i - y_i.
# Input : dA          - unused (kept for a uniform derivative signature)
#         cache       - Z, the pre-activation saved by the forward pass
#         y           - integer class labels in {0, 1, 2}
#         numTraining - number of training examples (columns of Z)
# Returns: dZ laid out samples x classes (matching softmax()'s A).
# IMPROVED: labels are one-hot encoded positionally, so the function no
# longer silently assumes y is sorted as all 0s, then all 1s, then all 2s.
softmaxDerivative <- function(dA, cache, y, numTraining){
  Z <- cache
  # Recompute the forward softmax probabilities (samples x classes)
  exp_scores <- exp(t(Z))
  probs <- exp_scores / rowSums(exp_scores)
  # One-hot matrix yi: row i has a 1 in column y[i] + 1 (still 3 classes)
  yi <- matrix(0, nrow = numTraining, ncol = 3)
  yi[cbind(seq_len(numTraining), y + 1)] <- 1
  probs - yi
}
# Initialize the model
# Input : number of features
# number of hidden units
# number of units in output
# Returns: list of Weight and bias matrices and vectors
initializeModel <- function(numFeats,numHidden,numOutput){
set.seed(2)
a<-rnorm(numHidden*numFeats)*0.01 # Multiply by .01
W1 <- matrix(a,nrow=numHidden,ncol=numFeats)
a<-rnorm(numHidden*1)
b1 <- matrix(a,nrow=numHidden,ncol=1)
a<-rnorm(numOutput*numHidden)*0.01
W2 <- matrix(a,nrow=numOutput,ncol=numHidden)
a<-rnorm(numOutput*1)
b2 <- matrix(a,nrow=numOutput,ncol=1)
parameters <- list("W1"=W1,"b1"=b1,"W2"=W2,"b2"=b2)
return(parameters)
}
# Compute the activation at a layer 'l' for forward prop in a Deep Network
# Input : A_prev - Activation of previous layer
# W,b - Weight and bias matrices and vectors
# activationFunc - Activation function - sigmoid, tanh, relu etc
# Returns : A list of forward_cache, activation_cache, cache
# Z = W * X + b
# A = sigmoid(Z), A= Relu(Z), A= tanh(Z)
layerActivationForward <- function(A_prev, W, b, activationFunc){
# Compute Z
z = W %*% A_prev
Z <-sweep(z,1,b,'+')
forward_cache <- list("A_prev"=A_prev, "W"=W, "b"=b)
# Compute the activation for sigmoid
if(activationFunc == "sigmoid"){
vals = sigmoid(Z)
} else if (activationFunc == "relu"){ # Compute the activation for relu
vals = relu(Z)
} else if(activationFunc == 'tanh'){ # Compute the activation for tanh
vals = tanhActivation(Z)
} else if(activationFunc == 'softmax'){
vals = softmax(Z)
}
cache <- list("forward_cache"=forward_cache, "activation_cache"=vals[['Z']])
retvals <- list("A"=vals[['A']],"cache"=cache)
return(retvals)
}
# Compute the backpropagation for 1 cycle
# Input : Neural Network parameters - dA
# # cache - forward_cache & activation_cache
# # y
# # activationFunc
# Returns: Gradients - a list of dA_prev, dW, db
# dL/dWi= dL/dZi*Al-1
# dl/dbl = dL/dZl
# dL/dZ_prev=dL/dZl*W
layerActivationBackward <- function(dA, cache, y, activationFunc){
# Get A_prev,W,b
forward_cache <-cache[['forward_cache']]
activation_cache <- cache[['activation_cache']]
A_prev <- forward_cache[['A_prev']]
numtraining = dim(A_prev)[2]
# Get Z
if(activationFunc == "relu"){
dZ <- reluDerivative(dA, activation_cache)
} else if(activationFunc == "sigmoid"){
dZ <- sigmoidDerivative(dA, activation_cache)
} else if(activationFunc == "tanh"){
dZ <- tanhDerivative(dA, activation_cache)
} else if(activationFunc == "softmax"){
dZ <- softmaxDerivative(dA, activation_cache,y,numtraining)
}
# Check if softmax
if (activationFunc == 'softmax'){
W <- forward_cache[['W']]
b <- forward_cache[['b']]
dW = 1/numtraining * A_prev%*%dZ
db = 1/numtraining* matrix(colSums(dZ),nrow=1,ncol=3)
dA_prev = dZ %*%W
} else {
W <- forward_cache[['W']]
b <- forward_cache[['b']]
numtraining = dim(A_prev)[2]
dW = 1/numtraining * dZ %*% t(A_prev)
db = 1/numtraining * rowSums(dZ)
dA_prev = t(W) %*% dZ
}
retvals <- list("dA_prev"=dA_prev,"dW"=dW,"db"=db)
return(retvals)
}
# Plot a decision boundary for Softmax output activation
# This function uses ggplot2
plotDecisionBoundary <- function(Z,W1,b1,W2,b2){
xmin<-min(Z[,1])
xmax<-max(Z[,1])
ymin<-min(Z[,2])
ymax<-max(Z[,2])
# Create a grid of points
a=seq(xmin,xmax,length=100)
b=seq(ymin,ymax,length=100)
grid <- expand.grid(x=a, y=b)
colnames(grid) <- c('x1', 'x2')
grid1 <-t(grid)
# Predict the output based on the grid of points
retvals <- layerActivationForward(grid1,W1,b1,'relu')
A1 <- retvals[['A']]
cache1 <- retvals[['cache']]
forward_cache1 <- cache1[['forward_cache1']]
activation_cache <- cache1[['activation_cache']]
retvals = layerActivationForward(A1,W2,b2,'softmax')
A2 <- retvals[['A']]
cache2 <- retvals[['cache']]
forward_cache2 <- cache2[['forward_cache1']]
activation_cache2 <- cache2[['activation_cache']]
# From the softmax probabilities pick the one with the highest probability
q= apply(A2,1,which.max)
q1 <- t(data.frame(q))
q2 <- as.numeric(q1)
grid2 <- cbind(grid,q2)
colnames(grid2) <- c('x1', 'x2','q2')
z1 <- data.frame(Z)
names(z1) <- c("x1","x2","y")
atitle=paste("Decision boundary")
ggplot(z1) +
geom_point(data = z1, aes(x = x1, y = x2, color = y)) +
stat_contour(data = grid2, aes(x = x1, y = x2, z = q2,color=q2), alpha = 0.9)+
ggtitle(atitle) + scale_colour_gradientn(colours = brewer.pal(10, "Spectral"))
}
# Predict the output
computeScores <- function(parameters, X,hiddenActivationFunc='relu'){
fwdProp <- forwardPropagationDeep(X, parameters,hiddenActivationFunc)
scores <- fwdProp$AL
return (scores)
}
|
#### Preamble ####
# Purpose: The purpose of this script is to download all of the pages for Queensland politicians.
# Author: Rohan Alexander
# Email: rohan.alexander@utoronto.ca
# Last updated: 12 June 2020
# Prerequisites: Need to have the IDs from 'get_ids.R'
# Misc:
#### Set up workspace ####
library(rvest)
library(tidyverse)
#### Write the function ####
get_bio <- function(page_of_interest){
  # Download the biography page for one *former* member and cache the raw
  # HTML under inputs/data/queensland/politicians/<id>.html.
  #
  # page_of_interest: the member's bio URL (or just its id-bearing tail),
  #   e.g. "https://www.parliament.qld.gov.au/members/former/bio?id=2719334794"
  # Side effects: writes one HTML file, prints progress, sleeps 5-15 seconds
  # between requests to be polite to the server.
  #
  # BUG FIX: the original body called read_html(address) but `address` was
  # never defined and `page_of_interest` was never used, so every call
  # errored; the address is now derived from the argument.
  id <- str_remove(page_of_interest, ".*bio\\?id=")
  address <- paste0("https://www.parliament.qld.gov.au/members/former/bio?id=", id)
  politicians_page <- read_html(address)
  save_name <- paste0("inputs/data/queensland/politicians/", id, ".html")
  write_html(politicians_page, file = save_name)
  message <- paste0("Done with ", address, " at ", Sys.time())
  print(message)
  Sys.sleep(sample(x = c(5:15), size = 1))
}
#### Use the function ####
# Former members exported by an earlier scraping step
all_former_politicians <- read_csv("outputs/data/queensland/list_of_all_former_politicians.csv")
# For testing:
# all_former_politicians <- all_former_politicians[1:2,]
# safely() wraps get_bio so one failed download cannot abort the whole run
safely_get_bio <- purrr::safely(get_bio)
# NOTE(review): walk() is called for its side effects and returns its input
# invisibly, so this assignment replaces the data frame with the page vector
# -- presumably unintended; confirm the result is not needed downstream.
all_former_politicians <- purrr::walk(all_former_politicians$page, safely_get_bio)
#### Current ####
get_bio_current <- function(page_of_interest){
  # Download the details page for one *sitting* member and cache the raw
  # HTML under inputs/data/queensland/politicians/<id>.html, then pause
  # 5-15 seconds so we do not hammer the parliament's web server.
  # For testing:
  # page_of_interest <- all_current_politicians$page[1]
  member_id <- str_remove(page_of_interest, "/members/current/list/MemberDetails\\?ID=")
  url <- paste0("https://www.parliament.qld.gov.au/members/current/list/MemberDetails?ID=", member_id)
  page_html <- read_html(url)
  out_file <- paste0("inputs/data/queensland/politicians/", member_id, ".html")
  write_html(page_html, file = out_file)
  print(paste0("Done with ", member_id, " at ", Sys.time()))
  Sys.sleep(sample(x = c(5:15), size = 1))
}
#### Use the function ####
# Sitting members exported by an earlier scraping step
all_current_politicians <- read_csv("outputs/data/queensland/list_of_all_current_politicians.csv")
# For testing:
# all_current_politicians <- all_current_politicians[1:2,]
# safely() wraps the scraper so one failed download cannot abort the run
safely_get_current_bio <- purrr::safely(get_bio_current)
# NOTE(review): walk() returns its input invisibly, so all_politicians ends
# up holding the page vector, not scrape results -- confirm this is intended.
all_politicians <- purrr::walk(all_current_politicians$page, safely_get_current_bio)
| /scripts/queensland/02-get_politicians_qld.R | no_license | hidayaa/australian_politicians | R | false | false | 2,201 | r | #### Preamble ####
# Purpose: The purpose of this script is to download all of the pages for Queensland politicians.
# Author: Rohan Alexander
# Email: rohan.alexander@utoronto.ca
# Last updated: 12 June 2020
# Prerequisites: Need to have the IDs from 'get_ids.R'
# Misc:
#### Set up workspace ####
library(rvest)
library(tidyverse)
#### Write the function ####
get_bio <- function(page_of_interest){
  # Download the biography page for one *former* member and cache the raw
  # HTML under inputs/data/queensland/politicians/<id>.html.
  #
  # page_of_interest: the member's bio URL (or just its id-bearing tail).
  # Side effects: writes one HTML file, prints progress, sleeps 5-15 seconds.
  #
  # BUG FIX: the original body called read_html(address) but `address` was
  # never defined and `page_of_interest` was never used, so every call
  # errored; the address is now derived from the argument.
  id <- str_remove(page_of_interest, ".*bio\\?id=")
  address <- paste0("https://www.parliament.qld.gov.au/members/former/bio?id=", id)
  politicians_page <- read_html(address)
  save_name <- paste0("inputs/data/queensland/politicians/", id, ".html")
  write_html(politicians_page, file = save_name)
  message <- paste0("Done with ", address, " at ", Sys.time())
  print(message)
  Sys.sleep(sample(x = c(5:15), size = 1))
}
#### Use the function ####
all_former_politicians <- read_csv("outputs/data/queensland/list_of_all_former_politicians.csv")
# For testing:
# all_former_politicians <- all_former_politicians[1:2,]
safely_get_bio <- purrr::safely(get_bio)
all_former_politicians <- purrr::walk(all_former_politicians$page, safely_get_bio)
#### Current ####
get_bio_current <- function(page_of_interest){
# For testing:
# page_of_interest <- all_current_politicians$page[1]
id <- str_remove(page_of_interest, "/members/current/list/MemberDetails\\?ID=")
address <- paste0("https://www.parliament.qld.gov.au/members/current/list/MemberDetails?ID=", id)
politicians_page <- read_html(address)
save_name <- paste0("inputs/data/queensland/politicians/", id, ".html")
write_html(politicians_page, file = save_name)
message <- paste0("Done with ", id, " at ", Sys.time())
print(message)
Sys.sleep(sample(x = c(5:15), size = 1))
}
#### Use the function ####
all_current_politicians <- read_csv("outputs/data/queensland/list_of_all_current_politicians.csv")
# For testing:
# all_current_politicians <- all_current_politicians[1:2,]
safely_get_current_bio <- purrr::safely(get_bio_current)
all_politicians <- purrr::walk(all_current_politicians$page, safely_get_current_bio)
|
setwd("C:/Users/david/datasciencecoursera/Exploratory Data Analysis/Week1/proj1")
filename <- "./household_power_consumption.txt"
subsetfile <- "./hpc-subset.Rd"
plotfilename <- "./plot4.png"
# Start from a clean slate: remove any previous copy of the plot
if (file.exists(plotfilename)) {
  file.remove(plotfilename)
}
# if the datsubset already exists read it in,
# otherwise read in the original file and subset it (caches the 2-day slice)
if (file.exists(subsetfile)) {
  load(subsetfile)
} else {
  # ";"-separated, "?" marks NA; first two columns are character Date/Time,
  # the remaining seven are numeric measurements
  consumption <- read.table(filename, header=TRUE, sep=";",na.strings="?",as.is=TRUE,
    colClasses = c("character","character",rep("numeric",7)))
  # subset so we only have the data for the two dates
  consumption <- subset( consumption, consumption$Date == "1/2/2007" | consumption$Date == "2/2/2007")
  # create a date / time column
  consumption$DateTime <- paste(consumption$Date, consumption$Time)
  consumption$DateTime <- strptime( consumption$DateTime, "%d/%m/%Y %H:%M:%S")
  # now save the subset for future use
  save(consumption, file=subsetfile)
}
# send this one directly to a png file
# as it contains less artifacts than using dev.copy2png
png(filename=plotfilename, width=480, height=480, units="px",
  pointsize=12)
# 2x2 grid of panels, filled row by row
par(mfrow = c(2,2), mar=c(6,4,1,1))
# do the first plot - top left
plot(x=consumption$DateTime,
  y=consumption$Global_active_power,
  type="l", xlab="",
  ylab="Global Active Power")
# do the second plot - top right
plot(x=consumption$DateTime,
  y=consumption$Voltage,
  type="l", xlab="datetime",
  ylab="Voltage")
# do the third plot - bottom left: the three sub-metering series overlaid
with(consumption, plot(DateTime, Sub_metering_1, main="",
  xlab="", ylab="Energy sub metering",
  type="n"))
with(consumption, lines(DateTime, Sub_metering_1, col="black"))
with(consumption, lines(DateTime, Sub_metering_2, col="red"))
with(consumption, lines(DateTime, Sub_metering_3, col="blue"))
legend("topright", lty=1, col = c("black","red","blue"),
  legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
  bty="n")
# do the fourth plot - bottom right
plot(x=consumption$DateTime,
  y=consumption$Global_reactive_power,
  xlab="datetime", ylab="Global_reactive_power",
  type="l")
dev.off() | /plot4.R | no_license | cyberhiker1965/ExData_Plotting1 | R | false | false | 2,258 | r |
setwd("C:/Users/david/datasciencecoursera/Exploratory Data Analysis/Week1/proj1")
filename <- "./household_power_consumption.txt"
subsetfile <- "./hpc-subset.Rd"
plotfilename <- "./plot4.png"
if (file.exists(plotfilename)) {
file.remove(plotfilename)
}
# if the datsubset already exists read it in,
# otherwise read in the original file and subset it
if (file.exists(subsetfile)) {
load(subsetfile)
} else {
consumption <- read.table(filename, header=TRUE, sep=";",na.strings="?",as.is=TRUE,
colClasses = c("character","character",rep("numeric",7)))
# subset so we only have the data for the two dates
consumption <- subset( consumption, consumption$Date == "1/2/2007" | consumption$Date == "2/2/2007")
# create a date / time column
consumption$DateTime <- paste(consumption$Date, consumption$Time)
consumption$DateTime <- strptime( consumption$DateTime, "%d/%m/%Y %H:%M:%S")
# now save the subset for future use
save(consumption, file=subsetfile)
}
# send this one directly to a png file
# as it contains less artifacts than using dev.copy2png
png(filename=plotfilename, width=480, height=480, units="px",
pointsize=12)
par(mfrow = c(2,2), mar=c(6,4,1,1))
# do the first plot - top left
plot(x=consumption$DateTime,
y=consumption$Global_active_power,
type="l", xlab="",
ylab="Global Active Power")
# do the second plot - top right
plot(x=consumption$DateTime,
y=consumption$Voltage,
type="l", xlab="datetime",
ylab="Voltage")
# do the third plot - bottom left
with(consumption, plot(DateTime, Sub_metering_1, main="",
xlab="", ylab="Energy sub metering",
type="n"))
with(consumption, lines(DateTime, Sub_metering_1, col="black"))
with(consumption, lines(DateTime, Sub_metering_2, col="red"))
with(consumption, lines(DateTime, Sub_metering_3, col="blue"))
legend("topright", lty=1, col = c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
bty="n")
# do the fourth plot - bottom right
plot(x=consumption$DateTime,
y=consumption$Global_reactive_power,
xlab="datetime", ylab="Global_reactive_power",
type="l")
dev.off() |
library(glmnet)
# Thyroid training set: column 1 is the response; columns 4+ are the
# predictors (columns 2-3 are skipped -- presumably metadata, TODO confirm)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/thyroid.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)  # reproducible cross-validation fold assignment
# 10-fold cross-validated elastic net (alpha = 0.35 mixes ridge and lasso),
# tuned on mean absolute error, Gaussian response, no standardisation
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.35,family="gaussian",standardize=FALSE)
# Append the fitted coefficient-path summary to the results file
sink('./thyroid_048.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/thyroid/thyroid_048.R | no_license | esbgkannan/QSMART | R | false | false | 353 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/thyroid.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.35,family="gaussian",standardize=FALSE)
sink('./thyroid_048.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# VECTORS
# 1. Create a vector u that has values:
# -10, -9, -8, . . . , 0.
u <- seq(-10,0)
# 2. Create another vector v that has values:
# -0.1, 0.4, 0.9, 1.4, ., and there are 11 terms in v.
v <- seq(-0.1,4.9,by=0.5)
# 3. Calculate the vector of u+v and u*v.
u + v
u * v
# 4. Increase all terms in u by 1, and then take away 20% from all terms in v
# (results are printed only, not assigned back)
u + 1
v * 0.8
# 5. Create a vector w that contains all the numbers from u and then v. Report the length of w.
w <- c(u,v)
length(w)  # 11 + 11 = 22 elements
# 6. Use a command to return the 14th, 15th and 16th value of w. What about the 2nd, the
# 5th, 9th and 21st value of w? What is the 23rd value?
# (w only has 22 elements, so w[23] is NA -- out-of-range indexing)
w[14:16]
w[c(2,5,9,21,23)]
# 7. Sort w in the descending order.
sort(w, decreasing=T)
# MATRICES
# 1. Create the following matrix `A`:
# a b c d e
# A 1 3 5 7 9
# B 11 13 15 17 19
# C 21 23 25 27 29
# D 31 33 35 37 39
A <- matrix(seq(1,39,2), 4, 5, byrow=T)
rownames(A) <- c('A','B','C','D')
colnames(A) <- c('a','b','c','d','e')
# 2. Extract a sub-matrix `A_sub` containing columns a and b
A_sub <- A[1:2,1:2]
# 3. Create 3 integer vectors x, y, z each containing 3 elements.
# Combine the 3 vectors into a 3x3 matrix, where each column
# represents a vector. Change the row names to a, b, c.
x <- seq(0,3)
y <- seq(4,6)
z <- seq(7,9)
B <- cbind(x,y,z) # alt for rows: rbind()
rownames(B) <- c('a','b','c')
colnames(B) <- c('x','y','z') | /exercise1.r | no_license | fpersyn/r-python-training | R | false | false | 1,428 | r | # VECTORS
# 1. Create a vector u that has values:
# -10, -9, -8, . . . , 0.
u <- seq(-10, 0)
# 2. Create another vector v that has values:
# -0.1, 0.4, 0.9, 1.4, ..., and there are 11 terms in v.
v <- seq(-0.1, 4.9, by = 0.5)
# 3. Calculate the vector of u+v and u*v.
u + v
u * v
# 4. Increase all terms in u by 1, and then take away 20% from all terms in v
u + 1
v * 0.8
# 5. Create a vector w that contains all the numbers from u and then v. Report the length of w.
w <- c(u, v)
length(w)
# 6. Use a command to return the 14th, 15th and 16th value of w. What about the 2nd, the
# 5th, 9th and 21st value of w? What is the 23rd value?
w[14:16]
w[c(2, 5, 9, 21, 23)]
# 7. Sort w in the descending order.
sort(w, decreasing = TRUE)

# MATRICES
# 1. Create the following matrix `A`:
#   a  b  c  d  e
# A  1  3  5  7  9
# B 11 13 15 17 19
# C 21 23 25 27 29
# D 31 33 35 37 39
A <- matrix(seq(1, 39, 2), 4, 5, byrow = TRUE)
rownames(A) <- c('A', 'B', 'C', 'D')
colnames(A) <- c('a', 'b', 'c', 'd', 'e')
# 2. Extract a sub-matrix `A_sub` containing columns a and b
# FIX: the original used A[1:2, 1:2], which also drops rows C and D;
# selecting whole columns requires leaving the row index empty.
A_sub <- A[, c('a', 'b')]
# 3. Create 3 integer vectors x, y, z each containing 3 elements.
# Combine the 3 vectors into a 3x3 matrix, where each column
# represents a vector. Change the row names to a, b, c.
# FIX: the original used seq(0, 3), which has FOUR elements (0,1,2,3) and
# makes cbind()/rownames() fail; seq(1, 3) gives the required 3 elements.
x <- seq(1, 3)
y <- seq(4, 6)
z <- seq(7, 9)
B <- cbind(x, y, z) # alt for rows: rbind()
rownames(B) <- c('a', 'b', 'c')
colnames(B) <- c('x', 'y', 'z')
#' Turn parsed cURL command lines into \code{httr} request functions
#'
#' Takes the output of \code{\link{straighten}()} and turns the parsed cURL command lines
#' into working \code{httr} \code{\link[httr]{VERB}()} functions, optionally \code{cat}'ing the text of each function
#' to the console and/or replacing the system clipboard with the source code for the function.
#'
#' @param x a vector of \code{curlcoverter} objects
#' @param quiet if \code{FALSE}, will cause \code{make_req()} to write complete function
#' source code to the console.
#' @param add_clip if \code{TRUE}, will overwrite the system clipboard with the
#' character string contents of the last newly made `httr::VERB` function (i.e.
#' this is intended to be used in a workflow where only one cURL command line
#' is being processed). Defaults to \code{TRUE} if \code{length(x)} is \code{1}
#' @param use_parts logical. If \code{TRUE}, the request function will be generated
#' from the "URL parts" that are created as a result of the call to
#' \code{\link{straighten}}. This is useful if you want to modify the
#' URL parts before calling \code{make_req}. Default: \code{FALSE}.
#' @return a \code{list} of working R \code{function}s.
#' @seealso \code{\link{straighten}()}, \code{httr} \code{\link[httr]{VERB}()}
#' @references \href{https://developer.chrome.com/devtools/docs/network}{Evaluating Network Performance},
#' \href{https://developer.mozilla.org/en-US/docs/Tools/Network_Monitor}{Network Monitor}
#' @examples
#' \dontrun{
#' library(httr)
#'
#' my_ip <- straighten("curl 'https://httpbin.org/ip'") %>% make_req()
#'
#' # external test which captures live data
#' content(my_ip[[1]](), as="parsed")
#' }
#' @export
make_req <- function(x, use_parts=FALSE, quiet=TRUE, add_clip=(length(x)==1)) {
  # Build one httr request function per parsed cURL object in 'x'.
  # The original returned the value of a local assignment ('req <- ...'),
  # which R returns invisibly; the pointless assignment is dropped and the
  # (likely accidental, but kept for compatibility) invisibility is made
  # explicit.
  invisible(purrr::map(x,
                       create_httr_function,
                       use_parts = use_parts,
                       quiet = quiet,
                       add_clip = add_clip))
}
| /R/make_req.r | no_license | ktargows/curlconverter | R | false | false | 2,028 | r | #' Turn parsed cURL command lines into \code{httr} request functions
#'
#' Takes the output of \code{\link{straighten}()} and turns the parsed cURL command lines
#' into working \code{httr} \code{\link[httr]{VERB}()} functions, optionally \code{cat}'ing the text of each function
#' to the console and/or replacing the system clipboard with the source code for the function.
#'
#' @param x a vector of \code{curlcoverter} objects
#' @param quiet if \code{FALSE}, will cause \code{make_req()} to write complete function
#' source code to the console.
#' @param add_clip if \code{TRUE}, will overwrite the system clipboard with the
#' character string contents of the last newly made `httr::VERB` function (i.e.
#' this is intended to be used in a workflow where only one cURL command line
#' is being processed). Defaults to \code{TRUE} if \code{length(x)} is \code{1}
#' @param use_parts logical. If \code{TRUE}, the request function will be generated
#' from the "URL parts" that are created as a result of the call to
#' \code{\link{straighten}}. This is useful if you want to modify the
#' URL parts before calling \code{make_req}. Default: \code{FALSE}.
#' @return a \code{list} of working R \code{function}s.
#' @seealso \code{\link{straighten}()}, \code{httr} \code{\link[httr]{VERB}()}
#' @references \href{https://developer.chrome.com/devtools/docs/network}{Evaluating Network Performance},
#' \href{https://developer.mozilla.org/en-US/docs/Tools/Network_Monitor}{Network Monitor}
#' @examples
#' \dontrun{
#' library(httr)
#'
#' my_ip <- straighten("curl 'https://httpbin.org/ip'") %>% make_req()
#'
#' # external test which captures live data
#' content(my_ip[[1]](), as="parsed")
#' }
#' @export
make_req <- function(x, use_parts=FALSE, quiet=TRUE, add_clip=(length(x)==1)) {
  # Build one httr request function per parsed cURL object in 'x'.
  # The original returned the value of a local assignment ('req <- ...'),
  # which R returns invisibly; the pointless assignment is dropped and the
  # (likely accidental, but kept for compatibility) invisibility is made
  # explicit.
  invisible(purrr::map(x,
                       create_httr_function,
                       use_parts = use_parts,
                       quiet = quiet,
                       add_clip = add_clip))
}
|
#MBatch Copyright ? 2011, 2012, 2013, 2014, 2015, 2016, 2017 University of Texas MD Anderson Cancer Center
#
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
BeaMP <- function(subMatrixGeneData, subDataframeBatchData, by='Batch', overall=TRUE, theIssuesFile=NULL)
{
  # Median-polish (MP) batch correction of a gene-by-sample matrix, logging
  # any error to 'theIssuesFile' and returning NULL on failure.
  logDebug("starting BeaMP")
  # Assign the tryCatch() result directly instead of assigning inside the
  # tried expression: on error the handler records the issue and yields
  # NULL, matching the old behaviour where 'foo' kept its initial NULL.
  foo <- tryCatch(
    MP(subMatrixGeneData, convertDataFrameToSi(subDataframeBatchData), by, overall),
    error=function(e) { handleIssuesFunction(e, theIssuesFile); NULL })
  logDebug("finishing BeaMP")
  return(foo)
}
MP <- function(dat, si, by='Batch', overall=FALSE, ...)
{
  # Median-polish correction of a gene-by-sample matrix 'dat': either a
  # single overall polish (overall=TRUE), or one polish per batch using the
  # batch labels in column 'by' of the sample-information 'si'.
  logDebug("starting MP")
  if (missing(si) & overall==FALSE) {
    stop('sample information is needed for batch-wise median polish corrections')
  }
  if (overall==TRUE) {
    logDebug("MP overall")
    polished <- medpolish(dat, eps=0.0001, trace.iter=FALSE, na.rm=TRUE, ...)
    # Residuals re-centered on the grand (overall) effect.
    final <- polished$residuals + polished$overall
  } else {
    logDebug("MP batch")
    stopifnotWithLogging("Data sample names should match and be in same order as those for batch data", all(colnames(dat)==rownames(si)))
    stopifnotWithLogging("All requested batch types should be in batch data", all(by %in% colnames(si)))
    batchCounts <- table(si[,by])
    residualMat <- dat
    # Kept from the original implementation; this vector is never read.
    unusedCol <- rep(NA, ncol(dat))
    # Polish each batch separately and collect its residuals.
    for (batchName in names(batchCounts)) {
      inBatch <- si[,by] == batchName
      batchPolish <- medpolish(dat[, inBatch], eps=0.0001, trace.iter=FALSE,
                               na.rm=TRUE, ...)
      residualMat[, inBatch] <- batchPolish$residuals
    }
    # Re-center all batch residuals on the overall effect of the full matrix
    # (as in the original, '...' is intentionally not forwarded here).
    allPolish <- medpolish(dat, eps=0.0001, trace.iter=FALSE, na.rm=TRUE)
    final <- residualMat + allPolish$overall
  }
  return(final)
}
#
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
BeaMP<- function(subMatrixGeneData, subDataframeBatchData, by='Batch', overall=TRUE, theIssuesFile=NULL)
{
logDebug("starting BeaMP")
foo <- NULL
tryCatch(
foo <- MP(subMatrixGeneData, convertDataFrameToSi(subDataframeBatchData), by, overall)
,error=function(e) {handleIssuesFunction(e, theIssuesFile) })
logDebug("finishing BeaMP")
return(foo)
}
MP<-function(dat, si, by='Batch', overall=FALSE, ...)
{
logDebug("starting MP")
if(missing(si) & overall==FALSE)
{
stop('sample information is needed for batch-wise median polish corrections')
}
if(overall==TRUE)
{
logDebug("MP overall")
MPdat<-medpolish(dat, eps=0.0001, trace.iter=FALSE, na.rm=TRUE, ...)
MPdat<-MPdat$residuals+MPdat$overall
final<-MPdat
}
else
{
logDebug("MP batch")
stopifnotWithLogging("Data sample names should match and be in same order as those for batch data", all(colnames(dat)==rownames(si)))
stopifnotWithLogging("All requested batch types should be in batch data", all(by %in% colnames(si)))
batch<-table(si[,by])
###logDebug("MP batch 1")
MPBdat<-dat
###logDebug("MP batch 2")
MPBcol<-rep(NA, ncol(dat))
###logDebug("MP batch 3")
for(i in names(batch))
{
###logDebug("MP batch 4")
temp<-dat[, si[,by]==i]
###logDebug("MP batch 5")
temp.MP<-medpolish(temp,eps=0.0001, trace.iter=FALSE,na.rm=TRUE, ...)
###logDebug("MP batch 6")
MPBdat[, si[,by]==i]<-temp.MP$residuals
###logDebug("MP batch 7")
}
###logDebug("MP batch 8")
all.MP<-medpolish(dat, eps=0.0001, trace.iter=FALSE,na.rm=TRUE)
###logDebug("MP batch 9")
final<-MPBdat+all.MP$overall
###logDebug("MP batch 10")
}
return(final)
} |
#' liondown: A package for creating PSU undergraduate, Master's, and PhD theses
#' using R Markdown
#'
#'
#' @section thesis_gitbook:
#' Creates an R Markdown thesis template as a webpage
#' @section thesis_pdf:
#' Creates an R Markdown thesis template as a PDF
#' @section thesis_word:
#' Creates an R Markdown thesis template as a Microsoft Word document
#' @section thesis_epub:
#' Creates an R Markdown thesis template as an ebook
#' @docType package
#' @name liondown
"_PACKAGE"
| /R/liondown.R | permissive | arnold-c/liondown | R | false | false | 483 | r | #' liondown: A package for creating PSU undergraduate, Masters, and PhD theses
#' using R Markdown
#'
#'
#' @section thesis_gitbook:
#' Creates an R Markdown thesis template as a webpage
#' @section thesis_pdf:
#' Creates an R Markdown thesis template as a PDF
#' @section thesis_word:
#' Creates an R Markdown thesis template as a Microsoft Word document
#' @section thesis_epub:
#' Creates an R Markdown thesis template as an ebook
#' @docType package
#' @name liondown
"_PACKAGE"
|
library(randomForest)

# Read the first 100k ratings and treat the rating as a categorical outcome
# for random-forest classification.
# FIX: TRUE is spelled out everywhere (the original used the reassignable
# shorthand T, and partially-matched 'header').
ratingtest <- read.csv(file = "ratings_firstmillion.csv", header = TRUE)
ratingtest <- ratingtest[1:100000, ]
names(ratingtest)
ratingtest$rating <- as.factor(ratingtest$rating)

# Random 80/20 split into development and validation sets.
sample.ind <- sample(2,
                     nrow(ratingtest),
                     replace = TRUE,
                     prob = c(0.8, 0.2))
rating.dev <- ratingtest[sample.ind == 1, ]
rating.val <- ratingtest[sample.ind == 2, ]
# Class distributions should be comparable across the two splits.
table(rating.dev$rating) / nrow(rating.dev)
table(rating.val$rating) / nrow(rating.val)

varNames <- names(rating.dev)
# Exclude ID or Response variable
varNames <- varNames[!varNames %in% c("rating")]
# add + sign between exploratory variables
varNames1 <- paste(varNames, collapse = "+")
# Add response variable and convert to a formula object
rf.form <- as.formula(paste("rating", varNames1, sep = " ~ "))

ratings.rf <- randomForest(rf.form,
                           rating.dev,
                           ntree = 500,
                           importance = TRUE)
plot(ratings.rf)
varImpPlot(ratings.rf)

rating.dev$predicted.response <- predict(ratings.rf, rating.dev)

library(e1071)
library(caret)
# NOTE(review): 'positive' must name a level of the reference factor; the
# rating factor has numeric levels, so positive = 'yes' looks wrong here --
# confirm the intended positive class.
confusionMatrix(data = rating.dev$predicted.response,
                reference = rating.dev$rating,
                positive = 'yes')
rating.val$predicted.response <- predict(ratings.rf, rating.val)
confusionMatrix(data = rating.val$predicted.response,
                reference = rating.val$rating,
                positive = 'yes')
| /RandomForest.R | no_license | sagzv7999/MLProjects | R | false | false | 1,453 | r | library(randomForest)
# Read the first 100k ratings and treat the rating as a categorical outcome
# for random-forest classification (randomForest is loaded above).
# FIX: TRUE is spelled out everywhere (the original used the reassignable
# shorthand T, and partially-matched 'header').
ratingtest <- read.csv(file = "ratings_firstmillion.csv", header = TRUE)
ratingtest <- ratingtest[1:100000, ]
names(ratingtest)
ratingtest$rating <- as.factor(ratingtest$rating)

# Random 80/20 split into development and validation sets.
sample.ind <- sample(2,
                     nrow(ratingtest),
                     replace = TRUE,
                     prob = c(0.8, 0.2))
rating.dev <- ratingtest[sample.ind == 1, ]
rating.val <- ratingtest[sample.ind == 2, ]
# Class distributions should be comparable across the two splits.
table(rating.dev$rating) / nrow(rating.dev)
table(rating.val$rating) / nrow(rating.val)

varNames <- names(rating.dev)
# Exclude ID or Response variable
varNames <- varNames[!varNames %in% c("rating")]
# add + sign between exploratory variables
varNames1 <- paste(varNames, collapse = "+")
# Add response variable and convert to a formula object
rf.form <- as.formula(paste("rating", varNames1, sep = " ~ "))

ratings.rf <- randomForest(rf.form,
                           rating.dev,
                           ntree = 500,
                           importance = TRUE)
plot(ratings.rf)
varImpPlot(ratings.rf)

rating.dev$predicted.response <- predict(ratings.rf, rating.dev)

library(e1071)
library(caret)
# NOTE(review): 'positive' must name a level of the reference factor; the
# rating factor has numeric levels, so positive = 'yes' looks wrong here --
# confirm the intended positive class.
confusionMatrix(data = rating.dev$predicted.response,
                reference = rating.dev$rating,
                positive = 'yes')
rating.val$predicted.response <- predict(ratings.rf, rating.val)
confusionMatrix(data = rating.val$predicted.response,
                reference = rating.val$rating,
                positive = 'yes')
positive='yes')
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
##' Function to retrieve model output from local or remote server
##'
##' @name get.model.output.SIPNET
##' @title Retrieve model output from local or remote server
##'
##' @import PEcAn.utils
##' @export
get.model.output.SIPNET <- function(settings) {
  model <- "SIPNET"
  ## Local run: harvest the results directly.
  if (settings$host$name == 'localhost') {
    return(get.results(settings))
  }
  ## Otherwise the model output sits on a remote host: ship a small script
  ## over, run it there, and pull the resulting output back.
  remoteScript <- paste(settings$outdir, "PEcAn.functions.R", sep = "")
  ## Dump the functions needed to extract output into PEcAn.functions.R ...
  dump(c(paste("model2netcdf", model, sep = "."),
         "get.run.id", "read.ensemble.output", "read.sa.output",
         "read.output", "get.results"),
       file = remoteScript)
  ## ... and append a call so that running the file executes the extraction.
  cat("get.results()", file = remoteScript, append = TRUE)
  ## Copy the script to the remote host.
  rsync('-outi', remoteScript,
        paste(settings$host$name, ':', settings$host$outdir, sep = ''))
  ## Execute the script remotely.
  system(paste("ssh -T", settings$host$name, "'",
               "cd", settings$host$outdir, "; R --vanilla < PEcAn.functions.R'"))
  ## Fetch the produced PEcAn output back to the local output directory.
  rsync('-outi',
        from = paste(settings$host$name, ':', settings$host$outdir,
                     'output.Rdata', sep = ''),
        to = settings$outdir)
}
#==================================================================================================#
####################################################################################################
### EOF. End of R script file.
####################################################################################################
| /models/sipnet/R/get.model.output.SIPNET.R | permissive | Viskari/pecan | R | false | false | 2,568 | r | #-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
##' Function to retrieve model output from local or remote server
##'
##' @name get.model.output.SIPNET
##' @title Retrieve model output from local or remote server
##'
##' @import PEcAn.utils
##' @export
get.model.output.SIPNET <- function(settings) {
  model <- "SIPNET"
  ## Local run: harvest the results directly.
  if (settings$host$name == 'localhost') {
    return(get.results(settings))
  }
  ## Otherwise the model output sits on a remote host: ship a small script
  ## over, run it there, and pull the resulting output back.
  remoteScript <- paste(settings$outdir, "PEcAn.functions.R", sep = "")
  ## Dump the functions needed to extract output into PEcAn.functions.R ...
  dump(c(paste("model2netcdf", model, sep = "."),
         "get.run.id", "read.ensemble.output", "read.sa.output",
         "read.output", "get.results"),
       file = remoteScript)
  ## ... and append a call so that running the file executes the extraction.
  cat("get.results()", file = remoteScript, append = TRUE)
  ## Copy the script to the remote host.
  rsync('-outi', remoteScript,
        paste(settings$host$name, ':', settings$host$outdir, sep = ''))
  ## Execute the script remotely.
  system(paste("ssh -T", settings$host$name, "'",
               "cd", settings$host$outdir, "; R --vanilla < PEcAn.functions.R'"))
  ## Fetch the produced PEcAn output back to the local output directory.
  rsync('-outi',
        from = paste(settings$host$name, ':', settings$host$outdir,
                     'output.Rdata', sep = ''),
        to = settings$outdir)
}
#==================================================================================================#
####################################################################################################
### EOF. End of R script file.
####################################################################################################
|
# Per-borough averages of SAT score, free/reduced-lunch percent, and AP test
# takers; rows with a missing borough are dropped first.
# NOTE(review): the summarise columns are unnamed, so they get unwieldy
# auto-names like `mean(avg_sat_score, na.rm = TRUE)` -- consider naming
# them (would change downstream column references, so left as-is here).
summary_4 <- combined %>%
  drop_na(boro) %>%
  group_by(boro) %>%
  summarize(mean(avg_sat_score, na.rm = TRUE), mean(frl_percent, na.rm = TRUE), mean(`AP Test Takers`, na.rm = TRUE))
drop_na(boro) %>%
group_by(boro) %>%
summarize(mean(avg_sat_score, na.rm = TRUE), mean(frl_percent, na.rm = TRUE), mean(`AP Test Takers`, na.rm = TRUE)) |
#' @include Modstrings.R
#' @include Modstrings-ModStringCodec.R
NULL
# These functions need to be here to access the modified functions of
# - .charToModString
#' @name ModString
#' @aliases ModString,MaskedModString-method ModString,AsIs-method
#' ModString,ModString-method ModString,XString-method
#' ModString,character-method ModString,factor-method
#' as.character,ModString-method as.vector,ModString-method
#' ==,ModString,ModString-method ==,ModString,XString-method
#' ==,XString,ModString-method
#'
#' @title ModString objects
#'
#' @description
#' The virtual \code{ModString} class derives from the \code{XString} virtual
#' class. Like its parent and its children, it is used for storing sequences of
#' characters. However, the \code{XString}/\code{BString} class requires single
#' byte characters as the letters of the input sequences. The \code{ModString}
#' extends the capability for multi-byte chracters by encoding these characters
#' into a single byte characters using a dictionary for internal conversion. It
#' also takes care of different encoding behavior of operating systems.
#'
#' The \code{\link{ModDNAString}} and \code{\link{ModRNAString}} classes derive
#' from the \code{ModString} class and use the functionality to store nucleotide
#' sequences containing modified nucleotides. To describe modified RNA and DNA
#' nucleotides with a single letter, special characters are commonly used, eg.
#' from the greek alphabet, which are multi-byte characters.
#'
#' The \code{ModString} class is virtual and it cannot be directly used to
#' create an object. Please have a look at \code{\link{ModDNAString}} and
#' \code{\link{ModRNAString}} for the specific alphabets of the individual
#' classes.
NULL
#' @name ModDNAString
#'
#' @title ModDNAString class
#'
#' @description
#' A \code{ModDNAString} object allows DNA sequences with modified nucleotides
#' to be stored and manipulated.
#'
#' @details
#' The ModDNAString class contains the virtual \code{\link{ModString}} class,
#' which is itself based on the \code{\link[Biostrings:XString-class]{XString}}
#' class. Therefore, functions for working with \code{XString} classes are
#' inherited.
#'
#' The \code{\link{alphabet}} of the ModDNAString class consists of the
#' non-extended IUPAC codes "A,G,C,T,N", the gap letter "-", the hard masking
#' letter "+", the not available letter "." and letters for individual
#' modifications: \code{alphabet(ModDNAString())}.
#'
#' Since the special characters are encoded differently depending on the OS and
#' encoding settings of the R session, it is not always possible to enter a DNA
#' sequence containing modified nucleotides via the R console. The most
#' convenient solution for this problem is to use the function
#' \code{\link{modifyNucleotides}} and modify an existing DNAString or
#' ModDNAString object.
#'
#' A \code{ModDNAString} object can be converted into a \code{DNAString} object
#' using the \code{DNAstring()} constructor. Modified nucleotides are
#' automatically converted into their base nucleotides.
#'
#' If a modified DNA nucleotide you want to work with is not part of the
#' alphabet, please let us know.
#'
#' @param x the input as a \code{character}.
#' @param start the position in the character vector to use as start position in
#' the \code{ModDNAString} object (default \code{start = 1}).
#' @param nchar the width of the character vector to use in the
#' \code{ModDNAString} object (default \code{nchar = NA}). The end position is
#' calculated as \code{start + nchar - 1}.
#'
#' @return a \code{ModDNAString} object
#'
#' @examples
#' # Constructing ModDNAString containing an m6A
#' md1 <- ModDNAString("AGCT`")
#' md1
#'
#' # the alphabet of the ModDNAString class
#' alphabet(md1)
#' # due to encoding issues the shortNames can also be used
#' shortName(md1)
#' # due to encoding issues the nomenclature can also be used
#' nomenclature(md1)
#'
#' # convert to DNAString
#' d1 <- DNAString(md1)
#' d1
NULL
#' @name ModRNAString
#'
#' @title ModRNAString class
#'
#' @description
#' A \code{ModRNAString} object allows RNA sequences with modified nucleotides
#' to be stored and manipulated.
#'
#' @details
#' The ModRNAString class contains the virtual \code{\link{ModString}} class,
#' which is itself based on the \code{\link[Biostrings:XString-class]{XString}}
#' class. Therefore, functions for working with \code{XString} classes are
#' inherited.
#'
#' The alphabet of the ModRNAString class consists of the non-extended IUPAC
#' codes "A,G,C,U", the gap letter "-", the hard masking letter "+", the not
#' available letter "." and letters for individual modifications:
#' \code{alphabet(ModRNAString())}.
#'
#' Since the special characters are encoded differently depending on the OS and
#' encoding settings of the R session, it is not always possible to enter a RNA
#' sequence containing modified nucleotides via the R console. The most
#' convenient solution for this problem is to use the function
#' \code{\link{modifyNucleotides}} and modify an existing RNAString or
#' ModRNAString object.
#'
#' A \code{ModRNAString} object can be converted into a \code{RNAString} object
#' using the \code{RNAstring()} constructor. Modified nucleotides are
#' automatically converted into their base nucleotides.
#'
#' If a modified RNA nucleotide you want to work with is not part of the
#' alphabet, please let us know.
#'
#' @param x the input as a \code{character}.
#' @param start the position in the character vector to use as start position in
#' the \code{ModRNAString} object (default \code{start = 1}).
#' @param nchar the width of the character vector to use in the
#' \code{ModRNAString} object (default \code{nchar = NA}). The end position is
#' calculated as \code{start + nchar - 1}.
#'
#' @return a \code{ModRNAString} object
#'
#' @examples
#' # Constructing ModDNAString containing an m6A and a dihydrouridine
#' mr1 <- ModRNAString("AGCU`D")
#' mr1
#'
#' # the alphabet of the ModRNAString class
#' alphabet(mr1)
#' # due to encoding issues the shortNames can also be used
#' shortName(mr1)
#' # due to encoding issues the nomenclature can also be used
#' nomenclature(mr1)
#'
#' # convert to RNAString
#' r1 <- RNAString(mr1)
#' r1
NULL
# derived from Biostrings/R/XString-class.R ------------------------------------
# Virtual parent class for modified-nucleotide sequences; letters (possibly
# multi-byte) are stored as single-byte codes (see the ModStringCodec).
setClass("ModString", contains = "XString")
#' @rdname ModString
#' @export
setClass("ModDNAString", contains = "ModString")
#' @rdname ModString
#' @export
setClass("ModRNAString", contains = "ModString")
# seqtype() identifies the sequence type ("ModDNA"/"ModRNA") to the
# Biostrings/XVector machinery.
#' @rdname Modstrings-internals
#' @export
setMethod("seqtype", "ModDNAString", function(x) "ModDNA")
#' @rdname Modstrings-internals
#' @export
setMethod("seqtype", "ModRNAString", function(x) "ModRNA")
# Changing the seqtype of a ModString re-decodes the sequence: the one-byte
# modification codes are mapped back to their originating base letters using
# the codec of the *current* seqtype, and a new object of the target class
# is built from the resulting character string.
#' @rdname Modstrings-internals
#' @export
setReplaceMethod(
  "seqtype", "ModString",
  function(x, value)
  {
    ans_class <- paste0(value, "String")
    # Nothing to do if x already has the requested class.
    if(is(x,ans_class)){
      return(x)
    }
    # Pull the raw one-byte representation out of the XRaw storage ...
    ans_seq <-
      XVector:::extract_character_from_XRaw_by_ranges(x, 1L, length(x),
                                                      collapse=FALSE,
                                                      lkup=NULL)
    # ... and translate each modification code to its originating base.
    ans_seq <-
      .convert_one_byte_codes_to_originating_base(ans_seq,
                                                  modscodec(seqtype(x)))
    do.call(ans_class,list(ans_seq))
  }
)
# low level functions construct XString objects and extract character
# These hooks plug the Modstrings codec into the generic XString machinery:
# extraction converts stored one-byte codes back to (possibly multi-byte)
# letters; construction converts letters to one-byte codes before storage.
setMethod(
  "extract_character_from_XString_by_positions", "ModString",
  function(x, pos, collapse=FALSE)
  {
    # Let the parent extract the raw bytes, then decode them.
    ans <- callNextMethod()
    codec <- modscodec(seqtype(x))
    .convert_one_byte_codes_to_letters(ans, codec)
  }
)
setMethod(
  "extract_character_from_XString_by_ranges", "ModString",
  function(x, start, width, collapse=FALSE)
  {
    # Let the parent extract the raw bytes, then decode them.
    ans <- callNextMethod()
    codec <- modscodec(seqtype(x))
    .convert_one_byte_codes_to_letters(ans, codec)
  }
)
setMethod(
  "make_XString_from_string", "ModString",
  function(x0, string, start, width)
  {
    # Encode the incoming letters before the parent method stores the bytes.
    codec <- modscodec(seqtype(x0))
    string <- .convert_letters_to_one_byte_codes(string, codec)
    callNextMethod()
  }
)
# Constructor ------------------------------------------------------------------
# User-facing constructors; both delegate to the XString() generic with the
# proper seqtype, mapping 'nchar' onto XString()'s 'width' argument.
#' @rdname ModDNAString
#' @export
ModDNAString <- function(x = "", start = 1, nchar = NA){
  XString("ModDNA", x, start = start, width = nchar)
}
#' @rdname ModRNAString
#' @export
ModRNAString <- function(x = "", start = 1, nchar = NA){
  XString("ModRNA", x, start = start, width = nchar)
}
# Coercion ---------------------------------------------------------------------
#' @rdname Modstrings-internals
#' @export
setMethod(
  "XString", "ModString",
  function(seqtype, x, start = NA, end = NA, width = NA)
  {
    ans <- subseq(x, start = start, end = end, width = width)
    ans_class <- paste0(seqtype, "String")
    if(is(ans,ans_class)){
      return(ans)
    }
    # convert over "base" classes to convert T/U
    # The three-step seqtype reassignment first strips the current object to
    # its base class (e.g. ModDNA -> DNA), then moves it to the target's
    # base class (DNA -> RNA, exchanging T/U), and finally promotes it to
    # the requested class (RNA -> ModRNA); a direct jump would skip the
    # T/U exchange.
    seqtype(ans) <- gsub("Mod","",seqtype(ans))
    seqtype(ans) <- gsub("Mod","",seqtype)
    seqtype(ans) <- seqtype
    ans
  }
)
# as(x, "Mod*String") coercions: for XString inputs they reuse the
# `seqtype<-` machinery above; for character input they go through the
# user-facing constructors.
#' @export
setAs("XString", "ModDNAString",
    function(from) {
      seqtype(from) <- "ModDNA"
      from
    }
)
#' @export
setAs("XString", "ModRNAString",
    function(from) {
      seqtype(from) <- "ModRNA"
      from
    }
)
#' @export
setAs("character", "ModDNAString", function(from) ModDNAString(from))
#' @export
setAs("character", "ModRNAString", function(from) ModRNAString(from))
# Show -------------------------------------------------------------------------
### Placeholder, initialized in .onLoad()
# Filled at package load time by the make_MOD*_COLORED_LETTERS() builders
# below.
MODDNA_COLORED_LETTERS <- NULL
MODRNA_COLORED_LETTERS <- NULL
### Return a named character vector where all the names are single letters.
### Colors for A, C, G, and T were inspired by
### https://en.wikipedia.org/wiki/Nucleotide#Structure
### Called in .onLoad() to initialize MODDNA_COLORED_LETTERS and
### MODRNA_COLORED_LETTERS
#' @importFrom crayon make_style
#' @importFrom grDevices rgb
# Shared builder for the colored-letter lookup tables: every letter of the
# codec's alphabet is styled green-on-dark-grey, then the four base letters
# (A, C, G and T or U), N and the gap/mask letters get their own styling.
# 'base4' is the alphabet's four unmodified letters in A, C, G, T/U order.
.make_MODX_COLORED_LETTERS <- function(codec, base4)
{
  ms <- crayon::make_style
  regrbl <- grDevices::rgb
  ## modified nucleotide letters: green on dark grey
  ans <- sprintf(ms(regrbl(0.2,0.2,0.2), bg=TRUE)(ms(regrbl(0,1,0))("%s")),
                 letters(codec))
  names(ans) <- letters(codec)
  # base colours
  ans[base4[1L]] <- ms(regrbl(1, 0.5, 0.5), bg=TRUE)(ms("black")(base4[1L]))
  ans[base4[2L]] <- ms(regrbl(0.5, 1, 0.5), bg=TRUE)(ms("black")(base4[2L]))
  ans[base4[3L]] <- ms(regrbl(0.5, 1, 1), bg=TRUE)(ms("black")(base4[3L]))
  ans[base4[4L]] <- ms(regrbl(1, 0.8, 0.5), bg=TRUE)(ms("black")(base4[4L]))
  ans["N"] <- ms("grey", bg=TRUE)(ms(regrbl(0.4,0.4,0.4))("N"))
  ans["-"] <- "-"
  ans["+"] <- "+"
  ans["."] <- "."
  ans
}

# DNA flavour: letters of the ModDNA codec, with T as the fourth base.
make_MODDNA_COLORED_LETTERS <- function()
{
  .make_MODX_COLORED_LETTERS(MOD_DNA_STRING_CODEC, c("A", "C", "G", "T"))
}

# RNA flavour: letters of the ModRNA codec, with U as the fourth base.
make_MODRNA_COLORED_LETTERS <- function()
{
  .make_MODX_COLORED_LETTERS(MOD_RNA_STRING_CODEC, c("A", "C", "G", "U"))
}
### 'x' must be a character vector.
.add_modx_colors <- function(x, COLORED_LETTERS)
{
  # Coloring is opt-in via options(Biostrings.coloring = TRUE); otherwise
  # the input is returned unchanged.
  if (!isTRUE(getOption("Biostrings.coloring", default=FALSE)))
    return(x)
  # Replace every letter that has an entry in COLORED_LETTERS with its
  # styled counterpart; unknown letters pass through untouched.
  colorize_one <- function(xi) {
    chars <- strsplit(xi, "")[[1L]]
    hit <- match(chars, names(COLORED_LETTERS))
    found <- !is.na(hit)
    chars[found] <- COLORED_LETTERS[hit[found]]
    paste0(chars, collapse="")
  }
  ans <- vapply(x, colorize_one, character(1), USE.NAMES=FALSE)
  # vapply() dropped the names; restore them from the input.
  if (!is.null(names(x)))
    names(ans) <- names(x)
  ans
}
# Per-alphabet wrappers binding the load-time color tables.
.add_moddna_colors <- function(x)
{
  .add_modx_colors(x, MODDNA_COLORED_LETTERS)
}
.add_modrna_colors <- function(x)
{
  .add_modx_colors(x, MODRNA_COLORED_LETTERS)
}
# S3 generic used by the show() method below: the default is the identity,
# while the "ModDNA"/"ModRNA" classes dispatch to their color tables.
add_colors <- function(x) UseMethod("add_colors")
add_colors.default <- identity
add_colors.ModDNA <- .add_moddna_colors
add_colors.ModRNA <- .add_modrna_colors
setMethod("show", "ModString",
function(object)
{
object_len <- object@length
cat(object_len, "-letter ", class(object), " object\n", sep="")
snippet <- .toSeqSnippet(object, getOption("width") - 5L)
cat("seq: ", add_colors(snippet), "\n", sep="")
}
)
# Comparison -------------------------------------------------------------------
.compare_ModString <- function(e1, e2)
{
  # Refuse comparisons between sequence types that are not comparable.
  if (!comparable_seqtypes(seqtype(e1), seqtype(e2))) {
    stop("comparison between a \"", class(e1), "\" instance ",
         "and a \"", class(e2), "\" instance ",
         "is not supported")
  }
  # Any non-ModString operand is coerced to a plain BString first, so the
  # byte-level comparison below sees two comparable representations.
  as_comparable <- function(e) {
    if (is(e, "ModString")) e else BString(e)
  }
  .XString.equal(as_comparable(e1), as_comparable(e2))
}
# Equality between ModString objects and/or plain XString objects; all three
# signatures delegate to .compare_ModString() above.
#' @export
setMethod("==", signature(e1 = "ModString", e2 = "ModString"),
    function(e1, e2) .compare_ModString(e1, e2)
)
#' @export
setMethod("==", signature(e1 = "ModString", e2 = "XString"),
    function(e1, e2) .compare_ModString(e1, e2)
)
#' @export
setMethod("==", signature(e1 = "XString", e2 = "ModString"),
    function(e1, e2) .compare_ModString(e1, e2)
)
# these accessors are not provided by the XVector package
# Simple accessors for the @shared and @offset slots of a ModString.
setGeneric(name = "sharedXVector",
           signature = "x",
           def = function(x) standardGeneric("sharedXVector"))
setGeneric(name = "offsetXVector",
           signature = "x",
           def = function(x) standardGeneric("offsetXVector"))
setMethod("sharedXVector","ModString",
          function(x) x@shared)
setMethod("offsetXVector","ModString",
          function(x) x@offset)
| /R/Modstrings-ModString.R | no_license | FelixErnst/Modstrings | R | false | false | 14,174 | r | #' @include Modstrings.R
#' @include Modstrings-ModStringCodec.R
NULL
# These functions need to be here to access the modified functions of
# - .charToModString
#' @name ModString
#' @aliases ModString,MaskedModString-method ModString,AsIs-method
#' ModString,ModString-method ModString,XString-method
#' ModString,character-method ModString,factor-method
#' as.character,ModString-method as.vector,ModString-method
#' ==,ModString,ModString-method ==,ModString,XString-method
#' ==,XString,ModString-method
#'
#' @title ModString objects
#'
#' @description
#' The virtual \code{ModString} class derives from the \code{XString} virtual
#' class. Like its parent and its children, it is used for storing sequences of
#' characters. However, the \code{XString}/\code{BString} class requires single
#' byte characters as the letters of the input sequences. The \code{ModString}
#' extends the capability for multi-byte characters by encoding these characters
#' into a single byte characters using a dictionary for internal conversion. It
#' also takes care of different encoding behavior of operating systems.
#'
#' The \code{\link{ModDNAString}} and \code{\link{ModRNAString}} classes derive
#' from the \code{ModString} class and use the functionality to store nucleotide
#' sequences containing modified nucleotides. To describe modified RNA and DNA
#' nucleotides with a single letter, special characters are commonly used, eg.
#' from the greek alphabet, which are multi-byte characters.
#'
#' The \code{ModString} class is virtual and it cannot be directly used to
#' create an object. Please have a look at \code{\link{ModDNAString}} and
#' \code{\link{ModRNAString}} for the specific alphabets of the individual
#' classes.
NULL
#' @name ModDNAString
#'
#' @title ModDNAString class
#'
#' @description
#' A \code{ModDNAString} object allows DNA sequences with modified nucleotides
#' to be stored and manipulated.
#'
#' @details
#' The ModDNAString class contains the virtual \code{\link{ModString}} class,
#' which is itself based on the \code{\link[Biostrings:XString-class]{XString}}
#' class. Therefore, functions for working with \code{XString} classes are
#' inherited.
#'
#' The \code{\link{alphabet}} of the ModDNAString class consist of the
#' non-extended IUPAC codes "A,G,C,T,N", the gap letter "-", the hard masking
#' letter "+", the not available letter "." and letters for individual
#' modifications: \code{alphabet(ModDNAString())}.
#'
#' Since the special characters are encoded differently depending on the OS and
#' encoding settings of the R session, it is not always possible to enter a DNA
#' sequence containing modified nucleotides via the R console. The most
#' convenient solution for this problem is to use the function
#' \code{\link{modifyNucleotides}} and modify an existing DNAString or
#' ModDNAString object.
#'
#' A \code{ModDNAString} object can be converted into a \code{DNAString} object
#' using the \code{DNAString()} constructor. Modified nucleotides are
#' automatically converted into their base nucleotides.
#'
#' If a modified DNA nucleotide you want to work with is not part of the
#' alphabet, please let us know.
#'
#' @param x the input as a \code{character}.
#' @param start the position in the character vector to use as start position in
#' the \code{ModDNAString} object (default \code{start = 1}).
#' @param nchar the width of the character vector to use in the
#' \code{ModDNAString} object (default \code{nchar = NA}). The end position is
#' calculated as \code{start + nchar - 1}.
#'
#' @return a \code{ModDNAString} object
#'
#' @examples
#' # Constructing ModDNAString containing an m6A
#' md1 <- ModDNAString("AGCT`")
#' md1
#'
#' # the alphabet of the ModDNAString class
#' alphabet(md1)
#' # due to encoding issues the shortNames can also be used
#' shortName(md1)
#' # due to encoding issues the nomenclature can also be used
#' nomenclature(md1)
#'
#' # convert to DNAString
#' d1 <- DNAString(md1)
#' d1
NULL
#' @name ModRNAString
#'
#' @title ModRNAString class
#'
#' @description
#' A \code{ModRNAString} object allows RNA sequences with modified nucleotides
#' to be stored and manipulated.
#'
#' @details
#' The ModRNAString class contains the virtual \code{\link{ModString}} class,
#' which is itself based on the \code{\link[Biostrings:XString-class]{XString}}
#' class. Therefore, functions for working with \code{XString} classes are
#' inherited.
#'
#' The alphabet of the ModRNAString class consist of the non-extended IUPAC
#' codes "A,G,C,U", the gap letter "-", the hard masking letter "+", the not
#' available letter "." and letters for individual modifications:
#' \code{alphabet(ModRNAString())}.
#'
#' Since the special characters are encoded differently depending on the OS and
#' encoding settings of the R session, it is not always possible to enter a RNA
#' sequence containing modified nucleotides via the R console. The most
#' convenient solution for this problem is to use the function
#' \code{\link{modifyNucleotides}} and modify an existing RNAString or
#' ModRNAString object.
#'
#' A \code{ModRNAString} object can be converted into a \code{RNAString} object
#' using the \code{RNAString()} constructor. Modified nucleotides are
#' automatically converted into their base nucleotides.
#'
#' If a modified RNA nucleotide you want to work with is not part of the
#' alphabet, please let us know.
#'
#' @param x the input as a \code{character}.
#' @param start the position in the character vector to use as start position in
#' the \code{ModRNAString} object (default \code{start = 1}).
#' @param nchar the width of the character vector to use in the
#' \code{ModRNAString} object (default \code{nchar = NA}). The end position is
#' calculated as \code{start + nchar - 1}.
#'
#' @return a \code{ModRNAString} object
#'
#' @examples
#' # Constructing ModRNAString containing an m6A and a dihydrouridine
#' mr1 <- ModRNAString("AGCU`D")
#' mr1
#'
#' # the alphabet of the ModRNAString class
#' alphabet(mr1)
#' # due to encoding issues the shortNames can also be used
#' shortName(mr1)
#' # due to encoding issues the nomenclature can also be used
#' nomenclature(mr1)
#'
#' # convert to RNAString
#' r1 <- RNAString(mr1)
#' r1
NULL
# derived from Biostrings/R/XString-class.R ------------------------------------
# Virtual parent class for modified-nucleotide sequences; extends XString so
# all of its machinery (subsetting, subseq, masking, ...) is inherited.
setClass("ModString", contains = "XString")
#' @rdname ModString
#' @export
setClass("ModDNAString", contains = "ModString")
#' @rdname ModString
#' @export
setClass("ModRNAString", contains = "ModString")
# seqtype() returns the sequence-type tag used throughout Biostrings/Modstrings
# to select codecs and drive coercions.
#' @rdname Modstrings-internals
#' @export
setMethod("seqtype", "ModDNAString", function(x) "ModDNA")
#' @rdname Modstrings-internals
#' @export
setMethod("seqtype", "ModRNAString", function(x) "ModRNA")
#' @rdname Modstrings-internals
#' @export
setReplaceMethod(
    "seqtype", "ModString",
    function(x, value)
    {
        # changing the seqtype converts the object to the "<value>String" class
        ans_class <- paste0(value, "String")
        if(is(x,ans_class)){
            return(x)
        }
        # pull out the raw one-byte-encoded sequence ...
        ans_seq <-
            XVector:::extract_character_from_XRaw_by_ranges(x, 1L, length(x),
                                                            collapse=FALSE,
                                                            lkup=NULL)
        # ... and map each modification code back to its originating base
        # using the codec of the current seqtype
        ans_seq <-
            .convert_one_byte_codes_to_originating_base(ans_seq,
                                                        modscodec(seqtype(x)))
        do.call(ans_class,list(ans_seq))
    }
)
# low level functions construct XString objects and extract character
# Decode extracted bytes: let the parent method pull the raw characters, then
# translate one-byte modification codes back into their display letters.
setMethod(
    "extract_character_from_XString_by_positions", "ModString",
    function(x, pos, collapse=FALSE)
    {
        ans <- callNextMethod()
        codec <- modscodec(seqtype(x))
        .convert_one_byte_codes_to_letters(ans, codec)
    }
)
# Range-based variant of the decoder above.
setMethod(
    "extract_character_from_XString_by_ranges", "ModString",
    function(x, start, width, collapse=FALSE)
    {
        ans <- callNextMethod()
        codec <- modscodec(seqtype(x))
        .convert_one_byte_codes_to_letters(ans, codec)
    }
)
# Encoding direction: translate display letters to one-byte codes before the
# parent method stores the string.
setMethod(
    "make_XString_from_string", "ModString",
    function(x0, string, start, width)
    {
        codec <- modscodec(seqtype(x0))
        string <- .convert_letters_to_one_byte_codes(string, codec)
        callNextMethod()
    }
)
# Constructor ------------------------------------------------------------------
# User-facing constructors: thin wrappers around the low-level XString()
# constructor with the seqtype fixed to "ModDNA" / "ModRNA".
#' @rdname ModDNAString
#' @export
ModDNAString <- function(x = "", start = 1, nchar = NA){
  XString("ModDNA", x, start = start, width = nchar)
}
#' @rdname ModRNAString
#' @export
ModRNAString <- function(x = "", start = 1, nchar = NA){
  XString("ModRNA", x, start = start, width = nchar)
}
# Coercion ---------------------------------------------------------------------
#' @rdname Modstrings-internals
#' @export
setMethod(
    "XString", "ModString",
    function(seqtype, x, start = NA, end = NA, width = NA)
    {
        ans <- subseq(x, start = start, end = end, width = width)
        ans_class <- paste0(seqtype, "String")
        if(is(ans,ans_class)){
            return(ans)
        }
        # convert over "base" classes to convert T/U:
        # 1. drop the "Mod" prefix of the current type (e.g. ModRNA -> RNA)
        # 2. move to the base version of the target type (handles the T/U swap)
        # 3. finally set the requested target type
        seqtype(ans) <- gsub("Mod","",seqtype(ans))
        seqtype(ans) <- gsub("Mod","",seqtype)
        seqtype(ans) <- seqtype
        ans
    }
)
# as(x, "Mod*String") coercions: delegate to the seqtype<- replacement method
# above, which re-encodes the sequence via the base nucleotide classes.
#' @export
setAs("XString", "ModDNAString",
      function(from) {
        seqtype(from) <- "ModDNA"
        from
      }
)
#' @export
setAs("XString", "ModRNAString",
      function(from) {
        seqtype(from) <- "ModRNA"
        from
      }
)
# character input goes through the regular constructors
#' @export
setAs("character", "ModDNAString", function(from) ModDNAString(from))
#' @export
setAs("character", "ModRNAString", function(from) ModRNAString(from))
# Show -------------------------------------------------------------------------
### Placeholder, initialized in .onLoad()
### (presumably kept NULL at build time because the ANSI styles must be created
### in the user's session -- see .onLoad())
MODDNA_COLORED_LETTERS <- NULL
MODRNA_COLORED_LETTERS <- NULL
### Return a named character vector where all the names are single letters.
### Colors for A, C, G, and T were inspired by
### https://en.wikipedia.org/wiki/Nucleotide#Structure
### Called in .onLoad() to initialize MODDNA_COLORED_LETTERS and
### MODRNA_COLORED_LETTERS
### Build the letter -> ANSI-colored-letter map used when printing ModDNA
### sequences; .onLoad() stores the result in MODDNA_COLORED_LETTERS.
#' @importFrom crayon make_style
#' @importFrom grDevices rgb
make_MODDNA_COLORED_LETTERS <- function()
{
  style <- crayon::make_style
  col <- grDevices::rgb
  ## modification letters: green text on a dark grey background
  mod_letters <- letters(MOD_DNA_STRING_CODEC)
  mod_template <- style(col(0.2,0.2,0.2), bg=TRUE)(style(col(0,1,0))("%s"))
  ans <- sprintf(mod_template, mod_letters)
  names(ans) <- mod_letters
  ## the four bases: black text on a base-specific pastel background
  on_bg <- function(bg, letter) style(bg, bg=TRUE)(style("black")(letter))
  ans["A"] <- on_bg(col(1, 0.5, 0.5), "A")
  ans["C"] <- on_bg(col(0.5, 1, 0.5), "C")
  ans["G"] <- on_bg(col(0.5, 1, 1), "G")
  ans["T"] <- on_bg(col(1, 0.8, 0.5), "T")
  ## ambiguity letter N: dark grey text on a light grey background
  ans["N"] <- style("grey", bg=TRUE)(style(col(0.4,0.4,0.4))("N"))
  ## gap, hard-masking and not-available letters stay uncolored
  ans["-"] <- "-"
  ans["+"] <- "+"
  ans["."] <- "."
  ans
}
### RNA twin of make_MODDNA_COLORED_LETTERS(): same palette, with U in place
### of T. Called in .onLoad() to fill MODRNA_COLORED_LETTERS.
#' @importFrom crayon make_style
#' @importFrom grDevices rgb
make_MODRNA_COLORED_LETTERS <- function()
{
  ms <- crayon::make_style
  regrbl <- grDevices::rgb
  ## modified RNA nucleotide letters
  # every modification letter: green text on a dark grey background
  ans <- sprintf(ms(regrbl(0.2,0.2,0.2), bg=TRUE)(ms(regrbl(0,1,0))("%s")),
                 letters(MOD_RNA_STRING_CODEC))
  names(ans) <- letters(MOD_RNA_STRING_CODEC)
  # base colours
  ans["A"] <- ms(regrbl(1, 0.5, 0.5), bg=TRUE)(ms("black")("A"))
  ans["C"] <- ms(regrbl(0.5, 1, 0.5), bg=TRUE)(ms("black")("C"))
  ans["G"] <- ms(regrbl(0.5, 1, 1), bg=TRUE)(ms("black")("G"))
  ans["U"] <- ms(regrbl(1, 0.8, 0.5), bg=TRUE)(ms("black")("U"))
  ans["N"] <- ms("grey", bg=TRUE)(ms(regrbl(0.4,0.4,0.4))("N"))
  # gap ("-"), hard-masking ("+") and not-available (".") letters: no color
  ans["-"] <- "-"
  ans["+"] <- "+"
  ans["."] <- "."
  ans
}
### 'x' must be a character vector.
### Replaces every letter of each string in 'x' that appears in
### names(COLORED_LETTERS) with its ANSI-colored counterpart. A no-op unless
### the "Biostrings.coloring" option is TRUE.
.add_modx_colors <- function(x, COLORED_LETTERS){
  if (!isTRUE(getOption("Biostrings.coloring", default=FALSE)))
    return(x)
  ans <- vapply(x,
                function(xi){
                  # split into single characters, recolor the known ones,
                  # then glue the string back together
                  xi <- strsplit(xi,"")[[1L]]
                  m <- match(xi, names(COLORED_LETTERS))
                  match_idx <- which(!is.na(m))
                  xi[match_idx] <- COLORED_LETTERS[m[match_idx]]
                  paste0(xi, collapse="")
                },
                character(1),
                USE.NAMES=FALSE
  )
  # vapply() ran with USE.NAMES=FALSE; restore the input names, if any
  x_names <- names(x)
  if (!is.null(x_names))
    names(ans) <- x_names
  ans
}
# Colorize ModDNA letters using the map built in .onLoad().
.add_moddna_colors <- function(x)
{
  .add_modx_colors(x, MODDNA_COLORED_LETTERS)
}
# Colorize ModRNA letters using the map built in .onLoad().
.add_modrna_colors <- function(x)
{
  .add_modx_colors(x, MODRNA_COLORED_LETTERS)
}
# S3 generic used by show(): dispatches on the class attached to the snippet;
# anything that is not ModDNA/ModRNA passes through unchanged.
add_colors <- function(x) UseMethod("add_colors")
add_colors.default <- identity
add_colors.ModDNA <- .add_moddna_colors
add_colors.ModRNA <- .add_modrna_colors
# Console display: "<n>-letter <class> object" header plus a width-limited,
# optionally colorized sequence snippet.
setMethod("show", "ModString",
    function(object)
    {
        object_len <- object@length
        cat(object_len, "-letter ", class(object), " object\n", sep="")
        # keep 5 columns for the "seq: " prefix
        snippet <- .toSeqSnippet(object, getOption("width") - 5L)
        cat("seq: ", add_colors(snippet), "\n", sep="")
    }
)
# Comparison -------------------------------------------------------------------
# Shared implementation for all ==(ModString, ...) methods below. Errors when
# the two sequence types are not comparable; coerces plain XString operands to
# BString before delegating to the byte-level .XString.equal().
.compare_ModString <- function(e1,
                               e2){
  if (!comparable_seqtypes(seqtype(e1), seqtype(e2))) {
    class1 <- class(e1)
    class2 <- class(e2)
    stop("comparison between a \"", class1, "\" instance ",
         "and a \"", class2, "\" instance ",
         "is not supported")
  }
  if(!is(e1,"ModString")){
    e1 <- BString(e1)
  }
  if(!is(e2,"ModString")){
    e2 <- BString(e2)
  }
  .XString.equal(e1, e2)
}
#' @export
setMethod("==", signature(e1 = "ModString", e2 = "ModString"),
          function(e1, e2) .compare_ModString(e1, e2)
)
#' @export
setMethod("==", signature(e1 = "ModString", e2 = "XString"),
          function(e1, e2) .compare_ModString(e1, e2)
)
#' @export
setMethod("==", signature(e1 = "XString", e2 = "ModString"),
          function(e1, e2) .compare_ModString(e1, e2)
)
# these accessors are not provided by the XVector package
# Generics exposing the two low-level slots of any XVector-derived object:
# the shared data object and the integer offset into it.
setGeneric(name = "sharedXVector",
           signature = "x",
           def = function(x) standardGeneric("sharedXVector"))
setGeneric(name = "offsetXVector",
           signature = "x",
           def = function(x) standardGeneric("offsetXVector"))
setMethod("sharedXVector","ModString",
          function(x) x@shared)
setMethod("offsetXVector","ModString",
          function(x) x@offset)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Lethal}
\alias{Lethal}
\title{Survival time of mice injected with an experimental lethal drug}
\format{A data frame with 30 observations on the following variable.
\describe{
\item{survival}{a numeric vector}
}}
\source{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\description{
Data for Example 6.12
}
\examples{
str(Lethal)
attach(Lethal)
SIGN.test(survival,md=45,alternative="less")
detach(Lethal)
}
\keyword{datasets}
| /man/Lethal.Rd | no_license | lelou6666/BSDA | R | false | true | 573 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Lethal}
\alias{Lethal}
\title{Survival time of mice injected with an experimental lethal drug}
\format{A data frame with 30 observations on the following variable.
\describe{
\item{survival}{a numeric vector}
}}
\source{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\description{
Data for Example 6.12
}
\examples{
str(Lethal)
attach(Lethal)
SIGN.test(survival,md=45,alternative="less")
detach(Lethal)
}
\keyword{datasets}
|
# Demo: run the network builder on the two bundled example datasets.
# NOTE(review): "getNetwrok" looks like a typo for "getNetwork" -- confirm the
# exported function name before renaming anything.
# 1. dataset with known problems
getNetwrok(badData)
# 2. clean dataset
getNetwrok(goodData)
# | /demo.R | no_license | mfey619/HPIHack | R | false | false | 52 | r | #1
getNetwrok(badData)
#2
getNetwrok(goodData)
# |
context("test-vmeasure.R")

# Examples from Figure 2 of
# Rosenberg, Andrew, and Julia Hirschberg. "V-measure:
# A conditional entropy-based external cluster evaluation measure." Proceedings
# of the 2007 joint conference on empirical methods in natural language
# processing and computational natural language learning (EMNLP-CoNLL). 2007.
# Each solution pairs a clustering `x` with the true class labels `y`
# (15 items in 3 classes; solutions c/d append extra items and clusters).
solution_a <- list(x = c(1, 1, 1, 2, 3, 3, 3, 3, 1, 2, 2, 2, 2, 1, 3),
                   y = c(rep(1, 5), rep(2, 5), rep(3, 5)))
solution_b <- list(x = c(1, 1, 1, 2, 2, 3, 3, 3, 1, 1, 2, 2, 2, 3, 3),
                   y = c(rep(1, 5), rep(2, 5), rep(3, 5)))
solution_c <- list(x = c(1, 1, 1, 2, 2, 3, 3, 3, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3, 1, 2, 3),
                   y = c(rep(1, 5), rep(2, 5), rep(3, 5), rep(4, 2), rep(5, 2), rep(6, 2)))
solution_d <- list(x = c(1, 1, 1, 2, 2, 3, 3, 3, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3, 1, 2, 3),
                   y = c(rep(1, 5), rep(2, 5), rep(3, 5), 4, 5, 6, 7, 8, 9))

# Expected values are the ones published in Figure 2 of the paper above.
test_that("vmeasure reproduces the Figure 2 reference values", {
  expect_equal(vmeasure(solution_a$x, solution_a$y)$v_measure, 0.14, tolerance = 0.01)
  expect_equal(vmeasure(solution_b$x, solution_b$y)$v_measure, 0.39, tolerance = 0.01)
  expect_equal(vmeasure(solution_c$x, solution_c$y)$v_measure, 0.30, tolerance = 0.01)
  expect_equal(vmeasure(solution_d$x, solution_d$y)$v_measure, 0.41, tolerance = 0.01)
})
| /tests/testthat/test-vmeasure.R | permissive | Nowosad/sabre | R | false | false | 1,332 | r | context("test-vmeasure.R")
# Examples from Figure 2 of
# Rosenberg, Andrew, and Julia Hirschberg. "V-measure:
# A conditional entropy-based external cluster evaluation measure." Proceedings
# of the 2007 joint conference on empirical methods in natural language
# processing and computational natural language learning (EMNLP-CoNLL). 2007.
solution_a = list(x = c(1, 1, 1, 2, 3, 3, 3, 3, 1, 2, 2, 2, 2, 1, 3),
y = c(rep(1, 5), rep(2, 5), rep(3, 5)))
solution_b = list(x = c(1, 1, 1, 2, 2, 3, 3, 3, 1, 1, 2, 2, 2, 3, 3),
y = c(rep(1, 5), rep(2, 5), rep(3, 5)))
solution_c = list(x = c(1, 1, 1, 2, 2, 3, 3, 3, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3, 1, 2, 3),
y = c(rep(1, 5), rep(2, 5), rep(3, 5), rep(4, 2), rep(5, 2), rep(6, 2)))
solution_d = list(x = c(1, 1, 1, 2, 2, 3, 3, 3, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3, 1, 2, 3),
y = c(rep(1, 5), rep(2, 5), rep(3, 5), 4, 5, 6, 7, 8, 9))
test_that("multiplication works", {
expect_equal(vmeasure(solution_a$x, solution_a$y)$v_measure, 0.14, tolerance = 0.01)
expect_equal(vmeasure(solution_b$x, solution_b$y)$v_measure, 0.39, tolerance = 0.01)
expect_equal(vmeasure(solution_c$x, solution_c$y)$v_measure, 0.30, tolerance = 0.01)
expect_equal(vmeasure(solution_d$x, solution_d$y)$v_measure, 0.41, tolerance = 0.01)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/propNeeded.R
\name{propNeeded}
\alias{propNeeded}
\alias{propNeeded,ANY-method}
\title{Computes proportion of remaining delegates necessary to secure the nomination}
\usage{
propNeeded(candidate, remaining.delegates)
}
\arguments{
\item{candidate}{An object of the class 'Candidate'}
\item{remaining.delegates}{The number of delegates yet to be allocated in the candidate's party.}
}
\value{
The proportion of the delegates a given candidate still needs to secure his/her party nomination
}
\description{
A function that calculates the proportion of remaining delegates needed by a presidential candidate to secure the nomination
}
\examples{
candidate.Hillary <- createCandidate("Hillary Clinton", "Democratic", 1614)
propNeeded(candidate.Hillary, 2295)
}
\author{
Jacob Hample \email{jacob.hample@wustl.edu}
}
\seealso{
\code{\link{createCandidate}}, \code{\link{Candidate}}
}
| /MyPackage/man/propNeeded.Rd | no_license | jacobhample/PS6 | R | false | true | 961 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/propNeeded.R
\name{propNeeded}
\alias{propNeeded}
\alias{propNeeded,ANY-method}
\title{Computes proportion of remaining delegates necessary to secure the nomination}
\usage{
propNeeded(candidate, remaining.delegates)
}
\arguments{
\item{candidate}{An object of the class 'Candidate'}
\item{remaining.delegates}{The number of delegates yet to be allocated in the candidate's party.}
}
\value{
The proportion of the delegates a given candidate still needs to secure his/her party nomination
}
\description{
A function that calculates the proportion of remaining delegates needed by a presidential candidate to secure the nomination
}
\examples{
candidate.Hillary <- createCandidate("Hillary Clinton", "Democratic", 1614)
propNeeded(candidate.Hillary, 2295)
}
\author{
Jacob Hample \email{jacob.hample@wustl.edu}
}
\seealso{
\code{\link{createCandidate}}, \code{\link{Candidate}}
}
|
###############################################################
# 05-30-2017
# clean code for simulations
# 8 base learners:
# random forest, generalized linear regression, quadratic splines regression, CART,
# 10 nearest neighbors, generalized boosting, support vector machine, Bagging Classification
# source in functions.R
# CRS: crs.fit function in functions.R for parameter changes:
# (1)initiation using NNLS
# (2)region [0, 5]^K
# (3)normalize to sum up to 1 before output and making predictions
# simply change the load data step for other runs
# data format: Y -- outcome; W -- covariates in dataframe
# other parameters for tuning:
# number of folds K, SL library, number of minimization alg
################################################################
### Data Processing
# Read the WDBC data set; columns are an ID, the diagnosis code ("B"
# presumably benign, "M" malignant -- confirm against the data dictionary)
# and 30 numeric features (mean/SE/worst of 10 cell measurements).
data = read.table("wdbc.data",sep = ",")
colnames(data) = c("ID","diag",
                   "MRadius","MTexture","MPerimeter","MArea","MSmooth",
                   "MCompact","MConcavity","MConcaveP","MSymmetry","MFracDim",
                   "SERadius","SETexture","SEPerimeter","SEArea","SESmooth",
                   "SECompact","SEConcavity","SEConcaveP","SESymmetry","SEFracDim",
                   "WRadius","WTexture","WPerimeter","WArea","WSmooth",
                   "WCompact","WConcavity","WConcaveP","WSymmetry","WFracDim")
# Covariate matrix: the 30 features, standardized column-wise to mean 0 / sd 1.
W = matrix(unlist(data[,3:32]),ncol=dim(data[,3:32])[2])
colnames(W) = colnames(data)[3:32]
W = apply(W,2,function(x) (x-mean(x))/sd(x))
W = as.data.frame(W)
# Binary outcome: Y = 1 for the "B" diagnosis, 0 otherwise.
Y = rep(0,dim(data)[1])
Y[data[,2]=="B"] = 1
Z = Y
#table(Y)
################################################################
### Analysis
# load packages
library("SuperLearner")
library("randomForest")
library("gam")
library("rpart")
library("dplyr")
# NOTE(review): plyr is attached after dplyr, so plyr masks several dplyr
# functions -- confirm this ordering is intentional.
library("plyr")
library("ggplot2")
library("nloptr")
library("lpSolve")
library("nnls")
# NOTE(review): the header comment refers to functions.R -- the file name case
# must match on case-sensitive file systems.
source("functions.r")# load functions
# candidate learners passed to SuperLearner()
SL.library = c("SL.randomForest","SL.glm","SL.gam", "SL.rpart","SL.knn","SL.gbm","SL.svm","SL.ipredbagg")
# cross validation
# balance the ratio of Y=0 and Y=1's in each fold (stratified K-fold CV)
N = length(Y)
K = 10 # number of folds for calculating cross validated risk
N0 = table(Y)[1] # number of {Y=0}
l0 = floor(N0/K) # number of {Y=0} obs in each fold
# evenly distribute the leftovers, fold label vector:
# NOTE(review): the leftover count is written as N0%%l0; that equals N0%%K
# only while floor(N0/K) > N0%%K (true for this data set) -- verify before
# reusing this script on other data.
t0 = rep(1:K,rep(l0,K)+c(rep(1,N0%%l0),rep(0,K-N0%%l0)))
N1 = table(Y)[2] # number of {Y=1}
l1 = floor(N1/K) # number of {Y=1} obs in each fold
t1 = rep(1:K,rep(l1,K)+c(rep(1,N1%%l1),rep(0,K-N1%%l1)))
data = cbind(Y,W)
set.seed(100) #for reproducibility, set seed = 100
# permute the fold label vector
ind0 = sample(1:N0,replace=FALSE)
ind1 = sample(1:N1,replace=FALSE)
t0 = t0[ind0]
t1 = t1[ind1]
# permute the Y=0 and Y=1's separately to make balance in each fold
# cv.fold is the validation fold index for the whole dataset
cv.fold = rep(0,length(Y))
cv.fold[Y==0] = t0
cv.fold[Y==1] = t1
# Fit a Super Learner on each training split and stash, per fold i:
#   train.S<i> / val.S<i>   -- SL + library predictions on train / validation rows
#   train.CVS<i>            -- SuperLearner's internal CV library predictions (Z)
#   train.CVS.wSL<i>        -- same, with the SL CV prediction prepended
#   train.Z<i> / val.Z<i>   -- observed outcomes on train / validation rows
for(i in 1:K){
  print(i)
  train.ind <- (cv.fold!=i)
  val.ind <- (cv.fold==i)
  fit.data.SLL <- SuperLearner(Y=Y[train.ind], X=W[train.ind,],newX=W, SL.library = SL.library, family = binomial(), method = "method.NNLS",verbose = FALSE)
  sl.pred <- fit.data.SLL$SL.predict #prediction from super learner
  lib.pred <- fit.data.SLL$library.predict #prediction from library algorithms
  pred <- cbind(sl.pred,lib.pred) #all predictions
  colnames(pred) <- c("SuperLearner",SL.library)
  train.S <- pred[train.ind,] # trained predictions
  val.S <- pred[val.ind,] # validation predictions
  train.CVS <- fit.data.SLL$Z # cross-validated library predictions
  trainCVS_SL = fit.data.SLL$Z %*% fit.data.SLL$coef # SL CV predictions from within SL
  train.CVS.wSL = cbind(trainCVS_SL, train.CVS) # add the first col to be the SL CV predictions
  train.Z <- Y[train.ind] # trained outcome
  val.Z <- Y[val.ind] # validation outcome
  # stash per-fold objects (train.S1, val.S1, ...) for the loops below
  assign(paste("train.CVS.wSL", i, sep=""),train.CVS.wSL)
  assign(paste("train.CVS", i, sep=""),train.CVS)
  assign(paste("train.S", i, sep=""),train.S)
  assign(paste("val.S", i, sep=""),val.S)
  assign(paste("train.Z", i, sep=""),train.Z)
  assign(paste("val.Z", i, sep=""),val.Z)
}
# Solving for (alpha, c) using alg number of methods
# Compares alg = 3 rule-building strategies on the stored CV splits; cutoffs
# are stored as cutoff[method, lambda, fold].
alg=3 # CRS, Two steps, conditional thresholding
#as.integer(runif(1)*2e9)
set.seed(1920413227)#WDBC
# grid of FN/FP trade-off weights in the weighted misclassification risk
lambdas = unique(seq(0.1,0.9,0.01))
#lambdas = unique(c(seq(0,0.7,0.025),seq(0.7,1,0.01)))
# alg algorithms, length(lambdas) lambdas, 10 folds
cutoff = array(NA, dim = c(alg,length(lambdas),K))
# true status vector that follows the order of the stacked cross validated predictions
CVZ = val.Z1
for(k in 2:K){
  CVZ = c(CVZ,get(paste("val.Z", k, sep="")))
}
FPR = matrix(NA,ncol = alg, nrow = length(lambdas))
FNR = matrix(NA,ncol = alg, nrow = length(lambdas))
TPR = matrix(NA,ncol = alg, nrow = length(lambdas))
risk = matrix(NA,ncol = alg, nrow = length(lambdas))
deci = vector("list", length(lambdas))
for(k in 1:K){
  # recover the per-fold objects stashed by the SuperLearner loop above
  train.CVS = get(paste("train.CVS", k, sep=""))
  train.S = get(paste("train.S", k, sep=""))
  val.S = get(paste("val.S", k, sep=""))
  train.Z = get(paste("train.Z", k, sep=""))
  val.Z = get(paste("val.Z", k, sep=""))
  train.CVS.wSL = get(paste("train.CVS.wSL", k, sep=""))
  XX = train.S[,-1] # training fold library predictions
  for(i in 1:length(lambdas)){
    print(c(i,k))
    lambda = lambdas[i]
    # SL CRS
    crs.seed = as.integer(runif(1)*2e9)
    cbs = crs.fit(seed = crs.seed, lambda,train.CVS,train.Z, val.S[,-1], val.Z)
    cutoff[3, i, k] = cbs$c #CBS cutoff for lambda i fold k
    # SL Common
    opt = Opt.nonpar.rule(train.Z,train.S[,1],phi=0,lambda)
    cutoff[1, i, k] = as.numeric(opt)[1]
    # SL Proposal Iterative
    opt = Opt.nonpar.rule(train.Z,train.CVS.wSL[,1],phi=0,lambda)
    cutoff[2, i, k] = as.numeric(opt)[1]
    # classify each validation row with each rule's cutoff
    # NOTE(review): byrow=T uses the reassignable T; prefer TRUE
    cut = matrix(rep(cutoff[, i, k],nrow(val.S)),nrow=nrow(val.S),byrow=T)
    val = cbind(val.S[,1], val.S[,1], cbs$score)
    dki = (val > cut)*1
    # one decision matrix for each lambda value, matrix size n x alg
    if(k == 1){
      deci[[i]] = dki
    }
    if(k>1){
      deci[[i]] = rbind(deci[[i]],dki) # each row is validation decision arranged by folds, col is algorithm
    }
  }#i
}#k
# cross-validated error rates and weighted risk per lambda and method
for(i in 1:length(lambdas)){
  lambda = lambdas[i]
  dec = deci[[i]]
  FPR[i,] = apply(dec,2,function(x) mean((x==1)*(1-CVZ))/mean(1-CVZ))
  FNR[i,] = apply(dec,2,function(x) mean((x==0)*(CVZ))/mean(CVZ))
  risk[i,] = lambda*mean(CVZ)*FNR[i,] + (1-lambda)*(mean(1-CVZ))*FPR[i,]
}
# plot risk vs lambda for the three approaches
# NOTE(review): confirm the label order below matches the cutoff row order
# assigned above (1 = SL common threshold, 2 = iterative/two-step, 3 = CRS).
toplot = risk
colnames(toplot) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
dat1 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat1) <- c("lambda","risk")
dat1$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p1 <- ggplot(dat1, aes(x=lambda, y=risk)) + geom_line(data=dat1,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1)
print(p1)
# table of risks at lambda = 0.1, 0.2, ..., 0.9
ind = which(lambdas %in% seq(0.1,0.9,0.1))
round(t(cbind(lambdas,risk)[ind,]),3)
| /R/Application_Code_WDBC.R | no_license | yizhenxu/SL_Thresholding | R | false | false | 6,929 | r | ###############################################################
# 05-30-2017
# clean code for simulations
# 8 base learners:
# random forest, generalized linear regression, quadratic splines regression, CART,
# 10 nearest neighbors, generalized boosting, support vector machine, Bagging Classification
# source in functions.R
# CRS: crs.fit function in functions.R for parameter changes:
# (1)initiation using NNLS
# (2)region [0, 5]^K
# (3)normalize to sum up to 1 before output and making predictions
# simply change the load data step for other runs
# data format: Y -- outcome; W -- covariates in dataframe
# other parameters for tuning:
# number of folds K, SL library, number of minimization alg
################################################################
### Data Processing
data = read.table("wdbc.data",sep = ",")
colnames(data) = c("ID","diag",
"MRadius","MTexture","MPerimeter","MArea","MSmooth",
"MCompact","MConcavity","MConcaveP","MSymmetry","MFracDim",
"SERadius","SETexture","SEPerimeter","SEArea","SESmooth",
"SECompact","SEConcavity","SEConcaveP","SESymmetry","SEFracDim",
"WRadius","WTexture","WPerimeter","WArea","WSmooth",
"WCompact","WConcavity","WConcaveP","WSymmetry","WFracDim")
W = matrix(unlist(data[,3:32]),ncol=dim(data[,3:32])[2])
colnames(W) = colnames(data)[3:32]
W = apply(W,2,function(x) (x-mean(x))/sd(x))
W = as.data.frame(W)
Y = rep(0,dim(data)[1])
Y[data[,2]=="B"] = 1
Z = Y
#table(Y)
################################################################
### Analysis
# load packages
library("SuperLearner")
library("randomForest")
library("gam")
library("rpart")
library("dplyr")
library("plyr")
library("ggplot2")
library("nloptr")
library("lpSolve")
library("nnls")
source("functions.r")# load functions
SL.library = c("SL.randomForest","SL.glm","SL.gam", "SL.rpart","SL.knn","SL.gbm","SL.svm","SL.ipredbagg")
# cross validation
# balance the ratio of Y=0 and Y=1's in each fold
N = length(Y)
K = 10 # number of folds for calculating cross validated risk
N0 = table(Y)[1] # number of {Y=0}
l0 = floor(N0/K) # number of {Y=0} obs in each fold
# evenly distribute the leftovers, fold label vector:
t0 = rep(1:K,rep(l0,K)+c(rep(1,N0%%l0),rep(0,K-N0%%l0)))
N1 = table(Y)[2] # number of {Y=1}
l1 = floor(N1/K) # number of {Y=0} obs in each fold
t1 = rep(1:K,rep(l1,K)+c(rep(1,N1%%l1),rep(0,K-N1%%l1)))
data = cbind(Y,W)
set.seed(100) #for reproducibility, set seed = 100
# permute the fold label vector
ind0 = sample(1:N0,replace=FALSE)
ind1 = sample(1:N1,replace=FALSE)
t0 = t0[ind0]
t1 = t1[ind1]
# permute the Y=0 and Y=1's separately to make balance in each fold
# cv.fold is the validation fold index for the whole dataset
cv.fold = rep(0,length(Y))
cv.fold[Y==0] = t0
cv.fold[Y==1] = t1
for(i in 1:K){
print(i)
train.ind <- (cv.fold!=i)
val.ind <- (cv.fold==i)
fit.data.SLL <- SuperLearner(Y=Y[train.ind], X=W[train.ind,],newX=W, SL.library = SL.library, family = binomial(), method = "method.NNLS",verbose = FALSE)
sl.pred <- fit.data.SLL$SL.predict #prediction from super learner
lib.pred <- fit.data.SLL$library.predict #prediction from library algorithms
pred <- cbind(sl.pred,lib.pred) #all predictions
colnames(pred) <- c("SuperLearner",SL.library)
train.S <- pred[train.ind,] # trained predictions
val.S <- pred[val.ind,] # validation predictions
train.CVS <- fit.data.SLL$Z # cross-validated library predictions
trainCVS_SL = fit.data.SLL$Z %*% fit.data.SLL$coef # SL CV predictions from within SL
train.CVS.wSL = cbind(trainCVS_SL, train.CVS) # add the first col to be the SL CV predictions
train.Z <- Y[train.ind] # trained outcome
val.Z <- Y[val.ind] # validation outcome
assign(paste("train.CVS.wSL", i, sep=""),train.CVS.wSL)
assign(paste("train.CVS", i, sep=""),train.CVS)
assign(paste("train.S", i, sep=""),train.S)
assign(paste("val.S", i, sep=""),val.S)
assign(paste("train.Z", i, sep=""),train.Z)
assign(paste("val.Z", i, sep=""),val.Z)
}
# ---- Estimate classification cutoffs per (method, lambda, fold) ----------
# Three cutoff-selection methods are compared; their row indices in 'cutoff':
#   1 = thresholding the SL prediction (Opt.nonpar.rule on train.S col 1)
#   2 = thresholding the SL cross-validated prediction (train.CVS.wSL col 1)
#   3 = CRS minimization (crs.fit)
# Solving for (alpha, c) using alg number of methods
alg=3 # CRS, Two steps, conditional thresholding
#as.integer(runif(1)*2e9)
set.seed(1920413227)#WDBC
lambdas = unique(seq(0.1,0.9,0.01))
#lambdas = unique(c(seq(0,0.7,0.025),seq(0.7,1,0.01)))
# alg algorithms, length(lambdas) lambdas, 10 folds
cutoff = array(NA, dim = c(alg,length(lambdas),K))
# true status vector that follows the order of the stacked cross validated predictions
CVZ = val.Z1
for(k in 2:K){
CVZ = c(CVZ,get(paste("val.Z", k, sep="")))
}
FPR = matrix(NA,ncol = alg, nrow = length(lambdas))
FNR = matrix(NA,ncol = alg, nrow = length(lambdas))
TPR = matrix(NA,ncol = alg, nrow = length(lambdas)) # NOTE(review): appears unused in this section
risk = matrix(NA,ncol = alg, nrow = length(lambdas))
deci = vector("list", length(lambdas))
for(k in 1:K){
# retrieve the per-fold objects stored via assign() in the previous section
train.CVS = get(paste("train.CVS", k, sep=""))
train.S = get(paste("train.S", k, sep=""))
val.S = get(paste("val.S", k, sep=""))
train.Z = get(paste("train.Z", k, sep=""))
val.Z = get(paste("val.Z", k, sep=""))
train.CVS.wSL = get(paste("train.CVS.wSL", k, sep=""))
XX = train.S[,-1] # training fold library predictions
for(i in 1:length(lambdas)){
print(c(i,k))
lambda = lambdas[i]
# SL CRS
crs.seed = as.integer(runif(1)*2e9)
cbs = crs.fit(seed = crs.seed, lambda,train.CVS,train.Z, val.S[,-1], val.Z)
cutoff[3, i, k] = cbs$c #CBS cutoff for lambda i fold k
# SL Common
opt = Opt.nonpar.rule(train.Z,train.S[,1],phi=0,lambda)
cutoff[1, i, k] = as.numeric(opt)[1]
# SL Proposal Iterative
opt = Opt.nonpar.rule(train.Z,train.CVS.wSL[,1],phi=0,lambda)
cutoff[2, i, k] = as.numeric(opt)[1]
# apply each method's cutoff to its validation-fold scores; methods 1 and 2
# both threshold the SL prediction (column 1 of val.S), method 3 uses the
# score returned by crs.fit
cut = matrix(rep(cutoff[, i, k],nrow(val.S)),nrow=nrow(val.S),byrow=T)
val = cbind(val.S[,1], val.S[,1], cbs$score)
dki = (val > cut)*1
# one decision matrix for each lambda value, matrix size n x alg
if(k == 1){
deci[[i]] = dki
}
if(k>1){
deci[[i]] = rbind(deci[[i]],dki) # each row is validation decision arranged by folds, col is algorithm
}
}#i
}#k
# ---- Cross-validated error rates and weighted risk per lambda ------------
# Stack the K validation folds for each lambda and compute FPR/FNR per
# method, then the lambda-weighted misclassification risk
#   risk = lambda*P(Y=1)*FNR + (1-lambda)*P(Y=0)*FPR
for(i in 1:length(lambdas)){
lambda = lambdas[i]
dec = deci[[i]]
FPR[i,] = apply(dec,2,function(x) mean((x==1)*(1-CVZ))/mean(1-CVZ))
FNR[i,] = apply(dec,2,function(x) mean((x==0)*(CVZ))/mean(CVZ))
risk[i,] = lambda*mean(CVZ)*FNR[i,] + (1-lambda)*(mean(1-CVZ))*FPR[i,]
}
# plot risk against lambda, one curve per method
toplot = risk
colnames(toplot) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
dat1 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat1) <- c("lambda","risk")
dat1$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p1 <- ggplot(dat1, aes(x=lambda, y=risk)) + geom_line(data=dat1,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1)
print(p1)
# tabulate the risks at lambda = 0.1, 0.2, ..., 0.9
ind = which(lambdas %in% seq(0.1,0.9,0.1))
round(t(cbind(lambdas,risk)[ind,]),3)
|
# inst/app/ui.R
#
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
suppressMessages(require(shinyjqui))
suppressMessages(require(shiny))
suppressMessages(require(shinydashboard))
suppressMessages(require(shinydashboardPlus))
suppressMessages(require(plotly))
suppressMessages(require(ggplot2))
suppressMessages(require(DT))
suppressMessages(require(edgeR))
suppressMessages(require(pheatmap))
suppressMessages(require(shinyTree))
# Determine the location of the app sources BEFORE anything is sourced from
# it. (Previously toolTips.R was sourced from packagePath before packagePath
# was defined in this file, which only worked when the calling environment
# had already set it.)
if (exists("devscShinyApp")) {
  if (devscShinyApp) {
    packagePath <- "inst/app"
  } else {
    packagePath <- find.package("SCHNAPPs", lib.loc = NULL, quiet = TRUE) %>% paste0("/app/")
  }
} else {
  packagePath <- find.package("SCHNAPPs", lib.loc = NULL, quiet = TRUE) %>% paste0("/app/")
}
source(paste0(packagePath, "/toolTips.R"), local = TRUE)
# App-wide settings handed over by the launcher via the .schnappsEnv environment.
localContributionDir <- get(".SCHNAPPs_locContributionDir", envir = .schnappsEnv)
defaultValueSingleGene <- get(".SCHNAPPs_defaultValueSingleGene", envir = .schnappsEnv)
defaultValueMultiGenes <- get(".SCHNAPPs_defaultValueMultiGenes", envir = .schnappsEnv)
defaultValueRegExGene <- get(".SCHNAPPs_defaultValueRegExGene", envir = .schnappsEnv)
DEBUG <- get(".SCHNAPPs_DEBUG", envir = .schnappsEnv)
DEBUGSAVE <- get(".SCHNAPPs_DEBUGSAVE", envir = .schnappsEnv)
# rintrojs is optional: fall back to a no-op so the app still starts without it.
if ("rintrojs" %in% rownames(installed.packages())) {
  suppressMessages(require(rintrojs))
} else {
  introjsUI <- function(...) {}
  cat(file = stderr(), "Please install introjsUI: install.packages('rintrojs')")
}
# Build the complete SCHNAPPs user interface.
#
# Defined as a function of `request` so that shiny bookmarking/restoring works.
# Steps:
#   1. restore persisted default input values from the history directory,
#   2. source the module/tab definitions and assemble the core tabs and menus,
#   3. merge in tabs/menus contributed by ui.R files found under the
#      contributions directories (package-internal and localContributionDir),
#   4. re-order the sidebar menu into a fixed, user-friendly order,
#   5. return the assembled shinydashboardPlus page.
scShinyUI <- function(request) {
  library(shinyjqui)
  # Restore input defaults persisted by a previous session, if any.
  dvFile = paste0(.schnappsEnv$historyPath, "/defaultValues.RData")
  if (file.exists(dvFile)) {
    cp = load(file = dvFile)
    if ("defaultValues" %in% cp) {
      assign("defaultValues", defaultValues, envir = .schnappsEnv)
    } else {
      warning("defaultValues file exist but no defaultValues\n\n")
    }
  }
  source(paste0(packagePath, "/modulesUI.R"), local = FALSE)
  source(paste0(packagePath, "/tabs.R"), local = TRUE)
  # general tabs (defined in tabs.R)
  allTabs <- list(
    inputTab(),
    shortCutsTab(),
    geneSelectionTab(),
    cellSelectionTab(),
    clusterParametersTab() %>% checkAllowed(env = .schnappsEnv)
  )
  # parameters tab, includes basic normalization; must be sourced before
  # allMenus below because it defines parameterItems()
  source(paste0(packagePath, "/parameters.R"), local = TRUE)
  base::source(paste0(packagePath, "/serverFunctions.R"), local = TRUE)
  # basic menu items; NB: the leading space in " Cell selection" is
  # intentional and must match sollOrder below
  allMenus <- list(
    shinydashboard::menuItem("input",
      tabName = "input", icon = icon("folder")
    ),
    shinydashboard::menuItem("short cuts",
      tabName = "shortCuts", icon = icon("gopuram")
    ),
    shinydashboard::menuItem("Parameters",
      tabName = "parameters", icon = icon("gopuram"),
      parameterItems()
    ),
    shinydashboard::menuItem(" Cell selection",
      tabName = "cellSelection", icon = icon("ello")
    ),
    shinydashboard::menuItem("Gene selection",
      tabName = "geneSelection", icon = icon("atom")
    )
  )
  # parse all ui.R files under contributions to include in the application;
  # each contribution file fills menuList/tabList when sourced
  uiFiles <- dir(
    path = c(paste0(packagePath, "/contributions"), localContributionDir),
    pattern = "ui.R", full.names = TRUE, recursive = TRUE
  )
  for (fp in uiFiles) {
    menuList <- list()
    tabList <- list()
    source(fp, local = TRUE)
    for (li in menuList) {
      if (length(li) > 0) {
        allMenus[[length(allMenus) + 1]] <- li
      }
    }
    for (li in tabList) {
      if (length(li) > 0) {
        allTabs[[length(allTabs) + 1]] <- li
      }
    }
  }
  # display name of every menu item, extracted from the htmltools tag
  # structure produced by shinydashboard::menuItem
  mListNames <- c()
  for (menuListItem in seq_along(allMenus)) {
    mListNames[menuListItem] <- allMenus[[menuListItem]][3][[1]][[1]][3]$children[[2]]$children[[1]][1]
  }
  # fixed order for the known menus; any other (contributed) menu is appended
  sollOrder <- c(
    "input", "short cuts", "Parameters", "General QC", " Cell selection", "Gene selection", "Co-expression",
    "Data Exploration", "Subcluster analysis"
  )
  sollOrderIdx <- c()
  for (sIdx in seq_along(sollOrder)) {
    sollOrderIdx[sIdx] <- which(sollOrder[sIdx] == mListNames)
  }
  sollOrderIdx <- c(sollOrderIdx, which(!seq_along(allMenus) %in% sollOrderIdx))
  allMenus <- allMenus[sollOrderIdx]
  # source all parameters.R files under contributions; sourcing is kept for
  # its side effects only (the resulting tabList is deliberately not added
  # to allTabs here)
  parFiles <- dir(
    path = c(paste0(packagePath, "/contributions"), localContributionDir),
    pattern = "parameters.R", full.names = TRUE, recursive = TRUE
  )
  for (fp in parFiles) {
    tabList <- list()
    source(fp, local = TRUE)
  }
  # all tab panels wrapped in the div expected by shinydashboard
  getallTabs <- function() {
    tags$div(
      allTabs,
      class = "tab-content"
    )
  }
  getallMenus <- function() {
    allMenus
  }
  # optional right-hand control bar shipped with the package
  controlbarContext = NULL
  if (file.exists(paste0(packagePath, "/controlbarContext.R"))) {
    source(file = paste0(packagePath, "/controlbarContext.R"), local = TRUE)
  }
  # NOTE(review): currently unused; documents the jqueryui asset shinyjqui relies on
  dep = htmltools::htmlDependency("jqueryui", "3.6.0", c(href = "shared/jqueryui"), script = "jquery-ui.min.js")
  shinyUI(
    shinydashboardPlus::dashboardPage(
      dheader(),
      shinydashboardPlus::dashboardSidebar(
        shinydashboard::sidebarMenu(
          style = "height: 90vh; overflow-y: auto;",
          id = "sideBarID",
          getallMenus(),
          htmlOutput("summaryStatsSideBar"),
          # font color; otherwise the text on these buttons is gray
          tags$head(tags$style(".butt{color: black !important;}")),
          # intended to change the transparency of the area highlighted by introjs
          tags$head(tags$style(HTML("berndTest{background-color:rgba(255,34,22,0.1);}"))),
          br(),
          downloadButton("countscsv", "Download (log) counts.csv", class = "butt"),
          br(),
          downloadButton("RDSsave", "Download RData", class = "butt"),
          br(),
          downloadButton("RmdSave", "Download History", class = "butt"),
          if (DEBUG) sc_checkboxInput("DEBUGSAVE", "Save for DEBUG", FALSE),
          if (DEBUG) verbatimTextOutput("DEBUGSAVEstring"),
          if (is.environment(.schnappsEnv)) {
            if (exists("historyPath", envir = .schnappsEnv)) {
              actionButton("comment2History", "Add comment to history")
            }
          },
          if (DEBUG) {
            actionButton("openBrowser", "open Browser")
          },
          actionButton("Quit", "quit")
        )
      ), # dashboard side bar
      shinydashboard::dashboardBody(
        tags$script(HTML("$('body').addClass('fixed');")),
        # shinyjqui needs resizing support that shinyjs alone cannot provide; see
        # https://shiny.rstudio.com/articles/communicating-with-js.html
        shinyjs::useShinyjs(debug = TRUE),
        tags$script("showmenuItem = function(targetid) {var x = document.getElementById(targetid); x.style.display = 'block'; x.classList.add('menu-open');};"),
        # tell the server when the client has finished starting up
        tags$script("$(document).on('shiny:connected', function(){
var startingUp = true;
Shiny.setInputValue('startingUp', '1');
function onTimeout() {
if(startingUp){
startingUp = 0;
Shiny.setInputValue('startingUp', '0');
};
};
function startIdleTimer() {
if (idleTimer) clearTimeout(idleTimer);
idleTimer = setTimeout(onTimeout, timeoutWarningMsecs);
};
$(document).on('shiny:idle', onTimeout);
});"),
        introjsUI(),
        shinyjs::inlineCSS(list(.red = "background-color: DarkSalmon; hover: red")),
        shinyjs::inlineCSS(list(.green = "background-color: lightgreen")),
        getallTabs(),
        tags$head(tags$style(HTML("div.box-header {display: block;}"))),
        tags$head(tags$style(HTML("h3.box-title {display: block;}")))
      ), # dashboard body
      options = list(sidebarExpandOnHover = TRUE),
      controlbar = controlbarContext
    ) # main dashboard
  )
}
| /inst/app/ui.R | no_license | C3BI-pasteur-fr/UTechSCB-SCHNAPPs | R | false | false | 11,807 | r | # inst/app/ui.R
#
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
suppressMessages(require(shinyjqui))
suppressMessages(require(shiny))
suppressMessages(require(shinydashboard))
suppressMessages(require(shinydashboardPlus))
suppressMessages(require(plotly))
suppressMessages(require(ggplot2))
suppressMessages(require(DT))
suppressMessages(require(edgeR))
suppressMessages(require(pheatmap))
suppressMessages(require(shinyTree))
# Determine the location of the app sources BEFORE anything is sourced from
# it. (Previously toolTips.R was sourced from packagePath before packagePath
# was defined in this file, which only worked when the calling environment
# had already set it.)
if (exists("devscShinyApp")) {
  if (devscShinyApp) {
    packagePath <- "inst/app"
  } else {
    packagePath <- find.package("SCHNAPPs", lib.loc = NULL, quiet = TRUE) %>% paste0("/app/")
  }
} else {
  packagePath <- find.package("SCHNAPPs", lib.loc = NULL, quiet = TRUE) %>% paste0("/app/")
}
source(paste0(packagePath, "/toolTips.R"), local = TRUE)
# App-wide settings handed over by the launcher via the .schnappsEnv environment.
localContributionDir <- get(".SCHNAPPs_locContributionDir", envir = .schnappsEnv)
defaultValueSingleGene <- get(".SCHNAPPs_defaultValueSingleGene", envir = .schnappsEnv)
defaultValueMultiGenes <- get(".SCHNAPPs_defaultValueMultiGenes", envir = .schnappsEnv)
defaultValueRegExGene <- get(".SCHNAPPs_defaultValueRegExGene", envir = .schnappsEnv)
DEBUG <- get(".SCHNAPPs_DEBUG", envir = .schnappsEnv)
DEBUGSAVE <- get(".SCHNAPPs_DEBUGSAVE", envir = .schnappsEnv)
# rintrojs is optional: fall back to a no-op so the app still starts without it.
if ("rintrojs" %in% rownames(installed.packages())) {
  suppressMessages(require(rintrojs))
} else {
  introjsUI <- function(...) {}
  cat(file = stderr(), "Please install introjsUI: install.packages('rintrojs')")
}
# Build the complete SCHNAPPs user interface.
#
# Defined as a function of `request` so that shiny bookmarking/restoring works.
# Steps:
#   1. restore persisted default input values from the history directory,
#   2. source the module/tab definitions and assemble the core tabs and menus,
#   3. merge in tabs/menus contributed by ui.R files found under the
#      contributions directories (package-internal and localContributionDir),
#   4. re-order the sidebar menu into a fixed, user-friendly order,
#   5. return the assembled shinydashboardPlus page.
scShinyUI <- function(request) {
  library(shinyjqui)
  # Restore input defaults persisted by a previous session, if any.
  dvFile = paste0(.schnappsEnv$historyPath, "/defaultValues.RData")
  if (file.exists(dvFile)) {
    cp = load(file = dvFile)
    if ("defaultValues" %in% cp) {
      assign("defaultValues", defaultValues, envir = .schnappsEnv)
    } else {
      warning("defaultValues file exist but no defaultValues\n\n")
    }
  }
  source(paste0(packagePath, "/modulesUI.R"), local = FALSE)
  source(paste0(packagePath, "/tabs.R"), local = TRUE)
  # general tabs (defined in tabs.R)
  allTabs <- list(
    inputTab(),
    shortCutsTab(),
    geneSelectionTab(),
    cellSelectionTab(),
    clusterParametersTab() %>% checkAllowed(env = .schnappsEnv)
  )
  # parameters tab, includes basic normalization; must be sourced before
  # allMenus below because it defines parameterItems()
  source(paste0(packagePath, "/parameters.R"), local = TRUE)
  base::source(paste0(packagePath, "/serverFunctions.R"), local = TRUE)
  # basic menu items; NB: the leading space in " Cell selection" is
  # intentional and must match sollOrder below
  allMenus <- list(
    shinydashboard::menuItem("input",
      tabName = "input", icon = icon("folder")
    ),
    shinydashboard::menuItem("short cuts",
      tabName = "shortCuts", icon = icon("gopuram")
    ),
    shinydashboard::menuItem("Parameters",
      tabName = "parameters", icon = icon("gopuram"),
      parameterItems()
    ),
    shinydashboard::menuItem(" Cell selection",
      tabName = "cellSelection", icon = icon("ello")
    ),
    shinydashboard::menuItem("Gene selection",
      tabName = "geneSelection", icon = icon("atom")
    )
  )
  # parse all ui.R files under contributions to include in the application;
  # each contribution file fills menuList/tabList when sourced
  uiFiles <- dir(
    path = c(paste0(packagePath, "/contributions"), localContributionDir),
    pattern = "ui.R", full.names = TRUE, recursive = TRUE
  )
  for (fp in uiFiles) {
    menuList <- list()
    tabList <- list()
    source(fp, local = TRUE)
    for (li in menuList) {
      if (length(li) > 0) {
        allMenus[[length(allMenus) + 1]] <- li
      }
    }
    for (li in tabList) {
      if (length(li) > 0) {
        allTabs[[length(allTabs) + 1]] <- li
      }
    }
  }
  # display name of every menu item, extracted from the htmltools tag
  # structure produced by shinydashboard::menuItem
  mListNames <- c()
  for (menuListItem in seq_along(allMenus)) {
    mListNames[menuListItem] <- allMenus[[menuListItem]][3][[1]][[1]][3]$children[[2]]$children[[1]][1]
  }
  # fixed order for the known menus; any other (contributed) menu is appended
  sollOrder <- c(
    "input", "short cuts", "Parameters", "General QC", " Cell selection", "Gene selection", "Co-expression",
    "Data Exploration", "Subcluster analysis"
  )
  sollOrderIdx <- c()
  for (sIdx in seq_along(sollOrder)) {
    sollOrderIdx[sIdx] <- which(sollOrder[sIdx] == mListNames)
  }
  sollOrderIdx <- c(sollOrderIdx, which(!seq_along(allMenus) %in% sollOrderIdx))
  allMenus <- allMenus[sollOrderIdx]
  # source all parameters.R files under contributions; sourcing is kept for
  # its side effects only (the resulting tabList is deliberately not added
  # to allTabs here)
  parFiles <- dir(
    path = c(paste0(packagePath, "/contributions"), localContributionDir),
    pattern = "parameters.R", full.names = TRUE, recursive = TRUE
  )
  for (fp in parFiles) {
    tabList <- list()
    source(fp, local = TRUE)
  }
  # all tab panels wrapped in the div expected by shinydashboard
  getallTabs <- function() {
    tags$div(
      allTabs,
      class = "tab-content"
    )
  }
  getallMenus <- function() {
    allMenus
  }
  # optional right-hand control bar shipped with the package
  controlbarContext = NULL
  if (file.exists(paste0(packagePath, "/controlbarContext.R"))) {
    source(file = paste0(packagePath, "/controlbarContext.R"), local = TRUE)
  }
  # NOTE(review): currently unused; documents the jqueryui asset shinyjqui relies on
  dep = htmltools::htmlDependency("jqueryui", "3.6.0", c(href = "shared/jqueryui"), script = "jquery-ui.min.js")
  shinyUI(
    shinydashboardPlus::dashboardPage(
      dheader(),
      shinydashboardPlus::dashboardSidebar(
        shinydashboard::sidebarMenu(
          style = "height: 90vh; overflow-y: auto;",
          id = "sideBarID",
          getallMenus(),
          htmlOutput("summaryStatsSideBar"),
          # font color; otherwise the text on these buttons is gray
          tags$head(tags$style(".butt{color: black !important;}")),
          # intended to change the transparency of the area highlighted by introjs
          tags$head(tags$style(HTML("berndTest{background-color:rgba(255,34,22,0.1);}"))),
          br(),
          downloadButton("countscsv", "Download (log) counts.csv", class = "butt"),
          br(),
          downloadButton("RDSsave", "Download RData", class = "butt"),
          br(),
          downloadButton("RmdSave", "Download History", class = "butt"),
          if (DEBUG) sc_checkboxInput("DEBUGSAVE", "Save for DEBUG", FALSE),
          if (DEBUG) verbatimTextOutput("DEBUGSAVEstring"),
          if (is.environment(.schnappsEnv)) {
            if (exists("historyPath", envir = .schnappsEnv)) {
              actionButton("comment2History", "Add comment to history")
            }
          },
          if (DEBUG) {
            actionButton("openBrowser", "open Browser")
          },
          actionButton("Quit", "quit")
        )
      ), # dashboard side bar
      shinydashboard::dashboardBody(
        tags$script(HTML("$('body').addClass('fixed');")),
        # shinyjqui needs resizing support that shinyjs alone cannot provide; see
        # https://shiny.rstudio.com/articles/communicating-with-js.html
        shinyjs::useShinyjs(debug = TRUE),
        tags$script("showmenuItem = function(targetid) {var x = document.getElementById(targetid); x.style.display = 'block'; x.classList.add('menu-open');};"),
        # tell the server when the client has finished starting up
        tags$script("$(document).on('shiny:connected', function(){
var startingUp = true;
Shiny.setInputValue('startingUp', '1');
function onTimeout() {
if(startingUp){
startingUp = 0;
Shiny.setInputValue('startingUp', '0');
};
};
function startIdleTimer() {
if (idleTimer) clearTimeout(idleTimer);
idleTimer = setTimeout(onTimeout, timeoutWarningMsecs);
};
$(document).on('shiny:idle', onTimeout);
});"),
        introjsUI(),
        shinyjs::inlineCSS(list(.red = "background-color: DarkSalmon; hover: red")),
        shinyjs::inlineCSS(list(.green = "background-color: lightgreen")),
        getallTabs(),
        tags$head(tags$style(HTML("div.box-header {display: block;}"))),
        tags$head(tags$style(HTML("h3.box-title {display: block;}")))
      ), # dashboard body
      options = list(sidebarExpandOnHover = TRUE),
      controlbar = controlbarContext
    ) # main dashboard
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/n-prop-miss-complete-rows.R
\name{prop_miss_row}
\alias{prop_miss_row}
\title{Return a vector of the proportion of missing values in each row}
\usage{
prop_miss_row(data)
}
\arguments{
\item{data}{a dataframe}
}
\value{
numeric vector of the proportion of missing values in each row
}
\description{
Substitute for \code{rowMeans(is.na(data))}, but it also checks if input is NULL or
is a dataframe
}
\examples{
prop_miss_row(airquality)
prop_miss_row(pedestrian)
}
\seealso{
\code{\link{miss_case_pct}} \code{\link{miss_case_prop}} \code{\link[=miss_prop_summary]{miss_prop_summary()}} \code{\link{miss_case_summary}} \code{\link{miss_case_table}} \code{\link{miss_summary}} \code{\link{miss_var_pct}} \code{\link{miss_var_prop}} \code{\link{miss_var_run}} \code{\link{miss_var_span}} \code{\link{miss_var_summary}} \code{\link{miss_var_table}} \code{\link{n_complete}} \code{\link{n_complete_row}} \code{\link{n_miss}} \code{\link{n_miss_row}} \code{\link{pct_complete}} \code{\link{pct_miss}} \code{\link{prop_complete}} \code{\link{prop_complete_row}} \code{\link{prop_miss}}
}
| /man/prop_miss_row.Rd | no_license | halzahrani/naniar | R | false | true | 1,001 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/n-prop-miss-complete-rows.R
\name{prop_miss_row}
\alias{prop_miss_row}
\title{Return a vector of the proportion of missing values in each row}
\usage{
prop_miss_row(data)
}
\arguments{
\item{data}{a dataframe}
}
\value{
numeric vector of the proportion of missing values in each row
}
\description{
Substitute for \code{rowMeans(is.na(data))}, but it also checks if input is NULL or
is a dataframe
}
\examples{
prop_miss_row(airquality)
prop_miss_row(pedestrian)
}
\seealso{
\code{\link{miss_case_pct}} \code{\link{miss_case_prop}} \code{\link[=miss_prop_summary]{miss_prop_summary()}} \code{\link{miss_case_summary}} \code{\link{miss_case_table}} \code{\link{miss_summary}} \code{\link{miss_var_pct}} \code{\link{miss_var_prop}} \code{\link{miss_var_run}} \code{\link{miss_var_span}} \code{\link{miss_var_summary}} \code{\link{miss_var_table}} \code{\link{n_complete}} \code{\link{n_complete_row}} \code{\link{n_miss}} \code{\link{n_miss_row}} \code{\link{pct_complete}} \code{\link{pct_miss}} \code{\link{prop_complete}} \code{\link{prop_complete_row}} \code{\link{prop_miss}}
}
|
#' Removes rows from a RelEx experiment that have zero detection counts.
#'
#' Drops every protein whose detection count is zero, keeping the remaining
#' rows of all per-protein tables (\code{sn}, \code{ratio}, \code{snSD},
#' \code{ratioSD}) in sync with the \code{proteins} and \code{protein.counts}
#' vectors.
#'
#' @param x A list of RelEx experiments.
#' @return a list much like \code{x} but where \code{min(protein.counts) >= 1}
#' @author Mark Cowley
#' @keywords manip
#' @export
#'
rmNArows.relex <- function(x) {
	idx <- which(x$protein.counts > 0)
	if( length(idx) < length(x$protein.counts) ) {
		# drop = FALSE keeps each table 2-D even when only one protein remains
		x$sn <- x$sn[idx, , drop = FALSE]
		x$ratio <- x$ratio[idx, , drop = FALSE]
		x$snSD <- x$snSD[idx, , drop = FALSE]
		x$ratioSD <- x$ratioSD[idx, , drop = FALSE]
		x$proteins <- x$proteins[idx]
		x$protein.counts <- x$protein.counts[idx]
	}
	return( x )
}
| /R/rmNArows.relex.R | no_license | drmjc/relex | R | false | false | 738 | r | #' Removes rows from a RelEx experiment that have zero detection counts.
#' Removes rows from a RelEx experiment that have zero detection counts. This
#' will update all tables in the list, as well as the proteins and
#' protein.counts vectors
#'
#' @param x A list of RelEx experiments.
#' @return a list much like \code{x} but where \code{min(protein.counts) >= 1}
#' @author Mark Cowley
#' @keywords manip
#' @export
#'
rmNArows.relex <- function(x) {
	# rows (proteins) with at least one detection are kept
	keep <- which(x$protein.counts > 0)
	if (length(keep) == length(x$protein.counts)) {
		return(x)
	}
	# subset every per-protein table the same way, then the parallel vectors
	for (tbl in c("sn", "ratio", "snSD", "ratioSD")) {
		x[[tbl]] <- x[[tbl]][keep, ]
	}
	x$proteins <- x$proteins[keep]
	x$protein.counts <- x$protein.counts[keep]
	x
}
|
library(kernDeepStackNet)
# Check function fitKDSN on the 4-dimensional XOR problem (duration is long)
# Build all 2^4 binary input combinations; the response y1 is the XOR of the
# four input bits, so the problem is not linearly separable.
XORdat4Dim <- expand.grid(x1=c(0, 1), x2=c(0, 1), x3=c(0, 1), x4=c(0, 1))
XORdat4Dim <- cbind(y1=as.numeric(xor(xor(xor(XORdat4Dim[, 1], XORdat4Dim[, 2]), XORdat4Dim[, 3]), XORdat4Dim[, 4])), XORdat4Dim)
fittedKDSN <- fitKDSN(y=XORdat4Dim[, 1], X=as.matrix(XORdat4Dim[, -1]), levels=10)
predict(fittedKDSN, newx=as.matrix(XORdat4Dim[, -1]))
# Fit with standardized responses (standY=TRUE) and more levels
fittedKDSN <- fitKDSN(y=XORdat4Dim[, 1], X=as.matrix(XORdat4Dim[, -1]), levels=100, standY=TRUE)
predict(fittedKDSN, newx=as.matrix(XORdat4Dim[, -1]))
| /tests/KDSNestimation_tests_fit_k_DSN_rft.R | no_license | cran/kernDeepStackNet | R | false | false | 609 | r | library(kernDeepStackNet)
# Check function fitKDSN on the 4-dimensional XOR problem (duration is long)
# Build all 2^4 binary input combinations; the response y1 is the XOR of the
# four input bits, so the problem is not linearly separable.
XORdat4Dim <- expand.grid(x1=c(0, 1), x2=c(0, 1), x3=c(0, 1), x4=c(0, 1))
XORdat4Dim <- cbind(y1=as.numeric(xor(xor(xor(XORdat4Dim[, 1], XORdat4Dim[, 2]), XORdat4Dim[, 3]), XORdat4Dim[, 4])), XORdat4Dim)
fittedKDSN <- fitKDSN(y=XORdat4Dim[, 1], X=as.matrix(XORdat4Dim[, -1]), levels=10)
predict(fittedKDSN, newx=as.matrix(XORdat4Dim[, -1]))
# Fit with standardized responses (standY=TRUE) and more levels
fittedKDSN <- fitKDSN(y=XORdat4Dim[, 1], X=as.matrix(XORdat4Dim[, -1]), levels=100, standY=TRUE)
predict(fittedKDSN, newx=as.matrix(XORdat4Dim[, -1]))
|
library(deSolve)
### Name: ode.2D
### Title: Solver for 2-Dimensional Ordinary Differential Equations
### Aliases: ode.2D
### Keywords: math
### ** Examples
## =======================================================================
## A Lotka-Volterra predator-prey model with predator and prey
## dispersing in 2 dimensions
## =======================================================================
## ==================
## Model definitions
## ==================
## Derivative function for ode.2D: 'state' stacks the N*N prey grid first,
## then the N*N predator grid (filled column-major by matrix()).  Returns a
## list whose first element is the stacked vector of time derivatives.
lvmod2D <- function (time, state, pars, N, Da, dx) {
NN <- N*N
Prey <- matrix(nrow = N, ncol = N,state[1:NN])
Pred <- matrix(nrow = N, ncol = N,state[(NN+1):(2*NN)])
with (as.list(pars), {
## Biology
dPrey <- rGrow * Prey * (1- Prey/K) - rIng * Prey * Pred
dPred <- rIng * Prey * Pred*assEff - rMort * Pred
zero <- rep(0, N)
## 1. Fluxes in x-direction; zero fluxes near boundaries
FluxPrey <- -Da * rbind(zero,(Prey[2:N,] - Prey[1:(N-1),]), zero)/dx
FluxPred <- -Da * rbind(zero,(Pred[2:N,] - Pred[1:(N-1),]), zero)/dx
## Add flux gradient to rate of change
dPrey <- dPrey - (FluxPrey[2:(N+1),] - FluxPrey[1:N,])/dx
dPred <- dPred - (FluxPred[2:(N+1),] - FluxPred[1:N,])/dx
## 2. Fluxes in y-direction; zero fluxes near boundaries
FluxPrey <- -Da * cbind(zero,(Prey[,2:N] - Prey[,1:(N-1)]), zero)/dx
FluxPred <- -Da * cbind(zero,(Pred[,2:N] - Pred[,1:(N-1)]), zero)/dx
## Add flux gradient to rate of change
dPrey <- dPrey - (FluxPrey[,2:(N+1)] - FluxPrey[,1:N])/dx
dPred <- dPred - (FluxPred[,2:(N+1)] - FluxPred[,1:N])/dx
return(list(c(as.vector(dPrey), as.vector(dPred))))
})
}
## ===================
## Model applications
## ===================
pars <- c(rIng = 0.2, # /day, rate of ingestion
rGrow = 1.0, # /day, growth rate of prey
rMort = 0.2 , # /day, mortality rate of predator
assEff = 0.5, # -, assimilation efficiency
K = 5 ) # mmol/m3, carrying capacity
R <- 20 # total length of surface, m
N <- 50 # number of boxes in one direction
dx <- R/N # thickness of each layer
Da <- 0.05 # m2/d, dispersion coefficient
NN <- N*N # total number of boxes
## initial conditions: prey and predator seeded in four central cells
yini <- rep(0, 2*N*N)
cc <- c((NN/2):(NN/2+1)+N/2, (NN/2):(NN/2+1)-N/2)
yini[cc] <- yini[NN+cc] <- 1
## solve model (5000 state variables)... use Cash-Karp Runge-Kutta method
times <- seq(0, 50, by = 1)
out <- ode.2D(y = yini, times = times, func = lvmod2D, parms = pars,
dimens = c(N, N), names = c("Prey", "Pred"),
N = N, dx = dx, Da = Da, method = rkMethod("rk45ck"))
diagnostics(out)
summary(out)
# Mean of prey concentration at each time step
Prey <- subset(out, select = "Prey", arr = TRUE)
dim(Prey)
MeanPrey <- apply(Prey, MARGIN = 3, FUN = mean)
plot(times, MeanPrey)
## Not run:
##D ## plot results
##D Col <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan",
##D "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
##D
##D for (i in seq(1, length(times), by = 1))
##D image(Prey[ , ,i],
##D col = Col(100), xlab = , zlim = range(out[,2:(NN+1)]))
##D
##D ## similar, plotting both and adding a margin text with times:
##D image(out, xlab = "x", ylab = "y", mtext = paste("time = ", times))
## End(Not run)
select <- c(1, 40)
image(out, xlab = "x", ylab = "y", mtext = "Lotka-Volterra in 2-D",
subset = select, mfrow = c(2,2), legend = TRUE)
# plot prey and pred at t = 10; first use subset to select data
prey10 <- matrix (nrow = N, ncol = N,
data = subset(out, select = "Prey", subset = (time == 10)))
pred10 <- matrix (nrow = N, ncol = N,
data = subset(out, select = "Pred", subset = (time == 10)))
mf <- par(mfrow = c(1, 2))
image(prey10)
image(pred10)
par (mfrow = mf)
# same, using deSolve's image:
image(out, subset = (time == 10))
## =======================================================================
## An example with a cyclic boundary condition.
## Diffusion in 2-D; extra flux on 2 boundaries,
## cyclic boundary in y
## =======================================================================
## Derivative function: 2-D diffusion with first-order consumption, an
## imposed boundary concentration of 1, an extra source of 10 on two sides
## and exchange between the first and last column (cyclic in y).
## NOTE: nx, ny, r, Dx, Dy, dx, dy are taken from the workspace (set below).
diffusion2D <- function(t, Y, par) {
y <- matrix(nrow = nx, ncol = ny, data = Y) # vector to 2-D matrix
dY <- -r * y # consumption
BNDx <- rep(1, nx) # boundary concentration
BNDy <- rep(1, ny) # boundary concentration
## diffusion in X-direction; boundaries=imposed concentration
Flux <- -Dx * rbind(y[1,] - BNDy, (y[2:nx,] - y[1:(nx-1),]), BNDy - y[nx,])/dx
dY <- dY - (Flux[2:(nx+1),] - Flux[1:nx,])/dx
## diffusion in Y-direction
Flux <- -Dy * cbind(y[,1] - BNDx, (y[,2:ny]-y[,1:(ny-1)]), BNDx - y[,ny])/dy
dY <- dY - (Flux[,2:(ny+1)] - Flux[,1:ny])/dy
## extra flux on two sides
dY[,1] <- dY[,1] + 10
dY[1,] <- dY[1,] + 10
## and exchange between sides on y-direction
dY[,ny] <- dY[,ny] + (y[,1] - y[,ny]) * 10
return(list(as.vector(dY)))
}
## parameters
dy <- dx <- 1 # grid size
Dy <- Dx <- 1 # diffusion coeff, X- and Y-direction
r <- 0.05 # consumption rate
nx <- 50
ny <- 100
y <- matrix(nrow = nx, ncol = ny, 1)
## model most efficiently solved with lsodes - need to specify lrw
print(system.time(
ST3 <- ode.2D(y, times = 1:100, func = diffusion2D, parms = NULL,
dimens = c(nx, ny), verbose = TRUE, names = "Y",
lrw = 400000, atol = 1e-10, rtol = 1e-10, cyclicBnd = 2)
))
# summary of 2-D variable
summary(ST3)
# plot output at t = 10
t10 <- matrix (nrow = nx, ncol = ny,
data = subset(ST3, select = "Y", subset = (time == 10)))
persp(t10, theta = 30, border = NA, phi = 70,
col = "lightblue", shade = 0.5, box = FALSE)
# image plot, using deSolve's image function
image(ST3, subset = time == 10, method = "persp",
theta = 30, border = NA, phi = 70, main = "",
col = "lightblue", shade = 0.5, box = FALSE)
## Not run:
##D zlim <- range(ST3[, -1])
##D for (i in 2:nrow(ST3)) {
##D y <- matrix(nrow = nx, ncol = ny, data = ST3[i, -1])
##D filled.contour(y, zlim = zlim, main = i)
##D }
##D
##D # same
##D image(ST3, method = "filled.contour")
## End(Not run)
| /data/genthat_extracted_code/deSolve/examples/ode.2D.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 6,282 | r | library(deSolve)
### Name: ode.2D
### Title: Solver for 2-Dimensional Ordinary Differential Equations
### Aliases: ode.2D
### Keywords: math
### ** Examples
## =======================================================================
## A Lotka-Volterra predator-prey model with predator and prey
## dispersing in 2 dimensions
## =======================================================================
## ==================
## Model definitions
## ==================
## 2-D Lotka-Volterra reaction-dispersal rates for deSolve::ode.2D().
##
## Arguments:
##   time  - current time (unused; the dynamics are autonomous)
##   state - flat state vector: N*N prey values followed by N*N predator
##           values, column-major
##   pars  - named vector/list with rIng, rGrow, rMort, assEff and K
##   N     - number of grid cells per side
##   Da    - dispersal coefficient (shared by both species)
##   dx    - grid spacing
##
## Returns list(d(state)/dt) as a flat vector, as ode.2D requires.
lvmod2D <- function (time, state, pars, N, Da, dx) {
  n_cells <- N * N
  p <- as.list(pars)

  ## Unpack the flat state vector into square grids.
  prey <- matrix(state[seq_len(n_cells)], nrow = N, ncol = N)
  pred <- matrix(state[n_cells + seq_len(n_cells)], nrow = N, ncol = N)

  ## Local biology: logistic prey growth minus predation; predator
  ## growth from assimilated prey minus mortality.
  d_prey <- p$rGrow * prey * (1 - prey / p$K) - p$rIng * prey * pred
  d_pred <- p$rIng * prey * pred * p$assEff - p$rMort * pred

  edge <- rep(0, N)  # zero-flux (closed) domain boundaries

  ## Dispersal in the x-direction: Fickian flux between neighbouring
  ## rows, padded with zero flux at the edges.  diff() on a matrix gives
  ## the row differences prey[2:N, ] - prey[1:(N-1), ].
  flux_prey <- -Da * rbind(edge, diff(prey), edge) / dx
  flux_pred <- -Da * rbind(edge, diff(pred), edge) / dx
  d_prey <- d_prey - (flux_prey[-1, ] - flux_prey[-(N + 1), ]) / dx
  d_pred <- d_pred - (flux_pred[-1, ] - flux_pred[-(N + 1), ]) / dx

  ## Dispersal in the y-direction: same scheme applied to the columns.
  flux_prey <- -Da * cbind(edge, t(diff(t(prey))), edge) / dx
  flux_pred <- -Da * cbind(edge, t(diff(t(pred))), edge) / dx
  d_prey <- d_prey - (flux_prey[, -1] - flux_prey[, -(N + 1)]) / dx
  d_pred <- d_pred - (flux_pred[, -1] - flux_pred[, -(N + 1)]) / dx

  list(c(as.vector(d_prey), as.vector(d_pred)))
}
## ===================
## Model applications
## ===================
pars <- c(rIng = 0.2, # /day, rate of ingestion
rGrow = 1.0, # /day, growth rate of prey
rMort = 0.2 , # /day, mortality rate of predator
assEff = 0.5, # -, assimilation efficiency
K = 5 ) # mmol/m3, carrying capacity
R <- 20 # total length of surface, m
N <- 50 # number of boxes in one direction
dx <- R/N # thickness of each layer
Da <- 0.05 # m2/d, dispersion coefficient
NN <- N*N # total number of boxes
## initial conditions
yini <- rep(0, 2*N*N)
cc <- c((NN/2):(NN/2+1)+N/2, (NN/2):(NN/2+1)-N/2)
yini[cc] <- yini[NN+cc] <- 1
## solve model (5000 state variables... use Cash-Karp Runge-Kutta method
times <- seq(0, 50, by = 1)
out <- ode.2D(y = yini, times = times, func = lvmod2D, parms = pars,
dimens = c(N, N), names = c("Prey", "Pred"),
N = N, dx = dx, Da = Da, method = rkMethod("rk45ck"))
diagnostics(out)
summary(out)
# Mean of prey concentration at each time step
Prey <- subset(out, select = "Prey", arr = TRUE)
dim(Prey)
MeanPrey <- apply(Prey, MARGIN = 3, FUN = mean)
plot(times, MeanPrey)
## Not run:
##D ## plot results
##D Col <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan",
##D "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
##D
##D for (i in seq(1, length(times), by = 1))
##D image(Prey[ , ,i],
##D col = Col(100), xlab = , zlim = range(out[,2:(NN+1)]))
##D
##D ## similar, plotting both and adding a margin text with times:
##D image(out, xlab = "x", ylab = "y", mtext = paste("time = ", times))
## End(Not run)
select <- c(1, 40)
image(out, xlab = "x", ylab = "y", mtext = "Lotka-Volterra in 2-D",
subset = select, mfrow = c(2,2), legend = TRUE)
# plot prey and pred at t = 10; first use subset to select data
prey10 <- matrix (nrow = N, ncol = N,
data = subset(out, select = "Prey", subset = (time == 10)))
pred10 <- matrix (nrow = N, ncol = N,
data = subset(out, select = "Pred", subset = (time == 10)))
mf <- par(mfrow = c(1, 2))
image(prey10)
image(pred10)
par (mfrow = mf)
# same, using deSolve's image:
image(out, subset = (time == 10))
## =======================================================================
## An example with a cyclic boundary condition.
## Diffusion in 2-D; extra flux on 2 boundaries,
## cyclic boundary in y
## =======================================================================
## Rate function for a 2-D diffusion model with first-order consumption,
## an imposed-concentration boundary, a constant extra influx along two
## edges, and a cyclic coupling in the y-direction (deSolve::ode.2D
## interface).  Reads the grid/rate constants nx, ny, Dx, Dy, dx, dy and
## r from the enclosing environment; `par` is unused.
diffusion2D <- function(t, Y, par) {
  conc <- matrix(Y, nrow = nx, ncol = ny)  # flat state -> 2-D grid
  rate <- -r * conc                        # first-order consumption

  bnd_x <- rep(1, nx)  # imposed boundary concentration (length nx)
  bnd_y <- rep(1, ny)  # imposed boundary concentration (length ny)

  ## Fickian diffusion along the x-direction; the extra first/last rows
  ## carry the flux across the imposed-concentration boundary.
  flux <- -Dx * rbind(conc[1, ] - bnd_y, diff(conc), bnd_y - conc[nx, ]) / dx
  rate <- rate - (flux[-1, ] - flux[-(nx + 1), ]) / dx

  ## Same scheme along the y-direction.
  flux <- -Dy * cbind(conc[, 1] - bnd_x,
                      conc[, 2:ny] - conc[, 1:(ny - 1)],
                      bnd_x - conc[, ny]) / dy
  rate <- rate - (flux[, -1] - flux[, -(ny + 1)]) / dy

  ## Constant extra influx along the first column and the first row.
  rate[, 1] <- rate[, 1] + 10
  rate[1, ] <- rate[1, ] + 10

  ## Cyclic exchange: relax the last column towards the first.
  rate[, ny] <- rate[, ny] + (conc[, 1] - conc[, ny]) * 10

  list(as.vector(rate))
}
## parameters
dy <- dx <- 1 # grid size
Dy <- Dx <- 1 # diffusion coeff, X- and Y-direction
r <- 0.05 # consumption rate
nx <- 50
ny <- 100
y <- matrix(nrow = nx, ncol = ny, 1)
## model most efficiently solved with lsodes - need to specify lrw
print(system.time(
ST3 <- ode.2D(y, times = 1:100, func = diffusion2D, parms = NULL,
dimens = c(nx, ny), verbose = TRUE, names = "Y",
lrw = 400000, atol = 1e-10, rtol = 1e-10, cyclicBnd = 2)
))
# summary of 2-D variable
summary(ST3)
# plot output at t = 10
t10 <- matrix (nrow = nx, ncol = ny,
data = subset(ST3, select = "Y", subset = (time == 10)))
persp(t10, theta = 30, border = NA, phi = 70,
col = "lightblue", shade = 0.5, box = FALSE)
# image plot, using deSolve's image function
image(ST3, subset = time == 10, method = "persp",
theta = 30, border = NA, phi = 70, main = "",
col = "lightblue", shade = 0.5, box = FALSE)
## Not run:
##D zlim <- range(ST3[, -1])
##D for (i in 2:nrow(ST3)) {
##D y <- matrix(nrow = nx, ncol = ny, data = ST3[i, -1])
##D filled.contour(y, zlim = zlim, main = i)
##D }
##D
##D # same
##D image(ST3, method = "filled.contour")
## End(Not run)
|
# Bootstrap the pacman package manager, then use it to load the tidyverse.
if (!require("pacman")) install.packages("pacman"); library(pacman)
p_load(tidyverse)
# AUROC boxplots, one box per algorithm, faceted by cancer type (rows) and
# description (columns), with a dashed red reference line at AUROC = 0.5.
read_tsv("all_data.tsv") %>%
  # Strip the "TCGA_" prefix from the cohort names.
  mutate(CancerType = str_replace(CancerType, "^TCGA_", "")) %>%
  ggplot(aes(x = Algorithm, y = AUROC, fill = Algorithm)) +
  geom_boxplot() +
  geom_hline(yintercept = 0.5, color = "red", linetype = "dashed") +
  facet_grid(CancerType ~ Description) +
  # Hide the x tick labels; algorithms are distinguished by fill colour.
  theme(axis.text.x = element_blank())
ggsave("boxplots.png", height = 8, width = 7) | /DTA Notes/Archive/make_plot.R | no_license | natemella/Paper | R | false | false | 456 | r | if (!require("pacman")) install.packages("pacman"); library(pacman)
p_load(tidyverse)
read_tsv("all_data.tsv") %>%
mutate(CancerType = str_replace(CancerType, "^TCGA_", "")) %>%
ggplot(aes(x = Algorithm, y = AUROC, fill = Algorithm)) +
geom_boxplot() +
geom_hline(yintercept = 0.5, color = "red", linetype = "dashed") +
facet_grid(CancerType ~ Description) +
theme(axis.text.x = element_blank())
ggsave("boxplots.png", height = 8, width = 7) |
# Exploratory Data Analysis - Week 4 - Q. #4
# Across the United States, how have emissions from coal combustion-related
# sources changed from 1999-2008?
# Load the datasets from local machine (DataLoad.R loaded and unzipped them)
NEI <- readRDS("./summarySCC_PM25.rds")
SCC <- readRDS("./Source_Classification_Code.rds")
# Find coal combustion-related sources
# (classification rows whose EI.Sector matches "Fuel Comb ... Coal")
is.combustion.coal <- grepl("Fuel Comb.*Coal", SCC$EI.Sector)
combustion.coal.sources <- SCC[is.combustion.coal,]
# Find emissions from coal combustion-related sources
# (keep NEI rows whose source code appears in the coal-combustion subset)
emissions <- NEI[(NEI$SCC %in% combustion.coal.sources$SCC), ]
# group by year
emissions.by.year <- aggregate(Emissions ~ year, data=emissions, FUN=sum)
# plot using ggplot
library(ggplot2)
# Generate the graph in the current directory
# (the device is closed by the dev.off() call after print(ggp) below)
png("plot4.png")
# Emissions are divided by 1000 so the y-axis reads in kilotons.
ggp <- ggplot(emissions.by.year, aes(x=factor(year), y=Emissions/1000)) +
  geom_bar(stat="identity") +
  labs(x="Year", y=expression("Total PM"[2.5]*" Emission (Kilotons)")) +
  ggtitle("US Emissions from coal combustion-related sources")
print(ggp)
dev.off() | /plot4.R | no_license | ejcrotty/exploratory_data_week_4 | R | false | false | 1,093 | r | # Exploratory Data Analysis - Week 4 - Q. #4
# Across the United States, how have emissions from coal combustion-related
# sources changed from 1999-2008?
# Load the datasets from local machine (DataLoad.R loaded and unzipped them)
NEI <- readRDS("./summarySCC_PM25.rds")
SCC <- readRDS("./Source_Classification_Code.rds")
# Find coal combustion-related sources
is.combustion.coal <- grepl("Fuel Comb.*Coal", SCC$EI.Sector)
combustion.coal.sources <- SCC[is.combustion.coal,]
# Find emissions from coal combustion-related sources
emissions <- NEI[(NEI$SCC %in% combustion.coal.sources$SCC), ]
# group by year
emissions.by.year <- aggregate(Emissions ~ year, data=emissions, FUN=sum)
# plot using ggplot
library(ggplot2)
# Generate the graph in the current directory
png("plot4.png")
ggp <- ggplot(emissions.by.year, aes(x=factor(year), y=Emissions/1000)) +
geom_bar(stat="identity") +
labs(x="Year", y=expression("Total PM"[2.5]*" Emission (Kilotons)")) +
ggtitle("US Emissions from coal combustion-related sources")
print(ggp)
dev.off() |
# packages
library(tidyverse)
library(plotly)
library(emojifont)
# read the data (TidyTuesday 2018-10-23 movie profits)
movies <- read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2018/2018-10-23/movie_profit.csv')
library(ggplot2)
# Derive the release year (the last four characters of release_date) and a
# profitability ratio (worldwide gross over production budget), keeping
# releases from 2000-2014 with positive gross and budget.
movies_horror <- movies %>%
  mutate(year = as.numeric(str_sub(release_date, start = -1L - 3, end = -1L)),
         q_rentabilidad= worldwide_gross/production_budget) %>%
  filter(year %in% 2000:2014, worldwide_gross > 0, production_budget > 0)
movies_horror
glimpse(movies_horror)
# Worldwide gross vs. profitability on log-log axes, coloured by genre and
# faceted by MPAA rating; `text = movie` is an extra aesthetic picked up
# by ggplotly() below for the hover tooltip.
gg_horror <- ggplot(movies_horror, aes(x = worldwide_gross,
                   y = q_rentabilidad, color = genre, text=movie)) +
  geom_point(alpha=0.5) +
  theme_minimal() +
  scale_x_log10() +
  scale_y_log10() +
  facet_wrap(~ mpaa_rating)
gg_horror
ggplotly(gg_horror)
# Leonel's script: bubble chart of distributor vs. year, bubble size by
# domestic gross, coloured by genre, with the legend hidden.
fig <- movies_horror %>% ggplot(aes(x=distributor, y=year, size = domestic_gross, color=genre)) +
  geom_point(alpha=0.5) + scale_size(range = c(.1, 20)) + theme(text = element_text(size=10),plot.background = element_rect(fill = "black"),
  axis.text.x = element_text(angle=35, hjust=1), legend.position = 'none') + scale_x_discrete("Eje X") + scale_y_discrete("Eje Y") + scale_color_discrete(NULL) +
  labs(title = 'Título') + theme_dark()
fig <- ggplotly(fig)
fig
| /equipo-1/graficas-equipo-1.r | no_license | tereom/horroR | R | false | false | 1,356 | r | # paquetes
library(tidyverse)
library(plotly)
library(emojifont)
# leemos los datos
movies <- read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2018/2018-10-23/movie_profit.csv')
library(ggplot2)
movies_horror <- movies %>%
mutate(year = as.numeric(str_sub(release_date, start = -1L - 3, end = -1L)),
q_rentabilidad= worldwide_gross/production_budget) %>%
filter(year %in% 2000:2014, worldwide_gross > 0, production_budget > 0)
movies_horror
glimpse(movies_horror)
gg_horror <- ggplot(movies_horror, aes(x = worldwide_gross,
y = q_rentabilidad, color = genre, text=movie)) +
geom_point(alpha=0.5) +
theme_minimal() +
scale_x_log10() +
scale_y_log10() +
facet_wrap(~ mpaa_rating)
gg_horror
ggplotly(gg_horror)
#Script Leonel
fig <- movies_horror %>% ggplot(aes(x=distributor, y=year, size = domestic_gross, color=genre)) +
geom_point(alpha=0.5) + scale_size(range = c(.1, 20)) + theme(text = element_text(size=10),plot.background = element_rect(fill = "black"),
axis.text.x = element_text(angle=35, hjust=1), legend.position = 'none') + scale_x_discrete("Eje X") + scale_y_discrete("Eje Y") + scale_color_discrete(NULL) +
labs(title = 'Título') + theme_dark()
fig <- ggplotly(fig)
fig
|
library(shiny)
library(ggplot2)
library(plyr)
library(reshape2)
source("simulation_functions.R")
## UI layout: a page heading, a narrow column with the input controls
## (one tab per model type), and a wide column with the two output plots.
ui <- fluidPage(
  tags$h1("Selection, mutation and drift at multiple loci"),
  column(4,
         tabsetPanel(
           ## Controls for the multiplicative model; rate-like inputs are
           ## entered on a log10 scale and decoded in server().
           tabPanel("Multiplicative",
                    sliderInput(inputId = "log_mu",
                                label = "log10(Mutation rate)",
                                value = -4, min = -6, max = -1),
                    sliderInput(inputId = "log_s",
                                label = "log10(Selection coefficient)",
                                value = -1, min = -6, max = 0),
                    sliderInput(inputId = "h",
                                label = "Dominance coefficient",
                                value = 0.25, min = 0, max = 1),
                    sliderInput(inputId = "log_loci",
                                label = "log10(Number of loci)",
                                value = 2, min = 1, max = 3),
                    actionButton(inputId = "run_button",
                                 label = "Run")),
           ## Placeholder tab with no controls yet.
           tabPanel("Epistatic")),
         tags$p("See the code on ",
                tags$a(href = "https://github.com/mrtnj/shiny_polymutation", "GitHub"))),
  column(8,
         plotOutput(outputId = "plot_fitness"),
         plotOutput(outputId = "plot_q"))
)
## Shiny server: runs one simulation per press of the "Run" button and
## renders the two resulting plots.  sim_variation() and
## plot_simulations() come from simulation_functions.R, sourced at the
## top of the file.
server <- function(input, output) {
  ## Holds the plots from the most recent run; NULL until the user has
  ## pressed "Run", which keeps the plot outputs blank initially.
  simulated_data <- reactiveValues(data = NULL)
  observeEvent(input$run_button, {
    N <- 500  # population size (fixed; not exposed as a UI input)
    ## The sliders are on a log10 scale; convert back to natural units.
    s <- 10^input$log_s
    h <- input$h
    loci <- 10^input$log_loci
    mu <- 10^input$log_mu
    ## Callback handed to sim_variation() so it can advance the
    ## withProgress() bar while the simulation runs.
    update_progress <- function(value) {
      incProgress(value)
    }
    withProgress(message = "Simulating", {
      sim <- sim_variation(N = N,
                           mu = mu,
                           s = s,
                           h = h,
                           loci = loci,
                           gen = 200,
                           progress_function = update_progress)
    })
    plots <- plot_simulations(sim)
    simulated_data$data <- plots
  })
  ## Render the stored plots; the NULL guard avoids errors before the
  ## first simulation has completed.
  output$plot_fitness <- renderPlot({
    if (! is.null(simulated_data$data))
      print(simulated_data$data[[1]])
  })
  output$plot_q <- renderPlot({
    if (! is.null(simulated_data$data))
      print(simulated_data$data[[2]])
  })
}
# Run the application
shinyApp(ui = ui, server = server)
| /app.R | no_license | mrtnj/shiny_polymutation | R | false | false | 2,315 | r | library(shiny)
library(ggplot2)
library(plyr)
library(reshape2)
source("simulation_functions.R")
ui <- fluidPage(
tags$h1("Selection, mutation and drift at multiple loci"),
column(4,
tabsetPanel(
tabPanel("Multiplicative",
sliderInput(inputId = "log_mu",
label = "log10(Mutation rate)",
value = -4, min = -6, max = -1),
sliderInput(inputId = "log_s",
label = "log10(Selection coefficient)",
value = -1, min = -6, max = 0),
sliderInput(inputId = "h",
label = "Dominance coefficient",
value = 0.25, min = 0, max = 1),
sliderInput(inputId = "log_loci",
label = "log10(Number of loci)",
value = 2, min = 1, max = 3),
actionButton(inputId = "run_button",
label = "Run")),
tabPanel("Epistatic")),
tags$p("See the code on ",
tags$a(href = "https://github.com/mrtnj/shiny_polymutation", "GitHub"))),
column(8,
plotOutput(outputId = "plot_fitness"),
plotOutput(outputId = "plot_q"))
)
## Shiny server: runs one simulation per press of the "Run" button and
## renders the two resulting plots.  sim_variation() and
## plot_simulations() come from simulation_functions.R, sourced at the
## top of the file.
server <- function(input, output) {
  ## Plots from the most recent run; NULL until "Run" has been pressed,
  ## which keeps the plot outputs blank initially.
  sim_results <- reactiveValues(data = NULL)

  observeEvent(input$run_button, {
    population_size <- 500  # fixed; not exposed as a UI input

    ## The sliders are on a log10 scale; convert back to natural units
    ## inline.  The anonymous callback lets sim_variation() advance the
    ## withProgress() bar while it runs.
    withProgress(message = "Simulating", {
      sim <- sim_variation(
        N = population_size,
        mu = 10^input$log_mu,
        s = 10^input$log_s,
        h = input$h,
        loci = 10^input$log_loci,
        gen = 200,
        progress_function = function(value) incProgress(value)
      )
    })

    sim_results$data <- plot_simulations(sim)
  })

  ## Render the stored plots; the NULL guards avoid errors before the
  ## first simulation has completed.
  output$plot_fitness <- renderPlot({
    if (!is.null(sim_results$data)) {
      print(sim_results$data[[1]])
    }
  })

  output$plot_q <- renderPlot({
    if (!is.null(sim_results$data)) {
      print(sim_results$data[[2]])
    }
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HeriTools.R
\name{getReadMatrix.CP}
\alias{getReadMatrix.CP}
\title{Simulate a read matrix from compound Poisson mixed effect models (CPMM).}
\usage{
getReadMatrix.CP(vec.num.rep, alphas, sigma2s, ps, phis)
}
\arguments{
\item{vec.num.rep}{A vector of replicate numbers for each strain.}
\item{alphas}{Intercept vector \eqn{\alpha_g}'s,
\eqn{1 \times \texttt{num.features}}{1 x num.features}.}
\item{sigma2s}{Random effect variance vector \eqn{\sigma^2_g}'s,
\eqn{1 \times \texttt{num.features}}{1 x num.features}.}
\item{ps}{Tweedie parameter in CP models, \eqn{p_g}'s, a
\eqn{1 \times \texttt{num.features}}{1 x num.features} vector.}
\item{phis}{Dispersion parameter in CP models, \eqn{\phi_g}'s, a
\eqn{1 \times \texttt{num.features}}{1 x num.features} vector.}
}
\value{
A \eqn{G \times N}{G x N} matrix with CP reads. \eqn{N} is the
total number of samples; \eqn{G} is the number of features. Column names
are sample names of the form "Ss_r", where S stands for sample, s is the
strain number, r is the replicate number within the strain. Row names
are the feature names of the form "Gene g", where g is the feature index.
}
\description{
Simulate a (possibly unbalanced) read matrix from CPMM.
For a compound Poisson (CP) random variable \eqn{Y_{gsr}} with mean
\eqn{\mu_{gs}}, its variance can be expressed as
\eqn{\phi_g\mu_{gs}^{p_g}}, for some \eqn{1<p_g<2}. Under the CPMM, with
a \eqn{\log}-link, the regression on the mean has the form:\cr
\eqn{\log(\mu_{gs}) = \alpha_g+ b_{gs}, \;\;b_{gs}\sim N(0, \sigma^2_g).}
}
\examples{
## Generate a sequencing dataset with 5 features and 6 strains.
## Assign parameter values.
rep.num <- c(3, 5, 2, 3, 4, 2)
a0s <- c(-1, 1, 2, 5, 10)
sig2s <- c(10, 0.2, 0.1, 0.03, 0.01)
ps <- rep(1.5, 5)
phis <- c(1.5, 1, 0.5, 0.1, 0.1)
set.seed(1234)
## Generate reads:
cpData <- getReadMatrix.CP(rep.num, a0s, sig2s, ps, phis)
## Generate strain names:
str <- sapply(1:length(rep.num), function(x){
str.x <- paste0("S", x)
return(rep(str.x, rep.num[x]))
})
str <- do.call(c, str)
}
| /man/getReadMatrix.CP.Rd | no_license | cran/HeritSeq | R | false | true | 2,133 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HeriTools.R
\name{getReadMatrix.CP}
\alias{getReadMatrix.CP}
\title{Simulate a read matrix from compound Poisson mixed effect models (CPMM).}
\usage{
getReadMatrix.CP(vec.num.rep, alphas, sigma2s, ps, phis)
}
\arguments{
\item{vec.num.rep}{A vector of replicate numbers for each strain.}
\item{alphas}{Intercept vector \eqn{\alpha_g}'s,
\eqn{1 \times \texttt{num.features}}{1 x num.features}.}
\item{sigma2s}{Random effect variance vector \eqn{\sigma^2_g}'s,
\eqn{1 \times \texttt{num.features}}{1 x num.features}.}
\item{ps}{Tweedie parameter in CP models, \eqn{p_g}'s, a
\eqn{1 \times \texttt{num.features}}{1 x num.features} vector.}
\item{phis}{Dispersion parameter in CP models, \eqn{\phi_g}'s, a
\eqn{1 \times \texttt{num.features}}{1 x num.features} vector.}
}
\value{
A \eqn{G \times N}{G x N} matrix with CP reads. \eqn{N} is the
total number of samples; \eqn{G} is the number of features. Column names
are sample names of the form "Ss_r", where S stands for sample, s is the
strain number, r is the replicate number within the strain. Row names
are the feature names of the form "Gene g", where g is the feature index.
}
\description{
Simulate a (possibly unbalanced) read matrix from CPMM.
For a compound Poisson (CP) random variable \eqn{Y_{gsr}} with mean
\eqn{\mu_{gs}}, its variance can be expressed as
\eqn{\phi_g\mu_{gs}^{p_g}}, for some \eqn{1<p_g<2}. Under the CPMM, with
a \eqn{\log}-link, the regression on the mean has the form:\cr
\eqn{\log(\mu_{gs}) = \alpha_g+ b_{gs}, \;\;b_{gs}\sim N(0, \sigma^2_g).}
}
\examples{
## Generate a sequencing dataset with 5 features and 6 strains.
## Assign parameter values.
rep.num <- c(3, 5, 2, 3, 4, 2)
a0s <- c(-1, 1, 2, 5, 10)
sig2s <- c(10, 0.2, 0.1, 0.03, 0.01)
ps <- rep(1.5, 5)
phis <- c(1.5, 1, 0.5, 0.1, 0.1)
set.seed(1234)
## Generate reads:
cpData <- getReadMatrix.CP(rep.num, a0s, sig2s, ps, phis)
## Generate strain names:
str <- sapply(1:length(rep.num), function(x){
str.x <- paste0("S", x)
return(rep(str.x, rep.num[x]))
})
str <- do.call(c, str)
}
|
#' Plot the results of a survival analysis
#'
#' @param object iwillsurvive. An iwillsurvive object created from iwillsurvive
#' @param cohort dataframe. A one-row-per-patient cohort used in generating fit.
#' @param ggtheme theme. A ggplot2 theme
#' @param palette character. The name of a palette. See ?ggplot2::scale_colour_brewer for examples
#' @param simple logical. If TRUE, only plot the Kaplan-Meier estimate
#' @param add_gridlines logical. If TRUE, include gridlines
#' @param add_labels logical. If TRUE, show verbal labels
#' @param add_median logical. If TRUE, show median survival
#' @param add_median_delta logical. If TRUE and there is more than one stratum,
#'   annotate the difference between the strata median survival times.
#' @param anchor_arrow logical. If TRUE, use an arrow in pointing to the anchor
#' @param legend_position character. Where should the strata labels be located?
#' Either 'inside' for inside the plot, or 'top', or 'right'
#' @param legend_anchor_y numeric. Y locations of anchors for legends.
#' Only used if legend_position = "inside"
#' @param legend_nudge_y numeric. Vertical nudge applied to the inside legend
#'   labels. Only used if legend_position = "inside"
#' @param legend_position_x numeric. X position of the legend(s)
#' @param xlim numeric.
#' @param x_breaks numeric. Major breaks for the x-axis
#' @param label_size numeric. Size of the labels.
#' @param label_color character. Color of labels.
#' @param median_nudge_y numeric. Amount to nudge median label.
#' @param risk_table logical. If TRUE, include the risk table
#' @param risk_size numeric. Size of font in risk table.
#' @param index_title character.
#' @param event_title character.
#' @param median_label_size numeric.
#' @param event_nudge_y numeric.
#'
#' @import ggplot2
#' @import scales
#'
#' @return ggplot2
#' @export
#'
#' @examples
#' # Set things up by creating an iwillsurvive object
#'
#' cohort <- ez_cohort %>%
#' derive_followup_date(
#' event_date = "dateofdeath",
#' censor_date = "lastvisitdate"
#' ) %>%
#' derive_followup_time(index_date = "lotstartdate") %>%
#' derive_event_status(event_date = "dateofdeath")
#'
#' cohort_iws <- iwillsurvive(cohort,
#' followup_time = "followup_days",
#' terms = "condition",
#' event_title = "Death",
#' index_title = "LOT1 Start"
#' )
#'
#' plot_survival(cohort_iws)
#'
#' # Set simple = TRUE to only get the KM without any fancy pants stuff
#'
#' plot_survival(cohort_iws,
#' simple = TRUE
#' )
#'
#' # Control the location of the legend with legend_position
#' plot_survival(cohort_iws,
#' legend_position = "top"
#' )
#'
#' # Change the location of the labels and add arrows
#' plot_survival(cohort_iws,
#' legend_anchor_y = c(.7, .85),
#' legend_position_x = c(260, 250),
#' legend_nudge_y = .1,
#' anchor_arrow = TRUE
#' )
plot_survival <- function(object = NULL,
cohort = NULL,
ggtheme = ggplot2::theme_bw(),
palette = "Set1",
simple = FALSE,
add_gridlines = TRUE,
add_labels = TRUE,
add_median = TRUE,
add_median_delta = TRUE,
anchor_arrow = FALSE,
legend_position = "inside",
legend_anchor_y = .5,
legend_nudge_y = NULL,
legend_position_x = NULL,
xlim = NULL,
x_breaks = NULL,
label_size = 3,
label_color = gray(0),
median_nudge_y = .1,
risk_table = TRUE,
risk_size = 3,
index_title = NULL,
event_title = NULL,
median_label_size = 4,
event_nudge_y = .15) {
testthat::expect_is(object, "iwillsurvive")
plot_df <- broom::tidy(object$fit)
cohort <- object$cohort
patient_n <- nrow(cohort)
if (is.null(event_title)) {
event_title <- object$event_title
}
if (is.null(index_title)) {
index_title <- object$index_title
}
if (simple) {
add_labels <- FALSE
add_median <- FALSE
add_median_delta <- FALSE
legend_position <- "top"
}
if ("strata" %in% names(plot_df) == FALSE) {
plot_df$strata <- "all"
}
# Create km plot {p_km} ------------------------------------------------------
{
plot_df <- plot_df %>%
dplyr::mutate(strata = stringr::str_remove_all(strata,
pattern = "condition="
))
strata_values <- unique(plot_df$strata)
strata_n <- length(strata_values)
if ("strata" %in% names(plot_df) == FALSE) {
plot_df <- plot_df %>%
dplyr::mutate(strata = "all")
}
plot_df <- plot_df %>%
dplyr::arrange(strata, time)
p_km <- ggplot2::ggplot(
plot_df,
ggplot2::aes(
x = time,
group = strata,
y = estimate,
col = strata
)
) +
ggplot2::scale_y_continuous(labels = scales::label_percent())
p_km_bld <- ggplot2::ggplot_build(p_km)
time_major_breaks <- p_km_bld$layout$panel_params[[1]]$x.sec$breaks
time_minor_breaks <- p_km_bld$layout$panel_params[[1]]$x.sec$minor_breaks
time_lims <- p_km_bld$layout$panel_params[[1]]$x.range
# Loop over conditions
for (strata_i in unique(plot_df$strata)) {
data <- plot_df %>%
filter(strata == strata_i) %>%
filter(is.finite(estimate), is.finite(conf.low), is.finite(conf.high))
# Add conf.low
p_km <- p_km +
ggplot2::geom_path(
data = data,
ggplot2::aes(y = conf.low),
alpha = .2
)
# Add conf.high
p_km <- p_km +
ggplot2::geom_line(
data = data,
ggplot2::aes(y = conf.high),
alpha = .2
)
p_km <- p_km +
ggplot2::geom_ribbon(
data = data,
ggplot2::aes(
x = time,
ymin = conf.low,
ymax = conf.high,
fill = strata
),
alpha = .2, lwd = 0
)
p_km <- p_km +
ggplot2::geom_line(
data = data,
ggplot2::aes(y = estimate)
)
p_km <- p_km +
ggplot2::geom_point(
data = data %>% filter(n.censor > 0),
ggplot2::aes(y = estimate),
alpha = 1,
pch = "|", size = 3,
fill = scales::alpha("white", .8)
)
}
if (add_median) {
surv_median <- object$fit_summary %>%
dplyr::select(strata, median) %>%
dplyr::mutate(strata = stringr::str_remove_all(strata, pattern = "condition=")) %>%
dplyr::mutate(y = .5) %>%
dplyr::mutate(value = round(median, 0))
p_km <- p_km +
ggrepel::geom_label_repel(
data = surv_median,
mapping = ggplot2::aes(
x = value,
y = y,
label = value
),
direction = "y",
label.size = .7,
min.segment.length = 0,
nudge_y = median_nudge_y,
size = median_label_size,
segment.colour = "black"
)
p_km <- p_km +
ggplot2::annotate("segment",
x = -Inf,
xend = max(surv_median$value),
y = .5,
yend = .5,
col = gray(.2),
lty = 3
)
if (add_labels) {
p_km <- p_km +
ggplot2::annotate("text",
x = 0, # min(time_lims),
y = .5,
col = label_color,
adj = 0,
label = "Median\nSurvival",
size = label_size
)
}
p_km <- p_km +
ggplot2::geom_point(
data = surv_median,
mapping = ggplot2::aes(
x = value,
y = .5
),
pch = 21,
fill = "white",
size = 4, stroke = 1
)
if (add_median_delta & strata_n > 1) {
horizontal_bar_y <- .08
median_delta <- surv_median %>%
dplyr::mutate(
value_max = max(value),
value_min = min(value)
) %>%
dplyr::filter(value == value_max | value == value_min)
p_km <- p_km +
ggplot2::annotate("segment",
x = min(surv_median$value),
xend = max(surv_median$value),
y = horizontal_bar_y,
yend = horizontal_bar_y,
col = gray(.8)
)
# Add dotted connectors
p_km <- p_km +
ggplot2::geom_segment(
data = median_delta,
ggplot2::aes(
x = value,
y = -Inf,
xend = value,
yend = .5,
), lty = 3
)
# Add ends
median_delta_ends <- median_delta %>%
dplyr::mutate(
y = horizontal_bar_y - .03,
yend = horizontal_bar_y + .03
)
p_km <- p_km +
ggplot2::geom_segment(
data = median_delta_ends,
ggplot2::aes(
x = value,
y = y,
xend = value,
yend = yend
)
)
median_diff <- diff(range(object$fit_summary$median))
delta_text <- paste0(round(median_diff, 0))
# delta_text <- paste0(round(median_diff, 1), object$followup_time_units)
p_km <- p_km +
ggplot2::annotate("text",
x = min(median_delta$value) + median_diff / 2,
y = .125,
label = delta_text
)
if (add_labels) {
suppressWarnings({
lab <- expression(paste(Delta, " Median"))
p_km <- p_km + ggplot2::annotate("text",
x = min(median_delta$value) + median_diff / 2,
y = .04,
label = lab,
size = label_size,
col = label_color
)
})
}
}
}
my_title <- paste0("Survival: From ", index_title)
if (!is.null(event_title)) {
my_title <- paste0("Survival: From ", index_title, " to ", event_title)
}
if (!is.null(object$followup_time_units)) {
x_lab <- paste0("Time (", stringr::str_to_title(object$followup_time_units), ")")
} else {
x_lab <- "Time"
}
p_km <- p_km +
ggplot2::labs(
title = my_title,
subtitle = paste0("Cohort N = ", scales::comma(patient_n)),
y = "Survival Probability",
x = x_lab
)
# Add legend
if (legend_position %in% c("top", "right")) {
p_km <- p_km + ggtheme +
ggplot2::theme(legend.position = "top")
}
if (legend_position == "inside") {
if (is.null(legend_position_x)) {
# Put first
legend_position_x <- rev(c(
max(plot_df$time) * .05,
rep(max(plot_df$time) * .4, strata_n - 1)
))
}
p_km <- p_km +
ggtheme +
ggplot2::theme(
legend.position = "none", # This keeps the labels from disappearing
plot.margin = ggplot2::unit(c(1, 1, 1, 3), "lines")
)
# Get the x positions corresponding to legend_anchor_y
if (is.null(legend_nudge_y)) {
legend_nudge_y <- rev(c(
-.15,
rep(.1, strata_n - 1)
))
}
temp <- tibble::tibble(
strata = strata_values,
legend_anchor_y = rep(legend_anchor_y,
length.out = length(strata_values)
)
)
legend_positions <- plot_df %>%
dplyr::left_join(temp, by = "strata") %>%
dplyr::group_by(strata) %>%
dplyr::mutate(dev = abs(estimate - legend_anchor_y)) %>%
dplyr::filter(dev == min(dev)) %>%
dplyr::slice(1) %>%
dplyr::mutate(
x = time,
y = legend_anchor_y
) %>%
dplyr::select(strata, x, y) %>%
dplyr::ungroup() %>%
dplyr::mutate(nudge_x = 0)
if (!is.null(legend_position_x)) {
temp <- tibble::tibble(
strata = strata_values,
legend_position_x = rep(legend_position_x,
length.out = length(strata_values)
)
)
legend_positions <- legend_positions %>%
dplyr::left_join(temp, by = "strata") %>%
dplyr::mutate(
nudge_x = legend_position_x - x
)
}
my_arrow <- if (anchor_arrow) {
arrow(length = unit(0.02, "npc"))
} else {
NULL
}
if (strata_n > 1) {
p_km <- p_km +
ggrepel::geom_label_repel(
data = legend_positions,
mapping = ggplot2::aes(
x = x, y = y,
group = strata,
label = strata
),
nudge_x = legend_positions$nudge_x,
nudge_y = legend_nudge_y,
direction = "y",
segment.size = .5,
arrow = my_arrow,
segment.color = "black",
label.size = 0
)
}
}
}
# Add risk table ---------------------------------------------------
if (risk_table) {
risk_df <- tidyr::expand_grid(
strata = strata_values,
time = time_minor_breaks
)
risk_df <- purrr::map_dfr(1:nrow(risk_df),
.f = function(row_i) {
strata_i <- risk_df$strata[row_i]
time_i <- risk_df$time[row_i]
at_risk_i <- suppressWarnings({
plot_df %>%
dplyr::filter(time < time_i, strata == strata_i) %>%
dplyr::filter(time == max(time)) %>%
dplyr::pull(n.risk)
})
censored_i <- plot_df %>%
dplyr::filter(time < time_i, strata == strata_i) %>%
dplyr::summarise(N = sum(n.censor), .groups = "drop") %>%
dplyr::pull(N)
tibble::tibble(
strata = strata_i,
time = time_i,
risk_n = at_risk_i,
censored_n = censored_i
)
}
)
risk_df <- risk_df %>%
dplyr::bind_rows(
plot_df %>%
dplyr::group_by(strata) %>%
dplyr::summarise(
risk_n = max(n.risk),
censored_n = 0, .groups = "drop"
) %>%
dplyr::ungroup() %>%
dplyr::mutate(time = 0)
) %>%
dplyr::arrange(strata, time) %>%
dplyr::mutate(lab = paste0(risk_n, " (", censored_n, ")")) %>%
dplyr::mutate(strata_y = as.numeric(factor(strata)) * -.1 - .2)
p_risk <- ggplot2::ggplot(
data = risk_df,
mapping = ggplot2::aes(
x = time,
y = strata,
label = lab
)
) +
ggplot2::geom_text(
data = risk_df,
ggplot2::aes(
x = time,
adj = 0,
label = lab
),
size = risk_size,
col = "black"
) +
# ggplot2::coord_cartesian(
# # xlim = c(0, max(time_minor_breaks)),
# clip = "off"
# ) + # This keeps the labels from disappearing
ggplot2::labs(y = "", x = "") +
ggtheme +
ggplot2::theme(
plot.margin = ggplot2::unit(c(0, 1, 1, 2.5), "lines"),
legend.position = "none"
)
# Add stata labels
strata_labs <- risk_df %>%
dplyr::group_by(strata) %>%
dplyr::summarise(y = first(strata_y), .groups = "drop") %>%
dplyr::mutate(x = min(time_lims))
# p_risk <- p_risk +
# ggplot2::geom_label(
# data = strata_labs,
# mapping = ggplot2::aes(
# x = x, y = y,
# label = strata
# ),
# adj = 1, size = risk_size
# )
}
# Create time at risk plot {p_tar} -----------------------------------------
#
# event_df <- plot_df %>%
# dplyr::select(time, strata, event = n.event, censor = n.censor) %>%
# tidyr::pivot_longer(
# cols = event:censor,
# names_to = "outcome",
# values_to = "count"
# ) %>%
# dplyr::filter(count > 0)
#
# # Duplicate any with counts > 1 so there is one row per event
#
# while (max(event_df$count) > 1) {
# count_max <- max(event_df$count)
#
# event_df_max <- event_df %>%
# filter(count == count_max)
#
# new_df <- purrr::map_df(1:count_max, .f = function(x) {
# event_df_max %>%
# dplyr::mutate(count = 1)
# }) %>%
# dplyr::arrange(time)
#
# event_df <- event_df %>%
# dplyr::filter(count != count_max) %>%
# dplyr::bind_rows(new_df)
# }
#
# event_df <- event_df %>%
# dplyr::mutate(nudge_y = dplyr::case_when(
# outcome == "event" ~ event_nudge_y,
# outcome == "censor" ~ -event_nudge_y
# )) %>%
# dplyr::mutate(strata_num = as.numeric(factor(strata)))
#
#
# ylim <- c(.5, strata_n + .5)
#
# p_tar <- ggplot2::ggplot(
# event_df,
# ggplot2::aes(
# x = time,
# y = strata_num,
# shape = outcome,
# col = strata
# )
# ) +
# ggplot2::scale_shape_manual(values = c(3, 21))
#
# # Add boxplots
#
# p_tar <- p_tar +
# ggplot2::geom_boxplot(
# data = event_df,
# mapping = ggplot2::aes(
# x = time,
# y = strata_num,
# shape = NULL, group = strata
# ),
# outlier.shape = NA,
# fill = "white", col = gray(.1, .5)
# )
#
# p_tar <- p_tar +
# ggplot2::geom_point(
# position = ggplot2::position_nudge(y = event_df$nudge_y),
# fill = "white"
# )
#
# p_tar <- p_tar +
# ggplot2::labs(title = "Time At Risk", y = "") +
# ggplot2::scale_y_continuous(
# labels = strata_values,
# limits = ylim,
# breaks = 1:strata_n
# ) +
# ggplot2::scale_x_continuous(
# labels = scales::label_comma(),
# limits = time_lims
# )
#
# p_tar <- p_tar + ggtheme
#
#
# p_tar <- p_tar +
# ggplot2::theme(
# # axis.title.y = ggplot2::element_blank(),
# # axis.text.y = ggplot2::element_text(color="white")
# # axis.ticks = ggplot2::element_blank()
# ) +
# ggplot2::guides(shape = FALSE, col = FALSE) +
# ggplot2::theme(
# plot.margin = ggplot2::unit(c(1, 1, 1, 1), "lines"),
# panel.grid.minor.y = ggplot2::element_blank()
# )
#
# # Create final plot -----------------------
#
p_km <- p_km +
ggplot2::theme(plot.margin = ggplot2::unit(c(1, 1, .5, 2), "lines"))
p_risk <- p_risk +
ggplot2::theme(plot.margin = ggplot2::unit(c(0, 1, 0, 2), "lines"))
if (add_gridlines == FALSE) {
p_km <- p_km +
ggplot2::theme(panel.grid = ggplot2::element_blank())
p_risk <- p_risk +
ggplot2::theme(panel.grid = ggplot2::element_blank())
}
if (is.null(xlim)) {
my_limits <- time_lims
} else {
my_limits <- xlim
}
if (is.null(x_breaks)) {
my_breaks <- time_major_breaks
} else {
my_breaks <- x_breaks
}
p_km <- p_km + ggplot2::scale_x_continuous(
breaks = my_breaks,
limits = my_limits,
expand = c(0, 0),
labels = scales::comma
) +
ggplot2::scale_colour_brewer(palette = palette)
p_risk <- p_risk + ggplot2::scale_x_continuous(
breaks = my_breaks,
limits = my_limits,
expand = c(0, 0),
labels = scales::comma
) +
ggplot2::theme(
axis.text.y = ggplot2::element_text(color = RColorBrewer::brewer.pal(max(c(strata_n, 3)), palette)),
panel.grid.major.y = ggplot2::element_blank()
) +
ggplot2::labs(subtitle = "At Risk (Censored)")
#
#
# # User specified xlim
# if (!is.null(xlim)) {
#
# p_km <- p_km + ggplot2::xlim(xlim)
# p_risk <- p_risk + ggplot2::xlim(xlim)
#
# }
g_km <- ggplot2::ggplotGrob(p_km)
g_risk <- ggplot2::ggplotGrob(p_risk)
maxWidth <- grid::unit.pmax(g_km$widths[2:5], g_risk$widths[2:5])
g_km$widths[2:5] <- as.list(maxWidth)
g_risk$widths[2:5] <- as.list(maxWidth)
# Lay out plots in one column
gridExtra::grid.arrange(g_km, g_risk, ncol = 1, heights = c(3, 1))
}
| /R/plot_survival.R | permissive | noxtoby/iwillsurvive | R | false | false | 19,999 | r | #' Plot the results of a survival analysis
#'
#' @param object iwillsurvive. An iwillsurvive object created from iwillsurvive
#' @param cohort dataframe. A one-row-per-patient cohort used in generating fit.
#' @param ggtheme theme. A ggplot2 theme
#' @param palette character. The name of a palette. See ?ggplot2::scale_colour_brewer for examples
#' @param simple logical. If TRUE, only plot the Kaplan-Meier estimate
#' @param add_gridlines logical. If TRUE, include gridlines
#' @param add_labels logical. If TRUE, show verbal labels
#' @param add_median logical. If TRUE, show median survival
#' @param add_median_delta logical.
#' @param anchor_arrow logical. If TRUE, use an arrow in pointing to the anchor
#' @param legend_position character. Where should the strata labels be located?
#' Either 'inside' for inside the plot, or 'top', or 'right'
#' @param legend_anchor_y numeric. Y locations of anchors for legends.
#' Only used if legend_position = "inside"
#' @param legend_nudge_y numeric.
#' @param legend_position_x numeric. X position of the legend(s)
#' @param xlim numeric.
#' @param x_breaks numeric. Major breaks for the x-axis
#' @param label_size numeric. Size of the labels.
#' @param label_color character. Color of labels.
#' @param median_nudge_y numeric. Amount to nudge median label.
#' @param risk_table logical. If TRUE, include the risk table
#' @param risk_size numeric. Size of font in risk table.
#' @param index_title character.
#' @param event_title character.
#' @param median_label_size numeric.
#' @param event_nudge_y numeric.
#'
#' @import ggplot2
#' @import scales
#'
#' @return ggplot2
#' @export
#'
#' @examples
#' # Set things up by creating an iwillsurvive object
#'
#' cohort <- ez_cohort %>%
#' derive_followup_date(
#' event_date = "dateofdeath",
#' censor_date = "lastvisitdate"
#' ) %>%
#' derive_followup_time(index_date = "lotstartdate") %>%
#' derive_event_status(event_date = "dateofdeath")
#'
#' cohort_iws <- iwillsurvive(cohort,
#' followup_time = "followup_days",
#' terms = "condition",
#' event_title = "Death",
#' index_title = "LOT1 Start"
#' )
#'
#' plot_survival(cohort_iws)
#'
#' # Set simple = TRUE to only get the KM without any fancy pants stuff
#'
#' plot_survival(cohort_iws,
#' simple = TRUE
#' )
#'
#' # Control the location of the legend with legend_position
#' plot_survival(cohort_iws,
#' legend_position = "top"
#' )
#'
#' # Change the location of the labels and add arrows
#' plot_survival(cohort_iws,
#' legend_anchor_y = c(.7, .85),
#' legend_position_x = c(260, 250),
#' legend_nudge_y = .1,
#' anchor_arrow = TRUE
#' )
plot_survival <- function(object = NULL,
                          cohort = NULL,
                          ggtheme = ggplot2::theme_bw(),
                          palette = "Set1",
                          simple = FALSE,
                          add_gridlines = TRUE,
                          add_labels = TRUE,
                          add_median = TRUE,
                          add_median_delta = TRUE,
                          anchor_arrow = FALSE,
                          legend_position = "inside",
                          legend_anchor_y = .5,
                          legend_nudge_y = NULL,
                          legend_position_x = NULL,
                          xlim = NULL,
                          x_breaks = NULL,
                          label_size = 3,
                          label_color = gray(0),
                          median_nudge_y = .1,
                          risk_table = TRUE,
                          risk_size = 3,
                          index_title = NULL,
                          event_title = NULL,
                          median_label_size = 4,
                          event_nudge_y = .15) {
  # Validate input. (testthat::expect_is is deprecated in testthat 3e and
  # testthat should not be a runtime dependency of plotting code.)
  if (!inherits(object, "iwillsurvive")) {
    stop("object must be an iwillsurvive object.", call. = FALSE)
  }
  plot_df <- broom::tidy(object$fit)
  # NOTE(review): the `cohort` argument is ignored; the cohort stored inside
  # `object` is always used. Confirm this is intended before removing the arg.
  cohort <- object$cohort
  patient_n <- nrow(cohort)
  if (is.null(event_title)) {
    event_title <- object$event_title
  }
  if (is.null(index_title)) {
    index_title <- object$index_title
  }
  # `simple` is a convenience switch that disables all annotation layers.
  if (simple) {
    add_labels <- FALSE
    add_median <- FALSE
    add_median_delta <- FALSE
    legend_position <- "top"
  }
  # Single-stratum fits from survival::survfit have no strata column.
  if (!("strata" %in% names(plot_df))) {
    plot_df$strata <- "all"
  }
  # Create km plot {p_km} ------------------------------------------------------
  {
    plot_df <- plot_df %>%
      dplyr::mutate(strata = stringr::str_remove_all(strata,
        pattern = "condition="
      ))
    strata_values <- unique(plot_df$strata)
    strata_n <- length(strata_values)
    # Defensive re-check (strata is guaranteed present at this point).
    if (!("strata" %in% names(plot_df))) {
      plot_df <- plot_df %>%
        dplyr::mutate(strata = "all")
    }
    plot_df <- plot_df %>%
      dplyr::arrange(strata, time)
    p_km <- ggplot2::ggplot(
      plot_df,
      ggplot2::aes(
        x = time,
        group = strata,
        y = estimate,
        col = strata
      )
    ) +
      ggplot2::scale_y_continuous(labels = scales::label_percent())
    # Build the plot once to recover the axis breaks / limits ggplot chose;
    # these drive the risk-table grid and the final x scales.
    p_km_bld <- ggplot2::ggplot_build(p_km)
    time_major_breaks <- p_km_bld$layout$panel_params[[1]]$x.sec$breaks
    time_minor_breaks <- p_km_bld$layout$panel_params[[1]]$x.sec$minor_breaks
    time_lims <- p_km_bld$layout$panel_params[[1]]$x.range
    # Loop over conditions, layering CI bounds, ribbon, estimate and censor
    # ticks per stratum.
    for (strata_i in unique(plot_df$strata)) {
      data <- plot_df %>%
        dplyr::filter(strata == strata_i) %>%
        dplyr::filter(is.finite(estimate), is.finite(conf.low), is.finite(conf.high))
      # Add conf.low
      p_km <- p_km +
        ggplot2::geom_path(
          data = data,
          ggplot2::aes(y = conf.low),
          alpha = .2
        )
      # Add conf.high
      p_km <- p_km +
        ggplot2::geom_line(
          data = data,
          ggplot2::aes(y = conf.high),
          alpha = .2
        )
      p_km <- p_km +
        ggplot2::geom_ribbon(
          data = data,
          ggplot2::aes(
            x = time,
            ymin = conf.low,
            ymax = conf.high,
            fill = strata
          ),
          alpha = .2, lwd = 0
        )
      p_km <- p_km +
        ggplot2::geom_line(
          data = data,
          ggplot2::aes(y = estimate)
        )
      # Tick marks ("|") at censoring times.
      p_km <- p_km +
        ggplot2::geom_point(
          data = data %>% dplyr::filter(n.censor > 0),
          ggplot2::aes(y = estimate),
          alpha = 1,
          pch = "|", size = 3,
          fill = scales::alpha("white", .8)
        )
    }
    if (add_median) {
      surv_median <- object$fit_summary %>%
        dplyr::select(strata, median) %>%
        dplyr::mutate(strata = stringr::str_remove_all(strata, pattern = "condition=")) %>%
        dplyr::mutate(y = .5) %>%
        dplyr::mutate(value = round(median, 0))
      p_km <- p_km +
        ggrepel::geom_label_repel(
          data = surv_median,
          mapping = ggplot2::aes(
            x = value,
            y = y,
            label = value
          ),
          direction = "y",
          label.size = .7,
          min.segment.length = 0,
          nudge_y = median_nudge_y,
          size = median_label_size,
          segment.colour = "black"
        )
      # Dotted horizontal line at 50% survival, up to the largest median.
      p_km <- p_km +
        ggplot2::annotate("segment",
          x = -Inf,
          xend = max(surv_median$value),
          y = .5,
          yend = .5,
          col = gray(.2),
          lty = 3
        )
      if (add_labels) {
        p_km <- p_km +
          ggplot2::annotate("text",
            x = 0, # min(time_lims),
            y = .5,
            col = label_color,
            adj = 0,
            label = "Median\nSurvival",
            size = label_size
          )
      }
      p_km <- p_km +
        ggplot2::geom_point(
          data = surv_median,
          mapping = ggplot2::aes(
            x = value,
            y = .5
          ),
          pch = 21,
          fill = "white",
          size = 4, stroke = 1
        )
      # Delta-median annotation only makes sense with >= 2 strata.
      if (add_median_delta && strata_n > 1) {
        horizontal_bar_y <- .08
        median_delta <- surv_median %>%
          dplyr::mutate(
            value_max = max(value),
            value_min = min(value)
          ) %>%
          dplyr::filter(value == value_max | value == value_min)
        p_km <- p_km +
          ggplot2::annotate("segment",
            x = min(surv_median$value),
            xend = max(surv_median$value),
            y = horizontal_bar_y,
            yend = horizontal_bar_y,
            col = gray(.8)
          )
        # Add dotted connectors
        p_km <- p_km +
          ggplot2::geom_segment(
            data = median_delta,
            ggplot2::aes(
              x = value,
              y = -Inf,
              xend = value,
              yend = .5
            ), lty = 3
          )
        # Add ends
        median_delta_ends <- median_delta %>%
          dplyr::mutate(
            y = horizontal_bar_y - .03,
            yend = horizontal_bar_y + .03
          )
        p_km <- p_km +
          ggplot2::geom_segment(
            data = median_delta_ends,
            ggplot2::aes(
              x = value,
              y = y,
              xend = value,
              yend = yend
            )
          )
        median_diff <- diff(range(object$fit_summary$median))
        delta_text <- paste0(round(median_diff, 0))
        # delta_text <- paste0(round(median_diff, 1), object$followup_time_units)
        p_km <- p_km +
          ggplot2::annotate("text",
            x = min(median_delta$value) + median_diff / 2,
            y = .125,
            label = delta_text
          )
        if (add_labels) {
          suppressWarnings({
            lab <- expression(paste(Delta, " Median"))
            p_km <- p_km + ggplot2::annotate("text",
              x = min(median_delta$value) + median_diff / 2,
              y = .04,
              label = lab,
              size = label_size,
              col = label_color
            )
          })
        }
      }
    }
    my_title <- paste0("Survival: From ", index_title)
    if (!is.null(event_title)) {
      my_title <- paste0("Survival: From ", index_title, " to ", event_title)
    }
    if (!is.null(object$followup_time_units)) {
      x_lab <- paste0("Time (", stringr::str_to_title(object$followup_time_units), ")")
    } else {
      x_lab <- "Time"
    }
    p_km <- p_km +
      ggplot2::labs(
        title = my_title,
        subtitle = paste0("Cohort N = ", scales::comma(patient_n)),
        y = "Survival Probability",
        x = x_lab
      )
    # Add legend
    if (legend_position %in% c("top", "right")) {
      # Honour the requested position ("right" used to be silently mapped
      # to "top").
      p_km <- p_km + ggtheme +
        ggplot2::theme(legend.position = legend_position)
    }
    if (legend_position == "inside") {
      if (is.null(legend_position_x)) {
        # Put first
        legend_position_x <- rev(c(
          max(plot_df$time) * .05,
          rep(max(plot_df$time) * .4, strata_n - 1)
        ))
      }
      p_km <- p_km +
        ggtheme +
        ggplot2::theme(
          legend.position = "none", # This keeps the labels from disappearing
          plot.margin = ggplot2::unit(c(1, 1, 1, 3), "lines")
        )
      # Get the x positions corresponding to legend_anchor_y
      if (is.null(legend_nudge_y)) {
        legend_nudge_y <- rev(c(
          -.15,
          rep(.1, strata_n - 1)
        ))
      }
      temp <- tibble::tibble(
        strata = strata_values,
        legend_anchor_y = rep(legend_anchor_y,
          length.out = length(strata_values)
        )
      )
      # For each stratum, find the curve point closest to the requested
      # anchor height; labels are attached there.
      legend_positions <- plot_df %>%
        dplyr::left_join(temp, by = "strata") %>%
        dplyr::group_by(strata) %>%
        dplyr::mutate(dev = abs(estimate - legend_anchor_y)) %>%
        dplyr::filter(dev == min(dev)) %>%
        dplyr::slice(1) %>%
        dplyr::mutate(
          x = time,
          y = legend_anchor_y
        ) %>%
        dplyr::select(strata, x, y) %>%
        dplyr::ungroup() %>%
        dplyr::mutate(nudge_x = 0)
      if (!is.null(legend_position_x)) {
        temp <- tibble::tibble(
          strata = strata_values,
          legend_position_x = rep(legend_position_x,
            length.out = length(strata_values)
          )
        )
        legend_positions <- legend_positions %>%
          dplyr::left_join(temp, by = "strata") %>%
          dplyr::mutate(
            nudge_x = legend_position_x - x
          )
      }
      # arrow()/unit() are re-exported by ggplot2 from grid.
      my_arrow <- if (anchor_arrow) {
        ggplot2::arrow(length = ggplot2::unit(0.02, "npc"))
      } else {
        NULL
      }
      if (strata_n > 1) {
        p_km <- p_km +
          ggrepel::geom_label_repel(
            data = legend_positions,
            mapping = ggplot2::aes(
              x = x, y = y,
              group = strata,
              label = strata
            ),
            nudge_x = legend_positions$nudge_x,
            nudge_y = legend_nudge_y,
            direction = "y",
            segment.size = .5,
            arrow = my_arrow,
            segment.color = "black",
            label.size = 0
          )
      }
    }
  }
  # Add risk table ---------------------------------------------------
  if (risk_table) {
    risk_df <- tidyr::expand_grid(
      strata = strata_values,
      time = time_minor_breaks
    )
    # For each (stratum, break) pair, look up the number at risk just before
    # the break and the cumulative number censored up to it.
    risk_df <- purrr::map_dfr(seq_len(nrow(risk_df)),
      .f = function(row_i) {
        strata_i <- risk_df$strata[row_i]
        time_i <- risk_df$time[row_i]
        # suppressWarnings: max(time) of an empty group warns; the resulting
        # zero-row tibble is simply dropped by map_dfr.
        at_risk_i <- suppressWarnings({
          plot_df %>%
            dplyr::filter(time < time_i, strata == strata_i) %>%
            dplyr::filter(time == max(time)) %>%
            dplyr::pull(n.risk)
        })
        censored_i <- plot_df %>%
          dplyr::filter(time < time_i, strata == strata_i) %>%
          dplyr::summarise(N = sum(n.censor), .groups = "drop") %>%
          dplyr::pull(N)
        tibble::tibble(
          strata = strata_i,
          time = time_i,
          risk_n = at_risk_i,
          censored_n = censored_i
        )
      }
    )
    # Prepend the time-zero row (full sample, nobody censored yet).
    risk_df <- risk_df %>%
      dplyr::bind_rows(
        plot_df %>%
          dplyr::group_by(strata) %>%
          dplyr::summarise(
            risk_n = max(n.risk),
            censored_n = 0, .groups = "drop"
          ) %>%
          dplyr::ungroup() %>%
          dplyr::mutate(time = 0)
      ) %>%
      dplyr::arrange(strata, time) %>%
      dplyr::mutate(lab = paste0(risk_n, " (", censored_n, ")")) %>%
      dplyr::mutate(strata_y = as.numeric(factor(strata)) * -.1 - .2)
    p_risk <- ggplot2::ggplot(
      data = risk_df,
      mapping = ggplot2::aes(
        x = time,
        y = strata,
        label = lab
      )
    ) +
      ggplot2::geom_text(
        data = risk_df,
        ggplot2::aes(
          x = time,
          # NOTE(review): "adj" is not a ggplot2 aesthetic and is ignored
          # with a warning -- hjust may have been intended. Kept as-is.
          adj = 0,
          label = lab
        ),
        size = risk_size,
        col = "black"
      ) +
      # ggplot2::coord_cartesian(
      #   # xlim = c(0, max(time_minor_breaks)),
      #   clip = "off"
      # ) + # This keeps the labels from disappearing
      ggplot2::labs(y = "", x = "") +
      ggtheme +
      ggplot2::theme(
        plot.margin = ggplot2::unit(c(0, 1, 1, 2.5), "lines"),
        legend.position = "none"
      )
    # Add strata labels (retained for the commented-out geom_label below).
    strata_labs <- risk_df %>%
      dplyr::group_by(strata) %>%
      dplyr::summarise(y = dplyr::first(strata_y), .groups = "drop") %>%
      dplyr::mutate(x = min(time_lims))
    # p_risk <- p_risk +
    #   ggplot2::geom_label(
    #     data = strata_labs,
    #     mapping = ggplot2::aes(
    #       x = x, y = y,
    #       label = strata
    #     ),
    #     adj = 1, size = risk_size
    #   )
  }
  # Create time at risk plot {p_tar} -----------------------------------------
  #
  # event_df <- plot_df %>%
  #   dplyr::select(time, strata, event = n.event, censor = n.censor) %>%
  #   tidyr::pivot_longer(
  #     cols = event:censor,
  #     names_to = "outcome",
  #     values_to = "count"
  #   ) %>%
  #   dplyr::filter(count > 0)
  #
  # # Duplicate any with counts > 1 so there is one row per event
  #
  # while (max(event_df$count) > 1) {
  #   count_max <- max(event_df$count)
  #
  #   event_df_max <- event_df %>%
  #     filter(count == count_max)
  #
  #   new_df <- purrr::map_df(1:count_max, .f = function(x) {
  #     event_df_max %>%
  #       dplyr::mutate(count = 1)
  #   }) %>%
  #     dplyr::arrange(time)
  #
  #   event_df <- event_df %>%
  #     dplyr::filter(count != count_max) %>%
  #     dplyr::bind_rows(new_df)
  # }
  #
  # event_df <- event_df %>%
  #   dplyr::mutate(nudge_y = dplyr::case_when(
  #     outcome == "event" ~ event_nudge_y,
  #     outcome == "censor" ~ -event_nudge_y
  #   )) %>%
  #   dplyr::mutate(strata_num = as.numeric(factor(strata)))
  #
  #
  # ylim <- c(.5, strata_n + .5)
  #
  # p_tar <- ggplot2::ggplot(
  #   event_df,
  #   ggplot2::aes(
  #     x = time,
  #     y = strata_num,
  #     shape = outcome,
  #     col = strata
  #   )
  # ) +
  #   ggplot2::scale_shape_manual(values = c(3, 21))
  #
  # # Add boxplots
  #
  # p_tar <- p_tar +
  #   ggplot2::geom_boxplot(
  #     data = event_df,
  #     mapping = ggplot2::aes(
  #       x = time,
  #       y = strata_num,
  #       shape = NULL, group = strata
  #     ),
  #     outlier.shape = NA,
  #     fill = "white", col = gray(.1, .5)
  #   )
  #
  # p_tar <- p_tar +
  #   ggplot2::geom_point(
  #     position = ggplot2::position_nudge(y = event_df$nudge_y),
  #     fill = "white"
  #   )
  #
  # p_tar <- p_tar +
  #   ggplot2::labs(title = "Time At Risk", y = "") +
  #   ggplot2::scale_y_continuous(
  #     labels = strata_values,
  #     limits = ylim,
  #     breaks = 1:strata_n
  #   ) +
  #   ggplot2::scale_x_continuous(
  #     labels = scales::label_comma(),
  #     limits = time_lims
  #   )
  #
  # p_tar <- p_tar + ggtheme
  #
  #
  # p_tar <- p_tar +
  #   ggplot2::theme(
  #     # axis.title.y = ggplot2::element_blank(),
  #     # axis.text.y = ggplot2::element_text(color="white")
  #     # axis.ticks = ggplot2::element_blank()
  #   ) +
  #   ggplot2::guides(shape = FALSE, col = FALSE) +
  #   ggplot2::theme(
  #     plot.margin = ggplot2::unit(c(1, 1, 1, 1), "lines"),
  #     panel.grid.minor.y = ggplot2::element_blank()
  #   )
  #
  # # Create final plot -----------------------
  #
  p_km <- p_km +
    ggplot2::theme(plot.margin = ggplot2::unit(c(1, 1, .5, 2), "lines"))
  if (!add_gridlines) {
    p_km <- p_km +
      ggplot2::theme(panel.grid = ggplot2::element_blank())
  }
  if (is.null(xlim)) {
    my_limits <- time_lims
  } else {
    my_limits <- xlim
  }
  if (is.null(x_breaks)) {
    my_breaks <- time_major_breaks
  } else {
    my_breaks <- x_breaks
  }
  p_km <- p_km + ggplot2::scale_x_continuous(
    breaks = my_breaks,
    limits = my_limits,
    expand = c(0, 0),
    labels = scales::comma
  ) +
    ggplot2::scale_colour_brewer(palette = palette)
  # Without a risk table there is nothing to arrange: return the KM plot.
  # (Previously this path errored because p_risk was never created.)
  if (!risk_table) {
    return(p_km)
  }
  p_risk <- p_risk +
    ggplot2::theme(plot.margin = ggplot2::unit(c(0, 1, 0, 2), "lines"))
  if (!add_gridlines) {
    p_risk <- p_risk +
      ggplot2::theme(panel.grid = ggplot2::element_blank())
  }
  p_risk <- p_risk + ggplot2::scale_x_continuous(
    breaks = my_breaks,
    limits = my_limits,
    expand = c(0, 0),
    labels = scales::comma
  ) +
    ggplot2::theme(
      axis.text.y = ggplot2::element_text(color = RColorBrewer::brewer.pal(max(c(strata_n, 3)), palette)),
      panel.grid.major.y = ggplot2::element_blank()
    ) +
    ggplot2::labs(subtitle = "At Risk (Censored)")
  #
  #
  # # User specified xlim
  # if (!is.null(xlim)) {
  #
  #   p_km <- p_km + ggplot2::xlim(xlim)
  #   p_risk <- p_risk + ggplot2::xlim(xlim)
  #
  # }
  # Align the panel widths of the two plots before stacking them.
  g_km <- ggplot2::ggplotGrob(p_km)
  g_risk <- ggplot2::ggplotGrob(p_risk)
  maxWidth <- grid::unit.pmax(g_km$widths[2:5], g_risk$widths[2:5])
  g_km$widths[2:5] <- as.list(maxWidth)
  g_risk$widths[2:5] <- as.list(maxWidth)
  # Lay out plots in one column
  gridExtra::grid.arrange(g_km, g_risk, ncol = 1, heights = c(3, 1))
}
|
\name{org.Hvulgare1.eg.db}
\alias{org.Hvulgare1.eg.db}
\alias{org.Hvulgare1.eg}
\title{Bioconductor annotation data package}
\description{
Welcome to the org.Hvulgare1.eg.db annotation Package. This is an
organism specific package. The purpose is to provide detailed
information about the species abbreviated in the second part of the
package name org.Hvulgare1.eg.db. This package is updated biannually.
To learn more about this package, users are encouraged to learn about
the select, columns, keys and keytypes methods. These are described
in a walkthrough on the bioconductor website as well as in the manual
pages and vignettes in the AnnotationDbi package.
}
\keyword{datasets}
| /00.annotation/org.Hvulgare1.eg.db/man/org.Hvulgare1.egBASE.Rd | no_license | Han9527/Barley_GO_Annotation | R | false | false | 705 | rd | \name{org.Hvulgare1.eg.db}
\alias{org.Hvulgare1.eg.db}
\alias{org.Hvulgare1.eg}
\title{Bioconductor annotation data package}
\description{
Welcome to the org.Hvulgare1.eg.db annotation Package. This is an
organism specific package. The purpose is to provide detailed
information about the species abbreviated in the second part of the
package name org.Hvulgare1.eg.db. This package is updated biannually.
To learn more about this package, users are encouraged to learn about
the select, columns, keys and keytypes methods. These are described
in a walkthrough on the bioconductor website as well as in the manual
pages and vignettes in the AnnotationDbi package.
}
\keyword{datasets}
|
\name{nodosIntermedios}
\alias{nodosIntermedios}
\title{
Nodos intermedios para etiquetas del modelo.
}
\description{
Se calcula la mitad del elemento para colocar ahí las etiquetas de número de elemento.
}
\usage{
nodosIntermedios(elementos, nodos)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{elementos}{Elementos de la armadura.}
\item{nodos}{Nodos de la armadura.}
}
| /man/nodosIntermedios.Rd | no_license | IzaelNava/Armaduras | R | false | false | 409 | rd | \name{nodosIntermedios}
\alias{nodosIntermedios}
\title{
Nodos intermedios para etiquetas del modelo.
}
\description{
Se calcula la mitad del elemento para colocar ahí las etiquetas de número de elemento.
}
\usage{
nodosIntermedios(elementos, nodos)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{elementos}{Elementos de la armadura.}
\item{nodos}{Nodos de la armadura.}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anyNA.R
\name{anyNA}
\alias{anyNA}
\title{Backport of anyNA for R versions < 3.2.0.}
\usage{
anyNA(x, recursive = FALSE)
}
\description{
See the original description in \code{base::anyNA}.
}
\examples{
# get function from namespace instead of possibly getting
# implementation shipped with recent R versions:
bp_anyNA = getFromNamespace("anyNA", "backports")
bp_anyNA(letters)
}
\keyword{internal}
| /packrat/src/backports/backports/man/anyNA.Rd | no_license | wjhopper/PBS-R-Manual | R | false | true | 477 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anyNA.R
\name{anyNA}
\alias{anyNA}
\title{Backport of anyNA for R versions < 3.2.0.}
\usage{
anyNA(x, recursive = FALSE)
}
\description{
See the original description in \code{base::anyNA}.
}
\examples{
# get function from namespace instead of possibly getting
# implementation shipped with recent R versions:
bp_anyNA = getFromNamespace("anyNA", "backports")
bp_anyNA(letters)
}
\keyword{internal}
|
\name{linkcomm2clustnsee}
\alias{linkcomm2clustnsee}
\title{
Write a Partition File for Clust&See
}
\description{
This function writes out a partition file which can be imported into the Cytoscape plug-in Clust&See.
}
\usage{
linkcomm2clustnsee(x, file = "temp.cns", network.name = NULL)
}
\arguments{
\item{x}{
An object of class \code{linkcomm} or \code{OCG}.
}
\item{file}{
A character string naming a Clust&See partition file (.cns extension). Defaults to \code{"temp.cns"}.
}
\item{network.name}{
A character string providing a name for the network. This name must correspond to the file name of the network that will be imported into Cytoscape. If NULL, the object name, \code{x}, is used. Defaults to NULL.
}
}
\details{
Cytoscape is an open source platform for complex-network analysis and visualization, and Clust&See is a Cytoscape plug-in used for visualizing the clusters produced by various network clustering algorithms (\url{http://tagc.univ-mrs.fr/tagc/index.php/clustnsee}).
}
\value{
Used for its side-effect of writing a Clust&See partition file to disk.
}
\references{
Becker, E. et al. (2012) Multifunctional proteins revealed by overlapping clustering in protein interaction network. \emph{Bioinformatics} \bold{28}, 84-90.
Gambette, P. and Guenoche, A. (2011) Bootstrap clustering for graph partitioning. \emph{RAIRO-Operations Research} \bold{45}, 339-352.
Kalinka, A.T. and Tomancak, P. (2011). linkcomm: an R package for the generation, visualization, and analysis of link communities in networks of arbitrary size and type. \emph{Bioinformatics} \bold{27}, 2011-2012.
Shannon, P. et al. (2003) Cytoscape: A software environment for integrated models of biomolecular interaction networks. \emph{Genome Research} \bold{13}, 2498-2504.
}
\author{
Alex T. Kalinka \email{alex.t.kalinka@gmail.com}
}
\examples{
## Generate graph and extract link communities.
g <- swiss[,3:4]
lc <- getLinkCommunities(g)
## Write a partition file to disk.
linkcomm2clustnsee(lc)
## Extract OCG communities.
oc <- getOCG.clusters(g)
## Write a partition file to disk.
linkcomm2clustnsee(oc)
}
| /linkcomm_R_package/man/linkcomm2clustnsee.Rd | no_license | jtan189/linkcomm | R | false | false | 2,115 | rd | \name{linkcomm2clustnsee}
\alias{linkcomm2clustnsee}
\title{
Write a Partition File for Clust&See
}
\description{
This function writes out a partition file which can be imported into the Cytoscape plug-in Clust&See.
}
\usage{
linkcomm2clustnsee(x, file = "temp.cns", network.name = NULL)
}
\arguments{
\item{x}{
An object of class \code{linkcomm} or \code{OCG}.
}
\item{file}{
A character string naming a Clust&See partition file (.cns extension). Defaults to \code{"temp.cns"}.
}
\item{network.name}{
A character string providing a name for the network. This name must correspond to the file name of the network that will be imported into Cytoscape. If NULL, the object name, \code{x}, is used. Defaults to NULL.
}
}
\details{
Cytoscape is an open source platform for complex-network analysis and visualization, and Clust&See is a Cytoscape plug-in used for visualizing the clusters produced by various network clustering algorithms (\url{http://tagc.univ-mrs.fr/tagc/index.php/clustnsee}).
}
\value{
Used for its side-effect of writing a Clust&See partition file to disk.
}
\references{
Becker, E. et al. (2012) Multifunctional proteins revealed by overlapping clustering in protein interaction network. \emph{Bioinformatics} \bold{28}, 84-90.
Gambette, P. and Guenoche, A. (2011) Bootstrap clustering for graph partitioning. \emph{RAIRO-Operations Research} \bold{45}, 339-352.
Kalinka, A.T. and Tomancak, P. (2011). linkcomm: an R package for the generation, visualization, and analysis of link communities in networks of arbitrary size and type. \emph{Bioinformatics} \bold{27}, 2011-2012.
Shannon, P. et al. (2003) Cytoscape: A software environment for integrated models of biomolecular interaction networks. \emph{Genome Research} \bold{13}, 2498-2504.
}
\author{
Alex T. Kalinka \email{alex.t.kalinka@gmail.com}
}
\examples{
## Generate graph and extract link communities.
g <- swiss[,3:4]
lc <- getLinkCommunities(g)
## Write a partition file to disk.
linkcomm2clustnsee(lc)
## Extract OCG communities.
oc <- getOCG.clusters(g)
## Write a partition file to disk.
linkcomm2clustnsee(oc)
}
|
\encoding{UTF-8}
\name{chron.stabilized}
\alias{chron.stabilized}
\title{Build Mean Value Chronology with Stabilized Variance}
\description{
This function builds a variance stabilized mean-value chronology, typically from a
\code{data.frame} of detrended ring widths as produced by
\code{\link{detrend}}.
}
\usage{
chron.stabilized(x, winLength, biweight = TRUE, running.rbar = FALSE)
}
\arguments{
\item{x}{a \code{data.frame} of ring widths with
\code{rownames(\var{x})} containing years and \code{colnames(x)}
containing each series \acronym{ID} such as produced by
\code{\link{read.rwl}}}
  \item{winLength}{an odd \code{integer} specifying the window length.}
\item{biweight}{\code{logical} flag. If \acronym{TRUE} then a robust
mean is calculated using \code{\link{tbrm}.}}
\item{running.rbar}{\code{logical} flag. If \acronym{TRUE} the running interseries correlation is returned as well. }
}
\details{
The variance of a mean chronology depends on the variance of the individual samples, the number of series averaged together, and their interseries correlation (Wigley et al. 1984). As the number of series commonly decreases towards the beginning of a chronology averaging introduces changes in variance that are a solely an effect of changes in sample depth.
Additionally, time-dependent changes in interseries correlation can cause artificial variance changes of the final mean chronology. The function \code{chron.stabilized} accounts for both temporal changes in the interseries correlation and sample depth to produce a mean value chronology with stabilized variance.
The basic correction centers around the use of the effective independent sample size, \code{Neff}, which considers sample replication and mean interseries correlation between the samples at every time. This is defined as: \code{Neff = n(t) / (1 + (n(t) - 1) * rbar(t))}
where \code{n(t)} is the number of series at time \code{t}, and \code{rbar} is the interseries correlation (see \code{\link{interseries.cor}}). Multiplication of the mean time series with the square root of \code{Neff} at every time \code{t} theoretically results in variance that is independent of sample size. In the limiting cases, when the \code{rbar} is zero or unity, \code{Neff} obtains values of the true sample size and unity, respectively.
}
\value{
  A \code{data.frame} with the variance stabilized chronology, the running interseries correlation (if \code{running.rbar = TRUE}), and the sample depth.
}
\references{
Frank, D, Esper, J, Cook, E, (2006) \emph{On variance adjustments in tree-ring chronology development}. Tree rings in archaeology, climatology and ecology, TRACE 4, 56–66
Frank, D, Esper, J, Cook, E, (2007) \emph{Adjustment for proxy number and coherence in a large-scale temperature reconstruction}. Geophysical Research Letters 34
Wigley, T, Briffa K, Jones P (1984) \emph{On the Average Value of Correlated Time Series, with Applications in Dendroclimatology and Hydrometeorology}. J. Climate Appl. Meteor., 23, 201–213
}
\author{ Original code by David Frank and adapted for dplR by Stefan Klesse. Patched and improved by Andy Bunn.
}
\seealso{ \code{\link{chron}}
}
\examples{library(graphics)
library(utils)
data(co021)
co021.rwi <- detrend(co021,method = "Spline")
co021.crn <- chron(co021.rwi)
co021.crn2 <- chron.stabilized(co021.rwi,
winLength=101,
biweight = TRUE,
running.rbar = FALSE)
yrs <- time(co021)
plot(yrs,co021.crn$std,type="l",col="grey")
lines(yrs,co021.crn2$adj.crn,col="red")
}
\keyword{ manip }
| /man/chron.stabilized.Rd | no_license | cran/dplR | R | false | false | 3,647 | rd | \encoding{UTF-8}
\name{chron.stabilized}
\alias{chron.stabilized}
\title{Build Mean Value Chronology with Stabilized Variance}
\description{
This function builds a variance stabilized mean-value chronology, typically from a
\code{data.frame} of detrended ring widths as produced by
\code{\link{detrend}}.
}
\usage{
chron.stabilized(x, winLength, biweight = TRUE, running.rbar = FALSE)
}
\arguments{
\item{x}{a \code{data.frame} of ring widths with
\code{rownames(\var{x})} containing years and \code{colnames(x)}
containing each series \acronym{ID} such as produced by
\code{\link{read.rwl}}}
  \item{winLength}{an odd \code{integer} specifying the window length.}
\item{biweight}{\code{logical} flag. If \acronym{TRUE} then a robust
mean is calculated using \code{\link{tbrm}.}}
\item{running.rbar}{\code{logical} flag. If \acronym{TRUE} the running interseries correlation is returned as well. }
}
\details{
The variance of a mean chronology depends on the variance of the individual samples, the number of series averaged together, and their interseries correlation (Wigley et al. 1984). As the number of series commonly decreases towards the beginning of a chronology averaging introduces changes in variance that are a solely an effect of changes in sample depth.
Additionally, time-dependent changes in interseries correlation can cause artificial variance changes of the final mean chronology. The function \code{chron.stabilized} accounts for both temporal changes in the interseries correlation and sample depth to produce a mean value chronology with stabilized variance.
The basic correction centers around the use of the effective independent sample size, \code{Neff}, which considers sample replication and mean interseries correlation between the samples at every time. This is defined as: \code{Neff = n(t) / (1 + (n(t)-1)*rbar(t))}
where \code{n(t)} is the number of series at time \code{t}, and \code{rbar} is the interseries correlation (see \code{\link{interseries.cor}}). Multiplication of the mean time series with the square root of \code{Neff} at every time \code{t} theoretically results in variance that is independent of sample size. In the limiting cases, when the \code{rbar} is zero or unity, \code{Neff} obtains values of the true sample size and unity, respectively.
}
\value{
A \code{data.frame} with the variance stabilized chronology, the running interseries correlation (if \code{running.rbar = TRUE}), and the sample depth.
}
\references{
Frank, D, Esper, J, Cook, E, (2006) \emph{On variance adjustments in tree-ring chronology development}. Tree rings in archaeology, climatology and ecology, TRACE 4, 56–66
Frank, D, Esper, J, Cook, E, (2007) \emph{Adjustment for proxy number and coherence in a large-scale temperature reconstruction}. Geophysical Research Letters 34
Wigley, T, Briffa K, Jones P (1984) \emph{On the Average Value of Correlated Time Series, with Applications in Dendroclimatology and Hydrometeorology}. J. Climate Appl. Meteor., 23, 201–213
}
\author{ Original code by David Frank and adapted for dplR by Stefan Klesse. Patched and improved by Andy Bunn.
}
\seealso{ \code{\link{chron}}
}
\examples{library(graphics)
library(utils)
data(co021)
co021.rwi <- detrend(co021,method = "Spline")
co021.crn <- chron(co021.rwi)
co021.crn2 <- chron.stabilized(co021.rwi,
winLength=101,
biweight = TRUE,
running.rbar = FALSE)
yrs <- time(co021)
plot(yrs,co021.crn$std,type="l",col="grey")
lines(yrs,co021.crn2$adj.crn,col="red")
}
\keyword{ manip }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adsense_objects.R
\name{SavedAdStyles}
\alias{SavedAdStyles}
\title{SavedAdStyles Object}
\usage{
SavedAdStyles(etag = NULL, items = NULL, nextPageToken = NULL)
}
\arguments{
\item{etag}{ETag of this response for caching purposes}
\item{items}{The saved ad styles returned in this list response}
\item{nextPageToken}{Continuation token used to page through ad units}
}
\value{
SavedAdStyles object
}
\description{
SavedAdStyles Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
| /googleadsensev12.auto/man/SavedAdStyles.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 613 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adsense_objects.R
\name{SavedAdStyles}
\alias{SavedAdStyles}
\title{SavedAdStyles Object}
\usage{
SavedAdStyles(etag = NULL, items = NULL, nextPageToken = NULL)
}
\arguments{
\item{etag}{ETag of this response for caching purposes}
\item{items}{The saved ad styles returned in this list response}
\item{nextPageToken}{Continuation token used to page through ad units}
}
\value{
SavedAdStyles object
}
\description{
SavedAdStyles Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/byGridCell_CalcStats.R
\name{byGridCell_CalcStats}
\alias{byGridCell_CalcStats}
\title{Calculate statistics for results by grid cell}
\usage{
byGridCell_CalcStats(
dfrs,
roms_grid,
quantity,
lci = 0.025,
uci = 0.975,
byStartTime = FALSE,
verbose = FALSE
)
}
\arguments{
\item{dfrs}{- list of dataframes with results by grid cell}
\item{roms_grid}{- sf dataset representing a roms grid, with all grid cells of interest}
\item{quantity}{- column name of quantity to calculate statistics for}
\item{lci}{- lower confidence interval}
\item{uci}{- upper confidence interval}
\item{byStartTime}{- flag to average by startTime}
\item{verbose}{- flag to print diagnostic info}
}
\value{
a list of sf datasets, by life stage, with mean by grid cell
}
\description{
Function to calculate statistics for results by grid cell.
}
| /man/byGridCell_CalcStats.Rd | permissive | wStockhausen/rDisMELS | R | false | true | 915 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/byGridCell_CalcStats.R
\name{byGridCell_CalcStats}
\alias{byGridCell_CalcStats}
\title{Calculate statistics for results by grid cell}
\usage{
byGridCell_CalcStats(
dfrs,
roms_grid,
quantity,
lci = 0.025,
uci = 0.975,
byStartTime = FALSE,
verbose = FALSE
)
}
\arguments{
\item{dfrs}{- list of dataframes with results by grid cell}
\item{roms_grid}{- sf dataset representing a roms grid, with all grid cells of interest}
\item{quantity}{- column name of quantity to calculate statistics for}
\item{lci}{- lower confidence interval}
\item{uci}{- upper confidence interval}
\item{byStartTime}{- flag to average by startTime}
\item{verbose}{- flag to print diagnostic info}
}
\value{
a list of sf datasets, by life stage, with mean by grid cell
}
\description{
Function to calculate statistics for results by grid cell.
}
|
#' Helper function: group fst by pattern
#' @param fst_pattern pattern to search for in fst filename (default: "raw")
#' @param time_pattern optional pattern to filter months to be imported (default: NULL),
#' for using it do e.g. "2017-06|2017-07" or c("2017-06", "2017-07")
#' @param fst_dir directory with fst files or subdirs to be imported (default:
#' system.file("shiny/berlin_t/data/fst",package = "aquanes.report"))
#' @importFrom stringr str_detect
#' @importFrom data.table rbindlist
#' @return merged data.frame
#' @keywords internal
group_fst_by_pattern <- function(time_pattern = NULL,
                                 fst_pattern = "raw",
                                 fst_dir = system.file(
                                   "shiny/berlin_t/data/fst",
                                   package = "aquanes.report"
                                 )) {
  # Find every fst file below fst_dir whose file name matches fst_pattern.
  fst_paths <- list.files(path = fst_dir,
                          pattern = fst_pattern,
                          recursive = TRUE,
                          full.names = TRUE)

  # If requested, keep only paths matching the time filter; a vector of
  # patterns is collapsed into a single "a|b|c" alternation regex first.
  if (!is.null(time_pattern)) {
    if (length(time_pattern) > 1) {
      time_pattern <- paste0(time_pattern, collapse = "|")
    }
    fst_paths <- fst_paths[stringr::str_detect(fst_paths, time_pattern)]
  }

  cat(sprintf(
    "Importing the following fst files:\n%s\n",
    paste(fst_paths, collapse = "\n")
  ))

  # Read each file and stack the pieces row-wise into one table.
  data.table::rbindlist(lapply(fst_paths, aquanes.report::read_fst))
}
#' Helper function: merge and export fst files into main shiny data folder
#' @param time_pattern optional pattern to filter months to be imported (default: NULL),
#' for using it do e.g. "2017-06|2017-07" or c("2017-06", "2017-07")
#' @param compression compression for fst export (default: 100)
#' @param import_dir directory with fst files or subdirs to be imported (default:
#' system.file("shiny/berlin_t/data/fst",package = "aquanes.report"))
#' @param export_dir directory with fst directory for export (default:
#' system.file("shiny/berlin_t/data",package = "aquanes.report"))
#' @return imports multiple fst files and exports them to be used for app
#' @export
merge_and_export_fst <- function(time_pattern = NULL,
                                 compression = 100,
                                 import_dir = system.file(
                                   "shiny/berlin_t/data/fst",
                                   package = "aquanes.report"
                                 ),
                                 export_dir = system.file(
                                   "shiny/berlin_t/data",
                                   package = "aquanes.report"
                                 )) {
  # Make sure the export directory exists before writing anything into it
  if (!dir.exists(export_dir)) {
    print(sprintf("Creating export path: %s", export_dir))
    dir.create(export_dir, recursive = TRUE)
  }

  # Merge all monthly fst files for one aggregation level ("raw", "10min",
  # "hour" or "day") and write the combined table to
  # "<export_dir>/siteData_<level>_list.fst".
  export_one_level <- function(level) {
    merged <- group_fst_by_pattern(time_pattern = time_pattern,
                                   fst_pattern = level,
                                   fst_dir = import_dir)
    export_path <- sprintf("%s/siteData_%s_list.fst", export_dir, level)
    cat(sprintf(
      "Writing fst: %s (with compression %d)\n",
      export_path,
      compression
    ))
    fst::write.fst(merged, path = export_path, compress = compression)
    # Drop the (potentially large) merged table before the next level is read
    rm(merged)
    invisible(NULL)
  }

  # The original implementation repeated the read/log/write/clean-up sequence
  # once per level; looping removes that duplication and also frees the
  # "hour" and "day" tables, which previously stayed in memory.
  for (level in c("raw", "10min", "hour", "day")) {
    export_one_level(level)
  }
  invisible(NULL)
}
| /R/merge_and_export_fst.R | permissive | KWB-R/aquanes.report | R | false | false | 4,699 | r | #' Helper function: group fst by pattern
#' @param fst_pattern pattern to search for in fst filename (default: "raw")
#' @param time_pattern optional pattern to filter months to be imported (default: NULL),
#' for using it do e.g. "2017-06|2017-07" or c("2017-06", "2017-07")
#' @param fst_dir directory with fst files or subdirs to be imported (default:
#' system.file("shiny/berlin_t/data/fst",package = "aquanes.report"))
#' @importFrom stringr str_detect
#' @importFrom data.table rbindlist
#' @return merged data.frame
#' @keywords internal
group_fst_by_pattern <- function(time_pattern = NULL,
                                 fst_pattern = "raw",
                                 fst_dir = system.file(
                                   "shiny/berlin_t/data/fst",
                                   package = "aquanes.report"
                                 )) {
  # Find every fst file below fst_dir whose file name matches fst_pattern.
  fst_paths <- list.files(path = fst_dir,
                          pattern = fst_pattern,
                          recursive = TRUE,
                          full.names = TRUE)

  # If requested, keep only paths matching the time filter; a vector of
  # patterns is collapsed into a single "a|b|c" alternation regex first.
  if (!is.null(time_pattern)) {
    if (length(time_pattern) > 1) {
      time_pattern <- paste0(time_pattern, collapse = "|")
    }
    fst_paths <- fst_paths[stringr::str_detect(fst_paths, time_pattern)]
  }

  cat(sprintf(
    "Importing the following fst files:\n%s\n",
    paste(fst_paths, collapse = "\n")
  ))

  # Read each file and stack the pieces row-wise into one table.
  data.table::rbindlist(lapply(fst_paths, aquanes.report::read_fst))
}
#' Helper function: merge and export fst files into main shiny data folder
#' @param time_pattern optional pattern to filter months to be imported (default: NULL),
#' for using it do e.g. "2017-06|2017-07" or c("2017-06", "2017-07")
#' @param compression compression for fst export (default: 100)
#' @param import_dir directory with fst files or subdirs to be imported (default:
#' system.file("shiny/berlin_t/data/fst",package = "aquanes.report"))
#' @param export_dir directory with fst directory for export (default:
#' system.file("shiny/berlin_t/data",package = "aquanes.report"))
#' @return imports multiple fst files and exports them to be used for app
#' @export
merge_and_export_fst <- function(time_pattern = NULL,
                                 compression = 100,
                                 import_dir = system.file(
                                   "shiny/berlin_t/data/fst",
                                   package = "aquanes.report"
                                 ),
                                 export_dir = system.file(
                                   "shiny/berlin_t/data",
                                   package = "aquanes.report"
                                 )) {
  # Make sure the export directory exists before writing anything into it
  if (!dir.exists(export_dir)) {
    print(sprintf("Creating export path: %s", export_dir))
    dir.create(export_dir, recursive = TRUE)
  }

  # Merge all monthly fst files for one aggregation level ("raw", "10min",
  # "hour" or "day") and write the combined table to
  # "<export_dir>/siteData_<level>_list.fst".
  export_one_level <- function(level) {
    merged <- group_fst_by_pattern(time_pattern = time_pattern,
                                   fst_pattern = level,
                                   fst_dir = import_dir)
    export_path <- sprintf("%s/siteData_%s_list.fst", export_dir, level)
    cat(sprintf(
      "Writing fst: %s (with compression %d)\n",
      export_path,
      compression
    ))
    fst::write.fst(merged, path = export_path, compress = compression)
    # Drop the (potentially large) merged table before the next level is read
    rm(merged)
    invisible(NULL)
  }

  # The original implementation repeated the read/log/write/clean-up sequence
  # once per level; looping removes that duplication and also frees the
  # "hour" and "day" tables, which previously stayed in memory.
  for (level in c("raw", "10min", "hour", "day")) {
    export_one_level(level)
  }
  invisible(NULL)
}
|
# Benchmark script: runs the GPC+ baseline (Gaussian process classification
# with privileged information) on one pre-generated train/test split and
# writes the test error and runtime to text files.
# epGPCExternal() and predictGPC() come from the sourced files below; their
# exact semantics are not visible here.
source("./Rcode/gFITC.R")
source("./Rcode/epAGPC_het.R")
set.seed(0)
# NOTE(review): commandArgs()[1] would be the R executable path, not the first
# user argument; commandArgs(trailingOnly = TRUE)[1] is presumably intended.
# The data-set index is currently hard-coded to 1.
# i <- commandArgs()[1]
i <- 1
#number of pseudo-inputs is set to 100
for (npi in c(100)) {
print(i)
#Loading the data
# load() is expected to create an object `data` with components x, y_train,
# y_test, x_star, itrain and itest -- TODO confirm against the .dat files.
load(paste("./data/",i,"data.dat",sep = ""))
Xtrain <- data$x[ data$itrain, ]
Ytrain <- as.vector(c(data$y_train))
Xtest <- data$x[ data$itest, ]
Ytest <- as.vector(c(data$y_test))
# Privileged information for the training rows, reshaped to a 1-column matrix
Xstar_train <- data$x_star[ data$itrain, ]
Xstar_train <- matrix(Xstar_train, length(c(Xstar_train)), 1)
#zero mean unit variance normalization
# Training mean/sd are reused for the test set; constant columns get sd = 1
# so the division below cannot produce NaN.
meanTrain <- apply(Xtrain, 2, mean)
sdTrain <- apply(Xtrain, 2, sd)
sdTrain[ sdTrain == 0 ] <- 1
Xtrain <- (Xtrain - matrix(meanTrain, nrow(Xtrain), ncol(Xtrain), byrow = TRUE)) /
matrix(sdTrain, nrow(Xtrain), ncol(Xtrain), byrow = TRUE)
Xtest <- (Xtest - matrix(meanTrain, nrow(Xtest), ncol(Xtest), byrow = TRUE)) /
matrix(sdTrain, nrow(Xtest), ncol(Xtest), byrow = TRUE)
#GPC+ baseline
time <- system.time(
ret <- epGPCExternal(Xtrain, Xstar_train, Ytrain, npi, sigmaF = 1, sigma0F = 1,
lF = 5e-2, sigmaG = 1, sigma0G = 1, lG = 1, optimize_flags_F = c(TRUE, TRUE, TRUE, TRUE),
optimize_flags_G = c(TRUE, TRUE, TRUE, TRUE, TRUE))
)
# Test error: predictGPC() appears to return class probabilities, so
# thresholding at 0.5 yields labels in {-1, 1} -- TODO confirm label coding.
errorTest <- mean(sign(predictGPC(ret, Xtest) - 0.5) != Ytest)
# Persist error rate and runtime for this (split, npi) combination
write.table(errorTest, file = paste("./results/GPC_plus/",i,"_errorTest_X_", npi, "NEW.txt", sep = ""),
row.names = F, col.names = F, append = FALSE)
write.table(t(time), file = paste("./results/GPC_plus/",i,"_time_X_", npi, "NEW.txt", sep = ""),
row.names = F, col.names = F, append = FALSE)
}
| /simulateGPC_plus2.R | no_license | jt86/r-project | R | false | false | 1,577 | r | source("./Rcode/gFITC.R")
# Second copy of simulateGPC_plus2.R from the dataset row (its first
# `source("./Rcode/gFITC.R")` line sits on the preceding metadata line).
# Runs the GPC+ baseline on one pre-generated train/test split.
source("./Rcode/epAGPC_het.R")
set.seed(0)
# NOTE(review): the split index is hard-coded; commandArgs()[1] would return
# the R executable path rather than a user argument.
# i <- commandArgs()[1]
i <- 1
#number of pseudo-inputs is set to 100
for (npi in c(100)) {
print(i)
#Loading the data
# load() is expected to create an object `data` with components x, y_train,
# y_test, x_star, itrain and itest -- TODO confirm against the .dat files.
load(paste("./data/",i,"data.dat",sep = ""))
Xtrain <- data$x[ data$itrain, ]
Ytrain <- as.vector(c(data$y_train))
Xtest <- data$x[ data$itest, ]
Ytest <- as.vector(c(data$y_test))
# Privileged information for the training rows, reshaped to a 1-column matrix
Xstar_train <- data$x_star[ data$itrain, ]
Xstar_train <- matrix(Xstar_train, length(c(Xstar_train)), 1)
#zero mean unit variance normalization
# Training mean/sd are reused for the test set; constant columns get sd = 1
# so the division below cannot produce NaN.
meanTrain <- apply(Xtrain, 2, mean)
sdTrain <- apply(Xtrain, 2, sd)
sdTrain[ sdTrain == 0 ] <- 1
Xtrain <- (Xtrain - matrix(meanTrain, nrow(Xtrain), ncol(Xtrain), byrow = TRUE)) /
matrix(sdTrain, nrow(Xtrain), ncol(Xtrain), byrow = TRUE)
Xtest <- (Xtest - matrix(meanTrain, nrow(Xtest), ncol(Xtest), byrow = TRUE)) /
matrix(sdTrain, nrow(Xtest), ncol(Xtest), byrow = TRUE)
#GPC+ baseline
time <- system.time(
ret <- epGPCExternal(Xtrain, Xstar_train, Ytrain, npi, sigmaF = 1, sigma0F = 1,
lF = 5e-2, sigmaG = 1, sigma0G = 1, lG = 1, optimize_flags_F = c(TRUE, TRUE, TRUE, TRUE),
optimize_flags_G = c(TRUE, TRUE, TRUE, TRUE, TRUE))
)
# Test error from thresholding the predicted probabilities at 0.5
errorTest <- mean(sign(predictGPC(ret, Xtest) - 0.5) != Ytest)
# Persist error rate and runtime for this (split, npi) combination
write.table(errorTest, file = paste("./results/GPC_plus/",i,"_errorTest_X_", npi, "NEW.txt", sep = ""),
row.names = F, col.names = F, append = FALSE)
write.table(t(time), file = paste("./results/GPC_plus/",i,"_time_X_", npi, "NEW.txt", sep = ""),
row.names = F, col.names = F, append = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{reset_par}
\alias{reset_par}
\title{Reset graphical options in 'par' to factory defaults.}
\usage{
reset_par()
}
\description{
Reset the \code{\link[graphics]{par}} to R's defaults.
}
\details{
Some of \code{par}'s settings are readonly. These are obviously not reset.
Settings stored in \code{\link[graphics]{par}} are device-dependent. In practice,
most settings in \code{par} are initially the same across devices. Exceptions
we noted are:
\itemize{
\item{\code{bg}: background color}
\item{\code{fin}: figure region dimensions}
\item{\code{mai}: margin size (inches)}
\item{\code{pin}: current plot dimensions (inches)}
\item{\code{plt}: coordinates of the plot region as fractions of the current figure region}
\item{\code{ps}: point size of text (but not symbols)}
}
Consequently, these options are currently not reset by calling \code{reset_par()}
}
\seealso{
\code{\link{reset_options}}, \code{\link[graphics]{par}}
}
| /man/reset_par.Rd | no_license | cran/settings | R | false | true | 1,020 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{reset_par}
\alias{reset_par}
\title{Reset graphical options in 'par' to factory defaults.}
\usage{
reset_par()
}
\description{
Reset the \code{\link[graphics]{par}} to R's defaults.
}
\details{
Some of \code{par}'s settings are readonly. These are obviously not reset.
Settings stored in \code{\link[graphics]{par}} are device-dependent. In practice,
most settings in \code{par} are initially the same across devices. Exceptions
we noted are:
\itemize{
\item{\code{bg}: background color}
\item{\code{fin}: figure region dimensions}
\item{\code{mai}: margin size (inches)}
\item{\code{pin}: current plot dimensions (inches)}
\item{\code{plt}: coordinates of the plot region as fractions of the current figure region}
\item{\code{ps}: point size of text (but not symbols)}
}
Consequently, these options are currently not reset by calling \code{reset_par()}
}
\seealso{
\code{\link{reset_options}}, \code{\link[graphics]{par}}
}
|
##################################################
## Plot per-position counts and windowed Fst to one PNG device.
## Usage: Rscript plot-count-fst.r <count_table>
## Section 1: log(count) vs position, colored by num2, faceted by chromosome.
## Section 2: weighted Fst per window (hard-coded input path), faceted by
##            chromosome, with a reference line at Fst = 0.8.
##################################################
library(ggplot2)
argv <- commandArgs(TRUE)
png(paste(argv[1], ".png", sep = ""), width = 960, height = 960)
## --- Section 1: counts ---
md <- read.table(argv[1], header = TRUE)
# (The original read the same file a second time into an unused `md2`;
#  that redundant read has been dropped.)
p <- ggplot()
# Fixed: the original call ended with a trailing empty argument
# (`alpha=0.8,)`), which raises an "argument is empty" error when evaluated.
p1 <- p + geom_line(data = md,
                    aes(pos, log(num), color = factor(num2), shape = factor(num2)),
                    alpha = 0.8) +
  scale_colour_manual(values = c("red", "green", "black", "blue"))
p2 <- p1 + facet_wrap(~chr, ncol = 2, scales = "free_x")
p3 <- p2 + theme(axis.text.x = element_text(angle = 45)) + labs(x = "position", y = "log(num)")
print(p3)  # explicit print so the plot also renders when the script is source()'d
##################################################
## --- Section 2: windowed Fst ---
##################################################
# (Removed a stray `par(new=TRUE)`: base-graphics state does not affect
#  ggplot2 output and it warns when no base plot exists yet.)
md <- read.table("../fst/reseq-8-snp-fst.windowed.weir.fst", header = TRUE)
p <- ggplot()
#p1 <- p + geom_point(data=md, aes(pos,log(num),color=smp))
p1 <- p + geom_line(data = md, aes(BIN_START, WEIGHTED_FST), color = "blue") +
  geom_hline(yintercept = 0.8, colour = "red")
p2 <- p1 + facet_wrap(~CHROM, ncol = 2, scales = "free_x")
p3 <- p2 + theme(axis.text.x = element_text(angle = 45)) + labs(x = "position", y = "Fst")
print(p3)
dev.off()
| /plot-count-fst.r | no_license | ctan2020/test | R | false | false | 1,323 | r | ##################################################
## Plot per-position counts and windowed Fst to one PNG device.
## Usage: Rscript plot-count-fst.r <count_table>
## Section 1: log(count) vs position, colored by num2, faceted by chromosome.
## Section 2: weighted Fst per window (hard-coded input path), faceted by
##            chromosome, with a reference line at Fst = 0.8.
##################################################
library(ggplot2)
argv <- commandArgs(TRUE)
png(paste(argv[1], ".png", sep = ""), width = 960, height = 960)
## --- Section 1: counts ---
md <- read.table(argv[1], header = TRUE)
# (The original read the same file a second time into an unused `md2`;
#  that redundant read has been dropped.)
p <- ggplot()
# Fixed: the original call ended with a trailing empty argument
# (`alpha=0.8,)`), which raises an "argument is empty" error when evaluated.
p1 <- p + geom_line(data = md,
                    aes(pos, log(num), color = factor(num2), shape = factor(num2)),
                    alpha = 0.8) +
  scale_colour_manual(values = c("red", "green", "black", "blue"))
p2 <- p1 + facet_wrap(~chr, ncol = 2, scales = "free_x")
p3 <- p2 + theme(axis.text.x = element_text(angle = 45)) + labs(x = "position", y = "log(num)")
print(p3)  # explicit print so the plot also renders when the script is source()'d
##################################################
## --- Section 2: windowed Fst ---
##################################################
# (Removed a stray `par(new=TRUE)`: base-graphics state does not affect
#  ggplot2 output and it warns when no base plot exists yet.)
md <- read.table("../fst/reseq-8-snp-fst.windowed.weir.fst", header = TRUE)
p <- ggplot()
#p1 <- p + geom_point(data=md, aes(pos,log(num),color=smp))
p1 <- p + geom_line(data = md, aes(BIN_START, WEIGHTED_FST), color = "blue") +
  geom_hline(yintercept = 0.8, colour = "red")
p2 <- p1 + facet_wrap(~CHROM, ncol = 2, scales = "free_x")
p3 <- p2 + theme(axis.text.x = element_text(angle = 45)) + labs(x = "position", y = "Fst")
print(p3)
dev.off()
|
# Average rentals per hour of the day, printed and then plotted as a line.
# (Variable names `ag`, `ag1`, ... are kept: data.frame(ag$total) derives its
#  printed column name from the variable name.)
ag <- aggregate(total ~ hr, bikeshare, mean)
ag
data.frame(ag$total)
lineplot <- ggplot(ag) +
  geom_line(aes(x = hr, y = total)) +
  labs(x = "Hour of the day", y = "Average bike rentals", title = "Avg bike rentals with respective to every hour")
lineplot

# Same hourly profile, split into non-working vs working days.
ag1 <- aggregate(total ~ hr + workingday, bikeshare, mean)
ag1
lineplot1 <- ggplot(ag1) +
  geom_line(aes(x = hr, y = total)) +
  labs(x = "Hour of the day", y = "Average bike rentals", title = "Avg bike rentals with respective to every hour") +
  facet_wrap(~workingday, nrow = 2)
lineplot1

# 8am rentals only: average by weather situation, faceted by working day.
ag2 <- bikeshare %>% filter(hr == 8)
ag2n <- aggregate(total ~ weathersit + workingday, ag2, mean)
lineplot2 <- ggplot(ag2n) +
  geom_line(aes(x = weathersit, y = total)) +
  labs(x = "Hour of the day", y = "Average bike rentals", title = "Avg bike rentals with respective to every hour") +
  facet_wrap(~workingday, nrow = 2)
lineplot2
| /bikeshare.R | no_license | sonali4794/Buildings-certified-green | R | false | false | 866 | r |
ag = aggregate(total~hr, bikeshare, mean)
ag
data.frame(ag$total)
lineplot = ggplot(ag) + geom_line(aes(x=hr, y=total)) +
labs (x="Hour of the day", y="Average bike rentals", title="Avg bike rentals with respective to every hour")
lineplot
ag1 = aggregate(total~hr+workingday, bikeshare, mean)
ag1
lineplot1 = ggplot(ag1) + geom_line(aes(x=hr, y=total)) +
labs (x="Hour of the day", y="Average bike rentals", title="Avg bike rentals with respective to every hour")+
facet_wrap(~workingday, nrow = 2)
lineplot1
ag2 = bikeshare %>% filter(hr == 8)
ag2n = aggregate(total~weathersit+workingday, ag2, mean)
lineplot2 = ggplot(ag2n) + geom_line(aes(x=weathersit, y=total)) +
labs (x="Hour of the day", y="Average bike rentals", title="Avg bike rentals with respective to every hour")+
facet_wrap(~workingday, nrow = 2)
lineplot2
|
# @file Cluster.R
#
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of OhdsiRTools
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Divide the ff memory budget evenly over `nClusters` worker threads.
#
# Returns a length-2 numeric vector c(ffmaxbytes, ffbatchbytes), both rounded
# to whole bytes, intended to be applied on each worker via setFfMem().
.computeFfMemPerCluster <- function(nClusters) {
  # memory.limit is windows specific
  if (.Platform$OS.type == "windows") {
    # The 1024^2 factor on R >= 2.6.0 presumably converts megabytes to
    # bytes -- TODO confirm against memory.limit() semantics. Half of the
    # reported limit is reserved for ff.
    if (getRversion() >= "2.6.0")
      ffmaxbytes <- 0.5 * memory.limit() * (1024^2) else ffmaxbytes <- 0.5 * memory.limit()
  } else {
    # some magic constant (2GB)
    ffmaxbytes <- 2 * 1024^3
  }
  # Split the total budget evenly across the requested number of workers
  ffmaxbytes <- ffmaxbytes/nClusters
  # Limit size on machines with a lot of memory to prevent integer overflows in ff:
  ffmaxbytes <- min(ffmaxbytes, .Machine$integer.max * 12)
  # Batch size is fixed at 1/50th of the per-worker maximum
  ffbatchbytes <- ffmaxbytes/50
  return(c(round(ffmaxbytes), round(ffbatchbytes)))
}
setFfMem <- function(values) {
  # Apply the two ff memory limits (max bytes, batch bytes) in one options()
  # call and echo back the values now in effect so the caller can verify.
  options(ffmaxbytes = values[1], ffbatchbytes = values[2])
  c(getOption("ffmaxbytes"), getOption("ffbatchbytes"))
}
setFfDir <- function(fftempdir) {
  # Point the ff temp-directory option at the supplied path; like options()
  # itself, the previous option value is returned invisibly.
  previous <- options(fftempdir = fftempdir)
  invisible(previous)
}
#' Create a cluster of nodes for parallel computation
#'
#' @param numberOfThreads Number of parallel threads.
#' @param singleThreadToMain If \code{numberOfThreads} is 1, should we fall back to running the
#' process in the main thread?
#' @param divideFfMemory When TRUE, the memory available for processing ff and ffdf objects will
#' be equally divided over the threads.
#' @param setFfTempDir When TRUE, the ffTempDir option will be copied to each thread.
#'
#' @return
#' An object representing the cluster.
#'
#' @export
makeCluster <- function(numberOfThreads,
                        singleThreadToMain = TRUE,
                        divideFfMemory = TRUE,
                        setFfTempDir = TRUE) {
  .Deprecated("ParallelLogger::makeCluster")
  # With a single thread we avoid the overhead of a real socket cluster and
  # tag an empty list as "noCluster" so the other cluster* functions in this
  # file fall back to running in the main process.
  if (numberOfThreads == 1 && singleThreadToMain) {
    cluster <- list()
    class(cluster) <- "noCluster"
    OhdsiRTools::logTrace("Initiating cluster constisting only of main thread")
  } else {
    OhdsiRTools::logTrace("Initiating cluster with ", numberOfThreads, " threads")
    cluster <- snow::makeCluster(numberOfThreads, type = "SOCK")
    # Executed on each worker: re-register the main process' loggers, record
    # the thread number in an option, and arrange a trace message when the
    # worker's global environment is finalized (i.e. the thread terminates).
    logThreadStart <- function(loggers, threadNumber) {
      OhdsiRTools::clearLoggers()
      for (logger in loggers) {
        OhdsiRTools::registerLogger(logger)
      }
      options("threadNumber" = threadNumber)
      OhdsiRTools::logTrace("Thread ", threadNumber, " initiated")
      finalize <- function(env) {
        OhdsiRTools::logTrace("Thread ", threadNumber, " terminated")
      }
      reg.finalizer(globalenv(), finalize, onexit = TRUE)
      return(NULL)
    }
    loggers <- OhdsiRTools::getLoggers()
    # Fire the initialization on all workers first, then drain one result per
    # worker; completion order does not matter here.
    for (i in 1:length(cluster)) {
      snow::sendCall(cluster[[i]], logThreadStart, list(loggers = loggers, threadNumber = i))
    }
    for (i in 1:length(cluster)) {
      snow::recvOneResult(cluster)
    }
    if (divideFfMemory) {
      # Share the ff memory budget equally among the workers and check that
      # each worker echoes back exactly the values that were requested.
      values <- .computeFfMemPerCluster(length(cluster))
      for (i in 1:length(cluster)) {
        snow::sendCall(cluster[[i]], setFfMem, list(values = values))
      }
      for (i in 1:length(cluster)) {
        if (min(snow::recvOneResult(cluster)$value == values) == 0)
          warning("Unable to set ffmaxbytes and/or ffbatchbytes on worker")
      }
    }
    if (setFfTempDir) {
      # Propagate the main process' fftempdir option to every worker
      for (i in 1:length(cluster)) {
        snow::sendCall(cluster[[i]], setFfDir, list(fftempdir = options("fftempdir")$fftempdir))
      }
      for (i in 1:length(cluster)) {
        snow::recvOneResult(cluster)
      }
    }
  }
  return(cluster)
}
#' Require a package in the cluster
#'
#' @description
#' Calls the \code{require} function in each node of the cluster.
#'
#' @param cluster The cluster object.
#' @param package The name of the package to load in all nodes.
#'
#' @export
clusterRequire <- function(cluster, package) {
  # Fixed typo in the deprecation target: "cluterRequire" -> "clusterRequire",
  # so the message points users at a function that actually exists.
  .Deprecated("ParallelLogger::clusterRequire")
  if (inherits(cluster, "noCluster")) {
    # Single-threaded fall-back: load the package in the current session.
    do.call("require", list(package = package))
  } else {
    requirePackage <- function(package) {
      do.call("require", list(package = package))
    }
    # Ask every worker to load the package, then drain one result per worker.
    for (i in seq_along(cluster)) {
      snow::sendCall(cluster[[i]], requirePackage, list(package = package))
    }
    for (i in seq_along(cluster)) {
      snow::recvOneResult(cluster)
    }
  }
}
#' Stop the cluster
#'
#' @param cluster The cluster to stop
#'
#' @export
stopCluster <- function(cluster) {
  # Shut down a snow cluster. A "noCluster" placeholder (main-thread mode
  # created by makeCluster(1)) has no workers and is silently ignored.
  .Deprecated("ParallelLogger::stopCluster")
  # inherits() is the idiomatic class test (was: class(cluster)[1] != ...)
  if (!inherits(cluster, "noCluster")) {
    snow::stopCluster.default(cluster)
    OhdsiRTools::logTrace("Stopping cluster")
  }
}
#' Apply a function to a list using the cluster
#'
#' @details
#' The function will be executed on each element of x in the threads of the cluster. If there are more
#' elements than threads, the elements will be queued. The progress bar will show the number of
#' elements that have been completed. It can sometimes be important to realize that the context in
#' which a function is created is also transmitted to the worker node. If a function is defined inside
#' another function, and that outer function is called with a large argument, that argument will be
#' transmitted to the worker node each time the function is executed. It can therefore make sense to
#' define the function to be called at the package level rather than inside a function, to save
#' overhead.
#'
#' @param cluster The cluster of threads to run the function.
#' @param x The list on which the function will be applied.
#' @param fun The function to apply. Note that the context in which the function is specifies
#' matters (see details).
#' @param ... Additional parameters for the function.
#' @param stopOnError Stop when one of the threads reports an error? If FALSE, all errors will be
#' reported at the end.
#' @param progressBar Show a progress bar?
#'
#' @return
#' A list with the result of the function on each item in x.
#'
#' @export
clusterApply <- function(cluster, x, fun, ..., stopOnError = FALSE, progressBar = TRUE) {
  .Deprecated("ParallelLogger::clusterApply")
  if (inherits(cluster, "noCluster")) {
    # Single-threaded fall-back: plain lapply in the main process.
    lapply(x, fun, ...)
  } else {
    n <- length(x)
    p <- length(cluster)
    if (n > 0 && p > 0) {
      if (progressBar)
        pb <- txtProgressBar(style = 3)
      # Prime every worker with one task; the remaining tasks are dispatched
      # one at a time as results come back (simple work queue).
      for (i in 1:min(n, p)) {
        snow::sendCall(cluster[[i]], fun, c(list(x[[i]]), list(...)), tag = i)
      }
      val <- vector("list", n)
      hasError <- FALSE
      formatError <- function(threadNumber, error, args) {
        sprintf("Thread %s returns error: \"%s\" when using argument(s): %s",
                threadNumber,
                gsub("\n", "\\n", gsub("\t", "\\t", error)),
                gsub("\n", "\\n", gsub("\t", "\\t", paste(args, collapse = ","))))
      }
      for (i in 1:n) {
        d <- snow::recvOneResult(cluster)
        if (inherits(d$value, "try-error")) {
          # BUG FIX: the original executed `val[d$tag] <- NULL`, which
          # *removes* element d$tag from the list and shifts all later
          # results one position to the left, corrupting the output. The
          # slot is overwritten with d$value below, so no reset is needed.
          errorMessage <- formatError(d$node, d$value, c(list(x[[d$tag]]), list(...)))
          if (stopOnError) {
            stop(errorMessage)
          } else {
            OhdsiRTools::logError(errorMessage)
            hasError <- TRUE
          }
        }
        if (progressBar)
          setTxtProgressBar(pb, i/n)
        # Hand the next queued task to the worker that just finished
        j <- i + min(n, p)
        if (j <= n) {
          snow::sendCall(cluster[[d$node]], fun, c(list(x[[j]]), list(...)), tag = j)
        }
        # Store the result in the slot matching the task's original position
        val[d$tag] <- list(d$value)
      }
      if (progressBar) {
        close(pb)
      }
      if (hasError) {
        message <- paste0("Error(s) when calling function '",
                          substitute(fun, parent.frame(1)),
                          "', see earlier messages for details")
        stop(message)
      }
      return(val)
    }
  }
}
| /R/Cluster.R | permissive | parkdongsu/OhdsiRTools | R | false | false | 8,375 | r | # @file Cluster.R
#
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of OhdsiRTools
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Divide the ff memory budget evenly over `nClusters` worker threads.
#
# Returns a length-2 numeric vector c(ffmaxbytes, ffbatchbytes), both rounded
# to whole bytes, intended to be applied on each worker via setFfMem().
.computeFfMemPerCluster <- function(nClusters) {
  # memory.limit is windows specific
  if (.Platform$OS.type == "windows") {
    # The 1024^2 factor on R >= 2.6.0 presumably converts megabytes to
    # bytes -- TODO confirm against memory.limit() semantics. Half of the
    # reported limit is reserved for ff.
    if (getRversion() >= "2.6.0")
      ffmaxbytes <- 0.5 * memory.limit() * (1024^2) else ffmaxbytes <- 0.5 * memory.limit()
  } else {
    # some magic constant (2GB)
    ffmaxbytes <- 2 * 1024^3
  }
  # Split the total budget evenly across the requested number of workers
  ffmaxbytes <- ffmaxbytes/nClusters
  # Limit size on machines with a lot of memory to prevent integer overflows in ff:
  ffmaxbytes <- min(ffmaxbytes, .Machine$integer.max * 12)
  # Batch size is fixed at 1/50th of the per-worker maximum
  ffbatchbytes <- ffmaxbytes/50
  return(c(round(ffmaxbytes), round(ffbatchbytes)))
}
setFfMem <- function(values) {
  # Apply the two ff memory limits (max bytes, batch bytes) in one options()
  # call and echo back the values now in effect so the caller can verify.
  options(ffmaxbytes = values[1], ffbatchbytes = values[2])
  c(getOption("ffmaxbytes"), getOption("ffbatchbytes"))
}
setFfDir <- function(fftempdir) {
  # Point the ff temp-directory option at the supplied path; like options()
  # itself, the previous option value is returned invisibly.
  previous <- options(fftempdir = fftempdir)
  invisible(previous)
}
#' Create a cluster of nodes for parallel computation
#'
#' @param numberOfThreads Number of parallel threads.
#' @param singleThreadToMain If \code{numberOfThreads} is 1, should we fall back to running the
#' process in the main thread?
#' @param divideFfMemory When TRUE, the memory available for processing ff and ffdf objects will
#' be equally divided over the threads.
#' @param setFfTempDir When TRUE, the ffTempDir option will be copied to each thread.
#'
#' @return
#' An object representing the cluster.
#'
#' @export
makeCluster <- function(numberOfThreads,
                        singleThreadToMain = TRUE,
                        divideFfMemory = TRUE,
                        setFfTempDir = TRUE) {
  .Deprecated("ParallelLogger::makeCluster")
  # With a single thread we avoid the overhead of a real socket cluster and
  # tag an empty list as "noCluster" so the other cluster* functions in this
  # file fall back to running in the main process.
  if (numberOfThreads == 1 && singleThreadToMain) {
    cluster <- list()
    class(cluster) <- "noCluster"
    OhdsiRTools::logTrace("Initiating cluster constisting only of main thread")
  } else {
    OhdsiRTools::logTrace("Initiating cluster with ", numberOfThreads, " threads")
    cluster <- snow::makeCluster(numberOfThreads, type = "SOCK")
    # Executed on each worker: re-register the main process' loggers, record
    # the thread number in an option, and arrange a trace message when the
    # worker's global environment is finalized (i.e. the thread terminates).
    logThreadStart <- function(loggers, threadNumber) {
      OhdsiRTools::clearLoggers()
      for (logger in loggers) {
        OhdsiRTools::registerLogger(logger)
      }
      options("threadNumber" = threadNumber)
      OhdsiRTools::logTrace("Thread ", threadNumber, " initiated")
      finalize <- function(env) {
        OhdsiRTools::logTrace("Thread ", threadNumber, " terminated")
      }
      reg.finalizer(globalenv(), finalize, onexit = TRUE)
      return(NULL)
    }
    loggers <- OhdsiRTools::getLoggers()
    # Fire the initialization on all workers first, then drain one result per
    # worker; completion order does not matter here.
    for (i in 1:length(cluster)) {
      snow::sendCall(cluster[[i]], logThreadStart, list(loggers = loggers, threadNumber = i))
    }
    for (i in 1:length(cluster)) {
      snow::recvOneResult(cluster)
    }
    if (divideFfMemory) {
      # Share the ff memory budget equally among the workers and check that
      # each worker echoes back exactly the values that were requested.
      values <- .computeFfMemPerCluster(length(cluster))
      for (i in 1:length(cluster)) {
        snow::sendCall(cluster[[i]], setFfMem, list(values = values))
      }
      for (i in 1:length(cluster)) {
        if (min(snow::recvOneResult(cluster)$value == values) == 0)
          warning("Unable to set ffmaxbytes and/or ffbatchbytes on worker")
      }
    }
    if (setFfTempDir) {
      # Propagate the main process' fftempdir option to every worker
      for (i in 1:length(cluster)) {
        snow::sendCall(cluster[[i]], setFfDir, list(fftempdir = options("fftempdir")$fftempdir))
      }
      for (i in 1:length(cluster)) {
        snow::recvOneResult(cluster)
      }
    }
  }
  return(cluster)
}
#' Require a package in the cluster
#'
#' @description
#' Calls the \code{require} function in each node of the cluster.
#'
#' @param cluster The cluster object.
#' @param package The name of the package to load in all nodes.
#'
#' @export
clusterRequire <- function(cluster, package) {
  # Fixed typo in the replacement name ("cluterRequire" -> "clusterRequire"),
  # which pointed users at a nonexistent function.
  .Deprecated("ParallelLogger::clusterRequire")
  if (inherits(cluster, "noCluster")) {
    # Single-threaded fall-back: load the package in the main session.
    do.call("require", list(package = package))
  } else {
    requirePackage <- function(package) {
      do.call("require", list(package = package))
    }
    for (i in seq_along(cluster)) {
      snow::sendCall(cluster[[i]], requirePackage, list(package = package))
    }
    for (i in seq_along(cluster)) {
      snow::recvOneResult(cluster)
    }
  }
}
#' Stop the cluster
#'
#' @param cluster The cluster to stop
#'
#' @export
stopCluster <- function(cluster) {
  .Deprecated("ParallelLogger::stopCluster")
  # A "noCluster" object is just the main thread, so there is nothing to stop.
  if (!inherits(cluster, "noCluster")) {
    # Log before shutting down so the trace message is not lost if stopping
    # the workers fails (the original logged after the cluster was gone).
    OhdsiRTools::logTrace("Stopping cluster")
    snow::stopCluster.default(cluster)
  }
}
#' Apply a function to a list using the cluster
#'
#' @details
#' The function will be executed on each element of x in the threads of the cluster. If there are more
#' elements than threads, the elements will be queued. The progress bar will show the number of
#' elements that have been completed. It can sometimes be important to realize that the context in
#' which a function is created is also transmitted to the worker node. If a function is defined inside
#' another function, and that outer function is called with a large argument, that argument will be
#' transmitted to the worker node each time the function is executed. It can therefore make sense to
#' define the function to be called at the package level rather than inside a function, to save
#' overhead.
#'
#' @param cluster The cluster of threads to run the function.
#' @param x The list on which the function will be applied.
#' @param fun The function to apply. Note that the context in which the function is specified
#' matters (see details).
#' @param ... Additional parameters for the function.
#' @param stopOnError Stop when one of the threads reports an error? If FALSE, all errors will be
#' reported at the end.
#' @param progressBar Show a progress bar?
#'
#' @return
#' A list with the result of the function on each item in x.
#'
#' @export
clusterApply <- function(cluster, x, fun, ..., stopOnError = FALSE, progressBar = TRUE) {
  .Deprecated("ParallelLogger::clusterApply")
  if (class(cluster)[1] == "noCluster") {
    # Single-threaded fall-back: just apply the function in the main thread.
    lapply(x, fun, ...)
  } else {
    n <- length(x)  # number of jobs
    p <- length(cluster)  # number of worker nodes
    if (n > 0 && p > 0) {
      if (progressBar)
        pb <- txtProgressBar(style = 3)
      # Prime each worker with one job (fewer if there are fewer jobs than
      # workers); remaining jobs are submitted as results come back.
      for (i in 1:min(n, p)) {
        snow::sendCall(cluster[[i]], fun, c(list(x[[i]]), list(...)), tag = i)
      }
      val <- vector("list", n)
      hasError <- FALSE
      # Builds a single-line error message; newlines and tabs in the error
      # text and arguments are escaped to keep the log entry on one line.
      formatError <- function(threadNumber, error, args) {
        sprintf("Thread %s returns error: \"%s\" when using argument(s): %s",
                threadNumber,
                gsub("\n", "\\n", gsub("\t", "\\t", error)),
                gsub("\n", "\\n", gsub("\t", "\\t", paste(args, collapse = ","))))
      }
      for (i in 1:n) {
        # Receive whichever worker finishes first; d$tag is the original job
        # index, d$node is the worker that produced the result.
        d <- snow::recvOneResult(cluster)
        if (inherits(d$value, "try-error")) {
          # NOTE(review): single-bracket NULL assignment removes the element,
          # temporarily shrinking val; the element is re-created by the
          # val[d$tag] <- list(d$value) assignment further below.
          val[d$tag] <- NULL
          errorMessage <- formatError(d$node, d$value, c(list(x[[d$tag]]), list(...)))
          if (stopOnError) {
            stop(errorMessage)
          } else {
            OhdsiRTools::logError(errorMessage)
            hasError <- TRUE
          }
        }
        if (progressBar)
          setTxtProgressBar(pb, i/n)
        # Hand the next queued job (if any) to the worker that just freed up.
        j <- i + min(n, p)
        if (j <= n) {
          snow::sendCall(cluster[[d$node]], fun, c(list(x[[j]]), list(...)), tag = j)
        }
        # Store the result (or the try-error) at the job's original position.
        val[d$tag] <- list(d$value)
      }
      if (progressBar) {
        close(pb)
      }
      if (hasError) {
        message <- paste0("Error(s) when calling function '",
                          substitute(fun, parent.frame(1)),
                          "', see earlier messages for details")
        stop(message)
      }
      return(val)
    }
  }
}
|
testlist <- list(lims = structure(c(5.22851419824833e+54, 5.22851419824833e+54, 5.22851419824833e+54, 5.22851419824833e+54, 2.02925826463916e+53, 2.14305266688968e-307, 7.06327445644526e-304, 0, 7.28400178326727e-304, 1.2136247081529e+132, 2.21813575529665e+130, 5.43222633946974e-312, 0, 0, 7.57261242874038e-315, 1.89477363875693e+132, 1.66391844583047e-308, 1.38521763239434e-309, 2.12176913340959e-314, 1.45350484989218e+135, 6.32404026676796e-322, 8.52313092163251e-254, 7.1686917865218e-80, 3.00009459320665e-241, 2.209520842865e-307, 4.1410356681522e+204, 4.08080822911401e+204, 2.71712984670743e-260, 3.12511342963964e-319, 9.23453418440268e-305, 4.94065645841247e-323, 1.52971493577025e-308, 3.05465676250376e-260, 4.1049064037361e+204, 5.89400770183056e+202, 1.06559766079976e-255, 1.22692992715509e-237, 7.2911220195564e-304, 2.63554948596104e-82, 1.71091322755295e-313, 1.98813431604752e-289, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 7L)), points = structure(c(NaN, NA), .Dim = 1:2))
result <- do.call(palm:::pbc_distances,testlist)
str(result) | /palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612987931-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 1,067 | r | testlist <- list(lims = structure(c(5.22851419824833e+54, 5.22851419824833e+54, 5.22851419824833e+54, 5.22851419824833e+54, 2.02925826463916e+53, 2.14305266688968e-307, 7.06327445644526e-304, 0, 7.28400178326727e-304, 1.2136247081529e+132, 2.21813575529665e+130, 5.43222633946974e-312, 0, 0, 7.57261242874038e-315, 1.89477363875693e+132, 1.66391844583047e-308, 1.38521763239434e-309, 2.12176913340959e-314, 1.45350484989218e+135, 6.32404026676796e-322, 8.52313092163251e-254, 7.1686917865218e-80, 3.00009459320665e-241, 2.209520842865e-307, 4.1410356681522e+204, 4.08080822911401e+204, 2.71712984670743e-260, 3.12511342963964e-319, 9.23453418440268e-305, 4.94065645841247e-323, 1.52971493577025e-308, 3.05465676250376e-260, 4.1049064037361e+204, 5.89400770183056e+202, 1.06559766079976e-255, 1.22692992715509e-237, 7.2911220195564e-304, 2.63554948596104e-82, 1.71091322755295e-313, 1.98813431604752e-289, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 7L)), points = structure(c(NaN, NA), .Dim = 1:2))
result <- do.call(palm:::pbc_distances,testlist)
str(result) |
# Machine-extracted Rd example for bsplus::bs_set_data / bs_set_aria.
library(bsplus)
### Name: bs_set_data
### Title: Sets Bootstrap data- and aria- attributes.
### Aliases: bs_set_data bs_set_aria
### ** Examples
library("htmltools")
library("lubridate")
# Build a <div> tag, attach Bootstrap data-* attributes (target selector,
# delay, placement), then set the aria-expanded attribute to "false".
tags$div() %>%
bs_set_data(
target = "#foobar",
delay = dseconds(1),
placement = c("right", "auto")
) %>%
bs_set_aria(expanded = FALSE)
| /data/genthat_extracted_code/bsplus/examples/bs_set_data.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 349 | r | library(bsplus)
### Name: bs_set_data
### Title: Sets Bootstrap data- and aria- attributes.
### Aliases: bs_set_data bs_set_aria
### ** Examples
library("htmltools")
library("lubridate")
tags$div() %>%
bs_set_data(
target = "#foobar",
delay = dseconds(1),
placement = c("right", "auto")
) %>%
bs_set_aria(expanded = FALSE)
|
# Decision-tree classification of the Company_Data (carseats) data set:
# Sales is discretized into a high/low indicator and predicted from the
# remaining attributes with a conditional-inference tree (party::ctree).
Company_Data <- read.csv("C:/Users/DELL/Desktop/New folder/DATA SCIENCE/ASSIGNMENTS/DECISION TREE st/Company_Data.csv")
View(Company_Data)
# NOTE(review): installing packages inside an analysis script re-installs on
# every run; better done once interactively.
install.packages("C50")
install.packages("tree")
install.packages("caret")
install.packages("party")
library(C50)
library(tree)
library(caret)
library(party)
library(gmodels)
library(knitr)
attach(Company_Data)
# Exploratory distribution checks of the numeric variables.
hist(Sales)
hist(CompPrice)
hist(Income)
# NOTE(review): jarque.bera.test() lives in the 'tseries' package, which is
# never loaded here -- confirm tseries is attached or this call fails.
jarque.bera.test(Income)
hist(Advertising)
hist(Population)
hist(Price)
hist(Age)
# NOTE(review): count() on a bare vector matches the plyr signature, but
# neither plyr nor dplyr is attached here -- verify; table() would also work.
count(Company_Data$Urban)
ggplot(Company_Data)+geom_bar(aes(x=Urban))
ggplot(Company_Data)+geom_bar(aes(x=US))
hist(Sales)
# NOTE(review): cor() on the full data frame errors if factor/character
# columns (ShelveLoc, Urban, US) are present -- subset to numeric columns.
cor(Company_Data)
boxplot(Sales)
# Discretize the response: Sales above 8.5 counts as "Yes" (high sales).
yes_no<-ifelse(Sales<=8.5,"No","Yes")
count(yes_no)
cloth<-data.frame(Company_Data,yes_no)
ggplot(cloth)+geom_bar(aes(x=yes_no))
# NOTE(review): sequential (not random) train/test split -- rows 1-100 are
# the test set, rows 101-400 the training set.
test_data<-cloth[1:100,]
train_data<-cloth[101:400,]
# Fit a conditional-inference tree on all predictors.
decision_tree<-ctree(yes_no~CompPrice+Income+Advertising+Population+Price+ShelveLoc+Age+Education+Urban+US,data=train_data)
summary(decision_tree)
plot(decision_tree)
pred_tree<-as.data.frame(predict(decision_tree,newdata=test_data))
pred_tree["final_output"]<-NULL
pred_test<-predict(decision_tree,newdata=test_data)
# NOTE(review): pred_test has 100 predictions but cloth$yes_no has 400
# values; the comparison silently recycles -- accuracy should likely be
# mean(pred_test == test_data$yes_no).
mean(pred_test==cloth$yes_no)
CrossTable(test_data$yes_no,pred_test)
| /R company data decision tree.R | no_license | krithimr/KRITHI-CODE | R | false | false | 1,243 | r | Company_Data <- read.csv("C:/Users/DELL/Desktop/New folder/DATA SCIENCE/ASSIGNMENTS/DECISION TREE st/Company_Data.csv")
View(Company_Data)
install.packages("C50")
install.packages("tree")
install.packages("caret")
install.packages("party")
library(C50)
library(tree)
library(caret)
library(party)
library(gmodels)
library(knitr)
attach(Company_Data)
hist(Sales)
hist(CompPrice)
hist(Income)
jarque.bera.test(Income)
hist(Advertising)
hist(Population)
hist(Price)
hist(Age)
count(Company_Data$Urban)
ggplot(Company_Data)+geom_bar(aes(x=Urban))
ggplot(Company_Data)+geom_bar(aes(x=US))
hist(Sales)
cor(Company_Data)
boxplot(Sales)
yes_no<-ifelse(Sales<=8.5,"No","Yes")
count(yes_no)
cloth<-data.frame(Company_Data,yes_no)
ggplot(cloth)+geom_bar(aes(x=yes_no))
test_data<-cloth[1:100,]
train_data<-cloth[101:400,]
decision_tree<-ctree(yes_no~CompPrice+Income+Advertising+Population+Price+ShelveLoc+Age+Education+Urban+US,data=train_data)
summary(decision_tree)
plot(decision_tree)
pred_tree<-as.data.frame(predict(decision_tree,newdata=test_data))
pred_tree["final_output"]<-NULL
pred_test<-predict(decision_tree,newdata=test_data)
mean(pred_test==cloth$yes_no)
CrossTable(test_data$yes_no,pred_test)
|
## Code to convert complete record table to 10 minute independent records
## Original by LLB, 13/07/2018
## Modified by KG, 30/10/2018

## Get the libraries you need
library(dplyr)
library(data.table) ## tbh I don't know if you actually need this one
library(progress)

setwd("~/Dropbox/projects/GORONGOSA2/Camera Trap Grid")

record.table.all <- read.csv("Gaynor_ALLrecords_cleaned_June16_to_June17.csv", stringsAsFactors = FALSE)

##### CAMTRAPR ADAPTATION
# Keep only records at least 10 minutes after the previous detection
# (600 seconds = 10 minutes; the original comment incorrectly said 900 s),
# plus the 'first' records of each series where delta.time.secs is 0.
record.table.10min <- record.table.all[record.table.all$delta.time.secs >= 600 |
                                         record.table.all$delta.time.secs == 0, ]

# I realized that the original record table includes records from outside camera operation dates
# (this arises due to tall grass, with few detections, and I want to exclude these dates)
metadata <- read.csv("R/Data/Raw Data/cam_metadata_all.csv")

# merge camera start, end, and problem dates with the record table
metadata <- metadata[, 1:5]
record.table.10min <- merge(record.table.10min, metadata) # join by camera

# get the dates into date format
record.table.10min$Date <- as.Date(record.table.10min$Date, format = "%m/%d/%y")
record.table.10min$Start <- as.Date(record.table.10min$Start, format = "%m/%d/%y")
record.table.10min$End <- as.Date(record.table.10min$End, format = "%m/%d/%y")
record.table.10min$Problem1_from <- as.Date(record.table.10min$Problem1_from, format = "%m/%d/%y")
record.table.10min$Problem1_to <- as.Date(record.table.10min$Problem1_to, format = "%m/%d/%y")

# label records to drop if outside of operation date (either before start, after end, or during problem window)
record.table.10min$drop <- FALSE # create default of false
for (i in seq_len(nrow(record.table.10min))) {
  if (record.table.10min$Date[i] < record.table.10min$Start[i]) {
    record.table.10min$drop[i] <- TRUE
  } else if (record.table.10min$Date[i] > record.table.10min$End[i]) {
    record.table.10min$drop[i] <- TRUE
  } else if (!is.na(record.table.10min$Problem1_from[i]) &&
             record.table.10min$Date[i] > record.table.10min$Problem1_from[i] &&
             record.table.10min$Date[i] < record.table.10min$Problem1_to[i]) {
    # BUG FIX: the original wrote `(is.na(...) = FALSE)` -- an assignment
    # whose value is FALSE -- so records inside a camera's problem window
    # were never flagged for dropping.
    record.table.10min$drop[i] <- TRUE
  }
}
summary(record.table.10min$drop)

# exclude records outside of operation dates
record.table.10min <- record.table.10min[record.table.10min$drop == FALSE,]

# get rid of extra columns
record.table.10min <- record.table.10min[,1:12]

write.csv(record.table.10min, "Gaynor_10minrecords_June2016_to_June2017.csv", row.names = FALSE)
| /scripts/01-cleaning/05a-recordtable-all-to-10min.R | no_license | kaitlyngaynor/gorongosa-camera-traps | R | false | false | 2,681 | r | ## Code to convert complete record table to 10 minute independent records
## Original by LLB, 13/07/2018
## Modified by KG, 30/10/2018
## Get the libraries you need
library(dplyr)
library(data.table) ## tbh I don't know if you actually need this one
library(progress)
setwd("~/Dropbox/projects/GORONGOSA2/Camera Trap Grid")
record.table.all <- read.csv("Gaynor_ALLrecords_cleaned_June16_to_June17.csv", stringsAsFactors = FALSE)
##### CAMTRAPR ADAPTATION
record.table.10min <- record.table.all[record.table.all$delta.time.secs >= 600 | # because 900 seconds = 15 minutes
record.table.all$delta.time.secs == 0, ] # so that you keep the ones where delta.time.secs is 0 (these are the 'first' records)
# I realized that the original record table includes records from outside camera operation dates
# (this arises due to tall grass, with few detections, and I want to exclude these dates)
metadata <- read.csv("R/Data/Raw Data/cam_metadata_all.csv")
# merge camera start, end, and problem dates with the record table
metadata <- metadata[, 1:5]
record.table.10min <- merge(record.table.10min, metadata) # join by camera
# get the dates into date format
record.table.10min$Date <- as.Date(record.table.10min$Date, format = "%m/%d/%y")
record.table.10min$Start <- as.Date(record.table.10min$Start, format = "%m/%d/%y")
record.table.10min$End <- as.Date(record.table.10min$End, format = "%m/%d/%y")
record.table.10min$Problem1_from <- as.Date(record.table.10min$Problem1_from, format = "%m/%d/%y")
record.table.10min$Problem1_to <- as.Date(record.table.10min$Problem1_to, format = "%m/%d/%y")
# label records to drop if outside of operation date (either before start, after end, or during problem window)
record.table.10min$drop <- FALSE # create default of false
for (i in 1:nrow(record.table.10min)) {
if (record.table.10min$Date[i] < record.table.10min$Start[i]) {
record.table.10min$drop[i] <- TRUE}
else if (record.table.10min$Date[i] > record.table.10min$End[i]) {
record.table.10min$drop[i] <- TRUE}
else if ((is.na(record.table.10min$Problem1_from[i]) = FALSE) & (record.table.10min$Date[i] > record.table.10min$Problem1_from[i]) & (record.table.10min$Date[i] < record.table.10min$Problem1_to[i])) {
record.table.10min$drop[i] <- TRUE}
else {
record.table.10min$drop[i] <- FALSE}
}
summary(record.table.10min$drop)
# exclude records outside of operation dates
record.table.10min <- record.table.10min[record.table.10min$drop == FALSE,]
# get rid of extra columns
record.table.10min <- record.table.10min[,1:12]
write.csv(record.table.10min, "Gaynor_10minrecords_June2016_to_June2017.csv", row.names = FALSE)
|
#!/usr/bin/env Rscript
####################
#
# Scales a validation data set with precomputed center/scale values so it
# matches the standardization applied to the training data.
#
# Parameters (documented per the code below; the original header listed the
# center and scale files in the opposite order -- TODO confirm upstream):
# 1 - File to scale (CSV)
# 2 - File containing center values (RDS)
# 3 - File containing scale values (RDS)
# 4 - Output file with scaled dataset (CSV)
# 5 - Name of the independent (target) column, copied over unscaled
#
###################
args <- commandArgs(trailingOnly = TRUE)
validation = read.csv(args[1], check.names=FALSE)
center_values <- readRDS(paste(args[2],sep = ""))
scale_values <- readRDS(paste(args[3],sep = ""))
# Column-wise standardization of each validation column with the training
# set's center/scale values; mapply returns a numeric matrix.
validation_scaled = mapply(function(x, y, z) scale((x), center=y, scale=z), (validation), (center_values[,1]), (scale_values[,1]))
#Shift to the right in order to avoid negative values
validation_scaled <- validation_scaled + 300
independent = args[5]
# Columns that must keep their raw (unscaled) values are copied back:
# NOTE(review): assigning a non-numeric column (e.g. a character timestamp)
# into the numeric matrix coerces the whole matrix -- confirm column types.
validation_scaled[,independent] <- validation[,independent]
if("timestamp" %in% colnames(validation_scaled)){
validation_scaled[,"timestamp"] <- validation$timestamp
}
if("Core_1CPU" %in% colnames(validation_scaled)){
validation_scaled[,"Core_1CPU"] <- validation$Core_1CPU
}
if("Core_2CPU" %in% colnames(validation_scaled)){
validation_scaled[,"Core_2CPU"] <- validation$Core_2CPU
}
if("numSockets" %in% colnames(validation_scaled)){
validation_scaled[,"numSockets"] <- validation$numSockets
}
# Replace NA/Inf artifacts of the scaling (e.g. zero-variance columns) by 0.
validation_scaled[is.na(validation_scaled)] <- 0
validation_scaled[is.infinite(validation_scaled)] <- 0
validation_scaled <- round(validation_scaled,7)
write.csv(validation_scaled, file = args[4],row.names=FALSE)
| /powerModelling/prototype/configuration files/INTEL Xeon X5650/scaling_data_validation.r | permissive | bsc-renewit/d2.2 | R | false | false | 1,378 | r | #!/usr/bin/env Rscript
####################
#
# Parameters:
# 1 - File to scale
# 2 - File containing scale values
# 3 - File containing center values
# 4 - Output file with scaling dataset
#
###################
args <- commandArgs(trailingOnly = TRUE)
validation = read.csv(args[1], check.names=FALSE)
center_values <- readRDS(paste(args[2],sep = ""))
scale_values <- readRDS(paste(args[3],sep = ""))
validation_scaled = mapply(function(x, y, z) scale((x), center=y, scale=z), (validation), (center_values[,1]), (scale_values[,1]))
#Shift to the right in order to avoid negative values
validation_scaled <- validation_scaled + 300
independent = args[5]
validation_scaled[,independent] <- validation[,independent]
if("timestamp" %in% colnames(validation_scaled)){
validation_scaled[,"timestamp"] <- validation$timestamp
}
if("Core_1CPU" %in% colnames(validation_scaled)){
validation_scaled[,"Core_1CPU"] <- validation$Core_1CPU
}
if("Core_2CPU" %in% colnames(validation_scaled)){
validation_scaled[,"Core_2CPU"] <- validation$Core_2CPU
}
if("numSockets" %in% colnames(validation_scaled)){
validation_scaled[,"numSockets"] <- validation$numSockets
}
validation_scaled[is.na(validation_scaled)] <- 0
validation_scaled[is.infinite(validation_scaled)] <- 0
validation_scaled <- round(validation_scaled,7)
write.csv(validation_scaled, file = args[4],row.names=FALSE)
|
# RUnit test for the GenotypeDataByChr class: simulates per-chromosome imputed
# genotype files in a temporary directory, then exercises the accessors
# (snp annotation, scan IDs, genotype lookup by snpID) and the expected
# constructor/accessor errors. Cleans up the temporary directory at the end.
test_GenotypeDataByChr <- function(){
# Random directory and file prefix; simulateImputedGenotypeData() is assumed
# to write files named <prefix>_chr-<n>.* into the directory (TODO confirm).
directory <- file.path(tempdir(), paste(sample(c (letters, LETTERS), 10, replace=T), collapse=""))
dir.create(directory)
prefix <- paste(sample(c(letters, LETTERS), 10, replace=T), collapse="")
simulateImputedGenotypeData(directory, prefix)
bcData <- GenotypeDataByChr(directory)
# The constructor should auto-detect the base name and chromosome separator.
checkEquals(bcData@base, prefix)
checkEquals(bcData@chromSep, "_chr-")
checkEquals(getValidChromosomes(bcData), as.character(1:23))
# check snp annotation methods
chromosome <- sample(getValidChromosomes(bcData), 1)
snpAnnot <- getSnpAnnotation(bcData, chromosome)
# The accessor's annotation must match the RData file written for that
# chromosome.
snpAnnot.chk <- getobj(file.path(directory, paste(prefix, "_chr-", chromosome, "_snpAnnot.RData", sep="")))
checkEquals(pData(snpAnnot), pData(snpAnnot.chk))
# check scanID
gds <- getGenoData(bcData, 1)
scanID <- getScanID(gds)
close(gds)
checkEquals(scanID, getScanID(bcData))
# check genotypes from snpID
gds <- getGenoData(bcData, chromosome)
snpID <- getSnpID(gds)
snp <- sample(snpID, 1)
checkTrue(hasSnpID(gds, snp))
# Lookup by snpID should match a direct getGenotype() on the snp's index.
geno <- getGenotype(gds, snp=c(which(snpID == snp), 1), scan=c(1,-1), use.names=TRUE)
checkEquals(geno, getGenotypeFromSnpID(gds, snp))
close(gds)
checkTrue(hasSnpID(bcData, snp))
checkEquals(geno, getGenotypeFromSnpID(bcData, snp)[1,])
# multiple snps
# Build the expected genotype matrix by hand: two snps from chromosome 1 and
# one from chromosome 2, rows named by snpID, columns by scanID.
gds <- getGenoData(bcData, chromosome=1)
snpID <- getSnpID(gds)
snp1 <- sample(snpID, 2)
geno <- list()
for (i in snp1) {
geno[[as.character(i)]] <- getGenotype(gds, snp=c(which(snpID == i), 1), scan=c(1,-1))
}
close(gds)
gds <- getGenoData(bcData, chromosome=2)
snpID <- getSnpID(gds)
snp2 <- sample(snpID, 1)
geno[[as.character(snp2)]] <- getGenotype(gds, snp=c(which(snpID == snp2), 1), scan=c(1,-1))
geno <- matrix(unlist(geno), nrow=length(geno), byrow=TRUE, dimnames=list(names(geno), getScanID(gds)))
close(gds)
snp <- c(snp1, snp2)
checkTrue(all(hasSnpID(bcData, snp)))
checkEquals(geno, getGenotypeFromSnpID(bcData, snp))
# A snpID that does not exist anywhere must report FALSE.
checkTrue(!hasSnpID(bcData, -1))
# check accessor errors
checkException(getSnpAnnotation(bcData, -1))
# check constructor errors
checkException(GenotypeDataByChr(directory, base=substring(prefix, 1, 1)))
checkException(GenotypeDataByChr(directory, chromSep=""))
unlink(directory, recursive=T)
}
| /GWASbyChr/inst/unitTests/GenotypeDataByChr_test.R | no_license | UW-GAC/QCpipeline | R | false | false | 2,334 | r | test_GenotypeDataByChr <- function(){
directory <- file.path(tempdir(), paste(sample(c (letters, LETTERS), 10, replace=T), collapse=""))
dir.create(directory)
prefix <- paste(sample(c(letters, LETTERS), 10, replace=T), collapse="")
simulateImputedGenotypeData(directory, prefix)
bcData <- GenotypeDataByChr(directory)
checkEquals(bcData@base, prefix)
checkEquals(bcData@chromSep, "_chr-")
checkEquals(getValidChromosomes(bcData), as.character(1:23))
# check snp annotation methods
chromosome <- sample(getValidChromosomes(bcData), 1)
snpAnnot <- getSnpAnnotation(bcData, chromosome)
snpAnnot.chk <- getobj(file.path(directory, paste(prefix, "_chr-", chromosome, "_snpAnnot.RData", sep="")))
checkEquals(pData(snpAnnot), pData(snpAnnot.chk))
# check scanID
gds <- getGenoData(bcData, 1)
scanID <- getScanID(gds)
close(gds)
checkEquals(scanID, getScanID(bcData))
# check genotypes from snpID
gds <- getGenoData(bcData, chromosome)
snpID <- getSnpID(gds)
snp <- sample(snpID, 1)
checkTrue(hasSnpID(gds, snp))
geno <- getGenotype(gds, snp=c(which(snpID == snp), 1), scan=c(1,-1), use.names=TRUE)
checkEquals(geno, getGenotypeFromSnpID(gds, snp))
close(gds)
checkTrue(hasSnpID(bcData, snp))
checkEquals(geno, getGenotypeFromSnpID(bcData, snp)[1,])
# multiple snps
gds <- getGenoData(bcData, chromosome=1)
snpID <- getSnpID(gds)
snp1 <- sample(snpID, 2)
geno <- list()
for (i in snp1) {
geno[[as.character(i)]] <- getGenotype(gds, snp=c(which(snpID == i), 1), scan=c(1,-1))
}
close(gds)
gds <- getGenoData(bcData, chromosome=2)
snpID <- getSnpID(gds)
snp2 <- sample(snpID, 1)
geno[[as.character(snp2)]] <- getGenotype(gds, snp=c(which(snpID == snp2), 1), scan=c(1,-1))
geno <- matrix(unlist(geno), nrow=length(geno), byrow=TRUE, dimnames=list(names(geno), getScanID(gds)))
close(gds)
snp <- c(snp1, snp2)
checkTrue(all(hasSnpID(bcData, snp)))
checkEquals(geno, getGenotypeFromSnpID(bcData, snp))
checkTrue(!hasSnpID(bcData, -1))
# check accessor errors
checkException(getSnpAnnotation(bcData, -1))
# check constructor errors
checkException(GenotypeDataByChr(directory, base=substring(prefix, 1, 1)))
checkException(GenotypeDataByChr(directory, chromSep=""))
unlink(directory, recursive=T)
}
|
# STARS - Summer 2021 : Data Visualization With R #############################
rm(list=ls())
# Be sure to adjust the file path to your working directory:
setwd("/Users/mikeseese/Desktop/STARS - Summer 2021/Stars_R_WorkingDirectory")
## Loading Necessary Packages ##################################################
library("tidyverse")
library("RCurl")
library("ggridges")
library("scales")
library("RColorBrewer")
library("COVID19")
library("reshape")
library("coefplot")
library("sjPlot")
library("broom")
## Why do we need to visualize data? ###########################################
### Example 1: We want to tell stories and share results #######################
# Let's get some DW-Nominate Data from Hall, et al. (https://voteview.com/)
a <- getURL('https://voteview.com/static/data/out/members/HSall_members.csv')
b <- read.csv(textConnection(a), header= TRUE)
# Clean up the data (focus on House + 110th Congress onward)
c <- b %>% filter(chamber == "House" & congress > 89)
# Plot the D1 Nominate scores
x <- ggplot(c, aes(x = nominate_dim1, y = as.factor(congress))) +
geom_density_ridges(scale = 2, aes(fill = as.factor(party_code))) +
labs(title = "The Growing Ideological Divide in the U.S. House",
x = "DW Nominate - Dimension 1",
y = "Congress",
caption = "Data: Lewis, Poole, Rosenthal, Boche, Rudkin, and Sonnet (2021)") +
scale_fill_manual(name = "Party",
values = c("#0000FFA0", "#FF0000A0"),
labels = c("Democract", "Republican")) +
theme_minimal()
x
ggsave("plot1.pdf", x, width = 11, height = 8.5)
### Example 2: We want to make sense of a lot of data ##########################
# Clean data to get party frequencies
d <- c %>% group_by(congress, party_code) %>% summarize(pfreq=n())
table(d$party_code)
# Deal with the independents
d$party_code[d$party_code == 328] <- 300
d$party_code[d$party_code == 329] <- 300
# Plot frequencies as stacked bars
x <- ggplot(d, aes(x = as.factor(congress), y = pfreq, fill = as.factor(party_code))) +
geom_bar(position = "fill", stat = "identity") +
labs(title = "Party Composition of the U.S. House, 1967 - Present",
x = "Congress",
y = "Percentage of Members by Party",
caption = "Data: Lewis, Poole, Rosenthal, Boche, Rudkin, and Sonnet (2021)") +
scale_fill_manual(name = "Party",
values = c("#0000FFA0", "#FF0000A0", "#999999"),
labels = c("Democract", "Republican", "Independent")) +
# geom_hline(yintercept = 0.5) +
theme_minimal()
x
ggsave("plot2.pdf", x, width = 11, height = 8.5)
rm(list=ls())
## Bad and Better Charts #######################################################
### Bad Plot ###################################################################
# Let's get some data from from the NCHS:
# https://catalog.data.gov/dataset/nchs-death-rates-and-life-expectancy-at-birth
# Get the data and clean it up a bit
a <- getURL('https://data.cdc.gov/api/views/w9j2-ggv5/rows.csv?accessType=DOWNLOAD')
b <- read.csv(textConnection(a), header= TRUE)
c <- b %>% filter(Race == "All Races" & Sex == "Both Sexes")
d <- b %>% filter(Race == "White" & Sex == "Both Sexes")
e <- b %>% filter(Race == "Black" & Sex == "Both Sexes")
# Here's a terrible plot in base graphics
pdf("plot3.pdf", width = 11, height = 8.5)
plot(c$Year, c$Average.Life.Expectancy..Years., type = "l", cex.axis = 2)
lines(d$Year, d$Average.Life.Expectancy..Years.)
lines(e$Year, e$Average.Life.Expectancy..Years., col="green", lwd=3)
grid(nx = 60, col = "darkgray", lty = 1)
dev.off()
### Better Plot ################################################################
# Clean up the previous plot some
pdf("plot4.pdf", width = 11, height = 8.5)
par(oma=c(1.5,0,0,0))
plot(c$Year, c$Average.Life.Expectancy..Years., type = "l",
xlab = "Year",
ylab = "Average Life Expectancy at Birth",
ylim = c(20, 80),
xaxp = c(1900, 2020, 12))
lines(d$Year, d$Average.Life.Expectancy..Years., col = "darkgoldenrod")
lines(e$Year, e$Average.Life.Expectancy..Years., col = "darkcyan")
abline(v = 1918, col = "gray50", lty = 2)
text(1915.5, 65, "1918 Flu Pandemic",
pos = 4, srt = 90, col = "gray50", cex = 0.75)
title(main = "U.S. Life Expectancy by Race and Year, 1900 - 2018")
legend("bottomright", inset = 0.025,
c("Total Population", "White", "Black"),
lwd=c(2, 2, 2),
col = c("black", "darkgoldenrod", "darkcyan"),
xjust = 0,
cex = 0.75)
mtext("Data: U.S. Centers for Disease Control and Prevention, National Center for Health Statistics",
side = 1, line=0, adj=0.05, outer = TRUE, cex = 0.75)
dev.off()
rm(list=ls())
### Deceptive Charts ###########################################################
# Make up some data
x <- LETTERS[1:5]
y <- c(55, 56, 64, 57, 59)
a <- data.frame(x, y)
# Underscore an extreme value
p1 <- ggplot(a, aes(x = x, y = y)) +
geom_bar(stat = "identity") +
geom_text(aes(label = y), vjust = -0.25, size = 4) +
scale_y_continuous(limits = c(50,65), oob = rescale_none) +
theme_classic() +
theme(axis.title = element_blank()) +
ggtitle("This chart emphasizes the extreme value of C")
p1
ggsave("plot5.pdf", p1, width = 11, height = 8.5)
# Underscore low variation
p2 <- ggplot(a, aes(x = x, y = y)) +
geom_bar(stat = "identity") +
geom_text(aes(label = y), vjust = -0.25, size = 4) +
scale_y_continuous(limits = c(0,100)) +
theme_classic() +
theme(axis.title = element_blank()) +
ggtitle("This chart implies more uniformity")
p2
ggsave("plot6.pdf", p2, width = 11, height = 8.5)
# Equivalence
p3 <- ggplot(a, aes(x = x, y = y)) +
geom_bar(stat = "identity") +
geom_text(aes(label = y), vjust = -0.25, size = 4) +
scale_y_continuous(limits = c(0,2500)) +
theme_classic() +
theme(axis.title = element_blank()) +
ggtitle("This chart gives the perception of equivalence")
p3
ggsave("plot7.pdf", p3, width = 11, height = 8.5)
# Why we don't use pie charts
p <- LETTERS[1:10]
q <- c(2, 3, 2, 0, 4, 18, 31, 20, 8, 12)
b <- data.frame(p, q)
clr <- brewer.pal(10, "Set3")
pdf("plot8.pdf", width = 11, height = 8.5)
par(mar=c(0,0,0,0))
pie(x = b$q[b$q != 0], labels = b$p[b$q != 0], col = clr)
dev.off()
pdf("plot13.pdf", width = 11, height = 8.5)
barplot(q, names.arg = p)
dev.off()
rm(list=ls())
## Base Graphics ###############################################################
# Bring in some data from WZB-IPI covid Project
# Find the website here: https://wzb-ipi.github.io/corona/
# And the paper here: https://osf.io/preprints/socarxiv/ub3zd/
a <- read.csv("wzb_covid_june2021.csv", header = TRUE)
### Histograms, Densities, and Rug Plots #######################################
# What happens if we just use the command "plot"?
plot(x = a$cases_cum)
# There are some wild outliers. What countries are these?
plot(x = a$cases_cum)
text(a$cases_cum[a$cases_cum > 15000000],
labels = a$X[a$cases_cum > 15000000],
cex = 0.75, pos = 1)
# Interesting, but not really what we want let's try:
hist(a$cases_cum)
hist(a$cases_cum, breaks = 50)
# I can't read the exponential notation, so lets try log:
a$cases_cum_log <- log(a$cases_cum)
hist(a$cases_cum_log)
# Let's add the density now
d_ccl <- density(a$cases_cum_log)
hist(a$cases_cum_log, freq = FALSE)
lines(d_ccl, col = "darkcyan")
plot(d_ccl, col = "darkcyan")
rug(a$cases_cum_log)
# Plot matrix using mfrow / mfcol (rows, columns)
par(mfrow = c(1, 2))
## Plot 1
hist(a$cases_cum_log, freq = FALSE)
lines(d_ccl, col = "darkcyan")
## Plot 2
plot(d_ccl, col = "darkcyan")
rug(a$cases_cum_log)
## Clear
dev.off()
### Box Plots ##################################################################
boxplot(a$cases_cum_log)
boxplot(a$cases_cum_log ~ a$continent)
boxplot(a$cases_cum_log ~ a$continent, horizontal = TRUE)
### Bar Plots ##################################################################
# How many countries are there in each region?
b <- table(a$region)
barplot(b[c(-1)])
# We want to exclude the un-categorized countries
# Wtf... R isn't plotting all the names? Because they don't fit...
barplot(b[c(-1)], cex.names = 0.5)
# Try something else: Filter down to just MENA countries
c <- a %>% filter(region == "Middle East & North Africa")
# New bar plot
barplot(c$pop_density, names.arg = c$geoid2, las = 2, cex.names = 0.75)
# Can we sort these bars?
d <- c[order(c$pop_density, decreasing = TRUE),]
barplot(d$pop_density, names.arg = d$geoid2, las = 2, cex.names = 0.75)
### Scatter Plots ##############################################################
plot(a$air_travel, a$cases_cum_log)
# Wow, that was easy. But let's make it nicer:
plot(a$air_travel, a$cases_cum_log,
xlab = "Air Travel",
ylab = "Cummulative Covid Cases (Log)")
abline(lm(a$cases_cum_log ~ a$air_travel), col = "darkcyan")
mtext("Data: https://wzb-ipi.github.io/corona/", side = 3)
title(main = "More Air Travel is Associated with Higher Covid Case Counts")
# What if we want to know the countries?
plot(a$air_travel, a$cases_cum_log,
col = "white",
xlab = "Air Travel",
ylab = "Cummulative Covid Cases (Log)")
text(a$air_travel, a$cases_cum_log, labels = a$geoid2, cex = 0.5)
abline(lm(a$cases_cum_log ~ a$air_travel), col = "darkcyan")
mtext("Data: https://wzb-ipi.github.io/corona/", side = 3)
title(main = "More Air Travel is Associated with Higher Covid Case Counts")
# But What about a confidence interval? We need to run the model and predict:
# Fit the simple OLS model and compute a 95% confidence band over the
# observed range of the predictor (previously hard-coded as 5..21, which
# silently breaks if the data change).
model <- lm(cases_cum_log ~ air_travel, data = a)
xvalues <- data.frame(air_travel = seq(min(a$air_travel, na.rm = TRUE),
                                       max(a$air_travel, na.rm = TRUE),
                                       length.out = 100))
predictions <- predict(model, newdata = xvalues, interval = "confidence")
# Plot invisible points first, then overlay ISO labels so countries are legible.
plot(a$air_travel, a$cases_cum_log,
     col = "white",
     xlab = "Air Travel",
     ylab = "Cumulative Covid Cases (Log)")
text(a$air_travel, a$cases_cum_log, labels = a$geoid2, cex = 0.5)
abline(lm(a$cases_cum_log ~ a$air_travel), col = "darkcyan")
# Dashed gray lines trace the lower (col 2) and upper (col 3) confidence bounds.
lines(xvalues[,1], predictions[,2], col = "gray", lty = 2)
lines(xvalues[,1], predictions[,3], col = "gray", lty = 2)
mtext("Data: https://wzb-ipi.github.io/corona/", side = 3)
title(main = "More Air Travel is Associated with Higher Covid Case Counts")
dev.off()
rm(list=setdiff(ls(), "a"))
## ggplot: The Grammar of Graphics #############################################
### Distributions ##############################################################
# Violin plots
table(a$continent, exclude = NULL)
gg1 <- ggplot(a, aes(x = continent, y = gov_effect, fill = continent)) +
geom_violin() +
geom_jitter(position=position_jitter(0.1))
gg1
gg2 <- ggplot(a, aes(x = continent, y = deaths_cum_log, fill = continent)) +
geom_violin() +
geom_boxplot(width = 0.25)
gg2
# Density plots
gg3 <- ggplot(a, aes(x = gdp_pc)) +
geom_density()
gg3
gg4 <- ggplot(a, aes(x = gdp_pc)) +
geom_density(aes(color = continent))
gg4
gg5 <- ggplot(a, aes(x = gdp_pc)) +
geom_density() +
facet_wrap(~ continent)
gg5
### Line / Time Series #########################################################
# Some other Covid data
# Documentation at: https://cran.r-project.org/web/packages/COVID19/readme/README.html
c <- covid19(c("US"), level = 3)
d <- c %>% filter(administrative_area_level_3 == "San Diego")
gg6 <- ggplot(d, aes(x = date, y = confirmed)) +
geom_line()
gg6
gg7 <- ggplot(d) +
geom_line(aes(x = date, y = deaths))
gg7
# What if we wanted daily deaths?
d <- d %>% mutate(d_deaths = deaths - lag(deaths))
gg8 <- ggplot(d) +
geom_line(aes(x = date, y = d_deaths)) +
scale_x_date(date_labels = "%b-%Y", breaks = breaks_pretty(20)) +
scale_y_continuous(breaks = breaks_pretty(10)) +
theme(axis.text.x = element_text(angle = 45))
gg8
### Bar Plots ##################################################################
# Let's look at 1 July in the 4 states that I've lived in for more than a year:
e <- c %>% filter(administrative_area_level_2 == "Hawaii" |
administrative_area_level_2 == "California" |
administrative_area_level_2 == "Massachusetts" |
administrative_area_level_2 == "Arizona")
e <- e %>% filter(date == "2021-07-01")
# And get mean cumulative case counts by state...
# Recall that the standard error is the SD / sqrt(n)
f <- e %>% data.frame() %>%
select(state = administrative_area_level_2, confirmed)
g <- f %>% group_by(state) %>%
summarize(n = n(),
mean = mean(confirmed),
sd = sd(confirmed)) %>%
mutate(se = sd/sqrt(n))
# Now we can plot!
gg9 <- ggplot(g) +
geom_bar(aes(x = state, y = mean), stat = "identity") +
geom_errorbar(aes(x = state, ymin = mean-se, ymax = mean+se),
width=0.1, colour="darkcyan", size=0.75)
gg9
# Wow, those error bars are HUGE! We should double check the distributions:
gg10 <- ggplot(f, aes(x = state, y = confirmed, fill = state)) +
geom_violin() +
geom_point()
gg10
# Looks great, but isn't this deceptive? Comparing HI and CA is prob not ok...
# Let's normalize the case counts by the population:
h <- e %>% mutate(mconfirmed = confirmed / population) %>%
data.frame() %>%
select(state = administrative_area_level_2, mconfirmed) %>%
group_by(state) %>%
summarize(n = n(),
mean = mean(mconfirmed),
sd = sd(mconfirmed)) %>%
mutate(se = sd/sqrt(n))
# Now we can re-use the code from gg9, and just sub in the new dataset:
gg11 <- ggplot(h) +
geom_bar(aes(x = state, y = mean), stat = "identity") +
geom_errorbar(aes(x = state, ymin = mean-se, ymax = mean+se),
width=0.1, colour="darkcyan", size=0.75)
gg11
ggsave("plot9.pdf", gg11, width = 11, height = 8.5)
rm(list=setdiff(ls(), "a"))
### Dot Plots ##################################################################
# Let's check out some dotplots
# Using some Pew data: What % of surveyed respondents in the lead up to the 2020
# election reported [ISSUE] as very important to their vote?
b <- read.csv("pew_data.csv")
# We have a problem though! Our data is wide, and we need it to be long...
c <- melt(b, id = "issues")
gg12 <- ggplot(c, aes(x = value, y = issues)) +
geom_point(aes(color = variable))
gg12
# But that's super hard to read, let's try to sort these
gg13 <- ggplot(c, aes(x = value, y = reorder(issues, value))) +
geom_point(aes(color = variable))
gg13
# Great, but this is still sort of hard to read, and the colors don't make sense:
gg14 <- ggplot(c, aes(x = value, y = reorder(issues, value))) +
geom_line(aes(group = issues), color = "#999999") +
geom_point(aes(color = variable), size = 5) +
scale_color_manual(values = c("#0000FF", "#FF0000", "#999999"),
labels = c("Biden Voters", "Trump Voters", "All Voters")) +
geom_text(aes(label = sprintf("%1.0f%%", value), color = variable),
size = 2, nudge_y = 0.25) +
scale_x_continuous(breaks = NULL) +
labs(title = "Biden and Trump Voters Diverge on Which Issues Matter Most in 2020 Election",
subtitle = "Results of July/August 2020 Pew Research Poll of Registered Voters",
x = "Percent of Registered Voters Saying Issue is Very Important to Their Vote in the 2020 Election.",
y = "Issue Area") +
theme_minimal() +
theme(legend.title = element_blank())
gg14
ggsave("plot10.pdf", gg14, width = 11, height = 8.5)
# Keep in mind that red tends to bleed in video...
# Try: https://htmlcolorcodes.com and https://color.adobe.com/create/color-wheel
rm(list=setdiff(ls(), "a"))
### Scatter Plots ##############################################################
plot(a$air_travel, a$deaths_cum_log)
abline(lm(a$deaths_cum_log ~ a$air_travel), col = "blue")
b <- a %>% filter(region != "")
gg15 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log, color = region)) +
geom_point()
gg15
# Let's try to scale by case count
gg16 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log,
size = cases_cum,
color = region)) +
scale_size_continuous(range = c(1, 10), breaks = pretty_breaks()) +
geom_point()
gg16
# And now let's try to add an ols line and ci
gg17 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log)) +
geom_point() +
# geom_smooth(method = loess) +
geom_smooth(method = lm)
gg17
# What happens if we add the color back in?
gg18 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log,
color = region)) +
geom_point() +
geom_smooth(method = lm)
gg18
# It's calculating the interaction term!
# But that was hard af to read. Let's try:
gg19 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log)) +
geom_point() +
geom_smooth(method = lm) +
facet_wrap(~ region)
gg19
# Let's go back to plot gg17... can you clean it up so it looks pretty?
# Or better yet, can you make it look like the plot we made with base graphics?
# Scatter of air travel vs. cumulative (log) deaths with an OLS fit band.
gg20 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log)) +
  geom_point() +
  geom_smooth(method = lm) +
  # FIX: the y variable is cumulative *deaths*, but the title said
  # "Case Counts" (copy-paste from the cases plot); also corrected the
  # "Cummulative" typo in the axis label.
  labs(title = "More Air Travel is Associated with Higher Covid Death Counts",
       x = "Air Travel",
       y = "Cumulative Covid Deaths (Log)") +
  theme_minimal()
gg20
ggsave("plot11.pdf", gg20, width = 11, height = 8.5)
# Almost there, but I want ISO codes instead of points
gg20 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log, label = geoid2)) +
  # check_overlap drops labels that would overprint earlier ones.
  geom_text(size = 2, check_overlap = TRUE) +
  geom_smooth(method = lm) +  # removed a stray trailing comma
  labs(title = "More Air Travel is Associated with Higher Covid Death Counts",
       x = "Air Travel",
       y = "Cumulative Covid Deaths (Log)") +
  theme_minimal()
gg20
### Coefficient Plots ##########################################################
# Let's run a regression:
m <- lm(deaths_cum_log ~ cases_cum_log + air_travel + gov_effect + share_older, data = b)
m
coefplot(m, intercept = FALSE)
# Wow, that was easy
# Can we make it harder? Let's try to replicate this manually in ggplot
z1 <- tidy(m, conf.int = TRUE, conf.level = .95)
z1 <- z1 %>% dplyr::rename(low95 = conf.low, high95 = conf.high)
z2 <- tidy(m, conf.int = TRUE, conf.level = .9)
z2 <- z2 %>% dplyr::rename(low90 = conf.low, high90 = conf.high)
z3 <- z1 %>% full_join(z2)
z3$term <- dplyr::recode(z3$term,
"(Intercept)" = "Intercept",
"cases_cum_log" = "Cumulative Cases (Log)",
"air_travel" = "Air Travel",
"gov_effect" = "Government Effectiveness",
"share_older" = "Share of Older Population")
z3$term <- factor(z3$term,
levels = c("Intercept",
"Cumulative Cases (Log)",
"Air Travel",
"Government Effectiveness",
"Share of Older Population"),
ordered = TRUE)
# Now z3 is all the data we need for a dot and whisker plot
gg21 <- ggplot(z3 %>% filter(term != "Intercept")) +
geom_hline(yintercept=0, lty="11", colour="grey30") +
geom_errorbar(aes(term, ymin = low95, ymax = high95), width=0.1) +
geom_errorbar(aes(term, ymin = low90, ymax = high90), lwd = 1.15, width=0) +
geom_point(aes(term, estimate)) +
labs(title = "Results of OLS Regression",
y = "Point Estimate",
x = "Variable") +
theme_light()
gg21
ggsave("plot12.pdf", gg21, width = 11, height = 8.5)
### Margins Plots ##############################################################
# Let's redefine the model and the sjPlot packace to estimate margins
m <- lm(deaths_cum_log ~ cases_cum_log + air_travel + gov_effect + share_older + as.factor(continent), data = b)
m
coefplot(m, intercept = FALSE)
gg22 <- plot_model(m, type = "pred",
terms = "continent")
gg22
gg23 <- plot_model(m, type = "pred",
terms = "air_travel")
gg23
gg24 <- plot_model(m, type = "pred",
terms = c("air_travel", "continent"))
gg24
rm(list=ls())
# And that's it!
| /DataVisualization.R | no_license | mfseese/STARS_Summer2021 | R | false | false | 19,864 | r | # STARS - Summer 2021 : Data Visualization With R #############################
# NOTE(review): `rm(list = ls())` and a machine-specific `setwd()` are
# discouraged in shared scripts (they clobber the user's session and only
# work on one machine); kept as-is since this is a classroom script.
rm(list=ls())
# Be sure to adjust the file path to your working directory:
setwd("/Users/mikeseese/Desktop/STARS - Summer 2021/Stars_R_WorkingDirectory")
## Loading Necessary Packages ##################################################
library("tidyverse")    # pipes, dplyr verbs, ggplot2, tidyr
library("RCurl")        # getURL() to download CSVs over HTTPS
library("ggridges")     # geom_density_ridges() ridgeline plots
library("scales")       # pretty_breaks(), breaks_pretty(), rescale_none
library("RColorBrewer") # brewer.pal() color palettes
library("COVID19")      # covid19() data access
library("reshape")      # melt() for wide-to-long reshaping
library("coefplot")     # coefplot() regression coefficient plots
library("sjPlot")       # plot_model() marginal-effects plots
library("broom")        # tidy() model output
## Why do we need to visualize data? ###########################################
### Example 1: We want to tell stories and share results #######################
# Let's get some DW-Nominate Data from Hall, et al. (https://voteview.com/)
a <- getURL('https://voteview.com/static/data/out/members/HSall_members.csv')
b <- read.csv(textConnection(a), header= TRUE)
# Clean up the data (focus on House + 110th Congress onward)
c <- b %>% filter(chamber == "House" & congress > 89)
# Plot the D1 Nominate scores
x <- ggplot(c, aes(x = nominate_dim1, y = as.factor(congress))) +
geom_density_ridges(scale = 2, aes(fill = as.factor(party_code))) +
labs(title = "The Growing Ideological Divide in the U.S. House",
x = "DW Nominate - Dimension 1",
y = "Congress",
caption = "Data: Lewis, Poole, Rosenthal, Boche, Rudkin, and Sonnet (2021)") +
scale_fill_manual(name = "Party",
values = c("#0000FFA0", "#FF0000A0"),
labels = c("Democract", "Republican")) +
theme_minimal()
x
ggsave("plot1.pdf", x, width = 11, height = 8.5)
### Example 2: We want to make sense of a lot of data ##########################
# Clean data to get party frequencies
d <- c %>% group_by(congress, party_code) %>% summarize(pfreq=n())
table(d$party_code)
# Deal with the independents
d$party_code[d$party_code == 328] <- 300
d$party_code[d$party_code == 329] <- 300
# Plot frequencies as stacked bars
x <- ggplot(d, aes(x = as.factor(congress), y = pfreq, fill = as.factor(party_code))) +
geom_bar(position = "fill", stat = "identity") +
labs(title = "Party Composition of the U.S. House, 1967 - Present",
x = "Congress",
y = "Percentage of Members by Party",
caption = "Data: Lewis, Poole, Rosenthal, Boche, Rudkin, and Sonnet (2021)") +
scale_fill_manual(name = "Party",
values = c("#0000FFA0", "#FF0000A0", "#999999"),
labels = c("Democract", "Republican", "Independent")) +
# geom_hline(yintercept = 0.5) +
theme_minimal()
x
ggsave("plot2.pdf", x, width = 11, height = 8.5)
rm(list=ls())
## Bad and Better Charts #######################################################
### Bad Plot ###################################################################
# Let's get some data from from the NCHS:
# https://catalog.data.gov/dataset/nchs-death-rates-and-life-expectancy-at-birth
# Get the data and clean it up a bit
a <- getURL('https://data.cdc.gov/api/views/w9j2-ggv5/rows.csv?accessType=DOWNLOAD')
b <- read.csv(textConnection(a), header= TRUE)
c <- b %>% filter(Race == "All Races" & Sex == "Both Sexes")
d <- b %>% filter(Race == "White" & Sex == "Both Sexes")
e <- b %>% filter(Race == "Black" & Sex == "Both Sexes")
# Here's a terrible plot in base graphics
pdf("plot3.pdf", width = 11, height = 8.5)
plot(c$Year, c$Average.Life.Expectancy..Years., type = "l", cex.axis = 2)
lines(d$Year, d$Average.Life.Expectancy..Years.)
lines(e$Year, e$Average.Life.Expectancy..Years., col="green", lwd=3)
grid(nx = 60, col = "darkgray", lty = 1)
dev.off()
### Better Plot ################################################################
# Clean up the previous plot some
pdf("plot4.pdf", width = 11, height = 8.5)
par(oma=c(1.5,0,0,0))
plot(c$Year, c$Average.Life.Expectancy..Years., type = "l",
xlab = "Year",
ylab = "Average Life Expectancy at Birth",
ylim = c(20, 80),
xaxp = c(1900, 2020, 12))
lines(d$Year, d$Average.Life.Expectancy..Years., col = "darkgoldenrod")
lines(e$Year, e$Average.Life.Expectancy..Years., col = "darkcyan")
abline(v = 1918, col = "gray50", lty = 2)
text(1915.5, 65, "1918 Flu Pandemic",
pos = 4, srt = 90, col = "gray50", cex = 0.75)
title(main = "U.S. Life Expectancy by Race and Year, 1900 - 2018")
legend("bottomright", inset = 0.025,
c("Total Population", "White", "Black"),
lwd=c(2, 2, 2),
col = c("black", "darkgoldenrod", "darkcyan"),
xjust = 0,
cex = 0.75)
mtext("Data: U.S. Centers for Disease Control and Prevention, National Center for Health Statistics",
side = 1, line=0, adj=0.05, outer = TRUE, cex = 0.75)
dev.off()
rm(list=ls())
### Deceptive Charts ###########################################################
# Make up some data
x <- LETTERS[1:5]
y <- c(55, 56, 64, 57, 59)
a <- data.frame(x, y)
# Underscore an extreme value
p1 <- ggplot(a, aes(x = x, y = y)) +
geom_bar(stat = "identity") +
geom_text(aes(label = y), vjust = -0.25, size = 4) +
scale_y_continuous(limits = c(50,65), oob = rescale_none) +
theme_classic() +
theme(axis.title = element_blank()) +
ggtitle("This chart emphasizes the extreme value of C")
p1
ggsave("plot5.pdf", p1, width = 11, height = 8.5)
# Underscore low variation
p2 <- ggplot(a, aes(x = x, y = y)) +
geom_bar(stat = "identity") +
geom_text(aes(label = y), vjust = -0.25, size = 4) +
scale_y_continuous(limits = c(0,100)) +
theme_classic() +
theme(axis.title = element_blank()) +
ggtitle("This chart implies more uniformity")
p2
ggsave("plot6.pdf", p2, width = 11, height = 8.5)
# Equivalence
p3 <- ggplot(a, aes(x = x, y = y)) +
geom_bar(stat = "identity") +
geom_text(aes(label = y), vjust = -0.25, size = 4) +
scale_y_continuous(limits = c(0,2500)) +
theme_classic() +
theme(axis.title = element_blank()) +
ggtitle("This chart gives the perception of equivalence")
p3
ggsave("plot7.pdf", p3, width = 11, height = 8.5)
# Why we don't use pie charts
p <- LETTERS[1:10]
q <- c(2, 3, 2, 0, 4, 18, 31, 20, 8, 12)
b <- data.frame(p, q)
clr <- brewer.pal(10, "Set3")
pdf("plot8.pdf", width = 11, height = 8.5)
par(mar=c(0,0,0,0))
pie(x = b$q[b$q != 0], labels = b$p[b$q != 0], col = clr)
dev.off()
pdf("plot13.pdf", width = 11, height = 8.5)
barplot(q, names.arg = p)
dev.off()
rm(list=ls())
## Base Graphics ###############################################################
# Bring in some data from WZB-IPI covid Project
# Find the website here: https://wzb-ipi.github.io/corona/
# And the paper here: https://osf.io/preprints/socarxiv/ub3zd/
a <- read.csv("wzb_covid_june2021.csv", header = TRUE)
### Histograms, Densities, and Rug Plots #######################################
# What happens if we just use the command "plot"?
plot(x = a$cases_cum)
# There are some wild outliers. What countries are these?
plot(x = a$cases_cum)
text(a$cases_cum[a$cases_cum > 15000000],
labels = a$X[a$cases_cum > 15000000],
cex = 0.75, pos = 1)
# Interesting, but not really what we want let's try:
hist(a$cases_cum)
hist(a$cases_cum, breaks = 50)
# I can't read the exponential notation, so lets try log:
a$cases_cum_log <- log(a$cases_cum)
hist(a$cases_cum_log)
# Let's add the density now
d_ccl <- density(a$cases_cum_log)
hist(a$cases_cum_log, freq = FALSE)
lines(d_ccl, col = "darkcyan")
plot(d_ccl, col = "darkcyan")
rug(a$cases_cum_log)
# Plot matrix using mfrow / mfcol (rows, columns)
par(mfrow = c(1, 2))
## Plot 1
hist(a$cases_cum_log, freq = FALSE)
lines(d_ccl, col = "darkcyan")
## Plot 2
plot(d_ccl, col = "darkcyan")
rug(a$cases_cum_log)
## Clear
dev.off()
### Box Plots ##################################################################
boxplot(a$cases_cum_log)
boxplot(a$cases_cum_log ~ a$continent)
boxplot(a$cases_cum_log ~ a$continent, horizontal = TRUE)
### Bar Plots ##################################################################
# How many countries are there in each region?
b <- table(a$region)
barplot(b[c(-1)])
# We want to exclude the un-categorized countries
# Wtf... R isn't plotting all the names? Because they don't fit...
barplot(b[c(-1)], cex.names = 0.5)
# Try something else: Filter down to just MENA countries
c <- a %>% filter(region == "Middle East & North Africa")
# New bar plot
barplot(c$pop_density, names.arg = c$geoid2, las = 2, cex.names = 0.75)
# Can we sort these bars?
d <- c[order(c$pop_density, decreasing = TRUE),]
barplot(d$pop_density, names.arg = d$geoid2, las = 2, cex.names = 0.75)
### Scatter Plots ##############################################################
plot(a$air_travel, a$cases_cum_log)
# Wow, that was easy. But let's make it nicer:
plot(a$air_travel, a$cases_cum_log,
xlab = "Air Travel",
ylab = "Cummulative Covid Cases (Log)")
abline(lm(a$cases_cum_log ~ a$air_travel), col = "darkcyan")
mtext("Data: https://wzb-ipi.github.io/corona/", side = 3)
title(main = "More Air Travel is Associated with Higher Covid Case Counts")
# What if we want to know the countries?
plot(a$air_travel, a$cases_cum_log,
col = "white",
xlab = "Air Travel",
ylab = "Cummulative Covid Cases (Log)")
text(a$air_travel, a$cases_cum_log, labels = a$geoid2, cex = 0.5)
abline(lm(a$cases_cum_log ~ a$air_travel), col = "darkcyan")
mtext("Data: https://wzb-ipi.github.io/corona/", side = 3)
title(main = "More Air Travel is Associated with Higher Covid Case Counts")
# But What about a confidence interval? We need to run the model and predict:
# Fit the simple OLS model and compute a 95% confidence band over the
# observed range of the predictor (previously hard-coded as 5..21, which
# silently breaks if the data change).
model <- lm(cases_cum_log ~ air_travel, data = a)
xvalues <- data.frame(air_travel = seq(min(a$air_travel, na.rm = TRUE),
                                       max(a$air_travel, na.rm = TRUE),
                                       length.out = 100))
predictions <- predict(model, newdata = xvalues, interval = "confidence")
# Plot invisible points first, then overlay ISO labels so countries are legible.
plot(a$air_travel, a$cases_cum_log,
     col = "white",
     xlab = "Air Travel",
     ylab = "Cumulative Covid Cases (Log)")
text(a$air_travel, a$cases_cum_log, labels = a$geoid2, cex = 0.5)
abline(lm(a$cases_cum_log ~ a$air_travel), col = "darkcyan")
# Dashed gray lines trace the lower (col 2) and upper (col 3) confidence bounds.
lines(xvalues[,1], predictions[,2], col = "gray", lty = 2)
lines(xvalues[,1], predictions[,3], col = "gray", lty = 2)
mtext("Data: https://wzb-ipi.github.io/corona/", side = 3)
title(main = "More Air Travel is Associated with Higher Covid Case Counts")
dev.off()
rm(list=setdiff(ls(), "a"))
## ggplot: The Grammar of Graphics #############################################
### Distributions ##############################################################
# Violin plots
table(a$continent, exclude = NULL)
gg1 <- ggplot(a, aes(x = continent, y = gov_effect, fill = continent)) +
geom_violin() +
geom_jitter(position=position_jitter(0.1))
gg1
gg2 <- ggplot(a, aes(x = continent, y = deaths_cum_log, fill = continent)) +
geom_violin() +
geom_boxplot(width = 0.25)
gg2
# Density plots
gg3 <- ggplot(a, aes(x = gdp_pc)) +
geom_density()
gg3
gg4 <- ggplot(a, aes(x = gdp_pc)) +
geom_density(aes(color = continent))
gg4
gg5 <- ggplot(a, aes(x = gdp_pc)) +
geom_density() +
facet_wrap(~ continent)
gg5
### Line / Time Series #########################################################
# Some other Covid data
# Documentation at: https://cran.r-project.org/web/packages/COVID19/readme/README.html
c <- covid19(c("US"), level = 3)
d <- c %>% filter(administrative_area_level_3 == "San Diego")
gg6 <- ggplot(d, aes(x = date, y = confirmed)) +
geom_line()
gg6
gg7 <- ggplot(d) +
geom_line(aes(x = date, y = deaths))
gg7
# What if we wanted daily deaths?
d <- d %>% mutate(d_deaths = deaths - lag(deaths))
gg8 <- ggplot(d) +
geom_line(aes(x = date, y = d_deaths)) +
scale_x_date(date_labels = "%b-%Y", breaks = breaks_pretty(20)) +
scale_y_continuous(breaks = breaks_pretty(10)) +
theme(axis.text.x = element_text(angle = 45))
gg8
### Bar Plots ##################################################################
# Let's look at 1 July in the 4 states that I've lived in for more than a year:
e <- c %>% filter(administrative_area_level_2 == "Hawaii" |
administrative_area_level_2 == "California" |
administrative_area_level_2 == "Massachusetts" |
administrative_area_level_2 == "Arizona")
e <- e %>% filter(date == "2021-07-01")
# And get mean cumulative case counts by state...
# Recall that the standard error is the SD / sqrt(n)
f <- e %>% data.frame() %>%
select(state = administrative_area_level_2, confirmed)
g <- f %>% group_by(state) %>%
summarize(n = n(),
mean = mean(confirmed),
sd = sd(confirmed)) %>%
mutate(se = sd/sqrt(n))
# Now we can plot!
gg9 <- ggplot(g) +
geom_bar(aes(x = state, y = mean), stat = "identity") +
geom_errorbar(aes(x = state, ymin = mean-se, ymax = mean+se),
width=0.1, colour="darkcyan", size=0.75)
gg9
# Wow, those error bars are HUGE! We should double check the distributions:
gg10 <- ggplot(f, aes(x = state, y = confirmed, fill = state)) +
geom_violin() +
geom_point()
gg10
# Looks great, but isn't this deceptive? Comparing HI and CA is prob not ok...
# Let's normalize the case counts by the population:
h <- e %>% mutate(mconfirmed = confirmed / population) %>%
data.frame() %>%
select(state = administrative_area_level_2, mconfirmed) %>%
group_by(state) %>%
summarize(n = n(),
mean = mean(mconfirmed),
sd = sd(mconfirmed)) %>%
mutate(se = sd/sqrt(n))
# Now we can re-use the code from gg9, and just sub in the new dataset:
gg11 <- ggplot(h) +
geom_bar(aes(x = state, y = mean), stat = "identity") +
geom_errorbar(aes(x = state, ymin = mean-se, ymax = mean+se),
width=0.1, colour="darkcyan", size=0.75)
gg11
ggsave("plot9.pdf", gg11, width = 11, height = 8.5)
rm(list=setdiff(ls(), "a"))
### Dot Plots ##################################################################
# Let's check out some dotplots
# Using some Pew data: What % of surveyed respondents in the lead up to the 2020
# election reported [ISSUE] as very important to their vote?
b <- read.csv("pew_data.csv")
# We have a problem though! Our data is wide, and we need it to be long...
c <- melt(b, id = "issues")
gg12 <- ggplot(c, aes(x = value, y = issues)) +
geom_point(aes(color = variable))
gg12
# But that's super hard to read, let's try to sort these
gg13 <- ggplot(c, aes(x = value, y = reorder(issues, value))) +
geom_point(aes(color = variable))
gg13
# Great, but this is still sort of hard to read, and the colors don't make sense:
gg14 <- ggplot(c, aes(x = value, y = reorder(issues, value))) +
geom_line(aes(group = issues), color = "#999999") +
geom_point(aes(color = variable), size = 5) +
scale_color_manual(values = c("#0000FF", "#FF0000", "#999999"),
labels = c("Biden Voters", "Trump Voters", "All Voters")) +
geom_text(aes(label = sprintf("%1.0f%%", value), color = variable),
size = 2, nudge_y = 0.25) +
scale_x_continuous(breaks = NULL) +
labs(title = "Biden and Trump Voters Diverge on Which Issues Matter Most in 2020 Election",
subtitle = "Results of July/August 2020 Pew Research Poll of Registered Voters",
x = "Percent of Registered Voters Saying Issue is Very Important to Their Vote in the 2020 Election.",
y = "Issue Area") +
theme_minimal() +
theme(legend.title = element_blank())
gg14
ggsave("plot10.pdf", gg14, width = 11, height = 8.5)
# Keep in mind that red tends to bleed in video...
# Try: https://htmlcolorcodes.com and https://color.adobe.com/create/color-wheel
rm(list=setdiff(ls(), "a"))
### Scatter Plots ##############################################################
plot(a$air_travel, a$deaths_cum_log)
abline(lm(a$deaths_cum_log ~ a$air_travel), col = "blue")
b <- a %>% filter(region != "")
gg15 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log, color = region)) +
geom_point()
gg15
# Let's try to scale by case count
gg16 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log,
size = cases_cum,
color = region)) +
scale_size_continuous(range = c(1, 10), breaks = pretty_breaks()) +
geom_point()
gg16
# And now let's try to add an ols line and ci
gg17 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log)) +
geom_point() +
# geom_smooth(method = loess) +
geom_smooth(method = lm)
gg17
# What happens if we add the color back in?
gg18 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log,
color = region)) +
geom_point() +
geom_smooth(method = lm)
gg18
# It's calculating the interaction term!
# But that was hard af to read. Let's try:
gg19 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log)) +
geom_point() +
geom_smooth(method = lm) +
facet_wrap(~ region)
gg19
# Let's go back to plot gg17... can you clean it up so it looks pretty?
# Or better yet, can you make it look like the plot we made with base graphics?
# Scatter of air travel vs. cumulative (log) deaths with an OLS fit band.
gg20 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log)) +
  geom_point() +
  geom_smooth(method = lm) +
  # FIX: the y variable is cumulative *deaths*, but the title said
  # "Case Counts" (copy-paste from the cases plot); also corrected the
  # "Cummulative" typo in the axis label.
  labs(title = "More Air Travel is Associated with Higher Covid Death Counts",
       x = "Air Travel",
       y = "Cumulative Covid Deaths (Log)") +
  theme_minimal()
gg20
ggsave("plot11.pdf", gg20, width = 11, height = 8.5)
# Almost there, but I want ISO codes instead of points
gg20 <- ggplot(b, aes(x = air_travel, y = deaths_cum_log, label = geoid2)) +
  # check_overlap drops labels that would overprint earlier ones.
  geom_text(size = 2, check_overlap = TRUE) +
  geom_smooth(method = lm) +  # removed a stray trailing comma
  labs(title = "More Air Travel is Associated with Higher Covid Death Counts",
       x = "Air Travel",
       y = "Cumulative Covid Deaths (Log)") +
  theme_minimal()
gg20
### Coefficient Plots ##########################################################
# Let's run a regression:
m <- lm(deaths_cum_log ~ cases_cum_log + air_travel + gov_effect + share_older, data = b)
m
coefplot(m, intercept = FALSE)
# Wow, that was easy
# Can we make it harder? Let's try to replicate this manually in ggplot
z1 <- tidy(m, conf.int = TRUE, conf.level = .95)
z1 <- z1 %>% dplyr::rename(low95 = conf.low, high95 = conf.high)
z2 <- tidy(m, conf.int = TRUE, conf.level = .9)
z2 <- z2 %>% dplyr::rename(low90 = conf.low, high90 = conf.high)
z3 <- z1 %>% full_join(z2)
z3$term <- dplyr::recode(z3$term,
"(Intercept)" = "Intercept",
"cases_cum_log" = "Cumulative Cases (Log)",
"air_travel" = "Air Travel",
"gov_effect" = "Government Effectiveness",
"share_older" = "Share of Older Population")
z3$term <- factor(z3$term,
levels = c("Intercept",
"Cumulative Cases (Log)",
"Air Travel",
"Government Effectiveness",
"Share of Older Population"),
ordered = TRUE)
# Now z3 is all the data we need for a dot and whisker plot
gg21 <- ggplot(z3 %>% filter(term != "Intercept")) +
geom_hline(yintercept=0, lty="11", colour="grey30") +
geom_errorbar(aes(term, ymin = low95, ymax = high95), width=0.1) +
geom_errorbar(aes(term, ymin = low90, ymax = high90), lwd = 1.15, width=0) +
geom_point(aes(term, estimate)) +
labs(title = "Results of OLS Regression",
y = "Point Estimate",
x = "Variable") +
theme_light()
gg21
ggsave("plot12.pdf", gg21, width = 11, height = 8.5)
### Margins Plots ##############################################################
# Let's redefine the model and the sjPlot packace to estimate margins
m <- lm(deaths_cum_log ~ cases_cum_log + air_travel + gov_effect + share_older + as.factor(continent), data = b)
m
coefplot(m, intercept = FALSE)
gg22 <- plot_model(m, type = "pred",
terms = "continent")
gg22
gg23 <- plot_model(m, type = "pred",
terms = "air_travel")
gg23
gg24 <- plot_model(m, type = "pred",
terms = c("air_travel", "continent"))
gg24
rm(list=ls())
# And that's it!
|
#############################################################################
######### MAKE CHECKERBOARD 2 EVALUATION GROUPS #############
#############################################################################
get.checkerboard2 <- function(occ, env, bg.coords, aggregation.factor){
  # Assign occurrence and background points to 4 spatial evaluation bins
  # using the hierarchical "checkerboard2" design: a fine checkerboard grid
  # (aggregation.factor[1]) nested within a coarser one (aggregation.factor[2]).
  #
  # Args:
  #   occ:                two-column coordinates of occurrence localities.
  #   env:                raster stack; the first layer defines the base grid.
  #   bg.coords:          two-column coordinates of background points.
  #   aggregation.factor: one or two aggregation factors (fine, coarse);
  #                       a single value is recycled for both levels.
  # Returns: list(occ.grp, bg.grp) with a bin id (1-4) for each point.
  if (length(aggregation.factor) == 1) aggregation.factor <- rep(aggregation.factor, 2)
  grid <- aggregate(env[[1]], fact=aggregation.factor[1])
  grid2 <- aggregate(grid, aggregation.factor[2])
  # Split points on the fine grid (white/black cells), then split each half
  # again on the coarse grid, giving 4 disjoint groups.
  w <- gridSample(occ, grid, n=1e4, chess='white')
  b <- gridSample(occ, grid, n=1e4, chess='black')
  ww <- gridSample(w, grid2, n=1e4, chess='white')
  wb <- gridSample(w, grid2, n=1e4, chess='black')
  bw <- gridSample(b, grid2, n=1e4, chess='white')
  bb <- gridSample(b, grid2, n=1e4, chess='black')
  bgw <- gridSample(bg.coords, grid, n=1e4, chess='white')
  bgb <- gridSample(bg.coords, grid, n=1e4, chess='black')
  bgww <- gridSample(bgw, grid2, n=1e4, chess='white')
  bgwb <- gridSample(bgw, grid2, n=1e4, chess='black')
  bgbw <- gridSample(bgb, grid2, n=1e4, chess='white')
  bgbb <- gridSample(bgb, grid2, n=1e4, chess='black')
  # BUG FIX: the original unbraced `if (...) x$grp <- 1; r <- rbind(r, x)`
  # executed the rbind unconditionally, so an empty quadrant (whose data
  # frame never received a `grp` column) made rbind() fail on mismatched
  # columns. Braces keep both statements inside the guard.
  r <- data.frame()
  if (nrow(ww) > 0) { ww$grp <- 1; r <- rbind(r, ww) }
  if (nrow(wb) > 0) { wb$grp <- 2; r <- rbind(r, wb) }
  if (nrow(bw) > 0) { bw$grp <- 3; r <- rbind(r, bw) }
  if (nrow(bb) > 0) { bb$grp <- 4; r <- rbind(r, bb) }
  # Restore original point order via the numeric row names.
  occ.grp <- r[order(as.numeric(rownames(r))),]$grp
  bgr <- data.frame()
  if (nrow(bgww) > 0) { bgww$grp <- 1; bgr <- rbind(bgr, bgww) }
  if (nrow(bgwb) > 0) { bgwb$grp <- 2; bgr <- rbind(bgr, bgwb) }
  if (nrow(bgbw) > 0) { bgbw$grp <- 3; bgr <- rbind(bgr, bgbw) }
  if (nrow(bgbb) > 0) { bgbb$grp <- 4; bgr <- rbind(bgr, bgbb) }
  bg.grp <- bgr[order(as.numeric(rownames(bgr))),]$grp
  # PATCH IF OCC OR BG POINTS FALL INTO FEWER THAN FOUR BINS:
  # renumber groups 1..k so downstream code sees consecutive bin ids.
  noccgrp <- length(unique(occ.grp))
  if(noccgrp < 4 ){
    message(paste("Warning: occurrence points fall in only", noccgrp, "bins"))
    bg.grp[ ! bg.grp %in% occ.grp] <- NA
    occ.grp <- as.numeric(as.factor(occ.grp))
    bg.grp <- as.numeric(as.factor(bg.grp))
  }
  if(length(unique(bg.grp[!is.na(bg.grp)])) != noccgrp) {
    # A bin with occurrences but no background points cannot be evaluated;
    # signal the error properly instead of message() + bare stop().
    stop("occurrence records but no background points fall in 1 or more evaluation bin(s)")
  }
  list(occ.grp=occ.grp, bg.grp=bg.grp)
}
| /R/get.checkerboard2.R | no_license | xavi-rp/ENMeval | R | false | false | 2,290 | r | #############################################################################
######### MAKE CHECKERBOARD 2 EVALUATION GROUPS #############
#############################################################################
get.checkerboard2 <- function(occ, env, bg.coords, aggregation.factor){
  # Assign occurrence and background points to 4 spatial evaluation bins
  # using the hierarchical "checkerboard2" design: a fine checkerboard grid
  # (aggregation.factor[1]) nested within a coarser one (aggregation.factor[2]).
  #
  # Args:
  #   occ:                two-column coordinates of occurrence localities.
  #   env:                raster stack; the first layer defines the base grid.
  #   bg.coords:          two-column coordinates of background points.
  #   aggregation.factor: one or two aggregation factors (fine, coarse);
  #                       a single value is recycled for both levels.
  # Returns: list(occ.grp, bg.grp) with a bin id (1-4) for each point.
  if (length(aggregation.factor) == 1) aggregation.factor <- rep(aggregation.factor, 2)
  grid <- aggregate(env[[1]], fact=aggregation.factor[1])
  grid2 <- aggregate(grid, aggregation.factor[2])
  # Split points on the fine grid (white/black cells), then split each half
  # again on the coarse grid, giving 4 disjoint groups.
  w <- gridSample(occ, grid, n=1e4, chess='white')
  b <- gridSample(occ, grid, n=1e4, chess='black')
  ww <- gridSample(w, grid2, n=1e4, chess='white')
  wb <- gridSample(w, grid2, n=1e4, chess='black')
  bw <- gridSample(b, grid2, n=1e4, chess='white')
  bb <- gridSample(b, grid2, n=1e4, chess='black')
  bgw <- gridSample(bg.coords, grid, n=1e4, chess='white')
  bgb <- gridSample(bg.coords, grid, n=1e4, chess='black')
  bgww <- gridSample(bgw, grid2, n=1e4, chess='white')
  bgwb <- gridSample(bgw, grid2, n=1e4, chess='black')
  bgbw <- gridSample(bgb, grid2, n=1e4, chess='white')
  bgbb <- gridSample(bgb, grid2, n=1e4, chess='black')
  # BUG FIX: the original unbraced `if (...) x$grp <- 1; r <- rbind(r, x)`
  # executed the rbind unconditionally, so an empty quadrant (whose data
  # frame never received a `grp` column) made rbind() fail on mismatched
  # columns. Braces keep both statements inside the guard.
  r <- data.frame()
  if (nrow(ww) > 0) { ww$grp <- 1; r <- rbind(r, ww) }
  if (nrow(wb) > 0) { wb$grp <- 2; r <- rbind(r, wb) }
  if (nrow(bw) > 0) { bw$grp <- 3; r <- rbind(r, bw) }
  if (nrow(bb) > 0) { bb$grp <- 4; r <- rbind(r, bb) }
  # Restore original point order via the numeric row names.
  occ.grp <- r[order(as.numeric(rownames(r))),]$grp
  bgr <- data.frame()
  if (nrow(bgww) > 0) { bgww$grp <- 1; bgr <- rbind(bgr, bgww) }
  if (nrow(bgwb) > 0) { bgwb$grp <- 2; bgr <- rbind(bgr, bgwb) }
  if (nrow(bgbw) > 0) { bgbw$grp <- 3; bgr <- rbind(bgr, bgbw) }
  if (nrow(bgbb) > 0) { bgbb$grp <- 4; bgr <- rbind(bgr, bgbb) }
  bg.grp <- bgr[order(as.numeric(rownames(bgr))),]$grp
  # PATCH IF OCC OR BG POINTS FALL INTO FEWER THAN FOUR BINS:
  # renumber groups 1..k so downstream code sees consecutive bin ids.
  noccgrp <- length(unique(occ.grp))
  if(noccgrp < 4 ){
    message(paste("Warning: occurrence points fall in only", noccgrp, "bins"))
    bg.grp[ ! bg.grp %in% occ.grp] <- NA
    occ.grp <- as.numeric(as.factor(occ.grp))
    bg.grp <- as.numeric(as.factor(bg.grp))
  }
  if(length(unique(bg.grp[!is.na(bg.grp)])) != noccgrp) {
    # A bin with occurrences but no background points cannot be evaluated;
    # signal the error properly instead of message() + bare stop().
    stop("occurrence records but no background points fall in 1 or more evaluation bin(s)")
  }
  list(occ.grp=occ.grp, bg.grp=bg.grp)
}
|
\name{bayesPop-package}
\alias{bayesPop-package}
\alias{bayesPop}
\docType{package}
\title{
Probabilistic Population Projection
}
\description{
The package allows to generate population projections for all countries of the world using several probabilistic components, such as total fertility rate (TFR) and life expectancy.
}
\details{
\tabular{ll}{
Package: \tab bayesPop\cr
Type: \tab Package\cr
Version: \tab 6.2-4\cr
Date: \tab 2018-03-15\cr
License: \tab GPL (>=2)\cr
URL: \tab \url{https://bayespop.csss.washington.edu}\cr
}
The main function is called \code{\link{pop.predict}}. It uses trajectories of TFR from the \pkg{bayesTFR} package and life expectancy from the \pkg{bayesLife} package and for each trajectory it computes a population projection using the Cohort component method. It results in probabilistic age and sex specific projections. Various plotting functions are available for results visualization (\code{\link{pop.trajectories.plot}}, \code{\link{pop.pyramid}}, \code{\link{pop.trajectories.pyramid}}), as well as a summary function (\code{\link{summary.bayesPop.prediction}}).
}
\author{
Hana Sevcikova, Adrian Raftery, Thomas Buettner
Maintainer: Hana Sevcikova <hanas@uw.edu>
}
\references{
H. Sevcikova, A. E. Raftery (2016). bayesPop: Probabilistic
Population Projections. Journal of Statistical Software, 75(5), 1-29.
doi:10.18637/jss.v075.i05
A. E. Raftery, N. Li, H. Sevcikova, P. Gerland, G. K. Heilig (2012). Bayesian probabilistic population projections for all countries. Proceedings of the National Academy of Sciences 109:13915-13921.
P. Gerland, A. E. Raftery, H. Sevcikova, N. Li, D. Gu, T. Spoorenberg, L. Alkema, B. K. Fosdick, J. L. Chunn, N. Lalic, G. Bay, T. Buettner, G. K. Heilig, J. Wilmoth (2014). World Population Stabilization Unlikely This Century. Science 346:234-237.
H. Sevcikova, N. Li, V. Kantorova, P. Gerland and A. E. Raftery (2015). Age-Specific Mortality and Fertility Rates for Probabilistic Population Projections. arXiv:1503.05215. \url{http://arxiv.org/abs/1503.05215}
%Working Paper 150. Center for Statistics and the Social Sciences, University of Washington. \url{http://www.csss.washington.edu/Papers/2015/wp150.pdf}
}
\keyword{ package }
\seealso{
\code{\link[bayesTFR:bayesTFR-package]{bayesTFR}}, \code{\link[bayesLife:bayesLife-package]{bayesLife}}
}
\examples{
\dontrun{
sim.dir <- tempfile()
# Generates population projection for one country
country <- "Netherlands"
pred <- pop.predict(countries=country, output.dir=sim.dir)
summary(pred, country)
pop.trajectories.plot(pred, country)
dev.off()
pop.trajectories.plot(pred, country, sum.over.ages=TRUE)
pop.pyramid(pred, country)
pop.pyramid(pred, country, year=2100, age=1:26)
unlink(sim.dir, recursive=TRUE)
}
# Here are commands needed to run probabilistic projections
# from scratch, i.e. including TFR and life expectancy.
# Note that running the first four commands
# (i.e. predicting TFR and life expectancy) can take
# LONG time (up to several days; see below for possible speed-up).
# For a toy simulation, set the number of iterations (iter)
# to a small number.
\dontrun{
sim.dir.tfr <- "directory/for/TFR"
sim.dir.e0 <- "directory/for/e0"
sim.dir.pop <- "directory/for/pop"
# Estimate TFR parameters (speed-up by including parallel=TRUE)
run.tfr.mcmc(iter="auto", output.dir=sim.dir.tfr, seed=1)
# Predict TFR (if iter above < 4000, reduce burnin and nr.traj accordingly)
tfr.predict(sim.dir=sim.dir.tfr, nr.traj=2000, burnin=2000)
# Estimate e0 parameters (females) (speed-up by including parallel=TRUE)
# Can be run independently of the two commands above
run.e0.mcmc(sex="F", iter="auto", output.dir=sim.dir.e0, seed=1)
# Predict female and male e0
# (if iter above < 22000, reduce burnin and nr.traj accordingly)
e0.predict(sim.dir=sim.dir.e0, nr.traj=2000, burnin=20000)
# Population prediction
pred <- pop.predict(output.dir=sim.dir.pop, verbose=TRUE,
inputs = list(tfr.sim.dir=sim.dir.tfr,
e0F.sim.dir=sim.dir.e0, e0M.sim.dir="joint_"))
pop.trajectories.plot(pred, "Madagascar", nr.traj=50, sum.over.ages=TRUE)
pop.trajectories.table(pred, "Madagascar")
}
}
% reproducing example data in the package:
% pred <- pop.predict(countries=c(528,218), nr.traj=3, output.dir=sim.dir, replace.output=TRUE)
| /man/bayesPop-package.Rd | no_license | raquelrguima/bayesPop | R | false | false | 4,297 | rd | \name{bayesPop-package}
\alias{bayesPop-package}
\alias{bayesPop}
\docType{package}
\title{
Probabilistic Population Projection
}
\description{
The package allows to generate population projections for all countries of the world using several probabilistic components, such as total fertility rate (TFR) and life expectancy.
}
\details{
\tabular{ll}{
Package: \tab bayesPop\cr
Type: \tab Package\cr
Version: \tab 6.2-4\cr
Date: \tab 2018-03-15\cr
License: \tab GPL (>=2)\cr
URL: \tab \url{https://bayespop.csss.washington.edu}\cr
}
The main function is called \code{\link{pop.predict}}. It uses trajectories of TFR from the \pkg{bayesTFR} package and life expectancy from the \pkg{bayesLife} package and for each trajectory it computes a population projection using the Cohort component method. It results in probabilistic age and sex specific projections. Various plotting functions are available for results visualization (\code{\link{pop.trajectories.plot}}, \code{\link{pop.pyramid}}, \code{\link{pop.trajectories.pyramid}}), as well as a summary function (\code{\link{summary.bayesPop.prediction}}).
}
\author{
Hana Sevcikova, Adrian Raftery, Thomas Buettner
Maintainer: Hana Sevcikova <hanas@uw.edu>
}
\references{
H. Sevcikova, A. E. Raftery (2016). bayesPop: Probabilistic
Population Projections. Journal of Statistical Software, 75(5), 1-29.
doi:10.18637/jss.v075.i05
A. E. Raftery, N. Li, H. Sevcikova, P. Gerland, G. K. Heilig (2012). Bayesian probabilistic population projections for all countries. Proceedings of the National Academy of Sciences 109:13915-13921.
P. Gerland, A. E. Raftery, H. Sevcikova, N. Li, D. Gu, T. Spoorenberg, L. Alkema, B. K. Fosdick, J. L. Chunn, N. Lalic, G. Bay, T. Buettner, G. K. Heilig, J. Wilmoth (2014). World Population Stabilization Unlikely This Century. Science 346:234-237.
H. Sevcikova, N. Li, V. Kantorova, P. Gerland and A. E. Raftery (2015). Age-Specific Mortality and Fertility Rates for Probabilistic Population Projections. arXiv:1503.05215. \url{http://arxiv.org/abs/1503.05215}
%Working Paper 150. Center for Statistics and the Social Sciences, University of Washington. \url{http://www.csss.washington.edu/Papers/2015/wp150.pdf}
}
\keyword{ package }
\seealso{
\code{\link[bayesTFR:bayesTFR-package]{bayesTFR}}, \code{\link[bayesLife:bayesLife-package]{bayesLife}}
}
\examples{
\dontrun{
sim.dir <- tempfile()
# Generates population projection for one country
country <- "Netherlands"
pred <- pop.predict(countries=country, output.dir=sim.dir)
summary(pred, country)
pop.trajectories.plot(pred, country)
dev.off()
pop.trajectories.plot(pred, country, sum.over.ages=TRUE)
pop.pyramid(pred, country)
pop.pyramid(pred, country, year=2100, age=1:26)
unlink(sim.dir, recursive=TRUE)
}
# Here are commands needed to run probabilistic projections
# from scratch, i.e. including TFR and life expectancy.
# Note that running the first four commands
# (i.e. predicting TFR and life expectancy) can take
# LONG time (up to several days; see below for possible speed-up).
# For a toy simulation, set the number of iterations (iter)
# to a small number.
\dontrun{
sim.dir.tfr <- "directory/for/TFR"
sim.dir.e0 <- "directory/for/e0"
sim.dir.pop <- "directory/for/pop"
# Estimate TFR parameters (speed-up by including parallel=TRUE)
run.tfr.mcmc(iter="auto", output.dir=sim.dir.tfr, seed=1)
# Predict TFR (if iter above < 4000, reduce burnin and nr.traj accordingly)
tfr.predict(sim.dir=sim.dir.tfr, nr.traj=2000, burnin=2000)
# Estimate e0 parameters (females) (speed-up by including parallel=TRUE)
# Can be run independently of the two commands above
run.e0.mcmc(sex="F", iter="auto", output.dir=sim.dir.e0, seed=1)
# Predict female and male e0
# (if iter above < 22000, reduce burnin and nr.traj accordingly)
e0.predict(sim.dir=sim.dir.e0, nr.traj=2000, burnin=20000)
# Population prediction
pred <- pop.predict(output.dir=sim.dir.pop, verbose=TRUE,
inputs = list(tfr.sim.dir=sim.dir.tfr,
e0F.sim.dir=sim.dir.e0, e0M.sim.dir="joint_"))
pop.trajectories.plot(pred, "Madagascar", nr.traj=50, sum.over.ages=TRUE)
pop.trajectories.table(pred, "Madagascar")
}
}
% reproducing example data in the package:
% pred <- pop.predict(countries=c(528,218), nr.traj=3, output.dir=sim.dir, replace.output=TRUE)
|
#-------------------------------------------------------------------------
#------ Advanced plotting in R using ggplot
#------ http://r-statistics.co/Complete-Ggplot2-Tutorial-Part1-With-R-Code.html
#------ http://tutorials.iq.harvard.edu/R/Rgraphics/Rgraphics.html
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#---- Step 1: prepare the environment, load libraries and import data
#-------------------------------------------------------------------------
# Wipe the environment clean (tutorial convenience; avoid in production code)
rm(list = ls())

# Install (only if missing) and attach every package used below.
library.list <- c("readxl"      # For data import
                  , "ggplot2"   # Main package
                  , "scales"    # For managing chart axis labels/values/scale
                  , "lemon"     # Extra things for ggplot
                  , "gridExtra" # Arrange different plots in a grid
                  )
# Iterate over the package names directly instead of 1:length(...) indexing
# (the index form breaks on a zero-length vector and is non-idiomatic).
for (pkg in library.list) {
  if (!pkg %in% rownames(installed.packages())) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}
rm(library.list)

# Set working directory
#setwd("")

# Load the sample orders data via an interactive file picker
# (same data used for the Excel and Tableau exercises).
orders <- read_excel(file.choose())

# Column names have spaces, dashes and upper-case letters, which may be
# confusing; normalize them to lower-case dot-separated names.
colnames(orders) <- tolower(gsub(" |-", ".", colnames(orders)))

# Keep only observations with sales < 2000 (to ignore outliers)
orders <- orders[orders$sales < 2000, ]

# Randomly keep only 10% of the rows to speed up rendering.
# (Call set.seed() beforehand if you need a reproducible subsample.)
orders <- orders[sample(seq_len(nrow(orders)), 0.1 * nrow(orders)), ]
#-------------------------------------------------------------------------
#---- Step 2: basics of ggplot
#-------------------------------------------------------------------------
# A bare ggplot() call only records the data and aesthetic mappings;
# with no geom layer added it renders an empty plot.
ggplot(orders, aes(x = sales, y = profit))

# Add a basic scatter-plot layer
ggplot(orders, aes(x = sales, y = profit)) +
  geom_point()

# ggplot output is a regular R object and can be saved...
sales.scatter <- ggplot(orders, aes(x = sales, y = profit)) +
  geom_point()

# ...and rendered later simply by evaluating it
sales.scatter

# More layers/mappings can be added to a saved plot;
# geom_smooth() overlays a trend line with a confidence band
sales.scatter + geom_smooth()

# xlim()/ylim() DELETE points outside the limits
# (which also changes what the smoother is fitted to)
sales.scatter +
  geom_smooth() +
  xlim(c(0, 1000)) +
  ylim(c(-2000, 2000))

# coord_cartesian() only zooms the view; all points are retained
sales.scatter +
  geom_smooth() +
  coord_cartesian(xlim = c(0, 1000)
                  , ylim = c(-2000, 2000)
                  )

# For large datasets you should subset first, and then call ggplot;
# otherwise ggplot will take longer to render
ggplot(orders[orders$sales < 1000, ]
       , aes(x = sales, y = profit)
       ) +
  geom_point() +
  geom_smooth()

# labs() adds the title, subtitle and axis labels
ggplot(orders, aes(x = sales, y = profit)) +
  geom_point() +
  geom_smooth() +
  labs(title = "Sales vs. Profits"
       , subtitle = "for orders less than $2000"
       , x = "Total sale, $"
       , y = "Total profit, $"
       )
# Customize the looks of ggplot elements
ggplot(orders, aes(x = sales, y = profit)) +
  geom_point(color = "steelblue" # Color of points' outline
             , fill = "yellow"   # Color of points' fill (only shapes 21-25 have a fill)
             , shape = 22        # Shape of points, similar to R's default "pch" parameter
             , size = 2          # Size of points
             , alpha = 1         # Transparency of points (1 = fully opaque)
             ) +
  geom_smooth(color = "firebrick"
              , weight = 5       # line thickness
              ) +
  labs(title = "Sales vs. Profits"
       , subtitle = "for orders less than $2000"
       , x = "Total sale, $"
       , y = "Total profit, $"
       )

# Built-in themes (theme_bw, theme_minimal, ...) restyle the whole plot
ggplot(orders, aes(x = sales, y = profit)) +
  geom_point(color = "steelblue" # Color of points' outline
             , fill = "yellow"   # Color of points' fill
             , shape = 22        # Shape of points, similar to R's default "pch" parameter
             , size = 2          # Size of points
             , alpha = 1         # Transparency of points
             ) +
  geom_smooth(color = "firebrick"
              , weight = 5
              , linetype = "solid" # Dashed, dotted, etc.
              ) +
  labs(title = "Sales vs. Profits"
       , subtitle = "for orders less than $2000"
       , x = "Total sale, $"
       , y = "Total profit, $"
       ) +
  theme_bw()
# Or you could change theme elements yourself
ggplot(orders, aes(x = sales, y = profit)) +
geom_point(color = "steelblue" # Color of points' outline
, fill = "yellow" # Color of points' fill
, shape = 22 # Shape of points, similar to R's default "pch" parameter
, size = 2 # Size of points
, alpha = 1 # Transparency of points
) +
labs(title = "Sales vs. Profits"
, subtitle = "for orders less than $2000"
, x = "Total sale, $"
, y = "Total profit, $ \n"
) +
theme_bw() + # Lines below inside theme() will overwrite Steps of theme_bw()
theme(plot.title = element_text(hjust = 0.5 # Title alignment, 0 - left, 1 - right
, face = "bold" # font face - bold, italic, etc.
, size = 20 # font size
, color = "#912600") # font color
, plot.subtitle = element_text(hjust = 0.5 # Horizontal alignment, 0.5 = center
, face = "bold" # Bold font
, size = 14 # Font size
, color = "#912600" # Font color
)
, axis.title.x = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.x = element_text(face = "bold"
, vjust = 0.5 # Vertical alignment, 0.5 = middle
, size = 12
)
, axis.title.y = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.y = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
)
# You could also use variable color/shape/fill mapping inside aes()
ggplot(orders
, aes(x = sales
, y = profit
, color = category
, shape = market
)
) +
geom_point(size = 2 # Size of points
, alpha = 1 # Transparency of points
) +
labs(title = "Sales vs. Profits"
, subtitle = "for orders less than $2000"
, x = "Total sale, $"
, y = "Total profit, $"
) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5
, face = "bold"
, size = 20
, color = "#912600"
)
, plot.subtitle = element_text(hjust = 0.5
, face = "bold"
, size = 14
, color = "#912600"
)
, axis.title.x = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.x = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
, axis.title.y = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.y = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
)
# Now you have a legend, which you can also customize inside theme()
sales.scatter <- ggplot(orders
, aes(x = sales
, y = profit
, color = category
, shape = market
)
) +
geom_point(size = 2 # Size of points
, alpha = 1 # Transparency of points
) +
labs(title = "Sales vs. Profits"
, subtitle = "for orders less than $2000"
, x = "Total sale, $"
, y = "Total profit, $"
) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5
, face = "bold"
, size = 20
, color = "#912600"
)
, plot.subtitle = element_text(hjust = 0.5
, face = "bold"
, size = 14
, color = "#912600"
)
, axis.title.x = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.x = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
, axis.title.y = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.y = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
, legend.text = element_text(face = "bold"
, size = 12
)
, legend.position = c(0.1, 0.2) # Or use "bottom", "top", "left", "right"
, legend.key.size = unit(1, 'lines') # Size of elements inside legend box
, legend.title = element_blank() # Hides legend title
, legend.background = element_rect(size = 0.5
, linetype = "solid"
, color = "black"
)
)
sales.scatter
# Export the result
png(file = "sales.scatter.png"
, width = 1920
, height = 1200
, res = 148
)
sales.scatter
dev.off()
#-------------------------------------------------------------------------
#---- Step 3: histogram
#-------------------------------------------------------------------------
# Histogram
# See more at http://www.sthda.com/english/wiki/ggplot2-histogram-plot-quick-start-guide-r-software-and-data-visualization
# Default histogram (30 bins; ggplot prints a message suggesting a binwidth)
ggplot(orders, aes(x = sales)) +
  geom_histogram()

# Change the number of bins
ggplot(orders, aes(x = sales)) +
  geom_histogram(bins = 50)

# Or change the width of bins (in data units, here $100 per bar)
ggplot(orders, aes(x = sales)) +
  geom_histogram(binwidth = 100)

# To make histograms of different variables comparable, one needs to use %
# on the y axis.  ..count.. is a ggplot-internal computed variable; dividing
# by its sum turns counts into shares.  Extra x-axis ticks also help.
ggplot(orders, aes(x = sales)) +
  geom_histogram(binwidth = 50
                 , aes(y = ..count../sum(..count..))
                 ) +
  scale_y_continuous(labels = percent # percent() comes from the "scales" package
                     ) +
  scale_x_continuous(breaks = seq(from = 0 # Adjust x-axis ticks
                                  , to = 2000
                                  , by = 200
                                  )
                     , labels = seq(from = 0 # Adjust labels for ticks
                                    , to = 2000
                                    , by = 200
                                    )
                     )
# Add custom lines and text to the chart
ggplot(orders, aes(x = sales)) +
geom_histogram(binwidth = 50
, aes(y = ..count../sum(..count..))
) +
scale_y_continuous(labels = percent
, limits = c(0, 0.4)
) +
scale_x_continuous(breaks = seq(from = 0 # Adjust x-axis ticks
, to = 2000
, by = 200
)
, labels = seq(from = 0 # Adjust labels for ticks
, to = 2000
, by = 200
)
) +
geom_vline(aes(xintercept = mean(sales)) # Adds a vertical line
) +
geom_vline(aes(xintercept = median(sales))
) +
annotate(geom = "text" # Adds a text box
, x = mean(orders$sales) + 100
, y = 0.35
, label = paste0("Mean = "
, round(mean(orders$sales), 2)
)
) +
annotate(geom = "text"
, x = median(orders$sales) - 100
, y = 0.35
, label = paste0("Median = "
, round(median(orders$sales),2)
)
)
# Adjust the looks
sales.hist <- ggplot(orders, aes(x = sales)) +
geom_histogram(bins = 50
, aes(y = ..count../sum(..count..))
, color = "darkblue"
, fill = "lightblue"
) +
scale_y_continuous(labels = percent
, limits = c(0, 0.4)
) +
scale_x_continuous(breaks = seq(from = 0 # Adjust x-axis ticks
, to = 2000
, by = 200
)
, labels = seq(from = 0 # Adjust labels for ticks
, to = 2000
, by = 200
)
) +
geom_vline(aes(xintercept = mean(sales))
, color = "red"
, linetype = "dashed"
, size = 1.2
) +
geom_vline(aes(xintercept = median(sales))
, color = "dark green"
, linetype = "dashed"
, size = 1.2
) +
annotate(geom = "text"
, x = mean(orders$sales) + 100
, y = 0.35
, color = "red"
, size = 4
, fontface = "bold"
, label = paste0("Mean\n=\n"
, round(mean(orders$sales), 2)
)
, lineheight = 0.75 # Reduce line spacing
) +
annotate(geom = "text"
, x = median(orders$sales) - 100
, y = 0.35
, color = "dark green"
, size = 4
, fontface = "bold"
, label = paste0("Median\n=\n"
, round(median(orders$sales),2)
)
, lineheight = 0.75 # Reduce line spacing
) +
labs(title = "Sales Distribution"
, subtitle = "for orders less than $2000"
, x = "Total sale, $"
, y = "Number of orders"
) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5
, face = "bold"
, size = 20
, color = "#912600"
)
, plot.subtitle = element_text(hjust = 0.5
, face = "bold"
, size = 14
, color = "#912600"
)
, axis.title.x = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.x = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
, axis.title.y = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.y = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
)
sales.hist
# Add a density curve to a histogram
sales.hist +
geom_density(aes(y = ..density..))
# That doesn't look right because our histogram and density use different y scale
# So, we need to change hist to have density
sales.hist <- ggplot(orders, aes(x = sales)) +
geom_histogram(bins = 50
, aes(y = ..density..) # Here we now have density
, color = "darkblue"
, fill = "lightblue"
) +
geom_density(aes(y = ..density..)
, color = "darkblue"
, size = 1.2
) +
# scale_y_continuous() +
scale_x_continuous(breaks = seq(from = 0
, to = 2000
, by = 200
)
, labels = seq(from = 0
, to = 2000
, by = 200
)
) +
geom_vline(aes(xintercept = mean(sales))
, color = "red"
, linetype = "dashed"
, size = 1.2
) +
geom_vline(aes(xintercept = median(sales))
, color = "dark green"
, linetype = "dashed"
, size = 1.2
) +
annotate(geom = "text"
, x = mean(orders$sales) + 100
, y = 0.0075 # This needs to be changed to accomodate new scale
, color = "red"
, size = 4
, fontface = "bold"
, label = paste0("Mean\n=\n"
, round(mean(orders$sales), 2)
)
, lineheight = 0.75 # Reduce line spacing
) +
annotate(geom = "text"
, x = median(orders$sales) - 100
, y = 0.0075 # This needs to be changed to accomodate new scale
, color = "dark green"
, size = 4
, fontface = "bold"
, label = paste0("Median\n=\n"
, round(median(orders$sales),2)
)
, lineheight = 0.75 # Reduce line spacing
) +
labs(title = "Sales Distribution"
, subtitle = "for orders less than $2000"
, x = "Total sale, $"
, y = ""
) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5
, face = "bold"
, size = 20
, color = "#912600"
)
, plot.subtitle = element_text(hjust = 0.5
, face = "bold"
, size = 14
, color = "#912600"
)
, axis.title.x = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.x = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
, axis.title.y = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.y = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
)
sales.hist
# We can also add a normal distribution density
sales.hist +
stat_function(fun = dnorm
, n = nrow(orders)
, args = list(mean = mean(orders$sales), sd = sd(orders$sales))
, color = "purple"
, size = 1.2
) +
annotate(geom = "text"
, x = 400
, y = 0.0015
, color = "purple"
, size = 4
, fontface = "bold"
, label = "Normal denstity"
, lineheight = 0.75 # Reduce line spacing
, hjust = 0 # Make text be on the left of x position
)
# Multiple histograms don't fill well on one graph
ggplot(orders, aes(x = sales
#, color = category # this doesn't work for bars
#, shape = category # this doesn't work either
, fill = category # Using category to define fill color
)
) +
geom_histogram(binwidth = 50
, aes(y = ..count../sum(..count..))
, color = "darkblue" # Border color of bars is the same for everyone
#, position = "dodge" # This is even worse
) +
scale_y_continuous(labels = percent) +
scale_x_continuous(breaks = seq(from = 0 # Adjust x-axis ticks
, to = 2000
, by = 200
)
, labels = seq(from = 0 # Adjust labels for ticks
, to = 2000
, by = 200
)
) +
theme_bw()
# So it's better to put them in a facet grid
ggplot(orders, aes(x = sales
, fill = category
)
) +
geom_histogram(binwidth = 50
, aes(y = ..count../sum(..count..))
, color = "darkblue"
) +
scale_y_continuous(labels = percent) +
scale_x_continuous(breaks = seq(from = 0 # Adjust x-axis ticks
, to = 2000
, by = 200
)
, labels = seq(from = 0 # Adjust labels for ticks
, to = 2000
, by = 200
)
) +
facet_wrap( ~ category
, scale = "fixed" # Use "free" if same scale is not good
, ncol = 1
, nrow = 3
, strip.position = "bottom") +
theme_bw() +
theme(strip.text.x = element_text(size = 12
, face = "bold"
)
, strip.background = element_blank() # No background color
, strip.placement = "outside"
, legend.position = "none" # Don't show legend, since we have facet labels
)
#-------------------------------------------------------------------------
#---- Step 4: Box plot
#-------------------------------------------------------------------------
# Box plot, vertical
ggplot(orders, aes( x = category
, y = sales
, fill = category
)
) +
geom_boxplot() +
theme_bw()
# Box plot, horizontal
ggplot(orders, aes(x = category
, y = sales
, fill = category
)
) +
geom_boxplot() +
coord_flip() + # Flip axis around, can be used in any ggplot object (note that you can't switch x and y in aes())
theme_bw()
# Add another dimension
ggplot(orders, aes(x = order.priority
, y = sales
, fill = category
)
) +
geom_boxplot() +
theme_bw()
# Priority is in random order because it's a character variable
# We need to make it an ordered factor for ggplot to properly use it
orders$order.priority <- factor(orders$order.priority
, levels = c("Low"
, "Medium"
, "High"
, "Critical"
)
, ordered = TRUE
)
ggplot(orders, aes(x = order.priority
, y = sales
, fill = category
)
) +
geom_boxplot() +
theme_bw()
# Customize
ggplot(orders, aes(x = order.priority
, y = sales
, fill = category
)
) +
geom_boxplot(alpha = 0.5 # Transparancy of fill color, 1 = none
, color = "black" # Border color of boxes
, size = 0.5 # Border size
) +
scale_fill_manual(values = c("dark orange" # This allows to use your own fill colors
, "dark green"
, "dark blue"
)
) +
scale_y_continuous(breaks = seq(from = 0 # Adjust x-axis ticks
, to = 2000
, by = 200
)
, labels = seq(from = 0 # Adjust labels for ticks
, to = 2000
, by = 200
)
) +
labs(title = "Sales vs order priority"
, subtitle = "for orders less than $2000"
, x = "Order priority"
, y = "Sales, $"
) +
coord_flip() +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5
, face = "bold"
, size = 20
, color = "#912600"
)
, plot.subtitle = element_text(hjust = 0.5
, face = "bold"
, size = 14
, color = "#912600"
)
, axis.title.x = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.x = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
, axis.title.y = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.y = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
)
#-------------------------------------------------------------------------
#---- Step 5: Bar chart
#-------------------------------------------------------------------------
# Stacked bars, absolute values: stat = "identity" plots y as given
# (sales in $1000s); position = "stack" piles the categories on top of
# each other within every market
ggplot(orders, aes(x = market
                   , y = sales/1000
                   , fill = category
                   )
       ) +
  geom_bar(stat = "identity"
           , position = "stack"
           )
# Stacked bars, relative values: position = "fill" rescales every stack to 1,
# so each bar shows category shares within a market.
# BUG FIX: stat was misspelled "identitu", which makes ggplot2 error when the
# layer is created (no such stat exists).
ggplot(orders, aes(x = market
                   , y = sales/1000
                   , fill = category
                   )
       ) +
  geom_bar(stat = "identity"
           , position = "fill"
           )
# Non-stacked bars: position = "dodge" places the categories side by side
ggplot(orders, aes(x = market
                   , y = sales/1000
                   , fill = category
                   )
       ) +
  geom_bar(stat = "identity"
           , position = "dodge"
           , width = 0.7 # And this defines the width of bars themselves
           )
# Add data labels -- this draws one label per ORDER (a mess of overplotted
# text), because the data are not yet aggregated per market/category
ggplot(orders, aes(x = market
                   , y = sales/1000
                   , fill = category
                   )
       ) +
  geom_bar(stat = "identity"
           , position = position_dodge(0.7) # Number inside defines width of "dodging" between groups
           , width = 0.7
           ) +
  geom_text(aes(label = sales/1000
                , group = category)
            )

# That's because we need to aggregate data first:
# one row per market x category with the summed sales
orders.agg <- aggregate(sales ~ market + category
                        , orders
                        , sum)
# And now it will work: exactly one label per aggregated bar
ggplot(orders.agg, aes(x = market
                       , y = sales/1000
                       , fill = category
                       )
       ) +
  geom_bar(stat = "identity"
           , position = position_dodge(0.7)
           , width = 0.7
           , color = "black"
           ) +
  geom_text(aes(label = round(sales/1000, 0)) # This defines what to use as labels
            , position = position_dodge(0.7)  # Should be the same number as in geom_bar()
            , vjust = -0.5                    # This positions labels a little above bars
            ) +
  theme_bw()
#-------------------------------------------------------------------------
#---- Step 6: Pie chart
#-------------------------------------------------------------------------
# Very similar to bar chart:
orders.pie <- aggregate(sales ~ market, orders, sum)
ggplot(orders.pie, aes(x = ""
, y = sales
, fill = market
)
) +
geom_bar(width = 1
, stat = "identity"
) +
coord_polar(theta = "y" # This defines which variable maps the angle of a circle
, start = 0 # First item starts at 12 o'clock
) +
theme_bw()
# Customize:
ggplot(orders.pie, aes(x = ""
, y = sales
, fill = market
)
) +
geom_bar(width = 1
, stat = "identity"
, position = "fill"
, color = "black"
) +
scale_fill_brewer(palette = "Accent") +
geom_text(aes(label = percent(round(sales / sum(sales), 2)))
, position = position_fill(vjust = 0.5)
, size = 6
, fontface = "bold"
) +
coord_polar(theta = "y"
, start = 0
) +
labs(title = "Market Shares"
) +
theme_void() +
theme(plot.title = element_text(hjust = 0.5
, face = "bold"
, size = 20
, color = "#912600"
)
, plot.subtitle = element_text(hjust = 0.5
, face = "bold"
, size = 14
, color = "#912600"
)
, axis.title.x = element_blank()
, axis.text.x = element_blank()
, axis.title.y = element_blank()
, axis.text.y = element_blank()
)
#-------------------------------------------------------------------------
#---- Step 7: Calendar Heatmap
#---- See more at http://r-statistics.co/Top50-Ggplot2-Visualizations-MasterList-R-Code.html
#-------------------------------------------------------------------------
# Change order.date to type "date"
orders$order.date <- as.Date(orders$order.date, format = "%Y-%m-%d")
# First we need to aggregate sales by date
orders.agg <- aggregate(sales ~ order.date
, orders
, sum
)
# Next we need to create a dataset with all the dates from 2012-2015:
orders.dates <- data.frame(order.date = seq(from = as.Date("2012-01-01")
, to = as.Date("2015-12-31")
, by = "day"
)
)
# Now merge two together:
orders.daily <- merge(orders.dates, orders.agg
, by = "order.date"
, all.x = TRUE
)
rm(orders.agg)
rm(orders.dates)
# Next we need to create calendar dimensions: days, weeks, months, quaters and years
# Date formats:
# %Y: 4-digit year (1982)
# %y: 2-digit year (82)
# %m: 2-digit month (01)
# %d: 2-digit day of the month (13)
# %A: weekday (Wednesday)
# %a: abbreviated weekday (Wed)
# %B: month (January)
# %b: abbreviated month (Jan)
# Days:
orders.daily$day <- as.numeric(format(orders.daily$order.date, "%d"))
orders.daily$weekday <- factor(format(orders.daily$order.date, "%a") # Alternatively, use weekdays() function
, levels = rev(c("Mon"
, "Tue"
, "Wed"
, "Thu"
, "Fri"
, "Sat"
, "Sun"
)
)
, ordered = TRUE
)
# Week of month, as a difference between week of year current and week of year for 1st day of month
# Calculate week of the year
orders.daily$week <- as.numeric(format(orders.daily$order.date, "%W")) + 1
# Calculate week of year number for 1st day of every month
tmp <- as.numeric(format(as.Date(cut(orders.daily$order.date, "month")), "%W"))
orders.daily$week <- orders.daily$week - tmp
# Months:
orders.daily$month <- factor(format(orders.daily$order.date, "%b") # Alternatively, use months() function
, levels = c("Jan"
, "Feb"
, "Mar"
, "Apr"
, "May"
, "Jun"
, "Jul"
, "Aug"
, "Sep"
, "Oct"
, "Nov"
, "Dec"
)
, ordered = TRUE
)
# Quaters:
orders.daily$quarter <- factor(quarters(orders.daily$order.date)
, levels = c("Q1"
, "Q2"
, "Q3"
, "Q4"
)
, labels = c("", "", "", "") # To avoid seeing Q1 in pictures
, ordered = TRUE
)
# Years:
orders.daily$year <- format(orders.daily$order.date, "%Y")
# Now we can use tiles and facetts to arrange everything
# Calendar heatmap across all years: one tile per day, facet grid of
# year (rows) x month (columns); fill encodes daily total sales, gray = no sales.
heatmap1 <- ggplot(orders.daily
                   , aes(x = week
                         , y = weekday
                         , fill = sales
                         )
                   ) +
  geom_tile(colour = "white") +       # This creates a small rectangular for every date
  facet_grid(year ~ month) +
  scale_fill_gradient(low = "green"   # This uses a 2-color gradient scale
                      , high = "dark green"
                      , na.value = "gray"
                      ) +
  scale_x_continuous(breaks = c(1, 2, 3, 4, 5, 6)
                     , labels = c("1", "2", "3", "4", "5", "6")
                     ) +
  labs(x = "Week of Month"
       , y = ""
       , title = "Daily Total Sales"
       ) +
  theme_bw()
heatmap1
# Alternatively, we can look at months per quarter per year
# Single-year (2015) calendar heatmap: one facet per month, weekday on x,
# week-of-month on y (reversed so week 1 is on top), day number printed in
# each tile. NOTE: orders.daily$year is character, so `year == 2015` relies
# on R coercing 2015 to "2015" - works, but `year == "2015"` would be clearer.
heatmap2 <- ggplot(orders.daily[orders.daily$year == 2015, ]
                   , aes(x = weekday
                         , y = week
                         , fill = sales
                         )
                   ) +
  geom_tile(colour = "white") +    # This creates a small rectangular for every date
  geom_text(aes(label = day)) +    # Day numbers inside tiles
  scale_fill_gradient(low = "green"        # This uses a 2-color gradient scale
                      # FIX: was " dark green" - the leading space is not a
                      # valid R color name and makes scale_fill_gradient error.
                      , high = "dark green"
                      , na.value = "gray"
                      ) +
  # facet_rep_wrap is from package "lemon" to keep month labels on every row
  facet_rep_wrap( ~ month    # formula defines which variables identify subsets of data for different facets
                  , ncol = 3 # This is needed to define when to wrap facets
                  , strip.position = "top"
                  , repeat.tick.labels = TRUE
                  ) +
  scale_y_reverse() +                                            # Proper order of weeks
  scale_x_discrete(limits = rev(levels(orders.daily$weekday))) + # Proper order of weekdays
  labs(x = ""
       , y = ""
       , title = "Daily Total Sales, 2015"
       ) +
  theme_bw() +
  theme(strip.background = element_blank()  # No background color
        , strip.placement = "outside"
        , axis.text.y = element_blank()
        )
heatmap2
# Arrange in a grid, similar to base R function par(mfrow = c(1,3))
grid.arrange(heatmap1, heatmap2, nrow = 1, ncol = 2)
# See more at:
# https://cran.r-project.org/web/packages/egg/vignettes/Ecosystem.html
# http://www.sthda.com/english/articles/24-ggpubr-publication-ready-plots/81-ggplot2-easy-way-to-mix-multiple-graphs-on-the-same-page/
#-------------------------------------------------------------------------
#---- Step 8: Rank Visualizations
#-------------------------------------------------------------------------
# Create aggregate sales per subcategory per year
# The derived grouping expression gets an unwieldy auto-generated column
# name, which is renamed to "year" right below.
rank.agg <- aggregate(sales ~ sub.category + format(orders$order.date, "%Y")
                      , orders
                      , sum
                      )
names(rank.agg)[2] <- "year"
rank.agg$year <- as.numeric(rank.agg$year)
# Ranking based on absolute sales values
ggplot(rank.agg, aes(x = year, y = sales, color = sub.category)) +
  geom_line() +
  geom_vline(aes(xintercept = year), linetype="dashed", size = 0.5)
# A few visual adjustments
ggplot(rank.agg, aes(x = year, y = sales, color = sub.category)) +
  geom_line(linetype = "solid"
            , size = 1.2
            ) +
  geom_vline(aes(xintercept = year), linetype="dashed", size = 0.5) +
  scale_x_continuous(breaks = c(2011:2016)
                     , labels = c("", "2012", "2013", "2014", "2015", "")
                     ) +
  coord_cartesian(xlim = c(2011:2016)) +
  labs(title = "Sales rankings across subcategories\n"
       , x = "Year"
       , y = "Total sales"
       ) +
  theme_bw()
# Switching to rank positions
# Demo of rank() semantics: negating the input gives "largest value = rank 1"
rank(c(2, 1, 11, -2))       # Not what we want
rev(rank(c(2, 1, 11, -2)))  # Not what we want
rank(-c(2, 1, 11, -2))      # That's what we want
# Compute within-year sales rank for every subcategory (1 = best seller)
years <- unique(rank.agg$year)
# NOTE(review): seq_along(years) would be safer than 1:length(years) if
# years could ever be empty.
for (i in 1:length(years)) {
  row.index <- which(rank.agg$year == years[i])
  rank.agg$rank[row.index] <- rank(-rank.agg$sales[row.index]) # Reverse the rankings
}
# Plot
# Bump-chart style: within-year rank per subcategory over time
ggplot(rank.agg, aes(x = year, y = rank, color = sub.category)) +
  geom_line(linetype = "solid"
            , size = 1.2
            ) +
  geom_vline(aes(xintercept = year), linetype="dashed", size = 0.5) +
  scale_x_continuous(breaks = c(2011:2016)
                     , labels = c("", "2012", "2013", "2014", "2015", "")
                     ) +
  coord_cartesian(xlim = c(2011:2016)) +
  labs(title = "Sales rankings across subcategories\n"
       , x = "Year"
       , y = "Total sales"
       ) +
  theme_bw()
# Create labels for subcategories and ranks
# Final version: subcategory names printed at both ends of each line, y-axis
# reversed so rank 1 sits on top, legend dropped (labels replace it).
ggplot(rank.agg, aes(x = year, y = rank, color = sub.category)) +
  geom_line(linetype = "solid"
            , size = 1.2
            ) +
  geom_vline(aes(xintercept = year), linetype="dashed", size = 0.5) +
  geom_text(data = rank.agg[rank.agg$year == min(rank.agg$year), ]  # Left side lables
            , aes(label = sub.category
                  , y = rank
                  , x = year - 0.1
                  )
            , color = "black"  # To avoid lables being colored same as lines
            , hjust = 1, size = 3.5
            ) +
  geom_text(data = rank.agg[rank.agg$year == max(rank.agg$year), ]  # Right side lables
            , aes(label = sub.category
                  , y = rank
                  , x = year + 0.1
                  )
            , color = "black"
            , hjust = 0, size = 3.5
            ) +
  scale_x_continuous(breaks = c(2011:2016)
                     , labels = c("", "2012", "2013", "2014", "2015", "")
                     ) +
  scale_y_reverse(breaks = c(min(rank.agg$rank) : max(rank.agg$rank))  # To reverse ranks so that max rank is on top
                  ) +
  coord_cartesian(xlim = c(2011:2016)) +
  labs(title = "Sales rankings across subcategories\n"
       , x = "Year"
       , y = "Ranking"
       ) +
  theme_bw() +
  theme(legend.position = "none")
| /R/Part 4 - Advanced visualizations.R | permissive | sherrytp/bc_f19_econ | R | false | false | 41,230 | r | #-------------------------------------------------------------------------
#------ Advanced plotting in R using ggplot
#------ http://r-statistics.co/Complete-Ggplot2-Tutorial-Part1-With-R-Code.html
#------ http://tutorials.iq.harvard.edu/R/Rgraphics/Rgraphics.html
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#---- Step 1: prepare the environment, load libraries and import data
#-------------------------------------------------------------------------
# Wipe the environment clean
# NOTE(review): rm(list = ls()) is acceptable in an interactive tutorial but
# should never appear in reusable/sourced code.
rm(list = ls())
# Prepare needed libraries
# Install any missing package, then attach every package in the list
library.list <- c("readxl"    # For data import
                  , "ggplot2" # Main package
                  , "scales"  # For managing chart axis labels/values/scale
                  , "lemon"   # Extra things for ggplot
                  , "gridExtra" # Arrange different plots in a grid
                  )
for (i in 1:length(library.list)) {
  if (!library.list[i] %in% rownames(installed.packages())) {
    install.packages(library.list[i])
  }
  library(library.list[i], character.only = TRUE)
}
rm(library.list)
# Set working directory
#setwd("")
# Load the sample orders data that we used for Excel and Tableau, but only keep sales < $2000
# file.choose() opens an interactive file picker - the script expects the
# sample orders workbook (.xlsx) used in the course.
orders <- read_excel(file.choose())
# Our variables have spaces, dashes and upper case letters, which maybe confusing
# Let's fix that
colnames(orders) <- tolower(gsub(" |-", ".", colnames(orders)))
# Keep only observations with sales < 2000 (to ignore outliers)
orders <- orders[orders$sales < 2000, ]
# And this time randomly select and keep only 10% of the data (to speed up graphs)
# NOTE(review): no set.seed() call, so this subsample (and all downstream
# figures) is not reproducible between runs.
orders <- orders[sample(1:nrow(orders), 0.1*nrow(orders)), ]
#-------------------------------------------------------------------------
#---- Step 2: basics of ggplot
#-------------------------------------------------------------------------
# Basic call to ggplot function produces empty plot
ggplot(orders, aes(x = sales, y = profit))
# Add a basic scatter plot
ggplot(orders, aes(x = sales, y = profit)) +
geom_point()
# You can save ggplot output as a valid graphics object
sales.scatter <- ggplot(orders, aes(x = sales, y = profit)) +
geom_point()
# And then simply call it to produce output
sales.scatter
# You can also add features/mappings/aesthetics to it
sales.scatter + geom_smooth()
# Delete some of the points outside limits
sales.scatter +
geom_smooth() +
xlim(c(0, 1000)) +
ylim(c(-2000, 2000))
# Or simply zoom (this retains all points)
sales.scatter +
geom_smooth() +
coord_cartesian(xlim = c(0, 1000)
, ylim = c(-2000, 2000)
)
# For large datasets you should subset first, and then call ggplot
# Otherwise ggplot will take longer time to render
ggplot(orders[orders$sales < 1000, ]
, aes(x = sales, y = profit)
) +
geom_point() +
geom_smooth()
# Add title and axis lables
ggplot(orders, aes(x = sales, y = profit)) +
geom_point() +
geom_smooth() +
labs(title = "Sales vs. Profits"
, subtitle = "for orders less than $2000"
, x = "Total sale, $"
, y = "Total profit, $"
)
# Customize the looks of pglot elements
ggplot(orders, aes(x = sales, y = profit)) +
geom_point(color = "steelblue" # Color of points' outline
, fill = "yellow" # Color of points' fill
, shape = 22 # Shape of points, similar to R's default "pch" parameter
, size = 2 # Size of points
, alpha = 1 # Transparency of points
) +
geom_smooth(color = "firebrick"
, weight = 5 # line thickness
) +
labs(title = "Sales vs. Profits"
, subtitle = "for orders less than $2000"
, x = "Total sale, $"
, y = "Total profit, $"
)
# You could use some of the built-in themes for customization of looks
ggplot(orders, aes(x = sales, y = profit)) +
  geom_point(color = "steelblue"  # Color of points' outline
             , fill = "yellow"    # Color of points' fill
             , shape = 22         # Shape of points, similar to R's default "pch" parameter
             , size = 2           # Size of points
             , alpha = 1          # Transparency of points
             ) +
  geom_smooth(color = "firebrick"
              , weight = 5
              , linetype = "solid" # Dashed, dotted, etc.
              ) +
  labs(title = "Sales vs. Profits"
       , subtitle = "for orders less than $2000"
       , x = "Total sale, $"
       , y = "Total profit, $"
       ) +
  theme_bw()
# Or you could change theme elements yourself
# Each element_text() call styles one text component; theme() overrides
# whatever theme_bw() set for that component.
ggplot(orders, aes(x = sales, y = profit)) +
  geom_point(color = "steelblue"  # Color of points' outline
             , fill = "yellow"    # Color of points' fill
             , shape = 22         # Shape of points, similar to R's default "pch" parameter
             , size = 2           # Size of points
             , alpha = 1          # Transparency of points
             ) +
  labs(title = "Sales vs. Profits"
       , subtitle = "for orders less than $2000"
       , x = "Total sale, $"
       , y = "Total profit, $ \n"
       ) +
  theme_bw() +  # Lines below inside theme() will overwrite Steps of theme_bw()
  theme(plot.title = element_text(hjust = 0.5       # Title alignment, 0 - left, 1 - right
                                  , face = "bold"   # font face - bold, italic, etc.
                                  , size = 20       # font size
                                  , color = "#912600") # font color
        , plot.subtitle = element_text(hjust = 0.5  # Horizontal alignment, 0.5 = center
                                       , face = "bold" # Bold font
                                       , size = 14     # Font size
                                       , color = "#912600" # Font color
                                       )
        , axis.title.x = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.x = element_text(face = "bold"
                                     , vjust = 0.5  # Vertical alignment, 0.5 = middle
                                     , size = 12
                                     )
        , axis.title.y = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.y = element_text(face = "bold"
                                     , vjust = 0.5
                                     , size = 12
                                     )
        )
# You could also use variable color/shape/fill mapping inside aes()
# Mapping inside aes() makes color/shape DATA-driven (one per category/market)
# instead of a fixed constant, and automatically creates a legend.
ggplot(orders
       , aes(x = sales
             , y = profit
             , color = category
             , shape = market
             )
       ) +
  geom_point(size = 2  # Size of points
             , alpha = 1 # Transparency of points
             ) +
  labs(title = "Sales vs. Profits"
       , subtitle = "for orders less than $2000"
       , x = "Total sale, $"
       , y = "Total profit, $"
       ) +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5
                                  , face = "bold"
                                  , size = 20
                                  , color = "#912600"
                                  )
        , plot.subtitle = element_text(hjust = 0.5
                                       , face = "bold"
                                       , size = 14
                                       , color = "#912600"
                                       )
        , axis.title.x = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.x = element_text(face = "bold"
                                     , vjust = 0.5
                                     , size = 12
                                     )
        , axis.title.y = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.y = element_text(face = "bold"
                                     , vjust = 0.5
                                     , size = 12
                                     )
        )
# Now you have a legend, which you can also customize inside theme()
# Same scatter as above, now with the legend moved inside the plot area
# (normalized coordinates c(0.1, 0.2)) and boxed with a solid border.
sales.scatter <- ggplot(orders
                        , aes(x = sales
                              , y = profit
                              , color = category
                              , shape = market
                              )
                        ) +
  geom_point(size = 2   # Size of points
             , alpha = 1 # Transparency of points
             ) +
  labs(title = "Sales vs. Profits"
       , subtitle = "for orders less than $2000"
       , x = "Total sale, $"
       , y = "Total profit, $"
       ) +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5
                                  , face = "bold"
                                  , size = 20
                                  , color = "#912600"
                                  )
        , plot.subtitle = element_text(hjust = 0.5
                                       , face = "bold"
                                       , size = 14
                                       , color = "#912600"
                                       )
        , axis.title.x = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.x = element_text(face = "bold"
                                     , vjust = 0.5
                                     , size = 12
                                     )
        , axis.title.y = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.y = element_text(face = "bold"
                                     , vjust = 0.5
                                     , size = 12
                                     )
        , legend.text = element_text(face = "bold"
                                     , size = 12
                                     )
        , legend.position = c(0.1, 0.2)      # Or use "bottom", "top", "left", "right"
        , legend.key.size = unit(1, 'lines') # Size of elements inside legend box
        , legend.title = element_blank()     # Hides legend title
        , legend.background = element_rect(size = 0.5
                                           , linetype = "solid"
                                           , color = "black"
                                           )
        )
sales.scatter
# Export the result
# Standard device pattern: open the png device, print the plot, close with dev.off()
png(file = "sales.scatter.png"
    , width = 1920
    , height = 1200
    , res = 148
    )
sales.scatter
dev.off()
#-------------------------------------------------------------------------
#---- Step 3: histogram
#-------------------------------------------------------------------------
# Histogram
# See more at http://www.sthda.com/english/wiki/ggplot2-histogram-plot-quick-start-guide-r-software-and-data-visualization
ggplot(orders, aes(x = sales)) +
geom_histogram()
# Change the number of bins
ggplot(orders, aes(x = sales)) +
geom_histogram(bins = 50)
# Or change the width of bins
ggplot(orders, aes(x = sales)) +
geom_histogram(binwidth = 100)
# To make histograms of different variables compararable, one needs to use % in y axis
# It will also help to have more ticks on x-axis
ggplot(orders, aes(x = sales)) +
geom_histogram(binwidth = 50
, aes(y = ..count../sum(..count..)) # ..count../sum(..count..) is an internal ggplot object
) +
scale_y_continuous(labels = percent # This is where the package "scales" gets used first time
) +
scale_x_continuous(breaks = seq(from = 0 # Adjust x-axis ticks
, to = 2000
, by = 200
)
, labels = seq(from = 0 # Adjust labels for ticks
, to = 2000
, by = 200
)
)
# Add custom lines and text to the chart
ggplot(orders, aes(x = sales)) +
geom_histogram(binwidth = 50
, aes(y = ..count../sum(..count..))
) +
scale_y_continuous(labels = percent
, limits = c(0, 0.4)
) +
scale_x_continuous(breaks = seq(from = 0 # Adjust x-axis ticks
, to = 2000
, by = 200
)
, labels = seq(from = 0 # Adjust labels for ticks
, to = 2000
, by = 200
)
) +
geom_vline(aes(xintercept = mean(sales)) # Adds a vertical line
) +
geom_vline(aes(xintercept = median(sales))
) +
annotate(geom = "text" # Adds a text box
, x = mean(orders$sales) + 100
, y = 0.35
, label = paste0("Mean = "
, round(mean(orders$sales), 2)
)
) +
annotate(geom = "text"
, x = median(orders$sales) - 100
, y = 0.35
, label = paste0("Median = "
, round(median(orders$sales),2)
)
)
# Adjust the looks
# Fully styled version of the percentage histogram, stored as sales.hist:
# colored bars, dashed mean/median reference lines with matching annotations.
sales.hist <- ggplot(orders, aes(x = sales)) +
  geom_histogram(bins = 50
                 , aes(y = ..count../sum(..count..))
                 , color = "darkblue"
                 , fill = "lightblue"
                 ) +
  scale_y_continuous(labels = percent
                     , limits = c(0, 0.4)
                     ) +
  scale_x_continuous(breaks = seq(from = 0  # Adjust x-axis ticks
                                  , to = 2000
                                  , by = 200
                                  )
                     , labels = seq(from = 0  # Adjust labels for ticks
                                    , to = 2000
                                    , by = 200
                                    )
                     ) +
  geom_vline(aes(xintercept = mean(sales))
             , color = "red"
             , linetype = "dashed"
             , size = 1.2
             ) +
  geom_vline(aes(xintercept = median(sales))
             , color = "dark green"
             , linetype = "dashed"
             , size = 1.2
             ) +
  annotate(geom = "text"
           , x = mean(orders$sales) + 100
           , y = 0.35
           , color = "red"
           , size = 4
           , fontface = "bold"
           , label = paste0("Mean\n=\n"
                            , round(mean(orders$sales), 2)
                            )
           , lineheight = 0.75  # Reduce line spacing
           ) +
  annotate(geom = "text"
           , x = median(orders$sales) - 100
           , y = 0.35
           , color = "dark green"
           , size = 4
           , fontface = "bold"
           , label = paste0("Median\n=\n"
                            , round(median(orders$sales),2)
                            )
           , lineheight = 0.75  # Reduce line spacing
           ) +
  labs(title = "Sales Distribution"
       , subtitle = "for orders less than $2000"
       , x = "Total sale, $"
       , y = "Number of orders"
       ) +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5
                                  , face = "bold"
                                  , size = 20
                                  , color = "#912600"
                                  )
        , plot.subtitle = element_text(hjust = 0.5
                                       , face = "bold"
                                       , size = 14
                                       , color = "#912600"
                                       )
        , axis.title.x = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.x = element_text(face = "bold"
                                     , vjust = 0.5
                                     , size = 12
                                     )
        , axis.title.y = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.y = element_text(face = "bold"
                                     , vjust = 0.5
                                     , size = 12
                                     )
        )
sales.hist
# Add a density curve to a histogram
sales.hist +
  geom_density(aes(y = ..density..))
# That doesn't look right because our histogram and density use different y scale
# So, we need to change hist to have density
# Rebuild sales.hist on the density scale so histogram bars and the kernel
# density curve share the same y-axis. Annotation y positions change
# accordingly (0.35 on the percent scale -> 0.0075 on the density scale).
sales.hist <- ggplot(orders, aes(x = sales)) +
  geom_histogram(bins = 50
                 , aes(y = ..density..)  # Here we now have density
                 , color = "darkblue"
                 , fill = "lightblue"
                 ) +
  geom_density(aes(y = ..density..)
               , color = "darkblue"
               , size = 1.2
               ) +
  # scale_y_continuous() +
  scale_x_continuous(breaks = seq(from = 0
                                  , to = 2000
                                  , by = 200
                                  )
                     , labels = seq(from = 0
                                    , to = 2000
                                    , by = 200
                                    )
                     ) +
  geom_vline(aes(xintercept = mean(sales))
             , color = "red"
             , linetype = "dashed"
             , size = 1.2
             ) +
  geom_vline(aes(xintercept = median(sales))
             , color = "dark green"
             , linetype = "dashed"
             , size = 1.2
             ) +
  annotate(geom = "text"
           , x = mean(orders$sales) + 100
           , y = 0.0075  # This needs to be changed to accomodate new scale
           , color = "red"
           , size = 4
           , fontface = "bold"
           , label = paste0("Mean\n=\n"
                            , round(mean(orders$sales), 2)
                            )
           , lineheight = 0.75  # Reduce line spacing
           ) +
  annotate(geom = "text"
           , x = median(orders$sales) - 100
           , y = 0.0075  # This needs to be changed to accomodate new scale
           , color = "dark green"
           , size = 4
           , fontface = "bold"
           , label = paste0("Median\n=\n"
                            , round(median(orders$sales),2)
                            )
           , lineheight = 0.75  # Reduce line spacing
           ) +
  labs(title = "Sales Distribution"
       , subtitle = "for orders less than $2000"
       , x = "Total sale, $"
       , y = ""
       ) +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5
                                  , face = "bold"
                                  , size = 20
                                  , color = "#912600"
                                  )
        , plot.subtitle = element_text(hjust = 0.5
                                       , face = "bold"
                                       , size = 14
                                       , color = "#912600"
                                       )
        , axis.title.x = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.x = element_text(face = "bold"
                                     , vjust = 0.5
                                     , size = 12
                                     )
        , axis.title.y = element_text(face = "bold"
                                      , color = "#912600"
                                      , size = 14
                                      )
        , axis.text.y = element_text(face = "bold"
                                     , vjust = 0.5
                                     , size = 12
                                     )
        )
sales.hist
# We can also add a normal distribution density
# stat_function overlays the theoretical N(mean, sd) density fitted to sales,
# for visual comparison against the empirical kernel density.
sales.hist +
  stat_function(fun = dnorm
                , n = nrow(orders)
                , args = list(mean = mean(orders$sales), sd = sd(orders$sales))
                , color = "purple"
                , size = 1.2
                ) +
  annotate(geom = "text"
           , x = 400
           , y = 0.0015
           , color = "purple"
           , size = 4
           , fontface = "bold"
           , label = "Normal denstity"
           , lineheight = 0.75  # Reduce line spacing
           , hjust = 0          # Make text be on the left of x position
           )
# Multiple histograms don't fill well on one graph
# Overlaid/stacked histograms by category are hard to read...
ggplot(orders, aes(x = sales
                   #, color = category  # this doesn't work for bars
                   #, shape = category  # this doesn't work either
                   , fill = category    # Using category to define fill color
                   )
       ) +
  geom_histogram(binwidth = 50
                 , aes(y = ..count../sum(..count..))
                 , color = "darkblue"  # Border color of bars is the same for everyone
                 #, position = "dodge" # This is even worse
                 ) +
  scale_y_continuous(labels = percent) +
  scale_x_continuous(breaks = seq(from = 0  # Adjust x-axis ticks
                                  , to = 2000
                                  , by = 200
                                  )
                     , labels = seq(from = 0  # Adjust labels for ticks
                                    , to = 2000
                                    , by = 200
                                    )
                     ) +
  theme_bw()
# So it's better to put them in a facet grid
# ...so give each category its own panel instead (one column, three rows)
ggplot(orders, aes(x = sales
                   , fill = category
                   )
       ) +
  geom_histogram(binwidth = 50
                 , aes(y = ..count../sum(..count..))
                 , color = "darkblue"
                 ) +
  scale_y_continuous(labels = percent) +
  scale_x_continuous(breaks = seq(from = 0  # Adjust x-axis ticks
                                  , to = 2000
                                  , by = 200
                                  )
                     , labels = seq(from = 0  # Adjust labels for ticks
                                    , to = 2000
                                    , by = 200
                                    )
                     ) +
  facet_wrap( ~ category
              , scale = "fixed"  # Use "free" if same scale is not good
              , ncol = 1
              , nrow = 3
              , strip.position = "bottom") +
  theme_bw() +
  theme(strip.text.x = element_text(size = 12
                                    , face = "bold"
                                    )
        , strip.background = element_blank()  # No background color
        , strip.placement = "outside"
        , legend.position = "none"  # Don't show legend, since we have facet labels
        )
#-------------------------------------------------------------------------
#---- Step 4: Box plot
#-------------------------------------------------------------------------
# Box plot, vertical
ggplot(orders, aes( x = category
, y = sales
, fill = category
)
) +
geom_boxplot() +
theme_bw()
# Box plot, horizontal
ggplot(orders, aes(x = category
, y = sales
, fill = category
)
) +
geom_boxplot() +
coord_flip() + # Flip axis around, can be used in any ggplot object (note that you can't switch x and y in aes())
theme_bw()
# Add another dimension
ggplot(orders, aes(x = order.priority
, y = sales
, fill = category
)
) +
geom_boxplot() +
theme_bw()
# Priority is in random order because it's a character variable
# We need to make it an ordered factor for ggplot to properly use it
orders$order.priority <- factor(orders$order.priority
, levels = c("Low"
, "Medium"
, "High"
, "Critical"
)
, ordered = TRUE
)
ggplot(orders, aes(x = order.priority
, y = sales
, fill = category
)
) +
geom_boxplot() +
theme_bw()
# Customize
ggplot(orders, aes(x = order.priority
, y = sales
, fill = category
)
) +
geom_boxplot(alpha = 0.5 # Transparancy of fill color, 1 = none
, color = "black" # Border color of boxes
, size = 0.5 # Border size
) +
scale_fill_manual(values = c("dark orange" # This allows to use your own fill colors
, "dark green"
, "dark blue"
)
) +
scale_y_continuous(breaks = seq(from = 0 # Adjust x-axis ticks
, to = 2000
, by = 200
)
, labels = seq(from = 0 # Adjust labels for ticks
, to = 2000
, by = 200
)
) +
labs(title = "Sales vs order priority"
, subtitle = "for orders less than $2000"
, x = "Order priority"
, y = "Sales, $"
) +
coord_flip() +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5
, face = "bold"
, size = 20
, color = "#912600"
)
, plot.subtitle = element_text(hjust = 0.5
, face = "bold"
, size = 14
, color = "#912600"
)
, axis.title.x = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.x = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
, axis.title.y = element_text(face = "bold"
, color = "#912600"
, size = 14
)
, axis.text.y = element_text(face = "bold"
, vjust = 0.5
, size = 12
)
)
#-------------------------------------------------------------------------
#---- Step 5: Bar chart
#-------------------------------------------------------------------------
# Stacked bars, absolute values
ggplot(orders, aes(x = market
, y = sales/1000
, fill = category
)
) +
geom_bar(stat = "identity"
, position = "stack"
)
# Stacked bars, relative values
# position = "fill" normalises each bar to 100%, showing category shares per market
ggplot(orders, aes(x = market
                   , y = sales/1000
                   , fill = category
                   )
       ) +
  # FIX: stat was misspelled "identitu", which makes ggplot error at render
  # time ("No stat called StatIdentitu"); must be "identity" as in the
  # surrounding examples.
  geom_bar(stat = "identity"
           , position = "fill"
           )
# Non-stacked bars
ggplot(orders, aes(x = market
                   , y = sales/1000
                   , fill = category
                   )
       ) +
  geom_bar(stat = "identity"
           , position = "dodge"
           , width = 0.7  # And this defines the width of bars themselves
           )
# Add data labels
# This version mislabels: geom_text draws one label per ROW of orders,
# not one per bar - demonstrated on purpose before the aggregation below.
ggplot(orders, aes(x = market
                   , y = sales/1000
                   , fill = category
                   )
       ) +
  geom_bar(stat = "identity"
           , position = position_dodge(0.7)  # Number inside defines width of "dodging" between groups
           , width = 0.7
           ) +
  geom_text(aes(label = sales/1000
                , group = category)
            )
# That's because we need to aggregate data first
orders.agg <- aggregate(sales ~ market + category
                        , orders
                        , sum)
# And now it will work
# One pre-aggregated row per market x category = one label per bar
ggplot(orders.agg, aes(x = market
                       , y = sales/1000
                       , fill = category
                       )
       ) +
  geom_bar(stat = "identity"
           , position = position_dodge(0.7)
           , width = 0.7
           , color = "black"
           ) +
  geom_text(aes(label = round(sales/1000, 0))  # This defines what to use as lables
            , position = position_dodge(0.7)   # Shoud be the same number as in geom_bar()
            , vjust = -0.5                     # This positions labels a little above bars
            ) +
  theme_bw()
#-------------------------------------------------------------------------
#---- Step 6: Pie chart
#-------------------------------------------------------------------------
# Very similar to bar chart:
orders.pie <- aggregate(sales ~ market, orders, sum)
ggplot(orders.pie, aes(x = ""
, y = sales
, fill = market
)
) +
geom_bar(width = 1
, stat = "identity"
) +
coord_polar(theta = "y" # This defines which variable maps the angle of a circle
, start = 0 # First item starts at 12 o'clock
) +
theme_bw()
# Customize:
ggplot(orders.pie, aes(x = ""
, y = sales
, fill = market
)
) +
geom_bar(width = 1
, stat = "identity"
, position = "fill"
, color = "black"
) +
scale_fill_brewer(palette = "Accent") +
geom_text(aes(label = percent(round(sales / sum(sales), 2)))
, position = position_fill(vjust = 0.5)
, size = 6
, fontface = "bold"
) +
coord_polar(theta = "y"
, start = 0
) +
labs(title = "Market Shares"
) +
theme_void() +
theme(plot.title = element_text(hjust = 0.5
, face = "bold"
, size = 20
, color = "#912600"
)
, plot.subtitle = element_text(hjust = 0.5
, face = "bold"
, size = 14
, color = "#912600"
)
, axis.title.x = element_blank()
, axis.text.x = element_blank()
, axis.title.y = element_blank()
, axis.text.y = element_blank()
)
#-------------------------------------------------------------------------
#---- Step 7: Calendar Heatmap
#---- See more at http://r-statistics.co/Top50-Ggplot2-Visualizations-MasterList-R-Code.html
#-------------------------------------------------------------------------
# Change order.date to type "date"
orders$order.date <- as.Date(orders$order.date, format = "%Y-%m-%d")
# First we need to aggregate sales by date
orders.agg <- aggregate(sales ~ order.date
, orders
, sum
)
# Next we need to create a dataset with all the dates from 2012-2015:
orders.dates <- data.frame(order.date = seq(from = as.Date("2012-01-01")
, to = as.Date("2015-12-31")
, by = "day"
)
)
# Now merge two together:
orders.daily <- merge(orders.dates, orders.agg
, by = "order.date"
, all.x = TRUE
)
rm(orders.agg)
rm(orders.dates)
# Next we need to create calendar dimensions: days, weeks, months, quaters and years
# Date formats:
# %Y: 4-digit year (1982)
# %y: 2-digit year (82)
# %m: 2-digit month (01)
# %d: 2-digit day of the month (13)
# %A: weekday (Wednesday)
# %a: abbreviated weekday (Wed)
# %B: month (January)
# %b: abbreviated month (Jan)
# Days:
orders.daily$day <- as.numeric(format(orders.daily$order.date, "%d"))
orders.daily$weekday <- factor(format(orders.daily$order.date, "%a") # Alternatively, use weekdays() function
, levels = rev(c("Mon"
, "Tue"
, "Wed"
, "Thu"
, "Fri"
, "Sat"
, "Sun"
)
)
, ordered = TRUE
)
# Week of month, as a difference between week of year current and week of year for 1st day of month
# Calculate week of the year
orders.daily$week <- as.numeric(format(orders.daily$order.date, "%W")) + 1
# Calculate week of year number for 1st day of every month
tmp <- as.numeric(format(as.Date(cut(orders.daily$order.date, "month")), "%W"))
orders.daily$week <- orders.daily$week - tmp
# Months:
orders.daily$month <- factor(format(orders.daily$order.date, "%b") # Alternatively, use months() function
, levels = c("Jan"
, "Feb"
, "Mar"
, "Apr"
, "May"
, "Jun"
, "Jul"
, "Aug"
, "Sep"
, "Oct"
, "Nov"
, "Dec"
)
, ordered = TRUE
)
# Quaters:
orders.daily$quarter <- factor(quarters(orders.daily$order.date)
, levels = c("Q1"
, "Q2"
, "Q3"
, "Q4"
)
, labels = c("", "", "", "") # To avoid seeing Q1 in pictures
, ordered = TRUE
)
# Years:
orders.daily$year <- format(orders.daily$order.date, "%Y")
# Now we can use tiles and facetts to arrange everything
# Calendar heatmap across all years: one tile per day, facet grid of
# year (rows) x month (columns); fill encodes daily total sales, gray = no sales.
heatmap1 <- ggplot(orders.daily
                   , aes(x = week
                         , y = weekday
                         , fill = sales
                         )
                   ) +
  geom_tile(colour = "white") +       # This creates a small rectangular for every date
  facet_grid(year ~ month) +
  scale_fill_gradient(low = "green"   # This uses a 2-color gradient scale
                      , high = "dark green"
                      , na.value = "gray"
                      ) +
  scale_x_continuous(breaks = c(1, 2, 3, 4, 5, 6)
                     , labels = c("1", "2", "3", "4", "5", "6")
                     ) +
  labs(x = "Week of Month"
       , y = ""
       , title = "Daily Total Sales"
       ) +
  theme_bw()
heatmap1
# Alternatively, we can look at months per quarter per year
# Single-year (2015) calendar heatmap: one facet per month, weekday on x,
# week-of-month on y (reversed so week 1 is on top), day number printed in
# each tile. NOTE: orders.daily$year is character, so `year == 2015` relies
# on R coercing 2015 to "2015" - works, but `year == "2015"` would be clearer.
heatmap2 <- ggplot(orders.daily[orders.daily$year == 2015, ]
                   , aes(x = weekday
                         , y = week
                         , fill = sales
                         )
                   ) +
  geom_tile(colour = "white") +    # This creates a small rectangular for every date
  geom_text(aes(label = day)) +    # Day numbers inside tiles
  scale_fill_gradient(low = "green"        # This uses a 2-color gradient scale
                      # FIX: was " dark green" - the leading space is not a
                      # valid R color name and makes scale_fill_gradient error.
                      , high = "dark green"
                      , na.value = "gray"
                      ) +
  # facet_rep_wrap is from package "lemon" to keep month labels on every row
  facet_rep_wrap( ~ month    # formula defines which variables identify subsets of data for different facets
                  , ncol = 3 # This is needed to define when to wrap facets
                  , strip.position = "top"
                  , repeat.tick.labels = TRUE
                  ) +
  scale_y_reverse() +                                            # Proper order of weeks
  scale_x_discrete(limits = rev(levels(orders.daily$weekday))) + # Proper order of weekdays
  labs(x = ""
       , y = ""
       , title = "Daily Total Sales, 2015"
       ) +
  theme_bw() +
  theme(strip.background = element_blank()  # No background color
        , strip.placement = "outside"
        , axis.text.y = element_blank()
        )
heatmap2
# Arrange in a grid, similar to base R function par(mfrow = c(1,3))
grid.arrange(heatmap1, heatmap2, nrow = 1, ncol = 2)
# See more at:
# https://cran.r-project.org/web/packages/egg/vignettes/Ecosystem.html
# http://www.sthda.com/english/articles/24-ggpubr-publication-ready-plots/81-ggplot2-easy-way-to-mix-multiple-graphs-on-the-same-page/
#-------------------------------------------------------------------------
#---- Step 8: Rank Visualizations
#-------------------------------------------------------------------------
# Create aggregate sales per subcategory per year
# The derived grouping expression gets an unwieldy auto-generated column
# name, which is renamed to "year" right below.
rank.agg <- aggregate(sales ~ sub.category + format(orders$order.date, "%Y")
                      , orders
                      , sum
                      )
names(rank.agg)[2] <- "year"
rank.agg$year <- as.numeric(rank.agg$year)
# Ranking based on absolute sales values
ggplot(rank.agg, aes(x = year, y = sales, color = sub.category)) +
  geom_line() +
  geom_vline(aes(xintercept = year), linetype="dashed", size = 0.5)
# A few visual adjustments
ggplot(rank.agg, aes(x = year, y = sales, color = sub.category)) +
  geom_line(linetype = "solid"
            , size = 1.2
            ) +
  geom_vline(aes(xintercept = year), linetype="dashed", size = 0.5) +
  scale_x_continuous(breaks = c(2011:2016)
                     , labels = c("", "2012", "2013", "2014", "2015", "")
                     ) +
  coord_cartesian(xlim = c(2011:2016)) +
  labs(title = "Sales rankings across subcategories\n"
       , x = "Year"
       , y = "Total sales"
       ) +
  theme_bw()
# Switching to rank positions
# Demo of rank() semantics: negating the input gives "largest value = rank 1"
rank(c(2, 1, 11, -2))       # Not what we want
rev(rank(c(2, 1, 11, -2)))  # Not what we want
rank(-c(2, 1, 11, -2))      # That's what we want
# Compute within-year sales rank for every subcategory (1 = best seller)
years <- unique(rank.agg$year)
# NOTE(review): seq_along(years) would be safer than 1:length(years) if
# years could ever be empty.
for (i in 1:length(years)) {
  row.index <- which(rank.agg$year == years[i])
  rank.agg$rank[row.index] <- rank(-rank.agg$sales[row.index]) # Reverse the rankings
}
# Plot
# Bump-chart style: within-year rank per subcategory over time
ggplot(rank.agg, aes(x = year, y = rank, color = sub.category)) +
  geom_line(linetype = "solid"
            , size = 1.2
            ) +
  geom_vline(aes(xintercept = year), linetype="dashed", size = 0.5) +
  scale_x_continuous(breaks = c(2011:2016)
                     , labels = c("", "2012", "2013", "2014", "2015", "")
                     ) +
  coord_cartesian(xlim = c(2011:2016)) +
  labs(title = "Sales rankings across subcategories\n"
       , x = "Year"
       , y = "Total sales"
       ) +
  theme_bw()
# Create labels for subcategories and ranks
# Final version: subcategory names printed at both ends of each line, y-axis
# reversed so rank 1 sits on top, legend dropped (labels replace it).
ggplot(rank.agg, aes(x = year, y = rank, color = sub.category)) +
  geom_line(linetype = "solid"
            , size = 1.2
            ) +
  geom_vline(aes(xintercept = year), linetype="dashed", size = 0.5) +
  geom_text(data = rank.agg[rank.agg$year == min(rank.agg$year), ]  # Left side lables
            , aes(label = sub.category
                  , y = rank
                  , x = year - 0.1
                  )
            , color = "black"  # To avoid lables being colored same as lines
            , hjust = 1, size = 3.5
            ) +
  geom_text(data = rank.agg[rank.agg$year == max(rank.agg$year), ]  # Right side lables
            , aes(label = sub.category
                  , y = rank
                  , x = year + 0.1
                  )
            , color = "black"
            , hjust = 0, size = 3.5
            ) +
  scale_x_continuous(breaks = c(2011:2016)
                     , labels = c("", "2012", "2013", "2014", "2015", "")
                     ) +
  scale_y_reverse(breaks = c(min(rank.agg$rank) : max(rank.agg$rank))  # To reverse ranks so that max rank is on top
                  ) +
  coord_cartesian(xlim = c(2011:2016)) +
  labs(title = "Sales rankings across subcategories\n"
       , x = "Year"
       , y = "Ranking"
       ) +
  theme_bw() +
  theme(legend.position = "none")
|
library("adegenet", lib.loc="/Library/Frameworks/R.framework/Versions/3.4/Resources/library")
library("ade4", lib.loc="/Library/Frameworks/R.framework/Versions/3.4/Resources/library")
library("factoextra", lib.loc="/Library/Frameworks/R.framework/Versions/3.4/Resources/library")
setwd("/Volumes/Brumfield_Lab_Drive/River_islands/1_analysis/structure_files_seqcap_pop_random_snp/")
# replace all these with info for your species
# Saltator_coerulescens
# Saltator coerulescens
# Saltator_coerulescens
# 1018 with your number of snps
# xlim=c(-15, 15), ylim=c(-15, 15),
Saltator_coerulescens <- read.structure("Saltator_coerulescens_SNPs_phased_rmIndels_75_QC_DP_random_seqcap_pop_structure.AmazonOnly.str",
n.ind=11, n.loc=2065, onerowperind=FALSE,
col.lab=1, col.pop=0, col.others=0, row.marknames=0)
setwd("/Volumes/Brumfield_Lab_Drive/River_islands/3_results/dapc_pca/")
dir.create("Saltator_coerulescens_AmazonOnly")
setwd("/Volumes/Brumfield_Lab_Drive/River_islands/3_results/dapc_pca/Saltator_coerulescens_AmazonOnly")
Saltator_coerulescens.X <- tab(Saltator_coerulescens, freq=TRUE, NA.method = "mean")
Saltator_coerulescens.X <- scaleGen(Saltator_coerulescens, NA.method = "mean")
sum(is.na(Saltator_coerulescens$tab)) #amount of missing data
Saltator_coerulescens.X[1:2,1:2]
Saltator_coerulescens.pca <- dudi.pca(Saltator_coerulescens.X, scale=FALSE, scannf = FALSE, nf = 4)
Saltator_coerulescens.pca$eig[1]/sum(Saltator_coerulescens.pca$eig) * 100
Saltator_coerulescens.pca$eig[2]/sum(Saltator_coerulescens.pca$eig) * 100
Saltator_coerulescens.pca$li
Saltator_coerulescens.grp <- find.clusters(Saltator_coerulescens, n.pca = 100)
xval <- xvalDapc(Saltator_coerulescens.X, Saltator_coerulescens.grp$grp, n.pca.max = 20, training.set = 0.9,
result = "groupMean", center = TRUE, scale = FALSE,
n.pca = NULL, n.rep = 30, xval.plot = TRUE)
xval$DAPC$n.pca
# Run the DAPC on the cluster assignment and optimise the a-score to pick
# the number of retained PCs.
dapc1 <- dapc(Saltator_coerulescens.X, Saltator_coerulescens.grp$grp)
temp1 <- optim.a.score(dapc1)
# FIX: the original inspected an undefined object `temp` (likely a leftover
# from an earlier session); the optim.a.score result is stored in `temp1`.
# NOTE(review): confirm the fields inspected below exist on the
# optim.a.score return value in your adegenet version.
names(temp1)
temp1$tab[1:2,1:2]
temp1$pop.score
#use these to save as high quality figures
tiff("Plot.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Plot .png", width = 6, height = 6, units = 'in',
res = 800)
bitmap("Plot.tiff", height = 6, width = 6, units = 'in',
type="tifflzw", res=800)
# run the desired format above then the plot, then this dev.off command
dev.off()
par(mfrow = c(1,1))
# pca with colored dots
tiff("Saltator_coerulescens_colors.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Saltator_coerulescens_colors.png", width = 6, height = 6, units = 'in',
res = 800)
colorplot(Saltator_coerulescens.pca$li, Saltator_coerulescens.pca$li, add.plot=FALSE, cex=2,
xlim=c(-20, 65), ylim=c(-25, 40),
xlab="PC 1 (23.0%)", ylab="PC 2 (9.7%)",
main = "PCA of Saltator coerulescens 1018 SNPs \naxes 1-2")
abline(v=0,h=0,col="grey", lty=2)
dev.off()
# pca with labeled points
tiff("Saltator_coerulescens_labeled.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Saltator_coerulescens_labeled.png", width = 12, height = 12, units = 'in',
res = 800)
fviz_pca_ind(Saltator_coerulescens.pca,
xlim=c(-20, 65), ylim=c(-25, 40),
repel = TRUE,
title = "PCA of Saltator coerulescens 1018 SNPs \naxes 1-2"
)
dev.off()
# pca without labeled points - groups
tiff("Saltator_coerulescens_groups.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Saltator_coerulescens_groups.png", width = 6, height = 6, units = 'in',
res = 800)
fviz_pca_ind(Saltator_coerulescens.pca,
xlim=c(-20, 65), ylim=c(-25, 40),
geom = "point",
title = "PCA of Saltator coerulescens 1018 SNPs \naxes 1-2"
) + geom_point(aes(colour = factor(dapc1$grp)), size = 3)
dev.off()
# pca without labeled points - black
tiff("Saltator_coerulescens_black.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Saltator_coerulescens_black.png", width = 6, height = 6, units = 'in',
res = 800)
fviz_pca_ind(Saltator_coerulescens.pca,
geom = "point",
xlim=c(-20, 65), ylim=c(-25, 40),
title = "PCA of Saltator coerulescens 1018 SNPs \naxes 1-2"
) + geom_point(size = 3)
dev.off()
# save these to a text file
indNames(Saltator_coerulescens)
xval$DAPC$assign
xval[4]
xval[6]
# group assignment probabilities
round(xval$DAPC$posterior,6)
round(dapc1$posterior,6)
#summary(xval$DAPC)
assignplot(xval$DAPC)
assignplot(dapc1)
contrib <- loadingplot(dapc1$var.contr)
contrib <- loadingplot(dapc1$var.contr, threshold = 0.003,
thres=.07, lab.jitter=1)
compoplot(xval$DAPC, show.lab = TRUE, posi=list(x=12,y=-.01), cleg=.7)
#xval[2:6]
#xval$DAPC$assign
| /DAPC/dapc_pca_Saltator_coerulescens.R | permissive | henicorhina/Riverine_islands_code | R | false | false | 4,962 | r | library("adegenet", lib.loc="/Library/Frameworks/R.framework/Versions/3.4/Resources/library")
library("ade4", lib.loc="/Library/Frameworks/R.framework/Versions/3.4/Resources/library")
library("factoextra", lib.loc="/Library/Frameworks/R.framework/Versions/3.4/Resources/library")
setwd("/Volumes/Brumfield_Lab_Drive/River_islands/1_analysis/structure_files_seqcap_pop_random_snp/")
# replace all these with info for your species
# Saltator_coerulescens
# Saltator coerulescens
# Saltator_coerulescens
# 1018 with your number of snps
# xlim=c(-15, 15), ylim=c(-15, 15),
Saltator_coerulescens <- read.structure("Saltator_coerulescens_SNPs_phased_rmIndels_75_QC_DP_random_seqcap_pop_structure.AmazonOnly.str",
n.ind=11, n.loc=2065, onerowperind=FALSE,
col.lab=1, col.pop=0, col.others=0, row.marknames=0)
setwd("/Volumes/Brumfield_Lab_Drive/River_islands/3_results/dapc_pca/")
dir.create("Saltator_coerulescens_AmazonOnly")
setwd("/Volumes/Brumfield_Lab_Drive/River_islands/3_results/dapc_pca/Saltator_coerulescens_AmazonOnly")
Saltator_coerulescens.X <- tab(Saltator_coerulescens, freq=TRUE, NA.method = "mean")
Saltator_coerulescens.X <- scaleGen(Saltator_coerulescens, NA.method = "mean")
sum(is.na(Saltator_coerulescens$tab)) #amount of missing data
Saltator_coerulescens.X[1:2,1:2]
Saltator_coerulescens.pca <- dudi.pca(Saltator_coerulescens.X, scale=FALSE, scannf = FALSE, nf = 4)
Saltator_coerulescens.pca$eig[1]/sum(Saltator_coerulescens.pca$eig) * 100
Saltator_coerulescens.pca$eig[2]/sum(Saltator_coerulescens.pca$eig) * 100
Saltator_coerulescens.pca$li
Saltator_coerulescens.grp <- find.clusters(Saltator_coerulescens, n.pca = 100)
xval <- xvalDapc(Saltator_coerulescens.X, Saltator_coerulescens.grp$grp, n.pca.max = 20, training.set = 0.9,
result = "groupMean", center = TRUE, scale = FALSE,
n.pca = NULL, n.rep = 30, xval.plot = TRUE)
xval$DAPC$n.pca
dapc1 <- dapc(Saltator_coerulescens.X, Saltator_coerulescens.grp$grp)
temp1 <- optim.a.score(dapc1)
names(temp)
temp$tab[1:2,1:2]
temp$pop.score
#use these to save as high quality figures
tiff("Plot.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Plot .png", width = 6, height = 6, units = 'in',
res = 800)
bitmap("Plot.tiff", height = 6, width = 6, units = 'in',
type="tifflzw", res=800)
# run the desired format above then the plot, then this dev.off command
dev.off()
par(mfrow = c(1,1))
# pca with colored dots
tiff("Saltator_coerulescens_colors.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Saltator_coerulescens_colors.png", width = 6, height = 6, units = 'in',
res = 800)
colorplot(Saltator_coerulescens.pca$li, Saltator_coerulescens.pca$li, add.plot=FALSE, cex=2,
xlim=c(-20, 65), ylim=c(-25, 40),
xlab="PC 1 (23.0%)", ylab="PC 2 (9.7%)",
main = "PCA of Saltator coerulescens 1018 SNPs \naxes 1-2")
abline(v=0,h=0,col="grey", lty=2)
dev.off()
# pca with labeled points
tiff("Saltator_coerulescens_labeled.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Saltator_coerulescens_labeled.png", width = 12, height = 12, units = 'in',
res = 800)
fviz_pca_ind(Saltator_coerulescens.pca,
xlim=c(-20, 65), ylim=c(-25, 40),
repel = TRUE,
title = "PCA of Saltator coerulescens 1018 SNPs \naxes 1-2"
)
dev.off()
# pca without labeled points - groups
tiff("Saltator_coerulescens_groups.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Saltator_coerulescens_groups.png", width = 6, height = 6, units = 'in',
res = 800)
fviz_pca_ind(Saltator_coerulescens.pca,
xlim=c(-20, 65), ylim=c(-25, 40),
geom = "point",
title = "PCA of Saltator coerulescens 1018 SNPs \naxes 1-2"
) + geom_point(aes(colour = factor(dapc1$grp)), size = 3)
dev.off()
# pca without labeled points - black
tiff("Saltator_coerulescens_black.tiff", width = 6, height = 6, units = 'in',
res = 800, compression = 'none')
png("Saltator_coerulescens_black.png", width = 6, height = 6, units = 'in',
res = 800)
fviz_pca_ind(Saltator_coerulescens.pca,
geom = "point",
xlim=c(-20, 65), ylim=c(-25, 40),
title = "PCA of Saltator coerulescens 1018 SNPs \naxes 1-2"
) + geom_point(size = 3)
dev.off()
# save these to a text file
indNames(Saltator_coerulescens)
xval$DAPC$assign
xval[4]
xval[6]
# group assignment probabilities
round(xval$DAPC$posterior,6)
round(dapc1$posterior,6)
#summary(xval$DAPC)
assignplot(xval$DAPC)
assignplot(dapc1)
contrib <- loadingplot(dapc1$var.contr)
contrib <- loadingplot(dapc1$var.contr, threshold = 0.003,
thres=.07, lab.jitter=1)
compoplot(xval$DAPC, show.lab = TRUE, posi=list(x=12,y=-.01), cleg=.7)
#xval[2:6]
#xval$DAPC$assign
|
\name{ci.pd}
\alias{ci.pd}
\title{
Compute confidence limits for a difference of two independent proportions.
}
\description{
The usual formula for the c.i. of at difference of proportions is
inaccurate. Newcombe has compared 11 methods and method 10 in his
paper looks like a winner. It is implemented here.
}
\usage{
ci.pd(aa, bb=NULL, cc=NULL, dd=NULL,
method = "Nc",
alpha = 0.05, conf.level=0.95,
digits = 3,
print = TRUE,
detail.labs = FALSE )
}
\arguments{
\item{aa}{Numeric vector of successes in sample 1. Can also be a
matrix or array (see details).}
\item{bb}{Successes in sample 2.}
\item{cc}{Failures in sample 1.}
\item{dd}{Failures in sample 2.}
\item{method}{Method to use for calculation of confidence interval, see
"Details".}
\item{alpha}{Significance level}
\item{conf.level}{Confidence level}
\item{print}{Should an account of the two by two table be printed.}
\item{digits}{How many digits should the result be rounded to if printed.}
\item{detail.labs}{Should the computing of probability differences be
reported in the labels.}
}
\details{
Implements method 10 from Newcombe(1998) (method="Nc") or from
Agresti & Caffo(2000) (method="AC").
\code{aa}, \code{bb}, \code{cc} and \code{dd} can be vectors.
If \code{aa} is a matrix, the elements \code{[1:2,1:2]} are used, with
successes \code{aa[,1:2]}. If \code{aa} is a three-way table or array,
the elements \code{aa[1:2,1:2,]} are used.
}
\value{
A matrix with three columns: probability difference, lower and upper
limit. The number of rows equals the length of the vectors \code{aa},
\code{bb}, \code{cc} and \code{dd} or, if \code{aa} is a 3-way matrix,
\code{dim(aa)[3]}.
}
\references{
RG Newcombe: Interval estimation for the difference between
independent proportions. Comparison of eleven methods. Statistics in
Medicine, 17, pp. 873-890, 1998.
A Agresti & B Caffo: Simple and effective confidence intervals for
proportions and differences of proportions result from adding two
successes and two failures. The American Statistician,
54(4), pp. 280-288, 2000.
}
\author{
Bendix Carstensen, Esa Laara.
\url{http://bendixcarstensen.com}
}
\seealso{
\code{\link{twoby2}}, \code{\link{binom.test}}
}
\examples{
( a <- matrix( sample( 10:40, 4 ), 2, 2 ) )
ci.pd( a )
twoby2( t(a) )
prop.test( t(a) )
( A <- array( sample( 10:40, 20 ), dim=c(2,2,5) ) )
ci.pd( A )
ci.pd( A, detail.labs=TRUE, digits=3 )
}
\keyword{distribution}
\keyword{htest}
| /man/ci.pd.Rd | no_license | cran/Epi | R | false | false | 2,547 | rd | \name{ci.pd}
\alias{ci.pd}
\title{
Compute confidence limits for a difference of two independent proportions.
}
\description{
The usual formula for the c.i. of at difference of proportions is
inaccurate. Newcombe has compared 11 methods and method 10 in his
paper looks like a winner. It is implemented here.
}
\usage{
ci.pd(aa, bb=NULL, cc=NULL, dd=NULL,
method = "Nc",
alpha = 0.05, conf.level=0.95,
digits = 3,
print = TRUE,
detail.labs = FALSE )
}
\arguments{
\item{aa}{Numeric vector of successes in sample 1. Can also be a
matrix or array (see details).}
\item{bb}{Successes in sample 2.}
\item{cc}{Failures in sample 1.}
\item{dd}{Failures in sample 2.}
\item{method}{Method to use for calculation of confidence interval, see
"Details".}
\item{alpha}{Significance level}
\item{conf.level}{Confidence level}
\item{print}{Should an account of the two by two table be printed.}
\item{digits}{How many digits should the result be rounded to if printed.}
\item{detail.labs}{Should the computing of probability differences be
reported in the labels.}
}
\details{
Implements method 10 from Newcombe(1998) (method="Nc") or from
Agresti & Caffo(2000) (method="AC").
\code{aa}, \code{bb}, \code{cc} and \code{dd} can be vectors.
If \code{aa} is a matrix, the elements \code{[1:2,1:2]} are used, with
successes \code{aa[,1:2]}. If \code{aa} is a three-way table or array,
the elements \code{aa[1:2,1:2,]} are used.
}
\value{
A matrix with three columns: probability difference, lower and upper
limit. The number of rows equals the length of the vectors \code{aa},
\code{bb}, \code{cc} and \code{dd} or, if \code{aa} is a 3-way matrix,
\code{dim(aa)[3]}.
}
\references{
RG Newcombe: Interval estimation for the difference between
independent proportions. Comparison of eleven methods. Statistics in
Medicine, 17, pp. 873-890, 1998.
A Agresti & B Caffo: Simple and effective confidence intervals for
proportions and differences of proportions result from adding two
successes and two failures. The American Statistician,
54(4), pp. 280-288, 2000.
}
\author{
Bendix Carstensen, Esa Laara.
\url{http://bendixcarstensen.com}
}
\seealso{
\code{\link{twoby2}}, \code{\link{binom.test}}
}
\examples{
( a <- matrix( sample( 10:40, 4 ), 2, 2 ) )
ci.pd( a )
twoby2( t(a) )
prop.test( t(a) )
( A <- array( sample( 10:40, 20 ), dim=c(2,2,5) ) )
ci.pd( A )
ci.pd( A, detail.labs=TRUE, digits=3 )
}
\keyword{distribution}
\keyword{htest}
|
## takes an invertible matrix and returns an object which has functions
## for storing its value and inverse in order set,get,setinverse,getinverse
## create an invertible matrix (invertible matrix is a square matrix)
## a <- matrix(1:4,2,2)
## test cacheMtrxVar <- makeCacheMatrix(a)
## test cacheSolveVar <- cacheSolve(makeCacheMatrix(a))
## output cacheSolveVar
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
## reverse it back to test
## c <- cacheSolve(makeCacheMatrix(cacheSolveVar))
makeCacheMatrix <- function(x = matrix()) {
# Set inverse of X to NULL
xInverseVar <- NULL
# function to set the value of x
set <- function(y) {
x <<- y
xInverseVar <<- NULL
}
#function to return the value as it is of x
get <- function() x
#function to set the value of inversevariable
setInverse <- function(inverse) xInverseVar <<- inverse
#function to get the inverseVar
getInverse <- function() xInverseVar
#List is returned with set, set, setInverse,getInverse functions
list(set = set, get = get,setInverse = setInverse,getInverse = getInverse)
}
## Takes input of makeCacheMatrix object,
## if the inverse doesn't exist check by getInverse function, then set it
## using the setInverse function, calculate the inverse using solve(matrixObject) function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
xInverse <- x$getInverse()
#Check if the given inverse is not null, if not null then return it
if(!is.null(xInverse)) {
print("Cached Value of Invertible matrix...")
return(xInverse)
}
#comes here only if xInverse was found null, get matrix, use solve function,
#set to using setInverse function and return the xInverse object
matrixObj <- x$get()
xInverse <- solve(matrixObj, ...)
x$setInverse(xInverse)
xInverse
}
| /cachematrix.R | no_license | brijrajsingh/ProgrammingAssignment2 | R | false | false | 1,933 | r | ## takes an invertible matrix and returns an object which has functions
## for storing its value and inverse in order set,get,setinverse,getinverse
## create an invertible matrix (invertible matrix is a square matrix)
## a <- matrix(1:4,2,2)
## test cacheMtrxVar <- makeCacheMatrix(a)
## test cacheSolveVar <- cacheSolve(makeCacheMatrix(a))
## output cacheSolveVar
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
## reverse it back to test
## c <- cacheSolve(makeCacheMatrix(cacheSolveVar))
makeCacheMatrix <- function(x = matrix()) {
# Set inverse of X to NULL
xInverseVar <- NULL
# function to set the value of x
set <- function(y) {
x <<- y
xInverseVar <<- NULL
}
#function to return the value as it is of x
get <- function() x
#function to set the value of inversevariable
setInverse <- function(inverse) xInverseVar <<- inverse
#function to get the inverseVar
getInverse <- function() xInverseVar
#List is returned with set, set, setInverse,getInverse functions
list(set = set, get = get,setInverse = setInverse,getInverse = getInverse)
}
## Takes input of makeCacheMatrix object,
## if the inverse doesn't exist check by getInverse function, then set it
## using the setInverse function, calculate the inverse using solve(matrixObject) function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
xInverse <- x$getInverse()
#Check if the given inverse is not null, if not null then return it
if(!is.null(xInverse)) {
print("Cached Value of Invertible matrix...")
return(xInverse)
}
#comes here only if xInverse was found null, get matrix, use solve function,
#set to using setInverse function and return the xInverse object
matrixObj <- x$get()
xInverse <- solve(matrixObj, ...)
x$setInverse(xInverse)
xInverse
}
|
#Visualizations for SciClone outputs
#Number of files in directory
f <- list.files("/mnt/sc/geoMatch_testing/Results",pattern="*.csv")
iterations <- length(f)
for(i in 1:iterations)
{
if(i == 1)
{
record.dataframe <- read.csv(paste("/mnt/sc/geoMatch_testing/Results/",f[i],sep=""))
}
else
{
record.dataframe[i,] <- read.csv(paste("/mnt/sc/geoMatch_testing/Results/",f[i],sep=""))
}
}
plot(xlim=c(0,3),ylim=c(0,3),record.dataframe$avg.spill,
record.dataframe$abs.error.geoMatch,
col=rgb(1,0,0,alpha=0.5), pch=3, cex=0.5,
main="Absolute error in Treatment Estimates",
ylab="Error",
xlab="Spillover")
lines(lowess(record.dataframe$avg.spill,
record.dataframe$abs.error.geoMatch),
col=rgb(1,0,0), pch=3)
lines(lowess(record.dataframe$avg.spill,
record.dataframe$abs.error.matchit),
col=rgb(0,0,1), pch=4)
points(record.dataframe$avg.spill,
record.dataframe$abs.error.matchit,
col=rgb(0,0,1,alpha=0.5), pch=4, cex=0.5)
lines(lowess(record.dataframe$avg.spill,
record.dataframe$abs.error.lm),
col=rgb(0,1,0), pch=4)
points(record.dataframe$avg.spill,
record.dataframe$abs.error.lm,
col=rgb(0,1,0,alpha=0.5), pch=4, cex=0.5)
legend("topleft",
cex = 0.65,
legend=c("geoMatch","Baseline LM", "Baseline MatchIt"),
pch=c(pch = 3, pch=4, pch=4),
col=c(col="red", col="green", col="blue"), title = "Legend")
| /Visualization.R | no_license | aiddata/SimTests | R | false | false | 1,476 | r | #Visualizations for SciClone outputs
#Number of files in directory
f <- list.files("/mnt/sc/geoMatch_testing/Results",pattern="*.csv")
iterations <- length(f)
for(i in 1:iterations)
{
if(i == 1)
{
record.dataframe <- read.csv(paste("/mnt/sc/geoMatch_testing/Results/",f[i],sep=""))
}
else
{
record.dataframe[i,] <- read.csv(paste("/mnt/sc/geoMatch_testing/Results/",f[i],sep=""))
}
}
plot(xlim=c(0,3),ylim=c(0,3),record.dataframe$avg.spill,
record.dataframe$abs.error.geoMatch,
col=rgb(1,0,0,alpha=0.5), pch=3, cex=0.5,
main="Absolute error in Treatment Estimates",
ylab="Error",
xlab="Spillover")
lines(lowess(record.dataframe$avg.spill,
record.dataframe$abs.error.geoMatch),
col=rgb(1,0,0), pch=3)
lines(lowess(record.dataframe$avg.spill,
record.dataframe$abs.error.matchit),
col=rgb(0,0,1), pch=4)
points(record.dataframe$avg.spill,
record.dataframe$abs.error.matchit,
col=rgb(0,0,1,alpha=0.5), pch=4, cex=0.5)
lines(lowess(record.dataframe$avg.spill,
record.dataframe$abs.error.lm),
col=rgb(0,1,0), pch=4)
points(record.dataframe$avg.spill,
record.dataframe$abs.error.lm,
col=rgb(0,1,0,alpha=0.5), pch=4, cex=0.5)
legend("topleft",
cex = 0.65,
legend=c("geoMatch","Baseline LM", "Baseline MatchIt"),
pch=c(pch = 3, pch=4, pch=4),
col=c(col="red", col="green", col="blue"), title = "Legend")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CBPSMain.R
\name{CBPS}
\alias{CBPS}
\title{Covariate Balancing Propensity Score (CBPS) Estimation}
\usage{
CBPS(
formula,
data,
na.action,
ATT = 1,
iterations = 1000,
standardize = TRUE,
method = "over",
twostep = TRUE,
sample.weights = NULL,
baseline.formula = NULL,
diff.formula = NULL,
...
)
}
\arguments{
\item{formula}{An object of class \code{formula} (or one that can be coerced
to that class): a symbolic description of the model to be fitted.}
\item{data}{An optional data frame, list or environment (or object coercible
by as.data.frame to a data frame) containing the variables in the model. If
not found in data, the variables are taken from \code{environment(formula)},
typically the environment from which \code{CBPS} is called.}
\item{na.action}{A function which indicates what should happen when the data
contain NAs. The default is set by the na.action setting of options, and is
na.fail if that is unset.}
\item{ATT}{Default is 1, which finds the average treatment effect on the
treated interpreting the second level of the treatment factor as the
treatment. Set to 2 to find the ATT interpreting the first level of the
treatment factor as the treatment. Set to 0 to find the average treatment
effect. For non-binary treatments, only the ATE is available.}
\item{iterations}{An optional parameter for the maximum number of iterations
for the optimization. Default is 1000.}
\item{standardize}{Default is \code{TRUE}, which normalizes weights to sum
to 1 within each treatment group. For continuous treatments, normalizes
weights to sum up to 1 for the entire sample. Set to \code{FALSE} to return
Horvitz-Thompson weights.}
\item{method}{Choose "over" to fit an over-identified model that combines
the propensity score and covariate balancing conditions; choose "exact" to
fit a model that only contains the covariate balancing conditions.}
\item{twostep}{Default is \code{TRUE} for a two-step estimator, which will
run substantially faster than continuous-updating. Set to \code{FALSE} to
use the continuous-updating estimator described by Imai and Ratkovic (2014).}
\item{sample.weights}{Survey sampling weights for the observations, if
applicable. When left NULL, defaults to a sampling weight of 1 for each
observation.}
\item{baseline.formula}{Used only to fit iCBPS (see Fan et al). Currently
only works with binary treatments. A formula specifying the balancing
covariates in the baseline outcome model, i.e., E(Y(0)|X).}
\item{diff.formula}{Used only to fit iCBPS (see Fan et al). Currently only
works with binary treatments. A formula specifying the balancing covariates
in the difference between the treatment and baseline outcome model, i.e.,
E(Y(1)-Y(0)|X).}
\item{...}{Other parameters to be passed through to \code{optim()}.}
}
\value{
\item{fitted.values}{The fitted propensity score}
\item{linear.predictor}{X * beta}
\item{deviance}{Minus twice the log-likelihood of the CBPS fit}
\item{weights}{The optimal weights. Let \eqn{\pi_i = f(T_i | X_i)}{\pi_i =
f(T_i | X_i)}. For binary ATE, these are given by \eqn{\frac{T_i}{\pi_i} +
\frac{(1 - T_i)}{(1 - \pi_i)}}{T_i/\pi_i + (1 - T_i)/(1 - \pi_i)}. For
binary ATT, these are given by \eqn{\frac{n}{n_t} * \frac{T_i - \pi_i}{1 -
\pi_i}}{n/n_t * (T_i - \pi_i)/(1 - \pi_i)}. For multi_valued treatments,
these are given by \eqn{\sum_{j=0}^{J-1} T_{i,j} /
\pi_{i,j}}{\sum_{j=0}^{J-1} T_i,j / \pi_i,j}. For continuous treatments,
these are given by \eqn{\frac{f(T_i)}{f(T_i | X_i)}}{f(T_i) / f(T_i | X_i)
}. These expressions for weights are all before standardization (i.e. with
standardize=\code{FALSE}). Standardization will make weights sum to 1
within each treatment group. For continuous treatment, standardization will
make all weights sum to 1. If sampling weights are used, the weight for
each observation is multiplied by the survey sampling weight.} \item{y}{The
treatment vector used} \item{x}{The covariate matrix} \item{model}{The model
frame} \item{converged}{Convergence value. Returned from the call to
\code{optim()}.} \item{call}{The matched call} \item{formula}{The formula
supplied} \item{data}{The data argument} \item{coefficients}{A named vector
of coefficients} \item{sigmasq}{The sigma-squared value, for continuous
treatments only} \item{J}{The J-statistic at convergence} \item{mle.J}{The
J-statistic for the parameters from maximum likelihood estimation}
\item{var}{The covariance matrix for the coefficients.} \item{Ttilde}{For
internal use only.} \item{Xtilde}{For internal use only.}
\item{beta.tilde}{For internal use only.} \item{simgasq.tilde}{For internal
use only.}
}
\description{
\code{CBPS} estimates propensity scores such that both covariate balance and
prediction of treatment assignment are maximized. The method, therefore,
avoids an iterative process between model fitting and balance checking and
implements both simultaneously. For cross-sectional data, the method can
take continuous treatments and treatments with a control (baseline)
condition and either 1, 2, or 3 distinct treatment conditions.
Fits covariate balancing propensity scores.
### @aliases CBPS CBPS.fit print.CBPS
}
\examples{
###
### Example: propensity score matching
###
##Load the LaLonde data
data(LaLonde)
## Estimate CBPS
fit <- CBPS(treat ~ age + educ + re75 + re74 +
I(re75==0) + I(re74==0),
data = LaLonde, ATT = TRUE)
summary(fit)
\dontrun{
## matching via MatchIt: one to one nearest neighbor with replacement
library(MatchIt)
m.out <- matchit(treat ~ fitted(fit), method = "nearest",
data = LaLonde, replace = TRUE)
### Example: propensity score weighting
###
## Simulation from Kang and Shafer (2007).
set.seed(123456)
n <- 500
X <- mvrnorm(n, mu = rep(0, 4), Sigma = diag(4))
prop <- 1 / (1 + exp(X[,1] - 0.5 * X[,2] +
0.25*X[,3] + 0.1 * X[,4]))
treat <- rbinom(n, 1, prop)
y <- 210 + 27.4*X[,1] + 13.7*X[,2] + 13.7*X[,3] + 13.7*X[,4] + rnorm(n)
##Estimate CBPS with a misspecified model
X.mis <- cbind(exp(X[,1]/2), X[,2]*(1+exp(X[,1]))^(-1)+10,
(X[,1]*X[,3]/25+.6)^3, (X[,2]+X[,4]+20)^2)
fit1 <- CBPS(treat ~ X.mis, ATT = 0)
summary(fit1)
## Horwitz-Thompson estimate
mean(treat*y/fit1$fitted.values)
## Inverse propensity score weighting
sum(treat*y/fit1$fitted.values)/sum(treat/fit1$fitted.values)
rm(list=c("y","X","prop","treat","n","X.mis","fit1"))
### Example: Continuous Treatment as in Fong, Hazlett,
### and Imai (2018). See
### https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/AIF4PI
### for a real data example.
set.seed(123456)
n <- 1000
X <- mvrnorm(n, mu = rep(0,2), Sigma = diag(2))
beta <- rnorm(ncol(X)+1, sd = 1)
treat <- cbind(1,X)\%*\%beta + rnorm(n, sd = 5)
treat.effect <- 1
effect.beta <- rnorm(ncol(X))
y <- rbinom(n, 1, (1 + exp(-treat.effect*treat -
X\%*\%effect.beta))^-1)
fit2 <- CBPS(treat ~ X)
summary(fit2)
summary(glm(y ~ treat + X, weights = fit2$weights,
family = "quasibinomial"))
rm(list=c("n", "X", "beta", "treat", "treat.effect",
"effect.beta", "y", "fit2"))
### Simulation example: Improved CBPS (or iCBPS) from Fan et al
set.seed(123456)
n <- 500
X <- mvrnorm(n, mu = rep(0, 4), Sigma = diag(4))
prop <- 1 / (1 + exp(X[,1] - 0.5 * X[,2] + 0.25*X[,3] + 0.1 * X[,4]))
treat <- rbinom(n, 1, prop)
y1 <- 210 + 27.4*X[,1] + 13.7*X[,2] + 13.7*X[,3] + 13.7*X[,4] + rnorm(n)
y0 <- 210 + 13.7*X[,2] + 13.7*X[,3] + 13.7*X[,4] + rnorm(n)
##Estimate iCBPS with a misspecificied model
X.mis <- cbind(exp(X[,1]/2), X[,2]*(1+exp(X[,1]))^(-1)+10,
(X[,1]*X[,3]/25+.6)^3, (X[,2]+X[,4]+20)^2)
fit1 <- CBPS(treat ~ X.mis, baseline.formula=~X.mis[,2:4],
diff.formula=~X.mis[,1], ATT = FALSE)
summary(fit1)
}
}
\references{
Imai, Kosuke and Marc Ratkovic. 2014. ``Covariate Balancing
Propensity Score.'' Journal of the Royal Statistical Society, Series B
(Statistical Methodology).
\url{http://imai.princeton.edu/research/CBPS.html} \cr Fong, Christian, Chad
Hazlett, and Kosuke Imai. 2018. ``Covariate Balancing Propensity Score
for a Continuous Treatment.'' The Annals of Applied Statistics.
\url{http://imai.princeton.edu/research/files/CBGPS.pdf} \cr
Fan, Jianqing and Imai, Kosuke and Liu, Han and Ning, Yang and Yang,
Xiaolin. ``Improving Covariate Balancing Propensity Score: A Doubly Robust
and Efficient Approach.'' Unpublished Manuscript.
\url{http://imai.princeton.edu/research/CBPStheory.html}
}
\seealso{
\link{summary.CBPS}
}
\author{
Christian Fong, Marc Ratkovic, Kosuke Imai, and Xiaolin Yang; The
CBPS function is based on the code for version 2.15.0 of the glm function
implemented in the stats package, originally written by Simon Davies. This
documentation is likewise modeled on the documentation for glm and borrows
its language where the arguments and values are the same.
}
| /man/CBPS.Rd | no_license | cran/CBPS | R | false | true | 9,085 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CBPSMain.R
\name{CBPS}
\alias{CBPS}
\title{Covariate Balancing Propensity Score (CBPS) Estimation}
\usage{
CBPS(
formula,
data,
na.action,
ATT = 1,
iterations = 1000,
standardize = TRUE,
method = "over",
twostep = TRUE,
sample.weights = NULL,
baseline.formula = NULL,
diff.formula = NULL,
...
)
}
\arguments{
\item{formula}{An object of class \code{formula} (or one that can be coerced
to that class): a symbolic description of the model to be fitted.}
\item{data}{An optional data frame, list or environment (or object coercible
by as.data.frame to a data frame) containing the variables in the model. If
not found in data, the variables are taken from \code{environment(formula)},
typically the environment from which \code{CBPS} is called.}
\item{na.action}{A function which indicates what should happen when the data
contain NAs. The default is set by the na.action setting of options, and is
na.fail if that is unset.}
\item{ATT}{Default is 1, which finds the average treatment effect on the
treated interpreting the second level of the treatment factor as the
treatment. Set to 2 to find the ATT interpreting the first level of the
treatment factor as the treatment. Set to 0 to find the average treatment
effect. For non-binary treatments, only the ATE is available.}
\item{iterations}{An optional parameter for the maximum number of iterations
for the optimization. Default is 1000.}
\item{standardize}{Default is \code{TRUE}, which normalizes weights to sum
to 1 within each treatment group. For continuous treatments, normalizes
weights to sum up to 1 for the entire sample. Set to \code{FALSE} to return
Horvitz-Thompson weights.}
\item{method}{Choose "over" to fit an over-identified model that combines
the propensity score and covariate balancing conditions; choose "exact" to
fit a model that only contains the covariate balancing conditions.}
\item{twostep}{Default is \code{TRUE} for a two-step estimator, which will
run substantially faster than continuous-updating. Set to \code{FALSE} to
use the continuous-updating estimator described by Imai and Ratkovic (2014).}
\item{sample.weights}{Survey sampling weights for the observations, if
applicable. When left NULL, defaults to a sampling weight of 1 for each
observation.}
\item{baseline.formula}{Used only to fit iCBPS (see Fan et al). Currently
only works with binary treatments. A formula specifying the balancing
covariates in the baseline outcome model, i.e., E(Y(0)|X).}
\item{diff.formula}{Used only to fit iCBPS (see Fan et al). Currently only
works with binary treatments. A formula specifying the balancing covariates
in the difference between the treatment and baseline outcome model, i.e.,
E(Y(1)-Y(0)|X).}
\item{...}{Other parameters to be passed through to \code{optim()}.}
}
\value{
\item{fitted.values}{The fitted propensity score}
\item{linear.predictor}{X * beta}
\item{deviance}{Minus twice the log-likelihood of the CBPS fit}
\item{weights}{The optimal weights. Let \eqn{\pi_i = f(T_i | X_i)}{\pi_i =
f(T_i | X_i)}. For binary ATE, these are given by \eqn{\frac{T_i}{\pi_i} +
\frac{(1 - T_i)}{(1 - \pi_i)}}{T_i/\pi_i + (1 - T_i)/(1 - \pi_i)}. For
binary ATT, these are given by \eqn{\frac{n}{n_t} * \frac{T_i - \pi_i}{1 -
\pi_i}}{n/n_t * (T_i - \pi_i)/(1 - \pi_i)}. For multi_valued treatments,
these are given by \eqn{\sum_{j=0}^{J-1} T_{i,j} /
\pi_{i,j}}{\sum_{j=0}^{J-1} T_i,j / \pi_i,j}. For continuous treatments,
these are given by \eqn{\frac{f(T_i)}{f(T_i | X_i)}}{f(T_i) / f(T_i | X_i)
}. These expressions for weights are all before standardization (i.e. with
standardize=\code{FALSE}). Standardization will make weights sum to 1
within each treatment group. For continuous treatment, standardization will
make all weights sum to 1. If sampling weights are used, the weight for
each observation is multiplied by the survey sampling weight.} \item{y}{The
treatment vector used} \item{x}{The covariate matrix} \item{model}{The model
frame} \item{converged}{Convergence value. Returned from the call to
\code{optim()}.} \item{call}{The matched call} \item{formula}{The formula
supplied} \item{data}{The data argument} \item{coefficients}{A named vector
of coefficients} \item{sigmasq}{The sigma-squared value, for continuous
treatments only} \item{J}{The J-statistic at convergence} \item{mle.J}{The
J-statistic for the parameters from maximum likelihood estimation}
\item{var}{The covariance matrix for the coefficients.} \item{Ttilde}{For
internal use only.} \item{Xtilde}{For internal use only.}
\item{beta.tilde}{For internal use only.} \item{simgasq.tilde}{For internal
use only.}
}
\description{
\code{CBPS} estimates propensity scores such that both covariate balance and
prediction of treatment assignment are maximized. The method, therefore,
avoids an iterative process between model fitting and balance checking and
implements both simultaneously. For cross-sectional data, the method can
take continuous treatments and treatments with a control (baseline)
condition and either 1, 2, or 3 distinct treatment conditions.
Fits covariate balancing propensity scores.
### @aliases CBPS CBPS.fit print.CBPS
}
\examples{
###
### Example: propensity score matching
###
##Load the LaLonde data
data(LaLonde)
## Estimate CBPS
fit <- CBPS(treat ~ age + educ + re75 + re74 +
I(re75==0) + I(re74==0),
data = LaLonde, ATT = TRUE)
summary(fit)
\dontrun{
## matching via MatchIt: one to one nearest neighbor with replacement
library(MatchIt)
m.out <- matchit(treat ~ fitted(fit), method = "nearest",
data = LaLonde, replace = TRUE)
### Example: propensity score weighting
###
## Simulation from Kang and Shafer (2007).
set.seed(123456)
n <- 500
X <- mvrnorm(n, mu = rep(0, 4), Sigma = diag(4))
prop <- 1 / (1 + exp(X[,1] - 0.5 * X[,2] +
0.25*X[,3] + 0.1 * X[,4]))
treat <- rbinom(n, 1, prop)
y <- 210 + 27.4*X[,1] + 13.7*X[,2] + 13.7*X[,3] + 13.7*X[,4] + rnorm(n)
##Estimate CBPS with a misspecified model
X.mis <- cbind(exp(X[,1]/2), X[,2]*(1+exp(X[,1]))^(-1)+10,
(X[,1]*X[,3]/25+.6)^3, (X[,2]+X[,4]+20)^2)
fit1 <- CBPS(treat ~ X.mis, ATT = 0)
summary(fit1)
## Horvitz-Thompson estimate
mean(treat*y/fit1$fitted.values)
## Inverse propensity score weighting
sum(treat*y/fit1$fitted.values)/sum(treat/fit1$fitted.values)
rm(list=c("y","X","prop","treat","n","X.mis","fit1"))
### Example: Continuous Treatment as in Fong, Hazlett,
### and Imai (2018). See
### https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/AIF4PI
### for a real data example.
set.seed(123456)
n <- 1000
X <- mvrnorm(n, mu = rep(0,2), Sigma = diag(2))
beta <- rnorm(ncol(X)+1, sd = 1)
treat <- cbind(1,X)\%*\%beta + rnorm(n, sd = 5)
treat.effect <- 1
effect.beta <- rnorm(ncol(X))
y <- rbinom(n, 1, (1 + exp(-treat.effect*treat -
X\%*\%effect.beta))^-1)
fit2 <- CBPS(treat ~ X)
summary(fit2)
summary(glm(y ~ treat + X, weights = fit2$weights,
family = "quasibinomial"))
rm(list=c("n", "X", "beta", "treat", "treat.effect",
"effect.beta", "y", "fit2"))
### Simulation example: Improved CBPS (or iCBPS) from Fan et al
set.seed(123456)
n <- 500
X <- mvrnorm(n, mu = rep(0, 4), Sigma = diag(4))
prop <- 1 / (1 + exp(X[,1] - 0.5 * X[,2] + 0.25*X[,3] + 0.1 * X[,4]))
treat <- rbinom(n, 1, prop)
y1 <- 210 + 27.4*X[,1] + 13.7*X[,2] + 13.7*X[,3] + 13.7*X[,4] + rnorm(n)
y0 <- 210 + 13.7*X[,2] + 13.7*X[,3] + 13.7*X[,4] + rnorm(n)
##Estimate iCBPS with a misspecified model
X.mis <- cbind(exp(X[,1]/2), X[,2]*(1+exp(X[,1]))^(-1)+10,
(X[,1]*X[,3]/25+.6)^3, (X[,2]+X[,4]+20)^2)
fit1 <- CBPS(treat ~ X.mis, baseline.formula=~X.mis[,2:4],
diff.formula=~X.mis[,1], ATT = FALSE)
summary(fit1)
}
}
\references{
Imai, Kosuke and Marc Ratkovic. 2014. ``Covariate Balancing
Propensity Score.'' Journal of the Royal Statistical Society, Series B
(Statistical Methodology).
\url{http://imai.princeton.edu/research/CBPS.html} \cr Fong, Christian, Chad
Hazlett, and Kosuke Imai. 2018. ``Covariate Balancing Propensity Score
for a Continuous Treatment.'' The Annals of Applied Statistics.
\url{http://imai.princeton.edu/research/files/CBGPS.pdf} \cr
Fan, Jianqing and Imai, Kosuke and Liu, Han and Ning, Yang and Yang,
Xiaolin. ``Improving Covariate Balancing Propensity Score: A Doubly Robust
and Efficient Approach.'' Unpublished Manuscript.
\url{http://imai.princeton.edu/research/CBPStheory.html}
}
\seealso{
\link{summary.CBPS}
}
\author{
Christian Fong, Marc Ratkovic, Kosuke Imai, and Xiaolin Yang; The
CBPS function is based on the code for version 2.15.0 of the glm function
implemented in the stats package, originally written by Simon Davies. This
documentation is likewise modeled on the documentation for glm and borrows
its language where the arguments and values are the same.
}
|
#' Report config: generate template
#'
#' @param df a dataframe as retrieved by import_data_haridwar()
#' @param temporal_aggregation Set the following values if data should be
#' summarised to e.g. 10 minutes (600) or hourly (3600), daily ("day")
#' or monthly ("month") median values
#' (default: "raw")
#' @param output_timezone into which timezone should the data be converted for
#' the report? (default: "UTC")
#' @return default list for report configuration template
#' @export
report_config_template <- function(df = NULL,
temporal_aggregation = "raw",
output_timezone = "UTC") {
if (is.null(df)) {
sitenames <- c(
"General",
"Tank water",
"Well Water",
"After Filter",
"After AO Cell"
)
parameters_online <- "Redox potential"
parameters_offline <- "Temperature"
start_day <- sprintf("%s-01", format(Sys.Date(), format = "%Y-%m"))
end_day <- as.character(Sys.Date())
daterange <- c(start_day, end_day)
} else {
unique_pars <- function(src) unique(df$ParameterName[df$Source == src])
sitenames <- unique(df$SiteName)
parameters_online <- unique_pars("online")
parameters_offline <- unique_pars("offline")
daterange <- as.character(as.Date(range(df$DateTime)))
}
list(
report_sitenames = sitenames,
report_aggregation = temporal_aggregation,
report_parameters_online = parameters_online,
report_parameters_offline = parameters_offline,
report_parameters_calculated = c(
"Specific energy demand of pump",
"Specific energy demand of cell"
),
report_add_thresholds = TRUE,
report_daterange = daterange,
report_timezone = output_timezone
)
}
#' Report config: saves config to text file
#'
#' @param config_list a report configuration list e.g. as retrieved by
#' report_config_template()
#' @param output_file absolute or relative path of where to save output file
#' (default: "report_config.txt")
#' @return saves report configuration list as text file
#' @export
#' @examples
#' \dontrun{
#' ### Creates a configuration template
#' config <- report_config_template()
#' ### Saves list config in text
#' report_config_to_txt(
#' config_list = config,
#' output_file = "report_config.txt"
#' )
#' }
report_config_to_txt <- function(config_list, output_file = "report_config.txt") {
### Write config list to text file
### see http://stackoverflow.com/questions/8261590/write-list-to-a-text-file-preserving-names-r
if (file.exists(output_file)) {
file.remove(output_file)
}
output_dir <- dirname(output_file)
if (!dir.exists(output_dir)) {
dir.create(path = output_dir, showWarnings = FALSE)
}
# z <- deparse(substitute(config_list))
# cat(z, "\n", file=output_file)
for (key in names(config_list)) {
cat(file = output_file, append = TRUE, sprintf(
"%s=%s\n", key, kwb.utils::stringList(config_list[[key]], collapse = " ")
))
}
}
#' Report config: imports text file to list
#'
#' @param config_txt path to report configuration text file created by
#' a report configuration list e.g. as retrieved by function report_config_to_txt()
#' @return saves report configuration list as text file
#' @export
#' @examples
#' \dontrun{
#' ### Creates a configuration template
#' config <- report_config_template()
#' ### Saves list config in text
#' report_config_to_txt(config_list = config, output_file = "report_config.txt")
#' ### Reads config list from text file to
#' config_imported <- report_txt_to_config(config_txt = "report_config.txt")
#' ### Check whether both are identical
#' identical(x = config, y = config_imported)
#' }
report_txt_to_config <- function(config_txt = "report_config.txt") {
x <- scan(config_txt, what = "", sep = "\n")
# Separate elements by one or more whitepace
y <- strsplit(x, "=")
# Extract the first vector element and set it as the list element name
names(y) <- sapply(y, `[[`, 1)
# names(y) <- sapply(y, function(x) x[[1]]) # same as above
# Remove the first vector element from each list element
y <- lapply(y, `[`, -1)
### Remove "'" from character strings
y <- lapply(y, FUN = function(x) {
gsub(pattern = "'", replacement = "", unlist(strsplit(x, split = "'\\s")))
})
num_aggregation <- as.numeric(y$report_aggregation)
if (!is.na(num_aggregation)) {
y$report_aggregation <- num_aggregation
}
y
}
| /R/report_configuration.R | permissive | KWB-R/kwb.pilot | R | false | false | 4,445 | r | #' Report config: generate template
#'
#' @param df a dataframe as retrieved by import_data_haridwar()
#' @param temporal_aggregation Set the following values if data should be
#' summarised to e.g. 10 minutes (600) or hourly (3600), daily ("day")
#' or monthly ("month") median values
#' (default: "raw")
#' @param output_timezone into which timezone should the data be converted for
#' the report? (default: "UTC")
#' @return default list for report configuration template
#' @export
report_config_template <- function(df = NULL,
temporal_aggregation = "raw",
output_timezone = "UTC") {
if (is.null(df)) {
sitenames <- c(
"General",
"Tank water",
"Well Water",
"After Filter",
"After AO Cell"
)
parameters_online <- "Redox potential"
parameters_offline <- "Temperature"
start_day <- sprintf("%s-01", format(Sys.Date(), format = "%Y-%m"))
end_day <- as.character(Sys.Date())
daterange <- c(start_day, end_day)
} else {
unique_pars <- function(src) unique(df$ParameterName[df$Source == src])
sitenames <- unique(df$SiteName)
parameters_online <- unique_pars("online")
parameters_offline <- unique_pars("offline")
daterange <- as.character(as.Date(range(df$DateTime)))
}
list(
report_sitenames = sitenames,
report_aggregation = temporal_aggregation,
report_parameters_online = parameters_online,
report_parameters_offline = parameters_offline,
report_parameters_calculated = c(
"Specific energy demand of pump",
"Specific energy demand of cell"
),
report_add_thresholds = TRUE,
report_daterange = daterange,
report_timezone = output_timezone
)
}
#' Report config: saves config to text file
#'
#' @param config_list a report configuration list e.g. as retrieved by
#' report_config_template()
#' @param output_file absolute or relative path of where to save output file
#' (default: "report_config.txt")
#' @return saves report configuration list as text file
#' @export
#' @examples
#' \dontrun{
#' ### Creates a configuration template
#' config <- report_config_template()
#' ### Saves list config in text
#' report_config_to_txt(
#' config_list = config,
#' output_file = "report_config.txt"
#' )
#' }
report_config_to_txt <- function(config_list, output_file = "report_config.txt") {
### Write config list to text file
### see http://stackoverflow.com/questions/8261590/write-list-to-a-text-file-preserving-names-r
if (file.exists(output_file)) {
file.remove(output_file)
}
output_dir <- dirname(output_file)
if (!dir.exists(output_dir)) {
dir.create(path = output_dir, showWarnings = FALSE)
}
# z <- deparse(substitute(config_list))
# cat(z, "\n", file=output_file)
for (key in names(config_list)) {
cat(file = output_file, append = TRUE, sprintf(
"%s=%s\n", key, kwb.utils::stringList(config_list[[key]], collapse = " ")
))
}
}
#' Report config: imports text file to list
#'
#' @param config_txt path to report configuration text file created by
#' a report configuration list e.g. as retrieved by function report_config_to_txt()
#' @return saves report configuration list as text file
#' @export
#' @examples
#' \dontrun{
#' ### Creates a configuration template
#' config <- report_config_template()
#' ### Saves list config in text
#' report_config_to_txt(config_list = config, output_file = "report_config.txt")
#' ### Reads config list from text file to
#' config_imported <- report_txt_to_config(config_txt = "report_config.txt")
#' ### Check whether both are identical
#' identical(x = config, y = config_imported)
#' }
report_txt_to_config <- function(config_txt = "report_config.txt") {
x <- scan(config_txt, what = "", sep = "\n")
# Separate elements by one or more whitepace
y <- strsplit(x, "=")
# Extract the first vector element and set it as the list element name
names(y) <- sapply(y, `[[`, 1)
# names(y) <- sapply(y, function(x) x[[1]]) # same as above
# Remove the first vector element from each list element
y <- lapply(y, `[`, -1)
### Remove "'" from character strings
y <- lapply(y, FUN = function(x) {
gsub(pattern = "'", replacement = "", unlist(strsplit(x, split = "'\\s")))
})
num_aggregation <- as.numeric(y$report_aggregation)
if (!is.na(num_aggregation)) {
y$report_aggregation <- num_aggregation
}
y
}
|
####################################################################
#' Linear Regression Results Plot
#'
#' This function plots a Linear Regression Result
#'
#' @param tag Vector. Real known label
#' @param score Vector. Predicted value or model's result
#' @param subtitle Character. Subitle to show in plot
#' @param model_name Character. Model's name
#' @param save Boolean. Save output plot into working directory
#' @param subdir Character. Sub directory on which you wish to save the plot
#' @param file_name Character. File name as you wish to save the plot
#' @export
mplot_lineal <- function(tag,
score,
title = "Regression Model Results",
subtitle = NA,
model_name = NA,
save = FALSE,
subdir = NA,
deviation = FALSE,
transparent = FALSE,
file_name = "viz_lineal.png") {
require(ggplot2)
require(scales)
dist2d <- function(a, b = c(0, 0), c = c(1, 1)) {
v1 <- b - c
v2 <- a - b
m <- cbind(v1, v2)
d <- abs(det(m)) / sqrt(sum(v1 * v1))
}
rmse <- function(tag, score){
error <- tag - score
sqrt(mean(error^2))
}
mae <- function(tag, score){
error <- tag - score
mean(abs(error))
}
if (length(tag) != length(score)) {
message("The tag and score vectors should be the same length.")
stop(message(paste("Currently, tag has",length(tag),"rows and score has",length(score))))
}
results <- data.frame(tag = tag, score = score, dist = 0)
for (i in 1:nrow(results)) {
#results$dist[i] <- dist2d(c(results$tag[i],results$score[i]), c(0,0), c(1,1))
results$dist[i] <- abs(results$tag[i] - results$score[i])
}
fit <- lm(score ~ tag)
anova(fit)
coefficients(fit)
if(nrow(summary(fit)$coef) == 1){pval <- NA}else if(is.na(summary(fit)$coef[2,4])){pval <- NA}else{ pval <- signif(summary(fit)$coef[2,4],3)}
labels <- paste(
paste("R² (adj.) = ", signif(summary(fit)$adj.r.squared, 4)),
paste("p.value =", pval),
paste("RMSE =", signif(rmse(results$tag, results$score), 4)),
#paste("MAE =", signif(mae(results$tag, results$score), 4)),
sep="\n")
p <- ggplot(results, aes(x = tag, y = score)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE) +
#geom_abline(slope = as.numeric(fit$coefficients[2]), intercept = as.numeric(fit$coefficients[1]), alpha = 0.5, colour = "orange", size=0.6) +
#geom_abline(slope = 1, intercept = 0, alpha = 0.5, colour = "orange", size=0.6) +
theme_minimal() + coord_equal(ratio = 1,xlim = c(0,100),ylim = c(0,100)) +
labs(title = title,
x = "Chronological age (Years)", y = "Transcriptomic age") +
# annotate("text", x = Inf, y = -Inf, hjust = 1, vjust = 0, label = labels, size = 4) +
geom_label(aes(x = 100, y = 0, hjust = 1, vjust = 0), label = labels, size = 8, label.padding = unit(0.50, "lines"), label.size = 0) +
scale_x_continuous(labels = comma) +
scale_y_continuous(labels = comma) +
theme(legend.justification = c(0, 1), legend.position = c(0, 1),
axis.text = element_text(size = 22), axis.title=element_text(size=20),
plot.title = element_text(size=24,face="bold",hjust = 0.5)) +
guides(colour = guide_colorbar(barwidth = 0.9, barheight = 4.5))
if(deviation){
p <- p + ggplot(colour = dist) + labs(colour = "Deviation")
}
if(!is.na(subtitle)) {
p <- p + labs(subtitle = subtitle)
}
if(transparent){
p <- p + geom_point(alpha = 0.5)
}
if(!is.na(model_name)) {
p <- p + labs(caption = model_name)
}
if (save == TRUE) {
p <- p + ggsave(file_name, width = 6, height = 6)
}
return(p)
}
| /linearPlot.r | no_license | jdelasrivas-lab/advanced_r_plots | R | false | false | 3,803 | r | ####################################################################
#' Linear Regression Results Plot
#'
#' This function plots a Linear Regression Result
#'
#' @param tag Vector. Real known label
#' @param score Vector. Predicted value or model's result
#' @param subtitle Character. Subitle to show in plot
#' @param model_name Character. Model's name
#' @param save Boolean. Save output plot into working directory
#' @param subdir Character. Sub directory on which you wish to save the plot
#' @param file_name Character. File name as you wish to save the plot
#' @export
mplot_lineal <- function(tag,
score,
title = "Regression Model Results",
subtitle = NA,
model_name = NA,
save = FALSE,
subdir = NA,
deviation = FALSE,
transparent = FALSE,
file_name = "viz_lineal.png") {
require(ggplot2)
require(scales)
dist2d <- function(a, b = c(0, 0), c = c(1, 1)) {
v1 <- b - c
v2 <- a - b
m <- cbind(v1, v2)
d <- abs(det(m)) / sqrt(sum(v1 * v1))
}
rmse <- function(tag, score){
error <- tag - score
sqrt(mean(error^2))
}
mae <- function(tag, score){
error <- tag - score
mean(abs(error))
}
if (length(tag) != length(score)) {
message("The tag and score vectors should be the same length.")
stop(message(paste("Currently, tag has",length(tag),"rows and score has",length(score))))
}
results <- data.frame(tag = tag, score = score, dist = 0)
for (i in 1:nrow(results)) {
#results$dist[i] <- dist2d(c(results$tag[i],results$score[i]), c(0,0), c(1,1))
results$dist[i] <- abs(results$tag[i] - results$score[i])
}
fit <- lm(score ~ tag)
anova(fit)
coefficients(fit)
if(nrow(summary(fit)$coef) == 1){pval <- NA}else if(is.na(summary(fit)$coef[2,4])){pval <- NA}else{ pval <- signif(summary(fit)$coef[2,4],3)}
labels <- paste(
paste("R² (adj.) = ", signif(summary(fit)$adj.r.squared, 4)),
paste("p.value =", pval),
paste("RMSE =", signif(rmse(results$tag, results$score), 4)),
#paste("MAE =", signif(mae(results$tag, results$score), 4)),
sep="\n")
p <- ggplot(results, aes(x = tag, y = score)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE) +
#geom_abline(slope = as.numeric(fit$coefficients[2]), intercept = as.numeric(fit$coefficients[1]), alpha = 0.5, colour = "orange", size=0.6) +
#geom_abline(slope = 1, intercept = 0, alpha = 0.5, colour = "orange", size=0.6) +
theme_minimal() + coord_equal(ratio = 1,xlim = c(0,100),ylim = c(0,100)) +
labs(title = title,
x = "Chronological age (Years)", y = "Transcriptomic age") +
# annotate("text", x = Inf, y = -Inf, hjust = 1, vjust = 0, label = labels, size = 4) +
geom_label(aes(x = 100, y = 0, hjust = 1, vjust = 0), label = labels, size = 8, label.padding = unit(0.50, "lines"), label.size = 0) +
scale_x_continuous(labels = comma) +
scale_y_continuous(labels = comma) +
theme(legend.justification = c(0, 1), legend.position = c(0, 1),
axis.text = element_text(size = 22), axis.title=element_text(size=20),
plot.title = element_text(size=24,face="bold",hjust = 0.5)) +
guides(colour = guide_colorbar(barwidth = 0.9, barheight = 4.5))
if(deviation){
p <- p + ggplot(colour = dist) + labs(colour = "Deviation")
}
if(!is.na(subtitle)) {
p <- p + labs(subtitle = subtitle)
}
if(transparent){
p <- p + geom_point(alpha = 0.5)
}
if(!is.na(model_name)) {
p <- p + labs(caption = model_name)
}
if (save == TRUE) {
p <- p + ggsave(file_name, width = 6, height = 6)
}
return(p)
}
|
#PLot3.R
#Coursera
#Exploratory Data Analysis
install.packages("lubridate")
library(lubridate)
install.packages("ggplot2")
library(ggplot2)
setwd("C:/Users/jhuberty/Desktop/Coursera_data_science/4 Exploratory Data Analysis")
Sys.setenv(TZ='GMT')
consumption <- read.table("data/household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
#use lubridate function dmy to convert date column into a POSIXct value)
consumption$Date <- dmy(consumption$Date)
#use lubridate function to reduce to only dates equal to 2007-02-01 and 2007-02-02
consumption <- consumption[consumption$Date == "2007-02-01" | consumption$Date == "2007-02-02", ]##, consumption$Date <= ymd(2007022)]
##Plot 3
Sub_metering_1 <- as.numeric(consumption$Sub_metering_1)
Sub_metering_2 <- as.numeric(consumption$Sub_metering_2)
Sub_metering_3 <- as.numeric(consumption$Sub_metering_3)
metering_max <- c(max(Sub_metering_1),max(Sub_metering_2), max(Sub_metering_3))
metering_min <- c(min(Sub_metering_1),min(Sub_metering_2), min(Sub_metering_3))
ymin <- min(metering_min)
ymax <- max(metering_max)
ymd_hms <- paste(consumption$Date,consumption$Time)
consumption <- cbind(consumption,ymd_hms)
datetime <- strptime(consumption$ymd_hms,"%Y-%m-%d %H:%M:%S")
#
plot(datetime, Sub_metering_1 ,type = "l",ylim=c(ymin,ymax), ylab = "Energy sub metering", xlab = "")
points(datetime, Sub_metering_2, type = "l", col="Red")
points(datetime, Sub_metering_3, type = "l", col="Blue")
legend("topright", col = c("Black","Red","Blue"), legend = c("Sub_metering_1", "Sub_metering_2"
,"Sub_metering_3"), lty=c(1,1))
png(file="plot3.png", width = 480, height = 480)
plot(datetime, Sub_metering_1 ,type = "l",ylim=c(ymin,ymax), ylab = "Energy sub metering", xlab = "")
points(datetime, Sub_metering_2, type = "l", col="Red")
points(datetime, Sub_metering_3, type = "l", col="Blue")
legend("topright", col = c("Black","Red","Blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"),lty=c(1,1))
dev.off()
| /plot3.R | no_license | johnhuberty/ExData_Plotting1 | R | false | false | 2,128 | r | #PLot3.R
#Coursera
#Exploratory Data Analysis
install.packages("lubridate")
library(lubridate)
install.packages("ggplot2")
library(ggplot2)
setwd("C:/Users/jhuberty/Desktop/Coursera_data_science/4 Exploratory Data Analysis")
Sys.setenv(TZ='GMT')
consumption <- read.table("data/household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
#use lubridate function dmy to convert date column into a POSIXct value)
consumption$Date <- dmy(consumption$Date)
#use lubridate function to reduce to only dates equal to 2007-02-01 and 2007-02-02
consumption <- consumption[consumption$Date == "2007-02-01" | consumption$Date == "2007-02-02", ]##, consumption$Date <= ymd(2007022)]
##Plot 3
Sub_metering_1 <- as.numeric(consumption$Sub_metering_1)
Sub_metering_2 <- as.numeric(consumption$Sub_metering_2)
Sub_metering_3 <- as.numeric(consumption$Sub_metering_3)
metering_max <- c(max(Sub_metering_1),max(Sub_metering_2), max(Sub_metering_3))
metering_min <- c(min(Sub_metering_1),min(Sub_metering_2), min(Sub_metering_3))
ymin <- min(metering_min)
ymax <- max(metering_max)
ymd_hms <- paste(consumption$Date,consumption$Time)
consumption <- cbind(consumption,ymd_hms)
datetime <- strptime(consumption$ymd_hms,"%Y-%m-%d %H:%M:%S")
#
plot(datetime, Sub_metering_1 ,type = "l",ylim=c(ymin,ymax), ylab = "Energy sub metering", xlab = "")
points(datetime, Sub_metering_2, type = "l", col="Red")
points(datetime, Sub_metering_3, type = "l", col="Blue")
legend("topright", col = c("Black","Red","Blue"), legend = c("Sub_metering_1", "Sub_metering_2"
,"Sub_metering_3"), lty=c(1,1))
png(file="plot3.png", width = 480, height = 480)
plot(datetime, Sub_metering_1 ,type = "l",ylim=c(ymin,ymax), ylab = "Energy sub metering", xlab = "")
points(datetime, Sub_metering_2, type = "l", col="Red")
points(datetime, Sub_metering_3, type = "l", col="Blue")
legend("topright", col = c("Black","Red","Blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"),lty=c(1,1))
dev.off()
|
loadData = function(){
unzip("exdata-data-household_power_consumption.zip")
filename = "household_power_consumption.txt"
df = read.table(filename, sep=";",header = TRUE, na.strings='?')
if(nrow(df) != 2075259 || ncol(df) != 9) {
msg = paste("Wrong number of rows or columns in", filename, sep = " ")
stop(msg)
}
#convert data
df$Date = as.Date(df$Date,"%d/%m/%Y")
#filter by dates
startDate = as.Date("2007-02-01","%Y-%m-%d")
endDate = as.Date("2007-02-02","%Y-%m-%d")
df = df[df$Date >= startDate & df$Date <= endDate,] #or df = subset (df, Date >= startDate & Date <= endDate)
#convert to numeric
df$Global_active_power = sapply(df$Global_active_power, as.numeric)
df
} | /loadData.R | no_license | steppat/ExData_Plotting1 | R | false | false | 727 | r |
loadData = function(){
unzip("exdata-data-household_power_consumption.zip")
filename = "household_power_consumption.txt"
df = read.table(filename, sep=";",header = TRUE, na.strings='?')
if(nrow(df) != 2075259 || ncol(df) != 9) {
msg = paste("Wrong number of rows or columns in", filename, sep = " ")
stop(msg)
}
#convert data
df$Date = as.Date(df$Date,"%d/%m/%Y")
#filter by dates
startDate = as.Date("2007-02-01","%Y-%m-%d")
endDate = as.Date("2007-02-02","%Y-%m-%d")
df = df[df$Date >= startDate & df$Date <= endDate,] #or df = subset (df, Date >= startDate & Date <= endDate)
#convert to numeric
df$Global_active_power = sapply(df$Global_active_power, as.numeric)
df
} |
#BuildGenBankDB.R
BuildGenBankDB <- function(dataDir, ext="*.gbff.gz", range=c(1,Inf), reg.exp=FALSE, ...)
{
#Check arguments
if (length(range)==1) range <- c(1, range)
#Search for GenBank files
cat("Searching for GenBank files ...\n")
if (reg.exp) ext <- glob2rx(ext)
files <- dir(dataDir, ext, recursive=TRUE, full.names=FALSE)
if (length(files)==0) stop("No files found")
first <- max(1, range[1])
last <- min(c(length(files), range[2]))
if (last<first)
stop("the specified range has resulted in no genomes, so no database has been generated")
files <- files[first:last]
#Initialise database
nFiles <- last-first+1
db <- NULL #for holding database of GenBank record info
#Process genomes
basesProcessed <- 0
createTime <- Sys.time()
startTime <- proc.time()
elapsedTime <- 0
for (fn in 1L:nFiles)
{
if (fn>1)
cat(paste("Processing genome", fn, "of", nFiles, "...", elapsedTime, "s elapsed\n"))
else
cat(paste("Processing genome", fn, "of", nFiles, "...\n"))
file <- file.path(dataDir, files[fn])
info <- try(read.gbff(file, features=NULL, sequence=FALSE))
if (class(info)=="try-erorr")
{
print(info)
stop("read.gbff error")
} #if
if (is.null(info) || class(info)!="list" || any(sapply(info, class)!="gbk.info"))
{
cat("Invalid gbk.info:", file, "\n")
next #invalid gbk info loaded, skip it
} #if
nRecords <- length(info)
if (nRecords==0) continue
seqInfo <- data.frame(
Name=character(nRecords),
Description="",
Organism="",
Domain="",
Phylum="",
Class="",
Order="",
Family="",
Genus="",
Species="",
Strain="",
Substrain="",
taxid="",
Accession="",
Version="",
GI="",
Content="",
PhysicalStructure="",
GB="",
LastUpdate="",
File=files[fn],
RecordNo=1L:nRecords,
FileType="gbff",
Length=0L,
IsGenome=FALSE,
IsSequence=FALSE,
IsChromosome=FALSE,
IsPlasmid=FALSE,
IsComplete=FALSE,
IsDraft=FALSE,
Registered=FALSE,
stringsAsFactors=FALSE
)
for (i in 1L:nRecords)
{
seqInfo$Name[i] <- info[[i]]$Name
seqInfo$Description[i] <- info[[i]]$Description
seqInfo$Organism[i] <- info[[i]]$BinomialName
seqInfo$Domain[i] <- info[[i]]$Domain
seqInfo$Phylum[i] <- info[[i]]$Phylum
seqInfo$Class[i] <- info[[i]]$Class
seqInfo$Order[i] <- info[[i]]$Order
seqInfo$Family[i] <- info[[i]]$Family
seqInfo$Genus[i] <- info[[i]]$Genus
seqInfo$Species[i] <- info[[i]]$Species
seqInfo$Strain[i] <- info[[i]]$Strain
seqInfo$Substrain[i] <- info[[i]]$Substrain
seqInfo$taxid[i] <- info[[i]]$taxid
seqInfo$Accession[i] <- info[[i]]$Accession
seqInfo$Version[i] <- info[[i]]$Version
seqInfo$GI[i] <- info[[i]]$GI
seqInfo$Content[i] <- info[[i]]$Content
seqInfo$PhysicalStructure[i] <- info[[i]]$PhysicalStructure
seqInfo$GB[i] <- info[[i]]$GB
seqInfo$LastUpdate[i] <- info[[i]]$LastUpdate
seqInfo$Length[i] <- info[[i]]$Length
seqInfo$IsGenome[i] <- info[[i]]$IsGenome
seqInfo$IsSequence[i] <- info[[i]]$IsSequence
seqInfo$IsChromosome[i] <- info[[i]]$IsChromosome
seqInfo$IsPlasmid[i] <- info[[i]]$IsPlasmid
seqInfo$IsComplete[i] <- info[[i]]$IsComplete
seqInfo$IsDraft[i] <- info[[i]]$IsDraft
seqInfo$Registered[i] <- TRUE
} #for i
db <- rbind(db, seqInfo)
elapsedTime <- round((proc.time()-startTime)[3], 3)
} #for fn
#Set class and other attributes of database
attr(db, "DataDir") <- dataDir
attr(db, "StartCreationDate") <- createTime
attr(db, "EndCreationDate") <- Sys.time()
class(db) <- c("GenBankDB", "GenomeDB", "data.frame")
# db <- structure(db, DataDir=dataDir, StartCreationDate=createTime,
# EndCreationDate=Sys.time(), class=c("GenBankDB", "GenomeDB", "data.frame"))
cat("Done.\n", sum(db$Registered), "sequences out of", nrow(db), "from", nFiles, "assemblies successfully registered.\n")
invisible(db) #return database
} #function
| /GenDB/R/BuildGenBankDB.R | no_license | hartag/GenDB | R | false | false | 4,140 | r | #BuildGenBankDB.R
BuildGenBankDB <- function(dataDir, ext="*.gbff.gz", range=c(1,Inf), reg.exp=FALSE, ...)
{
#Check arguments
if (length(range)==1) range <- c(1, range)
#Search for GenBank files
cat("Searching for GenBank files ...\n")
if (reg.exp) ext <- glob2rx(ext)
files <- dir(dataDir, ext, recursive=TRUE, full.names=FALSE)
if (length(files)==0) stop("No files found")
first <- max(1, range[1])
last <- min(c(length(files), range[2]))
if (last<first)
stop("the specified range has resulted in no genomes, so no database has been generated")
files <- files[first:last]
#Initialise database
nFiles <- last-first+1
db <- NULL #for holding database of GenBank record info
#Process genomes
basesProcessed <- 0
createTime <- Sys.time()
startTime <- proc.time()
elapsedTime <- 0
for (fn in 1L:nFiles)
{
if (fn>1)
cat(paste("Processing genome", fn, "of", nFiles, "...", elapsedTime, "s elapsed\n"))
else
cat(paste("Processing genome", fn, "of", nFiles, "...\n"))
file <- file.path(dataDir, files[fn])
info <- try(read.gbff(file, features=NULL, sequence=FALSE))
if (class(info)=="try-erorr")
{
print(info)
stop("read.gbff error")
} #if
if (is.null(info) || class(info)!="list" || any(sapply(info, class)!="gbk.info"))
{
cat("Invalid gbk.info:", file, "\n")
next #invalid gbk info loaded, skip it
} #if
nRecords <- length(info)
if (nRecords==0) continue
seqInfo <- data.frame(
Name=character(nRecords),
Description="",
Organism="",
Domain="",
Phylum="",
Class="",
Order="",
Family="",
Genus="",
Species="",
Strain="",
Substrain="",
taxid="",
Accession="",
Version="",
GI="",
Content="",
PhysicalStructure="",
GB="",
LastUpdate="",
File=files[fn],
RecordNo=1L:nRecords,
FileType="gbff",
Length=0L,
IsGenome=FALSE,
IsSequence=FALSE,
IsChromosome=FALSE,
IsPlasmid=FALSE,
IsComplete=FALSE,
IsDraft=FALSE,
Registered=FALSE,
stringsAsFactors=FALSE
)
for (i in 1L:nRecords)
{
seqInfo$Name[i] <- info[[i]]$Name
seqInfo$Description[i] <- info[[i]]$Description
seqInfo$Organism[i] <- info[[i]]$BinomialName
seqInfo$Domain[i] <- info[[i]]$Domain
seqInfo$Phylum[i] <- info[[i]]$Phylum
seqInfo$Class[i] <- info[[i]]$Class
seqInfo$Order[i] <- info[[i]]$Order
seqInfo$Family[i] <- info[[i]]$Family
seqInfo$Genus[i] <- info[[i]]$Genus
seqInfo$Species[i] <- info[[i]]$Species
seqInfo$Strain[i] <- info[[i]]$Strain
seqInfo$Substrain[i] <- info[[i]]$Substrain
seqInfo$taxid[i] <- info[[i]]$taxid
seqInfo$Accession[i] <- info[[i]]$Accession
seqInfo$Version[i] <- info[[i]]$Version
seqInfo$GI[i] <- info[[i]]$GI
seqInfo$Content[i] <- info[[i]]$Content
seqInfo$PhysicalStructure[i] <- info[[i]]$PhysicalStructure
seqInfo$GB[i] <- info[[i]]$GB
seqInfo$LastUpdate[i] <- info[[i]]$LastUpdate
seqInfo$Length[i] <- info[[i]]$Length
seqInfo$IsGenome[i] <- info[[i]]$IsGenome
seqInfo$IsSequence[i] <- info[[i]]$IsSequence
seqInfo$IsChromosome[i] <- info[[i]]$IsChromosome
seqInfo$IsPlasmid[i] <- info[[i]]$IsPlasmid
seqInfo$IsComplete[i] <- info[[i]]$IsComplete
seqInfo$IsDraft[i] <- info[[i]]$IsDraft
seqInfo$Registered[i] <- TRUE
} #for i
db <- rbind(db, seqInfo)
elapsedTime <- round((proc.time()-startTime)[3], 3)
} #for fn
#Set class and other attributes of database
attr(db, "DataDir") <- dataDir
attr(db, "StartCreationDate") <- createTime
attr(db, "EndCreationDate") <- Sys.time()
class(db) <- c("GenBankDB", "GenomeDB", "data.frame")
# db <- structure(db, DataDir=dataDir, StartCreationDate=createTime,
# EndCreationDate=Sys.time(), class=c("GenBankDB", "GenomeDB", "data.frame"))
cat("Done.\n", sum(db$Registered), "sequences out of", nrow(db), "from", nFiles, "assemblies successfully registered.\n")
invisible(db) #return database
} #function
|
library(ggplot2)
library(plyr)
## useful inside reorder(), to invert the resulting factor levels
neglength <- function(x) {
  # multiply the element count by -1 so that larger groups sort first
  # when this is used as a reorder() key
  length(x) * -1
}
jWidth <- 5
jHeight <- 4
mlu_teams <- read.delim(file.path("..", "data", "mlu-teams.tsv"),
stringsAsFactors = FALSE)
mlu_cols <- with(mlu_teams, setNames(color, team))
theme_set(theme_bw())
input_dir <- file.path("..", "games", "2014_all-games")
point_file <- file.path(input_dir, "2014_points.rds")
str(point_dat <- readRDS(point_file), give.attr = FALSE) # 1314 obs. of 17 variables
table(point_dat$scor_team, useNA = "always")
# bosWC nykRM pdxST phlSP seaRM sfoDF vanNH wdcCT <NA>
# 67 48 202 76 187 157 226 234 117
str(point_dat <- subset(point_dat, !is.na(scor_team))) # 1197 obs. of 17 var
n_poss_freq <- ddply(point_dat, ~ n_poss, function(x) {
data.frame(n_points = nrow(x))
})
n_poss_freq$cum_points <- cumsum(n_poss_freq$n_points)
tot_points <- sum(n_poss_freq$n_points)
n_poss_freq <-
mutate(n_poss_freq,
inv_cum_points = tot_points - c(0, cum_points[-nrow(n_poss_freq)]),
prop = n_points/tot_points,
cum_prop = cum_points/tot_points,
convert_prop = n_points/inv_cum_points)
n_poss_freq
## find a practical max on n_poss for visualization purposes
p <- ggplot(n_poss_freq, aes(x = n_poss, y = cum_prop))
p + ylim(0, 1) + geom_segment(aes(xend = n_poss, yend = 0), size = I(3))
## once we account for 95% of the goals scored, lump the rest together
(n_poss_max <- max(which(n_poss_freq$cum_prop <= 0.95))) # 4 possessions
(catchall_convert_prop <- # weighted avg of convert_prop for the lump
with(subset(n_poss_freq, n_poss > n_poss_max),
weighted.mean(convert_prop, n_points))) # 0.4570766
## remake this table, lumping the high poss together
n_poss_freq <- n_poss_freq[c('n_poss', 'n_points')]
n_poss_freq$n_points[n_poss_max + 1] <-
sum(with(n_poss_freq, n_points[n_poss > n_poss_max]))
n_poss_freq <- subset(n_poss_freq, n_poss <= n_poss_max + 1)
foo <- with(n_poss_freq, paste0(n_poss, rep(c('', "+"), c(n_poss_max, 1))))
n_poss_freq$n_poss_pretty <- with(n_poss_freq, factor(foo, levels = foo))
tot_points <- sum(n_poss_freq$n_points)
n_poss_freq$cum_points <- cumsum(n_poss_freq$n_points)
n_poss_freq <-
mutate(n_poss_freq,
inv_cum_points = tot_points - c(0, cum_points[-nrow(n_poss_freq)]),
prop = n_points/tot_points,
cum_prop = cum_points/tot_points,
convert_prop = n_points/inv_cum_points)
n_poss_freq$convert_prop[n_poss_max + 1] <- catchall_convert_prop
## I'll show a confidence interval for convert_prop on the fig
alpha <- 0.05
n_poss_freq$conf_rad <-
with(n_poss_freq,
qnorm(1 - alpha/2) * sqrt( (1/inv_cum_points) * convert_prop *
(1 - convert_prop)))
## get overall goal conversion rate
avg_convert_prop <- nrow(point_dat)/sum(point_dat$n_poss) # 0.4659401
#with(n_poss_freq, weighted.mean(convert_prop, n_points)) # ?
anno_text <- paste0("overall goal conversion rate =\n", nrow(point_dat),
" goals on ", sum(point_dat$n_poss), " possessions = ",
sprintf("%1.2f", avg_convert_prop))
text_size <- rel(3) # for annotations
p <- ggplot(n_poss_freq, aes(x = n_poss, y = convert_prop))
p <- p +
geom_hline(yintercept = avg_convert_prop, color = "grey80") +
geom_errorbar(aes(ymin = convert_prop - conf_rad,
ymax = convert_prop + conf_rad), width = 0.15) +
geom_point(size = 3) +
coord_cartesian(xlim = c(0.5, n_poss_max + 1.5),
ylim = c(0, 0.65)) +
xlab("possession number") + ylab("goal conversion rate") +
with(n_poss_freq,
scale_x_continuous(breaks = n_poss, labels = n_poss_pretty)) +
geom_text(aes(x = n_poss, y = 0.2), size = text_size, vjust = 1.2,
label = with(n_poss_freq,
paste0(sprintf("%1.2f", convert_prop), " =\n",
n_points, "\nof\n", inv_cum_points))) +
annotate("text", x = -Inf, y = Inf, label = anno_text,
hjust = -0.1, vjust = 1.2, size = text_size)
p
out_file <- "dots_with_errbars_goal_conversion_rate_by_poss.png"
out_file <- file.path("..", "web", "figs", out_file)
ggsave(out_file, p, width = jWidth, height = jHeight)
text_size <- rel(3) # for annotations
p <- ggplot(n_poss_freq, aes(x = n_poss, y = cum_prop))
p <- p +
geom_point(size = 3) + geom_line() +
coord_cartesian(xlim = c(0.5, n_poss_max + 1.5), ylim = c(0, 1.05)) +
xlab("number of possessions") + ylab("cumulative proportion of points") +
with(n_poss_freq,
scale_x_continuous(breaks = n_poss, labels = n_poss_pretty)) +
geom_text(aes(x = n_poss, y = cum_prop, label = sprintf("%1.2f", cum_prop)),
size = text_size, hjust = -0.3, vjust = 1.2)
p
out_file <- "lineplot_cum_dist_poss_per_point.png"
out_file <- file.path("..", "web", "figs", out_file)
ggsave(out_file, p, width = jWidth, height = jHeight)
| /scripts/15_plot-goal-conversion-rate-by-possession.r | no_license | jennybc/vanNH | R | false | false | 4,967 | r | library(ggplot2)
library(plyr)
## useful inside reorder(), to invert the resulting factor levels
neglength <- function(x) {
  # multiply the element count by -1 so that larger groups sort first
  # when this is used as a reorder() key
  length(x) * -1
}
jWidth <- 5
jHeight <- 4
mlu_teams <- read.delim(file.path("..", "data", "mlu-teams.tsv"),
stringsAsFactors = FALSE)
mlu_cols <- with(mlu_teams, setNames(color, team))
theme_set(theme_bw())
input_dir <- file.path("..", "games", "2014_all-games")
point_file <- file.path(input_dir, "2014_points.rds")
str(point_dat <- readRDS(point_file), give.attr = FALSE) # 1314 obs. of 17 variables
table(point_dat$scor_team, useNA = "always")
# bosWC nykRM pdxST phlSP seaRM sfoDF vanNH wdcCT <NA>
# 67 48 202 76 187 157 226 234 117
str(point_dat <- subset(point_dat, !is.na(scor_team))) # 1197 obs. of 17 var
n_poss_freq <- ddply(point_dat, ~ n_poss, function(x) {
data.frame(n_points = nrow(x))
})
n_poss_freq$cum_points <- cumsum(n_poss_freq$n_points)
tot_points <- sum(n_poss_freq$n_points)
n_poss_freq <-
mutate(n_poss_freq,
inv_cum_points = tot_points - c(0, cum_points[-nrow(n_poss_freq)]),
prop = n_points/tot_points,
cum_prop = cum_points/tot_points,
convert_prop = n_points/inv_cum_points)
n_poss_freq
## find a practical max on n_poss for visualization purposes
p <- ggplot(n_poss_freq, aes(x = n_poss, y = cum_prop))
p + ylim(0, 1) + geom_segment(aes(xend = n_poss, yend = 0), size = I(3))
## once we account for 95% of the goals scored, lump the rest together
(n_poss_max <- max(which(n_poss_freq$cum_prop <= 0.95))) # 4 possessions
(catchall_convert_prop <- # weighted avg of convert_prop for the lump
with(subset(n_poss_freq, n_poss > n_poss_max),
weighted.mean(convert_prop, n_points))) # 0.4570766
## remake this table, lumping the high poss together
n_poss_freq <- n_poss_freq[c('n_poss', 'n_points')]
n_poss_freq$n_points[n_poss_max + 1] <-
sum(with(n_poss_freq, n_points[n_poss > n_poss_max]))
n_poss_freq <- subset(n_poss_freq, n_poss <= n_poss_max + 1)
foo <- with(n_poss_freq, paste0(n_poss, rep(c('', "+"), c(n_poss_max, 1))))
n_poss_freq$n_poss_pretty <- with(n_poss_freq, factor(foo, levels = foo))
tot_points <- sum(n_poss_freq$n_points)
n_poss_freq$cum_points <- cumsum(n_poss_freq$n_points)
n_poss_freq <-
mutate(n_poss_freq,
inv_cum_points = tot_points - c(0, cum_points[-nrow(n_poss_freq)]),
prop = n_points/tot_points,
cum_prop = cum_points/tot_points,
convert_prop = n_points/inv_cum_points)
n_poss_freq$convert_prop[n_poss_max + 1] <- catchall_convert_prop
## I'll show a confidence interval for convert_prop on the fig
alpha <- 0.05
n_poss_freq$conf_rad <-
with(n_poss_freq,
qnorm(1 - alpha/2) * sqrt( (1/inv_cum_points) * convert_prop *
(1 - convert_prop)))
## get overall goal conversion rate
avg_convert_prop <- nrow(point_dat)/sum(point_dat$n_poss) # 0.4659401
#with(n_poss_freq, weighted.mean(convert_prop, n_points)) # ?
anno_text <- paste0("overall goal conversion rate =\n", nrow(point_dat),
" goals on ", sum(point_dat$n_poss), " possessions = ",
sprintf("%1.2f", avg_convert_prop))
text_size <- rel(3) # for annotations
p <- ggplot(n_poss_freq, aes(x = n_poss, y = convert_prop))
p <- p +
geom_hline(yintercept = avg_convert_prop, color = "grey80") +
geom_errorbar(aes(ymin = convert_prop - conf_rad,
ymax = convert_prop + conf_rad), width = 0.15) +
geom_point(size = 3) +
coord_cartesian(xlim = c(0.5, n_poss_max + 1.5),
ylim = c(0, 0.65)) +
xlab("possession number") + ylab("goal conversion rate") +
with(n_poss_freq,
scale_x_continuous(breaks = n_poss, labels = n_poss_pretty)) +
geom_text(aes(x = n_poss, y = 0.2), size = text_size, vjust = 1.2,
label = with(n_poss_freq,
paste0(sprintf("%1.2f", convert_prop), " =\n",
n_points, "\nof\n", inv_cum_points))) +
annotate("text", x = -Inf, y = Inf, label = anno_text,
hjust = -0.1, vjust = 1.2, size = text_size)
p
out_file <- "dots_with_errbars_goal_conversion_rate_by_poss.png"
out_file <- file.path("..", "web", "figs", out_file)
ggsave(out_file, p, width = jWidth, height = jHeight)
text_size <- rel(3) # for annotations
p <- ggplot(n_poss_freq, aes(x = n_poss, y = cum_prop))
p <- p +
geom_point(size = 3) + geom_line() +
coord_cartesian(xlim = c(0.5, n_poss_max + 1.5), ylim = c(0, 1.05)) +
xlab("number of possessions") + ylab("cumulative proportion of points") +
with(n_poss_freq,
scale_x_continuous(breaks = n_poss, labels = n_poss_pretty)) +
geom_text(aes(x = n_poss, y = cum_prop, label = sprintf("%1.2f", cum_prop)),
size = text_size, hjust = -0.3, vjust = 1.2)
p
out_file <- "lineplot_cum_dist_poss_per_point.png"
out_file <- file.path("..", "web", "figs", out_file)
ggsave(out_file, p, width = jWidth, height = jHeight)
|
library(SurvBoost)
### Name: strata.boosting
### Title: Stratification function
### Aliases: strata.boosting
### Keywords: boosting gradient
### ** Examples
# Simulate survival data under a Cox model with five informative (beta = 1)
# and five null (beta = 0) covariates, then stratify the strata index by
# the simulated survival times.
data <- simulate_survival_cox(true_beta=c(1,1,1,1,1,0,0,0,0,0))
strata.boosting(data$strata_idx, data$time)
| /data/genthat_extracted_code/SurvBoost/examples/strata.boosting.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 273 | r | library(SurvBoost)
### Name: strata.boosting
### Title: Stratification function
### Aliases: strata.boosting
### Keywords: boosting gradient
### ** Examples
data <- simulate_survival_cox(true_beta=c(1,1,1,1,1,0,0,0,0,0))
strata.boosting(data$strata_idx, data$time)
|
## VALIDATED by Tarak 2018-04-18
# Regression test: fitting the renewal count model with gamma inter-arrival
# times on the McShane fertility data must reproduce the paper's reported
# log-likelihood of about -2078 (to within 0.5).
test_that("test renewal -- McShane results", {
    print("~~~~~~ renewal regression-- McShane results ~~~~~~~~")
    # reference data shipped with the Countr package
    fn <- system.file("extdata", "McShane_paperResults.RDS", package = "Countr")
    res <- readRDS(fn)
    y <- res$y
    data <- res$data
    form <-
        Y ~ GERMAN + EDU + VOC + UNI + CATH + PROT + MUSL + RURAL + YEAR_OF_B + AGEMARR
    ## =========================== gamma =====================================
    print("............ gamma ............")
    res <- renewalCount(formula = form, data = data, dist = "gamma",
                        computeHessian = FALSE,
                        control = renewal.control(trace = 0)
                        )
    ll <- as.numeric(logLik(res))
    expect_lt(abs(ll - (-2078)), 0.5)
})
# Consistency test for predict(): predictions (response and probability,
# each with standard errors for scale and shape) computed on `newdata`
# must match the corresponding head() of the predictions computed on the
# full training data.
test_that("test renewal -- McShane data --- prediction", {
    print("~~~~~~ renewal prediction-- McShane results ~~~~~~~~")
    fn <- system.file("extdata", "McShane_paperResults.RDS", package = "Countr")
    res <- readRDS(fn)
    y <- res$y
    data <- res$data
    form <-
        Y ~ GERMAN + EDU + VOC + UNI + CATH + PROT + MUSL + RURAL + YEAR_OF_B + AGEMARR
    ## =========================== weibull =====================================
    object <- renewalCount(formula = form, data = data, dist = "weibull",
                           computeHessian = TRUE, weiMethod = "series_acc",
                           control = renewal.control(trace = 0)
                           )
    predOld.response <- predict(object, type = "response", se.fit = TRUE)
    predOld.prob <- predict(object, type = "prob", se.fit = TRUE)
    # re-predict on the first rows only; values and SEs should agree with
    # the head() of the full-data predictions within tolerance
    newData <- head(data)
    predNew.response <- predict(object, newdata = newData,
                                type = "response", se.fit = TRUE)
    predNew.prob <- predict(object, newdata = newData,
                            type = "prob", se.fit = TRUE)
    expect_equal(head(predOld.response$values),
                 predNew.response$values,
                 tolerance = 1e-3)
    expect_equal(head(predOld.response$se$scale),
                 predNew.response$se$scale,
                 tolerance = 1e-3)
    expect_equal(head(predOld.response$se$shape),
                 predNew.response$se$shape,
                 tolerance = 1e-3)
    expect_equal(head(predOld.prob$values),
                 predNew.prob$values,
                 tolerance = 1e-3)
    expect_equal(head(predOld.prob$se$scale),
                 predNew.prob$se$scale,
                 tolerance = 1e-3)
    expect_equal(head(predOld.prob$se$shape),
                 predNew.prob$se$shape,
                 tolerance = 1e-3)
})
## -------------------------- very slow due to conversion from R to C++
## test_that("test renewal -- McShane results -- user passed", {
## print("~~~~~~ renewal regression-- McShane results user passed ~~~~~~~~")
## res <- readRDS("McShane_paperResults.RDS")
## y <- res$y
## data <- res$data
## form <-
## Y ~ GERMAN + EDU + VOC + UNI + CATH + PROT + MUSL + RURAL + YEAR_OF_B + AGEMARR
## ## =========================== weibull =====================================
## print("............ weibull ............")
## parNames <- c("scale", "shape")
## sWei <- function(tt, distP) {
## exp( -distP[["scale"]] * tt ^ distP[["shape"]])
## }
## .getExtrapol <- function(distP) {
## c(2, distP[["shape"]])
## }
## customPars <- list(parNames = parNames,
## survivalFct = sWei,
## extrapolFct = .getExtrapol)
## link <- list(scale = "log", shape = "log")
## ## starting values
## par0 <- coef(glm(form, family = "poisson", data = data))
## names(par0) <- paste0("scale_", names(par0))
## start <- c(par0, shape_ = 1)
## ## control Parameters
## control <- renewal.control(start = start, trace = 0)
## res <- renewal(formula = form, data = data, dist = "custom", link = link,
## control = control, customPars = customPars,
## computeHessian = FALSE)
## ll <- as.numeric(logLik(res))
## expect_lt(abs(ll - (-2077)), 0.1)
## })
| /fuzzedpackages/Countr/inst/examples/example-RenewalRegression.R | no_license | akhikolla/testpackages | R | false | false | 4,169 | r | ## VALIDATED by Tarak 2018-04-18
test_that("test renewal -- McShane results", {
print("~~~~~~ renewal regression-- McShane results ~~~~~~~~")
fn <- system.file("extdata", "McShane_paperResults.RDS", package = "Countr")
res <- readRDS(fn)
y <- res$y
data <- res$data
form <-
Y ~ GERMAN + EDU + VOC + UNI + CATH + PROT + MUSL + RURAL + YEAR_OF_B + AGEMARR
## =========================== gamma =====================================
print("............ gamma ............")
res <- renewalCount(formula = form, data = data, dist = "gamma",
computeHessian = FALSE,
control = renewal.control(trace = 0)
)
ll <- as.numeric(logLik(res))
expect_lt(abs(ll - (-2078)), 0.5)
})
test_that("test renewal -- McShane data --- prediction", {
print("~~~~~~ renewal prediction-- McShane results ~~~~~~~~")
fn <- system.file("extdata", "McShane_paperResults.RDS", package = "Countr")
res <- readRDS(fn)
y <- res$y
data <- res$data
form <-
Y ~ GERMAN + EDU + VOC + UNI + CATH + PROT + MUSL + RURAL + YEAR_OF_B + AGEMARR
## =========================== weibull =====================================
object <- renewalCount(formula = form, data = data, dist = "weibull",
computeHessian = TRUE, weiMethod = "series_acc",
control = renewal.control(trace = 0)
)
predOld.response <- predict(object, type = "response", se.fit = TRUE)
predOld.prob <- predict(object, type = "prob", se.fit = TRUE)
newData <- head(data)
predNew.response <- predict(object, newdata = newData,
type = "response", se.fit = TRUE)
predNew.prob <- predict(object, newdata = newData,
type = "prob", se.fit = TRUE)
expect_equal(head(predOld.response$values),
predNew.response$values,
tolerance = 1e-3)
expect_equal(head(predOld.response$se$scale),
predNew.response$se$scale,
tolerance = 1e-3)
expect_equal(head(predOld.response$se$shape),
predNew.response$se$shape,
tolerance = 1e-3)
expect_equal(head(predOld.prob$values),
predNew.prob$values,
tolerance = 1e-3)
expect_equal(head(predOld.prob$se$scale),
predNew.prob$se$scale,
tolerance = 1e-3)
expect_equal(head(predOld.prob$se$shape),
predNew.prob$se$shape,
tolerance = 1e-3)
})
## -------------------------- very slow due to conversion from R to C++
## test_that("test renewal -- McShane results -- user passed", {
## print("~~~~~~ renewal regression-- McShane results user passed ~~~~~~~~")
## res <- readRDS("McShane_paperResults.RDS")
## y <- res$y
## data <- res$data
## form <-
## Y ~ GERMAN + EDU + VOC + UNI + CATH + PROT + MUSL + RURAL + YEAR_OF_B + AGEMARR
## ## =========================== weibull =====================================
## print("............ weibull ............")
## parNames <- c("scale", "shape")
## sWei <- function(tt, distP) {
## exp( -distP[["scale"]] * tt ^ distP[["shape"]])
## }
## .getExtrapol <- function(distP) {
## c(2, distP[["shape"]])
## }
## customPars <- list(parNames = parNames,
## survivalFct = sWei,
## extrapolFct = .getExtrapol)
## link <- list(scale = "log", shape = "log")
## ## starting values
## par0 <- coef(glm(form, family = "poisson", data = data))
## names(par0) <- paste0("scale_", names(par0))
## start <- c(par0, shape_ = 1)
## ## control Parameters
## control <- renewal.control(start = start, trace = 0)
## res <- renewal(formula = form, data = data, dist = "custom", link = link,
## control = control, customPars = customPars,
## computeHessian = FALSE)
## ll <- as.numeric(logLik(res))
## expect_lt(abs(ll - (-2077)), 0.1)
## })
|
#' Stability selection based on penalized conditional logistic regression
#'
#' Performs stability selection for conditional logistic regression models with
#' L1 and L2 penalty.
#'
#'
#' @inheritParams penalized.clr
#' @inheritParams subsample.clr
#' @param lambda.seq a sequence of non-negative value to be used as tuning
#' parameter for L1
#'
#' @return A list with a numeric vector \code{Pistab}
#' giving selection probabilities for each penalized covariate, and
#' a sequence \code{lambda.seq} used.
#'
#'
#' @export
#' @examples
#' set.seed(123)
#'
#' # simulate covariates (pure noise in two blocks of 20 and 80 variables)
#' X <- cbind(matrix(rnorm(4000, 0, 1), ncol = 20), matrix(rnorm(16000, 2, 0.6), ncol = 80))
#'
#' # stratum membership
#' stratum <- sort(rep(1:100, 2))
#'
#' # the response
#' Y <- rep(c(1, 0), 100)
#'
#' # default L1 penalty
#' lambda <- find.default.lambda(response = Y,
#' penalized = X,
#' stratum = stratum)
#'
#' # perform stability selection
#' \donttest{
#' stable1 <- stable.clr(response = Y, penalized = X, stratum = stratum,
#' lambda.seq = lambda)}
#'
#'
#'
#' @seealso \code{\link{stable.clr.g}} for stability selection
#' in penalized conditional logistic regression with multiple penalties for block structured covariates.
stable.clr <- function(response,
                       stratum,
                       penalized,
                       unpenalized = NULL,
                       lambda.seq,
                       alpha = 1,
                       B = 100,
                       parallel = TRUE,
                       standardize = TRUE,
                       event) {
  # A factor response is converted to 0/1, taking `event` (defaulting to
  # the first factor level) as the positive outcome.
  if (missing(event) && is.factor(response)) event <- levels(response)[1]
  if (is.factor(response)) response <- (response == event) * 1
  # Coerce unpenalized covariates to a matrix if they arrived without dims.
  if (!is.null(unpenalized) && !is.numeric(dim(unpenalized))) {
    unpenalized <- as.matrix(unpenalized, nrow = nrow(penalized))
  }
  # First subsampling pass with the first lambda; the subsampling matrix
  # matB is returned so all remaining lambdas are evaluated on the same
  # B subsamples.
  fit <- subsample.clr(
    response = response,
    stratum = stratum,
    penalized = penalized,
    unpenalized = unpenalized,
    lambda = lambda.seq[1],
    alpha = alpha,
    B = B,
    matB = NULL,
    return.matB = TRUE,
    parallel = parallel,
    standardize = standardize
  )
  # BUG FIX: the condition was `length(lambda.seq == 1)`, i.e. the length
  # of a logical vector, which is non-zero for any non-empty lambda.seq --
  # so the multi-lambda branch below was unreachable.
  if (length(lambda.seq) == 1) {
    # A single tuning parameter: selection probabilities come straight
    # from the single subsampling pass.
    Pistab <- fit$Pistab
  } else {
    matB <- fit$matB
    if (parallel) {
      cl <- parallel::makeCluster(getOption("cl.cores", 2), setup_timeout = 0.5)
      parallel::clusterExport(cl, varlist = c("penalized.clr"))
      # Grid of (subsample index, lambda) pairs for the remaining lambdas.
      P <- expand.grid("B" = seq_len(nrow(matB)), "lambda" = lambda.seq[-1])
      res <- t(parallel::parApply(cl,
        P,
        1,
        FUN = function(x, response, stratum, penalized, unpenalized,
                       matB, alpha, standardize) {
          ind <- stratum %in% matB[x[1], ]
          # 0/1 indicator of which penalized coefficients are selected
          # (non-zero) on this subsample at this lambda.
          (penalized.clr(
            response = response[ind],
            stratum = stratum[ind],
            penalized = penalized[ind, ],
            unpenalized = unpenalized[ind, ],
            lambda = x[2],
            alpha = alpha,
            standardize = standardize
          )$penalized != 0) * 1
        },
        response, stratum, penalized, unpenalized, matB, alpha, standardize
      ))
      res1 <- as.data.frame(cbind(P, res))
      # Average the selection indicators over subsamples, per lambda.
      res2 <- stats::aggregate(res, list(lambda = res1$lambda), mean)
      # Was `fit$P`, which only resolved via `$` partial matching of
      # `Pistab`; reference the element explicitly.
      res <- t(rbind(fit$Pistab, res2[, -c(1)]))
      parallel::stopCluster(cl)
    } else {
      res <- subsample.clr.v(
        response = response,
        stratum = stratum,
        penalized = penalized,
        unpenalized = unpenalized,
        lambda = lambda.seq[-1],
        alpha = alpha,
        B = B,
        matB = fit$matB,
        parallel = FALSE,
        standardize = standardize
      )
      # Explicit element name (was `fit$P`, partial matching).
      res <- cbind(fit$Pistab, res)
    }
    # Stability selection probability: for each covariate, the maximum
    # selection frequency across the lambda sequence.
    Pistab <- apply(res, 1, max)
  }
  res <- list(Pistab = Pistab, lambda.seq = lambda.seq)
  return(res)
}
| /R/stable.clr.R | no_license | cran/penalizedclr | R | false | false | 4,134 | r | #' Stability selection based on penalized conditional logistic regression
#'
#' Performs stability selection for conditional logistic regression models with
#' L1 and L2 penalty.
#'
#'
#' @inheritParams penalized.clr
#' @inheritParams subsample.clr
#' @param lambda.seq a sequence of non-negative value to be used as tuning
#' parameter for L1
#'
#' @return A list with a numeric vector \code{Pistab}
#' giving selection probabilities for each penalized covariate, and
#' a sequence \code{lambda.seq} used.
#'
#'
#' @export
#' @examples
#' set.seed(123)
#'
#' # simulate covariates (pure noise in two blocks of 20 and 80 variables)
#' X <- cbind(matrix(rnorm(4000, 0, 1), ncol = 20), matrix(rnorm(16000, 2, 0.6), ncol = 80))
#'
#' # stratum membership
#' stratum <- sort(rep(1:100, 2))
#'
#' # the response
#' Y <- rep(c(1, 0), 100)
#'
#' # default L1 penalty
#' lambda <- find.default.lambda(response = Y,
#' penalized = X,
#' stratum = stratum)
#'
#' # perform stability selection
#' \donttest{
#' stable1 <- stable.clr(response = Y, penalized = X, stratum = stratum,
#' lambda.seq = lambda)}
#'
#'
#'
#' @seealso \code{\link{stable.clr.g}} for stability selection
#' in penalized conditional logistic regression with multiple penalties for block structured covariates.
stable.clr <- function(response,
                       stratum,
                       penalized,
                       unpenalized = NULL,
                       lambda.seq,
                       alpha = 1,
                       B = 100,
                       parallel = TRUE,
                       standardize = TRUE,
                       event) {
  # A factor response is converted to 0/1, taking `event` (defaulting to
  # the first factor level) as the positive outcome.
  if (missing(event) && is.factor(response)) event <- levels(response)[1]
  if (is.factor(response)) response <- (response == event) * 1
  # Coerce unpenalized covariates to a matrix if they arrived without dims.
  if (!is.null(unpenalized) && !is.numeric(dim(unpenalized))) {
    unpenalized <- as.matrix(unpenalized, nrow = nrow(penalized))
  }
  # First subsampling pass with the first lambda; the subsampling matrix
  # matB is returned so all remaining lambdas are evaluated on the same
  # B subsamples.
  fit <- subsample.clr(
    response = response,
    stratum = stratum,
    penalized = penalized,
    unpenalized = unpenalized,
    lambda = lambda.seq[1],
    alpha = alpha,
    B = B,
    matB = NULL,
    return.matB = TRUE,
    parallel = parallel,
    standardize = standardize
  )
  # BUG FIX: the condition was `length(lambda.seq == 1)`, i.e. the length
  # of a logical vector, which is non-zero for any non-empty lambda.seq --
  # so the multi-lambda branch below was unreachable.
  if (length(lambda.seq) == 1) {
    # A single tuning parameter: selection probabilities come straight
    # from the single subsampling pass.
    Pistab <- fit$Pistab
  } else {
    matB <- fit$matB
    if (parallel) {
      cl <- parallel::makeCluster(getOption("cl.cores", 2), setup_timeout = 0.5)
      parallel::clusterExport(cl, varlist = c("penalized.clr"))
      # Grid of (subsample index, lambda) pairs for the remaining lambdas.
      P <- expand.grid("B" = seq_len(nrow(matB)), "lambda" = lambda.seq[-1])
      res <- t(parallel::parApply(cl,
        P,
        1,
        FUN = function(x, response, stratum, penalized, unpenalized,
                       matB, alpha, standardize) {
          ind <- stratum %in% matB[x[1], ]
          # 0/1 indicator of which penalized coefficients are selected
          # (non-zero) on this subsample at this lambda.
          (penalized.clr(
            response = response[ind],
            stratum = stratum[ind],
            penalized = penalized[ind, ],
            unpenalized = unpenalized[ind, ],
            lambda = x[2],
            alpha = alpha,
            standardize = standardize
          )$penalized != 0) * 1
        },
        response, stratum, penalized, unpenalized, matB, alpha, standardize
      ))
      res1 <- as.data.frame(cbind(P, res))
      # Average the selection indicators over subsamples, per lambda.
      res2 <- stats::aggregate(res, list(lambda = res1$lambda), mean)
      # Was `fit$P`, which only resolved via `$` partial matching of
      # `Pistab`; reference the element explicitly.
      res <- t(rbind(fit$Pistab, res2[, -c(1)]))
      parallel::stopCluster(cl)
    } else {
      res <- subsample.clr.v(
        response = response,
        stratum = stratum,
        penalized = penalized,
        unpenalized = unpenalized,
        lambda = lambda.seq[-1],
        alpha = alpha,
        B = B,
        matB = fit$matB,
        parallel = FALSE,
        standardize = standardize
      )
      # Explicit element name (was `fit$P`, partial matching).
      res <- cbind(fit$Pistab, res)
    }
    # Stability selection probability: for each covariate, the maximum
    # selection frequency across the lambda sequence.
    Pistab <- apply(res, 1, max)
  }
  res <- list(Pistab = Pistab, lambda.seq = lambda.seq)
  return(res)
}
|
context("dc_oai_listsets")
# Happy path: dc_oai_listsets() must return an OAI data frame with
# character setSpec/setName columns.
test_that("dc_oai_listsets", {
  skip_on_cran()
  aa <- dc_oai_listsets()
  expect_is(aa, "data.frame")
  expect_is(aa, "oai_df")
  expect_is(aa$setSpec, "character")
  expect_is(aa$setName, "character")
})
# Curl options should be forwarded to the underlying HTTP request; an
# absurdly small timeout must therefore surface as a timeout error.
test_that("dc_oai_listsets - curl options", {
  skip_on_cran()
  library("httr")
  expect_error(dc_oai_listsets(config = timeout(0.001)), "Timeout was reached")
})
# Invalid resumption tokens (wrong type, or an arbitrary string passed
# positionally) should yield the OAI-PMH badResumptionToken message.
test_that("dc_oai_listsets fails well", {
  skip_on_cran()
  expect_error(dc_oai_listsets(token = 454),
               "The value of the resumptionToken argument is invalid or expired")
  expect_error(dc_oai_listsets("stuff"),
               "The value of the resumptionToken argument is invalid or expired")
})
| /rdatacite/tests/testthat/test-dc_oai_listsets.R | no_license | ingted/R-Examples | R | false | false | 718 | r | context("dc_oai_listsets")
# Happy path: dc_oai_listsets() must return an OAI data frame with
# character setSpec/setName columns.
test_that("dc_oai_listsets", {
  skip_on_cran()
  aa <- dc_oai_listsets()
  expect_is(aa, "data.frame")
  expect_is(aa, "oai_df")
  expect_is(aa$setSpec, "character")
  expect_is(aa$setName, "character")
})
# Curl options should be forwarded to the underlying HTTP request; an
# absurdly small timeout must therefore surface as a timeout error.
test_that("dc_oai_listsets - curl options", {
  skip_on_cran()
  library("httr")
  expect_error(dc_oai_listsets(config = timeout(0.001)), "Timeout was reached")
})
# Invalid resumption tokens (wrong type, or an arbitrary string passed
# positionally) should yield the OAI-PMH badResumptionToken message.
test_that("dc_oai_listsets fails well", {
  skip_on_cran()
  expect_error(dc_oai_listsets(token = 454),
               "The value of the resumptionToken argument is invalid or expired")
  expect_error(dc_oai_listsets("stuff"),
               "The value of the resumptionToken argument is invalid or expired")
})
|
#' Plot sample count correlations
#'
#' Extracts the count matrix for the requested window subset and renders a
#' pairwise sample correlation plot via \code{make_corrplot}.
#'
#' @export
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to calculate correlations either 'whole_genome', 'bait_windows' or 'non_bait_windows'
#' @param method the correlation method to use. Any supported by `cor()` is useable
#' @return NULL
sample_correlation_plot <-
  function(data,
           which = "bait_windows",
           method = "pearson") {
    # nocov start
    # NOTE(review): no matching "# nocov end" marker appears for the
    # "# nocov start" above -- confirm the coverage exclusion is intended.
    # one column per sample, one row per window, for the chosen subset
    mat <- SummarizedExperiment::assay(data[[which]])
    make_corrplot(mat, method)
  }
# Reorder a named matrix so its rows and columns appear in alphabetical
# order of the column names. The same index is applied to both dimensions,
# which assumes rownames mirror colnames (as in a correlation matrix).
order_by_name <- function(m) {
  idx <- order(colnames(m))
  m[idx, idx]
}
# Pairwise correlation-test p-values for the columns of `mat`.
#
# Returns a symmetric n x n matrix (n = ncol(mat)) whose [i, j] entry is
# the p-value of cor.test() applied to columns i and j, with zeros on the
# diagonal. Additional arguments in `...` are passed on to cor.test().
cor.mtest <- function(mat, ...) {
  mat <- as.matrix(mat)
  n <- ncol(mat)
  # NA_real_ keeps the matrix numeric from the start (matrix(NA, ...) is
  # logical until the first numeric assignment)
  p.mat <- matrix(NA_real_, n, n)
  diag(p.mat) <- 0
  if (n > 1) {
    # seq_len() guards the single-column case, where the original
    # `1:(n - 1)` evaluated to c(1, 0) and indexed out of range
    for (i in seq_len(n - 1)) {
      for (j in (i + 1):n) {
        tmp <- cor.test(mat[, i], mat[, j], ...)
        p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
      }
    }
  }
  colnames(p.mat) <- rownames(p.mat) <- colnames(mat)
  return(p.mat)
}
#' generate corrplot from matrix of counts
#'
#' Computes the pairwise sample correlation matrix of `counts`, orders it
#' alphabetically by sample name, and renders the upper triangle with
#' \code{corrplot::corrplot}, blanking cells whose correlation-test
#' p-value exceeds 0.01.
#'
#' @param counts a matrix of counts
#' @param method the correlation method to use, any supported by `cor()` is useable
#' @return ggplot2 plot
make_corrplot <- function(counts, method = "pearson") {
  # NOTE(review): these NULL bindings silence R CMD check NOTEs about
  # non-standard evaluation, but no aes() in this corrplot-based body uses
  # Var1/Var2/value -- presumably left over from an earlier ggplot version.
  Var1 <- Var2 <- value <- NULL
  cors <- cor(counts, method = method)
  # order samples alphabetically so plots are comparable across runs
  cors <- order_by_name(cors)
  # pairwise correlation-test p-values, reordered to match `cors`
  p.mat <- order_by_name(cor.mtest(counts))
  # upper triangle only; cells with p >= sig.level are left blank
  corrplot::corrplot(cors, type="upper",
                     p.mat = p.mat,
                     sig.level= 0.01,
                     insig = "blank",
                     method="circle",
                     tl.col="black")
}
#' generate cumulative plot of number of windows below a threshold in samples
#'
#' For each integer threshold in `from:to`, counts the windows whose
#' coverage falls below that threshold (per sample) and plots the counts,
#' faceted by sample.
#'
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which ("bait_windows") the subdivision of the genome to calculate correlations either 'whole_genome', 'bait_windows' or 'non_bait_windows'
#' @param from (0) the lowest threshold to consider
#' @param to (10) the highest threshold to consider
#' @export
#' @return ggplot2 plot
windows_below_coverage_threshold_plot <-
  function(data,
           which = "bait_windows",
           from = 0,
           to = 10) {
    # Build one result per threshold and bind once, instead of growing a
    # data frame with rbind() inside a loop (quadratic copying). seq(from,
    # to) also behaves correctly when to == from, where the original
    # `(from + 1):to` counted backwards and duplicated thresholds.
    per_threshold <- lapply(seq(from, to), function(i) {
      count_windows_under_threshold(data, which = which, threshold = i)
    })
    df <- do.call(rbind, per_threshold)
    rownames(df) <- NULL
    threshold <- count <- NULL #devtools::check() fix
    p <-
      ggplot2::ggplot(df) +
      ggplot2::aes(threshold, count) +
      ggplot2::geom_point() +
      ggplot2::facet_wrap( ~ sample) +
      ggthemes::scale_color_ptol() +
      ggthemes::scale_fill_ptol() +
      ggplot2::theme_minimal() +
      ggplot2::ggtitle("Counts of windows below critical threshold") +
      ggplot2::labs(x = "Coverage threshold", y = "Windows below threshold")
    return(p)
  }
#' plot M (log2 ratio of a windows sample count to windows all-sample median count ) versus A (log2 sum of a windows sample count to a windows all-sample median count ) for each window
#'
#' Builds an MA data frame with \code{ma_data} and renders a jittered
#' scatter of M against A, faceted by sample.
#'
#' @export
#' @param data an atacr object
#' @param which the subset of windows to operate on
#' @param by a vector of seqnames of the genome to view
ma_plot <- function(data, which = "bait_windows", by = NULL) {
  # placeholder; always overwritten in one of the two branches below
  sample_matrix <- matrix(0)
  # by is to decide on sub-group, IE whole window, chromosome, region
  if (!is.null(by)) {
    # restrict the count matrix to windows overlapping the named seqnames
    roi <- GenomicRanges::GRanges(seqnames = by)
    sample_matrix <-
      SummarizedExperiment::assay(IRanges::subsetByOverlaps(data[[which]], roi))
  }
  else{
    #print(colnames(SummarizedExperiment::assay(data[[which]])))
    #print(which)
    sample_matrix <- SummarizedExperiment::assay(data[[which]])
    #print(colnames(sample_matrix))
    #print(str(sample_matrix))
  }
  # long-form data frame with m and a columns per (window, sample) pair
  ma_df <- ma_data(sample_matrix)
  #print(str(ma_df))
  # do ggplot
  # NULL bindings silence R CMD check NOTEs for the aes() columns below
  a <- m <- NULL
  # NOTE(review): length() of a data frame is its number of COLUMNS, so
  # alpha = 1 / length(ma_df) is a constant (~1/6) regardless of how many
  # points are drawn -- confirm whether 1 / nrow(ma_df) was intended.
  plot <-
    ggplot2::ggplot(ma_df) + ggplot2::aes(a, m) + ggplot2::geom_jitter(alpha =
                                                                         1 / length(ma_df)) + ggplot2::facet_wrap( ~ sample) + ggthemes::scale_color_ptol() + ggthemes::scale_fill_ptol() + ggplot2::theme_minimal()
  return(plot)
}
#' converts SummarizedExperiment::assay matrix to a dataframe with cols 'window', 'sample' and 'count'
#' @param matrix a SummarizedExperiment::assay matrix
assay_matrix_to_df <- function(matrix) {
  # Melt to long format: one row per (window, sample) count.
  long <- reshape::melt(matrix)
  names(long) <- c("window", "sample", "count")
  long
}
#' adds an 'm' and an 'a' column to an assay matrix dataframe for ma plots
#' @export
#' @param sample_matrix a SummarizedExperiment::assay from which to make the MA plot
ma_data <- function(sample_matrix) {
  count <- NULL # quiet R CMD check NOTE about NSE variables
  # Per-window all-sample median, repeated once per sample column so it lines
  # up with the long-format data frame.
  median_counts <- median_virtual_experiment(sample_matrix)
  df <- assay_matrix_to_df(sample_matrix)
  df$mve <- rep(median_counts, length(colnames(sample_matrix)))
  df <- dplyr::mutate(df, m = emm(count, mve))
  df <- dplyr::mutate(df, a = ay(count, mve))
  df
}
#' plot the counts split by chromosome and sample
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to calculate correlations either 'whole_genome', 'bait_windows' or 'non_bait_windows'
#' @param method (bar | smooth | point) which sort of plot to return
#' @export
#' @return ggplot2 plot
plot_count_by_chromosome <-
  function(data,
           which = "bait_windows",
           method = "bar") {
    # Long-format counts with each window id split into its coordinate parts.
    counts_df <-
      assay_matrix_to_df(SummarizedExperiment::assay(data[[which]]))
    counts_df$window <- as.character(counts_df$window)
    counts_df <-
      tidyr::separate(counts_df,
                      window,
                      into = c("seqname", "start", "stop"),
                      sep = "[:-]")
    counts_df$seqname <- as.factor(counts_df$seqname)
    counts_df$start <- as.numeric(counts_df$start)
    counts_df$stop <- as.numeric(counts_df$stop)
    seqname <- count <- NULL # quiet R CMD check NOTE about NSE variables
    p <- ggplot2::ggplot(counts_df)
    # The three methods are mutually exclusive, so an if / else if chain
    # behaves exactly like the original independent ifs.
    if (method == "bar") {
      p <- p + ggplot2::aes(start, count) +
        ggplot2::geom_bar(ggplot2::aes(colour = seqname, fill = seqname),
                          stat = "identity")
    } else if (method == "smooth") {
      p <- p + ggplot2::aes(start) +
        ggplot2::geom_density(ggplot2::aes(colour = seqname, fill = seqname))
    } else if (method == "point") {
      p <- p + ggplot2::aes(start, count) +
        ggplot2::geom_point(ggplot2::aes(colour = seqname, fill = seqname))
    }
    p + ggplot2::facet_grid(sample ~ seqname) +
      ggthemes::scale_color_ptol() +
      ggthemes::scale_fill_ptol() +
      ggplot2::theme_minimal() +
      ggplot2::ggtitle("Read Count Over Chromosome") +
      ggplot2::labs(x = "bp", y = "Read Count")
  }
#' Plot histograms of read counts by sample and window type
#' @export
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to plot (default = bait and non_bait windows)
#' @param sample the sample to plot (default = all )
#' @param log_axis use a log scale for the x-axis
#' @return a ggplot2 object
coverage_summary <-
  function(data,
           which = NULL,
           sample = NULL,
           log_axis = TRUE) {
    all <- as.data.frame(data)
    # Copy the argument so dplyr::filter(sample == samp) compares the data
    # column `sample` against the function argument rather than itself.
    samp <- sample
    count <- window_type <- NULL # quiet R CMD check NOTE about NSE variables
    # Joy/ridge plot of the count distribution, faceted by window type.
    cov_joy_plot <- function(plot_data) {
      p <- ggplot2::ggplot(plot_data) +
        ggplot2::aes(x = count, y = sample) +
        ggjoy::geom_joy(ggplot2::aes(fill = window_type), alpha = 0.7) +
        ggplot2::facet_grid(. ~ window_type) +
        ggthemes::scale_color_ptol() +
        ggthemes::scale_fill_ptol() +
        ggplot2::theme_minimal() +
        ggplot2::ggtitle("Coverage Distribution") +
        ggplot2::labs(x = "Read Count", y = "Number of Windows") +
        ggplot2::theme(legend.position = "none")
      if (log_axis) {
        p <- p + ggplot2::aes(x = log10(count + 1), y = sample)
        p <- p + ggplot2::labs(x = "Log 10 Read Count", y = "Number of Windows")
      }
      return(p)
    }
    # Apply the optional filters sequentially instead of enumerating all four
    # which/sample combinations; the result is identical.
    d <- all
    if (!is.null(which)) {
      d <- dplyr::filter(d, window_type == which)
    }
    if (!is.null(samp)) {
      d <- dplyr::filter(d, sample == samp)
    }
    return(cov_joy_plot(d))
  }
#' Plot distribution of counts in given data set
#' @export
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to plot
#' @param log10 log 10 the counts for plotting.
#' @return ggplot2 plot
plot_counts <- function(data, which = "bait_windows", log10 = TRUE) {
  # Long format: one row per (window, sample) count, tagged with window type.
  long <- reshape2::melt(SummarizedExperiment::assay(data[[which]]))
  colnames(long) <- c("name", "sample", "count")
  long$window_type <- factor(rep(which, length(long$name)))
  count <- window_type <- NULL # quiet R CMD check NOTE about NSE variables
  p <- ggplot2::ggplot(long) +
    ggplot2::aes(x = count, y = sample) +
    ggjoy::geom_joy(ggplot2::aes(fill = window_type), alpha = 0.7) +
    ggthemes::scale_color_ptol() +
    ggthemes::scale_fill_ptol() +
    ggplot2::theme_minimal() +
    ggplot2::ggtitle("Coverage Distribution") +
    ggplot2::labs(x = "Read Count", y = "Number of Windows") +
    ggplot2::theme(legend.position = "none")
  # Optionally swap in a log10 x mapping (replaces the aes and label above).
  if (log10) {
    p <- p +
      ggplot2::aes(x = log10(count + 1), y = sample) +
      ggplot2::labs(x = "Log 10 Read Count", y = "Number of Windows")
  }
  p
}
#' Plot density of read counts by sample over the chromosomes
#' @export
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to plot (default = bait and non_bait windows)
#' @return a ggplot2 object
chromosome_coverage <- function(data, which = NULL) {
  window_type <- NULL # quiet R CMD check NOTE about NSE variables
  all_windows <- as.data.frame(data)
  # Keep everything unless a specific window type was requested.
  d <- if (is.null(which)) {
    all_windows
  } else {
    dplyr::filter(all_windows, window_type == which)
  }
  ggplot2::ggplot(d) +
    ggplot2::aes(start) +
    ggplot2::geom_density(ggplot2::aes(colour = window_type)) +
    ggplot2::facet_grid(sample ~ chromosome, scales = "free_x") +
    ggthemes::scale_color_ptol() +
    ggthemes::scale_fill_ptol() +
    ggplot2::theme_minimal() +
    ggplot2::ggtitle("Density of coverage over chromosomes")
}
#' Named distribution qqplot
#' @export
#' @param obs observed values
#' @param dist expected distribution
#' @return ggplot2 object
qqarb <- function(obs, dist = "norm") {
  # Expected quantiles under the named distribution, paired with the sorted
  # observations (renamed from `exp`, which shadowed base::exp).
  theoretical <- get_expected_values(obs, dist)
  qq_df <- data.frame(observed_values = sort(obs),
                      expected_values = sort(theoretical))
  expected_values <- observed_values <- NULL # quiet R CMD check
  ggplot2::ggplot(qq_df) +
    ggplot2::aes(expected_values, observed_values) +
    ggplot2::geom_point() +
    ggplot2::geom_abline(intercept = 0, slope = 1) +
    ggthemes::scale_color_ptol() +
    ggthemes::scale_fill_ptol() +
    ggplot2::theme_minimal()
}
#' draw count distribution of GOF estimates
#' @export
#' @param atacr a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to plot (default = bait and non_bait
#' @param controls character vector of window names to consider control windows
#' @return ggplot2 object
plot_GoF <- function(atacr, which = "bait_windows", controls = NULL) {
  if (is.null(controls)) {
    stop("Can't plot without provided list of control windows")
  }
  # Compute goodness-of-fit values on demand if they are not already present.
  if (!exists("gofs", where = atacr)) {
    atacr <- estimate_GoFs(atacr, which)
  }
  Window <- is_control <- GoodnessOfFit <- NULL # quiet R CMD check
  gof_df <- data.frame("GoodnessOfFit" = atacr$gofs,
                       "Window" = names(atacr$gofs))
  gof_df <- dplyr::mutate(
    gof_df,
    is_control = dplyr::if_else(Window %in% controls, "Control", "Non Control")
  )
  ggplot2::ggplot(gof_df) +
    ggplot2::aes(GoodnessOfFit) +
    ggplot2::geom_density(ggplot2::aes(color = is_control, fill = is_control),
                          alpha = 0.7) +
    ggthemes::scale_color_ptol() +
    ggthemes::scale_fill_ptol() +
    ggplot2::theme_minimal() +
    ggplot2::theme(legend.title = ggplot2::element_blank())
}
# Connect to a biomaRt mart. `ensembl` selects the host ('plants' for
# plants.ensembl.org, 'ensembl' for the main site); `ens_dataset` is the
# dataset name passed straight to biomaRt::useMart().
get_mart <- function(ensembl, ens_dataset) {
  mart <- switch(
    ensembl,
    plants = biomaRt::useMart("plants_mart",
                              host = "plants.ensembl.org",
                              dataset = ens_dataset),
    ensembl = biomaRt::useMart("ensembl", dataset = ens_dataset)
  )
  # switch() returns NULL invisibly for an unmatched key; fail loudly here
  # rather than letting downstream biomaRt calls error on a NULL mart.
  if (is.null(mart)) {
    stop("Unknown 'ensembl' value: ", ensembl,
         ". Use one of 'plants' or 'ensembl'.", call. = FALSE)
  }
  return(mart)
}
# Fetch chromosome/start/end/strand for `gene_id` from a biomaRt mart.
# The filter name depends on which mart we are talking to (mart@biomart holds
# the mart's internal name, e.g. "plants_mart").
# NOTE(review): "with_entrezgene" is a boolean presence filter in biomaRt
# rather than an id filter -- confirm it returns the intended rows for
# non-plant marts.
get_gene_coords <- function(gene_id, mart) {
  filter <- switch(mart@biomart,
                   plants_mart = "tair_locus",
                   ENSEMBL_MART_ENSEMBL = "with_entrezgene")
  # One row per matching gene; columns are named after the attributes below.
  coords <-
    biomaRt::getBM(
      attributes = c(
        "chromosome_name",
        "start_position",
        "end_position",
        "strand"
      ),
      filters = filter,
      values = gene_id,
      mart = mart
    )
  return(coords)
}
# Build a colour vector with one entry per sample, coloured by that sample's
# treatment. Colours are drawn from the Dark2 palette, recycled when there
# are more treatments than the palette's 8 colours.
select_colours <- function(data) {
  t <- treatments(data)
  allcols <- RColorBrewer::brewer.pal(8, "Dark2")
  allcols <- rep(allcols, ceiling(length(t) / length(allcols)))
  cols <- allcols[seq_along(t)] # seq_along is safe when t is empty
  names(cols) <- t
  # One treatment label per sample, in sample order (replaces a vector grown
  # with c() inside a for loop).
  tr <- unlist(lapply(data$sample_names,
                      function(nm) treatment_from_name(data, nm)))
  return(cols[tr])
}
# Build a GenomeGraphs track of per-window counts for the windows overlapping
# the region described by `coords` (a biomaRt coordinate data frame; only the
# first row is used). `strand` is extracted here but not used further.
get_coverage_in_regions <- function(data, which, coords) {
  sname <- coords$chromosome_name[[1]]
  strt <- coords$start_position[[1]]
  stp <- coords$end_position[[1]]
  strand <- coords$strand[[1]]
  # NOTE(review): `ranges = strt:stp` passes a plain integer vector where
  # GRanges() expects an IRanges -- this likely builds one width-1 range per
  # base; confirm whether IRanges::IRanges(strt, stp) was intended.
  roi <- GenomicRanges::GRanges(seqnames = sname, ranges = strt:stp)
  se <- IRanges::subsetByOverlaps(data[[which]], roi)
  # One colour per sample, keyed by treatment.
  colrs <- select_colours(data) #c(rep("grey", 2), rep("red", 2))
  # Counts are positioned at the start coordinate of each retained window
  # (direct slot access into the SummarizedExperiment's row ranges).
  intens <- GenomeGraphs::makeGenericArray(
    intensity = SummarizedExperiment::assay(se),
    probeStart = se@rowRanges@ranges@start,
    #probeEnd = (se@rowRanges@ranges@start + se@rowRanges@ranges@width),
    # nProbes = nrow(se),
    # probeId = se@rowRanges@ranges@NAMES,
    dp = GenomeGraphs::DisplayPars(
      color = colrs,
      size = 2,
      lwd = 2,
      type = "line",
      pointSize = 1
    )
  )
  return(intens)
}
#' coverage over gene model
#'
#' Draws a GenomeGraphs view of window counts across a gene's region together
#' with the gene model fetched from Ensembl.
#' @export
#' @param data atacr object
#' @param gene_id the id of the gene to plot around
#' @param which the subset of the data to plot.
#' @param ensembl one of 'plants', 'ensembl' - which version of ensembl to connect to
#' @param ens_dataset which ensembl dataset to connect to
#' @return plot object
view_gene <-
  function(data,
           gene_id,
           which = "bait_windows",
           ensembl = "plants",
           ens_dataset = "athaliana_eg_gene") {
    ##get connection to biomart
    mart <- get_mart(ensembl, ens_dataset)
    ##extract gene coords from ensembl
    coords <- get_gene_coords(gene_id, mart)
    start <- coords$start_position[[1]]
    end <- coords$end_position[[1]]
    # Fix: getBM() returns a column named "chromosome_name"; the previous
    # coords$chromosome only worked through $'s fragile partial matching.
    chrom <- coords$chromosome_name[[1]]
    strand <- as.character(coords$strand[[1]])
    ##get coverage count in each window over coords
    values <- get_coverage_in_regions(data, which, coords)
    # "1" is treated as the forward strand; pick the matching axis labelling
    # and convert to the "+"/"-" form used by makeGeneRegion below.
    if (strand == "1") {
      axis <-
        GenomeGraphs::makeGenomeAxis(
          add53 = TRUE,
          littleTicks = TRUE,
          dp = GenomeGraphs::DisplayPars(cex = 0.5)
        )
      strand <- "+"
    } else {
      axis <-
        GenomeGraphs::makeGenomeAxis(
          add35 = TRUE,
          littleTicks = TRUE,
          dp = GenomeGraphs::DisplayPars(cex = 0.5)
        )
      strand <- "-"
    }
    ##get features in gene region from ensembl
    g <- GenomeGraphs::makeGeneRegion(
      start = start,
      end = end,
      chromosome = chrom,
      strand = strand,
      biomart = mart,
      dp = GenomeGraphs::DisplayPars(protein_coding = "steelblue")
    )
    # Assemble the tracks top-to-bottom and draw.
    view <- list(GenomeGraphs::makeTitle(gene_id),
                 "counts" = values,
                 "gene" = g,
                 axis)
    GenomeGraphs::gdPlot(view, minBase = start, maxBase = end)
  }
# nocov end
#' PCA plot of samples
#' @export
#' @param data atacr object
#' @param which the subset of the data to plot
#'
#' @return ggplot object
sample_pca_plot <- function(data, which = "bait_windows") {
  sample_matrix <- SummarizedExperiment::assay(data[[which]])
  # Samples are the matrix columns, so sample loadings live in $rotation.
  df_pca <- stats::prcomp(sample_matrix)
  df_out_r <- as.data.frame(df_pca$rotation)
  df_out_r$sample <- row.names(df_out_r)
  # Quiet R CMD check NOTE about NSE variables, consistent with the other
  # plotting functions in this file.
  PC1 <- PC2 <- NULL
  p <- ggplot2::ggplot(df_out_r) +
    ggplot2::aes(x = PC1, y = PC2, label = sample, color = sample) +
    ggplot2::geom_point() + ggplot2::geom_text(size = 3) +
    ggthemes::scale_color_ptol() +
    ggthemes::scale_fill_ptol() +
    ggplot2::theme_minimal()
  return(p)
}
| /R/figures.R | no_license | TeamMacLean/atacr | R | false | false | 16,300 | r | #' Plot sample count correlations
#' @export
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to calculate correlations either 'whole_genome', 'bait_windows' or 'non_bait_windows'
#' @param method the correlation method to use. Any supported by `cor()` is useable
#' @return NULL
sample_correlation_plot <-
function(data,
which = "bait_windows",
method = "pearson") {
# nocov start
mat <- SummarizedExperiment::assay(data[[which]])
make_corrplot(mat, method)
}
# Reorder a square matrix (e.g. a correlation matrix) so both rows and
# columns follow the sort order of the column names. Assumes rownames and
# colnames refer to the same set of samples, in the same order.
order_by_name <- function(m) {
  ord <- order(colnames(m)) # compute the permutation once, not twice
  return(m[ord, ord])
}
# Pairwise p-values for correlations between the columns of `mat`, computed
# with stats::cor.test. Returns a symmetric ncol x ncol matrix with a zero
# diagonal and dimnames taken from the input's column names. Arguments in
# `...` are forwarded to cor.test (e.g. method = "spearman").
cor.mtest <- function(mat, ...) {
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- matrix(NA, n, n)
  diag(p.mat) <- 0
  # seq_len() guards the n <= 1 case, where 1:(n - 1) would count backwards
  # and index column n + 1 out of bounds.
  for (i in seq_len(n - 1)) {
    for (j in (i + 1):n) {
      tmp <- cor.test(mat[, i], mat[, j], ...)
      p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
    }
  }
  colnames(p.mat) <- rownames(p.mat) <- colnames(mat)
  return(p.mat)
}
#' generate corrplot from matrix of counts
#' @param counts a matrix of counts
#' @param method the correlation method to use, any supported by `cor()` is useable
#' @return the value returned by corrplot::corrplot (the plot is drawn as a side effect)
make_corrplot <- function(counts, method = "pearson") {
  Var1 <- Var2 <- value <- NULL # quiet R CMD check NOTE about NSE variables
  # Correlations and their pairwise p-values, both reordered alphabetically
  # so rows/columns line up.
  cor_matrix <- order_by_name(cor(counts, method = method))
  p_values <- order_by_name(cor.mtest(counts))
  corrplot::corrplot(
    cor_matrix,
    type = "upper",
    p.mat = p_values,
    sig.level = 0.01,
    insig = "blank",
    method = "circle",
    tl.col = "black"
  )
}
#' generate cumulative plot of number of windows below a threshold in samples
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which ("bait_windows") the subdivision of the genome to calculate correlations either 'whole_genome', 'bait_windows' or 'non_bait_windows'
#' @param from (0) the lowest threshold to consider
#' @param to (10) the highest threshold to consider
#' @export
#' @return ggplot2 plot
windows_below_coverage_threshold_plot <-
function(data,
which = "bait_windows",
from = 0,
to = 10) {
df <-
count_windows_under_threshold(data, which = which, threshold = from)
for (i in (from + 1):to) {
df <-
rbind(df,
count_windows_under_threshold(data, which = which, threshold = i))
}
rownames(df) <- NULL
threshold <- count <- NULL #devtools::check() fix
p <-
ggplot2::ggplot(df) + ggplot2::aes(threshold, count) + ggplot2::geom_point() + ggplot2::facet_wrap( ~ sample) + ggthemes::scale_color_ptol() + ggthemes::scale_fill_ptol() + ggplot2::theme_minimal() + ggplot2::ggtitle("Counts of windows below critical threshold") + ggplot2::labs(x =
"Coverage threshold", y = "Windows below threshold")
return(p)
}
#' plot M (log2 ratio of a windows sample count to windows all-sample median count ) versus A (log2 sum of a windows sample count to a windows all-sample median count ) for each window
#' @export
#' @param data an atacr object
#' @param which the subset of windows to operate on
#' @param by a vector of seqnames of the genome to view
ma_plot <- function(data, which = "bait_windows", by = NULL) {
sample_matrix <- matrix(0)
# by is to decide on sub-group, IE whole window, chromosome, region
if (!is.null(by)) {
roi <- GenomicRanges::GRanges(seqnames = by)
sample_matrix <-
SummarizedExperiment::assay(IRanges::subsetByOverlaps(data[[which]], roi))
}
else{
#print(colnames(SummarizedExperiment::assay(data[[which]])))
#print(which)
sample_matrix <- SummarizedExperiment::assay(data[[which]])
#print(colnames(sample_matrix))
#print(str(sample_matrix))
}
ma_df <- ma_data(sample_matrix)
#print(str(ma_df))
# do ggplot
a <- m <- NULL
plot <-
ggplot2::ggplot(ma_df) + ggplot2::aes(a, m) + ggplot2::geom_jitter(alpha =
1 / length(ma_df)) + ggplot2::facet_wrap( ~ sample) + ggthemes::scale_color_ptol() + ggthemes::scale_fill_ptol() + ggplot2::theme_minimal()
return(plot)
}
#' converts SummarizedExperiment::assay matrix to a dataframe with cols 'window', 'sample' and 'count
#' @param matrix a SummarizedExperiment::assay matrix
assay_matrix_to_df <- function(matrix) {
v <- reshape::melt(matrix)
colnames(v) <- c("window", "sample", "count")
return(v)
}
#' adds an 'm' and an 'a' column to an assay matrix dataframe for ma plots
#' @export
#' @param sample_matrix a SummarizedExperiment::assay from which to make the MA plot
ma_data <- function(sample_matrix) {
count <- NULL
mve <- median_virtual_experiment(sample_matrix)
v <- assay_matrix_to_df(sample_matrix)
v$mve <- rep(mve, length(colnames(sample_matrix)))
v <- dplyr::mutate(v, m = emm(count, mve))
v <- dplyr::mutate(v, a = ay(count, mve))
return(v)
}
#' plot the counts split by chromosome and sample
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to calculate correlations either 'whole_genome', 'bait_windows' or 'non_bait_windows'
#' @param method (bar | smooth | point) which sort of plot to return
#' @export
#' @return ggplot2 plot
plot_count_by_chromosome <-
function(data,
which = "bait_windows",
method = "bar") {
v <- assay_matrix_to_df(SummarizedExperiment::assay(data[[which]]))
v$window <- as.character(v$window)
v <-
tidyr::separate(v,
window,
into = c("seqname", "start", "stop"),
sep = "[:-]")
v$seqname <- as.factor(v$seqname)
v$start <- as.numeric(v$start)
v$stop <- as.numeric(v$stop)
p <- ggplot2::ggplot(v)
if (method == 'bar') {
seqname <- count <- NULL
p <-
p + ggplot2::aes(start, count) + ggplot2::geom_bar(ggplot2::aes(colour =
seqname, fill = seqname), stat = "identity")
}
if (method == 'smooth') {
p <-
p + ggplot2::aes(start) + ggplot2::geom_density(ggplot2::aes(colour = seqname, fill = seqname))
}
if (method == 'point') {
p <-
p + ggplot2::aes(start, count) + ggplot2::geom_point(ggplot2::aes(colour =
seqname, fill = seqname))
}
p <-
p + ggplot2::facet_grid(sample ~ seqname) + ggthemes::scale_color_ptol() + ggthemes::scale_fill_ptol() + ggplot2::theme_minimal() + ggplot2::ggtitle("Read Count Over Chromosome") + ggplot2::labs(x = "bp", y =
"Read Count")
return(p)
}
#' Plot histograms of read counts by sample and window type
#' @export
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to plot (default = bait and non_bait windows)
#' @param sample the sample to plot (default = all )
#' @param log_axis use a log scale for the x-axis
#' @return a ggplot2 object
coverage_summary <-
function(data,
which = NULL,
sample = NULL,
log_axis = TRUE) {
all <- as.data.frame(data)
samp <- sample
cov_joy_plot <- function(data) {
p <- ggplot2::ggplot(data) +
ggplot2::aes(x = count, y = sample) +
ggjoy::geom_joy(ggplot2::aes(fill = window_type),alpha = 0.7) +
ggplot2::facet_grid(. ~ window_type ) +
ggthemes::scale_color_ptol() +
ggthemes::scale_fill_ptol() +
ggplot2::theme_minimal() +
ggplot2::ggtitle("Coverage Distribution") +
ggplot2::labs(x = "Read Count", y = "Number of Windows") +
ggplot2::theme(legend.position = "none")
if (log_axis) {
p <- p + ggplot2::aes(x = log10(count + 1), y = sample)
p <- p + ggplot2::labs(x = "Log 10 Read Count", y = "Number of Windows")
}
return(p)
}
if (is.null(which) & is.null(samp)) {
count <- window_type <- NULL
p <- cov_joy_plot(all)
return(p)
}
if (is.null(which) & !is.null(samp)) {
d <- all %>% dplyr::filter(sample == samp)
p <- cov_joy_plot(d)
return(p)
}
if (!is.null(which) & is.null(samp)) {
d <- all %>% dplyr::filter(window_type == which)
p <- cov_joy_plot(d)
return(p)
}
if (!is.null(which) && !is.null(samp)) {
d <- all %>% dplyr::filter(window_type == which) %>%
dplyr::filter(sample == samp)
p <- cov_joy_plot(d)
return(p)
}
}
#' Plot distribution of counts in given data set
#' @export
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to plot
#' @param log10 log 10 the counts for plotting.
#' @return ggplot2 plot
plot_counts <- function(data, which = "bait_windows", log10 = TRUE){
d <- reshape2::melt(SummarizedExperiment::assay(data[[which]]))
colnames(d) <- c("name", "sample", "count")
d$window_type <- factor(rep(which,length(d$name)))
count <- window_type <- NULL
p <- ggplot2::ggplot(d) +
ggplot2::aes(x = count, y = sample) +
ggjoy::geom_joy(ggplot2::aes(fill = window_type),alpha = 0.7) +
ggthemes::scale_color_ptol() +
ggthemes::scale_fill_ptol() +
ggplot2::theme_minimal() +
ggplot2::ggtitle("Coverage Distribution") +
ggplot2::labs(x = "Read Count", y = "Number of Windows") +
ggplot2::theme(legend.position = "none")
if (log10) {
p <- p + ggplot2::aes(x = log10(count + 1), y = sample)
p <- p + ggplot2::labs(x = "Log 10 Read Count", y = "Number of Windows")
}
return(p)
}
#' Plot density of read counts by sample over the chromosomes
#' @export
#' @param data a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to plot (default = bait and non_bait windows)
#' @return a ggplot2 object
chromosome_coverage <- function(data, which = NULL) {
all <- as.data.frame(data)
d <- NULL
if (is.null(which)) {
d <- all
}
else{
window_type <- NULL #deal with devtools::check()
d <- all %>% dplyr::filter(window_type == which)
}
p <-
ggplot2::ggplot(d) + ggplot2::aes(start) + ggplot2::geom_density(ggplot2::aes(colour =
window_type)) + ggplot2::facet_grid(sample ~ chromosome, scales = "free_x") + ggthemes::scale_color_ptol() + ggthemes::scale_fill_ptol() + ggplot2::theme_minimal() + ggplot2::ggtitle("Density of coverage over chromosomes")
return(p)
}
#' Named distribution qqplot
#' @export
#' @param obs observed values
#' @param dist expected distribution
#' @return ggplot2 object
qqarb <- function(obs, dist = "norm") {
exp <- get_expected_values(obs, dist)
df <-
data.frame(observed_values = sort(obs),
expected_values = sort(exp))
expected_values <- observed_values <- NULL
p <-
ggplot2::ggplot(df) + ggplot2::aes(expected_values, observed_values) + ggplot2::geom_point() + ggplot2::geom_abline(intercept = 0, slope = 1) + ggthemes::scale_color_ptol() + ggthemes::scale_fill_ptol() + ggplot2::theme_minimal()
return(p)
}
#' draw count distribution of GOF estimates
#' @export
#' @param atacr a list of SummarizedExperiment objects from atacr::make_counts()
#' @param which the subdivision of the genome to plot (default = bait and non_bait
#' @param controls character vector of window names to consider control windows
#' @return ggplot2 object
plot_GoF <- function(atacr, which = "bait_windows", controls = NULL){
if (is.null(controls)){
stop("Can't plot without provided list of control windows")
}
if (!exists("gofs", where = atacr)) {
atacr <- estimate_GoFs(atacr, which)
}
Window <- is_control <- GoodnessOfFit <- NULL
df <- data.frame(
"GoodnessOfFit" = atacr$gofs,
"Window" = names(atacr$gofs)
) %>%
dplyr::mutate( is_control = dplyr::if_else(Window %in% controls, "Control", "Non Control") )
return( ggplot2::ggplot(df) +
ggplot2::aes(GoodnessOfFit) +
ggplot2::geom_density(ggplot2::aes(color = is_control, fill = is_control),
alpha = 0.7) +
ggthemes::scale_color_ptol() +
ggthemes::scale_fill_ptol() +
ggplot2::theme_minimal() +
ggplot2::theme(legend.title = ggplot2::element_blank())
)
}
get_mart <- function(ensembl, ens_dataset) {
mart <- switch(
ensembl,
plants = biomaRt::useMart("plants_mart",
host = "plants.ensembl.org",
dataset = ens_dataset),
ensembl = biomaRt::useMart("ensembl", dataset = ens_dataset)
)
return(mart)
}
get_gene_coords <- function(gene_id, mart) {
filter <- switch(mart@biomart,
plants_mart = "tair_locus",
ENSEMBL_MART_ENSEMBL = "with_entrezgene")
coords <-
biomaRt::getBM(
attributes = c(
"chromosome_name",
"start_position",
"end_position",
"strand"
),
filters = filter,
values = gene_id,
mart = mart
)
return(coords)
}
select_colours <- function(data) {
t <- treatments(data)
allcols <- RColorBrewer::brewer.pal(8, "Dark2")
allcols <- rep(allcols, ceiling(length(t) / length(allcols)))
cols <- allcols[1:length(t)]
names(cols) <- t
tr <- NULL
for (i in data$sample_names) {
tr <- c(tr, treatment_from_name(data, i))
}
return(cols[tr])
}
get_coverage_in_regions <- function(data, which, coords) {
sname <- coords$chromosome_name[[1]]
strt <- coords$start_position[[1]]
stp <- coords$end_position[[1]]
strand <- coords$strand[[1]]
roi <- GenomicRanges::GRanges(seqnames = sname, ranges = strt:stp)
se <- IRanges::subsetByOverlaps(data[[which]], roi)
colrs <- select_colours(data) #c(rep("grey", 2), rep("red", 2))
intens <- GenomeGraphs::makeGenericArray(
intensity = SummarizedExperiment::assay(se),
probeStart = se@rowRanges@ranges@start,
#probeEnd = (se@rowRanges@ranges@start + se@rowRanges@ranges@width),
# nProbes = nrow(se),
# probeId = se@rowRanges@ranges@NAMES,
dp = GenomeGraphs::DisplayPars(
color = colrs,
size = 2,
lwd = 2,
type = "line",
pointSize = 1
)
)
return(intens)
}
#' coverage over gene model
#' @export
#' @param data atacr object
#' @param gene_id the id of the gene to plot around
#' @param which the subset of the data to plot.
#' @param ensembl one of 'plants', 'ensembl' - which version of ensembl to connect to
#' @param ens_dataset which ensembl dataset to connect to
#' @return plot object
view_gene <-
function(data,
gene_id,
which = "bait_windows",
ensembl = "plants",
ens_dataset = "athaliana_eg_gene") {
##get connection to biomart
mart <- get_mart(ensembl, ens_dataset)
##extract gene coords from ensembl
coords <- get_gene_coords(gene_id, mart)
start <- coords$start_position[[1]]
end <- coords$end_position[[1]]
chrom <- coords$chromosome[[1]]
strand <- as.character(coords$strand[[1]])
##get coverage count in each window over coords
values <- get_coverage_in_regions(data, which, coords)
if (strand == "1") {
axis <-
GenomeGraphs::makeGenomeAxis(
add53 = TRUE,
littleTicks = TRUE,
dp = GenomeGraphs::DisplayPars(cex = 0.5)
)
strand = "+"
} else {
axis <-
GenomeGraphs::makeGenomeAxis(
add35 = TRUE,
littleTicks = TRUE,
dp = GenomeGraphs::DisplayPars(cex = 0.5)
)
strand = "-"
}
##get features in gene region from ensembl
g <- GenomeGraphs::makeGeneRegion(
start = start,
end = end,
chromosome = chrom,
strand = strand,
biomart = mart,
dp = GenomeGraphs::DisplayPars(protein_coding = "steelblue")
)
view <- list(GenomeGraphs::makeTitle(gene_id),
"counts" = values,
"gene" = g,
axis)
GenomeGraphs::gdPlot(view, minBase = start, maxBase = end)
}
# nocov end
#' PCA plot of samples
#' @export
#' @param data atacr object
#' @param which the subset of the data to plot
#'
#' @return ggplot object
sample_pca_plot <- function(data, which = "bait_windows") {
sample_matrix <- SummarizedExperiment::assay(data[[which]])
df_pca <- prcomp(sample_matrix)
df_out_r <- as.data.frame(df_pca$rotation)
df_out_r$sample <- row.names(df_out_r)
p <- ggplot2::ggplot(df_out_r) +
ggplot2::aes(x = PC1,y = PC2,label=sample, color = sample ) +
ggplot2::geom_point() + ggplot2::geom_text(size = 3) +
ggthemes::scale_color_ptol() +
ggthemes::scale_fill_ptol() +
ggplot2::theme_minimal()
return(p)
}
|
#' List Accounts
#'
#' Get a list of trading accounts.
#' @export
#' @family accounts
#' @references \url{https://docs.gdax.com/#list-accounts}
get_accounts <- function() {
  # GET /accounts
  endpoint <- "accounts"
  gdax_get(endpoint)
}
#' Get an Account
#'
#' Information for a single account. Use this endpoint when you know the account_id.
#'
#' @param account_id Account Id
#' @export
#' @family accounts
#' @references \url{https://docs.gdax.com/#get-an-account}
get_account <- function(account_id) {
  # GET /accounts/<account_id>
  gdax_get(paste("accounts", account_id, sep = "/"))
}
#' Get Account History
#'
#' List account activity. Account activity either increases or decreases your
#' account balance. Items are paginated and sorted latest first. See the
#' Pagination section for retrieving additional entries after the first page.
#'
#' @param account_id Account Id
#' @export
#' @family accounts
#' @references \url{https://docs.gdax.com/#get-account-history}
get_account_history <- function(account_id) {
  # GET /accounts/<account_id>/ledger
  gdax_get(paste("accounts", account_id, "ledger", sep = "/"))
}
#' Get Holds
#'
#' Holds are placed on an account for any active orders or pending withdraw
#' requests. As an order is filled, the hold amount is updated. If an order is
#' canceled, any remaining hold is removed. For a withdraw, once it is
#' completed, the hold is removed.
#'
#' @param account_id Account Id
#' @export
#' @family accounts
#' @references \url{https://docs.gdax.com/#get-holds}
get_account_holds <- function(account_id) {
  # GET /accounts/<account_id>/holds
  # (roxygen @references above previously pointed at the get-account-history
  # anchor -- a copy-paste slip; corrected to #get-holds.)
  endpoint <- paste("accounts", account_id, "holds", sep = "/")
  gdax_get(endpoint)
}
| /R/accounts.r | permissive | jfontestad/gdaxr | R | false | false | 1,605 | r | #' List Accounts
#'
#' Get a list of trading accounts.
#' @export
#' @family accounts
#' @references \url{https://docs.gdax.com/#list-accounts}
get_accounts <- function() {
gdax_get("accounts")
}
#' Get an Account
#'
#' Information for a single account. Use this endpoint when you know the account_id.
#'
#' @param account_id Account Id
#' @export
#' @family accounts
#' @references \url{https://docs.gdax.com/#get-an-account}
get_account <- function(account_id) {
endpoint <- paste("accounts", account_id, sep = "/")
gdax_get(endpoint)
}
#' Get Account History
#'
#' List account activity. Account activity either increases or decreases your
#' account balance. Items are paginated and sorted latest first. See the
#' Pagination section for retrieving additional entries after the first page.
#'
#' @param account_id Account Id
#' @export
#' @family accounts
#' @references \url{https://docs.gdax.com/#get-account-history}
get_account_history <- function(account_id) {
endpoint <- paste("accounts", account_id, "ledger", sep = "/")
gdax_get(endpoint)
}
#' Get Holds
#'
#' Holds are placed on an account for any active orders or pending withdraw
#' requests. As an order is filled, the hold amount is updated. If an order is
#' canceled, any remaining hold is removed. For a withdraw, once it is
#' completed, the hold is removed.
#'
#' @param account_id Account Id
#' @export
#' @family accounts
#' @references \url{https://docs.gdax.com/#get-account-history}
get_account_holds <- function(account_id) {
endpoint <- paste("accounts", account_id, "holds", sep = "/")
gdax_get(endpoint)
}
|
################################################
# Spatially-explicit aquatic plant growth #
# Floating plant - submerged plant competition #
# Now compatible with new LIST structure #
# #
# By: Michael J. McCann #
# Major revision: July 2014 #
################################################
#
# CURRENT SETUP:
# Multiple species of duckweed growing - Each species is on their own 2-d grid (matrix)
# Positions of plants based on X,Y coordinates in a matrix
# Includes an overwintering step, where there is a large die-off of individuals
#
# TO DO:
# Modify shape of the waterbody (non-rectangular)
#
# Print parameter value labels on outputs (.gif or .jpg files)
# Can I get saveHTML (package animation) outputs to save well
# Figure out why I'm using saveHTML and not saveGIF() in package animation
# Modify reproduction so it gives up after a max distance of looking for a place to reproduce (so new plants aren't thrown too far)
#
# Example for naming files/plots with variable values
# x=1
# paste(c("x = ", x), collapse = "")
#
# ISSUES & PROBLEMS:
# Does the order of evaluting individuals in MOVE() matter? (i.e., center vs. UL corner vs. UR corner, etc.) - probably not b/c don't require empty cell
#
########################################################################################################
########################################
# load workspace for de-bugging
#
# LIST has an initial time step only
# short (3 yrs, 50 days each)
# one FP species
# load("testworkspace-1FPspecies.Rdata")
# four FP species
# load("testworkspace-4FPspecies.Rdata")
#
# complete LIST
# 7 years, 125 days each
# load("testworkspace-complete.Rdata")
########################################
# imports parameter values for all simulations (rows 1501:1944 of the input)
parameters <- read.csv("input_for_committee.csv")[1501:1944, ]
# Check for errors in the input file
# source(file=paste(getwd(),"/FUNCTIONS/WARNING.R",sep=""),chdir=TRUE)
# WARNING(parameters)
# add blank (all-NA) result columns to parameters, one per summary statistic
# produced by the simulation loop below; replaces 16 copy-pasted
# `parameters$x <- rep(NA, nrow(parameters))` assignments with one
# vectorized column creation (same column names, same order, same NA values)
result_columns <- c(
  "propyears_avgFPcover_abovethreshold",
  "propyears_prop_daysFP_abovehalf",
  "avg_avg_FPcover",
  "avg_max_FPcover",
  "avg_firstdayFP",
  "propyears_avgSAVcover_abovethreshold",
  "propyears_prop_daysSAV_abovehalf",
  "avg_avg_SAVcover",
  "avg_max_SAVcover",
  "avg_firstdaySAV",
  paste0("avg_SAVcover_yr0", 1:3),
  paste0("avg_FPcover_yr0", 1:3)
)
parameters[result_columns] <- NA
# load the packages you need
# library() (not require()) so a missing package fails loudly here instead of
# surfacing later as "could not find function %dopar%"
library(foreach)
library(doSNOW)
library(R.utils) # package for sourceDirectory() - loading all the functions in a directory
# make the correct number of clusters - the first argument will change depending on the machine / set-up
cl <- makeCluster(4, "SOCK")
# load all your functions into this (master) session
sourceDirectory(path = paste(getwd(), "/FUNCTIONS", sep = ""), recursive = FALSE)
# copy the model functions into the global environment of each worker node
clusterExport(cl, c("BLANK25", "GROW_SAV25", "GROW_FP25",
                    "INPUT25", "MOVE25", "OUTPUT26",
                    "OVERWINTER25", "SPECIES25", "START25",
                    "STEP25", "UPTAKE_N25", "UPTAKE_P25", "WIND25"))
registerDoSNOW(cl) # registers the SNOW parallel backend w/ foreach package
getDoParWorkers() # returns the # of workers - this should match the # of cores on your machine (or # cores - 1)
# Run every simulation (one per row of `parameters`) in parallel.
# Each iteration returns a numeric vector of summary statistics;
# .combine=rbind stacks them into the RESULT matrix.
# .errorhandling='pass' means a failed simulation contributes its error
# object instead of aborting the whole batch.
RESULT <- foreach (i = 1:nrow(parameters), .combine = rbind, .errorhandling = 'pass') %dopar% {
  # keep a stable copy of the simulation # for this iteration; it is used by
  # INPUT25() to read the right row of the .csv and is returned at the end
  simulnumb <- i
  INPUT25(simulnumb) # reads the .csv file of parameter values and assigns them to the global environment
  # NOTE(review): INPUT25() appears to create `years`, `days`, `numbFPspecies`,
  # etc. in the worker's global environment, which is why they look
  # undeclared below - confirm against FUNCTIONS/INPUT25.R
  specieslist <- SPECIES25() # builds the list of species-specific parameters used in STEP25()
  # define a couple of things in the environment that get used in STEP25() and OUTPUT26()
  timesteps <- years * (days + 1) # this will need to change b/c of overwintering
  winters <- (days + 1) * seq(from = 1, to = years, by = 1) # ID timesteps that are winters - used in STEP25()
  # preallocate the list of daily states; length timesteps + 1 because the
  # daily loop below writes LIST[[t+1]] up to t = timesteps (the original
  # code silently grew the list past its preallocated length)
  LIST <- vector("list", timesteps + 1)
  for (step in seq_len(timesteps)) { # renamed from `i`, which shadowed the foreach index
    LIST[[step]] <- BLANK25()
  }
  # initialize first time step
  LIST[[1]] <- START25(LIST[[1]])
  # advance the simulation one time step at a time
  today <- LIST[[1]]
  for (t in seq_len(timesteps)) {
    tomorrow <- STEP25(today, t)
    LIST[[t + 1]] <- tomorrow
    today <- tomorrow
    ##################################
    # Plot as you go (slows it down) #
    ##################################
    # require(raster)
    # make raster layers
    # LANDrast<-raster(LAND)
    # SAV<-raster(LIST[[t]]$SAV)
    # for (y in 1:numbFPspecies){
    # assign(paste("FP0",y,sep=""),raster(LIST[[t]]$FP[[y]]))
    # }
    # FPtotal<-raster(LIST[[t]]$FPtotal)
    # stack raster layers
    # I need a smarter way to make this variable length
    # if (numbFPspecies == 4){
    # all_layers <- stack(LANDrast,SAV,FPtotal,FP01,FP02,FP03,FP04)
    # }
    # if (numbFPspecies == 3){
    # all_layers <- stack(LANDrast,SAV,FPtotal,FP01,FP02,FP03)
    # }
    # if (numbFPspecies == 2){
    # all_layers <- stack(LANDrast,SAV,FPtotal,FP01,FP02)
    # }
    # if (numbFPspecies == 1){
    # all_layers <- stack(LANDrast,SAV,FPtotal,FP01)
    # }
    # name raster layers
    # names(all_layers)[1] <- "LAND"
    # names(all_layers)[2] <- "SAV"
    # names(all_layers)[3] <- "FPtotal"
    # for (y in 1:numbFPspecies){
    # names(all_layers)[y+3] <- paste("FP0",y,sep="")
    # }
    # plot raster layers
    # plot(all_layers)
    # print timestep to console - SLOW!
    # print(t)
  }
  # generates graphs and (apparently) assigns the summary statistics used
  # below into the environment - confirm against FUNCTIONS/OUTPUT26.R
  # if you want .html animation you must specify animate=TRUE
  # set "FP regime" threshold here
  OUTPUT26(animate = FALSE, regimethreshold = 70)
  # When not using foreach() loop:
  # RESULTS[simulnumb,1] <- propyears_avgFPcover_abovethreshold
  # RESULTS[simulnumb,2] <- propyears_prop_daysFP_abovehalf
  # RESULTS[simulnumb,3] <- avg_avg_FPcover
  # When not using foreach() loop (alternative):
  # parameters$propyears_avgFPcover_abovethreshold[simulnumb] <- propyears_avgFPcover_abovethreshold
  # parameters$propyears_prop_daysFP_abovehalf[simulnumb] <- propyears_prop_daysFP_abovehalf
  # parameters$avg_avg_FPcover[simulnumb] <- avg_avg_FPcover
  # The vector below is the value of the last expression in this %dopar%
  # block, i.e. the RETURN VALUE of this foreach iteration; .combine=rbind
  # collects one such row per simulation into RESULT. That is why removing
  # it breaks the colnames(RESULT) / write.csv steps later in the script.
  c(simulnumb,
    propyears_avgFPcover_abovethreshold,
    propyears_prop_daysFP_abovehalf,
    avg_avg_FPcover,
    avg_max_FPcover,
    avg_firstdayFP,
    propyears_avgSAVcover_abovethreshold,
    propyears_prop_daysSAV_abovehalf,
    avg_avg_SAVcover,
    avg_max_SAVcover,
    avg_firstdaySAV,
    avg_SAVcover_yr01,
    avg_SAVcover_yr02,
    avg_SAVcover_yr03,
    avg_FPcover_yr01,
    avg_FPcover_yr02,
    avg_FPcover_yr03)
}
# stop the cluster (release the worker processes)
stopCluster(cl)
# name columns of RESULT; this order must match the result vector returned
# from each foreach iteration above
colnames(RESULT) <- c("simulnumb",
                      "propyears_avgFPcover_abovethreshold",
                      "propyears_prop_daysFP_abovehalf",
                      "avg_avg_FPcover",
                      "avg_max_FPcover",
                      "avg_firstdayFP",
                      "propyears_avgSAVcover_abovethreshold",
                      "propyears_prop_daysSAV_abovehalf",
                      "avg_avg_SAVcover",
                      "avg_max_SAVcover",
                      "avg_firstdaySAV",
                      "avg_SAVcover_yr01",
                      "avg_SAVcover_yr02",
                      "avg_SAVcover_yr03",
                      "avg_FPcover_yr01",
                      "avg_FPcover_yr02",
                      "avg_FPcover_yr03")
# convert to a data frame
RESULT <- as.data.frame(RESULT)
# order by simulation number (parallel workers can finish out of order)
RESULT <- RESULT[order(RESULT$simulnumb), ]
# copy every result column (all but simulnumb) onto `parameters` by name;
# a single by-name assignment replaces 16 error-prone positional copies
parameters[colnames(RESULT)[-1]] <- RESULT[-1]
# write parameters with RESULT appended to a .csv
write.csv(parameters, "output_for_committee_c.csv", row.names = FALSE)
# combine the three batch outputs (a, b, c) into the final file
data01 <- read.csv("output_for_committee_a.csv")
data02 <- read.csv("output_for_committee_b.csv")
data03 <- read.csv("output_for_committee_c.csv")
data <- rbind(data01, data02, data03)
write.csv(data, "output_for_committee.csv", row.names = FALSE)
| /SUMMARY/09-11-2014 - output_for_committee - larger lakes/RUNITALL_for_committee.R | no_license | mccannecology/model | R | false | false | 10,591 | r | ################################################
# Spatially-explicit aquatic plant growth #
# Floating plant - submerged plant competition #
# Now compatible with new LIST structure #
# #
# By: Michael J. McCann #
# Major revision: July 2014 #
################################################
#
# CURRENT SETUP:
# Multiple species of duckweed growing - Each species is on their own 2-d grid (matrix)
# Positions of plants based on X,Y coordinates in a matrix
# Includes an overwintering step, where there is a large die-off of individuals
#
# TO DO:
# Modify shape of the waterbody (non-rectangular)
#
# Print parameter value labels on outputs (.gif or .jpg files)
# Can I get saveHTML (package animation) outputs to save well
# Figure out why I'm using saveHTML and not saveGIF() in package animation
# Modify reproduction so it gives up after a max distance of looking for a place to reproduce (so new plants aren't thrown too far)
#
# Example for naming files/plots with variable values
# x=1
# paste(c("x = ", x), collapse = "")
#
# ISSUES & PROBLEMS:
# Does the order of evaluating individuals in MOVE() matter? (i.e., center vs. UL corner vs. UR corner, etc.) - probably not b/c don't require empty cell
#
########################################################################################################
########################################
# load workspace for de-bugging
#
# LIST has an initial time step only
# short (3 yrs, 50 days each)
# one FP species
# load("testworkspace-1FPspecies.Rdata")
# four FP species
# load("testworkspace-4FPspecies.Rdata")
#
# complete LIST
# 7 years, 125 days each
# load("testworkspace-complete.Rdata")
########################################
# imports parameter values for all simulations
# imports parameter values for all simulations (this batch covers rows 1501:1944)
parameters <- read.csv("input_for_committee.csv")[1501:1944,]
# Check for errors in the input file
# source(file=paste(getwd(),"/FUNCTIONS/WARNING.R",sep=""),chdir=TRUE)
# WARNING(parameters)
# add blank columns to parameters, one NA column per summary statistic
# produced by OUTPUT26(); a named vector + loop replaces 16 copy-pasted
# assignments so the list only has to be maintained in one place
result_cols <- c(
  "propyears_avgFPcover_abovethreshold",
  "propyears_prop_daysFP_abovehalf",
  "avg_avg_FPcover",
  "avg_max_FPcover",
  "avg_firstdayFP",
  "propyears_avgSAVcover_abovethreshold",
  "propyears_prop_daysSAV_abovehalf",
  "avg_avg_SAVcover",
  "avg_max_SAVcover",
  "avg_firstdaySAV",
  "avg_SAVcover_yr01", "avg_SAVcover_yr02", "avg_SAVcover_yr03",
  "avg_FPcover_yr01", "avg_FPcover_yr02", "avg_FPcover_yr03"
)
for (col in result_cols) {
  parameters[[col]] <- rep(NA, nrow(parameters))
}
# load the packages you need
# library() (not require()) so a missing package fails loudly here instead of
# surfacing later as "could not find function %dopar%"
library(foreach)
library(doSNOW)
library(R.utils) # package for sourceDirectory() - loading all the functions in a directory
# make the correct number of clusters - the first argument will change depending on the machine / set-up
cl <- makeCluster(4, "SOCK")
# load all your functions into this (master) session
sourceDirectory(path = paste(getwd(), "/FUNCTIONS", sep = ""), recursive = FALSE)
# copy the model functions into the global environment of each worker node
clusterExport(cl, c("BLANK25", "GROW_SAV25", "GROW_FP25",
                    "INPUT25", "MOVE25", "OUTPUT26",
                    "OVERWINTER25", "SPECIES25", "START25",
                    "STEP25", "UPTAKE_N25", "UPTAKE_P25", "WIND25"))
registerDoSNOW(cl) # registers the SNOW parallel backend w/ foreach package
getDoParWorkers() # returns the # of workers - this should match the # of cores on your machine (or # cores - 1)
# Run every simulation (one per row of `parameters`) in parallel.
# Each iteration returns a numeric vector of summary statistics;
# .combine=rbind stacks them into the RESULT matrix.
# .errorhandling='pass' means a failed simulation contributes its error
# object instead of aborting the whole batch.
RESULT <- foreach (i = 1:nrow(parameters), .combine = rbind, .errorhandling = 'pass') %dopar% {
  # keep a stable copy of the simulation # for this iteration; it is used by
  # INPUT25() to read the right row of the .csv and is returned at the end
  simulnumb <- i
  INPUT25(simulnumb) # reads the .csv file of parameter values and assigns them to the global environment
  # NOTE(review): INPUT25() appears to create `years`, `days`, `numbFPspecies`,
  # etc. in the worker's global environment, which is why they look
  # undeclared below - confirm against FUNCTIONS/INPUT25.R
  specieslist <- SPECIES25() # builds the list of species-specific parameters used in STEP25()
  # define a couple of things in the environment that get used in STEP25() and OUTPUT26()
  timesteps <- years * (days + 1) # this will need to change b/c of overwintering
  winters <- (days + 1) * seq(from = 1, to = years, by = 1) # ID timesteps that are winters - used in STEP25()
  # preallocate the list of daily states; length timesteps + 1 because the
  # daily loop below writes LIST[[t+1]] up to t = timesteps (the original
  # code silently grew the list past its preallocated length)
  LIST <- vector("list", timesteps + 1)
  for (step in seq_len(timesteps)) { # renamed from `i`, which shadowed the foreach index
    LIST[[step]] <- BLANK25()
  }
  # initialize first time step
  LIST[[1]] <- START25(LIST[[1]])
  # advance the simulation one time step at a time
  today <- LIST[[1]]
  for (t in seq_len(timesteps)) {
    tomorrow <- STEP25(today, t)
    LIST[[t + 1]] <- tomorrow
    today <- tomorrow
    ##################################
    # Plot as you go (slows it down) #
    ##################################
    # require(raster)
    # make raster layers
    # LANDrast<-raster(LAND)
    # SAV<-raster(LIST[[t]]$SAV)
    # for (y in 1:numbFPspecies){
    # assign(paste("FP0",y,sep=""),raster(LIST[[t]]$FP[[y]]))
    # }
    # FPtotal<-raster(LIST[[t]]$FPtotal)
    # stack raster layers
    # I need a smarter way to make this variable length
    # if (numbFPspecies == 4){
    # all_layers <- stack(LANDrast,SAV,FPtotal,FP01,FP02,FP03,FP04)
    # }
    # if (numbFPspecies == 3){
    # all_layers <- stack(LANDrast,SAV,FPtotal,FP01,FP02,FP03)
    # }
    # if (numbFPspecies == 2){
    # all_layers <- stack(LANDrast,SAV,FPtotal,FP01,FP02)
    # }
    # if (numbFPspecies == 1){
    # all_layers <- stack(LANDrast,SAV,FPtotal,FP01)
    # }
    # name raster layers
    # names(all_layers)[1] <- "LAND"
    # names(all_layers)[2] <- "SAV"
    # names(all_layers)[3] <- "FPtotal"
    # for (y in 1:numbFPspecies){
    # names(all_layers)[y+3] <- paste("FP0",y,sep="")
    # }
    # plot raster layers
    # plot(all_layers)
    # print timestep to console - SLOW!
    # print(t)
  }
  # generates graphs and (apparently) assigns the summary statistics used
  # below into the environment - confirm against FUNCTIONS/OUTPUT26.R
  # if you want .html animation you must specify animate=TRUE
  # set "FP regime" threshold here
  OUTPUT26(animate = FALSE, regimethreshold = 70)
  # When not using foreach() loop:
  # RESULTS[simulnumb,1] <- propyears_avgFPcover_abovethreshold
  # RESULTS[simulnumb,2] <- propyears_prop_daysFP_abovehalf
  # RESULTS[simulnumb,3] <- avg_avg_FPcover
  # When not using foreach() loop (alternative):
  # parameters$propyears_avgFPcover_abovethreshold[simulnumb] <- propyears_avgFPcover_abovethreshold
  # parameters$propyears_prop_daysFP_abovehalf[simulnumb] <- propyears_prop_daysFP_abovehalf
  # parameters$avg_avg_FPcover[simulnumb] <- avg_avg_FPcover
  # The vector below is the value of the last expression in this %dopar%
  # block, i.e. the RETURN VALUE of this foreach iteration; .combine=rbind
  # collects one such row per simulation into RESULT. That is why removing
  # it breaks the colnames(RESULT) / write.csv steps later in the script.
  c(simulnumb,
    propyears_avgFPcover_abovethreshold,
    propyears_prop_daysFP_abovehalf,
    avg_avg_FPcover,
    avg_max_FPcover,
    avg_firstdayFP,
    propyears_avgSAVcover_abovethreshold,
    propyears_prop_daysSAV_abovehalf,
    avg_avg_SAVcover,
    avg_max_SAVcover,
    avg_firstdaySAV,
    avg_SAVcover_yr01,
    avg_SAVcover_yr02,
    avg_SAVcover_yr03,
    avg_FPcover_yr01,
    avg_FPcover_yr02,
    avg_FPcover_yr03)
}
# stop the cluster (release the worker processes)
stopCluster(cl)
# name columns of RESULT; this order must match the result vector returned
# from each foreach iteration above
colnames(RESULT) <- c("simulnumb",
                      "propyears_avgFPcover_abovethreshold",
                      "propyears_prop_daysFP_abovehalf",
                      "avg_avg_FPcover",
                      "avg_max_FPcover",
                      "avg_firstdayFP",
                      "propyears_avgSAVcover_abovethreshold",
                      "propyears_prop_daysSAV_abovehalf",
                      "avg_avg_SAVcover",
                      "avg_max_SAVcover",
                      "avg_firstdaySAV",
                      "avg_SAVcover_yr01",
                      "avg_SAVcover_yr02",
                      "avg_SAVcover_yr03",
                      "avg_FPcover_yr01",
                      "avg_FPcover_yr02",
                      "avg_FPcover_yr03")
# convert to a data frame
RESULT <- as.data.frame(RESULT)
# order by simulation number (parallel workers can finish out of order)
RESULT <- RESULT[order(RESULT$simulnumb), ]
# copy every result column (all but simulnumb) onto `parameters` by name;
# a single by-name assignment replaces 16 error-prone positional copies
parameters[colnames(RESULT)[-1]] <- RESULT[-1]
# write parameters with RESULT appended to a .csv
write.csv(parameters, "output_for_committee_c.csv", row.names = FALSE)
# combine the three batch outputs (a, b, c) into the final file
data01 <- read.csv("output_for_committee_a.csv")
data02 <- read.csv("output_for_committee_b.csv")
data03 <- read.csv("output_for_committee_c.csv")
data <- rbind(data01, data02, data03)
write.csv(data, "output_for_committee.csv", row.names = FALSE)
|
## Given the properties we can read off from the
## PH diagram in Eavor's patent, basic properties
## of ethane, ammonia, and the 60:40 mixing
## proportion, we want to complete the property list
## for the vertices in the power cycle - e.g.,
## temperature, enthalpy, and others if needed.
## First, we source the aforementioned values we
## could read off, which are loaded in
## parameter_values.R
## load packages: tidyverse for read_delim()/pipes, here for project-relative paths
library(tidyverse)
library(here)
## parameter_values.R presumably defines `nh3`, `c2h6` (each used below with
## $p_c, $t_c, $h_c) and the `power_cycle` list of vertices (A-E + critical)
## read off the patent's PH diagram - confirm against that file
source("parameter_values.R")
## Next, we download isobaric properties for ethane
## and ammonia for p_r values matching those of the
## 5 power cycle vertices. There are 3 distinct
## p_r values: 0.778 (A), 0.322 (B,C), 0.939 (D,E).
## absolute pressures for each species: reduced pressure * critical pressure
p_r_vec <- c(0.322, 0.778, 0.939)
ammonia_p_vec <- nh3$p_c * p_r_vec
ethane_p_vec <- c2h6$p_c * p_r_vec
## We haven't yet been able to script the downloading
## of data from NIST, so the data files were gathered
## manually and collected in the data/ subdirectory.
## Next, we proceed through the vertices one at a time
## and work through the fitting procedure. This is
## essentially the same code block for vertices
## A, C, D, E, and B; these could be combined at
## some point. We did vertex B out of sequence because
## that was the only one where we thought the vertex
## might wind up in the biphasic region and thus
## require special handling. As it happens, it wound
## up in the vapor region, so no special handling was
## required.
##############################################################
## fit vertex A
##############################################################
## Fitting procedure (identical for every vertex block below):
##  1. load the NIST isobar tables for NH3 and C2H6 at this vertex's p_r
##  2. put both species on a common reduced-temperature (t_r) grid and form
##     the 60:40 (C2H6:NH3, molar) mixture enthalpy relative to the critical
##     point
##  3. invert h_mix(t_r) at the vertex's target enthalpy to get the fitted t_r
##  4. sanity-check against enthalpies looked up manually at NIST at the
##     implied per-species temperatures (those numbers are hand-transcribed
##     constants below)
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p78.tsv"), delim = "\t")
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p78.tsv"), delim = "\t")
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values
## (printed for interactive inspection; the 0.49-1.72 grid below was chosen
## by eye from this output)
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## interpolate enthalpies to this common scale
## NOTE(review): 0.49-1.72 is hard-coded in every vertex block; if the isobar
## data files change, re-check it against the range() output above
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
## mixture enthalpy relative to the critical point: 40% NH3 + 60% C2H6
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
## invert: find the t_r whose mixture enthalpy matches vertex A's target
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$A$h - power_cycle$critical$h))$y
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 27.383 and 17.203, resp
## compute h_mix as weighted sum
0.6 * 17.203 + 0.4 * 27.383
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here 4.4)
21.275 - 16.8738
## store the fitted reduced temperature and mixture temperature on the vertex
power_cycle$A$t_r <- new_t_r
power_cycle$A$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
##############################################################
## fit vertex C
##############################################################
## same procedure as vertex A, at p_r = 0.322
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p32.tsv"), delim = "\t")
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p32.tsv"), delim = "\t")
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## interpolate enthalpies to this common scale
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$C$h - power_cycle$critical$h))$y ## update!
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 10.54 and 5.0956, resp
## compute h_mix as weighted sum
0.6 * 5.0956 + 0.4 * 10.54
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here -9.6)
7.273 - 16.8738
power_cycle$C$t_r <- new_t_r ## update!
power_cycle$C$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c) ## update!
##############################################################
## fit vertex D
##############################################################
## same procedure as vertex A, at p_r = 0.939
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p94.tsv"), delim = "\t") ## update!
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p94.tsv"), delim = "\t") ## update!
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## interpolate enthalpies to this common scale
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$D$h - power_cycle$critical$h))$y ## update!
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 10.647 and 5.191, resp
## compute h_mix as weighted sum
0.6 * 5.191 + 0.4 * 10.647
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here -9.5)
7.3734 - 16.8738
power_cycle$D$t_r <- new_t_r ## update!
power_cycle$D$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c) ## update!
##############################################################
## fit vertex E
##############################################################
## same procedure as vertex A, at p_r = 0.939 (same isobar files as D)
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p94.tsv"), delim = "\t") ## update!
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p94.tsv"), delim = "\t") ## update!
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## interpolate enthalpies to this common scale
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$E$h - power_cycle$critical$h))$y ## update!
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 27.831 and 17.579, resp
## compute h_mix as weighted sum
0.6 * 17.579 + 0.4 * 27.831
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here 4.8)
21.6798 - 16.8738
power_cycle$E$t_r <- new_t_r ## update!
power_cycle$E$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c) ## update!
##############################################################
## fit vertex B
##############################################################
## same procedure as vertex A, at p_r = 0.322 (same isobar files as C);
## done last because B was expected to land in the biphasic region
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p32.tsv"), delim = "\t") ## update!
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p32.tsv"), delim = "\t") ## update!
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## interpolate enthalpies to this common scale
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$B$h - power_cycle$critical$h))$y ## update!
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 27.678 and 16.817, resp
## compute h_mix as weighted sum
0.6 * 16.817 + 0.4 * 27.678
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here 3.9)
21.1614 - 16.8738
## We're notably off here; we get 4.3 vs 3.9.
## Their shift into the biphasic region hurts our modeling.
power_cycle$B$t_r <- new_t_r ## update!
power_cycle$B$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c) ## update!
| /02_complete_vertex_values.R | no_license | kabagg/chbe_401_project | R | false | false | 11,215 | r | ## Given the properties we can read off from the
## PH diagram in Eavor's patent, basic properties
## of ethane, ammonia, and the 60:40 mixing
## proportion, we want to complete the property list
## for the vertices in the power cycle - e.g.,
## temperature, enthalpy, and others if needed.
## First, we source the aforementioned values we
## could read off, which are loaded in
## parameter_values.R
## load packages: tidyverse for read_delim()/pipes, here for project-relative paths
library(tidyverse)
library(here)
## parameter_values.R presumably defines `nh3`, `c2h6` (each used below with
## $p_c, $t_c, $h_c) and the `power_cycle` list of vertices (A-E + critical)
## read off the patent's PH diagram - confirm against that file
source("parameter_values.R")
## Next, we download isobaric properties for ethane
## and ammonia for p_r values matching those of the
## 5 power cycle vertices. There are 3 distinct
## p_r values: 0.778 (A), 0.322 (B,C), 0.939 (D,E).
## absolute pressures for each species: reduced pressure * critical pressure
p_r_vec <- c(0.322, 0.778, 0.939)
ammonia_p_vec <- nh3$p_c * p_r_vec
ethane_p_vec <- c2h6$p_c * p_r_vec
## We haven't yet been able to script the downloading
## of data from NIST, so the data files were gathered
## manually and collected in the data/ subdirectory.
## Next, we proceed through the vertices one at a time
## and work through the fitting procedure. This is
## essentially the same code block for vertices
## A, C, D, E, and B; these could be combined at
## some point. We did vertex B out of sequence because
## that was the only one where we thought the vertex
## might wind up in the biphasic region and thus
## require special handling. As it happens, it wound
## up in the vapor region, so no special handling was
## required.
##############################################################
## fit vertex A
##############################################################
## Fitting procedure (identical for every vertex block in this script):
##  1. load the NIST isobar tables for NH3 and C2H6 at this vertex's p_r
##  2. put both species on a common reduced-temperature (t_r) grid and form
##     the 60:40 (C2H6:NH3, molar) mixture enthalpy relative to the critical
##     point
##  3. invert h_mix(t_r) at the vertex's target enthalpy to get the fitted t_r
##  4. sanity-check against enthalpies looked up manually at NIST
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p78.tsv"), delim = "\t")
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p78.tsv"), delim = "\t")
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values
## (printed for interactive inspection; the 0.49-1.72 grid below was chosen
## by eye from this output)
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## interpolate enthalpies to this common scale
## NOTE(review): 0.49-1.72 is hard-coded in every vertex block; if the isobar
## data files change, re-check it against the range() output above
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
## mixture enthalpy relative to the critical point: 40% NH3 + 60% C2H6
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
## invert: find the t_r whose mixture enthalpy matches vertex A's target
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$A$h - power_cycle$critical$h))$y
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 27.383 and 17.203, resp
## compute h_mix as weighted sum
0.6 * 17.203 + 0.4 * 27.383
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here 4.4)
21.275 - 16.8738
## store the fitted reduced temperature and mixture temperature on the vertex
power_cycle$A$t_r <- new_t_r
power_cycle$A$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
##############################################################
## fit vertex C
##############################################################
## same procedure as vertex A, at p_r = 0.322
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p32.tsv"), delim = "\t")
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p32.tsv"), delim = "\t")
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## interpolate enthalpies to this common scale
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$C$h - power_cycle$critical$h))$y ## update!
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 10.54 and 5.0956, resp
## compute h_mix as weighted sum
0.6 * 5.0956 + 0.4 * 10.54
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here -9.6)
7.273 - 16.8738
power_cycle$C$t_r <- new_t_r ## update!
power_cycle$C$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c) ## update!
##############################################################
## fit vertex D
##############################################################
## Same procedure as vertex C, but on the reduced-pressure-0.94
## isobars and targeting vertex D's enthalpy.
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p94.tsv"), delim = "\t") ## update!
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p94.tsv"), delim = "\t") ## update!
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values (printed for manual inspection)
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## NOTE(review): hard-coded grid -- confirm against the ranges printed
## above for the 0.94 isobar (comment may be copied from the template).
## interpolate enthalpies to this common scale
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
## invert: t_r at which the mixture offset equals (h_D - h_crit)
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$D$h - power_cycle$critical$h))$y ## update!
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 10.647 and 5.191, resp
## compute h_mix as weighted sum
0.6 * 5.191 + 0.4 * 10.647
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here -9.5)
7.3734 - 16.8738
## store the fitted reduced and absolute temperatures for vertex D
power_cycle$D$t_r <- new_t_r ## update!
power_cycle$D$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c) ## update!
##############################################################
## fit vertex E
##############################################################
## Same procedure as vertex C, on the reduced-pressure-0.94 isobars,
## targeting vertex E's enthalpy.
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p94.tsv"), delim = "\t") ## update!
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p94.tsv"), delim = "\t") ## update!
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values (printed for manual inspection)
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## NOTE(review): hard-coded grid -- confirm against the ranges printed
## above for the 0.94 isobar.
## interpolate enthalpies to this common scale
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
## invert: t_r at which the mixture offset equals (h_E - h_crit)
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$E$h - power_cycle$critical$h))$y ## update!
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 27.831 and 17.579, resp
## compute h_mix as weighted sum
0.6 * 17.579 + 0.4 * 27.831
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here 4.8)
21.6798 - 16.8738
## store the fitted reduced and absolute temperatures for vertex E
power_cycle$E$t_r <- new_t_r ## update!
power_cycle$E$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c) ## update!
##############################################################
## fit vertex B
##############################################################
## Same procedure as vertex C, on the reduced-pressure-0.32 isobars,
## targeting vertex B's enthalpy.
## load isobar data
nh3_isobar <-
  read_delim(here::here("data", "nh3_0p32.tsv"), delim = "\t") ## update!
c2h6_isobar <-
  read_delim(here::here("data", "c2h6_0p32.tsv"), delim = "\t") ## update!
## compute reduced temperatures
nh3_isobar$t_r <- nh3_isobar$`Temperature (K)` / nh3$t_c
c2h6_isobar$t_r <- c2h6_isobar$`Temperature (K)` / c2h6$t_c
## identify range of common t_r values (printed for manual inspection)
range(nh3_isobar$t_r)
range(c2h6_isobar$t_r)
## here, this runs from 0.49 to 1.72
## NOTE(review): hard-coded grid -- confirm against the ranges printed
## above for the 0.32 isobar.
## interpolate enthalpies to this common scale
common_t_r <- seq(from = 0.49, to = 1.72, by = 0.01)
mix_h_mat <- matrix(NA, nrow = length(common_t_r), ncol = 4)
colnames(mix_h_mat) <- c("t_r", "nh3_h", "c2h6_h", "mix")
mix_h_mat[, "t_r"] <- common_t_r
mix_h_mat[, "nh3_h"] <-
  approx(x = nh3_isobar$t_r, y = nh3_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "c2h6_h"] <-
  approx(x = c2h6_isobar$t_r, y = c2h6_isobar$`Enthalpy (kJ/mol)`,
         xout = mix_h_mat[, "t_r"])$y
mix_h_mat[, "mix"] <-
  0.4 * (mix_h_mat[, "nh3_h"] - nh3$h_c) +
  0.6 * (mix_h_mat[, "c2h6_h"] - c2h6$h_c)
## invert: t_r at which the mixture offset equals (h_B - h_crit)
new_t_r <-
  approx(x = mix_h_mat[, "mix"], y = mix_h_mat[, "t_r"],
         xout = (power_cycle$B$h - power_cycle$critical$h))$y ## update!
## using this new t_r, compute t_nh3, t_c2h6, and t_mix
new_t_r * nh3$t_c
new_t_r * c2h6$t_c
new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c)
## over at NIST, look up enthalpies for NH3 and C2H6
## at their respective P,T values: 27.678 and 16.817, resp
## compute h_mix as weighted sum
0.6 * 16.817 + 0.4 * 27.678
## compute h_c_mix
0.6 * c2h6$h_c + 0.4 * nh3$h_c
## compare h_mix - h_c_mix with target value (here 3.9)
21.1614 - 16.8738
## We're notably off here; we get 4.3 vs 3.9.
## Their shift into the biphasic region hurts our modeling.
## store the fitted reduced and absolute temperatures for vertex B
power_cycle$B$t_r <- new_t_r ## update!
power_cycle$B$t <- new_t_r * (0.4 * nh3$t_c + 0.6 * c2h6$t_c) ## update!
|
library(rhdf5)
library(GenomicAlignments)
library(GenomicFeatures)
library(ggplot2)
#'########################################################################################
# Read functions && Find overlaps ####
#'########################################################################################
# Read a set of nanopore fast5 files in parallel (split into chunks to
# bound memory use), extract the basecalled event data plus read
# attributes from each file, and run findPolyA() (defined elsewhere in
# this project) on every chunk.
#
# f5list:      data.frame with (at least) a fast5_path column
# chunkNumber: number of chunks to split the file list into
#
# Returns a data.frame: the columns of f5list followed by the per-read
# polyA results.
read_and_findPolyA <- function(f5list, chunkNumber = 50){
  f5PathList <- f5list$fast5_path
  # Split data in chunks: the first chunkNumber - 1 chunks each get
  # floor(rows / chunkNumber) files, the remainder goes to the last one.
  rows <- length(f5PathList)
  rowsPerChunk <- floor(rows / chunkNumber)
  extraLastChunk <- rows %% chunkNumber
  counter <- 1
  result <- list()
  cores <- parallel::detectCores()
  for (chunk in seq_len(chunkNumber)) {
    if (chunk == chunkNumber)
      r <- rowsPerChunk + extraLastChunk
    else
      r <- rowsPerChunk
    # Skip empty chunks.  Without this guard, when rows < chunkNumber
    # (r == 0) the original index `counter:(counter + r - 1)` counted
    # *down* and selected the wrong files.
    if (r == 0) next
    f5Subset <- f5PathList[seq(from = counter, length.out = r)]
    counter <- counter + r
    tmp <- parallel::mclapply(f5Subset, function(path2file){
      # List the HDF5 tree once per file; the previous version re-ran
      # h5ls() for every lookup, which dominated the per-file cost.
      h5_listing <- h5ls(path2file)
      # Read event data
      tmpPath <- h5_listing[which(h5_listing == "/Analyses/Basecall_1D_000/BaseCalled_template")[1], 1]
      data.event <- h5read(path2file, tmpPath)$Events
      # Read attributes
      tmpPath <- h5_listing[(which(h5_listing == "/Raw/Reads") + 1), 1]
      attribute.start_time <- h5readAttributes(path2file, tmpPath)$start_time
      attribute.start_mux <- h5readAttributes(path2file, tmpPath)$start_mux
      attribute.sample_frequency <- h5readAttributes(path2file, "/UniqueGlobalKey/context_tags")$sample_frequency
      attribute.channel_number <- h5readAttributes(path2file, "/UniqueGlobalKey/channel_id")$channel_number
      H5close()
      # Merge interesting information about the sample
      list(
        eventMean = scale(data.event$mean),
        eventStart = data.event$start,
        eventMove = data.event$move,
        eventLength = data.event$length[1],
        attrStartTime = as.numeric(attribute.start_time),
        attrStartMux = as.numeric(attribute.start_mux),
        attrSampleFreq = as.numeric(attribute.sample_frequency),
        attrChannelNum = as.numeric(attribute.channel_number)
      )
    }, mc.cores = cores)
    result[[chunk]] <- findPolyA(tmp)
    cat("[INFO] Chunk", chunk, "of", chunkNumber, "computed! \n")
  }
  # Drop slots of skipped chunks, then bind everything at once instead of
  # growing a data.frame inside a loop (which copies the accumulator on
  # every iteration).
  result <- Filter(Negate(is.null), result)
  finalResult <- do.call(rbind, lapply(result, data.frame, stringsAsFactors = FALSE))
  data.frame(f5list, finalResult)
}
# Summarise per-read polyA results per transcript.
#
# hits: a Hits object (e.g. from findOverlaps) mapping transcripts
#       (query side) to reads (subject side)
# data: per-read results, containing subjectHits and
#       lengthPolyA_BasePair columns
#
# NOTE(review): this function reads the global `transcripts` object for
# the transcript names -- it must exist in the calling environment.
#
# Returns one row per transcript: id, tag count, mean/median polyA
# length, and a comma-separated list of the contributing sample rows.
makeTranscriptIdTable <- function(hits, data){
  query <- from(hits)
  subjects <- to(hits)
  cores <- parallel::detectCores()
  result <- parallel::mclapply(unique(query), function(q){
    # Transcript ID
    result.idxId <- names(transcripts[q])
    # Get samples from data
    smpls <- subjects[query == q]
    result.subj <- paste(which(data$subjectHits %in% smpls), collapse = ",")
    smpls <- data[data$subjectHits %in% smpls,]
    # Number of Tags
    result.tags <- nrow(smpls)
    # Calculate mean
    result.mean <- mean(smpls$lengthPolyA_BasePair)
    # Calculate median
    result.median <- median(smpls$lengthPolyA_BasePair)
    # Result row.  The misspelling "seperated" in the column name is
    # kept on purpose: downstream code may already depend on it.
    df <- data.frame(result.idxId, result.tags, result.mean, result.median, result.subj, stringsAsFactors = FALSE)
    colnames(df) <- c("transcript_id", "tags", "mean_length", "median_length", "samples_comma_seperated")
    df
  }, mc.cores = cores)
  # Bind all per-transcript rows at once instead of growing a data.frame
  # inside a loop.
  if (length(result) == 0) return(data.frame())
  do.call(rbind, result)
}
# Pearson correlation between two per-transcript measures after merging
# the two tables on their id columns.
#
# mergeVec: c(<merge column in data1>, <merge column in data2>)
#
# Returns a single numeric correlation coefficient.
computeCorrelation <- function(data1, data2, mergeVec){
  tmp <- merge(data1, data2, by.x = mergeVec[1], by.y = mergeVec[2])
  colnames(tmp) <- c("transcript_id", "n", "b")
  # is.na() is TRUE for NaN as well, so a single check per column
  # replaces the previous four separate is.nan()/is.na() filters.
  tmp <- tmp[!is.na(tmp$n) & !is.na(tmp$b), ]
  cor(tmp$n, tmp$b)
}
#'########################################################################################
# Plot -- functions ####
#'########################################################################################
# Histogram of one column of `data` (values rounded to integers),
# saved as "<labelVec[1]>.png" under savePath.
#
# labelVec: c(<title / file name stem>, <x axis label>)
# Returns the ggplot object.
plotHistogram <- function(data, column, labelVec, savePath = "Result/"){
  counts <- data.frame(l = round(data[, column]))
  histogram <- ggplot(counts, aes(x = l)) +
    geom_histogram(stat = "bin", binwidth = 1, fill = "#ff5b5b") +
    theme_minimal() +
    labs(title = labelVec[1], x = labelVec[2])
  ggsave(paste0(labelVec[1], ".png"),
         plot = histogram, path = savePath,
         width = 17.3, height = 7.06, dpi = 125)
  histogram
}
# Scatter plot comparing two per-transcript measures, merged on
# transcript id, saved as "<labelVec[1]>.png" under savePath.
#
# mergeVec: c(<merge column in data1>, <merge column in data2>)
# labelVec: c(<title / file name stem>, <x label>, <y label>)
# Returns the ggplot object.
plotScatter <- function(data1, data2, mergeVec, labelVec, savePath = "Result/"){
  tmp <- merge(data1, data2, by.x = mergeVec[1], by.y = mergeVec[2])
  colnames(tmp) <- c("transcript_id", "n", "b")
  # Drop rows with missing values in either column.  is.na() is TRUE for
  # NaN too, so one check per column covers both; the previous version
  # only tested is.nan() on n and so let plain NA values through.
  tmp <- tmp[!is.na(tmp$n), ]
  tmp <- tmp[!is.na(tmp$b), ]
  plot <- ggplot(tmp, aes(x=n, y=b)) +
    #geom_line(data = data.frame(x=c(0:300)), mapping = aes(x=x, y=x), color="lightgrey", alpha = 0.4) +
    geom_point(color="steelblue", alpha = 0.4) + theme_minimal() +
    labs(title=labelVec[1], x=labelVec[2], y=labelVec[3])
  ggsave(paste0(labelVec[1], ".png"), plot=plot, path=savePath, width=17.3, height=7.06, dpi=125)
  return(plot)
}
# Overlaid histogram + density of two per-transcript measures, merged
# on transcript id, saved as "<labelVec[1]>.png" under savePath.
#
# mergeVec: c(<merge column in data1>, <merge column in data2>)
# labelVec: c(<title / file name stem>, <x label>)
# classVec: c(<legend label for data1>, <legend label for data2>)
# Returns the ggplot object.
plotDensity <- function(data1, data2, mergeVec, labelVec, classVec, savePath = "Result/"){
  tmp <- merge(data1, data2, by.x = mergeVec[1], by.y = mergeVec[2])
  colnames(tmp) <- c("transcript_id", "n", "b")
  # is.na() also catches NaN; check both columns (the previous version
  # missed NA in n and NaN in b).
  tmp <- tmp[!is.na(tmp$n), ]
  tmp <- tmp[!is.na(tmp$b), ]
  tmp$n <- round(tmp$n); tmp$b <- round(tmp$b)
  # Stack the two measures into long format with one class label each.
  tmp <- rbind(data.frame(l=tmp$n, class=classVec[1]), data.frame(l=tmp$b, class=classVec[2]))
  plot <- ggplot(tmp, aes(x=l, fill=class)) +
    # NOTE(review): ..density.. is deprecated in ggplot2 >= 3.4; switch
    # to after_stat(density) once the ggplot2 version floor allows it.
    geom_histogram(aes(y=..density..), alpha=0.5, position="identity", binwidth = 1) +
    geom_density(alpha=0.4) +
    labs(title=labelVec[1], x=labelVec[2])
  ggsave(paste0(labelVec[1], ".png"), plot=plot, path=savePath, width=17.3, height=7.06, dpi=125)
  return(plot)
}
# plotRaw <- function(x, start, end){
# tmp <- data.frame(data[[x]]$rawSignal, c(1:(length(data[[x]]$rawSignal))))
# colnames(tmp) <- c("raw", "idx")
# tmp$PolyA <- 0
# tmp$PolyA[start:end] <- 1
# return(ggplot(tmp, aes(x=idx, y=raw, color=PolyA)) + geom_line() + theme_minimal())
# }
# plotEventMean <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, data[[x]]$eventMean)
# colnames(tmp) <- c("start", "mean")
# return(ggplot(tmp, aes(x=start, y=mean)) + geom_point() + geom_line(color="red"))
# }
# plotEventMove <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, as.numeric(data[[x]]$eventMove > 0))
# colnames(tmp) <- c("start", "move")
# return(ggplot(tmp, aes(x=start, y=move)) + geom_point())
# }
# plotEventReducedMove <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, as.numeric(data[[x]]$eventMove > 0))
# colnames(tmp) <- c("start", "move")
# tmp <- tmp[data[[x]]$eventP_mp_state > 0.8,]
# tmp <- tmp[tmp$move > 0,]
# return(ggplot(tmp, aes(x=start, y=move)) + geom_point())
# }
# plotEventP_mp_state <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, data[[x]]$eventP_mp_state)
# colnames(tmp) <- c("start", "p_mp_state")
# return(ggplot(tmp, aes(x=start, y=p_mp_state)) + geom_line( color = "darkgreen") + theme_minimal())
# }
# plotEventReducedP_mp_state <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, data[[x]]$eventP_mp_state, data[[x]]$eventMove)
# colnames(tmp) <- c("start", "p_mp_state", "move")
# tmp <- tmp[tmp$move > 0,]
# return(ggplot(tmp, aes(x=start, y=p_mp_state)) + geom_line( color = "darkblue") + theme_minimal())
# }
# plotEventCombo <- function(x){
# l <- data[[x]]
# tmp <- data.frame(l$eventStart, l$eventMean, l$eventMove, l$eventP_mp_state, l$eventWeights)
# colnames(tmp) <- c("dwelltime", "mean", "move", "p", "w")
# return(ggplot(tmp, aes(x=dwelltime, y=mean)) + geom_line() + theme_minimal() +
# geom_line(color="darkgreen", aes(y=move), alpha=0.6) +
# #geom_line(color="darkblue", aes(y=w)) +
# geom_line(color="darkblue", aes(y=p))
# )
# }
# plotAndSaveAll <- function(x, path){
# for(i in x){
# polyA <- findPolyA(data, i)
# polyA <- polyA*15
# if(length(polyA) == 1) next()
# if(polyA[1] < 0 || polyA[2] < 0 || polyA[3] < 0) next()
# #plot <- plotEventCombo(i)
# plot <- plotRaw(i, polyA[1], polyA[2])
# ggsave(paste0(i, ".png"), plot=plot, path=path, width=17.3, height=7.06, dpi=75)
# }
# }
| /read_plot_functions.R | no_license | ltilo/INF219_PolyA-Project | R | false | false | 8,620 | r | library(rhdf5)
library(GenomicAlignments)
library(GenomicFeatures)
library(ggplot2)
#'########################################################################################
# Read functions && Find overlaps ####
#'########################################################################################
# Read a set of nanopore fast5 files in parallel (split into chunks to
# bound memory use), extract the basecalled event data plus read
# attributes from each file, and run findPolyA() (defined elsewhere in
# this project) on every chunk.
#
# f5list:      data.frame with (at least) a fast5_path column
# chunkNumber: number of chunks to split the file list into
#
# Returns a data.frame: the columns of f5list followed by the per-read
# polyA results.
read_and_findPolyA <- function(f5list, chunkNumber = 50){
  f5PathList <- f5list$fast5_path
  # Split data in chunks: the first chunkNumber - 1 chunks each get
  # floor(rows / chunkNumber) files, the remainder goes to the last one.
  rows <- length(f5PathList)
  rowsPerChunk <- floor(rows / chunkNumber)
  extraLastChunk <- rows %% chunkNumber
  counter <- 1
  result <- list()
  cores <- parallel::detectCores()
  for (chunk in seq_len(chunkNumber)) {
    if (chunk == chunkNumber)
      r <- rowsPerChunk + extraLastChunk
    else
      r <- rowsPerChunk
    # Skip empty chunks.  Without this guard, when rows < chunkNumber
    # (r == 0) the original index `counter:(counter + r - 1)` counted
    # *down* and selected the wrong files.
    if (r == 0) next
    f5Subset <- f5PathList[seq(from = counter, length.out = r)]
    counter <- counter + r
    tmp <- parallel::mclapply(f5Subset, function(path2file){
      # List the HDF5 tree once per file; the previous version re-ran
      # h5ls() for every lookup, which dominated the per-file cost.
      h5_listing <- h5ls(path2file)
      # Read event data
      tmpPath <- h5_listing[which(h5_listing == "/Analyses/Basecall_1D_000/BaseCalled_template")[1], 1]
      data.event <- h5read(path2file, tmpPath)$Events
      # Read attributes
      tmpPath <- h5_listing[(which(h5_listing == "/Raw/Reads") + 1), 1]
      attribute.start_time <- h5readAttributes(path2file, tmpPath)$start_time
      attribute.start_mux <- h5readAttributes(path2file, tmpPath)$start_mux
      attribute.sample_frequency <- h5readAttributes(path2file, "/UniqueGlobalKey/context_tags")$sample_frequency
      attribute.channel_number <- h5readAttributes(path2file, "/UniqueGlobalKey/channel_id")$channel_number
      H5close()
      # Merge interesting information about the sample
      list(
        eventMean = scale(data.event$mean),
        eventStart = data.event$start,
        eventMove = data.event$move,
        eventLength = data.event$length[1],
        attrStartTime = as.numeric(attribute.start_time),
        attrStartMux = as.numeric(attribute.start_mux),
        attrSampleFreq = as.numeric(attribute.sample_frequency),
        attrChannelNum = as.numeric(attribute.channel_number)
      )
    }, mc.cores = cores)
    result[[chunk]] <- findPolyA(tmp)
    cat("[INFO] Chunk", chunk, "of", chunkNumber, "computed! \n")
  }
  # Drop slots of skipped chunks, then bind everything at once instead of
  # growing a data.frame inside a loop (which copies the accumulator on
  # every iteration).
  result <- Filter(Negate(is.null), result)
  finalResult <- do.call(rbind, lapply(result, data.frame, stringsAsFactors = FALSE))
  data.frame(f5list, finalResult)
}
# Summarise per-read polyA results per transcript.
#
# hits: a Hits object (e.g. from findOverlaps) mapping transcripts
#       (query side) to reads (subject side)
# data: per-read results, containing subjectHits and
#       lengthPolyA_BasePair columns
#
# NOTE(review): this function reads the global `transcripts` object for
# the transcript names -- it must exist in the calling environment.
#
# Returns one row per transcript: id, tag count, mean/median polyA
# length, and a comma-separated list of the contributing sample rows.
makeTranscriptIdTable <- function(hits, data){
  query <- from(hits)
  subjects <- to(hits)
  cores <- parallel::detectCores()
  result <- parallel::mclapply(unique(query), function(q){
    # Transcript ID
    result.idxId <- names(transcripts[q])
    # Get samples from data
    smpls <- subjects[query == q]
    result.subj <- paste(which(data$subjectHits %in% smpls), collapse = ",")
    smpls <- data[data$subjectHits %in% smpls,]
    # Number of Tags
    result.tags <- nrow(smpls)
    # Calculate mean
    result.mean <- mean(smpls$lengthPolyA_BasePair)
    # Calculate median
    result.median <- median(smpls$lengthPolyA_BasePair)
    # Result row.  The misspelling "seperated" in the column name is
    # kept on purpose: downstream code may already depend on it.
    df <- data.frame(result.idxId, result.tags, result.mean, result.median, result.subj, stringsAsFactors = FALSE)
    colnames(df) <- c("transcript_id", "tags", "mean_length", "median_length", "samples_comma_seperated")
    df
  }, mc.cores = cores)
  # Bind all per-transcript rows at once instead of growing a data.frame
  # inside a loop.
  if (length(result) == 0) return(data.frame())
  do.call(rbind, result)
}
# Pearson correlation between two per-transcript measures after merging
# the two tables on their id columns.
#
# mergeVec: c(<merge column in data1>, <merge column in data2>)
#
# Returns a single numeric correlation coefficient.
computeCorrelation <- function(data1, data2, mergeVec){
  tmp <- merge(data1, data2, by.x = mergeVec[1], by.y = mergeVec[2])
  colnames(tmp) <- c("transcript_id", "n", "b")
  # is.na() is TRUE for NaN as well, so a single check per column
  # replaces the previous four separate is.nan()/is.na() filters.
  tmp <- tmp[!is.na(tmp$n) & !is.na(tmp$b), ]
  cor(tmp$n, tmp$b)
}
#'########################################################################################
# Plot -- functions ####
#'########################################################################################
# Histogram of one column of `data` (values rounded to integers),
# saved as "<labelVec[1]>.png" under savePath.
#
# labelVec: c(<title / file name stem>, <x axis label>)
# Returns the ggplot object.
plotHistogram <- function(data, column, labelVec, savePath = "Result/"){
  counts <- data.frame(l = round(data[, column]))
  histogram <- ggplot(counts, aes(x = l)) +
    geom_histogram(stat = "bin", binwidth = 1, fill = "#ff5b5b") +
    theme_minimal() +
    labs(title = labelVec[1], x = labelVec[2])
  ggsave(paste0(labelVec[1], ".png"),
         plot = histogram, path = savePath,
         width = 17.3, height = 7.06, dpi = 125)
  histogram
}
# Scatter plot comparing two per-transcript measures, merged on
# transcript id, saved as "<labelVec[1]>.png" under savePath.
#
# mergeVec: c(<merge column in data1>, <merge column in data2>)
# labelVec: c(<title / file name stem>, <x label>, <y label>)
# Returns the ggplot object.
plotScatter <- function(data1, data2, mergeVec, labelVec, savePath = "Result/"){
  tmp <- merge(data1, data2, by.x = mergeVec[1], by.y = mergeVec[2])
  colnames(tmp) <- c("transcript_id", "n", "b")
  # Drop rows with missing values in either column.  is.na() is TRUE for
  # NaN too, so one check per column covers both; the previous version
  # only tested is.nan() on n and so let plain NA values through.
  tmp <- tmp[!is.na(tmp$n), ]
  tmp <- tmp[!is.na(tmp$b), ]
  plot <- ggplot(tmp, aes(x=n, y=b)) +
    #geom_line(data = data.frame(x=c(0:300)), mapping = aes(x=x, y=x), color="lightgrey", alpha = 0.4) +
    geom_point(color="steelblue", alpha = 0.4) + theme_minimal() +
    labs(title=labelVec[1], x=labelVec[2], y=labelVec[3])
  ggsave(paste0(labelVec[1], ".png"), plot=plot, path=savePath, width=17.3, height=7.06, dpi=125)
  return(plot)
}
# Overlaid histogram + density of two per-transcript measures, merged
# on transcript id, saved as "<labelVec[1]>.png" under savePath.
#
# mergeVec: c(<merge column in data1>, <merge column in data2>)
# labelVec: c(<title / file name stem>, <x label>)
# classVec: c(<legend label for data1>, <legend label for data2>)
# Returns the ggplot object.
plotDensity <- function(data1, data2, mergeVec, labelVec, classVec, savePath = "Result/"){
  tmp <- merge(data1, data2, by.x = mergeVec[1], by.y = mergeVec[2])
  colnames(tmp) <- c("transcript_id", "n", "b")
  # is.na() also catches NaN; check both columns (the previous version
  # missed NA in n and NaN in b).
  tmp <- tmp[!is.na(tmp$n), ]
  tmp <- tmp[!is.na(tmp$b), ]
  tmp$n <- round(tmp$n); tmp$b <- round(tmp$b)
  # Stack the two measures into long format with one class label each.
  tmp <- rbind(data.frame(l=tmp$n, class=classVec[1]), data.frame(l=tmp$b, class=classVec[2]))
  plot <- ggplot(tmp, aes(x=l, fill=class)) +
    # NOTE(review): ..density.. is deprecated in ggplot2 >= 3.4; switch
    # to after_stat(density) once the ggplot2 version floor allows it.
    geom_histogram(aes(y=..density..), alpha=0.5, position="identity", binwidth = 1) +
    geom_density(alpha=0.4) +
    labs(title=labelVec[1], x=labelVec[2])
  ggsave(paste0(labelVec[1], ".png"), plot=plot, path=savePath, width=17.3, height=7.06, dpi=125)
  return(plot)
}
# plotRaw <- function(x, start, end){
# tmp <- data.frame(data[[x]]$rawSignal, c(1:(length(data[[x]]$rawSignal))))
# colnames(tmp) <- c("raw", "idx")
# tmp$PolyA <- 0
# tmp$PolyA[start:end] <- 1
# return(ggplot(tmp, aes(x=idx, y=raw, color=PolyA)) + geom_line() + theme_minimal())
# }
# plotEventMean <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, data[[x]]$eventMean)
# colnames(tmp) <- c("start", "mean")
# return(ggplot(tmp, aes(x=start, y=mean)) + geom_point() + geom_line(color="red"))
# }
# plotEventMove <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, as.numeric(data[[x]]$eventMove > 0))
# colnames(tmp) <- c("start", "move")
# return(ggplot(tmp, aes(x=start, y=move)) + geom_point())
# }
# plotEventReducedMove <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, as.numeric(data[[x]]$eventMove > 0))
# colnames(tmp) <- c("start", "move")
# tmp <- tmp[data[[x]]$eventP_mp_state > 0.8,]
# tmp <- tmp[tmp$move > 0,]
# return(ggplot(tmp, aes(x=start, y=move)) + geom_point())
# }
# plotEventP_mp_state <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, data[[x]]$eventP_mp_state)
# colnames(tmp) <- c("start", "p_mp_state")
# return(ggplot(tmp, aes(x=start, y=p_mp_state)) + geom_line( color = "darkgreen") + theme_minimal())
# }
# plotEventReducedP_mp_state <- function(x){
# tmp <- data.frame(data[[x]]$eventStart, data[[x]]$eventP_mp_state, data[[x]]$eventMove)
# colnames(tmp) <- c("start", "p_mp_state", "move")
# tmp <- tmp[tmp$move > 0,]
# return(ggplot(tmp, aes(x=start, y=p_mp_state)) + geom_line( color = "darkblue") + theme_minimal())
# }
# plotEventCombo <- function(x){
# l <- data[[x]]
# tmp <- data.frame(l$eventStart, l$eventMean, l$eventMove, l$eventP_mp_state, l$eventWeights)
# colnames(tmp) <- c("dwelltime", "mean", "move", "p", "w")
# return(ggplot(tmp, aes(x=dwelltime, y=mean)) + geom_line() + theme_minimal() +
# geom_line(color="darkgreen", aes(y=move), alpha=0.6) +
# #geom_line(color="darkblue", aes(y=w)) +
# geom_line(color="darkblue", aes(y=p))
# )
# }
# plotAndSaveAll <- function(x, path){
# for(i in x){
# polyA <- findPolyA(data, i)
# polyA <- polyA*15
# if(length(polyA) == 1) next()
# if(polyA[1] < 0 || polyA[2] < 0 || polyA[3] < 0) next()
# #plot <- plotEventCombo(i)
# plot <- plotRaw(i, polyA[1], polyA[2])
# ggsave(paste0(i, ".png"), plot=plot, path=path, width=17.3, height=7.06, dpi=75)
# }
# }
|
# Test case 146
# Exercises Righthandside_and_Direction() for a small network DEA
# fixture: K = 2 DMUs, 3 processes, one link constraint, CRS,
# non-directional.
# NOTE(review): Data_1 is defined but not referenced in this test.
Data_1 <- matrix(c(3, 5, 8, 1, 1, 2,
                   7, 1, 4, 1, 5, 8,
                   2, 5, 3, 1, 2, 1),byrow=TRUE, nrow=3);
Input = matrix(c(3, 8, 1,
                 7, 4, 1,
                 2, 3, 1), byrow = TRUE, nrow = 3);
Output = matrix(c(5, 1,
                  1, 5,
                  5, 2), byrow = TRUE, nrow = 3);
Link = matrix(c(2,
                8,
                1), byrow = TRUE, nrow = 3);
weights = c(0.5, 0.5);
K = 2;
N = 3;
sum_m = 3;
sum_r = 2;
sum_l = 1;
Amount = c(1,1,2,1,1);
Amount_Input = c(1,2);
Amount_Output = c(1,1);
Amount_Link = c(1);
direction = "non" ;
link_con = 1; #fix
return_to_scale = "CRS";
NIRS = 0;
Link_obj = 0; # No Link variable in the objective function
# Load all package functions.
# NOTE(review): this setwd() chain assumes the script is run from the
# repository's tests/ directory with the 00_pkg_src/ layout in place;
# it navigates to the package R/ directory, sources the loader, and
# navigates back.
setwd(getwd())
setwd("..")
setwd("00_pkg_src")
setwd("Nsbm.function")
setwd("R")
source("load_all_func.R");
load_all_func();
setwd("..")
setwd("..")
setwd("..")
setwd("tests")
test_that("Test case 146",{
  # Build the expected right-hand side vector:
  # objective row, one 0 per input/output slack, two per link, plus the
  # trailing scale constraint.
  Righthandside <- c(1, rep(0,1,sum_m), rep(0,1,sum_r), rep(0,1,(2*sum_l)), 0);
  # NOTE(review): this comment said "NIRS = 1 adjusted" but NIRS = 0
  # above -- confirm which configuration the expected values encode.
  Direction_ <- c("=", rep("=",1,sum_m), rep("=",1,sum_r), rep("=",1,(2*sum_l)), ">");
  Righthside_Direction_ <- list( "FDIR" = Direction_, "FRHS" = Righthandside);
  #########################################
  #########################################
  #########################################
  # Compare the function output against the hand-built expectation.
  expect_equal(Righthandside_and_Direction(1, direction, return_to_scale, link_con, NIRS, Input, Output, Link, Amount_Link, K, sum_m, sum_r, sum_l), Righthside_Direction_, check.attributes = FALSE)
})
| /2_nsbm_approach/Nsbm.function/tests/Test_case_146.R | no_license | thomaskrupa/thesis | R | false | false | 1,729 | r | # Test case 146
# Test case 146: exercises Righthandside_and_Direction() for a small
# network DEA fixture (K = 2 DMUs, 3 processes, one link constraint,
# CRS, non-directional).
# NOTE(review): Data_1 is defined but not referenced in this test.
Data_1 <- matrix(c(3, 5, 8, 1, 1, 2,
                   7, 1, 4, 1, 5, 8,
                   2, 5, 3, 1, 2, 1),byrow=TRUE, nrow=3);
Input = matrix(c(3, 8, 1,
                 7, 4, 1,
                 2, 3, 1), byrow = TRUE, nrow = 3);
Output = matrix(c(5, 1,
                  1, 5,
                  5, 2), byrow = TRUE, nrow = 3);
Link = matrix(c(2,
                8,
                1), byrow = TRUE, nrow = 3);
weights = c(0.5, 0.5);
K = 2;
N = 3;
sum_m = 3;
sum_r = 2;
sum_l = 1;
Amount = c(1,1,2,1,1);
Amount_Input = c(1,2);
Amount_Output = c(1,1);
Amount_Link = c(1);
direction = "non" ;
link_con = 1; #fix
return_to_scale = "CRS";
NIRS = 0;
Link_obj = 0; # No Link variable in the objective function
# Load all package functions.
# NOTE(review): this setwd() chain assumes the script is run from the
# repository's tests/ directory with the 00_pkg_src/ layout in place;
# it navigates to the package R/ directory, sources the loader, and
# navigates back.
setwd(getwd())
setwd("..")
setwd("00_pkg_src")
setwd("Nsbm.function")
setwd("R")
source("load_all_func.R");
load_all_func();
setwd("..")
setwd("..")
setwd("..")
setwd("tests")
test_that("Test case 146",{
  # Build the expected right-hand side vector:
  # objective row, one 0 per input/output slack, two per link, plus the
  # trailing scale constraint.
  Righthandside <- c(1, rep(0,1,sum_m), rep(0,1,sum_r), rep(0,1,(2*sum_l)), 0);
  # NOTE(review): this comment said "NIRS = 1 adjusted" but NIRS = 0
  # above -- confirm which configuration the expected values encode.
  Direction_ <- c("=", rep("=",1,sum_m), rep("=",1,sum_r), rep("=",1,(2*sum_l)), ">");
  Righthside_Direction_ <- list( "FDIR" = Direction_, "FRHS" = Righthandside);
  #########################################
  #########################################
  #########################################
  # Compare the function output against the hand-built expectation.
  expect_equal(Righthandside_and_Direction(1, direction, return_to_scale, link_con, NIRS, Input, Output, Link, Amount_Link, K, sum_m, sum_r, sum_l), Righthside_Direction_, check.attributes = FALSE)
})
|
#' Plot Morphospace
#'
#' @description
#'
#' Plots a morphospace using the output from ordinate_cladistic_matrix.
#'
#' @param pcoa_input The main input in the format output from \link{ordinate_cladistic_matrix}.
#' @param x_axis Which ordination axis to plot as the x-axis (defaults to 1).
#' @param y_axis Which ordination axis to plot as the y-axis (defaults to 2).
#' @param z_axis Which ordination axis to plot as the z-axis (defaults to NULL, i.e., is not plotted).
#' @param taxon_groups A named list of groups to which taxa are assigned (optional). This is used to plot points or convex hulls in different colours corresponding to each group. As the user names the groups these can represent any grouping of interest (e.g., taxonomic, ecological, temporal, spatial). \link{assign_taxa_to_bins} can automate temporal assignments.
#' @param plot_taxon_names Logical indicating whether to plot the names of the taxa (defaults to FALSE).
#' @param plot_convex_hulls Logical indicating whether to plot convex hulls around any taxon_groups (if used).
#' @param plot_internal_nodes Logical indicating whether to plot the internal nodes of the tree (if included in \code{pcoa_input}) (defaults to FALSE).
#' @param plot_edges Logical indicating whether to plot the branches of the tree (if included in \code{pcoa_input}) (defaults to TRUE).
#' @param plot_root Logical indicating whether to plot the root separately (defaults to TRUE).
#' @param root_colour If plotting the root separately (previous option) sets the root colour.
#' @param palette The palette to use for plotting each element of taxon_groups. See \link[grDevices]{palette}.
#' @param plot_group_legend Logical indicating whether to plot a legend for taxon_groups. (Default is TRUE.)
#' @param group_legend_position Position to plot the group legend. Must be one of \code{bottom_left}, \code{bottom_right}, \code{top_left}, or \code{top_right} (the default).
#' @param plot_z_legend Logical indicating whether to plot a legend for the z-axis. (Default is TRUE.)
#' @param z_legend_position Position to plot the group legend. Must be one of \code{bottom_left}, \code{bottom_right} (the default), \code{top_left}, or \code{top_right}.
#' @param inform Logical indicating whether to inform the user of any taxon pruning. (Default is TRUE.)
#' @param x_limits Plot limits to use for x-axis. Only intended for use by \link{plot_multi_morphospace}.
#' @param y_limits Plot limits to use for y-axis. Only intended for use by \link{plot_multi_morphospace}.
#'
#' @details
#'
#' Uses output from \link{ordinate_cladistic_matrix} to make morphospace plots.
#'
#' Allows plotting of a third axis using the technique of Wills et al. (1994; their Figures 4 and 8; Wills 1998; his Figure 4), where solid and open indicate positive and negative values respectively, and the size of points their magnitudes.
#'
#' Will automatically generate phylomorphospaces if a tree was included in the ordination.
#'
#' Can also plot groups of points - whether they represent taxonomic, ecological, temporal, or spatial groupings - in different colours as well as plot translucent convex hulls around these groups, by using the \code{taxon_groups} and \code{plot_convex_hulls = TRUE} options, respectively. Note that \code{taxon_groups} should be in the form of a named list (see example below for how these should be formatted).
#'
#' Various other options allow toggling of particular features on or off. For example, the taxon names can be shown with \code{plot_taxon_names = TRUE}.
#'
#' Note that some features will generate legends that may initially appear to disappear off the sides of the plot, but simple resizing of the plot window (or increasing the width:height ratio if outputting to a file) should fix this.
#'
#' @author Graeme T. Lloyd \email{graemetlloyd@@gmail.com} and Emma Sherratt \email{emma.sherratt@@gmail.com}
#'
#' @seealso
#'
#' \link{assign_taxa_to_bins}, \link{plot_chronophylomorphospace}, \link{plot_morphospace_stack}, \link{plot_multi_morphospace}, \link{ordinate_cladistic_matrix}
#'
#' @references
#'
#' Wills, M. A., 1998. Cambrian and Recent disparity: the picture from priapulids. \emph{Paleobiology}, \bold{24}, 177-199.
#'
#' Wills, M. A., Briggs, D. E. G. and Fortey, R. A., 1994. Disparity as an evolutionary index: a comparison of Cambrian and Recent arthropods. \emph{Paleobiology}, \bold{20}, 93-130.
#'
#' @examples
#'
#' \donttest{
#' # Perform a PCoA ordination on the day_2016 data set:
#' pcoa_input <- ordinate_cladistic_matrix(cladistic_matrix = day_2016)
#'
#' # Plot this as a simple bivariate morphospace:
#' plot_morphospace(pcoa_input = pcoa_input)
#'
#' # Use the Wills technique to add a third axis (PC3):
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3)
#'
#' # You may need to resize the plot to see the legend for the z-axis
#'
#' # Add taxon names as well:
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3, plot_taxon_names = TRUE)
#'
#' # Define some simple taxon groups for the data as a named list:
#' taxon_groups <- list(nonBurnetiamorpha = c("Biarmosuchus_tener",
#' "Hipposaurus_boonstrai", "Bullacephalus_jacksoni", "Pachydectes_elsi",
#' "Ictidorhinus_martinsi", "RC_20", "Herpetoskylax_hopsoni"),
#' Burnetiamorpha = c("Lemurosaurus_pricei", "Lobalopex_mordax",
#' "Lophorhinus_willodenensis", "Proburnetia_viatkensis", "Lende_chiweta",
#' "Paraburnetia_sneeubergensis", "Burnetia_mirabilis", "BP_1_7098"))
#'
#' # Plot taxon groups including convex hulls:
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3, plot_taxon_names = TRUE,
#' taxon_groups = taxon_groups, plot_convex_hulls = TRUE)
#'
#' # Make time-scaled first MPT for Day 2016 data set:
#' time_tree <- ape::read.tree(text = paste0("(Biarmosuchus_tener:0.5,",
#' "(((Hipposaurus_boonstrai:3.5,(Bullacephalus_jacksoni:0.75,",
#' "Pachydectes_elsi:0.75):0.75):0.75,(Lemurosaurus_pricei:7.166666667,",
#' "(Lobalopex_mordax:4.333333333,((Lophorhinus_willodenensis:3.666666667,",
#' "(Proburnetia_viatkensis:0.8333333333,(Lende_chiweta:2,",
#' "(Paraburnetia_sneeubergensis:1,Burnetia_mirabilis:2):1):1.833333333)",
#' ":0.8333333333):0.8333333333,(BP_1_7098:2.25,Niuksenitia_sukhonensis:",
#' "1.25):1.25):0.8333333333):0.8333333333):3.083333333):1.95,",
#' "(Ictidorhinus_martinsi:15.9,(RC_20:11.6,(Herpetoskylax_hopsoni:11.3,",
#' "Lycaenodon_longiceps:0.3):0.3):0.3):0.3):0.3);"))
#'
#' # Add root age to tree:
#' time_tree$root.time <- 269.5
#'
#' # Prune incomplete taxa from tree:
#' time_tree <- ape::drop.tip(phy = time_tree, tip = c("Lycaenodon_longiceps",
#' "Niuksenitia_sukhonensis"))
#'
#' # Prune incomplete taxa from cladistic matrix:
#' cladistic_matrix <- prune_cladistic_matrix(cladistic_matrix = day_2016,
#' taxa2prune = c("Lycaenodon_longiceps", "Niuksenitia_sukhonensis"))
#'
#' # Note: the above pruning is simply to run this example and should not be
#' # done manually as a matter of course as the functions will automatically
#' # prune tips and nodes as required.
#'
#' # Make new ordination with tree included (enabling phylomorphospace):
#' pcoa_input <- ordinate_cladistic_matrix(cladistic_matrix = cladistic_matrix,
#' time_tree = time_tree)
#'
#' # Plot this as a simple bivariate phylomorphospace:
#' plot_morphospace(pcoa_input = pcoa_input)
#'
#' # Use the Wills technique to add a third axis (PC3):
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3)
#'
#' # You may need to resize the plot to see the legend for the z-axis
#'
#' # Add taxon names as well:
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3, plot_taxon_names = TRUE)
#'
#' # Add taxon groups including convex hulls:
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3, plot_taxon_names = TRUE,
#' taxon_groups = taxon_groups, plot_convex_hulls = TRUE)
#' }
#' @export plot_morphospace
plot_morphospace <- function(pcoa_input, x_axis = 1, y_axis = 2, z_axis = NULL, taxon_groups = NULL, plot_taxon_names = FALSE, plot_convex_hulls = FALSE, plot_internal_nodes = FALSE, plot_edges = TRUE, plot_root = TRUE, root_colour = "red", palette = "viridis", plot_group_legend = TRUE, group_legend_position = "top_right", plot_z_legend = TRUE, z_legend_position = "bottom_right", inform = TRUE, x_limits = NULL, y_limits = NULL) {

  # TO DO:
  #
  # - Order points by z-value so they are actually plotted from "back" to "front".
  # - Add plot_node_names option.
  # - Check inputs.

  # Check group_legend_position is a valid value and stop and warn user if not:
  if (!group_legend_position %in% c("bottom_left", "bottom_right", "top_left", "top_right")) stop("group_legend_position must be one of \"bottom_left\", \"bottom_right\", \"top_left\", or \"top_right\".")

  # Check z_legend_position is a valid value and stop and warn user if not:
  if (!z_legend_position %in% c("bottom_left", "bottom_right", "top_left", "top_right")) stop("z_legend_position must be one of \"bottom_left\", \"bottom_right\", \"top_left\", or \"top_right\".")

  # Check that if both legends are used that they are in different positions:
  if (plot_group_legend && plot_z_legend && group_legend_position == z_legend_position) stop("plot_group_legend and plot_z_legend must be different values or they will plot on top of each other.")

  # Create scalar logicals for whether taxon groups and a tree are used
  # (plain negation rather than ifelse, which is meant for vectors):
  taxon_groups_used <- !is.null(x = taxon_groups)
  tree_used <- !is.null(x = pcoa_input$time_tree)

  # If using taxon groups:
  if (taxon_groups_used) {

    # Find any taxa to prune from taxon groups (names absent from the ordination):
    taxa_to_prune <- setdiff(x = unique(x = unlist(x = taxon_groups)), y = rownames(x = pcoa_input$vectors))

    # If taxa to prune are found:
    if (length(x = taxa_to_prune) > 0) {

      # Go through taxon groups:
      taxon_groups <- lapply(X = taxon_groups, function(y) {

        # Remove any taxa not found in pcoa data:
        if (length(x = sort(x = match(x = taxa_to_prune, table = y))) > 0) y <- y[-sort(x = match(x = taxa_to_prune, table = y))]

        # Return pruned group:
        y
      })

      # Warn user that this has happened in case it is an error:
      if (inform) print(paste0("Warning: The following taxa were removed from taxon_groups as they do not appear in pcoa_input: ", paste(taxa_to_prune, collapse = ", "), ". You may wish to double check this makes sense (e.g., because of incomplete taxa being removed by trim_matrix) and is not due to a typographical or other error which means names are not an exact match."))
    }
  }

  # If plot limits aren't set use x and y ranges to set them:
  if (is.null(x_limits)) x_limits <- range(pcoa_input$vectors[, x_axis])
  if (is.null(y_limits)) y_limits <- range(pcoa_input$vectors[, y_axis])

  # Set default tip numbers as just 1 to N:
  tip_numbers <- seq_len(nrow(x = pcoa_input$vectors))

  # Case if tree supplied:
  if (tree_used) {

    # Set basic tree information (tips first, then internal nodes, root at N tips + 1):
    n_tips <- ape::Ntip(phy = pcoa_input$time_tree)
    tip_numbers <- seq_len(n_tips)
    node_numbers <- setdiff(x = seq_len(nrow(pcoa_input$vectors)), y = tip_numbers)
    root_number <- n_tips + 1
  }

  # Get vector of values that correspond to scree plot (percentage variance per axis):
  scree_values <- apply(pcoa_input$vectors, 2, stats::var) / sum(apply(pcoa_input$vectors, 2, stats::var)) * 100

  # Set default solid colour to black:
  solid_colours <- "black"

  # Set default translucent colour to 50% black:
  translucent_colours <- grDevices::rgb(red = 0, green = 0, blue = 0, alpha = 0.5)

  # Set open colour (completely transparent):
  transparent_colour <- grDevices::rgb(red = 0, green = 0, blue = 0, alpha = 0)

  # If using taxon groups:
  if (taxon_groups_used) {

    # Set colours for each group:
    solid_colours <- grDevices::hcl.colors(n = length(x = taxon_groups), palette = palette, alpha = 1)
    translucent_colours <- grDevices::hcl.colors(n = length(x = taxon_groups), palette = palette, alpha = 0.5)
    transparent_colour <- grDevices::hcl.colors(n = length(x = taxon_groups), palette = palette, alpha = 0)

    # If not using taxon groups:
  } else {

    # Create dummy taxon group of all data as one (N.B. full element name
    # "vectors"; the original "$vector" only worked via $ partial matching):
    taxon_groups <- list(Data = rownames(pcoa_input$vectors))
  }

  # If using a tree make sure taxon_groups only includes tips:
  if (tree_used) taxon_groups <- lapply(X = taxon_groups, FUN = function(y) intersect(y, pcoa_input$time_tree$tip.label[tip_numbers]))

  # Set point colours (background and border); with a z-axis, backgrounds are
  # solid for positive z-values and transparent for negative ones (Wills technique):
  point_col <- point_bg <- lapply(X = as.list(x = seq_along(taxon_groups)), FUN = function(y) rep(x = solid_colours[y], length.out = length(taxon_groups[[y]])))
  if (!is.null(x = z_axis)) point_bg <- lapply(X = as.list(x = seq_along(taxon_groups)), FUN = function(y) as.vector(unlist(x = lapply(X = as.list(x = taxon_groups[[y]]), FUN = function(z) ifelse(test = pcoa_input$vectors[z, z_axis] > 0, yes = solid_colours[[y]], no = transparent_colour[[y]]))), mode = "character"))

  # Make axis labels:
  x_lab <- paste("PC", x_axis, " (", round(scree_values[x_axis], 2), "% of total variance)", sep = "")
  y_lab <- paste("PC", y_axis, " (", round(scree_values[y_axis], 2), "% of total variance)", sep = "")
  if (!is.null(x = z_axis)) z_lab <- paste("PC", z_axis, " (", round(scree_values[z_axis], 2), "% of total variance)", sep = "")

  # Make all points equal in size by default:
  point_sizes <- rep(1, nrow(pcoa_input$vectors))

  # Set point sizes as scaled absolute z-value if using z-axis:
  if (!is.null(x = z_axis)) point_sizes <- abs(x = pcoa_input$vectors[, z_axis]) / max(abs(x = pcoa_input$vectors[, z_axis])) * 3

  # Add taxon names to point_sizes:
  names(x = point_sizes) <- rownames(x = pcoa_input$vectors)

  # Create the basic plot space (empty for now; asp = 1 keeps both axes on the
  # same scale, equivalent to the logical TRUE the original passed):
  graphics::plot(x = pcoa_input$vectors[, x_axis], y = pcoa_input$vectors[, y_axis], type = "n", bg = "black", xlab = x_lab, ylab = y_lab, asp = 1, xlim = x_limits, ylim = y_limits)

  # Sort vectors by node number (1:N):
  if (tree_used) pcoa_input$vectors <- pcoa_input$vectors[c(pcoa_input$time_tree$tip.label, setdiff(x = rownames(x = pcoa_input$vectors), y = pcoa_input$time_tree$tip.label)), ]

  # Plot branches of tree (if a tree is used and plotting requested):
  if (tree_used && plot_edges) for (i in seq_len(nrow(pcoa_input$time_tree$edge))) graphics::lines(x = pcoa_input$vectors[pcoa_input$time_tree$edge[i, ], x_axis], y = pcoa_input$vectors[pcoa_input$time_tree$edge[i, ], y_axis], col = grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 1))

  # Set node colours for plotting. When no z-axis is used, all nodes get a
  # solid grey fill (the original indexed the vectors by a NULL z_axis here,
  # silently yielding an empty node_bg). With a z-axis, nodes are solid grey
  # for positive z-values and transparent for negative ones:
  if (tree_used) {
    if (is.null(x = z_axis)) {
      node_bg <- stats::setNames(object = rep(x = grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 1), times = length(x = node_numbers)), nm = as.character(x = node_numbers))
    } else {
      node_bg <- unlist(x = lapply(X = as.list(pcoa_input$vectors[as.character(node_numbers), z_axis]), FUN = function(y) ifelse(y > 0, grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 1), grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 0))))
    }
  }

  # Plot internal nodes, if requested:
  if (tree_used && plot_internal_nodes) graphics::points(pcoa_input$vectors[node_numbers, x_axis], pcoa_input$vectors[node_numbers, y_axis], pch = 21, bg = node_bg[as.character(x = node_numbers)], col = grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 1), cex = point_sizes[node_numbers])

  # Plot root separately, if requested:
  if (tree_used && plot_root) graphics::points(pcoa_input$vectors[root_number, x_axis], pcoa_input$vectors[root_number, y_axis], pch = 21, col = root_colour, bg = root_colour, cex = point_sizes[root_number])

  # If convex hulls are requested:
  if (taxon_groups_used && plot_convex_hulls) {

    # For each group:
    x <- lapply(X = as.list(x = seq_along(taxon_groups)), function(y) {

      # Make convex hull for data:
      convex_hull <- grDevices::chull(x = pcoa_input$vectors[taxon_groups[[y]], x_axis], y = pcoa_input$vectors[taxon_groups[[y]], y_axis])

      # Plot convex hull as translucent polygon:
      graphics::polygon(x = pcoa_input$vectors[taxon_groups[[y]][convex_hull], x_axis], y = pcoa_input$vectors[taxon_groups[[y]][convex_hull], y_axis], col = translucent_colours[[y]], border = NA)
    })
  }

  # Add points to plot:
  x <- lapply(X = as.list(x = seq_along(taxon_groups)), function(y) graphics::points(x = pcoa_input$vectors[taxon_groups[[y]], x_axis], y = pcoa_input$vectors[taxon_groups[[y]], y_axis], pch = 21, bg = point_bg[[y]], col = point_col[[y]], cex = point_sizes[taxon_groups[[y]]]))

  # If plotting taxon names:
  if (plot_taxon_names) {

    # First establish a default position for names (to the left of the point):
    x_positions <- rep(2, nrow(pcoa_input$vectors))

    # Now change negative values to plot on the right instead:
    x_positions[which(x = pcoa_input$vectors[, x_axis] < 0)] <- 4

    # Plot taxon names (for tips only):
    graphics::text(x = pcoa_input$vectors[tip_numbers, x_axis], y = pcoa_input$vectors[tip_numbers, y_axis], labels = rownames(x = pcoa_input$vectors)[tip_numbers], pos = x_positions[tip_numbers], cex = 0.7)
  }

  # If plotting a group legend:
  if (taxon_groups_used && plot_group_legend) {

    # Add groups legend to plot at the requested corner:
    if (group_legend_position == "bottom_left") graphics::legend(x = min(pcoa_input$vectors[, x_axis]), y = min(pcoa_input$vectors[, y_axis]), legend = names(taxon_groups), fill = solid_colours, bg = "white", xjust = 1, yjust = 0)
    if (group_legend_position == "bottom_right") graphics::legend(x = max(pcoa_input$vectors[, x_axis]), y = min(pcoa_input$vectors[, y_axis]), legend = names(taxon_groups), fill = solid_colours, bg = "white", xjust = 0, yjust = 0)
    if (group_legend_position == "top_left") graphics::legend(x = min(pcoa_input$vectors[, x_axis]), y = max(pcoa_input$vectors[, y_axis]), legend = names(taxon_groups), fill = solid_colours, bg = "white", xjust = 1, yjust = 1)
    if (group_legend_position == "top_right") graphics::legend(x = max(pcoa_input$vectors[, x_axis]), y = max(pcoa_input$vectors[, y_axis]), legend = names(taxon_groups), fill = solid_colours, bg = "white", xjust = 0, yjust = 1)
  }

  # If plotting a z-axis legend:
  if (plot_z_legend && !is.null(z_axis)) {

    # Collapse range of z-values to a spread of six:
    z_values <- seq(from = min(x = pcoa_input$vectors[, z_axis]), to = max(x = pcoa_input$vectors[, z_axis]), length.out = 6)

    # Make z point sizes for legend:
    z_sizes <- abs(x = z_values) / max(abs(x = z_values)) * 3

    # Add z legend to plot (solid points for positive values, open for negative):
    if (z_legend_position == "bottom_left") graphics::legend(x = min(pcoa_input$vectors[, x_axis]), y = min(pcoa_input$vectors[, y_axis]), legend = signif(x = z_values, digits = 4), pch = 21, bg = "white", xjust = 1, yjust = 0, pt.cex = z_sizes, pt.bg = unlist(lapply(X = as.list(x = z_values), FUN = function(y) ifelse(test = y > 0, yes = "black", no = "white"))), col = "black")
    if (z_legend_position == "bottom_right") graphics::legend(x = max(pcoa_input$vectors[, x_axis]), y = min(pcoa_input$vectors[, y_axis]), legend = signif(x = z_values, digits = 4), pch = 21, bg = "white", xjust = 0, yjust = 0, pt.cex = z_sizes, pt.bg = unlist(lapply(X = as.list(x = z_values), FUN = function(y) ifelse(test = y > 0, yes = "black", no = "white"))), col = "black")
    if (z_legend_position == "top_left") graphics::legend(x = min(pcoa_input$vectors[, x_axis]), y = max(pcoa_input$vectors[, y_axis]), legend = signif(x = z_values, digits = 4), pch = 21, bg = "white", xjust = 1, yjust = 1, pt.cex = z_sizes, pt.bg = unlist(lapply(X = as.list(x = z_values), FUN = function(y) ifelse(test = y > 0, yes = "black", no = "white"))), col = "black")
    if (z_legend_position == "top_right") graphics::legend(x = max(pcoa_input$vectors[, x_axis]), y = max(pcoa_input$vectors[, y_axis]), legend = signif(x = z_values, digits = 4), pch = 21, bg = "white", xjust = 0, yjust = 1, pt.cex = z_sizes, pt.bg = unlist(lapply(X = as.list(x = z_values), FUN = function(y) ifelse(test = y > 0, yes = "black", no = "white"))), col = "black")
  }

  # If using a z-axis add label as plot title:
  if (!is.null(x = z_axis)) graphics::title(main = z_lab)
}
#time_tree <- ape::read.tree(text = "(Psarolepis_romeri:1,(Diabolepis_speratus:1.85,((Dipnorhynchus_kiandrensis:7.4,(Archaeonectes_pertusus:28.3,(Uranolophus_wyomingensis:1.6,(Speonesydrion_iani:0.8,(Jarvikia_arctica:36.5173913,(((Adololopas_moyasmithae:10.775,((Adelargo_schultzei:14.05,Chirodipterus_australis:3.25):6.1,(Chirodipterus_rhenanus:1.425,(Chirodipterus_wildungensis:3.25,Dipterus_cf_valenciennesi:3.25):4.675):1.425):1.425):10.31485507,(Barwickia_downunda:10.14492754,Dipterus_valenciennesi:4.444927536):4.444927536):4.444927536,(Pillararhynchus_longi:25.35217391,(((Gogodipterus_paddyensis:24.80434783,((Tarachomylax_oepiki:1.947826087,(Amadeodipterus_kencampbelli:0.9739130435,Stomiahykus_thlaodus:10.47391304):0.9739130435):0.9739130435,(Iowadipterus_halli:17.93913043,((Delatitia_breviceps:50.17391304,(Phaneropleuron_andersoni:34.69130435,((Orlovichthys_limnatis:34.32608696,(Howidipterus_donnae:16.84347826,(((Andreyevichthys_epitomus:16.88913043,Oervigia_nordica:16.88913043):16.88913043,(Grossipterus_crassus:22.79565217,(Fleurantia_denticulata:22.61304348,((Robinsondipterus_longi:16.275,(Asthenorhynchus_meemannae:10.85,(Holodipterus_elderae:5.425,Holodipterus_gogoensis:5.425):5.425):5.425):6.155434783,((Griphognathus_minutidens:14.46666667,(Griphognathus_sculpta:7.233333333,Griphognathus_whitei:7.233333333):7.233333333):7.78115942,(Rhynchodipterus_elginensis:32.86521739,(Jessenia_concentrica:0.1826086957,Soederberghia_groenlandica:21.8826087):0.1826086957):0.1826086957):0.1826086957):0.1826086957):0.1826086957):0.1826086957):0.1826086957,(Pentlandia_macroptera:8.330434783,Scaumenacia_curta:14.83043478):8.330434783):0.1826086957):0.1826086957):0.1826086957,(Holodipterus_santacrucensis:21.07439614,((Ganopristodus_splendens:55.8057971,(Megapleuron_zangerli:86.77149758,(Sagenodus_inaequalis:50.53719807,(((Eoctenodus_microsoma:2.634299517,Tranodis_castrensis:61.53429952):2.634299517,(Ctenodus_romeri:15.68429952,Straitonia_waterstoni:29.58429952):15.68429952):2.634
299517,((Parasagenodus_sibiricus:11.33429952,(Gnathorhiza_serrata:20.55,((Beltanodus_ambilobensis:53.1125,(Namatozodia_pitikanta:45.525,(Ariguna_formosa:37.9375,(((Aphelodus_anapes:11.38125,Ceratodus_formosa:11.38125):11.38125,((Asiatoceratodus_sharovi:7.5875,Gosfordia_truncata:30.5875):7.5875,(Neoceratodus_forsteri:77.0875,(Mioceratodus_gregoryi:37,(Lepidosiren_paradoxa:22.4,Protopterus_annectens:9.5):9.5):86.5875):77.0875):7.5875):7.5875,(Archaeoceratodus_avus:26.675,Tellerodus_sturi:38.175):26.675):7.5875):7.5875):7.5875):12.3875,(Microceratodus_angolensis:63.9,(Palaeophichthys_parvulus:1.6,(Ptychoceratodus_serratus:45.525,(Paraceratodus_germaini:31.65,(Arganodus_atlantis:15.175,Ferganoceratodus_jurassicus:104.975):15.175):15.175):16.775):1.6):1.6):22.15):31.88429952):11.33429952,(Ceratodus_latissimus:76.93429952,Metaceratodus_wollastoni:159.4342995):76.93429952):11.33429952):2.634299517):2.634299517):2.634299517):2.634299517,(Nielsenia_nordica:14.62004831,Conchopoma_gadiforme:77.42004831):14.62004831):2.634299517):2.634299517):0.1826086957):0.1826086957):0.1826086957,(Rhinodipterus_secans:15.37826087,Rhinodipterus_ulrichi:8.87826087):8.87826087):0.1826086957):0.1826086957):0.1826086957):0.1826086957,(Palaeodaphus_insignis:12.49347826,Sunwapta_grandiceps:23.29347826):12.49347826):0.1826086957,(Melanognathus_canadensis:1.734782609,Sorbitorhynchus_deleaskitus:1.734782609):1.734782609):0.1826086957):0.1826086957):0.1826086957):0.9826086957):0.8):0.8):0.8):0.8,(Westollrhynchus_lehmanni:2,(Ichnomylax_kurnai:3.36,(Dipnorhynchus_sussmilchi:2.52,(Chirodipterus_onawwayensis:16.88,(Dipnorhynch_cathlesae:0.84,Dipnorhynchus_kurikae:0.84):0.84):0.84):0.84):2.84):2):2.65):1.85);")
#time_tree$root.time <- 419.7
#cladistic_matrix <- read_nexus_matrix("http://www.graemetlloyd.com/nexus/Lloyd_etal_2012a.nex")
#pcoa_input <- ordinate_cladistic_matrix(cladistic_matrix = cladistic_matrix, time_tree = time_tree)
#x_axis = 1
#y_axis = 2
#taxon_groups = assign_taxa_to_bins(taxon_ages, named_time_bins) ### NEW PARAM
#plot_taxon_names = TRUE
#plot_internal_nodes = TRUE
#plot_root = TRUE
#root_colour = "red"
#palette = "viridis"
#plot_group_legend = TRUE
#group_legend_position = "top_right"
#plot_z_legend = TRUE
#z_legend_position = "bottom_right"
| /R/plot_morphospace.R | no_license | cran/Claddis | R | false | false | 24,390 | r | #' Plot Morphospace
#'
#' @description
#'
#' Plots a morphospace using the output from ordinate_cladistic_matrix.
#'
#' @param pcoa_input The main input in the format output from \link{ordinate_cladistic_matrix}.
#' @param x_axis Which ordination axis to plot as the x-axis (defaults to 1).
#' @param y_axis Which ordination axis to plot as the y-axis (defaults to 2).
#' @param z_axis Which ordination axis to plot as the z-axis (defaults to NULL, i.e., is not plotted).
#' @param taxon_groups A named list of groups to which taxa are assigned (optional). This is used to plot points or convex hulls in different colours corresponding to each group. As the user names the groups these can represent any grouping of interest (e.g., taxonomic, ecological, temporal, spatial). \link{assign_taxa_to_bins} can automate temporal assignments.
#' @param plot_taxon_names Logical indicating whether to plot the names of the taxa (defaults to FALSE).
#' @param plot_convex_hulls Logical indicating whether to plot convex hulls around any taxon_groups (if used).
#' @param plot_internal_nodes Logical indicating whether to plot the internal nodes of the tree (if included in \code{pcoa_input}) (defaults to FALSE).
#' @param plot_edges Logical indicating whether to plot the branches of the tree (if included in \code{pcoa_input}) (defaults to TRUE).
#' @param plot_root Logical indicating whether to plot the root separately (defaults to FALSE).
#' @param root_colour If plotting the root separately (previous option) sets the root colour.
#' @param palette The palette to use for plotting each element of taxon_groups. See \link[grDevices]{palette}.
#' @param plot_group_legend Logical indicating whether to plot a legend for taxon_groups. (Default is TRUE.)
#' @param group_legend_position Position to plot the group legend. Must be one of \code{bottom_left}, \code{bottom_right}, \code{top_left}, or \code{top_right} (the default).
#' @param plot_z_legend Logical indicating whether to plot a legend for the z-axis. (Default is TRUE.)
#' @param z_legend_position Position to plot the group legend. Must be one of \code{bottom_left}, \code{bottom_right} (the default), \code{top_left}, or \code{top_right}.
#' @param inform Logical indicating whether to inform the user of any taxon pruning. (Default is TRUE.)
#' @param x_limits Plot limits to use for x-axis. Only intended for use by \link{plot_multi_morphospace}.
#' @param y_limits Plot limits to use for y-axis. Only intended for use by \link{plot_multi_morphospace}.
#'
#' @details
#'
#' Uses output from \link{ordinate_cladistic_matrix} to make morphospace plots.
#'
#' Allows plotting of a third axis using the technique of Wills et al. (1994; their Figures 4 and 8; Wills 1998; his Figure 4), where solid and open indicate positive and negative values respectively, and the size of points their magnitudes.
#'
#' Will automatically generate phylomorphospaces if a tree was included in the ordination.
#'
#' Can also plot groups of points - whether they represent taxonomic, ecological, temporal, or spatial groupings - in different colours as well as plot translucent convex hulls around these groups, by using the \code{taxon_groups} and \code{plot_convex_hulls = TRUE} options, respectively. Note that \code{taxon_groups} should be in the form of a named list (see example below for how these should be formatted).
#'
#' Various other options allow toggling of particular features on or off. For example, the taxon names can be shown with \code{plot_taxon_names = TRUE}.
#'
#' Note that some features will generate legends that may initially appear to disappear off the sides of the plot, but simple resizing of the plot window (or increasing the width:height ratio if outputting to a file) should fix this.
#'
#' @author Graeme T. Lloyd \email{graemetlloyd@@gmail.com} and Emma Sherratt \email{emma.sherratt@@gmail.com}
#'
#' @seealso
#'
#' \link{assign_taxa_to_bins}, \link{plot_chronophylomorphospace}, \link{plot_morphospace_stack}, \link{plot_multi_morphospace}, \link{ordinate_cladistic_matrix}
#'
#' @references
#'
#' Wills, M. A., 1998. Cambrian and Recent disparity: the picture from priapulids. \emph{Paleobiology}, \bold{24}, 177-199.
#'
#' Wills, M. A., Briggs, D. E. G. and Fortey, R. A., 1994. Disparity as an evolutionary index: a comparison of Cambrian and Recent arthropods. \emph{Paleobiology}, \bold{20}, 93-130.
#'
#' @examples
#'
#' \donttest{
#' # Perform a PCoA ordination on the day_2016 data set:
#' pcoa_input <- ordinate_cladistic_matrix(cladistic_matrix = day_2016)
#'
#' # Plot this as a simple bivariate morphospace:
#' plot_morphospace(pcoa_input = pcoa_input)
#'
#' # Use the Wills technique to add a third axis (PC3):
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3)
#'
#' # You may need to resize the plot to see the legend for the z-axis
#'
#' # Add taxon names as well:
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3, plot_taxon_names = TRUE)
#'
#' # Define some simple taxon groups for the data as a named list:
#' taxon_groups <- list(nonBurnetiamorpha = c("Biarmosuchus_tener",
#' "Hipposaurus_boonstrai", "Bullacephalus_jacksoni", "Pachydectes_elsi",
#' "Ictidorhinus_martinsi", "RC_20", "Herpetoskylax_hopsoni"),
#' Burnetiamorpha = c("Lemurosaurus_pricei", "Lobalopex_mordax",
#' "Lophorhinus_willodenensis", "Proburnetia_viatkensis", "Lende_chiweta",
#' "Paraburnetia_sneeubergensis", "Burnetia_mirabilis", "BP_1_7098"))
#'
#' # Plot taxon groups including convex hulls:
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3, plot_taxon_names = TRUE,
#' taxon_groups = taxon_groups, plot_convex_hulls = TRUE)
#'
#' # Make time-scaled first MPT for Day 2016 data set:
#' time_tree <- ape::read.tree(text = paste0("(Biarmosuchus_tener:0.5,",
#' "(((Hipposaurus_boonstrai:3.5,(Bullacephalus_jacksoni:0.75,",
#' "Pachydectes_elsi:0.75):0.75):0.75,(Lemurosaurus_pricei:7.166666667,",
#' "(Lobalopex_mordax:4.333333333,((Lophorhinus_willodenensis:3.666666667,",
#' "(Proburnetia_viatkensis:0.8333333333,(Lende_chiweta:2,",
#' "(Paraburnetia_sneeubergensis:1,Burnetia_mirabilis:2):1):1.833333333)",
#' ":0.8333333333):0.8333333333,(BP_1_7098:2.25,Niuksenitia_sukhonensis:",
#' "1.25):1.25):0.8333333333):0.8333333333):3.083333333):1.95,",
#' "(Ictidorhinus_martinsi:15.9,(RC_20:11.6,(Herpetoskylax_hopsoni:11.3,",
#' "Lycaenodon_longiceps:0.3):0.3):0.3):0.3):0.3);"))
#'
#' # Add root age to tree:
#' time_tree$root.time <- 269.5
#'
#' # Prune incomplete taxa from tree:
#' time_tree <- ape::drop.tip(phy = time_tree, tip = c("Lycaenodon_longiceps",
#' "Niuksenitia_sukhonensis"))
#'
#' # Prune incomplete taxa from cladistic matrix:
#' cladistic_matrix <- prune_cladistic_matrix(cladistic_matrix = day_2016,
#' taxa2prune = c("Lycaenodon_longiceps", "Niuksenitia_sukhonensis"))
#'
#' # Note: the above pruning is simply to run this example and should not be
#' # done manually as a matter of course as the functions will automatically
#' # prune tips and nodes as required.
#'
#' # Make new ordination with tree included (enabling phylomorphospace):
#' pcoa_input <- ordinate_cladistic_matrix(cladistic_matrix = cladistic_matrix,
#' time_tree = time_tree)
#'
#' # Plot this as a simple bivariate phylomorphospace:
#' plot_morphospace(pcoa_input = pcoa_input)
#'
#' # Use the Wills technique to add a third axis (PC3):
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3)
#'
#' # You may need to resize the plot to see the legend for the z-axis
#'
#' # Add taxon names as well:
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3, plot_taxon_names = TRUE)
#'
#' # Add taxon groups including convex hulls:
#' plot_morphospace(pcoa_input = pcoa_input, z_axis = 3, plot_taxon_names = TRUE,
#' taxon_groups = taxon_groups, plot_convex_hulls = TRUE)
#' }
#' @export plot_morphospace
plot_morphospace <- function(pcoa_input, x_axis = 1, y_axis = 2, z_axis = NULL, taxon_groups = NULL, plot_taxon_names = FALSE, plot_convex_hulls = FALSE, plot_internal_nodes = FALSE, plot_edges = TRUE, plot_root = TRUE, root_colour = "red", palette = "viridis", plot_group_legend = TRUE, group_legend_position = "top_right", plot_z_legend = TRUE, z_legend_position = "bottom_right", inform = TRUE, x_limits = NULL, y_limits = NULL) {

  # TO DO:
  #
  # - Order points by z-value so they are actually plotted from "back" to "front".
  # - Add plot_node_names option.
  # - Check inputs.

  # Check group_legend_position is a valid value and stop and warn user if not:
  if (!group_legend_position %in% c("bottom_left", "bottom_right", "top_left", "top_right")) stop("group_legend_position must be one of \"bottom_left\", \"bottom_right\", \"top_left\", or \"top_right\".")

  # Check z_legend_position is a valid value and stop and warn user if not:
  if (!z_legend_position %in% c("bottom_left", "bottom_right", "top_left", "top_right")) stop("z_legend_position must be one of \"bottom_left\", \"bottom_right\", \"top_left\", or \"top_right\".")

  # Check that if both legends are used that they are in different positions:
  if (plot_group_legend && plot_z_legend && group_legend_position == z_legend_position) stop("plot_group_legend and plot_z_legend must be different values or they will plot on top of each other.")

  # Create logical for whether taxon groups are used or not:
  taxon_groups_used <- !is.null(x = taxon_groups)

  # Create logical for whether tree is used or not:
  tree_used <- !is.null(x = pcoa_input$time_tree)

  # If using taxon groups:
  if (taxon_groups_used) {

    # Find any taxa to prune from taxon groups (taxa absent from the ordination):
    taxa_to_prune <- setdiff(x = unique(x = unlist(x = taxon_groups)), y = rownames(x = pcoa_input$vectors))

    # If taxa to prune are found:
    if (length(x = taxa_to_prune) > 0) {

      # Go through taxon groups:
      taxon_groups <- lapply(X = taxon_groups, function(y) {

        # Remove any taxa not found in pcoa data:
        if (length(x = sort(x = match(x = taxa_to_prune, table = y))) > 0) y <- y[-sort(x = match(x = taxa_to_prune, table = y))]

        # Return pruned group:
        y
      })

      # Warn user that this has happened in case it is an error:
      if (inform) print(paste0("Warning: The following taxa were removed from taxon_groups as they do not appear in pcoa_input: ", paste(taxa_to_prune, collapse = ", "), ". You may wish to double check this makes sense (e.g., because of incomplete taxa being removed by trim_matrix) and is not due to a typographical or other error which means names are not an exact match."))
    }
  }

  # If plot limits aren't set use x and y ranges to set them:
  if (is.null(x_limits)) x_limits <- range(pcoa_input$vectors[, x_axis])
  if (is.null(y_limits)) y_limits <- range(pcoa_input$vectors[, y_axis])

  # Set default tip numbers as just 1 to N:
  tip_numbers <- 1:nrow(x = pcoa_input$vectors)

  # Case if tree supplied:
  if (tree_used) {

    # Set basic tree information (tips first, then internal nodes, root is first node):
    n_tips <- ape::Ntip(phy = pcoa_input$time_tree)
    tip_numbers <- c(1:n_tips)
    node_numbers <- setdiff(x = 1:nrow(pcoa_input$vectors), y = tip_numbers)
    root_number <- n_tips + 1
  }

  # Get vector of values that correspond to scree plot (percent variance per axis):
  scree_values <- apply(pcoa_input$vectors, 2, var) / sum(apply(pcoa_input$vectors, 2, var)) * 100

  # Set default solid colour to black:
  solid_colours <- "black"

  # Set default transparent colour to 50% black:
  translucent_colours <- grDevices::rgb(red = 0, green = 0, blue = 0, alpha = 0.5)

  # Set open colour (completely transparent):
  transparent_colour <- grDevices::rgb(red = 0, green = 0, blue = 0, alpha = 0)

  # If using taxon groups:
  if (taxon_groups_used) {

    # Set colours for each group:
    solid_colours <- grDevices::hcl.colors(n = length(x = taxon_groups), palette = palette, alpha = 1)
    translucent_colours <- grDevices::hcl.colors(n = length(x = taxon_groups), palette = palette, alpha = 0.5)
    transparent_colour <- grDevices::hcl.colors(n = length(x = taxon_groups), palette = palette, alpha = 0)

  # If not using taxon groups:
  } else {

    # Create dummy taxon group of all data as one (N.B. "vectors", not "vector";
    # the original only worked through $ partial matching):
    taxon_groups <- list(Data = rownames(x = pcoa_input$vectors))
  }

  # If using a tree make sure taxon_groups only includes tips:
  if (tree_used) taxon_groups <- lapply(X = taxon_groups, FUN = function(y) intersect(y, pcoa_input$time_tree$tip.label[tip_numbers]))

  # Set point colours (background and border); borders are always solid:
  point_col <- point_bg <- lapply(X = as.list(x = 1:length(taxon_groups)), FUN = function(y) rep(x = solid_colours[y], length.out = length(taxon_groups[[y]])))

  # If a z-axis is used make backgrounds solid for positive values and transparent for negative (Wills technique):
  if (!is.null(x = z_axis)) point_bg <- lapply(X = as.list(x = 1:length(taxon_groups)), FUN = function(y) as.vector(unlist(x = lapply(X = as.list(x = taxon_groups[[y]]), FUN = function(z) ifelse(test = pcoa_input$vectors[z, z_axis] > 0, yes = solid_colours[[y]], no = transparent_colour[[y]]))), mode = "character"))

  # Make axis labels:
  x_lab <- paste("PC", x_axis, " (", round(scree_values[x_axis], 2), "% of total variance)", sep = "")
  y_lab <- paste("PC", y_axis, " (", round(scree_values[y_axis], 2), "% of total variance)", sep = "")
  if (!is.null(x = z_axis)) z_lab <- paste("PC", z_axis, " (", round(scree_values[z_axis], 2), "% of total variance)", sep = "")

  # Make all points equal in size by default:
  point_sizes <- rep(1, nrow(pcoa_input$vectors))

  # Set point sizes as absolute z-value if using z-axis (magnitude encodes |z|):
  if (!is.null(x = z_axis)) point_sizes <- abs(x = pcoa_input$vectors[, z_axis]) / max(abs(x = pcoa_input$vectors[, z_axis])) * 3

  # Add taxon names to point_sizes:
  names(x = point_sizes) <- rownames(x = pcoa_input$vectors)

  # Create the basic plot space (will be empty for now):
  graphics::plot(x = pcoa_input$vectors[, x_axis], y = pcoa_input$vectors[, y_axis], type = "n", bg = "black", xlab = x_lab, ylab = y_lab, asp = TRUE, xlim = x_limits, ylim = y_limits)

  # Sort vectors by node number (1:N) so tree edge indices line up with rows:
  if (tree_used) pcoa_input$vectors <- pcoa_input$vectors[c(pcoa_input$time_tree$tip.label, setdiff(x = rownames(x = pcoa_input$vectors), y = pcoa_input$time_tree$tip.label)), ]

  # Plot branches of tree (if a tree is used and plotting requested):
  if (tree_used && plot_edges) for (i in 1:nrow(pcoa_input$time_tree$edge)) graphics::lines(x = pcoa_input$vectors[pcoa_input$time_tree$edge[i, ], x_axis], y = pcoa_input$vectors[pcoa_input$time_tree$edge[i, ], y_axis], col = grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 1))

  # Set node colours for plotting (grey, transparent for negative z-values; all solid if no z-axis):
  if (tree_used) {
    if (!is.null(x = z_axis)) {
      node_bg <- unlist(x = lapply(X = as.list(pcoa_input$vectors[as.character(node_numbers), z_axis]), FUN = function(y) ifelse(y > 0, grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 1), grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 0))))
    } else {
      node_bg <- stats::setNames(object = rep(x = grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 1), times = length(x = node_numbers)), nm = as.character(x = node_numbers))
    }
  }

  # Plot internal nodes, if requested:
  if (tree_used && plot_internal_nodes) graphics::points(pcoa_input$vectors[node_numbers, x_axis], pcoa_input$vectors[node_numbers, y_axis], pch = 21, bg = node_bg[as.character(x = node_numbers)], col = grDevices::rgb(red = 0.65, green = 0.65, blue = 0.65, alpha = 1), cex = point_sizes[node_numbers])

  # Plot root separately, if requested:
  if (tree_used && plot_root) graphics::points(pcoa_input$vectors[root_number, x_axis], pcoa_input$vectors[root_number, y_axis], pch = 21, col = root_colour, bg = root_colour, cex = point_sizes[root_number])

  # If convex hulls are requested:
  if (taxon_groups_used && plot_convex_hulls) {

    # For each group:
    x <- lapply(X = as.list(x = 1:length(x = taxon_groups)), function(y) {

      # Make convex hull for data:
      convex_hull <- grDevices::chull(x = pcoa_input$vectors[taxon_groups[[y]], x_axis], y = pcoa_input$vectors[taxon_groups[[y]], y_axis])

      # Plot convex hull as translucent polygon:
      graphics::polygon(x = pcoa_input$vectors[taxon_groups[[y]][convex_hull], x_axis], y = pcoa_input$vectors[taxon_groups[[y]][convex_hull], y_axis], col = translucent_colours[[y]], border = NA)
    })
  }

  # Add points to plot (one call per taxon group):
  x <- lapply(X = as.list(x = 1:length(x = taxon_groups)), function(y) graphics::points(x = pcoa_input$vectors[taxon_groups[[y]], x_axis], y = pcoa_input$vectors[taxon_groups[[y]], y_axis], pch = 21, bg = point_bg[[y]], col = point_col[[y]], cex = point_sizes[taxon_groups[[y]]]))

  # If plotting taxon names:
  if (plot_taxon_names) {

    # First establish a default position for names (to the left of the point):
    x_positions <- rep(2, nrow(pcoa_input$vectors))

    # Now changes negative values to plot on the right instead:
    x_positions[which(x = pcoa_input$vectors[, x_axis] < 0)] <- 4

    # Plot taxon names (for tips only):
    graphics::text(x = pcoa_input$vectors[tip_numbers, x_axis], y = pcoa_input$vectors[tip_numbers, y_axis], labels = rownames(x = pcoa_input$vectors)[tip_numbers], pos = x_positions[tip_numbers], cex = 0.7)
  }

  # If plotting a group legend:
  if (taxon_groups_used && plot_group_legend) {

    # Add groups legend to plot at the requested corner:
    if (group_legend_position == "bottom_left") graphics::legend(x = min(pcoa_input$vectors[, x_axis]), y = min(pcoa_input$vectors[, y_axis]), legend = names(taxon_groups), fill = solid_colours, bg = "white", xjust = 1, yjust = 0)
    if (group_legend_position == "bottom_right") graphics::legend(x = max(pcoa_input$vectors[, x_axis]), y = min(pcoa_input$vectors[, y_axis]), legend = names(taxon_groups), fill = solid_colours, bg = "white", xjust = 0, yjust = 0)
    if (group_legend_position == "top_left") graphics::legend(x = min(pcoa_input$vectors[, x_axis]), y = max(pcoa_input$vectors[, y_axis]), legend = names(taxon_groups), fill = solid_colours, bg = "white", xjust = 1, yjust = 1)
    if (group_legend_position == "top_right") graphics::legend(x = max(pcoa_input$vectors[, x_axis]), y = max(pcoa_input$vectors[, y_axis]), legend = names(taxon_groups), fill = solid_colours, bg = "white", xjust = 0, yjust = 1)
  }

  # If plotting a z-axis legend:
  if (plot_z_legend && !is.null(z_axis)) {

    # Collapse range of z-values to a spread of six:
    z_values <- seq(from = min(x = pcoa_input$vectors[, z_axis]), to = max(x = pcoa_input$vectors[, z_axis]), length.out = 6)

    # Make z point sizes for legend:
    z_sizes <- abs(x = z_values) / max(abs(x = z_values)) * 3

    # Add z legend to plot at the requested corner:
    if (z_legend_position == "bottom_left") graphics::legend(x = min(pcoa_input$vectors[, x_axis]), y = min(pcoa_input$vectors[, y_axis]), legend = signif(x = z_values, digits = 4), pch = 21, bg = "white", xjust = 1, yjust = 0, pt.cex = z_sizes, pt.bg = unlist(lapply(X = as.list(x = z_values), FUN = function(y) ifelse(test = y > 0, yes = "black", no = "white"))), col = "black")
    if (z_legend_position == "bottom_right") graphics::legend(x = max(pcoa_input$vectors[, x_axis]), y = min(pcoa_input$vectors[, y_axis]), legend = signif(x = z_values, digits = 4), pch = 21, bg = "white", xjust = 0, yjust = 0, pt.cex = z_sizes, pt.bg = unlist(lapply(X = as.list(x = z_values), FUN = function(y) ifelse(test = y > 0, yes = "black", no = "white"))), col = "black")
    if (z_legend_position == "top_left") graphics::legend(x = min(pcoa_input$vectors[, x_axis]), y = max(pcoa_input$vectors[, y_axis]), legend = signif(x = z_values, digits = 4), pch = 21, bg = "white", xjust = 1, yjust = 1, pt.cex = z_sizes, pt.bg = unlist(lapply(X = as.list(x = z_values), FUN = function(y) ifelse(test = y > 0, yes = "black", no = "white"))), col = "black")
    if (z_legend_position == "top_right") graphics::legend(x = max(pcoa_input$vectors[, x_axis]), y = max(pcoa_input$vectors[, y_axis]), legend = signif(x = z_values, digits = 4), pch = 21, bg = "white", xjust = 0, yjust = 1, pt.cex = z_sizes, pt.bg = unlist(lapply(X = as.list(x = z_values), FUN = function(y) ifelse(test = y > 0, yes = "black", no = "white"))), col = "black")
  }

  # If using a z-axis add label as plot title:
  if (!is.null(x = z_axis)) graphics::title(main = z_lab)
}
#time_tree <- ape::read.tree(text = "(Psarolepis_romeri:1,(Diabolepis_speratus:1.85,((Dipnorhynchus_kiandrensis:7.4,(Archaeonectes_pertusus:28.3,(Uranolophus_wyomingensis:1.6,(Speonesydrion_iani:0.8,(Jarvikia_arctica:36.5173913,(((Adololopas_moyasmithae:10.775,((Adelargo_schultzei:14.05,Chirodipterus_australis:3.25):6.1,(Chirodipterus_rhenanus:1.425,(Chirodipterus_wildungensis:3.25,Dipterus_cf_valenciennesi:3.25):4.675):1.425):1.425):10.31485507,(Barwickia_downunda:10.14492754,Dipterus_valenciennesi:4.444927536):4.444927536):4.444927536,(Pillararhynchus_longi:25.35217391,(((Gogodipterus_paddyensis:24.80434783,((Tarachomylax_oepiki:1.947826087,(Amadeodipterus_kencampbelli:0.9739130435,Stomiahykus_thlaodus:10.47391304):0.9739130435):0.9739130435,(Iowadipterus_halli:17.93913043,((Delatitia_breviceps:50.17391304,(Phaneropleuron_andersoni:34.69130435,((Orlovichthys_limnatis:34.32608696,(Howidipterus_donnae:16.84347826,(((Andreyevichthys_epitomus:16.88913043,Oervigia_nordica:16.88913043):16.88913043,(Grossipterus_crassus:22.79565217,(Fleurantia_denticulata:22.61304348,((Robinsondipterus_longi:16.275,(Asthenorhynchus_meemannae:10.85,(Holodipterus_elderae:5.425,Holodipterus_gogoensis:5.425):5.425):5.425):6.155434783,((Griphognathus_minutidens:14.46666667,(Griphognathus_sculpta:7.233333333,Griphognathus_whitei:7.233333333):7.233333333):7.78115942,(Rhynchodipterus_elginensis:32.86521739,(Jessenia_concentrica:0.1826086957,Soederberghia_groenlandica:21.8826087):0.1826086957):0.1826086957):0.1826086957):0.1826086957):0.1826086957):0.1826086957):0.1826086957,(Pentlandia_macroptera:8.330434783,Scaumenacia_curta:14.83043478):8.330434783):0.1826086957):0.1826086957):0.1826086957,(Holodipterus_santacrucensis:21.07439614,((Ganopristodus_splendens:55.8057971,(Megapleuron_zangerli:86.77149758,(Sagenodus_inaequalis:50.53719807,(((Eoctenodus_microsoma:2.634299517,Tranodis_castrensis:61.53429952):2.634299517,(Ctenodus_romeri:15.68429952,Straitonia_waterstoni:29.58429952):15.68429952):2.634
299517,((Parasagenodus_sibiricus:11.33429952,(Gnathorhiza_serrata:20.55,((Beltanodus_ambilobensis:53.1125,(Namatozodia_pitikanta:45.525,(Ariguna_formosa:37.9375,(((Aphelodus_anapes:11.38125,Ceratodus_formosa:11.38125):11.38125,((Asiatoceratodus_sharovi:7.5875,Gosfordia_truncata:30.5875):7.5875,(Neoceratodus_forsteri:77.0875,(Mioceratodus_gregoryi:37,(Lepidosiren_paradoxa:22.4,Protopterus_annectens:9.5):9.5):86.5875):77.0875):7.5875):7.5875,(Archaeoceratodus_avus:26.675,Tellerodus_sturi:38.175):26.675):7.5875):7.5875):7.5875):12.3875,(Microceratodus_angolensis:63.9,(Palaeophichthys_parvulus:1.6,(Ptychoceratodus_serratus:45.525,(Paraceratodus_germaini:31.65,(Arganodus_atlantis:15.175,Ferganoceratodus_jurassicus:104.975):15.175):15.175):16.775):1.6):1.6):22.15):31.88429952):11.33429952,(Ceratodus_latissimus:76.93429952,Metaceratodus_wollastoni:159.4342995):76.93429952):11.33429952):2.634299517):2.634299517):2.634299517):2.634299517,(Nielsenia_nordica:14.62004831,Conchopoma_gadiforme:77.42004831):14.62004831):2.634299517):2.634299517):0.1826086957):0.1826086957):0.1826086957,(Rhinodipterus_secans:15.37826087,Rhinodipterus_ulrichi:8.87826087):8.87826087):0.1826086957):0.1826086957):0.1826086957):0.1826086957,(Palaeodaphus_insignis:12.49347826,Sunwapta_grandiceps:23.29347826):12.49347826):0.1826086957,(Melanognathus_canadensis:1.734782609,Sorbitorhynchus_deleaskitus:1.734782609):1.734782609):0.1826086957):0.1826086957):0.1826086957):0.9826086957):0.8):0.8):0.8):0.8,(Westollrhynchus_lehmanni:2,(Ichnomylax_kurnai:3.36,(Dipnorhynchus_sussmilchi:2.52,(Chirodipterus_onawwayensis:16.88,(Dipnorhynch_cathlesae:0.84,Dipnorhynchus_kurikae:0.84):0.84):0.84):0.84):2.84):2):2.65):1.85);")
#time_tree$root.time <- 419.7
#cladistic_matrix <- read_nexus_matrix("http://www.graemetlloyd.com/nexus/Lloyd_etal_2012a.nex")
#pcoa_input <- ordinate_cladistic_matrix(cladistic_matrix = cladistic_matrix, time_tree = time_tree)
#x_axis = 1
#y_axis = 2
#taxon_groups = assign_taxa_to_bins(taxon_ages, named_time_bins) ### NEW PARAM
#plot_taxon_names = TRUE
#plot_internal_nodes = TRUE
#plot_root = TRUE
#root_colour = "red"
#palette = "viridis"
#plot_group_legend = TRUE
#group_legend_position = "top_right"
#plot_z_legend = TRUE
#z_legend_position = "bottom_right"
|
########################################################################
## Build, for each species, a table of unique read counts per gene
## (StringTie non-overlapping exon blocks), one column per sample, and
## write it to disk twice: once with all samples and once restricted to
## the main-strain samples.
########################################################################
path="LncEvoDevo/"
pathExpression=paste(path,"results/expression_estimation/", sep="")
pathGeneOverlaps=paste(path,"results/gene_overlaps/",sep="")
set.seed(19)
source("normalization.R")
## label used to locate per-sample input files and to name the outputs
type="NonOverlappingExonBlocks_StringTie"
splist=c("Mouse", "Rat", "Chicken")
## Ensembl release, part of the exon-block file name
release=94
########################################################################
## samples coming from a strain other than the main one, per species;
## these are excluded from the "_MainStrain" output written below
samples.otherstrain=list()
samples.otherstrain[["Mouse"]]=c("Sertoli", "Spermatids", "Spermatocytes", "Spermatogonia", "Spermatozoa")
samples.otherstrain[["Rat"]]=c("Brain_Adult3", "Brain_Adult4", "Kidney_Adult3", "Kidney_Adult4", "Liver_Adult3", "Liver_Adult4", "Testis_Adult4")
samples.otherstrain[["Chicken"]]=c("Brain_Adult1", "Brain_Adult2", "Brain_Day18Embryo1", "Brain_Day18Embryo10", "Brain_Day18Embryo2", "Brain_Day18Embryo3", "Brain_Day18Embryo4", "Brain_Day18Embryo5", "Brain_Day18Embryo6", "Brain_Day18Embryo7", "Brain_Day18Embryo8", "Brain_Day18Embryo9", "Kidney_Adult1", "Kidney_Adult2", "Kidney_Day18Embryo1", "Kidney_Day18Embryo10", "Kidney_Day18Embryo2", "Kidney_Day18Embryo3", "Kidney_Day18Embryo4", "Kidney_Day18Embryo5", "Kidney_Day18Embryo6", "Kidney_Day18Embryo7", "Kidney_Day18Embryo8", "Kidney_Day18Embryo9", "Liver_Adult1", "Liver_Adult2", "Liver_Day18Embryo1", "Liver_Day18Embryo3", "Liver_Day18Embryo4", "Liver_Day18Embryo5", "Liver_Day18Embryo6", "Liver_Day18Embryo7", "Liver_Day18Embryo8", "Liver_Day18Embryo9", "Testis_Adult1", "Testis_Day18Embryo1", "Testis_Day18Embryo2", "Testis_Day18Embryo3", "Testis_Day18Embryo4", "Testis_Day18Embryo5", "Testis_Day4.5Embryo1", "Testis_Day4.5Embryo2", "Testis_Day6Embryo1", "Testis_Day6Embryo2")
########################################################################
for(sp in splist){
print(sp)
## sample list = directory entries under the species folder whose name
## does not contain "txt" (the .txt files there are outputs, not samples)
samples=system(paste("ls ", pathExpression, sp, " | grep -v txt", sep=""), intern=T)
## exon blocks excluding overlap with other genes; headerless table.
## Assumes V1 is the gene ID and V4/V5 are block start/end coordinates
## -- inferred from the length computation below; TODO confirm against
## the file format written by the gene_overlaps step.
exons=read.table(paste(pathGeneOverlaps,sp,"/ExonBlocks_ExcludingOverlapOtherGenes_FilteredTranscripts_StringTie_Ensembl",release,".txt",sep=""), h=F, stringsAsFactors=F, sep="\t")
## block length; the +1 treats coordinates as inclusive on both ends
exons$Length=exons$V5-exons$V4+1
## total exonic length per gene, named by gene ID
exonic.length=as.numeric(tapply(exons$Length, as.factor(exons$V1), sum))
names(exonic.length)=levels(as.factor(exons$V1))
unique.reads=list()
for(sample in samples){
print(sample)
this.reads=c()
## try(): a sample directory may lack a read-count file; this.reads then
## stays empty and the sample is dropped by the length check below
try(this.reads<-read.table(paste(pathExpression, sp, "/", sample,"/ReadCounts_",type,".txt",sep=""), h=T, stringsAsFactors=F, sep="\t", quote="\""))
if(length(this.reads)>0){
## remove duplicates
## keep only the unique-read counts, named by gene ID
unique.reads[[sample]]=this.reads[,"NbUniqueReads"]
names(unique.reads[[sample]])=this.reads[,"GeneID"]
}
}
samples=names(unique.reads) ## to exclude samples for which we didn't find expression values
samples.mainstrain=setdiff(samples, samples.otherstrain[[sp]])
print(paste(length(samples), "samples in total, ",length(samples.mainstrain), "samples for the main strain"))
## reorder values
## restrict to genes present both in the exon-block file and in the first
## sample's counts, then put every sample's counts in that gene order
exonic.length=exonic.length[intersect(names(exonic.length), names(unique.reads[[samples[1]]]))]
gene.order=names(exonic.length)
for(sample in samples){
print(paste("reordering", sample))
unique.reads[[sample]]=unique.reads[[sample]][gene.order]
}
## make data frames
## NOTE(review): as.data.frame() passes sample names through make.names();
## the column selection by the original "samples" vector below would fail
## if any sample directory name were not syntactically valid -- the names
## listed above are all valid, but confirm for new samples
unique.reads=as.data.frame(unique.reads)
## add gene id as a column
unique.reads$GeneID=gene.order
unique.reads$ExonicLength=exonic.length
unique.reads=unique.reads[,c("GeneID","ExonicLength", samples)]
## write output
write.table(unique.reads, file=paste(pathExpression, sp, "/AllSamples_UniqueReadCounts_", type,".txt",sep=""), row.names=F, col.names=T, quote=F, sep="\t")
## write output only for main strain samples
unique.reads.mainstrain=unique.reads[,c("GeneID","ExonicLength", samples.mainstrain)]
write.table(unique.reads.mainstrain, file=paste(pathExpression, sp, "/AllSamples_UniqueReadCounts_", type,"_MainStrain.txt",sep=""), row.names=F, col.names=T, quote=F, sep="\t")
}
########################################################################
| /expression_estimation/combine.read.counts.nooverlap.R | no_license | anecsulea/LncEvoDevo | R | false | false | 4,201 | r | ########################################################################
path="LncEvoDevo/"
pathExpression=paste(path,"results/expression_estimation/", sep="")
pathGeneOverlaps=paste(path,"results/gene_overlaps/",sep="")
set.seed(19)
source("normalization.R")
type="NonOverlappingExonBlocks_StringTie"
splist=c("Mouse", "Rat", "Chicken")
release=94
########################################################################
samples.otherstrain=list()
samples.otherstrain[["Mouse"]]=c("Sertoli", "Spermatids", "Spermatocytes", "Spermatogonia", "Spermatozoa")
samples.otherstrain[["Rat"]]=c("Brain_Adult3", "Brain_Adult4", "Kidney_Adult3", "Kidney_Adult4", "Liver_Adult3", "Liver_Adult4", "Testis_Adult4")
samples.otherstrain[["Chicken"]]=c("Brain_Adult1", "Brain_Adult2", "Brain_Day18Embryo1", "Brain_Day18Embryo10", "Brain_Day18Embryo2", "Brain_Day18Embryo3", "Brain_Day18Embryo4", "Brain_Day18Embryo5", "Brain_Day18Embryo6", "Brain_Day18Embryo7", "Brain_Day18Embryo8", "Brain_Day18Embryo9", "Kidney_Adult1", "Kidney_Adult2", "Kidney_Day18Embryo1", "Kidney_Day18Embryo10", "Kidney_Day18Embryo2", "Kidney_Day18Embryo3", "Kidney_Day18Embryo4", "Kidney_Day18Embryo5", "Kidney_Day18Embryo6", "Kidney_Day18Embryo7", "Kidney_Day18Embryo8", "Kidney_Day18Embryo9", "Liver_Adult1", "Liver_Adult2", "Liver_Day18Embryo1", "Liver_Day18Embryo3", "Liver_Day18Embryo4", "Liver_Day18Embryo5", "Liver_Day18Embryo6", "Liver_Day18Embryo7", "Liver_Day18Embryo8", "Liver_Day18Embryo9", "Testis_Adult1", "Testis_Day18Embryo1", "Testis_Day18Embryo2", "Testis_Day18Embryo3", "Testis_Day18Embryo4", "Testis_Day18Embryo5", "Testis_Day4.5Embryo1", "Testis_Day4.5Embryo2", "Testis_Day6Embryo1", "Testis_Day6Embryo2")
########################################################################
for(sp in splist){
print(sp)
samples=system(paste("ls ", pathExpression, sp, " | grep -v txt", sep=""), intern=T)
exons=read.table(paste(pathGeneOverlaps,sp,"/ExonBlocks_ExcludingOverlapOtherGenes_FilteredTranscripts_StringTie_Ensembl",release,".txt",sep=""), h=F, stringsAsFactors=F, sep="\t")
exons$Length=exons$V5-exons$V4+1
exonic.length=as.numeric(tapply(exons$Length, as.factor(exons$V1), sum))
names(exonic.length)=levels(as.factor(exons$V1))
unique.reads=list()
for(sample in samples){
print(sample)
this.reads=c()
try(this.reads<-read.table(paste(pathExpression, sp, "/", sample,"/ReadCounts_",type,".txt",sep=""), h=T, stringsAsFactors=F, sep="\t", quote="\""))
if(length(this.reads)>0){
## remove duplicates
unique.reads[[sample]]=this.reads[,"NbUniqueReads"]
names(unique.reads[[sample]])=this.reads[,"GeneID"]
}
}
samples=names(unique.reads) ## to exclude samples for which we didn't find expression values
samples.mainstrain=setdiff(samples, samples.otherstrain[[sp]])
print(paste(length(samples), "samples in total, ",length(samples.mainstrain), "samples for the main strain"))
## reorder values
exonic.length=exonic.length[intersect(names(exonic.length), names(unique.reads[[samples[1]]]))]
gene.order=names(exonic.length)
for(sample in samples){
print(paste("reordering", sample))
unique.reads[[sample]]=unique.reads[[sample]][gene.order]
}
## make data frames
unique.reads=as.data.frame(unique.reads)
## add gene id as a column
unique.reads$GeneID=gene.order
unique.reads$ExonicLength=exonic.length
unique.reads=unique.reads[,c("GeneID","ExonicLength", samples)]
## write output
write.table(unique.reads, file=paste(pathExpression, sp, "/AllSamples_UniqueReadCounts_", type,".txt",sep=""), row.names=F, col.names=T, quote=F, sep="\t")
## write output only for main strain samples
unique.reads.mainstrain=unique.reads[,c("GeneID","ExonicLength", samples.mainstrain)]
write.table(unique.reads.mainstrain, file=paste(pathExpression, sp, "/AllSamples_UniqueReadCounts_", type,"_MainStrain.txt",sep=""), row.names=F, col.names=T, quote=F, sep="\t")
}
########################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/image_processing.R
\name{image_reg_visium}
\alias{image_reg_visium}
\title{Image registration to 10x Visium fiducials}
\usage{
image_reg_visium(img)
}
\arguments{
\item{img}{image pointer}
}
\value{
}
\description{
Image registration to 10x Visium fiducials
}
\examples{
}
| /man/image_reg_visium.Rd | permissive | genesofeve/astRal | R | false | true | 353 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/image_processing.R
\name{image_reg_visium}
\alias{image_reg_visium}
\title{Image registration to 10x Visium fiducials}
\usage{
image_reg_visium(img)
}
\arguments{
\item{img}{image pointer}
}
\value{
}
\description{
Image registration to 10x Visium fiducials
}
\examples{
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.