blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ca05813f48fb453d691953707a5c51765c25b4f0 | c750c1991c8d0ed18b174dc72f3014fd35e5bd8c | /pkgs/oce/man/summary-satellite-method.Rd | 2f4a56537ed660061dfeefc479a08c9d55ac47b7 | [] | no_license | vaguiar/EDAV_Project_2017 | 4b190e66fe7a6b4078cfe1b875bccd9b5a594b25 | 288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f | refs/heads/base | 2021-01-23T02:39:36.272851 | 2017-05-01T23:21:03 | 2017-05-01T23:21:03 | 86,010,131 | 1 | 0 | null | 2017-05-01T23:43:04 | 2017-03-24T00:21:20 | HTML | UTF-8 | R | false | true | 608 | rd | summary-satellite-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/satellite.R
\docType{methods}
\name{summary,satellite-method}
\alias{summary,satellite-method}
\title{Summarize a satellite object}
\usage{
\S4method{summary}{satellite}(object, ...)
}
\arguments{
\item{object}{The object to be summarized.}
\item{...}{Ignored.}
}
\description{
Summarize a satellite object
}
\seealso{
Other things related to satellite data: \code{\link{g1sst-class}},
\code{\link{plot,satellite-method}},
\code{\link{read.g1sst}}, \code{\link{satellite-class}}
}
\author{
Dan Kelley
}
\concept{
satellite
}
|
9846d1a1a96d6a820c6dc96c64a34b435fa8f533 | 886631b1b55ff47db3e63ae11207899aea43c61d | /R/functions.R | a815b86bf62e642a975620bea31fb6a8436a83ca | [] | no_license | davidsovich/davidr | 159d0b05d135ac84523bd9e31caf00e0606bef96 | 0a3297a9d45b590d74f7ecba544a3d7d323ce85e | refs/heads/master | 2021-06-25T06:29:33.878916 | 2021-02-24T14:25:09 | 2021-02-24T14:25:09 | 204,074,805 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,454 | r | functions.R | # ---- Description --------------------------------------------------------------------------------
# This program defines functions for the analysis.
# ---- Date functions -----------------------------------------------------------------------------
# Convert an ISO-style "YYYY-MM-..." date string into its numeric YYYYMM
# code (e.g. "2019-04-23" -> 201904). Vectorized over `date`.
yyyymm_dashes = function(date) {
  year_part  = as.numeric(substr(date, 1, 4))
  month_part = as.numeric(substr(date, 6, 7))
  100 * year_part + month_part
}
# Number of calendar months from `beg_date` to `end_date`, both given as
# numeric YYYYMM codes; negative when end precedes beginning.
mdist = function(beg_date, end_date) {
  beg = as.numeric(beg_date)
  end = as.numeric(end_date)
  12 * (floor(end / 100) - floor(beg / 100)) + (end %% 100) - (beg %% 100)
}
# ---- Payment functions --------------------------------------------------------------------------
# Standard annuity payment for a loan of `amt` at nominal annual rate
# `annual_rate` (e.g. 0.06 for 6%), amortized over `months` equal payments.
# A (near-)zero rate falls back to straight-line amt / months.
# Vectorized over all three arguments.
calc_pmt = function(amt, annual_rate, months) {
  # Explicit zero test with the same default tolerance dplyr::near() uses;
  # the original called near() bare, which silently required dplyr to be
  # attached at run time.
  tol = sqrt(.Machine$double.eps)
  ifelse(
    abs(annual_rate) < tol,
    amt / months,
    amt * ((annual_rate / 12) / (1 - (1 + annual_rate / 12)^(-1 * months)))
  )
}
# ---- Cap and floor values -----------------------------------------------------------------------
# Winsorize `x` to the interval [floor, cap]. Infinite values map to NA and
# NA inputs stay NA. Vectorized; pmin/pmax replaces the original triple-nested
# ifelse() with the idiomatic clamp.
cap_vals = function(x, floor, cap) {
  ifelse(is.infinite(x), NA, pmin(pmax(x, floor), cap))
}
# ---- State functions ----------------------------------------------------------------------------
# Two-letter postal codes for the 50 US states plus the District of Columbia.
# datasets::state.abb is ordered alphabetically by full state name, which is
# exactly the ordering the original hand-written 51-element literal used.
us_states = function() {
  c(datasets::state.abb, "DC")
}
# ---- Lender functions ---------------------------------------------------------------------------
# Lookup table mapping ABS ticker symbols to human-readable lender names and
# lender types ("Captive" finance arm, "Bank", or other). Row-wise tribble()
# keeps each ticker/name/type triple on one line.
lender_names = function() {
  tibble::tribble(
    ~ticker,                     ~name,             ~type,
    "AHFC",                      "Honda",           "Captive",
    "Ally Bank",                 "Ally Bank",       "Bank",
    "BMW Bank of North America", "BMW",             "Captive",
    "CBS",                       "CarMax",          "Other",
    "CONA",                      "Capital One",     "Bank",
    "ESB",                       "Harley Davidson", "Captive",
    "Fifth Third Bank",          "Fifth Third",     "Bank",
    "Ford Credit",               "Ford",            "Captive",
    "GM FINANCIAL",              "GM",              "Captive",
    "HCA",                       "Hyundai",         "Captive",
    "MBFS USA LLC",              "Mercedes",        "Captive",
    "Mechanics Bank",            "Mechanics Bank",  "Bank",
    "NMAC",                      "Nissan",          "Captive",
    "SC",                        "Santander",       "Santander",
    "TMCC",                      "Toyota",          "Captive",
    "USAAFSB",                   "USAA",            "Bank",
    "VW Credit",                 "Volkswagen",      "Captive",
    "WORLD OMNI FINANCIAL CORP", "World Omni",      "Bank"
  )
}
# ---- Polynomial orders --------------------------------------------------------------------------
# Build the right-hand-side formula text for a degree-`p` polynomial in `x`
# fully interacted with the indicator `d`, e.g. for p = 2:
#   "x + I(d*x) + I(x^2) + I(d*x^2)"
# Vectorized elementwise over `x` and `d`; returns NULL invisibly when p < 1.
form_poly = function(x, d, p) {
  if (p < 1) return(invisible(NULL))
  out = paste0(x, " + I(", d, "*", x, ")")
  for (k in seq_len(p)[-1]) {
    out = paste0(out, " + I(", x, "^", k, ") + I(", d, "*", x, "^", k, ")")
  }
  out
}
# ---- Plot functions -----------------------------------------------------------------------------
# Scatter plot of `y` against the running variable `x` with a dashed vertical
# line at the cutoff `c`, optionally overlaying separate polynomial fits of
# order p_l (left) and p_r (right) on each side of zero.
# NOTE(review): the two smooths always split at x == 0, while the vertical
# line is drawn at `c`; the running variable is therefore assumed to be
# pre-centered at the cutoff.
srd_plot = function(df, y, x, xlab = "", ylab = "", c = 0, smooth = TRUE, p_l = 4, p_r = 4) {
  # Copy the requested columns into fixed names so the aes() below can be static.
  df[, "y"] = df[, y]
  df[, "x"] = df[, x]
  gg1 = ggplot2::ggplot(
    data = df
  ) +
    ggplot2::geom_point(
      ggplot2::aes(
        x = x,
        y = y
      ),
      color = "darkgray"
    ) +
    ggplot2::theme_bw() +
    ggplot2::theme(
      axis.text.x = ggplot2::element_text(
        angle = 45,
        hjust = 1,
        size = 12
      ),
      axis.text.y = ggplot2::element_text(
        size = 12
      ),
      panel.grid.major = ggplot2::element_blank(),
      panel.grid.minor = ggplot2::element_blank(),
      strip.background = ggplot2::element_rect(fill = NA, color = "black"),
      strip.text.x = ggplot2::element_text(
        size = 14
      )
    ) +
    ggplot2::labs(
      x = xlab,
      y = ylab
    ) +
    ggplot2::geom_vline(
      xintercept = c,
      color = "black",
      linetype = "dashed"
    )
  # Optional side-specific polynomial smooths (OLS, raw polynomials).
  if(smooth == TRUE) {
    gg1 = gg1 +
      ggplot2::geom_smooth(
        data = dplyr::filter(df, x < 0),
        ggplot2::aes(
          x = x,
          y = y
        ),
        method = "lm",
        formula = y ~ poly(x, p_l, raw = TRUE),
        color = "black",
        se = FALSE
      ) +
      ggplot2::geom_smooth(
        data = dplyr::filter(df, x >= 0),
        ggplot2::aes(
          x = x,
          y = y
        ),
        method = "lm",
        formula = y ~ poly(x, p_r, raw = TRUE),
        color = "black",
        se = FALSE
      )
  }
  # Return the ggplot object so callers can print or further modify it.
  gg1
}
# ---- Sharp regression discontinuity estimation --------------------------------------------------
# Assemble the lfe::felm() formula string for a sharp RD regression:
#   y ~ d + poly(x) interacted with d | fixed effects | 0 (no IV) | cluster
# `fe` and `cluster` may be omitted, in which case "0" is used.
form_srd = function(y, x, d, p, fe, cluster) {
  # Scalar if()/else rather than ifelse(): the condition is a single logical,
  # and ifelse() gives no guarantee about (non-)evaluation of the unused
  # branch. The original also passed `sep = ""` to paste0(), which has no
  # such argument (the "" was silently pasted as data); dropped here.
  fe_part = if (missing(fe)) "0" else paste0(fe, collapse = " + ")
  cl_part = if (missing(cluster)) "0" else paste0(cluster, collapse = " + ")
  paste0(
    y, " ~ ", d, " + ", form_poly(x = x, d = d, p = p),
    " | ", fe_part,
    " | 0 | ", cl_part
  )
}
# Estimate a sharp RD regression with lfe::felm().
#   df          - data (coerced to a plain data.frame)
#   y, x, d     - column names: outcome, running variable, treatment indicator
#   p           - polynomial order of x (each term interacted with d)
#   h           - optional bandwidth; rows with |x| > h are dropped
#   fe, cluster - optional fixed-effect / clustering variable names; their
#                 missingness propagates through to form_srd()
# Returns the fitted felm object; the coefficient on `d` is the RD estimate.
srd = function(df, y, x, d, p, h, fe, cluster) {
  df = as.data.frame(df)
  # Drop rows with a missing outcome; additionally apply the bandwidth if given.
  if(missing(h)) {
    df = df[!is.na(df[, y]), ]
  } else {
    df = df[(!is.na(df[, y])) & (abs(df[, x]) <= h), ]
  }
  lfe::felm(
    formula(form_srd(
      y = y,
      x = x,
      d = d,
      p = p,
      fe = fe,
      cluster = cluster
    )),
    data = df
  )
}
# Run srd() over every (bandwidth, polynomial) combination in h_vec x p_vec
# and collect the treatment-effect row of each fit into one tibble with
# columns: bandwidth, polynomial, coef, se, t_stat.
srd_matrix = function(df, y, x, d, p_vec, h_vec, fe, cluster) {
  n = 1
  for(h in h_vec) {
    for(p in p_vec) {
      temp_df = srd(
        df = df,
        y = y,
        x = x,
        d = d,
        p = p,
        h = h,
        fe = fe,
        cluster = cluster
      ) %>%
        broom::tidy(
          robust = TRUE
        ) %>%
        # Keep only the coefficient on the treatment indicator d.
        dplyr::filter(
          term == d
        ) %>%
        dplyr::mutate(
          bandwidth = h,
          polynomial = p
        ) %>%
        dplyr::select(
          bandwidth,
          polynomial,
          estimate,
          std.error,
          statistic
        ) %>%
        dplyr::rename(
          coef = estimate,
          se = std.error,
          t_stat = statistic
        )
      # First iteration initializes the accumulator; later ones append.
      if(n == 1) {
        srd_df = temp_df
      } else {
        srd_df = dplyr::bind_rows(
          srd_df,
          temp_df
        )
      }
      n = n + 1
    }
  }
  srd_df
}
# Assemble the felm() formula for a "stacked" sharp RD: one treatment
# indicator (ds) and running-variable polynomial (xs) per group, all entering
# one regression. `fe` and `cluster` may be omitted ("0" is used).
form_stacked_srd = function(y, xs, ds, p, fe, cluster) {
  # Scalar if()/else rather than ifelse() (see form_srd); the spurious
  # `sep = ""` argument to paste0() -- which has no sep formal -- is dropped.
  fe_part = if (missing(fe)) "0" else paste0(fe, collapse = " + ")
  cl_part = if (missing(cluster)) "0" else paste0(cluster, collapse = " + ")
  paste0(
    y, " ~ ",
    paste0(ds, collapse = " + "),
    " + ",
    # form_poly() is vectorized over xs/ds, yielding one polynomial per group.
    paste0(form_poly(x = xs, d = ds, p = p), collapse = " + "),
    " | ", fe_part,
    " | 0 | ", cl_part
  )
}
# "Stacked" sharp RD: builds a separate treatment indicator and running
# variable for every level of `group` (spaces stripped from level names when
# forming the new column names) and estimates them jointly via lfe::felm().
# `h` is an optional bandwidth on |x|; fe/cluster missingness propagates.
stacked_srd = function(df, y, x, d, group, p, h, fe, cluster) {
  if(missing(h)) {
    df = df[!is.na(df[, y]), ]
  } else {
    df = df[(!is.na(df[, y])) & (abs(df[, x]) <= h), ]
  }
  group_vals = unique(as.data.frame(df)[, group])
  # One d_<group> and x_<group> column per group level; zero outside the group.
  for(g in group_vals) {
    df[, paste0(d, "_", gsub(" ", "", g))] = df[, d] * as.numeric(df[, group] == g)
    df[, paste0(x, "_", gsub(" ", "", g))] = df[, x] * as.numeric(df[, group] == g)
  }
  ds = paste0(d, "_", gsub(" ", "", group_vals))
  xs = paste0(x, "_", gsub(" ", "", group_vals))
  lfe::felm(
    formula(form_stacked_srd(
      y = y,
      xs = xs,
      ds = ds,
      p = p,
      fe = fe,
      cluster = cluster
    )),
    data = df
  )
}
# Run stacked_srd() over every (bandwidth, polynomial) combination and
# collect the per-group treatment coefficients into one tibble with columns:
# bandwidth, polynomial, group, coef, se, t_stat.
stacked_srd_matrix = function(df, y, x, d, group, p_vec, h_vec, fe, cluster) {
  group_vals = unique(as.data.frame(df)[, group])
  # Names of the per-group treatment terms to pull from each fit.
  extracts = paste0(d, "_", gsub(" ", "", group_vals))
  n = 1
  for(h in h_vec) {
    for(p in p_vec) {
      temp_df = stacked_srd(
        df = df,
        y = y,
        x = x,
        d = d,
        group = group,
        p = p,
        h = h,
        fe = fe,
        cluster = cluster
      ) %>%
        broom::tidy(
          robust = TRUE
        ) %>%
        dplyr::filter(
          term %in% extracts
        ) %>%
        dplyr::mutate(
          bandwidth = h,
          polynomial = p,
          # Recover the group label by stripping the "d_" prefix off the term.
          group = gsub(paste0(d, "_"), "", term)
        ) %>%
        dplyr::select(
          bandwidth,
          polynomial,
          group,
          estimate,
          std.error,
          statistic
        ) %>%
        dplyr::rename(
          coef = estimate,
          se = std.error,
          t_stat = statistic
        )
      # First iteration initializes the accumulator; later ones append.
      if(n == 1) {
        srd_df = temp_df
      } else {
        srd_df = dplyr::bind_rows(
          srd_df,
          temp_df
        )
      }
      n = n + 1
    }
  }
  srd_df
}
# Nonparametric sharp RD estimate: local-polynomial fit via rdrobust with its
# default (data-driven) bandwidth selection. `y` and `x` are column names.
np_srd = function(df, y, x) {
  dat = as.data.frame(df)
  rdrobust::rdrobust(y = dat[, y], x = dat[, x])
}
# Flatten an rdrobust fit object into a one-row tibble: the "tau.us" point
# estimate, the "Conventional" standard error and z-statistic, and the
# left-side bandwidth ("h") actually used.
np_srd_output = function(np) {
  tibble::tibble(
    coef = np$Estimate[, "tau.us"],
    se = np$se["Conventional", ],
    t_stat = np$z["Conventional", ],
    bw = np$bws["h", "left"]
  )
}
# Fit np_srd() separately on the subset belonging to each discontinuity id in
# column `disc_id` (in sorted order), stack the one-row summaries, and append
# the matching id values as a disc_id column.
np_srd_matrix = function(df, y, x, disc_id) {
  n = 1
  df = as.data.frame(df)
  for(i in sort(unique(df[, disc_id]))) {
    temp_df = np_srd_output(
      np_srd(
        df = df[df[, disc_id] == i, ],
        y = y,
        x = x
      )
    )
    # First iteration initializes the accumulator; later ones append.
    if(n == 1) {
      np_srd_df = temp_df
    } else {
      np_srd_df = dplyr::bind_rows(
        np_srd_df,
        temp_df
      )
    }
    n = n + 1
  }
  # Rows were produced in sorted-id order, so this column lines up by position.
  dplyr::bind_cols(
    np_srd_df,
    tibble::tibble(
      disc_id = sort(unique(df[, disc_id]))
    )
  )
}
# ---- Fuzzy regression discontinuity estimation --------------------------------------------------
# Assemble the felm() formula for a fuzzy RD: the endogenous treatment `v` is
# instrumented by the cutoff indicator `d`, with a degree-`p` polynomial in x
# (interacted with d) and optional additive controls `ctrls`:
#   y ~ poly(x) [+ ctrls] | fe | (v ~ d) | cluster
form_frd = function(y, v, x, d, p, fe, cluster, ctrls) {
  # Scalar if()/else rather than ifelse() (see form_srd); the spurious
  # `sep = ""` passed to paste()/paste0() is dropped -- it was silently
  # treated as data, a no-op.
  ctrl_part = if (missing(ctrls)) "" else paste0(" + ", paste(ctrls, collapse = " + "))
  fe_part = if (missing(fe)) "0" else paste0(fe, collapse = " + ")
  cl_part = if (missing(cluster)) "0" else paste0(cluster, collapse = " + ")
  paste0(
    y, " ~ ",
    form_poly(x = x, d = d, p = p),
    ctrl_part,
    " | ", fe_part,
    " | ",
    "(", v, " ~ ", d, ")",
    " | ", cl_part
  )
}
# Estimate a fuzzy RD regression with lfe::felm(): the endogenous treatment
# column `v` is instrumented by the cutoff indicator `d` (see form_frd).
#   h           - optional bandwidth on |x|
#   fe, cluster, ctrls - optional; their missingness propagates to form_frd()
# Returns the felm object; the coefficient on `v`(fit) is the fuzzy RD estimate.
frd = function(df, y, v, x, d, p, h, fe, cluster, ctrls) {
  df = as.data.frame(df)
  # Drop rows with a missing outcome; additionally apply the bandwidth if given.
  if(missing(h)) {
    df = df[!is.na(df[, y]), ]
  } else {
    df = df[(!is.na(df[, y])) & (abs(df[, x]) <= h), ]
  }
  lfe::felm(
    formula(form_frd(
      y = y,
      v = v,
      x = x,
      d = d,
      p = p,
      fe = fe,
      cluster = cluster,
      ctrls = ctrls
    )),
    data = df
  )
}
# Run frd() over every (bandwidth, polynomial) combination and collect the
# instrumented-treatment coefficient into one tibble with columns:
# bandwidth, polynomial, coef, se, t_stat.
frd_matrix = function(df, y, v, x, d, p_vec, h_vec, fe, cluster, ctrls) {
  n = 1
  for(h in h_vec) {
    for(p in p_vec) {
      temp_df = frd(
        df = df,
        y = y,
        v = v,
        x = x,
        d = d,
        p = p,
        h = h,
        fe = fe,
        cluster = cluster,
        ctrls = ctrls
      ) %>%
        broom::tidy(
          robust = TRUE
        ) %>%
        # felm() labels instrumented variables as `v(fit)`; match that term.
        dplyr::filter(
          term == paste0("`", v, "(fit)`")
        ) %>%
        dplyr::mutate(
          bandwidth = h,
          polynomial = p
        ) %>%
        dplyr::select(
          bandwidth,
          polynomial,
          estimate,
          std.error,
          statistic
        ) %>%
        dplyr::rename(
          coef = estimate,
          se = std.error,
          t_stat = statistic
        )
      # First iteration initializes the accumulator; later ones append.
      if(n == 1) {
        srd_df = temp_df
      } else {
        srd_df = dplyr::bind_rows(
          srd_df,
          temp_df
        )
      }
      n = n + 1
    }
  }
  srd_df
}
# Assemble the felm() formula for a fuzzy RD with a treatment-by-covariate
# interaction: both `v` and cs*v are endogenous, instrumented by `d` and d*cs:
#   y ~ poly(x) + cs | fe | (v | I(cs * v) ~ d + I(d*cs)) | cluster
form_frd_cs = function(y, v, x, d, cs, p, fe, cluster) {
  # Scalar if()/else rather than ifelse() (see form_srd); the spurious
  # `sep = ""` argument to paste0() is dropped (no-op in the original).
  fe_part = if (missing(fe)) "0" else paste0(fe, collapse = " + ")
  cl_part = if (missing(cluster)) "0" else paste0(cluster, collapse = " + ")
  paste0(
    y, " ~ ",
    form_poly(x = x, d = d, p = p),
    " + ", cs,
    " | ", fe_part,
    " | ",
    "(", v, " | I(", cs, " * ", v, ")", " ~ ", d, " + I(", d, "*", cs, "))",
    " | ", cl_part
  )
}
# Estimate a fuzzy RD with a treatment-by-covariate interaction via
# lfe::felm(): `v` and cs*v are instrumented by d and d*cs (see form_frd_cs).
#   h           - optional bandwidth on |x|
#   fe, cluster - optional; their missingness propagates to form_frd_cs()
frd_cs = function(df, y, v, x, d, cs, p, h, fe, cluster) {
  df = as.data.frame(df)
  # Drop rows with a missing outcome; additionally apply the bandwidth if given.
  if(missing(h)) {
    df = df[!is.na(df[, y]), ]
  } else {
    df = df[(!is.na(df[, y])) & (abs(df[, x]) <= h), ]
  }
  lfe::felm(
    formula(form_frd_cs(
      y = y,
      v = v,
      x = x,
      d = d,
      cs = cs,
      p = p,
      fe = fe,
      cluster = cluster
    )),
    data = df
  )
}
|
21a0d6a4937a1ff33715d924b547dd4fe5e2aed8 | cdf2306e558058ce7f3704256b9732f1d03d1493 | /twitterOAuth.R | c3e2817c73f195aae27269e5e6620c81bc565692 | [] | no_license | charliegerm/sentiment | 65ed19cd7a8d8af2799482f5d8298afdcd8dfa2c | 82eaa6e6292011579c2a7c22dc9cf8b2a4f6280b | refs/heads/master | 2016-09-06T01:59:24.482579 | 2015-08-26T19:02:37 | 2015-08-26T19:02:37 | 41,431,832 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 343 | r | twitterOAuth.R | # setup twitter OAuth
# Registers Twitter API credentials with the twitteR package (via
# setup_twitter_oauth) so that subsequent API calls are authenticated.
# The "xxxxx" values are placeholders -- substitute real keys before use,
# and keep real credentials out of version control.
# NOTE(review): the name is spelled "oath" rather than "oauth"; renaming
# would break existing callers, so it is left unchanged.
twitter_oath <- function() {
  twitterConsumerKey <- "xxxxx"
  twitterConsumerSecret <- "xxxxx"
  twitterAccessToken <- "xxxxx-xxxxx"
  twitterAccessTokenSecret <- "xxxxx"
  setup_twitter_oauth(twitterConsumerKey, twitterConsumerSecret, twitterAccessToken, twitterAccessTokenSecret)
  # Called purely for its side effect; the NA return carries no information.
  return(NA)
}
|
bfbfb9823b8cae6127f0bc49122f3c3517d1da30 | 6108283fb5c4e55f6aa0ae2716b449cbf274bbde | /nelsonsiegel.R | 8497b0efb71e237fcc3aeecd5ae473973ed342b1 | [
"MIT"
] | permissive | vmurik/relval | 32e0353078184df39abc9f427263c8255df74d7f | c6f429e71e55c71c0983488d459dff049cab0ee8 | refs/heads/master | 2020-07-03T10:53:42.421139 | 2016-11-20T03:43:10 | 2016-11-20T03:43:10 | 74,178,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,893 | r | nelsonsiegel.R | NelsonSiegel =
function(Yield, Maturity)
{ # A function written by Diethelm Wuertz
# Description:
# Fit the Yield Curve by the Nelson-Siegel Method
#
# Arguments:
#   Yield    - numeric vector of observed yields
#   Maturity - numeric vector of maturities (same length as Yield)
#
# Value:
#   The nlminb() fit object, augmented with $start (the start vector) and
#   with fit$par named c("beta1", "beta2", "beta3", "tau"). Also draws a
#   scatter/line plot as a side effect.
#
# Details:
# This function finds a global solution. The start values for the
# betas are solved exactly as a function of tau using OLS.
#
# Copyright:
# Diethelm Wuertz, (c) 2004 fBonds
#
# Source:
# Partial copy from 'fBonds' from 'Rmetrics' (unpublished).
# FUNCTION:
# Find Optimal Start Solution by OLS of beta's vs. Yields:
# Each observed maturity is tried as a candidate decay parameter tau; the
# 3x3 normal equations below solve the OLS betas for that tau exactly.
n = length(Maturity)
gmin = 1.0e99
for (i in 1:n) {
tau = Maturity[i]
x = Maturity/tau
a = matrix(rep(NA, times = 9), nrow = 3)
a[1,1] = 1
a[1,2] = a[2,1] = mean((1-exp(-x))/x)
a[1,3] = a[3,1] = mean((1-exp(-x))/x - exp(-x))
a[2,2] = mean( ((1-exp(-x))/x)^2 )
a[2,3] = a[3,2] = mean(((1-exp(-x))/x)*((1-exp(-x))/x-exp(-x)))
a[3,3] = mean(((1-exp(-x))/x - exp(-x))^2)
b = c(
mean ( Yield ),
mean ( Yield * ((1-exp(-x))/x)),
mean ( Yield * (((1-exp(-x))/x - exp(-x)))))
beta = solve(a, b)
yfit = beta[1] + beta[2]*exp(-x) + beta[3]*x*exp(-x)
fmin = sum( (Yield-yfit)^2 )
# Keep the candidate with the smallest sum of squared residuals.
if (fmin < gmin) {
gmin = fmin
gvec = c(beta, tau)
}
}
# Function to be optimized: Nelson-Siegel curve with free decay x[4].
fx <- function(Maturity, x) {
x[1] + x[2] * (1-exp(-Maturity/x[4]))/(Maturity/x[4]) +
x[3] *
((1-exp(-Maturity/x[4]))/(Maturity/x[4])-exp(-Maturity/x[4]))
}
func <- function(x) { sum( (Yield - fx(Maturity, x))^2 ) }
# Optimize:
fit = nlminb(objective = func, start = gvec)
fit$start = gvec
names(fit$par) = c("beta1", "beta2", "beta3", "tau")
# Plot Curve:
# NOTE(review): the plotted curve uses the START vector gvec, not fit$par;
# confirm whether the fitted parameters were intended here.
yfit = fx(Maturity, gvec)
plot(Maturity, Yield, ylim = c(min(c(Yield, yfit)), max(c(Yield,
yfit))),
pch = 19, cex = 0.5, main = "Nelson-Siegel" )
lines(Maturity, yfit, col = "steelblue")
# Return Value:
fit
}
NelsonSiegel2 =
function(Yield, Maturity, tau)
{ # A function written by Diethelm Wuertz
# Description:
#   Fit the yield curve by the Nelson-Siegel method with the decay
#   parameter held FIXED at the user-supplied `tau`; only the three beta
#   coefficients are free in the optimization.
#
# Arguments:
#   Yield    - numeric vector of observed yields
#   Maturity - numeric vector of maturities (same length as Yield)
#   tau      - fixed decay parameter of the Nelson-Siegel basis
#
# Value:
#   The nlminb() fit object, augmented with $start and with fit$par named
#   c("beta1", "beta2", "beta3", "tau"). Also draws a plot as a side effect.
#
# Bug fix (review):
#   The start-value grid search previously overwrote the `tau` argument
#   (tau = Maturity[i]), so fx() always used the LAST maturity instead of
#   the supplied tau. The loop now uses a local variable `tau_i`, leaving
#   the argument untouched.
#
# Source:
#   Partial copy from 'fBonds' from 'Rmetrics' (unpublished).
# FUNCTION:
# Find an optimal start solution by OLS of betas vs. yields, scanning each
# observed maturity as a candidate decay value:
n = length(Maturity)
gmin = 1.0e99
for (i in 1:n) {
tau_i = Maturity[i]
x = Maturity/tau_i
a = matrix(rep(NA, times = 9), nrow = 3)
a[1,1] = 1
a[1,2] = a[2,1] = mean((1-exp(-x))/x)
a[1,3] = a[3,1] = mean((1-exp(-x))/x - exp(-x))
a[2,2] = mean( ((1-exp(-x))/x)^2 )
a[2,3] = a[3,2] = mean(((1-exp(-x))/x)*((1-exp(-x))/x-exp(-x)))
a[3,3] = mean(((1-exp(-x))/x - exp(-x))^2)
b = c(
mean ( Yield ),
mean ( Yield * ((1-exp(-x))/x)),
mean ( Yield * (((1-exp(-x))/x - exp(-x)))))
beta = solve(a, b)
yfit = beta[1] + beta[2]*exp(-x) + beta[3]*x*exp(-x)
fmin = sum( (Yield-yfit)^2 )
if (fmin < gmin) {
gmin = fmin
gvec = c(beta, tau_i)
}
}
# Objective: Nelson-Siegel curve with the FIXED argument tau; x[4] is
# carried along from the start vector but has no effect on the fit.
fx <- function(Maturity, x) {
x[1] + x[2] * (1-exp(-Maturity/tau))/(Maturity/tau) +
x[3] *
((1-exp(-Maturity/tau))/(Maturity/tau)-exp(-Maturity/tau))
}
func <- function(x) { sum( (Yield - fx(Maturity, x))^2 ) }
# Optimize:
fit = nlminb(objective = func, start = gvec)
fit$start = gvec
names(fit$par) = c("beta1", "beta2", "beta3", "tau")
# Plot Curve (NOTE(review): plotted with the start vector gvec, not
# fit$par, mirroring the original behavior):
yfit = fx(Maturity, gvec)
plot(Maturity, Yield, ylim = c(min(c(Yield, yfit)), max(c(Yield,
yfit))),
pch = 19, cex = 0.5, main = "Nelson-Siegel" )
lines(Maturity, yfit, col = "steelblue")
# Return Value:
fit
}
# Fit a Nelson-Siegel curve (decay fixed at tau = 10) to a zoo series of
# rates. Maturities are measured in 30-day months relative to `today`; rows
# listed in `excl` are dropped before fitting.
nsplot <- function(rates, today = chron("8/12/08"), excl = NULL){
  startcurve <- cbind(coredata(rates), (index(rates) - today) / 30)
  if (!is.null(excl)) {
    startcurve <- startcurve[-excl, , drop = FALSE]
  }
  NelsonSiegel2(startcurve[, 1], startcurve[, 2], tau = 10)
}
# nsplot(zeroes$startval, excl = c(1, 2, 4))
# nsplot(zeroes$zeroes.annual)
|
882a1e1001043402608a9848bd14b65802fe174f | a350afcce86ace1839a0d91a2a73d65776a32aa4 | /complete.R | 3c01427268f39e0b17498add4c6c96da6de86ba5 | [] | no_license | ErickDiaz/Data_Science_Assignment_1 | 432a34b1eb46ea46859e9769991be4fd3f9aa7b0 | 5f19fc2902cf9addba59cd52ed462defc56b184b | refs/heads/master | 2016-09-09T19:11:11.193805 | 2015-02-14T04:22:08 | 2015-02-14T04:22:08 | 30,759,190 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,651 | r | complete.R | complete <- function(directory, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
archivos <- list.files(directory,pattern = "\\.csv$")
##print(archivos)
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## *Recorriendo la lista de id's para formar el data set
for (i in id){
fileName <- c(directory, archivos[i])
##print(paste(fileName, collapse="/"))
rawData <- read.csv(paste(fileName, collapse="/"))
temp1 <- subset(rawData,!is.na(rawData[["sulfate"]]))
temp2 <- subset(temp1,!is.na(temp1[["nitrate"]]))
##print(nrow(temp2))
if (!exists("countVector")){
countVector <- nrow(temp2)
idVector <- i
}else{
countVector <- c( countVector, nrow(temp2) )
idVector <- c( idVector, i)
}
dfResult <- data.frame(idVector,countVector)
}
colnames(dfResult) <- c("id","nobs")
## ** RESULTADO **
dfResult
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
} |
1acea0ea5d9d6335436448bdfa432a6dc56662e7 | 7e6f176f3bcdb16790d7b37f1df2e01ade36dff6 | /mrs_simulation.R | f40cbf99d581919b60fe14562e47e8cf5bf5ae54 | [] | no_license | ejvandenoord/trauma_prediction | a0cef1e9c785563aa28775521f3178c4df6e7b0d | f5f6d23dba654318a5915314200675cd422d8e24 | refs/heads/master | 2022-12-08T12:53:36.169862 | 2020-08-24T11:50:32 | 2020-08-24T11:50:32 | 289,912,168 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,135 | r | mrs_simulation.R |
rm(list=ls())
# --- Configuration -------------------------------------------------------
# NOTE(review): hard-coded Windows/Google Drive paths; the script only runs
# as-is on the author's machine.
drive = "G:\\My Drive\\"
work_dir = paste0(drive,"SOP\\methylation\\gsms\\exposure\\papers\\charlie\\paper\\kfold_simulation\\")
source(paste0(work_dir,"functions_mrs_simulation.R"))
library(MASS)
library(glmnet)
set.seed(1)
# Simulation dimensions: subjects, predictors, and number of replicates.
n_sample = 500
n_pred = 1000
n_sims = 10000
n_folds = 10 # number of folds for EN analysis, "loo" for leave one out
alpha = 0 # alpha = 0 is ridge regression that will keep all variables in the mode and alpha is 1 is lasso select the most predictive sites (issue is that it may select different sites for different folds but you could run it once on all sample to get a single set but keep the cross validation as an estimate of well that set predicts).
family = "gaussian" # use "binomial" for logistic regression and "gaussian" for normal regression
n_effects = 0 # number of truly associated predictors (0 = pure null simulation)
r = 0.0 # size of the effects
if (n_pred<n_effects) stop("Cannot have more effects than predictors")
analysis_label = paste0("scale_nSims",n_sims,"_nfolds",n_folds,"_alpha",alpha)
# --- Simulation loop -----------------------------------------------------
# Each replicate draws an independent outcome and predictor matrix, then
# scores cross-validated prediction via run_regression() (defined in the
# sourced functions_mrs_simulation.R).
for (i in 1:n_sims) { # i = 1
  cat(paste0("Simulation: ", i, "\n"))
  outcome = rnorm( n_sample )
  sigma = diag(rep(1,n_pred),n_pred,n_pred)
  if ( n_effects==0 ) {
    pred_data = mvrnorm(n_sample,rep(0,n_pred),sigma)
  } else {
    pred_data = mvrnorm(n_sample,rep(0,n_pred),sigma)
    # Give the first n_effects columns correlation r with the outcome.
    pred_data[,1:n_effects] = r*outcome + sqrt(1-r^2)*pred_data[,1:n_effects]
  }
  row.names(pred_data) = paste0("sample_",1:n_sample)
  # NOTE(review): growing `results` with rbind() every iteration is O(n^2);
  # preallocating would speed up large n_sims runs.
  if (i==1) results = run_regression(outcome,pred_data,n_folds,alpha,family) else
    results = rbind(results,run_regression(outcome,pred_data,n_folds,alpha,family) )
}
# --- Summaries and output ------------------------------------------------
summary( results )
# One-sided test that the mean cross-validated estimate exceeds zero.
t.test(results[,"estimate"], mu = 0, alternative = "greater")
write.csv( results,paste0(work_dir,"result_",analysis_label,".csv"),row.names=T,quote=F)
par(mar=c(6,6,2,2))
opar=par(ps=15)
hist( results[,"estimate"],freq = T, breaks = 50,cex=5, main="", xlab = "Predictive power" )
|
c5512fcc3f9a2c00e533a16f903ad2cb48b50c68 | 08313c529a4d1ffecb9f6463534a537d670d775c | /man/rev.comp.Rd | 8d55412783ee80064ff0e63ee07ab1a92491e1a8 | [] | no_license | emdann/hexamerModel | 5a95ac93852915f51cb4f11917919d9566586e00 | 5c6bf140d9841fa0dcf572b8acf789c9b82caaf0 | refs/heads/master | 2021-09-21T21:03:45.694694 | 2018-08-31T12:22:46 | 2018-08-31T12:22:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 532 | rd | rev.comp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binding_model_functions.r
\name{rev.comp}
\alias{rev.comp}
\title{Reverse and/or complement sequence}
\usage{
\method{rev}{comp}(x, compl = TRUE, rev = TRUE)
}
\arguments{
\item{x}{string of DNA sequence}
\item{compl}{logical indicating whether to take the complement of sequence x}
\item{rev}{logical indicating whether to take the reverse of sequence x}
}
\value{
string of transformed sequence
}
\description{
Reverse and/or complement sequence
}
|
62faa87eab554bfe633ec5779f822acc0403f1e2 | 155949a1cfdc33d011fd7684d534cf041be71f7a | /Assignment Week 4 - Upload.R | 58e9082f7afcf8a065147715697d16af779b78c1 | [] | no_license | Nossk/Data-Science-Coursera | 7e7770a3c35363c943b3d84371498f7cefccf286 | 53769b2c0056db1bebc34dd45c2de7020bcbbeab | refs/heads/master | 2020-03-23T15:30:44.420228 | 2018-10-20T16:41:10 | 2018-10-20T16:41:10 | 141,753,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,277 | r | Assignment Week 4 - Upload.R |
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
# Quick interactive exploration of the raw outcome table.
head(outcome)
ncol(outcome)
nrow(outcome)
names(outcome)
library(data.table)
# Column 11 (presumably a 30-day mortality rate -- confirm against the data
# dictionary) was read as character; coercing to numeric turns non-numeric
# entries such as "Not Available" into NA with a warning.
outcome[, 11] <- as.numeric(outcome[, 11])
hist(outcome[, 11])
# Return the name of the hospital in `state` with the lowest 30-day death
# rate for `outcome` ("heart attack", "heart failure", or "pneumonia");
# ties are broken alphabetically by hospital name. Reads the CSV from the
# current working directory on every call.
best <- function(state, outcome) {
  ##Read outcome data
  out_df <- data.table::fread('outcome-of-care-measures.csv')
  outcome <- tolower(outcome)
  chosen_state <- state
  ##Check that state and outcome are valid
  if (!chosen_state %in% unique(out_df[["State"]])) {
    stop('invalid state!')
  }
  if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
    stop('invalid outcome!')
  }
  # Strip the long mortality-rate prefix and lower-case all column names so
  # the outcome string matches its column directly.
  setnames(out_df
           , tolower(sapply(colnames(out_df), gsub, pattern = "^Hospital 30-Day Death \\(Mortality\\) Rates from ", replacement = "" ))
  )
  out_df <- out_df[state == chosen_state]
  ##Return hospital name in that state with the lowest 30-day death rate
  col_indices <- grep(paste0("hospital name|state|^",outcome), colnames(out_df))
  out_df <- out_df[, .SD ,.SDcols = col_indices]
  # Non-numeric entries (e.g. "Not Available") become NA and are dropped.
  out_df[, outcome] <- out_df[, as.numeric(get(outcome))]
  out_df <- out_df[complete.cases(out_df),]
  # Sort by rate, then alphabetically for ties; take the first row.
  out_df <- out_df[order(get(outcome), `hospital name`)]
  return(out_df[, "hospital name"][1])
}
# Return the hospital in `state` holding rank `num` on the 30-day death rate
# for `outcome`. `num` may be "best", "worst", or a rank number; an
# out-of-range rank yields NA via data.table row indexing.
rankhospital <- function(state, outcome, num = "best") {
  ##Read outcome data
  out_df <- data.table::fread('outcome-of-care-measures.csv')
  outcome <- tolower(outcome)
  chosen_state <- state
  ##Check that state and outcome are valid
  if (!chosen_state %in% unique(out_df[["State"]])) {
    stop('invalid state!')
  }
  if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
    stop('invalid outcome!')
  }
  # Strip the mortality-rate prefix and lower-case column names (see best()).
  setnames(out_df
           , tolower(sapply(colnames(out_df), gsub, pattern = "^Hospital 30-Day Death \\(Mortality\\) Rates from ", replacement = "" ))
  )
  out_df <- out_df[state == chosen_state]
  ##Return hospital name in that state with the given rank 30-day death rate
  col_indices <- grep(paste0("hospital name|state|^",outcome), colnames(out_df))
  out_df <- out_df[, .SD ,.SDcols = col_indices]
  out_df[, outcome] <- out_df[, as.numeric(get(outcome))]
  out_df <- out_df[complete.cases(out_df),]
  # Sort by rate, alphabetical tie-break, then attach an explicit rank.
  out_df <- out_df[order(get(outcome), `hospital name`)]
  out_df <- out_df[, .(`hospital name` = `hospital name`, state = state, rate = get(outcome), Rank = .I)]
  if (num == "best"){
    return(out_df[1,`hospital name`])
  }
  if (num == "worst"){
    return(out_df[.N,`hospital name`])
  }
  return(out_df[num,`hospital name`])
}
# For every state, return the hospital holding rank `num` ("best", "worst",
# or a number) on the 30-day death rate for `outcome`, as a data.table with
# columns state and hospital (or `hospital name` rows for numeric ranks).
rankall <- function(outcome, num = "best") {
  ##Read outcome data
  out_df <- data.table::fread('outcome-of-care-measures.csv')
  outcome <- tolower(outcome)
  ##Check that state and outcome are valid
  if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
    stop('invalid outcome')
  }
  setnames(out_df
           , tolower(sapply(colnames(out_df), gsub, pattern = "^Hospital 30-Day Death \\(Mortality\\) Rates from ", replacement = "" ))
  )
  col_indices <- grep(paste0("hospital name|state|^",outcome), colnames(out_df))
  ##For each state, find the hospital of the given rank
  # NOTE(review): the setnames()/grep() pair below repeats the two lines
  # above; the second pass is a harmless no-op (the prefix is already gone).
  setnames(out_df
           , tolower(sapply(colnames(out_df), gsub, pattern = "^Hospital 30-Day Death \\(Mortality\\) Rates from ", replacement = "" )))
  col_indices <- grep(paste0("hospital name|state|^",outcome), colnames(out_df))
  out_df <- out_df[, .SD ,.SDcols = col_indices]
  out_df[, outcome] <- out_df[, as.numeric(get(outcome))]
  #Return a dataframe with the hospital names and the (abbreviated) state name
  if (num == "best"){
    return(out_df[order(state, get(outcome), `hospital name`)
                  , .(hospital = head(`hospital name`, 1))
                  , by = state])
  }
  if (num == "worst"){
    # Ascending order + tail() per state picks the highest rate; ties go to
    # the alphabetically last name.
    return(out_df[order(get(outcome), `hospital name`)
                  , .(hospital = tail(`hospital name`, 1))
                  , by = state])
  }
  return(out_df[order(state, get(outcome), `hospital name`)
                , head(.SD,num)
                , by = state, .SDcols = c("hospital name") ])
}
# Quiz answers: each call below produces the answer to one question of the
# week-4 programming assignment (requires data.table to be attached and the
# CSV in the working directory).
#Import library(data.table)
#Q1
best("SC", "heart attack")
#Q2
best("NY", "pneumonia")
#Q3
best("AK", "pneumonia")
#Q4
rankhospital("NC", "heart attack", "worst")
#Q5
rankhospital("WA", "heart attack", 7)
#Q6
rankhospital("TX", "pneumonia", 10)
#Q7
rankhospital("NY", "heart attack", 7)
#Q8
r <- rankall("heart attack", 4)
as.character(subset(r, state == "HI")$hospital)
#Q9
r <- rankall("pneumonia", "worst")
as.character(subset(r, state == "NJ")$hospital)
#Q10
r <- rankall("heart failure", 10)
as.character(subset(r, state == "NV")$hospital)
|
12c48c21a5ad8b41e1b899cd9e87266ad97a30af | 03f7b233c6f745b4dd654fa57c7414fc0e4fa023 | /2019-04-23/tidytuesday_anime.R | b8365dcb9813be82c57a11832d2d1dfc18b9181c | [] | no_license | metabdel/tidytuesday | c15bc11150a8352375b0615cb3c6b751f6a26fef | cf55d8b5cb277deb38e0095bc2902e77f6cd6e2d | refs/heads/master | 2020-08-29T11:19:56.217171 | 2019-09-16T17:11:37 | 2019-09-16T17:11:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,373 | r | tidytuesday_anime.R | library(tidyverse)
library(hrbrthemes)
library(lubridate)
theme_set(theme_ipsum_rc())
# Pull the TidyTuesday 2019-04-23 anime dataset straight from GitHub.
tidy_anime <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-04-23/tidy_anime.csv")
# Gantt chart of most 20 most famous anime
# NOTE(review): `anime_data` below is computed but never used afterwards --
# the planned Gantt chart appears to have been abandoned.
anime_data <- tidy_anime %>%
  nest(-name) %>%
  mutate(
    score = data %>% map_dbl(~ mean(.$score, na.rm = TRUE))
  ) %>%
  arrange(desc(score)) %>%
  slice(1:20) %>%
  unnest() %>%
  drop_na(start_date, end_date) %>%
  group_by(name) %>%
  slice(1:1) %>%
  ungroup() %>%
  select(name, score, start_date, end_date)
# Derive a release year from each title's start date.
anime_tidied <- tidy_anime %>%
  mutate(
    year = start_date %>% year
  )
# Box plots of user scores by release year, with the raw points jittered
# behind them.
anime_tidied %>%
  ggplot(aes(x = year, y = score, group = year)) +
  geom_jitter(color = "#ececec") +
  geom_boxplot(outlier.shape = NA, outlier.size = 0, coef = 0,
               alpha = .9, fill = "#F5E1DA") +
  labs(
    title = "Anime Movies are getting better",
    subtitle = "Since the 1960s, Anime Movies have continuously improved in terms of user ratings"
  ) +
  theme(
    text = element_text(family = "FreeSans"),
    plot.title = element_text(family = "FreeSans"),
    plot.subtitle = element_text(family = "FreeSans"),
    axis.title.y = element_text(margin = margin(r = 10),
                                family = "FreeSans"),
    axis.title.x = element_text(family = "FreeSans")
  )
|
b308ac7a060f90614fc7fc8ffe601cc258c4b8b6 | 88e835c66a8f260bb856ebd59cc6a6671e7ef38e | /R/check_set_predictors.R | acbbe6576e776b993cf5fdaacc7c6dce47a1dd13 | [] | no_license | cran/brea | 67310f64b6db5f0440ac6d92e8b34aefa23cf80a | 4989155b3ed41fcd154f58f72d5015b6215d3fd8 | refs/heads/master | 2020-05-23T08:17:35.910449 | 2017-10-09T02:49:15 | 2017-10-09T02:49:15 | 70,241,022 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,672 | r | check_set_predictors.R |
# This function checks that the predictor matrix/dataframe x and the vector of
# maximum discretized covariate values K (if supplied) are valid, and then
# returns versions of x and K of type integer in a list.
#' Validate and normalize the predictor data and level-count vector.
#'
#' Accepts either a matrix of positive whole numbers or a data frame whose
#' columns are factors or positive whole-number vectors, converts everything
#' to integer storage, derives K (the per-column maximum level) when it is
#' NULL, and errors on any invalid input. Relies on the helper all_whole()
#' (defined elsewhere in this package) to test for whole-number values.
#'
#' @param x predictor matrix or data frame
#' @param K NULL, or a whole-number vector of length ncol(x)
#' @return list(x, K) with x an integer matrix and K an integer vector
check_set_predictors <- function(x,K) {
  # Case 1: x is a matrix of positive integers ---------------------------------
  if (is.matrix(x) && all_whole(x) && all(x > .5)) {
    # convert x from floating points to integers if necessary:
    if (!is.integer(x)) {
      x <- round(x)
      storage.mode(x) <- "integer"
    }
    # set K if needed, and otherwise verify K is a vector of integers:
    if (is.null(K)) {
      K <- apply(x,2,max)
    } else if (!(is.vector(K) && length(K)==ncol(x) && all_whole(K))) {
      stop("K must be NULL or a vector of whole numbers of length ncol(x)")
    }
    # convert K from floating points to integers if necessary:
    if (!is.integer(K)) K <- as.integer(round(K))
  # Case 2: x is a dataframe with integer or factor columns -------------------
  } else if (is.data.frame(x)) {
    # first make sure K is potentially valid:
    if (!(is.null(K) || (is.vector(K) && length(K)==ncol(x) && all_whole(K)))) {
      stop("K must be NULL or a vector of whole numbers of length ncol(x)")
    }
    # if K is a floating point vector, convert it to integer type:
    if (!is.null(K) && !is.integer(K)) K <- as.integer(round(K))
    # create variables to store new x and K values:
    x.new <- matrix(0L,nrow(x),ncol(x))
    K.new <- integer(ncol(x))
    # loop over columns m of x:
    for (m in seq_len(ncol(x))) {
      # use shorter variable name for x[[m]] for convenience:
      xm <- x[[m]]
      # case i: the predictor is a factor:
      if (is.factor(xm)) {
        # store integer factor codes into the new integer matrix:
        x.new[,m] <- as.integer(xm)
        # calculate K[m] if K is null; otherwise use supplied value:
        if (is.null(K)) {
          K.new[m] <- nlevels(xm)
        } else {
          K.new[m] <- K[m]
        }
      # case ii: the predictor is a vector of positive integers:
      } else if (is.vector(xm) && all_whole(xm) && all(xm > .5)) {
        # convert xm from floating points to integers if necessary:
        if (!is.integer(xm)) xm <- as.integer(round(xm))
        # store the integer-type variable xm into the new integer matrix:
        x.new[,m] <- xm
        # calculate K[m] if K is null; otherwise use supplied value:
        if (is.null(K)) {
          K.new[m] <- max(xm)
        } else {
          K.new[m] <- K[m]
        }
      # otherwise the predictor was an invalid type:
      } else stop("columns of x must be factors or positive integer vectors")
    } # end loop over columns m of x
    # replace x and K with their new values:
    x <- x.new
    K <- K.new
  # otherwise x was of neither allowed type ------------------------------------
  } else {
    stop(paste("x must be a matrix of positive whole numbers",
               "or a dataframe with whole number or factor columns"))
  }
  # verify K is valid and return x and K ---------------------------------------
  # make sure there are not covariate values larger than corresponding K:
  if (any(K < apply(x,2,max))) {
    stop("K values cannot be smaller than corresponding x variable values")
  }
  # make sure each covariate has (potentially) more than one value:
  if (any(K < 2L)) stop("at least one covariate has only one level")
  # return the new x and K values:
  list(x,K)
}
|
fa5a1febc059fa18fa6b4022216dbb9793b911eb | 7fb600847709ec1c7730c6e5ac418d6498191dc7 | /crested_myna/cleaning_ebird_data.R | 93c3be7727ea0e09704e2033c11c673686181049 | [
"MIT"
] | permissive | federiva/Monkeys-Working | d5ab2fbb888b10b8010bd5b2dbade54128ec5f7d | ba7e995e26ab321c67671772cf2e1635c18d0ec8 | refs/heads/master | 2021-07-17T08:21:21.331390 | 2021-07-12T18:30:40 | 2021-07-12T18:30:40 | 136,230,571 | 0 | 2 | MIT | 2021-07-12T18:30:41 | 2018-06-05T20:22:50 | R | UTF-8 | R | false | false | 3,735 | r | cleaning_ebird_data.R | # Data from ebird is cleaned and tidied
# Clean and tidy Crested Myna occurrence records exported from eBird.
# Output (written at the end of the original script): a 4-column table of
# country / latitude / longitude / year for records in the Americas, with
# duplicated sampling events removed.

# Loading packages
library(auk)
library(dplyr)
library(lubridate)

# Reading the ebird data using auk
acdata <- read_ebd("./ebd_cremyn_relMay-2018.txt")
head(acdata)
summary(acdata)

# Exploring data and checking whether there are missing data
# Is all the data about the Crested Myna or are there any other species?
unique(acdata$common_name) # "Crested Myna"
unique(acdata$scientific_name) # "Acridotheres cristatellus"

# Are there any missing data of date, latitude, longitude or country?
any(is.na(acdata$observation_date))
any(is.na(acdata$latitude))
any(is.na(acdata$longitude))
# FIX: this line previously duplicated the longitude check; the comment above
# lists four fields, so the fourth check should cover 'country'.
any(is.na(acdata$country))

# Observation date as Date
acdata$observation_date <- as.Date(acdata$observation_date)
# Adding year column
acdata$year <- year(acdata$observation_date)
# Combine date and start time into a numeric timestamp (seconds) used later
# to compare consecutive checklists on the same day/place.
acdata <- acdata %>%
  mutate(., starting_time = as.numeric(as_datetime(paste(observation_date, time_observations_started, sep = " "))))
# Observation count as numeric (non-numeric entries such as "X" become NA)
acdata$observation_count <- as.numeric(acdata$observation_count)

# Which countries do the observations belong to?
acdata$country <- as.factor(acdata$country)
unique(acdata$country)
unique(acdata$country_code)

# Countries of America
countries_america <- c("Argentina", "Canada", "United States", "Uruguay")
# Filtering data for American countries
amdata <- acdata %>%
  filter(., country %in% countries_america)
unique(amdata$country)
nrow(amdata) # 1747
summary(amdata)

# Are there observations registered in e-bird as the same sampling event?
observers_group <- as.factor(amdata$group_identifier)
observers_group <- observers_group[!is.na(observers_group)]
length(observers_group) == length(unique(observers_group)) # TRUE
table(observers_group)

############ Looking for records that are probably the same, but they are registered as different observations
# Are there any observations from the same day and for the same country?
amdata_more_samplings <- amdata %>%
  group_by(., country) %>%
  add_count(., observation_date) %>%
  rename(sampling_frequency = n) %>%
  filter(., sampling_frequency > 1)

# If they were on the same day, were they in the same place?
# NOTE(review): paste(longitude, latitude, sep = "") concatenates the two
# coordinates with no separator; a separator (e.g. "_") would make key
# collisions impossible. Left as-is to preserve the original grouping keys.
amdata_same_place <- amdata_more_samplings %>%
  group_by(., country, observation_date) %>%
  mutate(., latlon=paste(longitude, latitude, sep = "")) %>%
  add_count(., latlon, name = 'latlon_count') %>%
  filter(., latlon_count>1)

# When did the observations start? Flag a checklist as the same sampling
# event when it begins within the duration of the first checklist at the
# same place/day.
amdata_same_sampling_time <- amdata_same_place %>%
  group_by(., country, observation_date, latlon) %>%
  arrange(., starting_time) %>%
  mutate(., duration_first_sampling = first(duration_minutes)) %>%
  mutate(., diff_time = abs(c(NA,diff(starting_time)))/60)
# FIX: the original third condition was `diff_time == NA ~ NA`, which can
# never be TRUE (comparison with NA yields NA); unmatched rows fall through
# to case_when's default NA either way, so behavior is unchanged, but
# is.na() states the intent correctly.
amdata_same_sampling_time <- amdata_same_sampling_time %>%
  mutate(., same_sampling_event=case_when(diff_time<duration_first_sampling ~ TRUE,
                                          diff_time >= duration_first_sampling ~ FALSE,
                                          is.na(diff_time) ~ NA))
amdata_same_sampling_time <- amdata_same_sampling_time %>%
  filter(., same_sampling_event==TRUE)

# Finally, the ids from duplicated observations are obtained
id_duplicated_obs <- amdata_same_sampling_time$global_unique_identifier
# The rows with those ids are removed from the table
cleaned_amdata <- amdata %>%
  filter(., !global_unique_identifier %in% id_duplicated_obs)

# Only some columns are kept
dfcleaned_amdata <- as.data.frame(cleaned_amdata)
colnames(dfcleaned_amdata)
dfamdata_main_colums <- dfcleaned_amdata[ ,c("country","latitude", "longitude","year")]
head(dfamdata_main_colums)
# The final table can be written as .csv
write.csv(dfamdata_main_colums, "./crested_myna_records.csv") |
33f85adfbb29bb76475b29ee4cce7dd24dec833b | 6cfac8e9f81c6de25403159d85211f7b4e5a41ec | /activity_EXPLORE.R | 782240031342ed522ef5e35ad04bcb86e62493e0 | [] | no_license | StatsRhian/active | 530eec58143859aef486471b326f296710613b6b | a53005fc87003ad235fe02aee90160fa90419ba2 | refs/heads/master | 2021-09-18T17:25:26.404914 | 2018-04-19T08:22:47 | 2018-04-19T08:22:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,762 | r | activity_EXPLORE.R |
source("activity_process.R")
#log_master - the original data import
#log - the original data with blank rows removed and some other cleaning
#log_B, log_R, log_F - the data for types B,F,R with appropriate variables
#log_B_split, log_F_split - B and F data with week data split up over following week
#log_new - R combined with B and F splits. 9 variables so some running specific ones lost (paces, terrain, hs, strides)
#totals - Daily totals for each of R,B,F (Time, Distance, Ascent)
#Other queries
print(log_new, n=15)
totals
log_R
names(log_R)
names(log_new)
log_new %>% group_by(Type, Week_data) %>% tally(Distance)
shoe_summary <- log_R %>% group_by(`Sub-type`) %>%
summarise_at(funs(sum), .vars = c("Time", "Distance", "Ascent")) %>%
arrange(desc(Distance))
shoe_summary
# Five longest (distance) R, B and F activities.
log %>% filter(Type %in% c("R", "B", "F")) %>%
filter(Week_total==0) %>%
group_by(Type) %>%
select(c(2:8, 16)) %>%
top_n(n=5, wt=Distance) %>%
arrange(Type, desc(Distance))
log %>% filter(Type == "R") %>%
filter(Week_total==0) %>%
select(c(2:8, 16)) %>%
top_n(n=15, wt=Distance) %>%
arrange(Type, desc(Distance))
log %>% filter(Type == "R") %>%
filter(Week_total==0) %>%
group_by(year(Date), long = Distance >=20) %>%
select(c(2:8, 16)) %>%
count()
eddington(log_new, "R")
eddington(log_new, "R", years = c(2013, 2014))
eddington(log_new, "R", years = 2017)
eddington(log_new, "R", "Ascent", 20)
eddington(log_new, "B")
eddington(totals, "R", years = 2017)
eddington(log_new, "B", "Ascent", 20)
library(zoo)
library(tidyverse)
temp <- totals %>% filter(Type=="R") %>% transmute(temp=rollsum(Distance, 28, alig="right", fill=NA))
plot(temp$temp, typ='l')
plot(tail(temp$temp, 730), typ='l')
|
0da1559cacb5bc30bb0cf99bc0389b254dcddc65 | 597ff8e61470bf66cb8958285cc2b8ffa37b015f | /Christmas tree/christmas_tree.r | f95a4a44126d88fb143f9e2255c0bcc1a20c1f84 | [] | no_license | r-kasidit/random-projects-in-R | d7aa0cca095f4c5de9c799b71999f77dd31d7e6d | 0ef6437f306abf3d1ffd9c2650323115a6821533 | refs/heads/master | 2023-08-15T07:11:58.302204 | 2023-08-06T06:45:23 | 2023-08-06T06:45:23 | 106,931,244 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,783 | r | christmas_tree.r | library(tidyverse)
data = data.frame(x=seq(-5, 5), y=0:10)
snow = data.frame(x=runif(100,-5,5), y=runif(100,0,10))
layer = 20
triangles = data.frame(
group = rep(seq(1, layer), 3),
pointx= c(seq(min(data$x), mean(data$x)-0.5, length.out=layer),
rep(0,layer),
seq(max(data$x), mean(data$x+0.5), length.out=layer)),
pointy= c(seq(min(data$y) +2, max(data$y)-2, length.out=layer),
seq(min(data$y) +3,max(data$y)-1, length.out=layer+1)[-1],
seq(min(data$y) +2, max(data$y)-2, length.out=layer))
)
pres_xposition = rnorm(10,0,2)
pres_width = runif(10,0.3,0.7)
pres_height = runif(10,0,1)
present = data.frame(
group = seq(1,10),
x_min= pres_xposition-pres_width,
x_max= pres_xposition+pres_width,
y_min= rep(0,10),
y_max= pres_height,
x_min_rib = pres_xposition - pres_width/4,
x_max_rib = pres_xposition + pres_width/4
)
x = NULL
list_value = list(NULL)
list_layer_x = list(NULL)
list_layer_y= list(NULL)
for (i in 1:layer) {
j = (layer:1)[1]
x = runif(j,
triangles[which(triangles$group==i), 2][1],
triangles[which(triangles$group==i), 2][3])
list_layer_x[[length(list_layer_x) +1]] <- rep(i,j)
list_layer_y[[length(list_layer_y) +1]] <- rep(triangles[which(triangles$group==i), 3][1],j)
list_value[[length(list_value) +1]] <- x
}
ornaments = data.frame(
layer = unlist(list_layer_x),
x_position = unlist(list_value),
y_position = unlist(list_layer_y),
size = rpois (length(unlist(list_layer_y)),0.5)+1,
color = sample(c('red', 'blue', 'yellow'), length(unlist(list_layer_y)), replace = T)
)
rand_drop = sample (1:dim(ornaments)[1], round(dim(ornaments)[1]/2, digits = 0), replace = F)
ggplot() +
geom_rect(data=data, aes(xmin=mean(x)-0.75, xmax=mean(x) +0.75, ymin=min (y), ymax=max (y)-7), fill='tan3') +
annotate('point', x = 0, y = max(triangles$pointy), shape=24, fill='yellow',size=8, color='yellow') +
annotate('point', x = 0, y = max(triangles$pointy), shape=25, fill='yellow',size=8, color='yellow') +
geom_polygon(data=triangles, aes(x=pointx, y=pointy, group=group), fill='green4')+
geom_jitter(data=ornaments[rand_drop, ], aes(x=x_position, y=y_position, size=size, color=color))+
scale_color_brewer(palette = 'Y1Gn')+
geom_rect(data=present, aes(xmin=x_min, xmax=x_max, ymin=y_min, ymax=y_max, fill=factor (group)))+
geom_rect(data=present, aes(xmin=x_min_rib, xmax=x_max_rib, ymin=y_min, ymax=y_max), fill='yellow') +
geom_point(data=snow, aes(x=x, y=y), shape='*', color="white", size=6)+
theme_void()+
theme(legend.position= 'none',
panel.background = element_rect(fill="#BFD5E3"))
#ggsave('cs_tree.jpeg', dpi=300, width=5, height = 5)
|
e48bdaa5c863c04d31fb557be7d872e7a2157807 | c4925b4e073c2d95b89871ec75430e77ee8aa716 | /ncvreg/R/summary-ncvsurv.R | fb7ad8a47985a5637967e3b650ca8eb5239a8a68 | [] | no_license | kof900/src | e04c5753ec0c26c54d3df0da77280cd1b124f6ce | fd4f7ab9860274155af53d1af2eb93a0fd95828e | refs/heads/master | 2020-03-13T06:44:40.824139 | 2018-04-25T13:32:57 | 2018-04-25T13:32:57 | 131,010,875 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 578 | r | summary-ncvsurv.R | summary.ncvsurv <- function(object, lambda, which, ...) {
nvars <- predict(object, type="nvars", lambda=lambda, which=which)
if (length(nvars) > 1) stop("You must specify a single model (i.e., a single value of lambda)")
if (missing(lambda)) lambda <- object$lambda[which]
model <- "Cox"
val <- list(penalty=object$penalty, model=model, n=object$n, p=nrow(object$beta), lambda=lambda, nvars=nvars)
if ("X" %in% names(object)) {
mFDR <- mfdr(object)
f <- approxfun(object$lambda, mFDR$EF)
val$EF = f(lambda)
}
structure(val, class="summary.ncvreg")
}
|
f7ea450464334f30f9cbc60fee3ba0f83deb1e61 | 7020e18e9ff3b2aa8bed306a9866017abd327b05 | /tests/testthat/test_08_list_variants.R | 4537cfa0c8db6c61993630e02ba564629ef41637 | [
"MIT"
] | permissive | fasterius/seqCAT | 2c19f56128cc2abbc11236222608e96139a9dd89 | ccc5fe015a556fae93ea7d83f7d155a269817928 | refs/heads/master | 2022-02-11T23:52:36.690083 | 2022-02-08T06:37:12 | 2022-02-08T06:37:12 | 102,448,610 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,217 | r | test_08_list_variants.R | library("seqCAT")
context("Listing known variants in SNV profiles")
# Load test profile
data(test_profile_1)
data(test_profile_2)
# Create test variants
test_variants <- data.frame(chr = c(1, 1),
pos = c(16229, 16298),
gene = c("DDX11L1", "DDX11L1"),
stringsAsFactors = FALSE)
# List test variants
profiles <- list(test_profile_1, test_profile_2)
variants <- list_variants(profiles, test_variants)
# Tests
test_that("errors for malformed input data are raised correctly", {
expect_error(list_variants(test_profile_1, "a string"), "not a dataframe")
expect_error(list_variants(test_profile_1, data.frame(chr = 1)), "'pos'")
expect_error(list_variants(test_profile_1, data.frame(pos = 1)), "'chr'")
})
test_that("known variants are listed correctly", {
expect_equal(nrow(variants), 2)
expect_equal(variants[variants$sample1 == "C/A", "gene"], "DDX11L1")
expect_equal(variants[variants$sample1 == "C/T", "gene"], "DDX11L1")
expect_equal(variants[variants$sample2 == "A/A", "gene"], "DDX11L1")
expect_equal(variants[variants$sample2 == 0, "gene"], "DDX11L1")
})
|
b55e18f2e16d8582b30da41fc3595c10cb1eeeea | 78ee5195584c81eb3b47ea1892db706d4b5e82da | /tests/fixBugs.R | e3889c00202eb0a0aae2bf6021a91593ee6dfa9c | [] | no_license | alfcrisci/bmdModeling | 80baa7f0045b1d35a904ca53fdefff131ebbdc75 | 43d2aa3b74011bca9960b79d4f49c8dfc9d2b1d9 | refs/heads/master | 2020-05-28T02:23:16.979389 | 2019-05-27T13:57:21 | 2019-05-27T13:57:21 | 188,852,970 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,450 | r | fixBugs.R | # Project: bmd_git
#
# Author: mvarewyck
###############################################################################
# Compare results with original f.proast()
#source("/home/mvarewyck/git/bmd/exploringProast/fFunctionsProast.R")
#dataDir <- "~/git/bmd/proastUI/inst/extdata"
#load(file.path(dataDir, "das1.rda"))
#f.proast(das1)
if (FALSE) {
dataDir <- "/home/mvarewyck/Documents/bmd/Background"
# Data 1 #
rawData <- read.table(file.path(dataDir, "Marco.txt"), header = TRUE)
proastData <- list(info = "testData",
nvar = ncol(rawData),
varnames = colnames(rawData),
data = rawData,
dtype = rep(0, ncol(rawData)))
shinyInput <- list(dtype = 4,
xans = 1,
yans = 2,
nans = 3,
CES = 0.1,
ces.ans = 3,
cont = FALSE)
savedResults <- fitSingleModel(data = proastData, shinyInput = shinyInput,
selectedModel = 25, track = TRUE)
bindModelResults(savedResults = savedResults)
# model parameterNames npar loglik aic bmd bmdl bmdu converged
# 1 Probit 2 -3.14 10.28 74.5686 57.91975 85.49365 TRUE
# Expected result (Proast 62.10)
# npar = 2.00, loglik = -3.14, AIC = 10.28, accepted = yes, BMDL = 57.90, BMDU = 85.50, BMD = 74.60
# Data 2 #
rawData <- read.table(file.path(dataDir, "examplequantal.txt"), header = TRUE)
proastData <- list(info = "testData",
nvar = ncol(rawData),
varnames = colnames(rawData),
data = rawData,
dtype = rep(0, ncol(rawData)))
shinyInput <- list(dtype = 4,
xans = 1,
yans = 3,
nans = 2,
CES = 0.1,
ces.ans = 3,
cont = FALSE)
savedResults <- fitSingleModel(data = proastData, shinyInput = shinyInput,
selectedModel = 25, track = TRUE)
bindModelResults(savedResults = savedResults)
# model parameterNames npar loglik aic bmd bmdl bmdu
# (Intercept) Probit 2 -91.14 186.28 4.003365 3.080731 6.32023
# converged
# (Intercept) TRUE
# loglik value is ok (-97.54) at the end of f.mm4.cat()
# but value changes when estimating CI (also with proast 62.10)
# Expected result (Proast 62.10) when not calculating CI
# npar = 2.00, loglik = -97.54, AIC = 199.08
# Expected result (Proast 62.10) when calculating CI
# npar = 2.00, loglik = -91.14, AIC = ??, BMDL = 3.0807, BMDU = 6.3202, BMD = 4.0034
} |
dad5fecfcb109ccc3f55e54a83057bbef0971199 | eda1e1a1edd543e3dd0605e1d74e19b23a1ff31c | /plot4.R | 3143d796143efbb77d229fc137a851be458f4703 | [] | no_license | nabi-a/ExData_Plotting1 | adfc491e050018bd4d11a4abffe5677255d7b8b7 | 1916af36b2b14cb271e41d853158f6c0a388e660 | refs/heads/master | 2020-03-09T04:27:48.366234 | 2018-04-08T15:00:32 | 2018-04-08T15:00:32 | 128,588,330 | 0 | 0 | null | 2018-04-08T02:25:07 | 2018-04-08T02:25:07 | null | UTF-8 | R | false | false | 1,895 | r | plot4.R | # Reading, naming and subsetting power consumption data
# Reading, naming and subsetting the household power consumption data.
# FIX: read with na.strings = "?" so the measurement columns parse directly
# as numeric, removing the as.numeric(as.character(...)) coercions the
# original needed.
power_data <- read.table("household_power_consumption.txt", skip = 1, sep = ";",
                         na.strings = "?")
names(power_data) <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                       "Voltage", "Global_intensity", "Sub_metering_1",
                       "Sub_metering_2", "Sub_metering_3")
# Keep only the two days of interest (Date is still a d/m/Y string here)
power_subdata <- subset(power_data, Date == "1/2/2007" | Date == "2/2/2007")

# FIX: build the datetime axis directly from the Date and Time strings.
# The original converted Time alone with strptime() and then patched the
# date back in via hard-coded row ranges (1:1440 and 1441:2880), which
# silently breaks if the subset is not exactly two full days.
power_subdata$DateTime <- strptime(paste(power_subdata$Date, power_subdata$Time),
                                   format = "%d/%m/%Y %H:%M:%S")

# 2x2 grid of plots, as in the original
par(mfrow = c(2, 2))
with(power_subdata, {
  # Top-left: global active power over time
  plot(DateTime, Global_active_power, type = "l",
       xlab = "", ylab = "Global Active Power")
  # Top-right: voltage over time
  plot(DateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
  # Bottom-left: the three sub-metering series, with a legend
  plot(DateTime, Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_1)
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
  legend("topright", lty = 1, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = 0.6)
  # Bottom-right: global reactive power over time
  plot(DateTime, Global_reactive_power, type = "l",
       xlab = "datetime", ylab = "Global_reactive_power")
})
139fb0cd0c50c9a32f6bb7a49266048435d5996b | e0ef38471f4df350d7f2507be45ef384501b8993 | /man/ms_list_item.Rd | f3bd07d86f78a19956f72a2eacadc6f87e8ba31e | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | amane49/Microsoft365R | a98c8ac6089470394129fc47fe772432223c414b | 7eb1515f11f2b9157cdd42dddd38cf8f88a011c8 | refs/heads/master | 2023-06-16T09:17:40.173598 | 2021-07-12T07:31:56 | 2021-07-12T07:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,198 | rd | ms_list_item.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ms_list_item.R
\docType{class}
\name{ms_list_item}
\alias{ms_list_item}
\title{SharePoint list item}
\format{
An R6 object of class \code{ms_list_item}, inheriting from \code{ms_object}.
}
\description{
Class representing an item in a SharePoint list.
}
\section{Fields}{
\itemize{
\item \code{token}: The token used to authenticate with the Graph host.
\item \code{tenant}: The Azure Active Directory tenant for the parent drive.
\item \code{type}: always "drive item" for a drive item object.
\item \code{properties}: The item properties (data and metadata). This is a list; the item data can be found in the \code{fields} component.
}
}
\section{Methods}{
\itemize{
\item \code{new(...)}: Initialize a new object. Do not call this directly; see 'Initialization' below.
\item \code{delete(confirm=TRUE)}: Delete this item. By default, ask for confirmation first.
\item \code{update(...)}: Update the item's properties (metadata) in Microsoft Graph. To update the list \emph{data}, update the \code{fields} property. See the examples below.
\item \code{do_operation(...)}: Carry out an arbitrary operation on the item.
\item \code{sync_fields()}: Synchronise the R object with the item data and metadata in Microsoft Graph.
}
}
\section{Initialization}{
Creating new objects of this class should be done via the \code{get_item} method of the \code{\link{ms_list}} class. Calling the \code{new()} method for this class only constructs the R object; it does not call the Microsoft Graph API to retrieve or create the actual item.
}
\examples{
\dontrun{
site <- get_sharepoint_site("My site")
lst <- site$get_list("mylist")
lst_items <- lst$list_items(as_data_frame=FALSE)
item <- lst_items[[1]]
item$update(fields=list(firstname="Mary"))
# item data (plus some metadata mixed in)
item$properties$fields
item$delete()
}
}
\seealso{
\code{\link{ms_graph}}, \code{\link{ms_site}}, \code{\link{ms_list}}
\href{https://docs.microsoft.com/en-us/graph/overview}{Microsoft Graph overview},
\href{https://docs.microsoft.com/en-us/graph/api/resources/sharepoint?view=graph-rest-1.0}{SharePoint sites API reference}
}
|
f793e926d35aeefbc4b37c85a8f0c782a3fe5dcf | 6204c6aa86599740768bd066e11608ddf3c58e84 | /R/rdcc-likelihoods.R | 4feab05fc5f089a9988374affa3dc0bcb401ec3e | [] | no_license | cran/rmgarch | 08e9048e7c79920dd783086466d3cfc1770d6df7 | 2699f032d3811d4f539deb90aac589f5823ebdb4 | refs/heads/master | 2023-03-04T19:20:23.957480 | 2022-02-04T23:30:06 | 2022-02-04T23:30:06 | 17,699,257 | 12 | 11 | null | 2023-02-24T10:26:57 | 2014-03-13T06:08:29 | R | UTF-8 | R | false | false | 23,098 | r | rdcc-likelihoods.R | #################################################################################
##
## R package rmgarch by Alexios Galanos Copyright (C) 2008-2022.
## This file is part of the R package rmgarch.
##
## The R package rmgarch is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## The R package rmgarch is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
#################################################################################
normal.dccLLH1 = function(pars, arglist)
{
    # First-stage DCC likelihood (multivariate normal): evaluates only the
    # correlation (DCC) part of the model for the parameters being estimated,
    # with the univariate GARCH stage held fixed.
    #
    # Args:
    #   pars    : numeric vector of the DCC parameters currently optimized.
    #   arglist : list with the model spec, parameter matrix (ipars), the
    #             standardized residuals and the environment used to cache
    #             the last good likelihood value.
    #
    # Returns: the scalar likelihood value produced by the compiled routine
    # 'dccnormC1', or a penalized fallback (last cached value worsened by
    # 10%) when the candidate is non-stationary or yields a non-finite value.
    mgarchenv = arglist$mgarchenv
    model = arglist$model
    modelinc = model$modelinc
    estidx = arglist$estidx
    idx = model$pidx
    ipars = arglist$ipars
    # re-join the estimated parameters with the fixed ones
    ipars[estidx, 1] = pars
    trace = arglist$trace
    m = arglist$m
    mx = model$maxdccOrder
    Qbar = arglist$Qbar
    Nbar = arglist$Nbar
    stdres = arglist$stdresid
    astdres = arglist$astdresid
    # pre-pad with mx rows so the C recursion has lagged values to start from
    stdres = rbind( matrix(0, nrow = mx, ncol = m), stdres )
    astdres = rbind( matrix(0, nrow = mx, ncol = m), astdres )
    fit.control = arglist$fit.control
    H = arglist$H
    # persistence components: sum of dcca + dccb, and sum of dccg (asymmetry)
    sumdcc = sum(ipars[idx["dcca",1]:idx["dcca",2],1])+sum(ipars[idx["dccb",1]:idx["dccb",2],1])
    sumdccg = sum(ipars[idx["dccg",1]:idx["dccg",2],1])
    if( fit.control$stationarity ){
        # reject non-stationary candidates (persistence >= 1) by returning a
        # value 10% worse than the last cached likelihood
        if(modelinc[5]>0){
            persist = .adcccon(pars, arglist)
        } else{
            persist = .dcccon(pars, arglist)
        }
        if( !is.na(persist) && persist >= 1 )
            return(llh = get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    res = .Call("dccnormC1", model = as.integer(modelinc), pars = as.numeric(ipars[,1]),
            idx = as.integer(idx[,1]-1), Qbar = Qbar, Nbar = Nbar, Z = stdres,
            N = astdres, epars = c(sumdcc, sumdccg, mx), PACKAGE = "rmgarch")
    llh = res[[3]]
    # FIX: the original guard `is.finite(llh) | !is.na(llh) | !is.nan(llh)`
    # is TRUE for NA (!is.nan(NA) is TRUE) and for Inf (!is.na(Inf) is TRUE),
    # so bad values reached the optimizer; the intended test is finiteness.
    if(is.finite(llh)){
        assign("rmgarch_llh", llh, envir = mgarchenv)
    } else {
        llh = (get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    return(llh)
}
normal.dccLLH2 = function(pars, arglist)
{
    # Second-stage DCC likelihood (multivariate normal): jointly evaluates
    # the univariate GARCH and DCC parameters. Each univariate model is
    # re-filtered with the candidate parameters, the residuals are
    # standardized, and the full likelihood is computed by the compiled
    # routine 'dccnormC2'.
    #
    # Args:
    #   pars    : numeric vector of all estimated parameters (GARCH + DCC).
    #   arglist : list with the model/umodel specs, data, parameter index
    #             matrices and the caching environment.
    #
    # Returns (per arglist$returnType): 'lik' = vector of per-observation
    # likelihoods, 'llh' = scalar likelihood, 'all' = list(lik, llh, Rt, Qt).
    mgarchenv = arglist$mgarchenv
    model = arglist$model
    umodel = arglist$umodel
    modelinc = model$modelinc
    idx = model$pidx
    mpars = arglist$mpars
    ipars = arglist$ipars
    m = arglist$m
    estidx = arglist$estidx
    eidx = arglist$eidx
    midx = arglist$midx
    data = arglist$data
    n.start = model$modeldata$n.start
    # assign the pars to the matrix pars (used in the GARCH filtering)
    mpars[which(eidx==1, arr.ind = TRUE)] = pars
    # assign the pars to the ipars (used in the DCC filtering)
    ipars[estidx,1] = mpars[which(eidx[,m+1]==1),m+1]
    trace = arglist$trace
    T = arglist$T
    Qbar = arglist$Qbar
    Nbar = arglist$Nbar
    # the dcc order
    mx = model$maxdccOrder
    m = dim(data)[2]
    H = matrix(0, nrow = T, ncol = m)
    resids = matrix(0, nrow = T, ncol = m)
    # re-filter each univariate GARCH model with the candidate parameters to
    # obtain conditional sigmas and residuals
    mspec = .makemultispec(umodel$modelinc, umodel$modeldesc$vmodel, umodel$modeldesc$vsubmodel,
            umodel$modeldata$mexdata, umodel$modeldata$vexdata, umodel$start.pars,
            umodel$fixed.pars, NULL)
    for(i in 1:m){
        specx = mspec@spec[[i]]
        setfixed(specx) = as.list(mpars[which(midx[,i]==1), i])
        flt = ugarchfilter(spec = specx, data = xts(data[,i], arglist$index[1:nrow(data)]), out.sample = n.start, realizedVol = arglist$realizedVol[1:nrow(data),i])
        H[, i] = sigma(flt)
        resids[,i] = residuals(flt)
    }
    stdres = resids/H
    T = dim(stdres)[1]
    Qbar = cov(stdres)
    if(modelinc[5]>0){
        # asymmetric DCC: negative-shock indicator weighted residuals
        Ibar = .asymI(stdres)
        astdres = Ibar*stdres
        Nbar = cov(astdres)
    } else{
        Ibar = .asymI(stdres)
        # zero what is not needed
        astdres = Ibar*stdres*0
        Nbar = matrix(0, m, m)
    }
    sumdcc = sum(ipars[idx["dcca",1]:idx["dcca",2],1])+sum(ipars[idx["dccb",1]:idx["dccb",2],1])
    sumdccg = sum(ipars[idx["dccg",1]:idx["dccg",2],1])
    # pre-pad with mx rows so the C recursion has lagged values to start from
    stdres = rbind( matrix(1, nrow = mx, ncol = m), stdres )
    astdres = rbind( matrix(1, nrow = mx, ncol = m), astdres )
    H = rbind( matrix(0, nrow = mx, ncol = m), H )
    res = .Call("dccnormC2", model = as.integer(modelinc), pars = as.numeric(ipars[,1]),
            idx = as.integer(idx[,1]-1), Qbar = Qbar, Nbar = Nbar, H = H, Z = stdres,
            N = astdres, epars = c(sumdcc, sumdccg, mx),
            PACKAGE = "rmgarch")
    Qtout = res[[1]]
    likelihoods = res[[2]]
    llh = res[[3]]
    Rtout = res[[4]]
    # drop the mx pre-padded initialization entries
    Qtout = Qtout[(mx+1):(T+mx)]
    Rtout = Rtout[(mx+1):(T+mx)]
    likelihoods = likelihoods[(mx+1):(T+mx)]
    # FIX: the original guard used '|' (is.finite | !is.na | !is.nan), which
    # is TRUE for NA and Inf; the intended test is finiteness, with the
    # penalized fallback used otherwise.
    if(is.finite(llh)){
        assign("rmgarch_llh", llh, envir = mgarchenv)
    } else {
        llh = (get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    ans = switch(arglist$returnType,
            lik = likelihoods,
            llh = llh,
            all = list(lik = likelihoods, llh = llh, Rt = Rtout, Qt = Qtout))
    return(ans)
}
normalfilter.dccLLH2 = function(pars, arglist)
{
    # Filtering counterpart of normal.dccLLH2: evaluates the joint
    # GARCH + DCC likelihood on a (possibly extended) data set while keeping
    # the unconditional matrices Qbar/Nbar estimated on only the first
    # 'dcc.old' observations, and passing 'n.old' through to ugarchfilter so
    # the univariate recursions are initialized as in the original fit.
    #
    # Args and return values are as in normal.dccLLH2.
    mgarchenv = arglist$mgarchenv
    model = arglist$model
    umodel = arglist$umodel
    modelinc = model$modelinc
    idx = model$pidx
    mpars = arglist$mpars
    ipars = arglist$ipars
    m = arglist$m
    estidx = arglist$estidx
    eidx = arglist$eidx
    midx = arglist$midx
    data = arglist$data
    n.old = arglist$n.old
    dcc.old = arglist$dcc.old
    n.start = model$modeldata$n.start
    # assign the pars to the matrix pars (used in the GARCH filtering)
    mpars[which(eidx==1, arr.ind = TRUE)] = pars
    # assign the pars to the ipars (used in the DCC filtering)
    ipars[estidx,1] = mpars[which(eidx[,m+1]==1),m+1]
    trace = arglist$trace
    T = arglist$T
    Qbar = arglist$Qbar
    Nbar = arglist$Nbar
    # the dcc order
    mx = model$maxdccOrder
    m = dim(data)[2]
    H = matrix(0, nrow = T, ncol = m)
    resids = matrix(0, nrow = T, ncol = m)
    # re-filter each univariate GARCH model with the candidate parameters
    mspec = .makemultispec(umodel$modelinc, umodel$modeldesc$vmodel, umodel$modeldesc$vsubmodel,
            umodel$modeldata$mexdata, umodel$modeldata$vexdata, umodel$start.pars,
            umodel$fixed.pars, NULL)
    for(i in 1:m){
        specx = mspec@spec[[i]]
        setfixed(specx) = as.list(mpars[which(midx[,i]==1), i])
        flt = ugarchfilter(spec = specx, data = xts(data[,i], arglist$index[1:nrow(data)]), out.sample = n.start, n.old = n.old, realizedVol = arglist$realizedVol[1:nrow(data),i])
        H[, i] = sigma(flt)
        resids[,i] = residuals(flt)
    }
    stdres = resids/H
    T = dim(stdres)[1]
    # unconditional covariance estimated on the original (old) sample only
    Qbar = cov(stdres[1:dcc.old, ])
    if(modelinc[5]>0){
        # asymmetric DCC: negative-shock indicator weighted residuals
        Ibar = .asymI(stdres)
        astdres = Ibar*stdres
        Nbar = cov(astdres[1:dcc.old, ])
    } else{
        Ibar = .asymI(stdres)
        # zero what is not needed
        astdres = Ibar*stdres*0
        Nbar = matrix(0, m, m)
    }
    sumdcc = sum(ipars[idx["dcca",1]:idx["dcca",2],1])+sum(ipars[idx["dccb",1]:idx["dccb",2],1])
    sumdccg = sum(ipars[idx["dccg",1]:idx["dccg",2],1])
    # pre-pad with mx rows so the C recursion has lagged values to start from
    stdres = rbind( matrix(1, nrow = mx, ncol = m), stdres )
    astdres = rbind( matrix(1, nrow = mx, ncol = m), astdres )
    H = rbind( matrix(0, nrow = mx, ncol = m), H )
    res = .Call("dccnormC2", model = as.integer(modelinc), pars = as.numeric(ipars[,1]),
            idx = as.integer(idx[,1]-1), Qbar = Qbar, Nbar = Nbar, H = H, Z = stdres,
            N = astdres, epars = c(sumdcc, sumdccg, mx),
            PACKAGE = "rmgarch")
    Qtout = res[[1]]
    likelihoods = res[[2]]
    llh = res[[3]]
    Rtout = res[[4]]
    # drop the mx pre-padded initialization entries
    Qtout = Qtout[(mx+1):(T+mx)]
    Rtout = Rtout[(mx+1):(T+mx)]
    likelihoods = likelihoods[(mx+1):(T+mx)]
    # FIX: the original guard used '|' (is.finite | !is.na | !is.nan), which
    # is TRUE for NA and Inf; the intended test is finiteness.
    if(is.finite(llh)){
        assign("rmgarch_llh", llh, envir = mgarchenv)
    } else {
        llh = (get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    ans = switch(arglist$returnType,
            lik = likelihoods,
            llh = llh,
            all = list(lik = likelihoods, llh = llh, Rt = Rtout, Qt = Qtout))
    return(ans)
}
student.dccLLH1 = function(pars, arglist)
{
    # First-stage DCC likelihood under the multivariate Student-t
    # distribution (the shape parameter lives in ipars alongside the DCC
    # parameters). Structure mirrors normal.dccLLH1; the likelihood is
    # computed by the compiled routine 'dccstudentC1'.
    #
    # Returns: the scalar likelihood value, or a penalized fallback (last
    # cached value worsened by 10%) for non-stationary or non-finite cases.
    mgarchenv = arglist$mgarchenv
    model = arglist$model
    modelinc = model$modelinc
    estidx = arglist$estidx
    idx = model$pidx
    ipars = arglist$ipars
    # re-join the estimated parameters with the fixed ones
    ipars[estidx, 1] = pars
    trace = arglist$trace
    T = arglist$T
    m = arglist$m
    mx = model$maxdccOrder
    Qbar = arglist$Qbar
    Nbar = arglist$Nbar
    stdres = arglist$stdresid
    astdres = arglist$astdresid
    fit.control = arglist$fit.control
    H = arglist$H
    # persistence components: sum of dcca + dccb, and sum of dccg (asymmetry)
    sumdcc = sum(ipars[idx["dcca",1]:idx["dcca",2],1])+sum(ipars[idx["dccb",1]:idx["dccb",2],1])
    sumdccg = sum(ipars[idx["dccg",1]:idx["dccg",2],1])
    # pre-pad with mx rows so the C recursion has lagged values to start from
    stdres = rbind( matrix(0, nrow = mx, ncol = m), stdres )
    astdres = rbind( matrix(0, nrow = mx, ncol = m), astdres )
    if( fit.control$stationarity ){
        # reject non-stationary candidates (persistence >= 1) by returning a
        # value 10% worse than the last cached likelihood
        if(modelinc[5]>0){
            persist = .adcccon(pars, arglist)
        } else{
            persist = .dcccon(pars, arglist)
        }
        if( !is.na(persist) && persist >= 1 )
            return(llh = get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    res = .Call("dccstudentC1", model = as.integer(modelinc), pars = as.numeric(ipars[,1]),
            idx = as.integer(idx[,1]-1), Qbar = Qbar, Nbar = Nbar, Z = stdres,
            N = astdres, epars = c(sumdcc, sumdccg, mx), PACKAGE = "rmgarch")
    llh = res[[3]]
    # FIX: the original guard `is.finite(llh) | !is.na(llh) | !is.nan(llh)`
    # is TRUE for NA and Inf; the intended test is finiteness.
    if(is.finite(llh)){
        assign("rmgarch_llh", llh, envir = mgarchenv)
    } else {
        llh = (get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    return(llh)
}
student.dccLLH2 = function(pars, arglist)
{
    # Second-stage DCC likelihood under the multivariate Student-t
    # distribution: jointly evaluates the univariate GARCH and DCC
    # parameters, delegating the likelihood computation to the compiled
    # routine 'dccstudentC2'. Structure mirrors normal.dccLLH2.
    #
    # Returns (per arglist$returnType): 'lik' = vector of per-observation
    # likelihoods, 'llh' = scalar likelihood, 'all' = list(lik, llh, Rt, Qt).
    mgarchenv = arglist$mgarchenv
    model = arglist$model
    umodel = arglist$umodel
    modelinc = model$modelinc
    idx = model$pidx
    mpars = arglist$mpars
    ipars = arglist$ipars
    m = arglist$m
    estidx = arglist$estidx
    eidx = arglist$eidx
    midx = arglist$midx
    data = arglist$data
    n.start = model$modeldata$n.start
    # assign the pars to the matrix pars (used in the GARCH filtering)
    mpars[which(eidx==1, arr.ind = TRUE)] = pars
    # assign the pars to the ipars (used in the DCC filtering)
    ipars[estidx,1] = mpars[which(eidx[,m+1]==1),m+1]
    trace = arglist$trace
    T = arglist$T
    Qbar = arglist$Qbar
    Nbar = arglist$Nbar
    mx = model$maxdccOrder
    m = dim(data)[2]
    H = matrix(0, nrow = T, ncol = m)
    resids = matrix(0, nrow = T, ncol = m)
    # re-filter each univariate GARCH model with the candidate parameters
    mspec = .makemultispec(umodel$modelinc, umodel$modeldesc$vmodel, umodel$modeldesc$vsubmodel,
            umodel$modeldata$mexdata, umodel$modeldata$vexdata, umodel$start.pars,
            umodel$fixed.pars, NULL)
    for(i in 1:m){
        specx = mspec@spec[[i]]
        setfixed(specx) = as.list(mpars[which(midx[,i]==1), i])
        flt = ugarchfilter(spec = specx, data = xts(data[,i], arglist$index[1:nrow(data)]), out.sample = n.start, realizedVol = arglist$realizedVol[1:nrow(data),i])
        H[, i] = sigma(flt)
        resids[,i] = residuals(flt)
    }
    stdres = resids/H
    T = dim(stdres)[1]
    Qbar = cov(stdres)
    if(modelinc[5]>0){
        # asymmetric DCC: negative-shock indicator weighted residuals
        Ibar = .asymI(stdres)
        astdres = Ibar*stdres
        Nbar = cov(astdres)
    } else{
        Ibar = .asymI(stdres)
        # zero what is not needed
        astdres = Ibar*stdres*0
        Nbar = matrix(0, m, m)
    }
    sumdcc = sum(ipars[idx["dcca",1]:idx["dcca",2],1])+sum(ipars[idx["dccb",1]:idx["dccb",2],1])
    sumdccg = sum(ipars[idx["dccg",1]:idx["dccg",2],1])
    # pre-pad with mx rows so the C recursion has lagged values to start from
    stdres = rbind( matrix(1, nrow = mx, ncol = m), stdres )
    astdres = rbind( matrix(1, nrow = mx, ncol = m), astdres )
    H = rbind( matrix(0, nrow = mx, ncol = m), H )
    res = .Call("dccstudentC2", model = as.integer(modelinc), pars = as.numeric(ipars[,1]),
            idx = as.integer(idx[,1]-1), Qbar = Qbar, Nbar = Nbar, H = H,
            Z = stdres, N = astdres, epars = c(sumdcc, sumdccg, mx), PACKAGE = "rmgarch")
    Qtout = res[[1]]
    likelihoods = res[[2]]
    llh = res[[3]]
    Rtout = res[[4]]
    # drop the mx pre-padded initialization entries
    Qtout = Qtout[(mx+1):(T+mx)]
    Rtout = Rtout[(mx+1):(T+mx)]
    likelihoods = likelihoods[(mx+1):(T+mx)]
    # FIX: the original guard used '|' (is.finite | !is.na | !is.nan), which
    # is TRUE for NA and Inf; the intended test is finiteness.
    if(is.finite(llh)){
        assign("rmgarch_llh", llh, envir = mgarchenv)
    } else {
        llh = (get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    ans = switch(arglist$returnType,
            lik = likelihoods,
            llh = llh,
            all = list(lik = likelihoods, llh = llh, Rt = Rtout, Qt = Qtout))
    return(ans)
}
studentfilter.dccLLH2 = function(pars, arglist)
{
    # Filtering counterpart of student.dccLLH2: evaluates the joint
    # GARCH + DCC Student-t likelihood on a (possibly extended) data set
    # while estimating Qbar/Nbar on only the first 'dcc.old' observations
    # and passing 'n.old' through to ugarchfilter for consistent
    # initialization with the original fit.
    #
    # Args and return values are as in student.dccLLH2.
    mgarchenv = arglist$mgarchenv
    model = arglist$model
    umodel = arglist$umodel
    modelinc = model$modelinc
    idx = model$pidx
    mpars = arglist$mpars
    ipars = arglist$ipars
    m = arglist$m
    estidx = arglist$estidx
    eidx = arglist$eidx
    midx = arglist$midx
    data = arglist$data
    n.old = arglist$n.old
    dcc.old = arglist$dcc.old
    n.start = model$modeldata$n.start
    # assign the pars to the matrix pars (used in the GARCH filtering)
    mpars[which(eidx==1, arr.ind = TRUE)] = pars
    # assign the pars to the ipars (used in the DCC filtering)
    ipars[estidx,1] = mpars[which(eidx[,m+1]==1),m+1]
    trace = arglist$trace
    T = arglist$T
    Qbar = arglist$Qbar
    Nbar = arglist$Nbar
    mx = model$maxdccOrder
    m = dim(data)[2]
    H = matrix(0, nrow = T, ncol = m)
    resids = matrix(0, nrow = T, ncol = m)
    # re-filter each univariate GARCH model with the candidate parameters
    mspec = .makemultispec(umodel$modelinc, umodel$modeldesc$vmodel, umodel$modeldesc$vsubmodel,
            umodel$modeldata$mexdata, umodel$modeldata$vexdata, umodel$start.pars,
            umodel$fixed.pars, NULL)
    for(i in 1:m){
        specx = mspec@spec[[i]]
        setfixed(specx) = as.list(mpars[which(midx[,i]==1), i])
        flt = ugarchfilter(spec = specx, data = xts(data[,i], arglist$index[1:nrow(data)]), out.sample = n.start, n.old = n.old, realizedVol = arglist$realizedVol[1:nrow(data),i])
        H[, i] = sigma(flt)
        resids[,i] = residuals(flt)
    }
    stdres = resids/H
    T = dim(stdres)[1]
    # unconditional covariance estimated on the original (old) sample only
    Qbar = cov(stdres[1:dcc.old, ])
    if(modelinc[5]>0){
        # asymmetric DCC: negative-shock indicator weighted residuals
        Ibar = .asymI(stdres)
        astdres = Ibar*stdres
        Nbar = cov(astdres[1:dcc.old, ])
    } else{
        Ibar = .asymI(stdres)
        # zero what is not needed
        astdres = Ibar*stdres*0
        Nbar = matrix(0, m, m)
    }
    sumdcc = sum(ipars[idx["dcca",1]:idx["dcca",2],1])+sum(ipars[idx["dccb",1]:idx["dccb",2],1])
    sumdccg = sum(ipars[idx["dccg",1]:idx["dccg",2],1])
    # pre-pad with mx rows so the C recursion has lagged values to start from
    stdres = rbind( matrix(1, nrow = mx, ncol = m), stdres )
    astdres = rbind( matrix(1, nrow = mx, ncol = m), astdres )
    H = rbind( matrix(0, nrow = mx, ncol = m), H )
    res = .Call("dccstudentC2", model = as.integer(modelinc), pars = as.numeric(ipars[,1]),
            idx = as.integer(idx[,1]-1), Qbar = Qbar, Nbar = Nbar, H = H, Z = stdres,
            N = astdres, epars = c(sumdcc, sumdccg, mx), PACKAGE = "rmgarch")
    Qtout = res[[1]]
    likelihoods = res[[2]]
    llh = res[[3]]
    Rtout = res[[4]]
    # drop the mx pre-padded initialization entries
    Qtout = Qtout[(mx+1):(T+mx)]
    Rtout = Rtout[(mx+1):(T+mx)]
    likelihoods = likelihoods[(mx+1):(T+mx)]
    # FIX: the original guard used '|' (is.finite | !is.na | !is.nan), which
    # is TRUE for NA and Inf; the intended test is finiteness.
    if(is.finite(llh)){
        assign("rmgarch_llh", llh, envir = mgarchenv)
    } else {
        llh = (get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    ans = switch(arglist$returnType,
            lik = likelihoods,
            llh = llh,
            all = list(lik = likelihoods, llh = llh, Rt = Rtout, Qt = Qtout))
    return(ans)
}
laplace.dccLLH1 = function(pars, arglist)
{
    # First-stage DCC likelihood under the multivariate Laplace
    # distribution. Structure mirrors normal.dccLLH1; the likelihood is
    # computed by the compiled routine 'dcclaplaceC1'.
    #
    # Returns: the scalar likelihood value, or a penalized fallback (last
    # cached value worsened by 10%) for non-stationary or non-finite cases.
    mgarchenv = arglist$mgarchenv
    model = arglist$model
    modelinc = model$modelinc
    estidx = arglist$estidx
    idx = model$pidx
    ipars = arglist$ipars
    # re-join the estimated parameters with the fixed ones
    ipars[estidx, 1] = pars
    trace = arglist$trace
    T = arglist$T
    m = arglist$m
    mx = model$maxdccOrder
    Qbar = arglist$Qbar
    Nbar = arglist$Nbar
    stdres = arglist$stdresid
    astdres = arglist$astdresid
    fit.control = arglist$fit.control
    H = arglist$H
    # NOTE(review): N appears unused here (the 'N' passed to .Call below is
    # a named argument bound to astdres); kept for fidelity with the
    # original implementation.
    N = c(mx, T)
    # persistence components: sum of dcca + dccb, and sum of dccg (asymmetry)
    sumdcc = sum(ipars[idx["dcca",1]:idx["dcca",2],1])+sum(ipars[idx["dccb",1]:idx["dccb",2],1])
    sumdccg = sum(ipars[idx["dccg",1]:idx["dccg",2],1])
    # pre-pad with mx rows so the C recursion has lagged values to start from
    stdres = rbind( matrix(0, nrow = mx, ncol = m), stdres )
    astdres = rbind( matrix(0, nrow = mx, ncol = m), astdres )
    if( fit.control$stationarity ){
        # reject non-stationary candidates (persistence >= 1) by returning a
        # value 10% worse than the last cached likelihood
        if(modelinc[5]>0){
            persist = .adcccon(pars, arglist)
        } else{
            persist = .dcccon(pars, arglist)
        }
        if( !is.na(persist) && persist >= 1 )
            return(llh = get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    res = .Call("dcclaplaceC1", model = as.integer(modelinc), pars = as.numeric(ipars[,1]),
            idx = as.integer(idx[,1]-1), Qbar = Qbar, Nbar = Nbar, Z = stdres,
            N = astdres, epars = c(sumdcc, sumdccg, mx), PACKAGE = "rmgarch")
    llh = res[[3]]
    # FIX: the original guard `is.finite(llh) | !is.na(llh) | !is.nan(llh)`
    # is TRUE for NA and Inf; the intended test is finiteness.
    if(is.finite(llh)){
        assign("rmgarch_llh", llh, envir = mgarchenv)
    } else {
        llh = (get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
    }
    return(llh)
}
laplace.dccLLH2 = function(pars, arglist)
{
	# Second-stage DCC (multivariate Laplace) likelihood: re-filters the
	# univariate GARCH models with the candidate parameters, re-standardizes the
	# residuals, and evaluates the full DCC log-likelihood in C.
	#
	# Args:
	#   pars    : numeric vector of all parameters currently being estimated
	#             (univariate GARCH and DCC jointly).
	#   arglist : list with the model/umodel specs, parameter index matrices,
	#             data, and return-type switch.
	# Returns (depending on arglist$returnType): per-observation likelihoods,
	# the total log-likelihood, or a list also containing Qt and Rt paths.
	# rejoin fixed and estimated parameters
	mgarchenv = arglist$mgarchenv
	model = arglist$model
	umodel = arglist$umodel
	modelinc = model$modelinc
	idx = model$pidx
	mpars = arglist$mpars
	ipars = arglist$ipars
	m = arglist$m
	estidx = arglist$estidx
	eidx = arglist$eidx
	midx = arglist$midx
	data = arglist$data
	n.start = model$modeldata$n.start
	# assign the pars to the matrix pars (used in the GARCH filtering)
	mpars[which(eidx==1, arr.ind = TRUE)] = pars
	# assign the pars to the ipars (used in the DCC filtering)
	ipars[estidx,1] = mpars[which(eidx[,m+1]==1),m+1]
	trace = arglist$trace
	T = arglist$T
	Qbar = arglist$Qbar
	Nbar = arglist$Nbar
	mx = model$maxdccOrder
	m = dim(data)[2]
	H = matrix(0, nrow = T, ncol = m)
	resids = matrix(0, nrow = T, ncol = m)
	# re-filter the univariate GARCH models to obtain conditional sigmas/residuals
	mspec = .makemultispec(umodel$modelinc, umodel$modeldesc$vmodel, umodel$modeldesc$vsubmodel,
			umodel$modeldata$mexdata, umodel$modeldata$vexdata, umodel$start.pars,
			umodel$fixed.pars, NULL)
	for(i in 1:m){
		specx = mspec@spec[[i]]
		setfixed(specx) = as.list(mpars[which(midx[,i]==1), i])
		flt = ugarchfilter(spec = specx, data = xts(data[,i], arglist$index[1:nrow(data)]), out.sample = n.start, realizedVol = arglist$realizedVol[1:nrow(data),i])
		H[, i] = sigma(flt)
		resids[,i] = residuals(flt)
	}
	stdres = resids/H
	T = dim(stdres)[1]
	Qbar = cov(stdres)
	if(modelinc[5]>0){
		# asymmetric DCC: negative-residual indicator terms enter the recursion
		Ibar = .asymI(stdres)
		astdres = Ibar*stdres
		Nbar = cov(astdres)
	} else{
		Ibar = .asymI(stdres)
		# zero what is not needed
		astdres = Ibar*stdres*0
		Nbar = matrix(0, m, m)
	}
	sumdcc = sum(ipars[idx["dcca",1]:idx["dcca",2],1])+sum(ipars[idx["dccb",1]:idx["dccb",2],1])
	sumdccg = sum(ipars[idx["dccg",1]:idx["dccg",2],1])
	# pad with mx pre-sample rows so the C recursion can start at t = 1
	stdres = rbind( matrix(1, nrow = mx, ncol = m), stdres )
	astdres = rbind( matrix(1, nrow = mx, ncol = m), astdres )
	H = rbind( matrix(0, nrow = mx, ncol = m), H )
	res = .Call( "dcclaplaceC2", model = as.integer(modelinc),
			pars = as.numeric(ipars[,1]), idx = as.integer(idx[,1]-1),
			Qbar = Qbar, Nbar = Nbar, H = H, Z = stdres, N = astdres,
			epars = c(sumdcc, sumdccg, mx), PACKAGE = "rmgarch")
	Qtout = res[[1]]
	likelihoods = res[[2]]
	llh = res[[3]]
	Rtout = res[[4]]
	# drop the mx pre-sample entries again
	Qtout = Qtout[(mx+1):(T+mx)]
	Rtout = Rtout[(mx+1):(T+mx)]
	likelihoods = likelihoods[(mx+1):(T+mx)]
	# BUGFIX: the original guard `is.finite(llh) | !is.na(llh) | !is.nan(llh)` is
	# TRUE for every llh (including NA/NaN/Inf), so bad values were cached and
	# never penalized. is.finite() alone already excludes NA, NaN and +/-Inf.
	if(is.finite(llh)){
		assign("rmgarch_llh", llh, envir = mgarchenv)
	} else {
		llh = (get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
	}
	ans = switch(arglist$returnType,
			lik = likelihoods,
			llh = llh,
			all = list(lik = likelihoods, llh = llh, Rt = Rtout, Qt = Qtout))
	return(ans)
}
laplacefilter.dccLLH2 = function(pars, arglist)
{
	# Filter-mode counterpart of laplace.dccLLH2: evaluates the DCC (Laplace)
	# likelihood on new data with fixed parameters, using only the first
	# `dcc.old` observations for the unconditional covariance targets.
	#
	# Args:
	#   pars    : numeric vector of all (fixed) parameters to filter with.
	#   arglist : list with model/umodel specs, parameter index matrices, data,
	#             the old-sample sizes (n.old, dcc.old) and return-type switch.
	# rejoin fixed and estimated parameters
	mgarchenv = arglist$mgarchenv
	model = arglist$model
	umodel = arglist$umodel
	modelinc = model$modelinc
	idx = model$pidx
	mpars = arglist$mpars
	ipars = arglist$ipars
	m = arglist$m
	estidx = arglist$estidx
	eidx = arglist$eidx
	midx = arglist$midx
	data = arglist$data
	n.old = arglist$n.old
	dcc.old = arglist$dcc.old
	n.start = model$modeldata$n.start
	# assign the pars to the matrix pars (used in the GARCH filtering)
	mpars[which(eidx==1, arr.ind = TRUE)] = pars
	# assign the pars to the ipars (used in the DCC filtering)
	ipars[estidx,1] = mpars[which(eidx[,m+1]==1),m+1]
	trace = arglist$trace
	T = arglist$T
	Qbar = arglist$Qbar
	Nbar = arglist$Nbar
	mx = model$maxdccOrder
	m = dim(data)[2]
	H = matrix(0, nrow = T, ncol = m)
	resids = matrix(0, nrow = T, ncol = m)
	# re-filter the univariate GARCH models to obtain conditional sigmas/residuals
	mspec = .makemultispec(umodel$modelinc, umodel$modeldesc$vmodel, umodel$modeldesc$vsubmodel,
			umodel$modeldata$mexdata, umodel$modeldata$vexdata, umodel$start.pars,
			umodel$fixed.pars, NULL)
	for(i in 1:m){
		specx = mspec@spec[[i]]
		setfixed(specx) = as.list(mpars[which(midx[,i]==1), i])
		# CONSISTENCY FIX: pass n.old (extracted above but previously unused) so
		# the univariate filter conditions on the old sample, matching the
		# analogous student-distribution filter routine in this file.
		flt = ugarchfilter(spec = specx, data = xts(data[,i], arglist$index[1:nrow(data)]), out.sample = n.start, n.old = n.old, realizedVol = arglist$realizedVol[1:nrow(data),i])
		H[, i] = sigma(flt)
		resids[,i] = residuals(flt)
	}
	stdres = resids/H
	T = dim(stdres)[1]
	# covariance targets are computed on the old sample only
	Qbar = cov(stdres[1:dcc.old, ])
	if(modelinc[5]>0){
		# asymmetric DCC: negative-residual indicator terms enter the recursion
		Ibar = .asymI(stdres)
		astdres = Ibar*stdres
		Nbar = cov(astdres[1:dcc.old, ])
	} else{
		Ibar = .asymI(stdres)
		# zero what is not needed
		astdres = Ibar*stdres*0
		Nbar = matrix(0, m, m)
	}
	sumdcc = sum(ipars[idx["dcca",1]:idx["dcca",2],1])+sum(ipars[idx["dccb",1]:idx["dccb",2],1])
	sumdccg = sum(ipars[idx["dccg",1]:idx["dccg",2],1])
	# pad with mx pre-sample rows so the C recursion can start at t = 1
	stdres = rbind( matrix(1, nrow = mx, ncol = m), stdres )
	astdres = rbind( matrix(1, nrow = mx, ncol = m), astdres )
	H = rbind( matrix(0, nrow = mx, ncol = m), H )
	res = .Call( "dcclaplaceC2", model = as.integer(modelinc),
			pars = as.numeric(ipars[,1]), idx = as.integer(idx[,1]-1),
			Qbar = Qbar, Nbar = Nbar, H = H, Z = stdres, N = astdres,
			epars = c(sumdcc, sumdccg, mx), PACKAGE = "rmgarch")
	Qtout = res[[1]]
	likelihoods = res[[2]]
	llh = res[[3]]
	Rtout = res[[4]]
	# drop the mx pre-sample entries again
	Qtout = Qtout[(mx+1):(T+mx)]
	Rtout = Rtout[(mx+1):(T+mx)]
	likelihoods = likelihoods[(mx+1):(T+mx)]
	# BUGFIX: the original guard `is.finite(llh) | !is.na(llh) | !is.nan(llh)` is
	# TRUE for every llh (including NA/NaN/Inf), so bad values were cached and
	# never penalized. is.finite() alone already excludes NA, NaN and +/-Inf.
	if(is.finite(llh)){
		assign("rmgarch_llh", llh, envir = mgarchenv)
	} else {
		llh = (get("rmgarch_llh", mgarchenv) + 0.1*(abs(get("rmgarch_llh", mgarchenv))))
	}
	ans = switch(arglist$returnType,
			lik = likelihoods,
			llh = llh,
			all = list(lik = likelihoods, llh = llh, Rt = Rtout, Qt = Qtout))
	return(ans)
}
|
f513272f38889d9f81fe99896e173e64cfb35d19 | 18df0ee04b5654c30475fabbb669cff7e112b98b | /man/class_from_obs_id.Rd | 0521cab29b0b7714c9899084a86b6f05bb070a80 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | seninp/metacoder | fa7a84787fafb9d67aef5226b0b9e17c5defd654 | a0685c540fec9955bc2a068cc7af46b5172dcabe | refs/heads/master | 2020-06-10T20:44:04.208387 | 2016-09-27T21:59:15 | 2016-09-27T21:59:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,005 | rd | class_from_obs_id.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_taxonomy--parsers.R
\name{class_from_obs_id}
\alias{class_from_obs_id}
\title{Retrieve classifications from observation IDs}
\usage{
class_from_obs_id(obs_id, database = c("ncbi", "none"), ...)
}
\arguments{
\item{obs_id}{(\code{character})
An unique observation (e.g. sequence) identifier for a particular \code{database}.
Requires an internet connection.}
\item{database}{(\code{character} of length 1)
The name of the database that patterns given in \code{parser} will apply to.
Valid databases include "ncbi", "itis", "eol", "col", "tropicos",
"nbn", and "none". \code{"none"} will cause no database to be quired; use this if you want to not use the
internet. NOTE: Only \code{"ncbi"} has been tested so far.}
\item{...}{Not used}
}
\value{
\code{list} of \code{data.frame}
}
\description{
Retrieve taxonomic classifications from observation (e.g. sequence) IDs using a specified database.
}
\keyword{internal}
|
aa911364cde0559379b953e3e3b56abc59082cb1 | 2be6995e86b2ffffaf040b802387901a4e6bf5c1 | /vignettes/Ch19Vig2FrocSampleSize.R | 38a8e5e5d039de1b95e55367d1725b3f2c72c758 | [] | no_license | pwep/RJafroc | 89491abb88581f0962330c84415b9eecdbe9221d | 9a5a6078c35b470fd21639ac5c75a0ba1d8a3eaa | refs/heads/master | 2021-06-30T07:49:03.714828 | 2020-12-10T00:39:05 | 2020-12-10T00:39:05 | 189,657,987 | 1 | 0 | null | 2019-05-31T21:01:36 | 2019-05-31T21:01:35 | null | UTF-8 | R | false | false | 2,870 | r | Ch19Vig2FrocSampleSize.R | ## ----setup, include = FALSE---------------------------------------------------
# Knitr chunk options for the vignette build
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(RJafroc)
library(ggplot2)
## -----------------------------------------------------------------------------
# Assumed distribution of the number of lesions per diseased case (1, 2 or 3)
lesDistr <- c(0.7, 0.2, 0.1)
# Fit the RSM-based null-hypothesis (NH) model to treatments 1 and 2 of dataset04
frocNhData <- DfExtractDataset(dataset04, trts = c(1,2))
ret <- SsFrocNhRsmModel(frocNhData, lesDistr = lesDistr)
# Median RSM parameters of the fitted NH model
muMed <- ret$muMed
lambdaMed <- ret$lambdaMed
nuMed <- ret$nuMed
lesDistr <- ret$lesDistr
lesWghtDistr <- ret$lesWghtDistr
# Factor converting a ROC effect size into the corresponding wAFROC effect size
scaleFactor <- ret$scaleFactor
## -----------------------------------------------------------------------------
# NH figures of merit implied by the median RSM parameters: ROC AUC and wAFROC AUC
aucRocNH <- PlotRsmOperatingCharacteristics(muMed, lambdaMed, nuMed,
lesDistr = lesDistr, OpChType = "ROC")$aucROC
aucwAfrocNH <- PlotRsmOperatingCharacteristics(muMed, lambdaMed, nuMed,
lesDistr = lesDistr, OpChType = "wAFROC")$aucwAFROC
## -----------------------------------------------------------------------------
# DBM variance components of the wAFROC FOM (random-reader random-case analysis)
varCompwAFROC <- StSignificanceTesting(frocNhData, FOM = "wAFROC", method = "DBM", analysisOption = "RRRC")$ANOVA$VarCom
## -----------------------------------------------------------------------------
# Translate a postulated ROC effect size into wAFROC units and compute power
ROC_ES <- 0.05
effectSizewAFROC <- scaleFactor * ROC_ES
J <- 5;K <- 100 # 5 readers, 100 cases
varYTR <- varCompwAFROC["VarTR","Estimates"]
varYTC <- varCompwAFROC["VarTC","Estimates"]
varYEps <- varCompwAFROC["VarErr","Estimates"]
ret <- SsPowerGivenJK(dataset = NULL, FOM = "Wilcoxon", J = J, K = K, analysisOption = "RRRC",
effectSize = effectSizewAFROC, method = "DBM", LegacyCode = TRUE,
list(VarTR = varYTR,
VarTC = varYTC,
VarErr = varYEps))
powerwAFROC <- ret$powerRRRC
cat("ROC-ES = ", ROC_ES, ", wAFROC-ES = ", ROC_ES * scaleFactor, ", Power-wAFROC = ", powerwAFROC, "\n")
## -----------------------------------------------------------------------------
# Number of cases K needed with J = 6 readers (KRRRC printed as "K80RRRC" below)
VarTR <- varCompwAFROC["VarTR","Estimates"]
VarTC <- varCompwAFROC["VarTC","Estimates"]
VarErr <- varCompwAFROC["VarErr","Estimates"]
ret2 <- SsSampleSizeKGivenJ(dataset = NULL, J = 6, effectSize = effectSizewAFROC, method = "DBM", LegacyCode = TRUE,
list(VarTR = VarTR, VarTC = VarTC, VarErr = VarErr))
cat("ROC-ES = ", ROC_ES, ", wAFROC-ES = ", ROC_ES * scaleFactor,
", K80RRRC = ", ret2$KRRRC, ", Power-wAFROC = ", ret2$powerRRRC, "\n")
## -----------------------------------------------------------------------------
# Confirm the achieved power at the computed number of cases
ret3 <- SsPowerGivenJK(dataset = NULL, J = 6, K = ret2$KRRRC, effectSize = effectSizewAFROC, method = "DBM", LegacyCode = TRUE,
list(VarTR = VarTR, VarTC = VarTC, VarErr = VarErr))
cat("ROC-ES = ", ROC_ES, ", wAFROC-ES = ", ROC_ES * scaleFactor,
", powerRRRC = ", ret3$powerRRRC, "\n")
|
28f481b2781745514b8f9652dd1551d59470e4aa | 703b1605c75434440dbe641d1c80a8deb390f15a | /XCMS/demo_workflow.R | 8e764ca0cd9653ef9f8afb64a7ce1c80f5711c11 | [] | no_license | TalWac/XCMS | 80d8eda3728a1bf17e41ea9e622ed58f919b6d53 | 12df016196dea00020768b24c21d1adac84a209c | refs/heads/main | 2023-03-18T15:49:56.367973 | 2021-03-09T15:57:31 | 2021-03-09T15:57:31 | 346,059,562 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,401 | r | demo_workflow.R | if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
# One-time setup: install the required Bioconductor packages
BiocManager::install("xcms")
BiocManager::install("faahKO")
library(xcms)
library(faahKO)
library(RColorBrewer)
library(pander)
library(magrittr)
library(pheatmap)
## Get the full path to the CDF files shipped with the faahKO package
cdfs <- dir(system.file("cdf", package = "faahKO"), full.names = TRUE,
            recursive = TRUE)
## Create a phenodata data.frame: sample names plus knock-out (KO) vs wild type (WT) group
pd <- data.frame(sample_name = sub(basename(cdfs), pattern = ".CDF",
                                   replacement = "", fixed = TRUE),
                 sample_group = c(rep("KO", 6), rep("WT", 6)),
                 stringsAsFactors = FALSE)
## Read the raw MS data; "onDisk" mode keeps spectra on disk and loads on demand
raw_data <- readMSData(files = cdfs, pdata = new("NAnnotatedDataFrame", pd),
                       mode = "onDisk")
head(rtime(raw_data))
head(mz(raw_data))
mzs <- mz(raw_data)
## Split the list by file
mzs_by_file <- split(mzs, f = fromFile(raw_data))
length(mzs_by_file)
## Get the base peak chromatograms. This reads data from the files.
bpis <- chromatogram(raw_data, aggregationFun = "max")
## Define colors for the two groups
group_colors <- brewer.pal(3, "Set1")[1:2]
names(group_colors) <- c("KO", "WT")
## Plot all chromatograms.
plot(bpis, col = group_colors[raw_data$sample_group])
## BUGFIX: bpi_1 was used below without being defined; extract the first
## sample's base peak chromatogram before inspecting its retention times.
bpi_1 <- bpis[1, 1]
head(rtime(bpi_1))
## Get the total ion current by file
tc <- split(tic(raw_data), f = fromFile(raw_data))
boxplot(tc, col = group_colors[raw_data$sample_group],
        ylab = "intensity", main = "Total ion current")
## Define the rt and m/z range of the peak area
rtr <- c(2700, 2900)
mzr <- c(334.9, 335.1)
## extract the chromatogram
chr_raw <- chromatogram(raw_data, mz = mzr, rt = rtr)
plot(chr_raw, col = group_colors[chr_raw$sample_group])
raw_data %>%
  filterRt(rt = rtr) %>%
  filterMz(mz = mzr) %>%
  plot(type = "XIC")
## Chromatographic peak detection with the centWave algorithm
cwp <- CentWaveParam(peakwidth = c(30, 80), noise = 1000)
xdata <- findChromPeaks(raw_data, param = cwp)
head(chromPeaks(xdata))
## Per-sample summary: number of peaks and quantiles of peak retention-time width
summary_fun <- function(z) {
  c(peak_count = nrow(z), rt = quantile(z[, "rtmax"] - z[, "rtmin"]))
}
## Renamed from `T` to avoid masking the logical constant TRUE
peak_summary <- lapply(split.data.frame(chromPeaks(xdata),
                                        f = chromPeaks(xdata)[, "sample"]),
                       FUN = summary_fun)
peak_summary <- do.call(rbind, peak_summary)
rownames(peak_summary) <- basename(fileNames(xdata))
pandoc.table(peak_summary,
             caption = paste0("Summary statistics on identified chromatographic",
                              " peaks. Shown are number of identified peaks per",
                              " sample and widths/duration of chromatographic ",
                              "peaks."))
plot(chr_raw, col = group_colors[chr_raw$sample_group], lwd = 2)
highlightChromPeaks(xdata, border = group_colors[chr_raw$sample_group],
                    lty = 3, rt = rtr, mz = mzr)
pander(chromPeaks(xdata, mz = mzr, rt = rtr),
       caption = paste("Identified chromatographic peaks in a selected ",
                       "m/z and retention time range."))
## Extract a list of per-sample peak intensities (in log2 scale)
ints <- split(log2(chromPeaks(xdata)[, "into"]),
              f = chromPeaks(xdata)[, "sample"])
boxplot(ints, varwidth = TRUE, col = group_colors[xdata$sample_group],
        ylab = expression(log[2]~intensity), main = "Peak intensities distribution per sample")
grid(nx = NA, ny = NULL)
|
49766f964d1eebd68e2e520dbec03c864c7ea2cf | 68034e3069603a30be4ac88534612f7652ef4e2e | /original_files/J_Addah_MSc_project/HCM-SCD CEAA.R | fb9f3279b3f4f7195d776f807756e332ff40c18b | [] | no_license | lixiaocheng123/HCM-SCD-CE-analysis | c62cd0c51ecffb751d9065a56389a937bac9966c | 44093a943fc5e4483c6576aa04e29ff981f0af9e | refs/heads/master | 2023-06-28T22:44:23.358692 | 2020-11-04T16:01:55 | 2020-11-04T16:01:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,668 | r | HCM-SCD CEAA.R | # Bayesian Models for Cost-Effectiveness Analysis
# Loads packages
S <- 4 # Number of health states
# s = 1 Healthy health state
# s = 2 Stroke HCM-Related Health state
# s = 3 SCD (Sudden Cardiac Death) Health state
# s = 4 DAC (Death All Causes) Health state
# EXISTING METHOD OF SCD-RISK-PREDICTION (EMSRP): Current Method of SCD Risk Prediction
# HCM - SCD RISK PREDICTION MODEL (HSRPM)
J <-10 # Number of years of follow up
# Now load the observed data on transitions among the states for the two treatments
Model.file="HCMmodel.txt" # Specifies file with the model defining observed data
inits1=source("hcminits1.txt")$value # Loads the initial values for the first chain
inits2=source("hcminits2.txt")$value # Loads the initial values of the second chain
inits=list(inits1,inits2) # Combines into a list files with initial values
dataBugs=source("HCMdata.txt")$value # Loads observed data
# Now run the MCMC
library(R2OpenBUGS)
params=c("lambda.0","lambda.1") # Defines parameters to save (transition rows per treatment)
n.iter <- 10000
n.burnin<- 5000
n.thin <- floor((n.iter- n.burnin)/500)
n.chain=2
debug=FALSE
mm1 <- bugs(data=dataBugs,inits=inits,parameters.to.save = params,
model.file = Model.file, n.chains=n.chain,n.iter=n.iter,n.burnin=n.burnin,n.thin=n.thin,DIC = TRUE)
print(mm1,digits = 3)
# attach.bugs exposes the saved draws (lambda.0, lambda.1, n.sims) in the workspace
attach.bugs(mm1)
# Analyze MCMC Convergence with CODA
# Check if all entries in Rhat component of Bugs output are less than 1.1
# all(mm1$summary[,"Rhat"]< 1.1)
# Convert bugs output for coda and create an mcmc object
hcm<- bugs(data=dataBugs,inits=inits,parameters.to.save = params,
model.file = Model.file,codaPkg=TRUE, n.chains=n.chain,n.iter=n.iter,n.burnin=n.burnin,n.thin=n.thin,DIC = TRUE)
hcm.coda<-read.bugs(hcm)
# Now we run the Markov model from R
start <- c(1000,0,0,0) # Analysis for virtual cohort of 1000 individuals
# NB All patients enter the model from the first state "Healthy"
# Determine the Markov transitions
# m.0/m.1: state occupancy [MCMC draw, state, year] under EMSRP / HSRPM
m.0 <- m.1 <- array(NA,c(n.sims,S,(J+1)))
for(s in 1:S){
m.0[,s,1] <- start[s]
m.1[,s,1] <- start[s]
}
#NB
# BUGS only outputs matrices for lambda.0 and lambda.1 with simulations for the "random" part
# ie only the first 2 rows, as the last two are deterministically defined as c(0,0,1,1)
# because once a patient is in SCD, and DAC, they can't move away. So there is the need to
# reconstruct a full matrix with S rows and S columns for each MCMC simulation. This is done by
# defining new arrays lamda0 and lamda1 and then stacking up the simulated values for the first (S-2)
# rows saved in lambda.0[i,,] and lambda.1[i,,] for MCMC simulation i with a row vector
# containing (S-2) 0s and then two 1's, ie c(0,0, 1,1)
lamda0=lamda1=array(NA, c(n.sims,S,S))
for (i in 1:n.sims) {
lamda0[i,,]=rbind(rbind(lambda.0[i,,],c(0,0,1,1)),c(0,0,1,1))
lamda1[i,,]=rbind(rbind(lambda.1[i,,],c(0,0,1,1)),c(0,0,1,1))
for (j in 2: (J+1)) {
for (s in 1:S) {
# Now use lamda0 and lamda1 for the matrix multiplication (occupancy update)
m.0[i,s,j] <- sum(m.0[i,,j-1]*lamda0[i,,s])
m.1[i,s,j] <- sum(m.1[i,,j-1]*lamda1[i,,s])
}
}
}
# Now we draw barplot of the number of people in each state at each time point during follow up
par(mfrow=c(1,2))
barplot(apply(m.0,c(2,3),sum),names.arg=seq(0,10),space=.2,xlab="Virtual follow up",
ylab="Proportion of patients in each state",main="EMSRP",ylim = c(0,200000000))
barplot(apply(m.1,c(2,3),sum),names.arg=seq(0,10),space=.2,xlab="Virtual follow up",
ylab="Proportion of patients in each state",main="HSRPM",ylim = c(0,200000000))
# Plot trace and density for all mcmc chains
plot(hcm.coda, trace = TRUE, density = TRUE, smooth = FALSE)
# Run the economic analysis
# Now we define the benefits (utilities of the two non-fatal states)
utility.score.0<-utility.score.1<-array(NA,c(n.sims,2,J)) # Defines measures of accumulated utility
# under each treatment
dec.rate<-0.35 # Defines utility decrement rate to apply when a non-fatal HCM event occurs at j >0
utility.score.0[,1,]<-rep(0.637,J) # Utility for occupying the state "Healthy" under treatment t=0
utility.score.1[,1,]<-rep(0.637,J) # Utility for occupying the state "Healthy" under treatment t= 1
utility.score.0[,2,1]<-dec.rate*utility.score.0[1,1,1] # Utility for occupying state "Stroke-HCM Related" under treatment t=0
utility.score.1[,2,1]<-dec.rate*utility.score.1[1,1,1] # Utility for occupying state "Stroke-HCM Related" under treatment t=1
for (i in 1:n.sims) {
for (j in 2:J) {
# Stroke-state utility declines by dec.rate for each further year
utility.score.0[i,2,j]<-utility.score.0[i,2,j-1]*(1- dec.rate)
utility.score.1[i,2,j]<-utility.score.1[i,2,j-1]*(1- dec.rate)
}
}
# We now compute QALY's accumulated under each treatment for each year of follow up
# (state occupancy times utility, normalized by the starting cohort size)
Qal.0<-Qal.1<-matrix(NA,n.sims,J)
for (i in 1:n.sims) {
for (j in 1:J) {
Qal.0[i,j]<-(m.0[i,1,j]%*%utility.score.0[i,1,j]
+ m.0[i,2,j]%*%utility.score.0[i,2,j])/m.0[1,1,1]
Qal.1[i,j]<-(m.1[i,1,j]%*%utility.score.1[i,1,j]
+ m.1[i,2,j]%*%utility.score.1[i,2,j])/m.1[1,1,1]
}
}
# Now sum values across all time points, and create matrix effectiveness
# eff[, t, ]: per-draw QALY totals for treatment t (vector recycled across years)
eff<-array(NA,c(n.sims,2,J))
eff[,1,]<-apply(Qal.0, 1,sum)
eff[,2,]<-apply(Qal.1, 1,sum)
# We define the annual cost for each non-fatal health state under each treatment
# (element 1 = "Healthy", element 2 = "Stroke HCM-Related")
unit.cost.0 <- c(4792,22880) # EMSRP
unit.cost.1 <- c(4812,22880) # HSRPM
# Create a holding cost variable to track yearly (j>0) accumulated cost under each treatment
cost.0<-cost.1<-matrix(NA,n.sims,J)
for (i in 1:n.sims) {
for (j in 2:(J+1)) {
# BUGFIX 1: the second cost term previously started a new line with a leading
# "+", so R parsed it as a separate (discarded) expression; the expression is
# now continued with a trailing operator so both terms are actually summed.
# BUGFIX 2: costs for strategy 1 now use its own state occupancy m.1
# (the original used m.0 throughout, a copy-paste error).
cost.0[i,j-1] <- (m.0[i,S,j] + m.0[i,(S-1),j])*(unit.cost.0%*%m.0[i,1:(S-2),j])/sum(m.0[i,1:(S-2),j]) +
unit.cost.0%*%m.0[i,1:(S-2),j]
cost.1[i,j-1] <- (m.1[i,S,j] + m.1[i,(S-1),j])*(unit.cost.1%*%m.1[i,1:(S-2),j])/sum(m.1[i,1:(S-2),j]) +
unit.cost.1%*%m.1[i,1:(S-2),j]
}
}
# We now derive a general formulation to apply discount to cost and benefits
rate.b <- 0.035 # discount rate for benefits (3.5%)
rate.c <- 0.035 # discount rate for costs (3.5%)
# Defines the discount factors; year 1 is undiscounted
disc.b <- numeric(); disc.c <- numeric()
disc.b[1] <- 1; disc.c[1] <- 1
for (j in 2:J) {
disc.b[j] <- (1+rate.b)^(j-1)
disc.c[j] <- (1+rate.c)^(j-1)
}
# Apply the discount factors year by year
disc.cost.0 <- disc.eff.0 <- disc.cost.1 <- disc.eff.1 <- matrix(NA,n.sims,J)
for (j in 1:J) {
disc.cost.0[,j] <- cost.0[,j]/disc.c[j]
disc.cost.1[,j] <- cost.1[,j]/disc.c[j]
disc.eff.0[,j] <- eff[,1,j]/disc.b[j]
disc.eff.1[,j] <- eff[,2,j]/disc.b[j]
}
# Now sum the values across all time points and create matrix of costs
# NOTE(review): `c` shadows base::c here; function calls like c(...) below still
# resolve to the base function, but a different variable name would be clearer.
c <- matrix(NA,n.sims,2)
c[,1] <- apply(disc.cost.0,1,sum)
c[,2] <- apply(disc.cost.1,1,sum)
# Sum all discounted values of effectiveness and create a matrix of discounted effectiveness
e <- matrix(NA,n.sims,2)
e[,1] <- apply(disc.eff.0,1,sum)
e[,2] <- apply(disc.eff.1,1,sum)
# Cost-effectiveness analysis with BCEA (reference = intervention 2 = HSRPM)
library(BCEA)
ints <- c("EMSRP","HSRPM")
m <- bcea(e,c,ref=2,interventions=ints,Kmax=25000)
contour2(m,25000)
plot(m)
summary(m)
|
ea923e6bf85a03ee420175b043aa2b63d09f270a | 361045b8660071fc6bc9bf1d5d5727632dc8235d | /TRAL_breeding_age.R | c4e2940da1a5a78df341fe7c8d407312d7f14fcd | [
"CC0-1.0"
] | permissive | steffenoppel/TRAL_IPM | 61a46a30fd64611cc3af86dda2e4c1b3316c1b9a | cbe939f7234b4ced1f50fa2492cc6c0b42222c71 | refs/heads/main | 2023-04-08T03:45:08.927110 | 2022-05-12T06:59:28 | 2022-05-12T06:59:28 | 198,238,566 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,285 | r | TRAL_breeding_age.R | #####################################################################################
######## EXAMINATION OF BREEDING AGE OF TRISTAN ALBATROSS ON GOUGH ##############
#####################################################################################
### written by Steffen Oppel in August 2021
### steffen.oppel@rspb.org.uk
### examines 3 question re TRAL breeding age:
## 1. does average age of all TRAL first recruiters in a given year change over time? [This is potentially effort-dependent]
## 2. does average age of all TRAL breeders in a given year change over time?
## 3. does average age of all TRAL individuals contacted in a given year change over time? - not used in manuscript as age increases due to length of time since ringing was started
## update on 2 Oct 2021: after chat with Cat Horswill try to examine whether mean age at first breeding 2004-2009 is higher than 2015-2021
## switched questions and figures for manuscript, because prop old breeders is more logical and should be key evidence
## updated 18 February 2022: repeated questions that increase in age proportion could simply be due to when ringing began - examine in more detail
library(tidyverse)
library(lubridate)
library(data.table)
# pin the dplyr verbs so later calls are not masked by other attached packages
filter<-dplyr::filter
select<-dplyr::select
# LOAD AND MANIPULATE ICONS
library(grid)
library(magick)
imgTRAL<-image_read("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\PR_Comms\\Icons\\alby 4.jpg") %>% image_transparent("white", fuzz=5)
TRALicon <- rasterGrob(imgTRAL, interpolate=TRUE)
#############################################################################
## DATA IMPORT AND PREPARATION COPIED FROM IPM_DATA_PREPARATION.r ####
#############################################################################
## SPECIFY THE SPECIES AND START YEAR FOR SURVIVAL MODEL
SP<-"TRAL"
start<-1950 ## for CMR data
IPMstart<-2004 ## for count and breeding success data
## run the RODBC import of CMR data in a 32-bit version of R
#system(paste0("C:/PROGRA~1/R/R-35~1.1/bin/i386/Rscript.exe ", shQuote("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\PopulationModel\\TRAL_IPM\\RODBC_CMR_import_TRAL.R")), wait = TRUE, invisible = FALSE, intern = T)
#system(paste0(Sys.getenv("R_HOME"), "/bin/i386/Rscript.exe ", shQuote("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\PopulationModel\\TRAL_IPM\\RODBC_CMR_import_TRAL.R")), wait = TRUE, invisible = FALSE, intern = T)
try(setwd("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\PopulationModel\\TRAL_IPM"), silent=T)
# provides the CMR tables used below (contacts, ages, bands)
load("GOUGH_seabird_CMR_data.RData")
## run the RODBC import of nest and count data in a 32-bit version of R
#system(paste0("C:/PROGRA~1/R/R-35~1.1/bin/i386/Rscript.exe ", shQuote("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\DATA\\Breeding_Database\\RODBC_count_import.r")), wait = TRUE, invisible = FALSE, intern = T)
#system(paste0(Sys.getenv("R_HOME"), "/bin/i386/Rscript.exe ", shQuote("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\DATA\\Breeding_Database\\RODBC_count_import.r")), wait = TRUE, invisible = FALSE, intern = T)
try(setwd("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\DATA\\Breeding_Database"), silent=T)
# provides the breeding tables used below (nests)
load("GOUGH_seabird_data.RData")
## filter data for the selected species
nests<-nests %>% filter(Species==SP)
## filter data for the selected species
contacts<-contacts %>% filter(SpeciesCode==SP) ## %>% filter(Location %in% c("Hummocks","Gonydale","Tafelkop","Not Specified")) - this removes age info for chicks ringed elsewhere!
ages<-ages %>% filter(SpeciesCode==SP)
bands<-bands %>% filter(SpeciesCode==SP)
head(contacts) ## CMR data
dim(contacts)
dim(contactsbreed)
### EXTRACT AGE AT DEPLOYMENT FROM DATABASE
# one row per bird: age class and date/year at first capture
deploy_age<-contacts %>% arrange(BirdID, Date_Time,Contact_Year) %>%
mutate(AGE=ifelse(Age %in% c("Chick","Fledgling"),0,1)) %>%
group_by(BirdID) %>%
summarise(MIN_AGE=min(AGE), MAX_AGE=max(AGE), FIRST_AGE=first(Age), FIRST_Date=first(Date_Time), FIRST_YEAR=min(Contact_Year)) %>%
mutate(FIRST_AGE=ifelse(FIRST_AGE=="Unknown" & month(FIRST_Date)<5,"Adult", as.character(FIRST_AGE))) %>% ### unknowns marked before May were not chicks
mutate(FIRST_AGE=ifelse(is.na(FIRST_AGE), ifelse(month(FIRST_Date)<5,"Adult","Chick"), as.character(FIRST_AGE))) ### unknowns marked before May were not chicks
head(deploy_age)
dim(deploy_age)
# birds without a usable age at deployment, joined with band numbers for manual checking
MISSAGE<-deploy_age %>% filter(is.na(FIRST_AGE)) %>% left_join(bands, by="BirdID") %>%
select(BirdID, Band_Number,MIN_AGE,FIRST_Date,FIRST_YEAR)
dim(MISSAGE)
### FIND MISSING DATA FOR SEASON AND REPLACE BASED ON DATE
# December adult contacts belong to the following season; January chick contacts to the previous one
contacts<-contacts %>%
mutate(Contact_Year=if_else(is.na(Contact_Year),as.integer(year(Date_Time)),Contact_Year)) %>%
mutate(Contact_Year=if_else(as.integer(month(Date_Time))==12 & !(Age %in% c("Chick","Fledgling")),Contact_Year+1,as.numeric(Contact_Year))) %>%
mutate(Contact_Year=if_else(as.integer(month(Date_Time))==1 & (Age %in% c("Chick","Fledgling")),Contact_Year-1,as.numeric(Contact_Year)))
dim(contacts)
head(contacts)
### ASSIGN AGE TO BIRDS WHERE THIS IS NOT SPECIFIED
## include a column with continuous age
contacts<-contacts %>%
left_join(deploy_age, by="BirdID") %>%
mutate(AGE=ifelse(Age=="Adult",1,ifelse(Age %in% c("Chick","Fledgling"),0,NA))) %>% ### certain assignments based on provided age
mutate(AGE=ifelse(is.na(AGE), ifelse(Sex %in% c("Male","Female"),1,NA),AGE)) %>% ### inferred assignment from sex info - only adults can be sexed
mutate(ContAge=ifelse(FIRST_AGE %in% c("Chick","Fledgling"),Contact_Year-FIRST_YEAR,Contact_Year-FIRST_YEAR+7)) #%>% ### continuous age since first deployment, at least 7 years for birds marked as 'adult'
contacts %>% filter(is.na(AGE))
contacts %>% filter(is.na(ContAge))
########## CREATE A LOOP OVER EVERY BIRD TO CHECK WHETHER THEY WERE EVER RECORDED IN STUDY AREAS
STUDY_AREAS<- c("Hummocks","Gonydale","Tafelkop")
allbirds<-unique(contacts$BirdID)
# keep only birds with at least one contact inside the study areas
# NOTE(review): growing a data.frame with bind_rows inside a loop is slow;
# a grouped filter over contacts would be equivalent and much faster
fixed_contacts<-data.frame()
for (xid in allbirds){
xcont<-contacts %>% filter(BirdID==xid) %>% arrange(Date_Time)
xcont$INSIDE<-ifelse(xcont$Location %in% STUDY_AREAS,1,0)
xcont$INSIDE<-ifelse(xcont$Location == "Not Specified" & xcont$Contact_Year>2014,1,xcont$INSIDE)
if(sum(xcont$INSIDE, na.rm=T)>0){fixed_contacts<-bind_rows(fixed_contacts ,xcont)}
}
dim(contacts)
dim(fixed_contacts)
length(unique(fixed_contacts$BirdID))
length(allbirds)
### CHECK WHAT BIRDS WERE RINGED AS CHICKS BEFORE 1978
oldchicks<-fixed_contacts %>% filter(Contact_Year<=start) %>% filter(ContAge<2)
fixed_contacts %>% filter(BirdID %in% oldchicks$BirdID)
### REMOVE RECORDS FROM BEFORE THE SET START YEAR AND BIRDS FIRST MARKED IN LAST YEAR
contacts<-fixed_contacts %>%
filter(year(Date_Time)>start) %>%
filter(ContAge!=1) ## remove 5 records of unfledged chicks within a few weeks/months of ringing
dim(contacts)
unique(contacts$FIRST_AGE)
## try to determine years with high and low detection probability
contacts %>% mutate(count=1) %>% group_by(Contact_Year) %>% summarise(n=sum(count)) %>%
ggplot() + geom_bar(aes(x=Contact_Year,y=n), stat="identity")
# n_exist: per-year numbers of newly marked adults/chicks and a running
# estimate of how many marked birds could still be alive (N_all)
n_exist<-deploy_age %>% mutate(count=1) %>% rename(Contact_Year=FIRST_YEAR) %>%
mutate(FIRST_AGE=if_else(FIRST_AGE=="Fledgling","Chick",FIRST_AGE)) %>%
group_by(Contact_Year,FIRST_AGE) %>%
summarise(N_marked=sum(count)) %>%
arrange(Contact_Year) %>%
spread(key=FIRST_AGE, value=N_marked, fill=0) %>%
ungroup() %>%
mutate(N_marked=Adult+Chick) %>%
mutate(N_ever_ad = cumsum(Adult),N_ever_ch = cumsum(Chick),N_all = cumsum(N_marked)) %>%
bind_rows(tibble(Contact_Year=2022,Adult=0,Chick=0,N_marked=0,N_all=0, N_ever_ad=1404, N_ever_ch=3635)) %>%
mutate(N_all=if_else(Contact_Year==2022,dplyr::lag(N_all),N_all)) %>%
arrange(Contact_Year)
# recursive update of N_all; 0.96 and 0.85 appear to be assumed annual survival
# of adults and first-years, with 0.96^19 bridging a 19-year gap - TODO confirm
n_exist$N_all[1] = n_exist$N_marked[1]
n_exist$N_all[2] = (n_exist$Adult[1]*0.96) + (n_exist$Chick[1]*0.85) + n_exist$N_marked[2]
n_exist$N_all[3] = (n_exist$N_all[2]*(0.96^19)) + n_exist$N_marked[3]
n_exist$N_all[4] = (n_exist$Adult[3]*0.96) + (n_exist$Chick[3]*0.85) + (n_exist$N_all[2]*(0.96^19)) + n_exist$N_marked[4]
for (y in 5:dim(n_exist)[1]) {
n_exist$N_all[y] = ((n_exist$Adult[y-1]+n_exist$N_all[y-2])*0.96) + (n_exist$Chick[y-1]*0.85) + n_exist$N_marked[y]
}
tail(n_exist)
### ADDED - A COLUMN THAT SAYS WHAT PROPORTION OF BIRDS WERE MARKED > 30 YEARS AGO
goodyears<-contacts %>% group_by(Contact_Year) %>% summarise(n=length(unique(BirdID))) %>%
filter(!(Contact_Year==1959)) %>% ## remove single re-sighting from 1959
left_join(n_exist, by='Contact_Year') %>%
mutate(prop.seen=n/N_all) %>%
mutate(old.ad=ifelse(Contact_Year<1981,0,
ifelse(Contact_Year<2002,
142,
dplyr::lag(N_ever_ad,n=26)))) %>% ## specifies adults that were already 4 years old when ringed
mutate(old.ch=ifelse(Contact_Year<1985,0,
ifelse(Contact_Year<2006,
21,
dplyr::lag(N_ever_ch,n=30)))) %>% ## specifies chicks that were already 4 years old when ringed
mutate(all.pot.old=(old.ad+old.ch)) %>%
mutate(all.pot.ad=cumsum(Adult), all.pot.ch=cumsum(Chick)) %>%
mutate(all.pot.breed=dplyr::lag(all.pot.ch,n=4)+all.pot.ad) %>%
mutate(prop.pot.old=all.pot.old/(all.pot.breed-all.pot.old)) %>% ## changed denominator to all.not.old on advice from Adam Butler
select(Contact_Year,n,Adult,Chick,N_marked,N_all,prop.seen,prop.pot.old)
#filter(Contact_Year>1978)
tail(goodyears)
#############################################################################
## QUESTION 1: does age of breeders change over time? ###############
#############################################################################
### THIS QUESTION IS MASSIVELY AFFECTED BY WHEN THE RINGING BEGAN AS THERE IS AN INCREASING AGE OVER TIME
### this requires the breeding status ID to be added to the access query
# breeders: mean known age per breeding bird and year, joined with annual effort
breeders<-contacts %>%
filter(!(FIRST_YEAR==Contact_Year)) %>% ## potentially change this to remove only ringed chicks? Age %in% c("Chick","Fledgling")
#filter(!(is.na(Nest_Description))) %>%
mutate(Breeding_StatusID=ifelse(is.na(Nest_Description),Breeding_StatusID,1)) %>%
filter(Breeding_StatusID %in% c(1,-1525788936,105568723,1899636611,1899636612,1899636618)) %>%
group_by(BirdID,Contact_Year) %>%
summarise(ContAge=mean(ContAge)) %>%
left_join(goodyears, by="Contact_Year") %>%
mutate(detrend=Contact_Year-1980) %>%
filter(Contact_Year>1990)
dim(breeders)
min(breeders$ContAge)
breeders %>% filter(ContAge<6)
### exploratory plots
ggplot(breeders) +
geom_point(aes(x=Contact_Year, y=ContAge)) +
geom_smooth(aes(x=Contact_Year, y=ContAge),method="lm")
# ggplot(breeders) +
# geom_histogram(aes(x=ContAge)) +
# facet_wrap(~Contact_Year)
### analysis
# Poisson GLM of breeder age on year, weighted by annual resighting effort
# NOTE(review): 'newdat' below relies on R's partial argument matching for
# 'newdata'; spell it out for clarity
m2eff<-glm(ContAge~Contact_Year, data=breeders, family="poisson",weights=prop.seen)
summary(m2eff)
predict(m2eff, newdat=data.frame(Contact_Year=seq(2000,2020,1),detrend=1), type='response')
### does the proportion of young breeders change over time?
## removed on 2 Oct 2021 because it is too confusing and will lead to questions of age at first breeding which we cannot answer
### does the proportion of OLD breeders change over time?
## first, calculate the proportion of birds in each year that could have plausibly been >30 years
head(goodyears)
## One row per year (from 2004): counts of 'old' (>=30 y) vs 'young' breeders.
oldbreeders<-contacts %>%
filter(!(FIRST_YEAR==Contact_Year)) %>% ## potentially change this to remove only ringed chicks? Age %in% c("Chick","Fledgling")
mutate(Breeding_StatusID=ifelse(is.na(Nest_Description),Breeding_StatusID,1)) %>% ## any contact with a nest description counts as breeding
filter(Breeding_StatusID %in% c(1,-1525788936,105568723,1899636611,1899636612,1899636618)) %>%
group_by(BirdID,Contact_Year) %>%
summarise(ContAge=mean(ContAge)) %>%
left_join(goodyears, by="Contact_Year") %>%
ungroup() %>%
mutate(YOUNG=ifelse(ContAge<30,1,0)) %>%
mutate(OLD=ifelse(ContAge>29,1,0)) %>% ## complement of YOUNG: age 30 and over
group_by(Contact_Year) %>%
summarise(prop.old=mean(OLD),n.young=sum(YOUNG),n.old=sum(OLD)) %>%
left_join(goodyears, by="Contact_Year") %>%
filter(Contact_Year>2003)
dim(oldbreeders)
#fwrite(oldbreeders,"TRAL_old_breeders_2004_2021.csv")
### analysis of trend over time
## Binomial GLM (cloglog) of old-vs-young counts on year, offset by the log
## proportion of potentially old birds, weighted by resighting effort.
m2oleff<-glm(cbind(n.old,n.young)~Contact_Year+offset(log(prop.pot.old)), data=oldbreeders, family=binomial(link="cloglog"),weights=prop.seen)
summary(m2oleff)
### Sarah suggested different approach: prop.pot.old as covariate instead of offset
m1<-glm(cbind(n.old,n.young)~Contact_Year+prop.pot.old, data=oldbreeders, family=binomial(link="cloglog"),weights=prop.seen)
m2<-glm(cbind(n.old,n.young)~prop.pot.old, data=oldbreeders, family=binomial(link="cloglog"),weights=prop.seen)
m1$aic
m2$aic
str(m2oleff)
m2oleff$fitted.values
### prediction of effect size
## Prediction grid: full effort and a fixed small pool of potentially old birds.
olddat<-data.frame(Contact_Year=seq(2004,2021,1),prop.seen=1, prop.pot.old=0.01)
## grab the inverse link function
ilink <- family(m2oleff)$linkinv
## add fit and se.fit on the **link** scale
olddat <- bind_cols(olddat, setNames(as_tibble(predict(m2oleff, olddat, se.fit = TRUE)[1:2]),
c('fit_link','se_link')))
## create the 95% interval and backtransform
olddat <- mutate(olddat,
pred.prop = ilink(fit_link),
ucl = ilink(fit_link + (1.96 * se_link)),
lcl = ilink(fit_link - (1.96 * se_link)))
### COMBINE PROPORTION OF YOUNG AND OLD BREEDERS IN ONE PLOT
ggplot(olddat) +
geom_line(data=oldbreeders,aes(x=Contact_Year, y=prop.pot.old),colour = "darkgrey",linetype="dashed") +
geom_line(aes(x=Contact_Year, y=pred.prop),colour = "indianred") +
geom_ribbon(aes(x=Contact_Year, ymin=lcl, ymax=ucl), fill= "indianred",alpha = 0.2) +
geom_point(data=oldbreeders,aes(x=Contact_Year, y=prop.old), size=3) +
labs(x = "Year",
y = "Annual proportion of old breeders") +
scale_y_continuous(breaks=seq(0,0.40,0.10), limits=c(0,0.40))+
scale_x_continuous(breaks=seq(2005,2021,2), limits=c(2004,2021))+
### add the bird icons
annotation_custom(TRALicon, xmin=2004.5, xmax=2006.5, ymin=0.06, ymax=0.10) +
theme(panel.background=element_rect(fill="white", colour="black"),
axis.text=element_text(size=18, color="black"),
axis.title=element_text(size=20),
panel.grid.minor = element_blank())
ggsave("C:\\STEFFEN\\MANUSCRIPTS\\Submitted\\TRAL_IPM\\Fig3_rev.jpg", width=9, height=6) ## NOTE: machine-specific Windows path
################### CHECK WHETHER THE NUMBER OF OLD BIRDS CHANGES #####################################################
## ####
### same check as above but for ALL resighted birds (no breeding filter)
## NOTE: this section re-uses and overwrites m2oleff and olddat from the
## breeder analysis above, so run each section as a unit.
oldbirds<-contacts %>%
filter(!(FIRST_YEAR==Contact_Year)) %>% ## potentially change this to remove only ringed chicks? Age %in% c("Chick","Fledgling")
group_by(BirdID,Contact_Year) %>%
summarise(ContAge=mean(ContAge)) %>%
left_join(goodyears, by="Contact_Year") %>%
ungroup() %>%
mutate(YOUNG=ifelse(ContAge<30,1,0)) %>%
mutate(OLD=ifelse(ContAge>29,1,0)) %>%
group_by(Contact_Year) %>%
summarise(prop.old=mean(OLD),n.young=sum(YOUNG),n.old=sum(OLD)) %>%
left_join(goodyears, by="Contact_Year") %>%
filter(Contact_Year>2003)
### analysis of trend over time (default logit link here, no offset)
m2oleff<-glm(cbind(n.old,n.young)~Contact_Year, data=oldbirds, family="binomial", weights=prop.seen)
summary(m2oleff)
### prediction of effect size
olddat<-data.frame(Contact_Year=seq(2004,2021,1),prop.seen=1)
## grab the inverse link function
ilink <- family(m2oleff)$linkinv
## add fit and se.fit on the **link** scale
olddat <- bind_cols(olddat, setNames(as_tibble(predict(m2oleff, olddat, se.fit = TRUE)[1:2]),
c('fit_link','se_link')))
## create the 95% interval and backtransform
olddat <- mutate(olddat,
pred.prop = ilink(fit_link),
ucl = ilink(fit_link + (1.96 * se_link)),
lcl = ilink(fit_link - (1.96 * se_link)))
#############################################################################
## QUESTION 2: does age of first returners change over time? ###############
#############################################################################
## First year each chick-ringed bird was seen again after its ringing year.
firstreturnyear<-contacts %>%
filter(FIRST_AGE %in% c("Chick","Fledgling")) %>%
filter(!(FIRST_YEAR==Contact_Year)) %>%
group_by(BirdID) %>%
summarise(FirstReturn=min(Contact_Year))
## One row per bird at its first-return contact, with effort covariates.
firstreturns<-contacts %>%
filter(FIRST_AGE %in% c("Chick","Fledgling")) %>%
left_join(goodyears, by="Contact_Year") %>%
left_join(firstreturnyear, by="BirdID") %>%
filter((Contact_Year==FirstReturn)) %>%
select(ContactID,BirdID,Contact_Year,FIRST_YEAR,ContAge,n,N_marked,prop.seen,FirstReturn) %>%
rename(effort=n) %>%
filter(Contact_Year>2003) %>%
filter(Contact_Year!=2008) %>% ### remove the THREE recruiters observed in 2008
filter(Contact_Year!=2005) ### remove the ONLY recruiter observed in 2005!
dim(firstreturns)
### exploratory plots
ggplot(firstreturns) +
geom_point(aes(x=Contact_Year, y=ContAge), position=position_jitter()) +
geom_smooth(aes(x=Contact_Year, y=ContAge),method="lm")
ggplot(firstreturns) +
geom_histogram(aes(x=ContAge))
### analysis: same Poisson model under three weighting schemes
m3eff<-glm(ContAge~Contact_Year, data=firstreturns, family="poisson",weights=prop.seen) ## weighted by resighting probability
summary(m3eff)
m3n<-glm(ContAge~Contact_Year, data=firstreturns, family="poisson",weights=effort) ## weighted by number of birds seen
summary(m3n)
m3<-glm(ContAge~Contact_Year, data=firstreturns, family="poisson") ## unweighted
summary(m3)
### prediction of effect size (from the effort-weighted model m3eff)
newdat<-data.frame(Contact_Year=seq(2004,2021,1),prop.seen=1)
#newdat$pred.age<-predict(m3eff, newdat=newdat, type="response", se=T)$fit
#newdat$se.age<-predict(m3eff, newdat=newdat, type="response", se=T)$se.fit
#newdat<-newdat %>% mutate(lcl=pred.age-1.96 * se.age,ucl=pred.age+1.96 * se.age)
## grab the inverse link function
ilink <- family(m3eff)$linkinv
## add fit and se.fit on the **link** scale
newdat <- bind_cols(newdat, setNames(as_tibble(predict(m3eff, newdat, se.fit = TRUE)[1:2]),
c('fit_link','se_link')))
## create the 95% interval and backtransform
newdat <- mutate(newdat,
pred.age = ilink(fit_link),
ucl = ilink(fit_link + (1.96 * se_link)),
lcl = ilink(fit_link - (1.96 * se_link)))
### plot predicted effect size
ggplot(newdat) +
geom_line(aes(x=Contact_Year, y=pred.age),colour = "blue") +
geom_ribbon(aes(x=Contact_Year, ymin=lcl, ymax=ucl), alpha = 0.2,fill = "blue") +
geom_point(data=firstreturns,aes(x=Contact_Year, y=ContAge), colour="grey45",size=0.7,position=position_jitter()) +
ylab("Age (years) of first return to Gough") +
xlab("Year") +
scale_y_continuous(breaks=seq(0,30,5), limits=c(0,32))+
scale_x_continuous(breaks=seq(2005,2021,2), limits=c(2004,2021))+
### add the bird icons
annotation_custom(TRALicon, xmin=2004, xmax=2006, ymin=25, ymax=30) +
theme(panel.background=element_rect(fill="white", colour="black"),
axis.text=element_text(size=18, color="black"),
axis.title=element_text(size=20),
panel.grid.minor = element_blank())
ggsave("C:\\STEFFEN\\MANUSCRIPTS\\in_prep\\TRAL_IPM\\Fig3.jpg", width=9, height=6) ## NOTE: machine-specific Windows path
######### ANALYSIS WITH QUANTILE REGRESSION ######
## makes no big difference, hence discarded
#install.packages("quantreg")
# library(quantreg)
#
# firstreturns %>% group_by(Contact_Year) %>% summarise(med=median(ContAge),mean=mean(ContAge))
# firstreturns %>% filter(Contact_Year==2006)
#
# ### analysis
# m3eff_rq<-rq(ContAge~Contact_Year, tau=c(0.025, 0.5, 0.975),data=firstreturns, weights=prop.seen)
# summary(m3eff_rq)
# plot(m3eff_rq,mfrow = c(1,2))
#
#
# plot(firstreturns$Contact_Year,firstreturns$ContAge,cex=.25,type="n",xlab="Year", ylab="Age")
# points(firstreturns$Contact_Year,firstreturns$ContAge,cex=.5,col="blue")
# abline(rq(ContAge~Contact_Year, data=firstreturns, weights=prop.seen),col="blue")
# abline(lm(ContAge~Contact_Year, data=firstreturns, weights=prop.seen),lty=2,col="red") #the mean regression line
# taus <- c(0.025, 0.975)
# for(i in 1:length(taus)){
# abline(rq(ContAge~Contact_Year, tau=taus[i],data=firstreturns, weights=prop.seen),col="gray")
# }
#############################################################################
## QUESTION 3: does age of first breeding change over time? ###############
#############################################################################
unique(contacts$Breeding_StatusID)
## First confirmed breeding attempt per chick-ringed bird (requires both an
## accepted breeding status and a nest description).
firstbreedyear<-contacts %>%
filter(FIRST_AGE %in% c("Chick","Fledgling")) %>%
filter(!(FIRST_YEAR==Contact_Year)) %>%
filter(Breeding_StatusID %in% c(1,1899636611,1899636612,1899636615,1899636613,105568723,1899636616,-1525788936)) %>%
filter(!(is.na(Nest_Description))) %>%
group_by(BirdID,FIRST_YEAR) %>%
summarise(FirstBreed=min(Contact_Year),AgeFirstBreed=min(ContAge))
dim(firstbreedyear)
### exploratory plots
ggplot(firstbreedyear) +
geom_point(aes(x=FirstBreed, y=AgeFirstBreed), position=position_jitter()) +
geom_smooth(aes(x=FirstBreed, y=AgeFirstBreed),method="lm")
### decadal analysis: median age at first breeding with a 90% quantile range
firstbreedyear %>%
mutate(decade=if_else(FirstBreed<2000,"1990s",if_else(FirstBreed<2015,"2000-2015","2015-2021"))) %>%
group_by(decade) %>%
summarise(med=median(AgeFirstBreed),lcl=quantile(AgeFirstBreed,0.05),ucl=quantile(AgeFirstBreed,0.95))
#################################################################################################################
## QUESTION 4: does age of all recorded bird change over time? ABANDONED BECAUSE NOT MEANINGFUL ###############
#################################################################################################################
head(contacts)
dim(contacts)
## Mean age per bird per year across ALL resightings (no breeding filter).
returns<-contacts %>%
filter(!(FIRST_YEAR==Contact_Year)) %>% ## potentially change this to remove only ringed chicks? Age %in% c("Chick","Fledgling")
group_by(BirdID,Contact_Year) %>%
summarise(ContAge=mean(ContAge)) %>%
left_join(goodyears, by="Contact_Year") %>%
filter(Contact_Year>2003)
dim(returns)
### exploratory plots
ggplot(returns) +
geom_point(aes(x=Contact_Year, y=ContAge)) +
geom_smooth(aes(x=Contact_Year, y=ContAge),method="lm")
ggplot(returns) +
geom_histogram(aes(x=ContAge))
### analysis (effort-weighted Poisson; alternatives kept for reference)
m1eff<-glm(ContAge~Contact_Year, data=returns, family="poisson",weights=prop.seen)
#m1n<-glm(ContAge~Contact_Year, data=returns, family="poisson",weights=n)
#m1<-glm(ContAge~Contact_Year, data=returns, family="poisson")
summary(m1eff)
#summary(m1n)
#summary(m1)
|
1bd4e83ace156b004f88d3444c7ae78292e03d65 | 431d7c0f7c71ad503832b20700a96e8a952807fd | /Classification/SVM.R | 0db90ccf8d4f3e52239b80a32e4f8ebd4ddb4ac6 | [] | no_license | Tao-Hu/CollectionOfML | b935ca7feda06906b3fe92fdaf9810178519151e | 5afb5aae008ae5fb5d2ec697cb4766a1610c2c8c | refs/heads/master | 2021-07-03T22:46:54.752053 | 2017-09-26T19:34:37 | 2017-09-26T19:34:37 | 104,919,353 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,013 | r | SVM.R |
## Support-vector-machine classification demos using kernlab.
library(kernlab)
# First a quadratic example: the class boundary is the parabola y = x^2
n <- 1000
x <- cbind(runif(n),runif(n))                 ## training inputs in the unit square
y <- ifelse(x[,2] > x[,1]^2,1,-1)             ## +1 above the parabola, -1 below
xp <- cbind(runif(2*n),runif(2*n))            ## test inputs (twice as many)
yp <- ifelse(xp[,2] > xp[,1]^2,1,-1)
y <- as.factor(y)                             ## factor labels make ksvm do classification
yp <- as.factor(yp)
x1 <- x                                       ## raw features
z1 <- xp
x2 <- cbind(x,x^2,x[,1]*x[,2])                ## hand-built quadratic feature expansion
z2 <- cbind(xp,xp^2,xp[,1]*xp[,2])
plot(x,col=ifelse(y=="1",1,3),pch=19,main="Training data")
legend("topright",c("Y=1","Y=-1"),pch=19,col=c(1,3),inset=0.05,bg=gray(1),cex=1.5)
# vanilladot gives a linear SVM
fit1<-ksvm(x1,y,kernel="vanilladot")          ## linear SVM on raw features
fit2<-ksvm(x2,y,kernel="vanilladot")          ## linear SVM on quadratic features
SV <- x[alphaindex(fit2)[[1]],]               ## training points that became support vectors
plot(SV,pch=19,main="Locations of the support vectors")
# Predictions:
yhat1 <- predict(fit1,z1)
yhat2 <- predict(fit2,z2)
table(yp,yhat1)                               ## confusion matrices on the test set
table(yp,yhat2)
plot(xp,col=ifelse(yhat1=="1",1,3),pch=19,main="Testing data - linear model")
legend("topright",c("Y=1","Y=-1"),pch=19,col=c(1,3),inset=0.05,bg=gray(1),cex=1.5)
plot(xp,col=ifelse(yhat2=="1",1,3),pch=19,main="Testing data - quadratic model")
legend("topright",c("Y=1","Y=-1"),pch=19,col=c(1,3),inset=0.05,bg=gray(1),cex=1.5)
# Now a harder, noisy example: P(Y=1) follows a logistic curve around a sine boundary
n <- 1000
x <- cbind(runif(n),runif(n))
eta <- x[,2] - sin(10*x[,1])                  ## signed distance from the wavy boundary
prob <- 1/(1+exp(-5*eta))                     ## logistic link -> noisy labels
y <- 2*rbinom(n,1,prob)-1                     ## rescale {0,1} draws to {-1,+1}
xp <- cbind(runif(n),runif(n))
eta <- xp[,2] - sin(10*xp[,1])
prob <- 1/(1+exp(-5*eta))
yp <- 2*rbinom(n,1,prob)-1
y <- as.factor(y)
yp <- as.factor(yp)
plot(x,col=ifelse(y=="1",1,3),pch=19,main="Training data")
legend("topright",c("Y=1","Y=-1"),pch=19,col=c(1,3),inset=0.05,bg=gray(1),cex=1.5)
# Non-linear SVM with Gaussian (rbf = radial basis function) kernel
fit<-ksvm(x,y,kernel="rbfdot")
SV <- x[alphaindex(fit)[[1]],]
plot(SV,pch=19,main="Locations of the support vectors")
# Predictions:
yhat <- predict(fit,xp)
table(yp,yhat)
plot(xp,col=ifelse(yhat=="1",1,3),pch=19,main="Testing data")
legend("topright",c("Y=1","Y=-1"),pch=19,col=c(1,3),inset=0.05,bg=gray(1),cex=1.5)
|
c36c7a54f4fab4d7fd434817eaefd32711c15cb7 | 3fec2c809df124254ccc690d8c0f3fe1bafb5499 | /teaching/ecodyn/complexfoodweb1.R | 14dd1dd7502e7877489d9f96ca63e6262c8d4153 | [
"MIT"
] | permissive | jdyeakel/jdyeakel.github.io | 4c3d7b0f935996acd5d27e3e171e198729496a4d | 59bb2b4162f6613c3ad6a8561df740d703ebd624 | refs/heads/master | 2023-08-16T21:36:38.975400 | 2023-08-09T02:44:16 | 2023-08-09T02:44:16 | 23,481,042 | 2 | 1 | MIT | 2021-07-20T18:29:37 | 2014-08-30T00:04:53 | Mathematica | UTF-8 | R | false | false | 906 | r | complexfoodweb1.R | # First define number of species
## Random-community stability demo: build a random S x S interaction matrix
## and inspect its eigenvalue spectrum.
# First define number of species
S <- 100
# And the connectance (probability that any off-diagonal interaction exists)
C <- 0.001
# And the interaction variability (sd of interaction strengths)
sigma <- 1
# Build a SxS matrix; entries are zero unless an interaction is drawn below
J <- matrix(0, S, S)
# With probability C an entry gets a N(0, sigma) interaction strength;
# otherwise it stays 0. Drawing all S*S uniforms at once replaces the old
# cell-by-cell double loop, which wrote redundant zeros into an already-zero
# matrix and left the (measure-zero) rv == C case unhandled.
occupied <- matrix(runif(S * S) < C, S, S)
J[occupied] <- rnorm(sum(occupied), mean = 0, sd = sigma)
# Set diagonal = -1 (every species is self-regulating)
diag(J) <- -1
# see the matrix
# image(J,col=c(heat.colors(20)))
# Calculate eigenvalues; the community is locally stable iff all real parts < 0
e <- eigen(J)
# FIX: eigen() returns a component named $values -- the previous `e$value`
# only worked through partial matching of list names via `$`.
plot(Re(e$values),Im(e$values),xlab="Real part",ylab="Imaginary Part",xlim=c(-10,10),ylim=c(-10,10),pch=16)
lines(rep(0,41),seq(-20,20))
lines(seq(-20,20),rep(0,41))
|
0e3367f5dd1aa6d0f49cad122980b6ade244b5eb | 9929d2c82bac38873878bb0c825e7cf5f84a1172 | /plot1.R | dda81b5b54c7f3ead3dc341a0784c52c1c47f82b | [] | no_license | o2kenobi/ExData_Plotting1 | 49c8e1c9121a18ba37c8c55eb186927cb2463fce | 9eea5e5a5d201cc7615db25ef4feda2eb5638fc4 | refs/heads/master | 2020-05-04T04:46:23.651991 | 2019-04-02T01:29:48 | 2019-04-02T01:29:48 | 178,973,435 | 0 | 0 | null | 2019-04-02T01:15:31 | 2019-04-02T01:15:30 | null | UTF-8 | R | false | false | 704 | r | plot1.R | library(lubridate)
library(data.table)
#---get data
#zfile="exdata_data_household_power_consumption.zip"
#pfile<-unzip(zfile) #<--(uncomment these 2 lines if use the zip file)
pfile<-"household_power_consumption.txt"
## FIX: this dataset encodes missing values as "?". Declaring na.strings lets
## fread type the measurement columns correctly instead of importing them as
## character and relying on as.numeric() coercion warnings later.
dt <- fread(pfile,sep=";",na.strings="?")
sdt <- subset(dt, Date=="1/2/2007" | Date=="2/2/2007") #<--keep only the two target days
sdt[,"datetime"]<-dmy_hms(paste(sdt$Date,sdt$Time,sep=" ")) #<--add a col w/ date/time
dim(sdt) #--> just a quick check
names(sdt) #--> check new col
#---make plot in png device (480 x 480 px, per assignment spec)
png(file="plot1.png",width = 480, height = 480)
hist(as.numeric(sdt$Global_active_power),col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)")
dev.off()
|
90b0dedbe7a4652864515e0fecc5aa0ccb1f6d14 | 1c7210fd8fad8f1dfb2da39d3a5efa73ea37f5f6 | /plot3.r | a11c2d508c34ae14388cec85ee982d424c65868d | [] | no_license | Chandru1102/ExData_Plotting1 | 0cc01f1d314b3e75dfae4aaf4e302496fca4c642 | 36a00bfaa7c24a4502dab1f23bf8c085249f8241 | refs/heads/master | 2021-01-09T06:53:16.538494 | 2015-05-09T21:30:18 | 2015-05-09T21:30:18 | 35,294,875 | 0 | 0 | null | 2015-05-08T18:21:55 | 2015-05-08T18:21:53 | null | UTF-8 | R | false | false | 1,560 | r | plot3.r | ## Course Project1 EXDA plot3
# This function will plot the graph for comparing 3 sub metering in a png file named plot3 with 480 x 480 pixels
plot3 <- function(datafilename) {
#unzip file of rows 2,075,259
file <- unzip(datafilename)
##Read File to dataframe also lets specify the known number of rows to make memory efficient for R.
df1 <- read.csv(file, header = TRUE, sep = ";", na.strings = "?", nrows = 2075259 )
#remove the huge file from the environment
rm(file)
## Strip the Dataframe for only the concerned dates
df1 <- subset(df1, (Date == "1/2/2007") | (Date == "2/2/2007"))
##lets remove any na which was converted from "?" while reading the file
df1 <- na.omit(df1)
#Lets open the png device with required pixels
png("plot1.png", width=480, height=480)
#lets make sure that the graph is aligned properly
par(mfrow = c(1,1))
#Draw a graph for 3 different sub metering using the subset function and then add a legend at the topright
with(df1, (plot (strptime(paste(Date, Time), format="%d/%m/%Y %H:%M"), Sub_metering_1 ,main = "" , xlab = "" , ylab = "Energy sub metering", type ="l")))
with(subset(df1, Sub_metering_2 > -1), points(strptime(paste(Date, Time), format="%d/%m/%Y %H:%M"), Sub_metering_2 , type = "l", col = "red"))
with(subset(df1, Sub_metering_3 > -1), points(strptime(paste(Date, Time), format="%d/%m/%Y %H:%M"), Sub_metering_3 , type= "l", col = "blue"))
legend("topright", lty = 1, col = c("black","red" ,"blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#close the png device
dev.off()
}
|
f5fd8148ee6f95461ba4d9e40127aabe6ac23b68 | 352f0f7988ff92257e9d0b59f64712e3364d61bf | /src/r/Dynamic publibike plot.R | c09e77853d47a2a665139a15dd88cf39339fc3c9 | [] | no_license | mferri17/usi-hackathon-19 | d14ddcda3f8241d50272da9b7b1e5923a50e685a | 3ee6e90addae97c64602db6720715b729890657a | refs/heads/master | 2023-03-16T13:55:09.425241 | 2021-03-06T02:42:42 | 2021-03-06T02:42:42 | 221,738,096 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,773 | r | Dynamic publibike plot.R | #Paolo Montemurro, Marco Ferri USI Hackaton 2019
# 6D
library(shiny)
library(leaflet)
library(dplyr)
library(leaflet.extras)
## Station-level Publibike usage data; the columns used below are station,
## lat/lon, from_*/to_* counts, frequencies and average distances, plus
## period/DoW/hour filter keys -- verify against gather_bike.csv.
data <- read.csv("gather_bike.csv")
#Define intensity and size
data$from_avgDistance <- round(data$from_avgDistance,0) ## round avg trip length to whole metres
data$to_avgDistance <- round(data$to_avgDistance,0)
## HTML hover label and click popup for every station marker
data$label <- paste0('<strong>', data$station, '</strong> <br>',"Rides starting from here:" ,data$from_count) %>% lapply(htmltools::HTML)
data$popup <- as.character(paste("Avg outcoming trip: <strong>", data$from_avgDistance,"</strong> Meters", "<br>","Avg incoming trip: <strong> " ,data$to_avgDistance,"</strong> Meters <br>"))
data2 <- data
# data2 <- subset(data, DoW=="A")
# data2 <- subset(data, period=="F")
#Define page: two maps side by side (outgoing left, ingoing right) plus filters
ui <- fluidPage(
h1("Behaviour of outgoing vs ingoing Publibike-trips by station"),
sidebarLayout(position = "left",
mainPanel(
fluidRow(
splitLayout(
cellWidths = c("50%", "50%"),leafletOutput(outputId = "mymap",height=640, width = 480),leafletOutput(outputId = "mymap2",height=640, width = 480)))),
sidebarPanel(width = 4,
h3("Filters"),
selectInput(inputId = "period",
label = "Season",
choices = c("Summer","Fall"),
selected = "Fall"),
selectInput(inputId = "dow",
label = "Days of Week",
choices = c("All","Working Days","Weekends"),
selected = "All"),
sliderInput(inputId = "hour", label = "Hour of the day:", min = 0,max = 23, step = 1, value = 7,width = "90%"))
))
## Server: draws the two base maps once, then redraws the station circles and
## legends whenever a filter (season, day-of-week, hour) changes.
server <- function(input, output) {
colour_list = c("#ffffcc","#ffeda0","#fed976","#feb24c","#fd8d3c","#fc4e2a","#e31a1c","#bd0026","#800026","#800026","#800026","#800026","#800026","#800026","#800026","#800026")
#Define palette (shared by both maps so the two legends are comparable)
pal <- colorNumeric( palette = colour_list, domain = c(0,0.48))
#Plot the maps without any circle
output$mymap <- renderLeaflet({
leaflet(data2) %>%
setView(lng = 8.9565244, lat = 46.0052856, zoom = 15) %>% #Centre the initial view
addTiles()})
output$mymap2 <- renderLeaflet({
leaflet(data2) %>%
setView(lng = 8.9565244, lat = 46.0052856, zoom = 15) %>%
addTiles() })
#Some interactivity: redraw circles whenever any input changes
observe({
dow = if(input$dow == "All") "A" else { if(input$dow == "Working Days") "D" else "W" }
season = if(input$period == "Fall") "F" else "S"
data2 <- subset(data, DoW == dow)
data2 <- subset(data2, period == season)
data_hour <- subset(data2, hour == input$hour)
leafletProxy("mymap", data = data_hour) %>%
clearShapes() %>% clearControls() %>%
addCircles(data = data_hour, lat = ~ lat, lng = ~ lon, weight = 4, color = "black",
radius = 10*(data_hour$from_count)^0.5, fillOpacity = 0.7, fillColor = ~pal(from_freq),
label = ~label, popup = ~popup) %>%
addLegend("bottomleft", pal = pal, values = data2$from_freq, title = " Outgoing relative usage")
leafletProxy("mymap2", data = data_hour) %>%
clearShapes() %>% clearControls() %>%
addCircles(data = data_hour, lat = ~ lat, lng = ~ lon, weight = 4, color = "black",
radius = 10*(data_hour$to_count)^0.5, fillOpacity = 0.7, fillColor = ~pal(to_freq),
label = ~label, popup = ~popup) %>%
## FIX: the ingoing legend previously used from_freq (copy-paste error);
## it must use the ingoing frequencies that this map's circles are coloured by.
addLegend("bottomright", pal = pal, values = data2$to_freq, title = "Ingoing relative usage")})
}
shinyApp(ui, server)
|
9e48891c98180217a3690e9ede240d5f4bfd6678 | 9b2c57aea0ebd7f031041f5ea9e98a779170d922 | /R/io.R | 65f06ee3846a6c27eab498e134cda254e11d9bc3 | [] | no_license | usobiaga/diatechServer | 4dce6771710a23f79dce42c8e2772bce46b22198 | cd93e1f379e21b25d0e1e80e16b1aaf6a9954c64 | refs/heads/master | 2021-01-10T02:06:17.782443 | 2019-02-28T21:11:17 | 2019-02-28T21:11:17 | 43,323,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,244 | r | io.R | #' Prototype of load data
#'
#' This function loads data from the database.
#'
#' @param database database name.
#' @param user database user.
#' @param password password for database.
#' @param parts ids of the sections (parts) of the project to be queried.
#' @param type data type to be queried.
#'
#' @export
#'
#' @import DBI RMySQL
#'
#' @examples
#'
#' ## sample call from server
#' \dontrun{
#' set <- loadDiatech('thedatabase', 'user', 'thepassword', '1', type = 'geodist')
#' }
#'
loadDiatech <- function(database, user, password, parts, type = c('geodist', 'lingdata', 'lemmadata')){
  write('Reading data', stdout())
  requestType <- match.arg(type)
  ## The section ids are pasted straight into the SQL text, so coerce them to
  ## integers first: this validates the input and rules out SQL injection.
  parts <- as.integer(parts)
  if (length(parts) == 0L || anyNA(parts)) stop ('parts must be one or more numeric section ids')
  con <- DBI::dbConnect(DBI::dbDriver('MySQL'), user = user, password = password, database)
  ## Register the cleanup only once the connection exists; registering it
  ## before dbConnect() (as previously) would try to disconnect a non-existent
  ## object if the connection attempt failed. The handler also replaces the
  ## old explicit dbDisconnect()/on.exit(NULL) pair at the end of the function.
  on.exit(DBI::dbDisconnect(con), add = TRUE)
  DBI::dbGetQuery(con, 'SET NAMES "utf8"')
  partList <- paste(parts, collapse = ', ')
  if (requestType == 'geodist'){
    ## Locations (with coordinates) of the projects owning the given sections.
    query <- paste0('SELECT
      dl.id,
      dl.location,
      dl.longitude,
      dl.latitude
      FROM diatech_location AS dl
      INNER JOIN diatech_section AS ds ON ds.project_id = dl.project_id
      WHERE ds.id IN (', partList, ')')
    result <- DBI::dbGetQuery(con, query)
    class(result) <- c('diaLongLat', 'data.frame')
  } else if (requestType == 'lingdata'){
    ## Answers joined to their question and location for the given sections.
    query <- paste0(
      'SELECT
      dl.id,
      dl.location,
      dq.question,
      da.answer
      FROM diatech_answer AS da
      INNER JOIN diatech_question AS dq ON da.question_id = dq.id
      INNER JOIN diatech_section AS ds ON dq.section_id = ds.id
      INNER JOIN diatech_location AS dl ON da.location_id = dl.id
      WHERE ds.id IN (', partList, ')')
    result <- DBI::dbGetQuery(con, query)
    class(result) <- c('diaLingData', 'data.frame')
  } else { ## lemmadata
    stop ('deprecated')
  }
  if (nrow(result) == 0L) stop ('The loaded dataset is empty')
  write('Data read', stdout())
  return (result)
}
|
b87da0fd1c8067dc001f17d1667481596fde2998 | 98ebff9df47cb49c0ea317308ebe40875b69977d | /tests/testthat/test-map.R | f916a3b9ff9a953475ba71526a96583990c4fa36 | [] | no_license | atibot/MsdrCapstoneMPS | 6128c0ca1a38359f6e93e559b825c525a5fc7b6d | 5da58c0978c5aff92044a66c408be30fcfa679d4 | refs/heads/master | 2021-05-05T16:58:20.704660 | 2018-01-12T17:15:40 | 2018-01-12T17:15:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 746 | r | test-map.R | library(MsdrCapstoneMPS)
context("Leaflet maps")
## Clean data
## noaa_data ships with the package under test; as.data.table() is presumably
## available via the package's imports -- verify.
raw_noaa <- as.data.table(noaa_data)
test_that("Leaflet map", {
g <- raw_noaa %>%
eq_clean_data() %>%
dplyr::filter(COUNTRY == "MEXICO" & DATE >= as.Date("2000-01-01")) %>%
eq_map(annot_col = "DATE")
## Plot is a leaflet map (leaflet htmlwidgets carry a "package" attribute)
expect_equal(attributes(g)$package, "leaflet")
})
test_that("Leaflet map labels", {
g <- raw_noaa %>%
eq_clean_data() %>%
eq_location_clean() %>%
dplyr::filter(COUNTRY == "MEXICO" & DATE >= as.Date("2000-01-01")) %>%
dplyr::mutate(popup_text = eq_create_label(.))
## html label is built correctly for the first matching event
expect_equal(g$popup_text[1], "<b>Location: </b>San Andres Tuxtla, Tuxtepec<br /> <b>Magnitude:</b> 5.9 <br />")
})
|
d206f7b80ba3fe35ade544337df86d9771847f62 | 1222e694187534fd1afc1ef0675784957101392c | /R/map_palettes.R | 56219f1f052172e0dade4d383d6bfdcc281ceddd | [] | no_license | laceym14/defendersr | 7a3755ed8b6bcf7602ce0dbe00495c59f17dbf9a | 86532217508b2bb5ef15ca2f4f9b5028cc9d6801 | refs/heads/master | 2020-09-15T15:27:22.365987 | 2019-12-27T20:53:20 | 2019-12-27T20:53:20 | 223,489,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,599 | r | map_palettes.R | # Defenders of Wildlife branded color palettes for use in scientific figure development using ggplot.
# Modelled after @drismonj tutorial. Learn more at
# https://drsimonj.svbtle.com/creating-corporate-colour-palettes-for-ggplot2
# Defining standard land ownership color palettes
## Named lookup of colour vectors, keyed by the ownership label used in plots.
## land_ownership_cols() is defined elsewhere in the package and presumably
## maps ownership-category names to hex colours -- verify in its source file.
land_palettes <- list(
`BLM` = land_ownership_cols("BLM"),
`BLM Wilderness Area` = land_ownership_cols("BLM Wilderness Area"),
`USFS` = land_ownership_cols("USFS"),
`USFS Wilderness Area` = land_ownership_cols("USFS Wilderness Area"),
`NPS` = land_ownership_cols("NPS"),
`NPS Wilderness Area` = land_ownership_cols("NPS Wilderness Area"),
`USFWS NWR` = land_ownership_cols("USFWS NWR"),
`USFWS Wilderness Area` = land_ownership_cols("USFWS Wilderness Area"),
`National Grasslands` = land_ownership_cols("National Grasslands"),
`Bureau of Reclamation` = land_ownership_cols("Bureau of Reclamation"),
`Tribal Land` = land_ownership_cols("Indian Reservation"),
`Tribal Land Wilderness Area` = land_ownership_cols("Indian Reservation Wilderness Area"),
`Military and Corps of Engineers` = land_ownership_cols("Military Reservations and Corps of Engineers"),
`Other Federal Land` = land_ownership_cols("Other Federal Land"),
`State Land` = land_ownership_cols("State Land"),
`State Wilderness Area` = land_ownership_cols("State Wilderness Area"),
`State Wildlife, Park, Outdoor Rec` = land_ownership_cols("State, County, City; Wildlife, Park, and Outdoor Recreation Areas"),
`Private Land` = land_ownership_cols("Private Land")
)
# Returns a colour-interpolating function for the named land-ownership palette.
# `palette` should be one of names(land_palettes); `reverse` flips the ramp;
# `...` is forwarded to grDevices::colorRampPalette().
land_pal <- function(palette = "main", reverse = FALSE, ...) {
  pal <- land_palettes[[palette]]
  # FIX: an unknown name (including the default "main", which has no entry in
  # land_palettes) used to yield NULL and fail later inside colorRampPalette()
  # with a cryptic error; fail fast with an informative message instead.
  if (is.null(pal)) {
    stop("Unknown palette '", palette, "'. Available palettes: ",
         paste(names(land_palettes), collapse = ", "), call. = FALSE)
  }
  if (reverse) pal <- rev(pal)
  colorRampPalette(pal, ...)
}
# Defining National Park Service standard color palettes
## nps_cols() is defined elsewhere in the package; it presumably maps the
## colour names below to NPS-brand hex values -- verify in its source file.
nps_palettes <- list(
`full palette` = nps_cols("white", "brown", "green", "light green", "gray", "blue")
)
# Build a colour-interpolating function from the named NPS palette.
# `reverse` flips the colour order; `...` passes through to colorRampPalette().
nps_pal <- function(palette = "main", reverse = FALSE, ...) {
  chosen <- nps_palettes[[palette]]
  colorRampPalette(if (reverse) rev(chosen) else chosen, ...)
}
# Defining color blind friendly color palettes
## color_blind_cols() is defined elsewhere in the package; it presumably maps
## the colour names below to hex values -- verify in its source file.
color_blind_palettes <- list(
`color blind 1` = color_blind_cols("red", "peach", "cream", "light blue", "blue", "dark blue"),
`color blind 2` = color_blind_cols("orange red", "orange", "yellow", "light blue 2", "blue 2", "dark blue 2"),
`color blind 3` = color_blind_cols("brown", "light brown", "tan", "teal", "turquoise", "green")
)
# Returns a colour-interpolating function for the named colour-blind-friendly
# palette. `palette` should be one of names(color_blind_palettes); `reverse`
# flips the ramp; `...` is forwarded to grDevices::colorRampPalette().
color_blind_pal <- function(palette = "main", reverse = FALSE, ...) {
  pal <- color_blind_palettes[[palette]]
  # FIX: an unknown name (including the default "main", which has no entry in
  # color_blind_palettes) used to yield NULL and fail later inside
  # colorRampPalette(); fail fast with an informative message instead.
  if (is.null(pal)) {
    stop("Unknown palette '", palette, "'. Available palettes: ",
         paste(names(color_blind_palettes), collapse = ", "), call. = FALSE)
  }
  if (reverse) pal <- rev(pal)
  colorRampPalette(pal, ...)
}
|
fb5412ac95277ab3aa019b8e12eeee600acf1b69 | b6f6710c76a04e0d0c3bf8579cedbd52dd8dcafb | /man/vdist_chisquare_plot.Rd | 7b7bd6963454121ba06602e2ef8103f9a8d4a8a2 | [
"MIT"
] | permissive | guhjy/vistributions | 5cc1f490eef6dc325d14f5a726cb4b5c649682c1 | db3dc91e79ee40e7e28e5f7a74e164a1c72e8c73 | refs/heads/master | 2020-04-09T10:55:02.977717 | 2018-12-02T17:22:37 | 2018-12-02T17:22:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,363 | rd | vdist_chisquare_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vdist-chisquare.R
\name{vdist_chisquare_plot}
\alias{vdist_chisquare_plot}
\alias{vdist_chisquare_perc}
\alias{vdist_chisquare_prob}
\title{Visualize chi square distribution}
\usage{
vdist_chisquare_plot(df = 3, normal = FALSE)
vdist_chisquare_perc(probs = 0.95, df = 3, type = c("lower",
"upper"))
vdist_chisquare_prob(perc, df, type = c("lower", "upper"))
}
\arguments{
\item{df}{Degrees of freedom.}
\item{normal}{If \code{TRUE}, normal curve with same \code{mean} and
\code{sd} as the chi square distribution is drawn.}
\item{probs}{Probability value.}
\item{type}{Lower tail or upper tail.}
\item{perc}{Quantile value.}
}
\description{
Visualize how changes in degrees of freedom affect the shape of
the chi square distribution. Compute & visualize quantiles out of given
probability and probability from a given quantile.
}
\examples{
# visualize chi square distribution
vdist_chisquare_plot()
vdist_chisquare_plot(df = 5)
vdist_chisquare_plot(df = 5, normal = TRUE)
# visualize quantiles out of given probability
vdist_chisquare_perc(0.165, 8, 'lower')
vdist_chisquare_perc(0.22, 13, 'upper')
# visualize probability from a given quantile.
vdist_chisquare_prob(13.58, 11, 'lower')
vdist_chisquare_prob(15.72, 13, 'upper')
}
\seealso{
\code{\link[stats]{Chisquare}}
}
|
afbc946cb5fc92c3e8a66176fbd2a543de815a63 | 5999431bdc55e047981f6206f31490c4262daaac | /code/cleaning.R | 8074b2fc786eebc921aab0aa624c8265de7cf23a | [] | no_license | osolari/C-W-RR | cd92360d93526ad37121dcb7ca8ec88ed8cd044a | a733edcb0131d640ef8de624047675c65e49de09 | refs/heads/master | 2021-01-21T06:11:18.667329 | 2016-05-27T07:20:47 | 2016-05-27T07:20:47 | 59,790,074 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,454 | r | cleaning.R | setwd("~/workspace/data/GWAS/code/")
## Load dependencies. FIX: library() (unlike require()) stops with an error
## when a package is missing, instead of returning FALSE and letting the
## script crash later with a confusing "could not find function" message.
library(ggplot2)
library(stringdist)
library(dplyr)
source("cleaning.lib.R") ## local helpers (presumably defines findSimilar used below)
library(gridExtra)
## --- Genotype data -------------------------------------------------------
## SNP score matrix (rows = variants, columns = individuals), already
## depth/quality filtered upstream (dp>=4, 70%, phred90).
spatialVariantsRAW <- read.table(file = "~/workspace/data/GWAS/variants_omid/spatial_variants/dp_4_70_percent_spatial_variants_10000_duplicate_ind_removed_top_293_scaffold_freebayes.recode.parsedScores.phred90.tdl.csv",
header = TRUE, sep = "\t", stringsAsFactors = F, check.names = FALSE, row.names = 1)
head(spatialVariantsRAW) ## quick sanity checks
dim(spatialVariantsRAW)
## Harmonise sample names: "-" -> "_", then fix two apparently misspelled samples.
colnames(spatialVariantsRAW) <- gsub("-", "_", colnames(spatialVariantsRAW))
colnames(spatialVariantsRAW)
spatialVariants <- spatialVariantsRAW %>% rename(Blain_F7 = Blaln_F7, Blain_F15 = Blaln_F15)
## --- Phenotype data ------------------------------------------------------
phenoType <- read.table(file = "~/workspace/data/GWAS/variants_omid/spatial_variants/spatialVariants.csv"
, header = TRUE, check.names = FALSE, sep = ",", stringsAsFactors = FALSE)
rownames(phenoType) <- phenoType$Clone
phenoType <- phenoType %>% dplyr::select(-Pond, -`genome sequenced`, -Clone)
colnames(phenoType) <- c("ageMatP", "ageMother1st", "numOffSpring1st","ageMother2nd", "numOffSpring2nd", "ageMatF1",
"sizeMatF1", "ageMother1stF1", "numOffSpring1stF1", "ageMother2ndF1",
"numOffSpring2ndF1", "growth")
## Match genotype sample names to phenotype clone names (findSimilar comes
## from cleaning.lib.R; presumably fuzzy matching, given stringdist is loaded),
## then keep the 10000 variants with the largest column sums.
matchedNames <- findSimilar(gtList = colnames(spatialVariants), ptList = rownames(phenoType))
spatialVariants <- t(spatialVariants)
X <- spatialVariants[matchedNames$genoType, ]
X <- X[, sort(colSums(X), decreasing = TRUE, index.return = TRUE)$ix[1:10000]]
#DATA <- cbind(matchedSpatialVariants, phenoType)
write.table(x = X, file = "~/workspace/data/GWAS/variants_omid/spatial_variants/matchedSpatialVariants.csv",
row.names = TRUE, col.names = TRUE, sep = "\t")
write.table(x = phenoType, file = "~/workspace/data/GWAS/variants_omid/spatial_variants/matchedSpatialPhenotypes.csv",
row.names = TRUE, col.names = TRUE, sep = "\t")
## --- Genotype-quality vs read-depth diagnostics --------------------------
GQ <- read.table(file = "~/workspace/data/GWAS/variants_omid/spatial_variants/dp_4_70_percent_spatial_variants.GQ.tdl.phred90.csv"
, header = FALSE)
DP <- read.table(file = "~/workspace/data/GWAS/variants_omid/spatial_variants/dp_4_70_percent_spatial_variants.DP.tdl.phred90.csv"
, header = FALSE)
gqdp <- data.frame(GQ, DP)
colnames(gqdp) <- c("GQ", "DP")
p1 <- ggplot(data = gqdp) + geom_histogram(aes(x = GQ, color = DP)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.border = element_blank(), panel.background = element_blank()) +
labs(x = "Genotype Quality (Phred Score)", y = "Count")
p2 <- ggplot(data = gqdp) + geom_histogram(aes(x = DP)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.border = element_blank(), panel.background = element_blank()) +
labs(x = "Read Depth", y = "Count") + xlim(c(0,30))
## Scatter on a 10000-row subsample to keep overplotting manageable.
p3 <- ggplot(data = gqdp[sample(dim(gqdp)[1], 10000),]) + geom_point(aes(x = DP, y = GQ), alpha = .2) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.border = element_blank(), panel.background = element_blank()) +
labs(x = "Read Depth", y = "Genotype Quality") + xlim(c(0,30))
## Combined figure: scatter fills the left column, histograms stacked right.
pdf("./hist.pdf", width = 10, height = 10)
grid.arrange(p3,p1,p2, ncol = 2, layout_matrix = cbind(c(1,1),c(2,3)))
dev.off()
ggplot(data = gqdp) + geom_histogram(aes(x = GQ, color = DP))
|
395213753f0c940d11aa8901eb6a5825146f6b0b | 23093affc35a4376384b3a47b24382e11078999b | /man/Nerve_Tibial.Rd | 71bc03432e88c23611a67e71c7c837922ea807de | [] | no_license | roderickslieker/CONQUER.db | f35d36c57fc937a1e489c6a2dd1f1a4f5287943a | 85139b8feaac3bbfe69dacec22519364ebf1c303 | refs/heads/master | 2021-07-31T18:46:35.240183 | 2021-07-27T06:16:38 | 2021-07-27T06:16:38 | 250,022,871 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 415 | rd | Nerve_Tibial.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Nerve_Tibial.R
\docType{data}
\name{Nerve_Tibial}
\alias{Nerve_Tibial}
\title{Matrix of expression data of Nerve...Tibial from gtex}
\format{
A [[matrix]] object
}
\source{
[Nerve_Tibial](https://gtexportal.org/home/datasets)
}
\usage{
Nerve_Tibial
}
\description{
GTEx Analysis V8 (dbGaP Accession phs000424.v8.p2)
}
\keyword{datasets}
|
0c942f5ea7b022f6e3cdb0d0fa8ded4bc1648aa3 | 736281b44e7f46705960e14ba97c98acdeecd6b1 | /man/ksnormal.Rd | e64c19ac4957c336fe5fea1d58b4e0a811ef3ae3 | [] | no_license | abusjahn/wrappedtools | 97dd0d77514cd42affbaab7679d2fc2eab8f8358 | aca66340603ef4a5ba0e0ebee76a06ba6610a33f | refs/heads/main | 2023-08-24T12:09:11.635831 | 2023-08-04T16:37:30 | 2023-08-04T16:37:30 | 132,574,321 | 3 | 2 | null | 2023-09-01T18:01:05 | 2018-05-08T07:55:20 | R | UTF-8 | R | false | true | 589 | rd | ksnormal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tests.R
\name{ksnormal}
\alias{ksnormal}
\title{Kolmogorov-Smirnov-Test against Normal distribution}
\usage{
ksnormal(x)
}
\arguments{
\item{x}{Vector of data to test.}
}
\value{
p.value from \link{ks.test}.
}
\description{
\code{ksnormal} is a convenience function around \link{ks.test}, testing against
Normal distribution
}
\examples{
# original ks.test:
ks.test(
x = mtcars$wt, pnorm, mean = mean(mtcars$wt, na.rm = TRUE),
sd = sd(mtcars$wt, na.rm = TRUE)
)
# wrapped version:
ksnormal(x = mtcars$wt)
}
|
9a9bd81db620e56bd0ef39523d0bc4ff267b05ce | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/lhs/examples/optSeededLHS.Rd.R | 98f515b461943bc160241229e98f160e679dd1c6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 209 | r | optSeededLHS.Rd.R | library(lhs)
### Name: optSeededLHS
### Title: Optimum Seeded Latin Hypercube Sample
### Aliases: optSeededLHS
### Keywords: design
### ** Examples
# Random 4-run, 3-factor Latin hypercube (lhs::randomLHS), used as the seed design.
a <- randomLHS(4,3)
a
# Augment the seed design with 2 extra points and optimize the result
# (2 sweeps, eps = .1); see ?optSeededLHS in the lhs package.
optSeededLHS(a, 2, 2, .1)
|
a00e7ad79a5bce817c2eb463f180ed6422d03afe | 7482cf4dba3278e3daae54115b791e5b1fd64291 | /rev.R | 534afb11a15a799cba1f8c3c9c7a0076a5287b3c | [] | no_license | Prathyusha-L/100DaysofCode---Day-58-Basic- | 7559436d81342e0f172a718bd64e9b0de9aa7bc4 | e9a3ea4a65fe09fec4a3a0a1430f50e7b5465b1e | refs/heads/main | 2023-07-12T12:35:58.657322 | 2021-08-22T05:20:50 | 2021-08-22T05:20:50 | 398,720,517 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 953 | r | rev.R | m=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3,byrow=TRUE)
# Scratch script demonstrating basic R: matrices, factors, comparison and
# logical operators, conditionals, switch(), and loops.
#
# Fixes vs. the original: the bare prose "truth table" notes are now
# comments (they were plain text and caused parse errors), the dangling
# `if` conditions have explicit bodies, the trailing comma in switch()
# is removed, and the stray `prn` token at the end is deleted.

# A 2x3 matrix filled row by row.
m <- matrix(c(1, 2, 3, 4, 5, 6), nrow = 2, ncol = 3, byrow = TRUE)
print(m)

# Factors: each distinct value becomes a level.
f <- c(1, 2, 3, 4, 5, 6, 7, 7, 8)
a <- factor(f)
print(a)
nlevels(a)  # 8 distinct levels
class(a)    # "factor"

a <- 4 + 5
print(a)

# Comparison operators return logicals.
a <- 5
b <- 6
a < b

# Truth table for `&` (logical AND):
#   A B  RESULT
#   T T  T
#   T F  F
#   F T  F
#   F F  F
A <- FALSE
B <- TRUE
A & B

x <- 34
if (x > 30 & x < 40) {
  x <- 40
}
if (x > 4 | x < 20) {
  # condition is TRUE here because x is 40
  print("x is greater than 4 or less than 20")
}

# Truth table for `!` (logical NOT):
#   NOT TRUE  -> FALSE
#   NOT FALSE -> TRUE

# General forms of conditional statements:
#   if (condition) { ... }
#   if (condition) { ... } else { ... }
#   if (condition) { ... } else if (condition) { ... } else { ... }
x <- 0
if (x > 0) {
  print("positive number")
}
if (x > 100) {
  print("positive number")
} else {
  "negative number"
}
if (x > 100) {
  print("positive number")
} else if (x < 0) {
  "negative number"
} else {
  print("number is zero")
}

# switch() dispatches on a single string.
x <- 3
y <- 6
OP <- "/"
switch(OP,
       "+" = x + y,
       "-" = x - y,
       "*" = x * y,
       "/" = x / y,
       "^" = x^y)

# repeat loop: runs until an explicit break.
x <- 1
repeat {
  cat(x)
  cat(" ")
  x <- x + 1
  if (x > 10) break
}

# for loop over a sequence.
for (x in 1:10) {
  cat(x, " ")
}
|
79303d3467f3a27078547741d8671e57613263ba | 392fe0cc9fac49f1c101d32288aa3f661f4e5a10 | /runDataCompression.R | d1fe453d5f84b8fb4c058153f266a4a71ee6f275 | [] | no_license | mwinnel/ARC-Project | bf3b8c053131cd22622dd81c4b9d290e972de4ce | 879f04cc419fe39d92ae416dfb5217047b12a5fd | refs/heads/master | 2021-01-24T20:25:40.735664 | 2015-04-08T03:41:20 | 2015-04-08T03:41:20 | 28,791,006 | 0 | 0 | null | 2015-03-24T06:04:47 | 2015-01-05T00:42:06 | R | UTF-8 | R | false | false | 4,533 | r | runDataCompression.R | #------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------
#  PROJECT:  Griffith University ARC Linkage: WQAIS
#  Author:  Melissa Winnel
#
#  File: runDataCompression.R
#
#  Function: Data Compression algorithm - follows the Douglas Algorithm
#
#  For each configured sensor, read the last `num.points` observations
#  from its data file, compress them with run.douglas() (defined in
#  douglasFunction.R), and write the compressed series to
#  "dataset_douglas_<sensor>.dat".
#------------------------------------------------------------------------------------------
rm(list = ls(all = TRUE))
library(data.table)
source("douglasFunction.R")
## include douglas algorithms - or open R Work Space with them included.
# epsilon is used to set the compression strength. It is based on the data range.
# lastoneupdate <<- FALSE

# Minute at which the previous run stopped; minute.dat holds one value.
# (The original overwrote this with 0 for testing; the scalar is now
# extracted directly so both branches below work.)
start.minute <- read.table("minute.dat", header = FALSE)[1, 1]

# Select the rows of `dat` (loaded via douglasFunction.R / the saved
# workspace) that still need processing.
# FIX: the original index `dim(dat)[1] - (2880*2):dim(dat)[1]` lacked
# parentheses, so `:` bound before `-` and produced negative indices.
if (start.minute == 0 && nrow(dat) > 2880 * 2) {
  data.set <- dat[(nrow(dat) - 2880 * 2):nrow(dat), ]
} else {
  data.set <- dat[dat$MINUTES > start.minute, ]
}

if (nrow(data.set) <= 3) {
  ## exit - nothing to update
  # NOTE(review): the original only printed and carried on; a batch run
  # would need quit() here to actually stop.
  print("nothing to update")
}

## LOOP THROUGH AND READ EACH FILE AND COMPRESS
#-----------------------------------------------------------------------------------------
# Some Globals - PLEASE DO NOT CHANGE !!!!!!!!!!!!!!!!!!!!!!!!!
#-----------------------------------------------------------------------------------------
sensor.names.all <- c("TempA", "TempC", "TempC2",
                      "pH", "pH2", "Cond", "Cond2", "TurbA",
                      "TurbA2", "TurbS", "TurbS2")
alert.func.name <- c("TempA1", "TempC1", "TempC2",
                     "pH1", "pH2", "EC1", "EC2", "TurbA",
                     "TurbA2", "TurbS1", "TurbS2")
file.names <- c("TempA1", "TempC1", "TempC2",
                "pH1", "pH2", "Cond1", "Cond2", "TurbA1",
                "TurbA2", "TurbS1", "TurbS2")

#-----------------------------------------------------------------------------------------
# CONFIG GLOBALS -- Edit sensor.config to be the sensors to include
#-----------------------------------------------------------------------------------------
sensor.config <- c("TempA", "TempC", "pH", "Cond", "TurbS", "TurbA",
                   "TempC2", "pH2", "Cond2", "TurbS2", "TurbA2")  # order matters!!! effects how plots output
# kPeriod <- 1440

dataset <- sapply(sensor.config, function(x) NULL)  # one slot per sensor
name.i <- match(sensor.config, sensor.names.all)    # index to get the sensor name variations
n <- length(sensor.config)                          # how many sensors in this unit

######### READ IN FILES - LAST num.points
num.points <- 1440  ## HOW MANY POINTS TO READ IN ??

for (j in seq_len(n)) {
  file <- paste0("dataset_", file.names[[name.i[j]]], ".dat")
  df.names <- c("Date", "Time", sensor.names.all[name.i[j]], "MINUTES")
  dataset.previous <- read.table(file, col.names = df.names)
  lenp <- nrow(dataset.previous)
  ## (to do) CHECK IF FILE EXISTS
  # Re-read only the trailing num.points raw lines of the file.
  newdataset <- scan(file, "", skip = max(lenp - num.points, 0),
                     nlines = num.points, sep = "\n")

  # Parse each record into its first four fields: Date, Time, value, minutes.
  # FIX: build the matrix in one step instead of rbind-ing inside a loop
  # (the original grew set.df row by row, which is O(n^2)).
  set.df <- t(vapply(strsplit(newdataset, " "),
                     function(a) a[1:4], character(4)))

  ## dataset is our main data frame - do not delete or remove
  dataset[[j]] <- cbind(as.data.frame(set.df[, 1:2]),
                        as.double(set.df[, 3]), as.double(set.df[, 4]))
  colnames(dataset[[j]]) <- df.names

  ################ RUN DOUGLAS
  compressed <- run.douglas(dataset[[j]])

  ############# SAVE RESULTS
  ## APPEND TRUE OR FALSE ???
  write.table(compressed,
              paste0("dataset_douglas_", file.names[[name.i[j]]], ".dat"),
              row.names = FALSE, append = FALSE, col.names = FALSE)
  # write.table(last.minute, "minute.dat", row.names = FALSE, append = FALSE, col.names = FALSE)
}

#plot(dataset.TempC1[,4],dataset.TempC1[,3],typ="l")
#points(result, col="red")

# FIX: the original read `compressed.data`, which was never defined; the
# loop above stores its result in `compressed` (the last sensor processed).
last.minute <- compressed[nrow(compressed), 4]

lapply(dataset, head, n = 1)

# do some cleanup // note: does moving to function auto cleans at return...(check this)
rm(dataset.previous, newdataset, set.df, lenp, file, df.names)

# NOTE(review): the trailing plotting code below referenced objects that
# do not exist at this point (`dataset.previous` after rm(), plus the
# undefined `alarms`, `kPeriod`, `data` and `result`), so it always
# errored.  It is kept commented out until those inputs are available.
# for (j in seq_len(n)) {
#   ## plot data
#   plotting(dataset.previous[[j]], 3, alarms[[j]], kPeriod, 241, 241,
#            label = sensor.names.all[name.i[j]])
# }
# win.graph()
# plot(data, typ = "l", axes = FALSE)
# lines(result, col = "blue")
# points(result, col = "blue")
# axis(2)
# box()
# LabelTimeAxis()
|
a515d8d3f03b3a7b830975898e975b5946c88104 | d87690700ec1de1392c0a66f24da07c6a00c653d | /R/certificates.R | e5b886d43303b9ee72d11216942ca278e74628be | [
"MIT"
] | permissive | Peidyen/AzureKeyVault | f0d7993f77b233c00334b41e61d949c2ed8158db | 1dcef3780fff06dface756b88418c836fe939f5e | refs/heads/master | 2020-05-23T07:39:16.190116 | 2019-05-12T21:59:47 | 2019-05-12T21:59:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,156 | r | certificates.R | #' Certificates in Key Vault
#'
#' This class represents the collection of certificates stored in a vault. It provides methods for managing certificates, including creating, importing and deleting certificates, and doing backups and restores. For operations with a specific certificate, see [certificate].
#'
#' @docType class
#'
#' @section Methods:
#' This class provides the following methods:
#' ```
#' create(name, subject, x509=cert_x509_properties(), issuer=cert_issuer_properties(),
#' key=cert_key_properties(), format=c("pem", "pfx"),
#' expiry_action=cert_expiry_action(),
#' attributes=vault_object_attrs(),
#' ..., wait=TRUE)
#' import(name, value, pwd=NULL,
#' attributes=vault_object_attrs(),
#' ..., wait=TRUE)
#' get(name)
#' delete(name, confirm=TRUE)
#' list()
#' backup(name)
#' restore(backup)
#' get_contacts()
#' set_contacts(email)
#' add_issuer(issuer, provider, credentials=NULL, details=NULL)
#' remove_issuer(issuer)
#' get_issuer(issuer)
#' list_issuers()
#' ```
#' @section Arguments:
#' - `name`: The name of the certificate.
#' - `subject`: For `create`, The subject or X.500 distinguished name for the certificate.
#' - `x509`: Other X.509 properties for the certificate, such as the domain name(s) and validity period. A convenient way to provide this is via the [cert_x509_properties] helper function.
#' - `issuer`: Issuer properties for the certificate. A convenient way to provide this is via the [cert_issuer_properties] helper function. The default is to specify a self-signed certificate.
#' - `key`: Key properties for the certificate. A convenient way to provide this is via the [cert_key_properties] helper function.
#' - `format`: The format to store the certificate in. Can be either PEM or PFX, aka PKCS#12. This also determines the format in which the certificate will be exported (see [certificate]).
#' - `expiry_action`: What Key Vault should do when the certificate is about to expire. A convenient way to provide this is via the [cert_expiry_action] helper function.
#' - `attributes`: Optional attributes for the secret. A convenient way to provide this is via the [vault_object_attrs] helper function.
#' - `value`: For `import`, the certificate to import. This can be the name of a PFX file, or a raw vector with the contents of the file.
#' - `pwd`: For `import`, the password if the imported certificate is password-protected.
#' - `...`: For `create` and `import`, other named arguments which will be treated as tags.
#' - `wait`: For `create` and `import`, whether to wait until the certificate has been created before returning. If FALSE, you can check on the status of the certificate via the returned object's `sync` method.
#' - `backup`: For `restore`, a string representing the backup blob for a key.
#' - `email`: For `set_contacts`, the email addresses of the contacts.
#' - `issuer`: For the issuer methods, the name by which to refer to an issuer.
#' - `provider`: For `add_issuer`, the provider name as a string.
#' - `credentials`: For `add_issuer`, the credentials for the issuer, if required. Should be a list containing the components `account_id` and `password`.
#' - `details`: For `add_issuer`, the organisation details, if required. See the [Azure docs](https://docs.microsoft.com/en-us/rest/api/keyvault/setcertificateissuer/setcertificateissuer#administratordetails) for more information.
#'
#' @section Value:
#' For `get`, `create` and `import`, an object of class `stored_certificate`, representing the certificate itself.
#'
#' For `list`, a vector of key names.
#'
#' For `add_issuer` and `get_issuer`, an object representing an issuer. For `list_issuers`, a vector of issuer names.
#'
#' For `backup`, a string representing the backup blob for a certificate. If the certificate has multiple versions, the blob will contain all versions.
#'
#' @seealso
#' [certificate], [cert_key_properties], [cert_x509_properties], [cert_issuer_properties], [vault_object_attrs]
#'
#' [Azure Key Vault documentation](https://docs.microsoft.com/en-us/azure/key-vault/),
#' [Azure Key Vault API reference](https://docs.microsoft.com/en-us/rest/api/keyvault)
#'
#' @examples
#' \dontrun{
#'
#' vault <- key_vault("mykeyvault")
#'
#' vault$certificates$create("mynewcert", "CN=mydomain.com")
#' vault$certificates$list()
#' vault$certificates$get("mynewcert")
#'
#' # specifying some domain names
#' vault$certificates$create("mynewcert", "CN=mydomain.com",
#' x509=cert_x509_properties(dns_names=c("mydomain.com", "otherdomain.com")))
#'
#' # specifying a validity period of 2 years (24 months)
#' vault$certificates$create("mynewcert", "CN=mydomain.com",
#' x509=cert_x509_properties(validity_months=24))
#'
#' # setting management tags
#' vault$certificates$create("mynewcert", "CN=mydomain.com", tag1="a value", othertag="another value")
#'
#' # importing a cert from a PFX file
#' vault$certificates$import("importedcert", "mycert.pfx")
#'
#' # backup and restore a cert
#' bak <- vault$certificates$backup("mynewcert")
#' vault$certificates$delete("mynewcert", confirm=FALSE)
#' vault$certificates$restore(bak)
#'
#' # set a contact
#' vault$certificates$set_contacts("username@mydomain.com")
#' vault$certificates$get_contacts()
#'
#' # add an issuer and then obtain a cert
#' # this can take a long time, so set wait=FALSE to return immediately
#' vault$certificates$add_issuer("newissuer", provider="OneCert")
#' vault$certificates$create("issuedcert", "CN=mydomain.com",
#' issuer=cert_issuer_properties("newissuer"),
#' wait=FALSE)
#'
#' }
#' @name certificates
#' @aliases certificates certs
#' @rdname certificates
NULL
# R6 class representing the certificate store of an Azure Key Vault.
# One instance is exposed as the `certificates` field of a key vault
# object; each method wraps a REST call under the vault's
# /certificates/ endpoint (all calls funnel through do_operation()).
# See the roxygen block above for the full user-facing documentation.
vault_certificates <- R6::R6Class("vault_certificates",

public=list(

    # OAuth token used to authenticate each REST call
    token=NULL,
    # base URL of the vault (httr url object)
    url=NULL,

    initialize=function(token, url)
    {
        self$token <- token
        self$url <- url
    },

    # Create a new certificate.  Extra named arguments in `...` are
    # stored as tags.  When wait=TRUE, polls every 5 seconds until the
    # certificate contents (`cer`) become available.
    create=function(name, subject, x509=cert_x509_properties(), issuer=cert_issuer_properties(),
                    key=cert_key_properties(),
                    format=c("pem", "pfx"),
                    expiry_action=cert_expiry_action(),
                    attributes=vault_object_attrs(),
                    ..., wait=TRUE)
    {
        # map the user-facing format choice onto the MIME type the REST API expects
        format <- if(match.arg(format) == "pem")
            "application/x-pem-file"
        else "application/x-pkcs12"

        policy <- list(
            issuer=issuer,
            key_props=key,
            secret_props=list(contentType=format),
            x509_props=c(subject=subject, x509),
            lifetime_actions=expiry_action,
            attributes=attributes
        )
        body <- list(policy=policy, attributes=attributes, tags=list(...))

        op <- construct_path(name, "create")
        self$do_operation(op, body=body, encode="json", http_verb="POST")

        cert <- self$get(name)
        if(!wait)
            message("Certificate creation started. Call the sync() method to update status.")
        else while(is.null(cert$cer))  # poll until issuance completes
        {
            Sys.sleep(5)
            cert <- self$get(name)
        }
        cert
    },

    # Retrieve a certificate (latest version unless `version` is given)
    # as a stored_cert object.
    get=function(name, version=NULL)
    {
        op <- construct_path(name, version)
        stored_cert$new(self$token, self$url, name, version, self$do_operation(op))
    },

    # Delete a certificate, prompting for confirmation unless confirm=FALSE.
    delete=function(name, confirm=TRUE)
    {
        if(delete_confirmed(confirm, name, "certificate"))
            invisible(self$do_operation(name, http_verb="DELETE"))
    },

    # List the names of all certificates in the vault (paged REST result).
    list=function()
    {
        sapply(get_vault_paged_list(self$do_operation(), self$token),
               function(props) basename(props$id))
    },

    # Return a backup blob (string) for the named certificate.
    backup=function(name)
    {
        self$do_operation(construct_path(name, "backup"), http_verb="POST")$value
    },

    # Restore a certificate from a backup blob.
    # FIX: the method previously took an unused leading argument `name`,
    # which made the documented call restore(backup) fail; the signature
    # now matches the documented interface.
    restore=function(backup)
    {
        stopifnot(is.character(backup))
        self$do_operation("restore", body=list(value=backup), encode="json", http_verb="POST")
    },

    # Import a certificate from a PFX file (or a raw vector holding its
    # contents), optionally password-protected.  Polls as in create().
    import=function(name, value, pwd=NULL,
                    attributes=vault_object_attrs(),
                    ..., wait=TRUE)
    {
        # a filename is read into a raw vector before uploading
        if(is.character(value) && length(value) == 1 && file.exists(value))
            value <- readBin(value, "raw", file.info(value)$size)

        body <- list(value=value, pwd=pwd, attributes=attributes, tags=list(...))

        self$do_operation(construct_path(name, "import"), body=body, encode="json", http_verb="POST")
        cert <- self$get(name)
        if(!wait)
            message("Certificate creation started. Call the sync() method to update status.")
        else while(is.null(cert$cer))
        {
            Sys.sleep(5)
            cert <- self$get(name)
        }
        cert
    },

    # Vault-wide certificate contact list: get, set and delete.
    get_contacts=function()
    {
        self$do_operation("contacts")
    },

    set_contacts=function(email)
    {
        df <- data.frame(email=email, stringsAsFactors=FALSE)
        self$do_operation("contacts", body=list(contacts=df), encode="json", http_verb="PUT")
    },

    delete_contacts=function()
    {
        invisible(self$do_operation("contacts", http_verb="DELETE"))
    },

    # Certificate issuers known to this vault: add, get, remove, list.
    add_issuer=function(issuer, provider, credentials=NULL, details=NULL)
    {
        op <- construct_path("issuers", issuer)
        body <- list(provider=provider, credentials=credentials, org_details=details)
        self$do_operation(op, body=body, encode="json", http_verb="PUT")
    },

    get_issuer=function(issuer)
    {
        op <- construct_path("issuers", issuer)
        self$do_operation(op)
    },

    remove_issuer=function(issuer)
    {
        op <- construct_path("issuers", issuer)
        invisible(self$do_operation(op, http_verb="DELETE"))
    },

    list_issuers=function()
    {
        sapply(self$do_operation("issuers")$value, function(x) basename(x$id))
    },

    # Low-level REST call used by all other methods: `op` is appended to
    # the vault's /certificates/ path and `options` become query params.
    do_operation=function(op="", ..., options=list())
    {
        url <- self$url
        url$path <- construct_path("certificates", op)
        url$query <- options
        call_vault_url(self$token, url, ...)
    },

    print=function(...)
    {
        url <- self$url
        url$path <- "certificates"
        cat("<key vault endpoint '", httr::build_url(url), "'>\n", sep="")
        invisible(self)
    }
))
|
57ef8f898185961766c31baf5277759d368d0804 | 2da2406aff1f6318cba7453db555c7ed4d2ea0d3 | /bin/snipping/amsfast2.R | f2b219e60b9e384dae1bc8cf57092826ed0c67e7 | [] | no_license | rpruim/fastR2 | 4efe9742f56fe7fcee0ede1c1ec1203abb312f34 | d0fe0464ea6a6258b2414e4fcd59166eaf3103f8 | refs/heads/main | 2022-05-05T23:24:55.024994 | 2022-03-15T23:06:08 | 2022-03-15T23:06:08 | 3,821,177 | 11 | 8 | null | null | null | null | UTF-8 | R | false | false | 387,988 | r | amsfast2.R | ## ----fastR-setup,include=FALSE, cache = FALSE----------------------------
# Global knitr/book-build setup: choose which chapters/appendices to
# compile, load required packages, and set default chunk options,
# figure-size templates, and output hooks.
includeChapter <- 1:7 %in% (1:7) # [-6]
includeApp <- 1:4 %in% 1:3
require(MASS) # make sure this comes before dplyr loads
require(Matrix) # make sure this comes before mosaic loads
require(fastR2)
require(mosaic)
theme_set(theme_bw())
require(knitr)
require(xtable)
options(xtable.floating = FALSE)
opts_knit$set(width=74)
opts_knit$set(self.contained=FALSE)
# Default chunk options: small cached PDF (CMYK) figures, boxed chunk labels.
opts_chunk$set(
  digits = 3,
  dev="pdf", # don't need EPS files anymore
  dev.args=list(colormodel="cmyk"),
  comment="##",
  prompt=FALSE,
  size="small",
  cache=TRUE,
  cache.path='cache/c-',
  cache.lazy=FALSE,
  tidy=FALSE,
  fig.width=8*.45,
  fig.height=6*.45,
  fig.show="hold",
  fig.align="center",
  out.width=".47\\textwidth",
  # background="gray88",
  # background="white",
  # background="transparent",
  boxedLabel=TRUE
)
# Reusable figure-size templates, selected per chunk via opts.label.
opts_template$set(fig3 = list(fig.height = 7*.40, fig.width = 8*.40, out.width=".31\\textwidth"))
opts_template$set(figtall = list(fig.height = 8*.45, fig.width = 8*.45, out.width=".47\\textwidth"))
opts_template$set(fig1 = list(fig.height = 3*0.9, fig.width = 8 * 0.9, out.width=".95\\textwidth"))
opts_template$set(figbig = list(fig.height = 9*0.9, fig.width = 12*0.9, out.width=".95\\textwidth"))
# Chunk hook: a chunk with a `seed` option gets its RNG seeded before running.
knit_hooks$set(seed = function(before, options, envir) {
  if (before) set.seed(options$seed)
})
# Chunk hook: temporarily change options(digits) for a chunk, restoring 3 after.
knit_hooks$set(digits = function(before, options, envir) {
  if (before) {
    options(digits = options$digits)
  } else {
    options(digits = 3)
  }
})
# Document hook: rewrite the LaTeX preamble's color/shading definitions.
# NOTE(review): the sub() result on the first line is discarded -- only
# the gsub() value is returned.  Presumably intentional, but confirm.
knit_hooks$set(
  document = function(x) {
    sub('\\usepackage[]{color}', '\\usepackage{xcolor}', x, fixed = TRUE)
    gsub(
      "\\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}",
      "\\definecolor{shadecolor}{gray}{0.8}", x, fixed = TRUE)
  }
)
# Custom chunk output hook: wraps each chunk's LaTeX output in
# kframe/knitrout environments (mirroring knitr's stock LaTeX hook,
# via several knitr::: internals) and, when the boxedLabel chunk option
# is set and the chunk is named and echoed, overlays a small boxed
# chunk label in the right margin.
knit_hooks$set(chunk = function (x, options) {
  # build the boxed chunk-label decoration, if requested for this chunk
  if ( !is.null(options$boxedLabel) && options$boxedLabel &&
       ! grepl("unnamed-chunk", options$label) &&
       (is.null(options$echo) || options$echo) ) {
    labeling <- paste0(
      "\\endgraf\\nobreak\\null\\endgraf\\penalty-2\\kern-.5\\baselineskip",
      "\n\n",
      "\\hfill \\makebox[0pt][r]{\\fbox{\\tiny ",
      options$label,
      "}}",
      "\\endgraf",
      "\\kern-4.5ex\n\n")
  } else {
    labeling <- ""
  }
  # as-is output gets no framing or color setup
  ai = knitr:::output_asis(x, options)
  col = if (!ai)
    paste(knitr:::color_def(options$background),
          if (!knitr:::is_tikz_dev(options)) "\\color{fgcolor}",
          sep = "")
  k1 = paste(col, "\\begin{kframe}\n", sep = "")
  k2 = "\\end{kframe}"
  x = knitr:::.rm.empty.envir(paste(k1, labeling, x, k2, sep = ""))
  # honor the chunk's font size option
  size = if (options$size == "normalsize")
    ""
  else sprintf("\\%s", options$size)
  if (!ai)
    x = sprintf("\\begin{knitrout}%s\n%s\n\\end{knitrout}",
                size, x)
  # with the split option, write the chunk to its own .tex file and \input it
  if (options$split) {
    name = knitr:::fig_path(".tex", options)
    if (!file.exists(dirname(name)))
      dir.create(dirname(name))
    cat(x, file = name)
    sprintf("\\input{%s}", name)
  }
  else x
}
)
# Lattice theme and console-output settings used throughout the book.
blackAndWhite = TRUE
fastRlty = rep(1,20)
fastRlty = c(1,2,5,6,1,2,5,6,1,2,5,6)  # line types; overrides the rep() above
trellis.par.set(theme=col.whitebg())
# trellis.par.set(theme=col.fastR(bw=blackAndWhite),lty=fastRlty)
trellis.par.set(theme=col.fastR())
# options(width=70)
options(continue=" ")
options(str = strOptions(strict.width = "wrap"))
options(show.signif.stars=FALSE)
options(digits=3)
# # omit some of the output from summary( lm( ) )
# print.summary.lm <-
#   function (x, digits = max(3L, getOption("digits") - 3L), symbolic.cor = x$symbolic.cor,
#             signif.stars = getOption("show.signif.stars"), ...)
#   {
#     output <- capture.output( stats:::print.summary.lm(x, digits=digits, symbolic.cor = symbolic.cor,
#                                                        signif.stars=signif.stars, ...) )
#     l <- sapply( output, nchar )
#     w1 <- min( grep("Call", output) )
#     w2 <- min( grep("Resid", output) )
#     w3 <- min( grep("Coef", output) )
#     rows <- 1:length(output)
#     keep <- (rows >= w1 & rows < w2) | (rows >=w3)
#     cat( paste(output[keep], collapse="\n") )
#     return(invisible(x))
#   }
#
# print.summary.glm <-
#   function (x, digits = max(3L, getOption("digits") - 3L), symbolic.cor = x$symbolic.cor,
#             signif.stars = getOption("show.signif.stars"), ...)
#   {
#     output <- capture.output( stats:::print.summary.glm(x, digits=digits, symbolic.cor = symbolic.cor,
#                                                         signif.stars=signif.stars, ...) )
#     l <- sapply( output, nchar )
#     w1 <- min( grep("Call", output) )
#     w2 <- min( grep("Resid", output) )
#     w3 <- min( grep("Coef", output) )
#     rows <- 1:length(output)
#     keep <- (rows >= w1 & rows < w2) | (rows >=w3)
#     cat( paste(output[keep], collapse="\n") )
#     return(invisible(x))
#   }
## ----show_source, include=FALSE------------------------------------------
# Render R code as syntax-highlighted LaTeX, each line wrapped in \code{}.
#
# When string.input is FALSE (the default), the unevaluated expression
# passed as `x` is captured and deparsed; when TRUE, `x` is taken to be
# character data already containing the code.  The text is run through
# knitr's internal LaTeX highlighter with the given prompt/highlight
# settings.
show_source <-
  function(x, prompt = FALSE, highlight = TRUE, string.input = FALSE) {
    code <- if (string.input) x else deparse(substitute(x))
    hl_opts <- list(engine = "R", prompt = prompt, highlight = highlight)
    highlighted <- knitr:::hilight_source(code, "latex", options = hl_opts)
    paste0("\\code{", highlighted, "}")
  }
## ----amsPreface, child="amsPreface.Rnw", eval=TRUE-----------------------
# Purled code from the Preface and the opening of the Data chapter.
# Lines beginning "## ----" are knitr chunk headers; chunks marked
# eval=FALSE appear here as commented-out code.
## ----snippet, eval=FALSE-------------------------------------------------
## snippet("snippet")
## ----snippet2, eval=FALSE------------------------------------------------
## snippet("snippet", exec = FALSE)
## ----IntroChapter, child="IntroChapter.Rnw", eval=TRUE-------------------
## ----intro-setup, include = FALSE----------------------------------------
knitr::opts_chunk$set(cache.path = "cache/Intro-")
## ----Data, child="Data.Rnw", eval=includeChapter[1]----------------------
## ----data-setup, include = FALSE, cache = FALSE--------------------------
knitr::opts_chunk$set(cache.path = "cache/Data-")
require(ggformula)
## ------------------------------------------------------------------------
require(fastR2)
## ------------------------------------------------------------------------
library(fastR2)
## ----data01--------------------------------------------------------------
# Inspecting the iris data frame several ways.
require(fastR2) # load fastR2 and its dependencies
glimpse(iris) # glimpse lives in the dplyr package
## ----data02--------------------------------------------------------------
head(iris, n = 3) # first three rows
## ----data03--------------------------------------------------------------
tail(iris, n = 3) # last three rows
## ----data04--------------------------------------------------------------
iris[50:51, 3:5] # 2 rows and 3 columns
## ----data05--------------------------------------------------------------
sample(iris, 6) # this requires mosaic::sample()
## ----eval=FALSE----------------------------------------------------------
## snippet("data01")
## ----eval=FALSE----------------------------------------------------------
## snippet("data01", exec = FALSE)
## ----eval = FALSE--------------------------------------------------------
## snippet("death-penalty")
## ----iris-vector01-------------------------------------------------------
iris$Sepal.Length # get one variable and display as vector
## ----iris-vector02-------------------------------------------------------
with(iris, Species) # alternative method
## ----data-reload---------------------------------------------------------
# Clobber the iris name, then restore the data set with data().
iris <- "An iris is a beautiful flower."
str(iris) # what is the structure of iris?
data(iris) # explicitly reload the data set
str(iris) # now it is a data frame again
## ----eval=FALSE----------------------------------------------------------
## ### Simpler version
## goal( ~ x, data = mydata)
##
## ### Fancier version:
## goal(y ~ x | z, data = mydata)
##
## ### Unified version:
## goal(formula, data = mydata)
## ----eval=FALSE----------------------------------------------------------
## Sepal.Length ~ Sepal.Width
## ----scatter01, fig.show = "hide"----------------------------------------
# Purled from the Data chapter: ggformula scatter plots, tally tables,
# and the first histogram examples for the iris data.
gf_point(Sepal.Length ~ Sepal.Width, data = iris)
## ----scatter02, fig.keep = "none"----------------------------------------
# Two ways to add facets to a scatter plot
gf_point(Sepal.Length ~ Sepal.Width | Species, data = iris)
gf_point(Sepal.Length ~ Sepal.Width, data = iris) %>%
  gf_facet_wrap( ~ Species)
## ----scatter03, fig.keep = "none"----------------------------------------
gf_point(Sepal.Length ~ Sepal.Width, data = iris,
         color = ~ Species, shape = ~ Species, alpha = 0.7)
## ----scatter01-fig, echo=FALSE-------------------------------------------
gf_point(Sepal.Length ~ Sepal.Width, data = iris)
## ----scatter02-03-fig, echo=FALSE, fig.keep = c(2, 4)--------------------
# Two ways to add facets to a scatter plot
gf_point(Sepal.Length ~ Sepal.Width | Species, data = iris)
gf_point(Sepal.Length ~ Sepal.Width, data = iris) %>%
  gf_facet_wrap( ~ Species)
last_plot() %>% gf_refine(scale_x_continuous(breaks = 2:5))
gf_point(Sepal.Length ~ Sepal.Width, data = iris,
         color = ~ Species, shape = ~ Species, alpha = 0.7)
last_plot() %>% gf_refine(scale_x_continuous(breaks = 2:5))
## ----tally01-------------------------------------------------------------
# Tabulating categorical and binned quantitative variables (mosaic::tally).
tally( ~ Species, data = iris) # make a table of values
## ----tally02-------------------------------------------------------------
tally( ~ Sepal.Length, data = iris) # make a table of values
## ----tally03-------------------------------------------------------------
tally( ~ (Sepal.Length > 6.0), data = iris)
## ----tally04-------------------------------------------------------------
tally( ~ cut(Sepal.Length, breaks = 2:10), data = iris)
## ----tally05-------------------------------------------------------------
tally( ~ cut(Sepal.Length, breaks = 2:10, right = FALSE),
       data = iris)
## ----histogram01, fig.keep = "none"--------------------------------------
gf_histogram( ~ Sepal.Length, data = iris)
## ----histogram02, fig.keep = "none"--------------------------------------
# manually selecting width of bins
gf_histogram( ~ Sepal.Length, data = iris, binwidth = 0.5)
# also selecting the boundary of the bins
gf_histogram( ~ Sepal.Length, data = iris, binwidth = 0.5, boundary = 8)
# manually selecting number of bins
gf_histogram( ~ Sepal.Length, data = iris, bins = 15)
## ----histogram02-fig, echo=FALSE, message=FALSE--------------------------
gf_histogram( ~ Sepal.Length, data = iris)
# manually selecting width of bins
gf_histogram( ~ Sepal.Length, data = iris, binwidth = 0.5)
# also selecting the boundary of the bins
gf_histogram( ~ Sepal.Length, data = iris, binwidth = 0.5, boundary = 8)
# manually selecting number of bins
gf_histogram( ~ Sepal.Length, data = iris, bins = 15)
## ----histogram03, eval=FALSE---------------------------------------------
## gf_histogram( ~ Sepal.Length, data = iris,
##   breaks = c(4, 5, 5.5, 6, 6.5, 7, 8, 10),
##   color = "black", fill = "skyblue")
## gf_dhistogram( ~ Sepal.Length, data = iris,
##   breaks = c(4, 5, 5.5, 6, 6.5, 7, 8, 10),
##   color = "black", fill = "skyblue")
## ----histogram04, fig.keep = "none"--------------------------------------
# Purled from the Data chapter: faceted histograms, frequency polygons,
# simulated distribution shapes, and formula-style numerical summaries.
gf_histogram( ~ Sepal.Length | Species ~ ., data = iris,
              bins = 15)
## ----histogram03-fig, echo=FALSE-----------------------------------------
gf_histogram( ~ Sepal.Length, data = iris,
              breaks = c(4, 5, 5.5, 6, 6.5, 7, 8, 10),
              color = "black", fill = "skyblue")
gf_dhistogram( ~ Sepal.Length, data = iris,
               breaks = c(4, 5, 5.5, 6, 6.5, 7, 8, 10),
               color = "black", fill = "skyblue")
## ----histogram04-fig, echo = FALSE---------------------------------------
gf_histogram( ~ Sepal.Length | Species ~ ., data = iris,
              bins = 15)
## ----histogram05, fig.keep = "none"--------------------------------------
gf_histogram( ~ Sepal.Length, data = iris, bins = 15) %>%
  gf_facet_wrap( ~ ntiles(Sepal.Width, 4, format = "interval"))
## ----histogram06, eval=FALSE---------------------------------------------
## gf_histogram( ~ Sepal.Length | Species, bins = 15,
##   data = iris %>% filter(Species == "virginica"))
## ----histogram05-fig, echo=FALSE-----------------------------------------
gf_histogram( ~ Sepal.Length, data = iris, bins = 15) %>%
  gf_facet_wrap( ~ ntiles(Sepal.Width, 4, format = "interval"))
gf_histogram( ~ Sepal.Length | Species, bins = 15,
              data = iris %>% filter(Species == "virginica"))
## ----freqpolygon, fig.keep = "none"--------------------------------------
gf_freqpoly( ~ Sepal.Length, color = ~ Species, data = iris,
             binwidth = 0.5)
## ----freqpolygon-fig, echo = FALSE---------------------------------------
gf_histogram( ~ Sepal.Length, data = iris, fill = "gray85", color = "black", bins = 20) %>%
  gf_freqpoly( ~ Sepal.Length, color = "black", size = 1, bins = 20)
gf_freqpoly( ~ Sepal.Length, color = ~ Species, data = iris,
             binwidth = 0.5)
## ----echo=FALSE----------------------------------------------------------
# Simulated symmetric / skewed samples for the "shapes of distributions" figure.
set.seed(123)
mydata <-
  tibble(
    `symmetric` = rnorm(5000, 75, 20),
    `positively skewed` = rgamma(5000, shape = 3, rate = 1/10),
    `negatively skewed` = 150 - `positively skewed`) %>%
  tidyr::gather("dist", "x")
gf_dhistogram( ~ x, data = mydata, binwidth = 5) %>%
  gf_facet_wrap( ~ dist, ncol = 1) %>%
  gf_labs(x = "", y = "") %>%
  gf_theme(axis.text = element_blank(), axis.ticks = element_blank())
gf_histogram( ~ duration, data = MASS::geyser, bins = 20)
## ----faithful-histogram, eval=FALSE--------------------------------------
## gf_histogram( ~ duration, data = MASS::geyser, bins = 20)
## ----mean-median01-------------------------------------------------------
# mosaic-style formula interface for numerical summaries.
mean( ~ Sepal.Length, data = iris)
median( ~ Sepal.Length, data = iris )
## ----mean-median02-------------------------------------------------------
mean(Sepal.Length ~ Species, data = iris)
median(Sepal.Length ~ Species, data = iris)
df_stats(Sepal.Length ~ Species, data = iris, mean, median)
## ----mean-median03-------------------------------------------------------
mean( ~ duration, data = MASS::geyser)
median( ~ duration, data = MASS::geyser)
## ----stem----------------------------------------------------------------
# stem does not understand the formula template
stem(MASS::geyser$duration)
## ----pulse-hist-sol------------------------------------------------------
gf_histogram( ~ pulse, data = LittleSurvey)
gf_histogram( ~ pulse, data = LittleSurvey %>% filter(pulse > 30))
df_stats(~ pulse, data = LittleSurvey %>% filter(pulse > 30), median, mean)
## ----number-prob01-sol---------------------------------------------------
gf_histogram( ~ number, data = LittleSurvey, binwidth = 1)
## ----number-prob02-sol---------------------------------------------------
t <- tally( ~ number, data = LittleSurvey); t
max(t)
t[which(t == max(t))]
t[which(t == min(t))]
tally(~ (number %% 2 == 0), data = LittleSurvey)
## ----number-prob03-sol---------------------------------------------------
# Alternative method
LittleSurvey %>%
group_by(number) %>%
summarise(total = n()) %>%
filter(total == min(total))
## ----number-prob04-sol---------------------------------------------------
LittleSurvey %>%
group_by(number) %>%
summarise(total = n()) %>%
filter(total == max(total))
## ----histograms-fig, echo=FALSE, opts.label="fig1"-----------------------
a <- rnorm(4000, mean = 10, sd = 2)
b <- rnorm(4000, mean = 10, sd = 5)
mydata <- data.frame(x = c(a, b), dist = rep(c("A", "B"), each = 4000))
gf_dhistogram( ~ x | dist, data = mydata, bins = 30) %>%
gf_labs(x = "")
## ----quantile01----------------------------------------------------------
quantile((1:10)^2)
## ----quantile02----------------------------------------------------------
quantile((1:10)^2, type = 5)
## ----quantile03----------------------------------------------------------
quantile((1:10)^2, type = 5, seq(0, 0.10, by = 0.005))
## ----quantile04----------------------------------------------------------
# note the different order of the arguments and
# different output formats in these two functions.
quantile( ~ duration, data = MASS::geyser, probs = (0:10)/10)
qdata( ~ duration, (0:10)/10, data = MASS::geyser)
quantile( ~ duration, probs = (0:4)/4, data = MASS::geyser)
qdata( ~ duration, (0:4)/4, data = MASS::geyser)
## ----boxplot01, eval=FALSE-----------------------------------------------
## gf_boxplot(Sepal.Length ~ Species, data = iris)
## gf_boxplot(Sepal.Length ~ Species, data = iris) %>%
## gf_refine(coord_flip())
## gf_boxplot(duration ~ "", data = MASS::geyser) %>%
## gf_refine(coord_flip())
## ----boxplot01-fig, echo=FALSE-------------------------------------------
gf_boxplot(Sepal.Length ~ Species, data = iris)
gf_boxplot(Sepal.Length ~ Species, data = iris) %>%
gf_refine(coord_flip())
gf_boxplot(duration ~ "", data = MASS::geyser) %>%
gf_refine(coord_flip())
## ----boxplots-match-fig, echo=FALSE--------------------------------------
# Linearly map the values of `x` onto the interval spanned by `lim`
# (default [0, 10]): min(x) maps to min(lim), max(x) to max(lim).
# As in the original, a constant `x` yields NaN (0/0).
rescale <- function(x, lim = c(0, 10)) {
  target_lo <- min(lim)
  span_x <- diff(range(x))
  span_lim <- diff(range(lim))
  target_lo + (x - min(x)) / span_x * span_lim
}
# ---- Chunks: boxplot-matching exercise data, five-number summaries,
# ---- measures of dispersion, baseball/death-penalty/utilities/births
# ---- solutions, and the start of the Discrete Distributions chapter.
# Build six distributions (A-F) with distinct shapes for a "match the
# histogram to the boxplot" exercise.  ppoints() gives evenly spaced
# probabilities so the q*() calls yield idealized samples.
n <- 400
a <- qnorm(ppoints(n))
a <- rescale(a)
# NOTE(review): the qexp() value is immediately overwritten -- dead assignment.
b <- qexp(ppoints(n), 2)
b <- qbeta(ppoints(n), 3, 15)
b <- rescale(b)
c <- qbeta(ppoints(n), 20, 5)
c <- rescale(c)
d <- c(runif(n = n/2, min = 0, max = 10), qunif(ppoints(n/2), 0, 10) )
d <- rescale(d)
# bowl shaped
# NOTE(review): the rbeta() draw is immediately overwritten -- dead assignment.
e <- 10 * c(rbeta(500, 6, 1), rbeta(500, 1, 6))
e <- c(qbeta(ppoints(100), 6, 1), qbeta(ppoints(100), 1, 6))
e <- rescale(e)
f <- c(0, 1, qbeta(ppoints(n-2), 12, 15))
f <- rescale(f)
# X holds the histogram panels (A-F); Z holds the same columns under
# scrambled names (W, Z, V, Y, U, X) for the matching exercise.
Y <- data.frame(A = a, B = b, C = c, D = d, E = e, F = f)
X <- stack(Y)
Z <- data.frame(W = a, Z = b, V = c, Y = d, U = e, X = f)
#z$W <- y$A
#z$Z <- y$B
#z$V <- y$C
#z$Y <- y$D
#z$U <- y$E
#z$X <- y$F
Z <- stack(Z)
gf_histogram( ~ values | ind, data = X, binwidth = 0.75) %>%
gf_labs(x = "", y = "") %>%
gf_theme(
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank()
)
# levels(z$ind) <- rev(levels(z$ind))
Z$ind <- as.character(Z$ind)
gf_boxplot(values ~ ind, data = Z, range = 2.25, coef = 0) %>%
gf_labs(x = "", y = "") %>%
gf_refine(coord_flip())
## ----boxplot-match-sol---------------------------------------------------
favstats(values~ind, data = X)
favstats(values~ind, data = Z)
## ----fivenum01-----------------------------------------------------------
fivenum(1:11)
quantile(1:11)
## ----fivenum02-----------------------------------------------------------
fivenum(1:10)
quantile(1:10)
## ----boxplot-iqr-rule-sol------------------------------------------------
x <- c(1:20, 20)
quantile(x, .75)
iqr(x)
# not an "outlier"
x1 <- c(1:20, 16 + 1.5 * iqr(x))
# now it is an "outlier"
x2 <- c(1:20, 16 + 1.501 * iqr(x))
# combine into one data set and compare boxplots
Data <- data.frame(x = c(x1, x2), a = rep(c("A", "B"), each = 21))
gf_boxplot(x ~ a, data = Data)
## ----dispersion01--------------------------------------------------------
# Step-by-step construction of the variance/standard deviation formulas.
x <- c(1, 3, 5, 5, 6, 8, 9, 14, 14, 20)
n <- length(x); n
mean(x)
x - mean(x)
# deviations sum to zero, hence the absolute/squared versions below
sum(x - mean(x))
abs(x - mean(x))
sum(abs(x - mean(x)))
mean(abs(x - mean(x)))
(x - mean(x))^2
sum((x - mean(x))^2)
sum((x - mean(x))^2) / (n-1)
var(x)
sd(x)
sd(x)^2
## ----dispersion02--------------------------------------------------------
mean(Sepal.Length ~ Species, data = iris)
var(Sepal.Length ~ Species, data = iris)
sd(Sepal.Length ~ Species, data = iris)
favstats(Sepal.Length ~ Species, data = iris)
df_stats(Sepal.Length ~ Species, data = iris, mean, var, sd)
## ----inspect-------------------------------------------------------------
inspect(iris)
## ----mad-----------------------------------------------------------------
mad(iris$Sepal.Length)
## ----mad-sol-------------------------------------------------------------
# mad() = median absolute deviation times the consistency constant 1.4826
mad(iris$Sepal.Length)
median(abs(iris$Sepal.Length - median(iris$Sepal.Length))) * 1.4826
## ----pitching2005-era-sol------------------------------------------------
Pitching2 <- filter(Pitching2005, GS > 4)
favstats(ERA ~ lgID, data = Pitching2)
gf_boxplot(ERA ~ lgID, data = Pitching2) %>% gf_refine(coord_flip())
gf_histogram( ~ ERA | lgID ~ ., data = Pitching2, binwidth = 0.3)
## ----batting-ba-sol------------------------------------------------------
Batting2 <- droplevels(filter(Batting, AB >= 200))
Batting2 <- mutate(Batting2, BA = H / AB)
favstats(BA ~ league, data = Batting2)
gf_boxplot(BA ~ league, data = Batting2) %>% gf_refine(coord_flip())
gf_histogram( ~ BA | league, data = Batting2)
## ----batting-ba2-sol-----------------------------------------------------
gf_boxplot( BA ~ factor(year), color = ~ league, data = Batting2)
## ----death-penalty01-----------------------------------------------------
tally(death ~ victim, data = DeathPenalty)
tally(death ~ defendant, data = DeathPenalty) # this line was missing in the printed version
## ----death-penalty02-----------------------------------------------------
tally(death ~ defendant | victim, data = DeathPenalty)
## ----vcd-mosaic-fig, echo=FALSE, message=FALSE---------------------------
vcd::mosaic(~ victim + defendant + death,
shade = TRUE,
data = DeathPenalty %>%
mutate( # abbreviate labels to fit plot better
victim = abbreviate(victim, 2),
defendant = abbreviate(defendant, 2),
death = abbreviate(death, 1))
)
## ----death-penalty03, fig.show="hide"------------------------------------
vcd::mosaic( ~ victim + defendant + death, data = DeathPenalty)
vcd::structable(~ victim + defendant + death, data = DeathPenalty)
## ----faithful-sol--------------------------------------------------------
# lead()/lag() shift the series to relate each eruption to its neighbors
gf_point(waiting ~ duration, data = MASS::geyser)
gf_point(lead(waiting) ~ duration, data = MASS::geyser)
gf_point(waiting ~ lag(duration), data = MASS::geyser)
## ----utilities-sol-------------------------------------------------------
# year + month/12 converts (year, month) into a continuous time axis
gf_point(ccf ~ (year + month/12), data = Utilities, color = ~ factor(month)) %>%
gf_line(ccf ~ (year + month/12), data = Utilities, color = ~ factor(month))
gf_boxplot(ccf ~ factor(month), data = Utilities)
## ----utilities-ccfpday, eval=FALSE--------------------------------------
## Utilities <- mutate(Utilities, ccfpday = ccf / billingDays)
## ----utilities-ccfpday-sol-----------------------------------------------
Utilities <- mutate(Utilities, ccfpday = ccf / billingDays)
gf_point(ccfpday ~ (year + month/12), data = Utilities, color = ~ factor(month))
gf_boxplot(ccfpday ~ factor(month), data = Utilities)
## ----utilities-temp-sol--------------------------------------------------
gf_point(ccf ~ temp, data = Utilities)
## ----utilities-price-sol-------------------------------------------------
gf_point(ccf/gasbill ~ (12*year + month), data = Utilities)
## ----births-sol----------------------------------------------------------
gf_point(births ~ dayofyear, color = ~ wday, data = Births78)
gf_line(births ~ dayofyear, color = ~ wday, data = Births78)
gf_boxplot(births ~ wday, data = Births78)
## ----births02-sol, opts.label = "fig1"-----------------------------------
gf_line(births ~ day_of_year, color = ~wday, data = Births) %>%
gf_facet_wrap(~year)
gf_freqpoly(~ births, color = ~wday, data = Births) %>%
gf_facet_wrap(~year)
gf_boxplot(births ~ wday, color = ~wday, data = Births, size = 0.5) %>%
gf_facet_wrap(~year)
## ----DiscreteDistribution, child="DiscreteDistributions.Rnw", eval=includeChapter[2]----
## ----discrete-setup, include = FALSE, cache = FALSE----------------------
knitr::opts_chunk$set(cache.path = "cache/Disc-")
## ----dice-sample-space-sol, tidy=FALSE-----------------------------------
# Sample space for rolling a red and a blue die; A, B, C are events.
Dice <- expand.grid(red = 1:6, blue = 1:6)
Dice %>% sample(4)
# part b
Dice <-
Dice %>%
mutate(
A = red + blue >= 9,
B = blue > red,
C = blue == 5)
## ----dice-SS01-sol-------------------------------------------------------
Dice %>% filter(A)
## ----dice-SS02-sol-------------------------------------------------------
Dice %>% filter(B)
## ----dice-SS03-sol-------------------------------------------------------
Dice %>% filter(C)
## ----dice-SS04-sol-------------------------------------------------------
Dice %>% filter(A & B)
## ----dice-SS05-sol-------------------------------------------------------
Dice %>% filter( B | C)
## ----dice-SS06-sol-------------------------------------------------------
Dice %>% filter( A & (B | C))
## ----coin-toss, echo=FALSE, opts.label="fig1", seed=0--------------------
# Law of large numbers demo: running relative frequency of heads.
coinTosses <- data.frame(
outcome = rbinom(1000, 1, 0.5),
toss = 1:1000) %>%
mutate(relFreq = cumsum(outcome) / toss)
gf_hline(yintercept = 0.5, col = "skyblue", size = 1, alpha = 0.8) %>%
gf_line(relFreq ~ toss, data = coinTosses) %>%
gf_labs(title = "Results of 1000 simulated coin tosses",
y = "relative frequency",
x = "number of tosses") %>%
gf_lims(y = c(0, 1))
## ----coin-toss-hist, echo=FALSE------------------------------------------
MoreTosses <- data.frame(heads = rbinom(1000, 1000, 0.5))
gf_histogram( ~ (heads / 1000), data = MoreTosses, binwidth = 0.005) %>%
gf_labs(title = "Results of 1000 simulations\nof 1000 coin tosses",
x = "proportion heads") %>%
gf_lims(x = c(0.44, 0.56))
LotsMoreTosses <- data.frame(heads = rbinom(1000, 10000, 0.5))
gf_histogram( ~ (heads / 10000), data = LotsMoreTosses, binwidth = 0.002) %>%
gf_labs(title = "Results of 1000 simulations\nof 10,000 coin tosses",
x = "proportion heads") %>%
gf_lims(x = c(0.44, 0.56))
## ----print-sums-sol------------------------------------------------------
# List all ways to write `n` as an ordered sum of three positive integers.
#
# @param n Integer total; solutions exist only for n >= 3.
# @return A character vector with one string per ordered triple (x, y, z)
#   of positive integers with x + y + z == n, formatted "x + y + z = n".
#   Triples are ordered by the first addend, then the second (same order
#   as the original nested loops).  Returns character(0) when n < 3
#   (the original looped over descending 0:n sequences and produced
#   nonsense strings in that case).
sums <- function(n) {
  if (n < 3) return(character(0))
  m <- n - 3  # work with non-negative offsets: addend = offset + 1
  # Enumerate all (x, y) offsets with x + y <= m; z is then determined.
  # expand.grid varies its first argument fastest, so listing (y, x)
  # reproduces the x-major, y-minor order of the original loops.
  grid <- expand.grid(y = 0:m, x = 0:m)
  grid <- grid[grid$x + grid$y <= m, , drop = FALSE]
  z <- m - grid$x - grid$y
  # Vectorized paste replaces growing the result with c() inside a nested
  # loop, which was O(k^2) in the number of solutions.
  paste(grid$x + 1, "+", grid$y + 1, "+", z + 1, "=", n)
}
# ---- Chunks: compositions demo, combinatorics with choose(), poker hands.
length(sums(20)) # how many solutions?
sums(20)[1:10] # first 10 solutions
sums(7) # smaller example
## ----choose01------------------------------------------------------------
choose(5, 2)
## ----choose02------------------------------------------------------------
# probability of a flush (all 5 cards one suit): 4 suits, 5 of 13 cards
4 * choose(13, 5) / choose(52, 5)
## ----full-house, tidy=FALSE----------------------------------------------
(choose(13, 1) * # a number to have three of
choose( 4, 3) * # three of that number
choose(12, 1) * # a different number to have two of
choose( 4, 2)) / # two of that number
choose(52, 5)
## ----two-pair-sol, tidy=FALSE--------------------------------------------
(choose(13, 2) * # two numbers (the pairs)
choose( 4, 2) * choose(4, 2) * # two suits for each pair
choose(11, 1) * choose(4, 1)) / # one more card of a different number
choose(52, 5)
## ----three-kind-sol, tidy=FALSE------------------------------------------
(choose(13, 1) * # a number to have three of
choose( 4, 3) * # three of that number
choose(12, 2) * # two other numbers
choose( 4, 1) * choose(4, 1)) / # one of each of those numbers
choose(52, 5)
## ----birthday-problem-sol------------------------------------------------
# calculates prob of shared birthday
# Probability that at least two of `n` people share a birthday, assuming a
# 365-day year with uniformly distributed birthdays.
#
# @param n Number of people (scalar positive integer).
# @return P(at least one shared birthday).
birthdayprob <- function(n) {
  if (n >= 366) return(1)  # pigeonhole: a shared birthday is certain
  # P(all distinct) = (365 * 364 * ... * (366 - n)) / 365^n.
  # Computed on the log scale: the original prod(seq(...)) / 365^n
  # overflows to Inf/Inf = NaN once n is much above ~120.
  log_p_distinct <- sum(log(seq(366 - n, 365))) - n * log(365)
  1 - exp(log_p_distinct)
}
birthdayprob(10)
# table of (n, probability) for n = 20..25 -- crosses 0.5 at n = 23
cbind(20:25, sapply(20:25, birthdayprob))
## ----birthday-sim-sol, seed=123------------------------------------------
# Pool of actual birthdays, one entry per birth, so sampling reflects the
# empirical (non-uniform) birthday distribution.
# NOTE(review): this pool is built from the 1978 data (Births78) although
# the variable is named birthdays69 -- confirm against the text.
birthdays69 <- rep(Births78$date, Births78$births)
# Simulate drawing `n` birthdays (without replacement) from the pool
# `birthdays`; return TRUE if any birthday occurs more than once in the draw.
sharedBirthday <- function(n, birthdays) {
  draw <- sample(birthdays, n)
  any(duplicated(draw))
}
# ---- Chunks: birthday simulations, stars-and-bars redos, conditional
# ---- probability solutions, acceptance sampling, coin-pattern probabilities.
tally( ~ sharedBirthday, do(1000) * sharedBirthday(15, birthdays69))
tally( ~ sharedBirthday, do(1000) * sharedBirthday(20, birthdays69))
tally( ~ sharedBirthday, do(1000) * sharedBirthday(25, birthdays69))
# 1969
Births69 <- Births %>% filter(year == 1969)
birthdays69 <- rep(Births69$date, Births69$births)
tally( ~ sharedBirthday, do(1000) * sharedBirthday(15, birthdays69))
tally( ~ sharedBirthday, do(1000) * sharedBirthday(20, birthdays69))
tally( ~ sharedBirthday, do(1000) * sharedBirthday(25, birthdays69))
# 1988
Births88 <- Births %>% filter(year == 1988)
birthdays88 <- rep(Births88$date, Births88$births)
tally( ~ sharedBirthday, do(1000) * sharedBirthday(15, birthdays88))
tally( ~ sharedBirthday, do(1000) * sharedBirthday(20, birthdays88))
tally( ~ sharedBirthday, do(1000) * sharedBirthday(25, birthdays88))
## ----donut-redo-sol------------------------------------------------------
# stars and bars: multisets of size 2 from 12+1 kinds, etc.
choose(12 + 2, 2)
## ----sum-redo-sol--------------------------------------------------------
choose(17 + 2, 2)
## ----smokers01-sol-------------------------------------------------------
# a)
21.1 / ( 21.1 + 24.8)
## ----smokers02-sol-------------------------------------------------------
# b) After factoring out a constant from numerator and denominator
# we are left with
0.183 * 13 / ( 0.183 * 13 + .817 * 1 )
# c) After factoring out a constant from numerator and denominator
# we are left with
0.231 * 23 / ( 0.231 * 23 + .769 * 1 )
## ----defective-parts-sol, tidy=FALSE-------------------------------------
# part a
.20 + .27
# part b:
# P(Wed-Thur | defective) = P(Wed-Thur and defective) / P(defective)
a <- .20 * .02 + # Monday and defective
.27 * .03 # Thursday and defective
b <- .25 * .015 + # Tuesday and defective
.28 * .01 # Wednesday and defective
a / (a + b)
# part c: P( Wed-Thur | good ) = P( Wed-Thur and good ) / P(good)
c <- .20 * .98 + # Monday and good
.27 * .97 # Thursday and good
d <- .25 * .985 + # Tuesday and good
.28 * .99 # Wednesday and good
c / (c + d)
## ----cards-same-color-sol------------------------------------------------
2 * # black or red
choose(26, 5) / # 5 cards from that color
choose(52, 5) # any 5 cards
## ----bayes-disease-sol---------------------------------------------------
# Bayes' rule: P(disease | positive test) for two prevalence values
p <- c(0.01, 0.10)
(p * 0.98) / (p * 0.98 + (1 - p) * 0.01)
## ----statistics-sol------------------------------------------------------
# distinguishable arrangements of the letters in "statistics"
factorial(10) / (factorial(3) * factorial(3) * factorial(2))
## ----acceptance-sampling-sol---------------------------------------------
# prob only good ones selected
choose(90, 4) / choose(100, 4)
# prob lot is rejected
1 - choose(90, 4) / choose(100, 4)
# rejection probability as a function of the number of defective parts
f <- function(x) { 1 - choose(100 - x, 4) / choose(100, 4) }
Data <- data.frame(reject= sapply(10:100, f), defective = 10:100)
gf_line(reject ~ defective, data = Data, col = "navy", size = 1) %>%
gf_labs(x = "number of defective parts",
y = "probability of rejecting")
## ----acceptance-sampling-binomial-sol------------------------------------
# prob only good ones selected
dbinom(0, 4, 10/100)
# prob lot is rejected
1 - dbinom(0, 4, 10/100)
## ----acceptance-sampling-nbinom-sol--------------------------------------
pnbinom(3, 1, 10/100) # lot is rejected
## ----HHH-sol-------------------------------------------------------------
1/8 + # HHH--
1/16 + # or THHH-
1/16 # or -THHH
## ----HTH-sol-------------------------------------------------------------
1/8 + # HTH--
1/8 + # or -HTH-
3/32 # or --HTH but can't start HT
## ----HHT-sol-------------------------------------------------------------
1/8 + # HHT--
1/8 + # or -HHT-
1/8 # or --HHT
## ----mastermind----------------------------------------------------------
6^4 # one of six colors in each of 4 holes
8^5 # one of eight colors in each of 5 holes
## ----cards-flush-sol-----------------------------------------------------
choose(13 - 3, 2) / # two of remaining 10 spades
choose(52 - 5, 2) # two of 47 remaining cards in deck
## ----socks-sol, tidy=FALSE-----------------------------------------------
8 * 5 * 4 / choose(17, 3) # 1 sock of each kind means no pairs
1 - (8 * 5 * 4 / choose(17, 3)) # so this is prob of getting a pair
# or do it this way
( choose(8, 2) * 9 + choose(5, 2) * 12 + choose(4, 2) * 13 +
choose(8, 3) + choose(5, 3) + choose(4, 3) ) / choose(17, 3)
## ----prob-plot, fig.keep="none"------------------------------------------
# this will be wrong for values not among 0, 1, 2, 3, or 4
# ---- Chunks: a hand-built binomial(4, 1/2) pmf, binomial/geometric/negative
# ---- binomial distribution functions, exact binomial tests, and power
# ---- calculations for the coin-tossing example.
# pmf of Binomial(4, 1/2): C(4, x) / 2^4, written with factorials.
f <- function(x) {
factorial(4) / (16 * factorial(x) * factorial(4 - x))
}
f(0:4)
sum(f(0:4)) # check to be sure the probabilities add to 1
my_data <- data.frame(probability = f(0:4), x = 0:4)
gf_point(probability ~ x, data = my_data)
gf_point(probability ~ x, data = my_data) %>%
gf_segment(0 + probability ~ x + x, data = my_data)
gf_point(probability ~ x, data = my_data) %>%
gf_line(probability ~ x, data = my_data)
## ----prob-plot-fig, echo=FALSE, opts.label="fig3"------------------------
# this will be wrong for values not among 0, 1, 2, 3, or 4
f <- function(x) {
factorial(4) / (16 * factorial(x) * factorial(4 - x))
}
f(0:4)
sum(f(0:4)) # check to be sure the probabilities add to 1
my_data <- data.frame(probability = f(0:4), x = 0:4)
gf_point(probability ~ x, data = my_data)
gf_point(probability ~ x, data = my_data) %>%
gf_segment(0 + probability ~ x + x, data = my_data)
gf_point(probability ~ x, data = my_data) %>%
gf_line(probability ~ x, data = my_data)
## ----prob-hist-cdf-fig, echo=FALSE---------------------------------------
gf_dist("binom", params = list(size = 4, prob = 0.5), kind = "hist", binwidth = 1)
gf_dist("binom", params = list(4, 0.5), kind = "cdf")
## ----binom, seed = 123---------------------------------------------------
# d/p/q/r-binom tour: density, CDF, quantiles, random draws.
randomData <- rbinom(n = 30, size = 4, prob = 0.5)
randomData
tally( ~ randomData)
vals <- setNames(0:4, 0:4) # add labels for nicer displays below
dbinom(vals, size = 4, prob = 0.5) # matches earlier example
dbinom(vals, size = 4, prob = 0.5) * 30 # pretty close to our table above
pbinom(vals, size = 4, prob = 0.5) # same as cumsum(dbinom(...))
qbinom(0.20, size = 20, prob = 0.5)
pbinom(7, 20, 0.5) # < 0.20
pbinom(8, 20, 0.5) # >= 0.20
## ----gf-dist, opts.label="fig3", fig.keep = "none"-----------------------
gf_dist("binom", params = list(size = 4, prob = 0.5))
gf_dist("binom", params = list(size = 4, prob = 0.5), kind = "cdf")
gf_dist("binom", params = list(size = 4, prob = 0.5),
kind = "histogram", binwidth = 1)
## ----gf-dist-fig, echo = FALSE, opts.label="fig3"------------------------
gf_dist("binom", params = list(size = 4, prob = 0.5))
gf_dist("binom", params = list(size = 4, prob = 0.5), kind = "cdf")
gf_dist("binom", params = list(size = 4, prob = 0.5),
kind = "histogram", binwidth = 1)
## ----freddy01------------------------------------------------------------
dbinom(20, 20, 0.8) # probability of making all 20
1 - pbinom(14, 20, 0.8) # probability of NOT making 14 or fewer
dbinom(16, 20, 0.8) # probability of making exactly 16
gf_dist("binom", params = list(size = 20, prob = 0.8))
## ----nbinom01------------------------------------------------------------
1 - pnbinom(c(18, 28, 38, 48), size = 1, prob = 1/36)
## ----deMere-sol----------------------------------------------------------
1 - dbinom(0, 4, 1/6) # P(at least one 6 in 4 tries)
pgeom(3, 1/6) # P(fail at most 3 times before getting a 6)
1 - dbinom(0, 24, 1/36) # P(at least one double 6 in 24 tries)
pgeom(23, 1/36) # P(fail at most 23 times before getting double 6)
## ----amy-sol-------------------------------------------------------------
1 - pnbinom(13, 100, 0.92)
## ----Hamming-sol, tidy = FALSE-------------------------------------------
dbinom(0, 4, 0.05) # P(all four bits received correctly
pbinom(1, 7, 0.05) # P(>= 6 of 7 bits received correctly)
# compare plain vs Hamming-coded transmission over a range of error rates
p <- seq(0, 1, by = 0.01)
DD <- data.frame(
probability = c(dbinom(0, 4, p), pbinom(1, 7, p)),
error.rate = c(p, p),
method = rep(c("plain", "Hamming"), each = length(p)))
gf_line(probability ~ error.rate, data = DD, color = ~ method) %>%
gf_labs(x = "bitwise error rate", y = "message error rate")
## ----freethrow04-sol-----------------------------------------------------
1 - pbinom(8, 10, 0.8) # make at least 9 of 10
1 - pbinom(17, 20, 0.8) # make at least 18 of 20
## ----freethrow02-sol-----------------------------------------------------
dbinom(10, 10, 0.8) # make 10 straight
1 - pnbinom(4, 10, 0.80) # at least 5 misses <-> at least 15 shots
pbinom(9, 14, 0.8) # 9 or fewer makes in 14 tries <-> at least 15 shots
pbinom(10, 15, 0.8) # 10 or fewer makes in 15 tries is INCORRECT
1 - pnbinom(4, 10, 0.70) # at least 5 misses = at least 15 shots
pbinom(9, 14, 0.7) # 9 or fewer makes in 14 tries <-> at least 15 shots
pbinom(10, 15, 0.7) # 10 or fewer makes in 15 tries is INCORRECT
## ----freethrow03-sol-----------------------------------------------------
pp <- dbinom(5, 5, 0.80); pp # a) prob make 5 straight (= success)
dgeom(1, pp) # b) succeed with 1 miss (correct answer)
0.20 * pp # miss first then make 5 straight (INCORRECT)
1 - pgeom(1, pp) # c) miss more than one shot before success
probs <- dgeom(0:15, pp) # d)
#
gf_point(probs ~ 0:15) %>%
gf_labs(title = "Freddie", x = "misses", y = "probability")
#################################################################
pp <- dbinom(5, 5, 0.70) # a) prob make 5 straight (= success)
pp
dgeom(1, pp) # b) succeed with 1 miss (correct answer)
0.20 * pp # miss first then make 5 straight (_not_ the answer)
1 - pgeom(1, pp) # c) miss more than one shot before success
misses <- 0:15
probs <- dgeom(misses, pp) # d)
#
gf_point(probs ~ misses) %>%
gf_labs(title = "Frank", x = "misses", y = "probability")
## ----multiple-choice01-sol-----------------------------------------------
1 - pbinom(11, 20, 0.25) # 11 or fewer correct fails
1 - pbinom(11, 20, 1/3) # 11 or fewer correct fails
1 - pbinom(11, 20, 0.5 + 0.4 * 1/3 + 0.1 * 1/4)
## ----playoffs-part-------------------------------------------------------
### using binomial dist
1 - pbinom(1, 3, 0.6) # win at least 2 of 3
### using neg binomial dist
pnbinom(1, 2, 0.6) # lose <= 1 time before 2 wins
## ----playoffs-sol--------------------------------------------------------
### using binomial dist
1- pbinom(1, 3, 0.6) # win at least 2 of 3
1- pbinom(2, 5, 0.6) # win at least 3 of 5
1- pbinom(3, 7, 0.6) # win at least 4 of 7
### using neg binomial dist
pnbinom(1, 2, 0.6) # lose <= 1 time before 2 wins
pnbinom(2, 3, 0.6) # lose <= 2 times before 3 wins
pnbinom(3, 4, 0.6) # lose <= 3 times before 4 wins
## ----lady01--------------------------------------------------------------
# "lady tasting tea" style exact binomial test
1 - pbinom(8, 10, 0.5)
binom.test(9, 10)
## ----lady02--------------------------------------------------------------
binom.test(9, 10) %>% pval() # same as pval(binom.test(9, 10))
## ----lady03--------------------------------------------------------------
binom.test(9, 10, alternative = "greater")
## ----hugo01--------------------------------------------------------------
binom.test(16, 50, 1/6)
## ----hugo02--------------------------------------------------------------
# one-sided test manually and using binom.test()
1 - pbinom(15, 50, 1/6)
binom.test(16, 50, 1/6, alternative = "greater")
## ----hugo03, digits = 5--------------------------------------------------
# finding the "other side" by inspection:
dbinom(16, 50, 1/6)
data.frame(x = 0:4, `P(X=x)` = dbinom(0:4, 50, 1/6), check.names = FALSE)
# this should match the p-value from binom.test()
pbinom(1, 50, 1/6) + 1 - pbinom(15, 50, 1/6)
# letting R automate finding the interval too:
probs <- dbinom(0:50, 50, 1/6)
sum(probs[probs <= dbinom(16, 50, 1/6)])
## ----spinner-sol---------------------------------------------------------
probs <- dbinom(0:50, 50, 0.25)
sum(probs[probs <= dbinom(8, 50, 0.25)]) # sum the small probs
binom.test(8, 50, 0.25) # check with binom.test()
## ----Gus-sol-------------------------------------------------------------
binom.test(8, 10, p = 0.50)
## ----mendel-sol----------------------------------------------------------
binom.test(428, 428 + 152, p = 0.75) %>% pval()
## ------------------------------------------------------------------------
binom.test(427, 428 + 152, p = 0.75) %>% pval()
1 - (binom.test(427, 428 + 152, p = 0.75) %>% pval())
## ------------------------------------------------------------------------
# expected # of green
(428 + 152) * 0.75
# P(|X - 435| <= 7)
pbinom(435 + 7, 428 + 152, 0.75) - pbinom( 435 - 7 - 1, 428 + 152, 0.75)
## ----beetles-sol---------------------------------------------------------
binom.test(10, 10 + 17)
binom.test(36, 7 + 36)
## ----alpha-not-exact-sol-------------------------------------------------
data.frame(
x = 7:9,
`P(X <= x)` = pbinom(7:9, 25, 0.5),
check.names = FALSE
)
## ----power01-------------------------------------------------------------
qbinom(0.025, 100, 0.5) # find q with pbinom(q, 100, 0.5) >= 0.025
pbinom(39:40, 100, 0.5) # double checking
## ----power02-------------------------------------------------------------
pbinom(60, 100, 0.95) - pbinom(39, 100, 0.95)
## ----power03-------------------------------------------------------------
pbinom(60, 100, 0.55) - pbinom(39, 100, 0.55)
## ----power04, eval=FALSE-------------------------------------------------
## p <- seq(0, 1, by = 0.01)
## power <- 1 - (pbinom(60, 100, p) - pbinom(39, 100, p))
## gf_line(power ~ p, size = 1) %>%
## gf_labs(x = expression(pi[a]))
## ----power04-fig, echo=FALSE---------------------------------------------
# power curve of the n = 100 coin-toss test as the true probability varies
p <- seq(0, 1, by = 0.01)
power <- 1 - (pbinom(60, 100, p) - pbinom(39, 100, p))
gf_line(power ~ p, size = 1) %>%
gf_labs(x = expression(pi[a]))
## ----eval=FALSE, include=FALSE-------------------------------------------
## Power_data <-
## expand.grid(n = 1:2000, p = c(0.52, 0.55, 0.60)) %>%
## mutate(
## plab = paste("alt prob =", as.character(p)),
## critical = qbinom(0.025, size = n, prob = p),
## power = 1 - (pbinom(n - critical + 1, n, p) - pbinom(critical - 1, n, p))
## )
## gf_line(power ~ n | plab, data = Power_data) %>%
## gf_labs(y = "power", x = "number of coin tosses") %>%
## gf_lims(y = c(0, 1.1))
## ----eval=FALSE, include = FALSE-----------------------------------------
## binom_power <- function(n, p_alt, alpha = 0.05, p_null = 0.50) {
## critical_low <- qbinom( alpha/2, size = n, prob = p_null) - 1
## critical_hi <- qbinom(1 - alpha/2, size = n, prob = p_null) + 1
## pbinom(critical_low, n, p_alt) + 1 - pbinom(critical_hi - 1, n, p_alt)
## }
##
## PowerData <-
## expand.grid(n = seq(5, 10000, by = 5), p_alt = c(0.52, 0.55, 0.60)) %>%
## mutate(
## power = binom_power(n, p_alt),
## plab = paste("alt prob =", as.character(p_alt))
## )
##
## gf_line(power ~ n | plab, data = PowerData, size = 1) %>%
## gf_labs(y = "power", x = "number of coin tosses") %>%
## gf_lims(y = c(0, 1.1))
## ----power05, eval=FALSE-------------------------------------------------
## binom_power <- function(n, p_alt, alpha = 0.05) {
## p_null <- 0.50
## # reject if X <= critical_lo or X >= critical_hi
## critical_lo <- qbinom( alpha/2, size = n, prob = p_null) - 1
## critical_hi <- qbinom(1 - alpha/2, size = n, prob = p_null)
## pbinom(critical_lo, n, p_alt) + 1 - pbinom(critical_hi - 1, n, p_alt)
## }
##
## PowerData <-
## expand.grid(n = seq(5, 10000, by = 5), p_alt = c(0.52, 0.55, 0.60)) %>%
## mutate(
## power = binom_power(n, p_alt),
## plab = paste("alt prob =", as.character(p_alt))
## )
##
## gf_line(power ~ n | plab, data = PowerData, size = 1) %>%
## gf_labs(y = "power", x = "number of coin tosses") %>%
## gf_lims(y = c(0, 1.1))
## ----power05-fig, echo=FALSE, opts.label = "fig1"------------------------
# Power of a two-sided exact binomial test of H0: p = 0.5 at nominal level
# `alpha` when the true success probability is `p_alt`.  The test rejects
# when X <= lower_cut or X >= upper_cut, with the cutoffs taken from the
# alpha/2 tails of the null distribution.  Vectorized over `n`.
binom_power <- function(n, p_alt, alpha = 0.05) {
  p_null <- 0.50
  lower_cut <- qbinom(alpha / 2, size = n, prob = p_null) - 1
  upper_cut <- qbinom(1 - alpha / 2, size = n, prob = p_null)
  lower_tail <- pbinom(lower_cut, n, p_alt)
  upper_tail <- 1 - pbinom(upper_cut - 1, n, p_alt)
  lower_tail + upper_tail
}
# Power as a function of sample size for three alternative probabilities,
# using the binom_power() helper defined just above.
PowerData <-
expand.grid(n = seq(5, 10000, by = 5), p_alt = c(0.52, 0.55, 0.60)) %>%
mutate(
power = binom_power(n, p_alt),
plab = paste("alt prob =", as.character(p_alt))
)
gf_line(power ~ n | plab, data = PowerData, size = 1) %>%
gf_labs(y = "power", x = "number of coin tosses") %>%
gf_lims(y = c(0, 1.1))
## ----power01-sol---------------------------------------------------------
# critical values for a two-sided test with n = 200, and tail checks
qbinom(0.975, 200, 0.5)
qbinom(0.025, 200, 0.5)
pbinom(85:86, 200, 0.5)
1 - pbinom(114:115, 200, 0.5)
## ----power02-sol, tidy = FALSE-------------------------------------------
# define a function to calculate power for given sample size.
# Exact-binomial power calculator for a two-sided test of H0: p = `null`
# against true p = `alt` at nominal level `alpha`.  Returns a data frame
# with one row per sample size: achieved significance level, total power,
# and the per-tail contributions and critical values.  Vectorized over `size`.
# NOTE(review): the column names `lCritcal`/`rCritcal` are misspelled
# ("Critical"), but downstream code may depend on them, so they are kept.
power <- function(size, null = 0.50, alt = 0.55, alpha = 0.05){
size <- ceiling(size) # make sure size is an integer
# Rejection region: X <= leftCritical or X >= rightCritical, chosen so
# each tail has probability at most alpha/2 under the null
# (qbinom gives the smallest q with CDF >= the requested probability).
leftCritical <- -1 + qbinom( alpha / 2, size, null)
rightCritical <- 1 + qbinom(1 - alpha / 2, size, null)
# achieved significance level: null probability of the rejection region
true_alpha <- 1 - pbinom(rightCritical - 1, size, null) +
pbinom( leftCritical, size, null)
# power: probability of the same rejection region under the alternative
leftPower <- pbinom(leftCritical, size, alt)
rightPower <- 1 - pbinom(rightCritical - 1, size, alt)
data.frame(
n = size,
alpha = true_alpha,
null = null,
alt = alt,
power = leftPower + rightPower,
lPower = leftPower,
rPower = rightPower,
lCritcal = leftCritical,
rCritcal = rightCritical
)
}
## ----power03-sol---------------------------------------------------------
power(c(200, 400, 5000))
## ----power04-sol---------------------------------------------------------
# find sample size with 90% power
# (uniroot solves power(size) = 0.90 on the bracket [400, 5000])
uniroot(function(size){ power(size)$power - 0.90 }, c(400, 5000)) %>%
value()
## ----power05-sol, fig.keep = "last"--------------------------------------
gf_point(power ~ n, data = power(1000:1090)) %>%
gf_hline(yintercept = 0.9, color = "red", alpha = 0.5)
## ----power06-sol---------------------------------------------------------
# smallest sample sizes whose power reaches 0.90
power(1000:2000) %>%
filter(power >= 0.90) %>%
arrange(n) %>%
head(10)
## ----power-sim-sol-------------------------------------------------------
# Simulate one p-value of the exact binomial test of H0: p = null when the
# data are generated with success probability alt.
# pval() is mosaic's p-value extractor.
sim_pval <- function(n, alt = 0.55, null = 0.5) {
  pval(binom.test(rbinom(1, n, alt), n, prob = null))
}
# Estimate power by simulation: the proportion of simulated p-values < 0.05.
Sims200 <- do(1000) * sim_pval(n = 200)
Sims400 <- do(1000) * sim_pval(n = 400)
Sims1075 <- do(1000) * sim_pval(n = 1075)
tally( ~ p.value < 0.05, data = Sims200, format = "proportion")
tally( ~ p.value < 0.05, data = Sims400, format = "proportion")
tally( ~ p.value < 0.05, data = Sims1075, format = "proportion")
## ----binomial-power-sol00, warning = FALSE, echo = FALSE, opts.label = "fig1"----
# Table of binomial outcomes with null/alternative probabilities, plus a
# reject/don't-reject label built by accumulating the least likely null
# outcomes until the nominal alpha is exhausted.
binom_power_data <-
  function(n, p_alt, alpha = 0.05, p_null = 0.5) {
    tibble(
      x = 0:n, # possible value of x
      null_prob = dbinom(x, n, p_null),
      alt_prob = dbinom(x, n, p_alt)
    ) %>%
    arrange(null_prob) %>% # sort by null probability
    mutate(
      cumsum_prob = cumsum(null_prob), # running total
      reject =
        ifelse(cumsum_prob <= alpha,
               "reject",
               "don't reject")
    ) %>%
    arrange(x) # sort by x values again
  }
# Side-by-side picture of the null and alternative pmfs, with the rejection
# region highlighted; caption reports attained alpha and power.
binom_power_plot <- function(n, p_alt, alpha = 0.05, p_null = 0.5) {
  BPD <- binom_power_data(n = n, p_alt = p_alt, alpha = alpha, p_null = p_null)
  BPDnull <- BPD %>% mutate(case = factor("null", levels = c("null", "alternative")))
  BPDalt <- BPD %>% mutate(case = factor("alternative", levels = c("null", "alternative")))
  true_alpha <- sum( ~ null_prob, data = BPD %>% filter(cumsum_prob <= alpha))
  power <- sum( ~ alt_prob, data = BPD %>% filter(cumsum_prob <= alpha))
  gf_point(null_prob ~ x, color = ~ reject, alpha = 0.7, data = BPDnull) %>%
    gf_segment(null_prob + 0 ~ x + x, color = ~ reject, alpha = 0.7, data = BPDnull) %>%
    gf_point(alt_prob ~ x, color = ~ reject, alpha = 0.7, data = BPDalt) %>%
    gf_segment(alt_prob + 0 ~ x + x, color = ~ reject, alpha = 0.7, data = BPDalt) %>%
    gf_facet_grid( case ~ .) %>%
    gf_lims(
      # show roughly +/- 4 standard deviations around both means
      x = range(c(
        n * p_null + c(-1, 1) * sqrt(n * p_null * (1-p_null)) * 4,
        n * p_alt + c(-1, 1) * sqrt(n * p_alt * (1-p_alt)) * 4)
      )
    ) %>%
    gf_refine(scale_color_manual(values = c("navy", "red"))) %>%
    gf_labs(
      caption = paste("nominal alpha = ", format(round(alpha, 3)),
                      "true alpha = ", format(round(true_alpha, 3)),
                      "; power = ", format(round(power, 3))))
}
binom_power_plot(n = 50, p_null = 1/6, p_alt = 1/3)
## ----binomial-power-sol01------------------------------------------------
# Same helper as in the preceding (echo=FALSE) chunk, repeated here so the
# solution chunk is self-contained: outcome table with a reject label built
# from the least likely null outcomes.
binom_power_data <-
  function(n, p_alt, alpha = 0.05, p_null = 0.5) {
    tibble(
      x = 0:n, # possible value of x
      null_prob = dbinom(x, n, p_null),
      alt_prob = dbinom(x, n, p_alt)
    ) %>%
    arrange(null_prob) %>% # sort by null probability
    mutate(
      cumsum_prob = cumsum(null_prob), # running total
      reject =
        ifelse(cumsum_prob <= alpha,
               "reject",
               "don't reject")
    ) %>%
    arrange(x) # sort by x values again
  }
binom_power_data(10, 0.5) %>% head(3)
## ----binomial-power-sol02------------------------------------------------
# Power = total alternative probability of the rejection region.
binom_power <- function(n, p_alt, alpha = 0.05, p_null = 0.5) {
  binom_power_data(n = n, p_alt = p_alt, alpha = alpha, p_null = p_null) %>%
    filter(reject == "reject") %>%
    summarise(n = n, p_null = p_null, p_alt = p_alt, power = sum(alt_prob))
}
binom_power(n = 400, p_null = 1/3, p_alt = 1/4)
binom_power(n = 400, p_null = 1/3, p_alt = 1/4)$power
## ----binomial-power-sol03------------------------------------------------
# Power as a function of sample size (one binom_power call per n).
Plot_Data <-
  tibble(
    n = 1:1000,
    power = sapply(n, function(n) binom_power(n = n, p_null = 1/3, p_alt = 1/4)$power)
  )
gf_line(power ~ n, data = Plot_Data)
## ----binomial-power-sol04------------------------------------------------
# Plot null and alternative pmfs with the rejection region highlighted;
# the caption reports the attained alpha and the power.
binom_power_plot <- function(n, p_alt, alpha = 0.05, p_null = 0.5) {
  BPD <- binom_power_data(n = n, p_alt = p_alt, alpha = alpha, p_null = p_null)
  BPDnull <- BPD %>% mutate(case = factor("null", levels = c("null", "alternative")))
  BPDalt <- BPD %>% mutate(case = factor("alternative", levels = c("null", "alternative")))
  true_alpha <- sum( ~ null_prob, data = BPD %>% filter(cumsum_prob <= alpha))
  power <- sum( ~ alt_prob, data = BPD %>% filter(cumsum_prob <= alpha))
  gf_point(null_prob ~ x, color = ~ reject, alpha = 0.7, data = BPDnull) %>%
    gf_segment(null_prob + 0 ~ x + x, color = ~ reject, alpha = 0.7, data = BPDnull) %>%
    gf_point(alt_prob ~ x, color = ~ reject, alpha = 0.7, data = BPDalt) %>%
    gf_segment(alt_prob + 0 ~ x + x, color = ~ reject, alpha = 0.7, data = BPDalt) %>%
    gf_facet_grid( case ~ .) %>%
    gf_lims(
      # roughly +/- 4 standard deviations around both means
      x = range(c(
        n * p_null + c(-1, 1) * sqrt(n * p_null * (1-p_null)) * 4,
        n * p_alt + c(-1, 1) * sqrt(n * p_alt * (1-p_alt)) * 4)
      )
    ) %>%
    gf_refine(scale_color_manual(values = c("navy", "red"))) %>%
    gf_labs(
      caption = paste("nominal alpha = ", format(round(alpha, 3)),
                      "true alpha = ", format(round(true_alpha, 3)),
                      "; power = ", format(round(power, 3))))
}
binom_power_plot(n = 400, p_null = 1/3, p_alt = 1/4)
## ----mean-coins01--------------------------------------------------------
# E(X) for the number of heads in 4 coin tosses, computed two ways.
vals <- 0:4
probs <- c(1, 4, 6, 4, 1) / 16 # providing probabilities directly
sum(vals * probs)
sum(0:4 * dbinom(0:4, 4, 0.5)) # using the fact that X is binomial
## ----number-of-suits-sol-------------------------------------------------
# Distribution of the number of suits in a 5-card poker hand.
p<-rep(NA, 4)
p[1] <- choose(4, 1) * choose(13, 5) / choose(52, 5)
p[2] <- choose(4, 2) * (choose(26, 5) - (choose(13, 5) + choose(13, 5))) /
  choose(52, 5)
p[4] <- choose(4, 1) * choose(13, 2) * 13 * 13 * 13 / choose(52, 5)
p[3] <- 1 - sum(p[-3]) # sum of all probabilities must be 1
rbind(1:4, p)
## ----mean-coins02--------------------------------------------------------
# E(X) for a negative binomial rv, approximated by truncating the sum.
sum(0:100 * dnbinom(0:100, 3, 0.5))
## ----var01---------------------------------------------------------------
# Variance of binomial rvs computed directly from the definition.
x <- 0:2
sum((x - 1)^2 * dbinom(x, 2, 0.5)) # same as above
n <- 5; p <- 0.2; x <- 0:n
sum((x - n*p)^2 * dbinom(x, n, p)) # X ~ Binom(5, 0.2)
n <- 5; p <- 0.8; x <- 0:n
sum((x - n*p)^2 * dbinom(x, n, p)) # X ~ Binom(5, 0.8)
n <- 10; p <- 0.8; x <- 0:n
sum((x - n*p)^2 * dbinom(x, n, p)) # X ~ Binom(10, 0.8)
## ----var02, digits = 4---------------------------------------------------
# Mean and variance of a small discrete distribution, two ways.
y <- 1:4
prob <- c(0.05, 0.20, 0.40, 0.35)
mean.y <- sum(y * prob); mean.y # E(Y)
sum((y - mean.y)^2 * prob) # Var(Y)
sum(y^2 * prob) - mean.y^2 # Var(Y) again
## ----Bernoulli-var-fig, echo = FALSE-------------------------------------
# Var(X) = pi(1 - pi) for a Bernoulli rv, as a function of pi.
x <- seq(0, 1, by = 0.01)
gf_line( x * (1-x) ~ x, size = 1) %>%
  gf_labs(title = "Variance of a Bernoulli random variable",
          x = expression(pi), y = expression(Var(X)) )
## ----socks-exp-sol-------------------------------------------------------
vals <- 1:4
probs <- (4:1) / 10
sum(vals*probs) # expected value
sum(vals^2 * probs) - sum(vals * probs)^2 # variance
## ----negbinomial-var-sol-------------------------------------------------
# Variance of negative binomial rvs via truncated sums; the second block
# uses more terms because the p = 0.2 distribution has a long right tail.
vals <- 0:50
probs.x <- dnbinom(vals, 3, 0.5)
probs.y <- dnbinom(vals, 3, 0.2)
var.x <- sum(vals^2 * probs.x) - sum(vals * probs.x)^2; var.x
var.y <- sum(vals^2 * probs.y) - sum(vals * probs.y)^2; var.y
# better approximation using more terms:
vals <- 0:500
probs.x <- dnbinom(vals, 3, 0.5)
probs.y <- dnbinom(vals, 3, 0.2)
var.x <- sum(vals^2 * probs.x) - sum(vals * probs.x)^2; var.x
var.y <- sum(vals^2 * probs.y) - sum(vals * probs.y)^2; var.y
## ----search-expval-sol---------------------------------------------------
# expected value of binary search
vals <- 1:8
probs <- 2^(vals - 1) / 255
sum(vals * probs)
# expected value of linear search
vals <- 1:255
probs <- rep(1/255, 255)
sum(vals * probs)
## ----roulette-sol--------------------------------------------------------
val <- c(-1, 1)
prob <- c(20/38, 18/38)
sum( val * prob) # expected value
sum( val^2 * prob) - sum(val * prob)^2 # variance
## ----willy-sol-----------------------------------------------------------
# E(1/X) for a fair die -- note it is NOT 1/E(X) = 1/3.5.
val <- 1/(1:6)
sum(val * 1/6) # expected value
1/3.5
## ----discrete-sum--------------------------------------------------------
# Joint pmf f(x, y) = x^2 * y / 84 on {1,2,3} x {1,2,3};
# distribution and mean of the sum S = X + Y.
f <- function(x,y) x^2 * y / 84
vals <- 2:6
probs <- c(
  "2" = f(1,1),
  "3" = f(1,2) + f(2,1),
  "4" = f(1,3) + f(2,2) + f(3,1),
  "5" = f(2,3) + f(3,2),
  "6" = f(3,3)
)
probs
probs %>% fractions()
# check that total probability = 1
sum(probs)
# E(S)
sum(vals * probs)
## ----alice-bob-sol-------------------------------------------------------
# P(Alice wins) as a function of the per-round success probability:
# win immediately, or survive the first exchange and win a later round.
f <- function(prob) {
  miss <- 1 - prob
  prob + prob * miss^3 / (1 - miss^2)
}
# Shifted so the root of g is where Alice's winning probability is 1/2.
g <- function(prob) { f(prob) - 0.50 }
uniroot(g, c(0.20, 0.5))$root # when g = 0, f = 0.5
## ----poisson-customers01-------------------------------------------------
# P(no customers) when the count is Poisson with mean 2.
dpois(0, 2)
## ----poisson-customers02-------------------------------------------------
# P(10 or more customers) when the mean is 6.
1- ppois(9, 6)
## ----fumbles01-----------------------------------------------------------
# Week-1 fumble counts for 120 football teams (Fumbles is a data frame;
# factor(levels = 0:m) makes tally report zero counts too).
m <- max(~week1, data = Fumbles)
tally( ~ factor(week1, levels = 0:m), data = Fumbles)
favstats( ~ week1, data = Fumbles)
## ----fumbles02, tidy = FALSE, include=FALSE------------------------------
# Compare observed week-1 counts with a Poisson model fitted by the mean.
m <- max( ~ week1, data = Fumbles)
xbar <- mean( ~ week1, data = Fumbles); xbar
Week1 <-
  tibble(
    fumbles = 0:m,
    `observed count` =
      as.vector(tally( ~ factor(week1, levels = 0:m), data = Fumbles)),
    `model count` = 120 * dpois(0:m, xbar)
  ) %>%
  mutate(
    `observed pct` = 100 * `observed count` / 120,
    `model pct` = 100 * `model count` / 120
  )
Week1
## ----results = "asis", echo=FALSE----------------------------------------
print(xtable::xtable(Week1), include.rownames = FALSE)
## ----fumbles-fig, echo=FALSE, fig.keep = "last"--------------------------
gf_dhistogram( ~ week1, data = Fumbles, binwidth = 1, alpha = 0.3) %>%
  gf_dist("pois", lambda = mean( ~ week1, data = Fumbles) )
## ----pois-hint-----------------------------------------------------------
# Rescale the rate: 6 per hour = 2 per 1/3 hour.
dpois(0, 6/3) # 0 customers in 1/3 hour
dpois(2, 6/3) # 2 customers in 1/3 hour
## ----pois-sol------------------------------------------------------------
dpois(0, 6/3) # 0 customers in 1/3 hour
dpois(2, 6/3) # 2 customers in 1/3 hour
1-ppois(6, 6) # more than 6 in 1 hour
dpois(6, 6) # exactly 6 in 1 hour
ppois(5, 6) # less than 6 in 1 hour
ppois(30, 24) - ppois(19, 24) # 20 to 30 customers in 4 hours
## ----hockey-goals01-sol--------------------------------------------------
# Rate 206 goals in 506 games, scaled to an 89-game season.
1 - ppois(43, 206/506 * 89)
## ----hockey-goals02-sol--------------------------------------------------
1 - ppois(41, 206/506 * 88)
## ----hockey-goals03-sol--------------------------------------------------
# Two-sided tail: outcomes at most as likely as observing 44 goals.
max(which(dpois(1:30, 206/506 * 89) <= dpois(44, 206/506 * 89)))
ppois(28, 206/506 * 89)
ppois(28, 206/506 * 89) + 1 - ppois(43, 206/506 * 89)
## ----fumbles-23a-sol-----------------------------------------------------
# Same summaries as week 1, for weeks 2 and 3.
m <- max( ~ week2, data = Fumbles)
tally( ~ factor(week2, levels = 0:m), data = Fumbles)
favstats( ~ week2, data = Fumbles)
m <- max( ~ week3, data = Fumbles)
tally( ~ factor(week3, levels = 0:m), data = Fumbles)
favstats( ~ week3, data = Fumbles)
## ----fumbles-23b-sol, fig.keep = "last"----------------------------------
gf_dhistogram(~ week2, data = Fumbles, binwidth = 1, alpha = 0.3) %>%
  gf_dist("pois", lambda = mean( ~ week2, data = Fumbles))
## ----fumbles-23-sol3, fig.keep = "last"----------------------------------
gf_dhistogram(~ week3, data = Fumbles, binwidth = 1, alpha = 0.3) %>%
  gf_dist("pois", lambda = mean( ~ week3, data = Fumbles))
## ----fumbles-all-sol, fig.keep="last"------------------------------------
# Sum of (roughly) Poisson counts over the three weeks.
Fumbles <- Fumbles %>% mutate(all = week1 + week2 + week3)
gf_dhistogram(~ all, data = Fumbles, binwidth = 1, alpha = 0.3) %>%
  gf_dist("pois", lambda = mean( ~ all, data = Fumbles)) %>%
  gf_labs(title = "All fumbles weeks 1-3")
## ----fumbles-simulated-sol, fig.keep="last", opts.label = "fig1"---------
# Eight simulated "weeks" of Poisson(1.75) fumbles for 120 teams each,
# to show how much sampling variability to expect.
Sims <- data.frame(fumbles = rpois(120 * 8, 1.75),
                   sample = rep(LETTERS[1:8], each = 120))
favstats( ~ fumbles | sample, data = Sims)
gf_dhistogram( ~ fumbles | sample, data = Sims, binwidth = 1) %>%
  gf_dist("pois", lambda = 1.75)
## ----youth-soccer--------------------------------------------------------
# Hypergeometric probabilities for choosing 6 players from 7 girls + 5 boys.
1 - phyper(3, m = 7, n = 5, k = 6) # from "girls' perspective"
phyper(2, m = 5, n = 7, k = 6) # redone from "boys' perspective"
## ----lady-hyper----------------------------------------------------------
# Lady-tasting-tea: P(X >= x) for x = 0..5 correct identifications.
setNames(1 - phyper(-1:4, 5, 5, 5), paste0("x=", 0:5))
## ----lady-binom----------------------------------------------------------
# Binomial analogue when each cup is guessed independently.
setNames(1 - pbinom(2 * (0:5) - 1, 10, 0.5), paste0("x=", 2 * 0:5))
## ----power-lady-sol------------------------------------------------------
# compute rejection regions
binom.crit <- qbinom(0.95, 10, 0.5); binom.crit
hyper.crit <- qhyper(0.95, 5, 5, 5); hyper.crit
# now compute the power
1 - pbinom(binom.crit - 1, 10, 0.9)
1 - phyper(hyper.crit - 1, 5, 5, 5)
## ----fisher-twins01, digits = 4------------------------------------------
# Fisher's exact test on the twin-conviction data, done by hand (phyper)
# and via fisher.test(); pval() extracts just the p-value.
phyper(2, 17, 13, 12)
convictions <- rbind(dizygotic = c(2, 15), monozygotic = c(10, 3))
colnames(convictions) <- c("convicted", "not convicted")
convictions
fisher.test(convictions, alternative = "less") %>% pval()
fisher.test(convictions, alternative = "less")
## ----fisher-twins02------------------------------------------------------
# Two-sided version of the same test.
fisher.test(convictions) %>% pval()
## ----ticket01-sol--------------------------------------------------------
d <- cbind(c(9, 13), c(14, 9)); d
fisher.test(d)
phyper(9, 23, 22, 22)
## ----ticket02-sol--------------------------------------------------------
d <- cbind(c(61, 103), c(69, 44)); d
fisher.test(d)
phyper(61, 61 + 103, 44 + 69, 61 + 69)
## ----ticket03-sol--------------------------------------------------------
# Sample odds ratios (and their reciprocals) for the two tables above.
or <- 9/13 / (14/9); c(or, 1/or)
or <- 61/103 / (69/44); c(or, 1/or)
## ----fisher-twins01-alt-sol----------------------------------------------
# Same test with rows/columns rearranged; the p-value is unchanged.
phyper(3, 13, 17, 18)
convictions <- rbind(monozygotic = c(3, 10), dizygotic = c(15, 2))
colnames(convictions) <- c("not convicted", "convicted")
convictions
fisher.test(convictions, alternative = "less")
## ----first-digit, opts.label = "fig1"------------------------------------
firstDigit <- function(x) {
  # Most significant decimal digit of |x|; vectorized, x must be nonzero.
  magnitude <- 10^(floor(log10(abs(x))))   # largest power of 10 not exceeding |x|
  trunc(x / magnitude)
}
# rivers contains lengths (mi) of 141 major North American rivers
# Rivers has first digits of lengths in miles and km
# Benford's-law comparison: observed first-digit proportions (in two units)
# versus the log10 model.
Rivers <-
  tibble(
    digit = 1:9,
    model = log10(digit + 1) - log10(digit),
    miles = as.numeric(tally( ~ firstDigit(rivers), format = "prop")),
    km = as.numeric(tally( ~ firstDigit(1.61 * rivers), format = "prop"))
  ) %>%
  tidyr::gather(source, proportion, model:km)
gf_point(proportion ~ digit, color = ~ source, data = Rivers) %>%
  gf_line(proportion ~ digit, color = ~ source, data = Rivers) %>%
  gf_refine(scale_x_continuous(breaks = 1:10))
## ----multinom, fig.keep="none"-------------------------------------------
# P(X1 = 20 & X2 = 30 & X3 = 50)
dmultinom(c(20, 30, 50), size = 100, prob = c(0.2, 0.3, 0.5))
# 1 column for each of 10 random draws from Multinom(100, <0.2, 0.3, 0.5>)
rmultinom(10, size = 100, prob = c(0.2, 0.3, 0.5))
# create a data frame with 1000 draws
SimMultinom <-
  rmultinom(2000, size = 100, prob = c(0.2, 0.3, 0.5)) %>%
  t() %>% data.frame()
head(SimMultinom, 3)
# scatter plot shows the negative correlation between X1 and X2
gf_point(X2 ~ X1, data = SimMultinom, alpha = 0.2) %>%
  gf_density2d() %>%
  gf_labs(x = expression(X[1]), y = expression(X[2]))
## ----multinom-fig, echo = FALSE, results = "hide"------------------------
# Same code re-run (echo=FALSE) so the figure can be placed separately.
# P(X1 = 20 & X2 = 30 & X3 = 50)
dmultinom(c(20, 30, 50), size = 100, prob = c(0.2, 0.3, 0.5))
# 1 column for each of 10 random draws from Multinom(100, <0.2, 0.3, 0.5>)
rmultinom(10, size = 100, prob = c(0.2, 0.3, 0.5))
# create a data frame with 1000 draws
SimMultinom <-
  rmultinom(2000, size = 100, prob = c(0.2, 0.3, 0.5)) %>%
  t() %>% data.frame()
head(SimMultinom, 3)
# scatter plot shows the negative correlation between X1 and X2
gf_point(X2 ~ X1, data = SimMultinom, alpha = 0.2) %>%
  gf_density2d() %>%
  gf_labs(x = expression(X[1]), y = expression(X[2]))
## ----rmultinom-sol-------------------------------------------------------
rmultinom2 <- function(n, size, prob) {
  # Draw n multinomial samples of the given size by binning `size` uniform
  # draws per sample according to the cumulative category probabilities.
  # Returns a length(prob) x n matrix of counts (one column per sample),
  # matching the shape of stats::rmultinom().
  prob <- prob / sum(prob)                    # normalize, in case probs don't sum to 1
  x <- runif(n * size, 0, 1)
  y <- as.numeric(cut(x, c(0, cumsum(prob)))) # category index (1..k) for each draw
  M <- matrix(y, nrow = n)                    # one row of category indices per sample
  # Use tabulate() with a fixed number of bins so every category is counted,
  # including zeros.  (The earlier table()-based version relied on factor
  # levels that matrix() strips, so empty categories could be dropped and the
  # result could even become a ragged list.)
  counts <- apply(M, 1, tabulate, nbins = length(prob))
  rownames(counts) <- seq_along(prob)         # label rows by category index
  counts
}
rmultinom2(4, 100, c(.1, .2, .3, .4))
## ----kings-and-queens-sol------------------------------------------------
# Conditional probability P(K=2 | Q=2) for a 5-card hand, via counting.
numerator <- # P(K=2 & Q=2)
  choose(4, 2) * # pick 2 Kings
  choose(4, 2) * # pick 2 Queens
  choose(52 - 4 - 4, 1) / # pick 1 other card
  choose(52, 5)
denominator <- # P(Q = 2)
  choose(4, 2) * # pick 2 Queens
  choose(52 - 4, 3) / # pick 3 other cards
  choose(52, 5)
numerator / denominator
## ----kings-and-hearts-sol------------------------------------------------
# P(K=2 | H=2); the King of Hearts makes the joint count a two-case sum.
numerator <- # P(K=2 & H=2)
  ( 1 * # pick King of Hearts
    choose( 3, 1) * # pick one other King
    choose(12, 1) * # pick one other heart
    choose(52 - 1 - 12 - 3, 2) + # pick 2 other cards
    #
    choose( 3, 2) * # pick 2 Kings (not hearts)
    choose(12, 2) * # pick 2 hearts (not King)
    choose(52 - 1 - 12 - 3, 1) # pick 1 other card
  ) / choose(52, 5)
denominator <- # P(H = 2)
  choose(13, 2) * # pick 2 hearts
  choose(52 - 13, 3) / # pick 3 other cards
  choose(52, 5)
numerator / denominator
## ----conditional-urn-sol-------------------------------------------------
# Conditional distributions in the urn problem, tabulated with outer().
# P(B=b | W=w) = dhyper(b, 3, 5, 3 - w)
probs <-
  outer(0:2, 0:3, function(x, y){dhyper(y, 3, 5, 3 - x)})
colnames(probs) = paste("B=", 0:3, sep="")
rownames(probs) = paste("W=", 0:2, sep="")
probs
fractions(probs)
#
# P(R=r | W=w) = dhyper(r, 5, 3, 3-w)
probs <-
  outer(0:2, 0:3, function(x, y){dhyper(y, 5, 3, 3 - x)})
colnames(probs) = paste("R=", 0:3, sep = "")
rownames(probs) = paste("W=", 0:2, sep = "")
probs
fractions(probs)
## ----burger-barn-sol-----------------------------------------------------
# Coupon-collector: success probability for each new prize, then the mean
# and variance of the total number of purchases (sums of geometric rvs).
prob <- (10 - (0:9)) / 10
# expected value
sum(1/prob)
# variance
sum((1 - prob) / prob^2)
## ----ContinuousDistributions, child="ContinuousDistributions.Rnw", eval=includeChapter[3]----
## ----cont-setup, include = FALSE, cache = FALSE--------------------------
# Chapter setup: chunk cache location and the grid package for figures.
knitr::opts_chunk$set(cache.path = "cache/Cont-")
require(grid)
## ----pdfdef, include=FALSE-----------------------------------------------
# Density histograms of (quantile-spaced) normal values with ever finer
# bins, motivating the definition of a pdf as a limit of histograms.
p <- ppoints(10000)
x <- qnorm(p, 10, 2.5) # x <- rnorm(40000, 10, 2.5)
gf_dhistogram( ~ x, bins = 20, color = "black", fill = "navy", alpha = 0.5) %>%
  gf_labs(x = "", y = "") %>%
  gf_theme(
    axis.text = element_blank(),
    axis.ticks = element_blank()
  )
gf_dhistogram( ~ x, bins = 40, color = "black", fill = "navy", alpha = 0.5) %>%
  gf_labs(x = "", y = "") %>%
  gf_theme(
    axis.text = element_blank(),
    axis.ticks = element_blank()
  )
gf_dhistogram( ~ x, bins = 161, color = "black", fill = "navy", alpha = 0.5) %>%
  gf_labs(x = "", y = "") %>%
  gf_theme(
    axis.text = element_blank(),
    axis.ticks = element_blank()
  )
## ----pdfdef-fig, echo=FALSE, opts.label="fig3"---------------------------
# Same three histograms re-run (echo=FALSE) for figure placement.
p <- ppoints(10000)
x <- qnorm(p, 10, 2.5) # x <- rnorm(40000, 10, 2.5)
gf_dhistogram( ~ x, bins = 20, color = "black", fill = "navy", alpha = 0.5) %>%
  gf_labs(x = "", y = "") %>%
  gf_theme(
    axis.text = element_blank(),
    axis.ticks = element_blank()
  )
gf_dhistogram( ~ x, bins = 40, color = "black", fill = "navy", alpha = 0.5) %>%
  gf_labs(x = "", y = "") %>%
  gf_theme(
    axis.text = element_blank(),
    axis.ticks = element_blank()
  )
gf_dhistogram( ~ x, bins = 161, color = "black", fill = "navy", alpha = 0.5) %>%
  gf_labs(x = "", y = "") %>%
  gf_theme(
    axis.text = element_blank(),
    axis.ticks = element_blank()
  )
## ----pdf-fig, echo=FALSE, results="hide"---------------------------------
# A first pdf example: f(x) = x^2/9 on [0, 3]; the indicator factor makes
# f zero outside the support so integrate() can be used directly.
# define the pdf for X
f <- function(x) { x^2 / 9 * (0 <= x & x <= 3) }
# numerical integration gives approximation and tolerance
integrate(f, 0, 3)
integrate(f, -Inf, Inf) # same value but less precise
integrate(f, 0, 1)
integrate(f, 0, 1) %>% value() # just the approximation value
# find nearby fraction
integrate(f, 0, 1) %>% value() %>% fractions()
# Plot; grouping by (x > 3) prevents a line segment across the jump at 3.
gf_line(y ~ x, data = tibble(x = seq(-1, 4, by = 0.01), y = f(x)),
        group = ~ (x > 3)) %>%
  gf_labs(y = "f(x)")
## ----pdf, fig.keep = "none"----------------------------------------------
# Same computations re-run for the echoed (no-figure) version of the chunk.
# define the pdf for X
f <- function(x) { x^2 / 9 * (0 <= x & x <= 3) }
# numerical integration gives approximation and tolerance
integrate(f, 0, 3)
integrate(f, -Inf, Inf) # same value but less precise
integrate(f, 0, 1)
integrate(f, 0, 1) %>% value() # just the approximation value
# find nearby fraction
integrate(f, 0, 1) %>% value() %>% fractions()
gf_line(y ~ x, data = tibble(x = seq(-1, 4, by = 0.01), y = f(x)),
        group = ~ (x > 3)) %>%
  gf_labs(y = "f(x)")
## ----integrate-----------------------------------------------------------
# Integrating over the support only, no indicator needed.
integrate(function(x) x^2 / 9, 0, 3)
## ----dist01-sol----------------------------------------------------------
# Normalize a kernel into a pdf on [-2, 2], then compute some probabilities.
kernel <- function(x) { (x - 2) * (x + 2) * (x >= -2 & x <= 2) }
k <- 1 / value(integrate(kernel, -2, 2)); k   # normalizing constant
f <- function(x) { k * kernel(x) }
fractions(k)
integrate(f, -2, 2) # check that we have pdf
integrate(f, 0, 2) # P( 0 <= X <= 2 )
fractions(integrate(f, 0, 2)$value)
integrate(f, 1, 2) # P( 1 <= X <= 2 )
fractions(integrate(f, 1, 2)$value)
integrate(f, -1, 1) # P( -1 <= X <= 1 )
fractions(integrate(f, -1, 1)$value)
## ----dist02-sol----------------------------------------------------------
# Same pattern for a kernel supported on [0, 3].
kernel <- function(x) { x * (x-3) * as.numeric(x >= 0 & x <= 3) }
k <- 1 / integrate(kernel, 0, 3)$value; k
fractions(k)
g <- function(x) { k * kernel(x) }
integrate(g, 0, 3) # check that we have pdf
integrate(g, 0, 1)
integrate(g, 0, 1) %>% value() %>% fractions()
integrate(g, 0, 2)
integrate(g, 0, 2) %>% value() %>% fractions()
integrate(g, 1, 2)
integrate(g, 1, 2) %>% value() %>% fractions()
## ----cauchy01-sol--------------------------------------------------------
# 90th percentile of the standard Cauchy, by formula and by qcauchy().
tan(0.4 * pi)
qcauchy(0.9)
## ----unif01--------------------------------------------------------------
# Unif(0, 10): pdf as an indicator function and a few interval probabilities.
x <- -1:11; x
f <- function(x) { 0.1 * (0 <= x & x <= 10) }
rbind(x, f(x)) # sanity check
# numerical integration gives approximation and error estimate
integrate(f, 7, 10)
integrate(f, 3, 7)
integrate(f, 7, 15)
## ----unif-pdf-cdf-fig, echo=FALSE----------------------------------------
# Plot pdf and cdf of Unif(0,1); grouping breaks the line at the jumps.
gf_line(y ~ x,
        data = tibble(x = seq(-0.5, 1.5, by = 0.001), y = dunif(x)),
        group = ~ (x < 0) + (x<=1) ) %>%
  gf_labs(y = "f(x)")
# gf_fun(dunif(x) ~ x, xlim = c(-0.5, 1.5), n = 1000) %>%
#   gf_labs(title = "pdf for Unif(0,1)",
#           x = "x", y = expression(f(x)))
gf_fun(punif(x) ~ x, xlim = c(-0.5, 1.5)) %>%
  gf_labs(title = "cdf for Unif(0,1)",
          x = "x", y = "F(x)")
## ----unif02--------------------------------------------------------------
# The r/d/p/q family for the uniform distribution.
runif(6, 0, 10) # 6 random values on [0,10]
dunif(5, 0, 10) # pdf is 1/10
punif(5, 0, 10) # half the distribution is below 5
qunif(0.25, 0, 10) # 1/4 of the distribution is below 2.5
## ----cdf-method01--------------------------------------------------------
# cdf method: pdf of Y = X^2 when X ~ Unif(0,1).
g <- function(y) { 1 / (2 * sqrt(y)) * (0 <= y & y <= 1) }
integrate(g, 0, 1)
## ----cdf-method02, fig.keep = "none"-------------------------------------
# cdf method: pdf of V on [0, 4] (abs() avoids NaN from sqrt of negatives).
fV <- function(v) (0 <= v & v <= 4) * 0.25 / sqrt(abs(v))
integrate(fV, 0, 4)
# gf_fun is not clever about discontinuities
gf_fun(fV(v) ~ v, xlim = c(-1, 5), n = 1000) %>%
  gf_lims(y = c(0, 1))
# we can be clever if we do things manually
gf_line(y ~ v, data = tibble(v = seq(-1, 5, by = 0.01), y = fV(v)),
        group = ~(v < 0) + (v <=4)) %>%
  gf_lims(y = c(0, 1))
## ----cdf-method02-fig, echo = FALSE, results = "hide"--------------------
# Re-run of the previous chunk for figure placement.
fV <- function(v) (0 <= v & v <= 4) * 0.25 / sqrt(abs(v))
integrate(fV, 0, 4)
# gf_fun is not clever about discontinuities
gf_fun(fV(v) ~ v, xlim = c(-1, 5), n = 1000) %>%
  gf_lims(y = c(0, 1))
# we can be clever if we do things manually
gf_line(y ~ v, data = tibble(v = seq(-1, 5, by = 0.01), y = fV(v)),
        group = ~(v < 0) + (v <=4)) %>%
  gf_lims(y = c(0, 1))
## ----exp-pdf-cdf-fig, echo=FALSE-----------------------------------------
# Exponential pdfs and cdfs for several rates, on a common grid.
plot_data <-
  expand.grid(
    x = seq(0, 5, by = 0.01),
    lambda = c(0.5, 1, 1.5)
  ) %>%
  mutate(
    density = dexp(x, rate = lambda),
    probability = pexp(x, rate = lambda)
  )
gf_line(density ~ x, color = ~ factor(lambda), group = ~lambda, data = plot_data) %>%
  gf_refine(guides(color = guide_legend(expression(lambda)))) %>%
  gf_labs(title = "Exponential pdfs")
gf_line(probability ~ x, color = ~ factor(lambda), group = ~lambda, data = plot_data) %>%
  gf_refine(guides(color = guide_legend(expression(lambda)))) %>%
  gf_labs(title = "Exponential cdfs")
## ----exp-from-unif-sol, seed = 2357--------------------------------------
# -10 log(U) with U ~ Unif(0,1) has an Exp(1/10) distribution; sorted
# values should fall near the identity line against sorted rexp draws.
U <- runif(10000)
X <- (-10 * log(U)) %>% sort()
Y1 <- rexp(10000, rate = 1/10) %>% sort()
Y2 <- rexp(10000, rate = 1/10) %>% sort()
gf_point(Y1 ~ X) %>%
  gf_abline(slope = 1, intercept = 0)
gf_point(Y2 ~ Y1) %>%
  gf_abline(slope = 1, intercept = 0)
## ----bank----------------------------------------------------------------
# Waiting-time questions answered with exponential and Poisson models;
# note the rate must match the time unit in each call.
1 - pexp(1/4, 10) # 10 customers per 1 hour
1 - pexp(15, 1/6) # 1/6 customer per 1 minute
dpois(0, 10/4) # 10/4 customers per 15 minutes
## ----pois-sim, eval=FALSE, seed=123, tidy = FALSE------------------------
## PoisSim <-
## expand.grid(run = 1:10, i = 1:40) %>%
## group_by(run) %>%
## mutate(interval = rexp(40), time = cumsum(interval))
## stop <- min(max(time ~ run, data = PoisSim)) # shortest run?
## stop <- 5 * trunc(stop / 5) # truncate to multiple of 5
## gf_point(run ~ time, data = PoisSim %>% filter(time <= stop),
## shape = 1, size = 0.7, col = "black") %>%
## gf_hline(yintercept = seq(1.5, 9.5, by = 1), color = "gray60") %>%
## gf_vline(xintercept = seq(0, stop, by = 5), color = "gray60") %>%
## gf_theme(panel.grid.major = element_blank(),
## panel.grid.minor = element_blank())
## ----pois-sim-fig, echo=FALSE, opts.label="fig1", cache=FALSE, seed=123----
# Ten simulated Poisson processes (exponential gaps, cumulative times),
# drawn as dot strips with a common time axis.
PoisSim <-
  expand.grid(run = 1:10, i = 1:40) %>%
  group_by(run) %>%
  mutate(interval = rexp(40), time = cumsum(interval))
stop <- min(max(time ~ run, data = PoisSim)) # shortest run?
stop <- 5 * trunc(stop / 5) # truncate to multiple of 5
gf_point(run ~ time, data = PoisSim %>% filter(time <= stop),
         shape = 1, size = 0.7, col = "black") %>%
  gf_hline(yintercept = seq(1.5, 9.5, by = 1), color = "gray60") %>%
  gf_vline(xintercept = seq(0, stop, by = 5), color = "gray60") %>%
  gf_theme(panel.grid.major = element_blank(),
           panel.grid.minor = element_blank())
## ----fumbles-time--------------------------------------------------------
# Time between fumbles (exponential) vs count of fumbles (Poisson):
# these two probabilities describe the same event.
1 - pexp(0.5, rate = 1.75)
dpois(0, 1.75 / 2)
## ----prob-cdf01, tidy = FALSE--------------------------------------------
# Moments by numerical integration: the extra argument k turns the pdf
# x/2 on [0, 2] into the integrand for E(X^k) (k is forwarded by integrate).
f <- function(x, k=0) {x^k * x/2 } # define pdf
integrate(f, lower = 0, upper = 2) # check it is a pdf
integrate(f, k=1, lower = 0, upper = 2) # expected value
integrate(f, k=2, lower = 0, upper = 2) # E(X^2)
## ----prob-cdf02, tidy = FALSE--------------------------------------------
# compute the variance using E(X^2) - E(X)^2
value(integrate(f, k=2, lower = 0, upper = 2)) -
  value(integrate(f, k=1, lower = 0, upper = 2))^2
## ----cdf01-sol-----------------------------------------------------------
# E(X) for the pdf 3x^2/8 on [0, 2] (makeFun is from mosaic).
g <- makeFun(3 * x^2 / 8 ~ x)
integrate(function(x) x * g(x), 0, 2)
## ----cdf02-sol, tidy = FALSE---------------------------------------------
# E(X^2) and Var(X) for the same distribution.
integrate(function(x){ x^2 * g(x) }, 0, 2)
integrate(function(x){ x^2 * g(x) }, 0, 2)$value -
  (integrate(function(x){ x * g(x) }, 0, 2)$value)^2
## ----cdf03-sol, tidy = FALSE---------------------------------------------
# Mean and variance of the pdf g(x) = 3x^2/8 on [0, 2], all via integrate().
g <- function(x) { 3 * x^2 / 8 } # define pdf
# NOTE: this previously integrated `f` (the x/2 pdf left over from an
# earlier chunk) -- a typo masked by the fact that both integrate to 1.
integrate(g, lower = 0, upper = 2) # check it is a pdf
xg <- function(x) { x * g(x) }
integrate(xg, lower = 0, upper = 2) # expected value
xxg <- function(x) { x^2 * g(x) }
integrate(xxg, lower = 0, upper = 2) # E(X^2)
# compute the variance using E(X^2) - E(X)^2
integrate(xxg, lower = 0, upper = 2)$value -
  (integrate(xg, lower = 0, upper = 2)$value)^2
## ----traffic01-sol-------------------------------------------------------
# Pareto-style pdf k/x^4 on [a, Inf): find k, probabilities, median, moments.
a <- 1
f <- function(x) {1 / x^4}
k <- 1 / integrate(f, a, Inf)$value; k   # normalizing constant
f <- function(x) {k / x^4}
integrate(f, a, Inf)                     # check: integrates to 1
integrate(f, 2, 3)
integrate(f, 2, 3) %>% value() %>% fractions()
## ----traffic02-sol-------------------------------------------------------
# find the median
# (root of F(x) - 0.5, with F computed numerically)
g <- function(x) { integrate(f, a, x)$value - 0.5 }
uniroot(g, c(1, 10))$root
## ----traffic03-sol, tidy = FALSE-----------------------------------------
# check with a == 1
a
xf <- function(x) { x * f(x) }
Ex <- integrate(xf, a, Inf)$value; Ex # E(X)
xxf <- function(x) { x^2 * f(x) }
Exx <- value(integrate(xxf, a, Inf)); Exx # E(X^2)
Exx - (Ex)^2 # variance
sqrt(Exx - (Ex)^2) # st dev
## ----normal-fig, echo=FALSE, opts.label = "fig1"-------------------------
# Normal pdfs with mean 0 and several standard deviations.
plot_data <-
  expand.grid(
    x = seq(-4, 4, by = 0.01),
    sigma = c(0.7, 1.0, 1.5)
  ) %>%
  mutate(density = dnorm(x, 0, sigma))
gf_line(density ~ x, data = plot_data, color = ~ factor(sigma), group = ~sigma) %>%
  gf_refine(guides(color = guide_legend(expression(sigma))))
## ----normal01------------------------------------------------------------
# Standardizing: probabilities depend only on the z-score.
# these two should return the same value:
pnorm(5, mean = 3, sd = 2) # 5 is 1 st dev above the mean of 3
pnorm(1)
## ----empirical-rule------------------------------------------------------
# 68-95-99.7 rule: probability within 1, 2, 3 standard deviations.
pnorm(1:3) - pnorm(-1:-3)
## ----sat01---------------------------------------------------------------
# SAT-style questions with scores ~ Norm(500, 100).
pnorm(700, 500, 100) - pnorm(400, 500, 100)
1- pnorm(800, 500, 100)
## ----sat02---------------------------------------------------------------
pnorm(500, 422, 100)
pnorm(500, 475, 100)
## ----sat03---------------------------------------------------------------
# 80th percentile under two different standard deviations.
qnorm(0.80, 500, 100)
qnorm(0.80, 500, 110)
## ----sat01-sol-----------------------------------------------------------
1 - pnorm(800, 500, 110)
## ----gamma-function-fig, echo=FALSE--------------------------------------
# The gamma function interpolates the factorials: Gamma(n) = (n-1)!.
inputs <- seq(0.05, 5, by = 0.05)
gf_point(gamma(1:5) ~ 1:5, shape = 1) %>%
  gf_line(gamma(inputs) ~ inputs) %>%
  gf_lims(y = c(0, factorial(4))) %>%
  gf_labs(x = "x", y = expression(Gamma(x)))
## ----gamma-pdfs-fig, echo=FALSE------------------------------------------
# Gamma pdfs: vary the shape at fixed rate, then the rate at fixed shape.
plot_data <-
  expand.grid(
    x = seq(0, 6, by = 0.01),
    shape = 1:3,
    rate = 1:3
  ) %>%
  mutate(
    density = dgamma(x, shape, rate),
    shape_label = paste("shape =", shape),
    rate_label = paste("rate =", rate)
  )
gf_line(density ~ x, data = plot_data %>% filter(rate == 2),
        color = ~ factor(shape), group = ~shape) %>%
  gf_refine(guides(color = guide_legend("shape"))) %>%
  gf_labs(title = "Gamma pdfs (rate = 2)")
gf_line(density ~ x, data = plot_data %>% filter(shape == 2),
        color = ~ factor(rate), group = ~rate) %>%
  gf_refine(guides(color = guide_legend("rate"))) %>%
  gf_labs(title = "Gamma pdfs (shape = 2)")
# gf_line(density ~ x | shape_label ~ rate_label, data = plot_data)
## ----weibull-pdfs-fig, echo=FALSE----------------------------------------
# Weibull pdfs: same idea with shape and scale parameters.
plot_data <-
  expand.grid(
    x = seq(0, 6, by = 0.01),
    shape = 1:3,
    scale = 1:3
  ) %>%
  mutate(
    density = dweibull(x, shape = shape, scale = scale),
    shape_label = paste("shape =", shape),
    scale_label = paste("scale =", scale)
  )
gf_line(density ~ x, data = plot_data %>% filter(scale == 2),
        color = ~ factor(shape), group = ~shape) %>%
  gf_refine(guides(color = guide_legend("shape"))) %>%
  gf_labs(title = "Weibull pdfs (scale = 2)")
gf_line(density ~ x, data = plot_data %>% filter(shape == 2),
        color = ~ factor(scale), group = ~scale) %>%
  gf_refine(guides(color = guide_legend("scale"))) %>%
  gf_labs(title = "Weibull pdfs (shape = 2)")
# gf_line(density ~ x | shape_label ~ scale_label, data = plot_data)
## ----middle-of-dist-sol--------------------------------------------------
# P(within one standard deviation of the mean) for several distributions.
pexp(1+1) - pexp(1-1)
pexp(1/2 + 1/2, rate = 2) - pexp(1/2 - 1/2, rate = 2)
punif(0.5 + sqrt(1/12), 0, 1) - punif(0.5 - sqrt(1/12), 0, 1)
s <- sqrt(8 / (6^2*7))
pbeta(2/6 + s, shape1 = 2, shape2 = 4) - pbeta(2/6 - s, shape1 = 2, shape2 = 4)
## ----weibull01-sol-------------------------------------------------------
# Weibull mean/variance from the Gamma-function formulas, then probabilities;
# repeated with the parameter roles swapped to show they are not symmetric.
# a = shape; b = scale (acts like 1/lambda from exponential dist)
a <- 2; b <- 3
m <- b * gamma(1 + 1/a); m # mean
v <- b^2 * ( gamma(1 + 2/a) - gamma(1+1/a)^2 ); v # var
qweibull(0.5, a, b) # median
pweibull(m, a, b) # less than mean
pweibull(6, a, b) - pweibull(1.5, a, b) # in a range
pweibull(m+sqrt(v), a, b) - pweibull(m - sqrt(v) , a, b) # near mean
# with roles of parameters reversed
a <- 3; b <- 2
m <- b * gamma(1 + 1/a); m # mean
v <- b^2 * ( gamma(1 + 2/a) - gamma(1+1/a)^2 ); v # var
qweibull(0.5, a, b) # median
pweibull(m, a, b) # less than mean
pweibull(6, a, b) - pweibull(1.5, a, b) # in a range
pweibull(m+sqrt(v), a, b) - pweibull(m - sqrt(v), a, b) # near mean
## ----beta-01-sol---------------------------------------------------------
m <- 5/(2+5); m # mean
v <- 5 * 2 / ( (5+2)^2 * (5 + 2 + 1) ); v # var
qbeta(0.5, 5, 2) # median
pbeta(m, 5, 2) # less than mean
pbeta(0.4, 5, 2) - pbeta(0.2, 5, 2) # in a range
pbeta(m+sqrt(v), 5, 2) - pbeta(m-sqrt(v), 5, 2) # near mean
## ----beta-pdfs-fig, echo=FALSE, opts.label = "figbig"--------------------
# Grid of Beta pdfs over combinations of shape1/shape2; plotted as a
# faceted grid (the single-panel overlays are kept commented out).
plot_data <-
  expand.grid(
    x = seq(0, 1, by = 0.01),
    shape1 = c(0.8, 2, 4),
    shape2 = c(0.8, 2, 4)
  ) %>%
  mutate(
    density = dbeta(x, shape1 = shape1, shape2 = shape2),
    shape1_label = paste("shape1 =", shape1),
    shape2_label = paste("shape2 =", shape2)
  )
# gf_line(density ~ x, data = plot_data %>% filter(shape2 == 2),
#         color = ~ factor(shape1), group = ~shape1) %>%
#   gf_refine(guides(color = guide_legend("shape1"))) %>%
#   gf_labs(title = "Beta pdfs (shape2 = 2)")
#
# gf_line(density ~ x, data = plot_data %>% filter(shape1 == 2),
#         color = ~ factor(shape2), group = ~shape2) %>%
#   gf_refine(guides(color = guide_legend("shape2"))) %>%
#   gf_labs(title = "Beta pdfs (shape1 = 2)")
gf_line(density ~ x | shape2_label ~ shape1_label, data = plot_data)
## ----faithful-kde01, echo=FALSE------------------------------------------
# Old Faithful eruption times: histogram with a density estimate overlaid.
times <- faithful$eruptions
gf_dhistogram( ~ times, binwidth = 0.25, alpha = 0.3) %>%
  gf_dens( ~ times, size = 1, alpha = 0.9)
## ----kde01---------------------------------------------------------------
# Small example data set used to illustrate kernel density estimation.
x <- c(2.2, 3.3, 5.1, 5.5, 5.7, 5.7, 6.9, 7.8, 8.4, 9.6)
## ----kde02---------------------------------------------------------------
# Four kernel functions.  Each is nonnegative and symmetric about 0;
# they need not integrate to 1 (kde() below rescales by the total area).
# rectangular (uniform on (-1, 1))
K1 <- function(x) {
  as.numeric(-1 < x & x < 1)
}
# triangular (peak 1 at 0, support (-1, 1))
K2 <- function(x) {
  as.numeric(abs(x) < 1) * (1 - abs(x))
}
# parabola / Epanechnikov
K3 <- function(x) {
  as.numeric(abs(x) < 1) * (1 - x^2)
}
# Gaussian
K4 <- dnorm
## ----faithful-kde02, echo=FALSE------------------------------------------
# Plot each of the four kernels over (-3, 3) for comparison.
gf_fun(K1(x) ~ x, xlim = c(-3, 3)) %>% gf_labs(y = expression(K[1]))
gf_fun(K2(x) ~ x, xlim = c(-3, 3)) %>% gf_labs(y = expression(K[2]))
gf_fun(K3(x) ~ x, xlim = c(-3, 3)) %>% gf_labs(y = expression(K[3]))
gf_fun(K4(x) ~ x, xlim = c(-3, 3)) %>% gf_labs(y = expression(K[4]))
## ----kde03---------------------------------------------------------------
# Construct a kernel density estimate from data.
#
# data   : numeric vector of observations
# kernel : kernel function (need not integrate to 1; it is rescaled)
# ...    : extra arguments passed through to the kernel
#
# Returns a function of x: the average of one rescaled kernel bump
# centered at each observation.
#
# Fix: the original extracted the integral with `%>% value()`, which
# requires magrittr and mosaic to be attached; base R's `$value` does
# exactly the same job with no extra dependencies.
kde <- function(data, kernel = K1, ...) {
  n <- length(data)
  # total area under the kernel, used to normalize it to a density
  scalingConstant <-
    integrate(function(x) { kernel(x, ...) }, -Inf, Inf)$value
  function(x) {
    # rows = evaluation points, cols = data points
    mat <- outer(x, data,
                 FUN = function(x, data) { kernel(x - data, ...) })
    rowSums(mat) / (n * scalingConstant)
  }
}
## ----kde04-fig, echo=FALSE-----------------------------------------------
# Lattice plot of a kernel density estimate together with the individual
# (rescaled) kernel bumps at each data point and a rug of the data.
# NOTE(review): panel.mathdensity appears to come from the mosaic/lattice
# stack -- confirm which package supplies it before reusing elsewhere.
#
# data   : numeric vector of observations
# kernel : kernel function (rescaled by its total integral, as in kde())
# args   : list of extra arguments for the kernel
# n      : number of evaluation points for the curves
kdeplot <- function(data, kernel, xlim, ylim,
                    args = list(),
                    lty = 1,
                    lwd = 3,
                    col = trellis.par.get("plot.line")$col,
                    buffer = 0,
                    n = 50,
                    xlab = "x",
                    ylab = "density",
                    ...) {
  ndata <- length(data)
  # area under the raw kernel; each bump is divided by ndata * this
  scalingConstant = integrate( function(x) {
    do.call( kernel, c(list(x), args) ) },
    -Inf, Inf)$value
  # default x limits: data range, optionally padded by `buffer`
  if (missing(xlim)) {
    xlim = range(data)
    buf <- buffer * diff(xlim)
    xlim <- xlim + c(-1, 1) * buf
  }
  # default y limits: range of the kernel evaluated across xlim
  if (missing(ylim)) {
    xvals <- seq(xlim[1], xlim[2], length = n)
    # yvals <- fun(xvals, unlist(args))
    yvals = do.call(kernel,
                    c(list(x = seq(xlim[1], xlim[2], length = 100)), args))
    ylim <- range(yvals)
    buf <- buffer * diff(ylim)
    ylim <- ylim + c(-1, 1) * buf
  }
  xyplot(ylim ~ xlim, xlab = xlab, ylab = ylab,
         panel = function(x, y, ...){
           # the overall kde curve
           panel.mathdensity(kde(data, kernel, ...),
                             args = args,
                             lwd = 3.5,
                             lty = lty,
                             col = col,
                             n = n,
                             ...)
           # one faint bump per data value
           for (d in data) {
             panel.mathdensity(
               dmath = function(x){
                 y <- do.call(kernel, c(list(x-d), args))
                 y / (ndata * scalingConstant)
               },
               args = args,
               lwd = 1.5,
               lty = 1,
               col = trellis.par.get("superpose.line")$col[2],
               n = n,
               ...)
           }
           panel.rug(data, lwd = 2)
         },
         ...)
}
# Compare the four kernels on the same small data set.
kdeplot(x, K1, xlim = c(1, 11), ylim = c(0, 0.32), n = 500,
        main = expression(K[1]))
kdeplot(x, K2, xlim = c(1, 11), ylim = c(0, 0.32), n = 500,
        main = expression(K[2]))
kdeplot(x, K3, xlim = c(1, 11), ylim = c(0, 0.32), n = 500,
        main = expression(K[3]))
kdeplot(x, K4, xlim = c(1, 11), ylim = c(0, 0.32), n = 500,
        main = expression(K[4]))
## ----K5-fig, echo=FALSE--------------------------------------------------
# A narrower Gaussian kernel (sd chosen so its variance matches K2's).
K5 <- function(x, ...) { dnorm(x, sd = sqrt(1/6), ...) }
kdeplot(x, K2, xlim = c(1, 11), ylim = c(0, 0.32), n = 500,
        main = expression(K[2]))
kdeplot(x, K5, xlim = c(1, 11), ylim = c(0, 0.35), n = 500,
        main = expression(K[5]))
## ----faithful-kde03, fig.keep = "none"-----------------------------------
# Density estimates of eruption duration with different kernels and
# bandwidth adjustments; density() reports the bandwidth actually used.
duration <- faithful$eruptions
gf_dens( ~ duration, kernel = "rectangular") %>%
  gf_labs(title = "Rectangular kernel")
gf_dens( ~ duration, kernel = "triangular") %>%
  gf_labs(title = "Triangular kernel")
gf_density( ~ duration) %>%
  gf_labs(title = "Normal kernel")
gf_density( ~ duration, adjust = 0.25) %>%
  gf_labs(title = "Normal kernel; adjust = 0.25")
density(duration)    # display some information about the kde
## ----faithful-kde03-fig, echo=FALSE, results = "hide"--------------------
# Same code as above, re-run to produce the figures for the text.
duration <- faithful$eruptions
gf_dens( ~ duration, kernel = "rectangular") %>%
  gf_labs(title = "Rectangular kernel")
gf_dens( ~ duration, kernel = "triangular") %>%
  gf_labs(title = "Triangular kernel")
gf_density( ~ duration) %>%
  gf_labs(title = "Normal kernel")
gf_density( ~ duration, adjust = 0.25) %>%
  gf_labs(title = "Normal kernel; adjust = 0.25")
density(duration)    # display some information about the kde
## ----do-call00, eval = FALSE---------------------------------------------
## sampleData <- do.call(rdist, args = c(list(n = size), args));
## ----ise-sol-------------------------------------------------------------
# Estimate the integrated squared error between a density estimate and a
# reference density, using quadrature on the estimate's own grid.
#
# density : an object with components $x (grid) and $y (estimated density)
# distr   : the true density function; ... is passed along to it
ise <- function(density, distr, ...) {
  grid <- density$x
  est <- density$y
  # gap between consecutive grid points, padded at both ends
  gaps <- diff(c(min(grid), grid, max(grid)))
  # weight each grid point by half the gap on either side of it
  w <- (head(gaps, -1) + tail(gaps, -1)) / 2
  sum(w * (est - distr(grid, ...))^2)
}
# some sanity checks
x <- rnorm(100)
d <- density(x)
ise( d, dnorm )
## ----mise-sol------------------------------------------------------------
# Monte Carlo estimate of the mean integrated squared error of density().
#
# size : sample size for each simulated data set
# reps : number of simulated data sets
# dist : distribution suffix ("norm" -> rnorm/dnorm, etc.)
# args : list of parameters for the r*/d* functions
# ...  : passed to density(), so kernel= and adjust= can be varied
# Uses mosaic's do() and formula interface to mean().
mise <- function(size = 20, reps = 100, dist = "norm", args = list(), ...) {
  results <- do(reps) * {
    data <- do.call(paste0("r", dist), c(list(n = size), args))
    # matching true density for the simulated data
    distr <- function(x) { do.call(paste0("d", dist), c(list(x), args)) }
    d <- density(data, ...)
    data.frame(ise = ise(d, distr))
  }
  return(c(mise = mean( ~ ise, data = results)))
}
## ----mise-sims-sol, seed = 1234------------------------------------------
# Simulation settings: every combination of kernel, sample size, and
# bandwidth adjustment for three population distributions.
# Fixes: (1) "epanechinikov" was misspelled (density() only accepts
# "epanechnikov"); (2) kernel and adjust were listed in the grid but never
# passed to mise(), so every kernel/adjust row estimated the same thing.
settings <- expand.grid(
  kernel = c("gaussian", "triangular", "rectangular", "epanechnikov"),
  size = c(10, 30, 100),
  adjust = c(1/3, 1, 3),
  stringsAsFactors = FALSE  # density() expects a character kernel name
)
Results <-
  bind_rows(
    settings %>% group_by(kernel, size, adjust) %>%
      summarise(
        dist = "normal",
        mise = mise(size = size, reps = 500,
                    kernel = kernel, adjust = adjust)),
    settings %>% group_by(kernel, size, adjust) %>%
      summarise(
        dist = "exp",
        mise = mise(size = size, reps = 500, dist = "exp",
                    kernel = kernel, adjust = adjust)),
    settings %>% group_by(kernel, size, adjust) %>%
      summarise(
        dist = "beta",
        mise = mise(size = size, reps = 500, dist = "beta",
                    args = list(shape1 = 0.5, shape2 = 0.5),
                    kernel = kernel, adjust = adjust))
  )
## ----mise-plot-sol, opts.label = "figbig"--------------------------------
# MISE versus bandwidth adjustment, one line per kernel, faceted by
# population distribution and sample size.
gf_line(mise ~ adj, color = ~ kernel, group = ~ kernel, alpha = 0.7,
        data = Results %>% mutate(adj = factor(round(adjust, 2)))) %>%
  gf_facet_grid(dist ~ size, scales = "free_y")
## ----mise-beta-sol-------------------------------------------------------
# Density estimates of Beta(0.5, 0.5) samples at several bandwidth
# adjustments, each compared with the true (U-shaped) density.
gf_density( ~ rbeta(100, .5, .5)) %>%
  gf_dist("beta", shape1 = 0.5, shape2 = 0.5, col = "gray50") %>%
  gf_labs(title = "adjust = 1") %>%
  gf_lims(y = c(0, 5), x = c(-0.5, 1.5))
gf_density( ~ rbeta(100, .5, .5), adjust = 1/3) %>%
  gf_dist("beta", shape1 = 0.5, shape2 = 0.5, col = "gray50") %>%
  gf_labs(title = "adjust = 1/3") %>%
  gf_lims(y = c(0, 5), x = c(-0.5, 1.5))
gf_density( ~ rbeta(100, .5, .5), adjust = 3) %>%
  gf_dist("beta", shape1 = 0.5, shape2 = 0.5, col = "gray50") %>%
  gf_labs(title = "adjust = 3") %>%
  gf_lims(y = c(0, 5), x = c(-0.5, 1.5))
gf_density( ~ rbeta(100, .5, .5), adjust = 0.1) %>%
  gf_dist("beta", shape1 = 0.5, shape2 = 0.5, col = "gray50") %>%
  gf_labs(title = "adjust = 0.1") %>%
  gf_lims(y = c(0, 5), x = c(-0.5, 1.5))
## ----qq-norm01, fig.keep="none"------------------------------------------
# Build a normal-quantile plot by hand: sorted data against the normal
# quantiles of evenly spaced probabilities.
x <- c(-0.16, 1.17, -0.43, -0.02, 1.06,
       -1.35, 0.65, -1.12, 0.03, -1.44)
Plot_data <- tibble(
  x.sorted = sort(x),
  p = seq(0.05, 0.95, by = 0.1),
  q = qnorm(p)
)
Plot_data
gf_point(x.sorted ~ q, data = Plot_data)
## ----qq-norm01-fig, echo=FALSE, results = "hide"-------------------------
# Same construction, re-run for the figure, plus gf_qq() for comparison.
x <- c(-0.16, 1.17, -0.43, -0.02, 1.06,
       -1.35, 0.65, -1.12, 0.03, -1.44)
Plot_data <- tibble(
  x.sorted = sort(x),
  p = seq(0.05, 0.95, by = 0.1),
  q = qnorm(p)
)
Plot_data
gf_point(x.sorted ~ q, data = Plot_data)
gf_qq( ~ x) # generate the normal-quantile plot
## ----qq-norm02, fig.show="hide"------------------------------------------
ppoints(10) # percentages for 10 data values
gf_qq(~x) # generate the normal-quantile plot
## ----qq-norm03-fig, echo=FALSE, opts.label="fig1", seed=123--------------
# Normal-quantile plots of simulated normal samples at three sample
# sizes, to calibrate how much wiggle is "normal".
dat10 <-
  data.frame(
    x = rnorm(8*10),              # 8 samples of size 10
    size = rep(10, 8*10),         # record sample size
    sample = rep(1:8, each = 10)  # record sample number
  )
dat25 <-
  data.frame(
    x = rnorm(8*25),              # 8 samples of size 25
    size = rep(25, 8*25),         # record sample size
    sample = rep(1:8, each = 25)  # record sample number
  )
dat100 <-
  data.frame(
    x = rnorm(8*100),              # 8 samples of size 100
    size = rep(100, 8*100),        # record sample size
    sample = rep(1:8, each = 100)  # record sample number
  )
simdata <- rbind(dat10, dat25, dat100)
# generate the normal-quantile plots for each of the 30 samples
gf_qq( ~ x ,data = simdata) %>%
  gf_facet_grid(factor(size) ~ factor(sample), scales = "free") %>%
  gf_theme(
    panel.grid.minor = element_blank(),
    panel.grid.major = element_blank(),
    axis.text = element_blank(),
    axis.ticks = element_blank())
## ----qq-norm03, fig.keep = "none"----------------------------------------
# Same simulation repeated for the echoed (non-figure) chunk.
dat10 <-
  data.frame(
    x = rnorm(8*10),              # 8 samples of size 10
    size = rep(10, 8*10),         # record sample size
    sample = rep(1:8, each = 10)  # record sample number
  )
dat25 <-
  data.frame(
    x = rnorm(8*25),              # 8 samples of size 25
    size = rep(25, 8*25),         # record sample size
    sample = rep(1:8, each = 25)  # record sample number
  )
dat100 <-
  data.frame(
    x = rnorm(8*100),              # 8 samples of size 100
    size = rep(100, 8*100),        # record sample size
    sample = rep(1:8, each = 100)  # record sample number
  )
simdata <- rbind(dat10, dat25, dat100)
# generate the normal-quantile plots for each of the 30 samples
gf_qq( ~ x ,data = simdata) %>%
  gf_facet_grid(factor(size) ~ factor(sample), scales = "free") %>%
  gf_theme(
    panel.grid.minor = element_blank(),
    panel.grid.major = element_blank(),
    axis.text = element_blank(),
    axis.ticks = element_blank())
## ----qq-norm04, seed=123, eval = FALSE-----------------------------------
## # sample of size 40 from Binom(50, 0.4)
## x <- rbinom(40, 50, 0.4); x
## gf_qq( ~ x)
## ----qq-norm04-fig, seed=123, echo = FALSE, results = "hide"-------------
# A discrete (binomial) sample on a normal-quantile plot.
# sample of size 40 from Binom(50, 0.4)
x <- rbinom(40, 50, 0.4); x
gf_qq( ~ x)
## ----qqline, fig.keep = "none"-------------------------------------------
# Reference lines through the quartiles (default) and the 10% tails.
gf_qq( ~ age | substance, data = HELPrct, alpha = 0.4) %>%
  gf_qqline(color = "red") %>%
  gf_qqline(color = "skyblue", tail = 0.10)
## ----qqline-fig, echo = FALSE, opts.label = "fig1"-----------------------
gf_qq( ~ age | substance, data = HELPrct, alpha = 0.4) %>%
  gf_qqline(color = "red") %>%
  gf_qqline(color = "skyblue", tail = 0.10)
## ----qq-weibull, eval=FALSE----------------------------------------------
## life01 <- c(16, 34, 53, 75, 93, 120, 150, 191, 240, 339)
## gf_qq( ~ life01, distribution = qweibull,
##        dparams = list(shape = 1.4, scale = 144)) %>%
##   gf_abline(slope = 1, intercept = 0, color = "gray50",
##             linetype = "dashed")
## ----qq-weibull-fig, echo = FALSE----------------------------------------
# Quantile plot against a Weibull(1.4, 144) reference distribution.
life01 <- c(16, 34, 53, 75, 93, 120, 150, 191, 240, 339)
gf_qq( ~ life01, distribution = qweibull,
       dparams = list(shape = 1.4, scale = 144)) %>%
  gf_abline(slope = 1, intercept = 0, color = "gray50",
            linetype = "dashed")
## ----qq-exp01, fig.keep = "none"-----------------------------------------
# Exponential quantile plots: rate estimated from the data, then default.
life01 <- c(16, 34, 53, 75, 93, 120, 150, 191, 240, 339)
mean(life01); 1 / mean(life01)
gf_qq( ~ life01, distribution = qexp,
       dparams = list(1/mean(life01))) %>%
  gf_qqline(distribution = qexp, dparams = list(1/mean(life01)))
gf_qq( ~ life01, distribution = qexp) %>%
  gf_qqline(distribution = qexp)
## ----qq-exp01-fig, echo = FALSE, results = "hide"------------------------
life01 <- c(16, 34, 53, 75, 93, 120, 150, 191, 240, 339)
mean(life01); 1 / mean(life01)
gf_qq( ~ life01, distribution = qexp,
       dparams = list(1/mean(life01))) %>%
  gf_qqline(distribution = qexp, dparams = list(1/mean(life01)))
gf_qq( ~ life01, distribution = qexp) %>%
  gf_qqline(distribution = qexp)
## ----qq-departures, echo=FALSE, seed=123, opts.label="fig1"--------------
# Four characteristic departures from normality: short tails (uniform),
# left skew, right skew, and heavy tails (Cauchy, symmetrized).
dd <- rcauchy(50)
qqdata <- data.frame(
  x = c(runif(100), 10 - rexp(100), rchisq(100, df = 2), dd, jitter(-dd)),
  dist = rep(c("A", "B", "C", "D"), each = 100)
)
gf_qq( ~ x, data = qqdata) %>%
  gf_facet_wrap( ~ dist, scales = "free") %>%
  gf_theme(axis.text = element_blank(), axis.ticks = element_blank())
## ----jordan-sol----------------------------------------------------------
gf_qq( ~ points, data = Jordan8687)
## ----joint-integrate-----------------------------------------------------
# Joint pdf f(x, y) = 6 x y^2 on the unit square, integrated numerically;
# g zeroes out the region where x > y so its integral gives P(X <= Y).
f <- function(x) { 6 * x[1] * x[2]^2 }
cubature::adaptIntegrate(f, c(0, 0), c(1, 1))
g <- function(x) {
  if (x[1] > x[2]) {return(0)}    # set value to 0 if X > Y
  return(f(x))                    # else return joint pdf
}
cubature::adaptIntegrate(
  g, c(0, 0), c(1, 1), tol = 0.01)  # tol controls precision
## ----ralph-claudia-sol---------------------------------------------------
pnorm(-10, 0, 25 / sqrt(3))
## ----mvnorm01-fig, echo = FALSE------------------------------------------
# Bivariate normal densities: independent (f) and correlated, rho = 0.8 (g).
f <- Vectorize(function(x, y) mvtnorm::dmvnorm(c(x, y)))
g <- Vectorize(function(x, y)
  mvtnorm::dmvnorm(
    c(x, y),
    sigma = rbind(c(1, 0.8), c(0.8, 1))
  ))
## ----mvnorm02-fig, include = FALSE---------------------------------------
# Surface plots of the two bivariate normal densities above.
plotFun(f(x, y) ~ x + y, x.lim = c(-3, 3), y.lim = c(-3, 3), npts = 22,
        surface = TRUE,
        interactive = FALSE,
        par.settings = list(
          box.3d = list(col = "transparent"),
          axis.line = list(col = NA, lty = 1, lwd = 1)
        ),
        light.source = c(25, 50, 50),
        aspect = c(1, 0.5),
        zlab = "",
        screen = list(z = 20, x = -75),
        scales = list(arrows = FALSE,
                      x = list(draw = FALSE),
                      y = list(draw = FALSE),
                      z = list(draw = FALSE))
)
plotFun(g(x, y) ~ x + y, x.lim = c(-3, 3), y.lim = c(-3, 3), npts = 22,
        surface = TRUE,
        interactive = FALSE,
        par.settings = list(
          box.3d = list(col = "transparent"),
          axis.line = list(col = NA, lty = 1, lwd = 1)
        ),
        light.source = c(25, 50, 50),
        aspect = c(1, 0.5),
        zlab = "",
        screen = list(z = 20, x = -75),
        scales = list(arrows = FALSE,
                      x = list(draw = FALSE),
                      y = list(draw = FALSE),
                      z = list(draw = FALSE))
)
## ----mvnorm03-fig, echo = FALSE------------------------------------------
# Contour/level plots of the same two densities.
plotFun(f(x, y) ~ x + y, x.lim = c(-3.5, 3.5), y.lim = c(-2, 2), npts = 150)
plotFun(g(x, y) ~ x + y, x.lim = c(-3.5, 3.5), y.lim = c(-2, 2), npts = 150)
## ----bvnorm01------------------------------------------------------------
# Covariance matrix induced by the linear transformation A (Sigma = A A').
A1 <- rbind(c(-1, 0), c(-2, -1))
A1 %*% t(A1)
## ----bvnorm02------------------------------------------------------------
A2 <- rbind(c(3/5, 4/5), c(2, 1))
A2 %*% t(A2)
## ----rho-----------------------------------------------------------------
# Degenerate bivariate normal: X2 = -X1, so the covariance matrix is
# singular and the correlation is exactly -1.
A <- rbind(c(1, 0), c(-1, 0))
Sigma <- A %*% t(A); Sigma
det(Sigma)
# correlation = covariance / sqrt(product of variances)
# (the original divided by the product itself, omitting the sqrt; the
# numeric result is unchanged here because both variances are 1)
rho <- Sigma[1, 2] / sqrt(Sigma[1, 1] * Sigma[2, 2]); rho
## ----mvnorm01-sol--------------------------------------------------------
# Probabilities of standard normal intervals (max(0, .) guards the
# degenerate/empty cases).
max(0, pnorm(0) - pnorm(0))
max(0, pnorm(1) - pnorm(-1))
max(0, pnorm(-1) - pnorm(-2))
max(0, pnorm(1) - pnorm(2))
max(0, pnorm(2) - pnorm(-2))
## ----mvnorm01------------------------------------------------------------
# Trivariate normal built from a lower-triangular A; Sigma = A A'.
A <- rbind(c(1, 0, 0), c(1, 1, 0), c(2, 1, 1)); A
Sigma <- A %*% t(A); Sigma
mu <- c(0, 1, 2)
mvtnorm::rmvnorm(2, mean = mu, sigma = Sigma)
## ----mvnorm02------------------------------------------------------------
# find q such that Prob( X_1 <= q & X_2 <= q & X_3 <= q) = 0.5
mvtnorm::qmvnorm(0.5, mean = mu, sigma = Sigma)
## ----mvnorm03------------------------------------------------------------
# By the result above, this should be just under 0.5
mvtnorm::pmvnorm(upper = c(2, 2, 2), mean = mu, sigma = Sigma)
# Prob(all three are between -1 and 1)
mvtnorm::pmvnorm(lower = c(-1, -1, -1), upper = c(1, 1, 1),
                 mean = mu, sigma = Sigma)
## ----nonneg-definite-sol-------------------------------------------------
# A transformation whose A A' reproduces a given covariance matrix.
A <- rbind(c(sqrt(5), 0), c(-3/5 * sqrt(5), 4/5 * sqrt(5)))
A %*% t(A)
## ----mvn-marginal01------------------------------------------------------
# Marginal covariance: delete the rows/columns of the dropped variable.
A <- rbind(c(1, 0, 0), c(1, 1, 0), c(1, 2, 1))
# covariance matrix
Sigma<- A %*% t(A); Sigma
# marginal covariance matrix
Sigma[-3, -3]
## ----mvn-marginal02------------------------------------------------------
# The top-left block of A generates the same marginal covariance.
A12 <- A[1:2, 1:2]; A12
Sigma12 <- A12 %*% t(A12); Sigma12
## ----mvn-marginal03, digits = 2, seed = 12345----------------------------
# simulate 3 independent Norm(0,1) vars
Z1 <- rnorm(100000); Z2 <- rnorm(100000); Z3 <- rnorm(100000)
# create the X's from the Z's
X1 <- 1 + Z1; X2 <- 2 + Z1 + Z2; X3 <- 3 + Z1 + 2 * Z2 + Z3
data.frame( `E(X1)` = mean(X1), `E(X2)` = mean(X2),
            `Var(X1)` = var(X1), `Var(X2)` = var(X2),
            `Cov(X1, X2)` = cov(X1, X2), check.names = FALSE)
## ----mvn-marginal04------------------------------------------------------
Sigma[-2, -2]
## ----mvn-marginal05------------------------------------------------------
# Simulated moments should agree with the matrix entries above.
data.frame(
  `mean(X3)` = mean(X3), `var(X3)` = var(X3),
  `cov(X1, X3)` = cov(X1, X3), check.names = FALSE)
## ----mvn-marginal06------------------------------------------------------
# Caution: deleting rows/columns of A does NOT give the marginal Sigma.
B <- A[c(1, 3, 2), c(1, 3, 2)]; B
# This is NOT the covariance matrix
B[-3, -3] %*% t(B[-3, -3])
# Nor is this (since it is the same as the matrix above)
A[-2, -2] %*% t(A[-2, -2])
## ----mvn-marginal07------------------------------------------------------
C <- rbind(c(1, 0), c(1, sqrt(5))); C
C %*% t(C)
## ----mvn-marginal08------------------------------------------------------
B <- rbind(c(1, 0, 0),
           c(1, sqrt(5), 0),
           c(1, 2/sqrt(5), 1/sqrt(5)))
B %*% t(B)
## ----mvn-conditional01---------------------------------------------------
B <- rbind(c(1, 0), c(1, 1)); B
C <- rbind(c(1,2)); C
Binv <- solve(B); Binv
C %*% Binv
## ----mvn-conditional02---------------------------------------------------
# Empirical conditional distribution: keep draws near X1 = 3, X2 = 4.
X3cond <- X3[round(X1) == 3 & round(X2) == 4]
favstats(X3cond)
## ----mvn-conditional03---------------------------------------------------
D <- rbind(c(1, 0), c(2, 1))
D %*% t(D)
## ----mvn-conditional04---------------------------------------------------
# Conditioning on X1 near 3 (finer rounding this time).
X2cond <- X2[round(X1, 1) == 3]
X3cond <- X3[round(X1, 1) == 3]
data.frame(
  `mean(X2|X1 = 3)` = mean(X2cond), `mean(X3|X1 = 3)` = mean(X3cond),
  `var(X2|X1 = 3)` = var(X2cond), `var(X3|X1 = 3)` = var(X3cond),
  `cov(X2, X3 | X1 = 3)` = cov(X2cond, X3cond), check.names = FALSE
)
## ----mvn-conditional05---------------------------------------------------
# Exact conditional mean and covariance via the standard MVN formulas.
Sigma
mu <- c(1, 2, 3); mu
# means
mu[2:3] + Sigma[2:3, 1] %*% solve(Sigma[1, 1]) %*% (3 - mu[1])
# variance-covariance
Sigma[2:3, 2:3] - Sigma[2:3, 1] %*% solve(Sigma[1, 1]) %*% Sigma[1, 2:3]
## ----CLT, child="CLT.Rnw", eval=includeChapter[4]------------------------
## ----clt-setup, include = FALSE, cache = FALSE---------------------------
# knitr setup for the CLT chapter.
knitr::opts_chunk$set(cache.path = "cache/CLT-")
require(parallel)
options(`mosaic:parallelMessage` = FALSE)
## ----mom-unif01----------------------------------------------------------
# Two small samples; their means feed the method of moments for Unif(0, theta).
x <- c(1.6, 2.8, 6.2, 8.2, 8.5, 8.7); mean(x)
## ----mom-unif02----------------------------------------------------------
x <- c(0.2, 0.9, 1.9, 2.2, 4.7, 5.1); mean(x)
## ----mom-unif01-sol------------------------------------------------------
# One simulated Unif(0, 1) sample of the given size: is the method of
# moments estimate of the upper endpoint (2 * sample mean) smaller than
# the sample maximum (i.e., an impossible estimate)?
simulate <- function(size) {
  smp <- runif(size)
  max(smp) > 2 * mean(smp)
}
# proportion of impossible estimates at several sample sizes
mean(replicate(1000, simulate(6)))
mean(replicate(1000, simulate(12)))
mean(replicate(1000, simulate(24)))
## ----mom-exp-fig, echo = FALSE, results="hide"---------------------------
# Exponential fit by the method of moments: rate = 1 / sample mean.
time <- c(49.0, 60.4, 8.9, 43.4, 34.8, 8.2, 13.6, 11.5, 99.4, 31.9)
mean(time)
lambda.hat = 1 / mean(time); lambda.hat
# fitted density curve, overlaid on two histograms (equal-width bins,
# then unequal bins at squared break points)
Plot_data <- tibble(x = seq(0,121, by = 0.5), density = dexp(x, rate = lambda.hat))
gf_dhistogram( ~ time, n = 10, binwidth = 10, alpha = 0.5) %>%
  gf_line(density ~ x, data = Plot_data)
breaks = seq(0, 11, by = 2)^2
gf_dhistogram( ~ time, n = 10, breaks = breaks, alpha = 0.5) %>%
  gf_line(density ~ x, data = Plot_data)
## ----mom-exp-------------------------------------------------------------
time <- c(49.0, 60.4, 8.9, 43.4, 34.8, 8.2, 13.6, 11.5, 99.4, 31.9)
mean(time)
lambda.hat = 1 / mean(time); lambda.hat
## ----moment-sol----------------------------------------------------------
# Sample moment of order k: the average of (x - m)^k, where m is the
# sample mean (centered = TRUE) or 0 (raw moment).  Vectorized over k.
# Note the divisor is n, not n - 1, so the 2nd centered moment is the
# population-style variance.
moment <- function(k = 1,            # which moment?
                   x,                # data
                   centered = TRUE,  # centered on mean?
                   na.rm = TRUE)     # remove missing vals?
{
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  if (length(k) > 1) {
    # one recursive call per requested order
    return(vapply(k, moment, numeric(1), x = x, centered = centered))
  }
  center <- if (centered) mean(x) else 0
  mean((x - center)^k)
}
x <- (1:10)^2; n <- length(x)
moment(1:2, x, centered = FALSE)
moment(1:2, x, centered = TRUE)
c(mean(x), (n-1) / n * var(x))
## ----mom-norm, digits = 4------------------------------------------------
# Method of moments for a normal sample: the sigma estimate divides by n,
# so it differs from sd(), which divides by n - 1.
x <-
  c(57.9, 70.8, 86.3, 92.3, 94.2, 117.0, 118.4, 122.4, 125.8, 134.4)
n <- length(x)
mean(x)
sd(x) # NOT the method of moments estimate for sigma
sqrt(sum((x - mean(x))^2 / n)) # method of moments estimate for sigma
v <- 9/10 * var(x) # working from var() and adjusting denominator
sqrt(v)
## ----mom-beta01, seed = 1234---------------------------------------------
# Method of moments estimates for a Beta distribution: match the sample
# mean and (n-divisor) variance to the Beta mean and variance, solving
# numerically for shape1 = alpha on [lower, upper].
beta.mom <- function(x, lower = 0.01, upper = 100) {
  m <- mean(x)
  v <- var(x) * (length(x) - 1) / length(x)  # divide by n, not n - 1
  ratio <- 1 / m - 1                         # beta/alpha implied by the mean
  # this function of alpha is zero when the Beta variance matches v
  g <- function(a) ratio * m^2 / (a / m + 1) - v
  root <- uniroot(g, c(lower, upper))$root
  c(shape1 = root, shape2 = root * ratio)
}
x <- rbeta(50, 2, 5); beta.mom(x)
## ----mom-beta02----------------------------------------------------------
# algebraic solutions
x.bar <- mean(x); x.bar
v <- var(x) * (length(x) - 1) / length(x); v
x.bar * (x.bar * (1 - x.bar) / v - 1)        # alpha = shape1
(1 - x.bar) * (x.bar * (1 - x.bar) / v - 1)  # beta = shape2
## ----sample-means, fig.show = "hide"-------------------------------------
# Sampling distribution of the mean of 16 draws from N(100, 12); by the
# CLT it should be approximately N(100, 12/sqrt(16) = 3).
# 5000 sample means of samples of size 16 from N(100, 12):
SamplingDist <-
  do(5000) * c(sample.mean = mean(rnorm(16, 100, 12)))
mean(~ sample.mean, data = SamplingDist)
sd( ~ sample.mean, data = SamplingDist)
gf_dhistogram( ~ sample.mean, data = SamplingDist,
               bins = 20, alpha = 0.5) %>%
  gf_vline(xintercept = 100) %>%
  gf_function(fun = dnorm, args = list(mean = 100, sd = 3))
gf_qq( ~ sample.mean, data = SamplingDist)
## ----sample-means-fig, echo = FALSE, results = "hide", fig.keep = "all"----
# Same simulation, re-run for the figures.
# 5000 sample means of samples of size 16 from N(100, 12):
SamplingDist <-
  do(5000) * c(sample.mean = mean(rnorm(16, 100, 12)))
mean(~ sample.mean, data = SamplingDist)
sd( ~ sample.mean, data = SamplingDist)
gf_dhistogram( ~ sample.mean, data = SamplingDist,
               bins = 20, alpha = 0.5) %>%
  gf_vline(xintercept = 100) %>%
  gf_function(fun = dnorm, args = list(mean = 100, sd = 3))
gf_qq( ~ sample.mean, data = SamplingDist)
## ----do, eval = FALSE----------------------------------------------------
## mean(rnorm(16, 100, 12))
## ----mom-beta-sim01, fig.show = "hide"-----------------------------------
# Sampling distributions of the Beta method of moments estimators,
# with the true parameter values (2 and 5) marked.
Results <- do(1000) * beta.mom(rbeta(50, 2, 5))
gf_dhistogram( ~ shape1, data = Results, bins = 30) %>%
  gf_vline(xintercept = 2)
gf_dhistogram( ~ shape2, data = Results, bins = 30) %>%
  gf_vline(xintercept = 5)
## ----mom-beta-sim01-fig, echo = FALSE, cache = FALSE---------------------
Results <- do(1000) * beta.mom(rbeta(50, 2, 5))
gf_dhistogram( ~ shape1, data = Results, bins = 30) %>%
  gf_vline(xintercept = 2)
gf_dhistogram( ~ shape2, data = Results, bins = 30) %>%
  gf_vline(xintercept = 5)
## ----mom-beta-sim02, eval = FALSE----------------------------------------
## gf_point(shape2 ~ shape1, data = Results, alpha = 0.4) %>%
##   gf_abline(intercept = 0, slope = 5/2)
##
## gf_dhistogram( ~ (shape2 / shape1), data = Results, bins = 30) %>%
##   gf_vline(xintercept = 2.5)
## ----mom-beta-sim-02-fig, echo = FALSE-----------------------------------
# The two estimates are strongly related; their ratio centers near 5/2.
gf_point(shape2 ~ shape1, data = Results, alpha = 0.4) %>%
  gf_abline(intercept = 0, slope = 5/2)
gf_dhistogram( ~ (shape2 / shape1), data = Results, bins = 30) %>%
  gf_vline(xintercept = 2.5)
## ----miaa-ft-beta--------------------------------------------------------
# This gives the method of moments estimates
# for the full data set
beta.mom(MIAA05$FTPct)
## ----miaa-ft-beta-sol----------------------------------------------------
# Free throw percentages: refit after removing players with few attempts.
miaa <- MIAA05
length(miaa$FTPct)
beta.mom(miaa$FTPct)
# remove players who took no shots
someshots <- miaa$FTPct[miaa$FTA >= 1]
length(someshots)
beta.mom(someshots) -> bmom1; bmom1
gf_qq( ~ someshots, distribution = qbeta, dparams = as.list(bmom1))
# remove players with fewer than 10 shots
tenshots <- miaa$FTPct[miaa$FTA >= 10]
length(tenshots)
beta.mom(tenshots) -> bmom2; bmom2
gf_qq( ~ tenshots, distribution = qbeta, dparams = as.list(bmom2))
## ----miaa-fg-beta-sol----------------------------------------------------
# Method of moments Beta fits for field goal percentage (FGPct),
# paralleling the free throw analysis above.
miaa <- MIAA05
length(miaa$FGPct)
beta.mom(miaa$FGPct)
# remove players who took no shots
someshots <- miaa$FGPct[miaa$FGA >= 1]
length(someshots)
beta.mom(someshots) -> bmom1; bmom1
gf_qq( ~ someshots, dist = function(x)qbeta(x, bmom1["shape1"], bmom1["shape2"]))
# remove players with fewer than 10 shots
# (fix: this chunk analyzes field goals, but the original selected FTPct
# here -- a copy/paste slip from the free throw chunk)
tenshots <- miaa$FGPct[miaa$FGA >= 10]
length(tenshots)
beta.mom(tenshots) -> bmom2; bmom2
gf_qq( ~ tenshots,
       dist = function(x)qbeta(x, bmom2["shape1"], bmom2["shape2"]))
## ----mom-life01----------------------------------------------------------
# Exponential method of moments for the lifetime data: rate = 1 / mean.
life01 <- c(16, 34, 53, 75, 93, 120, 150, 191, 240, 339)
l <- 1/ mean(life01); l
## ----mom-life02----------------------------------------------------------
# Gamma method of moments: alpha = m^2 / v, lambda = m / v
# (v is the variance with divisor n rather than n - 1).
m <- mean(life01); m
n <- length(life01); n
v <- var(life01) * (n-1) / n; v
# alpha
m^2 / v
# lambda
m / v
## ----srs, seed = 1234----------------------------------------------------
# Sampling without replacement (SRS) vs with replacement (iid);
# mosaic's resample() is sample() with replace = TRUE.
sample(1:30, 15)                  # 15 random numbers in 1-30
set.seed(123)
sample(1:30, 15, replace = TRUE)  # iid random sample
set.seed(123)
resample(1:30, 15)                # same iid random sample
## ----sample-do, fig.keep = "none"----------------------------------------
# Sampling rows of a data frame and building the sampling distribution of
# a mean.  NOTE(review): sample()/resample() on a data frame rely on the
# mosaic methods being attached -- confirm before running standalone.
sample(vcd::VonBort, 10)          # SRS of size 10
# mean of SRS
c(mean.deaths = mean( ~ deaths, data = sample(vcd::VonBort, 10)))
# mean of an iid random sample
c(mean.deaths = mean( ~ deaths, data = resample(vcd::VonBort, 10)))
# means of 3 SRSs using do()
do (3) * c(mean.deaths = mean(~ deaths, data = sample(vcd::VonBort, 10)))
# means of 3 SRSs using replicate()
replicate(3, mean(~ deaths, data = sample(vcd::VonBort, 10)))
mean( ~ deaths, data = vcd::VonBort)   # mean of entire data set
gf_dhistogram( ~ mean.deaths, binwidth = 0.1,
   data = do (2000) *
     c(mean.deaths = mean(~ deaths, data = sample(vcd::VonBort, 10)))) %>%
  gf_vline(xintercept = 0.7)
## ----sample-do-fig, echo = FALSE, results = "hide"-----------------------
# Same code, re-run for the figure.
sample(vcd::VonBort, 10)          # SRS of size 10
# mean of SRS
c(mean.deaths = mean( ~ deaths, data = sample(vcd::VonBort, 10)))
# mean of an iid random sample
c(mean.deaths = mean( ~ deaths, data = resample(vcd::VonBort, 10)))
# means of 3 SRSs using do()
do (3) * c(mean.deaths = mean(~ deaths, data = sample(vcd::VonBort, 10)))
# means of 3 SRSs using replicate()
replicate(3, mean(~ deaths, data = sample(vcd::VonBort, 10)))
mean( ~ deaths, data = vcd::VonBort)   # mean of entire data set
gf_dhistogram( ~ mean.deaths, binwidth = 0.1,
   data = do (2000) *
     c(mean.deaths = mean(~ deaths, data = sample(vcd::VonBort, 10)))) %>%
  gf_vline(xintercept = 0.7)
## ----law-large-numbers, fig.keep = "none"--------------------------------
# Law of large numbers: running means of Exp(1) samples converge to 1.
expand.grid(run = paste("run", 1:6), rep = 1:1000) %>%
  mutate(x = rexp(6 * 1000)) %>%
  group_by(run) %>% arrange(rep) %>%
  mutate(runningMean = cumsum(x) / 1:length(x)) %>%
  gf_line(runningMean ~ rep | run) %>%
  gf_hline(yintercept = 1, color = "red", alpha = 0.5) %>%
  gf_labs(y = "running mean", x = "") %>%
  gf_lims(y = c(0, 3))
## ----law-large-numbers-fig, echo = FALSE, eval = TRUE, opts.label = "fig1"----
expand.grid(run = paste("run", 1:6), rep = 1:1000) %>%
  mutate(x = rexp(6 * 1000)) %>%
  group_by(run) %>% arrange(rep) %>%
  mutate(runningMean = cumsum(x) / 1:length(x)) %>%
  gf_line(runningMean ~ rep | run) %>%
  gf_hline(yintercept = 1, color = "red", alpha = 0.5) %>%
  gf_labs(y = "running mean", x = "") %>%
  gf_lims(y = c(0, 3))
## ----lln-cauchy, fig.keep = "none"---------------------------------------
# The Cauchy distribution has no mean, so running means do NOT settle.
set.seed(1234)
expand.grid(run = paste("run", 1:6), rep = 1:10000) %>%
  mutate(x = rcauchy(6 * 10000)) %>%
  group_by(run) %>% arrange(rep) %>%
  mutate(runningMean = cumsum(x) / 1:length(x)) %>%
  gf_line(runningMean ~ rep | run) %>%
  gf_hline(yintercept = 0, color = "red", alpha = 0.5) %>%
  gf_lims(y = c(-20, 20)) %>%
  gf_labs(y = "running mean", x = "")
## ----lln-cauchy-fig, echo = FALSE, opts.label = "fig1"-------------------
set.seed(1234)
expand.grid(run = paste("run", 1:6), rep = 1:10000) %>%
  mutate(x = rcauchy(6 * 10000)) %>%
  group_by(run) %>% arrange(rep) %>%
  mutate(runningMean = cumsum(x) / 1:length(x)) %>%
  gf_line(runningMean ~ rep | run) %>%
  gf_hline(yintercept = 0, color = "red", alpha = 0.5) %>%
  gf_lims(y = c(-20, 20)) %>%
  gf_labs(y = "running mean", x = "")
## ----lln-cauchy-seed, fig.keep="none"------------------------------------
set.seed(123)
Data <- tibble(
  n = 1:10000,
  x = rcauchy(10000),
  running_mean = cumsum(x) / (1:length(x))
)
gf_line(running_mean ~ n, data = Data) %>%
  gf_labs(y = "running mean", title = "Sample from a Cauchy Distribution")
## ----clt02-sol-----------------------------------------------------------
pnorm(3, sd = 2) - pnorm(-3, sd = 2)
## ----clt-finite-samples01------------------------------------------------
# Exact sampling distribution of the mean of 2 draws from a 5-value
# population, under SRS (without replacement) and iid sampling.
x <- c(1, 6, 6, 8, 9)
mu <- sum(x * 0.2); mu            # population mean
v <- sum(x^2 * 0.2) - mu^2; v     # population variance
pairsums <- outer(x, x, "+")      # compute 25 sums
pairmeans <- pairsums / 2
## ----clt-finite-samples02------------------------------------------------
# sampling distribution with SRS
srs.means <- as.vector(pairmeans[lower.tri(pairmeans)]); srs.means
iid.means <- as.vector(pairmeans); iid.means
srs.mean <- sum(srs.means * 0.1); srs.mean
srs.var <- sum(srs.means^2 * 0.1) - srs.mean^2; srs.var
# finite population correction: Var = (v/n) * (N - n)/(N - 1)
v / 2 * (5-2) / (5-1)
sqrt(v / 2 * (5-2) / (5-1))
var(srs.means)   # N.B: This is the INCORRECT variance
## ----clt-finite-samples-sol----------------------------------------------
# Full solution: both sampling schemes, with the correct and incorrect
# (var() uses divisor n - 1) variance computations side by side.
x <- c(1, 6, 6, 8, 9)
mu <- sum(x * 0.2); mu            # population mean
v <- sum(x^2 * 0.2) - mu^2; v     # population variance
pairsums <- outer(x, x, "+")      # compute 25 sums
pairmeans <- pairsums / 2
# sampling distribution with SRS
srs.means <- as.vector(pairmeans[lower.tri(pairmeans)]); srs.means
iid.means <- as.vector(pairmeans); iid.means
srs.mean <- sum(srs.means * 0.1); srs.mean
srs.var <- sum(srs.means^2 * 0.1) - srs.mean^2; srs.var
v / 2 * (5-2) / (5-1)
sqrt(v / 2 * (5-2) / (5-1))
var(srs.means)   # INCORRECT variance
# sampling distribution with iid sample
iid.mean <- sum(iid.means * 0.04); iid.mean
iid.var <- sum(iid.means^2 * 0.04) - iid.mean^2; iid.var
v / 2
sqrt(v / 2)
var(iid.means)   # INCORRECT variance
## ----unif-12, fig.keep = "none",warning = FALSE--------------------------
# Sum of 12 Unif(-0.5, 0.5) values: mean 0, variance 1 -- a classic
# quick-and-dirty approximation to a standard normal.
sampleSums <- replicate(5000, sum(runif(12, -0.5, 0.5)))
gf_qq( ~ sampleSums)
gf_dhistogram( ~ sampleSums, bins = 25)
## ----unif-12-fig, echo = FALSE-------------------------------------------
sampleSums <- replicate(5000, sum(runif(12, -0.5, 0.5)))
gf_qq( ~ sampleSums)
gf_dhistogram( ~ sampleSums, bins = 25)
## ----beta-clt, eval = FALSE----------------------------------------------
## BetaSims <-
##   expand.grid(rep = 1:1000, size = c(5, 10, 20, 40)) %>%
##   group_by(rep, size) %>%
##   mutate(sample.mean = mean(rbeta(size, 0.5, 0.5)))
## gf_qq( ~ sample.mean | factor(size), data = BetaSims) %>%
##   gf_facet_wrap( ~ factor(size), scales = "free")
## gf_dhistogram( ~ sample.mean | factor(size), data = BetaSims, bins = 25)
## ----beta-pdf-fig, echo = FALSE, warning = FALSE-------------------------
# The (far from normal) Beta(0.5, 0.5) population density.
gf_dist("beta", shape1 = 0.5, shape2 = 0.5, xlim = c(0,1)) %>%
  gf_lims(y = c(0, 5))
## ----beta-clt-fig, echo = FALSE------------------------------------------
# CLT in action: sample means of Beta(0.5, 0.5) samples normalize as
# the sample size grows.
BetaSims <-
  expand.grid(rep = 1:1000, size = c(5, 10, 20, 40)) %>%
  group_by(rep, size) %>%
  mutate(sample.mean = mean(rbeta(size, 0.5, 0.5)))
gf_qq( ~ sample.mean | factor(size), data = BetaSims) %>%
  gf_facet_wrap( ~ factor(size), scales = "free")
gf_dhistogram( ~ sample.mean | factor(size), data = BetaSims, bins = 25)
## ----binomial-normal-hist-fig, echo = FALSE, opts.label = "figbig"-------
# Binomial quantiles against the matching normal approximation for a
# grid of n and pi values; agreement improves as n * pi grows.
Plot_data <-
  expand.grid(
    pi = c(0.5, 0.3, 0.1, 0.05),
    n = c(10, 40, 80, 800),
    p = ppoints(50)
  ) %>%
  mutate(
    label = paste("n=", n, "; pi=", pi, sep = ""),
    group = paste("pi=", pi, "; n=", n, sep = ""),
    y = qbinom(p, n, pi),
    x = qnorm(p, n * pi, sqrt(n * pi * (1-pi)))
  )
gf_qq( ~ y, data = Plot_data, color = ~factor(pi)) %>%
  gf_qqline(color = "gray50") %>%
  gf_facet_wrap( ~ label, scales = "free", ncol = 4) %>%
  gf_labs(
    y = expression(qbinom(p, n, pi)),
    x = expression(qnorm(p, n * pi, sqrt(n * pi * (1-pi))))
  ) %>%
  gf_theme(legend.position = "top") %>%
  gf_refine(guides(color = guide_legend(title = "pi:")))
# p <- xyplot(
# y ~ x | paste("n=", n, sep = "") * paste("pi=", pi, sep = ""),
# data = Plot_data,
# scales = list(relation = "free"),
# cex = 0.6,
# ylab = expression(qbinom(p, n, pi)),
# xlab = expression(qnorm(p, n * pi, sqrt(n * pi * (1-pi)))),
# panel = function(x, y, ...){
# panel.abline(0, 1, ...)
# panel.xyplot(x, y, ...)
# })
# latticeExtra::useOuterStrips(p)
## ----binomial-clt-fig, echo = FALSE--------------------------------------
gf_dist("binom", params = list(size = 20, prob = 0.1)) %>%
gf_dist("norm", params = list(mean = 2, sd = sqrt(0.1 * 0.9 * 20)),
alpha = 0.4) %>%
gf_labs(title = expression(paste("Binomial vs Normal", "(", "n=20", ", " , pi, "= 0.10", ")")))
## ----continuity-correction-----------------------------------------------
# P(55 <= X <= 65)
pbinom(65, 100, 0.6) - pbinom(54, 100, 0.6)
# without continuity correction:
diff(pnorm(c(55, 65), 60, sqrt(100 * 0.6 * 0.4)))
# with continuity correction:
diff(pnorm(c(54.5, 65.5), 60, sqrt(100 * 0.6 * 0.4)))
## ----binomial-ztest------------------------------------------------------
# "exact" p-value
binom.test(60, 100)
# approximate p-value
z <- (0.6 - 0.5) / sqrt(0.5 * 0.5 / 100); z
2 * (1 - pnorm(z))
# approximate p-value with continuity correction
z <- (0.595 - 0.5) / sqrt(0.5 * 0.5 / 100); z # 0.595 = 59.5 / 100
2 * (1 - pnorm(z))
# R can automate the approximate version too:
prop.test(60, 100) # uses continuity correction by default
prop.test(60, 100, correct = FALSE) # turn off continuity correction
## ----voter-poll-sol------------------------------------------------------
binom.test(465, 980)
prop.test(465, 980)
## ----battery-life-sol----------------------------------------------------
1 - pnorm(160/3, mean = 50, sd = 5 / sqrt(3))
## ----z-test01, eval = FALSE----------------------------------------------
## z_test <-
## function (x, alternative = c("two.sided", "less", "greater"),
## mu = 0, sigma = 1, conf.level = 0.95)
## {
## DNAME <- deparse(substitute(x)) # record name of data coming in
## alternative <- match.arg(alternative) # fancy argument matching
##
## # your code goes here
## }
## ----z-test02, eval = FALSE, tidy = FALSE--------------------------------
## Z <- "???"; names(Z) <- "z"
## SIGMA <- sigma; names(SIGMA) <- "sigma"
## MU <- mu; names(MU) <- "mean"
## ESTIMATE <- "???"; names(ESTIMATE) <- "sample mean"
## CINT <- "???"; attr(CINT, "conf.level") <- conf.level
## PVAL <- "???"
##
## structure(list(statistic = Z, parameter = SIGMA, p.value = PVAL,
## conf.int = CINT, estimate = ESTIMATE, null.value = MU,
## alternative = alternative, method = "Z test for a mean",
## data.name = DNAME),
## class = "htest")
## ----z-test-sol----------------------------------------------------------
# Z test for a mean when the population standard deviation is known.
#
# Args:
#   x           numeric sample
#   alternative "two.sided" (default), "less", or "greater"
#   mu          mean under the null hypothesis
#   sigma       known population standard deviation
#   conf.level  confidence level for the reported interval
#
# Returns an object of class "htest" (so it prints like t.test()).
z_test <- function (x,
                    alternative = c("two.sided", "less", "greater"),
                    mu = 0, sigma = 1, conf.level = 0.95)
{
  data_name <- deparse(substitute(x))   # capture caller's expression for printing
  alternative <- match.arg(alternative)
  n_obs <- length(x)
  x_bar <- mean(x, na.rm = TRUE)
  std_err <- sigma / sqrt(n_obs)
  alpha <- 1 - conf.level
  # Tail probabilities defining the confidence bounds for each alternative;
  # 0 / 1 endpoints become -Inf / Inf via qnorm().
  tail_p <- switch(alternative,
                   two.sided = c(alpha / 2, 1 - alpha / 2),
                   less      = c(0, 1 - alpha),
                   greater   = c(alpha, 1))
  crit <- qnorm(tail_p)
  STAT <- (x_bar - mu) / std_err
  names(STAT) <- "z"
  SIGMA <- sigma
  names(SIGMA) <- "sigma"
  MU <- mu
  names(MU) <- "mean"
  EST <- x_bar
  names(EST) <- "sample mean"
  CINT <- x_bar + crit * std_err
  attr(CINT, "conf.level") <- conf.level
  PVAL <- switch(alternative,
                 two.sided = 2 * pnorm(-abs(STAT)),
                 less      = pnorm(STAT),
                 greater   = 1 - pnorm(STAT))
  structure(
    list(statistic = STAT, parameter = SIGMA, p.value = PVAL,
         conf.int = CINT, estimate = EST, null.value = MU,
         alternative = alternative, method = "Z test for a mean",
         data.name = data_name),
    class = "htest")
}
## ----free-weights--------------------------------------------------------
# Two-sided z test by hand: H0 mean 5.0, sample mean 4.96, sigma 0.05, n = 10.
z <- (4.96 - 5.0) / (0.05 / sqrt(10)); z # test statistic
2 * (pnorm(z)) # 2-sided p-value
2 * (1 - pnorm(abs(z))) # 2-sided p-value again
# Z confidence interval for a mean with known population standard deviation.
#
# Args:
#   x          numeric sample
#   sd         known population standard deviation
#   conf.level confidence level
# Returns a list with conf.int (length-2 numeric) and estimate (sample mean),
# the shape expected by CIsim()'s method argument.
zci <- function(x, sd = 100, conf.level = 0.95) {
  x_bar <- mean(x)
  # qnorm(1 - alpha/2) is the same critical value as -qnorm(alpha/2)
  half_width <- qnorm(1 - (1 - conf.level) / 2) * sd / sqrt(length(x))
  list(conf.int = c(x_bar - half_width, x_bar + half_width), estimate = x_bar)
}
## ----ci-vis, eval = TRUE, fig.keep = "none", seed = 1234-----------------
# simulate 100 intervals and plot them.
CIsim(n = 20, samples = 100, estimand = 500,
rdist = rnorm, args = list(mean = 500, sd = 100),
method = zci, method.args = list(sd = 100))
## ----ci-vis-fig, echo = FALSE, message = FALSE, seed = 1234, opts.label = "fig1"----
# simulate 100 intervals and plot them.
CIsim(n = 20, samples = 100, estimand = 500,
rdist = rnorm, args = list(mean = 500, sd = 100),
method = zci, method.args = list(sd = 100))
## ----simulate-ci---------------------------------------------------------
# an example CI from a sample of size 20
zci(rnorm(20, 500, 100))
# 10,000 simulated samples each of size 2, 5 and 20
CIsim(n = c(2, 5, 20), samples = 10000, rdist = rnorm,
args = list(mean = 500, sd = 100),
estimand = 500, method = zci, method.args = list(sd = 100))
## ----simulate-ci-unif----------------------------------------------------
# Coverage of the z interval when sampling from Unif(0, 1).
mu <- 1/2; v <- 1/12 # mean and variance
# 10,000 simulated samples of sizes 2, 5, and 20
CIsim(n = c(2, 5, 20), samples = 10000, rdist = runif, estimand = mu,
method = zci, method.args = list(sd = sqrt(v)))
## ----simulate-ci-beta----------------------------------------------------
mu <- 0.4 / (0.4 + 0.6); mu # mean for beta dist
v <- (0.4 * 0.6) / ((0.4 + 0.6)^2 * (0.4 + 0.6 + 1)); v # var for beta dist
#
# 10,000 simulated samples of sizes 2, 5, and 20
CIsim(n = c(2, 5, 20), samples = 10000,
rdist = rbeta, args = list(shape1 = 0.4, shape2 = 0.6),
estimand = mu, method = zci, method.args = list(sd = sqrt(v)))
## ----simulate-ci-exp-----------------------------------------------------
rate <- 1/10
v <- (1 / rate)^2 # var of exponential
mu <- 10 # mean of exponential
zci(rexp(20, rate), sd = sqrt(v))$conf.int # an example CI
#
# 10,000 simulated samples of size 2, 5, and 20
CIsim(n = c(2, 5, 20), samples = 10000,
rdist = rexp, args = list(rate = rate), estimand = mu,
method = zci, method.args = list(sd = sqrt(v)))
## ----prop-test-----------------------------------------------------------
# How the p-value changes near the rejection boundary, with and without
# continuity correction; pval() extracts just the p-value.
prop.test(9458, 10000, p = .95, correct = FALSE) %>% pval()
prop.test(9457, 10000, p = .95, correct = FALSE) %>% pval()
prop.test(9458, 10000, p = .95) %>% pval()
prop.test(9457, 10000, p = .95) %>% pval()
prop.test(9456, 10000, p = .95) %>% pval()
binom.test(9457, 10000, p = .95) %>% pval()
binom.test(9456, 10000, p = .95) %>% pval()
## ----ci-99---------------------------------------------------------------
# 99% z interval: mean 10, sigma 2, n = 25.
zstar <- - qnorm(0.005); zstar
se <- 2 / sqrt(25); se
zstar * se
10 + c(-1,1) * zstar * se # confidence interval
## ----unif-ci-------------------------------------------------------------
# CI for theta in Unif(0, theta) based on the sample maximum.
x <- c(1.6, 2.8, 6.2, 8.2, 8.5, 8.7)
c(max(x), max(x) / 0.05^(1/6))
## ----unif-ci-sim---------------------------------------------------------
Sims <-
expand.grid(n = c(5, 10, 20, 50), rep = 1:10000, theta = 10) %>%
group_by(n, rep, theta) %>%
mutate(
lower = max(runif(n, 0, theta)),
upper = lower / 0.05^(1/n),
cover = upper > theta
)
df_stats(cover ~ n, data = Sims, props)
## ----one-sided-ci-sol----------------------------------------------------
zstar <- qnorm(0.95)
c(-Inf, 8 + zstar * 3 / sqrt(16))
c(8 - zstar * 3 / sqrt(16), Inf)
# NOTE(review): df = 7 alongside n = 16 looks inconsistent (expected df = 15);
# confirm against the exercise statement.
tstar <- qt(0.95, df = 7)
c(-Inf, 8 + tstar * 3 / sqrt(16))
c(8 - tstar * 3 / sqrt(16), Inf)
## ----myci-sol------------------------------------------------------------
# Interval for the difference in means: (mean(x) - mean(y)) plus/minus one
# standard error of the difference.  Returns a named numeric vector with
# elements "lower" and "upper".
myci <- function(x, y) {
  diff_means <- mean(x) - mean(y)
  se_diff <- sqrt(var(x) / length(x) + var(y) / length(y))
  bounds <- diff_means + c(-se_diff, se_diff)
  names(bounds) <- c("lower", "upper")
  bounds
}
# Simulate the myci() interval 10,000 times (do() presumably from mosaic)
# and back out the t multiplier matching its observed coverage.
Intervals <-
(do(10000) * myci(rnorm(10, 9, 2), rnorm(5, 8, 3))) %>%
mutate(
estimate = (upper + lower) / 2,
me = 0.5 * (upper - lower),
D = abs(1 - estimate),
ratio = D / me
)
R <- qdata( ~ ratio, data = Intervals, p = 0.95)["quantile"]; R
uniroot(function(df) qt(0.975, df = df) - R, c(1,100)) %>% value()
## ----uvec01-sol,tidy = FALSE---------------------------------------------
# Orthonormal basis exercise; dot() and project() are helpers from the
# text's package (not base R) -- confirm which package supplies them.
x <- c(3, 4, 4, 7, 7)
mean(x)
v <- x - mean(x)
u1 <- c(1,1,1,1,1) / sqrt(5)
u2 <- c(1,-1,0,0,0) / sqrt(2)
u3 <- c(1,1,-2,0,0) / sqrt(6)
u4 <- c(1,1,1,-3,0) / sqrt(12)
u5 <- c(1,1,1,1,-4) / sqrt(20)
ulist <- list(u1,u2,u3,u4,u5)
vlength <- function(x) sqrt(dot(x,x))
sapply(ulist, vlength)
xList <- lapply(ulist, function(u) project(x,u)); xList
vList <- lapply(ulist, function(u) project(v,u)); vList
all.equal(xList[-1], vList[-1])
## ----t-dist-fig, echo = FALSE, opts.label = "fig1"-----------------------
# Compare t densities (df = 1, 2, 4, 10) with the normal density;
# the "1000" code stands in for the normal curve.
x <- seq(-5, 5, by = 0.05)
l <- length(x)
Plot_data <-
tibble(
pdf = c(dnorm(x), dt(x, df = 1), dt(x, df = 2), dt(x, df = 4), dt(x, df = 10)),
distribution = rep(c(1000, 1, 2, 4, 10), each = l),
x = rep(x, times = 5)
)
Plot_data$distribution <-
factor(Plot_data$distribution,
labels = c("df=1", "df=2", "df=4", "df=10", "normal")
)
line.list <- list(
lty = c(1, 1, 1, 1, 1), # lty = c(1, 2, 3, 4, 1),
lwd = c(2, 2, 2, 2, 2),
col = paste("gray", c(80, 60, 40, 20, 5), sep = "")
)
gf_line( pdf ~ x, data = Plot_data, color = ~ distribution) %>%
gf_refine(scale_color_manual(values = paste("gray", c(80, 60, 40, 20, 10), sep = "")))
## ----t-test01------------------------------------------------------------
t <- (10.3 - 10)/ (0.4 / sqrt(12)); t # test statistic
2 * pt(-abs(t), df = 11); # p-value using t-distribution
2 * pnorm(-abs(t)); # "p-value" using normal distribution
## ----t-ci01, digits = 4--------------------------------------------------
tstar <- qt(0.975, df = 11); tstar
10.3 + c(-1,1) * tstar * 0.4 / sqrt(12)
## ----t-test-iris---------------------------------------------------------
V <- iris %>% filter(Species == "virginica")
# for CI; p-value not interesting here -- mu = 0
t.test( ~ Sepal.Width, data = V)
# this gives a more interesting p-value, if mu = 3 is an interesting claim
t.test( ~ Sepal.Width, data = V, mu = 3)
## ----uvec01, tidy = FALSE------------------------------------------------
# Orthonormal basis of R^4 whose first vector is constant; the remaining
# directions carry the "variance" part of x.
x <- c(3, 4, 5, 8)
mean(x)
var(x)
u1 <- .5 * c(1,1,1,1)
u2 <- 1 / sqrt(2) * c(1,-1,0,0)
u3 <- 1 / sqrt(6) * c(1,1,-2,0)
u4 <- 1 / sqrt(12) * c(1,1,1,-3)
## ----uvec02, tidy = FALSE------------------------------------------------
# Check unit lengths and pairwise orthogonality (dot() from the text's
# package -- not base R).
ulist <- list(u1, u2, u3, u4)
vlength <- function(x) sqrt(dot(x,x))
sapply(ulist, vlength)
c(dot(u1, u2), dot(u1, u3), dot(u1, u4),
dot(u2, u3), dot(u2, u4), dot(u3, u4))
## ----uvec03, tidy = FALSE------------------------------------------------
# Squared lengths of the projections onto u2..u4 sum to (n-1) * var(x).
pList <- lapply(ulist, function(u) project(x, u)); pList
sapply(pList, vlength)
sum(sapply(pList, function(x) dot(x,x))[2:4])
3 * var(x)
## ----uvec02-sol, tidy = FALSE--------------------------------------------
x <- c(3, 4, 5, 8)
mean(x)
var(x)
u1 <- .5 * c(1,1,1,1)
u2 <- 1 / sqrt(2) * c(1,-1,0,0)
u3 <- 1 / sqrt(6) * c(1,1,-2,0)
u4 <- 1 / sqrt(12) * c(1,1,1,-3)
## ----uvec03-sol, tidy = FALSE--------------------------------------------
ulist <- list(u1, u2, u3, u4)
vlength <- function(x) sqrt(dot(x,x))
sapply(ulist, vlength)
c(dot(u1, u2), dot(u1, u3), dot(u1, u4),
dot(u2, u3), dot(u2, u4), dot(u3, u4))
## ----uvec04-sol, tidy = FALSE--------------------------------------------
pList <- lapply(ulist, function(u) project(x, u)); pList
sapply(pList, vlength)
sum(sapply(pList, function(x) dot(x,x))[2:4])
3 * var(x)
## ----uvec05-sol,tidy = FALSE---------------------------------------------
# Same decomposition with a Hadamard-style orthonormal basis.
x <- c(3, 4, 5, 8)
mean(x)
v <- x - mean(x)
w1 <- .5 * c(1, 1, 1, 1)
w2 <- .5 * c(1, 1, -1, -1)
w3 <- .5 * c(1, -1, -1, 1)
w4 <- .5 * c(1, -1, 1, -1)
wlist <- list(w1, w2, w3, w4)
vlength <- function(x) sqrt(dot(x, x))
sapply(wlist, vlength)
c(dot(w1, w2), dot(w1, w3), dot(w1, w4), dot(w2, w3), dot(w2, w4), dot(w3, w4))
pList <- lapply(wlist, function(w) project(x, w)); pList
sapply(pList, vlength)
sum(sapply(pList, function(x) dot(x,x))[2:4])
3 * var(x)
## ----uvec06-sol,tidy = FALSE---------------------------------------------
x <- c(3, 4, 5, 8)
mean(x)
v <- x - mean(x)
w1 <- c(1, 1, 1, 1) / 2
w2 <- c(1, 1,-1,-1) / 2
w3 <- c(1,-1, 0, 0) / sqrt(2)
w4 <- c(0, 0, 1,-1) / sqrt(2)
wlist <- list(w1, w2, w3, w4)
sapply(wlist, vlength)
c(dot(w1, w2), dot(w1, w3), dot(w1, w4), dot(w2, w3), dot(w2, w4), dot(w3, w4))
pList <- lapply(wlist, function(w) project(x,w)); pList
sapply(pList, vlength)
sum(sapply(pList, function(x) dot(x, x))[2:4])
3 * var(x)
## ----sepal-width-ci------------------------------------------------------
# Per-species t intervals for Sepal.Width (do() + confint() combo).
iris %>% group_by(Species) %>%
do(data.frame(as.list(
confint(t.test( ~ Sepal.Width, data= .))
)))
## ----sepal-length-ci-sol-------------------------------------------------
iris %>% group_by(Species) %>%
do(data.frame(as.list(
confint(t.test( ~ Sepal.Length, data= .))
)))
## ----sepal-ratio-ci-sol--------------------------------------------------
# Same idea, but for the ratio Sepal.Length / Sepal.Width, one species
# at a time, rows bound into a single data frame.
bind_rows(
lapply(
levels(iris$Species),
function(s) {
confint(t.test( ~ Sepal.Length / Sepal.Width,
data = filter(iris, Species == s))) %>%
mutate(species = s)
}
)
)
## ----shoe-size-ci--------------------------------------------------------
qt(0.975, df = 255) * 2 / sqrt(256)
## ----uniroot-------------------------------------------------------------
# Smallest n making the t margin of error (sigma-hat = 2) at most 0.25.
f <- function(n) { qt(0.975, n-1) * 2 / sqrt(n) - 0.25}
uniroot(f, c(10, 1000))
## ----t-simulations-------------------------------------------------------
# an example CI from a sample of size 20
confint(t.test(rnorm(20, 500, 100)))
# 10,000 simulated samples of sizes 2, 5, and 20
CIsim(n = c(2, 5, 20), samples = 10000, estimand = 500,
rdist = rnorm, args = list(mean = 500, sd = 100))
## ----heavy-tails, echo = FALSE, warning = FALSE--------------------------
# Compare t(3) with Norm(0, sqrt(3)) (same variance) to show heavier tails.
x <- seq(-7, 10, by = 0.10)
l <- length(x)
Plot_data <- data.frame(
pdf = c(dnorm(x, 0, sqrt(3)), dt(x, df = 3)),
cdf = c(pnorm(x, 0, sqrt(3)), pt(x, df = 3)),
distribution = rep(c(1000, 3), each = l),
x = rep(x, times = 2)
)
Plot_data$distribution <- factor(Plot_data$distribution,
labels = c("T(3)", "Norm")
)
# line.list <- list(
# lty = c(1, 1),
# lwd = c(1.5, 1.5),
# col = trellis.par.get("superpose.line")$col[1:2]
# )
gf_line(pdf ~ x, data = Plot_data, color = ~ distribution) %>%
gf_labs(title = "PDFs") %>%
gf_lims(x = c(0, 7), y = c(0, 0.40)) %>%
gf_theme(legend.position = "top")
gf_line(cdf ~ x, data = Plot_data, color = ~ distribution) %>%
gf_labs(title = "CDFs") %>%
gf_lims(x = c(-2, 7), y = c(0, 1)) %>%
gf_theme(legend.position = "top")
gf_line(pdf ~ x, data = Plot_data, color = ~ distribution) %>%
gf_labs(title = "PDFs (zoomed)") %>%
gf_lims(x = c(3, 9), y = c(0, 0.06)) %>%
gf_theme(legend.position = "top")
gf_line(cdf ~ x, data = Plot_data, color = ~ distribution) %>%
gf_labs(title = "CDFs (zoomed)") %>%
gf_lims(x = c(2.8, 9), y = c(0.97, 1)) %>%
gf_theme(legend.position = "top")
# Retired lattice versions of the four plots above, kept for reference:
# xyplot(pdf ~ x, Plot_data,
# groups = distribution,
# main = "PDFs",
# type = "l",
# xlim = c(0, 7),
# ylim = c(-0.005, 0.40),
# lattice.options = list(
# superpose.line = line.list
# ),
# lwd = line.list$lwd,
# lty = line.list$lty,
# col = line.list$col,
# key = list(
# lines = line.list,
# text = list(
# lab = c(expression(T(3)), expression(Norm(0, sqrt(3))))
# ),
# columns = 2
# )
# )
#
# xyplot(cdf ~ x, Plot_data,
# groups = distribution,
# main = "CDFs",
# type = "l",
# xlim = c(-3, 7),
# ylim = c(0, 1),
# lattice.options = list(
# superpose.line = line.list
# ),
# lwd = line.list$lwd,
# lty = line.list$lty,
# col = line.list$col,
# key = list(
# lines = line.list,
# text = list(
# lab = c(expression(T(3)), expression(Norm(0, sqrt(3))))
# ),
# columns = 2
# )
# )
#
# xyplot(pdf ~ x, Plot_data,
# groups = distribution,
# main = "PDFs",
# type = "l",
# xlim = c(3, 9),
# ylim = c(-0.005, 0.06),
# lattice.options = list(
# superpose.line = line.list
# ),
# lwd = line.list$lwd,
# lty = line.list$lty,
# col = line.list$col,
# key = list(
# lines = line.list,
# text = list(
# lab = c(expression(T(3)), expression(Norm(0, sqrt(3))))
# ),
# columns = 2
# )
# )
#
# xyplot(cdf ~ x, Plot_data,
# groups = distribution,
# main = "CDFs",
# type = "l",
# xlim = c(3, 9),
# ylim = c(0.98, 1),
# lattice.options = list(
# superpose.line = line.list
# ),
# lwd = line.list$lwd,
# lty = line.list$lty,
# col = line.list$col,
# key = list(
# lines = line.list,
# text = list(
# lab = c(expression(T(3)), expression(Norm(0, sqrt(3))))
# ),
# columns = 2
# )
# )
## ----t-robust-heavytails-------------------------------------------------
# Robustness of the t interval under a heavy-tailed t(3) population.
# an example CI (n = 20, mu = 0)
confint(t.test(rt(20, 3)))
# 10,000 simulated samples of sizes 2, 5, and 20
CIsim(n = c(2, 5, 20), samples = 10000, estimand = 0,
rdist = rt, args = list(df = 3))
## ----t-robust-exp--------------------------------------------------------
# Robustness of the t interval under a skewed Exp(1/10) population.
# an example CI (n = 20; mu = 10)
confint(t.test(rexp(20, 1/10)))
# 10,000 simulated samples of sizes 2, 5 and 20
CIsim(n = c(2, 5, 20), samples = 10000, estimand = 10,
rdist = rexp, args = list(rate = 1/10))
## ----t-robust-qq, eval = FALSE-------------------------------------------
## ExpSims <-
## expand.grid(n = c(10, 20, 40, 80), rep = 1:2000) %>%
## group_by(n, rep) %>%
## mutate(
## pval = pval(t.test(rexp(n), mu = 1)),
## dist = paste0("Exp(1); n=", n))
##
## TSims <-
## expand.grid(n = c(10, 20, 40, 80), rep = 1:2000) %>%
## group_by(n, rep) %>%
## mutate(
## pval = pval(t.test(rt(n, df = 3), mu = 0)),
## dist = paste0("t(3); n=", n))
##
## gf_qq( ~ pval, data = bind_rows(ExpSims, TSims),
## distribution = qunif, geom = "line") %>%
## gf_abline(slope = 1, intercept = 0, color = "red",
## linetype = "dashed", alpha = 0.6) %>%
## gf_facet_wrap( ~ dist, nrow = 2)
## gf_qq( ~ pval, data = bind_rows(ExpSims, TSims), na.rm = TRUE,
## distribution = qunif, geom = "line") %>%
## gf_abline(slope = 1, intercept = 0, color = "red",
## linetype = "dashed", alpha = 0.6) %>%
## gf_lims(x = c(0, 0.2), y = c(0, 0.2)) %>%
## gf_facet_wrap( ~ dist, nrow = 2)
## ----t-robust-qq-fig, echo = FALSE, opts.label = "fig1"------------------
# P-values from a correct test should be Unif(0, 1); the QQ plots below
# show how far the t test drifts from that under each population.
ExpSims <-
expand.grid(n = c(10, 20, 40, 80), rep = 1:2000) %>%
group_by(n, rep) %>%
mutate(
pval = pval(t.test(rexp(n), mu = 1)),
dist = paste0("Exp(1); n=", n))
TSims <-
expand.grid(n = c(10, 20, 40, 80), rep = 1:2000) %>%
group_by(n, rep) %>%
mutate(
pval = pval(t.test(rt(n, df = 3), mu = 0)),
dist = paste0("t(3); n=", n))
gf_qq( ~ pval, data = bind_rows(ExpSims, TSims),
distribution = qunif, geom = "line") %>%
gf_abline(slope = 1, intercept = 0, color = "red",
linetype = "dashed", alpha = 0.6) %>%
gf_facet_wrap( ~ dist, nrow = 2)
gf_qq( ~ pval, data = bind_rows(ExpSims, TSims), na.rm = TRUE,
distribution = qunif, geom = "line") %>%
gf_abline(slope = 1, intercept = 0, color = "red",
linetype = "dashed", alpha = 0.6) %>%
gf_lims(x = c(0, 0.2), y = c(0, 0.2)) %>%
gf_facet_wrap( ~ dist, nrow = 2)
## ----miaa-sol------------------------------------------------------------
# MIAA05 is a data set from the text's package -- confirm availability.
set.seed(12345)
Intervals <-
do(20) *
confint(t.test( ~ PTSG, data = sample(MIAA05, 15), conf.level = .90))
mu <- mean( ~ PTSG, data = MIAA05)
tally( ~ (lower <= mu & mu <= upper), data = Intervals)
## ----wald-sims-----------------------------------------------------------
WaldSims <-
CIsim(2000, n = c(5, 10, 20, 40),
method = binom.test, method.args = list(ci.method = "Wald"),
rdist = rbinom, args = list(size = 1, prob = 0.2),
estimand = 0.2)
## ----wald-score-compare-fig, echo = FALSE, opts.label = "fig1"-----------
# Likelihood ratio statistic for H0: pi = pi.0, given y successes in N
# Bernoulli trials.  The guards keep 0 * log(0) terms out of the sums.
# (alpha is accepted for signature compatibility with the other uniroot
# helpers below but is not used here.)
LR = function(pi.0, y, N, alpha) {
  p.hat <- y / N
  # Binomial log-likelihood at probability p (dropping the constant choose term).
  loglik <- function(p) {
    total <- 0
    if (p > 0) total <- total + y * log(p)
    if (p < 1) total <- total + (N - y) * log(1 - p)
    total
  }
  2 * (loglik(p.hat) - loglik(pi.0))
}
# Root function for uniroot(): LR statistic minus the chi-square(1)
# critical value; its zeros in pi.0 are the endpoints of the likelihood
# ratio confidence interval.
LRCI = function(pi.0, y, N, alpha) {
  p.hat <- y / N
  # Binomial log-likelihood at probability p, with 0 * log(0) guarded out.
  loglik <- function(p) {
    total <- 0
    if (p > 0) total <- total + y * log(p)
    if (p < 1) total <- total + (N - y) * log(1 - p)
    total
  }
  2 * (loglik(p.hat) - loglik(pi.0)) - qchisq(1 - alpha, df = 1)
}
# Root function for uniroot(): lower bound of the mid-P (Clopper-Pearson)
# interval.  Zero where the upper-tail probability, counting the observed
# outcome at half weight, equals alpha / 2.
midpL = function(pi.0, y, N, alpha) {
  upper_tail <- sum(dbinom(y:N, N, pi.0))
  upper_tail - 0.5 * dbinom(y, N, pi.0) - alpha / 2
}
# Root function for uniroot(): upper bound of the mid-P (Clopper-Pearson)
# interval.  Zero where the lower-tail probability, counting the observed
# outcome at half weight, equals alpha / 2.
midpU = function(pi.0, y, N, alpha) {
  lower_tail <- sum(dbinom(0:y, N, pi.0))
  lower_tail - 0.5 * dbinom(y, N, pi.0) - alpha / 2
}
# Compute 95% CI bounds for every possible y in 0..n under eight interval
# methods, then exact coverage probabilities over a fine grid of pi.
n = 35
y = 0:n
pi.hat = y / n
pi.wig = (y + 2) / (n + 4)
tmp = matrix(0, length(seq(0.001, 0.999, by = 0.001)), 8)
# (n + 1) by 2 matrices to hold confindence bounds
# (first col--lower bound, second col -- upper bound)
# W -wald, S - score, E - exact, L - LR, BJ = bayes, jeffrey's prior,
# BU - bayes uniform prior, MP - mid p, Wil = Wilson
W = matrix(0, n + 1, 2)
S = W
E = W
L = W
BJ = W
BU = W
MP = W
Wil = W
tmps = lapply(y, stats::prop.test, n, correct = FALSE) #compute confidence bounds for score
tmpe = lapply(y, stats::binom.test, n) #compute confidence bounds for exact
# compute lower/upper confidence bounds for wald
W[, 1] = pi.hat - qnorm(0.975) * sqrt(pi.hat * (1-pi.hat) / n)
W[, 2] = pi.hat + qnorm(0.975) * sqrt(pi.hat * (1-pi.hat) / n)
# compute lower/upper confidence bounds for Wilson
Wil[, 1] = pi.wig - qnorm(0.975) * sqrt(pi.wig * (1-pi.wig) / (n + 4))
Wil[, 2] = pi.wig + qnorm(0.975) * sqrt(pi.wig * (1-pi.wig) / (n + 4))
# LR and mid-P bounds come from root-finding; the y = 0 and y = n cases
# are pinned at 0 and 1 where uniroot would have no sign change.
for (i in 1:(n + 1)) {
S[i, ] = tmps[[i]]$conf.int #extract confidence interval for score
E[i, ] = tmpe[[i]]$conf.int # extract conf. int. for exact
if (y[i] == 0){
L[i, 1] = 0
MP[i, 1] = 0
}
else {
L[i, 1] = uniroot(f = LR, interval = c(0.000001, y[i] / n), N = n, y = y[i],
alpha = 0.05)$root # lower bound for LR
MP[i, 1] = uniroot(f = midpL, interval = c(0.000001, 0.999999),
N = n, y = y[i], alpha = 0.05)$root # lower bound for mid-P
}
if (y[i] == n) {
L[i, 2] = 1
MP[i, 2] = 1
}
else {
L[i, 2] = uniroot(f = LR, interval = c(y[i] / n, 0.999999), N = n, y = y[i],
alpha = 0.05)$root # upper bound for LR
MP[i, 2] = uniroot(f = midpU, interval = c(0.000001, 0.999999),
N = n, y = y[i], alpha = 0.05)$root #upper bound for mid-P
}
}
BJ[, 1] = qbeta(0.025, 0.5 + y, n + 0.5-y) # lower bounds, bayes, jeffrey's prior
BJ[, 2] = qbeta(0.975, 0.5 + y, n + 0.5-y) # upper bounds
BU[, 1] = qbeta(0.025, 1 + y, n + 1-y) # lower bounds bayes, uniform prior
BU[, 2] = qbeta(0.975, 1 + y, n + 1-y) # upper bounds
cnt = 1
# probabilities from the binomial y = (0, 1, 2, ..., 25)
#probs = dbinom(y, n, pi.0)
pi.values <- seq(0.001, 0.999, by = 0.001)
# Exact coverage at each pi.0: sum binomial probabilities of the y values
# whose interval contains pi.0 (the %*% does the weighted sum).
for (pi.0 in pi.values) {
# calculate coverage rates, put into matrix tmp
probs = dbinom(y, n, pi.0)
tmp[cnt, 1] = (S[, 1] < pi.0 & pi.0 < S[, 2]) %*% probs
tmp[cnt, 2] = (W[, 1] < pi.0 & pi.0 < W[, 2]) %*% probs
tmp[cnt, 3] = (E[, 1] < pi.0 & pi.0 < E[, 2]) %*% probs
tmp[cnt, 4] = (L[, 1] < pi.0 & pi.0 < L[, 2]) %*% probs
tmp[cnt, 5] = (BJ[, 1] < pi.0 & pi.0 < BJ[, 2]) %*% probs
tmp[cnt, 6] = (BU[, 1] < pi.0 & pi.0 < BU[, 2]) %*% probs
tmp[cnt, 7] = (MP[, 1] < pi.0 & pi.0 < MP[, 2]) %*% probs
tmp[cnt, 8] = (Wil[, 1] < pi.0 & pi.0 < Wil[, 2]) %*% probs
cnt = cnt + 1
}
# Reshape the Score / Wald / Clopper-Pearson coverage columns to long
# format for plotting.
nn <- length(pi.values)
coverage <- data.frame(
pi = rep(pi.values, times = 3),
coverage = c(tmp[, 1], tmp[, 2], tmp[, 3]),
method = factor(rep(c("Score", "Wald", "Clopper-Pearson"), each = nn),
levels = c("Wald", "Clopper-Pearson", "Score"))
)
# below, opens a pdf file creates various plots shown in lecture
# and closes the PDF device
# NOTE(review): col.fastR presumably comes from the fastR package; this
# whole matplot section is disabled by if(FALSE) and kept for reference.
trellis.par.set(theme = col.fastR(bw = TRUE))
if(FALSE) {
matplot(seq(0.001, 0.999, by = 0.001), tmp[, 1:3], type = "l",
lty = 1,
col = trellis.par.get("superpose.line")$col[1:3],
main = paste("Coverage rates (n=", n, "; 95% CI)", sep = ""),
xlab = expression(pi),
ylab = "Coverage Rate",
lwd = 2,
ylim = c(0.8, 1))
abline(h = 0.95)
legend(0.35, 0.875, c("Score", "Wald", "Clopper-Pearson")[c(3, 1, 2)],
col = trellis.par.get("superpose.line")$col[c(3, 1, 2)],
lwd = 2,
lty = 1,
cex = 1)
trellis.par.set(theme = col.fastR(bw = TRUE))
matplot(seq(0.001, 0.999, by = 0.001), tmp[, c(1, 8)], type = "l",
lty = 1, col = trellis.par.get("superpose.line")$col[1:4],
main = paste("Coverage rates (n=", n, "; 95% CI)", sep = ""),
xlab = expression(pi),
ylab = "Coverage Rate",
lwd = 2,
ylim = c(0.8, 1))
abline(h = 0.95)
legend(0.40, 0.875, c("Score", "Wilson"), col = trellis.par.get("superpose.line")$col[1:2], lty = 1, cex = 1)
}
gf_line(coverage ~ pi, data = coverage, color = ~ method,
na.rm = TRUE, alpha = 0.8) %>%
gf_hline(yintercept = 0.95, alpha = 0.5, linetype = "dashed") %>%
gf_lims(y = c(0.8, 1)) %>%
gf_theme(legend.position = "top") %>%
gf_labs(title = "Coverage rates (n = 35; 95% CI)")
# xyplot(coverage ~ pi, data = coverage, groups = method,
# lty = 1, lwd = 2, alpha = 0.8,
# type = "l", cex = .25,
# main = paste("Coverage rates (n=", n, "; 95% CI)", sep = ""),
# xlab = expression(pi),
# ylab = "Coverage Rate",
# ylim = c(0.8, 1),
# # col = c("gray50", "gray80", "gray20"),
# col = c("navy", "red", "forestgreen"), # "purple"),
# auto.key = TRUE,
# legend = list(
# inside= list(x = .5, y = .1, corner = c(.5, 0),
# fun = draw.key,
# args = list(
# key = list(
# lines = list(lty = 1, lwd = 2,
# # col = c("gray70", "gray20", "gray50")
# col = c("red", "forestgreen", "navy")
# ),
# text = list(
# lab = c("Clopper-Pearson", "Score", "Wald"),
# cex = .8)
# )
# )
# )
# ),
# panel = function(x, y, ...){
# panel.abline(h = 0.95)
# panel.xyplot(x, y, ...)
# }
# )
#write.csv(coverage, file = "CIcoverage.csv", row.names = FALSE)
## ----binom-cis-----------------------------------------------------------
# ci.method is an argument of the text package's binom.test wrapper
# (stats::binom.test has no such argument) -- confirm which package masks it.
binom.test(25, 70) # Clopper-Pearson
binom.test(25, 70, ci.method = "Wald")
binom.test(25, 70, ci.method = "score")
prop.test(25, 70) # also uses inverted score test
## ----prop-ci-sim---------------------------------------------------------
# Simulated coverage of Wald / Wilson / score intervals over n and pi.
Sims <-
expand.grid(
n = c(35, 100),
pi = c(0.2, 0.3, 0.4, 0.5),
method = c("Wald", "Wilson", "score"),
rep = 1:2000) %>%
group_by(n, pi, method, rep) %>%
do(
confint(
binom.test(rbinom(1, .$n, .$pi), n = .$n, ci.method = .$method)))
Sims %>%
group_by(n, pi, method) %>%
summarise(cover = prop(lower <= pi & pi <= upper))
## ----helium-footballs01--------------------------------------------------
# Paired comparison: helium-filled vs air-filled footballs.
Footballs <- HeliumFootballs %>% mutate(diff = helium - air)
Footballs %>% head(3)
t.test( ~ diff, data = Footballs)
## ----helium-footballs02--------------------------------------------------
tally( ~ sign(helium - air), data = HeliumFootballs)
## ----helium-footballs03--------------------------------------------------
binom.test(20,37) %>% pval()
## ----sign-ties-----------------------------------------------------------
# Normal approximation to the sign test with 20 vs 17 signs.
S = (20 - 17) / sqrt(20 + 17); S
2 * pnorm(-S)
## ----goldfish01----------------------------------------------------------
S <- (11 - 4) / sqrt(11 + 4); S
2 * pnorm(-S)
## ----goldfish02----------------------------------------------------------
binom.test(11, 11 + 4) %>% pval()
## ----goldfish03----------------------------------------------------------
2 * pnorm(-(10 - 3) / sqrt(10 + 3))
binom.test(10, 13) %>% pval()
## ----buger-barn2a-sol----------------------------------------------------
# S = sum of exponential waiting times ~ Gamma(n, rate); U = S * rate is
# pivotal (Gamma(n, 1)), so both give identical p-values.
Sims <-
expand.grid(n=c(5, 10, 20, 50), rate = 1/10, rep = 1:2000) %>%
group_by(n, rate, rep) %>%
mutate(
S = sum(rexp(n, rate)),
U = S * rate,
pvalS = 1 - pgamma(S, shape = n, rate = rate),
pvalU = 1 - pgamma(U, shape = n, rate = 1)
)
# We get the same p-value using either S or U as the test stat
gf_line(pvalS ~ pvalU, data = Sims)
# The p-values exhibit the expected Unif(0,1) distribution
gf_dhistogram( ~ pvalU | paste0("n=", n), data = Sims, binwidth = 0.05) %>%
gf_lims(x = c(0,1))
gf_qq( ~ pvalU | paste0("n=", n), data = Sims, distribution = qunif)
# S, U approx Normal when sample size is large enough.
gf_dhistogram( ~ U | paste0("n=", n), data = Sims, bins = 25)
gf_dhistogram( ~ S | paste0("n=", n), data = Sims, bins = 25)
## ----buger-barn2b-sol----------------------------------------------------
# Exact confidence interval for an exponential *rate*, found by inverting
# the gamma-based test.  type "S" uses S = sum(x) ~ Gamma(n, rate);
# type "U" uses the pivot U = S * rate ~ Gamma(n, 1).  Both invert the
# same equation, so they return (numerically) the same interval.
# Returns a list(type, estimate, conf.int) as expected by CIsim().
eci <- function(x, conf.level = 0.95, type = c("S", "U")) {
  type <- match.arg(type)
  # CDF of the chosen statistic, as a function of the candidate rate.
  cdf_at <-
    if (type == "U") {
      function(x, rate) pgamma(sum(x) * rate, shape = length(x), rate = 1)
    } else {
      function(x, rate) pgamma(sum(x), shape = length(x), rate = rate)
    }
  tail_prob <- (1 - conf.level) / 2
  # The CDF is increasing in the rate, so the alpha/2 root is the lower
  # bound and the 1 - alpha/2 root is the upper bound.
  lower <- uniroot(function(rate) cdf_at(x, rate) - tail_prob,
                   c(0.001, 1000))$root
  upper <- uniroot(function(rate) cdf_at(x, rate) - (1 - tail_prob),
                   c(0.001, 1000))$root
  list(type = type, estimate = 1 / mean(x), conf.int = c(lower, upper))
}
# Check the coverage of eci() (both statistic versions) by simulation.
CIsim(samples = 1000, n = 5, rdist = rexp, args = list(rate = 1/10),
estimand = 1/10, method = eci, method.args = list(type = "U"))
CIsim(samples = 1000, n = 5, rdist = rexp, args = list(rate = 1/10),
estimand = 1/10, method = eci, method.args = list(type = "S"))
## ----buger-barn2c-sol----------------------------------------------------
# Exact confidence interval for the exponential *mean* mu, found by
# inverting the gamma-based test.  type "S" uses S = sum(x) ~ Gamma(n, 1/mu);
# type "U" uses the pivot U = S / mu ~ Gamma(n, 1).
# Returns a list(type, estimate, conf.int) as expected by CIsim().
eci2 <- function(x, conf.level = 0.95, type = c("S", "U")) {
  type <- match.arg(type)
  # CDF of the chosen statistic, as a function of the candidate mean mu.
  # Either way pv is decreasing in mu.
  pv <-
    switch(
      type,
      U = function(x, mu) {pgamma(sum(x) / mu, shape = length(x), rate = 1)},
      S = function(x, mu) {pgamma(sum(x), shape = length(x), rate = 1/mu)}
    )
  # BUG FIX: split the miss probability evenly between the two tails
  # (alpha/2 each), matching eci() above.  The original solved
  # pv = conf.level and pv = 1 - conf.level, which yields a central
  # interval with coverage 2 * conf.level - 1 (90% when 95% is requested).
  alpha <- (1 - conf.level) / 2
  list(
    type = type,
    estimate = mean(x),
    conf.int =
      c(
        uniroot(function(mu) pv(x, mu) - (1 - alpha), c(0.01, 100))$root,
        uniroot(function(mu) pv(x, mu) - alpha, c(0.01, 100))$root
      )
  )
}
# Coverage check for eci2(); the estimand is the mean 10 = 1/rate
CIsim(samples = 1000, n = 5, rdist = rexp, args = list(rate = 1/10),
estimand = 10, method = eci2, method.args = list(type = "U"))
CIsim(samples = 1000, n = 5, rdist = rexp, args = list(rate = 1/10),
estimand = 10, method = eci2, method.args = list(type = "S"))
## ----endurance-paired01--------------------------------------------------
# Paired t procedures on the Endurance data, on several response scales
t.test( ~ (vitamin - placebo), data = Endurance)
## ----endurance-paired02--------------------------------------------------
t.test( ~ (log(vitamin) - log(placebo)) , data = Endurance)
## ----endurance-paired03--------------------------------------------------
# this is the same as the previous one
t.test( ~ (log(vitamin / placebo)) , data = Endurance)
## ----endurance-paired04--------------------------------------------------
t.test( ~ (vitamin / placebo), data = Endurance)
## ----endurance-paired05--------------------------------------------------
t.test(~ (1 / vitamin - 1 / placebo), data = Endurance)
## ----endurance-paired06--------------------------------------------------
# sign test: count pairs where the vitamin value exceeds the placebo value
binom.test(~ (vitamin > placebo), data = Endurance)
## ----Joes-coin01-sol-----------------------------------------------------
wald.ci <- function(x, n, level = 0.95) {
  # Wald confidence interval for a proportion: p.hat +/- z* * SE.
  #
  # x     : number of successes
  # n     : number of trials
  # level : two-sided confidence level
  # Returns c(lower, upper).
  p.hat <- x / n
  z <- qnorm(1 - (1 - level) / 2)
  half.width <- z * sqrt(p.hat * (1 - p.hat) / n)
  c(p.hat - half.width, p.hat + half.width)
}
## ----Joes-coin02-sol-----------------------------------------------------
wilson.ci <- function(x, n, level = 0.95) {
  # "Plus four" (Agresti-Coull style) interval: add 2 successes and 2
  # failures, then compute a Wald interval on the adjusted counts.  This
  # is a simple approximation to the Wilson score interval.
  #
  # x     : number of successes
  # n     : number of trials
  # level : two-sided confidence level
  # Returns c(lower, upper).
  x.adj <- x + 2
  n.adj <- n + 4
  p.hat <- x.adj / n.adj
  z <- qnorm(1 - (1 - level) / 2)
  half.width <- z * sqrt(p.hat * (1 - p.hat) / n.adj)
  c(p.hat - half.width, p.hat + half.width)
}
## ----Joes-coin03-sol-----------------------------------------------------
score.ci <- function(x, n, level = 0.95) {
  # Wilson score interval for a proportion, in closed form:
  # (center +/- margin) / denom, where center and denom come from
  # solving the score equation |p.hat - p| = z * sqrt(p(1-p)/n) for p.
  #
  # x     : number of successes
  # n     : number of trials
  # level : two-sided confidence level
  # Returns c(lower, upper).
  z <- qnorm(1 - (1 - level) / 2)
  p.hat <- x / n
  center <- p.hat + z^2 / (2 * n)
  margin <- z * sqrt(p.hat * (1 - p.hat) / n + z^2 / (4 * n^2))
  denom <- 1 + z^2 / n
  c(center - margin, center + margin) / denom
}
## ----Joes-coin04-sol-----------------------------------------------------
# Built-in interval, with and without continuity correction
prop.test(115, 200)$conf.int
confint(prop.test(115, 200, correct = FALSE))
## ----Joes-coin05-sol-----------------------------------------------------
# Compare the hand-rolled intervals; wald.ci(117, 204) reproduces the
# plus-four interval by applying the Wald formula to the adjusted counts
wald.ci(115, 200)
wilson.ci(115, 200)
wald.ci(117, 204)
score.ci(115, 200)
## ----Joes-coin06-sol-----------------------------------------------------
# score interval using uniroot:
p.hat <- 115 / 200; n <- 200
f <- function(p) {
abs(p.hat - p) / sqrt(p * (1-p) / n) + qnorm(0.025)
}
# roots on either side of p.hat are the two interval endpoints
uniroot(f, c(0, p.hat))$root
uniroot(f, c(p.hat, 1))$root
uniroot(f, c(0, p.hat))$estim.prec
## ----corn-sol------------------------------------------------------------
t.test( ~ (kiln - reg), data = Corn)
## ----golfballs-range, fig.keep = "none"----------------------------------
# Randomization test using the range of the counts as the test statistic
stat <- function(x) { diff(range(x)) }
statTally(
golfballs, rgolfballs, stat, xlab = "test statistic (range)")
## ----golfballs-range-fig, echo = FALSE, message = FALSE, results = "hide"----
stat <- function(x) { diff(range(x)) }
statTally(
golfballs, rgolfballs, stat, xlab = "test statistic (range)")
## ----golfballs-sample----------------------------------------------------
# Two ways to simulate 486 golf balls over four categories
rmultinom(1, prob = c(.3, .3, .2, .2), size = 486)
tally(resample(c(1, 1, 1, 2, 2, 2, 3, 3, 4, 4), 486))
## ----fisher-twins-perm01, seed = 123-------------------------------------
# Permutation test for association in the twins/conviction 2x2 table
numSims <- 20000
FT <-
data.frame(
twin = rep(c("Di", "Mono"), times = c(17, 13)),
conviction = rep(c("No", "Yes", "No", "Yes"), times = c(15, 2, 3, 10))
)
# check to see that table matches
tally(twin ~ conviction, data = FT)
# test statistic is value in top left cell
tally(twin ~ conviction, data = FT)[1, 1]
## ----fisher-twins-perm02-------------------------------------------------
# simulated data sets
testStats <- replicate(numSims, {
tally(twin ~ shuffle(conviction), data = FT)[1, 1]
})
# for p-value
tally(testStats)
# tail probabilities
prop1(testStats >= 15)
# 2-sided p-value
2 * prop1(testStats >= 15)
## ----fisher-twins-2sided-------------------------------------------------
# alternative 2-sided p-value using both tails explicitly
prop(testStats >= 15 | testStats <= 5)
## ----fisher-twins-exact--------------------------------------------------
fisher.test(tally(twin ~ conviction, data = FT))
## ----fisher-twins-ci-----------------------------------------------------
# confidence intervals for the Monte Carlo p-value estimates themselves
binom.test(2 * sum(testStats >= 15), numSims) %>% confint()
binom.test(sum(testStats >= 15 | testStats <= 5), numSims) %>% confint()
## ----iris-perm, fig.keep = "none"----------------------------------------
# Permutation test for association between sepal and petal length within
# the setosa species; corStat is sum(xy) - n*xbar*ybar (unnormalized
# covariance), invariant targets under shuffling of one variable
data(iris)
Setosa <- iris %>% filter(Species == "setosa")
corStat <- function(x, y) {sum(x * y) - length(x) * mean(x) * mean(y)}
testStat <- with(Setosa, corStat(Sepal.Length, Petal.Length)); testStat
SetosaSims <-
expand.grid(rep = 1:10000) %>%
group_by(rep) %>%
mutate(
simStat = with(Setosa, corStat(Sepal.Length, shuffle(Petal.Length)))
)
gf_dhistogram( ~ simStat, data = SetosaSims) %>%
gf_vline(xintercept = testStat)
# 1-sided p-value
prop1( ~ (simStat >= testStat), data = SetosaSims)
# 2-sided p-value
2 * prop1( ~ (simStat >= testStat), data = SetosaSims)
## ----iris-perm-fig, echo = FALSE, results = "hide"-----------------------
data(iris)
Setosa <- iris %>% filter(Species == "setosa")
corStat <- function(x, y) {sum(x * y) - length(x) * mean(x) * mean(y)}
testStat <- with(Setosa, corStat(Sepal.Length, Petal.Length)); testStat
SetosaSims <-
expand.grid(rep = 1:10000) %>%
group_by(rep) %>%
mutate(
simStat = with(Setosa, corStat(Sepal.Length, shuffle(Petal.Length)))
)
gf_dhistogram( ~ simStat, data = SetosaSims) %>%
gf_vline(xintercept = testStat)
# 1-sided p-value
prop1( ~ (simStat >= testStat), data = SetosaSims)
# 2-sided p-value
2 * prop1( ~ (simStat >= testStat), data = SetosaSims)
## ----iris-perm-versi-----------------------------------------------------
# Same permutation test repeated for the versicolor species
Versi <- iris %>% filter(Species == "versicolor")
testStat <- with(Versi, corStat(Sepal.Length, Petal.Length)); testStat
VersiSims <-
expand.grid(rep = 1:10000) %>%
group_by(rep) %>%
mutate(simStat = with(Versi, corStat(Sepal.Length, shuffle(Petal.Length))))
# 1-sided p-value
prop1( ~ (simStat >= testStat), data = VersiSims)
# 2-sided p-value
2 * prop1( ~ (simStat >= testStat), data = VersiSims)
## ----pvals01, fig.keep = "none"------------------------------------------
# Under the null (true mean 0) t-test p-values are Unif(0, 1)
Pvals.null <- do(10000) * { t.test(rnorm(25, 0, 1)) %>% pval() }
gf_dhistogram(~ p.value, data = Pvals.null, binwidth = 0.02, center = 0.01)
gf_qq(~ p.value, data = Pvals.null, distribution = qunif, geom = "line")
## ----pvals01-fig, echo = FALSE, results = "hide"-------------------------
Pvals.null <- do(10000) * { t.test(rnorm(25, 0, 1)) %>% pval() }
gf_dhistogram(~ p.value, data = Pvals.null, binwidth = 0.02, center = 0.01)
gf_qq(~ p.value, data = Pvals.null, distribution = qunif, geom = "line")
## ----pvals02, fig.keep = "none"------------------------------------------
# Under an alternative (true mean 1/2) the p-values pile up near 0
Pvals.alt <- do(10000) * { t.test(rnorm(25, 1/2, 1)) %>% pval() }
gf_dhistogram(~ p.value, data = Pvals.alt, binwidth = 0.01, center = 0.005)
gf_qq(~ p.value, data = Pvals.alt, distribution = qunif, geom = "line")
## ----pvals02-fig, echo = FALSE, results = "hide"-------------------------
Pvals.alt <- do(10000) * { t.test(rnorm(25, 1/2, 1)) %>% pval() }
gf_dhistogram(~ p.value, data = Pvals.alt, binwidth = 0.01, center = 0.005)
gf_qq(~ p.value, data = Pvals.alt, distribution = qunif, geom = "line")
## ----pvals03-------------------------------------------------------------
# power
prop(~ (p.value <= 0.05), data = Pvals.alt)
## ----pvals04, echo = FALSE, results = "hide", fig.keep = 2, opts.label = "figtall"----
update <- function(theta, alpha, power) {
  # Bayes update: probability that H0 is false given a rejection, when the
  # prior P(H0 false) is theta, the significance level is alpha, and the
  # test has the given power.  (Shadows stats::update in this script.)
  true.pos <- theta * power           # reject, and H0 really is false
  false.pos <- (1 - theta) * alpha    # reject, although H0 is true
  true.pos / (true.pos + false.pos)
}
# ggformula version of the plots below, kept for reference but disabled
# gf_function(fun = update, args = list(alpha = 0.05, power = 0.66), xlim = c(0,1)) %>%
# gf_labs(
# y = expression(paste("updated probability that ", H[0], " is false")),
# x = expression(paste("prior probability that ", H[0], " is false (", theta, ")"))
# )
# Updated probability as a function of the prior, for two power levels
plotFun(update(theta, alpha = 0.05, power = 0.66) ~ theta,
theta.lim = c(0,1),
ylab = expression(paste("updated probability that ", H[0], " is false")),
xlab = expression(paste("prior probability that ", H[0], " is false (", theta, ")")))
plotFun(update(theta, alpha = 0.05, power = 0.90) ~ theta,
add = TRUE, col = "red")
# Updated probability as a function of the significance level, prior fixed
plotFun(update(theta = 0.1, alpha, power = 0.66) ~ alpha,
alpha.lim = c(0, 0.10),
ylab = expression(paste("updated probability that ", H[0], " is false")),
xlab = expression(paste("significance level (", alpha, ")")))
plotFun(update(theta = 0.1, alpha, power = 0.90) ~ alpha,
add = TRUE, col = "red")
## ----dimes-boot01, digits = 4--------------------------------------------
x.bar <- mean( ~ mass, data = Dimes); x.bar
## ----dimes-boot02, fig.keep = "none"-------------------------------------
# Bootstrap distribution of the mean dime mass
Dimes.boot <-
do(5000) * c(boot.mean = mean( ~ mass, data = resample(Dimes)))
gf_dhistogram( ~ boot.mean, data = Dimes.boot)
## ----dimes-boot03, fig.keep = "none"-------------------------------------
# normality check
gf_qq( ~ boot.mean, data = Dimes.boot)
SE <- sd( ~ boot.mean, data = Dimes.boot); SE
# confidence interval
# NOTE(review): c(0, 1) yields (x.bar, x.bar + 1.96 SE); a symmetric 95%
# interval would use c(-1, 1) -- confirm which is intended here.
x.bar + 1.96 * c(0, 1) * SE
## ----dimes-boot02-fig, echo = FALSE, results = "hide"--------------------
Dimes.boot <-
do(5000) * c(boot.mean = mean( ~ mass, data = resample(Dimes)))
gf_dhistogram( ~ boot.mean, data = Dimes.boot)
# normality check
gf_qq( ~ boot.mean, data = Dimes.boot)
SE <- sd( ~ boot.mean, data = Dimes.boot); SE
# confidence interval
x.bar + 1.96 * c(0, 1) * SE
## ----dimes-boot04--------------------------------------------------------
# percentile interval
cdata( ~ boot.mean, data = Dimes.boot)
## ----dimes-boot05--------------------------------------------------------
# classical t interval for comparison
t.test( ~ mass, data = Dimes) %>% confint()
## ----boot-norm01, seed = 1234--------------------------------------------
# Six samples of size 36 from N(100, 12), each bootstrapped 3000 times
S <- lapply(1:6, function(x) {rnorm(36, 100, 12)})
Boots <-
bind_rows(
(do(3000) * c(boot.mean = mean(resample(S[[1]])))) %>%
mutate(sample = "1", sample.mean = mean(S[[1]])),
(do(3000) * c(boot.mean = mean(resample(S[[2]])))) %>%
mutate(sample = "2", sample.mean = mean(S[[2]])),
(do(3000) * c(boot.mean = mean(resample(S[[3]])))) %>%
mutate(sample = "3", sample.mean = mean(S[[3]])),
(do(3000) * c(boot.mean = mean(resample(S[[4]])))) %>%
mutate(sample = "4", sample.mean = mean(S[[4]])),
(do(3000) * c(boot.mean = mean(resample(S[[5]])))) %>%
mutate(sample = "5", sample.mean = mean(S[[5]])),
(do(3000) * c(boot.mean = mean(resample(S[[6]])))) %>%
mutate(sample = "6", sample.mean = mean(S[[6]]))
)
## ----boot-norm03, fig.keep = "none"--------------------------------------
# Compare each bootstrap distribution with the true sampling distribution
gf_dhistogram( ~ boot.mean | sample, data = Boots, binwidth = 0.5) %>%
gf_dist("norm", mean = 100, sd = 12 / sqrt(36))
## ----boot-norm04, fig.keep = "none"--------------------------------------
gf_dhistogram( ~ boot.mean | sample, binwidth = 0.5,
data = Boots %>% filter(sample == 1)) %>%
gf_lims(x = c(88,112)) %>%
gf_vline(xintercept = mean(S[[1]])) %>%
gf_labs(x = "3000 bootstrap means from sample 1")
gf_dhistogram( ~ boot.mean | sample, binwidth = 0.5,
data = Boots %>% filter(sample == 2)) %>%
gf_lims(x = c(88,112)) %>%
gf_vline(xintercept = mean(S[[2]])) %>%
gf_labs(x = "3000 bootstrap means from sample 2")
## ----boot-norm03-fig, echo = FALSE, results = "hide", fig.keep = "last", opts.label = "fig1"----
gf_dhistogram( ~ boot.mean | sample, data = Boots, binwidth = 0.5) %>%
gf_dist("norm", mean = 100, sd = 12 / sqrt(36))
## ----boot-norm04-fig, echo = FALSE, results = "hide", fig.keep = "all"----
gf_dhistogram( ~ boot.mean | sample, binwidth = 0.5,
data = Boots %>% filter(sample == 1)) %>%
gf_lims(x = c(88,112)) %>%
gf_vline(xintercept = mean(S[[1]])) %>%
gf_labs(x = "3000 bootstrap means from sample 1")
gf_dhistogram( ~ boot.mean | sample, binwidth = 0.5,
data = Boots %>% filter(sample == 2)) %>%
gf_lims(x = c(88,112)) %>%
gf_vline(xintercept = mean(S[[2]])) %>%
gf_labs(x = "3000 bootstrap means from sample 2")
## ----boot-norm02---------------------------------------------------------
# means of samples
sample.means <- sapply(S, mean); sample.means
## ----boot-norm05, digits = 5---------------------------------------------
# means of bootstrap distributions
boot.means <- mean( ~ boot.mean | sample, data = Boots)
boot.means
# difference from sample means
boot.means - sample.means
## ----boot-norm06---------------------------------------------------------
# standard error
12 / sqrt(36)
# usual standard error estimates
sapply(S, sd) / sqrt(36)
# bootstrap standard errors
sd( ~ boot.mean | sample, data = Boots)
## ----dimes-bias----------------------------------------------------------
# estimate of bias
(mean( ~ boot.mean, data = Dimes.boot) - mean( ~ mass, data = Dimes)) /
sd( ~ boot.mean, data = Dimes.boot)
## ----boot-gamma01, seed = 1234-------------------------------------------
# Same experiment with skewed Gamma(shape 2, rate 1) samples of size 16
S <- lapply(1:6, function(x) {rgamma(16, rate = 1, shape = 2) })
Boot.Gamma <-
bind_rows(
(do(3000) * c(boot.mean = mean(resample(S[[1]])))) %>%
mutate(sample = "1", sample.mean = mean(S[[1]])),
(do(3000) * c(boot.mean = mean(resample(S[[2]])))) %>%
mutate(sample = "2", sample.mean = mean(S[[2]])),
(do(3000) * c(boot.mean = mean(resample(S[[3]])))) %>%
mutate(sample = "3", sample.mean = mean(S[[3]])),
(do(3000) * c(boot.mean = mean(resample(S[[4]])))) %>%
mutate(sample = "4", sample.mean = mean(S[[4]])),
(do(3000) * c(boot.mean = mean(resample(S[[5]])))) %>%
mutate(sample = "5", sample.mean = mean(S[[5]])),
(do(3000) * c(boot.mean = mean(resample(S[[6]])))) %>%
mutate(sample = "6", sample.mean = mean(S[[6]]))
)
## ----boot-gamma02--------------------------------------------------------
# means of samples
sapply(S, mean)
# means of the bootstrap distributions
mean( ~ boot.mean | sample, data = Boot.Gamma)
# standard deviations of samples
sapply(S, sd)
# standard error of each sample
sapply(S, sd) / sqrt(16)
# standard deviations of the bootstrap distributions
sd( ~ boot.mean | sample, data = Boot.Gamma)
sqrt(1/8)
# bias
(mean( ~ boot.mean | sample, data = Boot.Gamma) - sapply(S, mean)) /
sd( ~ boot.mean | sample, data = Boot.Gamma)
## ----boot-gamma03, fig.keep = "none"-------------------------------------
# True sampling distribution of the mean is Gamma(shape = 32, rate = 16)
gf_dhistogram( ~ boot.mean | sample, data = Boot.Gamma,
binwidth = 0.1) %>%
gf_dist("gamma", rate = 16, shape = 32)
## ----boot-gamma04, fig.keep = "none", include= FALSE---------------------
gf_dhistogram( ~ boot.mean | sample, binwidth = 0.1,
data = Boot.Gamma %>% filter(sample == 1)) %>%
gf_vline(xintercept = mean(S[[1]]))
gf_dhistogram( ~ boot.mean | sample, binwidth = 0.1,
data = Boot.Gamma %>% filter(sample == 2)) %>%
gf_vline(xintercept = mean(S[[2]]))
## ----boot-gamma03-fig, echo = FALSE, results = "hide", fig.keep = "last", opts.label = "fig1"----
gf_dhistogram( ~ boot.mean | sample, data = Boot.Gamma,
binwidth = 0.1) %>%
gf_dist("gamma", rate = 16, shape = 32)
## ----lifetime01----------------------------------------------------------
# Ten observed component lifetimes
life01 <- c(16, 34, 53, 75, 93, 120, 150, 191, 240, 339)
## ----lifetime02----------------------------------------------------------
# Bootstrap the sample; percentile interval for the mean
Life.boot <- do(5000) * favstats(resample(life01))
cdata(~mean, data = Life.boot, p = 0.95)
# normalized bias
# NOTE(review): x.bar is not (re)assigned to mean(life01) until the next
# chunk, so this line uses whatever x.bar held previously (the Dimes
# mean) -- confirm the intended chunk execution order.
(mean( ~ mean, data = Life.boot) - x.bar) / sd( ~ mean, data = Life.boot)
## ----lifetime03----------------------------------------------------------
# Bootstrap-t interval for the mean lifetime
favstats(life01)
n <- length(life01); x.bar <- mean(life01); s <- sd(life01)
Life.boot <-
Life.boot %>%
mutate(t = (mean - x.bar)/(sd/sqrt(n)))
q <- cdata( ~ t, data = Life.boot, p = 0.95); q
# note the reversed quantiles: upper t quantile gives the lower endpoint
x.bar - q[2:1] * s/sqrt(n)
## ----lifetime04, fig.keep = "none", opts.label = "fig1"------------------
gf_dhistogram( ~ life01, binwidth = 50)
gf_dhistogram( ~ t, data = Life.boot, bins = 50)
## ----lifetime04-fig, echo = FALSE, results = "hide"----------------------
gf_dhistogram( ~ life01, binwidth = 50)
gf_dhistogram( ~ t, data = Life.boot, bins = 50)
## ----gpigs01-------------------------------------------------------------
# Guinea pig survival times
gpigs <-
c( 76, 93, 97, 107, 108, 113, 114, 119, 136, 137, 138, 139, 152,
154, 154, 160, 164, 164, 166, 168, 178, 179, 181, 181, 185, 194,
198, 212, 213, 216, 220, 225, 225, 244, 253, 256, 259, 265, 268,
268, 270, 283, 289, 291, 311, 315, 326, 361, 373, 376, 397, 398,
406, 459, 466, 592, 598)
## ----gpigs02, fig.keep = "none"------------------------------------------
gf_dhistogram( ~ gpigs, binwidth = 25)
## ----gpigs02-fig, echo = FALSE, opts.label = "fig1"----------------------
gf_dhistogram( ~ gpigs, binwidth = 25)
## ----gpigs03-------------------------------------------------------------
# Percentile and bootstrap-t intervals for the mean survival time
favstats(gpigs)
n <- length(gpigs); x.bar <- mean(gpigs); s <- sd(gpigs)
GP.boot <-
(do(5000) * favstats(resample(gpigs))) %>%
mutate(t = (mean - x.bar) / (sd / sqrt(n)))
# percentile interval
cdata( ~ mean, data = GP.boot)
# normalized bias
(mean( ~ mean, data = GP.boot) - x.bar) / sd( ~ mean, data = GP.boot)
# bootstrap-t interval
q <- quantile( ~ t, data = GP.boot, p = c(0.025, 0.975)); q
x.bar - q[2:1] * s/sqrt(n)
## ----boott-sims01--------------------------------------------------------
# Compute three confidence intervals for the mean of x -- percentile,
# bootstrap-t, and classical t -- returning one data.frame row per method.
#   x          : numeric sample
#   r          : number of bootstrap resamples
#   conf.level : nominal confidence level
# NOTE(review): conf.level is currently unused; the quantiles and the
# cdata() call hard-code 95% -- confirm whether conf.level should be
# honored throughout.
ci3 <- function(x, r = 1000, conf.level = 0.95) {
x.bar <- mean(x); n <- length(x)
sqrtn <- sqrt(n); SE <- sd(x) / sqrtn
Boot <- do(r) * {
rx <- resample(x)
mean.boot <- mean(rx)
se.boot <- sd(rx) / sqrtn
c(mean.boot = mean.boot,
t.boot = (mean.boot - x.bar) / se.boot
)
}
q <- cdata(~ t.boot, data = Boot, 0.95)
tint <- stats::t.test(x) %>% confint()
data.frame(
method = c("percentile", "bootstrap-t", "t"),
estimate = x.bar,
lo = c(
quantile(Boot$mean.boot, 0.025),
x.bar - q[2] * SE,
tint$lower),
hi = c(
quantile(Boot$mean.boot, 0.975),
x.bar
- q[1] * SE,
tint$upper)
)
}
# example use
ci3(rgamma(20, 2, 1))
## ----boott-sims02, fig.keep = "none", seed = 12345-----------------------
# Classify each simulated interval by where the true mean (2) falls:
# "lo" = interval entirely above, "hi" = entirely below, "good" = covers
Sims <-
(do(400) * ci3(rgamma(20, 1, 1/2))) %>%
mutate(
status = c("lo", "good", "hi")[1 + (2 <= lo) + (2 < hi)])
## ----boott-sims03, fig.keep = "none"-------------------------------------
# empirical coverage by method
df_stats(status ~ method, data = Sims, props)
## ----boott-sims03-fig, echo = FALSE, results = "hide", opts.label = "fig1"----
gf_linerange(lo + hi ~ .index, data = Sims,
color = ~ status, size = 0.5, alpha = 0.7) %>%
gf_point(estimate ~ .index, size = 0.5, alpha = 0.7) %>%
gf_facet_grid(method ~ .) %>%
gf_refine(scale_color_manual(
values = c(good = "gray70", hi = "red", lo = "navy"),
breaks = c("hi", "good", "lo")
)) %>%
gf_labs(x = "")
## ----bodytemp------------------------------------------------------------
Lock5withR::BodyTemp50$BodyTemp
## ----boot-boys01---------------------------------------------------------
# select the 10 year old boys
Boys10 <-
NHANES::NHANES %>%
filter(Age == 10, Gender == "male") %>%
select(Age, Gender, Height)
favstats( ~ Height, data = Boys10)
## ----boot-boys02---------------------------------------------------------
# create the bootstrap distribution
Boys.boot.sd <-
do(3000) * c(sd.boot = sd( ~ Height, data = resample(Boys10)))
# check for biased estimator
(mean( ~ sd.boot, data = Boys.boot.sd) - sd( ~ Height, data = Boys10)) /
sd( ~ sd.boot, data = Boys.boot.sd)
## ----boot-boys03---------------------------------------------------------
# create the bootstrap distribution
Boys.boot <-
do(3000) * c(var.boot = var( ~ Height, data = resample(Boys10)))
# check for biased estimator
(mean( ~ var.boot, data = Boys.boot) - var( ~ Height, data = Boys10)) /
sd( ~ var.boot, data = Boys.boot)
## ----boot-boys04---------------------------------------------------------
# percentile interval for the variance, and its square root
cdata( ~ var.boot, data = Boys.boot)
cdata( ~ var.boot, data = Boys.boot)[1:2] %>% sqrt()
## ----boot-boys05---------------------------------------------------------
# percentile interval for the standard deviation directly
cdata( ~ sd.boot, data = Boys.boot.sd)
## ----dimes, include = FALSE, tidy = FALSE--------------------------------
# Uncertainty propagation for N = B / D: B is the bag mass, D the mean
# dime mass; u* are the respective standard uncertainties
s <- sd(~ mass, data = Dimes)
n <- nrow(Dimes)
B <- 10200; B
D <- mean( ~ mass, data = Dimes); D
uB <- 100 / sqrt(12); uB
uD <- s / sqrt(n); uD
u <- sqrt( 1/D^2 * uB^2 + B^2/D^4 * uD^2 )
## ----propagation-unif01--------------------------------------------------
# Monte Carlo estimates of E and Var of Y = sqrt(X), X ~ Unif(0, 1)
X <- runif(100000, 0, 1)
Y <- sqrt(X)
mean(Y)
var(Y)
## ----propagation-unif01-sol, warning = FALSE-----------------------------
# Exact moments by integrating against the density 2y on (0, 1)
integrate(makeFun( y * 2*y ~ y), 0, 1)
mu <- integrate(function(y) y * 2*y, 0, 1) %>% value()
fractions(mu)
integrate(function(y) (y-mu)^2 * 2*y, 0, 1)
integrate(function(y) (y-mu)^2 * 2*y, 0, 1) %>% value() %>% fractions()
## ----dimes-favstats------------------------------------------------------
favstats( ~ mass, data = Dimes)
## ----mean-dime-cl--------------------------------------------------------
# probability a t(29) statistic lands within one SE of its center
pt(1, df = 29) - pt(-1, df = 29)
## ----dimes-sim, digits = 4-----------------------------------------------
# Simulate N = B / D with both B (uniform) and D (bootstrap) uncertain
B <- runif(10000, 10150, 10250)
Dimes.boot <- do(10000) * mean( ~ mass, data = resample(Dimes))
head(Dimes.boot, 3)
Dimes.boot <-
Dimes.boot %>% mutate(D = mean, N = B / D)
gf_dhistogram( ~ N, data = Dimes.boot)
gf_qq( ~ N, data = Dimes.boot)
sd( ~ N, data = Dimes.boot)
## ----resistors, include=FALSE--------------------------------------------
# Parallel resistance of 20- and 50-ohm resistors and its propagated
# uncertainty; r is the number of significant decimal places to report
R <- 20*50/(20 + 50)
u1 <- 0.7; u2 <- 1.2
p1 <- (50/70)^2; p2 <- (20/70)^2
u <- sqrt( p1^2 * u1^2 + p2^2 *u2^2 )
r <- 1 + round(-log10(u))
## ----rm-pi-sol, include=FALSE--------------------------------------------
# presumably removes a user-defined pi so base::pi is visible again;
# errors if no such object exists in the workspace -- TODO confirm
rm(pi)
## ----tank-sol, tidy=FALSE------------------------------------------------
# Volume of a box-shaped tank with relative-uncertainty propagation
L <- 2.65; W <- 3.10; H <- 4.61
uL <- 0.02; uW <- 0.02; uH <- 0.05
V <- L * W * H; V
uV <- sqrt( (uL/L)^2 + (uW/W)^2 + (uH/H)^2) * V; uV
## ----relative-uncertainty-sol, tidy=FALSE--------------------------------
V <- 1.637 / 0.43; V
uV <- sqrt( (0.02/.43)^2 + (0.006/1.637)^2 ) * V; uV
## ----Likelihood, child="Likelihood.Rnw", eval=includeChapter[5]----------
## ----lik-setup, include = FALSE, cache = FALSE---------------------------
knitr::opts_chunk$set(cache.path = "cache/Lik-")
require(maxLik)
## ----dice-likelihood-sol-------------------------------------------------
# Binomial likelihoods dbinom(x, 10, 2/sides) compared across three dice
expand.grid(
x = 0:10, size = 10, sides = c(4, 6, 10)) %>%
mutate(
die = factor(paste("D", sides, sep = ""),
levels = c("D4", "D6", "D10")),
probability = dbinom(x, size, 2 / sides)) %>%
gf_point(probability ~ x, color = ~ die) %>%
gf_line(probability ~ x, color = ~ die)
## ----dice-10-sol---------------------------------------------------------
pbinom(2, 10, 1/5)
## ----dice-others-sol-----------------------------------------------------
pbinom(4, 10, 1/3) - pbinom(2, 10, 1/3)
1 - pbinom(4, 10, 1/2)
## ----binom-mle-fig, echo=FALSE-------------------------------------------
llik <- function (p, t = 14, n = 40) {
  # Binomial log-likelihood (up to a constant) for success probability p,
  # given t successes in n trials.
  # BUG FIX: the original accepted t but ignored it, hard-coding 14 (and
  # implicitly n = 40 via the constant 26).  Both are now parameters with
  # defaults that reproduce the original behavior exactly.
  t * log(p) + (n - t) * log(1 - p)
}
# Plot the binomial log-likelihood over a grid of probabilities
xpts <- seq(0, 1, by = 0.005)
gf_line(llik(xpts) ~ xpts) %>%
gf_labs(x = expression(pi), y = "log-likelihood")
## ----mle-unif01-fig, echo = FALSE, results = "hide"----------------------
# Likelihood for Unif(0, theta) from 6 observations with maximum 8.7;
# zero below 8.7, then proportional to theta^-6
L <- function(theta) { (1 / theta)^6 * (theta >= 8.7) }
plotFun(L(theta) ~ theta, theta.lim = c(6, 15),
xlab = expression(theta), ylab = expression(L(theta)))
## ----zero-one-mom-mle----------------------------------------------------
# Method-of-moments and maximum-likelihood estimates for the exercise's
# one-parameter model on (0, 1) data
x <-
c(0.90, 0.78, 0.93, 0.64, 0.45,
0.85, 0.75, 0.93, 0.98, 0.78)
mean(x)
mom <- (1 / (1 - mean(x))) - 2; mom
mle <- ( - length(x) / sum(log(x))) - 1; mle
## ----plant-density01-sol-------------------------------------------------
# plant densities (lambda) for simulations
density <- c(0.01, 0.1, 0.25, 0.5, 1, 2, 4, 10, 100)
sizes <- c(10, 20, 50)
# Estimate plant density from either quadrat counts (Poisson) or
# nearest-plant distances (Weibull); returns a one-row tibble.
# (Note: this definition shadows stats::simulate within the script.)
simulate <-
function(lambda = 1, size = 10, area = 1,
method = c("count", "distance")){
method <- match.arg(method) # allows for prefixes
if (method == "count") {
x <- rpois(size, lambda*area)
plants <- sum(x)
total.area <- size * area
mle <- plants / total.area
} else {
y <- rweibull(size, shape = 2, scale = 1 / sqrt(pi * lambda))
plants <- length(y)
total.area <- pi * sum(y^2)
mle <- plants / total.area
}
tibble(
size = size, lambda = lambda, method = method,
estimate = mle, plants = plants, area = total.area,
lambdaFac = paste0("l=", lambda),
sizeFac = paste0("size=", size)
)
}
## ----plant-density02-sol, tidy=FALSE-------------------------------------
# Run 1000 simulations per (lambda, size, method) combination.
# (Growing Results with bind_rows in a loop is quadratic, but harmless
# at this scale.)
Results <- c()
for (lambda in density) {
for (size in sizes) {
Results <- bind_rows(Results,
do(1000) * simulate(lambda, size, method = "count"),
do(1000) * simulate(lambda, size, method = "distance")
)
}
}
Results <-
Results %>%
mutate(lambdaFac = reorder(lambdaFac, lambda))
## ----plant-density-bakeoff01-sol, tidy=FALSE-----------------------------
# have a bake-off:
SummaryResults <-
Results %>%
group_by(size, lambda, method) %>%
summarise(
bias = mean(estimate) - lambda[1],
biasR = mean(estimate) / lambda[1],
se = sd(estimate)
)
head(SummaryResults)
## ----plant-density-bakeoff02-sol, eval=FALSE-----------------------------
## log(estimate / lambda)
## ----plant-density-plot01-sol, fig.height=8, fig.width = 7, tidy=FALSE----
gf_jitter(
method ~ log(estimate / lambda) | lambdaFac ~ sizeFac,
data = Results, alpha = 0.1)
## ----plant-density-plot02-sol, fig.height=8, fig.width = 7, tidy=FALSE, warning=FALSE----
gf_violin(
log(estimate / lambda) ~ method, data = Results,
color = ~ method, fill = ~ method, alpha = 0.5, adjust = 2) %>%
gf_refine(annotate("hline", yintercept = 0), coord_flip()) %>%
gf_facet_grid(lambdaFac ~ sizeFac)
## ----batting-average, echo = FALSE, results = "asis"---------------------
# 2015 batting averages for players with >= 200 at-bats; sample 16 and
# emit them as LaTeX table rows
set.seed(1234)
Batters2015 <-
Lahman::Batting %>%
filter(yearID == 2015) %>%
group_by(playerID) %>%
summarise(
G=sum(G, na.rm = TRUE),
AB = sum(AB, na.rm = TRUE),
H = sum(H, na.rm = TRUE)) %>%
filter(AB >= 200) %>%
mutate(BA = round(H/AB, 3))
SampleBatters <-
Batters2015 %>%
sample_n(16)
cat(paste(format(SampleBatters$BA[1:8], digits = 3), collapse = " & "))
cat("\\\\")
cat(paste(format(SampleBatters$BA[9:16], digits = 3), collapse = " & "))
## ----baseball-ba01, tidy = FALSE-----------------------------------------
# the 16 sampled batting averages, hard-coded for reproducibility
ba <- c(0.276, 0.281, 0.225, 0.283, 0.257, 0.250, 0.250, 0.261,
0.312, 0.259, 0.273, 0.222, 0.314, 0.271, 0.294, 0.268)
## ----baseball-ba02, tidy = FALSE-----------------------------------------
# log likelihood function
loglik <- function(theta, x) {
  # Beta(theta[1], theta[2]) log-likelihood for the data in x, returned as
  # a single sum.  NA signals maxLik that the parameter vector lies
  # outside the allowed region.
  if (any(theta <= 0)) return(NA)
  sum(dbeta(x, theta[1], theta[2], log = TRUE))
}
# alternative way to define the log-likelihood
loglik2 <- function(theta, x) {
  # Per-observation Beta log-likelihood terms; maxLik sums a vector-valued
  # objective itself.  NA flags an illegal parameter vector.
  if (any(theta <= 0)) return(NA)
  dbeta(x, theta[1], theta[2], log = TRUE)
}
## ----baseball-ba03-------------------------------------------------------
# Fit a Beta(shape1, shape2) model to the batting averages by ML; the
# scalar and vector log-likelihoods give the same fit
require(maxLik)
ml <- maxLik(loglik, start = c(shape1 = 1, shape2 = 1), x = ba)
ml2 <- maxLik(loglik2, start = c(shape1 = 1, shape2 = 1), x = ba)
ml
ml2
# get just the estimated parameter values
coef(ml)
# get just the "return message" -- always good to check
returnMessage(ml)
alpha.hat <- coef(ml)[1]
beta.hat <- coef(ml)[2]
## ----baseball-ba01-fig, echo=FALSE---------------------------------------
gf_dist("beta", shape1 = alpha.hat, shape2 = beta.hat)
## ----baseball-ba04-------------------------------------------------------
# central 90% of the fitted distribution
qbeta(c(0.05, 0.95), shape1 = alpha.hat, shape2 = beta.hat)
## ----baseball-ba05-------------------------------------------------------
# using the ruler method
qbeta( 0.5 / 334, alpha.hat, beta.hat)
qbeta(1 - 0.5 / 334, alpha.hat, beta.hat)
## ----baseball-ba-likelihood-fig, echo = FALSE, include = FALSE-----------
# Surface and contour views of the Beta (log-)likelihood via lattice
dat <- expand.grid(
alpha = seq(4, 210, by = 6),
beta = seq(10, 500, by = 15)
)
dat$loglik <- apply(cbind(dat$alpha, dat$beta), 1, FUN = "loglik", x = ba)
wireframe(
exp(loglik) ~ alpha * beta, dat,
col = "gray25",
par.settings = list(
box.3d = list(col = "transparent"),
axis.line = list(col = NA, lty = 1, lwd = 1)
),
shade = FALSE,
light.source = c(25, 50, 50),
aspect = c(1, 0.4),
screen = list(z = 20, x = -75),
xlab = list(label = expression(alpha), cex = 0.7),
ylab = list(label = expression(beta), cex = 0.7),
zlab = "",
scale = list(arrows = FALSE, cex = 0.5, z = list(draw = FALSE))
)
dat <- expand.grid(
alpha = seq(4, 325, by = 2),
beta = seq(10, 800, by = 4)
)
dat$loglik <- apply(cbind(dat$alpha, dat$beta), 1, FUN = "loglik", x = ba)
levelplot(loglik ~ alpha + beta, data = dat,
xlab = expression(alpha),
ylab = expression(beta),
main = "log-likelihood",
col.regions = topo.colors(n=100),
panel = function(x, y, z, ...){
panel.levelplot(x, y, z, ...)
panel.xyplot(x = alpha.hat, y = beta.hat, ...)
}
)
## ----ba-boot01-sol-------------------------------------------------------
loglik2 <- function(theta, x) {
  # Per-observation Beta log-likelihood (re-defined here so this chunk is
  # self-contained); NA marks parameter values outside the allowed region
  # for maxLik.
  if (any(theta <= 0)) return(NA)
  dbeta(x, theta[1], theta[2], log = TRUE)
}
# Bootstrap the Beta MLEs from the batting-average sample
BA.boot <-
do(2000) * coef(maxLik(loglik2, x = resample(ba),
start = c(shape1 = 100, shape2 = 100)))
## ----ba-boot02-sol, warning = FALSE--------------------------------------
# coefficients from original sample
cc <-
coef(maxLik(loglik2, x = ba, start = c(shape1 = 100, shape2 = 100)))
# normalized bias estimates
(mean( ~ shape1, data = BA.boot) - cc["shape1"]) /
sd( ~ shape1, data = BA.boot)
(mean( ~ shape2, data = BA.boot) - cc["shape2"]) /
sd( ~ shape2, data = BA.boot)
# 95% confidence intervals
cdata( ~ shape1, data = BA.boot)
cdata( ~ shape2, data = BA.boot)
# histograms of bootstrap distributions
gf_dhistogram( ~ shape1, data = BA.boot, binwidth = 20) %>%
gf_lims(x = c(0, 500))
gf_dhistogram( ~ shape2, data = BA.boot, binwidth = 50) %>%
gf_lims(x = c(0, 1000))
## ----ba-boot03-sol-------------------------------------------------------
# Bootstrap inference for the ratio shape2/shape1
# normalized bias estimate
(mean( ~ shape2 / shape1, data = BA.boot) -
cc["shape2"]/cc["shape1"]) /
sd( ~ (shape2 / shape1), data = BA.boot)
# 95% CI
cdata( ~ (shape2 / shape1), data = BA.boot)
# histogram of bootstrap distribution
gf_dhistogram(
~ (shape2 / shape1), data = BA.boot, v = cc["shape2"]/cc["shape1"])
## ----ba-boot04-sol-------------------------------------------------------
# the same intervals expressed relative to the point estimates
cdata( ~ (shape2 / shape1), data = BA.boot) /
(cc["shape2"]/cc["shape1"])
cdata( ~ shape1, data = BA.boot) / cc["shape1"]
cdata( ~ shape2, data = BA.boot) / cc["shape2"]
## ----ba-boot05-sol-------------------------------------------------------
# Bootstrap the extreme quantiles used by the "ruler method"
alpha <- 0.5 / 334
BA.boot <- BA.boot %>%
mutate(
min = qbeta( alpha, shape1 = shape1, shape2 = shape2),
max = qbeta(1 - alpha, shape1 = shape1, shape2 = shape2))
min_est <-
qbeta( alpha, shape1 = cc["shape1"], shape2 = cc["shape2"])
max_est <-
qbeta(1 - alpha, shape1 = cc["shape1"], shape2 = cc["shape2"])
# normalized bias
(min_est - mean( ~ min, data = BA.boot)) / sd( ~ min, data = BA.boot)
(max_est - mean( ~ max, data = BA.boot)) / sd( ~ max, data = BA.boot)
gf_dhistogram( ~ min, data = BA.boot)
gf_dhistogram( ~ max, data = BA.boot)
cdata( ~ min, data = BA.boot, 0.95)
cdata( ~ max, data = BA.boot, 0.95)
## ----ba-boot06-sol-------------------------------------------------------
# Compare with a plain bootstrap of the sample mean
BA.boot2 <- do(2000) * favstats( resample(ba))
(mean( ~ mean, data = BA.boot2) - mean(ba)) / sd( ~ mean, data = BA.boot2)
confint(BA.boot2, method = "percentile", parm = "mean")
confint(BA.boot2, method = "bootstrap-t")
## ----normal-loglik-------------------------------------------------------
loglik.normal <- function(theta, x) {
  # Normal log-likelihood with theta = c(mu, sigma), returned as the
  # vector of per-observation terms for maxLik().
  mu <- theta[1]
  sigma <- theta[2]
  if (sigma < 0) return(NA) # alert maxLik() to invalid values of sigma
  dnorm(x, mu, sigma, log = TRUE)
}
## ----normal-mle01--------------------------------------------------------
# Single ML fit to one simulated normal sample
x <- rnorm(30, 100, 10)
maxLik(loglik.normal, start = c(mu = 0, sigma = 1), x = x)
## ----normal-mle02--------------------------------------------------------
# Sampling distribution of the normal MLEs over 5000 simulated samples
MLEs <-
do(5000) * coef(maxLik(loglik.normal,
start = c(mu = 0, sigma = 1),
x = rnorm(30, 100, 10)))
head(MLEs, 3)
gf_dhistogram( ~ mu, data = MLEs, binwidth = 0.5) %>%
gf_labs(x = expression(hat(mu)))
gf_dhistogram( ~ sigma, data = MLEs, binwidth = 0.5) %>%
gf_labs(x = expression(hat(sigma)))
gf_qq( ~ mu, data = MLEs, geom = "line") %>%
gf_labs(y = expression(hat(mu)))
gf_qq( ~ sigma, data = MLEs, geom = "line") %>%
gf_labs(y = expression(hat(sigma)))
## ----normal-mle03--------------------------------------------------------
# sigma-hat^2 compared against a chi-squared(29) reference distribution
gf_dhistogram( ~ sigma^2, data = MLEs, binwidth = 5)
gf_qq( ~ sigma^2, data = MLEs, geom = "line",
distribution = qchisq, dparams = list(df = 29)) %>%
gf_labs(x = "Chisq(29)", y = expression(hat(sigma)^2))
# density function for mixture of normals
# Density of a two-component normal mixture:
#   alpha * N(mu1, sigma1) + (1 - alpha) * N(mu2, sigma2).
# Returns NA for invalid parameter values (alpha outside [0, 1] or a
# negative standard deviation).
dmix <- function(x, alpha, mu1, mu2, sigma1, sigma2) {
  bad_alpha <- alpha < 0 || alpha > 1
  bad_sigma <- sigma1 < 0 || sigma2 < 0
  if (bad_alpha || bad_sigma) {
    return(NA)
  }
  alpha * dnorm(x, mu1, sigma1) + (1 - alpha) * dnorm(x, mu2, sigma2)
}
## ----faithful-mle02------------------------------------------------------
# log-likelihood
# Log-likelihood for the 5-parameter normal mixture model.
# theta = c(alpha, mu1, mu2, sigma1, sigma2); x = observed data.
# Relies on dmix() defined earlier in this script.
loglik.faithful <- function(theta, x) {
  alpha  <- theta[1]
  mu1    <- theta[2]
  mu2    <- theta[3]
  sigma1 <- theta[4]
  sigma2 <- theta[5]
  sum(log(dmix(x, alpha, mu1, mu2, sigma1, sigma2)))
}
## ----faithful-mle03, fig.show="hide", tidy=FALSE-------------------------
# seed the algorithm
data(geyser, package = "MASS")
m <- mean( ~ duration, data = geyser)
s <- sd( ~ duration, data = geyser)
ml.faithful <-
maxLik(loglik.faithful, x = geyser$duration,
start = c(alpha = 0.5, mu1 = m - 1, mu2 = m + 1, sigma1 = s, sigma2 = s))
returnMessage(ml.faithful)
mle <- coef(ml.faithful); mle
gf_dhistogram( ~ duration, data = geyser, binwidth = 0.20, alpha = 0.5) %>%
gf_function(fun = dmix,
args = list(
alpha = mle[1],
mu1 = mle[2], mu2 = mle[3],
sigma1 = mle[4], sigma2 = mle[5])
)
## ----faithful-mle03-fig, echo = FALSE, results = "hide"------------------
# seed the algorithm
data(geyser, package = "MASS")
m <- mean( ~ duration, data = geyser)
s <- sd( ~ duration, data = geyser)
ml.faithful <-
maxLik(loglik.faithful, x = geyser$duration,
start = c(alpha = 0.5, mu1 = m - 1, mu2 = m + 1, sigma1 = s, sigma2 = s))
returnMessage(ml.faithful)
mle <- coef(ml.faithful); mle
gf_dhistogram( ~ duration, data = geyser, binwidth = 0.20, alpha = 0.5) %>%
gf_function(fun = dmix,
args = list(
alpha = mle[1],
mu1 = mle[2], mu2 = mle[3],
sigma1 = mle[4], sigma2 = mle[5])
)
## ----faithful-mle04, tidy = FALSE----------------------------------------
# seed the algorithm
m <- mean( ~ duration, data = geyser)
s <- sd( ~ duration, data = geyser)
# Newton-Raphson (NR) compares well to the results above
maxLik(loglik.faithful, x = geyser$duration,
start = c(alpha = 0.5, mu1 = m, mu2 = m, sigma1 = s, sigma2 = s))
# Nelder-Mead doesn't converge (fast enough)
maxLik(loglik.faithful, x = geyser$duration, method = "NM",
start = c(alpha = 0.5, mu1 = m, mu2 = m, sigma1 = s, sigma2 = s))
# Nelder-Mead converges if we give it more time
maxLik(loglik.faithful, x = geyser$duration, method = "NM",
start = c(alpha = 0.5, mu1 = m, mu2 = m, sigma1 = s, sigma2 = s),
control = list(iterlim = 3000))
# BFGS "converges", but only fits one group
maxLik(loglik.faithful, x = geyser$duration, method = "BFGS",
start = c(alpha = 0.5, mu1 = m, mu2 = m, sigma1 = s, sigma2 = s))
## ----grid01, digits = 5--------------------------------------------------
x <- c(26.65, 28.03, 35.55, 29.30, 29.54, 36.20, 30.94,
23.69, 26.12, 27.13, 34.14, 30.51, 30.68, 29.46,
26.67, 36.51, 31.09, 20.74, 31.95, 27.01)
# Note the use of mapply() below.
# This tells R to use ALL the values of x with EACH combination of m and s.
Grid <-
expand.grid(
mean = seq(10, 100, by = 0.1),
sd = seq(1, 10, by = 0.1)) %>%
mutate(
loglik = mapply(function(m, s) { sum(dnorm(x, m, s, log = TRUE)) },
m = mean, s = sd))
Grid %>% arrange(-loglik) %>% head(3)
## ----grid02, digits = 5--------------------------------------------------
Grid2 <-
expand.grid( mean = seq(29.4, 29.8, by = 0.001),
sd = seq(3.8, 4.2, by = 0.001)) %>%
mutate(loglik = mapply(function(m, s) { sum(dnorm(x, m, s, log = TRUE)) },
m = mean, s = sd))
Grid2 %>% arrange(-loglik) %>% head(3)
## ----grid03, digits = 5--------------------------------------------------
mean( ~ x)
sd (~ x) * sqrt(19/20)
## ----grid04--------------------------------------------------------------
# number of function evaluations used in this example
nrow(Grid) + nrow(Grid2)
## ----multinom-mle--------------------------------------------------------
# Multinomial log-likelihood with the last cell probability determined by
# the constraint that the probabilities sum to 1.
#
# theta: free cell probabilities (length k - 1 for k cells).
# x:     observed counts (length k).
# Returns NA for theta values yielding a negative probability, so that
# maxLik() recognizes the point as outside the parameter space.
loglik.multinom <- function(theta, x) {
  probs <- c(theta, 1 - sum(theta))
  if (any(probs < 0)) return(NA)
  # size = sum(x) generalizes the original hard-coded size = 100
  # (dmultinom() requires size == sum(x); identical for the example data).
  dmultinom(x, size = sum(x), prob = probs, log = TRUE)
}
maxLik(loglik.multinom, start = rep(0.25, 3), x = c(10, 20, 30, 40)) -> ml;
coef(ml)
## ----unif-mle-sol01, warning=FALSE---------------------------------------
x <- c(1.6, 2.8, 6.2, 8.2, 8.7)
# Uniform(0, theta) log-likelihood; a non-finite sum (theta < max(x))
# is mapped to NA so that maxLik() treats such theta as invalid.
loglik.unif <- function(theta, x) {
  total <- sum(dunif(x, 0, theta, log = TRUE))
  if (is.finite(total)) total else NA
}
# Uniform(0, theta) likelihood on the natural (not log) scale; NA is
# returned only when the product is non-finite.
lik.unif <- function(theta, x) {
  prod_dens <- prod(dunif(x, 0, theta, log = FALSE))
  if (is.finite(prod_dens)) prod_dens else NA
}
## ----unif-mle-sol02, warning=FALSE---------------------------------------
# works if we select a good starting point --
# but warns about boundary issues.
maxLik(loglik.unif, start = 10, x = x)
maxLik(loglik.unif, start = 10, x = x, method = "NM")
maxLik(loglik.unif, start = 10, x = x, method = "BFGS")
# but some starting points don't work well...
maxLik(loglik.unif, start = 8, x = x)
maxLik(loglik.unif, start = 8, x = x, method = "NM")
maxLik(loglik.unif, start = 8, x = x, method = "BFGS")
## ----unif-mle-sol03, warning = FALSE-------------------------------------
# a graph of the likelihood function shows why
theta <- seq(6, 12, by = 0.002)
y1 <- sapply(theta, function(theta) { lik.unif(theta, x)} )
y2 <- sapply(theta, function(theta) { loglik.unif(theta, x)} )
gf_point(y1 ~ theta, size = 0.05) %>%
gf_labs(x = expression(theta),
y = "likelihood")
gf_point(y2 ~ theta, size = 0.05) %>%
gf_labs(x = expression(theta),
y = "log-likelihood")
## ----hwe-mle-sol---------------------------------------------------------
# Hardy-Weinberg genotype probabilities from allele frequency theta:
# c(P(AA), P(Aa), P(aa)) = c(theta^2, 2 theta (1 - theta), (1 - theta)^2).
theta2probs <- function(theta) {
  other <- 1 - theta
  c(theta^2, 2 * theta * other, other^2)
}
# Hardy-Weinberg multinomial log-likelihood for genotype counts x.
#
# theta: allele frequency (scalar in [0, 1]).
# x:     genotype counts c(n_AA, n_Aa, n_aa).
# Returns NA when theta gives an invalid probability vector.
loglik.hwe <- function(theta, x) {
  probs <- theta2probs(theta)
  if (any(probs < 0 )) { return(NA) }
  # reuse the probabilities computed above; the original recomputed
  # theta2probs(theta) here (identical value, wasted work)
  dmultinom(x, sum(x), probs, log = TRUE)
}
geno <- c(83, 447, 470)
maxLik(loglik.hwe, start = 0.5, x = geno)
## ----mix-normals03-sol---------------------------------------------------
0.3 * pnorm(12, 8, 2) + 0.7 * pnorm(12, 16, 3)
## ----mix-normals04-sol---------------------------------------------------
Plot_data <-
tibble(
x = seq(0, 30, by = 0.10),
density = 0.3 * dnorm(x, 8, 2) + 0.7 * dnorm(x, 16, 3),
density1 = 0.3 * dnorm(x, 8, 2),
density2 = 0.7 * dnorm(x, 16, 3)
)
gf_line(density ~ x, data = Plot_data, size = 1.5, alpha = 0.5) %>%
gf_line(density1 ~ x, data = Plot_data, color = "red") %>%
gf_line(density2 ~ x, data = Plot_data, color = "blue") %>%
gf_labs(title = "pdf of a mixture of normals")
## ----pois-lrt01, tidy = FALSE--------------------------------------------
x <- c(1, 1, 0, 4, 2, 1, 3, 0, 0, 2); tally(x)
mean(x)
# Likelihood ratio statistic for H0: lambda = lambda0 in a Poisson model,
# W = 2 * (l(lambda.hat) - l(lambda0)), where lambda.hat = mean(x).
lrtStat <- function(x, lambda0 = 1) {
  n <- length(x)
  lambda.hat <- mean(x)
  loglik.at.mle  <- -n * lambda.hat + n * lambda.hat * log(lambda.hat)
  loglik.at.null <- -n * lambda0 + n * lambda.hat * log(lambda0)
  2 * (loglik.at.mle - loglik.at.null)
}
lrtStat(x)
pval <- 1 - pchisq(lrtStat(x), df = 1); pval
## ----pois-lrt02, fig.keep = "none"---------------------------------------
# We can express l() in terms of sufficient statistics
# Poisson log-likelihood (up to an additive constant) expressed through
# the sufficient statistics x.bar (sample mean) and n (sample size).
loglik.pois <- function(theta, x.bar = 1.4, n = 10) {
  n * x.bar * log(theta) - n * theta
}
ml.pois10 <-
maxLik2(loglik.pois, start = c(lambda = 1), x.bar = 1.4, n = 10)
plot(ml.pois10) %>% gf_labs(title = "n = 10")
## ----pois-lrt03----------------------------------------------------------
-hessian(ml.pois10) # I = - hessian
stdEr(ml.pois10) # I^(-1/2)(theta.hat)
(-hessian(ml.pois10))^(-1/2)
## ----pois-lrt04, fig.keep = "none"---------------------------------------
ml.pois100 <-
maxLik2(loglik.pois, start = c(lambda = 1), x.bar = 1.4, n = 100)
plot(ml.pois100) %>% gf_labs(title = "n = 100")
## ----pois-lrt04-fig, echo = FALSE----------------------------------------
# We can express l() in terms of sufficient statistics
loglik.pois <- function(theta, x.bar = 1.4, n = 10) {
- n * theta + n * x.bar * log(theta)
}
ml.pois10 <-
maxLik2(loglik.pois, start = c(lambda = 1), x.bar = 1.4, n = 10)
plot(ml.pois10) %>% gf_labs(title = "n = 10")
ml.pois100 <-
maxLik2(loglik.pois, start = c(lambda = 1), x.bar = 1.4, n = 100)
plot(ml.pois100) %>% gf_labs(title = "n = 100")
## ----pois-lrt05----------------------------------------------------------
-hessian(ml.pois100) # information
stdEr(ml.pois100)
## ----pois-mle, seed = 123------------------------------------------------
# generate 5000 samples of size 10
rdata <- do(5000) * rpois(10, 1)
statTally(x, rdata, lrtStat)
## ----pois-wald-----------------------------------------------------------
SE <- stdEr(ml.pois10); SE
z.star <- qnorm(0.975); z.star
1.4 + c(-1, 1) * z.star * SE
## ----pois-ci, tidy = FALSE-----------------------------------------------
# loglik.pois defined above
p <- function(t0) {
lrt.stat <- 2 * (loglik.pois(coef(ml.pois10)) - loglik.pois(t0))
1 - pchisq(lrt.stat, df = 1) # p-value
}
lo <- uniroot(function(t){p(t) - 0.05}, c(0, coef(ml.pois10))) %>% value()
hi <- uniroot(function(t){p(t) - 0.05}, c(10, coef(ml.pois10))) %>% value()
# confidence interval
c(lo, hi)
## ----pois-ci-plot, tidy = FALSE, fig.keep = "none"-----------------------
plot(ml.pois10, ci = c("wald", "li"), hline = TRUE)
## ----pois-ci-plot-fig, tidy = FALSE, echo = FALSE, cache = FALSE---------
plot(ml.pois10, ci = c("wald", "li"), hline = TRUE)
## ----wald-ci01, include = FALSE------------------------------------------
x <- 35; n <- 55
pi.hat <- x / n; pi.hat
SE <- sqrt(pi.hat * (1 - pi.hat) / n); SE
pi.hat + c(-1, 1) * qnorm(0.975) * SE
## ----binom-wald-ci-------------------------------------------------------
x <- 35; n <- 55
pi.hat <- x / n; pi.hat
SE <- sqrt(pi.hat * (1 - pi.hat) / n); SE
pi.hat + c(-1, 1) * qnorm(0.975) * SE
## ----binom-lci, tidy = FALSE---------------------------------------------
# Binomial log-likelihood kernel for x successes in n trials; NA outside
# [0, 1] so the optimizer treats such proportions as invalid.
# Vectorized in p via ifelse().
loglik.binom <- function(p, x, n) {
  out_of_range <- p < 0 | p > 1
  ifelse(out_of_range, NA, x * log(p) + (n - x) * log(1 - p))
}
pval_minus_critical <- function(pi0) {
2 * (loglik.binom(pi.hat, x, n) - loglik.binom(pi0, x, n)) -
qchisq(.95, df = 1)}
lo <- uniroot( pval_minus_critical, c(0, pi.hat)) %>% value()
hi <- uniroot( pval_minus_critical, c(pi.hat, 1)) %>% value()
c(lo, hi)
## ----binom-ci-compare-fig, echo = FALSE, warning = FALSE, cache = FALSE----
ml.binom <- maxLik2(loglik.binom, x = 35, n = 55, start = 0.5)
plot(ml.binom, ci = c("w", "l"), hline = TRUE) %>%
gf_labs(x = expression(pi))
## ----binom-odds-ci, tidy = FALSE-----------------------------------------
# Binomial log-likelihood reparameterized in terms of the odds theta:
# pi = theta / (1 + theta) and 1 - pi = 1 / (1 + theta).
loglik.binom2 <- function(theta, x, n) {
  p.success <- theta / (1 + theta)
  p.failure <- 1 / (1 + theta)
  x * log(p.success) + (n - x) * log(p.failure)
}
ml.binom2 <- maxLik2(loglik.binom2, start = (odds = 1), x = 35, n = 55)
coef(ml.binom2)
x <- 35; n <- 55; theta.hat <- 35 / 20; theta.hat
pval_minus_critical2 <- function(theta0) {
2 * (loglik.binom2(theta.hat, x, n) - loglik.binom2(theta0, x, n)) -
qchisq(.95, df = 1)
}
lo2 <-
uniroot(pval_minus_critical2, c(0, theta.hat)) %>% value()
hi2 <-
uniroot(pval_minus_critical2, c(theta.hat, 100)) %>% value()
c(lo2, hi2)
c(lo2, hi2) / (1 + c(lo2, hi2))
c(lo, hi) # interval computed previously, for comparison
## ----binom-odds-ci-fig, echo = FALSE, warning = FALSE--------------------
plot(ml.binom2, hline = TRUE) %>%
gf_lims(y = c(-45, -35)) %>%
gf_labs(x = "log odds")
## ----logodds01-sol-------------------------------------------------------
# Binomial log-likelihood parameterized by the log odds.
loglik.binom3 <- function(logodds, x, n) {
  odds_ratio <- exp(logodds)
  prob <- odds_ratio / (1 + odds_ratio) # inverse logit
  x * log(prob) + (n - x) * log(1 - prob)
}
ml.binom3 <-
maxLik2(loglik.binom3, x = 35, n = 55, start = c(logodds = 0))
logodds.hat <- coef(ml.binom3); logodds.hat
# W = 2 * difference in log likelihoods
W <- function(logodds) {
2 * (loglik.binom3(logodds.hat, x = 35, n = 55) -
loglik.binom3(logodds, x = 35, n = 55))
}
# W = 2 * difference in log likelihoods
pv <- function(logodds) {
1 - pchisq(2 * (loglik.binom3(logodds.hat, x = 35, n = 55) -
loglik.binom3(logodds, x = 35, n = 55)), df = 1)
}
# find endpoints of rejection region using W
lo <- uniroot(function(logodds) W(logodds) - qchisq(0.95, df = 1),
c(-100, logodds.hat)) %>% value()
hi <- uniroot(function(logodds) W(logodds) - qchisq(0.95, df = 1),
c(10, logodds.hat)) %>% value()
# find endpoints of rejection region using pval
lo2 <- uniroot(function(logodds) pv(logodds) - 0.05,
c(-100, logodds.hat)) %>% value()
hi2 <- uniroot(function(logodds) pv(logodds) - 0.05,
c(10, logodds.hat)) %>% value()
# Likelihood interval for log odds
c(lo, hi)
c(lo2, hi2)
# Wald interval for log odds
logodds.hat + c(-1, 1) * qnorm(0.975) * stdEr(ml.binom3)
## ----logodds02-sol, tidy = FALSE, warning=FALSE--------------------------
plot(ml.binom, hline = TRUE) %>%
gf_lims(y = c(-42, -35.5)) %>%
gf_labs(title = "parameter: proportion")
plot(ml.binom2, hline = TRUE) %>%
gf_lims(y = c(-42, -35.5)) %>%
gf_labs(title = "parameter: odds")
plot(ml.binom3, hline = TRUE) %>%
gf_lims(y = c(-42, -35.5)) %>%
gf_labs(title = "parameter: log odds")
## ----logodds03-sol-------------------------------------------------------
pi.hat <- coef(ml.binom); pi.hat
odds.hat <- pi.hat / (1 - pi.hat); odds.hat
coef(ml.binom2)
log(odds.hat)
coef(ml.binom3)
## ------------------------------------------------------------------------
# Poisson log-likelihood kernel in the rate lambda,
# expressed via the sufficient statistics x.bar and n.
l <- function(lambda, x.bar = 1.4, n = 10) {
  n * x.bar * log(lambda) - n * lambda
}
# The same log-likelihood reparameterized via theta = 1 / lambda.
l.star <- function(theta, x.bar = 1.4, n = 10) {
  n * x.bar * log(1 / theta) - n / theta
}
ml <- maxLik(l, start = c(lambda = 1))
ml.star <- maxLik(l.star, start = c(theta = 1))
coef(ml)
1/coef(ml)
coef(ml.star)
uniroot(
function(lambda0) 2 * (l(coef(ml)) - l(lambda0)) - qchisq(.95, 1),
c(0, 1.4)) %>% value()
1/ uniroot(
function(lambda0)
2 * (l(coef(ml)) - l(lambda0)) - qchisq(.95, 1),
c(0, 1.4)) %>% value()
uniroot(
function(theta0)
2 * (l.star(coef(ml.star)) - l.star(theta0)) - qchisq(.95, 1),
c(1/1.4, 10)) %>% value()
## ----faithful-lrt01------------------------------------------------------
data(geyser, package = "MASS")
snippet("faithful-mle01", echo = FALSE)
snippet("faithful-mle02", echo = FALSE)
# Restricted mixture log-likelihood with alpha fixed at 0.5; theta
# supplies the remaining four parameters (mu1, mu2, sigma1, sigma2).
# Delegates to loglik.faithful() defined earlier in this script.
loglik0.faithful <- function(theta, x) {
  loglik.faithful(c(0.5, theta), x)
}
## ----faithful-lrt02, tidy = FALSE----------------------------------------
# seed the algorithm
m <- mean( ~ duration, data = geyser)
s <- sd( ~ duration, data = geyser)
ml <- maxLik(loglik.faithful, x = geyser$duration,
start = c(0.5, m - 1, m + 1, s, s))
mle <- coef(ml); mle
loglik.faithful(mle, x = geyser$duration)
logLik(ml) # maxLik::logLik can calculate this log-likelihood for us
ml0 <- maxLik(loglik0.faithful, x = geyser$duration,
start = c(m - 1, m + 1, s, s))
mle0 <- coef(ml0); mle0
logLik(ml0)
lrt.stat <- 2 * (logLik(ml) - logLik(ml0)); lrt.stat
1 - pchisq(lrt.stat, df = 1) # p-value based on asymptotic distribution
## ----faithful-lrt03, echo = FALSE----------------------------------------
gf_dhistogram( ~ duration, data = geyser, binwidth = 0.20, alpha = 0.2,
fill = "navy") %>%
gf_function(fun = dmix,
args = list(
alpha = mle[1],
mu1 = mle[2], mu2 = mle[3],
sigma1 = mle[4], sigma2 = mle[5]),
color = "gray30"
) %>%
gf_function(fun = dmix,
args = list(
alpha = 0.5,
mu1 = mle0[1], mu2 = mle0[2],
sigma1 = mle0[3], sigma2 = mle0[4]),
linetype = "dashed"
)
## ----faithful-lrt04, tidy = FALSE----------------------------------------
ml0a <- maxLik(loglik.faithful, x = geyser$duration,
start = c(0.5, m - 1, m + 1, s, s),
fixed = 1) # first parameter is fixed at start value
coef(ml0a)
logLik(ml0a)
## ----laplace-------------------------------------------------------------
# Laplace (double exponential) density with location theta and rate lambda.
dlaplace <- function(x, theta, lambda) {
  deviation <- abs(x - theta)
  0.5 * lambda * exp(-lambda * deviation)
}
# two ways to do plaplace:
integrate(function(x) {dlaplace(x, 1, 2)}, -Inf, Inf) # should = 1
# Laplace CDF computed by numerical integration of the density from
# -Inf to q. Relies on dlaplace() defined just above; compare with the
# closed-form plaplace2() below (results should agree).
plaplace1 <- function(q, theta = 0, lambda = 1) {
  integrate(function(x) {dlaplace(x, theta, lambda)}, -Inf, q)$value
}
# Closed-form Laplace CDF built from the exponential CDF:
#   F(q) = 0.5 * (1 - pexp(theta - q, lambda))   for q <  theta
#   F(q) = 0.5 + 0.5 * pexp(q - theta, lambda)   for q >= theta
# Generalized to accept a vector q (the original `if` handled only a
# scalar q); scalar results are unchanged.
plaplace2 <- function(q, theta = 0, lambda = 1) {
  ifelse(q < theta,
         0.5 * (1 - pexp(theta - q, lambda)),
         0.5 + 0.5 * pexp(q - theta, lambda))
}
# should get same results either way:
plaplace1(3, lambda = 1, theta = 2)
plaplace1(3, lambda = 1, theta = 2) - plaplace1(0, lambda = 2, theta = 1)
plaplace2(3, lambda = 1, theta = 2)
plaplace2(3, lambda = 1, theta = 2) - plaplace2(0, lambda = 2, theta = 1)
## ----laplace-mle01-sol---------------------------------------------------
x <-
c(1.00, -1.43, 0.62, 0.87, -0.66, -0.59, 1.30, -1.23, -1.53, -1.94)
# Laplace log-likelihood with theta = c(location, rate): each observation
# contributes log(0.5) plus an exponential log-density of its absolute
# deviation from the location.
loglik.laplace <- function(theta, x) {
  location <- theta[1]
  rate <- theta[2]
  deviations <- abs(x - location)
  sum(log(0.5) + dexp(deviations, rate = rate, log = TRUE))
}
## ----laplace-mle02-sol---------------------------------------------------
ml.laplace <- maxLik(loglik.laplace, start = c(0, 1), x = x)
ml.laplace
## ----laplace-mle03-sol---------------------------------------------------
loglik.laplace2 <- function(theta, x) {
m <- theta[1]; lambda <- theta[2]
return(sum(log(dlaplace(x, m, lambda))))
}
ml.laplace2 <- maxLik(loglik.laplace2, start = c(0, 1), x = x)
ml.laplace2
## ----laplace-moments-sol-------------------------------------------------
# method of moments estimates
# estimate for theta is the sample mean since E(X) == est.theta:
est.theta = mean(x); est.theta
# estimate for variance satisfies v == 2 / est.lambda^2:
n <- length(x)
v <- var(x) * (n-1) / n
est.lambda <- sqrt(2 / v); est.lambda
## ----laplace-lrt01-------------------------------------------------------
# enter data
x <-
c(1.00, -1.43, 0.62, 0.87, -0.66, -0.59, 1.30, -1.23, -1.53, -1.94)
# Full-model Laplace log-likelihood; theta = c(location, rate).
loglik.laplace1 <- function(theta, x) {
  loc <- theta[1]
  rate <- theta[2]
  sum(log(0.5) + dexp(abs(x - loc), rate = rate, log = TRUE))
}
# Null-model Laplace log-likelihood with the location fixed at 0;
# theta supplies only the rate parameter.
loglik.laplace0 <- function(theta, x) {
  rate <- theta[1]
  sum(log(0.5) + dexp(abs(x), rate = rate, log = TRUE))
}
## ----laplace-lrt02-------------------------------------------------------
free <-
maxLik(loglik.laplace1, start = c(m = 0, lambda = 1), x = x); free
free.est <- coef(free)
## ----laplace-lrt03-------------------------------------------------------
null <-
maxLik(loglik.laplace0, start = c(lambda = 1), x = x); null
null.est <- coef(null)
## ----laplace-lrt04-------------------------------------------------------
w <-
2 * (loglik.laplace1(free.est, x) - loglik.laplace0(null.est, x))
w
1 - pchisq(w, df = 1) # p-value based on asymptotic distribution
## ----less-lazy00, include = FALSE----------------------------------------
theta <- 1.8
set.seed(123)
# d/p/q/r helpers for the density f(x) = (theta + 1) * x^theta on (0, 1).
# Random values are generated by inverse-CDF sampling at uniform points.
rfoo <- function(n, theta) {
  qfoo(runif(n), theta)
}
# CDF: F(q) = q^(theta + 1) on (0, 1).
pfoo <- function(q, theta) {
  q^(theta + 1)
}
# Quantile function: inverse of pfoo.
qfoo <- function(p, theta) {
  p^(1 / (theta + 1))
}
# Density, optionally on the log scale.
dfoo <- function(x, theta, log = FALSE) {
  if (log) {
    return(log(theta + 1) + theta * log(x))
  }
  (theta + 1) * x^theta
}
x <- round(rfoo(30, theta), 2); x
gf_dhistogram(~ x, binwidth = 0.1) %>%
gf_dist("foo", theta = 1.8)
## ----less-lazy-sol-------------------------------------------------------
x <-
c(0.64, 0.92, 0.73, 0.96, 0.98, 0.33, 0.80, 0.96, 0.81, 0.76,
0.98, 0.75, 0.87, 0.82, 0.44, 0.96, 0.61, 0.32, 0.67, 0.98,
0.96, 0.88, 0.85, 1.00, 0.86, 0.88, 0.80, 0.83, 0.64, 0.50)
n <- length(x); n
theta.hat <- -n / sum(log(x)) - 1; theta.hat
W_obs <- 2 * (n * log(theta.hat + 1) + theta.hat * sum(log(x))); W_obs
1 - pchisq(W_obs, df = 1)
## ------------------------------------------------------------------------
# Simulate one sample of size n from Unif(0, 1) -- i.e. theta = 0 in the
# model f(x) = (theta + 1) * x^theta -- then compute the MLE of theta and
# the LRT statistic W for testing H0: theta = 0 along with its
# asymptotic chi-squared(1) p-value. Returns a one-row tibble.
one_sim <- function(n = 30L) {
  x <- runif(n)
  # MLE: theta.hat = -n / sum(log(x)) - 1
  theta.hat <- -n / sum(log(x)) - 1; theta.hat
  # W = 2 * (l(theta.hat) - l(0)); the null log-likelihood l(0) is 0
  W <- 2 * (n * log(theta.hat + 1) + theta.hat * sum(log(x)))
  tibble(n = n, theta.hat = theta.hat, W = W, pval = 1 - pchisq(W, df = 1))
}
Sims <- do(5000) * one_sim()
head(Sims)
# estimated p-value
# more replications needed to approximate better
# but this crude estimate is at least consistent with the result above
prop1( ~(W >= W_obs), data = Sims)
binom.test( ~(W >= W_obs), data = Sims)
## ------------------------------------------------------------------------
gf_dhistogram(~ W, data = Sims, bins = 100) %>%
gf_dist("chisq", df = 1, color = "red") %>%
gf_lims(y = c(0,2))
gf_qq( ~ W, data = Sims, distribution = "qchisq", dparams = list(df = 1))
df_stats( ~ (W >= W_obs), data = Sims, props)
## ----faithful-ci, tidy = FALSE, warning=FALSE----------------------------
# loglik defined above
data(geyser, package = "MASS")
snippet("faithful-mle01", echo = FALSE)
snippet("faithful-mle02", echo = FALSE)
m <- mean( ~ duration, data = geyser)
s <- sd( ~ duration, data = geyser)
ml.faithful <- maxLik(loglik.faithful, x = geyser$duration,
start = c(0.5, m - 1, m + 1, s, s))
mle <- coef(ml.faithful)
p <- function(a) {
ml.faithful.a <- maxLik(loglik.faithful, x = geyser$duration,
start = c(a, m - 1, m + 1, s, s),
fixed = 1)
lrt.stat <- 2 * (logLik(ml.faithful) - logLik(ml.faithful.a))
pval <- 1 - pchisq(lrt.stat, df = 1)
return(pval)
}
lo <- uniroot(function(a){p(a) - 0.05}, c(0.1, mle[1])) %>% value(); lo
hi <- uniroot(function(a){p(a) - 0.05}, c(0.9, mle[1])) %>% value(); hi
## ----golfballs-max, eval = TRUE, tidy = FALSE, fig.show = "hide"---------
golfballs <- c(137, 138, 107, 104)
statTally(golfballs, rgolfballs, max,
xlab = "test statistic (max)")
## ----golfballs-max-fig, echo = FALSE, message = FALSE, results = "hide"----
golfballs <- c(137, 138, 107, 104)
statTally(golfballs, rgolfballs, max,
xlab = "test statistic (max)")
## ----golfballs-lrt1, digits = 4------------------------------------------
# LRT calculation
o <- golfballs; o
e <- rep(486 / 4, 4); e
G <- 2 * sum (o * log(o / e)); G # lrt Goodness of fit statistic
1 - pchisq(G, df = 3)
## ----golfballs-lrt2------------------------------------------------------
# function to compute G statistic from tabulated data
G <- function(o) {e <- rep(486 / 4, 4); 2 * sum (o * log(o / e))}
statTally(golfballs, rgolfballs, G)
## ----golfballs-pearson01-------------------------------------------------
E <- rep(486 / 4, 4)
chisqstat <- function(x) { sum((x - E)^2 / E) }
statTally(golfballs, rgolfballs, chisqstat, xlab = expression(X^2))
## ----golfballs-pearson02, digits = 4-------------------------------------
# manual calculation
o <- golfballs; o
e <- rep(486 / 4, 4); e
X <- sum ((o - e)^2 / e); X
1 - pchisq(X, df = 3)
# repeated using built-in method
chisq.test(o)
## ----golfballs-pearson03-------------------------------------------------
chisq.test(golfballs, simulate.p.value = TRUE, B = 10000)
## ----golfballs-complex01-------------------------------------------------
o
a <- sum(o[1:2]) / (2 * sum(o)); a
b <- sum(o[3:4]) / (2 * sum(o)); b
a + b # should equal 0.5
lnum <- 275 * log(a) + 211 * log(b)
ldenom <- sum(o * log (o/ sum(o)))
G <- -2 * (lnum - ldenom); G
1 - pchisq(G, df = 2)
## ----golfballs-complex02-------------------------------------------------
e <- c(a, a, b, b) * 486; e
2 * sum(o * log(o/e)) # lrt, same as above
sum( (o - e)^2 / e) # Pearson
## ----fit-bugs-pois01-----------------------------------------------------
o <- c(2, 10, 16, 11, 5, 3, 3)
o.collapsed <- c(2 + 10, 16, 11, 5, 3 + 3)
n <- sum(o)
m <- sum(o * 0:6) / n # mean count = MLE for lambda (full data)
p <- dpois(0:6, m)
p.collapsed <- c(p[1] + p[2], p[3:5], 1 - sum(p[1:5])) # collapsed probs
e.collapsed <- p.collapsed * n
cbind(o.collapsed, p.collapsed, e.collapsed)
lrt <- 2 * sum(o.collapsed * log(o.collapsed / e.collapsed)); lrt
pearson <- sum((o.collapsed - e.collapsed)^2 / e.collapsed); pearson
1-pchisq(lrt, df = 3)
1-pchisq(pearson, df = 3)
## ----fit-bugs-pois02-----------------------------------------------------
1-pchisq(pearson, df = 5-1)
1-pchisq(pearson, df = 5-1-1)
## ----fit-exp-------------------------------------------------------------
Edata <- c(18.0, 6.3, 7.5, 8.1, 3.1, 0.8, 2.4, 3.5, 9.5, 39.7,
3.4, 14.6, 5.1, 6.8, 2.6, 8.0, 8.5, 3.7, 21.2, 3.1,
10.2, 8.3, 6.4, 3.0, 5.7, 5.6, 7.4, 3.9, 9.1, 4.0)
n <- length(Edata)
theta.hat <- 1 / mean(Edata); theta.hat
cutpts <- c(0, 2.5, 6, 12, Inf)
bin.Edata <- cut(Edata, cutpts)
p <- diff(pexp(cutpts, theta.hat))
e <- n * p
o <- tally(bin.Edata)
print(cbind(o, e))
lrt <- 2 * sum(o * log(o / e)); lrt
pearson <- sum((o - e)^2 / e); pearson
1-pchisq(lrt, df = 2) # df = (4 - 1) - 1 [anti-conservative]
1-pchisq(pearson, df = 2)
1-pchisq(lrt, df = 3) # df = 4 - 1 [conservative]
1-pchisq(pearson, df = 3)
## ----GOF-sol01, tidy=FALSE-----------------------------------------------
# Goodness-of-fit test for a parametric model fit by maximum likelihood.
#
# Fits the model with maxLik(), bins the data at `cutpts`, compares
# observed and expected bin counts, and returns an "htest" object with
# either the LRT (G) statistic or Pearson's X^2 statistic.
#
# Arguments:
#   x          numeric data vector
#   lik        log-likelihood function(theta, x); default: normal model
#   pdist      CDF function(x, theta) matching `lik`; default: normal
#   start      starting parameter values for the optimizer
#   cutpts     bin boundaries (default: quartiles of x)
#   paramNames labels attached to the fitted parameter estimates
#   pearson    if TRUE use Pearson's X^2, otherwise the LRT statistic
#   ...        passed through to maxLik() (e.g. iterlim, method)
GOF <-
  function(
    x,
    lik = function(theta, x) {
      return(sum(dnorm(x, mean = theta[1], sd = theta[2], log = TRUE)))
    } ,
    pdist = function(x, theta) {
      return(pnorm(x, mean = theta[1], sd = theta[2]) )
    } ,
    start = c(0, 1), cutpts = quantile(x),
    paramNames = paste("parameter", 1:length(start)),
    pearson = FALSE, ...)
  {
    # fit the model by maximum likelihood
    ml <- maxLik(lik, start = start, x = x, ...)
    mle <- coef(ml)
    names(mle) <- paramNames
    # model-based bin probabilities and expected counts
    prob <- diff(pdist(cutpts, mle))
    n <- length(x)
    o <- tally(cut(x, cutpts))
    e <- prob * n
    pearsonStat <- sum((o - e)^2 / e)
    lrtStat <- 2 * sum(o * log(o / e))
    # NOTE(review): df = (#bins - 1) - #params, with #params hard-coded
    # as 2; for models with a different number of parameters this should
    # presumably be length(cutpts) - 2 - length(start) -- confirm.
    df = length(cutpts) - 2 - 2
    if (pearson) {
      pval <- 1- pchisq(pearsonStat, df = df)
      method= "Pearson Goodness of Fit Test"
      stat = pearsonStat
    } else {
      pval <- 1- pchisq(lrtStat, df = df)
      method= "LRT Goodness of Fit Test"
      stat = lrtStat
    }
    names(df) = "df"
    names(stat) = "X-squared"
    # report the optimizer's convergence status
    message(returnMessage(ml))
    # package the results as an "htest" object so print() formats nicely
    structure(list(
      nlmax = ml, statistic = stat,
      estimate = coef(ml), parameter = df,
      p.value = pval, method = method,
      data.name = deparse(substitute(x)),
      observed = o, expected = e,
      residuals = (o - e) / sqrt(e),
      table = cbind(o, e, prob),
      message = returnMessage(ml)
    ),
    class = "htest")
  }
## ----GOF-sol02-----------------------------------------------------------
data <- c(18.0, 6.3, 7.5, 8.1, 3.1, 0.8, 2.4, 3.5, 9.5, 39.7,
3.4, 14.6, 5.1, 6.8, 2.6, 8.0, 8.5, 3.7, 21.2, 3.1,
10.2, 8.3, 6.4, 3.0, 5.7, 5.6, 7.4, 3.9, 9.1, 4.0)
GOF(data, cutpts = c(0, 3, 6, 12, Inf), iterlim = 1000,
start = c(5, 5))$table
GOF(data, cutpts = c(0, 3, 6, 12, Inf), iterlim = 1000, start = c(5, 5))
GOF(data, cutpts = c(0, 3, 6, 12, Inf), iterlim = 1000, start = c(5, 5),
pearson = TRUE)
## ----gof-gamma-----------------------------------------------------------
oldopt <- options(warn = -1)
data <- c(18.0, 6.3, 7.5, 8.1, 3.1, 0.8, 2.4, 3.5, 9.5, 39.7,
3.4, 14.6, 5.1, 6.8, 2.6, 8.0, 8.5, 3.7, 21.2, 3.1,
10.2, 8.3, 6.4, 3.0, 5.7, 5.6, 7.4, 3.9, 9.1, 4.0)
# Gamma log-likelihood with theta = c(shape, rate).
gamlik <- function(theta, x) {
  sum(dgamma(x, theta[1], theta[2], log = TRUE))
}
# Gamma CDF wrapper matching the (x, theta) signature used by GOF().
pgamm <- function(x, theta) {
  pgamma(x, theta[1], theta[2])
}
GOF(data, gamlik, pgamm, start = c(1, 1), cutpts = c(0, 3, 6, 12, Inf))$table
GOF(data, gamlik, pgamm, start = c(1, 1), cutpts = c(0, 3, 6, 12, Inf))
GOF(data, gamlik, pgamm, start = c(1, 1), cutpts = c(0, 3, 6, 12, Inf),
pearson = TRUE)
options(oldopt)
## ----gof-weibull, warning=FALSE------------------------------------------
data <- c(18.0, 6.3, 7.5, 8.1, 3.1, 0.8, 2.4, 3.5, 9.5, 39.7,
3.4, 14.6, 5.1, 6.8, 2.6, 8.0, 8.5, 3.7, 21.2, 3.1,
10.2, 8.3, 6.4, 3.0, 5.7, 5.6, 7.4, 3.9, 9.1, 4.0)
# Weibull log-likelihood with theta = c(shape, scale).
weiblik <- function(theta, x) {
  sum(dweibull(x, theta[1], theta[2], log = TRUE))
}
# Weibull CDF wrapper matching the (x, theta) signature used by GOF().
pweib <- function(x, theta) {
  pweibull(x, theta[1], theta[2])
}
GOF(data, weiblik, pweib, start = c(1, 1), cutpts = c(0, 3, 6, 12, Inf))$table
GOF(data, weiblik, pweib, start = c(1, 1), cutpts = c(0, 3, 6, 12, Inf))
GOF(data, weiblik, pweib, start = c(1, 1), cutpts = c(0, 3, 6, 12, Inf),
pearson = TRUE)
## ----hwe-gof-sol, warning=FALSE------------------------------------------
# Hardy-Weinberg genotype probabilities from allele frequency theta:
# c(P(AA), P(Aa), P(aa)) = c(theta^2, 2 theta (1 - theta), (1 - theta)^2).
theta2probs <- function(theta) {
  other <- 1 - theta
  c(theta^2, 2 * theta * other, other^2)
}
# Hardy-Weinberg multinomial log-likelihood for genotype counts x;
# NA outside the valid parameter range so maxLik() skips the point.
loglik <- function(theta, x) {
  probs <- theta2probs(theta)
  if (any(probs < 0)) return (NA)
  # use the probabilities already computed rather than recomputing them
  # (the original called theta2probs(theta) a second time here)
  dmultinom(x, sum(x), probs, log = TRUE)
}
geno<-c(83, 447, 470)
ml <- maxLik(loglik, start = 0.5, x = geno); ml
theta.hat <- coef(ml); theta.hat
chisq.test(geno, p = theta2probs(theta.hat))
# so we can grab that statistic and redo the p-value:
X <- stat(chisq.test(geno, p = theta2probs(coef(ml)))); X
1 - pchisq(X, df = 2 - 1) # df = 2 for multinomial, 1 for model based on theta
## ----hwe-gof-man-sol-----------------------------------------------------
o <- geno
e <- theta2probs(theta.hat) * sum(o); e
testStats <- c(lrt = 2 * sum(o * log (o / e)), pearson= sum((o - e)^2 / e))
testStats
1-pchisq(testStats, df = 2-1)
## ----fisher-plants-sol, warning = FALSE----------------------------------
fisher.counts <- c(1997, 906, 904, 32)
# computes model probabilities from value of theta
# Cell probabilities for Fisher's linkage model: the four phenotype
# classes have probabilities (2 + theta, 1 - theta, 1 - theta, theta) / 4,
# which sum to 1 for any theta.
theta2probs <- function(theta) {
  c(2 + theta, 1 - theta, 1 - theta, theta) / 4
}
## ----fisher-plants-ll-sol, warning = FALSE-------------------------------
# direct calculation
# Log-likelihood for Fisher's linkage model written out directly from
# the multinomial kernel; NA outside the parameter space [0, 1].
# x holds the four phenotype-class counts.
loglik.fisher <- function(theta, x) {
  if (theta < 0 || theta > 1) {
    return(NA)
  }
  term1 <- x[1] * log(0.25 * (2 + theta))
  term2 <- (x[2] + x[3]) * log(0.25 * (1 - theta))
  term3 <- x[4] * log(0.25 * theta)
  term1 + term2 + term3
}
# using dmultinom()
# Same likelihood as loglik.fisher(), but delegating to dmultinom() with
# cell probabilities from theta2probs(); NA outside [0, 1].
loglik.fisher2 <- function(theta, x) {
  if (theta < 0 || theta > 1) return(NA)
  dmultinom(x, size = sum(x), prob = theta2probs(theta), log = TRUE)
}
## ----fisher-plants-a-sol, warning = FALSE--------------------------------
ml.fisher <- maxLik(loglik.fisher, start = 0.5, x = fisher.counts); ml.fisher
ml.fisher2 <- maxLik(loglik.fisher2, start = 0.5, x = fisher.counts); ml.fisher2
theta.hat <- coef(ml.fisher)
## ----fisher-plants-bcd-sol,warning = FALSE-------------------------------
# test a specific value of theta vs. best possible theta
testTheta <-
Vectorize(
vectorize.args = "theta0",
function(theta0, x) {
w <- 2 * (loglik.fisher(theta.hat, x) - loglik.fisher(theta0, x))
p.value <- 1 - pchisq(w, df = 1)
return(c(theta0 = theta0, w = w, p.value = p.value))
}
)
testTheta(c(0.03, 0.05, 0.07), x = fisher.counts) %>% t() %>% data.frame()
## ----fisher-plants-e-sol,warning = FALSE---------------------------------
o <- fisher.counts
e <- theta2probs(theta.hat) * sum(o)
testStats <- c(G = 2 * sum(o * log (o / e)), pearson = sum((o - e)^2 / e))
testStats
1-pchisq(testStats, df = 3-1)
## ----fisher-plants-chisq.test-sol----------------------------------------
chisq.test(fisher.counts, p = theta2probs(theta.hat))
# so we can grab that statistic and redo the p-value:
X <- chisq.test(fisher.counts, p = theta2probs(theta.hat)) %>% stat; X
1 - pchisq(X, df = 2)
## ----fisher-plants-f-sol, tidy=FALSE-------------------------------------
fisher.pval <-
Vectorize(
vectorize.args = "theta0",
function(theta0, x) {
w <- 2 * (loglik.fisher(theta.hat, x) - loglik.fisher(theta0, x))
1 - pchisq(w, df = 1)
}
)
lo <-
uniroot(
function(t0) fisher.pval(t0, fisher.counts) - 0.05, c(0, theta.hat)) %>%
value()
hi <-
uniroot(
function(t0) fisher.pval(t0, fisher.counts) - 0.05, c(1, theta.hat)) %>%
value()
c(lo, hi)
## ----fisher-plants-wald-sol----------------------------------------------
SE <- stdEr(ml.fisher); SE
theta.hat + c(-1, 1) * qnorm(0.975) * SE
## ----mendel-pea-cross-sol------------------------------------------------
o <- c(315, 102, 108, 31)
n <- sum(o)
e <- n * c(9, 3, 3, 1) / 16; e
G <- 2 * sum(o * log(o / e)); G
pval <- 1-pchisq(G, 3); pval
## ----family-smoking01----------------------------------------------------
# Cross-tabulate student smoking status against number of smoking parents.
smokeTab <- tally(student ~ parents, data = FamilySmoking)
smokeTab
## ----family-smoking02----------------------------------------------------
# Chi-squared test of independence computed "by hand":
# expected counts are (row total * column total) / grand total.
rowTotal <- rowSums(smokeTab); rowTotal
colTotal <- colSums(smokeTab); colTotal
grandTotal <- sum(smokeTab); grandTotal
e <- outer(rowTotal, colTotal) / grandTotal; e
o <- smokeTab
stat <- sum ((e - o)^2 / e); stat
pval <- 1 - pchisq(stat, df = 2); pval  # df = (rows-1)(cols-1) = 1*2
## ----family-smoking03, digits = 5----------------------------------------
# Same test via the built-in function (should match the by-hand result).
chisq.test(smokeTab)
## ----family-smoking04----------------------------------------------------
# Peek at the structure of an "htest" object.
attributes((chisq.test(smokeTab)))
## ----family-smoking05----------------------------------------------------
# xchisq.test() (mosaic) also prints observed/expected counts and residuals.
xchisq.test(smokeTab)
## ----family-smoking06, eval=FALSE, message = FALSE, opts.label = "figbig", cache = FALSE----
## vcd::mosaic( ~ student + parents,
##   data = FamilySmoking %>%
##     mutate( # abbreviate labels to fit plot better
##       student = c("NS", "S")[as.numeric(student)],
##       parents = c("0", "1", "2")[as.numeric(parents)]
##     ),
##   shade = TRUE)
## ----family-smoking06-fig, echo=FALSE, opts.label = "figbig", cache = FALSE----
# Shaded mosaic plot highlighting cells with large Pearson residuals.
vcd::mosaic( ~ student + parents,
  data = FamilySmoking %>%
    mutate( # abbreviate labels to fit plot better
      student = c("NS", "S")[as.numeric(student)],
      parents = c("0", "1", "2")[as.numeric(parents)]
    ),
  shade = TRUE)
## ----smoking-ads01, tidy = FALSE-----------------------------------------
# Smoking-experimentation vs ad-receptivity table; rows = experimenter status.
smTab <- rbind(NonExperimenter = c(171, 15, 148),
               Experimenter = c(89, 10, 132))
colnames(smTab) = c("Never", "Hardly Ever", "Sometimes or a lot")
smTab
chisq.test(smTab)
## ----smoking-ads02-------------------------------------------------------
xchisq.test(smTab)
## ----smoking-bbs01, tidy = FALSE-----------------------------------------
smTab2 <- rbind(NonExperimenter = c(34, 4, 296),
                Experimenter = c(15, 3, 213))
colnames(smTab2) <- c("Never", "Hardly ever", "Sometimes or a lot")
smTab2
chisq.test(smTab2)
## ----smoking-bbs02-------------------------------------------------------
# Small expected counts make the asymptotic chi-squared approximation
# questionable; use a simulated (Monte Carlo) p-value instead.
chisq.test(smTab2, simulate.p.value = TRUE, B = 5000)
## ----smoking-bbs03-------------------------------------------------------
# Alternative fix: drop the sparse middle column and re-test.
smTab2[, -2]
chisq.test(smTab2[, -2])
## ----phs-----------------------------------------------------------------
# Physicians' Health Study: aspirin vs placebo and heart-attack incidence.
phs <- cbind(c(104, 189), c(10933, 10845))
rownames(phs) <- c("aspirin", "placebo")
colnames(phs) <- c("heart attack", "no heart attack")
phs
xchisq.test(phs)
## ----chisq-twins, tidy=FALSE---------------------------------------------
# Criminal convictions among twins of criminals, by zygosity; compare the
# chi-squared test (with/without continuity correction) to Fisher's exact test.
convictions <- rbind(dizygotic = c(2, 15),
                     monozygotic = c(10, 3))
colnames(convictions) <- c("convicted", "not convicted")
convictions
chisq.test(convictions, correct = FALSE)
chisq.test(convictions) %>% pval()
fisher.test(convictions) %>% pval()
## ----amd-----------------------------------------------------------------
# AMD genotype counts; also test dominant and recessive collapsed models.
amd <- rbind(cases = c(27, 17, 6), controls = c(13, 46, 37))
dom <- rbind(cases = c(27 + 17, 6), controls = c(13 + 46, 37))
rec <- rbind(cases = c(27, 17 + 6), controls = c(13, 46 + 37))
chisq.test(amd)
chisq.test(dom)
chisq.test(rec)
## ----fusion1-merge-------------------------------------------------------
# merge fusion1 and pheno keeping only id's that are in both
Fusion1m <- merge(FUSION1, Pheno, by = "id", all = FALSE)
## ----fusion1-tally-geno--------------------------------------------------
tally( ~ t2d + genotype, data = Fusion1m)
## ----fusion1-tally-dose--------------------------------------------------
tally( ~ t2d + Gdose, data = Fusion1m)
## ----fusion1m-3models-sol------------------------------------------------
# Compare genotypic, dominant (Tdose >= 1), and recessive (Tdose <= 1) models.
tally(t2d ~ genotype, data = Fusion1m)
chisq.test(tally( ~ t2d + genotype, data = Fusion1m))
chisq.test(tally( ~ t2d + (Tdose >= 1), data = Fusion1m))
chisq.test(tally( ~ t2d + (Tdose <= 1), data = Fusion1m))
## ----nfl-bt01, tidy = FALSE----------------------------------------------
# Derive winner/loser columns for each 2007 NFL game from the score margin.
NFL <- NFL2007 %>% mutate(
  dscore = homeScore - visitorScore,
  winner = ifelse(dscore > 0, home, visitor),
  loser = ifelse(dscore > 0, visitor, home),
  homeTeamWon = dscore > 0
)
head(NFL, 3)
## ----nfl-bt02, tidy = FALSE, message = FALSE-----------------------------
# fit Bradley-Terry model
require(BradleyTerry2)
NFL.model <-
  BTm(cbind(homeTeamWon, !homeTeamWon), home, visitor,
      data = NFL, id = "team")
## ----nfl-bt03, tidy=FALSE------------------------------------------------
# Assemble a ratings table (ability estimate + SE + won/lost record).
bta <- BTabilities(NFL.model)
# NOTE(review): wins/losses are joined by position, assuming tally() orders
# teams the same way as rownames(bta) -- confirm the level ordering matches.
nflRatings<- data.frame(
  team = rownames(bta),
  rating = bta[, "ability"],
  se = bta[, "s.e."],
  wins = as.vector(tally( ~ winner, data = NFL)),
  losses = as.vector(tally( ~ loser, data = NFL))
)
row.names(nflRatings) <- NULL
nflRatings[rev(order(nflRatings$rating)), ]  # best-to-worst
## ----nfl-bt04------------------------------------------------------------
# Attach each game's winner/loser rating; pwinner = model probability that
# the actual winner would win.  An "upset" is a win by the lower-rated team.
# NOTE(review): indexing ratings by as.numeric(winner) assumes winner/loser
# are factors whose codes match row order of nflRatings -- verify.
NFL <- NFL %>%
  mutate(
    winnerRating = nflRatings$rating[as.numeric(winner)],
    loserRating = nflRatings$rating[as.numeric(loser)],
    upset = loserRating > winnerRating,
    pwinner = ilogit(winnerRating - loserRating))
# how big an upset was the Super Bowl?
NFL %>% tail(1)
## ----ncaa-bt01, tidy = FALSE---------------------------------------------
# NCAA 2009-10 regular season; flag neutral-site games from the notes field.
NCAA <- NCAAbb %>%
  filter(season == "2009-10", !postseason) %>%
  mutate(
    neutralSite = grepl("n", notes, ignore.case = TRUE), # at neutral site?
    homeTeamWon = hscore > ascore) # did home team win?
# remove teams that didn't play > 5 at home and > 5 away
# (typically div II teams that played a few div I teams)
h <- tally( ~ home, data = NCAA); a <- tally( ~ away, data = NCAA)
deleteTeams <- c(names(h[h <= 5]), names(a[a <= 5]))
NCAA <- NCAA %>%
  filter(!(home %in% deleteTeams | away %in% deleteTeams))
## ----ncaa-bt02, tidy = FALSE---------------------------------------------
# fit a Bradley-Terry model
require(BradleyTerry2)
NCAA.model <-
  BTm(cbind(homeTeamWon, 1 - homeTeamWon), home, away, data = NCAA)
## ----ncaa-bt03-----------------------------------------------------------
# look at top teams
BTabilities(NCAA.model) %>%
  as.data.frame() %>%
  mutate(team = row.names(BTabilities(NCAA.model))) %>%
  arrange(-ability) %>% head(6)
## ----ncaa-bt04-----------------------------------------------------------
require(BradleyTerry2)
# home team gets advantage unless on neutral court
NCAA$homeTeam <-
  data.frame(team = NCAA$home, at.home = 1 - NCAA$neutralSite)
NCAA$awayTeam <- data.frame(team = NCAA$away, at.home = 0)
# Refit with a home-court "order effect" via the at.home covariate.
NCAA.model2 <-
  BTm(cbind(homeTeamWon, 1-homeTeamWon),
      homeTeam, awayTeam, id = "team",
      formula = ~ team + at.home, data = NCAA)
## ----ncaa-bt05-----------------------------------------------------------
# the "order effect" is the coefficient on "at.home"
coef(NCAA.model2)["at.home"] -> oe; oe
# expressed as a multiplicative odds factor
exp(oe)
# prob home team wins if teams are "equal"
ilogit(oe)
## ----ncaa-bt06, tidy=FALSE-----------------------------------------------
# Rank teams by estimated ability (descending).
ab <-
  BTabilities(NCAA.model2)
ratings <-
  ab[order(-ab[, "ability"]), ]
ratings[1:13, ]
## ----ncaa-bt07-----------------------------------------------------------
ratings[14:30, ]
## ----ncaa2010-bt08, tidy = FALSE-----------------------------------------
# Probability that team1 beats team2 (neutral court) under a fitted
# Bradley-Terry model: inverse-logit of the difference in abilities.
#
# team1, team2: team names (row labels of the abilities matrix)
# model:        fitted BTm model; only used to default `abilities`
# abilities:    matrix whose "ability" column holds the estimates,
#               e.g. BTabilities(model) or a reordered copy of it
compareTeams <-
  function(team1, team2, model,
           abilities = BTabilities(model)) {
    # index the "ability" column by name rather than position so a
    # reordered or augmented abilities matrix still works
    a <- abilities[team1, "ability"]
    b <- abilities[team2, "ability"]
    return(ilogit(a - b))
  }
# argument name spelled out (was `ab =`, which relied on partial matching)
compareTeams("Kansas", "Kentucky", abilities = ratings)
compareTeams("Butler", "Michigan St.", abilities = ratings)
compareTeams("Butler", "Duke", abilities = ratings)
## ----lady-bayes01, fig.keep = "none", warning = FALSE--------------------
# Posterior densities for the lady-tasting-tea example under two data sets:
# 18/20 correct -> Beta(19, 3); 9/10 correct -> Beta(10, 2) (uniform prior).
gf_dist("beta", shape1 = 19, shape2 = 3, xlim = c(0.4, 1)) %>%
  gf_labs(title = "Beta(19, 3) posterior") %>%
  gf_lims(y = c(0, 6))
gf_dist("beta", shape1 = 10, shape2 = 2, xlim = c(0.4, 1)) %>%
  gf_labs(title = "Beta(10, 2) posterior") %>%
  gf_lims(y = c(0, 6))
## ----lady-bayes01-fig, echo = FALSE, warning = FALSE, results = "hide"----
gf_dist("beta", shape1 = 19, shape2 = 3, xlim = c(0.4, 1)) %>%
  gf_labs(title = "Beta(19, 3) posterior") %>%
  gf_lims(y = c(0, 6))
gf_dist("beta", shape1 = 10, shape2 = 2, xlim = c(0.4, 1)) %>%
  gf_labs(title = "Beta(10, 2) posterior") %>%
  gf_lims(y = c(0, 6))
## ----lady-bayes02, fig.keep = "none", warning = FALSE--------------------
# Approximate each posterior by drawing a large sample from it.
posterior_sample20 <- rbeta(1e5, shape1 = 19, shape2 = 3)
posterior_sample10 <- rbeta(1e5, shape1 = 10, shape2 = 2)
gf_dhistogram( ~ posterior_sample20, binwidth = 0.01) %>%
  gf_labs(title = "Sampling from a Beta(19, 3) posterior") %>%
  gf_lims(x = c(0.4, 1), y = c(0,6))
gf_dhistogram( ~ posterior_sample10, binwidth = 0.01, xlim = c(0.4, 1)) %>%
  gf_labs(title = "Sampling from a Beta(10, 2) posterior") %>%
  gf_lims(x = c(0.4, 1), y = c(0,6))
## ----lady-bayes02-fig, echo = FALSE, warning = FALSE, results = "hide"----
posterior_sample20 <- rbeta(1e5, shape1 = 19, shape2 = 3)
posterior_sample10 <- rbeta(1e5, shape1 = 10, shape2 = 2)
gf_dhistogram( ~ posterior_sample20, binwidth = 0.01) %>%
  gf_labs(title = "Sampling from a Beta(19, 3) posterior") %>%
  gf_lims(x = c(0.4, 1), y = c(0,6))
gf_dhistogram( ~ posterior_sample10, binwidth = 0.01, xlim = c(0.4, 1)) %>%
  gf_labs(title = "Sampling from a Beta(10, 2) posterior") %>%
  gf_lims(x = c(0.4, 1), y = c(0,6))
## ----lady-bayes03, fig.keep = "none"-------------------------------------
# Posterior probabilities P(pi > 0.5), P(pi > 0.7), P(pi > 0.9).
1 - xpbeta(c(0.5, 0.7, 0.9), shape1 = 19, shape2 = 3,
           xlim = c(0.4, 1),
           refinements = list(
             labs(title = "Beta(19,3)"),
             scale_fill_brewer(type = "qual", palette = 3)
           ))
# NOTE(review): title says "Beta(9,2)" but the parameters are shape1 = 10,
# shape2 = 2 -- the label probably should read "Beta(10,2)".
1 - xpbeta(c(0.5, 0.7, 0.9), shape1 = 10, shape2 = 2,
           xlim = c(0.4, 1),
           refinements = list(
             labs(title = "Beta(9,2)"),
             scale_fill_brewer(type = "qual", palette = 3)
           ))
## ----lady-bayes03-fig, echo = FALSE, results = "hide"--------------------
1 - xpbeta(c(0.5, 0.7, 0.9), shape1 = 19, shape2 = 3,
           xlim = c(0.4, 1),
           refinements = list(
             labs(title = "Beta(19,3)"),
             scale_fill_brewer(type = "qual", palette = 3)
           ))
1 - xpbeta(c(0.5, 0.7, 0.9), shape1 = 10, shape2 = 2,
           xlim = c(0.4, 1),
           refinements = list(
             labs(title = "Beta(9,2)"),
             scale_fill_brewer(type = "qual", palette = 3)
           ))
## ----lady-bayes04--------------------------------------------------------
# Three point estimates from each posterior: median, mean, and MAP (mode).
# median of posterior
qbeta(0.5, shape1 = 19, shape2 = 3)
qbeta(0.5, shape1 = 10, shape2 = 2)
# median of posterior -- approximated by sampling
median(posterior_sample20)
median(posterior_sample10)
# mean of posterior -- approximated by sampling
mean(posterior_sample20)
mean(posterior_sample10)
# MAP
nlmax(function(x) dbeta(x, shape1 = 19, shape2 = 3), p = 0.5) %>% value()
nlmax(function(x) dbeta(x, shape1 = 10, shape2 = 2), p = 0.5) %>% value()
## ----lady-bayes05--------------------------------------------------------
# 90% credible intervals two ways: central (equal-tail) and HPDI.
# interval formed by removing equal tail probabilities
cdata( ~ posterior_sample20, 0.90)
cdata( ~ posterior_sample10, 0.90)
# HPDI
coda::HPDinterval(coda::as.mcmc(posterior_sample20), 0.90)
rethinking::HPDI(posterior_sample20, 0.90)
coda::HPDinterval(coda::as.mcmc(posterior_sample10), 0.90)
rethinking::HPDI(posterior_sample10, 0.90)
## ----lady-bayes06--------------------------------------------------------
# Frequentist confidence intervals for comparison with the credible intervals.
binom.test(18, 20, conf.level = 0.90) %>% confint()
binom.test(9, 10, conf.level = 0.90) %>% confint()
## ----binom-bayes01-------------------------------------------------------
# 38 successes in 100 trials, uniform prior -> Beta(39, 63) posterior.
qbeta(c(0.025, 0.975), 38 + 1, 62 + 1)
binom.test(38, 100) %>% confint() # for comparison
prop.test(38, 100) %>% confint() # for comparison
## ----binom-bayes02-------------------------------------------------------
1- pbeta(0.5, 38 + 1, 62 + 1) # 1-sided Bayesian p-value
binom.test(38, 100, alt = "less") %>% pval() # for comparison
prop.test(38, 100, alt = "less") %>% pval() # for comparison
## ----bayes-normal--------------------------------------------------------
# Sample of n = 16 observations used for the conjugate normal-mean example.
x <- c(20, 24, 27, 28, 28, 28, 29, 30,
       30, 30, 30, 32, 33, 34, 35, 38)
mean(x)
sd(x)
# Conjugate Bayesian update for a normal mean with known sigma.
#
# x:      observed data (numeric vector)
# mu0:    prior mean for mu
# sigma0: prior standard deviation for mu
# sigma:  known data standard deviation (default 5)
#
# Returns a one-row matrix with the posterior mean and SD plus the three
# precisions (posterior, prior, data), illustrating that the posterior
# precision is the sum of the prior and data precisions.
posterior <- function(x, mu0, sigma0, sigma = 5) {
  n <- length(x)
  prior_precision <- 1 / sigma0^2     # information in the prior
  data_precision <- n / sigma^2       # information in the data
  post_precision <- prior_precision + data_precision
  # posterior mean = precision-weighted average of sample mean and prior mean
  mu1 <- (n * mean(x) / sigma^2 + mu0 / sigma0^2) / post_precision
  sigma1 <- sqrt(1 / post_precision)
  precision1 <- post_precision
  precision0 <- prior_precision
  precision.data <- data_precision
  cbind(mu1, sigma1, precision1, precision0, precision.data)
}
# Increasingly diffuse priors (sigma0 = 1, 4, 16, 1000): as the prior widens,
# the posterior is dominated by the data.
posterior(x, 20, 1)
posterior(x, 20, 4)
posterior(x, 20, 16)
posterior(x, 20, 1000)
# with very diffuse prior, all the precision is coming from the data
# so sigma1 is sigma/sqrt(n)
5 / sqrt(length(x))
## ----binom-grid, fig.keep = "none"---------------------------------------
# Grid approximation of a binomial posterior (20/50 successes, uniform prior);
# sample from the grid weighted by the (unnormalized) posterior.
BinomGrid <-
  expand.grid(pi = seq(0, 1, by = 0.001)) %>%
  mutate(
    prior = 1, # uniform prior
    likelihood = dbinom(20, size = 50, prob = pi),
    posterior = prior * likelihood # kernel of posterior
  )
posterior_sample <-
  with(BinomGrid, sample(pi, size = 1e5, prob = posterior, replace = TRUE))
# credible interval
cdata(~posterior_sample, 0.95) # central 95% credible interval
# compare with analytical result above
gf_dhistogram( ~ posterior_sample, binwidth = 0.02, alpha = 0.4) %>%
  gf_dist("beta", shape1 = 21, shape2 = 31, color = "navy")
qbeta(c(0.025, 0.975), shape1 = 21, shape2 = 31)
## ----binom-grid-fig, fig.keep = "last", echo = FALSE, results = "hide"----
BinomGrid <-
  expand.grid(pi = seq(0, 1, by = 0.001)) %>%
  mutate(
    prior = 1, # uniform prior
    likelihood = dbinom(20, size = 50, prob = pi),
    posterior = prior * likelihood # kernel of posterior
  )
posterior_sample <-
  with(BinomGrid, sample(pi, size = 1e5, prob = posterior, replace = TRUE))
# credible interval
cdata(~posterior_sample, 0.95) # central 95% credible interval
# compare with analytical result above
gf_dhistogram( ~ posterior_sample, binwidth = 0.02, alpha = 0.4) %>%
  gf_dist("beta", shape1 = 21, shape2 = 31, color = "navy")
qbeta(c(0.025, 0.975), shape1 = 21, shape2 = 31)
## ----normal-grid01-------------------------------------------------------
# 2-D grid approximation for (mu, sigma) of a normal model.
x <- c(20, 24, 27, 28, 28, 28, 29, 30,
       30, 30, 30, 32, 33, 34, 35, 38)
NormalGrid <-
  expand.grid(
    mu = seq(20, 40, length.out = 200),
    sigma = seq(0.1, 15, length.out = 200) # avoid sigma = 0 here
  ) %>%
  mutate(
    prior = dnorm(mu, 0, 20) * dgamma(sigma, shape = 3, rate = 1/3),
    likelihood = mapply(
      function(m, s) {prod(dnorm(x, mean = m, sd = s))},
      m = mu, s = sigma),
    posterior = prior * likelihood
  )
# grid point with the largest posterior ~ joint MAP estimate
NormalGrid %>%
  arrange(-posterior) %>%
  head(3)
## ----normal-grid02-------------------------------------------------------
# Sample whole grid rows with probability proportional to the posterior.
# NOTE(review): this relies on a sample() method for data frames (mosaic);
# base::sample() would not accept a data frame here.
PosteriorSample <-
  sample(NormalGrid, size = 1e5, replace = TRUE,
         prob = NormalGrid$posterior)
## ----normal-grid03, fig.keep = "none"------------------------------------
# Marginal posteriors for mu and sigma.
gf_histogram( ~ mu, data = PosteriorSample, binwidth = 0.2)
gf_histogram( ~ sigma, data = PosteriorSample, binwidth = 0.3)
## ----normal-grid04, fig.keep = "none"------------------------------------
# Joint posterior as scatter plus density contours.
gf_point(sigma ~ mu, data = PosteriorSample,
         size = 0.4, alpha = 0.15) %>%
  gf_density2d()
## ----normal-grid04a, include = FALSE-------------------------------------
gf_hex(sigma ~ mu, data = PosteriorSample,
       bins = 50) %>%
  gf_density2d(color = "white", alpha = 0.5) %>%
  gf_refine(scale_fill_gradient2(midpoint = 400, low = "gray80", mid = "skyblue", high = "navy")) %>%
  gf_theme(legend.position = "top")
## ----normal-grid05-------------------------------------------------------
# 90% credible interval for the mean
cdata( ~ mu, data = PosteriorSample, p = 0.90)
rethinking::HPDI(PosteriorSample$mu, 0.90)
# 90% CI for comparison
t.test(x, conf.level = 0.90) %>% confint()
# 90% credible interval for the standard deviation
cdata( ~ sigma, data = PosteriorSample, p = 0.90)
rethinking::HPDI(PosteriorSample$sigma, 0.90)
## ----normal-grid03-fig, echo = FALSE-------------------------------------
gf_histogram( ~ mu, data = PosteriorSample, binwidth = 0.2)
gf_histogram( ~ sigma, data = PosteriorSample, binwidth = 0.3)
## ----normal-grid04-fig, echo = FALSE-------------------------------------
gf_point(sigma ~ mu, data = PosteriorSample,
         size = 0.4, alpha = 0.15) %>%
  gf_density2d()
## ----prior-compare, fig.keep = "none"------------------------------------
# Compare how four different priors change the posterior for the same data
# (38/100 successes); posteriors are normalized within each prior's facet.
CoinGrid <-
  expand.grid(
    pi = seq(0, 1, by = 0.001),
    prior_name =
      c("Unif(0, 1)", "Beta(20, 10)", "Tri(0.3, 0.7)", "Unif(0.3, 0.7)")) %>%
  mutate(
    prior =
      case_when(
        prior_name == "Unif(0, 1)" ~ dunif(pi, 0, 1),
        prior_name == "Beta(20, 10)" ~ dbeta(pi, 20, 10),
        prior_name == "Tri(0.3, 0.7)" ~
          triangle::dtriangle(pi, a = 0.3, b = 0.7, c = 0.5),
        prior_name == "Unif(0.3, 0.7)" ~ dunif(pi, 0.3, 0.7)
      ),
    likelihood = dbinom(38, size = 100, prob = pi)) %>%
  group_by(prior_name) %>%
  mutate(posterior = prior * likelihood / sum(prior * likelihood) * 1000)
gf_line(prior ~ pi, color = ~ "prior", data = CoinGrid) %>%
  gf_line(posterior ~ pi, color = ~ "posterior", data = CoinGrid) %>%
  gf_facet_wrap( ~ prior_name) %>%
  gf_theme(legend.position = "top") %>%
  gf_refine(guides(color = guide_legend("distribution: ")))
## ----prior-compare-fig, echo = FALSE, opts.label = "figbig"--------------
CoinGrid <-
  expand.grid(
    pi = seq(0, 1, by = 0.001),
    prior_name =
      c("Unif(0, 1)", "Beta(20, 10)", "Tri(0.3, 0.7)", "Unif(0.3, 0.7)")) %>%
  mutate(
    prior =
      case_when(
        prior_name == "Unif(0, 1)" ~ dunif(pi, 0, 1),
        prior_name == "Beta(20, 10)" ~ dbeta(pi, 20, 10),
        prior_name == "Tri(0.3, 0.7)" ~
          triangle::dtriangle(pi, a = 0.3, b = 0.7, c = 0.5),
        prior_name == "Unif(0.3, 0.7)" ~ dunif(pi, 0.3, 0.7)
      ),
    likelihood = dbinom(38, size = 100, prob = pi)) %>%
  group_by(prior_name) %>%
  mutate(posterior = prior * likelihood / sum(prior * likelihood) * 1000)
gf_line(prior ~ pi, color = ~ "prior", data = CoinGrid) %>%
  gf_line(posterior ~ pi, color = ~ "posterior", data = CoinGrid) %>%
  gf_facet_wrap( ~ prior_name) %>%
  gf_theme(legend.position = "top") %>%
  gf_refine(guides(color = guide_legend("distribution: ")))
## ----google-map, eval = FALSE--------------------------------------------
## Positions <- rgeo(10)
## googleMap(position = Positions, mark = TRUE)
## ----dispersion01-sol----------------------------------------------------
# Dispersion test for Poisson data: T = (n-1) * s^2 / x-bar is approximately
# chi-squared with n-1 df under the Poisson (variance = mean) hypothesis.
val <- c(0,1,2,3,4)
frequency<- c(9,2,3,0,1)
n <- sum(frequency); n
x.bar <- sum(val * frequency) / n; x.bar
v <- sum(frequency * (val - x.bar)^2) / (n - 1); v
# NOTE(review): assigning to T masks the built-in TRUE alias for the rest of
# this script -- harmless here, but a known R footgun.
T <- 14 * v / x.bar; T
1- pchisq(T, 14)
## ----dispersion02-sol, seed = 12345--------------------------------------
# Simulation check of the chi-squared approximation for several lambdas.
T <- function(x) c(T = var(x) / mean(x))
# one-sided p-values
Sims1 <- (do(10000) * T(rpois(15, lambda = 1))) %>% mutate(p.val = 1 - pchisq(14 * T, df = 14))
Sims5 <- (do(10000) * T(rpois(15, lambda = 5))) %>% mutate(p.val = 1 - pchisq(14 * T, df = 14))
Sims50 <- (do(10000) * T(rpois(15, lambda = 50))) %>% mutate(p.val = 1 - pchisq(14 * T, df = 14))
# It isn't necessary to multiply T by (n-1) to assess linearity in a qq-plot
gf_qq(~ T, data = Sims1, distribution = qchisq, dparams = list(df = 14), geom = "line") %>%
  gf_labs(title = "lambda = 1")
gf_qq(~ T, data = Sims5, distribution = qchisq, dparams = list(df = 14), geom = "line") %>%
  gf_labs(title = "lambda = 5")
gf_qq(~ T, data = Sims50, distribution = qchisq, dparams = list(df = 14), geom = "line") %>%
  gf_labs(title = "lambda = 50")
# now we compare p-values to uniform distribution, zooming in on small p-values
gf_qq( ~ p.val, data = Sims1, distribution = qunif, geom = "line") %>%
  gf_abline(slope = 1, intercept = 0, alpha = 0.5, color = "red") %>%
  gf_labs(title = "lambda = 1") %>%
  gf_lims(x = c(0, 0.1), y = c(0, 0.1))
gf_qq( ~ p.val, data = Sims5, distribution = qunif, geom = "line") %>%
  gf_abline(slope = 1, intercept = 0, alpha = 0.5, color = "red") %>%
  gf_labs(title = "lambda = 5") %>%
  gf_lims(x = c(0, 0.1), y = c(0, 0.1))
gf_qq( ~ p.val, data = Sims50, distribution = qunif, geom = "line") %>%
  gf_abline(slope = 1, intercept = 0, alpha = 0.5, color = "red") %>%
  gf_labs(title = "lambda = 50") %>%
  gf_lims(x = c(0, 0.1), y = c(0, 0.1))
## ----LinearModels, child="LinearModels.Rnw", eval=includeChapter[6]------
## ----LM-setup, include = FALSE, cache = FALSE----------------------------
# Chapter 6 (Linear Models) setup.
require(fastR2)
knitr::opts_chunk$set(cache.path = "cache/LM-")
## ----eval = FALSE--------------------------------------------------------
# The next several eval=FALSE chunks illustrate model-formula syntax only.
## response ~ predictor1 + predictor2 + predictor3
## ----eval = FALSE--------------------------------------------------------
## response ~ 1 + predictor1 + predictor2 + predictor3
## ----eval = FALSE--------------------------------------------------------
## response ~ -1 + predictor1 + predictor2 + predictor3
## ----eval = FALSE--------------------------------------------------------
## y ~ 1 # note: explicit intercept is the only term on right
## ----eval = FALSE--------------------------------------------------------
## length ~ weight # using implicit intercept
## length ~ 1 + weight # using explicit intercept
## ----eval = FALSE--------------------------------------------------------
## height ~ sex # using implicit intercept
## height ~ 1 + sex # using explicit intercept
## ----eval = FALSE--------------------------------------------------------
## cholesterol ~ age + I(age^2) # see below for explanation of I()
## cholesterol ~ poly(age,2) # 2nd degree polynomial in age
## ----eval = FALSE--------------------------------------------------------
## gpa ~ SATM + SATV
## gpa ~ I(SATM + SATV)
## ----small-data01, fig.keep = "none"-------------------------------------
# Tiny 4-point data set used to work through least squares by hand.
SmallData <- data.frame(x = c(1, 2, 3, 4), y = c(2, 5, 6, 8))
gf_point(y ~ x, data = SmallData)
## ----small-data01-fig, echo = FALSE--------------------------------------
SmallData <- data.frame(x = c(1, 2, 3, 4), y = c(2, 5, 6, 8))
gf_point(y ~ x, data = SmallData)
## ----small-data02, fig.keep = "none"-------------------------------------
model <- lm(y ~ x , data = SmallData)
gf_lm(y ~ x, data = SmallData) %>%
  gf_point()
## ----small-data02-fig, echo = FALSE--------------------------------------
model <- lm(y ~ x , data = SmallData)
gf_lm(y ~ x, data = SmallData) %>%
  gf_point()
## ----small-data03--------------------------------------------------------
# Standard extractors for an lm object.
coef(model) # the coefficients
fitted(model) # y-hat values
resid(model) # residuals
SmallData$y - fitted(model) # residuals again
msummary(model) # a summary of the model
## ----small-data04--------------------------------------------------------
# What lives inside lm and summary.lm objects.
names(model)
names(msummary(model))
model$rank # number of linearly independent cols in model matrix
summary(model)$sigma
## ----small-data05--------------------------------------------------------
# Slope and intercept from the correlation formulation: b1 = r * sy/sx.
x <- SmallData$x; y <- SmallData$y
Sxx <- sum((x - mean(x))^2); Sxx
Sxy <- sum((x - mean(x)) * (y - mean(y))); Sxy
r <- 1 / 3 * sum((x - mean(x)) / sd(x) * (y - mean(y)) / sd(y)); r
slope <- r * sd(y) / sd(x); slope
intercept <- mean(y) - slope * mean(x); intercept
## ----small-data06--------------------------------------------------------
# Geometric (projection) view of least squares: project y onto the span of
# the constant vector v0 and the centered predictor v1.
# set up the v and u vectors
v0 <- rep(1, 4); v0
u0 <- v0 / vlength(v0); u0
v1 <- x - mean(x); v1
u1 <- v1 / vlength(v1); u1
#
# projecting into the model space
project(y, v0)
project(y, v1)
project(y, v0) + project(y, v1) # fitted values
fitted(model)
#
# two ways to compute beta_1-hat
b1 <- dot(y, v1) / (vlength(v1))^2; b1
b1 <- dot(y, u1) / (vlength(v1)); b1
#
# two ways to compute alpha_0-hat
a0 <- dot(y, v0) / (vlength(v0))^2; a0
a0 <- dot(y, u0) / (vlength(v0)); a0
#
# beta_0-hat
b0 <- a0 - b1 * mean(x); b0
## ----small-data07--------------------------------------------------------
# Matrix view: beta-hat = (X'X)^{-1} X' y; H = X (X'X)^{-1} X' is the hat matrix.
# create model matrix
x <- SmallData$x; y <- SmallData$y
x
y
intercept <- rep(1, 4)
X <- cbind(intercept, x); X
# estimate coefficients
B <- solve(t(X) %*% X) %*% t(X)
B %*% y
# compute fitted values
H <- X %*% B
H %*% y
## ----small-data08--------------------------------------------------------
X <- model.matrix(model); X
## ----trebuchet01, fig.show="hide"----------------------------------------
# Simple linear regression: distance flown vs projectile weight.
treb.model <- lm(distance ~ projectileWt, data = Trebuchet2)
coef(treb.model)
gf_point(distance ~ projectileWt, data = Trebuchet2) %>%
  gf_lm()
## ----trebuchet01-fig, echo=FALSE, results = "hide"-----------------------
treb.model <- lm(distance ~ projectileWt, data = Trebuchet2)
coef(treb.model)
gf_point(distance ~ projectileWt, data = Trebuchet2) %>%
  gf_lm()
## ----trebuchet-sol, fig.keep = "last"------------------------------------
# Sensitivity check: refit after removing the most negative residual.
idx <- which.min(resid(treb.model))
lm(distance ~ projectileWt, data = Trebuchet2[-idx, ]) %>%
  coef()
treb.model %>% coef()
gf_point(distance ~ projectileWt, data = Trebuchet2[-idx, ]) %>%
  gf_lm() %>%
  gf_lm(data = Trebuchet2, color = "red")
## ----elasticband01, fig.show = "hide"------------------------------------
data(elasticband, package = "DAAG")
eband.model <- lm(distance ~ stretch, data = elasticband)
coef(eband.model)
gf_lm(distance ~ stretch, data = elasticband) %>%
  gf_point()
## ----elasticband01-fig, results = "hide", echo = FALSE-------------------
data(elasticband, package = "DAAG")
eband.model <- lm(distance ~ stretch, data = elasticband)
coef(eband.model)
gf_lm(distance ~ stretch, data = elasticband) %>%
  gf_point()
## ----regression01, tidy = FALSE, fig.keep = "none"-----------------------
# One son per family (to avoid within-family dependence), then regress
# son's height on father's height.
GaltonBoys <-
  Galton %>%
  filter(sex == "M") %>%
  group_by(family) %>%
  sample_n(1) %>%
  ungroup()
Galton.lm <- lm(height ~ father, data = GaltonBoys)
coef(Galton.lm)
gf_lm(height ~ father, data = GaltonBoys) %>%
  gf_jitter(alpha = 0.4)
## ----regression01-fig, echo = FALSE--------------------------------------
GaltonBoys <-
  Galton %>%
  filter(sex == "M") %>%
  group_by(family) %>%
  sample_n(1) %>%
  ungroup()
Galton.lm <- lm(height ~ father, data = GaltonBoys)
coef(Galton.lm)
gf_lm(height ~ father, data = GaltonBoys) %>%
  gf_jitter(alpha = 0.4)
## ----regression02--------------------------------------------------------
favstats( ~ height, data = GaltonBoys)
favstats( ~ father, data = GaltonBoys)
## ----regression03--------------------------------------------------------
# makeFun() turns the fitted model into a prediction function.
predictedHeight <- makeFun(Galton.lm)
predictedHeight(father = 75)
predictedHeight(father = 65)
## ----regression04, tidy = FALSE------------------------------------------
# "Midparent" height = average of father's and mother's heights.
GaltonBoys <-
  GaltonBoys %>%
  mutate(midparent = (father + mother) / 2)
favstats( ~ height, data = GaltonBoys)
favstats( ~ midparent, data = GaltonBoys)
## ----regression05, tidy = FALSE, fig.keep = "none"-----------------------
# Regression to the mean: on the z-score scale the fitted slope is r < 1,
# so it is flatter than the reference line with slope 1.
GaltonBoys <-
  GaltonBoys %>%
  mutate(zheight = zscore(height),
         zmidparent = zscore(midparent)
  )
Galtonz.lm <- lm(zheight ~ zmidparent, data = GaltonBoys)
coef(Galtonz.lm)
gf_lm(zheight ~ zmidparent, data = GaltonBoys) %>%
  gf_jitter(alpha = 0.4) %>%
  gf_abline(slope = 1, intercept = 0, alpha = 0.4)
## ----regression05-fig, echo = FALSE, results = "hide"--------------------
GaltonBoys <-
  GaltonBoys %>%
  mutate(zheight = zscore(height),
         zmidparent = zscore(midparent)
  )
Galtonz.lm <- lm(zheight ~ zmidparent, data = GaltonBoys)
coef(Galtonz.lm)
gf_lm(zheight ~ zmidparent, data = GaltonBoys) %>%
  gf_jitter(alpha = 0.4) %>%
  gf_abline(slope = 1, intercept = 0, alpha = 0.4)
## ----bands-sol-----------------------------------------------------------
# Compare the two rubber-band data sets with parallel models.
model1 <- lm(distance ~ stretch, data = elasticband)
model2 <- lm(distance ~ stretch, data = RubberBand)
msummary(model1)
msummary(model2)
## ----trebuchet02, tidy = FALSE-------------------------------------------
treb.model <-
  lm(distance ~ projectileWt, data = Trebuchet2)
msummary(treb.model) # terser output than summary() produces
## ----trebuchet03---------------------------------------------------------
# t-based CI for the slope, by hand and via confint().
-0.0946 + c(-1, 1) * 0.01713 * qt(0.975, df = 14) # CI by hand
confint(treb.model, "projectileWt") # CI using confint()
## ----confint-lm----------------------------------------------------------
# Show the source of the (unexported) confint method for lm objects.
stats:::confint.lm
## ----elasticband02-------------------------------------------------------
msummary(eband.model)
4.554 + c(-1, 1) * 1.543 * qt(0.975, df = 5) # CI by hand
confint(eband.model, "stretch") # CI using confint()
## ----anova-trebuchet-----------------------------------------------------
# ANOVA decomposition for the simple linear model.
treb.model <-
  lm(distance ~ projectileWt, data = Trebuchet2)
anova(treb.model)
## ----trebuchet04, tidy = FALSE-------------------------------------------
rsquared(treb.model)
## ----eval = FALSE--------------------------------------------------------
# Two equivalent ways to suppress the intercept (syntax illustration only).
## lm(y ~ 0 + x)
## lm(y ~ -1 + x)
## ----anova-elasticband---------------------------------------------------
anova(eband.model)
## ----trebuchet05, tidy = FALSE-------------------------------------------
# Point prediction plus confidence (mean response) and prediction
# (individual response) intervals at projectileWt = 44.
treb.model <-
  lm(distance ~ projectileWt, data = Trebuchet2)
treb.dist <- makeFun(treb.model)
treb.dist(projectileWt = 44)
treb.dist(projectileWt = 44, interval = "confidence")
treb.dist(projectileWt = 44, interval = "prediction")
## ----trebuchet06, fig.keep = "none", tidy = FALSE, warning = FALSE-------
# Confidence and prediction bands, first via explicit ribbons, then the
# shortcut gf_lm(interval = ...).
gf_point(distance ~ projectileWt, data = Trebuchet2) %>%
  gf_lm() %>%
  gf_ribbon(
    lwr + upr ~ projectileWt, fill = "skyblue",
    data = cbind(Trebuchet2, predict(treb.model, interval = "prediction"))
  ) %>%
  gf_ribbon(
    lwr + upr ~ projectileWt,
    data = cbind(Trebuchet2, predict(treb.model, interval = "confidence")))
# simpler way, using gf_lm()
gf_point(distance ~ projectileWt, data = Trebuchet2) %>%
  gf_lm(interval = "prediction", fill = "skyblue") %>%
  gf_lm(interval = "confidence")
## ----trebuchet06-fig, results = "hide", echo = FALSE, tidy = FALSE, fig.keep = "last", warning = FALSE----
gf_point(distance ~ projectileWt, data = Trebuchet2) %>%
  gf_lm() %>%
  gf_ribbon(
    lwr + upr ~ projectileWt, fill = "skyblue",
    data = cbind(Trebuchet2, predict(treb.model, interval = "prediction"))
  ) %>%
  gf_ribbon(
    lwr + upr ~ projectileWt,
    data = cbind(Trebuchet2, predict(treb.model, interval = "confidence")))
# simpler way, using gf_lm()
gf_point(distance ~ projectileWt, data = Trebuchet2) %>%
  gf_lm(interval = "prediction", fill = "skyblue") %>%
  gf_lm(interval = "confidence")
## ----elasticband03, tidy = FALSE-----------------------------------------
eband.dist <- makeFun(eband.model)
eband.dist(stretch = 30)
eband.dist(stretch = 30, interval = "confidence")
eband.dist(stretch = 30, interval = "prediction")
## ----eband-fig, results = "hide", echo = FALSE, fig.keep = "last", warning = FALSE----
gf_point(distance ~ stretch, data = elasticband) %>%
  gf_lm() %>%
  gf_ribbon(lwr + upr ~ stretch, fill = "skyblue",
            data = cbind(elasticband, predict(eband.model, interval = "prediction"))) %>%
  gf_ribbon(lwr + upr ~ stretch,
            data = cbind(elasticband, predict(eband.model, interval = "confidence")))
gf_point(distance ~ stretch, data = elasticband) %>%
  gf_lm(interval = "prediction", fill = "skyblue") %>% # prediction band the easy way
  gf_lm(interval = "confidence") # confidence band the easy way
## ----vcov----------------------------------------------------------------
# Coefficient variance-covariance matrix; its diagonal square roots are the
# standard errors reported in the summary table.
treb.model <-
  lm(distance ~ projectileWt, data = Trebuchet2)
vcov(treb.model)
sqrt(diag(vcov(treb.model)))
treb.model %>% summary() %>% coef()
## ----resid-v-fit-fig, results = "hide", echo = FALSE---------------------
# Simulate four archetypal residual-vs-fitted panels: (A) well-behaved,
# (B) curvature, (C) increasing spread, (D) a single outlier.
x <- seq(0, 10, by = 0.25)
# NOTE(review): this numeric vector named `resid` shadows stats::resid;
# the calls resid(lm(...)) below still dispatch to the function because R
# searches for a function when a name is used in call position.
resid <- rnorm(length(x))
y1 <- x + resid
y2 <- (x - 5)^2 / 5 + resid
y3 <- x + (0.75 + 0.25 * x) * resid
y4 <- x + resid
y4[35] <- x[35]- 2 * max(abs(resid))  # inject one large outlier for panel D
resid1 <- resid(lm(y1 ~ x))
resid2 <- resid(lm(y2 ~ x))
resid3 <- resid(lm(y3 ~ x))
resid4 <- resid(lm(y4 ~ x))
stresid1 <- resid1 / sd(resid1)
stresid2 <- resid2 / sd(resid2)
stresid3 <- resid3 / sd(resid3)
stresid4 <- resid4 / sd(resid4)
fit1 <- fitted(lm(y1 ~ x))
fit2 <- fitted(lm(y2 ~ x))
fit3 <- fitted(lm(y3 ~ x))
fit4 <- fitted(lm(y4 ~ x))
group <- rep(toupper(letters[1:4]), each = length(x))
rdata <- data.frame(
  x = rep(x, times = 4),
  fit = c(fit1, fit2, fit3, fit4),
  residual = c(resid1, resid2, resid3, resid4),
  stresidual = c(stresid1, stresid2, stresid3, stresid4),
  group = group)
gf_point(stresidual ~ fit, data = rdata) %>%
  gf_facet_wrap(~ group, scale = "free") %>%
  gf_labs(y = "residual") %>%
  gf_theme(axis.ticks = element_blank(), axis.text = element_blank())
#  ylim = c(-1.1, 1.1) * max(abs(rdata$stresidual)),
#  as.table = T)
## ----rstandard-----------------------------------------------------------
# Standardized residuals by hand: e_i / (sigma-hat * sqrt(1 - h_i)),
# then compare with rstandard().
SmallData <- data.frame(x = c(1, 2, 3, 4), y = c(2, 5, 6, 8))
small.model <- lm(y ~ x, data = SmallData)
# standardized residual computing manually
e <- resid(small.model); e
sigma.hat <- sqrt(sum(e^2) / 2); sigma.hat # internal estimate (df = n - 2 = 2)
h <- hatvalues(small.model); h
e / (sigma.hat * sqrt(1 - h))
# standardized residuals using rstandard()
rstandard(small.model)
## ----hatvalues-----------------------------------------------------------
# Leverage facts: each h_i in [0,1]; sum(h) = number of model parameters.
h <- hatvalues(small.model); h
range(h) # should be contained in [0,1]
sum(h) # should be p = 2
n <- nrow(SmallData)
with(SmallData, 1/n + (x - mean(x))^2/sum((x - mean(x))^2))
## ----rstudent------------------------------------------------------------
# (Externally) studentized residuals: sigma is re-estimated with each
# observation left out in turn.
rstudent(small.model)
# computing the first externally studentized residual by hand
smaller.model <- lm(y ~ x, data = SmallData[-1, ])
# modified sigma.hat based on smaller model
summary(smaller.model)$sigma
sigma.hat <- sqrt(sum(resid(smaller.model)^2) / 1); sigma.hat
# rescaling the 1st residual
e[1] / (sigma.hat * sqrt(1 - h[1]))
## ----star01, tidy = FALSE, fig.keep = "none", message = FALSE------------
# Hertzsprung-Russell star data: four cool giants dominate the fit, so the
# full-data regression line (dotted) differs sharply from the hot-star line.
Stars <- faraway::star
star.plot1 <- gf_point(light ~ temp, data = Stars)
# select all but 4 coolest stars
HotStars <- Stars %>% filter(temp > 3.7)
star.model1 <- lm(light ~ temp, data = Stars)
star.model2 <- lm(light ~ temp, data = HotStars)
gf_point(light ~ temp, data = Stars) %>%
  gf_lm(color = "gray50", linetype = "dotted") %>%
  gf_lm(color = "red", linetype = "dashed", data = HotStars) %>%
  gf_text(light ~ (temp + 0.04), label = ~ as.character(id),
          data = Stars %>% mutate(id = 1:nrow(.)) %>% filter(temp < 4.0))
## ----star-fig, results = "hide", echo = FALSE, message = FALSE-----------
Stars <- faraway::star
star.plot1 <- gf_point(light ~ temp, data = Stars)
# select all but 4 coolest stars
HotStars <- Stars %>% filter(temp > 3.7)
star.model1 <- lm(light ~ temp, data = Stars)
star.model2 <- lm(light ~ temp, data = HotStars)
gf_point(light ~ temp, data = Stars) %>%
  gf_lm(color = "gray50", linetype = "dotted") %>%
  gf_lm(color = "red", linetype = "dashed", data = HotStars) %>%
  gf_text(light ~ (temp + 0.04), label = ~ as.character(id),
          data = Stars %>% mutate(id = 1:nrow(.)) %>% filter(temp < 4.0))
## ----star-fig01, results = "hide", echo = FALSE--------------------------
# Base-graphics diagnostic panels: residuals vs fitted, QQ, Cook's distance,
# residuals vs leverage (which = 1:2, 4:5).
plot(star.model1, which = c(1:2, 4:5))
## ----star-fig02, results = "hide", echo = FALSE--------------------------
plot(star.model2, which = c(1:2, 4:5))
## ----plot-starmodels, fig.keep = "none"----------------------------------
plot(star.model1, which = c(1:2, 4:5))
plot(star.model2, which = c(1:2, 4:5))
## ----star-dfbeta, fig.show = "hide", tidy = FALSE------------------------
# DFBETA for the temp coefficient: how much each observation moves the slope.
HotStars <- HotStars %>%
  mutate(
    dfbeta_temp = dfbeta(star.model2)[, "temp"]
  )
gf_point(dfbeta_temp ~ index, data = HotStars) %>%
  gf_labs(y = "DFBETA") %>%
  gf_text(dfbeta_temp ~ (1.5 + index),
          data = HotStars %>% filter(abs(dfbeta_temp) > 0.5),
          label = ~ as.character(index))
coef(lm(light ~ temp, HotStars))
coef(lm(light ~ temp, HotStars[-7, ]))
## ----star-dfbeta-fig, echo = FALSE, results = "hide"---------------------
HotStars <- HotStars %>%
  mutate(
    dfbeta_temp = dfbeta(star.model2)[, "temp"]
  )
gf_point(dfbeta_temp ~ index, data = HotStars) %>%
  gf_labs(y = "DFBETA") %>%
  gf_text(dfbeta_temp ~ (1.5 + index),
          data = HotStars %>% filter(abs(dfbeta_temp) > 0.5),
          label = ~ as.character(index))
coef(lm(light ~ temp, HotStars))
coef(lm(light ~ temp, HotStars[-7, ]))
## ----tukey-bulge01, results = "hide", echo = FALSE-----------------------
n <- 20
x <- runif(n, 2, 10)
y <- exp(0.3 * x)
e <- exp(rnorm(n, 0, 0.1))
y <- y * e
# Power transform of a packed pair c(value, exponent):
# returns value^exponent in general, or log(value) when the exponent is 0
# (the Box-Cox convention).  Designed to be applied row-wise to cbind(x, a).
foo <- function(x) {
  exponent <- x[2]
  value <- x[1]
  if (exponent == 0) {
    log(value)
  } else {
    value^exponent
  }
}
# Vectorized power transform: element-wise x^a, except log(x) where a == 0
# (the Box-Cox convention).  x and a are recycled to a common length.
#
# Improvement over the original: the original built cbind(x, a) and ran a
# row-wise apply() with a helper, which is both slow and an R anti-pattern;
# this version computes the same values with vectorized arithmetic and has
# no dependence on the `foo` helper.
power <- function(x, a) {
  # recycle to a common length (cbind() did this implicitly before)
  n <- max(length(x), length(a))
  x <- rep_len(x, n)
  a <- rep_len(a, n)
  out <- x^a
  zero <- a == 0
  out[zero] <- log(x[zero])
  out
}
## Build a grid of power transformations (a applied to x, b applied to y)
## for the Tukey bulge figure, then the BallDrop transformation examples.
powers <- c(0, 0.5, 1, 2,3); np <- length(powers)
a <- rep(rep(powers, each = n), each = np)
b <- rep(rep(powers, each = n), times = np)
# NOTE(review): a and b have length n * np^2 but rep(x, times = n * np)
# makes x and y length n^2 * np; data.frame() silently recycles a, b and
# original below.  Points end up duplicated -- was times = np * np
# intended?  Verify against the book source.
x <- rep(x, times = n * np)
y <- rep(y, times = n * np)
X <- power(x, a)
Y <- power(y, b)
original <- (a==1 & b==1)
ddd <- data.frame(X = X, Y = Y, a = a, b = b, original = original)
gf_point(y ~ x)
## ----tukey-buldge-many-fig, results = "hide", echo = FALSE, opts.label = "figbig"----
ddd <- ddd %>%
  mutate(
    afacet = factor(paste("a=", a, sep = "")),
    bfacet0 = factor(paste("b=", b, sep = "")),
    bfacet = factor(bfacet0, levels = rev(levels(bfacet0)))
  )
gf_point(Y ~ X, data = ddd, color = ~original) %>%
  gf_facet_grid(bfacet ~ afacet, scale = "free") %>%
  gf_theme(axis.ticks = element_blank(), axis.text = element_blank(), legend.position = "none")
## ----balldrop, fig.show = "hide"-----------------------------------------
## Ball drop times vs height: untransformed fit, then sqrt(height) fit
## (physics suggests time proportional to sqrt(height)).
ball.model <- lm(time ~ height, BallDrop)
msummary(ball.model)
gf_lm(time ~ height, data = BallDrop) %>%
  gf_point()
plot(ball.model, w = 1)
## ----balldrop-fig, results = "hide", echo = FALSE------------------------
ball.model <- lm(time ~ height, BallDrop)
msummary(ball.model)
gf_lm(time ~ height, data = BallDrop) %>%
  gf_point()
plot(ball.model, w = 1)
## ----balldrop-trans, fig.show = "hide"-----------------------------------
ball.modelT <- lm(time ~ sqrt(height), data = BallDrop)
msummary(ball.modelT)
gf_point(time ~ height, data = BallDrop) %>%
  gf_lm(formula = y ~ sqrt(x), interval = "prediction",
        fill = "skyblue") %>%
  gf_lm(formula = y ~ sqrt(x), interval = "confidence")
plot(ball.modelT, w = 1)
## ----balldrop-trans-fig, results = "hide", echo = FALSE------------------
ball.modelT <- lm(time ~ sqrt(height), data = BallDrop)
msummary(ball.modelT)
gf_point(time ~ height, data = BallDrop) %>%
  gf_lm(formula = y ~ sqrt(x), interval = "prediction",
        fill = "skyblue") %>%
  gf_lm(formula = y ~ sqrt(x), interval = "confidence")
plot(ball.modelT, w = 1)
## ----balldrop-sol--------------------------------------------------------
# two alternative transformations with the same physical motivation
lm(time^2 ~ height, data = BallDrop)
lm(log(time) ~ log(height), BallDrop)
## ----balldrop-avg, fig.show = "hide", tidy = FALSE-----------------------
# average replicate drops at each height before fitting
BallDropAvg <-
  BallDrop %>%
  group_by(height) %>%
  summarise(time = mean(time))
BallDropAvg
ball.modelA <- lm(time ~ sqrt(height), data = BallDropAvg)
msummary(ball.modelA)
gf_point(time ~ height, data = BallDropAvg) %>%
  gf_lm(formula = y ~ sqrt(x), fill = "skyblue") %>%
  gf_lm(formula = y ~ sqrt(x), interval = "confidence")
plot(ball.modelA, w = 1)
## ----balldrop-avg-fig, results = "hide", echo = FALSE--------------------
BallDropAvg <-
  BallDrop %>%
  group_by(height) %>%
  summarise(time = mean(time))
BallDropAvg
ball.modelA <- lm(time ~ sqrt(height), data = BallDropAvg)
msummary(ball.modelA)
gf_point(time ~ height, data = BallDropAvg) %>%
  gf_lm(formula = y ~ sqrt(x), fill = "skyblue") %>%
  gf_lm(formula = y ~ sqrt(x), interval = "confidence")
plot(ball.modelA, w = 1)
## ----soap01--------------------------------------------------------------
## Soap bar weight over time: linear fit vs cube-root response
## (volume^(1/3) should shrink linearly if dimensions shrink uniformly).
Soap.model1 <- lm(weight ~ day, data = Soap)
msummary(Soap.model1)
## ----soap02--------------------------------------------------------------
Soap.model2 <- lm(I(weight^(1/3)) ~ day, data = Soap)
msummary(Soap.model2)
## ----soap-fig, results = "hide", echo = FALSE----------------------------
fit1 <- makeFun(Soap.model1)
# transformation required to undo transformation on y
fit2 <- makeFun(Soap.model2, transformation = function(x) x^3)
gf_point(weight ~ day, data = Soap) %>%
  gf_function(fun = fit1, color = "navy") %>%
  gf_function(fun = fit2, color = "red")
## ----soap03--------------------------------------------------------------
confint(Soap.model1)
## ----prob-soap-----------------------------------------------------------
# NOTE(review): this reassigns Soap.model2, clobbering the cube-root model
# defined in chunk soap02 above.
Soap2 <- Soap %>% filter(day < 20)
Soap.model2 <- lm(weight ~ day, data = Soap2)
msummary(Soap.model2)
plot(Soap.model2, w = 1:2)
## ----pendulum-sol01, warning = FALSE-------------------------------------
## Pendulum period vs length: sqrt(length) model and log-log model.
model <- lm(period ~ sqrt(length), data = Pendulum)
msummary(model)
confint(model)
f <- makeFun(model)
plot(model, w = 1)
plot(model, w = 2)
## ----pendulum-sol02, fig.keep = "last", warning = FALSE------------------
gf_point(period ~ length, data = Pendulum) %>%
  gf_function(fun = f)
## ----pendulum-sol03, warning = FALSE-------------------------------------
model2 <- lm(log(period) ~ log(length), data = Pendulum)
msummary(model2)
g <- makeFun(model2)
plot(model2, w = 1)
plot(model2, w = 2)
## ----pendulum-sol04, fig.keep = "last", warning = FALSE------------------
# overlay both fits on the original (untransformed) scale;
# exp() undoes the log transformation of the response in model2
gf_point(period ~ length, data = Pendulum) %>%
  gf_fun(f(l) ~ l) %>%
  gf_fun(exp(g(l)) ~ l, col = "red")
## ----pendulum-sol05------------------------------------------------------
confint(model2)
## ----cornnit-sol01-------------------------------------------------------
## Corn yield vs nitrogen: log(1 + nitrogen) handles the zero-nitrogen plots.
data(cornnit, package = "faraway")
cornnit.mod <- lm(yield ~ log(1 + nitrogen), data = cornnit)
msummary(cornnit.mod)
fit <- makeFun(cornnit.mod)
gf_point(yield ~ log(1 + nitrogen), data = cornnit) %>%
  gf_lm()
gf_point(yield ~ nitrogen, data = cornnit) %>%
  gf_function(fun = fit)
gf_point(yield ~ nitrogen, data = cornnit) %>%
  gf_lm(formula = y ~ log(1 + x))
plot(cornnit.mod, w = 1:3)
## ----cornnit-sol02-------------------------------------------------------
# refit after dropping observation 21 (an influential point)
cornnit.mod2 <- lm(yield ~ log(1 + nitrogen), data = cornnit[-21, ])
msummary(cornnit.mod2)
fit2 <- makeFun(cornnit.mod2)
gf_point(yield ~ log(1 + nitrogen), data = cornnit[-21, ]) %>%
  gf_lm()
gf_point(yield ~ nitrogen, data = cornnit[-21, ]) %>%
  gf_function(fun = fit2)
gf_point(yield ~ nitrogen, data = cornnit[-21, ]) %>%
  gf_lm(formula = y ~ log(1 + x)) %>%
  gf_lm(formula = y ~ log(1 + x), data = cornnit, color = "gray50")
plot(cornnit.mod2, w = 1:3)
## ----cornnit-sol03-------------------------------------------------------
# alternative: transform both sides
cornnit.mod3 <- lm(yield^2 ~ sqrt(nitrogen), data = cornnit)
fit3 <- makeFun(cornnit.mod3, transformation = sqrt)
msummary(cornnit.mod3)
gf_point(yield^2 ~ sqrt(nitrogen), data = cornnit) %>%
  gf_lm()
gf_point(yield ~ nitrogen, data = cornnit) %>%
  gf_function(fit3)
plot(cornnit.mod3, w = 1:3)
## ----eval = FALSE--------------------------------------------------------
## model1 <- lm(y1 ~ x1, data = anscombe)
## msummary(model1)
## ----eband-effects-------------------------------------------------------
## Decompose the response via lm()'s QR "effects" vector and verify the
## pieces (overall mean, slope, residuals) by direct computation.
data(elasticband, package = "DAAG")
eband.model <- lm(distance ~ stretch, data = elasticband)
ef <- eband.model$effects; n <- length(ef)
ef
# total length
sum(ef^2)
sum(elasticband$distance^2)
# projection of residuals into n-2 orthogonal components
sum(ef[3:n]^2)
sum(resid(eband.model)^2)
# projection in direction of u[0] is mean * sqrt(n)
mean(elasticband$distance) * sqrt(n)
ef[1]
# beta1.hat obtained from projection in direction of u[1]
# Note: R's u[1] points in the opposite direction.
ef[2] / sqrt(sum((elasticband$stretch - mean(elasticband$stretch))^2))
coef(eband.model)
## ----trebuchet07---------------------------------------------------------
## Same effects-vector decomposition, applied to the trebuchet data.
treb.model <- lm(distance ~ projectileWt, data = Trebuchet2)
ef <- treb.model$effects; n <- length(ef)
ef
# total length
sum(ef^2)
sum(Trebuchet2$distance^2)
# projection of residuals into n-2 orthogonal components
sum(ef[3:n]^2)
sum(resid(treb.model)^2)
# projection in direction of u[0] is mean * sqrt(n)
# Note: R's u[0] points in the opposite direction.
mean(Trebuchet2$distance) * sqrt(n)
ef[1]
# beta1.hat obtained from projection in direction of u[1]
v1 <- Trebuchet2$projectileWt - mean(Trebuchet2$projectileWt)
ef[2] / sqrt(sum(v1^2))
coef(treb.model)
## ----anova-from-lm00, include = FALSE, seed = 123------------------------
## Simulated data for the "recover the ANOVA table from lm output" exercise.
x = rep(1:5, each = 4)
y = 3 + 1 * x + rnorm(20, 0, 3)
someData = data.frame(x = x, y = y)
## ------------------------------------------------------------------------
msummary(lm(y ~ x, someData))
## ----anova-from-lm-sol01-------------------------------------------------
## ----anova-from-lm-sol02-------------------------------------------------
anova(lm(y ~ x, someData))
## ----act-gpa-sol---------------------------------------------------------
## ACT scores vs GPA: marginal t intervals, then the regression.
t.test(~ ACT, data = ACTgpa)
t.test(~ GPA, data = ACTgpa)
grades.model <- lm(GPA ~ ACT, data = ACTgpa)
msummary(grades.model)
gf_point(GPA ~ ACT, data = ACTgpa) %>%
  gf_lm(interval = "prediction", fill = "skyblue") %>%
  gf_lm(interval = "confidence") %>%
  gf_lm()
act2gpa <- makeFun(grades.model)
act2gpa(ACT = 25, interval = "confidence")
act2gpa(ACT = 30, interval = "prediction")
## ----drag-sol------------------------------------------------------------
## Drag force vs velocity: three equivalent-in-spirit transformations.
model1 <- lm(velocity^2 ~ force.drag, data = Drag)
model2 <- lm(velocity ~ sqrt(force.drag), data = Drag)
model3 <- lm(log(velocity) ~ log(force.drag), data = Drag)
msummary(model1)
msummary(model2)
msummary(model3)
## ----drag-fig, results = "hide", echo = FALSE----------------------------
gf_point(velocity^2 ~ force.drag, data= Drag, color = ~ factor(height))
plot(model1, w = 1)
gf_point(velocity ~ force.drag, data = Drag, color = ~ factor(height)) %>%
  gf_refine(scale_x_log10(), scale_y_log10())
plot(model3, w = 1)
## ----spheres-sol01-------------------------------------------------------
## Sphere mass vs diameter on the log-log scale (slope should be near 3).
gf_point(log(mass) ~ log(diameter), data = Spheres)
spheres.lm <- lm(log(mass) ~ log(diameter), data = Spheres)
confint(spheres.lm)
plot(spheres.lm, w = 1:2)
## ----spheres-sol02, fig.keep = "last"------------------------------------
mass <- makeFun(spheres.lm)
gf_point(mass ~ diameter, data = Spheres) %>%
  gf_function(fun = mass, alpha = 0.5)
## ----spheres-sol03-------------------------------------------------------
confint(spheres.lm, level = .96)
## ----taste01-------------------------------------------------------------
favstats(score ~ scr, data = TasteTest)
## ----taste02-------------------------------------------------------------
# two-group comparison expressed as a regression on the group indicator
taste.model <- lm(score ~ scr, data = TasteTest)
msummary(taste.model)
## ----taste03-------------------------------------------------------------
confint(taste.model)
## ----taste04-------------------------------------------------------------
confint(taste.model, "scrfine") / 50
## ----corn01, tidy = FALSE, message = FALSE--------------------------------
# the Corn data frame has an inconvenient "shape"
# (each type of Corn is in its own column)
head(Corn, 3)
require(tidyr)
# this puts all the yields in one column and type of seed in another
Corn2 <- Corn %>% gather(key = "treatment", value = "yield")
# inspect a few rows in each treatment group
Corn2 %>% group_by(treatment) %>% do(head(., 3))
## ----corn02--------------------------------------------------------------
favstats(yield ~ treatment, data = Corn2)
Corn.model <- lm(yield ~ treatment, data = Corn2)
msummary(Corn.model)
## ----paired-corn-sol-----------------------------------------------------
## Paired vs unpaired analyses of the same data.
t.test(~ (reg-kiln), data = Corn) # paired
t.test(Corn$reg, Corn$kiln) # 2-sample
## ----tirewear-sol01------------------------------------------------------
msummary(lm(weight ~ groove, data = TireWear))
gf_lm(weight ~ groove, data = TireWear) %>%
  gf_point()
## ----tirewear-sol02------------------------------------------------------
# -1 removes the intercept (regression through the origin)
msummary(lm(weight ~ -1 + groove, data = TireWear))
## ----tirewear-sol03------------------------------------------------------
# paired t test, two equivalent formulations
t.test(TireWear$weight, TireWear$groove, paired = TRUE)
t.test( ~ (weight - groove), data = TireWear)
## ----tirewear-sol04------------------------------------------------------
# sign-test style analyses of the paired differences
binom.test(~ (weight > groove), data = TireWear)
prop.test(~ (weight > groove), data = TireWear)
## ----oats-variety, tidy = FALSE------------------------------------------
oats <- data.frame(
  soil = 1:7,
  A = c(71.2, 72.6, 47.8, 76.9, 42.5, 49.6, 62.8),
  B = c(65.2, 60.7, 42.8, 73.0, 41.7, 56.6, 57.3)
)
t.test( ~ (A - B), data = oats)
## ----t-corn--------------------------------------------------------------
t.test(Corn$kiln, Corn$reg) # 2-vector interface
t.test(yield ~ treatment, data = Corn2) # formula interface
## ----taste05-------------------------------------------------------------
t.test(score ~ scr, data = TasteTest)
## ----power-t-test01------------------------------------------------------
## power.t.test() examples: supply all but one of n / delta / power / sd
## and it solves for the missing quantity.
power.t.test(delta = 5, sd = 10, power = 0.8)
## ----power-t-test02------------------------------------------------------
power.t.test(delta = 0.5, power = 0.8)
## ----power-t-test03------------------------------------------------------
power.t.test(delta = 0.5, n = 50)
## ----power-t-test04------------------------------------------------------
power.t.test(delta = 0.25, n = 50)
## ----power-t-test05, fig.show = "hide"-----------------------------------
# Power of a two-sample t test with n = 50 per group, as a function of the
# true effect size (delta, in units of the common standard deviation).
pow <- function(effect) {
  result <- power.t.test(n = 50, delta = effect)
  result$power
}
# plot power as a function of effect size for fixed n = 50
effect = seq(0, 2, by = 0.05)
gf_line(pow(effect) ~ effect) %>%
  gf_labs(y = "power", x = "effect size",
          title = "Power of a 2-sample test (n = 50)")
## ----power-t-test05-fig, results = "hide", echo = FALSE------------------
## echo = FALSE duplicate of the chunk above, used to render the figure.
pow <- function(effect) {
  power.t.test(delta = effect, n = 50)$power
}
effect = seq(0, 2, by = 0.05)
gf_line(pow(effect) ~ effect) %>%
  gf_labs(y = "power", x = "effect size",
          title = "Power of a 2-sample test (n = 50)")
## ----power-t-test06------------------------------------------------------
power.t.test(delta = 0.5, power = 0.8, type = "one.sample")
## ----power-t-test07------------------------------------------------------
power.t.test(delta = 0.5, power = 0.8, type = "paired")
## ------------------------------------------------------------------------
power.t.test(n = 30, delta = 0.5, sd = 1, sig.level = 0.05)
power.t.test(n = 30, delta = 0.5, sd = 1, sig.level = 0.05,
             type = "one.sample")
## ----power-sims-sol------------------------------------------------------
## Check the power.t.test() answers by simulation (5000 replicates each).
Sims2 <- do(5000) * t.test(rnorm(30, 0.5, 1), rnorm(30, 0, 1)) %>% pval()
prop( ~(p.value <= 0.05), data = Sims2)
Sims1 <- do(5000) * t.test(rnorm(30, 0.5, 1)) %>% pval()
prop( ~(p.value <= 0.05), data = Sims1)
## ----power-slr-sol-------------------------------------------------------
## Simulated power for the slope test in simple linear regression
## (true slope 1, sigma = 4, alpha = 0.01).
x <- seq(0, 10, length.out = 24)
Sims <- do(1000) * {
  lm(x + rnorm(24, 0, 4) ~ x) %>%
    summary() %>%
    coef() %>%
    (function(x) x[2,4]) # p-value for slope
}
prop( ~(result <= 0.01), data = Sims) # power
## ----orings01, tidy = FALSE----------------------------------------------
## Challenger O-ring data: logistic regression of failure on temperature.
# select the version of this data set in the faraway package
data(orings, package = "faraway")
orings <-
  orings %>% mutate(failure = damage != 0) # convert to binary response
orings.model <-
  glm(failure ~ temp, data = orings, family = binomial(link = logit))
msummary(orings.model)
## ----orings-fig, results = "hide", echo = FALSE--------------------------
pred_damage <- makeFun(orings.model)
Pred_data <-
  tibble(
    temp = seq(30, 100, by = 2),
    pred = pred_damage(temp))
gf_point(damage / 6 ~ temp, data = orings, alpha = 0.7) %>%
  gf_line(pred ~ temp, data = Pred_data) %>%
  gf_lims(x = c(30, 100), y = c(-0.05, 1.05)) %>%
  gf_labs(y = "proportion of O-rings damaged")
orings <- orings %>% mutate(fail = as.numeric(failure))
gf_point(fail ~ temp, data = orings, alpha = 0.7) %>%
  gf_line(pred ~ temp, data = Pred_data) %>%
  gf_lims(x = c(30, 100), y = c(-0.05, 1.05)) %>%
  gf_labs(y = "O-ring failure (1 = yes, 0 = no)")
## ----orings02, tidy = FALSE, digits = 5----------------------------------
# by default, predict() works on the linear model scale
r <- predict(orings.model, newdata = data.frame(temp = 31)); r
ilogit(r)
# but we can ask for it to work on the "response" scale
predict(orings.model, newdata = data.frame(temp = 31), type = "response")
## ----orings03, digits = 5------------------------------------------------
# by default, makeFun() uses type = "response" and
# returns values on data scale
temp2damage <- makeFun(orings.model)
temp2damage(temp = 31)
makeFun(orings.model)(31) # We can do it all in one line if we prefer
# the other option is type = "link"
temp2damage <- makeFun(orings.model, type = "link")
temp2damage(temp = 31)
## ----orings04, digits = 5------------------------------------------------
# convert the model's P(any damage) into per-O-ring probabilities
p <- makeFun(orings.model)(31)
1 - (1-p)^(1/6) -> q; q # P(damage to particular O-ring)
1 - dbinom(0, 6, q) # P(damage to >0 O-rings)
cbind(0:6, dbinom(0:6, 6, q)) # table of all probabilities
## ----orings05, digits = 5------------------------------------------------
## Alternative: model the count of damaged O-rings directly as binomial(6, p).
orings.model2 <- # link = logit is default, so unnecessary
  glm(cbind(damage, 6 - damage) ~ temp, data = orings,
      family = binomial(link = logit))
msummary(orings.model2)
p1 <- predict(orings.model, newdata = data.frame(temp = 31), type = "response"); p1
p2 <- predict(orings.model2, newdata = data.frame(temp = 31), type = "response"); p2
dbinom(0, 6, prob = p2) # 0 damaged O-rings
pred_damage1 <- makeFun(orings.model)
pred_damage2 <- makeFun(orings.model2)
Pred_data <-
  tibble(
    temp = seq(30, 100, by = 2),
    pred1 = pred_damage1(temp),
    pred2 = pred_damage2(temp)
  )
gf_point(damage / 6 ~ temp, data = orings, alpha = 0.7) %>%
  gf_line(pred1 ~ temp, data = Pred_data, color = "gray50") %>%
  gf_line(pred2 ~ temp, data = Pred_data, color = "blue") %>%
  gf_labs(y = "proportion of O-rings damaged")
## ----runswins01----------------------------------------------------------
## MLB 2004: logistic regression of win/loss record on run margin per game.
head(MLB2004, 4)
## ----runswins02, tidy = FALSE--------------------------------------------
BB <- MLB2004 %>%
  mutate(runmargin = (R - OR) / G)
# data frame has summarized data for each team, so different syntax here:
bb.glm <- glm(cbind(W, L) ~ runmargin, data = BB, family = "binomial")
msummary(bb.glm)
## ----runswins03, fig.keep = "none", tidy = FALSE-------------------------
BB <-
  BB %>%
  mutate(
    winP = W / G,
    predWinP = makeFun(bb.glm)(runmargin),
    winPdiff = winP - predWinP
  )
# teams whose actual winning percentage deviates most from predicted
BB %>% arrange(-abs(winPdiff)) %>% select(1, 22:24) %>% head()
gf_point(winP ~ predWinP, data = BB) %>%
  gf_abline(slope = 1, intercept = 0)
## ----runswins04, fig.keep = "none"---------------------------------------
Aux_data <-
  tibble(
    runmargin = seq(-3.5, 3.5, by = 0.1),
    winP = makeFun(bb.glm)(runmargin = runmargin)
  )
gf_point(winP ~ runmargin, data = BB) %>%
  gf_line(winP ~ runmargin, data = Aux_data, alpha = 0.4)
## ----runswins03-fig, results = "hide", echo = FALSE----------------------
## echo = FALSE duplicate of the two chunks above, used for the figures.
BB <-
  BB %>%
  mutate(
    winP = W / G,
    predWinP = makeFun(bb.glm)(runmargin),
    winPdiff = winP - predWinP
  )
BB %>% arrange(-abs(winPdiff)) %>% select(1, 22:24) %>% head()
gf_point(winP ~ predWinP, data = BB) %>%
  gf_abline(slope = 1, intercept = 0)
Aux_data <-
  tibble(
    runmargin = seq(-3.5, 3.5, by = 0.1),
    winP = makeFun(bb.glm)(runmargin = runmargin)
  )
gf_point(winP ~ runmargin, data = BB) %>%
  gf_line(winP ~ runmargin, data = Aux_data, alpha = 0.4)
## ----orings-ci01---------------------------------------------------------
# Wald CI for the temperature coefficient, by hand, on both scales
st.err <- sqrt(diag(vcov(orings.model))); st.err
coef(orings.model)[2] + c(-1, 1) * st.err[2] * qnorm(0.975)
exp(coef(orings.model)[2] + c(-1, 1) * st.err[2] * qnorm(0.975))
## ----orings-ci02---------------------------------------------------------
# profile-likelihood CI (confint.glm); differs from the Wald interval
confint(orings.model, parm = "temp")
## ----runswins-sol01, tidy = FALSE----------------------------------------
## Compare logistic regression to a plain linear model for winning percentage.
BB <-
  MLB2004 %>%
  mutate(
    runmargin = (R - OR) / G,
    winP = W / G)
bb.glm <-
  glm(cbind(W, L) ~ runmargin, data = BB, family = binomial())
bb.lm <- lm(winP ~ runmargin, data = BB)
msummary(bb.lm)
BB <- BB %>%
  mutate(
    glmPredWinP = makeFun(bb.glm)(runmargin = runmargin),
    lmPredWinP = makeFun(bb.lm) (runmargin = runmargin)
  )
plot(bb.lm, w = 2)
plot(bb.glm, w = 2)
# observations 8 and 27 have largest residuals
BB[c(8, 27, 1:2, 29:30), c("team", "winP", "glmPredWinP", "lmPredWinP")]
## ----buckthorn-sol01, fig.keep = "last"----------------------------------
## Buckthorn kill rate vs glyphosate concentration (logistic regression).
buck.model <-
  glm(dead ~ conc, data = Buckthorn, family = binomial)
msummary(buck.model)
dead <- makeFun(buck.model)
## ----buckthorn-sol02-----------------------------------------------------
# odds and probability of death at concentration 0 (the intercept)
odds <- exp(coef(buck.model)[1]); odds # odds when conc = 0
# prob when conc = 0
odds / (1 + odds)
ilogit(coef(buck.model)[1])
dead(0)
## ----buckthorn-sol03-----------------------------------------------------
odds <- function(p) { p / (1 - p) }
# odds ratios for a 0.01 increase in concentration are constant
# (equal to exp(0.01 * slope)) -- checked at two places on the curve
odds(dead(0.01)) / odds(dead(0))
odds(dead(0.30)) / odds(dead(0.29))
## ----buckthorn-sol04, fig.keep = "last", tidy = FALSE--------------------
# calculate the percentage dead at each concentration used.
tbl <-
  Buckthorn %>%
  group_by(conc) %>%
  summarise(
    total = n(),
    obsDead = sum(dead),
    obsAlive = sum(!dead),
    propDead = obsDead / (obsDead + obsAlive)
  ) %>%
  mutate(
    expDead = dead(conc) * total,
    expAlive = (1 - dead(conc)) * total,
    expPropDead = dead(conc)
  )
tbl
gf_point(propDead ~ conc, data = tbl) %>%
  gf_labs(y = "predicted death rate", x = "concentration of glyphosate") %>%
  gf_function(fun = dead, alpha = 0.5)
## ----include = FALSE, eval = FALSE---------------------------------------
## tbl2 <- Buckthorn %>%
##   group_by(conc) %>%
##   summarise(
##     propDead = mean(dead)
##   )
## tbl2
## concentrations = seq(0, 0.5, by = 0.02)
## fits <- predict(buck.model, new = data.frame(conc = concentrations),
##                 type = "response")
## ----buckthorn-sol05-----------------------------------------------------
## Goodness of fit: likelihood-ratio and Pearson statistics from the
## observed/expected table above.
observed <- tbl[, 3:4]; observed
expected <- tbl[, 6:7]; expected
lrt <- 2 * sum(observed * log (observed / expected)); lrt
pearson <- sum((observed - expected)^2 / expected); pearson
# pvals
1 - pchisq(pearson, df = 2)
1 - pchisq(lrt, df = 2)
## ----logit-probit-sol, fig.keep = "last"---------------------------------
# overlay a logistic curve and a rescaled probit curve to show how similar
# the two link functions are
gf_fun(ilogit(3 + 2 * x) ~ x, xlim = c(-6, 3),
       size = 1.0, col = "gray70") %>%
  gf_fun(pnorm((1.5 + x) * sqrt(2 * pi)/2) ~ x, col = "red")
## ----runmargin-sol, fig.keep = "last", tidy = FALSE----------------------
## Logit vs probit link for the runs/wins model.
bb.logit <-
  glm(cbind(W, L) ~ runmargin, data = BB,
      family = binomial(link = logit))
bb.probit <-
  glm(cbind(W, L) ~ runmargin, data = BB,
      family = binomial(link = probit))
confint(bb.logit)
confint(bb.probit)
f.logit <- makeFun(bb.logit)
f.probit <- makeFun(bb.probit)
gf_fun(f.logit(r) ~ r, xlim = c(-2, 2)) %>%
  gf_fun(f.probit(r) ~ r, size = 2, color = "red", alpha = 0.4)
## ----orings-sol, fig.keep = "last", tidy = FALSE-------------------------
## Logit vs probit link for the O-ring failure model.
orings.logit <-
  glm(failure ~ temp, data = orings, family = binomial(link = logit))
orings.probit <-
  glm(failure ~ temp, data = orings, family = binomial(link = probit))
confint(orings.logit)
confint(orings.probit)
g.logit <- makeFun(orings.logit)
g.probit <- makeFun(orings.probit)
gf_fun(g.logit(t) ~ t, xlim = c(25, 90)) %>%
  gf_fun(g.probit(t) ~ t, lty = 2, lwd = 1.5, alpha = .4, col = "red")
## ----buckthorn-probit-sol01, tidy = FALSE--------------------------------
## Refit the buckthorn model with a probit link and repeat the
## goodness-of-fit computation.
buck.model2 <-
  glm(dead ~ conc, data = Buckthorn, family = binomial(link = probit))
msummary(buck.model2)
## ----buckthorn-probit-sol02, fig.keep = "last", tidy = FALSE-------------
dead2 <- makeFun(buck.model2)
tbl2 <-
  Buckthorn %>%
  group_by(conc) %>%
  summarise(
    total = length(dead),
    obsDead = sum(dead),
    obsAlive = sum(!dead),
    propDead = sum(dead) / length(dead)
  ) %>%
  mutate(
    expDead = dead2(conc) * total,
    expAlive = (1 - dead2(conc)) * total,
    expPropDead = dead2(conc)
  )
tbl2
gf_point(propDead ~ conc, data = tbl2) %>%
  gf_labs(y = "predicted death rate", x = "concentration of glyphosate") %>%
  gf_fun(dead2(c) ~ c, linetype = "dotted") %>%
  gf_fun(dead(c) ~ c, linetype = "dashed")
## ----bucktorn-probit-sol03, tidy = FALSE---------------------------------
observed <- tbl2[ , 3:4]
expected <- tbl2[ , 6:7]
lrt <- 2 * sum(observed * log (observed / expected)); lrt
pearson <- sum((observed - expected)^2 / expected); pearson
## ------------------------------------------------------------------------
# pvals
1 - pchisq(pearson, df = 2)
1 - pchisq(lrt, df = 2)
## ----deviance01----------------------------------------------------------
## Deviance comparisons: null, fitted, and saturated binomial models.
null.glm <-
  glm(cbind(W, L) ~ 1, data = BB, family = "binomial")
bb.glm <-
  glm(cbind(W, L) ~ runmargin, data = BB, family = "binomial")
# one parameter per team => saturated (deviance 0)
saturated.glm <-
  glm(cbind(W, L) ~ factor(1:30), data = BB, family = "binomial")
## ----deviance02, digits = 4----------------------------------------------
msummary(bb.glm)
deviance(bb.glm)
deviance(null.glm)
deviance(saturated.glm) %>% round(10)
## ----deviance03----------------------------------------------------------
# likelihood ratio test: does runmargin improve on the null model?
deviance(null.glm) - deviance(bb.glm)
1 - pchisq(deviance(null.glm) - deviance(bb.glm), df = 1)
## ----deviance04----------------------------------------------------------
# lack-of-fit test against the saturated model (df = 30 - 2)
deviance(bb.glm) - deviance(saturated.glm)
1 - pchisq(deviance(bb.glm) - deviance(saturated.glm), df = 28)
## ----deviance05----------------------------------------------------------
glm(cbind(W, L) ~ SLG, data = BB, family = "binomial") %>%
  msummary()
## ----students01, fig.keep = "none", warning = FALSE----------------------
## Graduation vs ACT/SAT scores; points show graduation rates by decile.
act.glm <- glm(grad ~ ACT, data = Students, family = "binomial")
fit1 <- makeFun(act.glm)
gf_point(prop.grad ~ ACT.decile,
  data = Students %>%
    mutate(ACT.decile = ntiles(ACT, 10, format = "mean")) %>%
    group_by(ACT.decile) %>%
    summarise(prop.grad = prop(grad))
) %>%
  gf_fun(fit1(act) ~ act)
## ----students02----------------------------------------------------------
msummary(act.glm)
## ----students03, fig.keep = "none", warning = FALSE----------------------
sat.glm <- glm(grad ~ SAT, data = Students, family = "binomial")
fit2 <- makeFun(sat.glm)
gf_point(
  prop.grad ~ SAT.decile,
  data = Students %>%
    mutate(SAT.decile = ntiles(SAT, 10, format = "mean")) %>%
    group_by(SAT.decile) %>%
    summarise(prop.grad = prop(grad))
) %>%
  gf_fun(fit2(sat) ~ sat)
## ----students04----------------------------------------------------------
msummary(sat.glm)
## ----students-fig, echo = FALSE, results = "hide", warning = FALSE-------
## echo = FALSE duplicate of the two plotting chunks above.
act.glm <- glm(grad ~ ACT, data = Students, family = "binomial")
fit1 <- makeFun(act.glm)
gf_point(prop.grad ~ ACT.decile,
  data = Students %>%
    mutate(ACT.decile = ntiles(ACT, 10, format = "mean")) %>%
    group_by(ACT.decile) %>%
    summarise(prop.grad = prop(grad))
) %>%
  gf_fun(fit1(act) ~ act)
sat.glm <- glm(grad ~ SAT, data = Students, family = "binomial")
fit2 <- makeFun(sat.glm)
gf_point(
  prop.grad ~ SAT.decile,
  data = Students %>%
    mutate(SAT.decile = ntiles(SAT, 10, format = "mean")) %>%
    group_by(SAT.decile) %>%
    summarise(prop.grad = prop(grad))
) %>%
  gf_fun(fit2(sat) ~ sat)
## ----students05----------------------------------------------------------
# missingness pattern: the two models are fit on different subsets
tally( ~ is.na(ACT) + is.na(SAT), margins = TRUE,
       data = Students)
## ----students06----------------------------------------------------------
deviance(act.glm)
1 - pchisq(deviance(act.glm), df = 829)
deviance(sat.glm)
1 - pchisq(deviance(sat.glm), df = 362)
## ------------------------------------------------------------------------
# same response fit with an (inappropriate) gaussian family, for contrast
act2.glm <- glm(grad ~ ACT, family = "gaussian", data = Students)
## ----lm-sim01, seed = 1234-----------------------------------------------
## One simulated regression data set and the resulting CIs.
b0 <- 3; b1 <- 5; sigma <- 2 # set model parameters
x <- rep(1:5, each = 4) # 4 observations at each of 5 values
e <- rnorm(length(x), sd = sigma) # error term in the model
y <- b0 + b1 * x + e # build response according to model
model <- lm(y ~ x)
confint(model)
## ----lm-sim02, tidy = FALSE----------------------------------------------
# Simulate one simple-linear-regression data set (y = b0 + b1 * x + N(0, sigma)
# noise, 4 observations at each of 5 x values by default) and return the 95%
# confidence interval for the slope as a 1-row matrix with columns "lo", "hi".
sim <- function(b0 = 3, b1 = 5, sigma = 2,
                x = rep(1:5, each = 4)) {
  response <- b0 + b1 * x + rnorm(length(x), sd = sigma)
  fit <- lm(response ~ x)
  ci <- confint(fit, 2)          # interval for the slope coefficient
  colnames(ci) <- c("lo", "hi")  # nicer names for downstream mutate()
  ci
}
## ----lm-sim03, tidy = FALSE----------------------------------------------
## Coverage simulation: how often does the slope CI miss the true slope (5)?
sim() # one simulation
Sims <- do(5000) * sim() # lot of simulations
Sims <-
  Sims %>%
  mutate(status = ifelse(lo > 5, "hi", ifelse(hi < 5, "lo", "good")))
## ----lm-sim04, tidy = FALSE----------------------------------------------
tally( ~ status, data = Sims, format = "prop")
binom.test( ~ status, data = Sims, p = 0.95)
## ----lm-sim05------------------------------------------------------------
# nominal split: 95% coverage, 2.5% misses on each side
chisq.test(tally( ~ status, data = Sims), p = c(0.95, 0.025, 0.025))
## ----lm-sim06, tidy = FALSE----------------------------------------------
## Same simulation with skewed (exponential) errors.
sim2 <-
  function(
    b0 = 3, b1 = 5, lambda = 1,
    x = rep(1:5, each = 4) # 4 observations at each of 5 values
  ){
    # shift to give a mean of 0.
    e <- rexp(length(x), rate = 1 / lambda) - lambda
    y <- b0 + b1 * x + e
    model <- lm(y ~ x)
    ci <- confint(model, 2)
    dimnames(ci)[[2]] <- c("lo", "hi") # provide nicer names
    ci
  }
Sims2 <- do(5000) * sim2()
Sims2 <-
  Sims2 %>%
  mutate(status = ifelse(lo > 5, "hi", ifelse(hi < 5, "lo", "good")))
## ----lm-sim07, tidy = FALSE----------------------------------------------
tally( ~ status, data = Sims2, format = "prop")
binom.test( ~ status, data = Sims2, p = 0.95)
chisq.test(tally( ~ status, data = Sims2), p = c(0.95, 0.025, 0.025))
## ----lm-sim08, tidy = FALSE----------------------------------------------
## Same simulation with heteroskedastic errors (SD proportional to x).
# NOTE(review): the lambda argument is unused in sim3; it appears to be a
# leftover from the sim2 template.
sim3 <-
  function(
    b0 = 3, b1 = 5, lambda = 1,
    x = rep(1:5, each = 4) # 4 observations at each of 5 values
  ){
    e <- x * rnorm(length(x))
    y <- b0 + b1 * x + e
    model <- lm(y ~ x)
    ci <- confint(model, 2)
    dimnames(ci)[[2]] <- c("lo", "hi") # provide nicer names
    ci
  }
Sims3 <- do(5000) * sim3()
Sims3 <-
  Sims3 %>%
  mutate(status = ifelse(lo > 5, "hi", ifelse(hi < 5, "lo", "good")))
## ----lm-sim09, tidy = FALSE----------------------------------------------
tally( ~ status, data = Sims3) / 5000
binom.test( ~ status, data = Sims3, p = 0.95)
chisq.test(tally( ~ status, data = Sims3), p = c(0.95, 0.025, 0.025))
## ----glm-guassian-sol, tidy = FALSE--------------------------------------
# glm() with a gaussian family reproduces ordinary least squares
glm(stretch ~ distance, data = elasticband,
    family = gaussian()) %>%
  msummary()
lm(stretch ~ distance, data = elasticband) %>% msummary()
lm(stretch ~ distance, data = elasticband) %>% anova()
## ----RegressionVariations, child="RegressionVariations.Rnw", eval=includeChapter[7]----
## ----reg-setup, include = FALSE, cache = FALSE---------------------------
knitr::opts_chunk$set(cache.path = "cache/Reg-")
require(multcomp)
require(effects)
require(fastR2)
## ----punting01, tidy = FALSE---------------------------------------------
# two-predictor regression of punt distance on leg strength and flexibility
punting.lm <-
lm(distance ~ rStrength + rFlexibility, data = Punting)
msummary(punting.lm)
anova(punting.lm)
## ----punting02, fig.keep = "none", tidy = FALSE--------------------------
# the two predictors are themselves correlated
gf_point(rStrength ~ rFlexibility, data = Punting)
lm(rFlexibility ~ rStrength, data = Punting) %>% msummary()
# if all we want is the correlation coefficient, we can get it directly
r <- cor(rStrength ~ rFlexibility, data = Punting); r
r^2
## ----punting02-fig, echo = FALSE, results = "hide", cache = FALSE--------
gf_point(rStrength ~ rFlexibility, data = Punting)
lm(rFlexibility ~ rStrength, data = Punting) %>% msummary()
# if all we want is the correlation coefficient, we can get it directly
r <- cor(rStrength ~ rFlexibility, data = Punting); r
r^2
## ----punting03, eval = FALSE---------------------------------------------
## plot(punting.lm, w = 1:2)
## ----punting03-fig, echo = FALSE, results = "hide"-----------------------
plot(punting.lm, w = 1:2)
## ----punting04, tidy = FALSE---------------------------------------------
# turn the fitted model into a function; get CI and PI at chosen predictor values
puntingFit <- makeFun(punting.lm)
puntingFit(rStrength = 175, rFlexibility = 100,
interval = "confidence")
puntingFit(rStrength = 175, rFlexibility = 100, interval = "prediction")
## ----punting-robustness-sol, opts.label = "fig3"-------------------------
# residual plots for 11 simulated data sets with known structure, for
# comparison against the residual plot from the real fit
n <- nrow(Punting)
x1 <- Punting$rStrength; x2 <- Punting$rFlexibility
do(11) * {
y <- -75 + .5 * x1 + 1.5 * x2 + rnorm(n, 0, 15)
plot(lm(y ~ x1 + x2), w = 1)
}
plot(punting.lm, w = 1)
## ----concrete01----------------------------------------------------------
concrete.lm1 <- lm(strength ~ limestone + water, data = Concrete)
msummary(concrete.lm1)
## ----concrete03----------------------------------------------------------
# regression coefficients via dot products with mean-centered predictor vectors
y <- Concrete$strength
n <- length(y); v0 <- rep(1, n)
v1 <- with(Concrete, limestone - mean(limestone))
v2 <- with(Concrete, water - mean(water))
dot(y, v0) / vlength(v0)^2
mean(y)
dot(y, v1) / vlength(v1)^2
dot(y, v2) / vlength(v2)^2
## ----concrete04----------------------------------------------------------
# fitted values as a sum of projections onto v0, v1, v2
y <- Concrete$strength
ef0 <- project(y, v0)
ef1 <- project(y, v1)
ef2 <- project(y, v2)
ef0 + ef1 + ef2
fitted(concrete.lm1)
## ----concrete05----------------------------------------------------------
# this works here because v1 and v2 are orthogonal (dot product is 0)
dot(v1, v2)
## ----concrete-minus01----------------------------------------------------
# modify data by dropping first observation
Concretemod <- Concrete[-1, ]
concrete.lmmod <- lm(strength ~ limestone + water, data = Concretemod)
coef(concrete.lmmod)
y <- Concretemod$strength
n <- length(y); v0 <- rep(1, n)
v1 <- with(Concretemod, limestone - mean(limestone))
v2 <- with(Concretemod, water - mean(water))
project(y, v0)
mean(y)
dot(y, v1) / vlength(v1)^2
dot(y, v2) / vlength(v2)^2
ef0 <- project(y, v0)
ef1 <- project(y, v1)
ef2 <- project(y, v2)
# sum of projections no longer matches the fitted values (see next chunk)
ef0 + ef1 + ef2
fitted(concrete.lmmod)
## ----concrete-minus02----------------------------------------------------
# v1 and v2 are no longer orthogonal once an observation is dropped
dot(v0, v1)
dot(v0, v2)
dot(v1, v2)
## ----concrete-minus03----------------------------------------------------
# orthogonalize: remove from each predictor its projection onto the other
w1 <- v1 - project(v1, v2)
w2 <- v2 - project(v2, v1)
dot(v0, w1)
dot(v0, w2)
dot(v1, w2)
dot(w1, v2)
## ----concrete-minus04----------------------------------------------------
y <- Concretemod$strength
# make fits using v1 and w2
ef0 <- project(y, v0)
ef1 <- project(y, v1)
ef2 <- project(y, w2)
ef0 + ef1 + ef2
# now try w1 and v2
ef0 <- project(y, v0)
ef1 <- project(y, w1)
ef2 <- project(y, v2)
ef0 + ef1 + ef2
# should match what lm() produces
fitted(concrete.lmmod)
## ----concrete-minus05----------------------------------------------------
# using v1 gives coefficient in model with
# only limestone as a predictor
dot(y, v1) / vlength(v1)^2
lm(strength ~ limestone, data = Concretemod) %>% coef()
# using v2 gives coefficient in model with only water as a predictor
dot(y, v2) / vlength(v2)^2
lm(strength ~ water, data = Concretemod) %>% coef()
# using w1 and w2 gives coefficients in the model
dot(y, w1) / vlength(w1)^2
dot(y, w2) / vlength(w2)^2
coef(concrete.lmmod)
## ----concrete-QR01-------------------------------------------------------
# the two slope coefficients at once, as a matrix product
A <-
rbind(
w1 / vlength(w1)^2,
w2 / vlength(w2)^2)
A %*% y
## ----concrete-QR02, tidy = FALSE-----------------------------------------
# build Q by hand from an orthonormal basis for the column space of X
x1 <- Concretemod$limestone; x2 <- Concretemod$water
X <- cbind(1, x1, x2)
Q <- cbind(
1 / sqrt(nrow(Concretemod)),
v1 / vlength(v1),
w2 / vlength(w2))
t(Q) %*% Q %>% round(4) # should be the identity matrix
## ----concrete-QR03-------------------------------------------------------
R <- t(Q) %*% X; R %>% round(4) # should be upper triangular
Q %*% R %>% round(4) # should be X
X
## ----concrete-QR04-------------------------------------------------------
# coefficients via the QR factorization: solve R b = Q'y
solve(R) %*% t(Q) %*% y
## ----concrete-QR05-------------------------------------------------------
A %>% round(3)
solve(R) %*% t(Q) %>% round(3)
## ----concrete-QR06-------------------------------------------------------
# diagonal of R records the lengths of the basis vectors
diag(R)
c(vlength(v0), vlength(v1), vlength(v2))
## ----qr, tidy = FALSE----------------------------------------------------
# the same factorization extracted from an lm object
QRdata <- data.frame(x = c(1, 1, 5, 5), y = c(1, 2, 4, 6))
qr.model <- lm(y ~ x, data = QRdata)
Q <- qr.model %>% qr() %>% qr.Q(); Q
R <- qr.model %>% qr() %>% qr.R(); R
## ----backsolve-----------------------------------------------------------
# backsolve() exploits the triangular structure of R; matches coef()
backsolve(R, t(Q) %*% QRdata$y)
coef(qr.model)
## ----small-lmfit-sol01---------------------------------------------------
# small worked example: centered and orthogonalized predictor vectors
y <- c(0, 2, 0, 2, -1, 6)
x1 <- c(1, 1, 2, 2, 3, 3); x2 <- c(0, 1, 1, 2, 1, 3)
v0 <- rep(1, length(y))
v1 <- x1 - mean(x1); v2 = x2 - mean(x2)
w1 <- v1 - project(v1, v2)
w2 <- v2 - project(v2, v1)
## ----small-lmfit-sol02---------------------------------------------------
#
# obtaining model fits by projection
#
p0 <- project(y, v0); p0
p1 <- project(y, v1); p1
p2 <- project(y, v2); p2
q1 <- project(y, w1); q1
q2 <- project(y, w2); q2
#
# this won't be a correct fit because dot(v1, v2) != 0
#
p0 + p1 + p2
## ----small-lmfit-sol03---------------------------------------------------
#
# here is the correct fit
#
p0 + q1 + p2
p0 + p1 + q2
## ----small-lmfit-sol04---------------------------------------------------
#
# we can compare the results with those from lm()
#
model <- lm(y ~ x1 + x2); fitted(model)
#
# this won't work to get the coefficients:
#
b1.wrong <- (p1/v1); b1.wrong
b2.wrong <- (p2/v2); b2.wrong
## ----small-lmfit-sol05---------------------------------------------------
#
# now let's get the coefficients correctly:
# (elementwise ratio of a projection to its basis vector is constant,
# and that constant is the coefficient)
#
b1 <- (q1/w1); b1
b2 <- (q2/w2); b2
a0 <- (p0/v0); a0
b0 <- a0 - b1*mean(x1) - b2*mean(x2); b0
coef(model)
## ----small-QR-sol--------------------------------------------------------
X <- cbind(1, x1, x2)
Q <- cbind(
v0 / vlength(v0),
v1 / vlength(v1),
w2 / vlength(w2)
)
# orthogonality check for Q
t(Q) %*% Q %>% round(3)
# solve QR = X for R and check that it is upper diagonal
R <- t(Q) %*% X; R %>% round(3)
# check that X = QR (up to round off)
range(X - Q %*% R)
# find coefficients
solve(R) %*% t(Q) %*% y
# check that this matches coefficients from lm()
range( solve(R) %*% t(Q) %*% y - coef(model) )
## ----concrete-mods01-----------------------------------------------------
# define several models
concrete.lm0 <- lm(strength ~ limestone + water, data = Concrete)
concrete.lm1 <- lm(strength ~ -1 + limestone + water, data = Concrete)
concrete.lm2 <- lm(strength ~ water, data = Concrete)
concrete.lm3 <- lm(strength ~ limestone, data = Concrete)
concrete.lm4 <- lm(strength ~ 1, data = Concrete)
concrete.lm5 <- lm(strength ~ I(limestone + water), data = Concrete)
## ----concrete-mods02-----------------------------------------------------
msummary(concrete.lm0)
## ----concrete-mods03-----------------------------------------------------
# each nested comparison reproduces one test from the full-model summary
anova(concrete.lm1, concrete.lm0) # with/without intercept
anova(concrete.lm2, concrete.lm0) # with/without limestone
anova(concrete.lm3, concrete.lm0) # with/without water
anova(concrete.lm4, concrete.lm0) # with/without limestone and water
## ----concrete-mods04-----------------------------------------------------
confint(concrete.lm0)
## ----concrete-rand01, warning = FALSE------------------------------------
# compare a real predictor against pure noise (rand(7) adds 7 random columns)
concrete.lm1 <- lm(strength ~ limestone, data = Concrete)
anova(concrete.lm1)
concrete.r7 <- lm(strength ~ limestone + rand(7), data = Concrete)
anova(concrete.r7)
## ----include = FALSE-----------------------------------------------------
RSS1 <- sum(resid(concrete.lm1)^2)
RSS0 <- sum(resid(concrete.lm0)^2)
## ----concrete-rand02-----------------------------------------------------
anova(concrete.lm0)
## ----concrete-rand03, fig.keep = "none", message = FALSE, warning = FALSE, seed = 123----
# sum-of-squares comparison: does water beat random predictors?
SSplot(
lm(strength ~ limestone + water, data = Concrete),
lm(strength ~ limestone + rand(7), data = Concrete), n = 1000)
## ----concrete-rand03-fig, echo = FALSE, results = "hide", message = FALSE, warning = FALSE, seed = 123----
SSplot(
lm(strength ~ limestone + water, data = Concrete),
lm(strength ~ limestone + rand(7), data = Concrete), n = 1000)
last_plot() + xlim(0, 2)
## ----concrete-rand04, fig.keep = "none", message = FALSE, warning = FALSE, seed = 123----
SSplot(
lm(strength ~ water + limestone, data = Concrete),
lm(strength ~ water + rand(7), data = Concrete), n = 100)
## ----concrete-rand04-fig, echo = FALSE, results = "hide", message = FALSE, warning = FALSE, seed = 123----
SSplot(
lm(strength ~ water + limestone, data = Concrete),
lm(strength ~ water + rand(7), data = Concrete), n = 100)
last_plot() + xlim(0, 2)
## ----concrete-aic--------------------------------------------------------
# these two methods give different numerical values
AIC(concrete.lm0)
AIC(concrete.lm1)
extractAIC(concrete.lm0)
extractAIC(concrete.lm1)
# but differences between models are equivalent
AIC(concrete.lm0) - AIC(concrete.lm1)
extractAIC(concrete.lm0)[2] - extractAIC(concrete.lm1)[2]
## ----concrete-plot, fig.keep = "none"------------------------------------
plot(concrete.lm0, which = c(1, 2, 3, 5))
## ----concrete-plot-fig, echo = FALSE, results = "hide"-------------------
plot(concrete.lm0, which = c(1, 2, 3, 5))
## ----punting05, fig.keep = "none"----------------------------------------
# original regression with two predictors
punting.lm <- lm( distance ~ rStrength + rFlexibility, data = Punting)
# partial regressions of y and x1 on x2
punting.lmy2 <- lm(distance ~ rFlexibility, data = Punting)
punting.lm12 <- lm(rStrength ~ rFlexibility, data = Punting)
# residuals vs residuals
punting.rvr1 <- lm(resid(punting.lmy2) ~ resid(punting.lm12))
# the slope of rvr matches the coefficient from y ~ x1 + x2; intercept is 0
coef(punting.rvr1) %>% round(4)
coef(punting.lm) %>% round(4)
# rvr and original model have the same residuals
gf_point(resid(punting.rvr1) ~ resid(punting.lm))
## ----punting05-fig, echo = FALSE, results = "hide"-----------------------
# original regression with two predictors
punting.lm <- lm( distance ~ rStrength + rFlexibility, data = Punting)
# partial regressions of y and x1 on x2
punting.lmy2 <- lm(distance ~ rFlexibility, data = Punting)
punting.lm12 <- lm(rStrength ~ rFlexibility, data = Punting)
# residuals vs residuals
punting.rvr1 <- lm(resid(punting.lmy2) ~ resid(punting.lm12))
# the slope of rvr matches the coefficient from y ~ x1 + x2; intercept is 0
coef(punting.rvr1) %>% round(4)
coef(punting.lm) %>% round(4)
# rvr and original model have the same residuals
gf_point(resid(punting.rvr1) ~ resid(punting.lm))
## ----punting06, fig.keep = "none"----------------------------------------
# partial regressions of y and x2 on x1
punting.lmy1 <- lm(distance ~ rStrength, data = Punting)
punting.lm21 <- lm(rFlexibility ~ rStrength, data = Punting)
# residuals vs residual
punting.rvr2 <- lm(resid(punting.lmy1) ~ resid(punting.lm21))
# partial regression plots (a.k.a. added-variable plots)
gf_lm(resid(punting.lmy2) ~ resid(punting.lm12)) %>%
gf_point()
gf_lm(resid(punting.lmy1) ~ resid(punting.lm21)) %>%
gf_point()
## ----punting07, opts.label = "fig1", fig.keep = "none"-------------------
# the car package automates added-variable plots
car::avPlots(punting.lm, id.n = 1)
## ----punting06-fig, echo = FALSE, results = "hide"-----------------------
# partial regressions of y and x2 on x1
punting.lmy1 <- lm(distance ~ rStrength, data = Punting)
punting.lm21 <- lm(rFlexibility ~ rStrength, data = Punting)
# residuals vs residual
punting.rvr2 <- lm(resid(punting.lmy1) ~ resid(punting.lm21))
# partial regression plots (a.k.a. added-variable plots)
gf_lm(resid(punting.lmy2) ~ resid(punting.lm12)) %>%
gf_point()
gf_lm(resid(punting.lmy1) ~ resid(punting.lm21)) %>%
gf_point()
## ----punting07-fig, echo = FALSE, results = "hide", opts.label = "fig1"----
car::avPlots(punting.lm, id.n = 1)
## ----punting08, fig.keep = "none"----------------------------------------
# refit without observation 3 to gauge its influence on the coefficients
punting.lm3 <-
lm(distance ~ rStrength + rFlexibility, Punting[-3, ])
punting.lm %>% summary() %>% coef()
punting.lm3 %>% summary() %>% coef()
car::avPlots(punting.lm3, id.n = 1)
## ----punting08-fig, echo = FALSE, results = "hide", opts.label = "fig1"----
punting.lm3 <-
lm(distance ~ rStrength + rFlexibility, Punting[-3, ])
punting.lm %>% summary() %>% coef()
punting.lm3 %>% summary() %>% coef()
car::avPlots(punting.lm3, id.n = 1)
## ----concrete-effects01, fig.keep = "none"-------------------------------
# effect plots: predicted strength vs one predictor, holding the other fixed
require(effects)
Effect(c("water", "limestone"), concrete.lm0) %>%
plot("water")
## ----concrete-effects02, fig.keep = "none"-------------------------------
Effect(c("water", "limestone"), concrete.lm0) %>%
plot("limestone")
## ----concrete-effects01-fig, echo = FALSE, results = "hide", opts.label = "fig1"----
require(effects)
Effect(c("water", "limestone"), concrete.lm0) %>%
plot("water")
## ----concrete-effects02-fig, echo = FALSE, results = "hide", opts.label = "fig1"----
Effect(c("water", "limestone"), concrete.lm0) %>%
plot("limestone")
## ----concrete-effects03, fig.keep = "none"-------------------------------
# add partial residuals to the effect plot
Effect(
c("water", "limestone"), concrete.lm0, partial.resid = TRUE) %>%
plot("water")
## ----concrete-effects03-fig, echo = FALSE, results = "hide", opts.label = "fig1"----
Effect(
c("water", "limestone"), concrete.lm0, partial.resid = TRUE) %>%
plot("water")
## ----concrete-effect04---------------------------------------------------
# interaction model
concrete.lm6 <-
lm(strength ~ limestone + water + limestone:water, data = Concrete)
## ----concrete-effect05, fig.keep = "none", opts.label = "fig1"-----------
lm(strength ~ limestone + water + limestone:water,
data = Concrete) %>%
Effect(c("water", "limestone"), . , partial.residuals = TRUE) %>%
plot("water")
## ----concrete-effect05-fig, echo = FALSE, results = "hide", opts.label = "fig1"----
lm(strength ~ limestone + water + limestone:water,
data = Concrete) %>%
Effect(c("water", "limestone"), . , partial.residuals = TRUE) %>%
plot("water")
## ----concrete-effect06---------------------------------------------------
# compare additive and interaction model summaries
lm(strength ~ limestone + water, data = Concrete) %>%
msummary()
lm(strength ~ limestone + water + limestone * water, data = Concrete) %>%
msummary()
## ----effect-sim, fig.keep = "none", seed = 1234--------------------------
# simulated data: y1 is truly additive, y2 has a genuine x1:x2 interaction;
# effect plots with partial residuals help diagnose which model fits
D <- tibble(
x1 = runif(100, 0, 10),
x2 = runif(100, 0, 10),
y1 = 5 + 2 * x1 + 3 * x2 + rnorm(100, sd = 4),
y2 = 5 + 2 * x1 + 3 * x2 - x1 * x2 + rnorm(100, sd = 4)
)
lm(y1 ~ x1 + x2, data = D) %>%
Effect(c("x1", "x2"), ., partial.residuals = TRUE) %>%
plot("x1", main = "additive model; y1")
lm(y1 ~ x1 + x2 + x1*x2, data = D) %>%
Effect(c("x1", "x2"), ., partial.residuals = TRUE) %>%
plot("x1", main = "interaction model; y1")
lm(y2 ~ x1 + x2, data = D) %>%
Effect(c("x1", "x2"), ., partial.residuals = TRUE) %>%
plot("x1", main = "additive model; y2")
lm(y2 ~ x1 + x2 + x1*x2, data = D) %>%
Effect(c("x1", "x2"), ., partial.residuals = TRUE) %>%
plot("x1", main = "interaction model; y2")
## ----effect-sim-fig, echo = FALSE, results = "hide", seed = 1234, opts.label = "fig1"----
D <- tibble(
x1 = runif(100, 0, 10),
x2 = runif(100, 0, 10),
y1 = 5 + 2 * x1 + 3 * x2 + rnorm(100, sd = 4),
y2 = 5 + 2 * x1 + 3 * x2 - x1 * x2 + rnorm(100, sd = 4)
)
lm(y1 ~ x1 + x2, data = D) %>%
Effect(c("x1", "x2"), ., partial.residuals = TRUE) %>%
plot("x1", main = "additive model; y1")
lm(y1 ~ x1 + x2 + x1*x2, data = D) %>%
Effect(c("x1", "x2"), ., partial.residuals = TRUE) %>%
plot("x1", main = "interaction model; y1")
lm(y2 ~ x1 + x2, data = D) %>%
Effect(c("x1", "x2"), ., partial.residuals = TRUE) %>%
plot("x1", main = "additive model; y2")
lm(y2 ~ x1 + x2 + x1*x2, data = D) %>%
Effect(c("x1", "x2"), ., partial.residuals = TRUE) %>%
plot("x1", main = "interaction model; y2")
## ----utilities-kwh01-----------------------------------------------------
# clean the utilities data: drop a bad meter reading and warm months,
# and express electricity use per billing day
Utilities2 <-
Utilities %>%
filter(year > 2000 | month > 6) %>% # remove bad meter reading
filter(temp <= 60) %>% # remove warm months
mutate(kwhpday = kwh / billingDays)
## ----utilities-kwh02, fig.keep = "none"----------------------------------
# fit additive and interaction models
ut.lm <- lm(thermsPerDay ~ temp + kwhpday, data = Utilities2)
ut.lmint <- lm(thermsPerDay ~ temp * kwhpday, data = Utilities2)
msummary(ut.lm)
msummary(ut.lmint)
# residual diagnostics for each model
plot(ut.lm, 1:2)
plot(ut.lmint, 1:2)
# effect plots (with partial residuals) for temperature in each model
ut.lm %>%
Effect(c("temp", "kwhpday"), . , partial.residuals = TRUE) %>%
plot("temp", sub = "additive model")
ut.lmint %>%
Effect(c("temp", "kwhpday"), . , partial.residuals = TRUE) %>%
# typo fix: subtitle previously read "interation model"
plot("temp", sub = "interaction model", alternating = FALSE)
## ----utilities-kwh02-fig, echo = FALSE, results = "hide"-----------------
# fit additive and interaction models
ut.lm <- lm(thermsPerDay ~ temp + kwhpday, data = Utilities2)
ut.lmint <- lm(thermsPerDay ~ temp * kwhpday, data = Utilities2)
msummary(ut.lm)
msummary(ut.lmint)
plot(ut.lm, 1:2)
plot(ut.lmint, 1:2)
ut.lm %>%
Effect(c("temp", "kwhpday"), . , partial.residuals = TRUE) %>%
plot("temp", sub = "additive model")
ut.lmint %>%
Effect(c("temp", "kwhpday"), . , partial.residuals = TRUE) %>%
# typo fix: subtitle previously read "interation model"
plot("temp", sub = "interaction model", alternating = FALSE)
## ----utilities-kwh03-----------------------------------------------------
# interaction-model intercept and slope evaluated at kwhpday = 25,
# compared with the additive-model coefficients
coef(ut.lmint)[1] + coef(ut.lmint)[3] * 25
coef(ut.lmint)[2] + coef(ut.lmint)[4] * 25
coef(ut.lm)
## ----utilities-month01, fig.keep = "none"--------------------------------
# remove first few observations because of bad meter read
Utilities3 <-
Utilities %>% filter(year > 2000 | month > 6)
# quadratic-in-month model for gas use
ut.lm3 <- lm(thermsPerDay ~ month + I(month^2), data = Utilities3)
msummary(ut.lm3)
fit3 <- makeFun(ut.lm3)
gf_point(thermsPerDay ~ month, data = Utilities3) %>%
gf_function(fit3, color = "red", alpha = 0.6)
ut.lm3 %>% Effect("month", ., partial.residuals = TRUE) %>% plot("month")
plot(ut.lm3, w = 1:2)
## ----utilities-month-fig, echo = FALSE, results = "hide"-----------------
# remove first few observations because of bad meter read
Utilities3 <-
Utilities %>% filter(year > 2000 | month > 6)
ut.lm3 <- lm(thermsPerDay ~ month + I(month^2), data = Utilities3)
msummary(ut.lm3)
fit3 <- makeFun(ut.lm3)
gf_point(thermsPerDay ~ month, data = Utilities3) %>%
gf_function(fit3, color = "red", alpha = 0.6)
ut.lm3 %>% Effect("month", ., partial.residuals = TRUE) %>% plot("month")
plot(ut.lm3, w = 1:2)
## ----utilities-month02, fig.keep = "none"--------------------------------
# shift months so the seasonal peak is not split across the year boundary
Utilities3 <- Utilities3 %>%
mutate(monthShifted = (month - 2) %% 12)
ut.lm4 <-
lm(thermsPerDay ~ monthShifted + I(monthShifted^2), data = Utilities3)
msummary(ut.lm4)
fit4 <- makeFun(ut.lm4)
gf_point(thermsPerDay ~ monthShifted, data = Utilities3) %>%
gf_function(fit4, color = "red", alpha = 0.6)
ut.lm4 %>% Effect("monthShifted", ., partial.residuals = TRUE) %>%
plot("monthShifted")
plot(ut.lm4, w = 1:2)
## ----utilities-month02-fig, echo = FALSE, results = "hide"---------------
Utilities3 <- Utilities3 %>%
mutate(monthShifted = (month - 2) %% 12)
ut.lm4 <-
lm(thermsPerDay ~ monthShifted + I(monthShifted^2), data = Utilities3)
msummary(ut.lm4)
fit4 <- makeFun(ut.lm4)
gf_point(thermsPerDay ~ monthShifted, data = Utilities3) %>%
gf_function(fit4, color = "red", alpha = 0.6)
ut.lm4 %>% Effect("monthShifted", ., partial.residuals = TRUE) %>%
plot("monthShifted")
plot(ut.lm4, w = 1:2)
## ----utilities-month03---------------------------------------------------
# orthogonal polynomials give the same fitted values as raw polynomials
ut.lm4a <-
lm(thermsPerDay ~ poly(monthShifted, 2), data = Utilities3)
msummary(ut.lm4a)
favstats( ~ (fitted(ut.lm4a) - fitted(ut.lm4)))
## ----utilities-month-sol-------------------------------------------------
ut.mod <-
Utilities3 %>%
mutate(monthShifted2 = (month + 5) %% 12) %>%
lm(thermsPerDay ~ poly(monthShifted2, 2), data = .)
msummary(ut.mod)
plot(ut.mod, 1:2)
ut.mod %>% Effect("monthShifted2", ., partial.residuals = TRUE) %>%
plot("monthShifted2")
## ----eval = FALSE--------------------------------------------------------
## px <- poly(Utilities2$monthShifted, 2); px
## ------------------------------------------------------------------------
Utilities3 <-
Utilities %>% filter(year > 2000 | month > 6) %>%
mutate(monthShifted = (month - 2) %% 12)
model1 <-
lm(thermsPerDay ~ monthShifted, data = Utilities3)
model2 <-
lm(thermsPerDay ~ monthShifted + I(monthShifted^2), data = Utilities3)
model1poly <-
lm(thermsPerDay ~ poly(monthShifted, 1), data = Utilities3)
model2poly <-
lm(thermsPerDay ~ poly(monthShifted, 2), data = Utilities3)
## ----gpa01---------------------------------------------------------------
# GPA predicted from SAT math, SAT verbal, and ACT
gpa.lm <- lm(gpa ~ satm + satv + act, data = GPA)
msummary(gpa.lm)
## ----gpa02---------------------------------------------------------------
gpa.lm1<- lm(gpa ~ satm, data = GPA)
msummary(gpa.lm1)
## ----gpa03---------------------------------------------------------------
# how correlated are the predictors with each other?
gpa.lm2 <- lm(satm ~ satv + act, data = GPA); msummary(gpa.lm2)
gpa.lm3 <- lm(satm ~ satv, data = GPA); msummary(gpa.lm3)
gpa.lm4 <- lm(satm ~ act, data = GPA); msummary(gpa.lm4)
## ----gpa04---------------------------------------------------------------
gpa.lm5 <- lm(gpa ~ act + satv, data = GPA); msummary(gpa.lm5)
gpa.lm6 <- lm(satv ~ act, data = GPA); msummary(gpa.lm6)
## ----gpa-mct01-----------------------------------------------------------
# fit some models
#
gpa.lm <- lm(gpa ~ satm + satv + act, data = GPA)
gpa.lma <- lm(gpa ~ -1 + satm + satv + act, data = GPA)
#
# model comparison tests for 5 p-values in msummary(gpa.lm)
#
anova(gpa.lma, gpa.lm)
## ----gpa-mct-sol01-------------------------------------------------------
# fit some models
gpa.lm <- lm(gpa ~ satm + satv + act, data = GPA)
gpa.lma <- lm(gpa ~ -1 + satm + satv + act, data = GPA)
gpa.lmb <- lm(gpa ~ satv + act, data = GPA)
gpa.lmc <- lm(gpa ~ satm + act, data = GPA)
gpa.lmd <- lm(gpa ~ satm + satv, data = GPA)
gpa.lme <- lm(gpa ~ 1, data = GPA)
## ----gpa-mct-sol02-------------------------------------------------------
# model comparison tests for 5 p-values in msummary(gpa.lm)
anova(gpa.lma, gpa.lm)
anova(gpa.lmb, gpa.lm)
## ----gpa-mct-sol03-------------------------------------------------------
anova(gpa.lmc, gpa.lm)
anova(gpa.lmd, gpa.lm)
## ----gpa-mct-sol04-------------------------------------------------------
anova(gpa.lme, gpa.lm)
msummary(gpa.lm)
## ----gpa-mct-sol05-------------------------------------------------------
# combined SAT verses subscore
gpa.lmf <- lm(gpa ~ I(satv + satm) + act, data = GPA)
anova(gpa.lmf, gpa.lm)
## ----pheno-weight-sol----------------------------------------------------
# hand-computed t tests for specific coefficient values
# (estimates and SEs taken from a model summary shown in the text)
# testing beta_1 = 2
t <- ( 1.0754 - 2.0) / 0.0121; t
2 * pt(-abs(t) , df = 2237)
# testing beta_1 = 1
t <- ( 1.0754- 1.0) / 0.0121; t
2 * pt(-abs(t) , df = 2237)
# testing beta_2 = 1
t <- ( 0.8942 - 1.0) / 0.0302; t
2 * pt(-abs(t) , df = 2237)
## ----students-sol01------------------------------------------------------
summary(Students)
## ----students-sol02------------------------------------------------------
summary(Students)
model <- lm(ACT ~ SAT, data = Students); msummary(model)
## ----students-sol03------------------------------------------------------
confint(model)
confint(lm( act ~ I(satm + satv), data = GPA))
## ----pheno-weight01------------------------------------------------------
# log-log model: weight as a power function of waist and height
pheno.lm <-
lm(log(weight) ~ log(waist) + log(height), data = Pheno)
msummary(pheno.lm)
## ----pheno-weight02------------------------------------------------------
confint(pheno.lm)
## ----pheno-weight03, fig.keep = "none"-----------------------------------
plot(pheno.lm, w = 1:2)
## ----pheno-weight-fig, echo = FALSE, results = "hide"--------------------
plot(pheno.lm, w = 1:2)
## ----pheno-weight04, fig.keep = "none"-----------------------------------
# residual spread by decile of the fitted values
gf_dhistogram( ~ fitted(pheno.lm))
gf_boxplot(resid(pheno.lm) ~ ntiles(fitted(pheno.lm), 10)) %>%
gf_labs(x = "fitted value deciles")
## ----pheno-weight04-fig, echo = FALSE, results = "hide"------------------
gf_dhistogram( ~ fitted(pheno.lm))
gf_boxplot(resid(pheno.lm) ~ ntiles(fitted(pheno.lm), 10)) %>%
gf_labs(x = "fitted value deciles")
## ----pheno-sex-sol-------------------------------------------------------
# refit separately by sex and compare
pheno.male <-
lm(log(weight) ~ log(waist) + log(height), data = Pheno %>% filter(sex == "M"))
pheno.female <-
lm(log(weight) ~ log(waist) + log(height), data = Pheno %>% filter(sex == "F"))
msummary(pheno.male)
msummary(pheno.female)
plot(pheno.male) # males only
plot(pheno.female) # females only
plot(pheno.lm) # all subjects
## ----pheno-case-sol01, opts.label = "figbig"-----------------------------
# refit separately by type-2-diabetes status
pheno.case <- lm(log(weight) ~ log(waist) + log(height),
data = Pheno %>% filter(t2d == "case"))
pheno.control<- lm(log(weight) ~ log(waist) + log(height),
data = Pheno %>% filter(t2d == "control"))
msummary(pheno.case)
msummary(pheno.control)
## ----pheno-case-sol02, opts.label = "figbig"-----------------------------
plot(pheno.case) # cases only
plot(pheno.control) # controls only
plot(pheno.lm) # all subjects
## ----pheno-plain-sol-----------------------------------------------------
# untransformed model vs log-log model: R^2, AIC, and diagnostics
pheno.plain <- lm(weight ~ waist + height, data = Pheno)
c(plain = rsquared(pheno.plain), transformed = rsquared(pheno.lm))
c(plain = AIC(pheno.plain), transformed = AIC(pheno.lm))
plot(pheno.plain, w = 2)
plot(pheno.lm, w = 2)
## ----coag01, fig.keep = "none"-------------------------------------------
# one-way ANOVA example: coagulation time by diet
data(coagulation, package = "faraway")
favstats(coag ~ diet, data = coagulation)
gf_point(coag ~ diet, data = coagulation)
gf_boxplot(coag ~ diet, data = coagulation)
## ----coag01-fig, echo = FALSE, results = "hide"--------------------------
data(coagulation, package = "faraway")
favstats(coag ~ diet, data = coagulation)
gf_point(coag ~ diet, data = coagulation)
gf_boxplot(coag ~ diet, data = coagulation)
## ----coag02--------------------------------------------------------------
coag.lm <- lm(coag ~ diet, data = coagulation)
msummary(coag.lm)
## ----coag03, tidy = FALSE------------------------------------------------
# ANOVA as a comparison against the intercept-only model
coag.lm <- lm(coag ~ diet, data = coagulation)
coag.lm1 <- lm(coag ~ 1, data = coagulation)
anova(coag.lm1, coag.lm)
## ----coag04--------------------------------------------------------------
anova(coag.lm)
## ----coag05--------------------------------------------------------------
# compute SSE/SSM/SST by hand to match the ANOVA table
data(coagulation, package = "faraway")
# group-by-group tally
coagulation %>%
mutate(grand.mean = mean(coag)) %>%
group_by(diet) %>%
summarise(n = n(), group.mean = mean(coag),
SSE = sum((coag - group.mean)^2),
SSM = sum((group.mean - grand.mean)^2),
SST = sum((coag - grand.mean)^2))
# individual tally
coagulation <-
coagulation %>%
mutate(grand.mean = mean(coag)) %>%
group_by(diet) %>% mutate(group.mean = mean(coag)) %>%
ungroup()
coagulation %>% sample(5)
data.frame(
SST = sum( ~ (coag - grand.mean)^2, data = coagulation),
SSE = sum( ~ (coag - group.mean)^2, data = coagulation),
SSM = sum( ~ (group.mean - grand.mean)^2, data = coagulation))
## ----coag04r-------------------------------------------------------------
anova(coag.lm)
## ----coag02r-------------------------------------------------------------
coag.lm <- lm(coag ~ diet, data = coagulation)
msummary(coag.lm)
## ----coag06, eval = FALSE------------------------------------------------
## model.matrix(coag.lm)
## ----coag-alt01----------------------------------------------------------
# alternative parameterization: one coefficient per group, no intercept
coag.altmodel <- lm(coag ~ -1 + diet, data = coagulation)
msummary(coag.altmodel)
## ----coag-alt02, eval = FALSE--------------------------------------------
## model.matrix(coag.altmodel);
## ----coag-recenter-------------------------------------------------------
# recenter the response at the grand mean before fitting group means
data(coagulation, package = "faraway")
Coag0 <-
coagulation %>%
mutate(grand.mean = mean(coag)) %>%
group_by(diet) %>%
mutate(coag0 = coag - grand.mean) %>%
ungroup()
lm(coag0 ~ -1 + diet, data = Coag0) %>%
msummary()
## ----airp01--------------------------------------------------------------
mean(pollution ~ location, data = AirPollution)
## ----airp02--------------------------------------------------------------
airp.lm <- lm(pollution ~ location, data = AirPollution)
anova(airp.lm)
## ----seed = 1234, include = FALSE, digits = 2----------------------------
Study <- data.frame(
type = rep(LETTERS[1:3], each = 5),
yield = rnorm(15, mean = rep(c(17.5, 19, 20), each = 5), sd = 0.75)
)
## ----digits = 3----------------------------------------------------------
favstats(yield ~ type, data = Study)
## ----anova-table-sol-----------------------------------------------------
favstats(yield ~ type, data = Study)
anova(lm(yield ~ type, data = Study))
group.means <- round(c(mean(yield ~ type, data = Study)), 1); group.means
y.bar <- 5 * sum(group.means) / 15; y.bar
group.sds <- round(sd(yield ~ type, data = Study), 3); group.sds
tibble(
SSE = sum(4 * group.sds^2), MSE = SSE / 12,
SSM = sum(5 * (group.means - y.bar)^2), MSM = SSM / 2,
F = MSM/MSE, p = 1 - pf(F, 2, 12))
## ----petstress-sol01-----------------------------------------------------
pet.lm <- lm(rate ~ group, data = PetStress)
favstats(rate ~ group, data = PetStress)
## ----petstress-sol02-----------------------------------------------------
anova(pet.lm)
## ----petstress-sol03-----------------------------------------------------
msummary(pet.lm)
## ----airp-modcomp01, tidy = FALSE----------------------------------------
# Model-comparison approach to testing contrasts: fit restricted models and
# compare to the full one-way model with anova().
# convert location to a numeric variable for convenience
AirP <- AirPollution %>%
mutate(loc = as.numeric(location))
model <- lm(pollution ~ location, data = AirP)
# restricted model: locations 1 and 2 share a mean, location 3 free
model2 <- lm(pollution ~ 1 + (loc == 3), data = AirP)
anova(model2, model)
## ----airp-modcomp02------------------------------------------------------
# build a variable that makes the model easier to describe
AirP <- AirP %>% mutate(x = (loc == 2) + 0.5 * (loc == 3))
model3 <- lm(pollution ~ 1 + x, data = AirP)
anova(model3, model)
## ----airp-modcomp03, tidy = FALSE----------------------------------------
# build two variables that make the model easier to describe
AirP <- AirP %>% mutate(
x1 = (loc == 1) + 0.5 * (loc == 3),
x2 = (loc == 2) + 0.5 * (loc == 3))
model3 <- lm(pollution ~ -1 + x1 + x2, data = AirP)
anova(model3, model)
## ----airp-vectors--------------------------------------------------------
# Orthogonal contrast vectors tested directly via projections.
# dot() and the unit vectors below follow the book's linear-algebra treatment;
# 202/3 is the MSE of the full model (3 residual df).
u1 <- 1/2 * c(1, 1, -1, -1, 0, 0)
u2 <- 1 / sqrt(12) * c(1, 1, 1, 1, -2, -2)
dot(AirPollution$pollution, u1)
dot(AirPollution$pollution, u2)
t1 <- dot(AirPollution$pollution, u1) / sqrt(202/3); t1
t2 <- dot(AirPollution$pollution, u2) / sqrt(202/3); t2
# squared t statistics equal the corresponding F statistics
t1^2
t2^2
2 * pt( - abs(t1), df = 3)
2 * pt( - abs(t2), df = 3)
## ----coag07--------------------------------------------------------------
# Multiple comparisons: TukeyHSD() (on aov/lm) vs. multcomp's glht().
msummary(coag.lm)
## ----airp-TukeyHSD-------------------------------------------------------
airp.lm <- lm(pollution ~ location, data = AirPollution)
TukeyHSD(airp.lm)
## ----airp-glht01, fig.keep = "none"--------------------------------------
require(multcomp)
# glht() + mcp() builds all pairwise ("Tukey") comparisons of location
airp.cint <- confint(glht(airp.lm, mcp(location = "Tukey")))
airp.cint
plot(airp.cint)
# mplot() (mosaic) renders TukeyHSD results with ggplot2 ("gg") graphics
mplot(TukeyHSD(airp.lm), system = "gg") %>%
gf_theme(legend.position = "top") %>%
gf_labs(title = "")
## ----airp-glht01-fig, echo = FALSE, results = "hide", cache = FALSE------
# (figure-producing duplicate of the previous chunk)
require(multcomp)
airp.cint <- confint(glht(airp.lm, mcp(location = "Tukey")))
airp.cint
plot(airp.cint)
mplot(TukeyHSD(airp.lm), system = "gg") %>%
gf_theme(legend.position = "top") %>%
gf_labs(title = "")
# plot(airp.cint)
# mplot(TukeyHSD(airp.lm), system = "gg")
## ----coag-TukeyHSD-------------------------------------------------------
coag.lm <- lm(coag ~ diet, data = coagulation)
TukeyHSD(coag.lm)
## ----coag-glht, fig.keep = "none"----------------------------------------
require(multcomp)
coag.glht <- glht(coag.lm, mcp(diet = "Tukey"))
msummary(coag.glht)
plot(confint(coag.glht))
mplot(TukeyHSD(coag.lm), system = "gg") %>%
gf_theme(legend.position = "top")
## ----coag-glht-fig, echo = FALSE, results = "hide", message = FALSE------
require(multcomp)
coag.glht <- glht(coag.lm, mcp(diet = "Tukey"))
msummary(coag.glht)
plot(confint(coag.glht))
mplot(TukeyHSD(coag.lm), system = "gg") %>%
gf_theme(legend.position = "top")
## ----airp-glht02, tidy = FALSE-------------------------------------------
# Custom contrasts: rows are coefficient vectors applied to model coefficients
airp.lm1 <- lm(pollution ~ location, data = AirPollution)
# specify contrasts by giving the coefficients
contr <- rbind(
c(0, 1, 0),
c(0, 0.5, -1))
# we can give our contrasts custom names if we like
contr1 <- rbind(
"hill - plains" = c(0, 1, 0),
"suburb - urban" = c(0, 0.5, -1))
msummary(glht(airp.lm1, contr1))
## ----airp-glht03, tidy = FALSE-------------------------------------------
# these look nicer if we parameterize differently in the model
airp.lm2 <- lm(pollution ~ -1 + location, data = AirPollution)
contr2 <- rbind(
"hill - plains" = c(1, -1, 0),
"suburb - urban" = c(1, 1, -2))
msummary(glht(airp.lm2, contr2))
## ----airp-glht04, tidy = FALSE-------------------------------------------
# using mcp() to help build the contrasts
airp.lm3 <- lm(pollution ~ location, data = AirPollution)
contr3 <-
mcp(location = rbind(
"hill - plains" = c(1, -1, 0),
"suburb - urban" = c(1, 1, -2)
))
msummary(glht(airp.lm3, contr3))
## ----airp-glht05---------------------------------------------------------
# unadjusted p-values
2 * pt(-0.731, df = 3)
2 * pt(-2.533, df = 3)
## ----airp-glht06, tidy = FALSE-------------------------------------------
# a single contrast needs no multiplicity adjustment
airp.lm4 <- lm(pollution ~ location, data = AirPollution)
contr4 <- mcp(location = rbind(
"hill - plains" = c(1, -1, 0)))
msummary(glht(airp.lm4, contr4))
## ----cholesterol01, fig.keep = "none"------------------------------------
# Cholesterol study (multcomp): one-way ANOVA on 5 treatments, with the
# long level names ("1time", "2times", ...) shortened via gsub().
data(cholesterol, package = "multcomp")
cholesterol <- cholesterol %>%
mutate(trt = factor(gsub("drug", "", gsub("times*", "x", trt))))
chol.lm <- lm(response ~ trt, data = cholesterol)
plot(chol.lm, w = c(5, 2)) # diagnostic plots
msummary(chol.lm)
anova(chol.lm)
## ----cholesterol01-fig, echo = FALSE, results = "hide"-------------------
# (figure-producing duplicate of the previous chunk)
data(cholesterol, package = "multcomp")
cholesterol <- cholesterol %>%
mutate(trt = factor(gsub("drug", "", gsub("times*", "x", trt))))
chol.lm <- lm(response ~ trt, data = cholesterol)
plot(chol.lm, w = c(5, 2)) # diagnostic plots
msummary(chol.lm)
anova(chol.lm)
## ----cholesterol02, fig.keep = "none"------------------------------------
chol.glht <- confint(glht(chol.lm, mcp(trt = "Tukey")))
msummary(chol.glht)
plot(confint(chol.glht))
## ----cholesterol02-fig, echo = FALSE, results = "hide", opts.label = "figtall"----
chol.glht <- confint(glht(chol.lm, mcp(trt = "Tukey")))
msummary(chol.glht)
plot(confint(chol.glht))
## ----cholesterol03, tidy = FALSE-----------------------------------------
# Planned (non-pairwise) contrasts among the 5 treatment means
glht(chol.lm,
mcp(trt =
rbind(
"1time - 2times" = c(1, -1, 0, 0, 0),
"(1 or 2 times) - 4times" = c(0.5, 0.5, -1, 0, 0),
"new - old" = c(2, 2, 2, -3, -3)/6,
"drugD - drugE" = c(0, 0, 0, 1, -1))
)) %>%
summary()
## ----cholesterol04, tidy = FALSE-----------------------------------------
confint(glht(chol.lm, mcp(trt =
rbind(
"new - old" = c(2, 2, 2, -3, -3)/6)
)))
## ----dunnet--------------------------------------------------------------
# Dunnett contrasts: compare each diet against the reference (first) level.
# Fix: multcomp's contrast type is spelled "Dunnett"; the original "Dunnet"
# only worked via partial matching inside contrMat()/match.arg().
glht(coag.lm, mcp(diet = "Dunnett")) %>%
summary()
## ----taste-anova01, fig.keep = "none"------------------------------------
# TasteTest: one-way then two-way (scr x liq) ANOVA with interaction.
favstats(score ~ type, data = TasteTest)
gf_point(score ~ type, data = TasteTest)
taste.lm <- lm(score ~ type, data = TasteTest)
anova(taste.lm)
taste.cint <- confint(glht(taste.lm, mcp(type = "Tukey"))); taste.cint
plot(taste.cint)
## ----taste-anova01-fig, echo = FALSE, results = "hide"-------------------
# (figure-producing duplicate of the previous chunk)
favstats(score ~ type, data = TasteTest)
gf_point(score ~ type, data = TasteTest)
taste.lm <- lm(score ~ type, data = TasteTest)
anova(taste.lm)
taste.cint <- confint(glht(taste.lm, mcp(type = "Tukey"))); taste.cint
plot(taste.cint)
## ----cholesterol05-------------------------------------------------------
# Model comparison: full 5-level model vs. 3-group model (drugD, drugE, rest)
chol.lm1 <- lm(response ~ trt, data = cholesterol)
cholesterol <-
cholesterol %>%
mutate(x1 = trt == "drugD", x2 = trt == "drugE")
chol.lm2 <- lm(response~ 1 + x1 + x2 , cholesterol)
anova(chol.lm1, chol.lm2)
## ----bugs----------------------------------------------------------------
# sqrt transformation of counts before comparing trap colors
model <- aov(sqrt(trapped) ~ color, data = Bugs)
TukeyHSD(model)
model <- lm(sqrt(trapped) ~ color, data = Bugs)
glht(model, mcp(color = "Tukey")) %>%
summary()
## ----taste-anova02-------------------------------------------------------
# df_stats (mosaic): cell means and sds for the 2x2 design
df_stats(score ~ scr + liq, data = TasteTest, mean, sd)
## ----taste-anova03-------------------------------------------------------
taste.lm <- lm(score ~ scr * liq, data = TasteTest)
anova(taste.lm)
## ----taste-anova04-------------------------------------------------------
taste.lm <- lm(score ~ scr * liq, data = TasteTest)
msummary(taste.lm)
## ----taste-anova05-------------------------------------------------------
# Hand-built orthogonal contrast columns for main effects and interaction
M <- cbind( # model matrix
"C1" = rep(c(-1, -1, 1, 1), each = 4)/8, # C1
"C2" = rep(c(-1, 1, -1, 1), each = 4)/8, # C2
"C3" = rep(c(1, -1, -1, 1), each = 4)/4 # C3
)
taste.lm2 <- lm(score ~ M, data = TasteTest)
msummary(taste.lm2)
## ----taste-anova06-------------------------------------------------------
# Numeric 0/1 recoding of the factors, then nested-model F tests for the
# interaction and for each main effect against the full model (Omega).
NTaste <- data.frame(score = TasteTest$score,
scr = as.numeric(TasteTest$scr) - 1,
liq = as.numeric(TasteTest$liq) - 1,
scrliq = ( as.numeric(TasteTest$scr) -1 ) *
( as.numeric(TasteTest$liq) -1 )
); NTaste
Omega <- lm(score ~ scr * liq, data= TasteTest)
M <- model.matrix(Omega)
M2 <- cbind(M[, 3], M[, 2] - 2 * M[, 4])
M3 <- cbind(M[, 2], M[, 3] - 2 * M[, 4])
omega1 <- lm(score ~ scr + liq, data = TasteTest)
omega2 <- lm(score ~ M2, data = TasteTest)
omega2a <- lm(score ~ liq + I(scr - 2 * scrliq), data = NTaste)
omega3 <- lm(score ~ M3, data = TasteTest)
omega3a <- lm(score~ scr + I(liq - 2 * scrliq), data = NTaste)
anova(omega1, Omega) # test for interaction
# test main effect for scr
# anova(omega2a, Omega) # this gives the same result as line below
anova(omega2, Omega)
# test main effect for liq
# anova(omega3a, Omega) # this gives the same result as line below
anova(omega3, Omega)
## ----noise01-------------------------------------------------------------
# MathNoise: additive vs. interaction two-way models, with interaction plots.
noise.lm <- lm(score ~ noise + group, data = MathNoise)
anova(noise.lm)
favstats(score ~ group, data = MathNoise)
## ----noise02-------------------------------------------------------------
noise.lm2 <- lm(score ~ noise * group, data = MathNoise)
anova(noise.lm2)
## ----noise03, fig.keep = "none"------------------------------------------
# Interaction plots: jittered points plus group-mean profiles (mean_se)
gf_jitter(score ~ noise, color = ~ group, data = MathNoise,
width = 0.15, height = 0) %>%
gf_line(stat = "summary", group = ~ group, fun.data = mean_se)
gf_jitter(score ~ group, color = ~ noise, data = MathNoise,
width = 0.15, height = 0) %>%
gf_line(stat = "summary", group = ~ noise, fun.data = mean_se)
## ----noise03-fig, echo = FALSE, results = "hide", seed = 1234------------
# (figure-producing duplicate of the previous chunk)
gf_jitter(score ~ noise, color = ~ group, data = MathNoise,
width = 0.15, height = 0) %>%
gf_line(stat = "summary", group = ~ group, fun.data = mean_se)
gf_jitter(score ~ group, color = ~ noise, data = MathNoise,
width = 0.15, height = 0) %>%
gf_line(stat = "summary", group = ~ noise, fun.data = mean_se)
## ----poison01------------------------------------------------------------
# Poison survival times: diagnostics motivate the 1/time transformation below
poison.lm <-
lm(time ~ factor(poison) * factor(treatment), data = Poison)
anova(poison.lm)
## ----poison02, fig.keep = "none"-----------------------------------------
plot(poison.lm, w = 1:2)
## ----poison02-fig, echo = FALSE, results = "hide"------------------------
plot(poison.lm, w = 1:2)
## ----poison03, fig.keep = "none"-----------------------------------------
# reciprocal response (death rate) gives much better-behaved residuals
poison.lm2 <-
lm(1/time ~ factor(poison) * factor(treatment), data = Poison)
plot(poison.lm2, w = 1:2)
## ----poison03-fig, echo = FALSE, results = "hide"------------------------
poison.lm2 <-
lm(1/time ~ factor(poison) * factor(treatment), data = Poison)
plot(poison.lm2, w = 1:2)
## ----poison-trans-anova--------------------------------------------------
anova(poison.lm2)
## ----pallets01-----------------------------------------------------------
# Pallets: blocking on day; compare one-way vs. blocked two-way ANOVA
pallets.lm1 <- lm(pallets ~ employee, data = Pallets)
anova(pallets.lm1)
## ----pallets02-----------------------------------------------------------
pallets.lm2 <- lm(pallets ~ day + employee, data = Pallets)
anova(pallets.lm2)
## ----pallets03, fig.keep = "none"----------------------------------------
gf_point(pallets ~ day, data = Pallets,
color = ~ employee) %>%
gf_line(group = ~ employee)
## ----pallets03-fig, tidy = FALSE, echo = FALSE, results = "hide"---------
gf_point(pallets ~ day, data = Pallets,
color = ~ employee) %>%
gf_line(group = ~ employee)
## ----pallets04-----------------------------------------------------------
msummary(pallets.lm2)
## ----pallets-perc--------------------------------------------------------
# Re-express each employee's output as a percent of the day's total
Pallets2 <-
Pallets %>%
group_by(day) %>% mutate(total = sum(pallets)) %>%
group_by(day, employee) %>% mutate(perc = 100 * pallets / total)
## ----pallets-perc-sol01--------------------------------------------------
## ----palets-perc---------------------------------------------------------
anova(lm(perc ~ employee, data = Pallets2))
confint(glht(lm(perc ~ employee, data = Pallets2),
mcp(employee = "Tukey")))
## ----pallets-perc-sol02--------------------------------------------------
anova(lm(perc ~ employee + day, data = Pallets2))
confint(glht(lm(perc ~ employee + day, data = Pallets2), mcp(employee = "Tukey")))
plot(confint(glht(lm(perc ~ employee + day, Pallets2), mcp(employee = "Tukey"))))
## ----domedata------------------------------------------------------------
# Stepwise model selection (domedata) and collinearity diagnostics (seatpos).
data(domedata, package = "alr4")
# NOTE(review): msummary() applied to a data frame — presumably falls through
# to summary(); confirm this is the intended output.
msummary(domedata)
## ----domedata-step01-----------------------------------------------------
dome.lm1 <-
lm(Dist ~ Velocity + Angle + BallWt + BallDia + Cond, data = domedata)
step(dome.lm1, direction = "both", trace = FALSE)
## ----domedata-step02-----------------------------------------------------
# forward selection from the intercept-only model, within the given scope
step(
lm(Dist ~ 1, data = domedata), # starting point
scope = Dist ~ Velocity + Angle + BallWt + BallDia + Cond,
direction = "forward", trace = FALSE)
## ----seatpos01-----------------------------------------------------------
data(seatpos, package = "faraway")
# `.` = all other columns as predictors
seatpos.lm1 <- lm(hipcenter ~ ., data = seatpos)
msummary(seatpos.lm1)
## ----seatpos02-----------------------------------------------------------
# variance inflation factors flag the strong collinearity among body sizes
faraway::vif(seatpos.lm1)
## ----seatpos03, fig.keep = "none"----------------------------------------
round(cor(seatpos), 2)
GGally::ggpairs(seatpos)
corrgram::corrgram(seatpos, order = TRUE)
## ----seatpos03-fig, echo = FALSE, results = "hide", opts.label = "figbig"----
round(cor(seatpos), 2)
GGally::ggpairs(seatpos)
corrgram::corrgram(seatpos, order = TRUE)
## ----seatpos04-----------------------------------------------------------
# smaller model with one size variable reduces the VIFs
seatpos.lm2 <- lm(hipcenter ~ Age + Weight + Ht, data = seatpos)
msummary(seatpos.lm2)
faraway::vif(seatpos.lm2)
## ----seatpos05-----------------------------------------------------------
# principal components: replace the correlated size variables with PC1
pc <- with(seatpos,
princomp(cbind(HtShoes, Ht, Seated, Arm, Thigh, Leg),
scores = TRUE))
msummary(pc, loadings = TRUE)
seatpos.lmpc <-lm(hipcenter ~ Age + Weight + pc$scores[, 1], data = seatpos)
msummary(seatpos.lmpc)
faraway::vif(seatpos.lmpc)
## ----eval = FALSE--------------------------------------------------------
## x <- 0.65*HtShoes + 0.65*Ht + 0.27*Seated + 0.15*Arm + 0.17*Thigh + 0.18*Leg
## ----seatpos06-----------------------------------------------------------
# trace=0 turns off intermediate reporting
seatpos.lmstep <- step(seatpos.lm1, trace = 0)
msummary(seatpos.lmstep)
faraway::vif(seatpos.lmstep)
## ----students-gpa--------------------------------------------------------
# Logistic regression: graduate (yes/no) from ACT and/or high-school GPA;
# nested models compared via deviance differences (chi-square).
act.glm <-
glm(grad ~ ACT, data = Students, family = "binomial")
gpa.glm <-
glm(grad ~ hsGPA, data = Students, family = "binomial")
actgpa.glm <-
glm(grad ~ ACT + hsGPA, data = Students, family = "binomial")
msummary(actgpa.glm) %>% coef()
c(gpa = deviance(gpa.glm),
act = deviance(act.glm),
actgpa = deviance(actgpa.glm))
# small p-value suggests that adding gpa is helpful
1 - pchisq(deviance(act.glm) - deviance(actgpa.glm), df = 2)
# larger p-value here compared with act.glm suggests better fit
1 - pchisq(deviance(actgpa.glm), df = df.residual(actgpa.glm))
## ----ice01, fig.keep = "none", message = FALSE---------------------------
# Ice therapy study: reshape wide temperature columns (b0:r12000) to long
# form; column names encode phase (first letter) and time (digits).
require(tidyverse)
Ice2 <-
Ice %>%
gather("key", "temp", b0:r12000) %>%
separate(key, c("phase", "time"), sep = 1) %>%
mutate(time = readr::parse_number(time), subject = as.character(subject))
Ice2 %>% filter(phase == "t") %>%
gf_line(temp ~ time, group = ~ subject, color = ~sex) %>%
gf_facet_grid( treatment ~ location, scales = "free_x") %>%
gf_labs(
title = "Temperature during treatment phase (3 conditions, 2 locations)")
## ----ice01-fig, opts.label = "fig1", echo = FALSE, results = "hide"------
# (figure-producing duplicate of the previous chunk)
require(tidyverse)
Ice2 <-
Ice %>%
gather("key", "temp", b0:r12000) %>%
separate(key, c("phase", "time"), sep = 1) %>%
mutate(time = readr::parse_number(time), subject = as.character(subject))
Ice2 %>% filter(phase == "t") %>%
gf_line(temp ~ time, group = ~ subject, color = ~sex) %>%
gf_facet_grid( treatment ~ location, scales = "free_x") %>%
gf_labs(
title = "Temperature during treatment phase (3 conditions, 2 locations)")
## ----ice02---------------------------------------------------------------
# baseline ("b") means at time 1930, by location and treatment
Ice2 %>% filter(time == 1930, phase == "b") %>%
group_by(location, treatment, phase) %>%
summarise(mean(temp))
## ----ice03, digits = 4---------------------------------------------------
Ice1930 <- Ice2 %>% filter(time == 1930)
base.lmint <-
lm(temp ~ location * treatment, data = Ice1930 %>% filter(phase == "b"))
anova(base.lmint)
## ----ice04, fig.keep = "none"--------------------------------------------
# additive model on the baseline measurements, with diagnostics
base.lmadd <-
lm(temp ~ location + treatment, data = Ice1930 %>% filter(phase == "b"))
anova(base.lmadd)
plot(base.lmadd, w = c(5, 2))
## ----ice04-fig, echo = FALSE, results = "hide"---------------------------
base.lmadd <-
lm(temp ~ location + treatment, data = Ice1930 %>% filter(phase == "b"))
anova(base.lmadd)
plot(base.lmadd, w = c(5, 2))
## ----ice05---------------------------------------------------------------
require(multcomp)
confint(glht(base.lmadd, mcp(treatment = "Tukey")), level = 0.9)
## ----ice06---------------------------------------------------------------
# analyze the within-subject change (treatment - baseline at time 1930)
ice.trt <- lm(t1930 - b1930 ~ treatment * location, data = Ice)
anova(ice.trt)
## ----ice07---------------------------------------------------------------
ice.trt2 <- lm(t1930 - b1930 ~ treatment, data = Ice,
subset = location == "intramuscular")
msummary(ice.trt2)
confint(glht(ice.trt2, mcp(treatment = "Tukey")), level = 0.90)
## ----ice08, digits = 4---------------------------------------------------
# compare locations via surface-minus-intramuscular differences
Ice3 <-
Ice2 %>% filter(time == 1930) %>%
spread(location, temp)
anova(lm(surface - intramuscular ~ treatment,
data = Ice3 %>% filter(phase == "t")))
## ----fusion01------------------------------------------------------------
# FUSION study: logistic regression of type 2 diabetes on genotype dose.
# merge FUSION1 and Pheno keeping only id's that are in both
Fusion1m <- merge(FUSION1, Pheno, by = "id", all = FALSE)
## ----fusion02------------------------------------------------------------
tally(t2d ~ Gdose, Fusion1m)
## ----fusion03, tidy = FALSE----------------------------------------------
f1.glm1 <-
glm( factor(t2d) ~ Gdose, Fusion1m, family = binomial)
f1.glm1
## ----fusion04------------------------------------------------------------
# exponentiated coefficients are odds ratios
coef(f1.glm1)
exp(coef(f1.glm1))
## ----fusion05------------------------------------------------------------
msummary(f1.glm1)
## ----fusion06------------------------------------------------------------
# likelihood ratio test using the deviances printed by msummary() above
1 - pchisq(3231.4 - 3213.0, df = 1)
## ----fusion07------------------------------------------------------------
# same test computed from the null and fitted models directly
f1.glm0 <- glm(factor(t2d) ~ 1, Fusion1m, family = binomial)
deviance(f1.glm0)
deviance(f1.glm1)
df1 <- df.residual(f1.glm0) - df.residual(f1.glm1); df1
1 - pchisq(deviance(f1.glm0) - deviance(f1.glm1), df = df1)
## ----fusion08------------------------------------------------------------
# add sex as a covariate
f1.glm2 <-
glm(factor(t2d) ~ Gdose + sex, data = Fusion1m,
family = binomial())
msummary(f1.glm2)
## ----fusion09------------------------------------------------------------
deviance(f1.glm0)
deviance(f1.glm2)
df2 <- df.residual(f1.glm0) - df.residual(f1.glm2); df2
1 - pchisq(deviance(f1.glm0) - deviance(f1.glm2), df = df2)
## ----step, fig.keep = "none"---------------------------------------------
# Step exercise: model heart-rate change (HR - restHR) by height and frequency
step.lm <- lm(HR - restHR ~ height * freq, data = Step)
msummary(step.lm)
anova(step.lm)
gf_line(HR - restHR ~ freq, data = Step, color = ~height,
group = ~ height, stat = "summary", fun.data = mean_se) %>%
gf_jitter(width = 0.15, height = 0)
## ----step-fig, echo = FALSE, results = "hide"----------------------------
step.lm <- lm(HR - restHR ~ height * freq, data = Step)
msummary(step.lm)
anova(step.lm)
gf_line(HR - restHR ~ freq, data = Step, color = ~height,
group = ~ height, stat = "summary", fun.data = mean_se) %>%
gf_jitter(width = 0.15, height = 0)
## ----rat01, fig.keep = "none"--------------------------------------------
# RatPoison: blocked design (location) with flavor as the factor of interest
rat.lm <- lm(consumption ~ location + flavor, data = RatPoison)
anova(rat.lm)
plot(rat.lm, w=c(1, 2, 5))
gf_point(consumption ~ flavor, color = ~ location, data = RatPoison,
width = 0.15, height = 0) %>%
gf_line(stat = "summary", group = ~ location, fun.data = mean_se)
## ----rat01-fig, echo = FALSE, results = "hide"---------------------------
rat.lm <- lm(consumption ~ location + flavor, data = RatPoison)
anova(rat.lm)
plot(rat.lm, w=c(1, 2, 5))
gf_point(consumption ~ flavor, color = ~ location, data = RatPoison,
width = 0.15, height = 0) %>%
gf_line(stat = "summary", group = ~ location, fun.data = mean_se)
## ----rat02---------------------------------------------------------------
# blocking shrinks the residual sd; compare sigma with and without the block
rat.lm1 <- lm(consumption ~ flavor, data = RatPoison)
anova(rat.lm)
anova(rat.lm1)
summary(rat.lm)$sigma
summary(rat.lm1)$sigma
summary(rat.lm1)$sigma^2/summary(rat.lm)$sigma^2
## ----concrete-perm01-----------------------------------------------------
# Permutation tests: F test for the whole model, then per-coefficient tests
# by shuffling one predictor at a time (mosaic's do() and shuffle()).
concrete.lm <-
lm(strength ~ limestone + water, data = Concrete)
## ----concrete-perm02-----------------------------------------------------
msummary(concrete.lm)
obsF <- msummary(concrete.lm)$fstat[1]; obsF
## ----concrete-perm03, seed = 12345, digits = 4---------------------------
# null distribution of F from shuffling the response
Null.F <-
do(5000) * lm(shuffle(strength) ~ limestone + water, data = Concrete)
Null.F %>% head(3)
prop1( ~ (F >= obsF), data = Null.F)
## ----concrete-perm04, seed = 12345---------------------------------------
Null.t2 <-
do(10000) * {
lm(strength ~ shuffle(limestone) + water, data = Concrete) %>%
coef()
}
Null.t3 <-
do(10000) * {
lm(strength ~ limestone + shuffle(water), data = Concrete) %>%
coef()
}
Null.t2 %>% head(3)
# two-sided p-values (doubling the one-sided proportion)
2 * prop1( ~ (limestone >= coef(concrete.lm)[2]), data = Null.t2)
2 * prop1( ~ (water <= coef(concrete.lm)[3]), data = Null.t3)
## ----concrete-perm-sol---------------------------------------------------
gf_dhistogram( ~ Intercept, data = Null.F, bins = 25) %>%
gf_vline(xintercept = c(0, mean(~strength, data = Concrete)))
## ----smoke-perm01--------------------------------------------------------
# Permutation version of the chi-squared test of independence
smokeTab <-
tally( ~ student + parents, data = FamilySmoking)
smokeTab
chisq.test(smokeTab)
observedStat <- chisq.test(smokeTab) %>% stat(); observedStat
Stats <- do(2000) * {
tally( ~ shuffle(student) + parents, data = FamilySmoking) %>%
chisq.test() %>%
stat()
}
prop( ~ (X.squared >= observedStat), data = Stats)
# CI for a p-value estimated as 0 out of 2000 resamples
binom.test(0, 2000, alternative = "less") %>% confint()
## ----smoke-perm03, seed = 1234, digits = 4-------------------------------
# chisq.test() can simulate the null distribution itself
chisq.test(smokeTab, simulate.p.value = TRUE)
## ----smoke-perm02, seed = 12345, digits = 4------------------------------
chisq.test(smokeTab, simulate.p.value = TRUE, B = 10000) %>%
pval()
chisq.test(smokeTab, simulate.p.value = TRUE, B = 100000) %>% pval()
## ----balldrop-nls01, tidy=FALSE------------------------------------------
# Nonlinear least squares: fit time = alpha0 + alpha1 * height^d to the
# BallDrop data, then compare with a log-log linear fit.
balldrop.nls <-
nls(time ~ alpha0 + alpha1 * height^d,
data = BallDrop,
start = list(alpha0 = 0, alpha1 = 1, d = 1))
## ----balldrop-nls02------------------------------------------------------
balldrop.nls %>% coef()
## ----balldrop-nls03------------------------------------------------------
balldrop.nls %>% summary()
## ----balldrop-nls04------------------------------------------------------
balldrop.nls %>% summary() %>% coef()
# profile-likelihood confidence intervals for the nls parameters
balldrop.nls %>% confint()
## ----balldrop-nls05, fig.keep = "none"-----------------------------------
# makeFun() (mosaic) turns the fitted model into a callable function
f <- makeFun(balldrop.nls)
gf_point( time ~ height, data = BallDrop ) %>%
gf_fun(f(height) ~ height, alpha = 0.4)
plot(balldrop.nls)
gf_point(resid(balldrop.nls) ~ fitted(balldrop.nls)) # unstandardized resids
## ----balldrop-nls05-fig, fig.keep = 2:4, results = "hide", echo = FALSE----
f <- makeFun(balldrop.nls)
gf_point( time ~ height, data = BallDrop ) %>%
gf_fun(f(height) ~ height, alpha = 0.4)
plot(balldrop.nls)
gf_point(resid(balldrop.nls) ~ fitted(balldrop.nls)) # unstandardized resids
## ----balldrop-nls06, fig.keep='none'-------------------------------------
# log-log linear model; exp(intercept) is comparable to alpha1 above
balldrop.lm <- lm( log(time) ~ log(height), data = BallDrop)
balldrop.lm %>% coef()
balldrop.lm %>% coef() %>% getElement(1) %>% exp()
balldrop.nls %>% coef()
g <- makeFun(balldrop.lm)
gf_point(time ~ height, data = BallDrop) %>%
gf_fun(f(height) ~ height, alpha = 0.4, size = 0.8) %>%
gf_fun(g(height) ~ height, col = "red",
linetype = 2, alpha = 0.7, size = 0.8)
## ----balldrop-nls06-fig, fig.keep='last', results = "hide", echo = FALSE, opts.label = "fig1"----
balldrop.lm <- lm( log(time) ~ log(height), data = BallDrop)
balldrop.lm %>% coef()
balldrop.lm %>% coef() %>% getElement(1) %>% exp()
balldrop.nls %>% coef()
g <- makeFun(balldrop.lm)
gf_point(time ~ height, data = BallDrop) %>%
gf_fun(f(height) ~ height, alpha = 0.4, size = 0.8) %>%
gf_fun(g(height) ~ height, col = "red",
linetype = 2, alpha = 0.7, size = 0.8)
## ----balldrop-nls07, fig.keep = "none"-----------------------------------
# side-by-side residual diagnostics for the two fits
plot(balldrop.nls)
plot(balldrop.lm, w = 1)
## ----balldrop-nls08, fig.keep = "none"-----------------------------------
gf_qq( ~ resid(balldrop.nls))
gf_qq( ~ resid(balldrop.lm))
## ----balldrop-nls09, fig.keep = "none"-----------------------------------
gf_point(resid(balldrop.nls) ~ f(BallDrop$height))
gf_point(resid(balldrop.lm) ~ g(BallDrop$height))
## ----balldrop-nls07-fig, results = "hide", echo = FALSE------------------
plot(balldrop.nls)
plot(balldrop.lm, w = 1)
gf_qq( ~ resid(balldrop.nls))
gf_qq( ~ resid(balldrop.lm))
gf_point(resid(balldrop.nls) ~ f(BallDrop$height))
gf_point(resid(balldrop.lm) ~ g(BallDrop$height))
## ----balldrop-nls10------------------------------------------------------
balldrop.lm %>% confint()
balldrop.nls %>% confint()
## ----cooling01, fig.keep = "none"----------------------------------------
# Newton's law of cooling: exponential decay models for water temperature.
gf_line(temp ~ time, data = CoolingWater1) %>%
gf_labs(y = "temp (C)", x = "time (sec)")
## ----cooling01-fig, echo = FALSE-----------------------------------------
gf_line(temp ~ time, data = CoolingWater1) %>%
gf_labs(y = "temp (C)", x = "time (sec)")
## ----cooling02, tidy=FALSE, fig.keep='none'------------------------------
# first attempt: decay to 0 (ignores ambient temperature) — fits poorly
cooling.model1 <-
nls(temp ~ A * exp( -k * time), data = CoolingWater1,
start = list(A = 100, k = 0.01))
f1 <- makeFun(cooling.model1)
gf_point(temp ~ time, data = CoolingWater1, size = 0.6) %>%
gf_fun(f1(time) ~ time, lty = 2, col = "gray60") %>%
gf_labs(y = "temp (C)", x = "time (sec)")
## ----cooling03, tidy = FALSE, fig.keep = "none"--------------------------
# improved model: decay toward an estimated ambient temperature
cooling.model2 <-
nls(temp ~ ambient + A * exp( - k * (1 + time)),
data = CoolingWater1,
start = list(ambient = 20, A = 80, k = 0.01) )
f2 <- makeFun(cooling.model2)
gf_point(temp ~ time, data = CoolingWater1) %>%
gf_fun(f1(time) ~ time, lty = 2, col = "gray60") %>%
gf_fun(f2(time) ~ time, col = "red", size = 0.8) %>%
gf_labs(y = "temp (C)", x = "time (sec)")
## ----cooling03-fig, echo = FALSE, opts.label = "fig1"--------------------
# (figure-producing duplicate of the previous chunk)
cooling.model2 <-
nls(temp ~ ambient + A * exp( - k * (1 + time)),
data = CoolingWater1,
start = list(ambient = 20, A = 80, k = 0.01) )
f2 <- makeFun(cooling.model2)
gf_point(temp ~ time, data = CoolingWater1) %>%
gf_fun(f1(time) ~ time, lty = 2, col = "gray60") %>%
gf_fun(f2(time) ~ time, col = "red", size = 0.8) %>%
gf_labs(y = "temp (C)", x = "time (sec)")
## ----cooling04, fig.keep = "none"----------------------------------------
gf_point(resid(cooling.model2) ~ time, data = CoolingWater1)
plot(cooling.model2)
## ----cooling04-fig, echo = FALSE-----------------------------------------
gf_point(resid(cooling.model2) ~ time, data = CoolingWater1)
plot(cooling.model2)
## ----cooling-water-sol---------------------------------------------------
# exercise solution: same model refit to the CoolingWater2 data set
cooling.model3 <-
nls(temp ~ ambient + A * exp( - k * (1 + time)),
data = CoolingWater2,
start = list(ambient = 20, A = 80, k = 0.01) )
f3 <- makeFun(cooling.model3)
gf_point(temp ~ time, data = CoolingWater2) %>%
gf_fun(f3(time) ~ time, col = "red", size = 0.8) %>%
gf_labs(y = "temp (C)", x = "time (sec)")
gf_point(resid(cooling.model3) ~ time, data = CoolingWater2)
gf_point(resid(cooling.model3) ~ fitted(cooling.model3))
## ----rat-liver-----------------------------------------------------------
# rat liver data (alr4): interaction model for drug dose in liver
data(rat, package = "alr4")
rat.lm <- lm(y ~ BodyWt * LiverWt, data = rat)
msummary(rat.lm)
## ----eggprod01-sol-------------------------------------------------------
# eggprod (faraway): randomized block design, block = position, treat = genotype
data(eggprod, package = "faraway")
eggprod.lm <- lm(eggs ~ block + treat, data = eggprod)
anova(eggprod.lm)
## ----eggprod02-sol-------------------------------------------------------
msummary(eggprod.lm)
coef(msummary(eggprod.lm))
## ----eggprod03-sol-------------------------------------------------------
eggprod.lm1way <- lm(eggs ~ treat, data = eggprod)
anova(eggprod.lm1way)
# width of Tukey HSD intervals:
2 * qtukey(0.95, 3, 6) / sqrt(4) * msummary(eggprod.lm)$sigma
# TukeyHSD() can automate this:
TukeyHSD(aov(eggs ~ block + treat, eggprod), "treat")
## ------------------------------------------------------------------------
# Contrasts revisited: extract DFE/SSE/MSE from the ANOVA table by name
y <- AirPollution$pollution
model <- lm(pollution ~ location, data = AirPollution)
DFE <- anova(model)["Residuals", "Df"]
SSE <- anova(model)["Residuals", "Sum Sq"]
MSE <- anova(model)["Residuals", "Mean Sq"]
s <- sqrt(MSE); s
## ------------------------------------------------------------------------
# projection of the response onto the unit contrast vector gives an F test
v <- c(1/2, 1/2, 2/2, 2/2, -3/2, -3/2)
kappa <- 1 / vlength(v); kappa
u <- v / vlength(v); u
dot(y, u)
F <- dot(y, u)^2 / MSE; F
1 - pf(F, 1, DFE) # p-value
## ------------------------------------------------------------------------
# equivalent t test for the contrast C = mu1 + 2*mu2 - 3*mu3
C_hat <-
sum( c(1, 2, -3) * mean(pollution ~ location, data = AirPollution))
C_hat
t <- (C_hat - 0) / (s / kappa)
2 * pt( - abs(t), df = DFE)
## ------------------------------------------------------------------------
### model comparison
AirPollution <-
AirPollution %>%
mutate(
x0 = as.numeric(location == levels(location)[1]),
x1 = as.numeric(location == levels(location)[2]),
x2 = as.numeric(location == levels(location)[3])
)
AirPollution
# using mu1 = 3 * mu3 - 2 * mu2
model2 <- lm(pollution ~ 0 + I(x1 - 2*x0) + I(x2 + 3*x0), data = AirPollution)
anova(model2, model)
# using beta_1 = 3/2 * beta_2
model3 <- lm(pollution ~ 1 + I(1.5 * x1 + x2), data = AirPollution)
anova(model3, model)
## ----include = FALSE-----------------------------------------------------
data(AirPollution) # restore data to original form
## ----SeeAlso, child="SeeAlso.Rnw", eval=TRUE-----------------------------
## ----RIntro, child="RIntro.Rnw", eval=includeApp[1]----------------------
# Appendix: installing packages, startup configuration, help system, and
# namespace masking.  Most chunks below are eval = FALSE (commented out).
## ----include = FALSE, cache = FALSE--------------------------------------
knitr::opts_chunk$set(cache.path = "cache/R-")
require(faraway)
require(car)
require(tidyr)
require(ggformula)
theme_set(theme_bw())
## ----install-tidyr, eval = FALSE-----------------------------------------
## # fetch package from CRAN to local machine.
## install.packages("tidyr")
## # load (and attach) the package so it can be used.
## require(tidyr) # or library(tidyr)
## ----install-tidyr-lib, eval = FALSE-------------------------------------
## install.packages("tidyr", lib = "~/R/library")
## ----install-github, eval = FALSE----------------------------------------
## # install.packages("devtools")
## devtools::install_github("hadley/dplyr", build_vignettes = FALSE)
## devtools::install_github("hadley/lazyeval", build_vignettes = FALSE)
## devtools::install_github("rstudio/ggvis", build_vignettes = FALSE)
## ----fastR2-github, eval = FALSE-----------------------------------------
## install_github("rpruim/fastR2")
## ----eval = FALSE--------------------------------------------------------
## install.packages("some-package.tar.gz",
## repos = NULL) # use a file, not a repository
## ----eval = FALSE--------------------------------------------------------
## ?Startup
## ----eval = FALSE--------------------------------------------------------
## # always load my favorite packages
## require(fastR2)
## # adjust lattice settings
## trellis.par.set(theme = col.fastR())
## # adjust ggplot2/ggformula settings
## theme_set(theme_bw())
## ----eval = FALSE--------------------------------------------------------
## TEXINPUTS=:.:./inputs//:$TEXINPUTS
## TEXINPUTS=$TEXINPUTS:/usr/local/texlive/2016/texmf-dist/tex//
## R_PDFLATEXCMD=/Library/TeX/texbin/pdflatex
## R_PAPERSIZE=letter
## R_PDFVIEWER="/usr/bin/open -a skim"
## ----search--------------------------------------------------------------
# search path order determines which package's logit() wins unqualified
search()
find("logit")
## ----logit-faraway-------------------------------------------------------
mosaic::logit(0.3)
faraway::logit(0.3)
car::logit(0.3)
# using percent rather than proportion -- only works in the car version
car::logit(30)
faraway::logit(30)
mosaic::logit(30)
## ----data-package01------------------------------------------------------
# two packages provide a Traffic data set; package = selects which
data(Traffic, package = "MASS")
head(Traffic)
data(Traffic, package = "fastR2")
head(Traffic)
## ----data-package02------------------------------------------------------
head(MASS::Traffic)
head(fastR2::Traffic)
## ----help, eval = FALSE--------------------------------------------------
## ?c
## ?"for"
## ----apropos-------------------------------------------------------------
apropos("hist")
## ----args----------------------------------------------------------------
args(require)
args(sum)
## ----usage---------------------------------------------------------------
formatR::usage(sum)
## ----example, eval = FALSE-----------------------------------------------
## example(gf_histogram)
## ----savehistory, eval = FALSE-------------------------------------------
## savehistory("someRCommandsIalmostLost.R")
## ----package-data--------------------------------------------------------
# first line only necessary if iris is already in use
data(iris)
str(iris) # get a summary of the data set
dim(iris) # just the dimensions
glimpse(iris) # take a quick look at the data
inspect(iris) # another quick look at the data
## ----read-table----------------------------------------------------------
# need header = TRUE because there is a header line.
# could also use read.file() without header = TRUE
Traffic <-
read.table("http://www.calvin.edu/~rpruim/fastR/trafficTufte.txt",
header = TRUE)
Traffic
## ----read-sas------------------------------------------------------------
traffic <-
read.csv("http://www.calvin.edu/~rpruim/fastR/trafficTufte.csv",
na.strings = c(".", "NA", ""))
## ----scan01, eval = FALSE------------------------------------------------
## myData1 <- scan()
## ----include = FALSE-----------------------------------------------------
myData1 <- c(15, 18, 12, 21, 23, 50, 15)
## ----scan02--------------------------------------------------------------
myData1
## ----scan03, eval = FALSE------------------------------------------------
## myData2 <- scan(what = "character")
## ----include = FALSE-----------------------------------------------------
myData2 <- c("red", "red", "orange", "green", "blue", "blue", "red")
## ----scan04--------------------------------------------------------------
myData2
## ----c01-----------------------------------------------------------------
myData1 <- c(15, 18, 12, 21, 23, 50, 15)
myData2 <- c("red", "red", "orange", "green", "blue", "blue", "red")
## ----c02-----------------------------------------------------------------
is.vector(myData1)
is.vector(myData2)
## ----c03-----------------------------------------------------------------
x <- c(first = 10, second = 20); x
names(x) # what are the names?
x["first"]
x[1]
y <- 1:3 # vector without names
names(y) <- c("A", "B", "C") # names added
y
as.vector(y) # vector without the names
## ----dataframe-----------------------------------------------------------
myDataFrame <- data.frame(color = myData2, number = myData1)
myDataFrame
## ----generating-data01---------------------------------------------------
x <- 5:20; x # all integers in a range
# structured sequences
seq(0, 50, by = 5)
seq(0, 50, length = 7)
rep(1:5, each = 3)
rep(1:5, times = 3)
c(1:5, 10, 3:5) # c() concatenates
## ----generating-data02, seed = 1234, fig.keep = "none"-------------------
rnorm(10, mean = 10, sd = 2) # random normal draws
gf_histogram( ~ rnorm(1000, mean = 10, sd = 2), binwidth = 1)
## ----generating-data02-fig, echo = FALSE, results = "hide"---------------
rnorm(10, mean = 10, sd = 2) # random normal draws
gf_histogram( ~ rnorm(1000, mean = 10, sd = 2), binwidth = 1)
## ----generating-data03, seed = 12345-------------------------------------
sample(Births78, 3) # sample 3 rows from Births78
Births78 %>% sample(3) # sample 3 rows from Births78
sample(1:10, size = 5) # random sample of size 5 (w/o replacement)
resample(1:10, size = 10) # random sample of size 10 (w/ replacement)
## ----writingData---------------------------------------------------------
args(write.table)
SomeData <- data.frame(x = 1:3, y = LETTERS[1:3])
SomeData
write.table(SomeData, "SomeData.txt")
write.csv(SomeData, "SomeData.csv")
# this system call should work on a Mac or Linux machine
system("head SomeData.txt SomeData.csv")
## ----savingData----------------------------------------------------------
greeting <- "hello, world!"
save(SomeData, greeting, file = "mystuff.rda") # saves both in 1 file
load("mystuff.rda") # loads both
## ------------------------------------------------------------------------
saveRDS(SomeData, file = "SomeData.rds")
EEE <- readRDS("SomeData.rds")
EEE
## ----mutate01, fig.keep = "none"-----------------------------------------
data(Births78)
Births78 <-
mutate(Births78, runningTotal = cumsum(births))
head(Births78, 3)
gf_line(runningTotal ~ date, data = Births78)
## ----mutate-fig, echo = FALSE, results = "hide"--------------------------
data(Births78)
Births78 <-
mutate(Births78, runningTotal = cumsum(births))
head(Births78, 3)
gf_line(runningTotal ~ date, data = Births78)
## ----mutate02------------------------------------------------------------
CPS85 <- mutate(CPS85, workforce.years = age - 6 - educ)
favstats( ~ workforce.years, data = CPS85)
## ----mutate03------------------------------------------------------------
tally( ~ (exper - workforce.years), data = CPS85)
## ----mutate04, tidy = FALSE----------------------------------------------
HELP2 <- mutate( HELPrct,
newsex = factor(female, labels = c("M", "F")) )
## ----mutate05------------------------------------------------------------
tally( ~ newsex + female, data = HELP2 )
## ----derivedFactor, tidy = FALSE, fig.keep = "none"----------------------
HELP3 <-
mutate(
HELPrct,
risklevel = derivedFactor(
low = sexrisk < 5,
medium = sexrisk < 10,
high = sexrisk >= 10,
.method = "first" # use first rule that applies
)
)
gf_jitter(sexrisk ~ risklevel, data = HELP3,
height = 0.2, width = 0.3, alpha = 0.4)
## ----derivedFactor-fig, echo = FALSE, results = "hide"-------------------
HELP3 <-
mutate(
HELPrct,
risklevel = derivedFactor(
low = sexrisk < 5,
medium = sexrisk < 10,
high = sexrisk >= 10,
.method = "first" # use first rule that applies
)
)
gf_jitter(sexrisk ~ risklevel, data = HELP3,
height = 0.2, width = 0.3, alpha = 0.4)
## ----select01------------------------------------------------------------
CPS1 <- select(CPS85, - workforce.years)
head(CPS1, 2)
## ----select02------------------------------------------------------------
CPS2 <- select(CPS85, workforce.years, exper)
head(CPS2, 2)
## ----select03------------------------------------------------------------
CPSsmall <- select(CPS85, 1:4)
head(CPSsmall, 2)
## ----select04------------------------------------------------------------
head(select(HELPrct, contains("risk")), 2)
## ----chain01-------------------------------------------------------------
HELPrct %>% select(contains("risk")) %>% head(2)
## ----eval = FALSE--------------------------------------------------------
## h(g(f(x), y), z)
## f(x) %>% g(y) %>% h(z)
## ----eval = FALSE--------------------------------------------------------
## bop(scoop(hop(foo_foo, through = forest), up = field_mice), on = head)
## ----eval = FALSE--------------------------------------------------------
## foo_foo %>%
## hop(through = forest) %>%
## scoop(up = field_mice) %>%
## bop(on = head)
## ----chain02, tidy = FALSE-----------------------------------------------
HELPrct %>%
select(ends_with("e")) %>%
head(2)
HELPrct %>%
select(starts_with("h")) %>%
head(2)
HELPrct %>%
select(matches("i[12]")) %>% # regex matching
head(2)
## ----names01-------------------------------------------------------------
names(faithful)
## ----names02-------------------------------------------------------------
names(faithful) <- c("duration", "time_til_next")
head(faithful, 3)
## ----data-revert, eval = TRUE--------------------------------------------
# don't execute this unless you want to revert to the original data
data(faithful)
## ----rename01, fig.keep = "none"-----------------------------------------
data(faithful)
faithful <-
faithful %>%
rename(duration = eruptions, time_til_next = waiting)
faithful %>% head(3)
gf_point(time_til_next ~ duration, data = faithful)
## ----rename01-fig--------------------------------------------------------
data(faithful)
faithful <-
faithful %>%
rename(duration = eruptions, time_til_next = waiting)
faithful %>% head(3)
gf_point(time_til_next ~ duration, data = faithful)
## ----rename02------------------------------------------------------------
data(CPS85) # reload the data
CPS85 %>%
rename(education = educ) %>%
head(4)
## ----rename04, tidy = FALSE----------------------------------------------
CPS85 %>%
select(education = educ, wage, race) %>%
head(3)
## ----filter01, fig.keep = "none"-----------------------------------------
# any logical can be used to create subsets
data(faithful)
faithful2 <-
faithful %>%
rename(duration = eruptions, time_til_next = waiting)
faithfulLong <-
faithful2 %>%
filter(duration > 3)
gf_point(time_til_next ~ duration, data = faithfulLong)
## ----filter01-fig, echo = FALSE, results = "hide"------------------------
# any logical can be used to create subsets
data(faithful)
faithful2 <-
faithful %>%
rename(duration = eruptions, time_til_next = waiting)
faithfulLong <-
faithful2 %>%
filter(duration > 3)
gf_point(time_til_next ~ duration, data = faithfulLong)
## ----filter02, eval = FALSE, tidy = FALSE, fig.keep = "last", fig.show = "hide"----
## gf_point(time_til_next ~ duration,
## data = faithful2 %>% filter( duration > 3))
##
## # this one will use a different viewing window
## gf_point(time_til_next ~ duration, data = faithful2) %>%
## gf_lims(x = c(3, NA))
##
## # Data can also be chained directly into ggformula functions
## faithful2 %>%
## filter( duration > 3) %>%
## gf_point(time_til_next ~ duration)
## ----summarise01---------------------------------------------------------
HELPrct %>%
summarise(x.bar = mean(age), s = sd(age))
## ----summarise02---------------------------------------------------------
HELPrct %>%
group_by(sex, substance) %>%
summarise(x.bar = mean(age), s = sd(age))
## ----summarise03---------------------------------------------------------
favstats(age ~ sex + substance, data = HELPrct)
mean(age ~ sex + substance, data = HELPrct, .format = "table")
sd(age ~ sex + substance, data = HELPrct, .format = "table")
## ----arrange, tidy = FALSE-----------------------------------------------
HELPrct %>%
group_by(sex, substance) %>%
summarise(x.bar = mean(age), s = sd(age)) %>%
arrange(x.bar)
## ----join01--------------------------------------------------------------
head(FUSION1, 3)
head(Pheno, 3)
## ----join02, tidy = FALSE------------------------------------------------
# merge FUSION1 and Pheno keeping only id's that are in both
FUSION1m <- merge(FUSION1, Pheno, by.x = "id", by.y = "id",
all.x = FALSE, all.y = FALSE)
head(FUSION1m, 3)
left_join(Pheno, FUSION1, by = "id") %>% dim()
inner_join( Pheno, FUSION1, by = "id") %>% dim()
# which ids are only in Pheno?
setdiff(Pheno$id, FUSION1$id)
anti_join(Pheno, FUSION1)
## ----join03--------------------------------------------------------------
tally( ~ t2d + genotype + marker, data = FUSION1m)
## ----merge-sol-----------------------------------------------------------
FUSION1m2 <-
FUSION1 %>%
merge(Pheno, by.x = "id", by.y = "id", all.x = FALSE, all.y = FALSE)
Pheno %>% left_join(FUSION1, by = "id") %>% dim()
Pheno %>% inner_join(FUSION1, by = "id") %>% dim()
## ----FUSION-names--------------------------------------------------------
names(FUSION1)
names(FUSION2)
## ----births-range--------------------------------------------------------
data(Births)
range( ~ date, data = Births)
## ----births-sol01--------------------------------------------------------
# Mean births per (month, day) calendar date; show the 4 highest and the
# 4 lowest dates.  Bug fix: the first pipeline grouped by `moth`, a typo
# for `month` (compare the second pipeline), which refers to a
# nonexistent column.
Births %>%
  group_by(month, day) %>%
  summarise(bpd = mean(births)) %>%
  arrange(-bpd) %>% head(4)
Births %>%
  group_by(month, day) %>%
  summarise(bpd = mean(births)) %>%
  arrange(bpd) %>% head(4)
## ----births-sol02--------------------------------------------------------
Births %>%
group_by(month) %>%
summarise(bpd = mean(births)) %>%
arrange(-bpd) %>% head(3)
Births %>%
group_by(month) %>%
summarise(bpd = mean(births)) %>%
arrange(bpd) %>% head(3)
## ----births-sol03--------------------------------------------------------
Births %>%
group_by(wday) %>%
summarise(bpd = mean(births)) %>%
arrange(-bpd)
## ----spread01------------------------------------------------------------
Ut2 <-
Utilities %>%
select(month, year, temp)
Ut2 %>% head(3)
## ----spread02------------------------------------------------------------
require(tidyr)
Ut3 <-
Ut2 %>%
spread(key = month, value = temp)
Ut3 %>% head(4)
## ----gather01------------------------------------------------------------
Ut4 <-
Ut3 %>%
gather(key = month, value = temp, `1` : `12`)
Ut4 %>% head(4)
## ----gather02------------------------------------------------------------
Ut4a <-
Ut3 %>%
gather(key = month, value = temp, 2 : 13)
Ut4a %>% head(4)
## ----mode----------------------------------------------------------------
w <- 2.5; mode(w); length(w)
x <- c(1, 2); mode(x); length(x)
y <- "foo"; mode(y); length(y)
abc <- letters[1:3]
abc; mode(abc); length(abc)
z <- TRUE; mode(z); length(z)
## ----vectors01-----------------------------------------------------------
y[1]; y[2] # not an error to ask for y[2]
abc[3]
abc[6] <- "Z"; abc # NAs fill in to make vector long enough
## ----vectors02-----------------------------------------------------------
u <- c(first = 1, second = 2, third = 3)
u # names are displayed in output
names(u) # show just the names
u["second"] # access by name
names(u) <- c("one", "two", "three") # change the names
names(u)[2] <- "TWO" # change just one name
u
u["first"] # old names gone now
setNames(u, c("a", "b", "c")) # new object is named
u # u remains unchanged
## ----vectors03-----------------------------------------------------------
u * 3
sqrt(u)
## ----vectors04-----------------------------------------------------------
as.numeric(u) # creates nameless version
u # but doesn't change its input
names(u) <- NULL # this removes the names from u
u # no more names
## ----lists---------------------------------------------------------------
L <- list(a = 5, b = "X", c = 2.3, d = c("A", "B")); mode(L); length(L)
L[[4]] # 4th element of list
L[["d"]] # element named "d"
L$d # element named "d"
L[["d"]] <- 1:3; str(L)
L[["b"]] <- NULL; str(L) # removing an item from a list
## ----access--------------------------------------------------------------
xm <- matrix(1:16, nrow = 4); xm
xm[5]
xm[, 2] # this is 1 dimensional (a vector)
xm[, 2, drop = FALSE] # this is 2 dimensional (still a matrix)
## ----rows-and-cols-------------------------------------------------------
DDD <- data.frame(number = 1:5, letter = letters[1:5])
dim(DDD)
nrow(DDD)
ncol(DDD)
names(DDD)
row.names(DDD)
row.names(DDD) <- c("Abe", "Betty", "Claire", "Don", "Ethel")
DDD # row.names affects how a data.frame prints
## ----attributes----------------------------------------------------------
attributes(DDD)
## ----what-is-it----------------------------------------------------------
xm <- matrix(1:16, nrow = 4); xm
mode(xm); class(xm)
c(is.numeric(xm), is.character(xm), is.integer(xm), is.logical(xm))
c(is.vector(xm), is.matrix(xm), is.array(xm))
## ----as-you-like-it------------------------------------------------------
apropos("^as\\.")[1:10] # just a small sample
# convert numbers to strings (this drops attributes)
as.character(xm)
# convert matrix to vector
as.vector(xm)
as.logical(xm)
alpha <- c("a", "1", "b", "0.5")
mode(alpha)
as.numeric(alpha) # can't do the coercion, so NAs are introduced
as.integer(alpha) # notice coercion of 0.5 to 0
## ----vectors05-----------------------------------------------------------
x <- 1:5; y <- seq(10, 60, by = 10); z <- rnorm(10); x; y
y + 1
x * 10
x < 3
x^2
log(x); log(x, base = 10) # natural and base 10 logs
## ----vectors06-----------------------------------------------------------
# compare round() and signif() by binding rowwise into matrix
rbind(round(z, digits = 2), signif(z, digits = 2))
## ----vectors07-----------------------------------------------------------
x <- 1:10; z <- rnorm(100)
mean(z); sd(z); var(z); median(z) # basic statistical functions
range(z) # range returns a vector of length 2
## ----vectors08-----------------------------------------------------------
sum(x); prod(x) # sums and products
z <- rnorm(5); z
sort(z); rank(z); order(z) # sort, rank, order
rev(x) # reverse x
diff(x) # pairwise differences
cumsum(x) # cumulative sum
cumprod(x) # cumulative product
## ----vectors09-----------------------------------------------------------
x <- 1:5; y <- seq(10, 70, by = 10)
x + y
## ----vectors10-----------------------------------------------------------
x <- seq(2, 20, by = 2)
x[1:5]; x[c(1, 4, 7)]
## ----vectors11-----------------------------------------------------------
x <- seq(2, 20, by = 2)
x[c(TRUE, TRUE, FALSE)] # skips every third element (recycling!)
x[x > 10] # more typical use of boolean in selection
## ----vectors12-----------------------------------------------------------
x <- 1:10; x[-7]; x[-c(1, 2, 4, 8)]; x[-length(x)]
## ----vectors13-----------------------------------------------------------
notes <- toupper(letters[1:7]); a <- 1:5; b <- seq(10, 100, by = 10)
toupper(letters[5:10])
paste(letters[1:5], 1:3, sep = "-")
a+b
(a+b)[ a+b > 50]
length((a+b)[a+b > 50])
table(a+b > 50)
## ----function01----------------------------------------------------------
# First (intentionally flawed) version: an R function returns only the
# value of its final expression, so the mean and median computed below
# are silently discarded and the caller sees just sd(x).  This is the
# point being illustrated in the text.
fstats <- function(x) {
  mean(x)    # computed, then discarded
  median(x)  # computed, then discarded
  sd(x)      # only this last value is returned
}
## ----function02----------------------------------------------------------
fstats((1:20)^2)
## ----function03----------------------------------------------------------
# Second version: wrapping each statistic in print() forces all three
# values to be displayed as a side effect when the function runs.  The
# function still returns only its last expression's value (here, the
# value of the final print(), which is sd(x)).
fstats <- function(x) {
  print(mean(x))
  print(median(x))
  print(sd(x))
}
fstats((1:20)^2)
## ----function04----------------------------------------------------------
# Print the mean, median, and sd of x, each rounded to 4 significant
# digits, one per line via cat().  Called for its output; the value of
# the last cat() (NULL) is returned invisibly.
#
# Bug fix: format()'s second positional argument is `trim`, not
# `digits`, so the original format(mean(x), 4) silently ignored the 4
# and printed full precision.  Name the argument explicitly.
altfstats <- function(x) {
  cat(paste(" mean:", format(mean(x), digits = 4), "\n"))
  cat(paste("median:", format(median(x), digits = 4), "\n"))
  cat(paste(" sd:", format(sd(x), digits = 4), "\n"))
}
altfstats((1:20)^2)
## ----function05----------------------------------------------------------
temp <- fstats((1:20)^2)
temp
## ----function06----------------------------------------------------------
fstats <- function(x) {
c(mean(x), median(x), sd(x))
}
fstats((1:20)^2)
## ----function07----------------------------------------------------------
# Final version: return a named numeric vector of summary statistics
# (min, max, mean, median, sd) so callers can capture the values.
fstats <- function(x) {
  c(min = min(x), max = max(x), mean = mean(x),
    median = median(x), sd = sd(x))
}
fstats((1:20)^2)
## ----apply---------------------------------------------------------------
sapply(KidsFeet, class) # determine the class of each variable
lapply(iris, function(x) if (is.numeric(x)) favstats(x) else tally(x))
M <- rbind(1:3, 4:6, 7:9); M
apply(M, 1, sum) # row sums
rowSums(M) # dedicated row sums function
# tapply version of mean(length ~ sex, data = KidsFeet)
tapply(KidsFeet$length, KidsFeet$sex, mean)
## ----gf-histogram-anatomy------------------------------------------------
gf_density()
## ----gf-bar-position, fig.keep = "none"----------------------------------
gf_bar()
gf_bar(~ substance, data = HELPrct, fill = ~ sex)
gf_bar(~ substance, data = HELPrct, fill = ~ sex, position = "dodge")
## ----gf-bar-position-fig, echo = FALSE, results = "hide", message = FALSE----
gf_bar()
gf_bar(~ substance, data = HELPrct, fill = ~ sex)
gf_bar(~ substance, data = HELPrct, fill = ~ sex, position = "dodge")
## ----gf-list-------------------------------------------------------------
apropos("^gf_") # list all function that begin gf_
## ----facets, fig.keep = "none"-------------------------------------------
gf_density( ~ age, data = HELPrct, fill = ~ sex) %>%
gf_facet_grid( substance ~ .)
gf_density( ~ age, data = HELPrct, fill = ~ sex) %>%
gf_facet_grid( substance ~ ., scales = "free_y", space = "free")
gf_density( ~ age, data = HELPrct, fill = ~ sex) %>%
gf_facet_wrap( ~ substance, ncol = 1)
HELPrct %>% select(age, mcs, i1, cesd, substance) %>%
gather(variable, value, age:cesd) %>%
gf_dens( ~ value, color = ~substance) %>%
gf_facet_wrap( ~ variable, scales = "free", ncol = 2)
## ----facets-fig, echo = FALSE--------------------------------------------
gf_density( ~ age, data = HELPrct, fill = ~ sex) %>%
gf_facet_grid( substance ~ .)
gf_density( ~ age, data = HELPrct, fill = ~ sex) %>%
gf_facet_grid( substance ~ ., scales = "free_y", space = "free")
gf_density( ~ age, data = HELPrct, fill = ~ sex) %>%
gf_facet_wrap( ~ substance, ncol = 1)
HELPrct %>% select(age, mcs, i1, cesd, substance) %>%
gather(variable, value, age:cesd) %>%
gf_dens( ~ value, color = ~substance) %>%
gf_facet_wrap( ~ variable, scales = "free", ncol = 2)
## ----labs-theme, fig.keep = "none"---------------------------------------
gf_dens( ~ cesd, color = ~ substance, size = 1.5, data = HELPrct) %>%
gf_labs(
title = "Center for Epidemiologic Studies Depression measure",
subtitle = "(at baseline)",
color = "Abused substance: ",
x = "CESD score",
y = "",
caption = "Source: HELPrct"
) %>%
gf_theme(theme_classic()) %>%
gf_theme(
axis.text.y = element_blank(),
legend.position = "top",
plot.title = element_text(hjust = 0.5, color = "navy"),
plot.subtitle = element_text(hjust = 0.5, color = "navy", size = 12))
## ----labs-theme-fig, echo = FALSE, opts.label = "fig1"-------------------
gf_dens( ~ cesd, color = ~ substance, size = 1.5, data = HELPrct) %>%
gf_labs(
title = "Center for Epidemiologic Studies Depression measure",
subtitle = "(at baseline)",
color = "Abused substance: ",
x = "CESD score",
y = "",
caption = "Source: HELPrct"
) %>%
gf_theme(theme_classic()) %>%
gf_theme(
axis.text.y = element_blank(),
legend.position = "top",
plot.title = element_text(hjust = 0.5, color = "navy"),
plot.subtitle = element_text(hjust = 0.5, color = "navy", size = 12))
## ----scales-functions----------------------------------------------------
apropos("^scale_")
## ----scales, fig.keep = "none"-------------------------------------------
gf_point(length ~ width, data = KidsFeet, color = ~ sex) %>%
gf_lm() %>%
gf_refine(scale_color_manual(values = c(B = "navy", G = "red")))
gf_bar(~ sex, fill = ~ substance, position = "dodge", data = HELPrct) %>%
gf_refine(scale_fill_brewer(type = "qual", palette = 3))
## ----scales-fig, echo = FALSE--------------------------------------------
gf_point(length ~ width, data = KidsFeet, color = ~ sex) %>%
gf_lm() %>%
gf_refine(scale_color_manual(values = c(B = "navy", G = "red")))
gf_bar(~ sex, fill = ~ substance, position = "dodge", data = HELPrct) %>%
gf_refine(scale_fill_brewer(type = "qual", palette = 3))
## ----ggpairs, fig.show = "hide", message = FALSE-------------------------
GGally::ggpairs(iris)
## ----ggpairs-fig, echo = FALSE, message = FALSE, opts.label = "figbig"----
GGally::ggpairs(iris)
## ----whats-up, eval = FALSE----------------------------------------------
## odds <- 1 + 2 * (0:4)
## primes <- c(2, 3, 5, 7, 11, 13)
## length(odds)
## length(primes)
## odds + 1
## odds + primes
## odds * primes
## odds > 5
## sum(odds > 5)
## sum(primes < 5 | primes > 9)
## odds[3]
## odds[10]
## odds[-3]
## primes[odds]
## primes[primes >= 7]
## sum(primes[primes > 5])
## sum(odds[odds > 5])
## odds[10] <- 1 + 2 * 9
## odds
## y <- 1:10
## (x <- 1:5)
## ----whats-up-sol--------------------------------------------------------
## ----chickwt-sol01-------------------------------------------------------
ChickWeight %>%
filter(Time == 21) %>%
arrange(weight) %>%
head(1)
ChickWeight %>%
filter(Time == 21) %>%
arrange(weight) %>%
tail(1)
## ----chickwt-sol02-------------------------------------------------------
Chicks <-
ChickWeight %>%
filter(Time > 15) %>% # remove chicks that were only measured a few times
group_by(Chick) %>%
summarise(
weight = max(weight),
diet = Diet[1],
time = max(Time)
) %>%
ungroup() %>% # need this for arrange to work properly
arrange(weight)
Chicks %>% head(1)
Chicks %>% tail(1)
## ----echo = FALSE, opts.label = "fig1"-----------------------------------
# weatherData has been removed from CRAN
# require(weatherData, quietly = TRUE)
# Temps <- NewYork2013 %>% mutate(city = "NYC") %>%
# bind_rows(Mumbai2013 %>% mutate(city = "Mumbai")) %>%
# bind_rows(London2013 %>% mutate(city = "London")) %>%
# mutate(date = lubridate::date(Time),
# month = lubridate::month(Time)) %>%
# group_by(city, date) %>%
# summarise(
# hi = max(Temperature, na.rm = TRUE),
# lo = min(Temperature, na.rm = TRUE),
# mid = (hi + lo)/2
# )
# gf_linerange(lo + hi ~ date, color = ~ hi, data = Temps) %>%
# gf_facet_grid(city ~ .) %>%
# gf_refine(scale_colour_gradientn(colors = rev(rainbow(5))))
# A similar plot can be made using mosaicData::Weather, but no merging
# is required.
gf_linerange(low_temp + high_temp ~ date, color = ~ high_temp,
data = Weather) %>%
gf_facet_grid(city ~ year, scales = "free") %>%
gf_refine(scale_colour_viridis_c(begin = 0, end = 1, option = "C"))
## ----MathNotation, child="MathNotation.Rnw", eval=includeApp[2]----------
## ----math-setup, include = FALSE, cache = FALSE--------------------------
require(fastR2)
knitr::opts_chunk$set(cache.path = "cache/Math-")
## ----sum-ssol------------------------------------------------------------
(2:5)^2
sum( (2:5)^2 )
## ----some-sums-----------------------------------------------------------
x <- 0:4;
p <- c(1/6,1/3,1/4,1/6,1/12);
sum(p);
sum(p*x);
fractions( sum(p*x) );
sum(p*x^2);
fractions( sum(p*x^2) );
## ----LinearAlgebra, child="LinearAlgebra.Rnw", eval=includeApp[3]--------
## ----LA-setup, include = FALSE-------------------------------------------
knitr::opts_chunk$set(cache.path = "cache/LA-")
## ----vec-mult01----------------------------------------------------------
x <- c(1, 2, 3)
4 * x
## ----vec-mult02----------------------------------------------------------
u <- c(1, 2, 3)
v <- c(4, 5, 6)
u * v
## ----vec-dot-------------------------------------------------------------
dot(u, v)
## ----vlength-def---------------------------------------------------------
vlength
## ----length-vlength------------------------------------------------------
x <- 1:5
length(x)
vlength(x)
## ----vec-proj, tidy = FALSE----------------------------------------------
x <- c(1, 2, 3); v <- c(1, 1, 1)
project(x, v)
dot(x, v) * v / vlength(v)^2
project(x, v, type = 'coef')
dot(x, v) / vlength(v)^2
project(x, v, type = 'length')
dot(x, v) / vlength(v)
## ----vdecomp2------------------------------------------------------------
# Decompose x relative to the (possibly non-orthogonal) vectors v1, v2.
# Returns a list with
#   * coefficients: c(a, b) such that a*v1 + b*v2 is the projection of x
#       onto the span of v1 and v2
#   * projection:   a * v1 + b * v2
#   * remainder:    x minus the projection
# Relies on project(), vlength(), and dot() from the mosaic/fastR2
# packages.
vdecomp2 <- function(x, v1, v2) {
  # Components of each vector orthogonal to the other.
  w1 <- v1 - project(v1, v2)
  w2 <- v2 - project(v2, v1)
  p1 <- project(x, w1)
  p2 <- project(x, w2)
  # Guard against zero vectors (w_i is 0 when v1 and v2 are parallel);
  # in that case the corresponding coefficient is taken to be 0.
  a <- 0
  if (vlength(w1) != 0) {
    a <- sign(dot(w1, p1)) * vlength(p1) / vlength(w1)
  }
  b <- 0
  if (vlength(w2) != 0) {
    b <- sign(dot(w2, p2)) * vlength(p2) / vlength(w2)
  }
  list(
    coefficients = c(a, b),
    projection = a * v1 + b * v2,
    remainder = x - a * v1 - b * v2
  )
}
## ----vector-decomp01-----------------------------------------------------
v1 <- c(1, 1)
v2 <- c(2, 3)
x <- c(2, 5)
vdecomp2(x, v1, v2)
## ----vector-decomp02-----------------------------------------------------
v1 <- c(1, 0, 0)
v2 <- c(1, 1, 1)
x <- c(2, 3, 5)
vdecomp2(x, v1, v2)
h <- vdecomp2(x, v1, v2)$remainder;
round(h, 8)
round(dot(h, v1), 8)
round(dot(h, v2), 8)
## ----vector-decomp03-----------------------------------------------------
v1 <- c(1, 0, 0)
v2 <- c(1, 1, 1)
v3 <- c(1, 2, 3)
x <- c(2, 7, 3)
vdecomp2(x, v1, v2) %>% lapply(round, digits = 8)
a1 <- vdecomp2(x, v1, v2)$coefficients[1]; a1
b1 <- vdecomp2(x, v1, v2)$coefficients[2]; b1
x1 <- a1 * v1 + b1 * v2; x1
# decompose x into x1 and v3
vdecomp2(x, x1, v3) %>% lapply(round, digits = 8)
a2 <- vdecomp2(x, x1, v3)$coefficients[1]; a2
b2 <- vdecomp2(x, x1, v3)$coefficients[2]; b2
# this should equal x
a2 * (a1 * v1 + b1* v2) + b2 * v3
# the three coefficients
c(a2 * a1, a2 * b1, b2)
## ----vector-decomp04, tidy = FALSE---------------------------------------
# Decompose vector x over any number of (not necessarily orthogonal)
# vectors supplied via ..., by repeatedly applying vdecomp2().
# Returns a list with
#   * coefficients: coordinates of the projection w.r.t. the given vectors
#   * projection:   projection of x onto the span of the vectors
#   * remainder:    x - projection
#
# Bug fix: the loop used 2:length(v), which evaluates to c(2, 1) when
# only one vector is supplied and then fails on v[[2]].
# seq_along(v)[-1] is empty in that case, so a single-vector call now
# reduces cleanly to a simple projection.
vdecomp <- function(x, ...) {
  v <- list(...)
  projection <- project(x, v[[1]])
  coefs <- project(x, v[[1]], type = "coef")
  for (i in seq_along(v)[-1]) {
    decomp <- vdecomp2(x, projection, v[[i]])
    # Fold the new coefficient in: earlier coefficients are rescaled by
    # the weight given to the running projection.
    coefs <- c(coefs * decomp$coefficients[1], decomp$coefficients[2])
    projection <- decomp$projection
  }
  list(coefficients = coefs, projection = projection,
       remainder = x - projection)
}
## ----vector-decomp05-----------------------------------------------------
v1 <- c(1, 0, 0)
v2 <- c(1, 1, 1)
v3 <- c(1, 2, 3)
x <- c(2, 7, 3)
vdecomp(x, v1, v2, v3) %>% lapply(round, digits = 8)
## ----projections01-sol---------------------------------------------------
a <- project(c(1, 0), c(1, 1)); a
b <- project(c(1, 0), c(1, -1)); b
c <- project(c(1, 0), c(1, 2)); c
d <- project(c(1, 2, 3), c(1, 1, 1)); d
e <- project(c(1, 1, 1), c(1, 2, 3)); fractions(e)
f <- project(c(1, 2, 3), c(1, -1, 0)); f
g <- project(c(1, 2, 3, 4), c(1, 1, -1, -1)); g
h <- project(c(1, 1, -1, -1), c(1, -1, 1, -1)); h
## ----projections02-sol---------------------------------------------------
a + b
a + c
d + f
## ----orthonormal-s1------------------------------------------------------
x <- c(1, 1, 1)
y <- c(1, 1, -2)
w <- y - project(y, x)
dot(x, w) # confirm normality
# these two column vectors are orthogonal and have correct span
cbind( x / vlength(x), w / vlength(w) )
## ------------------------------------------------------------------------
x1 <- c(1, 1, 1)
x2 <- c(1, 2, 3)
u1 <- x1 / vlength(x1)
a <- x2 - project(x2, x1)
u2 <- a / vlength(a)
vlength(u1)
vlength(u2)
dot(u1, u2)
## ----mat-dim01-----------------------------------------------------------
# A matrix is a vector with a dim attribute: assigning dim reshapes 1:12
# into 3 rows x 4 columns (filled column-major).
M = 1:12
dim(M) = c(3, 4)
M
## ----mat-dim02-----------------------------------------------------------
dim(M)
## ----matrix--------------------------------------------------------------
# matrix() constructor; note nr=/nc= rely on partial matching of nrow=/ncol=
x <- 1:12
matrix(x, nr = 2) # 2 rows, entries columnwise
matrix(x, nr = 3, byrow = TRUE) # 3 rows, entries rowwise
matrix(x, nc = 3, byrow = TRUE) # 3 columns, entries rowwise
x # x is unchanged
## ----matrix-recycle------------------------------------------------------
# recycling: a length-1 (or missing) data argument is repeated to fill
matrix(1, nr = 4, nc = 3) # matrix of all 1's
matrix(nr = 3, nc = 2) # matrix of missing data
## ----rbind-cbind---------------------------------------------------------
A = rbind(1:3, 4:6); A
B = cbind(c(5, 2, 4), c(1, 3, -1)); B
## ----as-matrix-----------------------------------------------------------
x <- 1:3
A %*% x # vector x treated as a column matrix
as.matrix(x) # explicit conversion to a column matrix
## ----matrix-t------------------------------------------------------------
t(1:4) # transpose column into row
M
t(M)
## ----matrix-mult---------------------------------------------------------
A %*% B # Note: A*B does not work
B %*% A
## ----matrix-dot----------------------------------------------------------
# dot product two ways: as a 1x1 matrix product, and as an elementwise sum
1:4 %*% 1:4
sum(1:4 * 1:4)
## ----matrix-outer--------------------------------------------------------
outer(1:4, 1:4)
## ----matrix-outer-fun----------------------------------------------------
# outer() with a custom vectorized function instead of multiplication
outer(1:4, 1:4,
FUN = function(x, y) {paste(x, ':', y, sep = '')})
## ----matrix-solve--------------------------------------------------------
# solve the 2x2 linear system A b = x via the matrix inverse
x <- as.matrix(c(3, 1)) # vector as column matrix
A <- rbind(c(5, 2), c(3, 1))
Ainv <- solve(A); Ainv # solve() computes inverse
A %*% Ainv
Ainv %*% A
Ainv %*% x # solution to system
## ----project-matrix01----------------------------------------------------
# projection onto span{v1, v2}: A has unit-length columns
# (vlength() is a helper defined elsewhere in the document)
v1 <- c(1, 1, 1, 1)
v2 <- c(1, 2, -3, 0)
A <- cbind(v1 / vlength(v1), v2 / vlength(v2))
## ----project-matrix02----------------------------------------------------
t(A) %*% v1
t(A) %*% v2
x <- 1:4
coefs <- t(A) %*% x; coefs # coordinates of x in the basis
pr <- A %*% coefs; pr # projection of x onto the column space of A
remainder <- x - pr; remainder # component orthogonal to the column space
dot(remainder, v1) # should be 0
dot(remainder, v2) # should be 0
## ----Chap1-4Review, child="Chap1-4Review.Rnw", eval=includeApp[4]--------
## ----rev-setup, include = FALSE------------------------------------------
knitr::opts_chunk$set(cache.path = "cache/Rev-")
## ----rev-data, fig.show='hide'-------------------------------------------
# Data-exploration review using the Batting data from fastR2.
# NOTE(review): require() returns FALSE rather than erroring on failure;
# library(fastR2) would fail fast here.
require(fastR2)
names(Batting)
Batting2005 <- Batting %>% filter(year == 2005)
df_stats(HR ~ team | league, data = Batting2005, max)
gf_histogram( ~ AB | league, data = Batting2005)
gf_point(HR ~ H, data = Batting2005 %>% filter(team == "DET"))
gf_boxplot(HR ~ league, data = Batting2005) %>%
gf_refine(coord_flip())
## ----rev-data-fig, echo = FALSE, results = "hide"------------------------
# Intentional duplicate of the chunk above: the first shows the code with
# figures hidden, this one renders the figures with the code hidden.
require(fastR2)
names(Batting)
Batting2005 <- Batting %>% filter(year == 2005)
df_stats(HR ~ team | league, data = Batting2005, max)
gf_histogram( ~ AB | league, data = Batting2005)
gf_point(HR ~ H, data = Batting2005 %>% filter(team == "DET"))
gf_boxplot(HR ~ league, data = Batting2005) %>%
gf_refine(coord_flip())
## ----rev-moments-binom---------------------------------------------------
# Moments of Binom(20, 0.25) directly from the pmf:
# E[X], E[X^2], and Var(X) = E[X^2] - E[X]^2
x <- 0:20
sum( x * dbinom(x,20,0.25) )
sum( x^2 * dbinom(x,20,0.25) )
sum( x^2 * dbinom(x,20,0.25) ) - ( sum( x * dbinom(x,20,0.25) ) )^2
## ----rev-moments-exp-sol-------------------------------------------------
# First and second moments of Exp(rate = 2) by numerical integration
f1 <- function(x) { x * dexp(x, rate=2) }
f2 <- function(x) { x^2 * dexp(x, rate=2) }
integrate(f1, 0, Inf)
integrate(f2, 0, Inf)
## ----rev-test-coin-review------------------------------------------------
# Exact vs approximate test of fairness after 60 heads in 100 flips
binom.test(60,100)
prop.test(60,100)
## ----rev-test-coin-power1-sol--------------------------------------------
binom.test(61,100)
## ----rev-test-coin-power2-sol--------------------------------------------
# Power of the "reject if X <= 39 or X >= 61" test across true probabilities
prob <- c( seq(0, 0.4, by=0.10), 0.45, 0.5, 0.55, seq(0.6, 1, by=0.10) )
power <- pbinom(39,100,prob) + 1- pbinom(60,100,prob)
print(cbind(prob,power))
## ----rev-test-coin-power3-sol--------------------------------------------
# Same power function on a fine grid, drawn as a power curve (lattice)
prob <- c( seq(0, 1, by=0.01) )
power <- pbinom(39,100,prob) + 1- pbinom(60,100,prob)
xyplot(power~prob, type="l",
main="Power to detect a biased coin with 100 flips",
xlab="true probability of heads")
## ----rev-min-of-unif01-sol-----------------------------------------------
# P(min of n Unif(0,1) draws <= 0.05) = 1 - 0.95^n, for various n
n <- round((1:10)^(1.75)); prob <- 1 - (0.95)^n
print(cbind(n,prob))
## ----rev-min-of-unif02-sol-----------------------------------------------
# Pdf of the sample minimum, n * (1 - y)^(n - 1), for several sample sizes.
# The vectors are rep()-ed so every (y, n) combination appears once.
y <- c( seq(0, 1, by=0.01) )
n <- c(1,5,10,20)
n <- rep(n,times=length(y))
y <- rep(y,each=4)
density <- n * (1-y)^{n-1}
groups <- paste("n =", n)
groups <- factor(groups, levels=unique(groups))
# NOTE(review): "mininum" in the title string below is a typo in the output
xyplot(density~y, groups=groups, type="l",
main="Pdf of the mininum of a sample from Unif(0,1)",
key=simpleKey(levels(groups), columns=2, lines=TRUE, points=FALSE),
xlim=c(0,0.20))
## ----rev-mix-normals01---------------------------------------------------
# CDF at 12 of a 30/70 mixture of N(8, 2) and N(16, 3)
.3 * pnorm(12,8,2) + 0.7 * pnorm(12,16,3)
## ----rev-mix-normals02---------------------------------------------------
# pdf of the same mixture over a grid
x <- seq(0, 30, by=0.25)
density <- 0.3 * dnorm(x,8,2) + 0.7 * dnorm(x,16,3)
xyplot(density~x, type="l", main="pdf of a mixture of normals")
## ----rev-lognormal-------------------------------------------------------
# Hand-rolled d/r/p/q functions for the lognormal distribution,
# i.e. X = exp(Z) with Z ~ N(mu, sigma).
# NOTE: no input validation -- negative x yields NaN/warnings rather than 0.

# density via change of variables: f_X(x) = f_Z(log x) / x
dlognormal <- function(x, mu=0, sigma=1) {
  dnorm(log(x), mean = mu, sd = sigma) / x
}
# random draws: exponentiate normal draws
rlognormal <- function(n, mu=0, sigma=1) {
  exp(rnorm(n, mean = mu, sd = sigma))
}
# CDF: P(X <= x) = P(Z <= log x)
plognormal <- function(x, mu=0, sigma=1) {
  pnorm(log(x), mean = mu, sd = sigma)
}
# quantiles: invert on the log scale
qlognormal <- function(p, mu=0, sigma=1) {
  exp(qnorm(p, mean = mu, sd = sigma))
}
# some checks: quantiles of simulated data vs the q/p functions
randomData <- rlognormal(100, mu=0, sigma=1/2)
quant <- quantile(randomData)
x <- qlognormal(c(0.25, 0.5, 0.75), mu=0, sigma=1/2); x
plognormal(x, mu=0, sigma=1/2) # should recover 0.25, 0.5, 0.75
plognormal(quant, mu=0, sigma=1/2)
# NOTE(review): plot1 stores a lattice object that is never printed, so this
# histogram is not actually displayed.
plot1 <- histogram(~randomData)
# Build a grid of (x, mu, sigma) combinations for a panel of pdfs
x <- seq(0, 10, by=0.25)
nx <- length(x)
mu <- c(-1, 0, 1)
nmu <- length(mu)
sigma <- c(1/8, 1/4, 1/2, 1, 2, 4)
nsigma <- length(sigma)
x <- rep(x, each=nmu*nsigma)
mu <- rep(rep(mu, nsigma), times=nx)
sigma <- rep(rep(sigma, each=nmu), times=nx)
density <- dlognormal(x, mu, sigma)
# one panel per sigma, one curve per mu
xyplot(density~x|paste("sigma", '=', sigma),
groups = paste("mu =", mu),
type="l",
key=simpleKey(paste("mu =", sort(unique(mu))),
points=FALSE, lines=TRUE, columns=3),
scales=list(y=list(relation="free", alternating=FALSE)),
main = "pdfs of lognormal distributions")
## ----rev-faithful--------------------------------------------------------
# one-sample t confidence interval for mean eruption duration (Old Faithful)
t.test(faithful$eruptions)
## ----rev-moments-function------------------------------------------------
# k-th (optionally central) moment of a discrete distribution given its
# support `vals` and probabilities `probs`. `k` may be a vector, in which
# case the result is a vector of moments, one per entry of k.
moment <- function(k=1,                                      # which moment
                   vals=1:6,                                 # dice by default
                   probs=rep(1/length(vals), length(vals)),  # uniform probs
                   centered=FALSE) {                         # center on mean?
  # vectorize over k by recursing on each element
  if (length(k) > 1) {
    return(vapply(k, moment, numeric(1),
                  vals = vals, probs = probs, centered = centered))
  }
  # center on the distribution mean when a central moment is requested
  ctr <- if (centered) sum(vals * probs) else 0
  sum((vals - ctr)^k * probs)
}
# Checks against Binom(10, 0.4): mean = np = 4, variance = np(1-p) = 2.4
moment(k=1, 0:10, dbinom(0:10,10,0.4))
moment(k=2, 0:10, dbinom(0:10,10,0.4), centered=FALSE)
moment(k=2, 0:10, dbinom(0:10,10,0.4), centered=TRUE)
10 * 0.4 * 0.6 # should match previous and next value
# variance the long way: E[X^2] - E[X]^2
moment(k=2, 0:10, dbinom(0:10,10,0.4), centered=FALSE) -
moment(k=1, 0:10, dbinom(0:10,10,0.4), centered=FALSE)^2
# vectorized k: first four raw and central moments
round(moment(k=1:4, 0:10, dbinom(0:10,10,0.4), centered=FALSE), 5)
round(moment(k=1:4, 0:10, dbinom(0:10,10,0.4), centered=TRUE), 5)
## ----rev-moments-function-cont-sol---------------------------------------
# k-th (optionally central) moment of a continuous distribution.
#
#   k        -- which moment(s); may be a vector (handled by recursion)
#   dist     -- density function, e.g. dnorm
#   args     -- extra arguments forwarded to dist(), e.g. list(rate = 2)
#   range    -- integration limits
#   centered -- if TRUE, center on the first (uncentered) moment
#
# Returns the numeric value of integrate((x - m)^k * dist(x, args...)).
moment.cont <- function( k=1,            # which moment?
                    dist = dnorm,
                    args=list(),         # arguments to dist()
                    range=c(-Inf,Inf),
                    centered=FALSE) {    # centered on mean?
    # vectorize over k by recursing on each element
    if (length(k) > 1) {
        return( sapply(k, moment.cont,
                    dist=dist, args=args,
                    range=range, centered=centered) )
    }
    if ( centered ) {
        # BUG FIX: forward args here too, so centering uses the same
        # distribution parameters as the main integral
        m = moment.cont(dist=dist, args=args, range=range, k=1, centered=FALSE)
    } else {
        m = 0
    }
    # BUG FIX: `args` was previously ignored -- dist() was always called with
    # its default parameters. do.call() forwards the extra arguments.
    int.out <- integrate(
        function(x) { (x-m)^k * do.call(dist, c(list(x), args)) },
        range[1], range[2])
    return (int.out$value)
}
# Checks: Unif(0,1) has mean 1/2, E[X^2] = 1/3, variance 1/12.
# (The positional first argument matches dist, since k is given by name.)
moment.cont(dunif, k=1, centered=FALSE)
moment.cont(dunif, k=2, centered=FALSE)
moment.cont(dunif, k=2, centered=TRUE)
moment.cont(dunif, k=1:4, centered=FALSE)
round(moment.cont(dunif, k=1:4, centered=TRUE), 5)
# standard normal central moments: 0, 1, 0, 3
round(moment.cont(dnorm, k=1:4, centered=TRUE), 5)
# an anonymous density works too: N(10, 3)
round( moment.cont(function(x) {dnorm(x, 10, 3)}, k=1:4, centered=TRUE), 5)
|
eedf8816bb7e05ba3baec70830b1ad43bc726a37 | 9dc0c09d53087c59bbc2caf8fc392ef5a11502f6 | /man/run_predict_api.Rd | 0d2afaa2e6ce523c7688022631a7ebefb2bc187d | [
"MIT"
] | permissive | xvrdm/ppp | 140c77f5ab1956b5db262fb2216df8b912fde14e | 80cc57e09a79e473701a26d800bffba3d203e88d | refs/heads/master | 2020-12-26T08:18:18.129636 | 2020-01-30T01:26:21 | 2020-01-30T01:26:21 | 237,444,900 | 1 | 0 | NOASSERTION | 2020-01-31T14:20:29 | 2020-01-31T14:20:28 | null | UTF-8 | R | false | true | 530 | rd | run_predict_api.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{run_predict_api}
\alias{run_predict_api}
\title{Run the housing prediction API}
\usage{
run_predict_api(log = TRUE, ...)
}
\arguments{
\item{log}{Logical. Should the Plumber API log requests. Default is TRUE.}
\item{...}{Options passed to \code{plumber::plumb()$run()}}
}
\value{
A running Plumber API
}
\description{
Run the housing prediction API
}
\examples{
\dontrun{
run_predict_api()
run_predict_api(swagger = TRUE, port = 8000)
}
}
|
b08bab423f876fa9fd4b8d543522abe1fd86d487 | 72d03ec10b4955bcc7daac5f820f63f3e5ed7e75 | /input/gcam-data-system/gcam-usa-processing-code/level1/LA119.Solar.R | f7193ad0d1f324f4a7327581ecf4205d8793ef40 | [
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bgmishra/gcam-core | 54daddc3d037571bf745c4cf0d54c0d7a77f493f | bbfb78aeb0cde4d75f307fc3967526d70157c2f8 | refs/heads/master | 2022-04-17T11:18:25.911460 | 2020-03-17T18:03:21 | 2020-03-17T18:03:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,421 | r | LA119.Solar.R | # Before we can load headers we need some paths defined. They
# may be provided by a system environment variable or just
# having already been set in the workspace
if( !exists( "GCAMUSAPROC_DIR" ) ){
if( Sys.getenv( "GCAMUSAPROC" ) != "" ){
GCAMUSAPROC_DIR <- Sys.getenv( "GCAMUSAPROC" )
} else {
stop("Could not determine location of energy data system. Please set the R var GCAMUSAPROC_DIR to the appropriate location")
}
}
# Universal header file - provides logging, file support, etc.
# (logstart/printlog/sourcedata/readdata/writedata/logstop come from these)
source(paste(GCAMUSAPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
source(paste(GCAMUSAPROC_DIR,"/../_common/headers/GCAMUSA_header.R",sep=""))
# begin this script's log
logstart( "LA119.Solar.R" )
printlog( "Solar capacity factors by state" )
# -----------------------------------------------------------------------------
# 1. Read files
# Shared assumption files, state mappings, and NREL renewable-energy inputs.
# NOTE(review): NREL_us_re_technical_potential is read but never used below.
sourcedata( "COMMON_ASSUMPTIONS", "A_common_data", extension = ".R" )
sourcedata( "COMMON_ASSUMPTIONS", "unit_conversions", extension = ".R" )
sourcedata( "GCAMUSA_ASSUMPTIONS", "A_GCAMUSA_data", extension = ".R" )
states_subregions <- readdata( "GCAMUSA_MAPPINGS", "states_subregions" )
NREL_us_re_capacity_factors <- readdata( "GCAMUSA_LEVEL0_DATA", "NREL_us_re_capacity_factors" )
NREL_us_re_technical_potential <- readdata( "GCAMUSA_LEVEL0_DATA", "NREL_us_re_technical_potential" )
# -----------------------------------------------------------------------------
# 2. Perform computations
# Scale each state's PV / CSP capacity factor relative to the national
# "Average" row, so scaler == 1 means an average state.
NREL_us_re_capacity_factors$PV_scaler <- NREL_us_re_capacity_factors$Urban_Utility_scale_PV /
NREL_us_re_capacity_factors$Urban_Utility_scale_PV[ NREL_us_re_capacity_factors$State == "Average" ]
NREL_us_re_capacity_factors$CSP_scaler <- NREL_us_re_capacity_factors$CSP /
NREL_us_re_capacity_factors$CSP[ NREL_us_re_capacity_factors$State == "Average" ]
# map full state names to state codes
NREL_us_re_capacity_factors$state <- states_subregions$state[ match( NREL_us_re_capacity_factors$State, states_subregions$state_name ) ]
# One row per state (`states` comes from the sourced assumption files);
# match() yields NA scalers for any state absent from the NREL data.
L119.CapFacScaler_PV_state <- data.frame(
state = states,
sector = "electricity generation",
fuel = "solar PV",
scaler = NREL_us_re_capacity_factors$PV_scaler[ match( states, NREL_us_re_capacity_factors$state ) ] )
L119.CapFacScaler_CSP_state <- data.frame(
state = states,
sector = "electricity generation",
fuel = "solar CSP",
scaler = NREL_us_re_capacity_factors$CSP_scaler[ match( states, NREL_us_re_capacity_factors$state ) ] )
# Null CSP capacity factor implies that CSP is not suitable in the state.
# Set capacity factor to small number (0.001) to prevent divide by 0 error in GCAM.
L119.CapFacScaler_CSP_state$scaler <- ifelse( L119.CapFacScaler_CSP_state$scaler > 0, L119.CapFacScaler_CSP_state$scaler, 0.001 )
# -----------------------------------------------------------------------------
# 3. Output
# Add comments (header metadata) for each table written below
comments.L119.CapFacScaler_PV_state <- c( "Solar PV capacity factor adjustment by state","Unitless" )
# BUG FIX: this string previously said "Solar PV" (copy-paste error) even
# though the table holds the CSP scalers.
comments.L119.CapFacScaler_CSP_state <- c( "Solar CSP capacity factor adjustment by state","Unitless" )
#write tables as CSV files
writedata( L119.CapFacScaler_PV_state, domain="GCAMUSA_LEVEL1_DATA", fn="L119.CapFacScaler_PV_state", comments=comments.L119.CapFacScaler_PV_state )
writedata( L119.CapFacScaler_CSP_state, domain="GCAMUSA_LEVEL1_DATA", fn="L119.CapFacScaler_CSP_state", comments=comments.L119.CapFacScaler_CSP_state )
# Every script should finish with this line
logstop()
|
5b28221fa661d0b0aca69ab5cb7015b37631e4bd | b87e4612bf378a95a08c79245a77448309abfa8e | /inst/doc/Data-Model.R | 34296652982277d16e33cc55fc7577f4fd5a65dc | [] | no_license | cran/fishdata | e7d95767a0c483190176e42fb98f5746bc18e3d3 | 05a4b3160639321b92eadf05a3625607a23d1c59 | refs/heads/master | 2021-07-21T01:35:34.386558 | 2021-05-23T03:20:02 | 2021-05-23T03:20:02 | 121,367,706 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,754 | r | Data-Model.R | ## ---- include = FALSE---------------------------------------------------------
# Purled from the fishdata "Data-Model" vignette: knitr chunk options
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- setup, echo=FALSE, message=FALSE----------------------------------------
# Packages: fishdata supplies the example tables; dm builds relational data
# models; DiagrammeR renders the dm_draw() diagrams.
library(fishdata)
library(dm)
library(DiagrammeR)
# load the six example datasets shipped with fishdata
data("adult_growth")
data("adult_metrics")
data("adults")
data("juvenile_growth")
data("juvenile_metrics")
data("juveniles")
# Base data model: just the two entity tables, no keys yet
fish_base_dm <- dm(adults,
juveniles)
dm_draw(fish_base_dm, view_type = "all")
## -----------------------------------------------------------------------------
# Four-table model: declare fish_code as primary key on the entity tables,
# then foreign keys from each metrics table back to its entity table.
fish_dm <- dm(adult_metrics,
adults,
juvenile_metrics,
juveniles)
fish_dm_pk <-
fish_dm %>%
dm_add_pk(table = adults, columns = fish_code) %>%
dm_add_pk(juveniles, fish_code)
fish_dm_all_keys <-
fish_dm_pk %>%
dm_add_fk(adult_metrics, fish_code, adults) %>%
dm_add_fk(juvenile_metrics, fish_code, juveniles)
dm_draw(fish_dm_all_keys, view_type = "all")
## ---- echo=FALSE, message=FALSE-----------------------------------------------
# Full six-table model: same keys as above, plus the growth tables linked
# to their entity tables via fish_code.
fish_dm <- dm(adult_growth,
adult_metrics,
adults,
juvenile_growth,
juvenile_metrics,
juveniles)
fish_dm_pk <-
fish_dm %>%
dm_add_pk(table = adults, columns = fish_code) %>%
dm_add_pk(juveniles, fish_code)
fish_dm_all_keys <-
fish_dm_pk %>%
dm_add_fk(table = adult_growth, columns = fish_code, ref_table = adults) %>%
dm_add_fk(adult_metrics, fish_code, adults) %>%
dm_add_fk(juvenile_growth, fish_code, juveniles) %>%
dm_add_fk(juvenile_metrics, fish_code, juveniles)
dm_draw(fish_dm_all_keys, view_type = "all")
|
aefafe1eb7b0320622b208f0bdc76fd0b501709b | f16ec098e8085462c398f2f6fcbb5a55a6dee8b2 | /plot4.R | 79945accf9421e2f0efb86df87f64ffc82ec9d81 | [] | no_license | andersonkmi/Exploratory-Data-Analysis-Project-2 | 18e7ac3edcc800672c02acb50f52cbc2d5466729 | 3ff12a53f25de2973639c371b9bd2bf60d101047 | refs/heads/master | 2021-01-11T03:51:10.484215 | 2016-10-23T18:21:49 | 2016-10-23T18:21:49 | 71,256,424 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,031 | r | plot4.R | #######################
### Library loading
#######################
# NOTE(review): plyr and ggplot2 are loaded but not used in this script
# (plotting below is done with base graphics).
library(plyr)
library(ggplot2)
library(data.table)
#######################
### Loading data sets
#######################
# PM2.5 emissions summary and source-classification-code (SCC) table
nei <- readRDS("summarySCC_PM25.rds")
scc <- readRDS("Source_Classification_Code.rds")
#################################
### Convert them to data tables
#################################
neiDF <- data.table(nei)
sccDF <- data.table(scc)
head(neiDF)
head(sccDF)
summary(neiDF)
summary(sccDF)
########################
### Coal related items
########################
# SCC codes whose Level-Three description mentions "Coal" (case-sensitive;
# entries spelled "coal" would be missed -- TODO confirm against the data)
sccCoal <- sccDF[grep("Coal", sccDF$SCC.Level.Three),]
coalEmissions <- neiDF[SCC %in% sccCoal$SCC,]
# total coal-related emissions per year
coalTotalEmissions <- with(coalEmissions, aggregate(Emissions, by = list(year), sum))
#########################
### Graph plotting
#########################
png(filename = "plot4.png", width = 480, height = 480, units = "px")
plot(coalTotalEmissions, type = "b", pch = 20, col = "blue", ylab = "Emissions (tons)", xlab = "Years", main = "Coal Yearly Emissions")
dev.off()
|
83e4123178cd3a5c9124c5f89b945a0148f422e3 | 09e4d997f1d7b6b5bb7c587a8847be0fc76c92a7 | /forecast_pov.R | d6488836517da3fc71dcb09f615377765dc8f4ca | [] | no_license | OlivierNDO/python_misc | 29174b8b2e964ba055032a5996082830bbf1d7f4 | ff026ac4fd54ecbde86b8ca52caa6532863979c6 | refs/heads/master | 2021-11-19T11:13:25.493494 | 2021-09-06T15:13:22 | 2021-09-06T15:13:22 | 146,823,864 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,945 | r | forecast_pov.R |
### Configuration
#############################################################################
# Load Packages
config_req_pkgs = c('kableExtra', 'MASS', 'plotly', 'rmarkdown', 'stringr', 'tidyverse', 'tidyquant')
# NOTE(review): require() returns FALSE (rather than erroring) when a package
# is missing, so a failed load passes silently here.
lapply(config_req_pkgs, require, character.only = TRUE)
options(scipen = 999) # avoid scientific notation in printed numbers
# Data
# Monthly product-growth series from a local CSV; first 9 columns renamed,
# burn-in (time <= 40) dropped, rows after time 156 held out as Test.
df = read.csv('D:/product_growth.csv')[,1:9] %>%
dplyr::filter(time > 40) %>%
magrittr::set_colnames(c('dt', 'time', 'value3', 'value2', 'value', 'acquisition', 'new_marketing_platform', 'pricing_error', 'jan')) %>%
dplyr::mutate(dt = as.Date(dt, format = '%m/%d/%Y')) %>%
dplyr::mutate(partition = ifelse(time > 156, 'Test', 'Train'))
# training rows affected by the pricing error (used to shade the plot below)
price_error_df = df[df$partition == 'Train' & df$pricing_error == 1,]
# Prediction 1 - black-box model: seasonal ARIMA(1,1,0)(1,0,0)[12] fit on the
# training window only, with no exogenous regressors / business context.
# (The original comment said "with business context", which was misleading.)
tseries = ts(df[df$partition == 'Train','value3'], frequency = 12)
tseries_decomposed = decompose(tseries) # NOTE(review): computed but unused
arimax_model = forecast::Arima(tseries, order = c(1, 1, 0), seasonal = c(1, 0, 0))
pred_df1 = data.frame(time = df[df$partition == 'Test',]$time, value3 = predict(arimax_model, n.ahead = nrow(df[df$partition == 'Test',]))$pred) %>%
dplyr::mutate(partition = 'Predicted (black box model)')
# Prediction 2 - with business context: the same ARIMA plus the business
# indicator columns (acquisition, marketing platform, pricing error) as xreg.
train_x_vars = df[df$partition == 'Train', c('acquisition', 'new_marketing_platform', 'pricing_error')] %>% as.matrix()
test_x_vars = df[df$partition == 'Test', c('acquisition', 'new_marketing_platform', 'pricing_error')] %>% as.matrix()
tseries = ts(df[df$partition == 'Train','value3'], frequency = 12)
tseries_decomposed = decompose(tseries)
arimax_model = forecast::Arima(tseries, order = c(1, 1, 0), seasonal = c(1, 0, 0), xreg = train_x_vars)
pred_df2 = data.frame(time = df[df$partition == 'Test',]$time, value3 = predict(arimax_model, n.ahead = nrow(df[df$partition == 'Test',]), newxreg = test_x_vars)$pred) %>%
dplyr::mutate(partition = 'Predicted (with business context)')
# Append Predicted Values (1)
# Copies of the Test rows relabelled/overwritten with each model's forecasts;
# assumes pred_df rows align one-to-one with the Test rows.
test_df = df %>% dplyr::filter(partition == 'Test')
test_pred_df1 = test_df
test_pred_df1$partition = pred_df1$partition
test_pred_df1$value3 = pred_df1$value3
# Append Predicted Values (2)
test_pred_df2 = test_df
test_pred_df2$partition = pred_df2$partition
test_pred_df2$value3 = pred_df2$value3
# stack actuals + both prediction sets; flag rows as Actual vs Predicted
df = rbind.data.frame(df, test_pred_df1, test_pred_df2) %>%
dplyr::mutate(data_type = ifelse(partition %in% c('Train', 'Test'), 'Actual', 'Predicted'))
# Get Errors
# mean absolute percentage error of each model on the Test window
mape1 = mean(abs(pred_df1$value3 - test_df$value3) / test_df$value3)
mape2 = mean(abs(pred_df2$value3 - test_df$value3) / test_df$value3)
mape1_label = paste0(round(mape1 * 100, 2), '%')
mape2_label = paste0(round(mape2 * 100, 2), '%')
# Plot 1 - Business Context
# Actual vs ARIMAX prediction. Shaded band = pricing-error period; solid
# vertical lines = acquisition and marketing-platform dates; dashed grey
# lines mark each January.
ggplot(df %>% dplyr::filter(partition %in% c('Train', 'Test', 'Predicted (with business context)')),
aes(x = dt, y = value3, color = data_type)) +
theme_bw() +
geom_rect(data = df[df$partition == 'Train' & df$pricing_error == 1,],
aes(xmin = min(price_error_df$dt), xmax=max(price_error_df$dt), ymin=10000, ymax=16000),
fill = rgb(17, 56, 99, maxColorValue = 255),
color = rgb(17, 56, 99, maxColorValue = 255),
alpha = 0.02,
inherit.aes = FALSE) +
geom_vline(xintercept = min(df[df$acquisition == 1, 'dt']),
size = 1,
color = rgb(17, 56, 99, maxColorValue = 255)) +
geom_vline(xintercept = min(df[df$new_marketing_platform == 1, 'dt']),
size = 1,
color = rgb(17, 56, 99, maxColorValue = 255)) +
geom_line() +
geom_point() +
geom_vline(xintercept = df[df$jan == 1,'dt'],
linetype = 'dashed',
color = 'grey') +
scale_x_date(date_labels = "'%y", date_breaks = '1 year') +
labs(x = 'Month', y = 'Products in Force', title = 'Monthly Product Growth (with business context)', color = '',
subtitle = paste0('Mean % Error: ', mape2_label)) +
scale_y_continuous(labels = scales::comma,
limits = c(8000, max(df[df$partition == 'Train',]$value3))) +
theme(axis.text = element_text(size = 8),
legend.position = 'none')
# Plot 2 - No business context
# Same comparison for the black-box ARIMA (no exogenous regressors)
ggplot(df %>% dplyr::filter(partition %in% c('Train', 'Test', 'Predicted (black box model)')),
aes(x = dt, y = value3, color = data_type)) +
theme_bw() +
geom_line() +
geom_point() +
geom_vline(xintercept = df[df$jan == 1,'dt'],
linetype = 'dashed',
color = 'grey') +
scale_x_date(date_labels = "'%y", date_breaks = '1 year') +
labs(x = 'Month', y = 'Products in Force', title = 'Monthly Product Growth',
subtitle = paste0('Mean % Error: ', mape1_label)) +
scale_y_continuous(labels = scales::comma,
limits = c(8000, max(df[df$partition == 'Train',]$value3))) +
theme(axis.text = element_text(size = 8),
legend.position = 'none')
2f93b3455a049b6e563d115f690bff6f56be728e | f0352034f8467e2c82a31443ae6e3125039879ac | /man/ClusterFunction-methods.Rd | ccec04eade1a200a6f0f1c2eea3bc169db818898 | [] | no_license | epurdom/clusterExperiment | 8d5d43a250a1a3c28d4745aae4b72285458ba1a2 | ae86ee09697c13ccd5d32f964e28ab7d82b455d6 | refs/heads/master | 2022-11-04T01:54:19.806886 | 2022-10-11T22:00:27 | 2022-10-11T22:00:27 | 47,139,877 | 39 | 15 | null | 2021-01-27T21:26:28 | 2015-11-30T19:06:53 | R | UTF-8 | R | false | true | 2,407 | rd | ClusterFunction-methods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllHelperClusterFunction.R
\name{ClusterFunction-methods}
\alias{ClusterFunction-methods}
\alias{requiredArgs,ClusterFunction-method}
\alias{requiredArgs,list-method}
\alias{requiredArgs,character-method}
\alias{requiredArgs}
\alias{requiredArgs,factor-method}
\alias{algorithmType,ClusterFunction-method}
\alias{algorithmType}
\alias{algorithmType,character-method}
\alias{algorithmType,factor-method}
\alias{algorithmType,list-method}
\alias{inputType,ClusterFunction-method}
\alias{inputType}
\alias{inputType,list-method}
\alias{inputType,character-method}
\alias{inputType,factor-method}
\title{Helper methods for the ClusterFunction class}
\usage{
\S4method{requiredArgs}{character}(object)
\S4method{requiredArgs}{ClusterFunction}(object, genericOnly = FALSE)
\S4method{requiredArgs}{list}(object)
\S4method{requiredArgs}{character}(object)
\S4method{requiredArgs}{character}(object)
\S4method{requiredArgs}{factor}(object)
\S4method{algorithmType}{ClusterFunction}(object)
\S4method{algorithmType}{character}(object)
\S4method{algorithmType}{factor}(object)
\S4method{algorithmType}{list}(object)
\S4method{inputType}{ClusterFunction}(object)
\S4method{inputType}{list}(object)
\S4method{inputType}{character}(object)
\S4method{inputType}{factor}(object)
}
\arguments{
\item{object}{input to the method, either a \code{ClusterFunction}
class or a character describing a built-in \code{ClusterFunction} object.
Can also be a \code{list} of \code{ClusterFunction} objects, in which case
the list must have names for each function.}
\item{genericOnly}{logical If TRUE, return only the generic required
arguments (i.e. those required by the algorithm type) and not the arguments
specific to that clustering found in the slot \code{requiredArgs}. If FALSE
both sets of arguments are returned.}
}
\value{
\code{requiredArgs} returns a list of the required args of a
function (via a call to \code{\link{requiredArgs}})
\code{algorithmType} returns a character value giving the type of
clustering function ("01" or "K")
\code{inputType} returns a character value giving the input
type of the object
}
\description{
This is a collection of helper methods for the ClusterExperiment class.
}
\details{
Note that when subsetting the data, the dendrogram information and
the co-clustering matrix are lost.
}
|
d94fa005568f364dc7b5eeec07234753c131dc4b | 3eeb623f14e24cdcb09f89ba1e44320598c83b39 | /plot_power.R | 9876a9f870ed420d7ed45c3d2a02e30fa81cc624 | [] | no_license | gbiele/sumscores | 85c1e9a9b66b4e658a4519a138405381b1fe8da2 | e861619d64400c030ad418eaae9bf8968e6262f6 | refs/heads/master | 2022-06-15T21:44:58.732036 | 2022-05-25T20:14:53 | 2022-05-25T20:14:53 | 155,848,880 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,744 | r | plot_power.R | library(boot)
library(VGAM)
library(TeachingDemos)
# sims.Rdata is assumed to provide all_stats, bs, phis, mus and sims
# (TODO confirm -- these objects are used below without being defined here)
load("sims.Rdata")
K = 1
N = 100
breaks = -.5:22.5 # histogram breakpoints covering counts 0..22
df = data.frame(all_stats,row.names = NULL)
# model label per row: beta-binomial ("bbr"), linear ("l"), logistic ("lr")
df$model = rep(c("bbr","l","lr"),length(bs)*3)
df$mu = inv.logit(df$mu) # boot::inv.logit: logit scale -> probability
cols = list(bbr = "green", l = "red", lr = "blue")
# Power curves: one panel per (phi, mu) combination, one curve per model.
# A rescaled histogram of the beta-binomial outcome distribution is inset
# along the bottom of each panel (dark red step line).
par(mfrow = c(2,3))
for (phi in phis) {
for (mu in mus) {
plot(0,type = "n", ylim = c(0,1), xlim = range(bs),
ylab = "power",
xlab = "b", bty = "n",xaxt = "n",
main = paste0("mu=", inv.logit(mu), ", phi=",phi))
axis(1,pos = 0)
# power curve of each model at this (mu, phi)
for (m in unique(df$model)) {
tmp_stats = df[df$mu == inv.logit(mu) & df$phi == phi & df$model == m,]
lines(tmp_stats$b, tmp_stats$power, col = cols[[m]])
}
# sample the beta-binomial outcome (22 trials) for the inset histogram
y = rbetabinom.ab(5000,
size = 22,
shape1 = inv.logit(mu)*phi,
shape2 = (1-inv.logit(mu))*phi)
h = hist(y,plot = F, breaks = breaks)
# linearly map the histogram mids/counts into this plot's coordinates
x_scale = coef(lm(c(.04,.05)~range(h$mids)))
scaled_X = x_scale[1] + h$mids*x_scale[2]
y_scale = coef(lm(c(0,.33)~c(0,max(h$counts))))
scaled_Y = y_scale[1] + h$counts*y_scale[2]
lines(c(.0395,scaled_X),c(0,scaled_Y),"S", col = "darkred")
legend("topleft", lty = 1, bty = "n",
col = c("green","red","blue"),
legend = c("beta binomial","linear","logistic"))
}
}
# Estimate-vs-truth curves for the first two models (beta-binomial, linear);
# the grey dashed identity line marks unbiased estimation.
par(mfrow = c(2,3))
for (phi in phis) {
for (mu in mus) {
plot(0,type = "n", ylim = range(bs), xlim = range(bs),
ylab = "b-est",
xlab = "b", bty = "n",xaxt = "n",
main = paste0("mu=", inv.logit(mu), ", phi=",phi))
axis(1,pos = 0.018)
abline(0,1, col = "grey", lty = 2)
for (m in unique(df$model)[1:2]) {
tmp_stats = df[df$mu == inv.logit(mu) & df$phi == phi & df$model == m,]
lines(tmp_stats$b, tmp_stats$b_hat, col = cols[[m]])
}
# inset histogram of the beta-binomial outcome, as in the previous figure
y = rbetabinom.ab(5000,
size = 22,
shape1 = inv.logit(mu)*phi,
shape2 = (1-inv.logit(mu))*phi)
h = hist(y,plot = F, breaks = breaks)
x_scale = coef(lm(c(.04,.05)~range(h$mids)))
scaled_X = x_scale[1] + h$mids*x_scale[2]
y_scale = coef(lm(c(.019,.03)~c(0,max(h$counts))))
scaled_Y = y_scale[1] + h$counts*y_scale[2]
lines(c(.0395,scaled_X),c(.018,scaled_Y),"S", col = "darkred")
# NOTE(review): the legend lists "logistic" but only the first two models
# are actually drawn in this figure
legend("topleft", lty = 1, bty = "n",
col = c("green","red","blue"),
legend = c("beta binomial","linear","logistic"))
}
}
library(data.table)
dt = data.table(df)
# NOTE(review): this loop appears to be leftover/dead code. Inside dt[...],
# `b` refers to the data.table COLUMN, so `b == b` compares the column with
# itself (always TRUE or NA) and does NOT filter on the loop variable; the
# subset result is also discarded. If a per-b filter was intended, rename
# the loop variable, e.g. for (bv in unique(df$b)) dt[model == m & b == bv & z > 1.96].
for (b in unique(df$b)) {
for (m in unique(df$model)[1:2]) {
dt[model == m & b == b & z > 1.96]
}
}
# Collect each simulation's stats table (tagged with that simulation's mu, b
# and phi settings) into one data.table.
# FIX: previously this grew all_sims with rbind() inside a for loop, which
# copies the accumulated table on every iteration (quadratic), and iterated
# 1:length(sims), which runs over c(1, 0) when sims is empty. rbindlist over
# a list of per-simulation tables avoids both problems.
all_sims = rbindlist(lapply(seq_along(sims), function(s) {
  stats = data.table(sims[[s]]$stats)
  stats[, mu := round(inv.logit(sims[[s]]$mu), digits = 2)]
  stats[, b := sims[[s]]$b]
  stats[, phi := sims[[s]]$phi]
  stats
}))
# round b so it matches the grid of true effect sizes filtered on below
all_sims$b = round(all_sims$b,digits = 2)
# Sampling distributions of the estimated effect, one panel per true b:
#   dashed lines = all estimates
#   solid lines  = estimates from runs where the test was significant (> 1.96)
for (mb in c(0.02,0.03,0.04,0.05)) {
  d1f = density(all_sims[b == mb, me_bb])                          # beta-binomial, all runs
  d2f = density(all_sims[b == mb, me_lin])                         # linear, all runs
  d1 = density(all_sims[`beta-binomial` > 1.96 & b == mb, me_bb])  # beta-binomial, significant only
  d2 = density(all_sims[linear > 1.96 & b == mb, me_lin])          # linear, significant only
  plot(0, type = "n",
       xlim = c(-.05,.15),
       ylim = range(c(d1$y,d2$y,d1f$y,d2f$y)),
       xlab = "estimated effect b",
       ylab = "density",
       bty = "n",
       main = paste0("true b = ",mb))
  lines(d1, col = "green")
  lines(d2, col = "red")
  lines(d1f, col = "green", lty = 2)
  lines(d2f, col = "red", lty = 2)
  # BUG FIX: the legend line types were swapped -- the "all estimates" curves
  # are drawn dashed (lty = 2) and the significant-only curves solid.
  legend("topleft",
         col = c("green","red","grey","grey"),
         lty = c(1,1,2,1),
         bty = "n",
         legend = c("beta binomial","linear","all estimates", "significant estimates"))
  abline(v = mb)  # mark the true effect size
}
ea85942b341f81bf12e04ec7e429d070210af0b1 | 1a965bbd2f0b7e5873408e1e2b3441e199342005 | /scripts/quarterly-earnings-forecasts.R | 3c16a33bea7d5831290fa061ae5efec3ec2e063c | [] | no_license | 196sigma/ceres | ef8f465f9a230ab3c5b03684b6244e61173d1b94 | a248f62268b28a5d999f99fbc67cadd1b9d4bdd4 | refs/heads/master | 2021-06-27T16:31:10.764845 | 2020-10-17T01:07:47 | 2020-10-17T01:07:47 | 139,374,322 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 492 | r | quarterly-earnings-forecasts.R | ## Reginald Edwards
## 16 June 2018
##
## Forecasting earnings treating each firm as a time-series, using only lagged
## fundamentals for that firm,
# NOTE(review): rm(list=ls()) wipes the user's entire workspace; generally
# better left out of scripts.
rm(list=ls())
gc()
# loads the comp.fundq data frame (quarterly fundamentals -- presumably
# Compustat FUNDQ; confirm against the data file)
load("data/comp_fundq.RData")
X <- comp.fundq # NOTE(review): X is assigned but never used below
###############################################################################
## Linear Models
###############################################################################
## Linear Regression
# next-quarter EPS (epspxq.lead1) regressed on current-quarter EPS
linreg1 <- lm(epspxq.lead1 ~ epspxq, data = comp.fundq)
summary(linreg1)
|
fc5ab6feae6ceb332ff0a0d5ac6be2a6454ffbc7 | 7ff48f68b3e0230ee48abcf19650846761f3822d | /utility_scripts/compute_ld_snprelate.R | b96bedc5ed46960601996e5b4a32ea26333efdf5 | [] | no_license | ksamuk/whtstbk_geno | 1b9e9b10ca2aecc612219e7910ebe01b51fc2f8a | cdbb51af1b47041cc41240b03e30fd75a7292121 | refs/heads/master | 2021-01-10T07:59:30.942748 | 2016-04-03T20:43:37 | 2016-04-03T20:43:37 | 49,299,126 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,753 | r | compute_ld_snprelate.R | # prepare a LD-pruned snp file for fastStructure
# the key problem this solves is splitting a multipopulation snp file
# into multiple files, applying the LD pruning, and reforming the multipop file
# Kieran Samuk Feb 10/2016
################################################################################
# Libraries
################################################################################
library(ggplot2)
library(gdsfmt)
library(SNPRelate)
library(data.table)
library(dplyr)
library(MASS)
# source all helper functions from the functions/ directory
list.files("functions", full.names = TRUE) %>% sapply(.,source, verbose = FALSE, echo = FALSE) %>% invisible
# MASS masks dplyr::select; rebind so select() below is dplyr's version
select <- dplyr::select
################################################################################
# Read in raw data
################################################################################
# the meta data file (pop codes)
meta_df <- read.csv("metadata/mega_meta.csv")
# raw snps (raw ped file)
raw_snps <- data.frame(fread("data/other_formats/whtstbk_bial_no_sex.raw", stringsAsFactors = FALSE, header = TRUE))
# extract genotypes into matrix (drop the 6 leading non-genotype columns)
geno_matrix <- data.matrix(raw_snps[,-c(1:6)])
# grab names and convert to matching meta_df coding
sample_df <- data.frame(id = raw_snps$FID %>% gsub("whtstbk_gbs_|_brds", "", .))
sample_df <- left_join(sample_df, meta_df)
sample_df$row <- 1:length(sample_df$id)
# grab snp names (for chr pos)
snp_id <- names(raw_snps[,-c(1:6)])
# reclaim memory
rm(raw_snps)
# format chr pos data: roman chromosome names to integers ("Un" mapped to XXII)
pos_df <- snp_id %>% fstat_label_to_columns()
pos_df <- pos_df %>%
mutate(chr = gsub("Un", "XXII", chr)) %>%
mutate(chr = gsub("chr", "", chr) %>% as.character %>% as.roman %>% as.integer) %>%
mutate(pos = pos %>% as.character %>% as.integer)
pos_df$snp <- snp_id
# write genotypes + positions to a SNPRelate GDS file
snpgdsCreateGeno("data/snp_relate/pop/whtstbk_raw_no_sex_outgroup.gds", genmat = geno_matrix,
sample.id = sample_df$id, snpfirstdim = FALSE,
snp.id = snp_id, snp.chromosome = pos_df$chr, snp.position = pos_df$pos)
################################################################################
# Create a pruned .gds file for each region
################################################################################
# CAN START HERE
genofile <- snpgdsOpen("data/snp_relate/pop/whtstbk_raw_no_sex_outgroup.gds")
#snpgdsClose(genofile)
# sample IDs restricted to the "wht" cluster from population "SR"
ld_samples <- sample_df %>%
filter(cluster == "wht") %>%
filter(pop == "SR") %>%
select(id) %>%
unlist %>% as.character
# LD pruning; missing.rate = 0 appears to keep only fully-typed SNPs
# (TODO confirm against the SNPRelate documentation)
ld_sites <- snpgdsLDpruning(genofile, sample.id = ld_samples, snp.id = NULL, missing.rate = 0.00)
ld_sites <- unlist(ld_sites)
# pairwise LD among the pruned sites (slide = 0: full matrix per SNPRelate docs)
ld_calc <- snpgdsLDMat(genofile, sample.id = ld_samples, snp.id = ld_sites,
slide = 0 , num.thread = 3)
# heat map of squared LD
image(t(ld_calc$LD^2), col = terrain.colors(16))
|
4e399132299ff5dbdcae0b1bd65577de2cba5269 | b435fa4b7808e7d5dfdf2e17d795e76e9f1047d2 | /esercizi_R/esercizio_MASL_0_1.R | cd375fec85a218ea5a0b37bedbf8499ef347d049 | [] | no_license | MattBlue92/MASL_2019 | cba0d8b86de0aee37584b2f077193cd130907860 | 1a0c94cf5a603235a39cbddf07ecefd2f5e7901a | refs/heads/master | 2020-08-02T08:43:01.324553 | 2019-11-08T17:20:56 | 2019-11-08T17:20:56 | 211,289,118 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 992 | r | esercizio_MASL_0_1.R | #esercizio slide: Introduction to MASL
#impostatione dei parametri del modello e della simulazione MC
n = 1000
nsim = 100
sigma = 2
b0 = -1 # intercetta del modello
b1 = 2 # coefficiente di reg per X1
b2 = 1 # coefficiente di reg l'interazzione tra X1 e X2
#fine impostazione dei parametri
# generazione dei dati e del modello
set.seed(42)
X1 = rnorm(n, sd = 2)
X2 = rnorm(n, sd = 2)
eps = rnorm(n)
Y = -1+2*X1^3+(X1)^2*X2+eps
# fine generazione
#split dei dati
split = sample(1:length(X1),1)
training = data.frame("Y"=Y[seq(from = 1, to = split)], "X1"=X1[seq(from = 1, to = split)]
,"X2"=X2[seq(from = 1, to = split)])
test = data.frame("Y"=Y[seq(from = split+1, to = length(X1))], "X1"= X1[seq(from = split+1, to = length(X1))]
,"X2"=X2[seq(from = split+1, to = length(X1))])
data.frame()
#stima del modello e previsioni con il test set
mod0 = lm(Y~X1^4+(X1)^2*X2, data = training)
summary(mod0)
yhat = predict(mod0, test)
#plotting
plot(mod0) |
fa89a1b82cf4396206e291634c93d0edd95a882c | 0d544d53685b8f94179203d89e05797266dabba1 | /man/filterCount.Rd | f8626ebcdc22ae4519be78307c0a67838bd040be | [
"MIT"
] | permissive | jianhong/deepToolsDownstream | 723a6e31ddbba523b50218160936a5da244ff22b | 214ab7b238b4cd68dcd897cb790eb2deb1f6a0e7 | refs/heads/master | 2021-10-24T16:40:18.131278 | 2021-10-21T15:52:58 | 2021-10-21T15:52:58 | 243,008,168 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 688 | rd | filterCount.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filterCount.R
\name{filterCount}
\alias{filterCount}
\title{filter computeMatrix output}
\usage{
filterCount(se, subset)
}
\arguments{
\item{se}{a SummarizedExperiment object from \link{importCount}.}
\item{subset}{filter condition for rows.}
}
\value{
a SummarizedExperiment object
}
\description{
Filter output of importCount.
}
\examples{
file <- system.file("extdata", "count.gz", package= "deepToolsDownstream")
se <- importCount(file)
library(SummarizedExperiment)
keep <- rowMeans(assays(se)[[1]], na.rm = TRUE) < 2 ## arbitory number
nrow(se)
se <- filterCount(se, subset=keep)
nrow(se)
table(keep)
}
|
f17a3179a6008265a246c99607bf92f3508b1727 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/geozoning/examples/touch.border.Rd.R | 6b41db0ec5406293b66b162a8e28d9192696efec | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 603 | r | touch.border.Rd.R | library(geozoning)
### Name: touch.border
### Title: touch.border
### Aliases: touch.border
### ** Examples
map = geozoning::mapTest
criti = correctionTree(qProb = c(0.5), map = map)
Z = criti$zk[[1]][[1]]$zonePolygone
lab = criti$zk[[1]][[1]]$lab
# zone correction
res = correctBoundaryMap(Zi = Z, map = map)
Z = res$Z
# map boundary after correction
boundary = Z[[1]]
for(i in 2:length(Z)){
boundary = rgeos::gUnion(boundary, Z[[i]])
}
# plot map
plotM(map = map, Z = Z, lab = lab, byLab = FALSE)
# verification
for(i in 1:length(Z)){
print(touch.border(z = Z[[i]], boundary = boundary))
}
|
11daf90ba9daa5c7bdeedacd5af8b3cb6843a781 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed_and_cleaned/11020_0/rinput.R | 3a63d94d213de07b40f334d8fb71cf5452c0530f | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
testtree <- read.tree("11020_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11020_0_unrooted.txt") |
2e34788bce74897c19f9f30b3879221a6307ae85 | e6198537ddbe2d29ab506b9049407f9ab71c591f | /app.R | be6c721abb7f76122e5b9ee1685e2e1805333875 | [
"MIT"
] | permissive | GroundB/SecretSanta | ab7b175c6a199cd05bc94af7ad4e6de86e46ca55 | 05bf877ec2e422b39511fbd5e7f30965279477e8 | refs/heads/main | 2023-01-13T06:47:05.094748 | 2020-11-18T22:45:57 | 2020-11-18T22:45:57 | 307,253,709 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,497 | r | app.R | library(shiny)
library(shinymanager)
inactivity <- "function idleTimer() {
var t = setTimeout(logout, 120000);
window.onmousemove = resetTimer; // catches mouse movements
window.onmousedown = resetTimer; // catches mouse movements
window.onclick = resetTimer; // catches mouse clicks
window.onscroll = resetTimer; // catches scrolling
window.onkeypress = resetTimer; //catches keyboard actions
function logout() {
window.close(); //close the window
}
function resetTimer() {
clearTimeout(t);
t = setTimeout(logout, 120000); // time is in milliseconds (1000 is 1 second)
}
}
idleTimer();"
# data.frame with credentials info
credentials <- data.frame(
user = c("BotinstSanta"),
password = c("BotinstSanta"),
stringsAsFactors = FALSE
)
ui <- secure_app(head_auth = tags$script(inactivity),
fluidPage(
textInput("name", "Your Name", value = NULL),
textInput("email", "Email address", value = NULL),
actionButton('Submit', "Submit"),
br(),
br(),
actionButton("load_pep", "See who signed up"),
br(),
br(),
dataTableOutput('table')
))
server <- function(input, output, session) {
##login lock
result_auth <- secure_server(check_credentials = check_credentials(credentials))
output$res_auth <- renderPrint({
reactiveValuesToList(result_auth)
})
file.create("user_inputs.csv")
# classic app
randomVals <- eventReactive(input$load_pep, {
read.table("user_inputs.csv", header = FALSE, col.names = c("Name", "Email"))
})
output$table <- renderDataTable({randomVals()})
observeEvent(input$Submit, {
# Define inputs to save
inputs_to_save <- c('name', 'email')
# Declare inputs
inputs <- NULL
# Append all inputs before saving to folder
for(input.i in inputs_to_save){
inputs <- c(inputs, input[[input.i]])
}
# Inputs data.frame
inputs_data_frame <- rbind.data.frame(inputs)
colnames(inputs_data_frame) <- c("Name", "Email")
write.table(inputs_data_frame, file = "user_inputs.csv", row.names = FALSE, append = TRUE,
col.names = FALSE)
})
}
shinyApp(ui = ui, server = server) |
2f5d31b2f5bf0f0b0817b3bc4a0536ef146a2de1 | 8e35f38bbb6f3f27896e567b75dace5e0be013c1 | /man/full_plot.Rd | 69196eed27ac8de2bf5bd156cccd73b817cc12ff | [] | no_license | eknit/isotopevis | 4785699263f2f35629f20e93bab3d61512b48651 | 103a6af8d6ab090950c95f20ec18c50091a4474f | refs/heads/master | 2020-12-02T18:22:25.412682 | 2016-02-06T12:13:51 | 2016-02-06T12:13:51 | 37,761,721 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 278 | rd | full_plot.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/isotopevis.R
\name{full_plot}
\alias{full_plot}
\title{full_plot makes grid of all plots}
\usage{
full_plot(models, s, ncol = 3, nrow = 4)
}
\description{
full_plot makes grid of all plots
}
|
0798769de9fc9e12f33184e8e2ef6de962d024d5 | 557cda9a1cb3fd04da7ef15c9adec69bb3df9888 | /man/collegerg.Rd | ca6cc36749983dc522c300ba39a72009dedc43a3 | [] | no_license | cran/SDAResources | 7e4cb27a87fa4e8e334f641c419fcc6e912e33a2 | addafccfb82d962f234606fc6fcb2386fc8f60f3 | refs/heads/master | 2023-08-22T23:38:51.589732 | 2021-10-22T08:20:13 | 2021-10-22T08:20:13 | 368,240,812 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,077 | rd | collegerg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collegerg.R
\docType{data}
\name{collegerg}
\alias{collegerg}
\title{collegerg data}
\format{
This data frame contains the following columns:
\describe{
\item{unitid:}{unit identification number}
\item{instnm:}{institution name (character, length 81)}
\item{city:}{city (character, length 24)}
\item{stabbr:}{state abbreviation (character, length 2)}
\item{highdeg:}{highest degree awarded
3 = Bachelor's degree
4 = Graduate degree}
\item{control:}{control (ownership) of institution
1 = public
2 = private nonprofit}
\item{region:}{region where institution is located
1 New England (CT, ME, MA, NH, RI, VT)
2 Mid East (DE, DC, MD, NJ, NY, PA)
3 Great Lakes (IL, IN, MI, OH, WI)
4 Plains (IA, KS, MN, MO, NE, ND, SD)
5 Southeast (AL, AR, FL, GA, KY, LA, MS, NC, SC, TN, VA, WV)
6 Southwest (AZ, NM, OK, TX)
7 Rocky Mountains (CO, ID, MT, UT, WY)
8 Far West (AK, CA, HI, NV, OR, WA)
}
\item{locale:}{locale of institution
11 City: Large (population of 250,000 or more)
12 City: Midsize (population of at least 100,000 but less than 250,000)
13 City: Small (population less than 100,000)
21 Suburb: Large (outside principal city, in urbanized area with population of 250,000 or more)
22 Suburb: Midsize (outside principal city, in urbanized area with population of at least 100,000 but less than 250,000)
23 Suburb: Small (outside principal city, in urbanized area with population less than 100,000)
31 Town: Fringe (in urban cluster up to 10 miles from an urbanized area)
32 Town: Distant (in urban cluster more than 10 miles and up to 35 miles from an urbanized area)
33 Town: Remote (in urban cluster more than 35 miles from an urbanized area)
41 Rural: Fringe (rural territory up to 5 miles from an urbanized area or up to 2.5 miles from an urban cluster)
42 Rural: Distant (rural territory more than 5 miles but up to 25 miles from an urbanized area or more than 2.5 and up to 10 miles from an urban cluster)
43 Rural: Remote (rural territory more than 25 miles from an urbanized area and more than 10 miles from an urban cluster)
}
\item{ccbasic:}{carnegie basic classification
15 Doctoral Universities: Very High Research Activity
16 Doctoral Universities: High Research Activity
17 Doctoral/Professional Universities
18 Master's Colleges & Universities: Larger Programs
19 Master's Colleges & Universities: Medium Programs
20 Master's Colleges & Universities: Small Programs
21 Baccalaureate Colleges: Arts & Sciences Focus
22 Baccalaureate Colleges: Diverse Fields
}
\item{ccsizset:}{carnegie classification, size and setting
6 Four-year, very small, primarily nonresidential
7 Four-year, very small, primarily residential
8 Four-year, very small, highly residential
9 Four-year, small, primarily nonresidential
10 Four-year, small, primarily residential
11 Four-year, small, highly residential
12 Four-year, medium, primarily nonresidential
13 Four-year, medium, primarily residential
14 Four-year, medium, highly residential
15 Four-year, large, primarily nonresidential
16 Four-year, large, primarily residential
17 Four-year, large, highly residential
}
\item{hbcu:}{historically black college or university,
1 = yes, 0 = no}
\item{openadmp:}{does the college have an open admissions policy, that is, does it accept any students that apply or have minimal requirements for admission?
1 = yes, 0 = no}
\item{adm_rate:}{fall admissions rate, defined as the number of admitted undergraduates divided by the number of undergraduates who applied}
\item{sat_avg:}{average SAT score (or equivalent) for admitted students}
\item{ugds:}{number of degree-seeking undergraduate students enrolled in the fall term}
\item{ugds_men:}{proportion of ugds who are men}
\item{ugds_women:}{proportion of ugds who are women}
\item{ugds_white:}{proportion of ugds who are white (based on self-reports)}
\item{ugds_black:}{proportion of ugds who are black/African American (based on self-reports)}
\item{ugds_hisp:}{proportion of ugds who are Hispanic (based on self-reports)}
\item{ugds_asian:}{proportion of ugds who are Asian (based on self-reports)}
\item{ugds_other:}{proportion of ugds who have other race/ethnicity (created from other categories on original data file; race/ethnicity proportions sum to 1)}
\item{npt4:}{average net price of attendance, derived from the full cost of attendance,
including tuition and fees, books and supplies, and living expenses,
minus federal, state, and institutional grant scholarship aid, for full time,
first time undergraduate Title IV receiving students.
NPT4 created from scorecard data variables NPT4_PUB if public institution and
NPT4_PRIV if private}
\item{tuitionfee_in:}{in-state tuition and fees}
\item{tuitionfee_out:}{out-of-state tuition and fees}
\item{avgfacsal:}{average faculty salary per month}
\item{pftfac:}{proportion of faculty that is full-time}
\item{c150_4:}{proportion of first-year, full-time students who complete their degree within 150\% of the expected time to complete; for most institutions, this is the proportion of students who receive a degree within 6 years}
\item{grads:}{number of graduate students}
\item{selectionprob:}{selection probability for each replicate sample}
\item{samplingweight:}{sampling weight for each replicate sample}
\item{repgroup:}{replicate group number}
}
}
\usage{
data(collegerg)
}
\description{
Five replicate SRSs from the set of public colleges and universities (having
control = 1) in \emph{college} data. Columns 1-29 are as in college data, with additional columns
30-32 listed below. Note that the selection probabilities and sampling weights are for the
separate replicate samples, so that the weights for each replicate sample sum to the population size 500.
}
\references{
Lohr (2021), Sampling: Design and Analysis, 3rd Edition. Boca Raton, FL: CRC Press.
Lu and Lohr (2021), R Companion for \emph{Sampling: Design and Analysis, 3rd Edition}, 1st Edition. Boca Raton, FL: CRC Press.
}
\keyword{datasets}
|
17308440ca5cbf1ece5f801b34e7aa481724008a | 555651902520492b50c5c3e67e81a699bcc781a4 | /data_construction_and_analysis/lme_cut.R | 303efc029daa23fb5ea8d68d6173a5f9e2a8de72 | [] | no_license | LaurenzLammer/socialisolation | 6087a6f28091c59c8d1abbe7d20cf5deccb1c1a2 | 96e0b8ba6d3c76a53d84f7a813399f33e5dc13d9 | refs/heads/main | 2023-06-08T15:02:45.158446 | 2023-03-25T22:57:31 | 2023-03-25T22:57:31 | 332,746,212 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,020 | r | lme_cut.R | library("lme4") # 1.1-31
library("lmerTest") # 3.1-3
# ensure that lmerTest doesn't mask lmer, which would cause us multiple problems
lmer <- lme4::lmer
source("/data/gh_gr_agingandobesity_share/literature/methods/statistics/linear_models_course_rogermundry_2018/functions/diagnostic_fcns.r")
source("/data/gh_gr_agingandobesity_share/literature/methods/statistics/linear_models_course_rogermundry_2018/functions/glmm_stability.r")
library("BayesFactor") #0.9.12.4.4
library(car) #3.1-1
library(BiocManager) # 1.30.19
library(qvalue) # 2.30.0
library(doMC) #1.3.8
data <- read.csv("/data/pt_life/ResearchProjects/LLammer/si_update/Data/compiled_scaled_data.csv")
path = "/data/pt_life/ResearchProjects/LLammer/si_update/Results_cut_off_interact"
data$LSNS_cut <- ifelse(data$LSNS_sum > 18, 1, 0)
#make subject a factor to please BayeysFactor
data$subject <- as.factor(data$subject)
# we will start with hypothesis 1.5: Participants that are socially more isolated at baseline
# will experience aggravated age-related changes in hippocampal volume over the follow-up period.
# hypothesis 1.5, model 1
# The models to test our hypotheses have a very similar structure.
# Henceforth, we can reduce the necessary lines of code by employing the function "fastlme".
fastlme <- function(poi = "LSNS_base", dv, model = 1){
# saves code as most models have a very similar structure
# tests model assumptions and stability
# performs full-null model comparison
# calculates BF
# see above model for step-by-step explanation
# poi = predictor of interest = LSNS_base / LSNS_change / LSNS_base:age_change / LSNS_base:LSNS_change.
# dv = dependent variable of the model = Hippocampal volume or a cognitive function
# model = model 1 (limited control variables, is default) or model 2 (all control variables)
# further subsetting in case of model == 2, because GeneralTestBF can't handle NAs
if(dv == "HCV"){
datax = subset(data, data$outlier_HCV != 1)
}
else if(dv == "memo"){
datax = subset(data, data$outlier_memo != 1)
}
else if(dv == "exfunct"){
datax = subset(data, data$outlier_exfunct != 1)
}
else{
datax = subset(data, data$outlier_procspeed != 1)
}
if(model == 1){
if(poi == "LSNS_base"){
res <- lmer(formula = paste0(dv, "~LSNS_base+LSNS_base:LSNS_cut+LSNS_cut+LSNS_change+age_base+age_change+sex+(1|subject)"),
data=datax, REML=F, na.action = na.omit)
test <- lm(formula = paste0(dv, "~LSNS_base+LSNS_change+LSNS_cut+age_base+age_change+sex"), data=datax)
vifres <- as.data.frame(vif(test))
bf <- generalTestBF(formula = as.formula(paste0(dv, "~LSNS_base+LSNS_base:LSNS_cut+LSNS_cut+LSNS_change+age_base+age_change+sex+subject")),
data=datax, whichRandom = "subject", multicore = T,
neverExclude = c("LSNS_change", "age_base", "age_change", "sex", "subject", "^LSNS_base$", "^LSNS_cut$"))
}
else if(poi == "LSNS_change"){
res <- lmer(formula = paste0(dv, "~LSNS_base+LSNS_change+LSNS_change:LSNS_cut+LSNS_cut+age_base+age_change+sex+(1|subject)"),
data=datax, REML=F, na.action = na.omit)
test <- lm(formula = paste0(dv, "~LSNS_base+LSNS_change+LSNS_cut+age_base+age_change+sex"), data=datax)
vifres <- as.data.frame(vif(test))
bf <- generalTestBF(formula = as.formula(paste0(dv, "~LSNS_base+LSNS_change+LSNS_change:LSNS_cut+LSNS_cut+age_base+age_change+sex+subject")),
data=datax, whichRandom = "subject", multicore = T,
neverExclude = c("LSNS_base", "age_base", "age_change", "sex", "subject", "^LSNS_change$", "^LSNS_cut$"))
}
}
else{
datay <- subset(datax, !is.na(datax$CES.D))
if(poi == "LSNS_base"){
res <- lmer(formula = paste0(dv, "~LSNS_base+LSNS_base:LSNS_cut+LSNS_cut+LSNS_change+age_base+age_change+sex+education+BMI+hypertension+diabetes+CES.D+(1|subject)"),
data=datax, REML=F, na.action = na.omit)
test <- lm(formula = paste0(dv, "~LSNS_base+LSNS_change+LSNS_cut+age_base+age_change+sex+education+BMI+hypertension+diabetes+CES.D"),
data=datax, na.action = na.omit)
vifres <- as.data.frame(vif(test))
bf <- generalTestBF(formula = as.formula(paste0(dv, "~LSNS_base+LSNS_base:LSNS_cut+LSNS_cut+LSNS_change+age_base+age_change+sex+education+BMI+hypertension+diabetes+CES.D+subject")),
data=datay, whichRandom = "subject", multicore = T, neverExclude =
c("LSNS_change", "age_base", "age_change", "sex", "education", "BMI", "hypertension", "diabetes", "CES.D", "subject", "^LSNS_base$", "^LSNS_cut$"))
}
else if(poi == "LSNS_change"){
res <- lmer(formula = paste0(dv, "~LSNS_base+LSNS_change:LSNS_cut+LSNS_cut+LSNS_change+age_base+age_change+sex+education+BMI+hypertension+diabetes+CES.D+(1|subject)"),
data=datax, REML=F, na.action = na.omit)
test <- lm(formula = paste0(dv, "~LSNS_base+LSNS_change+LSNS_cut+age_base+age_change+sex+education+BMI+hypertension+diabetes+CES.D"),
data=datax, na.action = na.omit)
vifres <- as.data.frame(vif(test))
bf <- generalTestBF(formula = as.formula(paste0(dv, "~LSNS_base+LSNS_change:LSNS_cut+LSNS_cut+LSNS_change+age_base+age_change+sex+education+BMI+hypertension+diabetes+CES.D+subject")),
data=datay, whichRandom = "subject", multicore = T, neverExclude =
c("LSNS_base", "age_base", "age_change", "sex", "education", "BMI", "hypertension", "diabetes", "CES.D", "subject", "^LSNS_change$", "^LSNS_cut$"))
}
}
bf_nolog <- extractBF(bf, logbf = F)
bf_nolog["full_vs_null", 1] <- bf_nolog[1,1] / bf_nolog[2,1]
chains <- posterior(bf, 1, iterations = 10000)
if(poi == "LSNS_base"){
siding_factor <- mean(chains[,"LSNS_base.&.LSNS_cut"]<0)
} else if(poi == "LSNS_change"){
siding_factor <- mean(chains[,"LSNS_change.&.LSNS_cut"]<0)
}
bf_nolog["full_vs_null_sided", 1] <- bf_nolog["full_vs_null", 1] * 2 * siding_factor
write.csv(vifres,
file = paste0(path, "/VIFs/", dv, "_on_", poi, "_model", model, ".csv"))
sres <- summary(res)
coeff <- as.data.frame(sres$coefficients)
CI <- as.data.frame(confint.merMod(res))
coeff <- merge(coeff, CI, all = T, by="row.names")
coeff$n_total <- nobs(res)
coeff$n_individual <- ngrps(res)
write.csv(coeff, file = paste0(path, "/coefficients/", dv, "_on_", poi, "_model", model, ".csv"))
png(filename = paste0(path, "/diag_plots/", dv, "_on_", poi, "_model", model, ".png"))
diagnostics.plot(res)
dev.off()
stab_results <- glmm.model.stab(res)
write.csv(stab_results$summary, file = paste0(path, "/model_stab/", dv, "_on_", poi, "_model", model, ".csv"))
pval_res <- as_lmerModLmerTest(res)
if(poi == "LSNS_base"){
p <- as.data.frame(drop1(pval_res, scope = "LSNS_base:LSNS_cut", ddf = "Satterthwaite"))
p$sided_p <- ifelse(coeff[coeff$Row.names == "LSNS_base:LSNS_cut",]$Estimate < 0, p$`Pr(>F)`/2, 1-p$`Pr(>F)`/2)
} else if(poi == "LSNS_change"){
p <- as.data.frame(drop1(pval_res, scope = "LSNS_change:LSNS_cut", ddf = "Satterthwaite"))
p$sided_p <- ifelse(coeff[coeff$Row.names == "LSNS_change:LSNS_cut",]$Estimate < 0, p$`Pr(>F)`/2, 1-p$`Pr(>F)`/2)
}
p$qval <- 0
p$significance <- "not applicable"
list <- list(res, stab_results, vifres, bf, bf_nolog, p)
names(list) <- c("res", "stab_results", "vifres", "bf", "bf_nolog", "p")
save.image(file = paste0(path, "/Workspace/workspace.RData"))
return(list)
}
#hypothesis 1.1: Social isolation is negatively associated with hippocampal volume across individuals.
list111 <- fastlme(dv = "HCV")
list112 <- fastlme(dv = "HCV", model = 2)
#hypothesis 1.3: Social isolation is negatively associated with hippocampal volume within individuals.
list131 <- fastlme(poi = "LSNS_change" ,dv = "HCV")
list132 <- fastlme(poi = "LSNS_change", dv = "HCV", model = 2)
# hypothesis 2.1: Social isolation is negatively associated with cognitive functions across individuals.
# a: executive function
list211a <- fastlme(dv = "exfunct")
list212a <- fastlme(dv = "exfunct", model = 2)
# b: memory performance
list211b <- fastlme(dv = "memo")
list212b <- fastlme(dv = "memo", model = 2)
# c: processing speed
list211c <- fastlme(dv = "procspeed")
list212c <- fastlme(dv = "procspeed", model = 2)
# hypothesis 2.2: Social isolation is negatively associated with cognitive functions within individuals.
# a: executive function
list221a <- fastlme(poi = "LSNS_change" , dv = "exfunct")
list222a <- fastlme(poi = "LSNS_change" , dv = "exfunct", model = 2)
# b: memory performance
list221b <- fastlme(poi = "LSNS_change" , dv = "memo")
list222b <- fastlme(poi = "LSNS_change" , dv = "memo", model = 2)
# c: processing speed
list221c <- fastlme(poi = "LSNS_change" , dv = "procspeed")
list222c <- fastlme(poi = "LSNS_change" , dv = "procspeed", model = 2)
# calculate q-values and test FDR-corrected significance at alpha = 0.05
qval1 <- qvalue(c(list111$p$sided_p, list131$p$sided_p, list211a$p$sided_p, list211b$p$sided_p,
list211c$p$sided_p, list221a$p$sided_p, list221b$p$sided_p, list221c$p$sided_p), fdr.level = 0.05, pi0 = 1)
qval2 <- qvalue(c(list112$p$sided_p, list132$p$sided_p, list212a$p$sided_p, list212b$p$sided_p,
list212c$p$sided_p, list222a$p$sided_p, list222b$p$sided_p, list222c$p$sided_p), fdr.level = 0.05, pi0 = 1)
# add information from FDR-correction to lists
family1 <- list(list111, list131, list211a, list211b, list211c, list221a, list221b, list221c)
family2 <- list(list112, list132, list212a, list212b, list212c, list222a, list222b, list222c)
names(family1) <- c("111", "131", "211a", "211b", "211c", "221a", "221b", "221c")
names(family2) <- c("112", "132", "212a", "212b", "212c", "222a", "222b", "222c")
counter <- 0
for(n in names(family1)){
counter <- counter +1
family1[[n]]$p$qval <- qval1$qvalues[counter]
family1[[n]]$p$significance <- qval1$significant[counter]
}
counter <- 0
for(n in names(family2)){
counter <- counter +1
family2[[n]]$p$qval <- qval2$qvalues[counter]
family2[[n]]$p$significance <- qval2$significant[counter]
}
# create a comprehensive list of lists to use lapply
comprehensive <- c(family1, family2)
hypotheses <- c("111", "131", "211a", "211b", "211c", "221a", "221b", "221c", "112", "132", "212a", "212b", "212c", "222a", "222b", "222c")
names(comprehensive) <- hypotheses
# test if our VIF threshold was exceeded in any model and save results
vif_tresh_check <- as.data.frame(lapply(comprehensive, function(x) ifelse(max(x$vifres) > 10, print("VIF threshold exceeded"), print("ok"))))
write.csv(vif_tresh_check, file = paste0(path, "/VIFs/threshold_check.csv"))
# check for warnings in stability tests
stab_warnings <- as.data.frame(lapply(comprehensive, function(x) print(unique(x$stab_results$detailed$warnings))))
write.csv(stab_warnings, file = paste0(path, "/model_stab/warnings_check.csv"))
# write p-value tables
pvals <- data.frame(matrix(unlist(lapply(comprehensive, function(x) print(x$p))), nrow = length(comprehensive),
byrow = TRUE))
colnames(pvals) <- c("Sum Sq", "Mean Sq", "NumDF", "DenDF", "F value", "Pr(>F)", "qval", "significance", "sided p-value")
rownames(pvals) <- hypotheses
write.csv(pvals, file = paste0(path, "/pvals/overview.csv"))
# write a dataframe containing all Bayes-Factor information
BF <- data.frame(matrix(nrow = length(comprehensive)*4, ncol = 4))
BF_names <- c()
counter <- 1
for(n in names(comprehensive)){
BF[((counter):(counter+3)),] <- comprehensive[[n]]$bf_nolog
BF_names <- c(BF_names, rownames(comprehensive[[n]]$bf_nolog))
counter <- counter + 4
}
colnames(BF) <- c("bf", "error", "time", "code")
BF$model <- BF_names
for(n in (1:length(BF$model))){
BF$hypothesis[n] <- hypotheses[ceiling(n/4)]
}
write.csv(BF, file = paste0(path, "/bayes_factor/bayes_factors.csv"))
save.image(file = paste0(path, "/Workspace/workspace.RData"))
|
a57be0f830f58adc80e2fef05fd595429164f83d | 83b90bb828d6057b70296968602a3d56d0ec7302 | /06-hds-analysis.R | 1c38ba69f73db5170ab69b746dc21fc9880be721 | [] | no_license | awong234/parasite_analysis | 77f9d5d706b96736a7023d616d4b74e7a493d4f4 | 4510b3919e849213324169530ceb9f5591908e2a | refs/heads/master | 2020-04-29T12:44:40.882511 | 2019-08-24T17:10:36 | 2019-08-24T17:12:18 | 176,148,729 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,704 | r | 06-hds-analysis.R |
# Setup ----------------------------------------
# Modelling (INLA/inlabru), parallel backend, plotting, and data-manipulation
# packages. dplyr is attached last so its verbs (select/filter) mask the
# versions in MASS/reshape2 rather than the other way around.
library(INLA)
library(inlabru)
library(doParallel)
library(magrittr)
library(ggplot2)
library(MASS)
library(GGally)
library(MuMIn)
library(viridis)
library(reshape2)
library(dplyr)
# Project helper functions (e.g. plotting helpers used further down).
source('functions.R')
# Pre-scaled covariates and their centering/scaling attributes; these loads
# provide `covariate_scaled_df` and `covariate_scaled_attr` used below.
# NOTE(review): which .Rdata supplies which object is inferred from later use —
# confirm against the files themselves.
load(file = 'scaled_covariates.Rdata')
load(file = 'scaled_covariates_attr.Rdata')
# load(file = '.RData')
# Load data ----------------------------------------
# Sample metadata plus three fecal parasite datasets, joined by sample key PK:
# flukefinder (F. magna eggs), modified Baermann (DSL larvae, F. magna eggs),
# and quantitative counts.
metadata = read.csv(file = 'metadata_adjusted.csv', stringsAsFactors = F)
metadata$Date = as.Date(metadata$Date)
ff = read.csv(file = 'fecal_flukefinder.csv', stringsAsFactors = F)
mb = read.csv(file = 'fecal_MB.csv', stringsAsFactors = F)
quant = read.csv(file = 'fecal_quant.csv', stringsAsFactors = F)
# left_join() joins on the shared column(s) by default (PK here); counts are
# renamed to tag the assay each one came from.
metadata_join = metadata %>% left_join(ff %>% select(PK, Total_eggs)) %>% rename(fmagna_ff = Total_eggs) %>%
  left_join(mb %>% select(PK, Total_larvae_dsl, total_eggs_fmagna)) %>% rename(dsl_mb = Total_larvae_dsl, fmagna_mb = total_eggs_fmagna) %>%
  left_join(quant %>% select(PK, Fascioloides_magna, Protostrongylid_DSL)) %>% rename(fmagna_quant = Fascioloides_magna, dsl_quant = Protostrongylid_DSL)
# Which na
# NOTE(review): na_location_index is not used anywhere in this chunk; rows with
# missing values are dropped via complete.cases() a few lines down instead.
na_location_index = is.na(metadata$Easting) | is.na(metadata$Northing)
# Attach the pre-scaled covariates column-wise. Raw coordinates are renamed to
# *_real so they can coexist with scaled Easting/Northing covariate columns.
# NOTE(review): bind_cols() assumes covariate_scaled_df rows are in the same
# order as metadata_join — confirm upstream.
data = bind_cols(metadata_join %>% rename(Easting_real = Easting, Northing_real = Northing), covariate_scaled_df)
# Day-of-year, centered and scaled, as a seasonal covariate.
data %<>% mutate(JulianDay = lubridate::yday(Date) %>% scale())
# missing_data_rows = is.na(data$Easting) | is.na(data$Northing)
# Keep only rows complete for the two response counts and all covariates.
data = data[complete.cases(data %>% select(fmagna_ff, dsl_mb, Easting:JulianDay)),]
# Select geomorphon
# Copy the chosen geomorphon scale into a generic column name used by models.
s_geomorph = 'geomorphon_40'
data$geomorphon = data[, s_geomorph]
# Data points as sp
# Promote to SpatialPointsDataFrame on the raw (unscaled) coordinates.
data_sp = data
coordinates(data_sp) = ~Easting_real + Northing_real
# Create mesh
# Adirondack Park boundary polygon (used for plotting below).
adkbound = rgdal::readOGR(dsn = '../../GIS/adkParkPoly/adkParkPoly.shp')
# Two nonconvex hulls around the sample locations: an inner (10000) and an
# outer (30000 map units; presumably meters, given projected Easting/Northing)
# boundary, so the mesh gets a coarser extension zone that reduces boundary
# effects in the SPDE approximation.
boundary <- list(
  inla.nonconvex.hull(coordinates(data_sp), 10000),
  inla.nonconvex.hull(coordinates(data_sp), 30000))
# Triangulation for the SPDE: finer triangles (max.edge 2000) inside the inner
# boundary, coarser (5400) in the extension.
adk_mesh = inla.mesh.2d(boundary=boundary,
                        max.edge=c(2000, 5400),
                        min.angle=c(30, 21),
                        max.n=c(48000, 16000), ## Safeguard against large meshes.
                        max.n.strict=c(128000, 128000), ## Don't build a huge mesh!
                        cutoff=1050, ## Filter away adjacent points.
                        offset=c(5200, 15000)) ## Offset for extra boundaries, if needed.
# Quick visual check of points / mesh / park boundary (kept for interactive use):
# ggplot() +
#   gg(data_sp) +
#   gg(adk_mesh) +
#   gg(adkbound) +
#   coord_fixed(ratio = 1)
# Load prediction grid --------------------------------------------------------------
# Provides `predict_grid` (spatial grid; 1000 presumably indicates cell spacing
# — confirm against how the grid was built).
load('predict_grid_1000.Rdata')
# Add new predictors
# Add Precipitation --------------------------------------------------------
# Precipitation will be the mean of the three years.
precip_names = dir(path = '../../GIS/PRISM_precip_data/', pattern = 'PRISM.+tif$', full.names = FALSE)
precip_files = dir(path = '../../GIS/PRISM_precip_data/', pattern = 'PRISM.+tif$', full.names = TRUE)
# Year of each raster, parsed as the first 4-digit run in the file name.
# NOTE(review): precip_dates is not used below in this section; kept in case it
# is referenced later in the script.
precip_dates = data.frame(file = precip_names, year = precip_names %>% {regmatches(x = ., m = regexec(pattern = '\\d{4}', text = ., perl = TRUE))} %>% as.integer)
# Preallocated: one column of extracted values per raster, one row per grid cell.
precip_data = matrix(NA, nrow = NROW(predict_grid), ncol = length(precip_files))
# seq_along() rather than 1:length() so an empty directory yields zero iterations.
for(f in seq_along(precip_files)){
  precip = raster::raster(precip_files[f])
  precip_data[,f] = raster::extract(precip, predict_grid)
}
# Mean across years; NA if any year is missing at a cell (such cells are dropped
# later by complete.cases() when the grid is subset).
precip_means = precip_data %>% rowMeans()
predict_grid@data$Precipitation = precip_means
# Add snow ----------------------------------------------------------------------
# Same pattern as precipitation: extract each annual snowfall raster at the
# prediction-grid cells, then average across years.
# NOTE: the '.' before 'tif' in the pattern is an unescaped regex dot; it still
# matches the intended files.
snowcover_names = dir(path = '../../GIS/NWS_snowfall_data/', pattern = 'snowfall.*.tif$')
snowcover_paths = dir(path = '../../GIS/NWS_snowfall_data/', pattern = 'snowfall.*.tif$', full.names = TRUE)
# NOTE(review): snowcover_dates is not used below in this section.
snowcover_dates = data.frame(file = snowcover_names, year = snowcover_names %>% {regmatches(x = ., m = regexec(pattern = '\\d{4}', text = ., perl = TRUE))} %>% as.integer)
# Extract snowcover values for year prior from location
snowcover_data = matrix(NA, nrow = NROW(predict_grid), ncol = length(snowcover_paths))
# seq_along() rather than 1:length() so an empty directory yields zero iterations.
for(f in seq_along(snowcover_paths)){
  snowcover = raster::raster(snowcover_paths[f])
  snowcover_data[,f] = raster::extract(snowcover, predict_grid)
}
# Mean snowcover
snowcover_means = snowcover_data %>% rowMeans()
predict_grid@data$Snow = snowcover_means
# Add wetland distance
# Precomputed distance-to-wetland raster, sampled at each prediction cell.
wetland_dist = raster::raster('../../GIS/NY_shapefile_wetlands/dist_to_wetlands.tif')
predict_grid@data$Distance_to_wetland = raster::extract(wetland_dist, predict_grid)
# Add geomorphon values
# Select 40-cell scale
s_geomorph = 'geomorphon_40'
# Landform labels for geomorphon codes 1..10, in code order.
geomorphon_class_labels <- c('flat', 'summit', 'ridge', 'shoulder', 'spur',
                             'slope', 'hollow', 'footslope', 'valley', 'depression')
geomorphon = raster::raster(paste0('../../GIS/geomorphon/', s_geomorph, '.tif'))
# Sample the raster at each prediction cell, then recode the integer codes to
# a labelled factor for use as a categorical covariate.
temp = raster::extract(geomorphon, predict_grid, method = 'simple')
temp = factor(temp, levels = 1:10, labels = geomorphon_class_labels)
predict_grid@data$geomorphon = temp
# Scale prediction
# Restrict predictions to cells with a full set of covariates, then apply the
# stored centering/scaling so prediction covariates match the scale the model
# covariates were fitted on.
predict_grid_complete = predict_grid[complete.cases(predict_grid@data),]
# Rows of covariate_scaled_attr are named by covariate; move names into a column.
covariate_scaled_attr$covar = row.names(covariate_scaled_attr)
rownames(covariate_scaled_attr) = NULL
predict_grid_scaled = predict_grid_complete
rel_cols = covariate_scaled_attr$covar
# Center and scale
# Loop variable renamed from `c` (which shadowed base::c as a variable), and
# the Center/Scale lookup is done once per covariate instead of four times.
# Printed output (name, head of values, center, scale) is unchanged.
for(covar_name in rel_cols){
  attr_row = covariate_scaled_attr %>% filter(covar == covar_name)
  center_val = attr_row %>% pull(Center)
  scale_val = attr_row %>% pull(Scale)
  print(covar_name)
  print(head(predict_grid_scaled@data[[covar_name]]))
  print(center_val)
  print(scale_val)
  # Center
  predict_grid_scaled@data[[covar_name]] = predict_grid_scaled@data[[covar_name]] - center_val
  # Scale
  predict_grid_scaled@data[[covar_name]] = predict_grid_scaled@data[[covar_name]] / scale_val
}
# SPDE Setup ##################################
# Make the A matrices and spde for all subsequent stacks
# NOTE(review): the boundary/mesh below repeats the "# Create mesh" section
# above with identical arguments — apparently so this section can be run as a
# re-entry point after a restart; the rebuilt mesh is the same.
boundary <- list(
  inla.nonconvex.hull(coordinates(data_sp), 10000),
  inla.nonconvex.hull(coordinates(data_sp), 30000))
adk_mesh = inla.mesh.2d(boundary=boundary,
                        max.edge=c(2000, 5400),
                        min.angle=c(30, 21),
                        max.n=c(48000, 16000), ## Safeguard against large meshes.
                        max.n.strict=c(128000, 128000), ## Don't build a huge mesh!
                        cutoff=1050, ## Filter away adjacent points.
                        offset=c(5200, 15000)) ## Offset for extra boundaries, if needed.
save(adk_mesh, file = 'model_outputs/adk_mesh.Rdata')
# Projection matrices mapping mesh basis functions to the observation
# locations (projector_A) and to the prediction grid (predictor_A).
projector_A = inla.spde.make.A(adk_mesh, loc=data %>% select(Easting_real, Northing_real) %>% as.matrix())
predictor_A = inla.spde.make.A(adk_mesh, loc = predict_grid_scaled@coords)
# Matern SPDE with PC priors (alpha = 2 corresponds to smoothness nu = 1 in 2D).
spde <- inla.spde2.pcmatern(
  mesh = adk_mesh,
  alpha = 2,
  ### mesh and smoothness parameter
  prior.range = c(1000, 0.01),
  ### PC prior: P(practical range < 1000) = 0.01
  prior.sigma = c(1, 0.01)
  ### PC prior: P(sigma > 1) = 0.01
)
# Test HDS Model --------------------------------------------------------------------
# Keep mesh model from previous.
# Need transect lines, ds data
deer_transects_2018 = rgdal::readOGR(dsn = '../../GIS/DistanceSampling2018/ds_transects_2018.shp')
ds_data = read.csv('ds_data_adjusted.csv')
ds_data_sp = ds_data
coordinates(ds_data_sp) = ~Easting + Northing
# Inherit the CRS of the prediction grid so overlays line up
proj4string(ds_data_sp) = proj4string(predict_grid)
ds_data_sp$distance = ds_data_sp$PERP_DIST_M
# What is the strip half-width? Set to a little larger than max distance
W = ceiling(max(ds_data$PERP_DIST_M)) # 3 meters
# Define half-normal detection function
# Detection probability at perpendicular distance `distance`, with log-scale sigma `lsig`
hn = function(distance, lsig){
  exp(-0.5*(distance/exp(lsig))^2)}
# Define matern SPDE function for deer scats
matern <- inla.spde2.pcmatern(adk_mesh,
                              prior.sigma = c(2, 0.01),
                              prior.range = c(1000, 0.5))
# Define components of SPDE model
cmp = ~ mySPDE(map = coordinates, model = matern) +
  lsig + Intercept
# Thinned LGCP intensity: spatial field + log detection probability + log(1/W) + intercept
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
# Posterior summaries of the SPDE hyperparameters
spde.range <- spde.posterior(fit, "mySPDE", what = "range"); plot(spde.range)
spde.logvar <- spde.posterior(fit, "mySPDE", what = "log.variance"); plot(spde.logvar)
# Predict over range
pxl = pixels(adk_mesh, nx = 200, ny = 200)
pr.int <- predict(fit, pxl, ~ exp(mySPDE))
ggplot() + gg(pr.int) + gg(adkbound) +
  gg(deer_transects_2018, color = "red") +
  gg(ds_data_sp, size = 0.2, alpha = 1) +
  noyticks + noxticks +
  theme(legend.key.width = unit(x = 0.2,"cm"), legend.key.height = unit(x = 0.3,"cm")) +
  theme(legend.text=element_text(size=6)) +
  # guides(fill=FALSE) +
  coord_equal()
# View distance function
distdf <- data.frame(distance = seq(0,8,length=100))
dfun <- predict(fit, distdf, ~ hn(distance,lsig))
plot(dfun)
# Setup to fit covariate models -------------------------------------------
# Null model is meaningless here, since it will be multiplied against parasite models. Will be using
# covariate models only.
# Combine in with ds_data
# Attach covariates at each detection point via point-in-polygon overlay:
# ds_data_scaled gets the centered/scaled covariates, ds_data_sp the raw ones.
ds_data_scaled = ds_data_sp
ds_data_scaled@data = cbind.data.frame(ds_data_sp@data,
                                       over(x = ds_data_sp, y = predict_grid_scaled))
ds_data_sp@data = cbind.data.frame(ds_data_sp@data,
                                   over(x = ds_data_sp, y = predict_grid))
# HDS Models --------------------------------------------------------
# List the models
# No spde models
# Each candidate model repeats the same pattern: define components (cmp),
# define the thinned-LGCP formula, fit with inlabru::lgcp(), print the
# summary, and save the fit for the later WAIC comparison.
# Habitat
cmp = ~ lsig + Intercept + Conifer + Mixed + Wetland
formula = coordinates + distance ~
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Conifer + Mixed + Wetland
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat.RDS')
# Habitat + elevation
cmp = ~ lsig + Intercept + Conifer + Mixed + Wetland + Elevation
formula = coordinates + distance ~
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Conifer + Mixed + Wetland + Elevation
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat_elev.RDS')
# Habitat + Elevation + Spatial
cmp = ~ lsig + Intercept + Conifer + Mixed + Wetland + Elevation + Northing + Easting
formula = coordinates + distance ~
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Conifer + Mixed + Wetland + Elevation + Northing + Easting
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat_elev_spat.RDS')
# Human presence
cmp = ~ lsig + Intercept + Highway + MinorRoad
formula = coordinates + distance ~
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/human.RDS')
# Human presence + elevation
cmp = ~ lsig + Intercept + Highway + MinorRoad + Elevation
formula = coordinates + distance ~
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Elevation
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/human_elev.RDS')
# Human presence + elevation + spatial
cmp = ~ lsig + Intercept + Highway + MinorRoad + Elevation + Northing + Easting
formula = coordinates + distance ~
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Elevation + Northing + Easting
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/human_elev_spat.RDS')
# Spde models
# Same candidate set as above, now with the matern SPDE random field added
# via the mySPDE component.
# Habitat + spde
cmp = ~ mySPDE(map = coordinates, model = matern) +
  lsig + Intercept + Conifer + Mixed + Wetland
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Conifer + Mixed + Wetland
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat_spde.RDS')
# Habitat + elevation + spde
cmp = ~ mySPDE(map = coordinates, model = matern) +
  lsig + Intercept + Conifer + Mixed + Wetland + Elevation
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Conifer + Mixed + Wetland + Elevation
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat_elev_spde.RDS')
# Habitat + elevation + spatial + spde
cmp = ~ mySPDE(map = coordinates, model = matern) +
  lsig + Intercept + Conifer + Mixed + Wetland + Elevation + Northing + Easting
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Conifer + Mixed + Wetland + Elevation + Northing + Easting
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
# NOTE(review): filename uses 'spatial' while the non-SPDE analogue above uses
# 'spat' (habitat_elev_spat.RDS) -- confirm against the names in hds_models.csv.
saveRDS(fit, file = 'model_outputs/hds/habitat_elev_spatial_spde.RDS')
# Human + spde
cmp = ~ mySPDE(map = coordinates, model = matern) +
  lsig + Intercept + Highway + MinorRoad
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/human_spde.RDS')
# Human + elev + spde
cmp = ~ mySPDE(map = coordinates, model = matern) +
  lsig + Intercept + Highway + MinorRoad + Elevation
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Elevation
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/human_elev_spde.RDS')
# Human + elev + spde + spat
cmp = ~ mySPDE(map = coordinates, model = matern) +
  lsig + Intercept + Highway + MinorRoad + Elevation + Northing + Easting
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Elevation + Northing + Easting
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/human_elev_spatial_spde.RDS')
# Habitat + human
cmp = ~ lsig + Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland
formula = coordinates + distance ~
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat_human.RDS')
# habitat_human_spde
cmp = ~ lsig + mySPDE(map = coordinates, model = matern) + Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat_human_spde.RDS')
# habitat_human_elev
cmp = ~ lsig + Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland + Elevation
formula = coordinates + distance ~
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland + Elevation
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat_human_elev.RDS')
# habitat_human_elev_spde
cmp = ~ lsig + mySPDE(map = coordinates, model = matern) + Intercept + Highway + MinorRoad +
  Conifer + Mixed + Wetland + Elevation
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland + Elevation
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat_human_elev_spde.RDS')
# habitat_human_elev_spat
cmp = ~ lsig + Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland +
  Elevation + Northing + Easting
formula = coordinates + distance ~
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland + Elevation + Northing + Easting
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
saveRDS(fit, file = 'model_outputs/hds/habitat_human_elev_spat.RDS')
# habitat_human_elev_spat_spde: full covariate set (habitat + human + elevation)
# plus spatial trend (Northing/Easting) and the SPDE random field.
cmp = ~ lsig + mySPDE(map = coordinates, model = matern) + Intercept + Highway + MinorRoad +
  Conifer + Mixed + Wetland + Elevation + Northing + Easting
formula = coordinates + distance ~ mySPDE +
  log(hn(distance, lsig)) +
  log(1/W) +
  Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland + Elevation + Northing + Easting
fit = lgcp(components = cmp,
           data = ds_data_sp,
           samplers = deer_transects_2018,
           formula = formula)
summary(fit)
# BUG FIX: this previously saved to 'habitat_human_elev_spde.RDS', silently
# overwriting the habitat_human_elev_spde model saved above; the later WAIC
# comparison then read the wrong fit. Save under the model's own name.
saveRDS(fit, file = 'model_outputs/hds/habitat_human_elev_spat_spde.RDS')
# Summaries to refresh waic -----------
# Recompute WAIC for every saved model and rank the candidates.
hds_models = readr::read_csv('hds_models.csv')
models_list = list.files(path = 'model_outputs/hds/', full.names = T)
hds_models_name = hds_models$Name
# hds_model_waic() is defined elsewhere in the project; it returns a list of WAICs
waic_ls = hds_model_waic(hds_models_name = hds_models_name, models_list = models_list)
waic_vec = do.call(what = c, args = waic_ls)
waic_df = data.frame(model = names(waic_vec), waic = waic_vec, row.names = NULL) %>% arrange(waic)
waic_df
# Obtain parasite distribution models ----------------------------------
##
# Posterior mean intensity surface from the habitat+human+elev+spatial model
habitat_human_elev_spat = readRDS('model_outputs/hds/habitat_human_elev_spat.RDS')
prd = stats::predict(object = habitat_human_elev_spat,
                     predict_grid_scaled,
                     formula = ~ Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland + Elevation + Northing + Easting
)
ggplot(data = prd@data) +
  geom_raster(aes(x = coordinates(prd)[,1], y = coordinates(prd)[,2], fill = mean)) +
  scale_fill_viridis(option = 'A') +
  coord_equal() + theme_bw() +
  ggtitle("Hab hum elev spat")
##
# Same map for the habitat+human model
habitat_human = readRDS('model_outputs/hds/habitat_human.RDS')
prd = stats::predict(object = habitat_human,
                     predict_grid_scaled,
                     formula = ~ Intercept + Highway + MinorRoad + Conifer + Mixed + Wetland
)
ggplot(data = prd@data) +
  geom_raster(aes(x = coordinates(prd)[,1], y = coordinates(prd)[,2], fill = mean)) +
  scale_fill_viridis(option = 'A') +
  coord_equal() + theme_bw() +
  ggtitle("Hab hum")
770beed264d08e11906f65f3de183f1339cb1de3 | 96ea123f9e5756ceb6842cebc995f8c96256ae68 | /chap3/8.R | 0170a08653f1a155b88ec284703da02137a39cd6 | [] | no_license | khoanguyen8496/stat_learning | 2c97999e8c434a1ad5f90b703aff0bef9affdd68 | 2e71fed5fd2b755546658393bb18fa7530ea6387 | refs/heads/master | 2021-01-20T10:19:26.180591 | 2017-05-05T17:43:56 | 2017-05-05T17:43:56 | 90,345,220 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 741 | r | 8.R | library(ISLR)
library(MASS)
# a) use lm to perform a simple linear regression of mpg on horsepower (ISLR Auto data)
lm.fit = lm(mpg~horsepower, data=Auto)
summary(lm.fit)
# i. there is a relationship between the predictor and the response, since the p-value <
# 2e-16. Therefore, we can reject the null hypothesis that is \beta1 = 0
# The relationship between mpg and horsepower is negative because the coefficient is negative
# Confidence and prediction intervals for mpg at horsepower = 98
predict(lm.fit, data.frame(horsepower=(c(98))), interval="confidence")
predict(lm.fit, data.frame(horsepower=(c(98))), interval="prediction")
# b) plot the response and the predictor. Use abline() function to display the least squares
# regression line
attach(Auto)  # NOTE: attach() is generally discouraged; with(Auto, ...) avoids masking
plot(horsepower, mpg)
abline(lm.fit, lwd=3, col="red")
# c) diagnostic plots (residuals, QQ, scale-location, leverage) in a 2x2 grid
par(mfrow=c(2,2))
plot(lm.fit)
|
30cbdfd650ad9d98ade8af753f2825c31ef17179 | 5612d154b5c939aefefbad586e3bebaf7db6a532 | /man/Full.PFS2.Rd | afc9c3900d618d33302cef379fd4984dcaef97cc | [] | no_license | AspenMeow/MSUDataPull | c1938e4a6d0f22419715c8de024e60d06551bed6 | 3fecbd9ce556b4bbae602221c5747181faf883d7 | refs/heads/master | 2023-04-30T12:21:11.038925 | 2020-04-03T19:42:07 | 2020-04-03T19:42:07 | 369,385,234 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 690 | rd | Full.PFS2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pulldata.R
\name{Full.PFS2}
\alias{Full.PFS2}
\title{Undergrad Persistence Rate SISFull}
\usage{
Full.PFS2(cohortterm, output = "aggregate")
}
\arguments{
\item{cohortterm}{a numeric vector indicating the fall entering cohort's first Fall term, e.g. 1144}
\item{output}{indicates the output format, either "aggregate" or "list"}
}
\value{
aggregate output returns the persistence rate by entering cohort and level of entry status; list returns the detailed data frame.
}
\description{
Full.PFS2 is a function to get the 1st Spring and 2nd Fall persistence for the undergraduate fall entering cohort, including summer starters, from SISFull.
}
|
deda6cd92b3dad9a2217e0b03f8d025b134e8304 | 44fd958dbab924ae9d727b1fecac2e54157c5acb | /Extra practice code/Project_KNN.R | 8f389439aa8eb47d421398ef887d025c39009028 | [] | no_license | vssource/prediction-and-classification-of-H1-B-visa-application | bd2abfe5ecd2835295d87a69f48ff3ffac9793f6 | 8fb6c434243f06b2dfd287c5423b572a9b3c358f | refs/heads/master | 2023-03-10T06:25:57.225919 | 2021-02-26T02:24:42 | 2021-02-26T02:24:42 | 342,433,004 | 0 | 0 | null | 2021-02-26T02:24:43 | 2021-02-26T01:54:37 | R | UTF-8 | R | false | false | 10,427 | r | Project_KNN.R | options(scipen = 999)
library(caret)
library(dummies)
library(FNN)
library(dplyr)
# NOTE(review): loading plyr after dplyr masks several dplyr functions
# (e.g. summarise); confirm plyr is actually needed here.
library(plyr)
H1b <- read.csv(file = "h1b_kaggle.csv", header = TRUE)
#considering only year 2016
H1b16 <- subset(H1b, YEAR == 2016)
#remove Column X and YEAR
H1b16 <- H1b16[,-c(1,8)]
#Checking for na
table(is.na.data.frame(H1b16))
H1b16 <- na.omit(H1b16)
#reindex
row.names(H1b16)<-1:nrow(H1b16)
# Replace "N/A" SOC names with a usable placeholder occupation
q <- subset(H1b16, SOC_NAME=="N/A")
H1b16$SOC_NAME[as.numeric(row.names(q))] <- "NETWORK AND COMPUTER SYSTEMS"
#Changing status as per the requirement of the project:
# WITHDRAWN -> CANT_SAY, DENIED -> DENIED, everything else -> APPROVED
H1b16$STATUS <- NA
H1b16$STATUS <- ifelse(H1b16$CASE_STATUS =="WITHDRAWN", "CANT_SAY"
                       ,(ifelse(H1b16$CASE_STATUS == "DENIED", "DENIED",
                                "APPROVED")))
H1b16$FULL_TIME_POSITION <- ifelse(H1b16$FULL_TIME_POSITION== "Y", 1, 0)
#dividing the worksite in cities and states (WORKSITE is "CITY, STATE")
WORK_STATE <- as.data.frame(matrix(unlist(strsplit(H1b16[,"WORKSITE"], ", ")),ncol=2, byrow=TRUE))
H1b16$CITY <- WORK_STATE[,1]
H1b16$STATE <- WORK_STATE[,2]
H1b16$REGION <- "Other"
H1b16$REGION[H1b16$lon < -66 & H1b16$lon > -80 & H1b16$lat < 48 & H1b16$lat > 37 & H1b16$REGION == "Other"] <- "NorthEast"
H1b16$REGION[H1b16$lon < -72 & H1b16$lon > -102 & H1b16$lat < 37 & H1b16$lat > 24 & H1b16$REGION == "Other"] <- "South"
H1b16$REGION[H1b16$lon < -80 & H1b16$lon > -102 & H1b16$lat < 50 & H1b16$lat > 37 & H1b16$REGION == "Other"] <- "MidWest"
H1b16$REGION[H1b16$lon < -102 & H1b16$lon > -126 & H1b16$lat < 50 & H1b16$lat >25 & H1b16$REGION == "Other"] <- "West"
## creating Categories based on SOC_NAME
#faster string search, remove whitespaces and special char
H1b16$SOC_NAME <- gsub(" ","_",H1b16$SOC_NAME)
H1b16$SOC_NAME <- gsub(",","",H1b16$SOC_NAME)
H1b16$Category = "Other"
H1b16$Category <- ifelse(grepl('ENGINEER|MATHEMATICAL|STATIS', H1b16$SOC_NAME, ignore.case = T), 'Engineering', H1b16$Category)
H1b16$Category <- ifelse(grepl('ANIMAL_TRAINERS|BARBERS|HAIRDRESSERS,_HAIRSTYLISTS,_AND_COSMETOLOGISTS|SKINCARE_SPECIALISTS|TOUR_GUIDES_AND_ESCORTS|TRAVEL_GUIDES|CHILDCARE_WORKERS|FITNESS_TRAINERS_AND_AEROBICS_INSTRUCTORS|RECREATION_WORKERS|RESIDENTIAL_ADVISORS|PERSONAL_CARE_AND_SERVICE_WORKERS', H1b16$SOC_NAME, ignore.case = T), 'Personal', H1b16$Category)
H1b16$Category <- ifelse(grepl('computer|programmer|software|web developer|database', H1b16$SOC_NAME, ignore.case = T), 'Computer', H1b16$Category)
H1b16$Category <- ifelse(grepl('teacher|school principal|linguist|professor|teach', H1b16$SOC_NAME, ignore.case = T), 'Education', H1b16$Category)
H1b16$Category <- ifelse(grepl('public_relation|manag|operation|chief|executive|plan', H1b16$SOC_NAME, ignore.case = T), 'Management', H1b16$Category)
H1b16$Category <- ifelse(grepl('business|business_analyst|business_systems_analyst|financ|accountant', H1b16$SOC_NAME, ignore.case = T), 'Business', H1b16$Category)
H1b16$Category <- ifelse(grepl('medical|doctor|physician|dentist|health|physical therapists|surgeon|nurse|psychiatr', H1b16$SOC_NAME, ignore.case = T), 'Healthcare', H1b16$Category)
H1b16$Category <- ifelse(grepl('scient|ECONOMISTS|BIOLOGICAL_TECHNICIANS|CHEMIST|NUCLEAR|RESEARCH_ASSISTANTS', H1b16$SOC_NAME, ignore.case = T), 'Science', H1b16$Category)
H1b16$Category <- ifelse(grepl('advertis|marketing|market|promotion', H1b16$SOC_NAME, ignore.case = T), 'Marketing', H1b16$Category)
H1b16$Category <- ifelse(grepl('PARALEGALS_AND_LEGAL_ASSISTANTS|LAWYERS|LAWYER|ATTORNEY|JUDICIAL_LAW_CLERKS|ARBITRATORS_MEDIATORS_AND_CONCILIATORS|PARALEGALS_AND_LEGAL_ASSISTANTS*|LEGAL_SUPPORT_WORKERS_ALL_OTHER', H1b16$SOC_NAME, ignore.case = T), 'Legal', H1b16$Category)
H1b16$Category <- ifelse(grepl('COUNSELORS|MENTAL|SUBSTANCE|ABUSE|SOCIAL_WORKERS|BEHAVIORAL|GUIDANCE|THERAPISTS|REHABRELIGIOUS_WORKERS', H1b16$SOC_NAME, ignore.case = T), 'Counselor', H1b16$Category)
H1b16$Category <- ifelse(grepl('SALES|FIRST-LINE|SUPERVISORS|wholesale|commodit|Manufact|TRADER|retail', H1b16$SOC_NAME, ignore.case = T), 'Sales', H1b16$Category)
H1b16$Category <- ifelse(grepl('farming|farm|fishing|agri|ANIMAL_BREEDERS|CROP', H1b16$SOC_NAME, ignore.case = T), 'Farming', H1b16$Category)
H1b16$Category <- ifelse(grepl('transport|airline|flight|pilot|drive|captain|ship_engineers', H1b16$SOC_NAME, ignore.case = T), 'Transportation', H1b16$Category)
H1b16$Category <- ifelse(grepl('Install|REPAIR|SECURITY|MECHANICS|MAINTENANCE', H1b16$SOC_NAME, ignore.case = T), 'Installation', H1b16$Category)
H1b16$Category <- ifelse(grepl('construction|PLUMBERS|helper|PAINTERS', H1b16$SOC_NAME, ignore.case = T), 'Construction', H1b16$Category)
H1b16$Category <- ifelse(grepl('CHEFS|COOKS|Food|LOUNGE', H1b16$SOC_NAME, ignore.case = T), 'Food', H1b16$Category)
H1b16$Category <- ifelse(grepl('MUSICIANS_INSTRUMENTAL|ARCHIVISTS|CURATORS|MUSEUM_TECHNICIANS_AND_CONSERVATORS|LIBRARIANS|LIBRARY_TECHNICIANS|AUDIO-VISUAL_AND_MULTIMEDIA_COLLECTIONS|FARM_AND_HOME_MANAGEMENT_ADVISORS|INSTRUCTIONAL_COORDINATORS|INSTRUCTIONAL_DESIGNERS_AND_TECHNOLOGISTS|INSTRUCTIONAL_COORDINATOR|TEACHER_ASSISTANTS|EDUCATION_TRAINING_AND_LIBRARY_WORKERS_ALL|EDUCATION_TRAINING_&_LIBRARY_WORKERS_ALL_OTHER|ART_DIRECTORS|CRAFT_ARTISTS|FINE_ARTISTS_INCLUDING_PAINTERS_SCULPTORS_AND|MULTIMEDIA_ARTISTS_AND_ANIMATORS|ARTISTS_AND_RELATED_WORKERS_ALL_OTHER|COMMERCIAL_AND_INDUSTRIAL_DESIGNERS|COMMERCIAL_AND_INDUSTRIAL_ENGINEERS|COMMERCIAL_AND_INDUSTRIAL_DEISGNERS|FASHION_DESIGNERS|FLORAL_DESIGNERS|GRAPHIC_DESIGNERS|GRAPHIC_DESIGNER|INTERIOR_DESIGNERS|INTERIOR_DESIGNER|SET_AND_EXHIBIT_DESIGNERS|DESIGNERS_ALL_OTHER|ACTORS|PRODUCERS_AND_DIRECTORS|PRODUCERS|ATHLETES_AND_SPORTS_COMPETITORS|COACHES_AND_SCOUTS|DANCERS|CHOREOGRAPHERS|MUSIC_DIRECTORS_AND_COMPOSERS|MUSICIANS_AND_SINGERS|MUSICIANS_INSTRUdata|ENTERTAINERS_AND_PERFORMERS_SPORTS_AND_RELATED|RADIO_AND_TELEVISION_ANNOUNCERS|BROADCAST_NEWS_ANALYSTS|REPORTERS_AND_CORRESPONDENTS|PUBLIC_RELATIONS_SPECIALIST|EDITORS|TECHNICAL_WRITERS|TECHNICAL_WRITER|WRITERS_AND_AUTHORS|POETS_LYRICISTS_AND_CREATIVE_WRITERS|INTERPRETERS_AND_TRANSLATORS|MEDIA_AND_COMMUNICATION_WORKERS_ALL_OTHER|MARKET_RESEARCH|AUDIO_AND_VIDEO_EQUIPMENT_TECHNICIANS|SOUND_ENGINEERING_TECHNICIANS|PHOTOGRAPHERS|CAMERA_OPERATORS_TELEVISION_VIDEO_AND_MOTION|FILM_AND_VIDEO_EDITORS|MEDIA_AND_COMMUNICATION_EQUIPMENT_WORKERS_ALL', H1b16$SOC_NAME, ignore.case = T), 'Arts', H1b16$Category)
#Check categories
table(H1b16$Category)
# Barplot of application counts per occupation category, annotated with each
# category's share of the total (%).
# NOTE: the name `cat` shadows base::cat(); kept for compatibility with the
# rest of the script, which reuses it below.
cat <- table(H1b16$Category)
cat.count <- as.vector(cat)
# BUG FIX: names(as.vector(cat)) is always NULL because as.vector() strips the
# names attribute; pass names(cat) so the bar labels are supplied explicitly.
bar.plot <- barplot(cat, border = F, names.arg = names(cat),
                    las = 2,
                    col = c(rgb(0.3,0.1,0.4,0.6), rgb(0.3,0.5,0.4,0.6), rgb(0.3,0.9,0.4,0.6), rgb(0.3,0.9,0.4,0.6)),
                    ylim = c(0, 450) * 1000,
                    main = "Category Distribution")
# Print each category's percentage share slightly above its bar
text(bar.plot, cat.count + 100, paste("", round((cat.count / sum(cat.count) * 100), 2), "%", sep = ""), cex = 1.2)
# Consider categories which have a ratio > ~2.5%
# So, categories to be considered are: arts, business, computers, engineering, management, science, education, healthcare
# Collapse all low-frequency categories into "Other"
H1b16$Category[H1b16$Category == "Construction"] <- "Other"
H1b16$Category[H1b16$Category == "Counselor"] <- "Other"
H1b16$Category[H1b16$Category == "Farming"] <- "Other"
H1b16$Category[H1b16$Category == "Food"] <- "Other"
H1b16$Category[H1b16$Category == "Installation"] <- "Other"
H1b16$Category[H1b16$Category == "Legal"] <- "Other"
H1b16$Category[H1b16$Category == "Personal"] <- "Other"
H1b16$Category[H1b16$Category == "Production"] <- "Other"
H1b16$Category[H1b16$Category == "Protective"] <- "Other"
H1b16$Category[H1b16$Category == "Transportation"] <- "Other"
H1b16$Category[H1b16$Category == "Sales"] <- "Other"
#H1b16$Category[H1b16$Category == "Arts"] <- "Other"
table(H1b16$Category)
#H1b16 <- subset(H1b16, H1b16$Category %in% c("Arts","Management","Business","Computer","Engineering","Science","Education","Healthcare","Other"))
#Apply KNN
knn.df <- H1b16
#remove CANT_SAY status
knn.df <- subset(knn.df, knn.df$STATUS %in% c('APPROVED','DENIED'))
# Recode the target as binary: DENIED = 0, APPROVED = 1
# (previous comment incorrectly said CANT_SAY = 0; CANT_SAY rows are already dropped)
knn.df$STATUS <- ifelse(knn.df$STATUS== "DENIED", 0, 1)
# Keep predictors + target: FULL_TIME_POSITION, PREVAILING_WAGE, lon, lat, STATUS, Category
selected.var <- c(5,6,8,9,10,14)
knn.df <- knn.df[,selected.var]
#create dummies for Category
knn.df <- dummy.data.frame(knn.df, names=c("Category"), sep = ".")
#partition data: 60% training / 40% validation
set.seed(1)
train.rows <- sample(rownames(knn.df), dim(knn.df)[1]*0.6)
train.df <- knn.df[train.rows, ]
valid.rows <- setdiff(rownames(knn.df), train.rows)
valid.df <- knn.df[valid.rows, ]
#Initialize dataframes
train.norm.df <- train.df
valid.norm.df <- valid.df
norm.df <- knn.df
#Pre-Processing/Normalize the data
# Center/scale the numeric predictors (columns 2-4) using training-set statistics only
norm.values <- preProcess(train.df[, c(2:4)], method=c("center", "scale"))
train.norm.df[, c(2:4)] <- predict(norm.values, train.df[, c(2:4)])
valid.norm.df [, c(2:4)]<- predict(norm.values, valid.df[,c(2:4)])
norm.df [, c(2:4)]<- predict(norm.values, knn.df[, c(2:4)])
# Train KNN Model (column 5 is the STATUS target, excluded from the feature matrix)
df.knn <- knn(train = train.norm.df[, -5], test = valid.norm.df[, -5],
              cl = train.norm.df$STATUS, k = 3)
confusionMatrix(df.knn, as.factor(valid.df$STATUS))
# #find best k for knn (grid search over k = 1..25; kept for reference, slow to rerun)
# bestk.df <- data.frame(k = seq(1, 25, 1), accuracy = rep(0, 25))
# 
# for(i in 1:25) {
#   bestknn.pred <- knn(train = train.norm.df[, -5], test = valid.norm.df[, -5],
#                       cl = train.norm.df$STATUS, k = i)
#   bestk.df[i, 2] <- confusionMatrix(bestknn.pred, as.factor(valid.df$STATUS))$overall[1]
# }
# bestk.df
# plot(bestk.df, type="b", xlab="K- Value",ylab="Accuracy level", pch=19)
#Train model with best K=5 (chosen from the grid search above)
df.knn <- knn(train = train.norm.df[, -5], test = valid.norm.df[, -5],
              cl = train.norm.df$STATUS, k = 5)
confusionMatrix(df.knn, as.factor(valid.df$STATUS))
## new data for prediction: one hand-built applicant record (dummy columns must
## match the training design exactly; Category.Computer = 1 marks the occupation)
new.df <- data.frame(FULL_TIME_POSITION=0,PREVAILING_WAGE=32407,lon=-83.74304,lat=42.28083,
                     Category.Arts=0,Category.Business=0,Category.Computer=1,Category.Education=0,Category.Engineering=0,
                     Category.Healthcare=0,Category.Management=0,Category.Marketing=0,Category.Other=0,Category.Science=0)
new.norm.df <- new.df
# Apply the same training-set centering/scaling to the new record
new.norm.df[, c(2:4)] <- predict(norm.values, new.df[, c(2:4)])
#Predict new data with best K=5 found
finalknn.pred <- knn(train = train.norm.df[, -5], test = new.norm.df,
                     cl = train.norm.df$STATUS, k = 5)
#row.names(norm.df)[attr(finalknn.pred, "nn.index")]
summary(finalknn.pred)
|
33d5726fec4aa8e8fb99f185c3fc9bed38f21d10 | 1117b3de762f3be75c5543f542357c465fa9906b | /R Basics/STAT40180_STAT40620__Lecture_2_R_code(1).R | d25c369270660cf2068c19d32846a14ec97433b4 | [] | no_license | jjaitley/R_Programming_Practice | 5750425af5cd503662a93da7743becbead8cc532 | 407e83e1410f06661aadc094f4e01803623864bc | refs/heads/master | 2020-04-17T10:08:30.789235 | 2019-01-19T00:29:26 | 2019-01-19T00:29:26 | 166,489,205 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,856 | r | STAT40180_STAT40620__Lecture_2_R_code(1).R | # Code for lecture 2 on vectors, matrices and arrays
# Numeric and character vectors
x <- c(1, 2, 4)
y <- c('cat', 'dog', 'rabbit')
# Use mode to find out what type they are
mode(x)  # "numeric"
mode(y)  # "character"
# Create a matrix and access bits of it
m <- matrix(1:6, nrow = 3, ncol = 2)
m
# First row
m[1,]
# Second and third rows and first column
m[2:3,1]
# Add to a vector using c
x <- c(88, 5, 12, 13)
x
# Insert 168 before the last element
x <- c(x[1:3], 168, x[4])
x
# Delete an element
x <- x[-4]
x
# Declare a vector
y <- vector(length = 2)
y[1] <- 5
y[2] <- 12
y
# Vectorised addition
x <- c(1, 2, 4)
y <- x + c(5,0,-1)
# Some more clever indexing
y <- c(1.2, 3.9, 0.4, 0.12)
y[c(1, 3)]
v <- 3:4
y[v]
# Generating sequences
2:7
seq(from = 12, to = 30, by = 3)
rep(7, 4)
# Using all and any
x <- 1:10
any(x < 8)  # TRUE
all(x < 8)  # FALSE
x < 8
# NA and NULL values
x <- c(88, NA, 12, 168, 13)
x
mean(x)  # NA: missing values propagate
mean(x, na.rm = TRUE)
x <- c(88, NULL, 12, 168, 13)
mean(x)  # NULL is dropped from c(), so this is defined
# More on NULL
# NULL disappears when concatenated, so z grows cleanly from empty...
z <- NULL
for(i in 1:5) z <- c(z, i)
z
# ...whereas NA is a real element, so it stays at the front
z <- NA
for(i in 1:5) z <- c(z, i)
z
##################################
# Examples
# Function to find the number of repeated ones of length k
findruns <- function(x, k) {
n <- length(x)
runs <- NULL
for(i in 1:(n - k + 1)) {
if(all(x[i:(i + k - 1)] == 1)) runs <- c(runs, i)
}
return(runs)
}
# Using the function
y <- c(1, 0, 0, 1, 1, 1, 0, 1, 1)
findruns(y,3)  # 4
findruns(y,2)  # 4 5 8
findruns(y,6)  # NULL: no run of six 1s
# Predicting discrete valued time series version used in lecture
# Forecast x[i + k] as the majority vote of the k preceding observations,
# then return the mean absolute error of those forecasts.
pred <- function(x, k) {
  n <- length(x)
  threshold <- k / 2
  forecast <- vector(length = n - k)
  for (start in 1:(n - k)) {
    # Majority vote over the window x[start .. start + k - 1]
    votes <- sum(x[start:(start + (k - 1))])
    forecast[start] <- if (votes >= threshold) 1 else 0
  }
  mean(abs(forecast - x[(k + 1):n]))
}
# Run it: error rate of the majority-vote predictor for several window sizes
pred(y, 1)  # 0.5
pred(y, 2)  # 3/7
pred(y, 3)  # 0.5
# predb - better way of updating the sum
# Same contract as pred(), but maintains a rolling window sum instead of
# recomputing sum() for every window: when the window slides one position,
# the entering element is added and the leaving element subtracted.
predb <- function(x,k) {
  n <- length(x)
  k2 <- k/2
  # pred[i] holds the majority-vote forecast for x[i + k]
  pred <- vector(length = n - k)
  sm <- sum(x[1:k])  # sum of the first window x[1..k]
  if(sm >= k2) pred[1] <- 1 else pred[1] <- 0
  if(n - k >= 2) {
    for(i in 2:(n - k)) {
      # Slide the window: x[i + k - 1] enters, x[i - 1] leaves
      sm <- sm + x[i + k - 1] - x[i-1]
      if(sm >= k2) pred[i] <- 1 else pred[i] <- 0
    }
  }
  # Mean absolute error of the forecasts against the realised values
  return(mean(abs(pred - x[(k + 1):n])))
}
# Run it: same results as pred(), computed with the rolling sum
predb(y, 1)  # 0.5
predb(y, 2)  # 3/7
predb(y, 3)  # 0.5
# Third version predc - use cumulative sum - cumsum
# Same contract as pred()/predb(): each window sum is obtained in O(1) as a
# difference of cumulative sums, csx[pos + k] - csx[pos].
predc <- function(x,k) {
  len <- length(x)
  half_k <- k / 2
  forecast <- vector(length = len - k)
  # Prepend 0 so that csx[i] equals the sum of the first i - 1 elements
  csx <- c(0, cumsum(x))
  for (pos in 1:(len - k)) {
    forecast[pos] <- if (csx[pos + k] - csx[pos] >= half_k) 1 else 0
  }
  mean(abs(forecast - x[(k + 1):len]))
}
# Run it: same results as pred() and predb()
predc(y, 1)
predc(y, 2)
predc(y, 3)
##################################
# Vectorised operations
# Some simple vectorised operations
u <- 1:5
w <- function(x) return(x + 1)
w(u)
sqrt(1:4)
# Vectorisation and recycling
y <- c(12, 5, 13)
y + 3
# Watch out for the warning: lengths 3 and 5 are not multiples, so R warns
c(1, 2, 4) + c(6, 0, 9, 20, 22)
# Filtering
z <- c(5, 2, -3, 8)
w <- z[z^2 > 8]
w
z^2 > 8
# Subset: unlike [, subset() drops NA elements rather than returning NA
x <- c(6, 1:3, NA, 12)
x[x > 5]
subset(x, x > 5)
# which: indices where the condition holds
z <- c(5, 2, -3, 8)
which(z^2 > 8)
# Filtering a matrix - using arr.ind to get (row, col) pairs instead of linear indices
M <- matrix(1:6, nrow = 3, ncol = 2)
which(M > 4)
which(M > 4,arr.ind=TRUE)
# Using ifelse: elementwise (vectorised) conditional
x <- 1:10
y <- ifelse(x %% 2 == 0,5,12)
y
##################################
# Matrices
# Creating and indexing
M <- matrix(1:6, nrow = 3, ncol = 2)
M[c(1, 3), 1]
# Use of byrow: fill across rows instead of down columns
M2 <- matrix(1:6, nrow = 3, ncol = 2, byrow = TRUE)
# Matrix addition
M3 <- matrix(7:12, 3, 2)
M2 + M3
# Matrix operations - matrix multiplication
t(M) %*% M
# Other useful matrix functions
I <- diag(2)
M <- matrix(1:4,2,2)
det(M)
solve(M)  # matrix inverse
solve(M) %*% M  # recovers the identity (up to floating-point error)
eigen(M)
# See also chol, svd, qr
# Use of apply: 2 = by column, 1 = by row
apply(M, 2, mean)
f <- function(x) x / max(x)
apply(M, 1, f)
# Finding shape
length(M)  # total number of elements
dim(M)     # rows, columns
# Common problems
# Subsetting a single row drops the matrix down to a plain vector
r <- M[2,]
r
str(M)
str(r)
# Column and row names
M <- matrix(1:6, nrow = 3, ncol = 2)
colnames(M) <- c('a', 'b')
rownames(M) = c('c', 'd', 'e')
M
# Arrays: stack two 3x2 matrices into a 3x2x2 array
resting <- matrix(c(36.1, 36.0, 36.3, 68, 65, 85), nrow = 3, ncol = 2)
active <- matrix(c(36.3, 36.5, 37.3, 98, 112, 135), nrow = 3, ncol = 2)
data <- array(data = c(resting, active), dim = c(3, 2, 2))
# Indexing and finding shape
data[3, 1, 2]
dim(data)
##################################
# Example - image manipulation
# Install the pixmap package
install.packages('pixmap')
# Load the pixmap package
library(pixmap)
# Run it: load the package's bundled demo image and convert to greyscale
x <- read.pnm(system.file("pictures/logo.ppm", package = "pixmap"))
y <- as(x, "pixmapGrey")
plot(y)
str(y)
# Manipulate it: the @grey slot is a matrix of intensities in [0, 1]
y2 <- y
y2@grey <- 1 - y2@grey # Creates a negative
plot(y2)
y3 <- y
y3@grey <- 0.8 * y3@grey # Makes it darker
plot(y3)
y4 <- y
y4@grey[y4@grey<0.8] <- 0 # Makes dark areas black
plot(y4)
# combine all plots in a matrix with 2 rows and 3 columns
par(mfrow = c(2, 3))
plot(x, main = "x: Original")
plot(y, main = "y: Original, black and white")
plot(y2, main = "y2: Negative")
plot(y3, main = "y3: Darker")
plot(y4, main = "y4: Dark areas black")
par(mfrow = c(1, 1)) # reset to default
|
34a1997f69db021384b39f9ac82cb9ca1c604f5e | eb9a849526ee437fa61f91c60741e7718e8ffe52 | /Exploratory-Data-Analysis/Project-1/plot3.R | 34e5c897501e940b594b38ffc84a50d8b1be342b | [
"MIT"
] | permissive | aj00786/Coursera-Data-Science | c47f8c8933750023d24a16cca9c8bcc21572784a | 2fd5c8c04f77feaaa930c165a959dbe9fee64310 | refs/heads/master | 2023-03-18T10:04:50.202211 | 2016-11-27T02:33:59 | 2016-11-27T02:33:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,397 | r | plot3.R | # Create data file from Electric power consumption data
fileName = "household_power_consumption.txt"
# Values recorded as "?" are read in as NA
proj1 <- read.table(fileName, header = TRUE, sep = ";", na.strings="?" , stringsAsFactors=FALSE)
# Please note that running the above command with na.strings="?" didn't convert the numerical data
# to FORMAT so I did not need to convert Global_active_power:Sub_metering_3 to num
# Subset the data by the two dates "2007-02-01" and "2007-02-02"
graph_data <- proj1[proj1$Date %in% c("1/2/2007","2/2/2007"), ]
# Clean up space by removing big data set
rm(proj1)
#Convert the date and time variables to Date/Time classes in R
date_time <- strptime(paste(graph_data$Date, graph_data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Create vectors for sub_metering variables
sub_metering_1 <- graph_data$Sub_metering_1
sub_metering_2 <- graph_data$Sub_metering_2
sub_metering_3 <- graph_data$Sub_metering_3
# Create plot 3: the three sub-metering series overlaid on one time axis
plot(date_time, sub_metering_1, xlab = "", ylab = "Energy Submetering", type ="l")
lines(date_time, sub_metering_2, type ="l", col = "red")
lines(date_time, sub_metering_3, type ="l", col = "blue")
legend("topright",inset=.05, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2, col = c("black", "red","blue"), cex = 0.95)
# Copy plot to a png file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
5b5ed24efcb509fa5498544f388c6d9bda25fb1a | b0593a0e4c43fef2a12ebe631551a8e7545f01c5 | /lab1/1BM18CS414_LAB1_program.R | de4120d18a20d48c0238bd0411f6e853a7830757 | [] | no_license | priyanka-cse/DSR_1BM18CS414 | 11b7a280441fb7f6bcc7d9049e38ae0f5dffdb14 | d2f3013d87f0874ece1f7a077721a384f5db7deb | refs/heads/master | 2023-01-13T08:57:25.612408 | 2020-11-17T07:09:53 | 2020-11-17T07:09:53 | 299,528,974 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 132 | r | 1BM18CS414_LAB1_program.R | N1<-c(10,20,30,40)
str1<-c("water","lemon","juice","milk")
# Fixed: the original call ended with a trailing comma, which R rejects at
# evaluation time with "argument 5 is empty".
Rlnum<-c(23.4,44.5,5.67,7.89)
# Combine the numeric and character vectors into one heterogeneous list
# and auto-print it.
list1<-list(N1,str1,Rlnum)
list1
|
c8d6b798e4b562a09c2b2d77863fad667fe33c08 | cbdb720cc2fddc4a3723b562042796a35704bae9 | /man/SaveADL.Rd | 8e9dea67b615e8a49157fe95020601e7025c81af | [] | no_license | duartegomes/SamplePackage | f3a359fc165e84013ce7eff3818fa2f0c92d625a | f48b85abe797b6d6397bfc31b1b9b412ddd3d766 | refs/heads/master | 2020-04-03T07:57:51.977313 | 2018-12-12T10:32:23 | 2018-12-12T10:32:23 | 155,119,339 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,870 | rd | SaveADL.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SaveADL.R
\name{SaveADL}
\alias{SaveADL}
\title{Save file in the Data Lake}
\usage{
SaveADL(Data, FileExtension = "csv", ADLDirectory, ChunkName, SavePath,
SaveLocal = T)
}
\arguments{
\item{Data}{This is a required field and is usually a TAP chunk but it can be any item of class "data.table" or "data.frame".
If the user submits a data frame then this will first be converted to a data table before the calculated column is added.
This is because data tables are far more efficient in R than a data frame.
The resulting table that is returned by this function will also be a data table even if a data frame was submitted.}
\item{FileExtension}{Extension supports csv and Rdata files. By default is csv.}
\item{ADLDirectory}{Directory where the file will be stored. By default is a folder
with the user's alias.}
\item{ChunkName}{Optional name to be given to the file when saved in the ADL. If null, the object's name is used.}
\item{SavePath}{Path to save the chunk locally. By default is TAP cache folder.}
\item{SaveLocal}{To save the chunk locally set as TRUE, otherwise set as FALSE. By default is TRUE.}
}
\value{
Returns a status message.
}
\description{
A function to save files in the data lake. By default saves a CSV file but also supports Rdata files.
By default saves in a folder named with user alias under user directory in the Data Lake.
}
\examples{
SaveADL(Data = TestEmailChunk, FileExtension = "csv", ADLDirectory = "Publication/", ChunkName = "EmailSample", SaveLocal=F)
}
\seealso{
\code{\link{TAPChunks}}
Other Reporting and saving tools: \code{\link{AppendCSV}},
\code{\link{ApplyPenetration}},
\code{\link{ExportToSQL}}, \code{\link{PublishADL}},
\code{\link{UseAsSlicer}}
}
\author{
JTA - The Data Scientists
}
\concept{Reporting and saving tools}
|
01e32ae61e68d944bd79fadaf1da0f14acf589cd | c7e9a7fe3ee4239aad068c6c41149a4a09888275 | /OLD_GALLERY_RSCRIPT/#209_options_of_barplots.R | 2d87c200e13c7877b97b75a83d9c4dd38dd26d74 | [
"MIT"
] | permissive | holtzy/R-graph-gallery | b0dfee965ac398fe73b3841876c6b7f95b4cbae4 | 7d266ad78c8c2d7d39f2730f79230775930e4e0b | refs/heads/master | 2023-08-04T15:10:45.396112 | 2023-07-21T08:37:32 | 2023-07-21T08:37:32 | 31,253,823 | 591 | 219 | MIT | 2023-08-30T10:20:37 | 2015-02-24T09:53:50 | HTML | UTF-8 | R | false | false | 1,882 | r | #209_options_of_barplots.R |
# Let's create a named vector of data (names become the bar labels):
my_vector=c(3,12,5,18,45)
names(my_vector)=c("A","B","C","D","E")
# The most basic barplot you can do:
barplot(my_vector)
# Change color with col: uniform fill for every bar.
# col takes a single value or a vector of fill colors; if the vector is
# shorter than the number of bars, the values are recycled.
png("#209_uniform_color_barplot.png" , width = 480, height = 480 )
barplot(my_vector, col=rgb(0.2,0.4,0.6,0.6) )
dev.off()
# Change color with col: one color per category
png("#209_specific_color_barplot.png" , width = 480, height = 480 )
barplot(my_vector, col=c(1,2,3,4,5) )
dev.off()
# We can also change the color of the border of the bars:
png("#209_border_color_barplot.png" , width = 480, height = 480 )
barplot(my_vector, col=rgb(0.1,0.1,0.1,0.1), border="blue" )
dev.off()
# Change the classic plot attributes (axis labels, title, y-limits):
png("#209_layout_barplot.png" , width = 480, height = 480 )
barplot(my_vector, col=rgb(0.5,0.1,0.6,0.6), xlab="categories", ylab="values", main="My title" , ylim=c(0,60) )
dev.off()
# You can put some texture on each bar
# (see the density and angle arguments of barplot).
# You can make a horizontal barplot:
# las=1 keeps the category labels upright so they stay readable.
png("#209_horizontal_barplot.png" , width = 480, height = 480 )
barplot(my_vector, col=rgb(0.2,0.4,0.6,0.6), horiz=T , las=1)
dev.off()
# Add custom spacing between bars:
png("#209_custom_size_between_bars.png" , width = 480, height = 480 )
barplot(my_vector, col=rgb(0.2,0.4,0.6,0.6), space=c(0.1,0.2,3,1.5,0.3) )
dev.off()
# You can change the width of the bars. Useful if you want the bar width to
# represent the number of observations, for example.
png("#209_custom_width_of_bars.png" , width = 480, height = 480 )
barplot(my_vector, col=rgb(0.2,0.4,0.6,0.6), width=c(0.1,0.2,3,1.5,0.3) )
dev.off()
|
2e3c43b5c91e154425c6f106385c97e09dd10ef6 | 495ebf2ec08b9fabdaa689c7b9aa8bedd168a022 | /man/ExploreModelMatrix-pkg.Rd | 7aa979828e6b78c627d4952408414bc3376486c5 | [
"MIT"
] | permissive | csoneson/ExploreModelMatrix | bec8a9556beaa4bd906b5a8eb777fcc2f8470f50 | 5ec1ff318756631c4e1235457f5b41582dfe4580 | refs/heads/devel | 2023-05-25T18:55:09.510113 | 2023-05-13T07:39:31 | 2023-05-13T07:39:31 | 195,576,287 | 37 | 2 | NOASSERTION | 2023-04-22T18:53:05 | 2019-07-06T19:32:33 | R | UTF-8 | R | false | true | 585 | rd | ExploreModelMatrix-pkg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExploreModelMatrix-pkg.R
\docType{package}
\name{ExploreModelMatrix-pkg}
\alias{ExploreModelMatrix-pkg}
\title{ExploreModelMatrix}
\description{
ExploreModelMatrix is an R package for visualizing design matrices generated
by the \code{model.matrix()} R function.
Provided with a sample data table and a design formula, the
\code{ExploreModelMatrix()} function launches a shiny app where the user can
explore the fitted values (in terms of the model coefficients) for each
combination of predictor values.
}
|
081928a597ce9a3ecdf0a10d42c12d8d7bf80e4a | 8eaffbeb1b0fa677ffc1990144bdfb095b637aac | /man/breaks-class.Rd | 6a8486452ca1db566ec64a7c01906eab5d67c3e3 | [] | no_license | ccbiolab/svpluscnv | 55cc885b3ca13a7e8f9caaeac4468f55424c16a6 | 57db725bad63233b1c45583045a49dc0d4815b7b | refs/heads/master | 2021-01-09T04:10:02.148245 | 2020-09-07T23:17:10 | 2020-09-07T23:17:10 | 242,241,163 | 20 | 1 | null | 2020-07-30T15:45:11 | 2020-02-21T22:24:05 | R | UTF-8 | R | false | true | 821 | rd | breaks-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/breakpoint.density.r
\docType{class}
\name{breaks-class}
\alias{breaks-class}
\alias{breaks}
\title{Data class breaks}
\arguments{
\item{breaks}{(data.table): the breakpoint info containing data.table, this will be occupied by the CNV segmentation data in the case of cnv.break.annot or SV for sv.break.annot. Unique random string rownames are added to the returned breaks data.frame.}
\item{burden}{(numeric): a vector containing the total number of breakpoints in each sample}
\item{param}{(list): a list of the parameters provided}
}
\value{
an instance of the class 'breaks' containing breakpoint and breakpoint burden information
}
\description{
Class to store breakpoint annotations in association with genomic features (e.g. gene loci)
}
|
c3048b567c30b821a16605072c4a6531fc88a93e | ec4536f9edbb1ccbf582204e24dd5e1d72afd8e2 | /R/blockSize-rasterprocessing-function.R | a27537e1cbae00f470247295fc40e8e3470e5e72 | [] | no_license | monsoonforest/scripts-forever | 2d5f9a7fb18bdc2c34f931eb4ad0ce20a17eedac | 45c8dd0eb9c4d7b12e78a68e7402479687e012dc | refs/heads/master | 2023-06-15T06:10:31.206443 | 2021-07-07T15:22:18 | 2021-07-07T15:22:18 | 159,921,435 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 657 | r | blockSize-rasterprocessing-function.R | ## read more about this and other memory efficient ways of dealing with rasters in Writing functions with the ”raster” package Robert J. Hijmans September 7, 2015
# type this in bash to open it
# xdg-open /home/chintan/documents/work/literature/stas_programming/r-programming/Hijmans-2014-Writingfunctionswiththe”raster”package.pdf
f4 <- function(x, filename, format, ...)
{
	# Block-wise copy of a Raster* object: values are read one block at a
	# time and streamed straight into the output file, so the whole raster
	# never has to fit in memory.
	# NOTE(review): `format` is accepted but not forwarded to writeStart --
	# confirm whether it should be.
	result <- raster(x)
	blocks <- blockSize(result)
	result <- writeStart(result, filename, overwrite=TRUE)
	for (b in 1:blocks$n)
	{
		vals <- getValues(x, row=blocks$row[b], nrows=blocks$nrows[b])
		result <- writeValues(result, vals, blocks$row[b])
	}
	result <- writeStop(result)
	return(result)
}
358a7d5f46618094da2684497257563376465b37 | 76e47464f4313b79f95fecf01067aa3a6b713d8b | /man/bioclim_miroc5_rcp60_2080_landonly.Rd | dc45e16d458808149b277f94479c0a15685d8848 | [
"MIT"
] | permissive | zejiang-unsw/rISIMIP | 460116d1f6e23826bb9de57e8ee731d29f379730 | 9f9af06dd51d936932795c4cf2a99216fbfcea23 | refs/heads/master | 2021-01-04T09:46:43.105805 | 2019-12-20T11:36:35 | 2019-12-20T11:36:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 644 | rd | bioclim_miroc5_rcp60_2080_landonly.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rISIMIP-package.R
\docType{data}
\name{bioclim_miroc5_rcp60_2080_landonly}
\alias{bioclim_miroc5_rcp60_2080_landonly}
\title{Global bioclimatic data for 30-yr period centered around 2080 acquired from MIROC5 model data under RCP6.0}
\format{\code{data.frame}}
\description{
data.frame with global bioclimatic for 2080 according to MIROC5 model data under RCP6.0
}
\details{
This data.frame includes the global bioclimatic data for 2080 calculated from 30-yr daily simulated
MIROC5 data centered around 2080 based on the representative concentration pathway RCP6.0
}
|
ed31233fde980d7064e343d74f68a25d932f5901 | 08fdbab8e6d7d78eb8ae5be069b22553d06ac445 | /R/functions.R | 30de3f1b7ee073ef9c3e34d7c6b3acfa65279702 | [
"MIT"
] | permissive | ficusss/Pagoda2GraphImprovement | 939f66d1e87f97c4e0dac846adff213e08b0372c | b6672ff370625cebcbaf8510a8ae82228b2daa94 | refs/heads/master | 2020-04-04T15:20:47.123511 | 2019-02-13T16:26:30 | 2019-02-13T16:26:30 | 156,034,072 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,697 | r | functions.R | #' @export
# Scatter-plot a stored PCA embedding of a pagoda2 object, colouring the
# cells by the clustering stored under `cluster.name`.
PlotPagoda <- function(data, embedding.name, cluster.name=NULL, size=0.3, alpha=0.5) {
  emb <- data$embeddings$PCA[[embedding.name]]
  cell.groups <- data$clusters$PCA[[cluster.name]]
  points.layer <- ggplot2::geom_point(
    ggplot2::aes(x = emb[, 1], y = emb[, 2], color = cell.groups),
    size = size, alpha = alpha
  )
  ggplot2::ggplot() + points.layer
}
#' Build and process a pagoda2 object from a count matrix.
#'
#' @description Similar to pagoda2::basicP2proc, but with more parameters
#' exposed: variance adjustment, PCA reduction, kNN graph construction,
#' graph clustering (infomap or multilevel) and, optionally, largeVis
#' and/or tSNE embeddings.
#'
#' @param cm count matrix passed to pagoda2::Pagoda2$new.
#' @param n.cores number of cores used by pagoda2.
#' @param clustering.type "infomap" or "multilevel"; anything else errors.
#' @param embeding.type character vector; may contain "largeVis" and/or
#'   "tSNE" (NULL skips the embedding step entirely).
#' @param tsne.iter.num maximum number of tSNE iterations.
#' @param verbose passed through to the pagoda2 routines.
#' @param n.pcs number of principal components.
#' @param distance nominally the distance measure, but note that
#'   makeKnnGraph below hard-codes "cosine", so this argument is
#'   effectively unused -- TODO confirm intent.
#' @param trim count trimming level for Pagoda2$new.
#' @param n.odgenes number of overdispersed genes to use.
#' @param k number of nearest neighbours for the kNN graph.
#' @param ... forwarded to pagoda2::Pagoda2$new.
#' @return the processed pagoda2 object.
#' @export
GetPagoda <- function (cm, n.cores = 4, clustering.type = "infomap", embeding.type = "tSNE",
                       tsne.iter.num = 1000, verbose = T, n.pcs = 100, distance = "cosine",
                       trim = 5, n.odgenes = 1000, k = 30, ...)
{
  r <- pagoda2::Pagoda2$new(cm, modelType = "plain", trim = trim,
                            n.cores = n.cores, verbose = verbose, ...)
  r$adjustVariance(plot = F, do.par = F, gam.k = 10, verbose = verbose)
  r$calculatePcaReduction(nPcs = n.pcs, n.odgenes = n.odgenes, maxit = 1000)
  r$makeKnnGraph(k = k, type = "PCA", center = T, distance = "cosine",
                 weight.type = "none", verbose = verbose)
  if (clustering.type == "infomap") {
    r$getKnnClusters(method = igraph::infomap.community, type = "PCA", name = "infomap")
  } else if (clustering.type == "multilevel") {
    r$getKnnClusters(method = igraph::multilevel.community, type = "PCA", name = "multilevel")
  } else stop("Unknown clustering type")
  if ("largeVis" %in% embeding.type) {
    r$getEmbedding(type = "PCA", embeddingType = "largeVis")
  }
  if ("tSNE" %in% embeding.type) {
    r$getEmbedding(type = "PCA", perplexity = 30, embeddingType = "tSNE",
                   max_iter = tsne.iter.num)
  }
  return(r)
}
#' Collect the graph neighbourhood of one cluster.
#'
#' @param index cluster label to look up in `clusters`.
#' @param clusters named factor of cluster assignments (names = vertices).
#' @param p2 pagoda2 object whose PCA kNN graph is searched.
#' @param order,mindist neighbourhood radius, forwarded to
#'   igraph::neighborhood().
#' @return list with `cluster` (member vertices), `expand_cluster`
#'   (members plus everything reachable) and `add_vertex` (the border:
#'   reachable vertices that are not members).
#' @export
ClusterNeighborhood <- function(index, clusters, p2, order=1, mindist=0) {
  # Vertices assigned to the requested cluster.
  members <- names(clusters[clusters == index])
  # Every vertex reachable from a member within `order` steps.
  reached <- unlist(igraph::neighborhood(p2$graphs$PCA, nodes = members, order = order, mindist = mindist))
  # Drop duplicate vertices, keeping their names only.
  neighbourhood <- names(reached[!duplicated(reached)])
  list(
    cluster = members,
    expand_cluster = neighbourhood,
    add_vertex = neighbourhood[!(neighbourhood %in% members)]
  )
}
#' For every vertex bordering a cluster, partition its neighbours into
#' cluster members, other border vertices, and everything else.
#' @export
GetDegreeConnectivity <- function(graph, vertex.name) {
  connectivity <- list()
  for (vertex in vertex.name$add_vertex) {
    # Names of the vertices adjacent to the current border vertex.
    neighbours <- names(graph[[vertex]][[1]])
    connectivity[[vertex]] <- list(
      names.cluster.vertex = neighbours[neighbours %in% vertex.name$cluster],
      names.add.vertex = neighbours[neighbours %in% vertex.name$add_vertex],
      names.other.vertex = neighbours[!(neighbours %in% vertex.name$expand_cluster)]
    )
  }
  return(connectivity)
}
#' Scale down requested connection counts so none exceeds availability.
#'
#' @param const.vec available connection counts per group.
#' @param var.vec requested connection counts per group.
#' @return the requested counts, proportionally reduced and rounded when
#'   any request exceeded what is available; otherwise unchanged.
#' @export
RecalculationCountConnection <- function(const.vec, var.vec) {
  # Largest deficit: where the request exceeds availability the most.
  shortfall <- const.vec - var.vec
  worst <- which.min(shortfall)
  if (shortfall[worst] >= 0) {
    return(var.vec)
  }
  # Shrink every request in proportion to its size relative to the worst
  # offender, so that group lands exactly on its available count.
  adjustment <- (var.vec / var.vec[worst]) * shortfall[worst]
  round(var.vec + adjustment)
}
#' @description Remove unwanted connections from the graph
#'
#' Trims the adjacency matrix of `graph` so that, for every vertex in the
#' neighbourhood of each cluster, only the prescribed number of connections
#' to (1) cluster members, (2) other neighbourhood vertices and (3) the rest
#' of the graph is kept. Surplus edges are picked at random and zeroed
#' symmetrically; the pruned matrix is then reprocessed via GetPagoda().
#'
#' @param graph an igraph object to prune.
#' @param clusters.name per-cluster membership lists
#'   (cluster / expand_cluster / add_vertex), see ClusterNeighborhood().
#' @param vertex.connectivity per-cluster output of GetDegreeConnectivity().
#' @param embeding.type embedding type forwarded to GetPagoda().
#' @export
RemoveConnections <- function(graph, clusters.name, vertex.connectivity, embeding.type=NULL) {
  assertthat::assert_that(length(vertex.connectivity) == length(clusters.name),
                          msg = "That data must have the same length")
  matrix.graph <- igraph::as_adj(graph)
  # BUG FIX: the original iterated `for (i in length(x))`, i.e. over the
  # single value length(x), so only the LAST cluster (and last group below)
  # was ever processed; seq_along() visits every index.
  for (cluster.index in seq_along(vertex.connectivity)) {
    cluster <- clusters.name[[cluster.index]][[1]]
    add <- clusters.name[[cluster.index]][[3]]
    tmp <- !(matrix.graph@Dimnames[[1]] %in% clusters.name[[cluster.index]][[2]])
    other <- matrix.graph@Dimnames[[1]][tmp]
    groups.name <- list(cluster, add, other)
    # Visit every vertex in the neighbourhood of this cluster.
    for (curr.row.name in names(vertex.connectivity[[cluster.index]])) {
      # Neighbour names of the current vertex, already split by group.
      curr.vertex <- vertex.connectivity[[cluster.index]][[curr.row.name]]
      count.connect.vertex <- NULL
      count.all.connection <- NULL
      all.connection <- list()
      for (index.group in seq_along(groups.name)) {
        # Number of neighbours that must be kept for the current vertex
        # within this group (cluster / neighbourhood / rest).
        count.connect.vertex[index.group] <- length(curr.vertex[[index.group]])
        # Vertices of this group currently connected to the vertex.
        groups.vertex <- matrix.graph[curr.row.name, groups.name[[index.group]]]
        all.connection[[index.group]] <- names(groups.vertex[groups.vertex > 0])
        count.all.connection[index.group] <- length(all.connection[[index.group]])
      }
      count.connect.vertex <- RecalculationCountConnection(count.all.connection, count.connect.vertex)
      for (index.group in seq_along(groups.name)) {
        assertthat::assert_that(count.all.connection[index.group] >= count.connect.vertex[index.group],
                                msg = "Error - Deleted vertex!")
        # Randomly mark connections for removal: FALSE = keep, TRUE = drop.
        logical.index <- sample(c(logical(count.connect.vertex[index.group]),
                                  !logical(count.all.connection[index.group] - count.connect.vertex[index.group])))
        surplus.connection <- all.connection[[index.group]][logical.index]
        # Zero the surplus edges symmetrically.
        matrix.graph[curr.row.name, surplus.connection] <- 0
        matrix.graph[surplus.connection, curr.row.name] <- 0
      }
    }
  }
  return(GetPagoda(matrix.graph, embeding.type = embeding.type))
}
#' Merge per-cluster subgraphs back into the full kNN graph.
#'
#' @param clusters factor of cluster assignments (one level per subgraph).
#' @param p2.objects list of pagoda2 objects built per cluster neighbourhood.
#' @param graph the original igraph kNN graph.
#' @param clusters.name per-cluster membership lists.
#' @param vertex.connectivity per-cluster output of GetDegreeConnectivity().
#' @param embeding.type embedding type forwarded to RemoveConnections().
#' @param deleted if TRUE, each cluster's vertices are dropped from the
#'   graph before its rebuilt subgraph is merged in.
#' @export
UpdateGraph <- function(clusters, p2.objects, graph, clusters.name, vertex.connectivity,
                        embeding.type=NULL, deleted=FALSE) {
  corrected.graph <- graph
  # seq_along() instead of 1:length(): 1:0 would iterate spuriously if the
  # factor carried no levels.
  for (i in seq_along(levels(clusters))) {
    if (isTRUE(deleted)) {
      corrected.graph <- igraph::delete_vertices(corrected.graph, clusters.name[[i]]$cluster)
    }
    corrected.graph <- igraph::union(corrected.graph, p2.objects[[i]]$graphs$PCA)
  }
  return(RemoveConnections(corrected.graph, clusters.name, vertex.connectivity, embeding.type))
}
#' Project a weighted graph into two dimensions with largeVis.
#' @export
EmbedGraph <- function(graph, M=1, gamma=1, alpha=0.1, sgd_batches=1e8,
                       seed=1, verbose=TRUE, n.cores=1) {
  # The weighted adjacency matrix drives the largeVis optimisation.
  weights <- igraph::as_adj(graph, attr='weight')
  layout <- largeVis::projectKNNs(wij = weights, dim=2, verbose = verbose,
                                  sgd_batches = sgd_batches, gamma=gamma, M=M,
                                  seed=seed, alpha=alpha, rho=1, threads=n.cores)
  # projectKNNs returns coordinates column-wise; label the columns by
  # vertex name and transpose so the result has one row per vertex.
  colnames(layout) <- igraph::V(graph)$name
  t(layout)
}
#' Map cluster labels from a base pagoda2 object onto the cell order of a
#' new graph.
#'
#' @param base.pagoda.graph pagoda2 object holding the reference clustering.
#' @param new.graph list whose elements carry cell names.
#' @param cluster.type name of the clustering to read (e.g. "multilevel").
#' @return factor of base cluster labels, ordered like `new.graph`.
#' @export
GetBaseClusters <- function(base.pagoda.graph, new.graph, cluster.type) {
  l1 <- base.pagoda.graph$clusters$PCA[[1]]
  l2 <- new.graph[[1]]
  # BUG FIX: the original called assertthat::are_equal() and discarded the
  # result, so the consistency check had no effect at all; surface a
  # warning (not an error, to avoid breaking existing callers) instead.
  if (!isTRUE(assertthat::are_equal(l1, l2))) {
    warning("base clustering and new graph appear to be inconsistent")
  }
  z <- base.pagoda.graph$clusters$PCA[[cluster.type]]
  names(z) <- names(base.pagoda.graph$clusters$PCA[[cluster.type]])
  base.clusters <- NA
  # seq_along() instead of 1:length(): safe when z is empty.
  for (i in seq_along(z)) {
    curr.cell <- names(new.graph[[i]])
    base.clusters[i] <- z[curr.cell]
  }
  return(as.factor(base.clusters))
}
#' Plot an embedding coloured by the per-cell values of one gene, with the
#' gene name drawn as a label in the corner of the plot.
#' @export
PlotEmbeddingGeneFraction <- function(gene, embedding, plot.mtx, title.x=0.04, title.y=0.99,
                                      size=0.5, alpha=0.5) {
  # Base embedding plot, coloured by the gene's column of plot.mtx;
  # conos/ggplot2 start-up messages are silenced.
  suppressMessages(
    base.plot <- conos::embeddingPlot(embedding, colors=plot.mtx[,gene], size=size, alpha=alpha) +
      ggplot2::scale_color_distiller(palette = "Spectral") +
      ggplot2::theme(plot.margin = ggplot2::margin(), axis.title.x = ggplot2::element_blank(),
                     axis.title.y = ggplot2::element_blank())
  )
  # Overlay the gene name at the requested relative coordinates.
  annotated <- cowplot::ggdraw(base.plot) +
    cowplot::draw_label(gene, x = title.x, y = title.y, hjust = title.x, vjust = title.y)
  return(annotated)
}
#' Iteratively refine a pagoda2 object's kNN graph, cluster by cluster.
#'
#' For each value in `k`, the current clustering is read, a separate
#' pagoda2 object is built for every cluster neighbourhood, the global
#' graph is patched via UpdateNNGraph(), and the clustering is recomputed.
#' Embeddings are only computed after the last refinement round.
#'
#' @param primary.data count matrix the per-cluster pagoda2 objects are
#'   built from (columns subset by cell name).
#' @param pagoda.obj pagoda2 object to refine; a copy is modified and returned.
#' @param clustering.type "infomap" or "multilevel".
#' @param k vector of neighbour counts, one refinement round per element.
#' @param improvement.algorithm graph type forwarded to UpdateNNGraph().
#' @param embeding.type may contain "largeVis" and/or "tSNE".
#' @param tsne.iter.num maximum tSNE iterations.
#' @param n.cores number of cores.
#' @export
UpdatePagoda <- function(primary.data, pagoda.obj, clustering.type='multilevel', k=30,
                         improvement.algorithm='knn', embeding.type='tSNE', tsne.iter.num=1000, n.cores=4) {
  r <- pagoda.obj$copy()
  for (index in 1:length(k)) {
    curr.k <- k[index]
    clusters <- r$clusters$PCA[[clustering.type]]
    # Neighbourhood (members + bordering vertices) of every cluster.
    clusters.name <- pbapply::pblapply(levels(clusters), ClusterNeighborhood, clusters, r, cl = n.cores)
    # improvement clusters
    print("Get pagoda2 for clusters")
    # One pagoda2 object per cluster neighbourhood; k is capped by the
    # neighbourhood size and n.pcs scales with it (at least 5).
    pagoda.for.clusters <- lapply(1:length(levels(clusters)), function(id)
      GetPagoda(primary.data[,clusters.name[[id]]$expand_cluster], embeding.type = NULL,
                k = min(curr.k, length(clusters.name[[id]]$expand_cluster)-1), verbose=F,
                n.cores = n.cores, n.pcs = max(round(length(clusters.name[[id]]$expand_cluster)/40), 5) )) # optimal count principal components
    # update (improvement) graph
    r$graphs$PCA <- UpdateNNGraph(r$graphs$PCA, pagoda.for.clusters, clusters, clusters.name,
                                  curr.k, graph.type = improvement.algorithm, n.cores = n.cores)
    # update some fields for pagoda object
    if (clustering.type == "infomap") {
      r$getKnnClusters(method = igraph::infomap.community, type = "PCA", name = "infomap")
    } else if (clustering.type == "multilevel") {
      r$getKnnClusters(method = igraph::multilevel.community, type = "PCA", name = "multilevel")
    } else stop("Unknown clustering type")
    # Embeddings are expensive; compute them only after the final round.
    if (index == length(k)) {
      if ("largeVis" %in% embeding.type) {
        r$getEmbedding(type = "PCA", embeddingType = "largeVis")
      }
      if ("tSNE" %in% embeding.type) {
        r$getEmbedding(type = "PCA", perplexity = 30, embeddingType = "tSNE",
                       max_iter = tsne.iter.num)
      }
    }
  }
  return(r)
}
#' Find which cluster a vertex belongs to.
#'
#' @param vertex a named element; its name(s) are looked up in the cluster
#'   membership lists.
#' @param clusters.name list of cluster descriptions, each carrying a
#'   `cluster` character vector of member names.
#' @return index of the first matching cluster, or NULL when none matches.
#' @export
FindIndexClusterForVertex <- function(vertex, clusters.name) {
  # seq_along() instead of 1:length(): safe for an empty cluster list, and
  # any() keeps the if-condition scalar even if `vertex` carries several
  # names (a length > 1 condition is an error since R 4.2).
  for (index in seq_along(clusters.name)) {
    if (any(names(vertex) %in% clusters.name[[index]]$cluster)) {
      return(index)
    }
  }
  invisible(NULL)
}
|
3a6e0f2e505bb24aff2f218ff0daeec22b7bd18d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/adaptalint/examples/extract_style.Rd.R | f64bfe0903030d12596a5b5654e2b34aa77e0cfc | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 315 | r | extract_style.Rd.R | library(adaptalint)
### Name: extract_style
### Title: Find style of a file
### Aliases: extract_style
### ** Examples
# Get the path to a file to check
path <- system.file("extdata", 'styles.R', package='adaptalint')
# Find out the profile of lints in this file
style <- extract_style(filename = path)
|
e1e9347424851ae9a75178e5555aa14ae40a1e7c | f9cc21864821bfb3bd900776738dc431c4f3298c | /quizzes/quiz2qu7.R | b8954ad65f06852f32c99ab59f75426394d5fdfa | [] | no_license | BananuhBeatDown/Regression_Models | a7ee7851d584bc8e8d5cd6d44b5e6e2a25d96633 | 5ff0183fec63660557408c5d64c0c3fcc97f9884 | refs/heads/master | 2021-06-01T09:48:07.268792 | 2016-07-11T20:37:20 | 2016-07-11T20:37:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 80 | r | quiz2qu7.R | # centimeters = 1/100 of a meter, so multiply by 100 to get normalize for meters |
39bf82558d59ba9d0505cac7c65b99faad50b66b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/lasso2/examples/Iowa.Rd.R | bbbb1d7934c84ba1e3442a4cd08f7e8a206ed1c9 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 155 | r | Iowa.Rd.R | library(lasso2)
### Name: Iowa
### Title: The Iowa Wheat Yield Data
### Aliases: Iowa
### Keywords: datasets
### ** Examples
# Load the Iowa wheat yield data shipped with lasso2 and draw a
# scatter-plot matrix of all its variables.
data(Iowa)
pairs(Iowa)
|
d217551a89b9a77c0577162ce3024d6cc8bba9b6 | 7898dc4d7e79f533da36d5f981b433933d75c65f | /group5project/R/oralhealthdata.R | e3abf06ccb0c30b8e5aa55867aaf61db0c9f33ef | [] | no_license | khanimzul/group5_project | e3ddc7cd85bc41e7120f3a7d506d1c53325f3991 | 83f7f0456122a13fd75e1207fe625d5df1f650d2 | refs/heads/master | 2021-03-24T13:49:58.679316 | 2017-12-11T05:01:40 | 2017-12-11T05:01:40 | 108,052,265 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,336 | r | oralhealthdata.R | #' Chronic disease indicator- Oral Health
#'
#' Data is obtained from the Centers for Disease Control and Prevention website, https://www.cdc.gov/cdi/
#' The chronic disease indicators (CDI) are a set of surveillance indicators developed by consensus among CDC, the Council of State and Territorial Epidemiologists (CSTE),
#' and the National Association of Chronic Disease Directors (NACDD).
#' CDI enables public health professionals and policymakers to retrieve uniformly defined state and selected metropolitan-level data for chronic diseases
#' and risk factors that have a substantial impact on public health.
#' These indicators are essential for surveillance, prioritization, and evaluation of public health interventions.
#' .
#'
#' @docType data
#'
#' @usage data(oralhealthdata)
#'
#' @format A \code{matrix} with 5771 observations and 7 columns. The columns are defined as follows:
#' \describe{
#' \item{\code{ID}}{identification number for each state}
#' \item{\code{Year}}{Year of data collection}
#' \item{\code{Location}}{Location of data collected}
#' \item{\code{Topic}}{Topic Indicator}
#'   \item{\code{Data Type}}{Type of data value}
#'   \item{\code{Data Value}}{observation value}
#' \item{\code{Category}}{indicator variable}
#' }
#'
#' @keywords datasets
#'
#' @source {https://www.cdc.gov/cdi}
"oralhealthdata"
|
873026480b35fb6dfec0d6ae72449b5f8f400b2d | 5e1174f3f71a74b07059fcead63a6739ba68667e | /R/dataset.R | 520cf49a8226227e4bdb49e1f1dc8986fe47f616 | [] | no_license | Niklas191/amateuR | 8ddbd1cfe9496c81f16551a02cbeb6b118278afa | 91b1be0e18161b5f319ff9310a8bed860ce52480 | refs/heads/master | 2020-07-27T08:50:58.968059 | 2019-09-22T15:59:05 | 2019-09-22T15:59:05 | 209,035,863 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 444 | r | dataset.R | #' Dataset for the Kreisliga Goettingen-Osterode
#'
#' Dataset for testing the amateuR package.
#' The dataset contains the data vector for the Kreisliga Göttingen-Osterode in the 2019-20 season, downloaded on 2019-09-20.
#'
#' @docType data
#'
#' @usage kreisliga_goettingen
#'
#' @examples
#' all_game_data(amateuR::kreisliga_goettingen)
#' current_table(amateuR::kreisliga_goettingen)
#' @keywords datasets
"kreisliga_goettingen"
|
22996e6e11dfb72b5a4efc311e80fbd6646b967d | 7632610e7aeadb88639432ae8b3fd620fbb6cda3 | /R/get_mean_sd_rasters.R | 03a17d4a1fc788d8323a09a1f3bcb143eed242e3 | [
"MIT"
] | permissive | hrvg/statisticalRoughness | 898ceece48f9c0dc2d62d0409e3d22e425bd5bb0 | 6f96da69b918eda491596bc2f6298b9d6ed0c3c7 | refs/heads/master | 2023-07-24T12:52:47.789486 | 2021-03-03T01:32:29 | 2021-03-03T01:32:29 | 275,955,288 | 1 | 0 | NOASSERTION | 2021-04-17T19:47:08 | 2020-06-30T00:19:59 | R | UTF-8 | R | false | false | 480 | r | get_mean_sd_rasters.R | #' Compute box-counting mean standard deviation rasters
#' @param dem initial DEM
#' @param L integer, outer box size: after the standard deviation is taken
#'   at scale r, the result is averaged with factor L / r (so L should be a
#'   multiple of each r -- TODO confirm)
#' @param R vector of inner box sizes (aggregation factors)
#' @return list of rasters, one per value of R
#' @importFrom magrittr %>%
#' @importFrom stats sd
#' @export
#' @keywords Hurst
get_mean_sd_rasters <- function(dem, L, R){
	lapply(R, function(r) {
		# Local roughness (sd) at scale r, then averaged up to the common scale L.
		roughness <- terra::aggregate(dem, fact = r, fun = sd, na.rm = TRUE)
		terra::aggregate(roughness, fact = L / r, fun = mean, na.rm = TRUE)
	})
}
2229f3b9343e7205a42ec9214b34a9220db09dd1 | 83e19da2b2847b36f2a82e8bf7f38108a4286974 | /inst/testing/PyOptions.R | 7ca10c82cb0452b19c38d3cb9e532753b18f5ded | [] | no_license | cran/PythonInR | dc710e7c21e3d67bee9644517a2fa9c138ee62cd | 064930845cbab2e05bd25ed17a270547521b2a6f | refs/heads/master | 2021-07-13T05:33:42.412009 | 2020-06-21T19:50:06 | 2020-06-21T19:50:06 | 38,826,198 | 12 | 1 | null | null | null | null | UTF-8 | R | false | false | 400 | r | PyOptions.R | #' # pyOptions
# Regression tests for PythonInR's pyOptions() getter/setter.
require(testthat)
require(PythonInR)
# Start the embedded Python session; its start-up output is discarded.
invisible(capture.output(pyConnect()))
## Options
# The full set of known option names, in their expected order.
expect_that(names(pyOptions()),
            equals(c("numpyAlias", "useNumpy", "pandasAlias", "usePandas", "winPython364", "intToLong")))
expect_that(pyOptions("numpyAlias"), equals("numpy"))
# Setting an option (second argument) must be reflected by the getter.
pyOptions("numpyAlias", "np")
expect_that(pyOptions("numpyAlias"), equals("np"))
# Restore the default so later tests see a clean state.
pyOptions("numpyAlias", "numpy")
|
aebeee7eeb47591c3d8ee980e51d23e061dc98e7 | 51b7e1500f4d227cec7070d2ab82109d738dfa7e | /R/print.betadiversity.R | 378a75ddca88979cd39bb8e75bec7604e4870a05 | [] | no_license | yangxhcaf/gravy | fa0ecae2c8de480b2626b7e7f58cc19af7f0f148 | e1392ec52c723dc92a48361aa8c9c0e288257567 | refs/heads/master | 2021-09-14T09:28:04.962629 | 2018-05-11T07:45:39 | 2018-05-11T07:45:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 143 | r | print.betadiversity.R | "print.betadiversity" <-
function(x, ...)
{
    # Show gradient positions and their beta diversity estimates side by
    # side as a two-row table.
    summary.tab <- rbind("Gradient" = x$x, "Betadiversity" = x$beta)
    print(summary.tab)
    # Return the object invisibly, as print methods conventionally do.
    invisible(x)
}
|
957eab86fa6058873b8e34e79137f755ffc3faee | 53264211a0d64ca1b346828d02266a39e7e9ad4f | /R/exports.R | 8b6739e4621d3e964e5d6bd25c07ede5bc52b9ef | [] | no_license | cran/missDeaths | 7b02b43b42c3f787a6e157085f9c131aba853079 | f8e532e150f6114d20d5e9ca969c6276eddee92f | refs/heads/master | 2021-06-03T03:58:34.165735 | 2020-10-17T17:00:02 | 2020-10-17T17:00:02 | 37,831,946 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,115 | r | exports.R | # @useDynLib missDeaths
#' Initializes the missDeaths population table cache
#'
#' Loads a population mortality table into the package's compiled cache;
#' later population-survival computations read from this cache.
#'
#' @keywords internal
#'
#'
#' @param poptable a population mortality table (e.g. slopop)
#' @export md.init
md.init <- function(poptable) {
  # Delegates to the compiled routine registered by the package.
  SurvExpInit(poptable)
}
#' Calculates individual expected survival beyond the observation time
#'
#' Delegates to the compiled SurvExpPrep routine; presumably requires the
#' population table cache to be loaded via md.init() first -- confirm
#' against the C++ source.
#'
#' @keywords internal
#'
#' @param D Demographic information (see md.D)
#' @param time Observation time (number of days)
#' @export md.expprep
md.expprep <- function(D, time) {
  SurvExpPrep(D, time)
}
#' Survival time lookup from the cached population table
#'
#' Delegates to the compiled SurvTime routine. Judging by the argument
#' names, this presumably inverts md.survprob (time for a given survival
#' probability) -- confirm against the C++ source. The original title was
#' a copy-paste of md.init's.
#'
#' @keywords internal
#'
#'
#' @param year year (see md.D for the expected encoding)
#' @param age age (number of days)
#' @param prob survival probability
#' @param sex 1 for males, 2 for females
#' @export md.survtime
md.survtime <- function(year, age, prob, sex) {
  SurvTime(year, age, prob, sex)
}
#' Survival probability lookup from the cached population table
#'
#' Delegates to the compiled SurvProbability routine; presumably returns
#' the population survival probability over `time` for an individual of
#' the given age/sex/year -- confirm against the C++ source. The original
#' title was a copy-paste of md.init's.
#'
#' @keywords internal
#'
#'
#' @param year year (see md.D for the expected encoding)
#' @param age age (number of days)
#' @param time observation time (number of days)
#' @param sex 1 for males, 2 for females
#' @export md.survprob
md.survprob <- function(year, age, time, sex) {
  SurvProbability(year, age, time, sex)
}
#' Initializes the missDeaths population table cache
#'
#' @keywords internal
#'
#'
#' @param year year
#' @param sex sex
#' @export md.survdump
#md.survdump <- function(year, sex) {
# SurvDump(year, sex)
#}
#' Prepare compatible demographic information
#'
#' Utility function that returns a data.frame containing basic demographic
#' information compatible with the \code{\link{md.survnp}},
#' \code{\link{md.survcox}} and \code{\link{md.impute}} functions.
#'
#'
#' @param age vector of patient ages specified as number of days or number of years.
#' @param sex vector containing 1 for males and 2 for females
#' @param year vector of years of entry into the study can either be supplied
#' as vector of start dates or as vector of years specified in number of days from origin (1-1-1970).
#' @seealso \code{\link{md.survcox}}, \code{\link{md.survnp}}
#' @export md.D
md.D <- function(age, sex, year) {
  # Values below 150 cannot be ages in days, so treat them as years and
  # convert to days.
  if (max(age) < 150) {
    age <- age * 365.2425
  }
  # Non-numeric input is taken to be dates; convert to days since 1-1-1970.
  if (!is.numeric(year)) {
    year <- as.numeric(year - as.Date("0-1-1")) - 1970 * 365.2425
  }
  demographics <- data.frame(age = age, sex = sex, year = year)
  # Validate the sex coding: only 1 (male) and 2 (female) are allowed.
  sex.codes <- levels(as.factor(demographics$sex))
  for (i in 1:length(sex.codes)) {
    if (!(sex.codes[i] %in% c("1", "2"))) {
      stop("column 'sex' can only contain values 1 and 2")
    }
  }
  # Ages must fall inside a plausible human lifespan, expressed in days.
  if ((min(demographics$age) < 0) || (max(demographics$age) > 130 * 365.2425)) {
    stop("values in column 'age' out of bounds, should be within [0, 130 * 365] days")
  }
  demographics
}
md.fixsample <- function(observed) {
  # Convert a sample whose columns are expressed in years into days.
  fixed <- observed
  # The calendar year of entry becomes days since 1970 at the age of entry;
  # it must be computed before 'age' itself is rescaled below.
  fixed$year <- (fixed$year + fixed$age - 1970) * 365.2425
  fixed$age <- fixed$age * 365.2425
  fixed$time <- round(fixed$time * 365.2425)
  fixed$maxtime <- round(fixed$maxtime * 365.2425)
  fixed
}
#' Correctly impute missing information of possible deaths using population mortality
#'
#' An iterative approach is used in this method to estimate the conditional
#' distribution required to correctly impute the times of deaths using
#' population mortality tables.\cr\cr
#' Note, that simply imputing expected survival times may seem intuitive,
#' but does not give unbiased estimates, since the right censored individuals
#' are not a random subsample of the patients.
#'
#'
#' @param data a data.frame in which to interpret the variables named in the
#' formula.
#' @param f a formula object, with the response on the left of a ~ operator,
#' and the terms on the right. The response must be a survival object as
#' returned by the \code{Surv} function.
#' @param maxtime maximum potential observation time (number of days).
#'
#' where \code{status}=0 equals \code{time}.
#'
#' where \code{status}=1 equals potential time of right censoring if no event
#' would be observed.
#' @param D demographic information compatible with \code{md.survcox}, \code{md.impute}
#' and \code{md.survnp}, see \code{\link{md.D}}.
#' @param ratetable a population mortality table, default is \code{slopop}
#' @param iterations the number of iteration steps to be performed, default is
#' 4
#' @return an array of times with imputed times of death that can be used instead of the
#' unavailable complete data set to get unbiased estimates, ie. in \code{\link[survival]{coxph}}.
#' @seealso \code{\link{md.survcox}}
#' @references Stupnik T., Pohar Perme M. (2015) "Analysing disease recurrence
#' with missing at risk information." Statistics in Medicine 35. p1130-43.
#' \url{https://onlinelibrary.wiley.com/doi/abs/10.1002/sim.6766}
#' @examples
#' library(missDeaths)
#' data(slopop)
#'
#' data(observed)
#' observed$time = observed$time*365.2425
#' D = md.D(age=observed$age*365.2425, sex=observed$sex, year=(observed$year - 1970)*365.2425)
#' newtimes = md.impute(observed, Surv(time, status) ~ age + sex + iq + elevation,
#' observed$maxtime*365.2425, D, slopop, iterations=4)
#'
#' #Cumulative incidence function
#' cif = survfit(Surv(observed$time, observed$status)~1)
#' cif$surv = 1 - cif$surv
#' cif$upper = 1 - cif$upper
#' cif$lower = 1 - cif$lower
#' plot(cif)
#'
#' #Net survival (NOTE: std error is slightly underestimated!)
#' surv.net = survfit(Surv(newtimes, observed$status)~1)
#' summary(surv.net, times=c(3,9)*365.2425)
#' plot(surv.net)
#'
#' #Event free survival (NOTE: std error is slightly underestimated!)
#' surv.efs = survfit(Surv(newtimes, 1 * (observed$status | (newtimes != observed$time)))~1)
#' summary(surv.efs, times=c(3,9)*365.2425)
#' plot(surv.efs)
#'
#' @export md.impute
md.impute <- function(data, f, maxtime, D, ratetable, iterations=4)
{
  # Convert the day-of-entry encoding (days since 1970, see md.D) into the
  # calendar year of birth expected by the compiled routine.
  D$year <- 1970 + round((D$year - D$age) / 365.2425)
  # BUG FIX: deparse() splits long formulas into several strings at
  # width.cutoff (default 60 chars); collapse them into one string before
  # stripping whitespace, otherwise SimCensorX would receive a character
  # vector instead of a single formula string.
  f <- paste(deparse(f), collapse = "")
  f <- gsub(" ", "", f, fixed = TRUE)
  # Load the population table into the cache used by the compiled routine.
  md.init(ratetable)
  # Iterative imputation of the unobserved death times (compiled routine).
  return(SimCensorX(data, maxtime, f, D, iterations))
}
#' Nonparametric analysis of disease recurrence with missing information of possible deaths
#'
#' Estimates the Net and Event free survival using a non-parametric approach
#' that aims to correct all individuals using the unconditional survival time
#' distribution obtained from the population mortality table.\cr\cr
#' The idea comes from realizing that the number of observed events in the data
#' equals the number which would be observed in case of a complete data set,
#' but the number of patients at risk does not. Hence, this method adjusts the
#' observed number at risk to mimic the one we would get if the data was
#' complete.
#'
#'
#' @param time the time to event (number of days)
#' @param status the status indicator, 0=right censored, 1=event at \code{time}
#' @param maxtime maximum potential observation time (number of days).
#'
#' where \code{status}=0 equals \code{time}.
#'
#' where \code{status}=1 equals potential time of right censoring if no event
#' would be observed.
#' @param D demographic information compatible with \code{ratetable}, see
#' \code{\link{md.D}}.
#' @param ratetable a population mortality table, default is \code{slopop}
#' @param conf.int desired coverage of the estimated confidence interval
#' @return A list with components giving the estimates of net and event free
#' survival.
#'
#' \item{time}{times where the estimates are calculated (number of days)}
#' \item{Y.net}{adjusted number of patients at risk at each time in a hypothetical world where patients don't die}
#' \item{Y.efs}{adjusted number of patients at risk at each time}
#' \item{surv.net}{the estimated Net survival} \item{std.err.net}{the estimated
#' standard error of Net survival estimates} \item{surv.efs}{the estimated
#' Event free survival} \item{std.err.efs}{the estimated standard error of
#' Event free survival estimates}
#' @references Stupnik T., Pohar Perme M. (2015) "Analysing disease recurrence
#' with missing at risk information." Statistics in Medicine 35. p1130-43.
#' \url{https://onlinelibrary.wiley.com/doi/abs/10.1002/sim.6766}
#' @examples
#'
#' \dontrun{
#' library(missDeaths)
#' library(cmprsk)
#' data(slopop)
#'
#' data(observed)
#' D = md.D(age=observed$age*365.2425, sex=observed$sex, year=(observed$year - 1970)*365.2425)
#' np = md.survnp(observed$time*365.2425, observed$status, observed$maxtime*365.2425, D, slopop)
#'
#' #calculate net survival at 3 and 9 years
#' w = list(list(time=np$time, est=np$surv.net, var=(np$std.err.net)^2))
#' timepoints(w, times=c(3,9)*365.2425)
#'
#' #plot the net and event free survival curves
#' plot(np$time, np$surv.net)
#' plot(np$time, np$surv.efs)
#' }
#'
#' @export md.survnp
md.survnp <- function(time, status, maxtime, D, ratetable, conf.int=0.95)
{
  # Delegate to the internal non-parametric estimator; note the argument
  # order expected by my.survnp() differs from this signature.
  # NOTE(review): `conf.int` is accepted here but never forwarded, so the
  # confidence level appears to be fixed inside my.survnp() -- confirm.
  my.survnp(time, status, D, ratetable, maxtime)
}
#' Fit a proportional hazards regression model over disease recurrence data
#' with missing information of possible deaths
#'
#' An iterative approach is used in this method to estimate the conditional
#' distribution required to correctly impute the times of deaths using
#' population mortality tables.\cr\cr
#' Note, that simply imputing expected survival times may seem intuitive,
#' but does not give unbiased estimates, since the right censored individuals
#' are not a random subsample of the patients.
#'
#' @param data a data.frame in which to interpret the variables named in the
#' formula.
#' @param f a formula object, with the response on the left of a ~ operator,
#' and the terms on the right. The response must be a survival object as
#' returned by the \code{Surv} function.
#' @param maxtime maximum potential observation time (number of days).
#'
#' where \code{status}=0 equals \code{time}.
#'
#' where \code{status}=1 equals potential time of right censoring if no event
#' would be observed.
#' @param D demographic information compatible with \code{ratetable}, see
#' \code{\link{md.D}}.
#' @param ratetable a population mortality table, default is \code{slopop}
#' @param iterations the number of iteration steps to be performed, default is
#' 4
#' @param R the number of multiple imputations performed to adjust the
#' estimated variance of estimates, default is 50.
#' @return if \code{R} equals 1 then an object of class
#' \code{\link[survival]{coxph.object}} representing the fit.
#'
#' if \code{R} > 1 then the result of the \code{\link[mitools]{MIcombine}} of
#' the \code{coxph} objects.
#' @seealso \code{\link{md.impute}}, \code{\link[mitools]{MIcombine}}
#' @references Stupnik T., Pohar Perme M. (2015) "Analysing disease recurrence
#' with missing at risk information." Statistics in Medicine 35. p1130-43.
#' \url{https://onlinelibrary.wiley.com/doi/abs/10.1002/sim.6766}
#' @examples
#'
#' \dontrun{
#' library(missDeaths)
#' data(slopop)
#'
#' data(observed)
#' observed$time = observed$time*365.2425
#' D = md.D(age=observed$age*365.2425, sex=observed$sex, year=(observed$year - 1970)*365.2425)
#'
#' #fit a cox model (NOTE: estimated std error is slightly underestimated!)
#' md.survcox(observed, Surv(time, status) ~ age + sex + iq + elevation,
#' observed$maxtime*365.2425, D, slopop, iterations=4, R=1)
#'
#' #multiple imputations to correct the estimated std error
#' md.survcox(observed, Surv(time, status) ~ age + sex + iq + elevation,
#' observed$maxtime*365.2425, D, slopop, iterations=4, R=50)
#' }
#'
#' @export md.survcox
md.survcox <- function(data, f, maxtime, D, ratetable, iterations=4, R = 50)
{
  # Recover the calendar year of birth from the day-scaled demographics
  # (per md.D usage in the examples, D$year and D$age are in days since
  # 1970-01-01).
  D$year = 1970 + round((D$year - D$age) / 365.2425)

  # deparse() may split a long formula across several strings (width.cutoff);
  # collapse them so the back end receives a single formula string.
  ff = paste(deparse(f), collapse = "")
  ff = gsub(" ", "", ff, fixed = TRUE)
  md.init(ratetable)

  # One imputation of the death times followed by a Cox fit on the original
  # formula; shared by the single- and multiple-imputation paths.
  fit_once = function() {
    newdata = data
    newdata$time = SimCensorX(data, maxtime, ff, D, iterations)
    survival::coxph(f, data = newdata)
  }

  # R == 1: return the raw coxph fit (its std errors are underestimated,
  # as documented above).
  if (R == 1)
    return (fit_once())

  # R > 1: pool the multiply-imputed fits with Rubin's rules.
  # mitools:: qualification matches the survival:: usage above and works
  # even when mitools is not attached.
  models = lapply(seq_len(R), function(i) fit_once())
  mitools::MIcombine(models)
}
|
f6eba5ad248df7bd276482a7f3a9c2af06045e4e | efd2197ee62e65f5548c8b1fd94ae063af0f57e0 | /plot3.R | 6f88db6a85fd2dc74acb953e59176d92d812bfd7 | [] | no_license | jbewald/ExData_Plotting1 | 5011272dc06cd80e214bff5ff3ddc242826ec125 | f783f19ac281a882e9017b6d7ecc4f4ee8481ce7 | refs/heads/master | 2021-01-21T18:53:25.330992 | 2015-10-09T04:06:55 | 2015-10-09T04:06:55 | 43,824,783 | 0 | 0 | null | 2015-10-07T15:17:44 | 2015-10-07T15:17:43 | null | UTF-8 | R | false | false | 953 | r | plot3.R | setwd("c:/data")
# Read the full power-consumption data set (semicolon separated, "?" = NA).
power <- read.csv("household_power_consumption.txt",
                  stringsAsFactors = FALSE, sep = ";", na.strings = "?")

# Build a POSIXct timestamp from the separate Date and Time columns.
power$DateTime <- as.POSIXct(paste(power$Date, power$Time),
                             format = "%d/%m/%Y %H:%M:%S")

# Convert Date so it can be compared against calendar dates.
power$Date <- as.Date(power$Date, "%d/%m/%Y")

# Keep only the two target days.
feb <- subset(power, Date >= "2007-02-01" & Date <= "2007-02-02")

# Plot 3: empty frame first, then one line per sub-metering channel.
with(feb, plot(DateTime, Sub_metering_1, type = "n",
               ylab = "Energy Sub Metering", xlab = ""))
lines(feb$DateTime, feb$Sub_metering_1, col = "black")
lines(feb$DateTime, feb$Sub_metering_2, col = "red")
lines(feb$DateTime, feb$Sub_metering_3, col = "blue")

# NOTE(review): hard-coded absolute path; the PNG is written into this folder.
setwd("C:/Users/Brad/Documents/R/Exploratory Analysis/Assignment1")
dev.copy(png, "plot3.png")
dev.off()
|
635cc4b661a0b3926a2b2e9d448ac738976488ec | df6c3a1e165ef996c53b3eeda3a531d65cb5e9a1 | /figure_2_through_4_script.R | ae13b1baa2d1a40b920febcfa181bcae671265b9 | [
"CC0-1.0"
] | permissive | tdlan80/uwin_ms_2 | 97e4ef44bfa3cba6e4a91396592f5e963850f63b | 8164972985a056f774552f182cbd54ef52376659 | refs/heads/master | 2023-04-09T11:24:15.582397 | 2021-04-23T20:13:32 | 2021-04-23T20:13:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,916 | r | figure_2_through_4_script.R | source("sourcer.R")
# Model-selection (CPO) scores per species, sorted alphabetically so they
# line up with the alphabetically sorted result files collected below.
cpo <- read.csv("cpo_scores.csv", stringsAsFactors = FALSE)
cpo <- cpo[order(cpo$species), ]

# Posterior MCMC matrix files for the chosen model set, one per species.
which_folder <- "best"
my_files <- list.files(paste0("./results/", which_folder),
                       pattern = "matrix",
                       full.names = TRUE)

# Species codes extracted from the file names ("<species>_matrix.csv").
sp_name <- gsub(".*/(\\w+)_matrix.csv", "\\1", my_files)
pretty_sp <- c('coyote', 'fox squirrel', 'gray squirrel',
               'opossum', 'e. cottontail', 'raccoon',
               'red fox', 'skunk')

# Create a plot sub-folder per species when one does not exist yet.
# Fix: seq_along() instead of 1:length(); the old form iterated over
# c(1, 0) and indexed out of bounds when no result files were found.
f_exist <- dir.exists(paste0("./plots/", sp_name))
for (ex in seq_along(f_exist)) {
  if (!f_exist[ex]) {
    dir.create(paste0("./plots/", sp_name[ex]))
  }
}
# plots for intercept
# For each species: load its posterior MCMC draws, predict intercepts along
# the between-city gradients, and save the figures for the best model.
for(species in 1:length(sp_name)){
  # as a function of housing density
  # read in the mcmc matrix
  results_matrix <- data.table::fread(my_files[species],
                                      data.table = FALSE) %>%
    as.matrix(.)
  # the covariate for prediction
  # (within-city terms held at 0; sweep between-city housing density)
  new_data <- data.frame(hden = 0,
                         prophab_btwn = 0,
                         hden_btwn = seq(200, 1400, 10))
  # the city data
  # (observed between-city housing densities, one row per city)
  new_city_data <- data.frame(hden = 0,
                              prophab_btwn = 0,
                              hden_btwn = cdat$hden)
  row.names(new_city_data) <- cdat$city
  preds <- predict.intercept(mmat = results_matrix,
                             new_data = new_data,
                             city_data = cdat,
                             new_city_data = new_city_data,
                             species_there = det_events[,sp_name[species]],
                             model = cpo$best[species])
  # skip the housing-density figure when the best-supported model is
  # habitat-only
  if(cpo$best[species] != 'habitat'){
  makeplot.hdens(preds = preds, species = sp_name[species],x = new_data$hden_btwn,
                 cityx = cdat$hden, species_there = det_events[,sp_name[species]],
                 pp = FALSE)
  }
  # for habitat intercept
  # (sweep between-city habitat availability instead)
  new_data <- data.frame(hden = 0,
                         prophab_btwn = seq(0.1, 0.7, 0.005),
                         hden_btwn = 0)
  new_city_data <- data.frame(hden = 0,
                              prophab_btwn = cdat$habitat,
                              hden_btwn = 0)
  row.names(new_city_data) <- cdat$city
  preds <- predict.intercept(mmat = results_matrix,
                             new_data = new_data,
                             city_data = cdat,
                             new_city_data = new_city_data,
                             species_there = det_events[,sp_name[species]],
                             model = cpo$best[species])
  # skip the habitat figure when the best-supported model is
  # housing-density-only
  if(cpo$best[species] != 'housing_density'){
  makeplot.habitat(preds = preds, species = sp_name[species],x = new_data$prophab_btwn,
                   cityx = cdat$habitat, species_there = det_events[,sp_name[species]],
                   pp = FALSE)
  }
}
# plots for slopes
# Same structure as the intercept plots above, but predicting slopes
# (predict.slope) and passing intercept = FALSE to the plotting helpers.
for(species in 1:length(sp_name)){
  # as a function of housing density
  # read in the mcmc matrix
  results_matrix <- data.table::fread(my_files[species],
                                      data.table = FALSE) %>%
    as.matrix(.)
  # the covariate for prediction
  new_data <- data.frame(hden = 0,
                         prophab_btwn = 0,
                         hden_btwn = seq(200, 1400, 10))
  # the city data
  new_city_data <- data.frame(hden = 0,
                              prophab_btwn = 0,
                              hden_btwn = cdat$hden)
  row.names(new_city_data) <- cdat$city
  preds <- predict.slope(mmat = results_matrix,
                         new_data = new_data,
                         city_data = cdat,
                         new_city_data = new_city_data,
                         species_there = det_events[,sp_name[species]],
                         model = cpo$best[species])
  # skip the housing-density figure when the best model is habitat-only
  if(cpo$best[species] != 'habitat'){
  makeplot.hdens(preds = preds, species = sp_name[species],x = new_data$hden_btwn,
                 cityx = cdat$hden, species_there = det_events[,sp_name[species]],
                 intercept = FALSE, window = FALSE, pp = FALSE)
  }
}
# Second loop: slopes as a function of between-city habitat availability.
for(species in 1:length(sp_name)){
  # read in the mcmc matrix
  results_matrix <- data.table::fread(my_files[species],
                                      data.table = FALSE) %>%
    as.matrix(.)
  # for habitat slope
  new_data <- data.frame(hden = 0,
                         prophab_btwn = seq(0.1, 0.7, 0.005),
                         hden_btwn = 0)
  new_city_data <- data.frame(hden = 0,
                              prophab_btwn = cdat$habitat,
                              hden_btwn = 0)
  row.names(new_city_data) <- cdat$city
  preds <- predict.slope(mmat = results_matrix,
                         new_data = new_data,
                         city_data = cdat,
                         new_city_data = new_city_data,
                         species_there = det_events[,sp_name[species]],
                         model = cpo$best[species])
  # skip the habitat figure when the best model is housing-density-only
  if(cpo$best[species] != 'housing_density'){
  makeplot.habitat(preds = preds, species = sp_name[species],x = new_data$prophab_btwn,
                   cityx = cdat$habitat,
                   intercept = FALSE, window = FALSE, pp = FALSE)
  }
}
#### plotting for supp mater A
#
# Half-violin plots of site-level housing density per city, with cities
# ordered by their city-level housing density, written to a TIFF.
library(vioplot)
supp_plot_data <- patch_covs
# express housing density in thousands of houses per km^2
supp_plot_data$hd_1000 <- supp_plot_data$hd_1000 / 1000
# NOTE(review): windows() opens an on-screen device that is never closed;
# dev.off() below only closes the tiff device -- confirm intent.
windows(6,8)
tiff("./plots/supp_mater/hdenrange.tiff", height = 6, width = 8,
     units = "in", res = 600, compression = "lzw")
par(mar = c(5,7,0.5,0.5), usr =c(0,10,0,10) )
# empty frame; axes and content are drawn manually below
plot(1~1, type ="n", bty = 'l', xlab = "", ylab = "", xaxt = "n",
     yaxt = "n", ylim = c(0.368,10 * 0.965), xlim = c(0.368,10 * 0.965))
par("usr")
axis(2, at = seq(0.25, 9.25, 1), labels = F, tck = -0.025)
# get names in the correct order
mtext(text = cplot$pretty[order(cplot$hden)],
      2,
      line = 1.25,
      at = seq(0.25,9.5,1),
      las = 1,
      cex = 0.9
)
axis(1, at = seq(0,10, 1), labels = F, tck = -0.025)
axis(1, at = seq(0,10, 1/2), labels = F, tck = -0.025/2)
mtext(text = sprintf("%.f",seq(0,10,1)),
      1,
      line = 0.75,
      at = 0:10)
mtext(expression("Site-level housing density (1000 houses" * phantom('h') * km^-2 *")"),
      1,
      at = mean(0:10),
      line = 3,
      cex = 1.5
)
# plot them by median
# (cities ordered by increasing city-level housing density)
to_plot <- cdat$city[order(cdat$hden)]
u <- par("usr")
# faint vertical grid lines across the full plot height
for(i in 0:10){
  lines(x= rep(i, 2),
        y = c(u[1],u[2]),
        col = scales::alpha("#424342", 0.5))
}
# one half-violin per city, anchored at i - 1 on the y axis
for(i in 1:10){
  my_vioplot(
    supp_plot_data$hd_1000[patch_covs$city== to_plot[i]],
    at = i-1,
    side = "right",
    horizontal = TRUE,
    add = TRUE,
    wex = 2,
    col = "#32DAC3"
  )
}
dev.off()
|
b15cf564f5aa2de8b08c260fa55a11f2717729ff | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/robustbase/examples/NOxEmissions.Rd.R | c7f0cdd2e4bfef912af992f34212ef6aea1025e6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 819 | r | NOxEmissions.Rd.R | library(robustbase)
### Name: NOxEmissions
### Title: NOx Air Pollution Data
### Aliases: NOxEmissions
### Keywords: datasets
### ** Examples
# Example code extracted from the robustbase NOxEmissions help page.
# Scatter of log NOx concentration against log NOx emission.
data(NOxEmissions)
plot(LNOx ~ LNOxEm, data = NOxEmissions, cex = 0.25, col = "gray30")
## Not run:
##D ## these take too much time --
##D ## p = 340 ==> already Least Squares is not fast
##D (lmNOx <- lm(LNOx ~ . ,data = NOxEmissions))
##D plot(lmNOx) #-> indication of 1 outlier
##D
##D M.NOx <- MASS::rlm(LNOx ~ . , data = NOxEmissions)
##D ## M-estimation works
##D ## whereas MM-estimation fails:
##D try(MM.NOx <- MASS::rlm(LNOx ~ . , data = NOxEmissions, method = "MM"))
##D ## namely because S-estimation fails:
##D try(lts.NOx <- ltsReg(LNOx ~ . , data = NOxEmissions))
##D try(lmR.NOx <- lmrob (LNOx ~ . , data = NOxEmissions))
## End(Not run)
|
7198fa2eed52ef9d2507026c189e36cb6d08b524 | 027572c07f286cb926750c53d2b21adfac56d7b8 | /inst/examples/updatesummary.R | c010d643de0d01328a87f976deaf198e5af11286 | [] | no_license | gregorkastner/stochvol | 1cf13d9a85f8663c9af2c7c4fe47353ed9c5722f | 5b57ce128d881f9404af91a37dadea49d6afd07d | refs/heads/master | 2023-04-16T04:17:12.993453 | 2023-04-04T20:23:27 | 2023-04-04T20:23:27 | 105,893,704 | 13 | 10 | null | 2023-04-04T20:19:18 | 2017-10-05T13:32:15 | R | UTF-8 | R | false | false | 503 | r | updatesummary.R | ## Here is a baby-example to illustrate the idea.
## Simulate an SV time series of length 51 with default parameters:
sim <- svsim(51)
## Draw from the posterior:
res <- svsample(sim$y, draws = 2000, priorphi = c(10, 1.5))
## Check out the results:
summary(res)
plot(res)
## Look at other quantiles and calculate ESS of latents:
newquants <- c(0.01, 0.05, 0.25, 0.5, 0.75, 0.95, 0.99)
res <- updatesummary(res, quantiles = newquants, esslatent = TRUE)
## See the difference?
summary(res)
plot(res)
|
87caeaf393f02848531e3f6aeb45930e90e3f5cf | 3dd5ebadd04f3104bc983a667ebb0b6e88485496 | /plot2.R | db4e721c14cb99086efb9116bf0f25db60fdb4d4 | [] | no_license | aaronghunter/ExData_Plotting1 | 8933d1b8bf29d42cef9b39e8aa3065bd404a9d0c | a84fd78cf10d5ebd0267a0de3ea736d19e490830 | refs/heads/master | 2021-01-12T12:58:52.106757 | 2016-10-01T23:17:55 | 2016-10-01T23:17:55 | 69,766,163 | 0 | 0 | null | 2016-10-01T21:59:59 | 2016-10-01T21:59:58 | null | UTF-8 | R | false | false | 473 | r | plot2.R | datafile <- "./Data/household_power_consumption.txt"
# Read the full data set; "?" entries keep Global_active_power non-numeric,
# hence the as.numeric(as.character(...)) conversion below.
data <- read.table(datafile, header = TRUE, sep = ";", dec = ".")

# Keep only the two target days (dates are in d/m/Y form in the raw file).
# Renamed from `subset` to avoid shadowing base::subset().
twodays <- data[data$Date %in% c("1/2/2007","2/2/2007"),]

# Combine date and time into POSIXlt timestamps.
# Fix: the original call spelled the argument `sept`, which paste() silently
# absorbed into `...` (appending a stray " " element) instead of using it as
# the separator.
dt <- strptime(paste(twodays$Date, twodays$Time, sep = " "), "%d/%m/%Y %H:%M:%S")

# Line plot of global active power over the two days.
plot(dt, as.numeric(as.character(twodays$Global_active_power)), type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)") #Plot Type: Line

# Save the screen device to a 480x480 PNG (closed by dev.off() below).
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off() |
f6bd2b0878a3073b34ed3acfa0aa2525cd9bb84a | 30d3fde9fe6a1df1cddc0fb50d8f8ba89328ad47 | /scripts/wetness_tables.R | e3d839059f5030c9d3d8d8f3e42b98b96fdc0a17 | [] | no_license | jwbannister/sfwcRft | 7a03015dadc679b2a99eaa4566196b975965c4be | 5fd102f0b2927c1c878e736e53edb290a06d2792 | refs/heads/master | 2020-09-14T11:07:35.347486 | 2016-11-22T23:52:02 | 2016-11-22T23:52:02 | 67,552,867 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,936 | r | wetness_tables.R | rm(list=ls())
# Load this package (sfwcRft) and the companion owensData package, which
# provide wet_record, tran_record, cdat, etc.
load_all()
load_all("~/code/owensData")
library(tidyverse)
library(ggplot2)
library(reshape2)
# SWIR-based wetness: one row per DCA x target wetness, one column per period
swir_tbl <- wet_record %>% filter(method=='swir') %>% select(-method) %>%
  dcast(dca + trgtwet ~ period)
# ground-transect wetness in the same wide layout, with the survey periods
# put in chronological order before casting
tran_melt <- tran_record %>% select(dca, trgtwet, period, wet)
tran_melt$period <- ordered(tran_melt$period,
                            levels=c('0515', '1115', '1215', '0116', '0316',
                                     '0416', '0516'))
tran_tbl <- tran_melt %>% arrange(period) %>% dcast(dca + trgtwet ~ period)
# add the DCA/target combinations that have no transect data
tran_tbl <- rbind(tran_tbl, c("T13", 75, rep(NA, 7)))
tran_tbl <- rbind(tran_tbl, c("T29", 0, rep(NA, 3), 0, rep(NA, 3)))
tran_tbl <- arrange(tran_tbl, dca, trgtwet)
tran_tbl$trgtwet <- as.integer(tran_tbl$trgtwet)
# long-format pairing of SWIR vs transect wetness per DCA/target/period
swir_trans <- inner_join(melt(swir_tbl, id.vars=c("dca", "trgtwet"),
                              variable.name="period", value.name="swir"),
                         melt(tran_tbl, id.vars=c("dca", "trgtwet"),
                              variable.name="period", value.name="tran"),
                         by=c("dca", "trgtwet", "period"))
# the rbind() of character rows above coerced tran_tbl to character;
# restore numeric wetness values
swir_trans$tran <- as.numeric(swir_trans$tran)
# write table of wetness for use in dustReport
wet_tmp <- wet_record %>% filter(method=="swir") %>%
  select(period, dca, trgtwet, swir=wet)
tran_tmp <- tran_record %>%
  select(period, dca, trgtwet, trans=wet)
full_wet <- select(wet_record, period, dca, trgtwet) %>%
  left_join(wet_tmp, by=c("period", "dca", "trgtwet")) %>%
  left_join(tran_tmp, by=c("period", "dca", "trgtwet"))
save(full_wet, file="~/code/sfwcRft/data/wet_table.RData")
# splice SWIR and transect columns into one monthly composite table
# NOTE(review): tran_tbl column 6 appears twice (tran_tbl[ , 6] and
# tran_tbl[ , 6:7]) -- confirm that duplication is intended.
composite_tbl <- cbind(swir_tbl[ , 1:7], tran_tbl[ , 6], tran_tbl[ , 6:7],
                       swir_tbl[ , 8:10])
names(composite_tbl)[8:9] <- c("0116", "0216")
write.csv(composite_tbl,
          file="~/dropbox/owens/sfwcrft/code_output/composite_table.csv",
          row.names=F)
# scatter of SWIR vs ground wetness (percent) with a 1-1 reference line
ab_df <- data.frame(int=0, slp=1)
swir_trans_plot <- swir_trans[complete.cases(swir_trans), ] %>%
  ggplot(aes(x=swir*100, y=tran*100)) +
  geom_point(aes(color=dca)) +
  scale_x_continuous(name="SWIR Estimated Wetness (%)",
                     breaks=seq(0, 100, 10)) +
  scale_y_continuous(name="Ground Estimated Wetness (%)",
                     breaks=seq(0, 100, 10)) +
  scale_color_brewer(name="DCA", palette="Set1") +
  geom_abline(data=ab_df, mapping=aes(intercept=int, slope=slp,
                                      linetype="1-1 Line"), color="blue") +
  scale_linetype(name="")
png(filename="~/dropbox/owens/sfwcrft/code_output/swir_trans_plot.png",
    height=6, width=6, units="in", res=300)
swir_trans_plot
dev.off()
# wide table with paired swir.<period> / tran.<period> columns
swir_tmp <- swir_trans %>% mutate(var=paste0("swir.", period)) %>%
  select(-period, -tran) %>%
  dcast(dca + trgtwet ~ var, value.var='swir')
tran_tmp <- swir_trans %>% mutate(var=paste0("tran.", period)) %>%
  select(-period, -swir) %>%
  dcast(dca + trgtwet ~ var, value.var='tran')
swir_trans_tbl <- inner_join(swir_tmp, tran_tmp, by=c("dca", "trgtwet"))
write.csv(swir_trans_tbl,
          file="~/dropbox/owens/sfwcrft/code_output/swir_trans_table.csv",
          row.names=F)
# presentation copy of the SWIR table: survey dates as column names,
# full DCA labels, and wetness expressed as percentages
names(swir_tbl) <- c("DCA", "Target Wetness", "Apr 16, 2015", "June 8, 2015",
                     "June 20, 2015", "Nov 30, 2015", "Dec 1, 2015",
                     "Apr 26, 2016", "May 27, 2016", "June 24, 2016")
swir_tbl$DCA[swir_tbl$DCA=="T10"] <- "T10-1b"
swir_tbl$DCA[swir_tbl$DCA=="T29"] <- "T29-2"
swir_tbl$DCA[swir_tbl$DCA=="T13"] <- "T13-1"
swir_tbl$'Target Wetness' <- paste0(swir_tbl$'Target Wetness', "%")
# convert the wetness fractions in the data columns to percentages
for (i in 3:10){
  swir_tbl[ , i] <- swir_tbl[ , i] * 100
}
write.csv(swir_tbl,
          file="~/dropbox/owens/sfwcrft/code_output/swir_table.csv",
          row.names=F)
# same presentation treatment for the transect table
names(tran_tbl) <- c("DCA", "Target Wetness", "May 2015", "November 2015",
                     "December 2015", "January 2016", "March 2016",
                     "April 2016", "May 2016")
tran_tbl$DCA[tran_tbl$DCA=="T10"] <- "T10-1b"
tran_tbl$DCA[tran_tbl$DCA=="T29"] <- "T29-2"
tran_tbl$DCA[tran_tbl$DCA=="T13"] <- "T13-1"
tran_tbl$'Target Wetness' <- paste0(tran_tbl$'Target Wetness', "%")
for (i in 3:9){
  tran_tbl[ , i] <- tran_tbl[ , i] * 100
}
write.csv(tran_tbl,
          file="~/dropbox/owens/sfwcrft/code_output/transect_table.csv",
          row.names=F)
library(gridExtra)
# Extract the legend ("guide-box") grob from a ggplot object, e.g. to draw
# one shared legend in a grid.arrange() panel.
#
# a.gplot: a ggplot object that actually draws a legend (with
#          legend.position = "none" no guide-box grob exists).
# Returns the legend grob; stops with an informative error when absent.
g_legend <- function(a.gplot){
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  # vapply() guarantees a character vector even for zero grobs, where
  # sapply() would return a list and the comparison would misbehave.
  idx <- which(vapply(tmp$grobs, function(x) x$name, character(1)) == "guide-box")
  if (length(idx) == 0) {
    stop("g_legend: plot has no legend (guide-box) grob", call. = FALSE)
  }
  # Use the first guide-box if more than one is present.
  tmp$grobs[[idx[1]]]
}
# Small-multiple "tracking" panels: SWIR wetness by survey period for each
# DCA x target-wetness combination in the composite table.
wet_plot_df <- composite_tbl
wet_plot_melt <- reshape2::melt(wet_plot_df, id.vars=c("dca", "trgtwet"))
# put the survey periods (MMYY codes) in chronological order
wet_plot_melt$variable <- ordered(wet_plot_melt$variable,
                                  levels=c("0415", "0515", "0615", "1115",
                                           "1215", "0116", "0216", "0316",
                                           "0416", "0516", "0616"))
# blank out the T26 April 2015 value
wet_plot_melt[wet_plot_melt$dca=='T26' &
              wet_plot_melt$variable=='0415', ]$value <- NA
wet_plot_melt$trgtwet <- as.numeric(gsub("%", "", wet_plot_melt$trgtwet))
wet_plot_melt$value <- as.numeric(gsub("%", "", wet_plot_melt$value))
# copy with additional DCA/period combinations excluded from the figure
wet_plot_melt1 <- wet_plot_melt
wet_plot_melt1[wet_plot_melt1$variable %in% c("0116", "0216", "0316"), ]$value <- NA
wet_plot_melt1[wet_plot_melt1$dca=='T26' & wet_plot_melt1$variable=='0515', ]$value <- NA
wet_plot_melt1[wet_plot_melt1$dca=='T29' & (wet_plot_melt1$variable %in% c('0415', '0515', '0615')), ]$value <- NA
# nested list of ggplots: wet_tracking[[DCA label]][[target wetness]]
wet_tracking <- vector(mode="list", length=4)
names(wet_tracking) <- c("T10-1b", "T26", "T13-1", "T29-2")
for (i in names(wet_tracking)){
  # match on the short DCA code (first three characters of the label)
  dca_df <- wet_plot_melt1 %>% filter(dca==substr(i, 1, 3))
  wet_tracking[[i]] <- vector(mode="list",
                              length=length(unique(dca_df$trgtwet)))
  names(wet_tracking[[i]]) <- unique(dca_df$trgtwet)
  for (j in unique(dca_df$trgtwet)){
    plot_df <- dca_df %>% filter(trgtwet==j)
    # horizontal reference lines: the target wetness and the observed mean
    ref_lines <- data.frame(type=c("Target Wetness", "Average SWIR Wetness"),
                            intercept=c(j*0.01, mean(plot_df$value, na.rm=T)),
                            slope=c(0, 0), x=wet_plot_melt$variable[1])
    # NOTE(review): avg_wet is never used below (and lacks na.rm) -- confirm.
    avg_wet <- mean(plot_df$value)
    wet_tracking[[i]][[as.character(j)]] <- plot_df %>%
      ggplot(aes(x=variable, y=value)) +
      geom_abline(mapping=aes(intercept=intercept, linetype=type, slope=slope),
                  data=ref_lines, color="red") +
      geom_path(aes(group=dca, color="SWIR Image Result")) +
      theme(axis.text.y=element_blank(),
            # axis.text.x=element_blank(),
            axis.title.x=element_blank(),
            axis.title.y=element_blank(),
            axis.ticks.x=element_blank(),
            axis.ticks.y=element_blank(),
            plot.title=element_text(size=8),
            legend.position="none",
            legend.title=element_blank()) +
      scale_color_manual(name="", values=c("blue")) +
      scale_y_continuous(breaks=seq(0, 1, .1), limits=c(0, 1)) +
      # single-letter month labels along the x axis
      scale_x_discrete(labels=c("A", "M", "J", "N", "D", "J", "F", "M", "A",
                                "M", "J")) +
      ggtitle(paste0(i, " ", j, "%"))
  }
}
# NOTE(review): the g_legend() extraction below is commented out, yet `lgnd`
# is still passed to grid.arrange() at the end of this block, so this
# section errors unless lgnd survives in the workspace from an earlier run.
# The panels are also built with legend.position = "none", so g_legend()
# would find no guide-box grob -- confirm how the shared legend is produced.
#lgnd <- g_legend(wet_tracking[[1]][[1]])
#print(grid.arrange(lgnd))
#leg.file <- tempfile()
#save(lgnd, file=leg.file)
# 3-row panel of all DCA/target tracking plots plus the shared legend
png(filename="~/Desktop/swir_panel.png",
    height=6, width=10, units="in", res=300)
grid.arrange(wet_tracking[[1]][[1]],
             wet_tracking[[1]][[2]],
             wet_tracking[[1]][[3]],
             wet_tracking[[1]][[4]],
             wet_tracking[[1]][[5]],
             wet_tracking[[2]][[1]],
             wet_tracking[[2]][[2]],
             wet_tracking[[2]][[3]],
             wet_tracking[[2]][[4]],
             wet_tracking[[2]][[5]],
             wet_tracking[[3]][[1]],
             wet_tracking[[3]][[2]],
             wet_tracking[[4]][[1]],
             wet_tracking[[4]][[2]],
             lgnd, nrow=3)
dev.off()
|
ddbd0bc70109664ce938bd1507298aa71ead455c | 631946b597e3f80584e146e10510c2d468f52d59 | /analysis_notes/PartialAUC.r | b220a4eafe21a751f07c54dbd14f91f95088c2df | [] | no_license | nyukat/MRI_AI | ebea6b181ac481b42337dcbd4c0c06c9f74b65f1 | fe792d37dfbb87135e13914e72cdd96fdf0b8309 | refs/heads/master | 2023-04-10T20:58:55.962236 | 2022-08-13T17:34:23 | 2022-08-13T17:34:23 | 421,948,410 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,127 | r | PartialAUC.r | # Samples on how to run partial AUC calculations
# install.packages("pROC")
library(pROC)
# bundled pROC example data (aSAH outcomes with the s100b biomarker)
data(aSAH)
# plot partial AUC ROC for 90-100% sensitivity
roc(aSAH$outcome, aSAH$s100b, percent=TRUE, partial.auc=c(100,90), partial.auc.correct=TRUE, partial.auc.focus="sens", plot=TRUE, auc.polygon=TRUE)
# plot partial AUC ROC for 90-100% specificity
roc(aSAH$outcome, aSAH$s100b, percent=TRUE, partial.auc=c(100,90), partial.auc.correct=TRUE, partial.auc.focus="spec", plot=TRUE, auc.polygon=TRUE)
# with CI bootstrapped
roc(aSAH$outcome, aSAH$s100b, percent=TRUE, partial.auc=c(100,90), partial.auc.correct=TRUE, partial.auc.focus="spec", plot=TRUE, auc.polygon=TRUE, ci=TRUE, boot.n=2000)
####
# same calls applied to our own scored data (assumes X2021_04_06_for_auc is
# already loaded, with `malignant` labels and model `probability` scores)
roc(X2021_04_06_for_auc$malignant, X2021_04_06_for_auc$probability, plot=TRUE, print.auc=TRUE)
# partial AUC over 80-100% sensitivity, with bootstrap CI; pAUC label at x=60
roc(X2021_04_06_for_auc$malignant, X2021_04_06_for_auc$probability, percent=TRUE, partial.auc=c(100,80), partial.auc.correct=TRUE, partial.auc.focus="sens", plot=TRUE, print.auc=TRUE, auc.polygon=TRUE, ci=TRUE, show.thres=TRUE, print.auc.x=60)
# more details
# https://github.com/xrobin/pROC
# https://web.expasy.org/pROC/screenshots.html
f73ef935245662a55f65cdf3735b71319bdb1218 | c5fb23242d4049e3cd556078d1a7bde163ba69f1 | /man/coeffCompare.Rd | 473f63afa14231038459cc9b4505d9c18d76f852 | [] | no_license | guiblanchet/vegan | 4eac5c5ca19ed1683d51646513121dee41847e13 | 6c27b62a2b74be3dfb3210d38060ac2282a03ff0 | refs/heads/master | 2021-01-21T02:46:11.718799 | 2015-06-24T16:08:45 | 2015-06-24T16:08:45 | 23,608,303 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,870 | rd | coeffCompare.Rd | \name{coeffCompare}
\alias{coeffCompare}
\title{Compare dissimilarities used within the RDA framework}
\description{
This function compares association coefficients used through the RDA
framework with a minimum spanning tree. It was designed to compare how
  information explained by one dissimilarity coefficient diverges from
the information explained by another. The comparison is made
simultaneously on the sites scores, the species scores and the
canonical coefficients.
}
\usage{
coeffCompare(ordires, ordisigniaxis, pval = 0.05)
}
\arguments{
\item{ordires}{
A list of \code{\link[vegan]{rda}} or \code{\link[vegan]{capscale}}
result object that includes a series of RDA or distance-based RDA
(db-RDA) performed with different association coefficients on the same
data.
}
\item{ordisigniaxis}{
A list of \code{\link[vegan]{anova.cca}} object where each axis of each
RDA (or db-RDA) given in \code{ordires} was tested for significance.
This argument can also be a vector defining the number of significant
axes in each RDA. See details.
}
\item{pval}{
Numeric. P-value threshold to select the number of axes to use. This
argument is only active if a list of \code{\link[vegan]{anova.cca}}
object is given for the argument \code{ordisigniaxis}, otherwise it is
not considered. Default is 0.05.
}
}
\details{
For the argument \code{ordisigniaxis}, if a vector defining the number
of significant axes is given, it is assumed that the significant axes
are selected in sequential order from the first axis.
  The comparison made here relies on the RV coefficient \code{\link{RV}},
  a multivariate generalization of the squared Pearson's correlation in which
  matrices with the same number of rows are compared.
\code{coeffCompare} should be used prior to using
\code{\link{consensusRDA}} because it informs the user about the
different association coefficients considered interesting to perform a
consensus RDA. An association coefficient presenting results too
different from the others should not be included in the consensus RDA,
it should be considered apart or discarded.
}
\value{
\item{RVmat}{A resemblance matrix of RV coefficients calculated from the
sites scores matrices of RDA for all pairs of association coefficients.}
\item{mst}{minimum spanning tree calculated on (1-siteRVmat).}
}
\author{
F. Guillaume Blanchet
}
\seealso{
\code{\link{RV}}, \code{\link{consensusRDA}}
}
\examples{
###################################################################
### This example reproduces Figure 7b of Blanchet et al. (in press)
###################################################################
data(beetle)
data(beetle.expl)
### Construct results object
ndis<-10
ordiRes<-vector("list",length=ndis)
#---------------------------------------------
### Perform the various constrained ordination
#---------------------------------------------
### RDA species profile
sp<-beetle/apply(beetle,1,sum)
ordiRes[[1]]<-rda(sp~.,data=beetle.expl)
### RDA chord
chord<-beetle/sqrt(apply(beetle^2,1,sum))
ordiRes[[2]]<-rda(chord~.,data=beetle.expl)
### RDA Hellinger
hell<-decostand(beetle,method="hellinger")
ordiRes[[3]]<-rda(hell~.,data=beetle.expl)
### RDA chi2
chisq<-decostand(beetle,method="chi.square")
ordiRes[[4]]<-rda(chisq~.,data=beetle.expl)
### db-RDA Bray-Curtis
bray<-sqrt(vegdist(beetle,method="bray"))
ordiRes[[5]]<-capscale(bray~.,data=beetle.expl,comm=beetle)
### db-RDA square-root Bray-Curtis
bray.sqrt<-sqrt(vegdist(beetle^0.5,method="bray"))
ordiRes[[6]]<-capscale(bray.sqrt~.,data=beetle.expl,comm=beetle^0.5)
### db-RDA fourth-root Bray-Curtis
bray.fort<-sqrt(vegdist(beetle^0.25,method="bray"))
ordiRes[[7]]<-capscale(bray.fort~.,data=beetle.expl,comm=beetle^0.25)
### db-RDA modified Gower log 2
beetleLog2<-decostand(beetle, "log",logbase=2)
mGowerLog2<-vegdist(beetleLog2, "altGower")
ordiRes[[8]]<-capscale(mGowerLog2~.,data=beetle.expl,comm=beetleLog2)
### db-RDA modified Gower log 5
beetleLog5<-decostand(beetle, "log",logbase=5)
mGowerLog5<-vegdist(beetleLog5, "altGower")
ordiRes[[9]]<-capscale(mGowerLog5~.,data=beetle.expl,comm=beetleLog5)
### db-RDA modified Gower log 10
beetleLog10<-decostand(beetle, "log",logbase=10)
mGowerLog10<-vegdist(beetleLog10, "altGower")
ordiRes[[10]]<-capscale(mGowerLog10~.,data=beetle.expl,comm=beetleLog10)
### Compare association coefficients
AssoComp<-coeffCompare(ordiRes,rep(7,ndis))
#---------------------------------------------
### Draw a graphic to visualize the comparison
#---------------------------------------------
### Name of association coefficient compared
name<-c("Species profiles","Chord","Hellinger","Chi2","Bray-Curtis",
"(Bray-Curtis)^0.5","(Bray-Curtis)^0.25",
"mGowerlog2","mGowerlog5","mGowerlog10")
plot(AssoComp$mst,type="t",labels=name,xlab="",ylab="",
main="MST Sites scores")
}
\keyword{ multivariate }
|
b81f6255b24859db30519c0a45de00967665412b | 86cea3aab2a52d745af6e15ba361ca0c5a1922d9 | /man/part_kmeans.Rd | 23cca7dc0dfd6fb2e745f5d3bff0d004681b34ee | [
"MIT"
] | permissive | USCbiostats/partition | 27015e9d3d562de8aafb3699e7acf2ec87c88bd7 | a25cf12c9cb091f1f03ff1d7af062ec278faeb8e | refs/heads/master | 2023-01-09T03:57:00.328743 | 2022-12-23T19:38:29 | 2022-12-23T19:38:29 | 178,615,892 | 35 | 6 | NOASSERTION | 2022-12-23T19:38:30 | 2019-03-30T22:02:50 | HTML | UTF-8 | R | false | true | 2,452 | rd | part_kmeans.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partitioners.R
\name{part_kmeans}
\alias{part_kmeans}
\title{Partitioner: K-means, ICC, scaled means}
\usage{
part_kmeans(
algorithm = c("armadillo", "Hartigan-Wong", "Lloyd", "Forgy", "MacQueen"),
search = c("binary", "linear"),
init_k = NULL,
n_hits = 4
)
}
\arguments{
\item{algorithm}{The K-Means algorithm to use. The default is a fast version
of the LLoyd algorithm written in armadillo. The rest are options in
\code{\link[=kmeans]{kmeans()}}. In general, armadillo is fastest, but the other algorithms can
be faster in high dimensions.}
\item{search}{The search method. Binary search is generally more efficient
but linear search can be faster in very low dimensions.}
\item{init_k}{The initial k to test. If \code{NULL}, then the initial k is the
threshold times the number of variables.}
\item{n_hits}{In linear search method, the number of iterations that should
be under the threshold before reducing; useful for preventing false
positives.}
}
\value{
a \code{partitioner}
}
\description{
Partitioners are functions that tell the partition algorithm 1)
what to try to reduce 2) how to measure how much information is lost from
the reduction and 3) how to reduce the data. In partition, functions that
handle 1) are called directors, functions that handle 2) are called
metrics, and functions that handle 3) are called reducers. partition has a
number of pre-specified partitioners for agglomerative data reduction.
Custom partitioners can be created with \code{\link[=as_partitioner]{as_partitioner()}}.
Pass \code{partitioner} objects to the \code{partitioner} argument of \code{\link[=partition]{partition()}}.
\code{part_kmeans()} uses the following direct-measure-reduce approach:
\itemize{
\item \strong{direct}: \code{direct_k_cluster()}, K-Means Clusters
\item \strong{measure}: \code{measure_min_icc()}, Minimum Intraclass Correlation
\item \strong{reduce}: \code{reduce_kmeans()}, Scaled Row Means
}
}
\examples{
set.seed(123)
df <- simulate_block_data(c(3, 4, 5), lower_corr = .4, upper_corr = .6, n = 100)
# fit partition using part_kmeans()
partition(df, threshold = .6, partitioner = part_kmeans())
}
\seealso{
Other partitioners:
\code{\link{as_partitioner}()},
\code{\link{part_icc}()},
\code{\link{part_minr2}()},
\code{\link{part_pc1}()},
\code{\link{part_stdmi}()},
\code{\link{replace_partitioner}()}
}
\concept{partitioners}
|
fb446021ad6a85f23049ca4bd2d422a2b8afc41c | 339867edfaf54148f797beecd1420182b7c66520 | /src/Old_code_cgv/Cov_matrix_meanSE_continuous.R | 06fc52260557cbffd2b3d18ca2051e48e2edcec6 | [] | no_license | RCN-ECS/CnGV | d82946dc3969c101a02bcf09db32fa533a008d85 | 4b362f891770bb6286db5195f58d418c353f2f79 | refs/heads/master | 2023-03-19T15:04:58.192221 | 2021-12-15T16:49:48 | 2021-12-15T16:49:48 | 189,079,692 | 1 | 6 | null | null | null | null | UTF-8 | R | false | false | 9,475 | r | Cov_matrix_meanSE_continuous.R | Cov_matrix_meanSE_continuous <- function(genphenenv_df){
# Estimate slopes
newdat = data.frame()
newdat = meansSE_boot_cont(genphenenv_df, 20)
# Data generation via multiple Regression
phen1 = c(intercept_G1 + slope_G1[d] * env)
plot_dat = (unique(newdat$intercept)) + # Intercept
((newdat[1,2])*(unique(genphenenv_df$exp_env_cat))) + #G1 estimates
((newdat[2,2])*(unique(genphenenv_df$exp_env_cat))) #G2 estimates
for(i in 1:nrow(genphenenv_df)){
n = genphenenv_df$phen_n[i]
u = genphenenv_df$phen_data[i]
SE = genphenenv_df$phen_mean_SE[i]
dat = rnorm(n,u,SE)
newdat_temp = data.frame(gen_factor = rep(genphenenv_df$gen_factor[i],n),
nat_env_mean = rep(genphenenv_df$nat_env_mean[i],n),
Native_env_cat = rep(genphenenv_df$Native_env_cat[i],n),
nat_env_factor = rep(genphenenv_df$nat_env_factor[i],n),
exp_env_cont = rep(genphenenv_df$exp_env_cont[i],n),
exp_env_cat = rep(genphenenv_df$exp_env_cat[i],n),
exp_env_factor = rep(genphenenv_df$exp_env_factor[i],n),
phen_n = rep(n,n),
phen_data = dat)
newdat = rbind(newdat_temp,newdat)
}
# Standardize data
phen_mean = mean(newdat$phen_data)
phen_sd = sd(newdat$phen_data)
nat_env_mean = mean(newdat$nat_env_mean)
nat_env_sd = sd(newdat$nat_env_mean)
env_avg = mean(newdat$exp_env_cont)
env_std = sd(newdat$exp_env_cont)
newdat$phen_corrected = ((newdat$phen_data-phen_mean)/phen_sd)
newdat$env_corrected = ((newdat$exp_env_cont-env_avg)/env_std)
newdat$native_env_corrected = ((newdat$nat_env_mean-env_avg)/env_std)
# Model Comparison
test_temp_a = lm(phen_corrected ~ env_corrected + gen_factor, data = newdat)
test_temp_b = lm(phen_corrected ~ env_corrected * gen_factor, data = newdat)
result = anova(test_temp_a,test_temp_b)
# Model Outputs
if(result[[2,6]] > 0.05){
test_temp = lm(phen_corrected ~ env_corrected + gen_factor, data = newdat)
# Model diagnostics
res = residuals(test_temp)
shap_wilkes = shapiro.test(res)
is.normal <- NULL
if(shap_wilkes[[2]] > 0.05){is.normal = "Yes"}else{is.normal = "No"}
if(length(unique(genphenenv_df$env_corrected))>2){ # Wont work if only 2 environments
test_nonlinear = lm(phen_corrected ~ (exp_env_factor^2) + gen_factor, data = genphenenv_df) #Polynomial
lin_test = lrtest(test_temp,test_nonlinear) # If p > 0.05, then data likely non-linear
is.linear <- NULL
if(lin_test[[2,5]] > 0.05){is.linear = "No"}else{is.linear = "Yes"}
}else{is.linear <- "NA"}
# Extract model outputs
lm_result = "No_GxE"
GxE_pval = result[[2,6]]
emm_E = emmeans(test_temp, ~ env_corrected)
emm_G = emmeans(test_temp, ~ gen_factor)
emm_GxE = NA
E_R2 = summary(aov(test_temp))[[1]][1,2]/sum(summary(aov(test_temp))[[1]][,2])
G_R2 = summary(aov(test_temp))[[1]][2,2]/sum(summary(aov(test_temp))[[1]][,2])
GxE_R2 = NA
w2_env <- (summary(aov(test_temp))[[1]][1,2]-summary(aov(test_temp))[[1]][1,1]*summary(aov(test_temp))[[1]][3,3])/
(sum(summary(aov(test_temp))[[1]][,2])+summary(aov(test_temp))[[1]][3,3])
w2_gen <- (summary(aov(test_temp))[[1]][2,2]-summary(aov(test_temp))[[1]][2,1]*summary(aov(test_temp))[[1]][3,3])/
(sum(summary(aov(test_temp))[[1]][,2])+summary(aov(test_temp))[[1]][3,3])
w2_GxE <- NA
mod_dat = data.frame()
for(i in 1:length(test_temp[[1]])){
id = names(test_temp[[1]][i])
coef = test_temp[[1]][[i]]
lwr_CI = confint(test_temp)[i,1]
upr_CI = confint(test_temp)[i,2]
pval = summary(test_temp)[[4]][i,4]
mod_dat. = data.frame("id" = id,
"coef" = coef,
"lwr_CI" = lwr_CI,
"upr_CI" = upr_CI,
"pval" = pval)
mod_dat = rbind(mod_dat,mod_dat.)
}
# Generate Matrices for Covariance and Permutations
cov_temp <- data.frame(gen_factor = rep(genphenenv_df$gen_factor,
each = length(seq(from = min(genphenenv_df$env_corrected),
to = max(genphenenv_df$env_corrected), by = 0.1))),
env_corrected = seq(from = min(genphenenv_df$env_corrected),
to = max(genphenenv_df$env_corrected), by = 0.1))
cov_temp$phen_predicted = predict(test_temp,cov_temp)
}else{
test_temp = lm(phen_corrected ~ env_corrected * gen_factor, data = newdat)
# Model diagnostics
res = residuals(test_temp)
shap_wilkes = shapiro.test(res)
is.normal <- NULL
if(shap_wilkes[[2]] > 0.05){is.normal = "Yes"}else{is.normal = "No"}
if(length(unique(genphenenv_df$env_corrected))>2){ # Wont work if only 2 environments
test_nonlinear = lm(phen_corrected ~ (exp_env_factor^2) + gen_factor, data = genphenenv_df) #Polynomial
lin_test = lrtest(test_temp,test_nonlinear) # If p > 0.05, then data likely non-linear
is.linear <- NULL
if(lin_test[[2,5]] > 0.05){is.linear = "No"}else{is.linear = "Yes"}
}else{is.linear <- "NA"}
# Extract model outputs
lm_result = "Yes_GxE"
GxE_pval = result[[2,6]]
emm_E = emmeans(test_temp, ~ env_corrected)
emm_G = emmeans(test_temp, ~ gen_factor)
emm_GxE = emmeans(test_temp, ~ env_corrected*gen_factor)
E_R2 = summary(aov(test_temp))[[1]][1,2]/sum(summary(aov(test_temp))[[1]][,2])
G_R2 = summary(aov(test_temp))[[1]][2,2]/sum(summary(aov(test_temp))[[1]][,2])
GxE_R2 = summary(aov(test_temp))[[1]][3,2]/sum(summary(aov(test_temp))[[1]][,2])
w2_env <- (summary(aov(test_temp))[[1]][1,2]-summary(aov(test_temp))[[1]][1,1]*summary(aov(test_temp))[[1]][4,3])/
(sum(summary(aov(test_temp))[[1]][,2])+summary(aov(test_temp))[[1]][4,3])
w2_gen <- (summary(aov(test_temp))[[1]][2,2]-summary(aov(test_temp))[[1]][2,1]*summary(aov(test_temp))[[1]][4,3])/
(sum(summary(aov(test_temp))[[1]][,2])+summary(aov(test_temp))[[1]][4,3])
w2_GxE <- (summary(aov(test_temp))[[1]][3,2]-summary(aov(test_temp))[[1]][3,1]*summary(aov(test_temp))[[1]][4,3])/
(sum(summary(aov(test_temp))[[1]][,2])+summary(aov(test_temp))[[1]][4,3])
mod_dat = data.frame()
for(i in 1:length(test_temp[[1]])){
id = names(test_temp[[1]][i])
coef = test_temp[[1]][[i]]
lwr_CI = confint(test_temp)[i,1]
upr_CI = confint(test_temp)[i,2]
pval = summary(test_temp)[[4]][i,4]
mod_dat. = data.frame("id" = id,
"coef" = coef,
"lwr_CI" = lwr_CI,
"upr_CI" = upr_CI,
"pval" = pval)
mod_dat = rbind(mod_dat,mod_dat.)
}
# Generate Matrices for Covariance and Permutations
cov_temp <- data.frame(gen_factor = rep(newdat$gen_factor,
each = length(seq(from = min(newdat$env_corrected),
to = max(newdat$env_corrected), by = 0.1))),
env_corrected = seq(from = min(newdat$env_corrected),
to = max(newdat$env_corrected), by = 0.1))
cov_temp$phen_predicted = predict(test_temp,cov_temp)
}
# Re-assign environmental variables to model predicted data
cov_temp$native_env_corrected <- newdat$native_env_corrected[match(cov_temp$gen_factor,newdat$gen_factor)]
cov_temp$env_corrected <- round(cov_temp$env_corrected, digits = 1)
cov_temp$native_env_corrected <- round(newdat$native_env_corrected[match(cov_temp$gen_factor,newdat$gen_factor)],digits =1)
# Covariance Emeans and Gmeans
Cov_matrix = data.frame()
# G_means
E_hat = round(mean(cov_temp$env_corrected),digits = 1)
G_means = filter(cov_temp, env_corrected == E_hat)
# E_means
E_mean_temp = data.frame()
for(m in 1:length(unique(cov_temp$native_env_corrected))){
E = unique(cov_temp$native_env_corrected)[m]
E_temp = cov_temp[which(cov_temp$env_corrected == E),]
E_mean = mean(unique(E_temp$phen_predicted))
E_mean_temp1 = data.frame("E_mean" = E_mean,
"env" = unique(E_temp$env))
E_mean_temp = rbind(E_mean_temp, E_mean_temp1)
}
Cov_matrix = data.frame("gen" = unique(cov_temp$gen),
"native_env" = as.factor(E_mean_temp[,2]),
"G_means" = unique(G_means$phen_predicted),
"E_means" = E_mean_temp[,1])
cov_est = cov(Cov_matrix$G_means,Cov_matrix$E_means)
model_specs = data.frame("Covariance_est" = cov_est,
"lm_result" = lm_result,
"GxE_pval" = GxE_pval,
"is.normal" = is.normal,
"is.linear" = is.linear,
"eta_G" = G_R2,
"eta_E" = E_R2,
"eta_GxE" = GxE_R2,
"w2_env" = w2_env,
"w2_gen" = w2_gen,
"w2_GxE" = w2_GxE)
return(list(newdat,mod_dat,Cov_matrix,model_specs))
}
|
fb05cf11a6d9239ddff3f3f9c47bde50fa986553 | 49ff0bc7c07087584b907d08e68d398e7293d910 | /mbg/mbg_core_code/mbg_central/LBDCore/man/get_fhs_population.Rd | 7612aeea9b8fb01ac3d27116f43302a7ec22a8ad | [] | no_license | The-Oxford-GBD-group/typhi_paratyphi_modelling_code | db7963836c9ce9cec3ca8da3a4645c4203bf1352 | 4219ee6b1fb122c9706078e03dd1831f24bdaa04 | refs/heads/master | 2023-07-30T07:05:28.802523 | 2021-09-27T12:11:17 | 2021-09-27T12:11:17 | 297,317,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,250 | rd | get_fhs_population.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_fhs_population.R
\name{get_fhs_population}
\alias{get_fhs_population}
\title{Get pre-saved FHS population outputs}
\usage{
get_fhs_population(population_version = "20190403_test_new_cluster_1000d_rerun_fix_draw_squeezed_agg_ordered",
pop_measure = "a0004t", sex_ids = 3, scenarios = 0,
year_ids = NULL, gbd_regions = NULL)
}
\arguments{
\item{population_version}{A version tag found from looking into: \code{/share/scratch/users/sadatnfs/geospatial_shared/population_fbd}.
Default: \code{"20190403_test_new_cluster_1000d_rerun_fix_draw_squeezed_agg_ordered"}}
\item{pop_measure}{WorldPop measure; only one allowed for now! Default: a0004t}
\item{gbd_regions}{Regions to query (GBD Location IDs). Default: NULL (get all regions)}
\item{sex_id}{Sex ID. Default: 3}
\item{scenario}{FHS scenario. Default: 0 (reference).}
\item{year_ids.}{Year_id to query. Default: NULL (get all years)}
}
\value{
A data.table with location_id, year_id, age_group_id = pop_measure, sex_id, run_id, population
}
\description{
Access pre-saved RDS population outputs from FHS (saved out by Nafis Sadat in \code{/share/scratch/users/sadatnfs/geospatial_shared/population_fbd})
}
|
d17bab6b852cdb34dffc4306fa51f4a68bdb571c | 772ab82a2665a8119ada68dddd8601fdcc08fbbb | /man/get.measures.Rd | 382d634d2bd4a4856d9104ef720adcb7df74dcec | [] | no_license | cran/boral | 67457f92ec2eb75dad0d58856b84631b821fceb1 | 8e1b8653ce162961b8fd744c4fb7dbdee3b600ec | refs/heads/master | 2021-07-15T04:16:20.039416 | 2021-03-12T08:50:02 | 2021-03-12T08:50:02 | 23,317,043 | 1 | 4 | null | null | null | null | UTF-8 | R | false | false | 9,951 | rd | get.measures.Rd | \name{get.measures}
\alias{get.measures}
\docType{package}
\title{Information Criteria for models}
\description{
\Sexpr[results=rd, stage=render]{lifecycle::badge("defunct")}
Calculates some information criteria for a fitted model, which could be used for model selection. WARNING: As of version 1.6, this function is no longer maintained (and probably doesn't work properly, if at all)!}
\usage{
get.measures(y, X = NULL, family, trial.size = 1, row.eff = "none",
row.ids = NULL, offset = NULL, num.lv, fit.mcmc)
}
\arguments{
\item{y}{The response matrix that the model was fitted to.}
\item{X}{The covariate matrix used in the model. Defaults to \code{NULL}, in which case it is assumed no model matrix was used.}
\item{family}{Either a single element, or a vector of length equal to the number of columns in the response matrix. The former assumes all columns of the response matrix come from this distribution. The latter option allows for different distributions for each column of the response matrix. Elements can be one of "binomial" (with probit link), "poisson" (with log link), "negative.binomial" (with log link), "normal" (with identity link), "lnormal" for lognormal (with log link), "tweedie" (with log link), "exponential" (with log link), "gamma" (with log link), "beta" (with logit link), "ordinal" (cumulative probit regression), "ztpoisson" (zero truncated Poisson with log link), "ztnegative.binomial" (zero truncated negative binomial with log link).
Please see \code{\link{about.distributions}} for information on distributions available in boral overall.
}
\item{trial.size}{Either equal to a single element, or a vector of length equal to the number of columns in y. If a single element, then all columns assumed to be binomially distributed will have trial size set to this. If a vector, different trial sizes are allowed in each column of y. The argument is ignored for all columns not assumed to be binomially distributed. Defaults to 1, i.e. Bernoulli distribution.}
\item{row.eff}{Single element indicating whether row effects are included as fixed effects ("fixed"), random effects ("random") or not included ("none") in the fitted model. If fixed effects, then for parameter identifiability the first row effect is set to zero, which analogous to acting as a reference level when dummy variables are used. If random effects, they are drawn from a normal distribution with mean zero and estimated standard deviation. Defaults to "none". }
\item{row.ids}{A matrix with the number of rows equal to the number of rows in the response matrix, and the number of columns equal to the number of row effects to be included in the model. Element \eqn{(i,j)} indicates the cluster ID of row \eqn{i} in the response matrix for random effect eqn{j}; please see \code{\link{boral}} for details. Defaults to \code{NULL}, so that if \code{row.eff = "none"} then the argument is ignored, otherwise if \cr \code{row.eff = "fixed"} or \code{"random"}, \cr then \code{row.ids = matrix(1:nrow(y), ncol = 1)} i.e., a single, row effect unique to each row.}
\item{offset}{A matrix with the same dimensions as the response matrix, specifying an a-priori known component to be included in the linear predictor during fitting. Defaults to \code{NULL}.}
\item{num.lv}{The number of latent variables used in the model.}
\item{fit.mcmc}{All MCMC samples for the fitted model. These can be extracted by fitting a model using \code{\link{boral}} with \code{save.model = TRUE}, and then applying \code{get.mcmcsamples(fit)}.}
}
\details{
The following information criteria are currently calculated, when permitted: 1) Widely Applicable Information Criterion (WAIC, Watanabe, 2010) based on the conditional log-likelihood; 2) expected AIC (EAIC, Carlin and Louis, 2011); 3) expected BIC (EBIC, Carlin and Louis, 2011); 4) AIC (using the marginal likelihood) evaluated at the posterior median; 5) BIC (using the marginal likelihood) evaluated at the posterior median.
1) WAIC has been argued to be more natural and extension of AIC to the Bayesian and hierarchical modeling context (Gelman et al., 2013), and is based on the conditional log-likelihood calculated at each of the MCMC samples.
2 & 3) EAIC and EBIC were suggested by (Carlin and Louis, 2011). Both criteria are of the form -2*mean(conditional log-likelihood) + penalty*(no. of parameters in the model), where the mean is averaged all the MCMC samples. EAIC applies a penalty of 2, while EBIC applies a penalty of \eqn{log(n)}.
4 & 5) AIC and BIC take the form -2*(marginal log-likelihood) + penalty*(no. of parameters in the model), where the log-likelihood is evaluated at the posterior median. If the parameter-wise posterior distributions are unimodal and approximately symmetric, these will produce similar results to an AIC and BIC where the log-likelihood is evaluated at the posterior mode. EAIC applies a penalty of 2, while EBIC applies a penalty of \eqn{log(n)}.
Intuitively, comparing models with and without latent variables (using information criteria such as those returned) amounts to testing whether the columns of the response matrix are correlated. With multivariate abundance data for example, where the response matrix comprises of \eqn{n} sites and \eqn{p} species, comparing models with and without latent variables tests whether there is any evidence of correlation between species.
Please note that criteria 4 and 5 are not calculated all the time. In models where traits are included in the model (such that the regression coefficients \eqn{\beta_{0j}, \bm{\beta}_j} are random effects), or more than two columns are ordinal responses (such that the intercepts \eqn{\beta_{0j}} for these columns are random effects), then criteria 4 and 5 are will not calculated. This is because the calculation of the marginal log-likelihood in such cases currently fail to marginalize over such random effects; please see the details in \code{calc.logLik.lv0} and \code{calc.marglogLik}.
}
\value{
A list with the following components:
\item{waic}{WAIC based on the conditional log-likelihood.}
\item{eaic}{EAIC based on the mean of the conditional log-likelihood.}
\item{ebic}{EBIC based on the mean of the conditional log-likelihood.}
\item{all.cond.logLik}{The conditional log-likelihood evaluated at all MCMC samples. This is done via repeated application of \code{\link{calc.condlogLik}}.}
\item{cond.num.params}{Number of estimated parameters used in the fitted model, when all parameters are treated as "fixed" effects.}
\item{do.marglik.ics}{A boolean indicating whether marginal log-likelihood based information criteria are calculated.}
If \code{do.marglik.ics = TRUE}, then we also have:
\item{median.logLik}{The marginal log-likelihood evaluated at the posterior median.}
\item{marg.num.params}{Number of estimated parameters used in the fitted model, when all parameters are treated as "fixed" effects.}
\item{aic.median}{AIC (using the marginal log-likelihood) evaluated at the posterior median.}
\item{bic.median}{BIC (using the marginal log-likelihood) evaluated at the posterior median.}
}
\section{Warning}{
As of version 1.6, this function is no longer maintained (and probably doesn't work properly, if at all)!
Using information criterion for variable selection should be done with extreme caution, for two reasons: 1) The implementation of these criteria are both \emph{heuristic} and experimental. 2) Deciding what model to fit for ordination purposes should be driven by the science. For example, it may be the case that a criterion suggests a model with 3 or 4 latent variables. However, if we interested in visualizing the data for ordination purposes, then models with 1 or 2 latent variables are far more appropriate. As an another example, whether or not we include row effects when ordinating multivariate abundance data depends on if we are interested in differences between sites in terms of relative species abundance (\code{row.eff = FALSE}) or in terms of species composition (\code{row.eff = "fixed"}).
Also, the use of information criterion in the presence of variable selection using SSVS is questionable.
}
\references{
\itemize{
\item Carlin, B. P., and Louis, T. A. (2011). Bayesian methods for data analysis. CRC Press.
\item Gelman et al. (2013). Understanding predictive information criteria for Bayesian models. Statistics and Computing, 1-20.
\item Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation and widely applicable information criterion in singular learning theory. The Journal of Machine Learning Research, 11, 3571-3594.
}
}
\author{
\packageAuthor{boral}
Maintainer: \packageMaintainer{boral}
}
\note{
When a model is fitted using \code{\link{boral}} with \code{calc.ics = TRUE}, then this function is applied and the information criteria are returned as part of the model output.
}
\seealso{
\code{\link{get.dic}} for calculating the Deviance Information Criterion (DIC) based on the conditional log-likelihood; \code{\link{get.more.measures}} for even more information criteria.}
\examples{
\dontrun{
## NOTE: The values below MUST NOT be used in a real application;
## they are only used here to make the examples run quick!!!
example_mcmc_control <- list(n.burnin = 10, n.iteration = 100,
n.thin = 1)
testpath <- file.path(tempdir(), "jagsboralmodel.txt")
library(mvabund) ## Load a dataset from the mvabund package
data(spider)
y <- spider$abun
n <- nrow(y)
p <- ncol(y)
spiderfit_pois <- boral(y, family = "poisson",
lv.control = list(num.lv = 2), row.eff = "random",
mcmc.control = example_mcmc_control)
spiderfit_pois$ics ## Returns information criteria
spiderfit_nb <- boral(y, family = "negative.binomial",
lv.control = list(num.lv = 2), row.eff = "random",
mcmc.control = example_mcmc_control, model.name = testpath)
spiderfit_nb$ics ## Returns the information criteria
}
}
|
9d62047459066766e006860cf9ea06aaf20f756e | 06ebf5fa66d030a61e5b774885d2712951e4a4b6 | /tilvk1.R | 8d1f0e075a1ea2c5667b1b82fde36f5ac0a06535 | [] | no_license | mihassin/todari | b01d43dc08ac04fe8bef801932f7cac1e218d297 | ba7e53e611bdef25e5d6559b4e1800c640d1ff7a | refs/heads/master | 2021-01-21T13:41:22.045573 | 2016-05-17T13:22:39 | 2016-05-17T13:22:39 | 50,838,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 765 | r | tilvk1.R | y1 <- c(0, .3, .5, .1, .1)
y2 <- c(.1, .4, .3, .2, 0)
x <- c(1, 2, 3, 4, 5)
png(filename="y1.png")
plot(x, y1, main="Pistetodennäköisyysfunktio", xlab="y", ylab="f(y; 1)", type="h")
dev.off()
png(filename="y2.png")
plot(x, y2, main="Pistetodennäköisyysfunktio", xlab="y", ylab="f(y; 2)", type="h")
dev.off()
y4<-c(.1, .2)
y5<-c(.1, 0)
x<-c(1, 2)
png(filename="theta1.png")
plot(x, y4, main="Uskottavuusfunktio",xlab=expression(paste(theta)),ylab=expression(plain("L(")~theta~plain("; 4)")))
dev.off()
png(filename="theta2.png")
plot(x, y5, main="Uskottavuusfunktio", xlab=expression(paste(theta)),ylab=expression(plain("L(")~theta~plain("; 5)")))
dev.off()
#plot(x,y,xlab=expression(paste(theta)),ylab=expression(plain("L(")~theta~plain("; 1)")), type="h")
|
680afb2bee390df2c8e98ddf6ee473cb75f2206e | 08422d1c775deaaad47f3763e1d30784d72f1fc9 | /man/plotHTree.Rd | 432daab32ffbccfc754c4aebcacf69ce914453c0 | [] | no_license | cognitivepsychology/PISAtools | fb555771ad08afb1c293df22b91442dd24e4e984 | d6c9d226b6f658287db19ac8bb3500484674d7da | refs/heads/master | 2020-12-24T12:47:39.872067 | 2014-05-20T08:31:49 | 2014-05-20T08:31:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,091 | rd | plotHTree.Rd | \name{plotHTree}
\alias{plotHTree}
\title{Performance vs. two level structured variable}
\description{
Some variables, like occupations have natural hirarchical structure.
Function plotHTree() allows to plot the performance against this structure.
}
\usage{
plotHTree(level1, level2, labels, sizes=rep(1, length(level2)), mar=c(2,5,20,2), plotit=rep(TRUE, length(level2)))
}
\author{
Przemyslaw Biecek
}
\examples{
library(PISA2009lite)
# two and one digits out of parent occupation
father <- substr(as.character(student2009$ST13Q01),1,2)
mother <- substr(as.character(student2009$ST09Q01),1,2)
father1 <- substr(as.character(student2009$ST13Q01),1,1)
mother1 <- substr(as.character(student2009$ST09Q01),1,1)
# names of groups
groups <- sort(na.omit(unique(c(father, mother))))
groups1 <- sort(na.omit(unique(c(father1, mother1))))
# sizes of two digits groups
sizes <- unclass(by(cbind(father, mother, student2009[,c("PV1MATH", "W_FSTUWT")]),
student2009$CNT,
function(x) {
prop.table(sapply(groups, function(group) {
sum(x[which(x[,1] == group | x[,2] == group),4], na.rm=TRUE)
}))
}))
# performance in two digits groups
perfMATH <- unclass(by(cbind(father, mother, student2009[,c("PV1MATH", "W_FSTUWT")]),
student2009$CNT, function(x) {
sapply(groups, function(group) {
inds <- which(x[,1] == group | x[,2] == group)
weighted.mean(x[inds, 3], x[inds, 4], na.rm=TRUE)
}) }))
# performance in one digit groups
perfMATH1 <- unclass(by(cbind(father1, mother1, student2009[,c("PV1MATH", "W_FSTUWT")]),
student2009$CNT, function(x) {
sapply(groups1, function(group) {
inds <- which(x[,1] == group | x[,2] == group)
weighted.mean(x[inds, 3], x[inds, 4], na.rm=TRUE)
}) }))
plotHTree(level2 = perfMATH[["Poland"]],
level1 = perfMATH1[["Poland"]],
sizes = sizes[["Poland"]] * 10,
labels = ISCO88[paste(groups, "00", sep="")])
}
\keyword{datasets}
|
024ae464fc7acd9666fc7fc348d37bc6fe08845e | a16d90b19d1280d3e0dbb69db1a68920e02073fe | /tests/testthat/test-conversions.R | cbb798a51a67bd3d68de28560d1c82ae3ba1c29f | [
"MIT"
] | permissive | potterzot/naptools | bb202fc8729ef5ed8b6bd35b34a556649ef382f6 | 0828f2bf4222af7e29a453ee27f2a5f97dbf0c10 | refs/heads/master | 2021-02-16T06:06:37.774648 | 2020-03-04T18:25:30 | 2020-03-04T18:25:30 | 244,974,748 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 119 | r | test-conversions.R | context("Conversion between units")
test_that("acres_to_kmsq works", {
expect_equal(acres_to_kmsq(1), 247.105)
})
|
3300794f3e8fd9a574ef69b722fe6d4eecbb50c4 | 7bf051cd6df8796c50f7977a6e8e93d149733fdc | /R/downsample_fractions/outer-downsample-fractions.R | 6f025e4fcf86f3a9c1b526806441b4fb60e578cb | [] | no_license | skinnider/CF-MS-analysis | d8cd9e581d2687deb6e9d23719c4b72fbfb049ca | 6253d47346f2c9323884a3bea80caeffbe3089ea | refs/heads/master | 2023-04-10T06:18:43.047922 | 2021-03-08T15:34:41 | 2021-03-08T15:34:41 | 310,446,498 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,523 | r | outer-downsample-fractions.R | setwd("~/git/CF-MS-analysis")
options(stringsAsFactors = F)
library(argparse)
# parse arguments
parser = ArgumentParser(prog = 'outer-downsample-fractions.R')
parser$add_argument('--allocation', type = 'character',
default = "rrg-ljfoster-ab")
args = parser$parse_args()
library(tidyverse)
library(magrittr)
# check that all of the dependent directories exist
if (!dir.exists("~/git/CF-MS-searches"))
stop("repository `CF-MS-searches` does not exist")
if (!dir.exists("~/git/network-validation"))
stop("repository `network-validation` does not exist")
# what system are we on?
system = 'cedar'
base_dir = "~skinnim/projects/rrg-ljfoster-ab/skinnim/CF-MS-analysis"
if (!dir.exists(base_dir)) {
base_dir = "/scratch/st-ljfoster-1/CF-MS-analysis"
system = 'sockeye'
}
# establish grid of analyses
source("R/functions.R") ## contains metrics used to predict interactions
# first metric: mutual information
opts = list(
analysis = c('complexes', 'GO'),
metric = 'MI',
transform = 'none',
missing = 'zero',
n_fractions = seq(5, 75, 5)
)
grid = do.call(expand.grid, c(opts, stringsAsFactors = F))
# add a second metric: Pearson correlation
grid2 = grid %>%
mutate(metric = 'pearson')
grid %<>% bind_rows(grid2)
# combine with quantitation strategies (input files)
chrom_dir = '~/git/CF-MS-searches/data/chromatograms'
chrom_files = list.files(chrom_dir, pattern = '*.rds', recursive = T) %>%
# ignore the metadata files
extract(!grepl("metadata", .))
split = strsplit(chrom_files, '/')
accessions = map_chr(split, 1)
experiments = map_chr(split, 2)
quant_modes = gsub("\\..*$", "", basename(chrom_files))
inputs = data.frame(file = file.path(chrom_dir, chrom_files),
accession = accessions,
experiment = experiments,
quant_mode = quant_modes) %>%
# process quant modes in 'inner' script
distinct(accession, experiment) %>%
# merge these
unite(input, accession, experiment, sep = '|')
# rep each analysis over each input
grid %<>%
dplyr::slice(rep(1:n(), each = nrow(inputs))) %>%
mutate(input = rep(inputs$input, nrow(grid))) %>%
left_join(inputs, by = 'input') %>%
separate(input, into = c("accession", "experiment"), sep = "\\|")
# filter complex analysis where species is not human or mouse
experiments = read.csv("~/git/CF-MS-searches/data/experiments.csv")
species = experiments %>% dplyr::select(Accession, Replicate, Species) %>%
dplyr::rename(species = Species)
grid %<>%
left_join(species, by = c('accession' = 'Accession',
'experiment' = 'Replicate')) %>%
filter(!(analysis == 'complexes' &
!species %in% c("Homo sapiens", "Mus musculus")))
# clean up grid
grid %<>%
dplyr::select(accession, experiment, analysis, metric, transform, missing,
n_fractions)
# write the raw array
grid_file = "sh/analysis/grids/downsample_fractions_raw.txt"
grid_dir = dirname(grid_file)
if (!dir.exists(grid_dir))
dir.create(grid_dir, recursive = T)
write.table(grid, grid_file, quote = F, row.names = F, sep = "\t")
# define output directory where results are stored
output_dir = file.path(base_dir, "downsample_fractions")
# now, check for which parameters are already complete
overwrite = F
grid0 = grid
if (overwrite == F) {
grid0 = grid %>%
mutate(output_dir = file.path(base_dir, "downsample_fractions", accession,
experiment),
output_filename = paste0(analysis,
'-metric=', metric,
'-transform=', transform,
'-missing=', missing,
'-n_fractions=', n_fractions,
'.rds'),
output_file = file.path(output_dir, output_filename),
exists = file.exists(output_file),
idx = row_number()) %>%
filter(!exists) %>%
dplyr::select(-output_dir, -output_filename, -output_file, -exists,
-idx)
}
# write the grid that still needs to be run
write.table(grid0, "sh/analysis/grids/downsample_fractions.txt",
quote = F, row.names = F, sep = "\t")
# finally, run the job on whatever system we're on
script = ifelse(system == 'cedar',
" ~/git/CF-MS-analysis/sh/analysis/downsample_fractions.sh",
" ~/git/CF-MS-analysis/sh/analysis/downsample_fractions.torque.sh")
submit_job(grid0, script, args$allocation, system)
|
6aaa6c6155868a1a0cb5b83b6534434a1fe8da12 | ec9096db7e6f33846d86b1af9a5d833a1cfa482d | /man/submitTrade.Rd | 14aaa0aadc0ed3e557e5a9651572de6c63741df4 | [] | no_license | mjdhasan/alpacaR | 7107a0b5b6954842bc2a1cbef7aea04f7a391648 | 938b5313b5ee17b47ea0bf1b126b8b13de281ad2 | refs/heads/master | 2022-04-08T08:54:54.352304 | 2020-03-10T14:38:59 | 2020-03-10T14:38:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 272 | rd | submitTrade.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trading.R
\name{submitTrade}
\alias{submitTrade}
\title{submit trade}
\usage{
submitTrade(trade)
}
\arguments{
\item{trade}{a trade object}
}
\value{
order object
}
\description{
submit trade
}
|
f004ed59ec5e61a73455cb85ce5e3f2bcf31fd85 | 2bd557829d9ed63c6a2e8da6d6592ba15be97d25 | /man-roxygen/args.R | 6ecc08356ce9c8881e8e3a749d1e0e4bd1737389 | [] | no_license | jacob-ogre/discgolf | e1d6ce065dc9c664800c58fdaacecfdaf9ee64d4 | c57d560acda03b4afbe6352158dba7e0d90be23b | refs/heads/master | 2021-01-11T02:46:36.476194 | 2016-10-14T12:06:16 | 2016-10-14T12:06:16 | 70,902,462 | 0 | 0 | null | 2016-10-14T11:05:19 | 2016-10-14T11:05:19 | null | UTF-8 | R | false | false | 412 | r | args.R | #' @param url Base url for a Discourse installation. See \code{\link{disc_setup}}
#' @param key Your api key on the Discourse installation. See \code{\link{disc_setup}}
#' @param user Your user name on the Discourse installation. See \code{\link{disc_setup}}
#' @param ... Named parameters passed on to \code{\link[httr]{GET}},
#' \code{\link[httr]{POST}}, \code{\link[httr]{PUT}}, or \code{\link[httr]{DELETE}}
|
ddc118040ec90a766ec850c0231bfa9d50a9bd3f | adaf120ebe8cfc2346da110c74963a3512150d91 | /man/unalpha.Rd | 174336e99e91819f1f41586a25163599afcb8401 | [
"MIT"
] | permissive | jmw86069/jamba | fffcb173e945e74b2ec5bc348f8f0d8dc61e3807 | 40c1a1c8275330d8b70062b774e160ba3f690b6c | refs/heads/master | 2023-08-25T18:37:40.770305 | 2023-08-10T23:13:20 | 2023-08-10T23:13:20 | 102,026,462 | 4 | 3 | null | null | null | null | UTF-8 | R | false | true | 1,847 | rd | unalpha.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jamba-colors.r
\name{unalpha}
\alias{unalpha}
\title{Remove alpha transparency from colors}
\usage{
unalpha(x, keepNA = FALSE, ...)
}
\arguments{
\item{x}{\code{character} vector of R colors}
\item{keepNA}{\code{logical} indicating whether \code{NA} values should be kept
and therefore returned as \code{NA}.
When \code{keepNA=FALSE} (default for backward compatibility) \code{NA}
values are converted to \code{"#FFFFFF"} as done by \code{grDevices::col2rgb()}.}
\item{...}{additional arguments are ignored.}
}
\value{
character vector of R colors in hex format.
}
\description{
Remove alpha transparency from colors
}
\details{
This function simply removes the alpha transparency from
R colors, returned in hex format, for example \code{"#FF0000FF"}
becomes \code{"#FF0000"}, or \code{"blue"} becomes \code{"#0000FF"}.
It also silently converts R color names to hex format,
where applicable.
}
\examples{
unalpha(c("#FFFF00DD", "red", NA, "#0000FF", "transparent"))
unalpha(c("#FFFF00DD", "red", NA, "#0000FF", "transparent"), keepNA=TRUE)
}
\seealso{
Other jam color functions:
\code{\link{alpha2col}()},
\code{\link{applyCLrange}()},
\code{\link{col2alpha}()},
\code{\link{col2hcl}()},
\code{\link{col2hsl}()},
\code{\link{col2hsv}()},
\code{\link{color2gradient}()},
\code{\link{fixYellowHue}()},
\code{\link{fixYellow}()},
\code{\link{getColorRamp}()},
\code{\link{hcl2col}()},
\code{\link{hsl2col}()},
\code{\link{hsv2col}()},
\code{\link{isColor}()},
\code{\link{kable_coloring}()},
\code{\link{makeColorDarker}()},
\code{\link{make_html_styles}()},
\code{\link{make_styles}()},
\code{\link{rgb2col}()},
\code{\link{setCLranges}()},
\code{\link{setTextContrastColor}()},
\code{\link{showColors}()},
\code{\link{warpRamp}()}
}
\concept{jam color functions}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.