Dataset Viewer
Auto-converted to Parquet Duplicate
entities
listlengths
1
284
max_stars_repo_path
stringlengths
6
201
max_stars_repo_name
stringlengths
8
116
max_stars_count
int64
0
4.78k
content
stringlengths
69
798k
id
stringlengths
1
4
new_content
stringlengths
56
224k
modified
bool
1 class
references
stringlengths
71
224k
[ { "context": "ergrey}\\today}\n \\fancyhead[L]{\\color{headergrey}Moretz, Brandon}\n \\fancyhead[C]{\\Large\\bfseries\\color{h", "end": 798, "score": 0.991347074508667, "start": 792, "tag": "NAME", "value": "Moretz" }, { "context": "\\today}\n \\fancyhead[L]{\\color{headergrey}Moretz, Brandon}\n \\fancyhead[C]{\\Large\\bfseries\\color{headergre", "end": 807, "score": 0.7542721033096313, "start": 800, "tag": "NAME", "value": "Brandon" }, { "context": " approximation.\n\n```{r, echo = T}\n\n```\n\n### 4.16\n\nMaria claims that she has drawn a random sample of size", "end": 18892, "score": 0.9997559785842896, "start": 18887, "tag": "NAME", "value": "Maria" }, { "context": "$Var = `r var`$\n\nc.) Suppose the time (in minutes) Andy takes to complete his statistics homework is $Uni", "end": 25700, "score": 0.9901853799819946, "start": 25696, "tag": "NAME", "value": "Andy" }, { "context": "statistics homework is $Unif[40, 60]$ and the time Adam takes is $Unif[45, 80]$.\n\nAssume they work indepe", "end": 25778, "score": 0.9901745319366455, "start": 25774, "tag": "NAME", "value": "Adam" } ]
Mathematical Statistics/04_sampling_distributions.rmd
bmoretz/Statistical-Computing
3
--- title: '' mainfont: Arial fontsize: 12pt documentclass: report header-includes: - \PassOptionsToPackage{table}{xcolor} - \usepackage{caption} - \usepackage{amssymb} - \usepackage{booktabs} - \usepackage{longtable} - \usepackage{array} - \usepackage{multirow} - \usepackage{wrapfig} - \usepackage{float} - \usepackage{colortbl} - \usepackage{pdflscape} - \usepackage{tabu} - \usepackage{threeparttable} - \usepackage{threeparttablex} - \usepackage[normalem]{ulem} - \usepackage{makecell} - \usepackage[table]{xcolor} - \usepackage{fancyhdr} - \usepackage{boldline} - \usepackage{tipa} \definecolor{headergrey}{HTML}{545454} \definecolor{msdblue}{HTML}{1C93D1} \pagestyle{fancy} \setlength\headheight{30pt} \rhead{\color{headergrey}\today} \fancyhead[L]{\color{headergrey}Moretz, Brandon} \fancyhead[C]{\Large\bfseries\color{headergrey}Sampling Distributions} \rfoot{\color{headergrey}\thepage} \lfoot{\color{headergrey}Chapter 4} \fancyfoot[C]{\rmfamily\color{headergrey}Mathematical Statistics} geometry: left = 1cm, right = 1cm, top = 2cm, bottom = 3cm date: "`r format(Sys.time(), '%d %B, %Y')`" output: pdf_document: fig_caption: yes latex_engine: xelatex editor_options: chunk_output_type: console --- ```{r knitr_setup, include = FALSE} # DO NOT ADD OR REVISE CODE HERE knitr::opts_chunk$set(echo = FALSE, eval = TRUE, dev = 'png') options(knitr.table.format = "latex") ``` ```{r report_setup, message = FALSE, warning = FALSE, include = FALSE} library(data.table, quietly = TRUE, warn.conflicts = FALSE) assignInNamespace("cedta.pkgEvalsUserCode", c(data.table:::cedta.pkgEvalsUserCode, "rtvs"), "data.table") library(ggplot2, quietly = TRUE, warn.conflicts = FALSE) library(ggrepel, quietly = TRUE, warn.conflicts = FALSE) library(ggthemes, quietly = TRUE, warn.conflicts = FALSE) library(knitr, quietly = TRUE, warn.conflicts = FALSE) library(kableExtra, quietly = TRUE, warn.conflicts = FALSE) library(Rblpapi, quietly = TRUE, warn.conflicts = FALSE) library(scales, quietly = TRUE, 
warn.conflicts = FALSE) library(pander, quietly = TRUE, warn.conflicts = FALSE) library(dplyr, quietly = TRUE, warn.conflicts = FALSE) library(formattable, quietly = TRUE, warn.conflicts = FALSE) library(grid, quietly = TRUE, warn.conflicts = FALSE) library(gridExtra, quietly = TRUE, warn.conflicts = FALSE) library(png, quietly = TRUE, warn.conflicts = FALSE) library(extrafont, quietly = TRUE, warn.conflicts = FALSE) library(tinytex, quietly = TRUE, warn.conflicts = FALSE) library(stringr, quietly = TRUE, warn.conflicts = FALSE) library(lubridate, quietly = TRUE, warn.conflicts = FALSE) library(reshape2, quietly = TRUE, warn.conflicts = FALSE) library(ggrepel, quietly = TRUE, warn.conflicts = FALSE) library(mnormt, quietly = TRUE, warn.conflicts = FALSE) library(Ecdat, quietly = TRUE, warn.conflicts = FALSE) library(MASS, quietly = TRUE, warn.conflicts = FALSE) library(copula, quietly = TRUE, warn.conflicts = FALSE) library(fGarch, quietly = TRUE, warn.conflicts = FALSE) library(forecast, quietly = TRUE, warn.conflicts = FALSE) library(tseries, quietly = TRUE, warn.conflicts = FALSE) library(gmodels, quietly = TRUE, warn.conflicts = FALSE) library(rugarch, quietly = TRUE, warn.conflicts = FALSE) library(quantmod, quietly = TRUE, warn.conflicts = FALSE) library(gtools, quietly = TRUE, warn.conflicts = FALSE) options(tinytex.verbose = TRUE) suppressMessages(library("tidyverse")) pretty_kable <- function(data, title, dig = 2) { kable(data, caption = title, digits = dig) %>% kable_styling(bootstrap_options = c("striped", "hover")) %>% kableExtra::kable_styling(latex_options = "hold_position") } theme_set(theme_light()) # Theme Overrides theme_update(axis.text.x = element_text(size = 10), axis.text.y = element_text(size = 10), plot.title = element_text(hjust = 0.5, size = 16, face = "bold", color = "darkgreen"), axis.title = element_text(face = "bold", size = 12, colour = "steelblue4"), plot.subtitle = element_text(face = "bold", size = 8, colour = "darkred"), 
legend.title = element_text(size = 12, color = "darkred", face = "bold"), legend.position = "right", legend.title.align=0.5, panel.border = element_rect(linetype = "solid", colour = "lightgray"), plot.margin = unit(c( 0.1, 0.1, 0.1, 0.1), "inches")) data.dir <- "D:/Projects/Statistical-Computing/datasets/" setwd("D:/Projects/Statistical-Computing/RDS") ``` ```{r pander_setup, include = FALSE} knitr::opts_chunk$set(comment = NA) panderOptions('table.alignment.default', function(df) ifelse(sapply(df, is.numeric), 'right', 'left')) panderOptions('table.split.table', Inf) panderOptions('big.mark', ",") panderOptions('keep.trailing.zeros', TRUE) ``` ### 4.1 Consider the population {1, 2, 5, 6, 10, 12}. Find (and plot) the sampling distribution of medians for samples of size 3 without replacement. ```{r, echo = T, fig.height=4.5, fig.width=8} p <- c(1, 2, 5, 6, 10, 12) c <- combinations(v = p, n = 6, r = 3) t <- apply(c, 1, median) ggplot(data.table(value = t), aes(value, fill = ..count..)) + geom_histogram(bins = 30) + labs(title = "Median Sampling Distribution of p") ``` Compare the median of the population to the mean of the medians. Median of p = __`r median(p)`__. Mean of Medians of p = __`r mean(t)`__ \newpage ### 4.2 Consider the population {3, 6, 7, 9, 11, 14}. For samples of size 3 without replacement, find (and plot) the sampling distribution for the minimum. ```{r, echo = T, fig.height=4.5, fig.width=8} p <- c(3, 6, 7, 9, 11, 14) c <- combinations(v = p, n = 6, r = 3) t <- apply(c, 1, min) ggplot(data.table(value = t), aes(value, fill = ..count..)) + geom_histogram(bins = 30) + labs(title = "Minimum Sampling Distribution of p") ``` What is the mean of the sampling distribution? __`r mean(t)`__ The statistic is an estimate of some parameter - what is the value of that parameter? This is an estimation of the minimum, which is: __`r min(p)`__ \newpage ### 4.3 Let _A_ denote the population {1, 3, 4, 5} and _B_ the population {5, 7, 9}. 
```{r, echo = T} A <- c(1, 3, 4, 5) B <- c(5, 7, 9) ``` Let _X_ be a random value from _A_, and _Y_ and random value from _B_. a.) Find the sampling distribution of _X + Y_. ```{r, echo = T} result = numeric(12) index <- 1 for(j in 1:length(A)) { for(k in 1:length(B)) { result[index] <- A[j] + B[k] index <- index + 1 } } sort(result) ``` b.) In this example, does the sampling distribution depend on whether you sample with or without replacement? _No._ Why or why not? _Because 5 in is both sets._ c.) Compute the mean of the values for each of _A_ and _B_ and the values in the sampling distribution of _X + Y_. Mean of _A_: __`r mean(A)`__. Mean of _B_: __`r mean(B)`__. Mean of _A + B_: __`r mean(result)`__ How are the means related? mean(A) + mean(B) = mean(A + B). d.) Suppose you draw a random value from _A_ and a random value from _B_. ```{r, echo = T} prob <- sum(result >= 13) / length(result) ``` What is the probability that the sum is 13 or larger? __`r prob*100`%__ \newpage ### 4.4 Consider the population {3, 5, 6, 6, 8, 11, 13, 15, 19, 20}. a.) Compute the mean and standard deviation and create a dot plot of its distribution. ```{r, echo = T, fig.height=3.5, fig.width=8} p <- c(3, 5, 6, 6, 8, 11, 13, 15, 19, 20) mu <- mean(p) sigma <- sd(p) ggplot(data.table(value = p)) + geom_dotplot(aes(value, fill = ..count..), binwidth = 1) + labs(title = "Population Dot Plot") ``` $\mu = `r mu`$, $\sigma = `r sigma`$ b.) Simulate the sampling distribution of $\bar{X}$ by taking random samples of size 4 and plot your results. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e2 results <- numeric(N) for( i in 1:N) { index <- sample(length(p), size = 4, replace = F) results[i] <- mean( p[index] ) } ggplot(data.table(value = results)) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Sample Means") xbar <- mean(results) se <- sd(results) / sqrt(N) ``` Compute the mean and standard error, and compare to the population mean and standard deviation. 
mean: `r xbar`, standard error: `r se` c.) Use the simulation to find $P(\bar{X} < 11)$. ```{r, echo = T} prob <- mean(results < 11) ``` $P(\bar{X} < 11) = `r prob*100`$% \newpage ### 4.5 Consider two populations A = {3, 5, 7, 9, 10, 16}, B = {8, 10, 11, 15, 18, 25, 28}. ```{r, echo = T} A <- c(3, 5, 7, 9, 10, 16) B <- c(8, 10, 11, 15, 18, 25, 28) ``` a.) Using R, draw random samples (without replacement) of size 3 from each population, and simulate the sampling distribution of the sum of their maximums. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e2 results <- numeric(N) for(i in 1:N) { samp.a <- sample(A, 3, replace = F) samp.b <- sample(B, 3, replace = F) results[i] <- max(samp.a) + max(samp.b) } ggplot(data.table(value = results)[, index := .I]) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Sampling Distribution: max(A) + max(B)") ``` b.) Use your simulation to estimate the probability that the sum of the maximums is less than 20. ```{r, echo = T} prob <- mean(results < 20) ``` Probability: `r prob*100`% c.) Draw random samples of size 3 from each population, and find the maximum of the union of these two sets. Simulate the sampling distribution of the maximums of this union. ```{r, echo = T, fig.height=3.5, fig.width=8} results <- numeric(N) for(i in 1:N) { samp.a <- sample(A, 3, replace = F) samp.b <- sample(B, 3, replace = F) results[i] <- max(union(samp.a, samp.b)) } ggplot(data.table(value = results)[, index := .I]) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Sampling Distribution: max(union(A,B))") ``` d.) Use simulation to find the probability that the maximum of the union is less than 20. ```{r, echo = T} prob <- mean(results < 20) ``` Probability: `r prob*100`% \newpage ### 4.6 The data set _Recidivism_ contains the poopulation of all Iowa offenders convicted of either a felony or misdemeanor who were released in 2010 (case study in Section 1.4). 
```{r, echo = T}
Recidivism <- data.table(read.csv(paste0(data.dir, "Recidivism.csv"), header = T))
```

Of these, 31.6% recidivated and were sent back to prison. Simulate the sampling distribution of $\hat{p}$, the sample proportion of offenders who recidivated, for random samples of size 25.

```{r, echo = T}
mean(Recidivism$Recid == "Yes")

N <- 10e2
results <- numeric(N)

for(i in 1:N)
{
   samp <- sample(Recidivism$Recid, 25)
   results[i] <- mean(samp == "Yes")
}
```

a.) Create a histogram and describe the simulated sampling distribution of $\hat{p}$.

```{r, echo = T, fig.height=3.5, fig.width=8}
ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Recidivism Sampling Distribution")
```

Estimate the mean and standard error.

```{r, echo = T}
mu <- mean(results)

# Each entry of 'results' is already a draw from the sampling distribution
# of p-hat, so the sd of the draws estimates the standard error directly
# (dividing by sqrt(n) again would shrink it a second time).
se <- sd(results)
```

$\mu = `r mu`$, $\sigma = `r se`$

b.) Compare your estimate of the standard error with the theoretical standard error (_Corollary 4.3.2_).

```{r, echo = T}
# Theoretical SE of a sample proportion: sqrt(p * (1 - p) / n).
tse <- sqrt(mu * ( 1 - mu ) / 25)
```

Theoretical: `r tse`

c.) Repeat the above using samples of size 250, and compare with the $n = 25$ case.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e2
results <- numeric(N)

for(i in 1:N)
{
   samp <- sample(Recidivism$Recid, 250)
   results[i] <- mean(samp == "Yes")
}

ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Recidivism Sampling Distribution")

mu <- mean(results)
se <- sd(results)
```

$\mu = `r mu`$, $\sigma = `r se`$

### 4.7

The data set _FlightDelays_ contains the population of all flight departures by United Airlines and American Airlines out of LGA during May and June 2009 (case study in Section 1.1).

```{r, echo = T}
Flights <- data.table(read.csv(paste0(data.dir, "FlightDelays.csv"), header = T))
```

a.) Create a histogram of _Delay_ and describe the distribution.
```{r, echo = T, fig.height=3.5, fig.width=8} ggplot(Flights, aes(Delay)) + geom_histogram(aes(fill = ..count..), bins = 30) + scale_y_continuous(labels = comma) + labs(title = "Flight Delays") ``` Compute the mean and standard deviation. ```{r, echo = T} mu <- mean(Flights$Delay) sigma <- sd(Flights$Delay) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ b.) Simulate the sampling distribution of $\bar{x}$, the sample mean of the length of the flight delays (_Delay_), for sample size 25. ```{r, echo = T} N <- 10e2 results <- numeric(N) for(i in 1:N) { samp <- sample(Flights$Delay, 25, replace = F) results[i] <- mean(samp) } ``` Create a histogram and describe the simulated sampling distribution of $\bar{x}$. ```{r, echo = T, fig.height=3.5, fig.width=8} ggplot(data.table(value = results)) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Flight Delay Sampling Distribution") ``` Estimate the mean and standard error. ```{r, echo = T} mu <- mean(results) se <- sd(results) / sqrt(25) ``` $\mu = `r mu`$, $\Sigma = `r se`$ c.) Compare your estimate of the standard error with the theoretical standard error (_Corollary A.4.1_). ```{r, echo = T} tse <- var(results) / 25 ``` Theoretical: `r tse` d.) Repeat with sample size 250. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e2 results <- numeric(N) for(i in 1:N) { samp <- sample(Flights$Delay, 250, replace = F) results[i] <- mean(samp) } ggplot(data.table(value = results)) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Flight Delay Sampling Distribution") mu <- mean(results) se <- sd(results) / sqrt(250) tse <- var(results) / 250 ``` $\mu = `r mu`$, $\Sigma = `r se`$ Theoretical: `r tse` ### 4.8 Let $X_1, X_2, \ldots, X_{25}$ be a random sample from some distribution and $W = T(X_1, X_2, \ldots, X_n)$ be a statistic. Suppose the _sampling distribution_ of W has a pdf given by $f(x) = \frac{2}{x^2}$, for 1 < x < 2. 
Find $P(w < 1.5)$ __Solution__: ```{r, echo = T, fig.height=3.5, fig.width=8} f <- function(x) 2 / x^2 x <- seq( from = 1.0001, to = 1.999, by = 0.0001) y <- f(x) ggplot(data.table(x, y)) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + labs(title = "pdf f(x) = 2/x^2") a <- cumsum(y) / sum(y) p <- round( a[x == 1.5], 4 ) * 100 d <- data.table(x, y = a) ggplot(d) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + geom_area(aes(x, y), data = d[x < 1.5], fill = "cornflowerblue", alpha = .3) + labs(title = paste("cdf f(x) = 2/x^2, A =", p )) ``` Numerical solution: `r p`% Analytical Solution: ${\int_{1}^{1.5}}\frac{2}{x^2} = \frac{2}{3}$ ### 4.9 Let $X_1, X_2, \ldots, X_{n}$ be a random sample from some distribution and $Y = T(X_1, X_2, \ldots, X_n)$ be a statistic. Suppose the _sampling distribution_ of Y has pdf $f(y) = (3/8)y^2 \ for \ 0 \le y \le 2$. Find $P(0 \le Y \le \frac{1}{5})$ __Solution__: ```{r, echo = T, fig.height=3.5, fig.width=8} f <- function(x) (3/8)*x**2 x <- seq( from = 0, to = 2, by = 0.001) y <- f(x) ggplot(data.table(x, y)) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + labs(title = paste("pdf: ", paste0(deparse(f), collapse = " "))) a <- cumsum(y) / sum(y) p <- round( a[x == 1/5], 4 ) * 100 d <- data.table(x, y = a) ggplot(d) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + geom_area(aes(x, y), data = d[x < 1/5], fill = "cornflowerblue", alpha = .3) + labs(title = paste("cdf f(x) = 2/x^2, A =", p )) ``` Numerical Solution: `r p`% Analytical Solution: ${\int_{0}^{\frac{1}{5}}}\frac{x^3}{8} = \frac{.008}{8} = .001 = .1$ % ### 4.10 Suppose the heights of boys in a certain large city follow a distribution with mean 48 in. and variance $9^2$. Use the CLT approximation to estimate the probability that in a random sample of 30 boys, the mean height is more than 51 in. 
```{r, echo = T}
# Mean 48, variance 9^2 => population sd = 9.
# SE of the sample mean = sd / sqrt(n), not variance / sqrt(n).
z <- (51 - 48) / (9 / sqrt(30))
p <- pnorm(z, lower.tail = F)
```

Probability: __`r round(p, 4)*100`%__

### 4.11

Let $X_1, X_2, \ldots, X_{36} \sim Bern(.55)$ be independent, and let $\hat{p}$ denote the sample proportion.

Use the CLT approximation with continuity correction to find the probability that $\hat{p} \le 0.5$.

```{r, echo = T}
# p-hat <= 0.5 is equivalent to at most 18 successes out of 36;
# the continuity correction replaces 18 with 18.5 on the count scale.
z <- ( 18.5 - 36 * .55 ) / sqrt(36 * .55 * (1 - .55))
p <- pnorm(z, lower.tail = T)
```

Probability: `r round(p, 4)*100`%

### 4.12

A random sample of size $n = 20$ is drawn from a distribution with mean 6 and variance 10.

Use the CLT approximation to estimate $P(\bar{X} \le 4.6)$.

```{r, echo = T}
# SE of the sample mean = sqrt(variance / n).
z <- ( 4.6 - 6 ) / sqrt( 10 / 20 )
p <- pnorm(z, lower.tail = T)
```

Probability: `r round(p, 4)*100`%

### 4.13

A random sample of size $n = 244$ is drawn from a distribution with pdf $f(x) = (3/16)(x - 4)^2, 2 \le x \le 6$.

Use the CLT approximation to estimate $P(X \ge 4.2)$.

```{r, echo = T, fig.height=2.6, fig.width=8}
pdf <- function(x) (3/16)*(x - 4)^2

x <- seq(from = 2, to = 6, by = 0.001)
y <- pdf(x)

ggplot(data.table(x, y)) +
   geom_point(aes(x, y), col = "cornflowerblue", lwd = .8) +
   labs(title = paste("PDF: ", paste0(deparse(pdf), collapse = " ")))

# Exact cdf of f: F(x) = ((x - 4)^3 + 8) / 16, with F(2) = 0 and F(6) = 1.
cdf <- function(x) ((x - 4)^3 + 8) / 16

# Numerical mean and sd of the population
# (mu = 4 by symmetry of f about x = 4; variance = 12/5 = 2.4).
w <- y / sum(y)
mu.x <- sum(x * w)
sigma.x <- sqrt(sum((x - mu.x)^2 * w))

cdf.num <- cumsum(y) / sum(y)
ev <- x[min(which(cdf.num > .5))]

ggplot(data.table(x, y = cdf.num)) +
   geom_point(aes(x, y), col = "cornflowerblue", lwd = .8) +
   geom_vline(xintercept = ev) +
   labs(title = paste("CDF: ", paste0(deparse(cdf), collapse = " ")))

# CLT: X-bar is approximately N(mu, sigma^2 / n).
z <- ( 4.2 - mu.x ) / ( sigma.x / sqrt(244) )
pnorm(z, lower.tail = F)
```

### 4.14

According to the 2000 census, 28.6% of the US adult population received a high school diploma.

In a random sample of 800 US adults, what is the probability that between 220 and 230 (inclusive) people have a high school diploma?

Use the CLT approximation with continuity correction, and compare with the exact probability.
__Solution__: The sampling distribution of $\hat{p}$ is approximately normal with: ```{r, echo = T} n <- 800 mu <- .286 ev <- 800 * mu sigma <- sqrt(n*mu*(1-mu)) ``` $\mathbb{E}[X] = `r ev`$ and $\sigma = \sqrt{800(.286)(1 - .286)} = `r round(sigma, 4)`$ ```{r, echo = T} l <- pnorm((ev - 219.5) / sigma) h <- pnorm((ev - 230.5) / sigma) p <- l - h ``` Probability: `r round(p, 4)` ### 4.15 If $X_1, \ldots, X_n$ are i.i.d. from Unif[0, 1], how large should n be so that $P(\bar{X} - \frac{1}{2} < 0.05) \ge 0.90$, that is, is there at least a 90% chance that the sample mean is within 0.05 of $\frac{1}{2}$? Use the CLT approximation. ```{r, echo = T} ``` ### 4.16 Maria claims that she has drawn a random sample of size 30 from the exponential distribution with $\lambda = 1/10$. The mean of her sample is 12. a.) What is the expected value of a sample mean? $X \sim Exp(\frac{1}{10}), \mathbb{E}(x) = 10$ b.) Run a simulation by drawing 1000 random samples, each of size 30, from Exp(1/10), and compute the mean for each sample. ```{r, echo = T, fig.height=3.5, fig.width=12} N <- 1000 result <- numeric(N) for( i in 1:N) { samp <- rexp( n = 30, rate = 1/10) result[i] <- mean(samp) } ggplot(data.table(result), aes(result)) + geom_histogram(aes(y = ..density.., fill = ..count..), bins = 30) + geom_vline(xintercept = 12, col = "darkorange") + stat_density( kernel = "gaussian", fill = "darkorange", alpha = .3) + labs(title = "Exp(1/10) Sampling Distribution") p <- mean(result > 12) ``` What proportion of the sample means is as large or larger than 12? __`r p*100`%__ c.) Is a mean of 12 unusual for a sample of size 30 from Exp(1/10)? __Yes__, only ~13% of the sample means have a value of 12 or higher. ### 4.17 Let $X \sim N(15, 3^2)$ and $Y \sim N(4, 2^2)$ be independent random variables. a.) What is the exact sampling distribution of $W = X - 2Y$? $W \sim N(7, 5^2)$ b.) Use R to simulate the sampling distribution of $W$ and plot your results. 
```{r, echo = T, fig.height=3.5, fig.width=8} X <- rnorm(10e3, 15, 3) Y <- rnorm(10e3, 4, 2) W <- X - 2*Y ggplot(data.table(x = W)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + geom_vline(xintercept = 7, col = "darkorange", lwd = 1.5) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` Check that the simulated mean and standard error are close to the theoretical mean and standard error. ```{r, echo = T} mu <- mean(W) sigma <- sd(W) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ c.) Use the simulated sampling to estimate $P(W \le 10)$, and then check your estimate with an exact calculation. ```{r, echo = T} phat <- mean(W <= 10) p <- pnorm(10, mean = 7, sd = 5) ``` $\hat{p}= `r phat*100`$% $P(W \le 10) = `r round(p, 4)*100`$% ### 4.18 Let $X \sim Pois(4)$, $Y \sim Pois(12)$, $U \sim Pois(3)$ be independent random variables. a.) What is the exact sampling distribution of $W = X + Y + U$? $W \sim Pois(19)$ b.) Use R to simulate the sampling distribution of $W$ and plot your results. ```{r, echo = T, fig.height=3.5, fig.width=8} W <- rpois(10e3, lambda = 19) ggplot(data.table(x = W)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + geom_vline(xintercept = 19, col = "darkorange", lwd = 1.5) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` Check that the simulated mean and standard error are close to the theoretical mean and standard error. ```{r, echo = T} mu <- mean(W) sigma <- sd(W) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ c.) Use the simulated sampling distribution to estimate $P(W \le 14)$ and then check your estimate with an exact calculation. ```{r, echo = T} phat <- mean(W <= 14) p <- ppois(14, lambda = 19) ``` $\hat{p} = `r phat*100`$% $P(W \le 14) = `r round(p, 4)*100`$% ### 4.19 Let $X_1, X_2, \ldots, X_{10} \sim^{i.i.d} N(20, 8^2)$ and $Y_1, Y_2, \ldots, Y_{15} \sim^{i.i.d} N(16, 7^2)$. Let $W = \bar{X} + \bar{Y}$ a.) Give the exact sampling distribution of W. 
$\sigma_W = \sqrt{8^2/10 + 7^2/15} \approx 3.11$

$W \sim N(36, 3.11^2)$

b.) Simulate the sampling distribution in R and plot your results.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e3
result <- numeric(N)

for( i in 1:N)
{
   X <- rnorm(10, 20, 8)
   Y <- rnorm(15, 16, 7)

   result[i] <- mean(X) + mean(Y)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   geom_vline(xintercept = 36, col = "darkorange", lwd = 1.5) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

Check that the simulated mean and standard error are close to the exact mean and standard error.

```{r, echo = T}
mu <- mean(result)
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

c.) Use your simulation to find $P(W < 40)$. Calculate an exact answer and compare.

```{r, echo = T}
phat <- mean(result <= 40)

# Exact: Var(W) = Var(X-bar) + Var(Y-bar) = 8^2/10 + 7^2/15.
p <- pnorm(40, 36, sqrt(8^2/10 + 7^2/15))
```

$\hat{p} = `r phat*100`$%

$P(W < 40) = `r round(p, 4)*100`$%

### 4.20

Let $X_1, X_2, \ldots, X_9 \sim^{i.i.d.} N(7, 3^2)$, and $Y_1, Y_2, \ldots, Y_{12} \sim^{i.i.d.} N(10, 5^2)$.

Let $W = \bar{X} - \bar{Y}$.

a.) Give the sampling distribution of $W$.

$\sigma_W = \sqrt{3^2/9 + 5^2/12} \approx 1.76$

$W \sim N(-3, 1.76^2)$

b.) Simulate the sampling distribution of W in R, and plot your results.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e3
result <- numeric(N)

for(i in 1:N)
{
   X <- rnorm(9, 7, 3)
   Y <- rnorm(12, 10, 5)

   result[i] <- mean(X) - mean(Y)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   geom_vline(xintercept = -3, col = "darkorange", lwd = 1.5) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

Check that the simulated mean and standard error are close to the theoretical mean and standard error.

```{r, echo = T}
mu <- mean(result)
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

c.) Use your simulation to find $P(W < -1.5)$.
```{r, echo = T}
phat <- mean(result <= -1.5)

# Exact: W ~ N(-3, 3^2/9 + 5^2/12), so use the correct sd, not 1.79.
p <- pnorm(-1.5, -3, sqrt(3^2/9 + 5^2/12))
```

$\hat{p} = `r phat*100`$%

Calculate an exact answer and compare.

$P(W < -1.5) = `r round(p, 4)*100`$%

### 4.21

Let $X_1, X_2, \ldots, X_N$ be a random sample from $N(0, 1)$.

Let $W = X^2_1 + X^2_2 + \ldots + X^2_n$

What is the mean and variance of the sampling distribution of W?

$W \sim \chi^2_n$, so $\mu = n$ and $Var = 2n$.

Repeat using N = 4, N = 5.

$N = 4: \mu = 4, Var = 8$

$N = 5: \mu = 5, Var = 10$

What observations or conjectures do you have for general __n__?

### 4.22

Let $X$ be a uniform random variable on the interval $[40, 60]$ and Y be a uniform random variable on $[45, 80]$.

Assume that X and Y are independent.

a.) Compute the expected value and variance of $X + Y$.

$\mu = 50 + 62.5 = 112.5$, $Var = 20^2/12 + 35^2/12 \approx 135.42$

b.) Simulate a sampling distribution of $X + Y$.

```{r, echo = T, fig.height=3.5, fig.width=8}
X <- runif(1000, 40, 60)
Y <- runif(1000, 45, 80)

total <- X + Y

ggplot(data.table(value = total)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "X + Y Sampling Distribution")
```

Describe the sampling distribution of $X + Y$.

__Symmetric and trapezoid-shaped, centered at 112.5 — the sum of two independent uniforms is neither uniform nor exactly normal.__

Compute the mean and variance of the sampling distribution and compare this with the theoretical mean and variance.

```{r, echo = T}
mu <- mean(total)
var <- var(total)
```

$\mu = `r mu`$, $Var = `r var`$

c.) Suppose the time (in minutes) Andy takes to complete his statistics homework is $Unif[40, 60]$ and the time Adam takes is $Unif[45, 80]$.

Assume they work independently. One day they announce that their total time to finish an assignment was less than 90 minutes. How likely is this?

```{r, echo = T, fig.height=3.5, fig.width=8}
# The sum of two independent uniforms is NOT uniform on [85, 140],
# so punif(90, 85, 140) is wrong. Estimate from the simulated sums;
# the exact answer is 12.5/700 ~ 1.79%.
p <- mean(total < 90)
```

Probability: __`r round(p, 4)*100`%__

### 4.23

Let $X_1, X_2, \ldots, X_{20} \sim^{i.i.d.} Exp(2)$. Let $X = \sum^{20}_{i=1}X_i$.

a.) Simulate the sampling distribution of $X$ in R.
```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp(n = 20, rate = 2) result[i] <- sum(samp) } ``` b.) From your simulation, find $\mathbb{E}[X]$ and $Var[X]$. ```{r, echo = T} mu <- mean(result) var <- var(result) ``` $\mu = `r mu`$, $Var = `r var`$ c.) From your simulation, find $P( X \le 10)$. ```{r, echo = T} phat <- mean(result <= 10) ``` Probablity: _`r round(phat, 4)*100`%_ ### 4.24 Let $X_1, X_2, \ldots, X_{30} \sim^{i.i.d.} Exp(1/3)$ and let $\bar{X}$ denote the sample mean. a.) Simulate the sampling distribution of $\bar{X}$ in R. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp(n = 30, rate = 1/3) result[i] <- mean(samp) } ``` b.) Find the mean and standard error of the sampling distribution, and compare with the theoretical results. ```{r, echo = T} mu <- mean(result) sigma <- sd(result) ``` Sample: $\mu = `r mu`$, $\sigma = `r sigma`$ Theoretical: $\mu = \frac{\lambda}{1} = 3$, $sigma = 3/sqrt(30 - 1) = .56$ c.) From your simulation, find $P(\bar{X} \le 3.5)$. ```{r, echo = T} phat <- mean(result <= 3.5) ``` $\hat{p} = `r phat`$ d.) Estimate $P(\bar{X} \le 3.5)$ by assuming the CLT approximation holds. Compare this result with the one in part __(c)__. ```{r, echo = T} z <- (3.5 - 3) / sigma p <- pnorm(z) ``` $p = `r round(p, 4)`$ ### 4.25 Consider the exponential distribution with density $f(x) \frac{1}{20}e^{-x/20}$, with mean and standard deviation of 20. a.) Calculate the median of this distribution. $0.5 = \int_{m}^{\inf}f(x)dx = -e^{-x/20}$ $\ldots = M = 20*log(2)$ $\ldots = `r 20*log(2)`$ b.) Using R, draw a random sample of size 50 and graph the histogram. ```{r, echo = T, fig.height=3.5, fig.width=8} samp <- rexp( n = 50, rate = 1/20) ggplot(data.table(x = samp)) + geom_histogram(aes(x, fill = ..count..)) + labs(title = "RExp(50, 1/20)") ``` What are the mean and standard deviation of your sample? 
```{r, echo = T} mu <- mean(samp) sigma <- sd(samp) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ c.) Run a simulation to find the (approximate) sampling distribution for the median of sample size 50 from the exponential distribution and describe it. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp( n = 50, rate = 1/20) result[i] <- median(samp) } ggplot(data.table(x = result)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` What is the mean and the standard error of this sampling distribution? ```{r, echo = T} mu <- mean(result) sigma <- sd(result) / sqrt(50) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ d.) Repeat the above but use sample sizes $n = 100, 500 \ and \ 1,000$. ```{r, echo = T, fig.height=3, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp( n = 100, rate = 1/20) result[i] <- median(samp) } ggplot(data.table(x = result)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` ```{r, echo = T} mu <- mean(result) sigma <- sd(result) / sqrt(50) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ ```{r, echo = T, fig.height=3, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp( n = 500, rate = 1/20) result[i] <- median(samp) } ggplot(data.table(x = result)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` ```{r, echo = T} mu <- mean(result) sigma <- sd(result) / sqrt(50) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ ```{r, echo = T, fig.height=3, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp( n = 1000, rate = 1/20) result[i] <- median(samp) } ggplot(data.table(x = result)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` 
```{r, echo = T} mu <- mean(result) sigma <- sd(result) / sqrt(50) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ How does sample size affect the sampling distribution? _The sample mean and standard error converge to the analytical solution with increased sample size._ ### 4.26 Prove theorem 4.2.1. ### 4.27 Let $X_1, X_2 \sim^{i.i.d.} F$ with corresponding pdf $f(x) = \frac{2}{x^2}$, $1 \le x \le 2$. a.) Find the pdf of $X_{max}$. $F_{max}(x) = 8(\frac{1}{x^2} - \frac{1}{x^3})$ b.) Find the expected value of $X_{max}$. $\mathbb{E}{[F_{max}]} = 1.545$ ### 4.28 Let $X_1, X_2, \ldots, X_N \sim^{i.i.d.}$ with corresponding pdf $f(x) = 3x^2, 0 \le x \le 1$. a.) Find the pdf for $X_{min}$. b.) Find the pdf for $X_{max}$. c.) If $n = 10$, find the probability that the largest value, $X_{max}$, is greater than 0.92. ### 4.29 Compute the pdf of the sampling distribution of the maximum samples of size 10 from a population with an exponential distribtuion with $\lambda = 12$. ### 4.30 Let $X_1, X_2, \ldots, X_N \sim^{i.i.d.} Exp(\lambda)$ with pdf $f(x) = \lambda e^{-\lambda x}, \lambda > 0, x > 0$. a.) Find the pdf $f_{min}(x)$ for the sample minimum $X_{min}$. Recognize this as the pdf of a known distribution. b.) Simulate in R the sampling distribution of $X_{min}$ of samples of size $n = 25$ from the exponential distribution with $\lambda = 7$. Compare the theoretical expected value of $X_{min}$ with the simulated expected value. ### 4.31 Let $X_1, X_2, \ldots, X_n \sim^{i.i.d.} Pois(3)$. Let $X = \sum^{10}_{i=1}X_i$. Find the pdf for the sampling distribution of X. ### 4.32 Let $X_1$ and $X_2$ be independent exponential random variables, both with parameter $\lambda > 0$. Find the cumulative distribution function for the sampling distribution of $X = X_1 + X_2$. ### 4.33 This simulation illustrates the CLT for a finite population. 
```{r, echo = T, fig.height=3.5, fig.width=8} N <- 400 n <- 5 finpop <- rexp(N, 1/10) ggplot(data.table(x = finpop)) + geom_histogram(aes(x, fill = ..count..)) + labs(title = "Exp(1/10)") mean(finpop) #mean (mu) of your pop. sd(finpop) # stdev (sigma) of your pop. sd(finpop)/sqrt(n) # theoretical standard error of sampling distribution sd(finpop)/sqrt(n) * sqrt((N-n)/(N-1)) # without replacement Xbar <- numeric(1000) for(i in 1:1000) { x <- sample(finpop, n) # Random sample of size n # (w/o replacement) Xbar[i] <- mean(x) } p1 <- ggplot(data.table(x = Xbar)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + labs(title = "Mean Sampling Distribution Histogram") p2 <- ggplot(data.table(x = Xbar), aes(sample = x)) + stat_qq() + stat_qq_line() + labs(title = "Mean Sampling Distribution QQ-Plot") grid.arrange(p1, p2, nrow = 2) mean(Xbar) sd(Xbar) # estimated standard error of sampling distribution ``` a.) Does the sampling distribution of sample means appear approximately normal? b.) Compare the mean and standard error of your simulated sampling distribution with the theoretical ones. c.) Calculate $(\sigma/\sqrt(n))(\sqrt{(N-n)/(N-1)}$, where $\sigma$ is the standard deviation of the finite population and compare with the (estimated) standard error of the sampling distribution. d.) Repeat for larger __n__, say __n__ = 20 and __n__ = 100. ### 4.34 Let $X_1, X_2, \ldots, X_n$ be independent random variables from $N(\mu, \sigma)$. We are interested in the sampling distribution of the variance. Run a simulation to draw random samples of size 20 from $N(25, 7^2)$ and calculate the variance for each sample. 
```{r, echo = T, fig.height=3.5, fig.width=8} W <- numeric(1000) for(i in 1:1000) { x <- rnorm(20, 25, 7) W[i] <- var(x) } mean(W) var(W) p1 <- ggplot(data.table(x = W)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + labs(title = "Variance Sampling Distribution") p2 <- ggplot(data.table(value = W), aes(sample = value)) + stat_qq() + stat_qq_line() + labs(title = "Mean Sampling Distribution QQ-Plot") grid.arrange(p1, p2, nrow = 2) ``` Does the sampling distribution appear to be normally distributed? Repeat with n = 50 and n = 200. ### 4.35 A random sample of size $n = 100$ is drawn from a distribution with pdf $f(x) = 3(1- x)^2, 0 \le x \le 1$. a.) Use the CLT approximation to estimate $P(\bar{X} \le 0.27)$. b.) Use the expanded CLT to estimate the same probability (dnorm). c.) If $X_1, X_2, X_3 \sim^{i.i.d.} Unif[0, 1]$, then the minimum has density __f__ given above. Use simulation to estimate the probability.
5325
--- title: '' mainfont: Arial fontsize: 12pt documentclass: report header-includes: - \PassOptionsToPackage{table}{xcolor} - \usepackage{caption} - \usepackage{amssymb} - \usepackage{booktabs} - \usepackage{longtable} - \usepackage{array} - \usepackage{multirow} - \usepackage{wrapfig} - \usepackage{float} - \usepackage{colortbl} - \usepackage{pdflscape} - \usepackage{tabu} - \usepackage{threeparttable} - \usepackage{threeparttablex} - \usepackage[normalem]{ulem} - \usepackage{makecell} - \usepackage[table]{xcolor} - \usepackage{fancyhdr} - \usepackage{boldline} - \usepackage{tipa} \definecolor{headergrey}{HTML}{545454} \definecolor{msdblue}{HTML}{1C93D1} \pagestyle{fancy} \setlength\headheight{30pt} \rhead{\color{headergrey}\today} \fancyhead[L]{\color{headergrey}<NAME>, <NAME>} \fancyhead[C]{\Large\bfseries\color{headergrey}Sampling Distributions} \rfoot{\color{headergrey}\thepage} \lfoot{\color{headergrey}Chapter 4} \fancyfoot[C]{\rmfamily\color{headergrey}Mathematical Statistics} geometry: left = 1cm, right = 1cm, top = 2cm, bottom = 3cm date: "`r format(Sys.time(), '%d %B, %Y')`" output: pdf_document: fig_caption: yes latex_engine: xelatex editor_options: chunk_output_type: console --- ```{r knitr_setup, include = FALSE} # DO NOT ADD OR REVISE CODE HERE knitr::opts_chunk$set(echo = FALSE, eval = TRUE, dev = 'png') options(knitr.table.format = "latex") ``` ```{r report_setup, message = FALSE, warning = FALSE, include = FALSE} library(data.table, quietly = TRUE, warn.conflicts = FALSE) assignInNamespace("cedta.pkgEvalsUserCode", c(data.table:::cedta.pkgEvalsUserCode, "rtvs"), "data.table") library(ggplot2, quietly = TRUE, warn.conflicts = FALSE) library(ggrepel, quietly = TRUE, warn.conflicts = FALSE) library(ggthemes, quietly = TRUE, warn.conflicts = FALSE) library(knitr, quietly = TRUE, warn.conflicts = FALSE) library(kableExtra, quietly = TRUE, warn.conflicts = FALSE) library(Rblpapi, quietly = TRUE, warn.conflicts = FALSE) library(scales, quietly = TRUE, 
warn.conflicts = FALSE) library(pander, quietly = TRUE, warn.conflicts = FALSE) library(dplyr, quietly = TRUE, warn.conflicts = FALSE) library(formattable, quietly = TRUE, warn.conflicts = FALSE) library(grid, quietly = TRUE, warn.conflicts = FALSE) library(gridExtra, quietly = TRUE, warn.conflicts = FALSE) library(png, quietly = TRUE, warn.conflicts = FALSE) library(extrafont, quietly = TRUE, warn.conflicts = FALSE) library(tinytex, quietly = TRUE, warn.conflicts = FALSE) library(stringr, quietly = TRUE, warn.conflicts = FALSE) library(lubridate, quietly = TRUE, warn.conflicts = FALSE) library(reshape2, quietly = TRUE, warn.conflicts = FALSE) library(ggrepel, quietly = TRUE, warn.conflicts = FALSE) library(mnormt, quietly = TRUE, warn.conflicts = FALSE) library(Ecdat, quietly = TRUE, warn.conflicts = FALSE) library(MASS, quietly = TRUE, warn.conflicts = FALSE) library(copula, quietly = TRUE, warn.conflicts = FALSE) library(fGarch, quietly = TRUE, warn.conflicts = FALSE) library(forecast, quietly = TRUE, warn.conflicts = FALSE) library(tseries, quietly = TRUE, warn.conflicts = FALSE) library(gmodels, quietly = TRUE, warn.conflicts = FALSE) library(rugarch, quietly = TRUE, warn.conflicts = FALSE) library(quantmod, quietly = TRUE, warn.conflicts = FALSE) library(gtools, quietly = TRUE, warn.conflicts = FALSE) options(tinytex.verbose = TRUE) suppressMessages(library("tidyverse")) pretty_kable <- function(data, title, dig = 2) { kable(data, caption = title, digits = dig) %>% kable_styling(bootstrap_options = c("striped", "hover")) %>% kableExtra::kable_styling(latex_options = "hold_position") } theme_set(theme_light()) # Theme Overrides theme_update(axis.text.x = element_text(size = 10), axis.text.y = element_text(size = 10), plot.title = element_text(hjust = 0.5, size = 16, face = "bold", color = "darkgreen"), axis.title = element_text(face = "bold", size = 12, colour = "steelblue4"), plot.subtitle = element_text(face = "bold", size = 8, colour = "darkred"), 
legend.title = element_text(size = 12, color = "darkred", face = "bold"), legend.position = "right", legend.title.align=0.5, panel.border = element_rect(linetype = "solid", colour = "lightgray"), plot.margin = unit(c( 0.1, 0.1, 0.1, 0.1), "inches")) data.dir <- "D:/Projects/Statistical-Computing/datasets/" setwd("D:/Projects/Statistical-Computing/RDS") ``` ```{r pander_setup, include = FALSE} knitr::opts_chunk$set(comment = NA) panderOptions('table.alignment.default', function(df) ifelse(sapply(df, is.numeric), 'right', 'left')) panderOptions('table.split.table', Inf) panderOptions('big.mark', ",") panderOptions('keep.trailing.zeros', TRUE) ``` ### 4.1 Consider the population {1, 2, 5, 6, 10, 12}. Find (and plot) the sampling distribution of medians for samples of size 3 without replacement. ```{r, echo = T, fig.height=4.5, fig.width=8} p <- c(1, 2, 5, 6, 10, 12) c <- combinations(v = p, n = 6, r = 3) t <- apply(c, 1, median) ggplot(data.table(value = t), aes(value, fill = ..count..)) + geom_histogram(bins = 30) + labs(title = "Median Sampling Distribution of p") ``` Compare the median of the population to the mean of the medians. Median of p = __`r median(p)`__. Mean of Medians of p = __`r mean(t)`__ \newpage ### 4.2 Consider the population {3, 6, 7, 9, 11, 14}. For samples of size 3 without replacement, find (and plot) the sampling distribution for the minimum. ```{r, echo = T, fig.height=4.5, fig.width=8} p <- c(3, 6, 7, 9, 11, 14) c <- combinations(v = p, n = 6, r = 3) t <- apply(c, 1, min) ggplot(data.table(value = t), aes(value, fill = ..count..)) + geom_histogram(bins = 30) + labs(title = "Minimum Sampling Distribution of p") ``` What is the mean of the sampling distribution? __`r mean(t)`__ The statistic is an estimate of some parameter - what is the value of that parameter? This is an estimation of the minimum, which is: __`r min(p)`__ \newpage ### 4.3 Let _A_ denote the population {1, 3, 4, 5} and _B_ the population {5, 7, 9}. 
```{r, echo = T} A <- c(1, 3, 4, 5) B <- c(5, 7, 9) ``` Let _X_ be a random value from _A_, and _Y_ and random value from _B_. a.) Find the sampling distribution of _X + Y_. ```{r, echo = T} result = numeric(12) index <- 1 for(j in 1:length(A)) { for(k in 1:length(B)) { result[index] <- A[j] + B[k] index <- index + 1 } } sort(result) ``` b.) In this example, does the sampling distribution depend on whether you sample with or without replacement? _No._ Why or why not? _Because 5 in is both sets._ c.) Compute the mean of the values for each of _A_ and _B_ and the values in the sampling distribution of _X + Y_. Mean of _A_: __`r mean(A)`__. Mean of _B_: __`r mean(B)`__. Mean of _A + B_: __`r mean(result)`__ How are the means related? mean(A) + mean(B) = mean(A + B). d.) Suppose you draw a random value from _A_ and a random value from _B_. ```{r, echo = T} prob <- sum(result >= 13) / length(result) ``` What is the probability that the sum is 13 or larger? __`r prob*100`%__ \newpage ### 4.4 Consider the population {3, 5, 6, 6, 8, 11, 13, 15, 19, 20}. a.) Compute the mean and standard deviation and create a dot plot of its distribution. ```{r, echo = T, fig.height=3.5, fig.width=8} p <- c(3, 5, 6, 6, 8, 11, 13, 15, 19, 20) mu <- mean(p) sigma <- sd(p) ggplot(data.table(value = p)) + geom_dotplot(aes(value, fill = ..count..), binwidth = 1) + labs(title = "Population Dot Plot") ``` $\mu = `r mu`$, $\sigma = `r sigma`$ b.) Simulate the sampling distribution of $\bar{X}$ by taking random samples of size 4 and plot your results. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e2 results <- numeric(N) for( i in 1:N) { index <- sample(length(p), size = 4, replace = F) results[i] <- mean( p[index] ) } ggplot(data.table(value = results)) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Sample Means") xbar <- mean(results) se <- sd(results) / sqrt(N) ``` Compute the mean and standard error, and compare to the population mean and standard deviation. 
mean: `r xbar`, standard error: `r se` c.) Use the simulation to find $P(\bar{X} < 11)$. ```{r, echo = T} prob <- mean(results < 11) ``` $P(\bar{X} < 11) = `r prob*100`$% \newpage ### 4.5 Consider two populations A = {3, 5, 7, 9, 10, 16}, B = {8, 10, 11, 15, 18, 25, 28}. ```{r, echo = T} A <- c(3, 5, 7, 9, 10, 16) B <- c(8, 10, 11, 15, 18, 25, 28) ``` a.) Using R, draw random samples (without replacement) of size 3 from each population, and simulate the sampling distribution of the sum of their maximums. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e2 results <- numeric(N) for(i in 1:N) { samp.a <- sample(A, 3, replace = F) samp.b <- sample(B, 3, replace = F) results[i] <- max(samp.a) + max(samp.b) } ggplot(data.table(value = results)[, index := .I]) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Sampling Distribution: max(A) + max(B)") ``` b.) Use your simulation to estimate the probability that the sum of the maximums is less than 20. ```{r, echo = T} prob <- mean(results < 20) ``` Probability: `r prob*100`% c.) Draw random samples of size 3 from each population, and find the maximum of the union of these two sets. Simulate the sampling distribution of the maximums of this union. ```{r, echo = T, fig.height=3.5, fig.width=8} results <- numeric(N) for(i in 1:N) { samp.a <- sample(A, 3, replace = F) samp.b <- sample(B, 3, replace = F) results[i] <- max(union(samp.a, samp.b)) } ggplot(data.table(value = results)[, index := .I]) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Sampling Distribution: max(union(A,B))") ``` d.) Use simulation to find the probability that the maximum of the union is less than 20. ```{r, echo = T} prob <- mean(results < 20) ``` Probability: `r prob*100`% \newpage ### 4.6 The data set _Recidivism_ contains the poopulation of all Iowa offenders convicted of either a felony or misdemeanor who were released in 2010 (case study in Section 1.4). 
```{r, echo = T}
Recidivism <- data.table(read.csv(paste0(data.dir, "Recidivism.csv"), header = T))
```

Of these, 31.6% recidivated and were sent back to prison.

Simulate the sampling distribution of $\hat{p}$, the sample proportion of offenders who recidivated, for random samples of size 25.

```{r, echo = T}
mean(Recidivism$Recid == "Yes")

N <- 10e2

results <- numeric(N)
for(i in 1:N)
{
   samp <- sample(Recidivism$Recid, 25)
   results[i] <- mean(samp == "Yes")
}
```

a.) Create a histogram and describe the simulated sampling distribution of $\hat{p}$.

```{r, echo = T, fig.height=3.5, fig.width=8}
ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Recidivism Sampling Distribution")
```

Estimate the mean and standard error.

```{r, echo = T}
mu <- mean(results)
# the sd of the simulated proportions is already the standard error
se <- sd(results)
```

$\mu = `r mu`$, $\sigma = `r se`$

b.) Compare your estimate of the standard error with the theoretical standard error (_Corollary 4.3.2_).

```{r, echo = T}
# theoretical SE of a sample proportion: sqrt(p (1 - p) / n)
tse <- sqrt( mu * ( 1 - mu ) / 25 )
```

Theoretical: `r tse`

c.) Repeat the above using samples of size 250, and compare with the $n = 25$ case.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e2

results <- numeric(N)
for(i in 1:N)
{
   samp <- sample(Recidivism$Recid, 250)
   results[i] <- mean(samp == "Yes")
}

ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Recidivism Sampling Distribution")

mu <- mean(results)
se <- sd(results)
```

$\mu = `r mu`$, $\sigma = `r se`$

### 4.7

The data set _FlightDelays_ contains the population of all flight departures by United Airlines and American Airlines out of LGA during May and June 2009 (case study in Section 1.1).

```{r, echo = T}
Flights <- data.table(read.csv(paste0(data.dir, "FlightDelays.csv"), header = T))
```

a.) Create a histogram of _Delay_ and describe the distribution.
```{r, echo = T, fig.height=3.5, fig.width=8}
ggplot(Flights, aes(Delay)) +
   geom_histogram(aes(fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Flight Delays")
```

Compute the mean and standard deviation.

```{r, echo = T}
mu <- mean(Flights$Delay)
sigma <- sd(Flights$Delay)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

b.) Simulate the sampling distribution of $\bar{x}$, the sample mean of the length of the flight delays (_Delay_), for sample size 25.

```{r, echo = T}
N <- 10e2

results <- numeric(N)
for(i in 1:N)
{
   samp <- sample(Flights$Delay, 25, replace = F)
   results[i] <- mean(samp)
}
```

Create a histogram and describe the simulated sampling distribution of $\bar{x}$.

```{r, echo = T, fig.height=3.5, fig.width=8}
ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Flight Delay Sampling Distribution")
```

Estimate the mean and standard error.

```{r, echo = T}
mu <- mean(results)
# sd of the simulated means is itself the standard error
se <- sd(results)
```

$\mu = `r mu`$, $\sigma = `r se`$

c.) Compare your estimate of the standard error with the theoretical standard error (_Corollary A.4.1_).

```{r, echo = T}
# theoretical SE of the sample mean: population sd / sqrt(n)
tse <- sd(Flights$Delay) / sqrt(25)
```

Theoretical: `r tse`

d.) Repeat with sample size 250.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e2

results <- numeric(N)
for(i in 1:N)
{
   samp <- sample(Flights$Delay, 250, replace = F)
   results[i] <- mean(samp)
}

ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Flight Delay Sampling Distribution")

mu <- mean(results)
se <- sd(results)

tse <- sd(Flights$Delay) / sqrt(250)
```

$\mu = `r mu`$, $\sigma = `r se`$

Theoretical: `r tse`

### 4.8

Let $X_1, X_2, \ldots, X_{25}$ be a random sample from some distribution and $W = T(X_1, X_2, \ldots, X_n)$ be a statistic.

Suppose the _sampling distribution_ of W has a pdf given by $f(x) = \frac{2}{x^2}$, for 1 < x < 2.
Find $P(w < 1.5)$ __Solution__: ```{r, echo = T, fig.height=3.5, fig.width=8} f <- function(x) 2 / x^2 x <- seq( from = 1.0001, to = 1.999, by = 0.0001) y <- f(x) ggplot(data.table(x, y)) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + labs(title = "pdf f(x) = 2/x^2") a <- cumsum(y) / sum(y) p <- round( a[x == 1.5], 4 ) * 100 d <- data.table(x, y = a) ggplot(d) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + geom_area(aes(x, y), data = d[x < 1.5], fill = "cornflowerblue", alpha = .3) + labs(title = paste("cdf f(x) = 2/x^2, A =", p )) ``` Numerical solution: `r p`% Analytical Solution: ${\int_{1}^{1.5}}\frac{2}{x^2} = \frac{2}{3}$ ### 4.9 Let $X_1, X_2, \ldots, X_{n}$ be a random sample from some distribution and $Y = T(X_1, X_2, \ldots, X_n)$ be a statistic. Suppose the _sampling distribution_ of Y has pdf $f(y) = (3/8)y^2 \ for \ 0 \le y \le 2$. Find $P(0 \le Y \le \frac{1}{5})$ __Solution__: ```{r, echo = T, fig.height=3.5, fig.width=8} f <- function(x) (3/8)*x**2 x <- seq( from = 0, to = 2, by = 0.001) y <- f(x) ggplot(data.table(x, y)) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + labs(title = paste("pdf: ", paste0(deparse(f), collapse = " "))) a <- cumsum(y) / sum(y) p <- round( a[x == 1/5], 4 ) * 100 d <- data.table(x, y = a) ggplot(d) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + geom_area(aes(x, y), data = d[x < 1/5], fill = "cornflowerblue", alpha = .3) + labs(title = paste("cdf f(x) = 2/x^2, A =", p )) ``` Numerical Solution: `r p`% Analytical Solution: ${\int_{0}^{\frac{1}{5}}}\frac{x^3}{8} = \frac{.008}{8} = .001 = .1$ % ### 4.10 Suppose the heights of boys in a certain large city follow a distribution with mean 48 in. and variance $9^2$. Use the CLT approximation to estimate the probability that in a random sample of 30 boys, the mean height is more than 51 in. 
```{r, echo = T}
# variance is 9^2, so the population sd is 9; SE of the mean = 9 / sqrt(30)
z <- (51 - 48) / (9 / sqrt(30))
p <- pnorm(z, lower.tail = F)
```

Probability: __`r round(p, 4)*100`%__

### 4.11

Let $X_1, X_2, \ldots, X_{36} \sim Bern(.55)$ be independent, and let $\hat{p}$ denote the sample proportion.

Use the CLT approximation with continuity correction to find the probability that $\hat{p} \le 0.5$.

```{r, echo = T}
# phat <= 0.5 means X <= 18 successes out of 36;
# with continuity correction use X <= 18.5
z <- ( 18.5 - 36 * .55 ) / sqrt( 36 * .55 * (1 - .55) )
p <- pnorm(z, lower.tail = T)
```

Probability: `r round(p, 4)*100`%

### 4.12

A random sample of size $n = 20$ is drawn from a distribution with mean 6 and variance 10.

Use the CLT approximation to estimate $P(\bar{X} \le 4.6)$.

```{r, echo = T}
# SE of the sample mean = sqrt(variance / n) = sqrt(10 / 20)
z <- ( 4.6 - 6 ) / sqrt( 10 / 20 )
p <- pnorm(z, lower.tail = T)
```

Probability: `r round(p, 4)*100`%

### 4.13

A random sample of size $n = 244$ is drawn from a distribution with pdf $f(x) = (3/16)(x - 4)^2, 2 \le x \le 6$.

Use the CLT approximation to estimate $P(X \ge 4.2)$.

```{r, echo = T, fig.height=2.6, fig.width=8}
pdf <- function(x) (3/16)*(x - 4)^2

x <- seq(from = 2, to = 6, by = 0.001)
y <- pdf(x)

ggplot(data.table(x, y)) +
   geom_point(aes(x, y), col = "cornflowerblue", lwd = .8) +
   labs(title = paste("PDF: ", paste0(deparse(pdf), collapse = " ")))

# numerical mean and sd of the distribution from the grid
w <- y / sum(y)
ev <- sum(x * w)
sigma <- sqrt(sum((x - ev)^2 * w))

cdf <- cumsum(w)

ggplot(data.table(x, y = cdf)) +
   geom_point(aes(x, y), col = "cornflowerblue", lwd = .8) +
   geom_vline(xintercept = ev) +
   labs(title = paste("CDF: ", paste0(deparse(pdf), collapse = " ")))

# CLT: standardize the sample mean by sigma / sqrt(n)
z <- ( 4.2 - ev ) / ( sigma / sqrt(244) )
pnorm(z, lower.tail = F)
```

### 4.14

According to the 2000 census, 28.6% of the US adult population received a high school diploma.

In a random sample of 800 US adults, what is the probability that between 220 and 230 (inclusive) people have a high school diploma?

Use the CLT approximation with continuity correction, and compare with the exact probability.
__Solution__: The sampling distribution of $\hat{p}$ is approximately normal with: ```{r, echo = T} n <- 800 mu <- .286 ev <- 800 * mu sigma <- sqrt(n*mu*(1-mu)) ``` $\mathbb{E}[X] = `r ev`$ and $\sigma = \sqrt{800(.286)(1 - .286)} = `r round(sigma, 4)`$ ```{r, echo = T} l <- pnorm((ev - 219.5) / sigma) h <- pnorm((ev - 230.5) / sigma) p <- l - h ``` Probability: `r round(p, 4)` ### 4.15 If $X_1, \ldots, X_n$ are i.i.d. from Unif[0, 1], how large should n be so that $P(\bar{X} - \frac{1}{2} < 0.05) \ge 0.90$, that is, is there at least a 90% chance that the sample mean is within 0.05 of $\frac{1}{2}$? Use the CLT approximation. ```{r, echo = T} ``` ### 4.16 <NAME> claims that she has drawn a random sample of size 30 from the exponential distribution with $\lambda = 1/10$. The mean of her sample is 12. a.) What is the expected value of a sample mean? $X \sim Exp(\frac{1}{10}), \mathbb{E}(x) = 10$ b.) Run a simulation by drawing 1000 random samples, each of size 30, from Exp(1/10), and compute the mean for each sample. ```{r, echo = T, fig.height=3.5, fig.width=12} N <- 1000 result <- numeric(N) for( i in 1:N) { samp <- rexp( n = 30, rate = 1/10) result[i] <- mean(samp) } ggplot(data.table(result), aes(result)) + geom_histogram(aes(y = ..density.., fill = ..count..), bins = 30) + geom_vline(xintercept = 12, col = "darkorange") + stat_density( kernel = "gaussian", fill = "darkorange", alpha = .3) + labs(title = "Exp(1/10) Sampling Distribution") p <- mean(result > 12) ``` What proportion of the sample means is as large or larger than 12? __`r p*100`%__ c.) Is a mean of 12 unusual for a sample of size 30 from Exp(1/10)? __Yes__, only ~13% of the sample means have a value of 12 or higher. ### 4.17 Let $X \sim N(15, 3^2)$ and $Y \sim N(4, 2^2)$ be independent random variables. a.) What is the exact sampling distribution of $W = X - 2Y$? $W \sim N(7, 5^2)$ b.) Use R to simulate the sampling distribution of $W$ and plot your results. 
```{r, echo = T, fig.height=3.5, fig.width=8} X <- rnorm(10e3, 15, 3) Y <- rnorm(10e3, 4, 2) W <- X - 2*Y ggplot(data.table(x = W)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + geom_vline(xintercept = 7, col = "darkorange", lwd = 1.5) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` Check that the simulated mean and standard error are close to the theoretical mean and standard error. ```{r, echo = T} mu <- mean(W) sigma <- sd(W) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ c.) Use the simulated sampling to estimate $P(W \le 10)$, and then check your estimate with an exact calculation. ```{r, echo = T} phat <- mean(W <= 10) p <- pnorm(10, mean = 7, sd = 5) ``` $\hat{p}= `r phat*100`$% $P(W \le 10) = `r round(p, 4)*100`$% ### 4.18 Let $X \sim Pois(4)$, $Y \sim Pois(12)$, $U \sim Pois(3)$ be independent random variables. a.) What is the exact sampling distribution of $W = X + Y + U$? $W \sim Pois(19)$ b.) Use R to simulate the sampling distribution of $W$ and plot your results. ```{r, echo = T, fig.height=3.5, fig.width=8} W <- rpois(10e3, lambda = 19) ggplot(data.table(x = W)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + geom_vline(xintercept = 19, col = "darkorange", lwd = 1.5) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` Check that the simulated mean and standard error are close to the theoretical mean and standard error. ```{r, echo = T} mu <- mean(W) sigma <- sd(W) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ c.) Use the simulated sampling distribution to estimate $P(W \le 14)$ and then check your estimate with an exact calculation. ```{r, echo = T} phat <- mean(W <= 14) p <- ppois(14, lambda = 19) ``` $\hat{p} = `r phat*100`$% $P(W \le 14) = `r round(p, 4)*100`$% ### 4.19 Let $X_1, X_2, \ldots, X_{10} \sim^{i.i.d} N(20, 8^2)$ and $Y_1, Y_2, \ldots, Y_{15} \sim^{i.i.d} N(16, 7^2)$. Let $W = \bar{X} + \bar{Y}$ a.) Give the exact sampling distribution of W. 
$\sigma = (10 + 15) / sqrt( 10 + 15 - 1) = 3.1$ $W \sim N(36, 3.1^2)$ b.) Simulate the sampling distribution in R and plot your results. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e3 result <- numeric(N) for( i in 1:N) { X <- rnorm(10, 20, 8) Y <- rnorm(15, 16, 7) result[i] <- mean(X) + mean(Y) } ggplot(data.table(x = result)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + geom_vline(xintercept = 36, col = "darkorange", lwd = 1.5) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` Check that the simulated mean and standard error are close to the exact mean and standard error. ```{r, echo = T} mu <- mean(result) sigma <- sd(result) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ c.) Use your simulation to find $P(W < 40)$. Calculate an exact answer and compare. ```{r, echo = T} phat <- mean(result <= 40) p <- pnorm(40, 36, 3) ``` $\hat{p} = `r phat*100`$% $P(W < 40) = `r round(p, 4)*100`$% ### 4.20 Let $X_1, X_2, \ldots, X_9 \sim^{i.i.d.} N(7, 3^2)$, and $Y_1, Y_2, \ldots, X_{12} \sim^{i.i.d.} N(10, 5^2)$. Let $W = \bar{X} - \bar{Y}$. a.) Give the sampling distribution of $W$. $\sigma = (3 + 5) / sqrt( 9 + 12 - 1 ) = 1.79$ $W = N(-3, 1.79^2)$ b.) Simulate the sampling distribution of W in R, and plot your results. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { X <- rnorm(9, 7, 3) Y <- rnorm(12, 10, 5) result[i] <- mean(X) - mean(Y) } ggplot(data.table(x = result)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + geom_vline(xintercept = -3, col = "darkorange", lwd = 1.5) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` Check that the simulated mean and standard error are close to the theoretical mean and standard error. ```{r, echo = T} mu <- mean(result) sigma <- sd(result) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ c.) Use your simulation to find $P(W < -1.5)$. 
```{r, echo = T} phat <- mean(result <= -1.5) p <- pnorm(-1.5, -3, 1.79) ``` $\hat{p} = `r phat*100`$% Calculate an exact answer and compare. $P(W < 1.5) = `r round(p, 4)*100`$% ### 4.21 Let $X_1, X_2, \ldots, X_N$ be a random sample from $N(0, 1)$. Let $W = X^2_1 + X^2_2 + \ldots + X^2_n$ What is the mean and variance of the sampling distribution of W? $\mu = 0$, $\sigma = 1$ Repeat using N = 4, N = 5. $N = 4, \sigma = 4 / sqrt(4 - 1) = 2.3$ $N = 5, \sigma = 5 / sqrt(5 - 1) = 2.5$ What observations or conjectures do you have for general __n__? ### 4.22 Let $X$ be a uniform random variable on the interval $[40, 60]$ and Y be a uniform random variable on $[45, 80]$. Assume that X and Y are independent. a.) Compute the expected value and variance of $X + Y$. $\mu = 112.5$, $Var = 1/24*(140 - 85)^2 = 126.04$ b.) Simulate a sampling distribution of $X + Y$. ```{r, echo = T, fig.height=3.5, fig.width=8} X <- runif(1000, 40, 60) Y <- runif(1000, 45, 80) total <- X + Y ggplot(data.table(value = total)) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs("X + Y Sampling Distribution") ``` Describe the sampling distribution of $X + Y$. __Approximately Normal__ Compute the mean and variance of the sampling distribution and compare this with the theoretical mean and variance. ```{r, echo = T} mu <- mean(total) var <- var(total) ``` $\mu = `r mu`$, $Var = `r var`$ c.) Suppose the time (in minutes) <NAME> takes to complete his statistics homework is $Unif[40, 60]$ and the time <NAME> takes is $Unif[45, 80]$. Assume they work independently. One day they announce that their total time to finish an assignment was less than 90 minutes. How likely is this? ```{r, echo = T, fig.height=3.5, fig.width=8} p <- punif(90, 85, 140) ``` Probability: __`r round(p, 4)*100`%__ ### 4.23 Let $X_1, X_2, \ldots, X_{20} \sim^{i.i.d.} Exp(2)$. Let $X = \sum^{20}_{i=1}X_i$. a.) Simulate the sampling distribution of $X$ in R. 
```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp(n = 20, rate = 2) result[i] <- sum(samp) } ``` b.) From your simulation, find $\mathbb{E}[X]$ and $Var[X]$. ```{r, echo = T} mu <- mean(result) var <- var(result) ``` $\mu = `r mu`$, $Var = `r var`$ c.) From your simulation, find $P( X \le 10)$. ```{r, echo = T} phat <- mean(result <= 10) ``` Probablity: _`r round(phat, 4)*100`%_ ### 4.24 Let $X_1, X_2, \ldots, X_{30} \sim^{i.i.d.} Exp(1/3)$ and let $\bar{X}$ denote the sample mean. a.) Simulate the sampling distribution of $\bar{X}$ in R. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp(n = 30, rate = 1/3) result[i] <- mean(samp) } ``` b.) Find the mean and standard error of the sampling distribution, and compare with the theoretical results. ```{r, echo = T} mu <- mean(result) sigma <- sd(result) ``` Sample: $\mu = `r mu`$, $\sigma = `r sigma`$ Theoretical: $\mu = \frac{\lambda}{1} = 3$, $sigma = 3/sqrt(30 - 1) = .56$ c.) From your simulation, find $P(\bar{X} \le 3.5)$. ```{r, echo = T} phat <- mean(result <= 3.5) ``` $\hat{p} = `r phat`$ d.) Estimate $P(\bar{X} \le 3.5)$ by assuming the CLT approximation holds. Compare this result with the one in part __(c)__. ```{r, echo = T} z <- (3.5 - 3) / sigma p <- pnorm(z) ``` $p = `r round(p, 4)`$ ### 4.25 Consider the exponential distribution with density $f(x) \frac{1}{20}e^{-x/20}$, with mean and standard deviation of 20. a.) Calculate the median of this distribution. $0.5 = \int_{m}^{\inf}f(x)dx = -e^{-x/20}$ $\ldots = M = 20*log(2)$ $\ldots = `r 20*log(2)`$ b.) Using R, draw a random sample of size 50 and graph the histogram. ```{r, echo = T, fig.height=3.5, fig.width=8} samp <- rexp( n = 50, rate = 1/20) ggplot(data.table(x = samp)) + geom_histogram(aes(x, fill = ..count..)) + labs(title = "RExp(50, 1/20)") ``` What are the mean and standard deviation of your sample? 
```{r, echo = T}
mu <- mean(samp)
sigma <- sd(samp)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

c.) Run a simulation to find the (approximate) sampling distribution for the median of sample size 50 from the exponential distribution and describe it.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e3

result <- numeric(N)
for(i in 1:N)
{
   samp <- rexp( n = 50, rate = 1/20)
   result[i] <- median(samp)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

What is the mean and the standard error of this sampling distribution?

```{r, echo = T}
mu <- mean(result)
# The sd of the simulated medians is itself the standard error of the
# sampling distribution; dividing by sqrt(n) again double-counts.
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

d.) Repeat the above but use sample sizes $n = 100, 500 \ and \ 1,000$.

```{r, echo = T, fig.height=3, fig.width=8}
N <- 10e3

result <- numeric(N)
for(i in 1:N)
{
   samp <- rexp( n = 100, rate = 1/20)
   result[i] <- median(samp)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

```{r, echo = T}
mu <- mean(result)
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

```{r, echo = T, fig.height=3, fig.width=8}
N <- 10e3

result <- numeric(N)
for(i in 1:N)
{
   samp <- rexp( n = 500, rate = 1/20)
   result[i] <- median(samp)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

```{r, echo = T}
mu <- mean(result)
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

```{r, echo = T, fig.height=3, fig.width=8}
N <- 10e3

result <- numeric(N)
for(i in 1:N)
{
   samp <- rexp( n = 1000, rate = 1/20)
   result[i] <- median(samp)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```
```{r, echo = T}
mu <- mean(result)
# sd of the simulated medians is already the standard error; no /sqrt(n)
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

How does sample size affect the sampling distribution?

_The sample mean and standard error converge to the analytical solution with increased sample size._

### 4.26

Prove theorem 4.2.1.

### 4.27

Let $X_1, X_2 \sim^{i.i.d.} F$ with corresponding pdf $f(x) = \frac{2}{x^2}$, $1 \le x \le 2$.

a.) Find the pdf of $X_{max}$.

$F_{max}(x) = 8(\frac{1}{x^2} - \frac{1}{x^3})$

b.) Find the expected value of $X_{max}$.

$\mathbb{E}{[F_{max}]} = 1.545$

### 4.28

Let $X_1, X_2, \ldots, X_N \sim^{i.i.d.}$ with corresponding pdf $f(x) = 3x^2, 0 \le x \le 1$.

a.) Find the pdf for $X_{min}$.

b.) Find the pdf for $X_{max}$.

c.) If $n = 10$, find the probability that the largest value, $X_{max}$, is greater than 0.92.

### 4.29

Compute the pdf of the sampling distribution of the maximum samples of size 10 from a population with an exponential distribution with $\lambda = 12$.

### 4.30

Let $X_1, X_2, \ldots, X_N \sim^{i.i.d.} Exp(\lambda)$ with pdf $f(x) = \lambda e^{-\lambda x}, \lambda > 0, x > 0$.

a.) Find the pdf $f_{min}(x)$ for the sample minimum $X_{min}$. Recognize this as the pdf of a known distribution.

b.) Simulate in R the sampling distribution of $X_{min}$ of samples of size $n = 25$ from the exponential distribution with $\lambda = 7$.

Compare the theoretical expected value of $X_{min}$ with the simulated expected value.

### 4.31

Let $X_1, X_2, \ldots, X_n \sim^{i.i.d.} Pois(3)$. Let $X = \sum^{10}_{i=1}X_i$.

Find the pdf for the sampling distribution of X.

### 4.32

Let $X_1$ and $X_2$ be independent exponential random variables, both with parameter $\lambda > 0$.

Find the cumulative distribution function for the sampling distribution of $X = X_1 + X_2$.

### 4.33

This simulation illustrates the CLT for a finite population.
```{r, echo = T, fig.height=3.5, fig.width=8} N <- 400 n <- 5 finpop <- rexp(N, 1/10) ggplot(data.table(x = finpop)) + geom_histogram(aes(x, fill = ..count..)) + labs(title = "Exp(1/10)") mean(finpop) #mean (mu) of your pop. sd(finpop) # stdev (sigma) of your pop. sd(finpop)/sqrt(n) # theoretical standard error of sampling distribution sd(finpop)/sqrt(n) * sqrt((N-n)/(N-1)) # without replacement Xbar <- numeric(1000) for(i in 1:1000) { x <- sample(finpop, n) # Random sample of size n # (w/o replacement) Xbar[i] <- mean(x) } p1 <- ggplot(data.table(x = Xbar)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + labs(title = "Mean Sampling Distribution Histogram") p2 <- ggplot(data.table(x = Xbar), aes(sample = x)) + stat_qq() + stat_qq_line() + labs(title = "Mean Sampling Distribution QQ-Plot") grid.arrange(p1, p2, nrow = 2) mean(Xbar) sd(Xbar) # estimated standard error of sampling distribution ``` a.) Does the sampling distribution of sample means appear approximately normal? b.) Compare the mean and standard error of your simulated sampling distribution with the theoretical ones. c.) Calculate $(\sigma/\sqrt(n))(\sqrt{(N-n)/(N-1)}$, where $\sigma$ is the standard deviation of the finite population and compare with the (estimated) standard error of the sampling distribution. d.) Repeat for larger __n__, say __n__ = 20 and __n__ = 100. ### 4.34 Let $X_1, X_2, \ldots, X_n$ be independent random variables from $N(\mu, \sigma)$. We are interested in the sampling distribution of the variance. Run a simulation to draw random samples of size 20 from $N(25, 7^2)$ and calculate the variance for each sample. 
```{r, echo = T, fig.height=3.5, fig.width=8} W <- numeric(1000) for(i in 1:1000) { x <- rnorm(20, 25, 7) W[i] <- var(x) } mean(W) var(W) p1 <- ggplot(data.table(x = W)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + labs(title = "Variance Sampling Distribution") p2 <- ggplot(data.table(value = W), aes(sample = value)) + stat_qq() + stat_qq_line() + labs(title = "Mean Sampling Distribution QQ-Plot") grid.arrange(p1, p2, nrow = 2) ``` Does the sampling distribution appear to be normally distributed? Repeat with n = 50 and n = 200. ### 4.35 A random sample of size $n = 100$ is drawn from a distribution with pdf $f(x) = 3(1- x)^2, 0 \le x \le 1$. a.) Use the CLT approximation to estimate $P(\bar{X} \le 0.27)$. b.) Use the expanded CLT to estimate the same probability (dnorm). c.) If $X_1, X_2, X_3 \sim^{i.i.d.} Unif[0, 1]$, then the minimum has density __f__ given above. Use simulation to estimate the probability.
true
--- title: '' mainfont: Arial fontsize: 12pt documentclass: report header-includes: - \PassOptionsToPackage{table}{xcolor} - \usepackage{caption} - \usepackage{amssymb} - \usepackage{booktabs} - \usepackage{longtable} - \usepackage{array} - \usepackage{multirow} - \usepackage{wrapfig} - \usepackage{float} - \usepackage{colortbl} - \usepackage{pdflscape} - \usepackage{tabu} - \usepackage{threeparttable} - \usepackage{threeparttablex} - \usepackage[normalem]{ulem} - \usepackage{makecell} - \usepackage[table]{xcolor} - \usepackage{fancyhdr} - \usepackage{boldline} - \usepackage{tipa} \definecolor{headergrey}{HTML}{545454} \definecolor{msdblue}{HTML}{1C93D1} \pagestyle{fancy} \setlength\headheight{30pt} \rhead{\color{headergrey}\today} \fancyhead[L]{\color{headergrey}PI:NAME:<NAME>END_PI, PI:NAME:<NAME>END_PI} \fancyhead[C]{\Large\bfseries\color{headergrey}Sampling Distributions} \rfoot{\color{headergrey}\thepage} \lfoot{\color{headergrey}Chapter 4} \fancyfoot[C]{\rmfamily\color{headergrey}Mathematical Statistics} geometry: left = 1cm, right = 1cm, top = 2cm, bottom = 3cm date: "`r format(Sys.time(), '%d %B, %Y')`" output: pdf_document: fig_caption: yes latex_engine: xelatex editor_options: chunk_output_type: console --- ```{r knitr_setup, include = FALSE} # DO NOT ADD OR REVISE CODE HERE knitr::opts_chunk$set(echo = FALSE, eval = TRUE, dev = 'png') options(knitr.table.format = "latex") ``` ```{r report_setup, message = FALSE, warning = FALSE, include = FALSE} library(data.table, quietly = TRUE, warn.conflicts = FALSE) assignInNamespace("cedta.pkgEvalsUserCode", c(data.table:::cedta.pkgEvalsUserCode, "rtvs"), "data.table") library(ggplot2, quietly = TRUE, warn.conflicts = FALSE) library(ggrepel, quietly = TRUE, warn.conflicts = FALSE) library(ggthemes, quietly = TRUE, warn.conflicts = FALSE) library(knitr, quietly = TRUE, warn.conflicts = FALSE) library(kableExtra, quietly = TRUE, warn.conflicts = FALSE) library(Rblpapi, quietly = TRUE, warn.conflicts = FALSE) 
library(scales, quietly = TRUE, warn.conflicts = FALSE) library(pander, quietly = TRUE, warn.conflicts = FALSE) library(dplyr, quietly = TRUE, warn.conflicts = FALSE) library(formattable, quietly = TRUE, warn.conflicts = FALSE) library(grid, quietly = TRUE, warn.conflicts = FALSE) library(gridExtra, quietly = TRUE, warn.conflicts = FALSE) library(png, quietly = TRUE, warn.conflicts = FALSE) library(extrafont, quietly = TRUE, warn.conflicts = FALSE) library(tinytex, quietly = TRUE, warn.conflicts = FALSE) library(stringr, quietly = TRUE, warn.conflicts = FALSE) library(lubridate, quietly = TRUE, warn.conflicts = FALSE) library(reshape2, quietly = TRUE, warn.conflicts = FALSE) library(ggrepel, quietly = TRUE, warn.conflicts = FALSE) library(mnormt, quietly = TRUE, warn.conflicts = FALSE) library(Ecdat, quietly = TRUE, warn.conflicts = FALSE) library(MASS, quietly = TRUE, warn.conflicts = FALSE) library(copula, quietly = TRUE, warn.conflicts = FALSE) library(fGarch, quietly = TRUE, warn.conflicts = FALSE) library(forecast, quietly = TRUE, warn.conflicts = FALSE) library(tseries, quietly = TRUE, warn.conflicts = FALSE) library(gmodels, quietly = TRUE, warn.conflicts = FALSE) library(rugarch, quietly = TRUE, warn.conflicts = FALSE) library(quantmod, quietly = TRUE, warn.conflicts = FALSE) library(gtools, quietly = TRUE, warn.conflicts = FALSE) options(tinytex.verbose = TRUE) suppressMessages(library("tidyverse")) pretty_kable <- function(data, title, dig = 2) { kable(data, caption = title, digits = dig) %>% kable_styling(bootstrap_options = c("striped", "hover")) %>% kableExtra::kable_styling(latex_options = "hold_position") } theme_set(theme_light()) # Theme Overrides theme_update(axis.text.x = element_text(size = 10), axis.text.y = element_text(size = 10), plot.title = element_text(hjust = 0.5, size = 16, face = "bold", color = "darkgreen"), axis.title = element_text(face = "bold", size = 12, colour = "steelblue4"), plot.subtitle = element_text(face = "bold", size = 
8, colour = "darkred"), legend.title = element_text(size = 12, color = "darkred", face = "bold"), legend.position = "right", legend.title.align=0.5, panel.border = element_rect(linetype = "solid", colour = "lightgray"), plot.margin = unit(c( 0.1, 0.1, 0.1, 0.1), "inches")) data.dir <- "D:/Projects/Statistical-Computing/datasets/" setwd("D:/Projects/Statistical-Computing/RDS") ``` ```{r pander_setup, include = FALSE} knitr::opts_chunk$set(comment = NA) panderOptions('table.alignment.default', function(df) ifelse(sapply(df, is.numeric), 'right', 'left')) panderOptions('table.split.table', Inf) panderOptions('big.mark', ",") panderOptions('keep.trailing.zeros', TRUE) ``` ### 4.1 Consider the population {1, 2, 5, 6, 10, 12}. Find (and plot) the sampling distribution of medians for samples of size 3 without replacement. ```{r, echo = T, fig.height=4.5, fig.width=8} p <- c(1, 2, 5, 6, 10, 12) c <- combinations(v = p, n = 6, r = 3) t <- apply(c, 1, median) ggplot(data.table(value = t), aes(value, fill = ..count..)) + geom_histogram(bins = 30) + labs(title = "Median Sampling Distribution of p") ``` Compare the median of the population to the mean of the medians. Median of p = __`r median(p)`__. Mean of Medians of p = __`r mean(t)`__ \newpage ### 4.2 Consider the population {3, 6, 7, 9, 11, 14}. For samples of size 3 without replacement, find (and plot) the sampling distribution for the minimum. ```{r, echo = T, fig.height=4.5, fig.width=8} p <- c(3, 6, 7, 9, 11, 14) c <- combinations(v = p, n = 6, r = 3) t <- apply(c, 1, min) ggplot(data.table(value = t), aes(value, fill = ..count..)) + geom_histogram(bins = 30) + labs(title = "Minimum Sampling Distribution of p") ``` What is the mean of the sampling distribution? __`r mean(t)`__ The statistic is an estimate of some parameter - what is the value of that parameter? This is an estimation of the minimum, which is: __`r min(p)`__ \newpage ### 4.3 Let _A_ denote the population {1, 3, 4, 5} and _B_ the population {5, 7, 9}. 
```{r, echo = T} A <- c(1, 3, 4, 5) B <- c(5, 7, 9) ``` Let _X_ be a random value from _A_, and _Y_ and random value from _B_. a.) Find the sampling distribution of _X + Y_. ```{r, echo = T} result = numeric(12) index <- 1 for(j in 1:length(A)) { for(k in 1:length(B)) { result[index] <- A[j] + B[k] index <- index + 1 } } sort(result) ``` b.) In this example, does the sampling distribution depend on whether you sample with or without replacement? _No._ Why or why not? _Because 5 in is both sets._ c.) Compute the mean of the values for each of _A_ and _B_ and the values in the sampling distribution of _X + Y_. Mean of _A_: __`r mean(A)`__. Mean of _B_: __`r mean(B)`__. Mean of _A + B_: __`r mean(result)`__ How are the means related? mean(A) + mean(B) = mean(A + B). d.) Suppose you draw a random value from _A_ and a random value from _B_. ```{r, echo = T} prob <- sum(result >= 13) / length(result) ``` What is the probability that the sum is 13 or larger? __`r prob*100`%__ \newpage ### 4.4 Consider the population {3, 5, 6, 6, 8, 11, 13, 15, 19, 20}. a.) Compute the mean and standard deviation and create a dot plot of its distribution. ```{r, echo = T, fig.height=3.5, fig.width=8} p <- c(3, 5, 6, 6, 8, 11, 13, 15, 19, 20) mu <- mean(p) sigma <- sd(p) ggplot(data.table(value = p)) + geom_dotplot(aes(value, fill = ..count..), binwidth = 1) + labs(title = "Population Dot Plot") ``` $\mu = `r mu`$, $\sigma = `r sigma`$ b.) Simulate the sampling distribution of $\bar{X}$ by taking random samples of size 4 and plot your results. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e2 results <- numeric(N) for( i in 1:N) { index <- sample(length(p), size = 4, replace = F) results[i] <- mean( p[index] ) } ggplot(data.table(value = results)) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Sample Means") xbar <- mean(results) se <- sd(results) / sqrt(N) ``` Compute the mean and standard error, and compare to the population mean and standard deviation. 
mean: `r xbar`, standard error: `r se` c.) Use the simulation to find $P(\bar{X} < 11)$. ```{r, echo = T} prob <- mean(results < 11) ``` $P(\bar{X} < 11) = `r prob*100`$% \newpage ### 4.5 Consider two populations A = {3, 5, 7, 9, 10, 16}, B = {8, 10, 11, 15, 18, 25, 28}. ```{r, echo = T} A <- c(3, 5, 7, 9, 10, 16) B <- c(8, 10, 11, 15, 18, 25, 28) ``` a.) Using R, draw random samples (without replacement) of size 3 from each population, and simulate the sampling distribution of the sum of their maximums. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e2 results <- numeric(N) for(i in 1:N) { samp.a <- sample(A, 3, replace = F) samp.b <- sample(B, 3, replace = F) results[i] <- max(samp.a) + max(samp.b) } ggplot(data.table(value = results)[, index := .I]) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Sampling Distribution: max(A) + max(B)") ``` b.) Use your simulation to estimate the probability that the sum of the maximums is less than 20. ```{r, echo = T} prob <- mean(results < 20) ``` Probability: `r prob*100`% c.) Draw random samples of size 3 from each population, and find the maximum of the union of these two sets. Simulate the sampling distribution of the maximums of this union. ```{r, echo = T, fig.height=3.5, fig.width=8} results <- numeric(N) for(i in 1:N) { samp.a <- sample(A, 3, replace = F) samp.b <- sample(B, 3, replace = F) results[i] <- max(union(samp.a, samp.b)) } ggplot(data.table(value = results)[, index := .I]) + geom_histogram(aes(value, fill = ..count..), bins = 30) + labs(title = "Sampling Distribution: max(union(A,B))") ``` d.) Use simulation to find the probability that the maximum of the union is less than 20. ```{r, echo = T} prob <- mean(results < 20) ``` Probability: `r prob*100`% \newpage ### 4.6 The data set _Recidivism_ contains the poopulation of all Iowa offenders convicted of either a felony or misdemeanor who were released in 2010 (case study in Section 1.4). 
```{r, echo = T}
Recidivism <- data.table(read.csv(paste0(data.dir, "Recidivism.csv"), header = T))
```

Of these, 31.6% recidivated and were sent back to prison. Simulate the sampling distribution of $\hat{p}$, the sample proportion of offenders who recidivated, for random samples of size 25.

```{r, echo = T}
mean(Recidivism$Recid == "Yes")

N <- 10e2
results <- numeric(N)

for(i in 1:N) {
   samp <- sample(Recidivism$Recid, 25)
   results[i] <- mean(samp == "Yes")
}
```

a.) Create a histogram and describe the simulated sampling distribution of $\hat{p}$.

```{r, echo = T, fig.height=3.5, fig.width=8}
ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Recidivism Sampling Distribution")
```

Estimate the mean and standard error.

```{r, echo = T}
mu <- mean(results)
# results contains one p-hat per replicate, so its standard deviation
# already *is* the (estimated) standard error; dividing by sqrt(25)
# again would shrink it by an extra factor of 5.
se <- sd(results)
```

$\mu = `r mu`$, $\sigma = `r se`$

b.) Compare your estimate of the standard error with the theoretical standard error (_Corollary 4.3.2_).

```{r, echo = T}
# Corollary 4.3.2: SE(p-hat) = sqrt(p(1 - p) / n), not p(1 - p)/sqrt(n).
tse <- sqrt(mu * (1 - mu) / 25)
```

Theoretical: `r tse`

c.) Repeat the above using samples of size 250, and compare with the $n = 25$ case.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e2
results <- numeric(N)

for(i in 1:N) {
   samp <- sample(Recidivism$Recid, 250)
   results[i] <- mean(samp == "Yes")
}

ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Recidivism Sampling Distribution")

mu <- mean(results)
# As above, sd(results) is the standard error for the n = 250 case.
se <- sd(results)
```

$\mu = `r mu`$, $\sigma = `r se`$

### 4.7

The data set _FlightDelays_ contains the population of all flight departures by United Airlines and American Airlines out of LGA during May and June 2009 (case study in Section 1.1).

```{r, echo = T}
Flights <- data.table(read.csv(paste0(data.dir, "FlightDelays.csv"), header = T))
```

a.) Create a histogram of _Delay_ and describe the distribution.
```{r, echo = T, fig.height=3.5, fig.width=8}
ggplot(Flights, aes(Delay)) +
   geom_histogram(aes(fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Flight Delays")
```

Compute the mean and standard deviation.

```{r, echo = T}
mu <- mean(Flights$Delay)
sigma <- sd(Flights$Delay)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

b.) Simulate the sampling distribution of $\bar{x}$, the sample mean of the length of the flight delays (_Delay_), for sample size 25.

```{r, echo = T}
N <- 10e2
results <- numeric(N)

for(i in 1:N) {
   samp <- sample(Flights$Delay, 25, replace = F)
   results[i] <- mean(samp)
}
```

Create a histogram and describe the simulated sampling distribution of $\bar{x}$.

```{r, echo = T, fig.height=3.5, fig.width=8}
ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Flight Delay Sampling Distribution")
```

Estimate the mean and standard error.

```{r, echo = T}
mu <- mean(results)
# Each element of results is one sample mean, so sd(results) already
# *is* the estimated standard error; dividing by sqrt(25) again is a
# double division.
se <- sd(results)
```

$\mu = `r mu`$, $\sigma = `r se`$

c.) Compare your estimate of the standard error with the theoretical standard error (_Corollary A.4.1_).

```{r, echo = T}
# Corollary A.4.1: SE(x-bar) = sigma / sqrt(n), using the population
# standard deviation (here the full FlightDelays population).
tse <- sd(Flights$Delay) / sqrt(25)
```

Theoretical: `r tse`

d.) Repeat with sample size 250.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e2
results <- numeric(N)

for(i in 1:N) {
   samp <- sample(Flights$Delay, 250, replace = F)
   results[i] <- mean(samp)
}

ggplot(data.table(value = results)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs(title = "Flight Delay Sampling Distribution")

mu <- mean(results)
se <- sd(results)

tse <- sd(Flights$Delay) / sqrt(250)
```

$\mu = `r mu`$, $\sigma = `r se`$

Theoretical: `r tse`

### 4.8

Let $X_1, X_2, \ldots, X_{25}$ be a random sample from some distribution and $W = T(X_1, X_2, \ldots, X_n)$ be a statistic. Suppose the _sampling distribution_ of W has a pdf given by $f(x) = \frac{2}{x^2}$, for 1 < x < 2.
Find $P(w < 1.5)$ __Solution__: ```{r, echo = T, fig.height=3.5, fig.width=8} f <- function(x) 2 / x^2 x <- seq( from = 1.0001, to = 1.999, by = 0.0001) y <- f(x) ggplot(data.table(x, y)) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + labs(title = "pdf f(x) = 2/x^2") a <- cumsum(y) / sum(y) p <- round( a[x == 1.5], 4 ) * 100 d <- data.table(x, y = a) ggplot(d) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + geom_area(aes(x, y), data = d[x < 1.5], fill = "cornflowerblue", alpha = .3) + labs(title = paste("cdf f(x) = 2/x^2, A =", p )) ``` Numerical solution: `r p`% Analytical Solution: ${\int_{1}^{1.5}}\frac{2}{x^2} = \frac{2}{3}$ ### 4.9 Let $X_1, X_2, \ldots, X_{n}$ be a random sample from some distribution and $Y = T(X_1, X_2, \ldots, X_n)$ be a statistic. Suppose the _sampling distribution_ of Y has pdf $f(y) = (3/8)y^2 \ for \ 0 \le y \le 2$. Find $P(0 \le Y \le \frac{1}{5})$ __Solution__: ```{r, echo = T, fig.height=3.5, fig.width=8} f <- function(x) (3/8)*x**2 x <- seq( from = 0, to = 2, by = 0.001) y <- f(x) ggplot(data.table(x, y)) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + labs(title = paste("pdf: ", paste0(deparse(f), collapse = " "))) a <- cumsum(y) / sum(y) p <- round( a[x == 1/5], 4 ) * 100 d <- data.table(x, y = a) ggplot(d) + geom_point(aes(x, y), color = "cornflowerblue", size = .6) + geom_area(aes(x, y), data = d[x < 1/5], fill = "cornflowerblue", alpha = .3) + labs(title = paste("cdf f(x) = 2/x^2, A =", p )) ``` Numerical Solution: `r p`% Analytical Solution: ${\int_{0}^{\frac{1}{5}}}\frac{x^3}{8} = \frac{.008}{8} = .001 = .1$ % ### 4.10 Suppose the heights of boys in a certain large city follow a distribution with mean 48 in. and variance $9^2$. Use the CLT approximation to estimate the probability that in a random sample of 30 boys, the mean height is more than 51 in. 
```{r, echo = T}
# The problem gives variance 9^2, so sd = 9 and SE = 9 / sqrt(30).
# (Dividing by the variance 9^2 would give a z-score 9x too small.)
z <- (51 - 48) / (9 / sqrt(30))
p <- pnorm(z, lower.tail = F)
```

Probability: __`r round(p, 4)*100`%__

### 4.11

Let $X_1, X_2, \ldots, X_{36} \sim Bern(.55)$ be independent, and let $\hat{p}$ denote the sample proportion. Use the CLT approximation with continuity correction to find the probability that $\hat{p} \le 0.5$.

```{r, echo = T}
# p-hat <= 0.5 with n = 36 means X <= 18 successes; the continuity
# correction evaluates the normal approximation at 18.5, i.e. 18.5/36.
z <- (18.5/36 - .55) / sqrt(.55 * (1 - .55) / 36)
p <- pnorm(z, lower.tail = T)
```

Probability: `r round(p, 4)*100`%

### 4.12

A random sample of size $n = 20$ is drawn from a distribution with mean 6 and variance 10.

Use the CLT approximation to estimate $P(\bar{X} \le 4.6)$.

```{r, echo = T}
# sd = sqrt(10), so SE = sqrt(10) / sqrt(20) = sqrt(10/20).
# (The original multiplied the variance by sqrt(n) instead.)
z <- ( 4.6 - 6 ) / sqrt(10 / 20)
p <- pnorm(z, lower.tail = T)
```

Probability: `r round(p, 4)*100`%

### 4.13

A random sample of size $n = 244$ is drawn from a distribution with pdf

$f(x) = (3/16)(x - 4)^2, 2 \le x \le 6$.

Use the CLT approximation to estimate $P(X \ge 4.2)$.

```{r, echo = T, fig.height=2.6, fig.width=8}
pdf <- function(x) (3/16)*(x - 4)^2

x <- seq(from = 2, to = 6, by = 0.001)
y <- pdf(x)

ggplot(data.table(x,y)) +
   geom_point(aes(x, y), col = "cornflowerblue", lwd = .8) +
   labs(title = paste("PDF: ", paste0(deparse(pdf), collapse = " ")))

# Analytic CDF: F(x) = ((x - 4)^3 + 8) / 16 on [2, 6]. The density is
# symmetric about 4, so E[X] = median = 4.
cdf <- function(x) ((x - 4)^3 + 8) / 16

y <- cumsum(y) / sum(y)
ev <- x[min(which(y > .5))]   # numerical median (= mean = 4, by symmetry)

ggplot(data.table(x,y)) +
   geom_point(aes(x, y), col = "cornflowerblue", lwd = .8) +
   geom_vline(xintercept = ev) +
   labs(title = paste("CDF: ", paste0(deparse(cdf), collapse = " ")))

# Var(X) = int_2^6 (3/16)(x - 4)^4 dx = 12/5, so the standard error of
# x-bar is sqrt(12/5) / sqrt(244). (The original z omitted sigma.)
z <- ( 4.2 - ev ) / (sqrt(12/5) / sqrt(244))
pnorm(z, lower.tail = F)
```

### 4.14

According to the 2000 census, 28.6% of the US adult population received a high school diploma.

In a random sample of 800 US adults, what is the probability that between 220 and 230 (inclusive) people have a high school diploma?

Use the CLT approximation with continuity correction, and compare with the exact probability.
__Solution__: The sampling distribution of $\hat{p}$ is approximately normal with: ```{r, echo = T} n <- 800 mu <- .286 ev <- 800 * mu sigma <- sqrt(n*mu*(1-mu)) ``` $\mathbb{E}[X] = `r ev`$ and $\sigma = \sqrt{800(.286)(1 - .286)} = `r round(sigma, 4)`$ ```{r, echo = T} l <- pnorm((ev - 219.5) / sigma) h <- pnorm((ev - 230.5) / sigma) p <- l - h ``` Probability: `r round(p, 4)` ### 4.15 If $X_1, \ldots, X_n$ are i.i.d. from Unif[0, 1], how large should n be so that $P(\bar{X} - \frac{1}{2} < 0.05) \ge 0.90$, that is, is there at least a 90% chance that the sample mean is within 0.05 of $\frac{1}{2}$? Use the CLT approximation. ```{r, echo = T} ``` ### 4.16 PI:NAME:<NAME>END_PI claims that she has drawn a random sample of size 30 from the exponential distribution with $\lambda = 1/10$. The mean of her sample is 12. a.) What is the expected value of a sample mean? $X \sim Exp(\frac{1}{10}), \mathbb{E}(x) = 10$ b.) Run a simulation by drawing 1000 random samples, each of size 30, from Exp(1/10), and compute the mean for each sample. ```{r, echo = T, fig.height=3.5, fig.width=12} N <- 1000 result <- numeric(N) for( i in 1:N) { samp <- rexp( n = 30, rate = 1/10) result[i] <- mean(samp) } ggplot(data.table(result), aes(result)) + geom_histogram(aes(y = ..density.., fill = ..count..), bins = 30) + geom_vline(xintercept = 12, col = "darkorange") + stat_density( kernel = "gaussian", fill = "darkorange", alpha = .3) + labs(title = "Exp(1/10) Sampling Distribution") p <- mean(result > 12) ``` What proportion of the sample means is as large or larger than 12? __`r p*100`%__ c.) Is a mean of 12 unusual for a sample of size 30 from Exp(1/10)? __Yes__, only ~13% of the sample means have a value of 12 or higher. ### 4.17 Let $X \sim N(15, 3^2)$ and $Y \sim N(4, 2^2)$ be independent random variables. a.) What is the exact sampling distribution of $W = X - 2Y$? $W \sim N(7, 5^2)$ b.) Use R to simulate the sampling distribution of $W$ and plot your results. 
```{r, echo = T, fig.height=3.5, fig.width=8} X <- rnorm(10e3, 15, 3) Y <- rnorm(10e3, 4, 2) W <- X - 2*Y ggplot(data.table(x = W)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + geom_vline(xintercept = 7, col = "darkorange", lwd = 1.5) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` Check that the simulated mean and standard error are close to the theoretical mean and standard error. ```{r, echo = T} mu <- mean(W) sigma <- sd(W) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ c.) Use the simulated sampling to estimate $P(W \le 10)$, and then check your estimate with an exact calculation. ```{r, echo = T} phat <- mean(W <= 10) p <- pnorm(10, mean = 7, sd = 5) ``` $\hat{p}= `r phat*100`$% $P(W \le 10) = `r round(p, 4)*100`$% ### 4.18 Let $X \sim Pois(4)$, $Y \sim Pois(12)$, $U \sim Pois(3)$ be independent random variables. a.) What is the exact sampling distribution of $W = X + Y + U$? $W \sim Pois(19)$ b.) Use R to simulate the sampling distribution of $W$ and plot your results. ```{r, echo = T, fig.height=3.5, fig.width=8} W <- rpois(10e3, lambda = 19) ggplot(data.table(x = W)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + geom_vline(xintercept = 19, col = "darkorange", lwd = 1.5) + scale_y_continuous(labels = comma) + labs(title = "Sampling Distribution") ``` Check that the simulated mean and standard error are close to the theoretical mean and standard error. ```{r, echo = T} mu <- mean(W) sigma <- sd(W) ``` $\mu = `r mu`$, $\sigma = `r sigma`$ c.) Use the simulated sampling distribution to estimate $P(W \le 14)$ and then check your estimate with an exact calculation. ```{r, echo = T} phat <- mean(W <= 14) p <- ppois(14, lambda = 19) ``` $\hat{p} = `r phat*100`$% $P(W \le 14) = `r round(p, 4)*100`$% ### 4.19 Let $X_1, X_2, \ldots, X_{10} \sim^{i.i.d} N(20, 8^2)$ and $Y_1, Y_2, \ldots, Y_{15} \sim^{i.i.d} N(16, 7^2)$. Let $W = \bar{X} + \bar{Y}$ a.) Give the exact sampling distribution of W. 
$\sigma_W = \sqrt{\frac{8^2}{10} + \frac{7^2}{15}} \approx 3.11$

$W \sim N(36, 3.11^2)$

b.) Simulate the sampling distribution in R and plot your results.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e3
result <- numeric(N)

for( i in 1:N) {
   X <- rnorm(10, 20, 8)
   Y <- rnorm(15, 16, 7)

   result[i] <- mean(X) + mean(Y)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   geom_vline(xintercept = 36, col = "darkorange", lwd = 1.5) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

Check that the simulated mean and standard error are close to the exact mean and standard error.

```{r, echo = T}
mu <- mean(result)
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

c.) Use your simulation to find $P(W < 40)$. Calculate an exact answer and compare.

```{r, echo = T}
phat <- mean(result <= 40)
# Exact: W ~ N(36, sigma^2) with sigma = sqrt(8^2/10 + 7^2/15), the
# standard error of a sum of independent sample means.
p <- pnorm(40, 36, sqrt(8^2/10 + 7^2/15))
```

$\hat{p} = `r phat*100`$%

$P(W < 40) = `r round(p, 4)*100`$%

### 4.20

Let $X_1, X_2, \ldots, X_9 \sim^{i.i.d.} N(7, 3^2)$, and $Y_1, Y_2, \ldots, Y_{12} \sim^{i.i.d.} N(10, 5^2)$.

Let $W = \bar{X} - \bar{Y}$.

a.) Give the sampling distribution of $W$.

$\sigma_W = \sqrt{\frac{3^2}{9} + \frac{5^2}{12}} \approx 1.76$

$W \sim N(-3, 1.76^2)$

b.) Simulate the sampling distribution of W in R, and plot your results.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e3
result <- numeric(N)

for(i in 1:N) {
   X <- rnorm(9, 7, 3)
   Y <- rnorm(12, 10, 5)

   result[i] <- mean(X) - mean(Y)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   geom_vline(xintercept = -3, col = "darkorange", lwd = 1.5) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

Check that the simulated mean and standard error are close to the theoretical mean and standard error.

```{r, echo = T}
mu <- mean(result)
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

c.) Use your simulation to find $P(W < -1.5)$.
```{r, echo = T}
phat <- mean(result <= -1.5)
# Exact: W ~ N(-3, sigma^2) with sigma = sqrt(3^2/9 + 5^2/12), the
# standard error of a difference of independent sample means.
p <- pnorm(-1.5, -3, sqrt(3^2/9 + 5^2/12))
```

$\hat{p} = `r phat*100`$%

Calculate an exact answer and compare.

$P(W < -1.5) = `r round(p, 4)*100`$%

### 4.21

Let $X_1, X_2, \ldots, X_N$ be a random sample from $N(0, 1)$.

Let $W = X^2_1 + X^2_2 + \ldots + X^2_n$

What is the mean and variance of the sampling distribution of W?

$W \sim \chi^2_n$, so $\mu = n$ and $Var = 2n$.

Repeat using N = 4, N = 5.

$N = 4: \mu = 4, \ Var = 8$

$N = 5: \mu = 5, \ Var = 10$

What observations or conjectures do you have for general __n__?

_The mean grows like $n$ and the variance like $2n$; by the CLT, W is approximately normal for large n._

### 4.22

Let $X$ be a uniform random variable on the interval $[40, 60]$ and Y be a uniform random variable on $[45, 80]$.

Assume that X and Y are independent.

a.) Compute the expected value and variance of $X + Y$.

$\mu = 50 + 62.5 = 112.5$, $Var = \frac{20^2}{12} + \frac{35^2}{12} = \frac{1625}{12} \approx 135.42$

b.) Simulate a sampling distribution of $X + Y$.

```{r, echo = T, fig.height=3.5, fig.width=8}
X <- runif(1000, 40, 60)
Y <- runif(1000, 45, 80)

total <- X + Y

ggplot(data.table(value = total)) +
   geom_histogram(aes(value, fill = ..count..), bins = 30) +
   labs("X + Y Sampling Distribution")
```

Describe the sampling distribution of $X + Y$.

__Roughly symmetric and bell-shaped (the exact density of a sum of two independent uniforms is trapezoidal).__

Compute the mean and variance of the sampling distribution and compare this with the theoretical mean and variance.

```{r, echo = T}
mu <- mean(total)
var <- var(total)
```

$\mu = `r mu`$, $Var = `r var`$

c.) Suppose the time (in minutes) PI:NAME:<NAME>END_PI takes to complete his statistics homework is $Unif[40, 60]$ and the time PI:NAME:<NAME>END_PI takes is $Unif[45, 80]$.

Assume they work independently. One day they announce that their total time to finish an assignment was less than 90 minutes.

How likely is this?

```{r, echo = T, fig.height=3.5, fig.width=8}
# The sum of two independent uniforms is NOT uniform on [85, 140]
# (its density is trapezoidal), so punif() does not apply here.
# Estimate the probability from the simulated totals instead.
p <- mean(total < 90)
```

Probability: __`r round(p, 4)*100`%__

### 4.23

Let $X_1, X_2, \ldots, X_{20} \sim^{i.i.d.} Exp(2)$. Let $X = \sum^{20}_{i=1}X_i$.

a.) Simulate the sampling distribution of $X$ in R.
```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp(n = 20, rate = 2) result[i] <- sum(samp) } ``` b.) From your simulation, find $\mathbb{E}[X]$ and $Var[X]$. ```{r, echo = T} mu <- mean(result) var <- var(result) ``` $\mu = `r mu`$, $Var = `r var`$ c.) From your simulation, find $P( X \le 10)$. ```{r, echo = T} phat <- mean(result <= 10) ``` Probablity: _`r round(phat, 4)*100`%_ ### 4.24 Let $X_1, X_2, \ldots, X_{30} \sim^{i.i.d.} Exp(1/3)$ and let $\bar{X}$ denote the sample mean. a.) Simulate the sampling distribution of $\bar{X}$ in R. ```{r, echo = T, fig.height=3.5, fig.width=8} N <- 10e3 result <- numeric(N) for(i in 1:N) { samp <- rexp(n = 30, rate = 1/3) result[i] <- mean(samp) } ``` b.) Find the mean and standard error of the sampling distribution, and compare with the theoretical results. ```{r, echo = T} mu <- mean(result) sigma <- sd(result) ``` Sample: $\mu = `r mu`$, $\sigma = `r sigma`$ Theoretical: $\mu = \frac{\lambda}{1} = 3$, $sigma = 3/sqrt(30 - 1) = .56$ c.) From your simulation, find $P(\bar{X} \le 3.5)$. ```{r, echo = T} phat <- mean(result <= 3.5) ``` $\hat{p} = `r phat`$ d.) Estimate $P(\bar{X} \le 3.5)$ by assuming the CLT approximation holds. Compare this result with the one in part __(c)__. ```{r, echo = T} z <- (3.5 - 3) / sigma p <- pnorm(z) ``` $p = `r round(p, 4)`$ ### 4.25 Consider the exponential distribution with density $f(x) \frac{1}{20}e^{-x/20}$, with mean and standard deviation of 20. a.) Calculate the median of this distribution. $0.5 = \int_{m}^{\inf}f(x)dx = -e^{-x/20}$ $\ldots = M = 20*log(2)$ $\ldots = `r 20*log(2)`$ b.) Using R, draw a random sample of size 50 and graph the histogram. ```{r, echo = T, fig.height=3.5, fig.width=8} samp <- rexp( n = 50, rate = 1/20) ggplot(data.table(x = samp)) + geom_histogram(aes(x, fill = ..count..)) + labs(title = "RExp(50, 1/20)") ``` What are the mean and standard deviation of your sample? 
```{r, echo = T}
mu <- mean(samp)
sigma <- sd(samp)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

c.) Run a simulation to find the (approximate) sampling distribution for the median of sample size 50 from the exponential distribution and describe it.

```{r, echo = T, fig.height=3.5, fig.width=8}
N <- 10e3
result <- numeric(N)

for(i in 1:N) {
   samp <- rexp( n = 50, rate = 1/20)
   result[i] <- median(samp)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

What is the mean and the standard error of this sampling distribution?

```{r, echo = T}
mu <- mean(result)
# result holds one sample median per replicate, so sd(result) already
# *is* the standard error of the median; dividing by sqrt(50) again
# would understate it by a factor of sqrt(50).
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

d.) Repeat the above but use sample sizes $n = 100, 500 \ and \ 1,000$.

```{r, echo = T, fig.height=3, fig.width=8}
N <- 10e3
result <- numeric(N)

for(i in 1:N) {
   samp <- rexp( n = 100, rate = 1/20)
   result[i] <- median(samp)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

```{r, echo = T}
mu <- mean(result)
# sd(result) is the standard error of the simulated medians (n = 100).
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

```{r, echo = T, fig.height=3, fig.width=8}
N <- 10e3
result <- numeric(N)

for(i in 1:N) {
   samp <- rexp( n = 500, rate = 1/20)
   result[i] <- median(samp)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```

```{r, echo = T}
mu <- mean(result)
# sd(result) is the standard error of the simulated medians (n = 500).
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

```{r, echo = T, fig.height=3, fig.width=8}
N <- 10e3
result <- numeric(N)

for(i in 1:N) {
   samp <- rexp( n = 1000, rate = 1/20)
   result[i] <- median(samp)
}

ggplot(data.table(x = result)) +
   geom_histogram(aes(x, fill = ..count..), bins = 30) +
   scale_y_continuous(labels = comma) +
   labs(title = "Sampling Distribution")
```
```{r, echo = T}
mu <- mean(result)
# sd(result) is already the standard error of the sampling
# distribution of the median; do not divide by sqrt(n) a second time.
sigma <- sd(result)
```

$\mu = `r mu`$, $\sigma = `r sigma`$

How does sample size affect the sampling distribution?

_The sample mean and standard error converge to the analytical solution with increased sample size._

### 4.26

Prove theorem 4.2.1.

### 4.27

Let $X_1, X_2 \sim^{i.i.d.} F$ with corresponding pdf $f(x) = \frac{2}{x^2}$, $1 \le x \le 2$.

a.) Find the pdf of $X_{max}$.

$F_{max}(x) = 8(\frac{1}{x^2} - \frac{1}{x^3})$

b.) Find the expected value of $X_{max}$.

$\mathbb{E}{[F_{max}]} = 1.545$

### 4.28

Let $X_1, X_2, \ldots, X_N \sim^{i.i.d.}$ with corresponding pdf $f(x) = 3x^2, 0 \le x \le 1$.

a.) Find the pdf for $X_{min}$.

b.) Find the pdf for $X_{max}$.

c.) If $n = 10$, find the probability that the largest value, $X_{max}$, is greater than 0.92.

### 4.29

Compute the pdf of the sampling distribution of the maximum samples of size 10 from a population with an exponential distribution with $\lambda = 12$.

### 4.30

Let $X_1, X_2, \ldots, X_N \sim^{i.i.d.} Exp(\lambda)$ with pdf $f(x) = \lambda e^{-\lambda x}, \lambda > 0, x > 0$.

a.) Find the pdf $f_{min}(x)$ for the sample minimum $X_{min}$. Recognize this as the pdf of a known distribution.

b.) Simulate in R the sampling distribution of $X_{min}$ of samples of size $n = 25$ from the exponential distribution with $\lambda = 7$. Compare the theoretical expected value of $X_{min}$ with the simulated expected value.

### 4.31

Let $X_1, X_2, \ldots, X_n \sim^{i.i.d.} Pois(3)$. Let $X = \sum^{10}_{i=1}X_i$. Find the pdf for the sampling distribution of X.

### 4.32

Let $X_1$ and $X_2$ be independent exponential random variables, both with parameter $\lambda > 0$. Find the cumulative distribution function for the sampling distribution of $X = X_1 + X_2$.

### 4.33

This simulation illustrates the CLT for a finite population.
```{r, echo = T, fig.height=3.5, fig.width=8} N <- 400 n <- 5 finpop <- rexp(N, 1/10) ggplot(data.table(x = finpop)) + geom_histogram(aes(x, fill = ..count..)) + labs(title = "Exp(1/10)") mean(finpop) #mean (mu) of your pop. sd(finpop) # stdev (sigma) of your pop. sd(finpop)/sqrt(n) # theoretical standard error of sampling distribution sd(finpop)/sqrt(n) * sqrt((N-n)/(N-1)) # without replacement Xbar <- numeric(1000) for(i in 1:1000) { x <- sample(finpop, n) # Random sample of size n # (w/o replacement) Xbar[i] <- mean(x) } p1 <- ggplot(data.table(x = Xbar)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + labs(title = "Mean Sampling Distribution Histogram") p2 <- ggplot(data.table(x = Xbar), aes(sample = x)) + stat_qq() + stat_qq_line() + labs(title = "Mean Sampling Distribution QQ-Plot") grid.arrange(p1, p2, nrow = 2) mean(Xbar) sd(Xbar) # estimated standard error of sampling distribution ``` a.) Does the sampling distribution of sample means appear approximately normal? b.) Compare the mean and standard error of your simulated sampling distribution with the theoretical ones. c.) Calculate $(\sigma/\sqrt(n))(\sqrt{(N-n)/(N-1)}$, where $\sigma$ is the standard deviation of the finite population and compare with the (estimated) standard error of the sampling distribution. d.) Repeat for larger __n__, say __n__ = 20 and __n__ = 100. ### 4.34 Let $X_1, X_2, \ldots, X_n$ be independent random variables from $N(\mu, \sigma)$. We are interested in the sampling distribution of the variance. Run a simulation to draw random samples of size 20 from $N(25, 7^2)$ and calculate the variance for each sample. 
```{r, echo = T, fig.height=3.5, fig.width=8} W <- numeric(1000) for(i in 1:1000) { x <- rnorm(20, 25, 7) W[i] <- var(x) } mean(W) var(W) p1 <- ggplot(data.table(x = W)) + geom_histogram(aes(x, fill = ..count..), bins = 30) + labs(title = "Variance Sampling Distribution") p2 <- ggplot(data.table(value = W), aes(sample = value)) + stat_qq() + stat_qq_line() + labs(title = "Mean Sampling Distribution QQ-Plot") grid.arrange(p1, p2, nrow = 2) ``` Does the sampling distribution appear to be normally distributed? Repeat with n = 50 and n = 200. ### 4.35 A random sample of size $n = 100$ is drawn from a distribution with pdf $f(x) = 3(1- x)^2, 0 \le x \le 1$. a.) Use the CLT approximation to estimate $P(\bar{X} \le 0.27)$. b.) Use the expanded CLT to estimate the same probability (dnorm). c.) If $X_1, X_2, X_3 \sim^{i.i.d.} Unif[0, 1]$, then the minimum has density __f__ given above. Use simulation to estimate the probability.
[ { "context": "---\ntitle: \"Kostyantyn Hrytsyuk & Khrystyna Kubatska. Finances Project\"\noutput:\n ", "end": 31, "score": 0.9998738169670105, "start": 12, "tag": "NAME", "value": "Kostyantyn Hrytsyuk" }, { "context": "---\ntitle: \"Kostyantyn Hrytsyuk & Khrystyna Kubatska. Finances Project\"\noutput:\n pdf_document: defaul", "end": 52, "score": 0.9995737075805664, "start": 34, "tag": "NAME", "value": "Khrystyna Kubatska" } ]
Fin_Garch_Code.rmd
kostyantynHrytsyuk/Fin-GARCH
0
--- title: "Kostyantyn Hrytsyuk & Khrystyna Kubatska. Finances Project" output: pdf_document: default html_notebook: default html_document: df_print: paged --- ```{r} require(dplyr) require(ggplot2) require(xts) require(rugarch) require(PerformanceAnalytics) require(quantmod) library(skewt) ``` ```{r} # Configure propet path to the Funds.csv file here # setwd('./') ``` ```{r} read_funds <- function(lf) { dfs <- list() browser() for (f in 1:length(lf)) { cond <- substr(lf[f], nchar(lf[f])-3, nchar(lf[f])) == '.csv' if (cond) { temp <- data.frame(read.csv(lf[f], stringsAsFactors = FALSE)) temp$Date <- as.Date(temp$Date) temp$Close <- as.numeric(temp$Close) dfs[[f]] <- temp[,c(1,5)] } } return(dfs) } funds_names <- c("Vanguard", "Blackrock", "Statestreet", "JPmorgan", "Bankmellon", "Allianz") ``` ```{r} # Forming set of parameters for different GARCH model get_garch_specs <- function() { # Standard GARCH with normal distribution of errors norm_garch_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'sGARCH'), distribution.model = 'norm') # GJR GARCH with normal distribution of errors norm_gjr_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'gjrGARCH'), distribution.model = 'norm') # Standard GARCH with skewed Student t distribution of errors sstd_garch_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'sGARCH'), distribution.model = 'sstd') # GJR GARCH with skewed Student t distribution of errors sstd_gjr_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'gjrGARCH'), distribution.model = 'sstd') garch_specs <- list(norm_garch_spec, norm_gjr_spec, sstd_garch_spec, sstd_gjr_spec) return(garch_specs) } ``` ```{r} # Apply GARCH model to our data get_garch_fits <- function(fund) { garch_specs <- get_garch_specs() garch_fits <- list() for (s in 1:length(garch_specs)) { suppressWarnings(garch_fits[[s]] <- ugarchfit(data = 
fund, spec = garch_specs[[s]])) } return(garch_fits) } # Visualizing standardized residuals for models visualize_residuals <- function(garch_fits) { for (f in 1:length(garch_fits)) { chart.Histogram(residuals(garch_fits[[f]], standardize = T), methods = c('add.normal', 'add.density'), main = paste('Standardized residuals of',names(garch_fits)[f])) } } # Models validation test_garch_models <- function(garch_fits) { for (f in 1:length(garch_fits)) { standard_residuals <- residuals(garch_fits[[f]], standardize = T) p <- acf(abs(standard_residuals),22, plot = F) plot(p, main = names(garch_fits)[f]) cat('\n', names(garch_fits)[f],'\n') print(Box.test(abs(standard_residuals), 22, type = 'Ljung-Box')) } } #Coefficients print_garch_coefficients <- function(garch_fits) { for (f in 1:length(garch_fits)) { cat('\nCoefficients of', names(garch_fits)[f], '\n') print(round(garch_fits[[f]]@fit$matcoef,10)) cat('\nRobust coefficients of', names(garch_fits)[f], '\n') print(round(garch_fits[[f]]@fit$robust.matcoef,10)) } } # Models comparing compare_garch_models <- function(garch_fits, short_model_names) { model_comparison <- data.frame() for (f in 1:length(garch_fits)) { temp <- data.frame() temp[1,1] <- likelihood(garch_fits[[f]]) inf_criterion <- infocriteria(garch_fits[[f]]) temp <- rbind(temp, inf_criterion) model_comparison <- c(model_comparison, temp) } model_comparison <- as.data.frame(model_comparison) rownames(model_comparison) <- c('Likelihood',rownames(inf_criterion)) colnames(model_comparison) <- short_model_names print(model_comparison) } # Visualizing impact of negative previous return on variance visualize_dependecy_ret_var <- function(garch_fits, short_model_names) { p <- ggplot() for (f in 1:length(garch_fits)) { garch_news <- as.data.frame(newsimpact(garch_fits[[f]])[1:2]) model_name <- short_model_names[f] model_name <- enquo(model_name) p <- p + geom_line(data = garch_news, aes(x = zx, y = zy, color = !!model_name)) } p <- p + labs(x = 'Error', y = 'Variance', 
title = 'Dependence of variance on errors in different models') + theme(plot.title = element_text(hjust = 0.5)) print(p) } # Visualizing volatility visualizing_volatility <- function(garch_fits, short_model_names, fund_vol) { p <- ggplot() garch_vol <- list() for (f in 1:length(garch_fits)) { garch_vol[[f]] <- sigma(garch_fits[[f]]) model_name <- short_model_names[f] model_name <- enquo(model_name) p <- p + geom_line(data = garch_vol[[f]], aes(x = index(garch_vol[[f]][,1]), y = garch_vol[[f]][,1], color = !!model_name), alpha = 0.2) } names(garch_vol) <- short_model_names p <- p + geom_line(data = fund_vol, aes(y = fund_vol[,1], x = index(fund_vol[,1]), color = 'Actual volatility')) + labs(x = 'Date', y = 'Volatility', title = 'Volatility constructed by different models') + theme(plot.title = element_text(hjust = 0.5)) suppressMessages(suppressWarnings(print(p))) return(garch_vol) } # Predicting volatility for n.ahead periods predict_volatility <- function(garch_fits, garch_vol, fund_vol, short_model_names) { fund_tail_volatility <- tail(fund_vol,10) predict_results <- data.frame(fund_tail_volatility) garch_sst <- c() for (f in 1:length(garch_fits)) { garch_forecast <- ugarchforecast(fitORspec = garch_fits[[f]], data = garch_vol[[f]], n.ahead = 10) predict_results <- cbind(predict_results, sigma(garch_forecast)) garch_sst[[f]] <- sum(fund_tail_volatility - sigma(garch_forecast)) names(garch_sst)[f] <- paste('TES for',short_model_names[f]) } names(predict_results)[2:ncol(predict_results)] <- short_model_names # Total error sum for models print(garch_sst) #Comparing predicted volatility for models with actual one print(predict_results) } ``` ```{r} # Data loading df <- read.csv('Funds.csv', stringsAsFactors = F) df <- df[,-1] df$Date <- as.Date(df$Date) # Transforming data from df to xts funds <- xts(df[,2:ncol(df)], order.by = df$Date) funds <- na.omit(funds) rm(df) # Subseting original data for April of 2020 year funds_red <- funds['/202004'] ``` ```{r} 
get_volatiles <- function(funds, width = 22, time_scale = 1, funds_names) { vol_df <- data.frame() for (i in 1:ncol(funds)) { fund <- funds[,i] temp <- rollapply(data = fund, width = 22, FUN = 'sd.annualized', scale = time_scale) if (nrow(vol_df) == 0) { vol_df <- temp } else { vol_df <- cbind(vol_df, temp) } } names(vol_df) <- funds_names return(vol_df) } visualize_funds_lines <- function(funds, y_axis_label = 'Volatility') { for (i in 1:ncol(funds)) { temp <- funds[,i] temp_mean <- mean(temp, na.rm = TRUE) p <- ggplot(temp, aes(x = index(temp), y = temp)) + geom_line(aes(color = 'Volatility')) + geom_hline(aes(yintercept = temp_mean, color = 'Mean'), size=.5, linetype='dashed') + geom_text( aes( min(index(temp)) , temp_mean, label = round(temp_mean, 4), vjust = 2)) + labs(x = 'Date', y = y_axis_label, title = names(funds)[i]) + theme(plot.title = element_text(hjust = 0.5)) suppressMessages(suppressWarnings(print(p))) } } visualize_funds_hist <- function(funds, x_axis_label = 'Return') { for (i in 1:ncol(funds)) { temp <- funds[,i] title <- paste(funds_names[i], 'returns') chart.Histogram(temp, methods = c('add.normal', 'add.density'), main = title) temp <- (temp - mean(temp, na.rm = T))/sd(temp, na.rm = T) title <- paste('Standardized', title) chart.Histogram(temp, methods = c('add.normal', 'add.density'), main = title) } } ``` ```{r} evaluate_garch <- function(fund, fund_vol) { # Models naming model_names <- c('Standard GARCH with normal distribution of errors', 'GJR GARCH with normal distribution of errors', 'Standard GARCH with skewed Student t distribution of errors', 'GJR GARCH with skewed Student t distribution of errors') short_model_names <- c('Normal GARCH', 'Normal GJR', 'Skewed t GARCH', 'Skewed t GJR') garch_fits <- get_garch_fits(fund) names(garch_fits) <- model_names visualize_residuals(garch_fits) test_garch_models(garch_fits) print_garch_coefficients(garch_fits) compare_garch_models(garch_fits, short_model_names) 
visualize_dependecy_ret_var(garch_fits, short_model_names) garch_vol <- visualizing_volatility(garch_fits, short_model_names, fund_vol) predict_volatility(garch_fits = garch_fits, garch_vol = garch_vol, fund_vol = fund_vol, short_model_names = short_model_names) } ``` ```{r} vol_df <- get_volatiles(funds, funds_names = funds_names) vol_df <- na.omit(vol_df) visualize_funds_lines(vol_df) visualize_funds_hist(funds) fund_tail_volatility <- tail(vol_df$Vanguard, 10) ``` ```{r} cat('Data up to 2020-04-30\n') for (i in 1:ncol(funds_red)) { cat(colnames(vol_df)[i],'\n') evaluate_garch(funds_red[,i], vol_df[,i]) cat('\n--------------------------------\n') } ``` ```{r} cat('Data up to 2020-05-08\n') for (i in 1:ncol(funds)) { cat(colnames(vol_df)[i],'\n') evaluate_garch(funds_red[,i], vol_df[,i]) cat('\n--------------------------------\n') } ```
2422
--- title: "<NAME> & <NAME>. Finances Project" output: pdf_document: default html_notebook: default html_document: df_print: paged --- ```{r} require(dplyr) require(ggplot2) require(xts) require(rugarch) require(PerformanceAnalytics) require(quantmod) library(skewt) ``` ```{r} # Configure propet path to the Funds.csv file here # setwd('./') ``` ```{r} read_funds <- function(lf) { dfs <- list() browser() for (f in 1:length(lf)) { cond <- substr(lf[f], nchar(lf[f])-3, nchar(lf[f])) == '.csv' if (cond) { temp <- data.frame(read.csv(lf[f], stringsAsFactors = FALSE)) temp$Date <- as.Date(temp$Date) temp$Close <- as.numeric(temp$Close) dfs[[f]] <- temp[,c(1,5)] } } return(dfs) } funds_names <- c("Vanguard", "Blackrock", "Statestreet", "JPmorgan", "Bankmellon", "Allianz") ``` ```{r} # Forming set of parameters for different GARCH model get_garch_specs <- function() { # Standard GARCH with normal distribution of errors norm_garch_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'sGARCH'), distribution.model = 'norm') # GJR GARCH with normal distribution of errors norm_gjr_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'gjrGARCH'), distribution.model = 'norm') # Standard GARCH with skewed Student t distribution of errors sstd_garch_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'sGARCH'), distribution.model = 'sstd') # GJR GARCH with skewed Student t distribution of errors sstd_gjr_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'gjrGARCH'), distribution.model = 'sstd') garch_specs <- list(norm_garch_spec, norm_gjr_spec, sstd_garch_spec, sstd_gjr_spec) return(garch_specs) } ``` ```{r} # Apply GARCH model to our data get_garch_fits <- function(fund) { garch_specs <- get_garch_specs() garch_fits <- list() for (s in 1:length(garch_specs)) { suppressWarnings(garch_fits[[s]] <- ugarchfit(data = fund, spec = 
garch_specs[[s]])) } return(garch_fits) } # Visualizing standardized residuals for models visualize_residuals <- function(garch_fits) { for (f in 1:length(garch_fits)) { chart.Histogram(residuals(garch_fits[[f]], standardize = T), methods = c('add.normal', 'add.density'), main = paste('Standardized residuals of',names(garch_fits)[f])) } } # Models validation test_garch_models <- function(garch_fits) { for (f in 1:length(garch_fits)) { standard_residuals <- residuals(garch_fits[[f]], standardize = T) p <- acf(abs(standard_residuals),22, plot = F) plot(p, main = names(garch_fits)[f]) cat('\n', names(garch_fits)[f],'\n') print(Box.test(abs(standard_residuals), 22, type = 'Ljung-Box')) } } #Coefficients print_garch_coefficients <- function(garch_fits) { for (f in 1:length(garch_fits)) { cat('\nCoefficients of', names(garch_fits)[f], '\n') print(round(garch_fits[[f]]@fit$matcoef,10)) cat('\nRobust coefficients of', names(garch_fits)[f], '\n') print(round(garch_fits[[f]]@fit$robust.matcoef,10)) } } # Models comparing compare_garch_models <- function(garch_fits, short_model_names) { model_comparison <- data.frame() for (f in 1:length(garch_fits)) { temp <- data.frame() temp[1,1] <- likelihood(garch_fits[[f]]) inf_criterion <- infocriteria(garch_fits[[f]]) temp <- rbind(temp, inf_criterion) model_comparison <- c(model_comparison, temp) } model_comparison <- as.data.frame(model_comparison) rownames(model_comparison) <- c('Likelihood',rownames(inf_criterion)) colnames(model_comparison) <- short_model_names print(model_comparison) } # Visualizing impact of negative previous return on variance visualize_dependecy_ret_var <- function(garch_fits, short_model_names) { p <- ggplot() for (f in 1:length(garch_fits)) { garch_news <- as.data.frame(newsimpact(garch_fits[[f]])[1:2]) model_name <- short_model_names[f] model_name <- enquo(model_name) p <- p + geom_line(data = garch_news, aes(x = zx, y = zy, color = !!model_name)) } p <- p + labs(x = 'Error', y = 'Variance', title = 
'Dependence of variance on errors in different models') + theme(plot.title = element_text(hjust = 0.5)) print(p) } # Visualizing volatility visualizing_volatility <- function(garch_fits, short_model_names, fund_vol) { p <- ggplot() garch_vol <- list() for (f in 1:length(garch_fits)) { garch_vol[[f]] <- sigma(garch_fits[[f]]) model_name <- short_model_names[f] model_name <- enquo(model_name) p <- p + geom_line(data = garch_vol[[f]], aes(x = index(garch_vol[[f]][,1]), y = garch_vol[[f]][,1], color = !!model_name), alpha = 0.2) } names(garch_vol) <- short_model_names p <- p + geom_line(data = fund_vol, aes(y = fund_vol[,1], x = index(fund_vol[,1]), color = 'Actual volatility')) + labs(x = 'Date', y = 'Volatility', title = 'Volatility constructed by different models') + theme(plot.title = element_text(hjust = 0.5)) suppressMessages(suppressWarnings(print(p))) return(garch_vol) } # Predicting volatility for n.ahead periods predict_volatility <- function(garch_fits, garch_vol, fund_vol, short_model_names) { fund_tail_volatility <- tail(fund_vol,10) predict_results <- data.frame(fund_tail_volatility) garch_sst <- c() for (f in 1:length(garch_fits)) { garch_forecast <- ugarchforecast(fitORspec = garch_fits[[f]], data = garch_vol[[f]], n.ahead = 10) predict_results <- cbind(predict_results, sigma(garch_forecast)) garch_sst[[f]] <- sum(fund_tail_volatility - sigma(garch_forecast)) names(garch_sst)[f] <- paste('TES for',short_model_names[f]) } names(predict_results)[2:ncol(predict_results)] <- short_model_names # Total error sum for models print(garch_sst) #Comparing predicted volatility for models with actual one print(predict_results) } ``` ```{r} # Data loading df <- read.csv('Funds.csv', stringsAsFactors = F) df <- df[,-1] df$Date <- as.Date(df$Date) # Transforming data from df to xts funds <- xts(df[,2:ncol(df)], order.by = df$Date) funds <- na.omit(funds) rm(df) # Subseting original data for April of 2020 year funds_red <- funds['/202004'] ``` ```{r} get_volatiles <- 
function(funds, width = 22, time_scale = 1, funds_names) { vol_df <- data.frame() for (i in 1:ncol(funds)) { fund <- funds[,i] temp <- rollapply(data = fund, width = 22, FUN = 'sd.annualized', scale = time_scale) if (nrow(vol_df) == 0) { vol_df <- temp } else { vol_df <- cbind(vol_df, temp) } } names(vol_df) <- funds_names return(vol_df) } visualize_funds_lines <- function(funds, y_axis_label = 'Volatility') { for (i in 1:ncol(funds)) { temp <- funds[,i] temp_mean <- mean(temp, na.rm = TRUE) p <- ggplot(temp, aes(x = index(temp), y = temp)) + geom_line(aes(color = 'Volatility')) + geom_hline(aes(yintercept = temp_mean, color = 'Mean'), size=.5, linetype='dashed') + geom_text( aes( min(index(temp)) , temp_mean, label = round(temp_mean, 4), vjust = 2)) + labs(x = 'Date', y = y_axis_label, title = names(funds)[i]) + theme(plot.title = element_text(hjust = 0.5)) suppressMessages(suppressWarnings(print(p))) } } visualize_funds_hist <- function(funds, x_axis_label = 'Return') { for (i in 1:ncol(funds)) { temp <- funds[,i] title <- paste(funds_names[i], 'returns') chart.Histogram(temp, methods = c('add.normal', 'add.density'), main = title) temp <- (temp - mean(temp, na.rm = T))/sd(temp, na.rm = T) title <- paste('Standardized', title) chart.Histogram(temp, methods = c('add.normal', 'add.density'), main = title) } } ``` ```{r} evaluate_garch <- function(fund, fund_vol) { # Models naming model_names <- c('Standard GARCH with normal distribution of errors', 'GJR GARCH with normal distribution of errors', 'Standard GARCH with skewed Student t distribution of errors', 'GJR GARCH with skewed Student t distribution of errors') short_model_names <- c('Normal GARCH', 'Normal GJR', 'Skewed t GARCH', 'Skewed t GJR') garch_fits <- get_garch_fits(fund) names(garch_fits) <- model_names visualize_residuals(garch_fits) test_garch_models(garch_fits) print_garch_coefficients(garch_fits) compare_garch_models(garch_fits, short_model_names) visualize_dependecy_ret_var(garch_fits, 
short_model_names) garch_vol <- visualizing_volatility(garch_fits, short_model_names, fund_vol) predict_volatility(garch_fits = garch_fits, garch_vol = garch_vol, fund_vol = fund_vol, short_model_names = short_model_names) } ``` ```{r} vol_df <- get_volatiles(funds, funds_names = funds_names) vol_df <- na.omit(vol_df) visualize_funds_lines(vol_df) visualize_funds_hist(funds) fund_tail_volatility <- tail(vol_df$Vanguard, 10) ``` ```{r} cat('Data up to 2020-04-30\n') for (i in 1:ncol(funds_red)) { cat(colnames(vol_df)[i],'\n') evaluate_garch(funds_red[,i], vol_df[,i]) cat('\n--------------------------------\n') } ``` ```{r} cat('Data up to 2020-05-08\n') for (i in 1:ncol(funds)) { cat(colnames(vol_df)[i],'\n') evaluate_garch(funds_red[,i], vol_df[,i]) cat('\n--------------------------------\n') } ```
true
--- title: "PI:NAME:<NAME>END_PI & PI:NAME:<NAME>END_PI. Finances Project" output: pdf_document: default html_notebook: default html_document: df_print: paged --- ```{r} require(dplyr) require(ggplot2) require(xts) require(rugarch) require(PerformanceAnalytics) require(quantmod) library(skewt) ``` ```{r} # Configure propet path to the Funds.csv file here # setwd('./') ``` ```{r} read_funds <- function(lf) { dfs <- list() browser() for (f in 1:length(lf)) { cond <- substr(lf[f], nchar(lf[f])-3, nchar(lf[f])) == '.csv' if (cond) { temp <- data.frame(read.csv(lf[f], stringsAsFactors = FALSE)) temp$Date <- as.Date(temp$Date) temp$Close <- as.numeric(temp$Close) dfs[[f]] <- temp[,c(1,5)] } } return(dfs) } funds_names <- c("Vanguard", "Blackrock", "Statestreet", "JPmorgan", "Bankmellon", "Allianz") ``` ```{r} # Forming set of parameters for different GARCH model get_garch_specs <- function() { # Standard GARCH with normal distribution of errors norm_garch_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'sGARCH'), distribution.model = 'norm') # GJR GARCH with normal distribution of errors norm_gjr_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'gjrGARCH'), distribution.model = 'norm') # Standard GARCH with skewed Student t distribution of errors sstd_garch_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'sGARCH'), distribution.model = 'sstd') # GJR GARCH with skewed Student t distribution of errors sstd_gjr_spec <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = 'gjrGARCH'), distribution.model = 'sstd') garch_specs <- list(norm_garch_spec, norm_gjr_spec, sstd_garch_spec, sstd_gjr_spec) return(garch_specs) } ``` ```{r} # Apply GARCH model to our data get_garch_fits <- function(fund) { garch_specs <- get_garch_specs() garch_fits <- list() for (s in 1:length(garch_specs)) { suppressWarnings(garch_fits[[s]] <- ugarchfit(data 
= fund, spec = garch_specs[[s]])) } return(garch_fits) } # Visualizing standardized residuals for models visualize_residuals <- function(garch_fits) { for (f in 1:length(garch_fits)) { chart.Histogram(residuals(garch_fits[[f]], standardize = T), methods = c('add.normal', 'add.density'), main = paste('Standardized residuals of',names(garch_fits)[f])) } } # Models validation test_garch_models <- function(garch_fits) { for (f in 1:length(garch_fits)) { standard_residuals <- residuals(garch_fits[[f]], standardize = T) p <- acf(abs(standard_residuals),22, plot = F) plot(p, main = names(garch_fits)[f]) cat('\n', names(garch_fits)[f],'\n') print(Box.test(abs(standard_residuals), 22, type = 'Ljung-Box')) } } #Coefficients print_garch_coefficients <- function(garch_fits) { for (f in 1:length(garch_fits)) { cat('\nCoefficients of', names(garch_fits)[f], '\n') print(round(garch_fits[[f]]@fit$matcoef,10)) cat('\nRobust coefficients of', names(garch_fits)[f], '\n') print(round(garch_fits[[f]]@fit$robust.matcoef,10)) } } # Models comparing compare_garch_models <- function(garch_fits, short_model_names) { model_comparison <- data.frame() for (f in 1:length(garch_fits)) { temp <- data.frame() temp[1,1] <- likelihood(garch_fits[[f]]) inf_criterion <- infocriteria(garch_fits[[f]]) temp <- rbind(temp, inf_criterion) model_comparison <- c(model_comparison, temp) } model_comparison <- as.data.frame(model_comparison) rownames(model_comparison) <- c('Likelihood',rownames(inf_criterion)) colnames(model_comparison) <- short_model_names print(model_comparison) } # Visualizing impact of negative previous return on variance visualize_dependecy_ret_var <- function(garch_fits, short_model_names) { p <- ggplot() for (f in 1:length(garch_fits)) { garch_news <- as.data.frame(newsimpact(garch_fits[[f]])[1:2]) model_name <- short_model_names[f] model_name <- enquo(model_name) p <- p + geom_line(data = garch_news, aes(x = zx, y = zy, color = !!model_name)) } p <- p + labs(x = 'Error', y = 'Variance', 
title = 'Dependence of variance on errors in different models') + theme(plot.title = element_text(hjust = 0.5)) print(p) } # Visualizing volatility visualizing_volatility <- function(garch_fits, short_model_names, fund_vol) { p <- ggplot() garch_vol <- list() for (f in 1:length(garch_fits)) { garch_vol[[f]] <- sigma(garch_fits[[f]]) model_name <- short_model_names[f] model_name <- enquo(model_name) p <- p + geom_line(data = garch_vol[[f]], aes(x = index(garch_vol[[f]][,1]), y = garch_vol[[f]][,1], color = !!model_name), alpha = 0.2) } names(garch_vol) <- short_model_names p <- p + geom_line(data = fund_vol, aes(y = fund_vol[,1], x = index(fund_vol[,1]), color = 'Actual volatility')) + labs(x = 'Date', y = 'Volatility', title = 'Volatility constructed by different models') + theme(plot.title = element_text(hjust = 0.5)) suppressMessages(suppressWarnings(print(p))) return(garch_vol) } # Predicting volatility for n.ahead periods predict_volatility <- function(garch_fits, garch_vol, fund_vol, short_model_names) { fund_tail_volatility <- tail(fund_vol,10) predict_results <- data.frame(fund_tail_volatility) garch_sst <- c() for (f in 1:length(garch_fits)) { garch_forecast <- ugarchforecast(fitORspec = garch_fits[[f]], data = garch_vol[[f]], n.ahead = 10) predict_results <- cbind(predict_results, sigma(garch_forecast)) garch_sst[[f]] <- sum(fund_tail_volatility - sigma(garch_forecast)) names(garch_sst)[f] <- paste('TES for',short_model_names[f]) } names(predict_results)[2:ncol(predict_results)] <- short_model_names # Total error sum for models print(garch_sst) #Comparing predicted volatility for models with actual one print(predict_results) } ``` ```{r} # Data loading df <- read.csv('Funds.csv', stringsAsFactors = F) df <- df[,-1] df$Date <- as.Date(df$Date) # Transforming data from df to xts funds <- xts(df[,2:ncol(df)], order.by = df$Date) funds <- na.omit(funds) rm(df) # Subseting original data for April of 2020 year funds_red <- funds['/202004'] ``` ```{r} 
get_volatiles <- function(funds, width = 22, time_scale = 1, funds_names) { vol_df <- data.frame() for (i in 1:ncol(funds)) { fund <- funds[,i] temp <- rollapply(data = fund, width = 22, FUN = 'sd.annualized', scale = time_scale) if (nrow(vol_df) == 0) { vol_df <- temp } else { vol_df <- cbind(vol_df, temp) } } names(vol_df) <- funds_names return(vol_df) } visualize_funds_lines <- function(funds, y_axis_label = 'Volatility') { for (i in 1:ncol(funds)) { temp <- funds[,i] temp_mean <- mean(temp, na.rm = TRUE) p <- ggplot(temp, aes(x = index(temp), y = temp)) + geom_line(aes(color = 'Volatility')) + geom_hline(aes(yintercept = temp_mean, color = 'Mean'), size=.5, linetype='dashed') + geom_text( aes( min(index(temp)) , temp_mean, label = round(temp_mean, 4), vjust = 2)) + labs(x = 'Date', y = y_axis_label, title = names(funds)[i]) + theme(plot.title = element_text(hjust = 0.5)) suppressMessages(suppressWarnings(print(p))) } } visualize_funds_hist <- function(funds, x_axis_label = 'Return') { for (i in 1:ncol(funds)) { temp <- funds[,i] title <- paste(funds_names[i], 'returns') chart.Histogram(temp, methods = c('add.normal', 'add.density'), main = title) temp <- (temp - mean(temp, na.rm = T))/sd(temp, na.rm = T) title <- paste('Standardized', title) chart.Histogram(temp, methods = c('add.normal', 'add.density'), main = title) } } ``` ```{r} evaluate_garch <- function(fund, fund_vol) { # Models naming model_names <- c('Standard GARCH with normal distribution of errors', 'GJR GARCH with normal distribution of errors', 'Standard GARCH with skewed Student t distribution of errors', 'GJR GARCH with skewed Student t distribution of errors') short_model_names <- c('Normal GARCH', 'Normal GJR', 'Skewed t GARCH', 'Skewed t GJR') garch_fits <- get_garch_fits(fund) names(garch_fits) <- model_names visualize_residuals(garch_fits) test_garch_models(garch_fits) print_garch_coefficients(garch_fits) compare_garch_models(garch_fits, short_model_names) 
visualize_dependecy_ret_var(garch_fits, short_model_names) garch_vol <- visualizing_volatility(garch_fits, short_model_names, fund_vol) predict_volatility(garch_fits = garch_fits, garch_vol = garch_vol, fund_vol = fund_vol, short_model_names = short_model_names) } ``` ```{r} vol_df <- get_volatiles(funds, funds_names = funds_names) vol_df <- na.omit(vol_df) visualize_funds_lines(vol_df) visualize_funds_hist(funds) fund_tail_volatility <- tail(vol_df$Vanguard, 10) ``` ```{r} cat('Data up to 2020-04-30\n') for (i in 1:ncol(funds_red)) { cat(colnames(vol_df)[i],'\n') evaluate_garch(funds_red[,i], vol_df[,i]) cat('\n--------------------------------\n') } ``` ```{r} cat('Data up to 2020-05-08\n') for (i in 1:ncol(funds)) { cat(colnames(vol_df)[i],'\n') evaluate_garch(funds_red[,i], vol_df[,i]) cat('\n--------------------------------\n') } ```
[{"context":"---\ntitle: \"random Forest\"\nauthor: \"Zheng Wu\"\ndate: \"March 4, 2018\"\noutput: h(...TRUNCATED)
randomForest.rmd
MikeSchoenhals/MSE246
2
"---\ntitle: \"random Forest\"\nauthor: \"Zheng Wu\"\ndate: \"March 4, 2018\"\noutput: html_document(...TRUNCATED)
4774
"---\ntitle: \"random Forest\"\nauthor: \"<NAME>\"\ndate: \"March 4, 2018\"\noutput: html_document\n(...TRUNCATED)
true
"---\ntitle: \"random Forest\"\nauthor: \"PI:NAME:<NAME>END_PI\"\ndate: \"March 4, 2018\"\noutput: h(...TRUNCATED)
[{"context":"---\ntitle: \"lab_12\"\nauthor: \"derek willis\"\ndate: \"11/16/2021\"\noutput: html_do(...TRUNCATED)
labs/lab_12/lab_12.rmd
dloshin/data_journalism_2021_fall
0
"---\ntitle: \"lab_12\"\nauthor: \"derek willis\"\ndate: \"11/16/2021\"\noutput: html_document\n---\(...TRUNCATED)
2651
"---\ntitle: \"lab_12\"\nauthor: \"<NAME>\"\ndate: \"11/16/2021\"\noutput: html_document\n---\n\n```(...TRUNCATED)
true
"---\ntitle: \"lab_12\"\nauthor: \"PI:NAME:<NAME>END_PI\"\ndate: \"11/16/2021\"\noutput: html_docume(...TRUNCATED)
[{"context":"itle: \"Diagnosis de Conexi贸n a Internet\"\nauthor: \"Internet Levante\"\ndate: '`r Sy(...TRUNCATED)
RMD/diagnosis.rmd
DrumSergio/services-isp
4
"---\ntitle: \"Diagnosis de Conexi贸n a Internet\"\nauthor: \"Internet Levante\"\ndate: '`r Sys.time(...TRUNCATED)
255
"---\ntitle: \"Diagnosis de Conexi贸n a Internet\"\nauthor: \"<NAME>\"\ndate: '`r Sys.time()`'\noutp(...TRUNCATED)
true
"---\ntitle: \"Diagnosis de Conexi贸n a Internet\"\nauthor: \"PI:NAME:<NAME>END_PI\"\ndate: '`r Sys.(...TRUNCATED)
[{"context":"rotection of organic matter: Aggregates\"\nauthor: \"Hannah Hubanks\"\ndate: \"February(...TRUNCATED)
content/Staging/soil hub blog 2HLHaggregates.rmd
aaberhe/SOC-Hub
4
"---\ntitle: \"Physical protection of organic matter: Aggregates\"\nauthor: \"Hannah Hubanks\"\ndate(...TRUNCATED)
1634
"---\ntitle: \"Physical protection of organic matter: Aggregates\"\nauthor: \"<NAME>\"\ndate: \"Febr(...TRUNCATED)
true
"---\ntitle: \"Physical protection of organic matter: Aggregates\"\nauthor: \"PI:NAME:<NAME>END_PI\"(...TRUNCATED)
[{"context":"-\ntitle: \"2019-08-22-hatch-ph-standards\"\nauthor: \"Michael J. Braus\"\ndate: \"`r S(...TRUNCATED)
source/2019-08-22-hatch-ph-standards-compiled/2019-08-22-hatch-ph-standards.rmd
michaeljbraus/usda-wisconsin-soil-ph
0
"---\ntitle: \"2019-08-22-hatch-ph-standards\"\nauthor: \"Michael J. Braus\"\ndate: \"`r Sys.Date()`(...TRUNCATED)
3192
"---\ntitle: \"2019-08-22-hatch-ph-standards\"\nauthor: \"<NAME>\"\ndate: \"`r Sys.Date()`\"\noutput(...TRUNCATED)
true
"---\ntitle: \"2019-08-22-hatch-ph-standards\"\nauthor: \"PI:NAME:<NAME>END_PI\"\ndate: \"`r Sys.Dat(...TRUNCATED)
[{"context":"splayMode=MathJax\n&courseID=daemon_course\n&userID=daemon\n&course_password=daemon\n&o(...TRUNCATED)
text/lines_and_planes/lines_and_planes.rmd
lahvak/Calc3
0
"---\ntitle: Lines and Planes\n---\n\n\\renewcommand{\\vec}[1]{\\mathbf{#1}}\n\\newcommand{\\v}[1]{\(...TRUNCATED)
3041
"---\ntitle: Lines and Planes\n---\n\n\\renewcommand{\\vec}[1]{\\mathbf{#1}}\n\\newcommand{\\v}[1]{\(...TRUNCATED)
true
"---\ntitle: Lines and Planes\n---\n\n\\renewcommand{\\vec}[1]{\\mathbf{#1}}\n\\newcommand{\\v}[1]{\(...TRUNCATED)
[{"context":"X scATAC multiome data - PBMC 3K unsort\"\nauthor: \"Cankun\"\ndate: \"XXX\"\noutput:\n(...TRUNCATED)
vignettes/regulon.rmd
Wang-Cankun/iris3api
0
"---\ntitle: \"IRIS3 - 10X scATAC multiome data - PBMC 3K unsort\"\nauthor: \"Cankun\"\ndate: \"XXX\(...TRUNCATED)
2553
"---\ntitle: \"IRIS3 - 10X scATAC multiome data - PBMC 3K unsort\"\nauthor: \"<NAME>\"\ndate: \"XXX\(...TRUNCATED)
true
"---\ntitle: \"IRIS3 - 10X scATAC multiome data - PBMC 3K unsort\"\nauthor: \"PI:NAME:<NAME>END_PI\"(...TRUNCATED)
[{"context":"## STAT 445 / 645 -- Section 1001 <br> Instructor: Colin Grudzien<br>\n\n## Instruction(...TRUNCATED)
teaching/stat_445_645_2021_fall/activities/activity_2021_09_24.rmd
cgrudz/cgrudz.github.io
3
"# Activity 09/24/2021\n\n## STAT 445 / 645 -- Section 1001 <br> Instructor: Colin Grudzien<br>\n\n#(...TRUNCATED)
4935
"# Activity 09/24/2021\n\n## STAT 445 / 645 -- Section 1001 <br> Instructor: <NAME><br>\n\n## Instru(...TRUNCATED)
true
"# Activity 09/24/2021\n\n## STAT 445 / 645 -- Section 1001 <br> Instructor: PI:NAME:<NAME>END_PI<br(...TRUNCATED)
End of preview. Expand in Data Studio

Dataset Card for "rmarkdown_checks"

More Information needed

Downloads last month
8