content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#***************************************************************************************************
#
# Prepare the King County data
#
#***************************************************************************************************
library(tidyverse)
library(kingCoData)
## Set Paths
data_path <- file.path(getwd(), 'data')
raw_path <- file.path(data_path, 'raw')
ready_path <- file.path(data_path, 'ready')
# Current calendar year extracted from the system date (e.g. 2021)
CURR_YEAR = as.numeric(substr(Sys.Date(), 1, 4))
### Tax Data --------------------------------------------------------------------------------------
# Tax Data
taxraw_df <- read.csv(file.path(raw_path, 'EXTR_taxacctreceivable_v.csv'))
# Create two annual snapshots
tax_df <- taxraw_df %>%
dplyr::select(AcctNbr, BillYr, LandVal, ImpsVal)
# Left-pad account numbers with '0' until each string is 12 characters.
# (Presumably read.csv parsed AcctNbr as numeric and dropped leading zeros;
# the loop runs 5 times, i.e. assumes at most 5 zeros were lost -- TODO confirm.)
tax_df$AcctNbr <- as.character(tax_df$AcctNbr)
for (k in 1:5){
a <- which(nchar(tax_df$AcctNbr) < 12)
tax_df$AcctNbr [a]<- paste0('0', tax_df$AcctNbr[a])
}
# Keep only year-2000 and current-year bills, build the 'pinx' key
# ('..' + first 10 characters of the account number), then shift the bill
# year back by one (presumably the bill year lags the tax/assessment year
# by one -- TODO confirm), yielding snapshots for 1999 and CURR_YEAR - 1.
tax_df <- tax_df %>%
dplyr::filter(BillYr == 2000 | BillYr == CURR_YEAR) %>%
dplyr::mutate(pinx = paste0('..', substr(AcctNbr, 1, 10))) %>%
dplyr::select(pinx, tax_year = BillYr, land_val = LandVal, imp_val = ImpsVal) %>%
dplyr::mutate(tax_year = tax_year - 1)
# Save
# Split into the 1999 snapshot and the current snapshot, one RDS file each
saveRDS(tax_df %>% dplyr::filter(tax_year == 1999),
file.path(ready_path, 'tax_1999.RDS'))
saveRDS(tax_df %>% dplyr::filter(tax_year != 1999),
file.path(ready_path, 'tax_current.RDS'))
### Change Data ------------------------------------------------------------------------------------
# Load data
chraw_df <- read.csv(file.path(raw_path, 'extr_changehist_v.csv'))
# Create limited change data
# Keep only change Types 3/6/7/13, retain the most recent event per parcel
# (Major/Minor pair) via arrange + distinct, add the pinx key, and drop
# events before 1999-01-01.
ch_df <- chraw_df %>%
dplyr::filter(Type %in% c(3,6,7,13)) %>%
dplyr::arrange(desc(EventDate)) %>%
dplyr::distinct(Major, Minor, .keep_all = TRUE) %>%
utilAddPinx(.) %>%
dplyr::select(pinx, type = Type, event_date = EventDate) %>%
dplyr::mutate(event_date = as.Date(event_date)) %>%
dplyr::filter(event_date >= as.Date('1999-01-01'))
# Save
saveRDS(ch_df, file.path(ready_path, 'major_changes.RDS'))
#***************************************************************************************************
#***************************************************************************************************
| /scripts/3_ create_tax_change_data.R | no_license | andykrause/kingCoData | R | false | false | 2,274 | r | #***************************************************************************************************
#
# Prepare the King County data
#
#***************************************************************************************************
library(tidyverse)
library(kingCoData)
## Set Paths
data_path <- file.path(getwd(), 'data')
raw_path <- file.path(data_path, 'raw')
ready_path <- file.path(data_path, 'ready')
CURR_YEAR = as.numeric(substr(Sys.Date(), 1, 4))
### Tax Data --------------------------------------------------------------------------------------
# Tax Data
taxraw_df <- read.csv(file.path(raw_path, 'EXTR_taxacctreceivable_v.csv'))
# Create two annual snapshots
tax_df <- taxraw_df %>%
dplyr::select(AcctNbr, BillYr, LandVal, ImpsVal)
tax_df$AcctNbr <- as.character(tax_df$AcctNbr)
for (k in 1:5){
a <- which(nchar(tax_df$AcctNbr) < 12)
tax_df$AcctNbr [a]<- paste0('0', tax_df$AcctNbr[a])
}
tax_df <- tax_df %>%
dplyr::filter(BillYr == 2000 | BillYr == CURR_YEAR) %>%
dplyr::mutate(pinx = paste0('..', substr(AcctNbr, 1, 10))) %>%
dplyr::select(pinx, tax_year = BillYr, land_val = LandVal, imp_val = ImpsVal) %>%
dplyr::mutate(tax_year = tax_year - 1)
# Save
saveRDS(tax_df %>% dplyr::filter(tax_year == 1999),
file.path(ready_path, 'tax_1999.RDS'))
saveRDS(tax_df %>% dplyr::filter(tax_year != 1999),
file.path(ready_path, 'tax_current.RDS'))
### Change Data ------------------------------------------------------------------------------------
# Load data
chraw_df <- read.csv(file.path(raw_path, 'extr_changehist_v.csv'))
# Create limited change data
ch_df <- chraw_df %>%
dplyr::filter(Type %in% c(3,6,7,13)) %>%
dplyr::arrange(desc(EventDate)) %>%
dplyr::distinct(Major, Minor, .keep_all = TRUE) %>%
utilAddPinx(.) %>%
dplyr::select(pinx, type = Type, event_date = EventDate) %>%
dplyr::mutate(event_date = as.Date(event_date)) %>%
dplyr::filter(event_date >= as.Date('1999-01-01'))
# Save
saveRDS(ch_df, file.path(ready_path, 'major_changes.RDS'))
#***************************************************************************************************
#***************************************************************************************************
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasetsDoc.R
\docType{data}
\name{Utilities2}
\alias{Utilities2}
\title{Utility bills}
\format{
A data frame containing 117 observations for the following variables.
\itemize{
\item{\code{month}} {month (coded as a number)}
\item{\code{day}} {day of month on which bill was calculated}
\item{\code{year}} {year of bill}
\item{\code{temp}} {average temperature (F) for billing period}
\item{\code{kwh}} {electricity usage (kwh)}
\item{\code{ccf}} {gas usage (ccf)}
\item{\code{thermsPerDay}} {average gas usage per billing day (in therms)}
\item{\code{billingDays}} {number of billing days in billing period}
\item{\code{totalbill}} {total bill (in dollars)}
\item{\code{gasbill}} {gas bill (in dollars)}
\item{\code{elecbill}} {electric bill (in dollars)}
\item{\code{notes}} {notes about the billing period}
\item{\code{ccfpday}} {average gas usage per day (\code{Utilities2} only)}
\item{\code{kwhpday}} {average electric usage per day (\code{Utilities2} only)}
\item{\code{gasbillpday}} {gas bill divided by billing days (\code{Utilities2} only)}
\item{\code{elecbillpday}} {electric bill divided by billing days
(\code{Utilities2} only)}
\item{\code{totalbillpday}} {total bill divided by billing days
(\code{Utilities2} only)}
\item{\code{therms}} {\code{thermsPerDay * billingDays} (\code{Utilities2} only)}
\item{\code{monthsSinceY2K}} {months since 2000 (\code{Utilities2} only)}
}
}
\source{
Daniel T. Kaplan, \emph{Statistical modeling: A fresh approach}, 2009.
}
\usage{
data(Utilities2)
}
\description{
Data from utility bills at a private residence. This is an augmented version
of \code{\link{Utilities}}.
}
\examples{
data(Utilities2)
if (require(ggformula)) {
gf_point(gasbillpday ~ temp, data = Utilities2)
}
}
\seealso{
\code{\link{Utilities}}.
}
\keyword{datasets}
| /man/Utilities2.Rd | no_license | ProjectMOSAIC/mosaicData | R | false | true | 1,872 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasetsDoc.R
\docType{data}
\name{Utilities2}
\alias{Utilities2}
\title{Utility bills}
\format{
A data frame containing 117 observations for the following variables.
\itemize{
\item{\code{month}} {month (coded as a number)}
\item{\code{day}} {day of month on which bill was calculated}
\item{\code{year}} {year of bill}
\item{\code{temp}} {average temperature (F) for billing period}
\item{\code{kwh}} {electricity usage (kwh)}
\item{\code{ccf}} {gas usage (ccf)}
\item{\code{thermsPerDay}} {a numeric vector}
\item{\code{billingDays}} {number of billing days in billing period}
\item{\code{totalbill}} {total bill (in dollars)}
\item{\code{gasbill}} {gas bill (in dollars)}
\item{\code{elecbill}} {electric bill (in dollars)}
\item{\code{notes}} {notes about the billing period}
\item{\code{ccfpday}} {average gas usage per day (\code{Utilities2} only)}
\item{\code{kwhpday}} {average electric usage per day (\code{Utilities2} only)}
\item{\code{gasbillpday}} {gas bill divided by billing days (\code{Utilities2} only)}
\item{\code{elecbillpday}} {electric bill divided by billing days a numeric vector
(\code{Utilities2} only)}
\item{\code{totalbillpday}} {total bill divided by billing days a
numeric vector (\code{Utilities2} only)}
\item{\code{therms}} {\code{thermsPerDay * billingDays} (\code{Utilities2} only)}
\item{\code{monthsSinceY2K}} {months since 2000 (\code{Utilities2} only)}
}
}
\source{
Daniel T. Kaplan, \emph{Statistical modeling: A fresh approach}, 2009.
}
\usage{
data(Utilities2)
}
\description{
Data from utility bills at a private residence. This is an augmented version
of \code{\link{Utilities}}.
}
\examples{
data(Utilities2)
if (require(ggformula)) {
gf_point(gasbillpday ~ temp, data = Utilities2)
}
}
\seealso{
\code{\link{Utilities}}.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qcv.R
\name{qcv}
\alias{qcv}
\title{Quick Character Vector}
\usage{
qcv(..., terms = NULL, space.wrap = FALSE, trailing = FALSE,
leading = FALSE, split = " ", rm.blank = TRUE)
}
\arguments{
\item{terms}{An optional argument to present the terms as one long character
string. This is useful if the split (separator) is not a comma (e.g., spaces
are the term separators).}
\item{space.wrap}{logical. If \code{TRUE} wraps the vector of terms with a
leading/trailing space.}
\item{trailing}{logical. If \code{TRUE} wraps the vector of terms with a
trailing space.}
\item{leading}{logical. If \code{TRUE} wraps the vector of terms with a
leading space.}
\item{split}{Character vector of length one to use for splitting (i.e., the
separator used in the vector). For use with the argument \code{terms}.}
\item{rm.blank}{logical. If \code{TRUE} removes all blank spaces from the
vector.}
\item{\ldots}{Character objects. Either \ldots or \code{terms} argument must
be utilized.}
}
\value{
Returns a character vector.
}
\description{
Create a character vector without the use of quotation marks.
}
\examples{
\dontrun{
qcv(I, like, dogs)
qcv(terms = "I, like, dogs") #default separator is " "
qcv(terms = "I, like, dogs", split = ",")
qcv(terms = "I like dogs")
qcv(I, like, dogs, space.wrap = TRUE)
qcv(I, like, dogs, trailing = TRUE)
qcv(I, like, dogs, leading = TRUE)
exclude(Top25Words, qcv(the, of, and))
qcv(terms = "mpg cyl disp hp drat wt qsec vs am gear carb")
}
}
\seealso{
\code{\link[base]{c}}
}
\keyword{character}
| /man/qcv.Rd | no_license | hoodaly/qdap | R | false | true | 1,627 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qcv.R
\name{qcv}
\alias{qcv}
\title{Quick Character Vector}
\usage{
qcv(..., terms = NULL, space.wrap = FALSE, trailing = FALSE,
leading = FALSE, split = " ", rm.blank = TRUE)
}
\arguments{
\item{terms}{An optional argument to present the terms as one long character
string. This is useful if the split (separator) is not a comma (e.g., spaces
are the term separators).}
\item{space.wrap}{logical. If \code{TRUE} wraps the vector of terms with a
leading/trailing space.}
\item{trailing}{logical. If \code{TRUE} wraps the vector of terms with a
trailing space.}
\item{leading}{logical. If \code{TRUE} wraps the vector of terms with a
leading space.}
\item{split}{Character vector of length one to use for splitting (i.e., the
separator used in the vector). For use with the argument \code{terms}.}
\item{rm.blank}{logical. If \code{TRUE} removes all blank spaces from the
vector.}
\item{\ldots}{Character objects. Either \ldots or \code{terms} argument must
be utilized.}
}
\value{
Returns a character vector.
}
\description{
Create a character vector without the use of quotation marks.
}
\examples{
\dontrun{
qcv(I, like, dogs)
qcv(terms = "I, like, dogs") #default separator is " "
qcv(terms = "I, like, dogs", split = ",")
qcv(terms = "I like dogs")
qcv(I, like, dogs, space.wrap = TRUE)
qcv(I, like, dogs, trailing = TRUE)
qcv(I, like, dogs, leading = TRUE)
exclude(Top25Words, qcv(the, of, and))
qcv(terms = "mpg cyl disp hp drat wt qsec vs am gear carb")
}
}
\seealso{
\code{\link[base]{c}}
}
\keyword{character}
|
##' Draw boxplots for several variables in the data or a continuous
##' variable vs a categorical variable
##'
##' This function can draw side-by-side boxplots for all the variables
##' in a data frame or boxplots for a continous variable vs a
##' categorical variable.
##'
##' Common interactions are documented in
##' \code{\link{common_key_press}}. Note boxplots also supports
##' brushing and can respond to brushing in other plots. When we brush
##' in other plots which are based on the same data, there will be
##' ``child'' boxplots in this plot showing the distributions of the
##' brushed data.
##' @param vars a list of variables (a character vector), or a
##' formula; a one-sided formula like \code{~ x1 + x2 + x3} means to
##' draw side-by-side boxplots for the variables in the right hand
##' side, whereas a two-sided formula like \code{y ~ x} means boxplots
##' of a continuous \code{y} against a categorical \code{x}
##' @inheritParams qbar
##' @param at the locations of the boxplots (by default from 1 to
##' \code{p} where \code{p} is the number of variables to plot or the
##' number of levels of the categorical variable)
##' @param width width(s) of boxes (do not have to be a same value if
##' provided as a numeric vector); by default it is about 1/10 of the
##' screen width
##' @param horizontal horizontal or vertical boxplots
##' @param points whether to add data points to the boxplot
##' @return A boxplot
##' @author Yihui Xie <\url{http://yihui.name}>
##' @example inst/examples/qboxplot-ex.R
##' @export
##' @family plots
# Build the interactive boxplot view: compute boxplot statistics and axis
# meta data, define mouse/keyboard brush handlers, assemble the Qt layer
# grid, and register listeners so the plot reacts to brushing in linked
# plots. Returns a qplotView with the meta data attached as attr 'meta'.
qboxplot =
function(vars, data = last_data(), at = NULL, width = NULL, horizontal = FALSE,
main = '', xlim = NULL, ylim = NULL, xlab = NULL, ylab = NULL,
points = FALSE) {
data = check_data(data)
b = brush(data)
b$select.only = TRUE; b$draw.brush = FALSE # a selection brush
meta = Box.meta$new(horizontal = horizontal, main = main, alpha = 1, points = points,
active = TRUE)
# default: plot every column whose name does not start with '.'
if (missing(vars)) vars = grep('^[^.]', names(data), value = TRUE)
# (Re)compute boxplot statistics and axis meta data; with brush = TRUE only
# the statistics of the brushed subset are refreshed (meta$bxp.stats2) and
# the function returns early without touching axes/limits.
compute_coords = function(brush = FALSE) {
meta$minor = ifelse(meta$horizontal, 'x', 'y')
idx = visible(data)
if (brush) idx = idx & selected(data)
if (inherits(vars, 'formula')) {
vars.n = length(vars) # 2 means one-sided formula, 3 means two-sided
vars.a = all.vars(vars) # all variables in the formula
if (vars.n == 2) {
# one-sided formula: side-by-side boxplots of each RHS variable
meta$vars = all.vars(vars)
if (identical(meta$vars, '.'))
meta$vars = grep('^[^.]', names(data), value = TRUE)
ylist = lapply(as.data.frame(data[idx, meta$vars, drop = FALSE]), as.numeric)
if (!brush) {
meta$xlab = if (is.null(xlab)) 'variable' else xlab
meta$ylab = if (is.null(ylab)) 'value' else ylab
}
} else if (vars.n == 3) {
# two-sided formula y ~ x: split y by the levels of x
meta$xvar = vars.a[2]; meta$yvar = vars.a[1]
ylist = split(data[idx, meta$yvar], data[idx, meta$xvar])
if (!brush) {
meta$xlab = if (is.null(xlab)) vars.a[2] else xlab
meta$ylab = if (is.null(ylab)) vars.a[1] else ylab
}
}
} else {
# character vector of column names
ylist = lapply(as.data.frame(data[idx, vars, drop = FALSE]), as.numeric)
if (!brush) {
meta$vars = names(data[, vars, drop = FALSE])
meta$xlab = if (is.null(xlab)) 'variable' else xlab
meta$ylab = if (is.null(ylab)) 'value' else ylab
}
}
bxp.data = lapply(ylist, boxplot.stats, do.conf = FALSE)
bxp.stats = sapply(bxp.data, `[[`, 'stats') # quantiles
bxp.out = lapply(bxp.data, `[[`, 'out') # outliers
if (brush) {
meta$bxp.stats2 = bxp.stats
return()
}
meta$bxp.stats = bxp.stats; meta$bxp.out = bxp.out
meta$xlabels = if (length(meta$vars)) meta$vars else names(ylist)
meta$yat = axis_loc(range(ylist)); meta$ylabels = format(meta$yat)
meta$xat = meta$at = if (is.null(at)) seq_along(meta$xlabels) else at
# default box width: 0.2 x the x-range, floored at 0.3
meta$width = if (is.null(width)) max(0.2 * diff(range(meta$at)), 0.3) else width
meta$limits =
cbind(extend_ranges(if (is.null(xlim))
range(meta$xat) + c(-1, 1) * max(meta$width)/2 else xlim,
ifelse(length(meta$xat) == 1, 5, 2) * qpar('mar')),
extend_ranges(if (is.null(ylim)) range(ylist) else ylim))
# per-observation coordinates (used for the points layer and brushing)
if (length(meta$vars)) {
meta$y =
c(vapply(as.data.frame(data[, meta$vars, drop = FALSE]), as.numeric,
numeric(nrow(data))))
meta$x = rep(meta$at, each = nrow(data))
} else {
meta$y = data[, meta$yvar]; meta$x = meta$at[as.integer(data[, meta$xvar])]
}
}
compute_coords()
# point colors: all NA unless points = TRUE; rows not visible get NA
compute_colors = function() {
if (!meta$points) {
meta$color = NA; meta$border = NA
} else {
if (length(meta$vars)) {
idx = !visible(data)
meta$color = data$.color; meta$border = data$.border
meta$color[idx] = NA; meta$border[idx] = NA
} else {
meta$color = meta$border = 'gray15'
}
}
}
compute_colors()
# for horizontal boxplots, swap the x/y meta fields and flip the limits
flip_coords = function() {
if (!meta$horizontal) return()
switch_value('x', 'y', meta)
switch_value('xat', 'yat', meta)
switch_value('xlabels', 'ylabels', meta)
switch_value('xlab', 'ylab', meta)
meta$limits = meta$limits[, 2:1]
}
flip_coords()
meta$brush.size = c(1, -1) * apply(meta$limits, 2, diff) / 15
# paint the (optional) data points as circle glyphs
main_draw = function(layer, painter) {
qdrawGlyph(painter, qglyphCircle(r = data$.size[1]), meta$x, meta$y,
stroke = meta$border, fill = meta$color)
}
brush_mouse_press = function(layer, event) {
common_mouse_press(layer, event, data, meta)
}
# update the row selection as the brush rectangle moves
brush_mouse_move = function(layer, event) {
rect = qrect(update_brush_size(meta, event))
hits = layer$locate(rect)
if (length(hits)) {
# with multiple variables each row is drawn once per variable, so fold
# glyph indices back onto row indices; then 0-based -> 1-based
if (length(meta$vars))
hits = hits %% nrow(data)
hits = hits + 1
}
selected(data) = mode_selection(selected(data), hits, mode = b$mode)
common_mouse_move(layer, event, data, meta)
}
brush_mouse_release = function(layer, event) {
brush_mouse_move(layer, event)
common_mouse_release(layer, event, data, meta)
}
key_press = function(layer, event) {
common_key_press(layer, event, data, meta)
}
key_release = function(layer, event) {
common_key_release(layer, event, data, meta)
}
# assemble the scene: boxplot layer, points layer, brush ("child" boxplot)
# layer, plus title/axis/label/grid layers positioned in a layout grid
scene = qscene()
layer.bxp = qbxp(data, meta, limits = qrect(meta$limits))
layer.main =
qlayer(paintFun = main_draw, mousePressFun = brush_mouse_press,
mouseReleaseFun = brush_mouse_release,
mouseMove = brush_mouse_move,
keyPressFun = key_press, keyReleaseFun = key_release,
focusInFun = function(layer, event) {
common_focus_in(layer, event, data, meta)
}, focusOutFun = function(layer, event) {
common_focus_out(layer, event, data, meta)
},
limits = qrect(meta$limits), clip = TRUE)
layer.root = qlayer(scene)
layer.brush = qbxp(data, meta, subset = TRUE, limits = qrect(meta$limits))
layer.title = qmtext(meta = meta, side = 3)
layer.xlab = qmtext(meta = meta, side = 1)
layer.ylab = qmtext(meta = meta, side = 2)
layer.xaxis = qaxis(meta = meta, side = 1)
layer.yaxis = qaxis(meta = meta, side = 2)
layer.grid = qgrid(meta = meta)
layer.root[0, 2] = layer.title
layer.root[2, 2] = layer.xaxis
layer.root[3, 2] = layer.xlab
layer.root[1, 1] = layer.yaxis
layer.root[1, 0] = layer.ylab
layer.root[1, 2] = layer.grid
layer.root[1, 2] = layer.bxp
layer.root[1, 2] = layer.main
layer.root[1, 2] = layer.brush
layer.root[1, 3] = qlayer()
# size the layout's outer rows/columns to fit title and axis labels
set_layout = function() {
fix_dimension(layer.root,
row = list(id = c(0, 2, 3), value = c(prefer_height(meta$main),
prefer_height(meta$xlabels),
prefer_height(meta$xlab))),
column = list(id = c(1, 0, 3), value = c(prefer_width(meta$ylabels),
prefer_width(meta$ylab, FALSE),
10)))
}
set_layout()
meta$mainChanged$connect(set_layout)
meta$xlabChanged$connect(set_layout); meta$ylabChanged$connect(set_layout)
meta$xlabelsChanged$connect(set_layout); meta$ylabelsChanged$connect(set_layout)
view = qplotView(scene = scene)
view$setWindowTitle(paste("Boxplot:", if (length(meta$vars))
paste(meta$vars, collapse = ', ') else
paste(meta$yvar, meta$xvar, sep = ' ~ ')))
view$resize(480 * sqrt(length(unique(meta$x))/3), 480)
# react to data changes: .brushed -> refresh child boxplots only;
# .color/.border -> recolor points only; anything else -> full recompute
d.idx = add_listener(data, function(i, j) {
idx = which(j == c('.brushed', '.color', '.border'))
if (length(idx) < 1) {
compute_coords(); compute_colors()
qupdate(layer.grid); qupdate(layer.xaxis); qupdate(layer.yaxis)
layer.main$invalidateIndex(); qupdate(layer.main)
return()
} else idx = c(1, 2, 2)[idx]
switch(idx, {compute_coords(brush = TRUE); qupdate(layer.brush)},
{compute_colors(); qupdate(layer.main)})
})
# detach the data listener when the main layer is destroyed
qconnect(layer.main, 'destroyed', function(x) {
remove_listener(data, d.idx)
})
b$cursorChanged$connect(function() {
set_cursor(view, b$cursor)
})
sync_limits(meta, layer.main, layer.brush, layer.bxp) # sync limits
# allow programmatic brushing at a given position
meta$manual.brush = function(pos) {
brush_mouse_move(layer = layer.main, event = list(pos = function() pos))
}
attr(view, 'meta') = meta
view
}
# Reference class holding a boxplot's state: the shared fields from
# Common.meta plus boxplot-specific ones (per-observation x/y coordinates,
# box positions/widths, orientation, boxplot.stats output, outliers, and
# bxp.stats2 = the statistics of the currently brushed subset).
Box.meta =
setRefClass("Box_meta", fields = properties(c(
Common.meta,
list(vars = 'character', x = 'numeric', y = 'numeric',
xvar = 'character', yvar = 'character',
at = 'numeric', width = 'numeric', horizontal = 'logical',
bxp.stats = 'matrix', bxp.out = 'list', points = 'logical',
bxp.stats2 = 'matrix')
)))
##' Create a boxplot layer
##'
##' A ``low-level'' plotting function to create a boxplot layer.
##'
##' @inheritParams qbar
##' @param meta the meta data
##' @param subset whether to draw boxplots based on selected rows
##' @param ... other arguments passed to \code{\link[qtpaint]{qlayer}}
##' @return a layer object
##' @author Yihui Xie <\url{http://yihui.name}>
##' @export
##' @examples ## see source code of qboxplot()
# Create a layer that paints boxplots from precomputed statistics in `meta`.
# With subset = FALSE it draws the full-data boxplots (white boxes, black
# strokes, outlier points); with subset = TRUE it draws the "child" boxplots
# of the brushed rows (gray strokes, translucent yellow fill, width scaled
# by the selected fraction) plus the brush rectangle.
qbxp = function(data, meta, subset = FALSE, ...) {
draw_boxplot = function(layer, painter) {
.boxcol = 'black'
width = meta$width
if (subset) {
bxp.stats = meta$bxp.stats2
# nothing brushed yet: no statistics to draw
if (!nrow(bxp.stats)) return()
.boxcol = 'gray'
# scale the child box width by the proportion of rows selected
# (mean of a logical vector = proportion TRUE)
width = mean(selected(data)) * width
} else bxp.stats = meta$bxp.stats
if (!subset) bxp.out = meta$bxp.out
at = meta$at; horizontal = meta$horizontal
# whiskers: rows 1/4 of bxp.stats are whisker ends, rows 2/5 box edges
# (column layout of boxplot.stats()$stats)
x0 = rep(at, each = 2); y0 = as.vector(bxp.stats[c(1, 4), ])
x1 = x0; y1 = as.vector(bxp.stats[c(2, 5), ])
if (horizontal) {
# swap the x/y variables in this function's evaluation frame to draw
# horizontally (NOTE(review): relies on sys.frame(1) resolving to
# draw_boxplot's frame when invoked as a paint callback -- confirm)
switch_value('x0', 'y0', sys.frame(1))
switch_value('x1', 'y1', sys.frame(1))
}
qdrawSegment(painter, x0, y0, x1, y1, stroke = .boxcol) # whiskers
x0 = at - width/2; x1 = at + width/2
y0 = bxp.stats[2, ]; y1 = bxp.stats[4, ]
if (horizontal) {
switch_value('x0', 'y0', sys.frame(1))
switch_value('x1', 'y1', sys.frame(1))
}
qdrawRect(painter, x0, y0, x1, y1, fill = ifelse(subset, '#FFFF0099', 'white'),
stroke = .boxcol) # box
# outliers (only on the full-data layer)
if (!subset && length(bxp.out)) {
y = unlist(bxp.out); x = rep(at, sapply(bxp.out, length))
if (horizontal) {
switch_value('x', 'y', sys.frame(1))
}
circle = qglyphCircle(r = data$.size[1])
qdrawGlyph(painter, circle, x, y, stroke = 'black', fill = 'black')
}
# thicker pen for the median bar (row 3 of bxp.stats), then restore
qlineWidth(painter) = 3
if (horizontal) {
x0 = x1 = bxp.stats[3, ]
} else {
y0 = y1 = bxp.stats[3, ]
}
qdrawSegment(painter, x0, y0, x1, y1, stroke = .boxcol) # median bar
qlineWidth(painter) = 1
if (subset) draw_brush(layer, painter, data, meta)
}
qlayer(paintFun = draw_boxplot, ...)
}
| /R/qboxplot.R | no_license | NickSpyrison/cranvas | R | false | false | 13,144 | r | ##' Draw boxplots for several variables in the data or a continuous
##' variable vs a categorical variable
##'
##' This function can draw side-by-side boxplots for all the variables
##' in a data frame or boxplots for a continous variable vs a
##' categorical variable.
##'
##' Common interactions are documented in
##' \code{\link{common_key_press}}. Note boxplots also supports
##' brushing and can respond to brushing in other plots. When we brush
##' in other plots which are based on the same data, there will be
##' ``child'' boxplots in this plot showing the distributions of the
##' brushed data.
##' @param vars a list of variables (a character vector), or a
##' formula; a one-sided formula like \code{~ x1 + x2 + x3} means to
##' draw side-by-side boxplots for the variables in the right hand
##' side, whereas a two-sided formula like \code{y ~ x} means boxplots
##' of a continuous \code{y} against a categorical \code{x}
##' @inheritParams qbar
##' @param at the locations of the boxplots (by default from 1 to
##' \code{p} where \code{p} is the number of variables to plot or the
##' number of levels of the categorical variable)
##' @param width width(s) of boxes (do not have to be a same value if
##' provided as a numeric vector); by default it is about 1/10 of the
##' screen width
##' @param horizontal horizontal or vertical boxplots
##' @param points whether to add data points to the boxplot
##' @return A boxplot
##' @author Yihui Xie <\url{http://yihui.name}>
##' @example inst/examples/qboxplot-ex.R
##' @export
##' @family plots
qboxplot =
function(vars, data = last_data(), at = NULL, width = NULL, horizontal = FALSE,
main = '', xlim = NULL, ylim = NULL, xlab = NULL, ylab = NULL,
points = FALSE) {
data = check_data(data)
b = brush(data)
b$select.only = TRUE; b$draw.brush = FALSE # a selection brush
meta = Box.meta$new(horizontal = horizontal, main = main, alpha = 1, points = points,
active = TRUE)
if (missing(vars)) vars = grep('^[^.]', names(data), value = TRUE)
compute_coords = function(brush = FALSE) {
meta$minor = ifelse(meta$horizontal, 'x', 'y')
idx = visible(data)
if (brush) idx = idx & selected(data)
if (inherits(vars, 'formula')) {
vars.n = length(vars) # 2 means one-sided formula, 3 means two-sided
vars.a = all.vars(vars) # all variables in the formula
if (vars.n == 2) {
meta$vars = all.vars(vars)
if (identical(meta$vars, '.'))
meta$vars = grep('^[^.]', names(data), value = TRUE)
ylist = lapply(as.data.frame(data[idx, meta$vars, drop = FALSE]), as.numeric)
if (!brush) {
meta$xlab = if (is.null(xlab)) 'variable' else xlab
meta$ylab = if (is.null(ylab)) 'value' else ylab
}
} else if (vars.n == 3) {
meta$xvar = vars.a[2]; meta$yvar = vars.a[1]
ylist = split(data[idx, meta$yvar], data[idx, meta$xvar])
if (!brush) {
meta$xlab = if (is.null(xlab)) vars.a[2] else xlab
meta$ylab = if (is.null(ylab)) vars.a[1] else ylab
}
}
} else {
ylist = lapply(as.data.frame(data[idx, vars, drop = FALSE]), as.numeric)
if (!brush) {
meta$vars = names(data[, vars, drop = FALSE])
meta$xlab = if (is.null(xlab)) 'variable' else xlab
meta$ylab = if (is.null(ylab)) 'value' else ylab
}
}
bxp.data = lapply(ylist, boxplot.stats, do.conf = FALSE)
bxp.stats = sapply(bxp.data, `[[`, 'stats') # quantiles
bxp.out = lapply(bxp.data, `[[`, 'out') # outliers
if (brush) {
meta$bxp.stats2 = bxp.stats
return()
}
meta$bxp.stats = bxp.stats; meta$bxp.out = bxp.out
meta$xlabels = if (length(meta$vars)) meta$vars else names(ylist)
meta$yat = axis_loc(range(ylist)); meta$ylabels = format(meta$yat)
meta$xat = meta$at = if (is.null(at)) seq_along(meta$xlabels) else at
meta$width = if (is.null(width)) max(0.2 * diff(range(meta$at)), 0.3) else width
meta$limits =
cbind(extend_ranges(if (is.null(xlim))
range(meta$xat) + c(-1, 1) * max(meta$width)/2 else xlim,
ifelse(length(meta$xat) == 1, 5, 2) * qpar('mar')),
extend_ranges(if (is.null(ylim)) range(ylist) else ylim))
if (length(meta$vars)) {
meta$y =
c(vapply(as.data.frame(data[, meta$vars, drop = FALSE]), as.numeric,
numeric(nrow(data))))
meta$x = rep(meta$at, each = nrow(data))
} else {
meta$y = data[, meta$yvar]; meta$x = meta$at[as.integer(data[, meta$xvar])]
}
}
compute_coords()
compute_colors = function() {
if (!meta$points) {
meta$color = NA; meta$border = NA
} else {
if (length(meta$vars)) {
idx = !visible(data)
meta$color = data$.color; meta$border = data$.border
meta$color[idx] = NA; meta$border[idx] = NA
} else {
meta$color = meta$border = 'gray15'
}
}
}
compute_colors()
flip_coords = function() {
if (!meta$horizontal) return()
switch_value('x', 'y', meta)
switch_value('xat', 'yat', meta)
switch_value('xlabels', 'ylabels', meta)
switch_value('xlab', 'ylab', meta)
meta$limits = meta$limits[, 2:1]
}
flip_coords()
meta$brush.size = c(1, -1) * apply(meta$limits, 2, diff) / 15
main_draw = function(layer, painter) {
qdrawGlyph(painter, qglyphCircle(r = data$.size[1]), meta$x, meta$y,
stroke = meta$border, fill = meta$color)
}
brush_mouse_press = function(layer, event) {
common_mouse_press(layer, event, data, meta)
}
brush_mouse_move = function(layer, event) {
rect = qrect(update_brush_size(meta, event))
hits = layer$locate(rect)
if (length(hits)) {
if (length(meta$vars))
hits = hits %% nrow(data)
hits = hits + 1
}
selected(data) = mode_selection(selected(data), hits, mode = b$mode)
common_mouse_move(layer, event, data, meta)
}
brush_mouse_release = function(layer, event) {
brush_mouse_move(layer, event)
common_mouse_release(layer, event, data, meta)
}
key_press = function(layer, event) {
common_key_press(layer, event, data, meta)
}
key_release = function(layer, event) {
common_key_release(layer, event, data, meta)
}
scene = qscene()
layer.bxp = qbxp(data, meta, limits = qrect(meta$limits))
layer.main =
qlayer(paintFun = main_draw, mousePressFun = brush_mouse_press,
mouseReleaseFun = brush_mouse_release,
mouseMove = brush_mouse_move,
keyPressFun = key_press, keyReleaseFun = key_release,
focusInFun = function(layer, event) {
common_focus_in(layer, event, data, meta)
}, focusOutFun = function(layer, event) {
common_focus_out(layer, event, data, meta)
},
limits = qrect(meta$limits), clip = TRUE)
layer.root = qlayer(scene)
layer.brush = qbxp(data, meta, subset = TRUE, limits = qrect(meta$limits))
layer.title = qmtext(meta = meta, side = 3)
layer.xlab = qmtext(meta = meta, side = 1)
layer.ylab = qmtext(meta = meta, side = 2)
layer.xaxis = qaxis(meta = meta, side = 1)
layer.yaxis = qaxis(meta = meta, side = 2)
layer.grid = qgrid(meta = meta)
layer.root[0, 2] = layer.title
layer.root[2, 2] = layer.xaxis
layer.root[3, 2] = layer.xlab
layer.root[1, 1] = layer.yaxis
layer.root[1, 0] = layer.ylab
layer.root[1, 2] = layer.grid
layer.root[1, 2] = layer.bxp
layer.root[1, 2] = layer.main
layer.root[1, 2] = layer.brush
layer.root[1, 3] = qlayer()
set_layout = function() {
fix_dimension(layer.root,
row = list(id = c(0, 2, 3), value = c(prefer_height(meta$main),
prefer_height(meta$xlabels),
prefer_height(meta$xlab))),
column = list(id = c(1, 0, 3), value = c(prefer_width(meta$ylabels),
prefer_width(meta$ylab, FALSE),
10)))
}
set_layout()
meta$mainChanged$connect(set_layout)
meta$xlabChanged$connect(set_layout); meta$ylabChanged$connect(set_layout)
meta$xlabelsChanged$connect(set_layout); meta$ylabelsChanged$connect(set_layout)
view = qplotView(scene = scene)
view$setWindowTitle(paste("Boxplot:", if (length(meta$vars))
paste(meta$vars, collapse = ', ') else
paste(meta$yvar, meta$xvar, sep = ' ~ ')))
view$resize(480 * sqrt(length(unique(meta$x))/3), 480)
d.idx = add_listener(data, function(i, j) {
idx = which(j == c('.brushed', '.color', '.border'))
if (length(idx) < 1) {
compute_coords(); compute_colors()
qupdate(layer.grid); qupdate(layer.xaxis); qupdate(layer.yaxis)
layer.main$invalidateIndex(); qupdate(layer.main)
return()
} else idx = c(1, 2, 2)[idx]
switch(idx, {compute_coords(brush = TRUE); qupdate(layer.brush)},
{compute_colors(); qupdate(layer.main)})
})
qconnect(layer.main, 'destroyed', function(x) {
remove_listener(data, d.idx)
})
b$cursorChanged$connect(function() {
set_cursor(view, b$cursor)
})
sync_limits(meta, layer.main, layer.brush, layer.bxp) # sync limits
meta$manual.brush = function(pos) {
brush_mouse_move(layer = layer.main, event = list(pos = function() pos))
}
attr(view, 'meta') = meta
view
}
Box.meta =
setRefClass("Box_meta", fields = properties(c(
Common.meta,
list(vars = 'character', x = 'numeric', y = 'numeric',
xvar = 'character', yvar = 'character',
at = 'numeric', width = 'numeric', horizontal = 'logical',
bxp.stats = 'matrix', bxp.out = 'list', points = 'logical',
bxp.stats2 = 'matrix')
)))
##' Create a boxplot layer
##'
##' A ``low-level'' plotting function to create a boxplot layer.
##'
##' @inheritParams qbar
##' @param meta the meta data
##' @param subset whether to draw boxplots based on selected rows
##' @param ... other arguments passed to \code{\link[qtpaint]{qlayer}}
##' @return a layer object
##' @author Yihui Xie <\url{http://yihui.name}>
##' @export
##' @examples ## see source code of qboxplot()
qbxp = function(data, meta, subset = FALSE, ...) {
draw_boxplot = function(layer, painter) {
.boxcol = 'black'
width = meta$width
if (subset) {
bxp.stats = meta$bxp.stats2
if (!nrow(bxp.stats)) return()
.boxcol = 'gray'
width = mean(selected(data)) * width
} else bxp.stats = meta$bxp.stats
if (!subset) bxp.out = meta$bxp.out
at = meta$at; horizontal = meta$horizontal
x0 = rep(at, each = 2); y0 = as.vector(bxp.stats[c(1, 4), ])
x1 = x0; y1 = as.vector(bxp.stats[c(2, 5), ])
if (horizontal) {
switch_value('x0', 'y0', sys.frame(1))
switch_value('x1', 'y1', sys.frame(1))
}
qdrawSegment(painter, x0, y0, x1, y1, stroke = .boxcol) # whiskers
x0 = at - width/2; x1 = at + width/2
y0 = bxp.stats[2, ]; y1 = bxp.stats[4, ]
if (horizontal) {
switch_value('x0', 'y0', sys.frame(1))
switch_value('x1', 'y1', sys.frame(1))
}
qdrawRect(painter, x0, y0, x1, y1, fill = ifelse(subset, '#FFFF0099', 'white'),
stroke = .boxcol) # box
if (!subset && length(bxp.out)) {
y = unlist(bxp.out); x = rep(at, sapply(bxp.out, length))
if (horizontal) {
switch_value('x', 'y', sys.frame(1))
}
circle = qglyphCircle(r = data$.size[1])
qdrawGlyph(painter, circle, x, y, stroke = 'black', fill = 'black')
}
qlineWidth(painter) = 3
if (horizontal) {
x0 = x1 = bxp.stats[3, ]
} else {
y0 = y1 = bxp.stats[3, ]
}
qdrawSegment(painter, x0, y0, x1, y1, stroke = .boxcol) # median bar
qlineWidth(painter) = 1
if (subset) draw_brush(layer, painter, data, meta)
}
qlayer(paintFun = draw_boxplot, ...)
}
|
# Return yesterday's date.
#
# @return A `Date` object equal to the current system date minus one day.
get_yesterday <- function() {
  # Bug fix: the original called sys.date(), which does not exist (R is
  # case-sensitive); the base function is Sys.Date(), so every call errored.
  Sys.Date() - 1
}
| /test.R | no_license | Ryanjb1234/GitAndR | R | false | false | 88 | r | get_yesterday <- function (){
x<-(sys.date()- 1)
return(x)
#comment from github
}
|
# Multisensi R package ; file quality.r (last modified: 2015-09-14)
# Authors: C. Bidot, M. Lamboni, H. Monod
# Copyright INRA 2011-2018
# MaIAGE, INRA, Univ. Paris-Saclay, 78350 Jouy-en-Josas, France
#
# More about multisensi in https://CRAN.R-project.org/package=multisensi
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
#===========================================================================
quality <- function(echsimul, echsimul.app)
{
  ## Assess the quality of an approximated model against the original
  ## model outputs: bias, coefficient of determination and residuals.
  ## INPUTS
  ##   echsimul     : matrix/array of the original model outputs Y
  ##   echsimul.app : outputs of the approximated model (same dimensions)
  ## OUTPUTS (list)
  ##   moy.biais : integrated bias (column means of the residuals)
  ##   coef.det  : coefficient of determination R2, per output column
  ##   residuals : residual matrix (echsimul - echsimul.app)

  ## residuals of the approximation
  residus <- echsimul - echsimul.app

  ## column means of the original outputs
  ## (colMeans/colSums are faster than apply)
  y.bar <- colMeans(echsimul)

  ## total sum of squares per column: centre each column on its mean
  ss.total <- colSums(sweep(echsimul, 2, y.bar)^2)

  ## R2 = 1 - SSresiduals / SStotal
  r.squared <- 1 - colSums(residus^2) / ss.total

  ## return the outputs
  list(moy.biais = colMeans(residus),
       coef.det  = r.squared,
       residuals = residus)
}
| /R/quality.r | no_license | cran/multisensi | R | false | false | 2,907 | r | # Multisensi R package ; file quality.r (last modified: 2015-09-14)
# Authors: C. Bidot, M. Lamboni, H. Monod
# Copyright INRA 2011-2018
# MaIAGE, INRA, Univ. Paris-Saclay, 78350 Jouy-en-Josas, France
#
# More about multisensi in https://CRAN.R-project.org/package=multisensi
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
#===========================================================================
quality <- function(echsimul,echsimul.app)
#===========================================================================
{
## calcule le biais, le coef. de determination, le lof
## INPUTS
## echsimul : ARRAY des sorties ou matrice des sorties Y
## echsimul.app : sortie du modele approxime
## OUTPUTS
## moy.biais : biais integre (moyenne par colonne des biais)
## coef.det : coefficient de determination R2
## residuals : biais
## calcul de la moyenne de Y initiales
moy.Y <- colMeans(echsimul)
#colMeans et colSums + rapides que apply
## moyenne de Y sous forme matricielle
moy.Ymat <- t(matrix(rep(moy.Y,nrow(echsimul)),ncol(echsimul),nrow(echsimul)))
## calcul du biais
biais <- echsimul-echsimul.app
moy.biais <- colMeans(biais)
##calcul du coefficient de determination
## = 1 - SSresidus/SStotal
coef.det <- 1 - colSums((biais)^2)/colSums((echsimul-moy.Ymat)^2)
## renvoi des sorties
return(list(moy.biais=moy.biais,coef.det=coef.det,residuals=biais))
}
|
/*
File: Sound.r
Contains: Sound Manager Interfaces.
Version: Technology: Sound Manager 3.6
Release: Universal Interfaces 3.3.2
Copyright: © 1986-2000 by Apple Computer, Inc., all rights reserved
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef __SOUND_R__
#define __SOUND_R__
#ifndef __CONDITIONALMACROS_R__
#include "ConditionalMacros.r"
#endif
#define kInvalidSource 0xFFFFFFFF /* this source may be returned from GetInfo if no other source is the monitored source */
#define kNoSource 'none' /* no source selection */
#define kCDSource 'cd ' /* internal CD player input */
#define kExtMicSource 'emic' /* external mic input */
#define kSoundInSource 'sinj' /* sound input jack */
#define kRCAInSource 'irca' /* RCA jack input */
#define kTVFMTunerSource 'tvfm'
#define kDAVInSource 'idav' /* DAV analog input */
#define kIntMicSource 'imic' /* internal mic input */
#define kMediaBaySource 'mbay' /* media bay input */
#define kModemSource 'modm' /* modem input (internal modem on desktops, PCI input on PowerBooks) */
#define kPCCardSource 'pcm ' /* PC Card pwm input */
#define kZoomVideoSource 'zvpc' /* zoom video input */
#define kDVDSource 'dvda' /* DVD audio input */
#define kNoSoundComponentType '****'
#define kSoundComponentType 'sift' /* component type */
#define kSoundComponentPPCType 'nift' /* component type for PowerPC code */
#define kRate8SubType 'ratb' /* 8-bit rate converter */
#define kRate16SubType 'ratw' /* 16-bit rate converter */
#define kConverterSubType 'conv' /* sample format converter */
#define kSndSourceSubType 'sour' /* generic source component */
#define kMixerType 'mixr'
#define kMixer8SubType 'mixb' /* 8-bit mixer */
#define kMixer16SubType 'mixw' /* 16-bit mixer */
#define kSoundInputDeviceType 'sinp' /* sound input component */
#define kWaveInSubType 'wavi' /* Windows waveIn */
#define kWaveInSnifferSubType 'wisn' /* Windows waveIn sniffer */
#define kSoundOutputDeviceType 'sdev' /* sound output component */
#define kClassicSubType 'clas' /* classic hardware, i.e. Mac Plus */
#define kASCSubType 'asc ' /* Apple Sound Chip device */
#define kDSPSubType 'dsp ' /* DSP device */
#define kAwacsSubType 'awac' /* Another of Will's Audio Chips device */
#define kGCAwacsSubType 'awgc' /* Awacs audio with Grand Central DMA */
#define kSingerSubType 'sing' /* Singer (via Whitney) based sound */
#define kSinger2SubType 'sng2' /* Singer 2 (via Whitney) for Acme */
#define kWhitSubType 'whit' /* Whit sound component for PrimeTime 3 */
#define kSoundBlasterSubType 'sbls' /* Sound Blaster for CHRP */
#define kWaveOutSubType 'wavo' /* Windows waveOut */
#define kWaveOutSnifferSubType 'wosn' /* Windows waveOut sniffer */
#define kDirectSoundSubType 'dsnd' /* Windows DirectSound */
#define kDirectSoundSnifferSubType 'dssn' /* Windows DirectSound sniffer */
#define kUNIXsdevSubType 'un1x' /* UNIX base sdev */
#define kUSBSubType 'usb ' /* USB device */
#define kBlueBoxSubType 'bsnd' /* Blue Box sound component */
#define kSoundCompressor 'scom'
#define kSoundDecompressor 'sdec'
#define kAudioComponentType 'adio' /* Audio components and sub-types */
#define kAwacsPhoneSubType 'hphn'
#define kAudioVisionSpeakerSubType 'telc'
#define kAudioVisionHeadphoneSubType 'telh'
#define kPhilipsFaderSubType 'tvav'
#define kSGSToneSubType 'sgs0'
#define kSoundEffectsType 'snfx' /* sound effects type */
#define kEqualizerSubType 'eqal' /* frequency equalizer */
#define kSSpLocalizationSubType 'snd3'
#define kSoundNotCompressed 'NONE' /* sound is not compressed */
#define k8BitOffsetBinaryFormat 'raw ' /* 8-bit offset binary */
#define k16BitBigEndianFormat 'twos' /* 16-bit big endian */
#define k16BitLittleEndianFormat 'sowt' /* 16-bit little endian */
#define kFloat32Format 'fl32' /* 32-bit floating point */
#define kFloat64Format 'fl64' /* 64-bit floating point */
#define k24BitFormat 'in24' /* 24-bit integer */
#define k32BitFormat 'in32' /* 32-bit integer */
#define k32BitLittleEndianFormat '23ni' /* 32-bit little endian integer */
#define kMACE3Compression 'MAC3' /* MACE 3:1 */
#define kMACE6Compression 'MAC6' /* MACE 6:1 */
#define kCDXA4Compression 'cdx4' /* CD/XA 4:1 */
#define kCDXA2Compression 'cdx2' /* CD/XA 2:1 */
#define kIMACompression 'ima4' /* IMA 4:1 */
#define kULawCompression 'ulaw' /* µLaw 2:1 */
#define kALawCompression 'alaw' /* aLaw 2:1 */
#define kMicrosoftADPCMFormat 0x6D730002 /* Microsoft ADPCM - ACM code 2 */
#define kDVIIntelIMAFormat 0x6D730011 /* DVI/Intel IMA ADPCM - ACM code 17 */
#define kDVAudioFormat 'dvca' /* DV Audio */
#define kQDesignCompression 'QDMC' /* QDesign music */
#define kQUALCOMMCompression 'Qclp' /* QUALCOMM PureVoice */
#define kOffsetBinary 'raw ' /* for compatibility */
#define kTwosComplement 'twos' /* for compatibility */
#define kLittleEndianFormat 'sowt' /* for compatibility */
#define kMPEGLayer3Format 0x6D730055 /* MPEG Layer 3, CBR only (pre QT4.1) */
#define kFullMPEGLay3Format '.mp3' /* MPEG Layer 3, CBR & VBR (QT4.1 and later) */
#define k8BitRawIn 0x01 /* data description */
#define k8BitTwosIn 0x02
#define k16BitIn 0x04
#define kStereoIn 0x08
#define k8BitRawOut 0x0100
#define k8BitTwosOut 0x0200
#define k16BitOut 0x0400
#define kStereoOut 0x0800
#define kReverse 0x00010000 /* function description */
#define kRateConvert 0x00020000
#define kCreateSoundSource 0x00040000
#define kVMAwareness 0x00200000 /* component will hold its memory */
#define kHighQuality 0x00400000 /* performance description */
#define kNonRealTime 0x00800000
/*----------------------------snd • Sound----------------------------------------------*/
type 'snd ' {
switch {
case FormatOne:
key unsigned integer = $0001;
unsigned integer = $$CountOf(Synthesizers);
wide array Synthesizers {
/* Resource ID of synthesizer/modifer */
integer squareWaveSynth = $0001,
waveTableSynth = $0003,
sampledSynth = $0005;
longint; /* init parameter */
};
case FormatTwo:
key unsigned integer = $0002;
integer free = 0, keepInMemory = 256+1; /* Space for refe count */
};
unsigned integer = $$CountOf(SoundCmnds);
wide array SoundCmnds {
boolean noData, hasData;
switch {
case nullCmd:
key bitstring[15] = 0;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case quietCmd:
key bitstring[15] = 3;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case flushCmd:
key bitstring[15] = 4;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case waitCmd:
key bitstring[15] = 10;
integer oneSecond = 2000; /* Duration */
fill long; /* Param 2 = nil */
case pauseCmd:
key bitstring[15] = 11;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case resumeCmd:
key bitstring[15] = 12;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case callBackCmd:
key bitstring[15] = 13;
integer; /* User-defined */
longint; /* User-defined */
case syncCmd:
key bitstring[15] = 14;
integer; /* Count */
longint; /* Identifier */
case emptyCmd:
key bitstring[15] = 15;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case freqDurationCmd:
key bitstring[15] = 40;
integer oneSecond = 2000; /* Duration */
longint; /* frequency */
case restCmd:
key bitstring[15] = 41;
integer oneSecond = 2000; /* Duration */
fill long; /* Param 2 = nil */
case freqCmd:
key bitstring[15] = 42;
fill word; /* Param 1 = nil */
longint; /* Frequency */
case ampCmd:
key bitstring[15] = 43;
integer; /* Amplitude */
fill long; /* Param 2 */
case timbreCmd:
key bitstring[15] = 44;
integer sineWave, squareWave = 255; /* Timbre */
fill long; /* Param 2 */
case waveTableCmd:
key bitstring[15] = 60;
unsigned integer; /* Length */
longint; /* Pointer to table */
case phaseCmd:
key bitstring[15] = 61;
integer; /* Shift */
longint; /* chanPtr */
case soundCmd:
key bitstring[15] = 80;
fill word; /* Param 1 = nil */
longint; /* Pointer to sound */
case bufferCmd:
key bitstring[15] = 81;
fill word; /* Param 1 = nil */
longint; /* Pointer to buffer */
case rateCmd:
key bitstring[15] = 82;
fill word; /* Param 1 = nil */
longint; /* Rate */
};
};
array DataTables {
DataTable:
fill long; /* Pointer to data */
SampleCnt:
unsigned longint; /* # of sound samples */
unsigned hex longint
Rate22K = $56EE8BA3; /* Sampling rate */
unsigned longint; /* Start of loop */
unsigned longint; /* End of loop */
hex byte; /* encode (header type) */
hex byte; /* baseFrequency */
hex string [$$Long(SampleCnt[$$ArrayIndex(DataTables)])];
};
};
#endif /* __SOUND_R__ */
| /3.3.2/Universal/Interfaces/RIncludes/Sound.r | no_license | elliotnunn/UniversalInterfaces | R | false | false | 13,028 | r | /*
File: Sound.r
Contains: Sound Manager Interfaces.
Version: Technology: Sound Manager 3.6
Release: Universal Interfaces 3.3.2
Copyright: © 1986-2000 by Apple Computer, Inc., all rights reserved
Bugs?: For bug reports, consult the following page on
the World Wide Web:
http://developer.apple.com/bugreporter/
*/
#ifndef __SOUND_R__
#define __SOUND_R__
#ifndef __CONDITIONALMACROS_R__
#include "ConditionalMacros.r"
#endif
#define kInvalidSource 0xFFFFFFFF /* this source may be returned from GetInfo if no other source is the monitored source */
#define kNoSource 'none' /* no source selection */
#define kCDSource 'cd ' /* internal CD player input */
#define kExtMicSource 'emic' /* external mic input */
#define kSoundInSource 'sinj' /* sound input jack */
#define kRCAInSource 'irca' /* RCA jack input */
#define kTVFMTunerSource 'tvfm'
#define kDAVInSource 'idav' /* DAV analog input */
#define kIntMicSource 'imic' /* internal mic input */
#define kMediaBaySource 'mbay' /* media bay input */
#define kModemSource 'modm' /* modem input (internal modem on desktops, PCI input on PowerBooks) */
#define kPCCardSource 'pcm ' /* PC Card pwm input */
#define kZoomVideoSource 'zvpc' /* zoom video input */
#define kDVDSource 'dvda' /* DVD audio input */
#define kNoSoundComponentType '****'
#define kSoundComponentType 'sift' /* component type */
#define kSoundComponentPPCType 'nift' /* component type for PowerPC code */
#define kRate8SubType 'ratb' /* 8-bit rate converter */
#define kRate16SubType 'ratw' /* 16-bit rate converter */
#define kConverterSubType 'conv' /* sample format converter */
#define kSndSourceSubType 'sour' /* generic source component */
#define kMixerType 'mixr'
#define kMixer8SubType 'mixb' /* 8-bit mixer */
#define kMixer16SubType 'mixw' /* 16-bit mixer */
#define kSoundInputDeviceType 'sinp' /* sound input component */
#define kWaveInSubType 'wavi' /* Windows waveIn */
#define kWaveInSnifferSubType 'wisn' /* Windows waveIn sniffer */
#define kSoundOutputDeviceType 'sdev' /* sound output component */
#define kClassicSubType 'clas' /* classic hardware, i.e. Mac Plus */
#define kASCSubType 'asc ' /* Apple Sound Chip device */
#define kDSPSubType 'dsp ' /* DSP device */
#define kAwacsSubType 'awac' /* Another of Will's Audio Chips device */
#define kGCAwacsSubType 'awgc' /* Awacs audio with Grand Central DMA */
#define kSingerSubType 'sing' /* Singer (via Whitney) based sound */
#define kSinger2SubType 'sng2' /* Singer 2 (via Whitney) for Acme */
#define kWhitSubType 'whit' /* Whit sound component for PrimeTime 3 */
#define kSoundBlasterSubType 'sbls' /* Sound Blaster for CHRP */
#define kWaveOutSubType 'wavo' /* Windows waveOut */
#define kWaveOutSnifferSubType 'wosn' /* Windows waveOut sniffer */
#define kDirectSoundSubType 'dsnd' /* Windows DirectSound */
#define kDirectSoundSnifferSubType 'dssn' /* Windows DirectSound sniffer */
#define kUNIXsdevSubType 'un1x' /* UNIX base sdev */
#define kUSBSubType 'usb ' /* USB device */
#define kBlueBoxSubType 'bsnd' /* Blue Box sound component */
#define kSoundCompressor 'scom'
#define kSoundDecompressor 'sdec'
#define kAudioComponentType 'adio' /* Audio components and sub-types */
#define kAwacsPhoneSubType 'hphn'
#define kAudioVisionSpeakerSubType 'telc'
#define kAudioVisionHeadphoneSubType 'telh'
#define kPhilipsFaderSubType 'tvav'
#define kSGSToneSubType 'sgs0'
#define kSoundEffectsType 'snfx' /* sound effects type */
#define kEqualizerSubType 'eqal' /* frequency equalizer */
#define kSSpLocalizationSubType 'snd3'
#define kSoundNotCompressed 'NONE' /* sound is not compressed */
#define k8BitOffsetBinaryFormat 'raw ' /* 8-bit offset binary */
#define k16BitBigEndianFormat 'twos' /* 16-bit big endian */
#define k16BitLittleEndianFormat 'sowt' /* 16-bit little endian */
#define kFloat32Format 'fl32' /* 32-bit floating point */
#define kFloat64Format 'fl64' /* 64-bit floating point */
#define k24BitFormat 'in24' /* 24-bit integer */
#define k32BitFormat 'in32' /* 32-bit integer */
#define k32BitLittleEndianFormat '23ni' /* 32-bit little endian integer */
#define kMACE3Compression 'MAC3' /* MACE 3:1 */
#define kMACE6Compression 'MAC6' /* MACE 6:1 */
#define kCDXA4Compression 'cdx4' /* CD/XA 4:1 */
#define kCDXA2Compression 'cdx2' /* CD/XA 2:1 */
#define kIMACompression 'ima4' /* IMA 4:1 */
#define kULawCompression 'ulaw' /* µLaw 2:1 */
#define kALawCompression 'alaw' /* aLaw 2:1 */
#define kMicrosoftADPCMFormat 0x6D730002 /* Microsoft ADPCM - ACM code 2 */
#define kDVIIntelIMAFormat 0x6D730011 /* DVI/Intel IMA ADPCM - ACM code 17 */
#define kDVAudioFormat 'dvca' /* DV Audio */
#define kQDesignCompression 'QDMC' /* QDesign music */
#define kQUALCOMMCompression 'Qclp' /* QUALCOMM PureVoice */
#define kOffsetBinary 'raw ' /* for compatibility */
#define kTwosComplement 'twos' /* for compatibility */
#define kLittleEndianFormat 'sowt' /* for compatibility */
#define kMPEGLayer3Format 0x6D730055 /* MPEG Layer 3, CBR only (pre QT4.1) */
#define kFullMPEGLay3Format '.mp3' /* MPEG Layer 3, CBR & VBR (QT4.1 and later) */
#define k8BitRawIn 0x01 /* data description */
#define k8BitTwosIn 0x02
#define k16BitIn 0x04
#define kStereoIn 0x08
#define k8BitRawOut 0x0100
#define k8BitTwosOut 0x0200
#define k16BitOut 0x0400
#define kStereoOut 0x0800
#define kReverse 0x00010000 /* function description */
#define kRateConvert 0x00020000
#define kCreateSoundSource 0x00040000
#define kVMAwareness 0x00200000 /* component will hold its memory */
#define kHighQuality 0x00400000 /* performance description */
#define kNonRealTime 0x00800000
/*----------------------------snd • Sound----------------------------------------------*/
type 'snd ' {
switch {
case FormatOne:
key unsigned integer = $0001;
unsigned integer = $$CountOf(Synthesizers);
wide array Synthesizers {
/* Resource ID of synthesizer/modifer */
integer squareWaveSynth = $0001,
waveTableSynth = $0003,
sampledSynth = $0005;
longint; /* init parameter */
};
case FormatTwo:
key unsigned integer = $0002;
integer free = 0, keepInMemory = 256+1; /* Space for refe count */
};
unsigned integer = $$CountOf(SoundCmnds);
wide array SoundCmnds {
boolean noData, hasData;
switch {
case nullCmd:
key bitstring[15] = 0;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case quietCmd:
key bitstring[15] = 3;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case flushCmd:
key bitstring[15] = 4;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case waitCmd:
key bitstring[15] = 10;
integer oneSecond = 2000; /* Duration */
fill long; /* Param 2 = nil */
case pauseCmd:
key bitstring[15] = 11;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case resumeCmd:
key bitstring[15] = 12;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case callBackCmd:
key bitstring[15] = 13;
integer; /* User-defined */
longint; /* User-defined */
case syncCmd:
key bitstring[15] = 14;
integer; /* Count */
longint; /* Identifier */
case emptyCmd:
key bitstring[15] = 15;
fill word; /* Param 1 = nil */
fill long; /* Param 2 = nil */
case freqDurationCmd:
key bitstring[15] = 40;
integer oneSecond = 2000; /* Duration */
longint; /* frequency */
case restCmd:
key bitstring[15] = 41;
integer oneSecond = 2000; /* Duration */
fill long; /* Param 2 = nil */
case freqCmd:
key bitstring[15] = 42;
fill word; /* Param 1 = nil */
longint; /* Frequency */
case ampCmd:
key bitstring[15] = 43;
integer; /* Amplitude */
fill long; /* Param 2 */
case timbreCmd:
key bitstring[15] = 44;
integer sineWave, squareWave = 255; /* Timbre */
fill long; /* Param 2 */
case waveTableCmd:
key bitstring[15] = 60;
unsigned integer; /* Length */
longint; /* Pointer to table */
case phaseCmd:
key bitstring[15] = 61;
integer; /* Shift */
longint; /* chanPtr */
case soundCmd:
key bitstring[15] = 80;
fill word; /* Param 1 = nil */
longint; /* Pointer to sound */
case bufferCmd:
key bitstring[15] = 81;
fill word; /* Param 1 = nil */
longint; /* Pointer to buffer */
case rateCmd:
key bitstring[15] = 82;
fill word; /* Param 1 = nil */
longint; /* Rate */
};
};
array DataTables {
DataTable:
fill long; /* Pointer to data */
SampleCnt:
unsigned longint; /* # of sound samples */
unsigned hex longint
Rate22K = $56EE8BA3; /* Sampling rate */
unsigned longint; /* Start of loop */
unsigned longint; /* End of loop */
hex byte; /* encode (header type) */
hex byte; /* baseFrequency */
hex string [$$Long(SampleCnt[$$ArrayIndex(DataTables)])];
};
};
#endif /* __SOUND_R__ */
|
#' Create explainer from your tidymodels workflow.
#'
#' DALEX is designed to work with various black-box models like tree ensembles, linear models, neural networks etc.
#' Unfortunately R packages that create such models are very inconsistent. Different tools use different interfaces to train, validate and use models.
#' One of those tools, which is one of the most popular one is tidymodels package. We would like to present dedicated explain function for it.
#'
#'
#' @param model object - a fitted workflow created with \code{tidymodels}.
#' @param data data.frame or matrix - data that was used for fitting. Data should be passed without target column (this shall be provided as the \code{y} argument). NOTE: If target variable is present in the \code{data}, some of the functionalities may not work properly. Tibble will be converted into data.frame
#' @param y numeric vector with outputs / scores. If provided then it shall have the same size as \code{data}
#' @param weights numeric vector with sampling weights. By default it's \code{NULL}. If provided then it shall have the same length as \code{data}
#' @param predict_function function that takes two arguments: model and new data and returns numeric vector with predictions
#' @param predict_function_target_column Character or numeric containing either column name or column number in the model prediction object of the class that should be considered as positive (ie. the class that is associated with probability 1). If NULL, the second column of the output will be taken for binary classification. For a multiclass classification setting that parameter cause switch to binary classification mode with 1 vs others probabilities.
#' @param residual_function function that takes three arguments: model, data and response vector y. It should return a numeric vector with model residuals for given data. If not provided, response residuals (\eqn{y-\hat{y}}) are calculated.
#' @param ... other parameters
#' @param label character - the name of the model. By default it's extracted from the 'class' attribute of the model
#' @param verbose if TRUE (default) then diagnostic messages will be printed.
#' @param precalculate if TRUE (default) then 'predicted_values' and 'residuals' are calculated when explainer is created.
#' @param colorize if TRUE (default) then \code{WARNINGS}, \code{ERRORS} and \code{NOTES} are colorized. Will work only in the R console.
#' @param model_info a named list (\code{package}, \code{version}, \code{type}) containing information about model. If \code{NULL}, \code{DALEX} will seek for information on it's own.
#' @param type type of a model, either \code{classification} or \code{regression}. If not specified then \code{type} will be extracted from \code{model_info}.
#'
#' @return explainer object (\code{\link[DALEX]{explain}}) ready to work with DALEX
#'
#' @import DALEX
#' @importFrom stats predict
#' @importFrom DALEX yhat
#'
#' @rdname explain_tidymodels
#' @export
#' @examples
#' library("DALEXtra")
#' library("tidymodels")
#' library("recipes")
#' data <- titanic_imputed
#' data$survived <- as.factor(data$survived)
#' rec <- recipe(survived ~ ., data = data) %>%
#'        step_normalize(fare)
#' model <- decision_tree(tree_depth = 25) %>%
#'          set_engine("rpart") %>%
#'          set_mode("classification")
#'
#' wflow <- workflow() %>%
#'          add_recipe(rec) %>%
#'          add_model(model)
#'
#'
#' model_fitted <- wflow %>%
#'                 fit(data = data)
#'
#' explain_tidymodels(model_fitted, data = titanic_imputed, y = titanic_imputed$survived)
#'
#'
explain_tidymodels <-
  function(model,
           data = NULL,
           y = NULL,
           weights = NULL,
           predict_function = NULL,
           predict_function_target_column = NULL,
           residual_function = NULL,
           ...,
           label = NULL,
           verbose = TRUE,
           precalculate = TRUE,
           colorize = TRUE,
           model_info = NULL,
           type = NULL) {
    # A workflow must be fitted before it can be explained; fail fast with a
    # clear message rather than letting explain() error obscurely downstream.
    # call. = FALSE keeps the (long) call out of the error message.
    if (!model$trained) {
      stop("Only trained workflows can be passed to explain function", call. = FALSE)
    }
    # All arguments are forwarded unchanged to DALEX::explain(), which builds
    # the explainer object.
    explain(
      model,
      data = data,
      y = y,
      weights = weights,
      predict_function = predict_function,
      predict_function_target_column = predict_function_target_column,
      residual_function = residual_function,
      ...,
      label = label,
      verbose = verbose,
      precalculate = precalculate,
      colorize = colorize,
      model_info = model_info,
      type = type
    )
  }
| /R/explain_tidymodels.R | no_license | pragyanaischool/DALEXtra | R | false | false | 4,533 | r | #' Create explainer from your tidymodels workflow.
#'
#' DALEX is designed to work with various black-box models like tree ensembles, linear models, neural networks etc.
#' Unfortunately R packages that create such models are very inconsistent. Different tools use different interfaces to train, validate and use models.
#' One of those tools, which is one of the most popular one is tidymodels package. We would like to present dedicated explain function for it.
#'
#'
#' @param model object - a fitted workflow created with \code{mlr3}.
#' @param data data.frame or matrix - data that was used for fitting. Data should be passed without target column (this shall be provided as the \code{y} argument). NOTE: If target variable is present in the \code{data}, some of the functionalities my not work properly. Tibble will be converted into data.frame
#' @param y numeric vector with outputs / scores. If provided then it shall have the same size as \code{data}
#' @param weights numeric vector with sampling weights. By default it's \code{NULL}. If provided then it shall have the same length as \code{data}
#' @param predict_function function that takes two arguments: model and new data and returns numeric vector with predictions
#' @param predict_function_target_column Character or numeric containing either column name or column number in the model prediction object of the class that should be considered as positive (ie. the class that is associated with probability 1). If NULL, the second column of the output will be taken for binary classification. For a multiclass classification setting that parameter cause switch to binary classification mode with 1 vs others probabilities.
#' @param residual_function function that takes three arguments: model, data and response vector y. It should return a numeric vector with model residuals for given data. If not provided, response residuals (\eqn{y-\hat{y}}) are calculated.
#' @param ... other parameters
#' @param label character - the name of the model. By default it's extracted from the 'class' attribute of the model
#' @param verbose if TRUE (default) then diagnostic messages will be printed.
#' @param precalculate if TRUE (default) then 'predicted_values' and 'residuals' are calculated when explainer is created.
#' @param colorize if TRUE (default) then \code{WARNINGS}, \code{ERRORS} and \code{NOTES} are colorized. Will work only in the R console.
#' @param model_info a named list (\code{package}, \code{version}, \code{type}) containg information about model. If \code{NULL}, \code{DALEX} will seek for information on it's own.
#' @param type type of a model, either \code{classification} or \code{regression}. If not specified then \code{type} will be extracted from \code{model_info}.
#'
#' @return explainer object (\code{\link[DALEX]{explain}}) ready to work with DALEX
#'
#' @import DALEX
#' @importFrom stats predict
#' @importFrom DALEX yhat
#'
#' @rdname explain_tidymodels
#' @export
#' @examples
#' library("DALEXtra")
#' library("tidymodels")
#' library("recipes")
#' data <- titanic_imputed
#' data$survived <- as.factor(data$survived)
#' rec <- recipe(survived ~ ., data = data) %>%
#' step_normalize(fare)
#' model <- decision_tree(tree_depth = 25) %>%
#' set_engine("rpart") %>%
#' set_mode("classification")
#'
#' wflow <- workflow() %>%
#' add_recipe(rec) %>%
#' add_model(model)
#'
#'
#' model_fitted <- wflow %>%
#' fit(data = data)
#'
#' explain_tidymodels(model_fitted, data = titanic_imputed, y = titanic_imputed$survived)
#'
#'
explain_tidymodels <- function(model,
                               data = NULL,
                               y = NULL,
                               weights = NULL,
                               predict_function = NULL,
                               predict_function_target_column = NULL,
                               residual_function = NULL,
                               ...,
                               label = NULL,
                               verbose = TRUE,
                               precalculate = TRUE,
                               colorize = TRUE,
                               model_info = NULL,
                               type = NULL) {
  # Refuse untrained workflows up front: DALEX needs a fitted model in order
  # to compute predictions and residuals for the explainer.
  if (!model$trained) {
    stop("Only trained workflows can be passed to explain function")
  }
  # Delegate to DALEX::explain(), forwarding every argument unchanged.
  explain(model,
          data = data,
          y = y,
          weights = weights,
          predict_function = predict_function,
          predict_function_target_column = predict_function_target_column,
          residual_function = residual_function,
          ...,
          label = label,
          verbose = verbose,
          precalculate = precalculate,
          colorize = colorize,
          model_info = model_info,
          type = type)
}
|
#' @importFrom V8 new_context
# Package-wide V8 context holding the bundled JavaScript helpers.
# NULL until .onLoad runs.
sh <- NULL
.onLoad <- function(libname, pkgname){
  # Create the shared V8 context and source the bundled JS files into it.
  sh <<- new_context();
  sh$source(system.file("js/wkx.js", package = pkgname))
  sh$source(system.file("js/buffer.js", package = pkgname))
}
| /R/onLoad.R | permissive | edzer/wellknown | R | false | false | 223 | r | #' @importFrom V8 new_context
# Package-wide V8 context holding the bundled JavaScript helpers.
# NULL until .onLoad runs.
sh <- NULL
.onLoad <- function(libname, pkgname){
  # Create the shared V8 context and source the bundled JS files into it.
  sh <<- new_context();
  sh$source(system.file("js/wkx.js", package = pkgname))
  sh$source(system.file("js/buffer.js", package = pkgname))
}
|
##############################################################
# Installation function for python dependencies
##############################################################
# Installs scipy into the Python environment managed by reticulate.
# method, conda: forwarded to reticulate::py_install(); the default
# "auto" lets reticulate choose the installation method / conda binary.
install_pydep <- function(method = "auto", conda = "auto") {
  reticulate::py_install("scipy", method = method, conda = conda)
}
| /R/pyinstall.R | no_license | jacklv999/spmle | R | false | false | 304 | r | ##############################################################
# Installation function for python dependencies
##############################################################
# Installs scipy into the Python environment managed by reticulate.
# method, conda: forwarded to reticulate::py_install(); the default
# "auto" lets reticulate choose the installation method / conda binary.
install_pydep <- function(method = "auto", conda = "auto") {
  reticulate::py_install("scipy", method = method, conda = conda)
}
|
## Method declarations
## S3 generics for the menu/navigation steps; each dispatches on the class
## of the persistent environment `e` (e.g. "default" vs "test" mode).
mainMenu <- function(e, ...)UseMethod("mainMenu")
welcome <- function(e, ...)UseMethod("welcome")
housekeeping <- function(e, ...)UseMethod("housekeeping")
inProgressMenu <- function(e, choices, ...)UseMethod("inProgressMenu")
courseMenu <- function(e, courses)UseMethod("courseMenu")
courseDir <- function(e)UseMethod("courseDir")
lessonMenu <- function(e, choices)UseMethod("lessonMenu")
restoreUserProgress <- function(e, selection)UseMethod("restoreUserProgress")
loadLesson <- function(e, ...)UseMethod("loadLesson")
loadInstructions <- function(e, ...)UseMethod("loadInstructions")
# Default course and lesson navigation logic
#
# This method implements default course and lesson navigation logic,
# decoupling menu presentation from internal processing of user
# selections. It relies on several methods for menu presentation,
# namely welcome(e), housekeeping(e), inProgressMenu(e, lessons),
# courseMenu(e, courses), and lessonMenu(e, lessons). Defaults
# are provided.
#
# @param e persistent environment accessible to the callback
#'@importFrom yaml yaml.load_file
# Drives swirl's interactive startup: signs the user in, lets them resume an
# in-progress lesson or pick a course/lesson, then initializes the lesson
# state in the persistent environment `e`. Returns FALSE when the user backs
# out or exits, TRUE once a lesson is loaded and ready.
mainMenu.default <- function(e){
  # Welcome the user if necessary and set up progress tracking
  if(!exists("usr",e,inherits = FALSE)){
    e$usr <- welcome(e)
    udat <- file.path(find.package("swirl"), "user_data", e$usr)
    if(!file.exists(udat)){
      housekeeping(e)
      dir.create(udat, recursive=TRUE)
    }
    e$udat <- udat
  }
  # If there is no active lesson, obtain one.
  if(!exists("les",e,inherits = FALSE)){
    # First, allow user to continue unfinished lessons
    # if there are any
    pfiles <- inProgress(e)
    response <- ""
    if(length(pfiles) > 0){
      response <- inProgressMenu(e, pfiles)
    }
    if(response != "" ){
      # If the user has chosen to continue, restore progress
      # (undo the display cosmetics applied by inProgress())
      response <- gsub(" ", "_", response)
      response <- paste0(response,".rda")
      restoreUserProgress(e, response)
    } else {
      # Else load a new lesson.
      # Let user choose the course.
      coursesU <- dir(courseDir(e))
      # Eliminate empty directories
      idx <- unlist(sapply(coursesU,
                    function(x)length(dir(file.path(courseDir(e),x)))>0))
      coursesU <- coursesU[idx]
      # If no courses are available, offer to install one
      if(length(coursesU)==0){
        suggestions <- yaml.load_file(file.path(courseDir(e), "suggested_courses.yaml"))
        choices <- sapply(suggestions, function(x)paste0(x$Course, ": ", x$Description))
        swirl_out("To begin, you must install a course. I can install a",
                  "course for you from the internet, or I can send you to a web page",
                  "(https://github.com/swirldev/swirl_courses)",
                  "which will provide course options and directions for",
                  "installing courses yourself.",
                  "(If you are not connected to the internet, type 0 to exit.)")
        choices <- c(choices, "Don't install anything for me. I'll do it myself.")
        choice <- select.list(choices, graphics=FALSE)
        n <- which(choice == choices)
        if(length(n) == 0)return(FALSE)
        # The final entry is the "do it myself" opt-out appended above
        if(n < length(choices)){
          repeat {
            # Each suggestion carries an R expression that installs it
            temp <- try(eval(parse(text=suggestions[[n]]$Install)), silent=TRUE)
            if(is(temp, "try-error")){
              swirl_out("Sorry, but I'm unable to fetch ", sQuote(choice),
                        "right now. Are you sure you have an internet connection?",
                        "If so, would you like to try again or visit",
                        "the course repository for instructions on how to",
                        "install a course manually? Type 0 to exit.")
              ch <- c("Try again!",
                      "Send me to the course repository for manual installation.")
              resp <- select.list(ch, graphics=FALSE)
              if(resp == "") return(FALSE)
              if(resp == ch[2]) {
                swirl_out("OK. I'm opening the swirl course respository in your browser.")
                browseURL("https://github.com/swirldev/swirl_courses")
                return(FALSE)
              }
            } else {
              break # Break repeat loop if install is successful
            }
          }
          coursesU <- dir(courseDir(e))
          # Eliminate empty directories
          idx <- unlist(sapply(coursesU,
                        function(x)length(dir(file.path(courseDir(e),x)))>0))
          coursesU <- coursesU[idx]
        } else {
          swirl_out("OK. I'm opening the swirl course respository in your browser.")
          browseURL("https://github.com/swirldev/swirl_courses")
          return(FALSE)
        }
      }
      # path cosmetics
      coursesR <- gsub("_", " ", coursesU)
      lesson <- ""
      while(lesson == ""){
        course <- courseMenu(e, coursesR)
        # A selection named "repo" signals a request to visit the repository
        if(!is.null(names(course)) && names(course)=="repo") {
          swirl_out("OK. I'm opening the swirl courses web page in your browser.")
          browseURL("https://github.com/swirldev/swirl_courses")
          return(FALSE)
        }
        if(course=="")return(FALSE)
        # Set temp course name since csv files don't carry attributes
        e$temp_course_name <- course
        # reverse path cosmetics
        courseU <- coursesU[course == coursesR]
        course_dir <- file.path(courseDir(e), courseU)
        # Get all files/folders from course dir, excluding MANIFEST
        lessons <- dir(course_dir)
        lessons <- lessons[lessons != "MANIFEST"]
        # If MANIFEST exists in course directory, then order courses
        man_path <- file.path(course_dir, "MANIFEST")
        if(file.exists(man_path)) {
          manifest <- get_manifest(course_dir)
          lessons <- order_lessons(current_order=lessons,
                                   manifest_order=manifest)
        }
        # Clean up lesson names
        lessons_clean <- gsub("_", " ", lessons)
        # Let user choose the lesson.
        lesson_choice <- lessonMenu(e, lessons_clean)
        # Set temp lesson name since csv files don't have lesson name attribute
        e$temp_lesson_name <- lesson_choice
        # reverse path cosmetics
        lesson <- ifelse(lesson_choice=="", "",
                         lessons[lesson_choice == lessons_clean])
        # Return to the course menu if the lesson failed to load
        if(lesson == ""){
          if(exists("les", e, inherits=FALSE)){
            rm("les", envir=e, inherits=FALSE)
          }
          lesson <- ""
          next()
        } else {
          # Load the lesson and initialize everything
          e$les <- loadLesson(e, courseU, lesson)
        }
      }
      # For sourcing files which construct figures etc
      e$path <- file.path(courseDir(e), courseU, lesson)
      # If running in 'test' mode and starting partway through
      # lesson, then complete first part
      if(is(e, "test") && e$test_from > 1) {
        complete_part(e)
      }
      # Remove temp lesson name and course name vars, which were surrogates
      # for csv attributes -- they've been attached via lesson() by now
      rm("temp_lesson_name", "temp_course_name", envir=e, inherits=FALSE)
      # Initialize the progress bar
      e$pbar <- txtProgressBar(style=3)
      e$pbar_seq <- seq(0, 1, length=nrow(e$les))
      # expr, val, ok, and vis should have been set by the callback.
      # The lesson's current row - could start after 1 if in 'test' mode
      if(is(e, 'test')) {
        e$row <- e$test_from
      } else {
        e$row <- 1
      }
      # The current row's instruction pointer
      e$iptr <- 1
      # A flag indicating we should return to the prompt
      e$prompt <- FALSE
      # The job of loading instructions for this "virtual machine"
      # is relegated to an S3 method to allow for different "programs."
      loadInstructions(e)
      # An identifier for the active row
      e$current.row <- NULL
      # Set up paths and files to save user progress
      # Make file path from lesson info
      fname <- progressName(attr(e$les,"course_name"), attr(e$les,"lesson_name"))
      # path to file
      e$progress <- file.path(e$udat, fname)
      # indicator that swirl is not reacting to console input
      e$playing <- FALSE
      # create the file
      suppressMessages(suppressWarnings(saveRDS(e, e$progress)))
    }
  }
  return(TRUE)
}
# Test-mode stub: skip interactive sign-in and run as a fixed user name.
welcome.test <- function(e, ...){
  "author"
}
# Default version.
# Interactively asks for the user's name and returns it. Punctuation is
# rejected because the name becomes part of the progress-file paths.
welcome.default <- function(e, ...){
  swirl_out("Welcome to swirl!")
  swirl_out("Please sign in. If you've been here before, use the same name as you did then. If you are new, call yourself something unique.", skip_after=TRUE)
  resp <- readline("What shall I call you? ")
  while(str_detect(resp, '[[:punct:]]')) {
    swirl_out("Please don't use any quotes or other punctuation in your name.",
              skip_after = TRUE)
    resp <- readline("What shall I call you? ")
  }
  return(resp)
}
# Presents preliminary information to a new user, walking them through the
# '...' / 'ANSWER:' conventions and how to exit swirl cleanly.
#
# @param e persistent environment used here only for its class attribute
#
housekeeping.default <- function(e){
  swirl_out(paste0("Thanks, ", e$usr,". Let's cover a few quick housekeeping items before we begin our first lesson. First of all, you should know that when you see '...', that means you should press Enter when you are done reading and ready to continue."))
  readline("\n... <-- That's your cue to press Enter to continue")
  swirl_out("Also, when you see 'ANSWER:', the R prompt (>), or when you are asked to select from a list, that means it's your turn to enter a response, then press Enter to continue.")
  select.list(c("Gotcha!", "'Savvy', as the pirates say...", "Come along, I haven't got all day"),
              title="\nSelect 1, 2, or 3 and press Enter", graphics=FALSE)
  swirl_out("You can exit swirl and return to the R prompt (>) at any time by pressing the Esc key. If you are already at the prompt, type bye() to exit and save your progress. When you exit properly, you'll see a short message letting you know you've done so.")
  info()
  swirl_out("Let's get started!", skip_before=FALSE)
  readline("\n...")
}
# Test-mode stub: no interactive onboarding needed.
housekeeping.test <- function(e){}
# A stub. Eventually this should be a full menu
# Offers the user's unfinished lessons; returns the chosen display name,
# or "" when the user declines them all.
inProgressMenu.default <- function(e, choices){
  nada <- "No. Let me start something new."
  swirl_out("Would you like to continue with one of these lessons?")
  selection <- select.list(c(choices, nada), graphics=FALSE)
  # return a blank if the user rejects all choices
  if(identical(selection, nada))selection <- ""
  return(selection)
}
# Test-mode stub: never resume; always start fresh.
inProgressMenu.test <- function(e, choices) {
  ""
}
# A stub. Eventually this should be a full menu
# Presents installed courses plus a link to the course repository; the
# repository entry is tagged with the name "repo" so the caller can spot it.
courseMenu.default <- function(e, choices){
  repo_option <- "Take me to the swirl course repository!"
  choices <- c(choices, repo = repo_option)
  swirl_out("Please choose a course, or type 0 to exit swirl.")
  return(select.list(choices, graphics=FALSE))
}
# Test-mode stub: course is preselected in the environment.
courseMenu.test <- function(e, choices) {
  e$test_course
}
# A stub. Eventually this should be a full menu
lessonMenu.default <- function(e, choices){
  swirl_out("Please choose a lesson, or type 0 to return to course menu.")
  return(select.list(choices, graphics=FALSE))
}
# Test-mode stub: lesson is preselected in the environment.
lessonMenu.test <- function(e, choices) {
  e$test_lesson
}
# Loads a lesson's content file, its dependencies, its optional
# initLesson.R, and any custom tests, then parses the content.
# Returns the parsed "lesson" object, or FALSE if dependencies fail.
loadLesson.default <- function(e, courseU, lesson){
  # Load the content file
  lesPath <- file.path(courseDir(e), courseU, lesson)
  shortname <- find_lesson(lesPath)
  dataName <- file.path(lesPath,shortname)
  # Handle dependencies
  if(!loadDependencies(lesPath))return(FALSE)
  # Initialize list of official variables
  e$snapshot <- list()
  # initialize course lesson, assigning lesson-specific variables
  initFile <- file.path(lesPath,"initLesson.R")
  if(file.exists(initFile))local({
    source(initFile, local=TRUE)
    # NOTE: the order of the next two statements is important,
    # since a reference to e$snapshot will cause e to appear in
    # local environment.
    xfer(environment(), globalenv())
    # Only add to the "official record" if we are auto-detecting new variables
    if(isTRUE(customTests$AUTO_DETECT_NEWVAR)) {
      e$snapshot <- as.list(environment())
    }
  })
  # load any custom tests
  # NOTE(review): the return value of loadCustomTests() is not checked here,
  # so a failure to load does not actually return FALSE -- confirm intent.
  clearCustomTests()
  loadCustomTests(lesPath)
  # Attach class to content based on file extension
  class(dataName) <- get_content_class(dataName)
  # Parse content, returning object of class "lesson"
  return(parse_content(dataName, e))
}
# Restores a previously saved session: reloads the saved environment into
# `e`, re-sources lesson setup, and re-draws figures up to the saved row.
restoreUserProgress.default <- function(e, selection){
  # read the progress file
  temp <- readRDS(file.path(e$udat, selection))
  # transfer its contents to e
  xfer(temp, e)
  # Since loadDependencies will have worked once, we don't
  # check for failure here. Perhaps we should.
  loadDependencies(e$path)
  # source the initLesson.R file if it exists
  initf <- file.path(e$path, "initLesson.R")
  if(file.exists(initf))local({
    source(initf, local=TRUE)
    xfer(environment(), globalenv())
  })
  # transfer swirl's "official" list of variables to the
  # global environment.
  if(length(e$snapshot)>0){
    xfer(as.environment(e$snapshot), globalenv())
  }
  # load any custom tests
  clearCustomTests()
  loadCustomTests(e$path)
  # Restore figures which precede current row (Issue #44)
  # NOTE(review): when e$row == 1, 1:(e$row - 1) yields c(1, 0) rather than
  # an empty vector; the 0 index is dropped, so row 1's figure is still
  # selected -- confirm this is intended.
  idx <- 1:(e$row - 1)
  figs <- e$les[idx,"Figure"]
  # Check for missing Figure column (Issue #47) and omit NA's
  if(is.null(figs) || length(figs) == 0)return()
  figs <- figs[!is.na(figs)]
  figs <- file.path(e$path, figs)
  lapply(figs, function(x)source(file=x, local=TRUE))
}
# Default instruction set for the lesson "virtual machine": present the
# current item, wait for the user, then test the response.
loadInstructions.default <- function(e){
  e$instr <- list(present, waitUser, testResponse)
}
# UTILITIES
# Build the progress-file name for a course/lesson pair: the two names are
# joined with "_", ".rda" is appended, and all spaces become underscores.
progressName <- function(courseName, lesName) {
  raw_name <- paste(courseName, lesName, sep = "_")
  gsub(" ", "_", paste0(raw_name, ".rda"))
}
# List in-progress lessons for the current user: every ".rda" progress file
# in the user's data directory, shown with the suffix stripped and
# underscores turned into spaces.
inProgress <- function(e) {
  all_files <- dir(e$udat)
  rda_files <- all_files[grep("[.]rda$", all_files)]
  rda_files <- gsub("[.]rda", "", rda_files)
  str_trim(gsub("_", " ", rda_files))
}
# List completed lessons for the current user: every ".done" marker file,
# with the ".done"/".rda" suffixes stripped and underscores shown as spaces.
completed <- function(e) {
  all_files <- dir(e$udat)
  done_files <- all_files[grep("[.]done$", all_files)]
  done_files <- gsub("[.]done", "", done_files)
  done_files <- gsub("[.]rda", "", done_files)
  str_trim(gsub("_", " ", done_files))
}
# Read a course's MANIFEST file and return its non-empty, whitespace-trimmed
# lines (the intended lesson ordering), one lesson name per element.
get_manifest <- function(course_dir) {
  man <- readLines(file.path(course_dir, "MANIFEST"), warn = FALSE)
  # Remove leading and trailing whitespace
  man <- str_trim(man)
  # Drop empty lines and return the result visibly; the original ended with
  # an assignment, so its value was returned only invisibly, by accident of
  # R's assignment semantics.
  man[man != ""]
}
# Take vector of lessons and return in order given by manifest.
# Any courses not included in manifest are excluded!
order_lessons <- function(current_order, manifest_order) {
  positions <- match(manifest_order, current_order)
  current_order[positions]
}
# Root directory holding installed courses (inside the swirl package).
courseDir.default <- function(e){
  # e's only role is to determine the method used
  file.path(find.package("swirl"), "Courses")
}
# Default for determining the user
getUser <- function()UseMethod("getUser")
# Fallback user name when no class-specific method applies.
getUser.default <- function(){"swirladmin"}
| /R/menu.R | no_license | FlicAnderson/swirl | R | false | false | 14,995 | r | ## Method declarations
## S3 generics for the menu/navigation steps; each dispatches on the class
## of the persistent environment `e` (e.g. "default" vs "test" mode).
mainMenu <- function(e, ...)UseMethod("mainMenu")
welcome <- function(e, ...)UseMethod("welcome")
housekeeping <- function(e, ...)UseMethod("housekeeping")
inProgressMenu <- function(e, choices, ...)UseMethod("inProgressMenu")
courseMenu <- function(e, courses)UseMethod("courseMenu")
courseDir <- function(e)UseMethod("courseDir")
lessonMenu <- function(e, choices)UseMethod("lessonMenu")
restoreUserProgress <- function(e, selection)UseMethod("restoreUserProgress")
loadLesson <- function(e, ...)UseMethod("loadLesson")
loadInstructions <- function(e, ...)UseMethod("loadInstructions")
# Default course and lesson navigation logic
#
# This method implements default course and lesson navigation logic,
# decoupling menu presentation from internal processing of user
# selections. It relies on several methods for menu presentation,
# namely welcome(e), housekeeping(e), inProgressMenu(e, lessons),
# courseMenu(e, courses), and lessonMenu(e, lessons). Defaults
# are provided.
#
# @param e persistent environment accessible to the callback
#'@importFrom yaml yaml.load_file
# Drives swirl's interactive startup: signs the user in, lets them resume an
# in-progress lesson or pick a course/lesson, then initializes the lesson
# state in the persistent environment `e`. Returns FALSE when the user backs
# out or exits, TRUE once a lesson is loaded and ready.
mainMenu.default <- function(e){
  # Welcome the user if necessary and set up progress tracking
  if(!exists("usr",e,inherits = FALSE)){
    e$usr <- welcome(e)
    udat <- file.path(find.package("swirl"), "user_data", e$usr)
    if(!file.exists(udat)){
      housekeeping(e)
      dir.create(udat, recursive=TRUE)
    }
    e$udat <- udat
  }
  # If there is no active lesson, obtain one.
  if(!exists("les",e,inherits = FALSE)){
    # First, allow user to continue unfinished lessons
    # if there are any
    pfiles <- inProgress(e)
    response <- ""
    if(length(pfiles) > 0){
      response <- inProgressMenu(e, pfiles)
    }
    if(response != "" ){
      # If the user has chosen to continue, restore progress
      # (undo the display cosmetics applied by inProgress())
      response <- gsub(" ", "_", response)
      response <- paste0(response,".rda")
      restoreUserProgress(e, response)
    } else {
      # Else load a new lesson.
      # Let user choose the course.
      coursesU <- dir(courseDir(e))
      # Eliminate empty directories
      idx <- unlist(sapply(coursesU,
                    function(x)length(dir(file.path(courseDir(e),x)))>0))
      coursesU <- coursesU[idx]
      # If no courses are available, offer to install one
      if(length(coursesU)==0){
        suggestions <- yaml.load_file(file.path(courseDir(e), "suggested_courses.yaml"))
        choices <- sapply(suggestions, function(x)paste0(x$Course, ": ", x$Description))
        swirl_out("To begin, you must install a course. I can install a",
                  "course for you from the internet, or I can send you to a web page",
                  "(https://github.com/swirldev/swirl_courses)",
                  "which will provide course options and directions for",
                  "installing courses yourself.",
                  "(If you are not connected to the internet, type 0 to exit.)")
        choices <- c(choices, "Don't install anything for me. I'll do it myself.")
        choice <- select.list(choices, graphics=FALSE)
        n <- which(choice == choices)
        if(length(n) == 0)return(FALSE)
        # The final entry is the "do it myself" opt-out appended above
        if(n < length(choices)){
          repeat {
            # Each suggestion carries an R expression that installs it
            temp <- try(eval(parse(text=suggestions[[n]]$Install)), silent=TRUE)
            if(is(temp, "try-error")){
              swirl_out("Sorry, but I'm unable to fetch ", sQuote(choice),
                        "right now. Are you sure you have an internet connection?",
                        "If so, would you like to try again or visit",
                        "the course repository for instructions on how to",
                        "install a course manually? Type 0 to exit.")
              ch <- c("Try again!",
                      "Send me to the course repository for manual installation.")
              resp <- select.list(ch, graphics=FALSE)
              if(resp == "") return(FALSE)
              if(resp == ch[2]) {
                swirl_out("OK. I'm opening the swirl course respository in your browser.")
                browseURL("https://github.com/swirldev/swirl_courses")
                return(FALSE)
              }
            } else {
              break # Break repeat loop if install is successful
            }
          }
          coursesU <- dir(courseDir(e))
          # Eliminate empty directories
          idx <- unlist(sapply(coursesU,
                        function(x)length(dir(file.path(courseDir(e),x)))>0))
          coursesU <- coursesU[idx]
        } else {
          swirl_out("OK. I'm opening the swirl course respository in your browser.")
          browseURL("https://github.com/swirldev/swirl_courses")
          return(FALSE)
        }
      }
      # path cosmetics
      coursesR <- gsub("_", " ", coursesU)
      lesson <- ""
      while(lesson == ""){
        course <- courseMenu(e, coursesR)
        # A selection named "repo" signals a request to visit the repository
        if(!is.null(names(course)) && names(course)=="repo") {
          swirl_out("OK. I'm opening the swirl courses web page in your browser.")
          browseURL("https://github.com/swirldev/swirl_courses")
          return(FALSE)
        }
        if(course=="")return(FALSE)
        # Set temp course name since csv files don't carry attributes
        e$temp_course_name <- course
        # reverse path cosmetics
        courseU <- coursesU[course == coursesR]
        course_dir <- file.path(courseDir(e), courseU)
        # Get all files/folders from course dir, excluding MANIFEST
        lessons <- dir(course_dir)
        lessons <- lessons[lessons != "MANIFEST"]
        # If MANIFEST exists in course directory, then order courses
        man_path <- file.path(course_dir, "MANIFEST")
        if(file.exists(man_path)) {
          manifest <- get_manifest(course_dir)
          lessons <- order_lessons(current_order=lessons,
                                   manifest_order=manifest)
        }
        # Clean up lesson names
        lessons_clean <- gsub("_", " ", lessons)
        # Let user choose the lesson.
        lesson_choice <- lessonMenu(e, lessons_clean)
        # Set temp lesson name since csv files don't have lesson name attribute
        e$temp_lesson_name <- lesson_choice
        # reverse path cosmetics
        lesson <- ifelse(lesson_choice=="", "",
                         lessons[lesson_choice == lessons_clean])
        # Return to the course menu if the lesson failed to load
        if(lesson == ""){
          if(exists("les", e, inherits=FALSE)){
            rm("les", envir=e, inherits=FALSE)
          }
          lesson <- ""
          next()
        } else {
          # Load the lesson and initialize everything
          e$les <- loadLesson(e, courseU, lesson)
        }
      }
      # For sourcing files which construct figures etc
      e$path <- file.path(courseDir(e), courseU, lesson)
      # If running in 'test' mode and starting partway through
      # lesson, then complete first part
      if(is(e, "test") && e$test_from > 1) {
        complete_part(e)
      }
      # Remove temp lesson name and course name vars, which were surrogates
      # for csv attributes -- they've been attached via lesson() by now
      rm("temp_lesson_name", "temp_course_name", envir=e, inherits=FALSE)
      # Initialize the progress bar
      e$pbar <- txtProgressBar(style=3)
      e$pbar_seq <- seq(0, 1, length=nrow(e$les))
      # expr, val, ok, and vis should have been set by the callback.
      # The lesson's current row - could start after 1 if in 'test' mode
      if(is(e, 'test')) {
        e$row <- e$test_from
      } else {
        e$row <- 1
      }
      # The current row's instruction pointer
      e$iptr <- 1
      # A flag indicating we should return to the prompt
      e$prompt <- FALSE
      # The job of loading instructions for this "virtual machine"
      # is relegated to an S3 method to allow for different "programs."
      loadInstructions(e)
      # An identifier for the active row
      e$current.row <- NULL
      # Set up paths and files to save user progress
      # Make file path from lesson info
      fname <- progressName(attr(e$les,"course_name"), attr(e$les,"lesson_name"))
      # path to file
      e$progress <- file.path(e$udat, fname)
      # indicator that swirl is not reacting to console input
      e$playing <- FALSE
      # create the file
      suppressMessages(suppressWarnings(saveRDS(e, e$progress)))
    }
  }
  return(TRUE)
}
# Test-mode stub: skip interactive sign-in and run as a fixed user name.
welcome.test <- function(e, ...){
  "author"
}
# Default version.
# Interactively asks for the user's name and returns it. Punctuation is
# rejected because the name becomes part of the progress-file paths.
welcome.default <- function(e, ...){
  swirl_out("Welcome to swirl!")
  swirl_out("Please sign in. If you've been here before, use the same name as you did then. If you are new, call yourself something unique.", skip_after=TRUE)
  resp <- readline("What shall I call you? ")
  while(str_detect(resp, '[[:punct:]]')) {
    swirl_out("Please don't use any quotes or other punctuation in your name.",
              skip_after = TRUE)
    resp <- readline("What shall I call you? ")
  }
  return(resp)
}
# Presents preliminary information to a new user, walking them through the
# '...' / 'ANSWER:' conventions and how to exit swirl cleanly.
#
# @param e persistent environment used here only for its class attribute
#
housekeeping.default <- function(e){
  swirl_out(paste0("Thanks, ", e$usr,". Let's cover a few quick housekeeping items before we begin our first lesson. First of all, you should know that when you see '...', that means you should press Enter when you are done reading and ready to continue."))
  readline("\n... <-- That's your cue to press Enter to continue")
  swirl_out("Also, when you see 'ANSWER:', the R prompt (>), or when you are asked to select from a list, that means it's your turn to enter a response, then press Enter to continue.")
  select.list(c("Gotcha!", "'Savvy', as the pirates say...", "Come along, I haven't got all day"),
              title="\nSelect 1, 2, or 3 and press Enter", graphics=FALSE)
  swirl_out("You can exit swirl and return to the R prompt (>) at any time by pressing the Esc key. If you are already at the prompt, type bye() to exit and save your progress. When you exit properly, you'll see a short message letting you know you've done so.")
  info()
  swirl_out("Let's get started!", skip_before=FALSE)
  readline("\n...")
}
# Test-mode stub: no interactive onboarding needed.
housekeeping.test <- function(e){}
# A stub. Eventually this should be a full menu
# Offers the user's unfinished lessons; returns the chosen display name,
# or "" when the user declines them all.
inProgressMenu.default <- function(e, choices){
  nada <- "No. Let me start something new."
  swirl_out("Would you like to continue with one of these lessons?")
  selection <- select.list(c(choices, nada), graphics=FALSE)
  # return a blank if the user rejects all choices
  if(identical(selection, nada))selection <- ""
  return(selection)
}
# Test-mode stub: never resume; always start fresh.
inProgressMenu.test <- function(e, choices) {
  ""
}
# A stub. Eventually this should be a full menu
# Presents installed courses plus a link to the course repository; the
# repository entry is tagged with the name "repo" so the caller can spot it.
courseMenu.default <- function(e, choices){
  repo_option <- "Take me to the swirl course repository!"
  choices <- c(choices, repo = repo_option)
  swirl_out("Please choose a course, or type 0 to exit swirl.")
  return(select.list(choices, graphics=FALSE))
}
# Test-mode stub: course is preselected in the environment.
courseMenu.test <- function(e, choices) {
  e$test_course
}
# A stub. Eventually this should be a full menu
lessonMenu.default <- function(e, choices){
  swirl_out("Please choose a lesson, or type 0 to return to course menu.")
  return(select.list(choices, graphics=FALSE))
}
# Test-mode stub: lesson is preselected in the environment.
lessonMenu.test <- function(e, choices) {
  e$test_lesson
}
# Loads a lesson's content file, its dependencies, its optional
# initLesson.R, and any custom tests, then parses the content.
# Returns the parsed "lesson" object, or FALSE if dependencies fail.
loadLesson.default <- function(e, courseU, lesson){
  # Load the content file
  lesPath <- file.path(courseDir(e), courseU, lesson)
  shortname <- find_lesson(lesPath)
  dataName <- file.path(lesPath,shortname)
  # Handle dependencies
  if(!loadDependencies(lesPath))return(FALSE)
  # Initialize list of official variables
  e$snapshot <- list()
  # initialize course lesson, assigning lesson-specific variables
  initFile <- file.path(lesPath,"initLesson.R")
  if(file.exists(initFile))local({
    source(initFile, local=TRUE)
    # NOTE: the order of the next two statements is important,
    # since a reference to e$snapshot will cause e to appear in
    # local environment.
    xfer(environment(), globalenv())
    # Only add to the "official record" if we are auto-detecting new variables
    if(isTRUE(customTests$AUTO_DETECT_NEWVAR)) {
      e$snapshot <- as.list(environment())
    }
  })
  # load any custom tests
  # NOTE(review): the return value of loadCustomTests() is not checked here,
  # so a failure to load does not actually return FALSE -- confirm intent.
  clearCustomTests()
  loadCustomTests(lesPath)
  # Attach class to content based on file extension
  class(dataName) <- get_content_class(dataName)
  # Parse content, returning object of class "lesson"
  return(parse_content(dataName, e))
}
# Restores a previously saved session: reloads the saved environment into
# `e`, re-sources lesson setup, and re-draws figures up to the saved row.
restoreUserProgress.default <- function(e, selection){
  # read the progress file
  temp <- readRDS(file.path(e$udat, selection))
  # transfer its contents to e
  xfer(temp, e)
  # Since loadDependencies will have worked once, we don't
  # check for failure here. Perhaps we should.
  loadDependencies(e$path)
  # source the initLesson.R file if it exists
  initf <- file.path(e$path, "initLesson.R")
  if(file.exists(initf))local({
    source(initf, local=TRUE)
    xfer(environment(), globalenv())
  })
  # transfer swirl's "official" list of variables to the
  # global environment.
  if(length(e$snapshot)>0){
    xfer(as.environment(e$snapshot), globalenv())
  }
  # load any custom tests
  clearCustomTests()
  loadCustomTests(e$path)
  # Restore figures which precede current row (Issue #44)
  # NOTE(review): when e$row == 1, 1:(e$row - 1) yields c(1, 0) rather than
  # an empty vector; the 0 index is dropped, so row 1's figure is still
  # selected -- confirm this is intended.
  idx <- 1:(e$row - 1)
  figs <- e$les[idx,"Figure"]
  # Check for missing Figure column (Issue #47) and omit NA's
  if(is.null(figs) || length(figs) == 0)return()
  figs <- figs[!is.na(figs)]
  figs <- file.path(e$path, figs)
  lapply(figs, function(x)source(file=x, local=TRUE))
}
# Default instruction set for the lesson "virtual machine": present the
# current item, wait for the user, then test the response.
loadInstructions.default <- function(e){
  e$instr <- list(present, waitUser, testResponse)
}
# UTILITIES
# Build the progress-file name for a course/lesson pair: the two names are
# joined with "_", ".rda" is appended, and all spaces become underscores.
progressName <- function(courseName, lesName) {
  raw_name <- paste(courseName, lesName, sep = "_")
  gsub(" ", "_", paste0(raw_name, ".rda"))
}
# List in-progress lessons for the current user: every ".rda" progress file
# in the user's data directory, shown with the suffix stripped and
# underscores turned into spaces.
inProgress <- function(e) {
  all_files <- dir(e$udat)
  rda_files <- all_files[grep("[.]rda$", all_files)]
  rda_files <- gsub("[.]rda", "", rda_files)
  str_trim(gsub("_", " ", rda_files))
}
# List completed lessons for the current user: every ".done" marker file,
# with the ".done"/".rda" suffixes stripped and underscores shown as spaces.
completed <- function(e) {
  all_files <- dir(e$udat)
  done_files <- all_files[grep("[.]done$", all_files)]
  done_files <- gsub("[.]done", "", done_files)
  done_files <- gsub("[.]rda", "", done_files)
  str_trim(gsub("_", " ", done_files))
}
# Read a course's MANIFEST file and return its non-empty, whitespace-trimmed
# lines (the intended lesson ordering), one lesson name per element.
get_manifest <- function(course_dir) {
  man <- readLines(file.path(course_dir, "MANIFEST"), warn = FALSE)
  # Remove leading and trailing whitespace
  man <- str_trim(man)
  # Drop empty lines and return the result visibly; the original ended with
  # an assignment, so its value was returned only invisibly, by accident of
  # R's assignment semantics.
  man[man != ""]
}
# Take vector of lessons and return in order given by manifest.
# Any courses not included in manifest are excluded!
order_lessons <- function(current_order, manifest_order) {
  positions <- match(manifest_order, current_order)
  current_order[positions]
}
# Root directory holding installed courses (inside the swirl package).
courseDir.default <- function(e){
  # e's only role is to determine the method used
  file.path(find.package("swirl"), "Courses")
}
# Default for determining the user
getUser <- function()UseMethod("getUser")
# Fallback user name when no class-specific method applies.
getUser.default <- function(){"swirladmin"}
|
# Weighted (population) variance of x under weights w: squared deviations
# from the weighted mean, averaged with the same weights.
wtdvar <- function(x, w){
  total_w <- sum(w)
  mu_w <- sum(w * x) / total_w
  sum(w * (x - mu_w)^2) / total_w
}
| /PracTools/R/wtdvar.R | no_license | ingted/R-Examples | R | false | false | 117 | r |
# Weighted (population) variance of x under weights w: squared deviations
# from the weighted mean, averaged with the same weights.
wtdvar <- function(x, w){
  total_w <- sum(w)
  mu_w <- sum(w * x) / total_w
  sum(w * (x - mu_w)^2) / total_w
}
|
# UI: a file chooser accepting .xlsx/.csv, a case-sensitive text input for
# the worksheet name, and a table displaying the selected sheet.
ui <- fluidPage(
  fileInput('file1', 'Insert File', accept = c(".xlsx", ".csv")),
  textInput('file1sheet','Name of Sheet (Case-Sensitive)'),
  tableOutput("value")
)
server <- function(input, output) {
  # Worksheet names of the uploaded workbook; NULL until a file is chosen.
  # NOTE(review): excel_sheets() is called unqualified -- presumably readxl
  # is attached elsewhere; confirm.
  sheets_name <- reactive({
    if (!is.null(input$file1)) {
      return(excel_sheets(path = input$file1$datapath))
    } else {
      return(NULL)
    }
  })
  # Show the requested sheet once the typed name matches a sheet in the file.
  output$value <- renderTable({
    if (!is.null(input$file1) && # a file has been uploaded, so read it
        (input$file1sheet %in% sheets_name())) {
      # NOTE(review): input$header and input$columns have no matching inputs
      # in the UI above, so both evaluate to NULL here -- confirm intent.
      return(openxlsx::read.xlsx(input$file1$datapath,
                                 sheet = input$file1sheet, colNames = input$header,
                                 cols = c(input$columns)))
    } else {
      return(NULL)
    }
  })
}
shinyApp(ui, server) | /Shiny.R | no_license | LyzeNaomi/Martingale-tests-on-ESG | R | false | false | 791 | r | ui <- fluidPage(
fileInput('file1', 'Insert File', accept = c(".xlsx", ".csv")),
textInput('file1sheet','Name of Sheet (Case-Sensitive)'),
tableOutput("value")
)
server <- function(input, output) {
  # Worksheet names of the uploaded workbook; NULL until a file is chosen.
  # NOTE(review): excel_sheets() is called unqualified -- presumably readxl
  # is attached elsewhere; confirm.
  sheets_name <- reactive({
    if (!is.null(input$file1)) {
      return(excel_sheets(path = input$file1$datapath))
    } else {
      return(NULL)
    }
  })
  # Show the requested sheet once the typed name matches a sheet in the file.
  output$value <- renderTable({
    if (!is.null(input$file1) && # a file has been uploaded, so read it
        (input$file1sheet %in% sheets_name())) {
      # NOTE(review): input$header and input$columns have no matching inputs
      # in the UI above, so both evaluate to NULL here -- confirm intent.
      return(openxlsx::read.xlsx(input$file1$datapath,
                                 sheet = input$file1sheet, colNames = input$header,
                                 cols = c(input$columns)))
    } else {
      return(NULL)
    }
  })
}
# Launch the application.
shinyApp(ui, server)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_GMV.R
\name{calc_GMV}
\alias{calc_GMV}
\title{Calculate gross merchantable volume}
\usage{
calc_GMV(GTV, QMD, SppId, Origin = "N", PlantedSpp = NA)
}
\arguments{
\item{GTV}{Gross total volume}
\item{QMD}{Quadratic mean diameter}
\item{SppId}{Id of a species or forest unit}
\item{Origin}{Origin of the stand. Use N for natural, P for plantation.}
\item{PlantedSpp}{Optional information on planted species required for some plantations.}
}
\value{
Gross merchantable volume
}
\description{
Calculates gross merchantable volume [m3/ha] of a stand based on the provided inputs.
}
| /man/calc_GMV.Rd | no_license | ptompalski/MISTR | R | false | true | 650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_GMV.R
\name{calc_GMV}
\alias{calc_GMV}
\title{Calculate gross merchantable volume}
\usage{
calc_GMV(GTV, QMD, SppId, Origin = "N", PlantedSpp = NA)
}
\arguments{
\item{GTV}{Gross total volume}
\item{QMD}{Quadratic mean diameter}
\item{SppId}{Id of a species or forest unit}
\item{Origin}{Origin of the stand. Use N for natural, P for plantation.}
\item{PlantedSpp}{Optional information on planted species required for some plantations.}
}
\value{
Gross merchantable volume
}
\description{
Calculates gross merchantable volume [m3/ha] of a stand based on the provided inputs.
}
|
#' High School and Beyond - 1982
#'
#' The data are a subsample of the 1982 High School and Beyond survey, with 7,185 students from 160
#' schools (range = 14 - 67).
#'
#' \itemize{
#' \item School: school id
#' \item Minority: minority status (1 = minority, 0 = non-minority)
#' \item Sex: sex of the child (1 = female, 0 = male)
#' \item SES: standardized scale of socio-economic status (constructed from variables measuring parental education,
#' occupation, and income)
#' \item MathAch: math achievement
#' \item Size: number of students per school
#' \item Sector: sector of school (1 = Catholic, 0 = public)
#' \item PRACAD: proportion of students on the academic track in a given school
#' \item DISCLIM: a scale measuring disciplinary climate
#' \item HIMNTY: whether a school has a high proportion of minority students (1 = more than 40\% minority enrollment,
#' 0 = less than 40\%)
#' \item MEANSES: mean of the SES values of students in a given school
#' }
#'
#' @docType data
#' @keywords High School
#' @name HSB_data
#' @usage data(HSB_data)
#' @format A dataframe with 7,185 observations of 11 variables
NULL
| /R/HSB_data.R | no_license | Wambuas/mleda | R | false | false | 1,154 | r | #' High School and Beyond - 1982
#'
#' The data are a subsample of the 1982 High School and Beyond survey, with 7,185 students from 160
#' schools (range = 14 - 67).
#'
#' \itemize{
#' \item School: school id
#' \item Minority: minority status (1 = minority, 0 = non-minority)
#' \item Sex: sex of the child (1 = female, 0 = male)
#' \item SES: standardized scale of socio-economic status (constructed from variables measuring parental education,
#'  occupation, and income)
#' \item MathAch: math achievement
#' \item Size: number of students per school
#' \item Sector: sector of school (1 = Catholic, 0 = public)
#' \item PRACAD: proportion of students on the academic track in a given school
#' \item DISCLIM: a scale measuring disciplinary climate
#' \item HIMNTY: whether a school has a high proportion of minority students (1 = more than 40\% minority enrollment,
#' 0 = less than 40\%)
#' \item MEANSES: mean of the SES values of students in a given school
#' }
#'
#' @docType data
#' @keywords High School
#' @name HSB_data
#' @usage data(HSB_data)
#' @format A dataframe with 7,185 observations of 11 variables
NULL
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22771659067073e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615837510-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22771659067073e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
## since we use c++ code, we have to do more cleanup
## when the package is unloaded; see
## http://r-pkgs.had.co.nz/src.html
## (accessed 20150219) for more info
# Namespace-unload hook: release the package's compiled (C++) shared
# object so the DLL is not left registered in the R session after the
# package is unloaded.
#
# @param libpath Path to the installed package, supplied by R when the
#   namespace is unloaded.
.onUnload <- function(libpath) {
  library.dynam.unload("dhstools", libpath)
}
| /R/zzz.r | no_license | dfeehan/dhstools | R | false | false | 250 | r | ## since we use c++ code, we have to do more cleanup
## when the package is unloaded; see
## http://r-pkgs.had.co.nz/src.html
## (accessed 20150219) for more info
# Namespace-unload hook: releases the package's compiled (C++) shared
# object so the DLL does not linger in the session after unload.
.onUnload <- function (libpath) {
  library.dynam.unload("dhstools", libpath)
}
|
#Name: EMILY TSIEMI SHINZATO DATE: 21/12/2014
#Country: Brazil
#Course: R Programming
#-----------------------------------------------------------------------------------------------
#Step 1: Function makeCacheMatrix
#-----------------------------------------------------------------------------------------------
#This function tracks locations within the computer's memory where the inverse of the matrix in
#question was already processed, not being necessary to process it again (since it calls the
#inverse matrix in R memory)
# Build a cache-aware wrapper around a matrix.
#
# Returns a list of four accessor closures that share the wrapped matrix
# and a cached inverse via their enclosing environment:
#   set(y)      -- replace the stored matrix and invalidate the cache
#   get()       -- return the stored matrix
#   setsolve(s) -- store a computed inverse in the cache
#   getsolve()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # A new matrix makes any previously cached inverse stale.
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setsolve <- function(solve) inv <<- solve
  getsolve <- function() inv
  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
#-----------------------------------------------------------------------------------------------
#Step 2: Function cacheSolve
#-----------------------------------------------------------------------------------------------
## This function checks whether the inverse matrix has been previously calculated,
#otherwise, the inverse matrix is calculated and its value is returned.
# Return the inverse of the matrix wrapped by `x` (an object created by
# makeCacheMatrix). If an inverse is already cached it is returned
# directly (with a message); otherwise it is computed with solve(),
# stored in the cache, and returned.
#
# @param x   A cache wrapper list exposing get/getsolve/setsolve.
# @param ... Extra arguments forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setsolve(inv)
  inv
}
| /cachematrix.R | no_license | emilyshinzato/ProgrammingAssignment2 | R | false | false | 1,435 | r | #Name: EMILY TSIEMI SHINZATO DATE: 21/12/2014
#Country: Brazil
#Course: R Programming
#-----------------------------------------------------------------------------------------------
#Step 1: Function makeCacheMatrix
#-----------------------------------------------------------------------------------------------
#This function tracks locations within the computer's memory where the inverse of the matrix in
#question was already processed, not being necessary to process it again (since it calls the
#inverse matrix in R memory)
# Wrap a matrix together with a cached inverse. Returns a list of four
# closures sharing state through their enclosing environment:
# set/get for the matrix, setsolve/getsolve for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # m holds the cached inverse; NULL means "not computed yet".
  m <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any cached inverse.
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setsolve <- function(solve) m <<- solve
  getsolve <- function() m
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
#-----------------------------------------------------------------------------------------------
#Step 2: Function cacheSolve
#-----------------------------------------------------------------------------------------------
## This function checks whether the inverse matrix has been previously calculated,
#otherwise, the inverse matrix is calculated and its value is returned.
# Return the inverse of the matrix wrapped by `x` (from makeCacheMatrix),
# reusing the cached inverse when one exists; otherwise compute it with
# solve(), store it in the cache, and return it. `...` is forwarded to
# solve().
cacheSolve <- function(x, ...) {
  m <- x$getsolve()
  if(!is.null(m)) {
    # Cache hit: skip recomputation.
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setsolve(m)
  m
}
|
#################################################
########Analysis code for scRNA-seq##############
#scRNA-seq data include day 0 (pre-injury), day 3, and 7 post injury
setwd("./zebrafish_sc/")
library(Seurat)
library(dplyr)
library(Matrix)
library(openxlsx)
# Load the day 0 (pre-injury) 10X run, then append the day 3 and day 7
# runs into the same Seurat object, prefixing their cell barcodes with
# the sampling day.
a1.data<-Read10X(data.dir ="./zf_0d/")
a1<-CreateSeuratObject(raw.data = a1.data, project = "day 0")
projects<-c("_3d","_7d")
project_names<-c("day 3","day 7")
for (i in seq_along(projects)){
  project_data<-Read10X(data.dir=paste("./zf",projects[i],sep=""))
  a1 <- AddSamples(object = a1, new.data = project_data, add.cell.id = project_names[i])
}
cm<-a1
rm(a1)
# QC: per-cell fraction of UMIs coming from mitochondrial genes.
# NOTE(review): zebrafish mitochondrial gene symbols are conventionally
# lower-case ("mt-"); "^MT-" may match no rows, which would make
# percent.mito uniformly zero and the 0.4 filter below a no-op --
# confirm against the reference annotation used for alignment.
mito.genes <- grep(pattern = "^MT-", x = rownames(x = cm@data), value = TRUE)
percent.mito <- Matrix::colSums(cm@raw.data[mito.genes, ])/Matrix::colSums(cm@raw.data)
cm <- AddMetaData(object = cm, metadata = percent.mito, col.name = "percent.mito")
# Keep cells with >200 detected genes, <40% mitochondrial reads, and >2000 UMIs.
cm <- FilterCells(object = cm, subset.names = c("nGene", "percent.mito","nUMI"),
                  low.thresholds = c(200, -Inf,2000), high.thresholds = c(Inf, 0.4,Inf))
cm <- NormalizeData(object = cm, normalization.method = "LogNormalize",
                    scale.factor = 10000)
# Select highly variable genes for dimensionality reduction.
cm <- FindVariableGenes(object = cm, mean.function = ExpMean, dispersion.function = LogVMR,
                        x.low.cutoff = 0.0125, x.high.cutoff = 3, y.cutoff = 0.5)
# Regress out sequencing depth and mitochondrial fraction before PCA.
cm <- ScaleData(object = cm, vars.to.regress = c("nUMI", "percent.mito"))
cm <- RunPCA(object = cm, pc.genes = cm@var.genes, do.print = TRUE, pcs.print = 1:5,
             genes.print = 5)
# Diagnostic plots used to choose the number of PCs (10) for clustering/tSNE.
VizPCA(object = cm, pcs.use = 1:2)
PCAPlot(object = cm, dim.1 = 1, dim.2 = 2)
PCHeatmap(object = cm, pc.use = 1:6, cells.use = 500, do.balanced = TRUE, label.columns = FALSE)
PCElbowPlot(object = cm)
cm <- FindClusters(object = cm, reduction.type = "pca", dims.use = 1:10,
                   resolution = 0.3, print.output = 0, save.SNN = TRUE,force.recalc=T)
cm <- RunTSNE(object = cm, dims.use = 1:10, do.fast = TRUE)
##Clustering
# tSNE colored by cluster, then by sampling day.
pdf("tSNE_plot_zebrafish.pdf")
TSNEPlot(object = cm,do.label=T,label.size = 10,pt.size = 2)
dev.off()
pdf("tSNE_plot_zebrafish_coded_by_date.pdf")
TSNEPlot(object = cm,do.label=F,label.size = 10,pt.size = 2,group.by = "orig.ident")
dev.off()
#Plot marker genes
#cell type markers#
# Fibroblast markers (human symbols lower-cased to zebrafish convention).
markers<-c('ENC1', 'DCN', 'COL5A1','THY1')
markers<-tolower(markers)
pdf("FB_marker_genes_zebrafish.pdf",height=3,width=15)
FeaturePlot(object = cm, features.plot = markers, cols.use = c("grey", "red"), reduction.use = "tsne",nCol = 5)
dev.off()
# Cardiomyocyte markers.
markers<-c('MYL7', 'GATA4',"MEF2AA")
markers<-tolower(markers)
pdf("cardiac_marker_genes_zebrafish.pdf",height=3,width=12)
FeaturePlot(object = cm, features.plot = markers, cols.use = c("grey", "red"), reduction.use = "tsne",nCol = 4)
dev.off()
#Find marker genes for each cluster
maturation.markers <- FindAllMarkers(object = cm, only.pos = TRUE, min.pct = 0.25,
                                     thresh.use = 0.25)
# Drop unannotated zebrafish clone-based genes ("si:" prefix).
maturation.markers<-maturation.markers[-grep("si:",maturation.markers$gene),]
top10 <- maturation.markers %>% group_by(cluster) %>% top_n(10, avg_logFC)
pdf("Top_positive_marker_genes_by_cluster_zf.pdf",width=5,height=8)
DoHeatmap(object = cm,genes.use = top10$gene, slim.col.label = TRUE, remove.key = FALSE)
dev.off()
#Cell cycle genes#
# Regev-lab cell-cycle gene lists: first 43 entries = S phase,
# entries 44-97 = G2/M (human symbols, lower-cased for zebrafish).
cc.genes <- readLines(con = "./regev_lab_cell_cycle_genes.txt")
s.genes <- tolower(cc.genes[1:43])
g2m.genes <- tolower(cc.genes[44:97])
pdf("S_phase_genes_zebrafish.pdf",width=5,height=6.5)
DoHeatmap(object = cm,genes.use = tolower(s.genes), slim.col.label = TRUE, remove.key = FALSE,use.scaled=TRUE)
dev.off()
pdf("G2M_phase_genes_zebrafish.pdf",width=5,height=6.5)
DoHeatmap(object = cm,genes.use = tolower(g2m.genes), slim.col.label = TRUE, remove.key = FALSE,use.scaled=TRUE)
dev.off()
########Trajectory analysis using Monocle#########
# NOTE(review): this section calls monocle/Biobase functions
# (newCellDataSet, pData, fData, ...) but no library(monocle) appears in
# the visible script -- confirm it is loaded elsewhere.
# Align raw counts to the QC-filtered cells kept in cm@data.
ind<-match(colnames(cm@data),colnames(cm@raw.data))
count_matrix<-as.matrix(cm@raw.data[,ind])
pd<-new("AnnotatedDataFrame", data = cm@meta.data)
gene_info<-data.frame(gene_short_name=rownames(count_matrix))
rownames(gene_info)<-rownames(count_matrix)
fd<-new("AnnotatedDataFrame", data = gene_info)
save(fd,pd,file="zebrafish_sc_data_for_monocle.RData")
load("zebrafish_sc_data_for_monocle.RData")
# UMI counts modeled with a negative binomial family.
CM <- newCellDataSet(as(count_matrix, "sparseMatrix"),
                     phenoData = pd,
                     featureData = fd,
                     lowerDetectionLimit = 0.5,
                     expressionFamily = negbinomial.size())
CM <- estimateSizeFactors(CM)
CM <- estimateDispersions(CM)
CM <- detectGenes(CM, min_expr = 0.1)
print(head(fData(CM)))
# Genes detected in at least 10 cells are considered "expressed".
expressed_genes <- row.names(subset(fData(CM),
                                    num_cells_expressed >= 10))
disp_table <- dispersionTable(CM)
unsup_clustering_genes <- subset(disp_table, mean_expression >= 0.1)
CM <- setOrderingFilter(CM, unsup_clustering_genes$gene_id)
plot_ordering_genes(CM)
plot_pc_variance_explained(CM, return_all = F) # norm_method='log'
CM <- reduceDimension(CM, max_components = 2, num_dim = 10,
                      reduction_method = 'tSNE', verbose = T)
CM <- clusterCells(CM, num_clusters = 5)
pdf("monocle_clusters.pdf")
plot_cell_clusters(CM, 1, 2, color = "Cluster")
dev.off()
pdf("monocle_clusters_markers.pdf")
plot_cell_clusters(CM, 1, 2, color = "CellType",
                   markers = c("cmlc1", "myl6"))
dev.off()
#Wnt, endothelial marker etc.
# NOTE(review): "monocole" in the next two file names looks like a typo
# for "monocle"; left as-is because downstream steps may reference these
# exact file names.
pdf("monocole_cell_type_markers.pdf")
plot_cell_clusters(CM, 1, 2, color = "CellType",
                   markers = c("cmlc1", "myl6","wnt11r","thy1","col1a2","dcn","rgs5a"))
dev.off()
pdf("monocole_apoptosis_markers.pdf")
plot_cell_clusters(CM, 1, 2, color = "CellType",
                   markers = c("casp2","bcl2l10","fosl2","hsp90b1"))
dev.off()
pdf("monocle_clusters_by_time.pdf")
plot_cell_clusters(CM, 1, 2, color = "orig.ident")
dev.off()
pdf("cluster_day_relations.pdf",height=4,width=10)
plot_cell_clusters(CM, 1, 2, color = "Cluster") +
  facet_wrap(~orig.ident)
dev.off()
#trajectory
# Order cells along a DDRTree trajectory using genes that change with
# sampling day (qval < 0.01).
diff_test_res <- differentialGeneTest(CM[expressed_genes,],
                                      fullModelFormulaStr = "~orig.ident")
ordering_genes <- row.names (subset(diff_test_res, qval < 0.01))
CM <- setOrderingFilter(CM, ordering_genes)
plot_ordering_genes(CM)
CM <- reduceDimension(CM, max_components = 2,
                      method = 'DDRTree')
CM <- orderCells(CM)
pdf("trajectory_by_day.pdf")
plot_cell_trajectory(CM, color_by = "orig.ident")
dev.off()
pdf("trajectory_by_cluster.pdf")
plot_cell_trajectory(CM, color_by = "Cluster")
dev.off()
pdf("trajectory_by_state.pdf")
plot_cell_trajectory(CM, color_by = "State")
dev.off()
# Root-state picker for pseudotime ordering: return the trajectory state
# that contains the most pre-injury ("day 0") cells, falling back to
# state 1 when the cells were not split into multiple states.
# NOTE(review): relies on Biobase::pData and on "day 0" being a level of
# pData(cds)$orig.ident; errors if that column value is absent.
GM_state <- function(cds){
  states <- pData(cds)$State
  if (length(unique(states)) <= 1) {
    return(1)
  }
  day0_per_state <- table(states, pData(cds)$orig.ident)[, "day 0"]
  as.numeric(names(day0_per_state)[which(day0_per_state == max(day0_per_state))])
}
# Re-order cells with the day-0-dominated state as the trajectory root.
CM <- orderCells(CM, root_state = GM_state(CM))
save(CM,file="monocle_CM_cluster_and_trajectory.RData")
pdf("pseudo_time_plot.pdf")
plot_cell_trajectory(CM, color_by = "Pseudotime")
dev.off()
# Plot cardiomyocyte marker expression along pseudotime, restricted to
# genes detected in at least 10 cells.
CM_expressed_genes <- row.names(subset(fData(CM),
                                       num_cells_expressed >= 10))
CM_filtered <- CM[CM_expressed_genes,]
my_genes <- row.names(subset(fData(CM_filtered),
                             gene_short_name %in% c("cmlc1", "myl7", "myl6","myl9a")))
cds_subset <- CM_filtered[my_genes,]
pdf("marker_genes_pseudotime.pdf")
plot_genes_in_pseudotime(cds_subset, color_by = "orig.ident")
dev.off()
pdf("marker_genes_pseudotime_by_cluster.pdf")
plot_genes_in_pseudotime(cds_subset, color_by = "Cluster")
dev.off()
#####################################################
#####################################################
#########Analysis code for bulk RNA-seq##############
#Bulk RNA-seq samples include young day 3, adult control, and 3 and 7 days post MTZ-injury#
library(DESeq)
#Fig.S3B
# PCA of day-3 young hearts, our adult controls, and public Bednarek
# adult samples, on variance-stabilized counts of expressed genes.
load("./zf_count_mat.RData")
# Drop public SRA samples (ERR accessions) not used in this figure.
ind2rm<-grep("^ERR",colnames(count_mat))
count_mat<-count_mat[,-ind2rm]
# NOTE(review): the column selection c(1,2,5:10) and the times=c(2,2,4)
# group sizes are hard-coded to this matrix's column order -- re-verify
# if the RData object ever changes.
cds<-newCountDataSet(count_mat[,c(1,2,5:10)],factor(rep(c("day3","adult-ours-ctrl","adult-Bednarek"),times=c(2,2,4)),levels=c("day3","adult-ours-ctrl","adult-Bednarek"),ordered=T))
cds<-estimateSizeFactors(cds)
cds<-estimateDispersions(cds)
# Keep genes with >10 total normalized counts.
rs<-rowSums(counts(cds,normalized=T))
cds<-cds[rs>10,]
vst_mat<-getVarianceStabilizedData(cds)
#vst_mat<-counts(cds,normalized=T)
#vst_mat<-log2(vst_mat+0.01)
# Gene-center (no scaling) before PCA on samples.
zf<- t(scale(t(vst_mat),scale=F,center=T))
set.seed(10)
pca<-prcomp(t(zf))
# NOTE(review): `sd` and `var` shadow base R functions from here on;
# harmless in this script but easy to trip over later.
sd<-pca$sdev
loadings<-pca$rotation
var<-sd^2
var.percent<-var/sum(var)*100
labels<-rep(c("day3","adult-ours-ctrl","adult-Bednarek"),times=c(2,2,4))
scores <- data.frame(sample.groups=labels, pca$x[,1:3],labs=rep(c("ours","Bednarek"),times=c(4,4)))
colnames(scores)[2:4]<-c("PC1","PC2","PC3")
#scores$PC1<- -(scores$PC1)
#scores$PC2<- -(scores$PC2)
min_axis<-min(scores[,2:4])
max_axis<-max(scores[,2:4])
library(grid)
cbPalette<-c("azure4","black","blue","brown","cadetblue","chartreuse","cyan",
             "darkorange","darkorchid","deeppink","gold","lightcoral","lightseagreen","magenta","red","lightsalmon","yellow","mediumorchid4","deepskyblue","mediumvioletred","olivedrab","cornsilk","lavender","navajowhite4")
#pie(rep(1,length(cbPalette)),col=cbPalette)
library(ggplot2)
# NOTE(review): the axis labels hard-code the variance percentages
# (75%/13%); recompute from var.percent if the inputs change.
p1=ggplot(data=scores, aes(x=PC1,y=PC2,color=sample.groups))+
  #geom_point(aes(shape=factor(labs)),size=0.7) +
  geom_point(size=0.7) +
  theme(legend.key.width=unit(0.15,"cm"),legend.key.height=unit(0.15,"cm"),legend.justification=c(0,1),legend.margin=unit(-0.1,"cm"),legend.position="top",legend.text=element_text(size=5),axis.text.x = element_text(size=6),axis.text.y=element_text(size=6),axis.title=element_text(size=6),legend.title=element_blank()) + guides(color=guide_legend(ncol=4)) +
  xlim(min_axis,max_axis) + ylim(-52,max_axis+2) + scale_color_manual(values=cbPalette[c(2,15,8,3,4)]) + xlab("PC 1 (75%)") + ylab("PC2 (13%)")
png("./zebrafish_day3_adult_PCA_plot_all_genes_08182016.png",res=600,width=4,height=2,units="in")
p1
dev.off()
#Fig.S3C
# Same PCA as Fig.S3B but additionally including our MTZ-injured adults
# and the Bednarek cryoinjury samples.
load("./zf_count_mat.RData")
ind2rm<-grep("^ERR",colnames(count_mat))
cds<-newCountDataSet(count_mat[,-ind2rm],factor(rep(c("day3","adult-ours-mtz","adult-ours-ctrl","adult-Bednarek","adult-Bednarek-cryoinjury"),times=c(2,2,2,4,4)),levels=c("day3","adult-ours-mtz","adult-ours-ctrl","adult-Bednarek","adult-Bednarek-cryoinjury"),ordered=T))
cds<-estimateSizeFactors(cds)
cds<-estimateDispersions(cds)
# Keep genes with >10 total normalized counts.
rs<-rowSums(counts(cds,normalized=T))
cds<-cds[rs>10,]
vst_mat<-getVarianceStabilizedData(cds)
# Gene-center (no scaling) before PCA on samples.
zf<- t(scale(t(vst_mat),scale=F,center=T))
set.seed(10)
pca<-prcomp(t(zf))
#pca<-princomp(zf)
sd<-pca$sdev
loadings<-pca$rotation
var<-sd^2
var.percent<-var/sum(var)*100
labels<-rep(c("day3","adult-ours-mtz","adult-ours-ctrl","adult-Bednarek","adult-Bednarek-cryoinjury"),times=c(2,2,2,4,4))
scores <- data.frame(sample.groups=labels, pca$x[,1:3],labs=rep(c("ours","Bednarek"),times=c(6,8)))
colnames(scores)[2:4]<-c("PC1","PC2","PC3")
min_axis<-min(scores[,2:4])
max_axis<-max(scores[,2:4])
library(grid)
cbPalette<-c("azure4","black","blue","brown","cadetblue","chartreuse","cyan",
             "darkorange","darkorchid","deeppink","gold","lightcoral","lightseagreen","magenta","red","lightsalmon","yellow","mediumorchid4","deepskyblue","mediumvioletred","olivedrab","cornsilk","lavender","navajowhite4")
library(ggplot2)
# NOTE(review): axis labels hard-code variance percentages (60%/17%);
# recompute from var.percent if the inputs change.
p1=ggplot(data=scores, aes(x=PC1,y=PC2,color=sample.groups))+
  #geom_point(aes(shape=factor(labs)),size=0.7) +
  geom_point(size=0.7) +
  theme(legend.key.width=unit(0.15,"cm"),legend.key.height=unit(0.15,"cm"),legend.justification=c(0,1),legend.margin=unit(-0.1,"cm"),legend.position="top",legend.text=element_text(size=5),axis.text.x = element_text(size=6),axis.text.y=element_text(size=6),axis.title=element_text(size=6),legend.title=element_blank()) + guides(color=guide_legend(ncol=4)) +
  xlim(min_axis,max_axis) + ylim(-100,max_axis+2) + scale_color_manual(values=cbPalette[c(2,15,8,3,4)]) + xlab("PC 1 (60%)") + ylab("PC2 (17%)")
png("zebrafish_day3_adult_cryoinjury_mtz_PCA_plot_all_genes_08182016.png",res=600,width=4,height=2,units="in")
p1
dev.off()
#Fig.2B
####### 3D PCA of zebrafish samples########
library(DESeq)
# Map zebrafish genes to one-to-one human orthologs (Ensembl export).
zf2human<-read.table("zf2human_homologue_01112016_Ensembl.txt",header=T,sep=",")
zf2human<-na.omit(zf2human)
zf2human<-subset(zf2human,Homology.Type=="ortholog_one2one")
# NOTE(review): `recon204` is not defined anywhere in the visible script
# (presumably a vector of human Ensembl IDs from the Recon 2.04 metabolic
# model, loaded elsewhere) -- confirm it is in scope before this line.
zf2human<-subset(zf2human,Human.Ensembl.Gene.ID %in% recon204)
# NOTE(review): `count_mat` and `labels` are presumably provided by this
# RData file -- verify.
load("zebrafish_count_mat_complete_NOV222016.RData")
cds<-newCountDataSet(count_mat[,grep("our",labels)],factor(labels[grep("our",labels)]))
cds<-estimateSizeFactors(cds)
cds<-estimateDispersions(cds)
# Keep genes with >10 total normalized counts.
rs<-rowSums(counts(cds,normalized=T))
cds<-cds[rs>10,]
vst_mat<-getVarianceStabilizedData(cds)
# Restrict to genes with one-to-one human orthologs (filtered above).
vst_mat<-vst_mat[rownames(vst_mat) %in% zf2human$Ensembl.Gene.ID,]
# Gene-center (no scaling) before PCA on samples.
zf<- t(scale(t(vst_mat),scale=F,center=T))
set.seed(10)
pca<-prcomp(t(zf))
sd<-pca$sdev
loadings<-pca$rotation
var<-sd^2
var.percent<-var/sum(var)*100
scores <- data.frame(sample.groups=labels[grep("our",labels)], pca$x[,1:3])
colnames(scores)[2:4]<-c("PC1","PC2","PC3")
min_axis<-min(scores[,2:4])
max_axis<-max(scores[,2:4])
library(grid)
cbPalette<-c("azure4","black","blue","brown","cadetblue","chartreuse","cyan",
             "darkorange","darkorchid","deeppink","gold","lightcoral","lightseagreen","magenta","red","lightsalmon","yellow","mediumorchid4","deepskyblue","mediumvioletred","olivedrab","cornsilk","lavender","navajowhite4")
# Assign one fixed color per sample group for the 3D scatter plot.
scores$sample.groups<-as.character(scores$sample.groups)
color_map<-data.frame(id=unique(scores$sample.groups),color=cbPalette[c(2,3,15,8)])
color_map$id<-as.character(color_map$id)
ind<-match(scores$sample.groups,color_map$id)
scores$color<-color_map$color[ind]
library(scatterplot3d)
pdf("PCA_3D_our_samples.pdf")
s3d<-scatterplot3d(scores$PC1,scores$PC2,scores$PC3,xlab="PC1",ylab="PC2",zlab="PC3",pch=19,cex.symbols = 3,cex.axis = 1.2,cex.lab = 1.8,color=scores$color)
# Place the legend in plot coordinates via the 3D->2D projection.
par(xpd=TRUE)
legend(s3d$xyz.convert(-50, -30, 37),color_map$id,col=as.character(color_map$color),pch=19,ncol=2,bty="n",cex=2)
dev.off()
| /zebrafish_single_and_bulk_RNAseq_analysis.R | no_license | yuliangwang/zebrafish-heart-regeneration-expression-data-analysis- | R | false | false | 14,113 | r | #################################################
########Analysis code for scRNA-seq##############
#scRNA-seq data include day 0 (pre-injury), day 3, and 7 post injury
setwd("./zebrafish_sc/")
library(Seurat)
library(dplyr)
library(Matrix)
library(openxlsx)
a1.data<-Read10X(data.dir ="./zf_0d/")
a1<-CreateSeuratObject(raw.data = a1.data, project = "day 0")
projects<-c("_3d","_7d")
project_names<-c("day 3","day 7")
for (i in seq_along(projects)){
project_data<-Read10X(data.dir=paste("./zf",projects[i],sep=""))
a1 <- AddSamples(object = a1, new.data = project_data, add.cell.id = project_names[i])
}
cm<-a1
rm(a1)
mito.genes <- grep(pattern = "^MT-", x = rownames(x = cm@data), value = TRUE)
percent.mito <- Matrix::colSums(cm@raw.data[mito.genes, ])/Matrix::colSums(cm@raw.data)
cm <- AddMetaData(object = cm, metadata = percent.mito, col.name = "percent.mito")
cm <- FilterCells(object = cm, subset.names = c("nGene", "percent.mito","nUMI"),
low.thresholds = c(200, -Inf,2000), high.thresholds = c(Inf, 0.4,Inf))
cm <- NormalizeData(object = cm, normalization.method = "LogNormalize",
scale.factor = 10000)
cm <- FindVariableGenes(object = cm, mean.function = ExpMean, dispersion.function = LogVMR,
x.low.cutoff = 0.0125, x.high.cutoff = 3, y.cutoff = 0.5)
cm <- ScaleData(object = cm, vars.to.regress = c("nUMI", "percent.mito"))
cm <- RunPCA(object = cm, pc.genes = cm@var.genes, do.print = TRUE, pcs.print = 1:5,
genes.print = 5)
VizPCA(object = cm, pcs.use = 1:2)
PCAPlot(object = cm, dim.1 = 1, dim.2 = 2)
PCHeatmap(object = cm, pc.use = 1:6, cells.use = 500, do.balanced = TRUE, label.columns = FALSE)
PCElbowPlot(object = cm)
cm <- FindClusters(object = cm, reduction.type = "pca", dims.use = 1:10,
resolution = 0.3, print.output = 0, save.SNN = TRUE,force.recalc=T)
cm <- RunTSNE(object = cm, dims.use = 1:10, do.fast = TRUE)
##Clustering
pdf("tSNE_plot_zebrafish.pdf")
TSNEPlot(object = cm,do.label=T,label.size = 10,pt.size = 2)
dev.off()
pdf("tSNE_plot_zebrafish_coded_by_date.pdf")
TSNEPlot(object = cm,do.label=F,label.size = 10,pt.size = 2,group.by = "orig.ident")
dev.off()
#Plot marker genes
#cell type markers#
markers<-c('ENC1', 'DCN', 'COL5A1','THY1')
markers<-tolower(markers)
pdf("FB_marker_genes_zebrafish.pdf",height=3,width=15)
FeaturePlot(object = cm, features.plot = markers, cols.use = c("grey", "red"), reduction.use = "tsne",nCol = 5)
dev.off()
markers<-c('MYL7', 'GATA4',"MEF2AA")
markers<-tolower(markers)
pdf("cardiac_marker_genes_zebrafish.pdf",height=3,width=12)
FeaturePlot(object = cm, features.plot = markers, cols.use = c("grey", "red"), reduction.use = "tsne",nCol = 4)
dev.off()
#Find marker genes for each cluster
maturation.markers <- FindAllMarkers(object = cm, only.pos = TRUE, min.pct = 0.25,
thresh.use = 0.25)
maturation.markers<-maturation.markers[-grep("si:",maturation.markers$gene),]
top10 <- maturation.markers %>% group_by(cluster) %>% top_n(10, avg_logFC)
pdf("Top_positive_marker_genes_by_cluster_zf.pdf",width=5,height=8)
DoHeatmap(object = cm,genes.use = top10$gene, slim.col.label = TRUE, remove.key = FALSE)
dev.off()
#Cell cycle genes#
cc.genes <- readLines(con = "./regev_lab_cell_cycle_genes.txt")
s.genes <- tolower(cc.genes[1:43])
g2m.genes <- tolower(cc.genes[44:97])
pdf("S_phase_genes_zebrafish.pdf",width=5,height=6.5)
DoHeatmap(object = cm,genes.use = tolower(s.genes), slim.col.label = TRUE, remove.key = FALSE,use.scaled=TRUE)
dev.off()
pdf("G2M_phase_genes_zebrafish.pdf",width=5,height=6.5)
DoHeatmap(object = cm,genes.use = tolower(g2m.genes), slim.col.label = TRUE, remove.key = FALSE,use.scaled=TRUE)
dev.off()
########Trajectory analysis using Monocle#########
ind<-match(colnames(cm@data),colnames(cm@raw.data))
count_matrix<-as.matrix(cm@raw.data[,ind])
pd<-new("AnnotatedDataFrame", data = cm@meta.data)
gene_info<-data.frame(gene_short_name=rownames(count_matrix))
rownames(gene_info)<-rownames(count_matrix)
fd<-new("AnnotatedDataFrame", data = gene_info)
save(fd,pd,file="zebrafish_sc_data_for_monocle.RData")
load("zebrafish_sc_data_for_monocle.RData")
CM <- newCellDataSet(as(count_matrix, "sparseMatrix"),
phenoData = pd,
featureData = fd,
lowerDetectionLimit = 0.5,
expressionFamily = negbinomial.size())
CM <- estimateSizeFactors(CM)
CM <- estimateDispersions(CM)
CM <- detectGenes(CM, min_expr = 0.1)
print(head(fData(CM)))
expressed_genes <- row.names(subset(fData(CM),
num_cells_expressed >= 10))
disp_table <- dispersionTable(CM)
unsup_clustering_genes <- subset(disp_table, mean_expression >= 0.1)
CM <- setOrderingFilter(CM, unsup_clustering_genes$gene_id)
plot_ordering_genes(CM)
plot_pc_variance_explained(CM, return_all = F) # norm_method='log'
CM <- reduceDimension(CM, max_components = 2, num_dim = 10,
reduction_method = 'tSNE', verbose = T)
CM <- clusterCells(CM, num_clusters = 5)
pdf("monocle_clusters.pdf")
plot_cell_clusters(CM, 1, 2, color = "Cluster")
dev.off()
pdf("monocle_clusters_markers.pdf")
plot_cell_clusters(CM, 1, 2, color = "CellType",
markers = c("cmlc1", "myl6"))
dev.off()
#Wnt, endothelial marker etc.
pdf("monocole_cell_type_markers.pdf")
plot_cell_clusters(CM, 1, 2, color = "CellType",
markers = c("cmlc1", "myl6","wnt11r","thy1","col1a2","dcn","rgs5a"))
dev.off()
pdf("monocole_apoptosis_markers.pdf")
plot_cell_clusters(CM, 1, 2, color = "CellType",
markers = c("casp2","bcl2l10","fosl2","hsp90b1"))
dev.off()
pdf("monocle_clusters_by_time.pdf")
plot_cell_clusters(CM, 1, 2, color = "orig.ident")
dev.off()
pdf("cluster_day_relations.pdf",height=4,width=10)
plot_cell_clusters(CM, 1, 2, color = "Cluster") +
facet_wrap(~orig.ident)
dev.off()
#trajectory
diff_test_res <- differentialGeneTest(CM[expressed_genes,],
fullModelFormulaStr = "~orig.ident")
ordering_genes <- row.names (subset(diff_test_res, qval < 0.01))
CM <- setOrderingFilter(CM, ordering_genes)
plot_ordering_genes(CM)
CM <- reduceDimension(CM, max_components = 2,
method = 'DDRTree')
CM <- orderCells(CM)
pdf("trajectory_by_day.pdf")
plot_cell_trajectory(CM, color_by = "orig.ident")
dev.off()
pdf("trajectory_by_cluster.pdf")
plot_cell_trajectory(CM, color_by = "Cluster")
dev.off()
pdf("trajectory_by_state.pdf")
plot_cell_trajectory(CM, color_by = "State")
dev.off()
# Return the root state for pseudotime ordering: the trajectory state
# containing the largest number of pre-injury ("day 0") cells; falls
# back to state 1 when only a single state exists.
# NOTE(review): relies on Biobase::pData and on "day 0" being a value of
# pData(cds)$orig.ident; errors if that value is absent.
GM_state <- function(cds){
  if (length(unique(pData(cds)$State)) > 1){
    T0_counts <- table(pData(cds)$State, pData(cds)$orig.ident)[,"day 0"]
    return(as.numeric(names(T0_counts)[which
                                       (T0_counts == max(T0_counts))]))
  } else {
    return (1)
  }
}
CM <- orderCells(CM, root_state = GM_state(CM))
save(CM,file="monocle_CM_cluster_and_trajectory.RData")
pdf("pseudo_time_plot.pdf")
plot_cell_trajectory(CM, color_by = "Pseudotime")
dev.off()
CM_expressed_genes <- row.names(subset(fData(CM),
num_cells_expressed >= 10))
CM_filtered <- CM[CM_expressed_genes,]
my_genes <- row.names(subset(fData(CM_filtered),
gene_short_name %in% c("cmlc1", "myl7", "myl6","myl9a")))
cds_subset <- CM_filtered[my_genes,]
pdf("marker_genes_pseudotime.pdf")
plot_genes_in_pseudotime(cds_subset, color_by = "orig.ident")
dev.off()
pdf("marker_genes_pseudotime_by_cluster.pdf")
plot_genes_in_pseudotime(cds_subset, color_by = "Cluster")
dev.off()
#####################################################
#####################################################
#########Analysis code for bulk RNA-seq##############
#Bulk RNA-seq samples include young day 3, adult control, and 3 and 7 days post MTZ-injury#
library(DESeq)
#Fig.S3B
load("./zf_count_mat.RData")
ind2rm<-grep("^ERR",colnames(count_mat))
count_mat<-count_mat[,-ind2rm]
cds<-newCountDataSet(count_mat[,c(1,2,5:10)],factor(rep(c("day3","adult-ours-ctrl","adult-Bednarek"),times=c(2,2,4)),levels=c("day3","adult-ours-ctrl","adult-Bednarek"),ordered=T))
cds<-estimateSizeFactors(cds)
cds<-estimateDispersions(cds)
rs<-rowSums(counts(cds,normalized=T))
cds<-cds[rs>10,]
vst_mat<-getVarianceStabilizedData(cds)
#vst_mat<-counts(cds,normalized=T)
#vst_mat<-log2(vst_mat+0.01)
zf<- t(scale(t(vst_mat),scale=F,center=T))
set.seed(10)
pca<-prcomp(t(zf))
sd<-pca$sdev
loadings<-pca$rotation
var<-sd^2
var.percent<-var/sum(var)*100
labels<-rep(c("day3","adult-ours-ctrl","adult-Bednarek"),times=c(2,2,4))
scores <- data.frame(sample.groups=labels, pca$x[,1:3],labs=rep(c("ours","Bednarek"),times=c(4,4)))
colnames(scores)[2:4]<-c("PC1","PC2","PC3")
#scores$PC1<- -(scores$PC1)
#scores$PC2<- -(scores$PC2)
min_axis<-min(scores[,2:4])
max_axis<-max(scores[,2:4])
library(grid)
cbPalette<-c("azure4","black","blue","brown","cadetblue","chartreuse","cyan",
"darkorange","darkorchid","deeppink","gold","lightcoral","lightseagreen","magenta","red","lightsalmon","yellow","mediumorchid4","deepskyblue","mediumvioletred","olivedrab","cornsilk","lavender","navajowhite4")
#pie(rep(1,length(cbPalette)),col=cbPalette)
library(ggplot2)
p1=ggplot(data=scores, aes(x=PC1,y=PC2,color=sample.groups))+
#geom_point(aes(shape=factor(labs)),size=0.7) +
geom_point(size=0.7) +
theme(legend.key.width=unit(0.15,"cm"),legend.key.height=unit(0.15,"cm"),legend.justification=c(0,1),legend.margin=unit(-0.1,"cm"),legend.position="top",legend.text=element_text(size=5),axis.text.x = element_text(size=6),axis.text.y=element_text(size=6),axis.title=element_text(size=6),legend.title=element_blank()) + guides(color=guide_legend(ncol=4)) +
xlim(min_axis,max_axis) + ylim(-52,max_axis+2) + scale_color_manual(values=cbPalette[c(2,15,8,3,4)]) + xlab("PC 1 (75%)") + ylab("PC2 (13%)")
png("./zebrafish_day3_adult_PCA_plot_all_genes_08182016.png",res=600,width=4,height=2,units="in")
p1
dev.off()
#Fig.S3C
# Same PCA as above but including the mtz-ablated and cryoinjury samples;
# public SRA (ERR*) columns are removed before normalisation.
load("./zf_count_mat.RData")
ind2rm <- grep("^ERR", colnames(count_mat))
# newCountDataSet() comes from DESeq; library(DESeq) only appears later in
# this file, so the package must already be attached — TODO confirm run order.
cds <- newCountDataSet(
  count_mat[, -ind2rm],
  factor(rep(c("day3", "adult-ours-mtz", "adult-ours-ctrl", "adult-Bednarek",
               "adult-Bednarek-cryoinjury"), times = c(2, 2, 2, 4, 4)),
         levels = c("day3", "adult-ours-mtz", "adult-ours-ctrl", "adult-Bednarek",
                    "adult-Bednarek-cryoinjury"),
         ordered = TRUE)  # TRUE instead of the reassignable shorthand T
)
cds <- estimateSizeFactors(cds)
cds <- estimateDispersions(cds)
# Keep genes with > 10 normalized counts summed across samples.
rs <- rowSums(counts(cds, normalized = TRUE))
cds <- cds[rs > 10, ]
vst_mat <- getVarianceStabilizedData(cds)
# Center genes (rows) without scaling, then PCA over samples.
zf <- t(scale(t(vst_mat), scale = FALSE, center = TRUE))
set.seed(10)
pca <- prcomp(t(zf))
#pca<-princomp(zf)
# NOTE: `sd` and `var` shadow base::sd / base::var within this script.
sd <- pca$sdev
loadings <- pca$rotation
var <- sd^2
var.percent <- var / sum(var) * 100
labels <- rep(c("day3", "adult-ours-mtz", "adult-ours-ctrl", "adult-Bednarek",
                "adult-Bednarek-cryoinjury"), times = c(2, 2, 2, 4, 4))
scores <- data.frame(sample.groups = labels, pca$x[, 1:3],
                     labs = rep(c("ours", "Bednarek"), times = c(6, 8)))
colnames(scores)[2:4] <- c("PC1", "PC2", "PC3")
min_axis <- min(scores[, 2:4])
max_axis <- max(scores[, 2:4])
library(grid)
cbPalette <- c("azure4", "black", "blue", "brown", "cadetblue", "chartreuse", "cyan",
               "darkorange", "darkorchid", "deeppink", "gold", "lightcoral",
               "lightseagreen", "magenta", "red", "lightsalmon", "yellow",
               "mediumorchid4", "deepskyblue", "mediumvioletred", "olivedrab",
               "cornsilk", "lavender", "navajowhite4")
library(ggplot2)
p1 <- ggplot(data = scores, aes(x = PC1, y = PC2, color = sample.groups)) +
  #geom_point(aes(shape=factor(labs)),size=0.7) +
  geom_point(size = 0.7) +
  theme(legend.key.width = unit(0.15, "cm"),
        legend.key.height = unit(0.15, "cm"),
        legend.justification = c(0, 1),
        legend.margin = unit(-0.1, "cm"),
        legend.position = "top",
        legend.text = element_text(size = 5),
        axis.text.x = element_text(size = 6),
        axis.text.y = element_text(size = 6),
        axis.title = element_text(size = 6),
        legend.title = element_blank()) +
  guides(color = guide_legend(ncol = 4)) +
  xlim(min_axis, max_axis) + ylim(-100, max_axis + 2) +
  scale_color_manual(values = cbPalette[c(2, 15, 8, 3, 4)]) +
  xlab("PC 1 (60%)") + ylab("PC2 (17%)")
png("zebrafish_day3_adult_cryoinjury_mtz_PCA_plot_all_genes_08182016.png",
    res = 600, width = 4, height = 2, units = "in")
# FIX: explicit print() — a bare `p1` is not auto-printed under source(),
# which would produce an empty PNG.
print(p1)
dev.off()
#Fig.2B
####### 3D PCA of zebrafish samples########
# 3D PCA (scatterplot3d) restricted to "our" samples and to genes with a
# one-to-one human ortholog that appears in the Recon 2.04 metabolic model.
library(DESeq)
# Zebrafish -> human ortholog map (comma-separated despite the .txt name).
zf2human<-read.table("zf2human_homologue_01112016_Ensembl.txt",header=T,sep=",")
zf2human<-na.omit(zf2human)
zf2human<-subset(zf2human,Homology.Type=="ortholog_one2one")
# NOTE(review): `recon204` is not defined anywhere in this file — it is
# presumably a vector of Recon 2.04 human Ensembl gene ids loaded upstream;
# confirm before running this section standalone.
zf2human<-subset(zf2human,Human.Ensembl.Gene.ID %in% recon204)
# Loads `count_mat` (and presumably `labels`) into the workspace — confirm
# which objects this RData file actually provides.
load("zebrafish_count_mat_complete_NOV222016.RData")
# Keep only "our" samples (label contains "our"); DESeq container.
cds<-newCountDataSet(count_mat[,grep("our",labels)],factor(labels[grep("our",labels)]))
cds<-estimateSizeFactors(cds)
cds<-estimateDispersions(cds)
# Filter out genes with <= 10 total normalized counts.
rs<-rowSums(counts(cds,normalized=T))
cds<-cds[rs>10,]
vst_mat<-getVarianceStabilizedData(cds)
# Restrict to genes present in the ortholog/Recon subset.
vst_mat<-vst_mat[rownames(vst_mat) %in% zf2human$Ensembl.Gene.ID,]
# Row-center genes (no scaling) before PCA on samples.
zf<- t(scale(t(vst_mat),scale=F,center=T))
set.seed(10)
pca<-prcomp(t(zf))
# NOTE: `sd` and `var` below shadow base::sd / base::var.
sd<-pca$sdev
loadings<-pca$rotation
var<-sd^2
var.percent<-var/sum(var)*100
scores <- data.frame(sample.groups=labels[grep("our",labels)], pca$x[,1:3])
colnames(scores)[2:4]<-c("PC1","PC2","PC3")
min_axis<-min(scores[,2:4])
max_axis<-max(scores[,2:4])
library(grid)
cbPalette<-c("azure4","black","blue","brown","cadetblue","chartreuse","cyan",
"darkorange","darkorchid","deeppink","gold","lightcoral","lightseagreen","magenta","red","lightsalmon","yellow","mediumorchid4","deepskyblue","mediumvioletred","olivedrab","cornsilk","lavender","navajowhite4")
# Map each sample group to a fixed color (order of unique() determines the
# group -> color pairing).
scores$sample.groups<-as.character(scores$sample.groups)
color_map<-data.frame(id=unique(scores$sample.groups),color=cbPalette[c(2,3,15,8)])
color_map$id<-as.character(color_map$id)
ind<-match(scores$sample.groups,color_map$id)
scores$color<-color_map$color[ind]
library(scatterplot3d)
pdf("PCA_3D_our_samples.pdf")
s3d<-scatterplot3d(scores$PC1,scores$PC2,scores$PC3,xlab="PC1",ylab="PC2",zlab="PC3",pch=19,cex.symbols = 3,cex.axis = 1.2,cex.lab = 1.8,color=scores$color)
# Allow the legend to be drawn outside the plotting region.
par(xpd=TRUE)
legend(s3d$xyz.convert(-50, -30, 37),color_map$id,col=as.character(color_map$color),pch=19,ncol=2,bty="n",cex=2)
dev.off()
|
# Load libraries used in this project and set the working directory
library(downloader)
library(data.table)
library(plyr)
# NOTE(review): machine-specific absolute path; adjust for your environment.
setwd("C:/Users/User/OneDrive/Getting and Cleaning Data/Data/Course Project/Data Project")
###########################################################################
# Project objectives — one R script (run_analysis.R) that:
#  1. Merges the training and the test sets to create one data set.
#  2. Extracts only the measurements on the mean and standard
#     deviation for each measurement.
#  3. Uses descriptive activity names to name the activities in the data set.
#  4. Appropriately labels the data set with descriptive variable names.
#  5. From the data set in step 4, creates a second, independent tidy data
#     set with the average of each variable for each activity and subject.
#############################################################################
# Preparation
# Data book: http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
############################################################################
# Download, unzip and store the data
# FIX: removed the trailing space that was inside the URL string literal,
# which corrupted the request URL.
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Spell out `destfile` instead of relying on partial matching via `dest=`.
download.file(url, destfile = "dataset.zip", mode = "wb")  # download zip to working directory
# Decompress the archive; `df` holds the extracted file paths
df <- unzip("dataset.zip", exdir = "./Data Project")
#############################################################################
# Step 1 - Merge the training and test sets to create one data set
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
x_data <- rbind(x_train, x_test)                    # feature measurements
y_data <- rbind(y_train, y_test)                    # activity codes
subject_data <- rbind(subject_train, subject_test)  # subject ids
###############################################################################
# Step 2 - Extract only the measurements on the mean and stddev
features <- read.table("./UCI HAR Dataset/features.txt")
# indices of columns whose names contain mean() or std()
mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2])
x_data <- x_data[, mean_and_std_features]            # subset the desired columns
names(x_data) <- features[mean_and_std_features, 2]  # descriptive column names
#################################################################################
# Step 3 - Use descriptive activity names to name the activities
# FIX: removed the duplicated features re-read / re-subset that used to live
# here — `mean_and_std_features` holds indices into the original 561 columns,
# so applying it again to the already-reduced 66-column x_data would fail
# with "undefined columns selected".
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
# replace numeric activity codes with their descriptive names
y_data[, 1] <- activities[y_data[, 1], 2]
names(y_data) <- "activity"
##################################################################################
# Step 4 - Appropriately label the data set with descriptive variable names
names(subject_data) <- "subject"
all_data <- cbind(x_data, y_data, subject_data)  # combine into a single data set
#################################################################################
# Step 5 - From the data set in step 4, create a second, independent tidy
# data set with the average of each variable for each activity and each
# subject (columns 1:66 are the measurement columns).
averages_data <- ddply(all_data, .(subject, activity), function(x) colMeans(x[, 1:66]))
##################################################################################
# Write result to a txt file
# Spell out `row.names` instead of relying on partial matching via `row.name`.
write.table(averages_data, "./UCI HAR Dataset/averages_data.txt", row.names = FALSE)
| /run_analysis.R | no_license | auchinlec/G_C-Data | R | false | false | 4,446 | r | #Load libraries to be used during project and set working directory
library(downloader)
library(data.table)
library(plyr)
setwd("C:/Users/User/OneDrive/Getting and Cleaning Data/Data/Course Project/Data Project")
###########################################################################
#Project Objectives:
#You should create one R script called run_analysis.R that does
#the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard
# deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent
# tidy data set with the average of each variable for each
# activity and each subject.
#############################################################################
#Preparation
# Read data book located at # Data Book - http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
############################################################################
#Download, unzip and store data in
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip "
download.file(url, dest="dataset.zip", mode="wb") #Download zipfile to workinjg direcrory
#Decompress the file using unzip and store files in df
df <- unzip ("dataset.zip", exdir = "./Data Project")
#############################################################################
# Step 1 - Merge the training and test sets to create one data set
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
x_data <- rbind(x_train, x_test) # create 'x' data set
y_data <- rbind(y_train, y_test) # create 'y' data set
subject_data <- rbind(subject_train, subject_test) # create 'subject' data set
###############################################################################
#Step 2 Extract only the measurements on the mean and stddev for each measurement
features <- read.table("./UCI HAR Dataset/features.txt")
# get only columns with mean() or std() in their names
mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2])
# subset the desired columns
x_data <- x_data[, mean_and_std_features]
# correct the column names
names(x_data) <- features[mean_and_std_features, 2]
#################################################################################
#Step 3 - Use descriptive activity names to name the activities in the data set
features <- read.table("./UCI HAR Dataset/features.txt") # Read features.txt into data.frame
mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2]) # mean and std columns
x_data <- x_data[, mean_and_std_features] # subset the desired columns
names(x_data) <- features[mean_and_std_features, 2] # change column names
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
# update values with correct activity names
y_data[, 1] <- activities[y_data[, 1], 2]
# correct column name
names(y_data) <- "activity"
##################################################################################
#Step 4 - Appropriately label the data set with descriptive variable names
names(subject_data) <- "subject" # correct column name
all_data <- cbind(x_data, y_data, subject_data) # bind all the data in a single data set
#################################################################################
# Step 5
# Create a second, independent tidy data set with the average of each variable
# for each activity and each subject
# Step 5. From the data set in step 4, creates a second, independent
# tidy data set with the average of each variable for each
# activity and each subject.
# Calculate column means for all cols except activity & subject
averages_data <- ddply(all_data, .(subject, activity), function(x) colMeans(x[, 1:66]))
##################################################################################
#Write result to txt file
write.table(averages_data, "./UCI HAR Dataset/averages_data.txt", row.name=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/od-funs.R
\name{line2route}
\alias{line2route}
\title{Convert straight OD data (desire lines) into routes}
\usage{
line2route(l, route_fun = "route_cyclestreet", n_print = 10,
list_output = FALSE, l_id = NA, ...)
}
\arguments{
\item{l}{A SpatialLinesDataFrame}
\item{route_fun}{A routing function to be used for converting the straight lines to routes
\code{\link{od2line}}}
\item{n_print}{A number specifying how frequently progress updates
should be shown}
\item{list_output}{If FALSE (default) assumes SpatialLinesDataFrame output. Set to TRUE to save output as a list.}
\item{l_id}{Character string naming the id field from the input lines data,
typically the origin and destination ids pasted together. If absent, the row name of the
straight lines will be used.}
\item{...}{Arguments passed to the routing function, e.g. \code{\link{route_cyclestreet}}}
}
\description{
Convert straight OD data (desire lines) into routes
}
\section{Details}{
See \code{\link{route_cyclestreet}} and other route functions for details.
A parallel implementation of this was available until version 0.1.8.
See \href{https://github.com/ropensci/stplanr/blob/18a598674bb378d5577050178da1561489496157/R/od-funs.R}{github.com/ropensci/stplanr} for details.
}
\examples{
\dontrun{
l = flowlines[2,]
line2route(l)
l = flowlines[2:5,]
rf <- line2route(l = l, "route_cyclestreet", plan = "fastest")
rq <- line2route(l = l, plan = "quietest", silent = TRUE)
plot(rf, col = "red")
plot(rq, col = "green", add = TRUE)
plot(l, add = T)
# Plot for a single line to compare 'fastest' and 'quietest' route
n = 2
plot(l[n,])
lines(rf[n,], col = "red")
lines(rq[n,], col = "green")
# Example with list output
l <- l[1:3,]
rf_list <- line2route(l = l, list_output = TRUE)
class(rf_list) # list output
class(rf_list[[2]]) # but individual elements are spatial
rf_list_of_lists <- line2route(l = l, list_output = TRUE, save_raw = TRUE)
class(rf_list_of_lists) # list output
class(rf_list_of_lists[[2]]) # but individual elements are spatial
# illustration of how the l_id argument works:
rf$id # has id as l has "id" field
l$id <- NULL # remove id field for testing
rf_no_id <- line2route(l)
rf_no_id$id # [1] "1" "2" "3" "4"
rf_with_id = line2route(l, l_id = "All")
rf_with_id$id # [1] 38 10 44
rf_with_err = line2route(l, reporterrors = T)
# rf_with_err$error[2] has the correct error message
}
}
| /man/line2route.Rd | permissive | ktaranov/stplanr | R | false | true | 2,470 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/od-funs.R
\name{line2route}
\alias{line2route}
\title{Convert straight OD data (desire lines) into routes}
\usage{
line2route(l, route_fun = "route_cyclestreet", n_print = 10,
list_output = FALSE, l_id = NA, ...)
}
\arguments{
\item{l}{A SpatialLinesDataFrame}
\item{route_fun}{A routing function to be used for converting the straight lines to routes
\code{\link{od2line}}}
\item{n_print}{A number specifying how frequently progress updates
should be shown}
\item{list_output}{If FALSE (default) assumes SpatialLinesDataFrame output. Set to TRUE to save output as a list.}
\item{l_id}{Character string naming the id field from the input lines data,
typically the origin and destination ids pasted together. If absent, the row name of the
straight lines will be used.}
\item{...}{Arguments passed to the routing function, e.g. \code{\link{route_cyclestreet}}}
}
\description{
Convert straight OD data (desire lines) into routes
}
\section{Details}{
See \code{\link{route_cyclestreet}} and other route functions for details.
A parallel implementation of this was available until version 0.1.8.
See \href{https://github.com/ropensci/stplanr/blob/18a598674bb378d5577050178da1561489496157/R/od-funs.R}{github.com/ropensci/stplanr} for details.
}
\examples{
\dontrun{
l = flowlines[2,]
line2route(l)
l = flowlines[2:5,]
rf <- line2route(l = l, "route_cyclestreet", plan = "fastest")
rq <- line2route(l = l, plan = "quietest", silent = TRUE)
plot(rf, col = "red")
plot(rq, col = "green", add = TRUE)
plot(l, add = T)
# Plot for a single line to compare 'fastest' and 'quietest' route
n = 2
plot(l[n,])
lines(rf[n,], col = "red")
lines(rq[n,], col = "green")
# Example with list output
l <- l[1:3,]
rf_list <- line2route(l = l, list_output = TRUE)
class(rf_list) # list output
class(rf_list[[2]]) # but individual elements are spatial
rf_list_of_lists <- line2route(l = l, list_output = TRUE, save_raw = TRUE)
class(rf_list_of_lists) # list output
class(rf_list_of_lists[[2]]) # but individual elements are spatial
# illustration of how the l_id argument works:
rf$id # has id as l has "id" field
l$id <- NULL # remove id field for testing
rf_no_id <- line2route(l)
rf_no_id$id # [1] "1" "2" "3" "4"
rf_with_id = line2route(l, l_id = "All")
rf_with_id$id # [1] 38 10 44
rf_with_err = line2route(l, reporterrors = T)
# rf_with_err$error[2] has the correct error message
}
}
|
# Libraries: data wrangling (tidyverse), NFL play-by-play (nflfastR),
# parallel mapping (furrr), tables (gt), string interpolation (glue),
# project-relative paths (here).
library(tidyverse)
library(nflfastR)
library(furrr)
library(gt)
library(glue)
library(here)
# 2020 NFL schedule, one row per game.
schedule_2020 <-
nflfastR::fast_scraper_schedules(2020)
# NOTE(review): absolute, machine-specific path — will fail on other machines.
save(schedule_2020, file = "C:\\Users\\tom\\Google Drive\\R\\BricBets\\schedule_2020.rds")
# 2020 rosters; normalize the Rams abbreviation ("LAR" -> "LA") so it matches
# the abbreviation used by the play-by-play data.
rosters_2020 <-
nflfastR::fast_scraper_roster(2020) %>%
mutate(team = if_else(team == "LAR", "LA", team)
)
save(rosters_2020, file = here("rosters_2020.rds"))
# NOTE(review): second save to an absolute machine-specific path.
save(rosters_2020, file = "C:\\Users\\tom\\Google Drive\\R\\BricBets\\rosters_2020.rds")
# Play-by-play for every 2020 game whose kickoff is already in the past.
# NOTE(review): gameday/gametime are parsed in the session's local time
# zone — confirm that is the intended comparison against Sys.time().
nfl_pbp_2020 <-
schedule_2020 %>%
filter(as.POSIXct(paste(gameday, gametime), format = "%Y-%m-%d %H:%M") < Sys.time()) %>%
pull(game_id) %>%
nflfastR::build_nflfastR_pbp(pp = TRUE)
# -------------------------------------------------------------------------
# Per-player, per-game stat lines built from 2020 play-by-play:
# one row per (season, week, game, team, player) with passing / rushing /
# receiving / kicking aggregates, DraftKings fantasy points, and team-share
# usage metrics, joined to rosters and team branding.
# NOTE(review): the final inner_join references nfl_team_list, which is
# defined *later* in this file — confirm the intended execution order.
# -------------------------------------------------------------------------
nfl_stats_2020 <-
  nfl_pbp_2020 %>%
  # For stat tracking, the end runner of a lateral gets the receiving yards,
  # so substitute the lateral receiver on lateral receptions.
  mutate(
    receiver_player_name =
      case_when(lateral_reception == 1 ~ lateral_receiver_player_name,
                TRUE ~ receiver_player_name),
    receiver_player_id =
      # FIX: previously assigned lateral_receiver_player_NAME into the id
      # column, corrupting player ids on lateral plays and breaking the
      # decode_player_ids()/roster join below.
      case_when(lateral_reception == 1 ~ lateral_receiver_player_id,
                TRUE ~ receiver_player_id)
  ) %>%
  select(
    season, play_id, game_date, game_id, yardline_100,
    team = posteam, defteam,
    play_type, play_type_nfl,
    passer, passer_player_id, passer_player_name,
    rusher, rusher_player_id, rusher_player_name,
    receiver, receiver_player_id, receiver_player_name,
    punt_returner_player_name, punt_returner_player_id,
    kickoff_returner_player_name, kickoff_returner_player_id,
    pass, pass_attempt,
    pass_interception = interception,
    incomplete_pass, complete_pass,
    rush_attempt, sack, rush,
    yards_gained, desc, td_team, touchdown,
    pass_touchdown, rush_touchdown, return_touchdown,
    two_point_attempt, two_point_conv_result,
    yards_after_catch, air_yards,
    extra_point_attempt, extra_point_result,
    field_goal_attempt, field_goal_result,
    kicker_player_id, kicker_player_name,
    ep, epa
  ) %>%
  left_join(schedule_2020) %>%
  mutate(
    # Sacks are not pass attempts for stat purposes.
    pass = case_when(play_type_nfl == "SACK" ~ as.double(0),
                     TRUE ~ pass),
    # Code kneels as runs ...
    play_type = case_when(play_type == "qb_kneel" ~ "run",
                          TRUE ~ play_type),
    # ... and ensure kneels are counted as rush attempts.
    rush = case_when(play_type == "qb_kneel" ~ as.double(1),
                     TRUE ~ rush),
    # Replace NAs in every numeric column with 0 so the sums below are clean.
    across(where(is.numeric), .fns = ~ replace_na(., 0))
  ) %>%
  # Reshape to one row per (play, role): rusher / passer / receiver / kicker.
  pivot_longer(
    cols = c("rusher_player_id", "passer_player_id",
             "receiver_player_id", "kicker_player_id"),
    names_to = "player_type",
    values_to = "player_id"
  ) %>%
  mutate(player_type = str_remove(player_type, "_player_id")) %>%
  filter(!is.na(player_id)) %>%
  # Punt/kick returns are not accounted for as of now.
  mutate(
    player_name =
      case_when(
        play_type %in% c("pass", "qb_spike") & player_type == "passer" ~ passer_player_name,
        play_type == "pass" & player_type == "receiver" ~ receiver_player_name,
        play_type == "run" & player_type == "rusher" ~ rusher_player_name,
        play_type %in% c("field_goal", "extra_point") & player_type == "kicker" ~ kicker_player_name
      ),
    # Rushing stats count only on the rusher's row; 0 everywhere else.
    rusher_attempt = case_when(player_type == "rusher" ~ 1, TRUE ~ 0),
    rusher_yards_gained = case_when(player_type == "rusher" ~ yards_gained, TRUE ~ 0),
    rusher_touchdown = case_when(player_type == "rusher" ~ touchdown, TRUE ~ 0),
    rusher_two_pt_success = case_when(player_type == "rusher" & two_point_conv_result == 1 ~ 1,
                                      TRUE ~ 0),
    rusher_two_pt_att = case_when(player_type == "rusher" & two_point_attempt == 1 ~ 1,
                                  TRUE ~ 0),
    # Receiving stats count only on the receiver's row.
    receiver_reception = case_when(player_type == "receiver" & complete_pass == 1 ~ 1,
                                   TRUE ~ 0),
    receiver_yards_gained = case_when(player_type == "receiver" ~ yards_gained, TRUE ~ 0),
    receiver_incomplete_target = case_when(player_type == "receiver" & complete_pass == 0 ~ 1,
                                           TRUE ~ 0),
    receiver_target = case_when(player_type == "receiver" ~ 1, TRUE ~ 0),
    receiver_touchdown = case_when(player_type == "receiver" ~ touchdown, TRUE ~ 0),
    receiver_air_yards = case_when(player_type == "receiver" ~ air_yards, TRUE ~ 0),
    receiver_yac = case_when(player_type == "receiver" ~ yards_after_catch, TRUE ~ 0),
    receiver_two_pt_success = case_when(player_type == "receiver" & two_point_conv_result == 1 ~ 1,
                                        TRUE ~ 0),
    receiver_two_pt_att = case_when(player_type == "receiver" & two_point_attempt == 1 ~ 1,
                                    TRUE ~ 0),
    # Passing stats count only on the passer's row.
    pass_complete = case_when(player_type == "passer" & complete_pass == 1 ~ 1, TRUE ~ 0),
    pass_interception = case_when(player_type == "passer" & pass_interception == 1 ~ 1,
                                  TRUE ~ 0),
    pass_incomplete = case_when(player_type == "passer" & complete_pass == 0 ~ 1, TRUE ~ 0),
    pass_attempt = case_when(player_type == "passer" ~ 1, TRUE ~ 0),
    pass_air_yards = case_when(player_type == "passer" ~ air_yards, TRUE ~ 0),
    pass_touchdown = case_when(player_type == "passer" ~ touchdown, TRUE ~ 0),
    pass_yac = case_when(player_type == "passer" ~ yards_after_catch, TRUE ~ 0),
    pass_yards_gained = case_when(player_type == "passer" ~ yards_gained, TRUE ~ 0),
    pass_two_pt_success = case_when(player_type == "passer" & two_point_conv_result == 1 ~ 1,
                                    TRUE ~ 0),
    pass_two_pt_att = case_when(player_type == "passer" & two_point_attempt == 1 ~ 1,
                                TRUE ~ 0),
    pass_sack_yards = case_when(player_type == "passer" & sack == 1 ~ yards_gained,
                                TRUE ~ 0)
  ) %>%
  select(
    season, week, play_id, game_date, game_id, team, defteam,
    play_type, player_type, player_name, player_id,
    pass, rush,
    rusher_attempt, rusher_touchdown, rusher_yards_gained,
    rusher_two_pt_att, rusher_two_pt_success,
    pass_attempt, pass_complete, pass_incomplete, pass_air_yards,
    pass_touchdown, pass_yac, pass_yards_gained,
    pass_two_pt_success, pass_two_pt_att, pass_interception,
    pass_sack_yards, pass_sack = sack,
    receiver_reception, receiver_incomplete_target, receiver_target,
    receiver_touchdown, receiver_air_yards, receiver_yards_gained,
    receiver_yac, receiver_two_pt_success, receiver_two_pt_att,
    yards_gained, desc, touchdown,
    two_point_attempt, two_point_conv_result, yardline_100,
    extra_point_attempt, extra_point_result,
    field_goal_attempt, field_goal_result,
    ep, epa
  ) %>%
  # Aggregate to one row per player-game.
  group_by(season, week, game_id, game_date, team, defteam,
           player_id, player_name) %>%
  summarize(
    xp_atts = sum(extra_point_attempt),
    xp_made = sum(if_else(!is.na(extra_point_result) & extra_point_result == "good", 1, 0)),
    fg_atts = sum(replace_na(field_goal_attempt, 0)),
    # NOTE(review): these buckets split on yardline_100, but actual kick
    # distance is yardline_100 + 17 — verify against the intended
    # DraftKings distance bands before trusting DKKicking.
    fg_made_0_39 = sum(if_else(field_goal_result == "made" & !is.na(field_goal_result) & yardline_100 < 40, 1, 0)),
    fg_made_40_49 = sum(if_else(field_goal_result == "made" & !is.na(field_goal_result) & yardline_100 < 50 & yardline_100 > 39, 1, 0)),
    fg_made_50_plus = sum(if_else(field_goal_result == "made" & !is.na(field_goal_result) & yardline_100 > 49, 1, 0)),
    # Red-zone (<= 20) and goal-zone (<= 10) usage.
    rz_rush_atts = sum(if_else(yardline_100 <= 20, rusher_attempt, 0)),
    rz_rec_targ = sum(if_else(yardline_100 <= 20, receiver_target, 0)),
    gz_rush_atts = sum(if_else(yardline_100 <= 10, rusher_attempt, 0)),
    gz_rec_targ = sum(if_else(yardline_100 <= 10, receiver_target, 0)),
    rz_pass_atts = sum(if_else(yardline_100 <= 20, pass_attempt, 0)),
    gz_pass_atts = sum(if_else(yardline_100 <= 10, pass_attempt, 0)),
    rz_ep = sum(if_else(yardline_100 <= 20, ep, 0)),
    gz_ep = sum(if_else(yardline_100 <= 10, ep, 0)),
    ep = sum(ep),
    epa = sum(epa),
    pass_cmp = sum(pass_complete),
    pass_atts = sum(pass_attempt),
    pass_yards = sum(pass_yards_gained),
    pass_air_yards = sum(pass_air_yards),
    pass_td = sum(pass_touchdown),
    pass_two_pt_att = sum(pass_two_pt_att),
    pass_two_pt_success = sum(pass_two_pt_success),
    pass_interception = sum(pass_interception),
    pass_sack = sum(pass_sack),
    pass_sack_yards = sum(pass_sack_yards),
    pass_long = max(pass_yards_gained),
    rush_yards = sum(rusher_yards_gained),
    rush_atts = sum(rusher_attempt),
    rush_td = sum(rusher_touchdown),
    rush_two_pt_att = sum(rusher_two_pt_att),
    rush_two_pt_success = sum(rusher_two_pt_success),
    rush_long = max(rusher_yards_gained),
    rec_yards = sum(receiver_yards_gained),
    rec_td = sum(receiver_touchdown),
    rec_targ = sum(receiver_target),
    rec_incomplete_target = sum(receiver_incomplete_target),
    rec_cmp = sum(receiver_reception),
    rec_long = max(receiver_yards_gained),
    rec_airyards = sum(receiver_air_yards),
    rec_yac = sum(receiver_yac),
    rec_two_pt_success = sum(receiver_two_pt_success),
    rec_two_pt_att = sum(receiver_two_pt_att),
    # average depth of target
    rec_adot = sum(receiver_air_yards) / sum(receiver_target)
  ) %>%
  # DraftKings fantasy scoring.
  mutate(DKPassing =
           (pass_td * 4) +
           if_else(pass_yards >= 300, 3, 0) +   # 300-yard bonus
           (pass_yards * .04) +
           (pass_interception * -1),
         DKRushing =
           (rush_td * 6) +
           (rush_yards * .1) +
           if_else(rush_yards >= 100, 3, 0),    # 100-yard bonus
         DKReceiving =
           (rec_td * 6) +
           (rec_yards * .1) +
           if_else(rec_yards >= 100, 3, 0) +    # 100-yard bonus
           (rec_cmp * 1),                       # full PPR
         DKKicking = xp_made + (fg_made_0_39 * 3) + (fg_made_40_49 * 4) + (fg_made_50_plus * 5)
  ) %>%
  # Team totals per game, used for usage-share metrics.
  group_by(season,
           week,
           game_id,
           team,
           defteam,
           game_date) %>%
  mutate(team_targets = sum(rec_targ),
         team_airyards = sum(rec_airyards),
         team_carries = sum(rush_atts),
         team_rz_rec_targ = sum(rz_rec_targ),
         team_rz_rush_atts = sum(rz_rush_atts),
         team_gz_rec_targ = sum(gz_rec_targ),
         team_gz_rush_atts = sum(gz_rush_atts)) %>%
  ungroup() %>%
  # Shares and derived receiving metrics (WOPR, RACR).
  mutate(rec_targ_share = rec_targ / team_targets,
         rec_airyards_share = rec_airyards / team_airyards,
         rec_wopr = (1.5 * rec_targ_share) + (.7 * rec_airyards_share),
         rec_racr = rec_yards / rec_airyards,
         rush_carry_share = rush_atts / team_carries,
         rz_carry_share = rz_rush_atts / team_rz_rush_atts,
         gz_carry_share = gz_rush_atts / team_gz_rush_atts,
         rz_rec_targ_share = rz_rec_targ / team_rz_rec_targ,
         gz_rec_targ_share = gz_rec_targ / team_gz_rec_targ
  ) %>%
  # Translate play-by-play ids to GSIS ids, then attach roster info.
  decode_player_ids() %>%
  rename(gsis_id = player_id) %>%
  left_join(rosters_2020) %>%
  # NOTE(review): nfl_team_list is defined later in this file; this join only
  # works if that section has already been run once.
  inner_join(nfl_team_list %>% select(team_abbr,
                                      team_name,
                                      team_logo_wikipedia,
                                      team_wordmark),
             by = c("team" = "team_abbr"))
## Thielen in week 1 or so has a player name of NA on a 2pt
saveRDS(nfl_stats_2020, file = "nfl_stats_2020.rds")
# Weekly ESPN QBR for every (season, week) pair present in the stats table.
espn_qbr_2020 <-
nfl_stats_2020 %>%
distinct(season, week) %>%
# magrittr brace block: fetch QBR for each season/week pair and row-bind.
{map2_df(.$season, .$week, espnscrapeR::get_nfl_qbr)} %>%
rename(week = game_week) %>%
# game_week arrives as character; make it numeric to match nfl_stats_2020.
mutate(week = as.double(week))
save(espn_qbr_2020, file = "espn_qbr_2020.rds")
## Team List ##
# Current franchises only: drop relocated/legacy abbreviations.
# NOTE(review): nfl_team_list is referenced earlier in this file (the
# inner_join at the end of the nfl_stats_2020 pipeline), so this section
# must run before that pipeline — verify the intended execution order.
nfl_team_list <-
nflfastR::teams_colors_logos %>%
filter(!team_abbr %in% c("STL", "OAK", "LAR", "SD" ))
# Named lookup: full team name -> abbreviation (useful for UI selectors).
name_list <-
set_names(nfl_team_list$team_abbr, nfl_team_list$team_name)
saveRDS(nfl_team_list, file = here("nfl_team_list.rds"))
library(nflfastR)
library(furrr)
library(gt)
library(glue)
library(here)
schedule_2020 <-
nflfastR::fast_scraper_schedules(2020)
save(schedule_2020, file = "C:\\Users\\tom\\Google Drive\\R\\BricBets\\schedule_2020.rds")
rosters_2020 <-
nflfastR::fast_scraper_roster(2020) %>%
mutate(team = if_else(team == "LAR", "LA", team)
)
save(rosters_2020, file = here("rosters_2020.rds"))
save(rosters_2020, file = "C:\\Users\\tom\\Google Drive\\R\\BricBets\\rosters_2020.rds")
nfl_pbp_2020 <-
schedule_2020 %>%
filter(as.POSIXct(paste(gameday, gametime), format = "%Y-%m-%d %H:%M") < Sys.time()) %>%
pull(game_id) %>%
nflfastR::build_nflfastR_pbp(pp = TRUE)
nfl_stats_2020 <-
nfl_pbp_2020 %>%
### For Stat Tracking, the end runner of the lateral gets the receiving yards.
mutate(
receiver_player_name =
case_when(lateral_reception == 1 ~lateral_receiver_player_name,
TRUE ~receiver_player_name
),
receiver_player_id =
case_when(lateral_reception == 1 ~lateral_receiver_player_name,
TRUE ~receiver_player_id
),
) %>%
select(
season,
play_id,
game_date,
game_id,
yardline_100,
team = posteam,
defteam,
play_type,
play_type_nfl,
passer,
passer_player_id,
passer_player_name,
rusher,
rusher_player_id,
rusher_player_name,
receiver,
receiver_player_id,
receiver_player_name,
punt_returner_player_name,
punt_returner_player_id,
kickoff_returner_player_name,
kickoff_returner_player_id,
pass,
pass_attempt,
pass_interception = interception,
incomplete_pass,
complete_pass,
rush_attempt,
sack,
rush,
yards_gained,
desc,
td_team,
touchdown,
pass_touchdown,
rush_touchdown,
return_touchdown,
two_point_attempt,
two_point_conv_result,
yards_after_catch,
air_yards,
extra_point_attempt,
extra_point_result,
field_goal_attempt,
field_goal_result,
kicker_player_id,
kicker_player_name,
ep,
epa
) %>%
left_join(schedule_2020) %>%
# remove pass attempts from Sacks
mutate(
pass = case_when(play_type_nfl == "SACK" ~ as.double(0),
TRUE ~ pass),
# code kneels as runs
play_type = case_when(play_type == "qb_kneel" ~ "run",
TRUE ~ play_type),
# ensure that kneels are counted as rush attempts
rush = case_when(play_type == "qb_kneel" ~ as.double(1),
TRUE ~ rush),
# remove NAs in numerics and replace with 0s
across(where(is.numeric), .fns = ~ replace_na(., 0))
) %>%
pivot_longer(
cols = c("rusher_player_id", "passer_player_id", "receiver_player_id", "kicker_player_id"),
names_to = "player_type",
values_to = "player_id"
) %>%
mutate(
player_type = str_remove(
player_type,
"_player_id"
)
) %>%
filter(
!is.na(player_id)
) %>%
# not accounting for returns as of now
mutate(
player_name =
case_when(
play_type %in% c("pass", "qb_spike") & player_type == "passer" ~ passer_player_name,
play_type == "pass" & player_type == "receiver" ~ receiver_player_name,
play_type == "run" & player_type == "rusher" ~ rusher_player_name,
play_type %in% c("field_goal", "extra_point") & player_type == "kicker" ~ kicker_player_name
),
rusher_attempt = case_when(
player_type == "rusher" ~ 1,
TRUE ~ 0
),
rusher_yards_gained = case_when(
player_type == "rusher" ~ yards_gained,
TRUE ~ 0
),
rusher_touchdown = case_when(
player_type == "rusher" ~ touchdown,
TRUE ~ 0
),
rusher_two_pt_success = case_when(
player_type == "rusher" & two_point_conv_result == 1 ~ 1,
TRUE ~ 0
),
rusher_two_pt_att = case_when(
player_type == "rusher" & two_point_attempt == 1 ~ 1,
TRUE ~ 0
),
receiver_reception = case_when(
player_type == "receiver" & complete_pass == 1 ~ 1,
TRUE ~ 0
),
receiver_yards_gained = case_when(
player_type == "receiver" ~ yards_gained,
TRUE ~ 0
),
receiver_incomplete_target = case_when(
player_type == "receiver" & complete_pass == 0 ~ 1,
TRUE ~ 0
),
receiver_target = case_when(
player_type == "receiver" ~ 1,
TRUE ~ 0
),
receiver_touchdown = case_when(
player_type == "receiver" ~ touchdown,
TRUE ~ 0
),
receiver_air_yards = case_when(
player_type == "receiver" ~ air_yards,
TRUE ~ 0
),
receiver_yac = case_when(
player_type == "receiver" ~ yards_after_catch,
TRUE ~ 0
),
receiver_two_pt_success = case_when(
player_type == "receiver" & two_point_conv_result == 1 ~ 1,
TRUE ~ 0
),
receiver_two_pt_att = case_when(
player_type == "receiver" & two_point_attempt == 1 ~ 1,
TRUE ~ 0
),
pass_complete = case_when(
player_type == "passer" & complete_pass == 1 ~ 1,
TRUE ~ 0
),
pass_interception = case_when(
player_type == "passer" & pass_interception == 1 ~ 1,
TRUE ~0
),
pass_incomplete = case_when(
player_type == "passer" & complete_pass == 0 ~ 1,
TRUE ~ 0
),
pass_attempt = case_when(
player_type == "passer" ~ 1,
TRUE ~ 0
),
pass_air_yards = case_when(
player_type == "passer" ~ air_yards,
TRUE ~ 0
),
pass_touchdown = case_when(
player_type == "passer" ~ touchdown,
TRUE ~ 0
),
pass_yac = case_when(
player_type == "passer" ~ yards_after_catch,
TRUE ~ 0
),
pass_yards_gained = case_when(
player_type == "passer" ~ yards_gained,
TRUE ~ 0
),
pass_two_pt_success = case_when(
player_type == "passer" & two_point_conv_result == 1 ~ 1,
TRUE ~ 0
),
pass_two_pt_att = case_when(
player_type == "passer" & two_point_attempt == 1 ~ 1,
TRUE ~ 0
),
pass_sack_yards = case_when(
player_type == "passer" & sack == 1 ~ yards_gained,
TRUE ~ 0
)
) %>%
select(
season,
week,
play_id,
game_date,
game_id,
team,
defteam,
play_type,
player_type,
player_name,
player_id,
pass,
rush,
rusher_attempt,
rusher_touchdown,
rusher_yards_gained,
rusher_two_pt_att,
rusher_two_pt_success,
pass_attempt,
pass_complete,
pass_incomplete,
pass_air_yards,
pass_touchdown,
pass_yac,
pass_yards_gained,
pass_two_pt_success,
pass_two_pt_att,
pass_interception,
pass_sack_yards,
pass_sack = sack,
receiver_reception,
receiver_incomplete_target,
receiver_target,
receiver_touchdown,
receiver_air_yards,
receiver_yards_gained,
receiver_yac,
receiver_two_pt_success,
receiver_two_pt_att,
yards_gained,
desc,
touchdown,
two_point_attempt,
two_point_conv_result,
yardline_100,
extra_point_attempt,
extra_point_result,
field_goal_attempt,
field_goal_result,
ep,
epa
) %>%
group_by(
season,
week,
game_id,
game_date,
team,
defteam,
player_id,
player_name
) %>%
summarize(
xp_atts = sum(extra_point_attempt, 0),
xp_made = sum(if_else(!is.na(extra_point_result) & extra_point_result == "good", 1, 0)),
fg_atts = sum(replace_na(field_goal_attempt, 0)),
fg_made_0_39 = sum(if_else(field_goal_result == "made" & !is.na(field_goal_result) & yardline_100 < 40, 1, 0)),
fg_made_40_49 = sum(if_else(field_goal_result == "made" & !is.na(field_goal_result) & yardline_100 < 50 & yardline_100 > 39, 1, 0)),
fg_made_50_plus = sum(if_else(field_goal_result == "made" & !is.na(field_goal_result) & yardline_100 > 49, 1, 0)),
rz_rush_atts = sum(if_else(yardline_100 <= 20 , rusher_attempt, 0)),
rz_rec_targ = sum(if_else(yardline_100 <= 20, receiver_target, 0)),
gz_rush_atts = sum(if_else(yardline_100 <= 10, rusher_attempt, 0)),
gz_rec_targ = sum(if_else(yardline_100 <= 10, receiver_target, 0)),
rz_pass_atts = sum(if_else(yardline_100 <= 20, pass_attempt, 0)),
gz_pass_atts = sum(if_else(yardline_100 <= 10, pass_attempt, 0)),
rz_ep = sum(if_else(yardline_100 <= 20, ep, 0)),
gz_ep = sum(if_else(yardline_100 <= 10, ep, 0)),
ep = sum(ep),
epa = sum(epa),
pass_cmp = sum(pass_complete),
pass_atts = sum(pass_attempt),
pass_yards = sum(pass_yards_gained),
pass_air_yards = sum(pass_air_yards),
pass_td = sum(pass_touchdown),
pass_two_pt_att = sum(pass_two_pt_att),
pass_two_pt_success = sum(pass_two_pt_success),
pass_interception = sum(pass_interception),
pass_sack = sum(pass_sack),
pass_sack_yards = sum(pass_sack_yards),
pass_long = max(pass_yards_gained),
rush_yards = sum(rusher_yards_gained),
rush_atts = sum(rusher_attempt),
rush_td = sum(rusher_touchdown),
rush_two_pt_att = sum(rusher_two_pt_att),
rush_two_pt_success = sum(rusher_two_pt_success),
rush_long = max(rusher_yards_gained),
rec_yards = sum(receiver_yards_gained),
rec_td = sum(receiver_touchdown),
rec_targ = sum(receiver_target),
rec_incomplete_target = sum(receiver_incomplete_target),
rec_cmp = sum(receiver_reception),
rec_long = max(receiver_yards_gained),
rec_airyards = sum(receiver_air_yards),
rec_yac = sum(receiver_yac),
rec_two_pt_success = sum(receiver_two_pt_success),
rec_two_pt_att = sum(receiver_two_pt_att),
rec_adot = sum(receiver_air_yards) / sum(receiver_target)
) %>%
mutate(DKPassing =
(pass_td * 4) +
if_else(pass_yards >= 300, 3, 0) +
(pass_yards * .04) +
(pass_interception * -1),
DKRushing =
(rush_td * 6) +
(rush_yards * .1) +
if_else(rush_yards >= 100, 3, 0),
DKReceiving =
(rec_td * 6) +
(rec_yards * .1) +
if_else(rec_yards >= 100, 3, 0) +
(rec_cmp * 1),
DKKicking = xp_made + (fg_made_0_39 * 3) + (fg_made_40_49 * 4) + (fg_made_50_plus * 5)
) %>%
group_by(season,
week,
game_id,
team,
defteam,
game_date) %>%
mutate(team_targets = sum(rec_targ),
team_airyards = sum(rec_airyards),
team_carries = sum(rush_atts),
team_rz_rec_targ = sum(rz_rec_targ),
team_rz_rush_atts = sum(rz_rush_atts),
team_gz_rec_targ = sum(gz_rec_targ),
team_gz_rush_atts = sum(gz_rush_atts)) %>%
ungroup() %>%
mutate(rec_targ_share = rec_targ / team_targets,
rec_airyards_share = rec_airyards / team_airyards,
rec_wopr = (1.5 * rec_targ_share) + (.7 * rec_airyards_share),
rec_racr = rec_yards / rec_airyards,
rush_carry_share = rush_atts / team_carries,
rz_carry_share = rz_rush_atts / team_rz_rush_atts,
gz_carry_share = gz_rush_atts / team_gz_rush_atts,
rz_rec_targ_share = rz_rec_targ / team_rz_rec_targ,
gz_rec_targ_share = gz_rec_targ / team_gz_rec_targ
) %>%
decode_player_ids() %>%
rename( "gsis_id" = "player_id") %>%
left_join(rosters_2020) %>%
inner_join(nfl_team_list %>% select(
team_abbr,
team_name,
team_logo_wikipedia,
team_wordmark
), by = c("team" = "team_abbr"))
## Thielen in week 1 or so has a player name of NA on a 2pt
# Persist the per-player weekly stats table built above.
saveRDS(nfl_stats_2020, file = "nfl_stats_2020.rds")

# Pull ESPN QBR for every (season, week) pair present in the stats table,
# then align the week column name and type with `nfl_stats_2020`.
espn_qbr_2020 <-
  nfl_stats_2020 %>%
  distinct(season, week) %>%
  {map2_df(.$season, .$week, espnscrapeR::get_nfl_qbr)} %>%
  rename(week = game_week) %>%
  mutate(week = as.double(week))
# Fix: the target file carries a .rds extension, so it must be written with
# saveRDS() (readRDS-compatible); save() writes the multi-object .RData
# format and readRDS("espn_qbr_2020.rds") would fail on it.
saveRDS(espn_qbr_2020, file = "espn_qbr_2020.rds")

## Team List ##
# Team lookup table with pre-relocation abbreviations dropped.
# NOTE(review): nfl_team_list is referenced earlier in this file (inner_join
# in the stats pipeline); in a fresh session this definition must run first.
nfl_team_list <-
  nflfastR::teams_colors_logos %>%
  filter(!team_abbr %in% c("STL", "OAK", "LAR", "SD" ))
# Named vector mapping full team names to abbreviations (for UI inputs etc.).
name_list <-
  set_names(nfl_team_list$team_abbr, nfl_team_list$team_name)
saveRDS(nfl_team_list, file = here("nfl_team_list.rds")) |
# Build a dated output filename such as "treatment_20240131.csv".
# Fix: the original called stringr's str_replace_all()/str_c() without
# library(stringr) being loaded anywhere in this script; the base-R
# equivalents below produce byte-identical results with no dependency.
current_date <- format(Sys.Date(), "%Y%m%d")
filename <- paste0("treatment", "_", current_date, ".csv")
filename | /create_filename_with_current_date.R | no_license | sdevine188/assorted_helper_scripts | R | false | false | 172 | r | current_date <- Sys.Date()
# Strip the date separators: "2024-01-31" -> "20240131" (result is character).
current_date <- str_replace_all(current_date, pattern = "-", replacement = "")
# Compose the dated output name, e.g. "treatment_20240131.csv".
filename <- str_c("treatment", "_", current_date, ".csv")
# Echo the result (auto-printed when run at top level).
filename
#' Plots of species ranges on maps of environmental variables
#'
#' @description ranges_emaps plots one or more ranges of the same species on
#' various maps of environmental factors (e.g. climatic variables) to detect
#' implications of using one or other type of range regarding the environmental
#' conditions in the areas.
#'
#' @param ... one or more objects of class \code{\link{sp_range}} produced with
#' any of the following functions: \code{\link{rangemap_buffer}},
#' \code{\link{rangemap_boundaries}}, \code{\link{rangemap_hull}},
#' \code{\link{rangemap_enm}}, and/or \code{\link{rangemap_tsa}}. Using up to
#' three or four ranges is recommended for more precise comparisons.
#' @param variables a RasterLayer or RasterStack object of environmental variables
#' that will be used as the base for maps. Projection is assumed to be WGS84
#' (EPSG:4326). Consider that depending on the species range, using more than 9
#' variables creates a plot that may not fit in an A4 paper sheet. A maximum of
#' 21 variables is allowed, if this limit is surpassed, other variables will be
#' ignored.
#' @param add_occurrences (logical) if \code{TRUE}, species occurrences contained
#' in \code{\link{sp_range}} objects will be added to the figure. Default =
#' \code{FALSE}. If the none of the objects contains occurrences, this argument
#' will be ignored.
#' @param range_colors vector of colors for borders of species ranges. If
#' \code{NULL}, the default, distinct levels of gray will be used. If more than
#' 3 \code{\link{sp_range}} objects are included, defining your own colors is
#' recommended.
#' @param color_variables a color palette (a vector of continuous colors generated
#' by functions like heat.colors). If \code{NULL}, the default, rev(terrain.colors(255))
#' will be used.
#' @param ranges_legend (logical) if \code{TRUE}, a legend of the plotted ranges
#' will be added to the last panel of the plot at \code{legend_position}.
#' Default = \code{TRUE}.
#' @param legend_position (numeric or character) location where the legend will
#' be placed in the plot. If numeric, vector of length = 2 indicating x and y
#' coordinates to position the legend. See details in \code{\link[graphics]{legend}}
#' for character options of position. Default = "bottomright".
#' @param legend_cex (numeric) size of the legend with respect to \code{cex}
#' option in \code{\link[graphics]{par}}. Default = 0.7.
#' @param zoom (numeric) zoom factor when plotting the species range in a map
#' (based on the largest range). Default = 0.7. Larger values will zoom in into
#' the species range and smaller values will zoom out. A value of 0.5 will
#' duplicate the area that the biggest range is covering.
#' @param verbose (logical) whether or not to print messages about the process.
#' Default = TRUE.
#'
#' @return
#' A plot showing species ranges on top of maps of environmental variables.
#'
#' @details
#' Position of distinct elements depend on the spatial configuration of the
#' species range. Therefore, their position may need to be changed if such
#' elements are needed (e.g., legend). Current character options available for
#' position are: "bottomright", "bottomleft", "topleft", and "topright".
#'
#' @usage
#' ranges_emaps(..., variables, add_occurrences = FALSE,
#' range_colors = NULL, color_variables = NULL,
#' ranges_legend = TRUE, legend_position = "bottomright",
#' legend_cex = 0.7, zoom = 0.7, verbose = TRUE)
#'
#' @export
#'
#' @importFrom sp CRS
#' @importFrom raster projectRaster
#' @importFrom grDevices gray.colors terrain.colors
#' @importFrom graphics legend par
#'
#' @examples
#' # example data
#' data("buffer_range", package = "rangemap")
#' data("cxhull_range", package = "rangemap")
#' data("cvehull_range", package = "rangemap")
#'
#' vars <- raster::stack(system.file("extdata", "variables.tif",
#' package = "rangemap"))
#' names(vars) <- c("bio5", "bio6", "bio13", "bio14")
#'
#' # plotting
#' ranges_emaps(buffer_range, cxhull_range, cvehull_range, variables = vars)
ranges_emaps <- function(..., variables, add_occurrences = FALSE,
                         range_colors = NULL, color_variables = NULL,
                         ranges_legend = TRUE, legend_position = "bottomright",
                         legend_cex = 0.7, zoom = 0.7, verbose = TRUE) {
  # testing potential issues
  if (missing(...)) {
    stop("Argument '...' is necessary to perform the analysis")
  } else {
    ranges <- list(...)
    if (length(ranges) < 1) {
      stop("At least one sp_range* object is needed to produce plots.")
    }
    # vapply() (unlike sapply()) guarantees a character vector of classes
    cls <- vapply(ranges, function(x) {class(x)[1]}, character(1))
    if (any(!cls %in% c("sp_range", "sp_range_iucn", "sp_range_iucnextra"))) {
      stop("All objects to be plotted must be of class sp_range*.")
    }
  }
  if (missing(variables)) {
    stop("Argument 'variables' must be defined. See function's help for details.")
  }
  # preparing data
  ## plain projection, taken from the first range (assumed WGS84)
  WGS84 <- ranges[[1]]@species_range@proj4string
  ## extracting the geometry and display name of every sp_range* object
  sp_ranges <- lapply(ranges, function(x) {x@species_range})
  rnames <- vapply(ranges, function(x) {x@name}, character(1))
  if (isTRUE(add_occurrences)) {
    if (any(cls %in% c("sp_range_iucn", "sp_range_iucnextra"))) {
      ## occurrences come from the first object that carries them
      wh <- which(cls %in% c("sp_range_iucn", "sp_range_iucnextra"))[1]
      sp_records <- ranges[[wh]]@species_unique_records
    } else {
      add_occurrences <- FALSE
      sp_records <- NULL
      if (isTRUE(verbose)) {
        message("None of the objects contain occurrences.")
      }
    }
  } else {
    sp_records <- NULL
  }
  # par settings are modified below; restore them when the function exits
  opar <- par(no.readonly = TRUE)
  on.exit(par(opar))
  # variable treatment
  ## panel layouts are only defined for up to 21 layers, so cap the stack
  if (raster::nlayers(variables) > 21) {
    variables <- variables[[1:21]]
    if (isTRUE(verbose)) {
      message("Only 21 'variables' will be used. See function's help.")
    }
  }
  ## reproject the rasters to the ranges' CRS
  variables <- raster::projectRaster(variables, crs = WGS84@projargs)
  # x and y limits of plots, from the union of all range bounding boxes
  xbox <- unlist(lapply(sp_ranges, function(x) {c(x@bbox[1, ])}))
  ybox <- unlist(lapply(sp_ranges, function(x) {c(x@bbox[2, ])}))
  xbox <- as.numeric(c(min(xbox), max(xbox)))
  ybox <- as.numeric(c(min(ybox), max(ybox)))
  ## expand the joint extent according to the zoom factor (zoom < 1 zooms out)
  xlim <- c(xbox[1] - ((((xbox[2] - xbox[1]) * 1/zoom) - (xbox[2] - xbox[1])) / 2),
            xbox[2] + ((((xbox[2] - xbox[1]) * 1/zoom) - (xbox[2] - xbox[1])) / 2))
  ylim <- c(ybox[1] - ((((ybox[2] - ybox[1]) * 1/zoom) - (ybox[2] - ybox[1])) / 2),
            ybox[2] + ((((ybox[2] - ybox[1]) * 1/zoom) - (ybox[2] - ybox[1])) / 2))
  # plot
  ## mfrow layouts (rows, columns) indexed by the number of layers (1-21)
  fig_conf <- list(c(1, 1), c(1, 2), c(1, 3), c(2, 2), c(2, 3), c(2, 3), c(3, 3),
                   c(3, 3), c(3, 3), c(4, 3), c(4, 3), c(4, 3), c(5, 3), c(5, 3),
                   c(5, 3), c(6, 3), c(6, 3), c(6, 3), c(7, 3), c(7, 3), c(7, 3))
  ## direct lookup; the original scanned every layer count in a loop
  fig_config <- fig_conf[[raster::nlayers(variables)]]
  # range and variable colors (defaults: grays for ranges, terrain ramp)
  if (is.null(range_colors)) {
    range_colors <- gray.colors(length(sp_ranges))
  }
  if (is.null(color_variables)) {
    color_variables <- rev(terrain.colors(255))
  }
  par(mar = c(0, 0, 0, 3), mfrow = fig_config)
  ## one panel per variable, with the ranges (and records) overlaid
  for (i in seq_len(raster::nlayers(variables))) {
    plot_ranges(sp_ranges, sp_records, variable = variables[[i]],
                range_colors, color_variables, xlim, ylim)
  }
  ## legend of plotted ranges, drawn on the last panel
  if (isTRUE(ranges_legend)) {
    # is.character() replaces class(x) == "character", which mis-handles
    # objects whose class attribute has more than one element
    if (is.character(legend_position)) {
      if (isTRUE(add_occurrences)) {
        legend(legend_position, legend = c("Occurrences", rnames), bty = "n",
               inset = 0.03, pch = c(20, rep(22, length(rnames))),
               pt.cex = c(1, rep(1.5, length(rnames))), cex = legend_cex,
               col = c("black", range_colors))
      } else {
        legend(legend_position, legend = rnames, bty = "n", inset = 0.03,
               pch = c(rep(22, length(rnames))), cex = legend_cex,
               col = range_colors, pt.cex = c(rep(1.5, length(rnames))))
      }
    } else {
      ## numeric position: first two values are the x and y coordinates
      xleg <- legend_position[1]
      yleg <- legend_position[2]
      if (isTRUE(add_occurrences)) {
        legend(x = xleg, y = yleg, legend = c("Occurrences", rnames), bty = "n",
               inset = 0.03, pch = c(20, rep(22, length(rnames))),
               pt.cex = c(1, rep(1.5, length(rnames))),
               cex = legend_cex, col = c("black", range_colors))
      } else {
        legend(x = xleg, y = yleg, legend = rnames, bty = "n", inset = 0.03,
               pch = c(rep(22, length(rnames))), cex = legend_cex,
               col = range_colors, pt.cex = c(rep(1.5, length(rnames))))
      }
    }
  }
}
| /R/ranges_emaps.R | no_license | marlonecobos/rangemap | R | false | false | 8,782 | r | #' Plots of species ranges on maps of environmental variables
#'
#' @description ranges_emaps plots one or more ranges of the same species on
#' various maps of environmental factors (e.g. climatic variables) to detect
#' implications of using one or other type of range regarding the environmental
#' conditions in the areas.
#'
#' @param ... one or more objects of class \code{\link{sp_range}} produced with
#' any of the following functions: \code{\link{rangemap_buffer}},
#' \code{\link{rangemap_boundaries}}, \code{\link{rangemap_hull}},
#' \code{\link{rangemap_enm}}, and/or \code{\link{rangemap_tsa}}. Using up to
#' three or four ranges is recommended for more precise comparisons.
#' @param variables a RasterLayer or RasterStack object of environmental variables
#' that will be used as the base for maps. Projection is assumed to be WGS84
#' (EPSG:4326). Consider that depending on the species range, using more than 9
#' variables creates a plot that may not fit in an A4 paper sheet. A maximum of
#' 21 variables is allowed, if this limit is surpassed, other variables will be
#' ignored.
#' @param add_occurrences (logical) if \code{TRUE}, species occurrences contained
#' in \code{\link{sp_range}} objects will be added to the figure. Default =
#' \code{FALSE}. If the none of the objects contains occurrences, this argument
#' will be ignored.
#' @param range_colors vector of colors for borders of species ranges. If
#' \code{NULL}, the default, distinct levels of gray will be used. If more than
#' 3 \code{\link{sp_range}} objects are included, defining your own colors is
#' recommended.
#' @param color_variables a color palette (a vector of continuous colors generated
#' by functions like heat.colors). If \code{NULL}, the default, rev(terrain.colors(255))
#' will be used.
#' @param ranges_legend (logical) if \code{TRUE}, a legend of the plotted ranges
#' will be added to the last panel of the plot at \code{legend_position}.
#' Default = \code{TRUE}.
#' @param legend_position (numeric or character) location where the legend will
#' be placed in the plot. If numeric, vector of length = 2 indicating x and y
#' coordinates to position the legend. See details in \code{\link[graphics]{legend}}
#' for character options of position. Default = "bottomright".
#' @param legend_cex (numeric) size of the legend with respect to \code{cex}
#' option in \code{\link[graphics]{par}}. Default = 0.7.
#' @param zoom (numeric) zoom factor when plotting the species range in a map
#' (based on the largest range). Default = 0.7. Larger values will zoom in into
#' the species range and smaller values will zoom out. A value of 0.5 will
#' duplicate the area that the biggest range is covering.
#' @param verbose (logical) whether or not to print messages about the process.
#' Default = TRUE.
#'
#' @return
#' A plot showing species ranges on top of maps of environmental variables.
#'
#' @details
#' Position of distinct elements depend on the spatial configuration of the
#' species range. Therefore, their position may need to be changed if such
#' elements are needed (e.g., legend). Current character options available for
#' position are: "bottomright", "bottomleft", "topleft", and "topright".
#'
#' @usage
#' ranges_emaps(..., variables, add_occurrences = FALSE,
#' range_colors = NULL, color_variables = NULL,
#' ranges_legend = TRUE, legend_position = "bottomright",
#' legend_cex = 0.7, zoom = 0.7, verbose = TRUE)
#'
#' @export
#'
#' @importFrom sp CRS
#' @importFrom raster projectRaster
#' @importFrom grDevices gray.colors terrain.colors
#' @importFrom graphics legend par
#'
#' @examples
#' # example data
#' data("buffer_range", package = "rangemap")
#' data("cxhull_range", package = "rangemap")
#' data("cvehull_range", package = "rangemap")
#'
#' vars <- raster::stack(system.file("extdata", "variables.tif",
#' package = "rangemap"))
#' names(vars) <- c("bio5", "bio6", "bio13", "bio14")
#'
#' # plotting
#' ranges_emaps(buffer_range, cxhull_range, cvehull_range, variables = vars)
ranges_emaps <- function(..., variables, add_occurrences = FALSE,
                         range_colors = NULL, color_variables = NULL,
                         ranges_legend = TRUE, legend_position = "bottomright",
                         legend_cex = 0.7, zoom = 0.7, verbose = TRUE) {
  # testing potential issues
  if (missing(...)) {
    stop("Argument '...' is necessary to perform the analysis")
  } else {
    ranges <- list(...)
    if (length(ranges) < 1) {
      stop("At least one sp_range* object is needed to produce plots.")
    }
    # vapply() (unlike sapply()) guarantees a character vector of classes
    cls <- vapply(ranges, function(x) {class(x)[1]}, character(1))
    if (any(!cls %in% c("sp_range", "sp_range_iucn", "sp_range_iucnextra"))) {
      stop("All objects to be plotted must be of class sp_range*.")
    }
  }
  if (missing(variables)) {
    stop("Argument 'variables' must be defined. See function's help for details.")
  }
  # preparing data
  ## plain projection, taken from the first range (assumed WGS84)
  WGS84 <- ranges[[1]]@species_range@proj4string
  ## extracting the geometry and display name of every sp_range* object
  sp_ranges <- lapply(ranges, function(x) {x@species_range})
  rnames <- vapply(ranges, function(x) {x@name}, character(1))
  if (isTRUE(add_occurrences)) {
    if (any(cls %in% c("sp_range_iucn", "sp_range_iucnextra"))) {
      ## occurrences come from the first object that carries them
      wh <- which(cls %in% c("sp_range_iucn", "sp_range_iucnextra"))[1]
      sp_records <- ranges[[wh]]@species_unique_records
    } else {
      add_occurrences <- FALSE
      sp_records <- NULL
      if (isTRUE(verbose)) {
        message("None of the objects contain occurrences.")
      }
    }
  } else {
    sp_records <- NULL
  }
  # par settings are modified below; restore them when the function exits
  opar <- par(no.readonly = TRUE)
  on.exit(par(opar))
  # variable treatment
  ## panel layouts are only defined for up to 21 layers, so cap the stack
  if (raster::nlayers(variables) > 21) {
    variables <- variables[[1:21]]
    if (isTRUE(verbose)) {
      message("Only 21 'variables' will be used. See function's help.")
    }
  }
  ## reproject the rasters to the ranges' CRS
  variables <- raster::projectRaster(variables, crs = WGS84@projargs)
  # x and y limits of plots, from the union of all range bounding boxes
  xbox <- unlist(lapply(sp_ranges, function(x) {c(x@bbox[1, ])}))
  ybox <- unlist(lapply(sp_ranges, function(x) {c(x@bbox[2, ])}))
  xbox <- as.numeric(c(min(xbox), max(xbox)))
  ybox <- as.numeric(c(min(ybox), max(ybox)))
  ## expand the joint extent according to the zoom factor (zoom < 1 zooms out)
  xlim <- c(xbox[1] - ((((xbox[2] - xbox[1]) * 1/zoom) - (xbox[2] - xbox[1])) / 2),
            xbox[2] + ((((xbox[2] - xbox[1]) * 1/zoom) - (xbox[2] - xbox[1])) / 2))
  ylim <- c(ybox[1] - ((((ybox[2] - ybox[1]) * 1/zoom) - (ybox[2] - ybox[1])) / 2),
            ybox[2] + ((((ybox[2] - ybox[1]) * 1/zoom) - (ybox[2] - ybox[1])) / 2))
  # plot
  ## mfrow layouts (rows, columns) indexed by the number of layers (1-21)
  fig_conf <- list(c(1, 1), c(1, 2), c(1, 3), c(2, 2), c(2, 3), c(2, 3), c(3, 3),
                   c(3, 3), c(3, 3), c(4, 3), c(4, 3), c(4, 3), c(5, 3), c(5, 3),
                   c(5, 3), c(6, 3), c(6, 3), c(6, 3), c(7, 3), c(7, 3), c(7, 3))
  ## direct lookup; the original scanned every layer count in a loop
  fig_config <- fig_conf[[raster::nlayers(variables)]]
  # range and variable colors (defaults: grays for ranges, terrain ramp)
  if (is.null(range_colors)) {
    range_colors <- gray.colors(length(sp_ranges))
  }
  if (is.null(color_variables)) {
    color_variables <- rev(terrain.colors(255))
  }
  par(mar = c(0, 0, 0, 3), mfrow = fig_config)
  ## one panel per variable, with the ranges (and records) overlaid
  for (i in seq_len(raster::nlayers(variables))) {
    plot_ranges(sp_ranges, sp_records, variable = variables[[i]],
                range_colors, color_variables, xlim, ylim)
  }
  ## legend of plotted ranges, drawn on the last panel
  if (isTRUE(ranges_legend)) {
    # is.character() replaces class(x) == "character", which mis-handles
    # objects whose class attribute has more than one element
    if (is.character(legend_position)) {
      if (isTRUE(add_occurrences)) {
        legend(legend_position, legend = c("Occurrences", rnames), bty = "n",
               inset = 0.03, pch = c(20, rep(22, length(rnames))),
               pt.cex = c(1, rep(1.5, length(rnames))), cex = legend_cex,
               col = c("black", range_colors))
      } else {
        legend(legend_position, legend = rnames, bty = "n", inset = 0.03,
               pch = c(rep(22, length(rnames))), cex = legend_cex,
               col = range_colors, pt.cex = c(rep(1.5, length(rnames))))
      }
    } else {
      ## numeric position: first two values are the x and y coordinates
      xleg <- legend_position[1]
      yleg <- legend_position[2]
      if (isTRUE(add_occurrences)) {
        legend(x = xleg, y = yleg, legend = c("Occurrences", rnames), bty = "n",
               inset = 0.03, pch = c(20, rep(22, length(rnames))),
               pt.cex = c(1, rep(1.5, length(rnames))),
               cex = legend_cex, col = c("black", range_colors))
      } else {
        legend(x = xleg, y = yleg, legend = rnames, bty = "n", inset = 0.03,
               pch = c(rep(22, length(rnames))), cex = legend_cex,
               col = range_colors, pt.cex = c(rep(1.5, length(rnames))))
      }
    }
  }
}
|
# LiDAR AOI preparation script: reads a JSON config naming a data folder and
# an AOI shapefile, opens the LAS catalogue, and builds output directory
# names. The clip/normalise steps themselves are currently commented out.
# Usage: Rscript <script> <baseDir> <config.json>
library(rjson)
library(lidR)
library(raster)
library(rgdal)
library(EBImage)
library(stringr)
# Command-line arguments: [1] base directory, [2] path to the JSON config
# (which supplies `dir` and `shpFile`).
arg <- commandArgs(TRUE)
baseDir <- arg[1]
result <- fromJSON(file = arg[2])
wd <- paste(baseDir, result["dir"], "/", sep = "")
shpFile <- paste(wd, result["shpFile"], sep = "")
# Echo the resolved paths (auto-printed when run at top level).
baseDir
wd
shpFile
# NOTE(review): `if(TRUE)` is a development toggle; the block always runs.
if(TRUE){
  aoi <- readOGR(shpFile)
  ctg <- readLAScatalog(wd)
  # Sanity-check the catalogue before any processing.
  lascheck(ctg)
  # Query current chunk-processing options; inside a braced block these
  # values are not printed — presumably left over from interactive use.
  opt_chunk_buffer(ctg)
  opt_chunk_size(ctg)
  opt_chunk_alignment(ctg)
  opt_wall_to_wall(ctg)
  # NOTE(review): ".shp" is a regex here, so the dot matches any character;
  # str_remove(shpFile, fixed(".shp")) would be safer — TODO confirm.
  outputClippedDir <- paste(str_remove(shpFile, ".shp"), "_clipped", sep = "")
  outputClippedDir
  #opt_output_files(ctg) <- outputClippedDir
  #las_clip <- lasclip(ctg, aoi)
  outputNormalizeDir <- paste(str_remove(shpFile, ".shp"), "_normalized", sep = "")
  outputNormalizeDir
  #opt_output_files(las_clip) <- outputNormalizeDir
  #las_normal <- lasnormalize(las_clip, tin())
}
| /ec2_batch/CanyonCreekLidar.R | no_license | kitnixx/canyoncreek | R | false | false | 882 | r | library(rjson)
# Second listing of the LiDAR AOI prep script (its library(rjson) line sits
# on the preceding metadata row). Reads a JSON config, opens the LAS
# catalogue, and builds output paths; clip/normalise steps are commented out.
library(lidR)
library(raster)
library(rgdal)
library(EBImage)
library(stringr)
# arg[1] = base directory, arg[2] = JSON config path.
arg <- commandArgs(TRUE)
baseDir <- arg[1]
result <- fromJSON(file = arg[2])
wd <- paste(baseDir, result["dir"], "/", sep = "")
shpFile <- paste(wd, result["shpFile"], sep = "")
# Echo resolved paths (auto-printed at top level).
baseDir
wd
shpFile
# Always-on development toggle.
if(TRUE){
  aoi <- readOGR(shpFile)
  ctg <- readLAScatalog(wd)
  lascheck(ctg)
  opt_chunk_buffer(ctg)
  opt_chunk_size(ctg)
  opt_chunk_alignment(ctg)
  opt_wall_to_wall(ctg)
  # ".shp" is treated as a regex (dot matches anything) — TODO confirm intent.
  outputClippedDir <- paste(str_remove(shpFile, ".shp"), "_clipped", sep = "")
  outputClippedDir
  #opt_output_files(ctg) <- outputClippedDir
  #las_clip <- lasclip(ctg, aoi)
  outputNormalizeDir <- paste(str_remove(shpFile, ".shp"), "_normalized", sep = "")
  outputNormalizeDir
  #opt_output_files(las_clip) <- outputNormalizeDir
  #las_normal <- lasnormalize(las_clip, tin())
}
|
# Auto-generated AFL fuzz case for the internal C++ routine myTAI:::cpp_TAI.
# ExpressionSet is a 5x7 numeric matrix of extreme and zero values;
# Phylostratum is an empty numeric vector (edge-case input).
testlist <- list(ExpressionSet = structure(c(3.10503529562433e+231, 1.23181983389617e+58, 1.52478221747732e+245, 6.36967296041789e+178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), Phylostratum = numeric(0))
# Invoke the fuzz target with the generated argument list.
result <- do.call(myTAI:::cpp_TAI,testlist)
str(result) | /myTAI/inst/testfiles/cpp_TAI/AFL_cpp_TAI/cpp_TAI_valgrind_files/1615762333-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 334 | r | testlist <- list(ExpressionSet = structure(c(3.10503529562433e+231, 1.23181983389617e+58, 1.52478221747732e+245, 6.36967296041789e+178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), Phylostratum = numeric(0))
# Re-run of the fuzz case; `testlist` comes from the preceding lines.
result <- do.call(myTAI:::cpp_TAI,testlist)
# Print the structure of whatever the C++ routine returned.
str(result)
# plot2.R: line plot of Global Active Power for 2007-02-01 and 2007-02-02
# from the household power consumption file; writes plot2.png (480x480 px).
# Note: the original began with rm(list=ls()); wiping the global environment
# from inside a script is unsafe practice and has been removed.
projdata1<-read.table("household_power_consumption.txt",header=TRUE,sep=";",stringsAsFactors=FALSE)
# Keep only the two target days (the Date column is d/m/Y text).
projuse<-projdata1[projdata1$Date=="1/2/2007"|projdata1$Date=="2/2/2007",]
# Coerce readings to numeric; non-numeric missing markers become NA.
projuse$Global_active_power<-as.numeric(projuse$Global_active_power)
# Combine date and time, then parse into date-times for the x axis.
projuse$Newdate<-paste(projuse$Date,projuse$Time)
projuse$Newdate<-strptime(projuse$Newdate,"%d/%m/%Y %H:%M:%S")
png(filename="plot2.png",width=480,height=480,units="px")
plot(projuse$Newdate,projuse$Global_active_power,type="l",xlab="",ylab="Global Active Power(kilowatts)")
dev.off()
| /plot2.R | no_license | lsbillups/ExData_Plotting1 | R | false | false | 542 | r | rm(list=ls())
# Second listing of plot2.R: Global Active Power line plot for
# 2007-02-01/02, written to plot2.png (480x480 px).
projdata1<-read.table("household_power_consumption.txt",header=T,sep=";",stringsAsFactors=FALSE)
# Keep only the two target days (Date is d/m/Y text).
projuse<-projdata1[projdata1$Date=="1/2/2007"|projdata1$Date=="2/2/2007",]
# Coerce to numeric; non-numeric missing markers become NA (with a warning).
projuse$Global_active_power<-as.numeric(projuse$Global_active_power)
# Combine date and time, then parse into date-times for the x axis.
projuse$Newdate<-paste(projuse$Date,projuse$Time)
projuse$Newdate<-strptime(projuse$Newdate,"%d/%m/%Y %H:%M:%S")
png(filename="plot2.png",width=480,height=480,units="px")
plot(projuse$Newdate,projuse$Global_active_power,type="l",xlab="",ylab="Global Active Power(kilowatts)")
dev.off()
|
# Model interpretability for the sensor-35 xgboost model: centred
# accumulated profiles (DALEX) and accumulated local effects (ALE) plots.
library(DALEXtra)
# build explainer ------
# Wrap the fitted tidymodels workflow (xgb_fit, defined elsewhere) so DALEX
# can probe it; `train` holds the training data with target column `35`.
explain_xgb <- explain_tidymodels(
  model = xgb_fit,
  data = train %>% select(-`35`),
  y = train %>% select(`35`),
  label = "xgboost"
)
# create data model for plotting ------
# The ten strongest features (sensor ids plus `lifetime`) to profile.
vars <- c("lifetime", "26", "44", "31", "13",
          "33", "5", "2", "49", "14")
# Compute a centred accumulated profile for one feature (indexed into `vars`
# by `var`) from the xgboost explainer. Returns the aggregated profile as a
# tibble tagged with the feature name, with underscores stripped from the
# DALEX column names (e.g. "_x_" -> "x").
create_ice <- function(var) {
  pdp_xgb <- model_profile(explain_xgb,
                           N = 50,
                           center = TRUE,
                           type = "accumulated",
                           # k = 10, # controls for number of clusters
                           variables = as.character(vars[var]))
  pdp_xgb$agr_profiles %>%
    as_tibble() %>%
    mutate(var = as.character(vars[var])) %>%
    # rename_with() replaces the deprecated rename_all(funs(...)) idiom
    rename_with(~ stringr::str_replace_all(.x, "_", ""))
}
# Stack the per-feature profiles into one long tibble for faceted plotting.
# seq_along(vars) is safe when `vars` is empty, unlike 1:length(vars).
to_ice_plot <- map(seq_along(vars), ~create_ice(.x)) %>%
  reduce(bind_rows)
# plotting ------
# Faceted profile curves: one panel per feature, free x scales, rug marks
# for observed feature values.
to_ice_plot %>%
  ggplot(aes(x, yhat)) +
  geom_line(size = 0.8) +
  geom_rug() +
  facet_wrap(~var, nrow = 2,
             scales = "free_x") +
  labs(title = "Relationship between Target (Sensor 35) and Used Feautures",
       x = "Feature Values",
       y = "Sensor 35 Values") +
  theme_bw() +
  theme(legend.position = "none")
ggsave("08_ICE XGBoost Model.png", path = "Plots",
       width = 10, height = 6, dpi = 300)
# build ALE plots
# create training data in format
# Character columns are converted to factors for the ALE routine.
to_ale <- train %>%
  mutate_if(is.character, as.factor)
# Map each column to its class and positional index.
feature_classes <- to_ale %>%
  summarise_all(class) %>%
  t() %>%
  as.data.frame() %>%
  rownames_to_column() %>%
  rename(feature = 1, class = 2) %>%
  mutate(row_number = row_number())
# Column indices of the numeric features (ALE is computed per column index).
numerical_features <- feature_classes %>%
  filter(class == "numeric") %>%
  pull(row_number)
# normally filtering out, but all classes are numeric
# Refit the tuned workflow on the full training set for ALE evaluation.
xgb_model <- finalize_workflow(workflow_xgb,
                               tuned_xgb %>%
                                 select_best()) %>%
  fit(train)
# create ALE
tic_ale <- Sys.time()
# One ALE curve (K = 100 intervals) per numeric feature. safely() yields
# NULL in $result on failure — presumably tolerated downstream; TODO confirm
# as_tibble(NULL) handling. custom_ale and predict_tidymodels are defined
# elsewhere in the project.
ale <- map(
  numerical_features,
  ~safely(custom_ale)(X = as.data.frame(to_ale),
                      X.model = xgb_model,
                      pred.fun = predict_tidymodels,
                      J = .x,
                      K = 100)$result %>%
    as_tibble() %>%
    mutate(feature = colnames(to_ale)[.x]) %>%
    select(feature, everything())) %>%
  bind_rows()
# TODO: try out with previous finals, especially final_workflow_xgb
# does not need the actual final model (model_final_xgb), which contains the final parameters
# does not need the final model and its fit (so model_final_xgb %>% fit + formula etc.)
# does not need final_workflow_xgb because we need a pure workflow, not a full definition of recipe etc.
# Elapsed ALE computation time (outer parentheses force printing).
(toc_ale <- Sys.time() - tic_ale)
# plot ALE
# Faceted ALE curves for the ten strongest features, centred on zero effect.
ale %>%
  filter(feature %in% c("lifetime", "26", "44", "31", "13",
                        "33", "5", "2", "49", "14")) %>%
  ggplot(aes(x.values, f.values, color = feature)) +
  geom_hline(yintercept = 0) +
  geom_line(size = .8) +
  geom_rug(color = "black", alpha = 0.5, sides = "b") +
  facet_wrap(~feature, scales = "free_x") +
  labs(title = "Accumulated Local Effects for 10 strongest features",
       subtitle = "Only sensor 44 and lifetime show noteworthy effect on the predictor, \nwith sensor 26 and 31 showing contextual influence",
       x = "Sensor Values",
       y = "Relative Importance") +
  theme_bw() +
  theme(legend.position = "none")
ggsave("09_ALE XGBoost Model.png", path = "Plots",
       width = 10, height = 6, dpi = 300)
| /interpretability.R | no_license | aleksejhoffaerber/sensordata | R | false | false | 3,565 | r | library(DALEXtra)
# build explainer ------
# (Second listing.) Wrap the fitted tidymodels workflow for DALEX probing;
# `train` holds the training data with target column `35`.
explain_xgb <- explain_tidymodels(
  model = xgb_fit,
  data = train %>% select(-`35`),
  y = train %>% select(`35`),
  label = "xgboost"
)
# create data model for plotting ------
# Ten features to profile (sensor ids plus `lifetime`).
vars <- c("lifetime", "26", "44", "31", "13",
          "33", "5", "2", "49", "14")
# Compute a centred accumulated profile for one feature (indexed into `vars`
# by `var`) from the xgboost explainer. Returns the aggregated profile as a
# tibble tagged with the feature name, with underscores stripped from the
# DALEX column names.
create_ice <- function(var) {
  pdp_xgb <- model_profile(explain_xgb,
                           N = 50,
                           center = TRUE,
                           type = "accumulated",
                           # k = 10, # controls for number of clusters
                           variables = as.character(vars[var]))
  pdp_xgb$agr_profiles %>%
    as_tibble() %>%
    mutate(var = as.character(vars[var])) %>%
    # rename_with() replaces the deprecated rename_all(funs(...)) idiom
    rename_with(~ stringr::str_replace_all(.x, "_", ""))
}
# Stack the per-feature profiles into one long tibble for faceted plotting.
# seq_along(vars) is safe when `vars` is empty, unlike 1:length(vars).
to_ice_plot <- map(seq_along(vars), ~create_ice(.x)) %>%
  reduce(bind_rows)
# plotting ------
# (Second listing.) Faceted profile curves, one panel per feature.
to_ice_plot %>%
  ggplot(aes(x, yhat)) +
  geom_line(size = 0.8) +
  geom_rug() +
  facet_wrap(~var, nrow = 2,
             scales = "free_x") +
  labs(title = "Relationship between Target (Sensor 35) and Used Feautures",
       x = "Feature Values",
       y = "Sensor 35 Values") +
  theme_bw() +
  theme(legend.position = "none")
ggsave("08_ICE XGBoost Model.png", path = "Plots",
       width = 10, height = 6, dpi = 300)
# build ALE plots
# create training data in format
# Convert character columns to factors for the ALE routine.
to_ale <- train %>%
  mutate_if(is.character, as.factor)
# Map each column to its class and positional index.
feature_classes <- to_ale %>%
  summarise_all(class) %>%
  t() %>%
  as.data.frame() %>%
  rownames_to_column() %>%
  rename(feature = 1, class = 2) %>%
  mutate(row_number = row_number())
# Column indices of the numeric features.
numerical_features <- feature_classes %>%
  filter(class == "numeric") %>%
  pull(row_number)
# normally filtering out, but all classes are numeric
# Refit the tuned workflow on the full training set.
xgb_model <- finalize_workflow(workflow_xgb,
                               tuned_xgb %>%
                                 select_best()) %>%
  fit(train)
# create ALE
tic_ale <- Sys.time()
# One ALE curve (K = 100) per numeric feature; custom_ale and
# predict_tidymodels are defined elsewhere in the project.
ale <- map(
  numerical_features,
  ~safely(custom_ale)(X = as.data.frame(to_ale),
                      X.model = xgb_model,
                      pred.fun = predict_tidymodels,
                      J = .x,
                      K = 100)$result %>%
    as_tibble() %>%
    mutate(feature = colnames(to_ale)[.x]) %>%
    select(feature, everything())) %>%
  bind_rows()
# TODO: try out with previous finals, especially final_workflow_xgb
# does not need the actual final model (model_final_xgb), which contains the final parameters
# does not need the final model and its fit (so model_final_xgb %>% fit + formula etc.)
# does not need final_workflow_xgb because we need a pure workflow, not a full definition of recipe etc.
# Elapsed ALE computation time (parentheses force printing).
(toc_ale <- Sys.time() - tic_ale)
# plot ALE
ale %>%
  filter(feature %in% c("lifetime", "26", "44", "31", "13",
                        "33", "5", "2", "49", "14")) %>%
  ggplot(aes(x.values, f.values, color = feature)) +
  geom_hline(yintercept = 0) +
  geom_line(size = .8) +
  geom_rug(color = "black", alpha = 0.5, sides = "b") +
  facet_wrap(~feature, scales = "free_x") +
  labs(title = "Accumulated Local Effects for 10 strongest features",
       subtitle = "Only sensor 44 and lifetime show noteworthy effect on the predictor, \nwith sensor 26 and 31 showing contextual influence",
       x = "Sensor Values",
       y = "Relative Importance") +
  theme_bw() +
  theme(legend.position = "none")
ggsave("09_ALE XGBoost Model.png", path = "Plots",
       width = 10, height = 6, dpi = 300)
|
# Mean value of one pollutant across a set of monitor CSV files.
#   directory: folder containing the monitor CSV files
#   pollutant: name of the column to average (e.g. "sulfate")
#   id:        indices into the directory listing (default: all 332 monitors)
# Returns the mean over all selected files with NA readings removed.
# (The function's closing brace sits on the following, unchanged line.)
pollutantmean <- function(directory, pollutant, id = 1:332)
{
  files <- list.files(directory)[id]
  # Read every selected file, then bind once; growing a data frame with
  # rbind() inside a loop is quadratic in the total number of rows.
  df <- do.call(rbind, lapply(file.path(directory, files), read.csv))
  # NA readings are excluded from the mean.
  mean(df[, pollutant], na.rm = TRUE)
} | /02_RProgramming/Assignment1/pollutantmean.R | no_license | vxrds/Data-Science-Specialization---Johns-Hopkins-University | R | false | false | 300 | r | pollutantmean <- function(directory, pollutant, id = 1:332)
{
  # `id` selects monitor files by position in the directory listing.
  files <- list.files(directory)[id]
  # Read all selected files, then bind in one step instead of growing the
  # data frame with rbind() inside the loop (quadratic behavior).
  df <- do.call(rbind, lapply(file.path(directory, files), read.csv))
  # Average the requested pollutant column, ignoring NA readings.
  mean(df[, pollutant], na.rm = TRUE)
}
####setup####
library(tidyverse)
library(lme4)
library(lmerTest)
library(gridExtra)
library(ggcorrplot)
setwd("~/Dropbox/bean_dip_2018-2024/field trials/data")
####2019####
#import data
# NOTE(review): the name `all` shadows base::all().
all<-read.csv("clean_data/clean_all_2019.csv")
all$variety<-as.factor(all$variety)
all$site<-factor(all$site, levels = c("K", "C", "W", "PH"))
#compare overall trial yield to our plants
#per plot calculations
# plot_yield conversion: presumably per-row-foot density (/8) x grams per plant,
# grams -> lb (454), lb -> bushels (60), scaled to an acre (43560 sq ft) -- TODO confirm.
per_plot<-all %>%
  group_by(site,variety,plot_id,trial_yield) %>%
  summarise(plot_density=mean(density),
            plot_beans_g=mean(beans_g),
            plot_yield=((((plot_density/8)*plot_beans_g)/454)/60)*43560)
#graph both plot level yield metrics together (shared y-limits for comparability)
p1<-ggplot(per_plot,aes(x=variety,y=trial_yield))+geom_boxplot()+facet_wrap(~site,nrow=1)+coord_cartesian(ylim=c(25,100))
p2<-ggplot(per_plot,aes(x=variety,y=plot_yield))+geom_boxplot()+facet_wrap(~site,nrow=1)+coord_cartesian(ylim=c(25,100))
grid.arrange(p1,p2,nrow=2)
#boxplots
boxes<-ggplot(all, aes(x = variety, y= Thickness))
boxes+geom_boxplot()+facet_wrap(~site,nrow = 1)#+coord_cartesian(ylim=c(0,400))
#ggsave(filename = "figures/2019 phi2.tiff",width = 6,height = 4,units = "in")
#modeling
#probably best model from 1st meeting but rank deficient & singular
# NOTE(review): mod4 is re-assigned twice below; only the last (site-only) fit survives.
mod4<-lmer(avg_trichomes~variety*site+(1|site:plot:variety), data=all)
#dropping to one fixed effect solves singularity error
mod4<-lmer(avg_trichomes~variety+(1|site:plot:variety), data=all)
mod4<-lmer(avg_trichomes~site+(1|site:plot:variety), data=all)
#trying new organization still singularity
mod5<-lmer(avg_trichomes~variety*site+(1|plot_id), data=all)
#model results
anova(mod5)
summary(mod5)
ranef(mod5)
#check model assumptions (residuals vs fitted; residual histogram)
plot(mod5)
hist(resid(mod5))
#scatterplots
sp<-ggplot(all, aes(x = Thickness, y= avg_trichomes))
sp+geom_point()+stat_smooth(method="lm")
#color code by variety
# NOTE(review): the comment above says variety but the plot colours by site -- confirm intent.
csp<-ggplot(all, aes(x = beans_g, y= trial_yield, color= site))
csp+geom_point()
#correlation of metrics
#select only metrics of interest
# FIX: trial_yield was listed twice in the original select(); the duplicate is dropped
# (dplyr keeps only the first occurrence anyway, so the result is unchanged).
subsall<-select(all,
                plant_biomass_g,beans_g,healthy,trial_yield,root_biomass_g,functional_nodules,stem_diameter_cm,
                avg_toughness,avg_trichomes,SLA,leaf_dry_matter,osmom_final,pressure_chamber,
                gH.,Phi2,PhiNPQ,Relative.Chlorophyll,Thickness
                )
#heat map with correlation values
# rounded to 1 digit; use="complete.obs" drops any row with a missing value (casewise deletion)
corrdata <- round(cor(subsall,use="complete.obs"), 1)
ggcorrplot(corrdata,type="lower",show.legend=F,lab=T,hc.order = T)
#scatterplot matrices
#all data is too much
#pairs(~.,data=subsall)
#trait data measured by hand
pairs(~SLA+leaf_dry_matter+avg_toughness+avg_trichomes+pressure_chamber,data=subsall)
# TODO(review): the next call was meant to colour points by variety but is an
# exact duplicate of the call above; the colouring was never implemented.
pairs(~SLA+leaf_dry_matter+avg_toughness+avg_trichomes+pressure_chamber,data=subsall)
#trait data from photosynq
pairs(~PhiNPQ+Phi2+Relative.Chlorophyll+gH.,data=subsall)
#harvest data
pairs(~plant_biomass_g+beans_g+healthy+trial_yield+root_biomass_g+functional_nodules+stem_diameter_cm,data=subsall)
#alternate colored matrix
# library(car)
# scatterplotMatrix(~avg_trichomes+Phi2+Relative.Chlorophyll+beans_g|variety,data=all)
#line graphs across time
#calculate means and error bars (se = sd / sqrt(n), per date x variety x site)
damage <- read_csv("clean_data/clean_damage_2019.csv")
avgInsects<-damage %>%
  group_by(date,variety,site) %>%
  summarise(avg_insect_count=mean(insects),se_insect_count=sd(insects)/sqrt(n()))
avgInsects$variety<-as.factor(avgInsects$variety)
#graph insect counts over time, one panel per site
lines<-ggplot(avgInsects, aes(x=date,y=avg_insect_count,colour=variety,group=variety))+facet_wrap(~site,nrow=1)
lines+geom_point()+geom_line()+geom_errorbar(aes(ymin=avg_insect_count-se_insect_count,ymax=avg_insect_count+se_insect_count))
#calculate means and error bars for percent chewing damage
avgChew<-damage %>%
  group_by(date,variety,site) %>%
  summarise(avg_pct_chew=mean(chew_pct),se_pct_chew=sd(chew_pct)/sqrt(n()))
avgChew$variety<-as.factor(avgChew$variety)
#graph chewing damage over time, one panel per site
lines<-ggplot(avgChew, aes(x=date,y=avg_pct_chew,colour=variety,group=variety))+facet_wrap(~site,nrow=1)
lines+geom_point()+geom_line()+geom_errorbar(aes(ymin=avg_pct_chew-se_pct_chew,ymax=avg_pct_chew+se_pct_chew))
#line graphs across environment
#calculate means and error bars (per site x variety; se = sd / sqrt(n))
siteAvgDf<-all %>%
  group_by(site,variety) %>%
  summarise(site_avg_trichomes=mean(avg_trichomes,na.rm=T),
            se_avg_trichomes=sd(avg_trichomes,na.rm = T)/sqrt(n()),
            site_Relative.Chlorophyll=mean(Relative.Chlorophyll,na.rm=T),
            se_Relative.Chlorophyll=sd(Relative.Chlorophyll,na.rm=T)/sqrt(n()),
            site_Phi2=mean(Phi2,na.rm=T),
            se_Phi2=sd(Phi2,na.rm=T)/sqrt(n()),
            site_SLA=mean(SLA,na.rm=T),
            se_SLA=sd(SLA,na.rm = T)/sqrt(n()),
            site_beans_g=mean(beans_g,na.rm=T),
            se_beans_g=sd(beans_g,na.rm = T)/sqrt(n()),
            site_plant_biomass_g=mean(plant_biomass_g,na.rm=T),
            se_plant_biomass_g=sd(plant_biomass_g,na.rm = T)/sqrt(n()),
            site_root_biomass_g=mean(root_biomass_g,na.rm=T),
            se_root_biomass_g=sd(root_biomass_g,na.rm = T)/sqrt(n()),
            site_trial_yield=mean(trial_yield,na.rm=T),
            se_trial_yield=sd(trial_yield,na.rm = T)/sqrt(n()))
#graph with individual facets (one panel per variety)
facets<-ggplot(siteAvgDf, aes(x=site,y=site_beans_g,colour=variety,group=variety))+facet_wrap(~variety,nrow=1)+geom_point()+geom_line()+
  geom_errorbar(aes(ymin=site_beans_g-se_beans_g,
                    ymax=site_beans_g+se_beans_g),width=0.2)
facets
#overlayed all together (all varieties in one panel)
overlay<-ggplot(siteAvgDf, aes(x=site,y=site_beans_g,colour=variety,group=variety))+geom_line(size=1.5)+
  geom_errorbar(aes(ymin=site_beans_g-se_beans_g,
                    ymax=site_beans_g+se_beans_g),width=0.2,size=1.5)
overlay
#plot both to make output easier (overlay takes the top two thirds of the page)
lay <- rbind(c(1,1,1,1),
             c(1,1,1,1),
             c(2,2,2,2))
grid.arrange(overlay, facets, layout_matrix=lay)
####2020####
#import data (same preparation as 2019)
all<-read.csv("clean_data/clean_all_2020.csv")
all$variety<-as.factor(all$variety)
all$site<-factor(all$site, levels = c("K", "C", "W", "PH"))
#boxplots (root-to-shoot ratio by variety and site)
boxes<-ggplot(all, aes(x = variety, y= root_biomass_g/plant_biomass_g))
boxes+geom_boxplot()+facet_wrap(~site,nrow = 1)
#scatterplots
sp<-ggplot(all, aes(x = beans_g, y= trial_yield))
sp+geom_point()+stat_smooth(method="lm")
#color code by variety
# NOTE(review): the comment above says variety but the plot colours by site -- confirm intent.
csp<-ggplot(all, aes(x = beans_g, y= trial_yield, color= site))
csp+geom_point()
#correlation of metrics
#select only metrics of interest (2020 uses nodule_count instead of functional_nodules)
subsall<-select(all,
                plant_biomass_g,beans_g,healthy,root_biomass_g,nodule_count,stem_diameter_cm,
                avg_toughness,avg_trichomes,SLA,leaf_dry_matter,pressure_chamber,
                gH.,Phi2,PhiNPQ,Relative.Chlorophyll,Thickness,trial_yield
                )
#heat map with correlation values (casewise deletion of rows with any NA)
corrdata <- round(cor(subsall,use="complete.obs"), 1)
ggcorrplot(corrdata,type="lower",show.legend=F,lab=T,hc.order = T)
#scatterplot matrices
#trait data measured by hand
pairs(~SLA+leaf_dry_matter+avg_toughness+avg_trichomes+pressure_chamber,data=subsall)
#trait data from photosynq
pairs(~PhiNPQ+Phi2+Relative.Chlorophyll+gH.+Thickness,data=subsall)
#harvest data
pairs(~plant_biomass_g+beans_g+healthy+root_biomass_g+nodule_count+stem_diameter_cm,data=subsall)
#line graphs across environment
#calculate means and error bars (per site x variety; se = sd / sqrt(n))
siteAvgDf<-all %>%
  group_by(site,variety) %>%
  summarise(site_avg_trichomes=mean(avg_trichomes,na.rm=T),
            se_avg_trichomes=sd(avg_trichomes,na.rm = T)/sqrt(n()),
            site_Relative.Chlorophyll=mean(Relative.Chlorophyll,na.rm=T),
            se_Relative.Chlorophyll=sd(Relative.Chlorophyll,na.rm=T)/sqrt(n()),
            site_Phi2=mean(Phi2,na.rm=T),
            se_Phi2=sd(Phi2,na.rm=T)/sqrt(n()),
            site_SLA=mean(SLA,na.rm=T),
            se_SLA=sd(SLA,na.rm = T)/sqrt(n()),
            site_beans_g=mean(beans_g,na.rm=T),
            se_beans_g=sd(beans_g,na.rm = T)/sqrt(n()),
            site_plant_biomass_g=mean(plant_biomass_g,na.rm=T),
            se_plant_biomass_g=sd(plant_biomass_g,na.rm = T)/sqrt(n()),
            site_root_biomass_g=mean(root_biomass_g,na.rm=T),
            se_root_biomass_g=sd(root_biomass_g,na.rm = T)/sqrt(n()),
            site_RtoS=mean(root_biomass_g/plant_biomass_g,na.rm=T),
            # BUG FIX: a misplaced parenthesis previously passed T/sqrt(n()) as the
            # na.rm argument and never divided the sd by sqrt(n), so se_RtoS was a raw SD.
            se_RtoS=sd(root_biomass_g/plant_biomass_g,na.rm=T)/sqrt(n()),
            site_trial_yield=mean(trial_yield,na.rm=T),
            se_trial_yield=sd(trial_yield,na.rm = T)/sqrt(n())
            )
#graph with individual facets (one panel per variety)
facets<-ggplot(siteAvgDf, aes(x=site,y=site_trial_yield,colour=variety,group=variety))+facet_wrap(~variety,nrow=1)+geom_point()+geom_line()+
  geom_errorbar(aes(ymin=site_trial_yield-se_trial_yield,
                    ymax=site_trial_yield+se_trial_yield),width=0.2)
facets
#overlayed all together (all varieties in one panel)
overlay<-ggplot(siteAvgDf, aes(x=site,y=site_trial_yield,colour=variety,group=variety))+geom_line(size=1.5)+
  geom_errorbar(aes(ymin=site_trial_yield-se_trial_yield,
                    ymax=site_trial_yield+se_trial_yield),width=0.2,size=1.5)
overlay
#plot both together to make output easier (overlay on top, facets below)
lay <- rbind(c(1,1,1,1),
             c(1,1,1,1),
             c(2,2,2,2))
grid.arrange(overlay, facets, layout_matrix=lay)
####2021####
#import data (same preparation as earlier years)
all<-read.csv("clean_data/clean_all_2021.csv")
all$variety<-as.factor(all$variety)
all$site<-factor(all$site, levels = c("K", "C", "W", "PH"))
#boxplots
boxes<-ggplot(all, aes(x = variety, y= beans_g))
boxes+geom_boxplot()+facet_wrap(~site,nrow = 1)
#scatterplots
sp<-ggplot(all, aes(x = plant_biomass_g, y= trial_yield))
sp+geom_point()+stat_smooth(method="lm")
#color code by variety
# NOTE(review): the comment above says variety but the plot colours by site -- confirm intent.
csp<-ggplot(all, aes(x = plant_biomass_g, y= trial_yield, color= site))
csp+geom_point()
#correlation of metrics
#select only metrics of interest
# root_biomass_g is deliberately excluded for 2021; root_collar_diameter_cm is
# renamed to stem_diameter_cm to keep column names consistent across years.
subsall<-select(all,
                plant_biomass_g,beans_g,healthy,
                #root_biomass_g,
                nodule_count,stem_diameter_cm=root_collar_diameter_cm,
                avg_toughness,avg_trichomes,SLA,leaf_dry_matter,pressure_chamber,
                gH.,Phi2,PhiNPQ,Relative.Chlorophyll,Thickness,trial_yield
                )
#heat map with correlation values (casewise deletion of rows with any NA)
corrdata <- round(cor(subsall,use="complete.obs"), 1)
ggcorrplot(corrdata,type="lower",show.legend=F,lab=T,hc.order = T)
#scatterplot matrices
#trait data measured by hand
pairs(~SLA+leaf_dry_matter+avg_toughness+avg_trichomes+pressure_chamber,data=subsall)
#trait data from photosynq
pairs(~PhiNPQ+Phi2+Relative.Chlorophyll+gH.+Thickness,data=subsall)
#harvest data
# BUG FIX: root_biomass_g is commented out of the 2021 subsall selection above,
# so referencing it here raised "object 'root_biomass_g' not found"; it is dropped.
pairs(~plant_biomass_g+beans_g+healthy+nodule_count+stem_diameter_cm,data=subsall)
#line graphs across environment
#calculate means and error bars (per site x variety; se = sd / sqrt(n))
siteAvgDf<-all %>%
  group_by(site,variety) %>%
  summarise(site_avg_trichomes=mean(avg_trichomes,na.rm=T),
            se_avg_trichomes=sd(avg_trichomes,na.rm = T)/sqrt(n()),
            site_Relative.Chlorophyll=mean(Relative.Chlorophyll,na.rm=T),
            se_Relative.Chlorophyll=sd(Relative.Chlorophyll,na.rm=T)/sqrt(n()),
            site_Phi2=mean(Phi2,na.rm=T),
            se_Phi2=sd(Phi2,na.rm=T)/sqrt(n()),
            site_SLA=mean(SLA,na.rm=T),
            se_SLA=sd(SLA,na.rm = T)/sqrt(n()),
            site_beans_g=mean(beans_g,na.rm=T),
            se_beans_g=sd(beans_g,na.rm = T)/sqrt(n()),
            site_plant_biomass_g=mean(plant_biomass_g,na.rm=T),
            se_plant_biomass_g=sd(plant_biomass_g,na.rm = T)/sqrt(n()),
            site_root_biomass_g=mean(root_biomass_g,na.rm=T),
            se_root_biomass_g=sd(root_biomass_g,na.rm = T)/sqrt(n()),
            site_RtoS=mean(root_biomass_g/plant_biomass_g,na.rm=T),
            # BUG FIX: a misplaced parenthesis previously passed T/sqrt(n()) as the
            # na.rm argument and never divided the sd by sqrt(n), so se_RtoS was a raw SD.
            se_RtoS=sd(root_biomass_g/plant_biomass_g,na.rm=T)/sqrt(n()),
            site_trial_yield=mean(trial_yield,na.rm=T),
            se_trial_yield=sd(trial_yield,na.rm = T)/sqrt(n())
            )
#graph with individual facets (one panel per variety)
facets<-ggplot(siteAvgDf, aes(x=site,y=site_trial_yield,colour=variety,group=variety))+facet_wrap(~variety,nrow=1)+geom_point()+geom_line()+
  geom_errorbar(aes(ymin=site_trial_yield-se_trial_yield,
                    ymax=site_trial_yield+se_trial_yield),width=0.2)
facets
#overlayed all together (all varieties in one panel)
overlay<-ggplot(siteAvgDf, aes(x=site,y=site_trial_yield,colour=variety,group=variety))+geom_line(size=1.5)+
  geom_errorbar(aes(ymin=site_trial_yield-se_trial_yield,
                    ymax=site_trial_yield+se_trial_yield),width=0.2,size=1.5)
overlay
#plot both together to make output easier (overlay on top, facets below)
lay <- rbind(c(1,1,1,1),
             c(1,1,1,1),
             c(2,2,2,2))
# FIX: dataset metadata text fused onto the end of this line made it unparseable;
# the stray text has been removed.
grid.arrange(overlay, facets, layout_matrix=lay)
library(tidyverse)
library(lme4)
library(lmerTest)
library(gridExtra)
library(ggcorrplot)
setwd("~/Dropbox/bean_dip_2018-2024/field trials/data")
####2019####
#import data
all<-read.csv("clean_data/clean_all_2019.csv")
all$variety<-as.factor(all$variety)
all$site<-factor(all$site, levels = c("K", "C", "W", "PH"))
#compare overall trial yield to our plants
#per plot calculations
per_plot<-all %>%
group_by(site,variety,plot_id,trial_yield) %>%
summarise(plot_density=mean(density),
plot_beans_g=mean(beans_g),
plot_yield=((((plot_density/8)*plot_beans_g)/454)/60)*43560)
#graph both plot level yield metrics together
p1<-ggplot(per_plot,aes(x=variety,y=trial_yield))+geom_boxplot()+facet_wrap(~site,nrow=1)+coord_cartesian(ylim=c(25,100))
p2<-ggplot(per_plot,aes(x=variety,y=plot_yield))+geom_boxplot()+facet_wrap(~site,nrow=1)+coord_cartesian(ylim=c(25,100))
grid.arrange(p1,p2,nrow=2)
#boxplots
boxes<-ggplot(all, aes(x = variety, y= Thickness))
boxes+geom_boxplot()+facet_wrap(~site,nrow = 1)#+coord_cartesian(ylim=c(0,400))
#ggsave(filename = "figures/2019 phi2.tiff",width = 6,height = 4,units = "in")
#modeling
#probably best model from 1st meeting but rank deficient & singular
mod4<-lmer(avg_trichomes~variety*site+(1|site:plot:variety), data=all)
#dropping to one fixed effect solves singularity error
mod4<-lmer(avg_trichomes~variety+(1|site:plot:variety), data=all)
mod4<-lmer(avg_trichomes~site+(1|site:plot:variety), data=all)
#trying new organization still singularity
mod5<-lmer(avg_trichomes~variety*site+(1|plot_id), data=all)
#model results
anova(mod5)
summary(mod5)
ranef(mod5)
#check model assumptions
plot(mod5)
hist(resid(mod5))
#scatterplots
sp<-ggplot(all, aes(x = Thickness, y= avg_trichomes))
sp+geom_point()+stat_smooth(method="lm")
#color code by variety
csp<-ggplot(all, aes(x = beans_g, y= trial_yield, color= site))
csp+geom_point()
#correlation of metrics
#select only metrics of interest
subsall<-select(all,
plant_biomass_g,beans_g,healthy,trial_yield,root_biomass_g,functional_nodules,stem_diameter_cm,
avg_toughness,avg_trichomes,SLA,leaf_dry_matter,osmom_final,pressure_chamber,
gH.,Phi2,PhiNPQ,Relative.Chlorophyll,Thickness,trial_yield
)
#heat map with correlation values
corrdata <- round(cor(subsall,use="complete.obs"), 1)
ggcorrplot(corrdata,type="lower",show.legend=F,lab=T,hc.order = T)
#scatterplot matrices
#all data is too much
#pairs(~.,data=subsall)
#trait data measured by hand
pairs(~SLA+leaf_dry_matter+avg_toughness+avg_trichomes+pressure_chamber,data=subsall)
#color by variety
pairs(~SLA+leaf_dry_matter+avg_toughness+avg_trichomes+pressure_chamber,data=subsall)
#trait data from photosynq
pairs(~PhiNPQ+Phi2+Relative.Chlorophyll+gH.,data=subsall)
#harvest data
pairs(~plant_biomass_g+beans_g+healthy+trial_yield+root_biomass_g+functional_nodules+stem_diameter_cm,data=subsall)
#alternate colored matrix
# library(car)
# scatterplotMatrix(~avg_trichomes+Phi2+Relative.Chlorophyll+beans_g|variety,data=all)
#line graphs across time
#calculate means and error bars
damage <- read_csv("clean_data/clean_damage_2019.csv")
avgInsects<-damage %>%
group_by(date,variety,site) %>%
summarise(avg_insect_count=mean(insects),se_insect_count=sd(insects)/sqrt(n()))
avgInsects$variety<-as.factor(avgInsects$variety)
#graph
lines<-ggplot(avgInsects, aes(x=date,y=avg_insect_count,colour=variety,group=variety))+facet_wrap(~site,nrow=1)
lines+geom_point()+geom_line()+geom_errorbar(aes(ymin=avg_insect_count-se_insect_count,ymax=avg_insect_count+se_insect_count))
#calculate means and error bars
avgChew<-damage %>%
group_by(date,variety,site) %>%
summarise(avg_pct_chew=mean(chew_pct),se_pct_chew=sd(chew_pct)/sqrt(n()))
avgChew$variety<-as.factor(avgChew$variety)
#graph
lines<-ggplot(avgChew, aes(x=date,y=avg_pct_chew,colour=variety,group=variety))+facet_wrap(~site,nrow=1)
lines+geom_point()+geom_line()+geom_errorbar(aes(ymin=avg_pct_chew-se_pct_chew,ymax=avg_pct_chew+se_pct_chew))
#line graphs across environment
#calculate means and error bars
siteAvgDf<-all %>%
group_by(site,variety) %>%
summarise(site_avg_trichomes=mean(avg_trichomes,na.rm=T),
se_avg_trichomes=sd(avg_trichomes,na.rm = T)/sqrt(n()),
site_Relative.Chlorophyll=mean(Relative.Chlorophyll,na.rm=T),
se_Relative.Chlorophyll=sd(Relative.Chlorophyll,na.rm=T)/sqrt(n()),
site_Phi2=mean(Phi2,na.rm=T),
se_Phi2=sd(Phi2,na.rm=T)/sqrt(n()),
site_SLA=mean(SLA,na.rm=T),
se_SLA=sd(SLA,na.rm = T)/sqrt(n()),
site_beans_g=mean(beans_g,na.rm=T),
se_beans_g=sd(beans_g,na.rm = T)/sqrt(n()),
site_plant_biomass_g=mean(plant_biomass_g,na.rm=T),
se_plant_biomass_g=sd(plant_biomass_g,na.rm = T)/sqrt(n()),
site_root_biomass_g=mean(root_biomass_g,na.rm=T),
se_root_biomass_g=sd(root_biomass_g,na.rm = T)/sqrt(n()),
site_trial_yield=mean(trial_yield,na.rm=T),
se_trial_yield=sd(trial_yield,na.rm = T)/sqrt(n()))
#graph with individual facets
facets<-ggplot(siteAvgDf, aes(x=site,y=site_beans_g,colour=variety,group=variety))+facet_wrap(~variety,nrow=1)+geom_point()+geom_line()+
geom_errorbar(aes(ymin=site_beans_g-se_beans_g,
ymax=site_beans_g+se_beans_g),width=0.2)
facets
#overlayed all together
overlay<-ggplot(siteAvgDf, aes(x=site,y=site_beans_g,colour=variety,group=variety))+geom_line(size=1.5)+
geom_errorbar(aes(ymin=site_beans_g-se_beans_g,
ymax=site_beans_g+se_beans_g),width=0.2,size=1.5)
overlay
#plot both to make output easier
lay <- rbind(c(1,1,1,1),
c(1,1,1,1),
c(2,2,2,2))
grid.arrange(overlay, facets, layout_matrix=lay)
####2020####
#import data
all<-read.csv("clean_data/clean_all_2020.csv")
all$variety<-as.factor(all$variety)
all$site<-factor(all$site, levels = c("K", "C", "W", "PH"))
#boxplots
boxes<-ggplot(all, aes(x = variety, y= root_biomass_g/plant_biomass_g))
boxes+geom_boxplot()+facet_wrap(~site,nrow = 1)
#scatterplots
sp<-ggplot(all, aes(x = beans_g, y= trial_yield))
sp+geom_point()+stat_smooth(method="lm")
#color code by variety
csp<-ggplot(all, aes(x = beans_g, y= trial_yield, color= site))
csp+geom_point()
#correlation of metrics
#select only metrics of interest
subsall<-select(all,
plant_biomass_g,beans_g,healthy,root_biomass_g,nodule_count,stem_diameter_cm,
avg_toughness,avg_trichomes,SLA,leaf_dry_matter,pressure_chamber,
gH.,Phi2,PhiNPQ,Relative.Chlorophyll,Thickness,trial_yield
)
#heat map with correlation values
corrdata <- round(cor(subsall,use="complete.obs"), 1)
ggcorrplot(corrdata,type="lower",show.legend=F,lab=T,hc.order = T)
#scatterplot matrices
#trait data measured by hand
pairs(~SLA+leaf_dry_matter+avg_toughness+avg_trichomes+pressure_chamber,data=subsall)
#trait data from photosynq
pairs(~PhiNPQ+Phi2+Relative.Chlorophyll+gH.+Thickness,data=subsall)
#harvest data
pairs(~plant_biomass_g+beans_g+healthy+root_biomass_g+nodule_count+stem_diameter_cm,data=subsall)
#line graphs across environment
#calculate means and error bars
siteAvgDf<-all %>%
group_by(site,variety) %>%
summarise(site_avg_trichomes=mean(avg_trichomes,na.rm=T),
se_avg_trichomes=sd(avg_trichomes,na.rm = T)/sqrt(n()),
site_Relative.Chlorophyll=mean(Relative.Chlorophyll,na.rm=T),
se_Relative.Chlorophyll=sd(Relative.Chlorophyll,na.rm=T)/sqrt(n()),
site_Phi2=mean(Phi2,na.rm=T),
se_Phi2=sd(Phi2,na.rm=T)/sqrt(n()),
site_SLA=mean(SLA,na.rm=T),
se_SLA=sd(SLA,na.rm = T)/sqrt(n()),
site_beans_g=mean(beans_g,na.rm=T),
se_beans_g=sd(beans_g,na.rm = T)/sqrt(n()),
site_plant_biomass_g=mean(plant_biomass_g,na.rm=T),
se_plant_biomass_g=sd(plant_biomass_g,na.rm = T)/sqrt(n()),
site_root_biomass_g=mean(root_biomass_g,na.rm=T),
se_root_biomass_g=sd(root_biomass_g,na.rm = T)/sqrt(n()),
site_RtoS=mean(root_biomass_g/plant_biomass_g,na.rm=T),
se_RtoS=sd((root_biomass_g/plant_biomass_g),na.rm=T/sqrt(n())),
site_trial_yield=mean(trial_yield,na.rm=T),
se_trial_yield=sd(trial_yield,na.rm = T)/sqrt(n())
)
#graph with individual facets
facets<-ggplot(siteAvgDf, aes(x=site,y=site_trial_yield,colour=variety,group=variety))+facet_wrap(~variety,nrow=1)+geom_point()+geom_line()+
geom_errorbar(aes(ymin=site_trial_yield-se_trial_yield,
ymax=site_trial_yield+se_trial_yield),width=0.2)
facets
#overlayed all together
overlay<-ggplot(siteAvgDf, aes(x=site,y=site_trial_yield,colour=variety,group=variety))+geom_line(size=1.5)+
geom_errorbar(aes(ymin=site_trial_yield-se_trial_yield,
ymax=site_trial_yield+se_trial_yield),width=0.2,size=1.5)
overlay
#plot both together to make output easier
lay <- rbind(c(1,1,1,1),
c(1,1,1,1),
c(2,2,2,2))
grid.arrange(overlay, facets, layout_matrix=lay)
####2021####
#import data
all<-read.csv("clean_data/clean_all_2021.csv")
all$variety<-as.factor(all$variety)
all$site<-factor(all$site, levels = c("K", "C", "W", "PH"))
#boxplots
boxes<-ggplot(all, aes(x = variety, y= beans_g))
boxes+geom_boxplot()+facet_wrap(~site,nrow = 1)
#scatterplots
sp<-ggplot(all, aes(x = plant_biomass_g, y= trial_yield))
sp+geom_point()+stat_smooth(method="lm")
#color code by variety
csp<-ggplot(all, aes(x = plant_biomass_g, y= trial_yield, color= site))
csp+geom_point()
#correlation of metrics
#select only metrics of interest
subsall<-select(all,
plant_biomass_g,beans_g,healthy,
#root_biomass_g,
nodule_count,stem_diameter_cm=root_collar_diameter_cm,
avg_toughness,avg_trichomes,SLA,leaf_dry_matter,pressure_chamber,
gH.,Phi2,PhiNPQ,Relative.Chlorophyll,Thickness,trial_yield
)
#heat map with correlation values
corrdata <- round(cor(subsall,use="complete.obs"), 1)
ggcorrplot(corrdata,type="lower",show.legend=F,lab=T,hc.order = T)
#scatterplot matrices
#trait data measured by hand
pairs(~SLA+leaf_dry_matter+avg_toughness+avg_trichomes+pressure_chamber,data=subsall)
#trait data from photosynq
pairs(~PhiNPQ+Phi2+Relative.Chlorophyll+gH.+Thickness,data=subsall)
#harvest data
pairs(~plant_biomass_g+beans_g+healthy+root_biomass_g+nodule_count+stem_diameter_cm,data=subsall)
#line graphs across environment
#calculate means and error bars
siteAvgDf<-all %>%
group_by(site,variety) %>%
summarise(site_avg_trichomes=mean(avg_trichomes,na.rm=T),
se_avg_trichomes=sd(avg_trichomes,na.rm = T)/sqrt(n()),
site_Relative.Chlorophyll=mean(Relative.Chlorophyll,na.rm=T),
se_Relative.Chlorophyll=sd(Relative.Chlorophyll,na.rm=T)/sqrt(n()),
site_Phi2=mean(Phi2,na.rm=T),
se_Phi2=sd(Phi2,na.rm=T)/sqrt(n()),
site_SLA=mean(SLA,na.rm=T),
se_SLA=sd(SLA,na.rm = T)/sqrt(n()),
site_beans_g=mean(beans_g,na.rm=T),
se_beans_g=sd(beans_g,na.rm = T)/sqrt(n()),
site_plant_biomass_g=mean(plant_biomass_g,na.rm=T),
se_plant_biomass_g=sd(plant_biomass_g,na.rm = T)/sqrt(n()),
site_root_biomass_g=mean(root_biomass_g,na.rm=T),
se_root_biomass_g=sd(root_biomass_g,na.rm = T)/sqrt(n()),
site_RtoS=mean(root_biomass_g/plant_biomass_g,na.rm=T),
se_RtoS=sd((root_biomass_g/plant_biomass_g),na.rm=T/sqrt(n())),
site_trial_yield=mean(trial_yield,na.rm=T),
se_trial_yield=sd(trial_yield,na.rm = T)/sqrt(n())
)
#graph with individual facets
facets<-ggplot(siteAvgDf, aes(x=site,y=site_trial_yield,colour=variety,group=variety))+facet_wrap(~variety,nrow=1)+geom_point()+geom_line()+
geom_errorbar(aes(ymin=site_trial_yield-se_trial_yield,
ymax=site_trial_yield+se_trial_yield),width=0.2)
facets
#overlayed all together
overlay<-ggplot(siteAvgDf, aes(x=site,y=site_trial_yield,colour=variety,group=variety))+geom_line(size=1.5)+
geom_errorbar(aes(ymin=site_trial_yield-se_trial_yield,
ymax=site_trial_yield+se_trial_yield),width=0.2,size=1.5)
overlay
#plot both together to make output easier
lay <- rbind(c(1,1,1,1),
c(1,1,1,1),
c(2,2,2,2))
grid.arrange(overlay, facets, layout_matrix=lay) |
# Lab: basic base-R graphics on the ggplot2::mpg fuel-economy data set.
library(ggplot2)
str(ggplot2::mpg)
mpg <- as.data.frame(ggplot2::mpg)
# Problem 1: city (x) vs highway (y) mileage scatter plot with '+' symbols.
# BUG FIX: the axis labels were swapped -- x plots cty ("도시연비" = city mileage)
# and y plots hwy ("고속도로연비" = highway mileage), matching the boxplot below.
plot(x=mpg$cty, y=mpg$hwy, xlab="도시연비", ylab="고속도로연비", pch="+")
# Problem 2: bar chart of drive-train counts, one rainbow colour per bar.
barplot(table(mpg$drv), col=rainbow(3))
# Problem 3: highway mileage by manufacturer; heat palette, rotated labels (las=2).
boxplot(hwy~manufacturer,data=mpg, col=heat.colors(15),
                ylab="고속도로연비", main="*제조사별 고속도로연비",
                col.main="magenta", las=2, ylim=c(5,40))
# Lab: basic base-R graphics on the ggplot2::mpg fuel-economy data set.
# FIX: dataset metadata text fused into the first line made it unparseable; removed.
library(ggplot2)
str(ggplot2::mpg)
mpg <- as.data.frame(ggplot2::mpg)
# Problem 1: city (x) vs highway (y) mileage scatter plot with '+' symbols.
# BUG FIX: the axis labels were swapped -- x plots cty ("도시연비" = city mileage)
# and y plots hwy ("고속도로연비" = highway mileage), matching the boxplot below.
plot(x=mpg$cty, y=mpg$hwy, xlab="도시연비", ylab="고속도로연비", pch="+")
# Problem 2: bar chart of drive-train counts, one rainbow colour per bar.
barplot(table(mpg$drv), col=rainbow(3))
# Problem 3: highway mileage by manufacturer; heat palette, rotated labels (las=2).
boxplot(hwy~manufacturer,data=mpg, col=heat.colors(15),
                ylab="고속도로연비", main="*제조사별 고속도로연비",
                col.main="magenta", las=2, ylim=c(5,40))
|
# Gibbs sampler in R ------------------------------------------------------
# https://adv-r.hadley.nz/rcpp.html

#' Gibbs sampler alternating the conditional draws
#' x | y ~ Gamma(3, y^2 + 4) and y | x ~ N(1 / (x + 1), sd = 1 / sqrt(2 * (x + 1))).
#'
#' @param N Number of retained samples (rows of the result).
#' @param thin Number of inner updates between retained samples.
#' @return An N x 2 matrix; column 1 holds the x draws, column 2 the y draws.
gibbs_r <- function(N, thin) {
  mat <- matrix(nrow = N, ncol = 2)  # preallocate the output
  x <- 0
  y <- 0
  # seq_len() is safe for N = 0 (1:N would iterate over c(1, 0) and
  # then fail on the out-of-bounds assignment mat[1, ]).
  for (i in seq_len(N)) {
    for (j in seq_len(thin)) {
      x <- rgamma(1, 3, y * y + 4)
      y <- rnorm(1, 1 / (x + 1), 1 / sqrt(2 * (x + 1)))
    }
    mat[i, ] <- c(x, y)
  }
  mat
}
# Run the cpp code to generate function -----------------------------------
# sourceCpp() compiles the C++ file and exports gibbs_cpp() into the R session.
library(Rcpp)
sourceCpp('Bayes_gibbs_sampler_cpp.cpp')
# Time the R implementation against the compiled C++ one.
library(microbenchmark)
microbenchmark(gibbs_cpp(100, 10), gibbs_r(100, 10))
# Gibbs sampler in R ------------------------------------------------------
# https://adv-r.hadley.nz/rcpp.html
# FIX: dataset metadata text fused into the first line made it unparseable; removed.

# Gibbs sampler alternating the conditional draws
# x | y ~ Gamma(3, y^2 + 4) and y | x ~ N(1 / (x + 1), sd = 1 / sqrt(2 * (x + 1))).
# Returns an N x 2 matrix (column 1 = x draws, column 2 = y draws).
gibbs_r <- function(N, thin) {
  mat <- matrix(nrow = N, ncol = 2)  # preallocate the output
  x <- 0
  y <- 0
  for (i in seq_len(N)) {            # seq_len() is safe when N == 0
    for (j in seq_len(thin)) {
      x <- rgamma(1, 3, y * y + 4)
      y <- rnorm(1, 1 / (x + 1), 1 / sqrt(2 * (x + 1)))
    }
    mat[i, ] <- c(x, y)
  }
  mat
}

# Run the cpp code to generate function -----------------------------------
library(Rcpp)
sourceCpp('Bayes_gibbs_sampler_cpp.cpp')  # compiles and exports gibbs_cpp()
library(microbenchmark)
microbenchmark(gibbs_cpp(100, 10), gibbs_r(100, 10))
|
#' Log-likelihood method for fitted vennLasso objects
#'
#' Computes the model log-likelihood from the loss stored on a fitted
#' "vennLasso" object, with the formula chosen by the model family.
#'
#' @param object fitted "vennLasso" model object.
#' @param ... not used
#' @rdname logLik
#' @export
#' @examples
#' library(Matrix)
#'
#' set.seed(123)
#' n.obs <- 200
#' n.vars <- 50
#'
#' true.beta.mat <- array(NA, dim = c(3, n.vars))
#' true.beta.mat[1,] <- c(-0.5, -1, 0, 0, 2, rep(0, n.vars - 5))
#' true.beta.mat[2,] <- c(0.5, 0.5, -0.5, -0.5, 1, -1, rep(0, n.vars - 6))
#' true.beta.mat[3,] <- c(0, 0, 1, 1, -1, rep(0, n.vars - 5))
#' rownames(true.beta.mat) <- c("1,0", "1,1", "0,1")
#' true.beta <- as.vector(t(true.beta.mat))
#'
#' x.sub1 <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)
#' x.sub2 <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)
#' x.sub3 <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)
#'
#' x <- as.matrix(rbind(x.sub1, x.sub2, x.sub3))
#'
#' conditions <- as.matrix(cbind(c(rep(1, 2 * n.obs), rep(0, n.obs)),
#'                               c(rep(0, n.obs), rep(1, 2 * n.obs))))
#'
#' y <- rnorm(n.obs * 3, sd = 3) + drop(as.matrix(bdiag(x.sub1, x.sub2, x.sub3)) %*% true.beta)
#'
#' fit <- vennLasso(x = x, y = y, groups = conditions)
#'
#' logLik(fit)
#'
logLik.vennLasso <- function(object, ...) {
    ## Gaussian branch follows ncvreg's profiled-variance formula
    ## (credit: Patrick Breheny).
    n_obs   <- as.numeric(object$nobs)
    n_param <- object$df
    fam     <- object$family
    if (fam == "gaussian") {
        rss <- object$loss
        logL <- -0.5 * n_obs * (log(2 * pi) - log(n_obs) + log(rss)) - 0.5 * n_obs
    } else if (fam == "binomial") {
        ## the stored loss is negated to obtain the log-likelihood
        logL <- -object$loss
    } else if (fam == "poisson") {
        stop("poisson not complete yet")
    } else if (fam == "coxph") {
        ## partial likelihood not implemented; return a sentinel value
        logL <- -1e99
    }
    logL
}
#' log likelihood function for fitted vennLasso objects
#'
#' @param object fitted "vennLasso" model object.
#' @param ... not used
#' @return numeric log-likelihood value computed from the loss stored on the fit.
#' @rdname logLik
#' @export
#' @examples
#' library(Matrix)
#'
#' set.seed(123)
#' n.obs <- 200
#' n.vars <- 50
#'
#' true.beta.mat <- array(NA, dim = c(3, n.vars))
#' true.beta.mat[1,] <- c(-0.5, -1, 0, 0, 2, rep(0, n.vars - 5))
#' true.beta.mat[2,] <- c(0.5, 0.5, -0.5, -0.5, 1, -1, rep(0, n.vars - 6))
#' true.beta.mat[3,] <- c(0, 0, 1, 1, -1, rep(0, n.vars - 5))
#' rownames(true.beta.mat) <- c("1,0", "1,1", "0,1")
#' true.beta <- as.vector(t(true.beta.mat))
#'
#' x.sub1 <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)
#' x.sub2 <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)
#' x.sub3 <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)
#'
#' x <- as.matrix(rbind(x.sub1, x.sub2, x.sub3))
#'
#' conditions <- as.matrix(cbind(c(rep(1, 2 * n.obs), rep(0, n.obs)),
#'                               c(rep(0, n.obs), rep(1, 2 * n.obs))))
#'
#' y <- rnorm(n.obs * 3, sd = 3) + drop(as.matrix(bdiag(x.sub1, x.sub2, x.sub3)) %*% true.beta)
#'
#' fit <- vennLasso(x = x, y = y, groups = conditions)
#'
#' logLik(fit)
#'
logLik.vennLasso <- function(object, ...) {
    # adapted from ncvreg. Thanks to Patrick Breheny.
    # (FIX: dataset metadata text fused into the roxygen header was removed,
    #  and the missing @return tag was added.)
    n <- as.numeric(object$nobs)   # number of observations used in the fit
    df <- object$df                # model degrees of freedom (currently unused)
    if (object$family == "gaussian")
    {
        resid.ss <- object$loss
        # profiled Gaussian log-likelihood with sigma^2 estimated as RSS / n
        logL <- -0.5 * n * (log(2 * pi) - log(n) + log(resid.ss)) - 0.5 * n
    } else if (object$family == "binomial")
    {
        # the stored loss is negated to obtain the log-likelihood
        logL <- -1 * object$loss
    } else if (object$family == "poisson")
    {
        stop("poisson not complete yet")
    } else if (object$family == "coxph")
    {
        # partial likelihood not implemented; return a sentinel value
        logL <- -1e99
    }
    logL
}
|
### treemap ----
## A treemap displays hierarchical data as a set of nested rectangles:
## each branch of the tree gets a rectangle, which is tiled with smaller
## rectangles representing its sub-branches.
## This example uses the 'treemap' package.
## Install only when missing instead of unconditionally re-installing on
## every run (the original called install.packages() every time).
if (!requireNamespace("treemap", quietly = TRUE)) {
  install.packages("treemap")
}
library(treemap)
## Build a toy dataset: one value per group.
group <- c("group-1", "group-2", "group-3")
value <- c(13, 5, 22)
data <- data.frame(group, value)
## Draw the treemap: one rectangle per 'group', area proportional to 'value'.
treemap(data, index = "group", vSize = "value", type = "index")
| /10월16일_기본그래프/treeplot.r | no_license | namu2018/DATA-VISUALIZATION | R | false | false | 481 | r | ###treemap
##계층데이터를 중첩된 사각형 세트로 표시
##트리의 각 가지에는 사각형이 주어진다.
##하위 분기를 나타내는 작은 사각형으로 바둑판 식으로 배열
##Treemap은 Treemap라이브러리를 사용한다
install.packages("treemap")
library(treemap)
##데이터만들기
group=c("group-1", "group-2","group-3")
value=c(13,5,22)
data=data.frame(group, value)
##treemap
treemap(data, index="group", vSize="value", type="index")
|
##' Estimates (weighted) forecasted means, variances, and correlations from a fitted bmgarch model.
##' @title Forecast method for bmgarch objects.
##' @param object bmgarch object.
##' @param ahead Integer (Default: 1). Periods to be forecasted ahead.
##' @param xC Numeric vector or matrix. Covariates(s) for the constant variance terms in C, or c. Used in a log-linear model on the constant variance terms. If vector, then it acts as a covariate for all constant variance terms. If matrix, must have columns equal to number of time series, and each column acts as a covariate for the respective time series (e.g., column 1 predicts constant variance for time series 1).
##' @param newdata Future datapoints for LFO-CV computation
##' @param CrI Numeric vector (Default: \code{c(.025, .975)}). Lower and upper bound of predictive credible interval.
##' @param seed Integer (Optional). Specify seed for \code{\link[rstan]{sampling}}.
##' @param digits Integer (Default: 2, optional). Number of digits to round to when printing.
##' @param weights Takes weights from model_weight function. Defaults to 1 -- this parameter is not typically set by user.
##' @param L Minimal length of time series before engaging in lfocv
##' @param method Ensemble methods, 'stacking' (default) or 'pseudobma'
##' @param inc_samples Logical (Default: FALSE). Whether to return the MCMC samples for the fitted values.
##' @param ... Not used
##' @return forecast.bmgarch object. List containing \code{forecast}, \code{backcast}, and \code{meta}data.
##' See \code{\link{fitted.bmgarch}} for information on \code{backcast}.
##' \code{forecast} is a list of four components:
##' \describe{
##' \item{mean}{\code{[N, 7, TS]} array of mean forecasts, where N is the timeseries length, and TS is the number of time series. E.g., \code{fc$forecast$mean[3,,"tsA"]} is the 3-ahead mean forecast for time series "tsA".}
##' \item{var}{\code{[N, 7, TS]} array of variance forecasts, where N is the timeseries length, and TS is the number of time series. E.g., \code{fc$forecast$var[3,,"tsA"]} is the 3-ahead variance forecast for time series "tsA".}
##' \item{cor}{\code{[N, 7, TS(TS - 1)/2]} array of correlation forecasts, where N is the timeseries length, and \code{TS(TS - 1)/2} is the number of correlations. E.g., \code{fc$forecast$cor[3,, "tsB_tsA"]} is the 3-ahead forecast for the correlation between "tsB" and "tsA". Lower triangular correlations are saved.}
##' \item{meta}{Meta-data specific to the forecast. I.e., TS_length (number ahead) and xC.}
##' \item{samples}{List}. If inc_samples is \code{TRUE}, then a list of arrays of MCMC samples for means, vars, and cors. Each array is [Iteration, Period, ..., ...].
##' }
##' @aliases forecast
##' @importFrom forecast forecast
##' @export
##' @export forecast
##' @examples
##' \donttest{
##' data(panas)
##' # Fit DCC(2,2) with constant mean structure.
##' fit <- bmgarch(panas, parameterization = "DCC", P = 2, Q = 2, meanstructure = "constant")
##'
##' # Forecast 8 ahead
##' fit.fc <- forecast(fit, ahead = 8)
##'
##' # Print forecasts
##' fit.fc
##' print(fit.fc)
##'
##' # Plot variance forecasts
##' plot(fit.fc, type = "var")
##'
##' # Plot correlation forecasts
##' plot(fit.fc, type = "cor")
##'
##' # Save backcasted and forecasted values as data frame.
##' fit.fc.df <- as.data.frame(fit.fc)
##'
##' # Save only forecasted values as data frame.
##' fit.fc.df <- as.data.frame(fit.fc, backcast = FALSE)
##'
##' # Add another model, compute model weights and perform a model weighted forecast
##'
##' # Fit a DCC(1,1) model
##' fit1 <- bmgarch(panas, parameterization = "DCC", P = 1, Q = 1, meanstructure = "constant")
##'
##' # Compute model stacking weights based on the last 19 time points (with L = 80)
##' blist <- bmgarch_list( fit1, fit )
##' mw <- model_weights(blist, L = 80)
##'
##' # Weighted forecasts:
##' w.fc <- forecast(object = blist, ahead = 8, weights = mw)
##' }
forecast.bmgarch <- function(object, ahead = 1, xC = NULL,
                             newdata = NULL, CrI = c(.025, .975),
                             seed = NA, digits = 2, weights = NULL,
                             L = NA, method = 'stacking', inc_samples = FALSE, ...) {
    ## NOTE(review): 'method' is accepted but never forwarded to
    ## model_weights() below -- confirm whether that is intentional.

    ## Normalize input: internally we always work on a bmgarch_list,
    ## even when a single fitted model is supplied.
    n_mods <- 1
    if("bmgarch_list" %in% class(object)) {
        n_mods <- length(object)
    } else {
        object <- bmgarch_list(object)
    }

    TS_names <- object[[1]]$TS_names
    # Check for TS name consistency across all models in the list.
    TS_names_consistent <- all(sapply(object, function(x) {
        all(x$TS_names == TS_names)
    }))
    if(!TS_names_consistent) {
        # Could *possibly* rearrange the column orders to 'fix' this, but this is a much safer default.
        # Could also check whether the training data are the same across models, to ensure the predictions make sense.
        stop("Time series column names are not consistent across models. Forecasting halted.")
    }

    # Define 0 arrays for stan when covariates / future data are absent.
    if(is.null(xC)) {
        xC <- array(0, dim = c(ahead, object[[1]]$nt))
    }
    if(is.null(newdata)) {
        newdata <- array(0, dim = c(ahead, object[[1]]$nt))
        compute_log_lik <- 0
    } else {
        compute_log_lik <- 1
    }

    ## Model weights:
    ## Case 1: multiple models, no weights supplied -- compute LFO-based weights.
    ## Case 2: multiple models, weights from a model_weights() object -- reuse.
    ## Case 3: single model -- trivial weight of 1.
    if(n_mods > 1 & is.null(weights)) {
        mw <- bmgarch::model_weights(bmgarch_objects = object, L = L)
        weights <- mw$wts[]
    } else if(n_mods > 1 & !is.null(weights)) {
        weights <- weights$wts[]
    } else if(n_mods == 1) {
        weights <- 1
    }

    ## Forecast each model by running the matching generated-quantities stan
    ## model on the posterior draws of the fitted model.
    object.f <- lapply(object, function(m) {
        standat <- list(T = m$TS_length,
                        nt = m$nt,
                        rts = cbind(m$RTS_full),
                        xC = m$xC,
                        Q = m$mgarchQ,
                        P = m$mgarchP,
                        ahead = ahead,
                        meanstructure = m$meanstructure,
                        distribution = m$num_dist,
                        xC_p = xC,
                        future_rts = newdata,
                        compute_log_lik = compute_log_lik)

        gqs_model <- switch(m$param,
                            DCC = stanmodels$forecastDCC,
                            CCC = stanmodels$forecastCCC,
                            BEKK = stanmodels$forecastBEKK,
                            pdBEKK = stanmodels$forecastBEKK,
                            NULL)
        if(is.null(gqs_model)) {
            stop("bmgarch object 'param' does not match a supported model. ",
                 m$param, "is not one in ", paste0(supported_models, collapse = ", "),
                 ".")
        }

        ## TODO: Limit pars to only what is needed (H_p, R/R_p, rts_p, mu_p)
        forecasted <- rstan::gqs(gqs_model,
                                 draws = as.matrix(m$model_fit),
                                 data = standat,
                                 seed = seed)
        return(forecasted)
    })

    ## Weighted posterior summaries of the forecasted means, (co)variances,
    ## and correlations across all models.
    f.mean <- .get_stan_summary(object.f, "rts_forecasted", CrI, weights)
    f.var <- .get_stan_summary(object.f, "H_forecasted", CrI, weights)
    f.cor <- .get_stan_summary(object.f, "R_forecasted", CrI, weights)

    # Restructure summaries into [period, summary, TS] arrays.
    nt <- object[[1]]$nt
    forecast_start <- (object[[1]]$TS_length + 1)
    forecast_end <- (object[[1]]$TS_length + ahead)

    ## f.mean
    stan_sum_cols <- colnames(f.mean)
    f.mean <- array(f.mean, dim = c(nt, ahead, ncol(f.mean)))
    f.mean <- aperm(f.mean, c(2, 3, 1))
    dimnames(f.mean) <- list(period = forecast_start:forecast_end, stan_sum_cols, TS = TS_names)

    ## f.var
    ### Pull out indices for the diagonal elements H[period, a, a].
    f.var.indices <- grep("H_forecasted\\[[[:digit:]]+,([[:digit:]]+),\\1]", rownames(f.var), value = TRUE)
    ## drop = FALSE guards against collapsing to a vector when a single row
    ## is selected (consistent with the f.cor subsetting below).
    f.var <- f.var[f.var.indices, , drop = FALSE]
    f.var <- array(f.var, dim = c(nt, ahead, ncol(f.var)))
    f.var <- aperm(f.var, c(2, 3, 1))
    dimnames(f.var) <- list(period = forecast_start:forecast_end, stan_sum_cols, TS = TS_names)

    ## f.cor
    # Lower-triangular indices
    f.cor.indices.L <- which(lower.tri(matrix(0, nt, nt)), arr.ind = TRUE)
    # Labels mapping to TS names
    f.cor.indices.L.labels <- paste0(TS_names[f.cor.indices.L[, 1]], "_",
                                     TS_names[f.cor.indices.L[, 2]])
    # Indices as "a,b"
    f.cor.indices.L.char <- paste0(f.cor.indices.L[, 1], ",", f.cor.indices.L[, 2])
    # Indices as "[period,a,b]"
    f.cor.indices.L.all <- paste0("R_forecasted[", 1:(ahead), ",",
                                  rep(f.cor.indices.L.char, each = (ahead)), "]")
    # Keep only the lower-triangular correlation rows.
    f.cor <- f.cor[f.cor.indices.L.all, , drop = FALSE]
    f.cor <- array(f.cor, dim = c(ahead, length(f.cor.indices.L.char), ncol(f.cor)))
    f.cor <- aperm(f.cor, c(1, 3, 2))
    dimnames(f.cor) <- list(period = forecast_start:forecast_end, stan_sum_cols, TS = f.cor.indices.L.labels)

    out <- list()
    out$forecast$mean <- f.mean
    out$forecast$var <- f.var
    out$forecast$cor <- f.cor
    out$forecast$meta <- list(xC = xC, TS_length = ahead)

    if(inc_samples) {
        ## Weighted MCMC draws: mean is [iter, period, nt];
        ## var and cor are [iter, period, nt, nt].
        out$forecast$samples$mean <- .weighted_samples(object.f, "rts_forecasted", weights)$rts_forecasted[, , , drop = FALSE]
        out$forecast$samples$var <- .weighted_samples(object.f, "H_forecasted", weights)$H_forecasted[, , , , drop = FALSE]
        out$forecast$samples$cor <- .weighted_samples(object.f, "R_forecasted", weights)$R_forecasted[, , , , drop = FALSE]
    }

    ## Extract all log_lik simulations (only available when 'newdata' was
    ## supplied, which is the LFO-CV path).
    if(compute_log_lik == 1) {
        log_lik <- lapply(object.f, function(x) {
            rstan::extract(x, pars = "log_lik")$log_lik
        })
        out$forecast$log_lik <- log_lik
    }

    metaNames <- c("param", "distribution", "num_dist", "nt", "TS_length", "TS_names", "RTS_full", "mgarchQ", "mgarchP", "xC", "meanstructure")
    meta <- with(object[[1]], mget(metaNames))
    meta_bmgarch_list <- lapply(object, function(x) {with(x, mget(metaNames))})

    out$meta_list <- meta_bmgarch_list
    out$meta <- meta
    out$meta$n_mods <- n_mods
    out$meta$digits <- digits
    out$meta$CrI <- CrI
    out$meta$weights <- weights

    ## Model-implied fitted ("backcasted") values for the observed periods.
    out$backcast <- fitted.bmgarch(object, CrI, digits = digits, weights = weights, inc_samples = inc_samples)$backcast

    class(out) <- "forecast.bmgarch"
    return(out)
}
##' Extracts the model-predicted means, variances, and correlations for the fitted data.
##'
##' Whereas \code{\link{forecast.bmgarch}} computes the \emph{forecasted} values for future time periods, \code{fitted.bmgarch} computes the \emph{backcasted} (model-predicted) values for the observed time periods.
##' @title Fitted (backcasting) method for bmgarch objects.
##' @param object bmgarch object.
##' @param CrI Numeric vector (Default: \code{c(.025, .975)}). Lower and upper bound of predictive credible interval.
##' @param digits Integer (Default: 2, optional). Number of digits to round to when printing.
##' @param weights Takes weights from model_weight function. Defaults to 1 -- this parameter is not typically set by user.
##' @param inc_samples Logical (Default: FALSE). Whether to return the MCMC samples for the fitted values.
##' @param ... Not used.
##' @return fitted.bmgarch object. List containing \code{meta}data and the \code{backcast}. Backcast is a list containing three elements:
##' \describe{
##' \item{mean}{\code{[N, 7, TS]} array of mean backcasts, where N is the timeseries length, and TS is the number of time series. E.g., \code{bc$backcast$mean[3,,"tsA"]} is the mean backcast for the third observation in time series "tsA".}
##' \item{var}{\code{[N, 7, TS]} array of variance backcasts, where N is the timeseries length, and TS is the number of time series. E.g., \code{bc$backcast$var[3,,"tsA"]} is the variance backcast for the third observation in time series "tsA".}
##' \item{cor}{\code{[N, 7, TS(TS - 1)/2]} array of correlation backcasts, where N is the timeseries length, and \code{TS(TS - 1)/2} is the number of correlations. E.g., \code{bc$backcast$cor[3,, "tsB_tsA"]} is the backcast for the correlation between "tsB" and "tsA" on the third observation. Lower triangular correlations are saved.}
##' \item{samples}{List}. If inc_samples is \code{TRUE}, then a list of arrays of MCMC samples for means, vars, and cors. Each array is [Iteration, Period, ..., ...].
##' }
##' @importFrom stats fitted
##' @export
##' @examples
##' \donttest{
##' data(panas)
##' # Fit CCC(1,1) and constant meanstructure.
##' fit <- bmgarch(panas, parameterization = "CCC", meanstructure = "constant")
##'
##' # Obtain fitted values
##' fit.bc <- fitted(fit)
##'
##' # Print fitted values
##' print(fit.bc)
##'
##' # Plot fitted values (plot.bmgarch calls fitted internally)
##' plot(fit, type = "var")
##'
##' # Save fitted values as data frame
##' fit.bc.df <- as.data.frame(fit.bc)
##' }
fitted.bmgarch <- function(object, CrI = c(.025, .975), digits = 2, weights = NULL, inc_samples = FALSE, ...) {
    ## Normalize input: internally we always work on a bmgarch_list.
    n_mods <- 1
    if("bmgarch_list" %in% class(object)) {
        n_mods <- length(object)
    } else {
        object <- bmgarch_list(object)
    }

    nt <- object[[1]]$nt
    TS_length <- object[[1]]$TS_length
    TS_names <- object[[1]]$TS_names

    ## Multiple models require user-supplied (e.g., stacking) weights.
    if(n_mods > 1 & is.null(weights)) {
        stop("Weights must be provided.")
    } else if(n_mods == 1) {
        weights <- 1
    }

    fits <- lapply(object, function(m) {m$model_fit})

    ## Weighted posterior summaries of means, (co)variances, and correlations.
    b.mean <- .get_stan_summary(fits, "mu", CrI, weights)
    b.var <- .get_stan_summary(fits, "H", CrI, weights)
    b.cor <- .get_stan_summary(fits, "corH", CrI, weights)

    # Restructure summaries into [period, summary, TS] arrays.
    ## b.mean
    stan_sum_cols <- colnames(b.mean)
    b.mean <- array(b.mean, dim = c(nt, TS_length, ncol(b.mean)))
    b.mean <- aperm(b.mean, c(2, 3, 1))
    dimnames(b.mean) <- list(period = 1:TS_length, stan_sum_cols, TS = TS_names)

    ## b.var: keep only the diagonal elements H[period, a, a].
    b.var.indices <- grep("H\\[[[:digit:]]+,([[:digit:]]+),\\1]", rownames(b.var), value = TRUE)
    ## drop = FALSE guards against collapsing to a vector when a single row
    ## is selected (consistent with forecast.bmgarch's subsetting).
    b.var <- b.var[b.var.indices, , drop = FALSE]
    b.var <- array(b.var, dim = c(nt, TS_length, ncol(b.var)))
    b.var <- aperm(b.var, c(2, 3, 1))
    dimnames(b.var) <- list(period = 1:TS_length, stan_sum_cols, TS = TS_names)

    ## b.cor
    # Lower-triangular indices
    b.cor.indices.L <- which(lower.tri(matrix(0, nt, nt)), arr.ind = TRUE)
    # Labels mapping to TS names
    b.cor.indices.L.labels <- paste0(TS_names[b.cor.indices.L[, 1]], "_", TS_names[b.cor.indices.L[, 2]])
    # Indices as "a,b"
    b.cor.indices.L.char <- paste0(b.cor.indices.L[, 1], ",", b.cor.indices.L[, 2])
    # Indices as "[period,a,b]"
    b.cor.indices.L.all <- paste0("corH[", 1:TS_length, ",", rep(b.cor.indices.L.char, each = TS_length), "]")
    # Keep only the lower-triangular correlation rows.
    b.cor <- b.cor[b.cor.indices.L.all, , drop = FALSE]
    b.cor <- array(b.cor, dim = c(TS_length, length(b.cor.indices.L.char), ncol(b.cor)))
    b.cor <- aperm(b.cor, c(1, 3, 2))
    dimnames(b.cor) <- list(period = 1:TS_length, stan_sum_cols, TS = b.cor.indices.L.labels)

    out <- list()
    out$backcast$mean <- b.mean
    out$backcast$var <- b.var
    out$backcast$cor <- b.cor

    if(inc_samples) {
        ## Weighted MCMC draws for the fitted values.
        out$backcast$samples$mean <- .weighted_samples(fits, "mu", weights)$mu
        out$backcast$samples$var <- .weighted_samples(fits, "H", weights)$H
        out$backcast$samples$cor <- .weighted_samples(fits, "corH", weights)$corH
    }

    metaNames <- c("param", "distribution", "num_dist", "nt", "TS_length", "TS_names", "RTS_full", "mgarchQ", "mgarchP", "xC", "meanstructure")
    meta <- with(object[[1]], mget(metaNames))
    out$meta_list <- lapply(object, function(x) {with(x, mget(metaNames))})
    out$meta <- meta
    out$meta$digits <- digits
    out$meta$n_mods <- n_mods
    out$meta$CrI <- CrI
    out$meta$weights <- weights

    class(out) <- "fitted.bmgarch"
    return(out)
}
##' @title Print method for forecast.bmgarch objects.
##' @param x forecast.bmgarch object. See \code{\link{forecast.bmgarch}}
##' @param ... Not used.
##' @return x (invisible).
##' @author Stephen R. Martin
##' @export
print.forecast.bmgarch <- function(x, ...) {
    ahead <- x$forecast$meta$TS_length
    nt <- x$meta$nt
    TS_names <- x$meta$TS_names
    digits <- x$meta$digits

    if(x$meta$n_mods > 1) {
        .sep()
        cat("LFO-weighted forecasts across ", x$meta$n_mods, "models.")
        .newline()
    }

    # Mean structure: printed when any model in the ensemble estimates means.
    meanstructure <- any(sapply(x$meta_list, function(m) {m$meanstructure == 1}))
    if(meanstructure) {
        .sep()
        cat("[Mean]", "Forecast for", ahead, "ahead:")
        .newline(2)
        for(t in 1:nt) {
            cat(TS_names[t], ":")
            .newline()
            print(round(x$forecast$mean[,,t], digits))
        }
    }

    # Variance
    .sep()
    cat("[Variance]", "Forecast for", ahead, "ahead:")
    .newline(2)
    for(t in 1:nt) {
        cat(TS_names[t], ":")
        .newline()
        print(round(x$forecast$var[,,t], digits))
    }

    # Correlations: only printed when any model is non-CCC (CCC correlations
    # are constant and not forecasted).
    condCor <- any(sapply(x$meta_list, function(m) {m$param != "CCC"}))
    if(condCor) {
        .sep()  # separator added for consistency with the Mean/Variance sections
        cat("[Correlation]", "Forecast for", ahead, "ahead:")
        .newline(2)
        for(t in 1:(nt*(nt - 1) / 2)) {
            cat(dimnames(x$forecast$cor)[[3]][t], ":")
            .newline()
            print(round(x$forecast$cor[,,t], digits))
        }
    }
    # Print methods conventionally return their argument invisibly
    # (consistent with print.fitted.bmgarch).
    return(invisible(x))
}
##' @title Print method for fitted.bmgarch objects.
##' @param x fitted.bmgarch object.
##' @param ... Not used.
##' @return object (invisible).
##' @author Stephen R. Martin
##' @export
print.fitted.bmgarch <- function(x, ...) {
    TS_length <- x$meta$TS_length
    nt <- x$meta$nt
    TS_names <- x$meta$TS_names
    digits <- x$meta$digits

    # Mean structure: only printed when the model estimated means.
    if(x$meta$meanstructure == 1) {
        .sep()
        cat("[Mean]", "Fitted values:")
        .newline(2)
        for(t in 1:nt) {
            cat(TS_names[t], ":")
            .newline()
            print(round(x$backcast$mean[,,t], digits))
        }
    }

    # Variance
    .sep()
    cat("[Variance]", "Fitted values:")
    .newline(2)
    for(t in 1:nt) {
        cat(TS_names[t], ":")
        .newline()
        print(round(x$backcast$var[,,t], digits))
    }

    # Correlations (constant under CCC, hence skipped there).
    if(x$meta$param != "CCC") {
        .sep()  # separator added for consistency with the Mean/Variance sections
        cat("[Correlation]", "Fitted values:")
        .newline(2)
        for(t in 1:(nt*(nt - 1) / 2)) {
            cat(dimnames(x$backcast$cor)[[3]][t], ":")
            .newline()
            print(round(x$backcast$cor[,,t], digits))
        }
    }
    return(invisible(x))
}
##' Helper function for as.data.frame.{fitted, forecast}. Converts predictive array to data.frame.
##'
##'
##' @title Convert predictive array to data.frame.
##' @param arr Array to convert into data frame.
##' @param type String. "backcast" or "forecast".
##' @param param String. "var", "mean", or "cor".
##' @return data.frame. Columns: period, type (backcast, forecast), param (var, mean, cor), TS (which time series, or which correlation for param = cor), summary columns.
##' @author Stephen R. Martin
##' @keywords internal
.pred_array_to_df <- function(arr, type = "backcast", param = "var") {
    ## Slice names along the third margin: one per time series (or per
    ## correlation pair when param = "cor").
    ts_labels <- dimnames(arr)[[3]]
    ## Turn each [period, summary] slice into a data frame carrying the
    ## period as an explicit column.
    pieces <- apply(arr, 3, function(slice) {
        piece <- as.data.frame(slice)
        piece$period <- as.numeric(rownames(slice))
        piece
    })
    for (k in seq_along(pieces)) {
        pieces[[k]]$TS <- ts_labels[k]
    }
    res <- do.call(rbind, pieces)
    res$type <- type
    res$param <- param
    rownames(res) <- NULL
    res
}
##' @title as.data.frame method for forecast.bmgarch objects.
##' @param x forecast.bmgarch object.
##' @param backcast Logical (Default: TRUE). Whether to include "backcasted" values from \code{\link{fitted.bmgarch}} in the data frame.
##' @param ... Not used.
##' @return Data frame.
##' @author Stephen R. Martin
##' @export
as.data.frame.forecast.bmgarch <- function(x, ..., backcast = TRUE ) {
    # Forecasted values
    dfList <- list()
    dfList$forecast.mean <- .pred_array_to_df(x$forecast$mean, "forecast", "mean")
    dfList$forecast.var <- .pred_array_to_df(x$forecast$var, "forecast", "var")

    ## Correlations are included when any model in the ensemble is non-CCC.
    condCor <- any(sapply(x$meta_list, function(m) {m$param != "CCC"}))
    if(condCor) {
        dfList$forecast.cor <- .pred_array_to_df(x$forecast$cor, "forecast", "cor")
    }

    if(backcast) {
        # Backcasted (fitted) values
        dfList$backcast.mean <- .pred_array_to_df(x$backcast$mean, "backcast", "mean")
        dfList$backcast.var <- .pred_array_to_df(x$backcast$var, "backcast", "var")
        ## Use the same ensemble-wide condition as for the forecasted
        ## correlations; previously only the first model's 'param' was
        ## checked here, which skipped backcast correlations for mixed lists.
        if(condCor) {
            dfList$backcast.cor <- .pred_array_to_df(x$backcast$cor, "backcast", "cor")
        }
    }

    # Combine all pieces into one long data frame.
    df <- do.call(rbind, dfList)
    # Re-order columns: period | TS | type | param | summary columns
    desc <- c("period", "TS", "type", "param")
    cn <- colnames(df)
    cn_not_desc <- cn[!(cn %in% desc)]
    df <- df[, c(desc, cn_not_desc)]
    # Sort by parameter, then series, then period.
    df <- df[with(df, order(param, TS, period)), ]
    rownames(df) <- NULL
    return(df)
}
##' @title as.data.frame method for fitted.bmgarch objects.
##' @param x fitted.bmgarch object.
##' @param ... Not used.
##' @return Data frame.
##' @author Stephen R. Martin
##' @export
as.data.frame.fitted.bmgarch <- function(x, ...) {
    dfList <- list()
    dfList$backcast.mean <- .pred_array_to_df(x$backcast$mean, "backcast", "mean")
    dfList$backcast.var <- .pred_array_to_df(x$backcast$var, "backcast", "var")
    ## Match the ensemble-wide check used by the forecast method: include
    ## correlations when any model in the (possibly weighted) list is
    ## non-CCC, not just when the first model is.
    condCor <- any(sapply(x$meta_list, function(m) {m$param != "CCC"}))
    if(condCor) {
        dfList$backcast.cor <- .pred_array_to_df(x$backcast$cor, "backcast", "cor")
    }
    # Combine all pieces into one long data frame.
    df <- do.call(rbind, dfList)
    # Re-order columns: period | TS | type | param | summary columns
    desc <- c("period", "TS", "type", "param")
    cn <- colnames(df)
    cn_not_desc <- cn[!(cn %in% desc)]
    df <- df[, c(desc, cn_not_desc)]
    # Sort by parameter, then series, then period.
    df <- df[with(df, order(param, TS, period)), ]
    rownames(df) <- NULL
    return(df)
}
##' @title Collect bmgarch objects into list.
##' @param ... bmgarch objects.
##' @return List of bmgarch objects. Class: bmgarch_list and bmgarch.
##' @export
bmgarch_list <- function(...) {
    ## Bundle fitted bmgarch objects so that list-aware methods
    ## (forecast, fitted, model_weights) can iterate over them.
    structure(list(...), class = c("bmgarch_list", "bmgarch"))
}
| /fuzzedpackages/bmgarch/R/forecasting_gq.R | no_license | akhikolla/testpackages | R | false | false | 23,405 | r | ##' Estimates (weighted) forecasted means, variances, and correlations from a fitted bmgarch model.
##' @title Forecast method for bmgarch objects.
##' @param object bmgarch object.
##' @param ahead Integer (Default: 1). Periods to be forecasted ahead.
##' @param xC Numeric vector or matrix. Covariates(s) for the constant variance terms in C, or c. Used in a log-linear model on the constant variance terms. If vector, then it acts as a covariate for all constant variance terms. If matrix, must have columns equal to number of time series, and each column acts as a covariate for the respective time series (e.g., column 1 predicts constant variance for time series 1).
##' @param newdata Future datapoints for LFO-CV computation
##' @param CrI Numeric vector (Default: \code{c(.025, .975)}). Lower and upper bound of predictive credible interval.
##' @param seed Integer (Optional). Specify seed for \code{\link[rstan]{sampling}}.
##' @param digits Integer (Default: 2, optional). Number of digits to round to when printing.
##' @param weights Takes weights from model_weight function. Defaults to 1 -- this parameter is not typically set by user.
##' @param L Minimal length of time series before engaging in lfocv
##' @param method Ensemble methods, 'stacking' (default) or 'pseudobma'
##' @param inc_samples Logical (Default: FALSE). Whether to return the MCMC samples for the fitted values.
##' @param ... Not used
##' @return forecast.bmgarch object. List containing \code{forecast}, \code{backcast}, and \code{meta}data.
##' See \code{\link{fitted.bmgarch}} for information on \code{backcast}.
##' \code{forecast} is a list of four components:
##' \describe{
##' \item{mean}{\code{[N, 7, TS]} array of mean forecasts, where N is the timeseries length, and TS is the number of time series. E.g., \code{fc$forecast$mean[3,,"tsA"]} is the 3-ahead mean forecast for time series "tsA".}
##' \item{var}{\code{[N, 7, TS]} array of variance forecasts, where N is the timeseries length, and TS is the number of time series. E.g., \code{fc$forecast$var[3,,"tsA"]} is the 3-ahead variance forecast for time series "tsA".}
##' \item{cor}{\code{[N, 7, TS(TS - 1)/2]} array of correlation forecasts, where N is the timeseries length, and \code{TS(TS - 1)/2} is the number of correlations. E.g., \code{fc$forecast$cor[3,, "tsB_tsA"]} is the 3-ahead forecast for the correlation between "tsB" and "tsA". Lower triangular correlations are saved.}
##' \item{meta}{Meta-data specific to the forecast. I.e., TS_length (number ahead) and xC.}
##' \item{samples}{List}. If inc_samples is \code{TRUE}, then a list of arrays of MCMC samples for means, vars, and cors. Each array is [Iteration, Period, ..., ...].
##' }
##' @aliases forecast
##' @importFrom forecast forecast
##' @export
##' @export forecast
##' @examples
##' \donttest{
##' data(panas)
##' # Fit DCC(2,2) with constant mean structure.
##' fit <- bmgarch(panas, parameterization = "DCC", P = 2, Q = 2, meanstructure = "constant")
##'
##' # Forecast 8 ahead
##' fit.fc <- forecast(fit, ahead = 8)
##'
##' # Print forecasts
##' fit.fc
##' print(fit.fc)
##'
##' # Plot variance forecasts
##' plot(fit.fc, type = "var")
##'
##' # Plot correlation forecasts
##' plot(fit.fc, type = "cor")
##'
##' # Save backcasted and forecasted values as data frame.
##' fit.fc.df <- as.data.frame(fit.fc)
##'
##' # Save only forecasted values as data frame.
##' fit.fc.df <- as.data.frame(fit.fc, backcast = FALSE)
##'
##' # Add another model, compute model weights and perform a model weighted forecast
##'
##' # Fit a DCC(1,1) model
##' fit1 <- bmgarch(panas, parameterization = "DCC", P = 1, Q = 1, meanstructure = "constant")
##'
##' # Compute model stacking weights based on the last 19 time points (with L = 80)
##' blist <- bmgarch_list( fit1, fit )
##' mw <- model_weights(blist, L = 80)
##'
##' # Weighted forecasts:
##' w.fc <- forecast(object = blist, ahead = 8, weights = mw)
##' }
## NOTE(review): this is a byte-for-byte duplicate of the forecast.bmgarch
## definition that appears earlier in this dump (the dataset chunk repeats
## the source file). Only comments were added here; code is unchanged.
forecast.bmgarch <- function(object, ahead = 1, xC = NULL,
                             newdata = NULL, CrI = c(.025, .975),
                             seed = NA, digits = 2, weights = NULL,
                             L = NA, method = 'stacking', inc_samples = FALSE, ...) {
    ## Are we dealing with one object or a list of objects?
    ## Internally everything is handled as a bmgarch_list.
    n_mods <- 1
    if("bmgarch_list" %in% class(object)) {
        n_mods <- length(object)
    } else {
        object <- bmgarch_list(object)
    }
    TS_names <- object[[1]]$TS_names
    # Check for TS name consistency
    TS_names_consistent <- all(sapply(object, function(x) {
        all(x$TS_names == TS_names)
    }))
    if(!TS_names_consistent) {
        # Could *possibly* rearrange the column orders to 'fix' this, but this is a much safer default.
        # Could also check whether the training data are the same across models, to ensure the predictions make sense.
        stop("Time series column names are not consistent across models. Forecasting halted.")
    }
    ## n_mods <- .depth( object )
    # Define a 0 array for stan.
    if(is.null(xC)) {
        xC <- array(0, dim = c(ahead, object[[1]]$nt))
    }
    if(is.null(newdata)) {
        newdata <- array(0, dim = c(ahead, object[[1]]$nt))
        compute_log_lik <- 0
    } else {
        compute_log_lik <- 1
    }
    ## if user provides weights from the model_weigths function
    ## proceed directly to forecasting, else, run model_weights
    ## and extract model weights here
    ## Case 1: No model weights provided
    ## Case 2: model weights from a model_weight object
    ## Case 3: No model weights requested
    if(n_mods > 1 & is.null( weights ) ) {
        mw <- bmgarch::model_weights(bmgarch_objects = object, L = L)
        weights <- mw$wts[]
    } else if( n_mods > 1 & !is.null( weights ) ) {
        weights <- weights$wts[]
    } else if( n_mods == 1 ) {
        weights <- 1
        ## object[[1]] <- object
    }
    ## One generated-quantities run per model, re-using the posterior draws
    ## of the fitted model.
    object.f <- lapply(object, function(m) {
        standat <- list(T = m$TS_length,
                        nt = m$nt,
                        rts = cbind(m$RTS_full),
                        xC = m$xC,
                        Q = m$mgarchQ,
                        P = m$mgarchP,
                        ahead = ahead,
                        meanstructure = m$meanstructure,
                        distribution = m$num_dist,
                        xC_p = xC,
                        future_rts = newdata,
                        compute_log_lik = compute_log_lik)
        gqs_model <- switch(m$param,
                            DCC = stanmodels$forecastDCC,
                            CCC = stanmodels$forecastCCC,
                            BEKK = stanmodels$forecastBEKK,
                            pdBEKK = stanmodels$forecastBEKK,
                            NULL)
        if(is.null(gqs_model)) {
            stop("bmgarch object 'param' does not match a supported model. ",
                 m$param, "is not one in ", paste0(supported_models, collapse = ", "),
                 ".")
        }
        ## NOTE(review): 'backcast', 'nt', 'cast_start', 'forecast_start',
        ## and 'forecast_end' below are computed but never used in this
        ## closure -- apparent leftovers from an earlier trimming approach.
        backcast <- max(m$mgarchP, m$mgarchQ)
        nt <- m$nt
        cast_start <- (m$TS_length - backcast + 1)
        forecast_start <- (m$TS_length + 1)
        forecast_end <- (m$TS_length + ahead)
        ## TODO: Limit pars to only what is needed (H_p, R/R_p, rts_p, mu_p)
        forecasted <- rstan::gqs(gqs_model,
                                 draws = as.matrix(m$model_fit),
                                 data = standat,
                                 seed = seed)
        return(forecasted)
    })
    ## Weighted posterior summaries across models.
    ## Init f.mean
    f.mean <- .get_stan_summary(object.f, "rts_forecasted", CrI, weights)
    ## f.var
    f.var <- .get_stan_summary(object.f, "H_forecasted", CrI, weights)
    ## Init f.cor
    f.cor <- .get_stan_summary(object.f, "R_forecasted", CrI, weights)
    # Restructure to array
    ## backcast <- max(object[[1]]$mgarchP, object[[1]]$mgarchQ)
    nt <- object[[1]]$nt
    ## cast_start <- (object[[1]]$TS_length - backcast + 1)
    forecast_start <- (object[[1]]$TS_length + 1)
    forecast_end <- (object[[1]]$TS_length + ahead)
    ## f.mean
    stan_sum_cols <- colnames(f.mean)
    f.mean <- array(f.mean, dim = c(nt, ahead, ncol(f.mean)))
    f.mean <- aperm(f.mean, c(2, 3, 1))
    dimnames(f.mean) <- list(period = forecast_start:forecast_end, stan_sum_cols, TS = TS_names)
    ## f.var
    ### Pull out indices for the diagonal elements [period, a, a]
    f.var.indices <- grep("H_forecasted\\[[[:digit:]]+,([[:digit:]]+),\\1]", rownames(f.var), value = TRUE)
    f.var <- f.var[f.var.indices, ]
    f.var <- array(f.var, dim = c(nt, ahead, ncol(f.var)))
    f.var <- aperm(f.var, c(2, 3, 1))
    dimnames(f.var) <- list(period = forecast_start:forecast_end, stan_sum_cols, TS = TS_names)
    ## f.cor
    # Lower-triangular indices
    f.cor.indices.L <- which(lower.tri(matrix(0, nt, nt)), arr.ind = TRUE)
    # Labels mapping to TS names
    f.cor.indices.L.labels <- paste0(TS_names[f.cor.indices.L[, 1]], "_",
                                     TS_names[f.cor.indices.L[, 2]])
    # Indices as "a,b"
    f.cor.indices.L.char <- paste0(f.cor.indices.L[, 1], ",", f.cor.indices.L[, 2])
    # Indices as "[period,a,b]"
    f.cor.indices.L.all <- paste0("R_forecasted[", 1:(ahead), ",",
                                  rep(f.cor.indices.L.char, each = (ahead)), "]")
    # Get only these elements.
    f.cor <- f.cor[f.cor.indices.L.all, , drop = FALSE]
    f.cor <- array(f.cor, dim = c(ahead, length(f.cor.indices.L.char), ncol(f.cor)))
    f.cor <- aperm(f.cor, c(1, 3, 2))
    dimnames(f.cor) <- list(period = forecast_start:forecast_end, stan_sum_cols, TS = f.cor.indices.L.labels)
    # Remove backcasts from forecasts.
    ## f.mean <- f.mean[-c(1:backcast), , , drop = FALSE]
    ## f.var <- f.var[-c(1:backcast), , , drop = FALSE]
    ## f.cor <- f.cor[-c(1:backcast), , , drop = FALSE]
    out <- list()
    out$forecast$mean <- f.mean
    out$forecast$var <- f.var
    out$forecast$cor <- f.cor
    out$forecast$meta <- list(xC = xC, TS_length = ahead)
    if(inc_samples) {
        ## Weighted MCMC draws; the mean array is 3-dimensional, var and cor
        ## are 4-dimensional.
        out$forecast$samples$mean <- .weighted_samples(object.f, "rts_forecasted", weights)$rts_forecasted[, , , drop = FALSE]
        out$forecast$samples$var <- .weighted_samples(object.f, "H_forecasted", weights)$H_forecasted[, , , , drop = FALSE]
        out$forecast$samples$cor <- .weighted_samples(object.f, "R_forecasted", weights)$R_forecasted[, , , , drop = FALSE]
        ## out$forecast$samples$mean <- out$forecast$samples$mean[] # Todo, remove backcast
    }
    ## Extract all log_lik simulations
    if(compute_log_lik == 1 ) {
        log_lik <- lapply(object.f, function(x) {
            rstan::extract(x, pars = "log_lik")$log_lik
        })
        out$forecast$log_lik <- log_lik
    }
    metaNames <- c("param", "distribution", "num_dist", "nt", "TS_length", "TS_names", "RTS_full", "mgarchQ", "mgarchP", "xC", "meanstructure")
    meta <- with(object[[1]], mget(metaNames))
    meta_bmgarch_list <- lapply(object, function(x) {with(x, mget(metaNames))})
    out$meta_list <- meta_bmgarch_list
    out$meta <- meta
    out$meta$n_mods <- n_mods
    out$meta$digits <- digits
    out$meta$CrI <- CrI
    out$meta$weights <- weights
    ## Model-implied fitted values for the observed periods.
    out$backcast <- fitted.bmgarch(object, CrI, digits = digits, weights = weights, inc_samples = inc_samples)$backcast
    class(out) <- "forecast.bmgarch"
    return(out)
}
##' Extracts the model-predicted means, variances, and correlations for the fitted data.
##'
##' Whereas \code{\link{forecast.bmgarch}} computes the \emph{forecasted} values for future time periods, \code{fitted.bmgarch} computes the \emph{backcasted} (model-predicted) values for the observed time periods.
##' @title Fitted (backcasting) method for bmgarch objects.
##' @param object bmgarch object.
##' @param CrI Numeric vector (Default: \code{c(.025, .975)}). Lower and upper bound of predictive credible interval.
##' @param digits Integer (Default: 2, optional). Number of digits to round to when printing.
##' @param weights Takes weights from model_weight function. Defaults to 1 -- this parameter is not typically set by user.
##' @param inc_samples Logical (Default: FALSE). Whether to return the MCMC samples for the fitted values.
##' @param ... Not used.
##' @return fitted.bmgarch object. List containing \code{meta}data and the \code{backcast}. Backcast is a list containing three elements:
##' \describe{
##' \item{mean}{\code{[N, 7, TS]} array of mean backcasts, where N is the timeseries length, and TS is the number of time series. E.g., \code{bc$backcast$mean[3,,"tsA"]} is the mean backcast for the third observation in time series "tsA".}
##' \item{var}{\code{[N, 7, TS]} array of variance backcasts, where N is the timeseries length, and TS is the number of time series. E.g., \code{bc$backcast$var[3,,"tsA"]} is the variance backcast for the third observation in time series "tsA".}
##' \item{cor}{\code{[N, 7, TS(TS - 1)/2]} array of correlation backcasts, where N is the timeseries length, and \code{TS(TS - 1)/2} is the number of correlations. E.g., \code{bc$backcast$cor[3,, "tsB_tsA"]} is the backcast for the correlation between "tsB" and "tsA" on the third observation. Lower triangular correlations are saved.}
##' \item{samples}{List}. If inc_samples is \code{TRUE}, then a list of arrays of MCMC samples for means, vars, and cors. Each array is [Iteration, Period, ..., ...].
##' }
##' @importFrom stats fitted
##' @export
##' @examples
##' \donttest{
##' data(panas)
##' # Fit CCC(1,1) and constant meanstructure.
##' fit <- bmgarch(panas, parameterization = "CCC", meanstructure = "constant")
##'
##' # Obtain fitted values
##' fit.bc <- fitted(fit)
##'
##' # Print fitted values
##' print(fit.bc)
##'
##' # Plot fitted values (plot.bmgarch calls fitted internally)
##' plot(fit, type = "var")
##'
##' # Save fitted values as data frame
##' fit.bc.df <- as.data.frame(fit.bc)
##' }
fitted.bmgarch <- function(object, CrI = c(.025, .975), digits = 2, weights = NULL, inc_samples = FALSE, ...) {
  ## Backcasting: model-predicted means, variances, and correlations for the
  ## observed periods, optionally model-averaged across several fits.
  n_mods <- 1
  ## Wrap a single fit in a bmgarch_list so single- and multi-model input
  ## share one code path below.
  if("bmgarch_list" %in% class(object)) {
    n_mods <- length(object)
  } else {
    object <- bmgarch_list(object)
  }
  nt <- object[[1]]$nt
  TS_length <- object[[1]]$TS_length
  TS_names <- object[[1]]$TS_names
  ## Model averaging requires user-supplied weights; a single model gets
  ## weight 1 implicitly.
  if(n_mods > 1 & is.null(weights)) {
    stop("Weights must be provided.")
  } else if(n_mods == 1) {
    weights <- 1
  }
  fits <- lapply(object, function(m) {m$model_fit})
  ## Weighted posterior summaries of the Stan parameters: means (mu),
  ## conditional (co)variances (H), and conditional correlations (corH).
  b.mean <- .get_stan_summary(fits, "mu", CrI, weights)
  b.var <- .get_stan_summary(fits, "H", CrI, weights)
  b.cor <- .get_stan_summary(fits, "corH", CrI, weights)
  # Restructure
  ## b.mean
  stan_sum_cols <- colnames(b.mean)
  ## Reshape the flat summary matrix into [period, stat, TS].
  ## NOTE(review): this assumes the "mu" rows vary over series fastest
  ## (i.e. mu is declared [nt, T] in the Stan model) -- confirm against
  ## the Stan source before relying on the layout.
  b.mean <- array(b.mean, dim = c(nt, TS_length, ncol(b.mean)))
  b.mean <- aperm(b.mean, c(2,3,1))
  dimnames(b.mean) <- list(period = 1:TS_length, stan_sum_cols, TS = TS_names)
  ## b.var
  ## Keep only the diagonal H[., i, i] rows (the conditional variances).
  b.var.indices <- grep("H\\[[[:digit:]]+,([[:digit:]]+),\\1]", rownames(b.var), value = TRUE)
  b.var <- b.var[b.var.indices,]
  ## NOTE(review): same row-ordering assumption as b.mean above.
  b.var <- array(b.var, dim = c(nt, TS_length, ncol(b.var)))
  b.var <- aperm(b.var, c(2, 3, 1))
  dimnames(b.var) <- list(period = 1:TS_length, stan_sum_cols, TS = TS_names)
  ## b.cor
  # Lower-triangular indices
  b.cor.indices.L <- which(lower.tri(matrix(0, nt, nt)), arr.ind = TRUE)
  # Labels mapping to TS names, e.g. "tsB_tsA"
  b.cor.indices.L.labels <- paste0(TS_names[b.cor.indices.L[,1]], "_", TS_names[b.cor.indices.L[,2]])
  # Indices as "a,b"
  b.cor.indices.L.char <- paste0(b.cor.indices.L[,1], ",", b.cor.indices.L[,2])
  # Indices as "corH[period,a,b]" for every observed period
  b.cor.indices.L.all <- paste0("corH[",1:TS_length, ",", rep(b.cor.indices.L.char, each = TS_length),"]")
  # Get only these elements.
  b.cor <- b.cor[b.cor.indices.L.all,]
  b.cor <- array(b.cor, dim = c(TS_length, length(b.cor.indices.L.char), ncol(b.cor)))
  b.cor <- aperm(b.cor, c(1, 3, 2))
  dimnames(b.cor) <- list(period = 1:TS_length, stan_sum_cols, TS = b.cor.indices.L.labels)
  ## Assemble the return object: summary arrays, optional raw MCMC draws,
  ## and (per-model plus merged) metadata.
  out <- list()
  out$backcast$mean <- b.mean
  out$backcast$var <- b.var
  out$backcast$cor <- b.cor
  if(inc_samples) {
    out$backcast$samples$mean <- .weighted_samples(fits, "mu", weights)$mu
    out$backcast$samples$var <- .weighted_samples(fits, "H", weights)$H
    out$backcast$samples$cor <- .weighted_samples(fits, "corH", weights)$corH
  }
  metaNames <- c("param", "distribution", "num_dist", "nt", "TS_length", "TS_names", "RTS_full", "mgarchQ", "mgarchP", "xC", "meanstructure")
  ## out$meta mirrors the first model; out$meta_list keeps every model's
  ## metadata so print/as.data.frame methods can handle ensembles.
  meta <- with(object[[1]], mget(metaNames))
  out$meta_list <- lapply(object, function(x) {with(x, mget(metaNames))})
  out$meta <- meta
  out$meta$digits <- digits
  out$meta$n_mods <- n_mods
  out$meta$CrI <- CrI
  out$meta$weights <- weights
  class(out) <- "fitted.bmgarch"
  return(out)
}
##' @title Print method for forecast.bmgarch objects.
##' @param x forecast.bmgarch object. See \code{\link{forecast.bmgarch}}
##' @param ... Not used.
##' @return x (invisible).
##' @author Stephen R. Martin
##' @export
print.forecast.bmgarch <- function(x, ...) {
  # Pretty-print forecasted means, variances, and (when estimated)
  # correlations stored in a forecast.bmgarch object.
  #
  # Args:
  #   x: forecast.bmgarch object (see forecast.bmgarch).
  #   ...: Not used.
  # Returns: x, invisibly (standard for print methods; the roxygen block
  # already documented "@return x (invisible)" but it was not implemented).
  ahead <- x$forecast$meta$TS_length
  nt <- x$meta$nt
  TS_names <- x$meta$TS_names
  digits <- x$meta$digits
  # Note when the forecasts are an LFO-weighted model average.
  if(x$meta$n_mods > 1) {
    .sep()
    cat("LFO-weighted forecasts across ", x$meta$n_mods, "models.")
    .newline()
  }
  # Mean structure: shown if any of the (possibly averaged) models
  # estimated one.
  meanstructure <- any(sapply(x$meta_list, function(m) {m$meanstructure == 1}))
  if(meanstructure) {
    .sep()
    cat("[Mean]", "Forecast for", ahead, "ahead:")
    .newline(2)
    for(t in seq_len(nt)) {
      cat(TS_names[t], ":")
      .newline()
      print(round(x$forecast$mean[,,t], digits))
    }
  }
  # Variance: always present.
  .sep()
  cat("[Variance]", "Forecast for", ahead, "ahead:")
  .newline(2)
  for(t in seq_len(nt)) {
    cat(TS_names[t], ":")
    .newline()
    print(round(x$forecast$var[,,t], digits))
  }
  # Correlations are constant for CCC parameterizations; print only when
  # at least one model has time-varying correlations.
  condCor <- any(sapply(x$meta_list, function(m) {m$param != "CCC"}))
  if(condCor) {
    cat("[Correlation]", "Forecast for", ahead, "ahead:")
    .newline(2)
    for(t in seq_len(nt * (nt - 1) / 2)) {
      cat(dimnames(x$forecast$cor)[[3]][t], ":")
      .newline()
      print(round(x$forecast$cor[,,t], digits))
    }
  }
  invisible(x)
}
##' @title Print method for fitted.bmgarch objects.
##' @param x fitted.bmgarch object.
##' @param ... Not used.
##' @return object (invisible).
##' @author Stephen R. Martin
##' @export
print.fitted.bmgarch <- function(x, ...) {
  # Pretty-print backcasted (fitted) means, variances, and correlations.
  #
  # Args:
  #   x: fitted.bmgarch object.
  #   ...: Not used.
  # Returns: x, invisibly.
  TS_length <- x$meta$TS_length
  nt <- x$meta$nt
  TS_names <- x$meta$TS_names
  digits <- x$meta$digits
  # Mean structure: shown if any model in the (possibly averaged) fit
  # estimated one.  Mirrors print.forecast.bmgarch, which consults
  # meta_list instead of only the first model's meta entry; the previous
  # check of x$meta$meanstructure missed multi-model ensembles.
  meanstructure <- any(sapply(x$meta_list, function(m) {m$meanstructure == 1}))
  if(meanstructure) {
    .sep()
    cat("[Mean]", "Fitted values:")
    .newline(2)
    for(t in seq_len(nt)) {
      cat(TS_names[t], ":")
      .newline()
      print(round(x$backcast$mean[,,t], digits))
    }
  }
  # Variance: always present.
  .sep()
  cat("[Variance]", "Fitted values:")
  .newline(2)
  for(t in seq_len(nt)) {
    cat(TS_names[t], ":")
    .newline()
    print(round(x$backcast$var[,,t], digits))
  }
  # Correlations are constant for CCC; print only when at least one model
  # has time-varying correlations.
  condCor <- any(sapply(x$meta_list, function(m) {m$param != "CCC"}))
  if(condCor) {
    cat("[Correlation]", "Fitted values:")
    .newline(2)
    for(t in seq_len(nt * (nt - 1) / 2)) {
      cat(dimnames(x$backcast$cor)[[3]][t], ":")
      .newline()
      print(round(x$backcast$cor[,,t], digits))
    }
  }
  invisible(x)
}
##' Helper function for as.data.frame.{fitted, forecast}. Converts predictive array to data.frame.
##'
##'
##' @title Convert predictive array to data.frame.
##' @param arr Array to convert into data frame.
##' @param type String. "backcast" or "forecast".
##' @param param String. "var", "mean", or "cor".
##' @return data.frame. Columns: period, type (backcast, forecast), param (var, mean, cor), TS (which time series, or which correlation for param = cor), summary columns.
##' @author Stephen R. Martin
##' @keywords internal
.pred_array_to_df <- function(arr, type = "backcast", param = "var") {
  # Flatten a [period, stat, TS] prediction array into a long data frame
  # with one row per period x series and columns: the summary stats,
  # period, TS, type, and param.
  #
  # Args:
  #   arr: array to convert; 3rd margin indexes series (or cor pairs).
  #   type: "backcast" or "forecast".
  #   param: "var", "mean", or "cor".
  # Returns: data.frame.
  # (Removed the unused local `dims`.)
  arrnames <- dimnames(arr)
  # One data frame per series (3rd margin); rownames carry the period.
  dfList <- apply(arr, 3, function(x) {
    out <- as.data.frame(x)
    out$period <- as.numeric(rownames(x))
    out
  })
  # Tag each slice with its series (or correlation-pair) label.
  for(i in seq_along(dfList)) {
    dfList[[i]]$TS <- arrnames[[3]][i]
  }
  df <- do.call(rbind, dfList)
  df$type <- type
  df$param <- param
  rownames(df) <- NULL
  return(df)
}
##' @title as.data.frame method for forecast.bmgarch objects.
##' @param x forecast.bmgarch object.
##' @param backcast Logical (Default: True). Whether to include "backcasted" values from \code{\link{fitted.bmgarch}} in data frame.
##' @param ... Not used.
##' @return Data frame.
##' @author Stephen R. Martin
##' @export
as.data.frame.forecast.bmgarch <- function(x, ..., backcast = TRUE ) {
  # Convert forecasts (and, optionally, backcasts) to one long data frame
  # with columns: period, TS, type, param, then the summary statistics.
  dfList <- list()
  dfList$forecast.mean <- .pred_array_to_df(x$forecast$mean, "forecast", "mean")
  dfList$forecast.var <- .pred_array_to_df(x$forecast$var, "forecast", "var")
  # Correlations are only time-varying when at least one model is non-CCC.
  condCor <- any(sapply(x$meta_list, function(m) {m$param != "CCC"}))
  if(condCor) {
    dfList$forecast.cor <- .pred_array_to_df(x$forecast$cor, "forecast", "cor")
  }
  if(backcast) {
    # Backcast
    dfList$backcast.mean <- .pred_array_to_df(x$backcast$mean, "backcast", "mean")
    dfList$backcast.var <- .pred_array_to_df(x$backcast$var, "backcast", "var")
    # Use the same multi-model criterion as the forecasted correlations;
    # checking only x$meta$param (the first model) missed model-averaged
    # ensembles containing a non-CCC fit.
    if(condCor) {
      dfList$backcast.cor <- .pred_array_to_df(x$backcast$cor, "backcast", "cor")
    }
  }
  # Combine
  df <- do.call(rbind, dfList)
  # Re-order columns: period | TS | type | param | stats
  desc <- c("period","TS","type","param")
  cn <- colnames(df)
  cn_not_desc <- cn[!(cn %in% desc)]
  df <- df[,c(desc, cn_not_desc)]
  # Sort by parameter, series, then period.
  df <- df[with(df, order(param, TS, period)),]
  rownames(df) <- NULL
  return(df)
}
##' @title as.data.frame method for fitted.bmgarch objects.
##' @param x fitted.bmgarch object.
##' @param ... Not used.
##' @return Data frame.
##' @author Stephen R. Martin
##' @export
as.data.frame.fitted.bmgarch <- function(x, ...) {
  # Convert backcasted (fitted) values to one long data frame with
  # columns: period, TS, type, param, then the summary statistics.
  pieces <- list(
    backcast.mean = .pred_array_to_df(x$backcast$mean, "backcast", "mean"),
    backcast.var = .pred_array_to_df(x$backcast$var, "backcast", "var")
  )
  # Correlations are constant (not backcast per period) for CCC models.
  if(x$meta$param != "CCC") {
    pieces$backcast.cor <- .pred_array_to_df(x$backcast$cor, "backcast", "cor")
  }
  out <- do.call(rbind, pieces)
  # Descriptor columns come first: period | TS | type | param.
  descriptors <- c("period", "TS", "type", "param")
  remaining <- colnames(out)[!(colnames(out) %in% descriptors)]
  out <- out[, c(descriptors, remaining)]
  # Order rows by parameter, series, then period.
  out <- out[order(out$param, out$TS, out$period), ]
  rownames(out) <- NULL
  return(out)
}
##' @title Collect bmgarch objects into list.
##' @param ... bmgarch objects.
##' @return List of bmgarch objects. Class: bmgarch_list and bmgarch.
##' @export
bmgarch_list <- function(...) {
  # Bundle bmgarch fits for model comparison / averaging; the returned
  # list inherits from both "bmgarch_list" and "bmgarch".
  models <- list(...)
  structure(models, class = c("bmgarch_list", "bmgarch"))
}
|
## ------------------------------------------------------------------------
## Build a randomized trial-design raster: a 900 m x 600 m field divided
## into 3 m plot cells, with 5 treatments laid out in permuted
## Latin-square-like blocks, then exported as a GeoTIFF.
library(sf)
library(raster)
library(fasterize)
library(agricolae)
library(tibble)
library(dplyr)
set.seed(12345)
## Field polygon (UTM zone 16N assumed from EPSG:32616) rasterized at 1 m;
## plots are the 3 m aggregation of that grid.
f_coords <- cbind(c(0, 900), c(0, 600))
pol <- st_as_sfc(st_bbox(st_multipoint(f_coords)))
field <- st_sf(pol, crs = 32616)
rst <- raster(field, res = 1)
trial <- aggregate(rst, 3)
n_treats = 5
treat <- seq(1, n_treats)
## Number of n_treats x n_treats blocks tiling the plot grid.
n_blocks = round(ncell(trial)/(n_treats ** 2))
n_rows = nrow(trial)/n_treats
n_cols = ncol(trial)/n_treats
## Base square template: entry value cycles with row + column index.
## NOTE(review): the literal 5 assumes n_treats == 5; using
## rep(1:n_treats, n_treats) and each = n_treats would generalize.
tidx = rep(1:n_treats, 5) + rep(1:n_treats, each = 5)
tidx = tidx %% n_treats + 1
tidx = matrix(tidx, ncol = n_treats)
## Independently permute rows and columns of the template in every block.
new_idx = lapply(1:n_blocks, function(x) tidx[sample(treat), sample(treat)])
new_idx = unlist(new_idx)
## Rearrange the stack of per-block squares into one field-wide matrix.
dim(new_idx) = c(n_treats, n_treats, n_rows, n_cols)
new_idx = aperm(new_idx, c(1, 3, 2, 4))
dim(new_idx) = c(n_treats * n_rows, n_treats * n_cols)
trial$treat = new_idx
## Alternative agricolae::design.lsd-based assignment, kept for reference.
# trial$id <- 1:ncell(trial)
# trial$pcol = 1:ncol(trial)
# trial$prow = rep(1:nrow(trial), each = ncol(trial))
# trial$block = 1e6 * ceiling(trial$pcol/n_treats) + ceiling(trial$prow/n_treats)
# trial$block = as.numeric(as.factor(trial$block[]))
# trial$col = (trial$pcol - 1) %% n_treats + 1
# trial$row = (trial$prow - 1) %% n_treats + 1
#
#
# rdf <- as_tibble(cbind(row = trial$row[], col = trial$col[], block = trial$block[]))
# df <- as_tibble(design.lsd(treat)$book) %>% mutate_all(as.integer)
# row <- lapply(1:n_blocks, function(x) sample(treat)[df$row]) %>% unlist()
# col <- lapply(1:n_blocks, function(x) sample(treat)[df$col]) %>% unlist()
# block <- rep(1:n_blocks, each = n_treats ** 2)
# tdf <- as_tibble(cbind(row, col, block)) %>%
#   left_join(df, by = c("row", "col")) %>%
#   right_join(rdf, by = c("row", "col", "block"))
#
#
# trial$treat <- tdf$treat
# plot(trial$treat)
## Back to 1 m resolution and export as unsigned 8-bit integers.
rst$treat <- disaggregate(trial$treat, 3)
plot(rst$treat)
writeRaster(rst, 'data/Trial_Design.tif', overwrite = TRUE, datatype = 'INT1U')
## ------------------------------------------------------------------------
## Simulate yield surfaces over the trial grid and recover the spatially
## varying treatment response with a geographically weighted regression.
trial_grd <- rasterToPoints(rst, spatial = TRUE)
gridded(trial_grd) <- TRUE
# Unconditional Gaussian simulation: short-range (50 m) field used as the
# reference (treatment-independent) yield surface.
m <- gstat::vgm(psill = 1, model = "Gau",
                range = 50,
                nugget = 0.1)
g.dummy <- gstat::gstat(
  formula = z ~ 1,
  dummy = TRUE, beta = 0,
  model = m, nmax = 10
)
rst_sim <- predict(g.dummy, trial_grd, nsim = 1)
rst$Yield_Ref <- raster::scale(raster::stack(rst_sim))
rst$Yield_Ref = 1e3 * rst$Yield_Ref
plot(rst$Yield_Ref)
# Longer-range (100 m) field drives the spatially varying response.
m <- gstat::vgm(psill = 1, model = "Gau",
                range = 100,
                nugget = 0.1)
g.dummy <- gstat::gstat(
  formula = z ~ 1,
  dummy = TRUE, beta = 0,
  model = m, nmax = 10
)
rst_sim <- predict(g.dummy, trial_grd, nsim = 1)
rst$Error = 2e2 * rnorm(ncell(rst_sim))
rst$Treat_Resp <- raster::scale(raster::stack(rst_sim))
# Quadratic response: yield = NR_gwr * d + NR2_gwr * d^2, so the optimum
# dose is d = -0.5 * NR_gwr / NR2_gwr.
rst$NR_gwr = 10 + 2.5 * rst$Treat_Resp
rst$NR2_gwr = -0.05
rst$NR_opt = -0.5 * rst$NR_gwr / rst$NR2_gwr
hist(rst$NR_opt[])
# Centered treatment dose.  BUG FIX: the design layer is named "treat"
# (see the design step above); "rst$Treat" does not exist at this point.
rst$TreatD <- rst$treat - 100
rst$TreatD2 <- rst$TreatD**2
rst$NRD_opt = rst$TreatD - rst$NR_opt
hist(rst$NRD_opt[])
# BUG FIX: the quadratic term must use TreatD2 (= TreatD^2); the original
# multiplied NR2_gwr by TreatD, leaving TreatD2 computed but unused even
# though the recovery model below fits poly(TreatD, 2).
rst$Treat_Yield = rst$NR_gwr * rst$TreatD + rst$NR2_gwr * rst$TreatD2
rst$Yield_Obs = 1e4 + rst$Yield_Ref + rst$Error + rst$Treat_Yield
# Sanity check: global quadratic fit of observed yield on dose.
lm0 = lm(Yield_Obs ~ poly(TreatD, 2, raw = TRUE), data.frame(rst[]))
summary(lm0)
rst$Yield_Res <- residuals(lm0)
# GWR with a fixed Gaussian bandwidth (CV/AIC selection left commented
# out for speed).
trial_grd = rasterToPoints(rst, spatial = TRUE)
gridded(trial_grd) = TRUE
pts <- sp::coordinates(trial_grd)
dMat <- GWmodel::gw.dist(pts, pts)
gwr.fml <- as.formula(Yield_Obs ~ TreatD)
bwcv = 20
# bwcv <- GWmodel::bw.gwr(gwr.fml, trial_grd,
#                         approach = "AIC", dMat = dMat,
#                         kernel = "gaussian"
# )
gwr.model <- GWmodel::gwr.basic(gwr.fml, trial_grd,
                                bw = bwcv, dMat = dMat,
                                kernel = "gaussian"
)
print(gwr.model)
# Compare recovered local slopes/intercepts against the simulated truth.
gwr_r <- gwr.model$SDF
gridded(gwr_r) = TRUE
gwr_rst = stack(gwr_r)
gwr_rst$Treat = gwr_rst$TreatD
plot(gwr_rst)
plot(gwr_rst$Treat[], rst$NR_gwr[], asp = 1)
cor(gwr_rst$Treat[], rst$NR_gwr[])
plot(rst$NR_gwr)
plot(gwr_rst$Treat)
plot(gwr_rst$Intercept)
plot(rst$Yield_Ref)
plot(gwr_rst$Intercept[], rst$Yield_Ref[], asp = 1)
cor(gwr_rst$Intercept[], rst$Yield_Ref[])
| /R/Trial_Design_v0.R | no_license | RodrigoAgronomia/ofpe_icpa | R | false | false | 4,212 | r | ## ------------------------------------------------------------------------
## Build a randomized trial-design raster: a 900 m x 600 m field divided
## into 3 m plot cells, with 5 treatments laid out in permuted
## Latin-square-like blocks, then exported as a GeoTIFF.
library(sf)
library(raster)
library(fasterize)
library(agricolae)
library(tibble)
library(dplyr)
set.seed(12345)
## Field polygon (UTM zone 16N assumed from EPSG:32616) rasterized at 1 m;
## plots are the 3 m aggregation of that grid.
f_coords <- cbind(c(0, 900), c(0, 600))
pol <- st_as_sfc(st_bbox(st_multipoint(f_coords)))
field <- st_sf(pol, crs = 32616)
rst <- raster(field, res = 1)
trial <- aggregate(rst, 3)
n_treats = 5
treat <- seq(1, n_treats)
## Number of n_treats x n_treats blocks tiling the plot grid.
n_blocks = round(ncell(trial)/(n_treats ** 2))
n_rows = nrow(trial)/n_treats
n_cols = ncol(trial)/n_treats
## Base square template: entry value cycles with row + column index.
## NOTE(review): the literal 5 assumes n_treats == 5; using
## rep(1:n_treats, n_treats) and each = n_treats would generalize.
tidx = rep(1:n_treats, 5) + rep(1:n_treats, each = 5)
tidx = tidx %% n_treats + 1
tidx = matrix(tidx, ncol = n_treats)
## Independently permute rows and columns of the template in every block.
new_idx = lapply(1:n_blocks, function(x) tidx[sample(treat), sample(treat)])
new_idx = unlist(new_idx)
## Rearrange the stack of per-block squares into one field-wide matrix.
dim(new_idx) = c(n_treats, n_treats, n_rows, n_cols)
new_idx = aperm(new_idx, c(1, 3, 2, 4))
dim(new_idx) = c(n_treats * n_rows, n_treats * n_cols)
trial$treat = new_idx
## Alternative agricolae::design.lsd-based assignment, kept for reference.
# trial$id <- 1:ncell(trial)
# trial$pcol = 1:ncol(trial)
# trial$prow = rep(1:nrow(trial), each = ncol(trial))
# trial$block = 1e6 * ceiling(trial$pcol/n_treats) + ceiling(trial$prow/n_treats)
# trial$block = as.numeric(as.factor(trial$block[]))
# trial$col = (trial$pcol - 1) %% n_treats + 1
# trial$row = (trial$prow - 1) %% n_treats + 1
#
#
# rdf <- as_tibble(cbind(row = trial$row[], col = trial$col[], block = trial$block[]))
# df <- as_tibble(design.lsd(treat)$book) %>% mutate_all(as.integer)
# row <- lapply(1:n_blocks, function(x) sample(treat)[df$row]) %>% unlist()
# col <- lapply(1:n_blocks, function(x) sample(treat)[df$col]) %>% unlist()
# block <- rep(1:n_blocks, each = n_treats ** 2)
# tdf <- as_tibble(cbind(row, col, block)) %>%
#   left_join(df, by = c("row", "col")) %>%
#   right_join(rdf, by = c("row", "col", "block"))
#
#
# trial$treat <- tdf$treat
# plot(trial$treat)
## Back to 1 m resolution and export as unsigned 8-bit integers.
rst$treat <- disaggregate(trial$treat, 3)
plot(rst$treat)
writeRaster(rst, 'data/Trial_Design.tif', overwrite = TRUE, datatype = 'INT1U')
## ------------------------------------------------------------------------
## Simulate yield surfaces over the trial grid and recover the spatially
## varying treatment response with a geographically weighted regression.
trial_grd <- rasterToPoints(rst, spatial = TRUE)
gridded(trial_grd) <- TRUE
# Unconditional Gaussian simulation: short-range (50 m) field used as the
# reference (treatment-independent) yield surface.
m <- gstat::vgm(psill = 1, model = "Gau",
                range = 50,
                nugget = 0.1)
g.dummy <- gstat::gstat(
  formula = z ~ 1,
  dummy = TRUE, beta = 0,
  model = m, nmax = 10
)
rst_sim <- predict(g.dummy, trial_grd, nsim = 1)
rst$Yield_Ref <- raster::scale(raster::stack(rst_sim))
rst$Yield_Ref = 1e3 * rst$Yield_Ref
plot(rst$Yield_Ref)
# Longer-range (100 m) field drives the spatially varying response.
m <- gstat::vgm(psill = 1, model = "Gau",
                range = 100,
                nugget = 0.1)
g.dummy <- gstat::gstat(
  formula = z ~ 1,
  dummy = TRUE, beta = 0,
  model = m, nmax = 10
)
rst_sim <- predict(g.dummy, trial_grd, nsim = 1)
rst$Error = 2e2 * rnorm(ncell(rst_sim))
rst$Treat_Resp <- raster::scale(raster::stack(rst_sim))
# Quadratic response: yield = NR_gwr * d + NR2_gwr * d^2, so the optimum
# dose is d = -0.5 * NR_gwr / NR2_gwr.
rst$NR_gwr = 10 + 2.5 * rst$Treat_Resp
rst$NR2_gwr = -0.05
rst$NR_opt = -0.5 * rst$NR_gwr / rst$NR2_gwr
hist(rst$NR_opt[])
# Centered treatment dose.  BUG FIX: the design layer is named "treat"
# (see the design step above); "rst$Treat" does not exist at this point.
rst$TreatD <- rst$treat - 100
rst$TreatD2 <- rst$TreatD**2
rst$NRD_opt = rst$TreatD - rst$NR_opt
hist(rst$NRD_opt[])
# BUG FIX: the quadratic term must use TreatD2 (= TreatD^2); the original
# multiplied NR2_gwr by TreatD, leaving TreatD2 computed but unused even
# though the recovery model below fits poly(TreatD, 2).
rst$Treat_Yield = rst$NR_gwr * rst$TreatD + rst$NR2_gwr * rst$TreatD2
rst$Yield_Obs = 1e4 + rst$Yield_Ref + rst$Error + rst$Treat_Yield
# Sanity check: global quadratic fit of observed yield on dose.
lm0 = lm(Yield_Obs ~ poly(TreatD, 2, raw = TRUE), data.frame(rst[]))
summary(lm0)
rst$Yield_Res <- residuals(lm0)
# GWR with a fixed Gaussian bandwidth (CV/AIC selection left commented
# out for speed).
trial_grd = rasterToPoints(rst, spatial = TRUE)
gridded(trial_grd) = TRUE
pts <- sp::coordinates(trial_grd)
dMat <- GWmodel::gw.dist(pts, pts)
gwr.fml <- as.formula(Yield_Obs ~ TreatD)
bwcv = 20
# bwcv <- GWmodel::bw.gwr(gwr.fml, trial_grd,
#                         approach = "AIC", dMat = dMat,
#                         kernel = "gaussian"
# )
gwr.model <- GWmodel::gwr.basic(gwr.fml, trial_grd,
                                bw = bwcv, dMat = dMat,
                                kernel = "gaussian"
)
print(gwr.model)
# Compare recovered local slopes/intercepts against the simulated truth.
gwr_r <- gwr.model$SDF
gridded(gwr_r) = TRUE
gwr_rst = stack(gwr_r)
gwr_rst$Treat = gwr_rst$TreatD
plot(gwr_rst)
plot(gwr_rst$Treat[], rst$NR_gwr[], asp = 1)
cor(gwr_rst$Treat[], rst$NR_gwr[])
plot(rst$NR_gwr)
plot(gwr_rst$Treat)
plot(gwr_rst$Intercept)
plot(rst$Yield_Ref)
plot(gwr_rst$Intercept[], rst$Yield_Ref[], asp = 1)
cor(gwr_rst$Intercept[], rst$Yield_Ref[])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.clouddirectory_operations.R
\name{attach_object}
\alias{attach_object}
\title{Attaches an existing object to another object}
\usage{
attach_object(DirectoryArn, ParentReference, ChildReference, LinkName)
}
\arguments{
\item{DirectoryArn}{[required] Amazon Resource Name (ARN) that is associated with the Directory where both objects reside. For more information, see ARNs (Amazon Resource Names).}
\item{ParentReference}{[required] The parent object reference.}
\item{ChildReference}{[required] The child object reference to be attached to the object.}
\item{LinkName}{[required] The link name with which the child object is attached to the parent.}
}
\description{
Attaches an existing object to another object. An object can be accessed in two ways:
}
\details{
\enumerate{
\item Using the path
\item Using \code{ObjectIdentifier}
}
}
\section{Accepted Parameters}{
\preformatted{attach_object(
DirectoryArn = "string",
ParentReference = list(
Selector = "string"
),
ChildReference = list(
Selector = "string"
),
LinkName = "string"
)
}
}
| /service/paws.clouddirectory/man/attach_object.Rd | permissive | CR-Mercado/paws | R | false | true | 1,124 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.clouddirectory_operations.R
\name{attach_object}
\alias{attach_object}
\title{Attaches an existing object to another object}
\usage{
attach_object(DirectoryArn, ParentReference, ChildReference, LinkName)
}
\arguments{
\item{DirectoryArn}{[required] Amazon Resource Name (ARN) that is associated with the Directory where both objects reside. For more information, see ARNs (Amazon Resource Names).}
\item{ParentReference}{[required] The parent object reference.}
\item{ChildReference}{[required] The child object reference to be attached to the object.}
\item{LinkName}{[required] The link name with which the child object is attached to the parent.}
}
\description{
Attaches an existing object to another object. An object can be accessed in two ways:
}
\details{
\enumerate{
\item Using the path
\item Using \code{ObjectIdentifier}
}
}
\section{Accepted Parameters}{
\preformatted{attach_object(
DirectoryArn = "string",
ParentReference = list(
Selector = "string"
),
ChildReference = list(
Selector = "string"
),
LinkName = "string"
)
}
}
|
## Extracted example code for prefmod::patt.design: design-matrix
## generation for paired-comparison patterns from rating (Likert) data.
library(prefmod)
### Name: patt.design
### Title: Paired Comparison Patterns - Design Matrix Generation
### Aliases: patt.design
### Keywords: models regression
### ** Examples
# mini example with three Likert items and two subject covariates
dsgnmat <- patt.design(xmpl, nitems = 3, resptype = "rating",
                       ia = TRUE, cov.sel = "ALL")
head(dsgnmat)
# ILLUSTRATING THE ISSP2000 EXAMPLE
# simplified version of the analysis as given in Dittrich et al. (2007).
design <- patt.design(issp2000, nitems = 6, resptype = "rating",
                      cov.sel = c("SEX", "EDU"))
# - fit null multinomial model (basic model for items without subject
#   covariates) through Poisson distribution.
# - SEX:EDU parameters are nuisance parameters
# - the last item (GENE) becomes a reference item in the model and is aliased;
#   all other items are compared to this last item
# item parameters with undecided effects and no covariate effects.
summary(glm(y ~ SEX*EDU
            + CAR+IND+FARM+WATER+TEMP+GENE
            + u12+u13+u23+u14+u24+u34+u15+u25+u35+u45+u16+u26+u36+u46+u56,
            data = design, family = poisson))
# now add main effect of SEX on items
summary(glm(y ~ SEX:EDU
            + CAR+IND+FARM+WATER+TEMP+GENE
            + (CAR+IND+FARM+WATER+TEMP+GENE):SEX
            + u12+u13+u23+u14+u24+u34+u15+u25+u35+u45+u16+u26+u36+u46+u56,
            data = design, family = poisson))
| /data/genthat_extracted_code/prefmod/examples/patt.design.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,330 | r | library(prefmod)
## Extracted example code for prefmod::patt.design: design-matrix
## generation for paired-comparison patterns from rating (Likert) data.
## (Requires prefmod to be attached for patt.design, xmpl, issp2000.)
### Name: patt.design
### Title: Paired Comparison Patterns - Design Matrix Generation
### Aliases: patt.design
### Keywords: models regression
### ** Examples
# mini example with three Likert items and two subject covariates
dsgnmat <- patt.design(xmpl, nitems = 3, resptype = "rating",
                       ia = TRUE, cov.sel = "ALL")
head(dsgnmat)
# ILLUSTRATING THE ISSP2000 EXAMPLE
# simplified version of the analysis as given in Dittrich et al. (2007).
design <- patt.design(issp2000, nitems = 6, resptype = "rating",
                      cov.sel = c("SEX", "EDU"))
# - fit null multinomial model (basic model for items without subject
#   covariates) through Poisson distribution.
# - SEX:EDU parameters are nuisance parameters
# - the last item (GENE) becomes a reference item in the model and is aliased;
#   all other items are compared to this last item
# item parameters with undecided effects and no covariate effects.
summary(glm(y ~ SEX*EDU
            + CAR+IND+FARM+WATER+TEMP+GENE
            + u12+u13+u23+u14+u24+u34+u15+u25+u35+u45+u16+u26+u36+u46+u56,
            data = design, family = poisson))
# now add main effect of SEX on items
summary(glm(y ~ SEX:EDU
            + CAR+IND+FARM+WATER+TEMP+GENE
            + (CAR+IND+FARM+WATER+TEMP+GENE):SEX
            + u12+u13+u23+u14+u24+u34+u15+u25+u35+u45+u16+u26+u36+u46+u56,
            data = design, family = poisson))
|
## Extract a user-supplied $se component from a model object, returning
## NULL (with a warning) when none is present.  `$` errors on S4 objects
## such as 'mer'; try() absorbs that and the result is treated as missing.
getCustomSE <- function(x) {
  se <- try(x$se)
  if (is.null(se) || inherits(se, "try-error")) {
    warning("Oops: Custom se display requested but none provided.")
    return(NULL)
  }
  # NA coefficients usually indicate a singular model whose se vector
  # will not line up with the coefficient rows.
  if (any(is.na(coef(x)))) {
    warning("SE different size from model coefs; ",
            "a model is probably singular. Dropping rows.")
  }
  se
}
tValue <- function(est, se) {
  # t statistics: estimate / custom standard error.  NA estimates are
  # dropped before dividing; a length mismatch (usually a singular model)
  # is warned about but otherwise left to vector recycling.
  if (length(est) != length(se)) {
    warning("Length mismatch between coefs and $se; ",
            "probably result of model singularity.",
            call. = FALSE)
  }
  kept <- est[!is.na(est)]
  kept / se
}
fround <- function (x, digits) {
  # Round and format, padding with zeros to exactly `digits` decimals.
  rounded <- round(x, digits)
  format(rounded, nsmall = digits)
}
pfround <- function (x, digits) {
  # Print the fixed-decimal rendering of x without surrounding quotes.
  print(fround(x, digits), quote = FALSE)
}
print.apsrtable <- function(x, ...) {
  # An apsrtable object is a character vector of LaTeX markup; emit it
  # verbatim.  Return the object invisibly, as S3 print methods should,
  # so the table can be piped or reused after printing.
  cat(paste(x))
  invisible(x)
}
### This borrowed from package 'arm'
## Format a VarCorr object (from lme4-style models) as a character matrix
## of variance components with columns Groups / Name / Variance / Std.Dev.,
## plus lower-triangular correlation columns when any grouping factor has
## more than one term.
## `varc`: VarCorr result; `useScale`: negative when the model has no
## residual scale (the last row is then relabelled); `digits`: rounding.
## NOTE(review): relies on fround() defined above and on the "sc",
## "stddev" and "correlation" attributes that VarCorr attaches -- confirm
## against the lme4 version in use.
as.matrix.VarCorr <- function (varc, useScale, digits){
  ## VarCorr function for lmer objects, altered as follows:
  ## 1. specify rounding
  ## 2. print statement at end is removed
  ## 3. reMat is returned
  ## 4. last line kept in reMat even when there's no error term
  sc <- attr(varc, "sc")[[1]]
  if(is.na(sc)) sc <- 1
  recorr <- lapply(varc, function(el) attr(el, "correlation"))
  ## Residual std. dev. is appended as a pseudo grouping factor.
  reStdDev <- c(lapply(varc, function(el) attr(el, "stddev")),
                list(Residual = sc))
  reLens <- unlist(c(lapply(reStdDev, length)))
  reMat <- array('', c(sum(reLens), 4),
                 list(rep('', sum(reLens)),
                      c("Groups", "Name", "Variance", "Std.Dev.")))
  ## Group label only on the first row of each grouping factor.
  reMat[1+cumsum(reLens)-reLens, 1] <- names(reLens)
  reMat[,2] <- c(unlist(lapply(reStdDev, names)), "")
  reMat[,3] <- fround(unlist(reStdDev)^2, digits)
  reMat[,4] <- fround(unlist(reStdDev), digits)
  ## Correlations: lower triangle only, padded so all factors' blocks have
  ## the same number of columns.
  if (any(reLens > 1)) {
    maxlen <- max(reLens)
    corr <-
      do.call("rbind",
              lapply(recorr,
                     function(x, maxlen) {
                       x <- as(x, "matrix")
                       cc <- fround (x, digits)
                       cc[!lower.tri(cc)] <- ""
                       nr <- dim(cc)[1]
                       if (nr >= maxlen) return(cc)
                       cbind(cc, matrix("", nr, maxlen-nr))
                     }, maxlen))
    colnames(corr) <- c("Corr", rep("", maxlen - 1))
    reMat <- cbind(reMat, rbind(corr, rep("", ncol(corr))))
  }
  ## Keep the last row even without a residual term, but relabel it.
  if (useScale<0) reMat[nrow(reMat),] <-
    c ("No residual sd", rep("",ncol(reMat)-1))
  return (reMat)
}
## Given a list of model summaries (or anything with a coef method) and a
## master (unioned) vector of coefficient names, tag each summary with a
## "var.pos" attribute: for each of its coefficient rows, the row's index
## within the master name vector.
coefPosition <- function(model.summaries, coefnames) {
  tagged <- lapply(model.summaries, function(m) {
    attr(m, "var.pos") <- match(rownames(coef(m)), coefnames)
    m
  })
  return(tagged)
}
coef.model.info <- function(object, ...) {
  # Flatten the model.info list into a one-column matrix, returned
  # invisibly (the names of the list become the row names).
  flat <- as.matrix(unlist(object))
  invisible(flat)
}
## RULES: All according to longest model,
## then left to right
## RESULT: union of all models' coefficient names in requested order.
orderCoef <- function(model.summaries, order = "lr") {
  # Seed the master list from one model -- the leftmost ("lr", default),
  # the rightmost ("rl"), or the one with most coefficients ("longest") --
  # then append names not yet seen while scanning models left to right.
  nmodels <- length(model.summaries)
  mlength <- sapply(model.summaries, function(x) NROW(coef(x)))
  longest <- which.max(mlength)
  first <- if (order == "rl") nmodels else 1
  seed <- if (order == "longest") longest else first
  coefnames <- rownames(coef(model.summaries[[seed]]))
  for (m in model.summaries) {
    nm <- rownames(coef(m))
    hit <- match(nm, coefnames, nomatch = 0)
    coefnames <- c(coefnames, nm[which(is.na(hit) | hit == 0)])
  }
  return(coefnames)
}
| /pkg/R/Internal.R | no_license | malecki/apsrtable | R | false | false | 4,222 | r |
## Extract a user-supplied $se component from a model object, returning
## NULL (with a warning) when none is present.  `$` errors on S4 objects
## such as 'mer'; try() absorbs that and the result is treated as missing.
getCustomSE <- function(x) {
  se <- try(x$se)
  if (is.null(se) || inherits(se, "try-error")) {
    warning("Oops: Custom se display requested but none provided.")
    return(NULL)
  }
  # NA coefficients usually indicate a singular model whose se vector
  # will not line up with the coefficient rows.
  if (any(is.na(coef(x)))) {
    warning("SE different size from model coefs; ",
            "a model is probably singular. Dropping rows.")
  }
  se
}
tValue <- function(est, se) {
  # t statistics: estimate / custom standard error.  NA estimates are
  # dropped before dividing; a length mismatch (usually a singular model)
  # is warned about but otherwise left to vector recycling.
  if (length(est) != length(se)) {
    warning("Length mismatch between coefs and $se; ",
            "probably result of model singularity.",
            call. = FALSE)
  }
  kept <- est[!is.na(est)]
  kept / se
}
fround <- function (x, digits) {
  # Round and format, padding with zeros to exactly `digits` decimals.
  rounded <- round(x, digits)
  format(rounded, nsmall = digits)
}
pfround <- function (x, digits) {
  # Print the fixed-decimal rendering of x without surrounding quotes.
  print(fround(x, digits), quote = FALSE)
}
print.apsrtable <- function(x, ...) {
  # An apsrtable object is a character vector of LaTeX markup; emit it
  # verbatim.  Return the object invisibly, as S3 print methods should,
  # so the table can be piped or reused after printing.
  cat(paste(x))
  invisible(x)
}
### This borrowed from package 'arm'
## Format a VarCorr object (from lme4-style models) as a character matrix
## of variance components with columns Groups / Name / Variance / Std.Dev.,
## plus lower-triangular correlation columns when any grouping factor has
## more than one term.
## `varc`: VarCorr result; `useScale`: negative when the model has no
## residual scale (the last row is then relabelled); `digits`: rounding.
## NOTE(review): relies on fround() defined above and on the "sc",
## "stddev" and "correlation" attributes that VarCorr attaches -- confirm
## against the lme4 version in use.
as.matrix.VarCorr <- function (varc, useScale, digits){
  ## VarCorr function for lmer objects, altered as follows:
  ## 1. specify rounding
  ## 2. print statement at end is removed
  ## 3. reMat is returned
  ## 4. last line kept in reMat even when there's no error term
  sc <- attr(varc, "sc")[[1]]
  if(is.na(sc)) sc <- 1
  recorr <- lapply(varc, function(el) attr(el, "correlation"))
  ## Residual std. dev. is appended as a pseudo grouping factor.
  reStdDev <- c(lapply(varc, function(el) attr(el, "stddev")),
                list(Residual = sc))
  reLens <- unlist(c(lapply(reStdDev, length)))
  reMat <- array('', c(sum(reLens), 4),
                 list(rep('', sum(reLens)),
                      c("Groups", "Name", "Variance", "Std.Dev.")))
  ## Group label only on the first row of each grouping factor.
  reMat[1+cumsum(reLens)-reLens, 1] <- names(reLens)
  reMat[,2] <- c(unlist(lapply(reStdDev, names)), "")
  reMat[,3] <- fround(unlist(reStdDev)^2, digits)
  reMat[,4] <- fround(unlist(reStdDev), digits)
  ## Correlations: lower triangle only, padded so all factors' blocks have
  ## the same number of columns.
  if (any(reLens > 1)) {
    maxlen <- max(reLens)
    corr <-
      do.call("rbind",
              lapply(recorr,
                     function(x, maxlen) {
                       x <- as(x, "matrix")
                       cc <- fround (x, digits)
                       cc[!lower.tri(cc)] <- ""
                       nr <- dim(cc)[1]
                       if (nr >= maxlen) return(cc)
                       cbind(cc, matrix("", nr, maxlen-nr))
                     }, maxlen))
    colnames(corr) <- c("Corr", rep("", maxlen - 1))
    reMat <- cbind(reMat, rbind(corr, rep("", ncol(corr))))
  }
  ## Keep the last row even without a residual term, but relabel it.
  if (useScale<0) reMat[nrow(reMat),] <-
    c ("No residual sd", rep("",ncol(reMat)-1))
  return (reMat)
}
## Given a list of model summaries (or anything with a coef method) and a
## master (unioned) vector of coefficient names, tag each summary with a
## "var.pos" attribute: for each of its coefficient rows, the row's index
## within the master name vector.
coefPosition <- function(model.summaries, coefnames) {
  tagged <- lapply(model.summaries, function(m) {
    attr(m, "var.pos") <- match(rownames(coef(m)), coefnames)
    m
  })
  return(tagged)
}
coef.model.info <- function(object, ...) {
  # Flatten the model.info list into a one-column matrix, returned
  # invisibly (the names of the list become the row names).
  flat <- as.matrix(unlist(object))
  invisible(flat)
}
## RULES: All according to longest model,
## then left to right
## RESULT: union of all models' coefficient names in requested order.
orderCoef <- function(model.summaries, order = "lr") {
  # Seed the master list from one model -- the leftmost ("lr", default),
  # the rightmost ("rl"), or the one with most coefficients ("longest") --
  # then append names not yet seen while scanning models left to right.
  nmodels <- length(model.summaries)
  mlength <- sapply(model.summaries, function(x) NROW(coef(x)))
  longest <- which.max(mlength)
  first <- if (order == "rl") nmodels else 1
  seed <- if (order == "longest") longest else first
  coefnames <- rownames(coef(model.summaries[[seed]]))
  for (m in model.summaries) {
    nm <- rownames(coef(m))
    hit <- match(nm, coefnames, nomatch = 0)
    coefnames <- c(coefnames, nm[which(is.na(hit) | hit == 0)])
  }
  return(coefnames)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{read_alitv}
\alias{read_alitv}
\title{Read AliTV .json file}
\usage{
read_alitv(file)
}
\arguments{
\item{file}{path to json}
}
\value{
list with seqs, genes, and links
}
\description{
this file contains sequences, links and (optionally) genes
}
\examples{
ali <- read_alitv("https://alitvteam.github.io/AliTV/d3/data/chloroplasts.json")
gggenomes(ali$seqs, ali$genes, links=ali$links) +
geom_seq() +
geom_bin_label() +
geom_gene(aes(fill=class)) +
geom_link()
p <- gggenomes(ali$seqs, ali$genes, links=ali$links) +
geom_seq() +
geom_bin_label() +
geom_gene(aes(color=class)) +
geom_link(aes(fill=identity)) +
scale_fill_distiller(palette="RdYlGn", direction = 1)
p \%>\% flip_seq("Same_gi") \%>\% pick(1,3,2,4,5,6,7,8)
}
| /man/read_alitv.Rd | permissive | quanrd/gggenomes | R | false | true | 831 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{read_alitv}
\alias{read_alitv}
\title{Read AliTV .json file}
\usage{
read_alitv(file)
}
\arguments{
\item{file}{path to json}
}
\value{
list with seqs, genes, and links
}
\description{
this file contains sequences, links and (optionally) genes
}
\examples{
ali <- read_alitv("https://alitvteam.github.io/AliTV/d3/data/chloroplasts.json")
gggenomes(ali$seqs, ali$genes, links=ali$links) +
geom_seq() +
geom_bin_label() +
geom_gene(aes(fill=class)) +
geom_link()
p <- gggenomes(ali$seqs, ali$genes, links=ali$links) +
geom_seq() +
geom_bin_label() +
geom_gene(aes(color=class)) +
geom_link(aes(fill=identity)) +
scale_fill_distiller(palette="RdYlGn", direction = 1)
p \%>\% flip_seq("Same_gi") \%>\% pick(1,3,2,4,5,6,7,8)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{plotPCAPlotly}
\alias{plotPCAPlotly}
\title{Plot PCA}
\usage{
plotPCAPlotly(df.input, condition.color.vec,
condition.color.name = "condition", condition.shape.vec,
condition.shape.name = "condition", columnTitle = "Title", pc.a = "PC1",
pc.b = "PC2")
}
\arguments{
\item{df.input}{Input data object that contains the data to be plotted. Required}
\item{condition.color.vec}{color vector. Required}
\item{condition.color.name}{color variable name. Required}
\item{condition.shape.vec}{shape vector. Required}
\item{condition.shape.name}{shape variable name. Required}
\item{columnTitle}{Title to be displayed at top of the plot.}
\item{pc.a}{Name of the principal component plotted on the x-axis (default "PC1").}
\item{pc.b}{Name of the principal component plotted on the y-axis (default "PC2").}
}
\description{
Plot PCA
}
\examples{
plotPCAPlotly(df.input, condition.color.vec, condition.color.name = "condition",
condition.shape.vec, condition.shape.name = "condition",
columnTitle = "Title", pc.a = "PC1", pc.b = "PC2")
}
| /man/plotPCAPlotly.Rd | no_license | tseanlu/PathoStat | R | false | true | 900 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{plotPCAPlotly}
\alias{plotPCAPlotly}
\title{Plot PCA}
\usage{
plotPCAPlotly(df.input, condition.color.vec,
condition.color.name = "condition", condition.shape.vec,
condition.shape.name = "condition", columnTitle = "Title", pc.a = "PC1",
pc.b = "PC2")
}
\arguments{
\item{df.input}{Input data object that contains the data to be plotted. Required}
\item{condition.color.vec}{color vector. Required}
\item{condition.color.name}{color variable name. Required}
\item{condition.shape.vec}{shape vector. Required}
\item{condition.shape.name}{shape variable name. Required}
\item{columnTitle}{Title to be displayed at top of the plot.}
\item{pc.a}{Name of the principal component plotted on the x-axis (default "PC1").}
\item{pc.b}{Name of the principal component plotted on the y-axis (default "PC2").}
}
\description{
Plot PCA
}
\examples{
plotPCAPlotly(df.input, condition.color.vec, condition.color.name = "condition",
condition.shape.vec, condition.shape.name = "condition",
columnTitle = "Title", pc.a = "PC1", pc.b = "PC2")
}
|
###################################################################################################################
##re-analysis
###3 groups together RF
# --- Load OTU table and fit an AUC-optimised random forest (3 groups) ---
# Column 1 is kept alongside the OTU abundance columns 24:861; it is used
# below as the response 'trt', so it is presumably the group label
# (MRO/MRC/MCA) -- confirm against otu_table_new3.csv.
a2=read.delim("otu_table_new3.csv", header=T, sep=",")
data4=a2[,c(1,24:861)]
library(randomForest);library(pROC)
library(dplyr);library(AUCRF)
# Recode factor levels to 0..(k-1): `1:k - 1` parses as (1:k) - 1, i.e.
# zero-based codes, the form AUCRF expects for its response.
data4$trt=factor(data4$trt)
levels(data4$trt)=c(1:length(levels(data4$trt))-1)
# Backward-elimination variable selection driven by OOB AUC (5% of
# variables dropped per step, ranking by mean decrease in accuracy),
# then 10-fold CV repeated 20 times to estimate cvAUC.
set.seed(1)
rf_testset_trt4 <- AUCRF(trt~., data=data4, ntree=10000, pdel=0.05, ranking="MDA")
aucrf_test_trt4 <- AUCRFcv(rf_testset_trt4, nCV=10, M=20)
# predict() with no newdata returns out-of-bag class probabilities for
# the training set; column 2 is P(class coded "1"). ROC of OOB scores.
test_held_trt4 <- predict(aucrf_test_trt4$RFopt, type='prob')[,2]
trt_roc4 <- roc(data4$trt ~ test_held_trt4)
pdf(file='AUC-3goups.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
plot(c(1,0),c(0,1), type='l', lty=3, xlim=c(1.01,0), ylim=c(-0.01,1.01), xaxs='i', yaxs='i', ylab='', xlab='')
plot(trt_roc4, col='blue', add=T, lty=1,print.thres.col="blue",print.thres=T)
mtext(side=2, text="Sensitivity", line=2.5)
mtext(side=1, text="Specificity", line=2.5)
legend('bottomright', legend=c(
sprintf('MRO-MRC-MCA, AUC = 1.00')
),lty=1, lwd = 2, cex=0.7, col=c( 'blue'), bty='n')
dev.off()
pdf(file='AUC-3goups-33.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
##my code=above function
p1=max(aucrf_test_trt4$AUCcurve$AUC);p2=(aucrf_test_trt4$AUCcurve$AUC)
r=p2-p1;ylim=c(max(0,p2-r),min(1,p1+r))
plot(aucrf_test_trt4$AUCcurve$AUC~aucrf_test_trt4$AUCcurve$k,type='o',col='4',pch=20,ylim=c(0,1.1),
ylab='OOB-AUC',xlab='Number of selected variables')
text(aucrf_test_trt4$Kopt,aucrf_test_trt4$"OOB-AUCopt","|", pos=3,col='4')
text(aucrf_test_trt4$Kopt,aucrf_test_trt4$"OOB-AUCopt"+1.8*(ylim[1]-ylim[2])/10, paste("OOB-AUCopt = ",round(aucrf_test_trt4$"OOB-AUCopt",digits=3)," (Kopt = ",aucrf_test_trt4$Kopt,")",sep=""),
pos=4,col='4', cex=0.7, offset=0)
text(aucrf_test_trt4$Kopt,aucrf_test_trt4$"OOB-AUCopt"+2.8*(ylim[1]-ylim[2])/10, paste("cvAUC = ",round(aucrf_test_trt4$cvAUC,digits=3),sep=""), col='4',pos=4, cex=0.7, offset=0)
dev.off()
plot(aucrf_test_trt4,which=c("ranking"),maxvars=25)
imp <- sort(aucrf_test_trt4$ranking,decreasing=T)[1:25]
imp <- sort(aucrf_test_trt4$ranking,decreasing=T)[26:50]
specific=subset(data4,(colnames(data4) %in% names(imp)))
Oname <- array(1:25,c(25,4))
for(i in 1:25){
Oname[i,2] <- which(colnames(data4)==names(imp[i]))
Oname[i,3] <- names(imp[i])
Oname[i,4] <- imp[i]
}
d1 <-data4[,c(1,as.integer(Oname[,2]))]
levels(d1$trt)=c('MCA','MRC','MRO')
# Shorten taxonomy-style column names: keep only the last "."-separated
# field of each name. The original enumerated split lengths 1..7 by hand
# and every branch just kept the final piece, so taking the last element
# generalizes the logic to any depth; seq_along(rn) replaces the
# hard-coded 1:26 so the loop tracks ncol(d1) automatically, and
# strsplit() is hoisted out of the loop instead of re-splitting the whole
# vector on every iteration.
rn <- as.character(colnames(d1))
pieces <- strsplit(rn, "[.]")
for (i in seq_along(rn)) {
  ww <- pieces[[i]]
  if (length(ww) > 0) rn[i] <- ww[length(ww)]
}
# Keep a copy of the dot-shortened names; rn1 is what is later assigned
# back as colnames(d1).
rn1=rn;rn1= as.character(rn1)
# Second pass: split on underscores and keep selected pieces per depth.
# NOTE(review): the pattern "[__]" is a character class, identical to
# "[_]" -- it splits on every SINGLE underscore, not on "__". If the
# intent was to strip rank prefixes like "g__Genus" (double-underscore
# taxonomy style), this should be strsplit(rn, "__", fixed = TRUE);
# confirm against the actual column names before changing.
for(i in 1:26){
# Re-splits the whole vector each iteration and extracts element i.
ww <- strsplit(rn, "[__]")[[i]]
if(length(ww)==7) rn[i] <- paste(ww[7],sep="_")
if(length(ww)==6) rn[i] <- paste(ww[6],sep="_")
# Depth 5 keeps pieces 2..5 rejoined with "_"; all other depths keep one
# piece (paste() with a single element is a no-op wrapper here).
if(length(ww)==5 )rn[i] <- paste(ww[2],ww[3],ww[4],ww[5],sep="_")
if(length(ww)==4) rn[i] <- paste(ww[4],sep="_")
if(length(ww)==3) rn[i] <- paste(ww[3],sep="_")
if(length(ww)==2) rn[i] <- paste(ww[2])
if(length(ww)==1) rn[i] <- paste(ww[1])
}
#rn1[10]='Subdivision5_genera_incertae_sedis'
colnames(d1)=rn1
d1$trt=factor(d1$trt, levels=c('MRO','MRC','MCA'))
# One boxplot per selected OTU (column 1 of d1 is the group factor trt).
pdf("boxpot-trt-AE-others-auc22223.pdf",width=8, height=10.5, paper="letter")
par(mfrow=c(5,3));par(mar=c(2,2,2,2))
# BUG FIX: the original loop ran 2:100, but d1 only has 26 columns
# (trt + 25 top-ranked OTUs), so d1[,27] aborts the loop with
# "undefined columns selected" and leaves the PDF device open.
# Bound the loop by the actual column count instead.
for(i in 2:ncol(d1)){
boxplot(d1[,i]~d1$trt,main=rn[i],col=c('4','red'))
}
dev.off()
###ggplot generate mutiple figures with pvalue
library(reshape2)
library(ggplot2);library(magrittr)
library(ggpubr)
d <- melt(d1, id.var=c("trt"))
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()
############
#change color manual
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()+scale_colour_manual(values=c("chartreuse4",'orangered', "dodgerblue4"), aesthetics = "fill")
#compare_means(shannon~trt, data=d1)
#my_comparisons <- list(c("others", "AECOPD"))
#comparisons=my_comparisons,
# Add pairwise comparisons p-value from Wilcoxon test
#######################################
##multiple figures into multiple pages
gg1=ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
#facet_wrap(. ~ variable,scale="free",nrow=5)+
guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()
devtools::install_github("guiastrennec/ggplus")
library(ggplus)
pdf("need10.pdf")
gg10 <- facet_multiple(plot=gg1, facets="variable", ncol = 2, nrow = 5)
dev.off()
write.csv(imp,'imp.csv')
imp <- sort(aucrf_test_trt4$ranking,decreasing=T)[1:1398]
imp <- sort(aucrf_test_trt4$ranking,decreasing=T)[1:838]
#########################################################################################
##AUCRF-pair-wise
##MRO vs MRC
a2=read.delim("otu_table_new3.csv", header=T, sep=",")
a=subset(a2,trt=='MRO'|trt=='MRC')
data=a[,c(1,24:861)]
data$trt=droplevels(data$trt)
##> levels(data$trt)
#"MRC" "MRO"
library(randomForest);library(pROC)
library(dplyr);library(AUCRF)
data$trt=factor(data$trt)
levels(data$trt)=c(1:length(levels(data$trt))-1)
set.seed(1)
rf_testset_trt <- AUCRF(trt~., data=data, ntree=10000, pdel=0.05, ranking="MDA")
aucrf_test_trt <- AUCRFcv(rf_testset_trt, nCV=10, M=20)
test_held_trt <- predict(aucrf_test_trt$RFopt, type='prob')[,2]
trt_roc <- roc(data$trt ~ test_held_trt)
pdf(file='AUC-MRO-MRC.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
plot(c(1,0),c(0,1), type='l', lty=3, xlim=c(1.01,0), ylim=c(-0.01,1.01), xaxs='i', yaxs='i', ylab='', xlab='')
plot(trt_roc, col='blue', add=T, lty=1,print.thres.col="blue",print.thres=T)
mtext(side=2, text="Sensitivity", line=2.5)
mtext(side=1, text="Specificity", line=2.5)
legend('bottomright', legend=c(
sprintf('MRO-MRC, AUC = 1.00')
),lty=1, lwd = 2, cex=0.7, col=c( 'blue'), bty='n')
dev.off()
pdf(file='AUC-MRO-MRC-33.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
##my code=above function
p1=max(aucrf_test_trt$AUCcurve$AUC);p2=(aucrf_test_trt$AUCcurve$AUC)
r=p2-p1;ylim=c(max(0,p2-r),min(1,p1+r))
plot(aucrf_test_trt$AUCcurve$AUC~aucrf_test_trt$AUCcurve$k,type='o',col='4',pch=20,ylim=c(0,1.1),
ylab='OOB-AUC',xlab='Number of selected variables')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt","|", pos=3,col='4')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+1.8*(ylim[1]-ylim[2])/10, paste("OOB-AUCopt = ",round(aucrf_test_trt$"OOB-AUCopt",digits=3)," (Kopt = ",aucrf_test_trt$Kopt,")",sep=""),
pos=4,col='4', cex=0.7, offset=0)
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+2.8*(ylim[1]-ylim[2])/10, paste("cvAUC = ",round(aucrf_test_trt$cvAUC,digits=3),sep=""), col='4',pos=4, cex=0.7, offset=0)
dev.off()
plot(aucrf_test_trt,which=c("ranking"),maxvars=25)
imp <- sort(aucrf_test_trt$ranking,decreasing=T)[1:25]
specific=subset(data,(colnames(data) %in% names(imp)))
Oname <- array(1:25,c(25,4))
for(i in 1:25){
Oname[i,2] <- which(colnames(data)==names(imp[i]))
Oname[i,3] <- names(imp[i])
Oname[i,4] <- imp[i]
}
d1 <-data[,c(1,as.integer(Oname[,2]))]
levels(d1$trt)=c('MRC','MRO')
d1$trt=factor(d1$trt, levels=c('MRO','MRC'))
library(reshape2)
library(ggplot2);library(magrittr)
library(ggpubr)
d <- melt(d1, id.var=c("trt"))
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()+scale_colour_manual(values=c("chartreuse4",'orangered'), aesthetics = "fill")
##MRO vs MCA
a2=read.delim("otu_table_new3.csv", header=T, sep=",")
a=subset(a2,trt=='MRO'|trt=='MCA')
data=a[,c(1,24:861)]
data$trt=droplevels(data$trt)
##> levels(data$trt)
#"MCA" "MRO"
library(randomForest);library(pROC)
library(dplyr);library(AUCRF)
data$trt=factor(data$trt)
levels(data$trt)=c(1:length(levels(data$trt))-1)
set.seed(1)
rf_testset_trt <- AUCRF(trt~., data=data, ntree=10000, pdel=0.05, ranking="MDA")
aucrf_test_trt <- AUCRFcv(rf_testset_trt, nCV=10, M=20)
test_held_trt <- predict(aucrf_test_trt$RFopt, type='prob')[,2]
trt_roc <- roc(data$trt ~ test_held_trt)
pdf(file='AUC-MRO-MCA.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
plot(c(1,0),c(0,1), type='l', lty=3, xlim=c(1.01,0), ylim=c(-0.01,1.01), xaxs='i', yaxs='i', ylab='', xlab='')
plot(trt_roc, col='blue', add=T, lty=1,print.thres.col="blue",print.thres=T)
mtext(side=2, text="Sensitivity", line=2.5)
mtext(side=1, text="Specificity", line=2.5)
legend('bottomright', legend=c(
sprintf('MRO-MCA, AUC = 1.00')
),lty=1, lwd = 2, cex=0.7, col=c( 'blue'), bty='n')
dev.off()
pdf(file='AUC-MRO-MCA-33.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
##my code=above function
p1=max(aucrf_test_trt$AUCcurve$AUC);p2=(aucrf_test_trt$AUCcurve$AUC)
r=p2-p1;ylim=c(max(0,p2-r),min(1,p1+r))
plot(aucrf_test_trt$AUCcurve$AUC~aucrf_test_trt$AUCcurve$k,type='o',col='4',pch=20,ylim=c(0,1.1),
ylab='OOB-AUC',xlab='Number of selected variables')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt","|", pos=3,col='4')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+1.8*(ylim[1]-ylim[2])/10, paste("OOB-AUCopt = ",round(aucrf_test_trt$"OOB-AUCopt",digits=3)," (Kopt = ",aucrf_test_trt$Kopt,")",sep=""),
pos=4,col='4', cex=0.7, offset=0)
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+2.8*(ylim[1]-ylim[2])/10, paste("cvAUC = ",round(aucrf_test_trt$cvAUC,digits=3),sep=""), col='4',pos=4, cex=0.7, offset=0)
dev.off()
plot(aucrf_test_trt,which=c("ranking"),maxvars=25)
imp <- sort(aucrf_test_trt$ranking,decreasing=T)[1:25]
specific=subset(data,(colnames(data) %in% names(imp)))
Oname <- array(1:25,c(25,4))
for(i in 1:25){
Oname[i,2] <- which(colnames(data)==names(imp[i]))
Oname[i,3] <- names(imp[i])
Oname[i,4] <- imp[i]
}
d1 <-data[,c(1,as.integer(Oname[,2]))]
levels(d1$trt)=c('MCA','MRO')
d1$trt=factor(d1$trt, levels=c('MRO','MCA'))
library(reshape2)
library(ggplot2);library(magrittr)
library(ggpubr)
d <- melt(d1, id.var=c("trt"))
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()+scale_colour_manual(values=c("chartreuse4",'dodgerblue4'), aesthetics = "fill")
##MRC vs MCA
a2=read.delim("otu_table_new3.csv", header=T, sep=",")
a=subset(a2,trt=='MRC'|trt=='MCA')
data=a[,c(1,24:861)]
data$trt=droplevels(data$trt)
##> levels(data$trt)
#"MCA" "MRC"
library(randomForest);library(pROC)
library(dplyr);library(AUCRF)
data$trt=factor(data$trt)
levels(data$trt)=c(1:length(levels(data$trt))-1)
set.seed(1)
rf_testset_trt <- AUCRF(trt~., data=data, ntree=10000, pdel=0.05, ranking="MDA")
aucrf_test_trt <- AUCRFcv(rf_testset_trt, nCV=10, M=20)
test_held_trt <- predict(aucrf_test_trt$RFopt, type='prob')[,2]
trt_roc <- roc(data$trt ~ test_held_trt)
pdf(file='AUC-MRC-MCA.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
plot(c(1,0),c(0,1), type='l', lty=3, xlim=c(1.01,0), ylim=c(-0.01,1.01), xaxs='i', yaxs='i', ylab='', xlab='')
plot(trt_roc, col='blue', add=T, lty=1,print.thres.col="blue",print.thres=T)
mtext(side=2, text="Sensitivity", line=2.5)
mtext(side=1, text="Specificity", line=2.5)
legend('bottomright', legend=c(
sprintf('MRC-MCA, AUC = 1.00')
),lty=1, lwd = 2, cex=0.7, col=c( 'blue'), bty='n')
dev.off()
pdf(file='KOPT-MRC-MCA.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
##my code=above function
p1=max(aucrf_test_trt$AUCcurve$AUC);p2=(aucrf_test_trt$AUCcurve$AUC)
r=p2-p1;ylim=c(max(0,p2-r),min(1,p1+r))
plot(aucrf_test_trt$AUCcurve$AUC~aucrf_test_trt$AUCcurve$k,type='o',col='4',pch=20,ylim=c(0,1.1),
ylab='OOB-AUC',xlab='Number of selected variables')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt","|", pos=3,col='4')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+1.8*(ylim[1]-ylim[2])/10, paste("OOB-AUCopt = ",round(aucrf_test_trt$"OOB-AUCopt",digits=3)," (Kopt = ",aucrf_test_trt$Kopt,")",sep=""),
pos=4,col='4', cex=0.7, offset=0)
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+2.8*(ylim[1]-ylim[2])/10, paste("cvAUC = ",round(aucrf_test_trt$cvAUC,digits=3),sep=""), col='4',pos=4, cex=0.7, offset=0)
dev.off()
plot(aucrf_test_trt,which=c("ranking"),maxvars=25)
imp <- sort(aucrf_test_trt$ranking,decreasing=T)[1:25]
specific=subset(data,(colnames(data) %in% names(imp)))
Oname <- array(1:25,c(25,4))
for(i in 1:25){
Oname[i,2] <- which(colnames(data)==names(imp[i]))
Oname[i,3] <- names(imp[i])
Oname[i,4] <- imp[i]
}
d1 <-data[,c(1,as.integer(Oname[,2]))]
levels(d1$trt)=c('MCA','MRC')
d1$trt=factor(d1$trt, levels=c('MRC','MCA'))
library(reshape2)
library(ggplot2);library(magrittr)
library(ggpubr)
d <- melt(d1, id.var=c("trt"))
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()+scale_colour_manual(values=c("orangered",'dodgerblue4'), aesthetics = "fill") | /code/fig 4 R code for random forest.R | no_license | chaichai9521/goat-rumen-mcirobiome-analysis | R | false | false | 15,335 | r | ###################################################################################################################
##re-analysis
###3 groups together RF
a2=read.delim("otu_table_new3.csv", header=T, sep=",")
data4=a2[,c(1,24:861)]
library(randomForest);library(pROC)
library(dplyr);library(AUCRF)
data4$trt=factor(data4$trt)
levels(data4$trt)=c(1:length(levels(data4$trt))-1)
set.seed(1)
rf_testset_trt4 <- AUCRF(trt~., data=data4, ntree=10000, pdel=0.05, ranking="MDA")
aucrf_test_trt4 <- AUCRFcv(rf_testset_trt4, nCV=10, M=20)
test_held_trt4 <- predict(aucrf_test_trt4$RFopt, type='prob')[,2]
trt_roc4 <- roc(data4$trt ~ test_held_trt4)
pdf(file='AUC-3goups.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
plot(c(1,0),c(0,1), type='l', lty=3, xlim=c(1.01,0), ylim=c(-0.01,1.01), xaxs='i', yaxs='i', ylab='', xlab='')
plot(trt_roc4, col='blue', add=T, lty=1,print.thres.col="blue",print.thres=T)
mtext(side=2, text="Sensitivity", line=2.5)
mtext(side=1, text="Specificity", line=2.5)
legend('bottomright', legend=c(
sprintf('MRO-MRC-MCA, AUC = 1.00')
),lty=1, lwd = 2, cex=0.7, col=c( 'blue'), bty='n')
dev.off()
pdf(file='AUC-3goups-33.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
##my code=above function
p1=max(aucrf_test_trt4$AUCcurve$AUC);p2=(aucrf_test_trt4$AUCcurve$AUC)
r=p2-p1;ylim=c(max(0,p2-r),min(1,p1+r))
plot(aucrf_test_trt4$AUCcurve$AUC~aucrf_test_trt4$AUCcurve$k,type='o',col='4',pch=20,ylim=c(0,1.1),
ylab='OOB-AUC',xlab='Number of selected variables')
text(aucrf_test_trt4$Kopt,aucrf_test_trt4$"OOB-AUCopt","|", pos=3,col='4')
text(aucrf_test_trt4$Kopt,aucrf_test_trt4$"OOB-AUCopt"+1.8*(ylim[1]-ylim[2])/10, paste("OOB-AUCopt = ",round(aucrf_test_trt4$"OOB-AUCopt",digits=3)," (Kopt = ",aucrf_test_trt4$Kopt,")",sep=""),
pos=4,col='4', cex=0.7, offset=0)
text(aucrf_test_trt4$Kopt,aucrf_test_trt4$"OOB-AUCopt"+2.8*(ylim[1]-ylim[2])/10, paste("cvAUC = ",round(aucrf_test_trt4$cvAUC,digits=3),sep=""), col='4',pos=4, cex=0.7, offset=0)
dev.off()
plot(aucrf_test_trt4,which=c("ranking"),maxvars=25)
imp <- sort(aucrf_test_trt4$ranking,decreasing=T)[1:25]
imp <- sort(aucrf_test_trt4$ranking,decreasing=T)[26:50]
specific=subset(data4,(colnames(data4) %in% names(imp)))
Oname <- array(1:25,c(25,4))
for(i in 1:25){
Oname[i,2] <- which(colnames(data4)==names(imp[i]))
Oname[i,3] <- names(imp[i])
Oname[i,4] <- imp[i]
}
d1 <-data4[,c(1,as.integer(Oname[,2]))]
levels(d1$trt)=c('MCA','MRC','MRO')
rn <- colnames(d1)
rn= as.character(rn)
for(i in 1:26){
ww <- strsplit(rn, "[.]")[[i]]
if(length(ww)==7) rn[i] <- paste(ww[7],sep="_")
if(length(ww)==6) rn[i] <- paste(ww[6],sep="_")
if(length(ww)==5 )rn[i] <- paste(ww[5],sep="_")
if(length(ww)==4) rn[i] <- paste(ww[4],sep="_")
if(length(ww)==3) rn[i] <- paste(ww[3],sep="_")
if(length(ww)==2) rn[i] <- paste(ww[2])
if(length(ww)==1) rn[i] <- paste(ww[1])
}
rn1=rn;rn1= as.character(rn1)
for(i in 1:26){
ww <- strsplit(rn, "[__]")[[i]]
if(length(ww)==7) rn[i] <- paste(ww[7],sep="_")
if(length(ww)==6) rn[i] <- paste(ww[6],sep="_")
if(length(ww)==5 )rn[i] <- paste(ww[2],ww[3],ww[4],ww[5],sep="_")
if(length(ww)==4) rn[i] <- paste(ww[4],sep="_")
if(length(ww)==3) rn[i] <- paste(ww[3],sep="_")
if(length(ww)==2) rn[i] <- paste(ww[2])
if(length(ww)==1) rn[i] <- paste(ww[1])
}
#rn1[10]='Subdivision5_genera_incertae_sedis'
colnames(d1)=rn1
d1$trt=factor(d1$trt, levels=c('MRO','MRC','MCA'))
# One boxplot per selected OTU (column 1 of d1 is the group factor trt).
pdf("boxpot-trt-AE-others-auc22223.pdf",width=8, height=10.5, paper="letter")
par(mfrow=c(5,3));par(mar=c(2,2,2,2))
# BUG FIX: the original loop ran 2:100, but d1 only has 26 columns
# (trt + 25 top-ranked OTUs), so d1[,27] aborts the loop with
# "undefined columns selected" and leaves the PDF device open.
# Bound the loop by the actual column count instead.
for(i in 2:ncol(d1)){
boxplot(d1[,i]~d1$trt,main=rn[i],col=c('4','red'))
}
dev.off()
###ggplot generate mutiple figures with pvalue
library(reshape2)
library(ggplot2);library(magrittr)
library(ggpubr)
d <- melt(d1, id.var=c("trt"))
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()
############
#change color manual
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()+scale_colour_manual(values=c("chartreuse4",'orangered', "dodgerblue4"), aesthetics = "fill")
#compare_means(shannon~trt, data=d1)
#my_comparisons <- list(c("others", "AECOPD"))
#comparisons=my_comparisons,
# Add pairwise comparisons p-value from Wilcoxon test
#######################################
##multiple figures into multiple pages
gg1=ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
#facet_wrap(. ~ variable,scale="free",nrow=5)+
guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()
devtools::install_github("guiastrennec/ggplus")
library(ggplus)
pdf("need10.pdf")
gg10 <- facet_multiple(plot=gg1, facets="variable", ncol = 2, nrow = 5)
dev.off()
write.csv(imp,'imp.csv')
imp <- sort(aucrf_test_trt4$ranking,decreasing=T)[1:1398]
imp <- sort(aucrf_test_trt4$ranking,decreasing=T)[1:838]
#########################################################################################
##AUCRF-pair-wise
##MRO vs MRC
a2=read.delim("otu_table_new3.csv", header=T, sep=",")
a=subset(a2,trt=='MRO'|trt=='MRC')
data=a[,c(1,24:861)]
data$trt=droplevels(data$trt)
##> levels(data$trt)
#"MRC" "MRO"
library(randomForest);library(pROC)
library(dplyr);library(AUCRF)
data$trt=factor(data$trt)
levels(data$trt)=c(1:length(levels(data$trt))-1)
set.seed(1)
rf_testset_trt <- AUCRF(trt~., data=data, ntree=10000, pdel=0.05, ranking="MDA")
aucrf_test_trt <- AUCRFcv(rf_testset_trt, nCV=10, M=20)
test_held_trt <- predict(aucrf_test_trt$RFopt, type='prob')[,2]
trt_roc <- roc(data$trt ~ test_held_trt)
pdf(file='AUC-MRO-MRC.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
plot(c(1,0),c(0,1), type='l', lty=3, xlim=c(1.01,0), ylim=c(-0.01,1.01), xaxs='i', yaxs='i', ylab='', xlab='')
plot(trt_roc, col='blue', add=T, lty=1,print.thres.col="blue",print.thres=T)
mtext(side=2, text="Sensitivity", line=2.5)
mtext(side=1, text="Specificity", line=2.5)
legend('bottomright', legend=c(
sprintf('MRO-MRC, AUC = 1.00')
),lty=1, lwd = 2, cex=0.7, col=c( 'blue'), bty='n')
dev.off()
pdf(file='AUC-MRO-MRC-33.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
##my code=above function
p1=max(aucrf_test_trt$AUCcurve$AUC);p2=(aucrf_test_trt$AUCcurve$AUC)
r=p2-p1;ylim=c(max(0,p2-r),min(1,p1+r))
plot(aucrf_test_trt$AUCcurve$AUC~aucrf_test_trt$AUCcurve$k,type='o',col='4',pch=20,ylim=c(0,1.1),
ylab='OOB-AUC',xlab='Number of selected variables')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt","|", pos=3,col='4')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+1.8*(ylim[1]-ylim[2])/10, paste("OOB-AUCopt = ",round(aucrf_test_trt$"OOB-AUCopt",digits=3)," (Kopt = ",aucrf_test_trt$Kopt,")",sep=""),
pos=4,col='4', cex=0.7, offset=0)
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+2.8*(ylim[1]-ylim[2])/10, paste("cvAUC = ",round(aucrf_test_trt$cvAUC,digits=3),sep=""), col='4',pos=4, cex=0.7, offset=0)
dev.off()
plot(aucrf_test_trt,which=c("ranking"),maxvars=25)
imp <- sort(aucrf_test_trt$ranking,decreasing=T)[1:25]
specific=subset(data,(colnames(data) %in% names(imp)))
Oname <- array(1:25,c(25,4))
for(i in 1:25){
Oname[i,2] <- which(colnames(data)==names(imp[i]))
Oname[i,3] <- names(imp[i])
Oname[i,4] <- imp[i]
}
d1 <-data[,c(1,as.integer(Oname[,2]))]
levels(d1$trt)=c('MRC','MRO')
d1$trt=factor(d1$trt, levels=c('MRO','MRC'))
library(reshape2)
library(ggplot2);library(magrittr)
library(ggpubr)
d <- melt(d1, id.var=c("trt"))
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()+scale_colour_manual(values=c("chartreuse4",'orangered'), aesthetics = "fill")
##MRO vs MCA
a2=read.delim("otu_table_new3.csv", header=T, sep=",")
a=subset(a2,trt=='MRO'|trt=='MCA')
data=a[,c(1,24:861)]
data$trt=droplevels(data$trt)
##> levels(data$trt)
#"MCA" "MRO"
library(randomForest);library(pROC)
library(dplyr);library(AUCRF)
data$trt=factor(data$trt)
levels(data$trt)=c(1:length(levels(data$trt))-1)
set.seed(1)
rf_testset_trt <- AUCRF(trt~., data=data, ntree=10000, pdel=0.05, ranking="MDA")
aucrf_test_trt <- AUCRFcv(rf_testset_trt, nCV=10, M=20)
test_held_trt <- predict(aucrf_test_trt$RFopt, type='prob')[,2]
trt_roc <- roc(data$trt ~ test_held_trt)
pdf(file='AUC-MRO-MCA.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
plot(c(1,0),c(0,1), type='l', lty=3, xlim=c(1.01,0), ylim=c(-0.01,1.01), xaxs='i', yaxs='i', ylab='', xlab='')
plot(trt_roc, col='blue', add=T, lty=1,print.thres.col="blue",print.thres=T)
mtext(side=2, text="Sensitivity", line=2.5)
mtext(side=1, text="Specificity", line=2.5)
legend('bottomright', legend=c(
sprintf('MRO-MCA, AUC = 1.00')
),lty=1, lwd = 2, cex=0.7, col=c( 'blue'), bty='n')
dev.off()
pdf(file='AUC-MRO-MCA-33.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
##my code=above function
p1=max(aucrf_test_trt$AUCcurve$AUC);p2=(aucrf_test_trt$AUCcurve$AUC)
r=p2-p1;ylim=c(max(0,p2-r),min(1,p1+r))
plot(aucrf_test_trt$AUCcurve$AUC~aucrf_test_trt$AUCcurve$k,type='o',col='4',pch=20,ylim=c(0,1.1),
ylab='OOB-AUC',xlab='Number of selected variables')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt","|", pos=3,col='4')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+1.8*(ylim[1]-ylim[2])/10, paste("OOB-AUCopt = ",round(aucrf_test_trt$"OOB-AUCopt",digits=3)," (Kopt = ",aucrf_test_trt$Kopt,")",sep=""),
pos=4,col='4', cex=0.7, offset=0)
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+2.8*(ylim[1]-ylim[2])/10, paste("cvAUC = ",round(aucrf_test_trt$cvAUC,digits=3),sep=""), col='4',pos=4, cex=0.7, offset=0)
dev.off()
plot(aucrf_test_trt,which=c("ranking"),maxvars=25)
imp <- sort(aucrf_test_trt$ranking,decreasing=T)[1:25]
specific=subset(data,(colnames(data) %in% names(imp)))
Oname <- array(1:25,c(25,4))
for(i in 1:25){
Oname[i,2] <- which(colnames(data)==names(imp[i]))
Oname[i,3] <- names(imp[i])
Oname[i,4] <- imp[i]
}
d1 <-data[,c(1,as.integer(Oname[,2]))]
levels(d1$trt)=c('MCA','MRO')
d1$trt=factor(d1$trt, levels=c('MRO','MCA'))
library(reshape2)
library(ggplot2);library(magrittr)
library(ggpubr)
d <- melt(d1, id.var=c("trt"))
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()+scale_colour_manual(values=c("chartreuse4",'dodgerblue4'), aesthetics = "fill")
##MRC vs MCA
a2=read.delim("otu_table_new3.csv", header=T, sep=",")
a=subset(a2,trt=='MRC'|trt=='MCA')
data=a[,c(1,24:861)]
data$trt=droplevels(data$trt)
##> levels(data$trt)
#"MCA" "MRC"
library(randomForest);library(pROC)
library(dplyr);library(AUCRF)
data$trt=factor(data$trt)
levels(data$trt)=c(1:length(levels(data$trt))-1)
set.seed(1)
rf_testset_trt <- AUCRF(trt~., data=data, ntree=10000, pdel=0.05, ranking="MDA")
aucrf_test_trt <- AUCRFcv(rf_testset_trt, nCV=10, M=20)
test_held_trt <- predict(aucrf_test_trt$RFopt, type='prob')[,2]
trt_roc <- roc(data$trt ~ test_held_trt)
pdf(file='AUC-MRC-MCA.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
plot(c(1,0),c(0,1), type='l', lty=3, xlim=c(1.01,0), ylim=c(-0.01,1.01), xaxs='i', yaxs='i', ylab='', xlab='')
plot(trt_roc, col='blue', add=T, lty=1,print.thres.col="blue",print.thres=T)
mtext(side=2, text="Sensitivity", line=2.5)
mtext(side=1, text="Specificity", line=2.5)
legend('bottomright', legend=c(
sprintf('MRC-MCA, AUC = 1.00')
),lty=1, lwd = 2, cex=0.7, col=c( 'blue'), bty='n')
dev.off()
pdf(file='KOPT-MRC-MCA.pdf', width=4, height=3)
layout(matrix(c(1,
1),
nrow=1, byrow = TRUE))
par(mar=c(4,4,1,1))
##my code=above function
p1=max(aucrf_test_trt$AUCcurve$AUC);p2=(aucrf_test_trt$AUCcurve$AUC)
r=p2-p1;ylim=c(max(0,p2-r),min(1,p1+r))
plot(aucrf_test_trt$AUCcurve$AUC~aucrf_test_trt$AUCcurve$k,type='o',col='4',pch=20,ylim=c(0,1.1),
ylab='OOB-AUC',xlab='Number of selected variables')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt","|", pos=3,col='4')
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+1.8*(ylim[1]-ylim[2])/10, paste("OOB-AUCopt = ",round(aucrf_test_trt$"OOB-AUCopt",digits=3)," (Kopt = ",aucrf_test_trt$Kopt,")",sep=""),
pos=4,col='4', cex=0.7, offset=0)
text(aucrf_test_trt$Kopt,aucrf_test_trt$"OOB-AUCopt"+2.8*(ylim[1]-ylim[2])/10, paste("cvAUC = ",round(aucrf_test_trt$cvAUC,digits=3),sep=""), col='4',pos=4, cex=0.7, offset=0)
dev.off()
plot(aucrf_test_trt,which=c("ranking"),maxvars=25)
imp <- sort(aucrf_test_trt$ranking,decreasing=T)[1:25]
specific=subset(data,(colnames(data) %in% names(imp)))
Oname <- array(1:25,c(25,4))
for(i in 1:25){
Oname[i,2] <- which(colnames(data)==names(imp[i]))
Oname[i,3] <- names(imp[i])
Oname[i,4] <- imp[i]
}
d1 <-data[,c(1,as.integer(Oname[,2]))]
levels(d1$trt)=c('MCA','MRC')
d1$trt=factor(d1$trt, levels=c('MRC','MCA'))
library(reshape2)
library(ggplot2);library(magrittr)
library(ggpubr)
d <- melt(d1, id.var=c("trt"))
ggplot(d,aes(x=factor(trt),y=value))+geom_boxplot(aes(fill=trt))+ labs(x="", y="")+
ylab("")+theme(axis.title.y = element_text(size = rel(1.2)),plot.title = element_text(hjust = .5))+border(color = "black", size = 0.8)+
facet_wrap(. ~ variable,scale="free",nrow=5)+ guides(fill=FALSE)+
stat_compare_means(label = "p.format",label.y.npc='top')+ theme_bw()+scale_colour_manual(values=c("orangered",'dodgerblue4'), aesthetics = "fill") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coo-utilities.R
\name{coo_interpolate}
\alias{coo_interpolate}
\title{Interpolates coordinates}
\usage{
coo_interpolate(coo, n)
}
\arguments{
\item{coo}{\code{matrix} of \code{(x; y)} coordinates or any \link{Coo} object.}
\item{n}{\code{integer}, the number of points to interpolate.}
}
\value{
a \code{matrix} of (x; y) coordinates, or a \link{Coo} object.
}
\description{
Interpolates n coordinates between existing points,
along the perimeter of the coordinates provided, keeping the first point.
}
\examples{
b5 <- bot \%>\% slice(1:5) # for speed sake
stack(b5)
stack(coo_scale(b5))
stack(b5)
stack(coo_interpolate(coo_sample(b5, 12), 120))
coo_plot(bot[1])
coo_plot(coo_interpolate(coo_sample(bot[1], 12), 120))
}
\seealso{
Other sampling functions: \code{\link{coo_extract}},
\code{\link{coo_sample_prop}},
\code{\link{coo_samplerr}}, \code{\link{coo_sample}}
Other coo_ utilities: \code{\link{coo_aligncalliper}},
\code{\link{coo_alignminradius}},
\code{\link{coo_alignxax}}, \code{\link{coo_align}},
\code{\link{coo_baseline}}, \code{\link{coo_bookstein}},
\code{\link{coo_boundingbox}},
\code{\link{coo_calliper}}, \code{\link{coo_centdist}},
\code{\link{coo_center}}, \code{\link{coo_centpos}},
\code{\link{coo_close}}, \code{\link{coo_down}},
\code{\link{coo_dxy}}, \code{\link{coo_extract}},
\code{\link{coo_flipx}}, \code{\link{coo_force2close}},
\code{\link{coo_is_closed}}, \code{\link{coo_jitter}},
\code{\link{coo_left}},
\code{\link{coo_likely_clockwise}}, \code{\link{coo_nb}},
\code{\link{coo_perim}}, \code{\link{coo_range}},
\code{\link{coo_rev}}, \code{\link{coo_right}},
\code{\link{coo_rotatecenter}}, \code{\link{coo_rotate}},
\code{\link{coo_sample_prop}},
\code{\link{coo_samplerr}}, \code{\link{coo_sample}},
\code{\link{coo_scale}}, \code{\link{coo_shearx}},
\code{\link{coo_slice}},
\code{\link{coo_slidedirection}},
\code{\link{coo_slidegap}}, \code{\link{coo_slide}},
\code{\link{coo_smoothcurve}}, \code{\link{coo_smooth}},
\code{\link{coo_template}}, \code{\link{coo_trans}},
\code{\link{coo_trimbottom}}, \code{\link{coo_trimtop}},
\code{\link{coo_trim}}, \code{\link{coo_untiltx}},
\code{\link{coo_up}}, \code{\link{is_equallyspacedradii}}
}
| /man/coo_interpolate.Rd | no_license | chris-english/Momocs | R | false | true | 2,342 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coo-utilities.R
\name{coo_interpolate}
\alias{coo_interpolate}
\title{Interpolates coordinates}
\usage{
coo_interpolate(coo, n)
}
\arguments{
\item{coo}{\code{matrix} of \code{(x; y)} coordinates or any \link{Coo} object.}
\item{n}{\code{integer}, the number of points to interpolate.}
}
\value{
a \code{matrix} of (x; y) coordinates, or a \link{Coo} object.
}
\description{
Interpolates n coordinates between existing points,
along the perimeter of the coordinates provided and keeping the first point
}
\examples{
b5 <- bot \%>\% slice(1:5) # for speed sake
stack(b5)
stack(coo_scale(b5))
stack(b5)
stack(coo_interpolate(coo_sample(b5, 12), 120))
coo_plot(bot[1])
coo_plot(coo_interpolate(coo_sample(bot[1], 12), 120))
}
\seealso{
Other sampling functions: \code{\link{coo_extract}},
\code{\link{coo_sample_prop}},
\code{\link{coo_samplerr}}, \code{\link{coo_sample}}
Other coo_ utilities: \code{\link{coo_aligncalliper}},
\code{\link{coo_alignminradius}},
\code{\link{coo_alignxax}}, \code{\link{coo_align}},
\code{\link{coo_baseline}}, \code{\link{coo_bookstein}},
\code{\link{coo_boundingbox}},
\code{\link{coo_calliper}}, \code{\link{coo_centdist}},
\code{\link{coo_center}}, \code{\link{coo_centpos}},
\code{\link{coo_close}}, \code{\link{coo_down}},
\code{\link{coo_dxy}}, \code{\link{coo_extract}},
\code{\link{coo_flipx}}, \code{\link{coo_force2close}},
\code{\link{coo_is_closed}}, \code{\link{coo_jitter}},
\code{\link{coo_left}},
\code{\link{coo_likely_clockwise}}, \code{\link{coo_nb}},
\code{\link{coo_perim}}, \code{\link{coo_range}},
\code{\link{coo_rev}}, \code{\link{coo_right}},
\code{\link{coo_rotatecenter}}, \code{\link{coo_rotate}},
\code{\link{coo_sample_prop}},
\code{\link{coo_samplerr}}, \code{\link{coo_sample}},
\code{\link{coo_scale}}, \code{\link{coo_shearx}},
\code{\link{coo_slice}},
\code{\link{coo_slidedirection}},
\code{\link{coo_slidegap}}, \code{\link{coo_slide}},
\code{\link{coo_smoothcurve}}, \code{\link{coo_smooth}},
\code{\link{coo_template}}, \code{\link{coo_trans}},
\code{\link{coo_trimbottom}}, \code{\link{coo_trimtop}},
\code{\link{coo_trim}}, \code{\link{coo_untiltx}},
\code{\link{coo_up}}, \code{\link{is_equallyspacedradii}}
}
|
\name{dict}
\alias{dict}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Create a named vector, imitating python dict
}
\description{
Create a named vector of values \code{v} with names taken from \code{k}, imitating a python dict.
}
\usage{
# k %!% v
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{k}{
A vector giving the keys (will be coerced into character)
}
\item{v}{
A vector of values
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A named vector of values
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
JFP
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# c("a", "b") %!% c(1, 2)
}
\keyword{dict}
\keyword{key}
| /man/dict.Rd | permissive | HikaGenji/fql | R | false | false | 784 | rd | \name{dict}
\alias{dict}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Create a named vector, imitating python dict
}
\description{
Create a named vector of values \code{v} with names taken from \code{k}, imitating a python dict.
}
\usage{
# k %!% v
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{k}{
A vector giving the keys (will be coerced into character)
}
\item{v}{
A vector of values
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A named vector of values
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
JFP
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# c("a", "b") %!% c(1, 2)
}
\keyword{dict}
\keyword{key}
|
# NOTE(review): rm(list=ls()) and setwd() in a script clobber the caller's
# session and are discouraged; kept as-is to preserve the original analysis.
rm(list=ls())
library(ggplot2)
library(reshape2)
library(plyr)
library(stringr)
library(grid)
# loading multiplot function
source('../multiplot.R')
wd = '/Users/pascaltimshel/git/snpsnap/analysis/validation_summary_stats_inputEQmatched'
setwd(wd)
path.base = '/Users/pascaltimshel/git/snpsnap/analysis/validation_summary_stats_inputEQmatched'
path.compare = file.path(path.base, 'compare') # path.compare is a subdir in the path.base
## Read into a list of files:
path.datafiles <- path.compare
# File names encode the number of required SNPs in the captured group below.
pat = "SNPsnap_rand100_defaultMatchCrit_n(.*)_excludeInputHLA_compare\\.csv$"
files <- list.files(path = path.datafiles, pattern = pat, full.names = TRUE) #full path
params <- str_match(files, 'SNPsnap_rand100_defaultMatchCrit_n(.*)_excludeInputHLA_compare.csv')[,2] # perl does not work. SAVING REQUIRED NUMBER OF SNPS IN PARAMs
names(files) <- params
cat(names(files), sep="\n")
# na.strings = NULL keeps the literal string "NA" intact (those rows are
# relabelled "ratio" further down).
list_of_data <- llply(files, read.csv, stringsAsFactors=FALSE, na.strings = NULL) # files are COMMA seperated
x <- list_of_data[[1]]
str(x)
# Tag the data frame stored under `dfname` in `dflist` with a numeric
# "param" column derived from its name (the required-SNP count), so the
# data frames can be stacked and later grouped by parameter.
add_col <- function(dfname, dflist) {
  tagged <- dflist[[dfname]]
  tagged[["param"]] <- as.numeric(dfname)  # OBS: numeric, not factor
  tagged
}
# Tag every data frame in list_of_data with its "param" (required SNP count)
# and stack them into one long data frame for plotting.
dflist <- lapply(names(list_of_data), add_col, list_of_data) #adding column
names(dflist) <- names(list_of_data) #copy names
df.combined <- ldply(dflist) # merging, .id=NULL does not work!?
df.combined.clean <- df.combined[,-1] # REMOVING the automatically created index column (.id)
str(df.combined.clean)
# The ratio rows carry the literal string "NA" in 'origin' (na.strings = NULL
# above kept them unconverted); relabel them as "ratio".
df.combined.clean[df.combined.clean[,'origin']=="NA",'origin'] <- c("ratio") ## REPLACING "NA" STRING values in "origin" column with ratio
#df.combined.clean[,'param'] <- as.numeric(as.character(dat3$a)) # http://stackoverflow.com/questions/9480408/convert-factor-to-integer-in-a-data-frame
# Long format: one row per (origin, param, variable) for the ggplot calls below.
df.melt <- melt(df.combined.clean, id.vars=c("origin", "param"))
#melt(data, id.vars, measure.vars, variable.name = "variable", value.name = "value")
### Initializing plot list
plots <- list() # new empty list
##### FREQ
p <- ggplot(subset(df.melt, variable=="mean_freq_bin" & origin=="ratio"), aes(x=param, y=value)) #fill=origin
p <- p + geom_bar(stat="identity", position="dodge")
p <- p + labs(title="Minor Allele Frequency")
p <- p + labs(x='Number of required SNPs', y=expression(paste(mu['input']/mu['matched'],' ','(%)', sep="")))
p <- p + coord_cartesian(ylim=c(100,125))
p <- p + scale_x_continuous(breaks=unique(df.melt[,'param']))
p <- p + theme(panel.grid.minor.x=element_blank(), panel.grid.major.x=element_blank()) # Hide all the x-axis gridlines
p <- p + theme(axis.text.x=element_text(size=14, angle=45, vjust=0.5), axis.text.y=element_text(size=14)) # adjust tickmarks size and angle
p <- p + theme(axis.title.x = element_text(size=16), axis.title.y = element_text(size=20))
p
plots[["freq"]] <- p
### GENE COUNT
p <- ggplot(subset(df.melt, variable=="mean_gene_count" & origin=="ratio"), aes(x=param, y=value)) #fill=origin
p <- p + geom_bar(stat="identity", position="dodge")
p <- p + labs(title="Gene Density")
p <- p + labs(x='Number of required SNPs', y=expression(paste(mu['input']/mu['matched'],' ','(%)', sep="")))
p <- p + coord_cartesian(ylim=c(100,125))
p <- p + scale_x_continuous(breaks=unique(df.melt[,'param']))
p <- p + theme(panel.grid.minor.x=element_blank(), panel.grid.major.x=element_blank()) # Hide all the x-axis gridlines
p <- p + theme(axis.text.x=element_text(size=14, angle=45, vjust=0.5), axis.text.y=element_text(size=14)) # adjust tickmarks size and angle
p <- p + theme(axis.title.x = element_text(size=16), axis.title.y = element_text(size=20))
p
plots[["gene_count"]] <- p
### DIST
p <- ggplot(subset(df.melt, variable=="mean_dist_nearest_gene_snpsnap" & origin=="ratio"), aes(x=param, y=value)) #fill=origin
p <- p + geom_bar(stat="identity", position="dodge")
p <- p + labs(title="Distance to Nearest Gene")
p <- p + labs(x='Number of required SNPs', y=expression(paste(mu['input']/mu['matched'],' ','(%)', sep="")))
p <- p + coord_cartesian(ylim=c(100,125))
p <- p + scale_x_continuous(breaks=unique(df.melt[,'param']))
p <- p + theme(panel.grid.minor.x=element_blank(), panel.grid.major.x=element_blank()) # Hide all the x-axis gridlines
p <- p + theme(axis.text.x=element_text(size=14, angle=45, vjust=0.5), axis.text.y=element_text(size=14)) # adjust tickmarks size and angle
p <- p + theme(axis.title.x = element_text(size=16), axis.title.y = element_text(size=20))
p
plots[["dist"]] <- p
### LD BUDDY
p <- ggplot(subset(df.melt, variable=="mean_friends_ld05" & origin=="ratio"), aes(x=param, y=value)) #fill=origin
p <- p + geom_bar(stat="identity", position="dodge")
p <- p + labs(title="LD Buddy Count")
p <- p + labs(x='Number of required SNPs', y=expression(paste(mu['input']/mu['matched'],' ','(%)', sep="")))
p <- p + coord_cartesian(ylim=c(100,125))
p <- p + scale_x_continuous(breaks=unique(df.melt[,'param']))
p <- p + theme(panel.grid.minor.x=element_blank(), panel.grid.major.x=element_blank()) # Hide all the x-axis gridlines
p <- p + theme(axis.text.x=element_text(size=14, angle=45, vjust=0.5), axis.text.y=element_text(size=14)) # adjust tickmarks size and angle
p <- p + theme(axis.title.x = element_text(size=16), axis.title.y = element_text(size=20))
p
plots[["ld_buddy"]] <- p
multiplot(plotlist = plots, cols=2)
####################### BLABLA ###################################
#c(seq(from = 10, to = 200, by = 10), seq(from = 52, to = 58, by = 2))
#c(seq(from = 10, to = 200, by = 10), rep("", 4))
######### things to try:
## A theme with white background and black gridlines.
#p + theme_bw()
## this worked
#named_vec_breaks <- unique(df.melt[,'param'])
#names(named_vec_breaks) <- unique(df.melt[,'param'])
#names(named_vec_breaks)
#p <- p + scale_x_continuous(breaks=named_vec_breaks)
################## LOTS OF BS - converting NA factor levels ####################
#df.combined.clean[,'origin'] <- factor(df.combined.clean[,'origin'], exclude=NULL) # set NA as an extra level, and not a missing value
#levels(df.combined.clean[,'origin'])
#levels(df.combined.clean[,'origin'])[levels(df.combined.clean[,'origin'])=="NA"] <- "ratio" ## REPLACING NA values in "origin" column with ratio
#is.na(levels(df.combined.clean[,'origin'])[df.combined.clean[,'origin']])
#df.combined.clean[df.combined.clean[,"origin"]==NA,"origin"] <- "ratio"
#df.combined.clean[is.na(df.combined.clean[,"origin"]),"origin"] <- "ratio" # DID NOT WORK! # REPLACING NA values in "origin" column with ratio
| /analysis/validation_summary_stats_inputEQmatched/meanRatio_vs_requiredSNPs.R | no_license | pascaltimshel/snpsnap | R | false | false | 6,460 | r | rm(list=ls())
library(ggplot2)
library(reshape2)
library(plyr)
library(stringr)
library(grid)
# loading multiplot function
source('../multiplot.R')
wd = '/Users/pascaltimshel/git/snpsnap/analysis/validation_summary_stats_inputEQmatched'
setwd(wd)
path.base = '/Users/pascaltimshel/git/snpsnap/analysis/validation_summary_stats_inputEQmatched'
path.compare = file.path(path.base, 'compare') # path.compare is a subdir in the path.base
## Read into a list of files:
path.datafiles <- path.compare
pat = "SNPsnap_rand100_defaultMatchCrit_n(.*)_excludeInputHLA_compare\\.csv$"
files <- list.files(path = path.datafiles, pattern = pat, full.names = TRUE) #full path
params <- str_match(files, 'SNPsnap_rand100_defaultMatchCrit_n(.*)_excludeInputHLA_compare.csv')[,2] # perl does not work. SAVING REQUIRED NUMBER OF SNPS IN PARAMs
names(files) <- params
cat(names(files), sep="\n")
list_of_data <- llply(files, read.csv, stringsAsFactors=FALSE, na.strings = NULL) # files are COMMA seperated
x <- list_of_data[[1]]
str(x)
# Tag the data frame stored under `dfname` in `dflist` with a numeric
# "param" column derived from its name (the required-SNP count), so the
# data frames can be stacked and later grouped by parameter.
add_col <- function(dfname, dflist) {
  tagged <- dflist[[dfname]]
  tagged[["param"]] <- as.numeric(dfname)  # OBS: numeric, not factor
  tagged
}
dflist <- lapply(names(list_of_data), add_col, list_of_data) #adding column
names(dflist) <- names(list_of_data) #copy names
df.combined <- ldply(dflist) # merging, .id=NULL does not work!?
df.combined.clean <- df.combined[,-1] # REMOVING the automatically created index column (.id)
str(df.combined.clean)
df.combined.clean[df.combined.clean[,'origin']=="NA",'origin'] <- c("ratio") ## REPLACING "NA" STRING values in "origin" column with ratio
#df.combined.clean[,'param'] <- as.numeric(as.character(dat3$a)) # http://stackoverflow.com/questions/9480408/convert-factor-to-integer-in-a-data-frame
df.melt <- melt(df.combined.clean, id.vars=c("origin", "param"))
#melt(data, id.vars, measure.vars, variable.name = "variable", value.name = "value")
### Initializing plot list
plots <- list() # new empty list
##### FREQ
p <- ggplot(subset(df.melt, variable=="mean_freq_bin" & origin=="ratio"), aes(x=param, y=value)) #fill=origin
p <- p + geom_bar(stat="identity", position="dodge")
p <- p + labs(title="Minor Allele Frequency")
p <- p + labs(x='Number of required SNPs', y=expression(paste(mu['input']/mu['matched'],' ','(%)', sep="")))
p <- p + coord_cartesian(ylim=c(100,125))
p <- p + scale_x_continuous(breaks=unique(df.melt[,'param']))
p <- p + theme(panel.grid.minor.x=element_blank(), panel.grid.major.x=element_blank()) # Hide all the x-axis gridlines
p <- p + theme(axis.text.x=element_text(size=14, angle=45, vjust=0.5), axis.text.y=element_text(size=14)) # adjust tickmarks size and angle
p <- p + theme(axis.title.x = element_text(size=16), axis.title.y = element_text(size=20))
p
plots[["freq"]] <- p
### GENE COUNT
p <- ggplot(subset(df.melt, variable=="mean_gene_count" & origin=="ratio"), aes(x=param, y=value)) #fill=origin
p <- p + geom_bar(stat="identity", position="dodge")
p <- p + labs(title="Gene Density")
p <- p + labs(x='Number of required SNPs', y=expression(paste(mu['input']/mu['matched'],' ','(%)', sep="")))
p <- p + coord_cartesian(ylim=c(100,125))
p <- p + scale_x_continuous(breaks=unique(df.melt[,'param']))
p <- p + theme(panel.grid.minor.x=element_blank(), panel.grid.major.x=element_blank()) # Hide all the x-axis gridlines
p <- p + theme(axis.text.x=element_text(size=14, angle=45, vjust=0.5), axis.text.y=element_text(size=14)) # adjust tickmarks size and angle
p <- p + theme(axis.title.x = element_text(size=16), axis.title.y = element_text(size=20))
p
plots[["gene_count"]] <- p
### DIST
p <- ggplot(subset(df.melt, variable=="mean_dist_nearest_gene_snpsnap" & origin=="ratio"), aes(x=param, y=value)) #fill=origin
p <- p + geom_bar(stat="identity", position="dodge")
p <- p + labs(title="Distance to Nearest Gene")
p <- p + labs(x='Number of required SNPs', y=expression(paste(mu['input']/mu['matched'],' ','(%)', sep="")))
p <- p + coord_cartesian(ylim=c(100,125))
p <- p + scale_x_continuous(breaks=unique(df.melt[,'param']))
p <- p + theme(panel.grid.minor.x=element_blank(), panel.grid.major.x=element_blank()) # Hide all the x-axis gridlines
p <- p + theme(axis.text.x=element_text(size=14, angle=45, vjust=0.5), axis.text.y=element_text(size=14)) # adjust tickmarks size and angle
p <- p + theme(axis.title.x = element_text(size=16), axis.title.y = element_text(size=20))
p
plots[["dist"]] <- p
### LD BUDDY
p <- ggplot(subset(df.melt, variable=="mean_friends_ld05" & origin=="ratio"), aes(x=param, y=value)) #fill=origin
p <- p + geom_bar(stat="identity", position="dodge")
p <- p + labs(title="LD Buddy Count")
p <- p + labs(x='Number of required SNPs', y=expression(paste(mu['input']/mu['matched'],' ','(%)', sep="")))
p <- p + coord_cartesian(ylim=c(100,125))
p <- p + scale_x_continuous(breaks=unique(df.melt[,'param']))
p <- p + theme(panel.grid.minor.x=element_blank(), panel.grid.major.x=element_blank()) # Hide all the x-axis gridlines
p <- p + theme(axis.text.x=element_text(size=14, angle=45, vjust=0.5), axis.text.y=element_text(size=14)) # adjust tickmarks size and angle
p <- p + theme(axis.title.x = element_text(size=16), axis.title.y = element_text(size=20))
p
plots[["ld_buddy"]] <- p
multiplot(plotlist = plots, cols=2)
####################### BLABLA ###################################
#c(seq(from = 10, to = 200, by = 10), seq(from = 52, to = 58, by = 2))
#c(seq(from = 10, to = 200, by = 10), rep("", 4))
######### things to try:
## A theme with white background and black gridlines.
#p + theme_bw()
## this worked
#named_vec_breaks <- unique(df.melt[,'param'])
#names(named_vec_breaks) <- unique(df.melt[,'param'])
#names(named_vec_breaks)
#p <- p + scale_x_continuous(breaks=named_vec_breaks)
################## LOTS OF BS - converting NA factor levels ####################
#df.combined.clean[,'origin'] <- factor(df.combined.clean[,'origin'], exclude=NULL) # set NA as an extra level, and not a missing value
#levels(df.combined.clean[,'origin'])
#levels(df.combined.clean[,'origin'])[levels(df.combined.clean[,'origin'])=="NA"] <- "ratio" ## REPLACING NA values in "origin" column with ratio
#is.na(levels(df.combined.clean[,'origin'])[df.combined.clean[,'origin']])
#df.combined.clean[df.combined.clean[,"origin"]==NA,"origin"] <- "ratio"
#df.combined.clean[is.na(df.combined.clean[,"origin"]),"origin"] <- "ratio" # DID NOT WORK! # REPLACING NA values in "origin" column with ratio
|
##########################################################################################
##########################################################################################
#CSA Analysis ####
# Function to pull out csa at intervals and compile into dataframe
# Sample cross-sectional area (CSA) for specimen `x` at 5% intervals along the
# shaft and express each level as a percent difference from the 50% (midshaft)
# level. Returns a one-row data frame: specimen, then levels 0, 5, ..., 95.
#
# NOTE(review): relies on the globals `csa`, `label` and `slice` (presumably
# columns of `data`) instead of taking them as arguments -- confirm they are
# attached/defined before calling.
midshaft.csa.function <- function(x)
{
# number of slices recorded for this specimen
slicelength <- as.numeric(length(csa[which(label == x)]))
# indices at every 5% of the slice range (20 sampling levels)
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
# shift so intervals index absolute slice numbers, not 1-based offsets
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# pair absolute slice numbers with their CSA values
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (csa[which(label == x)]))
# pad with NA rows so that row number == absolute slice number
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# one row: CSA at the 0%, 5%, ..., 95% levels
csa.levels <- data.frame(rbind(slice.matrix[,2]))
csa.levels <- cbind(x, csa.levels)
colnames(csa.levels) <- c("specimen", seq(0,95, by=5))
# percent difference of each level vs. column 12 (the 50% level); the trailing
# division by length(unique(x)) is 1 for a single specimen label -- presumably
# intended for averaging over several labels; TODO confirm.
perdiff <- rbind(((csa.levels[,2:21] - csa.levels[,12])/csa.levels[,12])*100)/length(unique(x))
perdiff <- data.frame(cbind(x, perdiff))
colnames(perdiff) <- c("specimen",seq(0,95, by=5))
return(perdiff)
}
# All specimen labels; mdply applies the per-specimen function to each unique
# label and row-binds the one-row results.
x <- as.vector(data$label)
result.csa = mdply(unique(x), midshaft.csa.function)
#Reshape into long format in order to plot
result.csa.long <- melt(result.csa, variable.name="level", value.name="percent.difference")
# Keep only the 30%-70% levels; rbind preserves the level-grouped row order.
result.csa.long <- rbind(result.csa.long[which(result.csa.long$level == 30),],
result.csa.long[which(result.csa.long$level == 35),],
result.csa.long[which(result.csa.long$level == 40),],
result.csa.long[which(result.csa.long$level == 45),],
result.csa.long[which(result.csa.long$level == 50),],
result.csa.long[which(result.csa.long$level == 55),],
result.csa.long[which(result.csa.long$level == 60),],
result.csa.long[which(result.csa.long$level == 65),],
result.csa.long[which(result.csa.long$level == 70),])
#Now the same function, but with the raw data in order to perform the statistical tests (this is just repeating all steps up to, but not including calculating percentages)
# Same sampling as midshaft.csa.function, but returns the RAW CSA values at
# each 5% level (no percent-difference step), for use in the significance
# tests below. NOTE(review): also relies on the globals `csa`, `label`,
# `slice` -- confirm they are defined before calling.
midshaft.raw.csa.function <- function(x)
{
# number of slices recorded for this specimen
slicelength <- as.numeric(length(csa[which(label == x)]))
# indices at every 5% of the slice range, shifted to absolute slice numbers
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# pair absolute slice numbers with CSA, NA-padded so row == slice number
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (csa[which(label == x)]))
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# one row per specimen: raw CSA at the 0%, 5%, ..., 95% levels
csa.levels <- data.frame(rbind(slice.matrix[,2]))
csa.levels <- cbind(x, csa.levels)
colnames(csa.levels) <- c("specimen", seq(0,95, by=5))
return(csa.levels)
}
# Raw CSA values per specimen and level, then long format for the ANOVA.
raw.csa.result = mdply(unique(x), midshaft.raw.csa.function)
raw.result.csa.long <- melt(raw.csa.result, variable.name="level", value.name="raw.value")
#inverse of previous result.csa.long to keep specimen names for the anova-- probably need to change for all
# Drop the 0-25% and 75-95% levels, keeping 30-70% for the tests below.
raw.result.csa.long <- raw.result.csa.long[-c((which(raw.result.csa.long$level == 0)), (which(raw.result.csa.long$level == 5)),(which(raw.result.csa.long$level == 10)), (which(raw.result.csa.long$level == 15)),(which(raw.result.csa.long$level == 20)), (which(raw.result.csa.long$level == 25)),(which(raw.result.csa.long$level == 75)),(which(raw.result.csa.long$level == 80)),(which(raw.result.csa.long$level == 85)),(which(raw.result.csa.long$level == 90)),(which(raw.result.csa.long$level == 95))),]
#Repeated measures ANOVA####
#Standard repeated measures anova (correcting for repeated measures from the same specimen)
# Tukey HSD extracts pairwise contrasts; only the contrasts involving the
# 50% (midshaft) level are retained.
m <- aov(terms(raw.result.csa.long$raw.value ~ raw.result.csa.long$level + (raw.result.csa.long$specimen)))
m <- TukeyHSD(m)
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
csa.aov <- m$`raw.result.csa.long$level`[midshaft.aov,]
#Paired T-test of all levels against midshaft (50%), paired by specimen row order.
# (paired = TRUE spelled out: `T` is a reassignable binding, not a keyword.)
csa.ttest <- as.data.frame(cbind(c(30, 35, 40, 45, 55, 60, 65, 70),
                                 c(t.test(raw.csa.result$'50', raw.csa.result$'30', paired = TRUE)$p.value,
                                   t.test(raw.csa.result$'50', raw.csa.result$'35', paired = TRUE)$p.value,
                                   t.test(raw.csa.result$'50', raw.csa.result$'40', paired = TRUE)$p.value,
                                   t.test(raw.csa.result$'50', raw.csa.result$'45', paired = TRUE)$p.value,
                                   t.test(raw.csa.result$'50', raw.csa.result$'55', paired = TRUE)$p.value,
                                   t.test(raw.csa.result$'50', raw.csa.result$'60', paired = TRUE)$p.value,
                                   t.test(raw.csa.result$'50', raw.csa.result$'65', paired = TRUE)$p.value,
                                   t.test(raw.csa.result$'50', raw.csa.result$'70', paired = TRUE)$p.value)))
colnames(csa.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
csa.ttest[,2] <- p.adjust(csa.ttest[,2], method = "bonferroni", n = length(csa.ttest[,2]))
#Compare Results of ANOVA and T-test:
csa.pvalues <- cbind(csa.aov[,-c(1:3)], csa.ttest[,2])
colnames(csa.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
csa.pvalues
#And the same significance tests on the logged values####
# Repeated-measures ANOVA on log-transformed CSA (specimen as blocking term),
# followed by Tukey HSD; only the midshaft contrasts are kept.
m <- aov(terms(log(raw.result.csa.long$raw.value) ~ raw.result.csa.long$level + (raw.result.csa.long$specimen)))
m <- TukeyHSD(m)
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
log.csa.aov <- m$`raw.result.csa.long$level`[midshaft.aov,]
#Paired T-test of all logged levels against the logged midshaft (50%) values.
# BUGFIX: the 30% comparison previously applied log() TWICE to the 50% column
# (log(log(raw.csa.result$'50'))), making it inconsistent with the other seven.
log.csa.ttest <- as.data.frame(cbind(c(30, 35, 45, 45, 55, 60, 65, 70)[c(1, 2, 3, 4, 5, 6, 7, 8)] * 0 + c(30, 35, 40, 45, 55, 60, 65, 70),
                                     c(t.test(log(raw.csa.result$'50'), log(raw.csa.result$'30'), paired = TRUE)$p.value,
                                       t.test(log(raw.csa.result$'50'), log(raw.csa.result$'35'), paired = TRUE)$p.value,
                                       t.test(log(raw.csa.result$'50'), log(raw.csa.result$'40'), paired = TRUE)$p.value,
                                       t.test(log(raw.csa.result$'50'), log(raw.csa.result$'45'), paired = TRUE)$p.value,
                                       t.test(log(raw.csa.result$'50'), log(raw.csa.result$'55'), paired = TRUE)$p.value,
                                       t.test(log(raw.csa.result$'50'), log(raw.csa.result$'60'), paired = TRUE)$p.value,
                                       t.test(log(raw.csa.result$'50'), log(raw.csa.result$'65'), paired = TRUE)$p.value,
                                       t.test(log(raw.csa.result$'50'), log(raw.csa.result$'70'), paired = TRUE)$p.value)))
colnames(log.csa.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
# BUGFIX: previously adjusted the UNLOGGED csa.ttest p-values; adjust the
# logged ones computed above.
log.csa.ttest[,2] <- p.adjust(log.csa.ttest[,2], method = "bonferroni", n = length(log.csa.ttest[,2]))
#Compare Results of logged ANOVA and T-test:
log.csa.pvalues <- cbind(log.csa.aov[,-c(1:3)], log.csa.ttest[,2])
colnames(log.csa.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
log.csa.pvalues
###############################################################################################################################################
#J Analysis ####
# Function to pull out J at intervals and compile into dataframe
# Sample the polar second moment of area (J) for specimen `x` at 5% intervals
# along the shaft and express each level as a percent difference from the 50%
# (midshaft) level. Structure mirrors midshaft.csa.function.
#
# NOTE(review): relies on the globals `J`, `label` and `slice` -- confirm
# they are attached/defined before calling.
midshaft.J.function <- function(x)
{
# number of slices recorded for this specimen
slicelength <- as.numeric(length(J[which(label == x)]))
# indices at every 5% of the slice range, shifted to absolute slice numbers
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# pair absolute slice numbers with J, NA-padded so row == slice number
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (J[which(label == x)]))
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# one row: J at the 0%, 5%, ..., 95% levels
J.levels <- data.frame(rbind(slice.matrix[,2]))
J.levels <- cbind(x, J.levels)
colnames(J.levels) <- c("specimen", seq(0,95, by=5))
# percent difference of each level vs. column 12 (the 50% level)
perdiff <- rbind(((J.levels[,2:21] - J.levels[,12])/J.levels[,12])*100)/length(unique(x))
perdiff <- data.frame(cbind(x, perdiff))
colnames(perdiff) <- c("specimen",seq(0,95, by=5))
return(perdiff)
}
x <- as.vector(data$label)
result.J = mdply(unique(x), midshaft.J.function)
#Reshape into long format in order to plot
result.J.long <- melt(result.J, variable.name="level", value.name="percent.difference")
result.J.long <- rbind(result.J.long[which(result.J.long$level == 30),],
result.J.long[which(result.J.long$level == 35),],
result.J.long[which(result.J.long$level == 40),],
result.J.long[which(result.J.long$level == 45),],
result.J.long[which(result.J.long$level == 50),],
result.J.long[which(result.J.long$level == 55),],
result.J.long[which(result.J.long$level == 60),],
result.J.long[which(result.J.long$level == 65),],
result.J.long[which(result.J.long$level == 70),])
#Now the same function, but with the raw data in order to perform the statistical tests (this is just repeating all steps up to, but not including calculating percentages)
# Same sampling as midshaft.J.function, but returns the RAW J values at each
# 5% level (no percent-difference step), for the significance tests below.
# NOTE(review): also relies on the globals `J`, `label`, `slice`.
midshaft.raw.J.function <- function(x)
{
# number of slices recorded for this specimen
slicelength <- as.numeric(length(J[which(label == x)]))
# indices at every 5% of the slice range, shifted to absolute slice numbers
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# pair absolute slice numbers with J, NA-padded so row == slice number
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (J[which(label == x)]))
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# one row per specimen: raw J at the 0%, 5%, ..., 95% levels
J.levels <- data.frame(rbind(slice.matrix[,2]))
J.levels <- cbind(x, J.levels)
colnames(J.levels) <- c("specimen", seq(0,95, by=5))
return(J.levels)
}
raw.J.result = mdply(unique(x), midshaft.raw.J.function)
raw.result.J.long <- melt(raw.J.result, variable.name="level", value.name="raw.value")
#inverse of previous result.J.long to keep specimen names for the anova-- probably need to change for all
raw.result.J.long <- raw.result.J.long[-c((which(raw.result.J.long$level == 0)), (which(raw.result.J.long$level == 5)),(which(raw.result.J.long$level == 10)), (which(raw.result.J.long$level == 15)),(which(raw.result.J.long$level == 20)), (which(raw.result.J.long$level == 25)),(which(raw.result.J.long$level == 75)),(which(raw.result.J.long$level == 80)),(which(raw.result.J.long$level == 85)),(which(raw.result.J.long$level == 90)),(which(raw.result.J.long$level == 95))),]
#Repeated measures ANOVA####
#Standard repeated measures anova (correcting for repeated measures from the same specimen)
# Tukey HSD extracts pairwise contrasts; only the contrasts involving the
# 50% (midshaft) level are retained.
m <- aov(terms(raw.result.J.long$raw.value ~ raw.result.J.long$level + (raw.result.J.long$specimen)))
m <- TukeyHSD(m)
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
J.aov <- m$`raw.result.J.long$level`[midshaft.aov,]
#Paired T-test of all levels against midshaft (50%), paired by specimen row order.
# (paired = TRUE spelled out: `T` is a reassignable binding, not a keyword.)
J.ttest <- as.data.frame(cbind(c(30, 35, 40, 45, 55, 60, 65, 70),
                               c(t.test(raw.J.result$'50', raw.J.result$'30', paired = TRUE)$p.value,
                                 t.test(raw.J.result$'50', raw.J.result$'35', paired = TRUE)$p.value,
                                 t.test(raw.J.result$'50', raw.J.result$'40', paired = TRUE)$p.value,
                                 t.test(raw.J.result$'50', raw.J.result$'45', paired = TRUE)$p.value,
                                 t.test(raw.J.result$'50', raw.J.result$'55', paired = TRUE)$p.value,
                                 t.test(raw.J.result$'50', raw.J.result$'60', paired = TRUE)$p.value,
                                 t.test(raw.J.result$'50', raw.J.result$'65', paired = TRUE)$p.value,
                                 t.test(raw.J.result$'50', raw.J.result$'70', paired = TRUE)$p.value)))
colnames(J.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
J.ttest[,2] <- p.adjust(J.ttest[,2], method = "bonferroni", n = length(J.ttest[,2]))
#Compare Results of ANOVA and T-test:
J.pvalues <- cbind(J.aov[,-c(1:3)], J.ttest[,2])
colnames(J.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
J.pvalues
#And the same significance tests on the logged values####
# Repeated-measures ANOVA on log-transformed J (specimen as blocking term),
# followed by Tukey HSD; only the midshaft contrasts are kept.
m <- aov(terms(log(raw.result.J.long$raw.value) ~ raw.result.J.long$level + (raw.result.J.long$specimen)))
m <- TukeyHSD(m)
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
log.J.aov <- m$`raw.result.J.long$level`[midshaft.aov,]
#Paired T-test of all logged levels against the logged midshaft (50%) values.
# BUGFIX: the 30% comparison previously applied log() TWICE to the 50% column
# (log(log(raw.J.result$'50'))), making it inconsistent with the other seven.
log.J.ttest <- as.data.frame(cbind(c(30, 35, 40, 45, 55, 60, 65, 70),
                                   c(t.test(log(raw.J.result$'50'), log(raw.J.result$'30'), paired = TRUE)$p.value,
                                     t.test(log(raw.J.result$'50'), log(raw.J.result$'35'), paired = TRUE)$p.value,
                                     t.test(log(raw.J.result$'50'), log(raw.J.result$'40'), paired = TRUE)$p.value,
                                     t.test(log(raw.J.result$'50'), log(raw.J.result$'45'), paired = TRUE)$p.value,
                                     t.test(log(raw.J.result$'50'), log(raw.J.result$'55'), paired = TRUE)$p.value,
                                     t.test(log(raw.J.result$'50'), log(raw.J.result$'60'), paired = TRUE)$p.value,
                                     t.test(log(raw.J.result$'50'), log(raw.J.result$'65'), paired = TRUE)$p.value,
                                     t.test(log(raw.J.result$'50'), log(raw.J.result$'70'), paired = TRUE)$p.value)))
colnames(log.J.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
# BUGFIX: previously adjusted the UNLOGGED J.ttest p-values; adjust the
# logged ones computed above.
log.J.ttest[,2] <- p.adjust(log.J.ttest[,2], method = "bonferroni", n = length(log.J.ttest[,2]))
#Compare Results of logged ANOVA and T-test:
log.J.pvalues <- cbind(log.J.aov[,-c(1:3)], log.J.ttest[,2])
colnames(log.J.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
log.J.pvalues
###############################################################################################################################################
#Imax/Imin Analysis ####
# Function to pull out ratio at intervals and compile into dataframe
# Sample the Imax/Imin ratio for specimen `x` at 5% intervals along the shaft
# and express each level as a percent difference from the 50% (midshaft)
# level. Structure mirrors midshaft.csa.function.
#
# NOTE(review): relies on the globals `ratio`, `label` and `slice` -- confirm
# they are attached/defined before calling.
midshaft.ratio.function <- function(x)
{
# number of slices recorded for this specimen
slicelength <- as.numeric(length(ratio[which(label == x)]))
# indices at every 5% of the slice range, shifted to absolute slice numbers
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# pair absolute slice numbers with the ratio, NA-padded so row == slice number
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (ratio[which(label == x)]))
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# one row: ratio at the 0%, 5%, ..., 95% levels
ratio.levels <- data.frame(rbind(slice.matrix[,2]))
ratio.levels <- cbind(x, ratio.levels)
colnames(ratio.levels) <- c("specimen", seq(0,95, by=5))
# percent difference of each level vs. column 12 (the 50% level)
perdiff <- rbind(((ratio.levels[,2:21] - ratio.levels[,12])/ratio.levels[,12])*100)/length(unique(x))
perdiff <- data.frame(cbind(x, perdiff))
colnames(perdiff) <- c("specimen",seq(0,95, by=5))
return(perdiff)
}
# Specimen labels; the percent-difference function is applied once per
# unique specimen via plyr::mdply.
x <- as.vector(data$label)
result.ratio = mdply(unique(x), midshaft.ratio.function)
#Reshape into long format in order to plot
result.ratio.long <- melt(result.ratio, variable.name="level", value.name="percent.difference")
# Keep only the 30%-70% levels. melt() stacks the level columns in order
# (0, 5, ..., 95), so filtering with %in% yields the same level-grouped row
# order as the original nine-row rbind()-of-which() chain, while also being
# robust to a level that happens to be absent from the data.
result.ratio.long <- result.ratio.long[result.ratio.long$level %in% seq(30, 70, by = 5), ]
#Now the same function, but with the raw data in order to perform the statistical tests (this is just repeating all steps up to, but not including calculating percentages)
# Same per-specimen 5%-interval sampling as the percent-difference version,
# but returns the raw sampled Imax/Imin ratio values for the significance
# tests (no percent-difference step).
# NOTE(review): reads the globals `ratio`, `label` and `slice` from the
# calling environment -- confirm they are attached.
midshaft.raw.ratio.function <- function(x)
{
# Number of slices recorded for this specimen.
slicelength <- as.numeric(length(ratio[which(label == x)]))
# Row indices at every 5% of the specimen's slice count.
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
# Shift to absolute slice numbers; the matching length adjustment below
# subtracts 1 but this offset does not -- possible off-by-one (verify).
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# Pair absolute slice numbers with this specimen's ratio values.
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (ratio[which(label == x)]))
# Pad with NA rows so that row position equals absolute slice number.
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# Single row of sampled ratio values, one column per 5% level (0..95).
ratio.levels <- data.frame(rbind(slice.matrix[,2]))
ratio.levels <- cbind(x, ratio.levels)
colnames(ratio.levels) <- c("specimen", seq(0,95, by=5))
return(ratio.levels)
}
raw.ratio.result = mdply(unique(x), midshaft.raw.ratio.function)
raw.result.ratio.long <- melt(raw.ratio.result, variable.name="level", value.name="raw.value")
# Keep only the 30%-70% levels, retaining specimen names for the ANOVA.
# Replaces the original -c(which(...), ...) negative-index chain: if every
# which() in that chain returned integer(0), x[-integer(0), ] would silently
# select *zero* rows, whereas %in% filtering is always safe.
raw.result.ratio.long <- raw.result.ratio.long[raw.result.ratio.long$level %in% seq(30, 70, by = 5), ]
#Repeated measures ANOVA####
#Standard repeated measures anova (correcting for repeated measures from the same specimen)
m <- aov(terms(raw.result.ratio.long$raw.value ~ raw.result.ratio.long$level + (raw.result.ratio.long$specimen)))
m <- TukeyHSD(m)
# Tukey contrasts of every retained level against the midshaft (50%) level.
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
ratio.aov <- m$`raw.result.ratio.long$level`[midshaft.aov,]
# Paired t-test of each level against midshaft; the loop replaces eight
# copy-pasted t.test() calls with a single definition of the comparison.
ratio.test.levels <- c(30, 35, 40, 45, 55, 60, 65, 70)
ratio.ttest <- as.data.frame(cbind(
  ratio.test.levels,
  vapply(ratio.test.levels, function(lvl) {
    t.test(raw.ratio.result[["50"]], raw.ratio.result[[as.character(lvl)]], paired = TRUE)$p.value
  }, numeric(1))
))
colnames(ratio.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
ratio.ttest[,2] <- p.adjust(ratio.ttest[,2], method = "bonferroni", n = length(ratio.ttest[,2]))
#Compare Results of ANOVA and T-test:
ratio.pvalues <- cbind(ratio.aov[,-c(1:3)], ratio.ttest[,2])
colnames(ratio.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
ratio.pvalues
#And the same significance tests on the logged values####
m <- aov(terms(log(raw.result.ratio.long$raw.value) ~ raw.result.ratio.long$level + (raw.result.ratio.long$specimen)))
m <- TukeyHSD(m)
# Tukey contrasts of every retained level against the midshaft (50%) level.
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
log.ratio.aov <- m$`raw.result.ratio.long$level`[midshaft.aov,]
# Paired t-tests of each logged level against the logged midshaft column.
# BUG FIX: the 30% comparison previously double-logged the midshaft column
# (log(log(raw.ratio.result$'50'))); every comparison now logs exactly once.
log.ratio.test.levels <- c(30, 35, 40, 45, 55, 60, 65, 70)
log.ratio.ttest <- as.data.frame(cbind(
  log.ratio.test.levels,
  vapply(log.ratio.test.levels, function(lvl) {
    t.test(log(raw.ratio.result[["50"]]), log(raw.ratio.result[[as.character(lvl)]]), paired = TRUE)$p.value
  }, numeric(1))
))
colnames(log.ratio.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
# BUG FIX: the correction was previously applied to the *unlogged*
# ratio.ttest p-values; it now adjusts the logged p-values computed above.
log.ratio.ttest[,2] <- p.adjust(log.ratio.ttest[,2], method = "bonferroni", n = length(log.ratio.ttest[,2]))
#Compare Results of logged ANOVA and T-test:
log.ratio.pvalues <- cbind(log.ratio.aov[,-c(1:3)], log.ratio.ttest[,2])
colnames(log.ratio.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
log.ratio.pvalues
| /Midshaft.Analysis_Functions.R | no_license | cmongle/Midshaft | R | false | false | 22,633 | r |
##########################################################################################
##########################################################################################
#CSA Analysis ####
# Function to pull out csa at intervals and compile into dataframe
# Percent difference of cross-sectional area (CSA) from midshaft for one
# specimen: samples CSA at every 5% of the specimen's slice count (levels
# 0-95) and expresses each value relative to the 50% (midshaft) value.
# NOTE(review): reads the globals `csa`, `label` and `slice` (columns of
# `data`) from the calling environment -- confirm they are attached.
midshaft.csa.function <- function(x)
{
# Number of slices recorded for this specimen.
slicelength <- as.numeric(length(csa[which(label == x)]))
# Row indices at every 5% of the specimen's slice count.
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
# Shift to absolute slice numbers; the matching length adjustment below
# subtracts 1 but this offset does not -- possible off-by-one (verify).
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# Pair absolute slice numbers with this specimen's CSA values.
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (csa[which(label == x)]))
# Pad with NA rows so that row position equals absolute slice number.
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# Single row of sampled CSA values, one column per 5% level (0..95).
csa.levels <- data.frame(rbind(slice.matrix[,2]))
csa.levels <- cbind(x, csa.levels)
colnames(csa.levels) <- c("specimen", seq(0,95, by=5))
# Percent difference of every level from midshaft (column 12 == 50% level).
# NOTE(review): `length(unique(x))` is 1 for a single label, so the final
# division is a no-op here -- confirm it was meant to average over specimens.
perdiff <- rbind(((csa.levels[,2:21] - csa.levels[,12])/csa.levels[,12])*100)/length(unique(x))
perdiff <- data.frame(cbind(x, perdiff))
colnames(perdiff) <- c("specimen",seq(0,95, by=5))
return(perdiff)
}
# Specimen labels; the percent-difference function is applied once per
# unique specimen via plyr::mdply.
x <- as.vector(data$label)
result.csa = mdply(unique(x), midshaft.csa.function)
#Reshape into long format in order to plot
result.csa.long <- melt(result.csa, variable.name="level", value.name="percent.difference")
# Keep only the 30%-70% levels. melt() stacks the level columns in order
# (0, 5, ..., 95), so filtering with %in% yields the same level-grouped row
# order as the original nine-row rbind()-of-which() chain, while also being
# robust to a level that happens to be absent from the data.
result.csa.long <- result.csa.long[result.csa.long$level %in% seq(30, 70, by = 5), ]
#Now the same function, but with the raw data in order to perform the statistical tests (this is just repeating all steps up to, but not including calculating percentages)
# Same per-specimen 5%-interval sampling as the percent-difference version,
# but returns the raw sampled CSA values for the significance tests.
# NOTE(review): reads the globals `csa`, `label` and `slice` from the
# calling environment -- confirm they are attached.
midshaft.raw.csa.function <- function(x)
{
# Number of slices recorded for this specimen.
slicelength <- as.numeric(length(csa[which(label == x)]))
# Row indices at every 5% of the specimen's slice count.
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
# Shift to absolute slice numbers; the matching length adjustment below
# subtracts 1 but this offset does not -- possible off-by-one (verify).
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# Pair absolute slice numbers with this specimen's CSA values.
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (csa[which(label == x)]))
# Pad with NA rows so that row position equals absolute slice number.
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# Single row of sampled CSA values, one column per 5% level (0..95).
csa.levels <- data.frame(rbind(slice.matrix[,2]))
csa.levels <- cbind(x, csa.levels)
colnames(csa.levels) <- c("specimen", seq(0,95, by=5))
return(csa.levels)
}
raw.csa.result = mdply(unique(x), midshaft.raw.csa.function)
raw.result.csa.long <- melt(raw.csa.result, variable.name="level", value.name="raw.value")
# Keep only the 30%-70% levels, retaining specimen names for the ANOVA.
# Replaces the original -c(which(...), ...) negative-index chain: if every
# which() in that chain returned integer(0), x[-integer(0), ] would silently
# select *zero* rows, whereas %in% filtering is always safe.
raw.result.csa.long <- raw.result.csa.long[raw.result.csa.long$level %in% seq(30, 70, by = 5), ]
#Repeated measures ANOVA####
#Standard repeated measures anova (correcting for repeated measures from the same specimen)
m <- aov(terms(raw.result.csa.long$raw.value ~ raw.result.csa.long$level + (raw.result.csa.long$specimen)))
m <- TukeyHSD(m)
# Tukey contrasts of every retained level against the midshaft (50%) level.
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
csa.aov <- m$`raw.result.csa.long$level`[midshaft.aov,]
# Paired t-test of each level against midshaft; the loop replaces eight
# copy-pasted t.test() calls with a single definition of the comparison.
csa.test.levels <- c(30, 35, 40, 45, 55, 60, 65, 70)
csa.ttest <- as.data.frame(cbind(
  csa.test.levels,
  vapply(csa.test.levels, function(lvl) {
    t.test(raw.csa.result[["50"]], raw.csa.result[[as.character(lvl)]], paired = TRUE)$p.value
  }, numeric(1))
))
colnames(csa.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
csa.ttest[,2] <- p.adjust(csa.ttest[,2], method = "bonferroni", n = length(csa.ttest[,2]))
#Compare Results of ANOVA and T-test:
csa.pvalues <- cbind(csa.aov[,-c(1:3)], csa.ttest[,2])
colnames(csa.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
csa.pvalues
#And the same significance tests on the logged values####
m <- aov(terms(log(raw.result.csa.long$raw.value) ~ raw.result.csa.long$level + (raw.result.csa.long$specimen)))
m <- TukeyHSD(m)
# Tukey contrasts of every retained level against the midshaft (50%) level.
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
log.csa.aov <- m$`raw.result.csa.long$level`[midshaft.aov,]
# Paired t-tests of each logged level against the logged midshaft column.
# BUG FIX: the 30% comparison previously double-logged the midshaft column
# (log(log(raw.csa.result$'50'))); every comparison now logs exactly once.
log.csa.test.levels <- c(30, 35, 40, 45, 55, 60, 65, 70)
log.csa.ttest <- as.data.frame(cbind(
  log.csa.test.levels,
  vapply(log.csa.test.levels, function(lvl) {
    t.test(log(raw.csa.result[["50"]]), log(raw.csa.result[[as.character(lvl)]]), paired = TRUE)$p.value
  }, numeric(1))
))
colnames(log.csa.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
# BUG FIX: the correction was previously applied to the *unlogged*
# csa.ttest p-values; it now adjusts the logged p-values computed above.
log.csa.ttest[,2] <- p.adjust(log.csa.ttest[,2], method = "bonferroni", n = length(log.csa.ttest[,2]))
#Compare Results of logged ANOVA and T-test:
log.csa.pvalues <- cbind(log.csa.aov[,-c(1:3)], log.csa.ttest[,2])
colnames(log.csa.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
log.csa.pvalues
###############################################################################################################################################
#J Analysis ####
# Function to pull out J at intervals and compile into dataframe
# Percent difference of the polar second moment of area (J) from midshaft
# for one specimen: samples J at every 5% of the specimen's slice count
# (levels 0-95) relative to the 50% (midshaft) value.
# NOTE(review): reads the globals `J`, `label` and `slice` (columns of
# `data`) from the calling environment -- confirm they are attached.
midshaft.J.function <- function(x)
{
# Number of slices recorded for this specimen.
slicelength <- as.numeric(length(J[which(label == x)]))
# Row indices at every 5% of the specimen's slice count.
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
# Shift to absolute slice numbers; the matching length adjustment below
# subtracts 1 but this offset does not -- possible off-by-one (verify).
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# Pair absolute slice numbers with this specimen's J values.
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (J[which(label == x)]))
# Pad with NA rows so that row position equals absolute slice number.
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# Single row of sampled J values, one column per 5% level (0..95).
J.levels <- data.frame(rbind(slice.matrix[,2]))
J.levels <- cbind(x, J.levels)
colnames(J.levels) <- c("specimen", seq(0,95, by=5))
# Percent difference of every level from midshaft (column 12 == 50% level).
# NOTE(review): `length(unique(x))` is 1 for a single label, so the final
# division is a no-op here -- confirm it was meant to average over specimens.
perdiff <- rbind(((J.levels[,2:21] - J.levels[,12])/J.levels[,12])*100)/length(unique(x))
perdiff <- data.frame(cbind(x, perdiff))
colnames(perdiff) <- c("specimen",seq(0,95, by=5))
return(perdiff)
}
# Specimen labels; the percent-difference function is applied once per
# unique specimen via plyr::mdply.
x <- as.vector(data$label)
result.J = mdply(unique(x), midshaft.J.function)
#Reshape into long format in order to plot
result.J.long <- melt(result.J, variable.name="level", value.name="percent.difference")
# Keep only the 30%-70% levels. melt() stacks the level columns in order
# (0, 5, ..., 95), so filtering with %in% yields the same level-grouped row
# order as the original nine-row rbind()-of-which() chain, while also being
# robust to a level that happens to be absent from the data.
result.J.long <- result.J.long[result.J.long$level %in% seq(30, 70, by = 5), ]
#Now the same function, but with the raw data in order to perform the statistical tests (this is just repeating all steps up to, but not including calculating percentages)
# Same per-specimen 5%-interval sampling as the percent-difference version,
# but returns the raw sampled J values for the significance tests.
# NOTE(review): reads the globals `J`, `label` and `slice` from the
# calling environment -- confirm they are attached.
midshaft.raw.J.function <- function(x)
{
# Number of slices recorded for this specimen.
slicelength <- as.numeric(length(J[which(label == x)]))
# Row indices at every 5% of the specimen's slice count.
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
# Shift to absolute slice numbers; the matching length adjustment below
# subtracts 1 but this offset does not -- possible off-by-one (verify).
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# Pair absolute slice numbers with this specimen's J values.
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (J[which(label == x)]))
# Pad with NA rows so that row position equals absolute slice number.
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# Single row of sampled J values, one column per 5% level (0..95).
J.levels <- data.frame(rbind(slice.matrix[,2]))
J.levels <- cbind(x, J.levels)
colnames(J.levels) <- c("specimen", seq(0,95, by=5))
return(J.levels)
}
raw.J.result = mdply(unique(x), midshaft.raw.J.function)
raw.result.J.long <- melt(raw.J.result, variable.name="level", value.name="raw.value")
# Keep only the 30%-70% levels, retaining specimen names for the ANOVA.
# Replaces the original -c(which(...), ...) negative-index chain: if every
# which() in that chain returned integer(0), x[-integer(0), ] would silently
# select *zero* rows, whereas %in% filtering is always safe.
raw.result.J.long <- raw.result.J.long[raw.result.J.long$level %in% seq(30, 70, by = 5), ]
#Repeated measures ANOVA####
#Standard repeated measures anova (correcting for repeated measures from the same specimen)
m <- aov(terms(raw.result.J.long$raw.value ~ raw.result.J.long$level + (raw.result.J.long$specimen)))
m <- TukeyHSD(m)
# Tukey contrasts of every retained level against the midshaft (50%) level.
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
J.aov <- m$`raw.result.J.long$level`[midshaft.aov,]
# Paired t-test of each level against midshaft; the loop replaces eight
# copy-pasted t.test() calls with a single definition of the comparison.
J.test.levels <- c(30, 35, 40, 45, 55, 60, 65, 70)
J.ttest <- as.data.frame(cbind(
  J.test.levels,
  vapply(J.test.levels, function(lvl) {
    t.test(raw.J.result[["50"]], raw.J.result[[as.character(lvl)]], paired = TRUE)$p.value
  }, numeric(1))
))
colnames(J.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
J.ttest[,2] <- p.adjust(J.ttest[,2], method = "bonferroni", n = length(J.ttest[,2]))
#Compare Results of ANOVA and T-test:
J.pvalues <- cbind(J.aov[,-c(1:3)], J.ttest[,2])
colnames(J.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
J.pvalues
#And the same significance tests on the logged values####
m <- aov(terms(log(raw.result.J.long$raw.value) ~ raw.result.J.long$level + (raw.result.J.long$specimen)))
m <- TukeyHSD(m)
# Tukey contrasts of every retained level against the midshaft (50%) level.
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
log.J.aov <- m$`raw.result.J.long$level`[midshaft.aov,]
# Paired t-tests of each logged level against the logged midshaft column.
# BUG FIX: the 30% comparison previously double-logged the midshaft column
# (log(log(raw.J.result$'50'))); every comparison now logs exactly once.
log.J.test.levels <- c(30, 35, 40, 45, 55, 60, 65, 70)
log.J.ttest <- as.data.frame(cbind(
  log.J.test.levels,
  vapply(log.J.test.levels, function(lvl) {
    t.test(log(raw.J.result[["50"]]), log(raw.J.result[[as.character(lvl)]]), paired = TRUE)$p.value
  }, numeric(1))
))
colnames(log.J.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
# BUG FIX: the correction was previously applied to the *unlogged*
# J.ttest p-values; it now adjusts the logged p-values computed above.
log.J.ttest[,2] <- p.adjust(log.J.ttest[,2], method = "bonferroni", n = length(log.J.ttest[,2]))
#Compare Results of logged ANOVA and T-test:
log.J.pvalues <- cbind(log.J.aov[,-c(1:3)], log.J.ttest[,2])
colnames(log.J.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
log.J.pvalues
###############################################################################################################################################
#Imax/Imin Analysis ####
# Function to pull out ratio at intervals and compile into dataframe
# Percent difference of the Imax/Imin ratio from midshaft for one specimen.
# For the specimen labelled `x`, samples the section ratio at every 5% of
# the specimen's slice count (levels 0-95) and expresses each sampled value
# as a percent difference from the 50% (midshaft) value.
# NOTE(review): reads the globals `ratio`, `label` and `slice` (columns of
# `data`) from the calling environment -- confirm they are attached.
midshaft.ratio.function <- function(x)
{
# Number of slices recorded for this specimen.
slicelength <- as.numeric(length(ratio[which(label == x)]))
# Row indices at every 5% of the specimen's slice count.
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
# Shift to absolute slice numbers (specimens do not start at slice 1).
# NOTE(review): the parallel length adjustment below subtracts 1 but this
# offset does not -- verify the intended alignment (possible off-by-one).
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# Pair absolute slice numbers with this specimen's ratio values.
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (ratio[which(label == x)]))
# Pad with NA rows so that row position equals absolute slice number.
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# Single row of sampled ratio values, one column per 5% level (0..95).
ratio.levels <- data.frame(rbind(slice.matrix[,2]))
ratio.levels <- cbind(x, ratio.levels)
colnames(ratio.levels) <- c("specimen", seq(0,95, by=5))
# Percent difference of every level from midshaft (column 12 == 50% level).
# NOTE(review): `length(unique(x))` is 1 for a single label, so the final
# division is a no-op here -- confirm it was meant to average over specimens.
perdiff <- rbind(((ratio.levels[,2:21] - ratio.levels[,12])/ratio.levels[,12])*100)/length(unique(x))
perdiff <- data.frame(cbind(x, perdiff))
colnames(perdiff) <- c("specimen",seq(0,95, by=5))
return(perdiff)
}
# Specimen labels; the percent-difference function is applied once per
# unique specimen via plyr::mdply.
x <- as.vector(data$label)
result.ratio = mdply(unique(x), midshaft.ratio.function)
#Reshape into long format in order to plot
result.ratio.long <- melt(result.ratio, variable.name="level", value.name="percent.difference")
# Keep only the 30%-70% levels. melt() stacks the level columns in order
# (0, 5, ..., 95), so filtering with %in% yields the same level-grouped row
# order as the original nine-row rbind()-of-which() chain, while also being
# robust to a level that happens to be absent from the data.
result.ratio.long <- result.ratio.long[result.ratio.long$level %in% seq(30, 70, by = 5), ]
#Now the same function, but with the raw data in order to perform the statistical tests (this is just repeating all steps up to, but not including calculating percentages)
# Same per-specimen 5%-interval sampling as the percent-difference version,
# but returns the raw sampled Imax/Imin ratio values for the significance
# tests (no percent-difference step).
# NOTE(review): reads the globals `ratio`, `label` and `slice` from the
# calling environment -- confirm they are attached.
midshaft.raw.ratio.function <- function(x)
{
# Number of slices recorded for this specimen.
slicelength <- as.numeric(length(ratio[which(label == x)]))
# Row indices at every 5% of the specimen's slice count.
slice.intervals <- as.integer(seq(1,slicelength, by=(slicelength*.05)))
# Shift to absolute slice numbers; the matching length adjustment below
# subtracts 1 but this offset does not -- possible off-by-one (verify).
slice.intervals <- slice.intervals + min(slice[which(label == x)])
slicelength <- slicelength + (min(slice[which(label == x)])-1)
# Pair absolute slice numbers with this specimen's ratio values.
slice.bind <- cbind(seq((min(slice[which(label == x)])), slicelength), (ratio[which(label == x)]))
# Pad with NA rows so that row position equals absolute slice number.
slice.bind <- rbind(matrix(nrow = (min(slice[which(label == x)])-1), ncol=2), slice.bind)
slice.matrix <- as.matrix(slice.bind[slice.intervals, ])
# Single row of sampled ratio values, one column per 5% level (0..95).
ratio.levels <- data.frame(rbind(slice.matrix[,2]))
ratio.levels <- cbind(x, ratio.levels)
colnames(ratio.levels) <- c("specimen", seq(0,95, by=5))
return(ratio.levels)
}
}
raw.ratio.result = mdply(unique(x), midshaft.raw.ratio.function)
raw.result.ratio.long <- melt(raw.ratio.result, variable.name="level", value.name="raw.value")
# Keep only the 30%-70% levels, retaining specimen names for the ANOVA.
# Replaces the original -c(which(...), ...) negative-index chain: if every
# which() in that chain returned integer(0), x[-integer(0), ] would silently
# select *zero* rows, whereas %in% filtering is always safe.
raw.result.ratio.long <- raw.result.ratio.long[raw.result.ratio.long$level %in% seq(30, 70, by = 5), ]
#Repeated measures ANOVA####
#Standard repeated measures anova (correcting for repeated measures from the same specimen)
m <- aov(terms(raw.result.ratio.long$raw.value ~ raw.result.ratio.long$level + (raw.result.ratio.long$specimen)))
m <- TukeyHSD(m)
# Tukey contrasts of every retained level against the midshaft (50%) level.
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
ratio.aov <- m$`raw.result.ratio.long$level`[midshaft.aov,]
# Paired t-test of each level against midshaft; the loop replaces eight
# copy-pasted t.test() calls with a single definition of the comparison.
ratio.test.levels <- c(30, 35, 40, 45, 55, 60, 65, 70)
ratio.ttest <- as.data.frame(cbind(
  ratio.test.levels,
  vapply(ratio.test.levels, function(lvl) {
    t.test(raw.ratio.result[["50"]], raw.ratio.result[[as.character(lvl)]], paired = TRUE)$p.value
  }, numeric(1))
))
colnames(ratio.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
ratio.ttest[,2] <- p.adjust(ratio.ttest[,2], method = "bonferroni", n = length(ratio.ttest[,2]))
#Compare Results of ANOVA and T-test:
ratio.pvalues <- cbind(ratio.aov[,-c(1:3)], ratio.ttest[,2])
colnames(ratio.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
ratio.pvalues
#And the same significance tests on the logged values####
m <- aov(terms(log(raw.result.ratio.long$raw.value) ~ raw.result.ratio.long$level + (raw.result.ratio.long$specimen)))
m <- TukeyHSD(m)
# Tukey contrasts of every retained level against the midshaft (50%) level.
midshaft.aov <- c('50-30','50-35','50-40','50-45', '55-50', '60-50', '65-50','70-50')
log.ratio.aov <- m$`raw.result.ratio.long$level`[midshaft.aov,]
# Paired t-tests of each logged level against the logged midshaft column.
# BUG FIX: the 30% comparison previously double-logged the midshaft column
# (log(log(raw.ratio.result$'50'))); every comparison now logs exactly once.
log.ratio.test.levels <- c(30, 35, 40, 45, 55, 60, 65, 70)
log.ratio.ttest <- as.data.frame(cbind(
  log.ratio.test.levels,
  vapply(log.ratio.test.levels, function(lvl) {
    t.test(log(raw.ratio.result[["50"]]), log(raw.ratio.result[[as.character(lvl)]]), paired = TRUE)$p.value
  }, numeric(1))
))
colnames(log.ratio.ttest) <- c("level", "p-value")
#Bonferroni Correction to these p-values
# BUG FIX: the correction was previously applied to the *unlogged*
# ratio.ttest p-values; it now adjusts the logged p-values computed above.
log.ratio.ttest[,2] <- p.adjust(log.ratio.ttest[,2], method = "bonferroni", n = length(log.ratio.ttest[,2]))
#Compare Results of logged ANOVA and T-test:
log.ratio.pvalues <- cbind(log.ratio.aov[,-c(1:3)], log.ratio.ttest[,2])
colnames(log.ratio.pvalues) <- c("R.M. ANOVA", "Paired T-Test")
log.ratio.pvalues
|
# OpenSilex API
#
# No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
#
# OpenAPI spec version: 1.0.0-rc+2
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' DataGetDTO Class
#'
#' @field uri
#' @field date
#' @field target
#' @field variable
#' @field value
#' @field confidence
#' @field provenance
#' @field metadata
#' @field raw_data
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Generated API model for a single OpenSilex data point.
# Atomic fields (`uri`, `date`, `target`, `variable`, `confidence`) hold
# plain R values; `value` and `metadata` hold ObjectDTO instances,
# `provenance` holds a DataProvenanceModel, and `raw_data` holds a list of
# ObjectDTO instances (all R6 classes declared elsewhere in this package).
DataGetDTO <- R6::R6Class(
  'DataGetDTO',
  public = list(
    `uri` = NULL,
    `date` = NULL,
    `target` = NULL,
    `variable` = NULL,
    `value` = NULL,
    `confidence` = NULL,
    `provenance` = NULL,
    `metadata` = NULL,
    `raw_data` = NULL,

    # Build a DTO; every argument is optional and validated when supplied.
    initialize = function(`uri`, `date`, `target`, `variable`, `value`, `confidence`, `provenance`, `metadata`, `raw_data`){
      if (!missing(`uri`)) {
        stopifnot(is.character(`uri`), length(`uri`) == 1)
        self$`uri` <- `uri`
      }
      if (!missing(`date`)) {
        stopifnot(is.character(`date`), length(`date`) == 1)
        self$`date` <- `date`
      }
      if (!missing(`target`)) {
        stopifnot(is.character(`target`), length(`target`) == 1)
        self$`target` <- `target`
      }
      if (!missing(`variable`)) {
        stopifnot(is.character(`variable`), length(`variable`) == 1)
        self$`variable` <- `variable`
      }
      if (!missing(`value`)) {
        stopifnot(R6::is.R6(`value`))
        self$`value` <- `value`
      }
      if (!missing(`confidence`)) {
        stopifnot(is.numeric(`confidence`), length(`confidence`) == 1)
        self$`confidence` <- `confidence`
      }
      if (!missing(`provenance`)) {
        stopifnot(R6::is.R6(`provenance`))
        self$`provenance` <- `provenance`
      }
      if (!missing(`metadata`)) {
        stopifnot(R6::is.R6(`metadata`))
        self$`metadata` <- `metadata`
      }
      if (!missing(`raw_data`)) {
        stopifnot(is.list(`raw_data`), length(`raw_data`) != 0)
        lapply(`raw_data`, function(x) stopifnot(R6::is.R6(x)))
        self$`raw_data` <- `raw_data`
      }
    },

    # Convert to a plain R list suitable for jsonlite serialization.
    # Unset (NULL) fields are omitted; nested R6 objects are recursed into.
    toJSON = function() {
      DataGetDTOObject <- list()
      if (!is.null(self$`uri`)) {
        DataGetDTOObject[['uri']] <- self$`uri`
      }
      if (!is.null(self$`date`)) {
        DataGetDTOObject[['date']] <- self$`date`
      }
      if (!is.null(self$`target`)) {
        DataGetDTOObject[['target']] <- self$`target`
      }
      if (!is.null(self$`variable`)) {
        DataGetDTOObject[['variable']] <- self$`variable`
      }
      if (!is.null(self$`value`)) {
        DataGetDTOObject[['value']] <- self$`value`$toJSON()
      }
      if (!is.null(self$`confidence`)) {
        DataGetDTOObject[['confidence']] <- self$`confidence`
      }
      if (!is.null(self$`provenance`)) {
        DataGetDTOObject[['provenance']] <- self$`provenance`$toJSON()
      }
      if (!is.null(self$`metadata`)) {
        DataGetDTOObject[['metadata']] <- self$`metadata`$toJSON()
      }
      if (!is.null(self$`raw_data`)) {
        DataGetDTOObject[['raw_data']] <- lapply(self$`raw_data`, function(x) x$toJSON())
      }
      DataGetDTOObject
    },

    # Populate fields from a JSON string.
    # Delegates to fromJSONObject() so the field-mapping logic lives in one
    # place (the generated original duplicated the whole body in both
    # methods, which risks the two copies drifting apart).
    fromJSON = function(DataGetDTOJson) {
      self$fromJSONObject(jsonlite::fromJSON(DataGetDTOJson))
    },

    # Populate fields from an already-parsed JSON object (a named list).
    # Only fields present in the input are assigned; others keep their
    # current values.
    fromJSONObject = function(DataGetDTOObject) {
      if (!is.null(DataGetDTOObject$`uri`)) {
        self$`uri` <- DataGetDTOObject$`uri`
      }
      if (!is.null(DataGetDTOObject$`date`)) {
        self$`date` <- DataGetDTOObject$`date`
      }
      if (!is.null(DataGetDTOObject$`target`)) {
        self$`target` <- DataGetDTOObject$`target`
      }
      if (!is.null(DataGetDTOObject$`variable`)) {
        self$`variable` <- DataGetDTOObject$`variable`
      }
      if (!is.null(DataGetDTOObject$`value`)) {
        valueObject <- ObjectDTO$new()
        valueObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$value, auto_unbox = TRUE, null = "null"))
        self$`value` <- valueObject
      }
      if (!is.null(DataGetDTOObject$`confidence`)) {
        self$`confidence` <- DataGetDTOObject$`confidence`
      }
      if (!is.null(DataGetDTOObject$`provenance`)) {
        provenanceObject <- DataProvenanceModel$new()
        provenanceObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$provenance, auto_unbox = TRUE, null = "null"))
        self$`provenance` <- provenanceObject
      }
      if (!is.null(DataGetDTOObject$`metadata`)) {
        metadataObject <- ObjectDTO$new()
        metadataObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$metadata, auto_unbox = TRUE, null = "null"))
        self$`metadata` <- metadataObject
      }
      if (!is.null(DataGetDTOObject$`raw_data`)) {
        self$`raw_data` <- lapply(DataGetDTOObject$`raw_data`, function(x) {
          raw_dataObject <- ObjectDTO$new()
          raw_dataObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE, null = "null"))
          raw_dataObject
        })
      }
    },

    # Serialize to a JSON string.
    # BUG FIX: the generated original called self$`value`$toJSON() (and the
    # provenance/metadata equivalents) unconditionally, raising "attempt to
    # apply non-function" whenever one of those R6 fields was NULL. Unset
    # fields are now emitted as JSON null, matching the atomic fields.
    toJSONString = function() {
      raw_dataList = paste(lapply(self$`raw_data`, function(x) x$toJSONString()),collapse = ",")
      sprintf(
        '{
        "uri": %s,
        "date": %s,
        "target": %s,
        "variable": %s,
        "value": %s,
        "confidence": %s,
        "provenance": %s,
        "metadata": %s,
        "raw_data": [%s]
        }',
        ifelse(is.null(self$`uri`), "null", jsonlite::toJSON(self$`uri`, auto_unbox = TRUE, null = "null")),
        ifelse(is.null(self$`date`), "null", jsonlite::toJSON(self$`date`, auto_unbox = TRUE, null = "null")),
        ifelse(is.null(self$`target`), "null", jsonlite::toJSON(self$`target`, auto_unbox = TRUE, null = "null")),
        ifelse(is.null(self$`variable`), "null", jsonlite::toJSON(self$`variable`, auto_unbox = TRUE, null = "null")),
        if (is.null(self$`value`)) "null" else jsonlite::toJSON(self$`value`$toJSON(), auto_unbox = TRUE, null = "null"),
        ifelse(is.null(self$`confidence`), "null", as.numeric(jsonlite::toJSON(self$`confidence`, auto_unbox = TRUE, null = "null"))),
        if (is.null(self$`provenance`)) "null" else jsonlite::toJSON(self$`provenance`$toJSON(), auto_unbox = TRUE, null = "null"),
        if (is.null(self$`metadata`)) "null" else jsonlite::toJSON(self$`metadata`$toJSON(), auto_unbox = TRUE, null = "null"),
        raw_dataList
      )
    },

    # Deserialize from a JSON string, overwriting *all* fields (a field
    # absent from the input resets the corresponding slot to NULL).
    fromJSONString = function(DataGetDTOJson) {
      DataGetDTOObject <- jsonlite::fromJSON(DataGetDTOJson)
      self$`uri` <- DataGetDTOObject$`uri`
      self$`date` <- DataGetDTOObject$`date`
      self$`target` <- DataGetDTOObject$`target`
      self$`variable` <- DataGetDTOObject$`variable`
      ObjectDTOObject <- ObjectDTO$new()
      self$`value` <- ObjectDTOObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$value, auto_unbox = TRUE))
      self$`confidence` <- DataGetDTOObject$`confidence`
      DataProvenanceModelObject <- DataProvenanceModel$new()
      self$`provenance` <- DataProvenanceModelObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$provenance, auto_unbox = TRUE))
      ObjectDTOObject <- ObjectDTO$new()
      self$`metadata` <- ObjectDTOObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$metadata, auto_unbox = TRUE))
      self$`raw_data` <- lapply(DataGetDTOObject$`raw_data`, function(x) ObjectDTO$new()$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE)))
    }
  )
)
| /R/DataGetDTO.r | no_license | OpenSILEX/opensilexClientToolsR | R | false | false | 8,872 | r | # OpenSilex API
#
# No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
#
# OpenAPI spec version: 1.0.0-rc+2
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' DataGetDTO Class
#'
#' @field uri
#' @field date
#' @field target
#' @field variable
#' @field value
#' @field confidence
#' @field provenance
#' @field metadata
#' @field raw_data
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Generated client-side model for a single OpenSilex data record.
DataGetDTO <- R6::R6Class(
  'DataGetDTO',
  public = list(
    `uri` = NULL,         # record identifier (character scalar)
    `date` = NULL,        # observation date (character scalar)
    `target` = NULL,      # URI of the observed object (character scalar)
    `variable` = NULL,    # URI of the measured variable (character scalar)
    `value` = NULL,       # measured value (ObjectDTO)
    `confidence` = NULL,  # confidence score (numeric scalar)
    `provenance` = NULL,  # DataProvenanceModel
    `metadata` = NULL,    # free-form metadata (ObjectDTO)
    `raw_data` = NULL,    # list of ObjectDTO
    # Construct a DTO.  Every field is optional; each supplied argument is
    # validated before assignment.
    initialize = function(`uri`, `date`, `target`, `variable`, `value`, `confidence`, `provenance`, `metadata`, `raw_data`){
      if (!missing(`uri`)) {
        stopifnot(is.character(`uri`), length(`uri`) == 1)
        self$`uri` <- `uri`
      }
      if (!missing(`date`)) {
        stopifnot(is.character(`date`), length(`date`) == 1)
        self$`date` <- `date`
      }
      if (!missing(`target`)) {
        stopifnot(is.character(`target`), length(`target`) == 1)
        self$`target` <- `target`
      }
      if (!missing(`variable`)) {
        stopifnot(is.character(`variable`), length(`variable`) == 1)
        self$`variable` <- `variable`
      }
      if (!missing(`value`)) {
        stopifnot(R6::is.R6(`value`))
        self$`value` <- `value`
      }
      if (!missing(`confidence`)) {
        stopifnot(is.numeric(`confidence`), length(`confidence`) == 1)
        self$`confidence` <- `confidence`
      }
      if (!missing(`provenance`)) {
        stopifnot(R6::is.R6(`provenance`))
        self$`provenance` <- `provenance`
      }
      if (!missing(`metadata`)) {
        stopifnot(R6::is.R6(`metadata`))
        self$`metadata` <- `metadata`
      }
      if (!missing(`raw_data`)) {
        # A non-empty list of nested model objects.
        stopifnot(is.list(`raw_data`), length(`raw_data`) != 0)
        lapply(`raw_data`, function(x) stopifnot(R6::is.R6(x)))
        self$`raw_data` <- `raw_data`
      }
    },
    # Convert to a plain list suitable for jsonlite serialization.
    # NULL fields are simply omitted; nested DTOs recurse via their toJSON().
    toJSON = function() {
      DataGetDTOObject <- list()
      if (!is.null(self$`uri`)) {
        DataGetDTOObject[['uri']] <- self$`uri`
      }
      if (!is.null(self$`date`)) {
        DataGetDTOObject[['date']] <- self$`date`
      }
      if (!is.null(self$`target`)) {
        DataGetDTOObject[['target']] <- self$`target`
      }
      if (!is.null(self$`variable`)) {
        DataGetDTOObject[['variable']] <- self$`variable`
      }
      if (!is.null(self$`value`)) {
        DataGetDTOObject[['value']] <- self$`value`$toJSON()
      }
      if (!is.null(self$`confidence`)) {
        DataGetDTOObject[['confidence']] <- self$`confidence`
      }
      if (!is.null(self$`provenance`)) {
        DataGetDTOObject[['provenance']] <- self$`provenance`$toJSON()
      }
      if (!is.null(self$`metadata`)) {
        DataGetDTOObject[['metadata']] <- self$`metadata`$toJSON()
      }
      if (!is.null(self$`raw_data`)) {
        DataGetDTOObject[['raw_data']] <- lapply(self$`raw_data`, function(x) x$toJSON())
      }
      DataGetDTOObject
    },
    # Fill this object from a JSON string; absent fields are left untouched.
    # Nested model objects are populated via their fromJSON() side effect.
    fromJSON = function(DataGetDTOJson) {
      DataGetDTOObject <- jsonlite::fromJSON(DataGetDTOJson)
      if (!is.null(DataGetDTOObject$`uri`)) {
        self$`uri` <- DataGetDTOObject$`uri`
      }
      if (!is.null(DataGetDTOObject$`date`)) {
        self$`date` <- DataGetDTOObject$`date`
      }
      if (!is.null(DataGetDTOObject$`target`)) {
        self$`target` <- DataGetDTOObject$`target`
      }
      if (!is.null(DataGetDTOObject$`variable`)) {
        self$`variable` <- DataGetDTOObject$`variable`
      }
      if (!is.null(DataGetDTOObject$`value`)) {
        valueObject <- ObjectDTO$new()
        valueObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$value, auto_unbox = TRUE, null = "null"))
        self$`value` <- valueObject
      }
      if (!is.null(DataGetDTOObject$`confidence`)) {
        self$`confidence` <- DataGetDTOObject$`confidence`
      }
      if (!is.null(DataGetDTOObject$`provenance`)) {
        provenanceObject <- DataProvenanceModel$new()
        provenanceObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$provenance, auto_unbox = TRUE, null = "null"))
        self$`provenance` <- provenanceObject
      }
      if (!is.null(DataGetDTOObject$`metadata`)) {
        metadataObject <- ObjectDTO$new()
        metadataObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$metadata, auto_unbox = TRUE, null = "null"))
        self$`metadata` <- metadataObject
      }
      if (!is.null(DataGetDTOObject$`raw_data`)) {
        self$`raw_data` <- lapply(DataGetDTOObject$`raw_data`, function(x) {
          raw_dataObject <- ObjectDTO$new()
          raw_dataObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE, null = "null"))
          raw_dataObject
        })
      }
    },
    # Same as fromJSON() but starting from an already-parsed list.
    fromJSONObject = function(DataGetDTOObject) {
      if (!is.null(DataGetDTOObject$`uri`)) {
        self$`uri` <- DataGetDTOObject$`uri`
      }
      if (!is.null(DataGetDTOObject$`date`)) {
        self$`date` <- DataGetDTOObject$`date`
      }
      if (!is.null(DataGetDTOObject$`target`)) {
        self$`target` <- DataGetDTOObject$`target`
      }
      if (!is.null(DataGetDTOObject$`variable`)) {
        self$`variable` <- DataGetDTOObject$`variable`
      }
      if (!is.null(DataGetDTOObject$`value`)) {
        valueObject <- ObjectDTO$new()
        valueObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$value, auto_unbox = TRUE, null = "null"))
        self$`value` <- valueObject
      }
      if (!is.null(DataGetDTOObject$`confidence`)) {
        self$`confidence` <- DataGetDTOObject$`confidence`
      }
      if (!is.null(DataGetDTOObject$`provenance`)) {
        provenanceObject <- DataProvenanceModel$new()
        provenanceObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$provenance, auto_unbox = TRUE, null = "null"))
        self$`provenance` <- provenanceObject
      }
      if (!is.null(DataGetDTOObject$`metadata`)) {
        metadataObject <- ObjectDTO$new()
        metadataObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$metadata, auto_unbox = TRUE, null = "null"))
        self$`metadata` <- metadataObject
      }
      if (!is.null(DataGetDTOObject$`raw_data`)) {
        self$`raw_data` <- lapply(DataGetDTOObject$`raw_data`, function(x) {
          raw_dataObject <- ObjectDTO$new()
          raw_dataObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE, null = "null"))
          raw_dataObject
        })
      }
    },
    # Serialize to a JSON string.
    # Fix: NULL nested objects (`value`, `provenance`, `metadata`) are now
    # emitted as JSON null instead of crashing on `NULL$toJSON()`; the scalar
    # fields already had this guard.
    toJSONString = function() {
      raw_dataList = paste(lapply(self$`raw_data`, function(x) x$toJSONString()),collapse = ",")
      sprintf(
        '{
           "uri": %s,
           "date": %s,
           "target": %s,
           "variable": %s,
           "value": %s,
           "confidence": %s,
           "provenance": %s,
           "metadata": %s,
           "raw_data": [%s]
        }',
        ifelse(is.null(self$`uri`), "null",jsonlite::toJSON(self$`uri`,auto_unbox=TRUE, null = "null")),
        ifelse(is.null(self$`date`), "null",jsonlite::toJSON(self$`date`,auto_unbox=TRUE, null = "null")),
        ifelse(is.null(self$`target`), "null",jsonlite::toJSON(self$`target`,auto_unbox=TRUE, null = "null")),
        ifelse(is.null(self$`variable`), "null",jsonlite::toJSON(self$`variable`,auto_unbox=TRUE, null = "null")),
        if (is.null(self$`value`)) "null" else jsonlite::toJSON(self$`value`$toJSON(),auto_unbox=TRUE, null = "null"),
        ifelse(is.null(self$`confidence`), "null",as.numeric(jsonlite::toJSON(self$`confidence`,auto_unbox=TRUE, null = "null"))),
        if (is.null(self$`provenance`)) "null" else jsonlite::toJSON(self$`provenance`$toJSON(),auto_unbox=TRUE, null = "null"),
        if (is.null(self$`metadata`)) "null" else jsonlite::toJSON(self$`metadata`$toJSON(),auto_unbox=TRUE, null = "null"),
        raw_dataList
      )
    },
    # Deserialize from a JSON string.
    # Fix: consistent with fromJSON()/fromJSONObject(), nested model objects
    # are filled via their fromJSON() side effect and then assigned, instead
    # of assigning fromJSON()'s (undefined) return value; the nested
    # round-trips also gain the `null = "null"` option used by the siblings.
    fromJSONString = function(DataGetDTOJson) {
      DataGetDTOObject <- jsonlite::fromJSON(DataGetDTOJson)
      self$`uri` <- DataGetDTOObject$`uri`
      self$`date` <- DataGetDTOObject$`date`
      self$`target` <- DataGetDTOObject$`target`
      self$`variable` <- DataGetDTOObject$`variable`
      valueObject <- ObjectDTO$new()
      valueObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$value, auto_unbox = TRUE, null = "null"))
      self$`value` <- valueObject
      self$`confidence` <- DataGetDTOObject$`confidence`
      provenanceObject <- DataProvenanceModel$new()
      provenanceObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$provenance, auto_unbox = TRUE, null = "null"))
      self$`provenance` <- provenanceObject
      metadataObject <- ObjectDTO$new()
      metadataObject$fromJSON(jsonlite::toJSON(DataGetDTOObject$metadata, auto_unbox = TRUE, null = "null"))
      self$`metadata` <- metadataObject
      self$`raw_data` <- lapply(DataGetDTOObject$`raw_data`, function(x) {
        raw_dataObject <- ObjectDTO$new()
        raw_dataObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE, null = "null"))
        raw_dataObject
      })
    }
  )
)
|
##' Simulation suite for privacy preserving data analysis methods
##'
##' Implements simulations of privacy-preserving data analysis methods in the setting of a distributed data network.
##'
##' @name distributed-package
##' @aliases distributed
##'
##' @docType package
##'
##' @import assertthat
##' @importFrom stats sd var binomial quasibinomial poisson coef glm predict quantile rbinom rexp rnorm runif vcov as.formula
##' @importFrom utils read.csv write.csv
##' @importFrom magrittr '%>%'
##' @importFrom survival clogit coxph strata Surv
##' @importFrom survey svydesign svyglm svycoxph
##' @importFrom doRNG '%dorng%'
##'
##' @author Kazuki Yoshida
##'
NULL
| /R/package.R | no_license | kaz-yos/distributed | R | false | false | 664 | r | ##' Simulation suite for privacy preserving data analysis methods
##'
##' Implements simulations of privacy-preserving data analysis methods in the setting of a distributed data network.
##'
##' @name distributed-package
##' @aliases distributed
##'
##' @docType package
##'
##' @import assertthat
##' @importFrom stats sd var binomial quasibinomial poisson coef glm predict quantile rbinom rexp rnorm runif vcov as.formula
##' @importFrom utils read.csv write.csv
##' @importFrom magrittr '%>%'
##' @importFrom survival clogit coxph strata Surv
##' @importFrom survey svydesign svyglm svycoxph
##' @importFrom doRNG '%dorng%'
##'
##' @author Kazuki Yoshida
##'
NULL
|
#### Create the Shiny App
library(shiny)
#### Import the fitted model coefficients so that we can score voters with the model
coeff <- read.csv("Data/full_coefficients.csv", header = TRUE)
# Lookup table: human-readable race label -> single-letter code used by the model
race_dict <- list(
  "UNDESIGNATED" = "U",
  "OTHER" = "O" ,
  "BLACK or AFRICAN AMERICAN" = "B" ,
  "AMERICAN INDIAN or ALASKA NATIVE" = "I" ,
  "WHITE" = "W" ,
  "ASIAN" = "A",
  "TWO or MORE RACES" = "M"
)
races <- names(race_dict) # all the possible values of race
races = unlist(races, recursive = TRUE, use.names = TRUE)
# Lookup table: ethnicity label -> model code
ethnicity_dict <- list(
  "UNDESIGNATED" = "U" ,
  "HISPANIC/LATINO" = "HL" ,
  "NOT HISPANIC/LATINO" = "NL"
)
enthnicities <- names(ethnicity_dict) # all the possible values of ethnicity
# Lookup table: gender label -> model code
gender_dict <- list(
  "UNDESIGNATED" = "U",
  "MALE" = "M" ,
  "FEMALE" = "F"
)
genders <- names(gender_dict) # all the possible values of gender
# Lookup table: drivers-license label -> model code
license_dict <- list(
  "UNDESIGNATED" = "U",
  "YES" = "Y" ,
  "NO" = "N"
)
licenses <- names(license_dict) # all the possible values of drivers license
# All US state abbreviations, plus "None" for birthplaces outside the US states
states <- state.abb
states <-append(states,"None") # all the possible values of state adding "None" to the end
# Zip codes known to the model, taken from the coefficient table (NC zips)
# NOTE(review): the zipcode package is attached here but its data does not
# appear to be used anywhere below -- confirm whether this dependency is needed.
library(zipcode)
zipcodes <- subset(coeff, name == "zip_code")$index
#zipcodes <- sort(zipcodes)
# Candidate registration years: current year back 90 years
# NOTE(review): registration_years and birth_ages appear unused -- the sliders
# in the UI hard-code their ranges.  Confirm before removing.
registration_years <- seq.int(as.numeric(substr(Sys.Date(), 0, 4)), as.numeric(substr(Sys.Date(), 0, 4))-90)
# Candidate voter ages
birth_ages <- seq.int(18, 100)
# Use a fluid Bootstrap layout: inputs in a sidebar, result table on the right
ui <- fluidPage(
  # Page title
  titlePanel("Prediction of Political Sentiment in North Carolina"),
  # Generate a row with a sidebar
  sidebarLayout(
    # Sidebar collects the voter attributes that feed get_probability()
    sidebarPanel(
      selectInput("race", "Race:",
                  choices=races),
      selectInput("gender", "Gender:",
                  choices=genders),
      selectInput("ethnicity", "Ethnicity:",
                  choices=enthnicities),
      selectInput("drivers_lic", "Drivers License:",
                  choices=licenses),
      selectInput("birth_state", "Birth State:",
                  choices=states),
      selectInput("zip_code", "Zip Code:",
                  choices=zipcodes, selected = "27515"),
      sliderInput("birth_age", "Age:",
                  min=18, max=99, value=65),
      sliderInput("registration_year", "Year Registerd to Vote in NC:",
                  min=1948, max=2017, value=1980, sep = ""),
      fluidRow(column(3, verbatimTextOutput("years"))),
      hr(),
      helpText("Select the Values")
    ),
    # Main panel: the rendered summary table plus the NC flag image
    # NOTE(review): img() receives `align` twice ("right" and "top") -- the
    # second value likely wins in the rendered tag; confirm intent.
    mainPanel(
      tableOutput("values"), img(src='NCFlag.png', align = "right", height='300',width='600', align = "top")
    )
  )
)
#### Probability that a voter with the given attributes leans Republican.
# Arguments: birth_age (numeric age), registration_year (year registered),
# race/gender/ethnicity/drivers_lic (single-letter model codes from the
# *_dict tables), birth_state (state abbreviation or "None"), zip_code.
# Returns: a numeric probability in [0, 1].
get_probability <- function(birth_age,
                            registration_year,
                            race,
                            gender,
                            ethnicity,
                            drivers_lic,
                            birth_state,
                            zip_code) {
  # Re-read the fitted coefficients on every call so a refreshed model file is
  # picked up without restarting the app.
  # NOTE(review): a `coeff` data frame is already loaded globally at startup;
  # reading here is redundant work per reactive update.
  coeff <- read.csv("Data/full_coefficients.csv", header = TRUE)
  #### Keep only the columns the scoring needs (drops X, class, stderr)
  coeff <- subset(coeff, select = c("name", "index", "value"))
  #### Coefficients of the continuous terms plus the intercept
  intercept_coeff <- subset(coeff, name == "(intercept)")$value
  birth_age_coeff <- subset(coeff, name == "birth_age")$value
  age_squared_coeff <- subset(coeff, name == "age_squared")$value
  age_cubed_coeff <- subset(coeff, name == "age_cubed")$value
  years_since_registration_coeff <- subset(coeff, name == "years_since_registration")$value
  years_squared_coeff <- subset(coeff, name == "years_squared")$value
  years_cubed_coeff <- subset(coeff, name == "years_cubed")$value
  #### Derived continuous features: cubic polynomials in age and tenure
  age_squared <- birth_age^2
  age_cubed <- birth_age^3
  years_since_registration <- as.numeric(substr(Sys.Date(), 1, 4)) - as.numeric(registration_year)
  years_squared <- years_since_registration^2
  years_cubed <- years_since_registration^3
  #### Interaction terms are encoded as concatenated category codes
  race_gender <- paste0(race, gender)
  race_ethnicity <- paste0(race, ethnicity)
  gender_ethnicity <- paste0(gender, ethnicity)
  # Coefficient for one level of a categorical feature; a level that is
  # absent from the model contributes 0 to the linear predictor.
  get_coeff <- function(df, feature, fact) {
    output <- subset(df, (name == feature & index == fact))$value
    if (length(output) == 0) 0 else output
  }
  #### Linear predictor ("margin") fed into the logistic function
  # Continuous terms plus intercept
  Y <- intercept_coeff +
    birth_age * birth_age_coeff + age_squared * age_squared_coeff + age_cubed * age_cubed_coeff +
    years_since_registration * years_since_registration_coeff +
    years_squared * years_squared_coeff + years_cubed * years_cubed_coeff
  # Categorical terms
  Y <- Y + get_coeff(coeff, "race_gender", race_gender)
  Y <- Y + get_coeff(coeff, "race_ethnicity", race_ethnicity)
  Y <- Y + get_coeff(coeff, "gender_ethnicity", gender_ethnicity)
  Y <- Y + get_coeff(coeff, "zip_code", zip_code)
  Y <- Y + get_coeff(coeff, "birth_state", birth_state)
  Y <- Y + get_coeff(coeff, "drivers_lic", drivers_lic)
  #### Logistic transform.  stats::plogis() is the standard-library logistic
  #### CDF (1 / (1 + exp(-Y))), so attaching the psych package per call is no
  #### longer needed; the dead party_preference computation was removed.
  stats::plogis(Y)
}
# Define a server for the Shiny app: builds a two-column summary table of the
# selected inputs plus the model score and the predicted party, and renders it.
server <- function(input, output) {
  tablevalues <- reactive({
    # Score the currently selected voter profile once per input change
    # (the original called get_probability() twice with identical arguments).
    prob <- get_probability(input$birth_age,
                            input$registration_year,
                            race_dict[[input$race]],
                            gender_dict[[input$gender]],
                            ethnicity_dict[[input$ethnicity]],
                            license_dict[[input$drivers_lic]],
                            input$birth_state,
                            input$zip_code)
    # Compose data frame: one row per displayed attribute, then score/prediction
    data.frame(
      Name = c("Race",
               "Gender",
               "Ethnicity",
               "Drivers Licence",
               "Birth State",
               "Zip Code",
               "Age",
               "Registration_Year",
               "Score",
               "Prediction"),
      Value = as.character(c(input$race,
                             input$gender,
                             input$ethnicity,
                             input$drivers_lic,
                             input$birth_state,
                             input$zip_code,
                             input$birth_age,
                             input$registration_year,
                             prob,
                             if (prob > .50) "Republican" else "Democratic")),
      stringsAsFactors = FALSE)
  })
  # Show the values using an HTML table
  output$values <- renderTable({
    tablevalues()
  })
}
shinyApp(ui=ui, server=server)
| /code/prediction shiny app.R | no_license | NCVotes/analytics | R | false | false | 9,108 | r | #### Create the Shiny App
library(shiny)
#### Import the coeffiencts so that we can use the model
coeff <- read.csv("Data/full_coefficients.csv", header = TRUE)
# Create a dictionary for race
race_dict <- list(
"UNDESIGNATED" = "U",
"OTHER" = "O" ,
"BLACK or AFRICAN AMERICAN" = "B" ,
"AMERICAN INDIAN or ALASKA NATIVE" = "I" ,
"WHITE" = "W" ,
"ASIAN" = "A",
"TWO or MORE RACES" = "M"
)
races <- names(race_dict) # all the possible values of race
races = unlist(races, recursive = TRUE, use.names = TRUE)
# Create a dictionary for ethnicity
ethnicity_dict <- list(
"UNDESIGNATED" = "U" ,
"HISPANIC/LATINO" = "HL" ,
"NOT HISPANIC/LATINO" = "NL"
)
enthnicities <- names(ethnicity_dict) # all the possible values of ethnicity
# Create a dictionary for gender
gender_dict <- list(
"UNDESIGNATED" = "U",
"MALE" = "M" ,
"FEMALE" = "F"
)
genders <- names(gender_dict) # all the possible values of gender
# Create a dictionary for drivers license
license_dict <- list(
"UNDESIGNATED" = "U",
"YES" = "Y" ,
"NO" = "N"
)
licenses <- names(license_dict) # all the possible values of drivers license
# Create a list of all state codes and add "None"
states <- state.abb
states <-append(states,"None") # all the possible values of state adding "None" to the end
# Create a list of all zip codes in North Carolina
library(zipcode)
zipcodes <- subset(coeff, name == "zip_code")$index
#zipcodes <- sort(zipcodes)
# Create a list of years since regisration
registration_years <- seq.int(as.numeric(substr(Sys.Date(), 0, 4)), as.numeric(substr(Sys.Date(), 0, 4))-90)
# Create a list of ages
birth_ages <- seq.int(18, 100)
# Use a fluid Bootstrap layout: inputs in a sidebar, result table on the right
ui <- fluidPage(
  # Page title
  titlePanel("Prediction of Political Sentiment in North Carolina"),
  # Generate a row with a sidebar
  sidebarLayout(
    # Sidebar collects the voter attributes that feed get_probability()
    sidebarPanel(
      selectInput("race", "Race:",
                  choices=races),
      selectInput("gender", "Gender:",
                  choices=genders),
      selectInput("ethnicity", "Ethnicity:",
                  choices=enthnicities),
      selectInput("drivers_lic", "Drivers License:",
                  choices=licenses),
      selectInput("birth_state", "Birth State:",
                  choices=states),
      selectInput("zip_code", "Zip Code:",
                  choices=zipcodes, selected = "27515"),
      sliderInput("birth_age", "Age:",
                  min=18, max=99, value=65),
      sliderInput("registration_year", "Year Registerd to Vote in NC:",
                  min=1948, max=2017, value=1980, sep = ""),
      fluidRow(column(3, verbatimTextOutput("years"))),
      hr(),
      helpText("Select the Values")
    ),
    # Main panel: the rendered summary table plus the NC flag image
    # NOTE(review): img() receives `align` twice ("right" and "top") -- the
    # second value likely wins in the rendered tag; confirm intent.
    mainPanel(
      tableOutput("values"), img(src='NCFlag.png', align = "right", height='300',width='600', align = "top")
    )
  )
)
#### Probability that a voter with the given attributes leans Republican.
# Arguments: birth_age (numeric age), registration_year (year registered),
# race/gender/ethnicity/drivers_lic (single-letter model codes from the
# *_dict tables), birth_state (state abbreviation or "None"), zip_code.
# Returns: a numeric probability in [0, 1].
get_probability <- function(birth_age,
                            registration_year,
                            race,
                            gender,
                            ethnicity,
                            drivers_lic,
                            birth_state,
                            zip_code) {
  # Re-read the fitted coefficients on every call so a refreshed model file is
  # picked up without restarting the app.
  # NOTE(review): a `coeff` data frame is already loaded globally at startup;
  # reading here is redundant work per reactive update.
  coeff <- read.csv("Data/full_coefficients.csv", header = TRUE)
  #### Keep only the columns the scoring needs (drops X, class, stderr)
  coeff <- subset(coeff, select = c("name", "index", "value"))
  #### Coefficients of the continuous terms plus the intercept
  intercept_coeff <- subset(coeff, name == "(intercept)")$value
  birth_age_coeff <- subset(coeff, name == "birth_age")$value
  age_squared_coeff <- subset(coeff, name == "age_squared")$value
  age_cubed_coeff <- subset(coeff, name == "age_cubed")$value
  years_since_registration_coeff <- subset(coeff, name == "years_since_registration")$value
  years_squared_coeff <- subset(coeff, name == "years_squared")$value
  years_cubed_coeff <- subset(coeff, name == "years_cubed")$value
  #### Derived continuous features: cubic polynomials in age and tenure
  age_squared <- birth_age^2
  age_cubed <- birth_age^3
  years_since_registration <- as.numeric(substr(Sys.Date(), 1, 4)) - as.numeric(registration_year)
  years_squared <- years_since_registration^2
  years_cubed <- years_since_registration^3
  #### Interaction terms are encoded as concatenated category codes
  race_gender <- paste0(race, gender)
  race_ethnicity <- paste0(race, ethnicity)
  gender_ethnicity <- paste0(gender, ethnicity)
  # Coefficient for one level of a categorical feature; a level that is
  # absent from the model contributes 0 to the linear predictor.
  get_coeff <- function(df, feature, fact) {
    output <- subset(df, (name == feature & index == fact))$value
    if (length(output) == 0) 0 else output
  }
  #### Linear predictor ("margin") fed into the logistic function
  # Continuous terms plus intercept
  Y <- intercept_coeff +
    birth_age * birth_age_coeff + age_squared * age_squared_coeff + age_cubed * age_cubed_coeff +
    years_since_registration * years_since_registration_coeff +
    years_squared * years_squared_coeff + years_cubed * years_cubed_coeff
  # Categorical terms
  Y <- Y + get_coeff(coeff, "race_gender", race_gender)
  Y <- Y + get_coeff(coeff, "race_ethnicity", race_ethnicity)
  Y <- Y + get_coeff(coeff, "gender_ethnicity", gender_ethnicity)
  Y <- Y + get_coeff(coeff, "zip_code", zip_code)
  Y <- Y + get_coeff(coeff, "birth_state", birth_state)
  Y <- Y + get_coeff(coeff, "drivers_lic", drivers_lic)
  #### Logistic transform.  stats::plogis() is the standard-library logistic
  #### CDF (1 / (1 + exp(-Y))), so attaching the psych package per call is no
  #### longer needed; the dead party_preference computation was removed.
  stats::plogis(Y)
}
# Define a server for the Shiny app: builds a two-column summary table of the
# selected inputs plus the model score and the predicted party, and renders it.
server <- function(input, output) {
  tablevalues <- reactive({
    # Score the currently selected voter profile once per input change
    # (the original called get_probability() twice with identical arguments).
    prob <- get_probability(input$birth_age,
                            input$registration_year,
                            race_dict[[input$race]],
                            gender_dict[[input$gender]],
                            ethnicity_dict[[input$ethnicity]],
                            license_dict[[input$drivers_lic]],
                            input$birth_state,
                            input$zip_code)
    # Compose data frame: one row per displayed attribute, then score/prediction
    data.frame(
      Name = c("Race",
               "Gender",
               "Ethnicity",
               "Drivers Licence",
               "Birth State",
               "Zip Code",
               "Age",
               "Registration_Year",
               "Score",
               "Prediction"),
      Value = as.character(c(input$race,
                             input$gender,
                             input$ethnicity,
                             input$drivers_lic,
                             input$birth_state,
                             input$zip_code,
                             input$birth_age,
                             input$registration_year,
                             prob,
                             if (prob > .50) "Republican" else "Democratic")),
      stringsAsFactors = FALSE)
  })
  # Show the values using an HTML table
  output$values <- renderTable({
    tablevalues()
  })
}
shinyApp(ui=ui, server=server)
|
#' Progress bar in the terminal
#'
#' Progress bars are configurable, may include percentage, elapsed time,
#' and/or the estimated completion time. They work in the command line,
#' in Emacs and in R Studio. The progress package was heavily influenced by
#' https://github.com/tj/node-progress
#'
#' @section Creating the progress bar:
#' A progress bar is an R6 object, that can be created with
#' \code{progress_bar$new()}. It has the following arguments:
#' \describe{
#' \item{format}{The format of the progress bar. A number of
#' tokens can be used here, see them below. It defaults to
#' \code{"[:bar] :percent"}, which means that the progress
#' bar is within brackets on the left, and the percentage
#' is printed on the right.}
#' \item{total}{Total number of ticks to complete. Defaults to 100.}
#' \item{width}{Width of the progress bar. Default is the current
#' terminal width (see \code{options()} and \code{width}) minus two.}
#' \item{stream}{The output stream to put the progress bar on.
#' It defaults to \code{stderr()}, except in R Studio that has
#' a bug when printing on the standard error, so there we use
#' \code{stdout}. If the output stream is not a terminal and
#' we are not in R Studio, then no progress bar is printed.}
#' \item{complete}{Completion character, defaults to \code{=}.}
#' \item{incomplete}{Incomplete character, defaults to \code{-}.}
#' \item{callback}{Callback function to call when the progress
#' bar finishes. The progress bar object itself is passed to it
#' as the single parameter.}
#' \item{clear}{Whether to clear the progress bar on completion.
#' Defaults to \code{TRUE}.}
#' \item{show_after}{Amount of time in seconds, after which the progress
#' bar is shown on the screen. For very short processes,
#' it is probably not worth showing it at all. Defaults to two
#' tenth of a second.}
#' \item{force}{Whether to force showing the progress bar,
#' even if the given (or default) stream does not seem support it.}
#' }
#'
#' @section Using the progress bar:
#' Two functions can update a progress bar. \code{progress_bar$tick()}
#' increases the number of ticks by one (or another specified value).
#' \code{progress_bar$update()} sets a given ratio.
#'
#' The progress bar is displayed after the first `tick` command.
#' This might not be desirable for long computations, because
#' nothing is shown before the first tick. It is good practice to
#' call `tick(0)` at the beginning of the computation or download,
#' which shows the progress bar immediately.
#'
#' @section Tokens:
#' They can be used in the \code{format} argument when creating the
#' progress bar.
#' \describe{
#' \item{:bar}{The progress bar itself.}
#' \item{:current}{Current tick number.}
#' \item{:total}{Total ticks.}
#' \item{:elapsed}{Elapsed time in seconds.}
#' \item{:elapsedfull}{Elapsed time in hh:mm:ss format.}
#' \item{:eta}{Estimated completion time in seconds.}
#' \item{:percent}{Completion percentage.}
#' \item{:rate}{Download rate, bytes per second. See example below.}
#' \item{:bytes}{Shows :current, formatted as bytes. Useful
#' for downloads or file reads if you don't know the size of the
#' file in advance. See example below.}
#' \item{:spin}{Shows a spinner that updates even when progress is
#' advanced by zero.}
#' }
#'
#' Custom tokens are also supported, and you need to pass their
#' values to \code{progress_bar$tick()} or \code{progress_bar$update()},
#' in a named list. See example below.
#'
#' @importFrom R6 R6Class
#'
#' @export
#' @examples
#'
#' ## We don't run the examples on CRAN, because they take >10s
#' ## altogether. Unfortunately it is hard to create a set of
#' ## meaningful progress bar examples that also run quickly.
#' \dontrun{
#'
#' ## Basic
#' pb <- progress_bar$new(total = 100)
#' for (i in 1:100) {
#' pb$tick()
#' Sys.sleep(1 / 100)
#' }
#'
#' ## ETA
#' pb <- progress_bar$new(
#' format = " downloading [:bar] :percent eta: :eta",
#' total = 100, clear = FALSE, width= 60)
#' for (i in 1:100) {
#' pb$tick()
#' Sys.sleep(1 / 100)
#' }
#'
#' ## Elapsed time
#' pb <- progress_bar$new(
#' format = " downloading [:bar] :percent in :elapsed",
#' total = 100, clear = FALSE, width= 60)
#' for (i in 1:100) {
#' pb$tick()
#' Sys.sleep(1 / 100)
#' }
#'
#' ## Spinner
#' pb <- progress_bar$new(
#' format = "(:spin) [:bar] :percent",
#' total = 30, clear = FALSE, width = 60)
#' for (i in 1:30) {
#' pb$tick()
#' Sys.sleep(3 / 100)
#' }
#'
#' ## Custom tokens
#' pb <- progress_bar$new(
#' format = " downloading :what [:bar] :percent eta: :eta",
#' clear = FALSE, total = 200, width = 60)
#' f <- function() {
#' for (i in 1:100) {
#' pb$tick(tokens = list(what = "foo "))
#' Sys.sleep(2 / 100)
#' }
#' for (i in 1:100) {
#' pb$tick(tokens = list(what = "foobar"))
#' Sys.sleep(2 / 100)
#' }
#' }
#' f()
#'
#' ## Download (or other) rates
#' pb <- progress_bar$new(
#' format = " downloading foobar at :rate, got :bytes in :elapsed",
#' clear = FALSE, total = 1e7, width = 60)
#' f <- function() {
#' for (i in 1:100) {
#' pb$tick(sample(1:100 * 1000, 1))
#' Sys.sleep(2/100)
#' }
#' pb$tick(1e7)
#' invisible()
#' }
#' f()
#'
#' }
#'
#' @name progress_bar
NULL
# R6 class for the terminal progress bar.  The public methods are thin
# wrappers that delegate to the pb_* helper functions defined below, passing
# `self` and `private` explicitly.
progress_bar <- R6Class("progress_bar",
  public = list(
    # See the package-level docs above for the meaning of each argument.
    initialize = function(format = "[:bar] :percent", total = 100,
      width = getOption("width") - 2, stream = NULL, complete = "=",
      incomplete = "-", callback = function(self) {}, clear = TRUE,
      show_after = 0.2, force = FALSE) {
      pb_init(self, private, format, total, width, stream, complete,
        incomplete, callback, clear, show_after, force)
    },
    # Advance by `len` ticks (len = 0 forces a draw without progress).
    tick = function(len = 1, tokens = list()) {
      pb_tick(self, private, len, tokens) },
    # Jump to a given completion ratio in [0, 1].
    update = function(ratio, tokens = list()) {
      pb_update(self, private, ratio, tokens) }
  ),
  private = list(
    render = function(tokens) { pb_render(self, private, tokens) },
    terminate = function() { pb_terminate(self, private) },
    ratio = function() { pb_ratio(self, private) },
    first = TRUE,            # TRUE until the first tick (starts the clock)
    supported = NA,          # does the stream support progress bars?
    format = NULL,           # format string with :tokens
    total = NULL,            # number of ticks for 100%
    current = 0,             # ticks so far
    width = NULL,            # total width of the rendered bar line
    stream = NULL,           # output connection
    chars = list(
      complete = "=",
      incomplete = "-"
    ),
    callback = NULL,         # called once on completion
    clear = NULL,            # erase the bar when done?
    show_after = NULL,       # delay (difftime) before first draw
    last_draw = "",          # last rendered string, to skip no-op redraws
    start = NULL,            # time of first tick
    toupdate = FALSE,        # TRUE once show_after has elapsed
    complete = FALSE,        # TRUE once current >= total
    spin = NULL,             # closure cycling the spinner glyphs
    # Which :tokens actually occur in `format` (precomputed in pb_init).
    has_token = c(current = FALSE, total = FALSE, elapsedfull = FALSE,
      elapsed = FALSE, eta = FALSE, percent = FALSE, rate = FALSE,
      bytes = FALSE, bar = FALSE, spin = FALSE)
  )
)
# Validate the constructor arguments and store them in the bar's private
# state.  Returns `self` so $new() yields the bar object.
pb_init <- function(self, private, format, total, width, stream,
                    complete, incomplete, callback, clear, show_after,
                    force) {
  # Pick the default output stream if the caller gave none.
  stream <- default_stream(stream)
  assert_character_scalar(format)
  assert_positive_scalar(total)
  assert_nonzero_count(width)
  assert_connection(stream)
  assert_single_char(complete)
  assert_single_char(incomplete)
  assert_function(callback)
  assert_flag(clear)
  assert_nonnegative_scalar(show_after)
  private$first <- TRUE
  # `force` overrides the terminal-capability check.
  private$supported <- force || is_supported(stream)
  private$format <- format
  private$total <- total
  private$width <- width
  private$stream <- stream
  private$chars$complete <- complete
  private$chars$incomplete <- incomplete
  private$callback <- callback
  private$clear <- clear
  private$show_after <- as.difftime(show_after, units = "secs")
  private$spin <- spin_symbols()
  # Precompute which tokens appear in the format, so rendering can skip
  # substitutions for absent tokens.
  private$has_token <- pb_update_has_token(private$has_token, format)
  self
}
# For each known token name, flag whether ":name" occurs literally in the
# format string.  Returns a named logical vector with the same names and
# order as `tokens`.
pb_update_has_token <- function(tokens, format) {
  vapply(
    names(tokens),
    function(token) grepl(paste0(":", token), format, fixed = TRUE),
    logical(1)
  )
}
# Advance the bar by `len` ticks (len may be 0, e.g. to force an initial
# draw) and re-render if the bar is already visible.  `tokens` supplies
# values for custom :tokens in the format string.  Returns `self`.
pb_tick <- function(self, private, len, tokens) {
  assert_scalar(len)
  assert_named_or_empty_list(tokens)
  # The first tick starts the clock used by :elapsed / :eta / :rate.
  if (private$first) {
    private$first <- FALSE
    private$start <- Sys.time()
  }
  private$current <- private$current + len
  # Only start drawing once `show_after` has elapsed, so that very short
  # computations never show a bar at all.
  if (!private$toupdate) {
    if (Sys.time() - private$start >= private$show_after) {
      private$toupdate <- TRUE
    }
  }
  if (private$current >= private$total) private$complete <- TRUE
  if (private$toupdate) private$render(tokens)
  if (private$complete) {
    private$terminate()
    # Fix: per the documented contract (and the default
    # `callback = function(self) {}`), the progress bar object itself is
    # passed to the completion callback as its single argument.
    private$callback(self)
  }
  self
}
#' @importFrom prettyunits vague_dt pretty_bytes
#' @importFrom utils flush.console
# Completed fraction of the bar, clamped to the [0, 1] interval.
pb_ratio <- function(self, private) {
  min(max(private$current / private$total, 0), 1)
}
#' @importFrom hms as.hms
# Render the bar: substitute every token present in the format string, then
# draw the result on the stream (only if it differs from the last draw).
# Note the substitution order matters: :elapsedfull must be replaced before
# :elapsed, since the latter is a prefix of the former.
pb_render <- function(self, private, tokens) {
  if (! private$supported) return(invisible())
  str <- private$format
  if (private$has_token["percent"]) {
    percent <- private$ratio() * 100
    # Fixed 3-char width keeps the bar from jittering as digits change.
    str <- sub(str, pattern = ":percent", replacement =
                 paste0(format(round(percent), width = 3), "%"))
  }
  if (private$has_token["elapsedfull"]) {
    elapsed <- Sys.time() - private$start
    units(elapsed) <- "secs"
    # hh:mm:ss format via hms.
    elapsedfull <- format(as.hms(as.integer(elapsed)))
    str <- sub(str, pattern = ":elapsedfull", replacement = elapsedfull)
  }
  if (private$has_token["elapsed"]) {
    elapsed_secs <- Sys.time() - private$start
    elapsed <- vague_dt(elapsed_secs, format = "terse")
    str <- sub(str, pattern = ":elapsed", replacement = elapsed)
  }
  if (private$has_token["eta"]) {
    percent <- private$ratio() * 100
    elapsed_secs <- Sys.time() - private$start
    # Linear extrapolation: remaining time = elapsed * (total/current - 1).
    eta_secs <- if (percent == 100) {
      0
    } else {
      elapsed_secs * (private$total / private$current - 1.0)
    }
    eta <- as.difftime(eta_secs, units = "secs")
    # current == 0 yields NaN/Inf; show a placeholder instead.
    if (is.nan(eta) || eta == Inf) {
      eta <- " ?s"
    } else {
      eta <- vague_dt(eta, format = "terse")
    }
    str <- sub(str, pattern = ":eta", replacement = eta)
  }
  if (private$has_token["rate"]) {
    elapsed_secs <- Sys.time() - private$start
    rate <- private$current / as.double(elapsed_secs, units = "secs")
    if (is.nan(rate)) rate <- 0
    # Rate is formatted as bytes/second (ticks are assumed to be bytes).
    rate <- paste0(pretty_bytes(round(rate)), "/s")
    str <- sub(str, pattern = ":rate", replacement = rate)
  }
  if (private$has_token["current"]) {
    str <- sub(str, pattern = ":current",
               replacement = round(private$current))
  }
  if (private$has_token["total"]) {
    str <- sub(str, pattern = ":total", replacement = round(private$total))
  }
  if (private$has_token["bytes"]) {
    bytes <- pretty_bytes(round(private$current))
    str <- sub(str, pattern = ":bytes", replacement = bytes)
  }
  if (private$has_token["spin"]) {
    ## NOTE: fixed = TRUE is needed here or "\\" causes trouble with
    ## the replacement (I think it's interpreted as an invalid
    ## backreference).
    str <- sub(str, pattern = ":spin", replacement = private$spin(), fixed = TRUE)
  }
  # Custom user-supplied tokens; coerce defensively to a single string.
  for (t in names(tokens)) {
    txt <- tryCatch(as.character(tokens[[t]])[[1]], error = function(e) "???")
    str <- gsub(paste0(":", t), txt, str, fixed = TRUE)
  }
  # The bar itself goes last: it fills whatever width the other tokens left.
  if (private$has_token["bar"]) {
    bar_width <- nchar(sub(str, pattern = ":bar", replacement = ""))
    bar_width <- private$width - bar_width
    bar_width <- max(0, bar_width)
    ratio <- private$ratio()
    complete_len <- round(bar_width * ratio)
    # paste(rep("", n + 1), collapse = ch) yields n copies of ch.
    complete <- paste(rep("", complete_len + 1),
                      collapse = private$chars$complete)
    incomplete <- paste(rep("", bar_width - complete_len + 1),
                        collapse = private$chars$incomplete)
    str <- sub(":bar", paste0(complete, incomplete), str)
  }
  # Redraw only when the rendered string changed since the last draw.
  if (private$last_draw != str) {
    # If the new line is shorter, wipe the old one first to avoid leftovers.
    if (nchar(private$last_draw) > nchar(str)) {
      clear_line(private$stream, private$width)
    }
    cursor_to_start(private$stream)
    cat(str, file = private$stream)
    private$last_draw <- str
  }
  flush.console()
  self
}
# NOTE(review): the three definitions below are a duplicated dataset row of
# functions that also appear later in this dump; kept byte-identical here.

# Set the progress bar to an absolute completion `ratio` (in [0, 1]) by
# ticking the difference between the target tick count and the current one.
pb_update <- function(self, private, ratio, tokens) {
  assert_ratio(ratio)
  goal <- floor(ratio * private$total)
  self$tick(goal - private$current, tokens)
}

# Finish the bar: either erase it (clear = TRUE) or move to the next line.
# No-op when the stream is unsupported or the bar was never drawn.
pb_terminate <- function(self, private) {
  if (!private$supported || !private$toupdate) return(invisible())
  if (private$clear) {
    clear_line(private$stream, private$width)
    cursor_to_start(private$stream)
  } else {
    cat("\n", file = private$stream)
  }
}

# Factory returning a closure that cycles through the classic ASCII spinner
# frames ("-", "\", "|", "/"), advancing one frame per call.
spin_symbols <- function() {
  sym <- c("-", "\\", "|", "/")
  i <- 0L
  n <- length(sym)
  function() {
    sym[[i <<- if (i >= n) 1L else i + 1L]]
  }
}
| /R/progress.R | no_license | jimhester/progress | R | false | false | 12,530 | r |
#' Progress bar in the terminal
#'
#' Progress bars are configurable, may include percentage, elapsed time,
#' and/or the estimated completion time. They work in the command line,
#' in Emacs and in R Studio. The progress package was heavily influenced by
#' https://github.com/tj/node-progress
#'
#' @section Creating the progress bar:
#' A progress bar is an R6 object, that can be created with
#' \code{progress_bar$new()}. It has the following arguments:
#' \describe{
#' \item{format}{The format of the progress bar. A number of
#' tokens can be used here, see them below. It defaults to
#' \code{"[:bar] :percent"}, which means that the progress
#' bar is within brackets on the left, and the percentage
#' is printed on the right.}
#' \item{total}{Total number of ticks to complete. Defaults to 100.}
#' \item{width}{Width of the progress bar. Default is the current
#' terminal width (see \code{options()} and \code{width}) minus two.}
#' \item{stream}{The output stream to put the progress bar on.
#' It defaults to \code{stderr()}, except in R Studio that has
#' a bug when printing on the standard error, so there we use
#' \code{stdout}. If the output stream is not a terminal and
#' we are not in R Studio, then no progress bar is printed.}
#' \item{complete}{Completion character, defaults to \code{=}.}
#' \item{incomplete}{Incomplete character, defaults to \code{-}.}
#' \item{callback}{Callback function to call when the progress
#' bar finishes. The progress bar object itself is passed to it
#' as the single parameter.}
#' \item{clear}{Whether to clear the progress bar on completion.
#' Defaults to \code{TRUE}.}
#' \item{show_after}{Amount of time in seconds, after which the progress
#' bar is shown on the screen. For very short processes,
#' it is probably not worth showing it at all. Defaults to two
#' tenth of a second.}
#' \item{force}{Whether to force showing the progress bar,
#' even if the given (or default) stream does not seem support it.}
#' }
#'
#' @section Using the progress bar:
#' Two functions can update a progress bar. \code{progress_bar$tick()}
#' increases the number of ticks by one (or another specified value).
#' \code{progress_bar$update()} sets a given ratio.
#'
#' The progress bar is displayed after the first `tick` command.
#' This might not be desirable for long computations, because
#' nothing is shown before the first tick. It is good practice to
#' call `tick(0)` at the beginning of the computation or download,
#' which shows the progress bar immediately.
#'
#' @section Tokens:
#' They can be used in the \code{format} argument when creating the
#' progress bar.
#' \describe{
#' \item{:bar}{The progress bar itself.}
#' \item{:current}{Current tick number.}
#' \item{:total}{Total ticks.}
#' \item{:elapsed}{Elapsed time in seconds.}
#' \item{:elapsedfull}{Elapsed time in hh:mm:ss format.}
#' \item{:eta}{Estimated completion time in seconds.}
#' \item{:percent}{Completion percentage.}
#' \item{:rate}{Download rate, bytes per second. See example below.}
#' \item{:bytes}{Shows :current, formatted as bytes. Useful
#' for downloads or file reads if you don't know the size of the
#' file in advance. See example below.}
#' \item{:spin}{Shows a spinner that updates even when progress is
#' advanced by zero.}
#' }
#'
#' Custom tokens are also supported, and you need to pass their
#' values to \code{progress_bar$tick()} or \code{progress_bar$update()},
#' in a named list. See example below.
#'
#' @importFrom R6 R6Class
#'
#' @export
#' @examples
#'
#' ## We don't run the examples on CRAN, because they take >10s
#' ## altogether. Unfortunately it is hard to create a set of
#' ## meaningful progress bar examples that also run quickly.
#' \dontrun{
#'
#' ## Basic
#' pb <- progress_bar$new(total = 100)
#' for (i in 1:100) {
#' pb$tick()
#' Sys.sleep(1 / 100)
#' }
#'
#' ## ETA
#' pb <- progress_bar$new(
#' format = " downloading [:bar] :percent eta: :eta",
#' total = 100, clear = FALSE, width= 60)
#' for (i in 1:100) {
#' pb$tick()
#' Sys.sleep(1 / 100)
#' }
#'
#' ## Elapsed time
#' pb <- progress_bar$new(
#' format = " downloading [:bar] :percent in :elapsed",
#' total = 100, clear = FALSE, width= 60)
#' for (i in 1:100) {
#' pb$tick()
#' Sys.sleep(1 / 100)
#' }
#'
#' ## Spinner
#' pb <- progress_bar$new(
#' format = "(:spin) [:bar] :percent",
#' total = 30, clear = FALSE, width = 60)
#' for (i in 1:30) {
#' pb$tick()
#' Sys.sleep(3 / 100)
#' }
#'
#' ## Custom tokens
#' pb <- progress_bar$new(
#' format = " downloading :what [:bar] :percent eta: :eta",
#' clear = FALSE, total = 200, width = 60)
#' f <- function() {
#' for (i in 1:100) {
#' pb$tick(tokens = list(what = "foo "))
#' Sys.sleep(2 / 100)
#' }
#' for (i in 1:100) {
#' pb$tick(tokens = list(what = "foobar"))
#' Sys.sleep(2 / 100)
#' }
#' }
#' f()
#'
#' ## Download (or other) rates
#' pb <- progress_bar$new(
#' format = " downloading foobar at :rate, got :bytes in :elapsed",
#' clear = FALSE, total = 1e7, width = 60)
#' f <- function() {
#' for (i in 1:100) {
#' pb$tick(sample(1:100 * 1000, 1))
#' Sys.sleep(2/100)
#' }
#' pb$tick(1e7)
#' invisible()
#' }
#' f()
#'
#' }
#'
#' @name progress_bar
NULL
# R6 definition of the progress bar. Every public method is a thin wrapper
# that delegates to a file-level pb_*() helper, keeping the class declarative
# and the logic testable as plain functions.
progress_bar <- R6Class("progress_bar",
  public = list(
    # Validate arguments and store the configuration; see pb_init().
    initialize = function(format = "[:bar] :percent", total = 100,
      width = getOption("width") - 2, stream = NULL, complete = "=",
      incomplete = "-", callback = function(self) {}, clear = TRUE,
      show_after = 0.2, force = FALSE) {
      pb_init(self, private, format, total, width, stream, complete,
        incomplete, callback, clear, show_after, force)
    },
    # Advance the bar by `len` ticks; `tokens` supplies custom token values.
    tick = function(len = 1, tokens = list()) {
      pb_tick(self, private, len, tokens) },
    # Jump to an absolute completion ratio in [0, 1].
    update = function(ratio, tokens = list()) {
      pb_update(self, private, ratio, tokens) }
  ),
  private = list(
    render = function(tokens) { pb_render(self, private, tokens) },
    terminate = function() { pb_terminate(self, private) },
    ratio = function() { pb_ratio(self, private) },
    first = TRUE,        # TRUE until the first tick (start time not yet set)
    supported = NA,      # does the output stream support progress bars?
    format = NULL,       # user-supplied format template
    total = NULL,        # total number of ticks
    current = 0,         # ticks accumulated so far
    width = NULL,        # terminal width used when rendering
    stream = NULL,       # output connection
    chars = list(
      complete = "=",
      incomplete = "-"
    ),
    callback = NULL,     # invoked once when the bar completes
    clear = NULL,        # erase the bar on completion?
    show_after = NULL,   # delay (difftime, secs) before the first draw
    last_draw = "",      # last string written, to avoid redundant redraws
    start = NULL,        # Sys.time() recorded at the first tick
    toupdate = FALSE,    # has show_after elapsed (drawing enabled)?
    complete = FALSE,    # has current reached total?
    spin = NULL,         # closure producing spinner frames
    # Which tokens actually occur in `format`; precomputed in pb_init() so
    # pb_render() can skip substitutions that cannot match.
    has_token = c(current = FALSE, total = FALSE, elapsedfull = FALSE,
      elapsed = FALSE, eta = FALSE, percent = FALSE, rate = FALSE,
      bytes = FALSE, bar = FALSE, spin = FALSE)
  )
)
# Configure a freshly created progress bar (called from the R6 initializer).
#
# Validates every user-supplied argument, resolves the output stream,
# decides whether the terminal supports drawing, and copies the resulting
# configuration into the private environment. Returns `self` invisibly
# through the initializer chain.
pb_init <- function(self, private, format, total, width, stream,
                    complete, incomplete, callback, clear, show_after,
                    force) {
  stream <- default_stream(stream)

  ## Fail fast on malformed arguments before touching any state.
  assert_character_scalar(format)
  assert_positive_scalar(total)
  assert_nonzero_count(width)
  assert_connection(stream)
  assert_single_char(complete)
  assert_single_char(incomplete)
  assert_function(callback)
  assert_flag(clear)
  assert_nonnegative_scalar(show_after)

  ## Plain settings are copied into the private environment verbatim.
  settings <- list(format = format, total = total, width = width,
                   stream = stream, callback = callback, clear = clear)
  for (field in names(settings)) {
    private[[field]] <- settings[[field]]
  }

  private$first <- TRUE
  ## `force = TRUE` draws the bar even if the stream looks unsupported.
  private$supported <- force || is_supported(stream)
  private$chars$complete <- complete
  private$chars$incomplete <- incomplete
  private$show_after <- as.difftime(show_after, units = "secs")
  private$spin <- spin_symbols()
  ## Precompute which tokens occur in the format string.
  private$has_token <- pb_update_has_token(private$has_token, format)

  self
}
# Flag which of the known tokens appear (as ":name") in the format string.
#
# tokens: named logical vector keyed by token name.
# format: the user-supplied format template.
# Returns the same vector with each entry set to whether ":<name>" occurs
# literally in `format`.
pb_update_has_token <- function(tokens, format) {
  tokens[] <- vapply(
    names(tokens),
    function(token_name) grepl(paste0(":", token_name), format, fixed = TRUE),
    logical(1)
  )
  tokens
}
# Advance the progress bar by `len` ticks.
#
# self/private: the R6 object and its private environment.
# len: number of ticks to add (may be 0, e.g. tick(0) to force an early draw).
# tokens: named list of values for custom tokens in the format string.
#
# The first call records the start time; drawing is delayed until
# `show_after` seconds have elapsed. Once `current` reaches `total` the bar
# is terminated and the user-supplied callback is invoked.
pb_tick <- function(self, private, len, tokens) {
  assert_scalar(len)
  assert_named_or_empty_list(tokens)
  if (private$first) {
    private$first <- FALSE
    private$start <- Sys.time()
  }
  private$current <- private$current + len
  if (!private$toupdate) {
    if (Sys.time() - private$start >= private$show_after) {
      private$toupdate <- TRUE
    }
  }
  if (private$current >= private$total) private$complete <- TRUE
  if (private$toupdate) private$render(tokens)
  if (private$complete) {
    private$terminate()
    ## Bug fix: pass the progress bar itself to the callback, as documented
    ## ("The progress bar object itself is passed to it as the single
    ## parameter"). The previous code invoked the callback with no
    ## arguments, which errors for any callback that requires its argument.
    private$callback(self)
  }
  self
}
#' @importFrom prettyunits vague_dt pretty_bytes
#' @importFrom utils flush.console
# Fraction of the work completed so far, clamped to the interval [0, 1].
pb_ratio <- function(self, private) {
  min(1, max(0, private$current / private$total))
}
#' @importFrom hms as.hms
# Render the progress bar into its output stream.
#
# Substitutes each built-in token that occurs in the format string (as
# precomputed in private$has_token), then user-supplied custom tokens, and
# finally the :bar token, which fills whatever terminal width remains after
# all other text. The string is only written when it differs from the last
# draw, to minimise flicker.
#
# NB: throughout, `str` is passed *positionally* to sub(); because `pattern`
# and `replacement` are supplied by name, the positional argument binds to
# sub()'s `x` parameter, so these calls are equivalent to
# sub(pattern, replacement, str).
pb_render <- function(self, private, tokens) {
  if (! private$supported) return(invisible())
  str <- private$format
  if (private$has_token["percent"]) {
    percent <- private$ratio() * 100
    # width = 3 right-aligns the number so the bar does not jitter.
    str <- sub(str, pattern = ":percent", replacement =
      paste0(format(round(percent), width = 3), "%"))
  }
  if (private$has_token["elapsedfull"]) {
    elapsed <- Sys.time() - private$start
    units(elapsed) <- "secs"
    # hh:mm:ss formatting via hms.
    elapsedfull <- format(as.hms(as.integer(elapsed)))
    str <- sub(str, pattern = ":elapsedfull", replacement = elapsedfull)
  }
  if (private$has_token["elapsed"]) {
    elapsed_secs <- Sys.time() - private$start
    elapsed <- vague_dt(elapsed_secs, format = "terse")
    str <- sub(str, pattern = ":elapsed", replacement = elapsed)
  }
  if (private$has_token["eta"]) {
    percent <- private$ratio() * 100
    elapsed_secs <- Sys.time() - private$start
    # Linear extrapolation: remaining = elapsed * (total/current - 1).
    eta_secs <- if (percent == 100) {
      0
    } else {
      elapsed_secs * (private$total / private$current - 1.0)
    }
    eta <- as.difftime(eta_secs, units = "secs")
    # current == 0 yields NaN/Inf; show a placeholder instead.
    if (is.nan(eta) || eta == Inf) {
      eta <- " ?s"
    } else {
      eta <- vague_dt(eta, format = "terse")
    }
    str <- sub(str, pattern = ":eta", replacement = eta)
  }
  if (private$has_token["rate"]) {
    elapsed_secs <- Sys.time() - private$start
    rate <- private$current / as.double(elapsed_secs, units = "secs")
    if (is.nan(rate)) rate <- 0
    rate <- paste0(pretty_bytes(round(rate)), "/s")
    str <- sub(str, pattern = ":rate", replacement = rate)
  }
  if (private$has_token["current"]) {
    str <- sub(str, pattern = ":current",
      replacement = round(private$current))
  }
  if (private$has_token["total"]) {
    str <- sub(str, pattern = ":total", replacement = round(private$total))
  }
  if (private$has_token["bytes"]) {
    bytes <- pretty_bytes(round(private$current))
    str <- sub(str, pattern = ":bytes", replacement = bytes)
  }
  if (private$has_token["spin"]) {
    ## NOTE: fixed = TRUE is needed here or "\\" causes trouble with
    ## the replacement (I think it's interpreted as an invalid
    ## backreference).
    str <- sub(str, pattern = ":spin", replacement = private$spin(), fixed = TRUE)
  }
  # Custom tokens supplied by the caller; failures render as "???".
  for (t in names(tokens)) {
    txt <- tryCatch(as.character(tokens[[t]])[[1]], error = function(e) "???")
    str <- gsub(paste0(":", t), txt, str, fixed = TRUE)
  }
  if (private$has_token["bar"]) {
    # The bar gets whatever width remains after all other text.
    bar_width <- nchar(sub(str, pattern = ":bar", replacement = ""))
    bar_width <- private$width - bar_width
    bar_width <- max(0, bar_width)
    ratio <- private$ratio()
    complete_len <- round(bar_width * ratio)
    # paste(rep("", n + 1), collapse = ch) yields ch repeated n times.
    complete <- paste(rep("", complete_len + 1),
      collapse = private$chars$complete)
    incomplete <- paste(rep("", bar_width - complete_len + 1),
      collapse = private$chars$incomplete)
    str <- sub(":bar", paste0(complete, incomplete), str)
  }
  # Only redraw when the rendered string changed; clear first if the new
  # string is shorter, so stale characters are not left on screen.
  if (private$last_draw != str) {
    if (nchar(private$last_draw) > nchar(str)) {
      clear_line(private$stream, private$width)
    }
    cursor_to_start(private$stream)
    cat(str, file = private$stream)
    private$last_draw <- str
  }
  flush.console()
  self
}
# Set the progress bar to an absolute completion `ratio` (in [0, 1]) by
# ticking the difference between the target tick count and the current one.
pb_update <- function(self, private, ratio, tokens) {
  assert_ratio(ratio)
  goal <- floor(ratio * private$total)
  self$tick(goal - private$current, tokens)
}
# Finish the bar: either erase it (clear = TRUE) or move to the next line.
# No-op when the stream is unsupported or the bar was never drawn
# (show_after never elapsed).
pb_terminate <- function(self, private) {
  if (!private$supported || !private$toupdate) return(invisible())
  if (private$clear) {
    clear_line(private$stream, private$width)
    cursor_to_start(private$stream)
  } else {
    cat("\n", file = private$stream)
  }
}
# Factory for the :spin token: returns a closure that cycles through the
# classic ASCII spinner frames, advancing by one frame per call.
spin_symbols <- function() {
  frames <- c("-", "\\", "|", "/")
  pos <- 0L
  function() {
    pos <<- pos %% length(frames) + 1L
    frames[[pos]]
  }
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @title A random walk Metropolis sampler using R
#' @description A random walk Metropolis sampler using R
#' @param sigma the sd
#' @param x0 the initial value
#' @param N the number of samples
#' @return a random sample of size \code{n}
#' @examples
#' \dontrun{
#' N <- 2000
#' sigma <- c(.05, .5, 2, 16)
#' x0 <- 25
#' rw1 <- rwMetropolisC(sigma[1], x0, N)
#' rw2 <- rwMetropolisC(sigma[2], x0, N)
#' rw3 <- rwMetropolisC(sigma[3], x0, N)
#' rw4 <- rwMetropolisC(sigma[4], x0, N)
#' rw <- cbind(rw1$x, rw2$x, rw3$x, rw4$x)
#' print(c(rw1$k, rw2$k, rw3$k, rw4$k))
#' print(c(1-rw1$k/N,1-rw2$k/N,1-rw3$k/N,1-rw4$k/N))
#' }
#' @export
# Thin auto-generated wrapper: forwards to the compiled C++ routine
# registered as '_SC19030_rwMetropolisC' in the SC19030 package.
# NOTE(review): this file is produced by Rcpp::compileAttributes(); edit the
# roxygen block in the C++ source rather than here.
rwMetropolisC <- function(sigma, x0, N) {
  .Call('_SC19030_rwMetropolisC', PACKAGE = 'SC19030', sigma, x0, N)
}
| /R/RcppExports.R | no_license | Meredith-Bo/SC19030 | R | false | false | 880 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @title A random walk Metropolis sampler using R
#' @description A random walk Metropolis sampler using R
#' @param sigma the sd
#' @param x0 the initial value
#' @param N the number of samples
#' @return a random sample of size \code{n}
#' @examples
#' \dontrun{
#' N <- 2000
#' sigma <- c(.05, .5, 2, 16)
#' x0 <- 25
#' rw1 <- rwMetropolisC(sigma[1], x0, N)
#' rw2 <- rwMetropolisC(sigma[2], x0, N)
#' rw3 <- rwMetropolisC(sigma[3], x0, N)
#' rw4 <- rwMetropolisC(sigma[4], x0, N)
#' rw <- cbind(rw1$x, rw2$x, rw3$x, rw4$x)
#' print(c(rw1$k, rw2$k, rw3$k, rw4$k))
#' print(c(1-rw1$k/N,1-rw2$k/N,1-rw3$k/N,1-rw4$k/N))
#' }
#' @export
# Thin auto-generated wrapper (duplicate dataset row of the definition
# above): forwards to the compiled C++ routine '_SC19030_rwMetropolisC'.
rwMetropolisC <- function(sigma, x0, N) {
  .Call('_SC19030_rwMetropolisC', PACKAGE = 'SC19030', sigma, x0, N)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plot_requests_per_second}
\alias{plot_requests_per_second}
\title{Plot the requests per second made during the test}
\usage{
plot_requests_per_second(results)
}
\arguments{
\item{results}{A data frame returned from the loadtest function}
}
\value{
A ggplot2 showing the distribution of requests by request per second
}
\description{
Plot the requests per second made during the test
}
\examples{
results <- loadtest("google.com","GET")
plot_requests_per_second(results)
}
| /man/plot_requests_per_second.Rd | permissive | jcpsantiago/loadtest | R | false | true | 564 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plot_requests_per_second}
\alias{plot_requests_per_second}
\title{Plot the requests per second made during the test}
\usage{
plot_requests_per_second(results)
}
\arguments{
\item{results}{A data frame returned from the loadtest function}
}
\value{
A ggplot2 showing the distribution of requests by request per second
}
\description{
Plot the requests per second made during the test
}
\examples{
results <- loadtest("google.com","GET")
plot_requests_per_second(results)
}
|
library(dabestr)
context("Test Loading and Bootstrap")
# Loading: a dabest object should be a list carrying class
# c("dabest", "list").
test_that("Two-group unpaired loads properly", {
  sample.size <- sample(50:100, 1) # random sample size.
  diff <- sample(50:100, 1) # random difference in means.
  dummy.data <- generate.two.groups(sampleN = sample.size,
    difference = diff)
  my.data <- dabest(dummy.data, Group, Value, paired = FALSE,
    idx = c("Control", "Test"))
  # Check attributes are ok.
  my.data.class <- attr(my.data, "class")
  expect_match(my.data.class[1], "dabest")
  expect_match(my.data.class[2], "list")
})

# The mean_diff() report should echo the group names, sizes, variable name
# and pairedness that were used to build the dabest object.
test_that("Two-group unpaired mean diff parses properly", {
  sample.size <- sample(50:100, 1) # random sample size.
  diff <- sample(50:100, 1) # random difference in means.
  dummy.data <- generate.two.groups(sampleN = sample.size,
    difference = diff)
  my.data <- dabest(dummy.data, Group, Value, paired = FALSE,
    idx = c("Control", "Test"))
  md.my.data <- my.data %>% mean_diff()
  report <- md.my.data$result
  expect_match(as.character(report$control_group), "Control")
  expect_equal(report$control_size, sample.size)
  expect_match(as.character(report$test_group), "Test")
  expect_equal(report$test_size, sample.size)
  expect_match(as.character(report$variable), "Value")
  expect_false(report$paired)
})
# Coverage check: over many bootstrap seeds, the true mean difference should
# fall inside the reported 95% BCa interval in at least 95% of runs.
test_that("Two-group unpaired mean diff falls within 95 CI", {
  diff <- sample(50:100, 1)
  dummy.data <- generate.two.groups(difference = diff)
  ci.for.test <- 95
  ci.failures <- 0
  ## Bug fix: the original `seq(51: 100)` is seq() applied to a length-50
  ## vector, i.e. seq_along(51:100) == 1:50 -- not the intended seed values.
  ## Use the sequence 51:100 directly.
  seeds <- 51:100
  my.data <- dabest(dummy.data, Group, Value, paired = FALSE,
    idx = c("Control", "Test"))
  ## For each seed, bootstrap the mean difference and count how often the
  ## true difference escapes the reported interval.
  for (s in seeds) {
    boot.result <- my.data %>% mean_diff(seed = s * s)
    report <- boot.result$result
    if (diff < report$bca_ci_low || diff > report$bca_ci_high) {
      ci.failures <- ci.failures + 1
    }
  }
  ## Allow at most the expected 5% of intervals to miss the true value.
  max.errors <- ceiling((1 - (ci.for.test/100)) * length(seeds))
  expect_lte(ci.failures, max.errors)
})
# Paired loading should set the is.paired flag on the returned object.
test_that("Two-group paired reports accurately", {
  sample.size <- sample(50:100, 1) # random sample size.
  diff <- sample(50:100, 1) # random difference in means.
  dummy.data <- generate.two.groups(sampleN = sample.size,
    difference = diff)
  loaded <- dabest(dummy.data, Group, Value,
    paired = TRUE, id.col = ID,
    idx = c("Control", "Test"))
  expect_true(loaded$is.paired)
})

# NOTE(review): `dummy.data` is not defined in this test's scope (each
# test_that body evaluates in its own environment). This only works because
# dabest() presumably raises the id.col error before evaluating its data
# argument -- confirm this lazy-evaluation dependency is intentional.
test_that("Two-group paired mean diff requires id.col", {
  expect_error(dabest(dummy.data, Group, Value, paired = TRUE,
    # Missing `id.col` parameter.
    idx = c("Control", "Test")),
    "`paired` is TRUE but no `id.col` was supplied.")
})
| /tests/testthat/test-bootstrap.R | permissive | josesho/dabestr | R | false | false | 2,871 | r | library(dabestr)
context("Test Loading and Bootstrap")
test_that("Two-group unpaired loads properly", {
sample.size <- sample(50:100, 1) # random sample size.
diff <- sample(50:100, 1) # random difference in means.
dummy.data <- generate.two.groups(sampleN = sample.size,
difference = diff)
my.data <- dabest(dummy.data, Group, Value, paired = FALSE,
idx = c("Control", "Test"))
# Check attributes are ok.
my.data.class <- attr(my.data, "class")
expect_match(my.data.class[1], "dabest")
expect_match(my.data.class[2], "list")
})
test_that("Two-group unpaired mean diff parses properly", {
sample.size <- sample(50:100, 1) # random sample size.
diff <- sample(50:100, 1) # random difference in means.
dummy.data <- generate.two.groups(sampleN = sample.size,
difference = diff)
my.data <- dabest(dummy.data, Group, Value, paired = FALSE,
idx = c("Control", "Test"))
md.my.data <- my.data %>% mean_diff()
report <- md.my.data$result
expect_match(as.character(report$control_group), "Control")
expect_equal(report$control_size, sample.size)
expect_match(as.character(report$test_group), "Test")
expect_equal(report$test_size, sample.size)
expect_match(as.character(report$variable), "Value")
expect_false(report$paired)
})
test_that("Two-group unpaired mean diff falls within 95 CI", {
diff <- sample(50: 100, 1)
dummy.data <- generate.two.groups(difference = diff)
ci.for.test <- 95
ci.failures <- 0
seeds <- seq(51: 100)
my.data <- dabest(dummy.data, Group, Value, paired = FALSE,
idx = c("Control", "Test"))
for (s in seeds) {
boot.result <- my.data %>% mean_diff(seed = s * s)
report <- boot.result$result
if (diff < report$bca_ci_low || diff > report$bca_ci_high) {
ci.failures <- ci.failures + 1
}
}
max.errors <- ceiling((1 - (ci.for.test/100)) * length(seeds))
expect_lte(ci.failures, max.errors)
})
test_that("Two-group paired reports accurately", {
sample.size <- sample(50:100, 1) # random sample size.
diff <- sample(50:100, 1) # random difference in means.
dummy.data <- generate.two.groups(sampleN = sample.size,
difference = diff)
loaded <- dabest(dummy.data, Group, Value,
paired = TRUE, id.col = ID,
idx = c("Control", "Test"))
expect_true(loaded$is.paired)
})
test_that("Two-group paired mean diff requires id.col", {
expect_error(dabest(dummy.data, Group, Value, paired = TRUE,
# Missing `id.col` parameter.
idx = c("Control", "Test")),
"`paired` is TRUE but no `id.col` was supplied.")
})
|
# note: it is assumed that user IDs are consecutive numbers starting at
# 1
# utility to read in raw data in standard (user ID, item ID, rating)
# format, and form an R list for user data, with class 'usrData'; each
# element of the list will be of class 'usrDatum'
# arguments:
# ratingsIn: input data, whose first 3 cols are user ID, item ID
# and rating
# usrCovs: data frame of user covariates, e.g. gender and age, one
# row per user
# itmCats: data frame of item categories, e.g. genre for movies, one
# row of booleans per item; categories need not be
# mutually exclusive
# fileOut: if specified, save the value returned by the function
# using R save(), with file name fileOut
# value:
# object of class 'usrData': an R list with one element per user;
# each such element is itself an R list, an object of class
# 'usrDatum', with these components:
#
# userID: the ID of this user
# ratings: ratings set by this user
# itms: IDs of items rated by this user
# cvrs: covariate data for this user, if any
# cats: item category data for this user, if any; i-th element
# is proportion of items rated by this user that are
# in category i
# Build a 'usrData' object (a list of per-user 'usrDatum' records) from raw
# (user ID, item ID, rating) triples.
#
# arguments:
#   ratingsIn: data frame whose first 3 columns are user ID, item ID and
#              rating; user IDs must be the consecutive integers 1..nusers
#   usrCovs:   optional data frame of user covariates, one row per user
#   itmCats:   optional data frame of item category indicators, one row per
#              item; categories need not be mutually exclusive
#   fileOut:   if nonempty, the result is additionally saved via save()
#
# value: list of class 'usrData'; element i is a 'usrDatum' list with
# components userID, itms, ratings and, when provided, cvrs and cats.
formUserData <- function(ratingsIn,usrCovs=NULL,itmCats=NULL,fileOut='') {
  # rownums[[i]] will be the row numbers in ratingsIn belonging to user i
  # (seq_len() rather than 1:nrow() so a 0-row input cannot yield c(1, 0))
  rownums <- split(seq_len(nrow(ratingsIn)),ratingsIn[,1])
  nusers <- length(rownums)
  userrange <- range(as.numeric(names(rownums)))
  usermin <- userrange[1]
  usermax <- userrange[2]
  if (usermin != 1) {
    stop('user IDs must start at 1')
  }
  if (usermax - usermin + 1 != nusers) {
    stop('some user IDs missing')
  }
  # should add check for item IDs too
  retval <- list()
  if (!is.null(itmCats)) {
    itmCats <- as.matrix(itmCats)
    nitems <- nrow(itmCats)
  }
  for (i in seq_len(nusers)) {
    whichrows <- rownums[[i]]
    retval[[i]] <- list(userID=i)
    retval[[i]]$itms <- ratingsIn[whichrows,2]
    retval[[i]]$ratings <- ratingsIn[whichrows,3]
    if (!is.null(usrCovs))
      retval[[i]]$cvrs <- as.numeric(usrCovs[i,])
    if (!is.null(itmCats)) {
      # proportion of this user's rated items falling in each category;
      # note item IDs are used directly as row indices into itmCats
      tmp <- rep(0,nitems)
      tmp[retval[[i]]$itms] <- 1
      retval[[i]]$cats <- tmp %*% itmCats / sum(tmp)
    }
    class(retval[[i]]) <- 'usrDatum'
  }
  class(retval) <- 'usrData'
  if (fileOut != '') save(retval,file=fileOut)
  retval
}
# Construct a new 'usrDatum' record directly from its components.
#
#   itms:    item IDs rated by the user
#   ratings: the corresponding ratings
#   userID:  optional user ID (NULL when unknown)
formUserDatum <- function(itms,ratings,userID=NULL) {
  structure(list(itms = itms, ratings = ratings, userID = userID),
            class = 'usrDatum')
}
# Return the row(s) of ratingsIn whose user-ID and item-ID columns (the
# first two columns) match the given usrID and itmID.
findInputRow <- function(ratingsIn,usrID,itmID) {
  matches <- ratingsIn[, 1] == usrID & ratingsIn[, 2] == itmID
  ratingsIn[matches, ]
}
| /rectoolShiny/rectools-master/R/findUsrItmData.R | no_license | latuji/rectools | R | false | false | 2,868 | r |
# note: it is assumed that user IDs are consecutive numbers starting at
# 1
# utility to read in raw data in standard (user ID, item ID, rating)
# format, and form an R list for user data, with class 'usrData'; each
# element of the list will be of class 'usrDatum'
# arguments:
# ratingsIn: input data, whose first 3 cols are user ID, item ID
# and rating
# usrCovs: data frame of user covariates, e.g. gender and age, one
# row per user
# itmCats: data frame of item categories, e.g. genre for movies, one
# row of booleans per item; categories need not be
# mutually exclusive
# fileOut: if specified, save the value returned by the function
# using R save(), with file name fileOut
# value:
# object of class 'usrData': an R list with one element per user;
# each such element is itself an R list, an object of class
# 'usrDatum', with these components:
#
# userID: the ID of this user
# ratings: ratings set by this user
# itms: IDs of items rated by this user
# cvrs: covariate data for this user, if any
# cats: item category data for this user, if any; i-th element
# is proportion of items rated by this user that are
# in category i
formUserData <- function(ratingsIn,usrCovs=NULL,itmCats=NULL,fileOut='') {
# rownums[[i]] will be the row numbers in ratingsIn belonging to user i
rownums <- split(1:nrow(ratingsIn),ratingsIn[,1])
nusers <- length(rownums)
userrange <- range(as.numeric(names(rownums)))
usermin <- userrange[1]
usermax <- userrange[2]
if (usermin != 1) {
stop('user IDs must start at 1')
}
if (usermax - usermin + 1 != nusers) {
stop('some user IDs missing')
}
# should add check for item IDs too
retval <- list()
if (!is.null(itmCats)) {
itmCats <- as.matrix(itmCats)
nitems <- nrow(itmCats)
}
for (i in 1:nusers) {
whichrows <- rownums[[i]]
retval[[i]] <- list(userID=i)
retval[[i]]$itms <- ratingsIn[whichrows,2]
retval[[i]]$ratings <- ratingsIn[whichrows,3]
if (!is.null(usrCovs))
retval[[i]]$cvrs <- as.numeric(usrCovs[i,])
if (!is.null(itmCats)) {
tmp <- rep(0,nitems)
tmp[retval[[i]]$itms] <- 1
retval[[i]]$cats <- tmp %*% itmCats / sum(tmp)
}
class(retval[[i]]) <- 'usrDatum'
}
class(retval) <- 'usrData'
if (fileOut != '') save(retval,file=fileOut)
retval
}
# construct a new object of class 'usrDatum'
# (duplicate dataset row of the definition above; kept byte-identical)
# itms: item IDs rated by the user; ratings: corresponding ratings;
# userID: optional user ID.
formUserDatum <- function(itms,ratings,userID=NULL) {
  obj <- list(itms = itms, ratings=ratings,userID=userID)
  class(obj) <- 'usrDatum'
  obj
}
# utility: find input row for a given user, item
# (duplicate dataset row; returns rows whose first two columns match)
findInputRow <- function(ratingsIn,usrID,itmID) {
  ratingsIn[ratingsIn[,1]==usrID & ratingsIn[,2]==itmID,]
}
|
context("check that data set is loaded/available")
# Each diceware wordlist must contain exactly 6^5 = 7776 entries
# (one per possible 5-dice token).
test_that("check correct dimensions", {
  expect_equal(nrow(wordlist_en), 7776)
  expect_equal(nrow(wordlist_de), 7776)
  expect_equal(nrow(wordlist_es), 7776)
  expect_equal(nrow(wordlist_fr), 7776)
  expect_equal(nrow(wordlist_it), 7776)
  expect_equal(nrow(wordlist_jp), 7776)
  ## expect_equal(nrow(wordlist_nl), 7776)
  expect_equal(nrow(wordlist_sv), 7776)
})
context("test token")
# A valid token is a single 5-character string.
test_that("check length", {
  expect_false(check_token(character(0)))
  expect_false(check_token(c("11111", "22222")))
  expect_false(check_token("111"))
  expect_false(check_token("111111"))
  expect_true(check_token("11111"))
})

# Every character must be a die digit 1-6, so "0", letters, NA and NULL
# are all rejected.
test_that("check digits", {
  expect_false(check_token("A1111"))
  expect_false(check_token(NA))
  expect_false(check_token(NULL))
  expect_false(check_token("01234"))
  expect_true(check_token("12345"))
  expect_true(check_token("23456"))
})
# Heuristic: should the test run be treated as unable to reach random.org?
#
# Returns TRUE when we should assume no connection: R reports no http/ftp
# capability, we are not on unix (no nsl() probe available), or a DNS
# lookup of random.org fails.
not_working <- function() {
  ## Robustness fix: capabilities()["http/ftp"] is a named logical that may
  ## be NA on some builds -- a bare `if (NA)` errors, so wrap in isTRUE()
  ## to treat anything other than TRUE as "no internet support".
  if (isTRUE(capabilities()["http/ftp"])) {
    if (.Platform$OS.type == "unix") { ## If unix, then we can use nsl to check connection
      res <- is.null(utils::nsl("random.org")) ## NULL => lookup failed
    } else { ## otherwise just fail
      res <- TRUE
    }
  } else {
    res <- TRUE
  }
  res
}
# testthat helper: skip the calling test when random.org is unreachable.
check_random_org <- function() {
  if (not_working()) {
    skip("can't connect to random.org")
  }
}
context("generate token")
# generate_token() should return one token per requested word.
test_that("correct length and pseudorandom", {
  test_len <- lapply(1:10, generate_token, method = "pseudo")
  expect_equal(1:10, sapply(test_len, length))
  expect_equal(1, length(generate_token(1)))
})

# The "random" method queries random.org, so skip on CRAN and off-line.
test_that("random numbers", {
  skip_on_cran()
  check_random_org()
  expect_equal(1, length(generate_token(n_words = 1, method = "random")))
})
context("match token")
# "11111" is the first entry of the English list; "16234" maps to "cat".
test_that("correct matching", {
  expect_equal(match_token("11111", wordlist = wordlist_en, title_case = TRUE), "A")
  expect_equal(match_token("11111", wordlist = wordlist_en, title_case = FALSE), "a")
  expect_equal(match_token("16234", wordlist = wordlist_en, title_case = TRUE), "Cat")
  expect_equal(match_token("16234", wordlist = wordlist_en, title_case = FALSE), "cat")
})

# Non-token input must raise the "invalid token" error.
test_that("fails if incorrect token", {
  expect_error(match_token("cat"), "invalid token")
})
context("generate passphrase")
# End-to-end check across languages: known tokens map to known words, with
# title_case controlling capitalisation and verbose controlling whether the
# phrase is also emitted as a message (with spaces between words).
test_that("test passphrase", {
  expect_message(generate_passphrase(
    tokens = c("36156", "35646"),
    wordlist = wordlist_en,
    title_case = FALSE,
    verbose = TRUE
  ), "lava lamp")
  expect_equal(generate_passphrase(
    tokens = c("36156", "35646"),
    wordlist = wordlist_en,
    title_case = FALSE,
    verbose = FALSE
  ), "lavalamp")
  expect_equal(generate_passphrase(
    tokens = c("36156", "35646"),
    wordlist = wordlist_en,
    title_case = TRUE,
    verbose = FALSE
  ), "LavaLamp")
  ## EFF's list
  expect_equal(generate_passphrase(
    tokens = c("15233"),
    wordlist = wordlist_eff,
    title_case = TRUE,
    verbose = FALSE
  ), "Catnip")
  ## German
  expect_equal(generate_passphrase(
    tokens = c("34454"), wordlist = wordlist_de,
    title_case = TRUE, verbose = FALSE
  ), "Katze")
  expect_equal(generate_passphrase(
    tokens = c("34454"), wordlist = wordlist_de,
    title_case = FALSE, verbose = FALSE
  ), "katze")
  ## Spanish
  expect_equal(generate_passphrase(
    tokens = c("35622"), wordlist = wordlist_es,
    title_case = TRUE, verbose = FALSE
  ), "Gato")
  expect_equal(generate_passphrase(
    tokens = c("35622"), wordlist = wordlist_es,
    title_case = FALSE, verbose = FALSE
  ), "gato")
  ## French
  expect_equal(generate_passphrase(
    tokens = c("21631"), wordlist = wordlist_fr,
    title_case = TRUE, verbose = FALSE
  ), "Chaton")
  expect_equal(generate_passphrase(
    tokens = c("21631"), wordlist = wordlist_fr,
    title_case = FALSE, verbose = FALSE
  ), "chaton")
  ## Italian (no cat :( )
  expect_equal(generate_passphrase(
    tokens = c("32141"), wordlist = wordlist_it,
    title_case = TRUE, verbose = FALSE
  ), "Gelato")
  expect_equal(generate_passphrase(
    tokens = c("32141"), wordlist = wordlist_it,
    title_case = FALSE, verbose = FALSE
  ), "gelato")
  ## Japanese
  expect_equal(generate_passphrase(
    tokens = c("44565"), wordlist = wordlist_jp,
    title_case = TRUE, verbose = FALSE
  ), "Neko")
  expect_equal(generate_passphrase(
    tokens = c("44565"), wordlist = wordlist_jp,
    title_case = FALSE, verbose = FALSE
  ), "neko")
  ## Dutch
  # expect_equal(generate_passphrase(tokens = c("53431"), wordlist = wordlist_nl,
  # title_case = TRUE, verbose = FALSE), "Kat")
  # expect_equal(generate_passphrase(tokens = c("53431"), wordlist = wordlist_nl,
  # title_case = FALSE, verbose = FALSE), "kat")
  ## Swedish
  expect_equal(generate_passphrase(
    tokens = c("33343"), wordlist = wordlist_sv,
    title_case = TRUE, verbose = FALSE
  ), "Katt")
  expect_equal(generate_passphrase(
    tokens = c("33343"), wordlist = wordlist_sv,
    title_case = FALSE, verbose = FALSE
  ), "katt")
})
# Every wordlist's "word" column must be NA-free.
test_that("no NAs in the wordlists", {
  ## Bug fix: the original body computed identical(sum(is.na(...)), 0L)
  ## inside lapply() but never passed the result to an expectation, so this
  ## test could never fail. Use expect_identical() so each wordlist is
  ## actually asserted.
  lapply(
    list(
      wordlist_de,
      wordlist_en,
      wordlist_eff,
      wordlist_es,
      wordlist_fr,
      wordlist_it,
      wordlist_jp,
      wordlist_sv
    ),
    function(x) {
      expect_identical(sum(is.na(x[["word"]])), 0L)
    }
  )
})
| /tests/testthat/test-rice.R | no_license | fmichonneau/riceware | R | false | false | 5,345 | r | context("check that data set is loaded/available")
test_that("check correct dimensions", {
expect_equal(nrow(wordlist_en), 7776)
expect_equal(nrow(wordlist_de), 7776)
expect_equal(nrow(wordlist_es), 7776)
expect_equal(nrow(wordlist_fr), 7776)
expect_equal(nrow(wordlist_it), 7776)
expect_equal(nrow(wordlist_jp), 7776)
## expect_equal(nrow(wordlist_nl), 7776)
expect_equal(nrow(wordlist_sv), 7776)
})
context("test token")
test_that("check length", {
expect_false(check_token(character(0)))
expect_false(check_token(c("11111", "22222")))
expect_false(check_token("111"))
expect_false(check_token("111111"))
expect_true(check_token("11111"))
})
test_that("check digits", {
expect_false(check_token("A1111"))
expect_false(check_token(NA))
expect_false(check_token(NULL))
expect_false(check_token("01234"))
expect_true(check_token("12345"))
expect_true(check_token("23456"))
})
not_working <- function() {
if (capabilities()["http/ftp"]) { ## Are we on a platform that allows internet access?
if (.Platform$OS.type == "unix") { ## If unix, then we can use nsl to check connection
res <- is.null(utils::nsl("random.org"))
} else { ## otherwise just fail
res <- TRUE
}
} else {
res <- TRUE
}
res
}
check_random_org <- function() {
if (not_working()) {
skip("can't connect to random.org")
}
}
context("generate token")
test_that("correct length and pseudorandom", {
test_len <- lapply(1:10, generate_token, method = "pseudo")
expect_equal(1:10, sapply(test_len, length))
expect_equal(1, length(generate_token(1)))
})
test_that("random numbers", {
skip_on_cran()
check_random_org()
expect_equal(1, length(generate_token(n_words = 1, method = "random")))
})
context("match token")
test_that("correct matching", {
expect_equal(match_token("11111", wordlist = wordlist_en, title_case = TRUE), "A")
expect_equal(match_token("11111", wordlist = wordlist_en, title_case = FALSE), "a")
expect_equal(match_token("16234", wordlist = wordlist_en, title_case = TRUE), "Cat")
expect_equal(match_token("16234", wordlist = wordlist_en, title_case = FALSE), "cat")
})
test_that("fails if incorrect token", {
expect_error(match_token("cat"), "invalid token")
})
context("generate passphrase")
test_that("test passphrase", {
expect_message(generate_passphrase(
tokens = c("36156", "35646"),
wordlist = wordlist_en,
title_case = FALSE,
verbose = TRUE
), "lava lamp")
expect_equal(generate_passphrase(
tokens = c("36156", "35646"),
wordlist = wordlist_en,
title_case = FALSE,
verbose = FALSE
), "lavalamp")
expect_equal(generate_passphrase(
tokens = c("36156", "35646"),
wordlist = wordlist_en,
title_case = TRUE,
verbose = FALSE
), "LavaLamp")
## EFF's list
expect_equal(generate_passphrase(
tokens = c("15233"),
wordlist = wordlist_eff,
title_case = TRUE,
verbose = FALSE
), "Catnip")
## German
expect_equal(generate_passphrase(
tokens = c("34454"), wordlist = wordlist_de,
title_case = TRUE, verbose = FALSE
), "Katze")
expect_equal(generate_passphrase(
tokens = c("34454"), wordlist = wordlist_de,
title_case = FALSE, verbose = FALSE
), "katze")
## Spanish
expect_equal(generate_passphrase(
tokens = c("35622"), wordlist = wordlist_es,
title_case = TRUE, verbose = FALSE
), "Gato")
expect_equal(generate_passphrase(
tokens = c("35622"), wordlist = wordlist_es,
title_case = FALSE, verbose = FALSE
), "gato")
## French
expect_equal(generate_passphrase(
tokens = c("21631"), wordlist = wordlist_fr,
title_case = TRUE, verbose = FALSE
), "Chaton")
expect_equal(generate_passphrase(
tokens = c("21631"), wordlist = wordlist_fr,
title_case = FALSE, verbose = FALSE
), "chaton")
## Italian (no cat :( )
expect_equal(generate_passphrase(
tokens = c("32141"), wordlist = wordlist_it,
title_case = TRUE, verbose = FALSE
), "Gelato")
expect_equal(generate_passphrase(
tokens = c("32141"), wordlist = wordlist_it,
title_case = FALSE, verbose = FALSE
), "gelato")
## Japanese
expect_equal(generate_passphrase(
tokens = c("44565"), wordlist = wordlist_jp,
title_case = TRUE, verbose = FALSE
), "Neko")
expect_equal(generate_passphrase(
tokens = c("44565"), wordlist = wordlist_jp,
title_case = FALSE, verbose = FALSE
), "neko")
## Dutch
# expect_equal(generate_passphrase(tokens = c("53431"), wordlist = wordlist_nl,
# title_case = TRUE, verbose = FALSE), "Kat")
# expect_equal(generate_passphrase(tokens = c("53431"), wordlist = wordlist_nl,
# title_case = FALSE, verbose = FALSE), "kat")
## Swedish
expect_equal(generate_passphrase(
tokens = c("33343"), wordlist = wordlist_sv,
title_case = TRUE, verbose = FALSE
), "Katt")
expect_equal(generate_passphrase(
tokens = c("33343"), wordlist = wordlist_sv,
title_case = FALSE, verbose = FALSE
), "katt")
})
test_that("no NAs in the wordlists", {
lapply(
list(
wordlist_de,
wordlist_en,
wordlist_eff,
wordlist_es,
wordlist_fr,
wordlist_it,
wordlist_jp,
wordlist_sv
),
function(x) {
identical(sum(is.na(x[["word"]])), 0L)
}
)
})
|
library(here)
library(ggplot2)
library(cowplot)
library(distances)
library(parallel)
library(foreach)
library(doSNOW)
source(here("code_memory_model", "rscripts", "analysis_tools.R"))
######################
##### SOME FUNS ######
######################
# Linearly rescale a numeric vector onto the interval [-1, 1].
scale.personality <- function(x) {
  rng <- range(x)
  2 * (x - rng[1]) / (rng[2] - rng[1]) - 1
}
# Scale values to proportions of a maximum.  With the default maxx = NA the
# vector is divided by its own maximum; otherwise by the supplied reference
# maximum (e.g. observed / max(simulated)).
# BUG FIX: `if (is.na(maxx))` errors in R >= 4.2 whenever maxx has length > 1;
# guard with an explicit scalar-length check (identical behavior for the
# scalar maxx values used throughout this script).
scale.data <- function(x, maxx = NA) {
  if (length(maxx) == 1L && is.na(maxx)) {
    x / max(x)
  } else {
    x / maxx
  }
}
# Normalized root-mean-square error of x against reference y (RMSE divided
# by mean(y), matching the "#NRMSE" label; the function name understates it).
# Undefined when mean(y) == 0.
RMSE <- function(x, y) {
  err <- x - y
  sqrt(mean(err^2)) / mean(y)
}
# Coefficient of determination as the squared Pearson correlation of x and y.
Rsq <- function(x, y) {
  cor(x, y)^2
}
##### SOME GENERAL DATASETS ####
# 50-point grids over the scaled trait range [-1, 1]; used as newdata for the
# empirical (boldness.scaled) and simulated (ALPHA.scaled) model predictions.
predict.seq <- data.frame(boldness.scaled = seq(-1, 1, length.out = 50))
predict.seq.alpha <- data.frame(ALPHA.scaled = seq(-1, 1, length.out = 50))
##### REAL DATA #####
# read data from Schirmer et al. 2019 (can't be provided here)
# sheet 2: kernel home-range areas; "allTracks": movement track lengths;
# sheet 3: home-range overlaps.  endRow = 37 truncates trailing sheet rows.
real.area <- xlsx::read.xlsx(here("data", "SpaceUse_UM16.xlsx"), sheetIndex = 2,endRow = 37, stringsAsFactors = F)
real.distmoved <- xlsx::read.xlsx(here("data", "MovementTracks_Uckermark2016.xlsx"), sheetName = "allTracks",endRow = 37, stringsAsFactors = F)
real.ol <- xlsx::read.xlsx(here("data", "SpaceUse_UM16.xlsx"), sheetIndex = 3,endRow = 37, stringsAsFactors = F)
# exploratory scatter: overlap vs. boldness across all species
plot(real.ol$intraspecific_OL ~ real.ol$Boldness)
# merge kernel areas, overlaps and track lengths
real.merged <- Reduce(function(x, y) merge(x = x,y = y, by = c("PIT", "Species", "Grid"), all = T),
list(real.area, real.distmoved, real.ol))
# exploratory scatter restricted to the "SFM" species code
plot(intraspecific_OL ~ Boldness, real.merged[real.merged$Species == "SFM",])
# Boldness appears as Boldness.x / Boldness.y after merging two sheets that
# both carry it; backfill missing .x values from .y to get one full column.
real.merged$Boldness.x[is.na(real.merged$Boldness.x)] <- real.merged$Boldness.y[is.na(real.merged$Boldness.x)]
# keep only the "BV" species (presumably bank voles -- confirm)
real.merged <- real.merged[real.merged$Species == "BV",]
# Choose columns of interest and rename them to the metric codes used for the
# simulations.  Remove one individual with an extreme boldness score that can
# hardly be used for general pattern analysis; this individual was pregnant
# and may therefore have shown some other biological signal.
real.data <- setNames(real.merged[, c("Boldness.x", "Kernel50", "Kernel95", "Track_length", "intraspecific_OL")], c("Boldness", "SIZE50", "SIZE95", "DISTMOVED", "HR50"))
real.data <- real.data[real.data$Boldness < 10, ]
# BUG FIX: the original rm() also listed "all.merged", which is never created
# in this script and triggered an "object ... not found" warning on every run.
rm(list = c("real.area", "real.ol", "real.distmoved", "real.merged"))
real.data$boldness.scaled <- scale.personality(real.data$Boldness)
# create predictions from simple linear models
# Quadratic terms for the two home-range sizes, linear fits for overlap and
# distance moved.  Both copies start identical; real.model.scaled is rescaled
# in the loop below while real.model keeps the raw predictions.
real.model.scaled <- real.model <- data.frame("Boldness" = predict.seq,
"SIZE50" = predict(lm(SIZE50 ~ boldness.scaled + I(boldness.scaled^2), real.data), newdata = predict.seq),
"SIZE95" = predict(lm(SIZE95 ~ boldness.scaled + I(boldness.scaled^2), real.data), newdata = predict.seq),
"HR50" = predict(lm(HR50 ~ boldness.scaled, real.data), newdata = predict.seq),
"DISTMOVED" = predict(lm(DISTMOVED ~ boldness.scaled, real.data), newdata = predict.seq))
real.data.scaled <- real.data
# Rescale data and model predictions to proportions of the fitted maximum.
# HR50 (overlap) is deliberately left unscaled (presumably because overlap is
# already a proportion -- confirm).
for(metric in names(real.model)[-1]){
real.data.scaled[, metric] <- scale.data(real.data[,metric], maxx = max(real.model[,metric]))
if(metric != "HR50") real.model.scaled[, metric] <- scale.data(real.model[,metric])
if(metric == "HR50"){
real.model.scaled[, metric] <- real.model[,metric]
real.data.scaled[, metric] <- real.data[,metric]
}
}
# NOTE(review): mtext() annotates whichever base-graphics plot is currently
# open (the last exploratory scatter above) -- this looks like a leftover.
mtext(side = 1, text = "Boldness Scores")
# interactive check of column names; no effect in a scripted run
names(real.data)
# Build side-by-side panels comparing scaled vs. unscaled empirical metrics.
plot.sc <- real.data.scaled
plot.nsc <- real.data
plot.sc$Boldness <- plot.sc$boldness.scaled
names(plot.sc)[1:5] <- names(plot.nsc)[1:5] <- c("Boldness", "Home Range Size (Kernel Density; 50 %)", "Home Range Size (Kernel Density; 95 %)", "Total Distance Moved", "Mean Home Range Overlap (Kernel Density; 50 %)")
# stack unscaled and scaled copies (column 6, boldness.scaled, dropped), tag
# each half with a scaled flag, then melt to long format for faceting
mdf <- reshape2::melt(data.frame(rbind(plot.nsc[,-6], plot.sc[,-6]), scaled = rep(c(F, T), each = nrow(plot.nsc))), id.vars = c("Boldness", "scaled"))
new.levels <- c("Home range area\n(50 % kernel)", "Home range area\n(95 % kernel)", "Total distance moved", "Home range\noverlap (95 % kernel)")
levels(mdf$variable) <- new.levels
scaled <- ggplot(mdf[mdf$scaled,], aes(x = Boldness, y = value))+
geom_point()+
facet_wrap( ~ variable, scales = "free", nc = 1)+
ggtitle("Scaled")+
theme_classic()+
ylab("Home range metrics")
not.scaled <- ggplot(mdf[!mdf$scaled,], aes(x = Boldness, y = value))+
geom_point()+
facet_wrap( ~ variable, scales = "free", nc = 1)+
ggtitle("Not scaled")+
theme_classic()+
ylab("")
scaled.vs.notscaled<- gridExtra::grid.arrange(not.scaled, scaled, ncol = 2)
ggsave(here("figs", "nonscaled_vs_scaled_real_data.png"), scaled.vs.notscaled, width = 6, height = 8)
# Base-graphics 2x2 panel: scaled data with the fitted model curves overlaid.
png(here("figs", "scaled_data_linear_model.png"))
par(mfrow = c(2,2))
plot(SIZE50 ~ boldness.scaled, real.data.scaled, ylab = "Home Range Size (Kernel Density; 50 %)", xlab = "")
lines(SIZE50 ~ boldness.scaled, real.model.scaled)
plot(SIZE95 ~ boldness.scaled, real.data.scaled, ylab = "Home Range Size (Kernel Density; 95 %)", xlab = "")
lines(SIZE95 ~ boldness.scaled, real.model.scaled)
plot(DISTMOVED ~ boldness.scaled, real.data.scaled, ylab = "Total Distance Moved", xlab = "")
lines(DISTMOVED ~ boldness.scaled, real.model.scaled)
plot(HR50 ~ boldness.scaled, real.data.scaled, ylab = "Mean Home Range Overlap (Kernel Density; 50 %)", xlab = "")
lines(HR50 ~ boldness.scaled, real.model.scaled)
dev.off()
########################
#### SIMULATED DATA ####
########################
#read simulated data from LHS (n = 1,000)
# read.sim() comes from analysis_tools.R, sourced at the top of this script
ols <- read.sim(day = "200514", exp.name = "HRanalysis_processed_1000_fit", nofcores = 1)
# split simulation runs into single list elements
split.ols <- split(ols, ols$ID)
# predict the fit of simulated data using the same model as for observed data
# NOTE(review): real.boldness.scaled appears unused below -- confirm before removing.
real.boldness.scaled <- data.frame(ALPHA.scaled = real.data.scaled$boldness.scaled)
# For every run: fit the same model forms as for the empirical data, predict
# over the shared ALPHA grid, and score each metric's NRMSE against the scaled
# empirical model predictions.  Parameter values are carried along per run.
sim.data.scaled <- do.call(rbind,
lapply(split.ols, function(x){
# sign flip so scaled ALPHA runs in the same direction as boldness
# (assumes low alpha corresponds to high boldness -- TODO confirm)
x$ALPHA.scaled <- -scale.personality(x$ALPHA)
SIZE50.model <- lm(SIZE50 ~ ALPHA.scaled + I(ALPHA.scaled^2), x)
SIZE50.pred <- scale.data(predict(SIZE50.model, newdata = predict.seq.alpha))
SIZE95.model <- lm(SIZE95 ~ ALPHA.scaled + I(ALPHA.scaled^2), x)
SIZE95.pred <- scale.data(predict(SIZE95.model, newdata = predict.seq.alpha))
DISTMOVED.model <- lm(DISTMOVED ~ ALPHA.scaled, x)
DISTMOVED.pred <- scale.data(predict(DISTMOVED.model, newdata = predict.seq.alpha))
# NOTE(review): the response here is HR95 although results are stored as
# HR50; the final figure labels this metric "overlap (95 % kernel)",
# which suggests the *name* is the leftover, not the model -- confirm.
HR50.model <- lm(HR95 ~ ALPHA.scaled, x)
HR50.pred <- predict(HR50.model, newdata = predict.seq.alpha)
pred <- data.frame(SIZE50 = SIZE50.pred,
SIZE50.RMSE = RMSE(SIZE50.pred, real.model.scaled$SIZE50),
SIZE95 = SIZE95.pred,
SIZE95.RMSE = RMSE(SIZE95.pred, real.model.scaled$SIZE95),
DISTMOVED = DISTMOVED.pred,
DISTMOVED.RMSE = RMSE(DISTMOVED.pred, real.model.scaled$DISTMOVED),
HR50 = HR50.pred,
HR50.RMSE = RMSE(HR50.pred, real.model.scaled$HR50),
PATCHINESS = x$patchiness[1],
N_INDS = x$n_inds[1],
RESOURCE_COVER = x$resource_cover[1],
TRAIT_MEAN = x$trait_mean[1],
ITV = x$ITV[1],
ALPHA.scaled = predict.seq.alpha)
return(pred)
}
)
)
################################
##### Analyze fit to data ######
################################
# interactive inspection; no effect in a scripted run
names(sim.data.scaled)
summary(sim.data.scaled)
# get the parameter values that are lower than 90 % of RMSE values for each metric
# NOTE(review): apply() below uses the identity function, so these columns are
# the raw NRMSE values, not ranks, and `rank` is their sum.  The comment above
# and the ".rank" column names do not match the computation -- confirm intent.
ranks <- setNames(data.frame(apply(sim.data.scaled[,c(2,4,6, 8)], 2, function(x) x)), c("SIZE50.rank", "SIZE95.rank", "DISTMOVED.rank", "HR50.rank"))
sim <- data.frame(sim.data.scaled, data.frame(ranks, rank = apply(ranks, 1, sum)))
############################################################
##### plot the parameter distribution / correlations for the top 25 % #####
############################################################
# keep the best-fitting quarter of parameter sets (lowest summed NRMSE)
gg.sim.params <- sim[sim$rank < quantile(sim$rank, .25),]
param.cols <- c("N_INDS", "ITV", "TRAIT_MEAN", "PATCHINESS", "RESOURCE_COVER")
# keep a wide copy for the correlation plot; the melted copy feeds the
# histogram / density ggplots below
gg.sim.params.wide <- gg.sim.params[, param.cols]
gg.sim.params <- reshape2::melt(gg.sim.params.wide)
#correlation between parameters
png(here("figs", "parameter_cor.png"))
par(oma = c(0,2,0,0))
# BUG FIX: after melt() the frame has only (variable, value) columns, so the
# original gg.sim.params[,9:13] indexed out of bounds and errored; correlate
# the retained wide parameter columns instead.
psych::cor.plot(gg.sim.params.wide, diag = F, stars = F, scale = T, numbers = T, cex = .8)
dev.off()
# parameter distribution of all paramteters
parameter_dist <- ggplot(gg.sim.params, aes(x = value, fill = variable))+
geom_histogram(bins = 10, fill = "lightgray", color = "gray")+
facet_wrap(~as.factor(variable), scales = "free")+
xlab("Paramter value")+
ylab("Count")+
theme_classic()
# NOTE(review): no plot object is passed, so ggsave() writes the most recently
# *displayed* plot, which is not necessarily parameter_dist in a scripted run;
# consider passing parameter_dist explicitly.
ggsave(here("figs", "parameter_dist.png"), width = 6.9, height = 5.5)
ggsave(here("figs", "parameter_dist.pdf"), width = 6.9, height = 5.5)
# parameter distribution of ITV
ITV_dist <- ggplot(gg.sim.params[gg.sim.params$variable == "ITV",], aes(x = value, fill = variable))+
geom_density(fill = "lightgray", color = "gray")+
scale_fill_brewer(palette = "Accent")+
xlab("ITV")+
theme(text = element_text(size = 12))+
geom_vline(xintercept = mean(gg.sim.params[gg.sim.params$variable == "ITV",]$value), linetype = "dashed")
# unassigned histogram variant (median line instead of mean); displayed
# interactively only and never saved under its own name
ggplot(gg.sim.params[gg.sim.params$variable == "ITV",], aes(x = value, fill = variable))+
geom_histogram(fill = "lightgray", color = "gray")+
scale_fill_brewer(palette = "Accent")+
xlab("ITV")+
theme(text = element_text(size = 12))+
geom_vline(xintercept = median(gg.sim.params[gg.sim.params$variable == "ITV",]$value), linetype = "dashed")
ggsave(here("figs", "ITV_dist.png"), ITV_dist, width = 2, height = 2)
ggsave(here("figs", "ITV_dist.pdf"), ITV_dist, width = 2, height = 2)
###########################################################################
##### format the 4 metrics and the error to a ggplot-readable layout #####
###########################################################################
# convert layout of simulated data
# one ID per simulation run: each run contributed NROW(predict.seq) prediction rows
sim$ID <- rep(seq(NROW(sim) / NROW(predict.seq)), each = NROW(predict.seq))
# interactive inspection; no effect in a scripted run
names(sim)
gg.metrics <- reshape2::melt(sim[,c("SIZE50", "SIZE95", "DISTMOVED", "HR50", "ALPHA.scaled", "ID", "ITV", "rank")], id.vars = c("ID", "ALPHA.scaled", "ITV", "rank"), variable.name = "metrics")
gg.error <- reshape2::melt(sim[,c("SIZE50.RMSE", "SIZE95.RMSE", "DISTMOVED.RMSE", "HR50.RMSE", "ID")], id.vars = c("ID"), variable.name = "error.metric", value.name = "RMSE")
# add column which informs about whether parameter set is within the 25-% quantile
# NOTE(review): cbind assumes gg.metrics and gg.error rows align; this holds
# because both melts order by variable then row and the metric/error column
# orders correspond (SIZE50..HR50), but it is fragile to reordering.
gg.merge <- cbind(gg.metrics, gg.error[,-1])
gg.merge$q <- "all"
gg.merge$q[quantile(gg.merge$rank, .25) > gg.merge$rank] <- "25 %"
gg.merge$rank <- NULL
# convert layout of empirical data
gg.real <- reshape2::melt(real.data.scaled[, -1], id.vars = "boldness.scaled", variable.name = "metrics")
new.levels <- c("Home range area (50 % kernel)", "Home range area (95 % kernel)", "Total distance moved", "Home range overlap (95 % kernel)")
levels(gg.merge$metrics) <- levels(gg.real$metrics) <- new.levels
################################################
#### visualize fit of the top 25 % to data ####
################################################
# Layered figure: best-25 % simulated curves, the empirical quadratic (sizes)
# and linear (distance, overlap) fits -- drawn once without and once with the
# SE ribbon -- plus the raw empirical points.
all.fitted <-
ggplot()+
geom_line(data = gg.merge[gg.merge$q != "all",], aes(x = ALPHA.scaled, y = value, group = ID, color = q), alpha = 1)+
geom_smooth(data = gg.real[gg.real$metrics %in% new.levels[1:2], ], aes(x = boldness.scaled, y = value), alpha = .1, color = "black", se = F, method = "lm", formula = y ~ x + I(x^2))+
geom_smooth(data = gg.real[gg.real$metrics %in% new.levels[3:4], ], aes(x = boldness.scaled, y = value), alpha = .1, color = "black", se = F, method = "lm", formula = y ~ x)+
geom_smooth(data = gg.real[gg.real$metrics %in% new.levels[1:2], ], aes(x = boldness.scaled, y = value), alpha = .5, color = "black", se = T, method = "lm", formula = y ~ x + I(x^2), fill = "cornsilk2")+
geom_smooth(data = gg.real[gg.real$metrics %in% new.levels[3:4], ], aes(x = boldness.scaled, y = value), alpha = .5, color = "black", se = T, method = "lm", formula = y ~ x, fill = "cornsilk2")+
geom_point(data = gg.real, aes(x = boldness.scaled, y = value), color = "black")+
scale_color_manual(values = c("cadetblue", "cadetblue4"))+
facet_wrap(~metrics)+
theme_classic()+
xlab(expression(paste("Scaled ", alpha, " / boldness score")))+
ylab(expression(paste("Scaled home range metrics")))+
theme(text = element_text(size = 10), legend.position = "none")
####################
### SAVE FIGURES ###
####################
# figure dimensions in cm (journal column widths noted inline below)
widths = 16.6
heights = 10
ggsave(here("figs", "all.fitted.png") ,all.fitted, width = widths, height = heights, units = "cm", dpi = 600) # 8.2, 11.0, 17.3
ggsave(here("figs", "all.fitted.pdf") ,all.fitted, width = widths, height = heights, units = "cm", dpi = 600)
ggsave(here("figs", "all.fitted.tiff") ,all.fitted, width = widths, height = heights, units = "cm", dpi = 600)
| /code_memory_model/rscripts/6_plot_fit2data.R | no_license | aMilles/ITV_model | R | false | false | 13,060 | r | library(here)
library(ggplot2)
library(cowplot)
library(distances)
library(parallel)
library(foreach)
library(doSNOW)
source(here("code_memory_model", "rscripts", "analysis_tools.R"))
######################
##### SOME FUNS ######
######################
# scale values to a range of - 1 to 1
scale.personality <- function(x) (2* (x - min(x))/diff(range(x))) - 1
#scale values to relative proportions of the maximum, the maximum may be the maximum of the vector or the maximum of some vector (e.g. observed / max(simulated))
scale.data <- function(x, maxx = NA) if(is.na(maxx)){x/max(x)}else{x/maxx}
#NRMSE
RMSE <- function(x, y) return(sqrt(mean((x - y)^2))/mean(y))
#Rsq
Rsq <- function(x, y) return(cor(x, y )^2)
##### SOME GENERAL DATASETS ####
predict.seq <- data.frame(boldness.scaled = seq(-1, 1, length.out = 50))
predict.seq.alpha <- data.frame(ALPHA.scaled = seq(-1, 1, length.out = 50))
##### REAL DATA #####
# read data from Schirmer et al. 2019 (can't be provided here)
real.area <- xlsx::read.xlsx(here("data", "SpaceUse_UM16.xlsx"), sheetIndex = 2,endRow = 37, stringsAsFactors = F)
real.distmoved <- xlsx::read.xlsx(here("data", "MovementTracks_Uckermark2016.xlsx"), sheetName = "allTracks",endRow = 37, stringsAsFactors = F)
real.ol <- xlsx::read.xlsx(here("data", "SpaceUse_UM16.xlsx"), sheetIndex = 3,endRow = 37, stringsAsFactors = F)
plot(real.ol$intraspecific_OL ~ real.ol$Boldness)
# merge kernel areas, overlaps and track lengths
real.merged <- Reduce(function(x, y) merge(x = x,y = y, by = c("PIT", "Species", "Grid"), all = T),
list(real.area, real.distmoved, real.ol))
plot(intraspecific_OL ~ Boldness, real.merged[real.merged$Species == "SFM",])
real.merged$Boldness.x[is.na(real.merged$Boldness.x)] <- real.merged$Boldness.y[is.na(real.merged$Boldness.x)]
real.merged <- real.merged[real.merged$Species == "BV",]
# choose columns of interest, remove one individual that has an extreme boldness and hence can hardly be used for general pattern analysis. This individual was pregnant and may, hence, have should some other biological signal.
real.data <- setNames(real.merged[, c("Boldness.x", "Kernel50", "Kernel95", "Track_length", "intraspecific_OL")], c("Boldness", "SIZE50", "SIZE95", "DISTMOVED", "HR50"))
real.data <- real.data[real.data$Boldness < 10, ]
rm(list = c("all.merged", "real.area", "real.ol", "real.distmoved", "real.merged"))
real.data$boldness.scaled <- scale.personality(real.data$Boldness)
# create predictions from simple linear models
real.model.scaled <- real.model <- data.frame("Boldness" = predict.seq,
"SIZE50" = predict(lm(SIZE50 ~ boldness.scaled + I(boldness.scaled^2), real.data), newdata = predict.seq),
"SIZE95" = predict(lm(SIZE95 ~ boldness.scaled + I(boldness.scaled^2), real.data), newdata = predict.seq),
"HR50" = predict(lm(HR50 ~ boldness.scaled, real.data), newdata = predict.seq),
"DISTMOVED" = predict(lm(DISTMOVED ~ boldness.scaled, real.data), newdata = predict.seq))
real.data.scaled <- real.data
for(metric in names(real.model)[-1]){
real.data.scaled[, metric] <- scale.data(real.data[,metric], maxx = max(real.model[,metric]))
if(metric != "HR50") real.model.scaled[, metric] <- scale.data(real.model[,metric])
if(metric == "HR50"){
real.model.scaled[, metric] <- real.model[,metric]
real.data.scaled[, metric] <- real.data[,metric]
}
}
mtext(side = 1, text = "Boldness Scores")
names(real.data)
plot.sc <- real.data.scaled
plot.nsc <- real.data
plot.sc$Boldness <- plot.sc$boldness.scaled
names(plot.sc)[1:5] <- names(plot.nsc)[1:5] <- c("Boldness", "Home Range Size (Kernel Density; 50 %)", "Home Range Size (Kernel Density; 95 %)", "Total Distance Moved", "Mean Home Range Overlap (Kernel Density; 50 %)")
mdf <- reshape2::melt(data.frame(rbind(plot.nsc[,-6], plot.sc[,-6]), scaled = rep(c(F, T), each = nrow(plot.nsc))), id.vars = c("Boldness", "scaled"))
new.levels <- c("Home range area\n(50 % kernel)", "Home range area\n(95 % kernel)", "Total distance moved", "Home range\noverlap (95 % kernel)")
levels(mdf$variable) <- new.levels
scaled <- ggplot(mdf[mdf$scaled,], aes(x = Boldness, y = value))+
geom_point()+
facet_wrap( ~ variable, scales = "free", nc = 1)+
ggtitle("Scaled")+
theme_classic()+
ylab("Home range metrics")
not.scaled <- ggplot(mdf[!mdf$scaled,], aes(x = Boldness, y = value))+
geom_point()+
facet_wrap( ~ variable, scales = "free", nc = 1)+
ggtitle("Not scaled")+
theme_classic()+
ylab("")
scaled.vs.notscaled<- gridExtra::grid.arrange(not.scaled, scaled, ncol = 2)
ggsave(here("figs", "nonscaled_vs_scaled_real_data.png"), scaled.vs.notscaled, width = 6, height = 8)
png(here("figs", "scaled_data_linear_model.png"))
par(mfrow = c(2,2))
plot(SIZE50 ~ boldness.scaled, real.data.scaled, ylab = "Home Range Size (Kernel Density; 50 %)", xlab = "")
lines(SIZE50 ~ boldness.scaled, real.model.scaled)
plot(SIZE95 ~ boldness.scaled, real.data.scaled, ylab = "Home Range Size (Kernel Density; 95 %)", xlab = "")
lines(SIZE95 ~ boldness.scaled, real.model.scaled)
plot(DISTMOVED ~ boldness.scaled, real.data.scaled, ylab = "Total Distance Moved", xlab = "")
lines(DISTMOVED ~ boldness.scaled, real.model.scaled)
plot(HR50 ~ boldness.scaled, real.data.scaled, ylab = "Mean Home Range Overlap (Kernel Density; 50 %)", xlab = "")
lines(HR50 ~ boldness.scaled, real.model.scaled)
dev.off()
########################
#### SIMULATED DATA ####
########################
#read simulated data from LHS (n = 1,000)
ols <- read.sim(day = "200514", exp.name = "HRanalysis_processed_1000_fit", nofcores = 1)
# split simulation runs into single list elements
split.ols <- split(ols, ols$ID)
# predict the fit of simulated data using the same model as for observed data
real.boldness.scaled <- data.frame(ALPHA.scaled = real.data.scaled$boldness.scaled)
sim.data.scaled <- do.call(rbind,
lapply(split.ols, function(x){
x$ALPHA.scaled <- -scale.personality(x$ALPHA)
SIZE50.model <- lm(SIZE50 ~ ALPHA.scaled + I(ALPHA.scaled^2), x)
SIZE50.pred <- scale.data(predict(SIZE50.model, newdata = predict.seq.alpha))
SIZE95.model <- lm(SIZE95 ~ ALPHA.scaled + I(ALPHA.scaled^2), x)
SIZE95.pred <- scale.data(predict(SIZE95.model, newdata = predict.seq.alpha))
DISTMOVED.model <- lm(DISTMOVED ~ ALPHA.scaled, x)
DISTMOVED.pred <- scale.data(predict(DISTMOVED.model, newdata = predict.seq.alpha))
HR50.model <- lm(HR95 ~ ALPHA.scaled, x)
HR50.pred <- predict(HR50.model, newdata = predict.seq.alpha)
pred <- data.frame(SIZE50 = SIZE50.pred,
SIZE50.RMSE = RMSE(SIZE50.pred, real.model.scaled$SIZE50),
SIZE95 = SIZE95.pred,
SIZE95.RMSE = RMSE(SIZE95.pred, real.model.scaled$SIZE95),
DISTMOVED = DISTMOVED.pred,
DISTMOVED.RMSE = RMSE(DISTMOVED.pred, real.model.scaled$DISTMOVED),
HR50 = HR50.pred,
HR50.RMSE = RMSE(HR50.pred, real.model.scaled$HR50),
PATCHINESS = x$patchiness[1],
N_INDS = x$n_inds[1],
RESOURCE_COVER = x$resource_cover[1],
TRAIT_MEAN = x$trait_mean[1],
ITV = x$ITV[1],
ALPHA.scaled = predict.seq.alpha)
return(pred)
}
)
)
################################
##### Analyze fit to data ######
################################
names(sim.data.scaled)
summary(sim.data.scaled)
# get the parameter values that are lower than 90 % of RMSE values for each metric
ranks <- setNames(data.frame(apply(sim.data.scaled[,c(2,4,6, 8)], 2, function(x) x)), c("SIZE50.rank", "SIZE95.rank", "DISTMOVED.rank", "HR50.rank"))
sim <- data.frame(sim.data.scaled, data.frame(ranks, rank = apply(ranks, 1, sum)))
############################################################
##### plot the parameter distribution / correlations for the top 25 % #####
############################################################
gg.sim.params <- sim[sim$rank < quantile(sim$rank, .25),]
gg.sim.params <- reshape2::melt(gg.sim.params[,c("N_INDS", "ITV", "TRAIT_MEAN", "PATCHINESS", "RESOURCE_COVER")])
#correlation between parameters
png(here("figs", "parameter_cor.png"))
par(oma = c(0,2,0,0))
psych::cor.plot(gg.sim.params[,9:13], diag = F, stars = F, scale = T, numbers = T, cex = .8)
dev.off()
# parameter distribution of all paramteters
parameter_dist <- ggplot(gg.sim.params, aes(x = value, fill = variable))+
geom_histogram(bins = 10, fill = "lightgray", color = "gray")+
facet_wrap(~as.factor(variable), scales = "free")+
xlab("Paramter value")+
ylab("Count")+
theme_classic()
ggsave(here("figs", "parameter_dist.png"), width = 6.9, height = 5.5)
ggsave(here("figs", "parameter_dist.pdf"), width = 6.9, height = 5.5)
# parameter distribution of ITV
ITV_dist <- ggplot(gg.sim.params[gg.sim.params$variable == "ITV",], aes(x = value, fill = variable))+
geom_density(fill = "lightgray", color = "gray")+
scale_fill_brewer(palette = "Accent")+
xlab("ITV")+
theme(text = element_text(size = 12))+
geom_vline(xintercept = mean(gg.sim.params[gg.sim.params$variable == "ITV",]$value), linetype = "dashed")
ggplot(gg.sim.params[gg.sim.params$variable == "ITV",], aes(x = value, fill = variable))+
geom_histogram(fill = "lightgray", color = "gray")+
scale_fill_brewer(palette = "Accent")+
xlab("ITV")+
theme(text = element_text(size = 12))+
geom_vline(xintercept = median(gg.sim.params[gg.sim.params$variable == "ITV",]$value), linetype = "dashed")
ggsave(here("figs", "ITV_dist.png"), ITV_dist, width = 2, height = 2)
ggsave(here("figs", "ITV_dist.pdf"), ITV_dist, width = 2, height = 2)
###########################################################################
##### format the 4 metrics and the error to a ggplot-readable layout #####
###########################################################################
# convert layout of simulated data
sim$ID <- rep(seq(NROW(sim) / NROW(predict.seq)), each = NROW(predict.seq))
names(sim)
gg.metrics <- reshape2::melt(sim[,c("SIZE50", "SIZE95", "DISTMOVED", "HR50", "ALPHA.scaled", "ID", "ITV", "rank")], id.vars = c("ID", "ALPHA.scaled", "ITV", "rank"), variable.name = "metrics")
gg.error <- reshape2::melt(sim[,c("SIZE50.RMSE", "SIZE95.RMSE", "DISTMOVED.RMSE", "HR50.RMSE", "ID")], id.vars = c("ID"), variable.name = "error.metric", value.name = "RMSE")
# add column which informs about whether parameter set is within the 25-% quantile
gg.merge <- cbind(gg.metrics, gg.error[,-1])
gg.merge$q <- "all"
gg.merge$q[quantile(gg.merge$rank, .25) > gg.merge$rank] <- "25 %"
gg.merge$rank <- NULL
# convert layout of empirical data
gg.real <- reshape2::melt(real.data.scaled[, -1], id.vars = "boldness.scaled", variable.name = "metrics")
new.levels <- c("Home range area (50 % kernel)", "Home range area (95 % kernel)", "Total distance moved", "Home range overlap (95 % kernel)")
levels(gg.merge$metrics) <- levels(gg.real$metrics) <- new.levels
################################################
#### visualize fit of the top 25 % to data ####
################################################
all.fitted <-
ggplot()+
geom_line(data = gg.merge[gg.merge$q != "all",], aes(x = ALPHA.scaled, y = value, group = ID, color = q), alpha = 1)+
geom_smooth(data = gg.real[gg.real$metrics %in% new.levels[1:2], ], aes(x = boldness.scaled, y = value), alpha = .1, color = "black", se = F, method = "lm", formula = y ~ x + I(x^2))+
geom_smooth(data = gg.real[gg.real$metrics %in% new.levels[3:4], ], aes(x = boldness.scaled, y = value), alpha = .1, color = "black", se = F, method = "lm", formula = y ~ x)+
geom_smooth(data = gg.real[gg.real$metrics %in% new.levels[1:2], ], aes(x = boldness.scaled, y = value), alpha = .5, color = "black", se = T, method = "lm", formula = y ~ x + I(x^2), fill = "cornsilk2")+
geom_smooth(data = gg.real[gg.real$metrics %in% new.levels[3:4], ], aes(x = boldness.scaled, y = value), alpha = .5, color = "black", se = T, method = "lm", formula = y ~ x, fill = "cornsilk2")+
geom_point(data = gg.real, aes(x = boldness.scaled, y = value), color = "black")+
scale_color_manual(values = c("cadetblue", "cadetblue4"))+
facet_wrap(~metrics)+
theme_classic()+
xlab(expression(paste("Scaled ", alpha, " / boldness score")))+
ylab(expression(paste("Scaled home range metrics")))+
theme(text = element_text(size = 10), legend.position = "none")
####################
### SAVE FIGURES ###
####################
widths = 16.6
heights = 10
ggsave(here("figs", "all.fitted.png") ,all.fitted, width = widths, height = heights, units = "cm", dpi = 600) # 8.2, 11.0, 17.3
ggsave(here("figs", "all.fitted.pdf") ,all.fitted, width = widths, height = heights, units = "cm", dpi = 600)
ggsave(here("figs", "all.fitted.tiff") ,all.fitted, width = widths, height = heights, units = "cm", dpi = 600)
|
qic <-
function(model.R) {
  # Quasi-likelihood under the Independence model Criterion (QIC; Pan 2001)
  # for GEE models fitted with geepack::geeglm.
  #
  # model.R : a fitted "geeglm" object (any working correlation structure).
  # Returns : one-row data.frame with columns QIC, Log.QLik, Trace, px.
  if (inherits(model.R, "geese")) {
    stop("\nQIC function not defined for object of class geese rerun models using geeglm in geepack\n")
  }
  if (!inherits(model.R, "geeglm")) {
    stop("\nFunction not defined for this object class\n")
  }
  # refit under working independence to obtain Omega-hat(I)
  model.indep <- update(model.R, corstr = "independence")
  # Quasi-likelihood evaluated at the fitted means of the supplied model
  mu.R <- model.R$fitted.values
  y <- model.R$y
  type <- family(model.R)$family
  quasi.R <- switch(type,
                    poisson = sum((y*log(mu.R)) - mu.R),
                    gaussian = sum(((y - mu.R)^2)/-2),
                    binomial = sum(y*log(mu.R/(1 - mu.R)) + log(1 - mu.R)),
                    # BUG FIX: was sum(-y/(mu.R - log(mu.R))); the Gamma
                    # quasi-likelihood is sum(-y/mu - log(mu))
                    Gamma = sum(-y/mu.R - log(mu.R)),
                    stop("Error: distribution not defined for this function"))
  # Trace term (penalty for model complexity):
  # trace of Omega-hat(I)^{-1} (Moore-Penrose inverse) times V-hat(R)
  omegaI <- MASS::ginv(model.indep$geese$vbeta.naiv)
  Vr <- model.R$geese$vbeta
  trace.R <- sum(diag(omegaI %*% Vr))
  # BUG FIX: px was length(mu.R), i.e. the number of observations; the
  # intended quantity is the number of non-redundant regression parameters.
  px <- sum(!is.na(coef(model.R)))
  # QIC
  QIC <- 2*(trace.R - quasi.R)
  #QICu <- (-2)*quasi.R + 2*px # approximation assuming model structured correctly
  output <- data.frame(QIC = QIC, Log.QLik = quasi.R, Trace = trace.R, px = px)
  output
}
| /QICpack.Rcheck/00_pkg_src/QICpack/R/qic.R | no_license | muthca/qicpack | R | false | false | 1,561 | r | qic <-
function(model.R) {
require(MASS, quietly = TRUE)
check.class <- class(model.R)
known <- NULL
if(identical(check.class[1], "geeglm")) {
known[1] <- 1
}
if(identical(check.class[1], "geese")) {
stop("\nQIC function not defined for object of class geese rerun models using geeglm in geepack\n")
known[2] <- 1
}
if(sum(known) < 1) {
stop("\nFunction not defined for this object class\n")
}
model.indep <- update(model.R, corstr = "independence")
# Quasilikelihood
mu.R <- model.R$fitted.values
y <- model.R$y
type <- family(model.R)$family
quasi.R <- switch(type,
poisson = sum((y*log(mu.R)) - mu.R),
gaussian = sum(((y - mu.R)^2)/-2),
binomial = sum(y*log(mu.R/(1 - mu.R)) + log(1 - mu.R)),
Gamma = sum(-y/(mu.R - log(mu.R))),
stop("Error: distribution not defined for this function"))
# Trace Term (penalty for model complexity)
omegaI <- ginv(model.indep$geese$vbeta.naiv) # Omega-hat(I) via Moore-Penrose
Vr <- model.R$geese$vbeta
trace.R <- sum(diag(omegaI %*% Vr))
px <- length(mu.R) # number non-redunant columns in design matrix
# QIC
QIC <- 2*(trace.R - quasi.R)
#QICu <- (-2)*quasi.R + 2*px # Approximation assuming model structured correctly
output <- data.frame(list(QIC, quasi.R, trace.R, px))
names(output) <- c('QIC', 'Log.QLik', 'Trace', 'px')
return(output)
}
|
# Differential expression analysis of Hal Fil F1 population
# Script #3
# Author- JT Lovell
# Data - 10-March 2015
####################
####################
#Part 1: Raw counts processing:
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged -- they
# clobber the caller's workspace and hard-code one machine's paths.
rm(list=ls())
setwd("~/Library/Mobile Documents/com~apple~CloudDocs/ph2015_eqtl")
# attach packages quietly, echoing each name as it loads
pkg <- c("RCurl","plyr","mclust","qtl","DESeq2","GenomicRanges","car")
invisible(lapply(pkg, function(x) {cat(x,"..."); library(x, character.only=T, verbose=F, warn.conflicts=F,quietly=T)} ))
sessionInfo()
# suppress warnings globally for the rest of the session
options(warn=-1)
# import necessary functions from github
# NOTE(review): eval(parse(...)) executes network-fetched code verbatim, and
# ssl.verifypeer = FALSE disables TLS certificate checks -- consider pinning
# a commit hash and/or vendoring these two scripts.
function.names<-c("genobyMclust.R","multiplot.R")
us<-paste("https://raw.githubusercontent.com/jtlovell/eqtlanalysis/master/",function.names, sep="")
for(i in 1:length(function.names)){
script <- getURL(us[i], ssl.verifypeer = FALSE)
eval(parse(text = script))
}
#read in ASE raw count data
# allele-specific expression counts: reads assigned to the FIL vs. HAL allele
counts.fil<-read.delim("Phallii-FIL.counts")
counts.hal<-read.delim("Phallii-HAL.counts")
# repair a truncated sample name so both tables share identical column ids
colnames(counts.fil)[grep("FIL2_H328_83",colnames(counts.fil))]<-"FIL2_H328_383"
colnames(counts.hal)[grep("FIL2_H328_83",colnames(counts.hal))]<-"FIL2_H328_383"
# Contamination screen per genotype group (HAL, FIL, F1): regress each
# sample's mean FIL-allele counts on its mean HAL-allele counts and collect
# Bonferroni-significant regression outliers (car::outlierTest) for removal.
bad.lines<-vector()
for(i in c("HAL","FIL","F1")){
dat.hal<-counts.hal[,grep(i,colnames(counts.hal))]
dat.fil<-counts.fil[,grep(i,colnames(counts.fil))]
mean.hal<-apply(dat.hal,2,mean) ; mean.fil<-apply(dat.fil,2,mean)
plot(mean.hal,mean.fil, main=i)
mod<-lm(mean.fil~mean.hal)
ol<-outlierTest(mod)
if(ol$bonf.p<0.0001){
out<-names(ol$bonf); bad.lines<-c(bad.lines, out)
}
}
#drop contaminated individuals w/ high Cooks D for outlier test
# NOTE(review): if bad.lines were ever empty, -which(...) would be integer(0)
# and drop ALL columns; guard with length(bad.lines) > 0 before subsetting.
counts.hal<-counts.hal[,-which(colnames(counts.hal) %in% bad.lines)]
counts.fil<-counts.fil[,-which(colnames(counts.fil) %in% bad.lines)]
#get info
info<-read.csv("JGI_Samples_3_6_14.csv") #experimental design information from Lowry/TEJ, via Scott
# build the sample id used as column names in the count tables
info$jgi.id<-paste(info$id,info$N,info$Plant.Number, sep="_")
idtrt<-info[,c("id","Treatment","jgi.id","VWC_July","Day_July","JGI.Batch")]
# classify generation from the id string embedded in jgi.id
# (F2 samples carry "FH"; parents "FIL"/"HAL"; hybrids "F1")
idtrt$generation<-ifelse(grepl("F1",idtrt$jgi.id),"F1",
ifelse(grepl("FIL",idtrt$jgi.id) | grepl("HAL",idtrt$jgi.id) ,"F0",
ifelse(grepl("FH",idtrt$jgi.id),"F2","NA")))
#cull to just F1/parents
counts.fil<-counts.fil[,-grep("FH",colnames(counts.fil))]
counts.hal<-counts.hal[,-grep("FH",colnames(counts.hal))]
ids<-colnames(counts.hal)
genes<-rownames(counts.hal)
# suffix each sample name with the allele its counts were assigned to
colnames(counts.fil)<-paste(colnames(counts.fil),"_fil",sep="")
colnames(counts.hal)<-paste(colnames(counts.hal),"_hal",sep="")
#quick summary of bias in F1
# compare per-gene total counts per allele, separately for F1s and parents
f1.counts.hal<-counts.hal[,grep("F1", colnames(counts.hal))]
f1.counts.fil<-counts.fil[,grep("F1", colnames(counts.fil))]
f0.counts.hal<-counts.hal[,-grep("F1", colnames(counts.hal))]
f0.counts.fil<-counts.fil[,-grep("F1", colnames(counts.fil))]
f1.genesums.hal<-apply(f1.counts.hal,1,sum)
f1.genesums.fil<-apply(f1.counts.fil,1,sum)
f0.genesums.hal<-apply(f0.counts.hal,1,sum)
f0.genesums.fil<-apply(f0.counts.fil,1,sum)
# log-log scatter of per-gene totals (fil vs. hal) with 1:1 and fitted lines
plot(log10(f1.genesums.fil+1),log10(f1.genesums.hal+1), pch=".",
main="total counts / gene \n (black=1:1, red=F0, blue=F1)", bty='n',
ylim=c(0,max(log10(f0.genesums.hal+1),log10(f1.genesums.hal+1))), xlim=c(0,max(log10(f1.genesums.fil+1),log10(f0.genesums.fil+1))))
points(log10(f0.genesums.fil+1),log10(f0.genesums.hal+1), pch=".", col="red")
abline(0,1, lty=2, col="black", lwd=3)
abline(lm(log10(f1.genesums.hal+1)~log10(f1.genesums.fil+1)), lty=1, lwd=2, col="blue")
abline(lm(log10(f0.genesums.hal+1)~log10(f0.genesums.fil+1)), lty=1, lwd=2, col="red")
summary(lm(log10(f1.genesums.hal+1)~log10(f1.genesums.fil+1)))
summary(lm(log10(f0.genesums.hal+1)~log10(f0.genesums.fil+1)))
#there is more read mapping bias in the parents than the F1
#merge datasets
# combine both allele tables side by side; per-gene summary statistics
all<-cbind(counts.fil,counts.hal)
tot.counts<-apply(all,1,sum)
med.counts<-apply(all,1,median)
mean.counts<-apply(all,1,mean)
hist(log10(tot.counts+1), breaks=100)
hist(log10(med.counts+1), breaks=100)
hist(log10(mean.counts+1), breaks=100)
#filter to only genes w/ mean expression >5
all<-all[mean.counts>=5,]
#make a new dataset with some highly sigificant genes
# NOTE(review): exploratory block.  `line.info` is only created further down
# this script, so running top-to-bottom fails here; the by()/tapply()/ddply()
# results inside the loop are discarded (interactive scratch work), and only
# the first 100 genes are scanned.  Consider moving this below the line.info
# construction or deleting it.
library(MASS)
aov.out<-data.frame()
for(i in 1:100){
# per-gene expression across samples, with cis/trans indicator columns
line.info$dat<-as.numeric(all[i,])
line.info$cis<-line.info$allele
line.info$trans1<-ifelse(line.info$generation == "F0" & line.info$allele =="fil","Fil.F0","not")
line.info$trans2<-ifelse(line.info$generation == "F0" & line.info$allele =="hal","Hal.F0","not")
with(line.info,by(dat, cis, mean))
tapply(line.info$dat, c(line.info[,c("cis","trt")]), mean)
ddply(line.info$dat, ~ line.info$cis, mean)
# fixed-effects ANOVA of cis/trans terms and their treatment interactions
aov1<-aov(dat~cis+trans1+trans2+cis*Treatment+trans1*Treatment+trans2*Treatment, data=line.info)
boxplot(dat ~ cis, data=line.info)
sa<-summary(aov1)
# extract the p-value column (dropping the residual row) into one output row
ps<-data.frame(rownames(all)[i],t(data.frame(summary(aov1)[[1]][5][[1]][-8])))
colnames(ps)<-c("gene.id","cis","trans1","trans2","Treatment","cis_trt","trans1_trt","trans2_trt")
rownames(ps)<-i
# NOTE(review): rbind-in-a-loop grows quadratically; preallocate or use a list
aov.out<-rbind(aov.out,ps)
}
#check matchup
ids[!ids %in% idtrt$jgi.id] #genes in counts data, not in info file
#create two additional files - annotation for each gene, experimental design by sample
idtrt<-idtrt[idtrt$jgi.id %in% ids,]
# duplicate the design per allele and suffix ids to match the count columns
halcols<-idtrt; halcols$jgi.id<-paste(halcols$jgi.id,"_hal",sep="")
filcols<-idtrt; filcols$jgi.id<-paste(filcols$jgi.id,"_fil",sep="")
halcols$allele<-"hal";filcols$allele<-"fil"
line.info<-rbind(halcols,filcols)
line.info$allele<-as.factor(line.info$allele)
line.info$generation<-as.factor(line.info$generation)
# gene annotation: keep coordinates for genes present in the count matrix
annot<-read.csv("ph2015_v1.1annot.edited.csv")
annot<-annot[,c("chr","start","strand","id", "end")]
rowData<-annot[annot$id %in% rownames(all),]
#files:
dim(all)
dim(rowData)
dim(line.info)
chrs<-data.frame(rowData[,"chr"]); colnames(chrs)<-"chrs"
# NOTE(review): seqnames is set to the gene id rather than the chromosome
# (chrs is built above but unused) -- confirm this is intentional.
gr<-GRanges(seqnames = rowData$id,
ranges = IRanges(
start=rowData$start,
end=rowData$end,
names=rowData$id),
strand = rowData$strand)
counts<-data.matrix(all)
line.info<-DataFrame(line.info)
se<-SummarizedExperiment(assays = counts,
rowData = gr,
colData = line.info,
verbose=T)
# DESeq2 design: main effects plus all pairwise interactions of
# treatment, generation and allele
dds <- DESeqDataSet(se = se, design = ~ Treatment + generation + allele + generation : allele + Treatment : allele + generation : Treatment)
# set reference levels so coefficients contrast against Dry / F0 / fil
dds$Treatment <- relevel(dds$Treatment, "Dry")
dds$generation <- relevel(dds$generation, "F0")
dds$allele <- relevel(dds$allele, "fil")
#####################
# Part 2: Cis-trans test... workflow from Cublillos et al 2014 (Loudet's group)
# 2.1 library-size adjust data
# 2.2 binom.test - summed read counts / allele in F1; q-value adjust results
# 2.3 Variance estimation
# Notes:
# Use variance stabilized expression values (from DES)
#get normalized counts
#test for ASE in F1
#variance component modelling - separately normalize hal/fil alleles separately (count median...)
cds = estimateDispersions(dds)
vsd = getVarianceStabilizedData(cds)
vst<-varianceStabilizingTransformation(dds, blind = F)
par(mfrow=c(1,2))
plot(rank(rowMeans(counts(dds))), genefilter::rowVars(log2(counts(dds)+1)),
main="log2(x+1) transform")
plot(rank(rowMeans(assay(vst))), genefilter::rowVars(assay(vst)),
main="VST")
test<-getVarianceStabilizedData(vst)
dds1 <- makeExampleDESeqDataSet(m=6)
vsd1 <- varianceStabilizingTransformation(dds1, blind=TRUE)
par(mfrow=c(1,2))
plot(rank(rowMeans(counts(dds1))), genefilter::rowVars(log2(counts(dds1)+1)),
main="log2(x+1) transform")
plot(rank(rowMeans(assay(vsd1))), genefilter::rowVars(assay(vsd1)),
main="VST")
dds <- DESeq(dds)
save(dds, file="ct2015_deseq1.RData")
res <- results(dds)
# treatment main effect
resMFType <- results(dds, contrast=c("Treatment","Wet","Dry"))
# allele main effect
resMFType <- results(dds, name="allele_hal_vs_fil")
# generation by allele (Trans)
resMFType <- results(dds, name="generationF1.allelehal")
all.norm<-data.frame(counts(vst, normalized=F))
colnames(all.norm)<-colnames(all)
fil.ase<-all.norm[,grep("_fil",colnames(all.norm))]
hal.ase<-all.norm[,grep("_hal",colnames(all.norm))]
fil.means<-apply(fil.ase,2,mean)
hal.means<-apply(hal.ase,2,mean)
plot(fil.means, hal.means)
fil.means<-apply(counts.fil,2,mean)
hal.means<-apply(counts.hal,2,mean)
plot(fil.means, hal.means)
id-idtrt$jgi.id[!grep("FH",idtrt$jgi.id)]
table(id.f2 %in% colnames(counts.fil))
all<-cbind()
#merge by genes
#merge to do normalizations
counts.all<-rbind(counts.fil,counts.hal)
#QC
feature <- data.frame(gc=yeastGC,length=yeastLength)
data <- newSeqExpressionSet(counts=as.matrix(geneLevelData[common,]),
featureData=feature[common,],
phenoData=data.frame(
conditions=c(rep("mut",2),rep("wt",2)),
row.names=colnames(geneLevelData)))
data
lines(cnvrt.coords( c(0,0,.5), c(.5,0,0), input='plt')$usr)
hist
plot(fil.counts.f1[,"Pahalv11b000009m.g"],hal.counts.f1[,"Pahalv11b000009m.g"])
#less stringent cuttoffs - 40% increase in markers
genes.parents.lax<-genes[med.hal.hal>10 &
med.fil.fil>10 &
med.hal.fil<2 &
med.fil.hal<2]
length(genes.parents.lax)
#genes.parents<-genes.parents.lax
#for remaining analyses, use 1/50 cutoffs - these will most likely give the strongest clustering for later
####################
####################
#Part 3: of the genes that look good in parents, which have both alleles in F1s?
fil.counts.f1<-counts.fil[,grep("F1",colnames(counts.fil))]
hal.counts.f1<-counts.hal[,grep("F1",colnames(counts.hal))] | /ct2015_draft1.R | no_license | jtlovell/eqtlanalysis | R | false | false | 9,592 | r | # Differential expression analysis of Hal Fil F1 population
# Script #3
# Author- JT Lovell
# Data - 10-March 2015
####################
####################
#Part 1: Raw counts processing:
rm(list=ls())
setwd("~/Library/Mobile Documents/com~apple~CloudDocs/ph2015_eqtl")
pkg <- c("RCurl","plyr","mclust","qtl","DESeq2","GenomicRanges","car")
invisible(lapply(pkg, function(x) {cat(x,"..."); library(x, character.only=T, verbose=F, warn.conflicts=F,quietly=T)} ))
sessionInfo()
options(warn=-1)
# import necessary functions from github
function.names<-c("genobyMclust.R","multiplot.R")
us<-paste("https://raw.githubusercontent.com/jtlovell/eqtlanalysis/master/",function.names, sep="")
for(i in 1:length(function.names)){
script <- getURL(us[i], ssl.verifypeer = FALSE)
eval(parse(text = script))
}
#read in ASE raw count data
counts.fil<-read.delim("Phallii-FIL.counts")
counts.hal<-read.delim("Phallii-HAL.counts")
colnames(counts.fil)[grep("FIL2_H328_83",colnames(counts.fil))]<-"FIL2_H328_383"
colnames(counts.hal)[grep("FIL2_H328_83",colnames(counts.hal))]<-"FIL2_H328_383"
# Flag contaminated libraries: within each sample group (HAL and FIL parents,
# F1 hybrids), regress per-sample mean FIL-allele counts on mean HAL-allele
# counts and collect samples that are extreme regression outliers
# (car::outlierTest, Bonferroni-adjusted p < 1e-4).
bad.lines<-vector()
for(i in c("HAL","FIL","F1")){
# per-sample column subsets for this group, matched by sample-name substring
dat.hal<-counts.hal[,grep(i,colnames(counts.hal))]
dat.fil<-counts.fil[,grep(i,colnames(counts.fil))]
# per-sample (column) mean counts for each allele
mean.hal<-apply(dat.hal,2,mean) ; mean.fil<-apply(dat.fil,2,mean)
plot(mean.hal,mean.fil, main=i)  # visual QC of allele-count agreement
mod<-lm(mean.fil~mean.hal)
ol<-outlierTest(mod)  # car::outlierTest on the fitted regression
# NOTE(review): `ol$bonf` relies on R's partial matching to reach `ol$bonf.p`;
# its names() are the outlying sample labels. Also, `if()` assumes `bonf.p`
# has length 1 -- confirm (R >= 4.2 errors on longer conditions).
if(ol$bonf.p<0.0001){
out<-names(ol$bonf); bad.lines<-c(bad.lines, out)
}
}
#drop contaminated individuals w/ high Cooks D for outlier test
counts.hal<-counts.hal[,-which(colnames(counts.hal) %in% bad.lines)]
counts.fil<-counts.fil[,-which(colnames(counts.fil) %in% bad.lines)]
#get info
info<-read.csv("JGI_Samples_3_6_14.csv") #experimental design information from Lowry/TEJ, via Scott
info$jgi.id<-paste(info$id,info$N,info$Plant.Number, sep="_")
idtrt<-info[,c("id","Treatment","jgi.id","VWC_July","Day_July","JGI.Batch")]
idtrt$generation<-ifelse(grepl("F1",idtrt$jgi.id),"F1",
ifelse(grepl("FIL",idtrt$jgi.id) | grepl("HAL",idtrt$jgi.id) ,"F0",
ifelse(grepl("FH",idtrt$jgi.id),"F2","NA")))
#cull to just F1/parents
counts.fil<-counts.fil[,-grep("FH",colnames(counts.fil))]
counts.hal<-counts.hal[,-grep("FH",colnames(counts.hal))]
ids<-colnames(counts.hal)
genes<-rownames(counts.hal)
colnames(counts.fil)<-paste(colnames(counts.fil),"_fil",sep="")
colnames(counts.hal)<-paste(colnames(counts.hal),"_hal",sep="")
#quick summary of bias in F1
f1.counts.hal<-counts.hal[,grep("F1", colnames(counts.hal))]
f1.counts.fil<-counts.fil[,grep("F1", colnames(counts.fil))]
f0.counts.hal<-counts.hal[,-grep("F1", colnames(counts.hal))]
f0.counts.fil<-counts.fil[,-grep("F1", colnames(counts.fil))]
f1.genesums.hal<-apply(f1.counts.hal,1,sum)
f1.genesums.fil<-apply(f1.counts.fil,1,sum)
f0.genesums.hal<-apply(f0.counts.hal,1,sum)
f0.genesums.fil<-apply(f0.counts.fil,1,sum)
plot(log10(f1.genesums.fil+1),log10(f1.genesums.hal+1), pch=".",
main="total counts / gene \n (black=1:1, red=F0, blue=F1)", bty='n',
ylim=c(0,max(log10(f0.genesums.hal+1),log10(f1.genesums.hal+1))), xlim=c(0,max(log10(f1.genesums.fil+1),log10(f0.genesums.fil+1))))
points(log10(f0.genesums.fil+1),log10(f0.genesums.hal+1), pch=".", col="red")
abline(0,1, lty=2, col="black", lwd=3)
abline(lm(log10(f1.genesums.hal+1)~log10(f1.genesums.fil+1)), lty=1, lwd=2, col="blue")
abline(lm(log10(f0.genesums.hal+1)~log10(f0.genesums.fil+1)), lty=1, lwd=2, col="red")
summary(lm(log10(f1.genesums.hal+1)~log10(f1.genesums.fil+1)))
summary(lm(log10(f0.genesums.hal+1)~log10(f0.genesums.fil+1)))
#there is more read mapping bias in the parents than the F1
#merge datasets
all<-cbind(counts.fil,counts.hal)
tot.counts<-apply(all,1,sum)
med.counts<-apply(all,1,median)
mean.counts<-apply(all,1,mean)
hist(log10(tot.counts+1), breaks=100)
hist(log10(med.counts+1), breaks=100)
hist(log10(mean.counts+1), breaks=100)
#filter to only genes w/ mean expression >5
all<-all[mean.counts>=5,]
#make a new dataset with some highly sigificant genes
library(MASS)
# Exploratory per-gene ANOVA screen over the first 100 genes: decompose
# expression into a cis term (allele) and two trans indicator terms
# (parental background per allele), each crossed with Treatment, and collect
# the ANOVA p-values into `aov.out` (one row per gene).
# NOTE(review): `line.info` is only created further down in this script
# (rbind of halcols/filcols) -- as written, this loop runs before its inputs
# exist. Confirm the intended execution order before sourcing top-to-bottom.
aov.out<-data.frame()
for(i in 1:100){
# response: counts for gene i across all samples/alleles
line.info$dat<-as.numeric(all[i,])
line.info$cis<-line.info$allele
# trans indicators: parental (F0) samples of each allele vs everything else
line.info$trans1<-ifelse(line.info$generation == "F0" & line.info$allele =="fil","Fil.F0","not")
line.info$trans2<-ifelse(line.info$generation == "F0" & line.info$allele =="hal","Hal.F0","not")
# Ad-hoc group-mean checks (results not stored); NOTE(review): `line.info`
# has no "trt" column (the treatment column is "Treatment"), and plyr::ddply
# expects a data frame, not a vector -- these two lines likely error.
with(line.info,by(dat, cis, mean))
tapply(line.info$dat, c(line.info[,c("cis","trt")]), mean)
ddply(line.info$dat, ~ line.info$cis, mean)
aov1<-aov(dat~cis+trans1+trans2+cis*Treatment+trans1*Treatment+trans2*Treatment, data=line.info)
boxplot(dat ~ cis, data=line.info)
sa<-summary(aov1)
# Extract the Pr(>F) column (dropping the residuals row) into a 1-row frame
ps<-data.frame(rownames(all)[i],t(data.frame(summary(aov1)[[1]][5][[1]][-8])))
colnames(ps)<-c("gene.id","cis","trans1","trans2","Treatment","cis_trt","trans1_trt","trans2_trt")
rownames(ps)<-i
# NOTE(review): growing a data frame with rbind inside a loop is O(n^2);
# fine for 100 genes, preallocate or use a list for genome-wide runs.
aov.out<-rbind(aov.out,ps)
}
#check matchup
ids[!ids %in% idtrt$jgi.id] #genes in counts data, not in info file
#create two additional files - annotation for each gene, experimental design by sample
idtrt<-idtrt[idtrt$jgi.id %in% ids,]
halcols<-idtrt; halcols$jgi.id<-paste(halcols$jgi.id,"_hal",sep="")
filcols<-idtrt; filcols$jgi.id<-paste(filcols$jgi.id,"_fil",sep="")
halcols$allele<-"hal";filcols$allele<-"fil"
line.info<-rbind(halcols,filcols)
line.info$allele<-as.factor(line.info$allele)
line.info$generation<-as.factor(line.info$generation)
annot<-read.csv("ph2015_v1.1annot.edited.csv")
annot<-annot[,c("chr","start","strand","id", "end")]
rowData<-annot[annot$id %in% rownames(all),]
#files:
dim(all)
dim(rowData)
dim(line.info)
chrs<-data.frame(rowData[,"chr"]); colnames(chrs)<-"chrs"
gr<-GRanges(seqnames = rowData$id,
ranges = IRanges(
start=rowData$start,
end=rowData$end,
names=rowData$id),
strand = rowData$strand)
counts<-data.matrix(all)
line.info<-DataFrame(line.info)
se<-SummarizedExperiment(assays = counts,
rowData = gr,
colData = line.info,
verbose=T)
dds <- DESeqDataSet(se = se, design = ~ Treatment + generation + allele + generation : allele + Treatment : allele + generation : Treatment)
dds$Treatment <- relevel(dds$Treatment, "Dry")
dds$generation <- relevel(dds$generation, "F0")
dds$allele <- relevel(dds$allele, "fil")
#####################
# Part 2: Cis-trans test... workflow from Cublillos et al 2014 (Loudet's group)
# 2.1 library-size adjust data
# 2.2 binom.test - summed read counts / allele in F1; q-value adjust results
# 2.3 Variance estimation
# Notes:
# Use variance stabilized expression values (from DES)
#get normalized counts
#test for ASE in F1
#variance component modelling - separately normalize hal/fil alleles separately (count median...)
cds = estimateDispersions(dds)
vsd = getVarianceStabilizedData(cds)
vst<-varianceStabilizingTransformation(dds, blind = F)
par(mfrow=c(1,2))
plot(rank(rowMeans(counts(dds))), genefilter::rowVars(log2(counts(dds)+1)),
main="log2(x+1) transform")
plot(rank(rowMeans(assay(vst))), genefilter::rowVars(assay(vst)),
main="VST")
test<-getVarianceStabilizedData(vst)
dds1 <- makeExampleDESeqDataSet(m=6)
vsd1 <- varianceStabilizingTransformation(dds1, blind=TRUE)
par(mfrow=c(1,2))
plot(rank(rowMeans(counts(dds1))), genefilter::rowVars(log2(counts(dds1)+1)),
main="log2(x+1) transform")
plot(rank(rowMeans(assay(vsd1))), genefilter::rowVars(assay(vsd1)),
main="VST")
dds <- DESeq(dds)
save(dds, file="ct2015_deseq1.RData")
res <- results(dds)
# treatment main effect
resMFType <- results(dds, contrast=c("Treatment","Wet","Dry"))
# allele main effect
resMFType <- results(dds, name="allele_hal_vs_fil")
# generation by allele (Trans)
resMFType <- results(dds, name="generationF1.allelehal")
all.norm<-data.frame(counts(vst, normalized=F))
colnames(all.norm)<-colnames(all)
fil.ase<-all.norm[,grep("_fil",colnames(all.norm))]
hal.ase<-all.norm[,grep("_hal",colnames(all.norm))]
fil.means<-apply(fil.ase,2,mean)
hal.means<-apply(hal.ase,2,mean)
plot(fil.means, hal.means)
fil.means<-apply(counts.fil,2,mean)
hal.means<-apply(counts.hal,2,mean)
plot(fil.means, hal.means)
# Sample ids excluding F2 individuals (names containing "FH").
# BUG FIX: the original line was `id-idtrt$jgi.id[!grep("FH",idtrt$jgi.id)]`:
#   * `id-` performs subtraction on an undefined `id`; intended `id <-`.
#   * `!grep()` negates integer positions (all >= 1, so all FALSE) and would
#     select nothing; `!grepl()` yields the intended logical mask.
id <- idtrt$jgi.id[!grepl("FH", idtrt$jgi.id)]
# NOTE(review): `id.f2` is not defined anywhere visible in this script --
# presumably this was meant to use `id`; left as-is pending confirmation.
table(id.f2 %in% colnames(counts.fil))
# NOTE(review): this clobbers `all` (the filtered count matrix) with NULL;
# `all` is not referenced again below, but confirm before reusing it.
all<-cbind()
#merge by genes
#merge to do normalizations
counts.all<-rbind(counts.fil,counts.hal)
#QC
feature <- data.frame(gc=yeastGC,length=yeastLength)
data <- newSeqExpressionSet(counts=as.matrix(geneLevelData[common,]),
featureData=feature[common,],
phenoData=data.frame(
conditions=c(rep("mut",2),rep("wt",2)),
row.names=colnames(geneLevelData)))
data
lines(cnvrt.coords( c(0,0,.5), c(.5,0,0), input='plt')$usr)
hist
plot(fil.counts.f1[,"Pahalv11b000009m.g"],hal.counts.f1[,"Pahalv11b000009m.g"])
#less stringent cuttoffs - 40% increase in markers
genes.parents.lax<-genes[med.hal.hal>10 &
med.fil.fil>10 &
med.hal.fil<2 &
med.fil.hal<2]
length(genes.parents.lax)
#genes.parents<-genes.parents.lax
#for remaining analyses, use 1/50 cutoffs - these will most likely give the strongest clustering for later
####################
####################
#Part 3: of the genes that look good in parents, which have both alleles in F1s?
fil.counts.f1<-counts.fil[,grep("F1",colnames(counts.fil))]
hal.counts.f1<-counts.hal[,grep("F1",colnames(counts.hal))] |
# class: setup
# author: J.A. Torres-Matallana
# organization: Luxembourg Institute of Science and Technology (LIST), Luxembourg
# Wageningen University and Research Centre (WUR), Wageningen, The Netherlands
# date: 22.04.2016 - 28.07.2017
# S4 class "setup": configuration container for a Monte Carlo simulation run.
# Slots:
#   id        - character identifier for the simulation run
#   nsim      - number of Monte Carlo realisations
#   seed      - random-number-generator seed
#   mcCores   - number of cores for parallel execution
#   ts.input  - input time series (data.frame)
#   rng       - list describing the random-number setup
#   ar.model  - list of autoregressive-model settings
#   var.model - list of VAR-model settings
# NOTE(review): the prototype supplies NULL for the typed slots ts.input and
# rng -- confirm that new("setup") with no arguments validates cleanly.
setup <- setClass("setup",
slots = c(id = "character",
nsim = "numeric",
seed = "numeric",
mcCores = "numeric",
ts.input = "data.frame",
rng = "list",
ar.model = "list",
var.model = "list"),
prototype = list(id = "MC_sim_1",
nsim = 1,
seed = 0.7010607,
mcCores = 1,
ts.input = NULL,
rng = NULL,
ar.model = list(NULL),
var.model = list(NULL))
)
| /R/Class-setup.R | no_license | cran/stUPscales | R | false | false | 999 | r | # class: setup
# author: J.A. Torres-Matallana
# organization: Luxembourg Institute of Science and Technology (LIST), Luxembourg
# Wagenigen University and Research Centre (WUR), Wageningen, The Netherlands
# date: 22.04.2016 - 28.07.2017
setup <- setClass("setup",
slots = c(id = "character",
nsim = "numeric",
seed = "numeric",
mcCores = "numeric",
ts.input = "data.frame",
rng = "list",
ar.model = "list",
var.model = "list"),
prototype = list(id = "MC_sim_1",
nsim = 1,
seed = 0.7010607,
mcCores = 1,
ts.input = NULL,
rng = NULL,
ar.model = list(NULL),
var.model = list(NULL))
)
|
\name{sjdbcCloseConnection}
\alias{sjdbcCloseConnection}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Close a Persistent Database Connection }
\description{
Closes any open persistent database connection.
}
\usage{
sjdbcCloseConnection()
}
\details{
Closes a connection that was made persistent previously using the \code{keepAlive} argument to one of the database functions.
This function is used primarily by \code{importJDBC}, \code{exportJDBC}, and \code{executeJDBC} to
close connections after execution. It rarely needs to be called directly.
}
\value{
Returns no value; it is called for its side effect of closing the open connection.
}
\seealso{
\code{\link{importJDBC}}, \code{\link{exportJDBC}}, \code{\link{executeJDBC}}
}
\examples{\dontrun{
# close an open connection
sjdbcCloseConnection()
}}
\keyword{ interface }
| /man/sjdbcCloseConnection.Rd | no_license | cran/sjdbc | R | false | false | 817 | rd | \name{sjdbcCloseConnection}
\alias{sjdbcCloseConnection}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Close a Persistent Database Connection }
\description{
Closes any open persistent database connection.
}
\usage{
sjdbcCloseConnection()
}
\details{
Closes a connection that was made persistent previously using the \code{keepAlive} argument to one of the database functions.
This function is used primarily by \code{importJDBC}, \code{exportJDBC}, and \code{executeJDBC} to
close connections after execution. It rarely needs to be called directly.
}
\value{
returns no value.
}
\seealso{
\code{\link{importJDBC}}, \code{\link{exportJDBC}}, \code{\link{executeJDBC}}
}
\examples{\dontrun{
# close an open connection
sjdbcCloseConnection()
}}
\keyword{ interface }
|
# Plot 2 (Exploratory Data Analysis course project 1): line plot of Global
# Active Power over 2007-02-01..2007-02-02, written to plot2.png.
power_data <- read.csv(
  "/Users/Iva/Documents/Coursera/Exploratory-Data-Analysis/household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?", nrows = 2075259
)
power_data$Date <- as.Date(power_data$Date, format = "%d/%m/%Y")
# Keep only the two target days, then free the full table.
two_days <- power_data[power_data$Date >= as.Date("2007-02-01") &
                         power_data$Date <= as.Date("2007-02-02"), ]
rm(power_data)
# Build a POSIXct timestamp from the date and time columns for the x axis.
two_days$Datetime <- as.POSIXct(paste(two_days$Date, two_days$Time))
plot(two_days$Datetime, two_days$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
# Copy the on-screen plot to a 480x480 PNG and close the device.
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
nrows=2075259)
fullData$Date <- as.Date(fullData$Date, format="%d/%m/%Y")
data <- subset(fullData, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(fullData)
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
plot(data$Global_active_power~data$Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off() |
# Scratch test of the cplexAPI copyLpCPLEX() interface: build a small LP in
# column-major sparse form and copy it into a CPLEX problem object.
# NOTE(review): `p`, `n`, `bx` and `int.vec` are not defined in this file --
# they must exist in the calling environment (bx: constraint matrix with p
# columns; int.vec: indices of integer variables). Confirm before running.
env <- openEnvCPLEX()
prob <- initProbCPLEX(env)
chgProbNameCPLEX(env, prob, "sample")

# Problem dimensions and bounds (the 4-variable / 3-row CPLEX sample LP).
cplex.nc <- 4                       # number of columns (variables)
cplex.nr <- 3                       # number of rows (constraints)
cplex.nz <- 9                       # number of nonzeros in the constraint matrix
cplex.obj <- c(1, 2, 3, 1)          # objective coefficients
cplex.rhs <- c(20, 30, 0)           # right-hand sides
cplex.sense <- c("L", "L", "E")     # row senses: <=, <=, ==
cplex.lb <- c(0, 0, 0, 2)           # variable lower bounds
cplex.ub <- c(40, CPX_INFBOUND, CPX_INFBOUND, 3)  # variable upper bounds

# Column-major sparse encoding: for column j, matbeg[j] is the 0-based offset
# of its first nonzero, matcnt[j] its nonzero count, matind the 0-based row
# indices, and matval the nonzero values.
cplex.beg <- rep(0, p)
cplex.cnt <- rep(0, p)
cplex.ind <- c()
cplex.val <- rep(1, cplex.nz)
for (j in seq_len(p)) {             # seq_len() is safe when p == 0 (1:p is not)
  cplex.cnt[j] <- sum(bx[, j])      # number of nonzeros in the column
  if (j == 1) {
    cplex.beg[j] <- 0
  } else {
    cplex.beg[j] <- cplex.beg[j - 1] + cplex.cnt[j - 1]
  }
  cplex.ind <- c(cplex.ind, which(bx[, j] != 0) - 1)  # 0-based row indices
}
cplex.ctype <- rep("C", p + n)      # continuous by default...
cplex.ctype[int.vec] <- "I"         # ...integer for the flagged variables

# Debug dump of the sparse structure before handing it to CPLEX.
print(cplex.beg)
print(cplex.cnt)
print(cplex.ind)
print(cplex.val)
print(cplex.nc)
print(cplex.nr)
print(length(cplex.beg))
print(length(cplex.cnt))
print(length(cplex.ind))
print(length(cplex.val))

# BUG FIX: the original called copyLpCPLEX(env = cplex.env, lp = cplex.prob),
# but the objects created above are named `env` and `prob`; `cplex.env` and
# `cplex.prob` are never defined, so the call would fail.
cplexAPI::copyLpCPLEX(env = env, lp = prob, nCols = cplex.nc, nRows = cplex.nr,
                      lpdir = cplexAPI::CPX_MIN,
                      objf = cplex.obj, rhs = cplex.rhs, sense = cplex.sense,
                      matbeg = cplex.beg, matcnt = cplex.cnt, matind = cplex.ind, matval = cplex.val,
                      lb = cplex.lb, ub = cplex.ub)
| /testCplexAPI.R | no_license | profyliu/bsnsing_test | R | false | false | 1,250 | r | env <- openEnvCPLEX()
prob <- initProbCPLEX(env)
chgProbNameCPLEX(env, prob, "sample")
cplex.nc <- 4
cplex.nr <- 3
cplex.nz <- 9
cplex.obj <- c(1,2,3,1)
cplex.rhs <- c(20,30,0)
cplex.sense <- c("L", "L", "E")
cplex.lb <- c(0,0,0,2)
cplex.ub <- c(40, CPX_INFBOUND, CPX_INFBOUND, 3)
cplex.beg <- rep(0, p)
cplex.cnt <- rep(0, p)
cplex.ind <- c()
cplex.val <- rep(1, cplex.nz)
for (j in 1:p) {
cplex.cnt[j] <- sum(bx[,j]) # number of nonzeros in the column
if (j == 1) {
cplex.beg[j] <- 0
} else {
cplex.beg[j] <- cplex.beg[j-1] + cplex.cnt[j-1]
}
cplex.ind <- c(cplex.ind, which(bx[,j] != 0) - 1)
}
cplex.ctype <- rep("C", p + n)
cplex.ctype[int.vec] <- "I"
print(cplex.beg)
print(cplex.cnt)
print(cplex.ind)
print(cplex.val)
print(cplex.nc)
print(cplex.nr)
print(length(cplex.beg))
print(length(cplex.cnt))
print(length(cplex.ind))
print(length(cplex.val))
cplexAPI::copyLpCPLEX(env = cplex.env, lp = cplex.prob, nCols = cplex.nc, nRows = cplex.nr,
lpdir = cplexAPI::CPX_MIN,
objf = cplex.obj, rhs = cplex.rhs, sense = cplex.sense,
matbeg = cplex.beg, matcnt = cplex.cnt, matind = cplex.ind, matval = cplex.val,
lb = cplex.lb, ub = cplex.ub)
|
# Exploratory Data Analysis course project 1, plot 4: a 2x2 panel of power
# measurements for 2007-02-01 and 2007-02-02, written to plot4.png.
# NOTE(review): setwd() with an absolute path makes this script machine-specific.
setwd("C:/Adatok/coursera_edX/4_Exploratory Data analysis/Quizes_Assignments/Assignment1")
# Read the full data set; Date/Time come in as character, the 7 measurement
# columns as numeric, and "?" marks missing values.
data <- read.table("household_power_consumption.txt", sep = ";",
                   na.strings = "?", colClasses = c("character", "character",
                   rep("numeric", 7)), header = TRUE)
# Keep only the two target days (dates are stored as d/m/Y strings).
d <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
library(lubridate)
d$Date2 <- as.Date(d$Date, "%d/%m/%Y")
days <- weekdays(d$Date2)               # currently unused downstream
d$sdays <- wday(d$Date2, label = TRUE)  # currently unused downstream
# Combine date and time into a POSIXct timestamp for the x axes.
DateTime <- paste(d$Date, d$Time)
d$DateTime <- strptime(DateTime, format = "%d/%m/%Y %H:%M:%S")
d$DateTime2 <- as.POSIXct(d$DateTime)
# Plot 4: four panels drawn directly to a PNG device.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2), mar = c(4.2, 4.2, 1, 1))
# Top-left: global active power.
with(d, plot(Global_active_power ~ d$DateTime2, type = "n",
             ylab = "Global Active Power (kilowatts)", xlab = ""))
lines(d$Global_active_power ~ d$DateTime2)
# Top-right: voltage.
with(d, plot(Voltage ~ d$DateTime2, type = "n", ylab = "Voltage", xlab = "datetime"))
lines(d$Voltage ~ d$DateTime2)
# Bottom-left: the three sub-metering series on one frame.
# BUG FIX: the original passed two extra plot() calls as additional arguments
# to with(); because with()'s `...` is never forced, they were dead code and
# are dropped here. The frame is set up by the first plot() call and the
# series are drawn with lines() below.
with(d, plot(Sub_metering_1 ~ d$DateTime2, type = "n",
             ylab = "Energy sub metering", xlab = ""))
lines(d$Sub_metering_1 ~ d$DateTime2, lty = 1)
lines(d$Sub_metering_2 ~ d$DateTime2, lty = 1, col = "red")
lines(d$Sub_metering_3 ~ d$DateTime2, lty = 1, col = "blue")
legend("topright", bty = "n", lty = c(1, 1, 1), col = c("black", "red", "blue"),
       cex = 1, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom-right: global reactive power.
with(d, plot(Global_reactive_power ~ d$DateTime2, type = "n",
             ylab = "Global_reactive_power", xlab = "datetime"))
lines(d$Global_reactive_power ~ d$DateTime2)
dev.off()
| /plot4.R | no_license | Enoana/ExData_Plotting1 | R | false | false | 1,673 | r | setwd("C:/Adatok/coursera_edX/4_Exploratory Data analysis/Quizes_Assignments/Assignment1")
data<- read.table("household_power_consumption.txt", sep= ";",
na.strings= "?", colClasses = c("character", "character",
rep("numeric", 7)), header= TRUE)
d<- data[data$Date %in% c("1/2/2007","2/2/2007"),]
library(lubridate)
d$Date2<- as.Date(d$Date, "%d/%m/%Y")
days<-weekdays(d$Date2)
d$sdays<- wday(d$Date2, label= TRUE)
DateTime<- paste(d$Date, d$Time)
d$DateTime<-strptime(DateTime, format= "%d/%m/%Y %H:%M:%S")
d$DateTime2<-as.POSIXct(d$DateTime)
#Plot4
png("plot4.png", width= 480, height= 480)
par(mfrow= c(2,2), mar = c(4.2, 4.2, 1, 1))
with(d, plot(Global_active_power~d$DateTime2, type= "n", ylab= "Global Active Power (kilowatts)",
xlab= ""))
lines(d$Global_active_power~d$DateTime2)
with(d, plot(Voltage ~ d$DateTime2, type= "n", ylab= "Voltage", xlab= "datetime"))
lines(d$Voltage~d$DateTime2)
with(d, plot(Sub_metering_1 ~ d$DateTime2, type= "n", ylab= "Energy sub metering", xlab= ""),
plot(Sub_metering_2~d$DateTime2, type= "n"), plot(Sub_metering_3~d$DateTime2, type= "n"))
lines(d$Sub_metering_1~d$DateTime2, lty= 1)
lines(d$Sub_metering_2~d$DateTime2, lty=1, col= "red")
lines(d$Sub_metering_3~d$DateTime2, lty=1, col= "blue")
legend("topright", bty= "n", lty= c(1,1,1), col= c("black", "red", "blue"),
cex=1 ,legend= c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
with(d, plot(Global_reactive_power ~ d$DateTime2, type= "n", ylab= "Global_reactive_power",
xlab= "datetime"))
lines(d$Global_reactive_power~d$DateTime2)
dev.off()
|
context('Experiment')
# Smoke test: build an Experiment from a Prediction.f over a two-column data
# frame and run it twice (second run with force=TRUE to bypass any caching).
# NOTE(review): this test contains no expect_*() calls, so it only verifies
# that the pipeline runs without error; testthat 3e flags expectation-free
# tests. Consider asserting on `dat`/`dat3` (e.g. dimensions or equality).
test_that('Experiments work',{
# prediction function: elementwise sum of the first two features
f = function(x){
unlist(x[1] + x[2])
}
X = data.frame(a = c(1,2,3), b = c(2,3,4))
ds = DataSampler$new(X)
pred = Prediction.f$new(f)
e = Experiment$new(pred, ds)
set.seed(1)
dat = e$run()$data()
set.seed(2)
dat3 = e$run(force=TRUE)$data()   # force=TRUE re-runs rather than reusing results
})
| /tests/testthat/test-experiment.R | permissive | spark-lin/iml | R | false | false | 338 | r | context('Experiment')
test_that('Experiments work',{
f = function(x){
unlist(x[1] + x[2])
}
X = data.frame(a = c(1,2,3), b = c(2,3,4))
ds = DataSampler$new(X)
pred = Prediction.f$new(f)
e = Experiment$new(pred, ds)
set.seed(1)
dat = e$run()$data()
set.seed(2)
dat3 = e$run(force=TRUE)$data()
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/element.R
\name{element}
\alias{element}
\title{HTML element}
\description{
HTML element
}
\section{Usage}{
\preformatted{e <- s$find_element(css = NULL, link_text = NULL,
partial_link_text = NULL, xpath = NULL)
e$find_element(css = NULL, link_text = NULL,
partial_link_text = NULL, xpath = NULL)
e$find_elements(css = NULL, link_text = NULL,
partial_link_text = NULL, xpath = NULL)
e$is_selected()
e$get_value()
e$set_value(value)
e$get_attribute(name)
e$get_class()
e$get_css_value(name)
e$get_text()
e$get_name()
e$get_data(name)
e$get_rect()
e$is_enabled()
e$click()
e$clear()
e$send_keys(...)
e$move_mouse_to(xoffset = NULL, yoffset = NULL)
e$execute_script(script, ...)
e$execute_script_async(script, ...)
}
}
\section{Arguments}{
\describe{
\item{e}{An \code{element} object.}
\item{s}{A \code{\link{session}} object.}
\item{css}{Css selector to find an HTML element.}
\item{link_text}{Find \code{<a>} HTML elements based on their
\code{innerText}.}
\item{partial_link_text}{Find \code{<a>} HTML elements based on their
\code{innerText}. It uses partial matching.}
\item{xpath}{Find HTML elements using XPath expressions.}
\item{name}{String scalar, named of attribute, property or css key.
For \code{get_data}, the key of the data attribute.}
\item{xoffset}{Horizontal offset for mouse movement, relative to the
position of the element. If at least one of \code{xoffset} and
    \code{yoffset} is \code{NULL}, then they are ignored.}
  \item{yoffset}{Vertical offset for mouse movement, relative to the
    position of the element. If at least one of \code{xoffset} and
    \code{yoffset} is \code{NULL}, then they are ignored.}
\item{value}{Value to set, a character string.}
\item{...}{For \code{send_keys} the keys to send, see
\code{\link{key}}. For \code{execute_script} and
\code{execute_script_async} argument to supply to the script.}
}
}
\section{Details}{
To create \code{element} objects, you need to use the \code{find_element}
(or \code{find_element}) method of a \code{\link{session}} object.
\code{e$find_element()} finds the \emph{next} HTML element from the
current one. You need to specify one of the \code{css}, \code{link_text},
\code{partial_link_text} and \code{xpath} arguments. It returns a new
\code{element} object.
\code{e$find_elements()} finds all matching HTML elements starting from
the current element. You need to specify one of the \code{css},
\code{link_text}, \code{partial_link_text} and \code{xpath} arguments.
It returns a list of newly created \code{element} objects.
\code{e$is_selected()} returns \code{TRUE} if the element is currently
selected, and \code{FALSE} otherwise.
\code{e$get_value()} returns the value of an input element, it is a
shorthand for \code{e$get_attribute("value")}.
\code{e$set_value()} sets the value of an input element, it is
essentially equivalent to sending keys via \code{e$send_keys()}.
\code{e$get_attribute()} queries an arbitrary HTML attribute. If it
does not exist, \code{NULL} is returned.
\code{e$get_class()} uses \code{e$get_attribute} to parse the
\sQuote{class} attribute into a character vector.
\code{e$get_css_value()} queries a CSS property of an element.
\code{e$get_text()} returns the \code{innerText} on an element.
\code{e$get_name()} returns the tag name of an element.
\code{e$get_data()} is a shorthand for querying \code{data-*} attributes.
\code{e$get_rect()} returns the \sQuote{rectangle} of an element. It is
named list with components \code{x}, \code{y}, \code{height} and
\code{width}.
\code{e$is_enabled()} returns \code{TRUE} if the element is enabled,
\code{FALSE} otherwise.
\code{e$click()} scrolls the element into view, and clicks the
in-view centre point of it.
\code{e$clear()} scrolls the element into view, and then attempts to
clear its value, checkedness or text content.
\code{e$send_keys()} scrolls the form control element into view, and
sends the provided keys to it. See \code{\link{key}} for a list of
special keys that can be sent.
\code{e$move_mouse_to()} moves the mouse cursor to the element, with
the specified offsets. If one or both offsets are \code{NULL}, then
it places the cursor on the center of the element. If the element is
not on the screen, then is scrolls it into the screen first.
\code{e$execute_script()} and \code{e$execute_script_async()}
call the method of the same name on the \code{\link{session}} object.
The first argument of the script (\code{arguments[0]}) will always
hold the element object itself.
}
| /man/element.Rd | permissive | wch/webdriver | R | false | true | 4,623 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/element.R
\name{element}
\alias{element}
\title{HTML element}
\description{
HTML element
}
\section{Usage}{
\preformatted{e <- s$find_element(css = NULL, link_text = NULL,
partial_link_text = NULL, xpath = NULL)
e$find_element(css = NULL, link_text = NULL,
partial_link_text = NULL, xpath = NULL)
e$find_elements(css = NULL, link_text = NULL,
partial_link_text = NULL, xpath = NULL)
e$is_selected()
e$get_value()
e$set_value(value)
e$get_attribute(name)
e$get_class()
e$get_css_value(name)
e$get_text()
e$get_name()
e$get_data(name)
e$get_rect()
e$is_enabled()
e$click()
e$clear()
e$send_keys(...)
e$move_mouse_to(xoffset = NULL, yoffset = NULL)
e$execute_script(script, ...)
e$execute_script_async(script, ...)
}
}
\section{Arguments}{
\describe{
\item{e}{An \code{element} object.}
\item{s}{A \code{\link{session}} object.}
\item{css}{Css selector to find an HTML element.}
\item{link_text}{Find \code{<a>} HTML elements based on their
\code{innerText}.}
\item{partial_link_text}{Find \code{<a>} HTML elements based on their
\code{innerText}. It uses partial matching.}
\item{xpath}{Find HTML elements using XPath expressions.}
\item{name}{String scalar, named of attribute, property or css key.
For \code{get_data}, the key of the data attribute.}
\item{xoffset}{Horizontal offset for mouse movement, relative to the
position of the element. If at least of of \code{xoffset} and
\code{yoffset} is \code{NULL}, then they are ignored.}
\item{yoffset}{Vertical offset for mouse movement, relative to the
position of the element. If at least of of \code{xoffset} and
\code{yoffset} is \code{NULL}, then they are ignored.}
\item{value}{Value to set, a character string.}
\item{...}{For \code{send_keys} the keys to send, see
\code{\link{key}}. For \code{execute_script} and
\code{execute_script_async} argument to supply to the script.}
}
}
\section{Details}{
To create \code{element} objects, you need to use the \code{find_element}
(or \code{find_element}) method of a \code{\link{session}} object.
\code{e$find_element()} finds the \emph{next} HTML element from the
current one. You need to specify one of the \code{css}, \code{link_text},
\code{partial_link_text} and \code{xpath} arguments. It returns a new
\code{element} object.
\code{e$find_elements()} finds all matching HTML elements starting from
the current element. You need to specify one of the \code{css},
\code{link_text}, \code{partial_link_text} and \code{xpath} arguments.
It returns a list of newly created \code{element} objects.
\code{e$is_selected()} returns \code{TRUE} is the element is currently
selected, and \code{FALSE} otherwise.
\code{e$get_value()} returns the value of an input element, it is a
shorthand for \code{e$get_attribute("value")}.
\code{e$set_value()} sets the value of an input element, it is
essentially equivalent to sending keys via \code{e$send_keys()}.
\code{e$get_attribute()} queries an arbitrary HTML attribute. It is
does not exist, \code{NULL} is returned.
\code{e$get_class()} uses \code{e$get_attribute} to parse the
\sQuote{class} attribute into a character vector.
\code{e$get_css_value()} queries a CSS property of an element.
\code{e$get_text()} returns the \code{innerText} on an element.
\code{e$get_name()} returns the tag name of an element.
\code{e$get_data()} is a shorthand for querying \code{data-*} attributes.
\code{e$get_rect()} returns the \sQuote{rectangle} of an element. It is
named list with components \code{x}, \code{y}, \code{height} and
\code{width}.
\code{e$is_enabled()} returns \code{TRUE} if the element is enabled,
\code{FALSE} otherwise.
\code{e$click()} scrolls the element into view, and clicks the
in-view centre point of it.
\code{e$clear()} scrolls the element into view, and then attempts to
clear its value, checkedness or text content.
\code{e$send_keys()} scrolls the form control element into view, and
sends the provided keys to it. See \code{\link{key}} for a list of
special keys that can be sent.
\code{e$move_mouse_to()} moves the mouse cursor to the element, with
the specified offsets. If one or both offsets are \code{NULL}, then
it places the cursor on the center of the element. If the element is
not on the screen, then it scrolls it into the screen first.
\code{e$execute_script()} and \code{e$execute_script_async()}
call the method of the same name on the \code{\link{session}} object.
The first argument of the script (\code{arguments[0]}) will always
hold the element object itself.
}
|
library(oaPlots)
### Name: densityLegend
### Title: Create a colored density legend for visually representing the
###   distribution of a color variable on a plot
### Aliases: densityLegend
### ** Examples
library(ggplot2)
library(RColorBrewer)
# subset the data object to a narrow length/width window
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
# Keep depths within [3, 4]. Logical subsetting is used instead of
# `dsub[-which(...), ]`: when which() matches nothing it returns
# integer(0), and negative indexing with integer(0) silently drops
# EVERY row instead of none.
dsub <- dsub[dsub$z <= 4, ]
dsub <- dsub[dsub$z >= 3, ]
# define color palette, color vector and color region breaks
colorPalette <- brewer.pal(9, "Blues")[4:9]
colorObj <- splitColorVar(colorVar = dsub$z, colorPalette)
colorVec <- colorObj$colorVec
breaks <- colorObj$breaks
# plot the data (legend space reserved on the right)
prepLegend(side = "right", proportion = 0.3)
oaTemplate(xlim = range(dsub$x), ylim = range(dsub$y),
  main = "Diamond Length by Width \n Colored by Depth",
  xlab = "Length (mm)", ylab = "Width (mm)")
points(x = dsub$x, y = dsub$y, col = colorVec, pch = 19, cex = 0.6)
# add the legend showing the density of the color variable
densityLegend(x = dsub$z, colorPalette = colorPalette, side = "right",
  main = "Diamond Depth", colorBreaks = breaks)
| /data/genthat_extracted_code/oaPlots/examples/densityLegend.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,049 | r | library(oaPlots)
### Name: densityLegend
### Title: Create a colored density legend for visually representing the
###   distribution of a color variable on a plot
### Aliases: densityLegend
### ** Examples
library(ggplot2)
library(RColorBrewer)
# subset the data object to a narrow length/width window
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
# Keep depths within [3, 4]. Logical subsetting is used instead of
# `dsub[-which(...), ]`: when which() matches nothing it returns
# integer(0), and negative indexing with integer(0) silently drops
# EVERY row instead of none.
dsub <- dsub[dsub$z <= 4, ]
dsub <- dsub[dsub$z >= 3, ]
# define color palette, color vector and color region breaks
colorPalette <- brewer.pal(9, "Blues")[4:9]
colorObj <- splitColorVar(colorVar = dsub$z, colorPalette)
colorVec <- colorObj$colorVec
breaks <- colorObj$breaks
# plot the data (legend space reserved on the right)
prepLegend(side = "right", proportion = 0.3)
oaTemplate(xlim = range(dsub$x), ylim = range(dsub$y),
  main = "Diamond Length by Width \n Colored by Depth",
  xlab = "Length (mm)", ylab = "Width (mm)")
points(x = dsub$x, y = dsub$y, col = colorVec, pch = 19, cex = 0.6)
# add the legend showing the density of the color variable
densityLegend(x = dsub$z, colorPalette = colorPalette, side = "right",
  main = "Diamond Depth", colorBreaks = breaks)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recombination_wright.R
\name{recombination_wright}
\alias{recombination_wright}
\title{Heuristic Wright recombination for DE}
\usage{
recombination_wright(L, ...)
}
\arguments{
\item{L}{list with all parameters for ExpDE framework}
\item{...}{optional parameters (unused)}
}
\value{
Matrix \code{U} containing the recombined population
}
\description{
Implements the "/wright" (Heuristic Wright) recombination for the ExpDE
framework.
}
\section{Warning}{
This recombination operator evaluates the candidate solutions in \code{M},
which adds an extra \code{popsize} evaluations per iteration.
}
\section{References}{
F. Herrera, M. Lozano, A. M. Sanchez, "A taxonomy for the crossover
operator for real-coded genetic algorithms: an experimental study",
International Journal of Intelligent Systems 18(3) 309-338, 2003.\cr
A.H. Wright, "Genetic Algorithms for Real Parameter Optimization",
Proc. Foundations of Genetic Algorithms, 205-218, 1991.
}
\section{X}{
Population matrix (original).
}
\section{M}{
Population matrix (mutated).
}
| /man/recombination_wright.Rd | no_license | brunasqz/ExpDE | R | false | true | 1,127 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recombination_wright.R
\name{recombination_wright}
\alias{recombination_wright}
\title{Heuristic Wright recombination for DE}
\usage{
recombination_wright(L, ...)
}
\arguments{
\item{L}{list with all parameters for ExpDE framework}
\item{...}{optional parameters (unused)}
}
\value{
Matrix \code{U} containing the recombined population
}
\description{
Implements the "/wright" (Heuristic Wright) recombination for the ExpDE
framework.
}
\section{Warning}{
This recombination operator evaluates the candidate solutions in \code{M},
which adds an extra \code{popsize} evaluations per iteration.
}
\section{References}{
F. Herrera, M. Lozano, A. M. Sanchez, "A taxonomy for the crossover
operator for real-coded genetic algorithms: an experimental study",
International Journal of Intelligent Systems 18(3) 309-338, 2003.\cr
A.H. Wright, "Genetic Algorithms for Real Parameter Optimization",
Proc. Foundations of Genetic Algorithms, 205-218, 1991.
}
\section{X}{
Population matrix (original).
}
\section{M}{
Population matrix (mutated).
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/regcoeffs.R
\name{plot.regcoeffs}
\alias{plot.regcoeffs}
\title{Regression coefficients plot}
\usage{
\method{plot}{regcoeffs}(x, ncomp = 1, ny = 1, type = NULL, col = NULL,
main = "Regression coefficients", xlab = "Variables",
ylab = "Coefficients", show.line = T, show.ci = T, ...)
}
\arguments{
\item{x}{regression coefficients object (class \code{regcoeffs})}
\item{ncomp}{number of components to return the coefficients for}
\item{ny}{number of response variable to return the coefficients for}
\item{type}{type of the plot}
\item{col}{vector with colors for the plot (vector or one value)}
\item{main}{main plot title}
\item{xlab}{label for x axis}
\item{ylab}{label for y axis}
\item{show.line}{logical, show or not line for 0 value}
\item{show.ci}{logical, show or not confidence intervals if they are available}
\item{...}{other arguments}
}
\description{
Shows plot with regression coefficient values for every predictor variable (x)
}
| /man/plot.regcoeffs.Rd | no_license | zeehio/mdatools | R | false | false | 1,047 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/regcoeffs.R
\name{plot.regcoeffs}
\alias{plot.regcoeffs}
\title{Regression coefficients plot}
\usage{
\method{plot}{regcoeffs}(x, ncomp = 1, ny = 1, type = NULL, col = NULL,
main = "Regression coefficients", xlab = "Variables",
ylab = "Coefficients", show.line = T, show.ci = T, ...)
}
\arguments{
\item{x}{regression coefficients object (class \code{regcoeffs})}
\item{ncomp}{number of components to return the coefficients for}
\item{ny}{number of response variable to return the coefficients for}
\item{type}{type of the plot}
\item{col}{vector with colors for the plot (vector or one value)}
\item{main}{main plot title}
\item{xlab}{label for x axis}
\item{ylab}{label for y axis}
\item{show.line}{logical, show or not line for 0 value}
\item{show.ci}{logical, show or not confidence intervals if they are available}
\item{...}{other arguments}
}
\description{
Shows plot with regression coefficient values for every predictor variable (x)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/api.R
\name{fetch}
\alias{fetch}
\title{Fetch last computed}
\usage{
fetch(target_name, require_current = FALSE, remake_file = "remake.yml")
}
\arguments{
\item{target_name}{The name of a single target to fetch the value
of}
\item{require_current}{Logical indicating if the targets must be
up-to-date to be fetched. If this is \code{TRUE} and the targets
are not up-to-date, then an error will be thrown.}
\item{remake_file}{Name of the remakefile (by default
\code{remake.yml})}
}
\value{
An R object.
}
\description{
Fetch the last computed value from the remake database.
}
\details{
The last computed value would be returned invisibly by
\code{make}, but this function provides a way of accessing values
without ever triggering a rebuild. As such, it's possible that the
target is not made, or is not current, so there are options for
controlling what to do in this case.
It is an error to use this function with file targets; see
\code{\link{is_current}} for checking currentness and
\code{fetch_archive} for extracting files from archives.
}
| /man/fetch.Rd | no_license | aammd/remake | R | false | false | 1,145 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/api.R
\name{fetch}
\alias{fetch}
\title{Fetch last computed}
\usage{
fetch(target_name, require_current = FALSE, remake_file = "remake.yml")
}
\arguments{
\item{target_name}{The name of a single target to fetch the value
of}
\item{require_current}{Logical indicating if the targets must be
up-to-date to be fetched. If this is \code{TRUE} and the targets
are not up-to-date, then an error will be thrown.}
\item{remake_file}{Name of the remakefile (by default
\code{remake.yml})}
}
\value{
An R object.
}
\description{
Fetch the last computed value from the remake database.
}
\details{
The last computed value would be returned invisibly by
\code{make}, but this function provides a way of accessing values
without ever triggering a rebuild. As such, it's possible that the
target is not made, or is not current, so there are options for
controlling what to do in this case.
It is an error to use this function with file targets; see
\code{\link{is_current}} for checking currentness and
\code{fetch_archive} for extracting files from archives.
}
|
####### Objective: Quantify multiple western blots
####### Author: Candace Savonen
####### Last Update: 10-3-17
####### Input needed:
############# A "gel set up file" that contains the labels treatment groups, and amounts of lysate pipetted in micrograms in the order that the data are in.
############# A Quantification file that contains the quantification data output from ImageJ program with the a corresponding background signal for each taken above or below the sample.
############# A REVERT quantification file that contains the quantification data output from ImageJ program in the same order as the gel set up and quantification file.
####### Output created:
############# An ANOVA results file
############# A posthoc analysis file
############# A standard curve graph
############# A bar plot across groups
############# A boxplot across groups
####### About the data analysis:
####### The boxplots and bar plots are made off of standardized data using regression on Total Quantities - an obtained Background signal taken above the signal box.
####### These estimated values from regression are then divided REVERT.
####### This script also conducts an outlier test and removes any values that have a absolute value of Z-score (Within their group) greater than the 1.93 (This is according to grubbs test for n=18 and will need to be adjusted depending on the sample size)
##################################################################################################
###################################### The Intial Set Up ########################################
##################################################################################################
####### Install Packages if you don't have them ###############
library(readxl)
library(XLConnect)
library(colorspace)
##### This function will calculate standard error #######
# Standard error of the mean: sd(x) / sqrt(n)
std.e <- function(x) sd(x)/sqrt(length(x))
###### Write the names targets you are assessing ########### This will be used to find the correct files. So write it as it is called in the input file names.
target=c("TH","DAT","VMAT2")
### If you'd like the outlier test to be more stringent, you can change the cutoff in the line below:
### Options for p value cutoffs: 0.1,.075,.05,.025,.01
grubbs.pvalcutoff=.05
### For known bad samples that need to be removed, put their names here:
#badsamples="P2-E1-027"
# NOTE(review): 'badsamples' is commented out above but is still referenced
# later via match(badsamples, ...). With no bad samples defined the script
# errors on an undefined object — define badsamples = c() when there are none.
# Change "home" variable below to the directory of where your input files are stored.
home="/Users/cansav091/Desktop/Current Projects /Western Blots/WesternBlotQuantifications"
setwd(home)
####### This will make a list of the files in your current directory
# NOTE(review): the pattern ".xls" is an unescaped regex; it also matches
# names like "myxlsnotes.txt". A stricter pattern would be "\\.xlsx?$".
files=grep(".xls",dir(),value=TRUE)
####### Import an excel file that shows the gel set up. Format must be like the following and match the order that the quantification data are in:
gelssetup=select.list(grep("setup",files,value=TRUE,ignore.case = TRUE),multiple=TRUE, title="Which output file(s) includes the gel set up? Choose 0 if you want the most recently added file.")
##################################################################################################
############### Read in the data for each target and each gel for that target ###################
##################################################################################################
###### Does a loop and reads in the data for each target listed in the "target" object.
all.aovs=c() #### These objects store the ANOVA output for all the targets
all.posthocs=c()
# Main analysis loop: one iteration per target protein. Reads the gel
# set-up, quantification and REVERT files, builds standard curves, runs a
# Grubbs outlier screen, writes a cleaned CSV, and accumulates ANOVA and
# post-hoc results across targets.
for(jj in 1:length(target)){
setwd(home)
files=grep(".xls",dir(),value=TRUE)
####Imports the quantification file based on it including the target name and not including "REVERT"
proj=grep("REVERT",grep(target[jj],files,value=TRUE),invert=TRUE,value=TRUE)
####Imports the REVERT file based on it including the target name and including "REVERT"
reverts=grep("REVERT",grep(target[jj],files,value=TRUE),value=TRUE)
###### These empty variables will store the combined data from multiple gels for a single target
combodat=data.frame() #### This will have the data from ImageJ for both gels
comborev=data.frame() #### This will have the REVERT data from ImageJ for both gels
combogelsetup=data.frame() #### This will have the gel set up for both gels
combogroups=c() #### This will have the tx groups for both gels
combogroups.stds=c() #### This will have the tx groups including the stds for both gels
comboestimatedvals=c() #### This will have the estimate values for each sample from our linear model using our standards divided by relative revert values
comboamounts=c() #### This will have the micrograms of lysate that were pipetted in.
combobckgnd=c()
# This loop will repeat for each gel data file for the given target
for(ii in 1:length(proj)){
setwd(home)
### Read in the gel set up file.
gelsetup=t(read_xlsx(gelssetup,sheet=ii))[,1:3]
### Sort out the Ladders and empty wells from the data.
wells=gelsetup[,1]
nonemptywells=grep("Empty",wells,ignore.case=TRUE,invert=TRUE)
wells=wells[nonemptywells]
ladderwells=grep("Ladder",wells,ignore.case=TRUE)
wells=wells[-ladderwells]
wells=wells[!is.na(wells)]
### Extract the amounts of lysate pipetted in each well
amounts=as.numeric(gsub("ug","",gelsetup[nonemptywells,3]))[-ladderwells]
combogelsetup=rbind(combogelsetup,gelsetup[nonemptywells,][-ladderwells,])
### Label which lanes are standards
stds=gsub("ug","",wells)
stds=gsub(" STD","",stds)
stds=as.numeric(stds)
stdswells=which(!is.na(stds))
stds=stds[stdswells]
### Label which lanes are samples
samplewells=wells[-stdswells]
# NOTE(review): 'samplewells' holds well LABELS, so the next line indexes
# 'wells' by character name — this only works if the labels match the
# names of 'wells'; verify this is the intent (it looks like an index
# vector was expected here).
samples=wells[samplewells]
### Take note of the N of your sample size including standards
n=length(wells)
### Keep the group info for the different treatments and standards
groups.stds=as.factor(gelsetup[,2][as.numeric(names(wells))])
groups=gelsetup[as.numeric(names(samplewells)),2]
### Import the quantification file for this gel from ImageJ output
dat=as.data.frame(read_xls(proj[ii]))
# DAT was imaged on the 700nm channel; all other targets on 800nm.
if(target[jj]=="DAT"){
dat=dat[which(dat$Channel==700),]
}else{
dat=dat[which(dat$Channel==800),]
}
### Import the REVERT file for this gel from ImageJ output
revert=as.data.frame(read_xls(reverts[ii]))
### Transforms the total into a numeric variable
revert[,4:ncol(revert)]=suppressWarnings(apply(revert[,4:ncol(revert)],2,as.numeric))
### Combine gel quant data, revert data,background data, tx groups, and amounts pipetted from both gels.
# The quantification sheet holds the n signal rows first, then n matching
# background rows, hence the 1:n and (n+1):(2*n) splits below.
combodat=rbind(combodat,dat[1:n,])
comborev=rbind(comborev,revert)
combobckgnd=rbind(combobckgnd,dat[(n+1):(2*n),])
comboamounts=c(comboamounts,amounts)
combogroups=c(combogroups,groups) # This variable has the treatment groups but doesn't include standards
combogroups.stds=c(combogroups.stds,groups.stds) ## This group variable includes standards
####Create a folder for the output using the target name
fold.name=paste0(target[jj],"output")
if(dir.exists(fold.name)==FALSE){
dir.create(fold.name)
}
setwd(paste0(home,"/",fold.name)) # Set the directory to the new folder.
##### Adjusted signal means the signal - background (not the local background that ImageJ does, but the background that we take ourselves separately)
adj.sig=(dat$Total[1:n]-dat$Total[(n+1):(2*n)])
##################################################################################################
################### Analyze the standard curve for this particular gel by itself ################
##################################################################################################
##### Create a 4x4 panel plot with variations on Std Curve and a boxplot for that particular gel
jpeg(paste0(target[jj],"Gel",ii,"Std Curve Graphs.jpeg"))
par(mfrow=c(2,2),oma = c(0, 0, 2, 0))
### Plot Amounts vs the Adj signals
reg=lm(adj.sig[stdswells]~stds)
Rval=summary(reg)$r.squared
pval=summary(reg)$coefficients[8]
plot(amounts[1:n],adj.sig,main=paste("Total-Bckgnd R=",round(Rval,3),"p=",round(pval,3)),xlab="Total ug ",ylab="Total-Bckgnd",pch=21, bg=c("yellow","red","orange","black")[groups.stds])
abline(reg,col="red")
legend(x="bottomright", legend = levels(groups.stds),fill=c("yellow","red","orange","black"),cex=.8)
### Plot Amounts vs the REVERT signals
# NOTE(review): 'Rval'/'pval' in the title below are still the values from
# the PREVIOUS regression; they are not recomputed for this fit.
reg=lm(revert$Total[which(groups.stds=="Standard")]~stds)
plot(amounts,revert$Total[1:n],main=paste("Total Protein Total R = ",round(Rval,3),"p=",round(pval,3)),xlab="Total ug",ylab="REVERT Total",pch=21, bg=c("yellow","red","orange","black")[groups.stds])
revnormal=reg$coefficients[2]*revert$Total
abline(reg,col="red")
legend(x="bottomright", legend = levels(groups.stds),fill=c("yellow","red","orange","black"),cex=.8)
#### Plot REVERT against the signal - background
reg=lm(revert$Total[which(groups.stds=="Standard")]~(dat$Total-dat$Bkgnd.)[stdswells])
Rval=summary(reg)$r.squared
pval=summary(reg)$coefficients[8]
# NOTE(review): 'revert$Mean[1:17]' hard-codes 17 rows; elsewhere the lane
# count is the variable n — confirm 17 is not a leftover from one gel.
plot((dat$Total[1:n]-dat$Total[(n+1):(2*n)]),revert$Mean[1:17],main=paste("Total Protein R = ",round(Rval,3),"p=",round(pval,3)),xlab="Total-Bckgnd",ylab="REVERT Total",pch=21,bg=c("yellow","red","orange","black")[groups.stds])
abline(reg,col="red")
legend(x="bottomright", legend = levels(groups.stds),fill=c("yellow","red","orange","black"),cex=.8)
#####################################################################################################################################
# Use the linear model from our standards to create a normalized and estimated relative quantity for each sample ###################
#####################################################################################################################################
if(length(samples)>0){
### Calculate a "Relative Revert" by dividing every revert signal by the smallest REVERT signal in that particular gel
rel.revert=revert$Total/sort(revert$Total[-stdswells])[1]
###### Linear model using our standards:
reg=lm(amounts[stdswells]~adj.sig[stdswells])
###### Plot the linear model with it's p value:
p=round(summary(reg)$coefficients[8],4)
plot(amounts[stdswells],adj.sig[stdswells],main="Regression",sub=paste0("p=",p))
abline(reg,col="red")# Put the slope of the line
####### Calculate Samples' estimated values based on the above linear model using our standards and divide by "relative revert"
estimatedvals=(reg$coefficients[1]+reg$coefficients[2]*adj.sig)/rel.revert
####### Combine the estimated values for both gels for this target
comboestimatedvals=c(comboestimatedvals,estimatedvals)
}
dev.off()##### The 4x4 graph will print out
}
####### This piece of code re-orders the factor to be Control, Low, High, instead of the default of being alphabetical
combogroups=factor(combogroups,unique(combogroups))
#####################################################################################################################################
############# Do Grubbs outlier test for this target and the data from both gels ###################################################
#####################################################################################################################################
## Read in the chart of standards for Grubbs Tests
grubbs.chart=read.csv("/Users/cansav091/Desktop/Current Projects /Western Blots/WesternBlotQuantifications/GrubbsCutoffs.csv")
### Obtain averages by group
group.avg=tapply(comboestimatedvals,combogroups.stds,mean)
### Obtain sd's by group
group.sd=tapply(comboestimatedvals,combogroups.stds,sd)
### Create an empty object for storing the z scores
# NOTE(review): rep(NA, 2*n) hard-codes the two-gel assumption (n lanes per
# gel, two gels); with a different gel count the vector length is wrong.
groups.z.scores=rep(NA,2*n)
tx.group.n=c() ## Stores the number of samples for each group.
### Loop repeats this calculation for each treatment group
for(kk in 1:(length(unique(combogroups.stds))-1)){
xx=which(combogroups.stds==sort(unique(combogroups.stds))[kk])
groups.z.scores[xx]=(comboestimatedvals[xx]-group.avg[kk])/group.sd[kk]
tx.group.n=c(tx.group.n,length(xx))
}
### p values in the chart:
p.val.cutoffs=c(0.1,.075,.05,.025,.01)
### Finds the grubbs cutoff according to your selected p cutoff and size of your treatment group
# NOTE(review): tx.group.n[ii] uses 'ii', the leftover index of the LAST
# gel loop iteration, not a group index — confirm which group size was
# intended here.
grubbs.cutoff=grubbs.chart[which(grubbs.chart[,1]==tx.group.n[ii]),which(p.val.cutoffs==grubbs.pvalcutoff)]
# NOTE(review): 'badsamples' is commented out at the top of the script, so
# match() below errors when no bad samples are defined.
outliers=c(match(badsamples,combogelsetup[,1]),which(abs(groups.z.scores)>grubbs.cutoff))
outliers.column=rep("Good",2*n)
# NOTE(review): 'xx' here is stale — it still holds the indices of the
# last treatment group from the kk loop. This almost certainly should be
# outliers.column[outliers]="Outlier" ('outliers' is otherwise unused).
outliers.column[xx]="Outlier"
#####################################################################################################################################
############# Create a csv with the cleaned data for this particular target #######################################################
#####################################################################################################################################
####### Put the cleaned data in one big dataframe that we will write to a csv
# NOTE(review): 'rel.revert' below is the per-gel vector from the LAST gel
# only (and only exists if that gel had samples); multiplying it against
# the all-gel 'comboestimatedvals' relies on silent recycling — verify.
target.data=cbind(rep(target[jj],length(comboamounts)),comboestimatedvals*rel.revert,comboestimatedvals,rel.revert,(combodat$Total-combobckgnd$Total)/comborev$Total,combobckgnd$Total,combodat$Total,comborev$Total,comboamounts,combogelsetup[,1:2],c(rep("Gel1",nrow(comborev)/2),rep("Gel2",nrow(comborev)/2)),groups.z.scores,outliers.column)
colnames(target.data)=c("Target","EstimatedVals","EstimatVals.RelRevert","RelRevert","AdjSig","Backgr","Total","Revert","Amount","Sample","Treatment","Gel","ZScoresByGroups","OutlierCall")
if(jj==1){
alldata=target.data
colnames(alldata)=c("Target","EstimatedVals","EstimatVals.RelRevert","RelRevert","AdjSig","Backgr","Total","Revert","Amount","Sample","Treatment","Gel","ZScoresByGroups","OutlierCall")
}else{
alldata=rbind(alldata,target.data)
}
write.csv(target.data,file=paste0(target[jj],"CleanData.csv"))
### Store this cleaned information for this particular target as it's own dataframe within R's environment so we can use it later.
# NOTE(review): this assigns the ACCUMULATED 'alldata' (all targets so
# far), not 'target.data', under the target's name — likely unintended.
assign(target[jj],alldata, envir=.GlobalEnv)
### Determine which data are standards so you can remove them from the ANOVA
xx=which(alldata$Treatment=="Standard")
groups=factor(alldata$Treatment[-xx],unique(alldata$Treatment[-xx]))
#### Do ANOVA for the both gels' data for this target
# NOTE(review): the ANOVA runs on 'alldata', which at this point contains
# every target processed so far, not just the current one — confirm the
# intent was target.data here.
target.aov=aov(alldata$EstimatVals.RelRevert[-xx]~groups)
#### Post Hoc Analyses
target.posthoc=t(as.data.frame(TukeyHSD(target.aov)$groups))
colnames(target.posthoc)=paste0(target[jj],colnames(target.posthoc))
all.posthocs=cbind(all.posthocs,target.posthoc)
#### Summary of the ANOVA
target.aov=t(data.frame(summary(target.aov)[[1]][1:5]))
colnames(target.aov)=paste0(target[jj],colnames(target.aov))
all.aovs=cbind(all.aovs,target.aov)
}
#####################################################################################################################################
############# Write csvs that contain all the data for all the targets ##############################################################
#####################################################################################################################################
# Collect the per-target ANOVA and post-hoc summaries into data frames for export.
all.aovs=as.data.frame(all.aovs)
all.posthocs=as.data.frame(all.posthocs)
setwd(home)
# Create the results folder if it does not exist yet.
# FIX: previously this compared the logical result of dir.exists() against
# the STRING "FALSE" (dir.exists(...)=="FALSE"), which only worked through
# implicit logical-to-character coercion; use a plain logical test.
if (!dir.exists("FinalResultsFolder")) {
dir.create("FinalResultsFolder")
}
setwd(file.path(home, "FinalResultsFolder"))
# Export the combined cleaned data and the statistical summaries.
write.csv(alldata,file="AllTargetsCleanData.csv")
write.csv(all.aovs,file="AllWesternANOVAResults.csv",na="NA")
write.csv(all.posthocs,file="AllWesternPostHocResults.csv",na="NA")
# Treatment factor over ALL rows of alldata (still includes "Standard" rows;
# the plotting sections below drop standards per target).
groups=factor(alldata$Treatment,unique(alldata$Treatment))
#####################################################################################################################################
############# Create a boxplot for all the targets in a single graph ################################################################
#####################################################################################################################################
jpeg(paste0("AllTargetsBoxplot.jpeg"),width=800,height=500)
par(mfrow=c(1,length(target)),oma = c(0, 4, 0, 0))
for(ii in seq_along(target)){
### Only graph the data for a particular target
rows=which(alldata$Target==unique(alldata$Target)[ii])
target.data=alldata[rows,]
# BUG FIX: the standards were previously located with indices into the full
# 'alldata' table (xx=which(alldata$Treatment=="Standard")) but then used to
# subset the target-specific vectors, mixing two index spaces. Locate the
# non-standard rows within target.data itself.
keep=which(target.data$Treatment!="Standard")
tx.groups=factor(target.data$Treatment[keep],unique(target.data$Treatment[keep]))
# Column name written out in full: '$EstimatVals' only resolved via
# data.frame partial matching to 'EstimatVals.RelRevert'.
boxplot(target.data$EstimatVals.RelRevert[keep]~tx.groups,names=c("0 mg/kg","0.3 mg/kg","1 mg/kg"),cex.main=3,cex.names=1.5,cex.lab=2,cex.axis = 1.5,ylim=c(0,20),xlab="Dose",main=target[ii],col=c("palegreen4","orange","red3"))
}
dev.off()
#####################################################################################################################################
############# Create a barplot for all the targets in a single graph using SE bars ##################################################
#####################################################################################################################################
jpeg(paste0("AllTargetsBarplot.jpeg"),width=800,height=500)
par(mfrow=c(1,length(target)),oma = c(0, 4, 0, 0))
for(ii in seq_along(target)){
### Only graph the data for a particular target
rows=which(alldata$Target==unique(alldata$Target)[ii])
target.data=alldata[rows,]
# BUG FIX: as in the boxplot section, standards were located in 'alldata'
# but used to subset the target-specific vectors. Locate them in target.data.
keep=which(target.data$Treatment!="Standard")
tx.groups=factor(target.data$Treatment[keep],unique(target.data$Treatment[keep]))
vals=target.data$EstimatVals.RelRevert[keep]
avg=tapply(vals,tx.groups,mean)
# BUG FIX: standard errors are now computed per treatment group on this
# target's data; previously one pooled SE (computed on the wrong table,
# across all targets) was reused for every bar.
se=tapply(vals,tx.groups,std.e)
bars=barplot(avg,names=c("0 mg/kg","0.3 mg/kg","1 mg/kg"),cex.main=3,cex.names=1.5,cex.lab=2,cex.axis = 1.5,ylim=c(0,20),xlab="Dose",main=target[ii],col=c("palegreen4","orange","red3"))
# Draw +/- 2*SE error bars on each group's bar.
segments(bars, avg - se * 2, bars, avg + se * 2, lwd = 1.5)
arrows(bars, avg - se * 2, bars, avg + se * 2, lwd = 1.5, angle = 90, code = 3, length = 0.05)
}
dev.off()
| /WesternQuantification.R | no_license | cansavvy/WesternBlotQuantifications | R | false | false | 17,833 | r |
####### Objective: Quantify multiple western blots
####### Author: Candace Savonen
####### Last Update: 10-3-17
####### Input needed:
############# A "gel set up file" that contains the labels treatment groups, and amounts of lysate pipetted in micrograms in the order that the data are in.
############# A Quantification file that contains the quantification data output from ImageJ program with the a corresponding background signal for each taken above or below the sample.
############# A REVERT quantification file that contains the quantification data output from ImageJ program in the same order as the gel set up and quantification file.
####### Output created:
############# An ANOVA results file
############# A posthoc analysis file
############# A standard curve graph
############# A bar plot across groups
############# A boxplot across groups
####### About the data analysis:
####### The boxplots and bar plots are made off of standardized data using regression on Total Quantities - an obtained Background signal taken above the signal box.
####### These estimated values from regression are then divided REVERT.
####### This script also conducts an outlier test and removes any values that have a absolute value of Z-score (Within their group) greater than the 1.93 (This is according to grubbs test for n=18 and will need to be adjusted depending on the sample size)
##################################################################################################
###################################### The Intial Set Up ########################################
##################################################################################################
####### Install Packages if you don't have them ###############
library(readxl)
library(XLConnect)
library(colorspace)
##### This function will calculate standard error #######
std.e <- function(x) sd(x)/sqrt(length(x))
###### Write the names of the targets you are assessing ########### These are used to find the correct files, so write each name exactly as it appears in the input file names.
target=c("TH","DAT","VMAT2")
### If you'd like the outlier test to be more stringent, you can change the cutoff in the line below:
### Options for p value cutoffs: 0.1,.075,.05,.025,.01
grubbs.pvalcutoff=.05
### For known bad samples that need to be removed, put their names here:
### NOTE(review): this line is commented out, but `badsamples` is referenced
### later in the outlier step -- the script errors there unless it is defined.
#badsamples="P2-E1-027"
# Change "home" variable below to the directory of where your input files are stored.
home="/Users/cansav091/Desktop/Current Projects /Western Blots/WesternBlotQuantifications"
setwd(home)
####### This will make a list of the files in your current directory
####### (note: ".xls" is an unescaped regex, so it matches "xls" anywhere in a name, including "xlsx")
files=grep(".xls",dir(),value=TRUE)
####### Import an excel file that shows the gel set up. Format must be like the following and match the order that the quantification data are in:
gelssetup=select.list(grep("setup",files,value=TRUE,ignore.case = TRUE),multiple=TRUE, title="Which output file(s) includes the gel set up? Choose 0 if you want the most recently added file.")
##################################################################################################
############### Read in the data for each target and each gel for that target ###################
##################################################################################################
###### Does a loop and reads in the data for each target listed in the "target" object.
all.aovs=c() #### These objects store the ANOVA output for all the targets
all.posthocs=c()
# Main per-target loop: for each protein in `target`, read the gel set-up,
# ImageJ quantification, and REVERT files, build per-gel standard curves,
# estimate sample quantities from the standards' regression, screen for
# outliers (Grubbs), and run an ANOVA with Tukey post-hocs across groups.
for(jj in 1:length(target)){
setwd(home)
files=grep(".xls",dir(),value=TRUE)
####Imports the quantification file based on it including the target name and not including "REVERT"
proj=grep("REVERT",grep(target[jj],files,value=TRUE),invert=TRUE,value=TRUE)
####Imports the REVERT file based on it including the target name and not including "REVERT"
reverts=grep("REVERT",grep(target[jj],files,value=TRUE),value=TRUE)
###### These empty variables will store the combined data from multiple gels for a single target
combodat=data.frame() #### This will have the data from ImageJ for both gels
comborev=data.frame() #### This will have the REVERT data from ImageJ for both gels
combogelsetup=data.frame() #### This will have the gel set up for both gels
combogroups=c() #### This will have the tx groups for both gels
combogroups.stds=c() #### This will have the tx groups including the stds for both gels
comboestimatedvals=c() #### This will have the estimate values for each sample from our linear model using our standards divided by relative revert values
comboamounts=c() #### This will have the micrograms of lysate that were pipetted in.
combobckgnd=c() #### This will have the background-signal rows for both gels
# This loop will repeat for each gel data file for the given target
for(ii in 1:length(proj)){
setwd(home)
### Read in the gel set up file.
gelsetup=t(read_xlsx(gelssetup,sheet=ii))[,1:3]
### Sort out the Ladders and empty wells from the data.
wells=gelsetup[,1]
nonemptywells=grep("Empty",wells,ignore.case=TRUE,invert=TRUE)
wells=wells[nonemptywells]
ladderwells=grep("Ladder",wells,ignore.case=TRUE)
wells=wells[-ladderwells]
wells=wells[!is.na(wells)]
### Extract the amounts of lysate pipetted in each well
amounts=as.numeric(gsub("ug","",gelsetup[nonemptywells,3]))[-ladderwells]
combogelsetup=rbind(combogelsetup,gelsetup[nonemptywells,][-ladderwells,])
### Label which lanes are standards (their labels contain "ug"/" STD", which
### are stripped so only standards parse to a number)
stds=gsub("ug","",wells)
stds=gsub(" STD","",stds)
stds=as.numeric(stds)
stdswells=which(!is.na(stds))
stds=stds[stdswells]
### Label which lanes are samples
# NOTE(review): `samplewells` holds well NAMES (a character vector), which is
# then used on the next line to index `wells` by name -- positional indices
# look like what was intended; verify `samples` contains what you expect.
samplewells=wells[-stdswells]
samples=wells[samplewells]
### Take note of the N of your sample size including standards
n=length(wells)
### Keep the group info for the different treatments and standards
# NOTE(review): assumes names(wells) are numeric row indices into the
# transposed set-up sheet -- TODO confirm.
groups.stds=as.factor(gelsetup[,2][as.numeric(names(wells))])
groups=gelsetup[as.numeric(names(samplewells)),2]
### Import the quantification file for this gel from ImageJ output
dat=as.data.frame(read_xls(proj[ii]))
# DAT was imaged on the 700 channel; all other targets on the 800 channel.
if(target[jj]=="DAT"){
dat=dat[which(dat$Channel==700),]
}else{
dat=dat[which(dat$Channel==800),]
}
### Import the REVERT file for this gel from ImageJ output
revert=as.data.frame(read_xls(reverts[ii]))
### Transforms the total into a numeric variable
revert[,4:ncol(revert)]=suppressWarnings(apply(revert[,4:ncol(revert)],2,as.numeric))
### Combine gel quant data, revert data,background data, tx groups, and amounts pipetted from both gels.
# The quantification file is assumed to hold the n signal rows first,
# followed by n matching background rows (rows n+1 .. 2n).
combodat=rbind(combodat,dat[1:n,])
comborev=rbind(comborev,revert)
combobckgnd=rbind(combobckgnd,dat[(n+1):(2*n),])
comboamounts=c(comboamounts,amounts)
combogroups=c(combogroups,groups) # This variable has the treatment groups but doesn't include standards
combogroups.stds=c(combogroups.stds,groups.stds) ## This group variable includes standards
####Create a folder for the output using the target name
fold.name=paste0(target[jj],"output")
if(dir.exists(fold.name)==FALSE){
dir.create(fold.name)
}
setwd(paste0(home,"/",fold.name)) # Set the directory to the new folder.
##### Adjusted signal means the signal - background (not the local background that ImageJ does, but the background that we take ourselves separately)
adj.sig=(dat$Total[1:n]-dat$Total[(n+1):(2*n)])
##################################################################################################
################### Analyze the standard curve for this particular gel by itself ################
##################################################################################################
##### Create a 4x4 panel plot with variations on Std Curve and a boxplot for that particular gel
jpeg(paste0(target[jj],"Gel",ii,"Std Curve Graphs.jpeg"))
par(mfrow=c(2,2),oma = c(0, 0, 2, 0))
### Plot Amounts vs the Adj signals
reg=lm(adj.sig[stdswells]~stds)
Rval=summary(reg)$r.squared
# coefficients[8] is the slope's p-value in the 2x4 coefficient matrix
pval=summary(reg)$coefficients[8]
plot(amounts[1:n],adj.sig,main=paste("Total-Bckgnd R=",round(Rval,3),"p=",round(pval,3)),xlab="Total ug ",ylab="Total-Bckgnd",pch=21, bg=c("yellow","red","orange","black")[groups.stds])
abline(reg,col="red")
legend(x="bottomright", legend = levels(groups.stds),fill=c("yellow","red","orange","black"),cex=.8)
### Plot Amounts vs the REVERT signals
reg=lm(revert$Total[which(groups.stds=="Standard")]~stds)
# NOTE(review): Rval/pval in this panel's title are still from the PREVIOUS
# regression; they are not recomputed for this fit -- TODO confirm intended.
plot(amounts,revert$Total[1:n],main=paste("Total Protein Total R = ",round(Rval,3),"p=",round(pval,3)),xlab="Total ug",ylab="REVERT Total",pch=21, bg=c("yellow","red","orange","black")[groups.stds])
revnormal=reg$coefficients[2]*revert$Total
abline(reg,col="red")
legend(x="bottomright", legend = levels(groups.stds),fill=c("yellow","red","orange","black"),cex=.8)
#### Plot REVERT against the signal - background
reg=lm(revert$Total[which(groups.stds=="Standard")]~(dat$Total-dat$Bkgnd.)[stdswells])
Rval=summary(reg)$r.squared
pval=summary(reg)$coefficients[8]
# NOTE(review): `revert$Mean[1:17]` hard-codes 17 rows; presumably this should
# be 1:n to match the x values -- verify for gels with other lane counts.
plot((dat$Total[1:n]-dat$Total[(n+1):(2*n)]),revert$Mean[1:17],main=paste("Total Protein R = ",round(Rval,3),"p=",round(pval,3)),xlab="Total-Bckgnd",ylab="REVERT Total",pch=21,bg=c("yellow","red","orange","black")[groups.stds])
abline(reg,col="red")
legend(x="bottomright", legend = levels(groups.stds),fill=c("yellow","red","orange","black"),cex=.8)
#####################################################################################################################################
# Use the linear model from our standards to create a normalized and estimated relative quantity for each sample ###################
#####################################################################################################################################
if(length(samples)>0){
### Calculate a "Relative Revert" by dividing every revert signal by the smallest REVERT signal in that particular gel
rel.revert=revert$Total/sort(revert$Total[-stdswells])[1]
###### Linear model using our standards:
reg=lm(amounts[stdswells]~adj.sig[stdswells])
###### Plot the linear model with it's p value:
p=round(summary(reg)$coefficients[8],4)
plot(amounts[stdswells],adj.sig[stdswells],main="Regression",sub=paste0("p=",p))
abline(reg,col="red")# Put the slope of the line
####### Calculate Samples' estimated values based on the above linear model using our standards and divide by "relative revert"
estimatedvals=(reg$coefficients[1]+reg$coefficients[2]*adj.sig)/rel.revert
####### Combine the estimated values for both gels for this target
comboestimatedvals=c(comboestimatedvals,estimatedvals)
}
dev.off()##### The 4x4 graph will print out
}
####### This piece of code re-orders the factor to be Control, Low, High, instead of the default of being alphabetical
combogroups=factor(combogroups,unique(combogroups))
#####################################################################################################################################
############# Do Grubbs outlier test for this target and the data from both gels ###################################################
#####################################################################################################################################
## Read in the chart of standards for Grubbs Tests
grubbs.chart=read.csv("/Users/cansav091/Desktop/Current Projects /Western Blots/WesternBlotQuantifications/GrubbsCutoffs.csv")
### Obtain averages by group
group.avg=tapply(comboestimatedvals,combogroups.stds,mean)
### Obtain sd's by group
group.sd=tapply(comboestimatedvals,combogroups.stds,sd)
### Create an empty object for storing the z scores
# NOTE(review): rep(NA,2*n) assumes exactly two gels with the same lane count
# n -- TODO confirm for experiments with a different number of gels.
groups.z.scores=rep(NA,2*n)
tx.group.n=c() ## Stores the number of samples for each group.
### Loop repeats this calculation for each treatment group
### (the "-1" skips the last sorted level, presumably "Standard" -- verify)
for(kk in 1:(length(unique(combogroups.stds))-1)){
xx=which(combogroups.stds==sort(unique(combogroups.stds))[kk])
groups.z.scores[xx]=(comboestimatedvals[xx]-group.avg[kk])/group.sd[kk]
tx.group.n=c(tx.group.n,length(xx))
}
### p values in the chart:
p.val.cutoffs=c(0.1,.075,.05,.025,.01)
### Finds the grubbs cutoff according to your selected p cutoff and size of your treatment group
# NOTE(review): `tx.group.n[ii]` indexes by the leftover GEL counter `ii`,
# not by a group index; this only works when all group sizes are equal --
# verify.
grubbs.cutoff=grubbs.chart[which(grubbs.chart[,1]==tx.group.n[ii]),which(p.val.cutoffs==grubbs.pvalcutoff)]
# NOTE(review): `badsamples` is commented out at the top of the script; this
# line errors unless it is defined. Also, `outliers` is never used below --
# the next assignment flags rows via the leftover `xx` (the LAST treatment
# group from the z-score loop) instead of `outliers`, which looks like a bug.
outliers=c(match(badsamples,combogelsetup[,1]),which(abs(groups.z.scores)>grubbs.cutoff))
outliers.column=rep("Good",2*n)
outliers.column[xx]="Outlier"
#####################################################################################################################################
############# Create a csv with the cleaned data for this particular target #######################################################
#####################################################################################################################################
####### Put the cleaned data in one big dataframe that we will write to a csv
# NOTE(review): `rel.revert` here is whatever the LAST gel's inner-loop value
# was (length n), recycled against the combined 2n-row data -- TODO confirm.
target.data=cbind(rep(target[jj],length(comboamounts)),comboestimatedvals*rel.revert,comboestimatedvals,rel.revert,(combodat$Total-combobckgnd$Total)/comborev$Total,combobckgnd$Total,combodat$Total,comborev$Total,comboamounts,combogelsetup[,1:2],c(rep("Gel1",nrow(comborev)/2),rep("Gel2",nrow(comborev)/2)),groups.z.scores,outliers.column)
colnames(target.data)=c("Target","EstimatedVals","EstimatVals.RelRevert","RelRevert","AdjSig","Backgr","Total","Revert","Amount","Sample","Treatment","Gel","ZScoresByGroups","OutlierCall")
if(jj==1){
alldata=target.data
colnames(alldata)=c("Target","EstimatedVals","EstimatVals.RelRevert","RelRevert","AdjSig","Backgr","Total","Revert","Amount","Sample","Treatment","Gel","ZScoresByGroups","OutlierCall")
}else{
alldata=rbind(alldata,target.data)
}
write.csv(target.data,file=paste0(target[jj],"CleanData.csv"))
### Store this cleaned information for this particular target as it's own dataframe within R's environment so we can use it later.
# NOTE(review): this assigns the CUMULATIVE `alldata`, not this target's
# `target.data`, under the target's name -- verify intended.
assign(target[jj],alldata, envir=.GlobalEnv)
### Determine which data are standards so you can remove them from the ANOVA
xx=which(alldata$Treatment=="Standard")
groups=factor(alldata$Treatment[-xx],unique(alldata$Treatment[-xx]))
#### Do ANOVA for the both gels' data for this target
# NOTE(review): the ANOVA is run on ALL targets accumulated so far in
# `alldata`, not only the current target -- verify intended.
target.aov=aov(alldata$EstimatVals.RelRevert[-xx]~groups)
#### Post Hoc Analyses
target.posthoc=t(as.data.frame(TukeyHSD(target.aov)$groups))
colnames(target.posthoc)=paste0(target[jj],colnames(target.posthoc))
all.posthocs=cbind(all.posthocs,target.posthoc)
#### Summary of the ANOVA
target.aov=t(data.frame(summary(target.aov)[[1]][1:5]))
colnames(target.aov)=paste0(target[jj],colnames(target.aov))
all.aovs=cbind(all.aovs,target.aov)
}
#####################################################################################################################################
############# Write csvs that contain all the data for all the targets ##############################################################
#####################################################################################################################################
# Collect the accumulated ANOVA/post-hoc matrices into data frames and write
# all final result csvs into a single FinalResultsFolder under `home`.
all.aovs=as.data.frame(all.aovs)
all.posthocs=as.data.frame(all.posthocs)
setwd(home)
# FIX: the original tested `dir.exists(...)=="FALSE"`, comparing a logical to
# a string (it worked only via implicit coercion); use logical negation.
# The `paste0()` wrappers around the constant folder name were also no-ops.
if (!dir.exists("FinalResultsFolder")) {
dir.create("FinalResultsFolder")
}
setwd(paste0(home,"/FinalResultsFolder"))
write.csv(alldata,file="AllTargetsCleanData.csv")
write.csv(all.aovs,file="AllWesternANOVAResults.csv",na="NA")
write.csv(all.posthocs,file="AllWesternPostHocResults.csv",na="NA")
# Re-level treatments to data order (e.g. Control, Low, High) rather than alphabetical.
groups=factor(alldata$Treatment,unique(alldata$Treatment))
#####################################################################################################################################
############# Create a boxplot for all the targets in a single graph ################################################################
#####################################################################################################################################
jpeg(paste0("AllTargetsBoxplot.jpeg"),width=800,height=500)
par(mfrow=c(1,length(target)),oma = c(0, 4, 0, 0))
for(ii in 1:length(target)){
xx=which(alldata$Target==unique(alldata$Target)[ii])### Only graph the data for a particular target
target.data=alldata[xx,]
tx.groups=target.data$
groups[xx]
# NOTE(review): `xx` is reassigned here to row indices of the FULL `alldata`
# (standards across all targets), but is then used to subset the per-target
# `tx.groups`/`target.data`; the indices only line up if standards occupy the
# same row positions -- TODO confirm this is intended.
xx=which(alldata$Treatment=="Standard")
tx.groups=factor(tx.groups[-xx],unique(tx.groups[-xx]))
# NOTE(review): there is no "EstimatVals" column; `$` partial matching
# resolves this to "EstimatVals.RelRevert" -- consider spelling it out.
boxplot(target.data$EstimatVals[-xx]~tx.groups,names=c("0 mg/kg","0.3 mg/kg","1 mg/kg"),cex.main=3,cex.names=1.5,cex.lab=2,cex.axis = 1.5,ylim=c(0,20),xlab="Dose",main=target[ii],col=c("palegreen4","orange","red3"))
}
dev.off()
#####################################################################################################################################
############# Create a barplot for all the targets in a single graph using SE bars ##################################################
#####################################################################################################################################
jpeg(paste0("AllTargetsBarplot.jpeg"),width=800,height=500)
par(mfrow=c(1,length(target)),oma = c(0, 4, 0, 0))
for(ii in 1:length(target)){
xx=which(alldata$Target==unique(alldata$Target)[ii])### Only graph the data for a particular target
target.data=alldata[xx,]
tx.groups=groups[xx]
# NOTE(review): `xx` is reassigned to indices into the FULL `alldata` but then
# used to subset the per-target `tx.groups`/`target.data` -- TODO confirm.
xx=which(alldata$Treatment=="Standard")
tx.groups=factor(tx.groups[-xx],unique(tx.groups[-xx]))
# Group means for this target ("EstimatVals" partial-matches the
# "EstimatVals.RelRevert" column).
avg=tapply(target.data$EstimatVals[-xx],tx.groups,mean)
bars=barplot(avg,names=c("0 mg/kg","0.3 mg/kg","1 mg/kg"),cex.main=3,cex.names=1.5,cex.lab=2,cex.axis = 1.5,ylim=c(0,20),xlab="Dose",main=target[ii],col=c("palegreen4","orange","red3"))
# NOTE(review): the error bars use one common SE computed over ALL targets'
# values (alldata), not per-target, per-group SEs -- verify intended.
segments(bars, avg - std.e(alldata$EstimatVals[-xx]) * 2, bars, avg + std.e(alldata$EstimatVals[-xx]) * 2, lwd = 1.5)
arrows(bars, avg - std.e(alldata$EstimatVals[-xx]) * 2, bars, avg + std.e(alldata$EstimatVals[-xx]) * 2, lwd = 1.5, angle = 90, code = 3, length = 0.05)
}
dev.off()
|
library(scrobbler)
### Name: fetch_tracks
### Title: fetch_tracks
### Aliases: fetch_tracks
### ** Examples
## Not run:
##D fetch_tracks("your_username", out_file = "scrobbles.txt", start_page = 1)
## End(Not run)
| /data/genthat_extracted_code/scrobbler/examples/fetch_tracks.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 222 | r | library(scrobbler)
### Name: fetch_tracks
### Title: fetch_tracks
### Aliases: fetch_tracks
### ** Examples
## Not run:
##D fetch_tracks("your_username", out_file = "scrobbles.txt", start_page = 1)
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/networkmanager_operations.R
\name{networkmanager_update_device}
\alias{networkmanager_update_device}
\title{Updates the details for an existing device}
\usage{
networkmanager_update_device(
GlobalNetworkId,
DeviceId,
AWSLocation = NULL,
Description = NULL,
Type = NULL,
Vendor = NULL,
Model = NULL,
SerialNumber = NULL,
Location = NULL,
SiteId = NULL
)
}
\arguments{
\item{GlobalNetworkId}{[required] The ID of the global network.}
\item{DeviceId}{[required] The ID of the device.}
\item{AWSLocation}{The Amazon Web Services location of the device, if applicable. For an
on-premises device, you can omit this parameter.}
\item{Description}{A description of the device.
Constraints: Maximum length of 256 characters.}
\item{Type}{The type of the device.}
\item{Vendor}{The vendor of the device.
Constraints: Maximum length of 128 characters.}
\item{Model}{The model of the device.
Constraints: Maximum length of 128 characters.}
\item{SerialNumber}{The serial number of the device.
Constraints: Maximum length of 128 characters.}
\item{Location}{}
\item{SiteId}{The ID of the site.}
}
\description{
Updates the details for an existing device. To remove information for any of the parameters, specify an empty string.
See \url{https://www.paws-r-sdk.com/docs/networkmanager_update_device/} for full documentation.
}
\keyword{internal}
| /cran/paws.networking/man/networkmanager_update_device.Rd | permissive | paws-r/paws | R | false | true | 1,445 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/networkmanager_operations.R
\name{networkmanager_update_device}
\alias{networkmanager_update_device}
\title{Updates the details for an existing device}
\usage{
networkmanager_update_device(
GlobalNetworkId,
DeviceId,
AWSLocation = NULL,
Description = NULL,
Type = NULL,
Vendor = NULL,
Model = NULL,
SerialNumber = NULL,
Location = NULL,
SiteId = NULL
)
}
\arguments{
\item{GlobalNetworkId}{[required] The ID of the global network.}
\item{DeviceId}{[required] The ID of the device.}
\item{AWSLocation}{The Amazon Web Services location of the device, if applicable. For an
on-premises device, you can omit this parameter.}
\item{Description}{A description of the device.
Constraints: Maximum length of 256 characters.}
\item{Type}{The type of the device.}
\item{Vendor}{The vendor of the device.
Constraints: Maximum length of 128 characters.}
\item{Model}{The model of the device.
Constraints: Maximum length of 128 characters.}
\item{SerialNumber}{The serial number of the device.
Constraints: Maximum length of 128 characters.}
\item{Location}{}
\item{SiteId}{The ID of the site.}
}
\description{
Updates the details for an existing device. To remove information for any of the parameters, specify an empty string.
See \url{https://www.paws-r-sdk.com/docs/networkmanager_update_device/} for full documentation.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/secretsmanager_operations.R
\name{secretsmanager_describe_secret}
\alias{secretsmanager_describe_secret}
\title{Retrieves the details of a secret}
\usage{
secretsmanager_describe_secret(SecretId)
}
\arguments{
\item{SecretId}{[required] The identifier of the secret whose details you want to retrieve. You can
specify either the Amazon Resource Name (ARN) or the friendly name of
the secret.
If you specify an ARN, we generally recommend that you specify a
complete ARN. You can specify a partial ARN too—for example, if you
don’t include the final hyphen and six random characters that Secrets
Manager adds at the end of the ARN when you created the secret. A
partial ARN match can work as long as it uniquely matches only one
secret. However, if your secret has a name that ends in a hyphen
followed by six characters (before Secrets Manager adds the hyphen and
six characters to the ARN) and you try to use that as a partial ARN,
then those characters cause Secrets Manager to assume that you’re
specifying a complete ARN. This confusion can cause unexpected results.
To avoid this situation, we recommend that you don’t create secret names
ending with a hyphen followed by six characters.
If you specify an incomplete ARN without the random suffix, and instead
provide the 'friendly name', you \emph{must} not include the random suffix.
If you do include the random suffix added by Secrets Manager, you
receive either a \emph{ResourceNotFoundException} or an
\emph{AccessDeniedException} error, depending on your permissions.}
}
\value{
A list with the following syntax:\preformatted{list(
ARN = "string",
Name = "string",
Description = "string",
KmsKeyId = "string",
RotationEnabled = TRUE|FALSE,
RotationLambdaARN = "string",
RotationRules = list(
AutomaticallyAfterDays = 123
),
LastRotatedDate = as.POSIXct(
"2015-01-01"
),
LastChangedDate = as.POSIXct(
"2015-01-01"
),
LastAccessedDate = as.POSIXct(
"2015-01-01"
),
DeletedDate = as.POSIXct(
"2015-01-01"
),
Tags = list(
list(
Key = "string",
Value = "string"
)
),
VersionIdsToStages = list(
list(
"string"
)
),
OwningService = "string",
CreatedDate = as.POSIXct(
"2015-01-01"
)
)
}
}
\description{
Retrieves the details of a secret. It does not include the encrypted
fields. Secrets Manager only returns fields populated with a value in
the response.
\strong{Minimum permissions}
To run this command, you must have the following permissions:
\itemize{
\item secretsmanager:DescribeSecret
}
\strong{Related operations}
\itemize{
\item To create a secret, use
\code{\link[=secretsmanager_create_secret]{create_secret}}.
\item To modify a secret, use
\code{\link[=secretsmanager_update_secret]{update_secret}}.
\item To retrieve the encrypted secret information in a version of the
secret, use \code{\link[=secretsmanager_get_secret_value]{get_secret_value}}.
\item To list all of the secrets in the AWS account, use
\code{\link[=secretsmanager_list_secrets]{list_secrets}}.
}
}
\section{Request syntax}{
\preformatted{svc$describe_secret(
SecretId = "string"
)
}
}
\examples{
\dontrun{
# The following example shows how to get the details about a secret.
svc$describe_secret(
SecretId = "MyTestDatabaseSecret"
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/secretsmanager_describe_secret.Rd | permissive | TWarczak/paws | R | false | true | 3,394 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/secretsmanager_operations.R
\name{secretsmanager_describe_secret}
\alias{secretsmanager_describe_secret}
\title{Retrieves the details of a secret}
\usage{
secretsmanager_describe_secret(SecretId)
}
\arguments{
\item{SecretId}{[required] The identifier of the secret whose details you want to retrieve. You can
specify either the Amazon Resource Name (ARN) or the friendly name of
the secret.
If you specify an ARN, we generally recommend that you specify a
complete ARN. You can specify a partial ARN too—for example, if you
don’t include the final hyphen and six random characters that Secrets
Manager adds at the end of the ARN when you created the secret. A
partial ARN match can work as long as it uniquely matches only one
secret. However, if your secret has a name that ends in a hyphen
followed by six characters (before Secrets Manager adds the hyphen and
six characters to the ARN) and you try to use that as a partial ARN,
then those characters cause Secrets Manager to assume that you’re
specifying a complete ARN. This confusion can cause unexpected results.
To avoid this situation, we recommend that you don’t create secret names
ending with a hyphen followed by six characters.
If you specify an incomplete ARN without the random suffix, and instead
provide the 'friendly name', you \emph{must} not include the random suffix.
If you do include the random suffix added by Secrets Manager, you
receive either a \emph{ResourceNotFoundException} or an
\emph{AccessDeniedException} error, depending on your permissions.}
}
\value{
A list with the following syntax:\preformatted{list(
ARN = "string",
Name = "string",
Description = "string",
KmsKeyId = "string",
RotationEnabled = TRUE|FALSE,
RotationLambdaARN = "string",
RotationRules = list(
AutomaticallyAfterDays = 123
),
LastRotatedDate = as.POSIXct(
"2015-01-01"
),
LastChangedDate = as.POSIXct(
"2015-01-01"
),
LastAccessedDate = as.POSIXct(
"2015-01-01"
),
DeletedDate = as.POSIXct(
"2015-01-01"
),
Tags = list(
list(
Key = "string",
Value = "string"
)
),
VersionIdsToStages = list(
list(
"string"
)
),
OwningService = "string",
CreatedDate = as.POSIXct(
"2015-01-01"
)
)
}
}
\description{
Retrieves the details of a secret. It does not include the encrypted
fields. Secrets Manager only returns fields populated with a value in
the response.
\strong{Minimum permissions}
To run this command, you must have the following permissions:
\itemize{
\item secretsmanager:DescribeSecret
}
\strong{Related operations}
\itemize{
\item To create a secret, use
\code{\link[=secretsmanager_create_secret]{create_secret}}.
\item To modify a secret, use
\code{\link[=secretsmanager_update_secret]{update_secret}}.
\item To retrieve the encrypted secret information in a version of the
secret, use \code{\link[=secretsmanager_get_secret_value]{get_secret_value}}.
\item To list all of the secrets in the AWS account, use
\code{\link[=secretsmanager_list_secrets]{list_secrets}}.
}
}
\section{Request syntax}{
\preformatted{svc$describe_secret(
SecretId = "string"
)
}
}
\examples{
\dontrun{
# The following example shows how to get the details about a secret.
svc$describe_secret(
SecretId = "MyTestDatabaseSecret"
)
}
}
\keyword{internal}
|
library(shiny)
library(data.table)
# Input data files: daily weather observations and the station -> city lookup.
inputFile = "weatherData_slim.csv"
stationNamesFile = "stationNames.csv"
wxdata = fread(inputFile)
stations = fread(stationNamesFile, sep=",")
# Change TAVG to be average of TMAX and TMIN
wxdata[,TAVG:=(TMIN+TMAX)/2]
# Assign city to each entry.
# FIX: use seq_len(nrow(...)) instead of 1:nrow(...), which would iterate
# over c(1, 0) if the lookup table were empty.
for (i in seq_len(nrow(stations))) {
wxdata[STATION==stations$STATION[i],City:=stations$CITY[i]]
}
# Format Date and create Month and Day fields
wxdata[,Date:=as.Date(DATE)]
wxdata[,`:=`(Month=as.integer(format(Date,"%m")),Day=as.integer(format(Date,"%d")))]
# Only keep specific columns
wxdata = wxdata[,.(City,Month,Day,TAVG)]
# Remove rows with NAs for TAVG
wxdata = wxdata[!is.na(TAVG),]
# Fixed y-axis limits (deg F) for the temperature boxplot in the server function.
ymin = 10
ymax = 95
shinyServer(function(input, output) {
  output$distPlot <- renderPlot({
    # All recorded daily average temperatures for the selected city/month/day.
    x = wxdata[City==input$city & Month==input$month & Day==input$day,TAVG]
    # FIX: use scalar, short-circuiting `&&` in the if() condition; the
    # original used elementwise `&`, which errors in R >= 4.3 if either side
    # is not length 1. Only plot when data exist and the comfort band is valid.
    if (length(x) > 0 && input$tempLow < input$tempHigh) {
      boxplot(x, ylim=c(ymin,ymax), ylab="Average Temperature (deg F)", main=paste0("Distribution of Average Temperature\nMonth ",input$month," , Day ",input$day,", ",input$city))
      # Mark the user's comfort band with horizontal lines and labels.
      abline(h=input$tempLow, col="blue", lwd=3)
      text(0.5, input$tempLow-5, input$tempLow, col="blue", adj=0)
      abline(h=input$tempHigh, col="red", lwd=3)
      text(0.5, input$tempHigh+5, input$tempHigh, col="red", adj=0)
      # Percent of observed days whose average temperature falls inside the band.
      probComfort = round(100*sum(x>=input$tempLow & x<=input$tempHigh)/length(x), 0)
      text(0.5, ymax, paste0("Probability of\nBeing Comfortable: ",probComfort,"%"), adj=c(0,1))
    }
  })
})
| /server.R | no_license | cbednarc/WeatherProbability | R | false | false | 1,551 | r |
library(shiny)
library(data.table)
# Input data files: daily weather observations and the station -> city lookup.
inputFile = "weatherData_slim.csv"
stationNamesFile = "stationNames.csv"
wxdata = fread(inputFile)
stations = fread(stationNamesFile, sep=",")
# Change TAVG to be average of TMAX and TMIN
wxdata[,TAVG:=(TMIN+TMAX)/2]
# Assign city to each entry.
# FIX: use seq_len(nrow(...)) instead of 1:nrow(...), which would iterate
# over c(1, 0) if the lookup table were empty.
for (i in seq_len(nrow(stations))) {
wxdata[STATION==stations$STATION[i],City:=stations$CITY[i]]
}
# Format Date and create Month and Day fields
wxdata[,Date:=as.Date(DATE)]
wxdata[,`:=`(Month=as.integer(format(Date,"%m")),Day=as.integer(format(Date,"%d")))]
# Only keep specific columns
wxdata = wxdata[,.(City,Month,Day,TAVG)]
# Remove rows with NAs for TAVG
wxdata = wxdata[!is.na(TAVG),]
# Fixed y-axis limits (deg F) for the temperature boxplot in the server function.
ymin = 10
ymax = 95
shinyServer(function(input, output) {
  output$distPlot <- renderPlot({
    # All recorded daily average temperatures for the selected city/month/day.
    x = wxdata[City==input$city & Month==input$month & Day==input$day,TAVG]
    # FIX: use scalar, short-circuiting `&&` in the if() condition; the
    # original used elementwise `&`, which errors in R >= 4.3 if either side
    # is not length 1. Only plot when data exist and the comfort band is valid.
    if (length(x) > 0 && input$tempLow < input$tempHigh) {
      boxplot(x, ylim=c(ymin,ymax), ylab="Average Temperature (deg F)", main=paste0("Distribution of Average Temperature\nMonth ",input$month," , Day ",input$day,", ",input$city))
      # Mark the user's comfort band with horizontal lines and labels.
      abline(h=input$tempLow, col="blue", lwd=3)
      text(0.5, input$tempLow-5, input$tempLow, col="blue", adj=0)
      abline(h=input$tempHigh, col="red", lwd=3)
      text(0.5, input$tempHigh+5, input$tempHigh, col="red", adj=0)
      # Percent of observed days whose average temperature falls inside the band.
      probComfort = round(100*sum(x>=input$tempLow & x<=input$tempHigh)/length(x), 0)
      text(0.5, ymax, paste0("Probability of\nBeing Comfortable: ",probComfort,"%"), adj=c(0,1))
    }
  })
})
|
#'@title calf
#'@description Coarse approximation linear function
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "real"). All other columns contain relevant markers.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "real" for target vector with real numbers.
#'@param margin Real number from 0 to 1. Indicates the amount a potential marker must improve the target criterion (Pearson correlation or p-value) in order to add the marker.
#'@param optimize Criteria to optimize if targetVector = "binary." Indicate "pval" to optimize the p-value corresponding to the t-test distinguishing case and control. Indicate "auc" to optimize the AUC.
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The AUC value for the classification
#'@return rocPlot. A plot object from ggplot2 for the receiver operating curve.
#'@examples
#'calf(data = CaseControl, nMarkers = 6, targetVector = "binary")
#'@export
calf <- function(data,
                 nMarkers,
                 targetVector,
                 margin = NULL,
                 optimize = "pval",
                 # reverse = FALSE,
                 verbose = FALSE){
  # Run CALF once on the full sample (no randomization, no subsetting);
  # all of the real work happens in calf_internal().
  #
  # FIX: the original hard-coded `margin = NULL` in the call below, so a
  # user-supplied `margin` was silently ignored. It is now passed through;
  # `margin` also gains a NULL default so existing calls without it keep
  # working (the documented behavior is unchanged when margin is NULL).
  calf_internal(data,
                nMarkers,
                proportion = NULL,
                randomize = FALSE,
                targetVector = targetVector,
                times = 1,
                margin = margin,
                optimize = optimize,
                # reverse = reverse,
                verbose = verbose)
}
#'@title calf_randomize
#'@description Coarse approximation linear function, randomized
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "real"). All other columns contain relevant markers.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param randomize Logical. Indicate TRUE to randomize the case/control status (or real number vector) for each individual. Used to compare results from true data with results from randomized data.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "real" for target vector with real numbers.
#'@param times Numeric. Indicates the number of replications to run with randomization.
#'@param margin Real number from 0 to 1. Indicates the amount a potential marker must improve the target criterion (Pearson correlation or p-value) in order to add the marker.
#'@param optimize Criteria to optimize if targetVector = "binary." Indicate "pval" to optimize the p-value corresponding to the t-test distinguishing case and control. Indicate "auc" to optimize the AUC.
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The AUC value for the classification
#'@return aucHist A histogram of the AUCs across replications.
#'@examples
#'calf_randomize(data = CaseControl, nMarkers = 6, targetVector = "binary", times = 5)
#'@export
calf_randomize <- function(data,
                           nMarkers,
                           randomize = TRUE,
                           targetVector,
                           times = 1,
                           margin = NULL,
                           optimize = "pval",
                           #reverse = FALSE,
                           verbose = FALSE){
  # Runs CALF `times` times (optionally with a randomized target vector),
  # accumulating the selected markers, AUCs, and final criterion values
  # across replications; returns a "calf_randomize" object.
  auc <- numeric()
  finalBest <- numeric()
  allMarkers <- character()
  count <- 1
  # AUC is only referenced inside ggplot2's aes() below; initialized here,
  # presumably to avoid an R CMD check "no visible binding" note -- TODO confirm.
  AUC = NULL
  repeat {
    out <- calf_internal(data,
                         nMarkers,
                         proportion = NULL,
                         randomize = randomize,
                         targetVector = targetVector,
                         times,
                         margin = margin,
                         optimize = optimize,
                         # reverse = reverse,
                         verbose = verbose)
    # Record this replication's AUC, selected markers, and final criterion.
    auc[count] <- out$auc
    selection <- out$selection
    markers <- as.character(out$selection[,1])
    finalBest <- append(finalBest, out$finalBest)
    allMarkers <- as.character((append(allMarkers, markers)))
    if (count == times) break
    count <- count + 1
  }
  # With multiple replications, tabulate marker selection frequency and (for
  # binary targets) build a histogram of the AUCs across replications.
  if (times > 1) {
    summaryMarkers <- as.data.frame(table(allMarkers), check.names = FALSE)
    colnames(summaryMarkers) <- c("Marker", "Frequency")
    summaryMarkers <- summaryMarkers[order(-summaryMarkers$Frequency),]
    if (targetVector == "binary"){
      auc <- as.data.frame(auc)
      colnames(auc) <- "AUC"
      aucHist <- ggplot(auc, aes(AUC)) +
        geom_histogram() +
        ylab("Count") +
        xlab("AUC") +
        scale_x_continuous() +
        theme_bw()
    } else aucHist = NULL
  } else {
    summaryMarkers = NULL
    aucHist = NULL
  }
  # A single binary run keeps the ROC plot from calf_internal; otherwise NULL.
  if (times == 1 & targetVector == "binary") {
    rocPlot <- out$rocPlot
  } else {
    rocPlot <- NULL
  }
  # Bundle everything (note: `selection` is from the LAST replication only).
  est <- list(selection = selection,
              multiple = summaryMarkers,
              auc = auc,
              randomize = randomize,
              targetVec = targetVector,
              aucHist = aucHist,
              times = times,
              finalBest = finalBest,
              rocPlot = rocPlot,
              optimize = optimize,
              # reverse = reverse,
              verbose = verbose)
  class(est) <- "calf_randomize"
  return(est)
}
#'@title calf_subset
#'@description Coarse approximation linear function, randomized
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "real"). All other columns contain relevant markers.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param proportion Numeric. A value (where 0 < proportion <= 1) indicating the proportion of cases and controls to use in analysis (if targetVector = "binary"). If targetVector = "real", this is just a proportion of the full sample. Used to evaluate robustness of solution. Defaults to 0.8.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "real" for target vector with real numbers.
#'@param times Numeric. Indicates the number of replications to run with randomization.
#'@param margin Real number from 0 to 1. Indicates the amount a potential marker must improve the target criterion (Pearson correlation or p-value) in order to add the marker.
#'@param optimize Criteria to optimize if targetVector = "binary." Indicate "pval" to optimize the p-value corresponding to the t-test distinguishing case and control. Indicate "auc" to optimize the AUC.
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The AUC value for the classification. If multiple replications are requested, this will be a data.frame containing all AUCs across replications.
#'@return aucHist A histogram of the AUCs across replications.
#'@examples
#'calf_subset(data = CaseControl, nMarkers = 6, targetVector = "binary", times = 5)
#'@export
calf_subset <- function(data,
                        nMarkers,
                        proportion = .8,
                        targetVector,
                        times = 1,
                        margin = NULL,
                        optimize = "pval",
                        # reverse = FALSE,
                        verbose = FALSE) {
  # Accumulators across the 'times' replications.
  auc <- numeric(times)     # per-replication AUC (preallocated, not grown)
  allMarkers <- character() # pooled marker names across replications
  finalBest <- numeric()    # per-replication optimized criterion value
  # Dummy binding so R CMD check does not flag the NSE use of 'AUC' in aes().
  AUC <- NULL

  # Run calf_internal once per replication on a fresh random subset of
  # 'proportion' of the sample; a for/seq_len loop replaces the original
  # repeat/manual-counter construct.
  out <- NULL
  for (count in seq_len(times)) {
    out <- calf_internal(data,
                         nMarkers,
                         proportion = proportion,
                         randomize = FALSE,
                         targetVector = targetVector,
                         times,
                         margin = margin,
                         optimize = optimize,
                         # reverse = reverse,
                         verbose = verbose)
    auc[count] <- out$auc
    # Only the final replication's selection is retained (overwritten each pass).
    selection <- out$selection
    finalBest <- append(finalBest, out$finalBest)
    allMarkers <- append(allMarkers, as.character(out$selection[, 1]))
  }

  if (times > 1) {
    # Frequency table of how often each marker was chosen across subsets.
    summaryMarkers <- as.data.frame(table(allMarkers), check.names = FALSE)
    colnames(summaryMarkers) <- c("Marker", "Frequency")
    summaryMarkers <- summaryMarkers[order(-summaryMarkers$Frequency), ]
    if (targetVector == "binary") {
      # Histogram of AUCs across replications (binary targets only).
      auc <- as.data.frame(auc)
      colnames(auc) <- "AUC"
      aucHist <- ggplot(auc, aes(AUC)) +
        geom_histogram() +
        ylab("Count") +
        xlab("AUC") +
        scale_x_continuous() +
        theme_bw()
    } else {
      aucHist <- NULL
    }
  } else {
    summaryMarkers <- NULL
    aucHist <- NULL
  }

  # A ROC plot is only produced for a single, binary-target run.
  # Bug fix: scalar if() condition now uses short-circuit && instead of
  # elementwise & (same result here, but && is the correct scalar operator).
  if (times == 1 && targetVector == "binary") {
    rocPlot <- out$rocPlot
  } else {
    rocPlot <- NULL
  }

  est <- list(selection = selection,
              multiple = summaryMarkers,
              auc = auc,
              proportion = proportion,
              targetVec = targetVector,
              aucHist = aucHist,
              times = times,
              finalBest = finalBest,
              rocPlot = rocPlot,
              optimize = optimize)
  class(est) <- "calf_subset"
  return(est)
}
| /R/calf_wrappers.R | no_license | stlane/calf | R | false | false | 10,296 | r | #'@title calf
#'@description Coarse approximation linear function
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "real"). All other columns contain relevant markers.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "real" for target vector with real numbers.
#'@param margin Real number from 0 to 1. Indicates the amount a potential marker must improve the target criterion (Pearson correlation or p-value) in order to add the marker.
#'@param optimize Criteria to optimize if targetVector = "binary." Indicate "pval" to optimize the p-value corresponding to the t-test distinguishing case and control. Indicate "auc" to optimize the AUC.
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The AUC value for the classification
#'@return rocPlot. A plot object from ggplot2 for the receiver operating curve.
#'@examples
#'calf(data = CaseControl, nMarkers = 6, targetVector = "binary")
#'@export
calf <- function(data,
                 nMarkers,
                 targetVector,
                 margin,
                 optimize = "pval",
                 # reverse = FALSE,
                 verbose = FALSE) {
  # Thin wrapper: a single, non-randomized, full-sample run of calf_internal.
  calf_internal(data,
                nMarkers,
                proportion = NULL,
                randomize = FALSE,
                targetVector = targetVector,
                times = 1,
                # Bug fix: forward the user-supplied margin. Previously this
                # was hard-coded to NULL, so the documented 'margin' argument
                # (see roxygen @param above) was silently ignored.
                margin = margin,
                optimize = optimize,
                # reverse = reverse,
                verbose = verbose)
}
#'@title calf_randomize
#'@description Coarse approximation linear function, randomized
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "real"). All other columns contain relevant markers.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param randomize Logical. Indicate TRUE to randomize the case/control status (or real number vector) for each individual. Used to compare results from true data with results from randomized data.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "real" for target vector with real numbers.
#'@param times Numeric. Indicates the number of replications to run with randomization.
#'@param margin Real number from 0 to 1. Indicates the amount a potential marker must improve the target criterion (Pearson correlation or p-value) in order to add the marker.
#'@param optimize Criteria to optimize if targetVector = "binary." Indicate "pval" to optimize the p-value corresponding to the t-test distinguishing case and control. Indicate "auc" to optimize the AUC.
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The AUC value for the classification
#'@return aucHist A histogram of the AUCs across replications.
#'@examples
#'calf_randomize(data = CaseControl, nMarkers = 6, targetVector = "binary", times = 5)
#'@export
calf_randomize <- function(data,
                           nMarkers,
                           randomize = TRUE,
                           targetVector,
                           times = 1,
                           margin = NULL,
                           optimize = "pval",
                           #reverse = FALSE,
                           verbose = FALSE){
  # Accumulators built up across the 'times' replications.
  auc <- numeric()          # per-replication AUC (or fit statistic)
  finalBest <- numeric()    # per-replication optimized criterion value
  allMarkers <- character() # pooled marker names across replications
  count <- 1
  # Dummy binding so R CMD check does not flag the NSE use of 'AUC' in aes().
  AUC = NULL
  # Run calf_internal once per replication; randomization (when enabled)
  # happens inside calf_internal on each call.
  repeat {
    out <- calf_internal(data,
                         nMarkers,
                         proportion = NULL,
                         randomize = randomize,
                         targetVector = targetVector,
                         times,
                         margin = margin,
                         optimize = optimize,
                         # reverse = reverse,
                         verbose = verbose)
    auc[count] <- out$auc
    # Only the final replication's selection is retained (overwritten each pass).
    selection <- out$selection
    markers <- as.character(out$selection[,1])
    finalBest <- append(finalBest, out$finalBest)
    allMarkers <- as.character((append(allMarkers, markers)))
    if (count == times) break
    count <- count + 1
  }
  # With multiple replications, summarize how often each marker was chosen
  # and (for binary targets) plot the distribution of AUCs.
  if (times > 1) {
    summaryMarkers <- as.data.frame(table(allMarkers), check.names = FALSE)
    colnames(summaryMarkers) <- c("Marker", "Frequency")
    summaryMarkers <- summaryMarkers[order(-summaryMarkers$Frequency),]
    if (targetVector == "binary"){
      auc <- as.data.frame(auc)
      colnames(auc) <- "AUC"
      aucHist <- ggplot(auc, aes(AUC)) +
        geom_histogram() +
        ylab("Count") +
        xlab("AUC") +
        scale_x_continuous() +
        theme_bw()
    } else aucHist = NULL
  } else {
    summaryMarkers = NULL
    aucHist = NULL
  }
  # A ROC plot is only produced for a single, binary-target run.
  if (times == 1 & targetVector == "binary") {
    rocPlot <- out$rocPlot
  } else {
    rocPlot <- NULL
  }
  # Assemble the S3 result object consumed by print/plot methods elsewhere.
  est <- list(selection = selection,
              multiple = summaryMarkers,
              auc = auc,
              randomize = randomize,
              targetVec = targetVector,
              aucHist = aucHist,
              times = times,
              finalBest = finalBest,
              rocPlot = rocPlot,
              optimize = optimize,
              # reverse = reverse,
              verbose = verbose)
  class(est) <- "calf_randomize"
  return(est)
}
#'@title calf_subset
#'@description Coarse approximation linear function, randomized
#'@param data Matrix or data frame. First column must contain case/control dummy coded variable (if targetVector = "binary"). Otherwise, first column must contain real number vector corresponding to selection variable (if targetVector = "real"). All other columns contain relevant markers.
#'@param nMarkers Maximum number of markers to include in creation of sum.
#'@param proportion Numeric. A value (where 0 < proportion <= 1) indicating the proportion of cases and controls to use in analysis (if targetVector = "binary"). If targetVector = "real", this is just a proportion of the full sample. Used to evaluate robustness of solution. Defaults to 0.8.
#'@param targetVector Indicate "binary" for target vector with two options (e.g., case/control). Indicate "real" for target vector with real numbers.
#'@param times Numeric. Indicates the number of replications to run with randomization.
#'@param margin Real number from 0 to 1. Indicates the amount a potential marker must improve the target criterion (Pearson correlation or p-value) in order to add the marker.
#'@param optimize Criteria to optimize if targetVector = "binary." Indicate "pval" to optimize the p-value corresponding to the t-test distinguishing case and control. Indicate "auc" to optimize the AUC.
#'@param verbose Logical. Indicate TRUE to print activity at each iteration to console. Defaults to FALSE.
#'@return A data frame containing the chosen markers and their assigned weight (-1 or 1)
#'@return The AUC value for the classification. If multiple replications are requested, this will be a data.frame containing all AUCs across replications.
#'@return aucHist A histogram of the AUCs across replications.
#'@examples
#'calf_subset(data = CaseControl, nMarkers = 6, targetVector = "binary", times = 5)
#'@export
calf_subset <- function(data,
                        nMarkers,
                        proportion = .8,
                        targetVector,
                        times = 1,
                        margin = NULL,
                        optimize = "pval",
                        # reverse = FALSE,
                        verbose = FALSE){
  # Accumulators built up across the 'times' replications.
  auc <- numeric()          # per-replication AUC (or fit statistic)
  allMarkers <- character() # pooled marker names across replications
  finalBest <- numeric()    # per-replication optimized criterion value
  count <- 1
  # Dummy binding so R CMD check does not flag the NSE use of 'AUC' in aes().
  AUC = NULL
  # Run calf_internal once per replication on a fresh random subset of
  # 'proportion' of the sample (subsetting happens inside calf_internal).
  repeat {
    out <- calf_internal(data,
                         nMarkers,
                         proportion = proportion,
                         randomize = FALSE,
                         targetVector = targetVector,
                         times,
                         margin = margin,
                         optimize = optimize,
                         # reverse = reverse,
                         verbose = verbose)
    auc[count] <- out$auc
    # Only the final replication's selection is retained (overwritten each pass).
    selection <- out$selection
    finalBest <- append(finalBest, out$finalBest)
    markers <- as.character(out$selection[,1])
    allMarkers <- as.character((append(allMarkers, markers)))
    if (count == times) break
    count <- count + 1
  }
  # With multiple replications, summarize how often each marker was chosen
  # and (for binary targets) plot the distribution of AUCs.
  if (times > 1){
    summaryMarkers <- as.data.frame(table(allMarkers), check.names = FALSE)
    colnames(summaryMarkers) <- c("Marker", "Frequency")
    summaryMarkers <- summaryMarkers[order(-summaryMarkers$Frequency),]
    if (targetVector == "binary"){
      auc <- as.data.frame(auc)
      colnames(auc) <- "AUC"
      aucHist <- ggplot(auc, aes(AUC)) +
        geom_histogram() +
        ylab("Count") +
        xlab("AUC") +
        scale_x_continuous() +
        theme_bw()
    } else aucHist = NULL
  } else {
    summaryMarkers = NULL
    aucHist = NULL
  }
  # A ROC plot is only produced for a single, binary-target run.
  if (times == 1 & targetVector == "binary") {
    rocPlot <- out$rocPlot
  } else {
    rocPlot <- NULL
  }
  # Assemble the S3 result object consumed by print/plot methods elsewhere.
  est <- list(selection = selection,
              multiple = summaryMarkers,
              auc = auc,
              proportion = proportion,
              targetVec = targetVector,
              aucHist = aucHist,
              times = times,
              finalBest = finalBest,
              rocPlot = rocPlot,
              optimize = optimize)
  class(est) <- "calf_subset"
  return(est)
}
|
#' Load a set of files to objects
#'
#' Loads a set of inputs files to objects in the global environment
#' @param filelist list of strings containing file names; uses the names of the list as the object names
#' @param inputdir file path to the inputs directory (e.g. model$inputdir)
#' @keywords Management
#' @export
loadInputs <- function(filelist, inputdir) {
  # Read each listed file (with data.table::fread, assumed attached by the
  # package) and assign the result into the global environment under the
  # corresponding name in names(filelist).
  # seq_along() is safe for zero-length lists, so the original
  # `if (length(filelist) > 0)` guard around `1:length(filelist)` is no
  # longer needed (1:length() misbehaves when length is 0).
  for (i in seq_along(filelist)) {
    assign(names(filelist)[i],
           fread(file.path(inputdir, filelist[[i]])),
           envir = .GlobalEnv)
  }
  invisible(NULL)
}
| /rFreight/R/loadInputs.R | no_license | CMAP-REPOS/cmap_freight_model | R | false | false | 547 | r | #' Load a set of files to objects
#'
#' Loads a set of inputs files to objects in the global environment
#' @param filelist list of strings containing file names; uses the names of the list as the object names
#' @param inputdir file path to the inputs directory (e.g. model$inputdir)
#' @keywords Management
#' @export
loadInputs <- function (filelist,inputdir) {
if(length(filelist)>0){
for (i in 1:length(filelist)){
assign(names(filelist)[i],fread(file.path(inputdir,filelist[[i]])),envir = .GlobalEnv)
}
}
}
|
## This script will read in a dataset obtained from the National Emissions Inventory database
## and create a plot comparing motor vehicle emissions for Baltimore City compared to LA County from 1999 - 2008.
## The specific emissions data included in this plot is PM 2.5
## PM 2.5 refers to particulate matter 2.5 microns or less in width.
## Exposure to PM 2.5 can contribute to acute symptoms, such as eye, nose, and throat irritation, and
## asthma, chronic respiratory disease, etc.
## This script is 6 / 6 completed for Course Project 2 in the Coursera course:
## Exploratory Data Analysis in R, through Johns Hopkins University
## Instructor: Roger Peng
## To Run
# setwd("ExData_Plotting2")
# source("plot6.R")
# Load libraries
library(plyr) # need this library for join
library(ggplot2) # need this for plot
# Read in data
# Data frame with all PM2.5 emissions data for 1999, 2002, 2005, and 2008.
# For each year the table contains number of tons of PM2.5 emitted from
# a specific type of source for the entire year.
# NOTE(review): the .rds files are read from the current working directory;
# run this script from the folder containing them (see "To Run" above).
NEI <- readRDS("summarySCC_PM25.rds")
# Columns in NEI data frame:
# fips: A five digit number indicating US county
# SCC: The name of the source as indicated by a digit string
# Pollutant: A string indicating the pollutant
# Emissions: Amount of PM2.5 emitted, in tons
# type: The type of source (point, non-point, on-road, or non-road)
# year: The year of emissions recorded
# Data frame with the source classification codes for emissions sources.
# This data frame can be used to map the SCC digit strings in the emissions
# table to the actual name of the PM2.5 source.
SCC <- readRDS("Source_Classification_Code.rds")
# Columns in SCC data frame: only SCC.Level.Two (source category description)
# is used below, to identify highway-vehicle sources.
# Join SCC data to NEI data (plyr::join, matched on the shared "SCC" key).
NEI.w.SCC <- join(NEI, SCC, by = "SCC")
# Answer question 6
# Compare emissions from motor vehicle sources in Baltimore City with emissions from motor vehicle sources in Los Angeles County, California (fips == "06037"). Which city has seen greater changes over time in motor vehicle emissions?
# To answer this question, we first need to subset our data to include only
# PM2.5 emissions from motor vehicle sources
# Before we can do this, we must first define a motor vehicle source, then
# decide how to identify it in the data.
# I am using this definition for motor vehicle:
# "A motor vehicle is a self-propelled road vehicle, commonly wheeled, that does not operate on rails."
# This definition includes only on-road (highway) vehicles, not off-road (off-highway) vehicles, farm equipment, etc.
# After inspecting the data it appears the simplest way to identify motor vehicles is to use
# SCC.Level.Two, because this column contains two categories that identify on-road vehicles:
# "Highway Vehicles - Diesel"
# "Highway Vehicles - Gasoline"
# So, based on the above definition, we will subset the full data set, based on records where:
# SCC.Level.Two = "Highway Vehicles - Diesel" or "Highway Vehicles - Gasoline"
# Subset to on-road (highway) motor vehicle sources only, per the definition
# discussed above. %in% replaces the equivalent which(a == x | a == y) form
# and treats NA matches as FALSE rather than dropping them via which().
motor.vehicle.subset <- NEI.w.SCC[
  NEI.w.SCC$SCC.Level.Two %in% c("Highway Vehicles - Diesel",
                                 "Highway Vehicles - Gasoline"), ]

# Keep only Baltimore City (fips 24510) and Los Angeles County (fips 06037).
motor.vehicle.subset.BaltimoreC.LA <-
  motor.vehicle.subset[motor.vehicle.subset$fips %in% c("24510", "06037"), ]

# Lookup table mapping fips codes to descriptive facet labels for plot 6.
# Bug fix: built directly as a data.frame with stringsAsFactors = FALSE.
# The original as.data.frame(cbind(...)) silently converted both character
# columns to factors on R < 4.0, which is fragile when joining on 'fips'.
facet.labels <- data.frame(fips = c("24510", "06037"),
                           facet.names = c("Baltimore City",
                                           "Los Angeles County"),
                           stringsAsFactors = FALSE)

# Join the facet labels to the two-county subset by fips code; the result is
# used in the plot, faceted by facet.names.
plot6.data <- join(motor.vehicle.subset.BaltimoreC.LA, facet.labels, by = "fips")
# Create a bar plot for plot 6 that shows total emissions from motor vehicle sources by year
# Bar plot of total motor-vehicle PM2.5 emissions by year, one facet per county.
plot6 <- ggplot(plot6.data, aes(as.character(year), Emissions, fill = facet.names)) +
  # Black-on-white theme instead of the default gray background.
  theme_bw() +
  # Stacked bars of raw Emissions values per year.
  geom_bar(stat = "identity") +
  # Qualitative color-brewer palette for the bar fill.
  scale_fill_brewer(type = "qual", palette = 6) +
  # One facet per county, labelled with the descriptive facet.names column.
  facet_wrap(~ facet.names) +
  ggtitle ("PM 2.5 Emissions from Motor Vehicles 1999 - 2008 :\n Baltimore City vs. Los Angeles County") +
  ylab("PM 2.5 Emissions (tons)") +
  xlab("Year") +
  theme(axis.title.x = element_text(face="bold", size = 10, vjust = -0.5),
        axis.title.y = element_text(face="bold", size = 10),
        plot.title= element_text(face="bold", size=14, vjust = 2),
        axis.text = element_text(face="plain", size=10),
        strip.text.x = element_text(face="bold", size = 10),
        # Bug fix: was legend.position = "non", which is not a valid value;
        # "none" is required to actually suppress the legend.
        legend.position = "none")
# Print plot 6, then save the last-rendered plot to disk.
plot6
ggsave("plot6.png", width = 6, height = 6.5, dpi=150)
# End of Plot 6
# Plot 6 shows that Los Angeles County has seen a greater change in PM 2.5 emissions from motor
# vehicles over 1999 - 2008. When compared with Baltimore City, Los Angeles County started
# with significantly higher levels of PM 2.5 emissions from motor vehicles in 1999,
# nearly 4,000 tons for LA county vs about 350 tons for Baltimore City.
# PM 2.5 emissions in LA County increased to a maximum in 2005, before declining
# to a level that was slightly higher than PM 2.5 emissions from motor vehicles in 1999.
# This compares to Baltimore City, which saw a steady decline in PM 2.5 emissions from motor
# vehicles over the same time period.
# The end
| /plot6.R | no_license | daileya5/ExData_Plotting2 | R | false | false | 6,391 | r | ## This script will read in a dataset obtained from the National Emissions Inventory database
## and create a plot comparing motor vehicle emissions for Baltimore City compared to LA County from 1999 - 2008.
## The specific emissions data included in this plot is PM 2.5
## PM 2.5 refers to particulate matter 2.5 microns or less in width.
## Exposure to PM 2.5 can contribute to acute symptoms, such as eye, nose, and throat irritation, and
## asthma, chronic respiratory disease, etc.
## This script is 6 / 6 completed for Course Project 2 in the Coursera course:
## Exploratory Data Analysis in R, through Johns Hopkins University
## Instructor: Roger Peng
## To Run
# setwd("ExData_Plotting2")
# source("plot6.R")
# Load libraries
library(plyr) # need this library for join
library(ggplot2) # need this for plot
# Read in data
# Data frame with all PM2.5 emissions data for 1999, 2002, 2005, and 2008.
# For each year the table contains number of tons of PM2.5 emitted from
# a specific type of source for the entire year.
# NOTE(review): the .rds files are read from the current working directory;
# run this script from the folder containing them (see "To Run" above).
NEI <- readRDS("summarySCC_PM25.rds")
# Columns in NEI data frame:
# fips: A five digit number indicating US county
# SCC: The name of the source as indicated by a digit string
# Pollutant: A string indicating the pollutant
# Emissions: Amount of PM2.5 emitted, in tons
# type: The type of source (point, non-point, on-road, or non-road)
# year: The year of emissions recorded
# Data frame with the source classification codes for emissions sources.
# This data frame can be used to map the SCC digit strings in the emissions
# table to the actual name of the PM2.5 source.
SCC <- readRDS("Source_Classification_Code.rds")
# Columns in SCC data frame: only SCC.Level.Two (source category description)
# is used below, to identify highway-vehicle sources.
# Join SCC data to NEI data (plyr::join, matched on the shared "SCC" key).
NEI.w.SCC <- join(NEI, SCC, by = "SCC")
# Answer question 6
# Compare emissions from motor vehicle sources in Baltimore City with emissions from motor vehicle sources in Los Angeles County, California (fips == "06037"). Which city has seen greater changes over time in motor vehicle emissions?
# To answer this question, we first need to subset our data to include only
# PM2.5 emissions from motor vehicle sources
# Before we can do this, we must first define a motor vehicle source, then
# decide how to identify it in the data.
# I am using this definition for motor vehicle:
# "A motor vehicle is a self-propelled road vehicle, commonly wheeled, that does not operate on rails."
# This definition includes only on-road (highway) vehicles, not off-road (off-highway) vehicles, farm equipment, etc.
# After inspecting the data it appears the simplest way to identify motor vehicles is to use
# SCC.Level.Two, because this column contains two categories that identify on-road vehicles:
# "Highway Vehicles - Diesel"
# "Highway Vehicles - Gasoline"
# So, based on the above definition, we will subset the full data set, based on records where:
# SCC.Level.Two = "Highway Vehicles - Diesel" or "Highway Vehicles - Gasoline"
# Subset to on-road (highway) motor vehicle sources only, per the definition
# discussed above. %in% replaces the equivalent which(a == x | a == y) form
# and treats NA matches as FALSE rather than dropping them via which().
motor.vehicle.subset <- NEI.w.SCC[
  NEI.w.SCC$SCC.Level.Two %in% c("Highway Vehicles - Diesel",
                                 "Highway Vehicles - Gasoline"), ]

# Keep only Baltimore City (fips 24510) and Los Angeles County (fips 06037).
motor.vehicle.subset.BaltimoreC.LA <-
  motor.vehicle.subset[motor.vehicle.subset$fips %in% c("24510", "06037"), ]

# Lookup table mapping fips codes to descriptive facet labels for plot 6.
# Bug fix: built directly as a data.frame with stringsAsFactors = FALSE.
# The original as.data.frame(cbind(...)) silently converted both character
# columns to factors on R < 4.0, which is fragile when joining on 'fips'.
facet.labels <- data.frame(fips = c("24510", "06037"),
                           facet.names = c("Baltimore City",
                                           "Los Angeles County"),
                           stringsAsFactors = FALSE)

# Join the facet labels to the two-county subset by fips code; the result is
# used in the plot, faceted by facet.names.
plot6.data <- join(motor.vehicle.subset.BaltimoreC.LA, facet.labels, by = "fips")
# Create a bar plot for plot 6 that shows total emissions from motor vehicle sources by year
# Bar plot of total motor-vehicle PM2.5 emissions by year, one facet per county.
plot6 <- ggplot(plot6.data, aes(as.character(year), Emissions, fill = facet.names)) +
  # Black-on-white theme instead of the default gray background.
  theme_bw() +
  # Stacked bars of raw Emissions values per year.
  geom_bar(stat = "identity") +
  # Qualitative color-brewer palette for the bar fill.
  scale_fill_brewer(type = "qual", palette = 6) +
  # One facet per county, labelled with the descriptive facet.names column.
  facet_wrap(~ facet.names) +
  ggtitle ("PM 2.5 Emissions from Motor Vehicles 1999 - 2008 :\n Baltimore City vs. Los Angeles County") +
  ylab("PM 2.5 Emissions (tons)") +
  xlab("Year") +
  theme(axis.title.x = element_text(face="bold", size = 10, vjust = -0.5),
        axis.title.y = element_text(face="bold", size = 10),
        plot.title= element_text(face="bold", size=14, vjust = 2),
        axis.text = element_text(face="plain", size=10),
        strip.text.x = element_text(face="bold", size = 10),
        # Bug fix: was legend.position = "non", which is not a valid value;
        # "none" is required to actually suppress the legend.
        legend.position = "none")
# Print plot 6, then save the last-rendered plot to disk.
plot6
ggsave("plot6.png", width = 6, height = 6.5, dpi=150)
# End of Plot 6
# Plot 6 shows that Los Angeles County has seen a greater change in PM 2.5 emissions from motor
# vehicles over 1999 - 2008. When compared with Baltimore City, Los Angeles County started
# with significantly higher levels of PM 2.5 emissions from motor vehicles in 1999,
# nearly 4,000 tons for LA county vs about 350 tons for Baltimore City.
# PM 2.5 emissions in LA County increased to a maximum in 2005, before declining
# to a level that was slightly higher than PM 2.5 emissions from motor vehicles in 1999.
# This compares to Baltimore City, which saw a steady decline in PM 2.5 emissions from motor
# vehicles over the same time period.
# The end
|
# Used in case we need to special-case packages what packages are cached
# Hook for special-casing which packages may be cached; currently every
# package is considered cacheable and 'package' is unused.
isCacheable <- function(package) TRUE
# TRUE only when the project's "use.cache" packrat option is explicitly TRUE
# (isTRUE() guards against NULL / NA / non-logical option values).
isUsingCache <- function(project) {
  useCache <- get_opts("use.cache", project = project)
  isTRUE(useCache)
}
# We assume 'path' is the path to a DESCRIPTION file
#' @importFrom tools md5sum
# Compute a stable hash for an installed package from its DESCRIPTION file.
# Used as the per-package cache key, so the exact serialization below is
# cache-compatibility-critical: changing field selection, ordering, or
# normalization would invalidate every existing cache entry.
# 'path' is assumed to be the path to a DESCRIPTION file.
hash <- function(path) {
  if (!file.exists(path))
    stop("No DESCRIPTION file at path '", path, "'!")
  # Package name is the directory containing DESCRIPTION (used in warnings).
  pkgName <- basename(dirname(path))
  DESCRIPTION <- as.data.frame(readDcf(path), stringsAsFactors = FALSE)
  # If we already have a GitHub SHA1, just use that -- it uniquely
  # identifies the source, so no field-based hashing is needed.
  if ("GithubSHA1" %in% names(DESCRIPTION))
    return(DESCRIPTION$GithubSHA1)
  # TODO: Do we want the 'Built' field used for hashing? The main problem with using that is
  # it essentially makes packages installed from source un-recoverable, since they will get
  # built transiently and installed (and so that field could never be replicated).
  # Only these fields participate in the hash.
  fields <- c("Package", "Version", "Depends", "Imports", "Suggests", "LinkingTo")
  sub <- DESCRIPTION[names(DESCRIPTION) %in% fields]
  # Handle LinkingTo specially -- we need to discover what version of packages in LinkingTo
  # were actually linked against in order to properly disambiguate e.g. httpuv 1.0 linked
  # against Rcpp 0.11.2 and httpuv 1.0 linked against Rcpp 0.11.2.1
  # TODO: It would really be best if, on installation, we recorded what version of LinkingTo
  # packages were actually linked to, in case that package is not available in the library
  # (or, even worse, is actually a different version!)
  # Split the comma-separated LinkingTo field and strip version qualifiers
  # like " (>= 0.11.0)" plus surrounding whitespace.
  linkingToField <- unlist(strsplit(as.character(sub[["LinkingTo"]]), "\\s*,\\s*"))
  linkingToPkgs <- gsub("\\s*\\(.*", "", linkingToField)
  linkingToPkgs <- gsub("^\\s*(.*?)\\s*$", "\\1", linkingToPkgs, perl = TRUE)
  # Recursively hash each LinkingTo package's installed DESCRIPTION
  # (NOTE: the inner 'DESCRIPTION' deliberately shadows the outer one).
  linkingToHashes <- lapply(linkingToPkgs, function(x) {
    DESCRIPTION <- system.file("DESCRIPTION", package = x)
    if (!file.exists(DESCRIPTION)) return(NULL) ## warn later
    else hash(DESCRIPTION)
  })
  # Warn (but do not fail) for LinkingTo packages that are not installed;
  # their contribution to the hash is simply dropped below.
  missingLinkingToPkgs <- linkingToPkgs[vapply(linkingToHashes, is.null, logical(1))]
  if (length(missingLinkingToPkgs)) {
    warning("The following packages specified in the LinkingTo field for package '",
            pkgName,
            "' are unavailable:\n- ",
            paste(shQuote(missingLinkingToPkgs), collapse = ", "),
            "\nThese packages are required to be installed when attempting to hash this package for caching.",
            call. = FALSE)
  }
  # dropNull() is a packrat-internal helper defined elsewhere in the package.
  linkingToHashes <- dropNull(linkingToHashes)
  # Normalize for hashing (whitespace stripped, fields concatenated) and
  # append the LinkingTo hashes, then md5 the resulting string via a
  # temporary file (tools::md5sum hashes files, not strings).
  ready <- normalizeForHash(sub)
  ready <- paste0(ready, do.call(paste0, linkingToHashes))
  # NOTE: local 'tempfile' shadows the base function of the same name.
  tempfile <- tempfile()
  cat(ready, file = tempfile)
  result <- md5sum(tempfile)
  unlink(tempfile)
  # md5sum returns NA (rather than erroring) when hashing fails.
  if (is.na(result)) stop("Failed to hash file!")
  # Drop the file-path name attached by md5sum.
  unname(result)
}
# Canonicalize an arbitrary list/vector for hashing: flatten it, concatenate
# all elements, and strip every whitespace character so that formatting
# differences in DESCRIPTION fields do not perturb the hash.
normalizeForHash <- function(item) {
  collapsed <- paste(unlist(item), collapse = "")
  gsub("[[:space:]]", "", collapsed)
}
moveInstalledPackagesToCache <- function(project = NULL) {
  # Resolve the project directory once (the original called getProjectDir()
  # twice; the second call was redundant dead code).
  project <- getProjectDir(project)

  # Only migrate packages when the packrat cache is enabled for this project.
  if (!isUsingCache(project)) return(invisible())

  if (!file.exists(cacheLibDir()))
    dir.create(cacheLibDir(), recursive = TRUE)

  ## All directories within the 'lib' directory which are not symlinks are
  ## freshly installed packages and may need to be moved into the cache.
  installedPkgPaths <- list.files(libDir(project), full.names = TRUE)
  if (!length(installedPkgPaths)) return(invisible())

  # vapply (type-stable) replaces sapply; the unused cachedPkgPaths /
  # cachedPkgHashes listings from the original were dropped as dead code.
  needsMove <- installedPkgPaths[!vapply(installedPkgPaths, is.symlink, logical(1))]

  ## For each installed package that is not a symlink, migrate it to the cache.
  for (package in needsMove) {
    # 'pkgHash' avoids shadowing the hash() function itself.
    pkgHash <- hash(file.path(package, "DESCRIPTION"))

    ## Copy into the cache only if this package/hash isn't cached yet.
    if (!file.exists(cacheLibDir(basename(package), pkgHash))) {
      dir_copy(
        package,
        cacheLibDir(basename(package), pkgHash)
      )
    }

    ## Replace the local package directory with a symlink into the cache.
    if (!is.symlink(package)) unlink(package, recursive = TRUE)
    symlink(
      normalizePath(cacheLibDir(basename(package), pkgHash)),
      package
    )
  }
}
# Pull out cached package information from the DESCRIPTION
# Pull out cached package information from each cached DESCRIPTION.
# Returns a list keyed by package name; each element is a list keyed by hash,
# whose elements are the (optionally field-filtered) DESCRIPTION contents.
cachedPackages <- function(cacheDir = cacheLibDir(), fields = NULL) {
  pkgCachePaths <- list.files(cacheDir, full.names = TRUE)
  # For each cached package, list its per-hash subdirectories.
  pkgPaths <- setNames(lapply(pkgCachePaths, function(x) {
    list.files(x, full.names = TRUE)
  }), basename(pkgCachePaths))
  lapply(pkgPaths, function(hashedPath) {
    result <- setNames(lapply(hashedPath, function(path) {
      as.list(readDcf(file.path(path, "DESCRIPTION"), all = TRUE))
    }), basename(hashedPath))
    if (!is.null(fields)) {
      # Bug fix: 'fields' are DESCRIPTION field names, so they must subset
      # each per-hash record. The original 'result[fields]' indexed the
      # hash-keyed list itself by field names, yielding NULL entries
      # (mis)named after the requested fields.
      lapply(result, function(record) record[fields])
    } else {
      result
    }
  })
}
listCachedPackages <- cachedPackages
# Delete the entire packrat package cache. When 'ask' is TRUE, prompts the
# user interactively and aborts unless the answer starts with "y"/"Y".
clearPackageCache <- function(cacheDir = cacheLibDir(), ask = TRUE) {
  if (ask) {
    message("The packrat cache directory was resolved to:\n- ",
            shQuote(cacheDir))
    response <- readline("Are you sure you want to clear the packrat cache? [Y/n]: ")
    confirmed <- tolower(substring(response, 1, 1)) == "y"
    if (!confirmed) {
      message("Operation aborted.")
      return(invisible(NULL))
    }
  }
  unlink(cacheDir, recursive = TRUE)
}
# Delete the cached copies of the given packages.
#
# Removes each package's directory (and hence every hashed build of it)
# from the package cache. Returns a list of 'unlink' status codes, one
# per package (0 on success).
deletePackagesFromCache <- function(packages, cacheDir = cacheLibDir()) {
  lapply(file.path(cacheDir, packages), unlink, recursive = TRUE)
}
| /R/cache.R | no_license | rbdixon/packrat | R | false | false | 5,558 | r | # Used in case we need to special-case packages what packages are cached
# Hook for special-casing which packages may live in the packrat cache.
# Currently every package is considered cacheable, so the 'package'
# argument is accepted but unused.
isCacheable <- function(package) TRUE
# Is the packrat cache enabled for this project?
# Reads the project-level "use.cache" option; anything other than TRUE
# (including NULL when the option is unset) disables the cache.
isUsingCache <- function(project) {
  useCache <- get_opts("use.cache", project = project)
  isTRUE(useCache)
}
# We assume 'path' is the path to a DESCRIPTION file.
#
# Computes a stable identifier for a package build: the GithubSHA1 field
# when present, otherwise an md5 checksum over the package's identity and
# dependency fields plus the hashes of the packages it links against.
#' @importFrom tools md5sum
hash <- function(path) {
  if (!file.exists(path))
    stop("No DESCRIPTION file at path '", path, "'!")
  pkgName <- basename(dirname(path))
  DESCRIPTION <- as.data.frame(readDcf(path), stringsAsFactors = FALSE)
  # If we already have a GitHub SHA1, just use that
  if ("GithubSHA1" %in% names(DESCRIPTION))
    return(DESCRIPTION$GithubSHA1)
  # TODO: Do we want the 'Built' field used for hashing? The main problem with using that is
  # it essentially makes packages installed from source un-recoverable, since they will get
  # built transiently and installed (and so that field could never be replicated).
  fields <- c("Package", "Version", "Depends", "Imports", "Suggests", "LinkingTo")
  ## Renamed from 'sub' to avoid shadowing base::sub
  descSubset <- DESCRIPTION[names(DESCRIPTION) %in% fields]
  # Handle LinkingTo specially -- we need to discover what version of packages in LinkingTo
  # were actually linked against in order to properly disambiguate e.g. httpuv 1.0 linked
  # against Rcpp 0.11.2 and httpuv 1.0 linked against Rcpp 0.11.2.1
  # TODO: It would really be best if, on installation, we recorded what version of LinkingTo
  # packages were actually linked to, in case that package is not available in the library
  # (or, even worse, is actually a different version!)
  linkingToField <- unlist(strsplit(as.character(descSubset[["LinkingTo"]]), "\\s*,\\s*"))
  # Strip version requirements (e.g. '(>= 1.0)') and surrounding whitespace
  linkingToPkgs <- gsub("\\s*\\(.*", "", linkingToField)
  linkingToPkgs <- gsub("^\\s*(.*?)\\s*$", "\\1", linkingToPkgs, perl = TRUE)
  # Recursively hash the installed copy of each LinkingTo package
  linkingToHashes <- lapply(linkingToPkgs, function(x) {
    DESCRIPTION <- system.file("DESCRIPTION", package = x)
    if (!file.exists(DESCRIPTION)) return(NULL) ## warn later
    else hash(DESCRIPTION)
  })
  missingLinkingToPkgs <- linkingToPkgs[vapply(linkingToHashes, is.null, logical(1))]
  if (length(missingLinkingToPkgs)) {
    warning("The following packages specified in the LinkingTo field for package '",
            pkgName,
            "' are unavailable:\n- ",
            paste(shQuote(missingLinkingToPkgs), collapse = ", "),
            "\nThese packages are required to be installed when attempting to hash this package for caching.",
            call. = FALSE)
  }
  linkingToHashes <- dropNull(linkingToHashes)
  # Normalize for hashing and add in the linkingTo hashes as well
  ready <- normalizeForHash(descSubset)
  ready <- paste0(ready, do.call(paste0, linkingToHashes))
  # Write the normalized content to a temp file and md5 it. The temp file
  # is cleaned up via on.exit so it is removed even if md5sum() errors;
  # the local is also no longer named 'tempfile' (shadowed base::tempfile).
  hashFile <- tempfile()
  on.exit(unlink(hashFile), add = TRUE)
  cat(ready, file = hashFile)
  result <- md5sum(hashFile)
  if (is.na(result)) stop("Failed to hash file!")
  unname(result)
}
# Flatten an object into a single whitespace-free string so that pure
# formatting differences (line wrapping, spacing) in DESCRIPTION fields
# do not change the resulting hash.
normalizeForHash <- function(item) {
  flattened <- paste(unlist(item), collapse = "")
  gsub("[[:space:]]", "", flattened)
}
# Migrate freshly-installed packages from the project library into the
# global packrat cache, replacing each local copy with a symlink into the
# cache. No-op when the cache is disabled or the library is empty.
moveInstalledPackagesToCache <- function(project = NULL) {
  project <- getProjectDir(project)
  # Only do this if we're actually using the packrat cache
  if (!isUsingCache(project)) return(invisible())
  if (!file.exists(cacheLibDir()))
    dir.create(cacheLibDir(), recursive = TRUE)
  ## All directories within the 'lib' directory which are not symlinks are fresh
  ## and may need to be moved
  installedPkgPaths <- list.files(libDir(project), full.names = TRUE)
  if (!length(installedPkgPaths)) return(invisible())
  ## vapply (rather than sapply) guarantees a logical result even for
  ## edge-case inputs
  isFresh <- vapply(installedPkgPaths, Negate(is.symlink), logical(1))
  needsMove <- installedPkgPaths[isFresh]
  ## for each package installed that is not a symlink, we migrate it to the cache
  for (package in needsMove) {
    ## 'pkgHash' (not 'hash') so the local does not shadow the hash()
    ## function itself
    pkgHash <- hash(file.path(package, "DESCRIPTION"))
    ## if the package doesn't exist in the cache, copy it there
    if (!file.exists(cacheLibDir(basename(package), pkgHash))) {
      dir_copy(
        package,
        cacheLibDir(basename(package), pkgHash)
      )
    }
    ## replace the local package with a symlink into the cache
    if (!is.symlink(package)) unlink(package, recursive = TRUE)
    symlink(
      normalizePath(cacheLibDir(basename(package), pkgHash)),
      package
    )
  }
}
# Pull out cached package information from the DESCRIPTION of each cached
# package. Returns a nested list keyed first by package name, then by hash;
# each leaf holds the parsed DESCRIPTION fields as a list. When 'fields' is
# supplied, each DESCRIPTION is restricted to those fields.
cachedPackages <- function(cacheDir = cacheLibDir(), fields = NULL) {
  ## One directory per cached package ...
  pkgCachePaths <- list.files(cacheDir, full.names = TRUE)
  ## ... each containing one sub-directory per hashed build
  pkgPaths <- setNames(lapply(pkgCachePaths, function(x) {
    list.files(x, full.names = TRUE)
  }), basename(pkgCachePaths))
  lapply(pkgPaths, function(hashedPath) {
    result <- setNames(lapply(hashedPath, function(path) {
      as.list(readDcf(file.path(path, "DESCRIPTION"), all = TRUE))
    }), basename(hashedPath))
    if (!is.null(fields)) {
      ## Subset each DESCRIPTION by the requested field names. Indexing
      ## 'result' itself (the previous behaviour) looked 'fields' up among
      ## the hash names, which always produced NULL entries.
      lapply(result, function(desc) desc[fields])
    } else {
      result
    }
  })
}
# User-facing alias for cachedPackages().
listCachedPackages <- cachedPackages
# Remove the entire packrat package cache directory.
#
# When 'ask' is TRUE the user is prompted first. The prompt advertises
# '[Y/n]', i.e. "yes" is the default, so an empty reply now proceeds
# (previously an empty reply aborted, contradicting the prompt).
clearPackageCache <- function(cacheDir = cacheLibDir(), ask = TRUE) {
  if (ask) {
    message("The packrat cache directory was resolved to:\n- ",
            shQuote(cacheDir))
    msg <- "Are you sure you want to clear the packrat cache? [Y/n]: "
    response <- readline(msg)
    ## Empty answer == accept the advertised default ("Y"); anything not
    ## starting with y/Y aborts.
    answer <- tolower(substring(response, 1, 1))
    if (!(answer %in% c("", "y"))) {
      message("Operation aborted.")
      return(invisible(NULL))
    }
  }
  unlink(cacheDir, recursive = TRUE)
}
# Delete the cached copies of the given packages.
#
# Removes each package's directory (and hence every hashed build of it)
# from the package cache. Returns a list of 'unlink' status codes, one
# per package (0 on success).
deletePackagesFromCache <- function(packages, cacheDir = cacheLibDir()) {
  lapply(file.path(cacheDir, packages), unlink, recursive = TRUE)
}
|
# Test suite label for the package's input-checking helpers.
context('functions')
# check_data() is expected to signal an error on this degenerate input:
# matrix() with no arguments is a 1x1 matrix holding a single NA.
# NOTE(review): check_data() is defined elsewhere in the package; exact
# validation rules are not visible from this file.
test_that('check_data', {
  test <- matrix()
  expect_error(check_data(test, 1))
})
| /tests/testthat/test-check_data.R | no_license | cran/crseEventStudy | R | false | false | 116 | r | context('functions')
# check_data() is expected to signal an error on this degenerate input:
# matrix() with no arguments is a 1x1 matrix holding a single NA.
# NOTE(review): check_data() is defined elsewhere in the package; exact
# validation rules are not visible from this file.
test_that('check_data', {
  test <- matrix()
  expect_error(check_data(test, 1))
})
|
######page_no_100####
rm(list=ls())
bill<-c(42.19,38.45,29.23,89.35,118.04,110.46,0,72.88,83.05,95.73,103.15,94.52,26.84,93.93,90.26,72.78,101.36,104.8,74.01,56.01,39.21,48.54,93.31,104.88,30.61,22.57,63.7,108.84,6.45,16.47,89.5,13.36,44.16,92.97,99.56,92.62,78.89,87.71,93.57,0,75.71,88.62,99.5,85,0,8.41,70.48,92.88,3.2,115.5,2.42,1.08,76.79,13.62,88.51,55.99,12.24,119.63,23.31,11.05,8.37,7.18,11.07,1.47,26.4,13.26,21.13,95.03,29.04,5.42,77.21,72.47,0,5.64,6.48,6.95,19.6,8.11,9.01,84.77,1.62,91.1,10.88,30.62,100.05,26.97,15.43,29.25,1.88,16.44,109.08,2.45,21.97,17.12,19.7,6.93,10.05,99.03,29.24,15.21,28.77,9.12,118.75,0,13.95,14.34,79.52,2.72,9.63,21.34,104.4,2.88,65.9,20.55,3.43,10.44,21.36,24.42,95.52,6.72,35.32,117.69,106.84,8.4,90.04,3.85,91.56,10.13,5.72,33.69,115.78,0.98,19.45,0,27.21,89.27,14.49,92.17,21,106.59,13.9,9.22,109.94,10.7,0,11.27,72.02,7.74,5.04,33.4,6.95,6.48,11.64,83.26,15.42,24.49,89.13,111.14,92.64,53.9,114.67,27.57,64.78,45.81,56.04,20.39,31.77,94.67,44.32,3.69,19.34,13.54,18.89,1.57,0,5.2,2.8,5.1,3.03,9.16,15.3,75.49,68.69,35,9.12,18.49,84.12,13.68,20.84,100.04,112.94,20.12,53.21,15.3,49.24,9.44,2.67,4.69,41.38,45.77)
median(bill) | /Managerial_Statistics_by_Gerald_Keller/CH4/EX4.4/Ex4_4.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 1,175 | r | ######page_no_100####
# Textbook exercise (page 100): median of a sample of monthly bills.
# NOTE(review): rm(list=ls()) erases every object in the user's global
# environment as a side effect; discouraged outside throw-away scripts.
rm(list=ls())
# Monthly bill amounts for the sample (presumably dollars - per the textbook exercise)
bill<-c(42.19,38.45,29.23,89.35,118.04,110.46,0,72.88,83.05,95.73,103.15,94.52,26.84,93.93,90.26,72.78,101.36,104.8,74.01,56.01,39.21,48.54,93.31,104.88,30.61,22.57,63.7,108.84,6.45,16.47,89.5,13.36,44.16,92.97,99.56,92.62,78.89,87.71,93.57,0,75.71,88.62,99.5,85,0,8.41,70.48,92.88,3.2,115.5,2.42,1.08,76.79,13.62,88.51,55.99,12.24,119.63,23.31,11.05,8.37,7.18,11.07,1.47,26.4,13.26,21.13,95.03,29.04,5.42,77.21,72.47,0,5.64,6.48,6.95,19.6,8.11,9.01,84.77,1.62,91.1,10.88,30.62,100.05,26.97,15.43,29.25,1.88,16.44,109.08,2.45,21.97,17.12,19.7,6.93,10.05,99.03,29.24,15.21,28.77,9.12,118.75,0,13.95,14.34,79.52,2.72,9.63,21.34,104.4,2.88,65.9,20.55,3.43,10.44,21.36,24.42,95.52,6.72,35.32,117.69,106.84,8.4,90.04,3.85,91.56,10.13,5.72,33.69,115.78,0.98,19.45,0,27.21,89.27,14.49,92.17,21,106.59,13.9,9.22,109.94,10.7,0,11.27,72.02,7.74,5.04,33.4,6.95,6.48,11.64,83.26,15.42,24.49,89.13,111.14,92.64,53.9,114.67,27.57,64.78,45.81,56.04,20.39,31.77,94.67,44.32,3.69,19.34,13.54,18.89,1.57,0,5.2,2.8,5.1,3.03,9.16,15.3,75.49,68.69,35,9.12,18.49,84.12,13.68,20.84,100.04,112.94,20.12,53.21,15.3,49.24,9.44,2.67,4.69,41.38,45.77)
# Median bill of the sample (printed to the console)
median(bill)
#' Sampling matrix for 'monitoring' spawners
#'
#' This function computes the sampling design over multiple years and streams
#' given either constant or changing monitoring coverage (proportion of streams
#' that are monitored each year). In the case of declining coverage, a start
#' and end year for the decline can be specified.
#' Note: in it's current form this assumed the same proportion of indicator and
#' non-indicator streams are monitored each year, but this could be revised
#' if desired.
#'
#'
#' @param ppnSampled The initial proportion of streams that are monitored.
#' If there is no change, then this is the constant proportion among years.
#' @param nPop Number of subpopulations (indicator + non-indicator)
#' @param simYears Number of years to return monitoring coverage.
#' @param ppnChange The change in the proportion of streams that are monitored.
#' This can be positive or negative, but care should be taken that the
#' \code{ppnSampled + ppnChange} is not less than zero or greater than one.
#' Otherwise, the function will return an error.
#' @param samplingDeclStart The year that the first change in sampling effort
#' began.
#' @param samplingDeclEnd The last year that had a change in sampling effort.
#' @param gen The number of different ages that fish return at. Used to
#' distinguish the initialization years when calculating the rows in which
#' \code{samplingDeclStart} and \code{samplingDeclEnd} should be applied.
#' @return Returns a matrix of zeroes and ones, with the number of rows equal
#' to \code{simYears} and the number of columns equal to \code{nPop}.
#' Zeroes indicate that subpopulation was not monitored in that year, wheras
#' ones indicate that the subpopulation was monitored in the given year.
#'
#' @examples
#' Example based on historic monitoring effort and recent declines reported for
#' chum CUs on BC's central coast from the Pacific Salmon Explorer.
#'
#' samplingDesign(ppnSampled = 0.85, nPop = 35, simYears = 57,
#' ppnChange = -0.18, samplingDeclStart = 40, samplingDeclEnd = 50, gen = 5)
#'
#' @export
samplingDesign <- function(ppnSampled, nPop, simYears, ppnChange = 0,
samplingDeclStart = NULL,
samplingDeclEnd = samplingDeclStart,
gen = NULL){
if (ppnChange == 0) { # If ppnSampled is constant
sampled <- matrix(sample(x = c(0,1), size = nPop*simYears, replace = TRUE,
prob = c(1-ppnSampled, ppnSampled)),
nrow = simYears, ncol = nPop)
} else {
# Calculate ppnSampled for each year
ppnSampled.change <- rep(ppnSampled, simYears)
# If decline happens over time, interpolate change:
if ((samplingDeclEnd - samplingDeclStart) > 0) {
ppnSampled.change[samplingDeclStart : samplingDeclEnd] <-
ppnSampled + approx(
x = c(0, ppnChange),
n = samplingDeclEnd - samplingDeclStart + 2)$y[2:(samplingDeclEnd - samplingDeclStart + 2)]
}
# After change remain at new ppnSampled:
ppnSampled.change[samplingDeclEnd : simYears] <- ppnSampled + ppnChange
# If any ppnSampled is less than 0 or greater than 1, error!
if(length(which(ppnSampled.change > 1 | ppnSampled.change < 0)) > 0) stop("Proportion of streams observed outside of (0,1).")
# Compute matrix of sampling design (0 = not sampled) for each year
# and subpopulation
sampled <- matrix(NA, nrow = simYears, ncol = nPop)
for(y in 1:simYears){
sampled[y, ] <- sample(x = c(0, 1), size = nPop, replace = TRUE,
prob = c(1 - ppnSampled.change[y], ppnSampled.change[y]))
} # end simYears
} # end else (decline in monitoring)
return(sampled)
} # end function | /model/obsSubmodFns.R | no_license | salmonwatersheds/run-reconst-sim | R | false | false | 3,650 | r | #' Sampling matrix for 'monitoring' spawners
#'
#' This function computes the sampling design over multiple years and streams
#' given either constant or changing monitoring coverage (proportion of streams
#' that are monitored each year). In the case of declining coverage, a start
#' and end year for the decline can be specified.
#' Note: in it's current form this assumed the same proportion of indicator and
#' non-indicator streams are monitored each year, but this could be revised
#' if desired.
#'
#'
#' @param ppnSampled The initial proportion of streams that are monitored.
#' If there is no change, then this is the constant proportion among years.
#' @param nPop Number of subpopulations (indicator + non-indicator)
#' @param simYears Number of years to return monitoring coverage.
#' @param ppnChange The change in the proportion of streams that are monitored.
#' This can be positive or negative, but care should be taken that the
#' \code{ppnSampled + ppnChange} is not less than zero or greater than one.
#' Otherwise, the function will return an error.
#' @param samplingDeclStart The year that the first change in sampling effort
#' began.
#' @param samplingDeclEnd The last year that had a change in sampling effort.
#' @param gen The number of different ages that fish return at. Used to
#' distinguish the initialization years when calculating the rows in which
#' \code{samplingDeclStart} and \code{samplingDeclEnd} should be applied.
#' @return Returns a matrix of zeroes and ones, with the number of rows equal
#' to \code{simYears} and the number of columns equal to \code{nPop}.
#' Zeroes indicate that subpopulation was not monitored in that year, wheras
#' ones indicate that the subpopulation was monitored in the given year.
#'
#' @examples
#' Example based on historic monitoring effort and recent declines reported for
#' chum CUs on BC's central coast from the Pacific Salmon Explorer.
#'
#' samplingDesign(ppnSampled = 0.85, nPop = 35, simYears = 57,
#' ppnChange = -0.18, samplingDeclStart = 40, samplingDeclEnd = 50, gen = 5)
#'
#' @export
samplingDesign <- function(ppnSampled, nPop, simYears, ppnChange = 0,
samplingDeclStart = NULL,
samplingDeclEnd = samplingDeclStart,
gen = NULL){
if (ppnChange == 0) { # If ppnSampled is constant
sampled <- matrix(sample(x = c(0,1), size = nPop*simYears, replace = TRUE,
prob = c(1-ppnSampled, ppnSampled)),
nrow = simYears, ncol = nPop)
} else {
# Calculate ppnSampled for each year
ppnSampled.change <- rep(ppnSampled, simYears)
# If decline happens over time, interpolate change:
if ((samplingDeclEnd - samplingDeclStart) > 0) {
ppnSampled.change[samplingDeclStart : samplingDeclEnd] <-
ppnSampled + approx(
x = c(0, ppnChange),
n = samplingDeclEnd - samplingDeclStart + 2)$y[2:(samplingDeclEnd - samplingDeclStart + 2)]
}
# After change remain at new ppnSampled:
ppnSampled.change[samplingDeclEnd : simYears] <- ppnSampled + ppnChange
# If any ppnSampled is less than 0 or greater than 1, error!
if(length(which(ppnSampled.change > 1 | ppnSampled.change < 0)) > 0) stop("Proportion of streams observed outside of (0,1).")
# Compute matrix of sampling design (0 = not sampled) for each year
# and subpopulation
sampled <- matrix(NA, nrow = simYears, ncol = nPop)
for(y in 1:simYears){
sampled[y, ] <- sample(x = c(0, 1), size = nPop, replace = TRUE,
prob = c(1 - ppnSampled.change[y], ppnSampled.change[y]))
} # end simYears
} # end else (decline in monitoring)
return(sampled)
} # end function |
#' Search Families Based on Scientific or Chinese Names of Plants
#'
#' This function prepare checklist of plants with scientific name, Kew Family
#' and APGIII family based on Chinese Names specified in an text file.
#'
#' A vector of character strings containing the Chinese Names to look up.
#'
#' @param taxa Character vector of the species names (Chinese Characters).
#' @return A data frame containing the following columns:
#'
#' \item{TAXA_NAME}{Chinese Name of the taxa}
#'
#' \item{SPECIES}{Scientific name without authorship}
#'
#' \item{SPECIES_FULL}{Scientific name}
#'
#' \item{GENUS}{Genus}
#'
#' \item{GENUS_CN}{Genus in Chinese}
#'
#' \item{FAMILY_APGIII}{The family in APGIII classification systems}
#'
#' \item{FAMILY_CN}{The family name in Chinese}
#'
#' \item{GROUP}{The group of vascular plants}
#'
#' \item{IUCN_CHINA}{The IUCN status published in 2014.}
#'
#' \item{ENDEMIC_TO_CHINA}{Logical, Is the taxa endemic to China}
#'
#' \item{PROVINTIAL_DISTRIBUTION}{Provinces in which the taxa is naturally
#' occurred}
#'
#' \item{ALTITUDE}{Altitudinal range in meters}
#' @author Jinlong Zhang \email{ jinlongzhang01@@gmail.com }
#' @references The Plant List Website.
#' @examples
#'
#' # Do not Run
#' # see the vignettes
#'
#' @export CTPL
CTPL <- function(taxa = NULL) {
  ## The data.frame() calls below rely on strings staying character; set
  ## the option locally and restore it on exit rather than leaking the
  ## change into the user's session (the previous code never restored it).
  oldOpts <- options(stringsAsFactors = FALSE)
  on.exit(options(oldOpts), add = TRUE)
  if (length(taxa) == 1) {
    if (any(unique(taxa) == "")) {
      stop("taxa is empty, please provide scientific or Chinese name(s)")
    }
  }
  if (any(taxa == "" | is.null(taxa))) {
    stop("At least one taxa is empty, can not search")
  }
  taxa <- enc2utf8(taxa)
  ## Reference checklist shipped with the plantlist package
  cnplants_dat <- plantlist::cnplants_dat
  ## Tool function: capitalise the first letter and lower-case the rest
  Cap <- function(x) {
    paste(toupper(substring(x, 1, 1)), tolower(substring(x, 2)), sep = "")
  }
  ## Tool function: normalise whitespace and comma spacing in a single
  ## string, and strip a trailing comma if present
  REPLACE <- function(x) {
    if (length(x) > 1) {
      stop("only one string is allowed")
    }
    bbb <- gsub(" +", " ", gsub(
      ",+", ", ",
      gsub(", +", ",", x)
    ))
    bbb <- gsub(
      "^[[:space:]]+|[[:space:]]+$", "",
      bbb
    )
    endchar <- substr(bbb, nchar(bbb), nchar(bbb))
    if (endchar == ",") {
      yyy <- gregexpr(pattern = ",", bbb)
      res <-
        substr(bbb,
               start = 1,
               stop = ifelse(unlist(lapply(
                 yyy,
                 function(x) {
                   max(x) - 1
                 }
               )) > 1, unlist(lapply(yyy, function(x) {
                 max(x) - 1
               })), nchar(bbb))
        )
    } else {
      res <- bbb
    }
    res <- gsub("^[[:space:]]+|[[:space:]]+$", "", res)
    return(res)
  }
  ## vapply guarantees a character result regardless of input shape
  YOUR_SEARCH <- Cap(vapply(taxa, REPLACE, character(1), USE.NAMES = FALSE))
  SPECIES_CN <- cnplants_dat$SPECIES_CN
  SPECIES <- cnplants_dat$SPECIES
  ## Seed the result with a throw-away first row (dropped below) so that
  ## rbind() keeps the reference column structure.
  res0 <- cnplants_dat[1, ]
  res_empty <- t(data.frame(rep(NA, ncol(cnplants_dat[1, ]))))
  colnames(res_empty) <- colnames(cnplants_dat)
  ## Preallocated: number of result rows contributed by each query
  rep_id <- integer(length(YOUR_SEARCH))
  for (i in seq_along(YOUR_SEARCH)) {
    selected_index <-
      SPECIES_CN %in% YOUR_SEARCH[i] | SPECIES %in% YOUR_SEARCH[i]
    ## sum() replaces table(selected_index)[2], which returned NA when
    ## every row matched (the table then has only one level).
    nMatched <- sum(selected_index)
    if (nMatched > 0) {
      res0 <- rbind(res0, cnplants_dat[selected_index, ])
      rep_id[i] <- nMatched
      if (nMatched > 1) {
        warning(
          paste(
            "Taxa: '",
            YOUR_SEARCH[i],
            "' matched more than one row.",
            collapse = "",
            sep = ""
          )
        )
      }
    } else {
      res0 <- rbind(res0, res_empty)
      rep_id[i] <- 1
    }
  }
  ## Drop the seed row and attach the original queries
  res1 <- res0[2:nrow(res0), ]
  res <- data.frame(YOUR_SEARCH = rep(taxa, rep_id), res1)
  row.names(res) <- seq_len(nrow(res))
  return(res)
}
| /R/CTPL.R | permissive | helixcn/plantlist | R | false | false | 3,772 | r | #' Search Families Based on Scientific or Chinese Names of Plants
#'
#' This function prepare checklist of plants with scientific name, Kew Family
#' and APGIII family based on Chinese Names specified in an text file.
#'
#' A vector of character strings containing the Chinese Names to look up.
#'
#' @param taxa Character vector of the species names (Chinese Characters).
#' @return A data frame containing the following columns:
#'
#' \item{TAXA_NAME}{Chinese Name of the taxa}
#'
#' \item{SPECIES}{Scientific name without authorship}
#'
#' \item{SPECIES_FULL}{Scientific name}
#'
#' \item{GENUS}{Genus}
#'
#' \item{GENUS_CN}{Genus in Chinese}
#'
#' \item{FAMILY_APGIII}{The family in APGIII classification systems}
#'
#' \item{FAMILY_CN}{The family name in Chinese}
#'
#' \item{GROUP}{The group of vascular plants}
#'
#' \item{IUCN_CHINA}{The IUCN status published in 2014.}
#'
#' \item{ENDEMIC_TO_CHINA}{Logical, Is the taxa endemic to China}
#'
#' \item{PROVINTIAL_DISTRIBUTION}{Provinces in which the taxa is naturally
#' occurred}
#'
#' \item{ALTITUDE}{Altitudinal range in meters}
#' @author Jinlong Zhang \email{ jinlongzhang01@@gmail.com }
#' @references The Plant List Website.
#' @examples
#'
#' # Do not Run
#' # see the vignettes
#'
#' @export CTPL
CTPL <- function(taxa = NULL) {
  ## The data.frame() calls below rely on strings staying character; set
  ## the option locally and restore it on exit rather than leaking the
  ## change into the user's session (the previous code never restored it).
  oldOpts <- options(stringsAsFactors = FALSE)
  on.exit(options(oldOpts), add = TRUE)
  if (length(taxa) == 1) {
    if (any(unique(taxa) == "")) {
      stop("taxa is empty, please provide scientific or Chinese name(s)")
    }
  }
  if (any(taxa == "" | is.null(taxa))) {
    stop("At least one taxa is empty, can not search")
  }
  taxa <- enc2utf8(taxa)
  ## Reference checklist shipped with the plantlist package
  cnplants_dat <- plantlist::cnplants_dat
  ## Tool function: capitalise the first letter and lower-case the rest
  Cap <- function(x) {
    paste(toupper(substring(x, 1, 1)), tolower(substring(x, 2)), sep = "")
  }
  ## Tool function: normalise whitespace and comma spacing in a single
  ## string, and strip a trailing comma if present
  REPLACE <- function(x) {
    if (length(x) > 1) {
      stop("only one string is allowed")
    }
    bbb <- gsub(" +", " ", gsub(
      ",+", ", ",
      gsub(", +", ",", x)
    ))
    bbb <- gsub(
      "^[[:space:]]+|[[:space:]]+$", "",
      bbb
    )
    endchar <- substr(bbb, nchar(bbb), nchar(bbb))
    if (endchar == ",") {
      yyy <- gregexpr(pattern = ",", bbb)
      res <-
        substr(bbb,
               start = 1,
               stop = ifelse(unlist(lapply(
                 yyy,
                 function(x) {
                   max(x) - 1
                 }
               )) > 1, unlist(lapply(yyy, function(x) {
                 max(x) - 1
               })), nchar(bbb))
        )
    } else {
      res <- bbb
    }
    res <- gsub("^[[:space:]]+|[[:space:]]+$", "", res)
    return(res)
  }
  ## vapply guarantees a character result regardless of input shape
  YOUR_SEARCH <- Cap(vapply(taxa, REPLACE, character(1), USE.NAMES = FALSE))
  SPECIES_CN <- cnplants_dat$SPECIES_CN
  SPECIES <- cnplants_dat$SPECIES
  ## Seed the result with a throw-away first row (dropped below) so that
  ## rbind() keeps the reference column structure.
  res0 <- cnplants_dat[1, ]
  res_empty <- t(data.frame(rep(NA, ncol(cnplants_dat[1, ]))))
  colnames(res_empty) <- colnames(cnplants_dat)
  ## Preallocated: number of result rows contributed by each query
  rep_id <- integer(length(YOUR_SEARCH))
  for (i in seq_along(YOUR_SEARCH)) {
    selected_index <-
      SPECIES_CN %in% YOUR_SEARCH[i] | SPECIES %in% YOUR_SEARCH[i]
    ## sum() replaces table(selected_index)[2], which returned NA when
    ## every row matched (the table then has only one level).
    nMatched <- sum(selected_index)
    if (nMatched > 0) {
      res0 <- rbind(res0, cnplants_dat[selected_index, ])
      rep_id[i] <- nMatched
      if (nMatched > 1) {
        warning(
          paste(
            "Taxa: '",
            YOUR_SEARCH[i],
            "' matched more than one row.",
            collapse = "",
            sep = ""
          )
        )
      }
    } else {
      res0 <- rbind(res0, res_empty)
      rep_id[i] <- 1
    }
  }
  ## Drop the seed row and attach the original queries
  res1 <- res0[2:nrow(res0), ]
  res <- data.frame(YOUR_SEARCH = rep(taxa, rep_id), res1)
  row.names(res) <- seq_len(nrow(res))
  return(res)
}
|
## Bagging and Random Forests (Boston data)

## load packages
library(MASS)          # provides the Boston housing data (medv = median home value)
library(randomForest)

## create training and test set (50/50 split)
set.seed(1)
train = sample(1 : nrow(Boston), nrow(Boston) / 2)
Boston.test <- Boston[-train, ]

## train the model (Bagging - m = p: all 13 predictors considered at each split)
bag.fit = randomForest(medv ~., data = Boston, subset = train, mtry = 13, importance = TRUE)
bag.fit

## predicting on test data using fitted model
bag.pred = predict(bag.fit, newdata = Boston.test)
medv.test = Boston.test$medv
plot(bag.pred, medv.test)
abline(0, 1)
mean((bag.pred - medv.test)^2) # test MSE is ~13.34 (squared units, not a percentage)

#--------------------------------------------------------

## train the model (with a custom, smaller number of trees)
bag.fit = randomForest(medv ~., data = Boston, subset = train, mtry = 13, ntree = 25)
bag.fit

## predicting on test data using fitted model
bag.pred = predict(bag.fit, newdata = Boston.test)
mean((bag.pred - medv.test)^2) # test MSE is ~15.97

#--------------------------------------------------------

## train the model (random forest - here mtry = 6, roughly p/3 as is
## conventional for regression; mtry = sqrt(p) is the classification default)
set.seed(1)
rf.fit = randomForest(medv ~., data = Boston, subset = train, mtry = 6, importance = TRUE)
rf.fit

## predicting on test data using fitted model
rf.pred = predict(rf.fit, newdata = Boston.test)
mean((rf.pred - medv.test)^2) # test MSE is ~11.48

## viewing importance of the variables
importance(rf.fit)
varImpPlot(rf.fit)

# random forest test MSE ~11.48, bagging test MSE ~13.34, regression tree
# test MSE ~25.05; in this example the random forest dominates the other methods.
| /C8 Tree Based Methods/Bagging & Random Forest.R | no_license | CheeLoong/Introduction-to-Statistical-Learning | R | false | false | 1,525 | r | ## Bagging and Random Forests (Boston data)
## load packages
library(MASS)          # provides the Boston housing data (medv = median home value)
library(randomForest)

## create training and test set (50/50 split)
set.seed(1)
train = sample(1 : nrow(Boston), nrow(Boston) / 2)
Boston.test <- Boston[-train, ]

## train the model (Bagging - m = p: all 13 predictors considered at each split)
bag.fit = randomForest(medv ~., data = Boston, subset = train, mtry = 13, importance = TRUE)
bag.fit

## predicting on test data using fitted model
bag.pred = predict(bag.fit, newdata = Boston.test)
medv.test = Boston.test$medv
plot(bag.pred, medv.test)
abline(0, 1)
mean((bag.pred - medv.test)^2) # test MSE is ~13.34 (squared units, not a percentage)

#--------------------------------------------------------

## train the model (with a custom, smaller number of trees)
bag.fit = randomForest(medv ~., data = Boston, subset = train, mtry = 13, ntree = 25)
bag.fit

## predicting on test data using fitted model
bag.pred = predict(bag.fit, newdata = Boston.test)
mean((bag.pred - medv.test)^2) # test MSE is ~15.97

#--------------------------------------------------------

## train the model (random forest - here mtry = 6, roughly p/3 as is
## conventional for regression; mtry = sqrt(p) is the classification default)
set.seed(1)
rf.fit = randomForest(medv ~., data = Boston, subset = train, mtry = 6, importance = TRUE)
rf.fit

## predicting on test data using fitted model
rf.pred = predict(rf.fit, newdata = Boston.test)
mean((rf.pred - medv.test)^2) # test MSE is ~11.48

## viewing importance of the variables
importance(rf.fit)
varImpPlot(rf.fit)

# random forest test MSE ~11.48, bagging test MSE ~13.34, regression tree
# test MSE ~25.05; in this example the random forest dominates the other methods.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateMutations.R
\name{annotate_simulated_variants}
\alias{annotate_simulated_variants}
\title{Annotating base substitutions generated by generate_mutations()}
\usage{
annotate_simulated_variants(variants, txdb, genome,
current_source = Hsapiens)
}
\description{
@description This function generates mutations in a set of genes given the total number of mutations
in samples, numbers of silent mutations per gene and total numbers of silent mutations
per sample. Mutations are annotated, and numbers of silent and non-silent mutations
calculated per iteration.
@param variants Matrix of type character; needs to contain columns
\code{Chrom} for chromosomes, \code{Pos} for substitution position,
\code{Ref} for reference allele, \code{Mut} for alternative allele, and
\code{Sample} for sample names.
@param genome BSgenome object with relevant genome built.
@param txdb A \code{TxDb} object (from the GenomicFeatures package) which serves as the annotation for
distinguishing synonymous and non-synonymous variants.
@param current_source \code{seqSource} argument for the \code{predictCoding} function
(VariantAnnotation package). Default is "Hsapiens".
@details Modifies the matrix with variants and makes it suitable for annotation using the \code{predictCoding}
function. Returns a matrix with additional column - \code{Effect} - which contains
the prediction (unknown, synonymous, non-synonymous, or nonsense).
@examples
library(BSgenome.Hsapiens.UCSC.hg19)
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
genome <- BSgenome.Hsapiens.UCSC.hg19
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene
variants <- as.matrix(data.frame(Sample = c('T1','T2'),
Chrom = c('chr2','chr2'),
Pos = c(1764958, 19845736),
Ref = c('A', 'G'),
Mut = c('T', 'A')))
annotate_simulated_variants(variants, txdb, genome)
}
| /man/annotate_simulated_variants.Rd | no_license | nvolkova/MutSim | R | false | true | 2,204 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateMutations.R
\name{annotate_simulated_variants}
\alias{annotate_simulated_variants}
\title{Annotating base substitutions generated by generate_mutations()}
\usage{
annotate_simulated_variants(variants, txdb, genome,
current_source = Hsapiens)
}
\description{
@description This function generates mutations in a set of genes given the total number of mutations
in samples, numbers of silent mutations per gene and total numbers of silent mutations
per sample. Mutations are annotated, and numbers of silent and non-silent mutations
calculated per iteration.
@param variants Matrix of type character; needs to contain columns
\code{Chrom} for chromosomes, \code{Pos} for substitution position,
\code{Ref} for reference allele, \code{Mut} for alternative allele, and
\code{Sample} for sample names.
@param genome BSgenome object with relevant genome built.
@param txdb A \code{TxDb} object (from the GenomicFeatures package) which serves as the annotation for
distinguishing synonymous and non-synonymous variants.
@param current_source \code{seqSource} argument for the \code{predictCoding} function
(VariantAnnotation package). Default is "Hsapiens".
@details Modifies the matrix with variants and makes it suitable for annotation using the \code{predictCoding}
function. Returns a matrix with additional column - \code{Effect} - which contains
the prediction (unknown, synonymous, non-synonymous, or nonsense).
@examples
library(BSgenome.Hsapiens.UCSC.hg19)
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
genome <- BSgenome.Hsapiens.UCSC.hg19
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene
variants <- as.matrix(data.frame(Sample = c('T1','T2'),
Chrom = c('chr2','chr2'),
Pos = c(1764958, 19845736),
Ref = c('A', 'G'),
Mut = c('T', 'A')))
annotate_simulated_variants(variants, txdb, genome)
}
|
#--------------------------------------------------------------------------
# aWhere R Tutorial: Access aWhere API Data
#
# Purpose of script:
# This code will show you how to access aWhere's ag-weather datasets from
# the API (Application Programming Interface) for your location of interest.
# Prior to running this script, we encourage you to find the latitude and
# longitude of an area of interest by using Google Maps, QGIS and aWhere's
# geospatial files found on apps.awhere.com, or by using GPS points that you
# have previously collected.
#
# This script provides the following datasets for your location of interest:
# 1. A csv output of the Forecast (Hourly, 6 hour, 12-hour,
#    or daily blocks of time)
# 2. Observed data for any time period between 2008 and present
# 3. Long-Term Normals (LTN) for chosen time period between 2008 and present
# 4. A csv output called the "aWhere Weather Dataset" which includes all
#    observed variables and all LTN variables including the differences
#    from normal.
#
# You will need to be connected to the internet to run this script.
#
# Date updated: 2019-06-22
#--------------------------------------------------------------------------

# Install and load packages -----------------------------------------------

# Clear your environment and remove all previous variables
rm(list = ls())

# Install the aWhere R packages, if you have not already.
# NOTE(review): these calls re-install on every run; wrap them in a
# requireNamespace() guard if repeated installs become a nuisance.
devtools::install_github("aWhereAPI/aWhere-R-Library")
devtools::install_github("aWhereAPI/aWhere-R-Charts")

# Load the packages needed for this script.
# If they have not been installed yet on your computer,
# use this code to install them: install.packages("NAME OF PACKAGE")
library(devtools)
library(rgeos)
library(raster)
library(foreach)
library(aWhereAPI)
library(aWhereCharts)

# Load aWhere credentials -------------------------------------------------
# You will need to load your credentials file which includes your aWhere
# key and secret, like a username and password. This gives you a token which
# shows that you have access to the API and all of aWhere's data. Your
# credentials should be kept in a location where you can easily find them.
# Copy the pathfile name and paste it below over the phrase,
# "YOUR CREDENTIALS HERE"
aWhereAPI::load_credentials("YOUR CREDENTIALS HERE")

# Set working & output directories ----------------------------------------
# Next, you need to set your working directory. This is the location on your
# computer where R will automatically save the output files created by this
# script.
# To set your working directory, find the folder on your computer where you
# would like the outputs of this script to be saved, copy the pathfile name
# and paste it over the phrase, "YOUR WD HERE"
working_dir <- "YOUR WD HERE"
setwd(working_dir) # This sets your working directory to the working_dir path

# Now you will create the folder within your working directory where your
# output csv files will be saved. This line creates a folder in your working
# directory called outputCSVs. You can navigate to your working directory on
# your computer and see that this folder was created.
dir.create(path = "outputCSVs/", showWarnings = FALSE, recursive = TRUE)

# Now that your parameters have been set for this script, you are ready to
# begin requesting data from the API and investigating your area of interest.

# Forecast ----------------------------------------------------------------
# In this section, we will pull forecast data for your location of interest.
# First, determine the location's name, latitude, and longitude.
# You can use QGIS, Google Maps, or your own data to find this information.
# Next, create a text file with this location information. Refer to
# the "locations.txt" text file example in the RunSet folder for formatting
# this file. It must have 3 columns called place_name, latitude, longitude.
# An example of a row with location information would thus be:
# place_name, latitude, longitude
# Nairobi, -1.283, 36.816

# CHANGE THIS to the path of your locations text file
locations_file <- "YOUR LOCATION FILE.txt"

# Read the location(s) text file
locations <- read.csv(locations_file)

# Loop over every location, pulling each dataset and writing one csv per
# location. seq_len() is safe even for an empty locations file, whereas
# 1:nrow(locations) would incorrectly iterate over c(1, 0).
for (i in seq_len(nrow(locations))) {
  # Get the latitude, longitude, and name of the current location of interest
  lat <- locations$latitude[i]
  lon <- locations$longitude[i]
  place_name <- locations$place_name[i]

  # Pull the weather forecast directly from the aWhere API
  forecast <- aWhereAPI::forecasts_latlng(lat,
                                          lon,
                                          day_start = as.character(Sys.Date()),
                                          day_end = as.character(Sys.Date() + 7),
                                          block_size = 6)
  # The default forecast parameters in the code above are:
  # Starting date is today, Sys.Date()
  # Ending date is seven days from now, Sys.Date() + 7
  # Block size refers to the number of hours each data point will consist
  # of. By default, this value is 6, which pulls forecast data in 6-hour blocks.
  # A block size of 1 would yield hourly blocks of forecast data.

  # Save a .csv file of the forecast data in the outputCSVs folder that you
  # created within your working directory
  write.csv(forecast,
            file = paste0("outputCSVs/Forecast-6hour-", place_name, ".csv"),
            row.names = FALSE)
  # You can also click on the forecast dataframe in the "environment" tab in the
  # top right console to see the data in RStudio!

  # Observed Data -----------------------------------------------------------
  # Here you will pull the historical data for your location of interest.
  # Set the starting and ending dates to a time period of interest
  starting_date <- "2018-01-01"               # January 1, 2018
  ending_date <- as.character(Sys.Date() - 2) # two days ago

  # Pull observed weather data from the aWhere API
  observed <- aWhereAPI::daily_observed_latlng(latitude = lat,
                                               longitude = lon,
                                               day_start = starting_date,
                                               day_end = ending_date)
  write.csv(observed,
            file = paste0("outputCSVs/observedData-", place_name, ".csv"),
            row.names = FALSE)
  # The parameters for this function can have many formats.
  # You can change the starting/ending dates for a timeframe of interest.
  # The starting date can be as early as 2008.
  # You can use the "YYYY-MM-DD" format for a specific date.
  # You can also use Sys.Date() to make your end date today,
  # or similarly, use Sys.Date() - 1 to make your end date yesterday.
  # NOTE that observed data can ONLY be in the past. You will get an error
  # if a future date is selected!
  # Click the "observed" dataframe in the "environment" tab on the top right
  # console to see the data!

  # Agronomic data ----------------------------------------------------------
  # Here you will pull agronomic data for your location and time of interest.
  # If you do not change the "starting_date" and "ending_date" variables,
  # then the time period will remain the same from the observed data pulled above.

  # Pull agronomic weather data from the aWhere API
  ag <- aWhereAPI::agronomic_values_latlng(lat,
                                           lon,
                                           day_start = starting_date,
                                           day_end = ending_date)
  # Click the "ag" dataframe in the "environment" tab on the top right
  # console to see the data!
  write.csv(ag,
            file = paste0("outputCSVs/agronomicsData-", place_name, ".csv"),
            row.names = FALSE)

  # Long Term Normals -------------------------------------------------------
  # Here you will pull the long-term normals (LTN) for your location and time
  # period of interest.

  # LTN values will be calculated across this range of years
  year_start <- 2011
  year_end <- 2018

  # Specify the starting and ending month-day of interest,
  # such as the growing season in your region
  monthday_start <- "01-01" # January 1
  monthday_end <- "06-16"   # June 16

  # Pull LTN weather data from the aWhere API
  ltn <- aWhereAPI::weather_norms_latlng(lat, lon,
                                         monthday_start = monthday_start,
                                         monthday_end = monthday_end,
                                         year_start = year_start,
                                         year_end = year_end,
                                         # you can choose to exclude years from the LTN
                                         exclude_years = c("2011", "2016"))
  # Click the "ltn" dataframe in the "environment" tab on the top right
  # console to see the data!
  write.csv(ltn,
            file = paste0("outputCSVs/ltnData-", place_name, ".csv"),
            row.names = FALSE)

  # Full aWhere Ag-Weather Dataset ------------------------------------------
  # This section combines all of the above datasets into one cohesive .csv for
  # analysis. You can change the location and time period as needed in
  # the lines of code below.
  starting_date <- "2018-01-01"
  ending_date <- "2019-06-16"
  year_start <- 2008
  year_end <- 2018

  # This function generates a clean dataset with observed AND forecast
  # agronomics AND Long Term Normals!
  weather_df <- generateaWhereDataset(lat = lat,
                                      lon = lon,
                                      day_start = starting_date,
                                      day_end = ending_date,
                                      year_start = year_start,
                                      year_end = year_end)

  # Save .csv file of the dataset in the outputCSVs folder created within
  # your working directory
  write.csv(weather_df,
            file = paste0("outputCSVs/aWhereWeatherDataset-", place_name, ".csv"),
            row.names = FALSE)
}
| /archives/deprecated/1-access_awhere_api_data.R | no_license | aWhereAPI/R-Training-Tutorials | R | false | false | 9,991 | r | #--------------------------------------------------------------------------
# aWhere R Tutorial: Access aWhere API Data
#
# Purpose of script:
# This code will show you how to access aWhere's ag-weather datasets from
# the API (Application Programming Interface) for your location of interest.
# Prior to running this script, we enourage you to find the latitude and
# longitude of an area of interest by using Google Maps, QGIS and aWhere's
# geospatial files found on apps.awhere.com, or by using GPS points that you
# have previously collected.
#
# This script provides the following datasets for your location of interest:
# 1. A csv output of the Forecast (Hourly, 6 hour, 12-hour,
# or daily blocks of time)
# 2. Observed data for any time period between 2008 and present
# 3. Long-Term Normals (LTN) for chosen time period between 2008 and present
# 4. A csv output called the "aWhere Weather Dataset" which includes all
# observed variables and all LTN variables including the differences
# from normal.
#
# You will need to be connected to the internet to run this script.
#
# Date updated: 2019-06-22
#--------------------------------------------------------------------------
# Install and load packages -----------------------------------------------
# Clear your environment and remove all previous variables
rm(list = ls())
# Install the aWhere R packages, if you have not already
devtools::install_github("aWhereAPI/aWhere-R-Library")
devtools::install_github("aWhereAPI/aWhere-R-Charts")
# Load the packages needed for this script.
# If they have not been installed yet on your computer,
# using this code to install them: install.packages("NAME OF PACKAGE")
library(devtools)
library(rgeos)
library(raster)
library(foreach)
library(aWhereAPI)
library(aWhereCharts)
# Load aWhere credentials -------------------------------------------------
# You will need to load your credentials file which includes your aWhere
# key and secret, like a username and password. This gives you a token which
# shows that you have access to the API and all of aWhere's data. Your
# credentials should be kept in a location where you can easily find them.
# Copy the pathfile name and paste it below over the phrase,
# "YOUR CREDENTIALS HERE"
aWhereAPI::load_credentials("YOUR CREDENTIALS HERE")
# Set working & output directories ----------------------------------------
# Next, you need to set your working directory. This is the location on your
# computer where R will automatically save the output files created by this
# script.
# To set your working directory, find the folder on your computer where you
# would like the outputs of this script to be saved, copy the pathfile name
# and paste it over the phrase, "YOUR WD HERE"
working_dir <- "YOUR WD HERE"
setwd(working_dir) # This sets your working directory to the working_dir path
# Now you will create the folder within your working directory where your
# output csv files will be saved. This line creates a folder in your working
# directory called outputCSVs. You can navigate to your working directory on
# your computer and see that this folder was created.
dir.create(path = "outputCSVs/", showWarnings = FALSE, recursive = TRUE)
# Now that your parameters have been set for this script, you are ready to
# begin requesting data from the API and investigating your area of interest.
# Forecast ----------------------------------------------------------------
# In this section, we will pull forecast data for your location of interest.
# First, determine the location's name, latitude, and longitude.
# You can use QGIS, Google Maps, or your own data to find this information.
# Next, create a text file with this location information. Refer to
# the "locations.txt" text file example in the RunSet folder for formatting
# this file. It must have 3 columns called place_name, latitude, longitude.
# An example of a row with location information would thus be:
# place_name, latitude, longitude
# Nairobi, -1.283, 36.816
# CHANGE THIS to the path of your locations text file
locations_file <- "YOUR LOCATION FILE.txt"
# Read the location(s) text file
locations <- read.csv(locations_file)
for (i in(1:nrow(locations))) {
# Get the first latitude, longitude, and name of your location(s) of interest
lat <- locations$latitude[i]
lon <- locations$longitude[i]
place_name <- locations$place_name[i]
# Pull the weather forecast directly from the aWhere API
forecast <- aWhereAPI::forecasts_latlng(lat
,lon
,day_start = as.character(Sys.Date())
,day_end = as.character(Sys.Date()+7)
,block_size = 6)
# The default forecast parameters in the code above are:
# Starting date is today, Sys.Date()
# Ending date is seven days from now, Sys.Date() + 7
# Block size refers to the number of hours each data point will consist
# of. By default, this value is 6, which pulls forecast data in 6-hour blocks.
# A block size of 1 would yield hourly blocks of forecast data.
# Save a .csv file of the forecast data in the outputCSVs folder that you
# created within your working directory
write.csv(forecast, file = paste0("outputCSVs/Forecast-6hour-",place_name,".csv"), row.names=F)
# You can also click on the forecast dataframe in the "environment" tab in the
# top right console to see the data in RStudio!
# Observed Data -----------------------------------------------------------
# Here you will pull the historical data for your location of interest.
# Set the starting and ending dates to a time period of interest
starting_date <- "2018-01-01" # January 1, 2016
ending_date <- as.character(Sys.Date() - 2) # two days ago
# Pull observed weather data from the aWhere API
observed <- aWhereAPI::daily_observed_latlng(latitude = lat,
longitude = lon,
day_start = starting_date,
day_end = ending_date)
write.csv(observed, file = paste0("outputCSVs/observedData-",place_name,".csv"), row.names=F)
# The parameters for this function can have many formats.
# You can change the starting/ending dates for a timeframe of interest.
# The starting date can be as early as 2008.
# You can use the "YYYY-MM-DD" format for a specific date.
# You can also use Sys.Date() to make your end date today,
# or similarly, use Sys.Date() - 1 to make your end date yesterday.
# NOTE that observed data can ONLY be in the past. You will get an error
# if a future date is selected!
# Click the "observed" dataframe in the "environment" tab on the top right
# console to see the data!
# Agronomic data ----------------------------------------------------------
# Here you will pull agronomic data for your location and time of interest.
# If you do not change the "starting_date" and "ending_date" variables,
# then the time period will remain the same from the observed data pulled above.
# Pull agronomic weather data from the aWhere API
ag <- aWhereAPI::agronomic_values_latlng(lat
,lon
,day_start = starting_date
,day_end = ending_date)
# Click the "ag" dataframe in the "environment" tab on the top right
# console to see the data!
write.csv(ag, file = paste0("outputCSVs/agronomicsData-",place_name,".csv"), row.names=F)
# Long Term Normals -------------------------------------------------------
# Here you will pull the long-term normals (LTN) for your location and time
# period of interest.
# LTN values will be calculated across this range of years
year_start <- 2011
year_end <- 2018
# Specify the starting and ending month-day of interest,
# such as the growing season in your region
monthday_start <- "01-01" # January 1
monthday_end <- "06-16" # June 16
# Pull LTN weather data from the aWhere API
ltn <- weather_norms_latlng(lat, lon,
monthday_start = monthday_start,
monthday_end = monthday_end,
year_start = year_start,
year_end = year_end,
# you can choose to exclude years from the LTN
exclude_years = c("2011", "2016"))
# Click the "ltn" dataframe in the "environment" tab on the top right
# console to see the data!
write.csv(ltn, file = paste0("outputCSVs/ltnData-",place_name,".csv"), row.names=F)
# Full aWhere Ag-Weather Dataset ------------------------------------------
# This section combines all of the above datasets into one cohesive .csv for
# analysis. You can change the location and time period as needed in
# the lines of code below.
starting_date <- "2018-01-01"
ending_date <- "2019-06-16"
year_start <- 2008
year_end <- 2018
# This function generates a clean dataset with observed AND forecast
# agronomics AND Long Term Normals!
weather_df <- generateaWhereDataset(lat = lat,
lon = lon,
day_start = starting_date,
day_end = ending_date,
year_start = year_start,
year_end = year_end)
# Save .csv file of the dataset in the outputCSVs folder created within
# your working directory
write.csv(weather_df,
file = paste0("outputCSVs/aWhereWeatherDataset-",place_name,".csv"),
row.names=F)
}
|
# creating list of household (PR) datasets with ITN data for 2005, 2012, 2018
# (the list holds three surveys, GNIR[[2]] through GNIR[[4]])
HH_ITN.list <- list(GNIR[[2]], GNIR[[3]], GNIR[[4]])
look_for(GNIR, "hml1")

# creates two new variables named hh_net (number of household members per net) and net_ratio
# (categorical variable- if > 2 then access is 0, If 2 or less than access is 1)
HH_ITN.list <- map(HH_ITN.list, funEnv$recoder.nets)
table(HH_ITN.list[[3]]$net_ratio)
# creates survey month and changes hv001 to v001 to enable left_join
HH_ITN.list <- map(HH_ITN.list, funEnv$survey.month.fun)

# key list for the household ITN datasets (one key per survey year)
keys.HH_ITN <- list(key_list[[2]], key_list[[3]], key_list[[4]])
# key datasets and dhs/mis datasets are joined
HH_ITN.list <- map2(HH_ITN.list, keys.HH_ITN, left_join) # PR datasets

#####################################################################################################
# HH_ITN_access: district-level (DS) estimates of household ITN access
####################################################################################################
# 2018
HH_ITN.list[[3]] <- funEnv$dataclean.HH(HH_ITN.list[[3]], net_ratio, hv005, 'net_ratio', 'net_ratio')
HH_ITN.svyd18 <- funEnv$svydesign.fun(HH_ITN.list[[3]])
table(HH_ITN.list[[3]]$net_ratio)
DS_HH_ITN_18 <- result.fun.HH('net_ratio', 'NAME_2', 'num_p', design = HH_ITN.svyd18)
head(DS_HH_ITN_18)
write.csv(DS_HH_ITN_18, "master/results/HH_ITN/DS_HH_ITN_18.csv")

# 2012
HH_ITN.list[[2]] <- funEnv$dataclean.HH(HH_ITN.list[[2]], net_ratio, hv005, 'net_ratio', 'net_ratio')
HH_ITN.svyd12 <- funEnv$svydesign.fun(HH_ITN.list[[2]])
DS_HH_ITN_12 <- result.fun.HH('net_ratio', 'NAME_2', 'num_p', design = HH_ITN.svyd12)
head(DS_HH_ITN_12)
write.csv(DS_HH_ITN_12, "master/results/HH_ITN/DS_HH_ITN_12.csv")

# 2005
HH_ITN.list[[1]] <- funEnv$dataclean.HH(HH_ITN.list[[1]], net_ratio, hv005, 'net_ratio', 'net_ratio')
HH_ITN.svyd05 <- funEnv$svydesign.fun(HH_ITN.list[[1]])
DS_HH_ITN_05 <- result.fun.HH('net_ratio', 'NAME_2', 'num_p', design = HH_ITN.svyd05)
head(DS_HH_ITN_05)
write.csv(DS_HH_ITN_05, "master/results/HH_ITN/DS_HH_ITN_05.csv")

# cluster-level estimates
# NOTE: output filenames previously said clu_U5_ITN_* (copy-paste from the
# under-5 script); renamed to clu_HH_ITN_* to match the household variables.
# 2018
clu_HH_ITN_18 <- funEnv$result.clu.fun.HH('net_ratio', 'v001', design = HH_ITN.svyd18, HH_ITN.list[[3]])
head(clu_HH_ITN_18)
write.csv(clu_HH_ITN_18, "master/results/HH_ITN/clu_HH_ITN_18.csv")

# 2012
clu_HH_ITN_12 <- funEnv$result.clu.fun.HH('net_ratio', 'v001', design = HH_ITN.svyd12, HH_ITN.list[[2]])
head(clu_HH_ITN_12)
write.csv(clu_HH_ITN_12, "master/results/HH_ITN/clu_HH_ITN_12.csv")

# 2005
clu_HH_ITN_05 <- funEnv$result.clu.fun.HH('net_ratio', 'v001', design = HH_ITN.svyd05, HH_ITN.list[[1]])
head(clu_HH_ITN_05)
write.csv(clu_HH_ITN_05, "master/results/HH_ITN/clu_HH_ITN_05.csv")

#####################################################################################################
## Maps
####################################################################################################
# 2018 transformations: join district estimates to the health-district shapes
# and cluster estimates to the survey cluster points
DS_file <- HD_sf %>% left_join(DS_HH_ITN_18)
pts_file <- GNshplist_sf[[4]] %>% left_join(clu_HH_ITN_18)

# 2012 transformations
DS_file_12 <- HD_sf %>% left_join(DS_HH_ITN_12)
pts_file_12 <- GNshplist_sf[[3]] %>% left_join(clu_HH_ITN_12)

# 2005 transformations
DS_file_05 <- HD_sf %>% left_join(DS_HH_ITN_05)
pts_file_05 <- GNshplist_sf[[2]] %>% left_join(clu_HH_ITN_05)

# 2018 map
GN_HH_ITN18 <- funEnv$tmap.fun3(DS_file, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
                                maintitle="Household (HH) ITN access by District (2018)", ptsfile=pts_file, "Number of Households",
                                "net_ratio")
CK_HH_ITN18 <- funEnv$tmap.fun3(DS_cona, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
                                maintitle="Household (HH) ITN access by District (2018)", ptsfile=cky_pt18, "Number of Households",
                                "net_ratio")
GN_all_HH_ITN18 <- tmap_arrange(GN_HH_ITN18, CK_HH_ITN18)

# 2012 map
GN_HH_ITN12 <- funEnv$tmap.fun3(DS_file_12, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
                                maintitle="Household (HH) ITN access by District (2012)", ptsfile=pts_file_12, "Number of Households",
                                "net_ratio")
# NOTE(review): the 2012 Conakry map reuses the 2018 point layer (cky_pt18);
# confirm whether a 2012 layer (e.g. cky_pts12) exists and should be used here.
CK_HH_ITN12 <- funEnv$tmap.fun3(DS_cona, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
                                maintitle="Household (HH) ITN access by District (2012)", ptsfile=cky_pt18, "Number of Households",
                                "net_ratio")
GN_all_HH_ITN12 <- tmap_arrange(GN_HH_ITN12, CK_HH_ITN12)

# 2005 map
GN_HH_ITN05 <- funEnv$tmap.fun3(DS_file_05, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
                                maintitle="Household (HH) ITN access by District (2005)", ptsfile=pts_file_05, "Number of Households",
                                "net_ratio")
CK_HH_ITN05 <- funEnv$tmap.fun3(DS_cona, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
                                maintitle="Household (HH) ITN access by District (2005)", ptsfile=cky_pts05, "Number of Households",
                                "net_ratio")
GN_all_HH_ITN05 <- tmap_arrange(GN_HH_ITN05, CK_HH_ITN05)

## all survey years side by side
all_HH_ITN <- tmap_arrange(GN_HH_ITN05, GN_HH_ITN12, GN_HH_ITN18)

### export the 2005 Conakry map
tmap_save(tm = CK_HH_ITN05, filename = "/Users/ousmanediallo/Box/NU-malaria-team/projects/hbhi_guinea/maps/HH_ITN/CK_HH_ITN05.pdf",width=13, height=13, units ="in", asp=0,
          paper ="A4r", useDingbats=FALSE)
| /HH_ITN.R | no_license | Ousmanerabi/Guinea_Scripts | R | false | false | 5,490 | r | # creating list of files with ANC data for 1999, 2005, 2012, 2018
HH_ITN.list <- list(GNIR[[2]],GNIR[[3]], GNIR[[4]])
look_for(GNIR, "hml1")
# creates two new variables named hh_net (number of household members per net) and net_ratio
#(categorical variable- if > 2 then access is 0, If 2 or less than access is 1)
HH_ITN.list <- map(HH_ITN.list, funEnv$recoder.nets)
table(HH_ITN.list[[3]]$net_ratio)
HH_ITN.list <- map(HH_ITN.list, funEnv$survey.month.fun)#creates survey month and changes hv001 to v001 to enable left_join
# key list for ANC
keys.HH_ITN <- list(key_list[[2]],key_list[[3]], key_list[[4]])
#changing to a list of keys
# key datasets and dhs/mis datasets are joined
HH_ITN.list <- map2(HH_ITN.list, keys.HH_ITN, left_join) #PR datasets
#####################################################################################################
# HH_ITN_access
####################################################################################################
# 2018
HH_ITN.list[[3]] <-funEnv$dataclean.HH(HH_ITN.list[[3]], net_ratio, hv005,'net_ratio', 'net_ratio')
HH_ITN.svyd18 <- funEnv$svydesign.fun(HH_ITN.list[[3]])
table(HH_ITN.list[[3]]$net_ratio)
DS_HH_ITN_18 <- result.fun.HH('net_ratio', 'NAME_2','num_p', design=HH_ITN.svyd18)
head(DS_HH_ITN_18)
write.csv(DS_HH_ITN_18, "master/results/HH_ITN/DS_HH_ITN_18.csv")
# 2012
HH_ITN.list[[2]] <-funEnv$dataclean.HH(HH_ITN.list[[2]], net_ratio, hv005,'net_ratio', 'net_ratio')
HH_ITN.svyd12 <- funEnv$svydesign.fun(HH_ITN.list[[2]])
DS_HH_ITN_12 <- result.fun.HH('net_ratio', 'NAME_2','num_p', design=HH_ITN.svyd12)
head(DS_HH_ITN_12)
write.csv(DS_HH_ITN_12, "master/results/HH_ITN/DS_HH_ITN_12.csv")
# 2005
HH_ITN.list[[1]] <-funEnv$dataclean.HH(HH_ITN.list[[1]], net_ratio, hv005,'net_ratio', 'net_ratio')
HH_ITN.svyd05 <- funEnv$svydesign.fun(HH_ITN.list[[1]])
DS_HH_ITN_05 <- result.fun.HH('net_ratio', 'NAME_2','num_p', design=HH_ITN.svyd05)
head(DS_HH_ITN_05)
write.csv(DS_HH_ITN_05, "master/results/HH_ITN/DS_HH_ITN_05.csv")
# cluster-level estimates
# 2018
clu_HH_ITN_18 <- funEnv$result.clu.fun.HH('net_ratio', 'v001', design=HH_ITN.svyd18,HH_ITN.list[[3]])
head(clu_HH_ITN_18)
write.csv(clu_HH_ITN_18, "master/results/HH_ITN/clu_U5_ITN_18.csv")
# 2012
clu_HH_ITN_12 <- funEnv$result.clu.fun.HH('net_ratio', 'v001', design=HH_ITN.svyd12,HH_ITN.list[[2]])
head(clu_HH_ITN_12)
write.csv(clu_HH_ITN_12, "master/results/HH_ITN/clu_U5_ITN_12.csv")
# 2005
clu_HH_ITN_05 <- funEnv$result.clu.fun.HH('net_ratio', 'v001', design=HH_ITN.svyd05,HH_ITN.list[[1]])
head(clu_HH_ITN_05)
write.csv(clu_HH_ITN_05, "master/results/HH_ITN/clu_U5_ITN_05.csv")
#####################################################################################################
## Maps
####################################################################################################
# 2018 transformations
DS_file <- HD_sf %>% left_join(DS_HH_ITN_18)
pts_file <- GNshplist_sf[[4]] %>% left_join(clu_HH_ITN_18)
# 2012 transformations
DS_file_12 <- HD_sf %>% left_join(DS_HH_ITN_12)
pts_file_12 <- GNshplist_sf[[3]] %>% left_join(clu_HH_ITN_12)
# 2005 transformations
DS_file_05 <- HD_sf %>% left_join(DS_HH_ITN_05)
pts_file_05 <- GNshplist_sf[[2]] %>% left_join(clu_HH_ITN_05)
# 2018 map
GN_HH_ITN18 <- funEnv$tmap.fun3(DS_file, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
maintitle="Household (HH) ITN access by District (2018)", ptsfile=pts_file, "Number of Households",
"net_ratio")
CK_HH_ITN18 <- funEnv$tmap.fun3(DS_cona, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
maintitle="Household (HH) ITN access by District (2018)", ptsfile=cky_pt18, "Number of Households",
"net_ratio")
GN_all_HH_ITN18<-tmap_arrange(GN_HH_ITN18, CK_HH_ITN18)
# 2012 map
GN_HH_ITN12 <- funEnv$tmap.fun3(DS_file_12, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
maintitle="Household (HH) ITN access by District (2012)", ptsfile=pts_file_12, "Number of Households",
"net_ratio")
CK_HH_ITN12 <- funEnv$tmap.fun3(DS_cona, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
maintitle="Household (HH) ITN access by District (2012)", ptsfile=cky_pt18, "Number of Households",
"net_ratio")
GN_all_HH_ITN12 <- tmap_arrange(GN_HH_ITN12, CK_HH_ITN12)
# 2005 map
GN_HH_ITN05 <- funEnv$tmap.fun3(DS_file_05, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
maintitle="Household (HH) ITN access by District (2005)", ptsfile=pts_file_05, "Number of Households",
"net_ratio")
CK_HH_ITN05 <- funEnv$tmap.fun3(DS_cona, colname="net_ratio", legtitle="HH with 2 or fewer persons per net",
maintitle="Household (HH) ITN access by District (2005)", ptsfile=cky_pts05, "Number of Households",
"net_ratio")
GN_all_HH_ITN05 <- tmap_arrange(GN_HH_ITN05, CK_HH_ITN05)
##
all_HH_ITN <- tmap_arrange(GN_HH_ITN05,GN_HH_ITN12,GN_HH_ITN18)
###
tmap_save(tm = CK_HH_ITN05, filename = "/Users/ousmanediallo/Box/NU-malaria-team/projects/hbhi_guinea/maps/HH_ITN/CK_HH_ITN05.pdf",width=13, height=13, units ="in", asp=0,
paper ="A4r", useDingbats=FALSE)
|
#' The function that asks you before it tests every single function
#'
#' This function will probably not be in the final version because it's quite time consuming,
#' but can be useful for debugging.
#'
#' It runs through all the good functions, for every permutation of the input
#' arguments, printing the names of the functions as it goes, so you can see
#' which function is causing the hang-up. Each call is evaluated under a
#' one-second time limit so a hanging function cannot stall the search, and a
#' message is emitted whenever a call reproduces \code{answer}.
#'
#' @param data the data (in a list) that you want to test
#' @param answer the answer you expect
#' @param ask a binary TRUE/FALSE of whether you want to be prompted at the command line every time
#' \code{isitperm} tries another function, or if it should just print each function name as it goes.
#' @export
isitperm <- function(data, answer, ask = TRUE) {
  if (!requireNamespace("gtools", quietly = TRUE)) {
    stop("isitperm() requires the 'gtools' package", call. = FALSE)
  }
  # `goodfunctions` is a package-level vector of side-effect-free function names.
  listNoSideEffects <- goodfunctions
  # Every ordering of the arguments in `data`.
  orderings <- gtools::permutations(length(data), length(data), seq_along(data))
  # Evaluate `expr` under a time limit; the limit is cleared on exit so it does
  # not leak into later evaluations. Defined once here rather than inside the
  # inner loop (the original re-created this closure on every iteration).
  timeOut <- function(expr, ...) {
    on.exit(setTimeLimit())
    setTimeLimit(...)
    expr
  }
  for (i in seq_len(nrow(orderings))) {
    for (procName in listNoSideEffects) {
      if (ask) {
        # procName is already a character string, so use it directly;
        # deparse(substitute(procName)) always printed the literal "procName".
        readline(prompt = paste0("hit Enter to try ", procName, ":"))
      } else {
        print(procName)
      }
      trialAnswer <- tryCatch(
        timeOut(do.call(procName, data[orderings[i, ]]), elapsed = 1),
        error = function(e) as.character(e)
      )
      # Report any function/ordering that reproduces the expected answer
      # (the original computed trialAnswer but silently discarded it).
      if (isTRUE(all.equal(trialAnswer, answer))) {
        message(procName, " reproduces the answer with argument order ",
                paste(orderings[i, ], collapse = ", "))
      }
    }
  }
  invisible(NULL)
}
| /R/isitperm.R | no_license | AmeliaMN/locatr | R | false | false | 1,379 | r | #' The function that asks you before it tests every single function
#'
#' This function will probably not be in the final version because it's quite time consuming,
#' but can be useful for debugging.
#'
#' It runs through all the good functions, printing the names of the
#' functions as it goes, so you can see which function is causing the hang-up.
#' @param data the data (in a list) that you want to test
#' @param answer the answer you expect
#' @param ask a binary TRUE/FALSE of whether you want to be prompted at the command line every time
#' \code{isitperm} tries another function, or if it should just print each function name as it goes.
#' @export
isitperm <- function(data, answer, ask=TRUE){
require(gtools)
listNoSideEffects <- goodfunctions
orderings <- permutations(length(data), length(data), 1:length(data))
for (i in 1:dim(orderings)[1]){
for (procName in listNoSideEffects){
if(ask){
readline(prompt = paste0("hit Enter to try ", deparse(substitute(procName)), ":"))
}
else {
print(procName)
}
timeOut <- function (expr, ...) {
on.exit(setTimeLimit())
setTimeLimit(...)
expr
}
trialAnswer <- tryCatch(
timeOut(do.call(procName, data[orderings[i,]]), elapsed=1)
, error = function(e) as.character(e)
)
}
}
}
|
# Learn about API authentication here: {{BASE_URL}}/r/getting-started
# Find your api_key here: {{BASE_URL}}/settings/api
library(plotly)
py <- plotly(username = 'TestBot', key = 'r1neazxo9w')

# LaTeX in labels: every backslash must be escaped ("\\alpha", not "\alpha")
# so a literal backslash reaches plotly's MathJax renderer. Unescaped, "\p"
# is an invalid R escape sequence (parse error) and "\a"/"\t" silently
# become control characters.
trace1 <- list(
  x = c(1, 2, 3, 4),
  y = c(1, 4, 9, 16),
  name = "$\\alpha_{1c} = 352 \\pm 11 \\text{ km s}^{-1}$",
  type = "scatter"
)
trace2 <- list(
  x = c(1, 2, 3, 4),
  y = c(0.5, 2, 4.5, 8),
  name = "$\\beta_{1c} = 25 \\pm 11 \\text{ km s}^{-1}$",
  type = "scatter"
)
data <- list(trace1, trace2)
layout <- list(
  xaxis = list(title = "$\\sqrt{(n_\\text{c}(t|{T_\\text{early}}))}$"),
  yaxis = list(title = "$d, r \\text{ (solar radius)}$")
)
# Upload the figure, overwriting any existing plot named "shiny-r"
response <- py$plotly(data, kwargs = list(layout = layout, filename = "shiny-r", fileopt = "overwrite"))
url <- response$url
| /auto-docs/executables/r/shiny_r.r | no_license | VukDukic/documentation | R | false | false | 762 | r | # Learn about API authentication here: {{BASE_URL}}/r/getting-started
# Find your api_key here: {{BASE_URL}}/settings/api
library(plotly)

py <- plotly(username = 'TestBot', key = 'r1neazxo9w')

# Backslashes in the LaTeX labels must be doubled ("\\alpha"): single
# backslashes such as "\p" and "\s" are invalid R escape sequences, so the
# original file failed to parse.
trace1 <- list(
  x = c(1, 2, 3, 4),
  y = c(1, 4, 9, 16),
  name = "$\\alpha_{1c} = 352 \\pm 11 \\text{ km s}^{-1}$",
  type = "scatter"
)
trace2 <- list(
  x = c(1, 2, 3, 4),
  y = c(0.5, 2, 4.5, 8),
  name = "$\\beta_{1c} = 25 \\pm 11 \\text{ km s}^{-1}$",
  type = "scatter"
)
data <- list(trace1, trace2)
layout <- list(
  xaxis = list(title = "$\\sqrt{(n_\\text{c}(t|{T_\\text{early}}))}$"),
  yaxis = list(title = "$d, r \\text{ (solar radius)}$")
)

# Upload both traces and keep the resulting figure URL.
response <- py$plotly(data, kwargs = list(layout = layout, filename = "shiny-r", fileopt = "overwrite"))
url <- response$url
|
context("lm")

sc <- testthat_spark_connection()

# Compare two named coefficient vectors: align both on the names of `lhs`,
# then assert approximate equality within a 1% tolerance.
expect_coef_equal <- function(lhs, rhs) {
  keys <- names(lhs)
  lhs_aligned <- lhs[keys]
  rhs_aligned <- rhs[keys]
  expect_true(all.equal(lhs_aligned, rhs_aligned, tolerance = 0.01))
}
test_that("ml_linear_regression and 'penalized' produce similar model fits", {
  skip_on_cran()
  test_requires("glmnet")
  mtcars_tbl <- testthat_tbl("mtcars")
  # Sweep a small grid of elastic-net mixing (alpha) and penalty (lambda)
  # values, comparing Spark's fit against glmnet's reference fit.
  values <- seq(0, 0.5, by = 0.1)
  parMatrix <- expand.grid(values, values, KEEP.OUT.ATTRS = FALSE)
  for (i in seq_len(nrow(parMatrix))) {
    alpha <- parMatrix[[1]][[i]]
    lambda <- parMatrix[[2]][[i]]
    # Reference fit: glmnet on the in-memory mtcars data.
    gFit <- glmnet::glmnet(
      x = as.matrix(mtcars[, c("cyl", "disp")]),
      y = mtcars$mpg,
      family = "gaussian",
      alpha = alpha,
      lambda = lambda
    )
    # Spark fit on the same data with identical regularization settings.
    sFit <- ml_linear_regression(
      mtcars_tbl,
      "mpg",
      c("cyl", "disp"),
      alpha = alpha,
      lambda = lambda
    )
    # glmnet returns a one-column coefficient matrix; drop it to a vector.
    gCoef <- coefficients(gFit)[, 1]
    sCoef <- coefficients(sFit)
    expect_coef_equal(gCoef, sCoef)
  }
})
test_that("weights column works for lm", {
  set.seed(42)
  # Add a Poisson-based weights column, a constant "ones" column (unit
  # weights should reproduce the unweighted fit), and a binary indicator.
  iris_weighted <- iris %>%
    dplyr::mutate(weights = rpois(nrow(iris), 1) + 1,
                  ones = rep(1, nrow(iris)),
                  versicolor = ifelse(Species == "versicolor", 1L, 0L))
  iris_weighted_tbl <- testthat_tbl("iris_weighted")
  # Weighted fit: base R lm vs Spark with weights.column = "weights".
  r <- lm(Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width,
          weights = weights, data = iris_weighted)
  s <- ml_linear_regression(iris_weighted_tbl,
                            response = "Sepal_Length",
                            features = c("Sepal_Width", "Petal_Length", "Petal_Width"),
                            lambda = 0L,
                            weights.column = "weights")
  expect_equal(unname(coef(r)), unname(coef(s)))
  # Unweighted fit should match Spark when every weight is 1.
  r <- lm(Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width,
          data = iris_weighted)
  s <- ml_linear_regression(iris_weighted_tbl,
                            response = "Sepal_Length",
                            features = c("Sepal_Width", "Petal_Length", "Petal_Width"),
                            lambda = 0L,
                            weights.column = "ones")
  expect_equal(unname(coef(r)), unname(coef(s)))
})
| /tests/testthat/test-ml-linear-regression.R | permissive | leosouzadias/sparklyr | R | false | false | 2,206 | r | context("lm")
sc <- testthat_spark_connection()

# Compare two named coefficient vectors; both are aligned to the names of
# `lhs` before testing approximate equality (1% tolerance).
expect_coef_equal <- function(lhs, rhs) {
  keys <- names(lhs)
  expect_true(all.equal(lhs[keys], rhs[keys], tolerance = 0.01))
}
test_that("ml_linear_regression and 'penalized' produce similar model fits", {
  skip_on_cran()
  test_requires("glmnet")
  mtcars_tbl <- testthat_tbl("mtcars")
  # Grid of elastic-net mixing (alpha) and penalty (lambda) values to sweep.
  values <- seq(0, 0.5, by = 0.1)
  parMatrix <- expand.grid(values, values, KEEP.OUT.ATTRS = FALSE)
  for (i in seq_len(nrow(parMatrix))) {
    alpha <- parMatrix[[1]][[i]]
    lambda <- parMatrix[[2]][[i]]
    # Reference fit: glmnet on the in-memory mtcars data.
    gFit <- glmnet::glmnet(
      x = as.matrix(mtcars[, c("cyl", "disp")]),
      y = mtcars$mpg,
      family = "gaussian",
      alpha = alpha,
      lambda = lambda
    )
    # Spark fit on the same data with identical regularization settings.
    sFit <- ml_linear_regression(
      mtcars_tbl,
      "mpg",
      c("cyl", "disp"),
      alpha = alpha,
      lambda = lambda
    )
    # glmnet returns a one-column coefficient matrix; drop it to a vector.
    gCoef <- coefficients(gFit)[, 1]
    sCoef <- coefficients(sFit)
    expect_coef_equal(gCoef, sCoef)
  }
})
test_that("weights column works for lm", {
  set.seed(42)
  # Add a Poisson-based weights column, a constant "ones" column (unit
  # weights should reproduce the unweighted fit), and a binary indicator.
  iris_weighted <- iris %>%
    dplyr::mutate(weights = rpois(nrow(iris), 1) + 1,
                  ones = rep(1, nrow(iris)),
                  versicolor = ifelse(Species == "versicolor", 1L, 0L))
  iris_weighted_tbl <- testthat_tbl("iris_weighted")
  # Weighted fit: base R lm vs Spark with weights.column = "weights".
  r <- lm(Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width,
          weights = weights, data = iris_weighted)
  s <- ml_linear_regression(iris_weighted_tbl,
                            response = "Sepal_Length",
                            features = c("Sepal_Width", "Petal_Length", "Petal_Width"),
                            lambda = 0L,
                            weights.column = "weights")
  expect_equal(unname(coef(r)), unname(coef(s)))
  # Unweighted fit should match Spark when every weight is 1.
  r <- lm(Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width,
          data = iris_weighted)
  s <- ml_linear_regression(iris_weighted_tbl,
                            response = "Sepal_Length",
                            features = c("Sepal_Width", "Petal_Length", "Petal_Width"),
                            lambda = 0L,
                            weights.column = "ones")
  expect_equal(unname(coef(r)), unname(coef(s)))
})
|
# Standard testthat entry point: load the package, then run every test
# under tests/testthat/.
library(testthat)
library(erisfibo)
test_check("erisfibo")
| /tests/testthat.R | permissive | cerikahp/erisfibo | R | false | false | 60 | r | library(testthat)
library(erisfibo)
# Run every test under tests/testthat/ for the erisfibo package.
test_check("erisfibo")
|
library(readxl)
library(dplyr)
library(tidyr)

titanic3 <- read_excel("titanic3.xls")
titanic3_clean <- titanic3

# replace NA values in embarked with "S"
titanic3_clean$embarked[is.na(titanic3$embarked)] <- "S"

# calculate mean age
mean_age <- mean(titanic3_clean$age, na.rm = TRUE)
# replace age NAs with mean age
titanic3_clean$age[is.na(titanic3_clean$age)] <- mean_age
# other ways to calculate replacement age for NA?
# median: might reduce influence of any outliers?

# create dummy value for empty boat values
titanic3_clean$boat[is.na(titanic3_clean$boat)] <- "none"

# it doesn't make sense to fill in a false cabin number, it's not statistically interesting
# create binary has_cabin_number column (1 = cabin recorded, 0 = missing).
# Vectorized replacement for the original while loop, which iterated with
# `i < nrow(...)` and therefore never set a value for the last row.
titanic3_clean$has_cabin_number <- ifelse(is.na(titanic3_clean$cabin), 0, 1)

# write.csv(titanic3_clean, "titanic_clean.csv")
| /titanic_clean.R | no_license | mottledMantis/titanic | R | false | false | 964 | r | library(readxl)
library(dplyr)
library(tidyr)

titanic3 <- read_excel("titanic3.xls")
titanic3_clean <- titanic3

# replace NA values in embarked with "S"
titanic3_clean$embarked[is.na(titanic3$embarked)] <- "S"

# calculate mean age
mean_age <- mean(titanic3_clean$age, na.rm = TRUE)
# replace age NAs with mean age
titanic3_clean$age[is.na(titanic3_clean$age)] <- mean_age
# other ways to calculate replacement age for NA?
# median: might reduce influence of any outliers?

# create dummy value for empty boat values
titanic3_clean$boat[is.na(titanic3_clean$boat)] <- "none"

# it doesn't make sense to fill in a false cabin number, it's not statistically interesting
# create binary has_cabin_number column (1 = cabin recorded, 0 = missing).
# Vectorized replacement for the original while loop, which iterated with
# `i < nrow(...)` and therefore never set a value for the last row.
titanic3_clean$has_cabin_number <- ifelse(is.na(titanic3_clean$cabin), 0, 1)

# write.csv(titanic3_clean, "titanic_clean.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comppred_function.R
\name{comp.pred}
\alias{comp.pred}
\title{Wrapper for classification algorithms}
\usage{
comp.pred(formula, data.train, data.test = NULL, algorithm = NULL)
}
\arguments{
\item{formula}{a formula}
\item{data.train}{dataframe. A training dataset}
\item{data.test}{dataframe. A testing dataset}
\item{algorithm}{string. An algorithm in the set "lr" -- logistic regression, cart" -- decision trees, "rlr" -- regularised logistic regression, "svm" -- support vector machines, "rf" -- random forests}
}
\description{
This function is a wrapper for many classification algorithms such as CART (rpart::rpart), regularised logistic regression (glmnet::glmnet), support vector machines (svm::svm) and random forests (randomForest::randomForest)
}
\examples{
# Fit many alternative algorithms to the mushrooms dataset
mushrooms.cart.pred <- comp.pred(formula = poisonous ~.,
data.train = mushrooms[1:100,],
data.test = mushrooms[101:nrow(mushrooms),],
algorithm = "cart")
mushrooms.rf.pred <- comp.pred(formula = poisonous ~.,
data.train = mushrooms[1:100,],
data.test = mushrooms[101:nrow(mushrooms),],
algorithm = "rf")
mushrooms.svm.pred <- comp.pred(formula = poisonous ~.,
data.train = mushrooms[1:100,],
data.test = mushrooms[101:nrow(mushrooms),],
algorithm = "svm")
}
| /man/comp.pred.Rd | no_license | ronypik/FFTrees | R | false | true | 1,653 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comppred_function.R
\name{comp.pred}
\alias{comp.pred}
\title{Wrapper for classification algorithms}
\usage{
comp.pred(formula, data.train, data.test = NULL, algorithm = NULL)
}
\arguments{
\item{formula}{a formula}
\item{data.train}{dataframe. A training dataset}
\item{data.test}{dataframe. A testing dataset}
\item{algorithm}{string. An algorithm in the set "lr" -- logistic regression, cart" -- decision trees, "rlr" -- regularised logistic regression, "svm" -- support vector machines, "rf" -- random forests}
}
\description{
This function is a wrapper for many classification algorithms such as CART (rpart::rpart), regularised logistic regression (glmnet::glmnet), support vector machines (svm::svm) and random forests (randomForest::randomForest)
}
\examples{
# Fit many alternative algorithms to the mushrooms dataset
mushrooms.cart.pred <- comp.pred(formula = poisonous ~.,
data.train = mushrooms[1:100,],
data.test = mushrooms[101:nrow(mushrooms),],
algorithm = "cart")
mushrooms.rf.pred <- comp.pred(formula = poisonous ~.,
data.train = mushrooms[1:100,],
data.test = mushrooms[101:nrow(mushrooms),],
algorithm = "rf")
mushrooms.svm.pred <- comp.pred(formula = poisonous ~.,
data.train = mushrooms[1:100,],
data.test = mushrooms[101:nrow(mushrooms),],
algorithm = "svm")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/papSmear.R
\docType{data}
\name{CNN}
\alias{CNN}
\title{CNN}
\usage{
data(CNN)
}
\description{
Trained Convolutional Neural Network on images of unique cervix cells.
The model is built with the mxnet package using 2 convolution and 2 fully connected layers.
}
\details{
90 rounds, 20 images per round, 7 folders that contain 7 cell classes
}
\author{
Karim Mezhoud \email{kmezhoud@gmail.com}
}
\keyword{data}
| /man/CNN.Rd | no_license | kmezhoud/papSmear | R | false | true | 497 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/papSmear.R
\docType{data}
\name{CNN}
\alias{CNN}
\title{CNN}
\usage{
data(CNN)
}
\description{
Trained Convolutional Neural Network on images of unique cervix cells.
The model is built with the mxnet package using 2 convolution and 2 fully connected layers.
}
\details{
90 rounds, 20 images per round, 7 folders that contain 7 cell classes
}
\author{
Karim Mezhoud \email{kmezhoud@gmail.com}
}
\keyword{data}
|
## From: Prof Brian Ripley
## To: Martin Maechler
## cc: Doug and Martin
## Subject: Re: [Rd] Package Matrix does not compile in R-devel_2009-01-10 (fwd)
## Date: Thu, 15 Jan 2009 14:22:17 +0000 (GMT)
# Stems of the AMD C source files; each stem yields one integer (DINT) and
# one long-integer (DLONG) Makefile rule.
AMD <- c("aat", "1", "2", "postorder", "post_tree", "defaults",
         "order", "control", "info", "valid", "preprocess", "dump")
# Common compiler invocation prefix; the leading tab is required by make.
CC1 <- "\t$(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -I../Include"
# Emit rules for the integer (amd_i_*) object files.
for (i in AMD)
    cat(sprintf("amd_i_%s.o: amd_%s.c $(INC)", i, i),
        sprintf(paste(CC1, "-DDINT -c amd_%s.c -o $@"), i), sep="\n")
cat("\n")
# Emit rules for the long-integer (amd_l_*) object files.
for (i in AMD)
    cat(sprintf("amd_l_%s.o: amd_%s.c $(INC)", i,i),
        sprintf(paste(CC1, "-DDLONG -c amd_%s.c -o $@"), i), sep="\n")
| /tags/0.999375-23/src/AMD/Source/make-Make.R | no_license | LTLA/Matrix | R | false | false | 689 | r | ## From: Prof Brian Ripley
## To: Martin Maechler
## cc: Doug and Martin
## Subject: Re: [Rd] Package Matrix does not compile in R-devel_2009-01-10 (fwd)
## Date: Thu, 15 Jan 2009 14:22:17 +0000 (GMT)
# Stems of the AMD C source files; each stem yields one DINT and one DLONG
# Makefile rule.
AMD <- c("aat", "1", "2", "postorder", "post_tree", "defaults",
         "order", "control", "info", "valid", "preprocess", "dump")
# Common compiler invocation prefix; the leading tab is required by make.
CC1 <- "\t$(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -I../Include"
# Print the target and recipe lines for one family of object files.
# NOTE(review): as in the original, cat() emits no trailing newline per
# stem, so consecutive rules run together on one line.
emit_rules <- function(prefix, define) {
  for (stem in AMD) {
    cat(sprintf("amd_%s_%s.o: amd_%s.c $(INC)", prefix, stem, stem),
        sprintf("%s %s -c amd_%s.c -o $@", CC1, define, stem),
        sep = "\n")
  }
}
emit_rules("i", "-DDINT")
cat("\n")
emit_rules("l", "-DDLONG")
|
# Jake Yeung
# Date of Creation: 2020-11-12
# File: ~/projects/scchic/scripts/rstudioserver_analysis/spikeins/BM_merged_with_old/7-differential_expression_analysis_peaks.R
# 
# Batch script: for each histone mark, loads LDA output, attaches cluster
# annotations, and fits per-region Poisson GLMs (see loop below) using
# spike-in counts as per-cell totals.

# NOTE(review): rm(list=ls()) clears the whole session; acceptable for a
# batch script run top-to-bottom, but dangerous if sourced interactively.
rm(list=ls())

library(parallel)
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(hash)
library(igraph)
library(umap)
library(scchicFuncs)

# Wall-clock reference for the progress printouts below.
jstart <- Sys.time()

# UMAP hyperparameters (fixed random_state for reproducibility).
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123

# Load LDA (contains countmat) ---------------------------------------------------------------

ncores <- 8
hubprefix <- "/home/jyeung/hub_oudenaarden"

jtype <- "hiddendomains"
# jdist <- "TES"

# outdir <- "/home/jyeung/data/from_rstudioserver/spikein_fits_BM_poisson"
outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/poisson_fits_BM_AllMerged3.spikeins"

# jmark <- "H3K4me1"
jmarks <- c("H3K4me1", "H3K4me3", "H3K27me3"); names(jmarks) <- jmarks

# Spike-in counts per cell, shared across all marks.
inf.spikein <- file.path(hubprefix, "jyeung/data/scChiC/from_rstudioserver/quality_control_BM_round2_all.blfix/spikein_info_BM_round2_all.blfix.txt")
dat.spikein.all <- fread(inf.spikein)

cbPalette <- c("#696969", "#56B4E9", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#32CD32", "#FFB6C1", "#0b1b7f", "#ff9f7d", "#eb9d01", "#2c2349", "#753187", "#f80597")
for (jmark in jmarks){
  # Refuse to overwrite an existing fit for this mark.
  outf <- file.path(outdir, paste0("poisson_fit_", jtype, ".", jmark, ".", Sys.Date(), ".spikeins.RData"))
  assertthat::assert_that(!file.exists(outf))

  # Load the LDA output for this mark (.Robj loaded into the global env).
  indir <- file.path(hubprefix, paste0("jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all_spikeins/ldaAnalysisBins_mouse_spikein_BMround2all_MergeWithOld.from_", jtype))
  fname <- paste0("lda_outputs.count_mat_from_", jtype, ".", jmark, ".K-30.binarize.FALSE/ldaOut.count_mat_from_", jtype, ".", jmark, ".K-30.Robj")
  load(file.path(indir, fname), v=T)

  # NOTE(review): `mat` is not defined anywhere in this script -- presumably
  # this should be colnames(count.mat), or `mat` is one of the objects
  # loaded from the .Robj above; verify before running.
  dat.spikein <- subset(dat.spikein.all, cell %in% colnames(mat))

  tm.result <- posterior(out.lda)
  dat.umap <- DoUmapAndLouvain(tm.result$topics, jsettings)

  # NOTE(review): inside a for loop an unassigned ggplot object is not
  # auto-printed, so this plot is never rendered; wrap in print() if the
  # figure is actually wanted.
  ggplot(dat.umap, aes(x = umap1, y = umap2, color = louvain)) +
    geom_point() +
    theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())

  # Load metadata -----------------------------------------------------------

  indir.annot <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/cell_cluster_tables.spikeins_mouse.BMround2_umaps_and_ratios.Round1Round2")
  inf.annot <- file.path(indir.annot, paste0("spikeins_mouse.BMround1and2_umaps_and_ratios.colfix.celltyping.2020-11-01.WithRelLevels.mark_", jmark, ".cell_cluster_tables.txt"))
  dat.annot <- fread(inf.annot)

  # Keep only annotated cells that also have spike-in information.
  cells.keep <- dat.spikein$cell
  dat.annot.filt <- subset(dat.annot, cell %in% cells.keep)
  dat.umap.merge <- left_join(dat.umap, subset(dat.annot, select = c(cell, cluster)))

  # ggplot(dat.umap.merge, aes(x = umap1, y = umap2, color = cluster)) +
  #   geom_point() +
  #   scale_color_manual(values = cbPalette) +

  # Load spikeins -----------------------------------------------------------

  # Run fits gene by gene ---------------------------------------------------

  print(jmark)

  jmat.mark <- count.mat
  # Rename HSPCs to "aHSPCs", presumably so it sorts first and acts as the
  # reference level in the fits. (The original had this assignment line
  # duplicated, which piped a data frame into another assignment and broke
  # the pipeline.)
  dat.annots.filt.mark <- dat.annot.filt %>%
    mutate(Cluster = ifelse(cluster == "HSPCs", "aHSPCs", cluster)) %>%
    rowwise() %>%
    mutate(batch = IsRound1(cell, mark = jmark)) %>%
    mutate(Plate = ifelse(batch == "Round2", plate, "Round1"))
  print(unique(dat.annots.filt.mark$Plate))

  # Use spike-in counts (not total cuts) as the per-cell totals.
  # ncuts.for.fit.mark <- data.frame(cell = colnames(count.mat), ncuts.total = colSums(count.mat), stringsAsFactors = FALSE)
  ncuts.for.fit.mark <- subset(dat.spikein, select = c(samp, spikeincounts)) %>%
    dplyr::rename(cell = samp) %>%
    dplyr::mutate(ncuts.total = spikeincounts)

  cnames <- colnames(jmat.mark)
  jrow.names <- rownames(jmat.mark)
  names(jrow.names) <- jrow.names
  # Fit one GLM per row (region), parallelized across ncores workers.
  jfits.lst <- parallel::mclapply(jrow.names, function(jrow.name){
    jrow <- jmat.mark[jrow.name, ]
    jout <- FitGlmRowClustersPlate(jrow, cnames, dat.annots.filt.mark, ncuts.for.fit.mark, jbin = NULL, returnobj = FALSE)
    return(jout)
  }, mc.cores = ncores)

  # Save outputs ------------------------------------------------------------

  # saveRDS(jfits.lst, outf)
  save(jfits.lst, dat.annots.filt.mark, count.mat, file = outf)
  print(Sys.time() - jstart)
}
print("Done")
print(Sys.time() - jstart)
| /scripts/rstudioserver_analysis/spikeins/BM_merged_with_old/7-differential_expression_analysis_hiddendomains_spikeins.baackup.R | no_license | jakeyeung/sortchicAllScripts | R | false | false | 4,702 | r | # Jake Yeung
# Date of Creation: 2020-11-12
# File: ~/projects/scchic/scripts/rstudioserver_analysis/spikeins/BM_merged_with_old/7-differential_expression_analysis_peaks.R
# 
# Batch script: for each histone mark, loads LDA output, attaches cluster
# annotations, and fits per-region Poisson GLMs (see loop below) using
# spike-in counts as per-cell totals.

# NOTE(review): rm(list=ls()) clears the whole session; acceptable for a
# batch script run top-to-bottom, but dangerous if sourced interactively.
rm(list=ls())

library(parallel)
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(hash)
library(igraph)
library(umap)
library(scchicFuncs)

# Wall-clock reference for the progress printouts below.
jstart <- Sys.time()

# UMAP hyperparameters (fixed random_state for reproducibility).
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123

# Load LDA (contains countmat) ---------------------------------------------------------------

ncores <- 8
hubprefix <- "/home/jyeung/hub_oudenaarden"

jtype <- "hiddendomains"
# jdist <- "TES"

# outdir <- "/home/jyeung/data/from_rstudioserver/spikein_fits_BM_poisson"
outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/poisson_fits_BM_AllMerged3.spikeins"

# jmark <- "H3K4me1"
jmarks <- c("H3K4me1", "H3K4me3", "H3K27me3"); names(jmarks) <- jmarks

# Spike-in counts per cell, shared across all marks.
inf.spikein <- file.path(hubprefix, "jyeung/data/scChiC/from_rstudioserver/quality_control_BM_round2_all.blfix/spikein_info_BM_round2_all.blfix.txt")
dat.spikein.all <- fread(inf.spikein)

cbPalette <- c("#696969", "#56B4E9", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#32CD32", "#FFB6C1", "#0b1b7f", "#ff9f7d", "#eb9d01", "#2c2349", "#753187", "#f80597")
for (jmark in jmarks){
  # Refuse to overwrite an existing fit for this mark.
  outf <- file.path(outdir, paste0("poisson_fit_", jtype, ".", jmark, ".", Sys.Date(), ".spikeins.RData"))
  assertthat::assert_that(!file.exists(outf))

  # Load the LDA output for this mark (.Robj loaded into the global env).
  indir <- file.path(hubprefix, paste0("jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all_spikeins/ldaAnalysisBins_mouse_spikein_BMround2all_MergeWithOld.from_", jtype))
  fname <- paste0("lda_outputs.count_mat_from_", jtype, ".", jmark, ".K-30.binarize.FALSE/ldaOut.count_mat_from_", jtype, ".", jmark, ".K-30.Robj")
  load(file.path(indir, fname), v=T)

  # NOTE(review): `mat` is not defined anywhere in this script -- presumably
  # this should be colnames(count.mat), or `mat` is one of the objects
  # loaded from the .Robj above; verify before running.
  dat.spikein <- subset(dat.spikein.all, cell %in% colnames(mat))

  tm.result <- posterior(out.lda)
  dat.umap <- DoUmapAndLouvain(tm.result$topics, jsettings)

  # NOTE(review): inside a for loop an unassigned ggplot object is not
  # auto-printed, so this plot is never rendered; wrap in print() if the
  # figure is actually wanted.
  ggplot(dat.umap, aes(x = umap1, y = umap2, color = louvain)) +
    geom_point() +
    theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())

  # Load metadata -----------------------------------------------------------

  indir.annot <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/cell_cluster_tables.spikeins_mouse.BMround2_umaps_and_ratios.Round1Round2")
  inf.annot <- file.path(indir.annot, paste0("spikeins_mouse.BMround1and2_umaps_and_ratios.colfix.celltyping.2020-11-01.WithRelLevels.mark_", jmark, ".cell_cluster_tables.txt"))
  dat.annot <- fread(inf.annot)

  # Keep only annotated cells that also have spike-in information.
  cells.keep <- dat.spikein$cell
  dat.annot.filt <- subset(dat.annot, cell %in% cells.keep)
  dat.umap.merge <- left_join(dat.umap, subset(dat.annot, select = c(cell, cluster)))

  # ggplot(dat.umap.merge, aes(x = umap1, y = umap2, color = cluster)) +
  #   geom_point() +
  #   scale_color_manual(values = cbPalette) +

  # Load spikeins -----------------------------------------------------------

  # Run fits gene by gene ---------------------------------------------------

  print(jmark)

  jmat.mark <- count.mat
  # Rename HSPCs to "aHSPCs", presumably so it sorts first and acts as the
  # reference level in the fits. (The original had this assignment line
  # duplicated, which piped a data frame into another assignment and broke
  # the pipeline.)
  dat.annots.filt.mark <- dat.annot.filt %>%
    mutate(Cluster = ifelse(cluster == "HSPCs", "aHSPCs", cluster)) %>%
    rowwise() %>%
    mutate(batch = IsRound1(cell, mark = jmark)) %>%
    mutate(Plate = ifelse(batch == "Round2", plate, "Round1"))
  print(unique(dat.annots.filt.mark$Plate))

  # Use spike-in counts (not total cuts) as the per-cell totals.
  # ncuts.for.fit.mark <- data.frame(cell = colnames(count.mat), ncuts.total = colSums(count.mat), stringsAsFactors = FALSE)
  ncuts.for.fit.mark <- subset(dat.spikein, select = c(samp, spikeincounts)) %>%
    dplyr::rename(cell = samp) %>%
    dplyr::mutate(ncuts.total = spikeincounts)

  cnames <- colnames(jmat.mark)
  jrow.names <- rownames(jmat.mark)
  names(jrow.names) <- jrow.names
  # Fit one GLM per row (region), parallelized across ncores workers.
  jfits.lst <- parallel::mclapply(jrow.names, function(jrow.name){
    jrow <- jmat.mark[jrow.name, ]
    jout <- FitGlmRowClustersPlate(jrow, cnames, dat.annots.filt.mark, ncuts.for.fit.mark, jbin = NULL, returnobj = FALSE)
    return(jout)
  }, mc.cores = ncores)

  # Save outputs ------------------------------------------------------------

  # saveRDS(jfits.lst, outf)
  save(jfits.lst, dat.annots.filt.mark, count.mat, file = outf)
  print(Sys.time() - jstart)
}
print("Done")
print(Sys.time() - jstart)
|
# Plot the three sub-metering series for 1-2 Feb 2007 and save as PNG.
# The original misspelled 'stringsAsFactors' (it only worked through
# partial argument matching) and re-parsed the date-time three times.
totaldata <- read.table("exdata_data_household_power_consumption.txt",
                        header = TRUE, sep = ";", stringsAsFactors = FALSE)
data <- subset(totaldata, Date == "1/2/2007" | Date == "2/2/2007")
# Parse date + time once for all three series.
datetime <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
with(data, {
  plot(x = datetime, y = as.numeric(Sub_metering_1), col = "black",
       ylab = "Energy sub metering", xlab = "", type = "l")
  lines(x = datetime, y = as.numeric(Sub_metering_2), col = "red", type = "l")
  lines(x = datetime, y = as.numeric(Sub_metering_3), col = "blue", type = "l")
  legend("topright", lty = par("lty"), seg.len = 3,
         col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
})
dev.copy(png, file = "plot3.png")
dev.off() | /plot3.R | no_license | oopeking/EDA | R | false | false | 781 | r | totaldata=read.table("exdata_data_household_power_consumption.txt",header=TRUE,sep=";",stringsAsFactor=FALSE)
# Plot the three sub-metering series for 1-2 Feb 2007 and save as PNG.
data <- subset(totaldata, Date == "1/2/2007" | Date == "2/2/2007")
# Parse date + time once instead of three times.
datetime <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
with(data, {
  plot(x = datetime, y = as.numeric(Sub_metering_1), col = "black",
       ylab = "Energy sub metering", xlab = "", type = "l")
  lines(x = datetime, y = as.numeric(Sub_metering_2), col = "red", type = "l")
  lines(x = datetime, y = as.numeric(Sub_metering_3), col = "blue", type = "l")
  legend("topright", lty = par("lty"), seg.len = 3,
         col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
})
dev.copy(png, file = "plot3.png")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_corr.R
\name{corr_dotmap}
\alias{corr_dotmap}
\title{corr_dotmap
Dotmap based on correlation coefficients and p values}
\usage{
corr_dotmap(cor_r, cor_p, name)
}
\arguments{
\item{cor_r}{A vector or matrix of correlation coefficients}
\item{cor_p}{A vector or matrix of correlation test p values}
\item{r_cut, p_cut}{cutoffs for the significant correlations}
}
\description{
corr_dotmap
Dotmap based on correlation coefficients and p values
}
\examples{
}
| /man/corr_dotmap.Rd | no_license | yzeng-lol/meeqtl | R | false | true | 541 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_corr.R
\name{corr_dotmap}
\alias{corr_dotmap}
\title{corr_dotmap
Dotmap based on correlation coefficients and p values}
\usage{
corr_dotmap(cor_r, cor_p, name)
}
\arguments{
\item{cor_r}{A vector or matrix of correlation coefficients}
\item{cor_p}{A vector or matrix of correlation test p values}
\item{r_cut, p_cut}{cutoffs for the significant correlations}
}
\description{
corr_dotmap
Dotmap based on correlation coefficients and p values
}
\examples{
}
|
\name{simCLT}
\alias{simCLT}
\title{Pedagogical Simulation for the Central Limit Theorem}
\description{
Show the distribution of sample means and relevant summary statistics, such as the 95\% range of variation. Provide a plot of both the specified population and the corresponding distribution of sample means.
}
\usage{
simCLT(ns, n, p1=0, p2=1,
type=c("normal", "uniform", "lognormal", "antinormal"),
fill="lightsteelblue3", n.display=2, digits.d=3,
subtitle=TRUE, pop=TRUE,
main=NULL, pdf=FALSE, width=5, height=5, \ldots)
}
\arguments{
\item{ns}{Number of samples, that is, repetitions of the experiment.}
\item{n}{Size of each sample.}
\item{p1}{First parameter value for the population distribution, the mean, minimum or meanlog for the normal, uniform and lognormal populations, respectively. Must be 0, the minimum, for the anti-normal distribution.}
\item{p2}{Second parameter value for the population distribution, the standard deviation, maximum or sdlog for the normal, uniform and lognormal populations, respectively. Is the maximum for the anti-normal, usually left at the default value of 1.}
\item{type}{The general population distribution.}
\item{fill}{Fill color of the graphs.}
\item{n.display}{Number of samples for which to display the sample mean and data values.}
\item{digits.d}{Number of decimal digits to display on the output.}
\item{subtitle}{If \code{TRUE}, then display the specific parameter values of the population or sample, depending on the graph.}
\item{pop}{If \code{TRUE}, then display the graph of the population from which the data are sampled.}
\item{main}{Title of graph.}
\item{pdf}{Indicator as to if the graphic files should be saved as pdf files
instead of directed to the standard graphics windows.}
\item{width}{Width of the pdf file in inches.}
\item{height}{Height of the pdf file in inches.}
\item{\dots}{Other parameter values for R function \code{\link{lm}} which provides the core computations.}
}
\details{
Provide a plot of both the specified population and the corresponding distribution of sample means. Include descriptive statistics including the 95\% range of sampling variation in raw units and standard errors for comparison to the normal distribution. Also provide a few samples of the data and corresponding means.
Four different populations are provided: normal, uniform, lognormal for a skewed distribution, and what is called the anti-normal, the combining of two side-by-side triangular distributions so that most of the values are in the extremes and fewer values are close to the middle.
For the lognormal distribution, increase the skew by increasing the value of \code{p2}, which is the population standard deviation.
The anti-normal distribution requires the \code{triangle} package. No population mean and standard deviation are provided for the anti-normal distribution, so the 95\% range of sampling variation of the sample mean in terms of standard errors is not provided. ** Not activated until the triangle package is updated. **
If the two plots, of the population and sample distributions respectively, are written to pdf files, according to \code{pdf=TRUE}, they are named SimPopulation.pdf and SimSample.pdf. Their names and the directory to which they are written are provided as part the console output.
}
\author{David W. Gerbing (Portland State University; \email{gerbing@pdx.edu})}
\examples{
# plot of the standardized normal
# and corresponding sampling distribution with 10000 samples
# each of size 2
simCLT(ns=1000, n=2)
# plot of the uniform dist from 0 to 4
# and corresponding sampling distribution with 10000 samples
# each of size 2
simCLT(ns=1000, n=2, p1=0, p2=4, type="uniform", bin.width=0.01)
# save the population and sample distributions to pdf files
simCLT(100, 10, pdf=TRUE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ central limit theorem }
| /man/simCLT.Rd | no_license | goliatl/lessR | R | false | false | 4,006 | rd | \name{simCLT}
\alias{simCLT}
\title{Pedagogical Simulation for the Central Limit Theorem}
\description{
Show the distribution of sample means and relevant summary statistics, such as the 95\% range of variation. Provide a plot of both the specified population and the corresponding distribution of sample means.
}
\usage{
simCLT(ns, n, p1=0, p2=1,
type=c("normal", "uniform", "lognormal", "antinormal"),
fill="lightsteelblue3", n.display=2, digits.d=3,
subtitle=TRUE, pop=TRUE,
main=NULL, pdf=FALSE, width=5, height=5, \ldots)
}
\arguments{
\item{ns}{Number of samples, that is, repetitions of the experiment.}
\item{n}{Size of each sample.}
\item{p1}{First parameter value for the population distribution, the mean, minimum or meanlog for the normal, uniform and lognormal populations, respectively. Must be 0, the minimum, for the anti-normal distribution.}
\item{p2}{Second parameter value for the population distribution, the standard deviation, maximum or sdlog for the normal, uniform and lognormal populations, respectively. Is the maximum for the anti-normal, usually left at the default value of 1.}
\item{type}{The general population distribution.}
\item{fill}{Fill color of the graphs.}
\item{n.display}{Number of samples for which to display the sample mean and data values.}
\item{digits.d}{Number of decimal digits to display on the output.}
\item{subtitle}{If \code{TRUE}, then display the specific parameter values of the population or sample, depending on the graph.}
\item{pop}{If \code{TRUE}, then display the graph of the population from which the data are sampled.}
\item{main}{Title of graph.}
\item{pdf}{Indicator as to if the graphic files should be saved as pdf files
instead of directed to the standard graphics windows.}
\item{width}{Width of the pdf file in inches.}
\item{height}{Height of the pdf file in inches.}
\item{\dots}{Other parameter values for R function \code{\link{lm}} which provides the core computations.}
}
\details{
Provide a plot of both the specified population and the corresponding distribution of sample means. Include descriptive statistics including the 95\% range of sampling variation in raw units and standard errors for comparison to the normal distribution. Also provide a few samples of the data and corresponding means.
Four different populations are provided: normal, uniform, lognormal for a skewed distribution, and what is called the anti-normal, the combining of two side-by-side triangular distributions so that most of the values are in the extremes and fewer values are close to the middle.
For the lognormal distribution, increase the skew by increasing the value of \code{p2}, which is the population standard deviation.
The anti-normal distribution requires the \code{triangle} package. No population mean and standard deviation are provided for the anti-normal distribution, so the 95\% range of sampling variation of the sample mean in terms of standard errors is not provided. ** Not activated until the triangle package is updated. **
If the two plots, of the population and sample distributions respectively, are written to pdf files, according to \code{pdf=TRUE}, they are named SimPopulation.pdf and SimSample.pdf. Their names and the directory to which they are written are provided as part the console output.
}
\author{David W. Gerbing (Portland State University; \email{gerbing@pdx.edu})}
\examples{
# plot of the standardized normal
# and corresponding sampling distribution with 10000 samples
# each of size 2
simCLT(ns=1000, n=2)
# plot of the uniform dist from 0 to 4
# and corresponding sampling distribution with 10000 samples
# each of size 2
simCLT(ns=1000, n=2, p1=0, p2=4, type="uniform", bin.width=0.01)
# save the population and sample distributions to pdf files
simCLT(100, 10, pdf=TRUE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ central limit theorem }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.nexus.R
\name{read.nexus.taxablock}
\alias{read.nexus.taxablock}
\title{Read the Taxa block from a Nexus file}
\usage{
read.nexus.taxablock(filename, n)
}
\arguments{
\item{filename}{a character, the name of the file which will be read for the taxa labels.}
\item{n}{the number of taxa}
}
\value{
A character vector of the taxa's labels read from the nexus file.
}
\description{
Read the taxa block from a nexus file, and get the taxa's labels.
}
\seealso{
\code{\link{read.nexus.distanceblock}}
}
| /man/read.nexus.taxablock.Rd | no_license | yukimayuli-gmz/lpnet | R | false | true | 582 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.nexus.R
\name{read.nexus.taxablock}
\alias{read.nexus.taxablock}
\title{Read the Taxa block from a Nexus file}
\usage{
read.nexus.taxablock(filename, n)
}
\arguments{
\item{filename}{a character, the name of the file which will be read for the taxa labels.}
\item{n}{the number of taxa}
}
\value{
A character vector of the taxa's labels read from the nexus file.
}
\description{
Read the taxa block from a nexus file, and get the taxa's labels.
}
\seealso{
\code{\link{read.nexus.distanceblock}}
}
|
# Auto-generated fuzzing regression input (AFL/valgrind) for the internal
# multivariance:::match_rows: a fixed 5x1 matrix A and 1x1 matrix B.
testlist <- list(A = structure(c(1.04342609697395e+82, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
# Replay the recorded call against the target function.
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613113893-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 251 | r | testlist <- list(A = structure(c(1.04342609697395e+82, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.