content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(spatstat)
### Name: print.ppm
### Title: Print a Fitted Point Process Model
### Aliases: print.ppm
### Keywords: spatial print models
### ** Examples
## Not run:
##D m <- ppm(cells, ~1, Strauss(0.05))
##D m
##D
## End(Not run)
| /data/genthat_extracted_code/spatstat/examples/print.ppm.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 248 | r | library(spatstat)
### Name: print.ppm
### Title: Print a Fitted Point Process Model
### Aliases: print.ppm
### Keywords: spatial print models
### ** Examples
## Not run:
##D m <- ppm(cells, ~1, Strauss(0.05))
##D m
##D
## End(Not run)
|
# URLs
# Women: https://en.wikipedia.org/wiki/List_of_Olympic_medalists_in_athletics_(women)
# Men: https://en.wikipedia.org/wiki/List_of_Olympic_medalists_in_athletics_(men)
# TO DO:
# - Extract table of results from wikipedia pages (current & discontinued events)
library(httr)
library(XML)
women <- 'https://en.wikipedia.org/wiki/List_of_Olympic_medalists_in_athletics_(women)'
# NOTE(fix): removed `women_html <- read_html(women)` -- read_html() comes from
# xml2/rvest, neither of which is attached above (only httr and XML are), so
# the call errored at runtime, and its result was never used anyway.
# Fetch the page with httr and parse every HTML table into a list.
r <- GET(women)
doc <- readHTMLTable(
  doc = content(r, "text"))
womens_events <- c(
'100 metres',
'200 metres',
'400 metres',
'800 metres',
'1500 metres',
'5000 metres',
'10,000 metres',
'Marathon',
'100 metres hurdles',
'400 metres hurdles',
'3000 metres steeplechase',
'4 × 100 metres relay',
'4 × 400 metres relay',
'20 kilometres race walk',
'High jump',
'Pole vault',
'Long jump',
'Triple jump',
'Shot put',
'Discus throw',
'Hammer throw',
'Javelin throw',
'Heptathlon',
'3000 metres',
'80 metres hurdles',
'Pentathlon',
'10 kilometers race walk'
)
# Parse each event's medal table, tag it with the event name and gender, and
# collect the pieces in a pre-allocated list: growing a data frame with
# rbind() inside the loop is quadratic, and seq_along() keeps the bound in
# sync with the events vector instead of the hardcoded 27.
results_list <- vector("list", length(womens_events))
for (i in seq_along(womens_events)) {
  print(i)
  df <- doc[i]$`NULL`
  colnames(df) <- c('games', 'gold', 'silver', 'bronze')
  df <- df[-1, ]                   # drop the header row embedded in the table
  df$event <- womens_events[i]     # scalar recycles across all rows
  df$gender <- 'women'
  results_list[[i]] <- df
}
womens_results <- do.call(rbind, results_list)
write.csv(womens_results,"data/womens_results.csv", row.names = F) | /get_womens_results.R | permissive | wjsutton/olympic_athletics | R | false | false | 1,411 | r | # URLs
# Women: https://en.wikipedia.org/wiki/List_of_Olympic_medalists_in_athletics_(women)
# Men: https://en.wikipedia.org/wiki/List_of_Olympic_medalists_in_athletics_(men)
# TO DO:
# - Extract table of results from wikipedia pages (current & discontinued events)
library(httr)
library(XML)
women <- 'https://en.wikipedia.org/wiki/List_of_Olympic_medalists_in_athletics_(women)'
# NOTE(fix): removed `women_html <- read_html(women)` -- read_html() comes from
# xml2/rvest, neither of which is attached above (only httr and XML are), so
# the call errored at runtime, and its result was never used anyway.
# Fetch the page with httr and parse every HTML table into a list.
r <- GET(women)
doc <- readHTMLTable(
  doc = content(r, "text"))
womens_events <- c(
'100 metres',
'200 metres',
'400 metres',
'800 metres',
'1500 metres',
'5000 metres',
'10,000 metres',
'Marathon',
'100 metres hurdles',
'400 metres hurdles',
'3000 metres steeplechase',
'4 × 100 metres relay',
'4 × 400 metres relay',
'20 kilometres race walk',
'High jump',
'Pole vault',
'Long jump',
'Triple jump',
'Shot put',
'Discus throw',
'Hammer throw',
'Javelin throw',
'Heptathlon',
'3000 metres',
'80 metres hurdles',
'Pentathlon',
'10 kilometers race walk'
)
# Parse each event's medal table, tag it with the event name and gender, and
# collect the pieces in a pre-allocated list: growing a data frame with
# rbind() inside the loop is quadratic, and seq_along() keeps the bound in
# sync with the events vector instead of the hardcoded 27.
results_list <- vector("list", length(womens_events))
for (i in seq_along(womens_events)) {
  print(i)
  df <- doc[i]$`NULL`
  colnames(df) <- c('games', 'gold', 'silver', 'bronze')
  df <- df[-1, ]                   # drop the header row embedded in the table
  df$event <- womens_events[i]     # scalar recycles across all rows
  df$gender <- 'women'
  results_list[[i]] <- df
}
womens_results <- do.call(rbind, results_list)
write.csv(womens_results,"data/womens_results.csv", row.names = F) |
#' Wrapper
#'
#' Binds output from \code{\link{linear_detrend}} and
#' \code{\link{monthly_bins}}
#'
#' @param ... Arguments passed to \code{\link{linear_detrend}}
#'
#' @return A \code{\link[base]{data.frame}}.
#'
#' @author Diego Barneche and Scott Burgess.
detrend_and_bin <- function(...) {
  # De-trend first, then bolt the monthly seasonal decomposition of the
  # residuals onto the same data.frame as extra columns.
  detrended <- linear_detrend(...)
  binned <- monthly_bins(detrended$resids, detrended$dates)
  cbind(detrended, binned)
}
#' Linear detrending of time series
#'
#' @param time_series A \code{\link[base]{numeric}} vector containing
#' a raw environmental time series.
#'
#' @param dates An object of class \code{\link[base]{Date}} of format YYYY-MM-DD
#' (must be in progressive chronological order).
#'
#' @return A \code{\link[base]{data.frame}} containing a \code{\link[base]{numeric}} vector
#' of the time (in days) for each observation in \code{time_series} starting at day 0,
#' and a \code{\link[base]{numeric}} vector containing the residual variation of \code{time_series}
#' after removing the linear trend.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @importFrom stats coef lm
#'
#' @seealso \code{\link{seasonality_and_colour}}.
linear_detrend <- function(time_series, dates) {
  # Elapsed days since the first observation: pairwise gaps between
  # consecutive dates, accumulated, with day 0 prepended.
  n <- length(dates)
  day_gaps <- as.numeric(difftime(dates[2:n], dates[1:(n - 1)],
                                  units = "days"))
  predictor <- cumsum(c(0, day_gaps))
  # Fit the linear trend and rebuild the fitted line explicitly from the
  # design matrix so NA responses stay NA in the residuals.
  lm_mod <- lm(time_series ~ predictor)
  design <- cbind(1, predictor)
  trend <- as.numeric(design %*% coef(lm_mod))
  data.frame(predictor = predictor,
             resids = time_series - trend,
             dates = dates,
             stringsAsFactors = FALSE)
}
}
#' Colour of environmental noise
#'
#' Calculate the 'colour' of environmental noise assuming 1/f noise family
#'
#' @param spec_obj A \code{\link[base]{data.frame}} containing a vector
#' of frequencies/periods scanned, and a vector containing the normalised power
#' corresponding to scanned frequencies/periods.
#'
#' @return A \code{\link[base]{list}} containing the beta slope, the original
#' spec_obj \code{\link[base]{data.frame}}, and the fitted linear model.
#'
#' @details Calculates slope of log10(normalised power)~log10(frequencies).
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @importFrom stats coef lm
#'
#' @seealso \code{\link{seasonality_and_colour}}.
spec_slope_get <- function(spec_obj) {
  # Slope of log10(power) ~ log10(frequency); its absolute value is the
  # 1/f-noise "colour" exponent beta.
  fit <- lm(log10(spec) ~ log10(freq), data = spec_obj)
  beta <- abs(coef(fit)[["log10(freq)"]])
  list(slope = beta,
       spec_obj = spec_obj,
       model = fit)
}
#' Adapt Lomb-Scargle Periodogram
#'
#' Calculates and renames output of Lomb-Scargle Periodogram
#'
#' @param resid_time_series A \code{\link[base]{numeric}} vector containing unpredicted
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#' @param ... Additional arguments to \code{\link[lomb]{lsp}}.
#'
#' @return A \code{\link[base]{data.frame}} containing a vector
#' of frequencies/periods scanned, and a vector containing the normalised power
#' corresponding to scanned frequencies/periods.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @seealso \code{\link{spec_slope_get}}, \code{\link{seasonality_and_colour}}.
#'
#' @importFrom lomb lsp
lmb_output_prep <- function(resid_time_series, ...) {
  # Run the Lomb-Scargle periodogram (no plot) and rename its output to the
  # freq/spec column names that spec_slope_get() expects.
  periodogram <- lsp(resid_time_series, plot = FALSE, ...)
  data.frame(freq = periodogram$scanned,
             spec = periodogram$power,
             stringsAsFactors = FALSE)
}
#' Adapt Spectrum Periodogram
#'
#' Calculates and renames output of Spectrum Periodogram
#'
#' @param resid_time_series A \code{\link[base]{numeric}} vector containing unpredicted
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#' @param ... Additional arguments to \code{\link[stats]{spectrum}}.
#'
#' @return A \code{\link[base]{data.frame}} containing a vector
#' of frequencies/periods scanned, and a vector containing the normalised power
#' corresponding to scanned frequencies/periods.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @importFrom stats spectrum as.ts
#'
#' @seealso \code{\link{spec_slope_get}}, \code{\link{seasonality_and_colour}}.
spec_output_prep <- function(resid_time_series, ...) {
  # Classical periodogram via stats::spectrum() (no plot), renamed to the
  # freq/spec column names that spec_slope_get() expects.
  pgram <- spectrum(as.ts(resid_time_series), plot = FALSE, ...)
  data.frame(freq = pgram$freq,
             spec = pgram$spec,
             stringsAsFactors = FALSE)
}
#' Seasonality
#'
#' Calculates seasonality based on seasonally-interpolated vector.
#'
#' @param interpolated_seasons A \code{\link[base]{numeric}} vector containing predicted (interpolated)
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#' @param resid_time_series A \code{\link[base]{numeric}} vector containing unpredicted
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#'
#' @return A \code{\link[base]{list}} containing the sample variance of \code{interpolated_seasons},
#' \code{resid_time_series}, and the resulting seasonality. See details.
#'
#' @details Three types of seasonality are returned currently: an 'absolute' seasonality which corresponds to the sample variance of
#' \code{interpolated_seasons}; an unbounded seasonality which corresponds to the
#' ratio between sample variances of \code{interpolated_seasons} and \code{resid_time_series}; and a 'bounded' seasonality which corresponds to the sample variance of \code{interpolated_seasons}
#' relative to the total summed variances of both \code{interpolated_seasons} and \code{resid_time_series}.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @importFrom stats var
#'
#' @seealso \code{\link{monthly_bins}}, \code{\link{seasonality_and_colour}}.
seasonality_calc <- function(interpolated_seasons, resid_time_series) {
  # Variance explained by the interpolated seasonal signal vs. the variance
  # left in the de-seasoned residuals.
  v_seasonal <- var(interpolated_seasons, na.rm = TRUE)
  v_residual <- var(resid_time_series, na.rm = TRUE)
  list(predicted_var = v_seasonal,
       unpredicted_var = v_residual,
       # ratio form: can exceed 1 when the seasonal signal dominates
       unbounded_seasonality = v_seasonal / v_residual,
       # proportion form: always in [0, 1]
       bounded_seasonality = v_seasonal / (v_seasonal + v_residual)
  )
}
#' Environmental noise wrapper
#'
#' Adapts periodogram and calculates the 'colour' of the noise of unpredicted de-trended residuals
#'
#' @param resid_time_series A \code{\link[base]{numeric}} vector containing unpredicted
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#' @param predictor A \code{\link[base]{numeric}} vector containing the time (in days)
#' for each observation in \code{resid_time_series}. Needs to start at day 0.
#' @param noise_method A method for estimating the slope beta. Takes 2 possible
#' values: \code{'spectrum'} for evenly distributed time series or
#' \code{'lomb_scargle'} for unevenly distributed ones.
#'
#' @return A \code{\link[base]{list}} containing the beta slope, the original
#' spec_obj \code{\link[base]{data.frame}}, and the fitted linear model.
#'
#' @details This function calculates beta slope (assuming 1/f noise family) on the
#' residual time series using function \code{\link{spec_slope_get}}.
#'
#' @author Diego Barneche and Scott Burgess.
#' @seealso \code{\link{monthly_bins}}, \code{\link{seasonality_and_colour}}, \code{\link{spec_slope_get}}.
noise_calc <- function(resid_time_series, predictor,
                       noise_method = c("spectrum", "lomb_scargle")) {
  # Validate the method choice, build the matching periodogram, then hand
  # it to spec_slope_get() for the beta-slope fit.
  method <- match.arg(noise_method)
  spec_obj <- if (method == "spectrum") {
    # evenly-spaced series: classical periodogram
    spec_output_prep(resid_time_series)
  } else {
    # unevenly-spaced series: Lomb-Scargle, anchored to observation times
    lmb_output_prep(resid_time_series, times = predictor)
  }
  spec_slope_get(spec_obj)
}
#' Compute the seasonal (monthly) components of time series
#'
#' @param resids A \code{\link[base]{numeric}} vector containing the residual
#' variation of a raw time series after removing the linear trend.
#' @param dates An object of class \code{\link[base]{Date}} of format YYYY-MM-DD
#' (must be in progressive chronological order).
#'
#' @return A \code{\link[base]{data.frame}} containing the a \code{\link[base]{numeric}}
#' vector containing predicted (interpolated) de-trended residuals (\code{'interpolated_seasons'}), and
#' a \code{\link[base]{numeric}} vector containing unpredicted de-trended residuals
#' \code{'resid_time_series'}. See details.
#'
#' @details This algorithm follows the steps described in \href{http://onlinelibrary.wiley.com/doi/10.1111/ele.12402/abstract}{Marshall and Burgess 2015} Ecology Letters 18: 1461–0248, doi: \href{http://dx.doi.org/10.1111/ele.12402}{10.1111/ele.12402}.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @seealso \code{\link{seasonality_and_colour}}.
#'
#' @importFrom stats aggregate approxfun median
monthly_bins <- function(resids, dates) {
  # Mean de-trended residual per calendar month name (e.g. "January"),
  # pooling observations across all years.
  month_av_res_yrs <- aggregate(resids, by = list(format(dates, format = "%B")),
                                mean, na.rm = TRUE)
  # Dummy daily calendar spanning from the 1st of the first observed month
  # through the last day of the final observed month (via last_day()).
  daily_dummy_ts <- seq.Date(from = as.Date(format(dates[1],
                                                   format = "1 %B %Y"),
                                            format = "%d %B %Y"),
                             to = last_day(dates[length(dates)]),
                             by = "day")
  # "Month Year" label for every dummy day, used to bin days into months.
  dummy_ts_m_y <- format(daily_dummy_ts, format = "%B %Y")
  # Median calendar day of each month-year bin: these mid-month dates are
  # the x-coordinates the pooled monthly means are anchored to.
  med_month_yrs <- aggregate(daily_dummy_ts, by = list(dummy_ts_m_y), median)
  med_month_yrs <- med_month_yrs$x[match(unique(dummy_ts_m_y),
                                         med_month_yrs$Group.1)]
  # For each mid-month anchor, look up the pooled mean residual of that
  # month name computed above.
  month_data <- month_av_res_yrs[match(format(med_month_yrs, format = "%B"),
                                       month_av_res_yrs[, 1]), ]
  # Do the interpolation to get seasonal trend
  # (linear, do not allow for NAs at the extremes)
  # rule = 2: dates outside the anchor range take the value at the nearest
  # extreme instead of NA.
  season_interpolate <- approxfun(x = med_month_yrs,
                                  y = month_data[, 2],
                                  method = "linear",
                                  rule = 2)
  interpolated_seasons <- season_interpolate(dates)
  # Calculate residual time series with seasonal trend removed
  resid_time_series <- resids - interpolated_seasons
  data.frame(resid_time_series = resid_time_series,
             interpolated_seasons = interpolated_seasons,
             stringsAsFactors = FALSE)
}
#' Compute last day of the month
#'
#' @param x An object of class \code{\link[base]{Date}} of format YYYY-MM-DD
#' (must be in progressive chronological order).
#' @return An object of class \code{\link[base]{Date}} of format YYYY-MM-DD.
#' @author Diego Barneche and Scott Burgess.
#' @seealso \code{\link{monthly_bins}}.
#' @importFrom lubridate ceiling_date days
last_day <- function(x) {
  # Round the final date in x up to the next month boundary with
  # lubridate::ceiling_date(), then step back one day to land on the last
  # day of that month.
  # NOTE(review): if the final date falls exactly on the 1st of a month,
  # ceiling_date() may leave it unchanged (boundary dates are not advanced
  # by default), making this return the last day of the *previous* month --
  # confirm against lubridate docs that monthly_bins() intends that.
  ceiling_date(x[length(x)], "month") - days(1)
}
#' Checks if argument is an \code{envpreddata} object
#'
#' @param x An R object
#'
#' @seealso \code{\link{envpreddata}}, \code{\link{env_stats}}.
#'
#' @export
is.envpreddata <- function(x) {
  # TRUE when "envpreddata" appears anywhere in x's S3 class vector.
  inherits(x, what = "envpreddata")
}
| /R/helpers.R | permissive | dbarneche/envPred | R | false | false | 10,951 | r | #' Wrapper
#'
#' Binds output from \code{\link{linear_detrend}} and
#' \code{\link{monthly_bins}}
#'
#' @param ... Arguments passed to \code{\link{linear_detrend}}
#'
#' @return A \code{\link[base]{data.frame}}.
#'
#' @author Diego Barneche and Scott Burgess.
detrend_and_bin <- function(...) {
df <- linear_detrend(...)
cbind(df, monthly_bins(df$resids, df$dates))
}
#' Linear detrending of time series
#'
#' @param time_series A \code{\link[base]{numeric}} vector containing
#' a raw environmental time series.
#'
#' @param dates An object of class \code{\link[base]{Date}} of format YYYY-MM-DD
#' (must be in progressive chronological order).
#'
#' @return A \code{\link[base]{data.frame}} containing a \code{\link[base]{numeric}} vector
#' of the time (in days) for each observation in \code{time_series} starting at day 0,
#' and a \code{\link[base]{numeric}} vector containing the residual variation of \code{time_series}
#' after removing the linear trend.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @importFrom stats coef lm
#'
#' @seealso \code{\link{seasonality_and_colour}}.
linear_detrend <- function(time_series, dates) {
predictor <- cumsum(c(0, difftime(dates[2:length(dates)],
dates[1:(length(dates) - 1)],
units = "days")))
lm_mod <- lm(time_series ~ predictor)
predicted_av <- matrix(c(rep(1, length(predictor)), predictor), ncol = 2)
predicted_av <- predicted_av %*% coef(lm_mod)
data.frame(predictor = predictor,
resids = time_series - predicted_av[, 1],
dates = dates,
stringsAsFactors = FALSE)
}
#' Colour of environmental noise
#'
#' Calculate the 'colour' of environmental noise assuming 1/f noise family
#'
#' @param spec_obj A \code{\link[base]{data.frame}} containing a vector
#' of frequencies/periods scanned, and a vector containing the normalised power
#' corresponding to scanned frequencies/periods.
#'
#' @return A \code{\link[base]{list}} containing the beta slope, the original
#' spec_obj \code{\link[base]{data.frame}}, and the fitted linear model.
#'
#' @details Calculates slope of log10(normalised power)~log10(frequencies).
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @importFrom stats coef lm
#'
#' @seealso \code{\link{seasonality_and_colour}}.
spec_slope_get <- function(spec_obj) {
model <- lm(log10(spec) ~ log10(freq), data = spec_obj)
list(slope = as.numeric(abs(coef(model)[2])),
spec_obj = spec_obj,
model = model)
}
#' Adapt Lomb-Scargle Periodogram
#'
#' Calculates and renames output of Lomb-Scargle Periodogram
#'
#' @param resid_time_series A \code{\link[base]{numeric}} vector containing unpredicted
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#' @param ... Additional arguments to \code{\link[lomb]{lsp}}.
#'
#' @return A \code{\link[base]{data.frame}} containing a vector
#' of frequencies/periods scanned, and a vector containing the normalised power
#' corresponding to scanned frequencies/periods.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @seealso \code{\link{spec_slope_get}}, \code{\link{seasonality_and_colour}}.
#'
#' @importFrom lomb lsp
lmb_output_prep <- function(resid_time_series, ...) {
lmb_obj <- lsp(resid_time_series, plot = FALSE, ...)
data.frame(freq = lmb_obj$scanned,
spec = lmb_obj$power,
stringsAsFactors = FALSE)
}
#' Adapt Spectrum Periodogram
#'
#' Calculates and renames output of Spectrum Periodogram
#'
#' @param resid_time_series A \code{\link[base]{numeric}} vector containing unpredicted
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#' @param ... Additional arguments to \code{\link[stats]{spectrum}}.
#'
#' @return A \code{\link[base]{data.frame}} containing a vector
#' of frequencies/periods scanned, and a vector containing the normalised power
#' corresponding to scanned frequencies/periods.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @importFrom stats spectrum as.ts
#'
#' @seealso \code{\link{spec_slope_get}}, \code{\link{seasonality_and_colour}}.
spec_output_prep <- function(resid_time_series, ...) {
spec_obj <- spectrum(as.ts(resid_time_series),
plot = FALSE,
...)
data.frame(freq = spec_obj$freq,
spec = spec_obj$spec,
stringsAsFactors = FALSE)
}
#' Seasonality
#'
#' Calculates seasonality based on seasonally-interpolated vector.
#'
#' @param interpolated_seasons A \code{\link[base]{numeric}} vector containing predicted (interpolated)
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#' @param resid_time_series A \code{\link[base]{numeric}} vector containing unpredicted
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#'
#' @return A \code{\link[base]{list}} containing the sample variance of \code{interpolated_seasons},
#' \code{resid_time_series}, and the resulting seasonality. See details.
#'
#' @details Three types of seasonality are returned currently: an 'absolute' seasonality which corresponds to the sample variance of
#' \code{interpolated_seasons}; an unbounded seasonality which corresponds to the
#' ratio between sample variances of \code{interpolated_seasons} and \code{resid_time_series}; and a 'bounded' seasonality which corresponds to the sample variance of \code{interpolated_seasons}
#' relative to the total summed variances of both \code{interpolated_seasons} and \code{resid_time_series}.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @importFrom stats var
#'
#' @seealso \code{\link{monthly_bins}}, \code{\link{seasonality_and_colour}}.
seasonality_calc <- function(interpolated_seasons, resid_time_series) {
var_predict <- var(interpolated_seasons, na.rm = TRUE)
var_unpredict <- var(resid_time_series, na.rm = TRUE)
list(predicted_var = var_predict,
unpredicted_var = var_unpredict,
unbounded_seasonality = var_predict / var_unpredict,
bounded_seasonality = var_predict / (var_predict + var_unpredict)
)
}
#' Environmental noise wrapper
#'
#' Adapts periodogram and calculates the 'colour' of the noise of unpredicted de-trended residuals
#'
#' @param resid_time_series A \code{\link[base]{numeric}} vector containing unpredicted
#' de-trended residuals as obtained by function \code{\link{monthly_bins}}.
#' @param predictor A \code{\link[base]{numeric}} vector containing the time (in days)
#' for each observation in \code{resid_time_series}. Needs to start at day 0.
#' @param noise_method A method for estimating the slope beta. Takes 2 possible
#' values: \code{'spectrum'} for evenly distributed time series or
#' \code{'lomb_scargle'} for unevenly distributed ones.
#'
#' @return A \code{\link[base]{list}} containing the beta slope, the original
#' spec_obj \code{\link[base]{data.frame}}, and the fitted linear model.
#'
#' @details This function calculates beta slope (assuming 1/f noise family) on the
#' residual time series using function \code{\link{spec_slope_get}}.
#'
#' @author Diego Barneche and Scott Burgess.
#' @seealso \code{\link{monthly_bins}}, \code{\link{seasonality_and_colour}}, \code{\link{spec_slope_get}}.
noise_calc <- function(resid_time_series, predictor,
noise_method = c("spectrum", "lomb_scargle")) {
switch(match.arg(noise_method),
"spectrum" = {
spec_obj <- spec_output_prep(resid_time_series)
},
"lomb_scargle" = {
spec_obj <- lmb_output_prep(resid_time_series, times = predictor)
}
)
spec_slope_get(spec_obj)
}
#' Compute the seasonal (monthly) components of time series
#'
#' @param resids A \code{\link[base]{numeric}} vector containing the residual
#' variation of a raw time series after removing the linear trend.
#' @param dates An object of class \code{\link[base]{Date}} of format YYYY-MM-DD
#' (must be in progressive chronological order).
#'
#' @return A \code{\link[base]{data.frame}} containing the a \code{\link[base]{numeric}}
#' vector containing predicted (interpolated) de-trended residuals (\code{'interpolated_seasons'}), and
#' a \code{\link[base]{numeric}} vector containing unpredicted de-trended residuals
#' \code{'resid_time_series'}. See details.
#'
#' @details This algorithm follows the steps described in \href{http://onlinelibrary.wiley.com/doi/10.1111/ele.12402/abstract}{Marshall and Burgess 2015} Ecology Letters 18: 1461–0248, doi: \href{http://dx.doi.org/10.1111/ele.12402}{10.1111/ele.12402}.
#'
#' @author Diego Barneche and Scott Burgess.
#'
#' @seealso \code{\link{seasonality_and_colour}}.
#'
#' @importFrom stats aggregate approxfun median
monthly_bins <- function(resids, dates) {
month_av_res_yrs <- aggregate(resids, by = list(format(dates, format = "%B")),
mean, na.rm = TRUE)
daily_dummy_ts <- seq.Date(from = as.Date(format(dates[1],
format = "1 %B %Y"),
format = "%d %B %Y"),
to = last_day(dates[length(dates)]),
by = "day")
dummy_ts_m_y <- format(daily_dummy_ts, format = "%B %Y")
med_month_yrs <- aggregate(daily_dummy_ts, by = list(dummy_ts_m_y), median)
med_month_yrs <- med_month_yrs$x[match(unique(dummy_ts_m_y),
med_month_yrs$Group.1)]
month_data <- month_av_res_yrs[match(format(med_month_yrs, format = "%B"),
month_av_res_yrs[, 1]), ]
# Do the interpolation to get seasonal trend
# (linear, do not allow for NAs at the extremes)
season_interpolate <- approxfun(x = med_month_yrs,
y = month_data[, 2],
method = "linear",
rule = 2)
interpolated_seasons <- season_interpolate(dates)
# Calculate residual time series with seasonal trend removed
resid_time_series <- resids - interpolated_seasons
data.frame(resid_time_series = resid_time_series,
interpolated_seasons = interpolated_seasons,
stringsAsFactors = FALSE)
}
#' Compute last day of the month
#'
#' @param x An object of class \code{\link[base]{Date}} of format YYYY-MM-DD
#' (must be in progressive chronological order).
#' @return An object of class \code{\link[base]{Date}} of format YYYY-MM-DD.
#' @author Diego Barneche and Scott Burgess.
#' @seealso \code{\link{monthly_bins}}.
#' @importFrom lubridate ceiling_date days
last_day <- function(x) {
ceiling_date(x[length(x)], "month") - days(1)
}
#' Checks if argument is an \code{envpreddata} object
#'
#' @param x An R object
#'
#' @seealso \code{\link{envpreddata}}, \code{\link{env_stats}}.
#'
#' @export
is.envpreddata <- function(x) {
inherits(x, "envpreddata")
}
|
library(tidycensus)
library(acs)
library(dplyr)
library(sf)
library(ggplot2)
library(ggthemes)
library(viridis)
census_api_key("548d39e0315b591a0e9f5a8d9d6c1f22ea8fafe0") # Teja's key
#
# Select variables -------------------------------------------------------------------------------------------------------------
#
# Digital connectivity
#a) percent households with no internet access or relying solely on cellular data to access the internet (Int1):
#b) percent households with no computing devices or relying solely on mobile devices to access the internet (Int2);
#c) percent population with access to no providers or access to up to 10/1 advertised speeds only (Int3).
# a)
# B28002 PRESENCE AND TYPES OF INTERNET SUBSCRIPTIONS IN HOUSEHOLD
# _001 (total), _003 (only dial up), _006 (cellular data plan with no other type of internet subscription), _010 (only satellite), _013 (no internet access)
# B28002_001
# B28002_003
# B28002_006
# B28002_010
# B28002_013
# b)
# B28007 LABOR FORCE STATUS BY PRESENCE OF A COMPUTER AND TYPES OF INTERNET SUBSCRIPTION IN HOUSEHOLD
# _002 (total in the civilian labor force: employed and unemployed), _008 (no computer employed) + _014 (no computer unemployed)
# B28007_002
# B28007_008
# B28007_014
# Work
#a) Industry: industries that overall engage less in working from home.
# occupations: share of workers employed in service, natural, construction, maintenance, production, transportation, material moving,
# and military specific occupations (Occ)
# b)industry: share of workers employed in construction, manufacturing, wholesale, retail, transportation and warehousing, utilities,
# and government, including armed forces (Ind).
# b) B08124 MEANS OF TRANSPORTATION TO WORK BY OCCUPATION
# _001 (total) - _043 (already work at home),
# _010, _012, _013, _014, _017, _019, _020, _021, _024, _026, _027, _028, _031, _033, _034, _035, _038, _040, _041, _042
# B08124_001
# B08124_043
# B08124_010
# B08124_012
# B08124_013
# B08124_014
# B08124_017
# B08124_019
# B08124_020
# B08124_021
# B08124_024
# B08124_026
# B08124_027
# B08124_028
# B08124_031
# B08124_033
# B08124_034
# B08124_035
# B08124_038
# B08124_040
# B08124_041
# B08124_042
# a) B08126 MEANS OF TRANSPORTATION TO WORK BY INDUSTRY
# _001 (total) - _091 (already work at home)
# B08126_001
# B08126_091
# B08126_018
# B08126_019
# B08126_020
# B08126_021
# B08126_022
# B08126_029
# B08126_030
# B08126_033
# B08126_034
# B08126_035
# B08126_036
# B08126_037
# B08126_044
# B08126_045
# B08126_048
# B08126_049
# B08126_050
# B08126_051
# B08126_052
# B08126_059
# B08126_060
# B08126_063
# B08126_064
# B08126_065
# B08126_066
# B08126_067
# B08126_074
# B08126_075
# B08126_078
# B08126_079
# B08126_080
# B08126_081
# B08126_082
# B08126_089
# B08126_090
# Select variables
vars <- c(
# presence and types of internet
"B28002_001", "B28002_003", "B28002_006", "B28002_010", "B28002_013",
# presence of computer
"B28007_002", "B28007_008", "B28007_014",
# occupations
"B08124_001", "B08124_043", "B08124_010", "B08124_012", "B08124_013", "B08124_014",
"B08124_017", "B08124_019", "B08124_020", "B08124_021", "B08124_024", "B08124_026",
"B08124_027", "B08124_028", "B08124_031", "B08124_033", "B08124_034", "B08124_035",
"B08124_038", "B08124_040", "B08124_041", "B08124_042",
# industries
"B08126_001", "B08126_091", "B08126_018", "B08126_019", "B08126_020", "B08126_021",
"B08126_022", "B08126_029", "B08126_030", "B08126_033", "B08126_034", "B08126_035", "B08126_036", "B08126_037",
"B08126_044", "B08126_045", "B08126_048", "B08126_049", "B08126_050", "B08126_051",
"B08126_052", "B08126_059", "B08126_060", "B08126_063", "B08126_064", "B08126_065",
"B08126_066", "B08126_067", "B08126_074", "B08126_075", "B08126_078", "B08126_079",
"B08126_080", "B08126_081", "B08126_082", "B08126_089", "B08126_090"
)
#
# Get variables for counties --------------------------------------------------------------------------------------
#
acsdata <- get_acs(geography = "tract", county = 059, state = 51, variables = vars, year = 2018, survey = "acs5",
cache_table = TRUE, output = "wide", geometry = TRUE, keep_geo_vars = TRUE)
#
# Calculate --------------------------------------------------------------------------------------
#
data <- acsdata %>% mutate(
STATEFP = STATEFP,
COUNTYFP = COUNTYFP,
AFFGEOID = AFFGEOID,
GEOID = GEOID,
LSAD = LSAD,
NAME.x = NAME.x,
ALAND = ALAND,
AWATER = AWATER,
geometry = geometry,
# internet access (only dial up/only cellular/only satellite/none : total)
internet = (B28002_003E + B28002_006E + B28002_010E + B28002_013E) / B28002_001E,
# presence of computer for those in labor force (no computer employed + no computer unemployed : total in labor force)
computer = (B28007_008E + B28007_014E) / B28007_002E,
# occupations (not-wfh-friendly occupations / total occupations that don't already WFH)
occup = (B08124_010E + B08124_012E + B08124_013E + B08124_014E + B08124_017E +
B08124_019E + B08124_020E + B08124_021E + B08124_024E + B08124_026E +
B08124_027E + B08124_028E + B08124_031E + B08124_033E + B08124_034E +
B08124_035E + B08124_038E + B08124_040E + B08124_041E + B08124_042E) / (B08124_001E - B08124_043E),
# industries (not-wfh-friendly industries / total in industries that don't already WFH)
industr = (B08126_018E + B08126_019E + B08126_020E + B08126_021E + B08126_022E + B08126_029E +
B08126_030E + B08126_033E + B08126_034E + B08126_035E + B08126_036E + B08126_037E +
B08126_044E + B08126_045E + B08126_048E + B08126_049E + B08126_050E + B08126_051E +
B08126_052E + B08126_059E + B08126_060E + B08126_063E + B08126_064E + B08126_065E +
B08126_066E + B08126_067E + B08126_074E + B08126_075E + B08126_078E + B08126_079E +
B08126_080E + B08126_081E + B08126_082E + B08126_089E + B08126_090E) / (B08126_001E - B08126_091E)
)
#
# Quintiles --------------------------------------------------------------------------------------
#
# Histograms
hist(data$internet)
hist(data$computer)
hist(data$occup)
hist(data$industr)
# Missingness
any(is.na(data))
test <- data %>% filter(is.na(computer) | is.na(internet) | is.na(occup) | is.na(industr))
# Looks like empty tracts. 3 tracts don't have data.
data <- data %>% filter(!is.na(computer), !is.na(internet), !is.na(occup), !is.na(industr))
# Find quintiles
data <- data %>% mutate(internetQuint = ntile(internet, 5),
computerQuint = ntile(computer, 5),
occupQuint = ntile(occup, 5),
industrQuint = ntile(industr, 5))
# Get quantile cutoffs
qint <- quantile(data$internet, prob = seq(0, 1, 0.2))
qcomp <- quantile(data$computer, prob = seq(0, 1, 0.2))
qoccup <- quantile(data$occup, prob = seq(0, 1, 0.2))
qind <- quantile(data$industr, prob = seq(0, 1, 0.2))
quintcuts <- bind_rows(qint, qcomp, qoccup, qind)
quintcuts$id <- c("Internet", "Computer", "Occupation", "Industry")
quintcuts
#
# Create variables --------------------------------------------------------------------------------------
#
# Counties were divided into five equal groups or quintiles
# Very High vulnerability:placed in 4/5th quintiles in 4 of the 4 variables
# High vulnerability: placed in 4/5th quintiles in 3 of the 4 variables
# Moderate vulnerability: placed in the 4/5th quintiles in 2 of the 4 variables
# Low vulnerability: placed in 4/5th quintiles in 1 of the 4 variables
# No vulnerability: placed in 4/5th quintiles in 0 of the 4 variables
# Did they place in 4 or 5th quintile?
# Flag tracts that placed in the 4th or 5th quintile on each indicator, and
# count across the four indicators (scoreTop in 0..4).
data <- data %>% mutate(internetTop = ifelse(internetQuint >= 4, 1, 0),
                        computerTop = ifelse(computerQuint >= 4, 1, 0),
                        occupTop = ifelse(occupQuint >= 4, 1, 0),
                        industrTop = ifelse(industrQuint >= 4, 1, 0),
                        scoreTop = internetTop + computerTop + occupTop + industrTop)
# Vulnerability label: reuse the scoreTop count computed above instead of
# re-summing the four flags inside every case_when() branch.
data <- data %>% mutate(vulnerability = case_when(
  scoreTop == 4 ~ "Very High",
  scoreTop == 3 ~ "High",
  scoreTop == 2 ~ "Medium",
  scoreTop == 1 ~ "Low",
  scoreTop == 0 ~ "None"))
data$vulnerability <- factor(data$vulnerability, levels = c("None", "Low", "Medium", "High", "Very High"), ordered = TRUE)
#
# Map --------------------------------------------------------------------------------------
#
ggplot() +
geom_sf(data = acsdata, size = 0.2, fill = "#F0F0F0") +
geom_sf(data = data, size = 0.2, aes(fill = vulnerability)) +
labs(title = "Fairfax County Tract-Level\nTelework Vulnerability",
caption = "Data source: American Community Survey 2014-18 (5-year) estimates.\n
\n
Vulnerability calculated using information on % households with no broadband\n
internet subscription, % persons in labor force with no computer available,\n
% persons employed in telework unfriendly occupations not working remotely,\n
and % persons employed in telework unfriendly industries not working remotely.\n
\n
Tracts are considered very high vulnerability if they placed in 4th or 5th quintile\n
on all 4 indicators considered, high if on 3 indicators, medium if on 2, low if on\n
1, and no vulnerability if they did not place in the 4th or 5th quintile on any\n
indicators.") +
theme_map() +
theme(plot.title = element_text(size = 16, face = "bold", hjust = 0.5),
plot.caption = element_text(size = 8, lineheight = 0.5),
legend.title = element_text(size = 9, face = "bold"),
legend.text = element_text(size = 8),
legend.position = "left",
plot.caption.position = "plot",
plot.title.position = "plot") +
scale_fill_viridis(name = "Vulnerability", guide = "legend", discrete = T)
| /src/remote/01_remote.R | permissive | uva-bi-sdad/fairfax | R | false | false | 10,207 | r | library(tidycensus)
library(acs)
library(dplyr)
library(sf)
library(ggplot2)
library(ggthemes)
library(viridis)
census_api_key("548d39e0315b591a0e9f5a8d9d6c1f22ea8fafe0") # Teja's key
#
# Select variables -------------------------------------------------------------------------------------------------------------
#
# Digital connectivity
#a) percent households with no internet access or relying solely on cellular data to access the internet (Int1):
#b) percent households with no computing devices or relying solely on mobile devices to access the internet (Int2);
#c) percent population with access to no providers or access to up to 10/1 advertised speeds only (Int3).
# a)
# B28002 PRESENCE AND TYPES OF INTERNET SUBSCRIPTIONS IN HOUSEHOLD
# _001 (total), _003 (only dial up), _006 (cellular data plan with no other type of internet subscription), _010 (only satellite), _013 (no internet access)
# B28002_001
# B28002_003
# B28002_006
# B28002_010
# B28002_013
# b)
# B28007 LABOR FORCE STATUS BY PRESENCE OF A COMPUTER AND TYPES OF INTERNET SUBSCRIPTION IN HOUSEHOLD
# _002 (total in the civilian labor force: employed and unemployed), _008 (no computer employed) + _014 (no computer unemployed)
# B28007_002
# B28007_008
# B28007_014
# Work
#a) Industry: industries that overall engage less in working from home.
# occupations: share of workers employed in service, natural, construction, maintenance, production, transportation, material moving,
# and military specific occupations (Occ)
# b)industry: share of workers employed in construction, manufacturing, wholesale, retail, transportation and warehousing, utilities,
# and government, including armed forces (Ind).
# b) B08124 MEANS OF TRANSPORTATION TO WORK BY OCCUPATION
# _001 (total) - _043 (already work at home),
# _010, _012, _013, _014, _017, _019, _020, _021, _024, _026, _027, _028, _031, _033, _034, _035, _038, _040, _041, _042
# B08124_001
# B08124_043
# B08124_010
# B08124_012
# B08124_013
# B08124_014
# B08124_017
# B08124_019
# B08124_020
# B08124_021
# B08124_024
# B08124_026
# B08124_027
# B08124_028
# B08124_031
# B08124_033
# B08124_034
# B08124_035
# B08124_038
# B08124_040
# B08124_041
# B08124_042
# a) B08126 MEANS OF TRANSPORTATION TO WORK BY INDUSTRY
# _001 (total) - _091 (already work at home)
# B08126_001
# B08126_091
# B08126_018
# B08126_019
# B08126_020
# B08126_021
# B08126_022
# B08126_029
# B08126_030
# B08126_033
# B08126_034
# B08126_035
# B08126_036
# B08126_037
# B08126_044
# B08126_045
# B08126_048
# B08126_049
# B08126_050
# B08126_051
# B08126_052
# B08126_059
# B08126_060
# B08126_063
# B08126_064
# B08126_065
# B08126_066
# B08126_067
# B08126_074
# B08126_075
# B08126_078
# B08126_079
# B08126_080
# B08126_081
# B08126_082
# B08126_089
# B08126_090
# Select variables
vars <- c(
# presence and types of internet
"B28002_001", "B28002_003", "B28002_006", "B28002_010", "B28002_013",
# presence of computer
"B28007_002", "B28007_008", "B28007_014",
# occupations
"B08124_001", "B08124_043", "B08124_010", "B08124_012", "B08124_013", "B08124_014",
"B08124_017", "B08124_019", "B08124_020", "B08124_021", "B08124_024", "B08124_026",
"B08124_027", "B08124_028", "B08124_031", "B08124_033", "B08124_034", "B08124_035",
"B08124_038", "B08124_040", "B08124_041", "B08124_042",
# industries
"B08126_001", "B08126_091", "B08126_018", "B08126_019", "B08126_020", "B08126_021",
"B08126_022", "B08126_029", "B08126_030", "B08126_033", "B08126_034", "B08126_035", "B08126_036", "B08126_037",
"B08126_044", "B08126_045", "B08126_048", "B08126_049", "B08126_050", "B08126_051",
"B08126_052", "B08126_059", "B08126_060", "B08126_063", "B08126_064", "B08126_065",
"B08126_066", "B08126_067", "B08126_074", "B08126_075", "B08126_078", "B08126_079",
"B08126_080", "B08126_081", "B08126_082", "B08126_089", "B08126_090"
)
#
# Get variables for counties --------------------------------------------------------------------------------------
#
acsdata <- get_acs(geography = "tract", county = 059, state = 51, variables = vars, year = 2018, survey = "acs5",
cache_table = TRUE, output = "wide", geometry = TRUE, keep_geo_vars = TRUE)
#
# Calculate --------------------------------------------------------------------------------------
#
data <- acsdata %>% mutate(
STATEFP = STATEFP,
COUNTYFP = COUNTYFP,
AFFGEOID = AFFGEOID,
GEOID = GEOID,
LSAD = LSAD,
NAME.x = NAME.x,
ALAND = ALAND,
AWATER = AWATER,
geometry = geometry,
# internet access (only dial up/only cellular/only satellite/none : total)
internet = (B28002_003E + B28002_006E + B28002_010E + B28002_013E) / B28002_001E,
# presence of computer for those in labor force (no computer employed + no computer unemployed : total in labor force)
computer = (B28007_008E + B28007_014E) / B28007_002E,
# occupations (not-wfh-friendly occupations / total occupations that don't already WFH)
occup = (B08124_010E + B08124_012E + B08124_013E + B08124_014E + B08124_017E +
B08124_019E + B08124_020E + B08124_021E + B08124_024E + B08124_026E +
B08124_027E + B08124_028E + B08124_031E + B08124_033E + B08124_034E +
B08124_035E + B08124_038E + B08124_040E + B08124_041E + B08124_042E) / (B08124_001E - B08124_043E),
# industries (not-wfh-friendly industries / total in industries that don't already WFH)
industr = (B08126_018E + B08126_019E + B08126_020E + B08126_021E + B08126_022E + B08126_029E +
B08126_030E + B08126_033E + B08126_034E + B08126_035E + B08126_036E + B08126_037E +
B08126_044E + B08126_045E + B08126_048E + B08126_049E + B08126_050E + B08126_051E +
B08126_052E + B08126_059E + B08126_060E + B08126_063E + B08126_064E + B08126_065E +
B08126_066E + B08126_067E + B08126_074E + B08126_075E + B08126_078E + B08126_079E +
B08126_080E + B08126_081E + B08126_082E + B08126_089E + B08126_090E) / (B08126_001E - B08126_091E)
)
#
# Quintiles --------------------------------------------------------------------------------------
#
# Histograms
hist(data$internet)
hist(data$computer)
hist(data$occup)
hist(data$industr)
# Missingness
any(is.na(data))
test <- data %>% filter(is.na(computer) | is.na(internet) | is.na(occup) | is.na(industr))
# Looks like empty tracts. 3 tracts don't have data.
data <- data %>% filter(!is.na(computer), !is.na(internet), !is.na(occup), !is.na(industr))
# Find quintiles
data <- data %>% mutate(internetQuint = ntile(internet, 5),
computerQuint = ntile(computer, 5),
occupQuint = ntile(occup, 5),
industrQuint = ntile(industr, 5))
# Get quantile cutoffs
qint <- quantile(data$internet, prob = seq(0, 1, 0.2))
qcomp <- quantile(data$computer, prob = seq(0, 1, 0.2))
qoccup <- quantile(data$occup, prob = seq(0, 1, 0.2))
qind <- quantile(data$industr, prob = seq(0, 1, 0.2))
quintcuts <- bind_rows(qint, qcomp, qoccup, qind)
quintcuts$id <- c("Internet", "Computer", "Occupation", "Industry")
quintcuts
#
# Create variables --------------------------------------------------------------------------------------
#
# Counties were divided into five equal groups or quintiles
# Very High vulnerability:placed in 4/5th quintiles in 4 of the 4 variables
# High vulnerability: placed in 4/5th quintiles in 3 of the 4 variables
# Moderate vulnerability: placed in the 4/5th quintiles in 2 of the 4 variables
# Low vulnerability: placed in 4/5th quintiles in 1 of the 4 variables
# No vulnerability: placed in 4/5th quintiles in 0 of the 4 variables
# Did they place in 4 or 5th quintile?
# Flag tracts that placed in the 4th or 5th quintile on each indicator, and
# count how many of the 4 indicators were flagged (scoreTop, range 0-4).
data <- data %>% mutate(internetTop = ifelse(internetQuint >= 4, 1, 0),
                        computerTop = ifelse(computerQuint >= 4, 1, 0),
                        occupTop = ifelse(occupQuint >= 4, 1, 0),
                        industrTop = ifelse(industrQuint >= 4, 1, 0),
                        scoreTop = internetTop + computerTop + occupTop + industrTop)
# Vulnerability: map the flagged-indicator count to a label. Reuse the
# scoreTop column computed above instead of re-summing the four flags in
# every case_when branch (same result, no duplicated arithmetic).
data <- data %>% mutate(vulnerability = case_when(
  scoreTop == 4 ~ "Very High",
  scoreTop == 3 ~ "High",
  scoreTop == 2 ~ "Medium",
  scoreTop == 1 ~ "Low",
  scoreTop == 0 ~ "None"))
data$vulnerability <- factor(data$vulnerability, levels = c("None", "Low", "Medium", "High", "Very High"), ordered = TRUE)
#
# Map --------------------------------------------------------------------------------------
#
ggplot() +
geom_sf(data = acsdata, size = 0.2, fill = "#F0F0F0") +
geom_sf(data = data, size = 0.2, aes(fill = vulnerability)) +
labs(title = "Fairfax County Tract-Level\nTelework Vulnerability",
caption = "Data source: American Community Survey 2014-18 (5-year) estimates.\n
\n
Vulnerability calculated using information on % households with no broadband\n
internet subscription, % persons in labor force with no computer available,\n
% persons employed in telework unfriendly occupations not working remotely,\n
and % persons employed in telework unfriendly industries not working remotely.\n
\n
Tracts are considered very high vulnerability if they placed in 4th or 5th quintile\n
on all 4 indicators considered, high if on 3 indicators, medium if on 2, low if on\n
1, and no vulnerability if they did not place in the 4th or 5th quintile on any\n
indicators.") +
theme_map() +
theme(plot.title = element_text(size = 16, face = "bold", hjust = 0.5),
plot.caption = element_text(size = 8, lineheight = 0.5),
legend.title = element_text(size = 9, face = "bold"),
legend.text = element_text(size = 8),
legend.position = "left",
plot.caption.position = "plot",
plot.title.position = "plot") +
scale_fill_viridis(name = "Vulnerability", guide = "legend", discrete = T)
|
##Airtime Functions##
remove(list = ls())
options(stringsAsFactors = FALSE)
options(scipen = 999)
source("/Users/harrocyranka/Desktop/code/script_to_load_packages.R")
library(jsonlite)
library(gtools)
# Extract "taken-at" links (individual post URLs) from a scraped CSV and
# write them to <define_wd><outfile>.xlsx.
#
# infile:    path to a CSV file that has a `Link` column.
# define_wd: output directory prefix; concatenated directly with `outfile`,
#            so it must already end with a path separator.
# outfile:   base name (no extension) of the xlsx file to write.
# Returns a one-column data frame (`link`) of the matching links.
create_scrape_file <- function(infile, define_wd, outfile){
  x <- read.csv(infile, header = TRUE)
  # Row indices whose Link contains "taken-at". Renamed from `filter`,
  # which masked base::filter/dplyr::filter.
  keep <- grep("taken-at", x$Link)
  x <- as.data.frame(x[keep, 1])
  colnames(x) <- "link"
  file_name <- paste0(define_wd, outfile, ".xlsx")
  write.xlsx(x, file_name, row.names = FALSE, append = FALSE)
  return(x)
}
school_top_posters <- function(folder_file,define_wd,outfile){
setwd(folder_file)
my_files <- mixedsort(list.files())
list_of_frames <- list()
vector_of_likes <- NULL
vector_of_comments <- NULL
vector_of_dates <- NULL
vector_of_usernames <- NULL
vector_of_names <- NULL
y <- fromJSON(my_files[1])
for(i in 1:length(my_files)){
y <- fromJSON(my_files[i])
k <- y$entry_data$PostPage$media
list_of_frames[[i]] <- k
vector_of_usernames <- append(vector_of_usernames,list_of_frames[[i]]$owner$username)
vector_of_names <- append(vector_of_names,list_of_frames[[i]]$owner$full_name)
vector_of_likes <- append(vector_of_likes,list_of_frames[[i]]$likes$count)
vector_of_comments <- append(vector_of_comments,list_of_frames[[i]]$comments$count)
fix_date <- as.character(as.POSIXct(list_of_frames[[i]]$date, origin="1970-01-01"))
vector_of_dates <- append(vector_of_dates, fix_date)
}
data_frame <- as.data.frame(cbind(vector_of_names,vector_of_usernames,vector_of_dates, vector_of_likes, vector_of_comments))
data_frame$vector_of_likes <- as.numeric(as.character(data_frame$vector_of_likes))
data_frame$vector_of_comments <- as.numeric(as.character(data_frame$vector_of_comments))
#View(data_frame)
data_frame$vector_of_names <- my_coalesce(data_frame$vector_of_names, "Not Displayed")
summed_df <- sqldf("SELECT vector_of_usernames,vector_of_names,SUM(vector_of_likes) as 'total_likes',
SUM(vector_of_comments) as 'total_comments'
FROM data_frame
GROUP BY 1
ORDER BY 3 DESC")
file_name_1 <- paste(define_wd,outfile,"_top_posters",".xlsx",sep = "")
file_name_2 <- paste(define_wd,outfile,"_all_posts",".xlsx",sep = "")
write.xlsx(summed_df, file_name_1, row.names = FALSE)
write.xlsx(data_frame,file_name_2, row.names = FALSE)
setwd("..//")
} | /create_files_from_klipper.R | no_license | ngoyle1/DKC-Analytics-Help-Code | R | false | false | 2,589 | r | ##Airtime Functions##
# Script preamble: reset the session and load dependencies.
# NOTE(review): remove(list = ls()) wipes the calling environment, and the
# source() path is machine-specific ("/Users/harrocyranka/...") -- both make
# this script non-portable; confirm before reuse.
remove(list = ls())
options(stringsAsFactors = FALSE)
options(scipen = 999)
source("/Users/harrocyranka/Desktop/code/script_to_load_packages.R")
library(jsonlite)
library(gtools)
# Extract "taken-at" links (individual post URLs) from a scraped CSV and write
# them to <define_wd><outfile>.xlsx.
# NOTE(review): `define_wd` is concatenated directly with `outfile`, so it must
# already end with a path separator.
create_scrape_file <- function(infile, define_wd, outfile){
  x <- read.csv(infile, header = TRUE)
  # Row indices whose Link column contains "taken-at"
  filter <- grep("taken-at", x$Link)
  # Keep only the first column of the matching rows, as a one-column frame
  x <- as.data.frame(x[filter,1])
  colnames(x) <- "link"
  file_name <- paste(define_wd,outfile,".xlsx",sep = "")
  write.xlsx(x,file_name, row.names = FALSE, append = FALSE)
  return(x)
}
# Aggregate scraped Instagram post JSON files found in `folder_file` into:
#   <define_wd><outfile>_all_posts.xlsx   -- one row per post
#   <define_wd><outfile>_top_posters.xlsx -- likes/comments summed per username
# NOTE(review): changes the working directory and restores it to the PARENT
# directory via setwd("..//"), not to the original directory.
school_top_posters <- function(folder_file,define_wd,outfile){
  setwd(folder_file)
  # Natural-order sort so e.g. file2 comes before file10
  my_files <- mixedsort(list.files())
  list_of_frames <- list()
  # Per-post accumulators, grown inside the loop
  # NOTE(review): 1:length(my_files) below iterates c(1, 0) when the folder is
  # empty, and fromJSON(my_files[1]) fails on an empty folder -- the function
  # assumes at least one JSON file is present.
  vector_of_likes <- NULL
  vector_of_comments <- NULL
  vector_of_dates <- NULL
  vector_of_usernames <- NULL
  vector_of_names <- NULL
  y <- fromJSON(my_files[1])
  for(i in 1:length(my_files)){
    y <- fromJSON(my_files[i])
    # Post payload lives under entry_data$PostPage$media in the scraped JSON
    k <- y$entry_data$PostPage$media
    list_of_frames[[i]] <- k
    vector_of_usernames <- append(vector_of_usernames,list_of_frames[[i]]$owner$username)
    vector_of_names <- append(vector_of_names,list_of_frames[[i]]$owner$full_name)
    vector_of_likes <- append(vector_of_likes,list_of_frames[[i]]$likes$count)
    vector_of_comments <- append(vector_of_comments,list_of_frames[[i]]$comments$count)
    # Post date is a Unix epoch; convert to a readable timestamp string
    fix_date <- as.character(as.POSIXct(list_of_frames[[i]]$date, origin="1970-01-01"))
    vector_of_dates <- append(vector_of_dates, fix_date)
  }
  # cbind coerces everything to character, so likes/comments are converted
  # back to numeric immediately below
  data_frame <- as.data.frame(cbind(vector_of_names,vector_of_usernames,vector_of_dates, vector_of_likes, vector_of_comments))
  data_frame$vector_of_likes <- as.numeric(as.character(data_frame$vector_of_likes))
  data_frame$vector_of_comments <- as.numeric(as.character(data_frame$vector_of_comments))
  #View(data_frame)
  # NOTE(review): my_coalesce is not defined in this file -- presumably fills
  # missing full names with "Not Displayed"; verify it is loaded by the
  # sourced helper script.
  data_frame$vector_of_names <- my_coalesce(data_frame$vector_of_names, "Not Displayed")
  # Per-username totals, ordered by total likes descending
  summed_df <- sqldf("SELECT vector_of_usernames,vector_of_names,SUM(vector_of_likes) as 'total_likes',
          SUM(vector_of_comments) as 'total_comments'
                FROM data_frame
                     GROUP BY 1
                     ORDER BY 3 DESC")
  file_name_1 <- paste(define_wd,outfile,"_top_posters",".xlsx",sep = "")
  file_name_2 <- paste(define_wd,outfile,"_all_posts",".xlsx",sep = "")
  write.xlsx(summed_df, file_name_1, row.names = FALSE)
  write.xlsx(data_frame,file_name_2, row.names = FALSE)
  setwd("..//")
}
#ui.R
library(shiny)
require(shinydashboard)
require(leaflet)
# Define UI for application that plots random distributions
# Build the dashboard UI from named components so each section reads on its own.
# Header: application title ------------------------------------------------
ui_header <- dashboardHeader(title = "Portuguese Bank Marketing Data", titleWidth = 350)

# Sidebar: one menu entry per analysis tab (tabName links to a tabItem) ----
ui_sidebar <- dashboardSidebar(
  width = 350,
  sidebarMenu(
    menuItem("ScatterPlot", tabName = "scatterplot", icon = icon("line-chart")),
    menuItem("Barchart", tabName = "barchart", icon = icon("bar-chart")),
    menuItem("CrossTab", tabName = "crosstab", icon = icon("table"))
  )
)

# Tab 1: scatterplot with outcome filter and tail-percent sliders ----------
tab_scatter <- tabItem(
  tabName = "scatterplot",
  h4("Scatterplot Input: "),
  selectInput("OutcomeSelectionFilter", "Filter Outcome:",
              choices = list("Lower Tail" = 1, "Upper Tail" = 2, "All" = 3),
              selected = 3),
  sliderInput("LowerTail", "Lower_Tail_Percent:", min = 2, max = 40, value = 20),
  sliderInput("UpperTail", "Upper_Tail_Percent:", min = 60, max = 99, value = 80),
  h4("Scatterplot: "),
  plotOutput("scatterPlot")
)

# Tab 2: barchart with a sort-order toggle ---------------------------------
tab_bar <- tabItem(
  tabName = "barchart",
  h4("Barchart Input:"),
  radioButtons("BarchartSorting", "Sort By:",
               choices = list("Average Salary" = 1, "Number of Campaigns" = 2),
               selected = 1),
  h4("Barchart: "),
  plotOutput("barPlot")
)

# Tab 3: crosstab with KPI threshold sliders -------------------------------
tab_cross <- tabItem(
  tabName = "crosstab",
  h4("Crosstab Inputs: "),
  sliderInput("KPI1", "KPI_Low_Max_value:", min = 0, max = .13, value = .1),
  sliderInput("KPI2", "KPI_Medium_Max_value:", min = .13, max = .18, value = .15),
  h4("Crosstab: "),
  plotOutput("crosstabPlot")
)

# The page itself is the file's final (returned) expression.
dashboardPage(
  ui_header,
  ui_sidebar,
  dashboardBody(tabItems(tab_scatter, tab_bar, tab_cross))
)
| /04 Shiny/ui.R | no_license | rmelendez94/DV_FinalProject | R | false | false | 2,291 | r | #ui.R
library(shiny)
require(shinydashboard)
require(leaflet)
# Define UI for application that plots random distributions
dashboardPage(
# Application title
dashboardHeader(title = "Portuguese Bank Marketing Data", titleWidth = 350),
dashboardSidebar(width = 350,
sidebarMenu(
menuItem("ScatterPlot", tabName = "scatterplot", icon = icon("line-chart")),
menuItem("Barchart", tabName = "barchart", icon = icon("bar-chart")),
menuItem("CrossTab", tabName = "crosstab", icon = icon("table"))
)
),
dashboardBody(
tabItems(
#first tab content
tabItem(tabName = "scatterplot",
h4("Scatterplot Input: "),
selectInput("OutcomeSelectionFilter",
"Filter Outcome:",
choices = list("Lower Tail" = 1, "Upper Tail" = 2, "All" = 3), selected = 3),
sliderInput("LowerTail",
"Lower_Tail_Percent:",
min = 2,
max = 40,
value = 20),
sliderInput("UpperTail",
"Upper_Tail_Percent:",
min = 60,
max = 99,
value = 80),
h4("Scatterplot: "),
plotOutput("scatterPlot")
),
tabItem(tabName = "barchart",
h4("Barchart Input:"),
radioButtons("BarchartSorting",
"Sort By:",
choices = list("Average Salary" = 1, "Number of Campaigns" = 2), selected = 1),
h4("Barchart: "),
plotOutput("barPlot")
),
tabItem(tabName = "crosstab",
h4("Crosstab Inputs: "),
sliderInput("KPI1",
"KPI_Low_Max_value:",
min = 0,
max = .13,
value = .1),
sliderInput("KPI2",
"KPI_Medium_Max_value:",
min = .13,
max = .18,
value = .15),
h4("Crosstab: "),
plotOutput("crosstabPlot")
)
)
)
)
|
#' Buttons including Action Buttons and Event Buttons
#'
#' Creates an action button whose value is initially zero, and increments by one
#' each time it is pressed.
#'
#' @param inputId Specifies the input slot that will be used to access the
#' value.
#' @param label The contents of the button--usually a text label, but you could
#' also use any other HTML, like an image.
#' @param styleclass The Bootstrap styling class of the button--options are
#' primary, info, success, warning, danger, inverse, link or blank
#' @param size The size of the button--options are large, small, mini
#' @param block Whehter the button should fill the block
#' @param icon Display an icon for the button
#' @param icon.library Specify an icon set to use
#' \url{http://www.fontawesome.io/icons} or
#' \url{http://getbootstrap.com/2.3.2/base-css.html#icons}
#' @param css.class Any additional CSS class one wishes to add to the action
#' button
#' @param ... Other argument to feed into shiny::actionButton
#'
#' @family ShinySky elements
#'
#'
#' @export
actionButton2 <- function(inputId, label, styleclass = "", size = "",
    block = FALSE, icon = NULL, icon.library = c("bootstrap", "font awesome"), css.class = "", ...) {
    # Note: default for `block` is now the literal FALSE instead of `F`
    # (same value; `F` is reassignable and discouraged). Internal `=`
    # assignments replaced with `<-` and else-branches braced.
    # Map the Bootstrap style name to its "btn-*" modifier class; anything
    # outside the known set yields no modifier.
    if (styleclass %in% c("primary", "info", "success", "warning",
        "danger", "inverse", "link")) {
        btn.css.class <- paste("btn", styleclass, sep = "-")
    } else {
        btn.css.class <- ""
    }
    # Same mapping for the size modifier.
    if (size %in% c("large", "small", "mini")) {
        btn.size.class <- paste("btn", size, sep = "-")
    } else {
        btn.size.class <- ""
    }
    # Full-width button when requested.
    if (block) {
        btn.block <- "btn-block"
    } else {
        btn.block <- ""
    }
    # Optional icon markup; unknown icon libraries fall back to bootstrap glyphs.
    if (!is.null(icon)) {
        if (!(icon.library[1] %in% c("font awesome", "bootstrap"))) {
            icon.set <- "bootstrap"
        } else {
            icon.set <- icon.library[1]
        }
        set <- switch(icon.set,
            "font awesome" = "fa fa-",
            "bootstrap" = "icon-")
        icon.code <- HTML(paste0("<i class='", set, icon, "'></i>"))
    } else {
        icon.code <- ""
    }
    # Assemble the <button> tag; extra classes collapse into one class string.
    tags$button(id = inputId, type = "button", class = paste("btn action-button",
        btn.css.class, btn.size.class, btn.block, css.class, collapse = " "),
        icon.code, label, ...)
}
| /R/action-button.R | permissive | ablejec/ShinySky | R | false | false | 2,278 | r | #' Buttons including Action Buttons and Event Buttons
#'
#' Creates an action button whose value is initially zero, and increments by one
#' each time it is pressed.
#'
#' @param inputId Specifies the input slot that will be used to access the
#' value.
#' @param label The contents of the button--usually a text label, but you could
#' also use any other HTML, like an image.
#' @param styleclass The Bootstrap styling class of the button--options are
#' primary, info, success, warning, danger, inverse, link or blank
#' @param size The size of the button--options are large, small, mini
#' @param block Whehter the button should fill the block
#' @param icon Display an icon for the button
#' @param icon.library Specify an icon set to use
#' \url{http://www.fontawesome.io/icons} or
#' \url{http://getbootstrap.com/2.3.2/base-css.html#icons}
#' @param css.class Any additional CSS class one wishes to add to the action
#' button
#' @param ... Other argument to feed into shiny::actionButton
#'
#' @family ShinySky elements
#'
#'
#' @export
# Build a Bootstrap-styled Shiny action button. Style/size names outside the
# known sets contribute no modifier class; an optional icon is rendered from
# either the bootstrap or font-awesome glyph set (unknown sets fall back to
# bootstrap).
actionButton2 <- function(inputId, label, styleclass = "", size = "",
    block = F, icon = NULL, icon.library = c("bootstrap", "font awesome"), css.class = "", ...) {
    known_styles <- c("primary", "info", "success", "warning", "danger", "inverse", "link")
    # Resolve optional modifier classes; empty string when not applicable.
    btn.css.class <- if (styleclass %in% known_styles) paste0("btn-", styleclass) else ""
    btn.size.class <- if (size %in% c("large", "small", "mini")) paste0("btn-", size) else ""
    btn.block <- if (block) "btn-block" else ""
    # Assemble icon markup when an icon name was supplied.
    icon.code <- ""
    if (!is.null(icon)) {
        icon.set <- icon.library[1]
        if (!(icon.set %in% c("font awesome", "bootstrap"))) {
            icon.set <- "bootstrap"
        }
        prefix <- switch(icon.set,
            "font awesome" = "fa fa-",
            "bootstrap" = "icon-")
        icon.code <- HTML(paste0("<i class='", prefix, icon, "'></i>"))
    }
    tags$button(id = inputId, type = "button", class = paste("btn action-button",
        btn.css.class, btn.size.class, btn.block, css.class, collapse = " "),
        icon.code, label, ...)
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Create a special "matrix" wrapper that can cache its inverse.
# Returns a list of four accessor closures:
#   set(y)        -- replace the stored matrix and clear the cached inverse
#   get()         -- return the stored matrix
#   setinverse(v) -- store a computed inverse in the cache
#   getinverse()  -- return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cache
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Write a short comment describing this function
# Return the inverse of the special "matrix" `x` (as built by makeCacheMatrix).
# A previously cached inverse is returned directly; otherwise the inverse is
# computed with solve(), stored in the cache, and returned. Extra arguments
# are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    # Cache hit: skip the (potentially expensive) solve() call.
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute, store, and return the inverse.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | nallamala/ProgrammingAssignment2 | R | false | false | 741 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures:
##   set(y)        -- replace the stored matrix and clear the cached inverse
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    i <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## Compute (or retrieve) the inverse of the special "matrix" built by
## makeCacheMatrix: a cached inverse is returned directly; otherwise the
## inverse is computed with solve(), cached, and returned. Extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$getinverse()
  if(!is.null(i)) {
    ## Cache hit: skip the (potentially expensive) solve() call
    message("getting cached data")
    return(i)
  }
  ## Cache miss: compute, store in the cache, and return
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/miRNAGenes.updated.R
\name{miRNAGenes.updated}
\alias{miRNAGenes.updated}
\title{Get microRNA-target Interactions}
\usage{
miRNAGenes.updated(
DE.miRNA,
DE.target = NULL,
path = "/bicoh/MARGenomics/annotationData/miRNAIntegration/"
)
}
\arguments{
\item{DE.miRNA}{Character string or character vector of miRNA(s)}
\item{DE.target}{Character string or character vector of mRNA(s)}
\item{path}{Character string with the path to where the database files are stored}
}
\value{
Returns a list with four elements: 1) a list of miRNAs, each containing
a vector with all identified targets in DE.target; 2) a dataframe with results from
miRDB, 3) mirTarBase and 4) TargetScan
}
\description{
Retrieve predicted and validated miRNA-target interactions using TargetScan,
miRDB and mirTarBase
}
\author{
Julia Perera Bel <jperera@imim.es>
}
| /man/miRNAGenes.updated.Rd | no_license | margenomics/microTargets | R | false | true | 915 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/miRNAGenes.updated.R
\name{miRNAGenes.updated}
\alias{miRNAGenes.updated}
\title{Get microRNA-target Interactions}
\usage{
miRNAGenes.updated(
DE.miRNA,
DE.target = NULL,
path = "/bicoh/MARGenomics/annotationData/miRNAIntegration/"
)
}
\arguments{
\item{DE.miRNA}{Character string or character vector of miRNA(s)}
\item{DE.target}{Character string or character vector of mRNA(s)}
\item{path}{Character string with the path to where the database files are stored}
}
\value{
Returns a list with four elements: 1) a list of miRNAs, each containing
a vector with all identified targets in DE.target; 2) a dataframe with results from
miRDB, 3) mirTarBase and 4) TargetScan
}
\description{
Retrieve predicted and validated miRNA-target interactions using TargetScan,
miRDB and mirTarBase
}
\author{
Julia Perera Bel <jperera@imim.es>
}
|
#' Read entry in file of report format
#'
#' This function reads one entry of a reporting file (a file in the model
#' intercomparison file format *.mif) into a MAgPIE object. This function can
#' be used by read_all() to read in the data for more than one output folder
#'
#'
#' @param outputdir output folder which contains the reporting
#' @param entry entry of the reporting that you want to read in
#' @param type type of the reporting that you want to read in
#' @author Lavinia Baumstark, David Klein
#' @examples
#'
#' \dontrun{
#' read.reportEntry("output/SSP2-ref",entry="Emi|Kyoto Gases (Mt CO2eq/yr)")
#' read_all(outputdirs,read.reportEntry,entry="Emi|Kyoto Gases (Mt CO2eq/yr)",as.list=FALSE)
#' }
#' @export
#' @importFrom lucode2 getScenNames
#' @importFrom magclass read.report collapseNames
read.reportEntry <- function(outputdir, entry, type = NULL) {
  # Base path of this run's reporting file, derived from the scenario name.
  base_path <- file.path(outputdir, paste0("REMIND_generic_", getScenNames(outputdir)))
  # Resolve the file to read: an explicit `type` suffix wins; otherwise
  # prefer the .mif file and fall back to .csv when it does not exist.
  if (is.null(type)) {
    mif_path <- paste0(base_path, ".mif")
    if (!file.exists(mif_path)) {
      mif_path <- paste0(base_path, ".csv")
    }
  } else {
    mif_path <- paste0(base_path, type)
  }
  # Read the report as a single magpie object, drop redundant dimension
  # names, and keep only the requested entry.
  report <- collapseNames(read.report(mif_path, as.list = FALSE))
  report[, , entry]
}
| /R/read.reportEntry.R | no_license | pik-piam/remind | R | false | false | 1,291 | r | #' Read entry in file of report format
#'
#' This function reads one entry of a reporting file (a file in the model
#' intercomparison file format *.mif) into a MAgPIE object. This function can
#' be used by read_all() to read in the data for more than one output folder
#'
#'
#' @param outputdir output folder which contains the reporting
#' @param entry entry of the reporting that you want to read in
#' @param type type of the reporting that you want to read in
#' @author Lavinia Baumstark, David Klein
#' @examples
#'
#' \dontrun{
#' read.reportEntry("output/SSP2-ref",entry="Emi|Kyoto Gases (Mt CO2eq/yr)")
#' read_all(outputdirs,read.reportEntry,entry="Emi|Kyoto Gases (Mt CO2eq/yr)",as.list=FALSE)
#' }
#' @export
#' @importFrom lucode2 getScenNames
#' @importFrom magclass read.report collapseNames
read.reportEntry <- function(outputdir,entry,type=NULL){
  # Base path of this run's reporting file, derived from the scenario name
  fname <- file.path(outputdir,paste0("REMIND_generic_",getScenNames(outputdir)))
  # Resolve the file to read: an explicit `type` suffix wins; otherwise
  # prefer the .mif file and fall back to .csv when it does not exist
  if(is.null(type)){
    if (file.exists(paste0(fname,".mif"))) {
      mif_path <- paste0(fname,".mif")
    } else {
      mif_path <- paste0(fname,".csv")
    }
  }else{
    mif_path <- paste0(fname,type)
  }
  # Read the report as a single magpie object (not a list of scenarios)
  mif <- read.report(mif_path,as.list=FALSE)
  # Drop redundant dimension names, then keep only the requested entry
  mif <- collapseNames(mif)
  mif <- mif[,,entry]
  return(mif)
}
|
#' The application server-side
#'
#' @param input,output,session Internal parameters for {shiny}.
#' DO NOT REMOVE.
#' @import shiny
#' @noRd
eng_app_server <- function(input, output, session) {
  # Registers every first-level module server once per user session.
  # Called for its side effects only; nothing is returned.
  # List the first level callModules here
  ## Header info
  mod_img_header_server("logo_testa", "Covid19.png")
  mod_img_header_server("logo_coda_torino", "Torino.png")
  mod_img_header_server("logo_coda_novara", "Novara.png")
  mod_img_header_server("logo_coda", "Covid19.png")
  eng_mod_info_sidebar_server("summary_today")
  ## Impact
  eng_mod_ind_ita_server("20200315")
  ## plotly help
  eng_mod_help_plot_server("help")
  ## National time series (cumulative and incident counts)
  eng_mod_ts_ita_server("ts_nat_cum", "cum")
  eng_mod_ts_ita_server("ts_nat_inc", "inc")
  ## Regional time series, colored by measure or by region
  eng_mod_ts_reg_server("ts_reg_cum_mes", "cum", color_var = "measure")
  eng_mod_ts_reg_server("ts_reg_inc_mes", "inc", color_var = "measure")
  eng_mod_ts_reg_server("ts_reg_cum_reg", "cum", color_var = "region")
  eng_mod_ts_reg_server("ts_reg_inc_reg", "inc", color_var = "region")
  ## Provincial time series
  eng_mod_ts_prv_server("ts_prv_cum", "cum")
  eng_mod_ts_prv_server("ts_prv_inc", "inc")
  ## Highlights ("in evidenza"): dated analysis modules
  eng_mod_0314_server("dapb")
  eng_mod_0318_friuli_server("20200318_fvg")
  eng_mod_0318_piemonte_server("20200318_piemonte")
  eng_mod_0318_intensive_server("21")
  eng_mod_0320_novara_server("da_novara")
  # Same Novara module reused for other locations; `pop` is presumably the
  # local population used for rates -- confirm in the module definition.
  eng_mod_0320_novara_server("da_vercelli",
    loc = "Vercelli",
    pop = 174904
  )
  eng_mod_0320_novara_server("da_alessandria",
    loc = "Alessandria",
    pop = 428826
  )
  eng_mod_0323_picco_server("picco")
  eng_mod_0325_hosp_server("hosp")
  eng_mod_0328_hosp_server("tot")
  eng_mod_0331_server("ven_pie")
  eng_mod_0404_magnani_server("mortality")
  eng_mod_0406_mort_ve_server("mort_veneto")
  eng_mod_0415_tamponi_server("tamp_hosp")
  ## Geo-spatial
  eng_mod_maps_server("geo_1")
}
| /R/eng-app_server.R | permissive | UBESP-DCTV/covid19ita | R | false | false | 1,873 | r | #' The application server-side
#'
#' @param input,output,session Internal parameters for {shiny}.
#' DO NOT REMOVE.
#' @import shiny
#' @noRd
eng_app_server <- function(input, output, session) {
# List the first level callModules here
## Header info
mod_img_header_server("logo_testa", "Covid19.png")
mod_img_header_server("logo_coda_torino", "Torino.png")
mod_img_header_server("logo_coda_novara", "Novara.png")
mod_img_header_server("logo_coda", "Covid19.png")
eng_mod_info_sidebar_server("summary_today")
## Impact
eng_mod_ind_ita_server("20200315")
## plottply help
eng_mod_help_plot_server("help")
## National
eng_mod_ts_ita_server("ts_nat_cum", "cum")
eng_mod_ts_ita_server("ts_nat_inc", "inc")
## Regional
eng_mod_ts_reg_server("ts_reg_cum_mes", "cum", color_var = "measure")
eng_mod_ts_reg_server("ts_reg_inc_mes", "inc", color_var = "measure")
eng_mod_ts_reg_server("ts_reg_cum_reg", "cum", color_var = "region")
eng_mod_ts_reg_server("ts_reg_inc_reg", "inc", color_var = "region")
## Provincial
eng_mod_ts_prv_server("ts_prv_cum", "cum")
eng_mod_ts_prv_server("ts_prv_inc", "inc")
## In evidenza
eng_mod_0314_server("dapb")
eng_mod_0318_friuli_server("20200318_fvg")
eng_mod_0318_piemonte_server("20200318_piemonte")
eng_mod_0318_intensive_server("21")
eng_mod_0320_novara_server("da_novara")
eng_mod_0320_novara_server("da_vercelli",
loc = "Vercelli",
pop = 174904
)
eng_mod_0320_novara_server("da_alessandria",
loc = "Alessandria",
pop = 428826
)
eng_mod_0323_picco_server("picco")
eng_mod_0325_hosp_server("hosp")
eng_mod_0328_hosp_server("tot")
eng_mod_0331_server("ven_pie")
eng_mod_0404_magnani_server("mortality")
eng_mod_0406_mort_ve_server("mort_veneto")
eng_mod_0415_tamponi_server("tamp_hosp")
## Geo-spatial
eng_mod_maps_server("geo_1")
}
|
##run_analysis.R
## Builds a tidy summary of the UCI HAR data set.
## Inputs, relative to the working directory:
##  - activity_labels.txt : index and name for each activity type
##  - features.txt        : description of each measurement column
##  - test/, train/       : subject_*, X_* and y_* observation files
## Naming convention kept from the original script:
##  - lower_case_underscores = data read directly from the input files
##  - camelCase              = data created by this script
## On completion the workspace holds X_obs (all labelled observations) and
## X_summary (per Subject/Activity mean of each mean/std feature).

## Read the basic files provided in the data set. Pay attention to the directory structure.
activity_labels <- read.table("activity_labels.txt", sep=" ")
features <- read.table("features.txt", sep=" ")

## Observations during testing phase
subject_test <- read.table("test/subject_test.txt")
X_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")

## Observations during training phase
subject_train <- read.table("train/subject_train.txt")
X_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")

## With all files read, assign names to each column according to the
## descriptions in the features file.
names(X_test) <- features[, 2]
names(X_train) <- features[, 2]

## Keep only the mean and standard-deviation columns. "meanFreq" columns are
## weighted averages of frequency bins rather than means of measurements, so
## they are excluded, and the selection is sorted back into file order.
meanCol <- grep("mean", features[, 2])
stdCol <- grep("std", features[, 2])
removeCol <- grep("meanFreq", features[, 2])
relevantCol <- sort(setdiff(c(meanCol, stdCol), removeCol))

X_test <- X_test[, relevantCol]
X_train <- X_train[, relevantCol]

## Map each observation's activity index to its descriptive label and add it
## as the first column of each measurement table.
yTestActivity <- activity_labels[y_test[, 1], 2]
X_test <- cbind(yTestActivity, X_test)
names(X_test)[1] <- "Activity"

yTrainActivity <- activity_labels[y_train[, 1], 2]
X_train <- cbind(yTrainActivity, X_train)
names(X_train)[1] <- "Activity"

## Tag each row with its origin before the two sets are merged (the merge
## would otherwise lose this information), then prepend the subject id.
X_test <- cbind(ObservationType = "Test", X_test)
X_test <- cbind(subject_test, X_test)
names(X_test)[1] <- "Subject"

X_train <- cbind(ObservationType = "Train", X_train)
X_train <- cbind(subject_train, X_train)
names(X_train)[1] <- "Subject"

## Merge the test and training observations into one data set
X_obs <- rbind(X_test, X_train)

library(dplyr)
## Mean of every measurement per Subject/Activity pair. ObservationType is
## dropped before averaging: the mean of a Test/Train label is meaningless.
## FIX: summarise_each() is defunct in current dplyr; use across() instead
## (the old call produced an NA column for ObservationType that was then
## removed with a separate select()).
X_summary <- X_obs %>%
  group_by(Subject, Activity) %>%
  select(-ObservationType) %>%
  summarise(across(everything(), mean), .groups = "drop_last")

## cleanup: drop the intermediates, keeping X_obs and X_summary
rm(activity_labels, features, subject_test, subject_train,
   y_test, y_train, X_train, X_test,
   meanCol, stdCol, relevantCol, removeCol,
   yTestActivity, yTrainActivity)
| /run_analysis.R | no_license | heatwolej/TidyDataProject | R | false | false | 3,846 | r | ##run_analysis.R
## run_analysis.R
## Builds a tidy summary of the UCI HAR data set.
## Inputs, relative to the working directory:
##  - activity_labels.txt : index and name for each activity type
##  - features.txt        : description of each measurement column
##  - test/, train/       : subject_*, X_* and y_* observation files
## Naming convention kept from the original script:
##  - lower_case_underscores = data read directly from the input files
##  - camelCase              = data created by this script
## On completion the workspace holds X_obs (all labelled observations) and
## X_summary (per Subject/Activity mean of each mean/std feature).

## Read the basic files provided in the data set. Pay attention to the directory structure.
activity_labels <- read.table("activity_labels.txt", sep=" ")
features <- read.table("features.txt", sep=" ")

## Observations during testing phase
subject_test <- read.table("test/subject_test.txt")
X_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")

## Observations during training phase
subject_train <- read.table("train/subject_train.txt")
X_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")

## With all files read, assign names to each column according to the
## descriptions in the features file.
names(X_test) <- features[, 2]
names(X_train) <- features[, 2]

## Keep only the mean and standard-deviation columns. "meanFreq" columns are
## weighted averages of frequency bins rather than means of measurements, so
## they are excluded, and the selection is sorted back into file order.
meanCol <- grep("mean", features[, 2])
stdCol <- grep("std", features[, 2])
removeCol <- grep("meanFreq", features[, 2])
relevantCol <- sort(setdiff(c(meanCol, stdCol), removeCol))

X_test <- X_test[, relevantCol]
X_train <- X_train[, relevantCol]

## Map each observation's activity index to its descriptive label and add it
## as the first column of each measurement table.
yTestActivity <- activity_labels[y_test[, 1], 2]
X_test <- cbind(yTestActivity, X_test)
names(X_test)[1] <- "Activity"

yTrainActivity <- activity_labels[y_train[, 1], 2]
X_train <- cbind(yTrainActivity, X_train)
names(X_train)[1] <- "Activity"

## Tag each row with its origin before the two sets are merged (the merge
## would otherwise lose this information), then prepend the subject id.
X_test <- cbind(ObservationType = "Test", X_test)
X_test <- cbind(subject_test, X_test)
names(X_test)[1] <- "Subject"

X_train <- cbind(ObservationType = "Train", X_train)
X_train <- cbind(subject_train, X_train)
names(X_train)[1] <- "Subject"

## Merge the test and training observations into one data set
X_obs <- rbind(X_test, X_train)

library(dplyr)
## Mean of every measurement per Subject/Activity pair. ObservationType is
## dropped before averaging: the mean of a Test/Train label is meaningless.
## FIX: summarise_each() is defunct in current dplyr; use across() instead
## (the old call produced an NA column for ObservationType that was then
## removed with a separate select()).
X_summary <- X_obs %>%
  group_by(Subject, Activity) %>%
  select(-ObservationType) %>%
  summarise(across(everything(), mean), .groups = "drop_last")

## cleanup: drop the intermediates, keeping X_obs and X_summary
rm(activity_labels, features, subject_test, subject_train,
   y_test, y_train, X_train, X_test,
   meanCol, stdCol, relevantCol, removeCol,
   yTestActivity, yTrainActivity)
|
library(data.table)

## Import every daily-runoff station file, attach the station id (first 7
## characters of the file name) and the station short name from the station
## metadata, and save the combined table as an .rds file.
runoff_stations <- readRDS('./data/runoff_stations_raw.rds')
raw_path <- './data/raw/runoff_day/'
fnames <- list.files(raw_path)
n_station <- length(fnames)
id_length <- 7
id_sname <- runoff_stations[, .(id, sname)]

## FIX: collect per-station tables in a preallocated list and bind once at
## the end. Growing a data.table with rbind() inside the loop copies the
## accumulated rows on every iteration (quadratic cost).
station_tables <- vector("list", n_station)
for (file_count in seq_len(n_station)) {
  temp_dt <- fread(paste0(raw_path, fnames[file_count]))
  station_id <- substr(fnames[file_count], 1, id_length)
  temp_dt <- cbind(id = factor(station_id), temp_dt)
  temp_dt <- id_sname[temp_dt, on = 'id', ]   # join station short name by id
  station_tables[[file_count]] <- temp_dt
}
runoff_day_raw <- rbindlist(station_tables)

runoff_day_raw[, 'hh:mm' := NULL]             # time-of-day column is unused
colnames(runoff_day_raw)[3:4] <- c('date', 'value')
runoff_day_raw[, date := as.Date(date)]
saveRDS(runoff_day_raw, './data/runoff_day_raw.rds')
| /code/01b_import_runoff_day.R | no_license | richardrex/eda_rhine | R | false | false | 747 | r | library(data.table)
## Import every daily-runoff station file, attach the station id (first 7
## characters of the file name) and the station short name from the station
## metadata, and save the combined table as an .rds file.
runoff_stations <- readRDS('./data/runoff_stations_raw.rds')
raw_path <- './data/raw/runoff_day/'
fnames <- list.files(raw_path)
n_station <- length(fnames)
id_length <- 7
id_sname <- runoff_stations[, .(id, sname)]

## FIX: collect per-station tables in a preallocated list and bind once at
## the end. Growing a data.table with rbind() inside the loop copies the
## accumulated rows on every iteration (quadratic cost).
station_tables <- vector("list", n_station)
for (file_count in seq_len(n_station)) {
  temp_dt <- fread(paste0(raw_path, fnames[file_count]))
  station_id <- substr(fnames[file_count], 1, id_length)
  temp_dt <- cbind(id = factor(station_id), temp_dt)
  temp_dt <- id_sname[temp_dt, on = 'id', ]   # join station short name by id
  station_tables[[file_count]] <- temp_dt
}
runoff_day_raw <- rbindlist(station_tables)

runoff_day_raw[, 'hh:mm' := NULL]             # time-of-day column is unused
colnames(runoff_day_raw)[3:4] <- c('date', 'value')
runoff_day_raw[, date := as.Date(date)]
saveRDS(runoff_day_raw, './data/runoff_day_raw.rds')
|
#' This function calculates the relative weight of each sector for each day
#' and displays the result in a dataframe
#'
#' Relative weight = number of constituents in a sector on a given date,
#' divided by the total number of constituents on that date.
#'
#' @param category "minvol" to use the minimum-volatility index data,
#'   anything else to use the USA index data.
#' @export
rel_weight <- function(category){
  summary <- data.frame();
  if (category == "minvol"){
    data(minvol)
    dates <- unique(minvol$Date)
    temp <- minvol
  } else {
    data(usa)
    dates <- unique(usa$Date)
    temp <- usa
  }
  for (date in dates){
    # Extract entries with the matched date.
    # BUG FIX: subset the selected data set (temp), not minvol -- the old
    # code read from minvol even when category was not "minvol".
    msci_by_date <- temp[which(temp$Date == date),]
    # Calculate the relative weight of each sector for the given date
    sector.freq <- table(msci_by_date$Sector)
    sector.relfreq <- sector.freq/nrow(msci_by_date)
    sector.weight <- sector.relfreq[which(sector.relfreq > 0)]
    local_summary <- merge(date, sector.weight)
    summary <- rbind(summary, local_summary)
  }
  colnames(summary) <- c("Date", "Sector", "Weight" )
  # Iterating over a Date vector strips the Date class, so restore it here.
  summary$Date <- as.Date(summary$Date, origin = "1970-01-01")
  summary$Sector <- as.character(summary$Sector)
  summary <- mutate(summary, Sector = ifelse(Sector == "Telecommunications", "Telecommunication Services", Sector))
  # NOTE(review): hard-coded absolute save paths; consider parameterising.
  if (category == "minvol"){
    minvol_weight <- summary
    save(minvol_weight, file = "C:/Users/Nam Nguyen/Documents/MSCI-master/msci/data/minvol_weight.RData")
  } else {
    usa_weight <- summary
    save(usa_weight, file = "C:/Users/Nam Nguyen/Documents/MSCI-master/msci/data/usa_weight.RData")
  }
}
| /R/rel_weight.R | no_license | nnguyen2496/mscir | R | false | false | 1,447 | r | #' This function calculates the relative weight of each sector for each day
#' and displays the result in a dataframe
#'
#' Relative weight = number of constituents in a sector on a given date,
#' divided by the total number of constituents on that date.
#'
#' @param category "minvol" to use the minimum-volatility index data,
#'   anything else to use the USA index data.
#' @export
rel_weight <- function(category){
  summary <- data.frame();
  if (category == "minvol"){
    data(minvol)
    dates <- unique(minvol$Date)
    temp <- minvol
  } else {
    data(usa)
    dates <- unique(usa$Date)
    temp <- usa
  }
  for (date in dates){
    # Extract entries with the matched date.
    # BUG FIX: subset the selected data set (temp), not minvol -- the old
    # code read from minvol even when category was not "minvol".
    msci_by_date <- temp[which(temp$Date == date),]
    # Calculate the relative weight of each sector for the given date
    sector.freq <- table(msci_by_date$Sector)
    sector.relfreq <- sector.freq/nrow(msci_by_date)
    sector.weight <- sector.relfreq[which(sector.relfreq > 0)]
    local_summary <- merge(date, sector.weight)
    summary <- rbind(summary, local_summary)
  }
  colnames(summary) <- c("Date", "Sector", "Weight" )
  # Iterating over a Date vector strips the Date class, so restore it here.
  summary$Date <- as.Date(summary$Date, origin = "1970-01-01")
  summary$Sector <- as.character(summary$Sector)
  summary <- mutate(summary, Sector = ifelse(Sector == "Telecommunications", "Telecommunication Services", Sector))
  # NOTE(review): hard-coded absolute save paths; consider parameterising.
  if (category == "minvol"){
    minvol_weight <- summary
    save(minvol_weight, file = "C:/Users/Nam Nguyen/Documents/MSCI-master/msci/data/minvol_weight.RData")
  } else {
    usa_weight <- summary
    save(usa_weight, file = "C:/Users/Nam Nguyen/Documents/MSCI-master/msci/data/usa_weight.RData")
  }
}
|
### PSAT
## 클린업 1주차 패키지
# 2019310260 임주은
# 기본세팅
install.packages('plyr')
install.packages('tidyverse')
install.packages('data.table')
library(plyr)
library(tidyverse)
library(data.table)
getwd()
setwd("C:/Users/Jooeun/Desktop/1주차패키지")
data <- fread('data.csv', stringsAsFactors=FALSE, data.table=FALSE)
## Chapter 1. 전처리
#1
str(data)
data %>% is.na %>% colSums
nrow(unique(data))
#2-1
data <- data %>% filter(!is.na(confirmed_date))
#2.2
data <- data %>%
filter(patient_id != "") %>%
filter(sex != "") %>%
filter(age != "") %>%
filter(country != "") %>%
filter(province != "") %>%
filter(city != "") %>%
filter(confirmed_date != "") %>%
filter(state != "")
colSums(is.na(data))
nrow(unique(data))
#3
data <- data[data$country == 'Korea',]
data <- subset(data, select= -c(country))
#4
data$province <- data$province %>%
revalue(c("서울"="서울특별시",
"부산"="부산광역시",
"대구"="대구광역시",
"인천"="인천광역시",
"대전"="대전광역시",
"세종"="세종특별자치시",
"울산"="울산광역시",
"제주도"="제주특별자치도"))
data %>% head
#5
data$confirmed_date <- data$confirmed_date %>% as.Date
str(data$confirmed_date)
#6
A <- data %>% group_by(confirmed_date) %>% summarize(n=n())
colnames(A) = c('confirmed_date', 'confirmed_number')
data <- left_join(data, A, by='confirmed_date')
head(data)
#7
data$wday <- format(data$confirmed_date, format='%a')
data$wday <- factor(data$wday, levels=c('일','월','화','수','목','금','토'))
data$wday <- ifelse(data$wday %in% c('일','토', '주말','주중'))
head(data)
#8
tapply(data$confirmed_number, data$age, summary)
## Chapter 2. Visualisation
# 1-1: daily confirmed-case trend.
# FIX: geom_line() has no 'colors' argument (it was silently ignored);
# the correct spelling is 'colour'.
data %>% ggplot(aes(x=confirmed_date, y=confirmed_number)) +
  geom_line(colour='lightblue') +
  ggtitle('코로나 확진자수 추이 \n - 국내인 기준') +
  theme(plot.title=element_text(hjust=0.5))
# 1-2: case counts per province.
# FIX: the pipe was nested inside group_by(); group first, then summarise.
data %>% group_by(province) %>% summarize(n=n())
# 2: stacked bar chart of patient state per province, ordered by case count.
# FIX: the '+' joining ggplot() and geom_bar() started a new line, which
# splits the expression into two statements; it must end the previous line.
B = data %>% group_by(province) %>% summarize(n=n())
data %>% left_join(B, by='province') %>%
  ggplot(aes(x=reorder(province, n))) +
  geom_bar(aes(fill=state, color=state), alpha = 0.2, position='stack') +
  labs(x='지역', y='확진자수', fill='state') +
  coord_flip()
# 3-1: distribution of daily case counts per age group
data %>%
  group_by(age, confirmed_date) %>%
  summarise(
    count=n()
  ) %>%
  ggplot(aes(age, count)) +
  geom_boxplot(aes(x=age, y=count, fill=age, colour=age),
               alpha=0.4,
               outlier.shape = NA) +
  stat_boxplot(geom='errorbar', aes(color = age)) +
  theme_classic() +
  labs(y="일단위 확진자수")
# 3-2: one-way ANOVA of daily counts across age groups
data %>%
  group_by(age, confirmed_date) %>%
  summarise(
    count=n()
  ) %>%
  aov(count ~ age, data=.) %>%
  summary
# Chapter 3. Modelling: regression analysis
# 0: packages used in this chapter
library(MASS)
library(corrplot)
library(caret)
library(MLmetrics)
# 1: correlation structure of the Boston housing data
Boston <- as.data.frame(MASS::Boston)
B_cor <- cor(Boston)
round(B_cor, 2)
corrplot(B_cor, method='number', type='upper')
# 2: scatter plots of every predictor against medv with a linear fit
Boston %>%
  gather(-medv, key="variable", value = "val") %>%
  ggplot(aes(x=val, y=medv)) +
  geom_point(aes(x=val, y=medv)) +
  geom_smooth(method = lm, color ='lightblue') +
  facet_wrap(~variable, nrow=4, scales = "free") +
  labs(title="Scatter plot of dependent variables vs Median Value (medv)")
# 3-1: reproducible 70/30 train/test split
set.seed(1234)
train_index <- createDataPartition(Boston$medv, p=0.7, list=FALSE)
train_data <- Boston[train_index,]
test_data <- Boston[-train_index,]
# 3-2: fit a linear model and evaluate it on the held-out set
Fit <- train(medv ~ ., data = train_data, method = "lm")
summary(Fit)
medv_pred <- predict(Fit, newdata = test_data)
medv_true <- test_data$medv
RMSE(medv_pred, medv_true)
# 3-3: written answer (Korean, printed verbatim)
print('RMSE , 즉 평균 제곱근 오차 (Root Mean Square Error)는 추정 값
또는 모델이 예측한 값과 실제 환경에서 관찰되는 값의 차이를 다룰 때
흔히 사용되는 측도이다. 이는 MSE에 루트를 씌운 값이므로 가설을
세운 뒤에 평균 제곱근 오차를 판단하여 조금씩 변화를 주고,
이 변화가 긍정적이면 오차를 최소로 만들 수 있도록 과정을 반복한다.
그러므로, 모델의 RMSE를 낮추려면 예측한 값과 실제 값의 차이를
낮출 수 있도록 평균 제곱근 오차를 최소화 만들면 된다.')
# 4: coefficient bar chart, coloured by effect size
result <- summary(Fit)$coefficients %>% as.data.frame
col <- ifelse(result$Estimate >5, 'red', ifelse(result$Estimate < -2, 'blue', 'yellow'))
result %>%
  ggplot(aes(x=Estimate, y=reorder(rownames(.), Estimate))) +
  geom_col(fill = col, color=col, alpha=0.3) +
  geom_text(aes(label=round(Estimate,2)), position = position_stack(0.5)) +
  theme_classic() +
  theme(legend.position = "none") +
  labs(x="value", y="intercept and variables")
## 클린업 1주차 패키지
# 2019310260 임주은
# 기본세팅
install.packages('plyr')
install.packages('tidyverse')
install.packages('data.table')
library(plyr)
library(tidyverse)
library(data.table)
getwd()
setwd("C:/Users/Jooeun/Desktop/1주차패키지")
data <- fread('data.csv', stringsAsFactors=FALSE, data.table=FALSE)
## Chapter 1. 전처리
#1
str(data)
data %>% is.na %>% colSums
nrow(unique(data))
#2-1
data <- data %>% filter(!is.na(confirmed_date))
#2.2
data <- data %>%
filter(patient_id != "") %>%
filter(sex != "") %>%
filter(age != "") %>%
filter(country != "") %>%
filter(province != "") %>%
filter(city != "") %>%
filter(confirmed_date != "") %>%
filter(state != "")
colSums(is.na(data))
nrow(unique(data))
#3
data <- data[data$country == 'Korea',]
data <- subset(data, select= -c(country))
#4
data$province <- data$province %>%
revalue(c("서울"="서울특별시",
"부산"="부산광역시",
"대구"="대구광역시",
"인천"="인천광역시",
"대전"="대전광역시",
"세종"="세종특별자치시",
"울산"="울산광역시",
"제주도"="제주특별자치도"))
data %>% head
#5
data$confirmed_date <- data$confirmed_date %>% as.Date
str(data$confirmed_date)
#6
A <- data %>% group_by(confirmed_date) %>% summarize(n=n())
colnames(A) = c('confirmed_date', 'confirmed_number')
data <- left_join(data, A, by='confirmed_date')
head(data)
#7
data$wday <- format(data$confirmed_date, format='%a')
data$wday <- factor(data$wday, levels=c('일','월','화','수','목','금','토'))
data$wday <- ifelse(data$wday %in% c('일','토', '주말','주중'))
head(data)
#8
tapply(data$confirmed_number, data$age, summary)
## Chapter 2. Visualisation
# 1-1: daily confirmed-case trend.
# FIX: geom_line() has no 'colors' argument (it was silently ignored);
# the correct spelling is 'colour'.
data %>% ggplot(aes(x=confirmed_date, y=confirmed_number)) +
  geom_line(colour='lightblue') +
  ggtitle('코로나 확진자수 추이 \n - 국내인 기준') +
  theme(plot.title=element_text(hjust=0.5))
# 1-2: case counts per province.
# FIX: the pipe was nested inside group_by(); group first, then summarise.
data %>% group_by(province) %>% summarize(n=n())
# 2: stacked bar chart of patient state per province, ordered by case count.
# FIX: the '+' joining ggplot() and geom_bar() started a new line, which
# splits the expression into two statements; it must end the previous line.
B = data %>% group_by(province) %>% summarize(n=n())
data %>% left_join(B, by='province') %>%
  ggplot(aes(x=reorder(province, n))) +
  geom_bar(aes(fill=state, color=state), alpha = 0.2, position='stack') +
  labs(x='지역', y='확진자수', fill='state') +
  coord_flip()
# 3-1: distribution of daily case counts per age group
data %>%
  group_by(age, confirmed_date) %>%
  summarise(
    count=n()
  ) %>%
  ggplot(aes(age, count)) +
  geom_boxplot(aes(x=age, y=count, fill=age, colour=age),
               alpha=0.4,
               outlier.shape = NA) +
  stat_boxplot(geom='errorbar', aes(color = age)) +
  theme_classic() +
  labs(y="일단위 확진자수")
# 3-2: one-way ANOVA of daily counts across age groups
data %>%
  group_by(age, confirmed_date) %>%
  summarise(
    count=n()
  ) %>%
  aov(count ~ age, data=.) %>%
  summary
# Chapter 3. Modelling: regression analysis
# 0: packages used in this chapter
library(MASS)
library(corrplot)
library(caret)
library(MLmetrics)
# 1: correlation structure of the Boston housing data
Boston <- as.data.frame(MASS::Boston)
B_cor <- cor(Boston)
round(B_cor, 2)
corrplot(B_cor, method='number', type='upper')
# 2: scatter plots of every predictor against medv with a linear fit
Boston %>%
  gather(-medv, key="variable", value = "val") %>%
  ggplot(aes(x=val, y=medv)) +
  geom_point(aes(x=val, y=medv)) +
  geom_smooth(method = lm, color ='lightblue') +
  facet_wrap(~variable, nrow=4, scales = "free") +
  labs(title="Scatter plot of dependent variables vs Median Value (medv)")
# 3-1: reproducible 70/30 train/test split
set.seed(1234)
train_index <- createDataPartition(Boston$medv, p=0.7, list=FALSE)
train_data <- Boston[train_index,]
test_data <- Boston[-train_index,]
# 3-2: fit a linear model and evaluate it on the held-out set
Fit <- train(medv ~ ., data = train_data, method = "lm")
summary(Fit)
medv_pred <- predict(Fit, newdata = test_data)
medv_true <- test_data$medv
RMSE(medv_pred, medv_true)
# 3-3: written answer (Korean, printed verbatim)
print('RMSE , 즉 평균 제곱근 오차 (Root Mean Square Error)는 추정 값
또는 모델이 예측한 값과 실제 환경에서 관찰되는 값의 차이를 다룰 때
흔히 사용되는 측도이다. 이는 MSE에 루트를 씌운 값이므로 가설을
세운 뒤에 평균 제곱근 오차를 판단하여 조금씩 변화를 주고,
이 변화가 긍정적이면 오차를 최소로 만들 수 있도록 과정을 반복한다.
그러므로, 모델의 RMSE를 낮추려면 예측한 값과 실제 값의 차이를
낮출 수 있도록 평균 제곱근 오차를 최소화 만들면 된다.')
# 4: coefficient bar chart, coloured by effect size
result <- summary(Fit)$coefficients %>% as.data.frame
col <- ifelse(result$Estimate >5, 'red', ifelse(result$Estimate < -2, 'blue', 'yellow'))
result %>%
  ggplot(aes(x=Estimate, y=reorder(rownames(.), Estimate))) +
  geom_col(fill = col, color=col, alpha=0.3) +
  geom_text(aes(label=round(Estimate,2)), position = position_stack(0.5)) +
  theme_classic() +
  theme(legend.position = "none") +
  labs(x="value", y="intercept and variables")
# Install EpiModel Stack
install.packages("EpiModel")

# Install Extra Helper Packages
install.packages(c("remotes", "sessioninfo"))

# Latest Dev Versions of Packages, pinned to the commits used for this project
remotes::install_github("statnet/network", ref = "deff2a0")
remotes::install_github("statnet/networkDynamic", ref = "14182bf")
remotes::install_github("statnet/statnet.common", ref = "3307a8c")
remotes::install_github("statnet/ergm", ref = "7c81899")
remotes::install_github("statnet/tergm", ref = "d3af135")
remotes::install_github("statnet/EpiModel", ref = "ea6aa16")
remotes::install_github("statnet/EpiModelHPC", ref = "a53760d")
remotes::install_github("statnet/tergmLite", ref = "73d2a2d")
# FIX: EpiModelHIV-p was installed twice back to back; the "injectable-prep"
# branch install was immediately overwritten by the pinned commit below, so
# the redundant first install is disabled.
# remotes::install_github("EpiModel/EpiModelHIV-p", ref = "injectable-prep")
remotes::install_github("EpiModel/EpiModelHIV-p", ref = "5bc2af6")

# Package Listing ---------------------------------------------------------
library("EpiModelHIV")
sessioninfo::session_info()

# Versions recorded on Jan 3 2019:
#   EpiModel 1.7.3 (statnet/EpiModel@20ed1fa), EpiModelHIV 1.5.0
#   (EpiModel/EpiModelHIV-p@924f511), EpiModelHPC 2.0.0 (@1d6accc),
#   ergm 3.10.0-4725 (@9cad193), network 1.14-355 (@533c8db),
#   statnet.common 4.2.0-208 (@095f1ac), tergm 3.6.0-1659 (@d3af135),
#   tergmLite 1.2.0 (@73d2a2d)
# Versions recorded on Mar 27 2019 (updates):
#   EpiModel 1.7.3 (@ea6aa16), EpiModelHIV 1.5.0 (@5bc2af6),
#   EpiModelHPC 2.0.1 (@a53760d), ergm 3.10.0-4827 (@7c81899),
#   network 1.14-377 (@deff2a0), networkDynamic 0.10 (@14182bf),
#   statnet.common 4.3.0-230 (@3307a8c), tergm 3.6.0-1659 (@d3af135),
#   tergmLite 1.2.0 (@73d2a2d)
| /scripts/InstallStack.R | permissive | EpiModel/injectable-prep | R | false | false | 2,372 | r | # Install EpiModel Stack
install.packages("EpiModel")

# Install Extra Helper Packages
install.packages(c("remotes", "sessioninfo"))

# Latest Dev Versions of Packages, pinned to the commits used for this project
remotes::install_github("statnet/network", ref = "deff2a0")
remotes::install_github("statnet/networkDynamic", ref = "14182bf")
remotes::install_github("statnet/statnet.common", ref = "3307a8c")
remotes::install_github("statnet/ergm", ref = "7c81899")
remotes::install_github("statnet/tergm", ref = "d3af135")
remotes::install_github("statnet/EpiModel", ref = "ea6aa16")
remotes::install_github("statnet/EpiModelHPC", ref = "a53760d")
remotes::install_github("statnet/tergmLite", ref = "73d2a2d")
# FIX: EpiModelHIV-p was installed twice back to back; the "injectable-prep"
# branch install was immediately overwritten by the pinned commit below, so
# the redundant first install is disabled.
# remotes::install_github("EpiModel/EpiModelHIV-p", ref = "injectable-prep")
remotes::install_github("EpiModel/EpiModelHIV-p", ref = "5bc2af6")

# Package Listing ---------------------------------------------------------
library("EpiModelHIV")
sessioninfo::session_info()

# Versions recorded on Jan 3 2019:
#   EpiModel 1.7.3 (statnet/EpiModel@20ed1fa), EpiModelHIV 1.5.0
#   (EpiModel/EpiModelHIV-p@924f511), EpiModelHPC 2.0.0 (@1d6accc),
#   ergm 3.10.0-4725 (@9cad193), network 1.14-355 (@533c8db),
#   statnet.common 4.2.0-208 (@095f1ac), tergm 3.6.0-1659 (@d3af135),
#   tergmLite 1.2.0 (@73d2a2d)
# Versions recorded on Mar 27 2019 (updates):
#   EpiModel 1.7.3 (@ea6aa16), EpiModelHIV 1.5.0 (@5bc2af6),
#   EpiModelHPC 2.0.1 (@a53760d), ergm 3.10.0-4827 (@7c81899),
#   network 1.14-377 (@deff2a0), networkDynamic 0.10 (@14182bf),
#   statnet.common 4.3.0-230 (@3307a8c), tergm 3.6.0-1659 (@d3af135),
#   tergmLite 1.2.0 (@73d2a2d)
|
context('acfft tests')

test_that('Outputs of acfft are correct', {
  skip_on_cran()
  data(musdata)
  coords <- musdata[, 4:5]
  mglm <- glm(musculus ~ pollution + exposure, "poisson", musdata)

  # Known-good autocorrelation values for the first 10 distance bins.
  expected <- c(0.685338504, 0.509680590, 0.363021118, 0.247398654,
                0.144726020, 0.084220961, 0.050228656, 0.022369044,
                -0.001985639, -0.027296083)
  dmax <- 10
  ac <- acfft(coords, resid(mglm, type = "pearson"),
              lim1 = 0, lim2 = 1, dmax = dmax)
  expect_equal(ac, expected, tolerance = 1e-7)
  expect_true(is.vector(ac))

  # The output length always equals the requested number of distance bins.
  dmax <- 50
  ac <- acfft(coords, resid(mglm, type = "pearson"),
              lim1 = 0, lim2 = 1, dmax = dmax)
  expect_equal(length(ac), dmax)
})
test_that('Fails correctly', {
  skip_on_cran()
  data(musdata)
  coords <- musdata[, 4:5]
  mglm <- glm(musculus ~ pollution + exposure, "poisson", musdata)
  # FIX: dmax was never defined in this test (locals from the previous
  # test_that() block are not visible here), so acfft() failed with
  # "object 'dmax' not found" instead of the dimension error under test.
  dmax <- 10
  coords <- cbind(round(runif(350)), round(runif(350)))
  expect_error(acfft(coords, resid(mglm, type = "pearson"),
                     lim1 = 0, lim2 = 1, dmax = dmax),
               "error in dimension")
}) | /tests/testthat/test_acfft.R | no_license | levisc8/spind | R | false | false | 1,088 | r | context('acfft tests')
test_that('Outputs of acfft are correct', {
  skip_on_cran()
  data(musdata)
  coords <- musdata[, 4:5]
  mglm <- glm(musculus ~ pollution + exposure, "poisson", musdata)

  # Known-good autocorrelation values for the first 10 distance bins.
  expected <- c(0.685338504, 0.509680590, 0.363021118, 0.247398654,
                0.144726020, 0.084220961, 0.050228656, 0.022369044,
                -0.001985639, -0.027296083)
  dmax <- 10
  ac <- acfft(coords, resid(mglm, type = "pearson"),
              lim1 = 0, lim2 = 1, dmax = dmax)
  expect_equal(ac, expected, tolerance = 1e-7)
  expect_true(is.vector(ac))

  # The output length always equals the requested number of distance bins.
  dmax <- 50
  ac <- acfft(coords, resid(mglm, type = "pearson"),
              lim1 = 0, lim2 = 1, dmax = dmax)
  expect_equal(length(ac), dmax)
})
test_that('Fails correctly', {
  skip_on_cran()
  data(musdata)
  coords <- musdata[, 4:5]
  mglm <- glm(musculus ~ pollution + exposure, "poisson", musdata)
  # FIX: dmax was never defined in this test (locals from the previous
  # test_that() block are not visible here), so acfft() failed with
  # "object 'dmax' not found" instead of the dimension error under test.
  dmax <- 10
  coords <- cbind(round(runif(350)), round(runif(350)))
  expect_error(acfft(coords, resid(mglm, type = "pearson"),
                     lim1 = 0, lim2 = 1, dmax = dmax),
               "error in dimension")
})
library(shiny)
library(rsconnect)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("A sleepy survey"),
# Sidebar
sidebarPanel("Thanks for checking out our site!"),
# Show a plot of the generated distribution
mainPanel(" ",
checkboxGroupInput("checkGroup", label = h3 ("What would you like to learn more about?"),
choices = list("If I have a sleep disorder" = 1, "The treatment for sleep disturbances" = 2,
"What's going on with autism and sleep" = 3, "How other autistics deal with sleep problems" = 4,
"How parents or family members deal with children's sleep problems" = 5),
selected = 1),
hr(),
fluidRow(column(3)),
shinydashboard::box(title = "Where to find more information....",
actionButton(inputId='ab1', label="Sleep problems and ASD symptoms",
icon = icon("th"),
onclick ="window.open('https://iancommunity.org/ssc/sleep-problems-linked-more-severe-autism-symptoms', '_blank')"),
actionButton(inputId='ab1', label="Treating sleep disorders",
icon = icon("th"),
onclick ="window.open('https://www.tuck.com/autism-spectrum-disorder-and-sleep/#treatment_options_for_asd_related_sleep_problems', '_blank')"),
actionButton(inputId='ab1', label="Statement from the American Academy of Pediatrics",
icon = icon("th"),
onclick ="window.open('https://www.aap.org/en-us/about-the-aap/aap-press-room/Pages/Children-with-Autism-Spectrum-Disorder-Experience-Poor-Sleep-Habits.aspx', '_blank')"),
actionButton(inputId='ab1', label="Strategies for improving sleep",
icon = icon("th"),
onclick ="window.open('https://www.autism.org.uk/about/health/sleep.aspx', '_blank')")
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
# You can access the values of the widget (as a vector)
# with input$checkGroup, e.g.
output$value <- renderPrint({ input$checkGroup })
}
# Run the application
shinyApp(ui = ui, server = server)
| /docs/SurveySays/app.R | no_license | mhorger/Sleep-ASD | R | false | false | 2,558 | r |
library(shiny)
library(rsconnect)

# UI: a short survey page with one checkbox question and resource links.
ui <- fluidPage(

   # Application title
   titlePanel("A sleepy survey"),

   # Sidebar
   sidebarPanel("Thanks for checking out our site!"),

   mainPanel(" ",
             checkboxGroupInput("checkGroup", label = h3 ("What would you like to learn more about?"),
                                choices = list("If I have a sleep disorder" = 1, "The treatment for sleep disturbances" = 2,
                                               "What's going on with autism and sleep" = 3, "How other autistics deal with sleep problems" = 4,
                                               "How parents or family members deal with children's sleep problems" = 5),
                                selected = 1),
             hr(),
             fluidRow(column(3)),
             # FIX: all four buttons shared inputId 'ab1'; Shiny input ids
             # must be unique within the app, so they are renumbered ab1..ab4.
             shinydashboard::box(title = "Where to find more information....",
                                 actionButton(inputId='ab1', label="Sleep problems and ASD symptoms",
                                              icon = icon("th"),
                                              onclick ="window.open('https://iancommunity.org/ssc/sleep-problems-linked-more-severe-autism-symptoms', '_blank')"),
                                 actionButton(inputId='ab2', label="Treating sleep disorders",
                                              icon = icon("th"),
                                              onclick ="window.open('https://www.tuck.com/autism-spectrum-disorder-and-sleep/#treatment_options_for_asd_related_sleep_problems', '_blank')"),
                                 actionButton(inputId='ab3', label="Statement from the American Academy of Pediatrics",
                                              icon = icon("th"),
                                              onclick ="window.open('https://www.aap.org/en-us/about-the-aap/aap-press-room/Pages/Children-with-Autism-Spectrum-Disorder-Experience-Poor-Sleep-Habits.aspx', '_blank')"),
                                 actionButton(inputId='ab4', label="Strategies for improving sleep",
                                              icon = icon("th"),
                                              onclick ="window.open('https://www.autism.org.uk/about/health/sleep.aspx', '_blank')")
             )
   )
)

# Server: echo the selected checkbox values (a vector) back to the client.
server <- function(input, output) {
   output$value <- renderPrint({ input$checkGroup })
}

# Run the application
shinyApp(ui = ui, server = server)
shinyApp(ui = ui, server = server)
|
###################################################################
# Author:
#
# Alfredo Sanchez-Tojar (alfredo.tojar@gmail.com)
# Affiliation: Dept. Evolutionary Biology, Bielefeld University, Germany
# Profile: https://scholar.google.de/citations?user=Sh-Rjq8AAAAJ&hl=de
# Script first created on the 4th of Feb 2020
###################################################################
# Description of script and Instructions
###################################################################
# This script is to import and clean the sheets with data extracted
# from the references included in a review for the following study:
# Antica Culina, Ilona van den Berga, Simon Evans, Alfredo Sanchez-Tojar.
# Low availability of code in ecology: a call for urgent action.
# The are many lines of commented code that were used simply to
# peform checks throughout the dataset. Any typo was then corrected
# in the original dataset, which, together with having two observers
# working on this via email, explains why the version of the dataset
# imported is V8.
###################################################################
# Packages needed
##############################################################
# Load (and install if needed) the packages used below.
pacman::p_load(stringr, openxlsx, dplyr)
# Clear memory. NOTE(review): rm(list = ls()) only clears the global
# environment and is discouraged in scripts meant to be source()d.
rm(list = ls())
##############################################################
# Functions needed
##############################################################
# function obtained from: https://rstudio-pubs-static.s3.amazonaws.com/408658_512da947714740b99253228f084a08a9.html
# this function makes the first letter of a word capital to keep everything tidy and consistent
# Capitalise the first letter of every space-separated word in a string,
# e.g. "hello world" -> "Hello World". Expects a single character value;
# the rest of each word is left unchanged.
CapStr <- function(y) {
  # FIX: the local variable was named `c`, shadowing base::c within the
  # function; renamed to `words` for clarity and safety.
  words <- strsplit(y, " ")[[1]]
  paste(toupper(substring(words, 1, 1)), substring(words, 2),
        sep = "", collapse = " ")
}
##############################################################
# Importing data
##############################################################
# importing data for 2015-2016 (sheet 1).
# TRUE is spelled out: T is an ordinary variable that can be reassigned,
# so relying on it is unsafe.
db.2015.16 <- read.xlsx("data/Data_Feb_2020_V8.xlsx",
                        colNames = TRUE, sheet = 1)
# importing data for 2018-2019 (sheet 2)
db.2018.19 <- read.xlsx("data/Data_Feb_2020_V8.xlsx",
                        colNames = TRUE, sheet = 2)
##############################################################
# Standardizing databases
##############################################################
# Check that both sheets share the same column names: both setdiff()
# calls should come back empty.
setdiff(names(db.2015.16), names(db.2018.19))
setdiff(names(db.2018.19), names(db.2015.16))
# Rename the one mismatched column so the two periods line up
db.2015.16 <- rename(db.2015.16, BreafDescription = "BreafDescription/Howto")
# Re-check the names: identical now, so the sheets can be stacked
setdiff(names(db.2015.16), names(db.2018.19))
setdiff(names(db.2018.19), names(db.2015.16))
# Combine both sampling periods into one database
db.full <- rbind(db.2015.16, db.2018.19)
# exploring database
summary(db.full)
# transforming some variables to factor
cols.factor <- c("fulltextID", "Journal", "Excluded.abstract.screening", "statistical.analysis.or/and.simulations",
"bioinformatic.analysis", "Stat_analysis_software", "CodePublished", "CodeMentioned", "Location_CodeMentioned",
"LocationShared", "Repository", "FreeSoftware", "DataUsed", "DataShared", "BreafDescription",
"InlineComments")
db.full[cols.factor] <- lapply(db.full[cols.factor], factor)
# exploring the database further
summary(db.full)
names(db.full)
#############################################################################################
# exploring each variable and cleaning and standardizing if necessary
#############################################################################################
###############################
# Journal
sort(table(db.full$Journal))
sum(table(db.full$Journal))
# visualizing where the NA's are, if any: none, all fixed
db.full[is.na(db.full$Journal),]
###############################
# Publication_year
table(db.full$Publication_year)
# check if there are NA's: none, all fixed
db.full[is.na(db.full$Publication_year),]
# we are creating a variable with two levels to properly label the two
# period of times that we sampled (we did not concieved it as 4 years, but rather as 2 time points)
db.full$Publication_year.2 <- ifelse(db.full$Publication_year < 2017,
"2015-2016",
"2018-2019")
db.full$Publication_year.2 <- as.factor(db.full$Publication_year.2)
table(db.full$Publication_year.2)
###############################
# Excluded.abstract.screening
table(db.full$Excluded.abstract.screening)
# some yes's need to be standardized: entries other than plain "yes"/"no" are
# conditional yes's carrying the exclusion reason inside the same string.
# NOTE(review): `%in%` returns FALSE for NA, so any NA entries would also be
# selected by the negation below and silently recoded to "yes" -- this assumes
# the column has no NA's at this point (confirm with the table above)
yes.conditionals <- db.full[!(db.full$Excluded.abstract.screening %in% c("yes","no")),"Excluded.abstract.screening"]
# extracting the exclusion reasons of those yes's to add them to the additional.comment.on.analysis variable
# ("[:alpha:]+$" grabs the trailing word of the entry, i.e. the reason keyword)
db.full[!(db.full$Excluded.abstract.screening %in% c("yes","no")),"additional.comment.on.analysis"] <- str_extract(yes.conditionals, "[:alpha:]+$")
db.full[!(db.full$Excluded.abstract.screening %in% c("yes","no")),"Excluded.abstract.screening"] <- "yes"
#restarting factor levels to remove the old ones
db.full$Excluded.abstract.screening <- factor(db.full$Excluded.abstract.screening)
table(db.full$Excluded.abstract.screening) # everything looks good now
###############################
# statistical.analysis.or/and.simulations
# first name needs to be modified to make it easy to handle: get rid of the "/"
# NOTE(review): positional rename -- assumes column 8 is
# "statistical.analysis.or/and.simulations"; confirm if the column order
# upstream ever changes
names(db.full)[8] <- "statistical.analysis.and.or.simulations"
table(db.full$statistical.analysis.and.or.simulations)
# for those named as "yes, implement a model" we are going rename them as "yes, simulation"
# (but the information will still be available in the additional.comment.on.analysis as seen
# in the following check)
# db.full[!(is.na(db.full$statistical.analysis.and.or.simulations)) &
# db.full$statistical.analysis.and.or.simulations=="yes, implement a model",]
# standardizing; `.default = levels(...)` keeps every level not listed on the
# left-hand side unchanged
db.full$statistical.analysis.and.or.simulations <- recode(db.full$statistical.analysis.and.or.simulations,
"yes, simulations" = "yes, simulation",
"yes, statistical and simulations" = "yes, statistical and simulation",
"yes, implement a model" = "yes, simulation",
"yes, methodological and statistical" = "yes, statistical and methodological",
.default = levels(db.full$statistical.analysis.and.or.simulations))
table(db.full$statistical.analysis.and.or.simulations)
# creating a new variable with simply yes or no to make subsetting easier, also, becuase some
# papers are dificult to label (e.g. simulation vs. model), and we are not interesting in strictly
# labelling them per se, just to know if they provide some stats or simulations for which analytical
# code should hopefully be available.
# (NA entries, i.e. papers excluded at abstract screening, stay NA via ifelse)
db.full$statistical.analysis.and.or.simulations.2 <- as.factor(ifelse(as.character(db.full$statistical.analysis.and.or.simulations)=="no",
"no",
"yes"))
table(db.full$statistical.analysis.and.or.simulations.2)
# checking consistency in data collection. For those papers that we reviewed,
# if statistical.analysis.and.or.simulations.2 == "no", there should not be data collected
# summary(db.full[db.full$Excluded.abstract.screening=="no" &
# db.full$statistical.analysis.and.or.simulations.2 == "no",]) #all good!
###############################
# bioinformatic.analysis: keep in mind that it was sometimes tricky to label some papers as
# bioinformatic or no. We reviewed and discussed many cases among observers before final
# decisions.
# frequency of yes/no labels (excluded papers are NA and are dropped by table())
table(db.full$bioinformatic.analysis)
###############################
# Stat_analysis_software
table(db.full$Stat_analysis_software)
# standardizing terminology: first, substituting ',' by 'and'
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, ",", " and")
# making the first letter of a word capital to keep everything tidy and consistent.
# NOTE: CapStr turns NA into the string "NANA" (paste of two NA substrings);
# those are converted back to real NA two lines below via the levels() trick.
db.full$Stat_analysis_software <- sapply(as.character(db.full$Stat_analysis_software), CapStr)
# reformatting NA's
db.full$Stat_analysis_software <- as.factor(db.full$Stat_analysis_software)
levels(db.full$Stat_analysis_software)[levels(db.full$Stat_analysis_software)=='NANA'] <- NA
# a bunch of manual formatting for standardization. Order matters here: the
# "And" -> "and" fix undoes CapStr capitalizing the connector word, and must
# run before the patterns below that contain a lowercase "and".
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "And", "and")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Stata", "STATA")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "SigmaPlot For Windows", "Sigmaplot")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "OpenBUGS", "Openbugs")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "C Compiler", "C")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Hyperniche Version 2.0 and Other", "Hyperniche")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "JAGS Software Via The R2jags In R", "R and JAGS")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Matlab", "MATLAB")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "MATLAB and Maybe Other", "MATLAB")
# NOTE(review): the trailing "?" below is a regex quantifier (it makes the
# final "r" optional), not a literal "?". The entry "Minitab and Other?" is
# still cleaned correctly only because the next line removes the leftover
# literal "?" -- consider fixed() patterns if this section is ever edited.
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Minitab and Other?", "Minitab")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Minitab\\?", "Minitab")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Not Mentioned But Seems To Be Python", "Not Stated")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "GraphPad Prism", "Prism")
# NOTE(review): unusual pattern -- in ICU regex "\\X" matches a grapheme
# cluster, so this appears intended to match any entry containing "Ibm"
# followed by more text; confirm against the raw entries before changing
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software,"\\Ibm\\X+","IBM ILOG CPLEX")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software,"R and Bug","R and Bugs")
db.full$Stat_analysis_software <- factor(db.full$Stat_analysis_software)
table(db.full$Stat_analysis_software)
# checking software and freeness consistency
# db.full[order(db.full$Stat_analysis_software),c("Stat_analysis_software","FreeSoftware")] #all good
# counting articles using R and other software: "R " (R followed by a space)
# only occurs in combined entries such as "R and ..."; NA entries yield NA in
# str_detect and are dropped by table()
table(str_detect(db.full$Stat_analysis_software, "R "))
# counting articles using exclusively R
nrow(db.full[db.full$Stat_analysis_software=="R" & !(is.na(db.full$Stat_analysis_software)),])
###############################
# CodePublished
table(db.full$CodePublished)
# convert all to lower case (note: this turns the factor into a character vector)
db.full$CodePublished <- str_to_lower(db.full$CodePublished, locale = "en")
# creating new variable where embargoed is counted as simply yes
# (.default keeps all non-matched values unchanged; levels() of a character
# vector is NULL, which is recode()'s keep-as-is default)
db.full$CodePublished.2 <- recode(db.full$CodePublished,
"yes, but embargoed" = "yes",
#"some" = "yes",
.default = levels(db.full$CodePublished))
db.full$CodePublished <- factor(db.full$CodePublished)
db.full$CodePublished.2 <- factor(db.full$CodePublished.2)
table(db.full$CodePublished.2)
# checking consistency in data collection. For those papers with some code published,
# there should be data collected about the code
# summary(db.full[!(is.na(db.full$CodePublished.2)) &
# db.full$CodePublished.2=="yes",]) # found a couple of inconsistencies that are now fixed
# db.full[!(is.na(db.full$CodePublished.2)) &
# db.full$CodePublished.2=="yes" &
# is.na(db.full$bioinformatic.analysis),]
# this is an interesting case where code is provided only as an R package,
# but not the code to reproduce the simulation, so we have decided to call it
# CodePublished="no", and we are going to make CodeMentioned and
# Location_CodeMentioned as NA to keep things simple.
# FIX: also guard against NA in CodePublished -- an NA in the row index of a
# subscripted data-frame assignment is an error in R ("missing values are not
# allowed in subscripted assignments"); previously this only ran because every
# row with NA CodePublished also had NA CodeMentioned.
db.full[db.full$CodePublished=="no" &
!(is.na(db.full$CodePublished)) &
!(is.na(db.full$CodeMentioned)),
c("CodeMentioned","Location_CodeMentioned")] <- NA
# creating new variable where some is counted simply as yes
db.full$CodePublished.3 <- recode(db.full$CodePublished.2,
"some" = "yes",
.default = levels(db.full$CodePublished.2))
db.full$CodePublished.3 <- factor(db.full$CodePublished.3)
table(db.full$CodePublished.3)
# some number checking: code sharing per sampling period
table(db.full[db.full$Publication_year.2=="2015-2016","CodePublished.2"])
table(db.full[db.full$Publication_year.2=="2018-2019","CodePublished.2"])
# doing some countings
# number of journals covered each year
db.full %>% group_by(Publication_year.2,Journal) %>% summarise(count = n_distinct(CodePublished.2)) %>% summarise(n = n())
# counting number of articles per year per journal
articles.per.journal <- as.data.frame(db.full %>% group_by(Journal) %>% summarise(n = n()))
# number of journals covered each year
code.published.per.journal <- as.data.frame(db.full %>% filter(CodePublished.2=="yes") %>% group_by(Journal) %>% summarise(total = n()))
# NOTE(review): the cbind below pairs the two summaries by row position; this
# is only correct if both data frames contain the same journals in the same
# order (i.e. every journal has at least one code-sharing article) -- confirm
as.data.frame(cbind(as.character(articles.per.journal$Journal),as.integer(round((code.published.per.journal$total/articles.per.journal$n)*100,0))))
# number of journals covered each year (at least some code)
code.published.per.journal.some <- as.data.frame(db.full %>% filter(CodePublished.3=="yes") %>% group_by(Journal) %>% summarise(total = n()))
as.data.frame(cbind(as.character(articles.per.journal$Journal),as.integer(round((code.published.per.journal.some$total/articles.per.journal$n)*100,0))))
summary(as.numeric(as.character(as.data.frame(cbind(as.character(articles.per.journal$Journal),as.integer(round((code.published.per.journal.some$total/articles.per.journal$n)*100,0))))$V2)))
###############################
# CodeMentioned
table(db.full$CodeMentioned)
# standardizing (.default = levels(...) keeps all non-matched levels unchanged)
db.full$CodeMentioned <- recode(db.full$CodeMentioned,
"yes, script and code" = "yes, code and script",
"yes, but only as \"simulation is available in\"" = "yes, simulation",
"none" = "no",
.default = levels(db.full$CodeMentioned))
# creating a new binary variable to know whether code was mentioned in the text or not
# (NA entries stay NA via ifelse)
db.full$CodeMentioned.2 <- as.factor(ifelse(as.character(db.full$CodeMentioned)=="no",
"no",
"yes"))
table(db.full$CodeMentioned.2)
###############################
# Location_CodeMentioned
table(db.full$Location_CodeMentioned)
# where the code was said to be, split by sampling period
table(db.full[db.full$Publication_year.2=="2015-2016","Location_CodeMentioned"])
table(db.full[db.full$Publication_year.2=="2018-2019","Location_CodeMentioned"])
###############################
# LocationShared
table(db.full$LocationShared)
# creating a new level after suggestion of reviewer #2. This new level is "version control online repository"
# for those codes only hosted in GitHub.
# The column is converted to character first so the new value can be assigned
# without triggering an invalid-factor-level warning; note the check tests both
# "Github" and "GitHub" because the Repository spelling is standardized only
# later in this script.
db.full$LocationShared <- as.character(db.full$LocationShared)
db.full[(db.full$Repository=="Github" | db.full$Repository=="GitHub") & !(is.na(db.full$Repository)) ,"LocationShared"] <- "version control platform"
db.full$LocationShared <- as.factor(db.full$LocationShared)
table(db.full$LocationShared)
# creating new, recoded variable. We are however still considering GitHub as a repository for the figure
# to make things simpler but we specify the difference in the main text.
db.full$LocationShared.2 <- recode(db.full$LocationShared,
"link in the article" = "supplementary file",
"repository and supplementary file" = "repository",
"version control platform" = "repository",
"webpage (govermental)" = "webpage",
.default = levels(db.full$LocationShared))
table(db.full$LocationShared.2)
# sharing location split by sampling period
table(db.full[db.full$Publication_year.2=="2015-2016","LocationShared.2"])
table(db.full[db.full$Publication_year.2=="2018-2019","LocationShared.2"])
###############################
# Repository
table(db.full$Repository)
# some manual editing to standardize. Order matters: the case fixes run first
# so the combined-repository patterns below match the corrected spellings;
# "\\+" is an escaped literal plus sign.
db.full$Repository <- str_replace_all(db.full$Repository, "dryad", "Dryad")
db.full$Repository <- str_replace_all(db.full$Repository, "FigShare", "Figshare")
db.full$Repository <- str_replace_all(db.full$Repository, "Github", "GitHub")
db.full$Repository <- str_replace_all(db.full$Repository, "GitHub and Zenodo", "Zenodo and GitHub")
db.full$Repository <- str_replace_all(db.full$Repository, "GitHub \\+ Dryad", "Dryad and GitHub")
db.full$Repository <- str_replace_all(db.full$Repository, "Zenodo \\+ GitHub", "Zenodo and GitHub")
table(db.full$Repository)
###############################
# FreeSoftware
table(db.full$FreeSoftware)
# standardizing: "unknown" is first recoded to the literal string "NA", which
# the levels() assignment below then turns into a real missing value
db.full$FreeSoftware <- recode(db.full$FreeSoftware,
"some" = "partially",
"unknown" = "NA",
.default = levels(db.full$FreeSoftware))
levels(db.full$FreeSoftware)[levels(db.full$FreeSoftware)=='NA'] <- NA
# creating new, recoded variable were for those for which at least some software is free, we label them as FreeSoftware=="yes"
db.full$FreeSoftware.2 <- recode(db.full$FreeSoftware,
"partially" = "yes",
.default = levels(db.full$FreeSoftware))
table(db.full$FreeSoftware.2)
sum(table(db.full$FreeSoftware.2))# makes sense because there are 32 articles where software was not stated
table(db.full[db.full$CodePublished.3=="yes" & !(is.na(db.full$CodePublished.3)),"FreeSoftware.2"]) #one article provides code, but does not state the software, and we could not figure it out
###############################
# DataUsed
table(db.full$DataUsed)
###############################
# DataShared
table(db.full$DataShared)
# creating new, recoded variable: collapse the many free-text answers into
# yes / partially / no (.default keeps all non-matched levels unchanged)
db.full$DataShared.2 <- recode(db.full$DataShared,
"yes, but embargoed" = "yes",
"some" = "partially",
"link broken" = "no",
"yes, but in a repository that requires log in, which seems to be free" = "yes",
"yes, but in another publication" = "yes",
"yes, but it consists of means and SEs only, so not really the data" = "no",
"yes, but one dataset embargoed" = "yes",
"yes, but only sequencing and geographical data" = "partially",
"yes, but only sequencing data" = "partially",
"yes, but only SNP data" = "partially",
"yes, but only some" = "partially",
"yes, mostly, but for some of the data one has to ask a govermental department" = "yes",
.default = levels(db.full$DataShared))
table(db.full$DataShared.2)
# checking number of papers with all code and all data
#summary(db.full[(db.full$CodePublished.2=="yes" & !(is.na(db.full$CodePublished.2))) & (db.full$DataShared.2=="yes" | is.na(db.full$DataShared.2)),])
# creating new binary variable: partially shared counts as shared
db.full$DataShared.3 <- recode(db.full$DataShared.2,
"partially" = "yes",
.default = levels(db.full$DataShared.2))
table(db.full$DataShared.3)
# table(db.full[db.full$statistical.analysis.and.or.simulations.2=="yes" & !(is.na(db.full$statistical.analysis.and.or.simulations.2)),"DataShared.3"])
# nrow(db.full[db.full$statistical.analysis.and.or.simulations.2=="yes" & !(is.na(db.full$statistical.analysis.and.or.simulations.2)) & is.na(db.full$DataShared.3),])
# table(db.full[db.full$statistical.analysis.and.or.simulations.2=="yes" & !(is.na(db.full$statistical.analysis.and.or.simulations.2)) & is.na(db.full$DataShared.3),"DataUsed"])
###############################
# Second_screener
table(db.full$Second_screener)
###############################
# adding doi's
# from the title and abstract screening database
# (FIX: TRUE spelled out instead of the reassignable shorthand T)
doi.2018.19 <- read.table("screening_process/title-and-abstract_screening/random_200_2018_2019_rayyan.csv",header=TRUE,sep=",")
# reducing database to the two columns needed for the merge
doi.2018.19.red <- doi.2018.19[,c("title","url")]
names(doi.2018.19.red) <- c("Title","doi")
# removing {} to make matching easier
doi.2018.19.red$Title <- str_remove_all(doi.2018.19.red$Title, "[{}]")
db.full$Title <- str_remove_all(db.full$Title, "[{}]")
# adding dois by merging by title
# first, are the titles the same (i.e. format is the same, no need to worry)
setdiff(db.full[db.full$Publication_year>2017,"Title"],doi.2018.19.red$Title)
setdiff(doi.2018.19.red$Title,db.full[db.full$Publication_year>2017,"Title"])
# everything matches, therefore I can add dois to the database
# (all.x = TRUE keeps every row of db.full, including pre-2017 papers with no match)
db.full.doi <- merge(db.full,doi.2018.19.red,by="Title",all.x=TRUE)
# adding a missing doi manually
db.full.doi$doi <- as.character(db.full.doi$doi)
db.full.doi[db.full.doi$fulltextID=="CAE168" & !(is.na(db.full.doi$fulltextID)),"doi"]<-"10.1002/ecy.2191"
# adding doi's from before 2017: for those rows the fulltextID is used as the
# identifier in the doi column
db.full.doi$fulltextID <- as.character(db.full.doi$fulltextID)
db.full.doi$doi <- ifelse(is.na(db.full.doi$doi),
db.full.doi$fulltextID,
db.full.doi$doi)
db.full.doi$doi <- as.factor(db.full.doi$doi)
summary(db.full.doi)
###############################
# Export the cleaned, reduced dataset
# Columns retained for the analysis-ready version of the database.
export.cols <- c(
"doi","Journal","Publication_year","Publication_year.2",
"Excluded.abstract.screening","statistical.analysis.and.or.simulations.2",
"bioinformatic.analysis","additional.comment.on.analysis",
"Stat_analysis_software","CodePublished","CodePublished.2","CodePublished.3","LinktoCode",
"BreafDescription","InlineComments",
"CodeMentioned","CodeMentioned.2","Location_CodeMentioned","LocationShared","LocationShared.2",
"Repository","FreeSoftware","DataUsed","DataShared.2","DataShared.3",
"ExtraComments","Second_screener","Changes_after_second_screening",
"Remarks_on_decisions")
db.full.doi.reduced <- db.full.doi[, export.cols]
write.csv(db.full.doi.reduced,"data/code_availability_full_and_clean.csv",
row.names=FALSE)
# Some extra checks:
# double checking consistency for those studies with simulations and code, data should be at least partially shared (depends, though, think about it) (if the code provided generates the data)
# db.full[db.full$statistical.analysis.and.or.simulations=="yes, simulation" &
# !(is.na(db.full$statistical.analysis.and.or.simulations)) &
# db.full$CodePublished=="yes" & db.full$DataUsed=="yes",
# c("statistical.analysis.and.or.simulations","CodePublished","DataUsed","DataShared")]
# db.full[db.full$statistical.analysis.and.or.simulations=="yes, statistical and simulation" &
# !(is.na(db.full$statistical.analysis.and.or.simulations)) &
# db.full$CodePublished!="no",
# c("statistical.analysis.and.or.simulations","CodePublished","DataUsed","DataShared")]
# # checking papers for which only simulations where run to see if CodePublished DataUsed and DataShared agree
# db.full[!(is.na(db.full$statistical.analysis.and.or.simulations)) &
# db.full$statistical.analysis.and.or.simulations=="yes, simulation",
# c("fulltextID","Authors","Publication_year","CodePublished","DataUsed","DataShared","Second_screener")]
| /003_data_cleaning_and_standardization.R | permissive | ASanchez-Tojar/code_in_ecology | R | false | false | 24,213 | r | ###################################################################
# Author:
#
# Alfredo Sanchez-Tojar (alfredo.tojar@gmail.com)
# Affiliation: Dept. Evolutionary Biology, Bielefeld University, Germany
# Profile: https://scholar.google.de/citations?user=Sh-Rjq8AAAAJ&hl=de
# Script first created on the 4th of Feb 2020
###################################################################
# Description of script and Instructions
###################################################################
# This script is to import and clean the sheets with data extracted
# from the references included in a review for the following study:
# Antica Culina, Ilona van den Berga, Simon Evans, Alfredo Sanchez-Tojar.
# Low availability of code in ecology: a call for urgent action.
# The are many lines of commented code that were used simply to
# peform checks throughout the dataset. Any typo was then corrected
# in the original dataset, which, together with having two observers
# working on this via email, explains why the version of the dataset
# imported is V8.
###################################################################
# Packages needed
##############################################################
pacman::p_load(stringr,openxlsx,dplyr)
# Clear memory
rm(list=ls())
##############################################################
# Functions needed
##############################################################
# function obtained from: https://rstudio-pubs-static.s3.amazonaws.com/408658_512da947714740b99253228f084a08a9.html
# this function makes the first letter of a word capital to keep everything tidy and consistent
CapStr <- function(y) {
c <- strsplit(y, " ")[[1]]
paste(toupper(substring(c, 1,1)), substring(c, 2),
sep="", collapse=" ")
}
##############################################################
# Importing data
##############################################################
# importing data for 2015-2016
db.2015.16 <- read.xlsx("data/Data_Feb_2020_V8.xlsx",
colNames = T,sheet = 1)
# importing data for 2018-2019
db.2018.19 <- read.xlsx("data/Data_Feb_2020_V8.xlsx",
colNames = T ,sheet = 2)
##############################################################
# Standardizing databases
##############################################################
# are the variable names the same?
setdiff(names(db.2015.16),names(db.2018.19))
setdiff(names(db.2018.19),names(db.2015.16))
# changing name of a variable to standardize across datasets
db.2015.16 <- rename(db.2015.16, BreafDescription = "BreafDescription/Howto")
# are the variable names the same? Yes
setdiff(names(db.2015.16),names(db.2018.19))
setdiff(names(db.2018.19),names(db.2015.16))
# putting both databases together
db.full <- rbind(db.2015.16,db.2018.19)
# exploring database
summary(db.full)
# transforming some variables to factor
cols.factor <- c("fulltextID", "Journal", "Excluded.abstract.screening", "statistical.analysis.or/and.simulations",
"bioinformatic.analysis", "Stat_analysis_software", "CodePublished", "CodeMentioned", "Location_CodeMentioned",
"LocationShared", "Repository", "FreeSoftware", "DataUsed", "DataShared", "BreafDescription",
"InlineComments")
db.full[cols.factor] <- lapply(db.full[cols.factor], factor)
# exploring the database further
summary(db.full)
names(db.full)
#############################################################################################
# exploring each variable and cleaning and standardizing if necessary
#############################################################################################
###############################
# Journal
sort(table(db.full$Journal))
sum(table(db.full$Journal))
# visualizing where the NA's are, if any: none, all fixed
db.full[is.na(db.full$Journal),]
###############################
# Publication_year
table(db.full$Publication_year)
# check if there are NA's: none, all fixed
db.full[is.na(db.full$Publication_year),]
# we are creating a variable with two levels to properly label the two
# period of times that we sampled (we did not concieved it as 4 years, but rather as 2 time points)
db.full$Publication_year.2 <- ifelse(db.full$Publication_year < 2017,
"2015-2016",
"2018-2019")
db.full$Publication_year.2 <- as.factor(db.full$Publication_year.2)
table(db.full$Publication_year.2)
###############################
# Excluded.abstract.screening
table(db.full$Excluded.abstract.screening)
# some yes's need to be standardized
yes.conditionals <- db.full[!(db.full$Excluded.abstract.screening %in% c("yes","no")),"Excluded.abstract.screening"]
# extracting the exclusion reasons of those yes's to add them to the additional.comment.on.analysis variable
db.full[!(db.full$Excluded.abstract.screening %in% c("yes","no")),"additional.comment.on.analysis"] <- str_extract(yes.conditionals, "[:alpha:]+$")
db.full[!(db.full$Excluded.abstract.screening %in% c("yes","no")),"Excluded.abstract.screening"] <- "yes"
#restarting factor levels to remove the old ones
db.full$Excluded.abstract.screening <- factor(db.full$Excluded.abstract.screening)
table(db.full$Excluded.abstract.screening) # everything looks good now
###############################
# statistical.analysis.or/and.simulations
# first name needs to be modified to make it easy to handle: get rid of the "/"
names(db.full)[8] <- "statistical.analysis.and.or.simulations"
table(db.full$statistical.analysis.and.or.simulations)
# for those named as "yes, implement a model" we are going rename them as "yes, simulation"
# (but the information will still be available in the additional.comment.on.analysis as seen
# in the following check)
# db.full[!(is.na(db.full$statistical.analysis.and.or.simulations)) &
# db.full$statistical.analysis.and.or.simulations=="yes, implement a model",]
# standardizing
db.full$statistical.analysis.and.or.simulations <- recode(db.full$statistical.analysis.and.or.simulations,
"yes, simulations" = "yes, simulation",
"yes, statistical and simulations" = "yes, statistical and simulation",
"yes, implement a model" = "yes, simulation",
"yes, methodological and statistical" = "yes, statistical and methodological",
.default = levels(db.full$statistical.analysis.and.or.simulations))
table(db.full$statistical.analysis.and.or.simulations)
# creating a new variable with simply yes or no to make subsetting easier, also, becuase some
# papers are dificult to label (e.g. simulation vs. model), and we are not interesting in strictly
# labelling them per se, just to know if they provide some stats or simulations for which analytical
# code should hopefully be available.
db.full$statistical.analysis.and.or.simulations.2 <- as.factor(ifelse(as.character(db.full$statistical.analysis.and.or.simulations)=="no",
"no",
"yes"))
table(db.full$statistical.analysis.and.or.simulations.2)
# checking consistency in data collection. For those papers that we reviewed,
# if statistical.analysis.and.or.simulations.2 == "no", there should not be data collected
# summary(db.full[db.full$Excluded.abstract.screening=="no" &
# db.full$statistical.analysis.and.or.simulations.2 == "no",]) #all good!
###############################
# bioinformatic.analysis: keep in mind that it was sometimes tricky to label some papers as
# bioinformatic or no. We reviewed and discussed many cases among observers before final
# decisions.
table(db.full$bioinformatic.analysis)
###############################
# Stat_analysis_software
table(db.full$Stat_analysis_software)
# standardizing terminology: first, substituting ',' by 'and'
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, ",", " and")
# making the first letter of a word capital to keep everything tidy and consistent
db.full$Stat_analysis_software <- sapply(as.character(db.full$Stat_analysis_software), CapStr)
# reformatting NA's
db.full$Stat_analysis_software <- as.factor(db.full$Stat_analysis_software)
levels(db.full$Stat_analysis_software)[levels(db.full$Stat_analysis_software)=='NANA'] <- NA
# a bunch of manual formatting for standardization
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "And", "and")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Stata", "STATA")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "SigmaPlot For Windows", "Sigmaplot")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "OpenBUGS", "Openbugs")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "C Compiler", "C")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Hyperniche Version 2.0 and Other", "Hyperniche")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "JAGS Software Via The R2jags In R", "R and JAGS")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Matlab", "MATLAB")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "MATLAB and Maybe Other", "MATLAB")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Minitab and Other?", "Minitab")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Minitab\\?", "Minitab")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "Not Mentioned But Seems To Be Python", "Not Stated")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software, "GraphPad Prism", "Prism")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software,"\\Ibm\\X+","IBM ILOG CPLEX")
db.full$Stat_analysis_software <- str_replace_all(db.full$Stat_analysis_software,"R and Bug","R and Bugs")
db.full$Stat_analysis_software <- factor(db.full$Stat_analysis_software)
table(db.full$Stat_analysis_software)
# checking software and freeness consistency
# db.full[order(db.full$Stat_analysis_software),c("Stat_analysis_software","FreeSoftware")] #all good
# counting articles using R and other software
table(str_detect(db.full$Stat_analysis_software, "R "))
# counting articles using exclusively R
nrow(db.full[db.full$Stat_analysis_software=="R" & !(is.na(db.full$Stat_analysis_software)),])
###############################
# CodePublished
table(db.full$CodePublished)
# convert all to lower case
db.full$CodePublished <- str_to_lower(db.full$CodePublished, locale = "en")
# creating new variable where embargoed is counted as simply yes
db.full$CodePublished.2 <- recode(db.full$CodePublished,
"yes, but embargoed" = "yes",
#"some" = "yes",
.default = levels(db.full$CodePublished))
db.full$CodePublished <- factor(db.full$CodePublished)
db.full$CodePublished.2 <- factor(db.full$CodePublished.2)
table(db.full$CodePublished.2)
# checking consistency in data collection. For those papers with some code published,
# there should be data collected about the code
# summary(db.full[!(is.na(db.full$CodePublished.2)) &
# db.full$CodePublished.2=="yes",]) # found a couple of inconsistencies that are now fixed
# db.full[!(is.na(db.full$CodePublished.2)) &
# db.full$CodePublished.2=="yes" &
# is.na(db.full$bioinformatic.analysis),]
# this is an interesting case where code is provided only as an R package,
# but not the code to reproduce the simulation, so we have decided to call it
# CodePublished="no", and we are going to make CodeMentioned and
# Location_CodeMentioned as NA to keep things simple
db.full[db.full$CodePublished=="no" &
!(is.na(db.full$CodeMentioned)),
c("CodeMentioned","Location_CodeMentioned")] <- NA
# creating new variable where some is counted simply as yes
db.full$CodePublished.3 <- recode(db.full$CodePublished.2,
"some" = "yes",
.default = levels(db.full$CodePublished.2))
db.full$CodePublished.3 <- factor(db.full$CodePublished.3)
table(db.full$CodePublished.3)
# some number checking
table(db.full[db.full$Publication_year.2=="2015-2016","CodePublished.2"])
table(db.full[db.full$Publication_year.2=="2018-2019","CodePublished.2"])
# doing some countings
# number of journals covered each year
db.full %>% group_by(Publication_year.2,Journal) %>% summarise(count = n_distinct(CodePublished.2)) %>% summarise(n = n())
# counting number of articles per year per journal
articles.per.journal <- as.data.frame(db.full %>% group_by(Journal) %>% summarise(n = n()))
# number of journals covered each year
code.published.per.journal <- as.data.frame(db.full %>% filter(CodePublished.2=="yes") %>% group_by(Journal) %>% summarise(total = n()))
as.data.frame(cbind(as.character(articles.per.journal$Journal),as.integer(round((code.published.per.journal$total/articles.per.journal$n)*100,0))))
# number of journals covered each year (at least some code)
code.published.per.journal.some <- as.data.frame(db.full %>% filter(CodePublished.3=="yes") %>% group_by(Journal) %>% summarise(total = n()))
as.data.frame(cbind(as.character(articles.per.journal$Journal),as.integer(round((code.published.per.journal.some$total/articles.per.journal$n)*100,0))))
summary(as.numeric(as.character(as.data.frame(cbind(as.character(articles.per.journal$Journal),as.integer(round((code.published.per.journal.some$total/articles.per.journal$n)*100,0))))$V2)))
###############################
# CodeMentioned
table(db.full$CodeMentioned)
# standardizing
db.full$CodeMentioned <- recode(db.full$CodeMentioned,
"yes, script and code" = "yes, code and script",
"yes, but only as \"simulation is available in\"" = "yes, simulation",
"none" = "no",
.default = levels(db.full$CodeMentioned))
# creating a new binary variable to know whether code was mentioned in the text or not
db.full$CodeMentioned.2 <- as.factor(ifelse(as.character(db.full$CodeMentioned)=="no",
"no",
"yes"))
table(db.full$CodeMentioned.2)
###############################
# Location_CodeMentioned
table(db.full$Location_CodeMentioned)
table(db.full[db.full$Publication_year.2=="2015-2016","Location_CodeMentioned"])
table(db.full[db.full$Publication_year.2=="2018-2019","Location_CodeMentioned"])
###############################
# LocationShared
table(db.full$LocationShared)
# creating a new level after suggestion of reviewer #2. This new level is "version control online repository"
# for those codes only hosted in GitHub
db.full$LocationShared <- as.character(db.full$LocationShared)
db.full[(db.full$Repository=="Github" | db.full$Repository=="GitHub") & !(is.na(db.full$Repository)) ,"LocationShared"] <- "version control platform"
db.full$LocationShared <- as.factor(db.full$LocationShared)
table(db.full$LocationShared)
# creating new, recoded variable. We are however still considering GitHub as a repository for the figure
# to make things simpler but we specify the difference in the main text.
db.full$LocationShared.2 <- recode(db.full$LocationShared,
"link in the article" = "supplementary file",
"repository and supplementary file" = "repository",
"version control platform" = "repository",
"webpage (govermental)" = "webpage",
.default = levels(db.full$LocationShared))
table(db.full$LocationShared.2)
table(db.full[db.full$Publication_year.2=="2015-2016","LocationShared.2"])
table(db.full[db.full$Publication_year.2=="2018-2019","LocationShared.2"])
###############################
# Repository
table(db.full$Repository)
# some manual editing to standardize
db.full$Repository <- str_replace_all(db.full$Repository, "dryad", "Dryad")
db.full$Repository <- str_replace_all(db.full$Repository, "FigShare", "Figshare")
db.full$Repository <- str_replace_all(db.full$Repository, "Github", "GitHub")
db.full$Repository <- str_replace_all(db.full$Repository, "GitHub and Zenodo", "Zenodo and GitHub")
db.full$Repository <- str_replace_all(db.full$Repository, "GitHub \\+ Dryad", "Dryad and GitHub")
db.full$Repository <- str_replace_all(db.full$Repository, "Zenodo \\+ GitHub", "Zenodo and GitHub")
table(db.full$Repository)
###############################
# FreeSoftware
table(db.full$FreeSoftware)
# standardizing
db.full$FreeSoftware <- recode(db.full$FreeSoftware,
"some" = "partially",
"unknown" = "NA",
.default = levels(db.full$FreeSoftware))
levels(db.full$FreeSoftware)[levels(db.full$FreeSoftware)=='NA'] <- NA
# creating new, recoded variable were for those for which at least some software is free, we label them as FreeSoftware=="yes"
db.full$FreeSoftware.2 <- recode(db.full$FreeSoftware,
"partially" = "yes",
.default = levels(db.full$FreeSoftware))
table(db.full$FreeSoftware.2)
sum(table(db.full$FreeSoftware.2))# makes sense because there are 32 articles where software was not stated
table(db.full[db.full$CodePublished.3=="yes" & !(is.na(db.full$CodePublished.3)),"FreeSoftware.2"]) #one article provides code, but does not state the software, and we could not figure it out
###############################
# DataUsed
table(db.full$DataUsed)
###############################
# DataShared
table(db.full$DataShared)
# creating new, recoded variable
db.full$DataShared.2 <- recode(db.full$DataShared,
"yes, but embargoed" = "yes",
"some" = "partially",
"link broken" = "no",
"yes, but in a repository that requires log in, which seems to be free" = "yes",
"yes, but in another publication" = "yes",
"yes, but it consists of means and SEs only, so not really the data" = "no",
"yes, but one dataset embargoed" = "yes",
"yes, but only sequencing and geographical data" = "partially",
"yes, but only sequencing data" = "partially",
"yes, but only SNP data" = "partially",
"yes, but only some" = "partially",
"yes, mostly, but for some of the data one has to ask a govermental department" = "yes",
.default = levels(db.full$DataShared))
table(db.full$DataShared.2)
# checking number of papers with all code and all data
#summary(db.full[(db.full$CodePublished.2=="yes" & !(is.na(db.full$CodePublished.2))) & (db.full$DataShared.2=="yes" | is.na(db.full$DataShared.2)),])
# creating new binary variable
db.full$DataShared.3 <- recode(db.full$DataShared.2,
"partially" = "yes",
.default = levels(db.full$DataShared.2))
table(db.full$DataShared.3)
# table(db.full[db.full$statistical.analysis.and.or.simulations.2=="yes" & !(is.na(db.full$statistical.analysis.and.or.simulations.2)),"DataShared.3"])
# nrow(db.full[db.full$statistical.analysis.and.or.simulations.2=="yes" & !(is.na(db.full$statistical.analysis.and.or.simulations.2)) & is.na(db.full$DataShared.3),])
# table(db.full[db.full$statistical.analysis.and.or.simulations.2=="yes" & !(is.na(db.full$statistical.analysis.and.or.simulations.2)) & is.na(db.full$DataShared.3),"DataUsed"])
###############################
# Second_screener
table(db.full$Second_screener)
###############################
# adding doi's
# from the title and abstract screening database
doi.2018.19 <- read.table("screening_process/title-and-abstract_screening/random_200_2018_2019_rayyan.csv",header=T,sep=",")
# reducing database
doi.2018.19.red <- doi.2018.19[,c("title","url")]
names(doi.2018.19.red) <- c("Title","doi")
# removing {} to make matching easier
doi.2018.19.red$Title <- str_remove_all(doi.2018.19.red$Title, "[{}]")
db.full$Title <- str_remove_all(db.full$Title, "[{}]")
# adding dois by merging by title
# first, are the titles the same (i.e. format is the same, no need to worry)
setdiff(db.full[db.full$Publication_year>2017,"Title"],doi.2018.19.red$Title)
setdiff(doi.2018.19.red$Title,db.full[db.full$Publication_year>2017,"Title"])
# everything matches, therefore I can add dois to the database
db.full.doi <- merge(db.full,doi.2018.19.red,by="Title",all.x=T)
# adding a missing doi manually
db.full.doi$doi <- as.character(db.full.doi$doi)
db.full.doi[db.full.doi$fulltextID=="CAE168" & !(is.na(db.full.doi$fulltextID)),"doi"]<-"10.1002/ecy.2191"
# adding doi's from before 2017
db.full.doi$fulltextID <- as.character(db.full.doi$fulltextID)
db.full.doi$doi <- ifelse(is.na(db.full.doi$doi),
db.full.doi$fulltextID,
db.full.doi$doi)
db.full.doi$doi <- as.factor(db.full.doi$doi)
summary(db.full.doi)
###############################
# exporting clean dataset
db.full.doi.reduced <- db.full.doi[,c("doi","Journal","Publication_year","Publication_year.2",
"Excluded.abstract.screening","statistical.analysis.and.or.simulations.2",
"bioinformatic.analysis","additional.comment.on.analysis",
"Stat_analysis_software","CodePublished","CodePublished.2","CodePublished.3","LinktoCode",
"BreafDescription","InlineComments",
"CodeMentioned","CodeMentioned.2","Location_CodeMentioned","LocationShared","LocationShared.2",
"Repository","FreeSoftware","DataUsed","DataShared.2","DataShared.3",
"ExtraComments","Second_screener","Changes_after_second_screening",
"Remarks_on_decisions")]
write.csv(db.full.doi.reduced,"data/code_availability_full_and_clean.csv",
row.names=FALSE)
# Some extra checks:
# double checking consistency for those studies with simulations and code, data should be at least partially shared (depends, though, think about it) (if the code provided generates the data)
# db.full[db.full$statistical.analysis.and.or.simulations=="yes, simulation" &
# !(is.na(db.full$statistical.analysis.and.or.simulations)) &
# db.full$CodePublished=="yes" & db.full$DataUsed=="yes",
# c("statistical.analysis.and.or.simulations","CodePublished","DataUsed","DataShared")]
# db.full[db.full$statistical.analysis.and.or.simulations=="yes, statistical and simulation" &
# !(is.na(db.full$statistical.analysis.and.or.simulations)) &
# db.full$CodePublished!="no",
# c("statistical.analysis.and.or.simulations","CodePublished","DataUsed","DataShared")]
# # checking papers for which only simulations where run to see if CodePublished DataUsed and DataShared agree
# db.full[!(is.na(db.full$statistical.analysis.and.or.simulations)) &
# db.full$statistical.analysis.and.or.simulations=="yes, simulation",
# c("fulltextID","Authors","Publication_year","CodePublished","DataUsed","DataShared","Second_screener")]
|
# Course Project 1, Exploratory Data Analysis -- plot2.R
# Draws Global Active Power against time and saves it as plot2.png
# (png() defaults to a 480x480 pixel device).
# NOTE(review): expects the pre-filtered 1-2 Feb 2007 extract at the
# relative path below -- run from the repository root.
pc <- read.csv(
  "../exdata-data-household_power_consumption/household_power_consumption.01-02Feb2007.txt",
  sep = ";",
  colClasses = c("character", "character", "numeric", "numeric", "numeric",
                 "numeric", "numeric", "numeric", "numeric")
)
# Combine the Date and Time text columns into a single timestamp column.
dt <- with(pc, strptime(paste(Date, Time), "%d/%m/%Y %T"))
power_consumption <- cbind(dt, pc)

png("plot2.png")
par(ps = 12)  # 12 pt base font size
with(power_consumption, {
  # Draw an empty frame first, then add the line on top.
  plot(dt, Global_active_power, type = "n",
       xlab = "", ylab = "Global Active Power (kilowatts)")
  lines(dt, Global_active_power)
})
dev.off()
| /plot2.R | no_license | kwhwong/ExData_Plotting1 | R | false | false | 777 | r | # Course Project 1 of Exploratory Data Analysis
# plot2.R
# Construct Plot 2 and save as a PNG file (480x480 pixels)
#setwd( paste(getwd(), "/Coursera/Exploratory Data Analysis/ExData_Plotting1", sep="") )
pc <- read.csv( "../exdata-data-household_power_consumption/household_power_consumption.01-02Feb2007.txt", sep=";",
colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric") )
dt <- with( pc, strptime( paste(Date, Time ), "%d/%m/%Y %T") )
power_consumption <- cbind( dt, pc )
png("plot2.png")
par(ps=12)
with( power_consumption, {
plot( dt, Global_active_power, type="n", xlab="", ylab="Global Active Power (kilowatts)" )
lines( dt, Global_active_power )
} )
dev.off()
|
library(LearnGeom)
### Name: SimilarPolygon
### Title: Creates a similar polygon to a given one
### Aliases: SimilarPolygon
### ** Examples
x_min <- -5
x_max <- 5
y_min <- -5
y_max <- 5
CoordinatePlane(x_min, x_max, y_min, y_max)
P1 <- c(0,0)
P2 <- c(1,1)
P3 <- c(2,0)
Poly <- CreatePolygon(P1, P2, P3)
Draw(Poly, "blue")
k <- 2
Poly_similar <- SimilarPolygon(Poly, k)
Draw(Translate(Poly_similar, c(-1,2)), "orange")
| /data/genthat_extracted_code/LearnGeom/examples/SimilarPolygon.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 426 | r | library(LearnGeom)
### Name: SimilarPolygon
### Title: Creates a similar polygon to a given one
### Aliases: SimilarPolygon
### ** Examples
x_min <- -5
x_max <- 5
y_min <- -5
y_max <- 5
CoordinatePlane(x_min, x_max, y_min, y_max)
P1 <- c(0,0)
P2 <- c(1,1)
P3 <- c(2,0)
Poly <- CreatePolygon(P1, P2, P3)
Draw(Poly, "blue")
k <- 2
Poly_similar <- SimilarPolygon(Poly, k)
Draw(Translate(Poly_similar, c(-1,2)), "orange")
|
\docType{data}
\name{OCN_400_Allout}
\alias{OCN_400_Allout}
\title{Example of OCN with all perimetric pixels as outlets}
\format{A list. See \code{\link{create_OCN}} documentation for details.}
\usage{
data(OCN_400_Allout)
}
\description{
A network built on a 400x400 lattice obtained by executing \code{set.seed(8); create_OCN(400, 400, nOutlet = "All", cellsize = 50)}.
}
\keyword{datasets}
| /man/OCN_400_Allout.Rd | no_license | jacquetclaire/OCNet | R | false | false | 405 | rd | \docType{data}
\name{OCN_400_Allout}
\alias{OCN_400_Allout}
\title{Example of OCN with all perimetric pixels as outlets}
\format{A list. See \code{\link{create_OCN}} documentation for details.}
\usage{
data(OCN_400_Allout)
}
\description{
A network built on a 400x400 lattice obtained by executing \code{set.seed(8); create_OCN(400, 400, nOutlet = "All", cellsize = 50)}.
}
\keyword{datasets}
|
testlist <- list(type = 0L, z = 3.31425658083704e-318)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609890290-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 108 | r | testlist <- list(type = 0L, z = 3.31425658083704e-318)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
## Constructor for a cache-aware matrix wrapper. The returned value is a
## list of four closures sharing this call's environment: set/get access
## the wrapped matrix, setInverse/getInverse access the cached inverse.
makeCacheMatrix <- function(m = matrix()){
  cached_inverse <- NULL
  list(
    set = function(matrix) {
      # Storing a new matrix invalidates any previously cached inverse.
      m <<- matrix
      cached_inverse <<- NULL
    },
    get = function() m,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## Inverse of a cache-aware matrix created by makeCacheMatrix().
## A previously computed inverse is returned straight from the cache
## (announced via message()); otherwise it is computed with solve(),
## stored for later calls, and returned. `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (is.null(inv)) {
    # Cache miss: invert the underlying matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$setInverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
| /cachematrix.R | no_license | finite/ProgrammingAssignment2 | R | false | false | 1,030 | r | ## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(m = matrix()){
i <- NULL
set <- function(matrix){
m <<- matrix
i <<-NULL
}
get <- function() m
setInverse <- function(inverse) i <<- inverse
getInverse <- function() i
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...){
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/p_load.R
\name{p_load}
\alias{p_load}
\title{Load One or More Packages}
\usage{
p_load(..., char, install = TRUE, update = getOption("pac_update"),
character.only = FALSE)
}
\arguments{
\item{char}{Character vector containing packages to load. If you are calling
\code{p_load} from within a function (or just having difficulties calling it
using a character vector input) then pass your character vector of packages
to load to this parameter directly.}
\item{install}{logical. If \code{TRUE} will attempt to install a package not
found in the library.}
\item{update}{logical. If \code{TRUE} will attempt to update all out of date
packages. Default allows the user to set a \code{"pac_update"} in his/her
.Rprofile.}
\item{character.only}{logical. If \code{TRUE} then \code{p_load} will only
accept a single input which is a character vector containing the names of
packages to load.}
\item{\ldots}{name(s) of package(s).}
}
\description{
This function is a wrapper for \code{\link[base]{library}} and
\code{\link[base]{require}}. It checks to see if a
package is installed, if not it attempts to install the package from CRAN
and/or any other repository in the \pkg{pacman} repository list.
}
\examples{
\dontrun{
p_load(lattice)
p_unload(lattice)
p_load(lattice, foreign, boot, rpart)
p_loaded()
p_unload(lattice, foreign, boot, rpart)
p_loaded()
}
}
\seealso{
\code{\link[base]{library}},
\code{\link[base]{require}},
\code{\link[utils]{install.packages}}
}
| /man/p_load.Rd | no_license | khughitt/pacman | R | false | false | 1,558 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/p_load.R
\name{p_load}
\alias{p_load}
\title{Load One or More Packages}
\usage{
p_load(..., char, install = TRUE, update = getOption("pac_update"),
character.only = FALSE)
}
\arguments{
\item{char}{Character vector containing packages to load. If you are calling
\code{p_load} from within a function (or just having difficulties calling it
using a character vector input) then pass your character vector of packages
to load to this parameter directly.}
\item{install}{logical. If \code{TRUE} will attempt to install a package not
found in the library.}
\item{update}{logical. If \code{TRUE} will attempt to update all out of date
packages. Default allows the user to set a \code{"pac_update"} in his/her
.Rprofile.}
\item{character.only}{logical. If \code{TRUE} then \code{p_load} will only
accept a single input which is a character vector containing the names of
packages to load.}
\item{\ldots}{name(s) of package(s).}
}
\description{
This function is a wrapper for \code{\link[base]{library}} and
\code{\link[base]{require}}. It checks to see if a
package is installed, if not it attempts to install the package from CRAN
and/or any other repository in the \pkg{pacman} repository list.
}
\examples{
\dontrun{
p_load(lattice)
p_unload(lattice)
p_load(lattice, foreign, boot, rpart)
p_loaded()
p_unload(lattice, foreign, boot, rpart)
p_loaded()
}
}
\seealso{
\code{\link[base]{library}},
\code{\link[base]{require}},
\code{\link[utils]{install.packages}}
}
|
# Impute randomly-introduced missing values in daily closing prices for
# ten tickers (2020-03-01 .. 2020-06-02) using k-nearest-neighbour
# imputation from the VIM package.
library(tidyverse)

# Install-on-demand, consistent with the quantmod handling below; the
# original unconditionally ran install.packages("VIM") on every run.
if (!require("VIM")) {
  install.packages("VIM")
  library(VIM)
}
# Use quantmod
if (!require("quantmod")) {
  install.packages("quantmod")
  library(quantmod)
}

start <- as.Date("2020-03-01")
end <- as.Date("2020-06-02")

# Each getSymbols() call downloads daily OHLC data from Yahoo and
# auto-assigns an xts object named after the ticker into this environment
# (getSymbols default auto.assign = TRUE), so the loop is equivalent to
# the ten copy-pasted calls it replaces.
for (sym in c("GS", "MRK", "AZN", "MS", "MSFT", "V", "PG", "MA", "INTC", "ZM")) {
  getSymbols(sym, src = "yahoo", from = start, to = end)
}

# One column of closing prices per ticker.
data <- data.frame(GS = GS$Close, MRK = MRK$Close, AZN = AZN$Close,
                   MS = MS$Close, MSFT = MSFT$Close, V = V$Close,
                   PG = PG$Close, MA = MA$Close, INTC = INTC$Close,
                   ZM = ZM$Close)

# NOTE(review): these names shadow base::ncol/base::nrow; kept for
# backward compatibility with the original script.
ncol <- ncol(data)
nrow <- nrow(data)

# Punch 30 random holes into the table. Cells are sampled with
# replacement, so the same cell can be hit twice and fewer than 30
# distinct NAs are possible.
for (i in seq_len(30)) {
  col_missing <- sample(seq_len(ncol), 1)
  row_missing <- sample(seq_len(nrow), 1)
  data[row_missing, col_missing] <- NA
}

# 4-nearest-neighbour imputation; kNN() appends *_imp indicator
# columns, which the subset() below then drops.
data1 <- kNN(data, variable = c("GS", "INTC", "MA", "MRK", "MS", "MSFT",
                                "PG", "V", "ZM", "AZN"), k = 4)
data2 <- subset(data1, select = GS:ZM)
data2
install.packages("VIM")
library(VIM)
#Use quantmod
if (!require("quantmod")) {
install.packages("quantmod")
library(quantmod)
}
start <- as.Date("2020-03-01")
end <- as.Date("2020-06-02")
getSymbols("GS", src = "yahoo", from = start, to = end)
getSymbols("MRK", src = "yahoo", from = start, to = end)
getSymbols("AZN", src = "yahoo", from = start, to = end)
getSymbols("MS", src = "yahoo", from = start, to = end)
getSymbols("MSFT", src = "yahoo", from = start, to = end)
getSymbols("V", src = "yahoo", from = start, to = end)
getSymbols("PG", src = "yahoo", from = start, to = end)
getSymbols("MA", src = "yahoo", from = start, to = end)
getSymbols("INTC", src = "yahoo", from = start, to = end)
getSymbols("ZM", src = "yahoo", from = start, to = end)
data = data.frame(GS = GS$Close,MRK=MRK$Close,AZN=AZN$Close,MS=MS$Close,MSFT=MSFT$Close,
V=V$Close,PG=PG$Close,MA=MA$Close,INTC=INTC$Close,ZM=ZM$Close)
ncol <- ncol(data)
nrow <- nrow(data)
for(i in 1:30){
col_missing = sample(1:ncol,1)
row_missing = sample(1:nrow,1)
data[row_missing,col_missing]<- NA
}
data1 <- kNN(data, variable = c("GS","INTC","MA","MRK","MS","MSFT","PG","V","ZM","AZN"),k=4)
data2 <- subset(data1, select=GS:ZM)
data2 |
# Exercise 2: using built-in string functions
# Create a variable `lyric` that contains the text "I like to eat apples and
# bananas"
lyric <- "I like to eat apples and bananas"
lyric
# Use the `substr()` function to extract the 1st through 13th letters from the
# `lyric`, and store the result in a variable called `intro`
# Use `?substr` to see more about this function
intro <- substr(lyric, 1, 13)
intro
# Use the `substr()` function to extract the 15th through the last letter of the
# `lyric`, and store the result in a variable called `fruits`
# Hint: use `nchar()` to determine how many total letters there are!
fruits <- substr(lyric, 15, nchar(lyric))
fruits
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "ee".
# Store the result in a variable called `fruits_e`
# Hint: see http://www.endmemo.com/program/R/sub.php for a simple example (or
# use `?gsub`)
fruits_e <- gsub("a", "ee", fruits)
fruits_e
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "o".
# Store the result in a variable called `fruits_o`
fruits_o <- gsub("a", "o", fruits)
fruits_o
# Create a new variable `lyric_e` that is the `intro` combined with the new
# `fruits_e` ending. Print out this variable
lyric_e <- paste(intro, fruits_e)
lyric_e
# Without making a new variable, print out the `intro` combined with the new
# `fruits_o` ending
print(paste(intro, fruits_o))
| /chapter-06-exercises/exercise-2/exercise.R | permissive | abq14/book-exercises | R | false | false | 1,412 | r | # Exercise 2: using built-in string functions
# Create a variable `lyric` that contains the text "I like to eat apples and
# bananas"
lyric <- "I like to eat apples and bananas"
lyric
# Use the `substr()` function to extract the 1st through 13th letters from the
# `lyric`, and store the result in a variable called `intro`
# Use `?substr` to see more about this function
intro <- substr(lyric, 1, 13)
intro
# Use the `substr()` function to extract the 15th through the last letter of the
# `lyric`, and store the result in a variable called `fruits`
# Hint: use `nchar()` to determine how many total letters there are!
fruits <- substr(lyric, 15, nchar(lyric))
fruits
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "ee".
# Store the result in a variable called `fruits_e`
# Hint: see http://www.endmemo.com/program/R/sub.php for a simpmle example (or
# use `?gsub`)
fruits_e <- gsub("a", "ee", fruits)
fruits_e
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "o".
# Store the result in a variable called `fruits_o`
fruits_o <- gsub("a", "o", fruits)
fruits_o
# Create a new variable `lyric_e` that is the `intro` combined with the new
# `fruits_e` ending. Print out this variable
lyric_e <- paste(intro, fruits_e)
lyric_e
# Without making a new variable, print out the `intro` combined with the new
# `fruits_o` ending
print(paste(intro, fruits_o))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_summary_diff_tool.R
\name{add_change_rate_cols}
\alias{add_change_rate_cols}
\title{add change rate cols}
\usage{
add_change_rate_cols(summary_df)
}
\arguments{
\item{summary_df}{}
}
\value{
data.frame
}
\description{
add cols which was mutated as change rate from past term data
(lag target col) into the summary data
}
| /man/add_change_rate_cols.Rd | permissive | rea-osaka/retiex | R | false | true | 408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_summary_diff_tool.R
\name{add_change_rate_cols}
\alias{add_change_rate_cols}
\title{add change rate cols}
\usage{
add_change_rate_cols(summary_df)
}
\arguments{
\item{summary_df}{}
}
\value{
data.frame
}
\description{
add cols which was mutated as change rate from past term data
(lag target col) into the summary data
}
|
# Shiny dashboard: yearly revenue trends by gaming platform and by publisher.
# NOTE(review): `platform_year` and `publisher_year` are not defined in this
# script -- they are assumed to already exist in the app's environment as
# data frames with Year, Revenue and Platform / Publisher columns. Confirm
# they are loaded before the app starts.
#setwd("C:/Users/sh355/Desktop/shiny")  # avoid setwd(); run the app from its own directory
library(shiny)
library(ggplot2)  # ggplot()/geom_*() are used below but were never attached

ui <- fluidPage(
  fluidRow(
    splitLayout(cellWidths = c("50%", "50%"),
                checkboxGroupInput("checkGroup", label = h3("Checkbox group"),
                                   choices = list("PS2", "X360", "PS3", "Wii", "DS",
                                                  "PS", "GBA", "PSP", "PS4", "PC"),
                                   selected = "PC"),
                checkboxGroupInput("checkGroup2", label = h3("Checkbox group2"),
                                   choices = list("Activision", "Electronic Arts",
                                                  "Konami Digital Entertainment",
                                                  "Namco Bandai Games", "Nintendo",
                                                  "Sega", "Sony Computer Entertainment",
                                                  "Take-Two Interactive", "THQ",
                                                  "Ubisoft"),
                                   selected = "Activision"))),
  mainPanel("main panel",
            fluidRow(
              splitLayout(cellWidths = c("50%", "50%"),
                          plotOutput("PlatformPlot"), plotOutput("PublisherPlot"))
            )
  )
)

server <- function(input, output) {
  # checkboxGroupInput returns a character vector of *all* selected boxes,
  # so rows must be matched with %in%; the original `== input$checkGroup`
  # recycled the vector and silently dropped rows when several boxes were
  # ticked. Colour is now mapped to the data column itself so each selected
  # series gets its own colour and a correct legend.
  output$PlatformPlot <- renderPlot({
    shown <- platform_year[platform_year$Platform %in% input$checkGroup, ]
    ggplot(shown, aes(x = Year, y = Revenue, group = Platform, color = Platform)) +
      geom_point(na.rm = TRUE) +
      geom_line(na.rm = TRUE)
  })
  output$PublisherPlot <- renderPlot({
    shown <- publisher_year[publisher_year$Publisher %in% input$checkGroup2, ]
    ggplot(shown, aes(x = Year, y = Revenue, group = Publisher, color = Publisher)) +
      geom_point(na.rm = TRUE) +
      geom_line(na.rm = TRUE)
  })
}

shinyApp(ui = ui, server = server)
library(shiny)
ui <- fluidPage(
fluidRow(
splitLayout(cellWidths = c("50%", "50%"),
checkboxGroupInput("checkGroup", label = h3("Checkbox group"),
choices = list("PS2","X360",
"PS3",
"Wii",
"DS",
"PS",
"GBA",
"PSP",
"PS4",
"PC"
),
selected = "PC"),
checkboxGroupInput("checkGroup2", label = h3("Checkbox group2"),
choices = list("Activision","Electronic Arts","Konami Digital Entertainment","Namco Bandai Games","Nintendo","Sega","Sony Computer Entertainment","Take-Two Interactive","THQ","Ubisoft"
),
selected = "Activision"))),
mainPanel("main panel",
fluidRow(
splitLayout(cellWidths = c("50%", "50%"), plotOutput("PlatformPlot"), plotOutput("PublisherPlot"))
)
)
)
server <- function(input, output) {
output$PlatformPlot<- renderPlot({
ggplot(data = platform_year[platform_year$Platform == input$checkGroup,]) +
geom_point(mapping = aes(x=Year, y=Revenue, group = Platform, color=input$checkGroup),na.rm = TRUE)+
geom_line(mapping = aes(x=Year, y=Revenue, group = Platform, color=input$checkGroup),na.rm = TRUE)})
output$PublisherPlot<- renderPlot({
ggplot(data = publisher_year[publisher_year$Publisher == input$checkGroup2,]) +
geom_point(mapping = aes(x=Year, y=Revenue, group = Publisher, color=input$checkGroup2),na.rm = TRUE)+
geom_line(mapping = aes(x=Year, y=Revenue, group = Publisher, color=input$checkGroup2),na.rm = TRUE)})
}
shinyApp(ui = ui, server = server) |
## Two functions that cache the inverse of a matrix.
## makeCacheMatrix wraps matrix `x` in a list of closures: set/get access
## the matrix itself, setinv/getinv access its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  store <- function(y) {
    x <<- y
    inverse_cache <<- NULL  # a new matrix makes the old inverse stale
  }
  list(set = store,
       get = function() x,
       setinv = function(inv) inverse_cache <<- inv,
       getinv = function() inverse_cache)
}
## cacheSolve returns the inverse of the special "matrix" `x` built by
## makeCacheMatrix, reusing the cached value (with a message) when present.
##
## Fix: extra arguments are now forwarded to solve() -- the original
## accepted `...` but silently dropped it (`solve(data)`), so e.g.
## cacheSolve(x, b) could not solve a linear system as the signature
## promised. Note that a cache hit still ignores `...`, matching the
## original caching contract.
cacheSolve <- function(x, ...) {
  m <- x$getinv()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinv(m)
  m
}
| /cachematrix.R | no_license | bdkimmie4/ProgrammingAssignment2 | R | false | false | 681 | r | ## Two functions that caches the inverse of a matrix
## makeCacheMatrix creates a special matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(inv) m <<- inv
getinv <- function() m
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve computes the inverse of the special matrix if not already cached, otherwise return cached data
cacheSolve <- function(x, ...) {
m <- x$getinv()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data)
x$setinv(m)
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proximitybeacon_functions.R
\name{beacons.get}
\alias{beacons.get}
\title{Returns detailed information about the specified beacon. Authenticate using an [OAuth access token](https://developers.google.com/identity/protocols/OAuth2) from a signed-in user with **viewer**, **Is owner** or **Can edit** permissions in the Google Developers Console project. Requests may supply an Eddystone-EID beacon name in the form: `beacons/4!beaconId` where the `beaconId` is the base16 ephemeral ID broadcast by the beacon. The returned `Beacon` object will contain the beacon's stable Eddystone-UID. Clients not authorized to resolve the beacon's ephemeral Eddystone-EID broadcast will receive an error.}
\usage{
beacons.get(beaconName, projectId = NULL)
}
\arguments{
\item{beaconName}{Resource name of this beacon}
\item{projectId}{The project id of the beacon to request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/userlocation.beacon.registry
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/userlocation.beacon.registry)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/beacons/proximity/}{Google Documentation}
}
| /googleproximitybeaconv1beta1.auto/man/beacons.get.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 1,483 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proximitybeacon_functions.R
\name{beacons.get}
\alias{beacons.get}
\title{Returns detailed information about the specified beacon. Authenticate using an [OAuth access token](https://developers.google.com/identity/protocols/OAuth2) from a signed-in user with **viewer**, **Is owner** or **Can edit** permissions in the Google Developers Console project. Requests may supply an Eddystone-EID beacon name in the form: `beacons/4!beaconId` where the `beaconId` is the base16 ephemeral ID broadcast by the beacon. The returned `Beacon` object will contain the beacon's stable Eddystone-UID. Clients not authorized to resolve the beacon's ephemeral Eddystone-EID broadcast will receive an error.}
\usage{
beacons.get(beaconName, projectId = NULL)
}
\arguments{
\item{beaconName}{Resource name of this beacon}
\item{projectId}{The project id of the beacon to request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/userlocation.beacon.registry
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/userlocation.beacon.registry)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/beacons/proximity/}{Google Documentation}
}
|
# ------------------------------------------------------------------
# ggplot2 walkthrough on the built-in `diamonds` data set:
# qplot() basics, ggplot() layers, faceting, titles, and legends.
# ------------------------------------------------------------------
library(tidyverse)
library(ggplot2)
#install.packages("gridExtra")
library(gridExtra)
library(ggplot2)
library(dplyr)
library(plotly)
library(hrbrthemes)

data()            # list the data sets available in attached packages
head(diamonds)
str(diamonds)

# --- qplot() quick plots ------------------------------------------
qplot(carat, price, data = diamonds)
qplot(log(carat), log(price), data = diamonds)
qplot(carat, price, data = diamonds[1:50, ], colour = color)
qplot(carat, price, data = diamonds[1:50, ], shape = cut)
qplot(carat, price, data = diamonds[1:50, ], size = price)

library(scales)
qplot(carat, price, data = diamonds, colour = I(alpha("black", 1/200)))
qplot(carat, price, data = diamonds, geom = c("point", "smooth"))
qplot(carat, data = diamonds, geom = "histogram")
qplot(carat, data = diamonds, geom = "density")
qplot(carat, data = diamonds, geom = "histogram", fill = color)
qplot(carat, data = diamonds, geom = "density", colour = color)
str(diamonds)

# --- ggplot(): scatterplot with colour mapped to cut --------------
p <- ggplot(diamonds, aes(x = carat, y = price, color = cut)) +
  geom_point()
p
ggsave("myggplot.png", plot = p, width = 10, height = 20, dpi = 300)  # save a stored ggplot

# --- bar charts with different position adjustments ---------------
p <- ggplot(diamonds, aes(cut)) +
  geom_bar()
p
p <- ggplot(diamonds, aes(cut)) +
  geom_bar(aes(fill = clarity), position = "stack")
p
ggplot(diamonds, aes(cut)) +
  geom_bar(aes(fill = clarity), position = "fill")
ggplot(diamonds, aes(cut)) +
  geom_bar(aes(fill = clarity), position = "dodge")
"
#marker
p<-ggplot(diamonds, aes(x=carat, y=price)) +
  geom_point(aes(size=carat, shape=clarity, alpha=price))
p
"
# --- layers: scatterplot geom + smoothing geom --------------------
p <- ggplot(diamonds) +
  geom_point(aes(x = carat, y = price, color = cut)) +
  geom_smooth(aes(x = carat, y = price, color = cut))
p

# --- faceting: split one plot by a factor -------------------------
# facet_wrap(~ factor1 + factor2 + ... + factorn)
p <- p + facet_wrap(~ cut, nrow = 3, ncol = 2)
p
# facet_grid(row_variable ~ column_variable)
p <- p + facet_grid(color ~ cut)
p
p <- ggplotly(p)  # interactive version via plotly
p
ggplot(diamonds, aes(x = carat, y = price, color = cut)) +
  geom_point() +
  facet_wrap(~ clarity)
plot <- ggplot(diamonds,
               aes(x = carat, y = price)) + geom_density(aes(fill = cut), alpha = 0.5)
plot <- plot + facet_wrap(~ cut)
plot

# --- main title and axis labels -----------------------------------
ggplot(diamonds, aes(x = carat, y = price, color = cut)) +
  geom_point() + geom_smooth() +
  ggtitle("Scatter") +
  xlab("carat") + ylab("price") +
  coord_cartesian(ylim = c(0, 10000))

# Change the appearance of the main title and axis labels via
# theme() and element_text():
#   main title : p + theme(plot.title = element_text(family, face, colour, size))
#   x/y axis   : p + theme(axis.title.x/y = element_text(family, face, colour, size))
"
family : font family
face : font face. Possible values are plain, italic, bold and
bold.italic
colour : text color
size : text size in pts
hjust : horizontal justification (in [0, 1])
vjust : vertical justification (in [0, 1])
lineheight : line height. In multi-line text, the lineheight argument is used to change the spacing between lines.
color : an alias for colour
"
# --- legends: removing and repositioning --------------------------
# remove the legend entirely
p1 <- ggplot(diamonds, aes(x = carat, y = price, color = cut)) +
  geom_point() + geom_smooth() +
  theme(legend.position = "none",
        axis.title.x = element_text(color = "blue", size = 14, face = "bold"),
        axis.title.y = element_text(color = "#993333", size = 14, face = "bold")) +
  labs(title = "legend.position='none'")
# legend at the top
p2 <- ggplot(diamonds, aes(x = carat, y = price, color = cut)) +
  geom_point() + geom_smooth() +
  theme(legend.position = "top") +
  labs(title = "legend.position='top'")
# legend inside the plot area (bottom-right corner)
p3 <- ggplot(diamonds, aes(x = carat, y = price, color = cut)) +
  geom_point() + geom_smooth() +
  labs(title = "legend.position='coords inside plot'") +
  theme(legend.justification = c(1, 0), legend.position = c(1, 0))
# arrange the three variants side by side
grid.arrange(p1, p2, p3, nrow = 1, ncol = 3)
| /CSE3020 - DATA VISUALISATION/R Codes/GGPLOT-EX-upload.R | no_license | sriteja2602/Education | R | false | false | 3,995 | r | library(tidyverse)
library(ggplot2)
#install.packages("gridExtra")
library(gridExtra)
library(ggplot2)
library(dplyr)
library(plotly)
library(hrbrthemes)
data()
head(diamonds)
str(diamonds)
qplot(carat, price, data = diamonds)
qplot(log(carat), log(price), data = diamonds)
qplot(carat, price, data = diamonds[1:50,], colour = color)
qplot(carat, price, data = diamonds[1:50,], shape = cut)
qplot(carat, price, data = diamonds[1:50,], size = price)
library(scales)
qplot(carat, price, data = diamonds, colour = I(alpha("black", 1/200)))
qplot(carat, price, data = diamonds, geom = c("point", "smooth"))
qplot(carat, data = diamonds, geom = "histogram")
qplot(carat, data = diamonds, geom = "density")
qplot(carat, data = diamonds, geom = "histogram", fill = color)
qplot(carat, data = diamonds, geom = "density", colour = color)
str(diamonds)
#points-scatterplot
p<-ggplot(diamonds, aes(x=carat, y=price, color=cut))+
  geom_point()
p
ggsave("myggplot.png", plot=p, width = 10,height=20, dpi = 300) # save a stored ggplot
#bar chart of counts per cut
p<- ggplot(diamonds, aes(cut)) +
  geom_bar()
p
# stacked bars: clarity counts stacked within each cut
p<-ggplot(diamonds, aes(cut)) +
  geom_bar(aes(fill = clarity),position = "stack")
p
# position = "fill" rescales each bar to proportions (total height 1)
ggplot(diamonds, aes(cut)) +
  geom_bar(aes(fill = clarity), position = "fill")
# position = "dodge" places the clarity bars side by side
ggplot(diamonds, aes(cut)) +
  geom_bar(aes(fill = clarity), position = "dodge")
# the quoted block below is a string literal used to park unused example code
"
#marker
p<-ggplot(diamonds, aes(x=carat, y=price)) +
  geom_point(aes(size=carat, shape=clarity, alpha=price))
p
"
#Layers
#Overlay a smoothing line on top of the scatter plot using geom_smooth
# Adding scatterplot geom (layer1) and smoothing geom (layer2).
p<-ggplot(diamonds) + geom_point(aes(x=carat, y=price, color=cut)) +
  geom_smooth(aes(x=carat, y=price, color=cut))
p
#facet-faceting that allows the user to split one plot into
#multiple plots based on a factor included in the dataset.
# columns defined by 'cut'
#facet_wrap(~ factor1 + factor2 + ... + factorn)
p<- p+facet_wrap( ~ cut, nrow=3,ncol=2)
p
#facet_grid(row_variable ~ column_variable)
p<-p + facet_grid(color ~ cut)
p
# make the last plot interactive with plotly
p <- ggplotly(p)
p
ggplot(diamonds, aes(x=carat, y=price, color=cut)) +
  geom_point() +
  facet_wrap(~ clarity)
# density per cut, one facet per cut
# (NOTE: `plot` masks the base function plot() from here on)
plot <- ggplot(diamonds,
               aes(x=carat,y=price))+ geom_density(aes(fill=cut),alpha=0.5)
plot<-plot+facet_wrap(~cut)
plot
## the main title, x and y axis labels
# coord_cartesian() zooms without dropping data (unlike scale limits)
ggplot(diamonds, aes(x=carat, y=price, color=cut)) +
  geom_point() + geom_smooth() +
  ggtitle("Scatter") +
  xlab("carat") + ylab("price")+
  coord_cartesian(ylim=c(0, 10000))
#Change the appearance of the main title &axis labels -- theme() & element_text()
# main title -p + theme(plot.title = element_text(family, face, colour, size))
# x/y axis title -p + theme(axis.title.x/y = element_text(family, face, colour, size))
# reference notes kept as a string literal (acts as a multi-line comment):
"
family : font family
face : font face. Possible values are plain, italic, bold and
bold.italic
colour : text color
size : text size in pts
hjust : horizontal justification (in [0, 1])
vjust : vertical justification (in [0, 1])
lineheight : line height. In multi-line text, the lineheight argument is used to change the spacing between lines.
color : an alias for colour
"
#Legend - Deleting and Changing Position
# remove legend
p1 <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) +
  geom_point() + geom_smooth() +
  theme(legend.position="none",
        axis.title.x = element_text(color="blue", size=14, face="bold"),
        axis.title.y = element_text(color="#993333", size=14, face="bold")) +
  labs(title="legend.position='none'")
# legend at top
p2 <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + theme(legend.position="top") + labs(title="legend.position='top'")
# legend inside the plot, anchored to the bottom-right corner of the plot area
p3 <- ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point() + geom_smooth() + labs(title="legend.position='coords inside plot'") + theme(legend.justification=c(1,0), legend.position=c(1,0))
# arrange the three variants side by side (gridExtra)
grid.arrange(p1, p2, p3,nrow=1,ncol=3)
|
library(shiny)
library(leaflet)
library(RColorBrewer)
library(knitr)
library(vcd)
library(grid)
library(plotly)
#library(googleVis)
library(igraph)
library(tidyverse)
# Define server logic for slider examples
# Shiny server for the Voronoi/INEGI leaflet app.
# NOTE(review): `FinalTT` and SPointsDF_to_voronoi_SPolysDF() are not defined
# in this file - presumably created in global.R; confirm before refactoring.
shinyServer(function(input, output, session) {
  # Window 1
  #### For the map in leaflet
  # Reactive data: rows of FinalTT for the state (NOM_ENT) selected in the UI;
  # "All" keeps the complete table. The if/else value is what the reactive returns.
  points <- reactive({
    #input$update
    #TableL <- TableL()
    req(input$NOM_ENT)
    if (input$NOM_ENT != "All") {
      FinalTT <- FinalTT[FinalTT$NOM_ENT %in% input$NOM_ENT,]
    }else FinalTT <- FinalTT
  })
  #head(TianguisFF)
  #names(TianguisFF)
  #summary(TianguisFF)
  #vtess <- deldir(Tianguis_1[,6:7])
  #class(vtess)
  #summary(vtess)
  #dim(as.data.frame(vtess$summary$dir.area))
  #summary(vtess$delsgs)
  #summary(vtess$dirsgs)
  #summary(vtess$ind.orig)
  # For the map in leaflet:
  # render Voronoi polygons around the tianguis points plus circle markers
  # for tianguis and maize / non-maize market sites.
  output$mymap1 <- renderLeaflet(
    {
      Goldberg <- points()
      #Goldberg$Variable1 <- as.factor(Goldberg$Variable1)
      # split the filtered data into tianguis vs market sites
      Tianguis_1 <- Goldberg %>%
        #filter(Variable1 != "NE") %>%
        filter(Variable1 == "Tianguis")
      Mercados_1 <- Goldberg %>%
        # filter(Variable1 != "NE") %>%
        filter(Variable1 != "Tianguis")
      # Voronoi tessellation of the tianguis locations (sp polygons)
      vor_pts <- SpatialPointsDataFrame(cbind(Tianguis_1$lng,
                                              Tianguis_1$lat),
                                        Tianguis_1, match.ID = TRUE)
      vor <- SPointsDF_to_voronoi_SPolysDF(vor_pts)
      # marker colour by site type
      pal <- colorFactor(c("navy", "red", "black"), domain = c("Tianguis", "Maiz", "Sin_Maiz"))
      leaflet() %>%
        # addTiles() %>%
        addProviderTiles("CartoDB.Positron") %>%
        addPolygons(data = vor,
                    stroke = T, color = "green", weight = 2,
                    fill = F, fillOpacity = 0.0,
                    smoothFactor = 0.5
                    #popup = sprintf("Total In/Out: %s",
                    #                as.character(vor@data$tot))
        ) %>%
        # For the tianguis (street markets)
        addCircleMarkers(data = Tianguis_1,
                         ~lng, ~lat,
                         popup = paste(sep = " ","Municipio:",Tianguis_1$NOM_MUN,
                                       "<br/>","Localidad:",Tianguis_1$NOM_LOC,
                                       "<br/>","Tipo:",Tianguis_1$Variable1),
                         radius = ~ifelse(Variable1 == "Tianguis", 7, 6),
                         color = ~pal(Variable1),
                         stroke = FALSE, fillOpacity = 0.5) %>%
        # For the sites with maize
        addCircleMarkers(data = Mercados_1,
                         ~lng, ~lat,
                         popup = paste(sep = " ","Municipio:",Mercados_1$NOM_MUN,
                                       "<br/>","Localidad:",Mercados_1$NOM_LOC,
                                       "<br/>","Tipo:",Mercados_1$Variable1),
                         radius = ~ifelse(Variable1 == "Maiz", 4, 4),
                         color = ~pal(Variable1),
                         stroke = FALSE, fillOpacity = 0.5
                         # clusterOptions = markerClusterOptions(showCoverageOnHover = T,
                         #                                       spiderfyOnMaxZoom = T,
                         #                                       zoomToBoundsOnClick = T,
                         #                                       spiderfyDistanceMultiplier = 2)
        )
      # TTT <- c(brewer.pal(8,"Dark2"))
      #TTT <- colorNumeric(c(1:64), levels(TableL$Raza_primaria))
      # Goldberg <- points()
      # Trip2 <- points2()
      # TT <- paste(Goldberg$Raza_primaria)
      # leaflet(data = Goldberg) %>%
      #clearShapes() %>%
      # addTiles() %>%
      #clearBounds() %>%
      # addCircleMarkers(Goldberg$longitude, Goldberg$latitude,
      # weight = 8, radius = 5, stroke = F, fillOpacity = 0.9, color = sample(TTT,1),
      # clusterOptions = markerClusterOptions(showCoverageOnHover = T,
      # spiderfyOnMaxZoom = T,
      # zoomToBoundsOnClick = T,
      # spiderfyDistanceMultiplier = 2),
      # popup = paste(sep = " ",
      # "Complejo Racial:",Goldberg$Complejo_racial,"<br/>",
      # "Raza Maiz:",Goldberg$Raza_primaria,"<br/>",
      # "Municipio:",Goldberg$Municipio, "<br/>",
      # "Localidad:",Goldberg$Localidad)) %>%
      #addMeasure(primaryLengthUnit = "kilometers", primaryAreaUnit = "hectares",activeColor = '#FF00FF') %>%
      #addProviderTiles("Esri.WorldTopoMap")
      # addProviderTiles("CartoDB.Positron")
      # addLayersControl(
      # overlayGroups = names(Teo1),
      # options = layersControlOptions(collapsed = FALSE))
    })
  # observeEvent({# update the map markers and view on map clicks
  # proxy2 <- leafletProxy("mymap1")
  # #proxy2 %>% clearControls()
  # # Trip2 <- points2()
  # if (input$tripsacum) {
  # proxy2 %>%
  # addCircleMarkers(Trip1$long, Trip1$lat, weight = 3, radius = 1,
  # color = '#FA5', opacity = 1, stroke = T,
  # popup = paste(sep = " ",
  # "Raza Maiz:",Trip1$Taxa,"<br/>",
  # "Municipio:",Trip1$Municipio))
  # }
  # })
})
| /server.R | no_license | APonce73/VoronoiInegi | R | false | false | 5,699 | r | library(shiny)
library(leaflet)
library(RColorBrewer)
library(knitr)
library(vcd)
library(grid)
library(plotly)
#library(googleVis)
library(igraph)
library(tidyverse)
# Define server logic for slider examples
# Shiny server for the Voronoi/INEGI leaflet app.
# NOTE(review): `FinalTT` and SPointsDF_to_voronoi_SPolysDF() are not defined
# in this file - presumably created in global.R; confirm before refactoring.
shinyServer(function(input, output, session) {
  # Window 1
  #### For the map in leaflet
  # Reactive data: rows of FinalTT for the state (NOM_ENT) selected in the UI;
  # "All" keeps the complete table. The if/else value is what the reactive returns.
  points <- reactive({
    #input$update
    #TableL <- TableL()
    req(input$NOM_ENT)
    if (input$NOM_ENT != "All") {
      FinalTT <- FinalTT[FinalTT$NOM_ENT %in% input$NOM_ENT,]
    }else FinalTT <- FinalTT
  })
  #head(TianguisFF)
  #names(TianguisFF)
  #summary(TianguisFF)
  #vtess <- deldir(Tianguis_1[,6:7])
  #class(vtess)
  #summary(vtess)
  #dim(as.data.frame(vtess$summary$dir.area))
  #summary(vtess$delsgs)
  #summary(vtess$dirsgs)
  #summary(vtess$ind.orig)
  # For the map in leaflet:
  # render Voronoi polygons around the tianguis points plus circle markers
  # for tianguis and maize / non-maize market sites.
  output$mymap1 <- renderLeaflet(
    {
      Goldberg <- points()
      #Goldberg$Variable1 <- as.factor(Goldberg$Variable1)
      # split the filtered data into tianguis vs market sites
      Tianguis_1 <- Goldberg %>%
        #filter(Variable1 != "NE") %>%
        filter(Variable1 == "Tianguis")
      Mercados_1 <- Goldberg %>%
        # filter(Variable1 != "NE") %>%
        filter(Variable1 != "Tianguis")
      # Voronoi tessellation of the tianguis locations (sp polygons)
      vor_pts <- SpatialPointsDataFrame(cbind(Tianguis_1$lng,
                                              Tianguis_1$lat),
                                        Tianguis_1, match.ID = TRUE)
      vor <- SPointsDF_to_voronoi_SPolysDF(vor_pts)
      # marker colour by site type
      pal <- colorFactor(c("navy", "red", "black"), domain = c("Tianguis", "Maiz", "Sin_Maiz"))
      leaflet() %>%
        # addTiles() %>%
        addProviderTiles("CartoDB.Positron") %>%
        addPolygons(data = vor,
                    stroke = T, color = "green", weight = 2,
                    fill = F, fillOpacity = 0.0,
                    smoothFactor = 0.5
                    #popup = sprintf("Total In/Out: %s",
                    #                as.character(vor@data$tot))
        ) %>%
        # For the tianguis (street markets)
        addCircleMarkers(data = Tianguis_1,
                         ~lng, ~lat,
                         popup = paste(sep = " ","Municipio:",Tianguis_1$NOM_MUN,
                                       "<br/>","Localidad:",Tianguis_1$NOM_LOC,
                                       "<br/>","Tipo:",Tianguis_1$Variable1),
                         radius = ~ifelse(Variable1 == "Tianguis", 7, 6),
                         color = ~pal(Variable1),
                         stroke = FALSE, fillOpacity = 0.5) %>%
        # For the sites with maize
        addCircleMarkers(data = Mercados_1,
                         ~lng, ~lat,
                         popup = paste(sep = " ","Municipio:",Mercados_1$NOM_MUN,
                                       "<br/>","Localidad:",Mercados_1$NOM_LOC,
                                       "<br/>","Tipo:",Mercados_1$Variable1),
                         radius = ~ifelse(Variable1 == "Maiz", 4, 4),
                         color = ~pal(Variable1),
                         stroke = FALSE, fillOpacity = 0.5
                         # clusterOptions = markerClusterOptions(showCoverageOnHover = T,
                         #                                       spiderfyOnMaxZoom = T,
                         #                                       zoomToBoundsOnClick = T,
                         #                                       spiderfyDistanceMultiplier = 2)
        )
      # TTT <- c(brewer.pal(8,"Dark2"))
      #TTT <- colorNumeric(c(1:64), levels(TableL$Raza_primaria))
      # Goldberg <- points()
      # Trip2 <- points2()
      # TT <- paste(Goldberg$Raza_primaria)
      # leaflet(data = Goldberg) %>%
      #clearShapes() %>%
      # addTiles() %>%
      #clearBounds() %>%
      # addCircleMarkers(Goldberg$longitude, Goldberg$latitude,
      # weight = 8, radius = 5, stroke = F, fillOpacity = 0.9, color = sample(TTT,1),
      # clusterOptions = markerClusterOptions(showCoverageOnHover = T,
      # spiderfyOnMaxZoom = T,
      # zoomToBoundsOnClick = T,
      # spiderfyDistanceMultiplier = 2),
      # popup = paste(sep = " ",
      # "Complejo Racial:",Goldberg$Complejo_racial,"<br/>",
      # "Raza Maiz:",Goldberg$Raza_primaria,"<br/>",
      # "Municipio:",Goldberg$Municipio, "<br/>",
      # "Localidad:",Goldberg$Localidad)) %>%
      #addMeasure(primaryLengthUnit = "kilometers", primaryAreaUnit = "hectares",activeColor = '#FF00FF') %>%
      #addProviderTiles("Esri.WorldTopoMap")
      # addProviderTiles("CartoDB.Positron")
      # addLayersControl(
      # overlayGroups = names(Teo1),
      # options = layersControlOptions(collapsed = FALSE))
    })
  # observeEvent({# update the map markers and view on map clicks
  # proxy2 <- leafletProxy("mymap1")
  # #proxy2 %>% clearControls()
  # # Trip2 <- points2()
  # if (input$tripsacum) {
  # proxy2 %>%
  # addCircleMarkers(Trip1$long, Trip1$lat, weight = 3, radius = 1,
  # color = '#FA5', opacity = 1, stroke = T,
  # popup = paste(sep = " ",
  # "Raza Maiz:",Trip1$Taxa,"<br/>",
  # "Municipio:",Trip1$Municipio))
  # }
  # })
})
|
## -----------------------------------------------------------------------------
set.seed(1234)
# project-local helper for printing matrices
source("Manuscript figures and matrices/printMat.R")
library(tidyverse)
library(PPGtools)
library(spam)
library(xtable)
library(cowplot)
library(ggthemes)
# color palette
cbp2 <- c("#000000", "#E69F00", "#56B4E9", "#009E73",
          "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# path to store figures and matrices
mypath <- "Manuscript figures and matrices/figures/"
## -------------------------------------------------------------------------------------
library(PPGtools)
# prepare data: green channel of the recording `rec`, seconds 10-80
raw_signal <- prepInput(rec, "Green", tstart = 10, tstop = 80)
# Plot the red, green, and blue colour channel of the raw signal
# (NOTE: `rgb` masks grDevices::rgb for the rest of the session)
rgb <-
  ggplot(data.frame(rec), aes(x = time)) +
  geom_line(aes(y=Red), colour = cbp2[7]) +
  geom_line(aes(y=Green), colour = cbp2[4]) +
  geom_line(aes(y=Blue), colour = cbp2[6]) +
  labs(x = "Time (s)", y = "", title = "Raw PPG signal") +
  theme_cowplot() +
  theme(legend.position = "none")
# save figure - raw data ---------------------------------------------------------
# printing the ggplot while the postscript device is open writes the .eps file
postscript(paste0(mypath, "rawsignal.eps"))
rgb
dev.off()
# plot only green raw signal
onlyg <-
  ggplot(data.frame(rec), aes(x = time)) +
  geom_line(aes(y=Green), colour = cbp2[1]) +
  labs(x = "Time (s)", y = "", title = "Raw PPG signal, green channel") +
  scale_x_continuous(limits = c(10, 80)) +
  scale_y_continuous(limits = c(.15, .25)) +
  theme_cowplot() +
  theme(legend.position = "none")
# save figure - only green signal ----------------------------------------------
postscript(paste0(mypath, "greensignal.eps"))
onlyg
dev.off()
## -----------------------------------------------------------------------------
# first order differences in timesteps:
tdiff <- data.frame(diff1 = diff(raw_signal$time))
# summary statistics on interval widths
# (NOTE: `ts` masks the base function stats::ts)
ts <- tdiff %>%
  summarise(n = length(diff1)+1, # number of timepoints
            min = min(diff1),
            median = median(diff1),
            mean = mean(diff1),
            max = max(diff1),
            sd = sd(diff1),
            range = max-min)
ts
ts$max/ts$min # minimum interval is 12 times shorter than the maximum interval
## -------------------------------------------------------------------------------------
# plot distribution of time interval widths in the signal
timedist <-
  ggplot(tdiff, aes(diff1, colour = diff1)) +
  geom_histogram(bins = 50, colour=NA, fill=cbp2[1]) +
  labs(x = "Interval width", y = "") +
  scale_x_continuous(breaks=seq(0.01,0.09, 0.01)) +
  scale_y_continuous(
    # don't expand y scale at the lower end
    # (expand_scale() is deprecated in ggplot2 >= 3.3; expansion() is the successor)
    expand = expand_scale(mult = c(0, 0.05))) +
  theme_cowplot()
timedist
# save figure - time hist ---------------------------------------------------------
postscript(paste0(mypath, "timehist.eps"))
timedist
dev.off()
## -------------------------------------------------------------------------------------
# Whittaker smoothing: compare the equal- vs unequal-timestep penalty for four
# lambda values and three difference orders, then detrend with the largest lambda.
# sampling rate of the smartphone camera (frames per second)
m <- length(raw_signal$time) # number of frames
sec <- tail(raw_signal$time, n=1) - head(raw_signal$time, n=1) # number of seconds
fps <- m/sec
fps # frames per second
1/fps # expected interval width at a constant sampling rate
# scale raw signal so one unit corresponds to one (average) frame
raw_signal$time_scaled <- raw_signal$time *fps
# for four different lambda values (10^-1, 10^1, 10^3, 10^5)
lambda <- 10^(seq(-1, 5, 2))
# seq_along() instead of 1:length(): identical here, but safe for empty input
lambda <- matrix(lambda, dimnames = list(paste0('lambda_', seq_along(lambda)), NULL))
# for three order differences
for(d in 1:3){
  # 1. FILTER THE DATA
  # smooth assuming equal timesteps (ES)
  zES <- smoothWE(raw_signal = raw_signal, lambda = lambda , d = d, uni = TRUE)
  #pdates <- data.frame(raw_signal, zES)
  # plot for smoothing equal timesteps (pes)
  pes1 <-
    plotLambda(raw_signal, zES, "") +
    scale_x_continuous(limits = c(30, 40)) +
    scale_y_continuous(limits = c(0.15, 0.225)) +
    theme(legend.position = "none")
  # save plot to .eps file
  postscript(paste0(mypath, "pes1d", d, ".eps"))
  plot(pes1)
  dev.off()
  # smooth assuming unequal time steps (UES)
  zUES <- smoothWE(raw_signal = raw_signal, lambda = lambda , d = d, uni = FALSE)
  #pdatues <- data.frame(raw_signal, zUES)
  # plot for smoothing assuming unequal timesteps (pues)
  pues1 <-
    plotLambda(raw_signal, zUES, "") +
    scale_x_continuous(limits = c(30, 40)) +
    scale_y_continuous(limits = c(0.15, 0.225))+
    theme(legend.position = "none")
  # save plots to .eps file
  postscript(paste0(mypath, "pues1d", d, ".eps"))
  plot(pues1)
  dev.off()
  # 2. DETREND THE RAW SIGNAL WITH A LARGE LAMBDA VALUE
  # column "l4" is presumably the fit for the largest lambda (10^5), i.e. the
  # slow trend - TODO confirm against the column naming used by smoothWE()
  detrendES <- data.frame(raw_signal,
                          ES = raw_signal$Green-zES[,"l4"],
                          UES = raw_signal$Green-zUES[,"l4"])
  # assuming equal timesteps (ES)
  dtES <-
    ggplot(detrendES, aes(x=time, y = ES)) +
    geom_line() +
    labs(x = "Time (s)",
         y = "signal") +
    scale_x_continuous(limits = c(30, 40)) +
    theme_cowplot()
  # save plot
  postscript(paste0(mypath, "detrended_es_d", d, ".eps"))
  plot(dtES)
  dev.off()
  # assuming unequal time steps (UES)
  dtUES <-
    ggplot(detrendES, aes(x=time, y = UES)) +
    geom_line() +
    labs(x = "Time (s)",
         y = "signal") +
    scale_x_continuous(limits = c(30, 40)) +
    theme_cowplot()
  # save plot
  postscript(paste0(mypath, "detrended_ues_d", d, ".eps"))
  plot(dtUES)
  dev.off()
}
## -------------------------------------------------------------------------------------
# Grid search: leave-one-out cross-validation error of the Whittaker smoother
# over a dense lambda grid, for 2 timestep assumptions x 3 difference orders.
# set up grid of lambda values (equally spaced on the log10 scale)
lambda <- 10^(seq(-3, 1, by = .01))
# seq_along() instead of 1:length(): identical here, but safe for empty input
lambda <- matrix(lambda, dimnames = list(paste0('lambda_', seq_along(lambda)), NULL))
plot(lambda)
# empty frame to save cross-validation standard errors:
# one row per candidate lambda, one column per condition
# (e = equal / ue = unequal timesteps; 1-3 = order of the difference penalty)
cve <- data.frame(lambda = lambda,
                  scv_e1 = 0,
                  scv_e2 = 0,
                  scv_e3 = 0,
                  scv_ue1 = 0,
                  scv_ue2 = 0,
                  scv_ue3 = 0)
# indicators:
steps <- c(TRUE, FALSE) # equal and unequal timesteps
I <- matrix(1:6, ncol=2, nrow=3) # index for cve dataframe
for(s in seq_along(steps)){ # for equal or unequal timesteps
  U <- steps[s]
  for(d in 1:3){ # for 3 order differences
    # column of cve (offset by 1 for the lambda column) for this condition
    i <- I[d,s]
    # estimate the slow trend with a very large lambda
    trend <- smoothWE(raw_signal = raw_signal, lambda = 10^5 , d = d, uni = U)
    # detrend the raw signal
    detrended_signal <- raw_signal
    detrended_signal$Green <- detrended_signal$Green - trend[,1]
    # compute cross-validation standard error over the whole lambda grid;
    # seq_len(nrow()) rather than 1:length() - lambda is a one-column matrix
    for(l in seq_len(nrow(lambda))){
      z <- smoothWE(raw_signal=detrended_signal,
                    lambda = lambda[l,],
                    d=d, uni = U, cv = TRUE)
      cve[l,i+1] <- z$cve
    }
  }
}
# plot the results of the grid search in the 6 (2*3) conditions:
# one figure per column of cve after the first (the lambda grid itself).
# seq_len(ncol() - 1) keeps the loop in sync with the columns of cve.
for(i in seq_len(ncol(cve) - 1)){
  nm <- names(cve)[i+1] # condition, e.g. "scv_e1"
  # Map the column through the .data pronoun instead of indexing cve from
  # outside aes(), which is fragile. The original also had a bare `grides`
  # line here; inside a for loop that does not auto-print, so it was a no-op
  # and has been removed - the plot is rendered explicitly with plot() below.
  grides <-
    ggplot(data = cve, aes(x = lambda, y = .data[[nm]])) +
    labs(title = "", x = "lambda", y = "cross-validation standard error") +
    theme_bw() +
    geom_line()
  # save plot to an .eps file named after the condition
  postscript(paste0(mypath, nm, ".eps"))
  plot(grides)
  dev.off()
}
## -------------------------------------------------------------------------------------
| /Manuscript figures and matrices/figures.R | no_license | GerbrichFerdinands/research-report-PPG-supplement | R | false | false | 7,520 | r | ## -----------------------------------------------------------------------------
set.seed(1234)
# project-local helper for printing matrices
source("Manuscript figures and matrices/printMat.R")
library(tidyverse)
library(PPGtools)
library(spam)
library(xtable)
library(cowplot)
library(ggthemes)
# color palette
cbp2 <- c("#000000", "#E69F00", "#56B4E9", "#009E73",
          "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# path to store figures and matrices
mypath <- "Manuscript figures and matrices/figures/"
## -------------------------------------------------------------------------------------
library(PPGtools)
# prepare data: green channel of the recording `rec`, seconds 10-80
raw_signal <- prepInput(rec, "Green", tstart = 10, tstop = 80)
# Plot the red, green, and blue colour channel of the raw signal
# (NOTE: `rgb` masks grDevices::rgb for the rest of the session)
rgb <-
  ggplot(data.frame(rec), aes(x = time)) +
  geom_line(aes(y=Red), colour = cbp2[7]) +
  geom_line(aes(y=Green), colour = cbp2[4]) +
  geom_line(aes(y=Blue), colour = cbp2[6]) +
  labs(x = "Time (s)", y = "", title = "Raw PPG signal") +
  theme_cowplot() +
  theme(legend.position = "none")
# save figure - raw data ---------------------------------------------------------
# printing the ggplot while the postscript device is open writes the .eps file
postscript(paste0(mypath, "rawsignal.eps"))
rgb
dev.off()
# plot only green raw signal
onlyg <-
  ggplot(data.frame(rec), aes(x = time)) +
  geom_line(aes(y=Green), colour = cbp2[1]) +
  labs(x = "Time (s)", y = "", title = "Raw PPG signal, green channel") +
  scale_x_continuous(limits = c(10, 80)) +
  scale_y_continuous(limits = c(.15, .25)) +
  theme_cowplot() +
  theme(legend.position = "none")
# save figure - only green signal ----------------------------------------------
postscript(paste0(mypath, "greensignal.eps"))
onlyg
dev.off()
## -----------------------------------------------------------------------------
# first order differences in timesteps:
tdiff <- data.frame(diff1 = diff(raw_signal$time))
# summary statistics on interval widths
# (NOTE: `ts` masks the base function stats::ts)
ts <- tdiff %>%
  summarise(n = length(diff1)+1, # number of timepoints
            min = min(diff1),
            median = median(diff1),
            mean = mean(diff1),
            max = max(diff1),
            sd = sd(diff1),
            range = max-min)
ts
ts$max/ts$min # minimum interval is 12 times shorter than the maximum interval
## -------------------------------------------------------------------------------------
# plot distribution of time interval widths in the signal
timedist <-
  ggplot(tdiff, aes(diff1, colour = diff1)) +
  geom_histogram(bins = 50, colour=NA, fill=cbp2[1]) +
  labs(x = "Interval width", y = "") +
  scale_x_continuous(breaks=seq(0.01,0.09, 0.01)) +
  scale_y_continuous(
    # don't expand y scale at the lower end
    # (expand_scale() is deprecated in ggplot2 >= 3.3; expansion() is the successor)
    expand = expand_scale(mult = c(0, 0.05))) +
  theme_cowplot()
timedist
# save figure - time hist ---------------------------------------------------------
postscript(paste0(mypath, "timehist.eps"))
timedist
dev.off()
## -------------------------------------------------------------------------------------
# Whittaker smoothing: compare the equal- vs unequal-timestep penalty for four
# lambda values and three difference orders, then detrend with the largest lambda.
# sampling rate of the smartphone camera (frames per second)
m <- length(raw_signal$time) # number of frames
sec <- tail(raw_signal$time, n=1) - head(raw_signal$time, n=1) # number of seconds
fps <- m/sec
fps # frames per second
1/fps # expected interval width at a constant sampling rate
# scale raw signal so one unit corresponds to one (average) frame
raw_signal$time_scaled <- raw_signal$time *fps
# for four different lambda values (10^-1, 10^1, 10^3, 10^5)
lambda <- 10^(seq(-1, 5, 2))
# seq_along() instead of 1:length(): identical here, but safe for empty input
lambda <- matrix(lambda, dimnames = list(paste0('lambda_', seq_along(lambda)), NULL))
# for three order differences
for(d in 1:3){
  # 1. FILTER THE DATA
  # smooth assuming equal timesteps (ES)
  zES <- smoothWE(raw_signal = raw_signal, lambda = lambda , d = d, uni = TRUE)
  #pdates <- data.frame(raw_signal, zES)
  # plot for smoothing equal timesteps (pes)
  pes1 <-
    plotLambda(raw_signal, zES, "") +
    scale_x_continuous(limits = c(30, 40)) +
    scale_y_continuous(limits = c(0.15, 0.225)) +
    theme(legend.position = "none")
  # save plot to .eps file
  postscript(paste0(mypath, "pes1d", d, ".eps"))
  plot(pes1)
  dev.off()
  # smooth assuming unequal time steps (UES)
  zUES <- smoothWE(raw_signal = raw_signal, lambda = lambda , d = d, uni = FALSE)
  #pdatues <- data.frame(raw_signal, zUES)
  # plot for smoothing assuming unequal timesteps (pues)
  pues1 <-
    plotLambda(raw_signal, zUES, "") +
    scale_x_continuous(limits = c(30, 40)) +
    scale_y_continuous(limits = c(0.15, 0.225))+
    theme(legend.position = "none")
  # save plots to .eps file
  postscript(paste0(mypath, "pues1d", d, ".eps"))
  plot(pues1)
  dev.off()
  # 2. DETREND THE RAW SIGNAL WITH A LARGE LAMBDA VALUE
  # column "l4" is presumably the fit for the largest lambda (10^5), i.e. the
  # slow trend - TODO confirm against the column naming used by smoothWE()
  detrendES <- data.frame(raw_signal,
                          ES = raw_signal$Green-zES[,"l4"],
                          UES = raw_signal$Green-zUES[,"l4"])
  # assuming equal timesteps (ES)
  dtES <-
    ggplot(detrendES, aes(x=time, y = ES)) +
    geom_line() +
    labs(x = "Time (s)",
         y = "signal") +
    scale_x_continuous(limits = c(30, 40)) +
    theme_cowplot()
  # save plot
  postscript(paste0(mypath, "detrended_es_d", d, ".eps"))
  plot(dtES)
  dev.off()
  # assuming unequal time steps (UES)
  dtUES <-
    ggplot(detrendES, aes(x=time, y = UES)) +
    geom_line() +
    labs(x = "Time (s)",
         y = "signal") +
    scale_x_continuous(limits = c(30, 40)) +
    theme_cowplot()
  # save plot
  postscript(paste0(mypath, "detrended_ues_d", d, ".eps"))
  plot(dtUES)
  dev.off()
}
## -------------------------------------------------------------------------------------
# Grid search: leave-one-out cross-validation error of the Whittaker smoother
# over a dense lambda grid, for 2 timestep assumptions x 3 difference orders.
# set up grid of lambda values (equally spaced on the log10 scale)
lambda <- 10^(seq(-3, 1, by = .01))
# seq_along() instead of 1:length(): identical here, but safe for empty input
lambda <- matrix(lambda, dimnames = list(paste0('lambda_', seq_along(lambda)), NULL))
plot(lambda)
# empty frame to save cross-validation standard errors:
# one row per candidate lambda, one column per condition
# (e = equal / ue = unequal timesteps; 1-3 = order of the difference penalty)
cve <- data.frame(lambda = lambda,
                  scv_e1 = 0,
                  scv_e2 = 0,
                  scv_e3 = 0,
                  scv_ue1 = 0,
                  scv_ue2 = 0,
                  scv_ue3 = 0)
# indicators:
steps <- c(TRUE, FALSE) # equal and unequal timesteps
I <- matrix(1:6, ncol=2, nrow=3) # index for cve dataframe
for(s in seq_along(steps)){ # for equal or unequal timesteps
  U <- steps[s]
  for(d in 1:3){ # for 3 order differences
    # column of cve (offset by 1 for the lambda column) for this condition
    i <- I[d,s]
    # estimate the slow trend with a very large lambda
    trend <- smoothWE(raw_signal = raw_signal, lambda = 10^5 , d = d, uni = U)
    # detrend the raw signal
    detrended_signal <- raw_signal
    detrended_signal$Green <- detrended_signal$Green - trend[,1]
    # compute cross-validation standard error over the whole lambda grid;
    # seq_len(nrow()) rather than 1:length() - lambda is a one-column matrix
    for(l in seq_len(nrow(lambda))){
      z <- smoothWE(raw_signal=detrended_signal,
                    lambda = lambda[l,],
                    d=d, uni = U, cv = TRUE)
      cve[l,i+1] <- z$cve
    }
  }
}
# plot the results of the grid search in the 6 (2*3) conditions:
# one figure per column of cve after the first (the lambda grid itself).
# seq_len(ncol() - 1) keeps the loop in sync with the columns of cve.
for(i in seq_len(ncol(cve) - 1)){
  nm <- names(cve)[i+1] # condition, e.g. "scv_e1"
  # Map the column through the .data pronoun instead of indexing cve from
  # outside aes(), which is fragile. The original also had a bare `grides`
  # line here; inside a for loop that does not auto-print, so it was a no-op
  # and has been removed - the plot is rendered explicitly with plot() below.
  grides <-
    ggplot(data = cve, aes(x = lambda, y = .data[[nm]])) +
    labs(title = "", x = "lambda", y = "cross-validation standard error") +
    theme_bw() +
    geom_line()
  # save plot to an .eps file named after the condition
  postscript(paste0(mypath, nm, ".eps"))
  plot(grides)
  dev.off()
}
## -------------------------------------------------------------------------------------
|
##run_analysis.R
##Set your Working Directory.
##Once we have set the working directory, place the download UCI HAR Dataset into the working directory.
##Following Code is used to read the X_test, X_train, y_test, y_train, activities_label, features, subject_test,
##subject train dataset into R from the given UCI HAR Dataset
##(X_* hold the measurements, y_* the activity codes, subject_* the subject ids,
##features the measurement names, activity_labels the code-to-name lookup)
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt", quote="\"", stringsAsFactors=FALSE)
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt", quote="\"", stringsAsFactors=FALSE)
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt", quote="\"",stringsAsFactors=FALSE)
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt", quote="\"", stringsAsFactors=FALSE)
features <- read.table("./UCI HAR Dataset/features.txt", quote="\"")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt", quote="\"", stringsAsFactors=FALSE)
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt", quote="\"", stringsAsFactors=FALSE)
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt", quote="\"", stringsAsFactors=FALSE)
###################################################################################################
##1. Merges the training and the test sets to create one data set.#################################
###################################################################################################
##Following part of script is used to merge X_train and X_test datasets.
##NOTE: test rows are stacked above train rows; the subject and activity tables
##are stacked in the same order later, so the rows stay aligned.
raw_dataset<-rbind(X_test,X_train)
###################################################################################################
##End of Step 1####################################################################################
###################################################################################################
###################################################################################################
##2.Extracts only the measurements on the mean and standard deviation for each measurement.########
###################################################################################################
##Apply the feature names (second column of `features`) as column names of the merged data set.
colnames(raw_dataset)<-features[,2]
##Select only the mean() and std() measurements.
##NOTE: in the pattern "mean()" the "()" is an empty regex group, so it matches the
##substring "mean" anywhere - including in "meanFreq()" - which is why the meanFreq
##columns must be excluded explicitly below.
meancol<-grepl("mean()",features[,2])
meanFreqcol<-grepl("meanFreq()",features[,2])
meancol<-meancol & (!meanFreqcol) ##To remove those values of meancol where "meanFreq() occurs"
##Use grepl() (logical mask) here as well, for consistency with the mean() selection
##above; logical and integer indexing select the same columns in the same order.
stdcol<-grepl("std()",features[,2])
raw_dataset1<-raw_dataset[,meancol]
raw_dataset2<-raw_dataset[,stdcol]
raw_dataset<-cbind(raw_dataset1,raw_dataset2)
###################################################################################################
##End of Step 2####################################################################################
###################################################################################################
###################################################################################################
##3.Uses descriptive activity names to name the activities in the data set#########################
###################################################################################################
##Replace the numeric activity codes with their labels by indexing activity_labels
##with the code column (codes assumed to be 1..nrow(activity_labels))
y_test[,1]<-activity_labels[y_test[,1],2]##To apply activity labels to y test data set
y_train[,1]<-activity_labels[y_train[,1],2]##To apply activity labels to y train data set
activity<-rbind(y_test,y_train)
###################################################################################################
##End of Step 3####################################################################################
###################################################################################################
###################################################################################################
##4.Appropriately labels the data set with descriptive variable names.#############################
###################################################################################################
##Already done in step 2
##combining subject and activity columns to raw_dataset
##(test rows first, matching the rbind order of step 1, so rows stay aligned)
subject<-rbind(subject_test,subject_train)
subject_activity<-cbind(subject,activity)
colnames(subject_activity)<-c("SUBJECT","ACTIVITY")
raw_dataset1<-cbind(subject_activity,raw_dataset)
###################################################################################################
##End of Step 4####################################################################################
###################################################################################################
###################################################################################################
##5.From the data set in step 4, creates a second,independent tidy data set with the average of####
##each variable for each activity and each subject.################################################
###################################################################################################
##load plyr library (provides ddply for split-apply-combine by group)
library(plyr)
##Measurement column names; renamed from `names`, which masked the base function names()
measure_cols<-colnames(raw_dataset)
tidy_data<-ddply(raw_dataset1,.(SUBJECT,ACTIVITY),function(raw_dataset1) colMeans(raw_dataset1[,measure_cols]))
##TO REMOVE TEMPORARY DATASETS CREATED ABOVE#######################################################
list1<-c("raw_dataset","raw_dataset1","raw_dataset2","subject_activity","activity_labels","subject")
rm(list=list1)
###################################################################################################
##End of Step 5####################################################################################
###################################################################################################
################################################################################################### | /run_analysis.R | no_license | rajsingh605/Getting-and-Cleaning-Data | R | false | false | 5,794 | r | ##run_analysis.R
##Set your Working Directory.
##Once we have set the working directory, place the download UCI HAR Dataset into the working directory.
##Following Code is used to read the X_test, X_train, y_test, y_train, activities_label, features, subject_test,
##subject train dataset into R from the given UCI HAR Dataset
##(X_* hold the measurements, y_* the activity codes, subject_* the subject ids,
##features the measurement names, activity_labels the code-to-name lookup)
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt", quote="\"", stringsAsFactors=FALSE)
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt", quote="\"", stringsAsFactors=FALSE)
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt", quote="\"",stringsAsFactors=FALSE)
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt", quote="\"", stringsAsFactors=FALSE)
features <- read.table("./UCI HAR Dataset/features.txt", quote="\"")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt", quote="\"", stringsAsFactors=FALSE)
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt", quote="\"", stringsAsFactors=FALSE)
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt", quote="\"", stringsAsFactors=FALSE)
###################################################################################################
##1. Merges the training and the test sets to create one data set.#################################
###################################################################################################
##Following part of script is used to merge X_train and X_test datasets.
##NOTE: test rows are stacked above train rows; the subject and activity tables
##are stacked in the same order later, so the rows stay aligned.
raw_dataset<-rbind(X_test,X_train)
###################################################################################################
##End of Step 1####################################################################################
###################################################################################################
###################################################################################################
##2.Extracts only the measurements on the mean and standard deviation for each measurement.########
###################################################################################################
##Apply the feature names (second column of `features`) as column names of the merged data set.
colnames(raw_dataset)<-features[,2]
##Select only the mean() and std() measurements.
##NOTE: in the pattern "mean()" the "()" is an empty regex group, so it matches the
##substring "mean" anywhere - including in "meanFreq()" - which is why the meanFreq
##columns must be excluded explicitly below.
meancol<-grepl("mean()",features[,2])
meanFreqcol<-grepl("meanFreq()",features[,2])
meancol<-meancol & (!meanFreqcol) ##To remove those values of meancol where "meanFreq() occurs"
##Use grepl() (logical mask) here as well, for consistency with the mean() selection
##above; logical and integer indexing select the same columns in the same order.
stdcol<-grepl("std()",features[,2])
raw_dataset1<-raw_dataset[,meancol]
raw_dataset2<-raw_dataset[,stdcol]
raw_dataset<-cbind(raw_dataset1,raw_dataset2)
###################################################################################################
##End of Step 2####################################################################################
###################################################################################################
###################################################################################################
##3.Uses descriptive activity names to name the activities in the data set#########################
###################################################################################################
y_test[,1]<-activity_labels[y_test[,1],2]##To apply activity labels to y test data set
y_train[,1]<-activity_labels[y_train[,1],2]##To apply activity labels to y train data set
activity<-rbind(y_test,y_train)
###################################################################################################
##End of Step 3####################################################################################
###################################################################################################
###################################################################################################
##4.Appropriately labels the data set with descriptive variable names.#############################
###################################################################################################
##Already done in step 2
##combining subject and activity columns to raw_dataset
subject<-rbind(subject_test,subject_train)
subject_activity<-cbind(subject,activity)
colnames(subject_activity)<-c("SUBJECT","ACTIVITY")
raw_dataset1<-cbind(subject_activity,raw_dataset)
###################################################################################################
##End of Step 4####################################################################################
###################################################################################################
###################################################################################################
##5.From the data set in step 4, creates a second,independent tidy data set with the average of####
##each variable for each activity and each subject.################################################
###################################################################################################
##load plyr library
library(plyr)
names<-colnames(raw_dataset)
tidy_data<-ddply(raw_dataset1,.(SUBJECT,ACTIVITY),function(raw_dataset1) colMeans(raw_dataset1[,names]))
##TO REMOVE TEMPORARY DATASETS CREATED ABOVE#######################################################
list1<-c("raw_dataset","raw_dataset1","raw_dataset2","subject_activity","activity_labels","subject")
rm(list=list1)
###################################################################################################
##End of Step 5####################################################################################
################################################################################################### |
###################################################################
#
# Expectiation-Maximization Algorithm for Gaussian Distribution
#
# Tim Whitson
#
###################################################################
library(mvtnorm)
empp <- function(data, K, iter.max = 100, epsilon = 1e-5){
data <- data.matrix(data)
dimensions <- dim(data)
# initialize parameters -
# mu = K-means++
# sigma = identity matrix, d x d
# prior = 1 / K
init <- function(){
params = list()
# k++
kpp_indexes <- sample(1:nrow(data), 1)
for(k in 2:K){
# get distance of each point for probability vector
p <- sapply(1:nrow(data), function(i){
min(sapply(1:length(kpp_indexes), function(j){
dist(rbind(data[i,], data[kpp_indexes[j],]))
})) ^ 2
})
new_index <- sample(1:nrow(data), 1, prob = p)
kpp_indexes <- c(kpp_indexes, new_index)
}
params$u <- data.matrix(data[kpp_indexes,])
params$s <- lapply(1:K, function(i){
diag(dimensions[2])
})
params$p <- sapply(1:K, function(i){
1 / K
})
params$l <- log.likelihood(params)
return(params)
}
# log likelihood
log.likelihood <- function(params){
l <- sum(log(rowSums(sapply(1:K, function(i){
params$p[i] * ns.dmvnorm(params$u[i,], params$s[[i]])
}))))
return(l)
}
# e-step
expectation <- function(params){
w <- c()
# probability density
for(i in 1:K){
w <- cbind(w, params$p[i] * ns.dmvnorm(params$u[i,], params$s[[i]]))
}
# divide each value by its row sum
w <- sweep(w, 1, rowSums(w), '/')
return(w)
}
# m-step
maximization <- function(w){
new_params <- list()
u <- matrix(nrow = K, ncol = dimensions[2])
s <- c()
for(i in 1:K){
w.i <- w[,i]
# update means
u[i,] <- colSums(data * w.i) / sum(w.i)
# update variance
dif <- sweep(data, 2, u[i,])
new.s <- t(dif) %*% (dif * w.i) / sum(w.i)
s[[i]] <- matrix(new.s, ncol = dimensions[2], byrow = TRUE)
}
new_params$u <- u
new_params$s <- s
# update probability
new_params$p <- colSums(w) / dimensions[1]
return(new_params)
}
# dmvnorm singular matrix fix
# add .001 to diagonal
ns.dmvnorm <- function(mu, sigma){
dn <- dmvnorm(data, mu, sigma)
if(all(dn == 0)){
new.s <- sigma
diag(new.s) <- diag(new.s) + .001
dn <- dmvnorm(data, mu, new.s)
}
return(dn)
}
# run EM
run <- function(){
params <- init()
iter.n <- 0
while(iter.n < iter.max){
# e-step
w <- expectation(params)
# iterate here, so if cluster is lost and has to be reset, does not count as iteration
iter.n <- iter.n + 1
# m-step
new_params <- maximization(w)
# distance between means
u.dist <- sum(sqrt(rowSums((params$u - new_params$u) ^ 2)))
# log likelihood
new_params$l <- log.likelihood(new_params)
converge <- (new_params$l - params$l) < epsilon
params <- new_params
if(converge){
break
}
}
labels <- apply(w, 1, function(c){
which.max(c)
})
# return values
params$w <- w
params$labels <- labels
params$iterations <- iter.n
return(params)
}
return(run())
} | /em/empp.R | no_license | twhitsn/ml-implementations | R | false | false | 4,138 | r | ###################################################################
#
# Expectiation-Maximization Algorithm for Gaussian Distribution
#
# Tim Whitson
#
###################################################################
library(mvtnorm)
# Expectation-Maximization clustering for a K-component Gaussian mixture.
# Means are seeded k-means++-style, covariances start as the identity, and
# mixing proportions start uniform.  Requires mvtnorm::dmvnorm.
#
# Args:
#   data     - numeric matrix / data frame; rows = observations.
#   K        - number of mixture components (K >= 1).
#   iter.max - maximum number of EM iterations (must be >= 1).
#   epsilon  - stop once the log-likelihood improves by less than this.
#
# Returns a list:
#   u          - K x d matrix of component means
#   s          - list of K covariance matrices (d x d)
#   p          - length-K vector of mixing proportions
#   l          - final log-likelihood
#   w          - n x K posterior membership weights
#   labels     - hard assignment per row (column index of max weight)
#   iterations - number of EM iterations actually run
empp <- function(data, K, iter.max = 100, epsilon = 1e-5){
  stopifnot(iter.max >= 1)   # run() needs at least one E-step to define w
  data <- data.matrix(data)
  dimensions <- dim(data)
  # initialize parameters -
  # mu = k-means++-style seeding, sigma = identity (d x d), prior = 1 / K
  init <- function(){
    params <- list()
    # First center uniformly at random; each later center is drawn with
    # probability proportional to its squared distance from the nearest
    # center already chosen.  seq_len(K - 1) is a no-op when K == 1
    # (2:K would run backwards and pick spurious centers).
    kpp_indexes <- sample(1:nrow(data), 1)
    for (k in seq_len(K - 1)) {
      p <- sapply(1:nrow(data), function(i){
        min(sapply(seq_along(kpp_indexes), function(j){
          dist(rbind(data[i, ], data[kpp_indexes[j], ]))
        })) ^ 2
      })
      kpp_indexes <- c(kpp_indexes, sample(1:nrow(data), 1, prob = p))
    }
    # drop = FALSE keeps a 1 x d matrix when K == 1
    params$u <- data.matrix(data[kpp_indexes, , drop = FALSE])
    params$s <- lapply(1:K, function(i) diag(dimensions[2]))
    params$p <- rep(1 / K, K)
    params$l <- log.likelihood(params)
    return(params)
  }
  # Observed-data log-likelihood under the current parameters.
  # vapply (rather than sapply) guarantees an n x K matrix even for K == 1,
  # so rowSums() is always valid.
  log.likelihood <- function(params){
    dens <- vapply(1:K, function(i){
      params$p[i] * ns.dmvnorm(params$u[i, ], params$s[[i]])
    }, numeric(nrow(data)))
    return(sum(log(rowSums(dens))))
  }
  # E-step: posterior probability of each component for every observation.
  expectation <- function(params){
    w <- vapply(1:K, function(i){
      params$p[i] * ns.dmvnorm(params$u[i, ], params$s[[i]])
    }, numeric(nrow(data)))
    # normalize each row to sum to one
    return(sweep(w, 1, rowSums(w), '/'))
  }
  # M-step: re-estimate means, covariances and mixing proportions from the
  # membership weights w (n x K).
  maximization <- function(w){
    new_params <- list()
    u <- matrix(nrow = K, ncol = dimensions[2])
    s <- vector("list", K)
    for (i in 1:K) {
      w.i <- w[, i]
      # weighted mean
      u[i, ] <- colSums(data * w.i) / sum(w.i)
      # weighted covariance
      dif <- sweep(data, 2, u[i, ])
      new.s <- t(dif) %*% (dif * w.i) / sum(w.i)
      s[[i]] <- matrix(new.s, ncol = dimensions[2], byrow = TRUE)
    }
    new_params$u <- u
    new_params$s <- s
    # update mixing proportions
    new_params$p <- colSums(w) / dimensions[1]
    return(new_params)
  }
  # dmvnorm with a near-singular fallback: if the density underflows to
  # zero for every observation, add .001 to the diagonal and retry.
  ns.dmvnorm <- function(mu, sigma){
    dn <- dmvnorm(data, mu, sigma)
    if (all(dn == 0)) {
      new.s <- sigma
      diag(new.s) <- diag(new.s) + .001
      dn <- dmvnorm(data, mu, new.s)
    }
    return(dn)
  }
  # Run EM: alternate E/M steps until the log-likelihood gain drops below
  # epsilon or iter.max is reached.  (The unused per-iteration mean-distance
  # computation in the original has been removed.)
  run <- function(){
    params <- init()
    iter.n <- 0
    while (iter.n < iter.max) {
      # e-step
      w <- expectation(params)
      iter.n <- iter.n + 1
      # m-step
      new_params <- maximization(w)
      # log likelihood and convergence test
      new_params$l <- log.likelihood(new_params)
      converge <- (new_params$l - params$l) < epsilon
      params <- new_params
      if (converge) {
        break
      }
    }
    # hard labels from the most recent E-step weights
    params$w <- w
    params$labels <- apply(w, 1, which.max)
    params$iterations <- iter.n
    return(params)
  }
  return(run())
}
library(tidyverse)
library(lubridate)
library(readr)
library(ggthemes)
library(scales)
library(stringr)
library(plotly)

## CDPHE county-level COVID-19 open data (one row per county/metric/date).
raw_data <- read_csv("../ERHS_Project/CDPHE_COVID19_County-Level_Open_Data_Repository.csv")

## Standardize column types, rename everything to lower case, and collapse
## the two response columns into one: each metric reports either a count
## (`value`) or a per-capita figure (`rate`), so coalesce() keeps whichever
## is present.
covid <- raw_data %>%
  mutate(COUNTY = as.factor(COUNTY),
         Date = mdy(Date),
         Desc_ = as.factor(Desc_),
         FIPS = as.factor(FIPS),
         FULL_ = as.factor(FULL_),
         LABEL = as.factor(LABEL),
         Metric = as.factor(Metric),
         ObjectId = as.factor(ObjectId),
         POP = as.factor(POP),
         Rate = as.numeric(Rate),
         Value = as.numeric(Value)) %>%
  rename(county = COUNTY,
         date = Date,
         descriptor = Desc_,
         fips = FIPS,
         full = FULL_,
         label = LABEL,
         metric = Metric,
         objectid = ObjectId,
         population = POP,
         rate = Rate,
         value = Value) %>%
  select(label, fips, population, descriptor, metric, value, rate, date, objectid) %>%
  mutate(response = coalesce(value, rate)) %>%
  select(label, fips, population, metric, response, date)
covid

#### per day data
## Daily new cases: sum the cumulative case count across counties per date,
## then difference consecutive days.  The first day has no prior value, so
## lag() leaves it NA.
state_new_cases <- covid %>%
  filter(metric %in% c("Cases")) %>%
  group_by(date) %>%
  summarize(state_cumulative_total_perday = sum(response)) %>%
  ungroup() %>%
  mutate(cases_perday = state_cumulative_total_perday - lag(state_cumulative_total_perday)) %>%
  select(date, cases_perday)
View(state_new_cases)
## Daily new deaths: sum the cumulative death count across counties per
## date, then difference consecutive days (first day is NA).
state_new_deaths <- covid %>%
  filter(metric == "Deaths") %>%
  group_by(date) %>%
  summarize(state_cumulative_total_perday = sum(response)) %>%
  ungroup() %>%
  mutate(deaths_perday = state_cumulative_total_perday - lag(state_cumulative_total_perday)) %>%
  ## Reporting corrections can make the cumulative series decrease; clamp the
  ## resulting negative daily counts to zero while keeping the column numeric.
  ## (The original referenced state_new_deaths$deaths_perday inside the very
  ## pipeline that defines state_new_deaths -- the object does not exist yet
  ## at that point -- and str_replace()-ing the "-" sign would have coerced
  ## the column to character and turned e.g. -3 into "03" rather than 0.)
  mutate(deaths_perday = pmax(deaths_perday, 0)) %>%
  select(date, deaths_perday)
View(state_new_deaths)
## Combine the per-day case and death series on date.
state_new_full <- left_join(state_new_cases, state_new_deaths, by = "date")
head(state_new_full)

## Exploratory plot: daily cases as a line.
perday_cases <- state_new_full %>%
  select(date, cases_perday) %>%
  ggplot(aes(x = date, y = cases_perday)) +
  geom_line()
perday_cases

## Exploratory plot: daily deaths as columns.
## NOTE(review): deaths_perday is built upstream with str_replace(), which
## coerces it to character -- confirm it is numeric before plotting.
perday_deaths <- state_new_full %>%
  select(date, deaths_perday) %>%
  ggplot(aes(x = date, y = deaths_perday)) +
  geom_col()
perday_deaths

## Combined figure: daily cases as filled bars, daily deaths as a line.
caseColor <- "#69b3a2"
deathColor <- "#8d61cf"
perday_plot <- state_new_full %>%
  ggplot(aes(x = date)) +
  geom_col(aes(y = cases_perday), size = 1, fill = caseColor) +
  geom_line(aes(y = deaths_perday), size = 0.7, color = deathColor) +
  labs(x = "", y = "") +
  scale_y_continuous(name = "Cases", labels = scales::comma) +
  scale_x_date(date_breaks = "1 month", date_labels = "%b") +
  ggtitle("Colorado New Cases and Deaths",
          subtitle = "per day") +
  theme_few()
perday_plot
## Interactive version of the combined figure.
ggplotly(perday_plot)
| /First graph_SVG.R | no_license | erhs-r/group_2 | R | false | false | 2,769 | r | library(tidyverse)
library(lubridate)
library(readr)
library(ggthemes)
library(scales)
library(stringr)
library(plotly)
raw_data <- read_csv("../ERHS_Project/CDPHE_COVID19_County-Level_Open_Data_Repository.csv")
covid <- raw_data %>%
mutate(COUNTY = as.factor(COUNTY),
Date = mdy(Date),
Desc_ = as.factor(Desc_),
FIPS = as.factor(FIPS),
FULL_ = as.factor(FULL_),
LABEL = as.factor(LABEL),
Metric = as.factor(Metric),
ObjectId = as.factor(ObjectId),
POP = as.factor(POP),
Rate = as.numeric(Rate),
Value = as.numeric(Value)) %>%
rename(county = COUNTY,
date = Date,
descriptor = Desc_,
fips = FIPS,
full = FULL_,
label = LABEL,
metric = Metric,
objectid = ObjectId,
population = POP,
rate = Rate,
value = Value) %>%
select(label, fips, population, descriptor, metric, value, rate, date, objectid) %>%
mutate(response = coalesce(value, rate)) %>%
select(label, fips, population, metric, response, date)
covid
#### per day data
state_new_cases <- covid %>%
filter(metric %in% c("Cases")) %>%
group_by(date) %>%
summarize(state_cumulative_total_perday = sum(response)) %>%
ungroup() %>%
mutate(cases_perday = state_cumulative_total_perday - lag(state_cumulative_total_perday)) %>%
select(date, cases_perday)
View(state_new_cases)
state_new_deaths <- covid %>%
filter(metric == "Deaths") %>%
group_by(date) %>%
summarize(state_cumulative_total_perday = sum(response)) %>%
ungroup() %>%
mutate(deaths_perday = state_cumulative_total_perday - lag(state_cumulative_total_perday)) %>%
mutate(deaths_perday = str_replace(state_new_deaths$deaths_perday, pattern = "[-]", "0")) %>%
select(date, deaths_perday)
View(state_new_deaths)
state_new_full <- left_join(state_new_cases, state_new_deaths, by = "date")
head(state_new_full)
perday_cases <- state_new_full %>%
select(date, cases_perday) %>%
ggplot(aes(x = date, y = cases_perday)) +
geom_line()
perday_cases
perday_deaths <- state_new_full %>%
select(date, deaths_perday) %>%
ggplot(aes(x = date, y = deaths_perday)) +
geom_col()
perday_deaths
caseColor <- "#69b3a2"
deathColor <- "#8d61cf"
perday_plot <- state_new_full %>%
ggplot(aes(x = date)) +
geom_col(aes(y = cases_perday), size = 1, fill = caseColor) +
geom_line(aes(y = deaths_perday), size = 0.7, color = deathColor) +
labs(x = "", y = "") +
scale_y_continuous(name = "Cases", labels = scales::comma) +
scale_x_date(date_breaks = "1 month", date_labels = "%b") +
ggtitle("Colorado New Cases and Deaths",
subtitle = "per day") +
theme_few()
perday_plot
ggplotly(perday_plot)
|
../quarterly_report_data_pull.R | /SigOps/quarterly_report_data_pull.R | no_license | atops/GDOT-Flexdashboard-Report | R | false | false | 31 | r | ../quarterly_report_data_pull.R |
library(shiny)

# Helper functions used below (simulate_tradeoff, getParams1, plot_sims, ...).
source("scripts/my_functions1.R")
source("scripts/my_functions2.R")

# Names of every input widget read from the UI; handed to getParams1() to
# collect the current values as a named list for the simulation.
paramNames <- c(
  "start_prop_value", "down_payment_pct", "mortgage_rate", "n_years",
  "initial_fixed_costs",
  "prop_tax_rate_pct", "prop_insurance", "HOA_monthly_fee",
  "start_rent", "rent_insurance",
  "annual_appreciation", "annual_appreciation_sd",
  "annual_inv", "annual_inv_sd",
  "annual_inflation", "annual_inflation_sd",
  "annual_rent_extra_increase_mean",
  "fraction_extra_cash_invested_pct",
  "income_tax_rate_pct", "itemized_deductions", "std_deduction",
  "n_sim"
)

shinyServer(function(input, output, session) {

  # Reactive simulation of the trade-off scenarios; re-runs automatically
  # whenever any of the inputs listed in paramNames changes.
  sim.tradeoff <- reactive({
    do.call(simulate_tradeoff, getParams1(input, paramNames))
  })

  # Plot of the simulated scenarios; redrawn whenever the simulation result
  # or the n_sim / n_years inputs change.
  output$multiPlot <- renderPlot({
    n.sim <- getParams1(input, "n_sim")[[1]]
    n.years <- getParams1(input, "n_years")[[1]]
    plot_sims(n.sim, n.years, sim.tradeoff())
  })
})
| /DevDataProd/App/server.R | no_license | pedrosan/DS_specialization | R | false | false | 1,788 | r | # libraries
library(shiny)
# my functions
source("scripts/my_functions1.R")
source("scripts/my_functions2.R")
# list of variables names
# paramNamesMort <- c("start_prop_value", "down_payment_pct", "mortgage_rate", "n_years")
paramNames <- c("start_prop_value", "down_payment_pct", "mortgage_rate", "n_years",
"initial_fixed_costs",
"prop_tax_rate_pct", "prop_insurance", "HOA_monthly_fee",
"start_rent", "rent_insurance",
"annual_appreciation", "annual_appreciation_sd",
"annual_inv", "annual_inv_sd",
"annual_inflation", "annual_inflation_sd",
"annual_rent_extra_increase_mean",
"fraction_extra_cash_invested_pct",
"income_tax_rate_pct", "itemized_deductions", "std_deduction",
"n_sim")
#---------------------------------------------------------------------------------------------------
shinyServer(function(input, output, session) {
#-------------------------------------------------------------------------------
# Function that generates and computes trade-off scenarios.
# The expression is wrapped in a call to "reactive" and
# therefore should be automatically re-executed when inputs change
#
sim.tradeoff <- reactive( do.call(simulate_tradeoff, getParams1(input, paramNames)) )
# Expression that plot simulated data.
# The expression is wrapped in a call to "renderPlot" and therefore
# it is "reactive" and should be automatically re-executed when inputs change.
# The output type is a plot.
#
output$multiPlot <- renderPlot({
n.sim <- getParams1(input, "n_sim")[[1]]
n.years <- getParams1(input, "n_years")[[1]]
plot_sims(n.sim, n.years, sim.tradeoff())
})
})
|
#=============================================================================
## Solve Linear Program problem
#=============================================================================
#
#
#
## lp2.R
#
#=============================================================================
# check if the package lpSolve already exists
# NOTE(review): %in% binds tighter than ==, so this compares the logical
# result to FALSE; it works, but installing packages at run time is a side
# effect worth confirming is wanted.
#=============================================================================
#
if("lpSolve" %in% rownames(installed.packages()) == FALSE)
{ install.packages("lpSolve")}
#
library(lpSolve)
#
##---------------------------------------------
## Setup problem: Maximize
#     8 x1 +      5 x2 +      2 x3   subject to
#    50 x1 +     30 x2 +     20 x3  <= 800
#       x1 +        x2 +        x3  <= 20
#       x1 + x2 / 1.2  + x3 / 1.8   <= 15
# NOTE(review): the matrix below encodes coefficients 1, 1/1.2, 1/1.8 for
# the third constraint; an earlier comment read "x1 + 1.2 x2 + 1.8 x3 <= 15"
# -- confirm which formulation is intended.
## ---------------------------------------------
f.obj <- c(8,5,2)
f.con <- matrix(c(50,30,20,1,1,1,1,1/1.2,1/1.8), nrow =3, byrow=TRUE)
f.dir <- c("<=","<=","<=")
f.rhs <- c(800,20,15)
#
## Execute the program
#
lp_ans <- lp("max",f.obj, f.con, f.dir, f.rhs)
#
## Extract the optimal decision variables.
zx1 <- lp_ans$solution[1]
zx2 <- lp_ans$solution[2]
zx3 <- lp_ans$solution[3]
#
## Objective value recomputed by hand (lp_ans$objval holds the same number).
z <- 8 * zx1 + 5 * zx2 + 2 * zx3
#
cat("\n\n The solution is given below:\n")
cat("x1 =  ",zx1,"\n")
cat("x2 =  ",zx2,"\n")
cat("x3 =  ",zx3,"\n")
cat("\n Max Objective Function value is",z,"\n\n")
#
| /lp2.r | no_license | pvs-greatlakes/R-materials | R | false | false | 1,364 | r | #=============================================================================
## Solve Linear Program problem
#=============================================================================
#
#
#
## lp2.R
#
#=============================================================================
# check if the package lpSolve alreasdy exists
#=============================================================================
#
if("lpSolve" %in% rownames(installed.packages()) == FALSE)
{ install.packages("lpSolve")}
#
library(lpSolve)
#
##---------------------------------------------
## Setup problem: Maxmimize
# 8 x1 + 5 x2 + 2 x3 subject to
# 50 x1 + 30 x2 + 20 x3 <= 800
# x1 + x2 + x3 <= 20
# x1 + 1.2 x2 + 1.8 x3 <= 15
## ---------------------------------------------
f.obj <- c(8,5,2)
f.con <- matrix(c(50,30,20,1,1,1,1,1/1.2,1/1.8), nrow =3, byrow=TRUE)
f.dir <- c("<=","<=","<=")
f.rhs <- c(800,20,15)
#
## Execute the program
#
lp_ans <- lp("max",f.obj, f.con, f.dir, f.rhs)
#
zx1 <- lp_ans$solution[1]
zx2 <- lp_ans$solution[2]
zx3 <- lp_ans$solution[3]
#
z <- 8 * zx1 + 5 * zx2 + 2 * zx3
#
cat("\n\n The solution is given below:\n")
cat("x1 = ",zx1,"\n")
cat("x2 = ",zx2,"\n")
cat("x3 = ",zx3,"\n")
cat("\n Max Objective Function value is",z,"\n\n")
#
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ccn.r
\docType{methods}
\name{icn}
\alias{icn}
\alias{icn,data.frame-method}
\alias{icn,matrix-method}
\alias{icn,mids-method}
\title{Incomplete cases n}
\usage{
icn(x)
}
\arguments{
\item{x}{An \code{R} object. Currently supported are methods for the
following classes: \code{mids}, \code{data.frame} and \code{matrix}. In
addition, \code{x} can be a vector of any kind.}
}
\value{
An integer with the number of elements in \code{x} with incomplete
data.
}
\description{
Calculates the number of incomplete cases. The companion function for
calculating the number of complete cases is \code{ccn()}.
}
\examples{
icn(nhanes) # the remaining 12 rows
icn(nhanes[,c("bmi","hyp")]) # number of cases with incomplete bmi and hyp
}
\author{
Stef van Buuren, 2010.
}
\seealso{
\code{\link{ccn}}, \code{\link{cc}}, \code{\link{ic}},
\code{\link{cci}}, \code{\link{ici}}
}
\keyword{univar}
| /man/icn-methods.Rd | no_license | andland/mice | R | false | false | 969 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ccn.r
\docType{methods}
\name{icn}
\alias{icn}
\alias{icn,data.frame-method}
\alias{icn,matrix-method}
\alias{icn,mids-method}
\title{Incomplete cases n}
\usage{
icn(x)
}
\arguments{
\item{x}{An \code{R} object. Currently supported are methods for the
following classes: \code{mids}, \code{data.frame} and \code{matrix}. In
addition, \code{x} can be a vector of any kind.}
}
\value{
An integer with the number of elements in \code{x} with incomplete
data.
}
\description{
Calculates the number of incomplete cases. The companion function for
calculating the number of complete cases is \code{ccn()}.
}
\examples{
icn(nhanes) # the remaining 12 rows
icn(nhanes[,c("bmi","hyp")]) # number of cases with incomplete bmi and hyp
}
\author{
Stef van Buuren, 2010.
}
\seealso{
\code{\link{ccn}}, \code{\link{cc}}, \code{\link{ic}},
\code{\link{cci}}, \code{\link{ici}}
}
\keyword{univar}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/2_buildSOM.R
\name{GetClusters}
\alias{GetClusters}
\title{Get cluster label for all individual cells}
\usage{
GetClusters(fsom)
}
\arguments{
\item{fsom}{FlowSOM object as generated by the FlowSOM function
or the BuildSOM function}
}
\value{
vector label for every cell
}
\description{
Get cluster label for all individual cells
}
| /man/GetClusters.Rd | no_license | AbhivKoladiya/FlowSOM | R | false | true | 410 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/2_buildSOM.R
\name{GetClusters}
\alias{GetClusters}
\title{Get cluster label for all individual cells}
\usage{
GetClusters(fsom)
}
\arguments{
\item{fsom}{FlowSOM object as generated by the FlowSOM function
or the BuildSOM function}
}
\value{
vector label for every cell
}
\description{
Get cluster label for all individual cells
}
|
# Stacked bar chart of counts by Region, one facet per the data's third
# column, written to a PDF.
#
# Command-line args: 1 = input TSV (tab-separated, with header),
#                    2 = output PDF path, 3 = y-axis label.
library(reshape2)   # kept for compatibility; melt() is no longer used here
library(ggplot2)

args <- commandArgs(TRUE)
input <- args[1]
output <- args[2]
ylabel <- args[3]

data <- read.table(input, sep="\t", header=T)

pdf(output)
# geom_bar(stat = "identity") plots the counts as-is.  (The original also
# passed stat="identify" -- a typo for "identity" -- inside aes(), where
# stat is not an aesthetic and was ignored.)
ggplot(data=data, aes(x=factor(1), y=counts, fill=factor(Region))) +
  geom_bar(stat="identity") +
  facet_grid(paste(".~", colnames(data)[3], sep="")) +
  ylab(ylabel) +
  xlab("")
dev.off()
| /bin/piechart.r | no_license | biovlab/biovlab_mcpg_snp_express | R | false | false | 493 | r | library(reshape2)
library(ggplot2)
args <- commandArgs(TRUE)
input <- args[1]
output <- args[2]
ylabel <- args[3]
data <- read.table(input, sep="\t",header=T)
#re_data <- melt(data, colnames(data)[1])
#re_data[,1] <- factor(re_data[,1], levels=re_data[1:100,1], ordered=TRUE)
pdf(output)
ggplot(data=data, aes(x=factor(1), y=counts, fill=factor(Region), stat="identify" ),) + geom_bar(stat="identity") + facet_grid(paste(".~",colnames(data)[3],sep="")) + ylab(ylabel) + xlab("")
dev.off()
|
#' @title Cases
#'
#' @description Provides a fine-grained summary of an event log with characteristics for each case: the number of events,
#' the number of activity types, the timespan, the trace, the duration and the first and last event type.
#'
#'
#' @param eventlog The event log to be used. An object of class
#' \code{eventlog}.
#'
#'
#' @examples
#'
#'
#' data(example_log)
#' cases(example_log)
#'
#' @export cases
cases <- function(eventlog){
	stop_eventlog(eventlog)

	traces_per_case <- cases_light(eventlog)
	durations <- durations(eventlog)

	# Rename the log's dynamic classifier columns to fixed names so the
	# dplyr verbs below can refer to them directly.
	colnames(traces_per_case)[colnames(traces_per_case)==case_id(eventlog)] <- "case_classifier"
	colnames(durations)[colnames(durations)==case_id(eventlog)] <- "case_classifier"
	colnames(eventlog)[colnames(eventlog)==activity_instance_id(eventlog)] <- "activity_instance_classifier"
	colnames(eventlog)[colnames(eventlog)==activity_id(eventlog)] <- "event_classifier"
	colnames(eventlog)[colnames(eventlog)==lifecycle_id(eventlog)] <- "life_cycle_classifier"
	colnames(eventlog)[colnames(eventlog)==case_id(eventlog)] <- "case_classifier"
	colnames(eventlog)[colnames(eventlog)==timestamp(eventlog)] <- "timestamp_classifier"

	# Per-case event counts, activity counts and time window.
	summary <- eventlog %>%
		group_by(case_classifier) %>%
		summarize(trace_length = n_distinct(activity_instance_classifier),
				  number_of_activities = n_distinct(event_classifier),
				  start_timestamp = min(timestamp_classifier),
				  complete_timestamp = max(timestamp_classifier))

	summary <- merge(summary, traces_per_case, "case_classifier")
	summary <- merge(summary, durations, "case_classifier")

	# First and last activity of each case, taken from the comma-separated
	# trace.  Each trace is split once here, instead of being re-split
	# several times per row in a loop.
	trace_parts <- strsplit(as.character(summary$trace), split = ",")
	summary$first_activity <- as.factor(vapply(trace_parts, function(p) p[1], character(1)))
	summary$last_activity <- as.factor(vapply(trace_parts, function(p) p[length(p)], character(1)))

	# Restore the caller's case id column name and return a tibble.
	colnames(summary)[colnames(summary)=="case_classifier"] <- case_id(eventlog)
	summary <- tbl_df(summary)
	return(summary)
}
| /edeaR/R/cases.r | no_license | ingted/R-Examples | R | false | false | 2,147 | r | #' @title Cases
#'
#' @description Provides a fine-grained summary of an event log with characteristics for each case: the number of events,
#' the number of activity types, the timespan, the trace, the duration and the first and last event type.
#'
#'
#' @param eventlog The event log to be used. An object of class
#' \code{eventlog}.
#'
#'
#' @examples
#'
#'
#' data(example_log)
#' cases(example_log)
#'
#' @export cases
cases <- function(eventlog){
stop_eventlog(eventlog)
traces_per_case <- cases_light(eventlog)
durations <- durations(eventlog)
colnames(traces_per_case)[colnames(traces_per_case)==case_id(eventlog )] <- "case_classifier"
colnames(durations)[colnames(durations)==case_id(eventlog)] <- "case_classifier"
colnames(eventlog)[colnames(eventlog)==activity_instance_id(eventlog)] <- "activity_instance_classifier"
colnames(eventlog)[colnames(eventlog)==activity_id(eventlog)] <- "event_classifier"
colnames(eventlog)[colnames(eventlog)==lifecycle_id(eventlog)] <- "life_cycle_classifier"
colnames(eventlog)[colnames(eventlog)==case_id(eventlog)] <- "case_classifier"
colnames(eventlog)[colnames(eventlog)==timestamp(eventlog)] <- "timestamp_classifier"
summary <- eventlog %>%
group_by(case_classifier) %>%
summarize(trace_length = n_distinct(activity_instance_classifier),
number_of_activities = n_distinct(event_classifier),
start_timestamp = min(timestamp_classifier),
complete_timestamp = max(timestamp_classifier))
summary <- merge(summary, traces_per_case, "case_classifier")
summary <- merge(summary, durations, "case_classifier")
for(i in 1:nrow(summary)){
summary$first_activity[i] <- strsplit(summary$trace[i], split = ",")[[1]][1]
summary$last_activity[i] <- strsplit(summary$trace[i], split = ",")[[1]][length(strsplit(summary$trace[i], split =",")[[1]])]
}
summary$first_activity <- as.factor(summary$first_activity)
summary$last_activity <- as.factor(summary$last_activity)
colnames(summary)[colnames(summary)=="case_classifier"] <- case_id(eventlog)
summary <- tbl_df(summary)
return(summary)
}
|
#' Inputs value into any LER model config file
#'
#' Dispatches to the writer that matches the given model's configuration
#' format: FLake and GLM use namelist files, GOTM uses YAML, Simstrat uses
#' JSON and MyLake uses its own config format.
#' @param model string; name of a model in the LER package
#' @param file filepath; to model-specific config file
#' @param label string; which corresponds to section where the key is located
#' @param key string; name of key in which to extract the value
#' @param value string; name of value to input into config file
#' @param out_file filepath; to write the output json file (optional);
#'   defaults to overwriting file if not specified
#'
#' @importFrom gotmtools input_yaml input_nml
#'
#' @examples
#' \dontrun{
#' input_config_value(model = "GOTM", file = "gotm.yaml",
#'                    label = "turb_param", key = "k_min", value = 0.00001)
#' }
#'
#' @export
input_config_value <- function(model, file, label, key, value, out_file = NULL) {
  if (model %in% c("FLake", "GLM")) {
    # FLake and GLM both use Fortran namelist configuration files
    gotmtools::input_nml(file = file, label = label, key = key,
                         value = value, out_file = out_file)
  } else if (model == "GOTM") {
    gotmtools::input_yaml(file = file, label = label, key = key,
                          value = value, out_file = out_file)
  } else if (model == "Simstrat") {
    input_json(file = file, label = label, key = key,
               value = value, out_file = out_file)
  } else if (model == "MyLake") {
    input_mylakeconfig(file = file, label = label, key = key,
                       value = value, out_file = out_file)
  } else {
    stop("\"", model, "\" is not recognised as a valid model argument by input_config_value")
  }
}
| /R/input_config_value.R | permissive | addelany/LakeEnsemblR | R | false | false | 1,648 | r | #' Inputs value into any LER model config file
#'
#' Wrapper function for functions that input values into model config files
#' @param model string; name of a model in the LER package
#' @param file filepath; to model-specific config file
#' @param label string; which corresponds to section where the key is located
#' @param key string; name of key in which to extract the value
#' @param value string; name of value to input into config file
#' @param out_file filepath; to write the output json file (optional);
#' defaults to overwriting file if not specified
#'
#' @importFrom gotmtools input_yaml input_nml
#'
#' @examples
#' \dontrun{
#' input_config_value(model = "GOTM", file = "gotm.yaml",
#' label = "turb_param", key = "k_min", value = 0.00001)
#' }
#'
#' @export
input_config_value <- function(model, file, label, key, value, out_file = NULL){
if(model == "FLake" | model == "GLM"){
return(gotmtools::input_nml(file = file, label = label, key = key,
value = value, out_file = out_file))
}else if(model == "GOTM"){
return(gotmtools::input_yaml(file = file, label = label, key = key,
value = value, out_file = out_file))
}else if(model == "Simstrat"){
return(input_json(file = file, label = label, key = key,
value = value, out_file = out_file))
}else if(model == "MyLake"){
return(input_mylakeconfig(file = file, label = label, key = key,
value = value, out_file = out_file))
}else{
stop("\"", model, "\" is not recognised as a valid model argument by input_config_value")
}
}
|
library(ggplot2)

# Posterior draws from the three fitted Stan models.
load('output/result-model8-1.RData')
load('output/result-model8-2.RData')
load('output/result-model8-3.RData')
ms1 <- rstan::extract(fit1)
ms2 <- rstan::extract(fit2)
ms3 <- rstan::extract(fit3)

K <- 4
probs <- c(0.025, 0.25, 0.5, 0.75, 0.975)
col_names <- c('KID', 'Model', 'p2.5', 'p25', 'p50', 'p75', 'p97.5')

# Quantile summary of a[1..K] for one model, with the group index shifted
# horizontally by `offset` so the two models' point ranges do not overlap.
make_qua <- function(ms, offset, model_label) {
  qua <- apply(ms$a, 2, quantile, prob = probs)
  d <- data.frame(1:K + offset, model_label, t(qua))
  colnames(d) <- col_names
  d
}
d_qua <- rbind(make_qua(ms2, -0.1, '8-2'), make_qua(ms3, 0.1, '8-3'))

# Point ranges (median with 50%/95% intervals) for models 8-2 and 8-3,
# with the pooled model 8-1 posterior median as a reference line.
p <- ggplot(data = d_qua,
            aes(x = KID, y = p50, ymin = p2.5, ymax = p97.5,
                shape = Model, linetype = Model, fill = Model)) +
  theme_bw(base_size = 18) +
  theme(legend.key.height = grid::unit(2.5, 'line')) +
  geom_pointrange(size = 0.6) +
  geom_hline(yintercept = median(ms1$a), color = 'black', alpha = 0.3,
             linetype = 'solid', size = 1.2) +
  scale_shape_manual(values = c(21, 21)) +
  scale_linetype_manual(values = c('31', 'solid')) +
  scale_fill_manual(values = c('white', 'black')) +
  labs(y = 'a')
ggsave(file = 'output/fig8-4-left.png', plot = p, dpi = 300, w = 4, h = 3)
| /chap08/fig8-4-left.R | no_license | winterwang/RStanBook | R | false | false | 1,206 | r | library(ggplot2)
load('output/result-model8-1.RData')
load('output/result-model8-2.RData')
load('output/result-model8-3.RData')
ms1 <- rstan::extract(fit1)
ms2 <- rstan::extract(fit2)
ms3 <- rstan::extract(fit3)
K <- 4
qua <- apply(ms2$a, 2, quantile, prob=c(0.025, 0.25, 0.5, 0.75, 0.975))
d_qua1 <- data.frame(1:K-0.1, '8-2', t(qua))
colnames(d_qua1) <- c('KID', 'Model', 'p2.5', 'p25', 'p50', 'p75', 'p97.5')
qua <- apply(ms3$a, 2, quantile, prob=c(0.025, 0.25, 0.5, 0.75, 0.975))
d_qua2 <- data.frame(1:K+0.1, '8-3', t(qua))
colnames(d_qua2) <- c('KID', 'Model', 'p2.5', 'p25', 'p50', 'p75', 'p97.5')
d_qua <- rbind(d_qua1, d_qua2)
p <- ggplot(data=d_qua, aes(x=KID, y=p50, ymin=p2.5, ymax=p97.5, shape=Model, linetype=Model, fill=Model))
p <- p + theme_bw(base_size=18) + theme(legend.key.height=grid::unit(2.5,'line'))
p <- p + geom_pointrange(size=0.6)
p <- p + geom_hline(yintercept=median(ms1$a), color='black', alpha=0.3, linetype='solid', size=1.2)
p <- p + scale_shape_manual(values=c(21, 21))
p <- p + scale_linetype_manual(values=c('31', 'solid'))
p <- p + scale_fill_manual(values=c('white','black'))
p <- p + labs(y='a')
ggsave(file='output/fig8-4-left.png', plot=p, dpi=300, w=4, h=3)
|
# initiate an rmetasim landscape with parameters
#
# Builds a landscape with num.pops populations x num.stgs stages, identical
# local demography (surv.matr / repr.matr / male.matr) in every population,
# linear stepping-stone migration, num.loc loci and user-supplied initial
# population sizes.  Returns the landscape (invisibly, as the value of the
# final assignment).
#
# NOTE(review): sample.size, mig.rates and num.gen are accepted but never
# used in this function -- confirm whether they should feed the setup.
rms.init.landscape <- function(num.pops = NULL, carrying = NULL, sample.size = NULL, mig.rates = NULL,
                               num.loc = NULL, loc.type = NULL, mut.rate = NULL, seq.length = NULL,
                               num.stgs = NULL, selfing = NULL,
                               surv.matr = NULL, repr.matr = NULL, male.matr = NULL,
                               init.pop.sizes = NULL, num.gen = NULL, num.alleles = NULL, allele.freqs = NULL) {
  # NOTE(review): exists() searches enclosing environments but rm() only
  # removes from the local frame, where "skeletonland" never pre-exists --
  # this line likely warns rather than clears a stale global object.
  if(exists("skeletonland")) rm(skeletonland)
  skeletonland<-landscape.new.empty()
  #define selfing rate
  skeletonland<-landscape.new.floatparam(skeletonland,s=selfing)
  #Hard coded in current generation, current epoch, max number generations, max number individuals
  skeletonland<-landscape.new.intparam(skeletonland,h=num.pops,s=num.stgs,cg=0,ce=0,totgen=100000,maxland=100000)
  #Hard coded in multiple paternity (yes) and density dependence (no) parameters
  skeletonland<-landscape.new.switchparam(skeletonland,re=0,rd=0,mp=1,dd=0)
  #local matrices, will give same demography to each local population
  for (i in 1:num.pops)
    skeletonland<-landscape.new.local.demo(skeletonland,surv.matr, repr.matr, male.matr)
  #cross habitat matrices
  # NOTE(review): the zero survival/male matrices are hard-coded 4x4, while
  # the migration matrix is sized from num.pops and num.stgs -- these only
  # agree when num.pops * num.stgs == 4.  Confirm the intended dimensions.
  epoch_s_matr<-matrix(0,nrow=4, ncol=4)
  epoch_r_matr<-landscape.mig.matrix(h=num.pops,s=num.stgs,mig.model="stepping.stone.linear")$R
  epoch_m_matr<-matrix(0,nrow=4, ncol=4)
  #no extinction allowed, hard coded
  skeletonland<-landscape.new.epoch(skeletonland,epochprob=1, epoch_s_matr, epoch_r_matr, epoch_m_matr,
                                    startgen=0,extinct=NULL,carry=carrying)
  #LOCI
  #Note that for SNPs, numalleles should be 2, allelesize only used for sequences
  #type = 0 IAM, type = 1 SMM type = 2 DNA sequence
  #assumes biparental transmission (transmission = 0)
  rms.locus.type = NULL
  if (loc.type == "SNP") {rms.locus.type = 2; num.alleles = 4; seq.length = rep(1,num.loc)}
  if (loc.type == "microsat") rms.locus.type = 1
  if (loc.type == "sequence") rms.locus.type = 2
  # NOTE(review): rms.locus.type is computed above but the call below
  # hard-codes type=1 (SMM) for every marker -- confirm whether
  # type=rms.locus.type was intended for SNP/sequence data.
  for (l in 1:num.loc)
    skeletonland<-landscape.new.locus(skeletonland, type=1, ploidy=2, mutationrate=mut.rate[l],
                                      transmission=0, numalleles=num.alleles[l], frequencies=allele.freqs, allelesize=seq.length[l])
  #assumes population initial sizes all defined nicely by user
  skeletonland<-landscape.new.individuals(skeletonland,init.pop.sizes)
}
| /Archive/NIMBioS.code/rmetasim/rms.init.landscape.R | no_license | christianparobek/skeleSim | R | false | false | 2,334 | r | # initiate an rmetasim landscape with parameters
rms.init.landscape <- function(num.pops = NULL, carrying = NULL, sample.size = NULL, mig.rates = NULL,
num.loc = NULL, loc.type = NULL, mut.rate = NULL, seq.length = NULL,
num.stgs = NULL, selfing = NULL,
surv.matr = NULL, repr.matr = NULL, male.matr = NULL,
init.pop.sizes = NULL, num.gen = NULL, num.alleles = NULL, allele.freqs = NULL) {
if(exists("skeletonland")) rm(skeletonland)
skeletonland<-landscape.new.empty()
#define selfing rate
skeletonland<-landscape.new.floatparam(skeletonland,s=selfing)
#Hard coded in current generation, current epoch, max number generations, max number individuals
skeletonland<-landscape.new.intparam(skeletonland,h=num.pops,s=num.stgs,cg=0,ce=0,totgen=100000,maxland=100000)
#Hard coded in multiple paternity (yes) and density dependence (no) parameters
skeletonland<-landscape.new.switchparam(skeletonland,re=0,rd=0,mp=1,dd=0)
#local matrices, will give same demography to each local population
for (i in 1:num.pops)
skeletonland<-landscape.new.local.demo(skeletonland,surv.matr, repr.matr, male.matr)
#cross habitat matrices
epoch_s_matr<-matrix(0,nrow=4, ncol=4)
epoch_r_matr<-landscape.mig.matrix(h=num.pops,s=num.stgs,mig.model="stepping.stone.linear")$R
epoch_m_matr<-matrix(0,nrow=4, ncol=4)
#no extinction allowed, hard coded
skeletonland<-landscape.new.epoch(skeletonland,epochprob=1, epoch_s_matr, epoch_r_matr, epoch_m_matr,
startgen=0,extinct=NULL,carry=carrying)
#LOCI
#Note that for SNPs, numalleles should be 2, allelesize only used for sequences
#type = 0 IAM, type = 1 SMM type = 2 DNA sequence
#assumes biparental transmission (transmission = 0)
rms.locus.type = NULL
if (loc.type == "SNP") {rms.locus.type = 2; num.alleles = 4; seq.length = rep(1,num.loc)}
if (loc.type == "microsat") rms.locus.type = 1
if (loc.type == "sequence") rms.locus.type = 2
for (l in 1:num.loc)
skeletonland<-landscape.new.locus(skeletonland, type=1, ploidy=2, mutationrate=mut.rate[l],
transmission=0, numalleles=num.alleles[l], frequencies=allele.freqs, allelesize=seq.length[l])
#assumes population initial sizes all defined nicely by user
skeletonland<-landscape.new.individuals(skeletonland,init.pop.sizes)
}
|
\name{cudaPointerGetAttributes}
\alias{cudaPointerGetAttributes}
\title{Returns attributes about a specified pointer}
\description{ Returns the attributes of the pointer \code{ptr}.}
\usage{cudaPointerGetAttributes(ptr)}
\arguments{
\item{ptr}{Pointer to get attributes for}
}
\value{attributes}
\seealso{\code{cudaGetDeviceCount}
\code{\link{cudaGetDevice}}
\code{cudaSetDevice}
\code{\link{cudaChooseDevice}}}
\references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.htm}}
\keyword{programming}
\concept{GPU}
| /man/cudaPointerGetAttributes.Rd | no_license | xfbingshan/RCUDA | R | false | false | 524 | rd | \name{cudaPointerGetAttributes}
\alias{cudaPointerGetAttributes}
\title{Returns attributes about a specified pointer}
\description{ Returns the attributes of the pointer \code{ptr}.}
\usage{cudaPointerGetAttributes(ptr)}
\arguments{
\item{ptr}{Pointer to get attributes for}
}
\value{attributes}
\seealso{\code{cudaGetDeviceCount}
\code{\link{cudaGetDevice}}
\code{cudaSetDevice}
\code{\link{cudaChooseDevice}}}
\references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.htm}}
\keyword{programming}
\concept{GPU}
|
rm(list = ls())
source(file.path("plots", "featureSelectionResultPlotting.R"))
source(file.path("plots", "dendrogramPlot.R"))
load(file.path("main", "resultData", "finalResultB.RData"))
barplotForElapsedMinutes(result)
boxplotsForAllMetrics(result)
plotDendrogram(clusteringResult$hclust) | /main/plotFinalResultB.R | permissive | lacerdas/featureSelectionFramework | R | false | false | 292 | r |
rm(list = ls())
source(file.path("plots", "featureSelectionResultPlotting.R"))
source(file.path("plots", "dendrogramPlot.R"))
load(file.path("main", "resultData", "finalResultB.RData"))
barplotForElapsedMinutes(result)
boxplotsForAllMetrics(result)
plotDendrogram(clusteringResult$hclust) |
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551355892e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615831981-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551355892e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
library(choplump)
### Name: choplump
### Title: Choplump test
### Aliases: choplump choplump.default choplump.formula
### Keywords: htest
### ** Examples
set.seed(1)
Ntotal<-200
Mtotal<-12
Z<-rep(0,Ntotal)
Z[sample(1:Ntotal,Ntotal/2,replace=FALSE)]<-1
test<-data.frame(W=c(rep(0,Ntotal-Mtotal),abs(rnorm(Mtotal))),Z=Z)
## defaults to asymptotic approximation if
## the number of calculations of the test
## statistic is greater than parms
## see help for methodRule1
choplump(W~Z,data=test,use.ranks=TRUE)
## alternate form
cout<-choplump(test$W[test$Z==0],test$W[test$Z==1],use.ranks=TRUE,exact=TRUE)
cout
cout$p.values
| /data/genthat_extracted_code/choplump/examples/choplump.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 630 | r | library(choplump)
### Name: choplump
### Title: Choplump test
### Aliases: choplump choplump.default choplump.formula
### Keywords: htest
### ** Examples
set.seed(1)
Ntotal<-200
Mtotal<-12
Z<-rep(0,Ntotal)
Z[sample(1:Ntotal,Ntotal/2,replace=FALSE)]<-1
test<-data.frame(W=c(rep(0,Ntotal-Mtotal),abs(rnorm(Mtotal))),Z=Z)
## defaults to asymptotic approximation if
## the number of calculations of the test
## statistic is greater than parms
## see help for methodRule1
choplump(W~Z,data=test,use.ranks=TRUE)
## alternate form
cout<-choplump(test$W[test$Z==0],test$W[test$Z==1],use.ranks=TRUE,exact=TRUE)
cout
cout$p.values
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpolate_gbd.R
\name{interpolate_gbd}
\alias{interpolate_gbd}
\title{FUNCTION_TITLE}
\usage{
interpolate_gbd(gbd)
}
\arguments{
\item{gbd}{PARAM_DESCRIPTION}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
FUNCTION_DESCRIPTION
}
\details{
DETAILS
}
\examples{
\dontrun{
if (interactive()) {
# EXAMPLE1
}
}
}
| /mbg/mbg_core_code/mbg_central/LBDCore/man/interpolate_gbd.Rd | no_license | The-Oxford-GBD-group/typhi_paratyphi_modelling_code | R | false | true | 388 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpolate_gbd.R
\name{interpolate_gbd}
\alias{interpolate_gbd}
\title{FUNCTION_TITLE}
\usage{
interpolate_gbd(gbd)
}
\arguments{
\item{gbd}{PARAM_DESCRIPTION}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
FUNCTION_DESCRIPTION
}
\details{
DETAILS
}
\examples{
\dontrun{
if (interactive()) {
# EXAMPLE1
}
}
}
|
## Pair of functions that cache the inverse of a matrix
## Following function creates a matrix object that cache its inverse
## Creates a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessors:
##   set(matrix)     -- replace the stored matrix and clear the cached inverse
##   get()           -- return the stored matrix
##   setInverse(val) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  # BUG FIX: the original stored the new matrix in a stray variable `m`
  # (leaking into an enclosing environment via <<-) while get() returned the
  # then-undefined `m`.  Store the value in the constructor argument `x` so
  # set()/get() share one piece of state inside this closure.
  set <- function(matrix) {
    x <<- matrix
    inverse <<- NULL  # invalidate any inverse cached for the old matrix
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse_val) {
    inverse <<- inverse_val
  }
  getInverse <- function() {
    inverse
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix().
## If the inverse has already been computed (and the matrix has not changed),
## the cached inverse is retrieved instead of being recomputed.
##
## x   : cache object created by makeCacheMatrix()
## ... : further arguments passed on to solve()
## Returns a matrix that is the inverse of x$get().
cacheSolve <- function(x, ...) {
  m <- x$getInverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # BUG FIX: the original computed solve(data) %*% data, which is the
  # identity matrix, not the inverse.  Cache and return the inverse itself.
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
| /cachematrix.R | no_license | visheshtayal/ProgrammingAssignment2 | R | false | false | 921 | r | ## Pair of functions that cache the inverse of a matrix
## Following function creates a matrix object that cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inverse<-NULL
set<-function(matrix)
{
m <<-matrix
inverse <<-NULL
}
get<-function()
{
m
}
setInverse<-function(inverse_val)
{
inverse<<-inverse_val
}
getInverse<-function()
{
inverse
}
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function calculates the inverse of the matrix object given by the above function
## and if the matrix is unchanged then the inverse is retrieved from the cache
cacheSolve <- function(x, ...) {
m<-x$getInverse()
if(!is.null(m))
{
message("getting cached data")
return (m)
}
data<-x$get()
m<-solve(data)%*%data
x$setInverse(m)
m
}
|
#' Summarise z values over binned wind data.
#'
#' @description Binning is done by StatWind, so input data to [stat_summary_wind()] should be original unbinned data.
#' Depending on the groups argument, binning is either done 2-dimensional over cartesian u and v wind vectors
#' (calculated from input data; then, [stat_summary_wind()] yields results similar to [openair::polarPlot()])
#' or 1-dimensional over wind direction or wind velocity bins, respectively.
#'
#' @param mapping ggplot2 mapping, e.g. aes(wd = wd, ws = ws, z = NOx); requires wd, ws, z
#' @param data The data to be displayed in this layer. Requires input data including at least three
#' columns carrying information regarding:
#' * wind direction (in °)
#' * wind velocity
#' * z-values (e.g. air pollutant concentration)
#' @param fun function or list of functions for summary.
#' @param ... other arguments passed on to [ggplot2::layer()] as `params = list(...)`.
#' @param fun.args a list of extra arguments to pass to fun.
#' @param nmin numeric, minimum number of values for fun, if n < nmin: NA is returned
#' @param ws_max numeric or NA, maximum wind velocity for binning: above ws_max, z is set NA
#' @param bins numeric, number of bins over the range of values if `!groups %in% c("u", "v")`
#' @param smooth TRUE/FALSE, applies if `groups = c("u", "v")`; should smoothing of summary results be performed
#' using [fit_gam_surface()]?
#' @param k numeric, applies if smooth = TRUE; degree of smoothing in smooth term in fit_gam_surface()
#' @param extrapolate TRUE/FALSE, applies if smooth = TRUE; fit_gam_surface() returns extrapolated (predicted)
#' values for u, v coordinates that otherwise would have NA for summarised z; if extrapolate = TRUE,
#' those values are returned (to a certain degree depending on the value of dist)
#' @param dist numeric, fraction of 1, applies if smooth = TRUE and extrapolate = TRUE; maximum distance to next
#' coordinate-pair at which the result of fit_gam_surface(z) should be returned
#' @param geom The geometric object to use display the data (in this case: raster).
#' @param na.rm If `FALSE`, the default, missing values are removed with
#' a warning. If `TRUE`, missing values are silently removed.
#' @inheritParams ggplot2::layer
#'
#' @return ggplot2 layer
#'
#' @section Aesthetics:
#'
#' * wd: wind direction in degrees
#' * ws: wind velocity
#' * z: z values to be summarised
#'
#' @section Computed variables:
#'
#' * If groups = c("u", "v"): a tibble is returned, binned over u and v, with variables:
#' - wd: wind direction corresponding to midpoint value of u and v
#' - ws: wind velocity corresponding to midpoint value of u and v
#' - wd_class: new bins over wd considering binwidth
#' - ws_class: new bins over ws considering binwidth and ws_max
#' - u: bins over u (from input wd and ws)
#' - v: bins over v (from input wd and ws)
#' - z: result from fun(z, ...)
#' * If groups = NULL: groups = "wd". In this case, bins are calculated over wind direction;
#' a tibble including wd_class and summarised z is returned
#' * groups can be strings for other variables in data; then fun is applied over those;
#' a tibble including groups and summarised z is returned
#'
#' @export
#'
#' @examples
#' library(ggplot2)
#'
#' fn <- rOstluft.data::f("Zch_Stampfenbachstrasse_2010-2014.csv")
#'
#' df <- rOstluft::read_airmo_csv(fn) %>%
#' rOstluft::rolf_to_openair()
#'
#' ggplot(df, aes(x = stat(u), y = stat(v), fill = stat(z))) +
#' stat_summary_wind_2d(mapping = aes(wd = wd, ws = ws, z = NO2), bins = 50^2) +
#' coord_cartpolar() +
#' scale_fill_viridis_c(na.value = NA)
stat_summary_wind_2d <- function (data = NULL, mapping = NULL, geom = "raster", position = "identity",
                                  ...,
                                  fun = "mean",
                                  fun.args = list(),
                                  nmin = 1,
                                  ws_max = NA,
                                  bins = 10^2,
                                  smooth = TRUE,
                                  k = 100,
                                  extrapolate = TRUE,
                                  dist = 0.1,
                                  na.rm = FALSE,
                                  show.legend = NA,
                                  inherit.aes = TRUE) {
  # Collect the stat-specific parameters once; anything further supplied via
  # ... is forwarded to the layer unchanged.
  stat_params <- list(
    fun = fun,
    fun.args = fun.args,
    nmin = nmin,
    ws_max = ws_max,
    bins = bins,
    smooth = smooth,
    k = k,
    extrapolate = extrapolate,
    dist = dist,
    na.rm = na.rm
  )
  # Construct the ggplot2 layer backed by StatSummaryWind2d.
  layer(
    data = data,
    mapping = mapping,
    stat = StatSummaryWind2d,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = c(stat_params, list(...))
  )
}
#' @rdname rOstluft-ggproto
#' @export
# ggproto Stat backing stat_summary_wind_2d(): delegates the binning and
# summarising of the (wd, ws, z) aesthetics to summary_wind_2d() per group.
StatSummaryWind2d <- ggproto("StatSummaryWind2d", Stat,
  # Summarise one group of the layer data; the parameter defaults mirror
  # those of stat_summary_wind_2d() so direct use of the Stat behaves the same.
  compute_group = function(data, scales,
                           fun = "mean",
                           fun.args = list(),
                           nmin = 1,
                           ws_max = NA,
                           bins = 10^2,
                           smooth = TRUE,
                           k = 100,
                           extrapolate = TRUE,
                           dist = 0.1) {
    summary_wind_2d(data = data, wd = "wd", ws = "ws", z = "z", fun = fun, fun.args = fun.args, nmin = nmin,
                    ws_max = ws_max, smooth = smooth, k = k, extrapolate = extrapolate,
                    dist = dist, bins = bins)
  },
  # Aesthetics that must be supplied in the mapping.
  required_aes = c("wd", "ws", "z")
)
| /R/stat_summary_wind_2d.R | permissive | Ostluft/rOstluft.plot | R | false | false | 5,630 | r | #' Summarise z values over binned wind data.
#'
#' @description Binning is done by StatWind, so input data to[stat_summary_wind()] should be original unbinned data.
#' Depending on the groups argument, binning is either done 2-dimensional over cartesian u and v wind vectors
#' (calculated from input data; then, [stat_summary_wind()] yields results similar to [openair::polarPlot()])
#' or 1-dimensional over wind direction or wind velocity bins, respectively.
#'
#' @param mapping ggplot2 mapping, e.g. aes(wd = wd, ws = ws, z = NOx); requires wd, ws, z
#' @param data The data to be displayed in this layer. Requires input data including at least three
#' columns carrying information regarding:
#' * wind direction (in °)
#' * wind velocity
#' * z-values (e.g. air pollutant concentration)
#' @param fun function or list of functions for summary.
#' @param ... other arguments passed on to [ggplot2::layer()] as `params = list(...)`.
#' @param fun.args a list of extra arguments to pass to fun.
#' @param nmin numeric, minimum number of values for fun, if n < nmin: NA is returned
#' @param ws_max numeric or NA, maximum wind velocity for binning: above ws_max, z is set NA
#' @param bins numeric, number of bins over the range of values if `!groups %in% c("u", "v")`
#' @param smooth TRUE/FALSE, applies if `groups = c("u", "v")`; should smoothing of summary results should be performed
#' using [fit_gam_surface()]?
#' @param k numeric, applies if smooth = TRUE; degree of smoothing in smooth term in fit_gam_surface()
#' @param extrapolate TRUE/FALSE, applies if smooth = TRUE; fit_gam_surface() returns extrapolated (predicted)
#' values for u, v coordinates that otherwise would have have NA for summarised z if extrapolate = TRUE,
#' those values are returned (to a certain degree depending on the value of dist)
#' @param dist numeric, fraction of 1, applies if smooth = TRUE and extrapolate = TRUE; maximum distance to next
#' coordinate-pair at which the result of fit_gam_surface(z) should be returned
#' @param geom The geometric object to use display the data (in this case: raster).
#' @param na.rm If `FALSE`, the default, missing values are removed with
#' a warning. If `TRUE`, missing values are silently removed.
#' @inheritParams ggplot2::layer
#'
#' @return ggplot2 layer
#'
#' @section Aesthetics:
#'
#' * wd: wind direction in degrees
#' * ws: wind velocity
#' * z: z values to be summarised
#'
#' @section Computed variables:
#'
#' * If groups = c("u", "v"): a tibble is returned, binned over u and v, with variables:
#' - wd: wind direction corresponding to midpoint value of u and v
#' - ws: wind velocity corresponding to midpoint value of u and v
#' - wd_class: new bins over wd considering binwidth
#' - ws_class: new bins over ws considering binwidth and ws_max
#' - u: bins over u (from input wd and ws)
#' - v: bins over v (from input wd and ws)
#' - z: result from fun(z, ...)
#' * If groups = NULL: groups = "wd". In this case, bins are calculated over wind direction;
#' a tibble including wd_class and summarised z is returned
#' * groups can be strings for other varibables in data; then fun is applied over those;
#' a tibble including groups and summarised z is returned
#'
#' @export
#'
#' @examples
#' library(ggplot2)
#'
#' fn <- rOstluft.data::f("Zch_Stampfenbachstrasse_2010-2014.csv")
#'
#' df <- rOstluft::read_airmo_csv(fn) %>%
#' rOstluft::rolf_to_openair()
#'
#' ggplot(df, aes(x = stat(u), y = stat(v), fill = stat(z))) +
#' stat_summary_wind_2d(mapping = aes(wd = wd, ws = ws, z = NO2), bins = 50^2) +
#' coord_cartpolar() +
#' scale_fill_viridis_c(na.value = NA)
stat_summary_wind_2d <- function (data = NULL, mapping = NULL, geom = "raster", position = "identity",
...,
fun = "mean",
fun.args = list(),
nmin = 1,
ws_max = NA,
bins = 10^2,
smooth = TRUE,
k = 100,
extrapolate = TRUE,
dist = 0.1,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE) {
layer(
data = data,
mapping = mapping,
stat = StatSummaryWind2d,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
fun = fun,
fun.args = fun.args,
nmin = nmin,
ws_max = ws_max,
bins = bins,
smooth = smooth,
k = k,
extrapolate = extrapolate,
dist = dist,
na.rm = na.rm,
...
)
)
}
#' @rdname rOstluft-ggproto
#' @export
StatSummaryWind2d <- ggproto("StatSummaryWind2d", Stat,
compute_group = function(data, scales,
fun = "mean",
fun.args = list(),
nmin = 1,
ws_max = NA,
bins = 10^2,
smooth = TRUE,
k = 100,
extrapolate = TRUE,
dist = 0.1) {
summary_wind_2d(data = data, wd = "wd", ws = "ws", z = "z", fun = fun, fun.args = fun.args, nmin = nmin,
ws_max = ws_max, smooth = smooth, k = k, extrapolate = extrapolate,
dist = dist, bins = bins)
},
required_aes = c("wd", "ws", "z")
)
|
# Nonlinearizable C-QE3
# Common variances for two segments
# Smoothness
#
# Grid search for the ML change point of the segmented quadratic/exponential
# (QE3) model with a common variance and a smoothness constraint at the join.
#
# x, y       : data vectors of length n
# jlo, jhi   : indices bounding the candidate change points
# start1..4  : starting values forwarded to the per-candidate fit
# Returns a list with jhat (index of the ML change point) and the maximised
# log-likelihood value.
llsearch.QE3.CCS <- function(x, y, n, jlo, jhi, start1, start2, start3, start4)
{
  # Evaluate the profile log-likelihood at every candidate change point.
  # (The unused `fj` vector and a dead initial `fxy` store were removed.)
  jgrid <- expand.grid(jlo:jhi)
  k.ll <- apply(jgrid, 1, p.estFUN.QE3.CCS, x = x, y = y, n = n,
                start1 = start1, start2 = start2, start3 = start3, start4 = start4)
  fxy <- matrix(k.ll, nrow = jhi - jlo + 1)
  rownames(fxy) <- jlo:jhi
  # Pick the candidate with the largest log-likelihood.
  z <- findmax(fxy)
  jcrit <- z$imax + jlo - 1
  list(jhat = jcrit, value = max(fxy))
}
# Profile log-likelihood at a single candidate change point j: fit the
# segmented model there and plug its variance estimate into p.ll.CCS().
p.estFUN.QE3.CCS <- function(j, x, y, n, start1, start2, start3, start4) {
  fit <- p.est.QE3.CCS(x, y, n, j, start1, start2, start3, start4)
  p.ll.CCS(n, j, fit$sigma2)
}
# ML estimates of the segmented quadratic/exponential model for a change
# point fixed at index j.
#
# Segment 1 (x <= x[j]): quadratic   a0 + a1*x + a2*x^2.
# Segment 2 (x >  x[j]): exponential b0 + b1*exp(b2*(x - x[j])), where b0 and
# b1 are derived so the two segments meet at x[j] with matching slope
# (a1 + 2*a2*x[j] = b1*b2).
#
# NOTE(review): start1..start4 are accepted but ignored; the nls() starting
# values are hard-coded below -- confirm whether they should be forwarded.
#
# Returns the six coefficients, the pooled variance estimate sigma2, and xj.
p.est.QE3.CCS <- function(x,y,n,j,start1,start2,start3,start4){
  # Split the data at the candidate change point.
  xa <- x[1:j]
  ya <- y[1:j]
  jp1 <- j+1
  xb <- x[jp1:n]
  yb <- y[jp1:n]
  # Joint fit of both segments; the second-segment expression already encodes
  # the continuity and smoothness constraints via (a1 + 2*a2*x[j]).
  fun <- nls(y ~ I(x <= x[j])*(a0 + a1*x + a2*x^2) +
               I(x > x[j])*(a0 + a1*x[j]+a2*x[j]^2 - (a1+2*a2*x[j])/b2 + (a1+2*a2*x[j])/b2*exp(b2*(x-x[j]))),
             start = list(a0 = 5, a1 = -2, a2= -1, b2 = 0.3))
  a0 <- summary(fun)$coe[1]
  a1 <- summary(fun)$coe[2]
  a2 <- summary(fun)$coe[3]
  b2 <- summary(fun)$coe[4]
  # Recover the dependent second-segment coefficients from the constraints.
  b1 <- (a1+2*a2*x[j])/b2
  b0 <- a0 + a1 * x[j] + a2 * x[j]^2 - b1
  beta <-c(a0, a1, a2, b0, b1, b2)
  # Pooled (common) variance estimate across both segments.
  s2<- (sum((ya-a0 - a1*xa - a2*xa^2)^2)+sum((yb-b0-b1*exp(b2*(xb-x[j])))^2))/n
  list(a0=beta[1],a1=beta[2],a2=beta[3],b0=beta[4],b1=beta[5],b2=beta[6],sigma2=s2,xj=x[j])
}
# Gaussian profile log-likelihood contribution for a change point at index j
# with common variance estimate s2 (normalising constant included).
p.ll.CCS <- function(n, j, s2) {
  # Negated sum of the constant term and the variance term.
  -(n * log(sqrt(2 * pi)) + 0.5 * j * (1 + log(s2)))
}
findmax <-function(a)
{
maxa<-max(a)
imax<- which(a==max(a),arr.ind=TRUE)[1]
jmax<-which(a==max(a),arr.ind=TRUE)[2]
list(imax = imax, jmax = jmax, value = maxa)
} | /vrcp/R/C-QE3.CS.R | no_license | ingted/R-Examples | R | false | false | 1,890 | r | # Nonlinearizable C-QE3
# Common variances for two segments
# Smoothness
llsearch.QE3.CCS <- function(x, y, n, jlo, jhi,start1,start2,start3,start4)
{
fj <- matrix(0, n)
fxy <- matrix(0, jhi - jlo + 1)
jgrid <- expand.grid(jlo:jhi)
k.ll <- apply(jgrid, 1, p.estFUN.QE3.CCS, x = x, y = y, n = n,start1=start1,start2=start2,start3=start3,start4=start4)
fxy <- matrix(k.ll, nrow = jhi-jlo+1)
rownames(fxy) <- jlo:jhi
z <- findmax(fxy)
jcrit <- z$imax + jlo - 1
list(jhat = jcrit, value = max(fxy))
}
# Function for deriving the ML estimates of the change-points problem.
p.estFUN.QE3.CCS <- function(j, x, y, n,start1,start2,start3,start4){
a <- p.est.QE3.CCS(x,y,n,j,start1,start2,start3,start4)
s2 <- a$sigma2
return(p.ll.CCS(n, j, s2))
}
p.est.QE3.CCS <- function(x,y,n,j,start1,start2,start3,start4){
xa <- x[1:j]
ya <- y[1:j]
jp1 <- j+1
xb <- x[jp1:n]
yb <- y[jp1:n]
fun <- nls(y ~ I(x <= x[j])*(a0 + a1*x + a2*x^2) +
I(x > x[j])*(a0 + a1*x[j]+a2*x[j]^2 - (a1+2*a2*x[j])/b2 + (a1+2*a2*x[j])/b2*exp(b2*(x-x[j]))),
start = list(a0 = 5, a1 = -2, a2= -1, b2 = 0.3))
a0 <- summary(fun)$coe[1]
a1 <- summary(fun)$coe[2]
a2 <- summary(fun)$coe[3]
b2 <- summary(fun)$coe[4]
b1 <- (a1+2*a2*x[j])/b2
b0 <- a0 + a1 * x[j] + a2 * x[j]^2 - b1
beta <-c(a0, a1, a2, b0, b1, b2)
s2<- (sum((ya-a0 - a1*xa - a2*xa^2)^2)+sum((yb-b0-b1*exp(b2*(xb-x[j])))^2))/n
list(a0=beta[1],a1=beta[2],a2=beta[3],b0=beta[4],b1=beta[5],b2=beta[6],sigma2=s2,xj=x[j])
}
# Function to compute the log-likelihood of the change-point problem
p.ll.CCS <- function(n, j, s2){
q1 <- n * log(sqrt(2 * pi))
q2 <- 0.5 * j * (1 + log(s2))
- (q1 + q2)
}
findmax <-function(a)
{
maxa<-max(a)
imax<- which(a==max(a),arr.ind=TRUE)[1]
jmax<-which(a==max(a),arr.ind=TRUE)[2]
list(imax = imax, jmax = jmax, value = maxa)
} |
unbiased_test_rejects <- function(mu, y, sigma, alpha = 0.05,
                                  trunc_lo = -Inf, trunc_hi = Inf) {
  # An observation outside the truncation set is rejected outright.
  in_truncation_set <- is_contained(y, c(trunc_lo, trunc_hi))
  if (in_truncation_set == FALSE) {
    return(TRUE)
  }
  # Otherwise reject exactly when y falls outside the unbiased acceptance
  # interval for the hypothesised mean mu.
  acceptance <- unbiased_interval(mu = mu, sigma = sigma, alpha = alpha,
                                  trunc_lo = trunc_lo, trunc_hi = trunc_hi)
  is_contained(y, acceptance) == FALSE
}
unbiased_interval <- function(mu, sigma, alpha = 0.05, trunc_lo = -Inf,
                              trunc_hi = Inf) {
  # The lower endpoint comes from the unbiasedness condition; the upper
  # endpoint is then pinned down by the 1 - alpha coverage requirement.
  lower <- solve_c_lo(mu = mu, sigma = sigma, alpha = alpha,
                      trunc_lo = trunc_lo, trunc_hi = trunc_hi)
  upper <- solve_c_hi(c_lo = lower, mu = mu, sigma = sigma,
                      trunc_lo = trunc_lo, trunc_hi = trunc_hi, alpha = alpha)
  c(lower, upper)
}
# Lower acceptance-interval endpoint for the unbiased test, found by
# one-dimensional minimisation of solve_c_lo_loss() over [trunc_lo, q_alpha].
#
# mu, sigma          : mean and sd of the underlying normal
# alpha              : significance level
# trunc_lo, trunc_hi : truncation bounds of the observed distribution
# Returns the optimised lower endpoint.
solve_c_lo <- function(mu, sigma, alpha = 0.05,
                       trunc_lo = -Inf, trunc_hi = Inf) {
  # Optimization Bounds -----------------------------------------------------
  # Start from the equal-tailed alpha/2 quantile; the lower endpoint can lie
  # anywhere between the truncation bound and the alpha quantile.
  c_lo_init <- q_truncnorm(p = alpha / 2,
                           mean = mu,
                           sd = sigma,
                           trunc_lo = trunc_lo,
                           trunc_hi = trunc_hi)
  c_lo_min <- trunc_lo
  c_lo_max <- q_truncnorm(p = alpha,
                          mean = mu,
                          sd = sigma,
                          trunc_lo = trunc_lo,
                          trunc_hi = trunc_hi)
  # Optimization ------------------------------------------------------------
  # Brent's method: bounded scalar minimisation of the unbiasedness loss.
  opt_result <- stats::optim(par = c_lo_init,
                             fn = solve_c_lo_loss,
                             mu = mu,
                             sigma = sigma,
                             alpha = alpha,
                             trunc_lo = trunc_lo,
                             trunc_hi = trunc_hi,
                             method = "Brent",
                             lower = c_lo_min,
                             upper = c_lo_max)
  # Return ------------------------------------------------------------------
  opt_result$par
}
# Loss measuring how far a candidate lower endpoint is from satisfying the
# unbiasedness condition  E[Z * 1{Z in (c_lo, c_hi)}] = (1 - alpha) * E[Z],
# where c_hi is the coverage-matched upper endpoint for this c_lo.
# Minimised over c_lo by solve_c_lo().
solve_c_lo_loss <- function(c_lo, mu, sigma, alpha = 0.05,
                            trunc_lo = -Inf, trunc_hi = Inf) {
  # Get interval length: upper endpoint implied by this lower endpoint.
  c_hi <- solve_c_hi(c_lo = c_lo,
                     mu = mu,
                     sigma = sigma,
                     trunc_lo = trunc_lo,
                     trunc_hi = trunc_hi,
                     alpha = alpha)
  c_z <- c(c_lo, c_hi)
  # E[zeta * indicator]: truncated-normal mean restricted to the interval.
  e_zi <- e_truncnorm_times_indicator(mu = mu,
                                      sigma = sigma,
                                      c_z = c_z,
                                      trunc_lo = trunc_lo,
                                      trunc_hi = trunc_hi)
  e_z <- e_truncnorm(mean = mu,
                     sd = sigma,
                     trunc_lo = trunc_lo,
                     trunc_hi = trunc_hi)
  # Return the absolute violation of the unbiasedness condition.
  abs(e_zi - (1 - alpha) * e_z)
}
# Upper interval endpoint implied by a lower endpoint c_lo: the point whose
# truncated-normal CDF value exceeds that of c_lo by exactly 1 - alpha.
#
# c_lo               : candidate lower endpoint
# mu, sigma          : mean and sd of the underlying normal
# trunc_lo, trunc_hi : truncation bounds
# alpha              : significance level
# Returns the upper endpoint, or NaN if the CDF at c_lo is undefined.
solve_c_hi <- function(c_lo, mu, sigma, trunc_lo, trunc_hi, alpha) {
  p_c_lo <- p_truncnorm(q = c_lo,
                        mean = mu,
                        sd = sigma,
                        trunc_lo = trunc_lo,
                        trunc_hi = trunc_hi)
  p_c_hi <- p_c_lo + (1 - alpha)
  if (is.nan(p_c_hi)) return(NaN)
  # FIX: this branch previously dropped into browser(), a leftover debugging
  # hook that is a silent no-op in non-interactive use.  A probability above
  # one means c_lo is infeasible, so fail loudly with a clear message.
  if (p_c_hi > 1) {
    stop("solve_c_hi: P(c_lo) + (1 - alpha) exceeds 1; c_lo is infeasible",
         call. = FALSE)
  }
  q_truncnorm(p = p_c_hi,
              mean = mu,
              sd = sigma,
              trunc_lo = trunc_lo,
              trunc_hi = trunc_hi)
}
| /R/hypothesis_test_unbiased.R | permissive | adviksh/winference | R | false | false | 3,960 | r | unbiased_test_rejects <- function(mu, y, sigma, alpha = 0.05,
trunc_lo = -Inf, trunc_hi = Inf) {
# Immediately reject if observation falls outside the truncation set
if (is_contained(y, c(trunc_lo, trunc_hi)) == FALSE) return(TRUE)
y_interval <- unbiased_interval(mu = mu,
sigma = sigma,
alpha = alpha,
trunc_lo = trunc_lo,
trunc_hi = trunc_hi)
# Reject if y is outside the interval
is_contained(y, y_interval) == FALSE
}
unbiased_interval <- function(mu, sigma, alpha = 0.05, trunc_lo = -Inf,
trunc_hi = Inf) {
c_lo <- solve_c_lo(mu = mu,
sigma = sigma,
trunc_lo = trunc_lo,
trunc_hi = trunc_hi,
alpha = alpha)
c_hi <- solve_c_hi(c_lo = c_lo,
mu = mu,
sigma = sigma,
trunc_lo = trunc_lo,
trunc_hi = trunc_hi,
alpha = alpha)
c(c_lo, c_hi)
}
solve_c_lo <- function(mu, sigma, alpha = 0.05,
                       trunc_lo = -Inf, trunc_hi = Inf) {
  # Numerically locate the lower cutoff c_lo of the unbiased acceptance
  # interval by minimizing solve_c_lo_loss() over [trunc_lo, q(alpha)]
  # with one-dimensional bounded (Brent) optimization.
  #
  # Starting point: the alpha/2 quantile (the equal-tailed choice).
  start <- q_truncnorm(p = alpha / 2,
                       mean = mu,
                       sd = sigma,
                       trunc_lo = trunc_lo,
                       trunc_hi = trunc_hi)
  # c_lo cannot exceed the alpha quantile: otherwise less than (1 - alpha)
  # mass would remain above it.
  upper_bound <- q_truncnorm(p = alpha,
                             mean = mu,
                             sd = sigma,
                             trunc_lo = trunc_lo,
                             trunc_hi = trunc_hi)
  fit <- stats::optim(par = start,
                      fn = solve_c_lo_loss,
                      mu = mu,
                      sigma = sigma,
                      alpha = alpha,
                      trunc_lo = trunc_lo,
                      trunc_hi = trunc_hi,
                      method = "Brent",
                      lower = trunc_lo,
                      upper = upper_bound)
  fit$par
}
solve_c_lo_loss <- function(c_lo, mu, sigma, alpha = 0.05,
                            trunc_lo = -Inf, trunc_hi = Inf) {
  # Objective for solve_c_lo(): unbiasedness requires
  #   E[Z * 1{c_lo <= Z <= c_hi}] = (1 - alpha) * E[Z],
  # where Z follows the truncated normal. Returns the absolute violation
  # of that equality for the candidate c_lo (and its implied c_hi).
  c_hi <- solve_c_hi(c_lo = c_lo,
                     mu = mu,
                     sigma = sigma,
                     trunc_lo = trunc_lo,
                     trunc_hi = trunc_hi,
                     alpha = alpha)
  interval <- c(c_lo, c_hi)
  # E[Z * 1{Z in interval}]
  mean_inside <- e_truncnorm_times_indicator(mu = mu,
                                             sigma = sigma,
                                             c_z = interval,
                                             trunc_lo = trunc_lo,
                                             trunc_hi = trunc_hi)
  # E[Z]
  mean_total <- e_truncnorm(mean = mu,
                            sd = sigma,
                            trunc_lo = trunc_lo,
                            trunc_hi = trunc_hi)
  abs(mean_inside - (1 - alpha) * mean_total)
}
solve_c_hi <- function(c_lo, mu, sigma, trunc_lo, trunc_hi, alpha) {
  # Given the lower cutoff `c_lo`, find the upper cutoff `c_hi` such that
  # the interval [c_lo, c_hi] carries probability mass (1 - alpha) under
  # the normal N(mu, sigma^2) truncated to [trunc_lo, trunc_hi].
  #
  # Returns: the upper cutoff c_hi (NaN when the CDF at c_lo is undefined).
  p_c_lo <- p_truncnorm(q = c_lo,
                        mean = mu,
                        sd = sigma,
                        trunc_lo = trunc_lo,
                        trunc_hi = trunc_hi)
  p_c_hi <- p_c_lo + (1 - alpha)
  if (is.nan(p_c_hi)) return(NaN)
  # p_c_hi should only exceed 1 through floating-point error (c_lo is chosen
  # so that P(Z <= c_lo) <= alpha); clamp to 1 instead of dropping into the
  # interactive debugger, which the original `browser()` call did.
  if (p_c_hi > 1) p_c_hi <- 1
  # Return
  q_truncnorm(p = p_c_hi,
              mean = mu,
              sd = sigma,
              trunc_lo = trunc_lo,
              trunc_hi = trunc_hi)
}
|
isAttached <- function(pkg) {
  # Report whether package `pkg` is attached to the search path.
  # Delegates to hutils::isAttached() when a recent hutils (> 1.2.0) is
  # installed; otherwise matches the package name against .packages().
  have_hutils <- requireNamespace("hutils", quietly = TRUE) &&
    packageVersion("hutils") > "1.2.0"
  if (have_hutils) {
    return(hutils::isAttached(pkg))
  }
  # NSE fallback: accepts both isAttached("pkg") and isAttached(pkg).
  pkg_name <- as.character(substitute(pkg))
  pkg_name %in% .packages()
}
dist2km <- function(string) {
  # Parse a distance string such as "3 km" or "750m" and return the
  # distance in kilometres (as a double).
  #
  # Args:
  #   string: a single character value ending in "m" or "km", optionally
  #           with whitespace before the unit.
  # Returns: the numeric distance in kilometres.
  stopifnot(is.character(string),
            length(string) == 1L)
  # Test "km" before "m": every "km" string also ends with "m".
  if (endsWith(string, "km")) {
    dist_km <- sub("\\s*km$", "", string)
    # use as.double here and as.numeric later to separate warning msgs
    dist_km <- as.double(dist_km)
  } else if (endsWith(string, "m")) {
    dist_km <- sub("\\s*m$", "", string)
    dist_km <- as.numeric(dist_km) / 1000
  } else {
    # Previously this fell through with `dist_km` unassigned, producing the
    # cryptic "object 'dist_km' not found"; fail with a clear message.
    stop("`string` must end with \"m\" or \"km\": ", string)
  }
  stopifnot(!anyNA(dist_km), is.numeric(dist_km))
  dist_km
}
| /R/utils.R | no_license | HughParsonage/PSMA | R | false | false | 726 | r |
isAttached <- function(pkg) {
if (requireNamespace("hutils", quietly = TRUE) &&
packageVersion("hutils") > "1.2.0") {
hutils::isAttached(pkg)
} else {
.pkg <- as.character(substitute(pkg))
.pkg %in% .packages()
}
}
dist2km <- function(string) {
stopifnot(is.character(string),
length(string) == 1L)
# put km before m!
if (endsWith(string, "km")) {
dist_km <- sub("\\s*km$", "", string)
# use as.double here and as.numeric later to separate warning msgs
dist_km <- as.double(dist_km)
} else if (endsWith(string, "m")) {
dist_km <- sub("\\s*m$", "", string)
dist_km <- as.numeric(dist_km) / 1000
}
stopifnot(!anyNA(dist_km), is.numeric(dist_km))
dist_km
}
|
## makeCacheMatrix: builds a special "matrix" object that can cache its
## inverse. cacheSolve() computes the inverse of such an object, reusing the
## cached value when the matrix has not changed since it was last inverted.
makeCacheMatrix <- function(x = matrix()) {
  # Closure-based cache: `x` holds the matrix, `cached_inverse` its inverse
  # (NULL until computed, reset to NULL whenever the matrix is replaced).
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## cacheSolve: returns the inverse of a special "matrix" created with
## makeCacheMatrix(), retrieving the cached inverse when one is available
## and computing (then caching) it otherwise.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    # Nothing cached yet: invert the stored matrix and remember the result.
    inverse <- solve(x$get())
    x$setinverse(inverse)
  } else {
    message("cached inverse matrix")
  }
  inverse
}
| /cachematrix.R | no_license | RobertoTan/ProgrammingAssignment2 | R | false | false | 1,322 | r | ##makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
##cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
makeCacheMatrix <- function(x = matrix()) {
inv_x <- NULL
set <- function(y) {
x <<- y
inv_x <<- NULL
}
get <- function() x
setinverse<- function(inverse) inv_x <<-inverse
getinverse <- function() inv_x
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## CacheSolve functions will return the inverse of a matrix A created with the makeCacheMatrix function.
## If the cached inverse is available, cacheSolve retrieves it. Otherwise it computes, caches, and returns it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv_x <- x$getinverse()
if (!is.null(inv_x)) {
message("cached inverse matrix")
return(inv_x)
} else {
inv_x <- solve(x$get())
x$setinverse(inv_x)
return(inv_x)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{county_points}
\alias{county_points}
\title{Eastern U.S. county latitude and longitudes}
\format{A dataframe with 2,396 rows and 3 variables:
\describe{
\item{fips}{A character vector giving the county's five-digit Federal
Information Processing Standard (FIPS) code}
\item{glat}{A numeric vector giving the latitude of the population mean
center of each county}
\item{glon}{A numeric vector giving the longitude of the population mean
center of each county}
}}
\source{
\url{http://www2.census.gov/geo/docs/reference/cenpop2010/county/CenPop2010_Mean_CO.txt}
}
\usage{
county_points
}
\description{
A dataframe containing locations of population mean centers for counties in
the eastern United States. Each county is identified by its 5-digit Federal
Information Processing Standard (FIPS) code. This dataframe can be used to
model storm winds at each county center. This dataset was put together using
a dataframe from the U.S. Census Bureau, which was pulled from the website
listed in "Source".
}
\keyword{datasets}
| /man/county_points.Rd | no_license | liztennant/stormwindmodel | R | false | true | 1,190 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{county_points}
\alias{county_points}
\title{Eastern U.S. county latitude and longitudes}
\format{A dataframe with 2,396 rows and 3 variables:
\describe{
\item{fips}{A character vector giving the county's five-digit Federal
Information Processing Standard (FIPS) code}
\item{glat}{A numeric vector giving the latitude of the population mean
center of each county}
\item{glon}{A numeric vector giving the longitude of the population mean
center of each county}
}}
\source{
\url{http://www2.census.gov/geo/docs/reference/cenpop2010/county/CenPop2010_Mean_CO.txt}
}
\usage{
county_points
}
\description{
A dataframe containing locations of population mean centers for counties in
the eastern United States. Each county is identified by its 5-digit Federal
Information Processing Standard (FIPS) code. This dataframe can be used to
model storm winds at each county center. This dataset was put together using
a dataframe from the U.S. Census Bureau, which was pulled from the website
listed in "Source".
}
\keyword{datasets}
|
\name{NBumiFeatureSelectionCombinedDrop}
\alias{NBumiFeatureSelectionCombinedDrop}
\title{Dropout-based Feature Selection}
\description{
Ranks genes by significance of increase in dropouts compared to expectation.
}
\usage{
NBumiFeatureSelectionCombinedDrop(fit, ntop=NULL, fdr=2, suppress.plot=TRUE)
}
\arguments{
\item{fit}{output from NBumiFitModel or NBumiFitBasicModel.}
\item{ntop}{number of top ranked genes to return}
  \item{fdr}{significance threshold}
\item{suppress.plot}{logical, whether to plot the fitted curve and highlight selected features}
}
\details{
Calculates dropout probability for each observation using depth-adjusted negative binomial means and dispersions calculated
from a fitted power-law relationship between mean and dispersion. Total dropouts per gene are modelled using the normal
approximation of the sum of bernoulli variables. And significance is evaluated using a Z-test.
If provided, ntop will overrule the significance threshold.
}
\value{
Sorted vector of p-values
}
\examples{
library(M3DExampleData)
counts <- NBumiConvertToInteger(Mmus_example_list$data);
fit <- NBumiFitModel(counts);
Dropout_features <- names(NBumiFeatureSelectionCombinedDrop(fit, fdr=0.05));
}
\keyword{single cell}
\keyword{feature selection}
\keyword{depth-adjusted negative binomial}
| /man/NBumi_FSDrop.Rd | no_license | leonfodoulian/M3Drop | R | false | false | 1,319 | rd | \name{NBumiFeatureSelectionCombinedDrop}
\alias{NBumiFeatureSelectionCombinedDrop}
\title{Dropout-based Feature Selection}
\description{
Ranks genes by significance of increase in dropouts compared to expectation.
}
\usage{
NBumiFeatureSelectionCombinedDrop(fit, ntop=NULL, fdr=2, suppress.plot=TRUE)
}
\arguments{
\item{fit}{output from NBumiFitModel or NBumiFitBasicModel.}
\item{ntop}{number of top ranked genes to return}
\item{fdr}{significant threshold}
\item{suppress.plot}{logical, whether to plot the fitted curve and highlight selected features}
}
\details{
Calculates dropout probability for each observation using depth-adjusted negative binomial means and dispersions calculated
from a fitted power-law relationship between mean and dispersion. Total dropouts per gene are modelled using the normal
approximation of the sum of bernoulli variables. And significance is evaluated using a Z-test.
If provided, ntop will overrule the significance threshold.
}
\value{
Sorted vector of p-values
}
\examples{
library(M3DExampleData)
counts <- NBumiConvertToInteger(Mmus_example_list$data);
fit <- NBumiFitModel(counts);
Dropout_features <- names(NBumiFeatureSelectionCombinedDrop(fit, fdr=0.05));
}
\keyword{single cell}
\keyword{feature selection}
\keyword{depth-adjusted negative binomial}
|
# Analyze MCMC results
# NOTE(review): devtools::document()/load_all() here assume the script is
# run from the package root.
devtools::document()
devtools::load_all()
# Import data
dir <- "data/results/AdaptiveGlobal/Priors unif et norm/"
files <- list.files(path = dir, pattern = "results_")
# Species codes are the result file names stripped of prefix and extension.
species <- gsub("results_","",files)
species <- gsub("\\.rds$","",species)  # was ".rds": unescaped "." matches any character
# Display labels, in the same order as `files` (typos fixed:
# "Fagus sylvativa" -> "sylvatica", "Fraexinus" -> "Fraxinus").
species_long <- c("Acer pseudoplatanus", "Aesculus hippocastanum", "Alnus glutinosa",
                  "Betula pendula", "Carpinus betulus", "Castanea sativa", "Corylus avellana",
                  "Fagus sylvatica", "Fraxinus excelsior", "Larix decidua", "Prunus avium", "Quercus robur")
# Loop over species
library(ggplot2)
library(plotROC)
library(reshape)
burnin <- 10000  # MCMC iterations discarded before summarizing the chains
# Prior
priors <- readRDS(paste0(dir,files[1]))$prior # same priors used for all the species, we can only look at the first one
# One row per parameter: sampling distribution plus its hyper-parameters
# (read from the S4 slots of each prior object).
table_priors <- lapply(priors,FUN = function(p){
  temp <- data.frame(p@distRNG,t(unlist(p@hyperParams)))
  names(temp) <- c("dist",names(p@hyperParams))
  return(temp)
})
table_priors <- do.call(dplyr::bind_rows,table_priors)
table_priors$params <- names(priors)
# `FALSE` spelled out (was `F`, which is reassignable).
write.table(table_priors,paste0(dir,"priors.txt"),quote=FALSE,row.names = FALSE)
# Posterior mean
# One row per species: posterior means of the six model parameters,
# computed from the post-burn-in chain.
postmean <- lapply(seq_along(files), FUN = function(i){  # was 1:length(files)
  calib <- readRDS(paste0(dir,files[i]))$calib
  chain_after_burnin <- calib$chain[-(1:burnin),]
  d <- as.data.frame(t(apply(chain_after_burnin,2,mean)))
  names(d) <- c("a.cu","b.cu","a.fu","b.fu","mu","s")
  d$species <- species[i]
  d
})
postmean <- do.call(rbind,postmean)
# Maximum a posteriori
# MAP estimate per parameter: the mode of a kernel density fit to each
# marginal posterior chain.
map <- lapply(seq_along(files), FUN = function(i){  # was 1:length(files)
  calib <- readRDS(paste0(dir,files[i]))$calib
  chain_after_burnin <- calib$chain[-(1:burnin),]
  d <- apply(chain_after_burnin,2,density)
  # vapply (was sapply over 1:length(d)): type-stable and empty-safe
  map <- vapply(d, FUN = function(dj){
    dj$x[which.max(dj$y)]
  }, FUN.VALUE = numeric(1))
  map <- as.data.frame(t(map))
  names(map) <- c("a.cu","b.cu","a.fu","b.fu","mu","s")
  map$species <- species[i]
  map
})
map <- do.call(rbind,map)
# Equal-tailed 95% credible interval per parameter and species.
ci95 <- lapply(seq_along(files), FUN = function(i){  # was 1:length(files)
  calib <- readRDS(paste0(dir,files[i]))$calib
  chain_after_burnin <- calib$chain[-(1:burnin),]
  d <- as.data.frame(t(apply(chain_after_burnin,2,quantile,c(0.025,0.975))))
  d$param <- c("a.cu","b.cu","a.fu","b.fu","mu","s")
  d$species <- species[i]
  d
})
ci95 <- do.call(rbind,ci95)
# Table with MAP and 95% CI
map_t <- melt(map)
names(map_t) <- c("species","param","MAP")
map_ci95 <- merge(map_t,ci95,by=c("species","param"))
# Reshape CI bounds to one wide row per species.
# NOTE(review): dcast() is not exported by the attached `reshape` package
# (it comes from reshape2) — verify which package provides it here.
ci952 <- melt(ci95)
ci952$var <- paste0(ci952$param,ci952$variable)
ci952$param <- ci952$variable <- NULL
ci952 <- dcast(ci952, species ~ var)
# Format each parameter's bounds as "[lo ; hi]" (6 parameters, columns in
# (lo, hi) pairs after the species column).
ci95merged <- ci952$species
for (i in 1:6){
  ci95merged <- cbind(ci95merged,paste0("[",format(ci952[,2*i],digits=3)," ; ",format(ci952[,2*i+1],digits=3),"]"))
}
ci95merged <- as.data.frame(ci95merged)
names(ci95merged) <- c("species",paste0(c("a.cu","a.fu","b.cu","b.fu","mu","s"),"_95CI"))
map_ci95_2 <- merge(map,ci95merged)
library(xtable)
# `FALSE` spelled out (was `F`).
print(xtable(map_ci95_2[,c(1,2,8,3,10,4,9,5,11)]), include.rownames=FALSE)
print(xtable(map_ci95_2[,c(1,6,12,7,13)]), include.rownames=FALSE)
# Compare predictions using posterior means and MAP with observations
temp.data <- readRDS("data/temperaturePlants.rds")
# `<-` for top-level assignment (was `=`).
origin.date.cu <- "09-01"
origin.date.fu <- "12-01"
# Column-name mapping and temperature thresholds passed to the model helpers.
var.names <- list(date="date",plant="plant",session="session",rep="rep",temp="temp.plant",duration="duration")
temp.params <- list(temp.min.cu = -10, temp.max.cu = 15, temp.min.fu = 5, temp.max.fu = 35)
names.params <- c("a.cu","b.cu","a.fu","b.fu","mu","s")
# NOTE(review): the "." in "obs.data" is a regex wildcard; anchor it
# ("obs\\.data") if literal matching is intended.
obs.files <- list.files(path="data/",pattern = "obs.data")
# Predicted budburst probabilities (posterior-mean and MAP parameter
# estimates) joined with the observed outcomes, one long data frame over
# all species.
dpredall <- lapply(seq_along(files), FUN = function(i){  # was 1:length(files)
  params1 <- postmean[i,]
  params2 <- map[i,]
  obs.data <- readRDS(paste0("data/",obs.files[i]))
  # Predictions with posterior-mean parameters
  predPostMean <- modelProbaBB(temp.data = temp.data,
                               var.names = var.names,
                               temp.params = temp.params,
                               cufu.params = stats::setNames(as.list(params1),names.params))
  names(predPostMean) <- c("session","plant","rep","cu.postmean","fu.postmean","probaBB.postmean")
  # Predictions with maximum-a-posteriori parameters
  predMAP <- modelProbaBB(temp.data = temp.data,
                          var.names = var.names,
                          temp.params = temp.params,
                          cufu.params = stats::setNames(as.list(params2),names.params))
  names(predMAP) <- c("session","plant","rep","cu.map","fu.map","probaBB.map")
  pred <- dplyr::inner_join(predMAP,predPostMean,by = c("session", "plant", "rep"))
  pred <- dplyr::inner_join(obs.data,pred,by = c("session", "plant", "rep"))
  # Long format: one row per (observation, estimator)
  dpred <- melt(pred,id.vars = "budburst", measure.vars = c("probaBB.postmean","probaBB.map"))
  dpred$species <- species[i]
  dpred$n <- nrow(pred)
  # Per-species plotting kept below (commented out); the combined faceted
  # plots are produced after this loop instead.
  # pred_plot <- ggplot(data=dpred[dpred$variable=="probaBB.map",],aes(x=as.factor(budburst),y=value)) +
  #   geom_boxplot(width=0.25,fill="grey") + xlab("Observed budburst") + ylab("Predicted probability of budburst") +
  #   guides(fill = FALSE) +
  #   ggtitle(paste0(species_long[i],", n=",nrow(pred))) + ylim(c(0,1))
  # #ggsave(paste0(dir,"predictions_",species[i],".pdf"),pred_plot,height=5,width=6)
  #
  # roc <- ggplot(dpred[dpred$variable=="probaBB.map",],aes(m=value,d=budburst)) + geom_roc(labels=F,pointsize=0) + ggtitle(paste0(species_long[i],", n=",nrow(pred)))
  # auc <- calc_auc(roc)
  # col <- unique(ggplot_build(roc)$data[[1]]["colour"])
  # roc <- roc + style_roc(theme = theme_gray,xlab = "1 - Specificity") +
  #   guides(fill = FALSE) +
  #   annotate("text", x = .75, y = .25, label = paste("AUC =", round(calc_auc(roc)$AUC[1], 2)), color=col$colour[1]) +
  #   annotate("text", x = .75, y = .2, label = paste("AUC =", round(calc_auc(roc)$AUC[2], 2)), color=col$colour[2])
  # #ggsave(paste0(dir,"roc_",species[i],".pdf"),roc, height=5,width=6)
  #return(list(pred=pred_plot,roc=roc))
  return(dpred)
})
dpredall <- do.call(rbind,dpredall)
# Facet labels: full species name plus sample size.
dpredall$species <- factor(dpredall$species, levels = species, labels = species_long)
dpredall$title <- paste0(dpredall$species,", n=",dpredall$n)
# Boxplot of MAP-predicted budburst probability against observed outcome.
pred_plot <- ggplot(data=dpredall[dpredall$variable=="probaBB.map",],aes(x=as.factor(budburst),y=value)) +
  geom_boxplot(width=0.25,fill="grey") + xlab("Observed budburst") + ylab("Predicted probability of budburst") +
  guides(fill = FALSE) + facet_wrap(~title,ncol=4,nrow=3) +  # ncol/nrow spelled out (was partial-matched nc/nr)
  ylim(c(0,1)) + theme(strip.text = element_text(face = "italic"))
pred_plot
ggsave("~/Documents/Articles/En cours/Dormancy/figures/pred_plot_MAP.pdf",pred_plot,height = 7, width = 8.5)
# ROC curve per species for the MAP-based predictions.
# labels=FALSE and ncol/nrow spelled out (were `F` and partial-matched nc/nr).
rocf <- ggplot(dpredall[dpredall$variable=="probaBB.map",],aes(m=value,d=budburst)) + geom_roc(labels=FALSE,pointsize=0,size=0.5) +
  facet_wrap(~title,ncol=4,nrow=3)
auc <- calc_auc(rocf)
# NOTE(review): assumes calc_auc() returns one row per facet in the same
# order as unique(dpredall$title) — verify.
auc$title <- unique(dpredall$title)
#col <- unique(ggplot_build(roc)$data[[1]]["colour"])
roc <- rocf + style_roc(theme = theme_gray,xlab = "1 - Specificity") +
  guides(fill = FALSE) +
  # Print the AUC in the bottom-right corner of each facet.
  geom_text(data = auc, aes(label=paste0("AUC=",round(AUC,2))),
            x = Inf, y = -Inf, hjust=1, vjust=0,
            inherit.aes = FALSE) + theme(strip.text = element_text(face = "italic"))
  #annotate("text", x = 0.75, y=0.25, label = auc$AUC, group=auc$AUC)
  #annotate("text", x = .75, y = .25, label = paste("AUC =", auc$AUC))#, color=col$colour[1])
  #annotate("text", x = .75, y = .2, label = paste("AUC =", round(calc_auc(roc)$AUC[2], 2)), color=col$colour[2])
roc
ggsave("~/Documents/Articles/En cours/Dormancy/figures/roc_all_MAP.pdf",roc,height = 7, width = 8.5)
# Plot estimated CU+FU as a function of time
# For each species: join temperatures with observations, accumulate
# chilling (CU) and forcing (FU) units with the posterior-mean parameters,
# and write per-species PDF plots of the cumulative units and of the
# implied budburst probability.
# NOTE(review): `%>%` is used but neither dplyr nor magrittr is attached
# with library(); presumably devtools::load_all() makes it available —
# verify. The right_join below also relies on implicit `by` columns.
lapply(1:length(files), FUN = function(i){
  params1 <- postmean[i,]
  params2 <- map[i,]
  obs.data <- readRDS(paste0("data/",obs.files[i]))
  data <- dplyr::right_join(temp.data,obs.data)
  # compute CU and FU
  data$cu.mean <- chillingUnits(data,
                                var.names = var.names,
                                temp.min = temp.params$temp.min.cu, temp.max= temp.params$temp.max.cu,
                                mu = params1$a.cu, s = params1$b.cu)
  data$fu.mean <- forcingUnits(data,
                               var.names = var.names,
                               temp.min = temp.params$temp.min.fu, temp.max= temp.params$temp.max.fu,
                               a = params1$a.fu, b = params1$b.fu)
  # do not account for temperatures accumulated between 01/01 and origin.date
  data$fu.mean[data$before.origin.cu] <- 0
  data$cu.mean[data$before.origin.cu] <- 0
  # Cumulative units per (session, plant, rep) series.
  data <- data %>% dplyr::group_by_at(c(var.names$session,var.names$plant,var.names$rep)) %>% dplyr::mutate(cu.cum = cumsum(cu.mean), fu.cum = cumsum(fu.mean))
  # Stack CU and FU into one long frame for the combined plot.
  dataCU <- data %>% dplyr::select(!fu.cum)
  dataCU$units <- dataCU$cu.cum
  dataCU$type <- "Chilling"
  dataFU <- data %>% dplyr::select(!cu.cum)
  dataFU$units <- dataFU$fu.cum
  dataFU$type <- "Forcing"
  dataCU$cu.cum <- NULL
  dataFU$fu.cum <- NULL
  dataCUFU <- rbind(dataCU,dataFU)
  dataBB <- data %>% dplyr::select(session,rep,plant,harv.date,budburst) %>% dplyr::distinct()
  data$month <- lubridate::month(data$date)
  data$year <- lubridate::year(data$date)
  dataCUFU$month <- lubridate::month(dataCUFU$date)
  dataCUFU$year <- lubridate::year(dataCUFU$date)
  # Plots restricted to September onwards (month >= 9).
  p1 <- ggplot(data[data$month>=9,],aes(x=date,y=cu.cum,col=as.factor(rep)),group=as.factor(rep)) + geom_line() +
    facet_wrap(~year,scales = "free") + scale_color_discrete(name="Branch")
  p12 <- ggplot(dataCUFU[dataCUFU$month>=9,],aes(x=date,y=units,col=as.factor(rep),linetype=type),group=as.factor(rep)) + geom_line() +
    facet_wrap(~year,scales = "free") + scale_color_discrete(name="Branch") + scale_linetype_discrete(name="Units")
  p2 <- ggplot(data[data$month>=9,],aes(x=date,y=fu.cum,col=as.factor(rep)),group=as.factor(rep)) + geom_line() +
    facet_wrap(~year,scales = "free") + scale_color_discrete(name="Branch")
  ggsave(paste0(dir,"/cu.cum_",species[i],".pdf"),p1,height = 6, width = 12)
  ggsave(paste0(dir,"fu.cum_",species[i],".pdf"),p2,height = 6, width = 12)
  ggsave(paste0(dir,"cufu.cum_",species[i],".pdf"),p12,height = 6, width = 12)
  # Logistic budburst probability from accumulated units.
  # NOTE(review): params1 (posterior mean) is used here although the rest
  # of the figures use MAP estimates — confirm this is intended
  # (params2 is read above but never used).
  data$probaBB <- 1/(1+exp(-(data$cu.cum + data$fu.cum - params1$mu)/params1$s))
  dataBB <- data %>% dplyr::select(session,harv.date,plant,rep,budburst) %>% dplyr::distinct()
  p3 <- ggplot(data,aes(x=date,y=probaBB,col=as.factor(rep)),group=as.factor(rep)) + geom_line() + facet_grid(session~rep,scales = "free", labeller=label_both) + ylim(c(0,1))
  p3 <- p3 + geom_point(data=dataBB,aes(x=harv.date,y=budburst,pch='.')) + scale_color_discrete(name="Branch") + scale_shape_discrete(name="Budburst") + theme(axis.text.x=element_text(angle=45, hjust=1))
  ggsave(paste0(dir,"probaBB_",species[i],".pdf"),p3,height = 6, width = 12)
})
| /analyses/analyzeResults.R | no_license | baeyc/dormancy | R | false | false | 10,991 | r | # Analyze MCMC results
devtools::document()
devtools::load_all()
# Import data
dir <- "data/results/AdaptiveGlobal/Priors unif et norm/"
files <- list.files(path = dir, pattern = "results_")
species <- gsub("results_","",files)
species <- gsub(".rds","",species)
species_long <- c("Acer pseudoplatanus", "Aesculus hippocastanum", "Alnus glutinosa",
"Betula pendula", "Carpinus betulus", "Castanea sativa", "Corylus avellana",
"Fagus sylvativa", "Fraexinus excelsior", "Larix decidua", "Prunus avium", "Quercus robur")
# Loop over species
library(ggplot2)
library(plotROC)
library(reshape)
burnin <- 10000
# Prior
priors <- readRDS(paste0(dir,files[1]))$prior # same priors used for all the species, we can only look at the first one
table_priors <- lapply(priors,FUN = function(p){
temp <- data.frame(p@distRNG,t(unlist(p@hyperParams)))
names(temp) <- c("dist",names(p@hyperParams))
return(temp)
})
table_priors <- do.call(dplyr::bind_rows,table_priors)
table_priors$params <- names(priors)
write.table(table_priors,paste0(dir,"priors.txt"),quote=F,row.names = F)
# Posterior mean
postmean <- lapply(1:length(files), FUN = function(i){
calib <- readRDS(paste0(dir,files[i]))$calib
chain_after_burnin <- calib$chain[-(1:burnin),]
d <- as.data.frame(t(apply(chain_after_burnin,2,mean)))
names(d) <- c("a.cu","b.cu","a.fu","b.fu","mu","s")
d$species <- species[i]
d
})
postmean <- do.call(rbind,postmean)
# Maximum a posteriori
map <- lapply(1:length(files), FUN = function(i){
calib <- readRDS(paste0(dir,files[i]))$calib
chain_after_burnin <- calib$chain[-(1:burnin),]
d <- apply(chain_after_burnin,2,density)
map <- sapply(1:length(d),FUN=function(j){
d[[j]]$x[which.max(d[[j]]$y)]
})
map <- as.data.frame(t(map))
names(map) <- c("a.cu","b.cu","a.fu","b.fu","mu","s")
map$species <- species[i]
map
})
map <- do.call(rbind,map)
ci95 <- lapply(1:length(files), FUN = function(i){
calib <- readRDS(paste0(dir,files[i]))$calib
chain_after_burnin <- calib$chain[-(1:burnin),]
d <- as.data.frame(t(apply(chain_after_burnin,2,quantile,c(0.025,0.975))))
d$param <- c("a.cu","b.cu","a.fu","b.fu","mu","s")
d$species <- species[i]
d
})
ci95 <- do.call(rbind,ci95)
# Table with MAP and 95% CI
map_t <- melt(map)
names(map_t) <- c("species","param","MAP")
map_ci95 <- merge(map_t,ci95,by=c("species","param"))
ci952 <- melt(ci95)
ci952$var <- paste0(ci952$param,ci952$variable)
ci952$param <- ci952$variable <- NULL
ci952 <- dcast(ci952, species ~ var)
ci95merged <- ci952$species
for (i in 1:6){
ci95merged <- cbind(ci95merged,paste0("[",format(ci952[,2*i],digits=3)," ; ",format(ci952[,2*i+1],digits=3),"]"))
}
ci95merged <- as.data.frame(ci95merged)
names(ci95merged) <- c("species",paste0(c("a.cu","a.fu","b.cu","b.fu","mu","s"),"_95CI"))
map_ci95_2 <- merge(map,ci95merged)
library(xtable)
print(xtable(map_ci95_2[,c(1,2,8,3,10,4,9,5,11)]), include.rownames=F)
print(xtable(map_ci95_2[,c(1,6,12,7,13)]), include.rownames=F)
# Compare predictions using posterior means and MAP with observations
temp.data <- readRDS("data/temperaturePlants.rds")
origin.date.cu = "09-01"
origin.date.fu = "12-01"
var.names = list(date="date",plant="plant",session="session",rep="rep",temp="temp.plant",duration="duration")
temp.params = list(temp.min.cu = -10, temp.max.cu = 15, temp.min.fu = 5, temp.max.fu = 35)
names.params <- c("a.cu","b.cu","a.fu","b.fu","mu","s")
obs.files <- list.files(path="data/",pattern = "obs.data")
dpredall <- lapply(1:length(files), FUN = function(i){
params1 <- postmean[i,]
params2 <- map[i,]
obs.data <- readRDS(paste0("data/",obs.files[i]))
predPostMean <- modelProbaBB(temp.data = temp.data,
var.names = var.names,
temp.params = temp.params,
cufu.params =stats::setNames(as.list(params1),names.params))
names(predPostMean) <- c("session","plant","rep","cu.postmean","fu.postmean","probaBB.postmean")
predMAP <- modelProbaBB(temp.data = temp.data,
var.names = var.names,
temp.params = temp.params,
cufu.params =stats::setNames(as.list(params2),names.params))
names(predMAP) <- c("session","plant","rep","cu.map","fu.map","probaBB.map")
pred <- dplyr::inner_join(predMAP,predPostMean,by = c("session", "plant", "rep"))
pred <- dplyr::inner_join(obs.data,pred,by = c("session", "plant", "rep"))
dpred <- melt(pred,id.vars = "budburst", measure.vars = c("probaBB.postmean","probaBB.map"))
dpred$species <- species[i]
dpred$n <- nrow(pred)
# pred_plot <- ggplot(data=dpred[dpred$variable=="probaBB.map",],aes(x=as.factor(budburst),y=value)) +
# geom_boxplot(width=0.25,fill="grey") + xlab("Observed budburst") + ylab("Predicted probability of budburst") +
# #scale_fill_discrete(name="Estimate",labels=c("posterior mean","maximum a posteriori")) +
# guides(fill = FALSE) +
# ggtitle(paste0(species_long[i],", n=",nrow(pred))) + ylim(c(0,1))
# #ggsave(paste0(dir,"predictions_",species[i],".pdf"),pred_plot,height=5,width=6)
#
# roc <- ggplot(dpred[dpred$variable=="probaBB.map",],aes(m=value,d=budburst)) + geom_roc(labels=F,pointsize=0) + ggtitle(paste0(species_long[i],", n=",nrow(pred)))
# auc <- calc_auc(roc)
# col <- unique(ggplot_build(roc)$data[[1]]["colour"])
# roc <- roc + style_roc(theme = theme_gray,xlab = "1 - Specificity") +
# #scale_color_discrete(name="Estimate",labels=c("posterior mean","maximum a posteriori")) +
# guides(fill = FALSE) +
# annotate("text", x = .75, y = .25, label = paste("AUC =", round(calc_auc(roc)$AUC[1], 2)), color=col$colour[1]) +
# annotate("text", x = .75, y = .2, label = paste("AUC =", round(calc_auc(roc)$AUC[2], 2)), color=col$colour[2])
# #ggsave(paste0(dir,"roc_",species[i],".pdf"),roc, height=5,width=6)
#return(list(pred=pred_plot,roc=roc))
return(dpred)
})
# Stack the per-species prediction tables returned by the lapply above.
# NOTE(review): geom_roc/calc_auc/style_roc presumably come from the plotROC
# package -- confirm it is loaded earlier in this file.
dpredall <- do.call(rbind,dpredall)
# Use full species names (same order as 'species') for the facet labels
dpredall$species <- factor(dpredall$species, levels = species, labels = species_long)
# Panel titles: species name plus sample size
dpredall$title <- paste0(dpredall$species,", n=",dpredall$n)
# Boxplots of the MAP-based predicted budburst probability against the observed
# budburst outcome, one panel per species
pred_plot <- ggplot(data=dpredall[dpredall$variable=="probaBB.map",],aes(x=as.factor(budburst),y=value)) +
geom_boxplot(width=0.25,fill="grey") + xlab("Observed budburst") + ylab("Predicted probability of budburst") +
#scale_fill_discrete(name="Estimate",labels=c("posterior mean","maximum a posteriori")) +
guides(fill = FALSE) + facet_wrap(~title,nc=4,nr=3) +
ylim(c(0,1)) + theme(strip.text = element_text(face = "italic"))
pred_plot
ggsave("~/Documents/Articles/En cours/Dormancy/figures/pred_plot_MAP.pdf",pred_plot,height = 7, width = 8.5)
# ROC curves of predicted probability vs observed budburst, one panel per species
rocf <- ggplot(dpredall[dpredall$variable=="probaBB.map",],aes(m=value,d=budburst)) + geom_roc(labels=F,pointsize=0,size=0.5) +
facet_wrap(~title,nc=4,nr=3)
# Area under the curve per panel, annotated in the bottom-right corner of each facet
auc <- calc_auc(rocf)
auc$title <- unique(dpredall$title)
#col <- unique(ggplot_build(roc)$data[[1]]["colour"])
roc <- rocf + style_roc(theme = theme_gray,xlab = "1 - Specificity") +
#scale_color_discrete(name="Estimate",labels=c("posterior mean","maximum a posteriori")) +
guides(fill = FALSE) + #facet_wrap(~title,nc=3,nr=4) +
geom_text(data = auc, aes(label=paste0("AUC=",round(AUC,2))),
x = Inf, y = -Inf, hjust=1, vjust=0,
inherit.aes = FALSE) + theme(strip.text = element_text(face = "italic"))
#annotate("text", x = 0.75, y=0.25, label = auc$AUC, group=auc$AUC)
#annotate("text", x = .75, y = .25, label = paste("AUC =", auc$AUC))#, color=col$colour[1])
#annotate("text", x = .75, y = .2, label = paste("AUC =", round(calc_auc(roc)$AUC[2], 2)), color=col$colour[2])
roc
ggsave("~/Documents/Articles/En cours/Dormancy/figures/roc_all_MAP.pdf",roc,height = 7, width = 8.5)
# Plot estimated CU+FU as a function of time
# For each species: recompute chilling (CU) and forcing (FU) units from the
# fitted posterior-mean parameters, plot their accumulation through time, and
# plot the implied budburst probability together with the observations.
lapply(1:length(files), FUN = function(i){
params1 <- postmean[i,]  # posterior-mean parameter estimates for species i
params2 <- map[i,]  # MAP estimates; NOTE(review): not used below
obs.data <- readRDS(paste0("data/",obs.files[i]))
# pair each observation with the temperature series
data <- dplyr::right_join(temp.data,obs.data)
# compute CU and FU
data$cu.mean <- chillingUnits(data,
var.names = var.names,
temp.min = temp.params$temp.min.cu, temp.max= temp.params$temp.max.cu,
mu = params1$a.cu, s = params1$b.cu)
data$fu.mean <- forcingUnits(data,
var.names = var.names,
temp.min = temp.params$temp.min.fu, temp.max= temp.params$temp.max.fu,
a = params1$a.fu, b = params1$b.fu)
# do not account for temperatures accumulated between 01/01 and origin.date
data$fu.mean[data$before.origin.cu] <- 0
data$cu.mean[data$before.origin.cu] <- 0
# cumulative units within each session x plant x replicate series
data <- data %>% dplyr::group_by_at(c(var.names$session,var.names$plant,var.names$rep)) %>% dplyr::mutate(cu.cum = cumsum(cu.mean), fu.cum = cumsum(fu.mean))
# build a stacked copy with one set of rows per unit type ("Chilling"/"Forcing")
# so both can be drawn in a single plot with a linetype aesthetic
dataCU <- data %>% dplyr::select(!fu.cum)
dataCU$units <- dataCU$cu.cum
dataCU$type <- "Chilling"
dataFU <- data %>% dplyr::select(!cu.cum)
dataFU$units <- dataFU$fu.cum
dataFU$type <- "Forcing"
dataCU$cu.cum <- NULL
dataFU$fu.cum <- NULL
dataCUFU <- rbind(dataCU,dataFU)
dataBB <- data %>% dplyr::select(session,rep,plant,harv.date,budburst) %>% dplyr::distinct()
data$month <- lubridate::month(data$date)
data$year <- lubridate::year(data$date)
dataCUFU$month <- lubridate::month(dataCUFU$date)
dataCUFU$year <- lubridate::year(dataCUFU$date)
# accumulation plots restricted to September onwards (month >= 9), one panel per year
p1 <- ggplot(data[data$month>=9,],aes(x=date,y=cu.cum,col=as.factor(rep)),group=as.factor(rep)) + geom_line() +
facet_wrap(~year,scales = "free") + scale_color_discrete(name="Branch")
p12 <- ggplot(dataCUFU[dataCUFU$month>=9,],aes(x=date,y=units,col=as.factor(rep),linetype=type),group=as.factor(rep)) + geom_line() +
facet_wrap(~year,scales = "free") + scale_color_discrete(name="Branch") + scale_linetype_discrete(name="Units")
p2 <- ggplot(data[data$month>=9,],aes(x=date,y=fu.cum,col=as.factor(rep)),group=as.factor(rep)) + geom_line() +
facet_wrap(~year,scales = "free") + scale_color_discrete(name="Branch")
ggsave(paste0(dir,"/cu.cum_",species[i],".pdf"),p1,height = 6, width = 12)
# NOTE(review): the two paths below omit the "/" used above -- confirm 'dir' ends in a separator
ggsave(paste0(dir,"fu.cum_",species[i],".pdf"),p2,height = 6, width = 12)
ggsave(paste0(dir,"cufu.cum_",species[i],".pdf"),p12,height = 6, width = 12)
# logistic budburst probability from accumulated units (location mu, scale s)
data$probaBB <- 1/(1+exp(-(data$cu.cum + data$fu.cum - params1$mu)/params1$s))
dataBB <- data %>% dplyr::select(session,harv.date,plant,rep,budburst) %>% dplyr::distinct()
p3 <- ggplot(data,aes(x=date,y=probaBB,col=as.factor(rep)),group=as.factor(rep)) + geom_line() + facet_grid(session~rep,scales = "free", labeller=label_both) + ylim(c(0,1))
p3 <- p3 + geom_point(data=dataBB,aes(x=harv.date,y=budburst,pch='.')) + scale_color_discrete(name="Branch") + scale_shape_discrete(name="Budburst") + theme(axis.text.x=element_text(angle=45, hjust=1))
ggsave(paste0(dir,"probaBB_",species[i],".pdf"),p3,height = 6, width = 12)
})
|
#' Filter spotify search queries
#'
#' Restrict a spotify search-query data table to the entries recorded within a
#' given time period.
#'
#' @param search_queries A data table with search queries,
#'   made with \code{\link{make_search_queries_dt}}
#' @param start_date A POSIXt, Date or string that can be coerced
#'   into Date by \code{\link{as_date}}, marking the start of the period.
#' @param end_date A POSIXt, Date or string that can be coerced
#'   into Date by \code{\link{as_date}}, marking the end of the period.
#'
#' @return A search queries data table containing only entries between
#'   \code{start_date} and \code{end_date} (both inclusive).
#'
#' @export
#'
#' @seealso \code{\link{filter_streaming_history}}
filter_search_queries <- function(search_queries, start_date, end_date) {
  period_start <- as_date(start_date)
  period_end <- as_date(end_date)
  search_queries[date >= period_start & date <= period_end, ]
}
| /R/filter_search_queries.R | no_license | mabuszka/SpotifyViz | R | false | false | 872 | r | #' Filter spotify search queries
#'
#' Filter spotify search queries data table to contain only entries from specified time period
#'
#' @param search_queries A data table with search queries,
#' made with \code{\link{make_search_queries_dt}}
#' @param start_date A POSIXt,Date or string that can be coerced
#' into Date by \code{\link{as_date}} indicating start of the period of time.
#' @param end_date A POSIXt, Date or string that can be coerced
#' into Date by \code{\link{as_date}} indicating end of the period of time.
#'
#' @return A search queries data table containing only entries between \code{start_date} and
#' \code{end_date}
#'
#' @export
#'
#' @seealso \code{\link{filter_streaming_history}}
filter_search_queries <- function(search_queries, start_date, end_date) {
search_queries[date <= as_date(end_date) & date >= as_date(start_date), ]
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topgamesummary.R
\name{topgamesummary}
\alias{topgamesummary}
\title{Scrapes a brief description of the top 3 games for the console specified by the user from Metacritic,
and formats the collected data into a data frame}
\usage{
topgamesummary(url = "http://www.metacritic.com/browse/games/score/metascore/all/ps4/filtered")
}
\arguments{
\item{url}{metacritic top games table url}
}
\value{
table with descriptions of top 3 games
}
\description{
Scrapes a brief description of the top 3 games for the console specified by the user from Metacritic,
and formats the collected data into a data frame
}
| /OneConsoleToRuleThemAll/man/topgamesummary.Rd | no_license | antoshachekhonte/WebScrapingTool | R | false | true | 639 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topgamesummary.R
\name{topgamesummary}
\alias{topgamesummary}
\title{Scrapes brief description of top 3 games for console specified by user from metacritic,
and formats collected data into dataframe}
\usage{
topgamesummary(url = "http://www.metacritic.com/browse/games/score/metascore/all/ps4/filtered")
}
\arguments{
\item{url}{metacritic top games table url}
}
\value{
table with descriptions of top 3 games
}
\description{
Scrapes brief description of top 3 games for console specified by user from metacritic,
and formats collected data into dataframe
}
|
#' Convert .mp3 files to .wav
#'
#' \code{mp32wav} converts several .mp3 files in working directory to .wav format
#' @usage mp32wav(samp.rate = 44.1, parallel = 1, from.path = NULL, to.path = NULL,
#' normalize = NULL, pb = TRUE)
#' @param samp.rate Sampling rate at which the .wav files should be written. The maximum permitted is 44.1 kHz (default). Units should be kHz.
#' @param parallel Numeric. Controls whether parallel computing is applied.
#' It specifies the number of cores to be used. Default is 1 (i.e. no parallel computing).
#' Not availble in Windows OS.
#' @param from.path Character string containing the directory path where the .mp3 files are located.
#' If \code{NULL} (default) then the current working directory is used.
#' @param to.path Character string containing the directory path where the .wav files will be saved.
#' If \code{NULL} (default) then the same directory as \code{from.path} is used.
#' @param normalize Character string containing the units to be used for amplitude normalization. Check
#' (\code{\link[tuneR]{normalize}}) for details. If NULL (default) no normalization is carried out.
#' @param pb Logical argument to control progress bar. Default is \code{TRUE}. Note that progress bar is only used
#' when parallel = 1.
#' @return .wav files saved in \code{to.path} with same name as original mp3 files.
#' @export
#' @name mp32wav
#' @examples
#' \dontrun{
#' # First set temporary folder
#' setwd(tempdir())
#'
#' #Then download mp3 files from xeno-canto
#' querxc(qword = "Phaethornis aethopygus", download = TRUE)
#'
#' # Convert all files to .wav format
#' mp32wav()
#'
#' #check this folder!!
#' getwd()
#' }
#' @details convert all .mp3 files in working directory to .wav format. Function used internally to read .mp3 files (\code{\link[tuneR]{readMP3}}) sometimes crashes.
#' @author Marcelo Araya-Salas (\email{araya-salas@@cornell.edu}) and Grace Smith Vidaurre
#last modification on jul-5-2016 (MAS)
mp32wav <- function(samp.rate = 44.1, parallel = 1, from.path = NULL, to.path = NULL, normalize = NULL,
                    pb = TRUE) {
  # move to the source directory (bug fix: the original tested 'to.path' but set
  # the working directory to 'from.path', so a NULL 'from.path' crashed setwd())
  if (!is.null(from.path)) {
    if (inherits(try(setwd(from.path), silent = TRUE), "try-error")) stop("'from.path' provided does not exist")
  } else from.path <- getwd()
  # output directory defaults to the source directory
  if (is.null(to.path)) to.path <- from.path
  # validate 'normalize' (must be a single value accepted by tuneR::normalize)
  if (!is.null(normalize))
  {if (length(normalize) > 1) stop("'normalize' should have a single value")
    if (!normalize %in% c("1", "8", "16", "24", "32", "64", "0")) stop("'normalize' value not allowed (check the documentation from the normalize function in the tuneR package")
  }
  # cap the sampling rate at the maximum allowed (44.1 kHz)
  if (samp.rate > 44.1) samp.rate <- 44.1
  # validate 'parallel' (positive integer)
  if (!is.numeric(parallel)) stop("'parallel' must be a numeric vector of length 1")
  if (any(!(parallel %% 1 == 0), parallel < 1)) stop("'parallel' should be a positive integer")
  # parallel computing is not available on Windows for this function
  if (parallel > 1 & Sys.info()[1] == "Windows")
  {message("parallel computing not availabe in Windows OS for this function")
    parallel <- 1}
  # choose the apply-like engine: parallel, progress-bar, or plain lapply
  if (parallel > 1)
    lapp <- function(X, FUN) parallel::mclapply(X, FUN, mc.cores = parallel) else
    {if (pb) lapp <- pbapply::pblapply else lapp <- lapply}
  # list .mp3 files (bug fix: escape the dot so e.g. "xmp3y" is not matched)
  files <- list.files(path = getwd(), pattern = "\\.mp3$", ignore.case = TRUE)
  if (length(files) == 0) stop("no 'mp3' files in working directory")
  # exclude the ones that already have a .wav version in the output directory
  wavs <- list.files(path = to.path, pattern = "\\.wav$", ignore.case = TRUE)
  files <- files[!substr(files, 0, nchar(files) - 4) %in% substr(wavs, 0, nchar(wavs) - 4)]
  if (length(files) == 0) stop("all 'mp3' files have been converted")
  message("Start writing wav files:")
  # build output file paths (bug fix: the original pasted 'from.path' as a bare
  # prefix with no path separator and never used 'to.path')
  out.file <- function(x) file.path(to.path, paste0(substr(x, 0, nchar(x) - 4), ".wav"))
  if (!is.null(normalize))
    suppressWarnings( a<-lapp(files, function(x) tuneR::writeWave(object = tuneR::normalize(tuneR::downsample(tuneR::readMP3(filename = x), samp.rate = samp.rate * 1000), unit = normalize), filename = out.file(x)))) else
    suppressWarnings( a<-lapp(files, function(x) tuneR::writeWave(object = tuneR::downsample(tuneR::readMP3(filename = x), samp.rate = samp.rate * 1000), filename = out.file(x))))
}
| /R/mp32wav.R | no_license | sildeag/warbleR | R | false | false | 4,261 | r | #' Convert .mp3 files to .wav
#'
#' \code{mp32wav} converts several .mp3 files in working directory to .wav format
#' @usage mp32wav(samp.rate = 44.1, parallel = 1, from.path = NULL, to.path = NULL,
#' normalize = NULL, pb = TRUE)
#' @param samp.rate Sampling rate at which the .wav files should be written. The maximum permitted is 44.1 kHz (default). Units should be kHz.
#' @param parallel Numeric. Controls whether parallel computing is applied.
#' It specifies the number of cores to be used. Default is 1 (i.e. no parallel computing).
#' Not availble in Windows OS.
#' @param from.path Character string containing the directory path where the .mp3 files are located.
#' If \code{NULL} (default) then the current working directory is used.
#' @param to.path Character string containing the directory path where the .wav files will be saved.
#' If \code{NULL} (default) then the current working directory is used.
#' @param normalize Character string containing the units to be used for amplitude normalization. Check
#' (\code{\link[tuneR]{normalize}}) for details. If NULL (default) no normalization is carried out.
#' @param pb Logical argument to control progress bar. Default is \code{TRUE}. Note that progress bar is only used
#' when parallel = 1.
#' @return .wav files saved in the working directory with same name as original mp3 files.
#' @export
#' @name mp32wav
#' @examples
#' \dontrun{
#' # First set temporary folder
#' setwd(tempdir())
#'
#' #Then download mp3 files from xeno-canto
#' querxc(qword = "Phaethornis aethopygus", download = TRUE)
#'
#' # Convert all files to .wav format
#' mp32wav()
#'
#' #check this folder!!
#' getwd()
#' }
#' @details convert all .mp3 files in working directory to .wav format. Function used internally to read .mp3 files (\code{\link[tuneR]{readMP3}}) sometimes crashes.
#' @author Marcelo Araya-Salas (\email{araya-salas@@cornell.edu}) and Grace Smith Vidaurre
#last modification on jul-5-2016 (MAS)
mp32wav <- function(samp.rate = 44.1, parallel = 1, from.path = NULL, to.path = NULL, normalize = NULL,
pb = TRUE) {
if(!is.null(to.path))
{if(class(try(setwd(from.path), silent = TRUE)) == "try-error") stop("'path' provided does not exist")} else
from.path <- getwd() #set working directory
#normalize
if(!is.null(normalize))
{if(length(normalize) >1) stop("'normalize' should have a single value")
if(!normalize %in% c("1", "8", "16", "24", "32", "64", "0")) stop("'normalize' value not allowed (check the documentation from the normalize function in the tuneR package")
}
#fix sample rate
if(samp.rate > 44.1) samp.rate <- 44.1
#if parallel is not numeric
if(!is.numeric(parallel)) stop("'parallel' must be a numeric vector of length 1")
if(any(!(parallel %% 1 == 0),parallel < 1)) stop("'parallel' should be a positive integer")
#parallel not available on windows
if(parallel > 1 & Sys.info()[1] == "Windows")
{message("parallel computing not availabe in Windows OS for this function")
parallel <- 1}
if(parallel > 1)
lapp <- function(X, FUN) parallel::mclapply(X, FUN, mc.cores = parallel) else
{if(pb) lapp <- pbapply::pblapply else lapp <- lapply}
files <- list.files(path=getwd(), pattern = ".mp3$", ignore.case = TRUE) #list .mp3 files in working directory
if(length(files) == 0) stop("no 'mp3' files in working directory")
#exclude the ones that already have a .wav version
wavs <- list.files(pattern = "\\.wav$", ignore.case = TRUE)
files <- files[!substr(files, 0, nchar(files) - 4) %in% substr(wavs, 0, nchar(wavs) - 4)]
if(length(files) == 0) stop("all 'mp3' files have been converted")
message("Start writing wav files:")
if(!is.null(normalize))
suppressWarnings( a<-lapp(files, function(x) tuneR::writeWave(object = tuneR::normalize(tuneR::downsample(tuneR::readMP3(filename = x), samp.rate = samp.rate * 1000), unit = normalize), filename = paste0(from.path, substr(x, 0, nchar(x) - 4), ".wav")))) else
suppressWarnings( a<-lapp(files, function(x) tuneR::writeWave(object = tuneR::downsample(tuneR::readMP3(filename = x), samp.rate = samp.rate * 1000), filename = paste0(from.path, substr(x, 0, nchar(x) - 4), ".wav"))))
}
|
# Fit an elastic-net regression (alpha = 0.65) on the haematopoietic training set
# and append the fitted path summary to a results file.
library(glmnet)

# header spelled out (the original relied on partial matching via 'head = T')
mydata <- read.table("../../../../TrainingSet/FullSet/Classifier/haematopoietic.csv", header = TRUE, sep = ",")

# predictors start at column 4; column 1 holds the response
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# 10-fold cross-validation with a fixed seed for reproducibility
set.seed(123)
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.65, family = "gaussian", standardize = FALSE)

# append the fitted glmnet path to the output file
sink('./haematopoietic_070.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/haematopoietic/haematopoietic_070.R | no_license | esbgkannan/QSMART | R | false | false | 366 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/haematopoietic.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.65,family="gaussian",standardize=FALSE)
sink('./haematopoietic_070.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MiscFuns.R
\docType{data}
\name{setFixest_notes}
\alias{setFixest_notes}
\alias{getFixest_notes}
\title{Sets/gets whether to display notes in \code{fixest} estimation functions}
\format{
An object of class \code{function} of length 1.
}
\usage{
setFixest_notes(x)
getFixest_notes
}
\arguments{
\item{x}{A logical. If \code{FALSE}, then notes are permanently removed.}
}
\description{
Sets/gets the default value determining whether notes (informing that NAs or observations were removed) should be displayed by \code{fixest} estimation functions.
}
\examples{
# Change default with
setFixest_notes(FALSE)
# Back to default which is TRUE
getFixest_notes()
}
\author{
Laurent Berge
}
\keyword{datasets}
| /fixest/man/setFixest_notes.Rd | no_license | akhikolla/InformationHouse | R | false | true | 803 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MiscFuns.R
\docType{data}
\name{setFixest_notes}
\alias{setFixest_notes}
\alias{getFixest_notes}
\title{Sets/gets whether to display notes in \code{fixest} estimation functions}
\format{
An object of class \code{function} of length 1.
}
\usage{
setFixest_notes(x)
getFixest_notes
}
\arguments{
\item{x}{A logical. If \code{FALSE}, then notes are permanently removed.}
}
\description{
Sets/gets the default values of whether notes (informing for NA and observations removed) should be displayed in \code{fixest} estimation functions.
}
\examples{
# Change default with
setFixest_notes(FALSE)
# Back to default which is TRUE
getFixest_notes()
}
\author{
Laurent Berge
}
\keyword{datasets}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/transform.R
\name{rlogTransform}
\alias{rlogTransform}
\title{Regularised log transform counts}
\usage{
rlogTransform(data)
}
\arguments{
\item{data}{Matrix of raw counts}
}
\value{
Matrix of transformed data
}
\description{
Apply DESeq2's regularised log transformation to raw counts
}
| /man/rlogTransform.Rd | permissive | QSong-github/RNAtools | R | false | false | 374 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/transform.R
\name{rlogTransform}
\alias{rlogTransform}
\title{Regularised log transform counts}
\usage{
rlogTransform(data)
}
\arguments{
\item{data}{Matrix of raw counts}
}
\value{
Matrix of transformed data
}
\description{
Apply DESeq2's regularised log transformation to raw counts
}
|
# Merge PEPFAR MSD/Genie extracts for South Africa (FY17-FY20) with partner and
# site context files, and write the combined site-level dataset to disk.
library(tidyverse)
library(readxl)
library(ICPIutilities)
# NOTE(review): memory.limit() is Windows-only (and defunct in R >= 4.2)
memory.limit(size=500000)
#GENIE FILES#
# The FY20Q2 Genie extract is split across five site-level files; read each and stack
df1<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_1_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
df2<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_2_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
df3<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_3_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
df4<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_4_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
df5<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_5_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
genie_combined<-bind_rows(df1,df2,df3,df4,df5)
rm(df1,df2,df3,df4,df5)
#trim genie to only fy20 if it is not already in that format
genie_combined <- genie_combined %>%
filter(fiscal_year %in% c("2020", "2021"))
#MSD & bind to genie
# Historical FY17-FY19 rows come from the FY19Q4c MSD release
msd17_19<-read_msd("MSD\\fy19q4c\\MER_Structured_Datasets_Site_IM_FY17-20_20191220_v2_1_South Africa.txt",
save_rds = FALSE, remove_txt = FALSE)
msd17_19<-msd17_19 %>%
filter(fiscal_year %in% c("2017","2018","2019"))
genie_msd<-genie_combined %>%
bind_rows(msd17_19)
rm(genie_combined,msd17_19)
# Cache the merged MSD + Genie data before attaching context attributes
saveRDS(genie_msd,"Processed_Files\\msd_genie_fy17to2020_20200526.RDS")
##CONTEXT FILES & RSD
# Partner / DSP lookup, keyed by mechanism code
dsp<-read_excel("ContextFiles\\UserFriendly_PartnerName_DSPcolumn.xlsx") %>%
rename(mech_code=MechanismID,
DSP=DSP_18_19)
# eThekwini sites that transitioned from USAID to CDC in FY19Q1, keyed by org unit uid
ethk<-read_excel("ContextFiles\\eThekwiniSiteShifts.xlsx") %>%
filter(Transitionstat=="USAIDtoCDC") %>%
rename(fy19q1_sitetransition=Transitionstat,
orgunituid=Facilityuid) %>%
select(orgunituid,fy19q1_sitetransition)
# Siyenza site attributes, keyed by org unit uid
siyenza<-read_excel("ContextFiles\\siyenza_att_uid_20200318.xlsx", sheet="Sheet1") %>%
rename(orgunituid=orgunit_internal_id) %>%
select(-c(facility))
# Facility type lookup, keyed by facility name
factype<-read_excel("ContextFiles\\Facility_Type.xlsx") %>%
rename(facility=Facility)
#MSD/Genie/Context Merge ##
# Left joins keep every MSD/Genie row; context columns are NA where no match exists
final<-genie_msd %>%
left_join(dsp, by="mech_code") %>%
left_join(ethk,by="orgunituid") %>%
left_join(factype, by="facility") %>%
left_join(siyenza, by="orgunituid")
rm(dsp,ethk,factype,genie_msd,siyenza)
#Write file#
write_tsv(final,"Processed_Files\\msd_genie_fy17to20_20200526_attributes.txt",na="")
| /MER/genie_merge.R | no_license | gsarfaty/SA-Scripts | R | false | false | 2,293 | r | library(tidyverse)
library(readxl)
library(ICPIutilities)
memory.limit(size=500000)
#GENIE FILES#
df1<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_1_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
df2<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_2_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
df3<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_3_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
df4<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_4_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
df5<-read_msd("Genie\\fy20q2\\Genie_SITE_IM_South_Africa_5_20200526.txt", save_rds = FALSE, remove_txt = FALSE)
genie_combined<-bind_rows(df1,df2,df3,df4,df5)
rm(df1,df2,df3,df4,df5)
#trim genie to only fy20 if it is not already in that format
genie_combined <- genie_combined %>%
filter(fiscal_year %in% c("2020", "2021"))
#MSD & bind to genie
msd17_19<-read_msd("MSD\\fy19q4c\\MER_Structured_Datasets_Site_IM_FY17-20_20191220_v2_1_South Africa.txt",
save_rds = FALSE, remove_txt = FALSE)
msd17_19<-msd17_19 %>%
filter(fiscal_year %in% c("2017","2018","2019"))
genie_msd<-genie_combined %>%
bind_rows(msd17_19)
rm(genie_combined,msd17_19)
saveRDS(genie_msd,"Processed_Files\\msd_genie_fy17to2020_20200526.RDS")
##CONTEXT FILES & RSD
dsp<-read_excel("ContextFiles\\UserFriendly_PartnerName_DSPcolumn.xlsx") %>%
rename(mech_code=MechanismID,
DSP=DSP_18_19)
ethk<-read_excel("ContextFiles\\eThekwiniSiteShifts.xlsx") %>%
filter(Transitionstat=="USAIDtoCDC") %>%
rename(fy19q1_sitetransition=Transitionstat,
orgunituid=Facilityuid) %>%
select(orgunituid,fy19q1_sitetransition)
siyenza<-read_excel("ContextFiles\\siyenza_att_uid_20200318.xlsx", sheet="Sheet1") %>%
rename(orgunituid=orgunit_internal_id) %>%
select(-c(facility))
factype<-read_excel("ContextFiles\\Facility_Type.xlsx") %>%
rename(facility=Facility)
#MSD/Genie/Context Merge ##
final<-genie_msd %>%
left_join(dsp, by="mech_code") %>%
left_join(ethk,by="orgunituid") %>%
left_join(factype, by="facility") %>%
left_join(siyenza, by="orgunituid")
rm(dsp,ethk,factype,genie_msd,siyenza)
#Write file#
write_tsv(final,"Processed_Files\\msd_genie_fy17to20_20200526_attributes.txt",na="")
|
% File man/lm.br_internal.Rd
% Part of the lm.br package
\name{lm.br_internal}
\alias{Cpp_Clmbr}
\alias{Cpp_Clmbr-class}
\alias{Rcpp_Cpp_Clmbr-class}
\docType{class}
\title{Internal lm.br class \code{"Cpp_Clmbr"} }
\description{
Internal S4 class.
Class of data + model objects used for broken-line regression. An object instance stores pre-calculated quantities for a particular data set and model, so that methods can quickly compute exact confidence intervals and regions for the changepoint.
}
\details{
This class is not meant for the user.
}
\section{Extends}{
Class \code{"\linkS4class{C++Object}"}, directly.
All reference classes extend and inherit methods from \code{"\linkS4class{envRefClass}"}.
}
\section{Methods}{
\describe{
\item{\code{ci}:}{ printout confidence interval }
\item{\code{cr}:}{ printout confidence region }
\item{\code{mle}:}{ maximum likelihood estimates of parameters }
\item{\code{sety}:}{ set new y-values in a Cpp_Clmbr object }
\item{\code{sl}:}{ significance level for a postulate changepoint value }
}
}
\seealso{ lm.br }
\examples{
showClass( Cpp_Clmbr )
}
\keyword{ classes }
\keyword{ internal }
| /fuzzedpackages/lm.br/man/lm.br_internal.Rd | no_license | akhikolla/testpackages | R | false | false | 1,243 | rd |
% File man/lm.br_internal.Rd
% Part of the lm.br package
\name{lm.br_internal}
\alias{Cpp_Clmbr}
\alias{Cpp_Clmbr-class}
\alias{Rcpp_Cpp_Clmbr-class}
\docType{class}
\title{Internal lm.br class \code{"Cpp_Clmbr"} }
\description{
Internal S4 class.
Class of data + model objects used for broken line regression. An object instance stores pre-calculated quantities for a particular data set and model, so that methods can quickly compute exact-confidence intervals and regions for the changepoint.
}
\details{
This class is not meant for the user.
}
\section{Extends}{
Class \code{"\linkS4class{C++Object}"}, directly.
All reference classes extend and inherit methods from \code{"\linkS4class{envRefClass}"}.
}
\section{Methods}{
\describe{
\item{\code{ci}:}{ printout confidence interval }
\item{\code{cr}:}{ printout confidence region }
\item{\code{mle}:}{ maximum likelihood estimates of parameters }
\item{\code{sety}:}{ set new y-values in a Cpp_Clmbr object }
\item{\code{sl}:}{ significance level for a postulate changepoint value }
}
}
\seealso{ lm.br }
\examples{
showClass( Cpp_Clmbr )
}
\keyword{ classes }
\keyword{ internal }
|
# Exploratory Data Analysis
# Coursera
# John Hopkins University
# Jason R. Battles
# jason.r.battles@gmail.com
## Obtain full dataset. Remove NA values
## Load the full dataset; "?" marks missing values in this file.
## TRUE/FALSE spelled out (the original used the reassignable shorthands T/F).
data_full <- read.csv("../data/household_power_consumption.txt", header=TRUE, sep=';', na.strings="?",
                      nrows=2075259, check.names=FALSE, stringsAsFactors=FALSE, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")

## Keep only 2007-02-01 and 2007-02-02, then free the full dataset
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)

## Build a POSIXct date-time column from the Date and Time fields
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)

## Plot 1: histogram of global active power
hist(data$Global_active_power, main="Global Active Power",
     xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")

## Copy the screen device to a 480x480 PNG (closed by dev.off() below)
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off() | /plot1.R | no_license | jrbattles/ExploratoryDataAnalysis3 | R | false | false | 899 | r | # Exploratory Data Analysis
# Coursera
# John Hopkins University
# Jason R. Battles
# jason.r.battles@gmail.com
## Obtain full dataset. Remove NA values
data_full <- read.csv("../data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data and remove full dataset.
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 1
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Saving to file
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off() |
\name{modelPriorAS-class}
\Rdversion{1.1}
\docType{class}
\alias{modelPriorAS-class}
\alias{show,modelPriorAS-method}
\alias{[,modelPriorAS,ANY,ANY,ANY-method}
\alias{coef,modelPriorAS-method}
\alias{coef,modelPriorAS,ANY,ANY,ANY-method}
\title{Class "modelPriorAS"}
\description{
\code{modelPriorAS} stores parameters for the prior distribution on
all possible alternative splicing configurations (i.e. prior on model
space). This information is used for de novo reconstruction of
splicing variants.
}
\section{Objects from the Class}{
Objects are created by function \code{modelPrior}.
}
\section{Slots}{
\describe{
\item{nvarPrior}{Prior on the number of variants per gene. A list with components \code{"nbpar"} containing
the parameters of the Negative Binomial distribution, \code{"obs"}
containing the observed counts and \code{"pred"} the Negative
Binomial predicted counts.}
\item{nexonPrior}{Prior on the number of exons in an expressed
variant. A list with components \code{"bbpar"} containing
Beta-Binomial parameters, \code{"obs"} containing the observed
counts and \code{"pred"} the Beta-Binomial predicted counts.}
}
}
\section{Methods}{
\describe{
\item{show}{\code{signature(object = "modelPriorAS")}: Displays
general information about the object. }
\item{"["}{Selects prior parameters for genes with the specified
number of exons}
\item{coef}{Selects a single gene}
}
}
\author{
David Rossell
}
\seealso{
\code{\link{procGenome}} and \code{\link{createDenovoGenome}} to
create \code{modelPriorAS} objects.
}
\examples{
showClass("modelPriorAS")
}
\keyword{classes}
| /man/modelPriorAS-class.Rd | no_license | davidrusi/casper | R | false | false | 1,673 | rd | \name{modelPriorAS-class}
\Rdversion{1.1}
\docType{class}
\alias{modelPriorAS-class}
\alias{show,modelPriorAS-method}
\alias{[,modelPriorAS,ANY,ANY,ANY-method}
\alias{coef,modelPriorAS-method}
\alias{coef,modelPriorAS,ANY,ANY,ANY-method}
\title{Class "modelPriorAS"}
\description{
\code{modelPriorAS} stores parameters for the prior distribution on
all possible alternative splicing configuration (i.e. prior on model
space). This information is used for de novo reconstruction of
splicing variants.
}
\section{Objects from the Class}{
Objects are created by function \code{modelPrior}.
}
\section{Slots}{
\describe{
\item{nvarPrior}{Prior on the number of variants per gene. A list with components \code{"nbpar"} containing
the parameters of the Negative Binomial distribution, \code{"obs"}
containing the observed counts and \code{"pred"} the Negative
Binomial predicted counts.}
\item{nexonPrior}{Prior on the number of exons in an expressed
variant. A list with components \code{"bbpar"} containing
Beta-Binomial parameters, \code{"obs"} containing the observed
counts and \code{"pred"} the Beta-Binomial predicted counts.}
}
}
\section{Methods}{
\describe{
\item{show}{\code{signature(object = "modelPriorAS")}: Displays
general information about the object. }
\item{"["}{Selects prior parameters for genes with the specified
number of exons}
\item{coef}{Selects a single gene}
}
}
\author{
David Rossell
}
\seealso{
\code{\link{procGenome}} and \code{\link{createDenovoGenome}} to
create \code{modelPriorAS} objects.
}
\examples{
showClass("modelPriorAS")
}
\keyword{classes}
|
# Shiny UI for a fixed-rate loan amortization calculator: the user enters the
# principal, the interest rate and the repayment period; the server computes the
# monthly payment, total interest and total amount to be repaid.
library(shiny)
shinyUI(pageWithSidebar(
# Page header
headerPanel("Calculation of the total cost of a Loan"),
# Left-hand panel: user documentation plus the three numeric inputs
sidebarPanel(
h4('Introduction / Documentation'),
h5('The goal of this application is to calculate to cost of amortization of a
loan. In this application just amortization for fixed loans could be
calculated. The user should introduce 3 inputs, the total money that want
to request, the interest rate that is required to deliver this money and
the years that are need to pay back that money. Once that the user
introduce these 3 inputs,just should press the bar Submit and the
results will be shown on the Main Panel'),
h4('Inputs'),
# Loan principal (currency units)
numericInput('money', 'Quantity of money that we request', 0, min=0, max=100000000000),
# Annual interest rate, in percent
numericInput('interest', 'Interest ratio in %', 0, min=0, max=100),
# Repayment period, in years
numericInput('years', 'number of years to pay back', 0, min=0, max=100),
submitButton('Submit')
),
# Main panel: outputs rendered by the server ("pm", "pi", "P")
mainPanel(
h4('Output information'),
h5('Here the results are displayed, first one is the quantity that the user
should pay every month to return the loan in the required period, second
is the total interest that the user will pay by the end of the loan
considering the introduced inputs, the last one will be the total amount
that the user will have to pay back for that loan'),
h3('Results of the calculation'),
# Monthly installment
h4('Monthly payment'),
verbatimTextOutput("pm"),
# Cumulative interest over the life of the loan
h4('Total Interest payment'),
verbatimTextOutput("pi"),
# Principal plus total interest
h4('Total quantity to return'),
verbatimTextOutput("P")
)
))
| /ui.R | no_license | Yambcn/Shiny-Application | R | false | false | 1,624 | r | library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Calculation of the total cost of a Loan"),
sidebarPanel(
h4('Introduction / Documentation'),
h5('The goal of this application is to calculate to cost of amortization of a
loan. In this application just amortization for fixed loans could be
calculated. The user should introduce 3 inputs, the total money that want
to request, the interest rate that is required to deliver this money and
the years that are need to pay back that money. Once that the user
introduce these 3 inputs,just should press the bar Submit and the
results will be shown on the Main Panel'),
h4('Inputs'),
numericInput('money', 'Quantity of money that we request', 0, min=0, max=100000000000),
numericInput('interest', 'Interest ratio in %', 0, min=0, max=100),
numericInput('years', 'number of years to pay back', 0, min=0, max=100),
submitButton('Submit')
),
mainPanel(
h4('Output information'),
h5('Here the results are displayed, first one is the quantity that the user
should pay every month to return the loan in the required period, second
is the total interest that the user will pay by the end of the loan
considering the introduced inputs, the last one will be the total amount
that the user will have to pay back for that loan'),
h3('Results of the calculation'),
h4('Monthly payment'),
verbatimTextOutput("pm"),
h4('Total Interest payment'),
verbatimTextOutput("pi"),
h4('Total quantity to return'),
verbatimTextOutput("P")
)
))
|
#30/06/20
#Mads F. Schou
#
# Fits MCMCglmm Poisson models of the maximum number of attackers observed
# per trial as a function of species and standardized prey size (linear and
# quadratic versions), with nest identity as a random effect. Two outlier
# trials (> 20 attackers) are removed before fitting; results are saved as
# an .RData image in OUTPATH.

rm(list = ls(all = TRUE))
pacman::p_load("dplyr","tidyr","pedantics","doBy","MCMCglmm","parallel","coda","fitR")

DATAPATH = "Temp"
OUTPATH = "Analyses/4 Max attackers MCMC"

#---- Data
mydat <- read.table(paste(DATAPATH,"social attack data for Mads 22.05.20_prepped.csv",sep = "/"), sep = ",", header = TRUE)

#---- Prior
# Weakly informative prior: one residual variance component (R) and one
# random-effect variance component (G1, for nestId), both with nu = 0.002.
MyPrior <- list(
  R=list(V = diag(1), nu=0.002, fix = FALSE),
  G=list(G1=list(V = diag(1),
                 nu = 0.002)))

####################################
##---- Filtering (Keep minor at this stage)
####################################

# Two highly influential trials (more than 20 attackers) are excluded.
outlier_ids <- mydat$trial[mydat$attackers > 20]
mydat <- mydat[!mydat$trial %in% outlier_ids,]

####################################
##---- Prepping (Keep minor at this stage)
####################################

# Collapse to one row per trial: the maximum number of attackers. summaryBy()
# creates the response column 'attackers.max' used in the models below.
# Note that we could leave nestID out here, as most only have one observation per nest id.
mydat <- summaryBy(attackers ~ species + nestId + preysize + trial + preysize_z + species2, data = mydat, FUN = max)

# Re-standardize prey size on the aggregated data and convert the grouping
# variables to factors.
# FIX: dplyr::funs() is deprecated (and removed in dplyr >= 1.0.0); passing
# the function directly to mutate_at() behaves identically and works across
# all dplyr versions.
mydat <- mydat %>%
  mutate(preysize_z = scale(preysize)[,1]) %>%
  mutate_at(c("species","nestId","trial"), factor)

####################################
##---- TEST RUN
####################################
#
# MyStart <- Sys.time()
# mtest <- MCMCglmm(attackers ~ timepoint_z + species-1 + species:preysize_z,
#                 random = ~ us(1+timepoint_z):trial,
#                 data = mydat,
#                 family = "poisson",
#                 prior = MyPrior,
#                 nitt=100000, thin=1, burnin=0,
#                 verbose = T)
# TotalTime <- Sys.time() - MyStart
#
# #---------- Inspect how we need to set thin and burnin to get efficient sampling
#
# #Extract trace
# Mytrace <- mtest$VCV
# mcmc.trace <- mcmc(Mytrace)
#
# #Plot trace plots
# plot(mcmc.trace)
#
# #Cut by burnin
# mcmc.trace.burned <- burnAndThin(mcmc.trace, burn = 30000)
# plot(mcmc.trace.burned) #plot again
#
# #Autocorr
# autocorr.plot(mcmc.trace.burned)
# autoOut <- autocorr.diag(mcmc.trace.burned)
# autoOut <- data.frame("rowMax" = apply(autoOut,1,max),autoOut) #Get max auto across all variates
# autoOut
#
# #Thin it
# mcmc.trace.burned.thinned <- burnAndThin(mcmc.trace.burned, thin = 500)
# autocorr.plot(mcmc.trace.burned.thinned) #Inspect autocorr again
# autoOut <- autocorr.diag(mcmc.trace.burned.thinned)
# autoOut <- data.frame("rowMax" = apply(autoOut,1,max),autoOut) #Get max auto across all variates
# autoOut
#
# plot(mcmc.trace.burned.thinned) # Looks good

####################################
##---- FINAL RUN
####################################

#---------------------------- No poly
# Linear effect of prey size; species-specific intercepts (species-1) and
# species-specific prey-size slopes. Sampler settings (nitt/thin/burnin)
# follow the autocorrelation inspection in the TEST RUN section above.
MyStart <- Sys.time()
#set.seed(1)
#m1_3 <- mclapply(1:3, function(i) {
m1 <- MCMCglmm(attackers.max ~ species-1 + species:preysize_z,
               random = ~ nestId,
               data = mydat,
               family = "poisson",
               prior = MyPrior,
               #thin = 1,burnin = 0,nitt = 1000,
               nitt=1030000, thin=500, burnin=30000,
               verbose = T)
#}, mc.cores=3)
TotalTime <- Sys.time() - MyStart

#---------------------------- Poly
# Same model with a quadratic (2nd-degree polynomial) effect of prey size.
#set.seed(1)
#m1_3 <- mclapply(1:3, function(i) {
m1.poly <- MCMCglmm(attackers.max ~ species-1 + species:poly(preysize_z,2),
                    random = ~ nestId,
                    data = mydat,
                    family = "poisson",
                    prior = MyPrior,
                    #thin = 1,burnin = 0,nitt = 1000,
                    nitt=1030000, thin=500, burnin=30000,
                    verbose = T)
#}, mc.cores=3)

save.image(paste(OUTPATH,"/", "Max attackers no outliers.RData", sep = ""))
| /code/4 Max attackers MCMC/4 Max attackers MCMC.R | no_license | abumadsen/lena-spider-attack | R | false | false | 3,673 | r | #30/06/20
#Mads F. Schou
#
# Fits MCMCglmm Poisson models of the maximum number of attackers observed
# per trial as a function of species and standardized prey size (linear and
# quadratic versions), with nest identity as a random effect. Two outlier
# trials (> 20 attackers) are removed before fitting; results are saved as
# an .RData image in OUTPATH.

rm(list = ls(all = TRUE))
pacman::p_load("dplyr","tidyr","pedantics","doBy","MCMCglmm","parallel","coda","fitR")

DATAPATH = "Temp"
OUTPATH = "Analyses/4 Max attackers MCMC"

#---- Data
mydat <- read.table(paste(DATAPATH,"social attack data for Mads 22.05.20_prepped.csv",sep = "/"), sep = ",", header = TRUE)

#---- Prior
# Weakly informative prior: one residual variance component (R) and one
# random-effect variance component (G1, for nestId), both with nu = 0.002.
MyPrior <- list(
  R=list(V = diag(1), nu=0.002, fix = FALSE),
  G=list(G1=list(V = diag(1),
                 nu = 0.002)))

####################################
##---- Filtering (Keep minor at this stage)
####################################

# Two highly influential trials (more than 20 attackers) are excluded.
outlier_ids <- mydat$trial[mydat$attackers > 20]
mydat <- mydat[!mydat$trial %in% outlier_ids,]

####################################
##---- Prepping (Keep minor at this stage)
####################################

# Collapse to one row per trial: the maximum number of attackers. summaryBy()
# creates the response column 'attackers.max' used in the models below.
# Note that we could leave nestID out here, as most only have one observation per nest id.
mydat <- summaryBy(attackers ~ species + nestId + preysize + trial + preysize_z + species2, data = mydat, FUN = max)

# Re-standardize prey size on the aggregated data and convert the grouping
# variables to factors.
# FIX: dplyr::funs() is deprecated (and removed in dplyr >= 1.0.0); passing
# the function directly to mutate_at() behaves identically and works across
# all dplyr versions.
mydat <- mydat %>%
  mutate(preysize_z = scale(preysize)[,1]) %>%
  mutate_at(c("species","nestId","trial"), factor)

####################################
##---- TEST RUN
####################################
#
# MyStart <- Sys.time()
# mtest <- MCMCglmm(attackers ~ timepoint_z + species-1 + species:preysize_z,
#                 random = ~ us(1+timepoint_z):trial,
#                 data = mydat,
#                 family = "poisson",
#                 prior = MyPrior,
#                 nitt=100000, thin=1, burnin=0,
#                 verbose = T)
# TotalTime <- Sys.time() - MyStart
#
# #---------- Inspect how we need to set thin and burnin to get efficient sampling
#
# #Extract trace
# Mytrace <- mtest$VCV
# mcmc.trace <- mcmc(Mytrace)
#
# #Plot trace plots
# plot(mcmc.trace)
#
# #Cut by burnin
# mcmc.trace.burned <- burnAndThin(mcmc.trace, burn = 30000)
# plot(mcmc.trace.burned) #plot again
#
# #Autocorr
# autocorr.plot(mcmc.trace.burned)
# autoOut <- autocorr.diag(mcmc.trace.burned)
# autoOut <- data.frame("rowMax" = apply(autoOut,1,max),autoOut) #Get max auto across all variates
# autoOut
#
# #Thin it
# mcmc.trace.burned.thinned <- burnAndThin(mcmc.trace.burned, thin = 500)
# autocorr.plot(mcmc.trace.burned.thinned) #Inspect autocorr again
# autoOut <- autocorr.diag(mcmc.trace.burned.thinned)
# autoOut <- data.frame("rowMax" = apply(autoOut,1,max),autoOut) #Get max auto across all variates
# autoOut
#
# plot(mcmc.trace.burned.thinned) # Looks good

####################################
##---- FINAL RUN
####################################

#---------------------------- No poly
# Linear effect of prey size; species-specific intercepts (species-1) and
# species-specific prey-size slopes. Sampler settings (nitt/thin/burnin)
# follow the autocorrelation inspection in the TEST RUN section above.
MyStart <- Sys.time()
#set.seed(1)
#m1_3 <- mclapply(1:3, function(i) {
m1 <- MCMCglmm(attackers.max ~ species-1 + species:preysize_z,
               random = ~ nestId,
               data = mydat,
               family = "poisson",
               prior = MyPrior,
               #thin = 1,burnin = 0,nitt = 1000,
               nitt=1030000, thin=500, burnin=30000,
               verbose = T)
#}, mc.cores=3)
TotalTime <- Sys.time() - MyStart

#---------------------------- Poly
# Same model with a quadratic (2nd-degree polynomial) effect of prey size.
#set.seed(1)
#m1_3 <- mclapply(1:3, function(i) {
m1.poly <- MCMCglmm(attackers.max ~ species-1 + species:poly(preysize_z,2),
                    random = ~ nestId,
                    data = mydat,
                    family = "poisson",
                    prior = MyPrior,
                    #thin = 1,burnin = 0,nitt = 1000,
                    nitt=1030000, thin=500, burnin=30000,
                    verbose = T)
#}, mc.cores=3)

save.image(paste(OUTPATH,"/", "Max attackers no outliers.RData", sep = ""))
|
## Smoke tests for progressr::with_progress(): run slow_sum() under a wide
## range of progress handlers (text, GUI, sound, notification, none) and
## check that the computed result is unaffected by how -- or whether --
## progress is reported.
source("incl/start.R")
options(progressr.demo.delay = 0.001)
options(progressr.interval = 0.0)
options(progressr.clear = FALSE)

message("with_progress() ...")

## Reference input and expected result used by every section below.
## NOTE: inside the with_progress() calls, 'sum' is assigned and shadows
## base::sum() for the rest of the script; 'truth' is computed before that.
x <- 1:100
truth <- sum(x)

message("with_progress() - default ...")
if (requireNamespace("utils")) {
  with_progress({
    sum <- slow_sum(x)
  })
  print(sum)
  stopifnot(sum == truth)
  ## Output and conditions raised inside the expression are delayed until
  ## progress reporting completes (delay_conditions = "condition").
  with_progress({
    cat("This stdout output will be delayed")
    message("This message will be delayed")
    warning("This warning will be delayed")
    signalCondition(simpleCondition("This simpleCondition will be delayed"))
    sum <- slow_sum(x)
  }, interval = 0.1, enable = TRUE, delay_conditions = "condition")
  print(sum)
  stopifnot(sum == truth)
}
message("with_progress() - default ... done")

message("with_progress() - filesize ...")
with_progress({
  sum <- slow_sum(x)
}, handler_filesize())
print(sum)
stopifnot(sum == truth)
message("with_progress() - filesize ... done")

message("with_progress() - utils::txtProgressBar() ...")
if (requireNamespace("utils")) {
  with_progress({
    sum <- slow_sum(x)
  }, handler_txtprogressbar(style = 2L))
  print(sum)
  stopifnot(sum == truth)
}
message("with_progress() - utils::txtProgressBar() ... done")

message("with_progress() - tcltk::tkProgressBar() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_tkprogressbar)
message("with_progress() - tcltk::tkProgressBar() ... done")

message("with_progress() - utils::winProgressBar() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_winprogressbar)
message("with_progress() - utils::winProgressBar() ... done")

message("with_progress() - progress::progress_bar() ...")
if (requireNamespace("progress")) {
  ## Display progress using default handler
  with_progress({
    sum <- slow_sum(x)
  }, handler_progress(clear = FALSE))
  print(sum)
  stopifnot(sum == truth)
}
message("with_progress() - progress::progress_bar() ... done")

message("with_progress() - pbmcapply::progressBar() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_pbmcapply)
message("with_progress() - pbmcapply::progressBar() ... done")

message("with_progress() - ascii_alert ...")
with_progress({
  sum <- slow_sum(x)
}, handler_ascii_alert())
print(sum)
stopifnot(sum == truth)
message("with_progress() - ascii_alert ... done")

message("with_progress() - beepr::beep() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_beepr)
print(sum)
stopifnot(sum == truth)
message("with_progress() - beepr::beep() ... done")

message("with_progress() - notifier::notify() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_notifier)
print(sum)
stopifnot(sum == truth)
message("with_progress() - notifier::notify() ... done")

message("with_progress() - void ...")
## Mute progress updates
with_progress({
  sum <- slow_sum(x)
}, NULL)
print(sum)
stopifnot(sum == truth)

message(" - via option")
## NOTE: an empty handler list is set here (not NULL); setting the option
## to NULL would instead fall back to the default utils::txtProgressBar()
## handler
options(progressr.handlers = list())
with_progress({
  sum <- slow_sum(x)
})
print(sum)
stopifnot(sum == truth)
message("with_progress() - void ... done")

message("with_progress() - multiple handlers ...")
if (requireNamespace("utils", quietly = TRUE)) {
  handlers <- list(handler_txtprogressbar, handler_newline, handler_debug)
  options(progressr.handlers = handlers)
  with_progress({
    sum <- slow_sum(x)
  })
  print(sum)
  stopifnot(sum == truth)
}
message("with_progress() - multiple handlers ... done")

message("with_progress() ... done")

source("incl/end.R")
| /tests/with_progress.R | no_license | DavisVaughan/progressr | R | false | false | 3,527 | r | source("incl/start.R")
## Smoke tests for progressr::with_progress(): run slow_sum() under a wide
## range of progress handlers (text, GUI, sound, notification, none) and
## check that the computed result is unaffected by how -- or whether --
## progress is reported.
options(progressr.demo.delay = 0.001)
options(progressr.interval = 0.0)
options(progressr.clear = FALSE)

message("with_progress() ...")

## Reference input and expected result used by every section below.
## NOTE: inside the with_progress() calls, 'sum' is assigned and shadows
## base::sum() for the rest of the script; 'truth' is computed before that.
x <- 1:100
truth <- sum(x)

message("with_progress() - default ...")
if (requireNamespace("utils")) {
  with_progress({
    sum <- slow_sum(x)
  })
  print(sum)
  stopifnot(sum == truth)
  ## Output and conditions raised inside the expression are delayed until
  ## progress reporting completes (delay_conditions = "condition").
  with_progress({
    cat("This stdout output will be delayed")
    message("This message will be delayed")
    warning("This warning will be delayed")
    signalCondition(simpleCondition("This simpleCondition will be delayed"))
    sum <- slow_sum(x)
  }, interval = 0.1, enable = TRUE, delay_conditions = "condition")
  print(sum)
  stopifnot(sum == truth)
}
message("with_progress() - default ... done")

message("with_progress() - filesize ...")
with_progress({
  sum <- slow_sum(x)
}, handler_filesize())
print(sum)
stopifnot(sum == truth)
message("with_progress() - filesize ... done")

message("with_progress() - utils::txtProgressBar() ...")
if (requireNamespace("utils")) {
  with_progress({
    sum <- slow_sum(x)
  }, handler_txtprogressbar(style = 2L))
  print(sum)
  stopifnot(sum == truth)
}
message("with_progress() - utils::txtProgressBar() ... done")

message("with_progress() - tcltk::tkProgressBar() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_tkprogressbar)
message("with_progress() - tcltk::tkProgressBar() ... done")

message("with_progress() - utils::winProgressBar() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_winprogressbar)
message("with_progress() - utils::winProgressBar() ... done")

message("with_progress() - progress::progress_bar() ...")
if (requireNamespace("progress")) {
  ## Display progress using default handler
  with_progress({
    sum <- slow_sum(x)
  }, handler_progress(clear = FALSE))
  print(sum)
  stopifnot(sum == truth)
}
message("with_progress() - progress::progress_bar() ... done")

message("with_progress() - pbmcapply::progressBar() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_pbmcapply)
message("with_progress() - pbmcapply::progressBar() ... done")

message("with_progress() - ascii_alert ...")
with_progress({
  sum <- slow_sum(x)
}, handler_ascii_alert())
print(sum)
stopifnot(sum == truth)
message("with_progress() - ascii_alert ... done")

message("with_progress() - beepr::beep() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_beepr)
print(sum)
stopifnot(sum == truth)
message("with_progress() - beepr::beep() ... done")

message("with_progress() - notifier::notify() ...")
with_progress({
  sum <- slow_sum(x)
}, handler_notifier)
print(sum)
stopifnot(sum == truth)
message("with_progress() - notifier::notify() ... done")

message("with_progress() - void ...")
## Mute progress updates
with_progress({
  sum <- slow_sum(x)
}, NULL)
print(sum)
stopifnot(sum == truth)

message(" - via option")
## NOTE: an empty handler list is set here (not NULL); setting the option
## to NULL would instead fall back to the default utils::txtProgressBar()
## handler
options(progressr.handlers = list())
with_progress({
  sum <- slow_sum(x)
})
print(sum)
stopifnot(sum == truth)
message("with_progress() - void ... done")

message("with_progress() - multiple handlers ...")
if (requireNamespace("utils", quietly = TRUE)) {
  handlers <- list(handler_txtprogressbar, handler_newline, handler_debug)
  options(progressr.handlers = handlers)
  with_progress({
    sum <- slow_sum(x)
  })
  print(sum)
  stopifnot(sum == truth)
}
message("with_progress() - multiple handlers ... done")

message("with_progress() ... done")

source("incl/end.R")
|
## Put comments here that give an overall description of what your
## functions do
## Build a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions:
##   set(y)        - replace the stored matrix and drop any cached inverse
##   get()         - return the stored matrix
##   setInverse(i) - store a computed inverse in the cache
##   getInverse()  - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # new matrix invalidates the old inverse
    },
    get = function() x,
    setInverse = function(inverse) cached <<- inverse,
    getInverse = function() cached
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If an inverse is already cached it is returned directly (after emitting
## a message); otherwise the inverse is computed with solve(), stored in
## the cache, and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- solve(x$get(), ...)
  x$setInverse(result)
  result
}
| /cachematrix.R | no_license | deevesh/ProgrammingAssignment2 | R | false | false | 769 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix(): build a special "matrix" object that can cache its
## inverse. Returns a list of four functions:
##   set(y)        - store a new matrix and invalidate the cached inverse
##   get()         - return the stored matrix
##   setInverse(i) - cache a computed inverse
##   getInverse()  - return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
        inv <- NULL  # cached inverse; NULL means "not computed yet"
        set <- function(y) {
                x <<- y
                inv <<- NULL  # new matrix invalidates any cached inverse
        }
        get <- function() x
        setInverse <- function(inverse) inv <<- inverse
        getInverse <- function() inv
        list(set = set,
             get = get,
             setInverse = setInverse,
             getInverse = getInverse)
}
## cacheSolve(): return the inverse of the special "matrix" created by
## makeCacheMatrix(). A cached inverse is returned directly (with a
## message); otherwise the inverse is computed with solve(), cached via
## setInverse(), and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        inv <- x$getInverse()
        if (!is.null(inv)) {
                message("getting cached data")
                return(inv)
        }
        mat <- x$get()
        inv <- solve(mat, ...)
        x$setInverse(inv)
        inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pinpoint_service.R
\name{pinpoint}
\alias{pinpoint}
\title{Amazon Pinpoint}
\usage{
pinpoint(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Doc Engage API - Amazon Pinpoint API
}
\section{Service syntax}{
\preformatted{svc <- pinpoint(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=pinpoint_create_app]{create_app} \tab Creates an application \cr
\link[=pinpoint_create_campaign]{create_campaign} \tab Creates a new campaign for an application or updates the settings of an existing campaign for an application \cr
\link[=pinpoint_create_email_template]{create_email_template} \tab Creates a message template for messages that are sent through the email channel \cr
\link[=pinpoint_create_export_job]{create_export_job} \tab Creates an export job for an application \cr
\link[=pinpoint_create_import_job]{create_import_job} \tab Creates an import job for an application \cr
\link[=pinpoint_create_journey]{create_journey} \tab Creates a journey for an application \cr
\link[=pinpoint_create_push_template]{create_push_template} \tab Creates a message template for messages that are sent through a push notification channel \cr
\link[=pinpoint_create_segment]{create_segment} \tab Creates a new segment for an application or updates the configuration, dimension, and other settings for an existing segment that's associated with an application\cr
\link[=pinpoint_create_sms_template]{create_sms_template} \tab Creates a message template for messages that are sent through the SMS channel \cr
\link[=pinpoint_create_voice_template]{create_voice_template} \tab Creates a message template for messages that are sent through the voice channel \cr
\link[=pinpoint_delete_adm_channel]{delete_adm_channel} \tab Disables the ADM channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_apns_channel]{delete_apns_channel} \tab Disables the APNs channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_apns_sandbox_channel]{delete_apns_sandbox_channel} \tab Disables the APNs sandbox channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_apns_voip_channel]{delete_apns_voip_channel} \tab Disables the APNs VoIP channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_apns_voip_sandbox_channel]{delete_apns_voip_sandbox_channel} \tab Disables the APNs VoIP sandbox channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_app]{delete_app} \tab Deletes an application \cr
\link[=pinpoint_delete_baidu_channel]{delete_baidu_channel} \tab Disables the Baidu channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_campaign]{delete_campaign} \tab Deletes a campaign from an application \cr
\link[=pinpoint_delete_email_channel]{delete_email_channel} \tab Disables the email channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_email_template]{delete_email_template} \tab Deletes a message template for messages that were sent through the email channel \cr
\link[=pinpoint_delete_endpoint]{delete_endpoint} \tab Deletes an endpoint from an application \cr
\link[=pinpoint_delete_event_stream]{delete_event_stream} \tab Deletes the event stream for an application \cr
\link[=pinpoint_delete_gcm_channel]{delete_gcm_channel} \tab Disables the GCM channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_journey]{delete_journey} \tab Deletes a journey from an application \cr
\link[=pinpoint_delete_push_template]{delete_push_template} \tab Deletes a message template for messages that were sent through a push notification channel \cr
\link[=pinpoint_delete_segment]{delete_segment} \tab Deletes a segment from an application \cr
\link[=pinpoint_delete_sms_channel]{delete_sms_channel} \tab Disables the SMS channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_sms_template]{delete_sms_template} \tab Deletes a message template for messages that were sent through the SMS channel \cr
\link[=pinpoint_delete_user_endpoints]{delete_user_endpoints} \tab Deletes all the endpoints that are associated with a specific user ID \cr
\link[=pinpoint_delete_voice_channel]{delete_voice_channel} \tab Disables the voice channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_voice_template]{delete_voice_template} \tab Deletes a message template for messages that were sent through the voice channel \cr
\link[=pinpoint_get_adm_channel]{get_adm_channel} \tab Retrieves information about the status and settings of the ADM channel for an application \cr
\link[=pinpoint_get_apns_channel]{get_apns_channel} \tab Retrieves information about the status and settings of the APNs channel for an application \cr
\link[=pinpoint_get_apns_sandbox_channel]{get_apns_sandbox_channel} \tab Retrieves information about the status and settings of the APNs sandbox channel for an application \cr
\link[=pinpoint_get_apns_voip_channel]{get_apns_voip_channel} \tab Retrieves information about the status and settings of the APNs VoIP channel for an application \cr
\link[=pinpoint_get_apns_voip_sandbox_channel]{get_apns_voip_sandbox_channel} \tab Retrieves information about the status and settings of the APNs VoIP sandbox channel for an application \cr
\link[=pinpoint_get_app]{get_app} \tab Retrieves information about an application \cr
\link[=pinpoint_get_application_date_range_kpi]{get_application_date_range_kpi} \tab Retrieves (queries) pre-aggregated data for a standard metric that applies to an application \cr
\link[=pinpoint_get_application_settings]{get_application_settings} \tab Retrieves information about the settings for an application \cr
\link[=pinpoint_get_apps]{get_apps} \tab Retrieves information about all the applications that are associated with your Amazon Pinpoint account \cr
\link[=pinpoint_get_baidu_channel]{get_baidu_channel} \tab Retrieves information about the status and settings of the Baidu channel for an application \cr
\link[=pinpoint_get_campaign]{get_campaign} \tab Retrieves information about the status, configuration, and other settings for a campaign \cr
\link[=pinpoint_get_campaign_activities]{get_campaign_activities} \tab Retrieves information about all the activities for a campaign \cr
\link[=pinpoint_get_campaign_date_range_kpi]{get_campaign_date_range_kpi} \tab Retrieves (queries) pre-aggregated data for a standard metric that applies to a campaign \cr
\link[=pinpoint_get_campaigns]{get_campaigns} \tab Retrieves information about the status, configuration, and other settings for all the campaigns that are associated with an application \cr
\link[=pinpoint_get_campaign_version]{get_campaign_version} \tab Retrieves information about the status, configuration, and other settings for a specific version of a campaign \cr
\link[=pinpoint_get_campaign_versions]{get_campaign_versions} \tab Retrieves information about the status, configuration, and other settings for all versions of a campaign \cr
\link[=pinpoint_get_channels]{get_channels} \tab Retrieves information about the history and status of each channel for an application \cr
\link[=pinpoint_get_email_channel]{get_email_channel} \tab Retrieves information about the status and settings of the email channel for an application \cr
\link[=pinpoint_get_email_template]{get_email_template} \tab Retrieves the content and settings of a message template for messages that are sent through the email channel \cr
\link[=pinpoint_get_endpoint]{get_endpoint} \tab Retrieves information about the settings and attributes of a specific endpoint for an application \cr
\link[=pinpoint_get_event_stream]{get_event_stream} \tab Retrieves information about the event stream settings for an application \cr
\link[=pinpoint_get_export_job]{get_export_job} \tab Retrieves information about the status and settings of a specific export job for an application \cr
\link[=pinpoint_get_export_jobs]{get_export_jobs} \tab Retrieves information about the status and settings of all the export jobs for an application \cr
\link[=pinpoint_get_gcm_channel]{get_gcm_channel} \tab Retrieves information about the status and settings of the GCM channel for an application \cr
\link[=pinpoint_get_import_job]{get_import_job} \tab Retrieves information about the status and settings of a specific import job for an application \cr
\link[=pinpoint_get_import_jobs]{get_import_jobs} \tab Retrieves information about the status and settings of all the import jobs for an application \cr
\link[=pinpoint_get_journey]{get_journey} \tab Retrieves information about the status, configuration, and other settings for a journey \cr
\link[=pinpoint_get_journey_date_range_kpi]{get_journey_date_range_kpi} \tab Retrieves (queries) pre-aggregated data for a standard engagement metric that applies to a journey \cr
\link[=pinpoint_get_journey_execution_activity_metrics]{get_journey_execution_activity_metrics} \tab Retrieves (queries) pre-aggregated data for a standard execution metric that applies to a journey activity \cr
\link[=pinpoint_get_journey_execution_metrics]{get_journey_execution_metrics} \tab Retrieves (queries) pre-aggregated data for a standard execution metric that applies to a journey \cr
\link[=pinpoint_get_push_template]{get_push_template} \tab Retrieves the content and settings of a message template for messages that are sent through a push notification channel \cr
\link[=pinpoint_get_segment]{get_segment} \tab Retrieves information about the configuration, dimension, and other settings for a specific segment that's associated with an application \cr
\link[=pinpoint_get_segment_export_jobs]{get_segment_export_jobs} \tab Retrieves information about the status and settings of the export jobs for a segment \cr
\link[=pinpoint_get_segment_import_jobs]{get_segment_import_jobs} \tab Retrieves information about the status and settings of the import jobs for a segment \cr
\link[=pinpoint_get_segments]{get_segments} \tab Retrieves information about the configuration, dimension, and other settings for all the segments that are associated with an application \cr
\link[=pinpoint_get_segment_version]{get_segment_version} \tab Retrieves information about the configuration, dimension, and other settings for a specific version of a segment that's associated with an application \cr
\link[=pinpoint_get_segment_versions]{get_segment_versions} \tab Retrieves information about the configuration, dimension, and other settings for all the versions of a specific segment that's associated with an application \cr
\link[=pinpoint_get_sms_channel]{get_sms_channel} \tab Retrieves information about the status and settings of the SMS channel for an application \cr
\link[=pinpoint_get_sms_template]{get_sms_template} \tab Retrieves the content and settings of a message template for messages that are sent through the SMS channel \cr
\link[=pinpoint_get_user_endpoints]{get_user_endpoints} \tab Retrieves information about all the endpoints that are associated with a specific user ID \cr
\link[=pinpoint_get_voice_channel]{get_voice_channel} \tab Retrieves information about the status and settings of the voice channel for an application \cr
\link[=pinpoint_get_voice_template]{get_voice_template} \tab Retrieves the content and settings of a message template for messages that are sent through the voice channel \cr
\link[=pinpoint_list_journeys]{list_journeys} \tab Retrieves information about the status, configuration, and other settings for all the journeys that are associated with an application \cr
\link[=pinpoint_list_tags_for_resource]{list_tags_for_resource} \tab Retrieves all the tags (keys and values) that are associated with an application, campaign, journey, message template, or segment \cr
\link[=pinpoint_list_templates]{list_templates} \tab Retrieves information about all the message templates that are associated with your Amazon Pinpoint account \cr
\link[=pinpoint_list_template_versions]{list_template_versions} \tab Retrieves information about all the versions of a specific message template \cr
\link[=pinpoint_phone_number_validate]{phone_number_validate} \tab Retrieves information about a phone number \cr
\link[=pinpoint_put_events]{put_events} \tab Creates a new event to record for endpoints, or creates or updates endpoint data that existing events are associated with \cr
\link[=pinpoint_put_event_stream]{put_event_stream} \tab Creates a new event stream for an application or updates the settings of an existing event stream for an application \cr
\link[=pinpoint_remove_attributes]{remove_attributes} \tab Removes one or more attributes, of the same attribute type, from all the endpoints that are associated with an application \cr
\link[=pinpoint_send_messages]{send_messages} \tab Creates and sends a direct message \cr
\link[=pinpoint_send_users_messages]{send_users_messages} \tab Creates and sends a message to a list of users \cr
\link[=pinpoint_tag_resource]{tag_resource} \tab Adds one or more tags (keys and values) to an application, campaign, journey, message template, or segment \cr
\link[=pinpoint_untag_resource]{untag_resource} \tab Removes one or more tags (keys and values) from an application, campaign, journey, message template, or segment \cr
\link[=pinpoint_update_adm_channel]{update_adm_channel} \tab Enables the ADM channel for an application or updates the status and settings of the ADM channel for an application \cr
\link[=pinpoint_update_apns_channel]{update_apns_channel} \tab Enables the APNs channel for an application or updates the status and settings of the APNs channel for an application \cr
\link[=pinpoint_update_apns_sandbox_channel]{update_apns_sandbox_channel} \tab Enables the APNs sandbox channel for an application or updates the status and settings of the APNs sandbox channel for an application \cr
\link[=pinpoint_update_apns_voip_channel]{update_apns_voip_channel} \tab Enables the APNs VoIP channel for an application or updates the status and settings of the APNs VoIP channel for an application \cr
\link[=pinpoint_update_apns_voip_sandbox_channel]{update_apns_voip_sandbox_channel} \tab Enables the APNs VoIP sandbox channel for an application or updates the status and settings of the APNs VoIP sandbox channel for an application \cr
\link[=pinpoint_update_application_settings]{update_application_settings} \tab Updates the settings for an application \cr
\link[=pinpoint_update_baidu_channel]{update_baidu_channel} \tab Enables the Baidu channel for an application or updates the status and settings of the Baidu channel for an application \cr
\link[=pinpoint_update_campaign]{update_campaign} \tab Updates the configuration and other settings for a campaign \cr
\link[=pinpoint_update_email_channel]{update_email_channel} \tab Enables the email channel for an application or updates the status and settings of the email channel for an application \cr
\link[=pinpoint_update_email_template]{update_email_template} \tab Updates an existing message template for messages that are sent through the email channel \cr
\link[=pinpoint_update_endpoint]{update_endpoint} \tab Creates a new endpoint for an application or updates the settings and attributes of an existing endpoint for an application \cr
\link[=pinpoint_update_endpoints_batch]{update_endpoints_batch} \tab Creates a new batch of endpoints for an application or updates the settings and attributes of a batch of existing endpoints for an application \cr
\link[=pinpoint_update_gcm_channel]{update_gcm_channel} \tab Enables the GCM channel for an application or updates the status and settings of the GCM channel for an application \cr
\link[=pinpoint_update_journey]{update_journey} \tab Updates the configuration and other settings for a journey \cr
\link[=pinpoint_update_journey_state]{update_journey_state} \tab Cancels (stops) an active journey \cr
\link[=pinpoint_update_push_template]{update_push_template} \tab Updates an existing message template for messages that are sent through a push notification channel \cr
\link[=pinpoint_update_segment]{update_segment} \tab Creates a new segment for an application or updates the configuration, dimension, and other settings for an existing segment that's associated with an application\cr
\link[=pinpoint_update_sms_channel]{update_sms_channel} \tab Enables the SMS channel for an application or updates the status and settings of the SMS channel for an application \cr
\link[=pinpoint_update_sms_template]{update_sms_template} \tab Updates an existing message template for messages that are sent through the SMS channel \cr
\link[=pinpoint_update_template_active_version]{update_template_active_version} \tab Changes the status of a specific version of a message template to \emph{active} \cr
\link[=pinpoint_update_voice_channel]{update_voice_channel} \tab Enables the voice channel for an application or updates the status and settings of the voice channel for an application \cr
\link[=pinpoint_update_voice_template]{update_voice_template} \tab Updates an existing message template for messages that are sent through the voice channel
}
}
\examples{
\dontrun{
svc <- pinpoint()
svc$create_app(
Foo = 123
)
}
}
| /cran/paws.customer.engagement/man/pinpoint.Rd | permissive | johnnytommy/paws | R | false | true | 17,796 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pinpoint_service.R
\name{pinpoint}
\alias{pinpoint}
\title{Amazon Pinpoint}
\usage{
pinpoint(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Doc Engage API - Amazon Pinpoint API
}
\section{Service syntax}{
\preformatted{svc <- pinpoint(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=pinpoint_create_app]{create_app} \tab Creates an application \cr
\link[=pinpoint_create_campaign]{create_campaign} \tab Creates a new campaign for an application or updates the settings of an existing campaign for an application \cr
\link[=pinpoint_create_email_template]{create_email_template} \tab Creates a message template for messages that are sent through the email channel \cr
\link[=pinpoint_create_export_job]{create_export_job} \tab Creates an export job for an application \cr
\link[=pinpoint_create_import_job]{create_import_job} \tab Creates an import job for an application \cr
\link[=pinpoint_create_journey]{create_journey} \tab Creates a journey for an application \cr
\link[=pinpoint_create_push_template]{create_push_template} \tab Creates a message template for messages that are sent through a push notification channel \cr
\link[=pinpoint_create_segment]{create_segment} \tab Creates a new segment for an application or updates the configuration, dimension, and other settings for an existing segment that's associated with an application\cr
\link[=pinpoint_create_sms_template]{create_sms_template} \tab Creates a message template for messages that are sent through the SMS channel \cr
\link[=pinpoint_create_voice_template]{create_voice_template} \tab Creates a message template for messages that are sent through the voice channel \cr
\link[=pinpoint_delete_adm_channel]{delete_adm_channel} \tab Disables the ADM channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_apns_channel]{delete_apns_channel} \tab Disables the APNs channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_apns_sandbox_channel]{delete_apns_sandbox_channel} \tab Disables the APNs sandbox channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_apns_voip_channel]{delete_apns_voip_channel} \tab Disables the APNs VoIP channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_apns_voip_sandbox_channel]{delete_apns_voip_sandbox_channel} \tab Disables the APNs VoIP sandbox channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_app]{delete_app} \tab Deletes an application \cr
\link[=pinpoint_delete_baidu_channel]{delete_baidu_channel} \tab Disables the Baidu channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_campaign]{delete_campaign} \tab Deletes a campaign from an application \cr
\link[=pinpoint_delete_email_channel]{delete_email_channel} \tab Disables the email channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_email_template]{delete_email_template} \tab Deletes a message template for messages that were sent through the email channel \cr
\link[=pinpoint_delete_endpoint]{delete_endpoint} \tab Deletes an endpoint from an application \cr
\link[=pinpoint_delete_event_stream]{delete_event_stream} \tab Deletes the event stream for an application \cr
\link[=pinpoint_delete_gcm_channel]{delete_gcm_channel} \tab Disables the GCM channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_journey]{delete_journey} \tab Deletes a journey from an application \cr
\link[=pinpoint_delete_push_template]{delete_push_template} \tab Deletes a message template for messages that were sent through a push notification channel \cr
\link[=pinpoint_delete_segment]{delete_segment} \tab Deletes a segment from an application \cr
\link[=pinpoint_delete_sms_channel]{delete_sms_channel} \tab Disables the SMS channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_sms_template]{delete_sms_template} \tab Deletes a message template for messages that were sent through the SMS channel \cr
\link[=pinpoint_delete_user_endpoints]{delete_user_endpoints} \tab Deletes all the endpoints that are associated with a specific user ID \cr
\link[=pinpoint_delete_voice_channel]{delete_voice_channel} \tab Disables the voice channel for an application and deletes any existing settings for the channel \cr
\link[=pinpoint_delete_voice_template]{delete_voice_template} \tab Deletes a message template for messages that were sent through the voice channel \cr
\link[=pinpoint_get_adm_channel]{get_adm_channel} \tab Retrieves information about the status and settings of the ADM channel for an application \cr
\link[=pinpoint_get_apns_channel]{get_apns_channel} \tab Retrieves information about the status and settings of the APNs channel for an application \cr
\link[=pinpoint_get_apns_sandbox_channel]{get_apns_sandbox_channel} \tab Retrieves information about the status and settings of the APNs sandbox channel for an application \cr
\link[=pinpoint_get_apns_voip_channel]{get_apns_voip_channel} \tab Retrieves information about the status and settings of the APNs VoIP channel for an application \cr
\link[=pinpoint_get_apns_voip_sandbox_channel]{get_apns_voip_sandbox_channel} \tab Retrieves information about the status and settings of the APNs VoIP sandbox channel for an application \cr
\link[=pinpoint_get_app]{get_app} \tab Retrieves information about an application \cr
\link[=pinpoint_get_application_date_range_kpi]{get_application_date_range_kpi} \tab Retrieves (queries) pre-aggregated data for a standard metric that applies to an application \cr
\link[=pinpoint_get_application_settings]{get_application_settings} \tab Retrieves information about the settings for an application \cr
\link[=pinpoint_get_apps]{get_apps} \tab Retrieves information about all the applications that are associated with your Amazon Pinpoint account \cr
\link[=pinpoint_get_baidu_channel]{get_baidu_channel} \tab Retrieves information about the status and settings of the Baidu channel for an application \cr
\link[=pinpoint_get_campaign]{get_campaign} \tab Retrieves information about the status, configuration, and other settings for a campaign \cr
\link[=pinpoint_get_campaign_activities]{get_campaign_activities} \tab Retrieves information about all the activities for a campaign \cr
\link[=pinpoint_get_campaign_date_range_kpi]{get_campaign_date_range_kpi} \tab Retrieves (queries) pre-aggregated data for a standard metric that applies to a campaign \cr
\link[=pinpoint_get_campaigns]{get_campaigns} \tab Retrieves information about the status, configuration, and other settings for all the campaigns that are associated with an application \cr
\link[=pinpoint_get_campaign_version]{get_campaign_version} \tab Retrieves information about the status, configuration, and other settings for a specific version of a campaign \cr
\link[=pinpoint_get_campaign_versions]{get_campaign_versions} \tab Retrieves information about the status, configuration, and other settings for all versions of a campaign \cr
\link[=pinpoint_get_channels]{get_channels} \tab Retrieves information about the history and status of each channel for an application \cr
\link[=pinpoint_get_email_channel]{get_email_channel} \tab Retrieves information about the status and settings of the email channel for an application \cr
\link[=pinpoint_get_email_template]{get_email_template} \tab Retrieves the content and settings of a message template for messages that are sent through the email channel \cr
\link[=pinpoint_get_endpoint]{get_endpoint} \tab Retrieves information about the settings and attributes of a specific endpoint for an application \cr
\link[=pinpoint_get_event_stream]{get_event_stream} \tab Retrieves information about the event stream settings for an application \cr
\link[=pinpoint_get_export_job]{get_export_job} \tab Retrieves information about the status and settings of a specific export job for an application \cr
\link[=pinpoint_get_export_jobs]{get_export_jobs} \tab Retrieves information about the status and settings of all the export jobs for an application \cr
\link[=pinpoint_get_gcm_channel]{get_gcm_channel} \tab Retrieves information about the status and settings of the GCM channel for an application \cr
\link[=pinpoint_get_import_job]{get_import_job} \tab Retrieves information about the status and settings of a specific import job for an application \cr
\link[=pinpoint_get_import_jobs]{get_import_jobs} \tab Retrieves information about the status and settings of all the import jobs for an application \cr
\link[=pinpoint_get_journey]{get_journey} \tab Retrieves information about the status, configuration, and other settings for a journey \cr
\link[=pinpoint_get_journey_date_range_kpi]{get_journey_date_range_kpi} \tab Retrieves (queries) pre-aggregated data for a standard engagement metric that applies to a journey \cr
\link[=pinpoint_get_journey_execution_activity_metrics]{get_journey_execution_activity_metrics} \tab Retrieves (queries) pre-aggregated data for a standard execution metric that applies to a journey activity \cr
\link[=pinpoint_get_journey_execution_metrics]{get_journey_execution_metrics} \tab Retrieves (queries) pre-aggregated data for a standard execution metric that applies to a journey \cr
\link[=pinpoint_get_push_template]{get_push_template} \tab Retrieves the content and settings of a message template for messages that are sent through a push notification channel \cr
\link[=pinpoint_get_segment]{get_segment} \tab Retrieves information about the configuration, dimension, and other settings for a specific segment that's associated with an application \cr
\link[=pinpoint_get_segment_export_jobs]{get_segment_export_jobs} \tab Retrieves information about the status and settings of the export jobs for a segment \cr
\link[=pinpoint_get_segment_import_jobs]{get_segment_import_jobs} \tab Retrieves information about the status and settings of the import jobs for a segment \cr
\link[=pinpoint_get_segments]{get_segments} \tab Retrieves information about the configuration, dimension, and other settings for all the segments that are associated with an application \cr
\link[=pinpoint_get_segment_version]{get_segment_version} \tab Retrieves information about the configuration, dimension, and other settings for a specific version of a segment that's associated with an application \cr
\link[=pinpoint_get_segment_versions]{get_segment_versions} \tab Retrieves information about the configuration, dimension, and other settings for all the versions of a specific segment that's associated with an application \cr
\link[=pinpoint_get_sms_channel]{get_sms_channel} \tab Retrieves information about the status and settings of the SMS channel for an application \cr
\link[=pinpoint_get_sms_template]{get_sms_template} \tab Retrieves the content and settings of a message template for messages that are sent through the SMS channel \cr
\link[=pinpoint_get_user_endpoints]{get_user_endpoints} \tab Retrieves information about all the endpoints that are associated with a specific user ID \cr
\link[=pinpoint_get_voice_channel]{get_voice_channel} \tab Retrieves information about the status and settings of the voice channel for an application \cr
\link[=pinpoint_get_voice_template]{get_voice_template} \tab Retrieves the content and settings of a message template for messages that are sent through the voice channel \cr
\link[=pinpoint_list_journeys]{list_journeys} \tab Retrieves information about the status, configuration, and other settings for all the journeys that are associated with an application \cr
\link[=pinpoint_list_tags_for_resource]{list_tags_for_resource} \tab Retrieves all the tags (keys and values) that are associated with an application, campaign, journey, message template, or segment \cr
\link[=pinpoint_list_templates]{list_templates} \tab Retrieves information about all the message templates that are associated with your Amazon Pinpoint account \cr
\link[=pinpoint_list_template_versions]{list_template_versions} \tab Retrieves information about all the versions of a specific message template \cr
\link[=pinpoint_phone_number_validate]{phone_number_validate} \tab Retrieves information about a phone number \cr
\link[=pinpoint_put_events]{put_events} \tab Creates a new event to record for endpoints, or creates or updates endpoint data that existing events are associated with \cr
\link[=pinpoint_put_event_stream]{put_event_stream} \tab Creates a new event stream for an application or updates the settings of an existing event stream for an application \cr
\link[=pinpoint_remove_attributes]{remove_attributes} \tab Removes one or more attributes, of the same attribute type, from all the endpoints that are associated with an application \cr
\link[=pinpoint_send_messages]{send_messages} \tab Creates and sends a direct message \cr
\link[=pinpoint_send_users_messages]{send_users_messages} \tab Creates and sends a message to a list of users \cr
\link[=pinpoint_tag_resource]{tag_resource} \tab Adds one or more tags (keys and values) to an application, campaign, journey, message template, or segment \cr
\link[=pinpoint_untag_resource]{untag_resource} \tab Removes one or more tags (keys and values) from an application, campaign, journey, message template, or segment \cr
\link[=pinpoint_update_adm_channel]{update_adm_channel} \tab Enables the ADM channel for an application or updates the status and settings of the ADM channel for an application \cr
\link[=pinpoint_update_apns_channel]{update_apns_channel} \tab Enables the APNs channel for an application or updates the status and settings of the APNs channel for an application \cr
\link[=pinpoint_update_apns_sandbox_channel]{update_apns_sandbox_channel} \tab Enables the APNs sandbox channel for an application or updates the status and settings of the APNs sandbox channel for an application \cr
\link[=pinpoint_update_apns_voip_channel]{update_apns_voip_channel} \tab Enables the APNs VoIP channel for an application or updates the status and settings of the APNs VoIP channel for an application \cr
\link[=pinpoint_update_apns_voip_sandbox_channel]{update_apns_voip_sandbox_channel} \tab Enables the APNs VoIP sandbox channel for an application or updates the status and settings of the APNs VoIP sandbox channel for an application \cr
\link[=pinpoint_update_application_settings]{update_application_settings} \tab Updates the settings for an application \cr
\link[=pinpoint_update_baidu_channel]{update_baidu_channel} \tab Enables the Baidu channel for an application or updates the status and settings of the Baidu channel for an application \cr
\link[=pinpoint_update_campaign]{update_campaign} \tab Updates the configuration and other settings for a campaign \cr
\link[=pinpoint_update_email_channel]{update_email_channel} \tab Enables the email channel for an application or updates the status and settings of the email channel for an application \cr
\link[=pinpoint_update_email_template]{update_email_template} \tab Updates an existing message template for messages that are sent through the email channel \cr
\link[=pinpoint_update_endpoint]{update_endpoint} \tab Creates a new endpoint for an application or updates the settings and attributes of an existing endpoint for an application \cr
\link[=pinpoint_update_endpoints_batch]{update_endpoints_batch} \tab Creates a new batch of endpoints for an application or updates the settings and attributes of a batch of existing endpoints for an application \cr
\link[=pinpoint_update_gcm_channel]{update_gcm_channel} \tab Enables the GCM channel for an application or updates the status and settings of the GCM channel for an application \cr
\link[=pinpoint_update_journey]{update_journey} \tab Updates the configuration and other settings for a journey \cr
\link[=pinpoint_update_journey_state]{update_journey_state} \tab Cancels (stops) an active journey \cr
\link[=pinpoint_update_push_template]{update_push_template} \tab Updates an existing message template for messages that are sent through a push notification channel \cr
\link[=pinpoint_update_segment]{update_segment} \tab Creates a new segment for an application or updates the configuration, dimension, and other settings for an existing segment that's associated with an application\cr
\link[=pinpoint_update_sms_channel]{update_sms_channel} \tab Enables the SMS channel for an application or updates the status and settings of the SMS channel for an application \cr
\link[=pinpoint_update_sms_template]{update_sms_template} \tab Updates an existing message template for messages that are sent through the SMS channel \cr
\link[=pinpoint_update_template_active_version]{update_template_active_version} \tab Changes the status of a specific version of a message template to \emph{active} \cr
\link[=pinpoint_update_voice_channel]{update_voice_channel} \tab Enables the voice channel for an application or updates the status and settings of the voice channel for an application \cr
\link[=pinpoint_update_voice_template]{update_voice_template} \tab Updates an existing message template for messages that are sent through the voice channel
}
}
\examples{
\dontrun{
svc <- pinpoint()
svc$create_app(
Foo = 123
)
}
}
|
# Statistics
# ----------
# Post-hoc analysis of clicks from a two-user experiment driven by a
# Processing 2.x sketch running at 25 frames per second.
# This script expects the following objects to already exist in the
# workspace (they are created upstream, not here -- TODO confirm):
#   x        - per-frame data with (at least) columns AvatarA, AvatarB, nCrosses
#   ind_clA  - frame indices of user A's clicks
#   ind_clB  - frame indices of user B's clicks
#   ind_Cros - frame indices at which the two avatars crossed
#   width    - sketch width in pixels (used for the static object positions)
# NOTE(review): names such as `stats.agentA` are plain variables whose names
# contain a dot; they are NOT fields of a `stats` list/object.
# Number of clicks per user
nc_A = length(ind_clA)
nc_B = length(ind_clB)
# Total clicks
total_nc = nc_A + nc_B
# Print the raw index vectors (useful only when run interactively)
ind_Cros
ind_clA
ind_clB
ct_nco = 0;  # NOTE(review): assigned but never used below
stats = seq(1,length(ind_clA)) # 1..(number of A clicks); NOTE(review): never used (the stats.* variables below are unrelated)
# Per-click results and counters for "click landed on an agent"
stats.agentA = NULL
stats.agentB = NULL
stats.ct_nca = 0;
stats.ct_ncb = 0;
fps = 25 # frames per second in Processing 2.x
marg = round(1.25/(1/fps))+1 # reaction-time margin in frames (1.25 s -> 32 frames at 25 fps); original comment claimed 2 s -- TODO confirm intended window
# How many clicks were made on an agent
# Compare crosses and clicks from user A
for (i in 1:length(ind_clA) ) {
for (j in 1:length(ind_Cros)){
# Count click i when it falls within `marg` frames of cross j
if ((ind_clA[i]<ind_Cros[j]+marg)&&(ind_clA[i]>ind_Cros[j]-marg)){
# NOTE(review): nCrosses is indexed by the click counter `i`, not by the
# frame index ind_clA[i] -- this looks unintended; verify against the data.
stats.agentA[i] = x$nCrosses[i]-x$nCrosses[i+1]
# NOTE(review): one click can match several crosses and therefore be
# counted more than once (no break/next after the first match).
stats.ct_nca = stats.ct_nca + 1
}
}
}
# Compare crosses and clicks from user B
for (i in 1:length(ind_clB) ) {
for (j in 1:length(ind_Cros)){
# Count click i when it falls within 13 frames of cross j
# NOTE(review): the window is hard-coded to 13 here while user A's loop
# uses `marg` (= 32 at 25 fps); the asymmetry looks like a leftover --
# confirm whether 13 should be `marg`.
if ((ind_clB[i]<ind_Cros[j]+13)&&(ind_clB[i]>ind_Cros[j]-13)){
stats.agentB[i] = x$nCrosses[i]-x$nCrosses[i+1]
stats.ct_ncb = stats.ct_ncb + 1
}
}
}
stats.ct_nca # number of clicks with agent (avatarA)
stats.ct_ncb # number of clicks with agent (avatarB)
# Number of clicks in a shadow - User A
# The shadow sits at a fixed offset (shd_pst pixels) from the other avatar,
# so each click is checked against the other avatar's position at that frame.
shd_pst = 70 # shadow offset (pixels) relative to the avatar
stats.shadowA = NULL
stats.ct_nc_sdwA = 0
for (i in 1:length(ind_clA) ) {
# A's avatar within `marg` of B's shadow position (B + shd_pst) at click time
# NOTE(review): `marg` was defined in frames but is used here as a pixel
# tolerance -- confirm the unit mix is intentional.
if ((x$AvatarA[ind_clA[i]]<x$AvatarB[ind_clA[i]]+shd_pst+marg)&&
(x$AvatarA[ind_clA[i]]>x$AvatarB[ind_clA[i]] + shd_pst - marg)){
stats.shadowA[i] = x$AvatarB[ind_clA[i]]+shd_pst
stats.ct_nc_sdwA = stats.ct_nc_sdwA + 1
}
}
stats.ct_nc_sdwA
# Number of clicks in a shadow - User B
stats.shadowB = NULL
stats.ct_nc_sdwB = 0
for (i in 1:length(ind_clB) ) {
# B's avatar within `marg` of A's shadow position (A + shd_pst) at click time
if ((x$AvatarB[ind_clB[i]]<x$AvatarA[ind_clB[i]]+shd_pst+marg)&&
(x$AvatarB[ind_clB[i]]>x$AvatarA[ind_clB[i]] + shd_pst - marg)){
stats.shadowB[i] = x$AvatarA[ind_clB[i]]+shd_pst
stats.ct_nc_sdwB = stats.ct_nc_sdwB + 1
}
}
stats.ct_nc_sdwB
# How many clicks were made on a (static) object
# Object A
objA_pst = width/4 # object A position (pixels); assumes `width` is defined upstream
stats.objA = NULL
stats.ct_nc_objA = 0
for (i in 1:length(ind_clA) ) {
# A's avatar within `marg` pixels of object A's position at click time
if ((x$AvatarA[ind_clA[i]]<objA_pst+marg)&&
(x$AvatarA[ind_clA[i]]>objA_pst-marg)){
stats.objA[i] = ind_clA[i]
stats.ct_nc_objA = stats.ct_nc_objA + 1
}
}
stats.ct_nc_objA
# Object B
objB_pst = 3*width/4 # object B position (pixels)
stats.objB = NULL
stats.ct_nc_objB = 0
for (i in 1:length(ind_clB) ) {
# B's avatar within `marg` pixels of object B's position at click time
if ((x$AvatarB[ind_clB[i]]<objB_pst+marg)&&
(x$AvatarB[ind_clB[i]]>objB_pst-marg)){
stats.objB[i] = ind_clB[i]
stats.ct_nc_objB = stats.ct_nc_objB + 1
}
}
stats.ct_nc_objB
# How many clicks were reciprocated
# Reciprocal response
stats.rcpcl = NULL
stats.ct_nc_rec = 0
for (i in 1:length(ind_clA) ) {
for (j in 1:length(ind_clB)) {
# A's click i and B's click j within `marg` frames of each other
# NOTE(review): one click of A matching several of B's clicks is counted
# once per match.
if ((ind_clA[i]<ind_clB[j]+marg)&&
(ind_clA[i]>ind_clB[j]-marg)){
stats.rcpcl[i] = 1
stats.ct_nc_rec = stats.ct_nc_rec + 1
}
}
}
stats.ct_nc_rec
# Print a table with the results for an experiment (10 x 1 matrix -> table)
table1 <- matrix(c(total_nc,nc_A,stats.ct_nca,stats.ct_nc_sdwA,stats.ct_nc_objA,nc_B,stats.ct_ncb,stats.ct_nc_sdwB,stats.ct_nc_objB,stats.ct_nc_rec))
colnames(table1) <- c("Experiment5")
rownames(table1) <- c("nclicks", "UserA","AgentA", "ShadowA", "ObjA", "UserB","AgentB", "ShadowB", "ObjB","Reciproc")
table1 <- as.table(table1)
table1
# The same should be done for each experiment and look for conclusions | /data/stats (Autosaved).R | no_license | xaviliz/Minimal-Interaction | R | false | false | 3,969 | r | # Statistics
# Statistics (duplicate copy of the analysis script)
# --------------------------------------------------
# Post-hoc analysis of clicks from a two-user experiment driven by a
# Processing 2.x sketch running at 25 frames per second.
# This script expects the following objects to already exist in the
# workspace (they are created upstream, not here -- TODO confirm):
#   x        - per-frame data with (at least) columns AvatarA, AvatarB, nCrosses
#   ind_clA  - frame indices of user A's clicks
#   ind_clB  - frame indices of user B's clicks
#   ind_Cros - frame indices at which the two avatars crossed
#   width    - sketch width in pixels (used for the static object positions)
# NOTE(review): names such as `stats.agentA` are plain variables whose names
# contain a dot; they are NOT fields of a `stats` list/object.
# Number of clicks per user
nc_A = length(ind_clA)
nc_B = length(ind_clB)
# Total clicks
total_nc = nc_A + nc_B
# Print the raw index vectors (useful only when run interactively)
ind_Cros
ind_clA
ind_clB
ct_nco = 0;  # NOTE(review): assigned but never used below
stats = seq(1,length(ind_clA)) # 1..(number of A clicks); NOTE(review): never used (the stats.* variables below are unrelated)
# Per-click results and counters for "click landed on an agent"
stats.agentA = NULL
stats.agentB = NULL
stats.ct_nca = 0;
stats.ct_ncb = 0;
fps = 25 # frames per second in Processing 2.x
marg = round(1.25/(1/fps))+1 # reaction-time margin in frames (1.25 s -> 32 frames at 25 fps); original comment claimed 2 s -- TODO confirm intended window
# How many clicks were made on an agent
# Compare crosses and clicks from user A
for (i in 1:length(ind_clA) ) {
for (j in 1:length(ind_Cros)){
# Count click i when it falls within `marg` frames of cross j
if ((ind_clA[i]<ind_Cros[j]+marg)&&(ind_clA[i]>ind_Cros[j]-marg)){
# NOTE(review): nCrosses is indexed by the click counter `i`, not by the
# frame index ind_clA[i] -- this looks unintended; verify against the data.
stats.agentA[i] = x$nCrosses[i]-x$nCrosses[i+1]
# NOTE(review): one click can match several crosses and therefore be
# counted more than once (no break/next after the first match).
stats.ct_nca = stats.ct_nca + 1
}
}
}
# Compare crosses and clicks from user B
for (i in 1:length(ind_clB) ) {
for (j in 1:length(ind_Cros)){
# Count click i when it falls within 13 frames of cross j
# NOTE(review): the window is hard-coded to 13 here while user A's loop
# uses `marg` (= 32 at 25 fps); the asymmetry looks like a leftover --
# confirm whether 13 should be `marg`.
if ((ind_clB[i]<ind_Cros[j]+13)&&(ind_clB[i]>ind_Cros[j]-13)){
stats.agentB[i] = x$nCrosses[i]-x$nCrosses[i+1]
stats.ct_ncb = stats.ct_ncb + 1
}
}
}
stats.ct_nca # number of clicks with agent (avatarA)
stats.ct_ncb # number of clicks with agent (avatarB)
# Number of clicks in a shadow - User A
# The shadow sits at a fixed offset (shd_pst pixels) from the other avatar,
# so each click is checked against the other avatar's position at that frame.
shd_pst = 70 # shadow offset (pixels) relative to the avatar
stats.shadowA = NULL
stats.ct_nc_sdwA = 0
for (i in 1:length(ind_clA) ) {
# A's avatar within `marg` of B's shadow position (B + shd_pst) at click time
# NOTE(review): `marg` was defined in frames but is used here as a pixel
# tolerance -- confirm the unit mix is intentional.
if ((x$AvatarA[ind_clA[i]]<x$AvatarB[ind_clA[i]]+shd_pst+marg)&&
(x$AvatarA[ind_clA[i]]>x$AvatarB[ind_clA[i]] + shd_pst - marg)){
stats.shadowA[i] = x$AvatarB[ind_clA[i]]+shd_pst
stats.ct_nc_sdwA = stats.ct_nc_sdwA + 1
}
}
stats.ct_nc_sdwA
# Number of clicks in a shadow - User B
stats.shadowB = NULL
stats.ct_nc_sdwB = 0
for (i in 1:length(ind_clB) ) {
# B's avatar within `marg` of A's shadow position (A + shd_pst) at click time
if ((x$AvatarB[ind_clB[i]]<x$AvatarA[ind_clB[i]]+shd_pst+marg)&&
(x$AvatarB[ind_clB[i]]>x$AvatarA[ind_clB[i]] + shd_pst - marg)){
stats.shadowB[i] = x$AvatarA[ind_clB[i]]+shd_pst
stats.ct_nc_sdwB = stats.ct_nc_sdwB + 1
}
}
stats.ct_nc_sdwB
# How many clicks were made on a (static) object
# Object A
objA_pst = width/4 # object A position (pixels); assumes `width` is defined upstream
stats.objA = NULL
stats.ct_nc_objA = 0
for (i in 1:length(ind_clA) ) {
# A's avatar within `marg` pixels of object A's position at click time
if ((x$AvatarA[ind_clA[i]]<objA_pst+marg)&&
(x$AvatarA[ind_clA[i]]>objA_pst-marg)){
stats.objA[i] = ind_clA[i]
stats.ct_nc_objA = stats.ct_nc_objA + 1
}
}
stats.ct_nc_objA
# Object B
objB_pst = 3*width/4 # object B position (pixels)
stats.objB = NULL
stats.ct_nc_objB = 0
for (i in 1:length(ind_clB) ) {
# B's avatar within `marg` pixels of object B's position at click time
if ((x$AvatarB[ind_clB[i]]<objB_pst+marg)&&
(x$AvatarB[ind_clB[i]]>objB_pst-marg)){
stats.objB[i] = ind_clB[i]
stats.ct_nc_objB = stats.ct_nc_objB + 1
}
}
stats.ct_nc_objB
# How many clicks were reciprocated
# Reciprocal response
stats.rcpcl = NULL
stats.ct_nc_rec = 0
for (i in 1:length(ind_clA) ) {
for (j in 1:length(ind_clB)) {
# A's click i and B's click j within `marg` frames of each other
# NOTE(review): one click of A matching several of B's clicks is counted
# once per match.
if ((ind_clA[i]<ind_clB[j]+marg)&&
(ind_clA[i]>ind_clB[j]-marg)){
stats.rcpcl[i] = 1
stats.ct_nc_rec = stats.ct_nc_rec + 1
}
}
}
stats.ct_nc_rec
# Print a table with the results for an experiment (10 x 1 matrix -> table)
table1 <- matrix(c(total_nc,nc_A,stats.ct_nca,stats.ct_nc_sdwA,stats.ct_nc_objA,nc_B,stats.ct_ncb,stats.ct_nc_sdwB,stats.ct_nc_objB,stats.ct_nc_rec))
colnames(table1) <- c("Experiment5")
rownames(table1) <- c("nclicks", "UserA","AgentA", "ShadowA", "ObjA", "UserB","AgentB", "ShadowB", "ObjB","Reciproc")
table1 <- as.table(table1)
table1
# The same should be done for each experiment and look for conclusions |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package_dep.R
\name{order_pkgs}
\alias{order_pkgs}
\title{Order packages by their dependencies}
\usage{
order_pkgs(pkgs, inst_pkgs, which = c("Depends", "Imports", "LinkingTo"))
}
\arguments{
\item{pkgs}{The packages to order}
\item{inst_pkgs}{a database of installed packages as returned by \code{installed.packages}.}
\item{which}{Which types of dependencies should be taken into account: a character vector listing the types of dependencies, a
subset of \code{c("Depends", "Imports", "LinkingTo", "Suggests",
"Enhances")}. The character string \code{"all"} is shorthand for that
vector, and \code{"most"} for the same vector without
\code{"Enhances"}.}
}
\value{
A data frame with columns 'pkg' (the package name) and 'level'; the data frame is sorted by 'level' from lowest to highest.
}
\description{
Order packages by their dependencies
}
\details{
Given packages, the function orders them so that every package
only depends on other packages with lower order number. This way,
they can safely be removed from the highest value down.
}
\author{
Holger Hoefling
}
| /man/order_pkgs.Rd | no_license | hhoeflin/sanitizr | R | false | true | 1,166 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package_dep.R
\name{order_pkgs}
\alias{order_pkgs}
\title{Order packages by their dependencies}
\usage{
order_pkgs(pkgs, inst_pkgs, which = c("Depends", "Imports", "LinkingTo"))
}
\arguments{
\item{pkgs}{The packages to order}
\item{inst_pkgs}{a database of installed packages as returned by \code{installed.packages}.}
\item{which}{Which types of dependencies should be taken into account: a character vector listing the types of dependencies, a
subset of \code{c("Depends", "Imports", "LinkingTo", "Suggests",
"Enhances")}. The character string \code{"all"} is shorthand for that
vector, and \code{"most"} for the same vector without
\code{"Enhances"}.}
}
\value{
A data frame with columns 'pkg' (the package name) and 'level'; the data frame is sorted by 'level' from lowest to highest.
}
\description{
Order packages by their dependencies
}
\details{
Given packages, the function orders them so that every package
only depends on other packages with lower order number. This way,
they can safely be removed from the highest value down.
}
\author{
Holger Hoefling
}
|
# Exploratory data analysis of the ISL "Advertising" data set.
# Downloads the data, writes summary statistics to a text file, and saves
# histograms of the TV and Sales variables as both PDF and PNG files.
# NOTE(review): the www-bcf.usc.edu URL may no longer be reachable -- the
# ISL data set has moved hosts; confirm the URL before relying on it.
advertising <- read.csv("http://www-bcf.usc.edu/%7Egareth/ISL/Advertising.csv")
advertising <- advertising[-1]  # drop the first column (row index written by the CSV)

# Capture the summary statistics in ../data/eda-output.txt
sink(file = "../data/eda-output.txt")
summary(advertising)
sink()

# Histograms of TV budgets (PDF and PNG versions).
# Fix: the original used bare `dev.off` (the function object, never called),
# so no graphics device was ever closed and the output files were left
# unfinalized. Each device must be closed with dev.off().
pdf(file = "../images/histogram-tv.pdf")
hist(advertising$TV, main="Histogram of TV Budgets",xlab= "TV",ylab="Frequency")
dev.off()

png(filename = "../images/histogram-tv.png")
hist(advertising$TV, main="Histogram of TV Budgets",xlab= "TV",ylab="Frequency")
dev.off()

# Histograms of Sales (PDF and PNG versions).
pdf(file = "../images/histogram-sales.pdf")
hist(advertising$Sales, main="Histogram of Sales",xlab="Sales",ylab="Frequency")
dev.off()

png(filename = "../images/histogram-sales.png")
hist(advertising$Sales, main="Histogram of Sales",xlab="Sales",ylab="Frequency")
dev.off | /code/eda-script.R | permissive | shirleyxjin/stat159-fall2016-hw02 | R | false | false | 716 | r | advertising <- read.csv("http://www-bcf.usc.edu/%7Egareth/ISL/Advertising.csv")
# Exploratory data analysis of the ISL "Advertising" data set (the read.csv
# download happens on the line immediately above this block).
advertising <- advertising[-1]  # drop the first column (row index written by the CSV)

# Capture the summary statistics in ../data/eda-output.txt
sink(file = "../data/eda-output.txt")
summary(advertising)
sink()

# Histograms of TV budgets (PDF and PNG versions).
# Fix: the original used bare `dev.off` (the function object, never called),
# so no graphics device was ever closed and the output files were left
# unfinalized. Each device must be closed with dev.off().
pdf(file = "../images/histogram-tv.pdf")
hist(advertising$TV, main="Histogram of TV Budgets",xlab= "TV",ylab="Frequency")
dev.off()

png(filename = "../images/histogram-tv.png")
hist(advertising$TV, main="Histogram of TV Budgets",xlab= "TV",ylab="Frequency")
dev.off()

# Histograms of Sales (PDF and PNG versions).
pdf(file = "../images/histogram-sales.pdf")
hist(advertising$Sales, main="Histogram of Sales",xlab="Sales",ylab="Frequency")
dev.off()

png(filename = "../images/histogram-sales.png")
hist(advertising$Sales, main="Histogram of Sales",xlab="Sales",ylab="Frequency")
dev.off()
#' Extract medication information from clinical notes
#'
#' This function is an interface to the \code{\link[medExtractR]{medExtractR}} function
#' within the \pkg{medExtractR} package, and allows drug dosing information to be extracted
#' from free-text sources, e.g., clinical notes.
#'
#' Medication information, including dosing data, is often stored in free-text sources such as
#' clinical notes. The \code{extractMed} function serves as a convenient wrapper for the
#' \pkg{medExtractR} package, a natural language processing system written in R for extracting
#' medication data. Within \code{extractMed}, the \code{\link[medExtractR]{medExtractR}} function
#' identifies dosing data for drug(s) of interest, specified by the \code{drugnames} argument,
#' using rule-based and dictionary-based approaches. Relevant dosing entities include medication
#' strength (identified using the \code{drgunit} argument), dose amount, dose given intake, intake
#' time or frequency of dose, dose change keywords (e.g., 'increase' or 'decrease'), and time of
#' last dose. After applying \code{\link[medExtractR]{medExtractR}}
#' to extract drug dosing information, \code{extractMed} appends the file name to results to ensure
#' they are appropriately labeled.
#'
#' See the EHR Vignette for Extract-Med and Pro-Med-NLP. For more details, see Weeks, et al. 2020.
#'
#' @param note_fn File name(s) for the text file(s) containing the clinical notes. Can be
#' a character string for an individual note, or a vector or list of file names for
#' multiple notes.
#' @param drugnames Vector of drug names for which dosing information should be extracted.
#' Can include various forms (e.g., generic, brand name) as well as abbreviations.
#' @param drgunit Unit of the drug being extracted, e.g., 'mg'
#' @param windowlength Length of the search window (in characters) around the drug name in
#' which to search for dosing entities
#' @param max_edit_dist Maximum edit distance allowed when attempting to extract \code{drugnames}.
#' Allows for capturing misspelled drug name information.
#' @param ... Additional arguments to \code{\link[medExtractR]{medExtractR}}, for example
#' \code{lastdose=TRUE} to extract time of last dose (see \pkg{medExtractR} package documentation for details)
#'
#' @return A data.frame with the extracted dosing information, labeled with file name as an identifier \cr
#' Sample output:\cr
#' \tabular{rrrr}{
#' filename \tab entity \tab expr \tab pos\cr
#' note_file1.txt \tab DoseChange\tab decrease \tab 66:74\cr
#' note_file1.txt \tab DrugName \tab Prograf \tab 78:85\cr
#' note_file1.txt \tab Strength \tab 2 mg \tab 86:90\cr
#' note_file1.txt \tab DoseAmt \tab 1 \tab 91:92\cr
#' note_file1.txt \tab Frequency \tab bid \tab 101:104\cr
#' note_file1.txt \tab LastDose \tab 2100 \tab 121:125\cr
#' }
#'
#' @examples
#' \donttest{
#' tac_fn <- list(system.file("examples", "tacpid1_2008-06-26_note1_1.txt", package = "EHR"),
#' system.file("examples", "tacpid1_2008-06-26_note2_1.txt", package = "EHR"),
#' system.file("examples", "tacpid1_2008-12-16_note3_1.txt", package = "EHR"))
#'
#' extractMed(tac_fn,
#' drugnames = c("tacrolimus", "prograf", "tac", "tacro", "fk", "fk506"),
#' drgunit = "mg",
#' windowlength = 60,
#' max_edit_dist = 2,
#' lastdose=TRUE)
#' }
#' @export
extractMed <- function(note_fn, drugnames, drgunit,
                       windowlength, max_edit_dist = 0, ...) {
  # Validate the container type up front. inherits() is the idiomatic test:
  # unlike `class(x) %in% ...` it is safe for objects carrying multiple
  # classes (a length > 1 condition errors in R >= 4.2). The message now
  # names the real argument (`note_fn`, not `notefn`).
  if(!inherits(note_fn, c("character", "list"))) {
    stop("`note_fn` must be of class 'character' or 'list'", call. = FALSE)
  }
  # medExtractR is an optional dependency, so load it conditionally.
  if(!requireNamespace("medExtractR", quietly = TRUE)) {
    stop("extractMed requires the medExtractR package, please install it.",
         call. = FALSE)
  }
  # Prevent character -> factor coercion during processing; restore on exit.
  s2f <- options(stringsAsFactors = FALSE)
  on.exit(options(s2f))
  # Pull wrapper-only options (`batchsize`, `progress`) out of `...` so the
  # remainder can be forwarded untouched to medExtractR.
  addl <- list(...)
  addlvar <- names(addl)
  batchsize <- 1000
  if('batchsize' %in% addlvar) {
    batchsize <- addl[['batchsize']]
    addl[['batchsize']] <- NULL
  }
  progress <- TRUE
  if('progress' %in% addlvar) {
    progress <- addl[['progress']]
    addl[['progress']] <- NULL
  }
  # Dictionaries shipped as lazy data in medExtractR must be loaded manually;
  # only fill in the ones the caller did not supply explicitly.
  e <- new.env()
  if(!('dosechange_dict' %in% addlvar)) {
    utils::data('dosechange_vals', package = "medExtractR", envir = e)
    addl[['dosechange_dict']] <- e$dosechange_vals
  }
  if(!('drug_list' %in% addlvar)) {
    utils::data('rxnorm_druglist', package = "medExtractR", envir = e)
    addl[['drug_list']] <- e$rxnorm_druglist
  }
  if(!('frequency_dict' %in% addlvar)) {
    utils::data('frequency_vals', package = "medExtractR", envir = e)
    addl[['frequency_dict']] <- e$frequency_vals
  }
  if(!('intaketime_dict' %in% addlvar)) {
    utils::data('intaketime_vals', package = "medExtractR", envir = e)
    addl[['intaketime_dict']] <- e$intaketime_vals
  }
  rm(e)
  # Map this wrapper's argument names onto medExtractR's interface.
  doseArgs <- list(
    drug_names = drugnames,
    unit = drgunit,
    window_length = windowlength,
    max_dist = max_edit_dist
  )
  # Process the notes in batches of `batchsize`, optionally reporting progress.
  n <- length(note_fn)
  chunks <- ceiling(n / batchsize)
  dat <- vector('list', length = chunks)
  for(i in seq_along(dat)) {
    a <- (i - 1) * batchsize + 1
    b <- min(i * batchsize, n)
    if(progress) cat(sprintf("running notes %s-%s in batch %s of %s (%s%%)\r", a, b, i, chunks, round(100 * i / chunks)))
    dat[[i]] <- do.call(qrbind, lapply(note_fn[seq(a, b)], function(x) {
      do.call(getDose, c(x, doseArgs, addl))
    }))
  }
  if(progress) cat("\n")
  # Combine per-batch results into a single data.frame.
  do.call(qrbind, dat)
}
| /EHR/R/extractMed.R | no_license | choileena/EHR | R | false | false | 5,546 | r | #' Extract medication information from clinical notes
#'
#' This function is an interface to the \code{\link[medExtractR]{medExtractR}} function
#' within the \pkg{medExtractR} package, and allows drug dosing information to be extracted
#' from free-text sources, e.g., clinical notes.
#'
#' Medication information, including dosing data, is often stored in free-text sources such as
#' clinical notes. The \code{extractMed} function serves as a convenient wrapper for the
#' \pkg{medExtractR} package, a natural language processing system written in R for extracting
#' medication data. Within \code{extractMed}, the \code{\link[medExtractR]{medExtractR}} function
#' identifies dosing data for drug(s) of interest, specified by the \code{drugnames} argument,
#' using rule-based and dictionary-based approaches. Relevant dosing entities include medication
#' strength (identified using the \code{unit} argument), dose amount, dose given intake, intake
#' time or frequency of dose, dose change keywords (e.g., 'increase' or 'decrease'), and time of
#' last dose. After applying \code{\link[medExtractR]{medExtractR}}
#' to extract drug dosing information, \code{extractMed} appends the file name to results to ensure
#' they are appropriately labeled.
#'
#' See EHR Vignette for Extract-Med and Pro-Med-NLP. For more details, see Weeks, et al. 2020.
#'
#' @param note_fn File name(s) for the text file(s) containing the clinical notes. Can be
#' a character string for an individual note, or a vector or list of file names for
#' multiple notes.
#' @param drugnames Vector of drug names for which dosing information should be extracted.
#' Can include various forms (e.g., generic, brand name) as well as abbreviations.
#' @param drgunit Unit of the drug being extracted, e.g., 'mg'
#' @param windowlength Length of the search window (in characters) around the drug name in
#' which to search for dosing entities
#' @param max_edit_dist Maximum edit distance allowed when attempting to extract \code{drugnames}.
#' Allows for capturing misspelled drug name information.
#' @param ... Additional arguments to \code{\link[medExtractR]{medExtractR}}, for example
#' \code{lastdose=TRUE} to extract time of last dose (see \pkg{medExtractR} package documentation for details)
#'
#' @return A data.frame with the extracted dosing information, labeled with file name as an identifier \cr
#' Sample output:\cr
#' \tabular{rrrr}{
#' filename \tab entity \tab expr \tab pos\cr
#' note_file1.txt \tab DoseChange\tab decrease \tab 66:74\cr
#' note_file1.txt \tab DrugName \tab Prograf \tab 78:85\cr
#' note_file1.txt \tab Strength \tab 2 mg \tab 86:90\cr
#' note_file1.txt \tab DoseAmt \tab 1 \tab 91:92\cr
#' note_file1.txt \tab Frequency \tab bid \tab 101:104\cr
#' note_file1.txt \tab LastDose \tab 2100 \tab 121:125\cr
#' }
#'
#' @examples
#' \donttest{
#' tac_fn <- list(system.file("examples", "tacpid1_2008-06-26_note1_1.txt", package = "EHR"),
#' system.file("examples", "tacpid1_2008-06-26_note2_1.txt", package = "EHR"),
#' system.file("examples", "tacpid1_2008-12-16_note3_1.txt", package = "EHR"))
#'
#' extractMed(tac_fn,
#' drugnames = c("tacrolimus", "prograf", "tac", "tacro", "fk", "fk506"),
#' drgunit = "mg",
#' windowlength = 60,
#' max_edit_dist = 2,
#' lastdose=TRUE)
#' }
#' @export
extractMed <- function(note_fn, drugnames, drgunit,
                       windowlength, max_edit_dist = 0, ...) {
  # Validate the container type up front. inherits() is the idiomatic test:
  # unlike `class(x) %in% ...` it is safe for objects carrying multiple
  # classes (a length > 1 condition errors in R >= 4.2). The message now
  # names the real argument (`note_fn`, not `notefn`).
  if(!inherits(note_fn, c("character", "list"))) {
    stop("`note_fn` must be of class 'character' or 'list'", call. = FALSE)
  }
  # medExtractR is an optional dependency, so load it conditionally.
  if(!requireNamespace("medExtractR", quietly = TRUE)) {
    stop("extractMed requires the medExtractR package, please install it.",
         call. = FALSE)
  }
  # Prevent character -> factor coercion during processing; restore on exit.
  s2f <- options(stringsAsFactors = FALSE)
  on.exit(options(s2f))
  # Pull wrapper-only options (`batchsize`, `progress`) out of `...` so the
  # remainder can be forwarded untouched to medExtractR.
  addl <- list(...)
  addlvar <- names(addl)
  batchsize <- 1000
  if('batchsize' %in% addlvar) {
    batchsize <- addl[['batchsize']]
    addl[['batchsize']] <- NULL
  }
  progress <- TRUE
  if('progress' %in% addlvar) {
    progress <- addl[['progress']]
    addl[['progress']] <- NULL
  }
  # Dictionaries shipped as lazy data in medExtractR must be loaded manually;
  # only fill in the ones the caller did not supply explicitly.
  e <- new.env()
  if(!('dosechange_dict' %in% addlvar)) {
    utils::data('dosechange_vals', package = "medExtractR", envir = e)
    addl[['dosechange_dict']] <- e$dosechange_vals
  }
  if(!('drug_list' %in% addlvar)) {
    utils::data('rxnorm_druglist', package = "medExtractR", envir = e)
    addl[['drug_list']] <- e$rxnorm_druglist
  }
  if(!('frequency_dict' %in% addlvar)) {
    utils::data('frequency_vals', package = "medExtractR", envir = e)
    addl[['frequency_dict']] <- e$frequency_vals
  }
  if(!('intaketime_dict' %in% addlvar)) {
    utils::data('intaketime_vals', package = "medExtractR", envir = e)
    addl[['intaketime_dict']] <- e$intaketime_vals
  }
  rm(e)
  # Map this wrapper's argument names onto medExtractR's interface.
  doseArgs <- list(
    drug_names = drugnames,
    unit = drgunit,
    window_length = windowlength,
    max_dist = max_edit_dist
  )
  # Process the notes in batches of `batchsize`, optionally reporting progress.
  n <- length(note_fn)
  chunks <- ceiling(n / batchsize)
  dat <- vector('list', length = chunks)
  for(i in seq_along(dat)) {
    a <- (i - 1) * batchsize + 1
    b <- min(i * batchsize, n)
    if(progress) cat(sprintf("running notes %s-%s in batch %s of %s (%s%%)\r", a, b, i, chunks, round(100 * i / chunks)))
    dat[[i]] <- do.call(qrbind, lapply(note_fn[seq(a, b)], function(x) {
      do.call(getDose, c(x, doseArgs, addl))
    }))
  }
  if(progress) cat("\n")
  # Combine per-batch results into a single data.frame.
  do.call(qrbind, dat)
}
|
local({
library(data.table)
# create the output directory for report tables on first run
if(!dir.exists("tables")) dir.create("tables")
# file.path() builds OS-portable paths; the original "dir\\file" strings
# embed a literal backslash and only resolve correctly on Windows
df1 <- fread(file.path("temp", "PeptQuant_average_Hippoc.txt"))
df2 <- fread(file.path("temp", "PeptQuant_average_Str_OB_SN.txt"))
lod <- fread(file.path("temp", "Linear_range_aSyn15N.txt"))
# Add region and enzyme; LysN-derived peptides start with "K"
df1[, Region := "Hippocampus"]
df1[, Enzyme := "Trypsin"]
df1[grepl("^K", Peptide), Enzyme := "LysN"]
# rename mice
df1[Condition == "HU-P3", Condition := "TM1"]
df1[Condition == "HU-TR", Condition := "TM2"]
df1[Condition == "P3", Condition := "TP"]
df1[Condition == "TR", Condition := "D119"]
# combine tables
df <- rbind(df1, df2)
# set significance level at 2 digits for the mean (1 digit for the sd)
df[, aSyn_conc_mean := signif(aSyn_conc_mean, 2)]
df[, aSyn_conc_sd := signif(aSyn_conc_sd, 1)]
# add starting point of the linear range (log2 Intensity)
df <- merge(df, lod[, c("NAME", "linear_range")], by.x = "Peptide", by.y = "NAME", all.x = TRUE)
# add aSyn specificities (which mouse lines each peptide can originate from)
df[Peptide == "EGVVHGVATVAEK", aSyn_specificity := "TM1;TM2;D119"]
df[Peptide == "EGVVHGVATVPEK", aSyn_specificity := "TM1"]
df[Peptide == "EGVVHGVTTVAEK", aSyn_specificity := "WT"]
df[Peptide == "EQVTNVGGAVVTGVTAVAQK", aSyn_specificity := "WT;TM1;TM2;D119"]
df[Peptide == "EQVTNVGGAVVTGVTPVAQK", aSyn_specificity := "TP"]
df[Peptide == "KNEEGAPQEGILEDMPVD", aSyn_specificity := "D119"]
df[Peptide == "TVEGAGNIAAATGFVK", aSyn_specificity := "WT"]
df[Peptide == "TVEGAGSIAAATGFVK", aSyn_specificity := "TM1;TM2;TP;D119"]
df[Peptide == "KEGVVHGVATVAE", aSyn_specificity := "TM1;TM2;D119"]
df[Peptide == "KEGVVHGVTTVAE", aSyn_specificity := "WT"]
# remove carry over in WT
df <- df[!(Peptide == "TVEGAGSIAAATGFVK" & Condition == "WT")]
# rename columns
names(df) <- c("Peptide", "Mouse", "log2_Intensity_mean", "log2_Intensity_sd",
"aSyn_fmol_ug_mean", "aSyn_fmol_ug_sd", "Region", "Enzyme", "log2_Intensity_LLOQ",
"aSyn_specificity")
df <- df[, c("Region", "Enzyme", "Peptide", "Mouse", "aSyn_specificity",
"log2_Intensity_mean", "log2_Intensity_sd",
"log2_Intensity_LLOQ", "aSyn_fmol_ug_mean", "aSyn_fmol_ug_sd")]
fwrite(df, file.path("tables", "aSyn_quant.txt"), sep = "\t")
})
}) | /06_prepare_report.R | no_license | IvanSilbern/2021_AMartinez_aSyn | R | false | false | 2,263 | r | local({
library(data.table)
# create the output directory for report tables on first run
if(!dir.exists("tables")) dir.create("tables")
# file.path() builds OS-portable paths; the original "dir\\file" strings
# embed a literal backslash and only resolve correctly on Windows
df1 <- fread(file.path("temp", "PeptQuant_average_Hippoc.txt"))
df2 <- fread(file.path("temp", "PeptQuant_average_Str_OB_SN.txt"))
lod <- fread(file.path("temp", "Linear_range_aSyn15N.txt"))
# Add region and enzyme; LysN-derived peptides start with "K"
df1[, Region := "Hippocampus"]
df1[, Enzyme := "Trypsin"]
df1[grepl("^K", Peptide), Enzyme := "LysN"]
# rename mice
df1[Condition == "HU-P3", Condition := "TM1"]
df1[Condition == "HU-TR", Condition := "TM2"]
df1[Condition == "P3", Condition := "TP"]
df1[Condition == "TR", Condition := "D119"]
# combine tables
df <- rbind(df1, df2)
# set significance level at 2 digits for the mean (1 digit for the sd)
df[, aSyn_conc_mean := signif(aSyn_conc_mean, 2)]
df[, aSyn_conc_sd := signif(aSyn_conc_sd, 1)]
# add starting point of the linear range (log2 Intensity)
df <- merge(df, lod[, c("NAME", "linear_range")], by.x = "Peptide", by.y = "NAME", all.x = TRUE)
# add aSyn specificities (which mouse lines each peptide can originate from)
df[Peptide == "EGVVHGVATVAEK", aSyn_specificity := "TM1;TM2;D119"]
df[Peptide == "EGVVHGVATVPEK", aSyn_specificity := "TM1"]
df[Peptide == "EGVVHGVTTVAEK", aSyn_specificity := "WT"]
df[Peptide == "EQVTNVGGAVVTGVTAVAQK", aSyn_specificity := "WT;TM1;TM2;D119"]
df[Peptide == "EQVTNVGGAVVTGVTPVAQK", aSyn_specificity := "TP"]
df[Peptide == "KNEEGAPQEGILEDMPVD", aSyn_specificity := "D119"]
df[Peptide == "TVEGAGNIAAATGFVK", aSyn_specificity := "WT"]
df[Peptide == "TVEGAGSIAAATGFVK", aSyn_specificity := "TM1;TM2;TP;D119"]
df[Peptide == "KEGVVHGVATVAE", aSyn_specificity := "TM1;TM2;D119"]
df[Peptide == "KEGVVHGVTTVAE", aSyn_specificity := "WT"]
# remove carry over in WT
df <- df[!(Peptide == "TVEGAGSIAAATGFVK" & Condition == "WT")]
# rename columns
names(df) <- c("Peptide", "Mouse", "log2_Intensity_mean", "log2_Intensity_sd",
"aSyn_fmol_ug_mean", "aSyn_fmol_ug_sd", "Region", "Enzyme", "log2_Intensity_LLOQ",
"aSyn_specificity")
df <- df[, c("Region", "Enzyme", "Peptide", "Mouse", "aSyn_specificity",
"log2_Intensity_mean", "log2_Intensity_sd",
"log2_Intensity_LLOQ", "aSyn_fmol_ug_mean", "aSyn_fmol_ug_sd")]
fwrite(df, file.path("tables", "aSyn_quant.txt"), sep = "\t")
})
# Load the data in R
###Open File interactively in R
dataPath<-file.choose();
data <- read.table(dataPath, header=TRUE, sep=";",stringsAsFactors=FALSE,dec=".")
### Remove all the puncuations from the variable names
### Convert the variable names to lower case
varName<-tolower(gsub("[[:punct:]]", "", names(data)))
names(data)<-varName
### Build the timestamp; strptime() returns POSIXlt, a list-like class that
### misbehaves as a data-frame column, so convert it to POSIXct
### Convert the date variable from character to Date type
data$time<- as.POSIXct(strptime(paste(data$date, data$time, sep=" "), "%d/%m/%Y %H:%M:%S"))
data$date<-as.Date(data$date,"%d/%m/%Y")
### Access the subset of the data from 2007-02-01 to 2007-02-02
smallData<-subset(data,(data$date>=as.Date("2007-02-01"))&(data$date<=as.Date("2007-02-02")))
### coerce global active power to numeric (presumably read as character --
### TODO confirm how missing values are encoded in the source file)
png("plot2.png", width=480, height=480)
plot(smallData$time,as.numeric(smallData$globalactivepower),type="l",xlab = "",ylab="Global Active Power (kilowatts)")
dev.off() | /plot2.R | no_license | jaimit/ExData_Plotting1 | R | false | false | 986 | r | # Load the data in R
###Open File interactively in R
dataPath<-file.choose();
data <- read.table(dataPath, header=TRUE, sep=";",stringsAsFactors=FALSE,dec=".")
### Remove all the puncuations from the variable names
### Convert the variable names to lower case
varName<-tolower(gsub("[[:punct:]]", "", names(data)))
names(data)<-varName
### Build the timestamp; strptime() returns POSIXlt, a list-like class that
### misbehaves as a data-frame column, so convert it to POSIXct
### Convert the date variable from character to Date type
data$time<- as.POSIXct(strptime(paste(data$date, data$time, sep=" "), "%d/%m/%Y %H:%M:%S"))
data$date<-as.Date(data$date,"%d/%m/%Y")
### Access the subset of the data from 2007-02-01 to 2007-02-02
smallData<-subset(data,(data$date>=as.Date("2007-02-01"))&(data$date<=as.Date("2007-02-02")))
### coerce global active power to numeric (presumably read as character --
### TODO confirm how missing values are encoded in the source file)
png("plot2.png", width=480, height=480)
plot(smallData$time,as.numeric(smallData$globalactivepower),type="l",xlab = "",ylab="Global Active Power (kilowatts)")
dev.off() |
# Homework 2: regression of Stigma on Thoughts (model 1), then on
# Thoughts + Hope + Emotion (model 2), followed by residual/influence
# diagnostics. Numbered comments refer to homework question numbers.
hw2data <- read.delim("HW2.dat", header = TRUE)
library(ggplot2)
library(pastecs)
library(psych)
library(boot)
library(QuantPsyc)
library(car)
#1-3 based on class notes/book knowledge
#4-10 descriptive statistics (means, SDs, normality) for all four variables
stat.desc(hw2data[,c("Stigma", "Thoughts", "Hope", "Emotion")], basic = F, norm = T)
#11-14 based on class notes/book knowledge
#15-19 simple regression: Stigma predicted by Thoughts
model1 <- lm(Stigma ~ Thoughts, data = hw2data, na.action = na.omit)
summary(model1)
#20 standardized regression coefficient (beta)
lm.beta(model1)
#21 based on class notes/book knowledge
#22
#-0.28 * 1.5= -0.42 (answer to #20 times 1.5 standard deviations = this number)
#-0.42 * 7.55 = -3.17 (number from above times the standard deviation for stigma =)
# -3.17 + 39.21 = 36.04 (the above number times #19)
stat.desc(hw2data[,c("Stigma", "Thoughts")], basic = F, norm = T) #to find SD of Stigma for 22
#23-27 multiple regression with all three predictors
model2dat <- read.delim("HW2.dat", header = TRUE)
model2 <- lm(Stigma ~ Thoughts + Hope + Emotion, data = hw2data, na.action = na.omit)
summary(model2)
#23 for 23, r^2 is 0.01031 * 100 = 10.31%
#28-30
lm.beta(model2)
#31 (r^(2) model 2) - (r^(2) model 1) *100 = 0.1031 - 0.07678 = -0.02632 *100 = 2.63
#32-34 compare nested models
anova(model1, model2)
#35-36 9/2=.045*100= 4.5%
# flag cases with standardized residuals beyond +/-2
rstandard(model2)
model2dat$stdresid <- rstandard(model2)
model2dat$large.resid <- model2dat$stdresid > 2 | model2dat$stdresid < -2
sum(model2dat$large.resid)
#37 influence diagnostics for the flagged cases
problems <- model2dat[model2dat$large.resid,] # rows before the comma: keep only flagged cases
problems
model2dat$cooks <- cooks.distance(model2)
model2dat$leverage <- hatvalues(model2)
model2dat$dffit <- dffits(model2)
model2dat$dfbeta <- dfbeta(model2)
problems <- model2dat[model2dat$large.resid,c("cooks", "leverage", "dffit", "dfbeta")]
problems
#38 average leverage = (k+1)/n = (3+1)/200 = 4/200 = .02
#39 looked at them
#40 test for non-constant error variance (heteroscedasticity)
ncvTest(model2)
#41 p value
#42 correlations among the predictors (multicollinearity screen)
modelpredict <- model2dat[,c("Thoughts","Hope","Emotion")]
cor(modelpredict, use = "pairwise.complete.obs", method = "pearson")
#43-44 variance inflation factors
vif(model2)
#added the three together and divided by 3
#45-47 independence of residuals
durbinWatsonTest(model2)
#48
# NOTE(review): `boot_r` is never defined in this script; the bootstrap
# object presumably comes from an earlier session -- confirm before running.
boot.ci(boot.out = boot_r, type = "bca", index = 1)
#49 notes | /Regression/HW2_Syntax_Cantu2.R | no_license | can2c33/642_Lab | R | false | false | 2,031 | r | hw2data <- read.delim("HW2.dat", header = TRUE)
# Homework 2: regression of Stigma on Thoughts (model 1), then on
# Thoughts + Hope + Emotion (model 2), followed by residual/influence
# diagnostics. Numbered comments refer to homework question numbers.
library(ggplot2)
library(pastecs)
library(psych)
library(boot)
library(QuantPsyc)
library(car)
#1-3 based on class notes/book knowledge
#4-10 descriptive statistics (means, SDs, normality) for all four variables
stat.desc(hw2data[,c("Stigma", "Thoughts", "Hope", "Emotion")], basic = F, norm = T)
#11-14 based on class notes/book knowledge
#15-19 simple regression: Stigma predicted by Thoughts
model1 <- lm(Stigma ~ Thoughts, data = hw2data, na.action = na.omit)
summary(model1)
#20 standardized regression coefficient (beta)
lm.beta(model1)
#21 based on class notes/book knowledge
#22
#-0.28 * 1.5= -0.42 (answer to #20 times 1.5 standard deviations = this number)
#-0.42 * 7.55 = -3.17 (number from above times the standard deviation for stigma =)
# -3.17 + 39.21 = 36.04 (the above number times #19)
stat.desc(hw2data[,c("Stigma", "Thoughts")], basic = F, norm = T) #to find SD of Stigma for 22
#23-27 multiple regression with all three predictors
model2dat <- read.delim("HW2.dat", header = TRUE)
model2 <- lm(Stigma ~ Thoughts + Hope + Emotion, data = hw2data, na.action = na.omit)
summary(model2)
#23 for 23, r^2 is 0.01031 * 100 = 10.31%
#28-30
lm.beta(model2)
#31 (r^(2) model 2) - (r^(2) model 1) *100 = 0.1031 - 0.07678 = -0.02632 *100 = 2.63
#32-34 compare nested models
anova(model1, model2)
#35-36 9/2=.045*100= 4.5%
# flag cases with standardized residuals beyond +/-2
rstandard(model2)
model2dat$stdresid <- rstandard(model2)
model2dat$large.resid <- model2dat$stdresid > 2 | model2dat$stdresid < -2
sum(model2dat$large.resid)
#37 influence diagnostics for the flagged cases
problems <- model2dat[model2dat$large.resid,] # rows before the comma: keep only flagged cases
problems
model2dat$cooks <- cooks.distance(model2)
model2dat$leverage <- hatvalues(model2)
model2dat$dffit <- dffits(model2)
model2dat$dfbeta <- dfbeta(model2)
problems <- model2dat[model2dat$large.resid,c("cooks", "leverage", "dffit", "dfbeta")]
problems
#38 average leverage = (k+1)/n = (3+1)/200 = 4/200 = .02
#39 looked at them
#40 test for non-constant error variance (heteroscedasticity)
ncvTest(model2)
#41 p value
#42 correlations among the predictors (multicollinearity screen)
modelpredict <- model2dat[,c("Thoughts","Hope","Emotion")]
cor(modelpredict, use = "pairwise.complete.obs", method = "pearson")
#43-44 variance inflation factors
vif(model2)
#added the three together and divided by 3
#45-47 independence of residuals
durbinWatsonTest(model2)
#48
# NOTE(review): `boot_r` is never defined in this script; the bootstrap
# object presumably comes from an earlier session -- confirm before running.
boot.ci(boot.out = boot_r, type = "bca", index = 1)
#49 notes |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_plots_module.R
\name{mod_plots_module_ui}
\alias{mod_plots_module_ui}
\alias{mod_plots_module_server}
\title{mod_plots_module_ui and mod_plots_module_server}
\usage{
mod_plots_module_ui(id)
mod_plots_module_server(input, output, session, dataset, time_window)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
}
\description{
A shiny Module.
}
\keyword{internal}
| /man/mod_plots_module.Rd | permissive | korur/mobilequake | R | false | true | 505 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_plots_module.R
\name{mod_plots_module_ui}
\alias{mod_plots_module_ui}
\alias{mod_plots_module_server}
\title{mod_plots_module_ui and mod_plots_module_server}
\usage{
mod_plots_module_ui(id)
mod_plots_module_server(input, output, session, dataset, time_window)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
}
\description{
A shiny Module.
}
\keyword{internal}
|
/Analisis series CPT e IDEAM/organizar_series_raw.R | no_license | lizllanos/Otros | R | false | false | 2,859 | r | ||
trials = read.csv("clinical_trial.csv", stringsAsFactors=FALSE)
# Explore title/abstract lengths
summary(nchar(trials$abstract))
max(nchar(trials$abstract))
table(nchar(trials$abstract) == 0)
sum(nchar(trials$abstract) == 0)
which.min(nchar(trials$title))
trials$title[1258]
library(tm)
library(SnowballC)
# Document-term matrix for titles: lower-case, strip punctuation, drop
# English stopwords, stem, then keep terms present in >= 5% of documents
corpusTitle = VCorpus(VectorSource(trials$title))
corpusTitle = tm_map(corpusTitle, content_transformer(tolower))
corpusTitle = tm_map(corpusTitle, removePunctuation)
corpusTitle = tm_map(corpusTitle, removeWords, stopwords("english"))
corpusTitle = tm_map(corpusTitle, stemDocument)
dtmTitle = DocumentTermMatrix(corpusTitle)
dtmTitle = removeSparseTerms(dtmTitle, 0.95)
dtmTitle = as.data.frame(as.matrix(dtmTitle))
# Same pipeline for the abstracts (this step was missing from the original
# transcript, which used `dtmAbstract` without ever creating it)
corpusAbstract = VCorpus(VectorSource(trials$abstract))
corpusAbstract = tm_map(corpusAbstract, content_transformer(tolower))
corpusAbstract = tm_map(corpusAbstract, removePunctuation)
corpusAbstract = tm_map(corpusAbstract, removeWords, stopwords("english"))
corpusAbstract = tm_map(corpusAbstract, stemDocument)
dtmAbstract = DocumentTermMatrix(corpusAbstract)
dtmAbstract = removeSparseTerms(dtmAbstract, 0.95)
dtmAbstract = as.data.frame(as.matrix(dtmAbstract))
str(dtmTitle)
str(dtmAbstract)
# Most frequent stem across abstracts
csAbstract = colSums(dtmAbstract)
which.max(csAbstract)
# Prefix column names so title and abstract terms stay distinct after cbind
colnames(dtmTitle) = paste0("T", colnames(dtmTitle))
colnames(dtmAbstract) = paste0("A", colnames(dtmAbstract))
dtm = cbind(dtmTitle, dtmAbstract)
dtm$trial = trials$trial
# NOTE(review): the train/test split is missing from this transcript --
# `train` and `test` are presumably created from `dtm` (e.g. via
# caTools::sample.split) before this point; confirm before running.
table(train$trial)
730/(730+572)
library(rpart)
library(rpart.plot)
trialCART = rpart(trial~., data=train, method="class")
prp(trialCART)
# Training-set predicted probabilities (fixes the original `trialCart`
# typo -- R is case-sensitive and the model is named `trialCART`)
predTrain = predict(trialCART)[,2]
summary(predTrain)
table(train$trial, predTrain >= 0.5)
(631+441)/(631+441+99+131)
441/(441+131)
631/(631+99)
predTest = predict(trialCART, newdata=test)[,2]
table(test$trial, predTest >= 0.5)
(261+162)/(261+162+83+52)
library(ROCR)
pred = prediction(predTest, test$trial)
as.numeric(performance(pred, "auc")@y.values)
| /M8/A2.R | no_license | Pranav-20186017/Data-Analytics-With-R | R | false | false | 1,444 | r | trials = read.csv("clinical_trial.csv", stringsAsFactors=FALSE)
# Explore title/abstract lengths
summary(nchar(trials$abstract))
max(nchar(trials$abstract))
table(nchar(trials$abstract) == 0)
sum(nchar(trials$abstract) == 0)
which.min(nchar(trials$title))
trials$title[1258]
library(tm)
library(SnowballC)
# Document-term matrix for titles: lower-case, strip punctuation, drop
# English stopwords, stem, then keep terms present in >= 5% of documents
corpusTitle = VCorpus(VectorSource(trials$title))
corpusTitle = tm_map(corpusTitle, content_transformer(tolower))
corpusTitle = tm_map(corpusTitle, removePunctuation)
corpusTitle = tm_map(corpusTitle, removeWords, stopwords("english"))
corpusTitle = tm_map(corpusTitle, stemDocument)
dtmTitle = DocumentTermMatrix(corpusTitle)
dtmTitle = removeSparseTerms(dtmTitle, 0.95)
dtmTitle = as.data.frame(as.matrix(dtmTitle))
# Same pipeline for the abstracts (this step was missing from the original
# transcript, which used `dtmAbstract` without ever creating it)
corpusAbstract = VCorpus(VectorSource(trials$abstract))
corpusAbstract = tm_map(corpusAbstract, content_transformer(tolower))
corpusAbstract = tm_map(corpusAbstract, removePunctuation)
corpusAbstract = tm_map(corpusAbstract, removeWords, stopwords("english"))
corpusAbstract = tm_map(corpusAbstract, stemDocument)
dtmAbstract = DocumentTermMatrix(corpusAbstract)
dtmAbstract = removeSparseTerms(dtmAbstract, 0.95)
dtmAbstract = as.data.frame(as.matrix(dtmAbstract))
str(dtmTitle)
str(dtmAbstract)
# Most frequent stem across abstracts
csAbstract = colSums(dtmAbstract)
which.max(csAbstract)
# Prefix column names so title and abstract terms stay distinct after cbind
colnames(dtmTitle) = paste0("T", colnames(dtmTitle))
colnames(dtmAbstract) = paste0("A", colnames(dtmAbstract))
dtm = cbind(dtmTitle, dtmAbstract)
dtm$trial = trials$trial
# NOTE(review): the train/test split is missing from this transcript --
# `train` and `test` are presumably created from `dtm` (e.g. via
# caTools::sample.split) before this point; confirm before running.
table(train$trial)
730/(730+572)
library(rpart)
library(rpart.plot)
trialCART = rpart(trial~., data=train, method="class")
prp(trialCART)
# Training-set predicted probabilities (fixes the original `trialCart`
# typo -- R is case-sensitive and the model is named `trialCART`)
predTrain = predict(trialCART)[,2]
summary(predTrain)
table(train$trial, predTrain >= 0.5)
(631+441)/(631+441+99+131)
441/(441+131)
631/(631+99)
predTest = predict(trialCART, newdata=test)[,2]
table(test$trial, predTest >= 0.5)
(261+162)/(261+162+83+52)
library(ROCR)
pred = prediction(predTest, test$trial)
as.numeric(performance(pred, "auc")@y.values)
|
# Emerging-pattern mining: find itemsets frequent in datasetT, compute their
# support in datasetN by brute-force matching, and rank by growth rate
# (supportT / supportN). T/N presumably denote the two patient groups of the
# diabetes study -- confirm against the paper.
library(arules)
path <- "/home/oscar/Dropbox/Publicaciones/Paper patter mining-datos biologicos/1-Diabetes/"
datasetT <- read.csv(paste(path,"T-withoutMissingValues-norm-disc.csv",sep = ""))
datasetN <- read.csv(paste(path,"N-withoutMissingValues-norm-disc.csv",sep = ""))
numberAtt <- ncol(datasetT)
# Mine frequent itemsets (not rules) with very high minimum support.
patterns <- apriori(datasetT,
                    parameter = list(
                      support = 0.95,
                      target = "frequent itemsets",
                      maxlen = numberAtt
                    ))
# Arules uses its own objects; flatten to a data.frame with an "items"
# string column and a "support" column.
patterns <- as(patterns, "data.frame")
if(nrow(patterns) > 0){
  newData <- data.frame(
    "itemset" = character(0),
    "supportT" = numeric(0),
    "supportN" = numeric(0),
    "growthRate" = numeric(0))
  for (t in 1:nrow(patterns)) {
    transaction <- patterns[t, "items"]
    #convert the factor into a character vector
    transaction <- as.character(transaction)
    # items are rendered as "{name=value,name=value,...}"; split on commas
    # (assumes attribute values contain no commas -- TODO confirm)
    itemset <- unlist(strsplit(transaction, ","))
    supportN <- 0
    #for each row in datasetN. In this part is computed the support of the pattern in the other dataset
    for (f in 1:nrow(datasetN)) {
      flag = TRUE
      for (item in itemset) {
        itemValues <- unlist(strsplit(item, "="))
        itemName <- itemValues[1]
        # strip the opening brace arules puts before the first item
        if (startsWith(itemName, "{")) {
          itemName <- unlist(strsplit(itemName, '\\{'))[2]
        }
        itemValue <- itemValues[2]
        # strip the closing brace after the last item
        if (endsWith(itemValue, "}")) {
          itemValue <- unlist(strsplit(itemValue, '\\}'))[1]
        }
        currentValue <- as.character(datasetN[f, itemName])
        if (currentValue != itemValue)
        {
          flag <- FALSE
          break()
        }
      }
      # The pattern matches this row of datasetN
      if (flag) {
        supportN <- supportN + 1
      }
    }
    supportN <- supportN / nrow(datasetN)
    supportP <- patterns[t, "support"]
    # growthRate is Inf when the itemset never occurs in datasetN
    # (supportN == 0); such rows sort first below
    growthRate <- supportP / supportN
    #register the frequent itemset
    # NOTE(review): growing newData with rbind() inside the loop is O(n^2);
    # collecting rows in a preallocated list and binding once would scale better
    newData <-
      rbind(
        newData,
        data.frame(
          "itemset" = transaction,
          "supportP" = supportP,
          "supportN" = supportN,
          "growthRate" = growthRate
        )
      )
  }
  # Sort by growthRate (descending), ties broken by supportP
  newData <-
    newData[order(-newData$growthRate,-newData$supportP),]
  write.table(
    newData,
    file = paste(path,"patterns-growthRate.csv",sep = ""),
    quote = TRUE,
    sep = "," ,
    row.names = FALSE,
    col.names = TRUE)
} | /src/R-scripts/EmergingPatternsv3.R | no_license | ogreyesp/PatternMining-Biology | R | false | false | 2,556 | r | library(arules)
# Emerging-pattern mining: find itemsets frequent in datasetT, compute their
# support in datasetN by brute-force matching, and rank by growth rate
# (supportT / supportN). T/N presumably denote the two patient groups of the
# diabetes study -- confirm against the paper.
path <- "/home/oscar/Dropbox/Publicaciones/Paper patter mining-datos biologicos/1-Diabetes/"
datasetT <- read.csv(paste(path,"T-withoutMissingValues-norm-disc.csv",sep = ""))
datasetN <- read.csv(paste(path,"N-withoutMissingValues-norm-disc.csv",sep = ""))
numberAtt <- ncol(datasetT)
# Mine frequent itemsets (not rules) with very high minimum support.
patterns <- apriori(datasetT,
                    parameter = list(
                      support = 0.95,
                      target = "frequent itemsets",
                      maxlen = numberAtt
                    ))
# Arules uses its own objects; flatten to a data.frame with an "items"
# string column and a "support" column.
patterns <- as(patterns, "data.frame")
if(nrow(patterns) > 0){
  newData <- data.frame(
    "itemset" = character(0),
    "supportT" = numeric(0),
    "supportN" = numeric(0),
    "growthRate" = numeric(0))
  for (t in 1:nrow(patterns)) {
    transaction <- patterns[t, "items"]
    #convert the factor into a character vector
    transaction <- as.character(transaction)
    # items are rendered as "{name=value,name=value,...}"; split on commas
    # (assumes attribute values contain no commas -- TODO confirm)
    itemset <- unlist(strsplit(transaction, ","))
    supportN <- 0
    #for each row in datasetN. In this part is computed the support of the pattern in the other dataset
    for (f in 1:nrow(datasetN)) {
      flag = TRUE
      for (item in itemset) {
        itemValues <- unlist(strsplit(item, "="))
        itemName <- itemValues[1]
        # strip the opening brace arules puts before the first item
        if (startsWith(itemName, "{")) {
          itemName <- unlist(strsplit(itemName, '\\{'))[2]
        }
        itemValue <- itemValues[2]
        # strip the closing brace after the last item
        if (endsWith(itemValue, "}")) {
          itemValue <- unlist(strsplit(itemValue, '\\}'))[1]
        }
        currentValue <- as.character(datasetN[f, itemName])
        if (currentValue != itemValue)
        {
          flag <- FALSE
          break()
        }
      }
      # The pattern matches this row of datasetN
      if (flag) {
        supportN <- supportN + 1
      }
    }
    supportN <- supportN / nrow(datasetN)
    supportP <- patterns[t, "support"]
    # growthRate is Inf when the itemset never occurs in datasetN
    # (supportN == 0); such rows sort first below
    growthRate <- supportP / supportN
    #register the frequent itemset
    # NOTE(review): growing newData with rbind() inside the loop is O(n^2);
    # collecting rows in a preallocated list and binding once would scale better
    newData <-
      rbind(
        newData,
        data.frame(
          "itemset" = transaction,
          "supportP" = supportP,
          "supportN" = supportN,
          "growthRate" = growthRate
        )
      )
  }
  # Sort by growthRate (descending), ties broken by supportP
  newData <-
    newData[order(-newData$growthRate,-newData$supportP),]
  write.table(
    newData,
    file = paste(path,"patterns-growthRate.csv",sep = ""),
    quote = TRUE,
    sep = "," ,
    row.names = FALSE,
    col.names = TRUE)
} |
# RAILWAY LENGTH FOR EVERY DUTCH MUNICIPALITY USING OSM DATA
# Milos Popovic 05/03/2021
# use TRUE rather than the reassignable shorthand T
library(plyr, quietly=TRUE)
library(tidyverse, quietly=TRUE)
library(sf, quietly=TRUE)
library(ggplot2, quietly=TRUE)
library(dplyr, quietly=TRUE)
library(rgdal, quietly=TRUE)
library(classInt, quietly=TRUE)
library(zoo, quietly=TRUE)
library(geofabrik, quietly=TRUE)
library(raster, quietly=TRUE)
#download the official shapefile of Dutch munis into a temporary zip
tm <- tempfile(fileext = ".zip")
download.file("https://opendata.arcgis.com/datasets/e1f0dd70abcb4fceabbc43412e43ad4b_0.zip",
tm)
unzip(tm)
#load muni shapefile and reproject to WGS84 lon/lat
nl <- readOGR(getwd(),
"Gemeentegrenzen__voorlopig____kustlijn",
verbose = TRUE,
stringsAsFactors = FALSE) %>%
spTransform(CRS("+proj=longlat +datum=WGS84 +no_defs"))
# get OSM data for the Netherlands (size 1816.0 MB)
outDir <- paste0(getwd(), "/netherlands")
get_osm("Netherlands",
type = "shp",
file = outDir,
quiet = FALSE)
unzip(paste0(outDir, ".shp.zip"), exdir=getwd())
#load railways and subset rail type, reprojected to the same WGS84 CRS
rail <- readOGR(getwd(),
"gis_osm_railways_free_1",
verbose = TRUE,
stringsAsFactors = FALSE) %>%
subset(fclass=="rail") %>%
spTransform(CRS("+proj=longlat +datum=WGS84 +no_defs"))
#calculate the length of railway for every muni
# first, turn both shp files into sf class
n <- nl %>% st_as_sf
r <- rail %>% st_as_sf
# clip rail segments to municipality polygons and measure each clipped piece
ints <- st_intersection(r, n) %>%
dplyr::mutate(len_m = sf::st_length(geometry)) %>% # returns length in meters
dplyr::group_by(Code)
# as.matrix() coerces every column to character, so restore the
# numeric length and character code afterwards
int <- as.data.frame(as.matrix(ints))
int$len_m <- as.numeric(as.character(int$len_m))
int$Code <- as.character(int$Code)
# total rail length (meters) per municipality code
ii <- ddply(int, "Code", summarise, lr_m = sum(len_m, na.rm = TRUE))
# join nl and ii; all.x keeps municipalities with no rail (lr_m = NA)
df <- merge(nl, ii, by='Code', all.x=T)
# compute land area size in square kilometers
df$area_sqkm <- area(df) / 1000000
# rail density: km of rail / 100 square km
df$rail_dens <- (df$lr_m/1000) / df$area_sqkm * 100
# place back in sf object for mapping
d <- df %>% st_as_sf
# let's find a natural interval with quantile breaks
ni = classIntervals(d$rail_dens,
n = 8,
style = 'quantile')$brks
# build "low–high" labels for every interval in one vectorized step;
# the original grew `labels` with c() in a loop, produced a trailing
# "x–NA" entry from ni[length(ni) + 1], and then dropped it via the
# `1:length(labels)-1` precedence quirk
labels <- paste0(round(ni[-length(ni)], 0), "–", round(ni[-1], 0))
# finally, carve out the categorical variable based on the breaks and labels above
d$cat <- cut(d$rail_dens,
breaks = ni,
labels = labels,
include.lowest = TRUE)
levels(d$cat) # let's check how many levels it has (8)
# label NAs, too: municipalities without rail get their own explicit level
lvl <- levels(d$cat)
lvl[length(lvl) + 1] <- "No rail"
d$cat <- factor(d$cat, levels = lvl)
d$cat[is.na(d$cat)] <- "No rail"
levels(d$cat)
# plot: choropleth of rail density per municipality
p <- ggplot() +
geom_sf(data=d, aes(fill = cat), color="white", size=0.1) +
coord_sf(crs = 4326, datum = NA) +
theme_minimal() +
# NOTE(review): the legend labels below are hard-coded and duplicate the
# computed `labels` vector above; if the data change, the legend will not.
# Confirm this is intentional before reusing with other inputs.
scale_fill_manual(name= expression(paste("1 km of rail per 100", km^{2}, "of land area")),
values = rev(c("grey80", '#513b56', '#51556f', '#4c6e88', '#44889e', '#51a1a3', '#75b99c', '#99d091', '#bce784')),
labels = c("~0–8", "8–14", "14–20", "20–26", "26–41", "41–55", "55–78",
">78", "No rail"),
drop = F)+
  guides(color=F, fill = guide_legend(
            direction = "horizontal",
            keyheight = unit(1.15, units = "mm"),
            keywidth = unit(15, units = "mm"),
            title.position = 'top',
            title.hjust = 0.5,
            label.hjust = .5,
            nrow = 1,
            byrow = T,
            # also the guide needs to be reversed
            reverse = F,
            label.position = "bottom"
          )
    ) +
labs(x = "©2021 Milos Popovic (https://milosp.info)\n Data: OSM Geofabrik",
     title = "Railway density in the Netherlands",
     subtitle = "municipality level",
     caption = "") +
# strip axes and gridlines; pin the horizontal legend just below the map
theme(
axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_text(size=9, color="grey60", hjust=0, vjust=25),
axis.title.y = element_blank(),
legend.position = c(.5, -.01),
legend.text = element_text(size=11, color="grey20"),
legend.title = element_text(size=12, color="grey20"),
# panel.grid.minor = element_line(color = "grey20", size = 0.2),
panel.grid.major = element_line(color = "white", size = 0.2),
panel.grid.minor = element_blank(),
plot.margin = unit(c(t=0, r=0, b=0, l=0),"lines"), #added these narrower margins to enlarge maps
plot.title = element_text(face="bold", size=20, color="#513b56", hjust=.5, vjust=-2),
plot.subtitle = element_text(size=16, color="#457795", hjust=.5, vjust=-2),
plot.background = element_rect(fill = "white", color = NA),
panel.background = element_rect(fill = "white", color = NA),
legend.background = element_rect(fill = "white", color = NA),
panel.border = element_blank())
# render at print resolution
ggsave(filename="nld_rail_density.png", width= 7, height= 9.15, dpi = 600, device='png', p)
| /R/netherlands_rail_length.r | no_license | milos-agathon/netherlands_rail_density_map | R | false | false | 5,327 | r | # RAILWAY LENGTH FOR EVERY DUTCH MUNICIPALITY USING OSM DATA
# Milos Popovic 05/03/2021
library(plyr, quietly=T)
library(tidyverse, quietly=T)
library(sf, quietly=T)
library(ggplot2, quietly=T)
library(dplyr, quietly=T)
library(rgdal, quietly=T)
library(classInt, quietly=T)
library(zoo, quietly=T)
library(geofabrik, quietly=T)
library(raster, quietly=T)
# Download the official shapefile of Dutch municipalities (ArcGIS open data)
# into a temporary zip and extract it into the working directory.
tm <- tempfile(fileext = ".zip")
download.file("https://opendata.arcgis.com/datasets/e1f0dd70abcb4fceabbc43412e43ad4b_0.zip",
tm)
unzip(tm)
# Load the municipality boundaries and reproject to WGS84 lon/lat so they
# line up with the OSM railway layer loaded below.
# NOTE(review): readOGR()/spTransform() belong to the retired rgdal/sp stack;
# sf::st_read()/st_transform() would be the modern equivalents.
nl <- readOGR(getwd(),
"Gemeentegrenzen__voorlopig____kustlijn",
verbose = TRUE,
stringsAsFactors = FALSE) %>%
spTransform(CRS("+proj=longlat +datum=WGS84 +no_defs"))
# Download the Geofabrik OSM extract for the Netherlands as shapefiles
# (large download, ~1.8 GB) and unzip it into the working directory.
outDir <- paste0(getwd(), "/netherlands")
get_osm("Netherlands",
type = "shp",
file = outDir,
quiet = FALSE)
unzip(paste0(outDir, ".shp.zip"), exdir=getwd())
# Load the OSM railway layer, keep only features tagged fclass == "rail",
# and reproject to WGS84 so it matches the municipality polygons above.
rail <- readOGR(getwd(),
"gis_osm_railways_free_1",
verbose = TRUE,
stringsAsFactors = FALSE) %>%
subset(fclass=="rail") %>%
spTransform(CRS("+proj=longlat +datum=WGS84 +no_defs"))
# Railway length per municipality: clip the rail lines to the municipality
# polygons, measure each clipped segment, and sum by municipality Code.
# Work in sf so st_intersection()/st_length() are available.
n <- nl %>% st_as_sf
r <- rail %>% st_as_sf
ints <- st_intersection(r, n) %>%
dplyr::mutate(len_m = sf::st_length(geometry)) %>% # returns length in meters
dplyr::group_by(Code)
# Flatten to a plain data frame; the as.matrix() round-trip coerces every
# column to character, so len_m and Code are converted back explicitly below.
int <- as.data.frame(as.matrix(ints))
int$len_m <- as.numeric(as.character(int$len_m))
int$Code <- as.character(int$Code)
ii <- ddply(int, "Code", summarise, lr_m = sum(len_m, na.rm = TRUE))
# Join the summed lengths back onto the municipality polygons; keep all
# municipalities (all.x) so rail-free ones stay in the map with NA length.
df <- merge(nl, ii, by='Code', all.x=TRUE)
# Land area in square kilometers.
# NOTE(review): assumes raster::area() returns square meters for these
# lon/lat polygons (hence the /1e6) — TODO confirm.
df$area_sqkm <- area(df) / 1000000
# Rail density: km of rail per 100 km^2 of land area.
df$rail_dens <- (df$lr_m/1000) / df$area_sqkm * 100
# Back to sf for plotting with geom_sf().
d <- df %>% st_as_sf
# Quantile class breaks (8 classes) for the rail-density choropleth.
ni <- classIntervals(d$rail_dens,
n = 8,
style = 'quantile')$brks
# Build "low–high" legend labels from consecutive break pairs (vectorized).
# The original loop produced a trailing "x–NA" label and then dropped it with
# labels[1:length(labels)-1], which only worked by accident: `1:n-1` parses
# as `(1:n)-1` and the resulting 0 index is silently ignored by R.
labels <- paste0(round(ni[-length(ni)], 0),
"–",
round(ni[-1], 0))
# Cut the density into the labelled classes.
d$cat <- cut(d$rail_dens,
breaks = ni,
labels = labels,
include.lowest = TRUE)
levels(d$cat) # inspect: should be 8 levels
# Add an explicit "No rail" level so NA densities get their own legend entry.
lvl <- levels(d$cat)
lvl[length(lvl) + 1] <- "No rail"
d$cat <- factor(d$cat, levels = lvl)
d$cat[is.na(d$cat)] <- "No rail"
levels(d$cat)
# Choropleth of rail density per municipality, "No rail" drawn in grey,
# horizontal legend under the map.
# NOTE(review): the legend labels below are hard-coded and must match the
# quantile breaks computed above — verify after re-running on fresh data.
p <- ggplot() +
geom_sf(data=d, aes(fill = cat), color="white", size=0.1) +
coord_sf(crs = 4326, datum = NA) +
theme_minimal() +
scale_fill_manual(name= expression(paste("1 km of rail per 100", km^{2}, "of land area")),
values = rev(c("grey80", '#513b56', '#51556f', '#4c6e88', '#44889e', '#51a1a3', '#75b99c', '#99d091', '#bce784')),
labels = c("~0–8", "8–14", "14–20", "20–26", "26–41", "41–55", "55–78",
">78", "No rail"),
drop = FALSE)+
guides(color=FALSE, fill = guide_legend(
direction = "horizontal",
keyheight = unit(1.15, units = "mm"),
keywidth = unit(15, units = "mm"),
title.position = 'top',
title.hjust = 0.5,
label.hjust = .5,
nrow = 1,
byrow = TRUE,
# also the guide needs to be reversed
reverse = FALSE,
label.position = "bottom"
)
) +
labs(x = "©2021 Milos Popovic (https://milosp.info)\n Data: OSM Geofabrik",
title = "Railway density in the Netherlands",
subtitle = "municipality level",
caption = "") +
theme(
axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_text(size=9, color="grey60", hjust=0, vjust=25),
axis.title.y = element_blank(),
legend.position = c(.5, -.01),
legend.text = element_text(size=11, color="grey20"),
legend.title = element_text(size=12, color="grey20"),
# panel.grid.minor = element_line(color = "grey20", size = 0.2),
panel.grid.major = element_line(color = "white", size = 0.2),
panel.grid.minor = element_blank(),
plot.margin = unit(c(t=0, r=0, b=0, l=0),"lines"), #added these narrower margins to enlarge maps
plot.title = element_text(face="bold", size=20, color="#513b56", hjust=.5, vjust=-2),
plot.subtitle = element_text(size=16, color="#457795", hjust=.5, vjust=-2),
plot.background = element_rect(fill = "white", color = NA),
panel.background = element_rect(fill = "white", color = NA),
legend.background = element_rect(fill = "white", color = NA),
panel.border = element_blank())
# Save at print quality: 7 x 9.15 inches at 600 dpi.
ggsave(filename="nld_rail_density.png", width= 7, height= 9.15, dpi = 600, device='png', p)
|
# This script simply cycles through multiple Google Analytics views,
# pulls back a handful of metrics, and then consolidates them and
# pushes them into a .csv.
# Load the packages we'll use
library(googleAnalyticsR)
library(tidyverse)
# Settings: trailing 31-day window ending yesterday, plus the GA metrics /
# dimensions to query. These are read as globals by get_data() below.
start_date <- as.character(Sys.Date()-31)
end_date <- as.character(Sys.Date()-1)
metrics <- c("sessions","pageviews","totalEvents")
dimensions <- "year"
# Authorize Google Analytics (opens an OAuth flow if no cached token exists).
ga_auth()
# Pull a list of all available accounts and views and then filter it
# down to the subset of interest. This will need to be adjusted. Currently,
# it's assuming a single GA account is of interest (but grepl() or
# contains() could be used to grab multiple ones), and it's then
# getting all the views that start with "PROD", which also will
# likely need to be adjusted.
account_summary <- google_analytics_account_list() %>%
filter(accountName == "[name of the Google Analytics account]",
grepl("^PROD.*", viewName))
# Record the query window on each row so it appears in the final output.
account_summary$start_date <- start_date
account_summary$end_date <- end_date
# Pull the summary metrics for one Google Analytics view.
#
# view_id: a single GA view ID (from account_summary$viewId).
# Returns a one-row data frame with columns sessions, pageviews, total_events.
#
# NOTE: start_date, end_date, metrics and dimensions are read from the
# global environment (set in the Settings section above).
get_data <- function(view_id){
  # The query is dimensioned by year, so it can return multiple rows when
  # the date range spans a year boundary; collapse to a single row of totals.
  # (Return the pipeline directly instead of assigning to a throwaway local.)
  google_analytics_4(viewId = view_id,
                     date_range = c(start_date,end_date),
                     metrics = metrics,
                     dimensions = dimensions) %>%
    summarise(sessions = sum(sessions),
              pageviews = sum(pageviews),
              total_events = sum(totalEvents))
}
# Run get_data() once per view: group_by(viewId) + do() applies the query
# to each view and row-binds the one-row results.
result_metrics <- group_by(account_summary, viewId) %>%
do(get_data(.$viewId))
# Join the metrics back onto the view summary by the shared viewId key.
# (Was by = c("viewId", "viewId"), which names the same column twice.)
account_summary <- left_join(account_summary, result_metrics,
by = "viewId")
# Make a more compact set of data: keep only the identifying columns, the
# query window, and the three metrics, renamed to report-friendly headers.
clean_summary <- select(account_summary,
Account = accountName,
Property = webPropertyName,
View = viewName,
Type = type,
Level = level,
'Start Date' = start_date,
'End Date' = end_date,
Sessions = sessions,
Pageview = pageviews,
'Total Events' = total_events)
# Output the results to a .csv file. Another function can be used if
# comma-delimited isn't ideal.
write.csv(clean_summary, "summary_results.csv", row.names = FALSE)
| /ga-view-summary.R | permissive | firefoxxy8/ga-view-summary | R | false | false | 2,799 | r | # This script simply cycles through multiple Google Analytics views,
# pulls back a handful of metrics, and then consolidates them and
# pushes them into a .csv.
# Load the packages we'll use
library(googleAnalyticsR)
library(tidyverse)
# Settings: trailing 31-day window ending yesterday, plus the GA metrics /
# dimensions to query. These are read as globals by get_data() below.
start_date <- as.character(Sys.Date()-31)
end_date <- as.character(Sys.Date()-1)
metrics <- c("sessions","pageviews","totalEvents")
dimensions <- "year"
# Authorize Google Analytics (opens an OAuth flow if no cached token exists).
ga_auth()
# Pull a list of all available accounts and views and then filter it
# down to the subset of interest. This will need to be adjusted. Currently,
# it's assuming a single GA account is of interest (but grepl() or
# contains() could be used to grab multiple ones), and it's then
# getting all the views that start with "PROD", which also will
# likely need to be adjusted.
account_summary <- google_analytics_account_list() %>%
filter(accountName == "[name of the Google Analytics account]",
grepl("^PROD.*", viewName))
# Record the query window on each row so it appears in the final output.
account_summary$start_date <- start_date
account_summary$end_date <- end_date
# Pull the summary metrics for one Google Analytics view.
#
# view_id: a single GA view ID (from account_summary$viewId).
# Returns a one-row data frame with columns sessions, pageviews, total_events.
#
# NOTE: start_date, end_date, metrics and dimensions are read from the
# global environment (set in the Settings section above).
get_data <- function(view_id){
  # The query is dimensioned by year, so it can return multiple rows when
  # the date range spans a year boundary; collapse to a single row of totals.
  # (Return the pipeline directly instead of assigning to a throwaway local.)
  google_analytics_4(viewId = view_id,
                     date_range = c(start_date,end_date),
                     metrics = metrics,
                     dimensions = dimensions) %>%
    summarise(sessions = sum(sessions),
              pageviews = sum(pageviews),
              total_events = sum(totalEvents))
}
# Run get_data() once per view: group_by(viewId) + do() applies the query
# to each view and row-binds the one-row results.
result_metrics <- group_by(account_summary, viewId) %>%
do(get_data(.$viewId))
# Join the metrics back onto the view summary by the shared viewId key.
# (Was by = c("viewId", "viewId"), which names the same column twice.)
account_summary <- left_join(account_summary, result_metrics,
by = "viewId")
# Make a more compact set of data: keep only the identifying columns, the
# query window, and the three metrics, renamed to report-friendly headers.
clean_summary <- select(account_summary,
Account = accountName,
Property = webPropertyName,
View = viewName,
Type = type,
Level = level,
'Start Date' = start_date,
'End Date' = end_date,
Sessions = sessions,
Pageview = pageviews,
'Total Events' = total_events)
# Output the results to a .csv file. Another function can be used if
# comma-delimited isn't ideal.
write.csv(clean_summary, "summary_results.csv", row.names = FALSE)
|
#' @title Target resources
#' @export
#' @family resources
#' @description Create a `resources` argument for [tar_target()]
#' or [tar_option_set()].
#' @section Resources:
#' Functions [tar_target()] and [tar_option_set()]
#' each takes an optional `resources` argument to supply
#' non-default settings of various optional backends for data storage
#' and high-performance computing. The `tar_resources()` function
#' is a helper to supply those settings in the correct manner.
#' @return A list of objects of class `"tar_resources"` with
#' non-default settings of various optional backends for data storage
#' and high-performance computing.
#' @param aws Output of function [tar_resources_aws()].
#' AWS S3 storage settings for AWS backed storage formats
#' such as `"aws_qs"` and `"aws_parquet`. Applies to all formats
#' beginning with the `"aws_"` prefix. For details on formats,
#' see the `format` argument of [tar_target()].
#' @param clustermq Output of function [tar_resources_clustermq()].
#' Optional `clustermq` settings for [tar_make_clustermq()],
#' including the `log_worker` and `template` arguments of
#' `clustermq::workers()`.
#' @param feather Output of function [tar_resources_feather()].
#' Non-default arguments to `arrow::read_feather()` and
#' `arrow::write_feather()` for `arrow`/feather-based storage formats.
#' Applies to all formats ending with the `"_feather"` suffix.
#' For details on formats, see the `format` argument of [tar_target()].
#' @param fst Output of function [tar_resources_fst()].
#' Non-default arguments to `fst::read_fst()` and
#' `fst::write_fst()` for `fst`-based storage formats.
#' Applies to all formats ending with `"fst"` in the name.
#' For details on formats, see the `format` argument of [tar_target()].
#' @param future Output of function [tar_resources_future()].
#' Optional `future` settings for [tar_make_future()],
#' including the `resources` argument of
#' `future::future()`, which can include values to insert in
#' template placeholders in `future.batchtools` template files.
#' @param parquet Output of function [tar_resources_parquet()].
#' Non-default arguments to `arrow::read_parquet()` and
#' `arrow::write_parquet()` for `arrow`/parquet-based storage formats.
#' Applies to all formats ending with the `"_parquet"` suffix.
#' For details on formats, see the `format` argument of [tar_target()].
#' @param qs Output of function [tar_resources_qs()].
#' Non-default arguments to `qs::qread()` and
#' `qs::qsave()` for `qs`-based storage formats.
#' Applies to all formats ending with the `"_qs"` suffix.
#' For details on formats, see the `format` argument of [tar_target()].
#' @param url Output of function [tar_resources_url()].
#' Non-default settings for storage formats ending with the `"_url"` suffix.
#' These settings include the `curl` handle for extra control over HTTP
#' requests. For details on formats, see the `format` argument of
#' [tar_target()].
#' @examples
#' # Somewhere in your target script file (usually _targets.R):
#' tar_target(
#' name,
#' command(),
#' format = "qs",
#' resources = tar_resources(
#' qs = tar_resources_qs(preset = "fast"),
#' future = tar_resources_future(resources = list(n_cores = 1))
#' )
#' )
tar_resources <- function(
  aws = NULL,
  clustermq = NULL,
  feather = NULL,
  fst = NULL,
  future = NULL,
  parquet = NULL,
  qs = NULL,
  url = NULL
) {
  # Collect the non-NULL resource settings into a named list, validating
  # that each value was produced by the matching tar_resources_*() helper.
  env <- environment()
  out <- list()
  for (arg in names(formals(tar_resources))) {
    value <- env[[arg]]
    if (is.null(value)) {
      next
    }
    assert_inherits(
      value,
      paste0("tar_resources_", arg),
      paste0(
        arg,
        " argument to tar_resources() must be output from tar_resources_",
        arg,
        "() or NULL."
      )
    )
    out[[arg]] <- value
  }
  out
}
| /R/tar_resources.R | permissive | boshek/targets | R | false | false | 3,919 | r | #' @title Target resources
#' @export
#' @family resources
#' @description Create a `resources` argument for [tar_target()]
#' or [tar_option_set()].
#' @section Resources:
#' Functions [tar_target()] and [tar_option_set()]
#' each takes an optional `resources` argument to supply
#' non-default settings of various optional backends for data storage
#' and high-performance computing. The `tar_resources()` function
#' is a helper to supply those settings in the correct manner.
#' @return A list of objects of class `"tar_resources"` with
#' non-default settings of various optional backends for data storage
#' and high-performance computing.
#' @param aws Output of function [tar_resources_aws()].
#' AWS S3 storage settings for AWS backed storage formats
#' such as `"aws_qs"` and `"aws_parquet`. Applies to all formats
#' beginning with the `"aws_"` prefix. For details on formats,
#' see the `format` argument of [tar_target()].
#' @param clustermq Output of function [tar_resources_clustermq()].
#' Optional `clustermq` settings for [tar_make_clustermq()],
#' including the `log_worker` and `template` arguments of
#' `clustermq::workers()`.
#' @param feather Output of function [tar_resources_feather()].
#' Non-default arguments to `arrow::read_feather()` and
#' `arrow::write_feather()` for `arrow`/feather-based storage formats.
#' Applies to all formats ending with the `"_feather"` suffix.
#' For details on formats, see the `format` argument of [tar_target()].
#' @param fst Output of function [tar_resources_fst()].
#' Non-default arguments to `fst::read_fst()` and
#' `fst::write_fst()` for `fst`-based storage formats.
#' Applies to all formats ending with `"fst"` in the name.
#' For details on formats, see the `format` argument of [tar_target()].
#' @param future Output of function [tar_resources_future()].
#' Optional `future` settings for [tar_make_future()],
#' including the `resources` argument of
#' `future::future()`, which can include values to insert in
#' template placeholders in `future.batchtools` template files.
#' @param parquet Output of function [tar_resources_parquet()].
#' Non-default arguments to `arrow::read_parquet()` and
#' `arrow::write_parquet()` for `arrow`/parquet-based storage formats.
#' Applies to all formats ending with the `"_parquet"` suffix.
#' For details on formats, see the `format` argument of [tar_target()].
#' @param qs Output of function [tar_resources_qs()].
#' Non-default arguments to `qs::qread()` and
#' `qs::qsave()` for `qs`-based storage formats.
#' Applies to all formats ending with the `"_qs"` suffix.
#' For details on formats, see the `format` argument of [tar_target()].
#' @param url Output of function [tar_resources_url()].
#' Non-default settings for storage formats ending with the `"_url"` suffix.
#' These settings include the `curl` handle for extra control over HTTP
#' requests. For details on formats, see the `format` argument of
#' [tar_target()].
#' @examples
#' # Somewhere in your target script file (usually _targets.R):
#' tar_target(
#' name,
#' command(),
#' format = "qs",
#' resources = tar_resources(
#' qs = tar_resources_qs(preset = "fast"),
#' future = tar_resources_future(resources = list(n_cores = 1))
#' )
#' )
tar_resources <- function(
  aws = NULL,
  clustermq = NULL,
  feather = NULL,
  fst = NULL,
  future = NULL,
  parquet = NULL,
  qs = NULL,
  url = NULL
) {
  # Collect the non-NULL resource settings into a named list, validating
  # that each value was produced by the matching tar_resources_*() helper.
  env <- environment()
  out <- list()
  for (arg in names(formals(tar_resources))) {
    value <- env[[arg]]
    if (is.null(value)) {
      next
    }
    assert_inherits(
      value,
      paste0("tar_resources_", arg),
      paste0(
        arg,
        " argument to tar_resources() must be output from tar_resources_",
        arg,
        "() or NULL."
      )
    )
    out[[arg]] <- value
  }
  out
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/games_functions.R
\name{achievements.unlock}
\alias{achievements.unlock}
\title{Unlocks this achievement for the currently authenticated player.}
\usage{
achievements.unlock(achievementId, consistencyToken = NULL)
}
\arguments{
\item{achievementId}{The ID of the achievement used by this method}
\item{consistencyToken}{The last-seen mutation timestamp}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/games
\item https://www.googleapis.com/auth/plus.login
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/games, https://www.googleapis.com/auth/plus.login)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/games/services/}{Google Documentation}
}
| /googlegamesv1.auto/man/achievements.unlock.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,021 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/games_functions.R
\name{achievements.unlock}
\alias{achievements.unlock}
\title{Unlocks this achievement for the currently authenticated player.}
\usage{
achievements.unlock(achievementId, consistencyToken = NULL)
}
\arguments{
\item{achievementId}{The ID of the achievement used by this method}
\item{consistencyToken}{The last-seen mutation timestamp}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/games
\item https://www.googleapis.com/auth/plus.login
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/games, https://www.googleapis.com/auth/plus.login)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/games/services/}{Google Documentation}
}
|
\name{mincontrast}
\alias{mincontrast}
\title{Method of Minimum Contrast}
\description{
A general low-level algorithm for fitting theoretical point process models
to point pattern data by the Method of Minimum Contrast.
}
\usage{
mincontrast(observed, theoretical, startpar, \dots,
ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL),
fvlab=list(label=NULL, desc="minimum contrast fit"),
explain=list(dataname=NULL, modelname=NULL, fname=NULL))
}
\arguments{
\item{observed}{
Summary statistic, computed for the data.
An object of class \code{"fv"}.
}
\item{theoretical}{
An R language function that calculates the theoretical expected value
of the summary statistic, given the model parameters.
See Details.
}
\item{startpar}{
Vector of initial values of the parameters of the
point process model (passed to \code{theoretical}).
}
\item{\dots}{
Additional arguments passed to the function \code{theoretical}
and to the optimisation algorithm \code{\link[stats]{optim}}.
}
\item{ctrl}{
Optional. List of arguments controlling the optimisation. See Details.
}
\item{fvlab}{
Optional. List containing some labels for the return value. See Details.
}
\item{explain}{
Optional. List containing strings that give a human-readable description
of the model, the data and the summary statistic.
}
}
\details{
This function is a general algorithm for fitting point process models
by the Method of Minimum Contrast. If you want to fit the
Thomas process, see \code{\link{thomas.estK}}.
If you want to fit a log-Gaussian Cox process, see
\code{\link{lgcp.estK}}. If you want to fit the Matern cluster
process, see \code{\link{matclust.estK}}.
The Method of Minimum Contrast (Diggle and Gratton, 1984)
is a general technique for fitting
a point process model to point pattern data. First a summary function
(typically the \eqn{K} function) is computed from the data point
pattern. Second, the theoretical expected
value of this summary statistic under the point process model
is derived (if possible, as an algebraic expression involving the
parameters of the model) or estimated from simulations of the model.
Then the model is fitted by finding the optimal parameter values
for the model to give the closest match between the theoretical
and empirical curves.
The argument \code{observed} should be an object of class \code{"fv"}
(see \code{\link{fv.object}}) containing the values of a summary
statistic computed from the data point pattern. Usually this is the
function \eqn{K(r)} computed by \code{\link{Kest}} or one of its relatives.
The argument \code{theoretical} should be a user-supplied function
that computes the theoretical expected value of the summary statistic.
It must have an argument named \code{par} that will be the vector
of parameter values for the model (the length and format of this
vector are determined by the starting values in \code{startpar}).
The function \code{theoretical} should also expect a second argument
(the first argument other than \code{par})
containing values of the distance \eqn{r} for which the theoretical
value of the summary statistic \eqn{K(r)} should be computed.
The value returned by \code{theoretical} should be a vector of the
same length as the given vector of \eqn{r} values.
The argument \code{ctrl} determines the contrast criterion
(the objective function that will be minimised).
The algorithm minimises the criterion
\deqn{
D(\theta)=
\int_{r_{\mbox{\scriptsize min}}}^{r_{\mbox{\scriptsize max}}}
|\hat F(r)^q - F_\theta(r)^q|^p \, {\rm d}r
}{
D(theta) = integral from rmin to rmax of
abs(Fhat(r)^q - F(theta,r)^q)^p
}
where \eqn{\theta}{theta} is the vector of parameters of the model,
\eqn{\hat F(r)}{Fhat(r)} is the observed value of the summary statistic
computed from the data, \eqn{F_\theta(r)}{F(theta,r)} is the
theoretical expected value of the summary statistic,
and \eqn{p,q} are two exponents. The default is \code{q = 1/4},
\code{p=2} so that the contrast criterion is the integrated squared
difference between the fourth roots of the two functions
(Waagepetersen, 2006).
The other arguments just make things print nicely.
The argument \code{fvlab} contains labels for the component
\code{fit} of the return value.
The argument \code{explain} contains human-readable strings
describing the data, the model and the summary statistic.
The \code{"..."} argument of \code{mincontrast} can be used to
pass extra arguments to the function \code{theoretical}
and/or to the optimisation function \code{\link[stats]{optim}}.
In this case, the function \code{theoretical}
should also have a \code{"..."} argument and should ignore it
(so that it ignores arguments intended for \code{\link[stats]{optim}}).
}
\value{
An object of class \code{"minconfit"}. There are methods for printing
and plotting this object. It contains the following components:
\item{par }{Vector of fitted parameter values.}
\item{fit }{Function value table (object of class \code{"fv"})
containing the observed values of the summary statistic
(\code{observed}) and the theoretical values of the summary
statistic computed from the fitted model parameters.
}
\item{opt }{The return value from the optimizer \code{\link{optim}}.}
\item{ctrl }{The control parameters of the algorithm.}
\item{info }{List of explanatory strings.}
}
\references{
Diggle, P.J. and Gratton, R.J. (1984)
Monte Carlo methods of inference for implicit statistical models.
\emph{Journal of the Royal Statistical Society, series B}
\bold{46}, 193 -- 212.
\ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
Statistical Inference and Simulation for Spatial Point Processes.
Chapman and Hall/CRC, Boca Raton.
Waagepetersen, R. (2006).
An estimating function approach to inference for
inhomogeneous Neyman-Scott processes.
\emph{Biometrics} \bold{63} (2007) 252--258.
}
\author{Rasmus Waagepetersen
\email{rw@math.auc.dk},
adapted for \pkg{spatstat} by
Adrian Baddeley
\email{Adrian.Baddeley@uwa.edu.au}
\url{http://www.maths.uwa.edu.au/~adrian/}
}
\seealso{
\code{\link{kppm}},
\code{\link{lgcp.estK}},
\code{\link{matclust.estK}},
\code{\link{thomas.estK}},
}
\keyword{spatial}
\keyword{models}
| /man/mincontrast.Rd | no_license | cuulee/spatstat | R | false | false | 6,446 | rd | \name{mincontrast}
\alias{mincontrast}
\title{Method of Minimum Contrast}
\description{
A general low-level algorithm for fitting theoretical point process models
to point pattern data by the Method of Minimum Contrast.
}
\usage{
mincontrast(observed, theoretical, startpar, \dots,
ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL),
fvlab=list(label=NULL, desc="minimum contrast fit"),
explain=list(dataname=NULL, modelname=NULL, fname=NULL))
}
\arguments{
\item{observed}{
Summary statistic, computed for the data.
An object of class \code{"fv"}.
}
\item{theoretical}{
An R language function that calculates the theoretical expected value
of the summary statistic, given the model parameters.
See Details.
}
\item{startpar}{
Vector of initial values of the parameters of the
point process model (passed to \code{theoretical}).
}
\item{\dots}{
Additional arguments passed to the function \code{theoretical}
and to the optimisation algorithm \code{\link[stats]{optim}}.
}
\item{ctrl}{
Optional. List of arguments controlling the optimisation. See Details.
}
\item{fvlab}{
Optional. List containing some labels for the return value. See Details.
}
\item{explain}{
Optional. List containing strings that give a human-readable description
of the model, the data and the summary statistic.
}
}
\details{
This function is a general algorithm for fitting point process models
by the Method of Minimum Contrast. If you want to fit the
Thomas process, see \code{\link{thomas.estK}}.
If you want to fit a log-Gaussian Cox process, see
\code{\link{lgcp.estK}}. If you want to fit the Matern cluster
process, see \code{\link{matclust.estK}}.
The Method of Minimum Contrast (Diggle and Gratton, 1984)
is a general technique for fitting
a point process model to point pattern data. First a summary function
(typically the \eqn{K} function) is computed from the data point
pattern. Second, the theoretical expected
value of this summary statistic under the point process model
is derived (if possible, as an algebraic expression involving the
parameters of the model) or estimated from simulations of the model.
Then the model is fitted by finding the optimal parameter values
for the model to give the closest match between the theoretical
and empirical curves.
The argument \code{observed} should be an object of class \code{"fv"}
(see \code{\link{fv.object}}) containing the values of a summary
statistic computed from the data point pattern. Usually this is the
function \eqn{K(r)} computed by \code{\link{Kest}} or one of its relatives.
The argument \code{theoretical} should be a user-supplied function
that computes the theoretical expected value of the summary statistic.
It must have an argument named \code{par} that will be the vector
of parameter values for the model (the length and format of this
vector are determined by the starting values in \code{startpar}).
The function \code{theoretical} should also expect a second argument
(the first argument other than \code{par})
containing values of the distance \eqn{r} for which the theoretical
value of the summary statistic \eqn{K(r)} should be computed.
The value returned by \code{theoretical} should be a vector of the
same length as the given vector of \eqn{r} values.
The argument \code{ctrl} determines the contrast criterion
(the objective function that will be minimised).
The algorithm minimises the criterion
\deqn{
D(\theta)=
\int_{r_{\mbox{\scriptsize min}}}^{r_{\mbox{\scriptsize max}}}
|\hat F(r)^q - F_\theta(r)^q|^p \, {\rm d}r
}{
D(theta) = integral from rmin to rmax of
abs(Fhat(r)^q - F(theta,r)^q)^p
}
where \eqn{\theta}{theta} is the vector of parameters of the model,
\eqn{\hat F(r)}{Fhat(r)} is the observed value of the summary statistic
computed from the data, \eqn{F_\theta(r)}{F(theta,r)} is the
theoretical expected value of the summary statistic,
and \eqn{p,q} are two exponents. The default is \code{q = 1/4},
\code{p=2} so that the contrast criterion is the integrated squared
difference between the fourth roots of the two functions
(Waagepetersen, 2006).
The other arguments just make things print nicely.
The argument \code{fvlab} contains labels for the component
\code{fit} of the return value.
The argument \code{explain} contains human-readable strings
describing the data, the model and the summary statistic.
The \code{"..."} argument of \code{mincontrast} can be used to
pass extra arguments to the function \code{theoretical}
and/or to the optimisation function \code{\link[stats]{optim}}.
In this case, the function \code{theoretical}
should also have a \code{"..."} argument and should ignore it
(so that it ignores arguments intended for \code{\link[stats]{optim}}).
}
\value{
An object of class \code{"minconfit"}. There are methods for printing
and plotting this object. It contains the following components:
\item{par }{Vector of fitted parameter values.}
\item{fit }{Function value table (object of class \code{"fv"})
containing the observed values of the summary statistic
(\code{observed}) and the theoretical values of the summary
statistic computed from the fitted model parameters.
}
\item{opt }{The return value from the optimizer \code{\link{optim}}.}
  \item{ctrl }{The control parameters of the algorithm.}
\item{info }{List of explanatory strings.}
}
\references{
Diggle, P.J. and Gratton, R.J. (1984)
Monte Carlo methods of inference for implicit statistical models.
\emph{Journal of the Royal Statistical Society, series B}
\bold{46}, 193 -- 212.
\ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
Statistical Inference and Simulation for Spatial Point Processes.
Chapman and Hall/CRC, Boca Raton.
Waagepetersen, R. (2006).
An estimating function approach to inference for
inhomogeneous Neyman-Scott processes.
\emph{Biometrics} \bold{63} (2007) 252--258.
}
\author{Rasmus Waagepetersen
\email{rw@math.auc.dk},
adapted for \pkg{spatstat} by
Adrian Baddeley
\email{Adrian.Baddeley@uwa.edu.au}
\url{http://www.maths.uwa.edu.au/~adrian/}
}
\seealso{
\code{\link{kppm}},
\code{\link{lgcp.estK}},
\code{\link{matclust.estK}},
  \code{\link{thomas.estK}}
}
\keyword{spatial}
\keyword{models}
|
globalVariables(".")
#' Skim a data frame, getting useful summary statistics
#'
#' `skim()` is an alternative to [`summary()`], quickly providing a broad
#' overview of a data frame. It handles data of all types, dispatching a
#' different set of summary functions based on the types of columns in the data
#' frame.
#'
#' Each call produces a `skim_df`, which is a fundamentally a tibble with a
#' special print method. Instead of showing the result in a long format, `skim`
#' prints a wide version of your data with formatting applied to each column.
#' Printing does not change the structure of the `skim_df`, which remains a long
#' tibble.
#'
#' If you just want to see the printed output, call `skim_tee()` instead.
#' This function returns the original data frame.
#'
#' If you want to work with a data frame that resembles the printed output,
#' call [`skim_to_wide()`] or for a named list of data frames by type
#' [`skim_to_list()`]. Note that all of the columns in the data frames produced
#' by these functions are character. The intent is that you will be processing
#' the **printed** result further, not the original data.
#'
#' `skim()` is designed to operate in pipes and to generally play nicely with
#' other `tidyverse` functions. This means that you can use `tidyselect` helpers
#' within `skim` to select or drop specific columns for summary. You can also
#' further work with a `skim_df` using `dplyr` functions in a pipeline.
#'
#' @section Customizing skim:
#' `skim()` is an intentionally simple function, with minimal arguments like
#' [`summary()`]. Nonetheless, this package provides two broad approaches to
#' how you can customize `skim()`'s behavior. You can customize the functions
#' that are called to produce summary statistics with [`skim_with()`]. You
#' can customize how the output is displayed with [`skim_format()`].
#'
#' @section Unicode rendering:
#' If the rendered examples show unencoded values such as `<U+2587>` you will
#' need to change your locale to allow proper rendering. Please review the
#' *Using Skimr* vignette for more information
#' (`vignette("Using_skimr", package = "skimr")`).
#'
#' @param .data A tibble, or an object that can be coerced into a tibble.
#' @param ... Additional options, normally used to list individual unquoted
#' column names.
#' @return A `skim_df` object, which can be treated like a tibble in most
#' instances.
#' @examples
#' skim(iris)
#'
#' # Use tidyselect
#' skim(iris, Species)
#' skim(iris, starts_with("Sepal"))
#'
#' # Skim also works groupwise
#' dplyr::group_by(iris, Species) %>% skim()
#'
#' # Skim pipelines; now we work with the tall format
#' skim(iris) %>% as.data.frame()
#' skim(iris) %>% dplyr::filter(type == "factor")
#'
#' # Which column has the greatest mean value?
#' skim(iris) %>%
#' dplyr::filter(stat == "mean") %>%
#' dplyr::arrange(dplyr::desc(value))
#'
#' # Use skim_tee to view the skim results and
#' # continue using the original data.
#' chickwts %>% skim_tee() %>% dplyr::filter(feed == "sunflower")
#' @export
skim <- function(.data, ...) {
UseMethod("skim")
}
#'@export
skim.data.frame <- function(.data, ... ) {
.vars <- rlang::quos(...)
if (length(.vars) == 0) selected <- tidyselect::everything(.data)
else selected <- tidyselect::vars_select(names(.data), !!! .vars)
rows <- purrr::map(.data[selected], skim_v)
combined <- dplyr::bind_rows(rows, .id = "variable")
structure(combined, class = c("skim_df", class(combined)),
data_rows = nrow(.data), data_cols = ncol(.data),
df_name = substitute(.data))
}
#' @export
skim.grouped_df <- function(.data, ...) {
defaults <- options(dplyr.show_progress = FALSE)
on.exit(options(defaults))
skimmed <- dplyr::do(.data, skim(., ...))
# Drop the grouping variable
groups <- dplyr::groups(skimmed)
to_drop <- quote(!(variable %in% groups))
skimmed <- dplyr::filter(skimmed, !!to_drop)
structure(skimmed, class = c("skim_df", class(skimmed)),
data_rows = nrow(.data), data_cols = ncol(.data),
df_name = substitute(.data))
}
#' @export
skim.default <-function(.data, ...){
if (!is.atomic(.data) | !is.null(dim(.data))[1]){
return(message("No skim method exists for class ", class(.data), "."))
}
skimmed <- skim_v(.data)
skimmed$variable <- deparse(substitute(.data))
skimmed <- dplyr::select(skimmed, !!rlang::sym("variable"),
!!rlang::sym("type"),
!!rlang::sym("stat"),
!!rlang::sym("level"),
!!rlang::sym("value"),
!!rlang::sym("formatted"))
structure(skimmed, class = c("skim_vector", "skim_df", class(skimmed)),
df_name = skimmed$variable[1])
}
#' @rdname skim
#' @export
skim_tee <- function(.data, ...) {
print(skim(.data))
invisible(.data)
}
| /R/skim.R | no_license | alexilliamson/skimr | R | false | false | 4,912 | r | globalVariables(".")
#' Skim a data frame, getting useful summary statistics
#'
#' `skim()` is an alternative to [`summary()`], quickly providing a broad
#' overview of a data frame. It handles data of all types, dispatching a
#' different set of summary functions based on the types of columns in the data
#' frame.
#'
#' Each call produces a `skim_df`, which is a fundamentally a tibble with a
#' special print method. Instead of showing the result in a long format, `skim`
#' prints a wide version of your data with formatting applied to each column.
#' Printing does not change the structure of the `skim_df`, which remains a long
#' tibble.
#'
#' If you just want to see the printed output, call `skim_tee()` instead.
#' This function returns the original data frame.
#'
#' If you want to work with a data frame that resembles the printed output,
#' call [`skim_to_wide()`] or for a named list of data frames by type
#' [`skim_to_list()`]. Note that all of the columns in the data frames produced
#' by these functions are character. The intent is that you will be processing
#' the **printed** result further, not the original data.
#'
#' `skim()` is designed to operate in pipes and to generally play nicely with
#' other `tidyverse` functions. This means that you can use `tidyselect` helpers
#' within `skim` to select or drop specific columns for summary. You can also
#' further work with a `skim_df` using `dplyr` functions in a pipeline.
#'
#' @section Customizing skim:
#' `skim()` is an intentionally simple function, with minimal arguments like
#' [`summary()`]. Nonetheless, this package provides two broad approaches to
#' how you can customize `skim()`'s behavior. You can customize the functions
#' that are called to produce summary statistics with [`skim_with()`]. You
#' can customize how the output is displayed with [`skim_format()`].
#'
#' @section Unicode rendering:
#' If the rendered examples show unencoded values such as `<U+2587>` you will
#' need to change your locale to allow proper rendering. Please review the
#' *Using Skimr* vignette for more information
#' (`vignette("Using_skimr", package = "skimr")`).
#'
#' @param .data A tibble, or an object that can be coerced into a tibble.
#' @param ... Additional options, normally used to list individual unquoted
#' column names.
#' @return A `skim_df` object, which can be treated like a tibble in most
#' instances.
#' @examples
#' skim(iris)
#'
#' # Use tidyselect
#' skim(iris, Species)
#' skim(iris, starts_with("Sepal"))
#'
#' # Skim also works groupwise
#' dplyr::group_by(iris, Species) %>% skim()
#'
#' # Skim pipelines; now we work with the tall format
#' skim(iris) %>% as.data.frame()
#' skim(iris) %>% dplyr::filter(type == "factor")
#'
#' # Which column has the greatest mean value?
#' skim(iris) %>%
#' dplyr::filter(stat == "mean") %>%
#' dplyr::arrange(dplyr::desc(value))
#'
#' # Use skim_tee to view the skim results and
#' # continue using the original data.
#' chickwts %>% skim_tee() %>% dplyr::filter(feed == "sunflower")
#' @export
skim <- function(.data, ...) {
UseMethod("skim")
}
#'@export
skim.data.frame <- function(.data, ... ) {
.vars <- rlang::quos(...)
if (length(.vars) == 0) selected <- tidyselect::everything(.data)
else selected <- tidyselect::vars_select(names(.data), !!! .vars)
rows <- purrr::map(.data[selected], skim_v)
combined <- dplyr::bind_rows(rows, .id = "variable")
structure(combined, class = c("skim_df", class(combined)),
data_rows = nrow(.data), data_cols = ncol(.data),
df_name = substitute(.data))
}
#' @export
skim.grouped_df <- function(.data, ...) {
defaults <- options(dplyr.show_progress = FALSE)
on.exit(options(defaults))
skimmed <- dplyr::do(.data, skim(., ...))
# Drop the grouping variable
groups <- dplyr::groups(skimmed)
to_drop <- quote(!(variable %in% groups))
skimmed <- dplyr::filter(skimmed, !!to_drop)
structure(skimmed, class = c("skim_df", class(skimmed)),
data_rows = nrow(.data), data_cols = ncol(.data),
df_name = substitute(.data))
}
#' @export
skim.default <-function(.data, ...){
if (!is.atomic(.data) | !is.null(dim(.data))[1]){
return(message("No skim method exists for class ", class(.data), "."))
}
skimmed <- skim_v(.data)
skimmed$variable <- deparse(substitute(.data))
skimmed <- dplyr::select(skimmed, !!rlang::sym("variable"),
!!rlang::sym("type"),
!!rlang::sym("stat"),
!!rlang::sym("level"),
!!rlang::sym("value"),
!!rlang::sym("formatted"))
structure(skimmed, class = c("skim_vector", "skim_df", class(skimmed)),
df_name = skimmed$variable[1])
}
#' @rdname skim
#' @export
skim_tee <- function(.data, ...) {
print(skim(.data))
invisible(.data)
}
|
#!/applications/R/R-3.5.0/bin/Rscript
# Compile cMMb gene quantile GO enrichment analysis results into one supplemental table
library(GO.db)
quantiles <- 1:4
summaryTableDFList <- lapply(seq_along(quantiles), function(x) {
Wg <- data.frame(Subgenome = "All",
cMMb_gene_quantile = paste0("Quantile ", x),
read.table(paste0("/home/ajt200/analysis/wheat/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/",
"quantiles_by_cMMb/GO/featureIDs_quantile", x, "_of_4_by_cMMb_of_genes_in_",
"Agenome_Bgenome_Dgenome_genomewide_GO_BP_enrichment.tsv"),
header = T, sep = "\t", quote = "\"", check.names = F, stringsAsFactors = F),
stringsAsFactors = F)
Ag <- data.frame(Subgenome = "A",
cMMb_gene_quantile = paste0("Quantile ", x),
read.table(paste0("/home/ajt200/analysis/wheat/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/",
"quantiles_by_cMMb/GO/featureIDs_quantile", x, "_of_4_by_cMMb_of_genes_in_",
"Agenome_genomewide_GO_BP_enrichment.tsv"),
header = T, sep = "\t", quote = "\"", check.names = F, stringsAsFactors = F),
stringsAsFactors = F)
Bg <- data.frame(Subgenome = "B",
cMMb_gene_quantile = paste0("Quantile ", x),
read.table(paste0("/home/ajt200/analysis/wheat/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/",
"quantiles_by_cMMb/GO/featureIDs_quantile", x, "_of_4_by_cMMb_of_genes_in_",
"Bgenome_genomewide_GO_BP_enrichment.tsv"),
header = T, sep = "\t", quote = "\"", check.names = F, stringsAsFactors = F),
stringsAsFactors = F)
Dg <- data.frame(Subgenome = "D",
cMMb_gene_quantile = paste0("Quantile ", x),
read.table(paste0("/home/ajt200/analysis/wheat/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/",
"quantiles_by_cMMb/GO/featureIDs_quantile", x, "_of_4_by_cMMb_of_genes_in_",
"Dgenome_genomewide_GO_BP_enrichment.tsv"),
header = T, sep = "\t", quote = "\"", check.names = F, stringsAsFactors = F),
stringsAsFactors = F)
rbind(Wg, Ag, Bg, Dg)
})
summaryTableDF <- do.call(rbind, summaryTableDFList)
# Remove additional test statistics from other methods ("classic", "elim", "weight")
summaryTableDF <- summaryTableDF[,-c(8:11)]
# Retain GO terms with topGO Fisher's exact test P-values <= 0.05
summaryTableDF$topGOFisher <- sub(pattern = "< ", replacement = "", summaryTableDF$topGOFisher)
summaryTableDF$topGOFisher <- as.numeric(summaryTableDF$topGOFisher)
summaryTableDF <- summaryTableDF[summaryTableDF$topGOFisher <= 0.05,]
# Replace truncated GO terms with full GO terms
summaryTableDF <- data.frame(summaryTableDF,
select(GO.db, keys = summaryTableDF$GO.ID,
columns = c("GOID", "TERM"),#"DEFINITION"),
keytype = "GOID"),
stringsAsFactors = F)
# Ensure summaryTableDF$GO.ID matches the GO.db-obtained summaryTableDF$GOID,
# and thus truncated GO terms correspond to the correct full GO terms
stopifnot(all.equal(summaryTableDF$GO.ID, summaryTableDF$GOID))
print("Number of topGO-derived truncated GO terms to be replaced with corresponding GO.db-obtained full GO terms =")
print(length(summaryTableDF$Term[which(grepl("\\.\\.\\.", summaryTableDF$Term))]))
print("Number of non-matching summaryTableDF$Term (original) and summaryTableDF$TERM (GO.db-obtained) GO terms =")
print(all.equal(summaryTableDF$Term, summaryTableDF$TERM))
summaryTableDF$Term <- summaryTableDF$TERM
summaryTableDF <- summaryTableDF[,-c(9:10)]
summaryTableDF <- summaryTableDF[,c(1:5, 7, 6, 8)]
print(colnames(summaryTableDF))
colnames(summaryTableDF) <- c("Subgenome", "cM/Mb gene quantile",
"GO ID", "GO term",
"Annotated genes in subgenome", "Expected genes in quantile", "Observed genes in quantile", "P")
print(colnames(summaryTableDF))
write.table(summaryTableDF,
file = "Supplemental_TableS4_cMMb_gene_quantiles_GO.tsv",
col.names = T, row.names = F, sep = "\t", quote = F)
write.csv(summaryTableDF,
file = "Supplemental_TableS4_cMMb_gene_quantiles_GO.csv",
row.names = F, quote = F)
| /manuscript_DMC1_ASY1/GenomeResearch/SuppTables/SuppTableS4_cMMb_gene_quantiles_GO.R | no_license | ajtock/wheat | R | false | false | 4,784 | r | #!/applications/R/R-3.5.0/bin/Rscript
# Compile cMMb gene quantile GO enrichment analysis results into one supplemental table
library(GO.db)
quantiles <- 1:4
summaryTableDFList <- lapply(seq_along(quantiles), function(x) {
Wg <- data.frame(Subgenome = "All",
cMMb_gene_quantile = paste0("Quantile ", x),
read.table(paste0("/home/ajt200/analysis/wheat/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/",
"quantiles_by_cMMb/GO/featureIDs_quantile", x, "_of_4_by_cMMb_of_genes_in_",
"Agenome_Bgenome_Dgenome_genomewide_GO_BP_enrichment.tsv"),
header = T, sep = "\t", quote = "\"", check.names = F, stringsAsFactors = F),
stringsAsFactors = F)
Ag <- data.frame(Subgenome = "A",
cMMb_gene_quantile = paste0("Quantile ", x),
read.table(paste0("/home/ajt200/analysis/wheat/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/",
"quantiles_by_cMMb/GO/featureIDs_quantile", x, "_of_4_by_cMMb_of_genes_in_",
"Agenome_genomewide_GO_BP_enrichment.tsv"),
header = T, sep = "\t", quote = "\"", check.names = F, stringsAsFactors = F),
stringsAsFactors = F)
Bg <- data.frame(Subgenome = "B",
cMMb_gene_quantile = paste0("Quantile ", x),
read.table(paste0("/home/ajt200/analysis/wheat/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/",
"quantiles_by_cMMb/GO/featureIDs_quantile", x, "_of_4_by_cMMb_of_genes_in_",
"Bgenome_genomewide_GO_BP_enrichment.tsv"),
header = T, sep = "\t", quote = "\"", check.names = F, stringsAsFactors = F),
stringsAsFactors = F)
Dg <- data.frame(Subgenome = "D",
cMMb_gene_quantile = paste0("Quantile ", x),
read.table(paste0("/home/ajt200/analysis/wheat/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/",
"quantiles_by_cMMb/GO/featureIDs_quantile", x, "_of_4_by_cMMb_of_genes_in_",
"Dgenome_genomewide_GO_BP_enrichment.tsv"),
header = T, sep = "\t", quote = "\"", check.names = F, stringsAsFactors = F),
stringsAsFactors = F)
rbind(Wg, Ag, Bg, Dg)
})
summaryTableDF <- do.call(rbind, summaryTableDFList)
# Remove additional test statistics from other methods ("classic", "elim", "weight")
summaryTableDF <- summaryTableDF[,-c(8:11)]
# Retain GO terms with topGO Fisher's exact test P-values <= 0.05
summaryTableDF$topGOFisher <- sub(pattern = "< ", replacement = "", summaryTableDF$topGOFisher)
summaryTableDF$topGOFisher <- as.numeric(summaryTableDF$topGOFisher)
summaryTableDF <- summaryTableDF[summaryTableDF$topGOFisher <= 0.05,]
# Replace truncated GO terms with full GO terms
summaryTableDF <- data.frame(summaryTableDF,
select(GO.db, keys = summaryTableDF$GO.ID,
columns = c("GOID", "TERM"),#"DEFINITION"),
keytype = "GOID"),
stringsAsFactors = F)
# Ensure summaryTableDF$GO.ID matches the GO.db-obtained summaryTableDF$GOID,
# and thus truncated GO terms correspond to the correct full GO terms
stopifnot(all.equal(summaryTableDF$GO.ID, summaryTableDF$GOID))
print("Number of topGO-derived truncated GO terms to be replaced with corresponding GO.db-obtained full GO terms =")
print(length(summaryTableDF$Term[which(grepl("\\.\\.\\.", summaryTableDF$Term))]))
print("Number of non-matching summaryTableDF$Term (original) and summaryTableDF$TERM (GO.db-obtained) GO terms =")
print(all.equal(summaryTableDF$Term, summaryTableDF$TERM))
summaryTableDF$Term <- summaryTableDF$TERM
summaryTableDF <- summaryTableDF[,-c(9:10)]
summaryTableDF <- summaryTableDF[,c(1:5, 7, 6, 8)]
print(colnames(summaryTableDF))
colnames(summaryTableDF) <- c("Subgenome", "cM/Mb gene quantile",
"GO ID", "GO term",
"Annotated genes in subgenome", "Expected genes in quantile", "Observed genes in quantile", "P")
print(colnames(summaryTableDF))
write.table(summaryTableDF,
file = "Supplemental_TableS4_cMMb_gene_quantiles_GO.tsv",
col.names = T, row.names = F, sep = "\t", quote = F)
write.csv(summaryTableDF,
file = "Supplemental_TableS4_cMMb_gene_quantiles_GO.csv",
row.names = F, quote = F)
|
for (j in set(docs))
{
docs[[j]] <- gsub("/", " ", docs[[j]])
docs[[j]] <- gsub("@", " ", docs[[j]])
docs[[j]] <- gsub("\\|", " ", docs[[j]])
} | /specificPreprocessing.R | no_license | borjaeg/TextMiningWithR | R | false | false | 149 | r | for (j in set(docs))
{
docs[[j]] <- gsub("/", " ", docs[[j]])
docs[[j]] <- gsub("@", " ", docs[[j]])
docs[[j]] <- gsub("\\|", " ", docs[[j]])
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aveytoolkit_GetEqn.R
\name{GetEqn}
\alias{GetEqn}
\title{GetEqn}
\usage{
GetEqn(m)
}
\arguments{
\item{m}{a model object}
}
\description{
\code{GetEqn} gets the equation for various models in a human readable format
}
\details{
}
\examples{
## First Example
}
\references{
original lm_eqn and inspiration from this SO post \url{http://stackoverflow.com/questions/7549694/ggplot2-adding-regression-line-equation-and-r2-on-graph}.
}
\author{
Stefan Avey
}
\keyword{aveytoolkit}
| /man/GetEqn.Rd | no_license | stefanavey/aveytoolkit | R | false | true | 556 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aveytoolkit_GetEqn.R
\name{GetEqn}
\alias{GetEqn}
\title{GetEqn}
\usage{
GetEqn(m)
}
\arguments{
\item{m}{a model object}
}
\description{
\code{GetEqn} gets the equation for various models in a human readable format
}
\details{
}
\examples{
## First Example
}
\references{
original lm_eqn and inspiration from this SO post \url{http://stackoverflow.com/questions/7549694/ggplot2-adding-regression-line-equation-and-r2-on-graph}.
}
\author{
Stefan Avey
}
\keyword{aveytoolkit}
|
#' Import Syrinx selections
#'
#' \code{imp.syrinx} imports Syrinx selection data from many files simultaneously.
#' All files must be have the same columns.
#' @usage imp.syrinx(path = NULL, all.data = FALSE, recursive = FALSE,
#' exclude = FALSE, hz.to.khz = TRUE)
#' @param path A character string indicating the path of the directory in which to look for the text files.
#' If not provided (default) the function searches into the current working directory. Default is \code{NULL}).
#' @param all.data Logical. If \code{TRUE}) all columns in text files are returned. Default is \code{FALSE}). Note
#' that all files should contain exactly the same columns in the same order.
#' @param recursive Logical. If \code{TRUE}) the listing recurse into sub-directories.
#' @param exclude Logical. Controls whether files that cannot be read are ignored (\code{TRUE}). Default is \code{FALSE}.
#' @param hz.to.khz Logical. Controls if frequency variables should be converted from Hz (the unit used by Syrinx) to kHz (the unit used by warbleR). Default if \code{TRUE}. Ignored if all.data is \code{TRUE}.
#' @return A single data frame with information of the selection files. If all.data argument is set to \code{FALSE}) the data
#' frame contains the following columns: selec, start, end, and selec.file. If sound.file.col is provided the data frame
#' will also contain a 'sound.files' column. In addition, all rows with duplicated data are removed. This is useful when
#' both spectrogram and waveform views are included in the Syrinx selection files. If all.data is set to \code{TRUE} then all
#' columns in selection files are returned.
#' @seealso \code{\link{imp.raven}}
#' @export
#' @name imp.syrinx
#' @examples
#' \dontrun{
#' # First set temporary folder
#' setwd(tempdir())
#'
#' #load data
#' data(selection.files)
#'
#' write.table(selection.files[[3]],file = "harpyeagle.wav.txt",row.names = FALSE,
#' col.names = FALSE, sep= "\t")
#'
#' write.table(selection.files[[4]],file = "Phae.long4.wav.txt",row.names = FALSE,
#' col.names = FALSE, sep= "\t")
#'
#' syr.dat<-imp.syrinx(all.data = FALSE)
#'
#' View(syr.dat)
#'
#' #getting all the data
#' syr.dat<-imp.syrinx(all.data = TRUE)
#'
#' View(syr.dat)
#' }
#' @author Marcelo Araya-Salas (\email{araya-salas@@cornell.edu})
#last modification on jul-5-2016 (MAS)
imp.syrinx <- function(path = NULL, all.data = FALSE, recursive = FALSE,
exclude = FALSE, hz.to.khz = TRUE)
{
# reset working directory
wd <- getwd()
on.exit(setwd(wd))
#check path to working directory
if(is.null(path)) path <- getwd() else {if(!file.exists(path)) stop("'path' provided does not exist") else
setwd(path)
}
sel.txt <- list.files(full.names = TRUE)
sel.txt2 <- list.files(full.names = FALSE)
sel.txt <- sel.txt[grep(".log$|.txt$",ignore.case = TRUE, sel.txt)]
sel.txt2 <- sel.txt2[grep(".log$|.txt$",ignore.case = TRUE, sel.txt2)]
if(length(sel.txt) == 0) stop("No selection files in working directory/'path' provided")
b<-NULL
if(substring(text = readLines(sel.txt[1])[1], first = 0, last = 9) == "fieldkey:") field <- T else field <- F
clist<-lapply(1:length(sel.txt), function(i)
{
if(field) {
a <- try(read.table(sel.txt[i], header = TRUE, sep = "\t", fill = TRUE, stringsAsFactors = FALSE), silent = TRUE)
if(!exclude & class(a) == "try-error") stop(paste("The selection file",sel.txt[i], "cannot be read"))
if(!class(a) == "try-error" & !all.data) { c <- data.frame(selec.file = sel.txt2[i], sound.files = a[, grep("soundfile",colnames(a))],
selec = 1,
start = a[, grep("lefttimesec",colnames(a))],
end = a[, grep("righttimesec",colnames(a))],
low.freq = a[, grep("bottomfreq",colnames(a))],
high.freq = a[, grep("topfreq",colnames(a))])
for(i in 2:nrow(c)) if(c$selec.file[i] == c$selec.file[i-1]) c$selec[i]<-c$selec[i-1] + 1
} else c<-a
} else {
a <- try(read.table(sel.txt[i], header = FALSE, sep = "\t", fill = TRUE, stringsAsFactors = FALSE), silent = TRUE)
if(!exclude & class(a) == "try-error") stop(paste("The selection file",sel.txt[i], "cannot be read"))
if(!class(a) == "try-error")
{
c <- a[, seq(2, ncol(a), by =2)]
colnames(c) <- gsub(":", "", unlist(a[1, seq(1,ncol(a), by =2)]), fixed = TRUE)
if(!all.data) {c<-data.frame(sound.files = c[, grep("selected",colnames(c), ignore.case = TRUE)],
selec = 1,
start = c[, grep("lefttime",colnames(c), ignore.case = TRUE)],
end = c[, grep("righttime",colnames(c), ignore.case = TRUE)],
low.freq = c[, grep("bottomfreq",colnames(c), ignore.case = TRUE)],
high.freq = c[, grep("topfreq",colnames(c), ignore.case = TRUE)])
for(i in 2:nrow(c)) if(c$sound.files[i] == c$sound.files[i-1]) c$selec[i] <- c$selec[i-1] + 1}
} else c <- a
}
return(c)
})
clist <- clist[sapply(clist, is.data.frame)]
b <- do.call("rbind", clist)
if(!all.data) if(any(is.na(b$start))) warning("NAs found (empty rows)")
b <- b[!duplicated(b), ]
options(warn = -1)
if(!all.data)
{
b$start <- as.numeric(b$start)
b$end <- as.numeric(b$end)
#remove NA rows
b <- b[!is.na(b$start), ]
} else b <-b[b[,2] != names(b)[2],]
# convert to hz
if(hz.to.khz & !all.data & all(c("low.freq", "high.freq") %in% names(b)))
{b$low.freq <- as.numeric(b$low.freq) / 1000
b$high.freq <- as.numeric(b$high.freq) / 1000
}
return(b[!duplicated(b), ])
}
| /R/imp.syrinx.R | no_license | RTbecard/warbleR | R | false | false | 5,897 | r | #' Import Syrinx selections
#'
#' \code{imp.syrinx} imports Syrinx selection data from many files simultaneously.
#' All files must be have the same columns.
#' @usage imp.syrinx(path = NULL, all.data = FALSE, recursive = FALSE,
#' exclude = FALSE, hz.to.khz = TRUE)
#' @param path A character string indicating the path of the directory in which to look for the text files.
#' If not provided (default) the function searches into the current working directory. Default is \code{NULL}).
#' @param all.data Logical. If \code{TRUE}) all columns in text files are returned. Default is \code{FALSE}). Note
#' that all files should contain exactly the same columns in the same order.
#' @param recursive Logical. If \code{TRUE}) the listing recurse into sub-directories.
#' @param exclude Logical. Controls whether files that cannot be read are ignored (\code{TRUE}). Default is \code{FALSE}.
#' @param hz.to.khz Logical. Controls if frequency variables should be converted from Hz (the unit used by Syrinx) to kHz (the unit used by warbleR). Default if \code{TRUE}. Ignored if all.data is \code{TRUE}.
#' @return A single data frame with information of the selection files. If all.data argument is set to \code{FALSE}) the data
#' frame contains the following columns: selec, start, end, and selec.file. If sound.file.col is provided the data frame
#' will also contain a 'sound.files' column. In addition, all rows with duplicated data are removed. This is useful when
#' both spectrogram and waveform views are included in the Syrinx selection files. If all.data is set to \code{TRUE} then all
#' columns in selection files are returned.
#' @seealso \code{\link{imp.raven}}
#' @export
#' @name imp.syrinx
#' @examples
#' \dontrun{
#' # First set temporary folder
#' setwd(tempdir())
#'
#' #load data
#' data(selection.files)
#'
#' write.table(selection.files[[3]],file = "harpyeagle.wav.txt",row.names = FALSE,
#' col.names = FALSE, sep= "\t")
#'
#' write.table(selection.files[[4]],file = "Phae.long4.wav.txt",row.names = FALSE,
#' col.names = FALSE, sep= "\t")
#'
#' syr.dat<-imp.syrinx(all.data = FALSE)
#'
#' View(syr.dat)
#'
#' #getting all the data
#' syr.dat<-imp.syrinx(all.data = TRUE)
#'
#' View(syr.dat)
#' }
#' @author Marcelo Araya-Salas (\email{araya-salas@@cornell.edu})
#last modification on jul-5-2016 (MAS)
imp.syrinx <- function(path = NULL, all.data = FALSE, recursive = FALSE,
exclude = FALSE, hz.to.khz = TRUE)
{
# reset working directory
wd <- getwd()
on.exit(setwd(wd))
#check path to working directory
if(is.null(path)) path <- getwd() else {if(!file.exists(path)) stop("'path' provided does not exist") else
setwd(path)
}
sel.txt <- list.files(full.names = TRUE)
sel.txt2 <- list.files(full.names = FALSE)
sel.txt <- sel.txt[grep(".log$|.txt$",ignore.case = TRUE, sel.txt)]
sel.txt2 <- sel.txt2[grep(".log$|.txt$",ignore.case = TRUE, sel.txt2)]
if(length(sel.txt) == 0) stop("No selection files in working directory/'path' provided")
b<-NULL
if(substring(text = readLines(sel.txt[1])[1], first = 0, last = 9) == "fieldkey:") field <- T else field <- F
clist<-lapply(1:length(sel.txt), function(i)
{
if(field) {
a <- try(read.table(sel.txt[i], header = TRUE, sep = "\t", fill = TRUE, stringsAsFactors = FALSE), silent = TRUE)
if(!exclude & class(a) == "try-error") stop(paste("The selection file",sel.txt[i], "cannot be read"))
if(!class(a) == "try-error" & !all.data) { c <- data.frame(selec.file = sel.txt2[i], sound.files = a[, grep("soundfile",colnames(a))],
selec = 1,
start = a[, grep("lefttimesec",colnames(a))],
end = a[, grep("righttimesec",colnames(a))],
low.freq = a[, grep("bottomfreq",colnames(a))],
high.freq = a[, grep("topfreq",colnames(a))])
for(i in 2:nrow(c)) if(c$selec.file[i] == c$selec.file[i-1]) c$selec[i]<-c$selec[i-1] + 1
} else c<-a
} else {
a <- try(read.table(sel.txt[i], header = FALSE, sep = "\t", fill = TRUE, stringsAsFactors = FALSE), silent = TRUE)
if(!exclude & class(a) == "try-error") stop(paste("The selection file",sel.txt[i], "cannot be read"))
if(!class(a) == "try-error")
{
c <- a[, seq(2, ncol(a), by =2)]
colnames(c) <- gsub(":", "", unlist(a[1, seq(1,ncol(a), by =2)]), fixed = TRUE)
if(!all.data) {c<-data.frame(sound.files = c[, grep("selected",colnames(c), ignore.case = TRUE)],
selec = 1,
start = c[, grep("lefttime",colnames(c), ignore.case = TRUE)],
end = c[, grep("righttime",colnames(c), ignore.case = TRUE)],
low.freq = c[, grep("bottomfreq",colnames(c), ignore.case = TRUE)],
high.freq = c[, grep("topfreq",colnames(c), ignore.case = TRUE)])
for(i in 2:nrow(c)) if(c$sound.files[i] == c$sound.files[i-1]) c$selec[i] <- c$selec[i-1] + 1}
} else c <- a
}
return(c)
})
clist <- clist[sapply(clist, is.data.frame)]
b <- do.call("rbind", clist)
if(!all.data) if(any(is.na(b$start))) warning("NAs found (empty rows)")
b <- b[!duplicated(b), ]
options(warn = -1)
if(!all.data)
{
b$start <- as.numeric(b$start)
b$end <- as.numeric(b$end)
#remove NA rows
b <- b[!is.na(b$start), ]
} else b <-b[b[,2] != names(b)[2],]
# convert to hz
if(hz.to.khz & !all.data & all(c("low.freq", "high.freq") %in% names(b)))
{b$low.freq <- as.numeric(b$low.freq) / 1000
b$high.freq <- as.numeric(b$high.freq) / 1000
}
return(b[!duplicated(b), ])
}
|
# data_generator.R -- driver script for the MIBI/Vectra cell-level analysis.
# Loads the pre-pruned cell-level data, summarises cell-type proportions,
# draws heatmaps and ridge plots, and trains/tests a kNN classifier on a
# random subset of images.  Depends on project helpers defined elsewhere:
# summ_cell(), genheatmap(), fin_prop(), training_part(), testing_function().

# Number of worker cores for any parallel steps (leave one core free).
n_cores <- parallel::detectCores() - 1

# --- One-off preprocessing, kept for provenance (outputs already on disk) ---
#cell_data = read.csv("Data/cellData.csv")
#groups = c("Unidentified", "Immune", "Endothelial", "Mesenchymal-like", "Tumor", "Keratin-positive tumor")
#cell_data$Group = as.factor(cell_data$Group)
#levels(cell_data$Group) <- groups
#write.csv(cell_data,"Data/Full_cellData.csv",row.names = F)
cell_data <- read.csv("Data/Full_cellData.csv")

# Columns retained in the pruned data set (sample/cell ids, label, markers).
columns_of_interest <- c(1, 2, 55, 11, 12, 14:40, 42, 43, 45, 48, 49)
#write.csv(cell_data[,columns_of_interest],"Data/Full_cellData_pruned.csv",row.names = F)
cell_data <- read.csv("Data/Full_cellData_pruned.csv")
#write.csv(cell_data[,-3],"Data/Full_cellData_pruned.csv",row.names = F)

#-----Summary of cell proportions-------------------
summ_cell(cell_data)

# BUGFIX: `selected_data` was used below while its definition was commented
# out, so the script stopped with "object 'selected_data' not found".  The
# pruned CSV already contains only the columns of interest, so the selected
# data is the pruned table itself -- TODO confirm against the commented-out
# column selection.
#selected_cols = names(cell_data)[columns_of_interest]
#selected_data = cell_data[,selected_cols]
#only_markers = Matrix::Matrix(t(as.matrix(selected_data[,-c(1:4)])),sparse = TRUE)
selected_data <- cell_data

num_patients <- length(unique(selected_data$SampleID))
df <- selected_data[, -c(1:3)]  # drop id/label columns, keep marker values

#-----Generate Heatmaps of different kinds------------
genheatmap(df, is.log = FALSE)
genheatmap(df, is.log = TRUE)

#------proportion finder-----------------
fin_prop(selected_data)

#------Ridge-plot generator--------------
interested_traits <- c("Vimentin", "SMA", "CD45", "CD31", "Beta.catenin", "Keratin6")

# Sampling setup: pick roughly 10% of the images at random for training.
sampling_scheme <- "random"
number_of_images_chosen <- floor(num_patients * 0.1) + 1
chosen <- NULL
is.log <- TRUE  # logical flag (was the string "TRUE")
chosen_images <- NULL
if (sampling_scheme == "random") {
  chosen_images <- sample(num_patients, number_of_images_chosen)
}
chosen_images <- unique(chosen_images)
nonchosen_images <- setdiff(seq_len(num_patients), chosen_images)
result <- NULL

# Train the kNN classifier on the chosen images, then evaluate on the rest.
run_train <- training_part(chosen_images)
#save(run_train,file = "/Users/seals/Desktop/CSPH/New_vectra/Feb12/training_with_five_random_images.Rdata")
knnclassi <- run_train$knnclassifier
proportion <- run_train$training_proportion
res <- testing_function(proportion)
#------------------------------------------------------------------------
| /data_generator.R | no_license | sealx017/VectraMIBI | R | false | false | 2,062 | r |
# data_generator.R -- driver script for the MIBI/Vectra cell-level analysis.
# Loads the pre-pruned cell-level data, summarises cell-type proportions,
# draws heatmaps and ridge plots, and trains/tests a kNN classifier on a
# random subset of images.  Depends on project helpers defined elsewhere:
# summ_cell(), genheatmap(), fin_prop(), training_part(), testing_function().

# Number of worker cores for any parallel steps (leave one core free).
n_cores <- parallel::detectCores() - 1

# --- One-off preprocessing, kept for provenance (outputs already on disk) ---
#cell_data = read.csv("Data/cellData.csv")
#groups = c("Unidentified", "Immune", "Endothelial", "Mesenchymal-like", "Tumor", "Keratin-positive tumor")
#cell_data$Group = as.factor(cell_data$Group)
#levels(cell_data$Group) <- groups
#write.csv(cell_data,"Data/Full_cellData.csv",row.names = F)
cell_data <- read.csv("Data/Full_cellData.csv")

# Columns retained in the pruned data set (sample/cell ids, label, markers).
columns_of_interest <- c(1, 2, 55, 11, 12, 14:40, 42, 43, 45, 48, 49)
#write.csv(cell_data[,columns_of_interest],"Data/Full_cellData_pruned.csv",row.names = F)
cell_data <- read.csv("Data/Full_cellData_pruned.csv")
#write.csv(cell_data[,-3],"Data/Full_cellData_pruned.csv",row.names = F)

#-----Summary of cell proportions-------------------
summ_cell(cell_data)

# BUGFIX: `selected_data` was used below while its definition was commented
# out, so the script stopped with "object 'selected_data' not found".  The
# pruned CSV already contains only the columns of interest, so the selected
# data is the pruned table itself -- TODO confirm against the commented-out
# column selection.
#selected_cols = names(cell_data)[columns_of_interest]
#selected_data = cell_data[,selected_cols]
#only_markers = Matrix::Matrix(t(as.matrix(selected_data[,-c(1:4)])),sparse = TRUE)
selected_data <- cell_data

num_patients <- length(unique(selected_data$SampleID))
df <- selected_data[, -c(1:3)]  # drop id/label columns, keep marker values

#-----Generate Heatmaps of different kinds------------
genheatmap(df, is.log = FALSE)
genheatmap(df, is.log = TRUE)

#------proportion finder-----------------
fin_prop(selected_data)

#------Ridge-plot generator--------------
interested_traits <- c("Vimentin", "SMA", "CD45", "CD31", "Beta.catenin", "Keratin6")

# Sampling setup: pick roughly 10% of the images at random for training.
sampling_scheme <- "random"
number_of_images_chosen <- floor(num_patients * 0.1) + 1
chosen <- NULL
is.log <- TRUE  # logical flag (was the string "TRUE")
chosen_images <- NULL
if (sampling_scheme == "random") {
  chosen_images <- sample(num_patients, number_of_images_chosen)
}
chosen_images <- unique(chosen_images)
nonchosen_images <- setdiff(seq_len(num_patients), chosen_images)
result <- NULL

# Train the kNN classifier on the chosen images, then evaluate on the rest.
run_train <- training_part(chosen_images)
#save(run_train,file = "/Users/seals/Desktop/CSPH/New_vectra/Feb12/training_with_five_random_images.Rdata")
knnclassi <- run_train$knnclassifier
proportion <- run_train$training_proportion
res <- testing_function(proportion)
#------------------------------------------------------------------------
|
## ---------------------------------------------------------------------------
## poisson.CARlocalised()
##
## MCMC fitting of a spatio-temporal Poisson log-linear model with a
## localised autoregressive CAR random-effects structure.  Each of the
## K areal units at each of the N time points is allocated to one of G
## ordered intercept clusters (lambda), with spatio-temporally smooth
## residual variation (phi) on top, an AR(1) temporal parameter (gamma,
## reported as rho.T) and CAR variance tau2.
##
## Arguments
##   formula      model formula (response ~ covariates); parsed by
##                common.frame.localised().
##   data         optional data frame containing the model variables.
##   G            number of intercept clusters (integer >= 2).
##   W            K x K symmetric spatial neighbourhood matrix.
##   burnin       number of MCMC iterations to discard.
##   n.sample     total number of MCMC iterations.
##   thin         thinning interval for stored samples.
##   prior.mean.beta, prior.var.beta   Gaussian prior for beta
##                (defaults: mean 0, variance 100000).
##   prior.delta  upper limit of the Uniform(1, prior.delta) prior on the
##                cluster smoothing parameter delta (default 10).
##   prior.tau2   (shape, scale) of the inverse-gamma prior on tau2
##                (default c(1, 0.01)).
##   MALA         use MALA (TRUE) or random-walk (FALSE) proposals for phi.
##   verbose      print progress to the console.
##
## Returns a "CARBayesST" object: posterior summaries, MCMC samples, fitted
## values, residuals, model-fit criteria, and the posterior modal cluster
## allocations (localised.structure).
##
## NOTE(review): relies on package-internal helpers (common.*, the compiled
## updaters poissonbetaupdateMALA/RW, poissonarcarupdateMALA/RW, Zupdatesqpoi,
## gammaquadformcompute, tauquadformcompute) and on truncnorm/coda.
## Compared with the previous revision, four dead assignments were removed
## (an unused beta proposal draw plus its proposal-covariance setup, Y.extend,
## which.miss.mat, and an overwritten summary.lambda preallocation).  The
## removed proposal draw consumed p N(0,1) variates per iteration, so seeded
## RNG streams differ from the old code; the sampled posterior is unchanged.
## ---------------------------------------------------------------------------
poisson.CARlocalised <- function(formula, data=NULL, G, W, burnin, n.sample, thin=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.delta=NULL, prior.tau2=NULL, MALA=TRUE, verbose=TRUE)
{
    ##############################################
    #### Format the arguments and check for errors
    ##############################################
    #### Verbose (returns the start time for the final timing message)
    a <- common.verbose(verbose)

    #### Frame object - standardised design matrix, response and offset
    frame.results <- common.frame.localised(formula, data, "poisson")
    N.all <- frame.results$n
    p <- frame.results$p
    X <- frame.results$X
    X.standardised <- frame.results$X.standardised
    X.sd <- frame.results$X.sd
    X.mean <- frame.results$X.mean
    X.indicator <- frame.results$X.indicator
    offset <- frame.results$offset
    Y <- frame.results$Y

    #### This model does not allow missing responses
    which.miss <- as.numeric(!is.na(Y))
    n.miss <- N.all - sum(which.miss)
    if(n.miss > 0) stop("the response has missing 'NA' values.", call.=FALSE)

    #### Check on MALA argument
    if(length(MALA) != 1) stop("MALA is not length 1.", call.=FALSE)
    if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)

    #### Compute a starting value for beta from a quasi-Poisson GLM fit,
    #### perturbed by its estimated standard errors
    if(!is.null(X))
    {
        mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
        beta.mean <- mod.glm$coefficients
        beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
        beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
        regression.vec <- X.standardised %*% beta
    }else
    {
        regression.vec <- rep(0, N.all)
    }

    #### CAR quantities - triplet representation of W for the compiled updaters
    W.quants <- common.Wcheckformat.leroux(W)
    K <- W.quants$n
    N <- N.all / K
    W <- W.quants$W
    W.triplet <- W.quants$W.triplet
    W.n.triplet <- W.quants$n.triplet
    W.triplet.sum <- W.quants$W.triplet.sum
    n.neighbours <- W.quants$n.neighbours
    W.begfin <- W.quants$W.begfin

    #### Format and check the number of clusters G
    if(length(G) != 1) stop("G is the wrong length.", call.=FALSE)
    if(!is.numeric(G)) stop("G is not numeric.", call.=FALSE)
    if(G <= 1) stop("G is less than 2.", call.=FALSE)
    if(G != round(G)) stop("G is not an integer.", call.=FALSE)
    ## Gstar is the central cluster towards which allocations are penalised
    if(floor(G/2) == ceiling(G/2))
    {
        Gstar <- G/2
    }else
    {
        Gstar <- (G+1)/2
    }

    #### Priors
    if(!is.null(X))
    {
        if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
        if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
        prior.beta.check(prior.mean.beta, prior.var.beta, p)
    }else
    {}
    if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
    prior.var.check(prior.tau2)
    if(is.null(prior.delta)) prior.delta <- 10
    if(length(prior.delta) != 1) stop("the prior value for delta is the wrong length.", call.=FALSE)
    if(!is.numeric(prior.delta)) stop("the prior value for delta is not numeric.", call.=FALSE)
    if(sum(is.na(prior.delta)) != 0) stop("the prior value for delta has missing values.", call.=FALSE)
    if(prior.delta <= 0) stop("the prior value for delta is not positive.", call.=FALSE)

    #### Compute the blocking structure for the block updates of beta
    if(!is.null(X))
    {
        block.temp <- common.betablock(p)
        beta.beg <- block.temp[[1]]
        beta.fin <- block.temp[[2]]
        n.beta.block <- block.temp[[3]]
        list.block <- as.list(rep(NA, n.beta.block*2))
        for(r in 1:n.beta.block)
        {
            list.block[[r]] <- beta.beg[r]:beta.fin[r]-1   # 0-indexed for C code
            list.block[[r+n.beta.block]] <- length(list.block[[r]])
        }
    }else
    {}

    #### MCMC quantities - burnin, n.sample, thin
    common.burnin.nsample.thin.check(burnin, n.sample, thin)

    #############################
    #### Initial parameter values
    #############################
    #### Cluster the GLM-scale residuals with k-means to initialise lambda and Z
    log.Y <- log(Y)
    log.Y[Y==0] <- -0.1   # avoid -Inf for zero counts
    res.temp <- log.Y - regression.vec - offset
    clust <- kmeans(res.temp, G)
    lambda <- clust$centers[order(clust$centers)]   # enforce ordered intercepts
    lambda.mat <- matrix(rep(lambda, N), nrow=N, byrow=TRUE)
    Z <- rep(1, N.all)
    for(j in 2:G)
    {
        Z[clust$cluster==order(clust$centers)[j]] <- j
    }
    Z.mat <- matrix(Z, nrow=K, ncol=N, byrow=FALSE)
    mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE)
    #### Random effects phi start as small Gaussian noise around zero
    res.sd <- sd(res.temp, na.rm=TRUE)/5
    phi.mat <- matrix(rnorm(n=N.all, mean=0, sd=res.sd), nrow=K, byrow=FALSE)
    phi <- as.numeric(phi.mat)
    tau2 <- var(phi)/10
    gamma <- runif(1)
    delta <- runif(1, 1, min(2, prior.delta))

    ###############################
    #### Set up the MCMC quantities
    ###############################
    #### Matrices to store the retained samples
    n.keep <- floor((n.sample - burnin)/thin)
    samples.Z <- array(NA, c(n.keep, N.all))
    samples.lambda <- array(NA, c(n.keep, G))
    samples.delta <- array(NA, c(n.keep, 1))
    samples.tau2 <- array(NA, c(n.keep, 1))
    samples.gamma <- array(NA, c(n.keep, 1))
    samples.phi <- array(NA, c(n.keep, N.all))
    samples.fitted <- array(NA, c(n.keep, N.all))
    samples.loglike <- array(NA, c(n.keep, N.all))

    #### Metropolis quantities: acceptance counters and proposal s.d.'s.
    #### accept holds (lambda acc, lambda tries, delta acc, delta tries,
    #### phi acc, phi tries[, beta acc, beta tries]).
    if(!is.null(X))
    {
        samples.beta <- array(NA, c(n.keep, p))
        accept.all <- rep(0, 8)
        proposal.sd.beta <- 0.01
    }else
    {
        accept.all <- rep(0, 6)
    }
    accept <- accept.all
    proposal.sd.lambda <- 0.1
    proposal.sd.delta <- 0.1
    proposal.sd.phi <- 0.1
    ## All possible cluster labels for each (unit, time>1) pair, used in the
    ## normalising constant of the delta full conditional
    delta.update <- matrix(rep(1:G, N.all-K), ncol=G, byrow=TRUE)
    tau2.posterior.shape <- prior.tau2[1] + N * (K-1) / 2

    ##########################################
    #### Specify quantities that do not change
    ##########################################
    Y.mat <- matrix(Y, nrow=K, ncol=N, byrow=FALSE)
    offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
    regression.mat <- matrix(regression.vec, nrow=K, ncol=N, byrow=FALSE)

    ###########################
    #### Run the Bayesian model
    ###########################
    #### Start timer / progress bar
    if(verbose)
    {
        cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
        progressBar <- txtProgressBar(style = 3)
        percentage.points <- round((1:100/100)*n.sample)
    }else
    {
        percentage.points <- round((1:100/100)*n.sample)
    }

    #### Create the MCMC samples
    for(j in 1:n.sample)
    {
        ####################
        ## Sample from beta
        ####################
        if(!is.null(X))
        {
            offset.temp <- offset + as.numeric(mu) + as.numeric(phi.mat)
            if(p>2)
            {
                temp <- poissonbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
            }else
            {
                temp <- poissonbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, proposal.sd.beta)
            }
            beta <- temp[[1]]
            accept[7] <- accept[7] + temp[[2]]
            accept[8] <- accept[8] + n.beta.block
            regression.vec <- X.standardised %*% beta
            regression.mat <- matrix(regression.vec, nrow=K, ncol=N, byrow=FALSE)
        }else
        {}

        #######################
        #### Sample from lambda
        #######################
        #### Propose new ordered values via sequential truncated normals,
        #### padding with -100/100 so each element stays between its neighbours
        proposal.extend <- c(-100, lambda, 100)
        for(r in 1:G)
        {
            proposal.extend[(r+1)] <- rtruncnorm(n=1, a=proposal.extend[r], b=proposal.extend[(r+2)], mean=proposal.extend[(r+1)], sd=proposal.sd.lambda)
        }
        proposal <- proposal.extend[-c(1, (G+2))]

        #### Joint Metropolis accept/reject for the whole lambda vector
        lp.current <- lambda[Z] + offset + as.numeric(regression.mat) + as.numeric(phi.mat)
        lp.proposal <- proposal[Z] + offset + as.numeric(regression.mat) + as.numeric(phi.mat)
        like.current <- Y * lp.current - exp(lp.current)
        like.proposal <- Y * lp.proposal - exp(lp.proposal)
        prob <- exp(sum(like.proposal - like.current))
        if(prob > runif(1))
        {
            lambda <- proposal
            lambda.mat <- matrix(rep(lambda, N), nrow=N, byrow=TRUE)
            mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE)
            accept[1] <- accept[1] + 1
        }else
        {}
        accept[2] <- accept[2] + 1

        ##################
        #### Sample from Z
        ##################
        #### Log normalising constants of the first-period allocation prior
        prior.offset <- rep(NA, G)
        for(r in 1:G)
        {
            prior.offset[r] <- log(sum(exp(-delta * ((1:G - r)^2 + (1:G - Gstar)^2))))
        }
        mu.offset <- exp(offset.mat + regression.mat + phi.mat)
        Z.mat <- Zupdatesqpoi(Z=Z.mat, Offset=mu.offset, Y=Y.mat, delta=delta, lambda=lambda, nsites=K, ntime=N, G=G, SS=1:G, prioroffset=prior.offset, Gstar=Gstar)
        Z <- as.numeric(Z.mat)
        mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE)

        ######################
        #### Sample from delta
        ######################
        #### Truncated-normal random walk on (1, prior.delta)
        proposal.delta <- rtruncnorm(n=1, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta)
        sum.delta1 <- sum((Z - Gstar)^2)
        sum.delta2 <- sum((Z.mat[ ,-1] - Z.mat[ ,-N])^2)
        current.fc1 <- -delta * (sum.delta1 + sum.delta2) - K * log(sum(exp(-delta * (1:G - Gstar)^2)))
        proposal.fc1 <- -proposal.delta * (sum.delta1 + sum.delta2) - K * log(sum(exp(-proposal.delta * (1:G - Gstar)^2)))
        #### Normalising constants over all candidate labels at times 2..N
        Z.temp <- matrix(rep(as.numeric(Z.mat[ ,-N]), G), ncol=G, byrow=FALSE)
        Z.temp2 <- (delta.update - Z.temp)^2 + (delta.update - Gstar)^2
        current.fc <- current.fc1 - sum(log(apply(exp(-delta * Z.temp2), 1, sum)))
        proposal.fc <- proposal.fc1 - sum(log(apply(exp(-proposal.delta * Z.temp2), 1, sum)))
        prob <- exp(proposal.fc - current.fc)
        if(prob > runif(1))
        {
            delta <- proposal.delta
            accept[3] <- accept[3] + 1
        }else
        {}
        accept[4] <- accept[4] + 1

        ####################
        #### Sample from phi
        ####################
        phi.offset <- mu + offset.mat + regression.mat
        if(MALA)
        {
            temp1 <- poissonarcarupdateMALA(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, gamma, 1, Y.mat, proposal.sd.phi, phi.offset, W.triplet.sum)
        }else
        {
            temp1 <- poissonarcarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, gamma, 1, Y.mat, proposal.sd.phi, phi.offset, W.triplet.sum)
        }
        phi.temp <- temp1[[1]]
        phi <- as.numeric(phi.temp)
        #### Centre phi within each cluster for identifiability with lambda
        for(i in 1:G)
        {
            phi[which(Z==i)] <- phi[which(Z==i)] - mean(phi[which(Z==i)])
        }
        phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
        accept[5] <- accept[5] + temp1[[2]]
        accept[6] <- accept[6] + K*N

        ####################
        ## Sample from gamma
        ####################
        #### Gibbs step from a truncated normal full conditional on (0, 1)
        temp2 <- gammaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, 1)
        mean.gamma <- temp2[[1]] / temp2[[2]]
        sd.gamma <- sqrt(tau2 / temp2[[2]])
        gamma <- rtruncnorm(n=1, a=0, b=1, mean=mean.gamma, sd=sd.gamma)

        ####################
        ## Samples from tau2
        ####################
        #### Gibbs step from the inverse-gamma full conditional
        temp3 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, 1, gamma)
        tau2.posterior.scale <- temp3 + prior.tau2[2]
        tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))

        #########################
        ## Calculate the deviance
        #########################
        lp <- as.numeric(mu + offset.mat + regression.mat + phi.mat)
        fitted <- exp(lp)
        loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)

        ###################
        ## Save the results
        ###################
        if(j > burnin & (j-burnin)%%thin==0)
        {
            ele <- (j - burnin) / thin
            samples.delta[ele, ] <- delta
            samples.lambda[ele, ] <- lambda
            samples.Z[ele, ] <- Z
            samples.phi[ele, ] <- as.numeric(phi.mat)
            samples.tau2[ele, ] <- tau2
            samples.gamma[ele, ] <- gamma
            samples.fitted[ele, ] <- fitted
            samples.loglike[ele, ] <- loglike
            if(!is.null(X)) samples.beta[ele, ] <- beta
        }else
        {}

        ########################################
        ## Self tune the acceptance probabilties
        ########################################
        #### Every 100 iterations, rescale proposal s.d.'s towards target
        #### acceptance rates and reset the counters
        k <- j/100
        if(ceiling(k)==floor(k))
        {
            if(!is.null(X))
            {
                if(p>2)
                {
                    proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 40, 50)
                }else
                {
                    proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 30, 40)
                }
                proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50)
                proposal.sd.lambda <- common.accceptrates2(accept[1:2], proposal.sd.lambda, 20, 40, 10)
                proposal.sd.delta <- common.accceptrates2(accept[3:4], proposal.sd.delta, 40, 50, prior.delta/6)
                accept.all <- accept.all + accept
                accept <- rep(0, 8)
            }else
            {
                proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50)
                proposal.sd.lambda <- common.accceptrates2(accept[1:2], proposal.sd.lambda, 20, 40, 10)
                proposal.sd.delta <- common.accceptrates2(accept[3:4], proposal.sd.delta, 40, 50, prior.delta/6)
                accept.all <- accept.all + accept
                accept <- rep(0, 6)
            }
        }else
        {}

        ################################
        ## print progress to the console
        ################################
        if(j %in% percentage.points && verbose)
        {
            setTxtProgressBar(progressBar, j/n.sample)
        }
    }

    #### end timer
    if(verbose)
    {
        cat("\nSummarising results.")
        close(progressBar)
    }else
    {}

    ###################################
    #### Summarise and save the results
    ###################################
    #### Compute the acceptance rates (gamma and tau2 are Gibbs steps)
    accept.lambda <- 100 * accept.all[1] / accept.all[2]
    accept.delta <- 100 * accept.all[3] / accept.all[4]
    accept.phi <- 100 * accept.all[5] / accept.all[6]
    accept.gamma <- 100
    if(!is.null(X))
    {
        accept.beta <- 100 * accept.all[7] / accept.all[8]
        accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.gamma)
        names(accept.final) <- c("beta", "lambda", "delta", "phi", "rho.T")
    }else
    {
        accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.gamma)
        names(accept.final) <- c("lambda", "delta", "phi", "rho.T")
    }

    #### Compute the fitted deviance at the posterior means / modal allocations
    mean.Z <- round(apply(samples.Z, 2, mean), 0)
    mean.lambda <- apply(samples.lambda, 2, mean)
    mean.mu <- matrix(mean.lambda[mean.Z], nrow=K, ncol=N, byrow=FALSE)
    if(!is.null(X))
    {
        mean.beta <- apply(samples.beta, 2, mean)
        regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
    }else
    {}
    mean.phi <- matrix(apply(samples.phi, 2, mean), nrow=K, byrow=FALSE)
    fitted.mean <- as.numeric(exp(mean.mu + offset.mat + regression.mat + mean.phi))
    deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE))

    #### Model fit criteria (DIC, WAIC, etc.)
    modelfit <- common.modelfit(samples.loglike, deviance.fitted)

    #### Create the fitted values and residuals
    fitted.values <- apply(samples.fitted, 2, mean)
    response.residuals <- as.numeric(Y) - fitted.values
    pearson.residuals <- response.residuals / sqrt(fitted.values)
    residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)

    #### Transform the beta samples back to the original covariate scale
    if(!is.null(X))
    {
        samples.beta.orig <- common.betatransform(samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
    }else
    {}

    #### Create the summary table: median, 95% CI, n.sample, % accept,
    #### effective sample size and Geweke diagnostic for each parameter
    summary.hyper <- array(NA, c(3, 7))
    summary.hyper[1,1:3] <- quantile(samples.delta, c(0.5, 0.025, 0.975))
    summary.hyper[2,1:3] <- quantile(samples.tau2, c(0.5, 0.025, 0.975))
    summary.hyper[3,1:3] <- quantile(samples.gamma, c(0.5, 0.025, 0.975))
    rownames(summary.hyper) <- c("delta", "tau2", "rho.T")
    summary.hyper[1, 4:7] <- c(n.keep, accept.delta, effectiveSize(mcmc(samples.delta)), geweke.diag(mcmc(samples.delta))$z)
    summary.hyper[2, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.tau2)), geweke.diag(mcmc(samples.tau2))$z)
    summary.hyper[3, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.gamma)), geweke.diag(mcmc(samples.gamma))$z)
    summary.lambda <- t(apply(samples.lambda, 2, quantile, c(0.5, 0.025, 0.975)))
    summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(mcmc(samples.lambda)), geweke.diag(mcmc(samples.lambda))$z)
    summary.lambda <- matrix(summary.lambda, ncol=7)
    rownames(summary.lambda) <- paste("lambda", 1:G, sep="")
    if(!is.null(X))
    {
        samples.beta.orig <- mcmc(samples.beta.orig)
        summary.beta <- t(apply(samples.beta.orig, 2, quantile, c(0.5, 0.025, 0.975)))
        summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta, p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
        rownames(summary.beta) <- colnames(X)
        colnames(summary.beta) <- c("Median", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
        summary.results <- rbind(summary.beta, summary.lambda, summary.hyper)
    }else
    {
        summary.results <- rbind(summary.lambda, summary.hyper)
    }
    summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
    summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
    colnames(summary.results) <- c("Median", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")

    #### Compile and return the results
    #### Harmonise samples in case of them not being generated
    if(is.null(X)) samples.beta.orig <- NA
    samples <- list(beta=mcmc(samples.beta.orig), lambda=mcmc(samples.lambda), Z=mcmc(samples.Z), delta=mcmc(samples.delta), phi=mcmc(samples.phi), tau2=mcmc(samples.tau2), rho.T=mcmc(samples.gamma), fitted=mcmc(samples.fitted))
    model.string <- c("Likelihood model - Poisson (log link function)", "\nLatent structure model - Localised autoregressive CAR model\n")
    results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=mean.Z, formula=formula, model=model.string, X=X)
    class(results) <- "CARBayesST"

    #### Finish by stating the time taken
    if(verbose)
    {
        b <- proc.time()
        cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
    }else
    {}
    return(results)
}
| /R/poisson.CARlocalised.R | no_license | dilernia/CARBayesST | R | false | false | 19,046 | r | poisson.CARlocalised <- function(formula, data=NULL, G, W, burnin, n.sample, thin=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.delta=NULL, prior.tau2=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame.localised(formula, data, "poisson")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- as.numeric(!is.na(Y))
n.miss <- N.all - sum(which.miss)
if(n.miss>0) stop("the response has missing 'NA' values.", call.=FALSE)
#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)
#### Compute a starting value for beta
if(!is.null(X))
{
mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
regression.vec <- X.standardised %*% beta
}else
{
regression.vec <- rep(0, N.all)
}
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Format and check the number of clusters G
if(length(G)!=1) stop("G is the wrong length.", call.=FALSE)
if(!is.numeric(G)) stop("G is not numeric.", call.=FALSE)
if(G<=1) stop("G is less than 2.", call.=FALSE)
if(G!=round(G)) stop("G is not an integer.", call.=FALSE)
if(floor(G/2)==ceiling(G/2))
{
Gstar <- G/2
}else
{
Gstar <- (G+1)/2
}
#### Priors
if(!is.null(X))
{
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
}else
{}
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.var.check(prior.tau2)
if(is.null(prior.delta)) prior.delta <- 10
if(length(prior.delta)!=1) stop("the prior value for delta is the wrong length.", call.=FALSE)
if(!is.numeric(prior.delta)) stop("the prior value for delta is not numeric.", call.=FALSE)
if(sum(is.na(prior.delta))!=0) stop("the prior value for delta has missing values.", call.=FALSE)
if(prior.delta<=0) stop("the prior value for delta is not positive.", call.=FALSE)
#### Compute the blocking structure for beta
if(!is.null(X))
{
## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
}else
{}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
#############################
#### Initial parameter values
#############################
log.Y <- log(Y)
log.Y[Y==0] <- -0.1
res.temp <- log.Y - regression.vec - offset
clust <- kmeans(res.temp,G)
lambda <- clust$centers[order(clust$centers)]
lambda.mat <- matrix(rep(lambda, N), nrow=N, byrow=TRUE)
Z <- rep(1, N.all)
for(j in 2:G)
{
Z[clust$cluster==order(clust$centers)[j]] <- j
}
Z.mat <- matrix(Z, nrow=K, ncol=N, byrow=FALSE)
mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE)
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi.mat <- matrix(rnorm(n=N.all, mean=0, sd = res.sd), nrow=K, byrow=FALSE)
phi <- as.numeric(phi.mat)
tau2 <- var(phi)/10
gamma <- runif(1)
delta <- runif(1,1, min(2, prior.delta))
###############################
#### Set up the MCMC quantities
###############################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.Z <- array(NA, c(n.keep, N.all))
samples.lambda <- array(NA, c(n.keep, G))
samples.delta <- array(NA, c(n.keep, 1))
samples.tau2 <- array(NA, c(n.keep, 1))
samples.gamma <- array(NA, c(n.keep, 1))
samples.phi <- array(NA, c(n.keep, N.all))
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
#### Specify the Metropolis quantities
if(!is.null(X))
{
samples.beta <- array(NA, c(n.keep, p))
accept.all <- rep(0,8)
proposal.corr.beta <- solve(t(X.standardised) %*% X.standardised)
chol.proposal.corr.beta <- chol(proposal.corr.beta)
proposal.sd.beta <- 0.01
}else
{
accept.all <- rep(0,6)
}
accept <- accept.all
proposal.sd.lambda <- 0.1
proposal.sd.delta <- 0.1
proposal.sd.phi <- 0.1
Y.extend <- matrix(rep(Y, G), byrow=F, ncol=G)
delta.update <- matrix(rep(1:G, N.all-K), ncol=G, byrow=T)
tau2.posterior.shape <- prior.tau2[1] + N * (K-1) /2
##########################################
#### Specify quantities that do not change
##########################################
which.miss.mat <- matrix(which.miss, nrow=K, ncol=N, byrow=FALSE)
Y.mat <- matrix(Y, nrow=K, ncol=N, byrow=FALSE)
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(regression.vec, nrow=K, ncol=N, byrow=FALSE)
###########################
#### Run the Bayesian model
###########################
#### Start timer
if(verbose)
{
cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################
## Sample from beta
####################
if(!is.null(X))
{
proposal <- beta + (sqrt(proposal.sd.beta)* t(chol.proposal.corr.beta)) %*% rnorm(p)
proposal.beta <- beta
offset.temp <- offset + as.numeric(mu) + as.numeric(phi.mat)
if(p>2)
{
temp <- poissonbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block)
}else
{
temp <- poissonbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, proposal.sd.beta)
}
beta <- temp[[1]]
accept[7] <- accept[7] + temp[[2]]
accept[8] <- accept[8] + n.beta.block
regression.vec <- X.standardised %*% beta
regression.mat <- matrix(regression.vec, nrow=K, ncol=N, byrow=FALSE)
}else
{}
#######################
#### Sample from lambda
#######################
#### Propose a new value
proposal.extend <- c(-100, lambda, 100)
for(r in 1:G)
{
proposal.extend[(r+1)] <- rtruncnorm(n=1, a=proposal.extend[r], b=proposal.extend[(r+2)], mean=proposal.extend[(r+1)], sd=proposal.sd.lambda)
}
proposal <- proposal.extend[-c(1, (G+2))]
#### Compute the data likelihood
lp.current <- lambda[Z] + offset + as.numeric(regression.mat) + as.numeric(phi.mat)
lp.proposal <- proposal[Z] + offset + as.numeric(regression.mat) + as.numeric(phi.mat)
like.current <- Y * lp.current - exp(lp.current)
like.proposal <- Y * lp.proposal - exp(lp.proposal)
prob <- exp(sum(like.proposal - like.current))
if(prob > runif(1))
{
lambda <- proposal
lambda.mat <- matrix(rep(lambda, N), nrow=N, byrow=TRUE)
mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE)
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
##################
#### Sample from Z
##################
prior.offset <- rep(NA, G)
for(r in 1:G)
{
prior.offset[r] <- log(sum(exp(-delta * ((1:G - r)^2 + (1:G - Gstar)^2))))
}
mu.offset <- exp(offset.mat + regression.mat + phi.mat)
test <- Zupdatesqpoi(Z=Z.mat, Offset=mu.offset, Y=Y.mat, delta=delta, lambda=lambda, nsites=K, ntime=N, G=G, SS=1:G, prioroffset=prior.offset, Gstar=Gstar)
Z.mat <- test
Z <- as.numeric(Z.mat)
mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE)
######################
#### Sample from delta
######################
proposal.delta <- rtruncnorm(n=1, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta)
sum.delta1 <- sum((Z - Gstar)^2)
sum.delta2 <- sum((Z.mat[ ,-1] - Z.mat[ ,-N])^2)
current.fc1 <- -delta * (sum.delta1 + sum.delta2) - K * log(sum(exp(-delta * (1:G - Gstar)^2)))
proposal.fc1 <- -proposal.delta * (sum.delta1 + sum.delta2) - K * log(sum(exp(-proposal.delta * (1:G - Gstar)^2)))
Z.temp <- matrix(rep(as.numeric(Z.mat[ ,-N]),G), ncol=G, byrow=FALSE)
Z.temp2 <- (delta.update - Z.temp)^2 + (delta.update - Gstar)^2
current.fc <- current.fc1 - sum(log(apply(exp(-delta * Z.temp2),1,sum)))
proposal.fc <- proposal.fc1 - sum(log(apply(exp(-proposal.delta * Z.temp2),1,sum)))
prob <- exp(proposal.fc - current.fc)
if(prob > runif(1))
{
delta <- proposal.delta
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
####################
#### Sample from phi
####################
phi.offset <- mu + offset.mat + regression.mat
if(MALA)
{
temp1 <- poissonarcarupdateMALA(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, gamma, 1, Y.mat, proposal.sd.phi, phi.offset, W.triplet.sum)
}else
{
temp1 <- poissonarcarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, gamma, 1, Y.mat, proposal.sd.phi, phi.offset, W.triplet.sum)
}
phi.temp <- temp1[[1]]
phi <- as.numeric(phi.temp)
for(i in 1:G)
{
phi[which(Z==i)] <- phi[which(Z==i)] - mean(phi[which(Z==i)])
}
phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE)
accept[5] <- accept[5] + temp1[[2]]
accept[6] <- accept[6] + K*N
####################
## Sample from gamma
####################
temp2 <- gammaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, 1)
mean.gamma <- temp2[[1]] / temp2[[2]]
sd.gamma <- sqrt(tau2 / temp2[[2]])
gamma <- rtruncnorm(n=1, a=0, b=1, mean=mean.gamma, sd=sd.gamma)
####################
## Samples from tau2
####################
temp3 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, 1, gamma)
tau2.posterior.scale <- temp3 + prior.tau2[2]
tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale))
#########################
## Calculate the deviance
#########################
lp <- as.numeric(mu + offset.mat + regression.mat + phi.mat)
fitted <- exp(lp)
loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.delta[ele, ] <- delta
samples.lambda[ele, ] <- lambda
samples.Z[ele, ] <- Z
samples.phi[ele, ] <- as.numeric(phi.mat)
samples.tau2[ele, ] <- tau2
samples.gamma[ele, ] <- gamma
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(!is.null(X)) samples.beta[ele, ] <- beta
}else
{}
########################################
## Self tune the acceptance probabilties
########################################
k <- j/100
if(ceiling(k)==floor(k))
{
if(!is.null(X))
{
if(p>2)
{
proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 40, 50)
}else
{
proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 30, 40)
}
proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50)
proposal.sd.lambda <- common.accceptrates2(accept[1:2], proposal.sd.lambda, 20, 40, 10)
proposal.sd.delta <- common.accceptrates2(accept[3:4], proposal.sd.delta, 40, 50, prior.delta/6)
accept.all <- accept.all + accept
accept <- rep(0,8)
}else
{
proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50)
proposal.sd.lambda <- common.accceptrates2(accept[1:2], proposal.sd.lambda, 20, 40, 10)
proposal.sd.delta <- common.accceptrates2(accept[3:4], proposal.sd.delta, 40, 50, prior.delta/6)
accept.all <- accept.all + accept
accept <- rep(0,6)
}
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
cat("\nSummarising results.")
close(progressBar)
}else
{}
###################################
#### Summarise and save the results
###################################
#### Compute the acceptance rates
accept.lambda <- 100 * accept.all[1] / accept.all[2]
accept.delta <- 100 * accept.all[3] / accept.all[4]
accept.phi <- 100 * accept.all[5] / accept.all[6]
accept.gamma <- 100
if(!is.null(X))
{
accept.beta <- 100 * accept.all[7] / accept.all[8]
accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.gamma)
names(accept.final) <- c("beta", "lambda", "delta", "phi", "rho.T")
}else
{
accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.gamma)
names(accept.final) <- c("lambda", "delta", "phi", "rho.T")
}
#### Compute the fitted deviance
mean.Z <- round(apply(samples.Z,2,mean), 0)
mean.lambda <- apply(samples.lambda, 2, mean)
mean.mu <- matrix(mean.lambda[mean.Z], nrow=K, ncol=N, byrow=FALSE)
if(!is.null(X))
{
mean.beta <- apply(samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
}else
{}
mean.phi <- matrix(apply(samples.phi, 2, mean), nrow=K, byrow=FALSE)
fitted.mean <- as.numeric(exp(mean.mu + offset.mat + regression.mat + mean.phi))
deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE))
#### Model fit criteria
modelfit <- common.modelfit(samples.loglike, deviance.fitted)
#### Create the fitted values and residuals
fitted.values <- apply(samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(fitted.values)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
#### Transform the parameters back to the original covariate scale
if(!is.null(X))
{
samples.beta.orig <- common.betatransform(samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
}else
{}
#### Create a summary object
summary.hyper <- array(NA, c(3, 7))
summary.hyper[1,1:3] <- quantile(samples.delta, c(0.5, 0.025, 0.975))
summary.hyper[2,1:3] <- quantile(samples.tau2, c(0.5, 0.025, 0.975))
summary.hyper[3,1:3] <- quantile(samples.gamma, c(0.5, 0.025, 0.975))
rownames(summary.hyper) <- c("delta", "tau2", "rho.T")
summary.hyper[1, 4:7] <- c(n.keep, accept.delta, effectiveSize(mcmc(samples.delta)), geweke.diag(mcmc(samples.delta))$z)
summary.hyper[2, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.tau2)), geweke.diag(mcmc(samples.tau2))$z)
summary.hyper[3, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.gamma)), geweke.diag(mcmc(samples.gamma))$z)
summary.lambda <- array(NA, c(G,1))
summary.lambda <- t(apply(samples.lambda, 2, quantile, c(0.5, 0.025, 0.975)))
summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(mcmc(samples.lambda)), geweke.diag(mcmc(samples.lambda))$z)
summary.lambda <- matrix(summary.lambda, ncol=7)
rownames(summary.lambda) <- paste("lambda", 1:G, sep="")
if(!is.null(X))
{
samples.beta.orig <- mcmc(samples.beta.orig)
summary.beta <- t(apply(samples.beta.orig, 2, quantile, c(0.5, 0.025, 0.975)))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Median", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results <- rbind(summary.beta, summary.lambda, summary.hyper)
}else
{
summary.results <- rbind(summary.lambda, summary.hyper)
}
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
colnames(summary.results) <- c("Median", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
#### Compile and return the results
#### Harmonise samples in case of them not being generated
if(is.null(X)) samples.beta.orig = NA
samples <- list(beta=mcmc(samples.beta.orig), lambda=mcmc(samples.lambda), Z=mcmc(samples.Z), delta=mcmc(samples.delta), phi = mcmc(samples.phi), tau2=mcmc(samples.tau2), rho.T=mcmc(samples.gamma), fitted=mcmc(samples.fitted))
model.string <- c("Likelihood model - Poisson (log link function)", "\nLatent structure model - Localised autoregressive CAR model\n")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=mean.Z, formula=formula, model=model.string, X=X)
class(results) <- "CARBayesST"
#### Finish by stating the time taken
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
|
#' Select the best number of basis functions by leave-one-column-out L1 error
#'
#' For every candidate basis size in `basisList`, each column of `xMat`/`yMat`
#' is held out in turn, a concurrent functional regression is fitted on the
#' remaining columns via `L1Error.fRegress()`, and the L1 prediction errors
#' are averaged over the held-out columns. The basis size with the smallest
#' average error is reported.
#'
#' @param xMat Matrix of predictor curves, one observation per column.
#' @param yMat Matrix of response curves, one observation per column
#'   (assumed to have the same number of columns as `xMat`).
#' @param basisList Vector of candidate numbers of basis functions to try.
#' @param BasisType Basis family forwarded to `L1Error.fRegress()`
#'   (default "Fourier").
#' @param lambda Roughness penalty forwarded to `L1Error.fRegress()`.
#' @return A named list:
#'   * `"Best Basis"` — the index into `basisList` of the winning candidate
#'     (which.min preserves the name, so the basis size itself is carried as
#'     the element's name);
#'   * `"Smallest L1diff"` — the corresponding mean L1 error.
L1bestEst <- function(xMat, yMat, basisList, BasisType = "Fourier", lambda = 10^-1) {
  # Rows index the candidate basis sizes, columns index the held-out column.
  L1diff <- matrix(NA, nrow = length(basisList), ncol = ncol(xMat))
  rownames(L1diff) <- as.character(basisList)

  # seq_len/seq_along (not 1:n) so zero-length input yields zero iterations.
  for (i in seq_len(ncol(xMat))) {
    # Column i is the test case; all remaining columns form the training set.
    trainX <- xMat[, -i]
    trainY <- yMat[, -i]
    testX <- xMat[, i]
    testY <- yMat[, i]
    for (j in seq_along(basisList)) {
      L1diff[j, i] <- L1Error.fRegress(trainX, trainY, predVec = testX,
                                       trueY = testY, nBasis = basisList[j],
                                       Basis = BasisType, lambda = lambda)
    }
  }

  # Mean L1 error per candidate basis size, averaged over held-out columns;
  # the smallest mean identifies the basis size that generalises best.
  average.L1diff <- rowMeans(L1diff)
  BestBasis <- which.min(average.L1diff)
  smallest.L1diff <- average.L1diff[BestBasis]

  out <- list(BestBasis, smallest.L1diff)
  names(out) <- c("Best Basis", "Smallest L1diff")
  out
}
| /fdaconcur/R/L1bestEst.R | no_license | rpittman188/fdaconcur | R | false | false | 1,283 | r | #Make a bestEstL1 Requires L1Error.fRegress but does it for every combination of basis number and event
L1bestEst<- function(xMat, yMat, basisList, BasisType = "Fourier",lambda=10^-1){
L1diff<- matrix(NA, ncol=ncol(xMat), nrow=length(basisList)) #Row is each basis trial, i is the test column for that itteration
rownames(L1diff)<-as.character(basisList)
for(i in 1:ncol(xMat)){
trainX<-xMat[,-i]
trainY<-yMat[,-i]
testX<-xMat[,i]
testY<-yMat[,i]
for(j in 1:length(basisList)){
L1diff[j,i]<-L1Error.fRegress(trainX, trainY, predVec=testX, trueY = testY, nBasis = basisList[j], Basis = BasisType, lambda = lambda)
}
}
#after going through both loops all the times, we have a matrix with errors for all the inputed basis after using all of the columns as the test data.
#now lets take the average of each column. Smallest means that basis has a smaller average L1 distance when leaving 1 out and using the rest to predict.
average.L1diff<- apply(L1diff,1,mean) #each row mean which is for each specific number of basis functions in the basisList vector
BestBasis<-which.min(average.L1diff)
smallest.L1diff<-average.L1diff[BestBasis]
out<-list(BestBasis,smallest.L1diff)
names(out)<-c("Best Basis", "Smallest L1diff")
return(out)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.